[librsb] 01/03: New upstream version 1.2.0-rc5

Rafael Laboissiere rafael at debian.org
Fri Sep 23 00:17:01 UTC 2016


This is an automated email from the git hooks/post-receive script.

rafael pushed a commit to branch master
in repository librsb.

commit c1620de57e6ca52d169c53ba8ca3973f019f4f56
Author: Rafael Laboissiere <rafael at debian.org>
Date:   Thu Sep 22 20:40:35 2016 -0300

    New upstream version 1.2.0-rc5
---
 A.mtx                                              |     11 +
 AUTHORS                                            |      2 +
 COPYING                                            |    165 +
 ChangeLog                                          |      0
 INSTALL                                            |    370 +
 L1C.m4                                             |    151 +
 Makefile.am                                        |    603 +
 Makefile.in                                        |   2596 +
 NEWS                                               |    225 +
 README                                             |    678 +
 aclocal.m4                                         |   9616 +
 autogen.sh                                         |     11 +
 bench/Makefile.am                                  |      5 +
 bench/Makefile.in                                  |    434 +
 bench/dense.sh                                     |    333 +
 bench/dense_quick.sh                               |     39 +
 bench/rplot.sh                                     |     53 +
 bench/spmv.sh                                      |    235 +
 bench/trsv.sh                                      |    199 +
 blas_sparse.h                                      |    577 +
 blas_sparse/Makefile.am                            |     11 +
 blas_sparse/Makefile.in                            |    439 +
 blas_sparse/blas_enum.F90                          |     81 +
 blas_sparse/blas_enum.h                            |    143 +
 blas_sparse/blas_sparse.h                          |      8 +
 blas_sparse/blas_sparse_proto.h                    |    205 +
 ch2icfb.c                                          |   1428 +
 compile                                            |    343 +
 config.guess                                       |   1530 +
 config.sub                                         |   1782 +
 configure                                          |  26061 +++
 configure.ac                                       |   1369 +
 depcomp                                            |    708 +
 do_unroll.m4                                       |   1820 +
 doc/Doxyfile                                       |    551 +
 doc/Makefile.am                                    |     70 +
 doc/Makefile.in                                    |    584 +
 doc/html/annotated.html                            |    128 +
 doc/html/bc_s.png                                  |    Bin 0 -> 680 bytes
 doc/html/bdwn.png                                  |    Bin 0 -> 147 bytes
 doc/html/blas__sparse_8h.html                      |   1768 +
 doc/html/classblas__sparse.html                    |   5523 +
 doc/html/classes.html                              |     83 +
 doc/html/classrsb.html                             |   2099 +
 doc/html/closed.png                                |    Bin 0 -> 132 bytes
 doc/html/deprecated.html                           |     76 +
 doc/html/doxygen.css                               |   1163 +
 doc/html/doxygen.png                               |    Bin 0 -> 3779 bytes
 doc/html/dynsections.js                            |     78 +
 doc/html/files.html                                |     75 +
 doc/html/form_0.png                                |    Bin 0 -> 371 bytes
 doc/html/form_1.png                                |    Bin 0 -> 362 bytes
 doc/html/form_10.png                               |    Bin 0 -> 450 bytes
 doc/html/form_11.png                               |    Bin 0 -> 212 bytes
 doc/html/form_12.png                               |    Bin 0 -> 421 bytes
 doc/html/form_13.png                               |    Bin 0 -> 445 bytes
 doc/html/form_14.png                               |    Bin 0 -> 405 bytes
 doc/html/form_15.png                               |    Bin 0 -> 475 bytes
 doc/html/form_16.png                               |    Bin 0 -> 503 bytes
 doc/html/form_17.png                               |    Bin 0 -> 495 bytes
 doc/html/form_18.png                               |    Bin 0 -> 438 bytes
 doc/html/form_19.png                               |    Bin 0 -> 418 bytes
 doc/html/form_2.png                                |    Bin 0 -> 216 bytes
 doc/html/form_20.png                               |    Bin 0 -> 435 bytes
 doc/html/form_21.png                               |    Bin 0 -> 220 bytes
 doc/html/form_22.png                               |    Bin 0 -> 477 bytes
 doc/html/form_23.png                               |    Bin 0 -> 270 bytes
 doc/html/form_24.png                               |    Bin 0 -> 223 bytes
 doc/html/form_25.png                               |    Bin 0 -> 370 bytes
 doc/html/form_26.png                               |    Bin 0 -> 684 bytes
 doc/html/form_27.png                               |    Bin 0 -> 449 bytes
 doc/html/form_28.png                               |    Bin 0 -> 474 bytes
 doc/html/form_29.png                               |    Bin 0 -> 481 bytes
 doc/html/form_3.png                                |    Bin 0 -> 224 bytes
 doc/html/form_30.png                               |    Bin 0 -> 574 bytes
 doc/html/form_31.png                               |    Bin 0 -> 218 bytes
 doc/html/form_32.png                               |    Bin 0 -> 445 bytes
 doc/html/form_33.png                               |    Bin 0 -> 461 bytes
 doc/html/form_34.png                               |    Bin 0 -> 469 bytes
 doc/html/form_35.png                               |    Bin 0 -> 570 bytes
 doc/html/form_36.png                               |    Bin 0 -> 496 bytes
 doc/html/form_37.png                               |    Bin 0 -> 245 bytes
 doc/html/form_38.png                               |    Bin 0 -> 879 bytes
 doc/html/form_39.png                               |    Bin 0 -> 460 bytes
 doc/html/form_4.png                                |    Bin 0 -> 375 bytes
 doc/html/form_40.png                               |    Bin 0 -> 496 bytes
 doc/html/form_41.png                               |    Bin 0 -> 505 bytes
 doc/html/form_42.png                               |    Bin 0 -> 763 bytes
 doc/html/form_43.png                               |    Bin 0 -> 289 bytes
 doc/html/form_44.png                               |    Bin 0 -> 320 bytes
 doc/html/form_45.png                               |    Bin 0 -> 220 bytes
 doc/html/form_46.png                               |    Bin 0 -> 414 bytes
 doc/html/form_47.png                               |    Bin 0 -> 701 bytes
 doc/html/form_48.png                               |    Bin 0 -> 752 bytes
 doc/html/form_49.png                               |    Bin 0 -> 225 bytes
 doc/html/form_5.png                                |    Bin 0 -> 351 bytes
 doc/html/form_50.png                               |    Bin 0 -> 224 bytes
 doc/html/form_6.png                                |    Bin 0 -> 518 bytes
 doc/html/form_7.png                                |    Bin 0 -> 358 bytes
 doc/html/form_8.png                                |    Bin 0 -> 432 bytes
 doc/html/form_9.png                                |    Bin 0 -> 493 bytes
 doc/html/formula.repository                        |     51 +
 doc/html/ftv2blank.png                             |    Bin 0 -> 86 bytes
 doc/html/ftv2cl.png                                |    Bin 0 -> 453 bytes
 doc/html/ftv2doc.png                               |    Bin 0 -> 746 bytes
 doc/html/ftv2folderclosed.png                      |    Bin 0 -> 616 bytes
 doc/html/ftv2folderopen.png                        |    Bin 0 -> 597 bytes
 doc/html/ftv2lastnode.png                          |    Bin 0 -> 86 bytes
 doc/html/ftv2link.png                              |    Bin 0 -> 746 bytes
 doc/html/ftv2mlastnode.png                         |    Bin 0 -> 246 bytes
 doc/html/ftv2mnode.png                             |    Bin 0 -> 246 bytes
 doc/html/ftv2mo.png                                |    Bin 0 -> 403 bytes
 doc/html/ftv2node.png                              |    Bin 0 -> 86 bytes
 doc/html/ftv2ns.png                                |    Bin 0 -> 388 bytes
 doc/html/ftv2plastnode.png                         |    Bin 0 -> 229 bytes
 doc/html/ftv2pnode.png                             |    Bin 0 -> 229 bytes
 doc/html/ftv2splitbar.png                          |    Bin 0 -> 314 bytes
 doc/html/ftv2vertline.png                          |    Bin 0 -> 86 bytes
 doc/html/functions.html                            |     83 +
 doc/html/functions_0x62.html                       |    329 +
 doc/html/functions_0x63.html                       |    132 +
 doc/html/functions_0x64.html                       |    132 +
 doc/html/functions_0x6b.html                       |     83 +
 doc/html/functions_0x6e.html                       |     83 +
 doc/html/functions_0x72.html                       |    602 +
 doc/html/functions_0x73.html                       |    132 +
 doc/html/functions_0x75.html                       |     92 +
 doc/html/functions_0x76.html                       |     83 +
 doc/html/functions_0x7a.html                       |    132 +
 doc/html/functions_func.html                       |    471 +
 doc/html/functions_vars.html                       |     78 +
 doc/html/functions_vars_0x62.html                  |    324 +
 doc/html/functions_vars_0x6b.html                  |     78 +
 doc/html/functions_vars_0x6e.html                  |     78 +
 doc/html/functions_vars_0x72.html                  |    441 +
 doc/html/functions_vars_0x76.html                  |     78 +
 doc/html/globals.html                              |   1729 +
 doc/html/globals_0x72.html                         |    966 +
 doc/html/globals_defs.html                         |    580 +
 doc/html/globals_enum.html                         |    180 +
 doc/html/globals_eval.html                         |    474 +
 doc/html/globals_eval_0x72.html                    |    209 +
 doc/html/globals_func.html                         |   1230 +
 doc/html/globals_func_0x72.html                    |    288 +
 doc/html/globals_type.html                         |    112 +
 doc/html/group__rsb__doc__examples.html            |   1713 +
 doc/html/group__rsb__doc__rsb.html                 |   3742 +
 doc/html/group__rsb__doc__sparse__blas.html        |  14445 ++
 doc/html/index.html                                |   1082 +
 ...terfaceblas__sparse_1_1rsb__blas__get__mtx.html |     89 +
 ...terfaceblas__sparse_1_1uscr__insert__block.html |    395 +
 ...erfaceblas__sparse_1_1uscr__insert__clique.html |    443 +
 ...interfaceblas__sparse_1_1uscr__insert__col.html |    367 +
 ...rfaceblas__sparse_1_1uscr__insert__entries.html |    367 +
 ...terfaceblas__sparse_1_1uscr__insert__entry.html |    340 +
 ...interfaceblas__sparse_1_1uscr__insert__row.html |    367 +
 doc/html/interfaceblas__sparse_1_1usmm.html        |    503 +
 doc/html/interfaceblas__sparse_1_1usmv.html        |    447 +
 doc/html/interfaceblas__sparse_1_1ussm.html        |    423 +
 doc/html/interfaceblas__sparse_1_1ussv.html        |    367 +
 doc/html/interfacersb_1_1rsb__coo__sort.html       |    140 +
 .../interfacersb_1_1rsb__file__mtx__get__dims.html |    122 +
 doc/html/interfacersb_1_1rsb__file__mtx__load.html |    116 +
 doc/html/interfacersb_1_1rsb__file__mtx__rndr.html |    128 +
 doc/html/interfacersb_1_1rsb__file__mtx__save.html |    104 +
 doc/html/interfacersb_1_1rsb__file__vec__load.html |    116 +
 doc/html/interfacersb_1_1rsb__file__vec__save.html |    116 +
 doc/html/interfacersb_1_1rsb__lib__exit.html       |     94 +
 doc/html/interfacersb_1_1rsb__lib__get__opt.html   |    104 +
 doc/html/interfacersb_1_1rsb__lib__init.html       |     94 +
 doc/html/interfacersb_1_1rsb__lib__reinit.html     |     94 +
 doc/html/interfacersb_1_1rsb__lib__set__opt.html   |    104 +
 .../interfacersb_1_1rsb__lib__set__opt__str.html   |    104 +
 .../interfacersb_1_1rsb__mtx__add__to__dense.html  |    134 +
 ...cersb_1_1rsb__mtx__alloc__from__coo__begin.html |    128 +
 ...cersb_1_1rsb__mtx__alloc__from__coo__const.html |    158 +
 ...facersb_1_1rsb__mtx__alloc__from__coo__end.html |     94 +
 ...rsb_1_1rsb__mtx__alloc__from__coo__inplace.html |    158 +
 ...cersb_1_1rsb__mtx__alloc__from__csc__const.html |    158 +
 ...cersb_1_1rsb__mtx__alloc__from__csr__const.html |    158 +
 ...rsb_1_1rsb__mtx__alloc__from__csr__inplace.html |    158 +
 doc/html/interfacersb_1_1rsb__mtx__clone.html      |    128 +
 doc/html/interfacersb_1_1rsb__mtx__free.html       |     94 +
 doc/html/interfacersb_1_1rsb__mtx__get__coo.html   |    122 +
 .../interfacersb_1_1rsb__mtx__get__coo__block.html |    164 +
 doc/html/interfacersb_1_1rsb__mtx__get__csr.html   |    128 +
 doc/html/interfacersb_1_1rsb__mtx__get__info.html  |    110 +
 .../interfacersb_1_1rsb__mtx__get__info__str.html  |    116 +
 doc/html/interfacersb_1_1rsb__mtx__get__nrm.html   |    110 +
 doc/html/interfacersb_1_1rsb__mtx__get__prec.html  |    116 +
 ...nterfacersb_1_1rsb__mtx__get__rows__sparse.html |    152 +
 doc/html/interfacersb_1_1rsb__mtx__get__vals.html  |    128 +
 doc/html/interfacersb_1_1rsb__mtx__get__vec.html   |    110 +
 doc/html/interfacersb_1_1rsb__mtx__rndr.html       |    122 +
 doc/html/interfacersb_1_1rsb__mtx__set__vals.html  |    128 +
 .../interfacersb_1_1rsb__mtx__switch__to__coo.html |    122 +
 .../interfacersb_1_1rsb__mtx__switch__to__csr.html |    122 +
 doc/html/interfacersb_1_1rsb__mtx__upd__vals.html  |    110 +
 doc/html/interfacersb_1_1rsb__perror.html          |    104 +
 ...ersb_1_1rsb__psblas__trans__to__rsb__trans.html |     94 +
 doc/html/interfacersb_1_1rsb__spmm.html            |    152 +
 doc/html/interfacersb_1_1rsb__spmsp.html           |    140 +
 .../interfacersb_1_1rsb__spmsp__to__dense.html     |    164 +
 doc/html/interfacersb_1_1rsb__spmv.html            |    140 +
 doc/html/interfacersb_1_1rsb__sppsp.html           |    140 +
 doc/html/interfacersb_1_1rsb__spsm.html            |    152 +
 doc/html/interfacersb_1_1rsb__spsv.html            |    134 +
 doc/html/interfacersb_1_1rsb__strerror__r.html     |    110 +
 doc/html/interfacersb_1_1rsb__time.html            |     93 +
 doc/html/interfacersb_1_1rsb__tune__spmm.html      |    182 +
 doc/html/interfacersb_1_1rsb__tune__spsm.html      |    182 +
 doc/html/modules.html                              |     59 +
 doc/html/nav_f.png                                 |    Bin 0 -> 153 bytes
 doc/html/nav_g.png                                 |    Bin 0 -> 94 bytes
 doc/html/nav_h.png                                 |    Bin 0 -> 98 bytes
 doc/html/open.png                                  |    Bin 0 -> 123 bytes
 doc/html/pages.html                                |     58 +
 doc/html/rsb_8F90.html                             |    172 +
 doc/html/rsb_8h.html                               |   2280 +
 doc/html/rsb__blas__sparse_8F90.html               |    109 +
 doc/html/rsb__libspblas_8c.html                    |   1695 +
 doc/html/rsb__libspblas_8h.html                    |   1768 +
 doc/html/rsb__libspblas__handle_8c.html            |     94 +
 doc/html/rsb__rsb_8c.html                          |    300 +
 doc/html/rsb__types_8h.html                        |    742 +
 doc/html/structrsb__initopts.html                  |    147 +
 doc/html/sync_off.png                              |    Bin 0 -> 853 bytes
 doc/html/sync_on.png                               |    Bin 0 -> 845 bytes
 doc/html/tab_a.png                                 |    Bin 0 -> 142 bytes
 doc/html/tab_b.png                                 |    Bin 0 -> 167 bytes
 doc/html/tab_h.png                                 |    Bin 0 -> 192 bytes
 doc/html/tab_s.png                                 |    Bin 0 -> 184 bytes
 doc/html/tabs.css                                  |     60 +
 doc/html/todo.html                                 |     82 +
 doc/man/librsb-config.3                            |     63 +
 doc/man/man3/rsb-examples.3                        |   1739 +
 doc/man/man3/rsb-spblas.h.3                        |   7053 +
 doc/man/man3/rsb.h.3                               |   2097 +
 doc/man/rsbench.3                                  |     76 +
 examples/Makefile.am                               |     83 +
 examples/Makefile.in                               |    681 +
 examples/autotune.c                                |    392 +
 examples/benchex.sh                                |     12 +
 examples/fortran.F90                               |    135 +
 examples/fortran_rsb_fi.F90                        |    195 +
 examples/hello-spblas.c                            |    170 +
 examples/hello.c                                   |    159 +
 examples/io-spblas.c                               |    111 +
 examples/make.sh                                   |     32 +
 examples/pd.mtx                                    |     48 +
 examples/power.c                                   |    139 +
 examples/transpose.c                               |    152 +
 examples/vf.mtx                                    |      8 +
 install-sh                                         |    527 +
 librsb-config.in                                   |    154 +
 librsb.pc.in                                       |     15 +
 libspblas_macros.m4                                |    951 +
 ltmain.sh                                          |   9661 +
 m4/Makefile.am                                     |      2 +
 m4/Makefile.in                                     |    432 +
 mergesort_macros.m4                                |    594 +
 missing                                            |    331 +
 ot-infty_norm.c                                    |      1 +
 ot-rowssums.c                                      |      1 +
 ot-scale.c                                         |      1 +
 ot-spmv.c                                          |      1 +
 ot-spmv_sasa.c                                     |      1 +
 ot-spmv_sxsa.c                                     |      1 +
 ot-spmv_uaua.c                                     |      1 +
 ot-spmv_uauz.c                                     |      1 +
 ot-spmv_unua.c                                     |      1 +
 ot-spmv_uxua.c                                     |      1 +
 ot-spsv.c                                          |      1 +
 ot-spsv_sxsx.c                                     |      1 +
 ot-spsv_uxua.c                                     |      1 +
 ot.c                                               |      1 +
 ot.m                                               |    735 +
 pd.mtx                                             |     48 +
 psb_mvsv_tester.f90                                |   7323 +
 psb_mvsv_tester.m                                  |     40 +
 psbtf.F90                                          |    532 +
 psbtf.m                                            |     60 +
 rsb-config.h.hin                                   |      3 +
 rsb-config.h.in                                    |    424 +
 rsb-incoming.grep                                  |     16 +
 rsb-incoming.sed                                   |      1 +
 rsb.F90                                            |   1202 +
 rsb.h                                              |    906 +
 rsb_asm.c                                          |    200 +
 rsb_asm.h                                          |     35 +
 rsb_bench.c                                        |   2861 +
 rsb_bench.h                                        |     96 +
 rsb_bench.m4                                       |    629 +
 rsb_bio.c                                          |    551 +
 rsb_bio.h                                          |     37 +
 rsb_blas_sparse.F90                                |   1736 +
 rsb_blas_sparse.m4                                 |    257 +
 rsb_blas_stuff.c                                   |     90 +
 rsb_blas_stuff.h                                   |     39 +
 rsb_clone.c                                        |   1009 +
 rsb_clone.h                                        |     54 +
 rsb_common.h                                       |   1417 +
 rsb_config.m4                                      |     56 +
 rsb_config.m4.in                                   |     56 +
 rsb_coo.c                                          |    737 +
 rsb_coo.h                                          |     46 +
 rsb_coo2rec.c                                      |   2990 +
 rsb_coo2rec.h                                      |     78 +
 rsb_coo_check.c                                    |    399 +
 rsb_coo_check.h                                    |     41 +
 rsb_coo_symm.c                                     |     98 +
 rsb_coo_symm.h                                     |     37 +
 rsb_cpmv.c                                         |    100 +
 rsb_cpmv.h                                         |     40 +
 rsb_csr.c                                          |    102 +
 rsb_csr.h                                          |     40 +
 rsb_csr2coo.c                                      |    240 +
 rsb_csr2coo.h                                      |     42 +
 rsb_do.c                                           |   1284 +
 rsb_do.h                                           |    124 +
 rsb_dump.c                                         |    366 +
 rsb_dump.h                                         |     54 +
 rsb_eps.c                                          |   1117 +
 rsb_eps.h                                          |     38 +
 rsb_err.c                                          |    143 +
 rsb_err.h                                          |    107 +
 rsb_failure_tests.c                                |    250 +
 rsb_failure_tests.h                                |     42 +
 rsb_fortran_macros.m4                              |    668 +
 rsb_fpb.c                                          |    129 +
 rsb_fpb.h                                          |     46 +
 rsb_garbage.c                                      |    955 +
 rsb_garbage.h                                      |     56 +
 rsb_gen.c                                          |    592 +
 rsb_gen.h                                          |     47 +
 rsb_genmm.c                                        |    220 +
 rsb_get.c                                          |   1878 +
 rsb_get.h                                          |     67 +
 rsb_idx.c                                          |    904 +
 rsb_idx.h                                          |     84 +
 rsb_init.c                                         |    808 +
 rsb_init.h                                         |     47 +
 rsb_internals.c                                    |   3902 +
 rsb_internals.h                                    |    180 +
 rsb_is.c                                           |    712 +
 rsb_is.h                                           |     59 +
 rsb_krnl.c                                         |  34946 ++++
 rsb_krnl.h                                         |    461 +
 rsb_krnl.m4                                        |    171 +
 rsb_krnl_bcoo_macros.m4                            |    753 +
 rsb_krnl_bcoo_spmv_u.c                             | 186189 ++++++++++++++++++
 rsb_krnl_bcoo_spmv_u.h                             |  21824 ++
 rsb_krnl_bcoo_spmv_u.m4                            |     56 +
 rsb_krnl_bcss.c                                    |     38 +
 rsb_krnl_bcss.h                                    |   5387 +
 rsb_krnl_bcss.m4                                   |     84 +
 rsb_krnl_bcss_l.c                                  |     40 +
 rsb_krnl_bcss_l.h                                  |     42 +
 rsb_krnl_bcss_l.m4                                 |     77 +
 rsb_krnl_bcss_macros.m4                            |   1509 +
 rsb_krnl_bcss_misc_u.c                             |  42472 ++++
 rsb_krnl_bcss_misc_u.h                             |   3499 +
 rsb_krnl_bcss_misc_u.m4                            |     57 +
 rsb_krnl_bcss_spmv_u.c                             | 100282 ++++++++++
 rsb_krnl_bcss_spmv_u.h                             |   6957 +
 rsb_krnl_bcss_spmv_u.m4                            |     59 +
 rsb_krnl_bcss_spsv_u.c                             |  45000 +++++
 rsb_krnl_bcss_spsv_u.h                             |   2923 +
 rsb_krnl_bcss_spsv_u.m4                            |     57 +
 rsb_krnl_bcss_u.c                                  |     42 +
 rsb_krnl_bcss_u.h                                  |     45 +
 rsb_krnl_bcss_u.m4                                 |     61 +
 rsb_krnl_lb.c                                      |    100 +
 rsb_krnl_lb.h                                      |    103 +
 rsb_krnl_lb.m4                                     |     70 +
 rsb_krnl_linked_lists.m4                           |    467 +
 rsb_krnl_macros.m4                                 |   1418 +
 rsb_krnl_vb.c                                      |     40 +
 rsb_krnl_vb.h                                      |     43 +
 rsb_krnl_vb.m4                                     |     56 +
 rsb_krnl_vb_macros.m4                              |    410 +
 rsb_lbl.h                                          |    235 +
 rsb_libspblas.c                                    |   3698 +
 rsb_libspblas.h                                    |    577 +
 rsb_libspblas.m4                                   |    314 +
 rsb_libspblas_handle.c                             |   1734 +
 rsb_libspblas_handle.h                             |    167 +
 rsb_libspblas_tests.c                              |   2258 +
 rsb_libspblas_tests.h                              |     45 +
 rsb_license_header.inc                             |     21 +
 rsb_limiter.c                                      |    130 +
 rsb_limiter.h                                      |     60 +
 rsb_lock.c                                         |   1279 +
 rsb_lock.h                                         |    148 +
 rsb_mbw.c                                          |    900 +
 rsb_mbw.h                                          |    128 +
 rsb_merge.c                                        |    545 +
 rsb_merge.h                                        |     75 +
 rsb_merge.m4                                       |    190 +
 rsb_mergesort.c                                    |   2520 +
 rsb_mergesort.h                                    |    330 +
 rsb_mergesort.m4                                   |    108 +
 rsb_mio.c                                          |   1275 +
 rsb_mio.h                                          |     52 +
 rsb_misc.m4                                        |    981 +
 rsb_mkl.c                                          |    646 +
 rsb_mkl.h                                          |     70 +
 rsb_mkl.m4                                         |    600 +
 rsb_mmio.c                                         |    519 +
 rsb_mmio.h                                         |    150 +
 rsb_mmls.c                                         |    125 +
 rsb_mod.m4                                         |    107 +
 rsb_msort_up.c                                     |    333 +
 rsb_msort_up.h                                     |     39 +
 rsb_ompio.c                                        |    293 +
 rsb_ompio.h                                        |     57 +
 rsb_ompio.m4                                       |    130 +
 rsb_op.c                                           |     35 +
 rsb_op.h                                           |     72 +
 rsb_partition.c                                    |    262 +
 rsb_partition.h                                    |     38 +
 rsb_pcnt.c                                         |    463 +
 rsb_pcnt.h                                         |     68 +
 rsb_perf.c                                         |   1211 +
 rsb_perf.h                                         |    171 +
 rsb_permute.c                                      |   1169 +
 rsb_permute.h                                      |     68 +
 rsb_permute.m4                                     |    372 +
 rsb_pr.c                                           |   2566 +
 rsb_pr.h                                           |     59 +
 rsb_prec.c                                         |    341 +
 rsb_prec.h                                         |     66 +
 rsb_prec.m4                                        |    186 +
 rsb_psblas.h                                       |     33 +
 rsb_rec.c                                          |   1940 +
 rsb_rec.h                                          |     66 +
 rsb_rec2coo.c                                      |    309 +
 rsb_rec2coo.h                                      |     35 +
 rsb_rec2csr.c                                      |    122 +
 rsb_rec2csr.h                                      |     34 +
 rsb_render.c                                       |    297 +
 rsb_render.h                                       |     44 +
 rsb_rsb.c                                          |   1671 +
 rsb_set.c                                          |    494 +
 rsb_set.h                                          |     43 +
 rsb_spgemm.c                                       |    741 +
 rsb_spgemm.h                                       |     38 +
 rsb_spgemm_csr.c                                   |    396 +
 rsb_spgemm_csr.h                                   |     49 +
 rsb_spgemm_csr.m4                                  |    149 +
 rsb_spmv.c                                         |    437 +
 rsb_spmv.h                                         |    115 +
 rsb_spmv.m4                                        |    451 +
 rsb_spsum.c                                        |    144 +
 rsb_spsum.h                                        |     35 +
 rsb_spsum_misc.c                                   |    349 +
 rsb_spsum_misc.h                                   |     44 +
 rsb_spsum_misc.m4                                  |    120 +
 rsb_spsv.c                                         |    867 +
 rsb_spsv.h                                         |     56 +
 rsb_src.c                                          |    419 +
 rsb_src.h                                          |     44 +
 rsb_srt.c                                          |   1903 +
 rsb_srt.h                                          |    149 +
 rsb_srtp.c                                         |    421 +
 rsb_srtp.h                                         |     40 +
 rsb_strmif.c                                       |     43 +
 rsb_stropts.c                                      |     41 +
 rsb_struct.h                                       |    359 +
 rsb_swt.c                                          |    970 +
 rsb_swt.h                                          |     87 +
 rsb_sys.c                                          |   1343 +
 rsb_sys.h                                          |    259 +
 rsb_test_accuracy.c                                |    237 +
 rsb_test_accuracy.h                                |     42 +
 rsb_test_matops.c                                  |   9914 +
 rsb_test_matops.h                                  |    165 +
 rsb_test_matops.m4                                 |   4843 +
 rsb_tune.c                                         |   1776 +
 rsb_tune.h                                         |    160 +
 rsb_types.h                                        |    579 +
 rsb_types.m4                                       |    628 +
 rsb_unroll.c                                       |     69 +
 rsb_unroll.h                                       |     74 +
 rsb_unroll.m4                                      |    247 +
 rsb_user.c                                         |    445 +
 rsb_util.c                                         |   8240 +
 rsb_util.h                                         |    204 +
 rsb_util.m4                                        |   3345 +
 rsbench.c                                          |    601 +
 sbtc.c                                             |  96461 +++++++++
 sbtc.m                                             |     80 +
 sbtf.F90                                           |  87196 ++++++++
 sbtf.m                                             |     58 +
 sbtg-types.m                                       |      6 +
 sbtg-types.m4                                      |     15 +
 sbtg.m                                             |   1095 +
 scripts/Makefile.am                                |      5 +
 scripts/Makefile.in                                |    435 +
 scripts/callgrind.sh                               |     24 +
 scripts/configure_for_debug.sh                     |     32 +
 scripts/devtests.sh                                |     47 +
 scripts/doc-tests.sh                               |     24 +
 scripts/eda.sh                                     |      7 +
 scripts/gcov.sh                                    |     54 +
 scripts/gendense.sh                                |     81 +
 scripts/genstrided.sh                              |     52 +
 scripts/gprof.sh                                   |     27 +
 scripts/hinfo.sh                                   |     32 +
 scripts/librsb-here.sh                             |     62 +
 scripts/likwid.sh                                  |     23 +
 scripts/linux-sys-cache.sh                         |     67 +
 scripts/matrices_get.sh                            |     73 +
 scripts/mmhead.sh                                  |     39 +
 scripts/mmpci2gen.sh                               |     73 +
 scripts/mmsym2gen.sh                               |     22 +
 scripts/nightly.sh                                 |    129 +
 scripts/readme-tests.sh                            |     16 +
 scripts/rsb_h_to_rsb_fi.sh                         |     66 +
 scripts/rsbmandesc.awk                             |     22 +
 scripts/rsbmanseealso.sh                           |     22 +
 scripts/scalasca.sh                                |     38 +
 scripts/score_p.sh                                 |     40 +
 scripts/static-libs.sh                             |    349 +
 scripts/test.sh                                    |    273 +
 scripts/versions.sh                                |     49 +
 testgen.sh.m4                                      |     30 +
 vf.mtx                                             |      8 +
 wisdom.m4                                          |    145 +
 529 files changed, 896821 insertions(+)

diff --git a/A.mtx b/A.mtx
new file mode 100644
index 0000000..2107725
--- /dev/null
+++ b/A.mtx
@@ -0,0 +1,11 @@
+%%MatrixMarket matrix coordinate real symmetric
+%
+% A Hilbert matrix of order 3: 3 rows, 3 columns, and 6 nonzeroes.
+%
+3 3 6
+1 1 1.0
+2 1 0.5
+2 2 0.33
+3 1 0.33
+3 2 0.25
+3 3 0.2
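
A file in this format is exactly what the C API imported here consumes.
As a minimal sketch in the spirit of the bundled examples/hello.c
(function and macro names follow rsb.h as shipped in this version, and
are worth verifying against the installed header), loading A.mtx and
multiplying it by a vector of ones looks like:

    /* Hedged sketch against the librsb 1.2 C API declared in rsb.h. */
    #include <rsb.h>
    #include <stdio.h>

    int main(void)
    {
        rsb_err_t errval = RSB_ERR_NO_ERROR;
        struct rsb_mtx_t *mtxAp = NULL;
        const double alpha = 1.0, beta = 0.0;
        const double x[3] = { 1.0, 1.0, 1.0 };
        double y[3] = { 0.0, 0.0, 0.0 };

        if (rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
            return 1;
        /* The symmetric Matrix Market header is honored on load. */
        mtxAp = rsb_file_mtx_load("A.mtx", RSB_FLAG_NOFLAGS,
                                  RSB_NUMERICAL_TYPE_DOUBLE, &errval);
        if (mtxAp == NULL)
            return 1;
        /* y := alpha*A*x + beta*y; for the Hilbert matrix above this
         * should print roughly 1.83 1.08 0.78. */
        errval = rsb_spmv(RSB_TRANSPOSITION_N, &alpha, mtxAp, x, 1,
                          &beta, y, 1);
        printf("y = %g %g %g\n", y[0], y[1], y[2]);
        rsb_mtx_free(mtxAp);
        rsb_lib_exit(RSB_NULL_INIT_OPTIONS);
        return (errval == RSB_ERR_NO_ERROR) ? 0 : 1;
    }

The generated librsb-config script (see doc/man/librsb-config.3 in the
file list above) reports the compile and link flags needed to build
such a program against the installed library.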
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 0000000..a167b2f
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,2 @@
+Michele Martone
+See the CREDITS section in the README file.
diff --git a/COPYING b/COPYING
new file mode 100644
index 0000000..65c5ca8
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,165 @@
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/ChangeLog b/ChangeLog
new file mode 100644
index 0000000..e69de29
diff --git a/INSTALL b/INSTALL
new file mode 100644
index 0000000..a1e89e1
--- /dev/null
+++ b/INSTALL
@@ -0,0 +1,370 @@
+Installation Instructions
+*************************
+
+Copyright (C) 1994-1996, 1999-2002, 2004-2011 Free Software Foundation,
+Inc.
+
+   Copying and distribution of this file, with or without modification,
+are permitted in any medium without royalty provided the copyright
+notice and this notice are preserved.  This file is offered as-is,
+without warranty of any kind.
+
+Basic Installation
+==================
+
+   Briefly, the shell commands `./configure; make; make install' should
+configure, build, and install this package.  The following
+more-detailed instructions are generic; see the `README' file for
+instructions specific to this package.  Some packages provide this
+`INSTALL' file but do not implement all of the features documented
+below.  The lack of an optional feature in a given package is not
+necessarily a bug.  More recommendations for GNU packages can be found
+in *note Makefile Conventions: (standards)Makefile Conventions.
+
+   The `configure' shell script attempts to guess correct values for
+various system-dependent variables used during compilation.  It uses
+those values to create a `Makefile' in each directory of the package.
+It may also create one or more `.h' files containing system-dependent
+definitions.  Finally, it creates a shell script `config.status' that
+you can run in the future to recreate the current configuration, and a
+file `config.log' containing compiler output (useful mainly for
+debugging `configure').
+
+   It can also use an optional file (typically called `config.cache'
+and enabled with `--cache-file=config.cache' or simply `-C') that saves
+the results of its tests to speed up reconfiguring.  Caching is
+disabled by default to prevent problems with accidental use of stale
+cache files.
+
+   If you need to do unusual things to compile the package, please try
+to figure out how `configure' could check whether to do them, and mail
+diffs or instructions to the address given in the `README' so they can
+be considered for the next release.  If you are using the cache, and at
+some point `config.cache' contains results you don't want to keep, you
+may remove or edit it.
+
+   The file `configure.ac' (or `configure.in') is used to create
+`configure' by a program called `autoconf'.  You need `configure.ac' if
+you want to change it or regenerate `configure' using a newer version
+of `autoconf'.
+
+   The simplest way to compile this package is:
+
+  1. `cd' to the directory containing the package's source code and type
+     `./configure' to configure the package for your system.
+
+     Running `configure' might take a while.  While running, it prints
+     some messages telling which features it is checking for.
+
+  2. Type `make' to compile the package.
+
+  3. Optionally, type `make check' to run any self-tests that come with
+     the package, generally using the just-built uninstalled binaries.
+
+  4. Type `make install' to install the programs and any data files and
+     documentation.  When installing into a prefix owned by root, it is
+     recommended that the package be configured and built as a regular
+     user, and only the `make install' phase executed with root
+     privileges.
+
+  5. Optionally, type `make installcheck' to repeat any self-tests, but
+     this time using the binaries in their final installed location.
+     This target does not install anything.  Running this target as a
+     regular user, particularly if the prior `make install' required
+     root privileges, verifies that the installation completed
+     correctly.
+
+  6. You can remove the program binaries and object files from the
+     source code directory by typing `make clean'.  To also remove the
+     files that `configure' created (so you can compile the package for
+     a different kind of computer), type `make distclean'.  There is
+     also a `make maintainer-clean' target, but that is intended mainly
+     for the package's developers.  If you use it, you may have to get
+     all sorts of other programs in order to regenerate files that came
+     with the distribution.
+
+  7. Often, you can also type `make uninstall' to remove the installed
+     files again.  In practice, not all packages have tested that
+     uninstallation works correctly, even though it is required by the
+     GNU Coding Standards.
+
+  8. Some packages, particularly those that use Automake, provide `make
+     distcheck', which can be used by developers to test that all other
+     targets like `make install' and `make uninstall' work correctly.
+     This target is generally not run by end users.
+
+Compilers and Options
+=====================
+
+   Some systems require unusual options for compilation or linking that
+the `configure' script does not know about.  Run `./configure --help'
+for details on some of the pertinent environment variables.
+
+   You can give `configure' initial values for configuration parameters
+by setting variables in the command line or in the environment.  Here
+is an example:
+
+     ./configure CC=c99 CFLAGS=-g LIBS=-lposix
+
+   *Note Defining Variables::, for more details.
+
+Compiling For Multiple Architectures
+====================================
+
+   You can compile the package for more than one kind of computer at the
+same time, by placing the object files for each architecture in their
+own directory.  To do this, you can use GNU `make'.  `cd' to the
+directory where you want the object files and executables to go and run
+the `configure' script.  `configure' automatically checks for the
+source code in the directory that `configure' is in and in `..'.  This
+is known as a "VPATH" build.
+
+   With a non-GNU `make', it is safer to compile the package for one
+architecture at a time in the source code directory.  After you have
+installed the package for one architecture, use `make distclean' before
+reconfiguring for another architecture.
+
+   On MacOS X 10.5 and later systems, you can create libraries and
+executables that work on multiple system types--known as "fat" or
+"universal" binaries--by specifying multiple `-arch' options to the
+compiler but only a single `-arch' option to the preprocessor.  Like
+this:
+
+     ./configure CC="gcc -arch i386 -arch x86_64 -arch ppc -arch ppc64" \
+                 CXX="g++ -arch i386 -arch x86_64 -arch ppc -arch ppc64" \
+                 CPP="gcc -E" CXXCPP="g++ -E"
+
+   This is not guaranteed to produce working output in all cases; you
+may have to build one architecture at a time and combine the results
+using the `lipo' tool if you have problems.
+
+Installation Names
+==================
+
+   By default, `make install' installs the package's commands under
+`/usr/local/bin', include files under `/usr/local/include', etc.  You
+can specify an installation prefix other than `/usr/local' by giving
+`configure' the option `--prefix=PREFIX', where PREFIX must be an
+absolute file name.
+
+   You can specify separate installation prefixes for
+architecture-specific files and architecture-independent files.  If you
+pass the option `--exec-prefix=PREFIX' to `configure', the package uses
+PREFIX as the prefix for installing programs and libraries.
+Documentation and other data files still use the regular prefix.
+
+   In addition, if you use an unusual directory layout you can give
+options like `--bindir=DIR' to specify different values for particular
+kinds of files.  Run `configure --help' for a list of the directories
+you can set and what kinds of files go in them.  In general, the
+default for these options is expressed in terms of `${prefix}', so that
+specifying just `--prefix' will affect all of the other directory
+specifications that were not explicitly provided.
+
+   The most portable way to affect installation locations is to pass the
+correct locations to `configure'; however, many packages provide one or
+both of the following shortcuts of passing variable assignments to the
+`make install' command line to change installation locations without
+having to reconfigure or recompile.
+
+   The first method involves providing an override variable for each
+affected directory.  For example, `make install
+prefix=/alternate/directory' will choose an alternate location for all
+directory configuration variables that were expressed in terms of
+`${prefix}'.  Any directories that were specified during `configure',
+but not in terms of `${prefix}', must each be overridden at install
+time for the entire installation to be relocated.  The approach of
+makefile variable overrides for each directory variable is required by
+the GNU Coding Standards, and ideally causes no recompilation.
+However, some platforms have known limitations with the semantics of
+shared libraries that end up requiring recompilation when using this
+method, particularly noticeable in packages that use GNU Libtool.
+
+   The second method involves providing the `DESTDIR' variable.  For
+example, `make install DESTDIR=/alternate/directory' will prepend
+`/alternate/directory' before all installation names.  The approach of
+`DESTDIR' overrides is not required by the GNU Coding Standards, and
+does not work on platforms that have drive letters.  On the other hand,
+it does better at avoiding recompilation issues, and works well even
+when some directory options were not specified in terms of `${prefix}'
+at `configure' time.
+
+Optional Features
+=================
+
+   If the package supports it, you can cause programs to be installed
+with an extra prefix or suffix on their names by giving `configure' the
+option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'.
+
+   Some packages pay attention to `--enable-FEATURE' options to
+`configure', where FEATURE indicates an optional part of the package.
+They may also pay attention to `--with-PACKAGE' options, where PACKAGE
+is something like `gnu-as' or `x' (for the X Window System).  The
+`README' should mention any `--enable-' and `--with-' options that the
+package recognizes.
+
+   For packages that use the X Window System, `configure' can usually
+find the X include and library files automatically, but if it doesn't,
+you can use the `configure' options `--x-includes=DIR' and
+`--x-libraries=DIR' to specify their locations.
+
+   Some packages offer the ability to configure how verbose the
+execution of `make' will be.  For these packages, running `./configure
+--enable-silent-rules' sets the default to minimal output, which can be
+overridden with `make V=1'; while running `./configure
+--disable-silent-rules' sets the default to verbose, which can be
+overridden with `make V=0'.
+
+Particular systems
+==================
+
+   On HP-UX, the default C compiler is not ANSI C compatible.  If GNU
+CC is not installed, it is recommended to use the following options in
+order to use an ANSI C compiler:
+
+     ./configure CC="cc -Ae -D_XOPEN_SOURCE=500"
+
+and if that doesn't work, install pre-built binaries of GCC for HP-UX.
+
+   HP-UX `make' updates targets which have the same time stamps as
+their prerequisites, which makes it generally unusable when shipped
+generated files such as `configure' are involved.  Use GNU `make'
+instead.
+
+   On OSF/1 a.k.a. Tru64, some versions of the default C compiler cannot
+parse its `<wchar.h>' header file.  The option `-nodtk' can be used as
+a workaround.  If GNU CC is not installed, it is therefore recommended
+to try
+
+     ./configure CC="cc"
+
+and if that doesn't work, try
+
+     ./configure CC="cc -nodtk"
+
+   On Solaris, don't put `/usr/ucb' early in your `PATH'.  This
+directory contains several dysfunctional programs; working variants of
+these programs are available in `/usr/bin'.  So, if you need `/usr/ucb'
+in your `PATH', put it _after_ `/usr/bin'.
+
+   On Haiku, software installed for all users goes in `/boot/common',
+not `/usr/local'.  It is recommended to use the following options:
+
+     ./configure --prefix=/boot/common
+
+Specifying the System Type
+==========================
+
+   There may be some features `configure' cannot figure out
+automatically, but needs to determine by the type of machine the package
+will run on.  Usually, assuming the package is built to be run on the
+_same_ architectures, `configure' can figure that out, but if it prints
+a message saying it cannot guess the machine type, give it the
+`--build=TYPE' option.  TYPE can either be a short name for the system
+type, such as `sun4', or a canonical name which has the form:
+
+     CPU-COMPANY-SYSTEM
+
+where SYSTEM can have one of these forms:
+
+     OS
+     KERNEL-OS
+
+   See the file `config.sub' for the possible values of each field.  If
+`config.sub' isn't included in this package, then this package doesn't
+need to know the machine type.
+
+   If you are _building_ compiler tools for cross-compiling, you should
+use the option `--target=TYPE' to select the type of system they will
+produce code for.
+
+   If you want to _use_ a cross compiler, that generates code for a
+platform different from the build platform, you should specify the
+"host" platform (i.e., that on which the generated programs will
+eventually be run) with `--host=TYPE'.
+
+Sharing Defaults
+================
+
+   If you want to set default values for `configure' scripts to share,
+you can create a site shell script called `config.site' that gives
+default values for variables like `CC', `cache_file', and `prefix'.
+`configure' looks for `PREFIX/share/config.site' if it exists, then
+`PREFIX/etc/config.site' if it exists.  Or, you can set the
+`CONFIG_SITE' environment variable to the location of the site script.
+A warning: not all `configure' scripts look for a site script.
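+
+   As an illustration, a minimal `config.site' could look like the
+following (the values are mere examples):
+
+     # config.site: default values shared by `configure' scripts.
+     CC=gcc
+     CFLAGS='-O2 -g'
+     prefix=/usr/local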
+
+Defining Variables
+==================
+
+   Variables not defined in a site shell script can be set in the
+environment passed to `configure'.  However, some packages may run
+configure again during the build, and the customized values of these
+variables may be lost.  In order to avoid this problem, you should set
+them in the `configure' command line, using `VAR=value'.  For example:
+
+     ./configure CC=/usr/local2/bin/gcc
+
+causes the specified `gcc' to be used as the C compiler (unless it is
+overridden in the site shell script).
+
+Unfortunately, this technique does not work for `CONFIG_SHELL' due to
+an Autoconf bug.  Until the bug is fixed you can use this workaround:
+
+     CONFIG_SHELL=/bin/bash /bin/bash ./configure CONFIG_SHELL=/bin/bash
+
+`configure' Invocation
+======================
+
+   `configure' recognizes the following options to control how it
+operates.
+
+`--help'
+`-h'
+     Print a summary of all of the options to `configure', and exit.
+
+`--help=short'
+`--help=recursive'
+     Print a summary of the options unique to this package's
+     `configure', and exit.  The `short' variant lists options used
+     only in the top level, while the `recursive' variant lists options
+     also present in any nested packages.
+
+`--version'
+`-V'
+     Print the version of Autoconf used to generate the `configure'
+     script, and exit.
+
+`--cache-file=FILE'
+     Enable the cache: use and save the results of the tests in FILE,
+     traditionally `config.cache'.  FILE defaults to `/dev/null' to
+     disable caching.
+
+`--config-cache'
+`-C'
+     Alias for `--cache-file=config.cache'.
+
+`--quiet'
+`--silent'
+`-q'
+     Do not print messages saying which checks are being made.  To
+     suppress all normal output, redirect it to `/dev/null' (any error
+     messages will still be shown).
+
+`--srcdir=DIR'
+     Look for the package's source code in directory DIR.  Usually
+     `configure' can determine that directory automatically.
+
+`--prefix=DIR'
+     Use DIR as the installation prefix.  *note Installation Names::
+     for more details, including other options available for fine-tuning
+     the installation locations.
+
+`--no-create'
+`-n'
+     Run the configure checks, but stop before creating any output
+     files.
+
+`configure' also accepts some other, not widely useful, options.  Run
+`configure --help' for more details.
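+
+   The options above may be combined; for instance (the prefix being
+just an example):
+
+     ./configure -C --quiet --prefix=$HOME/opt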
+
diff --git a/L1C.m4 b/L1C.m4
new file mode 100644
index 0000000..0c31c88
--- /dev/null
+++ b/L1C.m4
@@ -0,0 +1,151 @@
+dnl
+dnl
+dnl	@author: Michele Martone
+dnl	@brief
+dnl	A cache estimator code  (EXPERIMENTAL, FIXME)
+dnl
+dnl	FIXME: INCOMPLETE
+dnl
+include(`rsb_misc.m4')dnl
+include(`do_unroll.m4')dnl
+dnl
+dnl
+dnl
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief L1 cache probing code (OBSOLETE)
+ */
+#include "rsb_sys.h"	/* rsb__aligned_malloc */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+typedef size_t rsb_word_t; /* FIXME */
+
+define(`RSB_M4_CACHE_SCAN',`dnl
+pushdef(`FORCE_EVICTION',ifelse($1,1,1,0))dnl
+pushdef(`args',`$1')dnl
+pushdef(`want_what',$2)dnl
+dnl
+ifelse(want_what,`function_identifier',`k_wordops_'FORCE_EVICTION)`'dnl
+dnl
+ifelse(want_what,`function_declaration',`dnl
+int $0(args,`function_identifier')dnl
+$0(args,`function_args');dnl
+')dnl
+dnl
+ifelse(want_what,`function_definition',`dnl
+static inline size_t $0(args,`function_identifier')dnl
+$0(args,`function_args')dnl
+$0(args,`function_body')dnl
+')dnl
+dnl
+ifelse(want_what,`function_args',`dnl
+(rsb_word_t *p)dnl
+')dnl
+dnl
+ifelse(want_what,`function_body',`dnl
+dnl
+{
+dnl pushdef(`WORDS',1024)dnl
+pushdef(`CACHE_SIZE',eval(8192))dnl
+pushdef(`WORD_SIZE',8)dnl
+pushdef(`WORDS',eval(CACHE_SIZE/WORD_SIZE))dnl
+pushdef(`CACHE_LINE_SIZE',eval(64))dnl
+pushdef(`CACHE_WAYS',4)dnl
+pushdef(`CACHE_SETS',eval(CACHE_SIZE/(CACHE_LINE_SIZE*CACHE_WAYS)))dnl
+pushdef(`WORDS_PER_CACHE_LINE',eval(CACHE_LINE_SIZE/WORD_SIZE))dnl	words per cache line
+pushdef(`CACHE_SET_OFFSET_WORDS',eval(CACHE_WAYS*WORDS_PER_CACHE_LINE))dnl
+pushdef(`CACHE_SET_OFFSET',eval(CACHE_SIZE/CACHE_SETS))dnl
+	/*
+	 * Code for a cache size of CACHE_SIZE bytes, WORD_SIZE bytes sized words,
+	 * CACHE_WAYS-way associativity, with CACHE_LINE_SIZE bytes sized cache lines,
+	 * each line thus fitting WORDS_PER_CACHE_LINE words,
+	 * for a total of CACHE_SETS cache sets, distanced CACHE_SET_OFFSET_WORDS words (CACHE_SET_OFFSET bytes) each.
+	 *
+	 */
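+	/*
+	 * Illustrative instance of the above, assuming the default
+	 * parameters: an 8192 byte, 4-way cache with 64 byte lines and
+	 * 8 byte words has 8 words per line and 8192/(64*4) = 32 sets;
+	 * the ways of a set are then 4*8 = 32 words (256 bytes) apart.
+	 */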
+dnl	/* CSI th cache set */
+dnl	/* CWI th cache line touched */
+	/* FIXME : NESTED LOOPS IS BUGGY */
+	RSB_M4_SIMPLE_UNROLL(`CWI',`0',`eval(CACHE_WAYS)',`dnl
+	RSB_M4_SIMPLE_UNROLL(`CSI',`0',`eval(CACHE_SETS)',`dnl
+	RSB_M4_SIMPLE_UNROLL(`CLI',`0',`eval(WORDS_PER_CACHE_LINE)',`
+	p[CLI + CWI*CACHE_SET_OFFSET_WORDS + CSI*WORDS_PER_CACHE_LINE]*=-1;dnl
+	/* CLI^th word of CSI^th cache set of CWI^th associativity way */
+	')
+	')
+	/* each cache set has been loaded with a minimum of cache misses */
+	')
+	return eval(CACHE_SETS*WORDS_PER_CACHE_LINE*CACHE_WAYS);
+popdef(`CACHE_SET_OFFSET')dnl
+popdef(`CACHE_SET_OFFSET_WORDS')dnl
+popdef(`WORDS_PER_CACHE_LINE')dnl	words per cache line
+popdef(`CACHE_SETS')dnl
+popdef(`CACHE_WAYS')dnl
+popdef(`CACHE_LINE_SIZE')dnl
+popdef(`WORDS')dnl
+popdef(`WORD_SIZE')dnl
+popdef(`CACHE_SIZE')dnl
+	/* FIXME : should follow nothing : CSI CLI CWI */
+}
+')dnl
+popdef(`args')dnl
+popdef(`want_what')dnl
+popdef(`FORCE_EVICTION')dnl
+dnl
+dnl
+')dnl
+dnl
+
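+/* Note: two variants of the scan kernel are generated below:
+ * k_wordops_1 (intended to force eviction) and k_wordops_0 (not);
+ * main() times both over buffers of growing size and prints the
+ * ratio of the two timings. */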
+RSB_M4_CACHE_SCAN(1,`function_definition')
+RSB_M4_CACHE_SCAN(0,`function_definition')
+
+int main()
+{
+	size_t i,j=0,it,times=100000;
+	rsb_word_t * p = NULL;
+	size_t N,KW,K=1024,W;
+	double t,bt;
+	size_t ops;
+
+	for(i=1;i<7;++i)
+	{
+		N=(1<<i)*K;			/* bytes */
+		KW=N/(K*sizeof(rsb_word_t));	/* kilowords */
+		W=N/(   sizeof(rsb_word_t));	/* words */
+
+		//p = rsb__aligned_malloc(N,N);	/* we want aligned bytes*/
+		p = rsb__aligned_malloc(((1<<8) * K),N);	/* we want aligned bytes*/
+		if(!p)goto err;
+
+		ops=0;				/* op count reset */
+		t = - rsb_time();			/* clock reset */
+		for(it=0;it<times;++it)	
+/*			for(j=0;j<KW;++j)	*//* we process one kiloword at a time */
+				ops += RSB_M4_CACHE_SCAN(0,`function_identifier')( p+j*K );
+		t += rsb_time();
+
+		RSB_STDOUT("%10zu times, %10zu Kwords == %10zu bytes : %10lg secs : %10lg ops per sec\n",times,KW,N,t,((double)(ops))/t);
+
+		ops=0;				/* op count reset */
+		bt = - rsb_time();			/* clock reset */
+		for(it=0;it<times;++it)	
+/*			for(j=0;j<KW;++j)	*//* we process one kiloword at a time */
+				ops += RSB_M4_CACHE_SCAN(1,`function_identifier')( p+j*K );
+		bt += rsb_time();
+
+		RSB_STDOUT("%10zu times, %10zu Kwords == %10zu bytes : %10lg secs : %10lg ops per sec\n",times,KW,N,bt,((double)(ops))/bt);
+		RSB_STDOUT("ratio = %lg\n",bt/t);
+		RSB_STDOUT("\n");
+		if(p){rsb__free(p);p=NULL;}
+	}
+	
+	if(p)rsb__free(p);
+	return 0;
+err:
+	return -1;
+}
+
+/* @endcond */
diff --git a/Makefile.am b/Makefile.am
new file mode 100644
index 0000000..09d8a61
--- /dev/null
+++ b/Makefile.am
@@ -0,0 +1,603 @@
+ACLOCAL_AMFLAGS=-I m4
+SUBDIRS= . examples scripts bench blas_sparse doc m4
+dist_doc_DATA=README AUTHORS 
+bin_SCRIPTS=librsb-config
+bin_PROGRAMS=rsbench$(EXEEXT)
+noinst_PROGRAMS= # gemv rsbenchxx # cg jacobi
+EXTRAPROGRAMSFORTRAN=
+EXTRAPROGRAMSC=ot$(EXEEXT)
+# L1C 
+if HAVE_SPARSE_BLAS_INTERFACE
+EXTRAPROGRAMSC+=sbtc$(EXEEXT)
+sbtc_SOURCES=sbtc.c
+sbtc_CFLAGS=$(CFLAGS) -D SBTC_USE_RSB_H -I .
+sbtc_LDADD=$(default_ldadd)
+endif
+if HAVE_FORTRAN_EXAMPLES
+if HAVE_SPARSE_BLAS_INTERFACE
+EXTRAPROGRAMSFORTRAN+=sbtf$(EXEEXT)
+endif
+endif
+EXTRA_PROGRAMS= $(EXTRAPROGRAMSC) $(EXTRAPROGRAMSFORTRAN)
+#EXTRA_LIBRARIES=librsb.la
+EXTRA_LIBRARIES=
+EXTRA_SCRIPTS=librsb-config
+
+NODISTSRC=  rsb_mkl.h blas_sparse.h
+
+RSBENCH_BUILT_SOURCES=	\
+		rsb_test_matops.c rsb_test_matops.h	\
+		rsb_mkl.c rsb_mkl.h
+
+LIB_BUILT_SOURCES_M4=	\
+		rsb_unroll.c rsb_unroll.h \
+		rsb_krnl_vb.c rsb_krnl_vb.h \
+		rsb_krnl_lb.c rsb_krnl_lb.h \
+		rsb_krnl.c rsb_krnl.h \
+		rsb_bench.c rsb_bench.h \
+		rsb_types.h \
+		rsb_mergesort.h rsb_mergesort.c \
+		rsb_permute.h rsb_permute.c \
+		rsb_krnl_bcss_l.h rsb_krnl_bcss_l.c \
+		rsb_krnl_bcss_u.h rsb_krnl_bcss_u.c \
+		rsb_krnl_bcss_spsv_u.h rsb_krnl_bcss_spsv_u.c \
+		rsb_krnl_bcss_spmv_u.h rsb_krnl_bcss_spmv_u.c \
+		rsb_krnl_bcss_misc_u.h rsb_krnl_bcss_misc_u.c \
+		rsb_krnl_bcoo_spmv_u.h rsb_krnl_bcoo_spmv_u.c \
+		rsb_krnl_bcss.h rsb_krnl_bcss.c \
+		rsb_spmv.h rsb_spmv.c \
+		rsb_merge.h rsb_merge.c \
+		rsb_ompio.c rsb_ompio.h \
+		rsb_util.h rsb_util.c \
+		rsb_spgemm_csr.c rsb_spgemm_csr.h \
+		rsb_spsum_misc.c rsb_spsum_misc.h \
+		rsb_prec.h rsb_prec.c
+
+LIB_BUILT_SOURCES_MAKEFILE=	\
+		rsb_stropts.c \
+		rsb_strmif.c \
+		rsb_types.h \
+		blas_sparse.h
+
+LIB_BUILT_SOURCES=	\
+	$(LIB_BUILT_SOURCES_MAKEFILE) $(LIB_BUILT_SOURCES_M4)
+	
+# rsb_prec.h: $(NODISTSRC)
+
+include_HEADERS=
+nodist_include_HEADERS=rsb-config.h
+EXTRAHEADERSFORTRAN=
+
+EXTRASRCFILESSFORTRAN=
+EXTRASRCPROGSSFORTRAN=
+if HAVE_SPARSE_BLAS_INTERFACE
+EXTRASRCFILESSFORTRAN+=rsb_blas_sparse.F90
+EXTRASRCPROGSSFORTRAN+=rsb_blas_sparse.F90
+if WANT_BLAS_SPARSE_MOD_INSTALL
+nodist_include_HEADERS+=blas_sparse.mod
+nodist_include_HEADERS+=rsb.mod
+endif
+blas_sparse.mod: rsb_blas_sparse.$(OBJEXT)
+rsb.mod: rsb.$(OBJEXT)
+# if WANT_BLAS_SPARSE_FI
+# nodist_include_HEADERS+=blas_sparse.fi
+# blas_sparse.fi: blas_sparse.m4
+# 	$(M4) $(M4_FLAGS) -D RSB_M4_WANT_BLAS_SPARSE_INTERFACE=1 $< > $@
+# endif
+EXTRAHEADERSFORTRAN+=rsb.F90
+rsb.F90: ch2icfb rsb.h $(top_srcdir)/scripts/rsb_h_to_rsb_fi.sh 
+	if test -f ch2icfb ; then $(top_srcdir)/scripts/rsb_h_to_rsb_fi.sh $(srcdir) > $@ ; else echo "Warning: Your system did not build ch2icfb for some reason --- skipping rebuild of "$@ ;true ; fi
+
+noinst_PROGRAMS+=ch2icfb$(EXEEXT) 
+ch2icfb_SOURCES=ch2icfb.c
+ch2icfb_CFLAGS=$(CFLAGS)
+ch2icfb_LDADD=
+else
+EXTRASRCFILESSFORTRAN+=
+EXTRASRCPROGSSFORTRAN+=
+endif
+
+if HAVE_FORTRAN_EXAMPLES
+sbtf_LDADD=$(default_ldadd)
+if HAVE_SPARSE_BLAS_INTERFACE
+sbtf_SOURCES=sbtf.F90
+EXTRASRCFILESSFORTRAN+=
+EXTRASRCPROGSSFORTRAN+=sbtf.F90 psbtf.F90 psb_mvsv_tester.f90
+else
+sbtf_SOURCES=
+EXTRASRCFILESSFORTRAN+=
+EXTRASRCPROGSSFORTRAN+=
+endif
+else
+EXTRASRCFILESSFORTRAN+=
+EXTRASRCPROGSSFORTRAN+=
+endif
+
+BUILT_SOURCES=	$(LIB_BUILT_SOURCES) $(RSBENCH_BUILT_SOURCES) $(ot_SOURCES) $(LIB_SPBLAS_BUILT_SOURCES) rsbenchxx.cpp $(EXTRASRCFILESSFORTRAN) rsb-config.h 
+
+.PHONY: e
+e:
+	vim rsb.h
+
+DIST_ARCHIVES_NOVERSION = librsb.tar.gz
+
+.PHONY: tdist
+tdist: dox
+	$(MAKE) dist VERSION=trunk
+
+.PHONY: rtdist
+rtdist: tdist
+	mv librsb-trunk.tar.gz $(DIST_ARCHIVES_NOVERSION) 
+
+.PHONY: rdist
+rdist: dox
+	$(MAKE) dist
+	mv $(DIST_ARCHIVES) $(DIST_ARCHIVES_NOVERSION) 
+
+.PHONY: ddist
+ddist: rdist
+	gpg -sbv -u 0xe0e669c8ef1258b8 $(DIST_ARCHIVES_NOVERSION)
+	md5sum $(DIST_ARCHIVES_NOVERSION) > $(DIST_ARCHIVES_NOVERSION).md5
+	gpg -sbav $(DIST_ARCHIVES_NOVERSION)
+	gpg --verify $(DIST_ARCHIVES_NOVERSION).sig
+
+.PHONY: bdist
+bdist: dox $(BINDISTFILES) 
+	rm -fR -- $(PACKAGE)-$(build)
+	mkdir $(PACKAGE)-$(build)
+	cp -fR $(BINDISTFILES) $(PACKAGE)-$(build)/
+	tar cvzf $(PACKAGE)-$(build).tgz $(PACKAGE)-$(build)  --exclude .svn  --exclude .deps
+	rm -fR $(PACKAGE)-$(build)
+	tar tvzf $(PACKAGE)-$(build).tgz
+
+.PHONY: help
+help:
+	@echo -e "Alternatives (see the README for these):\n make clean\n make cleanall\n make all \n make qqtests \n make qtests \n make tests \n make dist"
+
+if HAVE_M4
+LIB_CLEANALL_FILES=$(LIB_BUILT_SOURCES) $(LIB_SPBLAS_BUILT_SOURCES) $(RSBENCH_BUILT_SOURCES)
+else
+LIB_CLEANALL_FILES=$(LIB_BUILT_SOURCES_MAKEFILE)
+endif
+
+.PHONY: cleanall
+cleanall: clean
+	rm -rf $(LIB_CLEANALL_FILES)
+	$(MAKE) clean
+
+gclean:
+	rm -rf  *.gcov *.gcno
+
+noinst_LTLIBRARIES=librsb_nounroll.la librsb_base.la librsb_spblas.la
+lib_LTLIBRARIES=librsb.la
+
+librsb_la_SOURCES=
+am_librsb_la_OBJECTS=$(am_librsb_base_la_OBJECTS) $(am_librsb_nounroll_la_OBJECTS) $(am_librsb_spblas_la_OBJECTS)
+librsb_la_LDFLAGS=-no-undefined -version-info $(LIBRSB_ABI_VERSION)
+
+#librsb_la_LIBADD=-lgfortran
+default_ldadd=-L$(top_builddir)  $(top_builddir)/librsb.la 
+
+librsb_nounroll_la_CFLAGS=$(NOUNROLLCFLAGS)
+librsb_nounroll_la_SOURCES=$(LIB_BUILT_SOURCES) $(NODISTSRC)
+nodist_librsb_nounroll_la_SOURCES=$(NODISTSRC)
+librsb_base_la_CFLAGS=
+nodist_librsb_base_la_SOURCES=$(NODISTSRC)
+librsb_base_la_SOURCES= 	\
+			rsb_common.h \
+			rsb_is.c  rsb_is.h \
+			rsb_mio.c rsb_mio.h \
+			rsb_op.c  rsb_op.h \
+			rsb_bio.c rsb_bio.h \
+			rsb_get.c rsb_get.h \
+			rsb_set.c rsb_set.h \
+			rsb_coo.c rsb_coo.h \
+			rsb_csr.c rsb_csr.h \
+			rsb_coo_check.c rsb_coo_check.h \
+			rsb_coo_symm.c rsb_coo_symm.h \
+			rsb_idx.c rsb_idx.h \
+			rsb_srt.c rsb_srt.h \
+			rsb_srtp.c rsb_srtp.h \
+			rsb_src.c rsb_src.h \
+			rsb_test_accuracy.c rsb_test_accuracy.h \
+			rsb_clone.c rsb_clone.h \
+			rsb_rec.h rsb_rec.c \
+			rsb_render.c rsb_render.h \
+			rsb_eps.c rsb_eps.h \
+			rsb_msort_up.c rsb_msort_up.h \
+			rsb_sys.c rsb_sys.h \
+			rsb_blas_stuff.c rsb_blas_stuff.h \
+			rsb_gen.c rsb_gen.h \
+			rsb_perf.c rsb_perf.h \
+			rsb_rsb.c rsb.h \
+			rsb_err.c rsb_err.h \
+			rsb_tune.c rsb_tune.h \
+			rsb_struct.h \
+			rsb_do.c rsb_do.h \
+			rsb_internals.c rsb_internals.h \
+			rsb_garbage.c rsb_garbage.h \
+			rsb_mmio.c rsb_mmio.h \
+			rsb_partition.c rsb_partition.h \
+		       	rsb_lbl.h \
+			rsb_mbw.c rsb_mbw.h \
+			rsb_limiter.c rsb_limiter.h \
+			rsb_fpb.c rsb_fpb.h \
+			rsb_spgemm.c rsb_spgemm.h \
+			rsb_spsum.c rsb_spsum.h \
+			rsb_spsv.c rsb_spsv.h \
+			rsb_lock.h rsb_lock.c \
+			rsb_swt.h rsb_swt.c \
+			rsb_init.h rsb_init.c \
+			rsb_dump.h rsb_dump.c \
+			rsb_cpmv.h rsb_cpmv.c \
+			rsb_psblas.h \
+			rsb_asm.h  rsb_asm.c  \
+			rsb_user.c \
+			rsb_coo2rec.c rsb_coo2rec.h \
+			rsb_rec2coo.c rsb_rec2coo.h \
+			rsb_rec2csr.c rsb_rec2csr.h \
+			rsb_csr2coo.c rsb_csr2coo.h \
+			$(EXTRASRCFILESSFORTRAN)
+
+LIB_SPBLAS_BUILT_SOURCES= rsb_libspblas.c rsb_libspblas.h
+
+librsb_spblas_la_SOURCES= \
+		rsb_libspblas_handle.h	rsb_libspblas_handle.c	\
+		$(LIB_SPBLAS_BUILT_SOURCES)
+
+if HAVE_M4
+rsb_libspblas.h: $(srcdir)/rsb_libspblas.m4 $(srcdir)/libspblas_macros.m4
+rsb_libspblas.c: $(srcdir)/rsb_libspblas.m4 $(srcdir)/libspblas_macros.m4
+psb_rsb_mod.F90: $(srcdir)/psb_rsb_mod.m4 $(srcdir)/rsb_fortran_macros.m4
+rsb_mod.F90: $(srcdir)/rsb_mod.m4 $(srcdir)/rsb_fortran_macros.m4
+rsb_blas_sparse.F90: $(srcdir)/rsb_blas_sparse.m4 $(srcdir)/rsb_fortran_macros.m4
+rsb_libspblas_handle.c: rsb_libspblas.h rsb_libspblas_handle.h
+rsb_libspblas_tests.c: rsb_libspblas.h rsb_libspblas_tests.h
+rsb_mkl.c: $(srcdir)/rsb_mkl.m4
+rsb_mkl.h: $(srcdir)/rsb_mkl.m4
+endif
+
+if WANT_INTERNAL_HEADERS_INSTALL
+rsb-librsb-internals.h: $(librsb_base_la_SOURCES) $(librsb_nounroll_la_SOURCES)
+	( cat rsb_license_header.inc                                                                            ; \
+       	echo '/*! Collated internal headers of librsb -- for inspection purposes only (not for usage). */'; echo ; \
+	echo '/* @cond INNERDOC */' ; \
+	echo '#ifndef RSB_LIBRSB_INTERNALS_H_INCLUDED'									; \
+	echo '#define RSB_LIBRSB_INTERNALS_H_INCLUDED'									; \
+	find $+ -iname '*.h' -exec 'cat' '{}' ';' | grep -v 'cond INNERDOC\|endcond' ; \
+	echo '#endif /* RSB_LIBRSB_INTERNALS_H_INCLUDED */'								; \
+	echo '/* @endcond */' ;  )> $@
+
+rsb-incoming.h: $(librsb_base_la_SOURCES) $(librsb_nounroll_la_SOURCES) $(top_srcdir)/rsb-incoming.grep
+	( cat rsb_license_header.inc                                                                            ; \
+       	echo '/* Collated internal headers of librsb -- for experimental use only (not for usage). */'; echo	; \
+	echo '#ifndef RSB_LIBRSB_INCOMING_H_INCLUDED'									; \
+	echo '#define RSB_LIBRSB_INCOMING_H_INCLUDED'									; \
+	echo '#include <rsb.h>'									; \
+	( cat `svn ls | grep h$$` $(srcdir)/rsb_util.h ) | grep -f $(top_srcdir)/rsb-incoming.grep | sed -f $(top_srcdir)/rsb-incoming.sed | tac ; \
+	echo '#endif /* RSB_LIBRSB_INCOMING_H_INCLUDED */'								; )> $@
+
+nodist_include_HEADERS+=rsb-librsb-internals.h rsb-incoming.h
+endif
+
+include_HEADERS+=rsb.h rsb_types.h blas_sparse.h $(EXTRAHEADERSFORTRAN)
+RSB_PREM4HEADERS=$(include_HEADERS) rsb_mkl.h
+
+rsb_test_matops.h: rsb_mkl.h 
+BINDISTFILES=$(include_HEADERS) $(EXTRA_LIBRARIES) $(dist_doc_DATA) doc/html doc/man/man3 doc/man/*rsb* librsb-config librsb.pc
+rsbench_SOURCES=  rsbench.c $(RSBENCH_BUILT_SOURCES) rsb_genmm.c rsb_mmls.c \
+		rsb_pr.c     rsb_pr.h \
+		rsb_pcnt.c rsb_pcnt.h \
+		rsb_failure_tests.h 	rsb_failure_tests.c 	\
+		rsb_libspblas_tests.h 	rsb_libspblas_tests.c 
+# rsbenchxx_SOURCES=rsbenchxx.cpp
+rsbench_DEPENDENCIES= $(top_builddir)/librsb.la $(top_builddir)/librsb_nounroll.la $(top_builddir)/librsb_base.la $(top_builddir)/librsb_spblas.la rsb_mkl.c rsb_mkl.h # uhm
+# rsbench_DEPENDENCIES+= $(top_builddir)/librsb.so
+if WANT_BLAS_SPARSE_MOD_INSTALL
+else
+if HAVE_FC
+if HAVE_SPARSE_BLAS_INTERFACE
+rsbench_DEPENDENCIES+=blas_sparse.mod rsb.mod
+endif
+endif
+endif
+# rsbenchxx_DEPENDENCIES= rsbenchxx.cpp $(top_builddir)/librsb_nounroll.a $(top_builddir)/librsb_base.a # uhm
+# if  WANT_V
+# endif
+#ygemv_SOURCES=ygemv.c
+#gemv_SOURCES=gemv.c
+#L1C_SOURCES=L1C.c
+rsbench_CFLAGS=${RSB_RSBENCH_CFLAGS}
+# rsbench_LDADD=$(abs_top_builddir)/librsb.la -lgfortran ${RSB_RSBENCH_LIBS}
+rsbench_LDADD=$(abs_top_builddir)/librsb.la  ${RSB_RSBENCH_LIBS}
+# rsbenchxx_LDADD=$(default_ldadd)
+#L1C_LDADD=$(default_ldadd)
+#gemv_LDADD=$(default_ldadd)
+ot_LDADD=$(default_ldadd)
+
+MATRICES=pd.mtx A.mtx vf.mtx
+RSB_MFILES=psb_mvsv_tester.m psbtf.m ot.m sbtc.m sbtg.m sbtf.m sbtg-types.m
+
+EXTRA_DIST=$(LIBSOURCES) \
+	$(librsb_spblas_la_SOURCES) \
+	$(ot_SOURCES) \
+	$(MATRICES) \
+	$(RSB_MFILES) \
+	autogen.sh \
+	scripts/test.sh \
+	rsb_license_header.inc \
+	rsb-config.h.hin \
+	rsb-incoming.sed rsb-incoming.grep \
+	rsb.F90 \
+	librsb.pc.in \
+	$(EXTRASRCPROGSSFORTRAN)
+
+if HAVE_SPARSE_BLAS_INTERFACE
+EXTRA_DIST+=$(sbtc_SOURCES)
+endif
+
+LIBSOURCES=$(LIB_BUILT_SOURCES) \
+	rsb_krnl_bcss.m4   \
+	rsb_krnl_bcss_u.m4 \
+	rsb_krnl_bcss_l.m4 \
+	rsb_krnl_bcss_spsv_u.m4 \
+	rsb_krnl_bcss_spmv_u.m4 \
+	rsb_krnl_bcss_misc_u.m4 \
+	rsb_krnl_bcoo_spmv_u.m4 \
+	rsb_krnl_bcoo_macros.m4 \
+	rsb_merge.m4 \
+	rsb_util.m4 \
+	rsb_ompio.m4 \
+	rsb_mkl.m4 \
+	rsb_spgemm_csr.m4 \
+	rsb_spsum_misc.m4 \
+	rsb_prec.m4 \
+	rsb_spmv.m4 \
+	rsb_krnl.m4 \
+	rsb_bench.m4 \
+	rsb_types.m4 \
+	rsb_unroll.m4 \
+	rsb_mergesort.m4 \
+	rsb_permute.m4 \
+	rsb_krnl_vb_macros.m4 rsb_misc.m4 rsb_test_matops.m4 mergesort_macros.m4 \
+	rsb_krnl_bcss_macros.m4 rsb_krnl_macros.m4 do_unroll.m4 wisdom.m4 \
+	rsb_config.m4 \
+	rsb_krnl_linked_lists.m4 rsb_krnl_lb.m4 \
+	rsb_krnl_vb.m4 \
+	L1C.m4 testgen.sh.m4 \
+	libspblas_macros.m4 \
+	rsb_fortran_macros.m4 \
+	sbtg-types.m4 \
+	rsb_mod.m4 rsb_blas_sparse.m4 \
+	rsb_libspblas.m4
+
+ot_SOURCES=ot.c ot-spmv_uauz.c ot-infty_norm.c ot-scale.c ot-spmv_uaua.c ot-spmv_unua.c \
+	ot-spmv_uxua.c ot-spmv_sasa.c ot-spsv_uxua.c ot-spmv_sxsa.c ot-spsv_sxsx.c \
+	ot-spsv.c ot-spmv.c ot-rowssums.c
+
+if WANT_OCTAVE_TESTING
+psbtf.F90: psbtf.m sbtg.m sbtg-types.m
+	$(OCTAVE) $(OCTAVE_FLAGS) psbtf.m > psbtf.F90
+
+psb_mvsv_tester.f90: psb_mvsv_tester.m sbtg.m sbtg-types.m
+	$(OCTAVE) $(OCTAVE_FLAGS) psb_mvsv_tester.m > psb_mvsv_tester.f90
+
+if HAVE_FORTRAN_EXAMPLES
+sbtf.F90: sbtf.m sbtg.m sbtg-types.m
+	$(OCTAVE) $(OCTAVE_FLAGS) sbtf.m > sbtf.F90
+endif
+
+if HAVE_SPARSE_BLAS_INTERFACE
+$(sbtc_SOURCES): sbtc.m sbtg.m sbtg-types.m
+	$(OCTAVE) $(OCTAVE_FLAGS) sbtc.m > $(sbtc_SOURCES)
+endif
+else
+if HAVE_FORTRAN_EXAMPLES
+sbtf.F90: sbtf.m
+	echo "program sbtf; print *, 'sorry, you did not install octave, so the octave based tester is disabled'; end program" > sbtf.F90
+endif
+
+psb_mvsv_tester.f90: psb_mvsv_tester.m
+	echo "program psb_mvsv_tester; print *, 'sorry, you did not install octave, so the octave based tester is disabled'; end program" > psb_mvsv_tester.f90
+
+psbtf.F90: psbtf.m
+	echo "program psbtf; print *, 'sorry, you did not install octave, so the octave based tester is disabled'; end program" > psbtf.F90
+
+if HAVE_SPARSE_BLAS_INTERFACE
+$(sbtc_SOURCES): sbtc.m
+	echo "#include <stdio.h>" > $(sbtc_SOURCES)
+	echo "int main(void){printf(\"sorry, you did not install octave, so the octave based tester is disabled\\n\");return 0;}" >> $(sbtc_SOURCES)
+endif
+endif
+
+if WANT_OCTAVE_TESTING_AND_INT
+$(ot_SOURCES): ot.m sbtg.m sbtg-types.m
+	$(OCTAVE) $(OCTAVE_FLAGS) ot.m $(WANT_ROW_UNLOOP_FACTORS) $(WANT_COLUMN_UNLOOP_FACTORS) $(WANT_MATRIX_ALL_OPS),$(WANT_MATRIX_ALL_META_OPS) $(WANT_MATRIX_OPS),$(WANT_MATRIX_ALL_META_OPS) main > ot.c
+	for o in `echo $(WANT_MATRIX_ALL_OPS),$(WANT_MATRIX_ALL_META_OPS) | sed "s/,/ /g"`  ; do $(OCTAVE) $(OCTAVE_FLAGS) ot.m $(WANT_ROW_UNLOOP_FACTORS) $(WANT_COLUMN_UNLOOP_FACTORS) $(WANT_MATRIX_ALL_OPS) $(WANT_MATRIX_OPS) $$o > ot-$$o.c  ; done
+else
+$(ot_SOURCES):
+	echo "#include <stdio.h>" > ot.c
+	echo "int main(void){printf(\"sorry, you did not install octave, so the octave based tester is disabled\\n\");return 0;}" >> ot.c
+	for o in `echo $(WANT_MATRIX_ALL_OPS),$(WANT_MATRIX_ALL_META_OPS) | sed "s/,/ /g"` ; do echo "static int f(){return 0;}" > ot-$$o.c  ; done
+endif
+
+rsb_strmif.c: rsb.h
+	( cat rsb_license_header.inc  ; \
+	echo '/* @cond INNERDOC */' ; \
+	echo '/* This file was generated by the Makefile */' ; \
+	echo '#include "rsb.h"' ; \
+	echo '#include "rsb_common.h"' ; \
+	echo '#include "rsb_do.h"' ; \
+	echo 'rsb_err_t rsb__do_get_matrix_info_from_string(const struct rsb_mtx_t *matrix, const rsb_char_t *mis, void* info, size_t buflen)' ; \
+	echo '{ rsb_err_t errval=RSB_ERR_BADARGS; if(!matrix || !mis || !info)goto err;' ; \
+	grep '^\(.define_\|.\)\ RSB_MIF_' rsb.h | sed 's/^. /#define /g;s/=0x/0x/g' | sed 's/\s\+/ /g;s/\/.*(//g;s/).*\///g;s/\/.*(//g;s/).*\///g;' | cut -d ' ' -f 2,4 | sed 's/^\(\S\+\) \(\S\+\)/if(0 == strcmp(mis,"\1")){ errval = rsb__do_get_matrix_info(matrix,\1,info,buflen); goto done;}/g;'; \
+	echo 'done:';	\
+	echo 'return errval;';	\
+	echo 'err: return RSB_ERR_GENERIC_ERROR;';	\
+	echo '}'; \
+	echo '/* @endcond */' ; \
+	) > $@
+
+rsb_stropts.c: rsb.h
+	( cat rsb_license_header.inc  ; \
+	echo '/* @cond INNERDOC */' ; \
+	echo '/* This file was generated by the Makefile */' ; \
+	echo '#include "rsb.h"' ; \
+	echo '#include "rsb_common.h"' ; \
+	echo 'rsb_err_t rsb__stropts_set(const rsb_char_t *opn, const rsb_char_t *arg)' ; \
+	echo '{ rsb_err_t errval=RSB_ERR_NO_ERROR; if(!opn || !arg)goto err;' ; \
+	grep '^\(.define\|.\)\ RSB_IO_WANT_' rsb.h | sed 's/^. /#define /g;s/=0x/0x/g' | grep 'rsb_int_t\|rsb_char_t\|rsb_real_t'|sed 's/\s\+/ /g;s/\/.*(//g;s/).*\///g;' | cut -d ' ' -f 2,4,5 | sed 's/^\(\S\+\) \(const \)*\(\S\+\)/if(0 == strcmp(opn,"\1")){ \2\3 RSB_DO_REINIT_SINGLE_VALUE_SET(\1,\&val,errval); goto done;}/g; s/\(rsb_char_t\*\)/\1 val = arg;/g;s/\(rsb_int_t\)/\1 val = rsb__util_atoi(arg);/g; s/\(rsb_real_t\)/\1 val = rsb__util_atof(arg);/g'; \
+	echo 'done:';	\
+	echo 'return errval;';	\
+	echo 'err: return RSB_ERR_GENERIC_ERROR;';	\
+	echo '}'; \
+	echo '/* @endcond */' ; \
+	) > $@
+
+.PHONY: feedback
+feedback: rsbench$(EXEEXT)
+	./rsbench$(EXEEXT) -O r 
+
+M4_FLAGS=	\
+		-I $(srcdir) \
+		-D WANT_SPSM_DIAG_CHECK=$(WANT_SPSM_DIAG_CHECK) \
+		-D WANT_HALFWORD_INDICES=$(WANT_HALFWORD_INDICES) \
+		-D WANT_ROW_UNLOOP_FACTORS=$(WANT_ROW_UNLOOP_FACTORS) \
+		-D WANT_COLUMN_UNLOOP_FACTORS=$(WANT_COLUMN_UNLOOP_FACTORS) \
+		-D WANT_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR='$(WANT_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR)' \
+		-D WANT_COLUMN_UNLOOP_FACTORS=$(WANT_COLUMN_UNLOOP_FACTORS) \
+		-D WANT_MATRIX_OPS='$(WANT_MATRIX_OPS)' \
+		-D WANT_TYPES='$(WANT_TYPES)' \
+		-D WANT_MATRIX_ALL_OPS='$(WANT_MATRIX_ALL_OPS)' \
+		-D WANT_MATRIX_ALL_TYPES='$(WANT_MATRIX_ALL_TYPES)' \
+		-D WANT_MATRIX_ALL_META_OPS='$(WANT_MATRIX_ALL_META_OPS)' \
+		-D WANT_MATRIX_BCSS_STORAGE='$(WANT_MATRIX_BCSS_STORAGE)' \
+		-D WANT_MATRIX_BCOO_STORAGE='$(WANT_MATRIX_BCOO_STORAGE)' \
+		-D WANT_MATRIX_VB_STORAGE='$(WANT_MATRIX_VB_STORAGE)' \
+		-D WANT_LOOPING_KERNELS='$(WANT_LOOPING_KERNELS)' \
+		-D WANT_MATRIX_STORAGE='$(WANT_MATRIX_STORAGE)'
+
+RSB_KERNELS_MACROS=rsb_krnl.m4 rsb_krnl_macros.m4  rsb_krnl_linked_lists.m4 wisdom.m4 rsb_misc.m4 rsb_krnl_bcss_macros.m4 \
+	rsb_krnl_bcss_spsv_u.m4 rsb_krnl_bcss_spmv_u.m4 rsb_krnl_bcss_misc_u.m4 rsb_krnl_bcoo_macros.m4 rsb_bench.m4 rsb_mergesort.m4 mergesort_macros.m4 rsb_permute.m4 rsb_krnl_vb.m4 rsb_krnl_vb_macros.m4 rsb_krnl_lb.m4
+rsb_mergesort.c rsb_permute.c rsb_krnl_vb.c rsb_krnl_vb.h rsb_unroll.h rsb_krnl_lb.h rsb_krnl_lb.c rsb_krnl.c rsb_bench.c rsb_krnl_bcss_spsv_u.c rsb_krnl_bcss_spmv_u.c rsb_krnl_bcss_misc_u.c rsb_krnl_bcss.c rsb_krnl_bcoo.c rsb_krnl_bcoo_spmv_u.c rsb_krnl.c: $(RSB_KERNELS_MACROS)
+
+blas_sparse.h: $(srcdir)/rsb_libspblas.h
+	cp -p $< $@ 
+
+#rsb-config.h: config.h
+#	( cat rsb_license_header.inc                                                                            ; \
+#	echo '/* This header file is not intended to be included by librsb programs: it is only for inspection. */'; \
+#	echo '#ifndef RSB_CONFIG_H_INCLUDED'									; \
+#	echo '#define RSB_CONFIG_H_INCLUDED'									; \
+#	cat  $< | sed 's/^#define /#define RSB_/g;s/ RSB_RSB_/ RSB_/g'   					; \
+#	echo '/* #endif RSB_CONFIG_H_INCLUDED */'								; \
+#	echo '#endif'												)> $@
+
+if WANT_CXX_TEST_RSBENCH
+rsbenchxx.cpp: $(srcdir)/rsbench.c
+	cp $(srcdir)/rsbench.c rsbenchxx.cpp
+else
+endif
+
+if HAVE_M4
+sbtg-types.m: sbtg-types.m4 rsb_types.h
+	$(M4) $(M4_FLAGS) $< > $@
+
+.m4.c: $(RSB_PREM4HEADERS)
+	$(M4) $(M4_FLAGS) $< > $@
+
+.m4.h: $(RSB_PREM4HEADERS)
+	$(M4) $(M4_FLAGS) -D ONLY_WANT_HEADERS=1 $< > $@
+
+.m4.F90:
+	$(M4) $(M4_FLAGS) $< > $@
+endif
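+
+# Illustrative only (assuming GNU m4 is available): the suffix rules above
+# amount to invocations along the lines of
+#   $(M4) $(M4_FLAGS) rsb_krnl.m4 > rsb_krnl.c
+#   $(M4) $(M4_FLAGS) -D ONLY_WANT_HEADERS=1 rsb_krnl.m4 > rsb_krnl.h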
+
+# AM_LDFLAGS=$(LIBS)
+
+.PHONY: devsplinttest
+devsplinttest:
+	for f in $(librsb_nounroll_la_SOURCES) $(librsb_base_la_SOURCES) ; do splint -I`gcc -print-search-dirs | grep install: | sed s/install:.//g`  -preproc -DHAVE_CONFIG_H $$f > $$f-splint.txt ;done
+
+.PHONY: devtests
+devtests:
+	scripts/devtests.sh
+	@echo "	[*] dev test terminated successfully !"
+
+.PHONY: test
+test: tests
+
+scripts/readme-tests.sh: README
+	echo 'if test x"$${srcdir}" = x ; then srcdir=. ; fi' > $@
+	LANG=C grep '^ *\(make \)**\./\(rsbench\|sbtc\|sbtf\)\|\(^ *test\> -f\)' $< | sed 's/\(rsbench\|sbtc\|sbtf\)/\1'"$(EXEEXT)"'/g' | sed 's/#.*$$//g;s/$$/ || exit 255/g' | sed 's/A.mtx/$${srcdir}\/A.mtx/g' >> $@
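+
+# The rule above distills the runnable command lines from the README into
+# a shell script, which the mtests target below executes via
+# "srcdir=$(srcdir) $(SHELL) $(srcdir)/scripts/readme-tests.sh".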
+
+.PHONY: mtests
+mtests: rsbench$(EXEEXT) $(srcdir)/scripts/readme-tests.sh
+	srcdir=$(srcdir) $(SHELL) $(srcdir)/scripts/readme-tests.sh
+	srcdir=$(srcdir) $(SHELL) $(srcdir)/scripts/doc-tests.sh
+	if ./rsbench$(EXEEXT)  -C | grep 'type char codes.*:*[SDCZ]' ; then cd examples ; $(MAKE) tests ; fi
+
+.PHONY: qtests
+qtests:	all
+	@echo " [*] beginning quick test..."
+	$(MAKE) mtests -C .
+	./rsbench$(EXEEXT) -Q 30.0Q
+	@echo " [*] quick test terminated successfully !"
+
+.PHONY: qqtests
+qqtests:	all
+	@echo " [*] beginning quick quiet test..."
+	$(MAKE) mtests -C . > /dev/null 2> /dev/null
+	./rsbench$(EXEEXT) -Q 30.0Q
+	@echo " [*] quick test terminated successfully !"
+
+.PHONY: tests
+tests:	$(EXTRA_LIBRARIES) ot$(EXEEXT) $(EXTRAPROGRAMSC) rsbench$(EXEEXT)
+	$(MAKE) qtests -C .
+	srcdir=$(srcdir) $(SHELL) $(srcdir)/scripts/test.sh
+	./ot$(EXEEXT)
+if HAVE_SPARSE_BLAS_INTERFACE
+	./sbtc$(EXEEXT)
+endif
+	@echo "	[*] full test terminated successfully !"
+
+.PHONY: btests
+btests: dist
+	scripts/dev_brute_tests.sh $(distdir).tar.gz
+	@echo "	[*] brute force package testing terminated successfully !"
+
+.PHONY: wc
+wc:
+	wc *.c
+	wc *.h
+	wc *.m4
+	wc *.m
+	cat *.m *.m4 *.c *.h |wc
+	cat `svn ls|grep .c$$` | wc
+	cat `svn ls|grep .h$$` | wc
+	cat `svn ls|grep .m4$$` | wc
+	cat `svn ls|grep .m$$` | wc
+	cat `svn ls|grep '\.\(m\|m4\|c\|h\)$$'` | wc
+
+.PHONY: doxonly
+doxonly:
+	$(MAKE) makedox -C doc
+
+.PHONY: dox
+dox: doxonly
+	$(MAKE) && cd examples && $(MAKE) 
+
+.PHONY: install-exec-hook
+install-exec-hook:
+	$(mkdir_p) "$(DESTDIR)$(docdir)"
+if HAVE_PKGCONFIG_INSTALL
+	$(mkdir_p) "$(DESTDIR)$(libdir)/pkgconfig"
+	$(INSTALL_DATA) librsb.pc "$(DESTDIR)$(libdir)/pkgconfig/"
+endif
+
+hinfo.log: all
+	scripts/hinfo.sh 2>&1 | cat >  hinfo.log
+
+# NOTE: The following target is only for experimental purposes.
+#shared: $(top_builddir)/librsb.so
+#$(top_builddir)/librsb.so: $(am_librsb_la_OBJECTS)
+#	$(CC) -o $@ -shared $(am_librsb_la_OBJECTS)
+
diff --git a/Makefile.in b/Makefile.in
new file mode 100644
index 0000000..fb1349a
--- /dev/null
+++ b/Makefile.in
@@ -0,0 +1,2596 @@
+# Makefile.in generated by automake 1.11.6 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software
+# Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+
+
+
+
+VPATH = @srcdir@
+am__make_dryrun = \
+  { \
+    am__dry=no; \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        echo 'am--echo: ; @echo "AM"  OK' | $(MAKE) -f - 2>/dev/null \
+          | grep '^AM OK$$' >/dev/null || am__dry=yes;; \
+      *) \
+        for am__flg in $$MAKEFLAGS; do \
+          case $$am__flg in \
+            *=*|--*) ;; \
+            *n*) am__dry=yes; break;; \
+          esac; \
+        done;; \
+    esac; \
+    test $$am__dry = yes; \
+  }
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+# L1C 
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@am__append_1 = sbtc$(EXEEXT)
+@HAVE_FORTRAN_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@am__append_2 = sbtf$(EXEEXT)
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@am__append_3 = rsb_blas_sparse.F90
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@am__append_4 = rsb_blas_sparse.F90
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@@WANT_BLAS_SPARSE_MOD_INSTALL_TRUE@am__append_5 = blas_sparse.mod \
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@@WANT_BLAS_SPARSE_MOD_INSTALL_TRUE@	rsb.mod
+# if WANT_BLAS_SPARSE_FI
+# nodist_include_HEADERS+=blas_sparse.fi
+# blas_sparse.fi: blas_sparse.m4
+# 	$(M4) $(M4_FLAGS) -D RSB_M4_WANT_BLAS_SPARSE_INTERFACE=1 $< > $@
+# endif
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@am__append_6 = rsb.F90
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@am__append_7 = ch2icfb$(EXEEXT) 
+@HAVE_SPARSE_BLAS_INTERFACE_FALSE@am__append_8 = 
+@HAVE_SPARSE_BLAS_INTERFACE_FALSE@am__append_9 = 
+@HAVE_FORTRAN_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@am__append_10 = 
+@HAVE_FORTRAN_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@am__append_11 = sbtf.F90 psbtf.F90 psb_mvsv_tester.f90
+@HAVE_FORTRAN_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_FALSE@am__append_12 = 
+@HAVE_FORTRAN_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_FALSE@am__append_13 = 
+@HAVE_FORTRAN_EXAMPLES_FALSE@am__append_14 = 
+@HAVE_FORTRAN_EXAMPLES_FALSE@am__append_15 = 
+@WANT_INTERNAL_HEADERS_INSTALL_TRUE@am__append_16 = rsb-librsb-internals.h rsb-incoming.h
+# rsbench_DEPENDENCIES+= $(top_builddir)/librsb.so
+@HAVE_FC_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@@WANT_BLAS_SPARSE_MOD_INSTALL_FALSE@am__append_17 = blas_sparse.mod rsb.mod
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@am__append_18 = $(sbtc_SOURCES)
+subdir = .
+DIST_COMMON = README $(am__configure_deps) $(am__include_HEADERS_DIST) \
+	$(dist_doc_DATA) $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
+	$(srcdir)/librsb-config.in $(srcdir)/librsb.pc.in \
+	$(srcdir)/rsb-config.h.in $(srcdir)/rsb_config.m4.in \
+	$(top_srcdir)/configure AUTHORS COPYING ChangeLog INSTALL NEWS \
+	compile config.guess config.sub depcomp install-sh ltmain.sh \
+	missing
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \
+ configure.lineno config.status.lineno
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = rsb-config.h
+CONFIG_CLEAN_FILES = librsb-config librsb.pc rsb_config.m4
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+    *) f=$$p;; \
+  esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+  srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+  for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+  for p in $$list; do echo "$$p $$p"; done | \
+  sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+  $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+    if (++n[$$2] == $(am__install_max)) \
+      { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+    END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+  sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+  sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+  test -z "$$files" \
+    || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+    || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+         $(am__cd) "$$dir" && rm -f $$files; }; \
+  }
+am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)" \
+	"$(DESTDIR)$(bindir)" "$(DESTDIR)$(docdir)" \
+	"$(DESTDIR)$(includedir)" "$(DESTDIR)$(includedir)"
+LTLIBRARIES = $(lib_LTLIBRARIES) $(noinst_LTLIBRARIES)
+librsb_la_LIBADD =
+librsb_la_OBJECTS = $(am_librsb_la_OBJECTS)
+librsb_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(librsb_la_LDFLAGS) $(LDFLAGS) -o $@
+librsb_base_la_LIBADD =
+am__librsb_base_la_SOURCES_DIST = rsb_common.h rsb_is.c rsb_is.h \
+	rsb_mio.c rsb_mio.h rsb_op.c rsb_op.h rsb_bio.c rsb_bio.h \
+	rsb_get.c rsb_get.h rsb_set.c rsb_set.h rsb_coo.c rsb_coo.h \
+	rsb_csr.c rsb_csr.h rsb_coo_check.c rsb_coo_check.h \
+	rsb_coo_symm.c rsb_coo_symm.h rsb_idx.c rsb_idx.h rsb_srt.c \
+	rsb_srt.h rsb_srtp.c rsb_srtp.h rsb_src.c rsb_src.h \
+	rsb_test_accuracy.c rsb_test_accuracy.h rsb_clone.c \
+	rsb_clone.h rsb_rec.h rsb_rec.c rsb_render.c rsb_render.h \
+	rsb_eps.c rsb_eps.h rsb_msort_up.c rsb_msort_up.h rsb_sys.c \
+	rsb_sys.h rsb_blas_stuff.c rsb_blas_stuff.h rsb_gen.c \
+	rsb_gen.h rsb_perf.c rsb_perf.h rsb_rsb.c rsb.h rsb_err.c \
+	rsb_err.h rsb_tune.c rsb_tune.h rsb_struct.h rsb_do.c rsb_do.h \
+	rsb_internals.c rsb_internals.h rsb_garbage.c rsb_garbage.h \
+	rsb_mmio.c rsb_mmio.h rsb_partition.c rsb_partition.h \
+	rsb_lbl.h rsb_mbw.c rsb_mbw.h rsb_limiter.c rsb_limiter.h \
+	rsb_fpb.c rsb_fpb.h rsb_spgemm.c rsb_spgemm.h rsb_spsum.c \
+	rsb_spsum.h rsb_spsv.c rsb_spsv.h rsb_lock.h rsb_lock.c \
+	rsb_swt.h rsb_swt.c rsb_init.h rsb_init.c rsb_dump.h \
+	rsb_dump.c rsb_cpmv.h rsb_cpmv.c rsb_psblas.h rsb_asm.h \
+	rsb_asm.c rsb_user.c rsb_coo2rec.c rsb_coo2rec.h rsb_rec2coo.c \
+	rsb_rec2coo.h rsb_rec2csr.c rsb_rec2csr.h rsb_csr2coo.c \
+	rsb_csr2coo.h rsb_blas_sparse.F90
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@am__objects_1 = rsb_blas_sparse.lo
+am__objects_2 =
+am__objects_3 = $(am__objects_1) $(am__objects_2) $(am__objects_2) \
+	$(am__objects_2) $(am__objects_2)
+am_librsb_base_la_OBJECTS = librsb_base_la-rsb_is.lo \
+	librsb_base_la-rsb_mio.lo librsb_base_la-rsb_op.lo \
+	librsb_base_la-rsb_bio.lo librsb_base_la-rsb_get.lo \
+	librsb_base_la-rsb_set.lo librsb_base_la-rsb_coo.lo \
+	librsb_base_la-rsb_csr.lo librsb_base_la-rsb_coo_check.lo \
+	librsb_base_la-rsb_coo_symm.lo librsb_base_la-rsb_idx.lo \
+	librsb_base_la-rsb_srt.lo librsb_base_la-rsb_srtp.lo \
+	librsb_base_la-rsb_src.lo librsb_base_la-rsb_test_accuracy.lo \
+	librsb_base_la-rsb_clone.lo librsb_base_la-rsb_rec.lo \
+	librsb_base_la-rsb_render.lo librsb_base_la-rsb_eps.lo \
+	librsb_base_la-rsb_msort_up.lo librsb_base_la-rsb_sys.lo \
+	librsb_base_la-rsb_blas_stuff.lo librsb_base_la-rsb_gen.lo \
+	librsb_base_la-rsb_perf.lo librsb_base_la-rsb_rsb.lo \
+	librsb_base_la-rsb_err.lo librsb_base_la-rsb_tune.lo \
+	librsb_base_la-rsb_do.lo librsb_base_la-rsb_internals.lo \
+	librsb_base_la-rsb_garbage.lo librsb_base_la-rsb_mmio.lo \
+	librsb_base_la-rsb_partition.lo librsb_base_la-rsb_mbw.lo \
+	librsb_base_la-rsb_limiter.lo librsb_base_la-rsb_fpb.lo \
+	librsb_base_la-rsb_spgemm.lo librsb_base_la-rsb_spsum.lo \
+	librsb_base_la-rsb_spsv.lo librsb_base_la-rsb_lock.lo \
+	librsb_base_la-rsb_swt.lo librsb_base_la-rsb_init.lo \
+	librsb_base_la-rsb_dump.lo librsb_base_la-rsb_cpmv.lo \
+	librsb_base_la-rsb_asm.lo librsb_base_la-rsb_user.lo \
+	librsb_base_la-rsb_coo2rec.lo librsb_base_la-rsb_rec2coo.lo \
+	librsb_base_la-rsb_rec2csr.lo librsb_base_la-rsb_csr2coo.lo \
+	$(am__objects_3)
+nodist_librsb_base_la_OBJECTS = $(am__objects_2)
+librsb_base_la_OBJECTS = $(am_librsb_base_la_OBJECTS) \
+	$(nodist_librsb_base_la_OBJECTS)
+librsb_nounroll_la_LIBADD =
+am__objects_4 = librsb_nounroll_la-rsb_stropts.lo \
+	librsb_nounroll_la-rsb_strmif.lo
+am__objects_5 = librsb_nounroll_la-rsb_unroll.lo \
+	librsb_nounroll_la-rsb_krnl_vb.lo \
+	librsb_nounroll_la-rsb_krnl_lb.lo \
+	librsb_nounroll_la-rsb_krnl.lo librsb_nounroll_la-rsb_bench.lo \
+	librsb_nounroll_la-rsb_mergesort.lo \
+	librsb_nounroll_la-rsb_permute.lo \
+	librsb_nounroll_la-rsb_krnl_bcss_l.lo \
+	librsb_nounroll_la-rsb_krnl_bcss_u.lo \
+	librsb_nounroll_la-rsb_krnl_bcss_spsv_u.lo \
+	librsb_nounroll_la-rsb_krnl_bcss_spmv_u.lo \
+	librsb_nounroll_la-rsb_krnl_bcss_misc_u.lo \
+	librsb_nounroll_la-rsb_krnl_bcoo_spmv_u.lo \
+	librsb_nounroll_la-rsb_krnl_bcss.lo \
+	librsb_nounroll_la-rsb_spmv.lo librsb_nounroll_la-rsb_merge.lo \
+	librsb_nounroll_la-rsb_ompio.lo librsb_nounroll_la-rsb_util.lo \
+	librsb_nounroll_la-rsb_spgemm_csr.lo \
+	librsb_nounroll_la-rsb_spsum_misc.lo \
+	librsb_nounroll_la-rsb_prec.lo
+am__objects_6 = $(am__objects_4) $(am__objects_5)
+am_librsb_nounroll_la_OBJECTS = $(am__objects_6) $(am__objects_2)
+nodist_librsb_nounroll_la_OBJECTS = $(am__objects_2)
+librsb_nounroll_la_OBJECTS = $(am_librsb_nounroll_la_OBJECTS) \
+	$(nodist_librsb_nounroll_la_OBJECTS)
+librsb_nounroll_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) \
+	$(librsb_nounroll_la_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+librsb_spblas_la_LIBADD =
+am__objects_7 = rsb_libspblas.lo
+am_librsb_spblas_la_OBJECTS = rsb_libspblas_handle.lo $(am__objects_7)
+librsb_spblas_la_OBJECTS = $(am_librsb_spblas_la_OBJECTS)
+PROGRAMS = $(bin_PROGRAMS) $(noinst_PROGRAMS)
+am__ch2icfb_SOURCES_DIST = ch2icfb.c
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@am_ch2icfb_OBJECTS =  \
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@	ch2icfb-ch2icfb.$(OBJEXT)
+ch2icfb_OBJECTS = $(am_ch2icfb_OBJECTS)
+ch2icfb_DEPENDENCIES =
+ch2icfb_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(ch2icfb_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+am_ot_OBJECTS = ot.$(OBJEXT) ot-spmv_uauz.$(OBJEXT) \
+	ot-infty_norm.$(OBJEXT) ot-scale.$(OBJEXT) \
+	ot-spmv_uaua.$(OBJEXT) ot-spmv_unua.$(OBJEXT) \
+	ot-spmv_uxua.$(OBJEXT) ot-spmv_sasa.$(OBJEXT) \
+	ot-spsv_uxua.$(OBJEXT) ot-spmv_sxsa.$(OBJEXT) \
+	ot-spsv_sxsx.$(OBJEXT) ot-spsv.$(OBJEXT) ot-spmv.$(OBJEXT) \
+	ot-rowssums.$(OBJEXT)
+ot_OBJECTS = $(am_ot_OBJECTS)
+am__DEPENDENCIES_1 = $(top_builddir)/librsb.la
+ot_DEPENDENCIES = $(am__DEPENDENCIES_1)
+am__objects_8 = rsbench-rsb_test_matops.$(OBJEXT) \
+	rsbench-rsb_mkl.$(OBJEXT)
+am_rsbench_OBJECTS = rsbench-rsbench.$(OBJEXT) $(am__objects_8) \
+	rsbench-rsb_genmm.$(OBJEXT) rsbench-rsb_mmls.$(OBJEXT) \
+	rsbench-rsb_pr.$(OBJEXT) rsbench-rsb_pcnt.$(OBJEXT) \
+	rsbench-rsb_failure_tests.$(OBJEXT) \
+	rsbench-rsb_libspblas_tests.$(OBJEXT)
+rsbench_OBJECTS = $(am_rsbench_OBJECTS)
+am__DEPENDENCIES_2 =
+rsbench_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(rsbench_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+am__sbtc_SOURCES_DIST = sbtc.c
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@am_sbtc_OBJECTS =  \
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@	sbtc-sbtc.$(OBJEXT)
+sbtc_OBJECTS = $(am_sbtc_OBJECTS)
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@sbtc_DEPENDENCIES =  \
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@	$(am__DEPENDENCIES_1)
+sbtc_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(sbtc_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+am__sbtf_SOURCES_DIST = sbtf.F90
+@HAVE_FORTRAN_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@am_sbtf_OBJECTS = sbtf.$(OBJEXT)
+sbtf_OBJECTS = $(am_sbtf_OBJECTS)
+@HAVE_FORTRAN_EXAMPLES_TRUE@sbtf_DEPENDENCIES = $(am__DEPENDENCIES_1)
+SCRIPTS = $(bin_SCRIPTS)
+DEFAULT_INCLUDES = -I.@am__isrc@
+depcomp = $(SHELL) $(top_srcdir)/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+PPFCCOMPILE = $(FC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_FCFLAGS) $(FCFLAGS)
+LTPPFCCOMPILE = $(LIBTOOL) --tag=FC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=compile $(FC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_FCFLAGS) $(FCFLAGS)
+FCLD = $(FC)
+FCLINK = $(LIBTOOL) --tag=FC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(FCLD) $(AM_FCFLAGS) $(FCFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+SOURCES = $(librsb_la_SOURCES) $(librsb_base_la_SOURCES) \
+	$(nodist_librsb_base_la_SOURCES) $(librsb_nounroll_la_SOURCES) \
+	$(nodist_librsb_nounroll_la_SOURCES) \
+	$(librsb_spblas_la_SOURCES) $(ch2icfb_SOURCES) $(ot_SOURCES) \
+	$(rsbench_SOURCES) $(sbtc_SOURCES) $(sbtf_SOURCES)
+DIST_SOURCES = $(librsb_la_SOURCES) $(am__librsb_base_la_SOURCES_DIST) \
+	$(librsb_nounroll_la_SOURCES) $(librsb_spblas_la_SOURCES) \
+	$(am__ch2icfb_SOURCES_DIST) $(ot_SOURCES) $(rsbench_SOURCES) \
+	$(am__sbtc_SOURCES_DIST) $(am__sbtf_SOURCES_DIST)
+RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \
+	html-recursive info-recursive install-data-recursive \
+	install-dvi-recursive install-exec-recursive \
+	install-html-recursive install-info-recursive \
+	install-pdf-recursive install-ps-recursive install-recursive \
+	installcheck-recursive installdirs-recursive pdf-recursive \
+	ps-recursive uninstall-recursive
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+DATA = $(dist_doc_DATA)
+am__include_HEADERS_DIST = rsb.h rsb_types.h blas_sparse.h rsb.F90
+HEADERS = $(include_HEADERS) $(nodist_include_HEADERS)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive	\
+  distclean-recursive maintainer-clean-recursive
+AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \
+	$(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \
+	distdir dist dist-all distcheck
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+distdir = $(PACKAGE)-$(VERSION)
+top_distdir = $(distdir)
+am__remove_distdir = \
+  if test -d "$(distdir)"; then \
+    find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \
+      && rm -rf "$(distdir)" \
+      || { sleep 5 && rm -rf "$(distdir)"; }; \
+  else :; fi
+am__relativize = \
+  dir0=`pwd`; \
+  sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+  sed_rest='s,^[^/]*/*,,'; \
+  sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+  sed_butlast='s,/*[^/]*$$,,'; \
+  while test -n "$$dir1"; do \
+    first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+    if test "$$first" != "."; then \
+      if test "$$first" = ".."; then \
+        dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+        dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+      else \
+        first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+        if test "$$first2" = "$$first"; then \
+          dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+        else \
+          dir2="../$$dir2"; \
+        fi; \
+        dir0="$$dir0"/"$$first"; \
+      fi; \
+    fi; \
+    dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+  done; \
+  reldir="$$dir2"
+DIST_ARCHIVES = $(distdir).tar.gz
+GZIP_ENV = --best
+distuninstallcheck_listfiles = find . -type f -print
+am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \
+  | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$'
+distcleancheck_listfiles = find . -type f -print
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+ARFLAGS = @ARFLAGS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DOXYGEN = @DOXYGEN@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FC = @FC@
+FCFLAGS = @FCFLAGS@
+FGREP = @FGREP@
+GREP = @GREP@
+HELP2MAN = @HELP2MAN@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBRSB_ABI_VERSION = @LIBRSB_ABI_VERSION@
+LIBRSB_LIBRSB_VER = @LIBRSB_LIBRSB_VER@
+LIBRSB_MAIN_RELEASE = @LIBRSB_MAIN_RELEASE@
+LIBRSB_VERSION = @LIBRSB_VERSION@
+LIBRSB_VER_DATE = @LIBRSB_VER_DATE@
+LIBRSB_VER_MAJOR = @LIBRSB_VER_MAJOR@
+LIBRSB_VER_MINOR = @LIBRSB_VER_MINOR@
+LIBRSB_VER_PATCH = @LIBRSB_VER_PATCH@
+LIBRSB_VER_PRERS = @LIBRSB_VER_PRERS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+M4 = @M4@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NOUNROLLCFLAGS = @NOUNROLLCFLAGS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OCTAVE = @OCTAVE@
+OCTAVE_FLAGS = @OCTAVE_FLAGS@
+OPENMP_CFLAGS = @OPENMP_CFLAGS@
+OPENMP_FCFLAGS = @OPENMP_FCFLAGS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+RANLIB = @RANLIB@
+RSB_CONST_MAX_SUPPORTED_THREADS = @RSB_CONST_MAX_SUPPORTED_THREADS@
+RSB_DETECTED_MEM_HIERARCHY_INFO = @RSB_DETECTED_MEM_HIERARCHY_INFO@
+RSB_RSBENCH_CFLAGS = @RSB_RSBENCH_CFLAGS@
+RSB_RSBENCH_LIBS = @RSB_RSBENCH_LIBS@
+RSB_USER_SET_MEM_HIERARCHY_INFO = @RSB_USER_SET_MEM_HIERARCHY_INFO@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+SVN_REVISION = @SVN_REVISION@
+VERSION = @VERSION@
+WANT_COLUMN_UNLOOP_FACTORS = @WANT_COLUMN_UNLOOP_FACTORS@
+WANT_HALFWORD_INDICES = @WANT_HALFWORD_INDICES@
+WANT_LOOPING_KERNELS = @WANT_LOOPING_KERNELS@
+WANT_MATRIX_ALL_META_OPS = @WANT_MATRIX_ALL_META_OPS@
+WANT_MATRIX_ALL_OPS = @WANT_MATRIX_ALL_OPS@
+WANT_MATRIX_ALL_TYPES = @WANT_MATRIX_ALL_TYPES@
+WANT_MATRIX_BCOO_STORAGE = @WANT_MATRIX_BCOO_STORAGE@
+WANT_MATRIX_BCSS_STORAGE = @WANT_MATRIX_BCSS_STORAGE@
+WANT_MATRIX_LINKED_STORAGE = @WANT_MATRIX_LINKED_STORAGE@
+WANT_MATRIX_OPS = @WANT_MATRIX_OPS@
+WANT_MATRIX_STORAGE = @WANT_MATRIX_STORAGE@
+WANT_MATRIX_VB_STORAGE = @WANT_MATRIX_VB_STORAGE@
+WANT_ROW_UNLOOP_FACTORS = @WANT_ROW_UNLOOP_FACTORS@
+WANT_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR = @WANT_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR@
+WANT_SPSM_DIAG_CHECK = @WANT_SPSM_DIAG_CHECK@
+WANT_TYPES = @WANT_TYPES@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_ct_FC = @ac_ct_FC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+enable_openmp = @enable_openmp@
+enable_restrict = @enable_restrict@
+exec_prefix = @exec_prefix@
+have_grep = @have_grep@
+have_sed = @have_sed@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+ACLOCAL_AMFLAGS = -I m4
+SUBDIRS = . examples scripts bench blas_sparse doc m4
+dist_doc_DATA = README AUTHORS 
+bin_SCRIPTS = librsb-config
+bin_PROGRAMS = rsbench$(EXEEXT)
+noinst_PROGRAMS = $(am__append_7)
+EXTRAPROGRAMSFORTRAN = $(am__append_2)
+EXTRAPROGRAMSC = ot$(EXEEXT) $(am__append_1)
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@sbtc_SOURCES = sbtc.c
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@sbtc_CFLAGS = $(CFLAGS) -D SBTC_USE_RSB_H -I .
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@sbtc_LDADD = $(default_ldadd)
+EXTRA_PROGRAMS = $(EXTRAPROGRAMSC) $(EXTRAPROGRAMSFORTRAN)
+#EXTRA_LIBRARIES=librsb.la
+EXTRA_LIBRARIES = 
+EXTRA_SCRIPTS = librsb-config
+NODISTSRC = rsb_mkl.h blas_sparse.h
+RSBENCH_BUILT_SOURCES = \
+		rsb_test_matops.c rsb_test_matops.h	\
+		rsb_mkl.c rsb_mkl.h
+
+LIB_BUILT_SOURCES_M4 = \
+		rsb_unroll.c rsb_unroll.h \
+		rsb_krnl_vb.c rsb_krnl_vb.h \
+		rsb_krnl_lb.c rsb_krnl_lb.h \
+		rsb_krnl.c rsb_krnl.h \
+		rsb_bench.c rsb_bench.h \
+		rsb_types.h \
+		rsb_mergesort.h rsb_mergesort.c \
+		rsb_permute.h rsb_permute.c \
+		rsb_krnl_bcss_l.h rsb_krnl_bcss_l.c \
+		rsb_krnl_bcss_u.h rsb_krnl_bcss_u.c \
+		rsb_krnl_bcss_spsv_u.h rsb_krnl_bcss_spsv_u.c \
+		rsb_krnl_bcss_spmv_u.h rsb_krnl_bcss_spmv_u.c \
+		rsb_krnl_bcss_misc_u.h rsb_krnl_bcss_misc_u.c \
+		rsb_krnl_bcoo_spmv_u.h rsb_krnl_bcoo_spmv_u.c \
+		rsb_krnl_bcss.h rsb_krnl_bcss.c \
+		rsb_spmv.h rsb_spmv.c \
+		rsb_merge.h rsb_merge.c \
+		rsb_ompio.c rsb_ompio.h \
+		rsb_util.h rsb_util.c \
+		rsb_spgemm_csr.c rsb_spgemm_csr.h \
+		rsb_spsum_misc.c rsb_spsum_misc.h \
+		rsb_prec.h rsb_prec.c
+
+LIB_BUILT_SOURCES_MAKEFILE = \
+		rsb_stropts.c \
+		rsb_strmif.c \
+		rsb_types.h \
+		blas_sparse.h
+
+LIB_BUILT_SOURCES = \
+	$(LIB_BUILT_SOURCES_MAKEFILE) $(LIB_BUILT_SOURCES_M4)
+
+
+# rsb_prec.h: $(NODISTSRC)
+include_HEADERS = rsb.h rsb_types.h blas_sparse.h \
+	$(EXTRAHEADERSFORTRAN)
+nodist_include_HEADERS = rsb-config.h $(am__append_5) $(am__append_16)
+EXTRAHEADERSFORTRAN = $(am__append_6)
+EXTRASRCFILESSFORTRAN = $(am__append_3) $(am__append_8) \
+	$(am__append_10) $(am__append_12) $(am__append_14)
+EXTRASRCPROGSSFORTRAN = $(am__append_4) $(am__append_9) \
+	$(am__append_11) $(am__append_13) $(am__append_15)
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@ch2icfb_SOURCES = ch2icfb.c
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@ch2icfb_CFLAGS = $(CFLAGS)
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@ch2icfb_LDADD = 
+@HAVE_FORTRAN_EXAMPLES_TRUE@sbtf_LDADD = $(default_ldadd)
+@HAVE_FORTRAN_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_FALSE@sbtf_SOURCES = 
+@HAVE_FORTRAN_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@sbtf_SOURCES = sbtf.F90
+BUILT_SOURCES = $(LIB_BUILT_SOURCES) $(RSBENCH_BUILT_SOURCES) $(ot_SOURCES) $(LIB_SPBLAS_BUILT_SOURCES) rsbenchxx.cpp $(EXTRASRCFILESSFORTRAN) rsb-config.h 
+DIST_ARCHIVES_NOVERSION = librsb.tar.gz
+@HAVE_M4_FALSE@LIB_CLEANALL_FILES = $(LIB_BUILT_SOURCES_MAKEFILE)
+@HAVE_M4_TRUE@LIB_CLEANALL_FILES = $(LIB_BUILT_SOURCES) $(LIB_SPBLAS_BUILT_SOURCES) $(RSBENCH_BUILT_SOURCES)
+noinst_LTLIBRARIES = librsb_nounroll.la librsb_base.la librsb_spblas.la
+lib_LTLIBRARIES = librsb.la
+librsb_la_SOURCES = 
+am_librsb_la_OBJECTS = $(am_librsb_base_la_OBJECTS) $(am_librsb_nounroll_la_OBJECTS) $(am_librsb_spblas_la_OBJECTS)
+librsb_la_LDFLAGS = -no-undefined -version-info $(LIBRSB_ABI_VERSION)
+
+#librsb_la_LIBADD=-lgfortran
+default_ldadd = -L$(top_builddir)  $(top_builddir)/librsb.la 
+librsb_nounroll_la_CFLAGS = $(NOUNROLLCFLAGS)
+librsb_nounroll_la_SOURCES = $(LIB_BUILT_SOURCES) $(NODISTSRC)
+nodist_librsb_nounroll_la_SOURCES = $(NODISTSRC)
+librsb_base_la_CFLAGS = 
+nodist_librsb_base_la_SOURCES = $(NODISTSRC)
+librsb_base_la_SOURCES = \
+			rsb_common.h \
+			rsb_is.c  rsb_is.h \
+			rsb_mio.c rsb_mio.h \
+			rsb_op.c  rsb_op.h \
+			rsb_bio.c rsb_bio.h \
+			rsb_get.c rsb_get.h \
+			rsb_set.c rsb_set.h \
+			rsb_coo.c rsb_coo.h \
+			rsb_csr.c rsb_csr.h \
+			rsb_coo_check.c rsb_coo_check.h \
+			rsb_coo_symm.c rsb_coo_symm.h \
+			rsb_idx.c rsb_idx.h \
+			rsb_srt.c rsb_srt.h \
+			rsb_srtp.c rsb_srtp.h \
+			rsb_src.c rsb_src.h \
+			rsb_test_accuracy.c rsb_test_accuracy.h \
+			rsb_clone.c rsb_clone.h \
+			rsb_rec.h rsb_rec.c \
+			rsb_render.c rsb_render.h \
+			rsb_eps.c rsb_eps.h \
+			rsb_msort_up.c rsb_msort_up.h \
+			rsb_sys.c rsb_sys.h \
+			rsb_blas_stuff.c rsb_blas_stuff.h \
+			rsb_gen.c rsb_gen.h \
+			rsb_perf.c rsb_perf.h \
+			rsb_rsb.c rsb.h \
+			rsb_err.c rsb_err.h \
+			rsb_tune.c rsb_tune.h \
+			rsb_struct.h \
+			rsb_do.c rsb_do.h \
+			rsb_internals.c rsb_internals.h \
+			rsb_garbage.c rsb_garbage.h \
+			rsb_mmio.c rsb_mmio.h \
+			rsb_partition.c rsb_partition.h \
+		       	rsb_lbl.h \
+			rsb_mbw.c rsb_mbw.h \
+			rsb_limiter.c rsb_limiter.h \
+			rsb_fpb.c rsb_fpb.h \
+			rsb_spgemm.c rsb_spgemm.h \
+			rsb_spsum.c rsb_spsum.h \
+			rsb_spsv.c rsb_spsv.h \
+			rsb_lock.h rsb_lock.c \
+			rsb_swt.h rsb_swt.c \
+			rsb_init.h rsb_init.c \
+			rsb_dump.h rsb_dump.c \
+			rsb_cpmv.h rsb_cpmv.c \
+			rsb_psblas.h \
+			rsb_asm.h  rsb_asm.c  \
+			rsb_user.c \
+			rsb_coo2rec.c rsb_coo2rec.h \
+			rsb_rec2coo.c rsb_rec2coo.h \
+			rsb_rec2csr.c rsb_rec2csr.h \
+			rsb_csr2coo.c rsb_csr2coo.h \
+			$(EXTRASRCFILESSFORTRAN)
+
+LIB_SPBLAS_BUILT_SOURCES = rsb_libspblas.c rsb_libspblas.h
+librsb_spblas_la_SOURCES = \
+		rsb_libspblas_handle.h	rsb_libspblas_handle.c	\
+		$(LIB_SPBLAS_BUILT_SOURCES)
+
+RSB_PREM4HEADERS = $(include_HEADERS) rsb_mkl.h
+BINDISTFILES = $(include_HEADERS) $(EXTRA_LIBRARIES) $(dist_doc_DATA) doc/html doc/man/man3 doc/man/*rsb* librsb-config librsb.pc
+rsbench_SOURCES = rsbench.c $(RSBENCH_BUILT_SOURCES) rsb_genmm.c rsb_mmls.c \
+		rsb_pr.c     rsb_pr.h \
+		rsb_pcnt.c rsb_pcnt.h \
+		rsb_failure_tests.h 	rsb_failure_tests.c 	\
+		rsb_libspblas_tests.h 	rsb_libspblas_tests.c 
+
+# rsbenchxx_SOURCES=rsbenchxx.cpp
+rsbench_DEPENDENCIES = $(top_builddir)/librsb.la \
+	$(top_builddir)/librsb_nounroll.la \
+	$(top_builddir)/librsb_base.la \
+	$(top_builddir)/librsb_spblas.la rsb_mkl.c rsb_mkl.h \
+	$(am__append_17)
+# rsbenchxx_DEPENDENCIES= rsbenchxx.cpp $(top_builddir)/librsb_nounroll.a $(top_builddir)/librsb_base.a # uhm
+# if  WANT_V
+# endif
+#ygemv_SOURCES=ygemv.c
+#gemv_SOURCES=gemv.c
+#L1C_SOURCES=L1C.c
+rsbench_CFLAGS = ${RSB_RSBENCH_CFLAGS}
+# rsbench_LDADD=$(abs_top_builddir)/librsb.la -lgfortran ${RSB_RSBENCH_LIBS}
+rsbench_LDADD = $(abs_top_builddir)/librsb.la  ${RSB_RSBENCH_LIBS}
+# rsbenchxx_LDADD=$(default_ldadd)
+#L1C_LDADD=$(default_ldadd)
+#gemv_LDADD=$(default_ldadd)
+ot_LDADD = $(default_ldadd)
+MATRICES = pd.mtx A.mtx vf.mtx
+RSB_MFILES = psb_mvsv_tester.m psbtf.m ot.m sbtc.m sbtg.m sbtf.m sbtg-types.m
+EXTRA_DIST = $(LIBSOURCES) $(librsb_spblas_la_SOURCES) $(ot_SOURCES) \
+	$(MATRICES) $(RSB_MFILES) autogen.sh scripts/test.sh \
+	rsb_license_header.inc rsb-config.h.hin rsb-incoming.sed \
+	rsb-incoming.grep rsb.F90 librsb.pc.in \
+	$(EXTRASRCPROGSSFORTRAN) $(am__append_18)
+LIBSOURCES = $(LIB_BUILT_SOURCES) \
+	rsb_krnl_bcss.m4   \
+	rsb_krnl_bcss_u.m4 \
+	rsb_krnl_bcss_l.m4 \
+	rsb_krnl_bcss_spsv_u.m4 \
+	rsb_krnl_bcss_spmv_u.m4 \
+	rsb_krnl_bcss_misc_u.m4 \
+	rsb_krnl_bcoo_spmv_u.m4 \
+	rsb_krnl_bcoo_macros.m4 \
+	rsb_merge.m4 \
+	rsb_util.m4 \
+	rsb_ompio.m4 \
+	rsb_mkl.m4 \
+	rsb_spgemm_csr.m4 \
+	rsb_spsum_misc.m4 \
+	rsb_prec.m4 \
+	rsb_spmv.m4 \
+	rsb_krnl.m4 \
+	rsb_bench.m4 \
+	rsb_types.m4 \
+	rsb_unroll.m4 \
+	rsb_mergesort.m4 \
+	rsb_permute.m4 \
+	rsb_krnl_vb_macros.m4 rsb_misc.m4 rsb_test_matops.m4 mergesort_macros.m4 \
+	rsb_krnl_bcss_macros.m4 rsb_krnl_macros.m4 do_unroll.m4 wisdom.m4 \
+	rsb_config.m4 \
+	rsb_krnl_linked_lists.m4 rsb_krnl_lb.m4 \
+	rsb_krnl_vb.m4 \
+	L1C.m4 testgen.sh.m4 \
+	libspblas_macros.m4 \
+	rsb_fortran_macros.m4 \
+	sbtg-types.m4 \
+	rsb_mod.m4 rsb_blas_sparse.m4 \
+	rsb_libspblas.m4
+
+ot_SOURCES = ot.c ot-spmv_uauz.c ot-infty_norm.c ot-scale.c ot-spmv_uaua.c ot-spmv_unua.c \
+	ot-spmv_uxua.c ot-spmv_sasa.c ot-spsv_uxua.c ot-spmv_sxsa.c ot-spsv_sxsx.c \
+	ot-spsv.c ot-spmv.c ot-rowssums.c
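+# The per-operation ot-*.c testers appear to be generated from the Octave
+# scripts in $(RSB_MFILES) (ot.m and friends); the resulting `ot' program
+# links against the freshly built library through $(default_ldadd).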
+
+M4_FLAGS = \
+		-I $(srcdir) \
+		-D WANT_SPSM_DIAG_CHECK=$(WANT_SPSM_DIAG_CHECK) \
+		-D WANT_HALFWORD_INDICES=$(WANT_HALFWORD_INDICES) \
+		-D WANT_ROW_UNLOOP_FACTORS=$(WANT_ROW_UNLOOP_FACTORS) \
+		-D WANT_COLUMN_UNLOOP_FACTORS=$(WANT_COLUMN_UNLOOP_FACTORS) \
+		-D WANT_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR='$(WANT_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR)' \
+		-D WANT_COLUMN_UNLOOP_FACTORS=$(WANT_COLUMN_UNLOOP_FACTORS) \
+		-D WANT_MATRIX_OPS='$(WANT_MATRIX_OPS)' \
+		-D WANT_TYPES='$(WANT_TYPES)' \
+		-D WANT_MATRIX_ALL_OPS='$(WANT_MATRIX_ALL_OPS)' \
+		-D WANT_MATRIX_ALL_TYPES='$(WANT_MATRIX_ALL_TYPES)' \
+		-D WANT_MATRIX_ALL_META_OPS='$(WANT_MATRIX_ALL_META_OPS)' \
+		-D WANT_MATRIX_BCSS_STORAGE='$(WANT_MATRIX_BCSS_STORAGE)' \
+		-D WANT_MATRIX_BCOO_STORAGE='$(WANT_MATRIX_BCOO_STORAGE)' \
+		-D WANT_MATRIX_VB_STORAGE='$(WANT_MATRIX_VB_STORAGE)' \
+		-D WANT_LOOPING_KERNELS='$(WANT_LOOPING_KERNELS)' \
+		-D WANT_MATRIX_STORAGE='$(WANT_MATRIX_STORAGE)'
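+# These definitions parameterize the m4 expansion of the .m4 kernel templates
+# into C sources; a hand-run equivalent would look roughly like this (a
+# sketch only, assuming GNU m4 and a configured build tree):
+#	m4 $(M4_FLAGS) $(srcdir)/rsb_krnl.m4 > rsb_krnl.c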
+
+RSB_KERNELS_MACROS = rsb_krnl.m4 rsb_krnl_macros.m4  rsb_krnl_linked_lists.m4 wisdom.m4 rsb_misc.m4 rsb_krnl_bcss_macros.m4 \
+	rsb_krnl_bcss_spsv_u.m4 rsb_krnl_bcss_spmv_u.m4 rsb_krnl_bcss_misc_u.m4 rsb_krnl_bcoo_macros.m4 rsb_bench.m4 rsb_mergesort.m4 mergesort_macros.m4 rsb_permute.m4 rsb_krnl_vb.m4 rsb_krnl_vb_macros.m4 rsb_krnl_lb.m4
+
+all: $(BUILT_SOURCES) rsb-config.h
+	$(MAKE) $(AM_MAKEFLAGS) all-recursive
+
+.SUFFIXES:
+.SUFFIXES: .F90 .c .h .lo .m4 .o .obj
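+# The empty .SUFFIXES line first clears make's built-in suffix list; the
+# second registers exactly the extensions handled by the inference rules
+# further below.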
+am--refresh: Makefile
+	@:
+$(srcdir)/Makefile.in:  $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      echo ' cd $(srcdir) && $(AUTOMAKE) --gnu'; \
+	      $(am__cd) $(srcdir) && $(AUTOMAKE) --gnu \
+		&& exit 0; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --gnu Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    echo ' $(SHELL) ./config.status'; \
+	    $(SHELL) ./config.status;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \
+	esac;
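+# Regeneration is two-stage: automake refreshes Makefile.in from Makefile.am,
+# then config.status instantiates Makefile from Makefile.in.  The same effect
+# can be had by hand with `./config.status Makefile'.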
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	$(SHELL) ./config.status --recheck
+
+$(top_srcdir)/configure:  $(am__configure_deps)
+	$(am__cd) $(srcdir) && $(AUTOCONF)
+$(ACLOCAL_M4):  $(am__aclocal_m4_deps)
+	$(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS)
+$(am__aclocal_m4_deps):
+
+rsb-config.h: stamp-h1
+	@if test ! -f $@; then rm -f stamp-h1; else :; fi
+	@if test ! -f $@; then $(MAKE) $(AM_MAKEFLAGS) stamp-h1; else :; fi
+
+stamp-h1: $(srcdir)/rsb-config.h.in $(top_builddir)/config.status
+	@rm -f stamp-h1
+	cd $(top_builddir) && $(SHELL) ./config.status rsb-config.h
+$(srcdir)/rsb-config.h.in:  $(am__configure_deps) 
+	($(am__cd) $(top_srcdir) && $(AUTOHEADER))
+	rm -f stamp-h1
+	touch $@
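+# stamp-h1 is the usual Automake timestamp proxy for a config header:
+# config.status touches the stamp even when rsb-config.h is left unchanged,
+# sparing dependents a rebuild after every reconfigure.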
+
+distclean-hdr:
+	-rm -f rsb-config.h stamp-h1
+librsb-config: $(top_builddir)/config.status $(srcdir)/librsb-config.in
+	cd $(top_builddir) && $(SHELL) ./config.status $@
+librsb.pc: $(top_builddir)/config.status $(srcdir)/librsb.pc.in
+	cd $(top_builddir) && $(SHELL) ./config.status $@
+rsb_config.m4: $(top_builddir)/config.status $(srcdir)/rsb_config.m4.in
+	cd $(top_builddir) && $(SHELL) ./config.status $@
+install-libLTLIBRARIES: $(lib_LTLIBRARIES)
+	@$(NORMAL_INSTALL)
+	@list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \
+	list2=; for p in $$list; do \
+	  if test -f $$p; then \
+	    list2="$$list2 $$p"; \
+	  else :; fi; \
+	done; \
+	test -z "$$list2" || { \
+	  echo " $(MKDIR_P) '$(DESTDIR)$(libdir)'"; \
+	  $(MKDIR_P) "$(DESTDIR)$(libdir)" || exit 1; \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \
+	}
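+# In the rule above, `libtool --mode=install' wraps $(INSTALL): it may relink
+# the library with the final installation rpath and installs the shared and
+# static artifacts described by each .la file.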
+
+uninstall-libLTLIBRARIES:
+	@$(NORMAL_UNINSTALL)
+	@list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \
+	for p in $$list; do \
+	  $(am__strip_dir) \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \
+	done
+
+clean-libLTLIBRARIES:
+	-test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES)
+	@list='$(lib_LTLIBRARIES)'; for p in $$list; do \
+	  dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
+	  test "$$dir" != "$$p" || dir=.; \
+	  echo "rm -f \"$${dir}/so_locations\""; \
+	  rm -f "$${dir}/so_locations"; \
+	done
+
+clean-noinstLTLIBRARIES:
+	-test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
+	@list='$(noinst_LTLIBRARIES)'; for p in $$list; do \
+	  dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
+	  test "$$dir" != "$$p" || dir=.; \
+	  echo "rm -f \"$${dir}/so_locations\""; \
+	  rm -f "$${dir}/so_locations"; \
+	done
+librsb.la: $(librsb_la_OBJECTS) $(librsb_la_DEPENDENCIES) $(EXTRA_librsb_la_DEPENDENCIES) 
+	$(librsb_la_LINK) -rpath $(libdir) $(librsb_la_OBJECTS) $(librsb_la_LIBADD) $(LIBS)
+librsb_base.la: $(librsb_base_la_OBJECTS) $(librsb_base_la_DEPENDENCIES) $(EXTRA_librsb_base_la_DEPENDENCIES) 
+	$(FCLINK)  $(librsb_base_la_OBJECTS) $(librsb_base_la_LIBADD) $(LIBS)
+librsb_nounroll.la: $(librsb_nounroll_la_OBJECTS) $(librsb_nounroll_la_DEPENDENCIES) $(EXTRA_librsb_nounroll_la_DEPENDENCIES) 
+	$(librsb_nounroll_la_LINK)  $(librsb_nounroll_la_OBJECTS) $(librsb_nounroll_la_LIBADD) $(LIBS)
+librsb_spblas.la: $(librsb_spblas_la_OBJECTS) $(librsb_spblas_la_DEPENDENCIES) $(EXTRA_librsb_spblas_la_DEPENDENCIES) 
+	$(LINK)  $(librsb_spblas_la_OBJECTS) $(librsb_spblas_la_LIBADD) $(LIBS)
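+# librsb.la is the only library linked with -rpath (i.e. destined for
+# installation in $(libdir)); librsb_base.la links with $(FCLINK) since its
+# sources may include $(EXTRASRCFILESSFORTRAN), while the other convenience
+# libraries are pure C.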
+install-binPROGRAMS: $(bin_PROGRAMS)
+	@$(NORMAL_INSTALL)
+	@list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
+	if test -n "$$list"; then \
+	  echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \
+	  $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \
+	fi; \
+	for p in $$list; do echo "$$p $$p"; done | \
+	sed 's/$(EXEEXT)$$//' | \
+	while read p p1; do if test -f $$p || test -f $$p1; \
+	  then echo "$$p"; echo "$$p"; else :; fi; \
+	done | \
+	sed -e 'p;s,.*/,,;n;h' -e 's|.*|.|' \
+	    -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \
+	sed 'N;N;N;s,\n, ,g' | \
+	$(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \
+	  { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+	    if ($$2 == $$4) files[d] = files[d] " " $$1; \
+	    else { print "f", $$3 "/" $$4, $$1; } } \
+	  END { for (d in files) print "f", d, files[d] }' | \
+	while read type dir files; do \
+	    if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+	    test -z "$$files" || { \
+	    echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \
+	    $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \
+	    } \
+	; done
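+# The sed/awk pipeline above applies $(transform) (program name prefixes,
+# suffixes and sed rules chosen at configure time) to each binary and batches
+# the results per destination directory, so one $(INSTALL_PROGRAM) invocation
+# installs several programs at once.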
+
+uninstall-binPROGRAMS:
+	@$(NORMAL_UNINSTALL)
+	@list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
+	files=`for p in $$list; do echo "$$p"; done | \
+	  sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \
+	      -e 's/$$/$(EXEEXT)/' `; \
+	test -n "$$list" || exit 0; \
+	echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \
+	cd "$(DESTDIR)$(bindir)" && rm -f $$files
+
+clean-binPROGRAMS:
+	@list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \
+	echo " rm -f" $$list; \
+	rm -f $$list || exit $$?; \
+	test -n "$(EXEEXT)" || exit 0; \
+	list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
+	echo " rm -f" $$list; \
+	rm -f $$list
+
+clean-noinstPROGRAMS:
+	@list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \
+	echo " rm -f" $$list; \
+	rm -f $$list || exit $$?; \
+	test -n "$(EXEEXT)" || exit 0; \
+	list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
+	echo " rm -f" $$list; \
+	rm -f $$list
+ch2icfb$(EXEEXT): $(ch2icfb_OBJECTS) $(ch2icfb_DEPENDENCIES) $(EXTRA_ch2icfb_DEPENDENCIES) 
+	@rm -f ch2icfb$(EXEEXT)
+	$(ch2icfb_LINK) $(ch2icfb_OBJECTS) $(ch2icfb_LDADD) $(LIBS)
+ot$(EXEEXT): $(ot_OBJECTS) $(ot_DEPENDENCIES) $(EXTRA_ot_DEPENDENCIES) 
+	@rm -f ot$(EXEEXT)
+	$(LINK) $(ot_OBJECTS) $(ot_LDADD) $(LIBS)
+rsbench$(EXEEXT): $(rsbench_OBJECTS) $(rsbench_DEPENDENCIES) $(EXTRA_rsbench_DEPENDENCIES) 
+	@rm -f rsbench$(EXEEXT)
+	$(rsbench_LINK) $(rsbench_OBJECTS) $(rsbench_LDADD) $(LIBS)
+sbtc$(EXEEXT): $(sbtc_OBJECTS) $(sbtc_DEPENDENCIES) $(EXTRA_sbtc_DEPENDENCIES) 
+	@rm -f sbtc$(EXEEXT)
+	$(sbtc_LINK) $(sbtc_OBJECTS) $(sbtc_LDADD) $(LIBS)
+sbtf$(EXEEXT): $(sbtf_OBJECTS) $(sbtf_DEPENDENCIES) $(EXTRA_sbtf_DEPENDENCIES) 
+	@rm -f sbtf$(EXEEXT)
+	$(FCLINK) $(sbtf_OBJECTS) $(sbtf_LDADD) $(LIBS)
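+# ch2icfb, ot, rsbench and sbtc above are linked as C programs; sbtf is
+# Fortran, hence $(FCLINK).  Each rule removes any stale executable (or
+# libtool wrapper) before relinking.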
+install-binSCRIPTS: $(bin_SCRIPTS)
+	@$(NORMAL_INSTALL)
+	@list='$(bin_SCRIPTS)'; test -n "$(bindir)" || list=; \
+	if test -n "$$list"; then \
+	  echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \
+	  $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \
+	fi; \
+	for p in $$list; do \
+	  if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+	  if test -f "$$d$$p"; then echo "$$d$$p"; echo "$$p"; else :; fi; \
+	done | \
+	sed -e 'p;s,.*/,,;n' \
+	    -e 'h;s|.*|.|' \
+	    -e 'p;x;s,.*/,,;$(transform)' | sed 'N;N;N;s,\n, ,g' | \
+	$(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1; } \
+	  { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+	    if ($$2 == $$4) { files[d] = files[d] " " $$1; \
+	      if (++n[d] == $(am__install_max)) { \
+		print "f", d, files[d]; n[d] = 0; files[d] = "" } } \
+	    else { print "f", d "/" $$4, $$1 } } \
+	  END { for (d in files) print "f", d, files[d] }' | \
+	while read type dir files; do \
+	     if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+	     test -z "$$files" || { \
+	       echo " $(INSTALL_SCRIPT) $$files '$(DESTDIR)$(bindir)$$dir'"; \
+	       $(INSTALL_SCRIPT) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \
+	     } \
+	; done
+
+uninstall-binSCRIPTS:
+	@$(NORMAL_UNINSTALL)
+	@list='$(bin_SCRIPTS)'; test -n "$(bindir)" || exit 0; \
+	files=`for p in $$list; do echo "$$p"; done | \
+	       sed -e 's,.*/,,;$(transform)'`; \
+	dir='$(DESTDIR)$(bindir)'; $(am__uninstall_files_from_dir)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ch2icfb-ch2icfb.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_asm.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_bio.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_blas_stuff.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_clone.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_coo.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_coo2rec.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_coo_check.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_coo_symm.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_cpmv.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_csr.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_csr2coo.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_do.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_dump.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_eps.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_err.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_fpb.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_garbage.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_gen.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_get.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_idx.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_init.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_internals.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_is.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_limiter.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_lock.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_mbw.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_mio.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_mmio.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_msort_up.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_op.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_partition.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_perf.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_rec.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_rec2coo.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_rec2csr.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_render.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_rsb.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_set.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_spgemm.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_spsum.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_spsv.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_src.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_srt.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_srtp.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_swt.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_sys.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_test_accuracy.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_tune.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_base_la-rsb_user.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_nounroll_la-rsb_bench.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_nounroll_la-rsb_krnl.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcoo_spmv_u.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss_l.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss_misc_u.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss_spmv_u.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss_spsv_u.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss_u.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_nounroll_la-rsb_krnl_lb.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_nounroll_la-rsb_krnl_vb.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_nounroll_la-rsb_merge.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_nounroll_la-rsb_mergesort.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_nounroll_la-rsb_ompio.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_nounroll_la-rsb_permute.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_nounroll_la-rsb_prec.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_nounroll_la-rsb_spgemm_csr.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_nounroll_la-rsb_spmv.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_nounroll_la-rsb_spsum_misc.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_nounroll_la-rsb_strmif.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_nounroll_la-rsb_stropts.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_nounroll_la-rsb_unroll.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/librsb_nounroll_la-rsb_util.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ot-infty_norm.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ot-rowssums.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ot-scale.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ot-spmv.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ot-spmv_sasa.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ot-spmv_sxsa.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ot-spmv_uaua.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ot-spmv_uauz.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ot-spmv_unua.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ot-spmv_uxua.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ot-spsv.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ot-spsv_sxsx.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ot-spsv_uxua.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ot.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rsb_libspblas.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rsb_libspblas_handle.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rsbench-rsb_failure_tests.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rsbench-rsb_genmm.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rsbench-rsb_libspblas_tests.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rsbench-rsb_mkl.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rsbench-rsb_mmls.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rsbench-rsb_pcnt.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rsbench-rsb_pr.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rsbench-rsb_test_matops.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rsbench-rsbench.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sbtc-sbtc.Po@am__quote@
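+# Each included .Po/.Plo fragment carries the automatically computed header
+# dependencies of one object; @am__include@ and @am__quote@ expand to the
+# include syntax of the make flavor detected at configure time.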
+
+.F90.o:
+	$(PPFCCOMPILE) -c -o $@ $<
+
+.F90.obj:
+	$(PPFCCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.F90.lo:
+	$(LTPPFCCOMPILE) -c -o $@ $<
+
+.c.o:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
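+# Two dependency-tracking strategies: a "fast-dep" compiler emits
+# dependencies as a side effect of compilation (-MD -MP -MF writes a .Tpo
+# that is renamed to .Po/.Plo only on success, so an interrupted compile
+# leaves no truncated depfile); otherwise the depcomp wrapper computes them
+# in a separate pass.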
+
+librsb_base_la-rsb_is.lo: rsb_is.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_is.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_is.Tpo -c -o librsb_base_la-rsb_is.lo `test -f 'rsb_is.c' || echo '$(srcdir)/'`rsb_is.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_is.Tpo $(DEPDIR)/librsb_base_la-rsb_is.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_is.c' object='librsb_base_la-rsb_is.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_is.lo `test -f 'rsb_is.c' || echo '$(srcdir)/'`rsb_is.c
+
+librsb_base_la-rsb_mio.lo: rsb_mio.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_mio.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_mio.Tpo -c -o librsb_base_la-rsb_mio.lo `test -f 'rsb_mio.c' || echo '$(srcdir)/'`rsb_mio.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_mio.Tpo $(DEPDIR)/librsb_base_la-rsb_mio.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_mio.c' object='librsb_base_la-rsb_mio.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_mio.lo `test -f 'rsb_mio.c' || echo '$(srcdir)/'`rsb_mio.c
+
+librsb_base_la-rsb_op.lo: rsb_op.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_op.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_op.Tpo -c -o librsb_base_la-rsb_op.lo `test -f 'rsb_op.c' || echo '$(srcdir)/'`rsb_op.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_op.Tpo $(DEPDIR)/librsb_base_la-rsb_op.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_op.c' object='librsb_base_la-rsb_op.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_op.lo `test -f 'rsb_op.c' || echo '$(srcdir)/'`rsb_op.c
+
+librsb_base_la-rsb_bio.lo: rsb_bio.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_bio.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_bio.Tpo -c -o librsb_base_la-rsb_bio.lo `test -f 'rsb_bio.c' || echo '$(srcdir)/'`rsb_bio.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_bio.Tpo $(DEPDIR)/librsb_base_la-rsb_bio.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_bio.c' object='librsb_base_la-rsb_bio.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_bio.lo `test -f 'rsb_bio.c' || echo '$(srcdir)/'`rsb_bio.c
+
+librsb_base_la-rsb_get.lo: rsb_get.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_get.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_get.Tpo -c -o librsb_base_la-rsb_get.lo `test -f 'rsb_get.c' || echo '$(srcdir)/'`rsb_get.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_get.Tpo $(DEPDIR)/librsb_base_la-rsb_get.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_get.c' object='librsb_base_la-rsb_get.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_get.lo `test -f 'rsb_get.c' || echo '$(srcdir)/'`rsb_get.c
+
+librsb_base_la-rsb_set.lo: rsb_set.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_set.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_set.Tpo -c -o librsb_base_la-rsb_set.lo `test -f 'rsb_set.c' || echo '$(srcdir)/'`rsb_set.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_set.Tpo $(DEPDIR)/librsb_base_la-rsb_set.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_set.c' object='librsb_base_la-rsb_set.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_set.lo `test -f 'rsb_set.c' || echo '$(srcdir)/'`rsb_set.c
+
+librsb_base_la-rsb_coo.lo: rsb_coo.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_coo.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_coo.Tpo -c -o librsb_base_la-rsb_coo.lo `test -f 'rsb_coo.c' || echo '$(srcdir)/'`rsb_coo.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_coo.Tpo $(DEPDIR)/librsb_base_la-rsb_coo.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_coo.c' object='librsb_base_la-rsb_coo.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_coo.lo `test -f 'rsb_coo.c' || echo '$(srcdir)/'`rsb_coo.c
+
+librsb_base_la-rsb_csr.lo: rsb_csr.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_csr.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_csr.Tpo -c -o librsb_base_la-rsb_csr.lo `test -f 'rsb_csr.c' || echo '$(srcdir)/'`rsb_csr.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_csr.Tpo $(DEPDIR)/librsb_base_la-rsb_csr.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_csr.c' object='librsb_base_la-rsb_csr.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_csr.lo `test -f 'rsb_csr.c' || echo '$(srcdir)/'`rsb_csr.c
+
+librsb_base_la-rsb_coo_check.lo: rsb_coo_check.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_coo_check.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_coo_check.Tpo -c -o librsb_base_la-rsb_coo_check.lo `test -f 'rsb_coo_check.c' || echo '$(srcdir)/'`rsb_coo_check.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_coo_check.Tpo $(DEPDIR)/librsb_base_la-rsb_coo_check.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_coo_check.c' object='librsb_base_la-rsb_coo_check.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_coo_check.lo `test -f 'rsb_coo_check.c' || echo '$(srcdir)/'`rsb_coo_check.c
+
+librsb_base_la-rsb_coo_symm.lo: rsb_coo_symm.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_coo_symm.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_coo_symm.Tpo -c -o librsb_base_la-rsb_coo_symm.lo `test -f 'rsb_coo_symm.c' || echo '$(srcdir)/'`rsb_coo_symm.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_coo_symm.Tpo $(DEPDIR)/librsb_base_la-rsb_coo_symm.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_coo_symm.c' object='librsb_base_la-rsb_coo_symm.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_coo_symm.lo `test -f 'rsb_coo_symm.c' || echo '$(srcdir)/'`rsb_coo_symm.c
+
+librsb_base_la-rsb_idx.lo: rsb_idx.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_idx.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_idx.Tpo -c -o librsb_base_la-rsb_idx.lo `test -f 'rsb_idx.c' || echo '$(srcdir)/'`rsb_idx.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_idx.Tpo $(DEPDIR)/librsb_base_la-rsb_idx.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_idx.c' object='librsb_base_la-rsb_idx.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_idx.lo `test -f 'rsb_idx.c' || echo '$(srcdir)/'`rsb_idx.c
+
+librsb_base_la-rsb_srt.lo: rsb_srt.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_srt.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_srt.Tpo -c -o librsb_base_la-rsb_srt.lo `test -f 'rsb_srt.c' || echo '$(srcdir)/'`rsb_srt.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_srt.Tpo $(DEPDIR)/librsb_base_la-rsb_srt.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_srt.c' object='librsb_base_la-rsb_srt.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_srt.lo `test -f 'rsb_srt.c' || echo '$(srcdir)/'`rsb_srt.c
+
+librsb_base_la-rsb_srtp.lo: rsb_srtp.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_srtp.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_srtp.Tpo -c -o librsb_base_la-rsb_srtp.lo `test -f 'rsb_srtp.c' || echo '$(srcdir)/'`rsb_srtp.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_srtp.Tpo $(DEPDIR)/librsb_base_la-rsb_srtp.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_srtp.c' object='librsb_base_la-rsb_srtp.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_srtp.lo `test -f 'rsb_srtp.c' || echo '$(srcdir)/'`rsb_srtp.c
+
+librsb_base_la-rsb_src.lo: rsb_src.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_src.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_src.Tpo -c -o librsb_base_la-rsb_src.lo `test -f 'rsb_src.c' || echo '$(srcdir)/'`rsb_src.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_src.Tpo $(DEPDIR)/librsb_base_la-rsb_src.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_src.c' object='librsb_base_la-rsb_src.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_src.lo `test -f 'rsb_src.c' || echo '$(srcdir)/'`rsb_src.c
+
+librsb_base_la-rsb_test_accuracy.lo: rsb_test_accuracy.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_test_accuracy.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_test_accuracy.Tpo -c -o librsb_base_la-rsb_test_accuracy.lo `test -f 'rsb_test_accuracy.c' || echo '$(srcdir)/'`rsb_test_accuracy.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_test_accuracy.Tpo $(DEPDIR)/librsb_base_la-rsb_test_accuracy.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_test_accuracy.c' object='librsb_base_la-rsb_test_accuracy.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_test_accuracy.lo `test -f 'rsb_test_accuracy.c' || echo '$(srcdir)/'`rsb_test_accuracy.c
+
+librsb_base_la-rsb_clone.lo: rsb_clone.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_clone.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_clone.Tpo -c -o librsb_base_la-rsb_clone.lo `test -f 'rsb_clone.c' || echo '$(srcdir)/'`rsb_clone.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_clone.Tpo $(DEPDIR)/librsb_base_la-rsb_clone.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_clone.c' object='librsb_base_la-rsb_clone.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_clone.lo `test -f 'rsb_clone.c' || echo '$(srcdir)/'`rsb_clone.c
+
+librsb_base_la-rsb_rec.lo: rsb_rec.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_rec.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_rec.Tpo -c -o librsb_base_la-rsb_rec.lo `test -f 'rsb_rec.c' || echo '$(srcdir)/'`rsb_rec.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_rec.Tpo $(DEPDIR)/librsb_base_la-rsb_rec.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_rec.c' object='librsb_base_la-rsb_rec.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_rec.lo `test -f 'rsb_rec.c' || echo '$(srcdir)/'`rsb_rec.c
+
+librsb_base_la-rsb_render.lo: rsb_render.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_render.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_render.Tpo -c -o librsb_base_la-rsb_render.lo `test -f 'rsb_render.c' || echo '$(srcdir)/'`rsb_render.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_render.Tpo $(DEPDIR)/librsb_base_la-rsb_render.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_render.c' object='librsb_base_la-rsb_render.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_render.lo `test -f 'rsb_render.c' || echo '$(srcdir)/'`rsb_render.c
+
+librsb_base_la-rsb_eps.lo: rsb_eps.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_eps.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_eps.Tpo -c -o librsb_base_la-rsb_eps.lo `test -f 'rsb_eps.c' || echo '$(srcdir)/'`rsb_eps.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_eps.Tpo $(DEPDIR)/librsb_base_la-rsb_eps.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_eps.c' object='librsb_base_la-rsb_eps.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_eps.lo `test -f 'rsb_eps.c' || echo '$(srcdir)/'`rsb_eps.c
+
+librsb_base_la-rsb_msort_up.lo: rsb_msort_up.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_msort_up.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_msort_up.Tpo -c -o librsb_base_la-rsb_msort_up.lo `test -f 'rsb_msort_up.c' || echo '$(srcdir)/'`rsb_msort_up.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_msort_up.Tpo $(DEPDIR)/librsb_base_la-rsb_msort_up.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_msort_up.c' object='librsb_base_la-rsb_msort_up.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_msort_up.lo `test -f 'rsb_msort_up.c' || echo '$(srcdir)/'`rsb_msort_up.c
+
+librsb_base_la-rsb_sys.lo: rsb_sys.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_sys.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_sys.Tpo -c -o librsb_base_la-rsb_sys.lo `test -f 'rsb_sys.c' || echo '$(srcdir)/'`rsb_sys.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_sys.Tpo $(DEPDIR)/librsb_base_la-rsb_sys.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_sys.c' object='librsb_base_la-rsb_sys.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_sys.lo `test -f 'rsb_sys.c' || echo '$(srcdir)/'`rsb_sys.c
+
+librsb_base_la-rsb_blas_stuff.lo: rsb_blas_stuff.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_blas_stuff.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_blas_stuff.Tpo -c -o librsb_base_la-rsb_blas_stuff.lo `test -f 'rsb_blas_stuff.c' || echo '$(srcdir)/'`rsb_blas_stuff.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_blas_stuff.Tpo $(DEPDIR)/librsb_base_la-rsb_blas_stuff.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_blas_stuff.c' object='librsb_base_la-rsb_blas_stuff.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_blas_stuff.lo `test -f 'rsb_blas_stuff.c' || echo '$(srcdir)/'`rsb_blas_stuff.c
+
+librsb_base_la-rsb_gen.lo: rsb_gen.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_gen.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_gen.Tpo -c -o librsb_base_la-rsb_gen.lo `test -f 'rsb_gen.c' || echo '$(srcdir)/'`rsb_gen.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_gen.Tpo $(DEPDIR)/librsb_base_la-rsb_gen.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_gen.c' object='librsb_base_la-rsb_gen.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_gen.lo `test -f 'rsb_gen.c' || echo '$(srcdir)/'`rsb_gen.c
+
+librsb_base_la-rsb_perf.lo: rsb_perf.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_perf.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_perf.Tpo -c -o librsb_base_la-rsb_perf.lo `test -f 'rsb_perf.c' || echo '$(srcdir)/'`rsb_perf.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_perf.Tpo $(DEPDIR)/librsb_base_la-rsb_perf.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_perf.c' object='librsb_base_la-rsb_perf.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_perf.lo `test -f 'rsb_perf.c' || echo '$(srcdir)/'`rsb_perf.c
+
+librsb_base_la-rsb_rsb.lo: rsb_rsb.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_rsb.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_rsb.Tpo -c -o librsb_base_la-rsb_rsb.lo `test -f 'rsb_rsb.c' || echo '$(srcdir)/'`rsb_rsb.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_rsb.Tpo $(DEPDIR)/librsb_base_la-rsb_rsb.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_rsb.c' object='librsb_base_la-rsb_rsb.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_rsb.lo `test -f 'rsb_rsb.c' || echo '$(srcdir)/'`rsb_rsb.c
+
+librsb_base_la-rsb_err.lo: rsb_err.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_err.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_err.Tpo -c -o librsb_base_la-rsb_err.lo `test -f 'rsb_err.c' || echo '$(srcdir)/'`rsb_err.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_err.Tpo $(DEPDIR)/librsb_base_la-rsb_err.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_err.c' object='librsb_base_la-rsb_err.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_err.lo `test -f 'rsb_err.c' || echo '$(srcdir)/'`rsb_err.c
+
+librsb_base_la-rsb_tune.lo: rsb_tune.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_tune.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_tune.Tpo -c -o librsb_base_la-rsb_tune.lo `test -f 'rsb_tune.c' || echo '$(srcdir)/'`rsb_tune.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_tune.Tpo $(DEPDIR)/librsb_base_la-rsb_tune.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_tune.c' object='librsb_base_la-rsb_tune.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_tune.lo `test -f 'rsb_tune.c' || echo '$(srcdir)/'`rsb_tune.c
+
+librsb_base_la-rsb_do.lo: rsb_do.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_do.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_do.Tpo -c -o librsb_base_la-rsb_do.lo `test -f 'rsb_do.c' || echo '$(srcdir)/'`rsb_do.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_do.Tpo $(DEPDIR)/librsb_base_la-rsb_do.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_do.c' object='librsb_base_la-rsb_do.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_do.lo `test -f 'rsb_do.c' || echo '$(srcdir)/'`rsb_do.c
+
+librsb_base_la-rsb_internals.lo: rsb_internals.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_internals.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_internals.Tpo -c -o librsb_base_la-rsb_internals.lo `test -f 'rsb_internals.c' || echo '$(srcdir)/'`rsb_internals.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_internals.Tpo $(DEPDIR)/librsb_base_la-rsb_internals.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_internals.c' object='librsb_base_la-rsb_internals.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_internals.lo `test -f 'rsb_internals.c' || echo '$(srcdir)/'`rsb_internals.c
+
+librsb_base_la-rsb_garbage.lo: rsb_garbage.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_garbage.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_garbage.Tpo -c -o librsb_base_la-rsb_garbage.lo `test -f 'rsb_garbage.c' || echo '$(srcdir)/'`rsb_garbage.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_garbage.Tpo $(DEPDIR)/librsb_base_la-rsb_garbage.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_garbage.c' object='librsb_base_la-rsb_garbage.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_garbage.lo `test -f 'rsb_garbage.c' || echo '$(srcdir)/'`rsb_garbage.c
+
+librsb_base_la-rsb_mmio.lo: rsb_mmio.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_mmio.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_mmio.Tpo -c -o librsb_base_la-rsb_mmio.lo `test -f 'rsb_mmio.c' || echo '$(srcdir)/'`rsb_mmio.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_mmio.Tpo $(DEPDIR)/librsb_base_la-rsb_mmio.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_mmio.c' object='librsb_base_la-rsb_mmio.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_mmio.lo `test -f 'rsb_mmio.c' || echo '$(srcdir)/'`rsb_mmio.c
+
+librsb_base_la-rsb_partition.lo: rsb_partition.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_partition.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_partition.Tpo -c -o librsb_base_la-rsb_partition.lo `test -f 'rsb_partition.c' || echo '$(srcdir)/'`rsb_partition.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_partition.Tpo $(DEPDIR)/librsb_base_la-rsb_partition.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_partition.c' object='librsb_base_la-rsb_partition.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_partition.lo `test -f 'rsb_partition.c' || echo '$(srcdir)/'`rsb_partition.c
+
+librsb_base_la-rsb_mbw.lo: rsb_mbw.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_mbw.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_mbw.Tpo -c -o librsb_base_la-rsb_mbw.lo `test -f 'rsb_mbw.c' || echo '$(srcdir)/'`rsb_mbw.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_mbw.Tpo $(DEPDIR)/librsb_base_la-rsb_mbw.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_mbw.c' object='librsb_base_la-rsb_mbw.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_mbw.lo `test -f 'rsb_mbw.c' || echo '$(srcdir)/'`rsb_mbw.c
+
+librsb_base_la-rsb_limiter.lo: rsb_limiter.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_limiter.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_limiter.Tpo -c -o librsb_base_la-rsb_limiter.lo `test -f 'rsb_limiter.c' || echo '$(srcdir)/'`rsb_limiter.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_limiter.Tpo $(DEPDIR)/librsb_base_la-rsb_limiter.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_limiter.c' object='librsb_base_la-rsb_limiter.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_limiter.lo `test -f 'rsb_limiter.c' || echo '$(srcdir)/'`rsb_limiter.c
+
+librsb_base_la-rsb_fpb.lo: rsb_fpb.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_fpb.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_fpb.Tpo -c -o librsb_base_la-rsb_fpb.lo `test -f 'rsb_fpb.c' || echo '$(srcdir)/'`rsb_fpb.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_fpb.Tpo $(DEPDIR)/librsb_base_la-rsb_fpb.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_fpb.c' object='librsb_base_la-rsb_fpb.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_fpb.lo `test -f 'rsb_fpb.c' || echo '$(srcdir)/'`rsb_fpb.c
+
+librsb_base_la-rsb_spgemm.lo: rsb_spgemm.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_spgemm.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_spgemm.Tpo -c -o librsb_base_la-rsb_spgemm.lo `test -f 'rsb_spgemm.c' || echo '$(srcdir)/'`rsb_spgemm.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_spgemm.Tpo $(DEPDIR)/librsb_base_la-rsb_spgemm.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_spgemm.c' object='librsb_base_la-rsb_spgemm.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_spgemm.lo `test -f 'rsb_spgemm.c' || echo '$(srcdir)/'`rsb_spgemm.c
+
+librsb_base_la-rsb_spsum.lo: rsb_spsum.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_spsum.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_spsum.Tpo -c -o librsb_base_la-rsb_spsum.lo `test -f 'rsb_spsum.c' || echo '$(srcdir)/'`rsb_spsum.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_spsum.Tpo $(DEPDIR)/librsb_base_la-rsb_spsum.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_spsum.c' object='librsb_base_la-rsb_spsum.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_spsum.lo `test -f 'rsb_spsum.c' || echo '$(srcdir)/'`rsb_spsum.c
+
+librsb_base_la-rsb_spsv.lo: rsb_spsv.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_spsv.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_spsv.Tpo -c -o librsb_base_la-rsb_spsv.lo `test -f 'rsb_spsv.c' || echo '$(srcdir)/'`rsb_spsv.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_spsv.Tpo $(DEPDIR)/librsb_base_la-rsb_spsv.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_spsv.c' object='librsb_base_la-rsb_spsv.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_spsv.lo `test -f 'rsb_spsv.c' || echo '$(srcdir)/'`rsb_spsv.c
+
+librsb_base_la-rsb_lock.lo: rsb_lock.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_lock.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_lock.Tpo -c -o librsb_base_la-rsb_lock.lo `test -f 'rsb_lock.c' || echo '$(srcdir)/'`rsb_lock.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_lock.Tpo $(DEPDIR)/librsb_base_la-rsb_lock.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_lock.c' object='librsb_base_la-rsb_lock.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_lock.lo `test -f 'rsb_lock.c' || echo '$(srcdir)/'`rsb_lock.c
+
+librsb_base_la-rsb_swt.lo: rsb_swt.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_swt.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_swt.Tpo -c -o librsb_base_la-rsb_swt.lo `test -f 'rsb_swt.c' || echo '$(srcdir)/'`rsb_swt.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_swt.Tpo $(DEPDIR)/librsb_base_la-rsb_swt.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_swt.c' object='librsb_base_la-rsb_swt.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_swt.lo `test -f 'rsb_swt.c' || echo '$(srcdir)/'`rsb_swt.c
+
+librsb_base_la-rsb_init.lo: rsb_init.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_init.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_init.Tpo -c -o librsb_base_la-rsb_init.lo `test -f 'rsb_init.c' || echo '$(srcdir)/'`rsb_init.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_init.Tpo $(DEPDIR)/librsb_base_la-rsb_init.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_init.c' object='librsb_base_la-rsb_init.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_init.lo `test -f 'rsb_init.c' || echo '$(srcdir)/'`rsb_init.c
+
+librsb_base_la-rsb_dump.lo: rsb_dump.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_dump.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_dump.Tpo -c -o librsb_base_la-rsb_dump.lo `test -f 'rsb_dump.c' || echo '$(srcdir)/'`rsb_dump.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_dump.Tpo $(DEPDIR)/librsb_base_la-rsb_dump.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_dump.c' object='librsb_base_la-rsb_dump.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_dump.lo `test -f 'rsb_dump.c' || echo '$(srcdir)/'`rsb_dump.c
+
+librsb_base_la-rsb_cpmv.lo: rsb_cpmv.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_cpmv.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_cpmv.Tpo -c -o librsb_base_la-rsb_cpmv.lo `test -f 'rsb_cpmv.c' || echo '$(srcdir)/'`rsb_cpmv.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_cpmv.Tpo $(DEPDIR)/librsb_base_la-rsb_cpmv.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_cpmv.c' object='librsb_base_la-rsb_cpmv.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_cpmv.lo `test -f 'rsb_cpmv.c' || echo '$(srcdir)/'`rsb_cpmv.c
+
+librsb_base_la-rsb_asm.lo: rsb_asm.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_asm.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_asm.Tpo -c -o librsb_base_la-rsb_asm.lo `test -f 'rsb_asm.c' || echo '$(srcdir)/'`rsb_asm.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_asm.Tpo $(DEPDIR)/librsb_base_la-rsb_asm.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_asm.c' object='librsb_base_la-rsb_asm.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_asm.lo `test -f 'rsb_asm.c' || echo '$(srcdir)/'`rsb_asm.c
+
+librsb_base_la-rsb_user.lo: rsb_user.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_user.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_user.Tpo -c -o librsb_base_la-rsb_user.lo `test -f 'rsb_user.c' || echo '$(srcdir)/'`rsb_user.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_user.Tpo $(DEPDIR)/librsb_base_la-rsb_user.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_user.c' object='librsb_base_la-rsb_user.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_user.lo `test -f 'rsb_user.c' || echo '$(srcdir)/'`rsb_user.c
+
+librsb_base_la-rsb_coo2rec.lo: rsb_coo2rec.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_coo2rec.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_coo2rec.Tpo -c -o librsb_base_la-rsb_coo2rec.lo `test -f 'rsb_coo2rec.c' || echo '$(srcdir)/'`rsb_coo2rec.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_coo2rec.Tpo $(DEPDIR)/librsb_base_la-rsb_coo2rec.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_coo2rec.c' object='librsb_base_la-rsb_coo2rec.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_coo2rec.lo `test -f 'rsb_coo2rec.c' || echo '$(srcdir)/'`rsb_coo2rec.c
+
+librsb_base_la-rsb_rec2coo.lo: rsb_rec2coo.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_rec2coo.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_rec2coo.Tpo -c -o librsb_base_la-rsb_rec2coo.lo `test -f 'rsb_rec2coo.c' || echo '$(srcdir)/'`rsb_rec2coo.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_rec2coo.Tpo $(DEPDIR)/librsb_base_la-rsb_rec2coo.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_rec2coo.c' object='librsb_base_la-rsb_rec2coo.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_rec2coo.lo `test -f 'rsb_rec2coo.c' || echo '$(srcdir)/'`rsb_rec2coo.c
+
+librsb_base_la-rsb_rec2csr.lo: rsb_rec2csr.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_rec2csr.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_rec2csr.Tpo -c -o librsb_base_la-rsb_rec2csr.lo `test -f 'rsb_rec2csr.c' || echo '$(srcdir)/'`rsb_rec2csr.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_rec2csr.Tpo $(DEPDIR)/librsb_base_la-rsb_rec2csr.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_rec2csr.c' object='librsb_base_la-rsb_rec2csr.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_rec2csr.lo `test -f 'rsb_rec2csr.c' || echo '$(srcdir)/'`rsb_rec2csr.c
+
+librsb_base_la-rsb_csr2coo.lo: rsb_csr2coo.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -MT librsb_base_la-rsb_csr2coo.lo -MD -MP -MF $(DEPDIR)/librsb_base_la-rsb_csr2coo.Tpo -c -o librsb_base_la-rsb_csr2coo.lo `test -f 'rsb_csr2coo.c' || echo '$(srcdir)/'`rsb_csr2coo.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_base_la-rsb_csr2coo.Tpo $(DEPDIR)/librsb_base_la-rsb_csr2coo.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_csr2coo.c' object='librsb_base_la-rsb_csr2coo.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_base_la_CFLAGS) $(CFLAGS) -c -o librsb_base_la-rsb_csr2coo.lo `test -f 'rsb_csr2coo.c' || echo '$(srcdir)/'`rsb_csr2coo.c
+
+librsb_nounroll_la-rsb_stropts.lo: rsb_stropts.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -MT librsb_nounroll_la-rsb_stropts.lo -MD -MP -MF $(DEPDIR)/librsb_nounroll_la-rsb_stropts.Tpo -c -o librsb_nounroll_la-rsb_stropts.lo `test -f 'rsb_stropts.c' || echo '$(srcdir)/'`rsb_stropts.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_nounroll_la-rsb_stropts.Tpo $(DEPDIR)/librsb_nounroll_la-rsb_stropts.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_stropts.c' object='librsb_nounroll_la-rsb_stropts.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -c -o librsb_nounroll_la-rsb_stropts.lo `test -f 'rsb_stropts.c' || echo '$(srcdir)/'`rsb_stropts.c
+
+librsb_nounroll_la-rsb_strmif.lo: rsb_strmif.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -MT librsb_nounroll_la-rsb_strmif.lo -MD -MP -MF $(DEPDIR)/librsb_nounroll_la-rsb_strmif.Tpo -c -o librsb_nounroll_la-rsb_strmif.lo `test -f 'rsb_strmif.c' || echo '$(srcdir)/'`rsb_strmif.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_nounroll_la-rsb_strmif.Tpo $(DEPDIR)/librsb_nounroll_la-rsb_strmif.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_strmif.c' object='librsb_nounroll_la-rsb_strmif.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -c -o librsb_nounroll_la-rsb_strmif.lo `test -f 'rsb_strmif.c' || echo '$(srcdir)/'`rsb_strmif.c
+
+librsb_nounroll_la-rsb_unroll.lo: rsb_unroll.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -MT librsb_nounroll_la-rsb_unroll.lo -MD -MP -MF $(DEPDIR)/librsb_nounroll_la-rsb_unroll.Tpo -c -o librsb_nounroll_la-rsb_unroll.lo `test -f 'rsb_unroll.c' || echo '$(srcdir)/'`rsb_unroll.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_nounroll_la-rsb_unroll.Tpo $(DEPDIR)/librsb_nounroll_la-rsb_unroll.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_unroll.c' object='librsb_nounroll_la-rsb_unroll.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -c -o librsb_nounroll_la-rsb_unroll.lo `test -f 'rsb_unroll.c' || echo '$(srcdir)/'`rsb_unroll.c
+
+librsb_nounroll_la-rsb_krnl_vb.lo: rsb_krnl_vb.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -MT librsb_nounroll_la-rsb_krnl_vb.lo -MD -MP -MF $(DEPDIR)/librsb_nounroll_la-rsb_krnl_vb.Tpo -c -o librsb_nounroll_la-rsb_krnl_vb.lo `test -f 'rsb_krnl_vb.c' || echo '$(srcdir)/'`rsb_krnl_vb.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_nounroll_la-rsb_krnl_vb.Tpo $(DEPDIR)/librsb_nounroll_la-rsb_krnl_vb.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_krnl_vb.c' object='librsb_nounroll_la-rsb_krnl_vb.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -c -o librsb_nounroll_la-rsb_krnl_vb.lo `test -f 'rsb_krnl_vb.c' || echo '$(srcdir)/'`rsb_krnl_vb.c
+
+librsb_nounroll_la-rsb_krnl_lb.lo: rsb_krnl_lb.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -MT librsb_nounroll_la-rsb_krnl_lb.lo -MD -MP -MF $(DEPDIR)/librsb_nounroll_la-rsb_krnl_lb.Tpo -c -o librsb_nounroll_la-rsb_krnl_lb.lo `test -f 'rsb_krnl_lb.c' || echo '$(srcdir)/'`rsb_krnl_lb.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_nounroll_la-rsb_krnl_lb.Tpo $(DEPDIR)/librsb_nounroll_la-rsb_krnl_lb.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_krnl_lb.c' object='librsb_nounroll_la-rsb_krnl_lb.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -c -o librsb_nounroll_la-rsb_krnl_lb.lo `test -f 'rsb_krnl_lb.c' || echo '$(srcdir)/'`rsb_krnl_lb.c
+
+librsb_nounroll_la-rsb_krnl.lo: rsb_krnl.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -MT librsb_nounroll_la-rsb_krnl.lo -MD -MP -MF $(DEPDIR)/librsb_nounroll_la-rsb_krnl.Tpo -c -o librsb_nounroll_la-rsb_krnl.lo `test -f 'rsb_krnl.c' || echo '$(srcdir)/'`rsb_krnl.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_nounroll_la-rsb_krnl.Tpo $(DEPDIR)/librsb_nounroll_la-rsb_krnl.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_krnl.c' object='librsb_nounroll_la-rsb_krnl.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -c -o librsb_nounroll_la-rsb_krnl.lo `test -f 'rsb_krnl.c' || echo '$(srcdir)/'`rsb_krnl.c
+
+librsb_nounroll_la-rsb_bench.lo: rsb_bench.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -MT librsb_nounroll_la-rsb_bench.lo -MD -MP -MF $(DEPDIR)/librsb_nounroll_la-rsb_bench.Tpo -c -o librsb_nounroll_la-rsb_bench.lo `test -f 'rsb_bench.c' || echo '$(srcdir)/'`rsb_bench.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_nounroll_la-rsb_bench.Tpo $(DEPDIR)/librsb_nounroll_la-rsb_bench.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_bench.c' object='librsb_nounroll_la-rsb_bench.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -c -o librsb_nounroll_la-rsb_bench.lo `test -f 'rsb_bench.c' || echo '$(srcdir)/'`rsb_bench.c
+
+librsb_nounroll_la-rsb_mergesort.lo: rsb_mergesort.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -MT librsb_nounroll_la-rsb_mergesort.lo -MD -MP -MF $(DEPDIR)/librsb_nounroll_la-rsb_mergesort.Tpo -c -o librsb_nounroll_la-rsb_mergesort.lo `test -f 'rsb_mergesort.c' || echo '$(srcdir)/'`rsb_mergesort.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_nounroll_la-rsb_mergesort.Tpo $(DEPDIR)/librsb_nounroll_la-rsb_mergesort.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_mergesort.c' object='librsb_nounroll_la-rsb_mergesort.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -c -o librsb_nounroll_la-rsb_mergesort.lo `test -f 'rsb_mergesort.c' || echo '$(srcdir)/'`rsb_mergesort.c
+
+librsb_nounroll_la-rsb_permute.lo: rsb_permute.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -MT librsb_nounroll_la-rsb_permute.lo -MD -MP -MF $(DEPDIR)/librsb_nounroll_la-rsb_permute.Tpo -c -o librsb_nounroll_la-rsb_permute.lo `test -f 'rsb_permute.c' || echo '$(srcdir)/'`rsb_permute.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_nounroll_la-rsb_permute.Tpo $(DEPDIR)/librsb_nounroll_la-rsb_permute.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_permute.c' object='librsb_nounroll_la-rsb_permute.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -c -o librsb_nounroll_la-rsb_permute.lo `test -f 'rsb_permute.c' || echo '$(srcdir)/'`rsb_permute.c
+
+librsb_nounroll_la-rsb_krnl_bcss_l.lo: rsb_krnl_bcss_l.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -MT librsb_nounroll_la-rsb_krnl_bcss_l.lo -MD -MP -MF $(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss_l.Tpo -c -o librsb_nounroll_la-rsb_krnl_bcss_l.lo `test -f 'rsb_krnl_bcss_l.c' || echo '$(srcdir)/'`rsb_krnl_bcss_l.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss_l.Tpo $(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss_l.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_krnl_bcss_l.c' object='librsb_nounroll_la-rsb_krnl_bcss_l.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -c -o librsb_nounroll_la-rsb_krnl_bcss_l.lo `test -f 'rsb_krnl_bcss_l.c' || echo '$(srcdir)/'`rsb_krnl_bcss_l.c
+
+librsb_nounroll_la-rsb_krnl_bcss_u.lo: rsb_krnl_bcss_u.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -MT librsb_nounroll_la-rsb_krnl_bcss_u.lo -MD -MP -MF $(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss_u.Tpo -c -o librsb_nounroll_la-rsb_krnl_bcss_u.lo `test -f 'rsb_krnl_bcss_u.c' || echo '$(srcdir)/'`rsb_krnl_bcss_u.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss_u.Tpo $(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss_u.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_krnl_bcss_u.c' object='librsb_nounroll_la-rsb_krnl_bcss_u.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -c -o librsb_nounroll_la-rsb_krnl_bcss_u.lo `test -f 'rsb_krnl_bcss_u.c' || echo '$(srcdir)/'`rsb_krnl_bcss_u.c
+
+librsb_nounroll_la-rsb_krnl_bcss_spsv_u.lo: rsb_krnl_bcss_spsv_u.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -MT librsb_nounroll_la-rsb_krnl_bcss_spsv_u.lo -MD -MP -MF $(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss_spsv_u.Tpo -c -o librsb_nounroll_la-rsb_krnl_bcss_spsv_u.lo `test -f 'rsb_krnl_bcss_spsv_u.c' || echo '$(srcdir)/'`rsb_krnl_bcss_spsv_u.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss_spsv_u.Tpo $(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss_spsv_u.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_krnl_bcss_spsv_u.c' object='librsb_nounroll_la-rsb_krnl_bcss_spsv_u.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -c -o librsb_nounroll_la-rsb_krnl_bcss_spsv_u.lo `test -f 'rsb_krnl_bcss_spsv_u.c' || echo '$(srcdir)/'`rsb_krnl_bcss_spsv_u.c
+
+librsb_nounroll_la-rsb_krnl_bcss_spmv_u.lo: rsb_krnl_bcss_spmv_u.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -MT librsb_nounroll_la-rsb_krnl_bcss_spmv_u.lo -MD -MP -MF $(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss_spmv_u.Tpo -c -o librsb_nounroll_la-rsb_krnl_bcss_spmv_u.lo `test -f 'rsb_krnl_bcss_spmv_u.c' || echo '$(srcdir)/'`rsb_krnl_bcss_spmv_u.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss_spmv_u.Tpo $(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss_spmv_u.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_krnl_bcss_spmv_u.c' object='librsb_nounroll_la-rsb_krnl_bcss_spmv_u.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -c -o librsb_nounroll_la-rsb_krnl_bcss_spmv_u.lo `test -f 'rsb_krnl_bcss_spmv_u.c' || echo '$(srcdir)/'`rsb_krnl_bcss_spmv_u.c
+
+librsb_nounroll_la-rsb_krnl_bcss_misc_u.lo: rsb_krnl_bcss_misc_u.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -MT librsb_nounroll_la-rsb_krnl_bcss_misc_u.lo -MD -MP -MF $(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss_misc_u.Tpo -c -o librsb_nounroll_la-rsb_krnl_bcss_misc_u.lo `test -f 'rsb_krnl_bcss_misc_u.c' || echo '$(srcdir)/'`rsb_krnl_bcss_misc_u.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss_misc_u.Tpo $(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss_misc_u.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_krnl_bcss_misc_u.c' object='librsb_nounroll_la-rsb_krnl_bcss_misc_u.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -c -o librsb_nounroll_la-rsb_krnl_bcss_misc_u.lo `test -f 'rsb_krnl_bcss_misc_u.c' || echo '$(srcdir)/'`rsb_krnl_bcss_misc_u.c
+
+librsb_nounroll_la-rsb_krnl_bcoo_spmv_u.lo: rsb_krnl_bcoo_spmv_u.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -MT librsb_nounroll_la-rsb_krnl_bcoo_spmv_u.lo -MD -MP -MF $(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcoo_spmv_u.Tpo -c -o librsb_nounroll_la-rsb_krnl_bcoo_spmv_u.lo `test -f 'rsb_krnl_bcoo_spmv_u.c' || echo '$(srcdir)/'`rsb_krnl_bcoo_spmv_u.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcoo_spmv_u.Tpo $(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcoo_spmv_u.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_krnl_bcoo_spmv_u.c' object='librsb_nounroll_la-rsb_krnl_bcoo_spmv_u.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -c -o librsb_nounroll_la-rsb_krnl_bcoo_spmv_u.lo `test -f 'rsb_krnl_bcoo_spmv_u.c' || echo '$(srcdir)/'`rsb_krnl_bcoo_spmv_u.c
+
+librsb_nounroll_la-rsb_krnl_bcss.lo: rsb_krnl_bcss.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -MT librsb_nounroll_la-rsb_krnl_bcss.lo -MD -MP -MF $(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss.Tpo -c -o librsb_nounroll_la-rsb_krnl_bcss.lo `test -f 'rsb_krnl_bcss.c' || echo '$(srcdir)/'`rsb_krnl_bcss.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss.Tpo $(DEPDIR)/librsb_nounroll_la-rsb_krnl_bcss.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_krnl_bcss.c' object='librsb_nounroll_la-rsb_krnl_bcss.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -c -o librsb_nounroll_la-rsb_krnl_bcss.lo `test -f 'rsb_krnl_bcss.c' || echo '$(srcdir)/'`rsb_krnl_bcss.c
+
+librsb_nounroll_la-rsb_spmv.lo: rsb_spmv.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -MT librsb_nounroll_la-rsb_spmv.lo -MD -MP -MF $(DEPDIR)/librsb_nounroll_la-rsb_spmv.Tpo -c -o librsb_nounroll_la-rsb_spmv.lo `test -f 'rsb_spmv.c' || echo '$(srcdir)/'`rsb_spmv.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_nounroll_la-rsb_spmv.Tpo $(DEPDIR)/librsb_nounroll_la-rsb_spmv.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_spmv.c' object='librsb_nounroll_la-rsb_spmv.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -c -o librsb_nounroll_la-rsb_spmv.lo `test -f 'rsb_spmv.c' || echo '$(srcdir)/'`rsb_spmv.c
+
+librsb_nounroll_la-rsb_merge.lo: rsb_merge.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -MT librsb_nounroll_la-rsb_merge.lo -MD -MP -MF $(DEPDIR)/librsb_nounroll_la-rsb_merge.Tpo -c -o librsb_nounroll_la-rsb_merge.lo `test -f 'rsb_merge.c' || echo '$(srcdir)/'`rsb_merge.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_nounroll_la-rsb_merge.Tpo $(DEPDIR)/librsb_nounroll_la-rsb_merge.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_merge.c' object='librsb_nounroll_la-rsb_merge.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -c -o librsb_nounroll_la-rsb_merge.lo `test -f 'rsb_merge.c' || echo '$(srcdir)/'`rsb_merge.c
+
+librsb_nounroll_la-rsb_ompio.lo: rsb_ompio.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -MT librsb_nounroll_la-rsb_ompio.lo -MD -MP -MF $(DEPDIR)/librsb_nounroll_la-rsb_ompio.Tpo -c -o librsb_nounroll_la-rsb_ompio.lo `test -f 'rsb_ompio.c' || echo '$(srcdir)/'`rsb_ompio.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_nounroll_la-rsb_ompio.Tpo $(DEPDIR)/librsb_nounroll_la-rsb_ompio.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_ompio.c' object='librsb_nounroll_la-rsb_ompio.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -c -o librsb_nounroll_la-rsb_ompio.lo `test -f 'rsb_ompio.c' || echo '$(srcdir)/'`rsb_ompio.c
+
+librsb_nounroll_la-rsb_util.lo: rsb_util.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -MT librsb_nounroll_la-rsb_util.lo -MD -MP -MF $(DEPDIR)/librsb_nounroll_la-rsb_util.Tpo -c -o librsb_nounroll_la-rsb_util.lo `test -f 'rsb_util.c' || echo '$(srcdir)/'`rsb_util.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_nounroll_la-rsb_util.Tpo $(DEPDIR)/librsb_nounroll_la-rsb_util.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_util.c' object='librsb_nounroll_la-rsb_util.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -c -o librsb_nounroll_la-rsb_util.lo `test -f 'rsb_util.c' || echo '$(srcdir)/'`rsb_util.c
+
+librsb_nounroll_la-rsb_spgemm_csr.lo: rsb_spgemm_csr.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -MT librsb_nounroll_la-rsb_spgemm_csr.lo -MD -MP -MF $(DEPDIR)/librsb_nounroll_la-rsb_spgemm_csr.Tpo -c -o librsb_nounroll_la-rsb_spgemm_csr.lo `test -f 'rsb_spgemm_csr.c' || echo '$(srcdir)/'`rsb_spgemm_csr.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_nounroll_la-rsb_spgemm_csr.Tpo $(DEPDIR)/librsb_nounroll_la-rsb_spgemm_csr.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_spgemm_csr.c' object='librsb_nounroll_la-rsb_spgemm_csr.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -c -o librsb_nounroll_la-rsb_spgemm_csr.lo `test -f 'rsb_spgemm_csr.c' || echo '$(srcdir)/'`rsb_spgemm_csr.c
+
+librsb_nounroll_la-rsb_spsum_misc.lo: rsb_spsum_misc.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -MT librsb_nounroll_la-rsb_spsum_misc.lo -MD -MP -MF $(DEPDIR)/librsb_nounroll_la-rsb_spsum_misc.Tpo -c -o librsb_nounroll_la-rsb_spsum_misc.lo `test -f 'rsb_spsum_misc.c' || echo '$(srcdir)/'`rsb_spsum_misc.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_nounroll_la-rsb_spsum_misc.Tpo $(DEPDIR)/librsb_nounroll_la-rsb_spsum_misc.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_spsum_misc.c' object='librsb_nounroll_la-rsb_spsum_misc.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -c -o librsb_nounroll_la-rsb_spsum_misc.lo `test -f 'rsb_spsum_misc.c' || echo '$(srcdir)/'`rsb_spsum_misc.c
+
+librsb_nounroll_la-rsb_prec.lo: rsb_prec.c
+@am__fastdepCC_TRUE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -MT librsb_nounroll_la-rsb_prec.lo -MD -MP -MF $(DEPDIR)/librsb_nounroll_la-rsb_prec.Tpo -c -o librsb_nounroll_la-rsb_prec.lo `test -f 'rsb_prec.c' || echo '$(srcdir)/'`rsb_prec.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/librsb_nounroll_la-rsb_prec.Tpo $(DEPDIR)/librsb_nounroll_la-rsb_prec.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_prec.c' object='librsb_nounroll_la-rsb_prec.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LIBTOOL)  --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(librsb_nounroll_la_CFLAGS) $(CFLAGS) -c -o librsb_nounroll_la-rsb_prec.lo `test -f 'rsb_prec.c' || echo '$(srcdir)/'`rsb_prec.c
+
+ch2icfb-ch2icfb.o: ch2icfb.c
+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ch2icfb_CFLAGS) $(CFLAGS) -MT ch2icfb-ch2icfb.o -MD -MP -MF $(DEPDIR)/ch2icfb-ch2icfb.Tpo -c -o ch2icfb-ch2icfb.o `test -f 'ch2icfb.c' || echo '$(srcdir)/'`ch2icfb.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/ch2icfb-ch2icfb.Tpo $(DEPDIR)/ch2icfb-ch2icfb.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='ch2icfb.c' object='ch2icfb-ch2icfb.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ch2icfb_CFLAGS) $(CFLAGS) -c -o ch2icfb-ch2icfb.o `test -f 'ch2icfb.c' || echo '$(srcdir)/'`ch2icfb.c
+
+ch2icfb-ch2icfb.obj: ch2icfb.c
+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ch2icfb_CFLAGS) $(CFLAGS) -MT ch2icfb-ch2icfb.obj -MD -MP -MF $(DEPDIR)/ch2icfb-ch2icfb.Tpo -c -o ch2icfb-ch2icfb.obj `if test -f 'ch2icfb.c'; then $(CYGPATH_W) 'ch2icfb.c'; else $(CYGPATH_W) '$(srcdir)/ch2icfb.c'; fi`
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/ch2icfb-ch2icfb.Tpo $(DEPDIR)/ch2icfb-ch2icfb.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='ch2icfb.c' object='ch2icfb-ch2icfb.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(ch2icfb_CFLAGS) $(CFLAGS) -c -o ch2icfb-ch2icfb.obj `if test -f 'ch2icfb.c'; then $(CYGPATH_W) 'ch2icfb.c'; else $(CYGPATH_W) '$(srcdir)/ch2icfb.c'; fi`
+
+rsbench-rsbench.o: rsbench.c
+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -MT rsbench-rsbench.o -MD -MP -MF $(DEPDIR)/rsbench-rsbench.Tpo -c -o rsbench-rsbench.o `test -f 'rsbench.c' || echo '$(srcdir)/'`rsbench.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/rsbench-rsbench.Tpo $(DEPDIR)/rsbench-rsbench.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsbench.c' object='rsbench-rsbench.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -c -o rsbench-rsbench.o `test -f 'rsbench.c' || echo '$(srcdir)/'`rsbench.c
+
+rsbench-rsbench.obj: rsbench.c
+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -MT rsbench-rsbench.obj -MD -MP -MF $(DEPDIR)/rsbench-rsbench.Tpo -c -o rsbench-rsbench.obj `if test -f 'rsbench.c'; then $(CYGPATH_W) 'rsbench.c'; else $(CYGPATH_W) '$(srcdir)/rsbench.c'; fi`
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/rsbench-rsbench.Tpo $(DEPDIR)/rsbench-rsbench.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsbench.c' object='rsbench-rsbench.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -c -o rsbench-rsbench.obj `if test -f 'rsbench.c'; then $(CYGPATH_W) 'rsbench.c'; else $(CYGPATH_W) '$(srcdir)/rsbench.c'; fi`
+
+rsbench-rsb_test_matops.o: rsb_test_matops.c
+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -MT rsbench-rsb_test_matops.o -MD -MP -MF $(DEPDIR)/rsbench-rsb_test_matops.Tpo -c -o rsbench-rsb_test_matops.o `test -f 'rsb_test_matops.c' || echo '$(srcdir)/'`rsb_test_matops.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/rsbench-rsb_test_matops.Tpo $(DEPDIR)/rsbench-rsb_test_matops.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_test_matops.c' object='rsbench-rsb_test_matops.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -c -o rsbench-rsb_test_matops.o `test -f 'rsb_test_matops.c' || echo '$(srcdir)/'`rsb_test_matops.c
+
+rsbench-rsb_test_matops.obj: rsb_test_matops.c
+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -MT rsbench-rsb_test_matops.obj -MD -MP -MF $(DEPDIR)/rsbench-rsb_test_matops.Tpo -c -o rsbench-rsb_test_matops.obj `if test -f 'rsb_test_matops.c'; then $(CYGPATH_W) 'rsb_test_matops.c'; else $(CYGPATH_W) '$(srcdir)/rsb_test_matops.c'; fi`
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/rsbench-rsb_test_matops.Tpo $(DEPDIR)/rsbench-rsb_test_matops.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_test_matops.c' object='rsbench-rsb_test_matops.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -c -o rsbench-rsb_test_matops.obj `if test -f 'rsb_test_matops.c'; then $(CYGPATH_W) 'rsb_test_matops.c'; else $(CYGPATH_W) '$(srcdir)/rsb_test_matops.c'; fi`
+
+rsbench-rsb_mkl.o: rsb_mkl.c
+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -MT rsbench-rsb_mkl.o -MD -MP -MF $(DEPDIR)/rsbench-rsb_mkl.Tpo -c -o rsbench-rsb_mkl.o `test -f 'rsb_mkl.c' || echo '$(srcdir)/'`rsb_mkl.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/rsbench-rsb_mkl.Tpo $(DEPDIR)/rsbench-rsb_mkl.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_mkl.c' object='rsbench-rsb_mkl.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -c -o rsbench-rsb_mkl.o `test -f 'rsb_mkl.c' || echo '$(srcdir)/'`rsb_mkl.c
+
+rsbench-rsb_mkl.obj: rsb_mkl.c
+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -MT rsbench-rsb_mkl.obj -MD -MP -MF $(DEPDIR)/rsbench-rsb_mkl.Tpo -c -o rsbench-rsb_mkl.obj `if test -f 'rsb_mkl.c'; then $(CYGPATH_W) 'rsb_mkl.c'; else $(CYGPATH_W) '$(srcdir)/rsb_mkl.c'; fi`
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/rsbench-rsb_mkl.Tpo $(DEPDIR)/rsbench-rsb_mkl.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_mkl.c' object='rsbench-rsb_mkl.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -c -o rsbench-rsb_mkl.obj `if test -f 'rsb_mkl.c'; then $(CYGPATH_W) 'rsb_mkl.c'; else $(CYGPATH_W) '$(srcdir)/rsb_mkl.c'; fi`
+
+rsbench-rsb_genmm.o: rsb_genmm.c
+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -MT rsbench-rsb_genmm.o -MD -MP -MF $(DEPDIR)/rsbench-rsb_genmm.Tpo -c -o rsbench-rsb_genmm.o `test -f 'rsb_genmm.c' || echo '$(srcdir)/'`rsb_genmm.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/rsbench-rsb_genmm.Tpo $(DEPDIR)/rsbench-rsb_genmm.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_genmm.c' object='rsbench-rsb_genmm.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -c -o rsbench-rsb_genmm.o `test -f 'rsb_genmm.c' || echo '$(srcdir)/'`rsb_genmm.c
+
+rsbench-rsb_genmm.obj: rsb_genmm.c
+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -MT rsbench-rsb_genmm.obj -MD -MP -MF $(DEPDIR)/rsbench-rsb_genmm.Tpo -c -o rsbench-rsb_genmm.obj `if test -f 'rsb_genmm.c'; then $(CYGPATH_W) 'rsb_genmm.c'; else $(CYGPATH_W) '$(srcdir)/rsb_genmm.c'; fi`
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/rsbench-rsb_genmm.Tpo $(DEPDIR)/rsbench-rsb_genmm.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_genmm.c' object='rsbench-rsb_genmm.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -c -o rsbench-rsb_genmm.obj `if test -f 'rsb_genmm.c'; then $(CYGPATH_W) 'rsb_genmm.c'; else $(CYGPATH_W) '$(srcdir)/rsb_genmm.c'; fi`
+
+rsbench-rsb_mmls.o: rsb_mmls.c
+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -MT rsbench-rsb_mmls.o -MD -MP -MF $(DEPDIR)/rsbench-rsb_mmls.Tpo -c -o rsbench-rsb_mmls.o `test -f 'rsb_mmls.c' || echo '$(srcdir)/'`rsb_mmls.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/rsbench-rsb_mmls.Tpo $(DEPDIR)/rsbench-rsb_mmls.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_mmls.c' object='rsbench-rsb_mmls.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -c -o rsbench-rsb_mmls.o `test -f 'rsb_mmls.c' || echo '$(srcdir)/'`rsb_mmls.c
+
+rsbench-rsb_mmls.obj: rsb_mmls.c
+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -MT rsbench-rsb_mmls.obj -MD -MP -MF $(DEPDIR)/rsbench-rsb_mmls.Tpo -c -o rsbench-rsb_mmls.obj `if test -f 'rsb_mmls.c'; then $(CYGPATH_W) 'rsb_mmls.c'; else $(CYGPATH_W) '$(srcdir)/rsb_mmls.c'; fi`
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/rsbench-rsb_mmls.Tpo $(DEPDIR)/rsbench-rsb_mmls.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_mmls.c' object='rsbench-rsb_mmls.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -c -o rsbench-rsb_mmls.obj `if test -f 'rsb_mmls.c'; then $(CYGPATH_W) 'rsb_mmls.c'; else $(CYGPATH_W) '$(srcdir)/rsb_mmls.c'; fi`
+
+rsbench-rsb_pr.o: rsb_pr.c
+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -MT rsbench-rsb_pr.o -MD -MP -MF $(DEPDIR)/rsbench-rsb_pr.Tpo -c -o rsbench-rsb_pr.o `test -f 'rsb_pr.c' || echo '$(srcdir)/'`rsb_pr.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/rsbench-rsb_pr.Tpo $(DEPDIR)/rsbench-rsb_pr.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_pr.c' object='rsbench-rsb_pr.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -c -o rsbench-rsb_pr.o `test -f 'rsb_pr.c' || echo '$(srcdir)/'`rsb_pr.c
+
+rsbench-rsb_pr.obj: rsb_pr.c
+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -MT rsbench-rsb_pr.obj -MD -MP -MF $(DEPDIR)/rsbench-rsb_pr.Tpo -c -o rsbench-rsb_pr.obj `if test -f 'rsb_pr.c'; then $(CYGPATH_W) 'rsb_pr.c'; else $(CYGPATH_W) '$(srcdir)/rsb_pr.c'; fi`
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/rsbench-rsb_pr.Tpo $(DEPDIR)/rsbench-rsb_pr.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_pr.c' object='rsbench-rsb_pr.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -c -o rsbench-rsb_pr.obj `if test -f 'rsb_pr.c'; then $(CYGPATH_W) 'rsb_pr.c'; else $(CYGPATH_W) '$(srcdir)/rsb_pr.c'; fi`
+
+rsbench-rsb_pcnt.o: rsb_pcnt.c
+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -MT rsbench-rsb_pcnt.o -MD -MP -MF $(DEPDIR)/rsbench-rsb_pcnt.Tpo -c -o rsbench-rsb_pcnt.o `test -f 'rsb_pcnt.c' || echo '$(srcdir)/'`rsb_pcnt.c
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/rsbench-rsb_pcnt.Tpo $(DEPDIR)/rsbench-rsb_pcnt.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_pcnt.c' object='rsbench-rsb_pcnt.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -c -o rsbench-rsb_pcnt.o `test -f 'rsb_pcnt.c' || echo '$(srcdir)/'`rsb_pcnt.c
+
+rsbench-rsb_pcnt.obj: rsb_pcnt.c
+ at am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -MT rsbench-rsb_pcnt.obj -MD -MP -MF $(DEPDIR)/rsbench-rsb_pcnt.Tpo -c -o rsbench-rsb_pcnt.obj `if test -f 'rsb_pcnt.c'; then $(CYGPATH_W) 'rsb_pcnt.c'; else $(CYGPATH_W) '$(srcdir)/rsb_pcnt.c'; fi`
+ at am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/rsbench-rsb_pcnt.Tpo $(DEPDIR)/rsbench-rsb_pcnt.Po
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_pcnt.c' object='rsbench-rsb_pcnt.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -c -o rsbench-rsb_pcnt.obj `if test -f 'rsb_pcnt.c'; then $(CYGPATH_W) 'rsb_pcnt.c'; else $(CYGPATH_W) '$(srcdir)/rsb_pcnt.c'; fi`
+
+rsbench-rsb_failure_tests.o: rsb_failure_tests.c
+ at am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -MT rsbench-rsb_failure_tests.o -MD -MP -MF $(DEPDIR)/rsbench-rsb_failure_tests.Tpo -c -o rsbench-rsb_failure_tests.o `test -f 'rsb_failure_tests.c' || echo '$(srcdir)/'`rsb_failure_tests.c
+ at am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/rsbench-rsb_failure_tests.Tpo $(DEPDIR)/rsbench-rsb_failure_tests.Po
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_failure_tests.c' object='rsbench-rsb_failure_tests.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -c -o rsbench-rsb_failure_tests.o `test -f 'rsb_failure_tests.c' || echo '$(srcdir)/'`rsb_failure_tests.c
+
+rsbench-rsb_failure_tests.obj: rsb_failure_tests.c
+ at am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -MT rsbench-rsb_failure_tests.obj -MD -MP -MF $(DEPDIR)/rsbench-rsb_failure_tests.Tpo -c -o rsbench-rsb_failure_tests.obj `if test -f 'rsb_failure_tests.c'; then $(CYGPATH_W) 'rsb_failure_tests.c'; else $(CYGPATH_W) '$(srcdir)/rsb_failure_tests.c'; fi`
+ at am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/rsbench-rsb_failure_tests.Tpo $(DEPDIR)/rsbench-rsb_failure_tests.Po
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_failure_tests.c' object='rsbench-rsb_failure_tests.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -c -o rsbench-rsb_failure_tests.obj `if test -f 'rsb_failure_tests.c'; then $(CYGPATH_W) 'rsb_failure_tests.c'; else $(CYGPATH_W) '$(srcdir)/rsb_failure_tests.c'; fi`
+
+rsbench-rsb_libspblas_tests.o: rsb_libspblas_tests.c
+ at am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -MT rsbench-rsb_libspblas_tests.o -MD -MP -MF $(DEPDIR)/rsbench-rsb_libspblas_tests.Tpo -c -o rsbench-rsb_libspblas_tests.o `test -f 'rsb_libspblas_tests.c' || echo '$(srcdir)/'`rsb_libspblas_tests.c
+ at am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/rsbench-rsb_libspblas_tests.Tpo $(DEPDIR)/rsbench-rsb_libspblas_tests.Po
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_libspblas_tests.c' object='rsbench-rsb_libspblas_tests.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -c -o rsbench-rsb_libspblas_tests.o `test -f 'rsb_libspblas_tests.c' || echo '$(srcdir)/'`rsb_libspblas_tests.c
+
+rsbench-rsb_libspblas_tests.obj: rsb_libspblas_tests.c
+ at am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -MT rsbench-rsb_libspblas_tests.obj -MD -MP -MF $(DEPDIR)/rsbench-rsb_libspblas_tests.Tpo -c -o rsbench-rsb_libspblas_tests.obj `if test -f 'rsb_libspblas_tests.c'; then $(CYGPATH_W) 'rsb_libspblas_tests.c'; else $(CYGPATH_W) '$(srcdir)/rsb_libspblas_tests.c'; fi`
+ at am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/rsbench-rsb_libspblas_tests.Tpo $(DEPDIR)/rsbench-rsb_libspblas_tests.Po
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@	source='rsb_libspblas_tests.c' object='rsbench-rsb_libspblas_tests.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(rsbench_CFLAGS) $(CFLAGS) -c -o rsbench-rsb_libspblas_tests.obj `if test -f 'rsb_libspblas_tests.c'; then $(CYGPATH_W) 'rsb_libspblas_tests.c'; else $(CYGPATH_W) '$(srcdir)/rsb_libspblas_tests.c'; fi`
+
+sbtc-sbtc.o: sbtc.c
+ at am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(sbtc_CFLAGS) $(CFLAGS) -MT sbtc-sbtc.o -MD -MP -MF $(DEPDIR)/sbtc-sbtc.Tpo -c -o sbtc-sbtc.o `test -f 'sbtc.c' || echo '$(srcdir)/'`sbtc.c
+ at am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/sbtc-sbtc.Tpo $(DEPDIR)/sbtc-sbtc.Po
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@	source='sbtc.c' object='sbtc-sbtc.o' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(sbtc_CFLAGS) $(CFLAGS) -c -o sbtc-sbtc.o `test -f 'sbtc.c' || echo '$(srcdir)/'`sbtc.c
+
+sbtc-sbtc.obj: sbtc.c
+ at am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(sbtc_CFLAGS) $(CFLAGS) -MT sbtc-sbtc.obj -MD -MP -MF $(DEPDIR)/sbtc-sbtc.Tpo -c -o sbtc-sbtc.obj `if test -f 'sbtc.c'; then $(CYGPATH_W) 'sbtc.c'; else $(CYGPATH_W) '$(srcdir)/sbtc.c'; fi`
+ at am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/sbtc-sbtc.Tpo $(DEPDIR)/sbtc-sbtc.Po
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@	source='sbtc.c' object='sbtc-sbtc.obj' libtool=no @AMDEPBACKSLASH@
+ at AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ at am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(sbtc_CFLAGS) $(CFLAGS) -c -o sbtc-sbtc.obj `if test -f 'sbtc.c'; then $(CYGPATH_W) 'sbtc.c'; else $(CYGPATH_W) '$(srcdir)/sbtc.c'; fi`
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+distclean-libtool:
+	-rm -f libtool config.lt
+install-dist_docDATA: $(dist_doc_DATA)
+	@$(NORMAL_INSTALL)
+	@list='$(dist_doc_DATA)'; test -n "$(docdir)" || list=; \
+	if test -n "$$list"; then \
+	  echo " $(MKDIR_P) '$(DESTDIR)$(docdir)'"; \
+	  $(MKDIR_P) "$(DESTDIR)$(docdir)" || exit 1; \
+	fi; \
+	for p in $$list; do \
+	  if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+	  echo "$$d$$p"; \
+	done | $(am__base_list) | \
+	while read files; do \
+	  echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(docdir)'"; \
+	  $(INSTALL_DATA) $$files "$(DESTDIR)$(docdir)" || exit $$?; \
+	done
+
+uninstall-dist_docDATA:
+	@$(NORMAL_UNINSTALL)
+	@list='$(dist_doc_DATA)'; test -n "$(docdir)" || list=; \
+	files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+	dir='$(DESTDIR)$(docdir)'; $(am__uninstall_files_from_dir)
+install-includeHEADERS: $(include_HEADERS)
+	@$(NORMAL_INSTALL)
+	@list='$(include_HEADERS)'; test -n "$(includedir)" || list=; \
+	if test -n "$$list"; then \
+	  echo " $(MKDIR_P) '$(DESTDIR)$(includedir)'"; \
+	  $(MKDIR_P) "$(DESTDIR)$(includedir)" || exit 1; \
+	fi; \
+	for p in $$list; do \
+	  if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+	  echo "$$d$$p"; \
+	done | $(am__base_list) | \
+	while read files; do \
+	  echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(includedir)'"; \
+	  $(INSTALL_HEADER) $$files "$(DESTDIR)$(includedir)" || exit $$?; \
+	done
+
+uninstall-includeHEADERS:
+	@$(NORMAL_UNINSTALL)
+	@list='$(include_HEADERS)'; test -n "$(includedir)" || list=; \
+	files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+	dir='$(DESTDIR)$(includedir)'; $(am__uninstall_files_from_dir)
+install-nodist_includeHEADERS: $(nodist_include_HEADERS)
+	@$(NORMAL_INSTALL)
+	@list='$(nodist_include_HEADERS)'; test -n "$(includedir)" || list=; \
+	if test -n "$$list"; then \
+	  echo " $(MKDIR_P) '$(DESTDIR)$(includedir)'"; \
+	  $(MKDIR_P) "$(DESTDIR)$(includedir)" || exit 1; \
+	fi; \
+	for p in $$list; do \
+	  if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+	  echo "$$d$$p"; \
+	done | $(am__base_list) | \
+	while read files; do \
+	  echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(includedir)'"; \
+	  $(INSTALL_HEADER) $$files "$(DESTDIR)$(includedir)" || exit $$?; \
+	done
+
+uninstall-nodist_includeHEADERS:
+	@$(NORMAL_UNINSTALL)
+	@list='$(nodist_include_HEADERS)'; test -n "$(includedir)" || list=; \
+	files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+	dir='$(DESTDIR)$(includedir)'; $(am__uninstall_files_from_dir)
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run `make' without going through this Makefile.
+# To change the values of `make' variables: instead of editing Makefiles,
+# (1) if the variable is set in `config.status', edit `config.status'
+#     (which will cause the Makefiles to be regenerated when you run `make');
+# (2) otherwise, pass the desired values on the `make' command line.
+$(RECURSIVE_TARGETS):
+	@fail= failcom='exit 1'; \
+	for f in x $$MAKEFLAGS; do \
+	  case $$f in \
+	    *=* | --[!k]*);; \
+	    *k*) failcom='fail=yes';; \
+	  esac; \
+	done; \
+	dot_seen=no; \
+	target=`echo $@ | sed s/-recursive//`; \
+	list='$(SUBDIRS)'; for subdir in $$list; do \
+	  echo "Making $$target in $$subdir"; \
+	  if test "$$subdir" = "."; then \
+	    dot_seen=yes; \
+	    local_target="$$target-am"; \
+	  else \
+	    local_target="$$target"; \
+	  fi; \
+	  ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+	  || eval $$failcom; \
+	done; \
+	if test "$$dot_seen" = "no"; then \
+	  $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+	fi; test -z "$$fail"
+
+$(RECURSIVE_CLEAN_TARGETS):
+	@fail= failcom='exit 1'; \
+	for f in x $$MAKEFLAGS; do \
+	  case $$f in \
+	    *=* | --[!k]*);; \
+	    *k*) failcom='fail=yes';; \
+	  esac; \
+	done; \
+	dot_seen=no; \
+	case "$@" in \
+	  distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+	  *) list='$(SUBDIRS)' ;; \
+	esac; \
+	rev=''; for subdir in $$list; do \
+	  if test "$$subdir" = "."; then :; else \
+	    rev="$$subdir $$rev"; \
+	  fi; \
+	done; \
+	rev="$$rev ."; \
+	target=`echo $@ | sed s/-recursive//`; \
+	for subdir in $$rev; do \
+	  echo "Making $$target in $$subdir"; \
+	  if test "$$subdir" = "."; then \
+	    local_target="$$target-am"; \
+	  else \
+	    local_target="$$target"; \
+	  fi; \
+	  ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+	  || eval $$failcom; \
+	done && test -z "$$fail"
+tags-recursive:
+	list='$(SUBDIRS)'; for subdir in $$list; do \
+	  test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \
+	done
+ctags-recursive:
+	list='$(SUBDIRS)'; for subdir in $$list; do \
+	  test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \
+	done
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+	list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	mkid -fID $$unique
+tags: TAGS
+
+TAGS: tags-recursive $(HEADERS) $(SOURCES) rsb-config.h.in $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	set x; \
+	here=`pwd`; \
+	if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+	  include_option=--etags-include; \
+	  empty_fix=.; \
+	else \
+	  include_option=--include; \
+	  empty_fix=; \
+	fi; \
+	list='$(SUBDIRS)'; for subdir in $$list; do \
+	  if test "$$subdir" = .; then :; else \
+	    test ! -f $$subdir/TAGS || \
+	      set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+	  fi; \
+	done; \
+	list='$(SOURCES) $(HEADERS) rsb-config.h.in $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: CTAGS
+CTAGS: ctags-recursive $(HEADERS) $(SOURCES) rsb-config.h.in $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	list='$(SOURCES) $(HEADERS) rsb-config.h.in $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	$(am__remove_distdir)
+	test -d "$(distdir)" || mkdir "$(distdir)"
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+	@list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+	  if test "$$subdir" = .; then :; else \
+	    $(am__make_dryrun) \
+	      || test -d "$(distdir)/$$subdir" \
+	      || $(MKDIR_P) "$(distdir)/$$subdir" \
+	      || exit 1; \
+	    dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+	    $(am__relativize); \
+	    new_distdir=$$reldir; \
+	    dir1=$$subdir; dir2="$(top_distdir)"; \
+	    $(am__relativize); \
+	    new_top_distdir=$$reldir; \
+	    echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+	    echo "     am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+	    ($(am__cd) $$subdir && \
+	      $(MAKE) $(AM_MAKEFLAGS) \
+	        top_distdir="$$new_top_distdir" \
+	        distdir="$$new_distdir" \
+		am__remove_distdir=: \
+		am__skip_length_check=: \
+		am__skip_mode_fix=: \
+	        distdir) \
+	      || exit 1; \
+	  fi; \
+	done
+	-test -n "$(am__skip_mode_fix)" \
+	|| find "$(distdir)" -type d ! -perm -755 \
+		-exec chmod u+rwx,go+rx {} \; -o \
+	  ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \
+	  ! -type d ! -perm -400 -exec chmod a+r {} \; -o \
+	  ! -type d ! -perm -444 -exec $(install_sh) -c -m a+r {} {} \; \
+	|| chmod -R a+r "$(distdir)"
+dist-gzip: distdir
+	tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz
+	$(am__remove_distdir)
+
+dist-bzip2: distdir
+	tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2
+	$(am__remove_distdir)
+
+dist-lzip: distdir
+	tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz
+	$(am__remove_distdir)
+
+dist-lzma: distdir
+	tardir=$(distdir) && $(am__tar) | lzma -9 -c >$(distdir).tar.lzma
+	$(am__remove_distdir)
+
+dist-xz: distdir
+	tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz
+	$(am__remove_distdir)
+
+dist-tarZ: distdir
+	tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z
+	$(am__remove_distdir)
+
+dist-shar: distdir
+	shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz
+	$(am__remove_distdir)
+
+dist-zip: distdir
+	-rm -f $(distdir).zip
+	zip -rq $(distdir).zip $(distdir)
+	$(am__remove_distdir)
+
+dist dist-all: distdir
+	tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz
+	$(am__remove_distdir)
+
+# This target untars the dist file and tries a VPATH configuration.  Then
+# it guarantees that the distribution is self-contained by making another
+# tarfile.
+distcheck: dist
+	case '$(DIST_ARCHIVES)' in \
+	*.tar.gz*) \
+	  GZIP=$(GZIP_ENV) gzip -dc $(distdir).tar.gz | $(am__untar) ;;\
+	*.tar.bz2*) \
+	  bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\
+	*.tar.lzma*) \
+	  lzma -dc $(distdir).tar.lzma | $(am__untar) ;;\
+	*.tar.lz*) \
+	  lzip -dc $(distdir).tar.lz | $(am__untar) ;;\
+	*.tar.xz*) \
+	  xz -dc $(distdir).tar.xz | $(am__untar) ;;\
+	*.tar.Z*) \
+	  uncompress -c $(distdir).tar.Z | $(am__untar) ;;\
+	*.shar.gz*) \
+	  GZIP=$(GZIP_ENV) gzip -dc $(distdir).shar.gz | unshar ;;\
+	*.zip*) \
+	  unzip $(distdir).zip ;;\
+	esac
+	chmod -R a-w $(distdir); chmod u+w $(distdir)
+	mkdir $(distdir)/_build
+	mkdir $(distdir)/_inst
+	chmod a-w $(distdir)
+	test -d $(distdir)/_build || exit 0; \
+	dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \
+	  && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \
+	  && am__cwd=`pwd` \
+	  && $(am__cd) $(distdir)/_build \
+	  && ../configure --srcdir=.. --prefix="$$dc_install_base" \
+	    $(AM_DISTCHECK_CONFIGURE_FLAGS) \
+	    $(DISTCHECK_CONFIGURE_FLAGS) \
+	  && $(MAKE) $(AM_MAKEFLAGS) \
+	  && $(MAKE) $(AM_MAKEFLAGS) dvi \
+	  && $(MAKE) $(AM_MAKEFLAGS) check \
+	  && $(MAKE) $(AM_MAKEFLAGS) install \
+	  && $(MAKE) $(AM_MAKEFLAGS) installcheck \
+	  && $(MAKE) $(AM_MAKEFLAGS) uninstall \
+	  && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \
+	        distuninstallcheck \
+	  && chmod -R a-w "$$dc_install_base" \
+	  && ({ \
+	       (cd ../.. && umask 077 && mkdir "$$dc_destdir") \
+	       && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \
+	       && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \
+	       && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \
+	            distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \
+	      } || { rm -rf "$$dc_destdir"; exit 1; }) \
+	  && rm -rf "$$dc_destdir" \
+	  && $(MAKE) $(AM_MAKEFLAGS) dist \
+	  && rm -rf $(DIST_ARCHIVES) \
+	  && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \
+	  && cd "$$am__cwd" \
+	  || exit 1
+	$(am__remove_distdir)
+	@(echo "$(distdir) archives ready for distribution: "; \
+	  list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \
+	  sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x'
+distuninstallcheck:
+	@test -n '$(distuninstallcheck_dir)' || { \
+	  echo 'ERROR: trying to run $@ with an empty' \
+	       '$$(distuninstallcheck_dir)' >&2; \
+	  exit 1; \
+	}; \
+	$(am__cd) '$(distuninstallcheck_dir)' || { \
+	  echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \
+	  exit 1; \
+	}; \
+	test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \
+	   || { echo "ERROR: files left after uninstall:" ; \
+	        if test -n "$(DESTDIR)"; then \
+	          echo "  (check DESTDIR support)"; \
+	        fi ; \
+	        $(distuninstallcheck_listfiles) ; \
+	        exit 1; } >&2
+distcleancheck: distclean
+	@if test '$(srcdir)' = . ; then \
+	  echo "ERROR: distcleancheck can only run from a VPATH build" ; \
+	  exit 1 ; \
+	fi
+	@test `$(distcleancheck_listfiles) | wc -l` -eq 0 \
+	  || { echo "ERROR: files left in build directory after distclean:" ; \
+	       $(distcleancheck_listfiles) ; \
+	       exit 1; } >&2
+check-am: all-am
+check: $(BUILT_SOURCES)
+	$(MAKE) $(AM_MAKEFLAGS) check-recursive
+all-am: Makefile $(LTLIBRARIES) $(PROGRAMS) $(SCRIPTS) $(DATA) \
+		$(HEADERS) rsb-config.h
+install-binPROGRAMS: install-libLTLIBRARIES
+
+installdirs: installdirs-recursive
+installdirs-am:
+	for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(bindir)" "$(DESTDIR)$(bindir)" "$(DESTDIR)$(docdir)" "$(DESTDIR)$(includedir)" "$(DESTDIR)$(includedir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: $(BUILT_SOURCES)
+	$(MAKE) $(AM_MAKEFLAGS) install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+	-test -z "$(BUILT_SOURCES)" || rm -f $(BUILT_SOURCES)
+clean: clean-recursive
+
+clean-am: clean-binPROGRAMS clean-generic clean-libLTLIBRARIES \
+	clean-libtool clean-noinstLTLIBRARIES clean-noinstPROGRAMS \
+	mostlyclean-am
+
+distclean: distclean-recursive
+	-rm -f $(am__CONFIG_DISTCLEAN_FILES)
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-hdr distclean-libtool distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am: install-dist_docDATA install-includeHEADERS \
+	install-nodist_includeHEADERS
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am: install-binPROGRAMS install-binSCRIPTS \
+	install-libLTLIBRARIES
+	@$(NORMAL_INSTALL)
+	$(MAKE) $(AM_MAKEFLAGS) install-exec-hook
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+	-rm -f $(am__CONFIG_DISTCLEAN_FILES)
+	-rm -rf $(top_srcdir)/autom4te.cache
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am: uninstall-binPROGRAMS uninstall-binSCRIPTS \
+	uninstall-dist_docDATA uninstall-includeHEADERS \
+	uninstall-libLTLIBRARIES uninstall-nodist_includeHEADERS
+
+.MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) all check \
+	ctags-recursive install install-am install-exec-am \
+	install-strip tags-recursive
+
+.PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \
+	all all-am am--refresh check check-am clean clean-binPROGRAMS \
+	clean-generic clean-libLTLIBRARIES clean-libtool \
+	clean-noinstLTLIBRARIES clean-noinstPROGRAMS ctags \
+	ctags-recursive dist dist-all dist-bzip2 dist-gzip dist-lzip \
+	dist-lzma dist-shar dist-tarZ dist-xz dist-zip distcheck \
+	distclean distclean-compile distclean-generic distclean-hdr \
+	distclean-libtool distclean-tags distcleancheck distdir \
+	distuninstallcheck dvi dvi-am html html-am info info-am \
+	install install-am install-binPROGRAMS install-binSCRIPTS \
+	install-data install-data-am install-dist_docDATA install-dvi \
+	install-dvi-am install-exec install-exec-am install-exec-hook \
+	install-html install-html-am install-includeHEADERS \
+	install-info install-info-am install-libLTLIBRARIES \
+	install-man install-nodist_includeHEADERS install-pdf \
+	install-pdf-am install-ps install-ps-am install-strip \
+	installcheck installcheck-am installdirs installdirs-am \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+	pdf pdf-am ps ps-am tags tags-recursive uninstall uninstall-am \
+	uninstall-binPROGRAMS uninstall-binSCRIPTS \
+	uninstall-dist_docDATA uninstall-includeHEADERS \
+	uninstall-libLTLIBRARIES uninstall-nodist_includeHEADERS
+
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@blas_sparse.mod: rsb_blas_sparse.$(OBJEXT)
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@rsb.mod: rsb.$(OBJEXT)
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@rsb.F90: ch2icfb rsb.h $(top_srcdir)/scripts/rsb_h_to_rsb_fi.sh
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@	if test -f ch2icfb ; then $(top_srcdir)/scripts/rsb_h_to_rsb_fi.sh $(srcdir) > $@ ; else echo "Warning: Your system did not build ch2icfb for some reason --- skipping rebuild of "$@ ;true ; fi
+
+.PHONY: e
+e:
+	vim rsb.h
+
+.PHONY: tdist
+tdist: dox
+	$(MAKE) dist VERSION=trunk
+
+.PHONY: rtdist
+rtdist: tdist
+	mv librsb-trunk.tar.gz $(DIST_ARCHIVES_NOVERSION) 
+
+.PHONY: rdist
+rdist: dox
+	$(MAKE) dist
+	mv $(DIST_ARCHIVES) $(DIST_ARCHIVES_NOVERSION) 
+
+.PHONY: ddist
+ddist: rdist
+	gpg -sbv -u 0xe0e669c8ef1258b8 $(DIST_ARCHIVES_NOVERSION)
+	md5sum $(DIST_ARCHIVES_NOVERSION) > $(DIST_ARCHIVES_NOVERSION).md5
+	gpg -sbav $(DIST_ARCHIVES_NOVERSION)
+	gpg --verify $(DIST_ARCHIVES_NOVERSION).sig
+
+.PHONY: bdist
+bdist: dox $(BINDISTFILES) 
+	rm -fR -- $(PACKAGE)-$(build)
+	mkdir $(PACKAGE)-$(build)
+	cp -fR $(BINDISTFILES) $(PACKAGE)-$(build)/
+	tar cvzf $(PACKAGE)-$(build).tgz $(PACKAGE)-$(build)  --exclude .svn  --exclude .deps
+	rm -fR $(PACKAGE)-$(build)
+	tar tvzf $(PACKAGE)-$(build).tgz
+
+.PHONY: help
+help:
+	@echo -e "Alternatives (see the README for these):\n make clean\n make cleanall\n make all \n make qqtests \n make qtests \n make tests \n make dist"
+
+.PHONY: cleanall
+cleanall: clean
+	rm -rf $(LIB_CLEANALL_FILES)
+	$(MAKE) clean
+
+gclean:
+	rm -rf  *.gcov *.gcno
+
+@HAVE_M4_TRUE@rsb_libspblas.h: $(srcdir)/rsb_libspblas.m4 $(srcdir)/libspblas_macros.m4
+@HAVE_M4_TRUE@rsb_libspblas.c: $(srcdir)/rsb_libspblas.m4 $(srcdir)/libspblas_macros.m4
+@HAVE_M4_TRUE@psb_rsb_mod.F90: $(srcdir)/psb_rsb_mod.m4 $(srcdir)/rsb_fortran_macros.m4
+@HAVE_M4_TRUE@rsb_mod.F90: $(srcdir)/rsb_mod.m4 $(srcdir)/rsb_fortran_macros.m4
+@HAVE_M4_TRUE@rsb_blas_sparse.F90: $(srcdir)/rsb_blas_sparse.m4 $(srcdir)/rsb_fortran_macros.m4
+@HAVE_M4_TRUE@rsb_libspblas_handle.c: rsb_libspblas.h rsb_libspblas_handle.h
+@HAVE_M4_TRUE@rsb_libspblas_tests.c: rsb_libspblas.h rsb_libspblas_tests.h
+@HAVE_M4_TRUE@rsb_mkl.c: $(srcdir)/rsb_mkl.m4
+@HAVE_M4_TRUE@rsb_mkl.h: $(srcdir)/rsb_mkl.m4
+
+@WANT_INTERNAL_HEADERS_INSTALL_TRUE@rsb-librsb-internals.h: $(librsb_base_la_SOURCES) $(librsb_nounroll_la_SOURCES)
+@WANT_INTERNAL_HEADERS_INSTALL_TRUE@	( cat rsb_license_header.inc                                                                            ; \
+@WANT_INTERNAL_HEADERS_INSTALL_TRUE@	echo '/*! Collated internal headers of librsb -- for inspection purposes only (not for usage). */'; echo ; \
+@WANT_INTERNAL_HEADERS_INSTALL_TRUE@	echo '/* @cond INNERDOC */' ; \
+@WANT_INTERNAL_HEADERS_INSTALL_TRUE@	echo '#ifndef RSB_LIBRSB_INTERNALS_H_INCLUDED'									; \
+@WANT_INTERNAL_HEADERS_INSTALL_TRUE@	echo '#define RSB_LIBRSB_INTERNALS_H_INCLUDED'									; \
+@WANT_INTERNAL_HEADERS_INSTALL_TRUE@	find $+ -iname '*.h' -exec 'cat' '{}' ';' | grep -v 'cond INNERDOC\|endcond' ; \
+@WANT_INTERNAL_HEADERS_INSTALL_TRUE@	echo '#endif /* RSB_LIBRSB_INTERNALS_H_INCLUDED */'								; \
+@WANT_INTERNAL_HEADERS_INSTALL_TRUE@	echo '/* @endcond */' ;  )> $@
+
+@WANT_INTERNAL_HEADERS_INSTALL_TRUE@rsb-incoming.h: $(librsb_base_la_SOURCES) $(librsb_nounroll_la_SOURCES) $(top_srcdir)/rsb-incoming.grep
+@WANT_INTERNAL_HEADERS_INSTALL_TRUE@	( cat rsb_license_header.inc                                                                            ; \
+@WANT_INTERNAL_HEADERS_INSTALL_TRUE@	echo '/* Collated internal headers of librsb -- for experimental use only (not for usage). */'; echo	; \
+@WANT_INTERNAL_HEADERS_INSTALL_TRUE@	echo '#ifndef RSB_LIBRSB_INCOMING_H_INCLUDED'									; \
+@WANT_INTERNAL_HEADERS_INSTALL_TRUE@	echo '#define RSB_LIBRSB_INCOMING_H_INCLUDED'									; \
+@WANT_INTERNAL_HEADERS_INSTALL_TRUE@	echo '#include <rsb.h>'									; \
+@WANT_INTERNAL_HEADERS_INSTALL_TRUE@	( cat `svn ls | grep h$$` $(srcdir)/rsb_util.h ) | grep -f $(top_srcdir)/rsb-incoming.grep | sed -f $(top_srcdir)/rsb-incoming.sed | tac ; \
+@WANT_INTERNAL_HEADERS_INSTALL_TRUE@	echo '#endif /* RSB_LIBRSB_INCOMING_H_INCLUDED */'								; )> $@
+
+rsb_test_matops.h: rsb_mkl.h 
+
+@WANT_OCTAVE_TESTING_TRUE@psbtf.F90: psbtf.m sbtg.m sbtg-types.m
+@WANT_OCTAVE_TESTING_TRUE@	$(OCTAVE) $(OCTAVE_FLAGS) psbtf.m > psbtf.F90
+
+@WANT_OCTAVE_TESTING_TRUE@psb_mvsv_tester.f90: psb_mvsv_tester.m sbtg.m sbtg-types.m
+@WANT_OCTAVE_TESTING_TRUE@	$(OCTAVE) $(OCTAVE_FLAGS) psb_mvsv_tester.m > psb_mvsv_tester.f90
+
+@HAVE_FORTRAN_EXAMPLES_TRUE@@WANT_OCTAVE_TESTING_TRUE@sbtf.F90: sbtf.m sbtg.m sbtg-types.m
+@HAVE_FORTRAN_EXAMPLES_TRUE@@WANT_OCTAVE_TESTING_TRUE@	$(OCTAVE) $(OCTAVE_FLAGS) sbtf.m > sbtf.F90
+
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@@WANT_OCTAVE_TESTING_TRUE@$(sbtc_SOURCES): sbtc.m sbtg.m sbtg-types.m
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@@WANT_OCTAVE_TESTING_TRUE@	$(OCTAVE) $(OCTAVE_FLAGS) sbtc.m > $(sbtc_SOURCES)
+@HAVE_FORTRAN_EXAMPLES_TRUE@@WANT_OCTAVE_TESTING_FALSE@sbtf.F90: sbtf.m
+@HAVE_FORTRAN_EXAMPLES_TRUE@@WANT_OCTAVE_TESTING_FALSE@	echo "int main(void){printf(\"sorry, you did not install octave, so the octave based tester is disabled\\n\");return 0;}" > sbtf.F90
+
+@WANT_OCTAVE_TESTING_FALSE@psb_mvsv_tester.f90: psb_mvsv_tester.m
+@WANT_OCTAVE_TESTING_FALSE@	echo "int main(void){printf(\"sorry, you did not install octave, so the octave based tester is disabled\\n\");return 0;}" > psb_mvsv_tester.f90
+
+@WANT_OCTAVE_TESTING_FALSE@psbtf.F90: psbtf.m
+@WANT_OCTAVE_TESTING_FALSE@	echo "int main(void){printf(\"sorry, you did not install octave, so the octave based tester is disabled\\n\");return 0;}" > psbtf.F90
+
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@@WANT_OCTAVE_TESTING_FALSE@$(sbtc_SOURCES): sbtc.m
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@@WANT_OCTAVE_TESTING_FALSE@	echo "int main(){printf(\"sorry, you did not install octave, so the octave based tester is disabled\\n\");return 0;}" > $(sbtc_SOURCES)
+
+@WANT_OCTAVE_TESTING_AND_INT_TRUE@$(ot_SOURCES): ot.m sbtg.m sbtg-types.m
+@WANT_OCTAVE_TESTING_AND_INT_TRUE@	$(OCTAVE) $(OCTAVE_FLAGS) ot.m $(WANT_ROW_UNLOOP_FACTORS) $(WANT_COLUMN_UNLOOP_FACTORS) $(WANT_MATRIX_ALL_OPS),$(WANT_MATRIX_ALL_META_OPS) $(WANT_MATRIX_OPS),$(WANT_MATRIX_ALL_META_OPS) main > ot.c
+@WANT_OCTAVE_TESTING_AND_INT_TRUE@	for o in `echo $(WANT_MATRIX_ALL_OPS),$(WANT_MATRIX_ALL_META_OPS) | sed "s/,/ /g"`  ; do $(OCTAVE) $(OCTAVE_FLAGS) ot.m $(WANT_ROW_UNLOOP_FACTORS) $(WANT_COLUMN_UNLOOP_FACTORS) $(WANT_MATRIX_ALL_OPS) $(WANT_MATRIX_OPS) $$o > ot-$$o.c  ; done
+@WANT_OCTAVE_TESTING_AND_INT_FALSE@$(ot_SOURCES):
+@WANT_OCTAVE_TESTING_AND_INT_FALSE@	echo "int main(){printf(\"sorry, you did not install octave, so the octave based tester is disabled\\n\");return 0;}" > ot.c
+@WANT_OCTAVE_TESTING_AND_INT_FALSE@	for o in `echo $(WANT_MATRIX_ALL_OPS),$(WANT_MATRIX_ALL_META_OPS) | sed "s/,/ /g"` ; do echo "static int f(){return 0;}" > ot-$$o.c  ; done
+
+rsb_strmif.c: rsb.h
+	( cat rsb_license_header.inc  ; \
+	echo '/* @cond INNERDOC */' ; \
+	echo '/* This file was generated by the Makefile */' ; \
+	echo '#include "rsb.h"' ; \
+	echo '#include "rsb_common.h"' ; \
+	echo '#include "rsb_do.h"' ; \
+	echo 'rsb_err_t rsb__do_get_matrix_info_from_string(const struct rsb_mtx_t *matrix, const rsb_char_t *mis, void* info, size_t buflen)' ; \
+	echo '{ rsb_err_t errval=RSB_ERR_BADARGS; if(!matrix || !mis || !info)goto err;' ; \
+	grep '^\(.define_\|.\)\ RSB_MIF_' rsb.h | sed 's/^. /#define /g;s/=0x/0x/g' | sed 's/\s\+/ /g;s/\/.*(//g;s/).*\///g;s/\/.*(//g;s/).*\///g;' | cut -d ' ' -f 2,4 | sed 's/^\(\S\+\) \(\S\+\)/if(0 == strcmp(mis,"\1")){ errval = rsb__do_get_matrix_info(matrix,\1,info,buflen); goto done;}/g;'; \
+	echo 'done:';	\
+	echo 'return errval;';	\
+	echo 'err: return RSB_ERR_GENERIC_ERROR;';	\
+	echo '}'; \
+	echo '/* @endcond */' ; \
+	) > $@
+
+rsb_stropts.c: rsb.h
+	( cat rsb_license_header.inc  ; \
+	echo '/* @cond INNERDOC */' ; \
+	echo '/* This file was generated by the Makefile */' ; \
+	echo '#include "rsb.h"' ; \
+	echo '#include "rsb_common.h"' ; \
+	echo 'rsb_err_t rsb__stropts_set(const rsb_char_t *opn, const rsb_char_t *arg)' ; \
+	echo '{ rsb_err_t errval=RSB_ERR_NO_ERROR; if(!opn || !arg)goto err;' ; \
+	grep '^\(.define\|.\)\ RSB_IO_WANT_' rsb.h | sed 's/^. /#define /g;s/=0x/0x/g' | grep 'rsb_int_t\|rsb_char_t\|rsb_real_t'|sed 's/\s\+/ /g;s/\/.*(//g;s/).*\///g;' | cut -d ' ' -f 2,4,5 | sed 's/^\(\S\+\) \(const \)*\(\S\+\)/if(0 == strcmp(opn,"\1")){ \2\3 RSB_DO_REINIT_SINGLE_VALUE_SET(\1,\&val,errval); goto done;}/g; s/\(rsb_char_t\*\)/\1 val = arg;/g;s/\(rsb_int_t\)/\1 val = rsb__util_atoi(arg);/g; s/\(rsb_real_t\)/\1 val = rsb__util_atof(arg);/g'; \
+	echo 'done:';	\
+	echo 'return errval;';	\
+	echo 'err: return RSB_ERR_GENERIC_ERROR;';	\
+	echo '}'; \
+	echo '/* @endcond */' ; \
+	) > $@
+
+.PHONY: feedback
+feedback: rsbench$(EXEEXT)
+	./rsbench$(EXEEXT) -O r 
+rsb_mergesort.c rsb_permute.c rsb_krnl_vb.c rsb_krnl_vb.h rsb_unroll.h rsb_krnl_lb.h rsb_krnl_lb.c rsb_krnl.c rsb_bench.c rsb_krnl_bcss_spsv_u.c rsb_krnl_bcss_spmv_u.c rsb_krnl_bcss_misc_u.c rsb_krnl_bcss.c rsb_krnl_bcoo.c rsb_krnl_bcoo_spmv_u.c rsb_krnl.c: $(RSB_KERNELS_MACROS)
+
+blas_sparse.h: $(srcdir)/rsb_libspblas.h
+	cp -p $< $@ 
+
+#rsb-config.h: config.h
+#	( cat rsb_license_header.inc                                                                            ; \
+#	echo '/* This header file is not intended to be included librsb programs: it is only for inspection. */'; \
+#	echo '#ifndef RSB_CONFIG_H_INCLUDED'									; \
+#	echo '#define RSB_CONFIG_H_INCLUDED'									; \
+#	cat  $< | sed 's/^#define /#define RSB_/g;s/ RSB_RSB_/ RSB_/g'   					; \
+#	echo '/* #endif RSB_CONFIG_H_INCLUDED */'								; \
+#	echo '#endif'												)> $@
+
+@WANT_CXX_TEST_RSBENCH_TRUE@rsbenchxx.cpp: $(srcdir)/rsbench.c
+@WANT_CXX_TEST_RSBENCH_TRUE@	cp $(srcdir)/rsbench.c rsbenchxx.cpp
+
+@HAVE_M4_TRUE@sbtg-types.m: sbtg-types.m4 rsb_types.h
+@HAVE_M4_TRUE@	$(M4) $(M4_FLAGS) $< > $@
+
+@HAVE_M4_TRUE@.m4.c: $(RSB_PREM4HEADERS)
+@HAVE_M4_TRUE@	$(M4) $(M4_FLAGS) $< > $@
+
+@HAVE_M4_TRUE@.m4.h: $(RSB_PREM4HEADERS)
+@HAVE_M4_TRUE@	$(M4) $(M4_FLAGS) -D ONLY_WANT_HEADERS=1 $< > $@
+
+@HAVE_M4_TRUE@.m4.F90:
+@HAVE_M4_TRUE@	$(M4) $(M4_FLAGS) $< > $@
+
+# AM_LDFLAGS=$(LIBS)
+
+.PHONY: devsplinttest
+devsplinttest:
+	for f in $(librsb_nounroll_la_SOURCES) $(librsb_base_la_SOURCES) ; do splint -I`gcc -print-search-dirs | grep install: | sed s/install:.//g`  -preproc -DHAVE_CONFIG_H $$f > $$f-splint.txt ;done
+
+.PHONY: devtests
+devtests:
+	scripts/devtests.sh
+	@echo "	[*] dev test terminated successfully !"
+
+.PHONY: test
+test: tests
+
+scripts/readme-tests.sh: README
+	echo 'if test x"$${srcdir}" = x ; then srcdir=. ; fi' > $@
+	LANG=C grep '^ *\(make \)**\./\(rsbench\|sbtc\|sbtf\)\|\(^ *test\> -f\)' $< | sed 's/\(rsbench\|sbtc\|sbtf\)/\1'"$(EXEEXT)"'/g' | sed 's/#.*$$//g;s/$$/ || exit 255/g' | sed 's/A.mtx/$${srcdir}\/A.mtx/g' >> $@
+
+.PHONY: mtests
+mtests: rsbench$(EXEEXT) $(srcdir)/scripts/readme-tests.sh
+	srcdir=$(srcdir) $(SHELL) $(srcdir)/scripts/readme-tests.sh
+	srcdir=$(srcdir) $(SHELL) $(srcdir)/scripts/doc-tests.sh
+	if ./rsbench$(EXEEXT)  -C | grep 'type char codes.*:*[SDCZ]' ; then cd examples ; $(MAKE) tests ; fi
+
+.PHONY: qtests
+qtests:	all
+	@echo " [*] beginning quick test..."
+	$(MAKE) mtests -C .
+	./rsbench$(EXEEXT) -Q 30.0Q
+	@echo " [*] quick test terminated successfully !"
+
+.PHONY: qqtests
+qqtests:	all
+	@echo " [*] beginning quick quiet test..."
+	$(MAKE) mtests -C . > /dev/null 2> /dev/null
+	./rsbench$(EXEEXT) -Q 30.0Q
+	@echo " [*] quick test terminated successfully !"
+
+.PHONY: tests
+tests:	$(EXTRA_LIBRARIES) ot$(EXEEXT) $(EXTRAPROGRAMSC) rsbench$(EXEEXT)
+	$(MAKE) qtests -C .
+	srcdir=$(srcdir) $(SHELL) $(srcdir)/scripts/test.sh
+	./ot$(EXEEXT)
+@HAVE_SPARSE_BLAS_INTERFACE_TRUE@	./sbtc$(EXEEXT)
+	@echo "	[*] full test terminated successfully !"
+
+.PHONY: btests
+btests: dist
+	scripts/dev_brute_tests.sh $(distdir).tar.gz
+	@echo "	[*] brute force package testing terminated successfully !"
+
+.PHONY: wc
+wc:
+	wc *.c
+	wc *.h
+	wc *.m4
+	wc *.m
+	cat *.m *.m4 *.c *.h |wc
+	cat `svn ls|grep .c$$` | wc
+	cat `svn ls|grep .h$$` | wc
+	cat `svn ls|grep .m4$$` | wc
+	cat `svn ls|grep .m$$` | wc
+	cat `svn ls|grep '\.\(m\|m4\|c\|h\)$$'` | wc
+
+.PHONY: doxonly
+doxonly:
+	$(MAKE) makedox -C doc
+
+.PHONY: dox
+dox: doxonly
+	$(MAKE) && cd examples && $(MAKE) 
+
+.PHONY: install-exec-hook
+install-exec-hook:
+	$(mkdir_p) "$(DESTDIR)$(docdir)"
+@HAVE_PKGCONFIG_INSTALL_TRUE@	$(mkdir_p) "$(DESTDIR)$(libdir)/pkgconfig"
+@HAVE_PKGCONFIG_INSTALL_TRUE@	$(INSTALL_DATA) librsb.pc "$(DESTDIR)$(libdir)/pkgconfig/"
+
+hinfo.log: all
+	scripts/hinfo.sh 2>&1 | cat >  hinfo.log
+
+# NOTE: The following target is only for experimental purposes.
+#shared: $(top_builddir)/librsb.so
+#$(top_builddir)/librsb.so: $(am_librsb_la_OBJECTS)
+#	$(CC) -o $@ -shared $(am_librsb_la_OBJECTS)
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/NEWS b/NEWS
new file mode 100644
index 0000000..b8c5265
--- /dev/null
+++ b/NEWS
@@ -0,0 +1,225 @@
+
+List of user visible changes for each librsb release.
+
+ librsb Version 1.2.0-rc5 (20160902), changes:
+ - Fixed EPS rendering of matrices, e.g.:
+    "./rsbench  --plot-matrix -aRzd -f matrix.mtx > matrix.eps"
+ - Will detect MINGW environment via the __MINGW32__ symbol and add 
+   -D__USE_MINGW_ANSI_STDIO=1 to circumvent its C99 incompatibilities.
+ - fix: the build used to break when --enable-allocator-wrapper was not given
+   and neither posix_memalign() nor memalign() was available;
+   now malloc() is used instead (r3486).
+ - fix: the memory hierarchy info string set via --with-memhinfo used to be
+   wrongly overridden by any auto-detected value.
+
+ librsb Version 1.2.0-rc4 (20160805), changes:
+ - librsb-config now separates emitted items with spaces and, on --static,
+   prints the static library file path explicitly (see the usage sketch at
+   the end of this list).
+ - fix: rsbench -M was requesting wrong alignment from posix_memalign
+ - internally using libtool for everything:
+   - obsoleted the --enable-shlib-linked-examples option; from now on, please
+     use --disable-shared / --enable-shared and --disable-static /
+     --enable-static to override the defaults.
+ - librsb-config new options: --cc --fc --cxx to get the librsb compilers
+ - internally using memset instead of bzero (deprecated since POSIX.2004).
+ - fix: example examples/fortran_rsb_fi.F90 had two consecutive rsb_lib_exit.
+ - fix: binary I/O (-b/-w) test in test.sh used to ignore missing XDR support.
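+
+ A minimal usage sketch of librsb-config (the option names are those listed
+ above and in the README; the combinations shown are merely illustrative):
+
+    librsb-config --cc     # print the C compiler librsb was built with
+    librsb-config --fc     # print the Fortran compiler used
+    librsb-config --static --ldflags --extra_libs   # static linking options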
+
+ librsb Version 1.2.0-rc3 (20160505), changes:
+ - Extension: if parameter flagsA of mtx_set_vals() has RSB_FLAG_DUPLICATES_SUM
+   then values will be summed up into the matrix.
+ - Bugfix: rsb_mtx_get_nrm on symmetric matrices was buggy.
+ - Bugfix: rsb_spsm potentially wrong in --enable-openmp and (nrhs>1).
+           (ussm affected)
+ - Bugfix: rsb_spsm wrong in --disable-openmp version and (nrhs>1).
+           (ussm affected)
+ - Bugfix: rsb_spsm used to scale only first rhs when (*alphap!=1 and nrhs>1).
+           (ussm affected)
+ - Bugfix: rsb_spsm used to solve only first rhs when (y != x).
+           (ussm not affected)
+ - Bugfix: rsb_spmm used to scale only first rhs when (*betap!=1 and nrhs>1).
+           (usmm not affected)
+ - Bugfix: rsb_tune_spmm/rsb_tune_spsm returned (false positive) error on
+   ( mtxAp != NULL && mtxOpp != NULL ) rather than on
+   ( mtxAp != NULL && mtxOpp != NULL && *mtxOpp != NULL ).
+ - Will use memset() on systems with no bzero() (e.g. mingw).
+
+ librsb Version 1.2.0-rc2 (20151025), changes:
+ - Bugfix: rsb_mtx_add_to_dense was using submatrix CSR arrays as if they were
+   COO, thus producing wrong output.
+ - Bugfix: error message for RSB_ERR_NO_STREAM_OUTPUT_CONFIGURED_OUT was wrong.
+ - Bugfix: printout of sysconf()-detected L4 cache information.
+ - Bugfix: fixed broken build when sysconf() missing.
+ - Experimental --with-hwloc switch, recommended on cygwin.
+ - Bugfix: fixed broken build and qtests when using separate build, src dirs.
+
+ librsb Version 1.2.0, changes:
+ - general improvements:
+  * NUMA-aware tuning and allocations
+  * more documentation comments in rsb.F90
+  * better performance of rsb_spsm when nrhs>1
+  * faster rsb-to-sorted-coo conversion in rsb_mtx_switch_to_coo
+  * enabled out-of-tree builds (e.g. one distclean dir and many build dirs)
+  * new autotuning mechanisms behind rsb_tune_spmm/rsb_tune_spsm
+  * fewer compile time warnings from automatically generated code, e.g. in
+    rsb_krnl.c and rsb_libspblas.c
+ - programming interface (API) changes: 
+  * bugfix w.r.t. 1.1: usmm()/ussm() were not declared in rsb_blas_sparse.F90
+  * introduced extension BLAS property blas_rsb_autotune_next_operation
+    to trigger auto tuning at the next usmv/usmm call
+  * eliminated RSB_FLAG_RECURSIVE_DOUBLE_DETECTED_CACHE and
+               RSB_FLAG_RECURSIVE_HALF_DETECTED_CACHE
+  * rsb_load_spblas_matrix_file_as_matrix_market() now takes a
+    typecode argument. Sets either blas_upper_triangular,
+    blas_lower_triangular, blas_upper_hermitian, blas_lower_hermitian,
+    blas_upper_symmetric or blas_lower_symmetric property according to
+    the loaded file.
+  * properly using INTEGER(C_SIGNED_CHAR) for 'typecode' arguments in rsb.F90.
+  * rsb.F90 change: all interfaces to functions taking INTEGER arrays now
+    require them to be declared as TARGET and passed as pointer
+    (via C_LOC()), just as e.g. VA.
+  * introduced the RSB_MARF_EPS_L flag for rendering Encapsulated PostScript
+    (EPS) matrices (with rsb_mtx_rndr()) and having a label included
+ - functionality changes: 
+  * if called after uscr_end, uscr_insert_entries (and similar) will either
+    add/overwrite, according to whether the blas_rsb_duplicates_ovw or 
+    blas_rsb_duplicates_sum property has been set just after uscr_end;
+    default is blas_rsb_duplicates_ovw.
+  * if configured without memory wrapper, requests for 
+    RSB_IO_WANT_MEM_ALLOC_TOT and RSB_IO_WANT_MEM_ALLOC_CNT will
+    also give an error
+  * now rsb_file_mtx_load will load Matrix Market files having nnz=0
+ - bug fixes: 
+  * non-square sparse-sparse matrices multiply (rsb_spmsp) and sum
+    (rsb_sppsp) had wrong conformance check and potential off-limits
+    writes
+  * usmm() used to have beta=0 as default; changed this to be 1 according to
+    the Sparse BLAS standard
+  * rsb_mtx_clone(): alphap now uses source matrix typecode
+  * rsb_util_sort_row_major_bucket_based_parallel() bugfix
+  * RSB_IO_WANT_VERBOSE_TUNING used to depend on RSB_WANT_ALLOCATOR_LIMITS
+ - rsbench (librsb benchmarking program (internals)) changes:
+  * --incx and --incy now accept lists of integers; e.g. "1" or "1,2,4"
+  * rsb_mtx_get_rows_sparse() was not handling RSB_TRANSPOSITION_C and
+    RSB_TRANSPOSITION_T correctly until 1.1-rc2. Fixed now.
+  * added --only-upper-triangle
+  * if unspecified, default --alpha and beta now are set to 1.0
+  * when using --incx and --incy with list arguments, with
+    --one-nonunit-incx-incy-nrhs-per-type rsbench will skip benchmarking
+    combinations with both incX and incY > 1
+  * --also-transpose will skip transposed multiply of symmetric matrices
+  * --no-want-ancillary-execs is now default
+  * in an auto-tuning scan, --impatient will print partial performance
+    results and update them frequently.
+  * small fix in ancillary (--want-ancillary-execs) time measurements
+  * --types is a new alias for --type, and 'all' for ':' (all configured types)
+  * will tolerate non-existing or unreadable matrix files by just skipping them
+  * --reuse-io-arrays is now default (see --no-reuse-io-arrays to disable this)
+    and will avoid repeated file loading for the same matrix.
+  * --want-mkl-autotune 0/1 will disable/enable MKL autotuning in addition to
+    the RSB autotuning experiment.
+  * added:
+     --skip-loading-if-matching-regex
+     --skip-loading-symmetric-matrices
+     --skip-loading-unsymmetric-matrices
+     --skip-loading-hermitian-matrices
+     --skip-loading-not-unsymmetric-matrices
+     --skip-loading-if-more-nnz-matrices 
+     --skip-loading-if-less-nnz-matrices
+     --skip-loading-if-more-filesize-kb-matrices
+     --skip-loading-if-matching-regex and --skip-loading-if-matching-substr
+  * if -n <threads> unspecified, will use omp_get_max_threads()
+    to determine the threads count.
+  * will terminate gracefully after SIGINT (CTRL-c from the keyboard)
+  * producing performance record files with
+      --write-performance-record <file>
+    ( '' will ask for an automatically generated file name)
+  * reading back performance record files with --read-performance-record
+  * writing no performance record file with --write-no-performance-record
+  * --max-runtime will make the program terminate gracefully after a specified 
+    maximal amount of time
+  * rsbench: LaTeX output of performance records; see options
+    --write-performance-record and --read-performance-record
+  * renamed --out-res to --out-lhs and --dump-n-res-elements to
+    --dump-n-lhs-elements
+  * --want-no-autotune
+  * expanded examples
+  * ...
+
+ librsb Version 1.1.0, library changes:
+
+  * introduced rsb_tune_spmm: autotuning for rsb_spmv/rsb_spmm.
+  * introduced rsb_tune_spsm: autotuning for rsb_spsv/rsb_spsm.
+  * extensions for autotuning in the sparse blas interface:
+     blas_rsb_spmv_autotuning_on,   blas_rsb_spmv_autotuning_off,
+     blas_rsb_spmv_n_autotuning_on, blas_rsb_spmv_n_autotuning_off,
+     blas_rsb_spmv_t_autotuning_on, blas_rsb_spmv_t_autotuning_off.
+  * RSB_IO_WANT_VERBOSE_TUNING option will enable verbose autotuning
+  * introduced rsb_file_vec_save
+  * configure option --enable-rsb-num-threads enables the user to specify the
+    desired rsb_spmv/rsb_spmm threads count (if >0) via the RSB_NUM_THREADS
+    environment variable; even the value specified via
+    RSB_IO_WANT_EXECUTING_THREADS will be overridden (see the sketch at the
+    end of this section).
+  * deprecated rsb_file_mtx_get_dimensions for rsb_file_mtx_get_dims.
+  * deprecated rsb_mtx_get_norm for rsb_mtx_get_nrm.
+  * deprecated rsb_mtx_upd_values for rsb_mtx_upd_vals.
+  * deprecated rsb_file_mtx_render for rsb_file_mtx_rndr.
+  * deprecated rsb_mtx_get_values for rsb_mtx_get_vals.
+  * deprecated rsb_mtx_set_values for rsb_mtx_set_vals.
+  * deprecated rsb_mtx_get_preconditioner for rsb_mtx_get_prec.
+  * introduced rsb_lib_set_opt and rsb_lib_get_opt as a replacement to
+    now deprecated RSB_REINIT_SINGLE_VALUE_C_IOP RSB_REINIT_SINGLE_VALUE
+    RSB_REINIT_SINGLE_VALUE_C_IOP, RSB_REINIT_SINGLE_VALUE_SET and
+    RSB_REINIT_SINGLE_VALUE_GET.
+  * introduced an ISO-C-BINDING interface to rsb.h (rsb.F03): consequently,
+    the --disable-fortran-interface configure option is now unnecessary, and
+    fortran programs can use rsb_lib_exit/rsb_lib_init instead of
+    rsb_lib_exit_np/rsb_lib_init_np.
+  * significantly improved documentation.
+  * --enable-librsb-stats configure option will enable collection of time
+    spent in librsb, together with RSB_IO_WANT_LIBRSB_ETIME .
+  * --enable-zero-division-checks-on-spsm renamed to
+    --enable-zero-division-checks-on-solve.
+  * rsb.mod will be optionally installed (separate from blas_sparse.mod).
+  * producing a librsb.pc file for the pkg-config system.
+  * improved performance of multi-vector multiplication
+    (leaf matrices will step once in each multi-vector).
+  * introduced rsb_mtx_rndr for rendering matrix structures to files.
+  * now using parallel scaling of output vector in Y <- beta Y + .. operations.
+  * extensions for handling duplicates in the sparse blas interface:
+    blas_rsb_duplicates_ovw, blas_rsb_duplicates_sum.
+  * introduced the RSB_MARF_EPS flag for rendering matrices as PostScript.
+  * introduced the RSB_CHAR_AS_TRANSPOSITION macro.
+  * introduced rsb_blas_get_mtx to enable rsb.h functions on blas_sparse.h
+    matrices.
+  * introduced Sparse BLAS extra properties for control/inquiry:
+    blas_rsb_rep_csr, blas_rsb_rep_coo, blas_rsb_rep_rsb.
+  * debug option to limit count and volume of memory allocations, with
+    RSB_IO_WANT_MAX_MEMORY_ALLOCATIONS and RSB_IO_WANT_MAX_MEMORY_ALLOCATED
+    (depending on --enable-allocator-wrapper).
+  * configure switch to select maximal threads count (--with-max-threads).
+  * <rsb-types.h> renamed to <rsb_types.h>.
+  * each librsb source file is prefixed by 'rsb' (less probability of object
+    file name clash in large applications).
+  * RSB_IO_* macros are now declared as enum rsb_opt_t.
+  * Doxygen will be only invoked to build documentation if configure switch 
+    --enable-doc-build has been enabled.
+  * RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE has been extended to Sparse BLAS
+    interface to librsb.
+  * Changes pertaining only to the rsbench benchmark program:
+   * rsbench benchmarks data-footprint equivalent GEMV and GEMM.
+   * --read-as-binary/--write-as-binary options to rsbench (not yet in rsb.h).
+   * --discard-read-zeros option to rsbench (to add RSB_FLAG_DISCARD_ZEROS to
+     the matrix flags).
+   * --want-perf-counters option to rsbench (tested with PAPI 5.3; will enable
+     additional rsb-originating performance counter statistics to be dumped).
+   * zero-dimensioned matrices are allowed.
+   * changed row pointers parameter of rsb_mtx_alloc_from_csr_inplace to
+     rsb_nnz_idx_t*. 
+   * PostScript (RSB_MARF_EPS, RSB_MARF_EPS_S, RSB_MARF_EPS_B) dump with
+     rsb_mtx_rndr are more compact now
+   * to enable or disable openmp, use either --enable-openmp or --disable-openmp
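+
+ A minimal sketch of the RSB_NUM_THREADS override described above (this
+ assumes librsb was configured with --enable-rsb-num-threads; the rsbench
+ invocation is merely illustrative):
+
+    export RSB_NUM_THREADS=4    # overrides even RSB_IO_WANT_EXECUTING_THREADS
+    ./rsbench -oa -Ob -f A.mtx  # any program linked to librsb is affected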
+
+ librsb Version 1.0.0
+
+  * first public release
+
diff --git a/README b/README
new file mode 100644
index 0000000..05d9bc5
--- /dev/null
+++ b/README
@@ -0,0 +1,678 @@
+
+================================================================================
+
+ librsb README file 
+ 
+================================================================================
+	librsb - Recursive Sparse Blocks  Matrix computations library
+
+ A library for sparse matrix computations featuring the Recursive Sparse Blocks
+ (RSB) matrix format. This format allows cache efficient and multi-threaded
+ (that is, shared memory parallel) operations on large sparse matrices.
+ It provides the most common operations necessary to iterative solvers, like
+ matrix-vector multiplication, triangular solution, rows/columns scaling, 
+ diagonal extraction/setting, block extraction, norm computation, and format
+ conversion.  The RSB format is especially well suited for symmetric and
+ transposed multiplication variants.
+ Most of the numerical kernel code is auto-generated, and the supported
+ numerical types can be chosen by the user at build time.
+ This library is dual-interfaced: it can be used via the native (`RSB') 
+ interface (with identifiers prefixed by `rsb_' or `RSB_'), and a Sparse BLAS
+ one (`BLAS_').
+ The `RSB' interface can be used from C (rsb.h header) or via modern Fortran
+ ISO-C-BINDING ("rsb" module).
+ The Sparse BLAS interface is usable from C via the blas_sparse.h header, and
+ from Fortran via the "blas_sparse" module.
+
+================================================================================
+
+ This (README) is the first document you should read about librsb.
+ It contains basic instructions to generate, compile, install, and use librsb.
+ The reference documentation for programming with librsb is contained in the
+ ./doc/ source package subdirectory and when installed, placed in the
+ appropriate system directories as both Unix man pages (./doc/man/) and HTML
+ (./doc/html/).
+ If you are a user of a previous version of librsb, see the NEWS file listing
+ the changes.
+ After having read this file you are welcome to ask questions to the author.
+
+--------------------------------------------------------------------------------
+		INTRODUCTION
+--------------------------------------------------------------------------------
+
+ librsb is a library for sparse matrix algebra computations.
+ It is stand-alone: does not require any other library to build or work.
+ It is shared memory parallel, using the OpenMP standard.
+ It focuses on high performance and is configurable at build time.
+ A part of the library code is automatically generated from templates and
+ macros, on the basis of the numerical types a user wishes to have supported.
+ The configure script options (self-documented; not covered here) provide
+ many build-time options, especially with respect to debugging and additional
+ verbosity.
+
+   		INTRODUCTION
+   		MAIN ASPECTS, FEATURES
+   		QUICK INSTALL AND TESTING
+   		LIBRARY CONFIGURATION, GENERATION, BUILD 
+   		INSTALLATION, USAGE
+   		EXECUTION AND ENVIRONMENT VARIABLES
+   		DOCUMENTATION, EXAMPLES AND PROGRAMMING GUIDELINES
+   		CONFIGURE, BUILD AND BENCHMARK EXAMPLE
+   		COMPATIBILITY
+   		FAQ
+   		POSSIBLE / POTENTIAL FUTURE FEATURES / ENHANCEMENTS
+   		ABOUT THE INTERNALS
+   		BUGS
+   		CONTACTS
+   		CREDITS
+   		LICENSE
+
+--------------------------------------------------------------------------------
+		MAIN ASPECTS, FEATURES
+--------------------------------------------------------------------------------
+
+ * very efficient (see the website for benchmark performance results)
+ * threads/structure autotuning feature for additional performance
+ * support for multiple numerical data types which can be turned
+   on/off individually (e.g.:double, float, int, char, complex, double complex)
+   at configure time
+ * a sparse BLAS interface for matrix assembly, computation, destruction
+ * a code generator for its inner CSR, COO computational kernels
+ * based on a recursive memory layout of submatrices
+ * enough functionality to implement the most common iterative methods 
+ * basic index types overflow checks and input sanitizing
+ * parallel matrix assembly and conversion routines
+ * auxiliary functions for matrix I/O (using the "Matrix Market" format:
+   real, integer, complex and pattern are supported)
+ * implemented as a building block for solvers like e.g. PSBLAS
+ * dual implementation of kernels: with "full word" and "half word" indices
+ * thread level (shared memory) parallelism by using OpenMP
+ * basic (unoptimized) sparse matrices multiplication and summation
+ * interactive usage possible by using the "sparsersb" plugin for GNU Octave 
+ * complete with examples and a test suite
+ * see the NEWS text file for a list of changes from version to version
+
+--------------------------------------------------------------------------------
+		QUICK INSTALL AND TESTING EXAMPLE
+--------------------------------------------------------------------------------
+	
+	# unpack the archives or get them from the repositories
+	./autogen.sh	# only necessary if  configure  file does not exist
+	./configure --prefix=$HOME/local/librsb/
+	# see also ./configure --help for many other options
+	# librsb has been configured
+	make help	# provides information
+	make		# build the library and test programs
+	# librsb has been built
+	make  qtests	# perform brief sanity tests
+	make qqtests	# the same, but with less output
+	make  tests	# perform extended sanity tests
+	ls examples/*.c   # here are editable examples; build them with 'make'
+	ls examples/*.F90 # here are editable examples; build them with 'make'
+	make install	# install to $HOME/local/librsb/
+	# librsb has been installed; now you can write your own programs
+
+	# for instance, try using one of the librsb examples as a model: 
+	mkdir -p ~/rsb-test/ && cp examples/hello.c ~/rsb-test/myrsb.c
+	# adapt hello.c to your needs and recompile:
+	cd ~/rsb-test/
+	export PATH=$PATH:$HOME/local/librsb/bin/
+	gcc `librsb-config --I_opts` -c myrsb.c
+	gcc -o myrsb myrsb.o `librsb-config --static --ldflags --extra_libs`
+	./myrsb         # run your program
+
+--------------------------------------------------------------------------------
+ 		LIBRARY CONFIGURATION, GENERATION, BUILD 
+--------------------------------------------------------------------------------
+
+ This library consists of C code (C99), partially generated by M4 macros.
+ The user wishing to build librsb can specify different initial parameters 
+ determining the supported matrix operations, inner explicit loop unrolling
+ factors, available numerical data types and code variations.
+ These parameters have to be specified to the  ./configure  script.
+
+ The M4 macros are used at build time to generate specialized C code.
+ If building from repository sources, an M4 preprocessor is required.
+ Otherwise, it is necessary only when specifying ./configure  options affecting
+ code generation (see ./configure --help).
+ The M4 preprocessor executable can be specified explicitly to ./configure
+ with the M4 environment variable or via the --with-m4 option.
+ After invoking ./configure  and before running 'make' it is possible to invoke
+ 'make cleanall' to make sure that auto-generated code is deleted first.
+ 
+ At configure time, it is very important that the configure script is able to
+ detect the system cache memory hierarchy parameters.
+ If it fails, you are encouraged to specify the cache parameters by
+ re-running ./configure  and setting the --with-memhinfo  option.
+ For instance:
+    --with-memhinfo=L2:4/64/512K,L1:8/64/24K 
+ These values need not be exact: approximate values also work.
+ Yet they may be critical to library performance; for this reason you are
+ allowed to override this default in a variety of ways.
+ Read further to get a description of the memory hierarchy info string format.
+
+ If you want to build Fortran examples, be sure of invoking ./configure with the
+ --enable-fortran-examples option.  You can specify the desired Fortran compiler
+ and compilation flags via the FC and FCFLAGS variables.
+
+ Set the CPPFLAGS variable at configure time to provide additional compilation
+ flags; e.g., to let configure detect necessary headers in a non-standard
+ location.
+ Similarly, the LDFLAGS variable can be set to contain link time options; so 
+ you can use it to specify libraries to be linked to librsb examples.
+ Invoke ./configure --help  for details of other relevant environment variables.
+ 
+ After ./configure  you will see information about the current build options;
+ if satisfied, invoke 'make' to build the library and the examples.
+
+ To check library consistency, run:
+
+   make qtests # takes a short time
+or
+   make tests  # takes longer, more complete
+ 
+ If these tests terminate with an error code, it is highly likely that it has
+ been caused by a bug in librsb, so please tell us (see BUGS).
+
+--------------------------------------------------------------------------------
+		INSTALLATION, USAGE
+--------------------------------------------------------------------------------
+ 
+ Once built, the library can be installed with:
+
+	su -c 'make install'	#'make install' installs the library system-wide
+
+ This installs header files, binary library files, and the librsb-config
+ program.
+ Then, application C programs should include the rsb.h header file with
+	#include <rsb.h>
+ and be compiled using include options as generated by the output of 
+  	`librsb-config --I_opts`.
+
+ To link to the librsb.a static library file and its dependencies one can use 
+ the output of `librsb-config --static --ldflags --extra_libs`.
+ 
+ Only static libraries are built currently.
+
+ If you wish to use the library without installing it in the system directories,
+ make sure to include the <rsb.h> header file and link to the librsb.a library
+ and all the necessary additional libraries.  
+
+ Users of pkg-config can manually copy the librsb.pc file to the appropriate
+ directory to use pkg-config in a way similar to librsb-config.
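+
+ As a minimal sketch of what such a program can look like (modeled on the
+ spirit of examples/hello.c; the matrix data here is made up, and rsb.h
+ remains the authoritative reference for the prototypes):
+
+	/* sketch: assemble a tiny COO matrix and multiply it with librsb */
+	#include <rsb.h>
+	#include <stdio.h>
+
+	int main(void)
+	{
+		/* a 2x2 sparse matrix with 3 nonzeroes, in COO form */
+		const rsb_coo_idx_t IA[] = {0, 1, 1}, JA[] = {0, 0, 1};
+		const double VA[] = {1.0, 2.0, 3.0}, X[] = {1.0, 1.0};
+		const double alpha = 1.0, beta = 0.0;
+		double Y[] = {0.0, 0.0};
+		struct rsb_mtx_t *mtxAp = NULL;
+		rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+		if (rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
+			return 1;
+		mtxAp = rsb_mtx_alloc_from_coo_const(VA, IA, JA, 3,
+				RSB_NUMERICAL_TYPE_DOUBLE, 2, 2, 1, 1,
+				RSB_FLAG_NOFLAGS, &errval);
+		if (mtxAp == NULL)
+			return 1;
+		/* Y := alpha * A * X + beta * Y */
+		rsb_spmv(RSB_TRANSPOSITION_N, &alpha, mtxAp, X, 1, &beta, Y, 1);
+		printf("Y = [ %g %g ]\n", Y[0], Y[1]);
+		rsb_mtx_free(mtxAp);
+		rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
+		return 0;
+	}
+
+ Compiled and linked as shown above, this should print "Y = [ 1 5 ]".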
+
+--------------------------------------------------------------------------------
+		EXECUTION AND ENVIRONMENT VARIABLES
+--------------------------------------------------------------------------------
+ 
+ By default, the only environment variable read by librsb is
+ RSB_USER_SET_MEM_HIERARCHY_INFO, which overrides configure-time and
+ auto-detected settings about the memory hierarchy.
+
+ Its value is specified as n concatenated strings of the form:
+	 L<l>:<a_l>/<b_l>/<c_l>
+ These strings are separated by commas (","), and each is made up of
+ substrings, where:
+   n     is the height of the cache hierarchy, from 1 upwards;
+   <l>   is the cache level, from 1 upwards;
+   <a_l> is the cache associativity;
+   <b_l> is the cache block size (cache line length);
+   <c_l> is the cache capacity (size).
+
+ The <a_l>, <b_l>, <c_l> substrings consist of an integer number with an
+ optional multiplier character among {K,M,G} (to specify respectively 2^10,
+ 2^20 or 2^30).
+ Any value is permitted, as long as it is positive. Higher level cache
+ capacities are required to be larger than lower level ones.
+ Example strings and usage in the BASH shell:
+  RSB_USER_SET_MEM_HIERARCHY_INFO="L2:4/64/512K,L1:8/64/32K"  <your program>
+  RSB_USER_SET_MEM_HIERARCHY_INFO="L1:8/128/2M"  <your program>
+
+ You may explicitly set this environment variable to fine-tune the library
+ operation.
+ If you do not, runtime detection will be attempted; if that fails,
+ a value detected at configure time will be used.
+ In some cases the configure time detection fails (e.g.: on very recent
+ systems); this is not a fault of librsb but rather of the underlying
+ environment.
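+
+ For instance, a program may set this variable for itself before initializing
+ the library. The following is a minimal sketch; it assumes a POSIX setenv()
+ and that librsb reads the variable at rsb_lib_init() time:
+
+	#include <rsb.h>
+	#include <stdlib.h>	/* setenv() (POSIX) */
+
+	int main(void)
+	{
+		/* must happen before rsb_lib_init() for librsb to see it */
+		setenv("RSB_USER_SET_MEM_HIERARCHY_INFO",
+		       "L2:4/64/512K,L1:8/64/32K", 1);
+		rsb_lib_init(RSB_NULL_INIT_OPTIONS);
+		/* ... use the library ... */
+		rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
+		return 0;
+	}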
+
+ A default value for this memory hierarchy info string can be set at configure
+ time by using the  --with-memhinfo  configure option.
+
+ If you don't know values for these parameters, you can run the
+  ./scripts/linux-sys-cache.sh 
+ script to try to get a guess on a Linux system.
+ On other systems, please consult the available documentation.
+ E.g.: On Mac OS 10.6 it is possible to get this information by invoking
+  "sysctl -a | grep cache".
+  
+ The librsb library achieves parallelism by using OpenMP.
+ Even though librsb does not directly read any OpenMP environment variable,
+ it is still affected by them (e.g. the OMP_NUM_THREADS environment variable
+ specifying the number of parallel threads).
+ Please consult your compiler's OpenMP implementation documentation
+ for more information.
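+
+ If your build supports it, a thread count can also be requested from within
+ a program. A sketch follows; it assumes the RSB_REINIT_SINGLE_VALUE_SET
+ macro and the RSB_IO_WANT_EXECUTING_THREADS option are provided by your
+ rsb.h:
+
+	#include <rsb.h>
+
+	int main(void)
+	{
+		rsb_int_t nt = 4;	/* requested number of threads */
+		rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+		rsb_lib_init(RSB_NULL_INIT_OPTIONS);
+		/* ask librsb to use nt executing threads */
+		RSB_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_EXECUTING_THREADS,
+				&nt, errval);
+		/* ... */
+		rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
+		return 0;
+	}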
+
+--------------------------------------------------------------------------------
+		DOCUMENTATION, EXAMPLES AND PROGRAMMING GUIDELINES
+--------------------------------------------------------------------------------
+
+ The API is entirely specified in the <rsb.h> header file. This is the only
+ header file the application developer should ever include to use the library.
+ 
+ The complete API documentation is generated by the doxygen tool in the doc
+ directory in both HTML and man formats, and gets installed with 'make install'.
+ If you wish not to use doxygen (or don't have it) you can skip documentation
+ generation by adding the "DOXYGEN=false" argument to ./configure .
+
+ There are a number of working example programs in the "examples" directory.
+
+ The library only declares symbols prefixed by `rsb_'.
+ These symbols include those declared in rsb.h, as well as internal,
+ undocumented service functions and variables.
+ Therefore, to avoid name clashes, you should avoid declaring `rsb_' prefixed
+ identifiers in programs using librsb.  
+
+ If configure has been invoked with the --enable-sparse-blas-interface option,
+ the corresponding `BLAS_' and `blas_' prefixed symbols will also be built.
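+
+ The following is a sketch of Sparse BLAS usage (the names follow the
+ standard's C binding, as declared in blas_sparse.h; the matrix data is made
+ up, and librsb still needs rsb_lib_init() first):
+
+	#include <rsb.h>
+	#include <blas_sparse.h>
+	#include <stdio.h>
+
+	int main(void)
+	{
+		const double X[] = {1.0, 1.0};
+		double Y[] = {0.0, 0.0};
+		blas_sparse_matrix A = blas_invalid_handle;
+
+		if (rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
+			return 1;
+		/* assemble a 2x2 matrix with 3 nonzeroes, entry by entry */
+		A = BLAS_duscr_begin(2, 2);
+		BLAS_duscr_insert_entry(A, 1.0, 0, 0);
+		BLAS_duscr_insert_entry(A, 2.0, 1, 0);
+		BLAS_duscr_insert_entry(A, 3.0, 1, 1);
+		BLAS_duscr_end(A);
+		/* Y := 1.0 * A * X + Y */
+		BLAS_dusmv(blas_no_trans, 1.0, A, X, 1, Y, 1);
+		printf("Y = [ %g %g ]\n", Y[0], Y[1]);
+		BLAS_usds(A);	/* destroy the matrix */
+		rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
+		return 0;
+	}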
+
+ If after building the library, you find that it exports symbols with different
+ prefixes (besides the system specific, compiler-generated symbols), please 
+ report this to us -- it is a bug.
+
+--------------------------------------------------------------------------------
+	CONFIGURE, BUILD AND BENCHMARK EXAMPLE
+--------------------------------------------------------------------------------
+
+ First configure and build with reasonable options, such as (gcc, 64 bit):
+
+  export MKLROOT=/opt/intel/mkl
+  ./configure --disable-debug CC=gcc FC=gfortran CFLAGS=-O3 \
+    --with-mkl="-static -L${MKLROOT}/lib/intel64 \
+    -Wl,--start-group,-lmkl_intel_lp64,-lmkl_gnu_thread,-lmkl_core,--end-group \
+    -fopenmp -lpthread"                        \
+    --with-memhinfo=L2:4/64/512K,L1:8/64/24K   \
+    --with-mkl-include=/opt/intel/mkl/include/ \
+    --prefix=/opt/librsb-optimized/            \
+    --enable-matrix-types="double,double complex"
+
+ Or (icc, 64 bit):
+
+  export MKLROOT=/opt/intel/mkl
+  ./configure --disable-debug CC=icc FC=ifort CFLAGS=-O3 \
+    --with-mkl="-static -L${MKLROOT}/lib/intel64 -openmp -lpthread \
+    -Wl,--start-group,-lmkl_intel_lp64,-lmkl_intel_thread,-lmkl_core,--end-group" \
+    --with-memhinfo=L2:4/64/512K,L1:8/64/24K   \
+    --with-mkl-include=/opt/intel/mkl/include/ \
+    --prefix=/opt/librsb-optimized/            \
+    --enable-matrix-types="double,double complex"
+
+ Or (gcc, 32 bit):
+
+  ./configure --disable-debug CC=gcc FC=gfortran CFLAGS=-O3 \
+   --with-memhinfo=L2:4/64/512K,L1:8/64/24K     \
+   --with-mkl="-static -L/opt/intel/mkl/lib/ia32/ -lmkl_solver \
+   -Wl,--start-group,-lmkl_intel,-lmkl_gnu_thread,-lmkl_core,--end-group \
+   -fopenmp -lpthread" \
+   --with-mkl-include=/opt/intel/mkl/include/   \
+   --prefix=/opt/librsb-optimized/              \
+   --enable-matrix-types="double,double complex"
+
+and then
+
+  make       # builds library and test programs
+  make tests # optional
+
+ In the above example, optional use of the MKL library is configured in.
+ However, librsb itself does not use MKL in any way: MKL is only used by the
+ "rsbench" test program, for performance comparisons.
+
+ Say you want to benchmark the library with a quick SpMV speed test.
+ You have a valid Matrix Market file containing a matrix, A.mtx,
+ and you want to benchmark librsb with it on 1 and 4 cores, performing
+ 100 sparse matrix-vector multiply iterations.
+ Then do a serial test first:
+ ./rsbench -oa -Ob -f A.mtx -qH -R -n1 -t100 --verbose 
+ and then a parallel test:
+ OMP_NUM_THREADS=4 ./rsbench -oa -Ob -f A.mtx -qH -R -n1,4 -t100 --verbose
+
+ You can add the --compare-competitors option to enable comparisons to the MKL,
+ provided it has been configured in.
+ If not specifying a type (argument to the -T option), the default will be
+ used.
+ If configured in at build time, choices may be -T D (where D is the BLAS
+ prefix for "double"), -T Z (Z stands for "double complex") and so on.
+ You can specify "-T :" to mean all of the configured types.
+ The output of 'rsbench' should be easy to understand and to parse.
+
+ For more options and configure information, invoke:
+
+ ./rsbench --help
+
+ To get the built-in defaults, invoke the following:
+ ./rsbench -oa -Ob --help
+ ./rsbench --help
+ ./rsbench --version
+ ./rsbench -I
+ ./rsbench -C
+
+ An example Matrix Market matrix file contents:
+
+%%MatrixMarket matrix coordinate pattern general
+% This is a comment.
+% See other examples in the distributed *.mtx files.
+2 2 3
+1 1
+2 1
+2 2
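+
+ A program can load such a file directly. A sketch using rsb_file_mtx_load()
+ follows (the file name A.mtx matches the example above; see rsb.h for the
+ exact prototype):
+
+	#include <rsb.h>
+
+	int main(void)
+	{
+		rsb_err_t errval = RSB_ERR_NO_ERROR;
+		struct rsb_mtx_t *mtxAp = NULL;
+
+		rsb_lib_init(RSB_NULL_INIT_OPTIONS);
+		/* load A.mtx as a double precision RSB matrix */
+		mtxAp = rsb_file_mtx_load("A.mtx", RSB_FLAG_NOFLAGS,
+				RSB_NUMERICAL_TYPE_DOUBLE, &errval);
+		if (mtxAp != NULL)
+			rsb_mtx_free(mtxAp);
+		rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
+		return (errval == RSB_ERR_NO_ERROR) ? 0 : 1;
+	}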
+
+--------------------------------------------------------------------------------
+		COMPATIBILITY
+--------------------------------------------------------------------------------
+ 
+ This library has been built and tested on Unix machines.
+ Microsoft Windows users might try building librsb under the Cygwin environment.
+
+ Some tricks may have to be used on IBM AIX. For instance, adding the
+ --without-xdr or the --without-zlib switch to ./configure.
+ Your mileage may vary.
+ AIX's "make" program may give problems; use the GNU version "gmake" instead.
+ The same applies to the M4 interpreter.
+
+ This library was developed mostly on Debian Linux and using only free software.
+
+--------------------------------------------------------------------------------
+		FAQ
+--------------------------------------------------------------------------------
+
+ Q: Can you provide me good configure defaults for an optimized build ?
+ A: Default './configure' options are appropriate for an optimized build.
+    You will need to choose good compilation flags.
+    A good starting point for gcc is ./configure CC=gcc CFLAGS='-O3'. 
+    For more, consult your compiler documentation (e.g. man gcc, man icc),
+    and learn about the best flags for your specific platform.
+    Stripping your executable (make install-strip for librsb's rsbench) may
+    help.
+
+ Q: I am a beginner and I wish librsb to be very verbose when I invoke
+    library interface functions incorrectly.
+    Can you provide me good configure defaults for such a "debug" build ?
+ A: Yes: ./scripts/configure_for_debug.sh
+
+ Q: I have machine X, compiler Y, compiling flags Z; is SpMV performance P with
+    matrix M good ?
+ A: In general, hard to tell. However you can `make hinfo.log' and send me 
+    (see CONTACTS) the hinfo.log file and your matrix in Matrix Market format
+    (well, please don't send matrices by email but rather upload them
+    somewhere on the web and send a URL to them).
+    The hinfo.log file will contain useful compile and machine information.
+    Then I *may* get an idea about the performance you should get with that
+    matrix on that computer.
+
+ Q: What is the Sparse BLAS ?
+ A: It's a programming interface specification:
+    [sparseblas_2001]:
+    BLAS Technical Forum Standard, Chapter 3, Sparse BLAS
+    http://www.netlib.org/blas/blast-forum/chapter3.pdf
+    [dhp_2002]:
+    An Overview of the Sparse Basic Linear Algebra Subprograms:
+     The New Standard from the BLAS Technical Forum
+    IAIN S. DUFF, CERFACS and Rutherford Appleton Laboratory
+    MICHAEL A. HEROUX, Sandia National Laboratories
+    ROLDAN POZO, National Institute of Standards and Technology
+    [dv_2002]:
+    Algorithm 818:
+     A Reference Model Implementation of the Sparse BLAS in Fortran 95
+    IAIN S. DUFF, CERFACS, France and Atlas Centre, RAL, England
+    CHRISTOF VÖMEL, CERFACS, France
+
+ Q: Is there an easy way to profile librsb usage in my application ?
+ A: Yes: build with --enable-librsb-stats and extract time elapsed in librsb
+    via e.g.: RSB_REINIT_SINGLE_VALUE_GET(RSB_IO_WANT_LIBRSB_ETIME,&dt,errval).
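+    For instance (a sketch; it assumes such a build, and rsb_time_t as
+    declared in rsb.h):
+
+     #include <rsb.h>
+     #include <stdio.h>
+
+     int main(void)
+     {
+         rsb_time_t dt = 0.0;  /* time elapsed in librsb, in seconds */
+         rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+         rsb_lib_init(RSB_NULL_INIT_OPTIONS);
+         /* ... the librsb calls to be profiled go here ... */
+         RSB_REINIT_SINGLE_VALUE_GET(RSB_IO_WANT_LIBRSB_ETIME, &dt, errval);
+         printf("time spent in librsb: %lg s\n", dt);
+         rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
+         return 0;
+     }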
+
+ Q: Why another sparse matrix library ?
+ A: This library is the fruit of the author's PhD work, focused on researching
+    improved multi-threaded and cache-friendly matrix storage schemes for
+    PSBLAS.
+
+ Q: What are the key features of this library when compared to other ones ?
+ A: Recursive storage, a code generator, parallel BLAS operations
+    (including matrix assembly, matrix-matrix multiplication, transposed
+     matrix-vector multiply), a battery of tests, a Sparse BLAS
+     interface, and free software licensing.
+ 
+ Q: How do I detect librsb from my package's configure script ?
+ A: Add to your configure.ac:
+    AH_TEMPLATE([HAVE_LIBRSB])
+    AC_CHECK_FUNC([rsb_lib_init],AC_DEFINE([HAVE_LIBRSB],[1],[librsb detected]))
+    then rerun autoconf and invoke configure as:
+    ./configure	CFLAGS=`librsb-config   --cflags` \
+                LDFLAGS=`librsb-config  --ldflags --extra_libs`
+   
+ Q: How is correctness checked in the librsb test suite ?
+ A: Different linear system generators and tester programs are used to
+    brute-force-test as many routines and input combinations as possible.
+    See 'make tests'; and run/edit the following tester programs if you are
+    curious:
+    test -f sbtc && ./sbtc||true # Sparse BLAS checker (C interface based)
+    test -f sbtf && ./sbtf||true # Sparse BLAS checker (Fortran interface, opt.)
+    ./rsbench -Q 10.0 # 10 seconds brute-test
+
+ Q: Why did you write the library in C and not in C++ ?
+ A: Mainly...
+    Because C can be easily interfaced with C++ and Fortran.
+    Because using a debugger under full fledged C++ is a headache.
+    Because of the C's 'restrict' keyword.
+    
+ Q: Why did you use C and not Fortran ?
+ A: This library is slightly system-oriented, and interfacing to system calls
+    is much easier in C. Also, C's pointer arithmetic plays a crucial role.
+
+ Q: Is there a quick and easy way to perform an artificial performance
+    test with huge matrices without having to program ?
+ A: Sure. The following lines generate matrices of a specified dimension.
+    You can play with them by changing the matrix size, for instance. 
+    ./rsbench  -oa -Ob -qH -R --dense 1                    --verbose
+    ./rsbench  -oa -Ob -qH -R --dense 1024                 --verbose
+    ./rsbench  -oa -Ob -qH -R --lower 1024 --as-symmetric  --verbose
+    ./rsbench  -oa -Ob -qH -R --dense 1000 --gen-lband 10 --gen-uband 3
+    ./rsbench  -oa -Ob -qH -R --generate-diagonal 1000
+
+ Q: I've found a bug! What should I do ?
+ A: First please make sure it is really a bug: read the documentation, check,
+    double check.
+    Then you can write a description of the problem, with a minimal program
+    source code and data to replicate it.
+    Then you can jump to the CONTACTS details section.
+
+ Q: Is it possible to build matrices of, say, long double or 
+    long double complex or int or short int ?
+ A: Yes, it's not a problem. You should invoke the configure script accordingly,
+    e.g.: --enable-matrix-types="long double".
+    If this breaks code compilation, feel free to contact the author
+    (see the CONTACTS section).
+
+ Q: Is there a way to compare the performance of this library to some other
+    high performance libraries ?
+ A: If you build rsbench with support for the Intel MKL library, then you
+    can do performance comparisons with e.g.:
+    # ./rsbench -oa -Ob -qH -R --gen-diag 100 --compare-competitors --verbose
+    or use the following script:
+    # bench/dense.sh ' '
+    Or even better, check out the --write-performance-record feature; for
+    details see the output of:
+    # rsbench -oa -Ob --help
+
+ Q: Is there a non-threaded (serial) version of librsb ?
+ A: Yes: you can configure the library to work serially (with no OpenMP).
+    See ./configure --help. 
+
+ Q: Is this library thread-safe ?
+ A: Probably yes: no static buffers are being used, and reentrant C standard
+    library functions are invoked.
+
+ Q: Does the librsb library run on GPUs or Intel MIC ?
+ A: It has been built on Intel MIC once, but not tested.
+
+ Q: I configured and built the code without enabling any BLAS type (S,D,C,Z),
+     and both `make qtests' and `make tests' ran successfully outside the
+     ./examples directory, but `make tests' breaks within ./examples.
+ A: Well, the tests passed because the examples testing was simply skipped.
+    The example programs need at least one of these types to work.
+
+ Q: At build time I get many "unused variable" warnings. Why ? 
+ A: librsb accommodates many code generation and build time configuration
+    options. Some combinations may turn off compilation of certain parts of the
+    code, leading some variables to be unused.
+
+ Q: Are there papers to read about the RSB format and algorithms ?
+ A: Yes, the following:
+
+    Michele Martone
+    Efficient Multithreaded Untransposed, Transposed or Symmetric Sparse
+    Matrix-Vector Multiplication with the Recursive Sparse Blocks Format
+    Parallel Computing 40(7): 251-270 (2014)
+    http://dx.doi.org/10.1016/j.parco.2014.03.008
+
+    Michele Martone
+    Cache and Energy Efficiency of Sparse Matrix-Vector Multiplication for
+    Different BLAS Numerical Types with the RSB Format
+    Proceedings of the ParCo 2013 conference, September 2013, Munich, Germany
+    PARCO 2013: 193-202
+    http://dx.doi.org/10.3233/978-1-61499-381-0-193
+
+    Michele Martone, Marcin Paprzycki, Salvatore Filippone: An Improved Sparse
+    Matrix-Vector Multiply Based on Recursive Sparse Blocks Layout.
+    LSSC 2011: 606-613
+    http://dx.doi.org/10.1007/978-3-642-29843-1_69
+
+    Michele Martone, Salvatore Filippone, Salvatore Tucci, Marcin Paprzycki,
+    Maria Ganzha: Utilizing Recursive Storage in Sparse Matrix-Vector
+    Multiplication - Preliminary Considerations. CATA 2010: 300-305
+    
+    Michele Martone, Salvatore Filippone, Marcin Paprzycki, Salvatore Tucci:
+    Assembling Recursively Stored Sparse Matrices. IMCSIT 2010: 317-325
+    http://www.proceedings2010.imcsit.org/pliks/205.pdf
+
+    Michele Martone, Salvatore Filippone, Pawel Gepner, Marcin Paprzycki,
+    Salvatore Tucci: Use of Hybrid Recursive CSR/COO Data Structures in Sparse
+    Matrices-Vector Multiplication. IMCSIT 2010: 327-335
+    http://dx.doi.org/10.1109/SYNASC.2010.72
+
+    Michele Martone, Salvatore Filippone, Marcin Paprzycki, Salvatore Tucci:
+    On BLAS Operations with Recursively Stored Sparse Matrices.
+    SYNASC 2010: 49-56
+    http://dx.doi.org/10.1109/SYNASC.2010.72
+
+    Michele Martone, Salvatore Filippone, Marcin Paprzycki, Salvatore Tucci:
+    On the Usage of 16 Bit Indices in Recursively Stored Sparse Matrices.
+    SYNASC 2010: 57-64
+    http://dx.doi.org/10.1109/SYNASC.2010.77
+
+ Q: I have M4-related problems on IBM SP5/SP6 (my M4 preprocessor tries to
+    regenerate code but it fails). What should I do ?
+ A: A fix is to use a GNU M4 implementation 
+    e.g.: M4=/opt/freeware/bin/m4 ./configure ...
+    e.g.: M4=gm4 ./configure ...
+    or execute:
+    touch *.h ; touch *.c ; make
+    Or "./configure; make"  the library on a different machine, then build 
+    a sources archive with `make dist', and use it on the original machine.
+   
+--------------------------------------------------------------------------------
+	POSSIBLE / POTENTIAL FUTURE FEATURES / ENHANCEMENTS
+--------------------------------------------------------------------------------
+
+ * auxiliary functions for numerical vectors
+ * CSC,BCSR,BCSC and other formats
+ * (optional) loop unrolled kernels for BCSR/BCSC
+ * performance prediction/estimation facilities (experimental)
+ * types of the blocks, nonzeroes, and coordinate indices can be user-specified
+ * a code generator for BCSR, BCSC, VBR, VBC kernels
+ * full support for BCSR, BCSC storages 
+ * automatic matrix blocking selection (for BCSR/BCSC) 
+ * an arbitrary subset of block size kernels can be specified to be generated
+ * full support for VBR,VBC storages
+ * recursive storage variants of blocked formats (non uniform blocking)
+ * more auto-tuning and prediction control
+ * use of assembly functions or intrinsics
+ * the use of context variables (scenarios with multiple libraries using
+   librsb completely independently at the same time are not supported)
+ * enhanced in-place matrix assembly functions (useful for really huge matrices)
+
+--------------------------------------------------------------------------------
+   		ABOUT THE INTERNALS
+--------------------------------------------------------------------------------
+
+ The following good practices are followed during the development of librsb.
+
+ - only symbols beginning with `rsb_' or `blas_' are being exported.
+ - internal functions are usually prefixed by `rsb__'.
+ - no library internal function shall call any API function.
+
+ If by using/inspecting the code you notice any of the above being violated,
+ please report it.
+
+--------------------------------------------------------------------------------
+		BUGS
+--------------------------------------------------------------------------------
+
+ If you encounter any bug (e.g.: a mismatch between library/program behaviour
+ and documentation), please let me know about it by sending me (see CONTACTS)
+ all relevant information (code snippet, originating data/matrix, config.log),
+ in such a way that I can replicate the bug behaviour on my machines.
+ If the bug occurred when using librsb interfaced to some proprietary library,
+ please first make sure the bug is really in librsb.
+
+ It may be of great help to you to build the library with the debug compile
+ options on (e.g.: CFLAGS='-O0 -ggdb'), and with appropriate library verbosity
+ levels (--enable-internals-error-verbosity, --enable-interface-error-verbosity
+ and --enable-io-level  options to configure) to better understand the program 
+ behaviour before sending a report.
+
+ Make sure you have the latest version of the library when reporting a bug. 
+
+--------------------------------------------------------------------------------
+		CONTACTS
+--------------------------------------------------------------------------------
+
+ You are welcome to contact the librsb author:
+
+  Michele Martone < michelemartone AT users DOT sourceforge DOT net >
+ 
+ Please specify "librsb" in the "Subject:" line of your emails.
+
+ More information and downloads on  http://sourceforge.net/projects/librsb
+
+ Mailing list: https://lists.sourceforge.net/lists/listinfo/librsb-users
+ 
+--------------------------------------------------------------------------------
+		CREDITS	(in alphabetical order)
+--------------------------------------------------------------------------------
+
+For librsb-1.2:
+ Marco Atzeri provided testing, patches to build librsb under cygwin and
+ spotted a few bugs.
+ Mu-Chu Lee provided a patch to fix sorting code crashing with > 10^9 nnz.
+
+For librsb-1.1:
+ Gilles Gouaillardet provided a patch for OpenMP-encapsulated I/O.
+ Marco Restelli provided with testing and detailed comments and suggestions.
+
+For librsb-1.0:
+ Francis Casson helped with testing and documentation reviewing during the first
+ release.
+ Nitya Hariharan helped revising early versions of the documentation.
+
+--------------------------------------------------------------------------------
+		LICENSE
+--------------------------------------------------------------------------------
+
+ This software is distributed under the terms of the GNU Lesser General Public
+ License version 3 (LGPLv3) or later.
+ See the COPYING file for a copy of the LGPLv3.
+
+ librsb is free software.
+ To support it, consider writing "thank you" to the author and acknowledging use
+ of librsb in your publications. That would be much appreciated.
+
+--------------------------------------------------------------------------------
diff --git a/aclocal.m4 b/aclocal.m4
new file mode 100644
index 0000000..a10acfb
--- /dev/null
+++ b/aclocal.m4
@@ -0,0 +1,9616 @@
+# generated automatically by aclocal 1.11.6 -*- Autoconf -*-
+
+# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
+# 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation,
+# Inc.
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+m4_ifndef([AC_AUTOCONF_VERSION],
+  [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
+m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.69],,
+[m4_warning([this file was generated for autoconf 2.69.
+You have another version of autoconf.  It may work, but is not guaranteed to.
+If you have problems, you may need to regenerate the build system entirely.
+To do so, use the procedure documented by the package, typically `autoreconf'.])])
+
+# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*-
+#
+#   Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005,
+#                 2006, 2007, 2008, 2009, 2010, 2011 Free Software
+#                 Foundation, Inc.
+#   Written by Gordon Matzigkeit, 1996
+#
+# This file is free software; the Free Software Foundation gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+
+m4_define([_LT_COPYING], [dnl
+#   Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005,
+#                 2006, 2007, 2008, 2009, 2010, 2011 Free Software
+#                 Foundation, Inc.
+#   Written by Gordon Matzigkeit, 1996
+#
+#   This file is part of GNU Libtool.
+#
+# GNU Libtool is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# As a special exception to the GNU General Public License,
+# if you distribute this file as part of a program or library that
+# is built using GNU Libtool, you may include this file under the
+# same distribution terms that you use for the rest of that program.
+#
+# GNU Libtool is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Libtool; see the file COPYING.  If not, a copy
+# can be downloaded from http://www.gnu.org/licenses/gpl.html, or
+# obtained by writing to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+])
+
+# serial 57 LT_INIT
+
+
+# LT_PREREQ(VERSION)
+# ------------------
+# Complain and exit if this libtool version is less than VERSION.
+m4_defun([LT_PREREQ],
+[m4_if(m4_version_compare(m4_defn([LT_PACKAGE_VERSION]), [$1]), -1,
+       [m4_default([$3],
+		   [m4_fatal([Libtool version $1 or higher is required],
+		             63)])],
+       [$2])])
+
+
+# _LT_CHECK_BUILDDIR
+# ------------------
+# Complain if the absolute build directory name contains unusual characters
+m4_defun([_LT_CHECK_BUILDDIR],
+[case `pwd` in
+  *\ * | *\	*)
+    AC_MSG_WARN([Libtool does not cope well with whitespace in `pwd`]) ;;
+esac
+])
+
+
+# LT_INIT([OPTIONS])
+# ------------------
+AC_DEFUN([LT_INIT],
+[AC_PREREQ([2.58])dnl We use AC_INCLUDES_DEFAULT
+AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl
+AC_BEFORE([$0], [LT_LANG])dnl
+AC_BEFORE([$0], [LT_OUTPUT])dnl
+AC_BEFORE([$0], [LTDL_INIT])dnl
+m4_require([_LT_CHECK_BUILDDIR])dnl
+
+dnl Autoconf doesn't catch unexpanded LT_ macros by default:
+m4_pattern_forbid([^_?LT_[A-Z_]+$])dnl
+m4_pattern_allow([^(_LT_EOF|LT_DLGLOBAL|LT_DLLAZY_OR_NOW|LT_MULTI_MODULE)$])dnl
+dnl aclocal doesn't pull ltoptions.m4, ltsugar.m4, or ltversion.m4
+dnl unless we require an AC_DEFUNed macro:
+AC_REQUIRE([LTOPTIONS_VERSION])dnl
+AC_REQUIRE([LTSUGAR_VERSION])dnl
+AC_REQUIRE([LTVERSION_VERSION])dnl
+AC_REQUIRE([LTOBSOLETE_VERSION])dnl
+m4_require([_LT_PROG_LTMAIN])dnl
+
+_LT_SHELL_INIT([SHELL=${CONFIG_SHELL-/bin/sh}])
+
+dnl Parse OPTIONS
+_LT_SET_OPTIONS([$0], [$1])
+
+# This can be used to rebuild libtool when needed
+LIBTOOL_DEPS="$ltmain"
+
+# Always use our own libtool.
+LIBTOOL='$(SHELL) $(top_builddir)/libtool'
+AC_SUBST(LIBTOOL)dnl
+
+_LT_SETUP
+
+# Only expand once:
+m4_define([LT_INIT])
+])# LT_INIT
+
+# Old names:
+AU_ALIAS([AC_PROG_LIBTOOL], [LT_INIT])
+AU_ALIAS([AM_PROG_LIBTOOL], [LT_INIT])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_PROG_LIBTOOL], [])
+dnl AC_DEFUN([AM_PROG_LIBTOOL], [])
+
+
+# _LT_CC_BASENAME(CC)
+# -------------------
+# Calculate cc_basename.  Skip known compiler wrappers and cross-prefix.
+m4_defun([_LT_CC_BASENAME],
+[for cc_temp in $1""; do
+  case $cc_temp in
+    compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;;
+    distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;;
+    \-*) ;;
+    *) break;;
+  esac
+done
+cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
+])
+
+
+# _LT_FILEUTILS_DEFAULTS
+# ----------------------
+# It is okay to use these file commands and assume they have been set
+# sensibly after `m4_require([_LT_FILEUTILS_DEFAULTS])'.
+m4_defun([_LT_FILEUTILS_DEFAULTS],
+[: ${CP="cp -f"}
+: ${MV="mv -f"}
+: ${RM="rm -f"}
+])# _LT_FILEUTILS_DEFAULTS
+
+
+# _LT_SETUP
+# ---------
+m4_defun([_LT_SETUP],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_CANONICAL_BUILD])dnl
+AC_REQUIRE([_LT_PREPARE_SED_QUOTE_VARS])dnl
+AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl
+
+_LT_DECL([], [PATH_SEPARATOR], [1], [The PATH separator for the build system])dnl
+dnl
+_LT_DECL([], [host_alias], [0], [The host system])dnl
+_LT_DECL([], [host], [0])dnl
+_LT_DECL([], [host_os], [0])dnl
+dnl
+_LT_DECL([], [build_alias], [0], [The build system])dnl
+_LT_DECL([], [build], [0])dnl
+_LT_DECL([], [build_os], [0])dnl
+dnl
+AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([LT_PATH_LD])dnl
+AC_REQUIRE([LT_PATH_NM])dnl
+dnl
+AC_REQUIRE([AC_PROG_LN_S])dnl
+test -z "$LN_S" && LN_S="ln -s"
+_LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])dnl
+dnl
+AC_REQUIRE([LT_CMD_MAX_LEN])dnl
+_LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])dnl
+_LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl
+dnl
+m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_CHECK_SHELL_FEATURES])dnl
+m4_require([_LT_PATH_CONVERSION_FUNCTIONS])dnl
+m4_require([_LT_CMD_RELOAD])dnl
+m4_require([_LT_CHECK_MAGIC_METHOD])dnl
+m4_require([_LT_CHECK_SHAREDLIB_FROM_LINKLIB])dnl
+m4_require([_LT_CMD_OLD_ARCHIVE])dnl
+m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl
+m4_require([_LT_WITH_SYSROOT])dnl
+
+_LT_CONFIG_LIBTOOL_INIT([
+# See if we are running on zsh, and set the options which allow our
+# commands through without removal of \ escapes.
+if test -n "\${ZSH_VERSION+set}" ; then
+   setopt NO_GLOB_SUBST
+fi
+])
+if test -n "${ZSH_VERSION+set}" ; then
+   setopt NO_GLOB_SUBST
+fi
+
+_LT_CHECK_OBJDIR
+
+m4_require([_LT_TAG_COMPILER])dnl
+
+case $host_os in
+aix3*)
+  # AIX sometimes has problems with the GCC collect2 program.  For some
+  # reason, if we set the COLLECT_NAMES environment variable, the problems
+  # vanish in a puff of smoke.
+  if test "X${COLLECT_NAMES+set}" != Xset; then
+    COLLECT_NAMES=
+    export COLLECT_NAMES
+  fi
+  ;;
+esac
+
+# Global variables:
+ofile=libtool
+can_build_shared=yes
+
+# All known linkers require a `.a' archive for static linking (except MSVC,
+# which needs '.lib').
+libext=a
+
+with_gnu_ld="$lt_cv_prog_gnu_ld"
+
+old_CC="$CC"
+old_CFLAGS="$CFLAGS"
+
+# Set sane defaults for various variables
+test -z "$CC" && CC=cc
+test -z "$LTCC" && LTCC=$CC
+test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS
+test -z "$LD" && LD=ld
+test -z "$ac_objext" && ac_objext=o
+
+_LT_CC_BASENAME([$compiler])
+
+# Only perform the check for file, if the check method requires it
+test -z "$MAGIC_CMD" && MAGIC_CMD=file
+case $deplibs_check_method in
+file_magic*)
+  if test "$file_magic_cmd" = '$MAGIC_CMD'; then
+    _LT_PATH_MAGIC
+  fi
+  ;;
+esac
+
+# Use C for the default configuration in the libtool script
+LT_SUPPORTED_TAG([CC])
+_LT_LANG_C_CONFIG
+_LT_LANG_DEFAULT_CONFIG
+_LT_CONFIG_COMMANDS
+])# _LT_SETUP
+
+
+# _LT_PREPARE_SED_QUOTE_VARS
+# --------------------------
+# Define a few sed substitution that help us do robust quoting.
+m4_defun([_LT_PREPARE_SED_QUOTE_VARS],
+[# Backslashify metacharacters that are still active within
+# double-quoted strings.
+sed_quote_subst='s/\([["`$\\]]\)/\\\1/g'
+
+# Same as above, but do not quote variable references.
+double_quote_subst='s/\([["`\\]]\)/\\\1/g'
+
+# Sed substitution to delay expansion of an escaped shell variable in a
+# double_quote_subst'ed string.
+delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g'
+
+# Sed substitution to delay expansion of an escaped single quote.
+delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g'
+
+# Sed substitution to avoid accidental globbing in evaled expressions
+no_glob_subst='s/\*/\\\*/g'
+])
+
+# _LT_PROG_LTMAIN
+# ---------------
+# Note that this code is called both from `configure', and `config.status'
+# now that we use AC_CONFIG_COMMANDS to generate libtool.  Notably,
+# `config.status' has no value for ac_aux_dir unless we are using Automake,
+# so we pass a copy along to make sure it has a sensible value anyway.
+m4_defun([_LT_PROG_LTMAIN],
+[m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([ltmain.sh])])dnl
+_LT_CONFIG_LIBTOOL_INIT([ac_aux_dir='$ac_aux_dir'])
+ltmain="$ac_aux_dir/ltmain.sh"
+])# _LT_PROG_LTMAIN
+
+
+
+# So that we can recreate a full libtool script including additional
+# tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS
+# in macros and then make a single call at the end using the `libtool'
+# label.
+
+
+# _LT_CONFIG_LIBTOOL_INIT([INIT-COMMANDS])
+# ----------------------------------------
+# Register INIT-COMMANDS to be passed to AC_CONFIG_COMMANDS later.
+m4_define([_LT_CONFIG_LIBTOOL_INIT],
+[m4_ifval([$1],
+          [m4_append([_LT_OUTPUT_LIBTOOL_INIT],
+                     [$1
+])])])
+
+# Initialize.
+m4_define([_LT_OUTPUT_LIBTOOL_INIT])
+
+
+# _LT_CONFIG_LIBTOOL([COMMANDS])
+# ------------------------------
+# Register COMMANDS to be passed to AC_CONFIG_COMMANDS later.
+m4_define([_LT_CONFIG_LIBTOOL],
+[m4_ifval([$1],
+          [m4_append([_LT_OUTPUT_LIBTOOL_COMMANDS],
+                     [$1
+])])])
+
+# Initialize.
+m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS])
+
+
+# _LT_CONFIG_SAVE_COMMANDS([COMMANDS], [INIT_COMMANDS])
+# -----------------------------------------------------
+m4_defun([_LT_CONFIG_SAVE_COMMANDS],
+[_LT_CONFIG_LIBTOOL([$1])
+_LT_CONFIG_LIBTOOL_INIT([$2])
+])
+
+
+# _LT_FORMAT_COMMENT([COMMENT])
+# -----------------------------
+# Add leading comment marks to the start of each line, and a trailing
+# full-stop to the whole comment if one is not present already.
+m4_define([_LT_FORMAT_COMMENT],
+[m4_ifval([$1], [
+m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])],
+              [['`$\]], [\\\&])]m4_bmatch([$1], [[!?.]$], [], [.])
+)])
+
+
+
+
+
+# _LT_DECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION], [IS-TAGGED?])
+# -------------------------------------------------------------------
+# CONFIGNAME is the name given to the value in the libtool script.
+# VARNAME is the (base) name used in the configure script.
+# VALUE may be 0, 1 or 2 for a computed quote escaped value based on
+# VARNAME.  Any other value will be used directly.
+m4_define([_LT_DECL],
+[lt_if_append_uniq([lt_decl_varnames], [$2], [, ],
+    [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name],
+	[m4_ifval([$1], [$1], [$2])])
+    lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3])
+    m4_ifval([$4],
+	[lt_dict_add_subkey([lt_decl_dict], [$2], [description], [$4])])
+    lt_dict_add_subkey([lt_decl_dict], [$2],
+	[tagged?], [m4_ifval([$5], [yes], [no])])])
+])
+
+
+# _LT_TAGDECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION])
+# --------------------------------------------------------
+m4_define([_LT_TAGDECL], [_LT_DECL([$1], [$2], [$3], [$4], [yes])])
+
+
+# lt_decl_tag_varnames([SEPARATOR], [VARNAME1...])
+# ------------------------------------------------
+m4_define([lt_decl_tag_varnames],
+[_lt_decl_filter([tagged?], [yes], $@)])
+
+
+# _lt_decl_filter(SUBKEY, VALUE, [SEPARATOR], [VARNAME1..])
+# ---------------------------------------------------------
+m4_define([_lt_decl_filter],
+[m4_case([$#],
+  [0], [m4_fatal([$0: too few arguments: $#])],
+  [1], [m4_fatal([$0: too few arguments: $#: $1])],
+  [2], [lt_dict_filter([lt_decl_dict], [$1], [$2], [], lt_decl_varnames)],
+  [3], [lt_dict_filter([lt_decl_dict], [$1], [$2], [$3], lt_decl_varnames)],
+  [lt_dict_filter([lt_decl_dict], $@)])[]dnl
+])
+
+
+# lt_decl_quote_varnames([SEPARATOR], [VARNAME1...])
+# --------------------------------------------------
+m4_define([lt_decl_quote_varnames],
+[_lt_decl_filter([value], [1], $@)])
+
+
+# lt_decl_dquote_varnames([SEPARATOR], [VARNAME1...])
+# ---------------------------------------------------
+m4_define([lt_decl_dquote_varnames],
+[_lt_decl_filter([value], [2], $@)])
+
+
+# lt_decl_varnames_tagged([SEPARATOR], [VARNAME1...])
+# ---------------------------------------------------
+m4_define([lt_decl_varnames_tagged],
+[m4_assert([$# <= 2])dnl
+_$0(m4_quote(m4_default([$1], [[, ]])),
+    m4_ifval([$2], [[$2]], [m4_dquote(lt_decl_tag_varnames)]),
+    m4_split(m4_normalize(m4_quote(_LT_TAGS)), [ ]))])
+m4_define([_lt_decl_varnames_tagged],
+[m4_ifval([$3], [lt_combine([$1], [$2], [_], $3)])])
+
+
+# lt_decl_all_varnames([SEPARATOR], [VARNAME1...])
+# ------------------------------------------------
+m4_define([lt_decl_all_varnames],
+[_$0(m4_quote(m4_default([$1], [[, ]])),
+     m4_if([$2], [],
+	   m4_quote(lt_decl_varnames),
+	m4_quote(m4_shift($@))))[]dnl
+])
+m4_define([_lt_decl_all_varnames],
+[lt_join($@, lt_decl_varnames_tagged([$1],
+			lt_decl_tag_varnames([[, ]], m4_shift($@))))dnl
+])
+
+
+# _LT_CONFIG_STATUS_DECLARE([VARNAME])
+# ------------------------------------
+# Quote a variable value, and forward it to `config.status' so that its
+# declaration there will have the same value as in `configure'.  VARNAME
+# must have a single quote delimited value for this to work.
+m4_define([_LT_CONFIG_STATUS_DECLARE],
+[$1='`$ECHO "$][$1" | $SED "$delay_single_quote_subst"`'])
+
+
+# _LT_CONFIG_STATUS_DECLARATIONS
+# ------------------------------
+# We delimit libtool config variables with single quotes, so when
+# we write them to config.status, we have to be sure to quote all
+# embedded single quotes properly.  In configure, this macro expands
+# each variable declared with _LT_DECL (and _LT_TAGDECL) into:
+#
+#    <var>='`$ECHO "$<var>" | $SED "$delay_single_quote_subst"`'
+m4_defun([_LT_CONFIG_STATUS_DECLARATIONS],
+[m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames),
+    [m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])])
+
+
+# _LT_LIBTOOL_TAGS
+# ----------------
+# Output comment and list of tags supported by the script
+m4_defun([_LT_LIBTOOL_TAGS],
+[_LT_FORMAT_COMMENT([The names of the tagged configurations supported by this script])dnl
+available_tags="_LT_TAGS"dnl
+])
+
+
+# _LT_LIBTOOL_DECLARE(VARNAME, [TAG])
+# -----------------------------------
+# Extract the dictionary values for VARNAME (optionally with TAG) and
+# expand to a commented shell variable setting:
+#
+#    # Some comment about what VAR is for.
+#    visible_name=$lt_internal_name
+m4_define([_LT_LIBTOOL_DECLARE],
+[_LT_FORMAT_COMMENT(m4_quote(lt_dict_fetch([lt_decl_dict], [$1],
+					   [description])))[]dnl
+m4_pushdef([_libtool_name],
+    m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [libtool_name])))[]dnl
+m4_case(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [value])),
+    [0], [_libtool_name=[$]$1],
+    [1], [_libtool_name=$lt_[]$1],
+    [2], [_libtool_name=$lt_[]$1],
+    [_libtool_name=lt_dict_fetch([lt_decl_dict], [$1], [value])])[]dnl
+m4_ifval([$2], [_$2])[]m4_popdef([_libtool_name])[]dnl
+])
+
+
+# _LT_LIBTOOL_CONFIG_VARS
+# -----------------------
+# Produce commented declarations of non-tagged libtool config variables
+# suitable for insertion in the LIBTOOL CONFIG section of the `libtool'
+# script.  Tagged libtool config variables (even for the LIBTOOL CONFIG
+# section) are produced by _LT_LIBTOOL_TAG_VARS.
+m4_defun([_LT_LIBTOOL_CONFIG_VARS],
+[m4_foreach([_lt_var],
+    m4_quote(_lt_decl_filter([tagged?], [no], [], lt_decl_varnames)),
+    [m4_n([_LT_LIBTOOL_DECLARE(_lt_var)])])])
+
+
+# _LT_LIBTOOL_TAG_VARS(TAG)
+# -------------------------
+m4_define([_LT_LIBTOOL_TAG_VARS],
+[m4_foreach([_lt_var], m4_quote(lt_decl_tag_varnames),
+    [m4_n([_LT_LIBTOOL_DECLARE(_lt_var, [$1])])])])
+
+
+# _LT_TAGVAR(VARNAME, [TAGNAME])
+# ------------------------------
+m4_define([_LT_TAGVAR], [m4_ifval([$2], [$1_$2], [$1])])
+
+
+# _LT_CONFIG_COMMANDS
+# -------------------
+# Send accumulated output to $CONFIG_STATUS.  Thanks to the lists of
+# variables for single and double quote escaping we saved from calls
+# to _LT_DECL, we can put quote escaped variables declarations
+# into `config.status', and then the shell code to quote escape them in
+# for loops in `config.status'.  Finally, any additional code accumulated
+# from calls to _LT_CONFIG_LIBTOOL_INIT is expanded.
+m4_defun([_LT_CONFIG_COMMANDS],
+[AC_PROVIDE_IFELSE([LT_OUTPUT],
+	dnl If the libtool generation code has been placed in $CONFIG_LT,
+	dnl instead of duplicating it all over again into config.status,
+	dnl then we will have config.status run $CONFIG_LT later, so it
+	dnl needs to know what name is stored there:
+        [AC_CONFIG_COMMANDS([libtool],
+            [$SHELL $CONFIG_LT || AS_EXIT(1)], [CONFIG_LT='$CONFIG_LT'])],
+    dnl If the libtool generation code is destined for config.status,
+    dnl expand the accumulated commands and init code now:
+    [AC_CONFIG_COMMANDS([libtool],
+        [_LT_OUTPUT_LIBTOOL_COMMANDS], [_LT_OUTPUT_LIBTOOL_COMMANDS_INIT])])
+])#_LT_CONFIG_COMMANDS
+
+
+# Initialize.
+m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS_INIT],
+[
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+sed_quote_subst='$sed_quote_subst'
+double_quote_subst='$double_quote_subst'
+delay_variable_subst='$delay_variable_subst'
+_LT_CONFIG_STATUS_DECLARATIONS
+LTCC='$LTCC'
+LTCFLAGS='$LTCFLAGS'
+compiler='$compiler_DEFAULT'
+
+# A function that is used when there is no print builtin or printf.
+func_fallback_echo ()
+{
+  eval 'cat <<_LTECHO_EOF
+\$[]1
+_LTECHO_EOF'
+}
+
+# Quote evaled strings.
+for var in lt_decl_all_varnames([[ \
+]], lt_decl_quote_varnames); do
+    case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
+    *[[\\\\\\\`\\"\\\$]]*)
+      eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\""
+      ;;
+    *)
+      eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
+      ;;
+    esac
+done
+
+# Double-quote double-evaled strings.
+for var in lt_decl_all_varnames([[ \
+]], lt_decl_dquote_varnames); do
+    case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
+    *[[\\\\\\\`\\"\\\$]]*)
+      eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\""
+      ;;
+    *)
+      eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
+      ;;
+    esac
+done
+
+_LT_OUTPUT_LIBTOOL_INIT
+])
+
+# _LT_GENERATED_FILE_INIT(FILE, [COMMENT])
+# ------------------------------------
+# Generate a child script FILE with all initialization necessary to
+# reuse the environment learned by the parent script, and make the
+# file executable.  If COMMENT is supplied, it is inserted after the
+# `#!' sequence but before initialization text begins.  After this
+# macro, additional text can be appended to FILE to form the body of
+# the child script.  The macro ends with non-zero status if the
+# file could not be fully written (such as if the disk is full).
+m4_ifdef([AS_INIT_GENERATED],
+[m4_defun([_LT_GENERATED_FILE_INIT],[AS_INIT_GENERATED($@)])],
+[m4_defun([_LT_GENERATED_FILE_INIT],
+[m4_require([AS_PREPARE])]dnl
+[m4_pushdef([AS_MESSAGE_LOG_FD])]dnl
+[lt_write_fail=0
+cat >$1 <<_ASEOF || lt_write_fail=1
+#! $SHELL
+# Generated by $as_me.
+$2
+SHELL=\${CONFIG_SHELL-$SHELL}
+export SHELL
+_ASEOF
+cat >>$1 <<\_ASEOF || lt_write_fail=1
+AS_SHELL_SANITIZE
+_AS_PREPARE
+exec AS_MESSAGE_FD>&1
+_ASEOF
+test $lt_write_fail = 0 && chmod +x $1[]dnl
+m4_popdef([AS_MESSAGE_LOG_FD])])])# _LT_GENERATED_FILE_INIT
+
+# LT_OUTPUT
+# ---------
+# This macro allows early generation of the libtool script (before
+# AC_OUTPUT is called), in case it is used in configure for compilation
+# tests.
+AC_DEFUN([LT_OUTPUT],
+[: ${CONFIG_LT=./config.lt}
+AC_MSG_NOTICE([creating $CONFIG_LT])
+_LT_GENERATED_FILE_INIT(["$CONFIG_LT"],
+[# Run this file to recreate a libtool stub with the current configuration.])
+
+cat >>"$CONFIG_LT" <<\_LTEOF
+lt_cl_silent=false
+exec AS_MESSAGE_LOG_FD>>config.log
+{
+  echo
+  AS_BOX([Running $as_me.])
+} >&AS_MESSAGE_LOG_FD
+
+lt_cl_help="\
+\`$as_me' creates a local libtool stub from the current configuration,
+for use in further configure time tests before the real libtool is
+generated.
+
+Usage: $[0] [[OPTIONS]]
+
+  -h, --help      print this help, then exit
+  -V, --version   print version number, then exit
+  -q, --quiet     do not print progress messages
+  -d, --debug     don't remove temporary files
+
+Report bugs to <bug-libtool at gnu.org>."
+
+lt_cl_version="\
+m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl
+m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION])
+configured by $[0], generated by m4_PACKAGE_STRING.
+
+Copyright (C) 2011 Free Software Foundation, Inc.
+This config.lt script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+
+while test $[#] != 0
+do
+  case $[1] in
+    --version | --v* | -V )
+      echo "$lt_cl_version"; exit 0 ;;
+    --help | --h* | -h )
+      echo "$lt_cl_help"; exit 0 ;;
+    --debug | --d* | -d )
+      debug=: ;;
+    --quiet | --q* | --silent | --s* | -q )
+      lt_cl_silent=: ;;
+
+    -*) AC_MSG_ERROR([unrecognized option: $[1]
+Try \`$[0] --help' for more information.]) ;;
+
+    *) AC_MSG_ERROR([unrecognized argument: $[1]
+Try \`$[0] --help' for more information.]) ;;
+  esac
+  shift
+done
+
+if $lt_cl_silent; then
+  exec AS_MESSAGE_FD>/dev/null
+fi
+_LTEOF
+
+cat >>"$CONFIG_LT" <<_LTEOF
+_LT_OUTPUT_LIBTOOL_COMMANDS_INIT
+_LTEOF
+
+cat >>"$CONFIG_LT" <<\_LTEOF
+AC_MSG_NOTICE([creating $ofile])
+_LT_OUTPUT_LIBTOOL_COMMANDS
+AS_EXIT(0)
+_LTEOF
+chmod +x "$CONFIG_LT"
+
+# configure is writing to config.log, but config.lt does its own redirection,
+# appending to config.log, which fails on DOS, as config.log is still kept
+# open by configure.  Here we exec the FD to /dev/null, effectively closing
+# config.log, so it can be properly (re)opened and appended to by config.lt.
+lt_cl_success=:
+test "$silent" = yes &&
+  lt_config_lt_args="$lt_config_lt_args --quiet"
+exec AS_MESSAGE_LOG_FD>/dev/null
+$SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false
+exec AS_MESSAGE_LOG_FD>>config.log
+$lt_cl_success || AS_EXIT(1)
+])# LT_OUTPUT
+
+
+# _LT_CONFIG(TAG)
+# ---------------
+# If TAG is the built-in tag, create an initial libtool script with a
+# default configuration from the untagged config vars.  Otherwise add code
+# to config.status for appending the configuration named by TAG from the
+# matching tagged config vars.
+m4_defun([_LT_CONFIG],
+[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+_LT_CONFIG_SAVE_COMMANDS([
+  m4_define([_LT_TAG], m4_if([$1], [], [C], [$1]))dnl
+  m4_if(_LT_TAG, [C], [
+    # See if we are running on zsh, and set the options which allow our
+    # commands through without removal of \ escapes.
+    if test -n "${ZSH_VERSION+set}" ; then
+      setopt NO_GLOB_SUBST
+    fi
+
+    cfgfile="${ofile}T"
+    trap "$RM \"$cfgfile\"; exit 1" 1 2 15
+    $RM "$cfgfile"
+
+    cat <<_LT_EOF >> "$cfgfile"
+#! $SHELL
+
+# `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services.
+# Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION
+# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`:
+# NOTE: Changes made to this file will be lost: look at ltmain.sh.
+#
+_LT_COPYING
+_LT_LIBTOOL_TAGS
+
+# ### BEGIN LIBTOOL CONFIG
+_LT_LIBTOOL_CONFIG_VARS
+_LT_LIBTOOL_TAG_VARS
+# ### END LIBTOOL CONFIG
+
+_LT_EOF
+
+  case $host_os in
+  aix3*)
+    cat <<\_LT_EOF >> "$cfgfile"
+# AIX sometimes has problems with the GCC collect2 program.  For some
+# reason, if we set the COLLECT_NAMES environment variable, the problems
+# vanish in a puff of smoke.
+if test "X${COLLECT_NAMES+set}" != Xset; then
+  COLLECT_NAMES=
+  export COLLECT_NAMES
+fi
+_LT_EOF
+    ;;
+  esac
+
+  _LT_PROG_LTMAIN
+
+  # We use sed instead of cat because bash on DJGPP gets confused if
+  # it finds mixed CR/LF and LF-only lines.  Since sed operates in
+  # text mode, it properly converts lines to CR/LF.  This bash problem
+  # is reportedly fixed, but why not run on old versions too?
+  sed '$q' "$ltmain" >> "$cfgfile" \
+     || (rm -f "$cfgfile"; exit 1)
+
+  _LT_PROG_REPLACE_SHELLFNS
+
+   mv -f "$cfgfile" "$ofile" ||
+    (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile")
+  chmod +x "$ofile"
+],
+[cat <<_LT_EOF >> "$ofile"
+
+dnl Unfortunately we have to use $1 here, since _LT_TAG is not expanded
+dnl in a comment (ie after a #).
+# ### BEGIN LIBTOOL TAG CONFIG: $1
+_LT_LIBTOOL_TAG_VARS(_LT_TAG)
+# ### END LIBTOOL TAG CONFIG: $1
+_LT_EOF
+])dnl /m4_if
+],
+[m4_if([$1], [], [
+    PACKAGE='$PACKAGE'
+    VERSION='$VERSION'
+    TIMESTAMP='$TIMESTAMP'
+    RM='$RM'
+    ofile='$ofile'], [])
+])dnl /_LT_CONFIG_SAVE_COMMANDS
+])# _LT_CONFIG
+
+
+# LT_SUPPORTED_TAG(TAG)
+# ---------------------
+# Trace this macro to discover what tags are supported by the libtool
+# --tag option, using:
+#    autoconf --trace 'LT_SUPPORTED_TAG:$1'
+AC_DEFUN([LT_SUPPORTED_TAG], [])
+
+
+# C support is built-in for now
+m4_define([_LT_LANG_C_enabled], [])
+m4_define([_LT_TAGS], [])
+
+
+# LT_LANG(LANG)
+# -------------
+# Enable libtool support for the given language if not already enabled.
+AC_DEFUN([LT_LANG],
+[AC_BEFORE([$0], [LT_OUTPUT])dnl
+m4_case([$1],
+  [C],			[_LT_LANG(C)],
+  [C++],		[_LT_LANG(CXX)],
+  [Go],			[_LT_LANG(GO)],
+  [Java],		[_LT_LANG(GCJ)],
+  [Fortran 77],		[_LT_LANG(F77)],
+  [Fortran],		[_LT_LANG(FC)],
+  [Windows Resource],	[_LT_LANG(RC)],
+  [m4_ifdef([_LT_LANG_]$1[_CONFIG],
+    [_LT_LANG($1)],
+    [m4_fatal([$0: unsupported language: "$1"])])])dnl
+])# LT_LANG
+
+
+# _LT_LANG(LANGNAME)
+# ------------------
+m4_defun([_LT_LANG],
+[m4_ifdef([_LT_LANG_]$1[_enabled], [],
+  [LT_SUPPORTED_TAG([$1])dnl
+  m4_append([_LT_TAGS], [$1 ])dnl
+  m4_define([_LT_LANG_]$1[_enabled], [])dnl
+  _LT_LANG_$1_CONFIG($1)])dnl
+])# _LT_LANG
+
+
+m4_ifndef([AC_PROG_GO], [
+# NOTE: This macro has been submitted for inclusion into   #
+#  GNU Autoconf as AC_PROG_GO.  When it is available in    #
+#  a released version of Autoconf we should remove this    #
+#  macro and use it instead.                               #
+m4_defun([AC_PROG_GO],
+[AC_LANG_PUSH(Go)dnl
+AC_ARG_VAR([GOC],     [Go compiler command])dnl
+AC_ARG_VAR([GOFLAGS], [Go compiler flags])dnl
+_AC_ARG_VAR_LDFLAGS()dnl
+AC_CHECK_TOOL(GOC, gccgo)
+if test -z "$GOC"; then
+  if test -n "$ac_tool_prefix"; then
+    AC_CHECK_PROG(GOC, [${ac_tool_prefix}gccgo], [${ac_tool_prefix}gccgo])
+  fi
+fi
+if test -z "$GOC"; then
+  AC_CHECK_PROG(GOC, gccgo, gccgo, false)
+fi
+])#m4_defun
+])#m4_ifndef
+
+
+# _LT_LANG_DEFAULT_CONFIG
+# -----------------------
+m4_defun([_LT_LANG_DEFAULT_CONFIG],
+[AC_PROVIDE_IFELSE([AC_PROG_CXX],
+  [LT_LANG(CXX)],
+  [m4_define([AC_PROG_CXX], defn([AC_PROG_CXX])[LT_LANG(CXX)])])
+
+AC_PROVIDE_IFELSE([AC_PROG_F77],
+  [LT_LANG(F77)],
+  [m4_define([AC_PROG_F77], defn([AC_PROG_F77])[LT_LANG(F77)])])
+
+AC_PROVIDE_IFELSE([AC_PROG_FC],
+  [LT_LANG(FC)],
+  [m4_define([AC_PROG_FC], defn([AC_PROG_FC])[LT_LANG(FC)])])
+
+dnl The call to [A][M_PROG_GCJ] is quoted like that to stop aclocal
+dnl pulling things in needlessly.
+AC_PROVIDE_IFELSE([AC_PROG_GCJ],
+  [LT_LANG(GCJ)],
+  [AC_PROVIDE_IFELSE([A][M_PROG_GCJ],
+    [LT_LANG(GCJ)],
+    [AC_PROVIDE_IFELSE([LT_PROG_GCJ],
+      [LT_LANG(GCJ)],
+      [m4_ifdef([AC_PROG_GCJ],
+	[m4_define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[LT_LANG(GCJ)])])
+       m4_ifdef([A][M_PROG_GCJ],
+	[m4_define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[LT_LANG(GCJ)])])
+       m4_ifdef([LT_PROG_GCJ],
+	[m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])])
+
+AC_PROVIDE_IFELSE([AC_PROG_GO],
+  [LT_LANG(GO)],
+  [m4_define([AC_PROG_GO], defn([AC_PROG_GO])[LT_LANG(GO)])])
+
+AC_PROVIDE_IFELSE([LT_PROG_RC],
+  [LT_LANG(RC)],
+  [m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])])
+])# _LT_LANG_DEFAULT_CONFIG
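+
+# In other words, a configure.ac that calls AC_PROG_CXX after LT_INIT still
+# gets the CXX tag: the redefinition above effectively turns the call into
+# (a sketch of the expansion, for illustration only):
+#    AC_PROG_CXX  ->  <original AC_PROG_CXX body> LT_LANG(CXX)
+# so no explicit LT_LANG call is needed for compilers probed by Autoconf.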
+
+# Obsolete macros:
+AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)])
+AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)])
+AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)])
+AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)])
+AU_DEFUN([AC_LIBTOOL_RC], [LT_LANG(Windows Resource)])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_CXX], [])
+dnl AC_DEFUN([AC_LIBTOOL_F77], [])
+dnl AC_DEFUN([AC_LIBTOOL_FC], [])
+dnl AC_DEFUN([AC_LIBTOOL_GCJ], [])
+dnl AC_DEFUN([AC_LIBTOOL_RC], [])
+
+
+# _LT_TAG_COMPILER
+# ----------------
+m4_defun([_LT_TAG_COMPILER],
+[AC_REQUIRE([AC_PROG_CC])dnl
+
+_LT_DECL([LTCC], [CC], [1], [A C compiler])dnl
+_LT_DECL([LTCFLAGS], [CFLAGS], [1], [LTCC compiler flags])dnl
+_LT_TAGDECL([CC], [compiler], [1], [A language specific compiler])dnl
+_LT_TAGDECL([with_gcc], [GCC], [0], [Is the compiler the GNU compiler?])dnl
+
+# If no C compiler was specified, use CC.
+LTCC=${LTCC-"$CC"}
+
+# If no C compiler flags were specified, use CFLAGS.
+LTCFLAGS=${LTCFLAGS-"$CFLAGS"}
+
+# Allow CC to be a program name with arguments.
+compiler=$CC
+])# _LT_TAG_COMPILER
+
+
+# _LT_COMPILER_BOILERPLATE
+# ------------------------
+# Check for compiler boilerplate output or warnings with
+# the simple compiler test code.
+m4_defun([_LT_COMPILER_BOILERPLATE],
+[m4_require([_LT_DECL_SED])dnl
+ac_outfile=conftest.$ac_objext
+echo "$lt_simple_compile_test_code" >conftest.$ac_ext
+eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_compiler_boilerplate=`cat conftest.err`
+$RM conftest*
+])# _LT_COMPILER_BOILERPLATE
+
+
+# _LT_LINKER_BOILERPLATE
+# ----------------------
+# Check for linker boilerplate output or warnings with
+# the simple link test code.
+m4_defun([_LT_LINKER_BOILERPLATE],
+[m4_require([_LT_DECL_SED])dnl
+ac_outfile=conftest.$ac_objext
+echo "$lt_simple_link_test_code" >conftest.$ac_ext
+eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_linker_boilerplate=`cat conftest.err`
+$RM -r conftest*
+])# _LT_LINKER_BOILERPLATE
+
+# _LT_REQUIRED_DARWIN_CHECKS
+# -------------------------
+m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[
+  case $host_os in
+    rhapsody* | darwin*)
+    AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:])
+    AC_CHECK_TOOL([NMEDIT], [nmedit], [:])
+    AC_CHECK_TOOL([LIPO], [lipo], [:])
+    AC_CHECK_TOOL([OTOOL], [otool], [:])
+    AC_CHECK_TOOL([OTOOL64], [otool64], [:])
+    _LT_DECL([], [DSYMUTIL], [1],
+      [Tool to manipulate archived DWARF debug symbol files on Mac OS X])
+    _LT_DECL([], [NMEDIT], [1],
+      [Tool to change global to local symbols on Mac OS X])
+    _LT_DECL([], [LIPO], [1],
+      [Tool to manipulate fat objects and archives on Mac OS X])
+    _LT_DECL([], [OTOOL], [1],
+      [ldd/readelf-like tool for Mach-O binaries on Mac OS X])
+    _LT_DECL([], [OTOOL64], [1],
+      [ldd/readelf-like tool for 64-bit Mach-O binaries on Mac OS X 10.4])
+
+    AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod],
+      [lt_cv_apple_cc_single_mod=no
+      if test -z "${LT_MULTI_MODULE}"; then
+	# By default we will add the -single_module flag. You can override
+	# this by setting the environment variable LT_MULTI_MODULE to a
+	# non-empty value at configure time, or by adding -multi_module to
+	# the link flags.
+	rm -rf libconftest.dylib*
+	echo "int foo(void){return 1;}" > conftest.c
+	echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
+-dynamiclib -Wl,-single_module conftest.c" >&AS_MESSAGE_LOG_FD
+	$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
+	  -dynamiclib -Wl,-single_module conftest.c 2>conftest.err
+        _lt_result=$?
+	# If there is a non-empty error log, and "single_module"
+	# appears in it, assume the flag caused a linker warning
+        if test -s conftest.err && $GREP single_module conftest.err; then
+	  cat conftest.err >&AS_MESSAGE_LOG_FD
+	# Otherwise, if the output was created with a 0 exit code from
+	# the compiler, it worked.
+	elif test -f libconftest.dylib && test $_lt_result -eq 0; then
+	  lt_cv_apple_cc_single_mod=yes
+	else
+	  cat conftest.err >&AS_MESSAGE_LOG_FD
+	fi
+	rm -rf libconftest.dylib*
+	rm -f conftest.*
+      fi])
+
+    AC_CACHE_CHECK([for -exported_symbols_list linker flag],
+      [lt_cv_ld_exported_symbols_list],
+      [lt_cv_ld_exported_symbols_list=no
+      save_LDFLAGS=$LDFLAGS
+      echo "_main" > conftest.sym
+      LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym"
+      AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])],
+	[lt_cv_ld_exported_symbols_list=yes],
+	[lt_cv_ld_exported_symbols_list=no])
+	LDFLAGS="$save_LDFLAGS"
+    ])
+
+    AC_CACHE_CHECK([for -force_load linker flag],[lt_cv_ld_force_load],
+      [lt_cv_ld_force_load=no
+      cat > conftest.c << _LT_EOF
+int forced_loaded() { return 2;}
+_LT_EOF
+      echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&AS_MESSAGE_LOG_FD
+      $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&AS_MESSAGE_LOG_FD
+      echo "$AR cru libconftest.a conftest.o" >&AS_MESSAGE_LOG_FD
+      $AR cru libconftest.a conftest.o 2>&AS_MESSAGE_LOG_FD
+      echo "$RANLIB libconftest.a" >&AS_MESSAGE_LOG_FD
+      $RANLIB libconftest.a 2>&AS_MESSAGE_LOG_FD
+      cat > conftest.c << _LT_EOF
+int main() { return 0;}
+_LT_EOF
+      echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&AS_MESSAGE_LOG_FD
+      $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err
+      _lt_result=$?
+      if test -s conftest.err && $GREP force_load conftest.err; then
+	cat conftest.err >&AS_MESSAGE_LOG_FD
+      elif test -f conftest && test $_lt_result -eq 0 && $GREP forced_load conftest >/dev/null 2>&1 ; then
+	lt_cv_ld_force_load=yes
+      else
+	cat conftest.err >&AS_MESSAGE_LOG_FD
+      fi
+        rm -f conftest.err libconftest.a conftest conftest.c
+        rm -rf conftest.dSYM
+    ])
+    case $host_os in
+    rhapsody* | darwin1.[[012]])
+      _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;;
+    darwin1.*)
+      _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;;
+    darwin*) # darwin 5.x on
+      # If running on 10.5 or later, the deployment target defaults
+      # to the OS version if on x86; if running on 10.4, the deployment
+      # target defaults to 10.4. Don't you love it?
+      case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in
+	10.0,*86*-darwin8*|10.0,*-darwin[[91]]*)
+	  _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;;
+	10.[[012]]*)
+	  _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;;
+	10.*)
+	  _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;;
+      esac
+    ;;
+  esac
+    if test "$lt_cv_apple_cc_single_mod" = "yes"; then
+      _lt_dar_single_mod='$single_module'
+    fi
+    if test "$lt_cv_ld_exported_symbols_list" = "yes"; then
+      _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym'
+    else
+      _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}'
+    fi
+    if test "$DSYMUTIL" != ":" && test "$lt_cv_ld_force_load" = "no"; then
+      _lt_dsymutil='~$DSYMUTIL $lib || :'
+    else
+      _lt_dsymutil=
+    fi
+    ;;
+  esac
+])
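+
+# The -single_module probe above boils down to linking a one-function dylib;
+# reproduced by hand it would look like this (a sketch, assuming cc is a
+# Darwin compiler driver):
+#    echo 'int foo(void){return 1;}' > conftest.c
+#    cc -o libconftest.dylib -dynamiclib -Wl,-single_module conftest.c \
+#      2>conftest.err
+#    test -f libconftest.dylib && ! grep single_module conftest.err && \
+#      echo "-single_module works"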
+
+
+# _LT_DARWIN_LINKER_FEATURES([TAG])
+# ---------------------------------
+# Checks for linker and compiler features on darwin
+m4_defun([_LT_DARWIN_LINKER_FEATURES],
+[
+  m4_require([_LT_REQUIRED_DARWIN_CHECKS])
+  _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+  _LT_TAGVAR(hardcode_direct, $1)=no
+  _LT_TAGVAR(hardcode_automatic, $1)=yes
+  _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported
+  if test "$lt_cv_ld_force_load" = "yes"; then
+    _LT_TAGVAR(whole_archive_flag_spec, $1)='`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`'
+    m4_case([$1], [F77], [_LT_TAGVAR(compiler_needs_object, $1)=yes],
+                  [FC],  [_LT_TAGVAR(compiler_needs_object, $1)=yes])
+  else
+    _LT_TAGVAR(whole_archive_flag_spec, $1)=''
+  fi
+  _LT_TAGVAR(link_all_deplibs, $1)=yes
+  _LT_TAGVAR(allow_undefined_flag, $1)="$_lt_dar_allow_undefined"
+  case $cc_basename in
+     ifort*) _lt_dar_can_shared=yes ;;
+     *) _lt_dar_can_shared=$GCC ;;
+  esac
+  if test "$_lt_dar_can_shared" = "yes"; then
+    output_verbose_link_cmd=func_echo_all
+    _LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}"
+    _LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}"
+    _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}"
+    _LT_TAGVAR(module_expsym_cmds, $1)="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}"
+    m4_if([$1], [CXX],
+[   if test "$lt_cv_apple_cc_single_mod" != "yes"; then
+      _LT_TAGVAR(archive_cmds, $1)="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}"
+      _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}"
+    fi
+],[])
+  else
+  _LT_TAGVAR(ld_shlibs, $1)=no
+  fi
+])
+
+# _LT_SYS_MODULE_PATH_AIX([TAGNAME])
+# ----------------------------------
+# Links a minimal program and checks the executable
+# for the system default hardcoded library path. In most cases
+# this is /usr/lib:/lib, but when the MPI compilers are used
+# the locations of the communication and MPI libs are included too.
+# If we don't find anything, use the default library path according
+# to the AIX ld manual.
+# Store the results from the different compilers for each TAGNAME.
+# Allow overriding them for all tags through lt_cv_aix_libpath.
+m4_defun([_LT_SYS_MODULE_PATH_AIX],
+[m4_require([_LT_DECL_SED])dnl
+if test "${lt_cv_aix_libpath+set}" = set; then
+  aix_libpath=$lt_cv_aix_libpath
+else
+  AC_CACHE_VAL([_LT_TAGVAR([lt_cv_aix_libpath_], [$1])],
+  [AC_LINK_IFELSE([AC_LANG_PROGRAM],[
+  lt_aix_libpath_sed='[
+      /Import File Strings/,/^$/ {
+	  /^0/ {
+	      s/^0  *\([^ ]*\) *$/\1/
+	      p
+	  }
+      }]'
+  _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  # Check for a 64-bit object if we didn't find anything.
+  if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then
+    _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  fi],[])
+  if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then
+    _LT_TAGVAR([lt_cv_aix_libpath_], [$1])="/usr/lib:/lib"
+  fi
+  ])
+  aix_libpath=$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])
+fi
+])# _LT_SYS_MODULE_PATH_AIX
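+
+# By hand, the same dump/sed pipeline can be used to inspect the hardcoded
+# library path of any AIX executable (a sketch, assuming a linked ./conftest;
+# sed dialects vary, so treat it as illustrative):
+#    dump -H conftest 2>/dev/null | sed -n \
+#      '/Import File Strings/,/^$/{ /^0/{ s/^0  *\([^ ]*\) *$/\1/; p; } }'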
+
+
+# _LT_SHELL_INIT(ARG)
+# -------------------
+m4_define([_LT_SHELL_INIT],
+[m4_divert_text([M4SH-INIT], [$1
+])])# _LT_SHELL_INIT
+
+
+
+# _LT_PROG_ECHO_BACKSLASH
+# -----------------------
+# Find how we can fake an echo command that does not interpret backslashes.
+# In particular, with Autoconf 2.60 or later we add some code to the start
+# of the generated configure script which will find a shell with a builtin
+# printf (which we can use as an echo command).
+m4_defun([_LT_PROG_ECHO_BACKSLASH],
+[ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO
+ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO
+
+AC_MSG_CHECKING([how to print strings])
+# Test print first, because it will be a builtin if present.
+if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \
+   test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then
+  ECHO='print -r --'
+elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then
+  ECHO='printf %s\n'
+else
+  # Use this function as a fallback that always works.
+  func_fallback_echo ()
+  {
+    eval 'cat <<_LTECHO_EOF
+$[]1
+_LTECHO_EOF'
+  }
+  ECHO='func_fallback_echo'
+fi
+
+# func_echo_all arg...
+# Invoke $ECHO with all args, space-separated.
+func_echo_all ()
+{
+    $ECHO "$*" 
+}
+
+case "$ECHO" in
+  printf*) AC_MSG_RESULT([printf]) ;;
+  print*) AC_MSG_RESULT([print -r]) ;;
+  *) AC_MSG_RESULT([cat]) ;;
+esac
+
+m4_ifdef([_AS_DETECT_SUGGESTED],
+[_AS_DETECT_SUGGESTED([
+  test -n "${ZSH_VERSION+set}${BASH_VERSION+set}" || (
+    ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+    ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO
+    ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO
+    PATH=/empty FPATH=/empty; export PATH FPATH
+    test "X`printf %s $ECHO`" = "X$ECHO" \
+      || test "X`print -r -- $ECHO`" = "X$ECHO" )])])
+
+_LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts])
+_LT_DECL([], [ECHO], [1], [An echo program that protects backslashes])
+])# _LT_PROG_ECHO_BACKSLASH
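+
+# The difference being probed above, in a shell whose echo interprets
+# backslashes (dash, for instance; an assumed example):
+#    echo 'a\nb'            # may print two lines: the backslash is expanded
+#    printf '%s\n' 'a\nb'   # always prints the literal a\nb on one line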
+
+
+# _LT_WITH_SYSROOT
+# ----------------
+AC_DEFUN([_LT_WITH_SYSROOT],
+[AC_MSG_CHECKING([for sysroot])
+AC_ARG_WITH([sysroot],
+[  --with-sysroot[=DIR] Search for dependent libraries within DIR
+                        (or the compiler's sysroot if not specified).],
+[], [with_sysroot=no])
+
+dnl lt_sysroot will always be passed unquoted.  We quote it here
+dnl in case the user passed a directory name.
+lt_sysroot=
+case ${with_sysroot} in #(
+ yes)
+   if test "$GCC" = yes; then
+     lt_sysroot=`$CC --print-sysroot 2>/dev/null`
+   fi
+   ;; #(
+ /*)
+   lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"`
+   ;; #(
+ no|'')
+   ;; #(
+ *)
+   AC_MSG_RESULT([${with_sysroot}])
+   AC_MSG_ERROR([The sysroot must be an absolute path.])
+   ;;
+esac
+
+ AC_MSG_RESULT([${lt_sysroot:-no}])
+_LT_DECL([], [lt_sysroot], [0], [The root where to search for ]dnl
+[dependent libraries, and in which our libraries should be installed.])])
+
+# _LT_ENABLE_LOCK
+# ---------------
+m4_defun([_LT_ENABLE_LOCK],
+[AC_ARG_ENABLE([libtool-lock],
+  [AS_HELP_STRING([--disable-libtool-lock],
+    [avoid locking (might break parallel builds)])])
+test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes
+
+# Some flags need to be propagated to the compiler or linker for good
+# libtool support.
+case $host in
+ia64-*-hpux*)
+  # Find out which ABI we are using.
+  echo 'int i;' > conftest.$ac_ext
+  if AC_TRY_EVAL(ac_compile); then
+    case `/usr/bin/file conftest.$ac_objext` in
+      *ELF-32*)
+	HPUX_IA64_MODE="32"
+	;;
+      *ELF-64*)
+	HPUX_IA64_MODE="64"
+	;;
+    esac
+  fi
+  rm -rf conftest*
+  ;;
+*-*-irix6*)
+  # Find out which ABI we are using.
+  echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext
+  if AC_TRY_EVAL(ac_compile); then
+    if test "$lt_cv_prog_gnu_ld" = yes; then
+      case `/usr/bin/file conftest.$ac_objext` in
+	*32-bit*)
+	  LD="${LD-ld} -melf32bsmip"
+	  ;;
+	*N32*)
+	  LD="${LD-ld} -melf32bmipn32"
+	  ;;
+	*64-bit*)
+	  LD="${LD-ld} -melf64bmip"
+	;;
+      esac
+    else
+      case `/usr/bin/file conftest.$ac_objext` in
+	*32-bit*)
+	  LD="${LD-ld} -32"
+	  ;;
+	*N32*)
+	  LD="${LD-ld} -n32"
+	  ;;
+	*64-bit*)
+	  LD="${LD-ld} -64"
+	  ;;
+      esac
+    fi
+  fi
+  rm -rf conftest*
+  ;;
+
+x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \
+s390*-*linux*|s390*-*tpf*|sparc*-*linux*)
+  # Find out which ABI we are using.
+  echo 'int i;' > conftest.$ac_ext
+  if AC_TRY_EVAL(ac_compile); then
+    case `/usr/bin/file conftest.o` in
+      *32-bit*)
+	case $host in
+	  x86_64-*kfreebsd*-gnu)
+	    LD="${LD-ld} -m elf_i386_fbsd"
+	    ;;
+	  x86_64-*linux*)
+	    LD="${LD-ld} -m elf_i386"
+	    ;;
+	  ppc64-*linux*|powerpc64-*linux*)
+	    LD="${LD-ld} -m elf32ppclinux"
+	    ;;
+	  s390x-*linux*)
+	    LD="${LD-ld} -m elf_s390"
+	    ;;
+	  sparc64-*linux*)
+	    LD="${LD-ld} -m elf32_sparc"
+	    ;;
+	esac
+	;;
+      *64-bit*)
+	case $host in
+	  x86_64-*kfreebsd*-gnu)
+	    LD="${LD-ld} -m elf_x86_64_fbsd"
+	    ;;
+	  x86_64-*linux*)
+	    LD="${LD-ld} -m elf_x86_64"
+	    ;;
+	  ppc*-*linux*|powerpc*-*linux*)
+	    LD="${LD-ld} -m elf64ppc"
+	    ;;
+	  s390*-*linux*|s390*-*tpf*)
+	    LD="${LD-ld} -m elf64_s390"
+	    ;;
+	  sparc*-*linux*)
+	    LD="${LD-ld} -m elf64_sparc"
+	    ;;
+	esac
+	;;
+    esac
+  fi
+  rm -rf conftest*
+  ;;
+
+*-*-sco3.2v5*)
+  # On SCO OpenServer 5, we need -belf to get full-featured binaries.
+  SAVE_CFLAGS="$CFLAGS"
+  CFLAGS="$CFLAGS -belf"
+  AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf,
+    [AC_LANG_PUSH(C)
+     AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no])
+     AC_LANG_POP])
+  if test x"$lt_cv_cc_needs_belf" != x"yes"; then
+    # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf
+    CFLAGS="$SAVE_CFLAGS"
+  fi
+  ;;
+*-*solaris*)
+  # Find out which ABI we are using.
+  echo 'int i;' > conftest.$ac_ext
+  if AC_TRY_EVAL(ac_compile); then
+    case `/usr/bin/file conftest.o` in
+    *64-bit*)
+      case $lt_cv_prog_gnu_ld in
+      yes*)
+        case $host in
+        i?86-*-solaris*)
+          LD="${LD-ld} -m elf_x86_64"
+          ;;
+        sparc*-*-solaris*)
+          LD="${LD-ld} -m elf64_sparc"
+          ;;
+        esac
+        # GNU ld 2.21 introduced _sol2 emulations.  Use them if available.
+        if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then
+          LD="${LD-ld}_sol2"
+        fi
+        ;;
+      *)
+	if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then
+	  LD="${LD-ld} -64"
+	fi
+	;;
+      esac
+      ;;
+    esac
+  fi
+  rm -rf conftest*
+  ;;
+esac
+
+need_locks="$enable_libtool_lock"
+])# _LT_ENABLE_LOCK
+
+
+# _LT_PROG_AR
+# -----------
+m4_defun([_LT_PROG_AR],
+[AC_CHECK_TOOLS(AR, [ar], false)
+: ${AR=ar}
+: ${AR_FLAGS=cru}
+_LT_DECL([], [AR], [1], [The archiver])
+_LT_DECL([], [AR_FLAGS], [1], [Flags to create an archive])
+
+AC_CACHE_CHECK([for archiver @FILE support], [lt_cv_ar_at_file],
+  [lt_cv_ar_at_file=no
+   AC_COMPILE_IFELSE([AC_LANG_PROGRAM],
+     [echo conftest.$ac_objext > conftest.lst
+      lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&AS_MESSAGE_LOG_FD'
+      AC_TRY_EVAL([lt_ar_try])
+      if test "$ac_status" -eq 0; then
+	# Ensure the archiver fails upon bogus file names.
+	rm -f conftest.$ac_objext libconftest.a
+	AC_TRY_EVAL([lt_ar_try])
+	if test "$ac_status" -ne 0; then
+          lt_cv_ar_at_file=@
+        fi
+      fi
+      rm -f conftest.* libconftest.a
+     ])
+  ])
+
+if test "x$lt_cv_ar_at_file" = xno; then
+  archiver_list_spec=
+else
+  archiver_list_spec=$lt_cv_ar_at_file
+fi
+_LT_DECL([], [archiver_list_spec], [1],
+  [How to feed a file listing to the archiver])
+])# _LT_PROG_AR
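+
+# What @FILE support means in practice (a sketch, assuming GNU ar and two
+# existing objects a.o and b.o):
+#    printf '%s\n' a.o b.o > objs.lst
+#    ar cru libdemo.a @objs.lst   # ar reads the member list from objs.lst
+#    ar t libdemo.a               # prints a.o and b.o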
+
+
+# _LT_CMD_OLD_ARCHIVE
+# -------------------
+m4_defun([_LT_CMD_OLD_ARCHIVE],
+[_LT_PROG_AR
+
+AC_CHECK_TOOL(STRIP, strip, :)
+test -z "$STRIP" && STRIP=:
+_LT_DECL([], [STRIP], [1], [A symbol stripping program])
+
+AC_CHECK_TOOL(RANLIB, ranlib, :)
+test -z "$RANLIB" && RANLIB=:
+_LT_DECL([], [RANLIB], [1],
+    [Commands used to install an old-style archive])
+
+# Determine commands to create old-style static archives.
+old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs'
+old_postinstall_cmds='chmod 644 $oldlib'
+old_postuninstall_cmds=
+
+if test -n "$RANLIB"; then
+  case $host_os in
+  openbsd*)
+    old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib"
+    ;;
+  *)
+    old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib"
+    ;;
+  esac
+  old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib"
+fi
+
+case $host_os in
+  darwin*)
+    lock_old_archive_extraction=yes ;;
+  *)
+    lock_old_archive_extraction=no ;;
+esac
+_LT_DECL([], [old_postinstall_cmds], [2])
+_LT_DECL([], [old_postuninstall_cmds], [2])
+_LT_TAGDECL([], [old_archive_cmds], [2],
+    [Commands used to build an old-style archive])
+_LT_DECL([], [lock_old_archive_extraction], [0],
+    [Whether to use a lock for old archive extraction])
+])# _LT_CMD_OLD_ARCHIVE
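+
+# With the defaults above, creating and installing an old-style archive
+# amounts to the following (a sketch, assuming ar and ranlib were found):
+#    ar cru libdemo.a a.o b.o     # old_archive_cmds
+#    ranlib libdemo.a             # appended when RANLIB is available
+#    chmod 644 libdemo.a          # old_postinstall_cmds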
+
+
+# _LT_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS,
+#		[OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE])
+# ----------------------------------------------------------------
+# Check whether the given compiler option works
+AC_DEFUN([_LT_COMPILER_OPTION],
+[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_DECL_SED])dnl
+AC_CACHE_CHECK([$1], [$2],
+  [$2=no
+   m4_if([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4])
+   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+   lt_compiler_flag="$3"
+   # Insert the option either (1) after the last *FLAGS variable, or
+   # (2) before a word containing "conftest.", or (3) at the end.
+   # Note that $ac_compile itself does not contain backslashes and begins
+   # with a dollar sign (not a hyphen), so the echo should work correctly.
+   # The option is referenced via a variable to avoid confusing sed.
+   lt_compile=`echo "$ac_compile" | $SED \
+   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+   -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \
+   -e 's:$: $lt_compiler_flag:'`
+   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD)
+   (eval "$lt_compile" 2>conftest.err)
+   ac_status=$?
+   cat conftest.err >&AS_MESSAGE_LOG_FD
+   echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
+   if (exit $ac_status) && test -s "$ac_outfile"; then
+     # The compiler can only warn and ignore the option if not recognized,
+     # so say no if there are warnings other than the usual output.
+     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp
+     $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+     if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
+       $2=yes
+     fi
+   fi
+   $RM conftest*
+])
+
+if test x"[$]$2" = xyes; then
+    m4_if([$5], , :, [$5])
+else
+    m4_if([$6], , :, [$6])
+fi
+])# _LT_COMPILER_OPTION
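+
+# As an illustration of the sed rules above, with a typical value such as
+# (an assumed example, not a value taken from any particular configure):
+#    ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+# rule (1) matches the last *FLAGS word, so the probed option is inserted
+# right after $CPPFLAGS and before conftest.$ac_ext.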
+
+# Old name:
+AU_ALIAS([AC_LIBTOOL_COMPILER_OPTION], [_LT_COMPILER_OPTION])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], [])
+
+
+# _LT_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS,
+#                  [ACTION-SUCCESS], [ACTION-FAILURE])
+# ----------------------------------------------------
+# Check whether the given linker option works
+AC_DEFUN([_LT_LINKER_OPTION],
+[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_DECL_SED])dnl
+AC_CACHE_CHECK([$1], [$2],
+  [$2=no
+   save_LDFLAGS="$LDFLAGS"
+   LDFLAGS="$LDFLAGS $3"
+   echo "$lt_simple_link_test_code" > conftest.$ac_ext
+   if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
+     # The linker can only warn and ignore the option if not recognized,
+     # so say no if there are warnings.
+     if test -s conftest.err; then
+       # Append any errors to the config.log.
+       cat conftest.err 1>&AS_MESSAGE_LOG_FD
+       $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp
+       $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+       if diff conftest.exp conftest.er2 >/dev/null; then
+         $2=yes
+       fi
+     else
+       $2=yes
+     fi
+   fi
+   $RM -r conftest*
+   LDFLAGS="$save_LDFLAGS"
+])
+
+if test x"[$]$2" = xyes; then
+    m4_if([$4], , :, [$4])
+else
+    m4_if([$5], , :, [$5])
+fi
+])# _LT_LINKER_OPTION
+
+# Old name:
+AU_ALIAS([AC_LIBTOOL_LINKER_OPTION], [_LT_LINKER_OPTION])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], [])
+
+
+# LT_CMD_MAX_LEN
+#---------------
+AC_DEFUN([LT_CMD_MAX_LEN],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+# find the maximum length of command line arguments
+AC_MSG_CHECKING([the maximum length of command line arguments])
+AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl
+  i=0
+  teststring="ABCD"
+
+  case $build_os in
+  msdosdjgpp*)
+    # On DJGPP, this test can blow up pretty badly due to problems in libc
+    # (any single argument exceeding 2000 bytes causes a buffer overrun
+    # during glob expansion).  Even if it were fixed, the result of this
+    # check would be larger than it should be.
+    lt_cv_sys_max_cmd_len=12288;    # 12K is about right
+    ;;
+
+  gnu*)
+    # Under GNU Hurd, this test is not required because there is
+    # no limit to the length of command line arguments.
+    # Libtool will interpret -1 as no limit whatsoever.
+    lt_cv_sys_max_cmd_len=-1;
+    ;;
+
+  cygwin* | mingw* | cegcc*)
+    # On Win9x/ME, this test blows up -- it succeeds, but takes
+    # about 5 minutes as the teststring grows exponentially.
+    # Worse, since 9x/ME are not pre-emptively multitasking,
+    # you end up with a "frozen" computer, even though with patience
+    # the test eventually succeeds (with a max line length of 256k).
+    # Instead, let's just punt: use the minimum linelength reported by
+    # all of the supported platforms: 8192 (on NT/2K/XP).
+    lt_cv_sys_max_cmd_len=8192;
+    ;;
+
+  mint*)
+    # On MiNT this can take a long time and run out of memory.
+    lt_cv_sys_max_cmd_len=8192;
+    ;;
+
+  amigaos*)
+    # On AmigaOS with pdksh, this test takes hours, literally.
+    # So we just punt and use a minimum line length of 8192.
+    lt_cv_sys_max_cmd_len=8192;
+    ;;
+
+  netbsd* | freebsd* | openbsd* | darwin* | dragonfly*)
+    # This has been around since 386BSD, at least.  Likely further.
+    if test -x /sbin/sysctl; then
+      lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax`
+    elif test -x /usr/sbin/sysctl; then
+      lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax`
+    else
+      lt_cv_sys_max_cmd_len=65536	# usable default for all BSDs
+    fi
+    # And add a safety zone
+    lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
+    lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
+    ;;
+
+  interix*)
+    # We know the value 262144 and hardcode it with a safety zone (like BSD)
+    lt_cv_sys_max_cmd_len=196608
+    ;;
+
+  os2*)
+    # The test takes a long time on OS/2.
+    lt_cv_sys_max_cmd_len=8192
+    ;;
+
+  osf*)
+    # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure
+    # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not
+    # nice to cause kernel panics, so let's avoid the loop below.
+    # First set a reasonable default.
+    lt_cv_sys_max_cmd_len=16384
+    #
+    if test -x /sbin/sysconfig; then
+      case `/sbin/sysconfig -q proc exec_disable_arg_limit` in
+        *1*) lt_cv_sys_max_cmd_len=-1 ;;
+      esac
+    fi
+    ;;
+  sco3.2v5*)
+    lt_cv_sys_max_cmd_len=102400
+    ;;
+  sysv5* | sco5v6* | sysv4.2uw2*)
+    kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null`
+    if test -n "$kargmax"; then
+      lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[[	 ]]//'`
+    else
+      lt_cv_sys_max_cmd_len=32768
+    fi
+    ;;
+  *)
+    lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null`
+    if test -n "$lt_cv_sys_max_cmd_len"; then
+      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
+      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
+    else
+      # Make teststring a little bigger before we do anything with it.
+      # A 1K string should be a reasonable start.
+      for i in 1 2 3 4 5 6 7 8 ; do
+        teststring=$teststring$teststring
+      done
+      SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}}
+      # If test is not a shell built-in, we'll probably end up computing a
+      # maximum length that is only half of the actual maximum length, but
+      # we can't tell.
+      while { test "X"`env echo "$teststring$teststring" 2>/dev/null` \
+	         = "X$teststring$teststring"; } >/dev/null 2>&1 &&
+	      test $i != 17 # 1/2 MB should be enough
+      do
+        i=`expr $i + 1`
+        teststring=$teststring$teststring
+      done
+      # Only check the string length outside the loop.
+      lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1`
+      teststring=
+      # Add a significant safety factor because C++ compilers can tack on
+      # massive amounts of additional arguments before passing them to the
+      # linker.  It appears as though 1/2 is a usable value.
+      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2`
+    fi
+    ;;
+  esac
+])
+if test -n "$lt_cv_sys_max_cmd_len"; then
+  AC_MSG_RESULT($lt_cv_sys_max_cmd_len)
+else
+  AC_MSG_RESULT(none)
+fi
+max_cmd_len=$lt_cv_sys_max_cmd_len
+_LT_DECL([], [max_cmd_len], [0],
+    [What is the maximum length of a command?])
+])# LT_CMD_MAX_LEN
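+
+# On a typical modern system the generic branch reduces to (a sketch,
+# assuming getconf is available):
+#    len=`getconf ARG_MAX`    # e.g. 2097152 on many Linux systems
+#    len=`expr $len \/ 4`     # keep a safety margin:
+#    len=`expr $len \* 3`     # use 3/4 of the reported maximum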
+
+# Old name:
+AU_ALIAS([AC_LIBTOOL_SYS_MAX_CMD_LEN], [LT_CMD_MAX_LEN])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], [])
+
+
+# _LT_HEADER_DLFCN
+# ----------------
+m4_defun([_LT_HEADER_DLFCN],
+[AC_CHECK_HEADERS([dlfcn.h], [], [], [AC_INCLUDES_DEFAULT])dnl
+])# _LT_HEADER_DLFCN
+
+
+# _LT_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE,
+#                      ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING)
+# ----------------------------------------------------------------
+m4_defun([_LT_TRY_DLOPEN_SELF],
+[m4_require([_LT_HEADER_DLFCN])dnl
+if test "$cross_compiling" = yes; then :
+  [$4]
+else
+  lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+  lt_status=$lt_dlunknown
+  cat > conftest.$ac_ext <<_LT_EOF
+[#line $LINENO "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+#  define LT_DLGLOBAL		RTLD_GLOBAL
+#else
+#  ifdef DL_GLOBAL
+#    define LT_DLGLOBAL		DL_GLOBAL
+#  else
+#    define LT_DLGLOBAL		0
+#  endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW in the command line if we
+   find out it does not work on some platform. */
+#ifndef LT_DLLAZY_OR_NOW
+#  ifdef RTLD_LAZY
+#    define LT_DLLAZY_OR_NOW		RTLD_LAZY
+#  else
+#    ifdef DL_LAZY
+#      define LT_DLLAZY_OR_NOW		DL_LAZY
+#    else
+#      ifdef RTLD_NOW
+#        define LT_DLLAZY_OR_NOW	RTLD_NOW
+#      else
+#        ifdef DL_NOW
+#          define LT_DLLAZY_OR_NOW	DL_NOW
+#        else
+#          define LT_DLLAZY_OR_NOW	0
+#        endif
+#      endif
+#    endif
+#  endif
+#endif
+
+/* When -fvisibility=hidden is used, assume the code has been annotated
+   correspondingly for the symbols needed.  */
+#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
+int fnord () __attribute__((visibility("default")));
+#endif
+
+int fnord () { return 42; }
+int main ()
+{
+  void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+  int status = $lt_dlunknown;
+
+  if (self)
+    {
+      if (dlsym (self,"fnord"))       status = $lt_dlno_uscore;
+      else
+        {
+	  if (dlsym( self,"_fnord"))  status = $lt_dlneed_uscore;
+          else puts (dlerror ());
+	}
+      /* dlclose (self); */
+    }
+  else
+    puts (dlerror ());
+
+  return status;
+}]
+_LT_EOF
+  if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} 2>/dev/null; then
+    (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null
+    lt_status=$?
+    case x$lt_status in
+      x$lt_dlno_uscore) $1 ;;
+      x$lt_dlneed_uscore) $2 ;;
+      x$lt_dlunknown|x*) $3 ;;
+    esac
+  else :
+    # compilation failed
+    $3
+  fi
+fi
+rm -fr conftest*
+])# _LT_TRY_DLOPEN_SELF
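+
+# The three status codes map directly onto the dlsym probes in the embedded
+# program; run by hand against the conftest.c the macro writes, it would
+# look like this (a sketch, assuming cc and -ldl):
+#    cc -o conftest conftest.c -ldl && ./conftest; echo $?
+#    # 1: "fnord" found, no underscore prefix needed
+#    # 2: "_fnord" found, underscore prefix needed
+#    # 0: neither found, dlopening self did not work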
+
+
+# LT_SYS_DLOPEN_SELF
+# ------------------
+AC_DEFUN([LT_SYS_DLOPEN_SELF],
+[m4_require([_LT_HEADER_DLFCN])dnl
+if test "x$enable_dlopen" != xyes; then
+  enable_dlopen=unknown
+  enable_dlopen_self=unknown
+  enable_dlopen_self_static=unknown
+else
+  lt_cv_dlopen=no
+  lt_cv_dlopen_libs=
+
+  case $host_os in
+  beos*)
+    lt_cv_dlopen="load_add_on"
+    lt_cv_dlopen_libs=
+    lt_cv_dlopen_self=yes
+    ;;
+
+  mingw* | pw32* | cegcc*)
+    lt_cv_dlopen="LoadLibrary"
+    lt_cv_dlopen_libs=
+    ;;
+
+  cygwin*)
+    lt_cv_dlopen="dlopen"
+    lt_cv_dlopen_libs=
+    ;;
+
+  darwin*)
+  # If libdl is installed, we need to link against it.
+    AC_CHECK_LIB([dl], [dlopen],
+		[lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],[
+    lt_cv_dlopen="dyld"
+    lt_cv_dlopen_libs=
+    lt_cv_dlopen_self=yes
+    ])
+    ;;
+
+  *)
+    AC_CHECK_FUNC([shl_load],
+	  [lt_cv_dlopen="shl_load"],
+      [AC_CHECK_LIB([dld], [shl_load],
+	    [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld"],
+	[AC_CHECK_FUNC([dlopen],
+	      [lt_cv_dlopen="dlopen"],
+	  [AC_CHECK_LIB([dl], [dlopen],
+		[lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],
+	    [AC_CHECK_LIB([svld], [dlopen],
+		  [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"],
+	      [AC_CHECK_LIB([dld], [dld_link],
+		    [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld"])
+	      ])
+	    ])
+	  ])
+	])
+      ])
+    ;;
+  esac
+
+  if test "x$lt_cv_dlopen" != xno; then
+    enable_dlopen=yes
+  else
+    enable_dlopen=no
+  fi
+
+  case $lt_cv_dlopen in
+  dlopen)
+    save_CPPFLAGS="$CPPFLAGS"
+    test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H"
+
+    save_LDFLAGS="$LDFLAGS"
+    wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\"
+
+    save_LIBS="$LIBS"
+    LIBS="$lt_cv_dlopen_libs $LIBS"
+
+    AC_CACHE_CHECK([whether a program can dlopen itself],
+	  lt_cv_dlopen_self, [dnl
+	  _LT_TRY_DLOPEN_SELF(
+	    lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes,
+	    lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross)
+    ])
+
+    if test "x$lt_cv_dlopen_self" = xyes; then
+      wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\"
+      AC_CACHE_CHECK([whether a statically linked program can dlopen itself],
+	  lt_cv_dlopen_self_static, [dnl
+	  _LT_TRY_DLOPEN_SELF(
+	    lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes,
+	    lt_cv_dlopen_self_static=no,  lt_cv_dlopen_self_static=cross)
+      ])
+    fi
+
+    CPPFLAGS="$save_CPPFLAGS"
+    LDFLAGS="$save_LDFLAGS"
+    LIBS="$save_LIBS"
+    ;;
+  esac
+
+  case $lt_cv_dlopen_self in
+  yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;;
+  *) enable_dlopen_self=unknown ;;
+  esac
+
+  case $lt_cv_dlopen_self_static in
+  yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;;
+  *) enable_dlopen_self_static=unknown ;;
+  esac
+fi
+_LT_DECL([dlopen_support], [enable_dlopen], [0],
+	 [Whether dlopen is supported])
+_LT_DECL([dlopen_self], [enable_dlopen_self], [0],
+	 [Whether dlopen of programs is supported])
+_LT_DECL([dlopen_self_static], [enable_dlopen_self_static], [0],
+	 [Whether dlopen of statically linked programs is supported])
+])# LT_SYS_DLOPEN_SELF
+
+# Old name:
+AU_ALIAS([AC_LIBTOOL_DLOPEN_SELF], [LT_SYS_DLOPEN_SELF])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], [])
+
+
+# _LT_COMPILER_C_O([TAGNAME])
+# ---------------------------
+# Check to see if options -c and -o are simultaneously supported by compiler.
+# This macro does not hard code the compiler like AC_PROG_CC_C_O.
+m4_defun([_LT_COMPILER_C_O],
+[m4_require([_LT_DECL_SED])dnl
+m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_TAG_COMPILER])dnl
+AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext],
+  [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)],
+  [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no
+   $RM -r conftest 2>/dev/null
+   mkdir conftest
+   cd conftest
+   mkdir out
+   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+   lt_compiler_flag="-o out/conftest2.$ac_objext"
+   # Insert the option either (1) after the last *FLAGS variable, or
+   # (2) before a word containing "conftest.", or (3) at the end.
+   # Note that $ac_compile itself does not contain backslashes and begins
+   # with a dollar sign (not a hyphen), so the echo should work correctly.
+   lt_compile=`echo "$ac_compile" | $SED \
+   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+   -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \
+   -e 's:$: $lt_compiler_flag:'`
+   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD)
+   (eval "$lt_compile" 2>out/conftest.err)
+   ac_status=$?
+   cat out/conftest.err >&AS_MESSAGE_LOG_FD
+   echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
+   if (exit $ac_status) && test -s out/conftest2.$ac_objext
+   then
+     # The compiler can only warn and ignore the option if not recognized,
+     # so say no if there are warnings.
+     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
+     $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
+     if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
+       _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes
+     fi
+   fi
+   chmod u+w . 2>&AS_MESSAGE_LOG_FD
+   $RM conftest*
+   # SGI C++ compiler will create directory out/ii_files/ for
+   # template instantiation
+   test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
+   $RM out/* && rmdir out
+   cd ..
+   $RM -r conftest
+   $RM conftest*
+])
+_LT_TAGDECL([compiler_c_o], [lt_cv_prog_compiler_c_o], [1],
+	[Does compiler simultaneously support -c and -o options?])
+])# _LT_COMPILER_C_O
+
+
+# _LT_COMPILER_FILE_LOCKS([TAGNAME])
+# ----------------------------------
+# Check to see if we can do hard links to lock some files if needed
+m4_defun([_LT_COMPILER_FILE_LOCKS],
+[m4_require([_LT_ENABLE_LOCK])dnl
+m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+_LT_COMPILER_C_O([$1])
+
+hard_links="nottested"
+if test "$_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)" = no && test "$need_locks" != no; then
+  # do not overwrite the value of need_locks provided by the user
+  AC_MSG_CHECKING([if we can lock with hard links])
+  hard_links=yes
+  $RM conftest*
+  ln conftest.a conftest.b 2>/dev/null && hard_links=no
+  touch conftest.a
+  ln conftest.a conftest.b 2>&5 || hard_links=no
+  ln conftest.a conftest.b 2>/dev/null && hard_links=no
+  AC_MSG_RESULT([$hard_links])
+  if test "$hard_links" = no; then
+    AC_MSG_WARN([`$CC' does not support `-c -o', so `make -j' may be unsafe])
+    need_locks=warn
+  fi
+else
+  need_locks=no
+fi
+_LT_DECL([], [need_locks], [1], [Must we lock files when doing compilation?])
+])# _LT_COMPILER_FILE_LOCKS
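+
+# The hard-link probe above, reproduced by hand:
+#    rm -f conftest.a conftest.b
+#    touch conftest.a
+#    ln conftest.a conftest.b && echo "hard links usable for locking"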
+
+
+# _LT_CHECK_OBJDIR
+# ----------------
+m4_defun([_LT_CHECK_OBJDIR],
+[AC_CACHE_CHECK([for objdir], [lt_cv_objdir],
+[rm -f .libs 2>/dev/null
+mkdir .libs 2>/dev/null
+if test -d .libs; then
+  lt_cv_objdir=.libs
+else
+  # MS-DOS does not allow filenames that begin with a dot.
+  lt_cv_objdir=_libs
+fi
+rmdir .libs 2>/dev/null])
+objdir=$lt_cv_objdir
+_LT_DECL([], [objdir], [0],
+         [The name of the directory that contains temporary libtool files])dnl
+m4_pattern_allow([LT_OBJDIR])dnl
+AC_DEFINE_UNQUOTED(LT_OBJDIR, "$lt_cv_objdir/",
+  [Define to the sub-directory in which libtool stores uninstalled libraries.])
+])# _LT_CHECK_OBJDIR
+
+
+# _LT_LINKER_HARDCODE_LIBPATH([TAGNAME])
+# --------------------------------------
+# Check hardcoding attributes.
+m4_defun([_LT_LINKER_HARDCODE_LIBPATH],
+[AC_MSG_CHECKING([how to hardcode library paths into programs])
+_LT_TAGVAR(hardcode_action, $1)=
+if test -n "$_LT_TAGVAR(hardcode_libdir_flag_spec, $1)" ||
+   test -n "$_LT_TAGVAR(runpath_var, $1)" ||
+   test "X$_LT_TAGVAR(hardcode_automatic, $1)" = "Xyes" ; then
+
+  # We can hardcode non-existent directories.
+  if test "$_LT_TAGVAR(hardcode_direct, $1)" != no &&
+     # If the only mechanism to avoid hardcoding is shlibpath_var, we
+     # have to relink, otherwise we might link with an installed library
+     # when we should be linking with a yet-to-be-installed one
+     ## test "$_LT_TAGVAR(hardcode_shlibpath_var, $1)" != no &&
+     test "$_LT_TAGVAR(hardcode_minus_L, $1)" != no; then
+    # Linking always hardcodes the temporary library directory.
+    _LT_TAGVAR(hardcode_action, $1)=relink
+  else
+    # We can link without hardcoding, and we can hardcode non-existent dirs.
+    _LT_TAGVAR(hardcode_action, $1)=immediate
+  fi
+else
+  # We cannot hardcode anything, or else we can only hardcode existing
+  # directories.
+  _LT_TAGVAR(hardcode_action, $1)=unsupported
+fi
+AC_MSG_RESULT([$_LT_TAGVAR(hardcode_action, $1)])
+
+if test "$_LT_TAGVAR(hardcode_action, $1)" = relink ||
+   test "$_LT_TAGVAR(inherit_rpath, $1)" = yes; then
+  # Fast installation is not supported
+  enable_fast_install=no
+elif test "$shlibpath_overrides_runpath" = yes ||
+     test "$enable_shared" = no; then
+  # Fast installation is not necessary
+  enable_fast_install=needless
+fi
+_LT_TAGDECL([], [hardcode_action], [0],
+    [How to hardcode a shared library path into an executable])
+])# _LT_LINKER_HARDCODE_LIBPATH
+
+
+# _LT_CMD_STRIPLIB
+# ----------------
+m4_defun([_LT_CMD_STRIPLIB],
+[m4_require([_LT_DECL_EGREP])
+striplib=
+old_striplib=
+AC_MSG_CHECKING([whether stripping libraries is possible])
+if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then
+  test -z "$old_striplib" && old_striplib="$STRIP --strip-debug"
+  test -z "$striplib" && striplib="$STRIP --strip-unneeded"
+  AC_MSG_RESULT([yes])
+else
+# FIXME - insert some real tests, host_os isn't really good enough
+  case $host_os in
+  darwin*)
+    if test -n "$STRIP" ; then
+      striplib="$STRIP -x"
+      old_striplib="$STRIP -S"
+      AC_MSG_RESULT([yes])
+    else
+      AC_MSG_RESULT([no])
+    fi
+    ;;
+  *)
+    AC_MSG_RESULT([no])
+    ;;
+  esac
+fi
+_LT_DECL([], [old_striplib], [1], [Commands to strip libraries])
+_LT_DECL([], [striplib], [1])
+])# _LT_CMD_STRIPLIB
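+
+# The two strip invocations differ in what they keep (a sketch, assuming
+# GNU binutils):
+#    strip --strip-debug    libdemo.a    # static archive: keep the symbols
+#    strip --strip-unneeded libdemo.so   # shared object: drop what the
+#                                        # dynamic linker does not need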
+
+
+# _LT_SYS_DYNAMIC_LINKER([TAG])
+# -----------------------------
+# PORTME Fill in your ld.so characteristics
+m4_defun([_LT_SYS_DYNAMIC_LINKER],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+m4_require([_LT_DECL_EGREP])dnl
+m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_DECL_OBJDUMP])dnl
+m4_require([_LT_DECL_SED])dnl
+m4_require([_LT_CHECK_SHELL_FEATURES])dnl
+AC_MSG_CHECKING([dynamic linker characteristics])
+m4_if([$1],
+	[], [
+if test "$GCC" = yes; then
+  case $host_os in
+    darwin*) lt_awk_arg="/^libraries:/,/LR/" ;;
+    *) lt_awk_arg="/^libraries:/" ;;
+  esac
+  case $host_os in
+    mingw* | cegcc*) lt_sed_strip_eq="s,=\([[A-Za-z]]:\),\1,g" ;;
+    *) lt_sed_strip_eq="s,=/,/,g" ;;
+  esac
+  lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq`
+  case $lt_search_path_spec in
+  *\;*)
+    # if the path contains ";" then we assume it to be the separator
+    # otherwise default to the standard path separator (i.e. ":") - it is
+    # assumed that no part of a normal pathname contains ";" but that should
+    # okay in the real world where ";" in dirpaths is itself problematic.
+    lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'`
+    ;;
+  *)
+    lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"`
+    ;;
+  esac
+  # OK, now that we have the path separated by spaces, we can step through
+  # it and add the multilib dir if necessary.
+  lt_tmp_lt_search_path_spec=
+  lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null`
+  for lt_sys_path in $lt_search_path_spec; do
+    if test -d "$lt_sys_path/$lt_multi_os_dir"; then
+      lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir"
+    else
+      test -d "$lt_sys_path" && \
+	lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path"
+    fi
+  done
+  lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk '
+BEGIN {RS=" "; FS="/|\n";} {
+  lt_foo="";
+  lt_count=0;
+  for (lt_i = NF; lt_i > 0; lt_i--) {
+    if ($lt_i != "" && $lt_i != ".") {
+      if ($lt_i == "..") {
+        lt_count++;
+      } else {
+        if (lt_count == 0) {
+          lt_foo="/" $lt_i lt_foo;
+        } else {
+          lt_count--;
+        }
+      }
+    }
+  }
+  if (lt_foo != "") { lt_freq[[lt_foo]]++; }
+  if (lt_freq[[lt_foo]] == 1) { print lt_foo; }
+}'`
+  # The AWK program above erroneously prepends '/' to C:/dos/paths
+  # for these hosts.
+  case $host_os in
+    mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\
+      $SED 's,/\([[A-Za-z]]:\),\1,g'` ;;
+  esac
+  sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP`
+else
+  sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
+fi])
+library_names_spec=
+libname_spec='lib$name'
+soname_spec=
+shrext_cmds=".so"
+postinstall_cmds=
+postuninstall_cmds=
+finish_cmds=
+finish_eval=
+shlibpath_var=
+shlibpath_overrides_runpath=unknown
+version_type=none
+dynamic_linker="$host_os ld.so"
+sys_lib_dlsearch_path_spec="/lib /usr/lib"
+need_lib_prefix=unknown
+hardcode_into_libs=no
+
+# When you set need_version to no, make sure it does not cause -set_version
+# flags to be left without arguments.
+need_version=unknown
+
+case $host_os in
+aix3*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a'
+  shlibpath_var=LIBPATH
+
+  # AIX 3 has no versioning support, so we append a major version to the name.
+  soname_spec='${libname}${release}${shared_ext}$major'
+  ;;
+
+aix[[4-9]]*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  hardcode_into_libs=yes
+  if test "$host_cpu" = ia64; then
+    # AIX 5 supports IA64
+    library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}'
+    shlibpath_var=LD_LIBRARY_PATH
+  else
+    # With GCC up to 2.95.x, collect2 would create an import file
+    # for dependent libraries.  The import file would start with
+    # the line `#! .'.  This would cause the generated library to
+    # depend on `.', always an invalid library.  This was fixed in
+    # development snapshots of GCC prior to 3.0.
+    case $host_os in
+      aix4 | aix4.[[01]] | aix4.[[01]].*)
+      if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)'
+	   echo ' yes '
+	   echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then
+	:
+      else
+	can_build_shared=no
+      fi
+      ;;
+    esac
+    # AIX (on Power*) has no versioning support, so currently we cannot
+    # hardcode the correct soname into the executable. We could probably
+    # add versioning support to collect2, so additional links may be
+    # useful in the future.
+    if test "$aix_use_runtimelinking" = yes; then
+      # If using run time linking (on AIX 4.2 or later) use lib<name>.so
+      # instead of lib<name>.a to let people know that these are not
+      # typical AIX shared libraries.
+      library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+    else
+      # We preserve .a as the extension for shared libraries through AIX 4.2
+      # and later when we are not doing run time linking.
+      library_names_spec='${libname}${release}.a $libname.a'
+      soname_spec='${libname}${release}${shared_ext}$major'
+    fi
+    shlibpath_var=LIBPATH
+  fi
+  ;;
+
+amigaos*)
+  case $host_cpu in
+  powerpc)
+    # Since July 2007 AmigaOS4 officially supports .so libraries.
+    # When compiling the executable, add -use-dynld -Lsobjs: to the compile line.
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+    ;;
+  m68k)
+    library_names_spec='$libname.ixlibrary $libname.a'
+    # Create ${libname}_ixlibrary.a entries in /sys/libs.
+    finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
+    ;;
+  esac
+  ;;
+
+beos*)
+  library_names_spec='${libname}${shared_ext}'
+  dynamic_linker="$host_os ld.so"
+  shlibpath_var=LIBRARY_PATH
+  ;;
+
+bsdi[[45]]*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
+  sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib"
+  # the default ld.so.conf also contains /usr/contrib/lib and
+  # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow
+  # libtool to hard-code these into programs
+  ;;
+
+cygwin* | mingw* | pw32* | cegcc*)
+  version_type=windows
+  shrext_cmds=".dll"
+  need_version=no
+  need_lib_prefix=no
+
+  case $GCC,$cc_basename in
+  yes,*)
+    # gcc
+    library_names_spec='$libname.dll.a'
+    # DLL is installed to $(libdir)/../bin by postinstall_cmds
+    postinstall_cmds='base_file=`basename \${file}`~
+      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~
+      dldir=$destdir/`dirname \$dlpath`~
+      test -d \$dldir || mkdir -p \$dldir~
+      $install_prog $dir/$dlname \$dldir/$dlname~
+      chmod a+x \$dldir/$dlname~
+      if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
+        eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
+      fi'
+    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+      dlpath=$dir/\$dldll~
+       $RM \$dlpath'
+    shlibpath_overrides_runpath=yes
+
+    case $host_os in
+    cygwin*)
+      # Cygwin DLLs use 'cyg' prefix rather than 'lib'
+      soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}'
+m4_if([$1], [],[
+      sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"])
+      ;;
+    mingw* | cegcc*)
+      # MinGW DLLs use traditional 'lib' prefix
+      soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}'
+      ;;
+    pw32*)
+      # pw32 DLLs use 'pw' prefix rather than 'lib'
+      library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}'
+      ;;
+    esac
+    dynamic_linker='Win32 ld.exe'
+    ;;
+
+  *,cl*)
+    # Native MSVC
+    libname_spec='$name'
+    soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}'
+    library_names_spec='${libname}.dll.lib'
+
+    case $build_os in
+    mingw*)
+      sys_lib_search_path_spec=
+      lt_save_ifs=$IFS
+      IFS=';'
+      for lt_path in $LIB
+      do
+        IFS=$lt_save_ifs
+        # Let DOS variable expansion print the short 8.3 style file name.
+        lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"`
+        sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path"
+      done
+      IFS=$lt_save_ifs
+      # Convert to MSYS style.
+      sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([[a-zA-Z]]\\):| /\\1|g' -e 's|^ ||'`
+      ;;
+    cygwin*)
+      # Convert to unix form, then to dos form, then back to unix form
+      # but this time dos style (no spaces!) so that the unix form looks
+      # like /cygdrive/c/PROGRA~1:/cygdr...
+      sys_lib_search_path_spec=`cygpath --path --unix "$LIB"`
+      sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null`
+      sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+      ;;
+    *)
+      sys_lib_search_path_spec="$LIB"
+      if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then
+        # It is most probably a Windows format PATH.
+        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'`
+      else
+        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+      fi
+      # FIXME: find the short name or the path components, as spaces are
+      # common. (e.g. "Program Files" -> "PROGRA~1")
+      ;;
+    esac
+
+    # DLL is installed to $(libdir)/../bin by postinstall_cmds
+    postinstall_cmds='base_file=`basename \${file}`~
+      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~
+      dldir=$destdir/`dirname \$dlpath`~
+      test -d \$dldir || mkdir -p \$dldir~
+      $install_prog $dir/$dlname \$dldir/$dlname'
+    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+      dlpath=$dir/\$dldll~
+       $RM \$dlpath'
+    shlibpath_overrides_runpath=yes
+    dynamic_linker='Win32 link.exe'
+    ;;
+
+  *)
+    # Assume MSVC wrapper
+    library_names_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext} $libname.lib'
+    dynamic_linker='Win32 ld.exe'
+    ;;
+  esac
+  # FIXME: first we should search . and the directory the executable is in
+  shlibpath_var=PATH
+  ;;
+
+darwin* | rhapsody*)
+  dynamic_linker="$host_os dyld"
+  version_type=darwin
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext'
+  soname_spec='${libname}${release}${major}$shared_ext'
+  shlibpath_overrides_runpath=yes
+  shlibpath_var=DYLD_LIBRARY_PATH
+  shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`'
+m4_if([$1], [],[
+  sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"])
+  sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib'
+  ;;
+
+dgux*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  ;;
+
+freebsd* | dragonfly*)
+  # DragonFly does not have aout.  When/if they implement a new
+  # versioning mechanism, adjust this.
+  if test -x /usr/bin/objformat; then
+    objformat=`/usr/bin/objformat`
+  else
+    case $host_os in
+    freebsd[[23]].*) objformat=aout ;;
+    *) objformat=elf ;;
+    esac
+  fi
+  version_type=freebsd-$objformat
+  case $version_type in
+    freebsd-elf*)
+      library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}'
+      need_version=no
+      need_lib_prefix=no
+      ;;
+    freebsd-*)
+      library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix'
+      need_version=yes
+      ;;
+  esac
+  shlibpath_var=LD_LIBRARY_PATH
+  case $host_os in
+  freebsd2.*)
+    shlibpath_overrides_runpath=yes
+    ;;
+  freebsd3.[[01]]* | freebsdelf3.[[01]]*)
+    shlibpath_overrides_runpath=yes
+    hardcode_into_libs=yes
+    ;;
+  freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \
+  freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1)
+    shlibpath_overrides_runpath=no
+    hardcode_into_libs=yes
+    ;;
+  *) # from 4.6 on, and DragonFly
+    shlibpath_overrides_runpath=yes
+    hardcode_into_libs=yes
+    ;;
+  esac
+  ;;
+
+haiku*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  dynamic_linker="$host_os runtime_loader"
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib'
+  hardcode_into_libs=yes
+  ;;
+
+hpux9* | hpux10* | hpux11*)
+  # Give a soname corresponding to the major version so that dld.sl refuses to
+  # link against other versions.
+  version_type=sunos
+  need_lib_prefix=no
+  need_version=no
+  case $host_cpu in
+  ia64*)
+    shrext_cmds='.so'
+    hardcode_into_libs=yes
+    dynamic_linker="$host_os dld.so"
+    shlibpath_var=LD_LIBRARY_PATH
+    shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+    soname_spec='${libname}${release}${shared_ext}$major'
+    if test "X$HPUX_IA64_MODE" = X32; then
+      sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib"
+    else
+      sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64"
+    fi
+    sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+    ;;
+  hppa*64*)
+    shrext_cmds='.sl'
+    hardcode_into_libs=yes
+    dynamic_linker="$host_os dld.sl"
+    shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH
+    shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+    soname_spec='${libname}${release}${shared_ext}$major'
+    sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64"
+    sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+    ;;
+  *)
+    shrext_cmds='.sl'
+    dynamic_linker="$host_os dld.sl"
+    shlibpath_var=SHLIB_PATH
+    shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+    soname_spec='${libname}${release}${shared_ext}$major'
+    ;;
+  esac
+  # HP-UX runs *really* slowly unless shared libraries are mode 555, ...
+  postinstall_cmds='chmod 555 $lib'
+  # or fails outright, so override atomically:
+  install_override_mode=555
+  ;;
+
+interix[[3-9]]*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  ;;
+
+irix5* | irix6* | nonstopux*)
+  case $host_os in
+    nonstopux*) version_type=nonstopux ;;
+    *)
+	if test "$lt_cv_prog_gnu_ld" = yes; then
+		version_type=linux # correct to gnu/linux during the next big refactor
+	else
+		version_type=irix
+	fi ;;
+  esac
+  need_lib_prefix=no
+  need_version=no
+  soname_spec='${libname}${release}${shared_ext}$major'
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}'
+  case $host_os in
+  irix5* | nonstopux*)
+    libsuff= shlibsuff=
+    ;;
+  *)
+    case $LD in # libtool.m4 will add one of these switches to LD
+    *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ")
+      libsuff= shlibsuff= libmagic=32-bit;;
+    *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ")
+      libsuff=32 shlibsuff=N32 libmagic=N32;;
+    *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ")
+      libsuff=64 shlibsuff=64 libmagic=64-bit;;
+    *) libsuff= shlibsuff= libmagic=never-match;;
+    esac
+    ;;
+  esac
+  shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
+  shlibpath_overrides_runpath=no
+  sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}"
+  sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}"
+  hardcode_into_libs=yes
+  ;;
+
+# No shared lib support for Linux oldld, aout, or coff.
+linux*oldld* | linux*aout* | linux*coff*)
+  dynamic_linker=no
+  ;;
+
+# This must be glibc/ELF.
+linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+
+  # Some versions of binutils ld are patched to set DT_RUNPATH
+  AC_CACHE_VAL([lt_cv_shlibpath_overrides_runpath],
+    [lt_cv_shlibpath_overrides_runpath=no
+    save_LDFLAGS=$LDFLAGS
+    save_libdir=$libdir
+    eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \
+	 LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\""
+    AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])],
+      [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null],
+	 [lt_cv_shlibpath_overrides_runpath=yes])])
+    LDFLAGS=$save_LDFLAGS
+    libdir=$save_libdir
+    ])
+  shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath
+
+  # This implies no fast_install, which is unacceptable.
+  # Some rework will be needed to allow for fast_install
+  # before this can be enabled.
+  hardcode_into_libs=yes
+
+  # Append ld.so.conf contents to the search path
+  if test -f /etc/ld.so.conf; then
+    lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[	 ]*hwcap[	 ]/d;s/[:,	]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '`
+    sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra"
+  fi
+
+  # We used to test for /lib/ld.so.1 and disable shared libraries on
+  # powerpc, because MkLinux only supported shared libraries with the
+  # GNU dynamic linker.  Since this was broken with cross compilers,
+  # most powerpc-linux boxes support dynamic linking these days and
+  # people can always --disable-shared, the test was removed, and we
+  # assume the GNU/Linux dynamic linker is in use.
+  dynamic_linker='GNU/Linux ld.so'
+  ;;
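+
+  # For illustration, assuming a hypothetical libfoo whose -version-info
+  # yields versuffix=.1.2.3 and major=.1, the glibc/ELF specs above would
+  # expand roughly to:
+  #   library_names_spec -> libfoo.so.1.2.3 libfoo.so.1 libfoo.so
+  #   soname_spec        -> libfoo.so.1
+  # and finish_cmds runs ldconfig -n on the installation directory so
+  # that the libfoo.so.1 soname link gets created.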
+
+netbsdelf*-gnu)
+  version_type=linux
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  dynamic_linker='NetBSD ld.elf_so'
+  ;;
+
+netbsd*)
+  version_type=sunos
+  need_lib_prefix=no
+  need_version=no
+  if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+    finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+    dynamic_linker='NetBSD (a.out) ld.so'
+  else
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+    soname_spec='${libname}${release}${shared_ext}$major'
+    dynamic_linker='NetBSD ld.elf_so'
+  fi
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  hardcode_into_libs=yes
+  ;;
+
+newsos6)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  ;;
+
+*nto* | *qnx*)
+  version_type=qnx
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  dynamic_linker='ldqnx.so'
+  ;;
+
+openbsd*)
+  version_type=sunos
+  sys_lib_dlsearch_path_spec="/usr/lib"
+  need_lib_prefix=no
+  # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs.
+  case $host_os in
+    openbsd3.3 | openbsd3.3.*)	need_version=yes ;;
+    *)				need_version=no  ;;
+  esac
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+  finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+    case $host_os in
+      openbsd2.[[89]] | openbsd2.[[89]].*)
+	shlibpath_overrides_runpath=no
+	;;
+      *)
+	shlibpath_overrides_runpath=yes
+	;;
+      esac
+  else
+    shlibpath_overrides_runpath=yes
+  fi
+  ;;
+
+os2*)
+  libname_spec='$name'
+  shrext_cmds=".dll"
+  need_lib_prefix=no
+  library_names_spec='$libname${shared_ext} $libname.a'
+  dynamic_linker='OS/2 ld.exe'
+  shlibpath_var=LIBPATH
+  ;;
+
+osf3* | osf4* | osf5*)
+  version_type=osf
+  need_lib_prefix=no
+  need_version=no
+  soname_spec='${libname}${release}${shared_ext}$major'
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  shlibpath_var=LD_LIBRARY_PATH
+  sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
+  sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec"
+  ;;
+
+rdos*)
+  dynamic_linker=no
+  ;;
+
+solaris*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  hardcode_into_libs=yes
+  # ldd complains unless libraries are executable
+  postinstall_cmds='chmod +x $lib'
+  ;;
+
+sunos4*)
+  version_type=sunos
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+  finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  if test "$with_gnu_ld" = yes; then
+    need_lib_prefix=no
+  fi
+  need_version=yes
+  ;;
+
+sysv4 | sysv4.3*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  case $host_vendor in
+    sni)
+      shlibpath_overrides_runpath=no
+      need_lib_prefix=no
+      runpath_var=LD_RUN_PATH
+      ;;
+    siemens)
+      need_lib_prefix=no
+      ;;
+    motorola)
+      need_lib_prefix=no
+      need_version=no
+      shlibpath_overrides_runpath=no
+      sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib'
+      ;;
+  esac
+  ;;
+
+sysv4*MP*)
+  if test -d /usr/nec; then
+    version_type=linux # correct to gnu/linux during the next big refactor
+    library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}'
+    soname_spec='$libname${shared_ext}.$major'
+    shlibpath_var=LD_LIBRARY_PATH
+  fi
+  ;;
+
+sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+  version_type=freebsd-elf
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  hardcode_into_libs=yes
+  if test "$with_gnu_ld" = yes; then
+    sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib'
+  else
+    sys_lib_search_path_spec='/usr/ccs/lib /usr/lib'
+    case $host_os in
+      sco3.2v5*)
+        sys_lib_search_path_spec="$sys_lib_search_path_spec /lib"
+	;;
+    esac
+  fi
+  sys_lib_dlsearch_path_spec='/usr/lib'
+  ;;
+
+tpf*)
+  # TPF is a cross-target only.  Preferred cross-host = GNU/Linux.
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  ;;
+
+uts4*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  ;;
+
+*)
+  dynamic_linker=no
+  ;;
+esac
+AC_MSG_RESULT([$dynamic_linker])
+test "$dynamic_linker" = no && can_build_shared=no
+
+variables_saved_for_relink="PATH $shlibpath_var $runpath_var"
+if test "$GCC" = yes; then
+  variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH"
+fi
+
+if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then
+  sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec"
+fi
+if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then
+  sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec"
+fi
+
+_LT_DECL([], [variables_saved_for_relink], [1],
+    [Variables whose values should be saved in libtool wrapper scripts and
+    restored at link time])
+_LT_DECL([], [need_lib_prefix], [0],
+    [Do we need the "lib" prefix for modules?])
+_LT_DECL([], [need_version], [0], [Do we need a version for libraries?])
+_LT_DECL([], [version_type], [0], [Library versioning type])
+_LT_DECL([], [runpath_var], [0],  [Shared library runtime path variable])
+_LT_DECL([], [shlibpath_var], [0],[Shared library path variable])
+_LT_DECL([], [shlibpath_overrides_runpath], [0],
+    [Is shlibpath searched before the hard-coded library search path?])
+_LT_DECL([], [libname_spec], [1], [Format of library name prefix])
+_LT_DECL([], [library_names_spec], [1],
+    [[List of archive names.  First name is the real one, the rest are links.
+    The last name is the one that the linker finds with -lNAME]])
+_LT_DECL([], [soname_spec], [1],
+    [[The coded name of the library, if different from the real name]])
+_LT_DECL([], [install_override_mode], [1],
+    [Permission mode override for installation of shared libraries])
+_LT_DECL([], [postinstall_cmds], [2],
+    [Command to use after installation of a shared archive])
+_LT_DECL([], [postuninstall_cmds], [2],
+    [Command to use after uninstallation of a shared archive])
+_LT_DECL([], [finish_cmds], [2],
+    [Commands used to finish a libtool library installation in a directory])
+_LT_DECL([], [finish_eval], [1],
+    [[As "finish_cmds", except a single script fragment to be evaled but
+    not shown]])
+_LT_DECL([], [hardcode_into_libs], [0],
+    [Whether we should hardcode library paths into libraries])
+_LT_DECL([], [sys_lib_search_path_spec], [2],
+    [Compile-time system search path for libraries])
+_LT_DECL([], [sys_lib_dlsearch_path_spec], [2],
+    [Run-time system search path for libraries])
+])# _LT_SYS_DYNAMIC_LINKER
+
+
+# _LT_PATH_TOOL_PREFIX(TOOL)
+# --------------------------
+# find a file program which can recognize a shared library
+AC_DEFUN([_LT_PATH_TOOL_PREFIX],
+[m4_require([_LT_DECL_EGREP])dnl
+AC_MSG_CHECKING([for $1])
+AC_CACHE_VAL(lt_cv_path_MAGIC_CMD,
+[case $MAGIC_CMD in
+[[\\/*] |  ?:[\\/]*])
+  lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path.
+  ;;
+*)
+  lt_save_MAGIC_CMD="$MAGIC_CMD"
+  lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+dnl $ac_dummy forces splitting on constant user-supplied paths.
+dnl POSIX.2 word splitting is done only on the output of word expansions,
+dnl not every word.  This closes a longstanding sh security hole.
+  ac_dummy="m4_if([$2], , $PATH, [$2])"
+  for ac_dir in $ac_dummy; do
+    IFS="$lt_save_ifs"
+    test -z "$ac_dir" && ac_dir=.
+    if test -f $ac_dir/$1; then
+      lt_cv_path_MAGIC_CMD="$ac_dir/$1"
+      if test -n "$file_magic_test_file"; then
+	case $deplibs_check_method in
+	"file_magic "*)
+	  file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"`
+	  MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+	  if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
+	    $EGREP "$file_magic_regex" > /dev/null; then
+	    :
+	  else
+	    cat <<_LT_EOF 1>&2
+
+*** Warning: the command libtool uses to detect shared libraries,
+*** $file_magic_cmd, produces output that libtool cannot recognize.
+*** The result is that libtool may fail to recognize shared libraries
+*** as such.  This will affect the creation of libtool libraries that
+*** depend on shared libraries, but programs linked with such libtool
+*** libraries will work regardless of this problem.  Nevertheless, you
+*** may want to report the problem to your system manager and/or to
+*** bug-libtool@gnu.org
+
+_LT_EOF
+	  fi ;;
+	esac
+      fi
+      break
+    fi
+  done
+  IFS="$lt_save_ifs"
+  MAGIC_CMD="$lt_save_MAGIC_CMD"
+  ;;
+esac])
+MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+if test -n "$MAGIC_CMD"; then
+  AC_MSG_RESULT($MAGIC_CMD)
+else
+  AC_MSG_RESULT(no)
+fi
+_LT_DECL([], [MAGIC_CMD], [0],
+	 [Used to examine libraries when file_magic_cmd begins with "file"])dnl
+])# _LT_PATH_TOOL_PREFIX
+
+# Old name:
+AU_ALIAS([AC_PATH_TOOL_PREFIX], [_LT_PATH_TOOL_PREFIX])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_PATH_TOOL_PREFIX], [])
+
+
+# _LT_PATH_MAGIC
+# --------------
+# find a file program which can recognize a shared library
+m4_defun([_LT_PATH_MAGIC],
+[_LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH)
+if test -z "$lt_cv_path_MAGIC_CMD"; then
+  if test -n "$ac_tool_prefix"; then
+    _LT_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH)
+  else
+    MAGIC_CMD=:
+  fi
+fi
+])# _LT_PATH_MAGIC
+
+
+# LT_PATH_LD
+# ----------
+# find the pathname to the GNU or non-GNU linker
+AC_DEFUN([LT_PATH_LD],
+[AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_CANONICAL_BUILD])dnl
+m4_require([_LT_DECL_SED])dnl
+m4_require([_LT_DECL_EGREP])dnl
+m4_require([_LT_PROG_ECHO_BACKSLASH])dnl
+
+AC_ARG_WITH([gnu-ld],
+    [AS_HELP_STRING([--with-gnu-ld],
+	[assume the C compiler uses GNU ld @<:@default=no@:>@])],
+    [test "$withval" = no || with_gnu_ld=yes],
+    [with_gnu_ld=no])dnl
+
+ac_prog=ld
+if test "$GCC" = yes; then
+  # Check if gcc -print-prog-name=ld gives a path.
+  AC_MSG_CHECKING([for ld used by $CC])
+  case $host in
+  *-*-mingw*)
+    # gcc leaves a trailing carriage return which upsets mingw
+    ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+  *)
+    ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+  esac
+  case $ac_prog in
+    # Accept absolute paths.
+    [[\\/]]* | ?:[[\\/]]*)
+      re_direlt='/[[^/]][[^/]]*/\.\./'
+      # Canonicalize the pathname of ld
+      ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'`
+      while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
+	ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
+      done
+      test -z "$LD" && LD="$ac_prog"
+      ;;
+  "")
+    # If it fails, then pretend we aren't using GCC.
+    ac_prog=ld
+    ;;
+  *)
+    # If it is relative, then search for the first ld in PATH.
+    with_gnu_ld=unknown
+    ;;
+  esac
+elif test "$with_gnu_ld" = yes; then
+  AC_MSG_CHECKING([for GNU ld])
+else
+  AC_MSG_CHECKING([for non-GNU ld])
+fi
+AC_CACHE_VAL(lt_cv_path_LD,
+[if test -z "$LD"; then
+  lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+  for ac_dir in $PATH; do
+    IFS="$lt_save_ifs"
+    test -z "$ac_dir" && ac_dir=.
+    if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+      lt_cv_path_LD="$ac_dir/$ac_prog"
+      # Check to see if the program is GNU ld.  I'd rather use --version,
+      # but apparently some variants of GNU ld only accept -v.
+      # Break only if it was the GNU/non-GNU ld that we prefer.
+      case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in
+      *GNU* | *'with BFD'*)
+	test "$with_gnu_ld" != no && break
+	;;
+      *)
+	test "$with_gnu_ld" != yes && break
+	;;
+      esac
+    fi
+  done
+  IFS="$lt_save_ifs"
+else
+  lt_cv_path_LD="$LD" # Let the user override the test with a path.
+fi])
+LD="$lt_cv_path_LD"
+if test -n "$LD"; then
+  AC_MSG_RESULT($LD)
+else
+  AC_MSG_RESULT(no)
+fi
+test -z "$LD" && AC_MSG_ERROR([no acceptable ld found in \$PATH])
+_LT_PATH_LD_GNU
+AC_SUBST([LD])
+
+_LT_TAGDECL([], [LD], [1], [The linker used to build libraries])
+])# LT_PATH_LD
+
+# Old names:
+AU_ALIAS([AM_PROG_LD], [LT_PATH_LD])
+AU_ALIAS([AC_PROG_LD], [LT_PATH_LD])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AM_PROG_LD], [])
+dnl AC_DEFUN([AC_PROG_LD], [])
+
+
+# _LT_PATH_LD_GNU
+# ---------------
+m4_defun([_LT_PATH_LD_GNU],
+[AC_CACHE_CHECK([if the linker ($LD) is GNU ld], lt_cv_prog_gnu_ld,
+[# I'd rather use --version here, but apparently some GNU lds only accept -v.
+case `$LD -v 2>&1 </dev/null` in
+*GNU* | *'with BFD'*)
+  lt_cv_prog_gnu_ld=yes
+  ;;
+*)
+  lt_cv_prog_gnu_ld=no
+  ;;
+esac])
+with_gnu_ld=$lt_cv_prog_gnu_ld
+])# _LT_PATH_LD_GNU
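+
+# For illustration, the check above keys off the first line of `ld -v'
+# output; with a typical GNU toolchain one might see, e.g.:
+#
+#   $ ld -v
+#   GNU ld (GNU Binutils) 2.26
+#
+# which matches the *GNU* pattern, whereas a vendor ld prints a banner
+# without "GNU" or "with BFD" and leaves lt_cv_prog_gnu_ld=no.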
+
+
+# _LT_CMD_RELOAD
+# --------------
+# find reload flag for linker
+#   -- PORTME Some linkers may need a different reload flag.
+m4_defun([_LT_CMD_RELOAD],
+[AC_CACHE_CHECK([for $LD option to reload object files],
+  lt_cv_ld_reload_flag,
+  [lt_cv_ld_reload_flag='-r'])
+reload_flag=$lt_cv_ld_reload_flag
+case $reload_flag in
+"" | " "*) ;;
+*) reload_flag=" $reload_flag" ;;
+esac
+reload_cmds='$LD$reload_flag -o $output$reload_objs'
+case $host_os in
+  cygwin* | mingw* | pw32* | cegcc*)
+    if test "$GCC" != yes; then
+      reload_cmds=false
+    fi
+    ;;
+  darwin*)
+    if test "$GCC" = yes; then
+      reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs'
+    else
+      reload_cmds='$LD$reload_flag -o $output$reload_objs'
+    fi
+    ;;
+esac
+_LT_TAGDECL([], [reload_flag], [1], [How to create reloadable object files])dnl
+_LT_TAGDECL([], [reload_cmds], [2])dnl
+])# _LT_CMD_RELOAD
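+
+# For illustration, with the default reload_flag the reload_cmds above
+# amount to a relocatable link of the form:
+#
+#   ld -r -o combined.o a.o b.o
+#
+# (file names are placeholders) which merges several objects into one;
+# on Darwin with GCC the same effect is obtained through the compiler
+# driver via -nostdlib ${wl}-r.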
+
+
+# _LT_CHECK_MAGIC_METHOD
+# ----------------------
+# how to check for library dependencies
+#  -- PORTME fill in with the dynamic library characteristics
+m4_defun([_LT_CHECK_MAGIC_METHOD],
+[m4_require([_LT_DECL_EGREP])
+m4_require([_LT_DECL_OBJDUMP])
+AC_CACHE_CHECK([how to recognize dependent libraries],
+lt_cv_deplibs_check_method,
+[lt_cv_file_magic_cmd='$MAGIC_CMD'
+lt_cv_file_magic_test_file=
+lt_cv_deplibs_check_method='unknown'
+# Need to set the preceding variable on all platforms that support
+# interlibrary dependencies.
+# `none' -- dependencies not supported.
+# `unknown' -- same as none, but documents that we really don't know.
+# `pass_all' -- all dependencies passed with no checks.
+# `test_compile' -- check by making a test program.
+# `file_magic [[regex]]' -- check by looking for files in the library path
+# that respond to the $file_magic_cmd with a given extended regex.
+# If you have `file' or equivalent on your system and you're not sure
+# whether `pass_all' will *always* work, you probably want this one.
+
+case $host_os in
+aix[[4-9]]*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+beos*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+bsdi[[45]]*)
+  lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib)'
+  lt_cv_file_magic_cmd='/usr/bin/file -L'
+  lt_cv_file_magic_test_file=/shlib/libc.so
+  ;;
+
+cygwin*)
+  # func_win32_libid is a shell function defined in ltmain.sh
+  lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
+  lt_cv_file_magic_cmd='func_win32_libid'
+  ;;
+
+mingw* | pw32*)
+  # Base MSYS/MinGW do not provide the 'file' command needed by
+  # func_win32_libid shell function, so use a weaker test based on 'objdump',
+  # unless we find 'file', for example because we are cross-compiling.
+  # func_win32_libid assumes BSD nm, so disallow it if using MS dumpbin.
+  if ( test "$lt_cv_nm_interface" = "BSD nm" && file / ) >/dev/null 2>&1; then
+    lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
+    lt_cv_file_magic_cmd='func_win32_libid'
+  else
+    # Keep this pattern in sync with the one in func_win32_libid.
+    lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)'
+    lt_cv_file_magic_cmd='$OBJDUMP -f'
+  fi
+  ;;
+
+cegcc*)
+  # use the weaker test based on 'objdump'. See mingw*.
+  lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?'
+  lt_cv_file_magic_cmd='$OBJDUMP -f'
+  ;;
+
+darwin* | rhapsody*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+freebsd* | dragonfly*)
+  if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then
+    case $host_cpu in
+    i*86 )
+      # Not sure whether the presence of OpenBSD here was a mistake.
+      # Let's accept both of them until this is cleared up.
+      lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library'
+      lt_cv_file_magic_cmd=/usr/bin/file
+      lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*`
+      ;;
+    esac
+  else
+    lt_cv_deplibs_check_method=pass_all
+  fi
+  ;;
+
+haiku*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+hpux10.20* | hpux11*)
+  lt_cv_file_magic_cmd=/usr/bin/file
+  case $host_cpu in
+  ia64*)
+    lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64'
+    lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so
+    ;;
+  hppa*64*)
+    [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]']
+    lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl
+    ;;
+  *)
+    lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]]\.[[0-9]]) shared library'
+    lt_cv_file_magic_test_file=/usr/lib/libc.sl
+    ;;
+  esac
+  ;;
+
+interix[[3-9]]*)
+  # PIC code is broken on Interix 3.x; that's why we use |\.a here rather than |_pic\.a
+  lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$'
+  ;;
+
+irix5* | irix6* | nonstopux*)
+  case $LD in
+  *-32|*"-32 ") libmagic=32-bit;;
+  *-n32|*"-n32 ") libmagic=N32;;
+  *-64|*"-64 ") libmagic=64-bit;;
+  *) libmagic=never-match;;
+  esac
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+# This must be glibc/ELF.
+linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+netbsd* | netbsdelf*-gnu)
+  if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then
+    lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$'
+  else
+    lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$'
+  fi
+  ;;
+
+newos6*)
+  lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)'
+  lt_cv_file_magic_cmd=/usr/bin/file
+  lt_cv_file_magic_test_file=/usr/lib/libnls.so
+  ;;
+
+*nto* | *qnx*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+openbsd*)
+  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+    lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$'
+  else
+    lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$'
+  fi
+  ;;
+
+osf3* | osf4* | osf5*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+rdos*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+solaris*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+sysv4 | sysv4.3*)
+  case $host_vendor in
+  motorola)
+    lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]'
+    lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*`
+    ;;
+  ncr)
+    lt_cv_deplibs_check_method=pass_all
+    ;;
+  sequent)
+    lt_cv_file_magic_cmd='/bin/file'
+    lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )'
+    ;;
+  sni)
+    lt_cv_file_magic_cmd='/bin/file'
+    lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib"
+    lt_cv_file_magic_test_file=/lib/libc.so
+    ;;
+  siemens)
+    lt_cv_deplibs_check_method=pass_all
+    ;;
+  pc)
+    lt_cv_deplibs_check_method=pass_all
+    ;;
+  esac
+  ;;
+
+tpf*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+esac
+])
+
+file_magic_glob=
+want_nocaseglob=no
+if test "$build" = "$host"; then
+  case $host_os in
+  mingw* | pw32*)
+    if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then
+      want_nocaseglob=yes
+    else
+      file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[[\1]]\/[[\1]]\/g;/g"`
+    fi
+    ;;
+  esac
+fi
+
+file_magic_cmd=$lt_cv_file_magic_cmd
+deplibs_check_method=$lt_cv_deplibs_check_method
+test -z "$deplibs_check_method" && deplibs_check_method=unknown
+
+_LT_DECL([], [deplibs_check_method], [1],
+    [Method to check whether dependent libraries are shared objects])
+_LT_DECL([], [file_magic_cmd], [1],
+    [Command to use when deplibs_check_method = "file_magic"])
+_LT_DECL([], [file_magic_glob], [1],
+    [How to find potential files when deplibs_check_method = "file_magic"])
+_LT_DECL([], [want_nocaseglob], [1],
+    [Find potential files using nocaseglob when deplibs_check_method = "file_magic"])
+])# _LT_CHECK_MAGIC_METHOD
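+
+# For illustration, a file_magic method is applied conceptually like
+# this (path and output are placeholders):
+#
+#   $ /usr/bin/file -L /shlib/libc.so
+#   /shlib/libc.so: ELF 32-bit LSB shared object ...
+#
+# with the output matched by $EGREP against the stored extended regex;
+# pass_all skips the check entirely, and match_pattern tests the file
+# name instead of the file contents.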
+
+
+# LT_PATH_NM
+# ----------
+# find the pathname to a BSD- or MS-compatible name lister
+AC_DEFUN([LT_PATH_NM],
+[AC_REQUIRE([AC_PROG_CC])dnl
+AC_CACHE_CHECK([for BSD- or MS-compatible name lister (nm)], lt_cv_path_NM,
+[if test -n "$NM"; then
+  # Let the user override the test.
+  lt_cv_path_NM="$NM"
+else
+  lt_nm_to_check="${ac_tool_prefix}nm"
+  if test -n "$ac_tool_prefix" && test "$build" = "$host"; then
+    lt_nm_to_check="$lt_nm_to_check nm"
+  fi
+  for lt_tmp_nm in $lt_nm_to_check; do
+    lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+    for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do
+      IFS="$lt_save_ifs"
+      test -z "$ac_dir" && ac_dir=.
+      tmp_nm="$ac_dir/$lt_tmp_nm"
+      if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then
+	# Check to see if the nm accepts a BSD-compat flag.
+	# Adding the `sed 1q' prevents false positives on HP-UX, which says:
+	#   nm: unknown option "B" ignored
+	# Tru64's nm complains that /dev/null is an invalid object file
+	case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in
+	*/dev/null* | *'Invalid file or object type'*)
+	  lt_cv_path_NM="$tmp_nm -B"
+	  break
+	  ;;
+	*)
+	  case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in
+	  */dev/null*)
+	    lt_cv_path_NM="$tmp_nm -p"
+	    break
+	    ;;
+	  *)
+	    lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but
+	    continue # so that we can try to find one that supports BSD flags
+	    ;;
+	  esac
+	  ;;
+	esac
+      fi
+    done
+    IFS="$lt_save_ifs"
+  done
+  : ${lt_cv_path_NM=no}
+fi])
+if test "$lt_cv_path_NM" != "no"; then
+  NM="$lt_cv_path_NM"
+else
+  # Didn't find any BSD-compatible name lister; look for dumpbin.
+  if test -n "$DUMPBIN"; then :
+    # Let the user override the test.
+  else
+    AC_CHECK_TOOLS(DUMPBIN, [dumpbin "link -dump"], :)
+    case `$DUMPBIN -symbols /dev/null 2>&1 | sed '1q'` in
+    *COFF*)
+      DUMPBIN="$DUMPBIN -symbols"
+      ;;
+    *)
+      DUMPBIN=:
+      ;;
+    esac
+  fi
+  AC_SUBST([DUMPBIN])
+  if test "$DUMPBIN" != ":"; then
+    NM="$DUMPBIN"
+  fi
+fi
+test -z "$NM" && NM=nm
+AC_SUBST([NM])
+_LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl
+
+AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface],
+  [lt_cv_nm_interface="BSD nm"
+  echo "int some_variable = 0;" > conftest.$ac_ext
+  (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&AS_MESSAGE_LOG_FD)
+  (eval "$ac_compile" 2>conftest.err)
+  cat conftest.err >&AS_MESSAGE_LOG_FD
+  (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD)
+  (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out)
+  cat conftest.err >&AS_MESSAGE_LOG_FD
+  (eval echo "\"\$as_me:$LINENO: output\"" >&AS_MESSAGE_LOG_FD)
+  cat conftest.out >&AS_MESSAGE_LOG_FD
+  if $GREP 'External.*some_variable' conftest.out > /dev/null; then
+    lt_cv_nm_interface="MS dumpbin"
+  fi
+  rm -f conftest*])
+])# LT_PATH_NM
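+
+# For illustration, the BSD-compat probe above effectively runs:
+#
+#   $ nm -B /dev/null
+#   nm: /dev/null: no symbols
+#
+# (output will vary) and accepts the tool when the first line still
+# mentions /dev/null; an nm that rejects -B is retried with -p, and a
+# plain nm is kept only as a last resort.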
+
+# Old names:
+AU_ALIAS([AM_PROG_NM], [LT_PATH_NM])
+AU_ALIAS([AC_PROG_NM], [LT_PATH_NM])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AM_PROG_NM], [])
+dnl AC_DEFUN([AC_PROG_NM], [])
+
+# _LT_CHECK_SHAREDLIB_FROM_LINKLIB
+# --------------------------------
+# how to determine the name of the shared library
+# associated with a specific link library.
+#  -- PORTME fill in with the dynamic library characteristics
+m4_defun([_LT_CHECK_SHAREDLIB_FROM_LINKLIB],
+[m4_require([_LT_DECL_EGREP])
+m4_require([_LT_DECL_OBJDUMP])
+m4_require([_LT_DECL_DLLTOOL])
+AC_CACHE_CHECK([how to associate runtime and link libraries],
+lt_cv_sharedlib_from_linklib_cmd,
+[lt_cv_sharedlib_from_linklib_cmd='unknown'
+
+case $host_os in
+cygwin* | mingw* | pw32* | cegcc*)
+  # two different shell functions defined in ltmain.sh
+  # decide which to use based on capabilities of $DLLTOOL
+  case `$DLLTOOL --help 2>&1` in
+  *--identify-strict*)
+    lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib
+    ;;
+  *)
+    lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback
+    ;;
+  esac
+  ;;
+*)
+  # fallback: assume linklib IS sharedlib
+  lt_cv_sharedlib_from_linklib_cmd="$ECHO"
+  ;;
+esac
+])
+sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd
+test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO
+
+_LT_DECL([], [sharedlib_from_linklib_cmd], [1],
+    [Command to associate shared and link libraries])
+])# _LT_CHECK_SHAREDLIB_FROM_LINKLIB
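+
+# For illustration: on cygwin/mingw the chosen ltmain.sh helper
+# ultimately asks $DLLTOOL which DLL an import library refers to,
+# conceptually:
+#
+#   $ dlltool --identify libfoo.dll.a
+#   libfoo.dll
+#
+# (libfoo is a placeholder); on all other hosts the link library is
+# assumed to be the shared library itself, so $ECHO passes the name
+# through unchanged.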
+
+
+# _LT_PATH_MANIFEST_TOOL
+# ----------------------
+# locate the manifest tool
+m4_defun([_LT_PATH_MANIFEST_TOOL],
+[AC_CHECK_TOOL(MANIFEST_TOOL, mt, :)
+test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt
+AC_CACHE_CHECK([if $MANIFEST_TOOL is a manifest tool], [lt_cv_path_manifest_tool],
+  [lt_cv_path_manifest_tool=no
+  echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&AS_MESSAGE_LOG_FD
+  $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out
+  cat conftest.err >&AS_MESSAGE_LOG_FD
+  if $GREP 'Manifest Tool' conftest.out > /dev/null; then
+    lt_cv_path_manifest_tool=yes
+  fi
+  rm -f conftest*])
+if test "x$lt_cv_path_mainfest_tool" != xyes; then
+  MANIFEST_TOOL=:
+fi
+_LT_DECL([], [MANIFEST_TOOL], [1], [Manifest tool])dnl
+])# _LT_PATH_MANIFEST_TOOL
+
+
+# LT_LIB_M
+# --------
+# check for math library
+AC_DEFUN([LT_LIB_M],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+LIBM=
+case $host in
+*-*-beos* | *-*-cegcc* | *-*-cygwin* | *-*-haiku* | *-*-pw32* | *-*-darwin*)
+  # These systems don't have libm, or don't need it
+  ;;
+*-ncr-sysv4.3*)
+  AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM="-lmw")
+  AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm")
+  ;;
+*)
+  AC_CHECK_LIB(m, cos, LIBM="-lm")
+  ;;
+esac
+AC_SUBST([LIBM])
+])# LT_LIB_M
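+
+# For illustration, a configure.ac can simply invoke:
+#
+#   LT_LIB_M
+#
+# and, since LIBM is substituted above, a Makefile.am may then link
+# against it with something like "foo_LDADD = $(LIBM)"; LIBM stays empty
+# on hosts such as darwin or haiku that need no separate math library.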
+
+# Old name:
+AU_ALIAS([AC_CHECK_LIBM], [LT_LIB_M])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_CHECK_LIBM], [])
+
+
+# _LT_COMPILER_NO_RTTI([TAGNAME])
+# -------------------------------
+m4_defun([_LT_COMPILER_NO_RTTI],
+[m4_require([_LT_TAG_COMPILER])dnl
+
+_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=
+
+if test "$GCC" = yes; then
+  case $cc_basename in
+  nvcc*)
+    _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -Xcompiler -fno-builtin' ;;
+  *)
+    _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' ;;
+  esac
+
+  _LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions],
+    lt_cv_prog_compiler_rtti_exceptions,
+    [-fno-rtti -fno-exceptions], [],
+    [_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"])
+fi
+_LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1],
+	[Compiler flag to turn off builtin functions])
+])# _LT_COMPILER_NO_RTTI
+
+
+# _LT_CMD_GLOBAL_SYMBOLS
+# ----------------------
+m4_defun([_LT_CMD_GLOBAL_SYMBOLS],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([AC_PROG_AWK])dnl
+AC_REQUIRE([LT_PATH_NM])dnl
+AC_REQUIRE([LT_PATH_LD])dnl
+m4_require([_LT_DECL_SED])dnl
+m4_require([_LT_DECL_EGREP])dnl
+m4_require([_LT_TAG_COMPILER])dnl
+
+# Check for command to grab the raw symbol name followed by C symbol from nm.
+AC_MSG_CHECKING([command to parse $NM output from $compiler object])
+AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe],
+[
+# These are sane defaults that work on at least a few old systems.
+# [They come from Ultrix.  What could be older than Ultrix?!! ;)]
+
+# Character class describing NM global symbol codes.
+symcode='[[BCDEGRST]]'
+
+# Regexp to match symbols that can be accessed directly from C.
+sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)'
+
+# Define system-specific variables.
+case $host_os in
+aix*)
+  symcode='[[BCDT]]'
+  ;;
+cygwin* | mingw* | pw32* | cegcc*)
+  symcode='[[ABCDGISTW]]'
+  ;;
+hpux*)
+  if test "$host_cpu" = ia64; then
+    symcode='[[ABCDEGRST]]'
+  fi
+  ;;
+irix* | nonstopux*)
+  symcode='[[BCDEGRST]]'
+  ;;
+osf*)
+  symcode='[[BCDEGQRST]]'
+  ;;
+solaris*)
+  symcode='[[BDRT]]'
+  ;;
+sco3.2v5*)
+  symcode='[[DT]]'
+  ;;
+sysv4.2uw2*)
+  symcode='[[DT]]'
+  ;;
+sysv5* | sco5v6* | unixware* | OpenUNIX*)
+  symcode='[[ABDT]]'
+  ;;
+sysv4)
+  symcode='[[DFNSTU]]'
+  ;;
+esac
+
+# If we're using GNU nm, then use its standard symbol codes.
+case `$NM -V 2>&1` in
+*GNU* | *'with BFD'*)
+  symcode='[[ABCDGIRSTW]]' ;;
+esac
+
+# Transform an extracted symbol line into a proper C declaration.
+# Some systems (esp. on ia64) link data and code symbols differently,
+# so use this general approach.
+lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'"
+
+# Transform an extracted symbol line into symbol name and symbol address
+lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\)[[ ]]*$/  {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/  {\"\2\", (void *) \&\2},/p'"
+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([[^ ]]*\)[[ ]]*$/  {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \(lib[[^ ]]*\)$/  {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/  {\"lib\2\", (void *) \&\2},/p'"
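+
+# For illustration: the pipe defined further below reduces a raw nm line
+# such as
+#   0000000000000404 D nm_test_var
+# to "D nm_test_var nm_test_var"; from that, the to_cdecl script above
+# yields "extern char nm_test_var;" and the name/address scripts yield
+# an entry like
+#   {"nm_test_var", (void *) &nm_test_var},
+# (address and symbol name are illustrative).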
+
+# Handle CRLF in mingw tool chain
+opt_cr=
+case $build_os in
+mingw*)
+  opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp
+  ;;
+esac
+
+# Try without a prefix underscore, then with it.
+for ac_symprfx in "" "_"; do
+
+  # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol.
+  symxfrm="\\1 $ac_symprfx\\2 \\2"
+
+  # Write the raw and C identifiers.
+  if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+    # Fake it for dumpbin and say T for any non-static function
+    # and D for any global variable.
+    # Also find C++ and __fastcall symbols from MSVC++,
+    # which start with @ or ?.
+    lt_cv_sys_global_symbol_pipe="$AWK ['"\
+"     {last_section=section; section=\$ 3};"\
+"     /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\
+"     /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\
+"     \$ 0!~/External *\|/{next};"\
+"     / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\
+"     {if(hide[section]) next};"\
+"     {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? \"T \" : \"D \"};"\
+"     {split(\$ 0, a, /\||\r/); split(a[2], s)};"\
+"     s[1]~/^[@?]/{print s[1], s[1]; next};"\
+"     s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\
+"     ' prfx=^$ac_symprfx]"
+  else
+    lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[	 ]]\($symcode$symcode*\)[[	 ]][[	 ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'"
+  fi
+  lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'"
+
+  # Check to see that the pipe works correctly.
+  pipe_works=no
+
+  rm -f conftest*
+  cat > conftest.$ac_ext <<_LT_EOF
+#ifdef __cplusplus
+extern "C" {
+#endif
+char nm_test_var;
+void nm_test_func(void);
+void nm_test_func(void){}
+#ifdef __cplusplus
+}
+#endif
+int main(){nm_test_var='a';nm_test_func();return(0);}
+_LT_EOF
+
+  if AC_TRY_EVAL(ac_compile); then
+    # Now try to grab the symbols.
+    nlist=conftest.nm
+    if AC_TRY_EVAL(NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) && test -s "$nlist"; then
+      # Try sorting and uniquifying the output.
+      if sort "$nlist" | uniq > "$nlist"T; then
+	mv -f "$nlist"T "$nlist"
+      else
+	rm -f "$nlist"T
+      fi
+
+      # Make sure that we snagged all the symbols we need.
+      if $GREP ' nm_test_var$' "$nlist" >/dev/null; then
+	if $GREP ' nm_test_func$' "$nlist" >/dev/null; then
+	  cat <<_LT_EOF > conftest.$ac_ext
+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests.  */
+#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE)
+/* DATA imports from DLLs on WIN32 can't be const, because runtime
+   relocations are performed -- see ld's documentation on pseudo-relocs.  */
+# define LT@&t@_DLSYM_CONST
+#elif defined(__osf__)
+/* This system does not cope well with relocations in const data.  */
+# define LT@&t@_DLSYM_CONST
+#else
+# define LT@&t@_DLSYM_CONST const
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+_LT_EOF
+	  # Now generate the symbol file.
+	  eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext'
+
+	  cat <<_LT_EOF >> conftest.$ac_ext
+
+/* The mapping between symbol names and symbols.  */
+LT@&t@_DLSYM_CONST struct {
+  const char *name;
+  void       *address;
+}
+lt__PROGRAM__LTX_preloaded_symbols[[]] =
+{
+  { "@PROGRAM@", (void *) 0 },
+_LT_EOF
+	  $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/  {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext
+	  cat <<\_LT_EOF >> conftest.$ac_ext
+  {0, (void *) 0}
+};
+
+/* This works around a problem in the FreeBSD linker */
+#ifdef FREEBSD_WORKAROUND
+static const void *lt_preloaded_setup() {
+  return lt__PROGRAM__LTX_preloaded_symbols;
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+_LT_EOF
+	  # Now try linking the two files.
+	  mv conftest.$ac_objext conftstm.$ac_objext
+	  lt_globsym_save_LIBS=$LIBS
+	  lt_globsym_save_CFLAGS=$CFLAGS
+	  LIBS="conftstm.$ac_objext"
+	  CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)"
+	  if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext}; then
+	    pipe_works=yes
+	  fi
+	  LIBS=$lt_globsym_save_LIBS
+	  CFLAGS=$lt_globsym_save_CFLAGS
+	else
+	  echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD
+	fi
+      else
+	echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD
+      fi
+    else
+      echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD
+    fi
+  else
+    echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD
+    cat conftest.$ac_ext >&5
+  fi
+  rm -rf conftest* conftst*
+
+  # Do not use the global_symbol_pipe unless it works.
+  if test "$pipe_works" = yes; then
+    break
+  else
+    lt_cv_sys_global_symbol_pipe=
+  fi
+done
+])
+if test -z "$lt_cv_sys_global_symbol_pipe"; then
+  lt_cv_sys_global_symbol_to_cdecl=
+fi
+if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then
+  AC_MSG_RESULT(failed)
+else
+  AC_MSG_RESULT(ok)
+fi
+
+# Response file support.
+if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+  nm_file_list_spec='@'
+elif $NM --help 2>/dev/null | grep '[[@]]FILE' >/dev/null; then
+  nm_file_list_spec='@'
+fi
+
+_LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1],
+    [Take the output of nm and produce a listing of raw symbols and C names])
+_LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1],
+    [Transform the output of nm in a proper C declaration])
+_LT_DECL([global_symbol_to_c_name_address],
+    [lt_cv_sys_global_symbol_to_c_name_address], [1],
+    [Transform the output of nm in a C name address pair])
+_LT_DECL([global_symbol_to_c_name_address_lib_prefix],
+    [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1],
+    [Transform the output of nm in a C name address pair when lib prefix is needed])
+_LT_DECL([], [nm_file_list_spec], [1],
+    [Specify filename containing input files for $NM])
+]) # _LT_CMD_GLOBAL_SYMBOLS
+
+
+# _LT_COMPILER_PIC([TAGNAME])
+# ---------------------------
+m4_defun([_LT_COMPILER_PIC],
+[m4_require([_LT_TAG_COMPILER])dnl
+_LT_TAGVAR(lt_prog_compiler_wl, $1)=
+_LT_TAGVAR(lt_prog_compiler_pic, $1)=
+_LT_TAGVAR(lt_prog_compiler_static, $1)=
+
+m4_if([$1], [CXX], [
+  # C++ specific cases for pic, static, wl, etc.
+  if test "$GXX" = yes; then
+    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+    _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+
+    case $host_os in
+    aix*)
+      # All AIX code is PIC.
+      if test "$host_cpu" = ia64; then
+	# AIX 5 now supports IA64 processor
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+      fi
+      ;;
+
+    amigaos*)
+      case $host_cpu in
+      powerpc)
+            # see comment about AmigaOS4 .so support
+            _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+        ;;
+      m68k)
+            # FIXME: we need at least 68020 code to build shared libraries, but
+            # adding the `-m68020' flag to GCC prevents building anything better,
+            # like `-m68040'.
+            _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4'
+        ;;
+      esac
+      ;;
+
+    beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*)
+      # PIC is the default for these OSes.
+      ;;
+    mingw* | cygwin* | os2* | pw32* | cegcc*)
+      # This hack is so that the source file can tell whether it is being
+      # built for inclusion in a dll (and should export symbols for example).
+      # Although the cygwin gcc ignores -fPIC, we still need this for
+      # old-style (--disable-auto-import) libraries
+      m4_if([$1], [GCJ], [],
+	[_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
+      ;;
+    darwin* | rhapsody*)
+      # PIC is the default on this platform
+      # Common symbols not allowed in MH_DYLIB files
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common'
+      ;;
+    *djgpp*)
+      # DJGPP does not support shared libraries at all
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)=
+      ;;
+    haiku*)
+      # PIC is the default for Haiku.
+      # The "-static" flag exists, but is broken.
+      _LT_TAGVAR(lt_prog_compiler_static, $1)=
+      ;;
+    interix[[3-9]]*)
+      # Interix 3.x gcc -fpic/-fPIC options generate broken code.
+      # Instead, we relocate shared libraries at runtime.
+      ;;
+    sysv4*MP*)
+      if test -d /usr/nec; then
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic
+      fi
+      ;;
+    hpux*)
+      # PIC is the default for 64-bit PA HP-UX, but not for 32-bit
+      # PA HP-UX.  On IA64 HP-UX, PIC is the default but the pic flag
+      # sets the default TLS model and affects inlining.
+      case $host_cpu in
+      hppa*64*)
+	;;
+      *)
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+	;;
+      esac
+      ;;
+    *qnx* | *nto*)
+      # QNX uses GNU C++, but we need to pass the -shared option too;
+      # otherwise it will core dump.
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
+      ;;
+    *)
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+      ;;
+    esac
+  else
+    case $host_os in
+      aix[[4-9]]*)
+	# All AIX code is PIC.
+	if test "$host_cpu" = ia64; then
+	  # AIX 5 now supports IA64 processor
+	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+	else
+	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp'
+	fi
+	;;
+      chorus*)
+	case $cc_basename in
+	cxch68*)
+	  # Green Hills C++ Compiler
+	  # _LT_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a"
+	  ;;
+	esac
+	;;
+      mingw* | cygwin* | os2* | pw32* | cegcc*)
+	# This hack is so that the source file can tell whether it is being
+	# built for inclusion in a dll (and should export symbols for example).
+	m4_if([$1], [GCJ], [],
+	  [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
+	;;
+      dgux*)
+	case $cc_basename in
+	  ec++*)
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+	    ;;
+	  ghcx*)
+	    # Green Hills C++ Compiler
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      freebsd* | dragonfly*)
+	# FreeBSD uses GNU C++
+	;;
+      hpux9* | hpux10* | hpux11*)
+	case $cc_basename in
+	  CC*)
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive'
+	    if test "$host_cpu" != ia64; then
+	      _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z'
+	    fi
+	    ;;
+	  aCC*)
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive'
+	    case $host_cpu in
+	    hppa*64*|ia64*)
+	      # +Z the default
+	      ;;
+	    *)
+	      _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z'
+	      ;;
+	    esac
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      interix*)
+	# This is c89, which is MS Visual C++ (no shared libs).
+	# Anyone want to do a port?
+	;;
+      irix5* | irix6* | nonstopux*)
+	case $cc_basename in
+	  CC*)
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+	    # CC pic flag -KPIC is the default.
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+	case $cc_basename in
+	  KCC*)
+	    # KAI C++ Compiler
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,'
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+	    ;;
+	  ecpc* )
+	    # old Intel C++ for x86_64 which still supported -KPIC.
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+	    ;;
+	  icpc* )
+	    # Intel C++, used to be incompatible with GCC.
+	    # ICC 10 doesn't accept -KPIC any more.
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+	    ;;
+	  pgCC* | pgcpp*)
+	    # Portland Group C++ compiler
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic'
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+	    ;;
+	  cxx*)
+	    # Compaq C++
+	    # Make sure the PIC flag is empty.  It appears that all Alpha
+	    # Linux and Compaq Tru64 Unix objects are PIC.
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)=
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+	    ;;
+	  xlc* | xlC* | bgxl[[cC]]* | mpixl[[cC]]*)
+	    # IBM XL 8.0, 9.0 on PPC and BlueGene
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic'
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink'
+	    ;;
+	  *)
+	    case `$CC -V 2>&1 | sed 5q` in
+	    *Sun\ C*)
+	      # Sun C++ 5.9
+	      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+	      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+	      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
+	      ;;
+	    esac
+	    ;;
+	esac
+	;;
+      lynxos*)
+	;;
+      m88k*)
+	;;
+      mvs*)
+	case $cc_basename in
+	  cxx*)
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      netbsd* | netbsdelf*-gnu)
+	;;
+      *qnx* | *nto*)
+        # QNX uses GNU C++, but we need to pass the -shared option too;
+        # otherwise it will core dump.
+        _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
+        ;;
+      osf3* | osf4* | osf5*)
+	case $cc_basename in
+	  KCC*)
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,'
+	    ;;
+	  RCC*)
+	    # Rational C++ 2.4.1
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+	    ;;
+	  cxx*)
+	    # Digital/Compaq C++
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	    # Make sure the PIC flag is empty.  It appears that all Alpha
+	    # Linux and Compaq Tru64 Unix objects are PIC.
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)=
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      psos*)
+	;;
+      solaris*)
+	case $cc_basename in
+	  CC* | sunCC*)
+	    # Sun C++ 4.2, 5.x and Centerline C++
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
+	    ;;
+	  gcx*)
+	    # Green Hills C++ Compiler
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      sunos4*)
+	case $cc_basename in
+	  CC*)
+	    # Sun C++ 4.x
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+	    ;;
+	  lcc*)
+	    # Lucid
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*)
+	case $cc_basename in
+	  CC*)
+	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+	    ;;
+	esac
+	;;
+      tandem*)
+	case $cc_basename in
+	  NCC*)
+	    # NonStop-UX NCC 3.20
+	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      vxworks*)
+	;;
+      *)
+	_LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
+	;;
+    esac
+  fi
+],
+[
+  if test "$GCC" = yes; then
+    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+    _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+
+    case $host_os in
+      aix*)
+      # All AIX code is PIC.
+      if test "$host_cpu" = ia64; then
+	# AIX 5 now supports IA64 processor
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+      fi
+      ;;
+
+    amigaos*)
+      case $host_cpu in
+      powerpc)
+            # see comment about AmigaOS4 .so support
+            _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+        ;;
+      m68k)
+            # FIXME: we need at least 68020 code to build shared libraries, but
+            # adding the `-m68020' flag to GCC prevents building anything better,
+            # like `-m68040'.
+            _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4'
+        ;;
+      esac
+      ;;
+
+    beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*)
+      # PIC is the default for these OSes.
+      ;;
+
+    mingw* | cygwin* | pw32* | os2* | cegcc*)
+      # This hack is so that the source file can tell whether it is being
+      # built for inclusion in a dll (and should export symbols for example).
+      # Although the cygwin gcc ignores -fPIC, we still need this for
+      # old-style (--disable-auto-import) libraries
+      m4_if([$1], [GCJ], [],
+	[_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
+      ;;
+
+    darwin* | rhapsody*)
+      # PIC is the default on this platform
+      # Common symbols not allowed in MH_DYLIB files
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common'
+      ;;
+
+    haiku*)
+      # PIC is the default for Haiku.
+      # The "-static" flag exists, but is broken.
+      _LT_TAGVAR(lt_prog_compiler_static, $1)=
+      ;;
+
+    hpux*)
+      # PIC is the default for 64-bit PA HP-UX, but not for 32-bit
+      # PA HP-UX.  On IA64 HP-UX, PIC is the default but the pic flag
+      # sets the default TLS model and affects inlining.
+      case $host_cpu in
+      hppa*64*)
+	# +Z the default
+	;;
+      *)
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+	;;
+      esac
+      ;;
+
+    interix[[3-9]]*)
+      # Interix 3.x gcc -fpic/-fPIC options generate broken code.
+      # Instead, we relocate shared libraries at runtime.
+      ;;
+
+    msdosdjgpp*)
+      # Just because we use GCC doesn't mean we suddenly get shared libraries
+      # on systems that don't support them.
+      _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
+      enable_shared=no
+      ;;
+
+    *nto* | *qnx*)
+      # QNX uses GNU C++, but we need to pass the -shared option too;
+      # otherwise it will core dump.
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
+      ;;
+
+    sysv4*MP*)
+      if test -d /usr/nec; then
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic
+      fi
+      ;;
+
+    *)
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+      ;;
+    esac
+
+    case $cc_basename in
+    nvcc*) # Cuda Compiler Driver 2.2
+      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Xlinker '
+      if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then
+        _LT_TAGVAR(lt_prog_compiler_pic, $1)="-Xcompiler $_LT_TAGVAR(lt_prog_compiler_pic, $1)"
+      fi
+      ;;
+    esac
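+
+    # For illustration: on a typical GNU/Linux GCC configuration the
+    # probes above end up with roughly
+    #   lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fPIC'
+    #   lt_prog_compiler_static='-static'
+    # i.e. PIC objects are requested with -fPIC and linker options are
+    # forwarded through the driver with the -Wl, prefix.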
+  else
+    # PORTME Check for flag to pass linker flags through the system compiler.
+    case $host_os in
+    aix*)
+      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+      if test "$host_cpu" = ia64; then
+	# AIX 5 now supports IA64 processor
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+      else
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp'
+      fi
+      ;;
+
+    mingw* | cygwin* | pw32* | os2* | cegcc*)
+      # This hack is so that the source file can tell whether it is being
+      # built for inclusion in a dll (and should export symbols for example).
+      m4_if([$1], [GCJ], [],
+	[_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
+      ;;
+
+    hpux9* | hpux10* | hpux11*)
+      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+      # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but
+      # not for PA HP-UX.
+      case $host_cpu in
+      hppa*64*|ia64*)
+	# +Z the default
+	;;
+      *)
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z'
+	;;
+      esac
+      # Is there a better lt_prog_compiler_static that works with the bundled CC?
+      _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive'
+      ;;
+
+    irix5* | irix6* | nonstopux*)
+      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+      # PIC (with -KPIC) is the default.
+      _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+      ;;
+
+    linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+      case $cc_basename in
+      # old Intel for x86_64 which still supported -KPIC.
+      ecc*)
+	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+        ;;
+      # icc used to be incompatible with GCC.
+      # ICC 10 doesn't accept -KPIC any more.
+      icc* | ifort*)
+	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+        ;;
+      # Lahey Fortran 8.1.
+      lf95*)
+	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared'
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='--static'
+	;;
+      nagfor*)
+	# NAG Fortran compiler
+	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,'
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC'
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+	;;
+      pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*)
+        # Portland Group compilers (*not* the Pentium gcc compiler,
+	# which looks to be a dead project)
+	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic'
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+        ;;
+      ccc*)
+        _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+        # All Alpha code is PIC.
+        _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+        ;;
+      xl* | bgxl* | bgf* | mpixl*)
+	# IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene
+	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic'
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink'
+	;;
+      *)
+	case `$CC -V 2>&1 | sed 5q` in
+	*Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [[1-7]].* | *Sun*Fortran*\ 8.[[0-3]]*)
+	  # Sun Fortran 8.3 passes all unrecognized flags to the linker
+	  _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+	  _LT_TAGVAR(lt_prog_compiler_wl, $1)=''
+	  ;;
+	*Sun\ F* | *Sun*Fortran*)
+	  _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+	  _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
+	  ;;
+	*Sun\ C*)
+	  # Sun C 5.9
+	  _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+	  _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	  ;;
+        *Intel*\ [[CF]]*Compiler*)
+	  _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	  _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+	  ;;
+	*Portland\ Group*)
+	  _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+	  _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic'
+	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+	  ;;
+	esac
+	;;
+      esac
+      ;;
+
+    newsos6)
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+      ;;
+
+    *nto* | *qnx*)
+      # QNX uses GNU C++, but we need to define the -shared option too,
+      # otherwise it will core dump.
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
+      ;;
+
+    osf3* | osf4* | osf5*)
+      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+      # All OSF/1 code is PIC.
+      _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+      ;;
+
+    rdos*)
+      _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+      ;;
+
+    solaris*)
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+      case $cc_basename in
+      f77* | f90* | f95* | sunf77* | sunf90* | sunf95*)
+	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';;
+      *)
+	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';;
+      esac
+      ;;
+
+    sunos4*)
+      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC'
+      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+      ;;
+
+    sysv4 | sysv4.2uw2* | sysv4.3*)
+      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+      ;;
+
+    sysv4*MP*)
+      if test -d /usr/nec; then
+	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic'
+	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+      fi
+      ;;
+
+    sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*)
+      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+      ;;
+
+    unicos*)
+      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+      _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
+      ;;
+
+    uts4*)
+      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+      ;;
+
+    *)
+      _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
+      ;;
+    esac
+  fi
+])
+case $host_os in
+  # For platforms which do not support PIC, -DPIC is meaningless:
+  *djgpp*)
+    _LT_TAGVAR(lt_prog_compiler_pic, $1)=
+    ;;
+  *)
+    _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])"
+    ;;
+esac
+
+AC_CACHE_CHECK([for $compiler option to produce PIC],
+  [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)],
+  [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_prog_compiler_pic, $1)])
+_LT_TAGVAR(lt_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)
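+# Illustrative note (not upstream libtool text): for the default C tag on a
+# typical GCC/ELF target the cached value resolves to '-fPIC -DPIC', so
+# library objects are compiled roughly as:  $CC -fPIC -DPIC -c foo.c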
+
+#
+# Check to make sure the PIC flag actually works.
+#
+if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then
+  _LT_COMPILER_OPTION([if $compiler PIC flag $_LT_TAGVAR(lt_prog_compiler_pic, $1) works],
+    [_LT_TAGVAR(lt_cv_prog_compiler_pic_works, $1)],
+    [$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])], [],
+    [case $_LT_TAGVAR(lt_prog_compiler_pic, $1) in
+     "" | " "*) ;;
+     *) _LT_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ;;
+     esac],
+    [_LT_TAGVAR(lt_prog_compiler_pic, $1)=
+     _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no])
+fi
+_LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1],
+	[Additional compiler flags for building library objects])
+
+_LT_TAGDECL([wl], [lt_prog_compiler_wl], [1],
+	[How to pass a linker flag through the compiler])
+#
+# Check to make sure the static flag actually works.
+#
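+# Explanatory note (not upstream text): the 'wl=...' prefix on the eval below
+# makes any ${wl} embedded in the static flag (e.g. '${wl}-a ${wl}archive' on
+# HP-UX above) expand to the compiler's linker pass-through prefix before the
+# flag is tested.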
+wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_TAGVAR(lt_prog_compiler_static, $1)\"
+_LT_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works],
+  _LT_TAGVAR(lt_cv_prog_compiler_static_works, $1),
+  $lt_tmp_static_flag,
+  [],
+  [_LT_TAGVAR(lt_prog_compiler_static, $1)=])
+_LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1],
+	[Compiler flag to prevent dynamic linking])
+])# _LT_COMPILER_PIC
+
+
+# _LT_LINKER_SHLIBS([TAGNAME])
+# ----------------------------
+# See if the linker supports building shared libraries.
+m4_defun([_LT_LINKER_SHLIBS],
+[AC_REQUIRE([LT_PATH_LD])dnl
+AC_REQUIRE([LT_PATH_NM])dnl
+m4_require([_LT_PATH_MANIFEST_TOOL])dnl
+m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_DECL_EGREP])dnl
+m4_require([_LT_DECL_SED])dnl
+m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl
+m4_require([_LT_TAG_COMPILER])dnl
+AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries])
+m4_if([$1], [CXX], [
+  _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+  _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*']
+  case $host_os in
+  aix[[4-9]]*)
+    # If we're using GNU nm, then we don't want the "-C" option.
+    # -C means demangle to AIX nm, but means don't demangle with GNU nm
+    # Also, AIX nm treats weak defined symbols like other global defined
+    # symbols, whereas GNU nm marks them as "W".
+    if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
+      _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+    else
+      _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+    fi
+    ;;
+  pw32*)
+    _LT_TAGVAR(export_symbols_cmds, $1)="$ltdll_cmds"
+    ;;
+  cygwin* | mingw* | cegcc*)
+    case $cc_basename in
+    cl*)
+      _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
+      ;;
+    *)
+      _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols'
+      _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname']
+      ;;
+    esac
+    ;;
+  linux* | k*bsd*-gnu | gnu*)
+    _LT_TAGVAR(link_all_deplibs, $1)=no
+    ;;
+  *)
+    _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
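+    # Explanatory note (not upstream text): this pipeline keeps only the
+    # final symbol-name field of the filtered nm output, yielding one
+    # exported symbol per line in $export_symbols.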
+    ;;
+  esac
+], [
+  runpath_var=
+  _LT_TAGVAR(allow_undefined_flag, $1)=
+  _LT_TAGVAR(always_export_symbols, $1)=no
+  _LT_TAGVAR(archive_cmds, $1)=
+  _LT_TAGVAR(archive_expsym_cmds, $1)=
+  _LT_TAGVAR(compiler_needs_object, $1)=no
+  _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
+  _LT_TAGVAR(export_dynamic_flag_spec, $1)=
+  _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+  _LT_TAGVAR(hardcode_automatic, $1)=no
+  _LT_TAGVAR(hardcode_direct, $1)=no
+  _LT_TAGVAR(hardcode_direct_absolute, $1)=no
+  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+  _LT_TAGVAR(hardcode_libdir_separator, $1)=
+  _LT_TAGVAR(hardcode_minus_L, $1)=no
+  _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported
+  _LT_TAGVAR(inherit_rpath, $1)=no
+  _LT_TAGVAR(link_all_deplibs, $1)=unknown
+  _LT_TAGVAR(module_cmds, $1)=
+  _LT_TAGVAR(module_expsym_cmds, $1)=
+  _LT_TAGVAR(old_archive_from_new_cmds, $1)=
+  _LT_TAGVAR(old_archive_from_expsyms_cmds, $1)=
+  _LT_TAGVAR(thread_safe_flag_spec, $1)=
+  _LT_TAGVAR(whole_archive_flag_spec, $1)=
+  # include_expsyms should be a list of space-separated symbols to be *always*
+  # included in the symbol list
+  _LT_TAGVAR(include_expsyms, $1)=
+  # exclude_expsyms can be an extended regexp of symbols to exclude
+  # it will be wrapped by ` (' and `)$', so one must not match beginning or
+  # end of line.  Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc',
+  # as well as any symbol that contains `d'.
+  _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*']
+  # Although _GLOBAL_OFFSET_TABLE_ is a valid C symbol name, most a.out
+  # platforms (ab)use it in PIC code, but their linkers get confused if
+  # the symbol is explicitly referenced.  Since portable code cannot
+  # rely on this symbol name, it's probably fine to never include it in
+  # preloaded symbol tables.
+  # Exclude shared library initialization/finalization symbols.
+dnl Note also adjust exclude_expsyms for C++ above.
+  extract_expsyms_cmds=
+
+  case $host_os in
+  cygwin* | mingw* | pw32* | cegcc*)
+    # FIXME: the MSVC++ port hasn't been tested in a loooong time
+    # When not using gcc, we currently assume that we are using
+    # Microsoft Visual C++.
+    if test "$GCC" != yes; then
+      with_gnu_ld=no
+    fi
+    ;;
+  interix*)
+    # we just hope/assume this is gcc and not c89 (= MSVC++)
+    with_gnu_ld=yes
+    ;;
+  openbsd*)
+    with_gnu_ld=no
+    ;;
+  linux* | k*bsd*-gnu | gnu*)
+    _LT_TAGVAR(link_all_deplibs, $1)=no
+    ;;
+  esac
+
+  _LT_TAGVAR(ld_shlibs, $1)=yes
+
+  # On some targets, GNU ld is compatible enough with the native linker
+  # that we're better off using the native interface for both.
+  lt_use_gnu_ld_interface=no
+  if test "$with_gnu_ld" = yes; then
+    case $host_os in
+      aix*)
+	# The AIX port of GNU ld has always aspired to compatibility
+	# with the native linker.  However, as the warning in the GNU ld
+	# block says, versions before 2.19.5* couldn't really create working
+	# shared libraries, regardless of the interface used.
+	case `$LD -v 2>&1` in
+	  *\ \(GNU\ Binutils\)\ 2.19.5*) ;;
+	  *\ \(GNU\ Binutils\)\ 2.[[2-9]]*) ;;
+	  *\ \(GNU\ Binutils\)\ [[3-9]]*) ;;
+	  *)
+	    lt_use_gnu_ld_interface=yes
+	    ;;
+	esac
+	;;
+      *)
+	lt_use_gnu_ld_interface=yes
+	;;
+    esac
+  fi
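+  # Illustrative note (not upstream text): the patterns above match '$LD -v'
+  # output such as 'GNU ld (GNU Binutils) 2.26.1', so binutils 2.19.5 and
+  # newer keep the native-linker interface on AIX, while older releases fall
+  # through to the GNU ld interface.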
+
+  if test "$lt_use_gnu_ld_interface" = yes; then
+    # If archive_cmds runs LD, not CC, wlarc should be empty
+    wlarc='${wl}'
+
+    # Set some defaults for GNU ld with shared library support. These
+    # are reset later if shared libraries are not supported. Putting them
+    # here allows them to be overridden if necessary.
+    runpath_var=LD_RUN_PATH
+    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+    _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+    # ancient GNU ld didn't support --whole-archive et al.
+    if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then
+      _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+    else
+      _LT_TAGVAR(whole_archive_flag_spec, $1)=
+    fi
+    supports_anon_versioning=no
+    case `$LD -v 2>&1` in
+      *GNU\ gold*) supports_anon_versioning=yes ;;
+      *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11
+      *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ...
+      *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ...
+      *\ 2.11.*) ;; # other 2.11 versions
+      *) supports_anon_versioning=yes ;;
+    esac
+
+    # See if GNU ld supports shared libraries.
+    case $host_os in
+    aix[[3-9]]*)
+      # On AIX/PPC, the GNU linker is very broken
+      if test "$host_cpu" != ia64; then
+	_LT_TAGVAR(ld_shlibs, $1)=no
+	cat <<_LT_EOF 1>&2
+
+*** Warning: the GNU linker, at least up to release 2.19, is reported
+*** to be unable to reliably create shared libraries on AIX.
+*** Therefore, libtool is disabling shared libraries support.  If you
+*** really care for shared libraries, you may want to install binutils
+*** 2.20 or above, or modify your PATH so that a non-GNU linker is found.
+*** You will then need to restart the configuration process.
+
+_LT_EOF
+      fi
+      ;;
+
+    amigaos*)
+      case $host_cpu in
+      powerpc)
+            # see comment about AmigaOS4 .so support
+            _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+            _LT_TAGVAR(archive_expsym_cmds, $1)=''
+        ;;
+      m68k)
+            _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+            _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+            _LT_TAGVAR(hardcode_minus_L, $1)=yes
+        ;;
+      esac
+      ;;
+
+    beos*)
+      if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	_LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+	# Joseph Beckenbach <jrb3@best.com> says some releases of gcc
+	# support --undefined.  This deserves some investigation.  FIXME
+	_LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+      else
+	_LT_TAGVAR(ld_shlibs, $1)=no
+      fi
+      ;;
+
+    cygwin* | mingw* | pw32* | cegcc*)
+      # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless,
+      # as there is no search path for DLLs.
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+      _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-all-symbols'
+      _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+      _LT_TAGVAR(always_export_symbols, $1)=no
+      _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+      _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols'
+      _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname']
+
+      if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
+        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+	# If the export-symbols file already is a .def file (1st line
+	# is EXPORTS), use it as is; otherwise, prepend...
+	_LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+	  cp $export_symbols $output_objdir/$soname.def;
+	else
+	  echo EXPORTS > $output_objdir/$soname.def;
+	  cat $export_symbols >> $output_objdir/$soname.def;
+	fi~
+	$CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+      else
+	_LT_TAGVAR(ld_shlibs, $1)=no
+      fi
+      ;;
+
+    haiku*)
+      _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+      _LT_TAGVAR(link_all_deplibs, $1)=yes
+      ;;
+
+    interix[[3-9]]*)
+      _LT_TAGVAR(hardcode_direct, $1)=no
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+      _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+      # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
+      # Instead, shared libraries are loaded at an image base (0x10000000 by
+      # default) and relocated if they conflict, which is a slow, very
+      # memory-consuming and fragmenting process.  To avoid this, we pick a random,
+      # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link
+      # time.  Moving up from 0x10000000 also allows more sbrk(2) space.
+      _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+      _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
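+      # Worked example (explanatory, not upstream text): ${RANDOM-$$} % 4096
+      # yields 0..4095; dividing by 2 gives 0..2047; multiplying by 262144
+      # (0x40000 = 256 KiB) gives 0..0x1FFC0000; adding 1342177280
+      # (0x50000000) places the base in [0x50000000, 0x6FFC0000], matching
+      # the range stated above.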
+      ;;
+
+    gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu)
+      tmp_diet=no
+      if test "$host_os" = linux-dietlibc; then
+	case $cc_basename in
+	  diet\ *) tmp_diet=yes;;	# linux-dietlibc with static linking (!diet-dyn)
+	esac
+      fi
+      if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \
+	 && test "$tmp_diet" = no
+      then
+	tmp_addflag=' $pic_flag'
+	tmp_sharedflag='-shared'
+	case $cc_basename,$host_cpu in
+        pgcc*)				# Portland Group C compiler
+	  _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+	  tmp_addflag=' $pic_flag'
+	  ;;
+	pgf77* | pgf90* | pgf95* | pgfortran*)
+					# Portland Group f77 and f90 compilers
+	  _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+	  tmp_addflag=' $pic_flag -Mnomain' ;;
+	ecc*,ia64* | icc*,ia64*)	# Intel C compiler on ia64
+	  tmp_addflag=' -i_dynamic' ;;
+	efc*,ia64* | ifort*,ia64*)	# Intel Fortran compiler on ia64
+	  tmp_addflag=' -i_dynamic -nofor_main' ;;
+	ifc* | ifort*)			# Intel Fortran compiler
+	  tmp_addflag=' -nofor_main' ;;
+	lf95*)				# Lahey Fortran 8.1
+	  _LT_TAGVAR(whole_archive_flag_spec, $1)=
+	  tmp_sharedflag='--shared' ;;
+	xl[[cC]]* | bgxl[[cC]]* | mpixl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below)
+	  tmp_sharedflag='-qmkshrobj'
+	  tmp_addflag= ;;
+	nvcc*)	# Cuda Compiler Driver 2.2
+	  _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+	  _LT_TAGVAR(compiler_needs_object, $1)=yes
+	  ;;
+	esac
+	case `$CC -V 2>&1 | sed 5q` in
+	*Sun\ C*)			# Sun C 5.9
+	  _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+	  _LT_TAGVAR(compiler_needs_object, $1)=yes
+	  tmp_sharedflag='-G' ;;
+	*Sun\ F*)			# Sun Fortran 8.3
+	  tmp_sharedflag='-G' ;;
+	esac
+	_LT_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+
+        if test "x$supports_anon_versioning" = xyes; then
+          _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~
+	    cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+	    echo "local: *; };" >> $output_objdir/$libname.ver~
+	    $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib'
+        fi
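+	# For reference (illustrative, not upstream text): the generated
+	# $libname.ver is a GNU ld anonymous version script of the form
+	#   { global: foo; bar; local: *; };
+	# so only the listed symbols are exported.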
+
+	case $cc_basename in
+	xlf* | bgf* | bgxlf* | mpixlf*)
+	  # IBM XL Fortran 10.1 on PPC cannot create shared libs itself
+	  _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive'
+	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+	  _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib'
+	  if test "x$supports_anon_versioning" = xyes; then
+	    _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~
+	      cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+	      echo "local: *; };" >> $output_objdir/$libname.ver~
+	      $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib'
+	  fi
+	  ;;
+	esac
+      else
+        _LT_TAGVAR(ld_shlibs, $1)=no
+      fi
+      ;;
+
+    netbsd* | netbsdelf*-gnu)
+      if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+	_LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
+	wlarc=
+      else
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+      fi
+      ;;
+
+    solaris*)
+      if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then
+	_LT_TAGVAR(ld_shlibs, $1)=no
+	cat <<_LT_EOF 1>&2
+
+*** Warning: Releases 2.8.* of the GNU linker cannot reliably
+*** create shared libraries on Solaris systems.  Therefore, libtool
+*** is disabling shared libraries support.  We urge you to upgrade GNU
+*** binutils to release 2.9.1 or newer.  Another option is to modify
+*** your PATH or compiler configuration so that the native linker is
+*** used, and then restart.
+
+_LT_EOF
+      elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+      else
+	_LT_TAGVAR(ld_shlibs, $1)=no
+      fi
+      ;;
+
+    sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*)
+      case `$LD -v 2>&1` in
+        *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*)
+	_LT_TAGVAR(ld_shlibs, $1)=no
+	cat <<_LT_EOF 1>&2
+
+*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot
+*** reliably create shared libraries on SCO systems.  Therefore, libtool
+*** is disabling shared libraries support.  We urge you to upgrade GNU
+*** binutils to release 2.16.91.0.3 or newer.  Another option is to modify
+*** your PATH or compiler configuration so that the native linker is
+*** used, and then restart.
+
+_LT_EOF
+	;;
+	*)
+	  # For security reasons, it is highly recommended that you always
+	  # use absolute paths for naming shared libraries, and exclude the
+	  # DT_RUNPATH tag from executables and libraries.  But doing so
+	  # requires that you compile everything twice, which is a pain.
+	  if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+	    _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+	  else
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	  fi
+	;;
+      esac
+      ;;
+
+    sunos4*)
+      _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+      wlarc=
+      _LT_TAGVAR(hardcode_direct, $1)=yes
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      ;;
+
+    *)
+      if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+      else
+	_LT_TAGVAR(ld_shlibs, $1)=no
+      fi
+      ;;
+    esac
+
+    if test "$_LT_TAGVAR(ld_shlibs, $1)" = no; then
+      runpath_var=
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+      _LT_TAGVAR(export_dynamic_flag_spec, $1)=
+      _LT_TAGVAR(whole_archive_flag_spec, $1)=
+    fi
+  else
+    # PORTME fill in a description of your system's linker (not GNU ld)
+    case $host_os in
+    aix3*)
+      _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+      _LT_TAGVAR(always_export_symbols, $1)=yes
+      _LT_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname'
+      # Note: this linker hardcodes the directories in LIBPATH if there
+      # are no directories specified by -L.
+      _LT_TAGVAR(hardcode_minus_L, $1)=yes
+      if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then
+	# Neither direct hardcoding nor static linking is supported with a
+	# broken collect2.
+	_LT_TAGVAR(hardcode_direct, $1)=unsupported
+      fi
+      ;;
+
+    aix[[4-9]]*)
+      if test "$host_cpu" = ia64; then
+	# On IA64, the linker does run time linking by default, so we don't
+	# have to do anything special.
+	aix_use_runtimelinking=no
+	exp_sym_flag='-Bexport'
+	no_entry_flag=""
+      else
+	# If we're using GNU nm, then we don't want the "-C" option.
+	# -C means demangle to AIX nm, but means don't demangle with GNU nm
+	# Also, AIX nm treats weak defined symbols like other global
+	# defined symbols, whereas GNU nm marks them as "W".
+	if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
+	  _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+	else
+	  _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+	fi
+	aix_use_runtimelinking=no
+
+	# Test if we are trying to use run time linking or normal
+	# AIX style linking. If -brtl is somewhere in LDFLAGS, we
+	# need to do runtime linking.
+	case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*)
+	  for ld_flag in $LDFLAGS; do
+	  if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then
+	    aix_use_runtimelinking=yes
+	    break
+	  fi
+	  done
+	  ;;
+	esac
+
+	exp_sym_flag='-bexport'
+	no_entry_flag='-bnoentry'
+      fi
+
+      # When large executables or shared objects are built, AIX ld can
+      # have problems creating the table of contents.  If linking a library
+      # or program results in "error TOC overflow" add -mminimal-toc to
+      # CXXFLAGS/CFLAGS for g++/gcc.  In the cases where that is not
+      # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
+
+      _LT_TAGVAR(archive_cmds, $1)=''
+      _LT_TAGVAR(hardcode_direct, $1)=yes
+      _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+      _LT_TAGVAR(hardcode_libdir_separator, $1)=':'
+      _LT_TAGVAR(link_all_deplibs, $1)=yes
+      _LT_TAGVAR(file_list_spec, $1)='${wl}-f,'
+
+      if test "$GCC" = yes; then
+	case $host_os in aix4.[[012]]|aix4.[[012]].*)
+	# We only want to do this on AIX 4.2 and lower; the check
+	# below for broken collect2 doesn't work under 4.3+
+	  collect2name=`${CC} -print-prog-name=collect2`
+	  if test -f "$collect2name" &&
+	   strings "$collect2name" | $GREP resolve_lib_name >/dev/null
+	  then
+	  # We have reworked collect2
+	  :
+	  else
+	  # We have old collect2
+	  _LT_TAGVAR(hardcode_direct, $1)=unsupported
+	  # It fails to find uninstalled libraries when the uninstalled
+	  # path is not listed in the libpath.  Setting hardcode_minus_L
+	  # to unsupported forces relinking
+	  _LT_TAGVAR(hardcode_minus_L, $1)=yes
+	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+	  _LT_TAGVAR(hardcode_libdir_separator, $1)=
+	  fi
+	  ;;
+	esac
+	shared_flag='-shared'
+	if test "$aix_use_runtimelinking" = yes; then
+	  shared_flag="$shared_flag "'${wl}-G'
+	fi
+	_LT_TAGVAR(link_all_deplibs, $1)=no
+      else
+	# not using gcc
+	if test "$host_cpu" = ia64; then
+	# VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
+	# chokes on -Wl,-G. The following line is correct:
+	  shared_flag='-G'
+	else
+	  if test "$aix_use_runtimelinking" = yes; then
+	    shared_flag='${wl}-G'
+	  else
+	    shared_flag='${wl}-bM:SRE'
+	  fi
+	fi
+      fi
+
+      _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall'
+      # It seems that -bexpall does not export symbols beginning with
+      # underscore (_), so it is better to generate a list of symbols to export.
+      _LT_TAGVAR(always_export_symbols, $1)=yes
+      if test "$aix_use_runtimelinking" = yes; then
+	# Warning - without using the other runtime loading flags (-brtl),
+	# -berok will link without error, but may produce a broken library.
+	_LT_TAGVAR(allow_undefined_flag, $1)='-berok'
+        # Determine the default libpath from the value encoded in an
+        # empty executable.
+        _LT_SYS_MODULE_PATH_AIX([$1])
+        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath"
+        _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag"
+      else
+	if test "$host_cpu" = ia64; then
+	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib'
+	  _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs"
+	  _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols"
+	else
+	 # Determine the default libpath from the value encoded in an
+	 # empty executable.
+	 _LT_SYS_MODULE_PATH_AIX([$1])
+	 _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath"
+	  # Warning - without using the other run time loading flags,
+	  # -berok will link without error, but may produce a broken library.
+	  _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok'
+	  _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok'
+	  if test "$with_gnu_ld" = yes; then
+	    # We only use this code for GNU lds that support --whole-archive.
+	    _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive'
+	  else
+	    # Exported symbols can be pulled into shared objects from archives
+	    _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience'
+	  fi
+	  _LT_TAGVAR(archive_cmds_need_lc, $1)=yes
+	  # This is similar to how AIX traditionally builds its shared libraries.
+	  _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname'
+	fi
+      fi
+      ;;
+
+    amigaos*)
+      case $host_cpu in
+      powerpc)
+            # see comment about AmigaOS4 .so support
+            _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+            _LT_TAGVAR(archive_expsym_cmds, $1)=''
+        ;;
+      m68k)
+            _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+            _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+            _LT_TAGVAR(hardcode_minus_L, $1)=yes
+        ;;
+      esac
+      ;;
+
+    bsdi[[45]]*)
+      _LT_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic
+      ;;
+
+    cygwin* | mingw* | pw32* | cegcc*)
+      # When not using gcc, we currently assume that we are using
+      # Microsoft Visual C++.
+      # hardcode_libdir_flag_spec is actually meaningless, as there is
+      # no search path for DLLs.
+      case $cc_basename in
+      cl*)
+	# Native MSVC
+	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' '
+	_LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+	_LT_TAGVAR(always_export_symbols, $1)=yes
+	_LT_TAGVAR(file_list_spec, $1)='@'
+	# Tell ltmain to make .lib files, not .a files.
+	libext=lib
+	# Tell ltmain to make .dll files, not .so files.
+	shrext_cmds=".dll"
+	# FIXME: Setting linknames here is a bad hack.
+	_LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames='
+	_LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+	    sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp;
+	  else
+	    sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp;
+	  fi~
+	  $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
+	  linknames='
+	# The linker will not automatically build a static lib if we build a DLL.
+	# _LT_TAGVAR(old_archive_from_new_cmds, $1)='true'
+	_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+	_LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
+	_LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1,DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols'
+	# Don't use ranlib
+	_LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib'
+	_LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~
+	  lt_tool_outputfile="@TOOL_OUTPUT@"~
+	  case $lt_outputfile in
+	    *.exe|*.EXE) ;;
+	    *)
+	      lt_outputfile="$lt_outputfile.exe"
+	      lt_tool_outputfile="$lt_tool_outputfile.exe"
+	      ;;
+	  esac~
+	  if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then
+	    $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
+	    $RM "$lt_outputfile.manifest";
+	  fi'
+	;;
+      *)
+	# Assume MSVC wrapper
+	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' '
+	_LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+	# Tell ltmain to make .lib files, not .a files.
+	libext=lib
+	# Tell ltmain to make .dll files, not .so files.
+	shrext_cmds=".dll"
+	# FIXME: Setting linknames here is a bad hack.
+	_LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames='
+	# The linker will automatically build a .lib file if we build a DLL.
+	_LT_TAGVAR(old_archive_from_new_cmds, $1)='true'
+	# FIXME: Should let the user specify the lib program.
+	_LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs'
+	_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+	;;
+      esac
+      ;;
+
+    darwin* | rhapsody*)
+      _LT_DARWIN_LINKER_FEATURES($1)
+      ;;
+
+    dgux*)
+      _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      ;;
+
+    # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor
+    # support.  Future versions do this automatically, but an explicit c++rt0.o
+    # does not break anything, and helps significantly (at the cost of a little
+    # extra space).
+    freebsd2.2*)
+      _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o'
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+      _LT_TAGVAR(hardcode_direct, $1)=yes
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      ;;
+
+    # Unfortunately, older versions of FreeBSD 2 do not have this feature.
+    freebsd2.*)
+      _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+      _LT_TAGVAR(hardcode_direct, $1)=yes
+      _LT_TAGVAR(hardcode_minus_L, $1)=yes
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      ;;
+
+    # FreeBSD 3 and greater use gcc -shared to build shared libraries.
+    freebsd* | dragonfly*)
+      _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+      _LT_TAGVAR(hardcode_direct, $1)=yes
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      ;;
+
+    hpux9*)
+      if test "$GCC" = yes; then
+	_LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+      else
+	_LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+      fi
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir'
+      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+      _LT_TAGVAR(hardcode_direct, $1)=yes
+
+      # hardcode_minus_L: Not really in the search PATH,
+      # but as the default location of the library.
+      _LT_TAGVAR(hardcode_minus_L, $1)=yes
+      _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+      ;;
+
+    hpux10*)
+      if test "$GCC" = yes && test "$with_gnu_ld" = no; then
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+      else
+	_LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'
+      fi
+      if test "$with_gnu_ld" = no; then
+	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir'
+	_LT_TAGVAR(hardcode_libdir_separator, $1)=:
+	_LT_TAGVAR(hardcode_direct, $1)=yes
+	_LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+	_LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+	# hardcode_minus_L: Not really in the search PATH,
+	# but as the default location of the library.
+	_LT_TAGVAR(hardcode_minus_L, $1)=yes
+      fi
+      ;;
+
+    hpux11*)
+      if test "$GCC" = yes && test "$with_gnu_ld" = no; then
+	case $host_cpu in
+	hppa*64*)
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	ia64*)
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	*)
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	esac
+      else
+	case $host_cpu in
+	hppa*64*)
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	ia64*)
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	*)
+	m4_if($1, [], [
+	  # Older versions of the 11.00 compiler do not understand -b yet
+	  # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does)
+	  _LT_LINKER_OPTION([if $CC understands -b],
+	    _LT_TAGVAR(lt_cv_prog_compiler__b, $1), [-b],
+	    [_LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'],
+	    [_LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'])],
+	  [_LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'])
+	  ;;
+	esac
+      fi
+      if test "$with_gnu_ld" = no; then
+	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir'
+	_LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+	case $host_cpu in
+	hppa*64*|ia64*)
+	  _LT_TAGVAR(hardcode_direct, $1)=no
+	  _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+	  ;;
+	*)
+	  _LT_TAGVAR(hardcode_direct, $1)=yes
+	  _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+	  _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+
+	  # hardcode_minus_L: Not really in the search PATH,
+	  # but as the default location of the library.
+	  _LT_TAGVAR(hardcode_minus_L, $1)=yes
+	  ;;
+	esac
+      fi
+      ;;
+
+    irix5* | irix6* | nonstopux*)
+      if test "$GCC" = yes; then
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+	# Try to use the -exported_symbol ld option, if it does not
+	# work, assume that -exports_file does not work either and
+	# implicitly export all symbols.
+	# This should be the same for all languages, so no per-tag cache variable.
+	AC_CACHE_CHECK([whether the $host_os linker accepts -exported_symbol],
+	  [lt_cv_irix_exported_symbol],
+	  [save_LDFLAGS="$LDFLAGS"
+	   LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null"
+	   AC_LINK_IFELSE(
+	     [AC_LANG_SOURCE(
+	        [AC_LANG_CASE([C], [[int foo (void) { return 0; }]],
+			      [C++], [[int foo (void) { return 0; }]],
+			      [Fortran 77], [[
+      subroutine foo
+      end]],
+			      [Fortran], [[
+      subroutine foo
+      end]])])],
+	      [lt_cv_irix_exported_symbol=yes],
+	      [lt_cv_irix_exported_symbol=no])
+           LDFLAGS="$save_LDFLAGS"])
+	if test "$lt_cv_irix_exported_symbol" = yes; then
+          _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib'
+	fi
+      else
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib'
+      fi
+      _LT_TAGVAR(archive_cmds_need_lc, $1)='no'
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+      _LT_TAGVAR(inherit_rpath, $1)=yes
+      _LT_TAGVAR(link_all_deplibs, $1)=yes
+      ;;
+
+    netbsd* | netbsdelf*-gnu)
+      if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+	_LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'  # a.out
+      else
+	_LT_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags'      # ELF
+      fi
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+      _LT_TAGVAR(hardcode_direct, $1)=yes
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      ;;
+
+    newsos6)
+      _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+      _LT_TAGVAR(hardcode_direct, $1)=yes
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      ;;
+
+    *nto* | *qnx*)
+      ;;
+
+    openbsd*)
+      if test -f /usr/libexec/ld.so; then
+	_LT_TAGVAR(hardcode_direct, $1)=yes
+	_LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+	_LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+	if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+	  _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols'
+	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+	  _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+	else
+	  case $host_os in
+	   openbsd[[01]].* | openbsd2.[[0-7]] | openbsd2.[[0-7]].*)
+	     _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+	     _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+	     ;;
+	   *)
+	     _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+	     _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+	     ;;
+	  esac
+	fi
+      else
+	_LT_TAGVAR(ld_shlibs, $1)=no
+      fi
+      ;;
+
+    os2*)
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+      _LT_TAGVAR(hardcode_minus_L, $1)=yes
+      _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+      _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~echo DATA >> $output_objdir/$libname.def~echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def'
+      _LT_TAGVAR(old_archive_from_new_cmds, $1)='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def'
+      ;;
+
+    osf3*)
+      if test "$GCC" = yes; then
+	_LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*'
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+      else
+	_LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*'
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+      fi
+      _LT_TAGVAR(archive_cmds_need_lc, $1)='no'
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+      ;;
+
+    osf4* | osf5*)	# as osf3* with the addition of -msym flag
+      if test "$GCC" = yes; then
+	_LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*'
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+      else
+	_LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*'
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+	_LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~
+	$CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp'
+
+	# Both c and cxx compiler support -rpath directly
+	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir'
+      fi
+      _LT_TAGVAR(archive_cmds_need_lc, $1)='no'
+      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+      ;;
+
+    solaris*)
+      _LT_TAGVAR(no_undefined_flag, $1)=' -z defs'
+      if test "$GCC" = yes; then
+	wlarc='${wl}'
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+	_LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+	  $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+      else
+	case `$CC -V 2>&1` in
+	*"Compilers 5.0"*)
+	  wlarc=''
+	  _LT_TAGVAR(archive_cmds, $1)='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags'
+	  _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+	  $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp'
+	  ;;
+	*)
+	  wlarc='${wl}'
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags'
+	  _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+	  $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+	  ;;
+	esac
+      fi
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      case $host_os in
+      solaris2.[[0-5]] | solaris2.[[0-5]].*) ;;
+      *)
+	# The compiler driver will combine and reorder linker options,
+	# but understands `-z linker_flag'.  GCC discards it without `$wl',
+	# but is careful enough not to reorder.
+	# Supported since Solaris 2.6 (maybe 2.5.1?)
+	if test "$GCC" = yes; then
+	  _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract'
+	else
+	  _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract'
+	fi
+	;;
+      esac
+      _LT_TAGVAR(link_all_deplibs, $1)=yes
+      ;;
+
+    sunos4*)
+      if test "x$host_vendor" = xsequent; then
+	# Use $CC to link under sequent, because it throws in some extra .o
+	# files that make .init and .fini sections work.
+	_LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags'
+      else
+	_LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags'
+      fi
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+      _LT_TAGVAR(hardcode_direct, $1)=yes
+      _LT_TAGVAR(hardcode_minus_L, $1)=yes
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      ;;
+
+    sysv4)
+      case $host_vendor in
+	sni)
+	  _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+	  _LT_TAGVAR(hardcode_direct, $1)=yes # is this really true???
+	;;
+	siemens)
+	  ## LD is ld: it makes a PLAMLIB.
+	  ## CC just makes a GrossModule.
+	  _LT_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags'
+	  _LT_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs'
+	  _LT_TAGVAR(hardcode_direct, $1)=no
+        ;;
+	motorola)
+	  _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+	  _LT_TAGVAR(hardcode_direct, $1)=no # Motorola manual says yes, but my tests say they lie
+	;;
+      esac
+      runpath_var='LD_RUN_PATH'
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      ;;
+
+    sysv4.3*)
+      _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      _LT_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport'
+      ;;
+
+    sysv4*MP*)
+      if test -d /usr/nec; then
+	_LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+	_LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+	runpath_var=LD_RUN_PATH
+	hardcode_runpath_var=yes
+	_LT_TAGVAR(ld_shlibs, $1)=yes
+      fi
+      ;;
+
+    sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*)
+      _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text'
+      _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      runpath_var='LD_RUN_PATH'
+
+      if test "$GCC" = yes; then
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+      else
+	_LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+      fi
+      ;;
+
+    sysv5* | sco3.2v5* | sco5v6*)
+      # Note: We can NOT use -z defs as we might desire, because we do not
+      # link with -lc, and that would cause any symbols used from libc to
+      # always be unresolved, which means just about no library would
+      # ever link correctly.  If we're not using GNU ld we use -z text
+      # though, which does catch some bad symbols but isn't as heavy-handed
+      # as -z defs.
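+      # For example (a sketch of the difference): with `-z defs' every libc
+      # reference (printf, malloc, ...) in the library would be reported as
+      # unresolved, because -lc is never on the link line; `-z text' only
+      # forbids relocations in non-writable (text) segments, so ordinary
+      # libc references still link.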
+      _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text'
+      _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs'
+      _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir'
+      _LT_TAGVAR(hardcode_libdir_separator, $1)=':'
+      _LT_TAGVAR(link_all_deplibs, $1)=yes
+      _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport'
+      runpath_var='LD_RUN_PATH'
+
+      if test "$GCC" = yes; then
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+      else
+	_LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+      fi
+      ;;
+
+    uts4*)
+      _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      ;;
+
+    *)
+      _LT_TAGVAR(ld_shlibs, $1)=no
+      ;;
+    esac
+
+    if test x$host_vendor = xsni; then
+      case $host in
+      sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+	_LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Blargedynsym'
+	;;
+      esac
+    fi
+  fi
+])
+AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)])
+test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no
+
+_LT_TAGVAR(with_gnu_ld, $1)=$with_gnu_ld
+
+_LT_DECL([], [libext], [0], [Old archive suffix (normally "a")])dnl
+_LT_DECL([], [shrext_cmds], [1], [Shared library suffix (normally ".so")])dnl
+_LT_DECL([], [extract_expsyms_cmds], [2],
+    [The commands to extract the exported symbol list from a shared archive])
+
+#
+# Do we need to explicitly link libc?
+#
+case "x$_LT_TAGVAR(archive_cmds_need_lc, $1)" in
+x|xyes)
+  # Assume -lc should be added
+  _LT_TAGVAR(archive_cmds_need_lc, $1)=yes
+
+  if test "$enable_shared" = yes && test "$GCC" = yes; then
+    case $_LT_TAGVAR(archive_cmds, $1) in
+    *'~'*)
+      # FIXME: we may have to deal with multi-command sequences.
+      ;;
+    '$CC '*)
+      # Test whether the compiler implicitly links with -lc since on some
+      # systems, -lgcc has to come before -lc. If gcc already passes -lc
+      # to ld, don't add -lc before -lgcc.
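+      # A sketch of the probe below: compile the trivial conftest source,
+      # run $archive_cmds on it with compiler_flags=-v linker_flags=-v, and
+      # grep the verbose output for " -lc "; if present, the driver already
+      # passes -lc and we must not add it again.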
+      AC_CACHE_CHECK([whether -lc should be explicitly linked in],
+	[lt_cv_]_LT_TAGVAR(archive_cmds_need_lc, $1),
+	[$RM conftest*
+	echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+	if AC_TRY_EVAL(ac_compile) 2>conftest.err; then
+	  soname=conftest
+	  lib=conftest
+	  libobjs=conftest.$ac_objext
+	  deplibs=
+	  wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1)
+	  pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1)
+	  compiler_flags=-v
+	  linker_flags=-v
+	  verstring=
+	  output_objdir=.
+	  libname=conftest
+	  lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1)
+	  _LT_TAGVAR(allow_undefined_flag, $1)=
+	  if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1)
+	  then
+	    lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+	  else
+	    lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=yes
+	  fi
+	  _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag
+	else
+	  cat conftest.err 1>&5
+	fi
+	$RM conftest*
+	])
+      _LT_TAGVAR(archive_cmds_need_lc, $1)=$lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)
+      ;;
+    esac
+  fi
+  ;;
+esac
+
+_LT_TAGDECL([build_libtool_need_lc], [archive_cmds_need_lc], [0],
+    [Whether or not to add -lc for building shared libraries])
+_LT_TAGDECL([allow_libtool_libs_with_static_runtimes],
+    [enable_shared_with_static_runtimes], [0],
+    [Whether or not to disallow shared libs when runtime libs are static])
+_LT_TAGDECL([], [export_dynamic_flag_spec], [1],
+    [Compiler flag to allow reflexive dlopens])
+_LT_TAGDECL([], [whole_archive_flag_spec], [1],
+    [Compiler flag to generate shared objects directly from archives])
+_LT_TAGDECL([], [compiler_needs_object], [1],
+    [Whether the compiler copes with passing no objects directly])
+_LT_TAGDECL([], [old_archive_from_new_cmds], [2],
+    [Create an old-style archive from a shared archive])
+_LT_TAGDECL([], [old_archive_from_expsyms_cmds], [2],
+    [Create a temporary old-style archive to link instead of a shared archive])
+_LT_TAGDECL([], [archive_cmds], [2], [Commands used to build a shared archive])
+_LT_TAGDECL([], [archive_expsym_cmds], [2])
+_LT_TAGDECL([], [module_cmds], [2],
+    [Commands used to build a loadable module if different from building
+    a shared archive.])
+_LT_TAGDECL([], [module_expsym_cmds], [2])
+_LT_TAGDECL([], [with_gnu_ld], [1],
+    [Whether we are building with GNU ld or not])
+_LT_TAGDECL([], [allow_undefined_flag], [1],
+    [Flag that allows shared libraries with undefined symbols to be built])
+_LT_TAGDECL([], [no_undefined_flag], [1],
+    [Flag that enforces no undefined symbols])
+_LT_TAGDECL([], [hardcode_libdir_flag_spec], [1],
+    [Flag to hardcode $libdir into a binary during linking.
+    This must work even if $libdir does not exist])
+_LT_TAGDECL([], [hardcode_libdir_separator], [1],
+    [Whether we need a single "-rpath" flag with a separated argument])
+_LT_TAGDECL([], [hardcode_direct], [0],
+    [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes
+    DIR into the resulting binary])
+_LT_TAGDECL([], [hardcode_direct_absolute], [0],
+    [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes
+    DIR into the resulting binary and the resulting library dependency is
+    "absolute", i.e impossible to change by setting ${shlibpath_var} if the
+    library is relocated])
+_LT_TAGDECL([], [hardcode_minus_L], [0],
+    [Set to "yes" if using the -LDIR flag during linking hardcodes DIR
+    into the resulting binary])
+_LT_TAGDECL([], [hardcode_shlibpath_var], [0],
+    [Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR
+    into the resulting binary])
+_LT_TAGDECL([], [hardcode_automatic], [0],
+    [Set to "yes" if building a shared library automatically hardcodes DIR
+    into the library and all subsequent libraries and executables linked
+    against it])
+_LT_TAGDECL([], [inherit_rpath], [0],
+    [Set to yes if linker adds runtime paths of dependent libraries
+    to runtime path list])
+_LT_TAGDECL([], [link_all_deplibs], [0],
+    [Whether libtool must link a program against all its dependency libraries])
+_LT_TAGDECL([], [always_export_symbols], [0],
+    [Set to "yes" if exported symbols are required])
+_LT_TAGDECL([], [export_symbols_cmds], [2],
+    [The commands to list exported symbols])
+_LT_TAGDECL([], [exclude_expsyms], [1],
+    [Symbols that should not be listed in the preloaded symbols])
+_LT_TAGDECL([], [include_expsyms], [1],
+    [Symbols that must always be exported])
+_LT_TAGDECL([], [prelink_cmds], [2],
+    [Commands necessary for linking programs (against libraries) with templates])
+_LT_TAGDECL([], [postlink_cmds], [2],
+    [Commands necessary for finishing linking programs])
+_LT_TAGDECL([], [file_list_spec], [1],
+    [Specify filename containing input files])
+dnl FIXME: Not yet implemented
+dnl _LT_TAGDECL([], [thread_safe_flag_spec], [1],
+dnl    [Compiler flag to generate thread safe objects])
+])# _LT_LINKER_SHLIBS
+
+
+# _LT_LANG_C_CONFIG([TAG])
+# ------------------------
+# Ensure that the configuration variables for a C compiler are suitably
+# defined.  These variables are subsequently used by _LT_CONFIG to write
+# the compiler configuration to `libtool'.
+m4_defun([_LT_LANG_C_CONFIG],
+[m4_require([_LT_DECL_EGREP])dnl
+lt_save_CC="$CC"
+AC_LANG_PUSH(C)
+
+# Source file extension for C test sources.
+ac_ext=c
+
+# Object file extension for compiled C test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# Code to be used in simple compile tests
+lt_simple_compile_test_code="int some_variable = 0;"
+
+# Code to be used in simple link tests
+lt_simple_link_test_code='int main(){return(0);}'
+
+_LT_TAG_COMPILER
+# Save the default compiler, since it gets overwritten when the other
+# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP.
+compiler_DEFAULT=$CC
+
+# save warnings/boilerplate of simple test code
+_LT_COMPILER_BOILERPLATE
+_LT_LINKER_BOILERPLATE
+
+if test -n "$compiler"; then
+  _LT_COMPILER_NO_RTTI($1)
+  _LT_COMPILER_PIC($1)
+  _LT_COMPILER_C_O($1)
+  _LT_COMPILER_FILE_LOCKS($1)
+  _LT_LINKER_SHLIBS($1)
+  _LT_SYS_DYNAMIC_LINKER($1)
+  _LT_LINKER_HARDCODE_LIBPATH($1)
+  LT_SYS_DLOPEN_SELF
+  _LT_CMD_STRIPLIB
+
+  # Report which library types will actually be built
+  AC_MSG_CHECKING([if libtool supports shared libraries])
+  AC_MSG_RESULT([$can_build_shared])
+
+  AC_MSG_CHECKING([whether to build shared libraries])
+  test "$can_build_shared" = "no" && enable_shared=no
+
+  # On AIX, shared libraries and static libraries use the same namespace, and
+  # are all built from PIC.
+  case $host_os in
+  aix3*)
+    test "$enable_shared" = yes && enable_static=no
+    if test -n "$RANLIB"; then
+      archive_cmds="$archive_cmds~\$RANLIB \$lib"
+      postinstall_cmds='$RANLIB $lib'
+    fi
+    ;;
+
+  aix[[4-9]]*)
+    if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then
+      test "$enable_shared" = yes && enable_static=no
+    fi
+    ;;
+  esac
+  AC_MSG_RESULT([$enable_shared])
+
+  AC_MSG_CHECKING([whether to build static libraries])
+  # Make sure either enable_shared or enable_static is yes.
+  test "$enable_shared" = yes || enable_static=yes
+  AC_MSG_RESULT([$enable_static])
+
+  _LT_CONFIG($1)
+fi
+AC_LANG_POP
+CC="$lt_save_CC"
+])# _LT_LANG_C_CONFIG
+
+
+# _LT_LANG_CXX_CONFIG([TAG])
+# --------------------------
+# Ensure that the configuration variables for a C++ compiler are suitably
+# defined.  These variables are subsequently used by _LT_CONFIG to write
+# the compiler configuration to `libtool'.
+m4_defun([_LT_LANG_CXX_CONFIG],
+[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_DECL_EGREP])dnl
+m4_require([_LT_PATH_MANIFEST_TOOL])dnl
+if test -n "$CXX" && ( test "X$CXX" != "Xno" &&
+    ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) ||
+    (test "X$CXX" != "Xg++"))) ; then
+  AC_PROG_CXXCPP
+else
+  _lt_caught_CXX_error=yes
+fi
+
+AC_LANG_PUSH(C++)
+_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+_LT_TAGVAR(allow_undefined_flag, $1)=
+_LT_TAGVAR(always_export_symbols, $1)=no
+_LT_TAGVAR(archive_expsym_cmds, $1)=
+_LT_TAGVAR(compiler_needs_object, $1)=no
+_LT_TAGVAR(export_dynamic_flag_spec, $1)=
+_LT_TAGVAR(hardcode_direct, $1)=no
+_LT_TAGVAR(hardcode_direct_absolute, $1)=no
+_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+_LT_TAGVAR(hardcode_libdir_separator, $1)=
+_LT_TAGVAR(hardcode_minus_L, $1)=no
+_LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported
+_LT_TAGVAR(hardcode_automatic, $1)=no
+_LT_TAGVAR(inherit_rpath, $1)=no
+_LT_TAGVAR(module_cmds, $1)=
+_LT_TAGVAR(module_expsym_cmds, $1)=
+_LT_TAGVAR(link_all_deplibs, $1)=unknown
+_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+_LT_TAGVAR(reload_flag, $1)=$reload_flag
+_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
+_LT_TAGVAR(no_undefined_flag, $1)=
+_LT_TAGVAR(whole_archive_flag_spec, $1)=
+_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
+
+# Source file extension for C++ test sources.
+ac_ext=cpp
+
+# Object file extension for compiled C++ test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# No sense in running all these tests if we already determined that
+# the CXX compiler isn't working.  Some variables (like enable_shared)
+# are currently assumed to apply to all compilers on this platform,
+# and will be corrupted by setting them based on a non-working compiler.
+if test "$_lt_caught_CXX_error" != yes; then
+  # Code to be used in simple compile tests
+  lt_simple_compile_test_code="int some_variable = 0;"
+
+  # Code to be used in simple link tests
+  lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }'
+
+  # ltmain only uses $CC for tagged configurations so make sure $CC is set.
+  _LT_TAG_COMPILER
+
+  # save warnings/boilerplate of simple test code
+  _LT_COMPILER_BOILERPLATE
+  _LT_LINKER_BOILERPLATE
+
+  # Allow CC to be a program name with arguments.
+  lt_save_CC=$CC
+  lt_save_CFLAGS=$CFLAGS
+  lt_save_LD=$LD
+  lt_save_GCC=$GCC
+  GCC=$GXX
+  lt_save_with_gnu_ld=$with_gnu_ld
+  lt_save_path_LD=$lt_cv_path_LD
+  if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then
+    lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx
+  else
+    $as_unset lt_cv_prog_gnu_ld
+  fi
+  if test -n "${lt_cv_path_LDCXX+set}"; then
+    lt_cv_path_LD=$lt_cv_path_LDCXX
+  else
+    $as_unset lt_cv_path_LD
+  fi
+  test -z "${LDCXX+set}" || LD=$LDCXX
+  CC=${CXX-"c++"}
+  CFLAGS=$CXXFLAGS
+  compiler=$CC
+  _LT_TAGVAR(compiler, $1)=$CC
+  _LT_CC_BASENAME([$compiler])
+
+  if test -n "$compiler"; then
+    # We don't want -fno-exceptions when compiling C++ code, so set the
+    # no_builtin_flag separately
+    if test "$GXX" = yes; then
+      _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin'
+    else
+      _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=
+    fi
+
+    if test "$GXX" = yes; then
+      # Set up default GNU C++ configuration
+
+      LT_PATH_LD
+
+      # Check if GNU C++ uses GNU ld as the underlying linker, since the
+      # archiving commands below assume that GNU ld is being used.
+      if test "$with_gnu_ld" = yes; then
+        _LT_TAGVAR(archive_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
+        _LT_TAGVAR(archive_expsym_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+
+        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+        _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+
+        # If archive_cmds runs LD, not CC, wlarc should be empty
+        # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to
+        #     investigate it a little bit more. (MM)
+        wlarc='${wl}'
+
+        # Ancient GNU ld didn't support --whole-archive et al.
+        if eval "`$CC -print-prog-name=ld` --help 2>&1" |
+	  $GREP 'no-whole-archive' > /dev/null; then
+          _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+        else
+          _LT_TAGVAR(whole_archive_flag_spec, $1)=
+        fi
+      else
+        with_gnu_ld=no
+        wlarc=
+
+        # A generic and very simple default shared library creation
+        # command for GNU C++ for the case where it uses the native
+        # linker, instead of GNU ld.  If possible, this setting should
+        # be overridden to take advantage of the native linker features on
+        # the platform it is being used on.
+        _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
+      fi
+
+      # Commands to make compiler produce verbose output that lists
+      # what "hidden" libraries, object files and flags are used when
+      # linking a shared library.
+      output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
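+      # i.e. compile conftest verbosely, drop the "Configured with:" banner,
+      # and keep only the lines mentioning -L, which reveal the implicit
+      # library search directories.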
+
+    else
+      GXX=no
+      with_gnu_ld=no
+      wlarc=
+    fi
+
+    # PORTME: fill in a description of your system's C++ link characteristics
+    AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries])
+    _LT_TAGVAR(ld_shlibs, $1)=yes
+    case $host_os in
+      aix3*)
+        # FIXME: insert proper C++ library support
+        _LT_TAGVAR(ld_shlibs, $1)=no
+        ;;
+      aix[[4-9]]*)
+        if test "$host_cpu" = ia64; then
+          # On IA64, the linker does run time linking by default, so we don't
+          # have to do anything special.
+          aix_use_runtimelinking=no
+          exp_sym_flag='-Bexport'
+          no_entry_flag=""
+        else
+          aix_use_runtimelinking=no
+
+          # Test if we are trying to use run time linking or normal
+          # AIX style linking. If -brtl is somewhere in LDFLAGS, we
+          # need to do runtime linking.
+          case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*)
+	    for ld_flag in $LDFLAGS; do
+	      case $ld_flag in
+	      *-brtl*)
+	        aix_use_runtimelinking=yes
+	        break
+	        ;;
+	      esac
+	    done
+	    ;;
+          esac
+
+          exp_sym_flag='-bexport'
+          no_entry_flag='-bnoentry'
+        fi
+
+        # When large executables or shared objects are built, AIX ld can
+        # have problems creating the table of contents.  If linking a library
+        # or program results in "error TOC overflow" add -mminimal-toc to
+        # CXXFLAGS/CFLAGS for g++/gcc.  In the cases where that is not
+        # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
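+        # For example, a user can retry a failed link with something like:
+        #   make CXXFLAGS="-mminimal-toc"     # first resort for g++/gcc
+        #   make LDFLAGS="-Wl,-bbigtoc"       # if that is still not enough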
+
+        _LT_TAGVAR(archive_cmds, $1)=''
+        _LT_TAGVAR(hardcode_direct, $1)=yes
+        _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+        _LT_TAGVAR(hardcode_libdir_separator, $1)=':'
+        _LT_TAGVAR(link_all_deplibs, $1)=yes
+        _LT_TAGVAR(file_list_spec, $1)='${wl}-f,'
+
+        if test "$GXX" = yes; then
+          case $host_os in aix4.[[012]]|aix4.[[012]].*)
+          # We only want to do this on AIX 4.2 and lower; the check
+          # below for broken collect2 doesn't work under 4.3+.
+	  collect2name=`${CC} -print-prog-name=collect2`
+	  if test -f "$collect2name" &&
+	     strings "$collect2name" | $GREP resolve_lib_name >/dev/null
+	  then
+	    # We have reworked collect2
+	    :
+	  else
+	    # We have old collect2
+	    _LT_TAGVAR(hardcode_direct, $1)=unsupported
+	    # It fails to find uninstalled libraries when the uninstalled
+	    # path is not listed in the libpath.  Setting hardcode_minus_L
+	    # to unsupported forces relinking
+	    _LT_TAGVAR(hardcode_minus_L, $1)=yes
+	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+	    _LT_TAGVAR(hardcode_libdir_separator, $1)=
+	  fi
+          esac
+          shared_flag='-shared'
+	  if test "$aix_use_runtimelinking" = yes; then
+	    shared_flag="$shared_flag "'${wl}-G'
+	  fi
+        else
+          # not using gcc
+          if test "$host_cpu" = ia64; then
+	  # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
+	  # chokes on -Wl,-G. The following line is correct:
+	  shared_flag='-G'
+          else
+	    if test "$aix_use_runtimelinking" = yes; then
+	      shared_flag='${wl}-G'
+	    else
+	      shared_flag='${wl}-bM:SRE'
+	    fi
+          fi
+        fi
+
+        _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall'
+        # It seems that -bexpall does not export symbols beginning with
+        # underscore (_), so it is better to generate a list of symbols to
+	# export.
+        _LT_TAGVAR(always_export_symbols, $1)=yes
+        if test "$aix_use_runtimelinking" = yes; then
+          # Warning - without using the other runtime loading flags (-brtl),
+          # -berok will link without error, but may produce a broken library.
+          _LT_TAGVAR(allow_undefined_flag, $1)='-berok'
+          # Determine the default libpath from the value encoded in an empty
+          # executable.
+          _LT_SYS_MODULE_PATH_AIX([$1])
+          _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath"
+
+          _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag"
+        else
+          if test "$host_cpu" = ia64; then
+	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib'
+	    _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs"
+	    _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols"
+          else
+	    # Determine the default libpath from the value encoded in an
+	    # empty executable.
+	    _LT_SYS_MODULE_PATH_AIX([$1])
+	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath"
+	    # Warning - without using the other run time loading flags,
+	    # -berok will link without error, but may produce a broken library.
+	    _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok'
+	    _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok'
+	    if test "$with_gnu_ld" = yes; then
+	      # We only use this code for GNU lds that support --whole-archive.
+	      _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive'
+	    else
+	      # Exported symbols can be pulled into shared objects from archives
+	      _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience'
+	    fi
+	    _LT_TAGVAR(archive_cmds_need_lc, $1)=yes
+	    # This is similar to how AIX traditionally builds its shared
+	    # libraries.
+	    _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname'
+          fi
+        fi
+        ;;
+
+      beos*)
+	if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	  _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+	  # Joseph Beckenbach <jrb3 at best.com> says some releases of gcc
+	  # support --undefined.  This deserves some investigation.  FIXME
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+	else
+	  _LT_TAGVAR(ld_shlibs, $1)=no
+	fi
+	;;
+
+      chorus*)
+        case $cc_basename in
+          *)
+	  # FIXME: insert proper C++ library support
+	  _LT_TAGVAR(ld_shlibs, $1)=no
+	  ;;
+        esac
+        ;;
+
+      cygwin* | mingw* | pw32* | cegcc*)
+	case $GXX,$cc_basename in
+	,cl* | no,cl*)
+	  # Native MSVC
+	  # hardcode_libdir_flag_spec is actually meaningless, as there is
+	  # no search path for DLLs.
+	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' '
+	  _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+	  _LT_TAGVAR(always_export_symbols, $1)=yes
+	  _LT_TAGVAR(file_list_spec, $1)='@'
+	  # Tell ltmain to make .lib files, not .a files.
+	  libext=lib
+	  # Tell ltmain to make .dll files, not .so files.
+	  shrext_cmds=".dll"
+	  # FIXME: Setting linknames here is a bad hack.
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames='
+	  _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+	      $SED -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp;
+	    else
+	      $SED -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp;
+	    fi~
+	    $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
+	    linknames='
+	  # The linker will not automatically build a static lib if we build a DLL.
+	  # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true'
+	  _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+	  # Don't use ranlib
+	  _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib'
+	  _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~
+	    lt_tool_outputfile="@TOOL_OUTPUT@"~
+	    case $lt_outputfile in
+	      *.exe|*.EXE) ;;
+	      *)
+		lt_outputfile="$lt_outputfile.exe"
+		lt_tool_outputfile="$lt_tool_outputfile.exe"
+		;;
+	    esac~
+	    func_to_tool_file "$lt_outputfile"~
+	    if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then
+	      $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
+	      $RM "$lt_outputfile.manifest";
+	    fi'
+	  ;;
+	*)
+	  # g++
+	  # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless,
+	  # as there is no search path for DLLs.
+	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+	  _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-all-symbols'
+	  _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+	  _LT_TAGVAR(always_export_symbols, $1)=no
+	  _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+
+	  if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
+	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+	    # If the export-symbols file already is a .def file (1st line
+	    # is EXPORTS), use it as is; otherwise, prepend...
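+	    # A minimal .def file, with hypothetical symbol names:
+	    #   EXPORTS
+	    #   my_function
+	    #   my_other_function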
+	    _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+	      cp $export_symbols $output_objdir/$soname.def;
+	    else
+	      echo EXPORTS > $output_objdir/$soname.def;
+	      cat $export_symbols >> $output_objdir/$soname.def;
+	    fi~
+	    $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+	  else
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	  fi
+	  ;;
+	esac
+	;;
+      darwin* | rhapsody*)
+        _LT_DARWIN_LINKER_FEATURES($1)
+	;;
+
+      dgux*)
+        case $cc_basename in
+          ec++*)
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+          ghcx*)
+	    # Green Hills C++ Compiler
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+          *)
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+        esac
+        ;;
+
+      freebsd2.*)
+        # C++ shared libraries were reported to be fairly broken before
+        # the switch to ELF.
+        _LT_TAGVAR(ld_shlibs, $1)=no
+        ;;
+
+      freebsd-elf*)
+        _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+        ;;
+
+      freebsd* | dragonfly*)
+        # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF
+        # conventions
+        _LT_TAGVAR(ld_shlibs, $1)=yes
+        ;;
+
+      haiku*)
+        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+        _LT_TAGVAR(link_all_deplibs, $1)=yes
+        ;;
+
+      hpux9*)
+        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir'
+        _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+        _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+        _LT_TAGVAR(hardcode_direct, $1)=yes
+        _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH,
+				             # but as the default
+				             # location of the library.
+
+        case $cc_basename in
+          CC*)
+            # FIXME: insert proper C++ library support
+            _LT_TAGVAR(ld_shlibs, $1)=no
+            ;;
+          aCC*)
+            _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+            # Commands to make compiler produce verbose output that lists
+            # what "hidden" libraries, object files and flags are used when
+            # linking a shared library.
+            #
+            # There doesn't appear to be a way to prevent this compiler from
+            # explicitly linking system object files so we need to strip them
+            # from the output so that they don't get included in the library
+            # dependencies.
+            output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+            ;;
+          *)
+            if test "$GXX" = yes; then
+              _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+            else
+              # FIXME: insert proper C++ library support
+              _LT_TAGVAR(ld_shlibs, $1)=no
+            fi
+            ;;
+        esac
+        ;;
+
+      hpux10*|hpux11*)
+        if test $with_gnu_ld = no; then
+	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir'
+	  _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+          case $host_cpu in
+            hppa*64*|ia64*)
+              ;;
+            *)
+	      _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+              ;;
+          esac
+        fi
+        case $host_cpu in
+          hppa*64*|ia64*)
+            _LT_TAGVAR(hardcode_direct, $1)=no
+            _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+            ;;
+          *)
+            _LT_TAGVAR(hardcode_direct, $1)=yes
+            _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+            _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH,
+					         # but as the default
+					         # location of the library.
+            ;;
+        esac
+
+        case $cc_basename in
+          CC*)
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+          aCC*)
+	    case $host_cpu in
+	      hppa*64*)
+	        _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	        ;;
+	      ia64*)
+	        _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	        ;;
+	      *)
+	        _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	        ;;
+	    esac
+	    # Commands to make compiler produce verbose output that lists
+	    # what "hidden" libraries, object files and flags are used when
+	    # linking a shared library.
+	    #
+	    # There doesn't appear to be a way to prevent this compiler from
+	    # explicitly linking system object files so we need to strip them
+	    # from the output so that they don't get included in the library
+	    # dependencies.
+	    output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+	    ;;
+          *)
+	    if test "$GXX" = yes; then
+	      if test $with_gnu_ld = no; then
+	        case $host_cpu in
+	          hppa*64*)
+	            _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	            ;;
+	          ia64*)
+	            _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	            ;;
+	          *)
+	            _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	            ;;
+	        esac
+	      fi
+	    else
+	      # FIXME: insert proper C++ library support
+	      _LT_TAGVAR(ld_shlibs, $1)=no
+	    fi
+	    ;;
+        esac
+        ;;
+
+      interix[[3-9]]*)
+	_LT_TAGVAR(hardcode_direct, $1)=no
+	_LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+	_LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+	# Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
+	# Instead, shared libraries are loaded at an image base (0x10000000 by
+	# default) and relocated if they conflict, which is a slow, very
+	# memory-consuming and fragmenting process.  To avoid this, we pick a
+	# random, 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000
+	# at link time.  Moving up from 0x10000000 also allows more sbrk(2)
+	# space.
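+	# Arithmetic check of the image-base expression below:
+	#   RANDOM % 4096 / 2           ranges over 0..2047
+	#   * 262144 (0x40000, 256 KiB) gives 0x00000000..0x1FFC0000
+	#   + 1342177280 (0x50000000)   lands in 0x50000000..0x6FFC0000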
+	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+	_LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+	;;
+      irix5* | irix6*)
+        case $cc_basename in
+          CC*)
+	    # SGI C++
+	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+
+	    # Archives containing C++ object files must be created using
+	    # "CC -ar", where "CC" is the IRIX C++ compiler.  This is
+	    # necessary to make sure instantiated templates are included
+	    # in the archive.
+	    _LT_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs'
+	    ;;
+          *)
+	    if test "$GXX" = yes; then
+	      if test "$with_gnu_ld" = no; then
+	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+	      else
+	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib'
+	      fi
+	    fi
+	    _LT_TAGVAR(link_all_deplibs, $1)=yes
+	    ;;
+        esac
+        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+        _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+        _LT_TAGVAR(inherit_rpath, $1)=yes
+        ;;
+
+      linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+        case $cc_basename in
+          KCC*)
+	    # Kuck and Associates, Inc. (KAI) C++ Compiler
+
+	    # KCC will only create a shared library if the output file
+	    # ends with ".so" (or ".sl" for HP-UX), so rename the library
+	    # to its proper name (with version) after linking.
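+	    # e.g. (hypothetical names): link as templib=libfoo.so, then
+	    #   mv libfoo.so libfoo.so.1.2.3
+	    # so the versioned name requested in $lib is what finally exists.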
+	    _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
+	    _LT_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib'
+	    # Commands to make compiler produce verbose output that lists
+	    # what "hidden" libraries, object files and flags are used when
+	    # linking a shared library.
+	    #
+	    # There doesn't appear to be a way to prevent this compiler from
+	    # explicitly linking system object files so we need to strip them
+	    # from the output so that they don't get included in the library
+	    # dependencies.
+	    output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+
+	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+	    _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+
+	    # Archives containing C++ object files must be created using
+	    # "CC -Bstatic", where "CC" is the KAI C++ compiler.
+	    _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs'
+	    ;;
+	  icpc* | ecpc* )
+	    # Intel C++
+	    with_gnu_ld=yes
+	    # Version 8.0 and above of icpc choke on multiply defined symbols
+	    # if we add $predep_objects and $postdep_objects; 7.1 and earlier,
+	    # however, do not add the objects themselves.
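+	    # The case below keys on the literal substring "Version 7." in the
+	    # `$CC -V' output; anything else is assumed to be 8.0 or newer.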
+	    case `$CC -V 2>&1` in
+	      *"Version 7."*)
+	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
+		_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+		;;
+	      *)  # Version 8.0 or newer
+	        tmp_idyn=
+	        case $host_cpu in
+		  ia64*) tmp_idyn=' -i_dynamic';;
+		esac
+	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+		_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+		;;
+	    esac
+	    _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+	    _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+	    _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive'
+	    ;;
+          pgCC* | pgcpp*)
+            # Portland Group C++ compiler
+	    case `$CC -V` in
+	    *pgCC\ [[1-5]].* | *pgcpp\ [[1-5]].*)
+	      _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~
+		rm -rf $tpldir~
+		$CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~
+		compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"'
+	      _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~
+		rm -rf $tpldir~
+		$CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~
+		$AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~
+		$RANLIB $oldlib'
+	      _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~
+		rm -rf $tpldir~
+		$CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
+		$CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib'
+	      _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~
+		rm -rf $tpldir~
+		$CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
+		$CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib'
+	      ;;
+	    *) # Version 6 and above use weak symbols
+	      _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib'
+	      _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib'
+	      ;;
+	    esac
+
+	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir'
+	    _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+	    _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+            ;;
+	  cxx*)
+	    # Compaq C++
+	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
+	    _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname  -o $lib ${wl}-retain-symbols-file $wl$export_symbols'
+
+	    runpath_var=LD_RUN_PATH
+	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir'
+	    _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+	    # Commands to make compiler produce verbose output that lists
+	    # what "hidden" libraries, object files and flags are used when
+	    # linking a shared library.
+	    #
+	    # There doesn't appear to be a way to prevent this compiler from
+	    # explicitly linking system object files so we need to strip them
+	    # from the output so that they don't get included in the library
+	    # dependencies.
+	    output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed'
+	    ;;
+	  xl* | mpixl* | bgxl*)
+	    # IBM XL 8.0 on PPC, with GNU ld
+	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+	    _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+	    _LT_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+	    if test "x$supports_anon_versioning" = xyes; then
+	      _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~
+		cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+		echo "local: *; };" >> $output_objdir/$libname.ver~
+		$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib'
+	    fi
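+	    # The generated $libname.ver is a GNU ld version script; with
+	    # hypothetical symbols it would read:
+	    #   { global: my_function; my_other_function; local: *; };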
+	    ;;
+	  *)
+	    case `$CC -V 2>&1 | sed 5q` in
+	    *Sun\ C*)
+	      # Sun C++ 5.9
+	      _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs'
+	      _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	      _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols'
+	      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+	      _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+	      _LT_TAGVAR(compiler_needs_object, $1)=yes
+
+	      # Not sure whether something based on
+	      # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1
+	      # would be better.
+	      output_verbose_link_cmd='func_echo_all'
+
+	      # Archives containing C++ object files must be created using
+	      # "CC -xar", where "CC" is the Sun C++ compiler.  This is
+	      # necessary to make sure instantiated templates are included
+	      # in the archive.
+	      _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs'
+	      ;;
+	    esac
+	    ;;
+	esac
+	;;
+
+      lynxos*)
+        # FIXME: insert proper C++ library support
+	_LT_TAGVAR(ld_shlibs, $1)=no
+	;;
+
+      m88k*)
+        # FIXME: insert proper C++ library support
+        _LT_TAGVAR(ld_shlibs, $1)=no
+	;;
+
+      mvs*)
+        case $cc_basename in
+          cxx*)
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+	  *)
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+	esac
+	;;
+
+      netbsd*)
+        if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+	  _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable  -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags'
+	  wlarc=
+	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+	  _LT_TAGVAR(hardcode_direct, $1)=yes
+	  _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+	fi
+	# Work around some broken pre-1.5 toolchains
+	output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"'
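+	# i.e. the sed above strips the spurious "-lgcc -lc -lgcc" triple that
+	# those toolchains print, so it is not mistaken for real dependencies.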
+	;;
+
+      *nto* | *qnx*)
+        _LT_TAGVAR(ld_shlibs, $1)=yes
+	;;
+
+      openbsd2*)
+        # C++ shared libraries are fairly broken
+	_LT_TAGVAR(ld_shlibs, $1)=no
+	;;
+
+      openbsd*)
+	if test -f /usr/libexec/ld.so; then
+	  _LT_TAGVAR(hardcode_direct, $1)=yes
+	  _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+	  _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
+	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+	  if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+	    _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib'
+	    _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E'
+	    _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+	  fi
+	  output_verbose_link_cmd=func_echo_all
+	else
+	  _LT_TAGVAR(ld_shlibs, $1)=no
+	fi
+	;;
+
+      osf3* | osf4* | osf5*)
+        case $cc_basename in
+          KCC*)
+	    # Kuck and Associates, Inc. (KAI) C++ Compiler
+
+	    # KCC will only create a shared library if the output file
+	    # ends with ".so" (or ".sl" for HP-UX), so rename the library
+	    # to its proper name (with version) after linking.
+	    _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
+
+	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+	    _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+	    # Archives containing C++ object files must be created using
+	    # the KAI C++ compiler.
+	    case $host in
+	      osf3*) _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;;
+	      *) _LT_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' ;;
+	    esac
+	    ;;
+          RCC*)
+	    # Rational C++ 2.4.1
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+          cxx*)
+	    case $host in
+	      osf3*)
+	        _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*'
+	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && func_echo_all "${wl}-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+	        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+		;;
+	      *)
+	        _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*'
+	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+	        _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~
+	          echo "-hidden">> $lib.exp~
+	          $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp  `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~
+	          $RM $lib.exp'
+	        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir'
+		;;
+	    esac
+
+	    _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+	    # Commands to make compiler produce verbose output that lists
+	    # what "hidden" libraries, object files and flags are used when
+	    # linking a shared library.
+	    #
+	    # There doesn't appear to be a way to prevent this compiler from
+	    # explicitly linking system object files so we need to strip them
+	    # from the output so that they don't get included in the library
+	    # dependencies.
+	    output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+	    ;;
+	  *)
+	    if test "$GXX" = yes && test "$with_gnu_ld" = no; then
+	      _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*'
+	      case $host in
+	        osf3*)
+	          _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+		  ;;
+	        *)
+	          _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+		  ;;
+	      esac
+
+	      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+	      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+	      # Commands to make compiler produce verbose output that lists
+	      # what "hidden" libraries, object files and flags are used when
+	      # linking a shared library.
+	      output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+
+	    else
+	      # FIXME: insert proper C++ library support
+	      _LT_TAGVAR(ld_shlibs, $1)=no
+	    fi
+	    ;;
+        esac
+        ;;
+
+      psos*)
+        # FIXME: insert proper C++ library support
+        _LT_TAGVAR(ld_shlibs, $1)=no
+        ;;
+
+      sunos4*)
+        case $cc_basename in
+          CC*)
+	    # Sun C++ 4.x
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+          lcc*)
+	    # Lucid
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+          *)
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+        esac
+        ;;
+
+      solaris*)
+        case $cc_basename in
+          CC* | sunCC*)
+	    # Sun C++ 4.2, 5.x and Centerline C++
+            _LT_TAGVAR(archive_cmds_need_lc,$1)=yes
+	    _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs'
+	    _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag}  -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	    _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+	      $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+
+	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+	    _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+	    case $host_os in
+	      solaris2.[[0-5]] | solaris2.[[0-5]].*) ;;
+	      *)
+		# The compiler driver will combine and reorder linker options,
+		# but understands `-z linker_flag'.
+	        # Supported since Solaris 2.6 (maybe 2.5.1?)
+		_LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract'
+	        ;;
+	    esac
+	    _LT_TAGVAR(link_all_deplibs, $1)=yes
+
+	    output_verbose_link_cmd='func_echo_all'
+
+	    # Archives containing C++ object files must be created using
+	    # "CC -xar", where "CC" is the Sun C++ compiler.  This is
+	    # necessary to make sure instantiated templates are included
+	    # in the archive.
+	    _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs'
+	    ;;
+          gcx*)
+	    # Green Hills C++ Compiler
+	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
+
+	    # The C++ compiler must be used to create the archive.
+	    _LT_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs'
+	    ;;
+          *)
+	    # GNU C++ compiler with Solaris linker
+	    if test "$GXX" = yes && test "$with_gnu_ld" = no; then
+	      _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-z ${wl}defs'
+	      if $CC --version | $GREP -v '^2\.7' > /dev/null; then
+	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
+	        _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+		  $CC -shared $pic_flag -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+
+	        # Commands to make compiler produce verbose output that lists
+	        # what "hidden" libraries, object files and flags are used when
+	        # linking a shared library.
+	        output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+	      else
+	        # g++ 2.7 appears to require `-G' NOT `-shared' on this
+	        # platform.
+	        _LT_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
+	        _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+		  $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+
+	        # Commands to make compiler produce verbose output that lists
+	        # what "hidden" libraries, object files and flags are used when
+	        # linking a shared library.
+	        output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+	      fi
+
+	      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $wl$libdir'
+	      case $host_os in
+		solaris2.[[0-5]] | solaris2.[[0-5]].*) ;;
+		*)
+		  _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract'
+		  ;;
+	      esac
+	    fi
+	    ;;
+        esac
+        ;;
+
+    sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*)
+      _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text'
+      _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+      runpath_var='LD_RUN_PATH'
+
+      case $cc_basename in
+        CC*)
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	*)
+	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+      esac
+      ;;
+
+      sysv5* | sco3.2v5* | sco5v6*)
+	# Note: We can NOT use -z defs as we might desire, because we do not
+	# link with -lc, and that would cause any symbols used from libc to
+	# always be unresolved, which means just about no library would
+	# ever link correctly.  If we're not using GNU ld we use -z text
+	# though, which does catch some bad symbols but isn't as heavy-handed
+	# as -z defs.
+	_LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text'
+	_LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs'
+	_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+	_LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir'
+	_LT_TAGVAR(hardcode_libdir_separator, $1)=':'
+	_LT_TAGVAR(link_all_deplibs, $1)=yes
+	_LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport'
+	runpath_var='LD_RUN_PATH'
+
+	case $cc_basename in
+          CC*)
+	    _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	    _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	    _LT_TAGVAR(old_archive_cmds, $1)='$CC -Tprelink_objects $oldobjs~
+	      '"$_LT_TAGVAR(old_archive_cmds, $1)"
+	    _LT_TAGVAR(reload_cmds, $1)='$CC -Tprelink_objects $reload_objs~
+	      '"$_LT_TAGVAR(reload_cmds, $1)"
+	    ;;
+	  *)
+	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	    _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	    ;;
+	esac
+      ;;
+
+      tandem*)
+        case $cc_basename in
+          NCC*)
+	    # NonStop-UX NCC 3.20
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+          *)
+	    # FIXME: insert proper C++ library support
+	    _LT_TAGVAR(ld_shlibs, $1)=no
+	    ;;
+        esac
+        ;;
+
+      vxworks*)
+        # FIXME: insert proper C++ library support
+        _LT_TAGVAR(ld_shlibs, $1)=no
+        ;;
+
+      *)
+        # FIXME: insert proper C++ library support
+        _LT_TAGVAR(ld_shlibs, $1)=no
+        ;;
+    esac
+
+    AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)])
+    test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no
+
+    _LT_TAGVAR(GCC, $1)="$GXX"
+    _LT_TAGVAR(LD, $1)="$LD"
+
+    ## CAVEAT EMPTOR:
+    ## There is no encapsulation within the following macros, do not change
+    ## the running order or otherwise move them around unless you know exactly
+    ## what you are doing...
+    _LT_SYS_HIDDEN_LIBDEPS($1)
+    _LT_COMPILER_PIC($1)
+    _LT_COMPILER_C_O($1)
+    _LT_COMPILER_FILE_LOCKS($1)
+    _LT_LINKER_SHLIBS($1)
+    _LT_SYS_DYNAMIC_LINKER($1)
+    _LT_LINKER_HARDCODE_LIBPATH($1)
+
+    _LT_CONFIG($1)
+  fi # test -n "$compiler"
+
+  CC=$lt_save_CC
+  CFLAGS=$lt_save_CFLAGS
+  LDCXX=$LD
+  LD=$lt_save_LD
+  GCC=$lt_save_GCC
+  with_gnu_ld=$lt_save_with_gnu_ld
+  lt_cv_path_LDCXX=$lt_cv_path_LD
+  lt_cv_path_LD=$lt_save_path_LD
+  lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld
+  lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld
+fi # test "$_lt_caught_CXX_error" != yes
+
+AC_LANG_POP
+])# _LT_LANG_CXX_CONFIG
+
+
+# _LT_FUNC_STRIPNAME_CNF
+# ----------------------
+# func_stripname_cnf prefix suffix name
+# strip PREFIX and SUFFIX off of NAME.
+# PREFIX and SUFFIX must not contain globbing or regex special
+# characters, hashes, or percent signs, but SUFFIX may contain a leading
+# dot (in which case that dot matches only a literal dot).
+#
+# This function is identical to the (non-XSI) version of func_stripname,
+# except this one can be used by m4 code that may be executed by configure,
+# rather than the libtool script.
+m4_defun([_LT_FUNC_STRIPNAME_CNF],[dnl
+AC_REQUIRE([_LT_DECL_SED])
+AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])
+func_stripname_cnf ()
+{
+  case ${2} in
+  .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;;
+  *)  func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;;
+  esac
+} # func_stripname_cnf
+])# _LT_FUNC_STRIPNAME_CNF
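+
+# Illustrative usage (an editorial sketch, not part of the macro above):
+# given the documented calling convention `func_stripname_cnf prefix
+# suffix name', a call such as
+#
+#   func_stripname_cnf 'lib' '.la' 'libfoo.la'
+#
+# would leave `foo' in $func_stripname_result, since the suffix begins
+# with a dot and is therefore matched literally.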
+
+# _LT_SYS_HIDDEN_LIBDEPS([TAGNAME])
+# ---------------------------------
+# Figure out "hidden" library dependencies from verbose
+# compiler output when linking a shared library.
+# Parse the compiler output and extract the necessary
+# objects, libraries and library flags.
+m4_defun([_LT_SYS_HIDDEN_LIBDEPS],
+[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+AC_REQUIRE([_LT_FUNC_STRIPNAME_CNF])dnl
+# Dependencies to place before and after the object being linked:
+_LT_TAGVAR(predep_objects, $1)=
+_LT_TAGVAR(postdep_objects, $1)=
+_LT_TAGVAR(predeps, $1)=
+_LT_TAGVAR(postdeps, $1)=
+_LT_TAGVAR(compiler_lib_search_path, $1)=
+
+dnl we can't use the lt_simple_compile_test_code here,
+dnl because it contains code intended for an executable,
+dnl not a library.  It's possible we should let each
+dnl tag define a new lt_????_link_test_code variable,
+dnl but it's only used here...
+m4_if([$1], [], [cat > conftest.$ac_ext <<_LT_EOF
+int a;
+void foo (void) { a = 0; }
+_LT_EOF
+], [$1], [CXX], [cat > conftest.$ac_ext <<_LT_EOF
+class Foo
+{
+public:
+  Foo (void) { a = 0; }
+private:
+  int a;
+};
+_LT_EOF
+], [$1], [F77], [cat > conftest.$ac_ext <<_LT_EOF
+      subroutine foo
+      implicit none
+      integer*4 a
+      a=0
+      return
+      end
+_LT_EOF
+], [$1], [FC], [cat > conftest.$ac_ext <<_LT_EOF
+      subroutine foo
+      implicit none
+      integer a
+      a=0
+      return
+      end
+_LT_EOF
+], [$1], [GCJ], [cat > conftest.$ac_ext <<_LT_EOF
+public class foo {
+  private int a;
+  public void bar (void) {
+    a = 0;
+  }
+};
+_LT_EOF
+], [$1], [GO], [cat > conftest.$ac_ext <<_LT_EOF
+package foo
+func foo() {
+}
+_LT_EOF
+])
+
+_lt_libdeps_save_CFLAGS=$CFLAGS
+case "$CC $CFLAGS " in #(
+*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;;
+*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;;
+*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;;
+esac
+
+dnl Parse the compiler output and extract the necessary
+dnl objects, libraries and library flags.
+if AC_TRY_EVAL(ac_compile); then
+  # Parse the compiler output and extract the necessary
+  # objects, libraries and library flags.
+
+  # Sentinel used to keep track of whether or not we are before
+  # the conftest object file.
+  pre_test_object_deps_done=no
+
+  for p in `eval "$output_verbose_link_cmd"`; do
+    case ${prev}${p} in
+
+    -L* | -R* | -l*)
+       # Some compilers place a space between "-{L,R}" and the path.
+       # Remove the space.
+       if test $p = "-L" ||
+          test $p = "-R"; then
+	 prev=$p
+	 continue
+       fi
+
+       # Expand the sysroot to ease extracting the directories later.
+       if test -z "$prev"; then
+         case $p in
+         -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;;
+         -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;;
+         -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;;
+         esac
+       fi
+       case $p in
+       =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;;
+       esac
+       if test "$pre_test_object_deps_done" = no; then
+	 case ${prev} in
+	 -L | -R)
+	   # Internal compiler library paths should come after those
+	   # provided by the user.  The postdeps already come after the
+	   # user-supplied libs, so there is no need to process them.
+	   if test -z "$_LT_TAGVAR(compiler_lib_search_path, $1)"; then
+	     _LT_TAGVAR(compiler_lib_search_path, $1)="${prev}${p}"
+	   else
+	     _LT_TAGVAR(compiler_lib_search_path, $1)="${_LT_TAGVAR(compiler_lib_search_path, $1)} ${prev}${p}"
+	   fi
+	   ;;
+	 # The "-l" case would never come before the object being
+	 # linked, so don't bother handling this case.
+	 esac
+       else
+	 if test -z "$_LT_TAGVAR(postdeps, $1)"; then
+	   _LT_TAGVAR(postdeps, $1)="${prev}${p}"
+	 else
+	   _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} ${prev}${p}"
+	 fi
+       fi
+       prev=
+       ;;
+
+    *.lto.$objext) ;; # Ignore GCC LTO objects
+    *.$objext)
+       # This assumes that the test object file only shows up
+       # once in the compiler output.
+       if test "$p" = "conftest.$objext"; then
+	 pre_test_object_deps_done=yes
+	 continue
+       fi
+
+       if test "$pre_test_object_deps_done" = no; then
+	 if test -z "$_LT_TAGVAR(predep_objects, $1)"; then
+	   _LT_TAGVAR(predep_objects, $1)="$p"
+	 else
+	   _LT_TAGVAR(predep_objects, $1)="$_LT_TAGVAR(predep_objects, $1) $p"
+	 fi
+       else
+	 if test -z "$_LT_TAGVAR(postdep_objects, $1)"; then
+	   _LT_TAGVAR(postdep_objects, $1)="$p"
+	 else
+	   _LT_TAGVAR(postdep_objects, $1)="$_LT_TAGVAR(postdep_objects, $1) $p"
+	 fi
+       fi
+       ;;
+
+    *) ;; # Ignore the rest.
+
+    esac
+  done
+
+  # Clean up.
+  rm -f a.out a.exe
+else
+  echo "libtool.m4: error: problem compiling $1 test program"
+fi
+
+$RM -f conftest.$objext
+CFLAGS=$_lt_libdeps_save_CFLAGS
+
+# PORTME: override above test on systems where it is broken
+m4_if([$1], [CXX],
+[case $host_os in
+interix[[3-9]]*)
+  # Interix 3.5 installs completely hosed .la files for C++, so rather than
+  # hack all around it, let's just trust "g++" to DTRT.
+  _LT_TAGVAR(predep_objects,$1)=
+  _LT_TAGVAR(postdep_objects,$1)=
+  _LT_TAGVAR(postdeps,$1)=
+  ;;
+
+linux*)
+  case `$CC -V 2>&1 | sed 5q` in
+  *Sun\ C*)
+    # Sun C++ 5.9
+
+    # The more standards-conforming stlport4 library is
+    # incompatible with the Cstd library. Avoid specifying
+    # it if it's in CXXFLAGS. Ignore libCrun as
+    # -library=stlport4 depends on it.
+    case " $CXX $CXXFLAGS " in
+    *" -library=stlport4 "*)
+      solaris_use_stlport4=yes
+      ;;
+    esac
+
+    if test "$solaris_use_stlport4" != yes; then
+      _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun'
+    fi
+    ;;
+  esac
+  ;;
+
+solaris*)
+  case $cc_basename in
+  CC* | sunCC*)
+    # The more standards-conforming stlport4 library is
+    # incompatible with the Cstd library. Avoid specifying
+    # it if it's in CXXFLAGS. Ignore libCrun as
+    # -library=stlport4 depends on it.
+    case " $CXX $CXXFLAGS " in
+    *" -library=stlport4 "*)
+      solaris_use_stlport4=yes
+      ;;
+    esac
+
+    # Adding this requires a known-good setup of shared libraries for
+    # Sun compiler versions before 5.6, else PIC objects from an old
+    # archive will be linked into the output, leading to subtle bugs.
+    if test "$solaris_use_stlport4" != yes; then
+      _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun'
+    fi
+    ;;
+  esac
+  ;;
+esac
+])
+
+case " $_LT_TAGVAR(postdeps, $1) " in
+*" -lc "*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;;
+esac
+ _LT_TAGVAR(compiler_lib_search_dirs, $1)=
+if test -n "${_LT_TAGVAR(compiler_lib_search_path, $1)}"; then
+ _LT_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_TAGVAR(compiler_lib_search_path, $1)}" | ${SED} -e 's! -L! !g' -e 's!^ !!'`
+fi
+_LT_TAGDECL([], [compiler_lib_search_dirs], [1],
+    [The directories searched by this compiler when creating a shared library])
+_LT_TAGDECL([], [predep_objects], [1],
+    [Dependencies to place before and after the objects being linked to
+    create a shared library])
+_LT_TAGDECL([], [postdep_objects], [1])
+_LT_TAGDECL([], [predeps], [1])
+_LT_TAGDECL([], [postdeps], [1])
+_LT_TAGDECL([], [compiler_lib_search_path], [1],
+    [The library search path used internally by the compiler when linking
+    a shared library])
+])# _LT_SYS_HIDDEN_LIBDEPS
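+
+# Illustrative walk-through (editorial sketch; the file names are made
+# up): if $output_verbose_link_cmd printed
+#
+#   crt1.o -L/usr/lib conftest.o -lgcc crtend.o
+#
+# the loop above would record crt1.o in predep_objects, -L/usr/lib in
+# compiler_lib_search_path, -lgcc in postdeps and crtend.o in
+# postdep_objects, using conftest.$objext as the pivot between the
+# "pre" and "post" halves of the output.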
+
+
+# _LT_LANG_F77_CONFIG([TAG])
+# --------------------------
+# Ensure that the configuration variables for a Fortran 77 compiler are
+# suitably defined.  These variables are subsequently used by _LT_CONFIG
+# to write the compiler configuration to `libtool'.
+m4_defun([_LT_LANG_F77_CONFIG],
+[AC_LANG_PUSH(Fortran 77)
+if test -z "$F77" || test "X$F77" = "Xno"; then
+  _lt_disable_F77=yes
+fi
+
+_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+_LT_TAGVAR(allow_undefined_flag, $1)=
+_LT_TAGVAR(always_export_symbols, $1)=no
+_LT_TAGVAR(archive_expsym_cmds, $1)=
+_LT_TAGVAR(export_dynamic_flag_spec, $1)=
+_LT_TAGVAR(hardcode_direct, $1)=no
+_LT_TAGVAR(hardcode_direct_absolute, $1)=no
+_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+_LT_TAGVAR(hardcode_libdir_separator, $1)=
+_LT_TAGVAR(hardcode_minus_L, $1)=no
+_LT_TAGVAR(hardcode_automatic, $1)=no
+_LT_TAGVAR(inherit_rpath, $1)=no
+_LT_TAGVAR(module_cmds, $1)=
+_LT_TAGVAR(module_expsym_cmds, $1)=
+_LT_TAGVAR(link_all_deplibs, $1)=unknown
+_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+_LT_TAGVAR(reload_flag, $1)=$reload_flag
+_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
+_LT_TAGVAR(no_undefined_flag, $1)=
+_LT_TAGVAR(whole_archive_flag_spec, $1)=
+_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
+
+# Source file extension for f77 test sources.
+ac_ext=f
+
+# Object file extension for compiled f77 test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# No sense in running all these tests if we already determined that
+# the F77 compiler isn't working.  Some variables (like enable_shared)
+# are currently assumed to apply to all compilers on this platform,
+# and will be corrupted by setting them based on a non-working compiler.
+if test "$_lt_disable_F77" != yes; then
+  # Code to be used in simple compile tests
+  lt_simple_compile_test_code="\
+      subroutine t
+      return
+      end
+"
+
+  # Code to be used in simple link tests
+  lt_simple_link_test_code="\
+      program t
+      end
+"
+
+  # ltmain only uses $CC for tagged configurations so make sure $CC is set.
+  _LT_TAG_COMPILER
+
+  # save warnings/boilerplate of simple test code
+  _LT_COMPILER_BOILERPLATE
+  _LT_LINKER_BOILERPLATE
+
+  # Allow CC to be a program name with arguments.
+  lt_save_CC="$CC"
+  lt_save_GCC=$GCC
+  lt_save_CFLAGS=$CFLAGS
+  CC=${F77-"f77"}
+  CFLAGS=$FFLAGS
+  compiler=$CC
+  _LT_TAGVAR(compiler, $1)=$CC
+  _LT_CC_BASENAME([$compiler])
+  GCC=$G77
+  if test -n "$compiler"; then
+    AC_MSG_CHECKING([if libtool supports shared libraries])
+    AC_MSG_RESULT([$can_build_shared])
+
+    AC_MSG_CHECKING([whether to build shared libraries])
+    test "$can_build_shared" = "no" && enable_shared=no
+
+    # On AIX, shared libraries and static libraries use the same namespace, and
+    # are all built from PIC.
+    case $host_os in
+      aix3*)
+        test "$enable_shared" = yes && enable_static=no
+        if test -n "$RANLIB"; then
+          archive_cmds="$archive_cmds~\$RANLIB \$lib"
+          postinstall_cmds='$RANLIB $lib'
+        fi
+        ;;
+      aix[[4-9]]*)
+	if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then
+	  test "$enable_shared" = yes && enable_static=no
+	fi
+        ;;
+    esac
+    AC_MSG_RESULT([$enable_shared])
+
+    AC_MSG_CHECKING([whether to build static libraries])
+    # Make sure either enable_shared or enable_static is yes.
+    test "$enable_shared" = yes || enable_static=yes
+    AC_MSG_RESULT([$enable_static])
+
+    _LT_TAGVAR(GCC, $1)="$G77"
+    _LT_TAGVAR(LD, $1)="$LD"
+
+    ## CAVEAT EMPTOR:
+    ## There is no encapsulation within the following macros, do not change
+    ## the running order or otherwise move them around unless you know exactly
+    ## what you are doing...
+    _LT_COMPILER_PIC($1)
+    _LT_COMPILER_C_O($1)
+    _LT_COMPILER_FILE_LOCKS($1)
+    _LT_LINKER_SHLIBS($1)
+    _LT_SYS_DYNAMIC_LINKER($1)
+    _LT_LINKER_HARDCODE_LIBPATH($1)
+
+    _LT_CONFIG($1)
+  fi # test -n "$compiler"
+
+  GCC=$lt_save_GCC
+  CC="$lt_save_CC"
+  CFLAGS="$lt_save_CFLAGS"
+fi # test "$_lt_disable_F77" != yes
+
+AC_LANG_POP
+])# _LT_LANG_F77_CONFIG
+
+
+# _LT_LANG_FC_CONFIG([TAG])
+# -------------------------
+# Ensure that the configuration variables for a Fortran compiler are
+# suitably defined.  These variables are subsequently used by _LT_CONFIG
+# to write the compiler configuration to `libtool'.
+m4_defun([_LT_LANG_FC_CONFIG],
+[AC_LANG_PUSH(Fortran)
+
+if test -z "$FC" || test "X$FC" = "Xno"; then
+  _lt_disable_FC=yes
+fi
+
+_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+_LT_TAGVAR(allow_undefined_flag, $1)=
+_LT_TAGVAR(always_export_symbols, $1)=no
+_LT_TAGVAR(archive_expsym_cmds, $1)=
+_LT_TAGVAR(export_dynamic_flag_spec, $1)=
+_LT_TAGVAR(hardcode_direct, $1)=no
+_LT_TAGVAR(hardcode_direct_absolute, $1)=no
+_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+_LT_TAGVAR(hardcode_libdir_separator, $1)=
+_LT_TAGVAR(hardcode_minus_L, $1)=no
+_LT_TAGVAR(hardcode_automatic, $1)=no
+_LT_TAGVAR(inherit_rpath, $1)=no
+_LT_TAGVAR(module_cmds, $1)=
+_LT_TAGVAR(module_expsym_cmds, $1)=
+_LT_TAGVAR(link_all_deplibs, $1)=unknown
+_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+_LT_TAGVAR(reload_flag, $1)=$reload_flag
+_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
+_LT_TAGVAR(no_undefined_flag, $1)=
+_LT_TAGVAR(whole_archive_flag_spec, $1)=
+_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
+
+# Source file extension for fc test sources.
+ac_ext=${ac_fc_srcext-f}
+
+# Object file extension for compiled fc test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# No sense in running all these tests if we already determined that
+# the FC compiler isn't working.  Some variables (like enable_shared)
+# are currently assumed to apply to all compilers on this platform,
+# and will be corrupted by setting them based on a non-working compiler.
+if test "$_lt_disable_FC" != yes; then
+  # Code to be used in simple compile tests
+  lt_simple_compile_test_code="\
+      subroutine t
+      return
+      end
+"
+
+  # Code to be used in simple link tests
+  lt_simple_link_test_code="\
+      program t
+      end
+"
+
+  # ltmain only uses $CC for tagged configurations so make sure $CC is set.
+  _LT_TAG_COMPILER
+
+  # save warnings/boilerplate of simple test code
+  _LT_COMPILER_BOILERPLATE
+  _LT_LINKER_BOILERPLATE
+
+  # Allow CC to be a program name with arguments.
+  lt_save_CC="$CC"
+  lt_save_GCC=$GCC
+  lt_save_CFLAGS=$CFLAGS
+  CC=${FC-"f95"}
+  CFLAGS=$FCFLAGS
+  compiler=$CC
+  GCC=$ac_cv_fc_compiler_gnu
+
+  _LT_TAGVAR(compiler, $1)=$CC
+  _LT_CC_BASENAME([$compiler])
+
+  if test -n "$compiler"; then
+    AC_MSG_CHECKING([if libtool supports shared libraries])
+    AC_MSG_RESULT([$can_build_shared])
+
+    AC_MSG_CHECKING([whether to build shared libraries])
+    test "$can_build_shared" = "no" && enable_shared=no
+
+    # On AIX, shared libraries and static libraries use the same namespace, and
+    # are all built from PIC.
+    case $host_os in
+      aix3*)
+        test "$enable_shared" = yes && enable_static=no
+        if test -n "$RANLIB"; then
+          archive_cmds="$archive_cmds~\$RANLIB \$lib"
+          postinstall_cmds='$RANLIB $lib'
+        fi
+        ;;
+      aix[[4-9]]*)
+	if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then
+	  test "$enable_shared" = yes && enable_static=no
+	fi
+        ;;
+    esac
+    AC_MSG_RESULT([$enable_shared])
+
+    AC_MSG_CHECKING([whether to build static libraries])
+    # Make sure either enable_shared or enable_static is yes.
+    test "$enable_shared" = yes || enable_static=yes
+    AC_MSG_RESULT([$enable_static])
+
+    _LT_TAGVAR(GCC, $1)="$ac_cv_fc_compiler_gnu"
+    _LT_TAGVAR(LD, $1)="$LD"
+
+    ## CAVEAT EMPTOR:
+    ## There is no encapsulation within the following macros, do not change
+    ## the running order or otherwise move them around unless you know exactly
+    ## what you are doing...
+    _LT_SYS_HIDDEN_LIBDEPS($1)
+    _LT_COMPILER_PIC($1)
+    _LT_COMPILER_C_O($1)
+    _LT_COMPILER_FILE_LOCKS($1)
+    _LT_LINKER_SHLIBS($1)
+    _LT_SYS_DYNAMIC_LINKER($1)
+    _LT_LINKER_HARDCODE_LIBPATH($1)
+
+    _LT_CONFIG($1)
+  fi # test -n "$compiler"
+
+  GCC=$lt_save_GCC
+  CC=$lt_save_CC
+  CFLAGS=$lt_save_CFLAGS
+fi # test "$_lt_disable_FC" != yes
+
+AC_LANG_POP
+])# _LT_LANG_FC_CONFIG
+
+
+# _LT_LANG_GCJ_CONFIG([TAG])
+# --------------------------
+# Ensure that the configuration variables for the GNU Java Compiler
+# are suitably defined.  These variables are subsequently used by _LT_CONFIG
+# to write the compiler configuration to `libtool'.
+m4_defun([_LT_LANG_GCJ_CONFIG],
+[AC_REQUIRE([LT_PROG_GCJ])dnl
+AC_LANG_SAVE
+
+# Source file extension for Java test sources.
+ac_ext=java
+
+# Object file extension for compiled Java test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# Code to be used in simple compile tests
+lt_simple_compile_test_code="class foo {}"
+
+# Code to be used in simple link tests
+lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }'
+
+# ltmain only uses $CC for tagged configurations so make sure $CC is set.
+_LT_TAG_COMPILER
+
+# save warnings/boilerplate of simple test code
+_LT_COMPILER_BOILERPLATE
+_LT_LINKER_BOILERPLATE
+
+# Allow CC to be a program name with arguments.
+lt_save_CC=$CC
+lt_save_CFLAGS=$CFLAGS
+lt_save_GCC=$GCC
+GCC=yes
+CC=${GCJ-"gcj"}
+CFLAGS=$GCJFLAGS
+compiler=$CC
+_LT_TAGVAR(compiler, $1)=$CC
+_LT_TAGVAR(LD, $1)="$LD"
+_LT_CC_BASENAME([$compiler])
+
+# GCJ did not exist back when GCC did not implicitly link libc in.
+_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+
+_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+_LT_TAGVAR(reload_flag, $1)=$reload_flag
+_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
+
+if test -n "$compiler"; then
+  _LT_COMPILER_NO_RTTI($1)
+  _LT_COMPILER_PIC($1)
+  _LT_COMPILER_C_O($1)
+  _LT_COMPILER_FILE_LOCKS($1)
+  _LT_LINKER_SHLIBS($1)
+  _LT_LINKER_HARDCODE_LIBPATH($1)
+
+  _LT_CONFIG($1)
+fi
+
+AC_LANG_RESTORE
+
+GCC=$lt_save_GCC
+CC=$lt_save_CC
+CFLAGS=$lt_save_CFLAGS
+])# _LT_LANG_GCJ_CONFIG
+
+
+# _LT_LANG_GO_CONFIG([TAG])
+# -------------------------
+# Ensure that the configuration variables for the GNU Go compiler
+# are suitably defined.  These variables are subsequently used by _LT_CONFIG
+# to write the compiler configuration to `libtool'.
+m4_defun([_LT_LANG_GO_CONFIG],
+[AC_REQUIRE([LT_PROG_GO])dnl
+AC_LANG_SAVE
+
+# Source file extension for Go test sources.
+ac_ext=go
+
+# Object file extension for compiled Go test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# Code to be used in simple compile tests
+lt_simple_compile_test_code="package main; func main() { }"
+
+# Code to be used in simple link tests
+lt_simple_link_test_code='package main; func main() { }'
+
+# ltmain only uses $CC for tagged configurations so make sure $CC is set.
+_LT_TAG_COMPILER
+
+# save warnings/boilerplate of simple test code
+_LT_COMPILER_BOILERPLATE
+_LT_LINKER_BOILERPLATE
+
+# Allow CC to be a program name with arguments.
+lt_save_CC=$CC
+lt_save_CFLAGS=$CFLAGS
+lt_save_GCC=$GCC
+GCC=yes
+CC=${GOC-"gccgo"}
+CFLAGS=$GOFLAGS
+compiler=$CC
+_LT_TAGVAR(compiler, $1)=$CC
+_LT_TAGVAR(LD, $1)="$LD"
+_LT_CC_BASENAME([$compiler])
+
+# Go did not exist back when GCC did not implicitly link libc in.
+_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+
+_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+_LT_TAGVAR(reload_flag, $1)=$reload_flag
+_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
+
+if test -n "$compiler"; then
+  _LT_COMPILER_NO_RTTI($1)
+  _LT_COMPILER_PIC($1)
+  _LT_COMPILER_C_O($1)
+  _LT_COMPILER_FILE_LOCKS($1)
+  _LT_LINKER_SHLIBS($1)
+  _LT_LINKER_HARDCODE_LIBPATH($1)
+
+  _LT_CONFIG($1)
+fi
+
+AC_LANG_RESTORE
+
+GCC=$lt_save_GCC
+CC=$lt_save_CC
+CFLAGS=$lt_save_CFLAGS
+])# _LT_LANG_GO_CONFIG
+
+
+# _LT_LANG_RC_CONFIG([TAG])
+# -------------------------
+# Ensure that the configuration variables for the Windows resource compiler
+# are suitably defined.  These variables are subsequently used by _LT_CONFIG
+# to write the compiler configuration to `libtool'.
+m4_defun([_LT_LANG_RC_CONFIG],
+[AC_REQUIRE([LT_PROG_RC])dnl
+AC_LANG_SAVE
+
+# Source file extension for RC test sources.
+ac_ext=rc
+
+# Object file extension for compiled RC test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# Code to be used in simple compile tests
+lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }'
+
+# Code to be used in simple link tests
+lt_simple_link_test_code="$lt_simple_compile_test_code"
+
+# ltmain only uses $CC for tagged configurations so make sure $CC is set.
+_LT_TAG_COMPILER
+
+# save warnings/boilerplate of simple test code
+_LT_COMPILER_BOILERPLATE
+_LT_LINKER_BOILERPLATE
+
+# Allow CC to be a program name with arguments.
+lt_save_CC="$CC"
+lt_save_CFLAGS=$CFLAGS
+lt_save_GCC=$GCC
+GCC=
+CC=${RC-"windres"}
+CFLAGS=
+compiler=$CC
+_LT_TAGVAR(compiler, $1)=$CC
+_LT_CC_BASENAME([$compiler])
+_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes
+
+if test -n "$compiler"; then
+  :
+  _LT_CONFIG($1)
+fi
+
+GCC=$lt_save_GCC
+AC_LANG_RESTORE
+CC=$lt_save_CC
+CFLAGS=$lt_save_CFLAGS
+])# _LT_LANG_RC_CONFIG
+
+
+# LT_PROG_GCJ
+# -----------
+AC_DEFUN([LT_PROG_GCJ],
+[m4_ifdef([AC_PROG_GCJ], [AC_PROG_GCJ],
+  [m4_ifdef([A][M_PROG_GCJ], [A][M_PROG_GCJ],
+    [AC_CHECK_TOOL(GCJ, gcj,)
+      test "x${GCJFLAGS+set}" = xset || GCJFLAGS="-g -O2"
+      AC_SUBST(GCJFLAGS)])])[]dnl
+])
+
+# Old name:
+AU_ALIAS([LT_AC_PROG_GCJ], [LT_PROG_GCJ])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([LT_AC_PROG_GCJ], [])
+
+
+# LT_PROG_GO
+# ----------
+AC_DEFUN([LT_PROG_GO],
+[AC_CHECK_TOOL(GOC, gccgo,)
+])
+
+
+# LT_PROG_RC
+# ----------
+AC_DEFUN([LT_PROG_RC],
+[AC_CHECK_TOOL(RC, windres,)
+])
+
+# Old name:
+AU_ALIAS([LT_AC_PROG_RC], [LT_PROG_RC])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([LT_AC_PROG_RC], [])
+
+
+# _LT_DECL_EGREP
+# --------------
+# If we don't have a new enough Autoconf to choose the best grep
+# available, choose the first one found in the user's PATH.
+m4_defun([_LT_DECL_EGREP],
+[AC_REQUIRE([AC_PROG_EGREP])dnl
+AC_REQUIRE([AC_PROG_FGREP])dnl
+test -z "$GREP" && GREP=grep
+_LT_DECL([], [GREP], [1], [A grep program that handles long lines])
+_LT_DECL([], [EGREP], [1], [An ERE matcher])
+_LT_DECL([], [FGREP], [1], [A literal string matcher])
+dnl Non-bleeding-edge autoconf doesn't subst GREP, so do it here too
+AC_SUBST([GREP])
+])
+
+
+# _LT_DECL_OBJDUMP
+# ----------------
+# If we don't have a new enough Autoconf to choose the best objdump
+# available, choose the first one found in the user's PATH.
+m4_defun([_LT_DECL_OBJDUMP],
+[AC_CHECK_TOOL(OBJDUMP, objdump, false)
+test -z "$OBJDUMP" && OBJDUMP=objdump
+_LT_DECL([], [OBJDUMP], [1], [An object symbol dumper])
+AC_SUBST([OBJDUMP])
+])
+
+# _LT_DECL_DLLTOOL
+# ----------------
+# Ensure DLLTOOL variable is set.
+m4_defun([_LT_DECL_DLLTOOL],
+[AC_CHECK_TOOL(DLLTOOL, dlltool, false)
+test -z "$DLLTOOL" && DLLTOOL=dlltool
+_LT_DECL([], [DLLTOOL], [1], [DLL creation program])
+AC_SUBST([DLLTOOL])
+])
+
+# _LT_DECL_SED
+# ------------
+# Check for a fully-functional sed program that truncates
+# as few characters as possible.  Prefer GNU sed if found.
+m4_defun([_LT_DECL_SED],
+[AC_PROG_SED
+test -z "$SED" && SED=sed
+Xsed="$SED -e 1s/^X//"
+_LT_DECL([], [SED], [1], [A sed program that does not truncate output])
+_LT_DECL([], [Xsed], ["\$SED -e 1s/^X//"],
+    [Sed that helps us avoid accidentally triggering echo(1) options like -n])
+])# _LT_DECL_SED
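+
+# Illustrative note (editorial sketch, not part of upstream libtool):
+# Xsed exists so that values can be echoed safely even when they begin
+# with characters echo(1) would treat as options.  For instance,
+#
+#   $ECHO "X-n foo" | $Xsed
+#
+# prints `-n foo': the leading X protects the argument, and the
+# `1s/^X//' expression strips it again on output.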
+
+m4_ifndef([AC_PROG_SED], [
+# NOTE: This macro has been submitted for inclusion into   #
+#  GNU Autoconf as AC_PROG_SED.  When it is available in   #
+#  a released version of Autoconf we should remove this    #
+#  macro and use it instead.                               #
+
+m4_defun([AC_PROG_SED],
+[AC_MSG_CHECKING([for a sed that does not truncate output])
+AC_CACHE_VAL(lt_cv_path_SED,
+[# Loop through the user's path and test for sed and gsed.
+# Then use that list of sed's as ones to test for truncation.
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+  for lt_ac_prog in sed gsed; do
+    for ac_exec_ext in '' $ac_executable_extensions; do
+      if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then
+        lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext"
+      fi
+    done
+  done
+done
+IFS=$as_save_IFS
+lt_ac_max=0
+lt_ac_count=0
+# Add /usr/xpg4/bin/sed as it is typically found on Solaris
+# along with /bin/sed that truncates output.
+for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do
+  test ! -f $lt_ac_sed && continue
+  cat /dev/null > conftest.in
+  lt_ac_count=0
+  echo $ECHO_N "0123456789$ECHO_C" >conftest.in
+  # Check for GNU sed and select it if it is found.
+  if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then
+    lt_cv_path_SED=$lt_ac_sed
+    break
+  fi
+  while true; do
+    cat conftest.in conftest.in >conftest.tmp
+    mv conftest.tmp conftest.in
+    cp conftest.in conftest.nl
+    echo >>conftest.nl
+    $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break
+    cmp -s conftest.out conftest.nl || break
+    # 10000 chars as input seems more than enough
+    test $lt_ac_count -gt 10 && break
+    lt_ac_count=`expr $lt_ac_count + 1`
+    if test $lt_ac_count -gt $lt_ac_max; then
+      lt_ac_max=$lt_ac_count
+      lt_cv_path_SED=$lt_ac_sed
+    fi
+  done
+done
+])
+SED=$lt_cv_path_SED
+AC_SUBST([SED])
+AC_MSG_RESULT([$SED])
+])#AC_PROG_SED
+])#m4_ifndef
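+
+# Illustrative note (editorial sketch): the fallback above seeds
+# conftest.in with "0123456789", then repeatedly doubles it and checks
+# with cmp that `s/a$//' still copies the file intact; any GNU sed is
+# accepted immediately, otherwise the candidate surviving the most
+# doublings becomes lt_cv_path_SED.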
+
+# Old name:
+AU_ALIAS([LT_AC_PROG_SED], [AC_PROG_SED])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([LT_AC_PROG_SED], [])
+
+
+# _LT_CHECK_SHELL_FEATURES
+# ------------------------
+# Find out whether the shell is Bourne or XSI compatible,
+# or has some other useful features.
+m4_defun([_LT_CHECK_SHELL_FEATURES],
+[AC_MSG_CHECKING([whether the shell understands some XSI constructs])
+# Try some XSI features
+xsi_shell=no
+( _lt_dummy="a/b/c"
+  test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \
+      = c,a/b,b/c, \
+    && eval 'test $(( 1 + 1 )) -eq 2 \
+    && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \
+  && xsi_shell=yes
+AC_MSG_RESULT([$xsi_shell])
+_LT_CONFIG_LIBTOOL_INIT([xsi_shell='$xsi_shell'])
+
+AC_MSG_CHECKING([whether the shell understands "+="])
+lt_shell_append=no
+( foo=bar; set foo baz; eval "$[1]+=\$[2]" && test "$foo" = barbaz ) \
+    >/dev/null 2>&1 \
+  && lt_shell_append=yes
+AC_MSG_RESULT([$lt_shell_append])
+_LT_CONFIG_LIBTOOL_INIT([lt_shell_append='$lt_shell_append'])
+
+if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
+  lt_unset=unset
+else
+  lt_unset=false
+fi
+_LT_DECL([], [lt_unset], [0], [whether the shell understands "unset"])dnl
+
+# Test whether this is an EBCDIC- or ASCII-based system.
+case `echo X|tr X '\101'` in
+ A) # ASCII based system
+    # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr
+  lt_SP2NL='tr \040 \012'
+  lt_NL2SP='tr \015\012 \040\040'
+  ;;
+ *) # EBCDIC based system
+  lt_SP2NL='tr \100 \n'
+  lt_NL2SP='tr \r\n \100\100'
+  ;;
+esac
+_LT_DECL([SP2NL], [lt_SP2NL], [1], [turn spaces into newlines])dnl
+_LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl
+])# _LT_CHECK_SHELL_FEATURES
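+
+# Illustrative note (editorial sketch): with _lt_dummy="a/b/c", the XSI
+# constructs probed above behave as follows:
+#
+#   ${_lt_dummy##*/}  -> c     (longest-prefix strip, basename-like)
+#   ${_lt_dummy%/*}   -> a/b   (shortest-suffix strip, dirname-like)
+#   ${_lt_dummy#??}   -> b/c   (strip the first two characters)
+#   $(( 1 + 1 ))      -> 2     (built-in shell arithmetic)
+#
+# These are the features _LT_PROG_REPLACE_SHELLFNS substitutes for the
+# slower portable implementations when xsi_shell=yes.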
+
+
+# _LT_PROG_FUNCTION_REPLACE (FUNCNAME, REPLACEMENT-BODY)
+# ------------------------------------------------------
+# In `$cfgfile', look for function FUNCNAME delimited by `^FUNCNAME ()$' and
+# '^} FUNCNAME ', and replace its body with REPLACEMENT-BODY.
+m4_defun([_LT_PROG_FUNCTION_REPLACE],
+[dnl {
+sed -e '/^$1 ()$/,/^} # $1 /c\
+$1 ()\
+{\
+m4_bpatsubsts([$2], [$], [\\], [^\([	 ]\)], [\\\1])
+} # Extended-shell $1 implementation' "$cfgfile" > $cfgfile.tmp \
+  && mv -f "$cfgfile.tmp" "$cfgfile" \
+    || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+])
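+
+# Illustrative usage (editorial sketch): _LT_PROG_REPLACE_SHELLFNS below
+# invokes this macro once per function, e.g.
+#
+#   _LT_PROG_FUNCTION_REPLACE([func_len], [    func_len_result=${#1}])
+#
+# which rewrites the body of `func_len ()' in $cfgfile in place,
+# replacing the portable implementation with the XSI one.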
+
+
+# _LT_PROG_REPLACE_SHELLFNS
+# -------------------------
+# Replace existing portable implementations of several shell functions with
+# equivalent extended shell implementations where those features are available.
+m4_defun([_LT_PROG_REPLACE_SHELLFNS],
+[if test x"$xsi_shell" = xyes; then
+  _LT_PROG_FUNCTION_REPLACE([func_dirname], [dnl
+    case ${1} in
+      */*) func_dirname_result="${1%/*}${2}" ;;
+      *  ) func_dirname_result="${3}" ;;
+    esac])
+
+  _LT_PROG_FUNCTION_REPLACE([func_basename], [dnl
+    func_basename_result="${1##*/}"])
+
+  _LT_PROG_FUNCTION_REPLACE([func_dirname_and_basename], [dnl
+    case ${1} in
+      */*) func_dirname_result="${1%/*}${2}" ;;
+      *  ) func_dirname_result="${3}" ;;
+    esac
+    func_basename_result="${1##*/}"])
+
+  _LT_PROG_FUNCTION_REPLACE([func_stripname], [dnl
+    # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are
+    # positional parameters, so assign one to ordinary parameter first.
+    func_stripname_result=${3}
+    func_stripname_result=${func_stripname_result#"${1}"}
+    func_stripname_result=${func_stripname_result%"${2}"}])
+
+  _LT_PROG_FUNCTION_REPLACE([func_split_long_opt], [dnl
+    func_split_long_opt_name=${1%%=*}
+    func_split_long_opt_arg=${1#*=}])
+
+  _LT_PROG_FUNCTION_REPLACE([func_split_short_opt], [dnl
+    func_split_short_opt_arg=${1#??}
+    func_split_short_opt_name=${1%"$func_split_short_opt_arg"}])
+
+  _LT_PROG_FUNCTION_REPLACE([func_lo2o], [dnl
+    case ${1} in
+      *.lo) func_lo2o_result=${1%.lo}.${objext} ;;
+      *)    func_lo2o_result=${1} ;;
+    esac])
+
+  _LT_PROG_FUNCTION_REPLACE([func_xform], [    func_xform_result=${1%.*}.lo])
+
+  _LT_PROG_FUNCTION_REPLACE([func_arith], [    func_arith_result=$(( $[*] ))])
+
+  _LT_PROG_FUNCTION_REPLACE([func_len], [    func_len_result=${#1}])
+fi
+
+if test x"$lt_shell_append" = xyes; then
+  _LT_PROG_FUNCTION_REPLACE([func_append], [    eval "${1}+=\\${2}"])
+
+  _LT_PROG_FUNCTION_REPLACE([func_append_quoted], [dnl
+    func_quote_for_eval "${2}"
+dnl m4 expansion turns \\\\ into \\, and then the shell eval turns that into \
+    eval "${1}+=\\\\ \\$func_quote_for_eval_result"])
+
+  # Save a `func_append' function call where possible by direct use of '+='
+  sed -e 's%func_append \([[a-zA-Z_]]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \
+    && mv -f "$cfgfile.tmp" "$cfgfile" \
+      || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+  test 0 -eq $? || _lt_function_replace_fail=:
+else
+  # Save a `func_append' function call even when '+=' is not available
+  sed -e 's%func_append \([[a-zA-Z_]]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \
+    && mv -f "$cfgfile.tmp" "$cfgfile" \
+      || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+  test 0 -eq $? || _lt_function_replace_fail=:
+fi
+
+if test x"$_lt_function_replace_fail" = x":"; then
+  AC_MSG_WARN([Unable to substitute extended shell functions in $ofile])
+fi
+])
+
+# _LT_PATH_CONVERSION_FUNCTIONS
+# -----------------------------
+# Determine which file name conversion functions should be used by
+# func_to_host_file (and, implicitly, by func_to_host_path).  These are needed
+# for certain cross-compile configurations and native mingw.
+m4_defun([_LT_PATH_CONVERSION_FUNCTIONS],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_CANONICAL_BUILD])dnl
+AC_MSG_CHECKING([how to convert $build file names to $host format])
+AC_CACHE_VAL(lt_cv_to_host_file_cmd,
+[case $host in
+  *-*-mingw* )
+    case $build in
+      *-*-mingw* ) # actually msys
+        lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32
+        ;;
+      *-*-cygwin* )
+        lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32
+        ;;
+      * ) # otherwise, assume *nix
+        lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32
+        ;;
+    esac
+    ;;
+  *-*-cygwin* )
+    case $build in
+      *-*-mingw* ) # actually msys
+        lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin
+        ;;
+      *-*-cygwin* )
+        lt_cv_to_host_file_cmd=func_convert_file_noop
+        ;;
+      * ) # otherwise, assume *nix
+        lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin
+        ;;
+    esac
+    ;;
+  * ) # unhandled hosts (and "normal" native builds)
+    lt_cv_to_host_file_cmd=func_convert_file_noop
+    ;;
+esac
+])
+to_host_file_cmd=$lt_cv_to_host_file_cmd
+AC_MSG_RESULT([$lt_cv_to_host_file_cmd])
+_LT_DECL([to_host_file_cmd], [lt_cv_to_host_file_cmd],
+         [0], [convert $build file names to $host format])dnl
+
+AC_MSG_CHECKING([how to convert $build file names to toolchain format])
+AC_CACHE_VAL(lt_cv_to_tool_file_cmd,
+[# Assume ordinary cross tools, or native build.
+lt_cv_to_tool_file_cmd=func_convert_file_noop
+case $host in
+  *-*-mingw* )
+    case $build in
+      *-*-mingw* ) # actually msys
+        lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32
+        ;;
+    esac
+    ;;
+esac
+])
+to_tool_file_cmd=$lt_cv_to_tool_file_cmd
+AC_MSG_RESULT([$lt_cv_to_tool_file_cmd])
+_LT_DECL([to_tool_file_cmd], [lt_cv_to_tool_file_cmd],
+         [0], [convert $build files to toolchain format])dnl
+])# _LT_PATH_CONVERSION_FUNCTIONS
+
+# Helper functions for option handling.                    -*- Autoconf -*-
+#
+#   Copyright (C) 2004, 2005, 2007, 2008, 2009 Free Software Foundation,
+#   Inc.
+#   Written by Gary V. Vaughan, 2004
+#
+# This file is free software; the Free Software Foundation gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+
+# serial 7 ltoptions.m4
+
+# This is to help aclocal find these macros, as it can't see m4_define.
+AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])])
+
+
+# _LT_MANGLE_OPTION(MACRO-NAME, OPTION-NAME)
+# ------------------------------------------
+m4_define([_LT_MANGLE_OPTION],
+[[_LT_OPTION_]m4_bpatsubst($1__$2, [[^a-zA-Z0-9_]], [_])])
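+
+# Illustrative expansion (editorial sketch):
+#   _LT_MANGLE_OPTION([LT_INIT], [dlopen])
+# expands to the flag name `_LT_OPTION_LT_INIT__dlopen', with any
+# character outside [a-zA-Z0-9_] rewritten to `_'.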
+
+
+# _LT_SET_OPTION(MACRO-NAME, OPTION-NAME)
+# ---------------------------------------
+# Set option OPTION-NAME for macro MACRO-NAME, and if there is a
+# matching handler defined, dispatch to it.  Other OPTION-NAMEs are
+# saved as a flag.
+m4_define([_LT_SET_OPTION],
+[m4_define(_LT_MANGLE_OPTION([$1], [$2]))dnl
+m4_ifdef(_LT_MANGLE_DEFUN([$1], [$2]),
+        _LT_MANGLE_DEFUN([$1], [$2]),
+    [m4_warning([Unknown $1 option `$2'])])[]dnl
+])
+
+
+# _LT_IF_OPTION(MACRO-NAME, OPTION-NAME, IF-SET, [IF-NOT-SET])
+# ------------------------------------------------------------
+# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise.
+m4_define([_LT_IF_OPTION],
+[m4_ifdef(_LT_MANGLE_OPTION([$1], [$2]), [$3], [$4])])
+
+
+# _LT_UNLESS_OPTIONS(MACRO-NAME, OPTION-LIST, IF-NOT-SET)
+# -------------------------------------------------------
+# Execute IF-NOT-SET unless all options in OPTION-LIST for MACRO-NAME
+# are set.
+m4_define([_LT_UNLESS_OPTIONS],
+[m4_foreach([_LT_Option], m4_split(m4_normalize([$2])),
+	    [m4_ifdef(_LT_MANGLE_OPTION([$1], _LT_Option),
+		      [m4_define([$0_found])])])[]dnl
+m4_ifdef([$0_found], [m4_undefine([$0_found])], [$3
+])[]dnl
+])
+
+
+# _LT_SET_OPTIONS(MACRO-NAME, OPTION-LIST)
+# ----------------------------------------
+# OPTION-LIST is a space-separated list of Libtool options associated
+# with MACRO-NAME.  If any OPTION has a matching handler declared with
+# LT_OPTION_DEFINE, dispatch to that macro; otherwise complain about
+# the unknown option and exit.
+m4_defun([_LT_SET_OPTIONS],
+[# Set options
+m4_foreach([_LT_Option], m4_split(m4_normalize([$2])),
+    [_LT_SET_OPTION([$1], _LT_Option)])
+
+m4_if([$1],[LT_INIT],[
+  dnl
+  dnl Simply set some default values (i.e off) if boolean options were not
+  dnl specified:
+  _LT_UNLESS_OPTIONS([LT_INIT], [dlopen], [enable_dlopen=no
+  ])
+  _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no
+  ])
+  dnl
+  dnl If no reference was made to various pairs of opposing options, then
+  dnl we run the default mode handler for the pair.  For example, if neither
+  dnl `shared' nor `disable-shared' was passed, we enable building of shared
+  dnl archives by default:
+  _LT_UNLESS_OPTIONS([LT_INIT], [shared disable-shared], [_LT_ENABLE_SHARED])
+  _LT_UNLESS_OPTIONS([LT_INIT], [static disable-static], [_LT_ENABLE_STATIC])
+  _LT_UNLESS_OPTIONS([LT_INIT], [pic-only no-pic], [_LT_WITH_PIC])
+  _LT_UNLESS_OPTIONS([LT_INIT], [fast-install disable-fast-install],
+  		   [_LT_ENABLE_FAST_INSTALL])
+  ])
+])# _LT_SET_OPTIONS
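+
+# Illustrative usage (editorial sketch): a configure.ac containing
+#
+#   LT_INIT([dlopen win32-dll])
+#
+# causes _LT_SET_OPTIONS([LT_INIT], [dlopen win32-dll]) to dispatch the
+# `dlopen' and `win32-dll' handlers defined further below, while the
+# _LT_UNLESS_OPTIONS defaults fill in everything left unspecified.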
+
+
+
+# _LT_MANGLE_DEFUN(MACRO-NAME, OPTION-NAME)
+# -----------------------------------------
+m4_define([_LT_MANGLE_DEFUN],
+[[_LT_OPTION_DEFUN_]m4_bpatsubst(m4_toupper([$1__$2]), [[^A-Z0-9_]], [_])])
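+
+# Illustrative expansion (editorial sketch):
+#   _LT_MANGLE_DEFUN([LT_INIT], [dlopen])
+# expands to `_LT_OPTION_DEFUN_LT_INIT__DLOPEN', the upper-cased handler
+# name that LT_OPTION_DEFINE below associates with the option.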
+
+
+# LT_OPTION_DEFINE(MACRO-NAME, OPTION-NAME, CODE)
+# -----------------------------------------------
+m4_define([LT_OPTION_DEFINE],
+[m4_define(_LT_MANGLE_DEFUN([$1], [$2]), [$3])[]dnl
+])# LT_OPTION_DEFINE
+
+
+# dlopen
+# ------
+LT_OPTION_DEFINE([LT_INIT], [dlopen], [enable_dlopen=yes
+])
+
+AU_DEFUN([AC_LIBTOOL_DLOPEN],
+[_LT_SET_OPTION([LT_INIT], [dlopen])
+AC_DIAGNOSE([obsolete],
+[$0: Remove this warning and the call to _LT_SET_OPTION when you
+put the `dlopen' option into LT_INIT's first parameter.])
+])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_DLOPEN], [])
+
+
+# win32-dll
+# ---------
+# Declare package support for building win32 dll's.
+LT_OPTION_DEFINE([LT_INIT], [win32-dll],
+[enable_win32_dll=yes
+
+case $host in
+*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-cegcc*)
+  AC_CHECK_TOOL(AS, as, false)
+  AC_CHECK_TOOL(DLLTOOL, dlltool, false)
+  AC_CHECK_TOOL(OBJDUMP, objdump, false)
+  ;;
+esac
+
+test -z "$AS" && AS=as
+_LT_DECL([], [AS],      [1], [Assembler program])dnl
+
+test -z "$DLLTOOL" && DLLTOOL=dlltool
+_LT_DECL([], [DLLTOOL], [1], [DLL creation program])dnl
+
+test -z "$OBJDUMP" && OBJDUMP=objdump
+_LT_DECL([], [OBJDUMP], [1], [Object dumper program])dnl
+])# win32-dll
+
+AU_DEFUN([AC_LIBTOOL_WIN32_DLL],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+_LT_SET_OPTION([LT_INIT], [win32-dll])
+AC_DIAGNOSE([obsolete],
+[$0: Remove this warning and the call to _LT_SET_OPTION when you
+put the `win32-dll' option into LT_INIT's first parameter.])
+])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_WIN32_DLL], [])
+
+
+# _LT_ENABLE_SHARED([DEFAULT])
+# ----------------------------
+# implement the --enable-shared flag, and support the `shared' and
+# `disable-shared' LT_INIT options.
+# DEFAULT is either `yes' or `no'.  If omitted, it defaults to `yes'.
+m4_define([_LT_ENABLE_SHARED],
+[m4_define([_LT_ENABLE_SHARED_DEFAULT], [m4_if($1, no, no, yes)])dnl
+AC_ARG_ENABLE([shared],
+    [AS_HELP_STRING([--enable-shared@<:@=PKGS@:>@],
+	[build shared libraries @<:@default=]_LT_ENABLE_SHARED_DEFAULT[@:>@])],
+    [p=${PACKAGE-default}
+    case $enableval in
+    yes) enable_shared=yes ;;
+    no) enable_shared=no ;;
+    *)
+      enable_shared=no
+      # Look at the argument we got.  We use all the common list separators.
+      lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+      for pkg in $enableval; do
+	IFS="$lt_save_ifs"
+	if test "X$pkg" = "X$p"; then
+	  enable_shared=yes
+	fi
+      done
+      IFS="$lt_save_ifs"
+      ;;
+    esac],
+    [enable_shared=]_LT_ENABLE_SHARED_DEFAULT)
+
+    _LT_DECL([build_libtool_libs], [enable_shared], [0],
+	[Whether or not to build shared libraries])
+])# _LT_ENABLE_SHARED
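+
+# Illustrative behaviour (editorial sketch; the package names are
+# hypothetical): with the macro above,
+#   ./configure --enable-shared          => enable_shared=yes
+#   ./configure --enable-shared=foo,bar  => enable_shared=yes only if
+#                                           $PACKAGE is `foo' or `bar'
+#   ./configure --disable-shared         => enable_shared=no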
+
+LT_OPTION_DEFINE([LT_INIT], [shared], [_LT_ENABLE_SHARED([yes])])
+LT_OPTION_DEFINE([LT_INIT], [disable-shared], [_LT_ENABLE_SHARED([no])])
+
+# Old names:
+AC_DEFUN([AC_ENABLE_SHARED],
+[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[shared])
+])
+
+AC_DEFUN([AC_DISABLE_SHARED],
+[_LT_SET_OPTION([LT_INIT], [disable-shared])
+])
+
+AU_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)])
+AU_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AM_ENABLE_SHARED], [])
+dnl AC_DEFUN([AM_DISABLE_SHARED], [])
+
+
+
+# _LT_ENABLE_STATIC([DEFAULT])
+# ----------------------------
+# implement the --enable-static flag, and support the `static' and
+# `disable-static' LT_INIT options.
+# DEFAULT is either `yes' or `no'.  If omitted, it defaults to `yes'.
+m4_define([_LT_ENABLE_STATIC],
+[m4_define([_LT_ENABLE_STATIC_DEFAULT], [m4_if($1, no, no, yes)])dnl
+AC_ARG_ENABLE([static],
+    [AS_HELP_STRING([--enable-static@<:@=PKGS@:>@],
+	[build static libraries @<:@default=]_LT_ENABLE_STATIC_DEFAULT[@:>@])],
+    [p=${PACKAGE-default}
+    case $enableval in
+    yes) enable_static=yes ;;
+    no) enable_static=no ;;
+    *)
+      enable_static=no
+      # Look at the argument we got.  We use all the common list separators.
+      lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+      for pkg in $enableval; do
+	IFS="$lt_save_ifs"
+	if test "X$pkg" = "X$p"; then
+	  enable_static=yes
+	fi
+      done
+      IFS="$lt_save_ifs"
+      ;;
+    esac],
+    [enable_static=]_LT_ENABLE_STATIC_DEFAULT)
+
+    _LT_DECL([build_old_libs], [enable_static], [0],
+	[Whether or not to build static libraries])
+])# _LT_ENABLE_STATIC
+
+LT_OPTION_DEFINE([LT_INIT], [static], [_LT_ENABLE_STATIC([yes])])
+LT_OPTION_DEFINE([LT_INIT], [disable-static], [_LT_ENABLE_STATIC([no])])
+
+# Old names:
+AC_DEFUN([AC_ENABLE_STATIC],
+[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[static])
+])
+
+AC_DEFUN([AC_DISABLE_STATIC],
+[_LT_SET_OPTION([LT_INIT], [disable-static])
+])
+
+AU_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)])
+AU_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AM_ENABLE_STATIC], [])
+dnl AC_DEFUN([AM_DISABLE_STATIC], [])
+
+
+
+# _LT_ENABLE_FAST_INSTALL([DEFAULT])
+# ----------------------------------
+# implement the --enable-fast-install flag, and support the `fast-install'
+# and `disable-fast-install' LT_INIT options.
+# DEFAULT is either `yes' or `no'.  If omitted, it defaults to `yes'.
+m4_define([_LT_ENABLE_FAST_INSTALL],
+[m4_define([_LT_ENABLE_FAST_INSTALL_DEFAULT], [m4_if($1, no, no, yes)])dnl
+AC_ARG_ENABLE([fast-install],
+    [AS_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@],
+    [optimize for fast installation @<:@default=]_LT_ENABLE_FAST_INSTALL_DEFAULT[@:>@])],
+    [p=${PACKAGE-default}
+    case $enableval in
+    yes) enable_fast_install=yes ;;
+    no) enable_fast_install=no ;;
+    *)
+      enable_fast_install=no
+      # Look at the argument we got.  We use all the common list separators.
+      lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+      for pkg in $enableval; do
+	IFS="$lt_save_ifs"
+	if test "X$pkg" = "X$p"; then
+	  enable_fast_install=yes
+	fi
+      done
+      IFS="$lt_save_ifs"
+      ;;
+    esac],
+    [enable_fast_install=]_LT_ENABLE_FAST_INSTALL_DEFAULT)
+
+_LT_DECL([fast_install], [enable_fast_install], [0],
+	 [Whether or not to optimize for fast installation])dnl
+])# _LT_ENABLE_FAST_INSTALL
+
+LT_OPTION_DEFINE([LT_INIT], [fast-install], [_LT_ENABLE_FAST_INSTALL([yes])])
+LT_OPTION_DEFINE([LT_INIT], [disable-fast-install], [_LT_ENABLE_FAST_INSTALL([no])])
+
+# Old names:
+AU_DEFUN([AC_ENABLE_FAST_INSTALL],
+[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[fast-install])
+AC_DIAGNOSE([obsolete],
+[$0: Remove this warning and the call to _LT_SET_OPTION when you put
+the `fast-install' option into LT_INIT's first parameter.])
+])
+
+AU_DEFUN([AC_DISABLE_FAST_INSTALL],
+[_LT_SET_OPTION([LT_INIT], [disable-fast-install])
+AC_DIAGNOSE([obsolete],
+[$0: Remove this warning and the call to _LT_SET_OPTION when you put
+the `disable-fast-install' option into LT_INIT's first parameter.])
+])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_ENABLE_FAST_INSTALL], [])
+dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], [])
+
+
+# _LT_WITH_PIC([MODE])
+# --------------------
+# implement the --with-pic flag, and support the `pic-only' and `no-pic'
+# LT_INIT options.
+# MODE is either `yes' or `no'.  If omitted, it defaults to `both'.
+m4_define([_LT_WITH_PIC],
+[AC_ARG_WITH([pic],
+    [AS_HELP_STRING([--with-pic@<:@=PKGS@:>@],
+	[try to use only PIC/non-PIC objects @<:@default=use both@:>@])],
+    [lt_p=${PACKAGE-default}
+    case $withval in
+    yes|no) pic_mode=$withval ;;
+    *)
+      pic_mode=default
+      # Look at the argument we got.  We use all the common list separators.
+      lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+      for lt_pkg in $withval; do
+	IFS="$lt_save_ifs"
+	if test "X$lt_pkg" = "X$lt_p"; then
+	  pic_mode=yes
+	fi
+      done
+      IFS="$lt_save_ifs"
+      ;;
+    esac],
+    [pic_mode=default])
+
+test -z "$pic_mode" && pic_mode=m4_default([$1], [default])
+
+_LT_DECL([], [pic_mode], [0], [What type of objects to build])dnl
+])# _LT_WITH_PIC
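+
+# Illustrative behaviour (editorial sketch; the package name is
+# hypothetical): with the macro above,
+#   ./configure --with-pic      => pic_mode=yes  (PIC objects only)
+#   ./configure --without-pic   => pic_mode=no   (non-PIC objects only)
+#   ./configure --with-pic=foo  => pic_mode=yes only if $PACKAGE is
+#                                  `foo', otherwise `default'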
+
+LT_OPTION_DEFINE([LT_INIT], [pic-only], [_LT_WITH_PIC([yes])])
+LT_OPTION_DEFINE([LT_INIT], [no-pic], [_LT_WITH_PIC([no])])
+
+# Old name:
+AU_DEFUN([AC_LIBTOOL_PICMODE],
+[_LT_SET_OPTION([LT_INIT], [pic-only])
+AC_DIAGNOSE([obsolete],
+[$0: Remove this warning and the call to _LT_SET_OPTION when you
+put the `pic-only' option into LT_INIT's first parameter.])
+])
+
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_PICMODE], [])
+
+
+m4_define([_LTDL_MODE], [])
+LT_OPTION_DEFINE([LTDL_INIT], [nonrecursive],
+		 [m4_define([_LTDL_MODE], [nonrecursive])])
+LT_OPTION_DEFINE([LTDL_INIT], [recursive],
+		 [m4_define([_LTDL_MODE], [recursive])])
+LT_OPTION_DEFINE([LTDL_INIT], [subproject],
+		 [m4_define([_LTDL_MODE], [subproject])])
+
+m4_define([_LTDL_TYPE], [])
+LT_OPTION_DEFINE([LTDL_INIT], [installable],
+		 [m4_define([_LTDL_TYPE], [installable])])
+LT_OPTION_DEFINE([LTDL_INIT], [convenience],
+		 [m4_define([_LTDL_TYPE], [convenience])])
+
+# ltsugar.m4 -- libtool m4 base layer.                         -*-Autoconf-*-
+#
+# Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc.
+# Written by Gary V. Vaughan, 2004
+#
+# This file is free software; the Free Software Foundation gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+
+# serial 6 ltsugar.m4
+
+# This is to help aclocal find these macros, as it can't see m4_define.
+AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])])
+
+
+# lt_join(SEP, ARG1, [ARG2...])
+# -----------------------------
+# Produce ARG1SEPARG2...SEPARGn, omitting [] arguments and their
+# associated separator.
+# Needed until we can rely on m4_join from Autoconf 2.62, since all earlier
+# versions in m4sugar had bugs.
+m4_define([lt_join],
+[m4_if([$#], [1], [],
+       [$#], [2], [[$2]],
+       [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift(m4_shift($@)))])])
+m4_define([_lt_join],
+[m4_if([$#$2], [2], [],
+       [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift(m4_shift($@)))])])
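+
+# Illustrative expansion (editorial sketch):
+#   lt_join([,], [a], [b], [c])
+# expands to `a,b,c'; empty arguments are dropped along with their
+# separator, so lt_join([,], [a], [], [c]) yields `a,c'.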
+
+
+# lt_car(LIST)
+# lt_cdr(LIST)
+# ------------
+# Manipulate m4 lists.
+# These macros are necessary as long as we still need to support
+# Autoconf 2.59, which quotes differently.
+m4_define([lt_car], [[$1]])
+m4_define([lt_cdr],
+[m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])],
+       [$#], 1, [],
+       [m4_dquote(m4_shift($@))])])
+m4_define([lt_unquote], $1)
+
+
+# lt_append(MACRO-NAME, STRING, [SEPARATOR])
+# ------------------------------------------
+# Redefine MACRO-NAME to hold its former content plus `SEPARATOR'`STRING'.
+# Note that neither SEPARATOR nor STRING is expanded; they are appended
+# to MACRO-NAME as is (leaving the expansion for when MACRO-NAME is invoked).
+# No SEPARATOR is output if MACRO-NAME was previously undefined (as
+# distinct from defined but empty).
+#
+# This macro is needed until we can rely on Autoconf 2.62, since earlier
+# versions of m4sugar mistakenly expanded SEPARATOR but not STRING.
+m4_define([lt_append],
+[m4_define([$1],
+	   m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])])
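+
+# Illustrative expansion (editorial sketch; `greeting' is a made-up
+# macro name):
+#   lt_append([greeting], [Hello])
+#   lt_append([greeting], [world], [, ])
+# leaves `greeting' expanding to `Hello, world'; no separator is
+# emitted on the first call because `greeting' was undefined.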
+
+
+
+# lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...])
+# ----------------------------------------------------------
+# Produce a SEP delimited list of all paired combinations of elements of
+# PREFIX-LIST with SUFFIX1 through SUFFIXn.  Each element of the list
+# has the form PREFIXmINFIXSUFFIXn.
+# Needed until we can rely on m4_combine added in Autoconf 2.62.
+m4_define([lt_combine],
+[m4_if(m4_eval([$# > 3]), [1],
+       [m4_pushdef([_Lt_sep], [m4_define([_Lt_sep], m4_defn([lt_car]))])]]dnl
+[[m4_foreach([_Lt_prefix], [$2],
+	     [m4_foreach([_Lt_suffix],
+		]m4_dquote(m4_dquote(m4_shift(m4_shift(m4_shift($@)))))[,
+	[_Lt_sep([$1])[]m4_defn([_Lt_prefix])[$3]m4_defn([_Lt_suffix])])])])])
+
+
+# lt_if_append_uniq(MACRO-NAME, VARNAME, [SEPARATOR], [UNIQ], [NOT-UNIQ])
+# -----------------------------------------------------------------------
+# Iff MACRO-NAME does not yet contain VARNAME, then append it (delimited
+# by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ.
+m4_define([lt_if_append_uniq],
+[m4_ifdef([$1],
+	  [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1],
+		 [lt_append([$1], [$2], [$3])$4],
+		 [$5])],
+	  [lt_append([$1], [$2], [$3])$4])])
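+
+# Illustrative expansion (editorial sketch; `deps' is a made-up macro
+# name):
+#   lt_if_append_uniq([deps], [-lm], [ ])
+#   lt_if_append_uniq([deps], [-lm], [ ])
+# leaves `deps' expanding to `-lm' exactly once; the second call takes
+# the NOT-UNIQ branch because ` -lm ' is already present.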
+
+
+# lt_dict_add(DICT, KEY, VALUE)
+# -----------------------------
+m4_define([lt_dict_add],
+[m4_define([$1($2)], [$3])])
+
+
+# lt_dict_add_subkey(DICT, KEY, SUBKEY, VALUE)
+# --------------------------------------------
+m4_define([lt_dict_add_subkey],
+[m4_define([$1($2:$3)], [$4])])
+
+
+# lt_dict_fetch(DICT, KEY, [SUBKEY])
+# ----------------------------------
+m4_define([lt_dict_fetch],
+[m4_ifval([$3],
+	m4_ifdef([$1($2:$3)], [m4_defn([$1($2:$3)])]),
+    m4_ifdef([$1($2)], [m4_defn([$1($2)])]))])
+
+
+# lt_if_dict_fetch(DICT, KEY, [SUBKEY], VALUE, IF-TRUE, [IF-FALSE])
+# -----------------------------------------------------------------
+m4_define([lt_if_dict_fetch],
+[m4_if(lt_dict_fetch([$1], [$2], [$3]), [$4],
+	[$5],
+    [$6])])
+
+
+# lt_dict_filter(DICT, [SUBKEY], VALUE, [SEPARATOR], KEY, [...])
+# --------------------------------------------------------------
+m4_define([lt_dict_filter],
+[m4_if([$5], [], [],
+  [lt_join(m4_quote(m4_default([$4], [[, ]])),
+           lt_unquote(m4_split(m4_normalize(m4_foreach(_Lt_key, lt_car([m4_shiftn(4, $@)]),
+		      [lt_if_dict_fetch([$1], _Lt_key, [$2], [$3], [_Lt_key ])])))))])[]dnl
+])
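+
+# Illustrative usage of the dictionary helpers (editorial sketch;
+# `colors' and its keys are made up):
+#   lt_dict_add([colors], [sky], [blue])
+#   lt_dict_fetch([colors], [sky])           => blue
+#   lt_dict_add_subkey([colors], [sky], [night], [black])
+#   lt_dict_fetch([colors], [sky], [night])  => black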
+
+# ltversion.m4 -- version numbers			-*- Autoconf -*-
+#
+#   Copyright (C) 2004 Free Software Foundation, Inc.
+#   Written by Scott James Remnant, 2004
+#
+# This file is free software; the Free Software Foundation gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+
+# @configure_input@
+
+# serial 3337 ltversion.m4
+# This file is part of GNU Libtool
+
+m4_define([LT_PACKAGE_VERSION], [2.4.2])
+m4_define([LT_PACKAGE_REVISION], [1.3337])
+
+AC_DEFUN([LTVERSION_VERSION],
+[macro_version='2.4.2'
+macro_revision='1.3337'
+_LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?])
+_LT_DECL(, macro_revision, 0)
+])
+
+# lt~obsolete.m4 -- aclocal satisfying obsolete definitions.    -*-Autoconf-*-
+#
+#   Copyright (C) 2004, 2005, 2007, 2009 Free Software Foundation, Inc.
+#   Written by Scott James Remnant, 2004.
+#
+# This file is free software; the Free Software Foundation gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+
+# serial 5 lt~obsolete.m4
+
+# These exist entirely to fool aclocal when bootstrapping libtool.
+#
+# In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN)
+# which have later been changed to m4_define as they aren't part of the
+# exported API, or moved to Autoconf or Automake where they belong.
+#
+# The trouble is, aclocal is a bit thick.  It'll see the old AC_DEFUN
+# in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us
+# using a macro with the same name in our local m4/libtool.m4 it'll
+# pull the old libtool.m4 in (it doesn't see our shiny new m4_define
+# and doesn't know about Autoconf macros at all.)
+#
+# So we provide this file, which has a silly filename so it's always
+# included after everything else.  This provides aclocal with the
+# AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything
+# because those macros already exist, or will be overwritten later.
+# We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6. 
+#
+# Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here.
+# Yes, that means every name once taken will need to remain here until
+# we give up compatibility with versions before 1.7, at which point
+# we need to keep only those names which we still refer to.
+
+# This is to help aclocal find these macros, as it can't see m4_define.
+AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])])
+
+m4_ifndef([AC_LIBTOOL_LINKER_OPTION],	[AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])])
+m4_ifndef([AC_PROG_EGREP],		[AC_DEFUN([AC_PROG_EGREP])])
+m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH],	[AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])])
+m4_ifndef([_LT_AC_SHELL_INIT],		[AC_DEFUN([_LT_AC_SHELL_INIT])])
+m4_ifndef([_LT_AC_SYS_LIBPATH_AIX],	[AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])])
+m4_ifndef([_LT_PROG_LTMAIN],		[AC_DEFUN([_LT_PROG_LTMAIN])])
+m4_ifndef([_LT_AC_TAGVAR],		[AC_DEFUN([_LT_AC_TAGVAR])])
+m4_ifndef([AC_LTDL_ENABLE_INSTALL],	[AC_DEFUN([AC_LTDL_ENABLE_INSTALL])])
+m4_ifndef([AC_LTDL_PREOPEN],		[AC_DEFUN([AC_LTDL_PREOPEN])])
+m4_ifndef([_LT_AC_SYS_COMPILER],	[AC_DEFUN([_LT_AC_SYS_COMPILER])])
+m4_ifndef([_LT_AC_LOCK],		[AC_DEFUN([_LT_AC_LOCK])])
+m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE],	[AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])])
+m4_ifndef([_LT_AC_TRY_DLOPEN_SELF],	[AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])])
+m4_ifndef([AC_LIBTOOL_PROG_CC_C_O],	[AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])])
+m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])])
+m4_ifndef([AC_LIBTOOL_OBJDIR],		[AC_DEFUN([AC_LIBTOOL_OBJDIR])])
+m4_ifndef([AC_LTDL_OBJDIR],		[AC_DEFUN([AC_LTDL_OBJDIR])])
+m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])])
+m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP],	[AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])])
+m4_ifndef([AC_PATH_MAGIC],		[AC_DEFUN([AC_PATH_MAGIC])])
+m4_ifndef([AC_PROG_LD_GNU],		[AC_DEFUN([AC_PROG_LD_GNU])])
+m4_ifndef([AC_PROG_LD_RELOAD_FLAG],	[AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])])
+m4_ifndef([AC_DEPLIBS_CHECK_METHOD],	[AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])])
+m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])])
+m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], [AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])])
+m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])])
+m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS],	[AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])])
+m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP],	[AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])])
+m4_ifndef([LT_AC_PROG_EGREP],		[AC_DEFUN([LT_AC_PROG_EGREP])])
+m4_ifndef([LT_AC_PROG_SED],		[AC_DEFUN([LT_AC_PROG_SED])])
+m4_ifndef([_LT_CC_BASENAME],		[AC_DEFUN([_LT_CC_BASENAME])])
+m4_ifndef([_LT_COMPILER_BOILERPLATE],	[AC_DEFUN([_LT_COMPILER_BOILERPLATE])])
+m4_ifndef([_LT_LINKER_BOILERPLATE],	[AC_DEFUN([_LT_LINKER_BOILERPLATE])])
+m4_ifndef([_AC_PROG_LIBTOOL],		[AC_DEFUN([_AC_PROG_LIBTOOL])])
+m4_ifndef([AC_LIBTOOL_SETUP],		[AC_DEFUN([AC_LIBTOOL_SETUP])])
+m4_ifndef([_LT_AC_CHECK_DLFCN],		[AC_DEFUN([_LT_AC_CHECK_DLFCN])])
+m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER],	[AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])])
+m4_ifndef([_LT_AC_TAGCONFIG],		[AC_DEFUN([_LT_AC_TAGCONFIG])])
+m4_ifndef([AC_DISABLE_FAST_INSTALL],	[AC_DEFUN([AC_DISABLE_FAST_INSTALL])])
+m4_ifndef([_LT_AC_LANG_CXX],		[AC_DEFUN([_LT_AC_LANG_CXX])])
+m4_ifndef([_LT_AC_LANG_F77],		[AC_DEFUN([_LT_AC_LANG_F77])])
+m4_ifndef([_LT_AC_LANG_GCJ],		[AC_DEFUN([_LT_AC_LANG_GCJ])])
+m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG],	[AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])])
+m4_ifndef([_LT_AC_LANG_C_CONFIG],	[AC_DEFUN([_LT_AC_LANG_C_CONFIG])])
+m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG],	[AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])])
+m4_ifndef([_LT_AC_LANG_CXX_CONFIG],	[AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])])
+m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG],	[AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])])
+m4_ifndef([_LT_AC_LANG_F77_CONFIG],	[AC_DEFUN([_LT_AC_LANG_F77_CONFIG])])
+m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG],	[AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])])
+m4_ifndef([_LT_AC_LANG_GCJ_CONFIG],	[AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])])
+m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG],	[AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])])
+m4_ifndef([_LT_AC_LANG_RC_CONFIG],	[AC_DEFUN([_LT_AC_LANG_RC_CONFIG])])
+m4_ifndef([AC_LIBTOOL_CONFIG],		[AC_DEFUN([AC_LIBTOOL_CONFIG])])
+m4_ifndef([_LT_AC_FILE_LTDLL_C],	[AC_DEFUN([_LT_AC_FILE_LTDLL_C])])
+m4_ifndef([_LT_REQUIRED_DARWIN_CHECKS],	[AC_DEFUN([_LT_REQUIRED_DARWIN_CHECKS])])
+m4_ifndef([_LT_AC_PROG_CXXCPP],		[AC_DEFUN([_LT_AC_PROG_CXXCPP])])
+m4_ifndef([_LT_PREPARE_SED_QUOTE_VARS],	[AC_DEFUN([_LT_PREPARE_SED_QUOTE_VARS])])
+m4_ifndef([_LT_PROG_ECHO_BACKSLASH],	[AC_DEFUN([_LT_PROG_ECHO_BACKSLASH])])
+m4_ifndef([_LT_PROG_F77],		[AC_DEFUN([_LT_PROG_F77])])
+m4_ifndef([_LT_PROG_FC],		[AC_DEFUN([_LT_PROG_FC])])
+m4_ifndef([_LT_PROG_CXX],		[AC_DEFUN([_LT_PROG_CXX])])
+
+# Copyright (C) 2002, 2003, 2005, 2006, 2007, 2008, 2011 Free Software
+# Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 1
+
+# AM_AUTOMAKE_VERSION(VERSION)
+# ----------------------------
+# Automake X.Y traces this macro to ensure aclocal.m4 has been
+# generated from the m4 files accompanying Automake X.Y.
+# (This private macro should not be called outside this file.)
+AC_DEFUN([AM_AUTOMAKE_VERSION],
+[am__api_version='1.11'
+dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to
+dnl require some minimum version.  Point them to the right macro.
+m4_if([$1], [1.11.6], [],
+      [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl
+])
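+
+# For illustration: AM_AUTOMAKE_VERSION is private, so a configure.ac that
+# wants to require a minimum Automake version would instead write, e.g.,
+#
+#   AM_INIT_AUTOMAKE([1.11 foreign])
+#
+# (the version number and option shown here are only an example).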
+
+# _AM_AUTOCONF_VERSION(VERSION)
+# -----------------------------
+# aclocal traces this macro to find the Autoconf version.
+# This is a private macro too.  Using m4_define simplifies
+# the logic in aclocal, which can simply ignore this definition.
+m4_define([_AM_AUTOCONF_VERSION], [])
+
+# AM_SET_CURRENT_AUTOMAKE_VERSION
+# -------------------------------
+# Call AM_AUTOMAKE_VERSION and _AM_AUTOCONF_VERSION so they can be traced.
+# This function is AC_REQUIREd by AM_INIT_AUTOMAKE.
+AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION],
+[AM_AUTOMAKE_VERSION([1.11.6])dnl
+m4_ifndef([AC_AUTOCONF_VERSION],
+  [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
+_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))])
+
+# Figure out how to run the assembler.                      -*- Autoconf -*-
+
+# Copyright (C) 2001, 2003, 2004, 2005, 2006  Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 5
+
+# AM_PROG_AS
+# ----------
+AC_DEFUN([AM_PROG_AS],
+[# By default we simply use the C compiler to build assembly code.
+AC_REQUIRE([AC_PROG_CC])
+test "${CCAS+set}" = set || CCAS=$CC
+test "${CCASFLAGS+set}" = set || CCASFLAGS=$CFLAGS
+AC_ARG_VAR([CCAS],      [assembler compiler command (defaults to CC)])
+AC_ARG_VAR([CCASFLAGS], [assembler compiler flags (defaults to CFLAGS)])
+_AM_IF_OPTION([no-dependencies],, [_AM_DEPENDENCIES([CCAS])])dnl
+])
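+
+# A short usage sketch (library and file names are hypothetical): after
+# configure.ac calls
+#
+#   AM_PROG_AS
+#
+# a Makefile.am can list assembly sources directly, e.g.
+#
+#   libfoo_la_SOURCES = fast_kernel.S
+#
+# and they will be compiled with $(CCAS) $(CCASFLAGS).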
+
+# AM_AUX_DIR_EXPAND                                         -*- Autoconf -*-
+
+# Copyright (C) 2001, 2003, 2005, 2011 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 1
+
+# For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets
+# $ac_aux_dir to `$srcdir/foo'.  In other projects, it is set to
+# `$srcdir', `$srcdir/..', or `$srcdir/../..'.
+#
+# Of course, Automake must honor this variable whenever it calls a
+# tool from the auxiliary directory.  The problem is that $srcdir (and
+# therefore $ac_aux_dir as well) can be either absolute or relative,
+# depending on how configure is run.  This is pretty annoying, since
+# it makes $ac_aux_dir quite unusable in subdirectories: in the top
+# source directory, any form will work fine, but in subdirectories a
+# relative path needs to be adjusted first.
+#
+# $ac_aux_dir/missing
+#    fails when called from a subdirectory if $ac_aux_dir is relative
+# $top_srcdir/$ac_aux_dir/missing
+#    fails if $ac_aux_dir is absolute,
+#    fails when called from a subdirectory in a VPATH build with
+#          a relative $ac_aux_dir
+#
+# The reason for the latter failure is that $top_srcdir and $ac_aux_dir
+# are both prefixed by $srcdir.  In an in-source build this is usually
+# harmless because $srcdir is `.', but things will break when you
+# start a VPATH build or use an absolute $srcdir.
+#
+# So we could use something similar to $top_srcdir/$ac_aux_dir/missing,
+# iff we strip the leading $srcdir from $ac_aux_dir.  That would be:
+#   am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"`
+# and then we would define $MISSING as
+#   MISSING="\${SHELL} $am_aux_dir/missing"
+# This will work as long as MISSING is not called from configure, because
+# unfortunately $(top_srcdir) has no meaning in configure.
+# However there are other variables, like CC, which are often used in
+# configure, and could therefore not use this "fixed" $ac_aux_dir.
+#
+# Another solution, used here, is to always expand $ac_aux_dir to an
+# absolute PATH.  The drawback is that using absolute paths prevents a
+# configured tree from being moved without reconfiguration.
+
+AC_DEFUN([AM_AUX_DIR_EXPAND],
+[dnl Rely on autoconf to set up CDPATH properly.
+AC_PREREQ([2.50])dnl
+# expand $ac_aux_dir to an absolute path
+am_aux_dir=`cd $ac_aux_dir && pwd`
+])
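+
+# A concrete example (paths hypothetical): with AC_CONFIG_AUX_DIR([build-aux])
+# $ac_aux_dir may come out as the relative `../src/build-aux', whereas
+# $am_aux_dir expands to an absolute `/home/user/src/build-aux', which
+# remains usable from any subdirectory.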
+
+# AM_CONDITIONAL                                            -*- Autoconf -*-
+
+# Copyright (C) 1997, 2000, 2001, 2003, 2004, 2005, 2006, 2008
+# Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 9
+
+# AM_CONDITIONAL(NAME, SHELL-CONDITION)
+# -------------------------------------
+# Define a conditional.
+AC_DEFUN([AM_CONDITIONAL],
+[AC_PREREQ(2.52)dnl
+ ifelse([$1], [TRUE],  [AC_FATAL([$0: invalid condition: $1])],
+	[$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl
+AC_SUBST([$1_TRUE])dnl
+AC_SUBST([$1_FALSE])dnl
+_AM_SUBST_NOTMAKE([$1_TRUE])dnl
+_AM_SUBST_NOTMAKE([$1_FALSE])dnl
+m4_define([_AM_COND_VALUE_$1], [$2])dnl
+if $2; then
+  $1_TRUE=
+  $1_FALSE='#'
+else
+  $1_TRUE='#'
+  $1_FALSE=
+fi
+AC_CONFIG_COMMANDS_PRE(
+[if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then
+  AC_MSG_ERROR([[conditional "$1" was never defined.
+Usually this means the macro was only invoked conditionally.]])
+fi])])
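+
+# A minimal usage sketch (the DEBUG conditional is hypothetical):
+#
+#   configure.ac:  AM_CONDITIONAL([DEBUG], [test "x$enable_debug" = xyes])
+#   Makefile.am:   if DEBUG
+#                  AM_CFLAGS = -g -O0
+#                  endif
+#
+# In the generated Makefile, @DEBUG_TRUE@ and @DEBUG_FALSE@ expand to either
+# the empty string or `#', commenting the guarded lines in or out.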
+
+# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2009,
+# 2010, 2011 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 12
+
+# There are a few dirty hacks below to avoid letting `AC_PROG_CC' be
+# written in clear, in which case automake, when reading aclocal.m4,
+# will think it sees a *use*, and therefore will trigger all its
+# C support machinery.  Also note that it means that autoscan, seeing
+# CC etc. in the Makefile, will ask for an AC_PROG_CC use...
+
+
+# _AM_DEPENDENCIES(NAME)
+# ----------------------
+# See how the compiler implements dependency checking.
+# NAME is "CC", "CXX", "GCJ", or "OBJC".
+# We try a few techniques and use that to set a single cache variable.
+#
+# We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was
+# modified to invoke _AM_DEPENDENCIES(CC); we would have a circular
+# dependency, and given that the user is not expected to run this macro,
+# just rely on AC_PROG_CC.
+AC_DEFUN([_AM_DEPENDENCIES],
+[AC_REQUIRE([AM_SET_DEPDIR])dnl
+AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl
+AC_REQUIRE([AM_MAKE_INCLUDE])dnl
+AC_REQUIRE([AM_DEP_TRACK])dnl
+
+ifelse([$1], CC,   [depcc="$CC"   am_compiler_list=],
+       [$1], CXX,  [depcc="$CXX"  am_compiler_list=],
+       [$1], OBJC, [depcc="$OBJC" am_compiler_list='gcc3 gcc'],
+       [$1], UPC,  [depcc="$UPC"  am_compiler_list=],
+       [$1], GCJ,  [depcc="$GCJ"  am_compiler_list='gcc3 gcc'],
+                   [depcc="$$1"   am_compiler_list=])
+
+AC_CACHE_CHECK([dependency style of $depcc],
+               [am_cv_$1_dependencies_compiler_type],
+[if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+  # We make a subdir and do the tests there.  Otherwise we can end up
+  # making bogus files that we don't know about and never remove.  For
+  # instance it was reported that on HP-UX the gcc test will end up
+  # making a dummy file named `D' -- because `-MD' means `put the output
+  # in D'.
+  rm -rf conftest.dir
+  mkdir conftest.dir
+  # Copy depcomp to subdir because otherwise we won't find it if we're
+  # using a relative directory.
+  cp "$am_depcomp" conftest.dir
+  cd conftest.dir
+  # We will build objects and dependencies in a subdirectory because
+  # it helps to detect inapplicable dependency modes.  For instance
+  # both Tru64's cc and ICC support -MD to output dependencies as a
+  # side effect of compilation, but ICC will put the dependencies in
+  # the current directory while Tru64 will put them in the object
+  # directory.
+  mkdir sub
+
+  am_cv_$1_dependencies_compiler_type=none
+  if test "$am_compiler_list" = ""; then
+     am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp`
+  fi
+  am__universal=false
+  m4_case([$1], [CC],
+    [case " $depcc " in #(
+     *\ -arch\ *\ -arch\ *) am__universal=true ;;
+     esac],
+    [CXX],
+    [case " $depcc " in #(
+     *\ -arch\ *\ -arch\ *) am__universal=true ;;
+     esac])
+
+  for depmode in $am_compiler_list; do
+    # Set up a source with many dependencies, because some compilers
+    # like to wrap large dependency lists on column 80 (with \), and
+    # we should not choose a depcomp mode which is confused by this.
+    #
+    # We need to recreate these files for each test, as the compiler may
+    # overwrite some of them when testing with obscure command lines.
+    # This happens at least with the AIX C compiler.
+    : > sub/conftest.c
+    for i in 1 2 3 4 5 6; do
+      echo '#include "conftst'$i'.h"' >> sub/conftest.c
+      # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with
+      # Solaris 8's {/usr,}/bin/sh.
+      touch sub/conftst$i.h
+    done
+    echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+    # We check with `-c' and `-o' for the sake of the "dashmstdout"
+    # mode.  It turns out that the SunPro C++ compiler does not properly
+    # handle `-M -o', and we need to detect this.  Also, some Intel
+    # versions had trouble with output in subdirs.
+    am__obj=sub/conftest.${OBJEXT-o}
+    am__minus_obj="-o $am__obj"
+    case $depmode in
+    gcc)
+      # This depmode causes a compiler race in universal mode.
+      test "$am__universal" = false || continue
+      ;;
+    nosideeffect)
+      # after this tag, mechanisms are not by side-effect, so they'll
+      # only be used when explicitly requested
+      if test "x$enable_dependency_tracking" = xyes; then
+	continue
+      else
+	break
+      fi
+      ;;
+    msvc7 | msvc7msys | msvisualcpp | msvcmsys)
+      # This compiler won't grok `-c -o', but also, the minuso test has
+      # not run yet.  These depmodes are late enough in the game, and
+      # so weak that their functioning should not be impacted.
+      am__obj=conftest.${OBJEXT-o}
+      am__minus_obj=
+      ;;
+    none) break ;;
+    esac
+    if depmode=$depmode \
+       source=sub/conftest.c object=$am__obj \
+       depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+       $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+         >/dev/null 2>conftest.err &&
+       grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+       grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+       grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+       ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+      # icc doesn't choke on unknown options, it will just issue warnings
+      # or remarks (even with -Werror).  So we grep stderr for any message
+      # that says an option was ignored or not supported.
+      # When given -MP, icc 7.0 and 7.1 complain thusly:
+      #   icc: Command line warning: ignoring option '-M'; no argument required
+      # The diagnosis changed in icc 8.0:
+      #   icc: Command line remark: option '-MP' not supported
+      if (grep 'ignoring option' conftest.err ||
+          grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+        am_cv_$1_dependencies_compiler_type=$depmode
+        break
+      fi
+    fi
+  done
+
+  cd ..
+  rm -rf conftest.dir
+else
+  am_cv_$1_dependencies_compiler_type=none
+fi
+])
+AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type])
+AM_CONDITIONAL([am__fastdep$1], [
+  test "x$enable_dependency_tracking" != xno \
+  && test "$am_cv_$1_dependencies_compiler_type" = gcc3])
+])
+
+
+# AM_SET_DEPDIR
+# -------------
+# Choose a directory name for dependency files.
+# This macro is AC_REQUIREd in _AM_DEPENDENCIES
+AC_DEFUN([AM_SET_DEPDIR],
+[AC_REQUIRE([AM_SET_LEADING_DOT])dnl
+AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl
+])
+
+
+# AM_DEP_TRACK
+# ------------
+AC_DEFUN([AM_DEP_TRACK],
+[AC_ARG_ENABLE(dependency-tracking,
+[  --disable-dependency-tracking  speeds up one-time build
+  --enable-dependency-tracking   do not reject slow dependency extractors])
+if test "x$enable_dependency_tracking" != xno; then
+  am_depcomp="$ac_aux_dir/depcomp"
+  AMDEPBACKSLASH='\'
+  am__nodep='_no'
+fi
+AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno])
+AC_SUBST([AMDEPBACKSLASH])dnl
+_AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl
+AC_SUBST([am__nodep])dnl
+_AM_SUBST_NOTMAKE([am__nodep])dnl
+])
+
+# Generate code to set up dependency tracking.              -*- Autoconf -*-
+
+# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008
+# Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+#serial 5
+
+# _AM_OUTPUT_DEPENDENCY_COMMANDS
+# ------------------------------
+AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],
+[{
+  # Autoconf 2.62 quotes --file arguments for eval, but not when files
+  # are listed without --file.  Let's play safe and only enable the eval
+  # if we detect the quoting.
+  case $CONFIG_FILES in
+  *\'*) eval set x "$CONFIG_FILES" ;;
+  *)   set x $CONFIG_FILES ;;
+  esac
+  shift
+  for mf
+  do
+    # Strip MF so we end up with the name of the file.
+    mf=`echo "$mf" | sed -e 's/:.*$//'`
+    # Check whether this is an Automake generated Makefile or not.
+    # We used to match only the files named `Makefile.in', but
+    # some people rename them; so instead we look at the file content.
+    # Grep'ing the first line is not enough: some people post-process
+    # each Makefile.in and add a new line on top of each file to say so.
+    # Grep'ing the whole file is not good either: AIX grep has a line
+    # limit of 2048, but all seds we know understand at least 4000.
+    if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then
+      dirpart=`AS_DIRNAME("$mf")`
+    else
+      continue
+    fi
+    # Extract the definition of DEPDIR, am__include, and am__quote
+    # from the Makefile without running `make'.
+    DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"`
+    test -z "$DEPDIR" && continue
+    am__include=`sed -n 's/^am__include = //p' < "$mf"`
+    test -z "am__include" && continue
+    am__quote=`sed -n 's/^am__quote = //p' < "$mf"`
+    # When using ansi2knr, U may be empty or an underscore; expand it
+    U=`sed -n 's/^U = //p' < "$mf"`
+    # Find all dependency output files, they are included files with
+    # $(DEPDIR) in their names.  We invoke sed twice because it is the
+    # simplest approach to changing $(DEPDIR) to its actual value in the
+    # expansion.
+    for file in `sed -n "
+      s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \
+	 sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do
+      # Make sure the directory exists.
+      test -f "$dirpart/$file" && continue
+      fdir=`AS_DIRNAME(["$file"])`
+      AS_MKDIR_P([$dirpart/$fdir])
+      # echo "creating $dirpart/$file"
+      echo '# dummy' > "$dirpart/$file"
+    done
+  done
+}
+])# _AM_OUTPUT_DEPENDENCY_COMMANDS
+
+
+# AM_OUTPUT_DEPENDENCY_COMMANDS
+# -----------------------------
+# This macro should only be invoked once -- use via AC_REQUIRE.
+#
+# This code is only required when automatic dependency tracking
+# is enabled.  FIXME.  This creates each `.P' file that we will
+# need in order to bootstrap the dependency handling code.
+AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS],
+[AC_CONFIG_COMMANDS([depfiles],
+     [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS],
+     [AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"])
+])
+
+# Do all the work for Automake.                             -*- Autoconf -*-
+
+# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
+# 2005, 2006, 2008, 2009 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 16
+
+# This macro actually does too much.  Some checks are only needed if
+# your package does certain things.  But this isn't really a big deal.
+
+# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE])
+# AM_INIT_AUTOMAKE([OPTIONS])
+# -----------------------------------------------
+# The call with PACKAGE and VERSION arguments is the old style
+# call (pre autoconf-2.50), which is being phased out.  PACKAGE
+# and VERSION should now be passed to AC_INIT and removed from
+# the call to AM_INIT_AUTOMAKE.
+# We support both call styles for the transition.  After
+# the next Automake release, Autoconf can make the AC_INIT
+# arguments mandatory, and then we can depend on a new Autoconf
+# release and drop the old call support.
+AC_DEFUN([AM_INIT_AUTOMAKE],
+[AC_PREREQ([2.62])dnl
+dnl Autoconf wants to disallow AM_ names.  We explicitly allow
+dnl the ones we care about.
+m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl
+AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl
+AC_REQUIRE([AC_PROG_INSTALL])dnl
+if test "`cd $srcdir && pwd`" != "`pwd`"; then
+  # Use -I$(srcdir) only when $(srcdir) != ., so that make's output
+  # is not polluted with repeated "-I."
+  AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl
+  # test to see if srcdir already configured
+  if test -f $srcdir/config.status; then
+    AC_MSG_ERROR([source directory already configured; run "make distclean" there first])
+  fi
+fi
+
+# test whether we have cygpath
+if test -z "$CYGPATH_W"; then
+  if (cygpath --version) >/dev/null 2>/dev/null; then
+    CYGPATH_W='cygpath -w'
+  else
+    CYGPATH_W=echo
+  fi
+fi
+AC_SUBST([CYGPATH_W])
+
+# Define the identity of the package.
+dnl Distinguish between old-style and new-style calls.
+m4_ifval([$2],
+[m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl
+ AC_SUBST([PACKAGE], [$1])dnl
+ AC_SUBST([VERSION], [$2])],
+[_AM_SET_OPTIONS([$1])dnl
+dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT.
+m4_if(m4_ifdef([AC_PACKAGE_NAME], 1)m4_ifdef([AC_PACKAGE_VERSION], 1), 11,,
+  [m4_fatal([AC_INIT should be called with package and version arguments])])dnl
+ AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl
+ AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl
+
+_AM_IF_OPTION([no-define],,
+[AC_DEFINE_UNQUOTED(PACKAGE, "$PACKAGE", [Name of package])
+ AC_DEFINE_UNQUOTED(VERSION, "$VERSION", [Version number of package])])dnl
+
+# Some tools Automake needs.
+AC_REQUIRE([AM_SANITY_CHECK])dnl
+AC_REQUIRE([AC_ARG_PROGRAM])dnl
+AM_MISSING_PROG(ACLOCAL, aclocal-${am__api_version})
+AM_MISSING_PROG(AUTOCONF, autoconf)
+AM_MISSING_PROG(AUTOMAKE, automake-${am__api_version})
+AM_MISSING_PROG(AUTOHEADER, autoheader)
+AM_MISSING_PROG(MAKEINFO, makeinfo)
+AC_REQUIRE([AM_PROG_INSTALL_SH])dnl
+AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl
+AC_REQUIRE([AM_PROG_MKDIR_P])dnl
+# We need awk for the "check" target.  The system "awk" is bad on
+# some platforms.
+AC_REQUIRE([AC_PROG_AWK])dnl
+AC_REQUIRE([AC_PROG_MAKE_SET])dnl
+AC_REQUIRE([AM_SET_LEADING_DOT])dnl
+_AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])],
+	      [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])],
+			     [_AM_PROG_TAR([v7])])])
+_AM_IF_OPTION([no-dependencies],,
+[AC_PROVIDE_IFELSE([AC_PROG_CC],
+		  [_AM_DEPENDENCIES(CC)],
+		  [define([AC_PROG_CC],
+			  defn([AC_PROG_CC])[_AM_DEPENDENCIES(CC)])])dnl
+AC_PROVIDE_IFELSE([AC_PROG_CXX],
+		  [_AM_DEPENDENCIES(CXX)],
+		  [define([AC_PROG_CXX],
+			  defn([AC_PROG_CXX])[_AM_DEPENDENCIES(CXX)])])dnl
+AC_PROVIDE_IFELSE([AC_PROG_OBJC],
+		  [_AM_DEPENDENCIES(OBJC)],
+		  [define([AC_PROG_OBJC],
+			  defn([AC_PROG_OBJC])[_AM_DEPENDENCIES(OBJC)])])dnl
+])
+_AM_IF_OPTION([silent-rules], [AC_REQUIRE([AM_SILENT_RULES])])dnl
+dnl The `parallel-tests' driver may need to know about EXEEXT, so add the
+dnl `am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen.  This macro
+dnl is hooked onto _AC_COMPILER_EXEEXT early, see below.
+AC_CONFIG_COMMANDS_PRE(dnl
+[m4_provide_if([_AM_COMPILER_EXEEXT],
+  [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl
+])
+
+dnl Hook into `_AC_COMPILER_EXEEXT' early to learn its expansion.  Do not
+dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further
+dnl mangled by Autoconf and run in a shell conditional statement.
+m4_define([_AC_COMPILER_EXEEXT],
+m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])])
+
+
+# When config.status generates a header, we must update the stamp-h file.
+# This file resides in the same directory as the config header
+# that is generated.  The stamp files are numbered to have different names.
+
+# Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the
+# loop where config.status creates the headers, so we can generate
+# our stamp files there.
+AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK],
+[# Compute $1's index in $config_headers.
+_am_arg=$1
+_am_stamp_count=1
+for _am_header in $config_headers :; do
+  case $_am_header in
+    $_am_arg | $_am_arg:* )
+      break ;;
+    * )
+      _am_stamp_count=`expr $_am_stamp_count + 1` ;;
+  esac
+done
+echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count])
+
+# Copyright (C) 2001, 2003, 2005, 2008, 2011 Free Software Foundation,
+# Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 1
+
+# AM_PROG_INSTALL_SH
+# ------------------
+# Define $install_sh.
+AC_DEFUN([AM_PROG_INSTALL_SH],
+[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
+if test x"${install_sh}" != xset; then
+  case $am_aux_dir in
+  *\ * | *\	*)
+    install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;;
+  *)
+    install_sh="\${SHELL} $am_aux_dir/install-sh"
+  esac
+fi
+AC_SUBST(install_sh)])
+
+# Copyright (C) 2003, 2005  Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 2
+
+# Check whether the underlying file-system supports filenames
+# with a leading dot.  For instance MS-DOS doesn't.
+AC_DEFUN([AM_SET_LEADING_DOT],
+[rm -rf .tst 2>/dev/null
+mkdir .tst 2>/dev/null
+if test -d .tst; then
+  am__leading_dot=.
+else
+  am__leading_dot=_
+fi
+rmdir .tst 2>/dev/null
+AC_SUBST([am__leading_dot])])
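+
+# Concretely: $am__leading_dot ends up as `.' on file-systems that accept a
+# leading dot and `_' otherwise, so AM_SET_DEPDIR above yields either
+# DEPDIR=".deps" or DEPDIR="_deps".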
+
+# Check to see how 'make' treats includes.	            -*- Autoconf -*-
+
+# Copyright (C) 2001, 2002, 2003, 2005, 2009  Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 4
+
+# AM_MAKE_INCLUDE()
+# -----------------
+# Check to see how make treats includes.
+AC_DEFUN([AM_MAKE_INCLUDE],
+[am_make=${MAKE-make}
+cat > confinc << 'END'
+am__doit:
+	@echo this is the am__doit target
+.PHONY: am__doit
+END
+# If we don't find an include directive, just comment out the code.
+AC_MSG_CHECKING([for style of include used by $am_make])
+am__include="#"
+am__quote=
+_am_result=none
+# First try GNU make style include.
+echo "include confinc" > confmf
+# Ignore all kinds of additional output from `make'.
+case `$am_make -s -f confmf 2> /dev/null` in #(
+*the\ am__doit\ target*)
+  am__include=include
+  am__quote=
+  _am_result=GNU
+  ;;
+esac
+# Now try BSD make style include.
+if test "$am__include" = "#"; then
+   echo '.include "confinc"' > confmf
+   case `$am_make -s -f confmf 2> /dev/null` in #(
+   *the\ am__doit\ target*)
+     am__include=.include
+     am__quote="\""
+     _am_result=BSD
+     ;;
+   esac
+fi
+AC_SUBST([am__include])
+AC_SUBST([am__quote])
+AC_MSG_RESULT([$_am_result])
+rm -f confinc confmf
+])
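+
+# The detected style determines how dependency fragments are pulled in; the
+# generated line looks like one of (file name hypothetical):
+#
+#   GNU make:  include ./$(DEPDIR)/foo.Po
+#   BSD make:  .include "./$(DEPDIR)/foo.Po"
+#
+# and when neither works, am__include stays `#', which simply comments the
+# line out of the generated Makefile.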
+
+# Copyright (C) 1999, 2000, 2001, 2003, 2004, 2005, 2008
+# Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 6
+
+# AM_PROG_CC_C_O
+# --------------
+# Like AC_PROG_CC_C_O, but changed for automake.
+AC_DEFUN([AM_PROG_CC_C_O],
+[AC_REQUIRE([AC_PROG_CC_C_O])dnl
+AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
+AC_REQUIRE_AUX_FILE([compile])dnl
+# FIXME: we rely on the cache variable name because
+# there is no other way.
+set dummy $CC
+am_cc=`echo $[2] | sed ['s/[^a-zA-Z0-9_]/_/g;s/^[0-9]/_/']`
+eval am_t=\$ac_cv_prog_cc_${am_cc}_c_o
+if test "$am_t" != yes; then
+   # Losing compiler, so override with the script.
+   # FIXME: It is wrong to rewrite CC.
+   # But if we don't then we get into trouble of one sort or another.
+   # A longer-term fix would be to have automake use am__CC in this case,
+   # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)"
+   CC="$am_aux_dir/compile $CC"
+fi
+dnl Make sure AC_PROG_CC is never called again, or it will override our
+dnl setting of CC.
+m4_define([AC_PROG_CC],
+          [m4_fatal([AC_PROG_CC cannot be called after AM_PROG_CC_C_O])])
+])
+
+# Fake the existence of programs that GNU maintainers use.  -*- Autoconf -*-
+
+# Copyright (C) 1997, 1999, 2000, 2001, 2003, 2004, 2005, 2008
+# Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 6
+
+# AM_MISSING_PROG(NAME, PROGRAM)
+# ------------------------------
+AC_DEFUN([AM_MISSING_PROG],
+[AC_REQUIRE([AM_MISSING_HAS_RUN])
+$1=${$1-"${am_missing_run}$2"}
+AC_SUBST($1)])
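+
+# For example, AM_MISSING_PROG(MAKEINFO, makeinfo) (used above in
+# AM_INIT_AUTOMAKE) typically leaves MAKEINFO as something like
+#
+#   MAKEINFO="${SHELL} /path/to/missing --run makeinfo"
+#
+# so an absent maintainer tool degrades into a warning rather than a hard
+# build failure.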
+
+
+# AM_MISSING_HAS_RUN
+# ------------------
+# Define MISSING if not defined so far and test if it supports --run.
+# If it does, set am_missing_run to use it, otherwise, to nothing.
+AC_DEFUN([AM_MISSING_HAS_RUN],
+[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
+AC_REQUIRE_AUX_FILE([missing])dnl
+if test x"${MISSING+set}" != xset; then
+  case $am_aux_dir in
+  *\ * | *\	*)
+    MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;;
+  *)
+    MISSING="\${SHELL} $am_aux_dir/missing" ;;
+  esac
+fi
+# Use eval to expand $SHELL
+if eval "$MISSING --run true"; then
+  am_missing_run="$MISSING --run "
+else
+  am_missing_run=
+  AC_MSG_WARN([`missing' script is too old or missing])
+fi
+])
+
+# Copyright (C) 2003, 2004, 2005, 2006, 2011 Free Software Foundation,
+# Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 1
+
+# AM_PROG_MKDIR_P
+# ---------------
+# Check for `mkdir -p'.
+AC_DEFUN([AM_PROG_MKDIR_P],
+[AC_PREREQ([2.60])dnl
+AC_REQUIRE([AC_PROG_MKDIR_P])dnl
+dnl Automake 1.8 to 1.9.6 used to define mkdir_p.  We now use MKDIR_P,
+dnl while keeping a definition of mkdir_p for backward compatibility.
+dnl @MKDIR_P@ is magic: AC_OUTPUT adjusts its value for each Makefile.
+dnl However we cannot define mkdir_p as $(MKDIR_P) for the sake of
+dnl Makefile.ins that do not define MKDIR_P, so we do our own
+dnl adjustment using top_builddir (which is defined more often than
+dnl MKDIR_P).
+AC_SUBST([mkdir_p], ["$MKDIR_P"])dnl
+case $mkdir_p in
+  [[\\/$]]* | ?:[[\\/]]*) ;;
+  */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;;
+esac
+])
+
+# Helper functions for option handling.                     -*- Autoconf -*-
+
+# Copyright (C) 2001, 2002, 2003, 2005, 2008, 2010 Free Software
+# Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 5
+
+# _AM_MANGLE_OPTION(NAME)
+# -----------------------
+AC_DEFUN([_AM_MANGLE_OPTION],
+[[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])])
+
+# _AM_SET_OPTION(NAME)
+# --------------------
+# Set option NAME.  Presently that only means defining a flag for this option.
+AC_DEFUN([_AM_SET_OPTION],
+[m4_define(_AM_MANGLE_OPTION([$1]), 1)])
+
+# _AM_SET_OPTIONS(OPTIONS)
+# ------------------------
+# OPTIONS is a space-separated list of Automake options.
+AC_DEFUN([_AM_SET_OPTIONS],
+[m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])])
+
+# _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET])
+# -------------------------------------------
+# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise.
+AC_DEFUN([_AM_IF_OPTION],
+[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])])
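+
+# A brief sketch of how the option helpers fit together (the option names
+# are real Automake options):
+#
+#   _AM_SET_OPTIONS([foreign no-dependencies])
+#   _AM_IF_OPTION([no-dependencies],
+#                 [],
+#                 [_AM_DEPENDENCIES(CC)])
+#
+# _AM_MANGLE_OPTION turns `no-dependencies' into the flag macro
+# _AM_OPTION_no_dependencies, which m4_ifset then tests.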
+
+# Check to make sure that the build environment is sane.    -*- Autoconf -*-
+
+# Copyright (C) 1996, 1997, 2000, 2001, 2003, 2005, 2008
+# Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 5
+
+# AM_SANITY_CHECK
+# ---------------
+AC_DEFUN([AM_SANITY_CHECK],
+[AC_MSG_CHECKING([whether build environment is sane])
+# Just in case
+sleep 1
+echo timestamp > conftest.file
+# Reject unsafe characters in $srcdir or the absolute working directory
+# name.  Accept space and tab only in the latter.
+am_lf='
+'
+case `pwd` in
+  *[[\\\"\#\$\&\'\`$am_lf]]*)
+    AC_MSG_ERROR([unsafe absolute working directory name]);;
+esac
+case $srcdir in
+  *[[\\\"\#\$\&\'\`$am_lf\ \	]]*)
+    AC_MSG_ERROR([unsafe srcdir value: `$srcdir']);;
+esac
+
+# Do `set' in a subshell so we don't clobber the current shell's
+# arguments.  Must try -L first in case configure is actually a
+# symlink; some systems play weird games with the mod time of symlinks
+# (eg FreeBSD returns the mod time of the symlink's containing
+# directory).
+if (
+   set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
+   if test "$[*]" = "X"; then
+      # -L didn't work.
+      set X `ls -t "$srcdir/configure" conftest.file`
+   fi
+   rm -f conftest.file
+   if test "$[*]" != "X $srcdir/configure conftest.file" \
+      && test "$[*]" != "X conftest.file $srcdir/configure"; then
+
+      # If neither matched, then we have a broken ls.  This can happen
+      # if, for instance, CONFIG_SHELL is bash and it inherits a
+      # broken ls alias from the environment.  This has actually
+      # happened.  Such a system could not be considered "sane".
+      AC_MSG_ERROR([ls -t appears to fail.  Make sure there is not a broken
+alias in your environment])
+   fi
+
+   test "$[2]" = conftest.file
+   )
+then
+   # Ok.
+   :
+else
+   AC_MSG_ERROR([newly created file is older than distributed files!
+Check your system clock])
+fi
+AC_MSG_RESULT(yes)])
+
+# Copyright (C) 2001, 2003, 2005, 2011 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 1
+
+# AM_PROG_INSTALL_STRIP
+# ---------------------
+# One issue with vendor `install' (even GNU) is that you can't
+# specify the program used to strip binaries.  This is especially
+# annoying in cross-compiling environments, where the build's strip
+# is unlikely to handle the host's binaries.
+# Fortunately install-sh will honor a STRIPPROG variable, so we
+# always use install-sh in `make install-strip', and initialize
+# STRIPPROG with the value of the STRIP variable (set by the user).
+AC_DEFUN([AM_PROG_INSTALL_STRIP],
+[AC_REQUIRE([AM_PROG_INSTALL_SH])dnl
+# Installed binaries are usually stripped using `strip' when the user
+# runs `make install-strip'.  However `strip' might not be the right
+# tool to use in cross-compilation environments; therefore Automake
+# will honor the `STRIP' environment variable to overrule this program.
+dnl Don't test for $cross_compiling = yes, because it might be `maybe'.
+if test "$cross_compiling" != no; then
+  AC_CHECK_TOOL([STRIP], [strip], :)
+fi
+INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
+AC_SUBST([INSTALL_STRIP_PROGRAM])])
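+
+# Usage sketch (the cross strip tool name is hypothetical): in a cross build
+# one can run
+#
+#   make install-strip STRIP=arm-linux-gnueabi-strip
+#
+# and install-sh then strips the installed binaries with that program (via
+# STRIPPROG) instead of the build machine's `strip'.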
+
+# Copyright (C) 2006, 2008, 2010 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 3
+
+# _AM_SUBST_NOTMAKE(VARIABLE)
+# ---------------------------
+# Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in.
+# This macro is traced by Automake.
+AC_DEFUN([_AM_SUBST_NOTMAKE])
+
+# AM_SUBST_NOTMAKE(VARIABLE)
+# --------------------------
+# Public sister of _AM_SUBST_NOTMAKE.
+AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)])
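+
+# Usage sketch (variable name hypothetical): for a substitution that must not
+# surface as a Makefile assignment, e.g. a multi-line value,
+#
+#   AC_SUBST([FOO_SNIPPET])
+#   AM_SUBST_NOTMAKE([FOO_SNIPPET])
+#
+# keeps config.status replacing @FOO_SNIPPET@ in files while Automake omits
+# the usual `FOO_SNIPPET = @FOO_SNIPPET@' line from Makefile.in.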
+
+# Check how to create a tarball.                            -*- Autoconf -*-
+
+# Copyright (C) 2004, 2005, 2012 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# serial 2
+
+# _AM_PROG_TAR(FORMAT)
+# --------------------
+# Check how to create a tarball in format FORMAT.
+# FORMAT should be one of `v7', `ustar', or `pax'.
+#
+# Substitute a variable $(am__tar) that is a command
+# writing to stdout a FORMAT-tarball containing the directory
+# $tardir.
+#     tardir=directory && $(am__tar) > result.tar
+#
+# Substitute a variable $(am__untar) that extracts such
+# a tarball read from stdin.
+#     $(am__untar) < result.tar
+AC_DEFUN([_AM_PROG_TAR],
+[# Always define AMTAR for backward compatibility.  Yes, it's still used
+# in the wild :-(  We should find a proper way to deprecate it ...
+AC_SUBST([AMTAR], ['$${TAR-tar}'])
+m4_if([$1], [v7],
+     [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'],
+     [m4_case([$1], [ustar],, [pax],,
+              [m4_fatal([Unknown tar format])])
+AC_MSG_CHECKING([how to create a $1 tar archive])
+# Loop over all known methods to create a tar archive until one works.
+_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none'
+_am_tools=${am_cv_prog_tar_$1-$_am_tools}
+# Do not fold the above two lines into one, because Tru64 sh and
+# Solaris sh will not grok spaces in the rhs of `-'.
+for _am_tool in $_am_tools
+do
+  case $_am_tool in
+  gnutar)
+    for _am_tar in tar gnutar gtar;
+    do
+      AM_RUN_LOG([$_am_tar --version]) && break
+    done
+    am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"'
+    am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"'
+    am__untar="$_am_tar -xf -"
+    ;;
+  plaintar)
+    # Must skip GNU tar: if it does not support --format= it doesn't create
+    # a ustar tarball either.
+    (tar --version) >/dev/null 2>&1 && continue
+    am__tar='tar chf - "$$tardir"'
+    am__tar_='tar chf - "$tardir"'
+    am__untar='tar xf -'
+    ;;
+  pax)
+    am__tar='pax -L -x $1 -w "$$tardir"'
+    am__tar_='pax -L -x $1 -w "$tardir"'
+    am__untar='pax -r'
+    ;;
+  cpio)
+    am__tar='find "$$tardir" -print | cpio -o -H $1 -L'
+    am__tar_='find "$tardir" -print | cpio -o -H $1 -L'
+    am__untar='cpio -i -H $1 -d'
+    ;;
+  none)
+    am__tar=false
+    am__tar_=false
+    am__untar=false
+    ;;
+  esac
+
+  # If the value was cached, stop now.  We just wanted to have am__tar
+  # and am__untar set.
+  test -n "${am_cv_prog_tar_$1}" && break
+
+  # tar/untar a dummy directory, and stop if the command works
+  rm -rf conftest.dir
+  mkdir conftest.dir
+  echo GrepMe > conftest.dir/file
+  AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar])
+  rm -rf conftest.dir
+  if test -s conftest.tar; then
+    AM_RUN_LOG([$am__untar <conftest.tar])
+    grep GrepMe conftest.dir/file >/dev/null 2>&1 && break
+  fi
+done
+rm -rf conftest.dir
+
+AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool])
+AC_MSG_RESULT([$am_cv_prog_tar_$1])])
+AC_SUBST([am__tar])
+AC_SUBST([am__untar])
+]) # _AM_PROG_TAR
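+
+# Usage sketch: this macro is normally reached through an Automake option
+# such as AM_INIT_AUTOMAKE([tar-ustar]) (see AM_INIT_AUTOMAKE above), after
+# which a rule can create and unpack archives portably (directory and file
+# names hypothetical):
+#
+#   tardir=mypackage-1.0 && $(am__tar) > mypackage-1.0.tar
+#   $(am__untar) < mypackage-1.0.tar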
+
diff --git a/autogen.sh b/autogen.sh
new file mode 100755
index 0000000..708a10d
--- /dev/null
+++ b/autogen.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+
+which aclocal    || { echo "no aclocal executable!" ; exit 1 ; }
+aclocal    || { echo "aclocal fails!" ; exit 1 ; }
+which autoheader || { echo "no autoheader executable!" ; exit 1 ; }
+autoheader || { echo "autoheader fails!" ; exit 1 ; }
+which autoconf   || { echo "no autoconf executable!" ; exit 1 ; }
+autoconf   || { echo "autoconf fails!" ; exit 1 ; }
+if test -f ltmain.sh ; then true ; else libtoolize -c || { echo "no libtoolize?" ; exit 1 ; } ; fi
+automake -c -Woverride --add-missing || { echo "no automake?" ; exit 1 ; }
+
diff --git a/bench/Makefile.am b/bench/Makefile.am
new file mode 100644
index 0000000..653a17a
--- /dev/null
+++ b/bench/Makefile.am
@@ -0,0 +1,5 @@
+subdir=bench
+
+EXTRA_DIST= \
+	*.sh
+
diff --git a/bench/Makefile.in b/bench/Makefile.in
new file mode 100644
index 0000000..ad590aa
--- /dev/null
+++ b/bench/Makefile.in
@@ -0,0 +1,434 @@
+# Makefile.in generated by automake 1.11.6 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software
+# Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+VPATH = @srcdir@
+am__make_dryrun = \
+  { \
+    am__dry=no; \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        echo 'am--echo: ; @echo "AM"  OK' | $(MAKE) -f - 2>/dev/null \
+          | grep '^AM OK$$' >/dev/null || am__dry=yes;; \
+      *) \
+        for am__flg in $$MAKEFLAGS; do \
+          case $$am__flg in \
+            *=*|--*) ;; \
+            *n*) am__dry=yes; break;; \
+          esac; \
+        done;; \
+    esac; \
+    test $$am__dry = yes; \
+  }
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/rsb-config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+ARFLAGS = @ARFLAGS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DOXYGEN = @DOXYGEN@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FC = @FC@
+FCFLAGS = @FCFLAGS@
+FGREP = @FGREP@
+GREP = @GREP@
+HELP2MAN = @HELP2MAN@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBRSB_ABI_VERSION = @LIBRSB_ABI_VERSION@
+LIBRSB_LIBRSB_VER = @LIBRSB_LIBRSB_VER@
+LIBRSB_MAIN_RELEASE = @LIBRSB_MAIN_RELEASE@
+LIBRSB_VERSION = @LIBRSB_VERSION@
+LIBRSB_VER_DATE = @LIBRSB_VER_DATE@
+LIBRSB_VER_MAJOR = @LIBRSB_VER_MAJOR@
+LIBRSB_VER_MINOR = @LIBRSB_VER_MINOR@
+LIBRSB_VER_PATCH = @LIBRSB_VER_PATCH@
+LIBRSB_VER_PRERS = @LIBRSB_VER_PRERS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+M4 = @M4@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NOUNROLLCFLAGS = @NOUNROLLCFLAGS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OCTAVE = @OCTAVE@
+OCTAVE_FLAGS = @OCTAVE_FLAGS@
+OPENMP_CFLAGS = @OPENMP_CFLAGS@
+OPENMP_FCFLAGS = @OPENMP_FCFLAGS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+RANLIB = @RANLIB@
+RSB_CONST_MAX_SUPPORTED_THREADS = @RSB_CONST_MAX_SUPPORTED_THREADS@
+RSB_DETECTED_MEM_HIERARCHY_INFO = @RSB_DETECTED_MEM_HIERARCHY_INFO@
+RSB_RSBENCH_CFLAGS = @RSB_RSBENCH_CFLAGS@
+RSB_RSBENCH_LIBS = @RSB_RSBENCH_LIBS@
+RSB_USER_SET_MEM_HIERARCHY_INFO = @RSB_USER_SET_MEM_HIERARCHY_INFO@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+SVN_REVISION = @SVN_REVISION@
+VERSION = @VERSION@
+WANT_COLUMN_UNLOOP_FACTORS = @WANT_COLUMN_UNLOOP_FACTORS@
+WANT_HALFWORD_INDICES = @WANT_HALFWORD_INDICES@
+WANT_LOOPING_KERNELS = @WANT_LOOPING_KERNELS@
+WANT_MATRIX_ALL_META_OPS = @WANT_MATRIX_ALL_META_OPS@
+WANT_MATRIX_ALL_OPS = @WANT_MATRIX_ALL_OPS@
+WANT_MATRIX_ALL_TYPES = @WANT_MATRIX_ALL_TYPES@
+WANT_MATRIX_BCOO_STORAGE = @WANT_MATRIX_BCOO_STORAGE@
+WANT_MATRIX_BCSS_STORAGE = @WANT_MATRIX_BCSS_STORAGE@
+WANT_MATRIX_LINKED_STORAGE = @WANT_MATRIX_LINKED_STORAGE@
+WANT_MATRIX_OPS = @WANT_MATRIX_OPS@
+WANT_MATRIX_STORAGE = @WANT_MATRIX_STORAGE@
+WANT_MATRIX_VB_STORAGE = @WANT_MATRIX_VB_STORAGE@
+WANT_ROW_UNLOOP_FACTORS = @WANT_ROW_UNLOOP_FACTORS@
+WANT_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR = @WANT_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR@
+WANT_SPSM_DIAG_CHECK = @WANT_SPSM_DIAG_CHECK@
+WANT_TYPES = @WANT_TYPES@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_ct_FC = @ac_ct_FC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+enable_openmp = @enable_openmp@
+enable_restrict = @enable_restrict@
+exec_prefix = @exec_prefix@
+have_grep = @have_grep@
+have_sed = @have_sed@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+subdir = bench
+EXTRA_DIST = \
+	*.sh
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in:  $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu bench/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --gnu bench/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure:  $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4):  $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+tags: TAGS
+TAGS:
+
+ctags: CTAGS
+CTAGS:
+
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic clean-libtool \
+	distclean distclean-generic distclean-libtool distdir dvi \
+	dvi-am html html-am info info-am install install-am \
+	install-data install-data-am install-dvi install-dvi-am \
+	install-exec install-exec-am install-html install-html-am \
+	install-info install-info-am install-man install-pdf \
+	install-pdf-am install-ps install-ps-am install-strip \
+	installcheck installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-generic \
+	mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/bench/dense.sh b/bench/dense.sh
new file mode 100755
index 0000000..4b65c79
--- /dev/null
+++ b/bench/dense.sh
@@ -0,0 +1,333 @@
+#!/bin/bash
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+#
+# This is a librsb developer tool, so don't expect documentation for it.
+#
+echo "#"
+export RSBDB_COMPARE_SWITCH=${RSBDB_COMPARE_SWITCH:---compare-competitors}
+export RSBDB_TIMES=${RSBDB_TIMES:-30}
+export RSBDB_RSBENCH=${RSBDB_RSBENCH:-./rsbench}
+export RSBDB_LABEL=${RSBDB_LABEL:-}
+export RSBDB_LISTFILE=${RSBDB_LISTFILE:-}
+#version=`grep '\<VERSION\>' config.h | sed 's/.*VERSION *//g;s/"//g;s/0\.//g'`
+#pversion=`grep '\<PACKAGE_VERSION\>' config.h | sed 's/.*VERSION *//g;s/"//g;s/0\.//g'`
+pversion=`./rsbench -C | grep version| sed 's/^.*version *: *//g;s/  */ /g' `
+export RSBDB_VERSION=${RSBDB_VERSION:-$pversion}
+#export RSBDB_MINDIMLOG=0
+#export RSBDB_MAXDIMLOG=11
+export RSBDB_MINDIMLOG=${RSBDB_MINDIMLOG:-5}
+if grep Xeon /proc/cpuinfo ; then
+export RSBDB_MAXDIMLOG=${RSBDB_MAXDIMLOG:-12}
+else
+export RSBDB_MAXDIMLOG=${RSBDB_MAXDIMLOG:-11}
+fi
+echo "#"
+env | grep ^RSBDB_
+echo "#"
+export RSBDB_FILESLIST=""
+#
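+# require exactly one argument consisting of a single blank (apparently a guard against accidental invocation):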
+if test "$#" != 1 -o x"$1" != x" " ; then echo -e "Invocation example: \nRSBDB_RSBENCH=./rsbench $0 ' ' "; exit; fi
+#
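+# proj: squeeze rsbench output into space-separated fields, keeping only the
+# (non-SERIAL) PERFORMANCE records; rproj: project out the two data columns plotted below.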
+function proj(){  sed "s/\s\s*/ /g;s/^..//g;s/:/ /g"  | cut -f 1,2,5,9  -d  \  | sed "s/ / /" | grep PERFORMANCE| grep -v SERIAL | sed 's/\<PERFORMANCE/RSB_PERFORMANCE/g' ; }
+function rproj() { cut -d \  -f 2,4 ; }
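+# plotstuff: turn a benchmark log into a gnuplot script (one curve per
+# label/thread-count pair) and render it to an EPS file.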
+function plotstuff()
+{
+export RSBDB_FILESLIST="${RSBDB_FILESLIST} $1"
+shopt -s expand_aliases
+pentries=`cat "$1" | proj | cut -f 1 -d \   | sort | uniq`
+nthreads=`cat "$1" | proj | cut -f 3 -d \   | sort | uniq`
+pbins=`cat "$1" | proj `
+#echo "$pbins"
+#echo $pentries $nthreads
+pf=${1/log/gp}
+{
+of=${1/log/eps}
+echo "set term postscript color eps enhanced "
+echo "set title \""${1}\\n${cinfo}"\" "
+echo "set logscale x"
+echo "set key left Left"
+echo "set xtics autofreq 2"
+echo "set xlabel \"matrix dimension\""
+echo "set ylabel \"MFLOPS\""
+echo "set output \"${of}\""
+echo -en plot
+for nt in $nthreads ; do
+for ne in $pentries ; do
+	ptitle=`echo $ne-$nt | sed s/_/-/g`
+	echo -en ' "-" u 1:2 title "'$ptitle'" with linespoints ,'
+done
+done | sed 's/,$//g'
+echo
+for nt in $nthreads ; do
+for ne in $pentries ; do
+	FILTER="^$ne [[:graph:]][[:graph:]]* $nt "
+	cat "$1" |  proj | grep "${FILTER}"  | rproj
+	echo "#grep ${FILTER}:"
+	echo e
+	echo 
+done
+done
+} > $pf
+gnuplot $pf
+}
+
+# have:
+# * dense SPMV
+# * dense SPMV, with varying cache blocking
+# * dense-spaced SPMV
+# * dense SPMV, COO
+# * dense SPMV, CSR
+# * dense SPMV with incx
+# * dense SPSV
+
+# need:
+# * dense SPSV, with varying cache blocking
+# * types
+
+CFLAGS=`./rsbench -I | grep CFLAGS| sed 's/CFLAGS *: *//g;s/  */ /g'  `
+CC=`./rsbench -I | grep CC| sed 's/CC *: *//g;s/  */ /g'`
+MKLINFO=`./rsbench   -oa -Ob   -R --lower 10 -qH --verbose  ${RSBDB_COMPARE_SWITCH} | grep '#%:MKL'`
+export cinfo="`echo ${CC} ${CFLAGS} // ${MKLINFO} | sed 's/\(.\{80\}\)/\1\\\n/g'`"
+
+if false ; then
+nproc="`./rsbench -I | grep processors.online | sed 's/.*://' | sed s'/ //g'`"
+
+if test $nproc = 4 ; then
+	corecombs="1,2,4"
+elif test $nproc = 6 ; then
+	corecombs="1,2,4,6"
+elif test $nproc = 8 ; then
+	corecombs="1,2,4,8"
+elif test $nproc = 12 ; then
+	#corecombs="1,2,6,12"
+	corecombs="1,2,4,6,8,12"
+elif test $nproc = 16 ; then
+	corecombs="1,2,4,8,16"
+elif test $nproc = 24 ; then
+	corecombs="1,2,4,8"
+elif test $nproc = 48 ; then
+	# Istanbul fix
+	corecombs="1,2,4,8,12"
+elif test $nproc = 32 ; then
+	# AIX fix
+	corecombs="1,2,4,8,16"
+elif test $nproc = 64 ; then
+	# AIX fix
+	corecombs="1,2,4,8,16"
+else
+	#corecombs=`seq $nproc| sed "s/ /,/g"`#evil : seq produces newlines
+	# FIXME : does not work on weird SP5's
+	corecombs=`seq $nproc| tr '\n' ' ' |  sed "s/\> \</,/g"`
+#else
+#	echo "uhm. did not recognize the number of cores!"
+#	exit
+fi
+else
+	corecombs=":"
+fi
+
+
+toytest=1 # NOTE: the only mode supported for now
+RB=${RSBDB_RSBENCH}
+#RB=./rsbench 
+SW="--echo-arguments --verbose ${RSBDB_COMPARE_SWITCH} --times ${RSBDB_TIMES}"
+RSB="-Fbo -qH -n $corecombs -R " 
+RSBDB_SPMV="$RB -oa -Ob $SW $RSB" 
+RSBDB_SPMVT="$RB -oa -Ob $SW $RSB --transpose" 
+RSBDB_SPMVSYM="$RB -oa -Ob $SW $RSB --as-symmetric" 
+RSBDB_SPSV="$RB -ot -Ob $SW $RSB"
+OB_SPMV="$RB -oa -Ob $SW -Fo" 
+CB_SPMV="$RB -oa -Ob $SW -Fb"
+
+###############################################################################
+if test x$toytest = x"1" ; then
+###############################################################################
+#seq=`seq 0 2`
+seq=`seq 0 0`
+spacings=`for p in $seq ; do echo $((2**p)) ; done  `
+dseq="`seq $RSBDB_MINDIMLOG $RSBDB_MAXDIMLOG`"
+#dseq=`seq 0 3`
+#dims="512 1024"
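+# dimensions: powers of two plus their 3/2 midpoints (just 1 for exponent 0)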
+dims=`for p in $dseq ; do if test $p = 0 ; then echo 1; else echo $((2**p)) $(( (3*2**p) / 2)) ; fi ; done  `
+#incxs="1 2 4"
+incxs="1"
+###############################################################################
+else
+###############################################################################
+#seq=`seq 14`
+#seq=`seq 0 10`
+seq=`seq 0 5`
+spacings=`for p in $seq ; do echo $((2**p)) ; done  `
+dims="512 1024 2048 4096"
+incxs="1 2 4 8 16 32"
+###############################################################################
+fi
+###############################################################################
+incxs=${RSBDB_INCXS:-$incxs}
+dims=${RSBDB_DIMS:-$dims}
+spacings=${RSBDB_SPACINGS:-$spacings}
+###############################################################################
+# uncomment the following for testing purposes
+#dims="256 512 "
+#dims="$dims"
+ddims="$dims"
+sdims="$dims"
+###############################################################################
+
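+# tic: timestamp id of the form YYYYmmddHHMMSS.unixtime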
+tic() { echo `date +%Y%m%d%H%M%S`.`date +%s`; }
+
+TYPES=`grep RSB_NUMERICAL_TYPE_PREPROCESSOR_SYMBOLS types.h | sed 's/  */ /g;s/"//g' | cut -d \  -f 3-`
+
+for type in $TYPES; do  
+mbln=microbench-librsb-$RSBDB_VERSION-$type
+df=--dense
+lf=--lower
+# FIXME: AIX has problems recognizing long switches
+# 20110109  but these short options seem buggy either way
+#df=-d
+#lf=-l
+
+if test -z ${RSBDB_LABEL} ; then did=`tic` ; else did="${RSBDB_LABEL}" ; fi
+export RSBDB_LISTFILE=${RSBDB_LISTFILE:-$mbln-$did-files.txt}
+rm -f ${RSBDB_LISTFILE}
+
+cp $0 $mbln-$did-script.sh
+ldd ./rsbench  > $mbln-$did-ldd.txt
+export RSBDB_FILESLIST="${RSBDB_FILESLIST} $mbln-$did-ldd.txt $mbln-$did-script.sh"
+
+dfnopt=" -T $type --override-matrix-name "
+if test -z ${RSBDB_LABEL} ; then did=`tic` ; else did="${RSBDB_LABEL}" ; fi
+ln=$mbln-$did.tmp
+
+for dim in $sdims ; do for spa in $spacings ; do 
+	$RSBDB_SPMVSYM $lf $dim --generate-spacing $spa $dfnopt $dim
+done ; done  2>&1 | tee $ln
+if test -z ${RSBDB_LABEL} ; then dod=`tic` ; else dod="" ; fi
+fn=${ln//.tmp/}$dod-symspmv-spaced.log
+mv $ln $fn 
+plotstuff "$fn"
+
+if test -z ${RSBDB_LABEL} ; then did=`tic` ; else did="${RSBDB_LABEL}" ; fi
+ln=$mbln-$did.tmp
+
+for dim in $sdims ; do for spa in $spacings ; do 
+	$RSBDB_SPMVT $df $dim --generate-spacing $spa $dfnopt $dim
+done ; done  2>&1 | tee $ln
+if test -z ${RSBDB_LABEL} ; then dod=`tic` ; else dod="" ; fi
+fn=${ln//.tmp/}$dod-spmvt-spaced.log
+mv $ln $fn 
+plotstuff "$fn"
+
+if test -z ${RSBDB_LABEL} ; then did=`tic` ; else did="${RSBDB_LABEL}" ; fi
+ln=$mbln-$did.tmp
+
+for dim in $sdims ; do for spa in $spacings ; do 
+	$RSBDB_SPMV $df $dim --generate-spacing $spa $dfnopt $dim
+done ; done  2>&1 | tee $ln
+if test -z ${RSBDB_LABEL} ; then dod=`tic` ; else dod="" ; fi
+fn=${ln//.tmp/}$dod-spmv-spaced.log
+mv $ln $fn 
+plotstuff "$fn"
+
+
+if test -z ${RSBDB_LABEL} ; then did=`tic` ; else did="${RSBDB_LABEL}" ; fi
+ln=$mbln-$did.tmp
+for dim in $sdims ; do
+	$RSBDB_SPSV $lf $dim $dfnopt $dim
+done  2>&1 | tee $ln
+if test -z ${RSBDB_LABEL} ; then dod=`tic` ; else dod="" ; fi
+fn=${ln//.tmp/}$dod-spsv-solve.log
+mv $ln $fn 
+plotstuff "$fn"
+
+if false ; then 
+
+did=`tic`
+ln=$mbln-$did.tmp
+for dim in $sdims ; do
+	$CB_SPMV $df $dim $dfnopt $dim
+done  2>&1 | tee $ln
+if test -z ${RSBDB_LABEL} ; then dod=`tic` ; else dod="" ; fi
+fn=${ln//.tmp/}$dod-spmv-csr.log
+mv $ln $fn 
+plotstuff "$fn"
+
+
+did=`tic`
+ln=$mbln-$did.tmp
+for dim in $sdims ; do
+	$OB_SPMV $df $dim $dfnopt $dim
+done  2>&1 | tee $ln
+if test -z ${RSBDB_LABEL} ; then dod=`tic` ; else dod="" ; fi
+fn=${ln//.tmp/}$dod-spmv-coo.log
+mv $ln $fn 
+plotstuff "$fn"
+
+
+did=`tic`
+ln=$mbln-$did.tmp
+for dim in $ddims ; do for i in $incxs ; do
+	$CB_SPMV $df $dim --incx $i $dfnopt $dim
+done ; done  2>&1 | tee $ln
+if test -z ${RSBDB_LABEL} ; then dod=`tic` ; else dod="" ; fi
+fn=${ln//.tmp/}$dod-spmv-dense-csr-incx.log
+mv $ln $fn 
+plotstuff "$fn"
+
+
+did=`tic`
+ln=$mbln-$did.tmp
+for dim in $ddims ; do for i in $incxs ; do
+	$CB_SPMV $df $dim --incy $i $dfnopt $dim
+done ; done  2>&1 | tee $ln
+if test -z ${RSBDB_LABEL} ; then dod=`tic` ; else dod="" ; fi
+fn=${ln//.tmp/}$dod-spmv-dense-csr-incy.log
+mv $ln $fn 
+plotstuff "$fn"
+
+did=`tic`
+ln=$mbln-$did.tmp
+for dim in $ddims ; do for i in $incxs ; do
+	$RSBDB_SPMV $df $dim --incx $i $dfnopt $dim
+done ; done  2>&1 | tee $ln
+if test -z ${RSBDB_LABEL} ; then dod=`tic` ; else dod="" ; fi
+fn=${ln//.tmp/}$dod-spmv-dense-rsb-incx.log
+mv $ln $fn 
+plotstuff "$fn"
+
+
+did=`tic`
+ln=$mbln-$did.tmp
+for dim in $ddims ; do for i in $incxs ; do
+	$RSBDB_SPMV $df $dim --incy $i $dfnopt $dim
+done ; done  2>&1 | tee $ln
+if test -z ${RSBDB_LABEL} ; then dod=`tic` ; else dod="" ; fi
+fn=${ln//.tmp/}$dod-spmv-dense-rsb-incy.log
+mv $ln $fn 
+plotstuff "$fn"
+
+fi
+done
+#
+echo ${RSBDB_FILESLIST} ${RSBDB_FILESLIST//log/gp} > ${RSBDB_LISTFILE}
+#
+# FIXME: runs with differing cache blocking are still missing
+
diff --git a/bench/dense_quick.sh b/bench/dense_quick.sh
new file mode 100755
index 0000000..bc64ec8
--- /dev/null
+++ b/bench/dense_quick.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+# This script is intended for librsb developer usage.
+
+export RSBDB_MAXMEM=${RSBDB_MAXMEM:-}
+if test -z "${RSBDB_MAXMEM}" ; then RSBDB_MAXMEM=`cat /proc/meminfo  | grep MemTotal  | sed 's/MemTotal: *\([0-9]*\)\s*kB/\1/g'` ; RSBDB_MAXMEM=$((${RSBDB_MAXMEM}/1024)) ;  fi
+if test -z "${RSBDB_MAXMEM}" ; then RSBDB_MAXMEM=`cat /proc/meminfo  | grep MemTotal  | sed 's/MemTotal: *\([0-9]*\)\s*MB/\1/g'` ; RSBDB_MAXMEM=${RSBDB_MAXMEM} ;  fi
+export RSBDB_MF='8'
+export RSBDB_BPNZ=${RSBDB_BPNZ:-32}
+#echo ${RSBDB_MAXMEM}
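+# largest dense dimension whose nonzeroes (at RSBDB_BPNZ bytes each) fit in 1/RSBDB_MF of total memory: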
+maxdim=`perl -e "print int(sqrt(((${RSBDB_MAXMEM}*1024*1024)/${RSBDB_BPNZ})/${RSBDB_MF}))"`
+dimv=4;
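+# dimensions: a doubling series plus ~sqrt(2) midpoints (99/70 approximates sqrt(2)):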
+dims=`while test $dimv -lt $maxdim ; do echo $dimv $(( ($dimv*99)/70 )) ;dimv=$((dimv*2)); done` 
+#echo $dims
+up=12
+export RSBDB_DIMS="$dims"
+export RSBDB_SPACINGS='1'
+export RSBDB_INCXS='1'
+RSBDB_TIMES=${RSBDB_TIMES:-30}
+#export| grep RSBDB_
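+# note: the escaped trailing blank passes the single ' ' argument dense.sh insists on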
+bench/dense.sh \ 
diff --git a/bench/rplot.sh b/bench/rplot.sh
new file mode 100755
index 0000000..596b323
--- /dev/null
+++ b/bench/rplot.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+# This script is intended for librsb developer usage.
+# Recursion plot script.
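+# For each matrix it renders one EPS recursion plot per flag combination,
+# via "rsbench --plot-matrix" (see the loop below).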
+
+e=./rsbench
+
+make $e || exit
+
+md=~/matrices
+echo $md
+
+for f in $md/*.mtx ; do
+	#echo $f
+	hn=`hostname`
+	d=`date +%Y%m%d`
+	# FIXME : this substitution will not work on SP5's bash
+	m=${f/.mtx/}
+	m="`basename $m`"
+	for p in "-E" "-C" " " ; do
+	for h in "-H" " " ; do
+	for l in "-L" " " ; do
+	for D in "-D" " " ; do
+		# -T will force lower triangular
+		flags="$p $h $l $D"
+		flagsn=${flags// }
+		ofn="recplot-$hn-$d-$m-$flagsn.eps"
+		echo "$e --plot-matrix -f $f -aRzd -r1 -c1 -Fbr -T $flags  > $ofn"
+		$e --plot-matrix -f $f -aRzd -r1 -c1 -Fbr -T $flags  >  $ofn
+	done
+	done
+	done
+	done
+done
+
diff --git a/bench/spmv.sh b/bench/spmv.sh
new file mode 100755
index 0000000..d469e7f
--- /dev/null
+++ b/bench/spmv.sh
@@ -0,0 +1,235 @@
+#!/bin/bash
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+# This script is horrible.
+# This script is intended for librsb developer usage.
+
+#
+# to force single thread execution non recursive, should invoke after exporting
+# RSB_BENCH_FORMATS=o RSB_BENCH_THREADS=1 RSB_WANT_Q=" " RSB_BENCH_RECURSION=" "
+#
+# to force Z sorted COO:
+# RSB_BENCH_FORMATS=o RSB_BENCH_THREADS=1 RSB_WANT_Q=" " RSB_BENCH_RECURSION=" " RSB_BENCH_EXTRA="--z-sorted-coo"
+#
+# to force Zig Zag CSR:
+# RSB_BENCH_FORMATS=b RSB_BENCH_THREADS=1 RSB_WANT_Q=" " RSB_BENCH_RECURSION=" " RSB_BENCH_EXTRA="--zig-zag"
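+#
+# for instance, a full invocation could look like this (the matrix directory
+# here is just an example path):
+# RSB_BENCH_FORMATS=o RSB_BENCH_THREADS=1 RSB_WANT_Q=" " RSB_BENCH_RECURSION=" " bench/spmv.sh ~/matrices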
+#
+#svn revert INSTALL # so no svn diff
+# this will fail often, but it's not a problem :)
+#
+# TODO: need to set affinity. shall work on:
+# - OMP_CPU_BIND # 0 or 1
+# - GOMP_CPU_AFFINITY='0-16:2' # alternate the CPUs
+# - KMP_AFFINITY=verbose,granularity=thread,proclist=[0,1,2,3],explicit
+# - KMP_AFFINITY=verbose,compact
+# - KMP_AFFINITY=verbose,interleaved
+# - KMP_AFFINITY=verbose,
+# - KMP_VERSION=1
+# 
+# TODO: rename these variables
+#
+RSB_BENCH_THREADS=${RSB_BENCH_THREADS:=}
+RSB_BENCH_HALFWORD=${RSB_WANT_Q:=}
+RSB_BENCH_BB=${RSB_WANT_BB:=}
+RSB_BENCH_EXTRA=${RSB_BENCH_EXTRA:=}
+RSB_BENCH_RECURSION=${RSB_BENCH_RECURSION:=-R}
+RSB_BENCH_FORMATS=${RSB_BENCH_FORMATS:=ob}
+RSB_BENCH_COMPARE_OPTION=${RSB_BENCH_COMPARE_OPTION:=--compare-competitors}
+#RSB_BENCH_AUTOTUNE_OPTION=${RSB_BENCH_AUTOTUNE_OPTION:=--want-autotune 3s10x-1t}
+RSB_BENCH_AUTOTUNE_OPTION=${RSB_BENCH_AUTOTUNE_OPTION:=}
+RSB_SKIP_SYMMETRIC=${RSB_SKIP_SYMMETRIC:=0}
+RSB_SKIP_COMPLEX=${RSB_SKIP_COMPLEX:=0}
+RSB_SKIP_UNSYMMETRIC=${RSB_SKIP_UNSYMMETRIC:=0}
+RSB_MATDIR=${RSB_MATDIR:=}
+RSB_TRANSPOSITIONS=${RSB_TRANSPOSITIONS:=--notranspose --transpose}
+#RSB_TRANSPOSITIONS=${RSB_TRANSPOSITIONS:=--also-transpose}
+RSB_BENCH_REPEAT_CONSTRUCTOR=${RSB_BENCH_REPEAT_CONSTRUCTOR:="5"}
+RSB_WANT_CP_TO_SHM=${RSB_WANT_CP_TO_SHM:=0}
+RSB_SHM=/dev/shm
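+# with RSB_WANT_CP_TO_SHM=1, each matrix is copied into the RAM-backed ${RSB_SHM}
+# before being benchmarked and removed afterwards (see the main loop below)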
+
+#version=`svnversion` 
+bid=`date +%Y%m%d%H%M%S`.`date +%s` # date id
+version=`grep '\<VERSION\>' config.h | sed 's/.*VERSION *//g;s/"//g;s/0\.//g'`
+
+# Matrices directory:
+arg="$@"
+
+#if true ; then
+#if false ; then
+if ! test -f rsbench ; then
+#touch configure.ac
+#sh autogen.sh
+./configure --enable-sparse-blas-interface --enable-matrix-types=blas --enable-matrix-ops=blas --enable-openmp
+make cleanall
+make clean
+make
+make feedback || exit
+[ -f rsbench ] || exit
+
+fi
+
+did=`date +%Y%m%d%H%M%S`.`date +%s` # date id
+md=~/matrices
+
+#matrices=$md/raefsky4.mtx
+if true ; then
+	matrices=""
+	md=
+	if test "x$arg" != x ; then
+		md="$arg" ;echo $md;
+	else
+		if test x"${RSB_MATDIR}" != x ; then  md="${RSB_MATDIR}" ; fi
+	fi
+	om=`find $md -name \*.mtx -or -name \*.mtx.gz | sort`
+	for m in $om ; do
+		#f=$md/$m
+		if test "x$RSB_SKIP_SYMMETRIC" = x1 ; then if head -n 1 "$m" | grep -i "symmetric\|hermitian" > /dev/null ; then echo "skipping $m" ; continue ; fi ; fi
+		if test "x$RSB_SKIP_COMPLEX" = x1 ; then if head -n 1 "$m" | grep -i "complex" > /dev/null ; then echo "skipping $m" ; continue ; fi ; fi
+		if test "x$RSB_SKIP_UNSYMMETRIC" = x1 ; then if head -n 1 "$m" | grep -i "general" > /dev/null ; then echo "skipping $m" ; continue ; fi ; fi
+		f=$m
+		matrices="$matrices $f"
+	done
+fi
+#echo $matrices
+
+hn=`hostname`
+od=bench-svn$version-$hn-$did
+mkdir -p $od || exit
+
+# we gather processor info, if available.
+cat /proc/cpuinfo > $od/$hn-cpuinfo.txt
+cat /proc/meminfo > $od/$hn-meminfo.txt
+x86info           > $od/$hn-x86info.txt
+cpuid             > $od/$hn-cpuid.txt
+cpuinfo           > $od/$hn-cpuinfo-tool.txt # separate file: do not clobber the /proc/cpuinfo dump above
+numactl --hardware > $od/$hn-numactl-H.txt # -H seems not to work on some systems
+cp config.h         $od/ && gzip $od/config.h     
+cp config.log       $od/ && gzip $od/config.log
+cp Makefile         $od/ && gzip $od/Makefile
+tar czf $od/sys-cpu.tgz /sys/devices/system/cpu/
+cp bench/spmv.sh    $od/ && gzip $od/spmv.sh
+env                > $od/env.txt
+
+./rsbench -v  > $od/rsbench-v.txt 2>&1
+./rsbench -C  > $od/rsbench-C.txt 2>&1
+#./rsbench -M  > $od/rsbench-M.txt 2>&1 # why comment this ? because it's very slow. (put it at the end)
+./rsbench -I  > $od/rsbench-I.txt 2>&1
+ldd ./rsbench  > $od/ldd-rsbench.txt
+
+( for m in $matrices ; do echo $m ; done ) > $od/matrices-list.txt
+( for m in $matrices ; do ls -l $m ; done ) > $od/matrices-list-l.txt
+
+#nproc="`cat /proc/cpuinfo | grep ^processor | wc -l`"
+nproc="`./rsbench -I | grep processors.online | sed 's/.*://' | sed s'/ //g'`"
+
+if test $nproc = 4 ; then
+	corecombs="1,2,4"
+elif test $nproc = 6 ; then
+	corecombs="1,2,4,6"
+elif test $nproc = 8 ; then
+	corecombs="1,2,4,8"
+elif test $nproc = 12 ; then
+	#corecombs="1,2,6,12"
+	corecombs="1,2,4,6,8,12"
+elif test $nproc = 16 ; then
+	corecombs="1,2,4,8,16"
+elif test $nproc = 24 ; then
+	corecombs="1,2,4,8"
+elif test $nproc = 48 ; then
+	# Istanbul fix
+	corecombs="1,2,4,8,12"
+elif test $nproc = 32 ; then
+	# AIX fix
+	corecombs="1,2,4,8,16"
+elif test $nproc = 64 ; then
+	# AIX fix
+	corecombs="1,2,4,8,16"
+else
+	#corecombs=`seq $nproc| sed "s/ /,/g"`#evil : seq produces newlines
+	# FIXME : does not work on weird SP5's
+	corecombs=`seq $nproc| tr '\n' ' ' |  sed "s/\> \</,/g"`
+#else
+#	echo "uhm. did not recognize the number of cores!"
+#	exit
+fi
+
+corecombs=${RSB_BENCH_THREADS:=$corecombs}
+
+
+if test x`hostname` = x"drachma" ; then n="numactl --physcpubind=0-11 --membind=0-1 " ; else n="" ; fi
+
+for take in '-take1' '-take2' ;
+do
+ont=$nproc
+#ont=""
+
+#export OMP_NUM_THREADS=$ont
+#types=${RSB_BENCH_TYPES:=D S C Z}
+types=${RSB_BENCH_TYPES:=D}
+if test "x$RSB_CACHE_FLUSH" = x1 ; then cache_flush=--cache-flush ; else cache_flush=''; fi
+
+#for s in   -R ; do for F in br bc ; do for m in $matrices ; do
+#for s in  -R ; do for F in br ; do for m in $matrices ; do
+for T in $types ; do for s in "$RSB_BENCH_RECURSION" ; do for F in "$RSB_BENCH_FORMATS" ; do for m in $matrices ; do 
+	mn=`basename $m`
+	if test x${RSB_WANT_CP_TO_SHM} = x"1" ; then
+		cp --dereference $m ${RSB_SHM} || exit ; m="${RSB_SHM}/$mn" ;
+	fi
+
+	for transa in ${RSB_TRANSPOSITIONS} ; do
+
+#for s in   -R ; do for F in br bc ; do for m in $md/*.mtx ; do
+	times=100
+	# IBM sp5's stat is BUGGED : do not use it
+        if test $(( `du -sk "$m" | cut -f 1` > 500000 )) = 1 ; then times=$((times/2)) ; fi
+#	if test $(( `du -sb "$m" | cut -f 1` > 500000000 )) = 1 ; then times=$((times/2)) ; fi
+#	if test $(( `stat --format "%s" "$m"` > 500000000 )) = 1 ; then times=$((times/2)) ; fi
+	did=`date +%Y%m%d%H%M%S`.`date +%s` # date id
+	# FIXME : using -TD segfaults (it's due to the 'T' switch)!
+	# use short options, as getopt_long does not exist on some platforms.
+
+	DQ="-qH" # --cache-flush
+	Q=${RSB_BENCH_HALFWORD:-$DQ}
+	BB=${RSB_BENCH_BB:- --bounded-box=1}
+	Q="$Q $BB -V $transa --repeat-constructor $RSB_BENCH_REPEAT_CONSTRUCTOR " # --cache-flush
+
+	cmd="./rsbench -oa -Ob -F$F -f $m $s -t $times -r1 -c1 ${RSB_BENCH_COMPARE_OPTION} $Q -n $corecombs -T $T $cache_flush $RSB_BENCH_EXTRA"
+	echo $cmd
+	$cmd 2>&1 | tee $od/bench-spmv-svn$version-$hn-$did-$mn-$s$F-T$T-omp-"$ont"''$transa'-'cores$take.log
+
+done ;
+	if test x${RSB_WANT_CP_TO_SHM} = x"1" ; then
+		rm $m || exit
+	fi
+done ; done ; done ; done
+
+done
+
+if [ `hostname | cut -c 1-6` != "helios" ]; then # 20120413 temporary!
+./rsbench -M  > $od/rsbench-M.txt 2>&1 # very slow, hence run at the end
+fi
+./rsbench -F  > $od/rsbench-F.txt 2>&1
+did=`date +%Y%m%d%H%M%S`.`date +%s` # date id
+echo "benchmarking began at $bid"
+echo "benchmarking ended at $did"
+mv $od $od-$did || exit
+
+
+
diff --git a/bench/trsv.sh b/bench/trsv.sh
new file mode 100755
index 0000000..1131f9e
--- /dev/null
+++ b/bench/trsv.sh
@@ -0,0 +1,199 @@
+#!/bin/bash
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+
+# This script is horrible.
+# This script is intended for librsb developer usage.
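+# It benchmarks triangular solve ("-ot") on the lower triangles of the given
+# matrices (see the rsbench invocation below).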
+
+#svn revert INSTALL # so no svn diff
+# this will fail often, but it's not a problem :)
+
+#version=`svnversion` 
+version=`grep '\<VERSION\>' config.h | sed 's/.*VERSION *//g;s/"//g;s/0\.//g'`
+
+arg="$1"
+
+#if true ; then
+#if false ; then
+if ! test -f rsbench ; then
+#touch configure.ac
+#sh autogen.sh
+./configure --enable-sparse-blas-interface --enable-matrix-types=blas --enable-matrix-ops=blas --disable-vb --with-openmp
+make cleanall
+make clean
+make
+make feedback || exit
+[ -f rsbench ] || exit
+
+fi
+
+did=`date +%Y%m%d%H%M%S`.`date +%s` # date id
+md=~/matrices_csb
+md=~/matrices_hawaii
+md=~/matrices
+
+matrices=$md/raefsky4.mtx
+# we use the L factors of the following matrices (e.g. as computed by SuperLU via 'rsbench -L'):
+if false ; then
+	md=~/matrices
+	om="memplus.mtx wang4.mtx ex11.mtx raefsky4.mtx goodwin.mtx lhr10.mtx"
+	for m in $om ; do
+		f=$md/$m
+		echo $f '->' ${f/.mtx/-superlu-L.mtx}
+		./rsbench -L $f > ${f/.mtx/-superlu.mtx}
+	done
+else
+	matrices=""
+	md=~/matrices
+	[ -d ~/matrices_ussv_colamd_L ] && md=~/matrices_ussv_colamd_L
+	[ -d /scratch/downloads/matrices_ussv_colamd_L/ ] && md=/scratch/downloads/matrices_ussv_colamd_L/
+	[ -d /opt/martone/matrices_ussv_colamd_L/ ] && md=/opt/martone/matrices_ussv_colamd_L/
+	if test "x$arg" != x ; then md="$arg" ;echo $md; fi
+	
+
+	om=`find $md -name \*.mtx`
+	#om="memplus.mtx wang4.mtx ex11.mtx raefsky4.mtx goodwin.mtx lhr10.mtx"
+	#mom="av41092 FEM_3D g7jac180 g7jac200 garon2 ns3Da ohne2 para-9 poisson3Db rajat31 rma10 sme3Dc torso1 venkat50"
+	#xom="stomach twotone"
+	for m in $om ; do
+		#f=$md/$m
+		f=$m
+		#ls -l $f
+#		echo $f
+	#	echo $f '->' ${f/.mtx/-superlu-L.mtx}
+	#	matrices="$matrices "${f/.mtx/-superlu.mtx}
+		matrices="$matrices $f"
+	done
+
+fi
+
+#echo $matrices
+
+hn=`hostname`
+od=bench-svn$version-$hn-$did
+mkdir -p $od || exit
+
+# we gather processor info, if available.
+cat /proc/cpuinfo > $od/$hn-cpuinfo.txt
+cat /proc/meminfo > $od/$hn-meminfo.txt
+x86info           > $od/$hn-x86info.txt
+cpuid             > $od/$hn-cpuid.txt
+cpuinfo           > $od/$hn-cpuinfo-tool.txt # separate file: do not clobber the /proc/cpuinfo dump above
+numactl --hardware > $od/$hn-numactl-H.txt # -H seems not to work on some systems
+cp config.h         $od/ && gzip $od/config.h     
+cp config.log       $od/ && gzip $od/config.log
+cp Makefile         $od/ && gzip $od/Makefile
+tar czf $od/sys-cpu.tgz /sys/devices/system/cpu/
+cp bench/trsv.sh    $od/ && gzip $od/trsv.sh
+env                > $od/env.txt
+
+./rsbench -v  > $od/rsbench-v.txt 2>&1
+./rsbench -C  > $od/rsbench-C.txt 2>&1
+./rsbench -M  > $od/rsbench-M.txt 2>&1
+./rsbench -F  > $od/rsbench-F.txt 2>&1
+./rsbench -I  > $od/rsbench-I.txt 2>&1
+ldd ./rsbench  > $od/ldd-rsbench.txt
+
+#nproc="`cat /proc/cpuinfo | grep ^processor | wc -l`"
+nproc="`./rsbench -I | grep processors.online | sed 's/.*://'`"
+
+if test $nproc = 4 ; then
+	corecombs="1,2,4"
+elif test $nproc = 6 ; then
+	corecombs="1,2,4,6"
+elif test $nproc = 8 ; then
+	corecombs="1,2,4,8"
+elif test $nproc = 12 ; then
+	#corecombs="1,2,6,12"
+	corecombs="1,2,4,6,8,12"
+elif test $nproc = 16 ; then
+	corecombs="1,2,4,8,16"
+elif test $nproc = 24 ; then
+	corecombs="1,2,4,8"
+elif test $nproc = 32 ; then
+	# AIX fix
+	corecombs="1,2,4,8,16"
+elif test $nproc = 48 ; then
+	# Istanbul fix
+	corecombs="1,2,4,8,12"
+elif test $nproc = 64 ; then
+	# AIX fix
+	corecombs="1,2,4,8,16"
+else
+	#corecombs=`seq $nproc| sed "s/ /,/g"`#evil : seq produces newlines
+	# FIXME : does not work on weird SP5's (nproc keeps a leading, useless space, hence the quoting later on)
+	corecombs=`seq $nproc| tr '\n' ' ' |  sed "s/\> \</,/g"`
+fi
+
+for take in '-take1' '-take2' ;
+do
+#for ont in `seq $nproc` ;
+#do
+nproc=${nproc/ /}
+ont=${nproc/ /}
+ 
+repeat_constructor=${repeat_constructor:="1"}
+
+if test x`hostname` = x"drachma" ; then n="numactl --physcpubind=0-11 --membind=0-1 " ; else n="" ; fi
+
+#export OMP_NUM_THREADS=$ont
+
+( for m in $matrices ; do echo $m ; done ) > $od/matrices-list.txt
+( for m in $matrices ; do ls -l $m ; done ) > $od/matrices-list-l.txt
+
+#export OMP_NUM_THREADS=$ont
+#types=${RSB_BENCH_TYPES:=D S C Z}
+types=${RSB_BENCH_TYPES:=D}
+if test "x$RSB_CACHE_FLUSH" = x1 ; then cache_flush=--cache-flush ; else cache_flush=''; fi
+
+#for s in   -R ; do for F in br bc ; do for m in $matrices ; do
+for T in $types ; do for s in   -R ; do for F in ob ; do for m in $matrices ; do for transa in --transpose --notranspose ; do
+#for s in   -R ; do for F in br bc ; do for m in $md/*.mtx ; do
+	mn=`basename $m`
+	times=100
+	# IBM sp5's stat is BUGGED : do not use it
+#	if test $(( `du -sb "$m" | cut -f 1` > 500000000 )) = 1 ; then times=$((times/2)) ; fi
+	# AIX's du does not have -b, but it does have -k.
+	if test $(( `du -sk "$m" | cut -f 1` > 500000 )) = 1 ; then times=$((times/2)) ; fi
+#	if test $(( `stat --format "%s" "$m"` > 500000000 )) = 1 ; then times=$((times/2)) ; fi
+	did=`date +%Y%m%d%H%M%S`.`date +%s` # date id
+
+	DQ="-qH" # --cache-flush
+	Q=${RSB_WANT_Q:-$DQ}
+	BB=${RSB_WANT_BB:- --bounded-box=1}
+	Q="$Q $BB -V $transa --repeat-constructor $repeat_constructor" # --cache-flush
+
+	# FIXME : using -TD segfaults (it's due to the 'T' switch)!
+	# use short options, as getopt_long does not exist on some platforms.
+	#cmd="$n ./rsbench -ot -Ob -F$F -f $m $s -t $times -r1 -c1 -Q -n $corecombs"
+	cmd="$n ./rsbench -ot -Ob -F$F -f $m $s -t $times -r1 -c1 --compare-competitors $Q -n $corecombs -T $T $cache_flush --only-lower-triangle"
+	#cmd="./rsbench -ot -Ob -F$F -f $m $s -t $times -r1 -c1 -qCDL -n $corecombs"
+	echo $cmd
+	$cmd 2>&1 | tee "$od/bench-trsv-svn$version-$hn-$did-$mn-$s$F-T$T-omp-"$ont"''$transa'-'''cores$take.log"
+
+done ; done ; done ; done ; done
+
+done
+#done
+did=`date +%Y%m%d%H%M%S`.`date +%s` # date id
+mv $od $od-$did || exit
+
+
+
diff --git a/blas_sparse.h b/blas_sparse.h
new file mode 100644
index 0000000..e1ed033
--- /dev/null
+++ b/blas_sparse.h
@@ -0,0 +1,577 @@
+
+
+/*!
+        @file
+        @author Michele Martone
+
+	@brief  This file specifies the Sparse BLAS interface to librsb.
+	Supported types  :(float,double,float complex,double complex) .
+	Unsupported types:() .
+	Level 1 ops      :(dot,axpy,ga,gz,sc) .
+	Level 2 ops      :(mv,sv) .
+	Level 3 ops      :(mm,sm) .
+*/
+
+#ifndef RSB_LIBSPBLAS_H_INCLUDED
+#define RSB_LIBSPBLAS_H_INCLUDED
+#ifndef RSB_RSB_H_INCLUDED
+#error "You are using Sparse BLAS headers from librsb -- You should include <rsb.h> first!"
+#endif /* RSB_RSB_H_INCLUDED */
+#ifndef BLAS_ENUM_H
+#define BLAS_ENUM_H
+
+  /* Enumerated types */
+
+/*! Used to specify a dense array's element layout. */
+enum blas_order_type {
+            blas_rowmajor = 101, /*!< Row major. */
+            blas_colmajor = 102  /*!< Column major. */ };
+
+/*! Used to specify a transposition operator to a matrix operand. */
+enum blas_trans_type {
+            blas_no_trans   = 111, /*!< No transposition. */
+            blas_trans      = 112, /*!< Transposition. */
+            blas_conj_trans = 113  /*!< Transposition and conjugation. */ };
+
+/*! Specifies (#BLAS_ussp) or inquires (#BLAS_usgp) upper or lower triangularity of a matrix. */
+enum blas_uplo_type  {
+            blas_upper = 121, /*!< Upper triangular matrix. */
+            blas_lower = 122  /*!< Lower triangular matrix. */ };
+
+/*! Specifies (#BLAS_ussp) or inquires (#BLAS_usgp) whether the matrix diagonal is (implicitly) unit or not. */
+enum blas_diag_type {
+            blas_non_unit_diag = 131,  /*!< Non-unit diagonal matrix (the default). */
+            blas_unit_diag     = 132   /*!< Unit diagonal matrix. */ };
+
+/*! Unused/Unsupported. */
+enum blas_side_type {
+            blas_left_side  = 141, /*!< Unsupported. */ 
+            blas_right_side = 142  /*!< Unsupported. */  };
+
+/*! Unused/Unsupported. */
+enum blas_cmach_type {
+            blas_base      = 151, /*!< Unsupported. */ 
+            blas_t         = 152, /*!< Unsupported. */ 
+            blas_rnd       = 153, /*!< Unsupported. */ 
+            blas_ieee      = 154, /*!< Unsupported. */ 
+            blas_emin      = 155, /*!< Unsupported. */ 
+            blas_emax      = 156, /*!< Unsupported. */ 
+            blas_eps       = 157, /*!< Unsupported. */ 
+            blas_prec      = 158, /*!< Unsupported. */ 
+            blas_underflow = 159, /*!< Unsupported. */ 
+            blas_overflow  = 160, /*!< Unsupported. */ 
+            blas_sfmin     = 161  /*!< Unsupported. */ };
+
+/*! Unused/Unsupported. */
+enum blas_norm_type {
+            blas_one_norm       = 171, /*!< Unsupported. */ 
+            blas_real_one_norm  = 172, /*!< Unsupported. */ 
+            blas_two_norm       = 173, /*!< Unsupported. */ 
+            blas_frobenius_norm = 174, /*!< Unsupported. */ 
+            blas_inf_norm       = 175, /*!< Unsupported. */ 
+            blas_real_inf_norm  = 176, /*!< Unsupported. */ 
+            blas_max_norm       = 177, /*!< Unsupported. */ 
+            blas_real_max_norm  = 178  /*!< Unsupported. */ };
+
+/*! Unused/Unsupported. */
+enum blas_sort_type {
+            blas_increasing_order = 181,  /*!< Unsupported. */ 
+            blas_decreasing_order = 182   /*!< Unsupported. */  };
+
+/*! Unused/Unsupported. */
+enum blas_conj_type {
+            blas_conj    = 191, /*!< Unsupported. */
+            blas_no_conj = 192  /*!< Unsupported. */ };
+
+/*! Unused/Unsupported. */
+enum blas_jrot_type {
+            blas_jrot_inner  = 201, /*!< Unsupported. */
+            blas_jrot_outer  = 202, /*!< Unsupported. */
+            blas_jrot_sorted = 203  /*!< Unsupported. */ };
+
+/*! Unused/Unsupported. */
+enum blas_prec_type {
+            blas_prec_single     = 211, /*!< Unsupported. */
+            blas_prec_double     = 212, /*!< Unsupported. */
+            blas_prec_indigenous = 213, /*!< Unsupported. */
+            blas_prec_extra      = 214  /*!< Unsupported. */ };
+
+/*! Index base (valid at matrix build/modify time). */
+enum blas_base_type {
+            blas_zero_base = 221, /*!< Zero based indices (default when matrix created using the C interface). */
+            blas_one_base  = 222  /*!< One based indices (default when matrix created using the Fortran interface). */ };
+
+/*! Symmetry properties. If not specified otherwise, valid for both #BLAS_ussp and #BLAS_usgp.
+ */
+enum blas_symmetry_type {
+            blas_general          = 231, /*!< General unsymmetric matrix (default). For #BLAS_usgp only. */
+            blas_symmetric        = 232, /*!< Symmetric matrix (either #blas_lower_symmetric or #blas_upper_symmetric). For #BLAS_usgp only. */
+            blas_hermitian        = 233, /*!< Hermitian matrix (either #blas_lower_hermitian or #blas_upper_hermitian). For #BLAS_usgp only. */
+            blas_triangular       = 234, /*!< Triangular matrix (either #blas_lower_triangular or #blas_upper_triangular). For #BLAS_usgp only. */
+            blas_lower_triangular = 235, /*!< Lower triangular matrix. */
+            blas_upper_triangular = 236, /*!< Upper triangular matrix. */
+            blas_lower_symmetric  = 237, /*!< Lower symmetric matrix. */
+            blas_upper_symmetric  = 238, /*!< Upper symmetric matrix. */
+            blas_lower_hermitian  = 239, /*!< Lower hermitian matrix. */
+            blas_upper_hermitian  = 240  /*!< Upper hermitian matrix. */ };
+
+/*! Numerical field type; can be used with #BLAS_usgp to inquire about a matrix numerical type (1 will be returned in case of success, 0 in case of failure). */
+enum blas_field_type {
+            blas_complex          = 241, /*!< Will succeed if matrix is of 'C' or 'Z' type. */
+            blas_real             = 242, /*!< Will succeed if matrix is of 'S' or 'D' type. */
+            blas_double_precision = 243, /*!< Will succeed if matrix is of 'D' or 'Z' type. */
+            blas_single_precision = 244  /*!< Will succeed if matrix is of 'S' or 'C' type. */ };
+
+/*! Quantities that can be obtained via #BLAS_usgp. */
+enum blas_size_type {
+            blas_num_rows      = 251, /*!< Get the matrix rows count. */
+            blas_num_cols      = 252, /*!< Get the matrix columns count. */
+            blas_num_nonzeros  = 253  /*!< Get the matrix nonzeros count. */ };
+
+/*! The following are not fully implemented. Usable with #BLAS_usgp. */
+enum blas_handle_type{
+            blas_invalid_handle = 261, /*!< Used to check whether the handle is invalid. */
+			blas_new_handle     = 262, /*!< Will give 1 if the handle is new. */
+			blas_open_handle    = 263, /*!< will give 1 if the handle is open. */
+			blas_valid_handle   = 264  /*!< Will give 1 if the handle is valid (that is, after #BLAS_duscr_end/#BLAS_zuscr_end/#BLAS_cuscr_end/#BLAS_zuscr_end). */ };
+
+/*! The following are usable with #BLAS_usgp only. */
+enum blas_sparsity_optimization_type {
+            blas_regular       = 271, /*!< Will give 0. */
+            blas_irregular     = 272, /*!< Will give 1. */
+            blas_block         = 273, /*!< Will give 0. */
+            blas_unassembled   = 274  /*!< Complementary to #blas_valid_handle. */ };
+
+/*! Properties suitable to be used with #BLAS_ussp/#BLAS_usgp. All of these are not in the Sparse BLAS standard. */
+enum blas_rsb_ext_type {
+            blas_rsb_spmv_autotuning_on   = 6660,	/*!< Turn on executing threads autotuning for #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. As an extension to the standard, the autotuning properties can be turned on/off at any time; if the autotuning feature has not been enabled at build time, using these properties will make the call fail. For more information, see #rsb_tune_spmm. (EXPERIMENTAL) */
+            blas_rsb_spmv_autotuning_off  = 6661,	/*!< Turn off executing threads autotuning for #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_spmv_n_autotuning_on   = 6662,	/*!< Turn on executing threads autotuning for untransposed #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_spmv_n_autotuning_off  = 6663,	/*!< Turn off executing threads autotuning for untransposed #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_spmv_t_autotuning_on   = 6664,	/*!< Turn on executing threads autotuning for transposed #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_spmv_t_autotuning_off  = 6665,	/*!< Turn off executing threads autotuning for transposed #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_autotune_next_operation= 6666,	/*!< Turn on executing threads autotuning for the next operation among #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_rep_rsb         = 9995,	/*!< Request/check for RSB representation. */
+            blas_rsb_rep_csr         = 9996,	/*!< Request/check for CSR representation. */
+            blas_rsb_rep_coo         = 9997,	/*!< Request/check for COO representation. */
+            blas_rsb_duplicates_ovw   = 9998,	/*!< Request/check for duplicate nonzeroes overwriting policy. */
+            blas_rsb_duplicates_sum   = 9999 	/*!< Request/check for duplicate nonzeroes summation policy. */
+};
+
+#endif /* BLAS_ENUM_H */
+
+/** the sparse matrix descriptor type */
+typedef int blas_sparse_matrix;
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+               /* Level 1 Computational Routines */
+int BLAS_susdot(enum blas_conj_type conj, int nnz, const float * x,
+		const int *indx, const float * y, int incy, float * r,
+		enum blas_base_type index_base);
+void blas_susdot_(enum blas_conj_type*conj,int*nnz,const float *x,const int *indx,const float *y,int*incy,float *r,enum blas_base_type*index_base,int*istat);
+int BLAS_dusdot(enum blas_conj_type conj, int nnz, const double * x,
+		const int *indx, const double * y, int incy, double * r,
+		enum blas_base_type index_base);
+void blas_dusdot_(enum blas_conj_type*conj,int*nnz,const double *x,const int *indx,const double *y,int*incy,double *r,enum blas_base_type*index_base,int*istat);
+int BLAS_cusdot(enum blas_conj_type conj, int nnz, const void *x,
+		const int *indx, const void *y, int incy, void *r,
+		enum blas_base_type index_base);
+void blas_cusdot_(enum blas_conj_type*conj,int*nnz,const void *x,const int *indx,const void *y,int*incy,void *r,enum blas_base_type*index_base,int*istat);
+int BLAS_zusdot(enum blas_conj_type conj, int nnz, const void *x,
+		const int *indx, const void *y, int incy, void *r,
+		enum blas_base_type index_base);
+void blas_zusdot_(enum blas_conj_type*conj,int*nnz,const void *x,const int *indx,const void *y,int*incy,void *r,enum blas_base_type*index_base,int*istat);
+
+int BLAS_susaxpy(int nnz, float  alpha, const float * x, const int *indx,
+                 float * y, int incy, enum blas_base_type index_base);
+void blas_susaxpy_(int*nnz,float*alpha,const float *x,const int *indx,float *y,int*incy,enum blas_base_type*index_base,int*istat);
+int BLAS_dusaxpy(int nnz, double  alpha, const double * x, const int *indx,
+                 double * y, int incy, enum blas_base_type index_base);
+void blas_dusaxpy_(int*nnz,double*alpha,const double *x,const int *indx,double *y,int*incy,enum blas_base_type*index_base,int*istat);
+int BLAS_cusaxpy(int nnz, const void * alpha, const void *x, const int *indx,
+                 void *y, int incy, enum blas_base_type index_base);
+void blas_cusaxpy_(int*nnz,const void *alpha,const void *x,const int *indx,void *y,int*incy,enum blas_base_type*index_base,int*istat);
+int BLAS_zusaxpy(int nnz, const void * alpha, const void *x, const int *indx,
+                 void *y, int incy, enum blas_base_type index_base);
+void blas_zusaxpy_(int*nnz,const void *alpha,const void *x,const int *indx,void *y,int*incy,enum blas_base_type*index_base,int*istat);
+
+int BLAS_susga(int nnz, const float * y, int incy, float * x, const int *indx,
+              enum blas_base_type index_base);
+void blas_susga_(int*nnz,const float *y,int*incy,float *x,const int *indx,enum blas_base_type*index_base,int*istat);
+int BLAS_dusga(int nnz, const double * y, int incy, double * x, const int *indx,
+              enum blas_base_type index_base);
+void blas_dusga_(int*nnz,const double *y,int*incy,double *x,const int *indx,enum blas_base_type*index_base,int*istat);
+int BLAS_cusga(int nnz, const void *y, int incy, void *x, const int *indx,
+              enum blas_base_type index_base);
+void blas_cusga_(int*nnz,const void *y,int*incy,void *x,const int *indx,enum blas_base_type*index_base,int*istat);
+int BLAS_zusga(int nnz, const void *y, int incy, void *x, const int *indx,
+              enum blas_base_type index_base);
+void blas_zusga_(int*nnz,const void *y,int*incy,void *x,const int *indx,enum blas_base_type*index_base,int*istat);
+
+int BLAS_susgz(int nnz, float * y, int incy, float * x, const int *indx,
+              enum blas_base_type index_base);
+void blas_susgz_(int*nnz,float *y,int*incy,float *x,const int *indx,enum blas_base_type*index_base,int*istat);
+int BLAS_dusgz(int nnz, double * y, int incy, double * x, const int *indx,
+              enum blas_base_type index_base);
+void blas_dusgz_(int*nnz,double *y,int*incy,double *x,const int *indx,enum blas_base_type*index_base,int*istat);
+int BLAS_cusgz(int nnz, void *y, int incy, void *x, const int *indx,
+              enum blas_base_type index_base);
+void blas_cusgz_(int*nnz,void *y,int*incy,void *x,const int *indx,enum blas_base_type*index_base,int*istat);
+int BLAS_zusgz(int nnz, void *y, int incy, void *x, const int *indx,
+              enum blas_base_type index_base);
+void blas_zusgz_(int*nnz,void *y,int*incy,void *x,const int *indx,enum blas_base_type*index_base,int*istat);
+
+int BLAS_sussc(int nnz, const float * x, float * y, int incy, const int *indx,
+              enum blas_base_type index_base);
+void blas_sussc_(int*nnz,const float *x,float *y,int*incy,const int *indx,enum blas_base_type*index_base,int*istat);
+int BLAS_dussc(int nnz, const double * x, double * y, int incy, const int *indx,
+              enum blas_base_type index_base);
+void blas_dussc_(int*nnz,const double *x,double *y,int*incy,const int *indx,enum blas_base_type*index_base,int*istat);
+int BLAS_cussc(int nnz, const void *x, void *y, int incy, const int *indx,
+              enum blas_base_type index_base);
+void blas_cussc_(int*nnz,const void *x,void *y,int*incy,const int *indx,enum blas_base_type*index_base,int*istat);
+int BLAS_zussc(int nnz, const void *x, void *y, int incy, const int *indx,
+              enum blas_base_type index_base);
+void blas_zussc_(int*nnz,const void *x,void *y,int*incy,const int *indx,enum blas_base_type*index_base,int*istat);
+
+               /* Level 2 Computational Routines */
+
+int BLAS_susmv(enum blas_trans_type transA, float alpha,
+    blas_sparse_matrix A, const float * x, int incx, float * y, int incy);
+
+void blas_susmv_(enum blas_trans_type*transA,float*alpha,blas_sparse_matrix*A,const float *x,int*incx,float *y,int*incy,int*istat);
+
+int BLAS_dusmv(enum blas_trans_type transA, double alpha,
+    blas_sparse_matrix A, const double * x, int incx, double * y, int incy);
+
+void blas_dusmv_(enum blas_trans_type*transA,double*alpha,blas_sparse_matrix*A,const double *x,int*incx,double *y,int*incy,int*istat);
+
+int BLAS_cusmv(enum blas_trans_type transA, const void *alpha,
+    blas_sparse_matrix A, const void *x, int incx, void *y, int incy);
+
+void blas_cusmv_(enum blas_trans_type*transA,const void *alpha,blas_sparse_matrix*A,const void *x,int*incx,void *y,int*incy,int*istat);
+
+int BLAS_zusmv(enum blas_trans_type transA, const void *alpha,
+    blas_sparse_matrix A, const void *x, int incx, void *y, int incy);
+
+void blas_zusmv_(enum blas_trans_type*transA,const void *alpha,blas_sparse_matrix*A,const void *x,int*incx,void *y,int*incy,int*istat);
+
+
+int BLAS_sussv(enum blas_trans_type transT, float alpha,
+    blas_sparse_matrix T, float * x, int incx);
+
+void blas_sussv_(enum blas_trans_type*transT,float*alpha,blas_sparse_matrix*T,float *x,int*incx,int*istat);
+
+int BLAS_dussv(enum blas_trans_type transT, double alpha,
+    blas_sparse_matrix T, double * x, int incx);
+
+void blas_dussv_(enum blas_trans_type*transT,double*alpha,blas_sparse_matrix*T,double *x,int*incx,int*istat);
+
+int BLAS_cussv(enum blas_trans_type transT, const void *alpha,
+    blas_sparse_matrix T, void *x, int incx);
+
+void blas_cussv_(enum blas_trans_type*transT,const void *alpha,blas_sparse_matrix*T,void *x,int*incx,int*istat);
+
+int BLAS_zussv(enum blas_trans_type transT, const void *alpha,
+    blas_sparse_matrix T, void *x, int incx);
+
+void blas_zussv_(enum blas_trans_type*transT,const void *alpha,blas_sparse_matrix*T,void *x,int*incx,int*istat);
+
+
+               /* Level 3 Computational Routines */
+
+int BLAS_susmm(enum blas_order_type order, enum blas_trans_type transA,
+   int nrhs, float alpha, blas_sparse_matrix A, const float * b, int ldb,
+       float *  c, int ldc);
+
+void blas_susmm_(enum blas_order_type*order,enum blas_trans_type*transA,int*nrhs,float*alpha,blas_sparse_matrix*A,const float *b,int*ldb,float *c,int*ldc,int*istat);
+
+int BLAS_dusmm(enum blas_order_type order, enum blas_trans_type transA,
+   int nrhs, double alpha, blas_sparse_matrix A, const double * b, int ldb,
+       double *  c, int ldc);
+
+void blas_dusmm_(enum blas_order_type*order,enum blas_trans_type*transA,int*nrhs,double*alpha,blas_sparse_matrix*A,const double *b,int*ldb,double *c,int*ldc,int*istat);
+
+int BLAS_cusmm(enum blas_order_type order, enum blas_trans_type transA,
+   int nrhs, const void *alpha, blas_sparse_matrix A, const void *b, int ldb,
+       void * c, int ldc);
+
+void blas_cusmm_(enum blas_order_type*order,enum blas_trans_type*transA,int*nrhs,const void *alpha,blas_sparse_matrix*A,const void *b,int*ldb,void *c,int*ldc,int*istat);
+
+int BLAS_zusmm(enum blas_order_type order, enum blas_trans_type transA,
+   int nrhs, const void *alpha, blas_sparse_matrix A, const void *b, int ldb,
+       void * c, int ldc);
+
+void blas_zusmm_(enum blas_order_type*order,enum blas_trans_type*transA,int*nrhs,const void *alpha,blas_sparse_matrix*A,const void *b,int*ldb,void *c,int*ldc,int*istat);
+
+
+int BLAS_sussm(enum blas_order_type order, enum blas_trans_type transT,
+               int nrhs, float alpha, blas_sparse_matrix T, float * b, int ldb);
+
+void blas_sussm_(enum blas_order_type*order,enum blas_trans_type*transT,int*nrhs,float*alpha,blas_sparse_matrix*T,float *b,int*ldb,int*istat);
+
+int BLAS_dussm(enum blas_order_type order, enum blas_trans_type transT,
+               int nrhs, double alpha, blas_sparse_matrix T, double * b, int ldb);
+
+void blas_dussm_(enum blas_order_type*order,enum blas_trans_type*transT,int*nrhs,double*alpha,blas_sparse_matrix*T,double *b,int*ldb,int*istat);
+
+int BLAS_cussm(enum blas_order_type order, enum blas_trans_type transT,
+               int nrhs, const void *alpha, blas_sparse_matrix T, void *b, int ldb);
+
+void blas_cussm_(enum blas_order_type*order,enum blas_trans_type*transT,int*nrhs,const void *alpha,blas_sparse_matrix*T,void *b,int*ldb,int*istat);
+
+int BLAS_zussm(enum blas_order_type order, enum blas_trans_type transT,
+               int nrhs, const void *alpha, blas_sparse_matrix T, void *b, int ldb);
+
+void blas_zussm_(enum blas_order_type*order,enum blas_trans_type*transT,int*nrhs,const void *alpha,blas_sparse_matrix*T,void *b,int*ldb,int*istat);
+
+
+               /* Handle Management Routines */
+               /*             +              */
+               /* Creation Routines */
+               /*             +              */
+               /* Insertion Routines */
+               /*             +              */
+               /* Completion of Construction Routines */
+               /*             +              */
+               /* Matrix Property Routines */
+               /*             +              */
+               /* Destruction Routine */
+
+blas_sparse_matrix BLAS_suscr_begin(int m, int n);
+void blas_suscr_begin_(int*m,int*n,blas_sparse_matrix*A,int*istat);
+blas_sparse_matrix BLAS_duscr_begin(int m, int n);
+void blas_duscr_begin_(int*m,int*n,blas_sparse_matrix*A,int*istat);
+blas_sparse_matrix BLAS_cuscr_begin(int m, int n);
+void blas_cuscr_begin_(int*m,int*n,blas_sparse_matrix*A,int*istat);
+blas_sparse_matrix BLAS_zuscr_begin(int m, int n);
+void blas_zuscr_begin_(int*m,int*n,blas_sparse_matrix*A,int*istat);
+
+blas_sparse_matrix BLAS_suscr_block_begin(int Mb, int Nb, int k, int l);
+void blas_suscr_block_begin_(int*Mb,int*Nb,int*k,int*l,blas_sparse_matrix*A,int*istat);
+blas_sparse_matrix BLAS_duscr_block_begin(int Mb, int Nb, int k, int l);
+void blas_duscr_block_begin_(int*Mb,int*Nb,int*k,int*l,blas_sparse_matrix*A,int*istat);
+blas_sparse_matrix BLAS_cuscr_block_begin(int Mb, int Nb, int k, int l);
+void blas_cuscr_block_begin_(int*Mb,int*Nb,int*k,int*l,blas_sparse_matrix*A,int*istat);
+blas_sparse_matrix BLAS_zuscr_block_begin(int Mb, int Nb, int k, int l);
+void blas_zuscr_block_begin_(int*Mb,int*Nb,int*k,int*l,blas_sparse_matrix*A,int*istat);
+
+blas_sparse_matrix BLAS_suscr_variable_block_begin(int Mb, int Nb,
+		const int *K, const int *L);
+void blas_suscr_variable_block_begin_(int*Mb,int*Nb,const int *K,const int *L,blas_sparse_matrix*A,int*istat);
+blas_sparse_matrix BLAS_duscr_variable_block_begin(int Mb, int Nb,
+		const int *K, const int *L);
+void blas_duscr_variable_block_begin_(int*Mb,int*Nb,const int *K,const int *L,blas_sparse_matrix*A,int*istat);
+blas_sparse_matrix BLAS_cuscr_variable_block_begin(int Mb, int Nb,
+		const int *K, const int *L);
+void blas_cuscr_variable_block_begin_(int*Mb,int*Nb,const int *K,const int *L,blas_sparse_matrix*A,int*istat);
+blas_sparse_matrix BLAS_zuscr_variable_block_begin(int Mb, int Nb,
+		const int *K, const int *L);
+void blas_zuscr_variable_block_begin_(int*Mb,int*Nb,const int *K,const int *L,blas_sparse_matrix*A,int*istat);
+
+int BLAS_suscr_end(blas_sparse_matrix A);
+void blas_suscr_end_(blas_sparse_matrix*A,int*istat);
+int BLAS_duscr_end(blas_sparse_matrix A);
+void blas_duscr_end_(blas_sparse_matrix*A,int*istat);
+int BLAS_cuscr_end(blas_sparse_matrix A);
+void blas_cuscr_end_(blas_sparse_matrix*A,int*istat);
+int BLAS_zuscr_end(blas_sparse_matrix A);
+void blas_zuscr_end_(blas_sparse_matrix*A,int*istat);
+
+int BLAS_suscr_insert_entry(blas_sparse_matrix A, float  val, int i, int j);
+void blas_suscr_insert_entry_(blas_sparse_matrix*A,float*val,int*i,int*j,int*istat);
+int BLAS_duscr_insert_entry(blas_sparse_matrix A, double  val, int i, int j);
+void blas_duscr_insert_entry_(blas_sparse_matrix*A,double*val,int*i,int*j,int*istat);
+int BLAS_cuscr_insert_entry(blas_sparse_matrix A, const void * val, int i, int j);
+void blas_cuscr_insert_entry_(blas_sparse_matrix*A,const void *val,int*i,int*j,int*istat);
+int BLAS_zuscr_insert_entry(blas_sparse_matrix A, const void * val, int i, int j);
+void blas_zuscr_insert_entry_(blas_sparse_matrix*A,const void *val,int*i,int*j,int*istat);
+
+int BLAS_suscr_insert_entries(blas_sparse_matrix A, int nnz, const float * val,
+                            const int *indx, const int *jndx);
+void blas_suscr_insert_entries_(blas_sparse_matrix*A,int*nnz,const float *val,const int *indx,const int *jndx,int*istat);
+int BLAS_duscr_insert_entries(blas_sparse_matrix A, int nnz, const double * val,
+                            const int *indx, const int *jndx);
+void blas_duscr_insert_entries_(blas_sparse_matrix*A,int*nnz,const double *val,const int *indx,const int *jndx,int*istat);
+int BLAS_cuscr_insert_entries(blas_sparse_matrix A, int nnz, const void *val,
+                            const int *indx, const int *jndx);
+void blas_cuscr_insert_entries_(blas_sparse_matrix*A,int*nnz,const void *val,const int *indx,const int *jndx,int*istat);
+int BLAS_zuscr_insert_entries(blas_sparse_matrix A, int nnz, const void *val,
+                            const int *indx, const int *jndx);
+void blas_zuscr_insert_entries_(blas_sparse_matrix*A,int*nnz,const void *val,const int *indx,const int *jndx,int*istat);
+
+int BLAS_suscr_insert_col(blas_sparse_matrix A, int j, int nnz,
+                           const float * val, const int *indx);
+void blas_suscr_insert_col_(blas_sparse_matrix*A,int*j,int*nnz,const float *val,const int *indx,int*istat);
+int BLAS_duscr_insert_col(blas_sparse_matrix A, int j, int nnz,
+                           const double * val, const int *indx);
+void blas_duscr_insert_col_(blas_sparse_matrix*A,int*j,int*nnz,const double *val,const int *indx,int*istat);
+int BLAS_cuscr_insert_col(blas_sparse_matrix A, int j, int nnz,
+                           const void *val, const int *indx);
+void blas_cuscr_insert_col_(blas_sparse_matrix*A,int*j,int*nnz,const void *val,const int *indx,int*istat);
+int BLAS_zuscr_insert_col(blas_sparse_matrix A, int j, int nnz,
+                           const void *val, const int *indx);
+void blas_zuscr_insert_col_(blas_sparse_matrix*A,int*j,int*nnz,const void *val,const int *indx,int*istat);
+
+int BLAS_suscr_insert_row(blas_sparse_matrix A, int i, int nnz,
+                           const float * val, const int *indx);
+void blas_suscr_insert_row_(blas_sparse_matrix*A,int*i,int*nnz,const float *val,const int *indx,int*istat);
+int BLAS_duscr_insert_row(blas_sparse_matrix A, int i, int nnz,
+                           const double * val, const int *indx);
+void blas_duscr_insert_row_(blas_sparse_matrix*A,int*i,int*nnz,const double *val,const int *indx,int*istat);
+int BLAS_cuscr_insert_row(blas_sparse_matrix A, int i, int nnz,
+                           const void *val, const int *indx);
+void blas_cuscr_insert_row_(blas_sparse_matrix*A,int*i,int*nnz,const void *val,const int *indx,int*istat);
+int BLAS_zuscr_insert_row(blas_sparse_matrix A, int i, int nnz,
+                           const void *val, const int *indx);
+void blas_zuscr_insert_row_(blas_sparse_matrix*A,int*i,int*nnz,const void *val,const int *indx,int*istat);
+
+int BLAS_suscr_insert_clique(blas_sparse_matrix A, const int k, const int l,
+                       const float * val, const int row_stride,
+                       const int col_stride, const int *indx,
+                       const int *jndx);
+void blas_suscr_insert_clique_(blas_sparse_matrix*A,const int*k,const int*l,const float *val,const int*row_stride,const int*col_stride,const int *indx,const int *jndx,int*istat);
+int BLAS_duscr_insert_clique(blas_sparse_matrix A, const int k, const int l,
+                       const double * val, const int row_stride,
+                       const int col_stride, const int *indx,
+                       const int *jndx);
+void blas_duscr_insert_clique_(blas_sparse_matrix*A,const int*k,const int*l,const double *val,const int*row_stride,const int*col_stride,const int *indx,const int *jndx,int*istat);
+int BLAS_cuscr_insert_clique(blas_sparse_matrix A, const int k, const int l,
+                       const void *val, const int row_stride,
+                       const int col_stride, const int *indx,
+                       const int *jndx);
+void blas_cuscr_insert_clique_(blas_sparse_matrix*A,const int*k,const int*l,const void *val,const int*row_stride,const int*col_stride,const int *indx,const int *jndx,int*istat);
+int BLAS_zuscr_insert_clique(blas_sparse_matrix A, const int k, const int l,
+                       const void *val, const int row_stride,
+                       const int col_stride, const int *indx,
+                       const int *jndx);
+void blas_zuscr_insert_clique_(blas_sparse_matrix*A,const int*k,const int*l,const void *val,const int*row_stride,const int*col_stride,const int *indx,const int *jndx,int*istat);
+
+int BLAS_suscr_insert_block(blas_sparse_matrix A, const float * val,
+                        int row_stride, int col_stride, int i, int j);
+void blas_suscr_insert_block_(blas_sparse_matrix*A,const float *val,int*row_stride,int*col_stride,int*i,int*j,int*istat);
+int BLAS_duscr_insert_block(blas_sparse_matrix A, const double * val,
+                        int row_stride, int col_stride, int i, int j);
+void blas_duscr_insert_block_(blas_sparse_matrix*A,const double *val,int*row_stride,int*col_stride,int*i,int*j,int*istat);
+int BLAS_cuscr_insert_block(blas_sparse_matrix A, const void *val,
+                        int row_stride, int col_stride, int i, int j);
+void blas_cuscr_insert_block_(blas_sparse_matrix*A,const void *val,int*row_stride,int*col_stride,int*i,int*j,int*istat);
+int BLAS_zuscr_insert_block(blas_sparse_matrix A, const void *val,
+                        int row_stride, int col_stride, int i, int j);
+void blas_zuscr_insert_block_(blas_sparse_matrix*A,const void *val,int*row_stride,int*col_stride,int*i,int*j,int*istat);
+
+
+
+int BLAS_uscr_end(blas_sparse_matrix A);
+void blas_uscr_end_(blas_sparse_matrix*A,int*istat);
+int BLAS_usds(blas_sparse_matrix A);
+void blas_usds_(blas_sparse_matrix*A,int*istat);
+
+int BLAS_susrows_scale(blas_sparse_matrix A,const float *  d, enum blas_trans_type trans);
+void blas_susrows_scale_(blas_sparse_matrix*A,const float *d,enum blas_trans_type*trans,int*istat);
+int BLAS_dusrows_scale(blas_sparse_matrix A,const double *  d, enum blas_trans_type trans);
+void blas_dusrows_scale_(blas_sparse_matrix*A,const double *d,enum blas_trans_type*trans,int*istat);
+int BLAS_cusrows_scale(blas_sparse_matrix A,const void * d, enum blas_trans_type trans);
+void blas_cusrows_scale_(blas_sparse_matrix*A,const void *d,enum blas_trans_type*trans,int*istat);
+int BLAS_zusrows_scale(blas_sparse_matrix A,const void * d, enum blas_trans_type trans);
+void blas_zusrows_scale_(blas_sparse_matrix*A,const void *d,enum blas_trans_type*trans,int*istat);
+
+int BLAS_susget_diag(blas_sparse_matrix A,float *  d);
+void blas_susget_diag_(blas_sparse_matrix*A,float *d,int*istat);
+int BLAS_dusget_diag(blas_sparse_matrix A,double *  d);
+void blas_dusget_diag_(blas_sparse_matrix*A,double *d,int*istat);
+int BLAS_cusget_diag(blas_sparse_matrix A,void * d);
+void blas_cusget_diag_(blas_sparse_matrix*A,void *d,int*istat);
+int BLAS_zusget_diag(blas_sparse_matrix A,void * d);
+void blas_zusget_diag_(blas_sparse_matrix*A,void *d,int*istat);
+
+int BLAS_susget_rows_nnz(blas_sparse_matrix A, int fr, int lr, int * nnzp);
+void blas_susget_rows_nnz_(blas_sparse_matrix*A,int*fr,int*lr,int *nnzp,int*istat);
+int BLAS_dusget_rows_nnz(blas_sparse_matrix A, int fr, int lr, int * nnzp);
+void blas_dusget_rows_nnz_(blas_sparse_matrix*A,int*fr,int*lr,int *nnzp,int*istat);
+int BLAS_cusget_rows_nnz(blas_sparse_matrix A, int fr, int lr, int * nnzp);
+void blas_cusget_rows_nnz_(blas_sparse_matrix*A,int*fr,int*lr,int *nnzp,int*istat);
+int BLAS_zusget_rows_nnz(blas_sparse_matrix A, int fr, int lr, int * nnzp);
+void blas_zusget_rows_nnz_(blas_sparse_matrix*A,int*fr,int*lr,int *nnzp,int*istat);
+
+int BLAS_susget_rows_sparse(blas_sparse_matrix A, float *  VA, int * IA, int * JA, int * nnz, int fr, int lr);
+void blas_susget_rows_sparse_(blas_sparse_matrix*A,float *VA,int *IA,int *JA,int *nnz,int*fr,int*lr,int*istat);
+int BLAS_dusget_rows_sparse(blas_sparse_matrix A, double *  VA, int * IA, int * JA, int * nnz, int fr, int lr);
+void blas_dusget_rows_sparse_(blas_sparse_matrix*A,double *VA,int *IA,int *JA,int *nnz,int*fr,int*lr,int*istat);
+int BLAS_cusget_rows_sparse(blas_sparse_matrix A, void * VA, int * IA, int * JA, int * nnz, int fr, int lr);
+void blas_cusget_rows_sparse_(blas_sparse_matrix*A,void *VA,int *IA,int *JA,int *nnz,int*fr,int*lr,int*istat);
+int BLAS_zusget_rows_sparse(blas_sparse_matrix A, void * VA, int * IA, int * JA, int * nnz, int fr, int lr);
+void blas_zusget_rows_sparse_(blas_sparse_matrix*A,void *VA,int *IA,int *JA,int *nnz,int*fr,int*lr,int*istat);
+
+int BLAS_susget_matrix_nnz(blas_sparse_matrix A,int * nnz);
+void blas_susget_matrix_nnz_(blas_sparse_matrix*A,int *nnz,int*istat);
+int BLAS_dusget_matrix_nnz(blas_sparse_matrix A,int * nnz);
+void blas_dusget_matrix_nnz_(blas_sparse_matrix*A,int *nnz,int*istat);
+int BLAS_cusget_matrix_nnz(blas_sparse_matrix A,int * nnz);
+void blas_cusget_matrix_nnz_(blas_sparse_matrix*A,int *nnz,int*istat);
+int BLAS_zusget_matrix_nnz(blas_sparse_matrix A,int * nnz);
+void blas_zusget_matrix_nnz_(blas_sparse_matrix*A,int *nnz,int*istat);
+
+int BLAS_susget_infinity_norm(blas_sparse_matrix A,float * in, enum blas_trans_type trans);
+void blas_susget_infinity_norm_(blas_sparse_matrix*A,float *in,enum blas_trans_type*trans,int*istat);
+int BLAS_dusget_infinity_norm(blas_sparse_matrix A,double * in, enum blas_trans_type trans);
+void blas_dusget_infinity_norm_(blas_sparse_matrix*A,double *in,enum blas_trans_type*trans,int*istat);
+int BLAS_cusget_infinity_norm(blas_sparse_matrix A,void *in, enum blas_trans_type trans);
+void blas_cusget_infinity_norm_(blas_sparse_matrix*A,void *in,enum blas_trans_type*trans,int*istat);
+int BLAS_zusget_infinity_norm(blas_sparse_matrix A,void *in, enum blas_trans_type trans);
+void blas_zusget_infinity_norm_(blas_sparse_matrix*A,void *in,enum blas_trans_type*trans,int*istat);
+
+int BLAS_susset_elements(blas_sparse_matrix A,const int * ia, const int *ja, const float *  va, int nnz);
+void blas_susset_elements_(blas_sparse_matrix*A,const int *ia,const int *ja,const float *va,int*nnz,int*istat);
+int BLAS_dusset_elements(blas_sparse_matrix A,const int * ia, const int *ja, const double *  va, int nnz);
+void blas_dusset_elements_(blas_sparse_matrix*A,const int *ia,const int *ja,const double *va,int*nnz,int*istat);
+int BLAS_cusset_elements(blas_sparse_matrix A,const int * ia, const int *ja, const void * va, int nnz);
+void blas_cusset_elements_(blas_sparse_matrix*A,const int *ia,const int *ja,const void *va,int*nnz,int*istat);
+int BLAS_zusset_elements(blas_sparse_matrix A,const int * ia, const int *ja, const void * va, int nnz);
+void blas_zusset_elements_(blas_sparse_matrix*A,const int *ia,const int *ja,const void *va,int*nnz,int*istat);
+
+int BLAS_susset_element(blas_sparse_matrix A,int i, int j, float *  v);
+void blas_susset_element_(blas_sparse_matrix*A,int*i,int*j,float *v,int*istat);
+int BLAS_dusset_element(blas_sparse_matrix A,int i, int j, double *  v);
+void blas_dusset_element_(blas_sparse_matrix*A,int*i,int*j,double *v,int*istat);
+int BLAS_cusset_element(blas_sparse_matrix A,int i, int j, void * v);
+void blas_cusset_element_(blas_sparse_matrix*A,int*i,int*j,void *v,int*istat);
+int BLAS_zusset_element(blas_sparse_matrix A,int i, int j, void * v);
+void blas_zusset_element_(blas_sparse_matrix*A,int*i,int*j,void *v,int*istat);
+
+int BLAS_susget_element(blas_sparse_matrix A,int i, int j, float *  v);
+void blas_susget_element_(blas_sparse_matrix*A,int*i,int*j,float *v,int*istat);
+int BLAS_dusget_element(blas_sparse_matrix A,int i, int j, double *  v);
+void blas_dusget_element_(blas_sparse_matrix*A,int*i,int*j,double *v,int*istat);
+int BLAS_cusget_element(blas_sparse_matrix A,int i, int j, void * v);
+void blas_cusget_element_(blas_sparse_matrix*A,int*i,int*j,void *v,int*istat);
+int BLAS_zusget_element(blas_sparse_matrix A,int i, int j, void * v);
+void blas_zusget_element_(blas_sparse_matrix*A,int*i,int*j,void *v,int*istat);
+
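+/* Illustrative sketch (comment added for clarity, not part of the
+ * upstream header): reading one entry back from an assembled matrix
+ * handle A (index interpretation follows the handle's base property):
+ *
+ *	double v;
+ *	BLAS_dusget_element(A, 0, 0, &v); // v receives the value at (0,0)
+ */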
+
+
+
+
+
+#define BLAS_ussp rsb_wp__BLAS_ussp
+#define BLAS_usgp rsb_wp__BLAS_usgp
+int BLAS_ussp( blas_sparse_matrix A, int pname );
+int BLAS_usgp( blas_sparse_matrix A, int pname );
+blas_sparse_matrix rsb_load_spblas_matrix_file_as_matrix_market(const rsb_char_t * filename, rsb_type_t typecode ); /* This is a librsb extension. */
+
+
+
+struct rsb_mtx_t * rsb_blas_get_mtx(blas_sparse_matrix A);
+
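+/* Illustrative sketch (comment added for clarity, not part of the
+ * upstream header): a minimal handle lifecycle using the routines
+ * declared above; matrix contents and dimensions are made up here.
+ *
+ *	blas_sparse_matrix A = BLAS_duscr_begin(2, 2);
+ *	const int IA[] = {0, 1}, JA[] = {0, 1}; // zero-based (C default)
+ *	const double VA[] = {1.0, 2.0};
+ *	BLAS_duscr_insert_entries(A, 2, VA, IA, JA);
+ *	BLAS_uscr_end(A); // end assembly; A is now usable in computations
+ *	BLAS_usds(A);     // destroy the handle when done
+ */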
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+
+#endif /* RSB_LIBSPBLAS_H_INCLUDED */
+
+
diff --git a/blas_sparse/Makefile.am b/blas_sparse/Makefile.am
new file mode 100644
index 0000000..88f93a3
--- /dev/null
+++ b/blas_sparse/Makefile.am
@@ -0,0 +1,11 @@
+
+subdir=blas_sparse
+
+EXTRA_DIST= \
+	blas_enum.h  blas_sparse.h  blas_sparse_proto.h blas_enum.F90
+
+all: blas_enum.F90
+
+blas_enum.F90: blas_enum.h
+	( echo '! This file has been auto-generated from blas_enum.h.'; cat blas_enum.h  | grep '^\s*blas' | sed 's/[,};/].*//g;s/\s*//g;s/^/        INTEGER,PARAMETER :: /' ) > $@
+
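+# For illustration (comment added, not in the upstream file): a
+# blas_enum.h line such as
+#     blas_rowmajor = 101, /*!< Row major. */
+# passes the grep filter above and is rewritten by the sed program into
+#     INTEGER,PARAMETER :: blas_rowmajor=101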
diff --git a/blas_sparse/Makefile.in b/blas_sparse/Makefile.in
new file mode 100644
index 0000000..4c6a84d
--- /dev/null
+++ b/blas_sparse/Makefile.in
@@ -0,0 +1,439 @@
+# Makefile.in generated by automake 1.11.6 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software
+# Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+VPATH = @srcdir@
+am__make_dryrun = \
+  { \
+    am__dry=no; \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        echo 'am--echo: ; @echo "AM"  OK' | $(MAKE) -f - 2>/dev/null \
+          | grep '^AM OK$$' >/dev/null || am__dry=yes;; \
+      *) \
+        for am__flg in $$MAKEFLAGS; do \
+          case $$am__flg in \
+            *=*|--*) ;; \
+            *n*) am__dry=yes; break;; \
+          esac; \
+        done;; \
+    esac; \
+    test $$am__dry = yes; \
+  }
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/rsb-config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+ARFLAGS = @ARFLAGS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DOXYGEN = @DOXYGEN@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FC = @FC@
+FCFLAGS = @FCFLAGS@
+FGREP = @FGREP@
+GREP = @GREP@
+HELP2MAN = @HELP2MAN@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBRSB_ABI_VERSION = @LIBRSB_ABI_VERSION@
+LIBRSB_LIBRSB_VER = @LIBRSB_LIBRSB_VER@
+LIBRSB_MAIN_RELEASE = @LIBRSB_MAIN_RELEASE@
+LIBRSB_VERSION = @LIBRSB_VERSION@
+LIBRSB_VER_DATE = @LIBRSB_VER_DATE@
+LIBRSB_VER_MAJOR = @LIBRSB_VER_MAJOR@
+LIBRSB_VER_MINOR = @LIBRSB_VER_MINOR@
+LIBRSB_VER_PATCH = @LIBRSB_VER_PATCH@
+LIBRSB_VER_PRERS = @LIBRSB_VER_PRERS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+M4 = @M4@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NOUNROLLCFLAGS = @NOUNROLLCFLAGS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OCTAVE = @OCTAVE@
+OCTAVE_FLAGS = @OCTAVE_FLAGS@
+OPENMP_CFLAGS = @OPENMP_CFLAGS@
+OPENMP_FCFLAGS = @OPENMP_FCFLAGS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+RANLIB = @RANLIB@
+RSB_CONST_MAX_SUPPORTED_THREADS = @RSB_CONST_MAX_SUPPORTED_THREADS@
+RSB_DETECTED_MEM_HIERARCHY_INFO = @RSB_DETECTED_MEM_HIERARCHY_INFO@
+RSB_RSBENCH_CFLAGS = @RSB_RSBENCH_CFLAGS@
+RSB_RSBENCH_LIBS = @RSB_RSBENCH_LIBS@
+RSB_USER_SET_MEM_HIERARCHY_INFO = @RSB_USER_SET_MEM_HIERARCHY_INFO@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+SVN_REVISION = @SVN_REVISION@
+VERSION = @VERSION@
+WANT_COLUMN_UNLOOP_FACTORS = @WANT_COLUMN_UNLOOP_FACTORS@
+WANT_HALFWORD_INDICES = @WANT_HALFWORD_INDICES@
+WANT_LOOPING_KERNELS = @WANT_LOOPING_KERNELS@
+WANT_MATRIX_ALL_META_OPS = @WANT_MATRIX_ALL_META_OPS@
+WANT_MATRIX_ALL_OPS = @WANT_MATRIX_ALL_OPS@
+WANT_MATRIX_ALL_TYPES = @WANT_MATRIX_ALL_TYPES@
+WANT_MATRIX_BCOO_STORAGE = @WANT_MATRIX_BCOO_STORAGE@
+WANT_MATRIX_BCSS_STORAGE = @WANT_MATRIX_BCSS_STORAGE@
+WANT_MATRIX_LINKED_STORAGE = @WANT_MATRIX_LINKED_STORAGE@
+WANT_MATRIX_OPS = @WANT_MATRIX_OPS@
+WANT_MATRIX_STORAGE = @WANT_MATRIX_STORAGE@
+WANT_MATRIX_VB_STORAGE = @WANT_MATRIX_VB_STORAGE@
+WANT_ROW_UNLOOP_FACTORS = @WANT_ROW_UNLOOP_FACTORS@
+WANT_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR = @WANT_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR@
+WANT_SPSM_DIAG_CHECK = @WANT_SPSM_DIAG_CHECK@
+WANT_TYPES = @WANT_TYPES@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_ct_FC = @ac_ct_FC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+enable_openmp = @enable_openmp@
+enable_restrict = @enable_restrict@
+exec_prefix = @exec_prefix@
+have_grep = @have_grep@
+have_sed = @have_sed@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+subdir = blas_sparse
+EXTRA_DIST = \
+	blas_enum.h  blas_sparse.h  blas_sparse_proto.h blas_enum.F90
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in:  $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu blas_sparse/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --gnu blas_sparse/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure:  $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4):  $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+tags: TAGS
+TAGS:
+
+ctags: CTAGS
+CTAGS:
+
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic clean-libtool \
+	distclean distclean-generic distclean-libtool distdir dvi \
+	dvi-am html html-am info info-am install install-am \
+	install-data install-data-am install-dvi install-dvi-am \
+	install-exec install-exec-am install-html install-html-am \
+	install-info install-info-am install-man install-pdf \
+	install-pdf-am install-ps install-ps-am install-strip \
+	installcheck installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-generic \
+	mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am
+
+
+all: blas_enum.F90
+
+blas_enum.F90: blas_enum.h
+	( echo '! This file has been auto-generated from blas_enum.h.'; cat blas_enum.h  | grep '^\s*blas' | sed 's/[,};/].*//g;s/\s*//g;s/^/        INTEGER,PARAMETER :: /' ) > $@
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/blas_sparse/blas_enum.F90 b/blas_sparse/blas_enum.F90
new file mode 100644
index 0000000..22eb3a7
--- /dev/null
+++ b/blas_sparse/blas_enum.F90
@@ -0,0 +1,81 @@
+! This file has been auto-generated from blas_enum.h.
+        INTEGER,PARAMETER :: blas_rowmajor=101
+        INTEGER,PARAMETER :: blas_colmajor=102
+        INTEGER,PARAMETER :: blas_no_trans=111
+        INTEGER,PARAMETER :: blas_trans=112
+        INTEGER,PARAMETER :: blas_conj_trans=113
+        INTEGER,PARAMETER :: blas_upper=121
+        INTEGER,PARAMETER :: blas_lower=122
+        INTEGER,PARAMETER :: blas_non_unit_diag=131
+        INTEGER,PARAMETER :: blas_unit_diag=132
+        INTEGER,PARAMETER :: blas_left_side=141
+        INTEGER,PARAMETER :: blas_right_side=142
+        INTEGER,PARAMETER :: blas_base=151
+        INTEGER,PARAMETER :: blas_t=152
+        INTEGER,PARAMETER :: blas_rnd=153
+        INTEGER,PARAMETER :: blas_ieee=154
+        INTEGER,PARAMETER :: blas_emin=155
+        INTEGER,PARAMETER :: blas_emax=156
+        INTEGER,PARAMETER :: blas_eps=157
+        INTEGER,PARAMETER :: blas_prec=158
+        INTEGER,PARAMETER :: blas_underflow=159
+        INTEGER,PARAMETER :: blas_overflow=160
+        INTEGER,PARAMETER :: blas_sfmin=161
+        INTEGER,PARAMETER :: blas_one_norm=171
+        INTEGER,PARAMETER :: blas_real_one_norm=172
+        INTEGER,PARAMETER :: blas_two_norm=173
+        INTEGER,PARAMETER :: blas_frobenius_norm=174
+        INTEGER,PARAMETER :: blas_inf_norm=175
+        INTEGER,PARAMETER :: blas_real_inf_norm=176
+        INTEGER,PARAMETER :: blas_max_norm=177
+        INTEGER,PARAMETER :: blas_real_max_norm=178
+        INTEGER,PARAMETER :: blas_increasing_order=181
+        INTEGER,PARAMETER :: blas_decreasing_order=182
+        INTEGER,PARAMETER :: blas_conj=191
+        INTEGER,PARAMETER :: blas_no_conj=192
+        INTEGER,PARAMETER :: blas_jrot_inner=201
+        INTEGER,PARAMETER :: blas_jrot_outer=202
+        INTEGER,PARAMETER :: blas_jrot_sorted=203
+        INTEGER,PARAMETER :: blas_prec_single=211
+        INTEGER,PARAMETER :: blas_prec_double=212
+        INTEGER,PARAMETER :: blas_prec_indigenous=213
+        INTEGER,PARAMETER :: blas_prec_extra=214
+        INTEGER,PARAMETER :: blas_zero_base=221
+        INTEGER,PARAMETER :: blas_one_base=222
+        INTEGER,PARAMETER :: blas_general=231
+        INTEGER,PARAMETER :: blas_symmetric=232
+        INTEGER,PARAMETER :: blas_hermitian=233
+        INTEGER,PARAMETER :: blas_triangular=234
+        INTEGER,PARAMETER :: blas_lower_triangular=235
+        INTEGER,PARAMETER :: blas_upper_triangular=236
+        INTEGER,PARAMETER :: blas_lower_symmetric=237
+        INTEGER,PARAMETER :: blas_upper_symmetric=238
+        INTEGER,PARAMETER :: blas_lower_hermitian=239
+        INTEGER,PARAMETER :: blas_upper_hermitian=240
+        INTEGER,PARAMETER :: blas_complex=241
+        INTEGER,PARAMETER :: blas_real=242
+        INTEGER,PARAMETER :: blas_double_precision=243
+        INTEGER,PARAMETER :: blas_single_precision=244
+        INTEGER,PARAMETER :: blas_num_rows=251
+        INTEGER,PARAMETER :: blas_num_cols=252
+        INTEGER,PARAMETER :: blas_num_nonzeros=253
+        INTEGER,PARAMETER :: blas_invalid_handle=261
+        INTEGER,PARAMETER :: blas_new_handle=262
+        INTEGER,PARAMETER :: blas_open_handle=263
+        INTEGER,PARAMETER :: blas_valid_handle=264
+        INTEGER,PARAMETER :: blas_regular=271
+        INTEGER,PARAMETER :: blas_irregular=272
+        INTEGER,PARAMETER :: blas_block=273
+        INTEGER,PARAMETER :: blas_unassembled=274
+        INTEGER,PARAMETER :: blas_rsb_spmv_autotuning_on=6660
+        INTEGER,PARAMETER :: blas_rsb_spmv_autotuning_off=6661
+        INTEGER,PARAMETER :: blas_rsb_spmv_n_autotuning_on=6662
+        INTEGER,PARAMETER :: blas_rsb_spmv_n_autotuning_off=6663
+        INTEGER,PARAMETER :: blas_rsb_spmv_t_autotuning_on=6664
+        INTEGER,PARAMETER :: blas_rsb_spmv_t_autotuning_off=6665
+        INTEGER,PARAMETER :: blas_rsb_autotune_next_operation=6666
+        INTEGER,PARAMETER :: blas_rsb_rep_rsb=9995
+        INTEGER,PARAMETER :: blas_rsb_rep_csr=9996
+        INTEGER,PARAMETER :: blas_rsb_rep_coo=9997
+        INTEGER,PARAMETER :: blas_rsb_duplicates_ovw=9998
+        INTEGER,PARAMETER :: blas_rsb_duplicates_sum=9999
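+! (Comment added for clarity: this file is meant to be INCLUDEd so that
+! Fortran code can refer to the Sparse BLAS enumeration values, e.g.
+! blas_no_trans or blas_num_nonzeros, by name rather than by number.)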
diff --git a/blas_sparse/blas_enum.h b/blas_sparse/blas_enum.h
new file mode 100644
index 0000000..c58e09e
--- /dev/null
+++ b/blas_sparse/blas_enum.h
@@ -0,0 +1,143 @@
+#ifndef BLAS_ENUM_H
+#define BLAS_ENUM_H
+
+  /* Enumerated types */
+
+/*! Used to specify a dense array's elements layout. */
+enum blas_order_type {
+            blas_rowmajor = 101, /*!< Row major. */
+            blas_colmajor = 102  /*!< Column major. */ };
+
+/*! Used to specify a transposition operator to a matrix operand. */
+enum blas_trans_type {
+            blas_no_trans   = 111, /*!< No transposition. */
+            blas_trans      = 112, /*!< Transposition. */
+            blas_conj_trans = 113  /*!< Transposition and conjugation. */ };
+
+/*! Specifies (#BLAS_ussp) or inquires (#BLAS_usgp) upper or lower triangularity of a matrix. */
+enum blas_uplo_type  {
+            blas_upper = 121, /*!< Upper triangular matrix. */
+            blas_lower = 122  /*!< Lower triangular matrix. */ };
+
+/*! Specifies (#BLAS_ussp) or inquires (#BLAS_usgp) whether the diagonal of a matrix is (implicitly) unit or not. */
+enum blas_diag_type {
+            blas_non_unit_diag = 131,  /*!< Non-unit diagonal matrix (the default). */
+            blas_unit_diag     = 132   /*!< Unit diagonal matrix. */ };
+
+/*! Unused/Unsupported. */
+enum blas_side_type {
+            blas_left_side  = 141, /*!< Unsupported. */ 
+            blas_right_side = 142  /*!< Unsupported. */  };
+
+/*! Unused/Unsupported. */
+enum blas_cmach_type {
+            blas_base      = 151, /*!< Unsupported. */ 
+            blas_t         = 152, /*!< Unsupported. */ 
+            blas_rnd       = 153, /*!< Unsupported. */ 
+            blas_ieee      = 154, /*!< Unsupported. */ 
+            blas_emin      = 155, /*!< Unsupported. */ 
+            blas_emax      = 156, /*!< Unsupported. */ 
+            blas_eps       = 157, /*!< Unsupported. */ 
+            blas_prec      = 158, /*!< Unsupported. */ 
+            blas_underflow = 159, /*!< Unsupported. */ 
+            blas_overflow  = 160, /*!< Unsupported. */ 
+            blas_sfmin     = 161  /*!< Unsupported. */ };
+
+/*! Unused/Unsupported. */
+enum blas_norm_type {
+            blas_one_norm       = 171, /*!< Unsupported. */ 
+            blas_real_one_norm  = 172, /*!< Unsupported. */ 
+            blas_two_norm       = 173, /*!< Unsupported. */ 
+            blas_frobenius_norm = 174, /*!< Unsupported. */ 
+            blas_inf_norm       = 175, /*!< Unsupported. */ 
+            blas_real_inf_norm  = 176, /*!< Unsupported. */ 
+            blas_max_norm       = 177, /*!< Unsupported. */ 
+            blas_real_max_norm  = 178  /*!< Unsupported. */ };
+
+/*! Unused/Unsupported. */
+enum blas_sort_type {
+            blas_increasing_order = 181,  /*!< Unsupported. */ 
+            blas_decreasing_order = 182   /*!< Unsupported. */  };
+
+/*! Unused/Unsupported. */
+enum blas_conj_type {
+            blas_conj    = 191, /*!< Unsupported. */
+            blas_no_conj = 192  /*!< Unsupported. */ };
+
+/*! Unused/Unsupported. */
+enum blas_jrot_type {
+            blas_jrot_inner  = 201, /*!< Unsupported. */
+            blas_jrot_outer  = 202, /*!< Unsupported. */
+            blas_jrot_sorted = 203  /*!< Unsupported. */ };
+
+/*! Unused/Unsupported. */
+enum blas_prec_type {
+            blas_prec_single     = 211, /*!< Unsupported. */
+            blas_prec_double     = 212, /*!< Unsupported. */
+            blas_prec_indigenous = 213, /*!< Unsupported. */
+            blas_prec_extra      = 214  /*!< Unsupported. */ };
+
+/*! Index base (valid at matrix build/modify time). */
+enum blas_base_type {
+            blas_zero_base = 221, /*!< Zero based indices (default when matrix created using the C interface). */
+            blas_one_base  = 222  /*!< One based indices (default when matrix created using the Fortran interface). */ };
+
+/*! Symmetry properties. Unless specified otherwise, valid for both #BLAS_ussp and #BLAS_usgp. */
+enum blas_symmetry_type {
+            blas_general          = 231, /*!< General unsymmetric matrix (default). For #BLAS_usgp only. */
+            blas_symmetric        = 232, /*!< Symmetric matrix (either #blas_lower_symmetric or #blas_upper_symmetric). For #BLAS_usgp only. */
+            blas_hermitian        = 233, /*!< Hermitian matrix (either #blas_lower_hermitian or #blas_upper_hermitian). For #BLAS_usgp only. */
+            blas_triangular       = 234, /*!< Triangular matrix (either #blas_lower_triangular or #blas_upper_triangular). For #BLAS_usgp only. */
+            blas_lower_triangular = 235, /*!< Lower triangular matrix. */
+            blas_upper_triangular = 236, /*!< Upper triangular matrix. */
+            blas_lower_symmetric  = 237, /*!< Lower symmetric matrix. */
+            blas_upper_symmetric  = 238, /*!< Upper symmetric matrix. */
+            blas_lower_hermitian  = 239, /*!< Lower hermitian matrix. */
+            blas_upper_hermitian  = 240  /*!< Upper hermitian matrix. */ };
+
+/*! Numerical field type; can be used with #BLAS_usgp to inquire about a matrix's numerical type (1 is returned in case of success, 0 in case of failure). */
+enum blas_field_type {
+            blas_complex          = 241, /*!< Will succeed if matrix is of 'C' or 'Z' type. */
+            blas_real             = 242, /*!< Will succeed if matrix is of 'S' or 'D' type. */
+            blas_double_precision = 243, /*!< Will succeed if matrix is of 'D' or 'Z' type. */
+            blas_single_precision = 244  /*!< Will succeed if matrix is of 'S' or 'C' type. */ };
+
+/*! Quantities that can be obtained via #BLAS_usgp. */
+enum blas_size_type {
+            blas_num_rows      = 251, /*!< Get the number of rows. */
+            blas_num_cols      = 252, /*!< Get the number of columns. */
+            blas_num_nonzeros  = 253  /*!< Get the number of nonzeroes. */ };
+
+/*! The following are not fully implemented. Usable with #BLAS_usgp. */
+enum blas_handle_type{
+            blas_invalid_handle = 261, /*!< Used to check whether the handle is invalid. */
+            blas_new_handle     = 262, /*!< Will give 1 if the handle is new. */
+            blas_open_handle    = 263, /*!< Will give 1 if the handle is open. */
+            blas_valid_handle   = 264  /*!< Will give 1 if the handle is valid (that is, after #BLAS_suscr_end/#BLAS_duscr_end/#BLAS_cuscr_end/#BLAS_zuscr_end). */ };
+
+/*! The following are usable with #BLAS_usgp only. */
+enum blas_sparsity_optimization_type {
+            blas_regular       = 271, /*!< Will give 0. */
+            blas_irregular     = 272, /*!< Will give 1. */
+            blas_block         = 273, /*!< Will give 0. */
+            blas_unassembled   = 274  /*!< Complementary to #blas_valid_handle. */ };
+
+/*! Properties suitable to be used with #BLAS_ussp/#BLAS_usgp. None of these are in the Sparse BLAS standard. */
+enum blas_rsb_ext_type {
+            blas_rsb_spmv_autotuning_on   = 6660,	/*!< Turn on executing threads autotuning for #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. As an extension to the standard, the autotuning properties can be turned on/off at any time; if the autotuning feature has not been enabled at build time, using these properties will make the call fail. For more information, see #rsb_tune_spmm. (EXPERIMENTAL) */
+            blas_rsb_spmv_autotuning_off  = 6661,	/*!< Turn off executing threads autotuning for #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_spmv_n_autotuning_on   = 6662,	/*!< Turn on executing threads autotuning for untransposed #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_spmv_n_autotuning_off  = 6663,	/*!< Turn off executing threads autotuning for untransposed #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_spmv_t_autotuning_on   = 6664,	/*!< Turn on executing threads autotuning for transposed #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_spmv_t_autotuning_off  = 6665,	/*!< Turn off executing threads autotuning for transposed #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_autotune_next_operation= 6666,	/*!< Turn on executing threads autotuning for the next operation among #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_rep_rsb         = 9995,	/*!< Request/check for RSB representation. */
+            blas_rsb_rep_csr         = 9996,	/*!< Request/check for CSR representation. */
+            blas_rsb_rep_coo         = 9997,	/*!< Request/check for COO representation. */
+            blas_rsb_duplicates_ovw   = 9998,	/*!< Request/check for duplicate nonzeroes overwriting policy. */
+            blas_rsb_duplicates_sum   = 9999 	/*!< Request/check for duplicate nonzeroes summation policy. */
+};
+
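+/* Illustrative sketch (comment added for clarity, not part of the
+ * upstream header): typical property set/get on a matrix handle A;
+ * properties are set before the BLAS_Xuscr_end() call:
+ *
+ *	BLAS_ussp(A, blas_lower_triangular); // declare structure
+ *	BLAS_ussp(A, blas_one_base);         // switch to one-based indices
+ *	if( BLAS_usgp(A, blas_double_precision) )
+ *		;                            // 1 if matrix is 'D' or 'Z' typed
+ */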
+#endif
+   /* BLAS_ENUM_H */
diff --git a/blas_sparse/blas_sparse.h b/blas_sparse/blas_sparse.h
new file mode 100644
index 0000000..8ee16cc
--- /dev/null
+++ b/blas_sparse/blas_sparse.h
@@ -0,0 +1,8 @@
+#ifndef BLAS_SPARSE_H
+#define BLAS_SPARSE_H
+
+#include "blas_enum.h"
+#include "blas_sparse_proto.h"
+
+#endif
+   /* BLAS_SPARSE_H */
diff --git a/blas_sparse/blas_sparse_proto.h b/blas_sparse/blas_sparse_proto.h
new file mode 100644
index 0000000..76c7d1c
--- /dev/null
+++ b/blas_sparse/blas_sparse_proto.h
@@ -0,0 +1,205 @@
+#ifndef BLAS_SPARSE_PROTO_H
+#define BLAS_SPARSE_PROTO_H
+
+typedef int blas_sparse_matrix;
+
+
+  /* Level 1 Computational Routines */
+
+void BLAS_susdot( enum blas_conj_type conj, int nz, const float *x, 
+                  const int *indx, const float *y, int incy, float *r,
+                  enum blas_base_type index_base );
+void BLAS_dusdot( enum blas_conj_type conj, int nz, const double *x, 
+                  const int *indx, const double *y, int incy, double *r,
+                  enum blas_base_type index_base );
+void BLAS_cusdot( enum blas_conj_type conj, int nz, const void *x, 
+                  const int *indx, const void *y, int incy, void *r,
+                  enum blas_base_type index_base );
+void BLAS_zusdot( enum blas_conj_type conj, int nz, const void *x, 
+                  const int *indx, const void *y, int incy, void *r,
+                  enum blas_base_type index_base );
+
+void BLAS_susaxpy( int nz, float alpha, const float *x, const int *indx,
+                 float *y, int incy, enum blas_base_type index_base );
+void BLAS_dusaxpy( int nz, double alpha, const double *x, const int *indx,
+                 double *y, int incy, enum blas_base_type index_base );
+void BLAS_cusaxpy( int nz, const void *alpha, const void *x, const int *indx,
+                 void *y, int incy, enum blas_base_type index_base );
+void BLAS_zusaxpy( int nz, const void *alpha, const void *x, const int *indx,
+                 void *y, int incy, enum blas_base_type index_base );
+
+void BLAS_susga( int nz, const float *y, int incy, float *x, const int *indx,
+              enum blas_base_type index_base );
+void BLAS_dusga( int nz, const double *y, int incy, double *x, const int *indx,
+              enum blas_base_type index_base );
+void BLAS_cusga( int nz, const void *y, int incy, void *x, const int *indx,
+              enum blas_base_type index_base );
+void BLAS_zusga( int nz, const void *y, int incy, void *x, const int *indx,
+              enum blas_base_type index_base );
+
+void BLAS_susgz( int nz, float *y, int incy, float *x, const int *indx,
+              enum blas_base_type index_base );
+void BLAS_dusgz( int nz, double *y, int incy, double *x, const int *indx,
+              enum blas_base_type index_base );
+void BLAS_cusgz( int nz, void *y, int incy, void *x, const int *indx,
+              enum blas_base_type index_base );
+void BLAS_zusgz( int nz, void *y, int incy, void *x, const int *indx,
+              enum blas_base_type index_base );
+
+void BLAS_sussc( int nz, const float *x, float *y, int incy, const int *indx,
+              enum blas_base_type index_base );
+void BLAS_dussc( int nz, const double *x, double *y, int incy, const int *indx,
+              enum blas_base_type index_base );
+void BLAS_cussc( int nz, const void *x, void *y, int incy, const int *indx,
+              enum blas_base_type index_base );
+void BLAS_zussc( int nz, const void *x, void *y, int incy, const int *indx,
+              enum blas_base_type index_base );
+
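+/* Illustrative sketch (comment added, not in the upstream header):
+ * sparse dot product r = x^T y, with x held compressed (values plus
+ * indices):
+ *
+ *	const double x[] = {1.0, 2.0};           // stored values of x
+ *	const int indx[] = {0, 3};               // their positions in x
+ *	const double y[] = {1.0, 1.0, 1.0, 1.0}; // dense operand
+ *	double r;
+ *	BLAS_dusdot(blas_no_conj, 2, x, indx, y, 1, &r, blas_zero_base);
+ *	// now r == 1.0*y[0] + 2.0*y[3] == 3.0
+ */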
+               /* Level 2 Computational Routines */
+
+int BLAS_susmv( enum blas_trans_type transa, float alpha, 
+    blas_sparse_matrix A, const float *x, int incx, float *y, int incy );
+int BLAS_dusmv( enum blas_trans_type transa, double alpha, 
+    blas_sparse_matrix A, const double *x, int incx, double *y, int incy );
+int BLAS_cusmv( enum blas_trans_type transa, const void *alpha, 
+    blas_sparse_matrix A, const void *x, int incx, void *y, int incy );
+int BLAS_zusmv( enum blas_trans_type transa, const void *alpha, 
+    blas_sparse_matrix A, const void *x, int incx, void *y, int incy );
+
+int BLAS_sussv( enum blas_trans_type transt, float alpha, 
+    blas_sparse_matrix T, float *x, int incx );
+int BLAS_dussv( enum blas_trans_type transt, double alpha, 
+    blas_sparse_matrix T, double *x, int incx );
+int BLAS_cussv( enum blas_trans_type transt, const void *alpha, 
+    blas_sparse_matrix T, void *x, int incx );
+int BLAS_zussv( enum blas_trans_type transt, const void *alpha, 
+    blas_sparse_matrix T, void *x, int incx );
+
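+/* Illustrative sketch (comment added, not in the upstream header):
+ * with A an assembled 2 x 2 handle, usmv updates y per the Sparse BLAS
+ * semantics y <- alpha*op(A)*x + y:
+ *
+ *	const double x[] = {1.0, 1.0};
+ *	double y[] = {0.0, 0.0};
+ *	BLAS_dusmv(blas_no_trans, 1.0, A, x, 1, y, 1);
+ */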
+               /* Level 3 Computational Routines */
+
+int BLAS_susmm( enum blas_order_type order, enum blas_trans_type transa,
+    int nrhs, float alpha, blas_sparse_matrix A, const float *b, int ldb,
+        float *c, int ldc );
+int BLAS_dusmm( enum blas_order_type order, enum blas_trans_type transa,
+        int nrhs, double alpha, blas_sparse_matrix A, const double *b,
+        int ldb, double *c, int ldc );
+int BLAS_cusmm( enum blas_order_type order, enum blas_trans_type transa,
+         int nrhs, const void *alpha, blas_sparse_matrix A, const void *b, 
+     int ldb, void *c, int ldc );
+int BLAS_zusmm( enum blas_order_type order, enum blas_trans_type transa,
+         int nrhs, const void *alpha, blas_sparse_matrix A, const void *b, 
+     int ldb, void *c, int ldc );
+
+int BLAS_sussm( enum blas_order_type order, enum blas_trans_type transt,
+              int nrhs, float alpha, int t, float *b, int ldb );
+int BLAS_dussm( enum blas_order_type order, enum blas_trans_type transt,
+              int nrhs, double alpha, int t, double *b, int ldb );
+int BLAS_cussm( enum blas_order_type order, enum blas_trans_type transt,
+              int nrhs, const void *alpha, int t, void *b, int ldb );
+int BLAS_zussm( enum blas_order_type order, enum blas_trans_type transt,
+              int nrhs, const void *alpha, int t, void *b, int ldb );
+
+               /* Handle Management Routines */
+
+               /* Creation Routines */
+
+blas_sparse_matrix BLAS_suscr_begin( int m, int n );
+blas_sparse_matrix BLAS_duscr_begin( int m, int n );
+blas_sparse_matrix BLAS_cuscr_begin( int m, int n );
+blas_sparse_matrix BLAS_zuscr_begin( int m, int n );
+
+
+blas_sparse_matrix BLAS_suscr_block_begin( int Mb, int Nb, int k, int l );
+blas_sparse_matrix BLAS_duscr_block_begin( int Mb, int Nb, int k, int l );
+blas_sparse_matrix BLAS_cuscr_block_begin( int Mb, int Nb, int k, int l );
+blas_sparse_matrix BLAS_zuscr_block_begin( int Mb, int Nb, int k, int l );
+
+blas_sparse_matrix BLAS_suscr_variable_block_begin( int Mb, int Nb, 
+		const int *k, const int *l );
+blas_sparse_matrix BLAS_duscr_variable_block_begin( int Mb, int Nb, 
+		const int *k, const int *l );
+blas_sparse_matrix BLAS_cuscr_variable_block_begin( int Mb, int Nb, 
+		const int *k, const int *l );
+blas_sparse_matrix BLAS_zuscr_variable_block_begin( int Mb, int Nb, 
+		const int *k, const int *l );
+
+
+               /* Insertion Routines */
+
+int BLAS_suscr_insert_entry( blas_sparse_matrix A, float val, int i, int j );
+int BLAS_duscr_insert_entry( blas_sparse_matrix A, double val, int i, int j );
+int BLAS_cuscr_insert_entry( blas_sparse_matrix A, void *val, int i, int j );
+int BLAS_zuscr_insert_entry( blas_sparse_matrix A, void *val, int i, int j );
+
+int BLAS_suscr_insert_entries( blas_sparse_matrix A, int nz, const float *val,
+                            const int *indx, const int *jndx );
+int BLAS_duscr_insert_entries( blas_sparse_matrix A, int nz, const double *val,
+                            const int *indx, const int *jndx );
+int BLAS_cuscr_insert_entries( blas_sparse_matrix A, int nz, const void *val,
+                            const int *indx, const int *jndx );
+int BLAS_zuscr_insert_entries( blas_sparse_matrix A, int nz, const void *val,
+                            const int *indx, const int *jndx );
+
+int BLAS_suscr_insert_col( blas_sparse_matrix A, int j, int nz,
+                           const float *val, const int *indx );
+int BLAS_duscr_insert_col( blas_sparse_matrix A, int j, int nz,
+                           const double *val, const int *indx );
+int BLAS_cuscr_insert_col( blas_sparse_matrix A, int j, int nz,
+                           const void *val, const int *indx );
+int BLAS_zuscr_insert_col( blas_sparse_matrix A, int j, int nz,
+                           const void *val, const int *indx );
+
+int BLAS_suscr_insert_row( blas_sparse_matrix A, int i, int nz,
+                           const float *val, const int *indx );
+int BLAS_duscr_insert_row( blas_sparse_matrix A, int i, int nz,
+                           const double *val, const int *indx );
+int BLAS_cuscr_insert_row( blas_sparse_matrix A, int i, int nz,
+                           const void *val, const int *indx );
+int BLAS_zuscr_insert_row( blas_sparse_matrix A, int i, int nz,
+                           const void *val, const int *indx );
+
+int BLAS_suscr_insert_clique( blas_sparse_matrix A, const int k, const int l, 
+                        const float *val, const int row_stride, 
+                        const int col_stride, const int *indx, 
+                        const int *jndx );
+int BLAS_duscr_insert_clique( blas_sparse_matrix A, const int k, const int l, 
+                        const double *val, const int row_stride, 
+                        const int col_stride, const int *indx, 
+                        const int *jndx );
+int BLAS_cuscr_insert_clique( blas_sparse_matrix A, const int k, const int l, 
+                        const void *val, const int row_stride, 
+                        const int col_stride, const int *indx, 
+                        const int *jndx );
+int BLAS_zuscr_insert_clique( blas_sparse_matrix A, const int k, const int l, 
+                        const void *val, const int row_stride, 
+                        const int col_stride, const int *indx, 
+                        const int *jndx );
+
+int BLAS_suscr_insert_block( blas_sparse_matrix A, const float *val, 
+                        int row_stride, int col_stride, int i, int j );
+int BLAS_duscr_insert_block( blas_sparse_matrix A, const double *val, 
+                        int row_stride, int col_stride, int i, int j );
+int BLAS_cuscr_insert_block( blas_sparse_matrix A, const void *val, 
+                        int row_stride, int col_stride, int i, int j );
+int BLAS_zuscr_insert_block( blas_sparse_matrix A, const void *val, 
+                        int row_stride, int col_stride, int i, int j );
+
+               /* Completion of Construction Routines */
+
+int BLAS_suscr_end( blas_sparse_matrix A );
+int BLAS_duscr_end( blas_sparse_matrix A );
+int BLAS_cuscr_end( blas_sparse_matrix A );
+int BLAS_zuscr_end( blas_sparse_matrix A );
+
+               /* Matrix Property Routines */
+
+int BLAS_usgp( blas_sparse_matrix A, int pname );
+
+int BLAS_ussp( blas_sparse_matrix A, int pname );
+
+               /* Destruction Routine */
+
+int BLAS_usds( blas_sparse_matrix A );
+
+#endif
+  /* BLAS_SPARSE_PROTO_H */
diff --git a/ch2icfb.c b/ch2icfb.c
new file mode 100644
index 0000000..693a309
--- /dev/null
+++ b/ch2icfb.c
@@ -0,0 +1,1428 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+
+/**
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This standalone C99 program produces ISO_C_BINDING-oriented Fortran code from the rsb.h C header.
+ *
+ * It is easy to break this program; e.g.:
+ * #define a 1 comment_begin ..
+ *  ... comment_end  
+ * \internal
+ *
+ * Missing features:
+ *  - handling of 'extern' 
+ *  - proper preprocessor macros expansion
+ *  - skipping function definitions
+ *  - struct definitions
+ *  - skipping RSB_RESTRICT, RSB_INLINE, restrict, double complex, RSB_EMPTY_FILE_FILLER , RSB_INNER_NRHS_SPMV_ARGS, RSB_OUTER_NRHS_SPMV_ARGS
+ *  - skipping some enum's
+ *  - rsb_time_t/void as return type
+ *  - argv[], typedef char a[  as return types]
+ * */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h> // isblank, ...
+
+#define BS 1
+#define VERBOSE 0
+#define WANT_BINDINGS 1
+#define WANT_REAL_STAR 1
+#define WANT_C_STRINGS 0
+#define WANT_PURE 0
+#define WANT_POINTERS_TO_INTEGERS 1 /* if 1, will have to invoke C_LOC(IA), ... */
+#define PRINT_FUNC_LIST 0
+#define WANT_CHAR_FOR_TYPECODE 1
+#define WANT_PTR_COMMENT 1 /* FIXME */
+
+#if WANT_REAL_STAR
+#define C2F_DOUBLE "REAL(C_DOUBLE)"
+#define C2F_FLOAT  "REAL(C_FLOAT)"
+#else
+/*
+ integer, parameter :: rsb_dpk_=KIND(1.d0)
+ integer, parameter :: rsb_spk_=KIND(1.s0)
+ integer, parameter :: rsb_dpk_=SELECTED_REAL_KIND(15,300)
+ integer, parameter :: rsb_spk_=SELECTED_REAL_KIND(6,37)
+*/
+#define C2F_DOUBLE "REAL(rsb_dpk_)"
+#define C2F_FLOAT  "REAL(rsb_spk_)"
+#endif
+
+#if VERBOSE
+#define INSPECT fprintf(stderr,"at 0:%c, at %d:%c\n",s[0],n,s[n])
+#if 0
+//#define WARN printf("in %s:%d\n",__FUNCTION__,__LINE__)
+#else /* */
+#define WARN fprintf(stderr,"in %s:%d (%d chars)\n",__FUNCTION__,__LINE__,n)
+#endif /* */
+#define DEBUG printf
+#define INFO fprintf
+#else /* VERBOSE */
+#define WARN
+#define INSPECT
+#define DEBUG( ... )
+#define INFO( ... )
+#endif /* VERBOSE */
+
+#define PRINTF  printf 
+#if 0
+#define IPRINTF printf
+#else
+#define IPRINTF( ... )
+#endif
+#define WANT_UNTYPED_VA 1
+	const char * g_fortran_append = "";
+	const char * g_fortran_prepend = "";
+
+enum syntypes { NULLST, POPST, FUNCSIG, RETTYPE, FUNCSYM, ARGTYPES, ARGLABELS, INITIALIZE, FINALIZE }; 
+struct ccs_t{ enum syntypes st; const char *s; size_t sl ; struct ccs_t * ss; } ; 
+typedef struct ccs_t cc_t;
+typedef int (*ccfp_t)(cc_t *);
+struct pts_t{ ccfp_t fp; cc_t*ca; size_t cn,cc; int isstatic; } ; 
+typedef struct pts_t pt_t;
+#if 0
+#define C2IFS (enum syntypes st, char *s, size_t sl)
+#else
+#define C2IFS (cc_t *cc)
+#endif
+typedef int (*c2ifp_t) C2IFS;
+#define CCTMAX 100
+#define TKLMAX 300
+#define BUFLEN 1024*16 /* e.g. 1024 is too little ! */
+#define ARGMAX 100
+#define CCFPAPPLY(PT,ST,SS,SL) if((PT)&&((PT)->fp)){cc_t cc = {(ST),(SS),(SL),NULL};((PT)->fp)(&cc);/**/}
+#define CCPRINT(CC) {if((CC)){PRINTF("%c..%c\n",(CC)->s[0],(CC)->s[(CC)->sl?(CC)->sl-1:0]);}}
+#define CH2_MIN(X,Y) ((X)<(Y)?(X):(Y))		/*!< quick macro for minimum */
+
+#if PRINT_FUNC_LIST
+#define g_fnlmax 10000
+ size_t g_fnlc = 0;
+ char g_fns[g_fnlmax][TKLMAX];
+ int  g_fnl[g_fnlmax];
+#define FNADD(FNAME,FNL) {strncpy(&g_fns[g_fnlc][0],FNAME,FNL);g_fnl[g_fnlc]=FNL;/* printf("%s",g_fns[g_fnlc]);*/g_fnlc++;}
+#define FDUMP() {int g_fni;printf("! ");for (g_fni=0;g_fni<g_fnlc;++g_fni){  printf("%s ",g_fns[g_fni]); }printf("\n"); }
+#else
+#define FNADD(FNAME,FNL) 
+#define FDUMP() 
+#endif /* PRINT_FUNC_LIST */
+
+#define DOXY_SEE_STR "!> ISO C BINDING interface to ::"
+
+size_t parse_c_substr_gen(const char*s, size_t l, const char *kw, int aat)
+{
+	size_t kl;
+
+	kl = strlen(kw);
+	if( l>=kl && strncmp(s,kw,kl)==0
+		&& ( aat || !isalnum(s[kl]) ) )
+		return kl;
+	return 0;
+}
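+/* Examples (comment added for clarity): parse_c_substr_gen("int x", 5,
+ * "int", 0) returns 3, since "int" matches at the start and the next
+ * character ' ' is not alphanumeric; parse_c_substr_gen("interface", 9,
+ * "int", 0) returns 0 ('e' is alphanumeric), while with aat=1 it
+ * would return 3. */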
+
+size_t parse_c_type_kw(const char*s, size_t l, const char *kw)
+{
+	return parse_c_substr_gen(s, l, kw, 0);
+}
+
+size_t parse_c_substr(const char*s, size_t l, const char *kw)
+{
+	return parse_c_substr_gen(s, l, kw, 1);
+}
+
+const char * get_ptr_to_type_with_spec(const char*s, size_t l);
+
+int a_primitive_type(const char *s, size_t l)
+{
+	if(parse_c_type_kw(s,l,"double")) return 1;
+	if(parse_c_type_kw(s,l,"float")) return 1;
+	if(parse_c_type_kw(s,l,"complex")) return 1;
+	if(parse_c_type_kw(s,l,"double complex")) return 1;
+	if(parse_c_type_kw(s,l,"float complex")) return 1;
+#if 0
+	if(parse_c_type_kw(s,l,"enum rsb_mif_t")) return 1;
+	if(parse_c_type_kw(s,l,"enum rsb_elopf_t")) return 1;
+#endif
+	if(parse_c_type_kw(s,l,"int")) return 1;
+	return 0;
+}
+
+
+int should_use_pointer_as_err_ref_in_type_string(const char *ls, size_t ll,const char *s, size_t l, char*ds)
+{
+	if(parse_c_type_kw(ls,ll,"errvalp"))
+	{
+#if WANT_PTR_COMMENT
+		if(ds)
+		       	strcpy(ds, " ! INTEGER(C_INT)");
+#endif
+		return 1;
+	}
+#if 0
+	if(parse_c_type_kw(ls,ll,"rnz")) return 1;
+#endif
+	return 0;
+}
+
+int should_use_pointer_as_value_ref_in_type_string(const char *ls, size_t ll,const char *s, size_t l, char *ds)
+{
+	if(parse_c_type_kw(ls,ll,"VA")) goto isnt;
+	if(parse_c_type_kw(ls,ll,"VAp")) goto isnt;
+	if(parse_c_type_kw(ls,ll,"alphap")) goto isnt;
+	if(parse_c_type_kw(ls,ll,"betap")) goto isnt;
+	if(parse_c_type_kw(ls,ll,"alpha")) goto isnt;
+	if(parse_c_type_kw(ls,ll,"beta")) goto isnt;
+#if 0
+	if(parse_c_type_kw(ls,ll,"infinity_norm")) return 1;
+#endif
+	if(parse_c_type_kw(ls,ll,"Np")==ll) goto isnt;
+	if(parse_c_type_kw(ls,ll,"Dp")==ll) goto isnt;
+	if(parse_c_type_kw(ls,ll,"Xp")==ll) goto isnt;
+	if(parse_c_type_kw(ls,ll,"Yp")==ll) goto isnt;
+	if(parse_c_type_kw(ls,ll,"iop")) goto isat;
+	goto boh;
+isnt:
+#if WANT_PTR_COMMENT
+	if(ds)
+	       	strcpy(ds, " ! A single variable of the same numerical type of the matrix.");
+#endif
+	return 1;
+isat:
+#if WANT_PTR_COMMENT
+	if(ds)
+	       	strcpy(ds, " ! C_NULL_PTR is a safe value. Please consult the rsb.h documentation for other options.");
+#endif
+	return 1;
+boh:
+	return 0;
+}
+
+int should_use_pointer_as_index_array_in_type_string(const char *ls, size_t ll,const char *s, size_t l)
+{
+	if(parse_c_type_kw(ls,ll,"JA")) goto iia;
+	if(parse_c_type_kw(ls,ll,"JAp")) goto iia;
+	if(parse_c_type_kw(ls,ll,"IA")) goto iia;
+	if(parse_c_type_kw(ls,ll,"IAp")) goto iia;
+	if(parse_c_type_kw(ls,ll,"JAc")) goto iia;
+	if(parse_c_type_kw(ls,ll,"IAc")) goto iia;
+	return 0;
+iia:
+	return 1;
+}
+
+int should_use_pointer_as_ref_in_type_string(const char *ls, size_t ll,const char *s, size_t l, char *ds)
+{
+	return	
+#if WANT_POINTERS_TO_INTEGERS
+		should_use_pointer_as_index_array_in_type_string(ls,ll,s,l)+
+#endif
+		should_use_pointer_as_err_ref_in_type_string(ls,ll,s,l,ds)+
+		should_use_pointer_as_value_ref_in_type_string(ls,ll,s,l,ds);
+}
+
+int should_use_pointer_as_value_array_in_type_string(const char *ls, size_t ll,const char *s, size_t l, char *ds)
+{
+#if 0
+	if(parse_c_type_kw(ls,ll,"VA")==ll) return 1;
+#endif
+	s = get_ptr_to_type_with_spec(s,l);
+	if(memchr(s,'*',l) && a_primitive_type(s,l))
+		goto upava;
+	return 0;
+upava:
+	return 1;
+}
+
+int should_use_pointer_as_char_c_string(const char *ls, size_t ll,const char *s, size_t l, char*ds)
+{
+#if WANT_C_STRINGS 
+	if(parse_c_type_kw(ls,ll,"filename")) goto iss;
+	if(parse_c_type_kw(ls,ll,"opvp")) goto iss;
+	if(parse_c_type_kw(ls,ll,"opnp")) goto iss;
+	if(parse_c_type_kw(ls,ll,"buf")) goto iss;
+	if(parse_c_type_kw(ls,ll,"mis")) goto iss;
+#endif
+	return 0;
+#if WANT_C_STRINGS 
+iss:
+#endif
+#if WANT_PTR_COMMENT
+	if(ds)
+		strcpy(ds, " ! A text string."); /* FIXME */
+#endif
+	return 1;
+}
+
+int should_use_pointer_as_array_in_type_string(const char *ls, size_t ll,const char *s, size_t l, char*ds)
+{
+	return 
+#if !WANT_POINTERS_TO_INTEGERS
+		should_use_pointer_as_index_array_in_type_string(ls,ll,s,l)+
+#endif
+		should_use_pointer_as_value_array_in_type_string(ls,ll,s,l,ds);
+}
+
+size_t parse_c_blanks_and_comments(const char*s, size_t l);
+size_t parse_substr(const char*s, size_t l, char*ss);
+
+const char * c2f_rettype(const char * s, size_t l, char*ds)
+{
+	/* The following is very important.
+	 * If we were capable of interpreting typedef's it would be much better.
+	 * */
+	s = get_ptr_to_type_with_spec(s,l);
+	if(parse_c_type_kw(s,l,"char")) return "CHARACTER(C_CHAR)";
+#if WANT_C_STRINGS 
+	if(parse_c_type_kw(s,l,"rsb_char_t")) return "CHARACTER(C_CHAR), DIMENSION(*)";
+#endif
+	if(parse_c_type_kw(s,l,"double")) return C2F_DOUBLE;
+	if(parse_c_type_kw(s,l,"float")) return C2F_FLOAT;
+	if(parse_c_type_kw(s,l,"complex")) return "REAL(C_COMPLEX)";
+	if(parse_c_type_kw(s,l,"double complex")) return "REAL(C_DOUBLE_COMPLEX)";
+	if(memchr(s,'*',l))
+		goto ip;
+	if(parse_c_type_kw(s,l,"rsb_time_t")) return C2F_DOUBLE;
+	if(parse_c_type_kw(s,l,"rsb_err_t")) goto ii;
+	if(parse_c_type_kw(s,l,"int")) goto ii;
+	if(parse_c_type_kw(s,l,"rsb_coo_idx_t")) goto ii;
+	if(parse_c_type_kw(s,l,"rsb_blk_idx_t")) goto ii;
+	if(parse_c_type_kw(s,l,"rsb_nnz_idx_t")) goto ii;
+	if(parse_c_type_kw(s,l,"rsb_opt_t")) goto ii;
+#if WANT_CHAR_FOR_TYPECODE
+	if(parse_c_type_kw(s,l,"rsb_type_t")) return "INTEGER(C_SIGNED_CHAR)";
+#else
+	if(parse_c_type_kw(s,l,"rsb_type_t")) goto ii;
+#endif
+	if(parse_c_type_kw(s,l,"rsb_flags_t")) { sprintf(ds," " DOXY_SEE_STR "rsb_flags_t");goto ii; }
+	/* if(parse_c_type_kw(s,l,"rsb_prec_flags_t")) goto ii; */
+	if(parse_c_type_kw(s,l,"rsb_extff_t")) { sprintf(ds," " DOXY_SEE_STR "rsb_extff_t");goto ii; }
+	if(parse_c_type_kw(s,l,"rsb_trans_t")) { /*sprintf(ds," " DOXY_SEE_STR" rsb_trans_t");*/goto ii; /* FIXME */ }
+	if(parse_c_type_kw(s,l,"rsb_marf_t"))  { sprintf(ds," " DOXY_SEE_STR "rsb_marf_t");goto ii; }
+	if(parse_c_type_kw(s,l,"rsb_bool_t"))  { sprintf(ds," " DOXY_SEE_STR "rsb_bool_t");goto ii; }
+	if(parse_c_type_kw(s,l,"rsb_int_t")) goto ii;
+	if(parse_c_type_kw(s,l,"blas_sparse_matrix")) goto ii;
+#if 0
+	if(parse_c_type_kw(s,l,"enum rsb_mif_t")) return "INTEGER(C_ENUM)";
+	if(parse_c_type_kw(s,l,"enum rsb_elopf_t")) return "INTEGER(C_ENUM)";
+	if(parse_c_type_kw(s,l,"rsb_mif_t")) return "INTEGER(C_ENUM)"; // FIXME: skipped "enum" !
+	if(parse_c_type_kw(s,l,"rsb_elopf_t")) return "INTEGER(C_ENUM)"; // FIXME: skipped "enum" !
+#endif
+	if(parse_c_type_kw(s,l,"rsb_mif_t")) goto ii; // FIXME: skipped "enum" !
+	if(parse_c_type_kw(s,l,"rsb_elopf_t")) goto ii; // FIXME: skipped "enum" !
+	if(parse_c_type_kw(s,l,"rsb_precf_t")) goto ii;
+	if(parse_c_type_kw(s,l,"size_t")) return "INTEGER(C_SIZE_T)";
+	return "(UNKNOWN TYPE)";
+ii:
+	return "INTEGER(C_INT)";
+ip:
+#if WANT_PTR_COMMENT
+	if(ds)
+	{
+		/* FIXME: very fragile and cheap test: breaks with substrings */
+		if(parse_substr(s,l,"rsb_coo_idx_t")) sprintf(ds," ! INTEGER(C_INT)");
+		if(parse_substr(s,l,"rsb_nnz_idx_t")) sprintf(ds," ! INTEGER(C_INT)");
+		if(parse_substr(s,l,"rsb_int_t")) sprintf(ds," ! INTEGER(C_INT)");
+		if(parse_substr(s,l,"rsb_flags_t")) sprintf(ds," ! INTEGER(C_INT)");
+		if(parse_substr(s,l,"char")) sprintf(ds," ! CHARACTER(C_CHAR)");
+		if(parse_substr(s,l,"void")) sprintf(ds," ! A numerical type");
+		if(parse_substr(s,l,"rsb_mtx_t")) sprintf(ds," ! A matrix pointer variable: (TYPE(C_PTR),TARGET)");
+		if(parse_substr(s,l,"rsb_real_t")) sprintf(ds," ! REAL*8");
+	}
+#endif
+	return "TYPE(C_PTR)";
+}
+
+const char * c2f_argtype(const char * ls, size_t ll, const char * s, size_t l, char * ds)
+{
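+	/* Argument types are translated by precedence: index arrays, value
+	 * arrays, error references and value references are recognized
+	 * first (and rendered as arrays/pointers); plain scalars fall back
+	 * to c2f_rettype(). */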
+	/* static const char*etl = "INTEGER(C_INT)"; */
+#if WANT_UNTYPED_VA
+	static const char*rtl = "TYPE(C_PTR),VALUE";
+#else
+	static const char*rtl = C2F_DOUBLE;
+#endif
+#if WANT_POINTERS_TO_INTEGERS
+#else
+	static const char*itl = "INTEGER(C_INT)";
+#endif
+
+	if(should_use_pointer_as_index_array_in_type_string(ls,ll,s,l))
+	{
+#if WANT_POINTERS_TO_INTEGERS
+#if WANT_PTR_COMMENT
+		if(ds)
+		       	strcpy(ds, " ! INTEGER(C_INT)");
+#endif
+		return rtl;
+#else
+		return itl;
+#endif
+	}
+	if(should_use_pointer_as_value_array_in_type_string(ls,ll,s,l,NULL))
+	{
+#if WANT_PTR_COMMENT
+		if(ds)
+		       	strcpy(ds, " ! An array of numerical type");
+#endif
+		return rtl;
+	}
+	if(should_use_pointer_as_err_ref_in_type_string(ls,ll,s,l,ds))
+	{
+		return rtl;
+	}
+	if(should_use_pointer_as_value_ref_in_type_string(ls,ll,s,l,ds))
+	{
+		return rtl;
+	}
+	return c2f_rettype(s,l,ds);
+}
+
+const char * c2f_varlabel(const char * s, size_t l)
+{
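+	/* NUL-terminates the (s,l) span in a static buffer; the returned
+	 * pointer is overwritten by the next call, so use it right away. */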
+	static char buf[BUFLEN];
+
+	strncpy(buf,s,l);
+	buf[l] = '\0';
+	return buf;
+}
+
+const char * c2f_funname(const char * s, size_t l)
+{
+	static char buf[BUFLEN];
+
+	strcpy(buf,"");
+	strncat(buf,s,l);
+#if 0
+	strcat(buf,"_c2f");
+#endif
+	strcat(buf,g_fortran_append);
+	strcat(buf,g_fortran_prepend);
+	return buf;
+}
+
+int c2i C2IFS
+{
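+	/* Event sink for the parser, invoked through CCFPAPPLY: it gathers
+	 * the pieces of one function signature (return type, name, argument
+	 * types and labels) in static state and, on the closing FUNCSIG
+	 * event, prints the complete Fortran INTERFACE block with BIND(C).
+	 * INITIALIZE and FINALIZE open and close the generated module. */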
+	static enum syntypes s0 = NULLST;
+	enum syntypes st = cc->st;
+#if 0
+	static cc_t ccts[CCTMAX];
+	static pt_t pt = { c2i,ccts,0,CCTMAX,1 };
+#endif
+	static const char*funname = ""; 
+	static size_t funnamel = 0; 
+	static const char*rettype = ""; 
+	static size_t rettypel = 0;
+	static const char*arglabels[ARGMAX];
+	static const char*argtypes[ARGMAX];
+	static size_t arglabelsl[ARGMAX];
+	static size_t argtypesl[ARGMAX];
+	static size_t argn = 0;
+#if WANT_UNTYPED_VA
+	const char*modname = "rsb";
+#else
+	const char*modname = "rsb_d_mod";
+#endif
+
+#if 0
+	IPRINTF("STATIC %d, ARG %d\n",s0,st);
+#endif
+
+	if(st==INITIALIZE)
+	{
+		/* PRINTF("MODULE %s\n   USE ISO_C_BINDING, ONLY: C_INT,C_SIZE_T,C_DOUBLE,C_PTR,C_NULL_PTR,C_CHAR\n\n",modname); */
+		PRINTF("MODULE %s\n   USE ISO_C_BINDING, ONLY: C_INT,C_PTR,C_NULL_PTR"
+#if WANT_CHAR_FOR_TYPECODE
+				",C_SIGNED_CHAR"
+#endif
+				"\n\n",modname);
+		/* PRINTF("MODULE %s\n   USE ISO_C_BINDING\n\n",modname); */
+#if 0
+		PRINTF("! MODULE constants:\n");
+#endif
+	}
+	else
+	if(st==FINALIZE)
+	{
+		PRINTF("END MODULE %s\n",modname);
+	}
+	else
+	if(s0==NULLST && st==FUNCSIG)
+	{
+#if 0
+		IPRINTF("in %d\n",s0);
+#endif
+		IPRINTF("func begin:");
+		s0 = st;
+		argn = 0;
+#if 0
+		IPRINTF("in %d\n",s0);
+#endif
+	}
+	else
+	if(s0==FUNCSIG && st==FUNCSIG)
+	{
+#if 0
+		int ti,tc = 4;
+		for(ti = 0;ti<tc;++ti)
+#endif
+	{
+		size_t i;
+		size_t nl=0,ol=0,mll=50;
+		char buf[BUFLEN],*linbrk = "&\n  &";
+
+		s0 = NULLST;
+		IPRINTF("func end (%d args):",argn);
+		strcpy(buf,"");
+		strcat(buf,DOXY_SEE_STR);
+		strcat(buf,c2f_funname(funname,funnamel));
+		strcat(buf,".\n");
+		strcat(buf,"INTERFACE\n");
+		strcat(buf," ");
+#if WANT_PURE
+		if(0)
+		{
+			int ras = 0, rar = 0, rav =0;
+			for(i = 0;i<argn;++i)
+			{
+				ras += should_use_pointer_as_array_in_type_string(arglabels[i],arglabelsl[i],argtypes[i],argtypesl[i],NULL);
+				rar += should_use_pointer_as_ref_in_type_string(arglabels[i],arglabelsl[i],argtypes[i],argtypesl[i],NULL);
+				rav += should_use_pointer_as_char_c_string(arglabels[i],arglabelsl[i],argtypes[i],argtypesl[i],NULL);
+			}
+			if( ras + rar + rav == 0)
+				strcat(buf," PURE ");
+		}
+		else
+		if(parse_c_type_kw(funname,funnamel,"rsb_strerror_r")) /* need to replace this with is_rsb_pure(funname,funnamel,arglabelsl)  */
+			strcat(buf," PURE ");
+#endif
+		strcat(buf,c2f_rettype(rettype,rettypel,NULL));
+		strcat(buf," FUNCTION ");
+		strcat(buf,linbrk);
+		strcat(buf,c2f_funname(funname,funnamel));
+		strcat(buf,linbrk);
+		strcat(buf,"(");
+		ol=strlen(buf);
+		for(i = 0;i<argn;++i)
+		{
+			if(i)strcat(buf,",");
+			strncat(buf,arglabels[i],arglabelsl[i]);
+			if(strlen(buf+ol)-nl*mll>mll)
+				strcat(buf,"&\n&"),
+				++nl;
+		}
+		strcat(buf,")");
+		strcat(buf,linbrk);
+		strcat(buf,"BIND(c,NAME = '");
+		strncat(buf,funname,funnamel);
+		strcat(buf,"')\n");
+		strcat(buf," USE ISO_C_BINDING\n");
+		for(i = 0;i<argn;++i)
+		{
+#if WANT_PTR_COMMENT
+			char ds[BUFLEN];
+#else
+			char * ds = NULL;
+#endif
+			int as, ar, av;
+
+#if WANT_PTR_COMMENT
+			ds[0] = '\0';
+#endif
+			as = should_use_pointer_as_array_in_type_string(arglabels[i],arglabelsl[i],argtypes[i],argtypesl[i],ds);
+			ar = should_use_pointer_as_ref_in_type_string(arglabels[i],arglabelsl[i],argtypes[i],argtypesl[i],ds);
+			av = should_use_pointer_as_char_c_string(arglabels[i],arglabelsl[i],argtypes[i],argtypesl[i],ds);
+
+			strcat(buf," ");
+			strcat(buf,c2f_argtype(arglabels[i],arglabelsl[i],argtypes[i],argtypesl[i],ds));
+			if(!as && !ar && !av)strcat(buf,", VALUE ");
+			strcat(buf," :: ");
+			strcat(buf,c2f_varlabel(arglabels[i],arglabelsl[i]));
+			if(as)strcat(buf,"(*)");
+#if 0
+			strcat(buf," ! ");
+			strncat(buf,argtypes[i],argtypesl[i]);
+			strcat(buf," ");
+			strncat(buf,arglabels[i],arglabelsl[i]);
+#endif
+#if WANT_PTR_COMMENT
+#else
+			if(ds)
+#endif
+				strcat(buf,ds);
+			strcat(buf,"\n");
+		}
+		strcat(buf," END FUNCTION ");
+		strcat(buf,c2f_funname(funname,funnamel));
+		FNADD(c2f_funname(funname,funnamel),funnamel);
+		strcat(buf,"\n");
+		strcat(buf,"END INTERFACE");
+		/* FIXME: if buf matches 'DOUBLE' shall reallocate it with different types and replicate it */
+#if WANT_BINDINGS
+		PRINTF("\n%s\n",buf);
+#endif /* WANT_BINDINGS */
+#if 0
+		dump ...
+		free ...
+		IPRINTF("in %d\n",s0);
+#endif
+	}
+	}
+	else
+	if(s0==FUNCSIG && st==RETTYPE)
+	{
+		/* ... */
+		IPRINTF("rettype:");
+		rettype = cc->s;
+		rettypel = cc->sl;
+	}
+	else
+	if(s0==FUNCSIG && st==FUNCSYM)
+	{
+		/* ... */
+		IPRINTF("funcsym:");
+		funname = cc->s;
+		funnamel = cc->sl;
+	}
+	else
+	if(s0==FUNCSIG && st==ARGTYPES)
+	{
+		/* append type ... */
+		IPRINTF("argtypes %d:",argn);
+		argtypes[argn] = cc->s;
+		argtypesl[argn] = cc->sl;
+	}
+	else
+	if(s0==FUNCSIG && st==ARGLABELS)
+	{
+		/* append type ... */
+		IPRINTF("arglabels %d:",argn);
+		arglabels[argn] = cc->s;
+		arglabelsl[argn] = cc->sl;
+	       	argn++;
+	}
+	else
+	if(s0==FUNCSIG && st==POPST)
+	{
+		IPRINTF("pop %d:\n",argn);
+		s0 = NULLST;
+	       	argn = 0;
+	}
+	else
+	{
+		/*
+		IPRINTF("unprocessed transition %d -> %d:%s\n",s0,st,cc->s);
+		if(s0!=NULLST && st==POPST)
+		if(st==POPST)
+		if(s0==FUNCSIG && st==POPST)
+			s0 = NULLST;
+		*/
+		goto no;
+	}
+	//CCPRINT(cc);
+	goto ok;
+ok:
+	return 0;
+no:
+	return -1;
+}
+
+pt_t*realloc_pt(pt_t*pt)
+{
+	/* FIXME if isstatic, could not realloc ! */
+	(pt)->ca = realloc((pt)->ca,sizeof(cc_t)*(pt)->cc);
+	return pt;
+}
+
+pt_t*append_pc(pt_t*pt,cc_t *cc)
+{
+	if((pt)->cc-(pt)->cn==0)
+		(pt)->cn += 16,
+		realloc_pt(pt);
+	IPRINTF("APPENDING %d\n",(pt)->ca[(pt)->cn].st);
+	(pt)->ca[(pt)->cn++] = *cc;
+	return pt;
+}
+
+pt_t*append_pt_l(pt_t*pt,enum syntypes st, char *s, size_t sl)
+{
+	cc_t cc = {st,s,sl,NULL};
+
+	return append_pc(pt,&cc);
+}
+
+size_t parse_substr(const char*s, size_t l, char*ss)
+{
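+	/* Returns the offset one past the end of the first occurrence of ss
+	 * within the first l bytes of s, or 0 if there is none. */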
+	size_t sl = strlen(ss),n = 0;
+
+	if(sl>l)
+		return 0;
+	for( ; n <= l-sl ; ++n )
+		if(strncmp(s+n,ss,sl)==0)
+		{
+			n += sl;
+			/* WARN; */
+			return n;
+		}
+	return 0;
+}
+
+size_t parse_c_multi_line_comment(const char*s, size_t l)
+{
+	size_t n;
+
+	if(l>=4 && s[0]=='/' && s[1]=='*')
+	{
+		n = parse_substr(s+2,l-2,"*/");
+		if(n>=2)
+		{
+			n += 2;
+			/* WARN; */
+			return n;
+		}
+	}
+	return 0;
+}
+
+size_t parse_c_line(const char*s, size_t l)
+{
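+	/* Consumes one line, up to and including its newline; lines
+	 * continued with a trailing backslash are consumed together with
+	 * their continuations. */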
+	size_t n = 0;
+
+again:
+	while(n<l && s[n]!='\n')
+		++n;
+	if(n>0 && s[n-1]=='\\' && s[n]=='\n')
+	{n++;goto again;}
+	if(s[n]=='\n')
+		++n;
+	/* DEBUG("%d\n",n); */
+	return n;
+}
+
+
+
+size_t parse_c_one_line_comment(const char*s, size_t l)
+{
+	if(l>=2 && s[0]=='/' && s[1]=='/')
+	{
+		return 2+parse_c_line(s+2,l-2);
+	}
+	return 0;
+}
+
+size_t parse_c_comment(const char*s, size_t l)
+{
+	size_t n = parse_c_one_line_comment(s,l);
+
+	if(n)
+		goto ok;
+	n = parse_c_multi_line_comment(s,l);
+	if(n)
+		goto ok;
+	return 0;
+ok:
+	if(n)WARN;	
+	return n;
+}
+
+
+
+int is_id_fchar(const char c)
+{
+	return isalpha(c) || c=='_';
+}
+
+int is_id_lchar(const char c)
+{
+	return is_id_fchar(c) || isdigit(c);
+}
+
+size_t parse_c_blanks(const char*s, size_t l)
+{
+	size_t n = 0;
+
+	while(n<l && ( isblank(s[n]) || s[n]=='\n'  || s[n]=='\r' )) /* FIXME: does not handle DOS style newlines */
+	{
+		++n;
+	}
+	if(n)WARN;
+	return n;
+}
+
+size_t parse_c_type_pointer_spec(const char*s, size_t l)
+{
+	size_t n = 0;
+
+	if((n = parse_c_substr(s,l,"***"))) return n;
+	if((n = parse_c_substr(s,l,"**"))) return n;
+	if((n = parse_c_substr(s,l,"*"))) return n;
+	return 0;
+}
+size_t parse_c_type_spec(const char*s, size_t l)
+{
+	size_t n = 0;
+
+	if((n = parse_c_type_kw(s,l,"static"))) return n;
+	if((n = parse_c_type_kw(s,l,"const"))) return n;
+	if((n = parse_c_type_kw(s,l,"enum"))) return n;
+	if((n = parse_c_type_kw(s,l,"struct"))) return n;
+#if 0
+	if((n = parse_c_type_kw(s,l,"void"))) return n;
+#endif
+	return parse_c_type_pointer_spec(s,l);
+}
+
+size_t parse_c_blanks_and_comments(const char*s, size_t l)
+{
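+	/* Alternately consumes comments and blank runs until neither
+	 * advances; returns the total number of bytes consumed. */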
+	size_t n = 0,nc = 0,nb = 0;
+
+	do
+	{
+		nc = parse_c_comment(s+n,l-n);
+		n += nc;
+		nb = parse_c_blanks(s+n,l-n);
+		n += nb;
+	}
+	while(nb+nc);
+	return n;
+}
+
+size_t parse_c_int(const char*s, size_t l)
+{
+	size_t n = 0;
+
+	while(l-n>0 && isxdigit(s[n])) /* bounds check before dereferencing */
+		++n;
+	return n;
+}
+
+size_t parse_c_hexa(const char*s, size_t l)
+{
+	size_t n = 0;
+
+	n = parse_substr(s+n,l-n,"0x");
+	if(n==0)
+		return 0;
+	while(l-n>0 && isxdigit(s[n])) /* bounds check before dereferencing */
+		++n;
+	return n;
+}
+
+size_t parse_c_type_specs(const char*s, size_t l)
+{
+	size_t n = 0,nn = parse_c_type_spec(s+n,l-n);
+
+	n += nn;
+	if(nn)
+	do
+	{
+		size_t nc,ni;
+		nc = parse_c_blanks_and_comments(s+n,l-n);
+		if(!nc)goto ok;
+		ni = parse_c_type_spec(s+n+nc,l-n-nc);
+		if(!ni)goto ok;
+		nn = ni+nc;
+		n += nn;
+	}
+	while(nn>0 && n-l);
+ok:
+	if(n)WARN;
+	return n;
+}
+
+size_t parse_c_identifier(const char*s, size_t l)
+{
+	if(l>0 && is_id_fchar(s[0]))
+	{
+		size_t n = 1;
+		for(;n<l && is_id_lchar(s[n]);++n)
+				;
+		return n;
+	}
+	return 0;
+}
+size_t parse_c_tidentifier(const char*s, size_t l)
+{
+	return parse_c_identifier(s,l);
+}
+
+size_t parse_c_type_with_spec(const char*s, size_t l)
+{
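+	/* Consumes an optional run of specifiers (static/const/enum/struct
+	 * and pointer stars), then a type identifier, then an optional
+	 * trailing pointer spec; returns the bytes consumed, or 0 if no
+	 * type is found. */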
+	size_t n = 0,nc = 0,nn = parse_c_type_specs(s+n,l-n);
+
+	/* PRINTF("SPECS %d..%d:%s\n",0,nn,s); */
+	if(nn)
+	{
+		size_t nc = parse_c_blanks_and_comments(s+nn,l-nn);
+		if(!nc) goto ok;
+		nn += nc;
+	}
+	n = parse_c_tidentifier(s+nn,l-nn);
+	if(n)n += nn;
+	nc = parse_c_blanks_and_comments(s+n,l-n);
+	nn = parse_c_type_pointer_spec(s+n+nc,l-n-nc);
+	if(nn)n += nc+nn;
+#if 0
+	n += parse_c_blanks_and_comments(s+n,l-n);
+	PRINTF("STYPE %d..%d:%s\n",0,n,s+n);
+#endif
+	if(n)WARN;
+ok:
+	return n;
+}
+
+const char * get_ptr_to_type_with_spec(const char*s, size_t l)
+{
+	size_t n = 0;
+
+	n += parse_c_type_specs(s+n,l-n);
+	n += parse_c_blanks_and_comments(s+n,l-n);
+#if 0
+	n += parse_c_tidentifier(s+n,l-n);//type name
+	n += parse_c_blanks_and_comments(s+n,l-n);
+	n += parse_c_type_pointer_spec(s+n,l-n);
+#endif
+	return s+n;
+}
+
+size_t parse_c_void_func_args(const char*s, size_t l)
+{
+	size_t nc = 0,nv = 0;
+
+	nc += parse_c_blanks_and_comments(s,l);
+	nv = parse_c_type_kw(s+nc,l-nc,"void");
+	nc += parse_c_blanks_and_comments(s+nv+nc,l-nc-nv);
+	if(nv)
+		return nc+nv;
+	return 0;
+}
+
+size_t parse_c_func_args(const char*s, size_t l, pt_t*pt)
+{
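+	/* Parses a parenthesized parameter list: a bare "(void)" list is
+	 * special-cased; otherwise each "type name" item is reported to the
+	 * sink as an ARGTYPES event followed by an ARGLABELS event. */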
+	size_t n = 0,nn = parse_c_void_func_args(s+n,l-n);
+
+	if(nn==4) /* FIXME */
+	{
+		n = nn+1;
+		goto ok;
+	}
+	nn = parse_c_blanks_and_comments(s+n,l-n);
+	n += nn;
+	if(nn<l)
+	do
+	{
+		size_t nc = 0,nv = 0,ni = 0;
+
+		nn = 0;
+		nv = parse_c_type_with_spec(s+n,l-n);
+		if(!nv)goto ok;
+		nc = parse_c_blanks_and_comments(s+n+nv,l-n-nv);
+		/* if(!nc)goto ok; */
+       		ni = parse_c_identifier(s+n+nc+nv,l-n-nc-nv);
+		if(!ni)goto ok;
+		CCFPAPPLY(pt,ARGTYPES,s+n,nv);
+		CCFPAPPLY(pt,ARGLABELS,s+n+nc+nv,ni);
+		nc += parse_c_blanks_and_comments(s+n+nv+nc+ni,l-n-nv-nc-ni);
+		n += ni+nv+nc;
+		if(n>=l)
+			goto no;
+		if(s[n]!=',')
+		{
+			++n;
+		       	goto ok;/* FIXME: hack */
+		}
+		++n;
+		nc = parse_c_blanks_and_comments(s+n,l-n);
+		n += nc;
+		nn = nc+ni+nv;
+		/*
+		PRINTF("OK ->%s\n",s+n);
+		... parse label ...
+		... parse comments ...
+		... parse comma ...
+		FIXME: UNFINISHED.CONTINUE HERE
+		*/
+	}
+	while(nn>0 && n-l);
+ok:
+	if(n)WARN;
+	return n;
+no:
+	return 0;
+}
+
+size_t parse_c_typename(const char*s, size_t l)
+{
+	size_t n = parse_c_identifier(s,l),nn = n;
+
+	while(nn>0 && l-n>0)
+	{
+		size_t nc = parse_c_blanks_and_comments(s+n,l-n), ni = 0;
+		if(!nc) goto ok;
+		ni = parse_c_identifier(s+n+nc,l-n-nc);
+		if(!ni) goto ok;
+		nn = nc+ni;
+		n += nn;
+	}
+ok:
+	if(n)WARN;
+	return n;
+}
+
+size_t parse_c_typedec(const char*s, size_t l)
+{
+	/* FIXME: UNFINISHED */
+	return parse_c_identifier(s,l);
+}
+
+size_t parse_c_var_decl(const char*s, size_t l)
+{
+	size_t nn = 0,n = parse_c_typename(s,l);
+
+	while(l-n>0 && (s[n]=='*' || s[n]==' '))++n;
+	/* size_t nn = 0,n = parse_c_type_with_spec(s,l); */
+	if(!n)
+		return 0;
+       	nn = parse_c_identifier(s+n,l-n);
+	n += nn;
+	nn = parse_c_blanks_and_comments(s+n,l-n);
+	n += nn;
+	/* nn = parse_substr(s+n,l-n,";"); */
+	nn = parse_c_substr(s+n,l-n,";");
+	if(!nn)
+		return 0;
+	/* FIXME: QUICK HACK */
+	n += nn;
+	WARN;
+	return n;
+}
+
+size_t parse_c_var_mdef(const char*s, size_t l)
+{
+	size_t nn = 0, n = parse_c_typename(s,l);
+
+	if(!n)
+		return 0;
+	nn = parse_c_blanks_and_comments(s+n,l-n);
+	n += nn;
+	nn = parse_c_substr(s+n,l-n,"=");
+	if(nn==0) return 0;
+	n += nn;
+	nn = parse_c_blanks_and_comments(s+n,l-n);
+	n += nn;
+	nn = parse_c_hexa(s+n,l-n);
+	if(nn==0)
+	{
+		nn = parse_c_int(s+n,l-n);
+		if(nn==0)
+			return 0;
+	}
+	n += nn;
+	nn = parse_c_blanks_and_comments(s+n,l-n);
+	/* FIXME: QUICK HACK */
+	n += nn;
+	WARN;
+	return n;
+}
+
+size_t parse_c_enum_decls(const char*s, size_t l)
+{
+	size_t n = 0, ll = 0;
+	
+	if(l-n>0)
+	do
+	{
+		ll = 0;
+		ll += parse_c_var_mdef(s+n+ll,l-n-ll);
+		if(ll==0)return 0;
+		/* WARN; */
+		n += ll;
+		/* INSPECT; */
+		ll = parse_c_substr(s+n,l-n,",");
+		n += ll;
+		n += parse_c_blanks_and_comments(s+n,l-n);
+	}
+	while(l-n>0 && ll>0);
+	if(n)WARN;
+	return n;
+}
+
+size_t parse_c_var_decls(const char*s, size_t l)
+{
+	size_t n = 0,ll = 0;
+	
+	if(l-n>0)
+	do
+	{
+		ll = 0;
+		ll += parse_c_blanks_and_comments(s+n+ll,l-n-ll);
+		ll += parse_c_var_decl(s+n+ll,l-n-ll);
+		/* WARN; */
+		n += ll;
+		/* INSPECT; */
+	}
+	while(l-n>0 && ll>0);
+	if(n)WARN;
+	return n;
+}
+
+size_t parse_c_func_body(const char*s, size_t l)
+{
+	/* FIXME: UNFINISHED */
+	return 0;
+}
+
+size_t parse_c_func_signature(const char*s, size_t l, pt_t*pt)
+{
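+	/* Recognizes "rettype name ( args )": emits an opening FUNCSIG
+	 * event, then RETTYPE and FUNCSYM, parses the argument list, and on
+	 * success re-emits FUNCSIG over the full span; on failure it emits
+	 * POPST so the sink can reset its state. */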
+	size_t n = 0,nn = parse_c_type_with_spec(s,l);
+
+	if(!nn)goto no;
+	CCFPAPPLY(pt,FUNCSIG,s,0);
+	CCFPAPPLY(pt,RETTYPE,s,nn);
+	/* PRINTF("PIPPO %d..%d:%s\n",0,nn,s); */
+	n += nn;
+	nn = parse_c_blanks_and_comments(s+n,l-n);
+	if(!nn && (s[n-1]!='*'))goto no;
+	n += nn;
+       	nn = parse_c_identifier(s+n,l-n);
+	if(!nn)goto no;
+	CCFPAPPLY(pt,FUNCSYM,s+n,nn);
+	n += nn;
+	nn = parse_c_blanks_and_comments(s+n,l-n);
+	n += nn;
+	if(l-n==0 || s[n]!='(')
+		goto no;
+	++n;
+#if 1
+	nn = parse_c_func_args(s+n,l-n,pt);
+#else
+	/* FIXME: a quick hack. need a cycle, here. */
+	nn = parse_substr(s+n,l-n,")");
+#endif
+	if(s[n+nn-1]!=')')goto no; // (void) will go, with this hack
+	n += nn;
+	goto yes;
+yes:
+	CCFPAPPLY(pt,FUNCSIG,s,n);
+	WARN;
+	/* *pt = append_pt_l(pt,FUNCSIG,s,n); */
+	return n;
+no:
+	CCFPAPPLY(pt,POPST,s,0);
+	return 0;
+}
+
+size_t parse_c_func_def(const char*s, size_t l, pt_t*pt)
+{
+	/* FIXME: UNFINISHED */
+	size_t n = parse_c_func_signature(s,l,pt),nn = 0;
+
+	if(!n)
+		return 0;
+	nn = parse_c_func_body(s+n,l-n);
+	if(!nn)
+		return 0;
+	/* FIXME: HACK */
+	return n+nn;
+}
+
+size_t parse_c_func_decl(const char*s, size_t l, pt_t*pt)
+{
+	size_t n = parse_c_func_signature(s,l,pt);
+
+	if(!n)
+		return 0;
+	n += parse_c_blanks_and_comments(s+n,l-n);
+	if(n>=l || s[n]!=';')
+		return 0;
+	n++;
+	if(n)WARN;
+	return n;
+}
+
+size_t parse_c_struct_def(const char*s, size_t l)
+{
+	/* size_t n = parse_substr(s,l,"struct"),nn = 0; */
+	size_t n = parse_c_type_kw(s,l,"struct"),nn = 0;
+
+	if(!n) return 0;
+	nn = parse_c_blanks_and_comments(s+n,l-n);
+	if(!nn) return 0;
+	n += nn;
+       	nn = parse_c_identifier(s+n,l-n);
+	if(!nn) return 0;
+	n += nn;
+	nn = parse_c_blanks_and_comments(s+n,l-n);
+	n += nn;
+	nn = parse_c_substr(s+n,l-n,"{");
+	/* nn = parse_substr(s+n,l-n,"{"); */
+	if(!nn) return 0;
+	n += nn;
+	nn = parse_c_blanks_and_comments(s+n,l-n);
+	n += nn;
+	/* FIXME: identifier declarations */
+	nn = parse_c_var_decls(s+n,l-n);
+	if(!nn) return 0;
+	n += nn;
+	nn = parse_c_substr(s+n,l-n,"}");
+	/* nn = parse_substr(s+n,l-n,"}"); */
+	if(!nn) return 0;
+	n += nn;
+	nn = parse_c_blanks_and_comments(s+n,l-n);
+	n += nn;
+	nn = parse_substr(s+n,l-n,";");
+	if(!nn) return 0;
+	n += nn;
+	WARN;
+	return n;
+
+}
+
+size_t parse_c_preproc_line(const char*s, size_t l)
+{
+	size_t n = 0;
+	size_t nn = parse_c_blanks_and_comments(s+n,l-n);
+
+	n = parse_c_substr(s+nn,l-nn,"#");
+	if(!n)
+		return 0;
+	WARN; /* warn only once a '#' was actually matched, as the other parsers do */
+	n += nn;
+	n += parse_c_line(s+n,l-n);
+	return n;
+}
+
+size_t parse_c_enum_def(const char*s, size_t l)
+{
+	/* size_t n = parse_substr(s,l,"enum"),nn = 0; */
+	size_t n = parse_c_type_kw(s,l,"enum"),nn = 0;
+
+	if(!n) return 0;
+	nn = parse_c_blanks_and_comments(s+n,l-n);
+	if(!nn) return 0;
+	n += nn;
+       	nn = parse_c_identifier(s+n,l-n);
+	if(!nn) return 0;
+	n += nn;
+	nn = parse_c_blanks_and_comments(s+n,l-n);
+	n += nn;
+	nn = parse_substr(s+n,l-n,"{");
+	if(!nn) return 0;
+	n += nn;
+	nn = parse_c_blanks_and_comments(s+n,l-n);
+	n += nn;
+	/* FIXME: identifier declarations */
+	/* nn = parse_c_var_decls(s+n,l-n); */
+	nn = parse_c_enum_decls(s+n,l-n);
+	if(!nn) return 0;
+	n += nn;
+	nn = parse_c_substr(s+n,l-n,"};");
+	if(!nn) return 0;
+	n += nn;
+	nn = parse_c_blanks_and_comments(s+n,l-n);
+	n += nn;
+	WARN;
+	return n;
+}
+
+size_t parse_c_special(const char*s, size_t l)
+{
+	/* FIXME: UNFINISHED */
+	const char *el = "extern \"C\" {";
+	size_t n = 0,ell = strlen(el);
+
+	if(l>=ell && strncmp(s,el,ell)==0)
+		n = ell;
+	if(n)
+	{
+		n += parse_c_line(s+n,l-n);
+		goto ok;
+	}
+	if(l>0 && s[0]=='}') /* FIXME: this is as a complement to 'extern "C"'  */
+	{
+		n = 1;
+		goto ok;
+	}
+ok:
+	if(n)WARN;
+	return n;
+}
+
+size_t parse_c_typedef(const char*s, size_t l)
+{
+	const char *ts = "typedef";
+	size_t n = 0,nn = 0,sl = strlen(ts);
+
+	if(l>=sl && strncmp(s,ts,sl)==0)
+		n = sl;
+	if(!n)
+ 	      return 0;
+	nn = parse_c_blanks_and_comments(s+n,l-n);
+	if(!nn)
+		return 0;
+	n += nn;
+	nn = parse_c_typename(s+n,l-n);
+	if(!nn)
+		return 0;
+	/* printf("nn at %d:%c%c\n",nn,s[n],s[n+1]); */
+	n += nn;
+	nn = parse_c_blanks_and_comments(s+n,l-n);
+	n += nn;
+	/* WARN; INSPECT; */
+	if(l-n<1 || s[n]!=';')
+		return 0;
+	/* FIXME: simplifications */
+	n += 1;
+	WARN; INSPECT;
+	return n;
+}
+
+size_t parse_c_multiline_lines(const char*s, size_t l)
+{
+	size_t n = parse_c_line(s,l),nn = n;
+	
+	while(n>1 && ( s[nn-2]=='\\' || s[nn-1]=='\\' ) && l-nn>0)
+	{
+		n = parse_c_line(s+nn,l-nn);
+		nn += n;
+	}
+	if(nn)n = nn;
+	/* INSPECT; */
+	if(n)WARN;
+	return n;
+}
+
+size_t parse_c_prepcode(const char*s, size_t l)
+{
+	size_t n = 0;
+
+	if(l>0 && s[0]=='#')
+	{
+		n++;
+		n += parse_c_multiline_lines(s+1,l-n);
+	}
+	if(!n)
+		goto no;
+	WARN;
+no:
+	return n;
+}
+
+size_t parse_c_header(const char*s, size_t l, pt_t*pt)
+{
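+	/* Top-level driver: after the INITIALIZE event it repeatedly tries
+	 * each construct parser (blanks, preprocessor code, extern "C"
+	 * glue, comments, typedefs, struct/enum definitions, function and
+	 * variable declarations) until the buffer is consumed or no parser
+	 * advances, then emits FINALIZE. */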
+	size_t tl = 0,ll = 0;
+#if 0
+	while((ll = parse_c_line(s+tl,l-tl))>0)
+		tl += ll;
+	//DEBUG("%d\n",tl);
+#else
+	CCFPAPPLY(pt,INITIALIZE,NULL,0);
+
+	if(l-tl>0)
+	do
+	{
+		ll = 0;
+		ll += parse_c_blanks(s+tl+ll,l-tl-ll);
+	 	ll += parse_c_prepcode(s+tl+ll,l-tl-ll);
+		ll += parse_c_special(s+tl+ll,l-tl-ll);
+		ll += parse_c_comment(s+tl+ll,l-tl-ll);
+		ll += parse_c_typedef(s+tl+ll,l-tl-ll);
+		ll += parse_c_struct_def(s+tl+ll,l-tl-ll);
+		ll += parse_c_enum_def(s+tl+ll,l-tl-ll);
+		ll += parse_c_preproc_line(s+tl+ll,l-tl-ll);
+		ll += parse_c_func_decl(s+tl+ll,l-tl-ll,pt);
+		ll += parse_c_var_decl(s+tl+ll,l-tl-ll);
+	/*	
+		ll += parse_c_func_def(s+tl+ll,l-tl-ll,pt);
+		*/
+		tl += ll;
+		if(ll==0 && tl<l)
+		{
+			size_t bl=CH2_MIN(100,l-tl);
+			char buf[bl];
+			strncpy(buf,s+tl,bl-1);
+			buf[bl-1]='\0';
+			INFO(stderr,"terminating prematurely at char %zu / %zu:\n...\n\"%s\"\n...\n!\n",tl,l,buf);
+		}
+	}
+	while(l-tl>0 && ll>0);
+	CCFPAPPLY(pt,FINALIZE,NULL,0);
+#endif
+	return tl;
+}
+
+void * do_realloc(void *p, size_t n)
+{
+	return realloc(p,n);
+}
+
+int main(void)
+{
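+	/* Reads all of standard input (expected to be <rsb.h>) into a
+	 * geometrically grown buffer, prints the Fortran module prologue,
+	 * and runs the header parser, which prints the interfaces through
+	 * the c2i sink; the exit status reflects whether the whole header
+	 * was consumed. */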
+	char *s = NULL;
+	size_t rb = 0,sb = 0,cs = BS,pc = 0;
+	ssize_t rn = -1;
+	int ret = 0;
+	pt_t pt = { c2i,NULL,0,CCTMAX,1 };
+
+	/* bzero(&pt,sizeof(pt)); */
+
+	if((s = do_realloc(s,cs)))
+		rb += cs,cs *= 2;
+
+	/* slurp all of stdin into s, doubling the buffer capacity as needed */
+	while(rn)
+	{
+		while( rb>0 && (rn = read(0,s+sb,rb))>0 )
+			sb += rn,rb -= rn;
+
+		/* printf("REALLO %d+%d %d: %p(%d)\n",sb,rb,rn,s,cs); */
+		if(rn && rb==0)
+			if((s = do_realloc(s,sb+cs)))
+				rb += cs, cs *= 2;
+	}
+
+	PRINTF("!> @file.\n");
+	PRINTF("!! @brief Header file automatically generated from <rsb.h>, offering ISO-C-BINDING interfaces to <rsb.h>'s functions.\n");
+	PRINTF("!! Defines \\c MODULE \\c rsb.\n");
+	PRINTF("!! For examples of usage, see Fortran examples in \\ref rsb_doc_examples.\n");
+	PRINTF("!! The official documentation is that of <rsb.h>.\n");
+	PRINTF("!! Make sure you are using a modern Fortran compiler.\n\n");
+	/* PRINTF("!> @cond INNERDOC\n"); */
+	PRINTF("!DEC$IF .NOT. DEFINED (RSB_FORTRAN_HEADER)\n!DEC$DEFINE RSB_FORTRAN_HEADER\n\n");
+	if((pc = parse_c_header(s,sb,&pt))==sb)
+	{ INFO(stderr,"header file parsed (%d chars parsed).\n",pc);ret = 0; }
+	else
+	{ INFO(stderr,"header file NOT parsed (%d chars parsed out of %d).\n",pc,sb);ret = 1; }
+	FDUMP()
+	PRINTF("\n!DEC$ENDIF\n\n");
+	/* PRINTF("!> @endcond\n"); */
+	goto err;
+err:
+	if(s)
+		free(s);
+	return ret;
+}
+/* @endcond */
diff --git a/compile b/compile
new file mode 100755
index 0000000..862a14e
--- /dev/null
+++ b/compile
@@ -0,0 +1,343 @@
+#! /bin/sh
+# Wrapper for compilers which do not understand '-c -o'.
+
+scriptversion=2012-03-05.13; # UTC
+
+# Copyright (C) 1999, 2000, 2003, 2004, 2005, 2009, 2010, 2012 Free
+# Software Foundation, Inc.
+# Written by Tom Tromey <tromey at cygnus.com>.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# This file is maintained in Automake, please report
+# bugs to <bug-automake at gnu.org> or send patches to
+# <automake-patches at gnu.org>.
+
+nl='
+'
+
+# We need space, tab and new line, in precisely that order.  Quoting is
+# there to prevent tools from complaining about whitespace usage.
+IFS=" ""	$nl"
+
+file_conv=
+
+# func_file_conv build_file lazy
+# Convert a $build file to $host form and store it in $file
+# Currently only supports Windows hosts. If the determined conversion
+# type is listed in (the comma separated) LAZY, no conversion will
+# take place.
+func_file_conv ()
+{
+  file=$1
+  case $file in
+    / | /[!/]*) # absolute file, and not a UNC file
+      if test -z "$file_conv"; then
+	# lazily determine how to convert abs files
+	case `uname -s` in
+	  MINGW*)
+	    file_conv=mingw
+	    ;;
+	  CYGWIN*)
+	    file_conv=cygwin
+	    ;;
+	  *)
+	    file_conv=wine
+	    ;;
+	esac
+      fi
+      case $file_conv/,$2, in
+	*,$file_conv,*)
+	  ;;
+	mingw/*)
+	  file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'`
+	  ;;
+	cygwin/*)
+	  file=`cygpath -m "$file" || echo "$file"`
+	  ;;
+	wine/*)
+	  file=`winepath -w "$file" || echo "$file"`
+	  ;;
+      esac
+      ;;
+  esac
+}
+
+# func_cl_dashL linkdir
+# Make cl look for libraries in LINKDIR
+func_cl_dashL ()
+{
+  func_file_conv "$1"
+  if test -z "$lib_path"; then
+    lib_path=$file
+  else
+    lib_path="$lib_path;$file"
+  fi
+  linker_opts="$linker_opts -LIBPATH:$file"
+}
+
+# func_cl_dashl library
+# Do a library search-path lookup for cl
+func_cl_dashl ()
+{
+  lib=$1
+  found=no
+  save_IFS=$IFS
+  IFS=';'
+  for dir in $lib_path $LIB
+  do
+    IFS=$save_IFS
+    if $shared && test -f "$dir/$lib.dll.lib"; then
+      found=yes
+      lib=$dir/$lib.dll.lib
+      break
+    fi
+    if test -f "$dir/$lib.lib"; then
+      found=yes
+      lib=$dir/$lib.lib
+      break
+    fi
+  done
+  IFS=$save_IFS
+
+  if test "$found" != yes; then
+    lib=$lib.lib
+  fi
+}
+
+# func_cl_wrapper cl arg...
+# Adjust compile command to suit cl
+func_cl_wrapper ()
+{
+  # Assume a capable shell
+  lib_path=
+  shared=:
+  linker_opts=
+  for arg
+  do
+    if test -n "$eat"; then
+      eat=
+    else
+      case $1 in
+	-o)
+	  # configure might choose to run compile as 'compile cc -o foo foo.c'.
+	  eat=1
+	  case $2 in
+	    *.o | *.[oO][bB][jJ])
+	      func_file_conv "$2"
+	      set x "$@" -Fo"$file"
+	      shift
+	      ;;
+	    *)
+	      func_file_conv "$2"
+	      set x "$@" -Fe"$file"
+	      shift
+	      ;;
+	  esac
+	  ;;
+	-I)
+	  eat=1
+	  func_file_conv "$2" mingw
+	  set x "$@" -I"$file"
+	  shift
+	  ;;
+	-I*)
+	  func_file_conv "${1#-I}" mingw
+	  set x "$@" -I"$file"
+	  shift
+	  ;;
+	-l)
+	  eat=1
+	  func_cl_dashl "$2"
+	  set x "$@" "$lib"
+	  shift
+	  ;;
+	-l*)
+	  func_cl_dashl "${1#-l}"
+	  set x "$@" "$lib"
+	  shift
+	  ;;
+	-L)
+	  eat=1
+	  func_cl_dashL "$2"
+	  ;;
+	-L*)
+	  func_cl_dashL "${1#-L}"
+	  ;;
+	-static)
+	  shared=false
+	  ;;
+	-Wl,*)
+	  arg=${1#-Wl,}
+	  save_ifs="$IFS"; IFS=','
+	  for flag in $arg; do
+	    IFS="$save_ifs"
+	    linker_opts="$linker_opts $flag"
+	  done
+	  IFS="$save_ifs"
+	  ;;
+	-Xlinker)
+	  eat=1
+	  linker_opts="$linker_opts $2"
+	  ;;
+	-*)
+	  set x "$@" "$1"
+	  shift
+	  ;;
+	*.cc | *.CC | *.cxx | *.CXX | *.[cC]++)
+	  func_file_conv "$1"
+	  set x "$@" -Tp"$file"
+	  shift
+	  ;;
+	*.c | *.cpp | *.CPP | *.lib | *.LIB | *.Lib | *.OBJ | *.obj | *.[oO])
+	  func_file_conv "$1" mingw
+	  set x "$@" "$file"
+	  shift
+	  ;;
+	*)
+	  set x "$@" "$1"
+	  shift
+	  ;;
+      esac
+    fi
+    shift
+  done
+  if test -n "$linker_opts"; then
+    linker_opts="-link$linker_opts"
+  fi
+  exec "$@" $linker_opts
+  exit 1
+}
+
+eat=
+
+case $1 in
+  '')
+     echo "$0: No command.  Try '$0 --help' for more information." 1>&2
+     exit 1;
+     ;;
+  -h | --h*)
+    cat <<\EOF
+Usage: compile [--help] [--version] PROGRAM [ARGS]
+
+Wrapper for compilers which do not understand '-c -o'.
+Remove '-o dest.o' from ARGS, run PROGRAM with the remaining
+arguments, and rename the output as expected.
+
+If you are trying to build a whole package this is not the
+right script to run: please start by reading the file 'INSTALL'.
+
+Report bugs to <bug-automake at gnu.org>.
+EOF
+    exit $?
+    ;;
+  -v | --v*)
+    echo "compile $scriptversion"
+    exit $?
+    ;;
+  cl | *[/\\]cl | cl.exe | *[/\\]cl.exe )
+    func_cl_wrapper "$@"      # Doesn't return...
+    ;;
+esac
+
+ofile=
+cfile=
+
+for arg
+do
+  if test -n "$eat"; then
+    eat=
+  else
+    case $1 in
+      -o)
+	# configure might choose to run compile as 'compile cc -o foo foo.c'.
+	# So we strip '-o arg' only if arg is an object.
+	eat=1
+	case $2 in
+	  *.o | *.obj)
+	    ofile=$2
+	    ;;
+	  *)
+	    set x "$@" -o "$2"
+	    shift
+	    ;;
+	esac
+	;;
+      *.c)
+	cfile=$1
+	set x "$@" "$1"
+	shift
+	;;
+      *)
+	set x "$@" "$1"
+	shift
+	;;
+    esac
+  fi
+  shift
+done
+
+if test -z "$ofile" || test -z "$cfile"; then
+  # If no '-o' option was seen then we might have been invoked from a
+  # pattern rule where we don't need one.  That is ok -- this is a
+  # normal compilation that the losing compiler can handle.  If no
+  # '.c' file was seen then we are probably linking.  That is also
+  # ok.
+  exec "$@"
+fi
+
+# Name of file we expect compiler to create.
+cofile=`echo "$cfile" | sed 's|^.*[\\/]||; s|^[a-zA-Z]:||; s/\.c$/.o/'`
+
+# Create the lock directory.
+# Note: use '[/\\:.-]' here to ensure that we don't use the same name
+# that we are using for the .o file.  Also, base the name on the expected
+# object file name, since that is what matters with a parallel build.
+lockdir=`echo "$cofile" | sed -e 's|[/\\:.-]|_|g'`.d
+while true; do
+  if mkdir "$lockdir" >/dev/null 2>&1; then
+    break
+  fi
+  sleep 1
+done
+# FIXME: race condition here if user kills between mkdir and trap.
+trap "rmdir '$lockdir'; exit 1" 1 2 15
+
+# Run the compile.
+"$@"
+ret=$?
+
+if test -f "$cofile"; then
+  test "$cofile" = "$ofile" || mv "$cofile" "$ofile"
+elif test -f "${cofile}bj"; then
+  test "${cofile}bj" = "$ofile" || mv "${cofile}bj" "$ofile"
+fi
+
+rmdir "$lockdir"
+exit $ret
+
+# Local Variables:
+# mode: shell-script
+# sh-indentation: 2
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
+# End:
diff --git a/config.guess b/config.guess
new file mode 100755
index 0000000..d622a44
--- /dev/null
+++ b/config.guess
@@ -0,0 +1,1530 @@
+#! /bin/sh
+# Attempt to guess a canonical system name.
+#   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+#   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+#   2011, 2012 Free Software Foundation, Inc.
+
+timestamp='2012-02-10'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+
+# Originally written by Per Bothner.  Please send patches (context
+# diff format) to <config-patches at gnu.org> and include a ChangeLog
+# entry.
+#
+# This script attempts to guess a canonical system name similar to
+# config.sub.  If it succeeds, it prints the system name on stdout, and
+# exits with 0.  Otherwise, it exits with 1.
+#
+# You can get the latest version of this script from:
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION]
+
+Output the configuration name of the system \`$me' is run on.
+
+Operation modes:
+  -h, --help         print this help, then exit
+  -t, --time-stamp   print date of last modification, then exit
+  -v, --version      print version number, then exit
+
+Report bugs and patches to <config-patches at gnu.org>."
+
+version="\
+GNU config.guess ($timestamp)
+
+Originally written by Per Bothner.
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
+Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions.  There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+  case $1 in
+    --time-stamp | --time* | -t )
+       echo "$timestamp" ; exit ;;
+    --version | -v )
+       echo "$version" ; exit ;;
+    --help | --h* | -h )
+       echo "$usage"; exit ;;
+    -- )     # Stop option processing
+       shift; break ;;
+    - )	# Use stdin as input.
+       break ;;
+    -* )
+       echo "$me: invalid option $1$help" >&2
+       exit 1 ;;
+    * )
+       break ;;
+  esac
+done
+
+if test $# != 0; then
+  echo "$me: too many arguments$help" >&2
+  exit 1
+fi
+
+trap 'exit 1' 1 2 15
+
+# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
+# compiler to aid in system detection is discouraged as it requires
+# temporary files to be created and, as you can see below, it is a
+# headache to deal with in a portable fashion.
+
+# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
+# use `HOST_CC' if defined, but it is deprecated.
+
+# Portable tmp directory creation inspired by the Autoconf team.
+
+set_cc_for_build='
+trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ;
+trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ;
+: ${TMPDIR=/tmp} ;
+ { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
+ { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } ||
+ { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } ||
+ { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ;
+dummy=$tmp/dummy ;
+tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ;
+case $CC_FOR_BUILD,$HOST_CC,$CC in
+ ,,)    echo "int x;" > $dummy.c ;
+	for c in cc gcc c89 c99 ; do
+	  if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then
+	     CC_FOR_BUILD="$c"; break ;
+	  fi ;
+	done ;
+	if test x"$CC_FOR_BUILD" = x ; then
+	  CC_FOR_BUILD=no_compiler_found ;
+	fi
+	;;
+ ,,*)   CC_FOR_BUILD=$CC ;;
+ ,*,*)  CC_FOR_BUILD=$HOST_CC ;;
+esac ; set_cc_for_build= ;'
+
+# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
+# (ghazi at noc.rutgers.edu 1994-08-24)
+if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
+	PATH=$PATH:/.attbin ; export PATH
+fi
+
+UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
+UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
+UNAME_SYSTEM=`(uname -s) 2>/dev/null`  || UNAME_SYSTEM=unknown
+UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
+
+# Note: order is significant - the case branches are not exclusive.
+
+case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
+    *:NetBSD:*:*)
+	# NetBSD (nbsd) targets should (where applicable) match one or
+	# more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*,
+	# *-*-netbsdecoff* and *-*-netbsd*.  For targets that recently
+	# switched to ELF, *-*-netbsd* would select the old
+	# object file format.  This provides both forward
+	# compatibility and a consistent mechanism for selecting the
+	# object file format.
+	#
+	# Note: NetBSD doesn't particularly care about the vendor
+	# portion of the name.  We always set it to "unknown".
+	sysctl="sysctl -n hw.machine_arch"
+	UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
+	    /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
+	case "${UNAME_MACHINE_ARCH}" in
+	    armeb) machine=armeb-unknown ;;
+	    arm*) machine=arm-unknown ;;
+	    sh3el) machine=shl-unknown ;;
+	    sh3eb) machine=sh-unknown ;;
+	    sh5el) machine=sh5le-unknown ;;
+	    *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
+	esac
+	# The Operating System including object format, if it has switched
+	# to ELF recently, or will in the future.
+	case "${UNAME_MACHINE_ARCH}" in
+	    arm*|i386|m68k|ns32k|sh3*|sparc|vax)
+		eval $set_cc_for_build
+		if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
+			| grep -q __ELF__
+		then
+		    # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
+		    # Return netbsd for either.  FIX?
+		    os=netbsd
+		else
+		    os=netbsdelf
+		fi
+		;;
+	    *)
+		os=netbsd
+		;;
+	esac
+	# The OS release
+	# Debian GNU/NetBSD machines have a different userland, and
+	# thus, need a distinct triplet. However, they do not need
+	# kernel version information, so it can be replaced with a
+	# suitable tag, in the style of linux-gnu.
+	case "${UNAME_VERSION}" in
+	    Debian*)
+		release='-gnu'
+		;;
+	    *)
+		release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
+		;;
+	esac
+	# Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
+	# contains redundant information, the shorter form:
+	# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
+	echo "${machine}-${os}${release}"
+	exit ;;
+    *:OpenBSD:*:*)
+	UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
+	echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
+	exit ;;
+    *:ekkoBSD:*:*)
+	echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
+	exit ;;
+    *:SolidBSD:*:*)
+	echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE}
+	exit ;;
+    macppc:MirBSD:*:*)
+	echo powerpc-unknown-mirbsd${UNAME_RELEASE}
+	exit ;;
+    *:MirBSD:*:*)
+	echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
+	exit ;;
+    alpha:OSF1:*:*)
+	case $UNAME_RELEASE in
+	*4.0)
+		UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
+		;;
+	*5.*)
+		UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
+		;;
+	esac
+	# According to Compaq, /usr/sbin/psrinfo has been available on
+	# OSF/1 and Tru64 systems produced since 1995.  I hope that
+	# covers most systems running today.  This code pipes the CPU
+	# types through head -n 1, so we only detect the type of CPU 0.
+	ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^  The alpha \(.*\) processor.*$/\1/p' | head -n 1`
+	case "$ALPHA_CPU_TYPE" in
+	    "EV4 (21064)")
+		UNAME_MACHINE="alpha" ;;
+	    "EV4.5 (21064)")
+		UNAME_MACHINE="alpha" ;;
+	    "LCA4 (21066/21068)")
+		UNAME_MACHINE="alpha" ;;
+	    "EV5 (21164)")
+		UNAME_MACHINE="alphaev5" ;;
+	    "EV5.6 (21164A)")
+		UNAME_MACHINE="alphaev56" ;;
+	    "EV5.6 (21164PC)")
+		UNAME_MACHINE="alphapca56" ;;
+	    "EV5.7 (21164PC)")
+		UNAME_MACHINE="alphapca57" ;;
+	    "EV6 (21264)")
+		UNAME_MACHINE="alphaev6" ;;
+	    "EV6.7 (21264A)")
+		UNAME_MACHINE="alphaev67" ;;
+	    "EV6.8CB (21264C)")
+		UNAME_MACHINE="alphaev68" ;;
+	    "EV6.8AL (21264B)")
+		UNAME_MACHINE="alphaev68" ;;
+	    "EV6.8CX (21264D)")
+		UNAME_MACHINE="alphaev68" ;;
+	    "EV6.9A (21264/EV69A)")
+		UNAME_MACHINE="alphaev69" ;;
+	    "EV7 (21364)")
+		UNAME_MACHINE="alphaev7" ;;
+	    "EV7.9 (21364A)")
+		UNAME_MACHINE="alphaev79" ;;
+	esac
+	# A Pn.n version is a patched version.
+	# A Vn.n version is a released version.
+	# A Tn.n version is a released field test version.
+	# A Xn.n version is an unreleased experimental baselevel.
+	# 1.2 uses "1.2" for uname -r.
+	echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+	# Reset EXIT trap before exiting to avoid spurious non-zero exit code.
+	exitcode=$?
+	trap '' 0
+	exit $exitcode ;;
+    Alpha\ *:Windows_NT*:*)
+	# How do we know it's Interix rather than the generic POSIX subsystem?
+	# Should we change UNAME_MACHINE based on the output of uname instead
+	# of the specific Alpha model?
+	echo alpha-pc-interix
+	exit ;;
+    21064:Windows_NT:50:3)
+	echo alpha-dec-winnt3.5
+	exit ;;
+    Amiga*:UNIX_System_V:4.0:*)
+	echo m68k-unknown-sysv4
+	exit ;;
+    *:[Aa]miga[Oo][Ss]:*:*)
+	echo ${UNAME_MACHINE}-unknown-amigaos
+	exit ;;
+    *:[Mm]orph[Oo][Ss]:*:*)
+	echo ${UNAME_MACHINE}-unknown-morphos
+	exit ;;
+    *:OS/390:*:*)
+	echo i370-ibm-openedition
+	exit ;;
+    *:z/VM:*:*)
+	echo s390-ibm-zvmoe
+	exit ;;
+    *:OS400:*:*)
+	echo powerpc-ibm-os400
+	exit ;;
+    arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
+	echo arm-acorn-riscix${UNAME_RELEASE}
+	exit ;;
+    arm:riscos:*:*|arm:RISCOS:*:*)
+	echo arm-unknown-riscos
+	exit ;;
+    SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
+	echo hppa1.1-hitachi-hiuxmpp
+	exit ;;
+    Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
+	# akee at wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
+	if test "`(/bin/universe) 2>/dev/null`" = att ; then
+		echo pyramid-pyramid-sysv3
+	else
+		echo pyramid-pyramid-bsd
+	fi
+	exit ;;
+    NILE*:*:*:dcosx)
+	echo pyramid-pyramid-svr4
+	exit ;;
+    DRS?6000:unix:4.0:6*)
+	echo sparc-icl-nx6
+	exit ;;
+    DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
+	case `/usr/bin/uname -p` in
+	    sparc) echo sparc-icl-nx7; exit ;;
+	esac ;;
+    s390x:SunOS:*:*)
+	echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	exit ;;
+    sun4H:SunOS:5.*:*)
+	echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	exit ;;
+    sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
+	echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	exit ;;
+    i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*)
+	echo i386-pc-auroraux${UNAME_RELEASE}
+	exit ;;
+    i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
+	eval $set_cc_for_build
+	SUN_ARCH="i386"
+	# If there is a compiler, see if it is configured for 64-bit objects.
+	# Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
+	# This test works for both compilers.
+	if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+	    if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
+		(CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+		grep IS_64BIT_ARCH >/dev/null
+	    then
+		SUN_ARCH="x86_64"
+	    fi
+	fi
+	echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	exit ;;
+    sun4*:SunOS:6*:*)
+	# According to config.sub, this is the proper way to canonicalize
+	# SunOS6.  Hard to guess exactly what SunOS6 will be like, but
+	# it's likely to be more like Solaris than SunOS4.
+	echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	exit ;;
+    sun4*:SunOS:*:*)
+	case "`/usr/bin/arch -k`" in
+	    Series*|S4*)
+		UNAME_RELEASE=`uname -v`
+		;;
+	esac
+	# Japanese Language versions have a version number like `4.1.3-JL'.
+	echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
+	exit ;;
+    sun3*:SunOS:*:*)
+	echo m68k-sun-sunos${UNAME_RELEASE}
+	exit ;;
+    sun*:*:4.2BSD:*)
+	UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
+	test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
+	case "`/bin/arch`" in
+	    sun3)
+		echo m68k-sun-sunos${UNAME_RELEASE}
+		;;
+	    sun4)
+		echo sparc-sun-sunos${UNAME_RELEASE}
+		;;
+	esac
+	exit ;;
+    aushp:SunOS:*:*)
+	echo sparc-auspex-sunos${UNAME_RELEASE}
+	exit ;;
+    # The situation for MiNT is a little confusing.  The machine name
+    # can be virtually everything (everything which is not
+    # "atarist" or "atariste" at least should have a processor
+    # > m68000).  The system name ranges from "MiNT" over "FreeMiNT"
+    # to the lowercase version "mint" (or "freemint").  Finally
+    # the system name "TOS" denotes a system which is actually not
+    # MiNT.  But MiNT is downward compatible to TOS, so this should
+    # be no problem.
+    atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
+	echo m68k-atari-mint${UNAME_RELEASE}
+	exit ;;
+    atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
+	echo m68k-atari-mint${UNAME_RELEASE}
+	exit ;;
+    *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
+	echo m68k-atari-mint${UNAME_RELEASE}
+	exit ;;
+    milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
+	echo m68k-milan-mint${UNAME_RELEASE}
+	exit ;;
+    hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
+	echo m68k-hades-mint${UNAME_RELEASE}
+	exit ;;
+    *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
+	echo m68k-unknown-mint${UNAME_RELEASE}
+	exit ;;
+    m68k:machten:*:*)
+	echo m68k-apple-machten${UNAME_RELEASE}
+	exit ;;
+    powerpc:machten:*:*)
+	echo powerpc-apple-machten${UNAME_RELEASE}
+	exit ;;
+    RISC*:Mach:*:*)
+	echo mips-dec-mach_bsd4.3
+	exit ;;
+    RISC*:ULTRIX:*:*)
+	echo mips-dec-ultrix${UNAME_RELEASE}
+	exit ;;
+    VAX*:ULTRIX*:*:*)
+	echo vax-dec-ultrix${UNAME_RELEASE}
+	exit ;;
+    2020:CLIX:*:* | 2430:CLIX:*:*)
+	echo clipper-intergraph-clix${UNAME_RELEASE}
+	exit ;;
+    mips:*:*:UMIPS | mips:*:*:RISCos)
+	eval $set_cc_for_build
+	sed 's/^	//' << EOF >$dummy.c
+#ifdef __cplusplus
+#include <stdio.h>  /* for printf() prototype */
+	int main (int argc, char *argv[]) {
+#else
+	int main (argc, argv) int argc; char *argv[]; {
+#endif
+	#if defined (host_mips) && defined (MIPSEB)
+	#if defined (SYSTYPE_SYSV)
+	  printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
+	#endif
+	#if defined (SYSTYPE_SVR4)
+	  printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
+	#endif
+	#if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
+	  printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
+	#endif
+	#endif
+	  exit (-1);
+	}
+EOF
+	$CC_FOR_BUILD -o $dummy $dummy.c &&
+	  dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` &&
+	  SYSTEM_NAME=`$dummy $dummyarg` &&
+	    { echo "$SYSTEM_NAME"; exit; }
+	echo mips-mips-riscos${UNAME_RELEASE}
+	exit ;;
+    Motorola:PowerMAX_OS:*:*)
+	echo powerpc-motorola-powermax
+	exit ;;
+    Motorola:*:4.3:PL8-*)
+	echo powerpc-harris-powermax
+	exit ;;
+    Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
+	echo powerpc-harris-powermax
+	exit ;;
+    Night_Hawk:Power_UNIX:*:*)
+	echo powerpc-harris-powerunix
+	exit ;;
+    m88k:CX/UX:7*:*)
+	echo m88k-harris-cxux7
+	exit ;;
+    m88k:*:4*:R4*)
+	echo m88k-motorola-sysv4
+	exit ;;
+    m88k:*:3*:R3*)
+	echo m88k-motorola-sysv3
+	exit ;;
+    AViiON:dgux:*:*)
+	# DG/UX returns AViiON for all architectures
+	UNAME_PROCESSOR=`/usr/bin/uname -p`
+	if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
+	then
+	    if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
+	       [ ${TARGET_BINARY_INTERFACE}x = x ]
+	    then
+		echo m88k-dg-dgux${UNAME_RELEASE}
+	    else
+		echo m88k-dg-dguxbcs${UNAME_RELEASE}
+	    fi
+	else
+	    echo i586-dg-dgux${UNAME_RELEASE}
+	fi
+	exit ;;
+    M88*:DolphinOS:*:*)	# DolphinOS (SVR3)
+	echo m88k-dolphin-sysv3
+	exit ;;
+    M88*:*:R3*:*)
+	# Delta 88k system running SVR3
+	echo m88k-motorola-sysv3
+	exit ;;
+    XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
+	echo m88k-tektronix-sysv3
+	exit ;;
+    Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
+	echo m68k-tektronix-bsd
+	exit ;;
+    *:IRIX*:*:*)
+	echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
+	exit ;;
+    ????????:AIX?:[12].1:2)   # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
+	echo romp-ibm-aix     # uname -m gives an 8 hex-code CPU id
+	exit ;;               # Note that: echo "'`uname -s`'" gives 'AIX '
+    i*86:AIX:*:*)
+	echo i386-ibm-aix
+	exit ;;
+    ia64:AIX:*:*)
+	if [ -x /usr/bin/oslevel ] ; then
+		IBM_REV=`/usr/bin/oslevel`
+	else
+		IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+	fi
+	echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
+	exit ;;
+    *:AIX:2:3)
+	if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
+		eval $set_cc_for_build
+		sed 's/^		//' << EOF >$dummy.c
+		#include <sys/systemcfg.h>
+
+		main()
+			{
+			if (!__power_pc())
+				exit(1);
+			puts("powerpc-ibm-aix3.2.5");
+			exit(0);
+			}
+EOF
+		if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy`
+		then
+			echo "$SYSTEM_NAME"
+		else
+			echo rs6000-ibm-aix3.2.5
+		fi
+	elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
+		echo rs6000-ibm-aix3.2.4
+	else
+		echo rs6000-ibm-aix3.2
+	fi
+	exit ;;
+    *:AIX:*:[4567])
+	IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
+	if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
+		IBM_ARCH=rs6000
+	else
+		IBM_ARCH=powerpc
+	fi
+	if [ -x /usr/bin/oslevel ] ; then
+		IBM_REV=`/usr/bin/oslevel`
+	else
+		IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+	fi
+	echo ${IBM_ARCH}-ibm-aix${IBM_REV}
+	exit ;;
+    *:AIX:*:*)
+	echo rs6000-ibm-aix
+	exit ;;
+    ibmrt:4.4BSD:*|romp-ibm:BSD:*)
+	echo romp-ibm-bsd4.4
+	exit ;;
+    ibmrt:*BSD:*|romp-ibm:BSD:*)            # covers RT/PC BSD and
+	echo romp-ibm-bsd${UNAME_RELEASE}   # 4.3 with uname added to
+	exit ;;                             # report: romp-ibm BSD 4.3
+    *:BOSX:*:*)
+	echo rs6000-bull-bosx
+	exit ;;
+    DPX/2?00:B.O.S.:*:*)
+	echo m68k-bull-sysv3
+	exit ;;
+    9000/[34]??:4.3bsd:1.*:*)
+	echo m68k-hp-bsd
+	exit ;;
+    hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
+	echo m68k-hp-bsd4.4
+	exit ;;
+    9000/[34678]??:HP-UX:*:*)
+	HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+	case "${UNAME_MACHINE}" in
+	    9000/31? )            HP_ARCH=m68000 ;;
+	    9000/[34]?? )         HP_ARCH=m68k ;;
+	    9000/[678][0-9][0-9])
+		if [ -x /usr/bin/getconf ]; then
+		    sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
+		    sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
+		    case "${sc_cpu_version}" in
+		      523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
+		      528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
+		      532)                      # CPU_PA_RISC2_0
+			case "${sc_kernel_bits}" in
+			  32) HP_ARCH="hppa2.0n" ;;
+			  64) HP_ARCH="hppa2.0w" ;;
+			  '') HP_ARCH="hppa2.0" ;;   # HP-UX 10.20
+			esac ;;
+		    esac
+		fi
+		if [ "${HP_ARCH}" = "" ]; then
+		    eval $set_cc_for_build
+		    sed 's/^		//' << EOF >$dummy.c
+
+		#define _HPUX_SOURCE
+		#include <stdlib.h>
+		#include <unistd.h>
+
+		int main ()
+		{
+		#if defined(_SC_KERNEL_BITS)
+		    long bits = sysconf(_SC_KERNEL_BITS);
+		#endif
+		    long cpu  = sysconf (_SC_CPU_VERSION);
+
+		    switch (cpu)
+			{
+			case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
+			case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
+			case CPU_PA_RISC2_0:
+		#if defined(_SC_KERNEL_BITS)
+			    switch (bits)
+				{
+				case 64: puts ("hppa2.0w"); break;
+				case 32: puts ("hppa2.0n"); break;
+				default: puts ("hppa2.0"); break;
+				} break;
+		#else  /* !defined(_SC_KERNEL_BITS) */
+			    puts ("hppa2.0"); break;
+		#endif
+			default: puts ("hppa1.0"); break;
+			}
+		    exit (0);
+		}
+EOF
+		    (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
+		    test -z "$HP_ARCH" && HP_ARCH=hppa
+		fi ;;
+	esac
+	if [ ${HP_ARCH} = "hppa2.0w" ]
+	then
+	    eval $set_cc_for_build
+
+	    # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
+	    # 32-bit code.  hppa64-hp-hpux* has the same kernel and a compiler
+	    # generating 64-bit code.  GNU and HP use different nomenclature:
+	    #
+	    # $ CC_FOR_BUILD=cc ./config.guess
+	    # => hppa2.0w-hp-hpux11.23
+	    # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
+	    # => hppa64-hp-hpux11.23
+
+	    if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
+		grep -q __LP64__
+	    then
+		HP_ARCH="hppa2.0w"
+	    else
+		HP_ARCH="hppa64"
+	    fi
+	fi
+	echo ${HP_ARCH}-hp-hpux${HPUX_REV}
+	exit ;;
+    ia64:HP-UX:*:*)
+	HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+	echo ia64-hp-hpux${HPUX_REV}
+	exit ;;
+    3050*:HI-UX:*:*)
+	eval $set_cc_for_build
+	sed 's/^	//' << EOF >$dummy.c
+	#include <unistd.h>
+	int
+	main ()
+	{
+	  long cpu = sysconf (_SC_CPU_VERSION);
+	  /* The order matters, because CPU_IS_HP_MC68K erroneously returns
+	     true for CPU_PA_RISC1_0.  CPU_IS_PA_RISC returns correct
+	     results, however.  */
+	  if (CPU_IS_PA_RISC (cpu))
+	    {
+	      switch (cpu)
+		{
+		  case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
+		  case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
+		  case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
+		  default: puts ("hppa-hitachi-hiuxwe2"); break;
+		}
+	    }
+	  else if (CPU_IS_HP_MC68K (cpu))
+	    puts ("m68k-hitachi-hiuxwe2");
+	  else puts ("unknown-hitachi-hiuxwe2");
+	  exit (0);
+	}
+EOF
+	$CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` &&
+		{ echo "$SYSTEM_NAME"; exit; }
+	echo unknown-hitachi-hiuxwe2
+	exit ;;
+    9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
+	echo hppa1.1-hp-bsd
+	exit ;;
+    9000/8??:4.3bsd:*:*)
+	echo hppa1.0-hp-bsd
+	exit ;;
+    *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
+	echo hppa1.0-hp-mpeix
+	exit ;;
+    hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
+	echo hppa1.1-hp-osf
+	exit ;;
+    hp8??:OSF1:*:*)
+	echo hppa1.0-hp-osf
+	exit ;;
+    i*86:OSF1:*:*)
+	if [ -x /usr/sbin/sysversion ] ; then
+	    echo ${UNAME_MACHINE}-unknown-osf1mk
+	else
+	    echo ${UNAME_MACHINE}-unknown-osf1
+	fi
+	exit ;;
+    parisc*:Lites*:*:*)
+	echo hppa1.1-hp-lites
+	exit ;;
+    C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
+	echo c1-convex-bsd
+	exit ;;
+    C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
+	if getsysinfo -f scalar_acc
+	then echo c32-convex-bsd
+	else echo c2-convex-bsd
+	fi
+	exit ;;
+    C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
+	echo c34-convex-bsd
+	exit ;;
+    C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
+	echo c38-convex-bsd
+	exit ;;
+    C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
+	echo c4-convex-bsd
+	exit ;;
+    CRAY*Y-MP:*:*:*)
+	echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+	exit ;;
+    CRAY*[A-Z]90:*:*:*)
+	echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
+	| sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
+	      -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
+	      -e 's/\.[^.]*$/.X/'
+	exit ;;
+    CRAY*TS:*:*:*)
+	echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+	exit ;;
+    CRAY*T3E:*:*:*)
+	echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+	exit ;;
+    CRAY*SV1:*:*:*)
+	echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+	exit ;;
+    *:UNICOS/mp:*:*)
+	echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+	exit ;;
+    F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
+	FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+	FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+	FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
+	echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+	exit ;;
+    5000:UNIX_System_V:4.*:*)
+	FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+	FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
+	echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+	exit ;;
+    i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
+	echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
+	exit ;;
+    sparc*:BSD/OS:*:*)
+	echo sparc-unknown-bsdi${UNAME_RELEASE}
+	exit ;;
+    *:BSD/OS:*:*)
+	echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
+	exit ;;
+    *:FreeBSD:*:*)
+	UNAME_PROCESSOR=`/usr/bin/uname -p`
+	case ${UNAME_PROCESSOR} in
+	    amd64)
+		echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+	    *)
+		echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+	esac
+	exit ;;
+    i*:CYGWIN*:*)
+	echo ${UNAME_MACHINE}-pc-cygwin
+	exit ;;
+    *:MINGW*:*)
+	echo ${UNAME_MACHINE}-pc-mingw32
+	exit ;;
+    i*:MSYS*:*)
+	echo ${UNAME_MACHINE}-pc-msys
+	exit ;;
+    i*:windows32*:*)
+	# uname -m includes "-pc" on this system.
+	echo ${UNAME_MACHINE}-mingw32
+	exit ;;
+    i*:PW*:*)
+	echo ${UNAME_MACHINE}-pc-pw32
+	exit ;;
+    *:Interix*:*)
+	case ${UNAME_MACHINE} in
+	    x86)
+		echo i586-pc-interix${UNAME_RELEASE}
+		exit ;;
+	    authenticamd | genuineintel | EM64T)
+		echo x86_64-unknown-interix${UNAME_RELEASE}
+		exit ;;
+	    IA64)
+		echo ia64-unknown-interix${UNAME_RELEASE}
+		exit ;;
+	esac ;;
+    [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
+	echo i${UNAME_MACHINE}-pc-mks
+	exit ;;
+    8664:Windows_NT:*)
+	echo x86_64-pc-mks
+	exit ;;
+    i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
+	# How do we know it's Interix rather than the generic POSIX subsystem?
+	# It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
+	# set UNAME_MACHINE based on the output of uname instead of i386?
+	echo i586-pc-interix
+	exit ;;
+    i*:UWIN*:*)
+	echo ${UNAME_MACHINE}-pc-uwin
+	exit ;;
+    amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
+	echo x86_64-unknown-cygwin
+	exit ;;
+    p*:CYGWIN*:*)
+	echo powerpcle-unknown-cygwin
+	exit ;;
+    prep*:SunOS:5.*:*)
+	echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+	exit ;;
+    *:GNU:*:*)
+	# the GNU system
+	echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+	exit ;;
+    *:GNU/*:*:*)
+	# other systems with GNU libc and userland
+	echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu
+	exit ;;
+    i*86:Minix:*:*)
+	echo ${UNAME_MACHINE}-pc-minix
+	exit ;;
+    aarch64:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    aarch64_be:Linux:*:*)
+	UNAME_MACHINE=aarch64_be
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    alpha:Linux:*:*)
+	case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
+	  EV5)   UNAME_MACHINE=alphaev5 ;;
+	  EV56)  UNAME_MACHINE=alphaev56 ;;
+	  PCA56) UNAME_MACHINE=alphapca56 ;;
+	  PCA57) UNAME_MACHINE=alphapca56 ;;
+	  EV6)   UNAME_MACHINE=alphaev6 ;;
+	  EV67)  UNAME_MACHINE=alphaev67 ;;
+	  EV68*) UNAME_MACHINE=alphaev68 ;;
+	esac
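+	# If /bin/sh references the old ld.so.1 dynamic loader, mark the
+	# triplet with the historic "libc1" suffix (an illustrative note
+	# on the check below).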
+	objdump --private-headers /bin/sh | grep -q ld.so.1
+	if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
+	echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
+	exit ;;
+    arm*:Linux:*:*)
+	eval $set_cc_for_build
+	if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
+	    | grep -q __ARM_EABI__
+	then
+	    echo ${UNAME_MACHINE}-unknown-linux-gnu
+	else
+	    if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
+		| grep -q __ARM_PCS_VFP
+	    then
+		echo ${UNAME_MACHINE}-unknown-linux-gnueabi
+	    else
+		echo ${UNAME_MACHINE}-unknown-linux-gnueabihf
+	    fi
+	fi
+	exit ;;
+    avr32*:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    cris:Linux:*:*)
+	echo ${UNAME_MACHINE}-axis-linux-gnu
+	exit ;;
+    crisv32:Linux:*:*)
+	echo ${UNAME_MACHINE}-axis-linux-gnu
+	exit ;;
+    frv:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    hexagon:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    i*86:Linux:*:*)
+	LIBC=gnu
+	eval $set_cc_for_build
+	sed 's/^	//' << EOF >$dummy.c
+	#ifdef __dietlibc__
+	LIBC=dietlibc
+	#endif
+EOF
+	eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'`
+	echo "${UNAME_MACHINE}-pc-linux-${LIBC}"
+	exit ;;
+    ia64:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    m32r*:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    m68*:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    mips:Linux:*:* | mips64:Linux:*:*)
+	eval $set_cc_for_build
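+	# Probe the build compiler's endianness macros so big-endian
+	# targets report "mips"/"mips64" and little-endian ones the
+	# corresponding "...el" name.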
+	sed 's/^	//' << EOF >$dummy.c
+	#undef CPU
+	#undef ${UNAME_MACHINE}
+	#undef ${UNAME_MACHINE}el
+	#if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
+	CPU=${UNAME_MACHINE}el
+	#else
+	#if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
+	CPU=${UNAME_MACHINE}
+	#else
+	CPU=
+	#endif
+	#endif
+EOF
+	eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'`
+	test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
+	;;
+    or32:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    padre:Linux:*:*)
+	echo sparc-unknown-linux-gnu
+	exit ;;
+    parisc64:Linux:*:* | hppa64:Linux:*:*)
+	echo hppa64-unknown-linux-gnu
+	exit ;;
+    parisc:Linux:*:* | hppa:Linux:*:*)
+	# Look for CPU level
+	case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
+	  PA7*) echo hppa1.1-unknown-linux-gnu ;;
+	  PA8*) echo hppa2.0-unknown-linux-gnu ;;
+	  *)    echo hppa-unknown-linux-gnu ;;
+	esac
+	exit ;;
+    ppc64:Linux:*:*)
+	echo powerpc64-unknown-linux-gnu
+	exit ;;
+    ppc:Linux:*:*)
+	echo powerpc-unknown-linux-gnu
+	exit ;;
+    s390:Linux:*:* | s390x:Linux:*:*)
+	echo ${UNAME_MACHINE}-ibm-linux
+	exit ;;
+    sh64*:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    sh*:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    sparc:Linux:*:* | sparc64:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    tile*:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    vax:Linux:*:*)
+	echo ${UNAME_MACHINE}-dec-linux-gnu
+	exit ;;
+    x86_64:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    xtensa*:Linux:*:*)
+	echo ${UNAME_MACHINE}-unknown-linux-gnu
+	exit ;;
+    i*86:DYNIX/ptx:4*:*)
+	# ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
+	# Earlier versions are messed up and put the nodename in both
+	# sysname and nodename.
+	echo i386-sequent-sysv4
+	exit ;;
+    i*86:UNIX_SV:4.2MP:2.*)
+	# Unixware is an offshoot of SVR4, but it has its own version
+	# number series starting with 2...
+	# I am not positive that other SVR4 systems won't match this,
+	# I just have to hope.  -- rms.
+	# Use sysv4.2uw... so that sysv4* matches it.
+	echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
+	exit ;;
+    i*86:OS/2:*:*)
+	# If we were able to find `uname', then EMX Unix compatibility
+	# is probably installed.
+	echo ${UNAME_MACHINE}-pc-os2-emx
+	exit ;;
+    i*86:XTS-300:*:STOP)
+	echo ${UNAME_MACHINE}-unknown-stop
+	exit ;;
+    i*86:atheos:*:*)
+	echo ${UNAME_MACHINE}-unknown-atheos
+	exit ;;
+    i*86:syllable:*:*)
+	echo ${UNAME_MACHINE}-pc-syllable
+	exit ;;
+    i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
+	echo i386-unknown-lynxos${UNAME_RELEASE}
+	exit ;;
+    i*86:*DOS:*:*)
+	echo ${UNAME_MACHINE}-pc-msdosdjgpp
+	exit ;;
+    i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
+	UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
+	if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+		echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
+	else
+		echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
+	fi
+	exit ;;
+    i*86:*:5:[678]*)
+	# UnixWare 7.x, OpenUNIX and OpenServer 6.
+	case `/bin/uname -X | grep "^Machine"` in
+	    *486*)	     UNAME_MACHINE=i486 ;;
+	    *Pentium)	     UNAME_MACHINE=i586 ;;
+	    *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
+	esac
+	echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
+	exit ;;
+    i*86:*:3.2:*)
+	if test -f /usr/options/cb.name; then
+		UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
+		echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
+	elif /bin/uname -X 2>/dev/null >/dev/null ; then
+		UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
+		(/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
+		(/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
+			&& UNAME_MACHINE=i586
+		(/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
+			&& UNAME_MACHINE=i686
+		(/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
+			&& UNAME_MACHINE=i686
+		echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
+	else
+		echo ${UNAME_MACHINE}-pc-sysv32
+	fi
+	exit ;;
+    pc:*:*:*)
+	# Left here for compatibility:
+	# uname -m always prints 'pc' for DJGPP but reveals nothing about
+	# the processor, so we play safe by assuming i586.
+	# Note: whatever this is, it MUST be the same as what config.sub
+	# prints for the "djgpp" host, or else GDB configury will decide that
+	# this is a cross-build.
+	echo i586-pc-msdosdjgpp
+	exit ;;
+    Intel:Mach:3*:*)
+	echo i386-pc-mach3
+	exit ;;
+    paragon:*:*:*)
+	echo i860-intel-osf1
+	exit ;;
+    i860:*:4.*:*) # i860-SVR4
+	if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
+	  echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
+	else # Add other i860-SVR4 vendors below as they are discovered.
+	  echo i860-unknown-sysv${UNAME_RELEASE}  # Unknown i860-SVR4
+	fi
+	exit ;;
+    mini*:CTIX:SYS*5:*)
+	# "miniframe"
+	echo m68010-convergent-sysv
+	exit ;;
+    mc68k:UNIX:SYSTEM5:3.51m)
+	echo m68k-convergent-sysv
+	exit ;;
+    M680?0:D-NIX:5.3:*)
+	echo m68k-diab-dnix
+	exit ;;
+    M68*:*:R3V[5678]*:*)
+	test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
+    3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
+	OS_REL=''
+	test -r /etc/.relid \
+	&& OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+	/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+	  && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+	/bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+	  && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+    3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
+	/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+	  && { echo i486-ncr-sysv4; exit; } ;;
+    NCR*:*:4.2:* | MPRAS*:*:4.2:*)
+	OS_REL='.3'
+	test -r /etc/.relid \
+	    && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+	/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+	    && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+	/bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+	    && { echo i586-ncr-sysv4.3${OS_REL}; exit; }
+	/bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
+	    && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+    m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
+	echo m68k-unknown-lynxos${UNAME_RELEASE}
+	exit ;;
+    mc68030:UNIX_System_V:4.*:*)
+	echo m68k-atari-sysv4
+	exit ;;
+    TSUNAMI:LynxOS:2.*:*)
+	echo sparc-unknown-lynxos${UNAME_RELEASE}
+	exit ;;
+    rs6000:LynxOS:2.*:*)
+	echo rs6000-unknown-lynxos${UNAME_RELEASE}
+	exit ;;
+    PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*)
+	echo powerpc-unknown-lynxos${UNAME_RELEASE}
+	exit ;;
+    SM[BE]S:UNIX_SV:*:*)
+	echo mips-dde-sysv${UNAME_RELEASE}
+	exit ;;
+    RM*:ReliantUNIX-*:*:*)
+	echo mips-sni-sysv4
+	exit ;;
+    RM*:SINIX-*:*:*)
+	echo mips-sni-sysv4
+	exit ;;
+    *:SINIX-*:*:*)
+	if uname -p 2>/dev/null >/dev/null ; then
+		UNAME_MACHINE=`(uname -p) 2>/dev/null`
+		echo ${UNAME_MACHINE}-sni-sysv4
+	else
+		echo ns32k-sni-sysv
+	fi
+	exit ;;
+    PENTIUM:*:4.0*:*)	# Unisys `ClearPath HMP IX 4000' SVR4/MP effort
+			# says <Richard.M.Bartel at ccMail.Census.GOV>
+	echo i586-unisys-sysv4
+	exit ;;
+    *:UNIX_System_V:4*:FTX*)
+	# From Gerald Hewes <hewes at openmarket.com>.
+	# How about differentiating between stratus architectures? -djm
+	echo hppa1.1-stratus-sysv4
+	exit ;;
+    *:*:*:FTX*)
+	# From seanf at swdc.stratus.com.
+	echo i860-stratus-sysv4
+	exit ;;
+    i*86:VOS:*:*)
+	# From Paul.Green at stratus.com.
+	echo ${UNAME_MACHINE}-stratus-vos
+	exit ;;
+    *:VOS:*:*)
+	# From Paul.Green at stratus.com.
+	echo hppa1.1-stratus-vos
+	exit ;;
+    mc68*:A/UX:*:*)
+	echo m68k-apple-aux${UNAME_RELEASE}
+	exit ;;
+    news*:NEWS-OS:6*:*)
+	echo mips-sony-newsos6
+	exit ;;
+    R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
+	if [ -d /usr/nec ]; then
+		echo mips-nec-sysv${UNAME_RELEASE}
+	else
+		echo mips-unknown-sysv${UNAME_RELEASE}
+	fi
+	exit ;;
+    BeBox:BeOS:*:*)	# BeOS running on hardware made by Be, PPC only.
+	echo powerpc-be-beos
+	exit ;;
+    BeMac:BeOS:*:*)	# BeOS running on Mac or Mac clone, PPC only.
+	echo powerpc-apple-beos
+	exit ;;
+    BePC:BeOS:*:*)	# BeOS running on Intel PC compatible.
+	echo i586-pc-beos
+	exit ;;
+    BePC:Haiku:*:*)	# Haiku running on Intel PC compatible.
+	echo i586-pc-haiku
+	exit ;;
+    SX-4:SUPER-UX:*:*)
+	echo sx4-nec-superux${UNAME_RELEASE}
+	exit ;;
+    SX-5:SUPER-UX:*:*)
+	echo sx5-nec-superux${UNAME_RELEASE}
+	exit ;;
+    SX-6:SUPER-UX:*:*)
+	echo sx6-nec-superux${UNAME_RELEASE}
+	exit ;;
+    SX-7:SUPER-UX:*:*)
+	echo sx7-nec-superux${UNAME_RELEASE}
+	exit ;;
+    SX-8:SUPER-UX:*:*)
+	echo sx8-nec-superux${UNAME_RELEASE}
+	exit ;;
+    SX-8R:SUPER-UX:*:*)
+	echo sx8r-nec-superux${UNAME_RELEASE}
+	exit ;;
+    Power*:Rhapsody:*:*)
+	echo powerpc-apple-rhapsody${UNAME_RELEASE}
+	exit ;;
+    *:Rhapsody:*:*)
+	echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
+	exit ;;
+    *:Darwin:*:*)
+	UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
+	case $UNAME_PROCESSOR in
+	    i386)
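+		# Ask the build compiler whether __LP64__ is defined: a
+		# 64-bit userland on i386 Darwin is reported as x86_64.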
+		eval $set_cc_for_build
+		if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
+		  if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
+		      (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+		      grep IS_64BIT_ARCH >/dev/null
+		  then
+		      UNAME_PROCESSOR="x86_64"
+		  fi
+		fi ;;
+	    unknown) UNAME_PROCESSOR=powerpc ;;
+	esac
+	echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
+	exit ;;
+    *:procnto*:*:* | *:QNX:[0123456789]*:*)
+	UNAME_PROCESSOR=`uname -p`
+	if test "$UNAME_PROCESSOR" = "x86"; then
+		UNAME_PROCESSOR=i386
+		UNAME_MACHINE=pc
+	fi
+	echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
+	exit ;;
+    *:QNX:*:4*)
+	echo i386-pc-qnx
+	exit ;;
+    NEO-?:NONSTOP_KERNEL:*:*)
+	echo neo-tandem-nsk${UNAME_RELEASE}
+	exit ;;
+    NSE-?:NONSTOP_KERNEL:*:*)
+	echo nse-tandem-nsk${UNAME_RELEASE}
+	exit ;;
+    NSR-?:NONSTOP_KERNEL:*:*)
+	echo nsr-tandem-nsk${UNAME_RELEASE}
+	exit ;;
+    *:NonStop-UX:*:*)
+	echo mips-compaq-nonstopux
+	exit ;;
+    BS2000:POSIX*:*:*)
+	echo bs2000-siemens-sysv
+	exit ;;
+    DS/*:UNIX_System_V:*:*)
+	echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
+	exit ;;
+    *:Plan9:*:*)
+	# "uname -m" is not consistent, so use $cputype instead. 386
+	# is converted to i386 for consistency with other x86
+	# operating systems.
+	if test "$cputype" = "386"; then
+	    UNAME_MACHINE=i386
+	else
+	    UNAME_MACHINE="$cputype"
+	fi
+	echo ${UNAME_MACHINE}-unknown-plan9
+	exit ;;
+    *:TOPS-10:*:*)
+	echo pdp10-unknown-tops10
+	exit ;;
+    *:TENEX:*:*)
+	echo pdp10-unknown-tenex
+	exit ;;
+    KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
+	echo pdp10-dec-tops20
+	exit ;;
+    XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
+	echo pdp10-xkl-tops20
+	exit ;;
+    *:TOPS-20:*:*)
+	echo pdp10-unknown-tops20
+	exit ;;
+    *:ITS:*:*)
+	echo pdp10-unknown-its
+	exit ;;
+    SEI:*:*:SEIUX)
+	echo mips-sei-seiux${UNAME_RELEASE}
+	exit ;;
+    *:DragonFly:*:*)
+	echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
+	exit ;;
+    *:*VMS:*:*)
+	UNAME_MACHINE=`(uname -p) 2>/dev/null`
+	case "${UNAME_MACHINE}" in
+	    A*) echo alpha-dec-vms ; exit ;;
+	    I*) echo ia64-dec-vms ; exit ;;
+	    V*) echo vax-dec-vms ; exit ;;
+	esac ;;
+    *:XENIX:*:SysV)
+	echo i386-pc-xenix
+	exit ;;
+    i*86:skyos:*:*)
+	echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//'
+	exit ;;
+    i*86:rdos:*:*)
+	echo ${UNAME_MACHINE}-pc-rdos
+	exit ;;
+    i*86:AROS:*:*)
+	echo ${UNAME_MACHINE}-pc-aros
+	exit ;;
+    x86_64:VMkernel:*:*)
+	echo ${UNAME_MACHINE}-unknown-esx
+	exit ;;
+esac
+
+#echo '(No uname command or uname output not recognized.)' 1>&2
+#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2
+
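+# No uname pattern matched; as a last resort, compile a small C probe
+# that tests vendor-specific predefined macros and prints its own guess.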
+eval $set_cc_for_build
+cat >$dummy.c <<EOF
+#ifdef _SEQUENT_
+# include <sys/types.h>
+# include <sys/utsname.h>
+#endif
+main ()
+{
+#if defined (sony)
+#if defined (MIPSEB)
+  /* BFD wants "bsd" instead of "newsos".  Perhaps BFD should be changed,
+     I don't know....  */
+  printf ("mips-sony-bsd\n"); exit (0);
+#else
+#include <sys/param.h>
+  printf ("m68k-sony-newsos%s\n",
+#ifdef NEWSOS4
+	"4"
+#else
+	""
+#endif
+	); exit (0);
+#endif
+#endif
+
+#if defined (__arm) && defined (__acorn) && defined (__unix)
+  printf ("arm-acorn-riscix\n"); exit (0);
+#endif
+
+#if defined (hp300) && !defined (hpux)
+  printf ("m68k-hp-bsd\n"); exit (0);
+#endif
+
+#if defined (NeXT)
+#if !defined (__ARCHITECTURE__)
+#define __ARCHITECTURE__ "m68k"
+#endif
+  int version;
+  version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`;
+  if (version < 4)
+    printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
+  else
+    printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
+  exit (0);
+#endif
+
+#if defined (MULTIMAX) || defined (n16)
+#if defined (UMAXV)
+  printf ("ns32k-encore-sysv\n"); exit (0);
+#else
+#if defined (CMU)
+  printf ("ns32k-encore-mach\n"); exit (0);
+#else
+  printf ("ns32k-encore-bsd\n"); exit (0);
+#endif
+#endif
+#endif
+
+#if defined (__386BSD__)
+  printf ("i386-pc-bsd\n"); exit (0);
+#endif
+
+#if defined (sequent)
+#if defined (i386)
+  printf ("i386-sequent-dynix\n"); exit (0);
+#endif
+#if defined (ns32000)
+  printf ("ns32k-sequent-dynix\n"); exit (0);
+#endif
+#endif
+
+#if defined (_SEQUENT_)
+    struct utsname un;
+
+    uname(&un);
+
+    if (strncmp(un.version, "V2", 2) == 0) {
+	printf ("i386-sequent-ptx2\n"); exit (0);
+    }
+    if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
+	printf ("i386-sequent-ptx1\n"); exit (0);
+    }
+    printf ("i386-sequent-ptx\n"); exit (0);
+
+#endif
+
+#if defined (vax)
+# if !defined (ultrix)
+#  include <sys/param.h>
+#  if defined (BSD)
+#   if BSD == 43
+      printf ("vax-dec-bsd4.3\n"); exit (0);
+#   else
+#    if BSD == 199006
+      printf ("vax-dec-bsd4.3reno\n"); exit (0);
+#    else
+      printf ("vax-dec-bsd\n"); exit (0);
+#    endif
+#   endif
+#  else
+    printf ("vax-dec-bsd\n"); exit (0);
+#  endif
+# else
+    printf ("vax-dec-ultrix\n"); exit (0);
+# endif
+#endif
+
+#if defined (alliant) && defined (i860)
+  printf ("i860-alliant-bsd\n"); exit (0);
+#endif
+
+  exit (1);
+}
+EOF
+
+$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` &&
+	{ echo "$SYSTEM_NAME"; exit; }
+
+# Apollos put the system type in the environment.
+
+test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; }
+
+# Convex versions that predate uname can use getsysinfo(1)
+
+if [ -x /usr/convex/getsysinfo ]
+then
+    case `getsysinfo -f cpu_type` in
+    c1*)
+	echo c1-convex-bsd
+	exit ;;
+    c2*)
+	if getsysinfo -f scalar_acc
+	then echo c32-convex-bsd
+	else echo c2-convex-bsd
+	fi
+	exit ;;
+    c34*)
+	echo c34-convex-bsd
+	exit ;;
+    c38*)
+	echo c38-convex-bsd
+	exit ;;
+    c4*)
+	echo c4-convex-bsd
+	exit ;;
+    esac
+fi
+
+cat >&2 <<EOF
+$0: unable to guess system type
+
+This script, last modified $timestamp, has failed to recognize
+the operating system you are using. You are advised to download the
+most up-to-date version of the config scripts from
+
+  http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
+and
+  http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
+
+If the version you run ($0) is already up to date, please send the
+following data, plus any other information you think might be pertinent,
+to <config-patches at gnu.org> so that your system can be supported.
+
+config.guess timestamp = $timestamp
+
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
+/bin/uname -X     = `(/bin/uname -X) 2>/dev/null`
+
+hostinfo               = `(hostinfo) 2>/dev/null`
+/bin/universe          = `(/bin/universe) 2>/dev/null`
+/usr/bin/arch -k       = `(/usr/bin/arch -k) 2>/dev/null`
+/bin/arch              = `(/bin/arch) 2>/dev/null`
+/usr/bin/oslevel       = `(/usr/bin/oslevel) 2>/dev/null`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
+
+UNAME_MACHINE = ${UNAME_MACHINE}
+UNAME_RELEASE = ${UNAME_RELEASE}
+UNAME_SYSTEM  = ${UNAME_SYSTEM}
+UNAME_VERSION = ${UNAME_VERSION}
+EOF
+
+exit 1
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/config.sub b/config.sub
new file mode 100755
index 0000000..6205f84
--- /dev/null
+++ b/config.sub
@@ -0,0 +1,1782 @@
+#! /bin/sh
+# Configuration validation subroutine script.
+#   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+#   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+#   2011, 2012 Free Software Foundation, Inc.
+
+timestamp='2012-04-18'
+
+# This file is (in principle) common to ALL GNU software.
+# The presence of a machine in this file suggests that SOME GNU software
+# can handle that machine.  It does not imply ALL GNU software can.
+#
+# This file is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+
+# Please send patches to <config-patches at gnu.org>.  Submit a context
+# diff and a properly formatted GNU ChangeLog entry.
+#
+# Configuration subroutine to validate and canonicalize a configuration type.
+# Supply the specified configuration type as an argument.
+# If it is invalid, we print an error message on stderr and exit with code 1.
+# Otherwise, we print the canonical config type on stdout and succeed.
+
+# You can get the latest version of this script from:
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
+
+# This file is supposed to be the same for all GNU packages
+# and recognize all the CPU types, system types and aliases
+# that are meaningful with *any* GNU software.
+# Each package is responsible for reporting which valid configurations
+# it does not support.  The user should be able to distinguish
+# a failure to support a valid configuration from a meaningless
+# configuration.
+
+# The goal of this file is to map all the various variations of a given
+# machine specification into a single specification in the form:
+#	CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
+# or in some cases, the newer four-part form:
+#	CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
+# It is wrong to echo any other type of specification.
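+#
+# For example, the tables below canonicalize the alias "sun4" to
+#	sparc-sun-sunos4.1.1
+# and "amd64" (with no OS given) to
+#	x86_64-pc-none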
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION] CPU-MFR-OPSYS
+       $0 [OPTION] ALIAS
+
+Canonicalize a configuration name.
+
+Operation modes:
+  -h, --help         print this help, then exit
+  -t, --time-stamp   print date of last modification, then exit
+  -v, --version      print version number, then exit
+
+Report bugs and patches to <config-patches at gnu.org>."
+
+version="\
+GNU config.sub ($timestamp)
+
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
+2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
+Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions.  There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+  case $1 in
+    --time-stamp | --time* | -t )
+       echo "$timestamp" ; exit ;;
+    --version | -v )
+       echo "$version" ; exit ;;
+    --help | --h* | -h )
+       echo "$usage"; exit ;;
+    -- )     # Stop option processing
+       shift; break ;;
+    - )	# Use stdin as input.
+       break ;;
+    -* )
+       echo "$me: invalid option $1$help"
+       exit 1 ;;
+
+    *local*)
+       # First pass through any local machine types.
+       echo $1
+       exit ;;
+
+    * )
+       break ;;
+  esac
+done
+
+case $# in
+ 0) echo "$me: missing argument$help" >&2
+    exit 1;;
+ 1) ;;
+ *) echo "$me: too many arguments$help" >&2
+    exit 1;;
+esac
+
+# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
+# Here we must recognize all the valid KERNEL-OS combinations.
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
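+# (e.g. for "x86_64-unknown-linux-gnu", maybe_os becomes "linux-gnu" and
+# the linux-gnu* branch below keeps "x86_64-unknown" as basic_machine.)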
+case $maybe_os in
+  nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
+  linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
+  knetbsd*-gnu* | netbsd*-gnu* | \
+  kopensolaris*-gnu* | \
+  storm-chaos* | os2-emx* | rtmk-nova*)
+    os=-$maybe_os
+    basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
+    ;;
+  android-linux)
+    os=-linux-android
+    basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown
+    ;;
+  *)
+    basic_machine=`echo $1 | sed 's/-[^-]*$//'`
+    if [ $basic_machine != $1 ]
+    then os=`echo $1 | sed 's/.*-/-/'`
+    else os=; fi
+    ;;
+esac
+
+### Let's recognize common machines as not being operating systems so
+### that things like config.sub decstation-3100 work.  We also
+### recognize some manufacturers as not being operating systems, so we
+### can provide default operating systems below.
+case $os in
+	-sun*os*)
+		# Prevent following clause from handling this invalid input.
+		;;
+	-dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
+	-att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
+	-unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
+	-convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
+	-c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
+	-harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
+	-apple | -axis | -knuth | -cray | -microblaze)
+		os=
+		basic_machine=$1
+		;;
+	-bluegene*)
+		os=-cnk
+		;;
+	-sim | -cisco | -oki | -wec | -winbond)
+		os=
+		basic_machine=$1
+		;;
+	-scout)
+		;;
+	-wrs)
+		os=-vxworks
+		basic_machine=$1
+		;;
+	-chorusos*)
+		os=-chorusos
+		basic_machine=$1
+		;;
+	-chorusrdb)
+		os=-chorusrdb
+		basic_machine=$1
+		;;
+	-hiux*)
+		os=-hiuxwe2
+		;;
+	-sco6)
+		os=-sco5v6
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-sco5)
+		os=-sco3.2v5
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-sco4)
+		os=-sco3.2v4
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-sco3.2.[4-9]*)
+		os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-sco3.2v[4-9]*)
+		# Don't forget version if it is 3.2v4 or newer.
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-sco5v6*)
+		# Don't forget version if it is 3.2v4 or newer.
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-sco*)
+		os=-sco3.2v2
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-udk*)
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-isc)
+		os=-isc2.2
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-clix*)
+		basic_machine=clipper-intergraph
+		;;
+	-isc*)
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+		;;
+	-lynx*178)
+		os=-lynxos178
+		;;
+	-lynx*5)
+		os=-lynxos5
+		;;
+	-lynx*)
+		os=-lynxos
+		;;
+	-ptx*)
+		basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
+		;;
+	-windowsnt*)
+		os=`echo $os | sed -e 's/windowsnt/winnt/'`
+		;;
+	-psos*)
+		os=-psos
+		;;
+	-mint | -mint[0-9]*)
+		basic_machine=m68k-atari
+		os=-mint
+		;;
+esac
+
+# Decode aliases for certain CPU-COMPANY combinations.
+case $basic_machine in
+	# Recognize the basic CPU types without company name.
+	# Some are omitted here because they have special meanings below.
+	1750a | 580 \
+	| a29k \
+	| aarch64 | aarch64_be \
+	| alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
+	| alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
+	| am33_2.0 \
+	| arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \
+        | be32 | be64 \
+	| bfin \
+	| c4x | clipper \
+	| d10v | d30v | dlx | dsp16xx \
+	| epiphany \
+	| fido | fr30 | frv \
+	| h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
+	| hexagon \
+	| i370 | i860 | i960 | ia64 \
+	| ip2k | iq2000 \
+	| le32 | le64 \
+	| lm32 \
+	| m32c | m32r | m32rle | m68000 | m68k | m88k \
+	| maxq | mb | microblaze | mcore | mep | metag \
+	| mips | mipsbe | mipseb | mipsel | mipsle \
+	| mips16 \
+	| mips64 | mips64el \
+	| mips64octeon | mips64octeonel \
+	| mips64orion | mips64orionel \
+	| mips64r5900 | mips64r5900el \
+	| mips64vr | mips64vrel \
+	| mips64vr4100 | mips64vr4100el \
+	| mips64vr4300 | mips64vr4300el \
+	| mips64vr5000 | mips64vr5000el \
+	| mips64vr5900 | mips64vr5900el \
+	| mipsisa32 | mipsisa32el \
+	| mipsisa32r2 | mipsisa32r2el \
+	| mipsisa64 | mipsisa64el \
+	| mipsisa64r2 | mipsisa64r2el \
+	| mipsisa64sb1 | mipsisa64sb1el \
+	| mipsisa64sr71k | mipsisa64sr71kel \
+	| mipstx39 | mipstx39el \
+	| mn10200 | mn10300 \
+	| moxie \
+	| mt \
+	| msp430 \
+	| nds32 | nds32le | nds32be \
+	| nios | nios2 \
+	| ns16k | ns32k \
+	| open8 \
+	| or32 \
+	| pdp10 | pdp11 | pj | pjl \
+	| powerpc | powerpc64 | powerpc64le | powerpcle \
+	| pyramid \
+	| rl78 | rx \
+	| score \
+	| sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
+	| sh64 | sh64le \
+	| sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
+	| sparcv8 | sparcv9 | sparcv9b | sparcv9v \
+	| spu \
+	| tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \
+	| ubicom32 \
+	| v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \
+	| we32k \
+	| x86 | xc16x | xstormy16 | xtensa \
+	| z8k | z80)
+		basic_machine=$basic_machine-unknown
+		;;
+	c54x)
+		basic_machine=tic54x-unknown
+		;;
+	c55x)
+		basic_machine=tic55x-unknown
+		;;
+	c6x)
+		basic_machine=tic6x-unknown
+		;;
+	m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | picochip)
+		basic_machine=$basic_machine-unknown
+		os=-none
+		;;
+	m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
+		;;
+	ms1)
+		basic_machine=mt-unknown
+		;;
+
+	strongarm | thumb | xscale)
+		basic_machine=arm-unknown
+		;;
+	xgate)
+		basic_machine=$basic_machine-unknown
+		os=-none
+		;;
+	xscaleeb)
+		basic_machine=armeb-unknown
+		;;
+
+	xscaleel)
+		basic_machine=armel-unknown
+		;;
+
+	# We use `pc' rather than `unknown'
+	# because (1) that's what they normally are, and
+	# (2) the word "unknown" tends to confuse beginning users.
+	i*86 | x86_64)
+	  basic_machine=$basic_machine-pc
+	  ;;
+	# Object if more than one company name word.
+	*-*-*)
+		echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+		exit 1
+		;;
+	# Recognize the basic CPU types with company name.
+	580-* \
+	| a29k-* \
+	| aarch64-* | aarch64_be-* \
+	| alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
+	| alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
+	| alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
+	| arm-*  | armbe-* | armle-* | armeb-* | armv*-* \
+	| avr-* | avr32-* \
+	| be32-* | be64-* \
+	| bfin-* | bs2000-* \
+	| c[123]* | c30-* | [cjt]90-* | c4x-* \
+	| clipper-* | craynv-* | cydra-* \
+	| d10v-* | d30v-* | dlx-* \
+	| elxsi-* \
+	| f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
+	| h8300-* | h8500-* \
+	| hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
+	| hexagon-* \
+	| i*86-* | i860-* | i960-* | ia64-* \
+	| ip2k-* | iq2000-* \
+	| le32-* | le64-* \
+	| lm32-* \
+	| m32c-* | m32r-* | m32rle-* \
+	| m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
+	| m88110-* | m88k-* | maxq-* | mcore-* | metag-* | microblaze-* \
+	| mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
+	| mips16-* \
+	| mips64-* | mips64el-* \
+	| mips64octeon-* | mips64octeonel-* \
+	| mips64orion-* | mips64orionel-* \
+	| mips64r5900-* | mips64r5900el-* \
+	| mips64vr-* | mips64vrel-* \
+	| mips64vr4100-* | mips64vr4100el-* \
+	| mips64vr4300-* | mips64vr4300el-* \
+	| mips64vr5000-* | mips64vr5000el-* \
+	| mips64vr5900-* | mips64vr5900el-* \
+	| mipsisa32-* | mipsisa32el-* \
+	| mipsisa32r2-* | mipsisa32r2el-* \
+	| mipsisa64-* | mipsisa64el-* \
+	| mipsisa64r2-* | mipsisa64r2el-* \
+	| mipsisa64sb1-* | mipsisa64sb1el-* \
+	| mipsisa64sr71k-* | mipsisa64sr71kel-* \
+	| mipstx39-* | mipstx39el-* \
+	| mmix-* \
+	| mt-* \
+	| msp430-* \
+	| nds32-* | nds32le-* | nds32be-* \
+	| nios-* | nios2-* \
+	| none-* | np1-* | ns16k-* | ns32k-* \
+	| open8-* \
+	| orion-* \
+	| pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
+	| powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \
+	| pyramid-* \
+	| rl78-* | romp-* | rs6000-* | rx-* \
+	| sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
+	| shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
+	| sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
+	| sparclite-* \
+	| sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \
+	| tahoe-* \
+	| tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
+	| tile*-* \
+	| tron-* \
+	| ubicom32-* \
+	| v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \
+	| vax-* \
+	| we32k-* \
+	| x86-* | x86_64-* | xc16x-* | xps100-* \
+	| xstormy16-* | xtensa*-* \
+	| ymp-* \
+	| z8k-* | z80-*)
+		;;
+	# Recognize the basic CPU types without company name, with glob match.
+	xtensa*)
+		basic_machine=$basic_machine-unknown
+		;;
+	# Recognize the various machine names and aliases which stand
+	# for a CPU type and a company and sometimes even an OS.
+	386bsd)
+		basic_machine=i386-unknown
+		os=-bsd
+		;;
+	3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
+		basic_machine=m68000-att
+		;;
+	3b*)
+		basic_machine=we32k-att
+		;;
+	a29khif)
+		basic_machine=a29k-amd
+		os=-udi
+		;;
+	abacus)
+		basic_machine=abacus-unknown
+		;;
+	adobe68k)
+		basic_machine=m68010-adobe
+		os=-scout
+		;;
+	alliant | fx80)
+		basic_machine=fx80-alliant
+		;;
+	altos | altos3068)
+		basic_machine=m68k-altos
+		;;
+	am29k)
+		basic_machine=a29k-none
+		os=-bsd
+		;;
+	amd64)
+		basic_machine=x86_64-pc
+		;;
+	amd64-*)
+		basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	amdahl)
+		basic_machine=580-amdahl
+		os=-sysv
+		;;
+	amiga | amiga-*)
+		basic_machine=m68k-unknown
+		;;
+	amigaos | amigados)
+		basic_machine=m68k-unknown
+		os=-amigaos
+		;;
+	amigaunix | amix)
+		basic_machine=m68k-unknown
+		os=-sysv4
+		;;
+	apollo68)
+		basic_machine=m68k-apollo
+		os=-sysv
+		;;
+	apollo68bsd)
+		basic_machine=m68k-apollo
+		os=-bsd
+		;;
+	aros)
+		basic_machine=i386-pc
+		os=-aros
+		;;
+	aux)
+		basic_machine=m68k-apple
+		os=-aux
+		;;
+	balance)
+		basic_machine=ns32k-sequent
+		os=-dynix
+		;;
+	blackfin)
+		basic_machine=bfin-unknown
+		os=-linux
+		;;
+	blackfin-*)
+		basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'`
+		os=-linux
+		;;
+	bluegene*)
+		basic_machine=powerpc-ibm
+		os=-cnk
+		;;
+	c54x-*)
+		basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	c55x-*)
+		basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	c6x-*)
+		basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	c90)
+		basic_machine=c90-cray
+		os=-unicos
+		;;
+	cegcc)
+		basic_machine=arm-unknown
+		os=-cegcc
+		;;
+	convex-c1)
+		basic_machine=c1-convex
+		os=-bsd
+		;;
+	convex-c2)
+		basic_machine=c2-convex
+		os=-bsd
+		;;
+	convex-c32)
+		basic_machine=c32-convex
+		os=-bsd
+		;;
+	convex-c34)
+		basic_machine=c34-convex
+		os=-bsd
+		;;
+	convex-c38)
+		basic_machine=c38-convex
+		os=-bsd
+		;;
+	cray | j90)
+		basic_machine=j90-cray
+		os=-unicos
+		;;
+	craynv)
+		basic_machine=craynv-cray
+		os=-unicosmp
+		;;
+	cr16 | cr16-*)
+		basic_machine=cr16-unknown
+		os=-elf
+		;;
+	crds | unos)
+		basic_machine=m68k-crds
+		;;
+	crisv32 | crisv32-* | etraxfs*)
+		basic_machine=crisv32-axis
+		;;
+	cris | cris-* | etrax*)
+		basic_machine=cris-axis
+		;;
+	crx)
+		basic_machine=crx-unknown
+		os=-elf
+		;;
+	da30 | da30-*)
+		basic_machine=m68k-da30
+		;;
+	decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
+		basic_machine=mips-dec
+		;;
+	decsystem10* | dec10*)
+		basic_machine=pdp10-dec
+		os=-tops10
+		;;
+	decsystem20* | dec20*)
+		basic_machine=pdp10-dec
+		os=-tops20
+		;;
+	delta | 3300 | motorola-3300 | motorola-delta \
+	      | 3300-motorola | delta-motorola)
+		basic_machine=m68k-motorola
+		;;
+	delta88)
+		basic_machine=m88k-motorola
+		os=-sysv3
+		;;
+	dicos)
+		basic_machine=i686-pc
+		os=-dicos
+		;;
+	djgpp)
+		basic_machine=i586-pc
+		os=-msdosdjgpp
+		;;
+	dpx20 | dpx20-*)
+		basic_machine=rs6000-bull
+		os=-bosx
+		;;
+	dpx2* | dpx2*-bull)
+		basic_machine=m68k-bull
+		os=-sysv3
+		;;
+	ebmon29k)
+		basic_machine=a29k-amd
+		os=-ebmon
+		;;
+	elxsi)
+		basic_machine=elxsi-elxsi
+		os=-bsd
+		;;
+	encore | umax | mmax)
+		basic_machine=ns32k-encore
+		;;
+	es1800 | OSE68k | ose68k | ose | OSE)
+		basic_machine=m68k-ericsson
+		os=-ose
+		;;
+	fx2800)
+		basic_machine=i860-alliant
+		;;
+	genix)
+		basic_machine=ns32k-ns
+		;;
+	gmicro)
+		basic_machine=tron-gmicro
+		os=-sysv
+		;;
+	go32)
+		basic_machine=i386-pc
+		os=-go32
+		;;
+	h3050r* | hiux*)
+		basic_machine=hppa1.1-hitachi
+		os=-hiuxwe2
+		;;
+	h8300hms)
+		basic_machine=h8300-hitachi
+		os=-hms
+		;;
+	h8300xray)
+		basic_machine=h8300-hitachi
+		os=-xray
+		;;
+	h8500hms)
+		basic_machine=h8500-hitachi
+		os=-hms
+		;;
+	harris)
+		basic_machine=m88k-harris
+		os=-sysv3
+		;;
+	hp300-*)
+		basic_machine=m68k-hp
+		;;
+	hp300bsd)
+		basic_machine=m68k-hp
+		os=-bsd
+		;;
+	hp300hpux)
+		basic_machine=m68k-hp
+		os=-hpux
+		;;
+	hp3k9[0-9][0-9] | hp9[0-9][0-9])
+		basic_machine=hppa1.0-hp
+		;;
+	hp9k2[0-9][0-9] | hp9k31[0-9])
+		basic_machine=m68000-hp
+		;;
+	hp9k3[2-9][0-9])
+		basic_machine=m68k-hp
+		;;
+	hp9k6[0-9][0-9] | hp6[0-9][0-9])
+		basic_machine=hppa1.0-hp
+		;;
+	hp9k7[0-79][0-9] | hp7[0-79][0-9])
+		basic_machine=hppa1.1-hp
+		;;
+	hp9k78[0-9] | hp78[0-9])
+		# FIXME: really hppa2.0-hp
+		basic_machine=hppa1.1-hp
+		;;
+	hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
+		# FIXME: really hppa2.0-hp
+		basic_machine=hppa1.1-hp
+		;;
+	hp9k8[0-9][13679] | hp8[0-9][13679])
+		basic_machine=hppa1.1-hp
+		;;
+	hp9k8[0-9][0-9] | hp8[0-9][0-9])
+		basic_machine=hppa1.0-hp
+		;;
+	hppa-next)
+		os=-nextstep3
+		;;
+	hppaosf)
+		basic_machine=hppa1.1-hp
+		os=-osf
+		;;
+	hppro)
+		basic_machine=hppa1.1-hp
+		os=-proelf
+		;;
+	i370-ibm* | ibm*)
+		basic_machine=i370-ibm
+		;;
+	i*86v32)
+		basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+		os=-sysv32
+		;;
+	i*86v4*)
+		basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+		os=-sysv4
+		;;
+	i*86v)
+		basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+		os=-sysv
+		;;
+	i*86sol2)
+		basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+		os=-solaris2
+		;;
+	i386mach)
+		basic_machine=i386-mach
+		os=-mach
+		;;
+	i386-vsta | vsta)
+		basic_machine=i386-unknown
+		os=-vsta
+		;;
+	iris | iris4d)
+		basic_machine=mips-sgi
+		case $os in
+		    -irix*)
+			;;
+		    *)
+			os=-irix4
+			;;
+		esac
+		;;
+	isi68 | isi)
+		basic_machine=m68k-isi
+		os=-sysv
+		;;
+	m68knommu)
+		basic_machine=m68k-unknown
+		os=-linux
+		;;
+	m68knommu-*)
+		basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'`
+		os=-linux
+		;;
+	m88k-omron*)
+		basic_machine=m88k-omron
+		;;
+	magnum | m3230)
+		basic_machine=mips-mips
+		os=-sysv
+		;;
+	merlin)
+		basic_machine=ns32k-utek
+		os=-sysv
+		;;
+	microblaze)
+		basic_machine=microblaze-xilinx
+		;;
+	mingw32)
+		basic_machine=i386-pc
+		os=-mingw32
+		;;
+	mingw32ce)
+		basic_machine=arm-unknown
+		os=-mingw32ce
+		;;
+	miniframe)
+		basic_machine=m68000-convergent
+		;;
+	*mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
+		basic_machine=m68k-atari
+		os=-mint
+		;;
+	mips3*-*)
+		basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
+		;;
+	mips3*)
+		basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
+		;;
+	monitor)
+		basic_machine=m68k-rom68k
+		os=-coff
+		;;
+	morphos)
+		basic_machine=powerpc-unknown
+		os=-morphos
+		;;
+	msdos)
+		basic_machine=i386-pc
+		os=-msdos
+		;;
+	ms1-*)
+		basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
+		;;
+	msys)
+		basic_machine=i386-pc
+		os=-msys
+		;;
+	mvs)
+		basic_machine=i370-ibm
+		os=-mvs
+		;;
+	nacl)
+		basic_machine=le32-unknown
+		os=-nacl
+		;;
+	ncr3000)
+		basic_machine=i486-ncr
+		os=-sysv4
+		;;
+	netbsd386)
+		basic_machine=i386-unknown
+		os=-netbsd
+		;;
+	netwinder)
+		basic_machine=armv4l-rebel
+		os=-linux
+		;;
+	news | news700 | news800 | news900)
+		basic_machine=m68k-sony
+		os=-newsos
+		;;
+	news1000)
+		basic_machine=m68030-sony
+		os=-newsos
+		;;
+	news-3600 | risc-news)
+		basic_machine=mips-sony
+		os=-newsos
+		;;
+	necv70)
+		basic_machine=v70-nec
+		os=-sysv
+		;;
+	next | m*-next )
+		basic_machine=m68k-next
+		case $os in
+		    -nextstep* )
+			;;
+		    -ns2*)
+		      os=-nextstep2
+			;;
+		    *)
+		      os=-nextstep3
+			;;
+		esac
+		;;
+	nh3000)
+		basic_machine=m68k-harris
+		os=-cxux
+		;;
+	nh[45]000)
+		basic_machine=m88k-harris
+		os=-cxux
+		;;
+	nindy960)
+		basic_machine=i960-intel
+		os=-nindy
+		;;
+	mon960)
+		basic_machine=i960-intel
+		os=-mon960
+		;;
+	nonstopux)
+		basic_machine=mips-compaq
+		os=-nonstopux
+		;;
+	np1)
+		basic_machine=np1-gould
+		;;
+	neo-tandem)
+		basic_machine=neo-tandem
+		;;
+	nse-tandem)
+		basic_machine=nse-tandem
+		;;
+	nsr-tandem)
+		basic_machine=nsr-tandem
+		;;
+	op50n-* | op60c-*)
+		basic_machine=hppa1.1-oki
+		os=-proelf
+		;;
+	openrisc | openrisc-*)
+		basic_machine=or32-unknown
+		;;
+	os400)
+		basic_machine=powerpc-ibm
+		os=-os400
+		;;
+	OSE68000 | ose68000)
+		basic_machine=m68000-ericsson
+		os=-ose
+		;;
+	os68k)
+		basic_machine=m68k-none
+		os=-os68k
+		;;
+	pa-hitachi)
+		basic_machine=hppa1.1-hitachi
+		os=-hiuxwe2
+		;;
+	paragon)
+		basic_machine=i860-intel
+		os=-osf
+		;;
+	parisc)
+		basic_machine=hppa-unknown
+		os=-linux
+		;;
+	parisc-*)
+		basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'`
+		os=-linux
+		;;
+	pbd)
+		basic_machine=sparc-tti
+		;;
+	pbb)
+		basic_machine=m68k-tti
+		;;
+	pc532 | pc532-*)
+		basic_machine=ns32k-pc532
+		;;
+	pc98)
+		basic_machine=i386-pc
+		;;
+	pc98-*)
+		basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	pentium | p5 | k5 | k6 | nexgen | viac3)
+		basic_machine=i586-pc
+		;;
+	pentiumpro | p6 | 6x86 | athlon | athlon_*)
+		basic_machine=i686-pc
+		;;
+	pentiumii | pentium2 | pentiumiii | pentium3)
+		basic_machine=i686-pc
+		;;
+	pentium4)
+		basic_machine=i786-pc
+		;;
+	pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
+		basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	pentiumpro-* | p6-* | 6x86-* | athlon-*)
+		basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*)
+		basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	pentium4-*)
+		basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	pn)
+		basic_machine=pn-gould
+		;;
+	power)	basic_machine=power-ibm
+		;;
+	ppc | ppcbe)	basic_machine=powerpc-unknown
+		;;
+	ppc-* | ppcbe-*)
+		basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	ppcle | powerpclittle | ppc-le | powerpc-little)
+		basic_machine=powerpcle-unknown
+		;;
+	ppcle-* | powerpclittle-*)
+		basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	ppc64)	basic_machine=powerpc64-unknown
+		;;
+	ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	ppc64le | powerpc64little | ppc64-le | powerpc64-little)
+		basic_machine=powerpc64le-unknown
+		;;
+	ppc64le-* | powerpc64little-*)
+		basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	ps2)
+		basic_machine=i386-ibm
+		;;
+	pw32)
+		basic_machine=i586-unknown
+		os=-pw32
+		;;
+	rdos)
+		basic_machine=i386-pc
+		os=-rdos
+		;;
+	rom68k)
+		basic_machine=m68k-rom68k
+		os=-coff
+		;;
+	rm[46]00)
+		basic_machine=mips-siemens
+		;;
+	rtpc | rtpc-*)
+		basic_machine=romp-ibm
+		;;
+	s390 | s390-*)
+		basic_machine=s390-ibm
+		;;
+	s390x | s390x-*)
+		basic_machine=s390x-ibm
+		;;
+	sa29200)
+		basic_machine=a29k-amd
+		os=-udi
+		;;
+	sb1)
+		basic_machine=mipsisa64sb1-unknown
+		;;
+	sb1el)
+		basic_machine=mipsisa64sb1el-unknown
+		;;
+	sde)
+		basic_machine=mipsisa32-sde
+		os=-elf
+		;;
+	sei)
+		basic_machine=mips-sei
+		os=-seiux
+		;;
+	sequent)
+		basic_machine=i386-sequent
+		;;
+	sh)
+		basic_machine=sh-hitachi
+		os=-hms
+		;;
+	sh5el)
+		basic_machine=sh5le-unknown
+		;;
+	sh64)
+		basic_machine=sh64-unknown
+		;;
+	sparclite-wrs | simso-wrs)
+		basic_machine=sparclite-wrs
+		os=-vxworks
+		;;
+	sps7)
+		basic_machine=m68k-bull
+		os=-sysv2
+		;;
+	spur)
+		basic_machine=spur-unknown
+		;;
+	st2000)
+		basic_machine=m68k-tandem
+		;;
+	stratus)
+		basic_machine=i860-stratus
+		os=-sysv4
+		;;
+	strongarm-* | thumb-*)
+		basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'`
+		;;
+	sun2)
+		basic_machine=m68000-sun
+		;;
+	sun2os3)
+		basic_machine=m68000-sun
+		os=-sunos3
+		;;
+	sun2os4)
+		basic_machine=m68000-sun
+		os=-sunos4
+		;;
+	sun3os3)
+		basic_machine=m68k-sun
+		os=-sunos3
+		;;
+	sun3os4)
+		basic_machine=m68k-sun
+		os=-sunos4
+		;;
+	sun4os3)
+		basic_machine=sparc-sun
+		os=-sunos3
+		;;
+	sun4os4)
+		basic_machine=sparc-sun
+		os=-sunos4
+		;;
+	sun4sol2)
+		basic_machine=sparc-sun
+		os=-solaris2
+		;;
+	sun3 | sun3-*)
+		basic_machine=m68k-sun
+		;;
+	sun4)
+		basic_machine=sparc-sun
+		;;
+	sun386 | sun386i | roadrunner)
+		basic_machine=i386-sun
+		;;
+	sv1)
+		basic_machine=sv1-cray
+		os=-unicos
+		;;
+	symmetry)
+		basic_machine=i386-sequent
+		os=-dynix
+		;;
+	t3e)
+		basic_machine=alphaev5-cray
+		os=-unicos
+		;;
+	t90)
+		basic_machine=t90-cray
+		os=-unicos
+		;;
+	tile*)
+		basic_machine=$basic_machine-unknown
+		os=-linux-gnu
+		;;
+	tx39)
+		basic_machine=mipstx39-unknown
+		;;
+	tx39el)
+		basic_machine=mipstx39el-unknown
+		;;
+	toad1)
+		basic_machine=pdp10-xkl
+		os=-tops20
+		;;
+	tower | tower-32)
+		basic_machine=m68k-ncr
+		;;
+	tpf)
+		basic_machine=s390x-ibm
+		os=-tpf
+		;;
+	udi29k)
+		basic_machine=a29k-amd
+		os=-udi
+		;;
+	ultra3)
+		basic_machine=a29k-nyu
+		os=-sym1
+		;;
+	v810 | necv810)
+		basic_machine=v810-nec
+		os=-none
+		;;
+	vaxv)
+		basic_machine=vax-dec
+		os=-sysv
+		;;
+	vms)
+		basic_machine=vax-dec
+		os=-vms
+		;;
+	vpp*|vx|vx-*)
+		basic_machine=f301-fujitsu
+		;;
+	vxworks960)
+		basic_machine=i960-wrs
+		os=-vxworks
+		;;
+	vxworks68)
+		basic_machine=m68k-wrs
+		os=-vxworks
+		;;
+	vxworks29k)
+		basic_machine=a29k-wrs
+		os=-vxworks
+		;;
+	w65*)
+		basic_machine=w65-wdc
+		os=-none
+		;;
+	w89k-*)
+		basic_machine=hppa1.1-winbond
+		os=-proelf
+		;;
+	xbox)
+		basic_machine=i686-pc
+		os=-mingw32
+		;;
+	xps | xps100)
+		basic_machine=xps100-honeywell
+		;;
+	xscale-* | xscalee[bl]-*)
+		basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'`
+		;;
+	ymp)
+		basic_machine=ymp-cray
+		os=-unicos
+		;;
+	z8k-*-coff)
+		basic_machine=z8k-unknown
+		os=-sim
+		;;
+	z80-*-coff)
+		basic_machine=z80-unknown
+		os=-sim
+		;;
+	none)
+		basic_machine=none-none
+		os=-none
+		;;
+
+# Here we handle the default manufacturer of certain CPU types.  In some
+# cases it is the only manufacturer; in others, it is the most popular.
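+# (e.g. a bare "vax" becomes vax-dec here and, with no OS given, later
+# picks up -ultrix4.2 from the default-OS table: vax-dec-ultrix4.2.)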
+	w89k)
+		basic_machine=hppa1.1-winbond
+		;;
+	op50n)
+		basic_machine=hppa1.1-oki
+		;;
+	op60c)
+		basic_machine=hppa1.1-oki
+		;;
+	romp)
+		basic_machine=romp-ibm
+		;;
+	mmix)
+		basic_machine=mmix-knuth
+		;;
+	rs6000)
+		basic_machine=rs6000-ibm
+		;;
+	vax)
+		basic_machine=vax-dec
+		;;
+	pdp10)
+		# There are many clones, so DEC is not a safe bet.
+		basic_machine=pdp10-unknown
+		;;
+	pdp11)
+		basic_machine=pdp11-dec
+		;;
+	we32k)
+		basic_machine=we32k-att
+		;;
+	sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele)
+		basic_machine=sh-unknown
+		;;
+	sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
+		basic_machine=sparc-sun
+		;;
+	cydra)
+		basic_machine=cydra-cydrome
+		;;
+	orion)
+		basic_machine=orion-highlevel
+		;;
+	orion105)
+		basic_machine=clipper-highlevel
+		;;
+	mac | mpw | mac-mpw)
+		basic_machine=m68k-apple
+		;;
+	pmac | pmac-mpw)
+		basic_machine=powerpc-apple
+		;;
+	*-unknown)
+		# Make sure to match an already-canonicalized machine name.
+		;;
+	*)
+		echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+		exit 1
+		;;
+esac
+
+# Here we canonicalize certain aliases for manufacturers.
+case $basic_machine in
+	*-digital*)
+		basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
+		;;
+	*-commodore*)
+		basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
+		;;
+	*)
+		;;
+esac
+
+# Decode manufacturer-specific aliases for certain operating systems.
+
+if [ x"$os" != x"" ]
+then
+case $os in
+	# First match some system type aliases
+	# that might get confused with valid system types.
+	# -solaris* is a basic system type, with this one exception.
+	-auroraux)
+		os=-auroraux
+		;;
+	-solaris1 | -solaris1.*)
+		os=`echo $os | sed -e 's|solaris1|sunos4|'`
+		;;
+	-solaris)
+		os=-solaris2
+		;;
+	-svr4*)
+		os=-sysv4
+		;;
+	-unixware*)
+		os=-sysv4.2uw
+		;;
+	-gnu/linux*)
+		os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
+		;;
+	# First accept the basic system types.
+	# The portable systems come first.
+	# Each alternative MUST END IN A *, to match a version number.
+	# -sysv* is not here because it comes later, after sysvr4.
+	-gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
+	      | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\
+	      | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \
+	      | -sym* | -kopensolaris* \
+	      | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
+	      | -aos* | -aros* \
+	      | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
+	      | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
+	      | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
+	      | -openbsd* | -solidbsd* \
+	      | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
+	      | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
+	      | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
+	      | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
+	      | -chorusos* | -chorusrdb* | -cegcc* \
+	      | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
+	      | -mingw32* | -linux-gnu* | -linux-android* \
+	      | -linux-newlib* | -linux-uclibc* \
+	      | -uxpv* | -beos* | -mpeix* | -udk* \
+	      | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
+	      | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
+	      | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
+	      | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
+	      | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
+	      | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
+	      | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*)
+	# Remember, each alternative MUST END IN *, to match a version number.
+		;;
+	-qnx*)
+		case $basic_machine in
+		    x86-* | i*86-*)
+			;;
+		    *)
+			os=-nto$os
+			;;
+		esac
+		;;
+	-nto-qnx*)
+		;;
+	-nto*)
+		os=`echo $os | sed -e 's|nto|nto-qnx|'`
+		;;
+	-sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
+	      | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \
+	      | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
+		;;
+	-mac*)
+		os=`echo $os | sed -e 's|mac|macos|'`
+		;;
+	-linux-dietlibc)
+		os=-linux-dietlibc
+		;;
+	-linux*)
+		os=`echo $os | sed -e 's|linux|linux-gnu|'`
+		;;
+	-sunos5*)
+		os=`echo $os | sed -e 's|sunos5|solaris2|'`
+		;;
+	-sunos6*)
+		os=`echo $os | sed -e 's|sunos6|solaris3|'`
+		;;
+	-opened*)
+		os=-openedition
+		;;
+	-os400*)
+		os=-os400
+		;;
+	-wince*)
+		os=-wince
+		;;
+	-osfrose*)
+		os=-osfrose
+		;;
+	-osf*)
+		os=-osf
+		;;
+	-utek*)
+		os=-bsd
+		;;
+	-dynix*)
+		os=-bsd
+		;;
+	-acis*)
+		os=-aos
+		;;
+	-atheos*)
+		os=-atheos
+		;;
+	-syllable*)
+		os=-syllable
+		;;
+	-386bsd)
+		os=-bsd
+		;;
+	-ctix* | -uts*)
+		os=-sysv
+		;;
+	-nova*)
+		os=-rtmk-nova
+		;;
+	-ns2 )
+		os=-nextstep2
+		;;
+	-nsk*)
+		os=-nsk
+		;;
+	# Preserve the version number of sinix5.
+	-sinix5.*)
+		os=`echo $os | sed -e 's|sinix|sysv|'`
+		;;
+	-sinix*)
+		os=-sysv4
+		;;
+	-tpf*)
+		os=-tpf
+		;;
+	-triton*)
+		os=-sysv3
+		;;
+	-oss*)
+		os=-sysv3
+		;;
+	-svr4)
+		os=-sysv4
+		;;
+	-svr3)
+		os=-sysv3
+		;;
+	-sysvr4)
+		os=-sysv4
+		;;
+	# This must come after -sysvr4.
+	-sysv*)
+		;;
+	-ose*)
+		os=-ose
+		;;
+	-es1800*)
+		os=-ose
+		;;
+	-xenix)
+		os=-xenix
+		;;
+	-*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+		os=-mint
+		;;
+	-aros*)
+		os=-aros
+		;;
+	-kaos*)
+		os=-kaos
+		;;
+	-zvmoe)
+		os=-zvmoe
+		;;
+	-dicos*)
+		os=-dicos
+		;;
+	-nacl*)
+		;;
+	-none)
+		;;
+	*)
+		# Get rid of the `-' at the beginning of $os.
+		os=`echo $os | sed 's/[^-]*-//'`
+		echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
+		exit 1
+		;;
+esac
+else
+
+# Here we handle the default operating systems that come with various machines.
+# The value should be what the vendor currently ships out the door with the
+# machine or, put another way, the most popular OS provided with the machine.
+
+# Note that if you're going to try to match "-MANUFACTURER" here (say,
+# "-sun"), then you have to tell the case statement up towards the top
+# that MANUFACTURER isn't an operating system.  Otherwise, code above
+# will signal an error saying that MANUFACTURER isn't an operating
+# system, and we'll never get to this point.
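+#
+# (e.g. a bare "sun4" arrives here as sparc-sun with an empty $os and is
+# completed below to sparc-sun-sunos4.1.1.)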
+
+case $basic_machine in
+	score-*)
+		os=-elf
+		;;
+	spu-*)
+		os=-elf
+		;;
+	*-acorn)
+		os=-riscix1.2
+		;;
+	arm*-rebel)
+		os=-linux
+		;;
+	arm*-semi)
+		os=-aout
+		;;
+	c4x-* | tic4x-*)
+		os=-coff
+		;;
+	hexagon-*)
+		os=-elf
+		;;
+	tic54x-*)
+		os=-coff
+		;;
+	tic55x-*)
+		os=-coff
+		;;
+	tic6x-*)
+		os=-coff
+		;;
+	# This must come before the *-dec entry.
+	pdp10-*)
+		os=-tops20
+		;;
+	pdp11-*)
+		os=-none
+		;;
+	*-dec | vax-*)
+		os=-ultrix4.2
+		;;
+	m68*-apollo)
+		os=-domain
+		;;
+	i386-sun)
+		os=-sunos4.0.2
+		;;
+	m68000-sun)
+		os=-sunos3
+		;;
+	m68*-cisco)
+		os=-aout
+		;;
+	mep-*)
+		os=-elf
+		;;
+	mips*-cisco)
+		os=-elf
+		;;
+	mips*-*)
+		os=-elf
+		;;
+	or32-*)
+		os=-coff
+		;;
+	*-tti)	# must be before sparc entry or we get the wrong os.
+		os=-sysv3
+		;;
+	sparc-* | *-sun)
+		os=-sunos4.1.1
+		;;
+	*-be)
+		os=-beos
+		;;
+	*-haiku)
+		os=-haiku
+		;;
+	*-ibm)
+		os=-aix
+		;;
+	*-knuth)
+		os=-mmixware
+		;;
+	*-wec)
+		os=-proelf
+		;;
+	*-winbond)
+		os=-proelf
+		;;
+	*-oki)
+		os=-proelf
+		;;
+	*-hp)
+		os=-hpux
+		;;
+	*-hitachi)
+		os=-hiux
+		;;
+	i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
+		os=-sysv
+		;;
+	*-cbm)
+		os=-amigaos
+		;;
+	*-dg)
+		os=-dgux
+		;;
+	*-dolphin)
+		os=-sysv3
+		;;
+	m68k-ccur)
+		os=-rtu
+		;;
+	m88k-omron*)
+		os=-luna
+		;;
+	*-next )
+		os=-nextstep
+		;;
+	*-sequent)
+		os=-ptx
+		;;
+	*-crds)
+		os=-unos
+		;;
+	*-ns)
+		os=-genix
+		;;
+	i370-*)
+		os=-mvs
+		;;
+	*-next)
+		os=-nextstep3
+		;;
+	*-gould)
+		os=-sysv
+		;;
+	*-highlevel)
+		os=-bsd
+		;;
+	*-encore)
+		os=-bsd
+		;;
+	*-sgi)
+		os=-irix
+		;;
+	*-siemens)
+		os=-sysv4
+		;;
+	*-masscomp)
+		os=-rtu
+		;;
+	f30[01]-fujitsu | f700-fujitsu)
+		os=-uxpv
+		;;
+	*-rom68k)
+		os=-coff
+		;;
+	*-*bug)
+		os=-coff
+		;;
+	*-apple)
+		os=-macos
+		;;
+	*-atari*)
+		os=-mint
+		;;
+	*)
+		os=-none
+		;;
+esac
+fi
+
+# Here we handle the case where we know the OS and the CPU type, but not
+# the manufacturer.  We pick the logical manufacturer.
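+# (e.g. "mips-hpux" reaches this point as mips-unknown with os=-hpux and
+# leaves as mips-hp-hpux.)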
+vendor=unknown
+case $basic_machine in
+	*-unknown)
+		case $os in
+			-riscix*)
+				vendor=acorn
+				;;
+			-sunos*)
+				vendor=sun
+				;;
+			-cnk*|-aix*)
+				vendor=ibm
+				;;
+			-beos*)
+				vendor=be
+				;;
+			-hpux*)
+				vendor=hp
+				;;
+			-mpeix*)
+				vendor=hp
+				;;
+			-hiux*)
+				vendor=hitachi
+				;;
+			-unos*)
+				vendor=crds
+				;;
+			-dgux*)
+				vendor=dg
+				;;
+			-luna*)
+				vendor=omron
+				;;
+			-genix*)
+				vendor=ns
+				;;
+			-mvs* | -opened*)
+				vendor=ibm
+				;;
+			-os400*)
+				vendor=ibm
+				;;
+			-ptx*)
+				vendor=sequent
+				;;
+			-tpf*)
+				vendor=ibm
+				;;
+			-vxsim* | -vxworks* | -windiss*)
+				vendor=wrs
+				;;
+			-aux*)
+				vendor=apple
+				;;
+			-hms*)
+				vendor=hitachi
+				;;
+			-mpw* | -macos*)
+				vendor=apple
+				;;
+			-*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+				vendor=atari
+				;;
+			-vos*)
+				vendor=stratus
+				;;
+		esac
+		basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
+		;;
+esac
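+
+# Illustrative example (not from the upstream script): with an explicit OS
+# but no manufacturer, basic_machine arrives here as e.g. "sparc-unknown";
+# given os=-sunos4.1.1, the case above picks vendor=sun and the sed command
+# rewrites the triplet, so the final output is sparc-sun-sunos4.1.1.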
+
+echo $basic_machine$os
+exit
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/configure b/configure
new file mode 100755
index 0000000..547fb97
--- /dev/null
+++ b/configure
@@ -0,0 +1,26061 @@
+#! /bin/sh
+# Guess values for system-dependent variables and create Makefiles.
+# Generated by GNU Autoconf 2.69 for librsb 1.2.0-rc5.
+#
+# Report bugs to <michelemartone_AT_users_DOT_sourceforge_DOT_net>.
+#
+#
+# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
+#
+#
+# This configure script is free software; the Free Software Foundation
+# gives unlimited permission to copy, distribute and modify it.
+#
+# Copyright (c) 2008-2016, Michele Martone
+## -------------------- ##
+## M4sh Initialization. ##
+## -------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
+  emulate sh
+  NULLCMD=:
+  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+  # is contrary to our usage.  Disable this feature.
+  alias -g '${1+"$@"}'='"$@"'
+  setopt NO_GLOB_SUBST
+else
+  case `(set -o) 2>/dev/null` in #(
+  *posix*) :
+    set -o posix ;; #(
+  *) :
+     ;;
+esac
+fi
+
+
+as_nl='
+'
+export as_nl
+# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
+# Prefer a ksh shell builtin over an external printf program on Solaris,
+# but without wasting forks for bash or zsh.
+if test -z "$BASH_VERSION$ZSH_VERSION" \
+    && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
+  as_echo='print -r --'
+  as_echo_n='print -rn --'
+elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
+  as_echo='printf %s\n'
+  as_echo_n='printf %s'
+else
+  if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
+    as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
+    as_echo_n='/usr/ucb/echo -n'
+  else
+    as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
+    as_echo_n_body='eval
+      arg=$1;
+      case $arg in #(
+      *"$as_nl"*)
+	expr "X$arg" : "X\\(.*\\)$as_nl";
+	arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
+      esac;
+      expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
+    '
+    export as_echo_n_body
+    as_echo_n='sh -c $as_echo_n_body as_echo'
+  fi
+  export as_echo_body
+  as_echo='sh -c $as_echo_body as_echo'
+fi
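+
+# Illustrative usage (not from the upstream script): whichever implementation
+# the probe above selected, later code can print portably with and without a
+# trailing newline:
+#   $as_echo "checking for libm"        # always ends with a newline
+#   $as_echo_n "checking for libm... "  # never appends a newline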
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+  PATH_SEPARATOR=:
+  (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+    (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+      PATH_SEPARATOR=';'
+  }
+fi
+
+
+# IFS
+# We need space, tab and new line, in precisely that order.  Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+IFS=" ""	$as_nl"
+
+# Find who we are.  Look in the path if we contain no directory separator.
+as_myself=
+case $0 in #((
+  *[\\/]* ) as_myself=$0 ;;
+  *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+  done
+IFS=$as_save_IFS
+
+     ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+  as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+  $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+  exit 1
+fi
+
+# Unset variables that we do not need and which cause bugs (e.g. in
+# pre-3.0 UWIN ksh).  But do not cause bugs in bash 2.01; the "|| exit 1"
+# suppresses any "Segmentation fault" message there.  '((' could
+# trigger a bug in pdksh 5.2.14.
+for as_var in BASH_ENV ENV MAIL MAILPATH
+do eval test x\${$as_var+set} = xset \
+  && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# CDPATH.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+# Use a proper internal environment variable to ensure we don't fall
+  # into an infinite loop, continuously re-executing ourselves.
+  if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then
+    _as_can_reexec=no; export _as_can_reexec;
+    # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+  *v*x* | *x*v* ) as_opts=-vx ;;
+  *v* ) as_opts=-v ;;
+  *x* ) as_opts=-x ;;
+  * ) as_opts= ;;
+esac
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
+as_fn_exit 255
+  fi
+  # We don't want this to propagate to other subprocesses.
+          { _as_can_reexec=; unset _as_can_reexec;}
+if test "x$CONFIG_SHELL" = x; then
+  as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then :
+  emulate sh
+  NULLCMD=:
+  # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which
+  # is contrary to our usage.  Disable this feature.
+  alias -g '\${1+\"\$@\"}'='\"\$@\"'
+  setopt NO_GLOB_SUBST
+else
+  case \`(set -o) 2>/dev/null\` in #(
+  *posix*) :
+    set -o posix ;; #(
+  *) :
+     ;;
+esac
+fi
+"
+  as_required="as_fn_return () { (exit \$1); }
+as_fn_success () { as_fn_return 0; }
+as_fn_failure () { as_fn_return 1; }
+as_fn_ret_success () { return 0; }
+as_fn_ret_failure () { return 1; }
+
+exitcode=0
+as_fn_success || { exitcode=1; echo as_fn_success failed.; }
+as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; }
+as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; }
+as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; }
+if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then :
+
+else
+  exitcode=1; echo positional parameters were not saved.
+fi
+test x\$exitcode = x0 || exit 1
+test -x / || exit 1"
+  as_suggested="  as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO
+  as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO
+  eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" &&
+  test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1
+
+  test -n \"\${ZSH_VERSION+set}\${BASH_VERSION+set}\" || (
+    ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+    ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO
+    ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO
+    PATH=/empty FPATH=/empty; export PATH FPATH
+    test \"X\`printf %s \$ECHO\`\" = \"X\$ECHO\" \\
+      || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1
+test \$(( 1 + 1 )) = 2 || exit 1"
+  if (eval "$as_required") 2>/dev/null; then :
+  as_have_required=yes
+else
+  as_have_required=no
+fi
+  if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then :
+
+else
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+as_found=false
+for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+  as_found=:
+  case $as_dir in #(
+	 /*)
+	   for as_base in sh bash ksh sh5; do
+	     # Try only shells that exist, to save several forks.
+	     as_shell=$as_dir/$as_base
+	     if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
+		    { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then :
+  CONFIG_SHELL=$as_shell as_have_required=yes
+		   if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then :
+  break 2
+fi
+fi
+	   done;;
+       esac
+  as_found=false
+done
+$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } &&
+	      { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then :
+  CONFIG_SHELL=$SHELL as_have_required=yes
+fi; }
+IFS=$as_save_IFS
+
+
+      if test "x$CONFIG_SHELL" != x; then :
+  export CONFIG_SHELL
+             # We cannot yet assume a decent shell, so we have to provide a
+# neutralization value for shells without unset; and this also
+# works around shells that cannot unset nonexistent variables.
+# Preserve -v and -x to the replacement shell.
+BASH_ENV=/dev/null
+ENV=/dev/null
+(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+case $- in # ((((
+  *v*x* | *x*v* ) as_opts=-vx ;;
+  *v* ) as_opts=-v ;;
+  *x* ) as_opts=-x ;;
+  * ) as_opts= ;;
+esac
+exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+# Admittedly, this is quite paranoid, since all the known shells bail
+# out after a failed `exec'.
+$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
+exit 255
+fi
+
+    if test x$as_have_required = xno; then :
+  $as_echo "$0: This script requires a shell more modern than all"
+  $as_echo "$0: the shells that I found on your system."
+  if test x${ZSH_VERSION+set} = xset ; then
+    $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should"
+    $as_echo "$0: be upgraded to zsh 4.3.4 or later."
+  else
+    $as_echo "$0: Please tell bug-autoconf at gnu.org and
+$0: michelemartone_AT_users_DOT_sourceforge_DOT_net about
+$0: your system, including any error possibly output before
+$0: this message. Then install a modern shell, or manually
+$0: run the script under such a shell if you do have one."
+  fi
+  exit 1
+fi
+fi
+fi
+SHELL=${CONFIG_SHELL-/bin/sh}
+export SHELL
+# Unset more variables known to interfere with behavior of common tools.
+CLICOLOR_FORCE= GREP_OPTIONS=
+unset CLICOLOR_FORCE GREP_OPTIONS
+
+## --------------------- ##
+## M4sh Shell Functions. ##
+## --------------------- ##
+# as_fn_unset VAR
+# ---------------
+# Portably unset VAR.
+as_fn_unset ()
+{
+  { eval $1=; unset $1;}
+}
+as_unset=as_fn_unset
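+
+# Illustrative usage (not from the upstream script):
+#   FOO=bar; as_fn_unset FOO   # safe even on shells whose plain `unset'
+#                              # fails on variables that were never set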
+
+# as_fn_set_status STATUS
+# -----------------------
+# Set $? to STATUS, without forking.
+as_fn_set_status ()
+{
+  return $1
+} # as_fn_set_status
+
+# as_fn_exit STATUS
+# -----------------
+# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
+as_fn_exit ()
+{
+  set +e
+  as_fn_set_status $1
+  exit $1
+} # as_fn_exit
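+
+# Illustrative usage (not from the upstream script):
+#   as_fn_set_status 3; echo $?   # prints 3, without forking a subshell
+#   as_fn_exit 2                  # exits with status 2, even under `set -e'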
+
+# as_fn_mkdir_p
+# -------------
+# Create "$as_dir" as a directory, including parents if necessary.
+as_fn_mkdir_p ()
+{
+
+  case $as_dir in #(
+  -*) as_dir=./$as_dir;;
+  esac
+  test -d "$as_dir" || eval $as_mkdir_p || {
+    as_dirs=
+    while :; do
+      case $as_dir in #(
+      *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+      *) as_qdir=$as_dir;;
+      esac
+      as_dirs="'$as_qdir' $as_dirs"
+      as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+	 X"$as_dir" : 'X\(//\)[^/]' \| \
+	 X"$as_dir" : 'X\(//\)$' \| \
+	 X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)[^/].*/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+      test -d "$as_dir" && break
+    done
+    test -z "$as_dirs" || eval "mkdir $as_dirs"
+  } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
+
+
+} # as_fn_mkdir_p
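+
+# Illustrative usage (not from the upstream script; $as_dir is the
+# function's implicit argument, and the path shown is hypothetical):
+#   as_dir=build/objs/kernels; as_fn_mkdir_p   # creates each missing parent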
+
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+  test -f "$1" && test -x "$1"
+} # as_fn_executable_p
+# as_fn_append VAR VALUE
+# ----------------------
+# Append the text in VALUE to the end of the definition contained in VAR. Take
+# advantage of any shell optimizations that allow amortized linear growth over
+# repeated appends, instead of the typical quadratic growth present in naive
+# implementations.
+if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
+  eval 'as_fn_append ()
+  {
+    eval $1+=\$2
+  }'
+else
+  as_fn_append ()
+  {
+    eval $1=\$$1\$2
+  }
+fi # as_fn_append
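+
+# Illustrative usage (not from the upstream script):
+#   ACC=ab; as_fn_append ACC cd; $as_echo "$ACC"   # prints "abcd"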
+
+# as_fn_arith ARG...
+# ------------------
+# Perform arithmetic evaluation on the ARGs, and store the result in the
+# global $as_val. Take advantage of shells that can avoid forks. The arguments
+# must be portable across $(()) and expr.
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
+  eval 'as_fn_arith ()
+  {
+    as_val=$(( $* ))
+  }'
+else
+  as_fn_arith ()
+  {
+    as_val=`expr "$@" || test $? -eq 1`
+  }
+fi # as_fn_arith
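+
+# Illustrative usage (not from the upstream script):
+#   as_fn_arith 6 + 7; $as_echo "$as_val"   # prints 13, via $(( )) or expr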
+
+
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
+# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
+# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
+# script with STATUS, using 1 if that was 0.
+as_fn_error ()
+{
+  as_status=$1; test $as_status -eq 0 && as_status=1
+  if test "$4"; then
+    as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+    $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
+  fi
+  $as_echo "$as_me: error: $2" >&2
+  as_fn_exit $as_status
+} # as_fn_error
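+
+# Illustrative usage (not from the upstream script):
+#   test -r conftest.c || as_fn_error 77 "conftest.c unreadable" "$LINENO" 5
+# This prints "<script>: error: conftest.c unreadable" on stderr, logs the
+# same message with the line number to file descriptor 5, and exits with
+# status 77.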
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+   test "X`expr 00001 : '.*\(...\)'`" = X001; then
+  as_expr=expr
+else
+  as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+  as_basename=basename
+else
+  as_basename=false
+fi
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+  as_dirname=dirname
+else
+  as_dirname=false
+fi
+
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+	 X"$0" : 'X\(//\)$' \| \
+	 X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X/"$0" |
+    sed '/^.*\/\([^/][^/]*\)\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\/\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\/\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+
+  as_lineno_1=$LINENO as_lineno_1a=$LINENO
+  as_lineno_2=$LINENO as_lineno_2a=$LINENO
+  eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" &&
+  test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || {
+  # Blame Lee E. McMahon (1931-1989) for sed's syntax.  :-)
+  sed -n '
+    p
+    /[$]LINENO/=
+  ' <$as_myself |
+    sed '
+      s/[$]LINENO.*/&-/
+      t lineno
+      b
+      :lineno
+      N
+      :loop
+      s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
+      t loop
+      s/-\n.*//
+    ' >$as_me.lineno &&
+  chmod +x "$as_me.lineno" ||
+    { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
+
+  # If we had to re-execute with $CONFIG_SHELL, we're ensured to have
+  # already done that, so ensure we don't try to do so again and fall
+  # in an infinite loop.  This has already happened in practice.
+  _as_can_reexec=no; export _as_can_reexec
+  # Don't try to exec as it changes $[0], causing all sorts of problems
+  # (the dirname of $[0] is not the place where we might find the
+  # original and so on.  Autoconf is especially sensitive to this).
+  . "./$as_me.lineno"
+  # Exit status is that of the last command.
+  exit
+}
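+
+# Illustrative note (not from the upstream script): after the rewrite above
+# is sourced, a call such as
+#   as_fn_error 1 "oops" "$LINENO" 5
+# reports the correct line number even on shells lacking LINENO support.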
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in #(((((
+-n*)
+  case `echo 'xy\c'` in
+  *c*) ECHO_T='	';;	# ECHO_T is single tab character.
+  xy)  ECHO_C='\c';;
+  *)   echo `echo ksh88 bug on AIX 6.1` > /dev/null
+       ECHO_T='	';;
+  esac;;
+*)
+  ECHO_N='-n';;
+esac
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+  rm -f conf$$.dir/conf$$.file
+else
+  rm -f conf$$.dir
+  mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+  if ln -s conf$$.file conf$$ 2>/dev/null; then
+    as_ln_s='ln -s'
+    # ... but there are two gotchas:
+    # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+    # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+    # In both cases, we have to default to `cp -pR'.
+    ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+      as_ln_s='cp -pR'
+  elif ln conf$$.file conf$$ 2>/dev/null; then
+    as_ln_s=ln
+  else
+    as_ln_s='cp -pR'
+  fi
+else
+  as_ln_s='cp -pR'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
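+
+# Illustrative usage (not from the upstream script): $as_ln_s now names
+# whichever of `ln -s', `ln', or `cp -pR' actually works here, e.g.:
+#   $as_ln_s "$srcdir/README" README   # symlink, hard link, or copy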
+
+if mkdir -p . 2>/dev/null; then
+  as_mkdir_p='mkdir -p "$as_dir"'
+else
+  test -d ./-p && rmdir ./-p
+  as_mkdir_p=false
+fi
+
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
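+
+# Illustrative usage (not from the upstream script):
+#   $as_echo "HAVE_sys/types.h" | $as_tr_cpp   # -> HAVE_SYS_TYPES_H
+#   $as_echo "with-mkl-include" | $as_tr_sh    # -> with_mkl_include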
+
+SHELL=${CONFIG_SHELL-/bin/sh}
+
+
+test -n "$DJDIR" || exec 7<&0 </dev/null
+exec 6>&1
+
+# Name of the host.
+# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status,
+# so uname gets run too.
+ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
+
+#
+# Initializations.
+#
+ac_default_prefix=/usr/local
+ac_clean_files=
+ac_config_libobj_dir=.
+LIBOBJS=
+cross_compiling=no
+subdirs=
+MFLAGS=
+MAKEFLAGS=
+
+# Identity of this package.
+PACKAGE_NAME='librsb'
+PACKAGE_TARNAME='librsb'
+PACKAGE_VERSION='1.2.0-rc5'
+PACKAGE_STRING='librsb 1.2.0-rc5'
+PACKAGE_BUGREPORT='michelemartone_AT_users_DOT_sourceforge_DOT_net'
+PACKAGE_URL=''
+
+# Factoring default headers for most tests.
+ac_includes_default="\
+#include <stdio.h>
+#ifdef HAVE_SYS_TYPES_H
+# include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_STAT_H
+# include <sys/stat.h>
+#endif
+#ifdef STDC_HEADERS
+# include <stdlib.h>
+# include <stddef.h>
+#else
+# ifdef HAVE_STDLIB_H
+#  include <stdlib.h>
+# endif
+#endif
+#ifdef HAVE_STRING_H
+# if !defined STDC_HEADERS && defined HAVE_MEMORY_H
+#  include <memory.h>
+# endif
+# include <string.h>
+#endif
+#ifdef HAVE_STRINGS_H
+# include <strings.h>
+#endif
+#ifdef HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+#ifdef HAVE_STDINT_H
+# include <stdint.h>
+#endif
+#ifdef HAVE_UNISTD_H
+# include <unistd.h>
+#endif"
+
+ac_subst_vars='am__EXEEXT_FALSE
+am__EXEEXT_TRUE
+LTLIBOBJS
+LIBOBJS
+OCTAVE_FLAGS
+WANT_OMPIO_SUPPORT_FALSE
+WANT_OMPIO_SUPPORT_TRUE
+WANT_INTERNAL_HEADERS_INSTALL_FALSE
+WANT_INTERNAL_HEADERS_INSTALL_TRUE
+HAVE_SPARSE_BLAS_INTERFACE_FALSE
+HAVE_SPARSE_BLAS_INTERFACE_TRUE
+HAVE_FC_FALSE
+HAVE_FC_TRUE
+HAVE_M4_FALSE
+HAVE_M4_TRUE
+HAVE_HELP2MAN_FALSE
+HAVE_HELP2MAN_TRUE
+HAVE_PKGCONFIG_INSTALL_FALSE
+HAVE_PKGCONFIG_INSTALL_TRUE
+WANT_BUILD_DOC_FALSE
+WANT_BUILD_DOC_TRUE
+HAVE_DOXYGEN_FALSE
+HAVE_DOXYGEN_TRUE
+WANT_CXX_TEST_RSBENCH_FALSE
+WANT_CXX_TEST_RSBENCH_TRUE
+WANT_BLAS_SPARSE_MOD_INSTALL_FALSE
+WANT_BLAS_SPARSE_MOD_INSTALL_TRUE
+WANT_OCTAVE_TESTING_AND_INT_FALSE
+WANT_OCTAVE_TESTING_AND_INT_TRUE
+WANT_OCTAVE_TESTING_FALSE
+WANT_OCTAVE_TESTING_TRUE
+HAVE_OCTAVE_FALSE
+HAVE_OCTAVE_TRUE
+HAVE_C_EXAMPLES_FALSE
+HAVE_C_EXAMPLES_TRUE
+HAVE_FORTRAN_EXAMPLES_FALSE
+HAVE_FORTRAN_EXAMPLES_TRUE
+WANT_MATRIX_ALL_TYPES
+WANT_MATRIX_ALL_OPS
+WANT_MATRIX_ALL_META_OPS
+WANT_MATRIX_OPS
+WANT_MATRIX_STORAGE
+WANT_MATRIX_VB_STORAGE
+WANT_MATRIX_LINKED_STORAGE
+WANT_MATRIX_BCOO_STORAGE
+WANT_MATRIX_BCSS_STORAGE
+WANT_TYPES
+WANT_SPSM_DIAG_CHECK
+WANT_HALFWORD_INDICES
+WANT_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR
+WANT_COLUMN_UNLOOP_FACTORS
+WANT_LOOPING_KERNELS
+WANT_ROW_UNLOOP_FACTORS
+RSB_RSBENCH_CFLAGS
+RSB_RSBENCH_LIBS
+NOUNROLLCFLAGS
+enable_openmp
+enable_restrict
+RSB_CONST_MAX_SUPPORTED_THREADS
+RSB_DETECTED_MEM_HIERARCHY_INFO
+OPENMP_FCFLAGS
+RSB_USER_SET_MEM_HIERARCHY_INFO
+ARFLAGS
+M4
+HELP2MAN
+DOXYGEN
+OCTAVE
+have_sed
+have_grep
+am__fastdepCCAS_FALSE
+am__fastdepCCAS_TRUE
+CCASDEPMODE
+CCASFLAGS
+CCAS
+CXXCPP
+am__fastdepCXX_FALSE
+am__fastdepCXX_TRUE
+CXXDEPMODE
+ac_ct_CXX
+CXXFLAGS
+CXX
+OPENMP_CFLAGS
+ac_ct_FC
+FCFLAGS
+FC
+CPP
+OTOOL64
+OTOOL
+LIPO
+NMEDIT
+DSYMUTIL
+MANIFEST_TOOL
+RANLIB
+ac_ct_AR
+AR
+DLLTOOL
+OBJDUMP
+LN_S
+NM
+ac_ct_DUMPBIN
+DUMPBIN
+LD
+FGREP
+EGREP
+GREP
+SED
+am__fastdepCC_FALSE
+am__fastdepCC_TRUE
+CCDEPMODE
+am__nodep
+AMDEPBACKSLASH
+AMDEP_FALSE
+AMDEP_TRUE
+am__quote
+am__include
+DEPDIR
+OBJEXT
+EXEEXT
+ac_ct_CC
+CPPFLAGS
+LDFLAGS
+CFLAGS
+CC
+host_os
+host_vendor
+host_cpu
+host
+build_os
+build_vendor
+build_cpu
+build
+LIBTOOL
+am__untar
+am__tar
+AMTAR
+am__leading_dot
+SET_MAKE
+AWK
+mkdir_p
+MKDIR_P
+INSTALL_STRIP_PROGRAM
+STRIP
+install_sh
+MAKEINFO
+AUTOHEADER
+AUTOMAKE
+AUTOCONF
+ACLOCAL
+VERSION
+PACKAGE
+CYGPATH_W
+am__isrc
+INSTALL_DATA
+INSTALL_SCRIPT
+INSTALL_PROGRAM
+SVN_REVISION
+LIBRSB_ABI_VERSION
+LIBRSB_MAIN_RELEASE
+LIBRSB_VERSION
+LIBRSB_LIBRSB_VER
+LIBRSB_VER_PRERS
+LIBRSB_VER_DATE
+LIBRSB_VER_PATCH
+LIBRSB_VER_MINOR
+LIBRSB_VER_MAJOR
+target_alias
+host_alias
+build_alias
+LIBS
+ECHO_T
+ECHO_N
+ECHO_C
+DEFS
+mandir
+localedir
+libdir
+psdir
+pdfdir
+dvidir
+htmldir
+infodir
+docdir
+oldincludedir
+includedir
+localstatedir
+sharedstatedir
+sysconfdir
+datadir
+datarootdir
+libexecdir
+sbindir
+bindir
+program_transform_name
+prefix
+exec_prefix
+PACKAGE_URL
+PACKAGE_BUGREPORT
+PACKAGE_STRING
+PACKAGE_VERSION
+PACKAGE_TARNAME
+PACKAGE_NAME
+PATH_SEPARATOR
+SHELL'
+ac_subst_files=''
+ac_user_opts='
+enable_option_checking
+enable_shared
+enable_static
+with_pic
+enable_fast_install
+enable_dependency_tracking
+with_gnu_ld
+with_sysroot
+enable_libtool_lock
+enable_openmp
+with_math
+with_xdr
+with_hwloc
+with_dmalloc
+with_mkl_include
+with_mkl
+with_zlib
+with_ompio
+with_nounroll_cflag
+enable_internals_error_verbosity
+enable_interface_error_verbosity
+enable_io_level
+with_max_threads
+with_memhinfo
+with_ar
+with_arflags
+with_m4
+enable_matrix_types
+enable_matrix_ops
+enable_vector_utils_loop_unrolls
+enable_octave_testing
+enable_sparse_blas_interface
+with_oski
+with_likwid
+enable_allocator_wrapper
+enable_alignment
+enable_librsb_stats
+enable_rsb_num_threads
+enable_fortran_module_install
+enable_pkg_config_install
+enable_doc_build
+enable_shlib_linked_examples
+enable_c_examples
+enable_fortran_examples
+enable_restrict
+with_c99_flag
+enable_zero_division_checks_on_solve
+enable_sigaction_interruptible_rsbench
+enable_internal_headers_install
+enable_debug_getenvs
+enable_debug
+with_papi
+'
+      ac_precious_vars='build_alias
+host_alias
+target_alias
+CC
+CFLAGS
+LDFLAGS
+LIBS
+CPPFLAGS
+CPP
+FC
+FCFLAGS
+CXX
+CXXFLAGS
+CCC
+CXXCPP
+CCAS
+CCASFLAGS
+M4
+OCTAVE
+AR
+ARFLAGS
+LD
+DOXYGEN
+HELP2MAN
+RSB_USER_SET_MEM_HIERARCHY_INFO
+OPENMP_CFLAGS
+OPENMP_FCFLAGS'
+
+
+# Initialize some variables set by options.
+ac_init_help=
+ac_init_version=false
+ac_unrecognized_opts=
+ac_unrecognized_sep=
+# The variables have the same names as the options, with
+# dashes changed to underlines.
+cache_file=/dev/null
+exec_prefix=NONE
+no_create=
+no_recursion=
+prefix=NONE
+program_prefix=NONE
+program_suffix=NONE
+program_transform_name=s,x,x,
+silent=
+site=
+srcdir=
+verbose=
+x_includes=NONE
+x_libraries=NONE
+
+# Installation directory options.
+# These are left unexpanded so users can "make install exec_prefix=/foo"
+# and all the variables that are supposed to be based on exec_prefix
+# by default will actually change.
+# Use braces instead of parens because sh, perl, etc. also accept them.
+# (The list follows the same order as the GNU Coding Standards.)
+bindir='${exec_prefix}/bin'
+sbindir='${exec_prefix}/sbin'
+libexecdir='${exec_prefix}/libexec'
+datarootdir='${prefix}/share'
+datadir='${datarootdir}'
+sysconfdir='${prefix}/etc'
+sharedstatedir='${prefix}/com'
+localstatedir='${prefix}/var'
+includedir='${prefix}/include'
+oldincludedir='/usr/include'
+docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
+infodir='${datarootdir}/info'
+htmldir='${docdir}'
+dvidir='${docdir}'
+pdfdir='${docdir}'
+psdir='${docdir}'
+libdir='${exec_prefix}/lib'
+localedir='${datarootdir}/locale'
+mandir='${datarootdir}/man'
+
+ac_prev=
+ac_dashdash=
+for ac_option
+do
+  # If the previous option needs an argument, assign it.
+  if test -n "$ac_prev"; then
+    eval $ac_prev=\$ac_option
+    ac_prev=
+    continue
+  fi
+
+  case $ac_option in
+  *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
+  *=)   ac_optarg= ;;
+  *)    ac_optarg=yes ;;
+  esac
+
+  # Accept the important Cygnus configure options, so we can diagnose typos.
+
+  case $ac_dashdash$ac_option in
+  --)
+    ac_dashdash=yes ;;
+
+  -bindir | --bindir | --bindi | --bind | --bin | --bi)
+    ac_prev=bindir ;;
+  -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
+    bindir=$ac_optarg ;;
+
+  -build | --build | --buil | --bui | --bu)
+    ac_prev=build_alias ;;
+  -build=* | --build=* | --buil=* | --bui=* | --bu=*)
+    build_alias=$ac_optarg ;;
+
+  -cache-file | --cache-file | --cache-fil | --cache-fi \
+  | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
+    ac_prev=cache_file ;;
+  -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
+  | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
+    cache_file=$ac_optarg ;;
+
+  --config-cache | -C)
+    cache_file=config.cache ;;
+
+  -datadir | --datadir | --datadi | --datad)
+    ac_prev=datadir ;;
+  -datadir=* | --datadir=* | --datadi=* | --datad=*)
+    datadir=$ac_optarg ;;
+
+  -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \
+  | --dataroo | --dataro | --datar)
+    ac_prev=datarootdir ;;
+  -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \
+  | --dataroot=* | --dataroo=* | --dataro=* | --datar=*)
+    datarootdir=$ac_optarg ;;
+
+  -disable-* | --disable-*)
+    ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
+    # Reject names that are not valid shell variable names.
+    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+      as_fn_error $? "invalid feature name: $ac_useropt"
+    ac_useropt_orig=$ac_useropt
+    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+    case $ac_user_opts in
+      *"
+"enable_$ac_useropt"
+"*) ;;
+      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig"
+	 ac_unrecognized_sep=', ';;
+    esac
+    eval enable_$ac_useropt=no ;;
+
+  -docdir | --docdir | --docdi | --doc | --do)
+    ac_prev=docdir ;;
+  -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*)
+    docdir=$ac_optarg ;;
+
+  -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv)
+    ac_prev=dvidir ;;
+  -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*)
+    dvidir=$ac_optarg ;;
+
+  -enable-* | --enable-*)
+    ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
+    # Reject names that are not valid shell variable names.
+    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+      as_fn_error $? "invalid feature name: $ac_useropt"
+    ac_useropt_orig=$ac_useropt
+    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+    case $ac_user_opts in
+      *"
+"enable_$ac_useropt"
+"*) ;;
+      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig"
+	 ac_unrecognized_sep=', ';;
+    esac
+    eval enable_$ac_useropt=\$ac_optarg ;;
+
+  -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
+  | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
+  | --exec | --exe | --ex)
+    ac_prev=exec_prefix ;;
+  -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
+  | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
+  | --exec=* | --exe=* | --ex=*)
+    exec_prefix=$ac_optarg ;;
+
+  -gas | --gas | --ga | --g)
+    # Obsolete; use --with-gas.
+    with_gas=yes ;;
+
+  -help | --help | --hel | --he | -h)
+    ac_init_help=long ;;
+  -help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
+    ac_init_help=recursive ;;
+  -help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
+    ac_init_help=short ;;
+
+  -host | --host | --hos | --ho)
+    ac_prev=host_alias ;;
+  -host=* | --host=* | --hos=* | --ho=*)
+    host_alias=$ac_optarg ;;
+
+  -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht)
+    ac_prev=htmldir ;;
+  -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \
+  | --ht=*)
+    htmldir=$ac_optarg ;;
+
+  -includedir | --includedir | --includedi | --included | --include \
+  | --includ | --inclu | --incl | --inc)
+    ac_prev=includedir ;;
+  -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
+  | --includ=* | --inclu=* | --incl=* | --inc=*)
+    includedir=$ac_optarg ;;
+
+  -infodir | --infodir | --infodi | --infod | --info | --inf)
+    ac_prev=infodir ;;
+  -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
+    infodir=$ac_optarg ;;
+
+  -libdir | --libdir | --libdi | --libd)
+    ac_prev=libdir ;;
+  -libdir=* | --libdir=* | --libdi=* | --libd=*)
+    libdir=$ac_optarg ;;
+
+  -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
+  | --libexe | --libex | --libe)
+    ac_prev=libexecdir ;;
+  -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
+  | --libexe=* | --libex=* | --libe=*)
+    libexecdir=$ac_optarg ;;
+
+  -localedir | --localedir | --localedi | --localed | --locale)
+    ac_prev=localedir ;;
+  -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*)
+    localedir=$ac_optarg ;;
+
+  -localstatedir | --localstatedir | --localstatedi | --localstated \
+  | --localstate | --localstat | --localsta | --localst | --locals)
+    ac_prev=localstatedir ;;
+  -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
+  | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*)
+    localstatedir=$ac_optarg ;;
+
+  -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
+    ac_prev=mandir ;;
+  -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
+    mandir=$ac_optarg ;;
+
+  -nfp | --nfp | --nf)
+    # Obsolete; use --without-fp.
+    with_fp=no ;;
+
+  -no-create | --no-create | --no-creat | --no-crea | --no-cre \
+  | --no-cr | --no-c | -n)
+    no_create=yes ;;
+
+  -no-recursion | --no-recursion | --no-recursio | --no-recursi \
+  | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
+    no_recursion=yes ;;
+
+  -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
+  | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
+  | --oldin | --oldi | --old | --ol | --o)
+    ac_prev=oldincludedir ;;
+  -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
+  | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
+  | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
+    oldincludedir=$ac_optarg ;;
+
+  -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
+    ac_prev=prefix ;;
+  -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
+    prefix=$ac_optarg ;;
+
+  -program-prefix | --program-prefix | --program-prefi | --program-pref \
+  | --program-pre | --program-pr | --program-p)
+    ac_prev=program_prefix ;;
+  -program-prefix=* | --program-prefix=* | --program-prefi=* \
+  | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
+    program_prefix=$ac_optarg ;;
+
+  -program-suffix | --program-suffix | --program-suffi | --program-suff \
+  | --program-suf | --program-su | --program-s)
+    ac_prev=program_suffix ;;
+  -program-suffix=* | --program-suffix=* | --program-suffi=* \
+  | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
+    program_suffix=$ac_optarg ;;
+
+  -program-transform-name | --program-transform-name \
+  | --program-transform-nam | --program-transform-na \
+  | --program-transform-n | --program-transform- \
+  | --program-transform | --program-transfor \
+  | --program-transfo | --program-transf \
+  | --program-trans | --program-tran \
+  | --progr-tra | --program-tr | --program-t)
+    ac_prev=program_transform_name ;;
+  -program-transform-name=* | --program-transform-name=* \
+  | --program-transform-nam=* | --program-transform-na=* \
+  | --program-transform-n=* | --program-transform-=* \
+  | --program-transform=* | --program-transfor=* \
+  | --program-transfo=* | --program-transf=* \
+  | --program-trans=* | --program-tran=* \
+  | --progr-tra=* | --program-tr=* | --program-t=*)
+    program_transform_name=$ac_optarg ;;
+
+  -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd)
+    ac_prev=pdfdir ;;
+  -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*)
+    pdfdir=$ac_optarg ;;
+
+  -psdir | --psdir | --psdi | --psd | --ps)
+    ac_prev=psdir ;;
+  -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*)
+    psdir=$ac_optarg ;;
+
+  -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+  | -silent | --silent | --silen | --sile | --sil)
+    silent=yes ;;
+
+  -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
+    ac_prev=sbindir ;;
+  -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
+  | --sbi=* | --sb=*)
+    sbindir=$ac_optarg ;;
+
+  -sharedstatedir | --sharedstatedir | --sharedstatedi \
+  | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
+  | --sharedst | --shareds | --shared | --share | --shar \
+  | --sha | --sh)
+    ac_prev=sharedstatedir ;;
+  -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
+  | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
+  | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
+  | --sha=* | --sh=*)
+    sharedstatedir=$ac_optarg ;;
+
+  -site | --site | --sit)
+    ac_prev=site ;;
+  -site=* | --site=* | --sit=*)
+    site=$ac_optarg ;;
+
+  -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
+    ac_prev=srcdir ;;
+  -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
+    srcdir=$ac_optarg ;;
+
+  -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
+  | --syscon | --sysco | --sysc | --sys | --sy)
+    ac_prev=sysconfdir ;;
+  -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
+  | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
+    sysconfdir=$ac_optarg ;;
+
+  -target | --target | --targe | --targ | --tar | --ta | --t)
+    ac_prev=target_alias ;;
+  -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
+    target_alias=$ac_optarg ;;
+
+  -v | -verbose | --verbose | --verbos | --verbo | --verb)
+    verbose=yes ;;
+
+  -version | --version | --versio | --versi | --vers | -V)
+    ac_init_version=: ;;
+
+  -with-* | --with-*)
+    ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
+    # Reject names that are not valid shell variable names.
+    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+      as_fn_error $? "invalid package name: $ac_useropt"
+    ac_useropt_orig=$ac_useropt
+    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+    case $ac_user_opts in
+      *"
+"with_$ac_useropt"
+"*) ;;
+      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig"
+	 ac_unrecognized_sep=', ';;
+    esac
+    eval with_$ac_useropt=\$ac_optarg ;;
+
+  -without-* | --without-*)
+    ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'`
+    # Reject names that are not valid shell variable names.
+    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+      as_fn_error $? "invalid package name: $ac_useropt"
+    ac_useropt_orig=$ac_useropt
+    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+    case $ac_user_opts in
+      *"
+"with_$ac_useropt"
+"*) ;;
+      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig"
+	 ac_unrecognized_sep=', ';;
+    esac
+    eval with_$ac_useropt=no ;;
+
+  --x)
+    # Obsolete; use --with-x.
+    with_x=yes ;;
+
+  -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
+  | --x-incl | --x-inc | --x-in | --x-i)
+    ac_prev=x_includes ;;
+  -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
+  | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
+    x_includes=$ac_optarg ;;
+
+  -x-libraries | --x-libraries | --x-librarie | --x-librari \
+  | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
+    ac_prev=x_libraries ;;
+  -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
+  | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
+    x_libraries=$ac_optarg ;;
+
+  -*) as_fn_error $? "unrecognized option: \`$ac_option'
+Try \`$0 --help' for more information"
+    ;;
+
+  *=*)
+    ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
+    # Reject names that are not valid shell variable names.
+    case $ac_envvar in #(
+      '' | [0-9]* | *[!_$as_cr_alnum]* )
+      as_fn_error $? "invalid variable name: \`$ac_envvar'" ;;
+    esac
+    eval $ac_envvar=\$ac_optarg
+    export $ac_envvar ;;
+
+  *)
+    # FIXME: should be removed in autoconf 3.0.
+    $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2
+    expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
+      $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2
+    : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}"
+    ;;
+
+  esac
+done
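+
+# Illustrative examples (not from the upstream script) of what the loop
+# above does with typical librsb invocations; the MKL linker flag shown is
+# only an assumption:
+#   ./configure --enable-openmp          # sets enable_openmp=yes
+#   ./configure --disable-c-examples     # sets enable_c_examples=no
+#   ./configure --with-mkl="-lmkl_rt"    # sets with_mkl="-lmkl_rt"
+#   ./configure CC=icc CFLAGS="-O2"      # assigns and exports CC and CFLAGS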
+
+if test -n "$ac_prev"; then
+  ac_option=--`echo $ac_prev | sed 's/_/-/g'`
+  as_fn_error $? "missing argument to $ac_option"
+fi
+
+if test -n "$ac_unrecognized_opts"; then
+  case $enable_option_checking in
+    no) ;;
+    fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;;
+    *)     $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
+  esac
+fi
+
+# Check all directory arguments for consistency.
+for ac_var in	exec_prefix prefix bindir sbindir libexecdir datarootdir \
+		datadir sysconfdir sharedstatedir localstatedir includedir \
+		oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
+		libdir localedir mandir
+do
+  eval ac_val=\$$ac_var
+  # Remove trailing slashes.
+  case $ac_val in
+    */ )
+      ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'`
+      eval $ac_var=\$ac_val;;
+  esac
+  # Be sure to have absolute directory names.
+  case $ac_val in
+    [\\/$]* | ?:[\\/]* )  continue;;
+    NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
+  esac
+  as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val"
+done
+
+# There might be people who depend on the old broken behavior: `$host'
+# used to hold the argument of --host etc.
+# FIXME: To remove some day.
+build=$build_alias
+host=$host_alias
+target=$target_alias
+
+# FIXME: To remove some day.
+if test "x$host_alias" != x; then
+  if test "x$build_alias" = x; then
+    cross_compiling=maybe
+  elif test "x$build_alias" != "x$host_alias"; then
+    cross_compiling=yes
+  fi
+fi
+
+ac_tool_prefix=
+test -n "$host_alias" && ac_tool_prefix=$host_alias-
+
+test "$silent" = yes && exec 6>/dev/null
+
+
+ac_pwd=`pwd` && test -n "$ac_pwd" &&
+ac_ls_di=`ls -di .` &&
+ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
+  as_fn_error $? "working directory cannot be determined"
+test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
+  as_fn_error $? "pwd does not report name of working directory"
+
+
+# Find the source files, if location was not specified.
+if test -z "$srcdir"; then
+  ac_srcdir_defaulted=yes
+  # Try the directory containing this script, then the parent directory.
+  ac_confdir=`$as_dirname -- "$as_myself" ||
+$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+	 X"$as_myself" : 'X\(//\)[^/]' \| \
+	 X"$as_myself" : 'X\(//\)$' \| \
+	 X"$as_myself" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_myself" |
+    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)[^/].*/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+  srcdir=$ac_confdir
+  if test ! -r "$srcdir/$ac_unique_file"; then
+    srcdir=..
+  fi
+else
+  ac_srcdir_defaulted=no
+fi
+if test ! -r "$srcdir/$ac_unique_file"; then
+  test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
+  as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir"
+fi
+ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
+ac_abs_confdir=`(
+	cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg"
+	pwd)`
+# When building in place, set srcdir=.
+if test "$ac_abs_confdir" = "$ac_pwd"; then
+  srcdir=.
+fi
+# Remove unnecessary trailing slashes from srcdir.
+# Double slashes in file names in object file debugging info
+# mess up M-x gdb in Emacs.
+case $srcdir in
+*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;;
+esac
+for ac_var in $ac_precious_vars; do
+  eval ac_env_${ac_var}_set=\${${ac_var}+set}
+  eval ac_env_${ac_var}_value=\$${ac_var}
+  eval ac_cv_env_${ac_var}_set=\${${ac_var}+set}
+  eval ac_cv_env_${ac_var}_value=\$${ac_var}
+done
+
+#
+# Report the --help message.
+#
+if test "$ac_init_help" = "long"; then
+  # Omit some internal or obsolete options to make the list less imposing.
+  # This message is too long to be a string in the A/UX 3.1 sh.
+  cat <<_ACEOF
+\`configure' configures librsb 1.2.0-rc5 to adapt to many kinds of systems.
+
+Usage: $0 [OPTION]... [VAR=VALUE]...
+
+To assign environment variables (e.g., CC, CFLAGS...), specify them as
+VAR=VALUE.  See below for descriptions of some of the useful variables.
+
+Defaults for the options are specified in brackets.
+
+Configuration:
+  -h, --help              display this help and exit
+      --help=short        display options specific to this package
+      --help=recursive    display the short help of all the included packages
+  -V, --version           display version information and exit
+  -q, --quiet, --silent   do not print \`checking ...' messages
+      --cache-file=FILE   cache test results in FILE [disabled]
+  -C, --config-cache      alias for \`--cache-file=config.cache'
+  -n, --no-create         do not create output files
+      --srcdir=DIR        find the sources in DIR [configure dir or \`..']
+
+Installation directories:
+  --prefix=PREFIX         install architecture-independent files in PREFIX
+                          [$ac_default_prefix]
+  --exec-prefix=EPREFIX   install architecture-dependent files in EPREFIX
+                          [PREFIX]
+
+By default, \`make install' will install all the files in
+\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc.  You can specify
+an installation prefix other than \`$ac_default_prefix' using \`--prefix',
+for instance \`--prefix=\$HOME'.
+
+For better control, use the options below.
+
+Fine tuning of the installation directories:
+  --bindir=DIR            user executables [EPREFIX/bin]
+  --sbindir=DIR           system admin executables [EPREFIX/sbin]
+  --libexecdir=DIR        program executables [EPREFIX/libexec]
+  --sysconfdir=DIR        read-only single-machine data [PREFIX/etc]
+  --sharedstatedir=DIR    modifiable architecture-independent data [PREFIX/com]
+  --localstatedir=DIR     modifiable single-machine data [PREFIX/var]
+  --libdir=DIR            object code libraries [EPREFIX/lib]
+  --includedir=DIR        C header files [PREFIX/include]
+  --oldincludedir=DIR     C header files for non-gcc [/usr/include]
+  --datarootdir=DIR       read-only arch.-independent data root [PREFIX/share]
+  --datadir=DIR           read-only architecture-independent data [DATAROOTDIR]
+  --infodir=DIR           info documentation [DATAROOTDIR/info]
+  --localedir=DIR         locale-dependent data [DATAROOTDIR/locale]
+  --mandir=DIR            man documentation [DATAROOTDIR/man]
+  --docdir=DIR            documentation root [DATAROOTDIR/doc/librsb]
+  --htmldir=DIR           html documentation [DOCDIR]
+  --dvidir=DIR            dvi documentation [DOCDIR]
+  --pdfdir=DIR            pdf documentation [DOCDIR]
+  --psdir=DIR             ps documentation [DOCDIR]
+_ACEOF
+
+  cat <<\_ACEOF
+
+Program names:
+  --program-prefix=PREFIX            prepend PREFIX to installed program names
+  --program-suffix=SUFFIX            append SUFFIX to installed program names
+  --program-transform-name=PROGRAM   run sed PROGRAM on installed program names
+
+System types:
+  --build=BUILD     configure for building on BUILD [guessed]
+  --host=HOST       cross-compile to build programs to run on HOST [BUILD]
+_ACEOF
+fi
+
+if test -n "$ac_init_help"; then
+  case $ac_init_help in
+     short | recursive ) echo "Configuration of librsb 1.2.0-rc5:";;
+   esac
+  cat <<\_ACEOF
+
+Optional Features:
+  --disable-option-checking  ignore unrecognized --enable/--with options
+  --disable-FEATURE       do not include FEATURE (same as --enable-FEATURE=no)
+  --enable-FEATURE[=ARG]  include FEATURE [ARG=yes]
+  --enable-shared[=PKGS]  build shared libraries [default=yes]
+  --enable-static[=PKGS]  build static libraries [default=yes]
+  --enable-fast-install[=PKGS]
+                          optimize for fast installation [default=yes]
+  --disable-dependency-tracking  speeds up one-time build
+  --enable-dependency-tracking   do not reject slow dependency extractors
+  --disable-libtool-lock  avoid locking (might break parallel builds)
+  --disable-openmp        do not use OpenMP
+  --enable-internals-error-verbosity
+                          Set the error verbosity level of the library's
+                          internal functions (RSB_INT_ERR_VERBOSITY): 0 (no
+                          printout at all) or 1 (printout on error). Use this
+                          to debug the library itself. Experimental.
+  --enable-interface-error-verbosity
+                          Set the error verbosity level of the library's
+                          interface functions (RSB_OUT_ERR_VERBOSITY): 0 (no
+                          printout), 1 (printout on error, if requested), 2
+                          (printout on error), or 99 (exit on error). Use
+                          this to debug your program or to better understand
+                          library usage.
+  --enable-io-level       Set the input/output functionality level
+                          (RSB_WANT_IO_LEVEL), a number between 0 and 7,
+                          formed as any sum of 1 (standard input/output), 2
+                          (standard error), and 4 (arbitrary descriptors).
+                          Experimental.
+  --enable-matrix-types   Generate kernels for the specified types
+                          (default:"double,float,float complex,double complex") (you can specify 'all' to
+                          get all of them, or 'blas' for the Sparse BLAS ones)
+  --enable-matrix-ops     Generate kernels for the specified matrix operations
+                          (default:spmv_uaua,spmv_uauz,spmv_uxua,spmv_unua,spmv_sasa,spsv_uxua,spmv_sxsa,spsv_sxsx,infty_norm,rowssums,scale) (you can specify 'all'
+                          to get all of them, or 'blas' for only the Sparse
+                          BLAS ones, or 'psblas' for only the PSBLAS ones)
+                          (Experimental; the default "all" is recommended.)
+  --enable-vector-utils-loop-unrolls
+                          Loop unrolling of generated vector utility functions
+                          (default:16)
+  --enable-octave-testing Enable GNU Octave based testing.
+  --disable-sparse-blas-interface
+                          Do not build the Sparse BLAS interface to librsb
+                          (built by default).
+  --enable-allocator-wrapper
+                          If enabled, librsb will keep count of internal
+                          memory allocations via allocator function wrappers.
+  --disable-alignment     Do not allocate aligned memory (aligned allocation
+                          is the default).
+  --enable-librsb-stats   If enabled, allow collection of time statistics in
+                          librsb operations.
+  --enable-rsb-num-threads
+                          Honour the RSB_NUM_THREADS environment variable to
+                          control the number of threads (experimental; affects
+                          rsb_spmv/rsb_spmm).
+  --enable-fortran-module-install
+                          Install the (compiler specific) Fortran module
+                          (blas_sparse.mod) (experimental).
+  --enable-pkg-config-install
+                          Install the pkg-config file (librsb.pc).
+  --enable-doc-build      If doxygen is detected or supplied (DOXYGEN
+                          environment variable), documentation will be
+                          rebuilt. If 'help2man' (HELP2MAN) is also present,
+                          it will be used to build additional man pages.
+  --enable-shlib-linked-examples
+                          Shared library based examples (experimental:
+                          developer only).
+  --disable-c-examples    Do not build the C example programs.
+  --disable-fortran-examples
+                          Do not generate and build the Fortran test and
+                          example programs (experimental).
+  --disable-restrict      Do not use the restrict keyword (used by default).
+  --enable-zero-division-checks-on-solve
+                          Prevent division by zero when performing triangular
+                          solves.
+  --enable-sigaction-interruptible-rsbench
+                          Make rsbench interruptible using sigaction
+                          (non-standard; may break the build).
+  --enable-internal-headers-install
+                          Install internal headers (only for debugging /
+                          inspection purposes, not for ordinary users).
+  --enable-debug-getenvs  Enable (undocumented) developer-oriented
+                          getenv-based controls.
+  --enable-debug          Compile with debug flags and enable assertions and
+                          other internals. This will slow down the code
+                          considerably.
+
+Optional Packages:
+  --with-PACKAGE[=ARG]    use PACKAGE [ARG=yes]
+  --without-PACKAGE       do not use PACKAGE (same as --with-PACKAGE=no)
+  --with-pic[=PKGS]       try to use only PIC/non-PIC objects [default=use
+                          both]
+  --with-gnu-ld           assume the C compiler uses GNU ld [default=no]
+  --with-sysroot=DIR Search for dependent libraries within DIR
+                        (or the compiler's sysroot if not specified).
+  --with-math             Specify the math library
+  --with-xdr              Specify XDR library. e.g.: --with-xdr="..."
+  --with-hwloc            Specify the hwloc library (EXPERIMENTAL)
+  --with-dmalloc          Use dmalloc (experimental).
+  --with-mkl-include      Specify the MKL (Intel Math Kernel Library) library
+                          headers path. e.g.:
+                          --with-mkl-include="/opt/intel/mkl/include".
+  --with-mkl              Specify the MKL (Intel Math Kernel Library) library
+                          to be used with the benchmarking program. E.g.:
+                          --with-mkl="...". Include options should be
+                          specified in the MKL_INCLUDE environment variable.
+  --with-zlib             Specify Z library. e.g.: --with-zlib="..." for
+                          reading gzip-compressed matrix files.
+  --with-ompio            Use OpenMP and fgets_unlocked() for parallel I/O
+  --with-nounroll-cflag   Specify the no-unroll compiler flag (if unset, it
+                          will be guessed).
+  --with-max-threads      Maximum number of supported threads (default: 64).
+  --with-memhinfo         Compile with user specified memory hierarchy
+                          information, which can be overridden by runtime
+                          detection and runtime read of
+                          RSB_USER_SET_MEM_HIERARCHY_INFO environment
+                          variable.
+  --with-ar               Specify the library archiver program explicitly.
+  --with-arflags          Specify the library archiver program flags
+                          explicitly.
+  --with-m4               Specify the M4 preprocessor program explicitly.
+  --with-oski             OSKI comparative benchmarking (WARNING: be sure to
+                          set OSKI_INCLUDE, OSKI_LUA_PATH, OSKI_PATH
+                          environment variables first). UNFINISHED.
+  --with-likwid           LIKWID support (will add the LIKWID_LIBS variable to
+                          LIBS and LIKWID_CFLAGS to CFLAGS). UNFINISHED.
+  --with-c99-flag         Add the -std=c99 compilation flag to CFLAGS.
+  --with-papi             Specify the PAPI library (UNFINISHED)
+
+Some influential environment variables:
+  CC          C compiler command
+  CFLAGS      C compiler flags
+  LDFLAGS     linker flags, e.g. -L<lib dir> if you have libraries in a
+              nonstandard directory <lib dir>
+  LIBS        libraries to pass to the linker, e.g. -l<library>
+  CPPFLAGS    (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if
+              you have headers in a nonstandard directory <include dir>
+  CPP         C preprocessor
+  FC          Fortran compiler command
+  FCFLAGS     Fortran compiler flags
+  CXX         C++ compiler command
+  CXXFLAGS    C++ compiler flags
+  CXXCPP      C++ preprocessor
+  CCAS        assembler compiler command (defaults to CC)
+  CCASFLAGS   assembler compiler flags (defaults to CFLAGS)
+  M4          M4 macro preprocessor
+  OCTAVE      GNU Octave executable
+  AR          Library archiver program
+  ARFLAGS     Library archiver program flags
+  LD          Linker program
+  DOXYGEN     Doxygen program for generating documentation from librsb source
+              code
+  HELP2MAN    Help2man is a program for generating man pages from program help
+              output
+  RSB_USER_SET_MEM_HIERARCHY_INFO
+              Memory hierarchy info string for librsb; e.g.:
+              L2:4/64/512K,L1:8/64/24K
+  OPENMP_CFLAGS
+              C compilation flags for OpenMP
+  OPENMP_FCFLAGS
+              Fortran compilation flags for OpenMP
+
+Use these variables to override the choices made by `configure' or to help
+it to find libraries and programs with nonstandard names/locations.
+
+Report bugs to <michelemartone_AT_users_DOT_sourceforge_DOT_net>.
+_ACEOF
+ac_status=$?
+fi
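+
+# Illustrative invocation (a sketch, not upstream documentation), combining
+# several of the options described above:
+#   ./configure --prefix="$HOME/librsb" --enable-openmp \
+#       --with-memhinfo=L2:4/64/512K,L1:8/64/24K \
+#       --enable-matrix-types="double,double complex"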
+
+if test "$ac_init_help" = "recursive"; then
+  # If there are subdirs, report their specific --help.
+  for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
+    test -d "$ac_dir" ||
+      { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
+      continue
+    ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+  ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+  # A ".." for each directory in $ac_dir_suffix.
+  ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+  case $ac_top_builddir_sub in
+  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+  esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+  .)  # We are building in place.
+    ac_srcdir=.
+    ac_top_srcdir=$ac_top_builddir_sub
+    ac_abs_top_srcdir=$ac_pwd ;;
+  [\\/]* | ?:[\\/]* )  # Absolute name.
+    ac_srcdir=$srcdir$ac_dir_suffix;
+    ac_top_srcdir=$srcdir
+    ac_abs_top_srcdir=$srcdir ;;
+  *) # Relative name.
+    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+    ac_top_srcdir=$ac_top_build_prefix$srcdir
+    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+    cd "$ac_dir" || { ac_status=$?; continue; }
+    # Check for a guest configure script.
+    if test -f "$ac_srcdir/configure.gnu"; then
+      echo &&
+      $SHELL "$ac_srcdir/configure.gnu" --help=recursive
+    elif test -f "$ac_srcdir/configure"; then
+      echo &&
+      $SHELL "$ac_srcdir/configure" --help=recursive
+    else
+      $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
+    fi || ac_status=$?
+    cd "$ac_pwd" || { ac_status=$?; break; }
+  done
+fi
+
+test -n "$ac_init_help" && exit $ac_status
+if $ac_init_version; then
+  cat <<\_ACEOF
+librsb configure 1.2.0-rc5
+generated by GNU Autoconf 2.69
+
+Copyright (C) 2012 Free Software Foundation, Inc.
+This configure script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it.
+
+Copyright (c) 2008-2016, Michele Martone
+_ACEOF
+  exit
+fi
+
+## ------------------------ ##
+## Autoconf initialization. ##
+## ------------------------ ##
+
+# ac_fn_c_try_compile LINENO
+# --------------------------
+# Try to compile conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_compile ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  rm -f conftest.$ac_objext
+  if { { ac_try="$ac_compile"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_compile") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && {
+	 test -z "$ac_c_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest.$ac_objext; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+	ac_retval=1
+fi
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_compile
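+# A minimal sketch of how the helper above is used: the caller first writes
+# a test program to conftest.$ac_ext, then branches on the result, e.g.:
+#   if ac_fn_c_try_compile "$LINENO"; then :
+#     # the test program compiled
+#   fi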
+
+# ac_fn_c_try_link LINENO
+# -----------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_link ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  rm -f conftest.$ac_objext conftest$ac_exeext
+  if { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && {
+	 test -z "$ac_c_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest$ac_exeext && {
+	 test "$cross_compiling" = yes ||
+	 test -x conftest$ac_exeext
+       }; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+	ac_retval=1
+fi
+  # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
+  # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
+  # interfere with the next link command; also delete a directory that is
+  # left behind by Apple's compiler.  We do this before executing the actions.
+  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_link
+
+# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES
+# -------------------------------------------------------
+# Tests whether HEADER exists and can be compiled using the include files in
+# INCLUDES, setting the cache variable VAR accordingly.
+ac_fn_c_check_header_compile ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$4
+#include <$2>
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  eval "$3=yes"
+else
+  eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_header_compile
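+# A typical call, matching the LINENO HEADER VAR INCLUDES signature above
+# (header and cache-variable names are illustrative):
+#   ac_fn_c_check_header_compile "$LINENO" "stdlib.h" "ac_cv_header_stdlib_h" \
+#     "$ac_includes_default"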
+
+# ac_fn_c_try_cpp LINENO
+# ----------------------
+# Try to preprocess conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_cpp ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  if { { ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } > conftest.i && {
+	 test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
+	 test ! -s conftest.err
+       }; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+    ac_retval=1
+fi
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_cpp
+
+# ac_fn_c_try_run LINENO
+# ----------------------
+# Try to link conftest.$ac_ext, run the resulting program, and return whether
+# this succeeded. Assumes that executables *can* be run.
+ac_fn_c_try_run ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  if { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && { ac_try='./conftest$ac_exeext'
+  { { case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_try") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; }; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: program exited with status $ac_status" >&5
+       $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+       ac_retval=$ac_status
+fi
+  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_run
+
+# ac_fn_c_check_func LINENO FUNC VAR
+# ----------------------------------
+# Tests whether FUNC exists, setting the cache variable VAR accordingly
+ac_fn_c_check_func ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+/* Define $2 to an innocuous variant, in case <limits.h> declares $2.
+   For example, HP-UX 11i <limits.h> declares gettimeofday.  */
+#define $2 innocuous_$2
+
+/* System header to define __stub macros and hopefully few prototypes,
+    which can conflict with char $2 (); below.
+    Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+    <limits.h> exists even on freestanding compilers.  */
+
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+
+#undef $2
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+#ifdef __cplusplus
+extern "C"
+#endif
+char $2 ();
+/* The GNU C library defines this for functions which it implements
+    to always fail with ENOSYS.  Some functions are actually named
+    something starting with __ and the normal name is an alias.  */
+#if defined __stub_$2 || defined __stub___$2
+choke me
+#endif
+
+int
+main ()
+{
+return $2 ();
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  eval "$3=yes"
+else
+  eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_func
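+# A typical call, matching the LINENO FUNC VAR signature documented above
+# (the function and cache-variable names are illustrative):
+#   ac_fn_c_check_func "$LINENO" "gettimeofday" "ac_cv_func_gettimeofday"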
+
+# ac_fn_fc_try_compile LINENO
+# ---------------------------
+# Try to compile conftest.$ac_ext, and return whether this succeeded.
+ac_fn_fc_try_compile ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  rm -f conftest.$ac_objext
+  if { { ac_try="$ac_compile"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_compile") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && {
+	 test -z "$ac_fc_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest.$ac_objext; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+	ac_retval=1
+fi
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_fc_try_compile
+
+# ac_fn_fc_try_link LINENO
+# ------------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded.
+ac_fn_fc_try_link ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  rm -f conftest.$ac_objext conftest$ac_exeext
+  if { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && {
+	 test -z "$ac_fc_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest$ac_exeext && {
+	 test "$cross_compiling" = yes ||
+	 test -x conftest$ac_exeext
+       }; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+	ac_retval=1
+fi
+  # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
+  # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
+  # interfere with the next link command; also delete a directory that is
+  # left behind by Apple's compiler.  We do this before executing the actions.
+  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_fc_try_link
+
+# ac_fn_cxx_try_compile LINENO
+# ----------------------------
+# Try to compile conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_compile ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  rm -f conftest.$ac_objext
+  if { { ac_try="$ac_compile"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_compile") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && {
+	 test -z "$ac_cxx_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest.$ac_objext; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+	ac_retval=1
+fi
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_compile
+
+# ac_fn_cxx_try_cpp LINENO
+# ------------------------
+# Try to preprocess conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_cpp ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  if { { ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } > conftest.i && {
+	 test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" ||
+	 test ! -s conftest.err
+       }; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+    ac_retval=1
+fi
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_cpp
+
+# ac_fn_cxx_try_link LINENO
+# -------------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_link ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  rm -f conftest.$ac_objext conftest$ac_exeext
+  if { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && {
+	 test -z "$ac_cxx_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest$ac_exeext && {
+	 test "$cross_compiling" = yes ||
+	 test -x conftest$ac_exeext
+       }; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+	ac_retval=1
+fi
+  # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
+  # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
+  # interfere with the next link command; also delete a directory that is
+  # left behind by Apple's compiler.  We do this before executing the actions.
+  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_link
+
+# ac_fn_c_compute_int LINENO EXPR VAR INCLUDES
+# --------------------------------------------
+# Tries to find the compile-time value of EXPR in a program that includes
+# INCLUDES, setting VAR accordingly. Returns whether the value could be
+# computed.
+ac_fn_c_compute_int ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  if test "$cross_compiling" = yes; then
+    # Depending upon the size, compute the lo and hi bounds.
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$4
+int
+main ()
+{
+static int test_array [1 - 2 * !(($2) >= 0)];
+test_array [0] = 0;
+return test_array [0];
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_lo=0 ac_mid=0
+  while :; do
+    cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$4
+int
+main ()
+{
+static int test_array [1 - 2 * !(($2) <= $ac_mid)];
+test_array [0] = 0;
+return test_array [0];
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_hi=$ac_mid; break
+else
+  as_fn_arith $ac_mid + 1 && ac_lo=$as_val
+			if test $ac_lo -le $ac_mid; then
+			  ac_lo= ac_hi=
+			  break
+			fi
+			as_fn_arith 2 '*' $ac_mid + 1 && ac_mid=$as_val
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+  done
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$4
+int
+main ()
+{
+static int test_array [1 - 2 * !(($2) < 0)];
+test_array [0] = 0;
+return test_array [0];
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_hi=-1 ac_mid=-1
+  while :; do
+    cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$4
+int
+main ()
+{
+static int test_array [1 - 2 * !(($2) >= $ac_mid)];
+test_array [0] = 0;
+return test_array [0];
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_lo=$ac_mid; break
+else
+  as_fn_arith '(' $ac_mid ')' - 1 && ac_hi=$as_val
+			if test $ac_mid -le $ac_hi; then
+			  ac_lo= ac_hi=
+			  break
+			fi
+			as_fn_arith 2 '*' $ac_mid && ac_mid=$as_val
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+  done
+else
+  ac_lo= ac_hi=
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+# Binary search between lo and hi bounds.
+while test "x$ac_lo" != "x$ac_hi"; do
+  as_fn_arith '(' $ac_hi - $ac_lo ')' / 2 + $ac_lo && ac_mid=$as_val
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$4
+int
+main ()
+{
+static int test_array [1 - 2 * !(($2) <= $ac_mid)];
+test_array [0] = 0;
+return test_array [0];
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_hi=$ac_mid
+else
+  as_fn_arith '(' $ac_mid ')' + 1 && ac_lo=$as_val
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+done
+case $ac_lo in #((
+?*) eval "$3=\$ac_lo"; ac_retval=0 ;;
+'') ac_retval=1 ;;
+esac
+  else
+    cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$4
+static long int longval () { return $2; }
+static unsigned long int ulongval () { return $2; }
+#include <stdio.h>
+#include <stdlib.h>
+int
+main ()
+{
+
+  FILE *f = fopen ("conftest.val", "w");
+  if (! f)
+    return 1;
+  if (($2) < 0)
+    {
+      long int i = longval ();
+      if (i != ($2))
+	return 1;
+      fprintf (f, "%ld", i);
+    }
+  else
+    {
+      unsigned long int i = ulongval ();
+      if (i != ($2))
+	return 1;
+      fprintf (f, "%lu", i);
+    }
+  /* Do not output a trailing newline, as this causes \r\n confusion
+     on some platforms.  */
+  return ferror (f) || fclose (f) != 0;
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+  echo >>conftest.val; read $3 <conftest.val; ac_retval=0
+else
+  ac_retval=1
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+  conftest.$ac_objext conftest.beam conftest.$ac_ext
+rm -f conftest.val
+
+  fi
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_c_compute_int
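+# A typical call, in the style Autoconf emits for size checks (the cache
+# variable name is illustrative):
+#   ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (int))" \
+#     "ac_cv_sizeof_int" "$ac_includes_default"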
+
+# ac_fn_c_check_type LINENO TYPE VAR INCLUDES
+# -------------------------------------------
+# Tests whether TYPE exists after having included INCLUDES, setting cache
+# variable VAR accordingly.
+ac_fn_c_check_type ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  eval "$3=no"
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$4
+int
+main ()
+{
+if (sizeof ($2))
+	 return 0;
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$4
+int
+main ()
+{
+if (sizeof (($2)))
+	    return 0;
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+else
+  eval "$3=yes"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_type
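+# A typical call (names illustrative). Note the two-step sizeof test above:
+# "sizeof (($2))" compiles for an expression but not for a genuine type, so
+# VAR becomes yes only for real types:
+#   ac_fn_c_check_type "$LINENO" "size_t" "ac_cv_type_size_t" "$ac_includes_default"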
+
+# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES
+# -------------------------------------------------------
+# Tests whether HEADER exists, giving a warning if it cannot be compiled using
+# the include files in INCLUDES and setting the cache variable VAR
+# accordingly.
+ac_fn_c_check_header_mongrel ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  if eval \${$3+:} false; then :
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+  $as_echo_n "(cached) " >&6
+fi
+eval ac_res=\$$3
+	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+else
+  # Is the header compilable?
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5
+$as_echo_n "checking $2 usability... " >&6; }
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$4
+#include <$2>
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_header_compiler=yes
+else
+  ac_header_compiler=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5
+$as_echo "$ac_header_compiler" >&6; }
+
+# Is the header present?
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5
+$as_echo_n "checking $2 presence... " >&6; }
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <$2>
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+  ac_header_preproc=yes
+else
+  ac_header_preproc=no
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5
+$as_echo "$ac_header_preproc" >&6; }
+
+# So?  What about this header?
+case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #((
+  yes:no: )
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5
+$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;}
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
+$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
+    ;;
+  no:yes:* )
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5
+$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;}
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2:     check for missing prerequisite headers?" >&5
+$as_echo "$as_me: WARNING: $2:     check for missing prerequisite headers?" >&2;}
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5
+$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;}
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2:     section \"Present But Cannot Be Compiled\"" >&5
+$as_echo "$as_me: WARNING: $2:     section \"Present But Cannot Be Compiled\"" >&2;}
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
+$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
+( $as_echo "## -------------------------------------------------------------- ##
+## Report this to michelemartone_AT_users_DOT_sourceforge_DOT_net ##
+## -------------------------------------------------------------- ##"
+     ) | sed "s/^/$as_me: WARNING:     /" >&2
+    ;;
+esac
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  eval "$3=\$ac_header_compiler"
+fi
+eval ac_res=\$$3
+	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+fi
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_header_mongrel
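+# A typical call, same signature as ac_fn_c_check_header_compile above; this
+# variant is the one header checks in the script body go through (names
+# illustrative):
+#   ac_fn_c_check_header_mongrel "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" \
+#     "$ac_includes_default"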
+cat >config.log <<_ACEOF
+This file contains any messages produced by compilers while
+running configure, to aid debugging if configure makes a mistake.
+
+It was created by librsb $as_me 1.2.0-rc5, which was
+generated by GNU Autoconf 2.69.  Invocation command line was
+
+  $ $0 $@
+
+_ACEOF
+exec 5>>config.log
+{
+cat <<_ASUNAME
+## --------- ##
+## Platform. ##
+## --------- ##
+
+hostname = `(hostname || uname -n) 2>/dev/null | sed 1q`
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown`
+/bin/uname -X     = `(/bin/uname -X) 2>/dev/null     || echo unknown`
+
+/bin/arch              = `(/bin/arch) 2>/dev/null              || echo unknown`
+/usr/bin/arch -k       = `(/usr/bin/arch -k) 2>/dev/null       || echo unknown`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
+/usr/bin/hostinfo      = `(/usr/bin/hostinfo) 2>/dev/null      || echo unknown`
+/bin/machine           = `(/bin/machine) 2>/dev/null           || echo unknown`
+/usr/bin/oslevel       = `(/usr/bin/oslevel) 2>/dev/null       || echo unknown`
+/bin/universe          = `(/bin/universe) 2>/dev/null          || echo unknown`
+
+_ASUNAME
+
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    $as_echo "PATH: $as_dir"
+  done
+IFS=$as_save_IFS
+
+} >&5
+
+cat >&5 <<_ACEOF
+
+
+## ----------- ##
+## Core tests. ##
+## ----------- ##
+
+_ACEOF
+
+
+# Keep a trace of the command line.
+# Strip out --no-create and --no-recursion so they do not pile up.
+# Strip out --silent because we don't want to record it for future runs.
+# Also quote any args containing shell meta-characters.
+# Make two passes to allow for proper duplicate-argument suppression.
+ac_configure_args=
+ac_configure_args0=
+ac_configure_args1=
+ac_must_keep_next=false
+for ac_pass in 1 2
+do
+  for ac_arg
+  do
+    case $ac_arg in
+    -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;;
+    -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+    | -silent | --silent | --silen | --sile | --sil)
+      continue ;;
+    *\'*)
+      ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
+    esac
+    case $ac_pass in
+    1) as_fn_append ac_configure_args0 " '$ac_arg'" ;;
+    2)
+      as_fn_append ac_configure_args1 " '$ac_arg'"
+      if test $ac_must_keep_next = true; then
+	ac_must_keep_next=false # Got value, back to normal.
+      else
+	case $ac_arg in
+	  *=* | --config-cache | -C | -disable-* | --disable-* \
+	  | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \
+	  | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \
+	  | -with-* | --with-* | -without-* | --without-* | --x)
+	    case "$ac_configure_args0 " in
+	      "$ac_configure_args1"*" '$ac_arg' "* ) continue ;;
+	    esac
+	    ;;
+	  -* ) ac_must_keep_next=true ;;
+	esac
+      fi
+      as_fn_append ac_configure_args " '$ac_arg'"
+      ;;
+    esac
+  done
+done
+{ ac_configure_args0=; unset ac_configure_args0;}
+{ ac_configure_args1=; unset ac_configure_args1;}
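+# Net effect (a sketch; the flag is illustrative): --silent is dropped, and a
+# repeated option keeps only its last occurrence, so
+#   ./configure --enable-openmp --silent --enable-openmp
+# is recorded as " '--enable-openmp'" in $ac_configure_args.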
+
+# When interrupted or exit'd, cleanup temporary files, and complete
+# config.log.  We remove comments because anyway the quotes in there
+# would cause problems or look ugly.
+# WARNING: Use '\'' to represent an apostrophe within the trap.
+# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug.
+trap 'exit_status=$?
+  # Save into config.log some information that might help in debugging.
+  {
+    echo
+
+    $as_echo "## ---------------- ##
+## Cache variables. ##
+## ---------------- ##"
+    echo
+    # The following way of writing the cache mishandles newlines in values.
+(
+  for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do
+    eval ac_val=\$$ac_var
+    case $ac_val in #(
+    *${as_nl}*)
+      case $ac_var in #(
+      *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
+$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+      esac
+      case $ac_var in #(
+      _ | IFS | as_nl) ;; #(
+      BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+      *) { eval $ac_var=; unset $ac_var;} ;;
+      esac ;;
+    esac
+  done
+  (set) 2>&1 |
+    case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #(
+    *${as_nl}ac_space=\ *)
+      sed -n \
+	"s/'\''/'\''\\\\'\'''\''/g;
+	  s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p"
+      ;; #(
+    *)
+      sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+      ;;
+    esac |
+    sort
+)
+    echo
+
+    $as_echo "## ----------------- ##
+## Output variables. ##
+## ----------------- ##"
+    echo
+    for ac_var in $ac_subst_vars
+    do
+      eval ac_val=\$$ac_var
+      case $ac_val in
+      *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+      esac
+      $as_echo "$ac_var='\''$ac_val'\''"
+    done | sort
+    echo
+
+    if test -n "$ac_subst_files"; then
+      $as_echo "## ------------------- ##
+## File substitutions. ##
+## ------------------- ##"
+      echo
+      for ac_var in $ac_subst_files
+      do
+	eval ac_val=\$$ac_var
+	case $ac_val in
+	*\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+	esac
+	$as_echo "$ac_var='\''$ac_val'\''"
+      done | sort
+      echo
+    fi
+
+    if test -s confdefs.h; then
+      $as_echo "## ----------- ##
+## confdefs.h. ##
+## ----------- ##"
+      echo
+      cat confdefs.h
+      echo
+    fi
+    test "$ac_signal" != 0 &&
+      $as_echo "$as_me: caught signal $ac_signal"
+    $as_echo "$as_me: exit $exit_status"
+  } >&5
+  rm -f core *.core core.conftest.* &&
+    rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
+    exit $exit_status
+' 0
+for ac_signal in 1 2 13 15; do
+  trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal
+done
+ac_signal=0
+
+# confdefs.h avoids OS command line length limits that DEFS can exceed.
+rm -f -r conftest* confdefs.h
+
+$as_echo "/* confdefs.h */" > confdefs.h
+
+# Predefined preprocessor variables.
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_NAME "$PACKAGE_NAME"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_TARNAME "$PACKAGE_TARNAME"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_VERSION "$PACKAGE_VERSION"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_STRING "$PACKAGE_STRING"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_URL "$PACKAGE_URL"
+_ACEOF
+
+
+# Let the site file select an alternate cache file if it wants to.
+# Prefer an explicitly selected file to automatically selected ones.
+ac_site_file1=NONE
+ac_site_file2=NONE
+if test -n "$CONFIG_SITE"; then
+  # We do not want a PATH search for config.site.
+  case $CONFIG_SITE in #((
+    -*)  ac_site_file1=./$CONFIG_SITE;;
+    */*) ac_site_file1=$CONFIG_SITE;;
+    *)   ac_site_file1=./$CONFIG_SITE;;
+  esac
+elif test "x$prefix" != xNONE; then
+  ac_site_file1=$prefix/share/config.site
+  ac_site_file2=$prefix/etc/config.site
+else
+  ac_site_file1=$ac_default_prefix/share/config.site
+  ac_site_file2=$ac_default_prefix/etc/config.site
+fi
+for ac_site_file in "$ac_site_file1" "$ac_site_file2"
+do
+  test "x$ac_site_file" = xNONE && continue
+  if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then
+    { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5
+$as_echo "$as_me: loading site script $ac_site_file" >&6;}
+    sed 's/^/| /' "$ac_site_file" >&5
+    . "$ac_site_file" \
+      || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "failed to load site script $ac_site_file
+See \`config.log' for more details" "$LINENO" 5; }
+  fi
+done
+
+if test -r "$cache_file"; then
+  # Some versions of bash will fail to source /dev/null (special files
+  # actually), so we avoid doing that.  DJGPP emulates it as a regular file.
+  if test /dev/null != "$cache_file" && test -f "$cache_file"; then
+    { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5
+$as_echo "$as_me: loading cache $cache_file" >&6;}
+    case $cache_file in
+      [\\/]* | ?:[\\/]* ) . "$cache_file";;
+      *)                      . "./$cache_file";;
+    esac
+  fi
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5
+$as_echo "$as_me: creating cache $cache_file" >&6;}
+  >$cache_file
+fi
+
+# Check that the precious variables saved in the cache have kept the same
+# value.
+ac_cache_corrupted=false
+for ac_var in $ac_precious_vars; do
+  eval ac_old_set=\$ac_cv_env_${ac_var}_set
+  eval ac_new_set=\$ac_env_${ac_var}_set
+  eval ac_old_val=\$ac_cv_env_${ac_var}_value
+  eval ac_new_val=\$ac_env_${ac_var}_value
+  case $ac_old_set,$ac_new_set in
+    set,)
+      { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
+$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
+      ac_cache_corrupted=: ;;
+    ,set)
+      { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5
+$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
+      ac_cache_corrupted=: ;;
+    ,);;
+    *)
+      if test "x$ac_old_val" != "x$ac_new_val"; then
+	# differences in whitespace do not lead to failure.
+	ac_old_val_w=`echo x $ac_old_val`
+	ac_new_val_w=`echo x $ac_new_val`
+	if test "$ac_old_val_w" != "$ac_new_val_w"; then
+	  { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5
+$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
+	  ac_cache_corrupted=:
+	else
+	  { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5
+$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;}
+	  eval $ac_var=\$ac_old_val
+	fi
+	{ $as_echo "$as_me:${as_lineno-$LINENO}:   former value:  \`$ac_old_val'" >&5
+$as_echo "$as_me:   former value:  \`$ac_old_val'" >&2;}
+	{ $as_echo "$as_me:${as_lineno-$LINENO}:   current value: \`$ac_new_val'" >&5
+$as_echo "$as_me:   current value: \`$ac_new_val'" >&2;}
+      fi;;
+  esac
+  # Pass precious variables to config.status.
+  if test "$ac_new_set" = set; then
+    case $ac_new_val in
+    *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
+    *) ac_arg=$ac_var=$ac_new_val ;;
+    esac
+    case " $ac_configure_args " in
+      *" '$ac_arg' "*) ;; # Avoid dups.  Use of quotes ensures accuracy.
+      *) as_fn_append ac_configure_args " '$ac_arg'" ;;
+    esac
+  fi
+done
+if $ac_cache_corrupted; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+  { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5
+$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;}
+  as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5
+fi
+## -------------------- ##
+## Main body of script. ##
+## -------------------- ##
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+# The following are valid for both trunk and release version.
+# It indicates the version this trunk is loosely related to.
+LIBRSB_VER_MAJOR=1
+LIBRSB_VER_MINOR=2
+LIBRSB_VER_PATCH=0
+LIBRSB_LIBRSB_VER=1"0"2"00"
+if test x"librsbsvnversion" = x"trunk" ; then
+LIBRSB_VER_PRERS="-trunk"
+else
+LIBRSB_VER_PRERS="-rc5"
+fi
+LIBRSB_VER_DATE="September 01, 2016"
+LIBRSB_VERSION="1.2.0-rc5"
+LIBRSB_MAIN_RELEASE="1.2.0"
+LIBRSB_ABI_VERSION="0:0:0"
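+# For the record: the quoted concatenation above yields the plain string
+# "10200" (major*10000 + minor*100 + patch), and LIBRSB_ABI_VERSION reads
+# like a libtool -version-info current:revision:age triple (an assumption;
+# the script does not say so here).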
+
+
+
+
+
+
+
+
+
+################################################################################
+SVN_REVISION="3488M"
+
+
+$as_echo "#define SVN_REVISION \"3488M\"" >>confdefs.h
+
+
+
+$as_echo "#define COPYRIGHT_STRING \"Copyright (c) 2008-2016 Michele Martone\"" >>confdefs.h
+
+
+################################################################################
+am__api_version='1.11'
+
+ac_aux_dir=
+for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do
+  if test -f "$ac_dir/install-sh"; then
+    ac_aux_dir=$ac_dir
+    ac_install_sh="$ac_aux_dir/install-sh -c"
+    break
+  elif test -f "$ac_dir/install.sh"; then
+    ac_aux_dir=$ac_dir
+    ac_install_sh="$ac_aux_dir/install.sh -c"
+    break
+  elif test -f "$ac_dir/shtool"; then
+    ac_aux_dir=$ac_dir
+    ac_install_sh="$ac_aux_dir/shtool install -c"
+    break
+  fi
+done
+if test -z "$ac_aux_dir"; then
+  as_fn_error $? "cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5
+fi
+
+# These three variables are undocumented and unsupported,
+# and are intended to be withdrawn in a future Autoconf release.
+# They can cause serious problems if a builder's source tree is in a directory
+# whose full name contains unusual characters.
+ac_config_guess="$SHELL $ac_aux_dir/config.guess"  # Please don't use this var.
+ac_config_sub="$SHELL $ac_aux_dir/config.sub"  # Please don't use this var.
+ac_configure="$SHELL $ac_aux_dir/configure"  # Please don't use this var.
+
+
+# Find a good install program.  We prefer a C program (faster),
+# so one script is as good as another.  But avoid the broken or
+# incompatible versions:
+# SysV /etc/install, /usr/sbin/install
+# SunOS /usr/etc/install
+# IRIX /sbin/install
+# AIX /bin/install
+# AmigaOS /C/install, which installs bootblocks on floppy discs
+# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag
+# AFS /usr/afsws/bin/install, which mishandles nonexistent args
+# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff"
+# OS/2's system install, which has a completely different semantic
+# ./install, which can be erroneously created by make from ./install.sh.
+# Reject install programs that cannot install multiple files.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5
+$as_echo_n "checking for a BSD-compatible install... " >&6; }
+if test -z "$INSTALL"; then
+if ${ac_cv_path_install+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    # Account for people who put trailing slashes in PATH elements.
+case $as_dir/ in #((
+  ./ | .// | /[cC]/* | \
+  /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \
+  ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \
+  /usr/ucb/* ) ;;
+  *)
+    # OSF1 and SCO ODT 3.0 have their own names for install.
+    # Don't use installbsd from OSF since it installs stuff as root
+    # by default.
+    for ac_prog in ginstall scoinst install; do
+      for ac_exec_ext in '' $ac_executable_extensions; do
+	if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then
+	  if test $ac_prog = install &&
+	    grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+	    # AIX install.  It has an incompatible calling convention.
+	    :
+	  elif test $ac_prog = install &&
+	    grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+	    # program-specific install script used by HP pwplus--don't use.
+	    :
+	  else
+	    rm -rf conftest.one conftest.two conftest.dir
+	    echo one > conftest.one
+	    echo two > conftest.two
+	    mkdir conftest.dir
+	    if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" &&
+	      test -s conftest.one && test -s conftest.two &&
+	      test -s conftest.dir/conftest.one &&
+	      test -s conftest.dir/conftest.two
+	    then
+	      ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c"
+	      break 3
+	    fi
+	  fi
+	fi
+      done
+    done
+    ;;
+esac
+
+  done
+IFS=$as_save_IFS
+
+rm -rf conftest.one conftest.two conftest.dir
+
+fi
+  if test "${ac_cv_path_install+set}" = set; then
+    INSTALL=$ac_cv_path_install
+  else
+    # As a last resort, use the slow shell script.  Don't cache a
+    # value for INSTALL within a source directory, because that will
+    # break other packages using the cache if that directory is
+    # removed, or if the value is a relative name.
+    INSTALL=$ac_install_sh
+  fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5
+$as_echo "$INSTALL" >&6; }
+
+# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
+# It thinks the first close brace ends the variable substitution.
+test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
+
+test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
+
+test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5
+$as_echo_n "checking whether build environment is sane... " >&6; }
+# Just in case
+sleep 1
+echo timestamp > conftest.file
+# Reject unsafe characters in $srcdir or the absolute working directory
+# name.  Accept space and tab only in the latter.
+am_lf='
+'
+case `pwd` in
+  *[\\\"\#\$\&\'\`$am_lf]*)
+    as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5;;
+esac
+case $srcdir in
+  *[\\\"\#\$\&\'\`$am_lf\ \	]*)
+    as_fn_error $? "unsafe srcdir value: \`$srcdir'" "$LINENO" 5;;
+esac
+
+# Do `set' in a subshell so we don't clobber the current shell's
+# arguments.  Must try -L first in case configure is actually a
+# symlink; some systems play weird games with the mod time of symlinks
+# (e.g. FreeBSD returns the mod time of the symlink's containing
+# directory).
+if (
+   set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
+   if test "$*" = "X"; then
+      # -L didn't work.
+      set X `ls -t "$srcdir/configure" conftest.file`
+   fi
+   rm -f conftest.file
+   if test "$*" != "X $srcdir/configure conftest.file" \
+      && test "$*" != "X conftest.file $srcdir/configure"; then
+
+      # If neither matched, then we have a broken ls.  This can happen
+      # if, for instance, CONFIG_SHELL is bash and it inherits a
+      # broken ls alias from the environment.  This has actually
+      # happened.  Such a system could not be considered "sane".
+      as_fn_error $? "ls -t appears to fail.  Make sure there is not a broken
+alias in your environment" "$LINENO" 5
+   fi
+
+   test "$2" = conftest.file
+   )
+then
+   # Ok.
+   :
+else
+   as_fn_error $? "newly created file is older than distributed files!
+Check your system clock" "$LINENO" 5
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+test "$program_prefix" != NONE &&
+  program_transform_name="s&^&$program_prefix&;$program_transform_name"
+# Use a double $ so make ignores it.
+test "$program_suffix" != NONE &&
+  program_transform_name="s&\$&$program_suffix&;$program_transform_name"
+# Double any \ or $.
+# By default was `s,x,x', remove it if useless.
+ac_script='s/[\\$]/&&/g;s/;s,x,x,$//'
+program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"`
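+# A sketch of the net effect (program name illustrative): configuring with
+# --program-prefix=g turns the transform into "s&^&g&", so a program such as
+# rsbench would be installed as grsbench.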
+
+# expand $ac_aux_dir to an absolute path
+am_aux_dir=`cd $ac_aux_dir && pwd`
+
+if test x"${MISSING+set}" != xset; then
+  case $am_aux_dir in
+  *\ * | *\	*)
+    MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;;
+  *)
+    MISSING="\${SHELL} $am_aux_dir/missing" ;;
+  esac
+fi
+# Use eval to expand $SHELL
+if eval "$MISSING --run true"; then
+  am_missing_run="$MISSING --run "
+else
+  am_missing_run=
+  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`missing' script is too old or missing" >&5
+$as_echo "$as_me: WARNING: \`missing' script is too old or missing" >&2;}
+fi
+
+if test x"${install_sh}" != xset; then
+  case $am_aux_dir in
+  *\ * | *\	*)
+    install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;;
+  *)
+    install_sh="\${SHELL} $am_aux_dir/install-sh"
+  esac
+fi
+
+# Installed binaries are usually stripped using `strip' when the user
+# runs `make install-strip'.  However `strip' might not be the right
+# tool to use in cross-compilation environments, therefore Automake
+# will honor the `STRIP' environment variable to overrule this program.
+if test "$cross_compiling" != no; then
+  if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args.
+set dummy ${ac_tool_prefix}strip; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_STRIP+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$STRIP"; then
+  ac_cv_prog_STRIP="$STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_STRIP="${ac_tool_prefix}strip"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+STRIP=$ac_cv_prog_STRIP
+if test -n "$STRIP"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5
+$as_echo "$STRIP" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_STRIP"; then
+  ac_ct_STRIP=$STRIP
+  # Extract the first word of "strip", so it can be a program name with args.
+set dummy strip; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_STRIP+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_ct_STRIP"; then
+  ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_STRIP="strip"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP
+if test -n "$ac_ct_STRIP"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5
+$as_echo "$ac_ct_STRIP" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+  if test "x$ac_ct_STRIP" = x; then
+    STRIP=":"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    STRIP=$ac_ct_STRIP
+  fi
+else
+  STRIP="$ac_cv_prog_STRIP"
+fi
+
+fi
+INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5
+$as_echo_n "checking for a thread-safe mkdir -p... " >&6; }
+if test -z "$MKDIR_P"; then
+  if ${ac_cv_path_mkdir+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_prog in mkdir gmkdir; do
+	 for ac_exec_ext in '' $ac_executable_extensions; do
+	   as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext" || continue
+	   case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #(
+	     'mkdir (GNU coreutils) '* | \
+	     'mkdir (coreutils) '* | \
+	     'mkdir (fileutils) '4.1*)
+	       ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext
+	       break 3;;
+	   esac
+	 done
+       done
+  done
+IFS=$as_save_IFS
+
+fi
+
+  test -d ./--version && rmdir ./--version
+  if test "${ac_cv_path_mkdir+set}" = set; then
+    MKDIR_P="$ac_cv_path_mkdir -p"
+  else
+    # As a last resort, use the slow shell script.  Don't cache a
+    # value for MKDIR_P within a source directory, because that will
+    # break other packages using the cache if that directory is
+    # removed, or if the value is a relative name.
+    MKDIR_P="$ac_install_sh -d"
+  fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5
+$as_echo "$MKDIR_P" >&6; }
+
+mkdir_p="$MKDIR_P"
+case $mkdir_p in
+  [\\/$]* | ?:[\\/]*) ;;
+  */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;;
+esac
+
+for ac_prog in gawk mawk nawk awk
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_AWK+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$AWK"; then
+  ac_cv_prog_AWK="$AWK" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_AWK="$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+AWK=$ac_cv_prog_AWK
+if test -n "$AWK"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5
+$as_echo "$AWK" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  test -n "$AWK" && break
+done
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5
+$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; }
+set x ${MAKE-make}
+ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'`
+if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat >conftest.make <<\_ACEOF
+SHELL = /bin/sh
+all:
+	@echo '@@@%%%=$(MAKE)=@@@%%%'
+_ACEOF
+# GNU make sometimes prints "make[1]: Entering ...", which would confuse us.
+case `${MAKE-make} -f conftest.make 2>/dev/null` in
+  *@@@%%%=?*=@@@%%%*)
+    eval ac_cv_prog_make_${ac_make}_set=yes;;
+  *)
+    eval ac_cv_prog_make_${ac_make}_set=no;;
+esac
+rm -f conftest.make
+fi
+if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+  SET_MAKE=
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+  SET_MAKE="MAKE=${MAKE-make}"
+fi
+
+rm -rf .tst 2>/dev/null
+mkdir .tst 2>/dev/null
+if test -d .tst; then
+  am__leading_dot=.
+else
+  am__leading_dot=_
+fi
+rmdir .tst 2>/dev/null
+
+if test "`cd $srcdir && pwd`" != "`pwd`"; then
+  # Use -I$(srcdir) only when $(srcdir) != ., so that make's output
+  # is not polluted with repeated "-I."
+  am__isrc=' -I$(srcdir)'
+  # test to see if srcdir already configured
+  if test -f $srcdir/config.status; then
+    as_fn_error $? "source directory already configured; run \"make distclean\" there first" "$LINENO" 5
+  fi
+fi
+
+# test whether we have cygpath
+if test -z "$CYGPATH_W"; then
+  if (cygpath --version) >/dev/null 2>/dev/null; then
+    CYGPATH_W='cygpath -w'
+  else
+    CYGPATH_W=echo
+  fi
+fi
+
+
+# Define the identity of the package.
+ PACKAGE='librsb'
+ VERSION='1.2.0-rc5'
+
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE "$PACKAGE"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define VERSION "$VERSION"
+_ACEOF
+
+# Some tools Automake needs.
+
+ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"}
+
+
+AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"}
+
+
+AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"}
+
+
+AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"}
+
+
+MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"}
+
+# We need awk for the "check" target.  The system "awk" is bad on
+# some platforms.
+# Always define AMTAR for backward compatibility.  Yes, it's still used
+# in the wild :-(  We should find a proper way to deprecate it ...
+AMTAR='$${TAR-tar}'
+
+am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'
+
+
+
+
+
+case `pwd` in
+  *\ * | *\	*)
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5
+$as_echo "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;;
+esac
+
+
+
+macro_version='2.4.2'
+macro_revision='1.3337'
+
+
+
+
+
+
+
+
+
+
+
+
+
+ltmain="$ac_aux_dir/ltmain.sh"
+
+# Make sure we can run config.sub.
+$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 ||
+  as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5
+$as_echo_n "checking build system type... " >&6; }
+if ${ac_cv_build+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_build_alias=$build_alias
+test "x$ac_build_alias" = x &&
+  ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"`
+test "x$ac_build_alias" = x &&
+  as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5
+ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` ||
+  as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5
+$as_echo "$ac_cv_build" >&6; }
+case $ac_cv_build in
+*-*-*) ;;
+*) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;;
+esac
+build=$ac_cv_build
+ac_save_IFS=$IFS; IFS='-'
+set x $ac_cv_build
+shift
+build_cpu=$1
+build_vendor=$2
+shift; shift
+# Remember, the first character of IFS is used to create $*,
+# except with old shells:
+build_os=$*
+IFS=$ac_save_IFS
+case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5
+$as_echo_n "checking host system type... " >&6; }
+if ${ac_cv_host+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test "x$host_alias" = x; then
+  ac_cv_host=$ac_cv_build
+else
+  ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` ||
+    as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5
+$as_echo "$ac_cv_host" >&6; }
+case $ac_cv_host in
+*-*-*) ;;
+*) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;;
+esac
+host=$ac_cv_host
+ac_save_IFS=$IFS; IFS='-'
+set x $ac_cv_host
+shift
+host_cpu=$1
+host_vendor=$2
+shift; shift
+# Remember, the first character of IFS is used to create $*,
+# except with old shells:
+host_os=$*
+IFS=$ac_save_IFS
+case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac
+
+
+# Backslashify metacharacters that are still active within
+# double-quoted strings.
+sed_quote_subst='s/\(["`$\\]\)/\\\1/g'
+
+# Same as above, but do not quote variable references.
+double_quote_subst='s/\(["`\\]\)/\\\1/g'
+
+# Sed substitution to delay expansion of an escaped shell variable in a
+# double_quote_subst'ed string.
+delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g'
+
+# Sed substitution to delay expansion of an escaped single quote.
+delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g'
+
+# Sed substitution to avoid accidental globbing in evaled expressions
+no_glob_subst='s/\*/\\\*/g'
+
+ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO
+ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO
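+# $ECHO is now a very long run of backslashes.  The print command chosen
+# below must reproduce it byte for byte, which rules out echos that strip
+# backslashes or interpret escape sequences.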
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5
+$as_echo_n "checking how to print strings... " >&6; }
+# Test print first, because it will be a builtin if present.
+if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \
+   test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then
+  ECHO='print -r --'
+elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then
+  ECHO='printf %s\n'
+else
+  # Use this function as a fallback that always works.
+  func_fallback_echo ()
+  {
+    eval 'cat <<_LTECHO_EOF
+$1
+_LTECHO_EOF'
+  }
+  ECHO='func_fallback_echo'
+fi
+
+# func_echo_all arg...
+# Invoke $ECHO with all args, space-separated.
+func_echo_all ()
+{
+    $ECHO ""
+}
+
+case "$ECHO" in
+  printf*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: printf" >&5
+$as_echo "printf" >&6; } ;;
+  print*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: print -r" >&5
+$as_echo "print -r" >&6; } ;;
+  *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: cat" >&5
+$as_echo "cat" >&6; } ;;
+esac
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+DEPDIR="${am__leading_dot}deps"
+
+ac_config_commands="$ac_config_commands depfiles"
+
+
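+# Learn which include syntax $MAKE understands by feeding it a one-target
+# makefile: GNU make accepts `include file', BSD make `.include "file"'.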
+am_make=${MAKE-make}
+cat > confinc << 'END'
+am__doit:
+	@echo this is the am__doit target
+.PHONY: am__doit
+END
+# If we don't find an include directive, just comment out the code.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for style of include used by $am_make" >&5
+$as_echo_n "checking for style of include used by $am_make... " >&6; }
+am__include="#"
+am__quote=
+_am_result=none
+# First try GNU make style include.
+echo "include confinc" > confmf
+# Ignore all kinds of additional output from `make'.
+case `$am_make -s -f confmf 2> /dev/null` in #(
+*the\ am__doit\ target*)
+  am__include=include
+  am__quote=
+  _am_result=GNU
+  ;;
+esac
+# Now try BSD make style include.
+if test "$am__include" = "#"; then
+   echo '.include "confinc"' > confmf
+   case `$am_make -s -f confmf 2> /dev/null` in #(
+   *the\ am__doit\ target*)
+     am__include=.include
+     am__quote="\""
+     _am_result=BSD
+     ;;
+   esac
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $_am_result" >&5
+$as_echo "$_am_result" >&6; }
+rm -f confinc confmf
+
+# Check whether --enable-dependency-tracking was given.
+if test "${enable_dependency_tracking+set}" = set; then :
+  enableval=$enable_dependency_tracking;
+fi
+
+if test "x$enable_dependency_tracking" != xno; then
+  am_depcomp="$ac_aux_dir/depcomp"
+  AMDEPBACKSLASH='\'
+  am__nodep='_no'
+fi
+ if test "x$enable_dependency_tracking" != xno; then
+  AMDEP_TRUE=
+  AMDEP_FALSE='#'
+else
+  AMDEP_TRUE='#'
+  AMDEP_FALSE=
+fi
+
+
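+# Locate a C compiler.  Candidates are tried in order -- host-prefixed gcc,
+# plain gcc, cc (skipping the known-broken /usr/ucb/cc), then cl.exe -- and
+# a user-supplied CC short-circuits the whole search.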
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}gcc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$CC"; then
+  ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_CC="${ac_tool_prefix}gcc"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_CC"; then
+  ac_ct_CC=$CC
+  # Extract the first word of "gcc", so it can be a program name with args.
+set dummy gcc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_CC+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_ct_CC"; then
+  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_CC="gcc"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+$as_echo "$ac_ct_CC" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+  if test "x$ac_ct_CC" = x; then
+    CC=""
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    CC=$ac_ct_CC
+  fi
+else
+  CC="$ac_cv_prog_CC"
+fi
+
+if test -z "$CC"; then
+          if test -n "$ac_tool_prefix"; then
+    # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}cc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$CC"; then
+  ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_CC="${ac_tool_prefix}cc"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  fi
+fi
+if test -z "$CC"; then
+  # Extract the first word of "cc", so it can be a program name with args.
+set dummy cc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$CC"; then
+  ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+  ac_prog_rejected=no
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
+       ac_prog_rejected=yes
+       continue
+     fi
+    ac_cv_prog_CC="cc"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+if test $ac_prog_rejected = yes; then
+  # We found a bogon in the path, so make sure we never use it.
+  set dummy $ac_cv_prog_CC
+  shift
+  if test $# != 0; then
+    # We chose a different compiler from the bogus one.
+    # However, it has the same basename, so the bogon will be chosen
+    # first if we set CC to just the basename; use the full file name.
+    shift
+    ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@"
+  fi
+fi
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$CC"; then
+  if test -n "$ac_tool_prefix"; then
+  for ac_prog in cl.exe
+  do
+    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$CC"; then
+  ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+    test -n "$CC" && break
+  done
+fi
+if test -z "$CC"; then
+  ac_ct_CC=$CC
+  for ac_prog in cl.exe
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_CC+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_ct_CC"; then
+  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_CC="$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+$as_echo "$ac_ct_CC" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  test -n "$ac_ct_CC" && break
+done
+
+  if test "x$ac_ct_CC" = x; then
+    CC=""
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    CC=$ac_ct_CC
+  fi
+fi
+
+fi
+
+
+test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "no acceptable C compiler found in \$PATH
+See \`config.log' for more details" "$LINENO" 5; }
+
+# Provide some information about the compiler.
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+for ac_option in --version -v -V -qversion; do
+  { { ac_try="$ac_compiler $ac_option >&5"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_compiler $ac_option >&5") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    sed '10a\
+... rest of stderr output deleted ...
+         10q' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+  fi
+  rm -f conftest.er1 conftest.err
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+done
+
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out"
+# Try to create an executable without -o first, disregarding a.out.
+# This helps us diagnose broken compilers and gives a first guess at the
+# executable suffix (exeext).
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5
+$as_echo_n "checking whether the C compiler works... " >&6; }
+ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
+
+# The possible output files:
+ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*"
+
+ac_rmfiles=
+for ac_file in $ac_files
+do
+  case $ac_file in
+    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+    * ) ac_rmfiles="$ac_rmfiles $ac_file";;
+  esac
+done
+rm -f $ac_rmfiles
+
+if { { ac_try="$ac_link_default"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link_default") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then :
+  # Autoconf-2.13 could set the ac_cv_exeext variable to `no'.
+# So ignore a value of `no', otherwise this would lead to `EXEEXT = no'
+# in a Makefile.  We should not override ac_cv_exeext if it was cached,
+# so that the user can short-circuit this test for compilers unknown to
+# Autoconf.
+for ac_file in $ac_files ''
+do
+  test -f "$ac_file" || continue
+  case $ac_file in
+    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj )
+	;;
+    [ab].out )
+	# We found the default executable, but exeext='' is most
+	# certainly right.
+	break;;
+    *.* )
+	if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no;
+	then :; else
+	   ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+	fi
+	# We set ac_cv_exeext here because the later test for it is not
+	# safe: cross compilers may not add the suffix if given an `-o'
+	# argument, so we may need to know it at that point already.
+	# Even if this section looks crufty: it has the advantage of
+	# actually working.
+	break;;
+    * )
+	break;;
+  esac
+done
+test "$ac_cv_exeext" = no && ac_cv_exeext=
+
+else
+  ac_file=''
+fi
+if test -z "$ac_file"; then :
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+$as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "C compiler cannot create executables
+See \`config.log' for more details" "$LINENO" 5; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5
+$as_echo_n "checking for C compiler default output file name... " >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5
+$as_echo "$ac_file" >&6; }
+ac_exeext=$ac_cv_exeext
+
+rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out
+ac_clean_files=$ac_clean_files_save
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5
+$as_echo_n "checking for suffix of executables... " >&6; }
+if { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then :
+  # If both `conftest.exe' and `conftest' are `present' (well, observable)
+# catch `conftest.exe'.  For instance with Cygwin, `ls conftest' will
+# work properly (i.e., refer to `conftest.exe'), while it won't with
+# `rm'.
+for ac_file in conftest.exe conftest conftest.*; do
+  test -f "$ac_file" || continue
+  case $ac_file in
+    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+    *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+	  break;;
+    * ) break;;
+  esac
+done
+else
+  { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+rm -f conftest conftest$ac_cv_exeext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5
+$as_echo "$ac_cv_exeext" >&6; }
+
+rm -f conftest.$ac_ext
+EXEEXT=$ac_cv_exeext
+ac_exeext=$EXEEXT
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <stdio.h>
+int
+main ()
+{
+FILE *f = fopen ("conftest.out", "w");
+ return ferror (f) || fclose (f) != 0;
+
+  ;
+  return 0;
+}
+_ACEOF
+ac_clean_files="$ac_clean_files conftest.out"
+# Check that the compiler produces executables we can run.  If not, either
+# the compiler is broken, or we cross compile.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5
+$as_echo_n "checking whether we are cross compiling... " >&6; }
+if test "$cross_compiling" != yes; then
+  { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+  if { ac_try='./conftest$ac_cv_exeext'
+  { { case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_try") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; }; then
+    cross_compiling=no
+  else
+    if test "$cross_compiling" = maybe; then
+	cross_compiling=yes
+    else
+	{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot run C compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details" "$LINENO" 5; }
+    fi
+  fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5
+$as_echo "$cross_compiling" >&6; }
+
+rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out
+ac_clean_files=$ac_clean_files_save
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5
+$as_echo_n "checking for suffix of object files... " >&6; }
+if ${ac_cv_objext+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+rm -f conftest.o conftest.obj
+if { { ac_try="$ac_compile"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_compile") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then :
+  for ac_file in conftest.o conftest.obj conftest.*; do
+  test -f "$ac_file" || continue;
+  case $ac_file in
+    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;;
+    *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'`
+       break;;
+  esac
+done
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot compute suffix of object files: cannot compile
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+rm -f conftest.$ac_cv_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5
+$as_echo "$ac_cv_objext" >&6; }
+OBJEXT=$ac_cv_objext
+ac_objext=$OBJEXT
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5
+$as_echo_n "checking whether we are using the GNU C compiler... " >&6; }
+if ${ac_cv_c_compiler_gnu+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+#ifndef __GNUC__
+       choke me
+#endif
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_compiler_gnu=yes
+else
+  ac_compiler_gnu=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_c_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5
+$as_echo "$ac_cv_c_compiler_gnu" >&6; }
+if test $ac_compiler_gnu = yes; then
+  GCC=yes
+else
+  GCC=
+fi
+ac_test_CFLAGS=${CFLAGS+set}
+ac_save_CFLAGS=$CFLAGS
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
+$as_echo_n "checking whether $CC accepts -g... " >&6; }
+if ${ac_cv_prog_cc_g+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
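+  # Three-stage probe: try -g while treating warnings as errors; if that
+  # fails, check whether an empty CFLAGS compiles at all; if even that
+  # fails, the compiler is merely noisy about -g, so retry it with
+  # warnings tolerated.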
+  ac_save_c_werror_flag=$ac_c_werror_flag
+   ac_c_werror_flag=yes
+   ac_cv_prog_cc_g=no
+   CFLAGS="-g"
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_prog_cc_g=yes
+else
+  CFLAGS=""
+      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+else
+  ac_c_werror_flag=$ac_save_c_werror_flag
+	 CFLAGS="-g"
+	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_prog_cc_g=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+   ac_c_werror_flag=$ac_save_c_werror_flag
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
+$as_echo "$ac_cv_prog_cc_g" >&6; }
+if test "$ac_test_CFLAGS" = set; then
+  CFLAGS=$ac_save_CFLAGS
+elif test $ac_cv_prog_cc_g = yes; then
+  if test "$GCC" = yes; then
+    CFLAGS="-g -O2"
+  else
+    CFLAGS="-g"
+  fi
+else
+  if test "$GCC" = yes; then
+    CFLAGS="-O2"
+  else
+    CFLAGS=
+  fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5
+$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
+if ${ac_cv_prog_cc_c89+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_cv_prog_cc_c89=no
+ac_save_CC=$CC
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <stdarg.h>
+#include <stdio.h>
+struct stat;
+/* Most of the following tests are stolen from RCS 5.7's src/conf.sh.  */
+struct buf { int x; };
+FILE * (*rcsopen) (struct buf *, struct stat *, int);
+static char *e (p, i)
+     char **p;
+     int i;
+{
+  return p[i];
+}
+static char *f (char * (*g) (char **, int), char **p, ...)
+{
+  char *s;
+  va_list v;
+  va_start (v,p);
+  s = g (p, va_arg (v,int));
+  va_end (v);
+  return s;
+}
+
+/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default.  It has
+   function prototypes and stuff, but not '\xHH' hex character constants.
+   These don't provoke an error unfortunately, instead are silently treated
+   as 'x'.  The following induces an error, until -std is added to get
+   proper ANSI mode.  Curiously '\x00'!='x' always comes out true, for an
+   array size at least.  It's necessary to write '\x00'==0 to get something
+   that's true only with -std.  */
+int osf4_cc_array ['\x00' == 0 ? 1 : -1];
+
+/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
+   inside strings and character constants.  */
+#define FOO(x) 'x'
+int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1];
+
+int test (int i, double x);
+struct s1 {int (*f) (int a);};
+struct s2 {int (*f) (double a);};
+int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int);
+int argc;
+char **argv;
+int
+main ()
+{
+return f (e, argv, 0) != argv[0]  ||  f (e, argv, 1) != argv[1];
+  ;
+  return 0;
+}
+_ACEOF
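+# Candidate flags for strict C89 mode: AIX XL C (-qlanglvl=*), Tru64 cc
+# (-std), HP-UX cc (-Ae, -Aa -D_HPUX_SOURCE) and Solaris cc
+# (-Xc -D__EXTENSIONS__); the empty first entry means "none needed".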
+for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \
+	-Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
+do
+  CC="$ac_save_CC $ac_arg"
+  if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_prog_cc_c89=$ac_arg
+fi
+rm -f core conftest.err conftest.$ac_objext
+  test "x$ac_cv_prog_cc_c89" != "xno" && break
+done
+rm -f conftest.$ac_ext
+CC=$ac_save_CC
+
+fi
+# AC_CACHE_VAL
+case "x$ac_cv_prog_cc_c89" in
+  x)
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+$as_echo "none needed" >&6; } ;;
+  xno)
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+$as_echo "unsupported" >&6; } ;;
+  *)
+    CC="$CC $ac_cv_prog_cc_c89"
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5
+$as_echo "$ac_cv_prog_cc_c89" >&6; } ;;
+esac
+if test "x$ac_cv_prog_cc_c89" != xno; then :
+
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+depcc="$CC"   am_compiler_list=
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
+$as_echo_n "checking dependency style of $depcc... " >&6; }
+if ${am_cv_CC_dependencies_compiler_type+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+  # We make a subdir and do the tests there.  Otherwise we can end up
+  # making bogus files that we don't know about and never remove.  For
+  # instance it was reported that on HP-UX the gcc test will end up
+  # making a dummy file named `D' -- because `-MD' means `put the output
+  # in D'.
+  rm -rf conftest.dir
+  mkdir conftest.dir
+  # Copy depcomp to subdir because otherwise we won't find it if we're
+  # using a relative directory.
+  cp "$am_depcomp" conftest.dir
+  cd conftest.dir
+  # We will build objects and dependencies in a subdirectory because
+  # it helps to detect inapplicable dependency modes.  For instance
+  # both Tru64's cc and ICC support -MD to output dependencies as a
+  # side effect of compilation, but ICC will put the dependencies in
+  # the current directory while Tru64 will put them in the object
+  # directory.
+  mkdir sub
+
+  am_cv_CC_dependencies_compiler_type=none
+  if test "$am_compiler_list" = ""; then
+     am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp`
+  fi
+  am__universal=false
+  case " $depcc " in #(
+     *\ -arch\ *\ -arch\ *) am__universal=true ;;
+     esac
+
+  for depmode in $am_compiler_list; do
+    # Setup a source with many dependencies, because some compilers
+    # like to wrap large dependency lists on column 80 (with \), and
+    # we should not choose a depcomp mode which is confused by this.
+    #
+    # We need to recreate these files for each test, as the compiler may
+    # overwrite some of them when testing with obscure command lines.
+    # This happens at least with the AIX C compiler.
+    : > sub/conftest.c
+    for i in 1 2 3 4 5 6; do
+      echo '#include "conftst'$i'.h"' >> sub/conftest.c
+      # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with
+      # Solaris 8's {/usr,}/bin/sh.
+      touch sub/conftst$i.h
+    done
+    echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+    # We check with `-c' and `-o' for the sake of the "dashmstdout"
+    # mode.  It turns out that the SunPro C++ compiler does not properly
+    # handle `-M -o', and we need to detect this.  Also, some Intel
+# versions had trouble with output in subdirectories.
+    am__obj=sub/conftest.${OBJEXT-o}
+    am__minus_obj="-o $am__obj"
+    case $depmode in
+    gcc)
+      # This depmode causes a compiler race in universal mode.
+      test "$am__universal" = false || continue
+      ;;
+    nosideeffect)
+      # after this tag, mechanisms are not by side-effect, so they'll
+      # only be used when explicitly requested
+      if test "x$enable_dependency_tracking" = xyes; then
+	continue
+      else
+	break
+      fi
+      ;;
+    msvc7 | msvc7msys | msvisualcpp | msvcmsys)
+      # This compiler won't grok `-c -o', but the `-o' ("minuso") probe has
+      # not run yet.  These depmodes come late enough in the list, and are
+      # weak enough, that their functioning should not be impacted.
+      am__obj=conftest.${OBJEXT-o}
+      am__minus_obj=
+      ;;
+    none) break ;;
+    esac
+    if depmode=$depmode \
+       source=sub/conftest.c object=$am__obj \
+       depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+       $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+         >/dev/null 2>conftest.err &&
+       grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+       grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+       grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+       ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+      # icc doesn't choke on unknown options, it will just issue warnings
+      # or remarks (even with -Werror).  So we grep stderr for any message
+      # that says an option was ignored or not supported.
+      # When given -MP, icc 7.0 and 7.1 complain thusly:
+      #   icc: Command line warning: ignoring option '-M'; no argument required
+      # The diagnosis changed in icc 8.0:
+      #   icc: Command line remark: option '-MP' not supported
+      if (grep 'ignoring option' conftest.err ||
+          grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+        am_cv_CC_dependencies_compiler_type=$depmode
+        break
+      fi
+    fi
+  done
+
+  cd ..
+  rm -rf conftest.dir
+else
+  am_cv_CC_dependencies_compiler_type=none
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5
+$as_echo "$am_cv_CC_dependencies_compiler_type" >&6; }
+CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type
+
+ if
+  test "x$enable_dependency_tracking" != xno \
+  && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then
+  am__fastdepCC_TRUE=
+  am__fastdepCC_FALSE='#'
+else
+  am__fastdepCC_TRUE='#'
+  am__fastdepCC_FALSE=
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5
+$as_echo_n "checking for a sed that does not truncate output... " >&6; }
+if ${ac_cv_path_SED+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+            ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/
+     for ac_i in 1 2 3 4 5 6 7; do
+       ac_script="$ac_script$as_nl$ac_script"
+     done
+     echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed
+     { ac_script=; unset ac_script;}
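+     # conftest.sed now holds 99 copies of a long substitution command.
+     # GNU sed is accepted as soon as it identifies itself; other
+     # candidates are scored by how much repeatedly doubled input they
+     # process without truncating, and the highest scorer wins.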
+     if test -z "$SED"; then
+  ac_path_SED_found=false
+  # Loop through the user's path and test for each of PROGNAME-LIST
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_prog in sed gsed; do
+    for ac_exec_ext in '' $ac_executable_extensions; do
+      ac_path_SED="$as_dir/$ac_prog$ac_exec_ext"
+      as_fn_executable_p "$ac_path_SED" || continue
+# Check for GNU ac_path_SED and select it if it is found.
+  # Check for GNU $ac_path_SED
+case `"$ac_path_SED" --version 2>&1` in
+*GNU*)
+  ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;;
+*)
+  ac_count=0
+  $as_echo_n 0123456789 >"conftest.in"
+  while :
+  do
+    cat "conftest.in" "conftest.in" >"conftest.tmp"
+    mv "conftest.tmp" "conftest.in"
+    cp "conftest.in" "conftest.nl"
+    $as_echo '' >> "conftest.nl"
+    "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break
+    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+    as_fn_arith $ac_count + 1 && ac_count=$as_val
+    if test $ac_count -gt ${ac_path_SED_max-0}; then
+      # Best one so far, save it but keep looking for a better one
+      ac_cv_path_SED="$ac_path_SED"
+      ac_path_SED_max=$ac_count
+    fi
+    # 10*(2^10) chars as input seems more than enough
+    test $ac_count -gt 10 && break
+  done
+  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+      $ac_path_SED_found && break 3
+    done
+  done
+  done
+IFS=$as_save_IFS
+  if test -z "$ac_cv_path_SED"; then
+    as_fn_error $? "no acceptable sed could be found in \$PATH" "$LINENO" 5
+  fi
+else
+  ac_cv_path_SED=$SED
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5
+$as_echo "$ac_cv_path_SED" >&6; }
+ SED="$ac_cv_path_SED"
+  rm -f conftest.sed
+
+test -z "$SED" && SED=sed
+Xsed="$SED -e 1s/^X//"
+
+
+
+
+
+
+
+
+
+
+
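+# The grep, egrep and fgrep checks below reuse the same doubling benchmark,
+# additionally searching /usr/xpg4/bin, where Solaris keeps its POSIX tools.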
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
+$as_echo_n "checking for grep that handles long lines and -e... " >&6; }
+if ${ac_cv_path_GREP+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -z "$GREP"; then
+  ac_path_GREP_found=false
+  # Loop through the user's path and test for each of PROGNAME-LIST
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_prog in grep ggrep; do
+    for ac_exec_ext in '' $ac_executable_extensions; do
+      ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
+      as_fn_executable_p "$ac_path_GREP" || continue
+# Check for GNU ac_path_GREP and select it if it is found.
+  # Check for GNU $ac_path_GREP
+case `"$ac_path_GREP" --version 2>&1` in
+*GNU*)
+  ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
+*)
+  ac_count=0
+  $as_echo_n 0123456789 >"conftest.in"
+  while :
+  do
+    cat "conftest.in" "conftest.in" >"conftest.tmp"
+    mv "conftest.tmp" "conftest.in"
+    cp "conftest.in" "conftest.nl"
+    $as_echo 'GREP' >> "conftest.nl"
+    "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+    as_fn_arith $ac_count + 1 && ac_count=$as_val
+    if test $ac_count -gt ${ac_path_GREP_max-0}; then
+      # Best one so far, save it but keep looking for a better one
+      ac_cv_path_GREP="$ac_path_GREP"
+      ac_path_GREP_max=$ac_count
+    fi
+    # 10*(2^10) chars as input seems more than enough
+    test $ac_count -gt 10 && break
+  done
+  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+      $ac_path_GREP_found && break 3
+    done
+  done
+  done
+IFS=$as_save_IFS
+  if test -z "$ac_cv_path_GREP"; then
+    as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+  fi
+else
+  ac_cv_path_GREP=$GREP
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5
+$as_echo "$ac_cv_path_GREP" >&6; }
+ GREP="$ac_cv_path_GREP"
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5
+$as_echo_n "checking for egrep... " >&6; }
+if ${ac_cv_path_EGREP+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
+   then ac_cv_path_EGREP="$GREP -E"
+   else
+     if test -z "$EGREP"; then
+  ac_path_EGREP_found=false
+  # Loop through the user's path and test for each of PROGNAME-LIST
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_prog in egrep; do
+    for ac_exec_ext in '' $ac_executable_extensions; do
+      ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext"
+      as_fn_executable_p "$ac_path_EGREP" || continue
+# Check for GNU ac_path_EGREP and select it if it is found.
+  # Check for GNU $ac_path_EGREP
+case `"$ac_path_EGREP" --version 2>&1` in
+*GNU*)
+  ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;;
+*)
+  ac_count=0
+  $as_echo_n 0123456789 >"conftest.in"
+  while :
+  do
+    cat "conftest.in" "conftest.in" >"conftest.tmp"
+    mv "conftest.tmp" "conftest.in"
+    cp "conftest.in" "conftest.nl"
+    $as_echo 'EGREP' >> "conftest.nl"
+    "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+    as_fn_arith $ac_count + 1 && ac_count=$as_val
+    if test $ac_count -gt ${ac_path_EGREP_max-0}; then
+      # Best one so far, save it but keep looking for a better one
+      ac_cv_path_EGREP="$ac_path_EGREP"
+      ac_path_EGREP_max=$ac_count
+    fi
+    # 10*(2^10) chars as input seems more than enough
+    test $ac_count -gt 10 && break
+  done
+  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+      $ac_path_EGREP_found && break 3
+    done
+  done
+  done
+IFS=$as_save_IFS
+  if test -z "$ac_cv_path_EGREP"; then
+    as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+  fi
+else
+  ac_cv_path_EGREP=$EGREP
+fi
+
+   fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5
+$as_echo "$ac_cv_path_EGREP" >&6; }
+ EGREP="$ac_cv_path_EGREP"
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5
+$as_echo_n "checking for fgrep... " >&6; }
+if ${ac_cv_path_FGREP+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1
+   then ac_cv_path_FGREP="$GREP -F"
+   else
+     if test -z "$FGREP"; then
+  ac_path_FGREP_found=false
+  # Loop through the user's path and test for each of PROGNAME-LIST
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_prog in fgrep; do
+    for ac_exec_ext in '' $ac_executable_extensions; do
+      ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext"
+      as_fn_executable_p "$ac_path_FGREP" || continue
+# Check for GNU ac_path_FGREP and select it if it is found.
+  # Check for GNU $ac_path_FGREP
+case `"$ac_path_FGREP" --version 2>&1` in
+*GNU*)
+  ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;;
+*)
+  ac_count=0
+  $as_echo_n 0123456789 >"conftest.in"
+  while :
+  do
+    cat "conftest.in" "conftest.in" >"conftest.tmp"
+    mv "conftest.tmp" "conftest.in"
+    cp "conftest.in" "conftest.nl"
+    $as_echo 'FGREP' >> "conftest.nl"
+    "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break
+    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+    as_fn_arith $ac_count + 1 && ac_count=$as_val
+    if test $ac_count -gt ${ac_path_FGREP_max-0}; then
+      # Best one so far, save it but keep looking for a better one
+      ac_cv_path_FGREP="$ac_path_FGREP"
+      ac_path_FGREP_max=$ac_count
+    fi
+    # 10*(2^10) chars as input seems more than enough
+    test $ac_count -gt 10 && break
+  done
+  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+      $ac_path_FGREP_found && break 3
+    done
+  done
+  done
+IFS=$as_save_IFS
+  if test -z "$ac_cv_path_FGREP"; then
+    as_fn_error $? "no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+  fi
+else
+  ac_cv_path_FGREP=$FGREP
+fi
+
+   fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5
+$as_echo "$ac_cv_path_FGREP" >&6; }
+ FGREP="$ac_cv_path_FGREP"
+
+
+test -z "$GREP" && GREP=grep
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# Check whether --with-gnu-ld was given.
+if test "${with_gnu_ld+set}" = set; then :
+  withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes
+else
+  with_gnu_ld=no
+fi
+
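+# Locate the linker.  With GCC, ask the compiler via -print-prog-name=ld and
+# canonicalize the reported path; otherwise walk $PATH, preferring a GNU or
+# non-GNU ld according to --with-gnu-ld and probing each candidate with -v.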
+ac_prog=ld
+if test "$GCC" = yes; then
+  # Check if gcc -print-prog-name=ld gives a path.
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5
+$as_echo_n "checking for ld used by $CC... " >&6; }
+  case $host in
+  *-*-mingw*)
+    # gcc leaves a trailing carriage return which upsets mingw
+    ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+  *)
+    ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+  esac
+  case $ac_prog in
+    # Accept absolute paths.
+    [\\/]* | ?:[\\/]*)
+      re_direlt='/[^/][^/]*/\.\./'
+      # Canonicalize the pathname of ld
+      ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'`
+      while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
+	ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
+      done
+      test -z "$LD" && LD="$ac_prog"
+      ;;
+  "")
+    # If it fails, then pretend we aren't using GCC.
+    ac_prog=ld
+    ;;
+  *)
+    # If it is relative, then search for the first ld in PATH.
+    with_gnu_ld=unknown
+    ;;
+  esac
+elif test "$with_gnu_ld" = yes; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5
+$as_echo_n "checking for GNU ld... " >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5
+$as_echo_n "checking for non-GNU ld... " >&6; }
+fi
+if ${lt_cv_path_LD+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -z "$LD"; then
+  lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+  for ac_dir in $PATH; do
+    IFS="$lt_save_ifs"
+    test -z "$ac_dir" && ac_dir=.
+    if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+      lt_cv_path_LD="$ac_dir/$ac_prog"
+      # Check to see if the program is GNU ld.  I'd rather use --version,
+      # but apparently some variants of GNU ld only accept -v.
+      # Break only if it was the GNU/non-GNU ld that we prefer.
+      case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in
+      *GNU* | *'with BFD'*)
+	test "$with_gnu_ld" != no && break
+	;;
+      *)
+	test "$with_gnu_ld" != yes && break
+	;;
+      esac
+    fi
+  done
+  IFS="$lt_save_ifs"
+else
+  lt_cv_path_LD="$LD" # Let the user override the test with a path.
+fi
+fi
+
+LD="$lt_cv_path_LD"
+if test -n "$LD"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5
+$as_echo "$LD" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5
+$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; }
+if ${lt_cv_prog_gnu_ld+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  # I'd rather use --version here, but apparently some GNU lds only accept -v.
+case `$LD -v 2>&1 </dev/null` in
+*GNU* | *'with BFD'*)
+  lt_cv_prog_gnu_ld=yes
+  ;;
+*)
+  lt_cv_prog_gnu_ld=no
+  ;;
+esac
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_gnu_ld" >&5
+$as_echo "$lt_cv_prog_gnu_ld" >&6; }
+with_gnu_ld=$lt_cv_prog_gnu_ld
+
+
+
+
+
+
+
+
+
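+# Find a name lister: try host-prefixed nm, then plain nm, across $PATH and
+# some legacy directories, probing the -B and -p flags for BSD-compatible
+# output; failing that, fall back to MS dumpbin or "link -dump".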
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5
+$as_echo_n "checking for BSD- or MS-compatible name lister (nm)... " >&6; }
+if ${lt_cv_path_NM+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$NM"; then
+  # Let the user override the test.
+  lt_cv_path_NM="$NM"
+else
+  lt_nm_to_check="${ac_tool_prefix}nm"
+  if test -n "$ac_tool_prefix" && test "$build" = "$host"; then
+    lt_nm_to_check="$lt_nm_to_check nm"
+  fi
+  for lt_tmp_nm in $lt_nm_to_check; do
+    lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+    for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do
+      IFS="$lt_save_ifs"
+      test -z "$ac_dir" && ac_dir=.
+      tmp_nm="$ac_dir/$lt_tmp_nm"
+      if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then
+	# Check to see if the nm accepts a BSD-compat flag.
+	# Adding the `sed 1q' prevents false positives on HP-UX, which says:
+	#   nm: unknown option "B" ignored
+	# Tru64's nm complains that /dev/null is an invalid object file
+	case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in
+	*/dev/null* | *'Invalid file or object type'*)
+	  lt_cv_path_NM="$tmp_nm -B"
+	  break
+	  ;;
+	*)
+	  case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in
+	  */dev/null*)
+	    lt_cv_path_NM="$tmp_nm -p"
+	    break
+	    ;;
+	  *)
+	    lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but
+	    continue # so that we can try to find one that supports BSD flags
+	    ;;
+	  esac
+	  ;;
+	esac
+      fi
+    done
+    IFS="$lt_save_ifs"
+  done
+  : ${lt_cv_path_NM=no}
+fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5
+$as_echo "$lt_cv_path_NM" >&6; }
+if test "$lt_cv_path_NM" != "no"; then
+  NM="$lt_cv_path_NM"
+else
+  # Didn't find any BSD compatible name lister, look for dumpbin.
+  if test -n "$DUMPBIN"; then :
+    # Let the user override the test.
+  else
+    if test -n "$ac_tool_prefix"; then
+  for ac_prog in dumpbin "link -dump"
+  do
+    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_DUMPBIN+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$DUMPBIN"; then
+  ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+DUMPBIN=$ac_cv_prog_DUMPBIN
+if test -n "$DUMPBIN"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5
+$as_echo "$DUMPBIN" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+    test -n "$DUMPBIN" && break
+  done
+fi
+if test -z "$DUMPBIN"; then
+  ac_ct_DUMPBIN=$DUMPBIN
+  for ac_prog in dumpbin "link -dump"
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_DUMPBIN+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_ct_DUMPBIN"; then
+  ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_DUMPBIN="$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN
+if test -n "$ac_ct_DUMPBIN"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5
+$as_echo "$ac_ct_DUMPBIN" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  test -n "$ac_ct_DUMPBIN" && break
+done
+
+  if test "x$ac_ct_DUMPBIN" = x; then
+    DUMPBIN=":"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    DUMPBIN=$ac_ct_DUMPBIN
+  fi
+fi
+
+    case `$DUMPBIN -symbols /dev/null 2>&1 | sed '1q'` in
+    *COFF*)
+      DUMPBIN="$DUMPBIN -symbols"
+      ;;
+    *)
+      DUMPBIN=:
+      ;;
+    esac
+  fi
+
+  if test "$DUMPBIN" != ":"; then
+    NM="$DUMPBIN"
+  fi
+fi
+test -z "$NM" && NM=nm
+
+
+
+
+
+
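+# Classify the name lister's output format by compiling a one-variable
+# object file and grepping the listing for dumpbin-style "External" tags.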
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5
+$as_echo_n "checking the name lister ($NM) interface... " >&6; }
+if ${lt_cv_nm_interface+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_nm_interface="BSD nm"
+  echo "int some_variable = 0;" > conftest.$ac_ext
+  (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5)
+  (eval "$ac_compile" 2>conftest.err)
+  cat conftest.err >&5
+  (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&5)
+  (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out)
+  cat conftest.err >&5
+  (eval echo "\"\$as_me:$LINENO: output\"" >&5)
+  cat conftest.out >&5
+  if $GREP 'External.*some_variable' conftest.out > /dev/null; then
+    lt_cv_nm_interface="MS dumpbin"
+  fi
+  rm -f conftest*
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5
+$as_echo "$lt_cv_nm_interface" >&6; }
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5
+$as_echo_n "checking whether ln -s works... " >&6; }
+LN_S=$as_ln_s
+if test "$LN_S" = "ln -s"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5
+$as_echo "no, using $LN_S" >&6; }
+fi
+
+# find the maximum length of command line arguments
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5
+$as_echo_n "checking the maximum length of command line arguments... " >&6; }
+if ${lt_cv_sys_max_cmd_len+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+    i=0
+  teststring="ABCD"
+
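+  # Platforms where probing is slow or unsafe get hardcoded limits below;
+  # the default case asks getconf for ARG_MAX, or else doubles a test
+  # string until the shell refuses it and keeps half of that as a margin.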
+  case $build_os in
+  msdosdjgpp*)
+    # On DJGPP, this test can blow up pretty badly due to problems in libc
+    # (any single argument exceeding 2000 bytes causes a buffer overrun
+    # during glob expansion).  Even if it were fixed, the result of this
+    # check would be larger than it should be.
+    lt_cv_sys_max_cmd_len=12288;    # 12K is about right
+    ;;
+
+  gnu*)
+    # Under GNU Hurd, this test is not required because there is
+    # no limit to the length of command line arguments.
+    # Libtool will interpret -1 as no limit whatsoever
+    lt_cv_sys_max_cmd_len=-1;
+    ;;
+
+  cygwin* | mingw* | cegcc*)
+    # On Win9x/ME, this test blows up -- it succeeds, but takes
+    # about 5 minutes as the teststring grows exponentially.
+    # Worse, since 9x/ME are not pre-emptively multitasking,
+    # you end up with a "frozen" computer, even though with patience
+    # the test eventually succeeds (with a max line length of 256k).
+    # Instead, let's just punt: use the minimum linelength reported by
+    # all of the supported platforms: 8192 (on NT/2K/XP).
+    lt_cv_sys_max_cmd_len=8192;
+    ;;
+
+  mint*)
+    # On MiNT this can take a long time and run out of memory.
+    lt_cv_sys_max_cmd_len=8192;
+    ;;
+
+  amigaos*)
+    # On AmigaOS with pdksh, this test takes hours, literally.
+    # So we just punt and use a minimum line length of 8192.
+    lt_cv_sys_max_cmd_len=8192;
+    ;;
+
+  netbsd* | freebsd* | openbsd* | darwin* | dragonfly*)
+    # This has been around since 386BSD, at least.  Likely further.
+    if test -x /sbin/sysctl; then
+      lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax`
+    elif test -x /usr/sbin/sysctl; then
+      lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax`
+    else
+      lt_cv_sys_max_cmd_len=65536	# usable default for all BSDs
+    fi
+    # And add a safety zone
+    lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
+    lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
+    ;;
+
+  interix*)
+    # We know the value 262144 and hardcode it with a safety zone (like BSD)
+    lt_cv_sys_max_cmd_len=196608
+    ;;
+
+  os2*)
+    # The test takes a long time on OS/2.
+    lt_cv_sys_max_cmd_len=8192
+    ;;
+
+  osf*)
+    # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure
+    # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not
+    # nice to cause kernel panics, so let's avoid the loop below.
+    # First set a reasonable default.
+    lt_cv_sys_max_cmd_len=16384
+    #
+    if test -x /sbin/sysconfig; then
+      case `/sbin/sysconfig -q proc exec_disable_arg_limit` in
+        *1*) lt_cv_sys_max_cmd_len=-1 ;;
+      esac
+    fi
+    ;;
+  sco3.2v5*)
+    lt_cv_sys_max_cmd_len=102400
+    ;;
+  sysv5* | sco5v6* | sysv4.2uw2*)
+    kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null`
+    if test -n "$kargmax"; then
+      lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[	 ]//'`
+    else
+      lt_cv_sys_max_cmd_len=32768
+    fi
+    ;;
+  *)
+    lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null`
+    if test -n "$lt_cv_sys_max_cmd_len"; then
+      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
+      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
+    else
+      # Make teststring a little bigger before we do anything with it.
+      # a 1K string should be a reasonable start.
+      for i in 1 2 3 4 5 6 7 8 ; do
+        teststring=$teststring$teststring
+      done
+      SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}}
+      # If test is not a shell built-in, we'll probably end up computing a
+      # maximum length that is only half of the actual maximum length, but
+      # we can't tell.
+      while { test "X"`env echo "$teststring$teststring" 2>/dev/null` \
+	         = "X$teststring$teststring"; } >/dev/null 2>&1 &&
+	      test $i != 17 # 1/2 MB should be enough
+      do
+        i=`expr $i + 1`
+        teststring=$teststring$teststring
+      done
+      # Only check the string length outside the loop.
+      lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1`
+      teststring=
+      # Add a significant safety factor because C++ compilers can tack on
+      # massive amounts of additional arguments before passing them to the
+      # linker.  It appears as though 1/2 is a usable value.
+      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2`
+    fi
+    ;;
+  esac
+
+fi
+
+if test -n "$lt_cv_sys_max_cmd_len"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5
+$as_echo "$lt_cv_sys_max_cmd_len" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: none" >&5
+$as_echo "none" >&6; }
+fi
+max_cmd_len=$lt_cv_sys_max_cmd_len
+
+
+
+
+
+
+: ${CP="cp -f"}
+: ${MV="mv -f"}
+: ${RM="rm -f"}
+
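+# Feature-test shell niceties (XSI parameter expansions, $((...)) arithmetic,
+# "+=" appends) so libtool can substitute fast built-in string operations
+# for external sed and expr calls where the shell supports them.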
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands some XSI constructs" >&5
+$as_echo_n "checking whether the shell understands some XSI constructs... " >&6; }
+# Try some XSI features
+xsi_shell=no
+( _lt_dummy="a/b/c"
+  test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \
+      = c,a/b,b/c, \
+    && eval 'test $(( 1 + 1 )) -eq 2 \
+    && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \
+  && xsi_shell=yes
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $xsi_shell" >&5
+$as_echo "$xsi_shell" >&6; }
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands \"+=\"" >&5
+$as_echo_n "checking whether the shell understands \"+=\"... " >&6; }
+lt_shell_append=no
+( foo=bar; set foo baz; eval "$1+=\$2" && test "$foo" = barbaz ) \
+    >/dev/null 2>&1 \
+  && lt_shell_append=yes
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_shell_append" >&5
+$as_echo "$lt_shell_append" >&6; }
+
+
+if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
+  lt_unset=unset
+else
+  lt_unset=false
+fi
+
+
+
+
+
+# test EBCDIC or ASCII
+case `echo X|tr X '\101'` in
+ A) # ASCII based system
+    # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr
+  lt_SP2NL='tr \040 \012'
+  lt_NL2SP='tr \015\012 \040\040'
+  ;;
+ *) # EBCDIC based system
+  lt_SP2NL='tr \100 \n'
+  lt_NL2SP='tr \r\n \100\100'
+  ;;
+esac
+
+
+
+
+
+
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5
+$as_echo_n "checking how to convert $build file names to $host format... " >&6; }
+if ${lt_cv_to_host_file_cmd+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  case $host in
+  *-*-mingw* )
+    case $build in
+      *-*-mingw* ) # actually msys
+        lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32
+        ;;
+      *-*-cygwin* )
+        lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32
+        ;;
+      * ) # otherwise, assume *nix
+        lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32
+        ;;
+    esac
+    ;;
+  *-*-cygwin* )
+    case $build in
+      *-*-mingw* ) # actually msys
+        lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin
+        ;;
+      *-*-cygwin* )
+        lt_cv_to_host_file_cmd=func_convert_file_noop
+        ;;
+      * ) # otherwise, assume *nix
+        lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin
+        ;;
+    esac
+    ;;
+  * ) # unhandled hosts (and "normal" native builds)
+    lt_cv_to_host_file_cmd=func_convert_file_noop
+    ;;
+esac
+
+fi
+
+to_host_file_cmd=$lt_cv_to_host_file_cmd
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5
+$as_echo "$lt_cv_to_host_file_cmd" >&6; }
+
+
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5
+$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; }
+if ${lt_cv_to_tool_file_cmd+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  # Assume ordinary cross tools, or a native build.
+lt_cv_to_tool_file_cmd=func_convert_file_noop
+case $host in
+  *-*-mingw* )
+    case $build in
+      *-*-mingw* ) # actually msys
+        lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32
+        ;;
+    esac
+    ;;
+esac
+
+fi
+
+to_tool_file_cmd=$lt_cv_to_tool_file_cmd
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5
+$as_echo "$lt_cv_to_tool_file_cmd" >&6; }
+
+
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5
+$as_echo_n "checking for $LD option to reload object files... " >&6; }
+if ${lt_cv_ld_reload_flag+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_ld_reload_flag='-r'
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5
+$as_echo "$lt_cv_ld_reload_flag" >&6; }
+reload_flag=$lt_cv_ld_reload_flag
+case $reload_flag in
+"" | " "*) ;;
+*) reload_flag=" $reload_flag" ;;
+esac
+reload_cmds='$LD$reload_flag -o $output$reload_objs'
+case $host_os in
+  cygwin* | mingw* | pw32* | cegcc*)
+    if test "$GCC" != yes; then
+      reload_cmds=false
+    fi
+    ;;
+  darwin*)
+    if test "$GCC" = yes; then
+      reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs'
+    else
+      reload_cmds='$LD$reload_flag -o $output$reload_objs'
+    fi
+    ;;
+esac
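+
+# Illustratively, the default reload command performs a partial link that
+# merges several objects into a single relocatable object:
+#   ld -r -o merged.o a.o b.o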
+
+
+
+
+
+
+
+
+
+if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args.
+set dummy ${ac_tool_prefix}objdump; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_OBJDUMP+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$OBJDUMP"; then
+  ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+OBJDUMP=$ac_cv_prog_OBJDUMP
+if test -n "$OBJDUMP"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5
+$as_echo "$OBJDUMP" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_OBJDUMP"; then
+  ac_ct_OBJDUMP=$OBJDUMP
+  # Extract the first word of "objdump", so it can be a program name with args.
+set dummy objdump; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_OBJDUMP+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_ct_OBJDUMP"; then
+  ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_OBJDUMP="objdump"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP
+if test -n "$ac_ct_OBJDUMP"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5
+$as_echo "$ac_ct_OBJDUMP" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+  if test "x$ac_ct_OBJDUMP" = x; then
+    OBJDUMP="false"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    OBJDUMP=$ac_ct_OBJDUMP
+  fi
+else
+  OBJDUMP="$ac_cv_prog_OBJDUMP"
+fi
+
+test -z "$OBJDUMP" && OBJDUMP=objdump
+
+
+
+
+
+
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5
+$as_echo_n "checking how to recognize dependent libraries... " >&6; }
+if ${lt_cv_deplibs_check_method+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_file_magic_cmd='$MAGIC_CMD'
+lt_cv_file_magic_test_file=
+lt_cv_deplibs_check_method='unknown'
+# Need to set the preceding variable on all platforms that support
+# interlibrary dependencies.
+# 'none' -- dependencies not supported.
+# 'unknown' -- same as 'none', but documents that we really don't know.
+# 'pass_all' -- all dependencies passed with no checks.
+# 'test_compile' -- check by building a test program.
+# 'file_magic [regex]' -- check by looking for files in the library path
+# that respond to the $file_magic_cmd with the given extended regex.
+# If you have 'file' or equivalent on your system and you're not sure
+# whether 'pass_all' will *always* work, you probably want this one.
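+#
+# Sketch of what 'file_magic' amounts to (not the exact ltmain.sh code;
+# names here are illustrative):
+#   $file_magic_cmd /path/to/libfoo.so | $EGREP "<regex>" >/dev/null
+# whereas 'match_pattern' matches the extended regex against the
+# candidate file name itself.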
+
+case $host_os in
+aix[4-9]*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+beos*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+bsdi[45]*)
+  lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)'
+  lt_cv_file_magic_cmd='/usr/bin/file -L'
+  lt_cv_file_magic_test_file=/shlib/libc.so
+  ;;
+
+cygwin*)
+  # func_win32_libid is a shell function defined in ltmain.sh
+  lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
+  lt_cv_file_magic_cmd='func_win32_libid'
+  ;;
+
+mingw* | pw32*)
+  # Base MSYS/MinGW do not provide the 'file' command needed by the
+  # func_win32_libid shell function, so use a weaker test based on 'objdump'
+  # unless 'file' is found, for example because we are cross-compiling.
+  # func_win32_libid assumes BSD nm, so disallow it if using MS dumpbin.
+  if ( test "$lt_cv_nm_interface" = "BSD nm" && file / ) >/dev/null 2>&1; then
+    lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
+    lt_cv_file_magic_cmd='func_win32_libid'
+  else
+    # Keep this pattern in sync with the one in func_win32_libid.
+    lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)'
+    lt_cv_file_magic_cmd='$OBJDUMP -f'
+  fi
+  ;;
+
+cegcc*)
+  # Use the weaker test based on 'objdump'; see the mingw* case above.
+  lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?'
+  lt_cv_file_magic_cmd='$OBJDUMP -f'
+  ;;
+
+darwin* | rhapsody*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+freebsd* | dragonfly*)
+  if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then
+    case $host_cpu in
+    i*86 )
+      # Not sure whether the presence of OpenBSD here was a mistake.
+      # Let's accept both of them until this is cleared up.
+      lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library'
+      lt_cv_file_magic_cmd=/usr/bin/file
+      lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*`
+      ;;
+    esac
+  else
+    lt_cv_deplibs_check_method=pass_all
+  fi
+  ;;
+
+haiku*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+hpux10.20* | hpux11*)
+  lt_cv_file_magic_cmd=/usr/bin/file
+  case $host_cpu in
+  ia64*)
+    lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64'
+    lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so
+    ;;
+  hppa*64*)
+    lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]'
+    lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl
+    ;;
+  *)
+    lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9]\.[0-9]) shared library'
+    lt_cv_file_magic_test_file=/usr/lib/libc.sl
+    ;;
+  esac
+  ;;
+
+interix[3-9]*)
+  # PIC code is broken on Interix 3.x, which is why we match |\.a rather than |_pic\.a here.
+  lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$'
+  ;;
+
+irix5* | irix6* | nonstopux*)
+  case $LD in
+  *-32|*"-32 ") libmagic=32-bit;;
+  *-n32|*"-n32 ") libmagic=N32;;
+  *-64|*"-64 ") libmagic=64-bit;;
+  *) libmagic=never-match;;
+  esac
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+# This must be glibc/ELF.
+linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+netbsd* | netbsdelf*-gnu)
+  if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then
+    lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$'
+  else
+    lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$'
+  fi
+  ;;
+
+newos6*)
+  lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)'
+  lt_cv_file_magic_cmd=/usr/bin/file
+  lt_cv_file_magic_test_file=/usr/lib/libnls.so
+  ;;
+
+*nto* | *qnx*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+openbsd*)
+  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+    lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$'
+  else
+    lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$'
+  fi
+  ;;
+
+osf3* | osf4* | osf5*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+rdos*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+solaris*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+
+sysv4 | sysv4.3*)
+  case $host_vendor in
+  motorola)
+    lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]'
+    lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*`
+    ;;
+  ncr)
+    lt_cv_deplibs_check_method=pass_all
+    ;;
+  sequent)
+    lt_cv_file_magic_cmd='/bin/file'
+    lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )'
+    ;;
+  sni)
+    lt_cv_file_magic_cmd='/bin/file'
+    lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib"
+    lt_cv_file_magic_test_file=/lib/libc.so
+    ;;
+  siemens)
+    lt_cv_deplibs_check_method=pass_all
+    ;;
+  pc)
+    lt_cv_deplibs_check_method=pass_all
+    ;;
+  esac
+  ;;
+
+tpf*)
+  lt_cv_deplibs_check_method=pass_all
+  ;;
+esac
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5
+$as_echo "$lt_cv_deplibs_check_method" >&6; }
+
+file_magic_glob=
+want_nocaseglob=no
+if test "$build" = "$host"; then
+  case $host_os in
+  mingw* | pw32*)
+    if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then
+      want_nocaseglob=yes
+    else
+      file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"`
+    fi
+    ;;
+  esac
+fi
+
+file_magic_cmd=$lt_cv_file_magic_cmd
+deplibs_check_method=$lt_cv_deplibs_check_method
+test -z "$deplibs_check_method" && deplibs_check_method=unknown
+
+
+if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args.
+set dummy ${ac_tool_prefix}dlltool; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_DLLTOOL+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$DLLTOOL"; then
+  ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+DLLTOOL=$ac_cv_prog_DLLTOOL
+if test -n "$DLLTOOL"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5
+$as_echo "$DLLTOOL" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_DLLTOOL"; then
+  ac_ct_DLLTOOL=$DLLTOOL
+  # Extract the first word of "dlltool", so it can be a program name with args.
+set dummy dlltool; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_ct_DLLTOOL"; then
+  ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_DLLTOOL="dlltool"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL
+if test -n "$ac_ct_DLLTOOL"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5
+$as_echo "$ac_ct_DLLTOOL" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+  if test "x$ac_ct_DLLTOOL" = x; then
+    DLLTOOL="false"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    DLLTOOL=$ac_ct_DLLTOOL
+  fi
+else
+  DLLTOOL="$ac_cv_prog_DLLTOOL"
+fi
+
+test -z "$DLLTOOL" && DLLTOOL=dlltool
+
+
+
+
+
+
+
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5
+$as_echo_n "checking how to associate runtime and link libraries... " >&6; }
+if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_sharedlib_from_linklib_cmd='unknown'
+
+case $host_os in
+cygwin* | mingw* | pw32* | cegcc*)
+  # Two different shell functions are defined in ltmain.sh;
+  # decide which to use based on the capabilities of $DLLTOOL.
+  case `$DLLTOOL --help 2>&1` in
+  *--identify-strict*)
+    lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib
+    ;;
+  *)
+    lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback
+    ;;
+  esac
+  ;;
+*)
+  # fallback: assume linklib IS sharedlib
+  lt_cv_sharedlib_from_linklib_cmd="$ECHO"
+  ;;
+esac
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5
+$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; }
+sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd
+test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO
+
+
+
+
+
+
+
+
+if test -n "$ac_tool_prefix"; then
+  for ac_prog in ar
+  do
+    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_AR+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$AR"; then
+  ac_cv_prog_AR="$AR" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_AR="$ac_tool_prefix$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+AR=$ac_cv_prog_AR
+if test -n "$AR"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5
+$as_echo "$AR" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+    test -n "$AR" && break
+  done
+fi
+if test -z "$AR"; then
+  ac_ct_AR=$AR
+  for ac_prog in ar
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_AR+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_ct_AR"; then
+  ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_AR="$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_AR=$ac_cv_prog_ac_ct_AR
+if test -n "$ac_ct_AR"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5
+$as_echo "$ac_ct_AR" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  test -n "$ac_ct_AR" && break
+done
+
+  if test "x$ac_ct_AR" = x; then
+    AR="false"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    AR=$ac_ct_AR
+  fi
+fi
+
+: ${AR=ar}
+: ${AR_FLAGS=cru}
+
+
+
+
+
+
+
+
+
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5
+$as_echo_n "checking for archiver @FILE support... " >&6; }
+if ${lt_cv_ar_at_file+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_ar_at_file=no
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  echo conftest.$ac_objext > conftest.lst
+      lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5'
+      { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5
+  (eval $lt_ar_try) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+      if test "$ac_status" -eq 0; then
+	# Ensure the archiver fails upon bogus file names.
+	rm -f conftest.$ac_objext libconftest.a
+	{ { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5
+  (eval $lt_ar_try) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+	if test "$ac_status" -ne 0; then
+          lt_cv_ar_at_file=@
+        fi
+      fi
+      rm -f conftest.* libconftest.a
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5
+$as_echo "$lt_cv_ar_at_file" >&6; }
+
+if test "x$lt_cv_ar_at_file" = xno; then
+  archiver_list_spec=
+else
+  archiver_list_spec=$lt_cv_ar_at_file
+fi
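+
+# When @FILE support is detected, long member lists can be passed via a
+# response file rather than on the command line (illustrative):
+#   printf '%s\n' a.o b.o c.o > objs.lst
+#   $AR $AR_FLAGS libdemo.a @objs.lst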
+
+
+
+
+
+
+
+if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args.
+set dummy ${ac_tool_prefix}strip; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_STRIP+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$STRIP"; then
+  ac_cv_prog_STRIP="$STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_STRIP="${ac_tool_prefix}strip"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+STRIP=$ac_cv_prog_STRIP
+if test -n "$STRIP"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5
+$as_echo "$STRIP" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_STRIP"; then
+  ac_ct_STRIP=$STRIP
+  # Extract the first word of "strip", so it can be a program name with args.
+set dummy strip; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_STRIP+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_ct_STRIP"; then
+  ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_STRIP="strip"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP
+if test -n "$ac_ct_STRIP"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5
+$as_echo "$ac_ct_STRIP" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+  if test "x$ac_ct_STRIP" = x; then
+    STRIP=":"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    STRIP=$ac_ct_STRIP
+  fi
+else
+  STRIP="$ac_cv_prog_STRIP"
+fi
+
+test -z "$STRIP" && STRIP=:
+
+
+
+
+
+
+if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args.
+set dummy ${ac_tool_prefix}ranlib; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_RANLIB+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$RANLIB"; then
+  ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+RANLIB=$ac_cv_prog_RANLIB
+if test -n "$RANLIB"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5
+$as_echo "$RANLIB" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_RANLIB"; then
+  ac_ct_RANLIB=$RANLIB
+  # Extract the first word of "ranlib", so it can be a program name with args.
+set dummy ranlib; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_RANLIB+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_ct_RANLIB"; then
+  ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_RANLIB="ranlib"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB
+if test -n "$ac_ct_RANLIB"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5
+$as_echo "$ac_ct_RANLIB" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+  if test "x$ac_ct_RANLIB" = x; then
+    RANLIB=":"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    RANLIB=$ac_ct_RANLIB
+  fi
+else
+  RANLIB="$ac_cv_prog_RANLIB"
+fi
+
+test -z "$RANLIB" && RANLIB=:
+
+
+
+
+
+
+# Determine commands to create old-style static archives.
+old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs'
+old_postinstall_cmds='chmod 644 $oldlib'
+old_postuninstall_cmds=
+
+if test -n "$RANLIB"; then
+  case $host_os in
+  openbsd*)
+    old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib"
+    ;;
+  *)
+    old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib"
+    ;;
+  esac
+  old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib"
+fi
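+
+# Illustrative expansion (with the default AR_FLAGS=cru set above):
+#   ar cru libdemo.a foo.o bar.o
+#   ranlib libdemo.a
+# libtool runs the commands chained with '~' one after another.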
+
+case $host_os in
+  darwin*)
+    lock_old_archive_extraction=yes ;;
+  *)
+    lock_old_archive_extraction=no ;;
+esac
+
+
+# If no C compiler was specified, use CC.
+LTCC=${LTCC-"$CC"}
+
+# If no C compiler flags were specified, use CFLAGS.
+LTCFLAGS=${LTCFLAGS-"$CFLAGS"}
+
+# Allow CC to be a program name with arguments.
+compiler=$CC
+
+
+# Check for a command to grab the raw symbol name, followed by the C symbol, from nm output.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5
+$as_echo_n "checking command to parse $NM output from $compiler object... " >&6; }
+if ${lt_cv_sys_global_symbol_pipe+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+
+# These are sane defaults that work on at least a few old systems.
+# [They come from Ultrix.  What could be older than Ultrix?!! ;)]
+
+# Character class describing NM global symbol codes.
+symcode='[BCDEGRST]'
+
+# Regexp to match symbols that can be accessed directly from C.
+sympat='\([_A-Za-z][_A-Za-z0-9]*\)'
+
+# Define system-specific variables.
+case $host_os in
+aix*)
+  symcode='[BCDT]'
+  ;;
+cygwin* | mingw* | pw32* | cegcc*)
+  symcode='[ABCDGISTW]'
+  ;;
+hpux*)
+  if test "$host_cpu" = ia64; then
+    symcode='[ABCDEGRST]'
+  fi
+  ;;
+irix* | nonstopux*)
+  symcode='[BCDEGRST]'
+  ;;
+osf*)
+  symcode='[BCDEGQRST]'
+  ;;
+solaris*)
+  symcode='[BDRT]'
+  ;;
+sco3.2v5*)
+  symcode='[DT]'
+  ;;
+sysv4.2uw2*)
+  symcode='[DT]'
+  ;;
+sysv5* | sco5v6* | unixware* | OpenUNIX*)
+  symcode='[ABDT]'
+  ;;
+sysv4)
+  symcode='[DFNSTU]'
+  ;;
+esac
+
+# If we're using GNU nm, then use its standard symbol codes.
+case `$NM -V 2>&1` in
+*GNU* | *'with BFD'*)
+  symcode='[ABCDGIRSTW]' ;;
+esac
+
+# Transform an extracted symbol line into a proper C declaration.
+# Some systems (esp. on ia64) link data and code symbols differently,
+# so use this general approach.
+lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'"
+
+# Transform an extracted symbol line into a symbol name and symbol address.
+lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/  {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/  {\"\2\", (void *) \&\2},/p'"
+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/  {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/  {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/  {\"lib\2\", (void *) \&\2},/p'"
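+
+# Worked example on BSD-format nm output (illustrative):
+#   input line:  0000000000000000 T nm_test_func
+#   after $lt_cv_sys_global_symbol_pipe:  T nm_test_func nm_test_func
+#   after $lt_cv_sys_global_symbol_to_cdecl:  extern int nm_test_func();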
+
+# Handle CRLF in mingw tool chain
+opt_cr=
+case $build_os in
+mingw*)
+  opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp
+  ;;
+esac
+
+# Try without a prefix underscore, then with it.
+for ac_symprfx in "" "_"; do
+
+  # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol.
+  symxfrm="\\1 $ac_symprfx\\2 \\2"
+
+  # Write the raw and C identifiers.
+  if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+    # Fake it for dumpbin and say T for any non-static function
+    # and D for any global variable.
+    # Also find C++ and __fastcall symbols from MSVC++,
+    # which start with @ or ?.
+    lt_cv_sys_global_symbol_pipe="$AWK '"\
+"     {last_section=section; section=\$ 3};"\
+"     /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\
+"     /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\
+"     \$ 0!~/External *\|/{next};"\
+"     / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\
+"     {if(hide[section]) next};"\
+"     {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? \"T \" : \"D \"};"\
+"     {split(\$ 0, a, /\||\r/); split(a[2], s)};"\
+"     s[1]~/^[@?]/{print s[1], s[1]; next};"\
+"     s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\
+"     ' prfx=^$ac_symprfx"
+  else
+    lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[	 ]\($symcode$symcode*\)[	 ][	 ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'"
+  fi
+  lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'"
+
+  # Check to see that the pipe works correctly.
+  pipe_works=no
+
+  rm -f conftest*
+  cat > conftest.$ac_ext <<_LT_EOF
+#ifdef __cplusplus
+extern "C" {
+#endif
+char nm_test_var;
+void nm_test_func(void);
+void nm_test_func(void){}
+#ifdef __cplusplus
+}
+#endif
+int main(){nm_test_var='a';nm_test_func();return(0);}
+_LT_EOF
+
+  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+  (eval $ac_compile) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then
+    # Now try to grab the symbols.
+    nlist=conftest.nm
+    if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist\""; } >&5
+  (eval $NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && test -s "$nlist"; then
+      # Try sorting and uniquifying the output.
+      if sort "$nlist" | uniq > "$nlist"T; then
+	mv -f "$nlist"T "$nlist"
+      else
+	rm -f "$nlist"T
+      fi
+
+      # Make sure that we snagged all the symbols we need.
+      if $GREP ' nm_test_var$' "$nlist" >/dev/null; then
+	if $GREP ' nm_test_func$' "$nlist" >/dev/null; then
+	  cat <<_LT_EOF > conftest.$ac_ext
+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests.  */
+#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE)
+/* DATA imports from DLLs on WIN32 can't be const, because runtime
+   relocations are performed -- see ld's documentation on pseudo-relocs.  */
+# define LT_DLSYM_CONST
+#elif defined(__osf__)
+/* This system does not cope well with relocations in const data.  */
+# define LT_DLSYM_CONST
+#else
+# define LT_DLSYM_CONST const
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+_LT_EOF
+	  # Now generate the symbol file.
+	  eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext'
+
+	  cat <<_LT_EOF >> conftest.$ac_ext
+
+/* The mapping between symbol names and symbols.  */
+LT_DLSYM_CONST struct {
+  const char *name;
+  void       *address;
+}
+lt__PROGRAM__LTX_preloaded_symbols[] =
+{
+  { "@PROGRAM@", (void *) 0 },
+_LT_EOF
+	  $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/  {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext
+	  cat <<\_LT_EOF >> conftest.$ac_ext
+  {0, (void *) 0}
+};
+
+/* This works around a problem in the FreeBSD linker.  */
+#ifdef FREEBSD_WORKAROUND
+static const void *lt_preloaded_setup() {
+  return lt__PROGRAM__LTX_preloaded_symbols;
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+_LT_EOF
+	  # Now try linking the two files.
+	  mv conftest.$ac_objext conftstm.$ac_objext
+	  lt_globsym_save_LIBS=$LIBS
+	  lt_globsym_save_CFLAGS=$CFLAGS
+	  LIBS="conftstm.$ac_objext"
+	  CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag"
+	  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5
+  (eval $ac_link) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && test -s conftest${ac_exeext}; then
+	    pipe_works=yes
+	  fi
+	  LIBS=$lt_globsym_save_LIBS
+	  CFLAGS=$lt_globsym_save_CFLAGS
+	else
+	  echo "cannot find nm_test_func in $nlist" >&5
+	fi
+      else
+	echo "cannot find nm_test_var in $nlist" >&5
+      fi
+    else
+      echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5
+    fi
+  else
+    echo "$progname: failed program was:" >&5
+    cat conftest.$ac_ext >&5
+  fi
+  rm -rf conftest* conftst*
+
+  # Do not use the global_symbol_pipe unless it works.
+  if test "$pipe_works" = yes; then
+    break
+  else
+    lt_cv_sys_global_symbol_pipe=
+  fi
+done
+
+fi
+
+if test -z "$lt_cv_sys_global_symbol_pipe"; then
+  lt_cv_sys_global_symbol_to_cdecl=
+fi
+if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5
+$as_echo "failed" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5
+$as_echo "ok" >&6; }
+fi
+
+# Response file support.
+if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+  nm_file_list_spec='@'
+elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then
+  nm_file_list_spec='@'
+fi
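+
+# i.e. when nm_file_list_spec='@', object names can be fed to nm through
+# a response file (illustrative):
+#   printf '%s\n' a.o b.o > objs.lst
+#   $NM @objs.lst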
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5
+$as_echo_n "checking for sysroot... " >&6; }
+
+# Check whether --with-sysroot was given.
+if test "${with_sysroot+set}" = set; then :
+  withval=$with_sysroot;
+else
+  with_sysroot=no
+fi
+
+
+lt_sysroot=
+case ${with_sysroot} in #(
+ yes)
+   if test "$GCC" = yes; then
+     lt_sysroot=`$CC --print-sysroot 2>/dev/null`
+   fi
+   ;; #(
+ /*)
+   lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"`
+   ;; #(
+ no|'')
+   ;; #(
+ *)
+   { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_sysroot}" >&5
+$as_echo "${with_sysroot}" >&6; }
+   as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5
+   ;;
+esac
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5
+$as_echo "${lt_sysroot:-no}" >&6; }
+
+
+
+
+
+# Check whether --enable-libtool-lock was given.
+if test "${enable_libtool_lock+set}" = set; then :
+  enableval=$enable_libtool_lock;
+fi
+
+test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes
+
+# Some flags need to be propagated to the compiler or linker for good
+# libtool support.
+case $host in
+ia64-*-hpux*)
+  # Find out which ABI we are using.
+  echo 'int i;' > conftest.$ac_ext
+  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+  (eval $ac_compile) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then
+    case `/usr/bin/file conftest.$ac_objext` in
+      *ELF-32*)
+	HPUX_IA64_MODE="32"
+	;;
+      *ELF-64*)
+	HPUX_IA64_MODE="64"
+	;;
+    esac
+  fi
+  rm -rf conftest*
+  ;;
+*-*-irix6*)
+  # Find out which ABI we are using.
+  echo '#line '$LINENO' "configure"' > conftest.$ac_ext
+  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+  (eval $ac_compile) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then
+    if test "$lt_cv_prog_gnu_ld" = yes; then
+      case `/usr/bin/file conftest.$ac_objext` in
+	*32-bit*)
+	  LD="${LD-ld} -melf32bsmip"
+	  ;;
+	*N32*)
+	  LD="${LD-ld} -melf32bmipn32"
+	  ;;
+	*64-bit*)
+	  LD="${LD-ld} -melf64bmip"
+	;;
+      esac
+    else
+      case `/usr/bin/file conftest.$ac_objext` in
+	*32-bit*)
+	  LD="${LD-ld} -32"
+	  ;;
+	*N32*)
+	  LD="${LD-ld} -n32"
+	  ;;
+	*64-bit*)
+	  LD="${LD-ld} -64"
+	  ;;
+      esac
+    fi
+  fi
+  rm -rf conftest*
+  ;;
+
+x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \
+s390*-*linux*|s390*-*tpf*|sparc*-*linux*)
+  # Find out which ABI we are using.
+  echo 'int i;' > conftest.$ac_ext
+  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+  (eval $ac_compile) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then
+    case `/usr/bin/file conftest.o` in
+      *32-bit*)
+	case $host in
+	  x86_64-*kfreebsd*-gnu)
+	    LD="${LD-ld} -m elf_i386_fbsd"
+	    ;;
+	  x86_64-*linux*)
+	    LD="${LD-ld} -m elf_i386"
+	    ;;
+	  ppc64-*linux*|powerpc64-*linux*)
+	    LD="${LD-ld} -m elf32ppclinux"
+	    ;;
+	  s390x-*linux*)
+	    LD="${LD-ld} -m elf_s390"
+	    ;;
+	  sparc64-*linux*)
+	    LD="${LD-ld} -m elf32_sparc"
+	    ;;
+	esac
+	;;
+      *64-bit*)
+	case $host in
+	  x86_64-*kfreebsd*-gnu)
+	    LD="${LD-ld} -m elf_x86_64_fbsd"
+	    ;;
+	  x86_64-*linux*)
+	    LD="${LD-ld} -m elf_x86_64"
+	    ;;
+	  ppc*-*linux*|powerpc*-*linux*)
+	    LD="${LD-ld} -m elf64ppc"
+	    ;;
+	  s390*-*linux*|s390*-*tpf*)
+	    LD="${LD-ld} -m elf64_s390"
+	    ;;
+	  sparc*-*linux*)
+	    LD="${LD-ld} -m elf64_sparc"
+	    ;;
+	esac
+	;;
+    esac
+  fi
+  rm -rf conftest*
+  ;;
+
+*-*-sco3.2v5*)
+  # On SCO OpenServer 5, we need -belf to get full-featured binaries.
+  SAVE_CFLAGS="$CFLAGS"
+  CFLAGS="$CFLAGS -belf"
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5
+$as_echo_n "checking whether the C compiler needs -belf... " >&6; }
+if ${lt_cv_cc_needs_belf+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+     cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  lt_cv_cc_needs_belf=yes
+else
+  lt_cv_cc_needs_belf=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+     ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5
+$as_echo "$lt_cv_cc_needs_belf" >&6; }
+  if test x"$lt_cv_cc_needs_belf" != x"yes"; then
+    # This is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf.
+    CFLAGS="$SAVE_CFLAGS"
+  fi
+  ;;
+*-*solaris*)
+  # Find out which ABI we are using.
+  echo 'int i;' > conftest.$ac_ext
+  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+  (eval $ac_compile) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then
+    case `/usr/bin/file conftest.o` in
+    *64-bit*)
+      case $lt_cv_prog_gnu_ld in
+      yes*)
+        case $host in
+        i?86-*-solaris*)
+          LD="${LD-ld} -m elf_x86_64"
+          ;;
+        sparc*-*-solaris*)
+          LD="${LD-ld} -m elf64_sparc"
+          ;;
+        esac
+        # GNU ld 2.21 introduced _sol2 emulations.  Use them if available.
+        if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then
+          LD="${LD-ld}_sol2"
+        fi
+        ;;
+      *)
+	if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then
+	  LD="${LD-ld} -64"
+	fi
+	;;
+      esac
+      ;;
+    esac
+  fi
+  rm -rf conftest*
+  ;;
+esac
+
+need_locks="$enable_libtool_lock"
+
+if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args.
+set dummy ${ac_tool_prefix}mt; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_MANIFEST_TOOL+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$MANIFEST_TOOL"; then
+  ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL
+if test -n "$MANIFEST_TOOL"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5
+$as_echo "$MANIFEST_TOOL" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_MANIFEST_TOOL"; then
+  ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL
+  # Extract the first word of "mt", so it can be a program name with args.
+set dummy mt; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_ct_MANIFEST_TOOL"; then
+  ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_MANIFEST_TOOL="mt"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL
+if test -n "$ac_ct_MANIFEST_TOOL"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5
+$as_echo "$ac_ct_MANIFEST_TOOL" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+  if test "x$ac_ct_MANIFEST_TOOL" = x; then
+    MANIFEST_TOOL=":"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL
+  fi
+else
+  MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL"
+fi
+
+test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5
+$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; }
+if ${lt_cv_path_manifest_tool+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_path_manifest_tool=no
+  echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5
+  $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out
+  cat conftest.err >&5
+  if $GREP 'Manifest Tool' conftest.out > /dev/null; then
+    lt_cv_path_manifest_tool=yes
+  fi
+  rm -f conftest*
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_manifest_tool" >&5
+$as_echo "$lt_cv_path_manifest_tool" >&6; }
+if test "x$lt_cv_path_manifest_tool" != xyes; then
+  MANIFEST_TOOL=:
+fi
+
+
+
+
+
+
+  case $host_os in
+    rhapsody* | darwin*)
+    if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args.
+set dummy ${ac_tool_prefix}dsymutil; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_DSYMUTIL+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$DSYMUTIL"; then
+  ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+DSYMUTIL=$ac_cv_prog_DSYMUTIL
+if test -n "$DSYMUTIL"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5
+$as_echo "$DSYMUTIL" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_DSYMUTIL"; then
+  ac_ct_DSYMUTIL=$DSYMUTIL
+  # Extract the first word of "dsymutil", so it can be a program name with args.
+set dummy dsymutil; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_DSYMUTIL+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_ct_DSYMUTIL"; then
+  ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_DSYMUTIL="dsymutil"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL
+if test -n "$ac_ct_DSYMUTIL"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5
+$as_echo "$ac_ct_DSYMUTIL" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+  if test "x$ac_ct_DSYMUTIL" = x; then
+    DSYMUTIL=":"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    DSYMUTIL=$ac_ct_DSYMUTIL
+  fi
+else
+  DSYMUTIL="$ac_cv_prog_DSYMUTIL"
+fi
+
+    if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args.
+set dummy ${ac_tool_prefix}nmedit; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_NMEDIT+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$NMEDIT"; then
+  ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+NMEDIT=$ac_cv_prog_NMEDIT
+if test -n "$NMEDIT"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5
+$as_echo "$NMEDIT" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_NMEDIT"; then
+  ac_ct_NMEDIT=$NMEDIT
+  # Extract the first word of "nmedit", so it can be a program name with args.
+set dummy nmedit; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_NMEDIT+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_ct_NMEDIT"; then
+  ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_NMEDIT="nmedit"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT
+if test -n "$ac_ct_NMEDIT"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5
+$as_echo "$ac_ct_NMEDIT" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+  if test "x$ac_ct_NMEDIT" = x; then
+    NMEDIT=":"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    NMEDIT=$ac_ct_NMEDIT
+  fi
+else
+  NMEDIT="$ac_cv_prog_NMEDIT"
+fi
+
+    if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args.
+set dummy ${ac_tool_prefix}lipo; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_LIPO+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$LIPO"; then
+  ac_cv_prog_LIPO="$LIPO" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_LIPO="${ac_tool_prefix}lipo"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+LIPO=$ac_cv_prog_LIPO
+if test -n "$LIPO"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5
+$as_echo "$LIPO" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_LIPO"; then
+  ac_ct_LIPO=$LIPO
+  # Extract the first word of "lipo", so it can be a program name with args.
+set dummy lipo; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_LIPO+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_ct_LIPO"; then
+  ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_LIPO="lipo"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO
+if test -n "$ac_ct_LIPO"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5
+$as_echo "$ac_ct_LIPO" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+  if test "x$ac_ct_LIPO" = x; then
+    LIPO=":"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    LIPO=$ac_ct_LIPO
+  fi
+else
+  LIPO="$ac_cv_prog_LIPO"
+fi
+
+    if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args.
+set dummy ${ac_tool_prefix}otool; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_OTOOL+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$OTOOL"; then
+  ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_OTOOL="${ac_tool_prefix}otool"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+OTOOL=$ac_cv_prog_OTOOL
+if test -n "$OTOOL"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5
+$as_echo "$OTOOL" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_OTOOL"; then
+  ac_ct_OTOOL=$OTOOL
+  # Extract the first word of "otool", so it can be a program name with args.
+set dummy otool; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_OTOOL+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_ct_OTOOL"; then
+  ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_OTOOL="otool"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL
+if test -n "$ac_ct_OTOOL"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5
+$as_echo "$ac_ct_OTOOL" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+  if test "x$ac_ct_OTOOL" = x; then
+    OTOOL=":"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    OTOOL=$ac_ct_OTOOL
+  fi
+else
+  OTOOL="$ac_cv_prog_OTOOL"
+fi
+
+    if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args.
+set dummy ${ac_tool_prefix}otool64; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_OTOOL64+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$OTOOL64"; then
+  ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+OTOOL64=$ac_cv_prog_OTOOL64
+if test -n "$OTOOL64"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5
+$as_echo "$OTOOL64" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_OTOOL64"; then
+  ac_ct_OTOOL64=$OTOOL64
+  # Extract the first word of "otool64", so it can be a program name with args.
+set dummy otool64; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_OTOOL64+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_ct_OTOOL64"; then
+  ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_OTOOL64="otool64"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64
+if test -n "$ac_ct_OTOOL64"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5
+$as_echo "$ac_ct_OTOOL64" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+  if test "x$ac_ct_OTOOL64" = x; then
+    OTOOL64=":"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    OTOOL64=$ac_ct_OTOOL64
+  fi
+else
+  OTOOL64="$ac_cv_prog_OTOOL64"
+fi
+
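+# Both probes above honor a preset value ("Let the user override the
+# test").  A sketch, assuming a Mach-O toolchain (paths illustrative):
+#
+#   OTOOL=/usr/bin/otool OTOOL64=: ./configure
+#
+# ":" marks a tool as unavailable, as the fallbacks above also do.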
+
+
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5
+$as_echo_n "checking for -single_module linker flag... " >&6; }
+if ${lt_cv_apple_cc_single_mod+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_apple_cc_single_mod=no
+      if test -z "${LT_MULTI_MODULE}"; then
+	# By default we will add the -single_module flag. You can override
+	# by either setting the environment variable LT_MULTI_MODULE
+	# non-empty at configure time, or by adding -multi_module to the
+	# link flags.
+	rm -rf libconftest.dylib*
+	echo "int foo(void){return 1;}" > conftest.c
+	echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
+-dynamiclib -Wl,-single_module conftest.c" >&5
+	$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
+	  -dynamiclib -Wl,-single_module conftest.c 2>conftest.err
+        _lt_result=$?
+	# If there is a non-empty error log, and "single_module"
+	# appears in it, assume the flag caused a linker warning
+        if test -s conftest.err && $GREP single_module conftest.err; then
+	  cat conftest.err >&5
+	# Otherwise, if the output was created with a 0 exit code from
+	# the compiler, it worked.
+	elif test -f libconftest.dylib && test $_lt_result -eq 0; then
+	  lt_cv_apple_cc_single_mod=yes
+	else
+	  cat conftest.err >&5
+	fi
+	rm -rf libconftest.dylib*
+	rm -f conftest.*
+      fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5
+$as_echo "$lt_cv_apple_cc_single_mod" >&6; }
+
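+    # As the comment inside the cached probe notes, the test can be skipped
+    # outright; a sketch (assuming a POSIX shell):
+    #
+    #   LT_MULTI_MODULE=1 ./configure
+    #
+    # then lt_cv_apple_cc_single_mod stays "no" and -single_module is not
+    # used when linking.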
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5
+$as_echo_n "checking for -exported_symbols_list linker flag... " >&6; }
+if ${lt_cv_ld_exported_symbols_list+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_ld_exported_symbols_list=no
+      save_LDFLAGS=$LDFLAGS
+      echo "_main" > conftest.sym
+      LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym"
+      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  lt_cv_ld_exported_symbols_list=yes
+else
+  lt_cv_ld_exported_symbols_list=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+	LDFLAGS="$save_LDFLAGS"
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5
+$as_echo "$lt_cv_ld_exported_symbols_list" >&6; }
+
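+    # The probe above links a trivial program against an explicit export
+    # list.  Outside configure, the same Darwin linker feature looks like
+    # this (file names illustrative):
+    #
+    #   printf '_main\n' > syms.txt
+    #   cc -o demo demo.c -Wl,-exported_symbols_list,syms.txt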
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5
+$as_echo_n "checking for -force_load linker flag... " >&6; }
+if ${lt_cv_ld_force_load+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_ld_force_load=no
+      cat > conftest.c << _LT_EOF
+int forced_loaded() { return 2;}
+_LT_EOF
+      echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5
+      $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5
+      echo "$AR cru libconftest.a conftest.o" >&5
+      $AR cru libconftest.a conftest.o 2>&5
+      echo "$RANLIB libconftest.a" >&5
+      $RANLIB libconftest.a 2>&5
+      cat > conftest.c << _LT_EOF
+int main() { return 0;}
+_LT_EOF
+      echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5
+      $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err
+      _lt_result=$?
+      if test -s conftest.err && $GREP force_load conftest.err; then
+	cat conftest.err >&5
+      elif test -f conftest && test $_lt_result -eq 0 && $GREP forced_load conftest >/dev/null 2>&1 ; then
+	lt_cv_ld_force_load=yes
+      else
+	cat conftest.err >&5
+      fi
+        rm -f conftest.err libconftest.a conftest conftest.c
+        rm -rf conftest.dSYM
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5
+$as_echo "$lt_cv_ld_force_load" >&6; }
+    case $host_os in
+    rhapsody* | darwin1.[012])
+      _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;;
+    darwin1.*)
+      _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;;
+    darwin*) # darwin 5.x on
+      # On 10.5 or later, the deployment target defaults to the OS
+      # version; on x86 running 10.4 it defaults to 10.4.  Don't you
+      # love it?
+      case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in
+	10.0,*86*-darwin8*|10.0,*-darwin[91]*)
+	  _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;;
+	10.[012]*)
+	  _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;;
+	10.*)
+	  _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;;
+      esac
+    ;;
+  esac
+    if test "$lt_cv_apple_cc_single_mod" = "yes"; then
+      _lt_dar_single_mod='$single_module'
+    fi
+    if test "$lt_cv_ld_exported_symbols_list" = "yes"; then
+      _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym'
+    else
+      _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}'
+    fi
+    if test "$DSYMUTIL" != ":" && test "$lt_cv_ld_force_load" = "no"; then
+      _lt_dsymutil='~$DSYMUTIL $lib || :'
+    else
+      _lt_dsymutil=
+    fi
+    ;;
+  esac
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5
+$as_echo_n "checking how to run the C preprocessor... " >&6; }
+# On Suns, sometimes $CPP names a directory.
+if test -n "$CPP" && test -d "$CPP"; then
+  CPP=
+fi
+if test -z "$CPP"; then
+  if ${ac_cv_prog_CPP+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+      # Double quotes because CPP needs to be expanded
+    for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp"
+    do
+      ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+  # Use a header file that comes with gcc, so configuring glibc
+  # with a fresh cross-compiler works.
+  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+  # <limits.h> exists even on freestanding compilers.
+  # On the NeXT, cc -E runs the code through the compiler's parser,
+  # not just through cpp. "Syntax error" is here to catch this case.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+		     Syntax error
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+
+else
+  # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+  # OK, works on sane cases.  Now check whether nonexistent headers
+  # can be detected and how.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+  # Broken: success on invalid input.
+continue
+else
+  # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+  break
+fi
+
+    done
+    ac_cv_prog_CPP=$CPP
+
+fi
+  CPP=$ac_cv_prog_CPP
+else
+  ac_cv_prog_CPP=$CPP
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5
+$as_echo "$CPP" >&6; }
+ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+  # Use a header file that comes with gcc, so configuring glibc
+  # with a fresh cross-compiler works.
+  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+  # <limits.h> exists even on freestanding compilers.
+  # On the NeXT, cc -E runs the code through the compiler's parser,
+  # not just through cpp. "Syntax error" is here to catch this case.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+		     Syntax error
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+
+else
+  # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+  # OK, works on sane cases.  Now check whether nonexistent headers
+  # can be detected and how.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+  # Broken: success on invalid input.
+continue
+else
+  # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+
+else
+  { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "C preprocessor \"$CPP\" fails sanity check
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
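+# The preprocessor can be pinned instead of probed (sketch):
+#
+#   CPP="gcc -E" ./configure
+#
+# The two sanity checks above still run against the given command: valid
+# input must preprocess cleanly and <ac_nonexistent.h> must fail.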
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5
+$as_echo_n "checking for ANSI C header files... " >&6; }
+if ${ac_cv_header_stdc+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <float.h>
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_header_stdc=yes
+else
+  ac_cv_header_stdc=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+if test $ac_cv_header_stdc = yes; then
+  # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <string.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+  $EGREP "memchr" >/dev/null 2>&1; then :
+
+else
+  ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+  # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <stdlib.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+  $EGREP "free" >/dev/null 2>&1; then :
+
+else
+  ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+  # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
+  if test "$cross_compiling" = yes; then :
+  :
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <ctype.h>
+#include <stdlib.h>
+#if ((' ' & 0x0FF) == 0x020)
+# define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
+# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
+#else
+# define ISLOWER(c) \
+		   (('a' <= (c) && (c) <= 'i') \
+		     || ('j' <= (c) && (c) <= 'r') \
+		     || ('s' <= (c) && (c) <= 'z'))
+# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c))
+#endif
+
+#define XOR(e, f) (((e) && !(f)) || (!(e) && (f)))
+int
+main ()
+{
+  int i;
+  for (i = 0; i < 256; i++)
+    if (XOR (islower (i), ISLOWER (i))
+	|| toupper (i) != TOUPPER (i))
+      return 2;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+
+else
+  ac_cv_header_stdc=no
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+  conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5
+$as_echo "$ac_cv_header_stdc" >&6; }
+if test $ac_cv_header_stdc = yes; then
+
+$as_echo "#define STDC_HEADERS 1" >>confdefs.h
+
+fi
+
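+# As with any cached result, the ANSI-header probe can be preseeded so
+# the compile and run tests above are skipped (sketch):
+#
+#   ./configure ac_cv_header_stdc=yes
+#
+# VAR=VALUE arguments to configure are assigned and exported before any
+# checks run.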
+# On IRIX 5.3, sys/types and inttypes.h are conflicting.
+for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \
+		  inttypes.h stdint.h unistd.h
+do :
+  as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default
+"
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
+  cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+for ac_header in dlfcn.h
+do :
+  ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default
+"
+if test "x$ac_cv_header_dlfcn_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_DLFCN_H 1
+_ACEOF
+
+fi
+
+done
+
+
+
+
+
+# Set options
+
+
+
+        enable_dlopen=no
+
+
+  enable_win32_dll=no
+
+
+            # Check whether --enable-shared was given.
+if test "${enable_shared+set}" = set; then :
+  enableval=$enable_shared; p=${PACKAGE-default}
+    case $enableval in
+    yes) enable_shared=yes ;;
+    no) enable_shared=no ;;
+    *)
+      enable_shared=no
+      # Look at the argument we got.  We use all the common list separators.
+      lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+      for pkg in $enableval; do
+	IFS="$lt_save_ifs"
+	if test "X$pkg" = "X$p"; then
+	  enable_shared=yes
+	fi
+      done
+      IFS="$lt_save_ifs"
+      ;;
+    esac
+else
+  enable_shared=yes
+fi
+
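+# The parsing above also accepts a comma- or whitespace-separated package
+# list matched against ${PACKAGE-default}.  Typical invocations (the
+# package name is illustrative):
+#
+#   ./configure --disable-shared            # build static libraries only
+#   ./configure --enable-shared=librsb      # enable shared just for librsb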
+
+
+
+
+
+
+
+
+  # Check whether --enable-static was given.
+if test "${enable_static+set}" = set; then :
+  enableval=$enable_static; p=${PACKAGE-default}
+    case $enableval in
+    yes) enable_static=yes ;;
+    no) enable_static=no ;;
+    *)
+      enable_static=no
+      # Look at the argument we got.  We use all the common list separators.
+      lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+      for pkg in $enableval; do
+	IFS="$lt_save_ifs"
+	if test "X$pkg" = "X$p"; then
+	  enable_static=yes
+	fi
+      done
+      IFS="$lt_save_ifs"
+      ;;
+    esac
+else
+  enable_static=yes
+fi
+
+
+
+
+
+
+
+
+
+
+# Check whether --with-pic was given.
+if test "${with_pic+set}" = set; then :
+  withval=$with_pic; lt_p=${PACKAGE-default}
+    case $withval in
+    yes|no) pic_mode=$withval ;;
+    *)
+      pic_mode=default
+      # Look at the argument we got.  We use all the common list separators.
+      lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+      for lt_pkg in $withval; do
+	IFS="$lt_save_ifs"
+	if test "X$lt_pkg" = "X$lt_p"; then
+	  pic_mode=yes
+	fi
+      done
+      IFS="$lt_save_ifs"
+      ;;
+    esac
+else
+  pic_mode=default
+fi
+
+
+test -z "$pic_mode" && pic_mode=default
+
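+# pic_mode controls which flavor of objects libtool builds:
+#
+#   ./configure --with-pic      # pic_mode=yes: PIC objects only
+#   ./configure --without-pic   # pic_mode=no:  prefer non-PIC objects
+#
+# The default builds both where the platform calls for it.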
+
+
+
+
+
+
+  # Check whether --enable-fast-install was given.
+if test "${enable_fast_install+set}" = set; then :
+  enableval=$enable_fast_install; p=${PACKAGE-default}
+    case $enableval in
+    yes) enable_fast_install=yes ;;
+    no) enable_fast_install=no ;;
+    *)
+      enable_fast_install=no
+      # Look at the argument we got.  We use all the common list separators.
+      lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+      for pkg in $enableval; do
+	IFS="$lt_save_ifs"
+	if test "X$pkg" = "X$p"; then
+	  enable_fast_install=yes
+	fi
+      done
+      IFS="$lt_save_ifs"
+      ;;
+    esac
+else
+  enable_fast_install=yes
+fi
+
+
+
+
+
+
+
+
+
+
+
+# This can be used to rebuild libtool when needed
+LIBTOOL_DEPS="$ltmain"
+
+# Always use our own libtool.
+LIBTOOL='$(SHELL) $(top_builddir)/libtool'
+
+
+test -z "$LN_S" && LN_S="ln -s"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+if test -n "${ZSH_VERSION+set}" ; then
+   setopt NO_GLOB_SUBST
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5
+$as_echo_n "checking for objdir... " >&6; }
+if ${lt_cv_objdir+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  rm -f .libs 2>/dev/null
+mkdir .libs 2>/dev/null
+if test -d .libs; then
+  lt_cv_objdir=.libs
+else
+  # MS-DOS does not allow filenames that begin with a dot.
+  lt_cv_objdir=_libs
+fi
+rmdir .libs 2>/dev/null
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5
+$as_echo "$lt_cv_objdir" >&6; }
+objdir=$lt_cv_objdir
+
+
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define LT_OBJDIR "$lt_cv_objdir/"
+_ACEOF
+
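+# LT_OBJDIR names the staging directory libtool compiles and links into:
+# ".libs/" everywhere except MS-DOS, per the check above.  After a build,
+# e.g. (file names illustrative):
+#
+#   ls .libs
+#   librsb.a  librsb.la  librsb.so  librsb.so.0  ...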
+
+
+
+case $host_os in
+aix3*)
+  # AIX sometimes has problems with the GCC collect2 program.  For some
+  # reason, if we set the COLLECT_NAMES environment variable, the problems
+  # vanish in a puff of smoke.
+  if test "X${COLLECT_NAMES+set}" != Xset; then
+    COLLECT_NAMES=
+    export COLLECT_NAMES
+  fi
+  ;;
+esac
+
+# Global variables:
+ofile=libtool
+can_build_shared=yes
+
+# All known linkers require a `.a' archive for static linking (except MSVC,
+# which needs '.lib').
+libext=a
+
+with_gnu_ld="$lt_cv_prog_gnu_ld"
+
+old_CC="$CC"
+old_CFLAGS="$CFLAGS"
+
+# Set sane defaults for various variables
+test -z "$CC" && CC=cc
+test -z "$LTCC" && LTCC=$CC
+test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS
+test -z "$LD" && LD=ld
+test -z "$ac_objext" && ac_objext=o
+
+for cc_temp in $compiler""; do
+  case $cc_temp in
+    compile | *[\\/]compile | ccache | *[\\/]ccache ) ;;
+    distcc | *[\\/]distcc | purify | *[\\/]purify ) ;;
+    \-*) ;;
+    *) break;;
+  esac
+done
+cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
+
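+# The loop above skips wrapper commands (compile, ccache, distcc, purify)
+# and the sed strips any directory and host-triplet prefix.  For example:
+#
+#   host_alias=x86_64-linux-gnu
+#   echo /usr/bin/x86_64-linux-gnu-gcc | sed "s%.*/%%; s%^$host_alias-%%"
+#   # prints: gcc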
+
+# Only perform the check for file, if the check method requires it
+test -z "$MAGIC_CMD" && MAGIC_CMD=file
+case $deplibs_check_method in
+file_magic*)
+  if test "$file_magic_cmd" = '$MAGIC_CMD'; then
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5
+$as_echo_n "checking for ${ac_tool_prefix}file... " >&6; }
+if ${lt_cv_path_MAGIC_CMD+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  case $MAGIC_CMD in
+[\\/*] |  ?:[\\/]*)
+  lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path.
+  ;;
+*)
+  lt_save_MAGIC_CMD="$MAGIC_CMD"
+  lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+  ac_dummy="/usr/bin$PATH_SEPARATOR$PATH"
+  for ac_dir in $ac_dummy; do
+    IFS="$lt_save_ifs"
+    test -z "$ac_dir" && ac_dir=.
+    if test -f $ac_dir/${ac_tool_prefix}file; then
+      lt_cv_path_MAGIC_CMD="$ac_dir/${ac_tool_prefix}file"
+      if test -n "$file_magic_test_file"; then
+	case $deplibs_check_method in
+	"file_magic "*)
+	  file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"`
+	  MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+	  if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
+	    $EGREP "$file_magic_regex" > /dev/null; then
+	    :
+	  else
+	    cat <<_LT_EOF 1>&2
+
+*** Warning: the command libtool uses to detect shared libraries,
+*** $file_magic_cmd, produces output that libtool cannot recognize.
+*** The result is that libtool may fail to recognize shared libraries
+*** as such.  This will affect the creation of libtool libraries that
+*** depend on shared libraries, but programs linked with such libtool
+*** libraries will work regardless of this problem.  Nevertheless, you
+*** may want to report the problem to your system manager and/or to
+*** bug-libtool at gnu.org
+
+_LT_EOF
+	  fi ;;
+	esac
+      fi
+      break
+    fi
+  done
+  IFS="$lt_save_ifs"
+  MAGIC_CMD="$lt_save_MAGIC_CMD"
+  ;;
+esac
+fi
+
+MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+if test -n "$MAGIC_CMD"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5
+$as_echo "$MAGIC_CMD" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+
+
+
+if test -z "$lt_cv_path_MAGIC_CMD"; then
+  if test -n "$ac_tool_prefix"; then
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for file" >&5
+$as_echo_n "checking for file... " >&6; }
+if ${lt_cv_path_MAGIC_CMD+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  case $MAGIC_CMD in
+[\\/*] |  ?:[\\/]*)
+  lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path.
+  ;;
+*)
+  lt_save_MAGIC_CMD="$MAGIC_CMD"
+  lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+  ac_dummy="/usr/bin$PATH_SEPARATOR$PATH"
+  for ac_dir in $ac_dummy; do
+    IFS="$lt_save_ifs"
+    test -z "$ac_dir" && ac_dir=.
+    if test -f $ac_dir/file; then
+      lt_cv_path_MAGIC_CMD="$ac_dir/file"
+      if test -n "$file_magic_test_file"; then
+	case $deplibs_check_method in
+	"file_magic "*)
+	  file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"`
+	  MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+	  if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
+	    $EGREP "$file_magic_regex" > /dev/null; then
+	    :
+	  else
+	    cat <<_LT_EOF 1>&2
+
+*** Warning: the command libtool uses to detect shared libraries,
+*** $file_magic_cmd, produces output that libtool cannot recognize.
+*** The result is that libtool may fail to recognize shared libraries
+*** as such.  This will affect the creation of libtool libraries that
+*** depend on shared libraries, but programs linked with such libtool
+*** libraries will work regardless of this problem.  Nevertheless, you
+*** may want to report the problem to your system manager and/or to
+*** bug-libtool at gnu.org
+
+_LT_EOF
+	  fi ;;
+	esac
+      fi
+      break
+    fi
+  done
+  IFS="$lt_save_ifs"
+  MAGIC_CMD="$lt_save_MAGIC_CMD"
+  ;;
+esac
+fi
+
+MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+if test -n "$MAGIC_CMD"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5
+$as_echo "$MAGIC_CMD" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  else
+    MAGIC_CMD=:
+  fi
+fi
+
+  fi
+  ;;
+esac
+
+# Use C for the default configuration in the libtool script
+
+lt_save_CC="$CC"
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+# Source file extension for C test sources.
+ac_ext=c
+
+# Object file extension for compiled C test sources.
+objext=o
+objext=$objext
+
+# Code to be used in simple compile tests
+lt_simple_compile_test_code="int some_variable = 0;"
+
+# Code to be used in simple link tests
+lt_simple_link_test_code='int main(){return(0);}'
+
+
+
+
+
+
+
+# If no C compiler was specified, use CC.
+LTCC=${LTCC-"$CC"}
+
+# If no C compiler flags were specified, use CFLAGS.
+LTCFLAGS=${LTCFLAGS-"$CFLAGS"}
+
+# Allow CC to be a program name with arguments.
+compiler=$CC
+
+# Save the default compiler, since it gets overwritten when the other
+# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP.
+compiler_DEFAULT=$CC
+
+# save warnings/boilerplate of simple test code
+ac_outfile=conftest.$ac_objext
+echo "$lt_simple_compile_test_code" >conftest.$ac_ext
+eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_compiler_boilerplate=`cat conftest.err`
+$RM conftest*
+
+ac_outfile=conftest.$ac_objext
+echo "$lt_simple_link_test_code" >conftest.$ac_ext
+eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_linker_boilerplate=`cat conftest.err`
+$RM -r conftest*
+
+
+if test -n "$compiler"; then
+
+lt_prog_compiler_no_builtin_flag=
+
+if test "$GCC" = yes; then
+  case $cc_basename in
+  nvcc*)
+    lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;;
+  *)
+    lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;;
+  esac
+
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5
+$as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; }
+if ${lt_cv_prog_compiler_rtti_exceptions+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_prog_compiler_rtti_exceptions=no
+   ac_outfile=conftest.$ac_objext
+   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+   lt_compiler_flag="-fno-rtti -fno-exceptions"
+   # Insert the option either (1) after the last *FLAGS variable, or
+   # (2) before a word containing "conftest.", or (3) at the end.
+   # Note that $ac_compile itself does not contain backslashes and begins
+   # with a dollar sign (not a hyphen), so the echo should work correctly.
+   # The option is referenced via a variable to avoid confusing sed.
+   lt_compile=`echo "$ac_compile" | $SED \
+   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+   -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+   -e 's:$: $lt_compiler_flag:'`
+   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
+   (eval "$lt_compile" 2>conftest.err)
+   ac_status=$?
+   cat conftest.err >&5
+   echo "$as_me:$LINENO: \$? = $ac_status" >&5
+   if (exit $ac_status) && test -s "$ac_outfile"; then
+     # The compiler can only warn and ignore the option if not recognized
+     # So say no if there are warnings other than the usual output.
+     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp
+     $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+     if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
+       lt_cv_prog_compiler_rtti_exceptions=yes
+     fi
+   fi
+   $RM conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5
+$as_echo "$lt_cv_prog_compiler_rtti_exceptions" >&6; }
+
+if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then
+    lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions"
+else
+    :
+fi
+
+fi
+
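+# The cached probe above amounts to: compile the one-line test source with
+# the candidate flags and accept them only if no warnings appear beyond
+# the compiler's usual boilerplate.  By hand (sketch):
+#
+#   echo 'int some_variable = 0;' > conftest.c
+#   gcc -c -fno-rtti -fno-exceptions conftest.c 2>warnings.log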
+
+
+
+
+
+  lt_prog_compiler_wl=
+lt_prog_compiler_pic=
+lt_prog_compiler_static=
+
+
+  if test "$GCC" = yes; then
+    lt_prog_compiler_wl='-Wl,'
+    lt_prog_compiler_static='-static'
+
+    case $host_os in
+      aix*)
+      # All AIX code is PIC.
+      if test "$host_cpu" = ia64; then
+	# AIX 5 now supports IA64 processor
+	lt_prog_compiler_static='-Bstatic'
+      fi
+      ;;
+
+    amigaos*)
+      case $host_cpu in
+      powerpc)
+            # see comment about AmigaOS4 .so support
+            lt_prog_compiler_pic='-fPIC'
+        ;;
+      m68k)
+            # FIXME: we need at least 68020 code to build shared libraries, but
+            # adding the `-m68020' flag to GCC prevents building anything better,
+            # like `-m68040'.
+            lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4'
+        ;;
+      esac
+      ;;
+
+    beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*)
+      # PIC is the default for these OSes.
+      ;;
+
+    mingw* | cygwin* | pw32* | os2* | cegcc*)
+      # This hack is so that the source file can tell whether it is being
+      # built for inclusion in a dll (and should export symbols for example).
+      # Although the cygwin gcc ignores -fPIC, we still need this for
+      # old-style (--disable-auto-import) libraries.
+      lt_prog_compiler_pic='-DDLL_EXPORT'
+      ;;
+
+    darwin* | rhapsody*)
+      # PIC is the default on this platform
+      # Common symbols not allowed in MH_DYLIB files
+      lt_prog_compiler_pic='-fno-common'
+      ;;
+
+    haiku*)
+      # PIC is the default for Haiku.
+      # The "-static" flag exists, but is broken.
+      lt_prog_compiler_static=
+      ;;
+
+    hpux*)
+      # PIC is the default for 64-bit PA HP-UX, but not for 32-bit
+      # PA HP-UX.  On IA64 HP-UX, PIC is the default but the pic flag
+      # sets the default TLS model and affects inlining.
+      case $host_cpu in
+      hppa*64*)
+	# +Z the default
+	;;
+      *)
+	lt_prog_compiler_pic='-fPIC'
+	;;
+      esac
+      ;;
+
+    interix[3-9]*)
+      # Interix 3.x gcc -fpic/-fPIC options generate broken code.
+      # Instead, we relocate shared libraries at runtime.
+      ;;
+
+    msdosdjgpp*)
+      # Just because we use GCC doesn't mean we suddenly get shared libraries
+      # on systems that don't support them.
+      lt_prog_compiler_can_build_shared=no
+      enable_shared=no
+      ;;
+
+    *nto* | *qnx*)
+      # QNX uses GNU C++, but the -shared option must be given too;
+      # otherwise it will core dump.
+      lt_prog_compiler_pic='-fPIC -shared'
+      ;;
+
+    sysv4*MP*)
+      if test -d /usr/nec; then
+	lt_prog_compiler_pic=-Kconform_pic
+      fi
+      ;;
+
+    *)
+      lt_prog_compiler_pic='-fPIC'
+      ;;
+    esac
+
+    case $cc_basename in
+    nvcc*) # Cuda Compiler Driver 2.2
+      lt_prog_compiler_wl='-Xlinker '
+      if test -n "$lt_prog_compiler_pic"; then
+        lt_prog_compiler_pic="-Xcompiler $lt_prog_compiler_pic"
+      fi
+      ;;
+    esac
+  else
+    # PORTME Check for flag to pass linker flags through the system compiler.
+    case $host_os in
+    aix*)
+      lt_prog_compiler_wl='-Wl,'
+      if test "$host_cpu" = ia64; then
+	# AIX 5 now supports IA64 processor
+	lt_prog_compiler_static='-Bstatic'
+      else
+	lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp'
+      fi
+      ;;
+
+    mingw* | cygwin* | pw32* | os2* | cegcc*)
+      # This hack is so that the source file can tell whether it is being
+      # built for inclusion in a dll (and should export symbols for example).
+      lt_prog_compiler_pic='-DDLL_EXPORT'
+      ;;
+
+    hpux9* | hpux10* | hpux11*)
+      lt_prog_compiler_wl='-Wl,'
+      # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but
+      # not for PA HP-UX.
+      case $host_cpu in
+      hppa*64*|ia64*)
+	# +Z the default
+	;;
+      *)
+	lt_prog_compiler_pic='+Z'
+	;;
+      esac
+      # Is there a better lt_prog_compiler_static that works with the bundled CC?
+      lt_prog_compiler_static='${wl}-a ${wl}archive'
+      ;;
+
+    irix5* | irix6* | nonstopux*)
+      lt_prog_compiler_wl='-Wl,'
+      # PIC (with -KPIC) is the default.
+      lt_prog_compiler_static='-non_shared'
+      ;;
+
+    linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+      case $cc_basename in
+      # old Intel for x86_64 which still supported -KPIC.
+      ecc*)
+	lt_prog_compiler_wl='-Wl,'
+	lt_prog_compiler_pic='-KPIC'
+	lt_prog_compiler_static='-static'
+        ;;
+      # icc used to be incompatible with GCC.
+      # ICC 10 doesn't accept -KPIC any more.
+      icc* | ifort*)
+	lt_prog_compiler_wl='-Wl,'
+	lt_prog_compiler_pic='-fPIC'
+	lt_prog_compiler_static='-static'
+        ;;
+      # Lahey Fortran 8.1.
+      lf95*)
+	lt_prog_compiler_wl='-Wl,'
+	lt_prog_compiler_pic='--shared'
+	lt_prog_compiler_static='--static'
+	;;
+      nagfor*)
+	# NAG Fortran compiler
+	lt_prog_compiler_wl='-Wl,-Wl,,'
+	lt_prog_compiler_pic='-PIC'
+	lt_prog_compiler_static='-Bstatic'
+	;;
+      pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*)
+        # Portland Group compilers (*not* the Pentium gcc compiler,
+	# which looks to be a dead project)
+	lt_prog_compiler_wl='-Wl,'
+	lt_prog_compiler_pic='-fpic'
+	lt_prog_compiler_static='-Bstatic'
+        ;;
+      ccc*)
+        lt_prog_compiler_wl='-Wl,'
+        # All Alpha code is PIC.
+        lt_prog_compiler_static='-non_shared'
+        ;;
+      xl* | bgxl* | bgf* | mpixl*)
+	# IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene
+	lt_prog_compiler_wl='-Wl,'
+	lt_prog_compiler_pic='-qpic'
+	lt_prog_compiler_static='-qstaticlink'
+	;;
+      *)
+	case `$CC -V 2>&1 | sed 5q` in
+	*Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [1-7].* | *Sun*Fortran*\ 8.[0-3]*)
+	  # Sun Fortran 8.3 passes all unrecognized flags to the linker
+	  lt_prog_compiler_pic='-KPIC'
+	  lt_prog_compiler_static='-Bstatic'
+	  lt_prog_compiler_wl=''
+	  ;;
+	*Sun\ F* | *Sun*Fortran*)
+	  lt_prog_compiler_pic='-KPIC'
+	  lt_prog_compiler_static='-Bstatic'
+	  lt_prog_compiler_wl='-Qoption ld '
+	  ;;
+	*Sun\ C*)
+	  # Sun C 5.9
+	  lt_prog_compiler_pic='-KPIC'
+	  lt_prog_compiler_static='-Bstatic'
+	  lt_prog_compiler_wl='-Wl,'
+	  ;;
+        *Intel*\ [CF]*Compiler*)
+	  lt_prog_compiler_wl='-Wl,'
+	  lt_prog_compiler_pic='-fPIC'
+	  lt_prog_compiler_static='-static'
+	  ;;
+	*Portland\ Group*)
+	  lt_prog_compiler_wl='-Wl,'
+	  lt_prog_compiler_pic='-fpic'
+	  lt_prog_compiler_static='-Bstatic'
+	  ;;
+	esac
+	;;
+      esac
+      ;;
+
+    newsos6)
+      lt_prog_compiler_pic='-KPIC'
+      lt_prog_compiler_static='-Bstatic'
+      ;;
+
+    *nto* | *qnx*)
+      # QNX uses GNU C++, but the -shared option must be given too;
+      # otherwise it will core dump.
+      lt_prog_compiler_pic='-fPIC -shared'
+      ;;
+
+    osf3* | osf4* | osf5*)
+      lt_prog_compiler_wl='-Wl,'
+      # All OSF/1 code is PIC.
+      lt_prog_compiler_static='-non_shared'
+      ;;
+
+    rdos*)
+      lt_prog_compiler_static='-non_shared'
+      ;;
+
+    solaris*)
+      lt_prog_compiler_pic='-KPIC'
+      lt_prog_compiler_static='-Bstatic'
+      case $cc_basename in
+      f77* | f90* | f95* | sunf77* | sunf90* | sunf95*)
+	lt_prog_compiler_wl='-Qoption ld ';;
+      *)
+	lt_prog_compiler_wl='-Wl,';;
+      esac
+      ;;
+
+    sunos4*)
+      lt_prog_compiler_wl='-Qoption ld '
+      lt_prog_compiler_pic='-PIC'
+      lt_prog_compiler_static='-Bstatic'
+      ;;
+
+    sysv4 | sysv4.2uw2* | sysv4.3*)
+      lt_prog_compiler_wl='-Wl,'
+      lt_prog_compiler_pic='-KPIC'
+      lt_prog_compiler_static='-Bstatic'
+      ;;
+
+    sysv4*MP*)
+      if test -d /usr/nec; then
+	lt_prog_compiler_pic='-Kconform_pic'
+	lt_prog_compiler_static='-Bstatic'
+      fi
+      ;;
+
+    sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*)
+      lt_prog_compiler_wl='-Wl,'
+      lt_prog_compiler_pic='-KPIC'
+      lt_prog_compiler_static='-Bstatic'
+      ;;
+
+    unicos*)
+      lt_prog_compiler_wl='-Wl,'
+      lt_prog_compiler_can_build_shared=no
+      ;;
+
+    uts4*)
+      lt_prog_compiler_pic='-pic'
+      lt_prog_compiler_static='-Bstatic'
+      ;;
+
+    *)
+      lt_prog_compiler_can_build_shared=no
+      ;;
+    esac
+  fi
+
+case $host_os in
+  # For platforms which do not support PIC, -DPIC is meaningless:
+  *djgpp*)
+    lt_prog_compiler_pic=
+    ;;
+  *)
+    lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC"
+    ;;
+esac
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5
+$as_echo_n "checking for $compiler option to produce PIC... " >&6; }
+if ${lt_cv_prog_compiler_pic+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_prog_compiler_pic=$lt_prog_compiler_pic
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5
+$as_echo "$lt_cv_prog_compiler_pic" >&6; }
+lt_prog_compiler_pic=$lt_cv_prog_compiler_pic
+
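+# With GCC the option recorded above is typically "-fPIC", with "-DPIC"
+# appended so sources can detect PIC compilation.  The probe can be
+# bypassed by preseeding the cache (value illustrative):
+#
+#   ./configure lt_cv_prog_compiler_pic='-fPIC -DPIC'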
+#
+# Check to make sure the PIC flag actually works.
+#
+if test -n "$lt_prog_compiler_pic"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5
+$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... " >&6; }
+if ${lt_cv_prog_compiler_pic_works+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_prog_compiler_pic_works=no
+   ac_outfile=conftest.$ac_objext
+   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+   lt_compiler_flag="$lt_prog_compiler_pic -DPIC"
+   # Insert the option either (1) after the last *FLAGS variable, or
+   # (2) before a word containing "conftest.", or (3) at the end.
+   # Note that $ac_compile itself does not contain backslashes and begins
+   # with a dollar sign (not a hyphen), so the echo should work correctly.
+   # The option is referenced via a variable to avoid confusing sed.
+   lt_compile=`echo "$ac_compile" | $SED \
+   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+   -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+   -e 's:$: $lt_compiler_flag:'`
+   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
+   (eval "$lt_compile" 2>conftest.err)
+   ac_status=$?
+   cat conftest.err >&5
+   echo "$as_me:$LINENO: \$? = $ac_status" >&5
+   if (exit $ac_status) && test -s "$ac_outfile"; then
+     # The compiler can only warn and ignore the option if not recognized
+     # So say no if there are warnings other than the usual output.
+     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp
+     $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+     if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
+       lt_cv_prog_compiler_pic_works=yes
+     fi
+   fi
+   $RM conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5
+$as_echo "$lt_cv_prog_compiler_pic_works" >&6; }
+
+if test x"$lt_cv_prog_compiler_pic_works" = xyes; then
+    case $lt_prog_compiler_pic in
+     "" | " "*) ;;
+     *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;;
+     esac
+else
+    lt_prog_compiler_pic=
+     lt_prog_compiler_can_build_shared=no
+fi
+
+fi
+
+
+
+
+
+
+
+
+
+
+
+#
+# Check to make sure the static flag actually works.
+#
+wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\"
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5
+$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; }
+if ${lt_cv_prog_compiler_static_works+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_prog_compiler_static_works=no
+   save_LDFLAGS="$LDFLAGS"
+   LDFLAGS="$LDFLAGS $lt_tmp_static_flag"
+   echo "$lt_simple_link_test_code" > conftest.$ac_ext
+   if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
+     # The linker can only warn and ignore the option if not recognized
+     # So say no if there are warnings
+     if test -s conftest.err; then
+       # Append any errors to the config.log.
+       cat conftest.err 1>&5
+       $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp
+       $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+       if diff conftest.exp conftest.er2 >/dev/null; then
+         lt_cv_prog_compiler_static_works=yes
+       fi
+     else
+       lt_cv_prog_compiler_static_works=yes
+     fi
+   fi
+   $RM -r conftest*
+   LDFLAGS="$save_LDFLAGS"
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5
+$as_echo "$lt_cv_prog_compiler_static_works" >&6; }
+
+if test x"$lt_cv_prog_compiler_static_works" = xyes; then
+    :
+else
+    lt_prog_compiler_static=
+fi
+
+
+
+
+
+
+
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
+$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
+if ${lt_cv_prog_compiler_c_o+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_prog_compiler_c_o=no
+   $RM -r conftest 2>/dev/null
+   mkdir conftest
+   cd conftest
+   mkdir out
+   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+   lt_compiler_flag="-o out/conftest2.$ac_objext"
+   # Insert the option either (1) after the last *FLAGS variable, or
+   # (2) before a word containing "conftest.", or (3) at the end.
+   # Note that $ac_compile itself does not contain backslashes and begins
+   # with a dollar sign (not a hyphen), so the echo should work correctly.
+   lt_compile=`echo "$ac_compile" | $SED \
+   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+   -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+   -e 's:$: $lt_compiler_flag:'`
+   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
+   (eval "$lt_compile" 2>out/conftest.err)
+   ac_status=$?
+   cat out/conftest.err >&5
+   echo "$as_me:$LINENO: \$? = $ac_status" >&5
+   if (exit $ac_status) && test -s out/conftest2.$ac_objext
+   then
+     # The compiler can only warn and ignore the option if not recognized
+     # So say no if there are warnings
+     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
+     $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
+     if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
+       lt_cv_prog_compiler_c_o=yes
+     fi
+   fi
+   chmod u+w . 2>&5
+   $RM conftest*
+   # SGI C++ compiler will create directory out/ii_files/ for
+   # template instantiation
+   test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
+   $RM out/* && rmdir out
+   cd ..
+   $RM -r conftest
+   $RM conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5
+$as_echo "$lt_cv_prog_compiler_c_o" >&6; }
+
+
+hard_links="nottested"
+if test "$lt_cv_prog_compiler_c_o" = no && test "$need_locks" != no; then
+  # do not overwrite the value of need_locks provided by the user
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5
+$as_echo_n "checking if we can lock with hard links... " >&6; }
+  hard_links=yes
+  $RM conftest*
+  ln conftest.a conftest.b 2>/dev/null && hard_links=no
+  touch conftest.a
+  ln conftest.a conftest.b 2>&5 || hard_links=no
+  ln conftest.a conftest.b 2>/dev/null && hard_links=no
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5
+$as_echo "$hard_links" >&6; }
+  if test "$hard_links" = no; then
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5
+$as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;}
+    need_locks=warn
+  fi
+else
+  need_locks=no
+fi
+
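+# When the compiler cannot handle "-c -o", libtool must compile through a
+# fixed object name, so concurrent compiles can collide.  The test above
+# asks whether hard links work as a locking primitive (need_locks=warn);
+# where they do not, the warning applies and a serial build is safest:
+#
+#   make -j1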
+
+
+
+
+
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5
+$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; }
+
+  runpath_var=
+  allow_undefined_flag=
+  always_export_symbols=no
+  archive_cmds=
+  archive_expsym_cmds=
+  compiler_needs_object=no
+  enable_shared_with_static_runtimes=no
+  export_dynamic_flag_spec=
+  export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+  hardcode_automatic=no
+  hardcode_direct=no
+  hardcode_direct_absolute=no
+  hardcode_libdir_flag_spec=
+  hardcode_libdir_separator=
+  hardcode_minus_L=no
+  hardcode_shlibpath_var=unsupported
+  inherit_rpath=no
+  link_all_deplibs=unknown
+  module_cmds=
+  module_expsym_cmds=
+  old_archive_from_new_cmds=
+  old_archive_from_expsyms_cmds=
+  thread_safe_flag_spec=
+  whole_archive_flag_spec=
+  # include_expsyms should be a list of space-separated symbols to be *always*
+  # included in the symbol list
+  include_expsyms=
+  # exclude_expsyms can be an extended regexp of symbols to exclude
+  # it will be wrapped by ` (' and `)$', so one must not match beginning or
+  # end of line.  Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc',
+  # as well as any symbol that contains `d'.
+  exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'
+  # Although _GLOBAL_OFFSET_TABLE_ is a valid C symbol name, most a.out
+  # platforms (ab)use it in PIC code, but their linkers get confused if
+  # the symbol is explicitly referenced.  Since portable code cannot
+  # rely on this symbol name, it's probably fine to never include it in
+  # preloaded symbol tables.
+  # Exclude shared library initialization/finalization symbols.
+  extract_expsyms_cmds=
+
+  case $host_os in
+  cygwin* | mingw* | pw32* | cegcc*)
+    # FIXME: the MSVC++ port hasn't been tested in a loooong time
+    # When not using gcc, we currently assume that we are using
+    # Microsoft Visual C++.
+    if test "$GCC" != yes; then
+      with_gnu_ld=no
+    fi
+    ;;
+  interix*)
+    # we just hope/assume this is gcc and not c89 (= MSVC++)
+    with_gnu_ld=yes
+    ;;
+  openbsd*)
+    with_gnu_ld=no
+    ;;
+  linux* | k*bsd*-gnu | gnu*)
+    link_all_deplibs=no
+    ;;
+  esac
+
+  ld_shlibs=yes
+
+  # On some targets, GNU ld is compatible enough with the native linker
+  # that we're better off using the native interface for both.
+  lt_use_gnu_ld_interface=no
+  if test "$with_gnu_ld" = yes; then
+    case $host_os in
+      aix*)
+	# The AIX port of GNU ld has always aspired to compatibility
+	# with the native linker.  However, as the warning in the GNU ld
+	# block says, versions before 2.19.5* couldn't really create working
+	# shared libraries, regardless of the interface used.
+	case `$LD -v 2>&1` in
+	  *\ \(GNU\ Binutils\)\ 2.19.5*) ;;
+	  *\ \(GNU\ Binutils\)\ 2.[2-9]*) ;;
+	  *\ \(GNU\ Binutils\)\ [3-9]*) ;;
+	  *)
+	    lt_use_gnu_ld_interface=yes
+	    ;;
+	esac
+	;;
+      *)
+	lt_use_gnu_ld_interface=yes
+	;;
+    esac
+  fi
+
+  if test "$lt_use_gnu_ld_interface" = yes; then
+    # If archive_cmds runs LD, not CC, wlarc should be empty
+    wlarc='${wl}'
+
+    # Set some defaults for GNU ld with shared library support. These
+    # are reset later if shared libraries are not supported. Putting them
+    # here allows them to be overridden if necessary.
+    runpath_var=LD_RUN_PATH
+    hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+    export_dynamic_flag_spec='${wl}--export-dynamic'
+    # ancient GNU ld didn't support --whole-archive et al.
+    if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then
+      whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+    else
+      whole_archive_flag_spec=
+    fi
+    supports_anon_versioning=no
+    case `$LD -v 2>&1` in
+      *GNU\ gold*) supports_anon_versioning=yes ;;
+      *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11
+      *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ...
+      *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ...
+      *\ 2.11.*) ;; # other 2.11 versions
+      *) supports_anon_versioning=yes ;;
+    esac
+
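+    # "Anonymous versioning" is a linker version script without a version
+    # tag; archive_expsym_cmds below generates one to restrict exports.
+    # Its shape (the exported symbol is hypothetical):
+    #
+    #   { global:
+    #   rsb_init;
+    #   local: *; };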
+    # See if GNU ld supports shared libraries.
+    case $host_os in
+    aix[3-9]*)
+      # On AIX/PPC, the GNU linker is very broken
+      if test "$host_cpu" != ia64; then
+	ld_shlibs=no
+	cat <<_LT_EOF 1>&2
+
+*** Warning: the GNU linker, at least up to release 2.19, is reported
+*** to be unable to reliably create shared libraries on AIX.
+*** Therefore, libtool is disabling shared libraries support.  If you
+*** really care for shared libraries, you may want to install binutils
+*** 2.20 or above, or modify your PATH so that a non-GNU linker is found.
+*** You will then need to restart the configuration process.
+
+_LT_EOF
+      fi
+      ;;
+
+    amigaos*)
+      case $host_cpu in
+      powerpc)
+            # see comment about AmigaOS4 .so support
+            archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+            archive_expsym_cmds=''
+        ;;
+      m68k)
+            archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+            hardcode_libdir_flag_spec='-L$libdir'
+            hardcode_minus_L=yes
+        ;;
+      esac
+      ;;
+
+    beos*)
+      if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	allow_undefined_flag=unsupported
+	# Joseph Beckenbach <jrb3 at best.com> says some releases of gcc
+	# support --undefined.  This deserves some investigation.  FIXME
+	archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+      else
+	ld_shlibs=no
+      fi
+      ;;
+
+    cygwin* | mingw* | pw32* | cegcc*)
+      # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless,
+      # as there is no search path for DLLs.
+      hardcode_libdir_flag_spec='-L$libdir'
+      export_dynamic_flag_spec='${wl}--export-all-symbols'
+      allow_undefined_flag=unsupported
+      always_export_symbols=no
+      enable_shared_with_static_runtimes=yes
+      export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols'
+      exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'
+
+      if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
+        archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+	# If the export-symbols file already is a .def file (1st line
+	# is EXPORTS), use it as is; otherwise, prepend...
+	archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+	  cp $export_symbols $output_objdir/$soname.def;
+	else
+	  echo EXPORTS > $output_objdir/$soname.def;
+	  cat $export_symbols >> $output_objdir/$soname.def;
+	fi~
+	$CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+      else
+	ld_shlibs=no
+      fi
+      ;;
+
+    haiku*)
+      archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+      link_all_deplibs=yes
+      ;;
+
+    interix[3-9]*)
+      hardcode_direct=no
+      hardcode_shlibpath_var=no
+      hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
+      export_dynamic_flag_spec='${wl}-E'
+      # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
+      # Instead, shared libraries are loaded at an image base (0x10000000 by
+      # default) and relocated if they conflict, which is a slow, very
+      # memory-consuming and fragmenting process.  To avoid this, we pick
+      # a random, 256 KiB-aligned image base between 0x50000000 and
+      # 0x6FFC0000 at link time.  Moving up from 0x10000000 also allows
+      # more sbrk(2) space.
+      archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+      archive_expsym_cmds='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+      ;;
+
+    gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu)
+      tmp_diet=no
+      if test "$host_os" = linux-dietlibc; then
+	case $cc_basename in
+	  diet\ *) tmp_diet=yes;;	# linux-dietlibc with static linking (!diet-dyn)
+	esac
+      fi
+      if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \
+	 && test "$tmp_diet" = no
+      then
+	tmp_addflag=' $pic_flag'
+	tmp_sharedflag='-shared'
+	case $cc_basename,$host_cpu in
+        pgcc*)				# Portland Group C compiler
+	  whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+	  tmp_addflag=' $pic_flag'
+	  ;;
+	pgf77* | pgf90* | pgf95* | pgfortran*)
+					# Portland Group f77 and f90 compilers
+	  whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+	  tmp_addflag=' $pic_flag -Mnomain' ;;
+	ecc*,ia64* | icc*,ia64*)	# Intel C compiler on ia64
+	  tmp_addflag=' -i_dynamic' ;;
+	efc*,ia64* | ifort*,ia64*)	# Intel Fortran compiler on ia64
+	  tmp_addflag=' -i_dynamic -nofor_main' ;;
+	ifc* | ifort*)			# Intel Fortran compiler
+	  tmp_addflag=' -nofor_main' ;;
+	lf95*)				# Lahey Fortran 8.1
+	  whole_archive_flag_spec=
+	  tmp_sharedflag='--shared' ;;
+	xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below)
+	  tmp_sharedflag='-qmkshrobj'
+	  tmp_addflag= ;;
+	nvcc*)	# Cuda Compiler Driver 2.2
+	  whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+	  compiler_needs_object=yes
+	  ;;
+	esac
+	case `$CC -V 2>&1 | sed 5q` in
+	*Sun\ C*)			# Sun C 5.9
+	  whole_archive_flag_spec='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+	  compiler_needs_object=yes
+	  tmp_sharedflag='-G' ;;
+	*Sun\ F*)			# Sun Fortran 8.3
+	  tmp_sharedflag='-G' ;;
+	esac
+	archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+
+        if test "x$supports_anon_versioning" = xyes; then
+          archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~
+	    cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+	    echo "local: *; };" >> $output_objdir/$libname.ver~
+	    $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib'
+        fi
+
+	case $cc_basename in
+	xlf* | bgf* | bgxlf* | mpixlf*)
+	  # IBM XL Fortran 10.1 on PPC cannot create shared libs itself
+	  whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive'
+	  hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+	  archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib'
+	  if test "x$supports_anon_versioning" = xyes; then
+	    archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~
+	      cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+	      echo "local: *; };" >> $output_objdir/$libname.ver~
+	      $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib'
+	  fi
+	  ;;
+	esac
+      else
+        ld_shlibs=no
+      fi
+      ;;
+
+    netbsd* | netbsdelf*-gnu)
+      if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+	archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
+	wlarc=
+      else
+	archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+	archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+      fi
+      ;;
+
+    solaris*)
+      if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then
+	ld_shlibs=no
+	cat <<_LT_EOF 1>&2
+
+*** Warning: The releases 2.8.* of the GNU linker cannot reliably
+*** create shared libraries on Solaris systems.  Therefore, libtool
+*** is disabling shared libraries support.  We urge you to upgrade GNU
+*** binutils to release 2.9.1 or newer.  Another option is to modify
+*** your PATH or compiler configuration so that the native linker is
+*** used, and then restart.
+
+_LT_EOF
+      elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+	archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+      else
+	ld_shlibs=no
+      fi
+      ;;
+
+    sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*)
+      case `$LD -v 2>&1` in
+        *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*)
+	ld_shlibs=no
+	cat <<_LT_EOF 1>&2
+
+*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot
+*** reliably create shared libraries on SCO systems.  Therefore, libtool
+*** is disabling shared libraries support.  We urge you to upgrade GNU
+*** binutils to release 2.16.91.0.3 or newer.  Another option is to modify
+*** your PATH or compiler configuration so that the native linker is
+*** used, and then restart.
+
+_LT_EOF
+	;;
+	*)
+	  # For security reasons, it is highly recommended that you always
+	  # use absolute paths for naming shared libraries, and exclude the
+	  # DT_RUNPATH tag from executables and libraries.  But doing so
+	  # requires that you compile everything twice, which is a pain.
+	  if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	    hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+	    archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+	    archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+	  else
+	    ld_shlibs=no
+	  fi
+	;;
+      esac
+      ;;
+
+    sunos4*)
+      archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+      wlarc=
+      hardcode_direct=yes
+      hardcode_shlibpath_var=no
+      ;;
+
+    *)
+      if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+	archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+      else
+	ld_shlibs=no
+      fi
+      ;;
+    esac
+
+    if test "$ld_shlibs" = no; then
+      runpath_var=
+      hardcode_libdir_flag_spec=
+      export_dynamic_flag_spec=
+      whole_archive_flag_spec=
+    fi
+  else
+    # PORTME fill in a description of your system's linker (not GNU ld)
+    case $host_os in
+    aix3*)
+      allow_undefined_flag=unsupported
+      always_export_symbols=yes
+      archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname'
+      # Note: this linker hardcodes the directories in LIBPATH if there
+      # are no directories specified by -L.
+      hardcode_minus_L=yes
+      if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then
+	# Neither direct hardcoding nor static linking is supported with a
+	# broken collect2.
+	hardcode_direct=unsupported
+      fi
+      ;;
+
+    aix[4-9]*)
+      if test "$host_cpu" = ia64; then
+	# On IA64, the linker does run time linking by default, so we don't
+	# have to do anything special.
+	aix_use_runtimelinking=no
+	exp_sym_flag='-Bexport'
+	no_entry_flag=""
+      else
+	# If we're using GNU nm, then we don't want the "-C" option.
+	# -C means demangle to AIX nm, but to GNU nm it means don't demangle.
+	# Also, AIX nm treats weak defined symbols like other global
+	# defined symbols, whereas GNU nm marks them as "W".
+	if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
+	  export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+	else
+	  export_symbols_cmds='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+	fi
+	aix_use_runtimelinking=no
+
+	# Test if we are trying to use run time linking or normal
+	# AIX style linking. If -brtl is somewhere in LDFLAGS, we
+	# need to do runtime linking.
+	case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*)
+	  for ld_flag in $LDFLAGS; do
+	  if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then
+	    aix_use_runtimelinking=yes
+	    break
+	  fi
+	  done
+	  ;;
+	esac
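+	# For illustration (hypothetical invocation): configuring with
+	# LDFLAGS="-Wl,-brtl" selects AIX run time linking here, while a
+	# plain LDFLAGS keeps classic AIX linking.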
+
+	exp_sym_flag='-bexport'
+	no_entry_flag='-bnoentry'
+      fi
+
+      # When large executables or shared objects are built, AIX ld can
+      # have problems creating the table of contents.  If linking a library
+      # or program results in "error TOC overflow" add -mminimal-toc to
+      # CXXFLAGS/CFLAGS for g++/gcc.  In the cases where that is not
+      # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
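+      # For illustration (hypothetical invocation):
+      #   ./configure CFLAGS="-g -O2 -mminimal-toc" LDFLAGS="-Wl,-bbigtoc"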
+
+      archive_cmds=''
+      hardcode_direct=yes
+      hardcode_direct_absolute=yes
+      hardcode_libdir_separator=':'
+      link_all_deplibs=yes
+      file_list_spec='${wl}-f,'
+
+      if test "$GCC" = yes; then
+	case $host_os in aix4.[012]|aix4.[012].*)
+	# We only want to do this on AIX 4.2 and lower; the check
+	# below for broken collect2 doesn't work under 4.3+.
+	  collect2name=`${CC} -print-prog-name=collect2`
+	  if test -f "$collect2name" &&
+	   strings "$collect2name" | $GREP resolve_lib_name >/dev/null
+	  then
+	  # We have reworked collect2
+	  :
+	  else
+	  # We have old collect2
+	  hardcode_direct=unsupported
+	  # It fails to find uninstalled libraries when the uninstalled
+	  # path is not listed in the libpath.  Setting hardcode_minus_L
+	  # to unsupported forces relinking
+	  hardcode_minus_L=yes
+	  hardcode_libdir_flag_spec='-L$libdir'
+	  hardcode_libdir_separator=
+	  fi
+	  ;;
+	esac
+	shared_flag='-shared'
+	if test "$aix_use_runtimelinking" = yes; then
+	  shared_flag="$shared_flag "'${wl}-G'
+	fi
+	link_all_deplibs=no
+      else
+	# not using gcc
+	if test "$host_cpu" = ia64; then
+	# VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
+	# chokes on -Wl,-G. The following line is correct:
+	  shared_flag='-G'
+	else
+	  if test "$aix_use_runtimelinking" = yes; then
+	    shared_flag='${wl}-G'
+	  else
+	    shared_flag='${wl}-bM:SRE'
+	  fi
+	fi
+      fi
+
+      export_dynamic_flag_spec='${wl}-bexpall'
+      # It seems that -bexpall does not export symbols beginning with
+      # underscore (_), so it is better to generate a list of symbols to export.
+      always_export_symbols=yes
+      if test "$aix_use_runtimelinking" = yes; then
+	# Warning - without using the other runtime loading flags (-brtl),
+	# -berok will link without error, but may produce a broken library.
+	allow_undefined_flag='-berok'
+        # Determine the default libpath from the value encoded in an
+        # empty executable.
+        if test "${lt_cv_aix_libpath+set}" = set; then
+  aix_libpath=$lt_cv_aix_libpath
+else
+  if ${lt_cv_aix_libpath_+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+
+  lt_aix_libpath_sed='
+      /Import File Strings/,/^$/ {
+	  /^0/ {
+	      s/^0  *\([^ ]*\) *$/\1/
+	      p
+	  }
+      }'
+  lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  # Check for a 64-bit object if we didn't find anything.
+  if test -z "$lt_cv_aix_libpath_"; then
+    lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  fi
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+  if test -z "$lt_cv_aix_libpath_"; then
+    lt_cv_aix_libpath_="/usr/lib:/lib"
+  fi
+
+fi
+
+  aix_libpath=$lt_cv_aix_libpath_
+fi
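+        # For illustration: `dump -H` prints an "Import File Strings"
+        # section whose index-0 entry holds the default libpath, so the sed
+        # script above typically leaves something like /usr/lib:/lib in
+        # lt_cv_aix_libpath_ (also the fallback used when nothing is found).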
+
+        hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath"
+        archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag"
+      else
+	if test "$host_cpu" = ia64; then
+	  hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib'
+	  allow_undefined_flag="-z nodefs"
+	  archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols"
+	else
+	 # Determine the default libpath from the value encoded in an
+	 # empty executable.
+	 if test "${lt_cv_aix_libpath+set}" = set; then
+  aix_libpath=$lt_cv_aix_libpath
+else
+  if ${lt_cv_aix_libpath_+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+
+  lt_aix_libpath_sed='
+      /Import File Strings/,/^$/ {
+	  /^0/ {
+	      s/^0  *\([^ ]*\) *$/\1/
+	      p
+	  }
+      }'
+  lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  # Check for a 64-bit object if we didn't find anything.
+  if test -z "$lt_cv_aix_libpath_"; then
+    lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  fi
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+  if test -z "$lt_cv_aix_libpath_"; then
+    lt_cv_aix_libpath_="/usr/lib:/lib"
+  fi
+
+fi
+
+  aix_libpath=$lt_cv_aix_libpath_
+fi
+
+	 hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath"
+	  # Warning - without using the other run time loading flags,
+	  # -berok will link without error, but may produce a broken library.
+	  no_undefined_flag=' ${wl}-bernotok'
+	  allow_undefined_flag=' ${wl}-berok'
+	  if test "$with_gnu_ld" = yes; then
+	    # We only use this code for GNU lds that support --whole-archive.
+	    whole_archive_flag_spec='${wl}--whole-archive$convenience ${wl}--no-whole-archive'
+	  else
+	    # Exported symbols can be pulled into shared objects from archives
+	    whole_archive_flag_spec='$convenience'
+	  fi
+	  archive_cmds_need_lc=yes
+	  # This is similar to how AIX traditionally builds its shared libraries.
+	  archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname'
+	fi
+      fi
+      ;;
+
+    amigaos*)
+      case $host_cpu in
+      powerpc)
+            # see comment about AmigaOS4 .so support
+            archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+            archive_expsym_cmds=''
+        ;;
+      m68k)
+            archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+            hardcode_libdir_flag_spec='-L$libdir'
+            hardcode_minus_L=yes
+        ;;
+      esac
+      ;;
+
+    bsdi[45]*)
+      export_dynamic_flag_spec=-rdynamic
+      ;;
+
+    cygwin* | mingw* | pw32* | cegcc*)
+      # When not using gcc, we currently assume that we are using
+      # Microsoft Visual C++.
+      # hardcode_libdir_flag_spec is actually meaningless, as there is
+      # no search path for DLLs.
+      case $cc_basename in
+      cl*)
+	# Native MSVC
+	hardcode_libdir_flag_spec=' '
+	allow_undefined_flag=unsupported
+	always_export_symbols=yes
+	file_list_spec='@'
+	# Tell ltmain to make .lib files, not .a files.
+	libext=lib
+	# Tell ltmain to make .dll files, not .so files.
+	shrext_cmds=".dll"
+	# FIXME: Setting linknames here is a bad hack.
+	archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames='
+	archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+	    sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp;
+	  else
+	    sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp;
+	  fi~
+	  $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
+	  linknames='
+	# The linker will not automatically build a static lib if we build a DLL.
+	# _LT_TAGVAR(old_archive_from_new_cmds, )='true'
+	enable_shared_with_static_runtimes=yes
+	exclude_expsyms='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
+	export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols'
+	# Don't use ranlib
+	old_postinstall_cmds='chmod 644 $oldlib'
+	postlink_cmds='lt_outputfile="@OUTPUT@"~
+	  lt_tool_outputfile="@TOOL_OUTPUT@"~
+	  case $lt_outputfile in
+	    *.exe|*.EXE) ;;
+	    *)
+	      lt_outputfile="$lt_outputfile.exe"
+	      lt_tool_outputfile="$lt_tool_outputfile.exe"
+	      ;;
+	  esac~
+	  if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then
+	    $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
+	    $RM "$lt_outputfile.manifest";
+	  fi'
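+	# For illustration (assuming $MANIFEST_TOOL is the usual mt tool):
+	# the postlink step above embeds foo.exe.manifest into a freshly
+	# linked foo.exe and then deletes the now-redundant .manifest file.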
+	;;
+      *)
+	# Assume MSVC wrapper
+	hardcode_libdir_flag_spec=' '
+	allow_undefined_flag=unsupported
+	# Tell ltmain to make .lib files, not .a files.
+	libext=lib
+	# Tell ltmain to make .dll files, not .so files.
+	shrext_cmds=".dll"
+	# FIXME: Setting linknames here is a bad hack.
+	archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames='
+	# The linker will automatically build a .lib file if we build a DLL.
+	old_archive_from_new_cmds='true'
+	# FIXME: Should let the user specify the lib program.
+	old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs'
+	enable_shared_with_static_runtimes=yes
+	;;
+      esac
+      ;;
+
+    darwin* | rhapsody*)
+
+
+  archive_cmds_need_lc=no
+  hardcode_direct=no
+  hardcode_automatic=yes
+  hardcode_shlibpath_var=unsupported
+  if test "$lt_cv_ld_force_load" = "yes"; then
+    whole_archive_flag_spec='`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`'
+
+  else
+    whole_archive_flag_spec=''
+  fi
+  link_all_deplibs=yes
+  allow_undefined_flag="$_lt_dar_allow_undefined"
+  case $cc_basename in
+     ifort*) _lt_dar_can_shared=yes ;;
+     *) _lt_dar_can_shared=$GCC ;;
+  esac
+  if test "$_lt_dar_can_shared" = "yes"; then
+    output_verbose_link_cmd=func_echo_all
+    archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}"
+    module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}"
+    archive_expsym_cmds="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}"
+    module_expsym_cmds="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}"
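+    # For illustration: Mach-O prepends an underscore to C symbol names, so
+    # the sed 's,^,_,' above turns a hypothetical export list entry "foo"
+    # into "_foo" before the list is handed to the linker.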
+
+  else
+  ld_shlibs=no
+  fi
+
+      ;;
+
+    dgux*)
+      archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+      hardcode_libdir_flag_spec='-L$libdir'
+      hardcode_shlibpath_var=no
+      ;;
+
+    # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor
+    # support.  Future versions do this automatically, but an explicit c++rt0.o
+    # does not break anything, and helps significantly (at the cost of a little
+    # extra space).
+    freebsd2.2*)
+      archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o'
+      hardcode_libdir_flag_spec='-R$libdir'
+      hardcode_direct=yes
+      hardcode_shlibpath_var=no
+      ;;
+
+    # Unfortunately, older versions of FreeBSD 2 do not have this feature.
+    freebsd2.*)
+      archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+      hardcode_direct=yes
+      hardcode_minus_L=yes
+      hardcode_shlibpath_var=no
+      ;;
+
+    # FreeBSD 3 and greater uses gcc -shared to do shared libraries.
+    freebsd* | dragonfly*)
+      archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+      hardcode_libdir_flag_spec='-R$libdir'
+      hardcode_direct=yes
+      hardcode_shlibpath_var=no
+      ;;
+
+    hpux9*)
+      if test "$GCC" = yes; then
+	archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+      else
+	archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+      fi
+      hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
+      hardcode_libdir_separator=:
+      hardcode_direct=yes
+
+      # hardcode_minus_L: Not really in the search PATH,
+      # but as the default location of the library.
+      hardcode_minus_L=yes
+      export_dynamic_flag_spec='${wl}-E'
+      ;;
+
+    hpux10*)
+      if test "$GCC" = yes && test "$with_gnu_ld" = no; then
+	archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+      else
+	archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'
+      fi
+      if test "$with_gnu_ld" = no; then
+	hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
+	hardcode_libdir_separator=:
+	hardcode_direct=yes
+	hardcode_direct_absolute=yes
+	export_dynamic_flag_spec='${wl}-E'
+	# hardcode_minus_L: Not really in the search PATH,
+	# but as the default location of the library.
+	hardcode_minus_L=yes
+      fi
+      ;;
+
+    hpux11*)
+      if test "$GCC" = yes && test "$with_gnu_ld" = no; then
+	case $host_cpu in
+	hppa*64*)
+	  archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	ia64*)
+	  archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	*)
+	  archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	esac
+      else
+	case $host_cpu in
+	hppa*64*)
+	  archive_cmds='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	ia64*)
+	  archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	*)
+
+	  # Older versions of the 11.00 compiler do not understand -b yet
+	  # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does)
+	  { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5
+$as_echo_n "checking if $CC understands -b... " >&6; }
+if ${lt_cv_prog_compiler__b+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_prog_compiler__b=no
+   save_LDFLAGS="$LDFLAGS"
+   LDFLAGS="$LDFLAGS -b"
+   echo "$lt_simple_link_test_code" > conftest.$ac_ext
+   if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
+     # The linker can only warn and ignore the option if not recognized,
+     # so say no if there are warnings.
+     if test -s conftest.err; then
+       # Append any errors to the config.log.
+       cat conftest.err 1>&5
+       $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp
+       $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+       if diff conftest.exp conftest.er2 >/dev/null; then
+         lt_cv_prog_compiler__b=yes
+       fi
+     else
+       lt_cv_prog_compiler__b=yes
+     fi
+   fi
+   $RM -r conftest*
+   LDFLAGS="$save_LDFLAGS"
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5
+$as_echo "$lt_cv_prog_compiler__b" >&6; }
+
+if test x"$lt_cv_prog_compiler__b" = xyes; then
+    archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+else
+    archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'
+fi
+
+	  ;;
+	esac
+      fi
+      if test "$with_gnu_ld" = no; then
+	hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
+	hardcode_libdir_separator=:
+
+	case $host_cpu in
+	hppa*64*|ia64*)
+	  hardcode_direct=no
+	  hardcode_shlibpath_var=no
+	  ;;
+	*)
+	  hardcode_direct=yes
+	  hardcode_direct_absolute=yes
+	  export_dynamic_flag_spec='${wl}-E'
+
+	  # hardcode_minus_L: Not really in the search PATH,
+	  # but as the default location of the library.
+	  hardcode_minus_L=yes
+	  ;;
+	esac
+      fi
+      ;;
+
+    irix5* | irix6* | nonstopux*)
+      if test "$GCC" = yes; then
+	archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+	# Try to use the -exported_symbol ld option; if it does not
+	# work, assume that -exports_file does not work either and
+	# implicitly export all symbols.
+	# This should be the same for all languages, so no per-tag cache variable.
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5
+$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... " >&6; }
+if ${lt_cv_irix_exported_symbol+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  save_LDFLAGS="$LDFLAGS"
+	   LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null"
+	   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+int foo (void) { return 0; }
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  lt_cv_irix_exported_symbol=yes
+else
+  lt_cv_irix_exported_symbol=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+           LDFLAGS="$save_LDFLAGS"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5
+$as_echo "$lt_cv_irix_exported_symbol" >&6; }
+	if test "$lt_cv_irix_exported_symbol" = yes; then
+          archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib'
+	fi
+      else
+	archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+	archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib'
+      fi
+      archive_cmds_need_lc='no'
+      hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+      hardcode_libdir_separator=:
+      inherit_rpath=yes
+      link_all_deplibs=yes
+      ;;
+
+    netbsd* | netbsdelf*-gnu)
+      if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+	archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'  # a.out
+      else
+	archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags'      # ELF
+      fi
+      hardcode_libdir_flag_spec='-R$libdir'
+      hardcode_direct=yes
+      hardcode_shlibpath_var=no
+      ;;
+
+    newsos6)
+      archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+      hardcode_direct=yes
+      hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+      hardcode_libdir_separator=:
+      hardcode_shlibpath_var=no
+      ;;
+
+    *nto* | *qnx*)
+      ;;
+
+    openbsd*)
+      if test -f /usr/libexec/ld.so; then
+	hardcode_direct=yes
+	hardcode_shlibpath_var=no
+	hardcode_direct_absolute=yes
+	if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+	  archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+	  archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols'
+	  hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
+	  export_dynamic_flag_spec='${wl}-E'
+	else
+	  case $host_os in
+	   openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*)
+	     archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+	     hardcode_libdir_flag_spec='-R$libdir'
+	     ;;
+	   *)
+	     archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+	     hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
+	     ;;
+	  esac
+	fi
+      else
+	ld_shlibs=no
+      fi
+      ;;
+
+    os2*)
+      hardcode_libdir_flag_spec='-L$libdir'
+      hardcode_minus_L=yes
+      allow_undefined_flag=unsupported
+      archive_cmds='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~echo DATA >> $output_objdir/$libname.def~echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def'
+      old_archive_from_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def'
+      ;;
+
+    osf3*)
+      if test "$GCC" = yes; then
+	allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
+	archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+      else
+	allow_undefined_flag=' -expect_unresolved \*'
+	archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+      fi
+      archive_cmds_need_lc='no'
+      hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+      hardcode_libdir_separator=:
+      ;;
+
+    osf4* | osf5*)	# as osf3* with the addition of -msym flag
+      if test "$GCC" = yes; then
+	allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
+	archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+	hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+      else
+	allow_undefined_flag=' -expect_unresolved \*'
+	archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+	archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~
+	$CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp'
+
+	# Both the C and C++ compilers support -rpath directly
+	hardcode_libdir_flag_spec='-rpath $libdir'
+      fi
+      archive_cmds_need_lc='no'
+      hardcode_libdir_separator=:
+      ;;
+
+    solaris*)
+      no_undefined_flag=' -z defs'
+      if test "$GCC" = yes; then
+	wlarc='${wl}'
+	archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+	archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+	  $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+      else
+	case `$CC -V 2>&1` in
+	*"Compilers 5.0"*)
+	  wlarc=''
+	  archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags'
+	  archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+	  $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp'
+	  ;;
+	*)
+	  wlarc='${wl}'
+	  archive_cmds='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags'
+	  archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+	  $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+	  ;;
+	esac
+      fi
+      hardcode_libdir_flag_spec='-R$libdir'
+      hardcode_shlibpath_var=no
+      case $host_os in
+      solaris2.[0-5] | solaris2.[0-5].*) ;;
+      *)
+	# The compiler driver will combine and reorder linker options,
+	# but understands `-z linker_flag'.  GCC discards it without `$wl',
+	# but is careful enough not to reorder.
+	# Supported since Solaris 2.6 (maybe 2.5.1?)
+	if test "$GCC" = yes; then
+	  whole_archive_flag_spec='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract'
+	else
+	  whole_archive_flag_spec='-z allextract$convenience -z defaultextract'
+	fi
+	;;
+      esac
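+      # For illustration (hypothetical convenience archive libconv.a): with
+      # GCC the archive is bracketed roughly as
+      #   -Wl,-z -Wl,allextract libconv.a -Wl,-z -Wl,defaultextract
+      # so its members are pulled in whole, then extraction reverts to normal.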
+      link_all_deplibs=yes
+      ;;
+
+    sunos4*)
+      if test "x$host_vendor" = xsequent; then
+	# Use $CC to link under Sequent, because it throws in some extra .o
+	# files that make .init and .fini sections work.
+	archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags'
+      else
+	archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags'
+      fi
+      hardcode_libdir_flag_spec='-L$libdir'
+      hardcode_direct=yes
+      hardcode_minus_L=yes
+      hardcode_shlibpath_var=no
+      ;;
+
+    sysv4)
+      case $host_vendor in
+	sni)
+	  archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+	  hardcode_direct=yes # is this really true???
+	;;
+	siemens)
+	  ## LD is ld; it makes a PLAMLIB.
+	  ## CC just makes a GrossModule.
+	  archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags'
+	  reload_cmds='$CC -r -o $output$reload_objs'
+	  hardcode_direct=no
+        ;;
+	motorola)
+	  archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+	  hardcode_direct=no # Motorola manual says yes, but my tests say they lie
+	;;
+      esac
+      runpath_var='LD_RUN_PATH'
+      hardcode_shlibpath_var=no
+      ;;
+
+    sysv4.3*)
+      archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+      hardcode_shlibpath_var=no
+      export_dynamic_flag_spec='-Bexport'
+      ;;
+
+    sysv4*MP*)
+      if test -d /usr/nec; then
+	archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+	hardcode_shlibpath_var=no
+	runpath_var=LD_RUN_PATH
+	hardcode_runpath_var=yes
+	ld_shlibs=yes
+      fi
+      ;;
+
+    sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*)
+      no_undefined_flag='${wl}-z,text'
+      archive_cmds_need_lc=no
+      hardcode_shlibpath_var=no
+      runpath_var='LD_RUN_PATH'
+
+      if test "$GCC" = yes; then
+	archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+      else
+	archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+      fi
+      ;;
+
+    sysv5* | sco3.2v5* | sco5v6*)
+      # Note: We can NOT use -z defs as we might desire, because we do not
+      # link with -lc, and that would cause any symbols used from libc to
+      # always be unresolved, which means just about no library would
+      # ever link correctly.  If we're not using GNU ld we use -z text
+      # though, which does catch some bad symbols but isn't as heavy-handed
+      # as -z defs.
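+      # For illustration: `-z text` makes the link fail if relocations
+      # remain against the read-only text segment (a common symptom of
+      # non-PIC objects in a shared library), while `-z nodefs` tolerates
+      # the deliberately unresolved libc references described above.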
+      no_undefined_flag='${wl}-z,text'
+      allow_undefined_flag='${wl}-z,nodefs'
+      archive_cmds_need_lc=no
+      hardcode_shlibpath_var=no
+      hardcode_libdir_flag_spec='${wl}-R,$libdir'
+      hardcode_libdir_separator=':'
+      link_all_deplibs=yes
+      export_dynamic_flag_spec='${wl}-Bexport'
+      runpath_var='LD_RUN_PATH'
+
+      if test "$GCC" = yes; then
+	archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+      else
+	archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+      fi
+      ;;
+
+    uts4*)
+      archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+      hardcode_libdir_flag_spec='-L$libdir'
+      hardcode_shlibpath_var=no
+      ;;
+
+    *)
+      ld_shlibs=no
+      ;;
+    esac
+
+    if test x$host_vendor = xsni; then
+      case $host in
+      sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+	export_dynamic_flag_spec='${wl}-Blargedynsym'
+	;;
+      esac
+    fi
+  fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5
+$as_echo "$ld_shlibs" >&6; }
+test "$ld_shlibs" = no && can_build_shared=no
+
+with_gnu_ld=$with_gnu_ld
+
+#
+# Do we need to explicitly link libc?
+#
+case "x$archive_cmds_need_lc" in
+x|xyes)
+  # Assume -lc should be added
+  archive_cmds_need_lc=yes
+
+  if test "$enable_shared" = yes && test "$GCC" = yes; then
+    case $archive_cmds in
+    *'~'*)
+      # FIXME: we may have to deal with multi-command sequences.
+      ;;
+    '$CC '*)
+      # Test whether the compiler implicitly links with -lc since on some
+      # systems, -lgcc has to come before -lc. If gcc already passes -lc
+      # to ld, don't add -lc before -lgcc.
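+      # For illustration: with compiler_flags=-v the archive command below
+      # echoes the driver's full link line; if a literal " -lc " appears in
+      # it, the compiler already links libc and libtool omits its own -lc.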
+      { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5
+$as_echo_n "checking whether -lc should be explicitly linked in... " >&6; }
+if ${lt_cv_archive_cmds_need_lc+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  $RM conftest*
+	echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+	if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+  (eval $ac_compile) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } 2>conftest.err; then
+	  soname=conftest
+	  lib=conftest
+	  libobjs=conftest.$ac_objext
+	  deplibs=
+	  wl=$lt_prog_compiler_wl
+	  pic_flag=$lt_prog_compiler_pic
+	  compiler_flags=-v
+	  linker_flags=-v
+	  verstring=
+	  output_objdir=.
+	  libname=conftest
+	  lt_save_allow_undefined_flag=$allow_undefined_flag
+	  allow_undefined_flag=
+	  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5
+  (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+	  then
+	    lt_cv_archive_cmds_need_lc=no
+	  else
+	    lt_cv_archive_cmds_need_lc=yes
+	  fi
+	  allow_undefined_flag=$lt_save_allow_undefined_flag
+	else
+	  cat conftest.err 1>&5
+	fi
+	$RM conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5
+$as_echo "$lt_cv_archive_cmds_need_lc" >&6; }
+      archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc
+      ;;
+    esac
+  fi
+  ;;
+esac
+
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5
+$as_echo_n "checking dynamic linker characteristics... " >&6; }
+
+if test "$GCC" = yes; then
+  case $host_os in
+    darwin*) lt_awk_arg="/^libraries:/,/LR/" ;;
+    *) lt_awk_arg="/^libraries:/" ;;
+  esac
+  case $host_os in
+    mingw* | cegcc*) lt_sed_strip_eq="s,=\([A-Za-z]:\),\1,g" ;;
+    *) lt_sed_strip_eq="s,=/,/,g" ;;
+  esac
+  lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq`
+  case $lt_search_path_spec in
+  *\;*)
+    # If the path contains ";" then we assume it to be the separator;
+    # otherwise default to the standard path separator (i.e. ":").  It is
+    # assumed that no part of a normal pathname contains ";", which should
+    # be okay in the real world, where ";" in dirpaths is itself problematic.
+    lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'`
+    ;;
+  *)
+    lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"`
+    ;;
+  esac
+  # Ok, now we have the path, separated by spaces, we can step through it
+  # and add multilib dir if necessary.
+  lt_tmp_lt_search_path_spec=
+  lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null`
+  for lt_sys_path in $lt_search_path_spec; do
+    if test -d "$lt_sys_path/$lt_multi_os_dir"; then
+      lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir"
+    else
+      test -d "$lt_sys_path" && \
+	lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path"
+    fi
+  done
+  lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk '
+BEGIN {RS=" "; FS="/|\n";} {
+  lt_foo="";
+  lt_count=0;
+  for (lt_i = NF; lt_i > 0; lt_i--) {
+    if ($lt_i != "" && $lt_i != ".") {
+      if ($lt_i == "..") {
+        lt_count++;
+      } else {
+        if (lt_count == 0) {
+          lt_foo="/" $lt_i lt_foo;
+        } else {
+          lt_count--;
+        }
+      }
+    }
+  }
+  if (lt_foo != "") { lt_freq[lt_foo]++; }
+  if (lt_freq[lt_foo] == 1) { print lt_foo; }
+}'`
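+  # For illustration (hypothetical input path): the awk program above
+  # canonicalizes "/usr/lib/../lib64" to "/usr/lib64" (each ".." swallows
+  # the preceding component), and the lt_freq counter prints every
+  # resulting path only once.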
+  # AWK program above erroneously prepends '/' to C:/dos/paths
+  # for these hosts.
+  case $host_os in
+    mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\
+      $SED 's,/\([A-Za-z]:\),\1,g'` ;;
+  esac
+  sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP`
+else
+  sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
+fi
+library_names_spec=
+libname_spec='lib$name'
+soname_spec=
+shrext_cmds=".so"
+postinstall_cmds=
+postuninstall_cmds=
+finish_cmds=
+finish_eval=
+shlibpath_var=
+shlibpath_overrides_runpath=unknown
+version_type=none
+dynamic_linker="$host_os ld.so"
+sys_lib_dlsearch_path_spec="/lib /usr/lib"
+need_lib_prefix=unknown
+hardcode_into_libs=no
+
+# when you set need_version to no, make sure it does not cause -set_version
+# flags to be left without arguments
+need_version=unknown
+
+case $host_os in
+aix3*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a'
+  shlibpath_var=LIBPATH
+
+  # AIX 3 has no versioning support, so we append a major version to the name.
+  soname_spec='${libname}${release}${shared_ext}$major'
+  ;;
+
+aix[4-9]*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  hardcode_into_libs=yes
+  if test "$host_cpu" = ia64; then
+    # AIX 5 supports IA64
+    library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}'
+    shlibpath_var=LD_LIBRARY_PATH
+  else
+    # With GCC up to 2.95.x, collect2 would create an import file
+    # for dependence libraries.  The import file would start with
+    # the line `#! .'.  This would cause the generated library to
+    # depend on `.', always an invalid library.  This was fixed in
+    # development snapshots of GCC prior to 3.0.
+    case $host_os in
+      aix4 | aix4.[01] | aix4.[01].*)
+      if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)'
+	   echo ' yes '
+	   echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then
+	:
+      else
+	can_build_shared=no
+      fi
+      ;;
+    esac
+    # AIX (on Power*) has no versioning support, so currently we cannot
+    # hardcode a correct soname into the executable.  Versioning support
+    # could probably be added to collect2, making additional links useful
+    # in the future.
+    if test "$aix_use_runtimelinking" = yes; then
+      # If using run time linking (on AIX 4.2 or later) use lib<name>.so
+      # instead of lib<name>.a to let people know that these are not
+      # typical AIX shared libraries.
+      library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+    else
+      # We preserve .a as extension for shared libraries through AIX4.2
+      # and later when we are not doing run time linking.
+      library_names_spec='${libname}${release}.a $libname.a'
+      soname_spec='${libname}${release}${shared_ext}$major'
+    fi
+    shlibpath_var=LIBPATH
+  fi
+  ;;
+
+amigaos*)
+  case $host_cpu in
+  powerpc)
+    # Since July 2007 AmigaOS4 officially supports .so libraries.
+    # When compiling the executable, add -use-dynld -Lsobjs: to the compile line.
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+    ;;
+  m68k)
+    library_names_spec='$libname.ixlibrary $libname.a'
+    # Create ${libname}_ixlibrary.a entries in /sys/libs.
+    finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
+    ;;
+  esac
+  ;;
+
+beos*)
+  library_names_spec='${libname}${shared_ext}'
+  dynamic_linker="$host_os ld.so"
+  shlibpath_var=LIBRARY_PATH
+  ;;
+
+bsdi[45]*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
+  sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib"
+  # the default ld.so.conf also contains /usr/contrib/lib and
+  # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow
+  # libtool to hard-code these into programs
+  ;;
+
+cygwin* | mingw* | pw32* | cegcc*)
+  version_type=windows
+  shrext_cmds=".dll"
+  need_version=no
+  need_lib_prefix=no
+
+  case $GCC,$cc_basename in
+  yes,*)
+    # gcc
+    library_names_spec='$libname.dll.a'
+    # DLL is installed to $(libdir)/../bin by postinstall_cmds
+    postinstall_cmds='base_file=`basename \${file}`~
+      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~
+      dldir=$destdir/`dirname \$dlpath`~
+      test -d \$dldir || mkdir -p \$dldir~
+      $install_prog $dir/$dlname \$dldir/$dlname~
+      chmod a+x \$dldir/$dlname~
+      if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
+        eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
+      fi'
+    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+      dlpath=$dir/\$dldll~
+       $RM \$dlpath'
+    shlibpath_overrides_runpath=yes
+
+    case $host_os in
+    cygwin*)
+      # Cygwin DLLs use 'cyg' prefix rather than 'lib'
+      soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+
+      sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"
+      ;;
+    mingw* | cegcc*)
+      # MinGW DLLs use traditional 'lib' prefix
+      soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+      ;;
+    pw32*)
+      # pw32 DLLs use 'pw' prefix rather than 'lib'
+      library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+      ;;
+    esac
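+    # For illustration (hypothetical library "libfoo" with an empty
+    # $release and versuffix "-1"): the specs above yield cygfoo-1.dll on
+    # Cygwin, libfoo-1.dll on MinGW, and pwfoo-1.dll on pw32.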
+    dynamic_linker='Win32 ld.exe'
+    ;;
+
+  *,cl*)
+    # Native MSVC
+    libname_spec='$name'
+    soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+    library_names_spec='${libname}.dll.lib'
+
+    case $build_os in
+    mingw*)
+      sys_lib_search_path_spec=
+      lt_save_ifs=$IFS
+      IFS=';'
+      for lt_path in $LIB
+      do
+        IFS=$lt_save_ifs
+        # Let DOS variable expansion print the short 8.3 style file name.
+        lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"`
+        sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path"
+      done
+      IFS=$lt_save_ifs
+      # Convert to MSYS style.
+      sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'`
+      ;;
+    cygwin*)
+      # Convert to unix form, then to dos form, then back to unix form
+      # but this time dos style (no spaces!) so that the unix form looks
+      # like /cygdrive/c/PROGRA~1:/cygdr...
+      sys_lib_search_path_spec=`cygpath --path --unix "$LIB"`
+      sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null`
+      sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+      ;;
+    *)
+      sys_lib_search_path_spec="$LIB"
+      if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then
+        # It is most probably a Windows format PATH.
+        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'`
+      else
+        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+      fi
+      # FIXME: find the short name or the path components, as spaces are
+      # common. (e.g. "Program Files" -> "PROGRA~1")
+      ;;
+    esac
+
+    # DLL is installed to $(libdir)/../bin by postinstall_cmds
+    postinstall_cmds='base_file=`basename \${file}`~
+      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~
+      dldir=$destdir/`dirname \$dlpath`~
+      test -d \$dldir || mkdir -p \$dldir~
+      $install_prog $dir/$dlname \$dldir/$dlname'
+    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+      dlpath=$dir/\$dldll~
+       $RM \$dlpath'
+    shlibpath_overrides_runpath=yes
+    dynamic_linker='Win32 link.exe'
+    ;;
+
+  *)
+    # Assume MSVC wrapper
+    library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib'
+    dynamic_linker='Win32 ld.exe'
+    ;;
+  esac
+  # FIXME: first we should search . and the directory the executable is in
+  shlibpath_var=PATH
+  ;;
+
+darwin* | rhapsody*)
+  dynamic_linker="$host_os dyld"
+  version_type=darwin
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext'
+  soname_spec='${libname}${release}${major}$shared_ext'
+  shlibpath_overrides_runpath=yes
+  shlibpath_var=DYLD_LIBRARY_PATH
+  shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`'
+
+  sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"
+  sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib'
+  ;;
+
+dgux*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  ;;
+
+freebsd* | dragonfly*)
+  # DragonFly does not have aout.  When/if they implement a new
+  # versioning mechanism, adjust this.
+  if test -x /usr/bin/objformat; then
+    objformat=`/usr/bin/objformat`
+  else
+    case $host_os in
+    freebsd[23].*) objformat=aout ;;
+    *) objformat=elf ;;
+    esac
+  fi
+  version_type=freebsd-$objformat
+  case $version_type in
+    freebsd-elf*)
+      library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}'
+      need_version=no
+      need_lib_prefix=no
+      ;;
+    freebsd-*)
+      library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix'
+      need_version=yes
+      ;;
+  esac
+  shlibpath_var=LD_LIBRARY_PATH
+  case $host_os in
+  freebsd2.*)
+    shlibpath_overrides_runpath=yes
+    ;;
+  freebsd3.[01]* | freebsdelf3.[01]*)
+    shlibpath_overrides_runpath=yes
+    hardcode_into_libs=yes
+    ;;
+  freebsd3.[2-9]* | freebsdelf3.[2-9]* | \
+  freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1)
+    shlibpath_overrides_runpath=no
+    hardcode_into_libs=yes
+    ;;
+  *) # from 4.6 on, and DragonFly
+    shlibpath_overrides_runpath=yes
+    hardcode_into_libs=yes
+    ;;
+  esac
+  ;;
+
+haiku*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  dynamic_linker="$host_os runtime_loader"
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib'
+  hardcode_into_libs=yes
+  ;;
+
+hpux9* | hpux10* | hpux11*)
+  # Give a soname corresponding to the major version so that dld.sl refuses to
+  # link against other versions.
+  version_type=sunos
+  need_lib_prefix=no
+  need_version=no
+  case $host_cpu in
+  ia64*)
+    shrext_cmds='.so'
+    hardcode_into_libs=yes
+    dynamic_linker="$host_os dld.so"
+    shlibpath_var=LD_LIBRARY_PATH
+    shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+    soname_spec='${libname}${release}${shared_ext}$major'
+    if test "X$HPUX_IA64_MODE" = X32; then
+      sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib"
+    else
+      sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64"
+    fi
+    sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+    ;;
+  hppa*64*)
+    shrext_cmds='.sl'
+    hardcode_into_libs=yes
+    dynamic_linker="$host_os dld.sl"
+    shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH
+    shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+    soname_spec='${libname}${release}${shared_ext}$major'
+    sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64"
+    sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+    ;;
+  *)
+    shrext_cmds='.sl'
+    dynamic_linker="$host_os dld.sl"
+    shlibpath_var=SHLIB_PATH
+    shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+    soname_spec='${libname}${release}${shared_ext}$major'
+    ;;
+  esac
+  # HP-UX runs *really* slowly unless shared libraries are mode 555, ...
+  postinstall_cmds='chmod 555 $lib'
+  # or fails outright, so override atomically:
+  install_override_mode=555
+  ;;
+
+interix[3-9]*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  ;;
+
+irix5* | irix6* | nonstopux*)
+  case $host_os in
+    nonstopux*) version_type=nonstopux ;;
+    *)
+	if test "$lt_cv_prog_gnu_ld" = yes; then
+		version_type=linux # correct to gnu/linux during the next big refactor
+	else
+		version_type=irix
+	fi ;;
+  esac
+  need_lib_prefix=no
+  need_version=no
+  soname_spec='${libname}${release}${shared_ext}$major'
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}'
+  case $host_os in
+  irix5* | nonstopux*)
+    libsuff= shlibsuff=
+    ;;
+  *)
+    case $LD in # libtool.m4 will add one of these switches to LD
+    *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ")
+      libsuff= shlibsuff= libmagic=32-bit;;
+    *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ")
+      libsuff=32 shlibsuff=N32 libmagic=N32;;
+    *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ")
+      libsuff=64 shlibsuff=64 libmagic=64-bit;;
+    *) libsuff= shlibsuff= libmagic=never-match;;
+    esac
+    ;;
+  esac
+  shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
+  shlibpath_overrides_runpath=no
+  sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}"
+  sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}"
+  hardcode_into_libs=yes
+  ;;
+
+# No shared lib support for Linux oldld, aout, or coff.
+linux*oldld* | linux*aout* | linux*coff*)
+  dynamic_linker=no
+  ;;
+
+# This must be glibc/ELF.
+linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+
+  # Some binutils ld are patched to set DT_RUNPATH
+  if ${lt_cv_shlibpath_overrides_runpath+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_shlibpath_overrides_runpath=no
+    save_LDFLAGS=$LDFLAGS
+    save_libdir=$libdir
+    eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \
+	 LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\""
+    cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  if  ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then :
+  lt_cv_shlibpath_overrides_runpath=yes
+fi
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+    LDFLAGS=$save_LDFLAGS
+    libdir=$save_libdir
+
+fi
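+  # The probe above hardcodes a dummy libdir (/foo) into a test binary and
+  # greps the objdump headers for a matching RUNPATH entry: linkers patched
+  # to emit DT_RUNPATH (rather than DT_RPATH) let LD_LIBRARY_PATH take
+  # precedence over the embedded path, hence the "yes" result.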
+
+  shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath
+
+  # This implies no fast_install, which is unacceptable.
+  # Some rework will be needed to allow for fast_install
+  # before this can be enabled.
+  hardcode_into_libs=yes
+
+  # Append ld.so.conf contents to the search path
+  if test -f /etc/ld.so.conf; then
+    lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[	 ]*hwcap[	 ]/d;s/[:,	]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '`
+    sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra"
+  fi
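+  # The awk script inlines any `include' directives, and the sed expression
+  # strips comments, hwcap lines, separators and `=' assignments, leaving a
+  # plain space-separated directory list (e.g. "/lib /usr/lib /usr/local/lib").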
+
+  # We used to test for /lib/ld.so.1 and disable shared libraries on
+  # powerpc, because MkLinux only supported shared libraries with the
+  # GNU dynamic linker.  Since that test broke with cross compilers,
+  # most powerpc-linux boxes support dynamic linking these days, and
+  # people can always pass --disable-shared, the test was removed and
+  # we now assume the GNU/Linux dynamic linker is in use.
+  dynamic_linker='GNU/Linux ld.so'
+  ;;
+
+netbsdelf*-gnu)
+  version_type=linux
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  dynamic_linker='NetBSD ld.elf_so'
+  ;;
+
+netbsd*)
+  version_type=sunos
+  need_lib_prefix=no
+  need_version=no
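+  # Feed the literal token __ELF__ through the preprocessor: ELF toolchains
+  # predefine it, so it gets rewritten and the grep fails; if it survives
+  # intact, this is an a.out toolchain.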
+  if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+    finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+    dynamic_linker='NetBSD (a.out) ld.so'
+  else
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+    soname_spec='${libname}${release}${shared_ext}$major'
+    dynamic_linker='NetBSD ld.elf_so'
+  fi
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  hardcode_into_libs=yes
+  ;;
+
+newsos6)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  ;;
+
+*nto* | *qnx*)
+  version_type=qnx
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  dynamic_linker='ldqnx.so'
+  ;;
+
+openbsd*)
+  version_type=sunos
+  sys_lib_dlsearch_path_spec="/usr/lib"
+  need_lib_prefix=no
+  # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs.
+  case $host_os in
+    openbsd3.3 | openbsd3.3.*)	need_version=yes ;;
+    *)				need_version=no  ;;
+  esac
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+  finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+    case $host_os in
+      openbsd2.[89] | openbsd2.[89].*)
+	shlibpath_overrides_runpath=no
+	;;
+      *)
+	shlibpath_overrides_runpath=yes
+	;;
+      esac
+  else
+    shlibpath_overrides_runpath=yes
+  fi
+  ;;
+
+os2*)
+  libname_spec='$name'
+  shrext_cmds=".dll"
+  need_lib_prefix=no
+  library_names_spec='$libname${shared_ext} $libname.a'
+  dynamic_linker='OS/2 ld.exe'
+  shlibpath_var=LIBPATH
+  ;;
+
+osf3* | osf4* | osf5*)
+  version_type=osf
+  need_lib_prefix=no
+  need_version=no
+  soname_spec='${libname}${release}${shared_ext}$major'
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  shlibpath_var=LD_LIBRARY_PATH
+  sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
+  sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec"
+  ;;
+
+rdos*)
+  dynamic_linker=no
+  ;;
+
+solaris*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  hardcode_into_libs=yes
+  # ldd complains unless libraries are executable
+  postinstall_cmds='chmod +x $lib'
+  ;;
+
+sunos4*)
+  version_type=sunos
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+  finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  if test "$with_gnu_ld" = yes; then
+    need_lib_prefix=no
+  fi
+  need_version=yes
+  ;;
+
+sysv4 | sysv4.3*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  case $host_vendor in
+    sni)
+      shlibpath_overrides_runpath=no
+      need_lib_prefix=no
+      runpath_var=LD_RUN_PATH
+      ;;
+    siemens)
+      need_lib_prefix=no
+      ;;
+    motorola)
+      need_lib_prefix=no
+      need_version=no
+      shlibpath_overrides_runpath=no
+      sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib'
+      ;;
+  esac
+  ;;
+
+sysv4*MP*)
+  if test -d /usr/nec; then
+    version_type=linux # correct to gnu/linux during the next big refactor
+    library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}'
+    soname_spec='$libname${shared_ext}.$major'
+    shlibpath_var=LD_LIBRARY_PATH
+  fi
+  ;;
+
+sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+  version_type=freebsd-elf
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  hardcode_into_libs=yes
+  if test "$with_gnu_ld" = yes; then
+    sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib'
+  else
+    sys_lib_search_path_spec='/usr/ccs/lib /usr/lib'
+    case $host_os in
+      sco3.2v5*)
+        sys_lib_search_path_spec="$sys_lib_search_path_spec /lib"
+	;;
+    esac
+  fi
+  sys_lib_dlsearch_path_spec='/usr/lib'
+  ;;
+
+tpf*)
+  # TPF is a cross-target only.  Preferred cross-host = GNU/Linux.
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  ;;
+
+uts4*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  ;;
+
+*)
+  dynamic_linker=no
+  ;;
+esac
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5
+$as_echo "$dynamic_linker" >&6; }
+test "$dynamic_linker" = no && can_build_shared=no
+
+variables_saved_for_relink="PATH $shlibpath_var $runpath_var"
+if test "$GCC" = yes; then
+  variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH"
+fi
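+# These variables are recorded so that relinking at install time sees the
+# same search paths as the original link; for GCC that includes the
+# variables steering its internal executable and library lookup.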
+
+if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then
+  sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec"
+fi
+if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then
+  sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec"
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5
+$as_echo_n "checking how to hardcode library paths into programs... " >&6; }
+hardcode_action=
+if test -n "$hardcode_libdir_flag_spec" ||
+   test -n "$runpath_var" ||
+   test "X$hardcode_automatic" = "Xyes" ; then
+
+  # We can hardcode non-existent directories.
+  if test "$hardcode_direct" != no &&
+     # If the only mechanism to avoid hardcoding is shlibpath_var, we
+     # have to relink, otherwise we might link with an installed library
+     # when we should be linking with a yet-to-be-installed one
+     ## test "$_LT_TAGVAR(hardcode_shlibpath_var, )" != no &&
+     test "$hardcode_minus_L" != no; then
+    # Linking always hardcodes the temporary library directory.
+    hardcode_action=relink
+  else
+    # We can link without hardcoding, and we can hardcode nonexisting dirs.
+    hardcode_action=immediate
+  fi
+else
+  # We cannot hardcode anything, or else we can only hardcode existing
+  # directories.
+  hardcode_action=unsupported
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5
+$as_echo "$hardcode_action" >&6; }
+
+if test "$hardcode_action" = relink ||
+   test "$inherit_rpath" = yes; then
+  # Fast installation is not supported
+  enable_fast_install=no
+elif test "$shlibpath_overrides_runpath" = yes ||
+     test "$enable_shared" = no; then
+  # Fast installation is not necessary
+  enable_fast_install=needless
+fi
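+# `needless' means installed programs run correctly without relinking:
+# either the shlibpath variable overrides any temporary runpath, or no
+# shared libraries are being built at all.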
+
+
+
+
+
+
+  if test "x$enable_dlopen" != xyes; then
+  enable_dlopen=unknown
+  enable_dlopen_self=unknown
+  enable_dlopen_self_static=unknown
+else
+  lt_cv_dlopen=no
+  lt_cv_dlopen_libs=
+
+  case $host_os in
+  beos*)
+    lt_cv_dlopen="load_add_on"
+    lt_cv_dlopen_libs=
+    lt_cv_dlopen_self=yes
+    ;;
+
+  mingw* | pw32* | cegcc*)
+    lt_cv_dlopen="LoadLibrary"
+    lt_cv_dlopen_libs=
+    ;;
+
+  cygwin*)
+    lt_cv_dlopen="dlopen"
+    lt_cv_dlopen_libs=
+    ;;
+
+  darwin*)
+  # if libdl is installed we need to link against it
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5
+$as_echo_n "checking for dlopen in -ldl... " >&6; }
+if ${ac_cv_lib_dl_dlopen+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_check_lib_save_LIBS=$LIBS
+LIBS="-ldl  $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+#ifdef __cplusplus
+extern "C"
+#endif
+char dlopen ();
+int
+main ()
+{
+return dlopen ();
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  ac_cv_lib_dl_dlopen=yes
+else
+  ac_cv_lib_dl_dlopen=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5
+$as_echo "$ac_cv_lib_dl_dlopen" >&6; }
+if test "x$ac_cv_lib_dl_dlopen" = xyes; then :
+  lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"
+else
+
+    lt_cv_dlopen="dyld"
+    lt_cv_dlopen_libs=
+    lt_cv_dlopen_self=yes
+
+fi
+
+    ;;
+
+  *)
+    ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load"
+if test "x$ac_cv_func_shl_load" = xyes; then :
+  lt_cv_dlopen="shl_load"
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5
+$as_echo_n "checking for shl_load in -ldld... " >&6; }
+if ${ac_cv_lib_dld_shl_load+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_check_lib_save_LIBS=$LIBS
+LIBS="-ldld  $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+#ifdef __cplusplus
+extern "C"
+#endif
+char shl_load ();
+int
+main ()
+{
+return shl_load ();
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  ac_cv_lib_dld_shl_load=yes
+else
+  ac_cv_lib_dld_shl_load=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5
+$as_echo "$ac_cv_lib_dld_shl_load" >&6; }
+if test "x$ac_cv_lib_dld_shl_load" = xyes; then :
+  lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld"
+else
+  ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen"
+if test "x$ac_cv_func_dlopen" = xyes; then :
+  lt_cv_dlopen="dlopen"
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5
+$as_echo_n "checking for dlopen in -ldl... " >&6; }
+if ${ac_cv_lib_dl_dlopen+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_check_lib_save_LIBS=$LIBS
+LIBS="-ldl  $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+#ifdef __cplusplus
+extern "C"
+#endif
+char dlopen ();
+int
+main ()
+{
+return dlopen ();
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  ac_cv_lib_dl_dlopen=yes
+else
+  ac_cv_lib_dl_dlopen=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5
+$as_echo "$ac_cv_lib_dl_dlopen" >&6; }
+if test "x$ac_cv_lib_dl_dlopen" = xyes; then :
+  lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5
+$as_echo_n "checking for dlopen in -lsvld... " >&6; }
+if ${ac_cv_lib_svld_dlopen+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_check_lib_save_LIBS=$LIBS
+LIBS="-lsvld  $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+#ifdef __cplusplus
+extern "C"
+#endif
+char dlopen ();
+int
+main ()
+{
+return dlopen ();
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  ac_cv_lib_svld_dlopen=yes
+else
+  ac_cv_lib_svld_dlopen=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5
+$as_echo "$ac_cv_lib_svld_dlopen" >&6; }
+if test "x$ac_cv_lib_svld_dlopen" = xyes; then :
+  lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5
+$as_echo_n "checking for dld_link in -ldld... " >&6; }
+if ${ac_cv_lib_dld_dld_link+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_check_lib_save_LIBS=$LIBS
+LIBS="-ldld  $LIBS"
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+#ifdef __cplusplus
+extern "C"
+#endif
+char dld_link ();
+int
+main ()
+{
+return dld_link ();
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  ac_cv_lib_dld_dld_link=yes
+else
+  ac_cv_lib_dld_dld_link=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5
+$as_echo "$ac_cv_lib_dld_dld_link" >&6; }
+if test "x$ac_cv_lib_dld_dld_link" = xyes; then :
+  lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld"
+fi
+
+
+fi
+
+
+fi
+
+
+fi
+
+
+fi
+
+
+fi
+
+    ;;
+  esac
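+  # The generic branch above probes, in order: shl_load, shl_load in -ldld
+  # (HP-UX), dlopen, dlopen in -ldl, dlopen in -lsvld (SVR4), and finally
+  # dld_link in -ldld (GNU DLD); the first hit wins.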
+
+  if test "x$lt_cv_dlopen" != xno; then
+    enable_dlopen=yes
+  else
+    enable_dlopen=no
+  fi
+
+  case $lt_cv_dlopen in
+  dlopen)
+    save_CPPFLAGS="$CPPFLAGS"
+    test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H"
+
+    save_LDFLAGS="$LDFLAGS"
+    wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\"
+
+    save_LIBS="$LIBS"
+    LIBS="$lt_cv_dlopen_libs $LIBS"
+
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5
+$as_echo_n "checking whether a program can dlopen itself... " >&6; }
+if ${lt_cv_dlopen_self+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  	  if test "$cross_compiling" = yes; then :
+  lt_cv_dlopen_self=cross
+else
+  lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+  lt_status=$lt_dlunknown
+  cat > conftest.$ac_ext <<_LT_EOF
+#line $LINENO "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+#  define LT_DLGLOBAL		RTLD_GLOBAL
+#else
+#  ifdef DL_GLOBAL
+#    define LT_DLGLOBAL		DL_GLOBAL
+#  else
+#    define LT_DLGLOBAL		0
+#  endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW on the command line if we
+   find out it does not work on some platform. */
+#ifndef LT_DLLAZY_OR_NOW
+#  ifdef RTLD_LAZY
+#    define LT_DLLAZY_OR_NOW		RTLD_LAZY
+#  else
+#    ifdef DL_LAZY
+#      define LT_DLLAZY_OR_NOW		DL_LAZY
+#    else
+#      ifdef RTLD_NOW
+#        define LT_DLLAZY_OR_NOW	RTLD_NOW
+#      else
+#        ifdef DL_NOW
+#          define LT_DLLAZY_OR_NOW	DL_NOW
+#        else
+#          define LT_DLLAZY_OR_NOW	0
+#        endif
+#      endif
+#    endif
+#  endif
+#endif
+
+/* When -fvisibility=hidden is used, assume the code has been annotated
+   correspondingly for the symbols needed.  */
+#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
+int fnord () __attribute__((visibility("default")));
+#endif
+
+int fnord () { return 42; }
+int main ()
+{
+  void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+  int status = $lt_dlunknown;
+
+  if (self)
+    {
+      if (dlsym (self,"fnord"))       status = $lt_dlno_uscore;
+      else
+        {
+	  if (dlsym( self,"_fnord"))  status = $lt_dlneed_uscore;
+          else puts (dlerror ());
+	}
+      /* dlclose (self); */
+    }
+  else
+    puts (dlerror ());
+
+  return status;
+}
+_LT_EOF
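+  # The test program dlopens its own executable and looks up fnord, then
+  # _fnord, mapping the outcome onto the codes defined above: 1 = no
+  # leading underscore needed, 2 = underscore prefix needed, 0 = unknown.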
+  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5
+  (eval $ac_link) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then
+    (./conftest; exit; ) >&5 2>/dev/null
+    lt_status=$?
+    case x$lt_status in
+      x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;;
+      x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;;
+      x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;;
+    esac
+  else :
+    # compilation failed
+    lt_cv_dlopen_self=no
+  fi
+fi
+rm -fr conftest*
+
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5
+$as_echo "$lt_cv_dlopen_self" >&6; }
+
+    if test "x$lt_cv_dlopen_self" = xyes; then
+      wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\"
+      { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5
+$as_echo_n "checking whether a statically linked program can dlopen itself... " >&6; }
+if ${lt_cv_dlopen_self_static+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  	  if test "$cross_compiling" = yes; then :
+  lt_cv_dlopen_self_static=cross
+else
+  lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+  lt_status=$lt_dlunknown
+  cat > conftest.$ac_ext <<_LT_EOF
+#line $LINENO "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+#  define LT_DLGLOBAL		RTLD_GLOBAL
+#else
+#  ifdef DL_GLOBAL
+#    define LT_DLGLOBAL		DL_GLOBAL
+#  else
+#    define LT_DLGLOBAL		0
+#  endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW on the command line if we
+   find out it does not work on some platform. */
+#ifndef LT_DLLAZY_OR_NOW
+#  ifdef RTLD_LAZY
+#    define LT_DLLAZY_OR_NOW		RTLD_LAZY
+#  else
+#    ifdef DL_LAZY
+#      define LT_DLLAZY_OR_NOW		DL_LAZY
+#    else
+#      ifdef RTLD_NOW
+#        define LT_DLLAZY_OR_NOW	RTLD_NOW
+#      else
+#        ifdef DL_NOW
+#          define LT_DLLAZY_OR_NOW	DL_NOW
+#        else
+#          define LT_DLLAZY_OR_NOW	0
+#        endif
+#      endif
+#    endif
+#  endif
+#endif
+
+/* When -fvisibility=hidden is used, assume the code has been annotated
+   correspondingly for the symbols needed.  */
+#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
+int fnord () __attribute__((visibility("default")));
+#endif
+
+int fnord () { return 42; }
+int main ()
+{
+  void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+  int status = $lt_dlunknown;
+
+  if (self)
+    {
+      if (dlsym (self,"fnord"))       status = $lt_dlno_uscore;
+      else
+        {
+	  if (dlsym( self,"_fnord"))  status = $lt_dlneed_uscore;
+          else puts (dlerror ());
+	}
+      /* dlclose (self); */
+    }
+  else
+    puts (dlerror ());
+
+  return status;
+}
+_LT_EOF
+  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5
+  (eval $ac_link) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then
+    (./conftest; exit; ) >&5 2>/dev/null
+    lt_status=$?
+    case x$lt_status in
+      x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;;
+      x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;;
+      x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;;
+    esac
+  else :
+    # compilation failed
+    lt_cv_dlopen_self_static=no
+  fi
+fi
+rm -fr conftest*
+
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5
+$as_echo "$lt_cv_dlopen_self_static" >&6; }
+    fi
+
+    CPPFLAGS="$save_CPPFLAGS"
+    LDFLAGS="$save_LDFLAGS"
+    LIBS="$save_LIBS"
+    ;;
+  esac
+
+  case $lt_cv_dlopen_self in
+  yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;;
+  *) enable_dlopen_self=unknown ;;
+  esac
+
+  case $lt_cv_dlopen_self_static in
+  yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;;
+  *) enable_dlopen_self_static=unknown ;;
+  esac
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+striplib=
+old_striplib=
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5
+$as_echo_n "checking whether stripping libraries is possible... " >&6; }
+if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then
+  test -z "$old_striplib" && old_striplib="$STRIP --strip-debug"
+  test -z "$striplib" && striplib="$STRIP --strip-unneeded"
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+# FIXME - insert some real tests, host_os isn't really good enough
+  case $host_os in
+  darwin*)
+    if test -n "$STRIP" ; then
+      striplib="$STRIP -x"
+      old_striplib="$STRIP -S"
+      { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+    else
+      { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+    fi
+    ;;
+  *)
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+    ;;
+  esac
+fi
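+# Only GNU strip (detected via its -V banner) is trusted with the generic
+# --strip-debug/--strip-unneeded options; on Darwin the native strip is
+# used with -S/-x instead, and everywhere else stripping is disabled.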
+
+
+
+
+
+
+
+
+
+
+
+
+  # Report which library types will actually be built
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5
+$as_echo_n "checking if libtool supports shared libraries... " >&6; }
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5
+$as_echo "$can_build_shared" >&6; }
+
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5
+$as_echo_n "checking whether to build shared libraries... " >&6; }
+  test "$can_build_shared" = "no" && enable_shared=no
+
+  # On AIX, shared libraries and static libraries use the same namespace, and
+  # are all built from PIC.
+  case $host_os in
+  aix3*)
+    test "$enable_shared" = yes && enable_static=no
+    if test -n "$RANLIB"; then
+      archive_cmds="$archive_cmds~\$RANLIB \$lib"
+      postinstall_cmds='$RANLIB $lib'
+    fi
+    ;;
+
+  aix[4-9]*)
+    if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then
+      test "$enable_shared" = yes && enable_static=no
+    fi
+    ;;
+  esac
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5
+$as_echo "$enable_shared" >&6; }
+
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5
+$as_echo_n "checking whether to build static libraries... " >&6; }
+  # Make sure either enable_shared or enable_static is yes.
+  test "$enable_shared" = yes || enable_static=yes
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5
+$as_echo "$enable_static" >&6; }
+
+
+
+
+fi
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+CC="$lt_save_CC"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+        ac_config_commands="$ac_config_commands libtool"
+
+
+
+
+# Only expand once:
+
+
+
+ac_ext=${ac_fc_srcext-f}
+ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5'
+ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_fc_compiler_gnu
+if test -n "$ac_tool_prefix"; then
+  for ac_prog in xlf2003 xlf2003_r ifort gfortran
+  do
+    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_FC+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$FC"; then
+  ac_cv_prog_FC="$FC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_FC="$ac_tool_prefix$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+FC=$ac_cv_prog_FC
+if test -n "$FC"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $FC" >&5
+$as_echo "$FC" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+    test -n "$FC" && break
+  done
+fi
+if test -z "$FC"; then
+  ac_ct_FC=$FC
+  for ac_prog in xlf2003 xlf2003_r ifort gfortran
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_FC+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_ct_FC"; then
+  ac_cv_prog_ac_ct_FC="$ac_ct_FC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_FC="$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_FC=$ac_cv_prog_ac_ct_FC
+if test -n "$ac_ct_FC"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_FC" >&5
+$as_echo "$ac_ct_FC" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  test -n "$ac_ct_FC" && break
+done
+
+  if test "x$ac_ct_FC" = x; then
+    FC=""
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    FC=$ac_ct_FC
+  fi
+fi
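+# The candidate compilers are searched twice: first prefixed with the host
+# triplet (for cross builds), then bare, with a warning if only an
+# unprefixed tool is found while cross-compiling.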
+
+
+# Provide some information about the compiler.
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for Fortran compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+for ac_option in --version -v -V -qversion; do
+  { { ac_try="$ac_compiler $ac_option >&5"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_compiler $ac_option >&5") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    sed '10a\
+... rest of stderr output deleted ...
+         10q' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+  fi
+  rm -f conftest.er1 conftest.err
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+done
+rm -f a.out
+
+# If we don't use `.F' as extension, the preprocessor is not run on the
+# input file.  (Note that this only needs to work for GNU compilers.)
+ac_save_ext=$ac_ext
+ac_ext=F
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU Fortran compiler" >&5
+$as_echo_n "checking whether we are using the GNU Fortran compiler... " >&6; }
+if ${ac_cv_fc_compiler_gnu+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat > conftest.$ac_ext <<_ACEOF
+      program main
+#ifndef __GNUC__
+       choke me
+#endif
+
+      end
+_ACEOF
+if ac_fn_fc_try_compile "$LINENO"; then :
+  ac_compiler_gnu=yes
+else
+  ac_compiler_gnu=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_fc_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_fc_compiler_gnu" >&5
+$as_echo "$ac_cv_fc_compiler_gnu" >&6; }
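+# The `choke me' line above is deliberately invalid Fortran: the program
+# compiles only when the preprocessor defines __GNUC__, i.e. under a GNU
+# compiler (hence the .F extension, which forces preprocessing).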
+ac_ext=$ac_save_ext
+ac_test_FCFLAGS=${FCFLAGS+set}
+ac_save_FCFLAGS=$FCFLAGS
+FCFLAGS=
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $FC accepts -g" >&5
+$as_echo_n "checking whether $FC accepts -g... " >&6; }
+if ${ac_cv_prog_fc_g+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  FCFLAGS=-g
+cat > conftest.$ac_ext <<_ACEOF
+      program main
+
+      end
+_ACEOF
+if ac_fn_fc_try_compile "$LINENO"; then :
+  ac_cv_prog_fc_g=yes
+else
+  ac_cv_prog_fc_g=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_fc_g" >&5
+$as_echo "$ac_cv_prog_fc_g" >&6; }
+if test "$ac_test_FCFLAGS" = set; then
+  FCFLAGS=$ac_save_FCFLAGS
+elif test $ac_cv_prog_fc_g = yes; then
+  if test "x$ac_cv_fc_compiler_gnu" = xyes; then
+    FCFLAGS="-g -O2"
+  else
+    FCFLAGS="-g"
+  fi
+else
+  if test "x$ac_cv_fc_compiler_gnu" = xyes; then
+    FCFLAGS="-O2"
+  else
+    FCFLAGS=
+  fi
+fi
+
+if test $ac_compiler_gnu = yes; then
+  GFC=yes
+else
+  GFC=
+fi
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+func_stripname_cnf ()
+{
+  case ${2} in
+  .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;;
+  *)  func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;;
+  esac
+} # func_stripname_cnf
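+# Example: func_stripname_cnf '-L' '' '-L/opt/lib' sets
+# func_stripname_result to /opt/lib; a suffix beginning with a dot is
+# backslash-escaped before being handed to sed.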
+
+
+      ac_ext=${ac_fc_srcext-f}
+ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5'
+ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_fc_compiler_gnu
+
+
+if test -z "$FC" || test "X$FC" = "Xno"; then
+  _lt_disable_FC=yes
+fi
+
+archive_cmds_need_lc_FC=no
+allow_undefined_flag_FC=
+always_export_symbols_FC=no
+archive_expsym_cmds_FC=
+export_dynamic_flag_spec_FC=
+hardcode_direct_FC=no
+hardcode_direct_absolute_FC=no
+hardcode_libdir_flag_spec_FC=
+hardcode_libdir_separator_FC=
+hardcode_minus_L_FC=no
+hardcode_automatic_FC=no
+inherit_rpath_FC=no
+module_cmds_FC=
+module_expsym_cmds_FC=
+link_all_deplibs_FC=unknown
+old_archive_cmds_FC=$old_archive_cmds
+reload_flag_FC=$reload_flag
+reload_cmds_FC=$reload_cmds
+no_undefined_flag_FC=
+whole_archive_flag_spec_FC=
+enable_shared_with_static_runtimes_FC=no
+
+# Source file extension for fc test sources.
+ac_ext=${ac_fc_srcext-f}
+
+# Object file extension for compiled fc test sources.
+objext=o
+objext_FC=$objext
+
+# No sense in running all these tests if we already determined that
+# the FC compiler isn't working.  Some variables (like enable_shared)
+# are currently assumed to apply to all compilers on this platform,
+# and will be corrupted by setting them based on a non-working compiler.
+if test "$_lt_disable_FC" != yes; then
+  # Code to be used in simple compile tests
+  lt_simple_compile_test_code="\
+      subroutine t
+      return
+      end
+"
+
+  # Code to be used in simple link tests
+  lt_simple_link_test_code="\
+      program t
+      end
+"
+
+  # ltmain only uses $CC for tagged configurations so make sure $CC is set.
+
+
+
+
+
+
+# If no C compiler was specified, use CC.
+LTCC=${LTCC-"$CC"}
+
+# If no C compiler flags were specified, use CFLAGS.
+LTCFLAGS=${LTCFLAGS-"$CFLAGS"}
+
+# Allow CC to be a program name with arguments.
+compiler=$CC
+
+
+  # save warnings/boilerplate of simple test code
+  ac_outfile=conftest.$ac_objext
+echo "$lt_simple_compile_test_code" >conftest.$ac_ext
+eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_compiler_boilerplate=`cat conftest.err`
+$RM conftest*
+
+  ac_outfile=conftest.$ac_objext
+echo "$lt_simple_link_test_code" >conftest.$ac_ext
+eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_linker_boilerplate=`cat conftest.err`
+$RM -r conftest*
+
+
+  # Allow CC to be a program name with arguments.
+  lt_save_CC="$CC"
+  lt_save_GCC=$GCC
+  lt_save_CFLAGS=$CFLAGS
+  CC=${FC-"f95"}
+  CFLAGS=$FCFLAGS
+  compiler=$CC
+  GCC=$ac_cv_fc_compiler_gnu
+
+  compiler_FC=$CC
+  for cc_temp in $compiler""; do
+  case $cc_temp in
+    compile | *[\\/]compile | ccache | *[\\/]ccache ) ;;
+    distcc | *[\\/]distcc | purify | *[\\/]purify ) ;;
+    \-*) ;;
+    *) break;;
+  esac
+done
+cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
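+# The loop above skips wrapper commands (the compile script, ccache,
+# distcc, purify) and leading flags, so cc_basename ends up holding the
+# real compiler name with any directory and $host_alias- prefix removed.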
+
+
+  if test -n "$compiler"; then
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5
+$as_echo_n "checking if libtool supports shared libraries... " >&6; }
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5
+$as_echo "$can_build_shared" >&6; }
+
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5
+$as_echo_n "checking whether to build shared libraries... " >&6; }
+    test "$can_build_shared" = "no" && enable_shared=no
+
+    # On AIX, shared libraries and static libraries use the same namespace, and
+    # are all built from PIC.
+    case $host_os in
+      aix3*)
+        test "$enable_shared" = yes && enable_static=no
+        if test -n "$RANLIB"; then
+          archive_cmds="$archive_cmds~\$RANLIB \$lib"
+          postinstall_cmds='$RANLIB $lib'
+        fi
+        ;;
+      aix[4-9]*)
+	if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then
+	  test "$enable_shared" = yes && enable_static=no
+	fi
+        ;;
+    esac
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5
+$as_echo "$enable_shared" >&6; }
+
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5
+$as_echo_n "checking whether to build static libraries... " >&6; }
+    # Make sure either enable_shared or enable_static is yes.
+    test "$enable_shared" = yes || enable_static=yes
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5
+$as_echo "$enable_static" >&6; }
+
+    GCC_FC="$ac_cv_fc_compiler_gnu"
+    LD_FC="$LD"
+
+    ## CAVEAT EMPTOR:
+    ## There is no encapsulation within the following macros, do not change
+    ## the running order or otherwise move them around unless you know exactly
+    ## what you are doing...
+    # Dependencies to place before and after the object being linked:
+predep_objects_FC=
+postdep_objects_FC=
+predeps_FC=
+postdeps_FC=
+compiler_lib_search_path_FC=
+
+cat > conftest.$ac_ext <<_LT_EOF
+      subroutine foo
+      implicit none
+      integer a
+      a=0
+      return
+      end
+_LT_EOF
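+# Compiling the stub above while capturing $output_verbose_link_cmd exposes
+# the compiler's implicit link line; the loop below classifies its -L/-R/-l
+# tokens and object files as pre- or post-dependencies according to their
+# position relative to conftest.$objext.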
+
+
+_lt_libdeps_save_CFLAGS=$CFLAGS
+case "$CC $CFLAGS " in #(
+*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;;
+*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;;
+*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;;
+esac
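+# LTO modes are neutralized for this probe, presumably so the verbose link
+# line shows ordinary object files rather than linker-plugin bytecode.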
+
+if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+  (eval $ac_compile) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then
+  # Parse the compiler output and extract the necessary
+  # objects, libraries and library flags.
+
+  # Sentinel used to keep track of whether or not we are before
+  # the conftest object file.
+  pre_test_object_deps_done=no
+
+  for p in `eval "$output_verbose_link_cmd"`; do
+    case ${prev}${p} in
+
+    -L* | -R* | -l*)
+       # Some compilers place space between "-{L,R}" and the path.
+       # Remove the space.
+       if test $p = "-L" ||
+          test $p = "-R"; then
+	 prev=$p
+	 continue
+       fi
+
+       # Expand the sysroot to ease extracting the directories later.
+       if test -z "$prev"; then
+         case $p in
+         -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;;
+         -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;;
+         -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;;
+         esac
+       fi
+       case $p in
+       =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;;
+       esac
+       if test "$pre_test_object_deps_done" = no; then
+	 case ${prev} in
+	 -L | -R)
+	   # Internal compiler library paths should come after those
+	   # provided by the user.  The postdeps already come after the
+	   # user-supplied libs, so there is no need to process them.
+	   if test -z "$compiler_lib_search_path_FC"; then
+	     compiler_lib_search_path_FC="${prev}${p}"
+	   else
+	     compiler_lib_search_path_FC="${compiler_lib_search_path_FC} ${prev}${p}"
+	   fi
+	   ;;
+	 # The "-l" case would never come before the object being
+	 # linked, so don't bother handling this case.
+	 esac
+       else
+	 if test -z "$postdeps_FC"; then
+	   postdeps_FC="${prev}${p}"
+	 else
+	   postdeps_FC="${postdeps_FC} ${prev}${p}"
+	 fi
+       fi
+       prev=
+       ;;
+
+    *.lto.$objext) ;; # Ignore GCC LTO objects
+    *.$objext)
+       # This assumes that the test object file only shows up
+       # once in the compiler output.
+       if test "$p" = "conftest.$objext"; then
+	 pre_test_object_deps_done=yes
+	 continue
+       fi
+
+       if test "$pre_test_object_deps_done" = no; then
+	 if test -z "$predep_objects_FC"; then
+	   predep_objects_FC="$p"
+	 else
+	   predep_objects_FC="$predep_objects_FC $p"
+	 fi
+       else
+	 if test -z "$postdep_objects_FC"; then
+	   postdep_objects_FC="$p"
+	 else
+	   postdep_objects_FC="$postdep_objects_FC $p"
+	 fi
+       fi
+       ;;
+
+    *) ;; # Ignore the rest.
+
+    esac
+  done
+
+  # Clean up.
+  rm -f a.out a.exe
+else
+  echo "libtool.m4: error: problem compiling FC test program"
+fi
+
+$RM -f conftest.$objext
+CFLAGS=$_lt_libdeps_save_CFLAGS
+
+# PORTME: override above test on systems where it is broken
+
+
+case " $postdeps_FC " in
+*" -lc "*) archive_cmds_need_lc_FC=no ;;
+esac
+ compiler_lib_search_dirs_FC=
+if test -n "${compiler_lib_search_path_FC}"; then
+ compiler_lib_search_dirs_FC=`echo " ${compiler_lib_search_path_FC}" | ${SED} -e 's! -L! !g' -e 's!^ !!'`
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+    lt_prog_compiler_wl_FC=
+lt_prog_compiler_pic_FC=
+lt_prog_compiler_static_FC=
+
+
+  if test "$GCC" = yes; then
+    lt_prog_compiler_wl_FC='-Wl,'
+    lt_prog_compiler_static_FC='-static'
+
+    case $host_os in
+      aix*)
+      # All AIX code is PIC.
+      if test "$host_cpu" = ia64; then
+	# AIX 5 now supports IA64 processor
+	lt_prog_compiler_static_FC='-Bstatic'
+      fi
+      ;;
+
+    amigaos*)
+      case $host_cpu in
+      powerpc)
+            # see comment about AmigaOS4 .so support
+            lt_prog_compiler_pic_FC='-fPIC'
+        ;;
+      m68k)
+            # FIXME: we need at least 68020 code to build shared libraries, but
+            # adding the `-m68020' flag to GCC prevents building anything better,
+            # like `-m68040'.
+            lt_prog_compiler_pic_FC='-m68020 -resident32 -malways-restore-a4'
+        ;;
+      esac
+      ;;
+
+    beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*)
+      # PIC is the default for these OSes.
+      ;;
+
+    mingw* | cygwin* | pw32* | os2* | cegcc*)
+      # This hack is so that the source file can tell whether it is being
+      # built for inclusion in a dll (and should export symbols for example).
+      # Although the cygwin gcc ignores -fPIC, we still need it for old-style
+      # (--disable-auto-import) libraries.
+      lt_prog_compiler_pic_FC='-DDLL_EXPORT'
+      ;;
+
+    darwin* | rhapsody*)
+      # PIC is the default on this platform
+      # Common symbols not allowed in MH_DYLIB files
+      lt_prog_compiler_pic_FC='-fno-common'
+      ;;
+
+    haiku*)
+      # PIC is the default for Haiku.
+      # The "-static" flag exists, but is broken.
+      lt_prog_compiler_static_FC=
+      ;;
+
+    hpux*)
+      # PIC is the default for 64-bit PA HP-UX, but not for 32-bit
+      # PA HP-UX.  On IA64 HP-UX, PIC is the default but the pic flag
+      # sets the default TLS model and affects inlining.
+      case $host_cpu in
+      hppa*64*)
+	# +Z the default
+	;;
+      *)
+	lt_prog_compiler_pic_FC='-fPIC'
+	;;
+      esac
+      ;;
+
+    interix[3-9]*)
+      # Interix 3.x gcc -fpic/-fPIC options generate broken code.
+      # Instead, we relocate shared libraries at runtime.
+      ;;
+
+    msdosdjgpp*)
+      # Just because we use GCC doesn't mean we suddenly get shared libraries
+      # on systems that don't support them.
+      lt_prog_compiler_can_build_shared_FC=no
+      enable_shared=no
+      ;;
+
+    *nto* | *qnx*)
+      # QNX uses GNU C++, but we need to define the -shared option too,
+      # otherwise it will core dump.
+      lt_prog_compiler_pic_FC='-fPIC -shared'
+      ;;
+
+    sysv4*MP*)
+      if test -d /usr/nec; then
+	lt_prog_compiler_pic_FC=-Kconform_pic
+      fi
+      ;;
+
+    *)
+      lt_prog_compiler_pic_FC='-fPIC'
+      ;;
+    esac
+
+    case $cc_basename in
+    nvcc*) # CUDA Compiler Driver 2.2
+      lt_prog_compiler_wl_FC='-Xlinker '
+      if test -n "$lt_prog_compiler_pic_FC"; then
+        lt_prog_compiler_pic_FC="-Xcompiler $lt_prog_compiler_pic_FC"
+      fi
+      ;;
+    esac
+  else
+    # PORTME Check for flag to pass linker flags through the system compiler.
+    case $host_os in
+    aix*)
+      lt_prog_compiler_wl_FC='-Wl,'
+      if test "$host_cpu" = ia64; then
+	# AIX 5 now supports IA64 processor
+	lt_prog_compiler_static_FC='-Bstatic'
+      else
+	lt_prog_compiler_static_FC='-bnso -bI:/lib/syscalls.exp'
+      fi
+      ;;
+
+    mingw* | cygwin* | pw32* | os2* | cegcc*)
+      # This hack is so that the source file can tell whether it is being
+      # built for inclusion in a dll (and should export symbols for example).
+      lt_prog_compiler_pic_FC='-DDLL_EXPORT'
+      ;;
+
+    hpux9* | hpux10* | hpux11*)
+      lt_prog_compiler_wl_FC='-Wl,'
+      # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but
+      # not for PA HP-UX.
+      case $host_cpu in
+      hppa*64*|ia64*)
+	# +Z the default
+	;;
+      *)
+	lt_prog_compiler_pic_FC='+Z'
+	;;
+      esac
+      # Is there a better lt_prog_compiler_static that works with the bundled CC?
+      lt_prog_compiler_static_FC='${wl}-a ${wl}archive'
+      ;;
+
+    irix5* | irix6* | nonstopux*)
+      lt_prog_compiler_wl_FC='-Wl,'
+      # PIC (with -KPIC) is the default.
+      lt_prog_compiler_static_FC='-non_shared'
+      ;;
+
+    linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+      case $cc_basename in
+      # old Intel for x86_64 which still supported -KPIC.
+      ecc*)
+	lt_prog_compiler_wl_FC='-Wl,'
+	lt_prog_compiler_pic_FC='-KPIC'
+	lt_prog_compiler_static_FC='-static'
+        ;;
+      # icc used to be incompatible with GCC.
+      # ICC 10 doesn't accept -KPIC any more.
+      icc* | ifort*)
+	lt_prog_compiler_wl_FC='-Wl,'
+	lt_prog_compiler_pic_FC='-fPIC'
+	lt_prog_compiler_static_FC='-static'
+        ;;
+      # Lahey Fortran 8.1.
+      lf95*)
+	lt_prog_compiler_wl_FC='-Wl,'
+	lt_prog_compiler_pic_FC='--shared'
+	lt_prog_compiler_static_FC='--static'
+	;;
+      nagfor*)
+	# NAG Fortran compiler
+	lt_prog_compiler_wl_FC='-Wl,-Wl,,'
+	lt_prog_compiler_pic_FC='-PIC'
+	lt_prog_compiler_static_FC='-Bstatic'
+	;;
+      pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*)
+        # Portland Group compilers (*not* the Pentium gcc compiler,
+	# which looks to be a dead project)
+	lt_prog_compiler_wl_FC='-Wl,'
+	lt_prog_compiler_pic_FC='-fpic'
+	lt_prog_compiler_static_FC='-Bstatic'
+        ;;
+      ccc*)
+        lt_prog_compiler_wl_FC='-Wl,'
+        # All Alpha code is PIC.
+        lt_prog_compiler_static_FC='-non_shared'
+        ;;
+      xl* | bgxl* | bgf* | mpixl*)
+	# IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene
+	lt_prog_compiler_wl_FC='-Wl,'
+	lt_prog_compiler_pic_FC='-qpic'
+	lt_prog_compiler_static_FC='-qstaticlink'
+	;;
+      *)
+	case `$CC -V 2>&1 | sed 5q` in
+	*Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [1-7].* | *Sun*Fortran*\ 8.[0-3]*)
+	  # Sun Fortran 8.3 passes all unrecognized flags to the linker
+	  lt_prog_compiler_pic_FC='-KPIC'
+	  lt_prog_compiler_static_FC='-Bstatic'
+	  lt_prog_compiler_wl_FC=''
+	  ;;
+	*Sun\ F* | *Sun*Fortran*)
+	  lt_prog_compiler_pic_FC='-KPIC'
+	  lt_prog_compiler_static_FC='-Bstatic'
+	  lt_prog_compiler_wl_FC='-Qoption ld '
+	  ;;
+	*Sun\ C*)
+	  # Sun C 5.9
+	  lt_prog_compiler_pic_FC='-KPIC'
+	  lt_prog_compiler_static_FC='-Bstatic'
+	  lt_prog_compiler_wl_FC='-Wl,'
+	  ;;
+        *Intel*\ [CF]*Compiler*)
+	  lt_prog_compiler_wl_FC='-Wl,'
+	  lt_prog_compiler_pic_FC='-fPIC'
+	  lt_prog_compiler_static_FC='-static'
+	  ;;
+	*Portland\ Group*)
+	  lt_prog_compiler_wl_FC='-Wl,'
+	  lt_prog_compiler_pic_FC='-fpic'
+	  lt_prog_compiler_static_FC='-Bstatic'
+	  ;;
+	esac
+	;;
+      esac
+      ;;
+
+    newsos6)
+      lt_prog_compiler_pic_FC='-KPIC'
+      lt_prog_compiler_static_FC='-Bstatic'
+      ;;
+
+    *nto* | *qnx*)
+      # QNX uses GNU C++, but we need to define the -shared option too,
+      # otherwise it will core dump.
+      lt_prog_compiler_pic_FC='-fPIC -shared'
+      ;;
+
+    osf3* | osf4* | osf5*)
+      lt_prog_compiler_wl_FC='-Wl,'
+      # All OSF/1 code is PIC.
+      lt_prog_compiler_static_FC='-non_shared'
+      ;;
+
+    rdos*)
+      lt_prog_compiler_static_FC='-non_shared'
+      ;;
+
+    solaris*)
+      lt_prog_compiler_pic_FC='-KPIC'
+      lt_prog_compiler_static_FC='-Bstatic'
+      case $cc_basename in
+      f77* | f90* | f95* | sunf77* | sunf90* | sunf95*)
+	lt_prog_compiler_wl_FC='-Qoption ld ';;
+      *)
+	lt_prog_compiler_wl_FC='-Wl,';;
+      esac
+      ;;
+
+    sunos4*)
+      lt_prog_compiler_wl_FC='-Qoption ld '
+      lt_prog_compiler_pic_FC='-PIC'
+      lt_prog_compiler_static_FC='-Bstatic'
+      ;;
+
+    sysv4 | sysv4.2uw2* | sysv4.3*)
+      lt_prog_compiler_wl_FC='-Wl,'
+      lt_prog_compiler_pic_FC='-KPIC'
+      lt_prog_compiler_static_FC='-Bstatic'
+      ;;
+
+    sysv4*MP*)
+      if test -d /usr/nec; then
+	lt_prog_compiler_pic_FC='-Kconform_pic'
+	lt_prog_compiler_static_FC='-Bstatic'
+      fi
+      ;;
+
+    sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*)
+      lt_prog_compiler_wl_FC='-Wl,'
+      lt_prog_compiler_pic_FC='-KPIC'
+      lt_prog_compiler_static_FC='-Bstatic'
+      ;;
+
+    unicos*)
+      lt_prog_compiler_wl_FC='-Wl,'
+      lt_prog_compiler_can_build_shared_FC=no
+      ;;
+
+    uts4*)
+      lt_prog_compiler_pic_FC='-pic'
+      lt_prog_compiler_static_FC='-Bstatic'
+      ;;
+
+    *)
+      lt_prog_compiler_can_build_shared_FC=no
+      ;;
+    esac
+  fi
+
+case $host_os in
+  # For platforms which do not support PIC, -DPIC is meaningless:
+  *djgpp*)
+    lt_prog_compiler_pic_FC=
+    ;;
+  *)
+    lt_prog_compiler_pic_FC="$lt_prog_compiler_pic_FC"
+    ;;
+esac
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5
+$as_echo_n "checking for $compiler option to produce PIC... " >&6; }
+if ${lt_cv_prog_compiler_pic_FC+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_prog_compiler_pic_FC=$lt_prog_compiler_pic_FC
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_FC" >&5
+$as_echo "$lt_cv_prog_compiler_pic_FC" >&6; }
+lt_prog_compiler_pic_FC=$lt_cv_prog_compiler_pic_FC
+
+#
+# Check to make sure the PIC flag actually works.
+#
+if test -n "$lt_prog_compiler_pic_FC"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic_FC works" >&5
+$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic_FC works... " >&6; }
+if ${lt_cv_prog_compiler_pic_works_FC+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_prog_compiler_pic_works_FC=no
+   ac_outfile=conftest.$ac_objext
+   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+   lt_compiler_flag="$lt_prog_compiler_pic_FC"
+   # Insert the option either (1) after the last *FLAGS variable, or
+   # (2) before a word containing "conftest.", or (3) at the end.
+   # Note that $ac_compile itself does not contain backslashes and begins
+   # with a dollar sign (not a hyphen), so the echo should work correctly.
+   # The option is referenced via a variable to avoid confusing sed.
+   lt_compile=`echo "$ac_compile" | $SED \
+   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+   -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+   -e 's:$: $lt_compiler_flag:'`
+   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
+   (eval "$lt_compile" 2>conftest.err)
+   ac_status=$?
+   cat conftest.err >&5
+   echo "$as_me:$LINENO: \$? = $ac_status" >&5
+   if (exit $ac_status) && test -s "$ac_outfile"; then
+     # The compiler can only warn and ignore the option if not recognized
+     # So say no if there are warnings other than the usual output.
+     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp
+     $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+     if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
+       lt_cv_prog_compiler_pic_works_FC=yes
+     fi
+   fi
+   $RM conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works_FC" >&5
+$as_echo "$lt_cv_prog_compiler_pic_works_FC" >&6; }
+
+if test x"$lt_cv_prog_compiler_pic_works_FC" = xyes; then
+    case $lt_prog_compiler_pic_FC in
+     "" | " "*) ;;
+     *) lt_prog_compiler_pic_FC=" $lt_prog_compiler_pic_FC" ;;
+     esac
+else
+    lt_prog_compiler_pic_FC=
+     lt_prog_compiler_can_build_shared_FC=no
+fi
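+# The flag counts as working only if compilation succeeds and stderr is
+# empty or matches the boilerplate captured earlier: compilers that merely
+# warn about an unknown option are thereby rejected.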
+
+fi
+
+
+
+
+
+#
+# Check to make sure the static flag actually works.
+#
+wl=$lt_prog_compiler_wl_FC eval lt_tmp_static_flag=\"$lt_prog_compiler_static_FC\"
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5
+$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; }
+if ${lt_cv_prog_compiler_static_works_FC+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_prog_compiler_static_works_FC=no
+   save_LDFLAGS="$LDFLAGS"
+   LDFLAGS="$LDFLAGS $lt_tmp_static_flag"
+   echo "$lt_simple_link_test_code" > conftest.$ac_ext
+   if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
+     # The linker can only warn and ignore the option if not recognized
+     # So say no if there are warnings
+     if test -s conftest.err; then
+       # Append any errors to the config.log.
+       cat conftest.err 1>&5
+       $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp
+       $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+       if diff conftest.exp conftest.er2 >/dev/null; then
+         lt_cv_prog_compiler_static_works_FC=yes
+       fi
+     else
+       lt_cv_prog_compiler_static_works_FC=yes
+     fi
+   fi
+   $RM -r conftest*
+   LDFLAGS="$save_LDFLAGS"
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works_FC" >&5
+$as_echo "$lt_cv_prog_compiler_static_works_FC" >&6; }
+
+if test x"$lt_cv_prog_compiler_static_works_FC" = xyes; then
+    :
+else
+    lt_prog_compiler_static_FC=
+fi
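+# Unlike the PIC probe, this one links (the flag lands in LDFLAGS), since
+# options such as -static are consumed by the linker driver rather than by
+# the compiler proper.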
+
+
+
+
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
+$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
+if ${lt_cv_prog_compiler_c_o_FC+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_prog_compiler_c_o_FC=no
+   $RM -r conftest 2>/dev/null
+   mkdir conftest
+   cd conftest
+   mkdir out
+   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+   lt_compiler_flag="-o out/conftest2.$ac_objext"
+   # Insert the option either (1) after the last *FLAGS variable, or
+   # (2) before a word containing "conftest.", or (3) at the end.
+   # Note that $ac_compile itself does not contain backslashes and begins
+   # with a dollar sign (not a hyphen), so the echo should work correctly.
+   lt_compile=`echo "$ac_compile" | $SED \
+   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+   -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+   -e 's:$: $lt_compiler_flag:'`
+   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
+   (eval "$lt_compile" 2>out/conftest.err)
+   ac_status=$?
+   cat out/conftest.err >&5
+   echo "$as_me:$LINENO: \$? = $ac_status" >&5
+   if (exit $ac_status) && test -s out/conftest2.$ac_objext
+   then
+     # The compiler can only warn and ignore the option if not recognized
+     # So say no if there are warnings
+     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
+     $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
+     if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
+       lt_cv_prog_compiler_c_o_FC=yes
+     fi
+   fi
+   chmod u+w . 2>&5
+   $RM conftest*
+   # SGI C++ compiler will create directory out/ii_files/ for
+   # template instantiation
+   test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
+   $RM out/* && rmdir out
+   cd ..
+   $RM -r conftest
+   $RM conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_FC" >&5
+$as_echo "$lt_cv_prog_compiler_c_o_FC" >&6; }
+
+
+
+
+
+
+
+hard_links="nottested"
+if test "$lt_cv_prog_compiler_c_o_FC" = no && test "$need_locks" != no; then
+  # do not overwrite the value of need_locks provided by the user
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5
+$as_echo_n "checking if we can lock with hard links... " >&6; }
+  hard_links=yes
+  $RM conftest*
+  ln conftest.a conftest.b 2>/dev/null && hard_links=no
+  touch conftest.a
+  ln conftest.a conftest.b 2>&5 || hard_links=no
+  ln conftest.a conftest.b 2>/dev/null && hard_links=no
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5
+$as_echo "$hard_links" >&6; }
+  if test "$hard_links" = no; then
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5
+$as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;}
+    need_locks=warn
+  fi
+else
+  need_locks=no
+fi
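+# Sketch of the locking idea probed above (hypothetical file names; the
+# actual locking lives in ltmain.sh): ln(1) fails if its target already
+# exists, so a hard link serves as an atomic mutex on filesystems that
+# support them:
+#
+#   touch lock.$$                     # unique per-process file
+#   if ln lock.$$ lock 2>/dev/null; then
+#     : # lock acquired; safe to write the shared output file
+#     rm -f lock                      # release
+#   else
+#     echo "another job holds the lock" >&2
+#   fi
+#   rm -f lock.$$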
+
+
+
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5
+$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; }
+
+  runpath_var=
+  allow_undefined_flag_FC=
+  always_export_symbols_FC=no
+  archive_cmds_FC=
+  archive_expsym_cmds_FC=
+  compiler_needs_object_FC=no
+  enable_shared_with_static_runtimes_FC=no
+  export_dynamic_flag_spec_FC=
+  export_symbols_cmds_FC='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+  hardcode_automatic_FC=no
+  hardcode_direct_FC=no
+  hardcode_direct_absolute_FC=no
+  hardcode_libdir_flag_spec_FC=
+  hardcode_libdir_separator_FC=
+  hardcode_minus_L_FC=no
+  hardcode_shlibpath_var_FC=unsupported
+  inherit_rpath_FC=no
+  link_all_deplibs_FC=unknown
+  module_cmds_FC=
+  module_expsym_cmds_FC=
+  old_archive_from_new_cmds_FC=
+  old_archive_from_expsyms_cmds_FC=
+  thread_safe_flag_spec_FC=
+  whole_archive_flag_spec_FC=
+  # include_expsyms should be a list of space-separated symbols to be *always*
+  # included in the symbol list
+  include_expsyms_FC=
+  # exclude_expsyms can be an extended regexp of symbols to exclude
+  # it will be wrapped by ` (' and `)$', so one must not match beginning or
+  # end of line.  Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc',
+  # as well as any symbol that contains `d'.
+  exclude_expsyms_FC='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'
+  # Although _GLOBAL_OFFSET_TABLE_ is a valid C symbol name, most a.out
+  # platforms (ab)use it in PIC code, but their linkers get confused if
+  # the symbol is explicitly referenced.  Since portable code cannot
+  # rely on this symbol name, it's probably fine to never include it in
+  # preloaded symbol tables.
+  # Exclude shared library initialization/finalization symbols.
+  extract_expsyms_cmds=
+
+  case $host_os in
+  cygwin* | mingw* | pw32* | cegcc*)
+    # FIXME: the MSVC++ port hasn't been tested in a loooong time
+    # When not using gcc, we currently assume that we are using
+    # Microsoft Visual C++.
+    if test "$GCC" != yes; then
+      with_gnu_ld=no
+    fi
+    ;;
+  interix*)
+    # we just hope/assume this is gcc and not c89 (= MSVC++)
+    with_gnu_ld=yes
+    ;;
+  openbsd*)
+    with_gnu_ld=no
+    ;;
+  linux* | k*bsd*-gnu | gnu*)
+    link_all_deplibs_FC=no
+    ;;
+  esac
+
+  ld_shlibs_FC=yes
+
+  # On some targets, GNU ld is compatible enough with the native linker
+  # that we're better off using the native interface for both.
+  lt_use_gnu_ld_interface=no
+  if test "$with_gnu_ld" = yes; then
+    case $host_os in
+      aix*)
+	# The AIX port of GNU ld has always aspired to compatibility
+	# with the native linker.  However, as the warning in the GNU ld
+	# block says, versions before 2.19.5* couldn't really create working
+	# shared libraries, regardless of the interface used.
+	case `$LD -v 2>&1` in
+	  *\ \(GNU\ Binutils\)\ 2.19.5*) ;;
+	  *\ \(GNU\ Binutils\)\ 2.[2-9]*) ;;
+	  *\ \(GNU\ Binutils\)\ [3-9]*) ;;
+	  *)
+	    lt_use_gnu_ld_interface=yes
+	    ;;
+	esac
+	;;
+      *)
+	lt_use_gnu_ld_interface=yes
+	;;
+    esac
+  fi
+
+  if test "$lt_use_gnu_ld_interface" = yes; then
+    # If archive_cmds runs LD, not CC, wlarc should be empty
+    wlarc='${wl}'
+
+    # Set some defaults for GNU ld with shared library support. These
+    # are reset later if shared libraries are not supported. Putting them
+    # here allows them to be overridden if necessary.
+    runpath_var=LD_RUN_PATH
+    hardcode_libdir_flag_spec_FC='${wl}-rpath ${wl}$libdir'
+    export_dynamic_flag_spec_FC='${wl}--export-dynamic'
+    # ancient GNU ld didn't support --whole-archive et al.
+    if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then
+      whole_archive_flag_spec_FC="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+    else
+      whole_archive_flag_spec_FC=
+    fi
+    supports_anon_versioning=no
+    case `$LD -v 2>&1` in
+      *GNU\ gold*) supports_anon_versioning=yes ;;
+      *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11
+      *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ...
+      *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ...
+      *\ 2.11.*) ;; # other 2.11 versions
+      *) supports_anon_versioning=yes ;;
+    esac
+
+    # See if GNU ld supports shared libraries.
+    case $host_os in
+    aix[3-9]*)
+      # On AIX/PPC, the GNU linker is very broken
+      if test "$host_cpu" != ia64; then
+	ld_shlibs_FC=no
+	cat <<_LT_EOF 1>&2
+
+*** Warning: the GNU linker, at least up to release 2.19, is reported
+*** to be unable to reliably create shared libraries on AIX.
+*** Therefore, libtool is disabling shared libraries support.  If you
+*** really care for shared libraries, you may want to install binutils
+*** 2.20 or above, or modify your PATH so that a non-GNU linker is found.
+*** You will then need to restart the configuration process.
+
+_LT_EOF
+      fi
+      ;;
+
+    amigaos*)
+      case $host_cpu in
+      powerpc)
+            # see comment about AmigaOS4 .so support
+            archive_cmds_FC='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+            archive_expsym_cmds_FC=''
+        ;;
+      m68k)
+            archive_cmds_FC='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+            hardcode_libdir_flag_spec_FC='-L$libdir'
+            hardcode_minus_L_FC=yes
+        ;;
+      esac
+      ;;
+
+    beos*)
+      if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	allow_undefined_flag_FC=unsupported
+	# Joseph Beckenbach <jrb3 at best.com> says some releases of gcc
+	# support --undefined.  This deserves some investigation.  FIXME
+	archive_cmds_FC='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+      else
+	ld_shlibs_FC=no
+      fi
+      ;;
+
+    cygwin* | mingw* | pw32* | cegcc*)
+      # hardcode_libdir_flag_spec_FC is actually meaningless,
+      # as there is no search path for DLLs.
+      hardcode_libdir_flag_spec_FC='-L$libdir'
+      export_dynamic_flag_spec_FC='${wl}--export-all-symbols'
+      allow_undefined_flag_FC=unsupported
+      always_export_symbols_FC=no
+      enable_shared_with_static_runtimes_FC=yes
+      export_symbols_cmds_FC='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols'
+      exclude_expsyms_FC='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'
+
+      if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
+        archive_cmds_FC='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+	# If the export-symbols file already is a .def file (1st line
+	# is EXPORTS), use it as is; otherwise, prepend...
+	archive_expsym_cmds_FC='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+	  cp $export_symbols $output_objdir/$soname.def;
+	else
+	  echo EXPORTS > $output_objdir/$soname.def;
+	  cat $export_symbols >> $output_objdir/$soname.def;
+	fi~
+	$CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+      else
+	ld_shlibs_FC=no
+      fi
+      ;;
+
+    haiku*)
+      archive_cmds_FC='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+      link_all_deplibs_FC=yes
+      ;;
+
+    interix[3-9]*)
+      hardcode_direct_FC=no
+      hardcode_shlibpath_var_FC=no
+      hardcode_libdir_flag_spec_FC='${wl}-rpath,$libdir'
+      export_dynamic_flag_spec_FC='${wl}-E'
+      # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
+      # Instead, shared libraries are loaded at an image base (0x10000000 by
+      # default) and relocated if they conflict, which is a slow, very
+      # memory-consuming and fragmenting process.  To avoid this, we pick a random,
+      # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link
+      # time.  Moving up from 0x10000000 also allows more sbrk(2) space.
+      archive_cmds_FC='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+      archive_expsym_cmds_FC='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
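+      # Worked example of the expr above: (RANDOM % 4096) / 2 yields
+      # 0..2047; times 262144 (256 KiB) gives an offset of up to 0x1FFC0000;
+      # plus 1342177280 (0x50000000) lands in [0x50000000, 0x6FFC0000].
+      # E.g. RANDOM=12345: 12345 % 4096 = 57; 57 / 2 = 28;
+      # 28 * 262144 = 0x700000; + 0x50000000 = image base 0x50700000.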
+      ;;
+
+    gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu)
+      tmp_diet=no
+      if test "$host_os" = linux-dietlibc; then
+	case $cc_basename in
+	  diet\ *) tmp_diet=yes;;	# linux-dietlibc with static linking (!diet-dyn)
+	esac
+      fi
+      if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \
+	 && test "$tmp_diet" = no
+      then
+	tmp_addflag=' $pic_flag'
+	tmp_sharedflag='-shared'
+	case $cc_basename,$host_cpu in
+        pgcc*)				# Portland Group C compiler
+	  whole_archive_flag_spec_FC='${wl}--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+	  tmp_addflag=' $pic_flag'
+	  ;;
+	pgf77* | pgf90* | pgf95* | pgfortran*)
+					# Portland Group f77 and f90 compilers
+	  whole_archive_flag_spec_FC='${wl}--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+	  tmp_addflag=' $pic_flag -Mnomain' ;;
+	ecc*,ia64* | icc*,ia64*)	# Intel C compiler on ia64
+	  tmp_addflag=' -i_dynamic' ;;
+	efc*,ia64* | ifort*,ia64*)	# Intel Fortran compiler on ia64
+	  tmp_addflag=' -i_dynamic -nofor_main' ;;
+	ifc* | ifort*)			# Intel Fortran compiler
+	  tmp_addflag=' -nofor_main' ;;
+	lf95*)				# Lahey Fortran 8.1
+	  whole_archive_flag_spec_FC=
+	  tmp_sharedflag='--shared' ;;
+	xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below)
+	  tmp_sharedflag='-qmkshrobj'
+	  tmp_addflag= ;;
+	nvcc*)	# Cuda Compiler Driver 2.2
+	  whole_archive_flag_spec_FC='${wl}--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+	  compiler_needs_object_FC=yes
+	  ;;
+	esac
+	case `$CC -V 2>&1 | sed 5q` in
+	*Sun\ C*)			# Sun C 5.9
+	  whole_archive_flag_spec_FC='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+	  compiler_needs_object_FC=yes
+	  tmp_sharedflag='-G' ;;
+	*Sun\ F*)			# Sun Fortran 8.3
+	  tmp_sharedflag='-G' ;;
+	esac
+	archive_cmds_FC='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+
+        if test "x$supports_anon_versioning" = xyes; then
+          archive_expsym_cmds_FC='echo "{ global:" > $output_objdir/$libname.ver~
+	    cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+	    echo "local: *; };" >> $output_objdir/$libname.ver~
+	    $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib'
+        fi
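+	# For reference, the version script written above for a hypothetical
+	# export list containing foo_ and bar_ would be:
+	#
+	#   { global:
+	#   foo_;
+	#   bar_;
+	#   local: *; };
+	#
+	# ld then hides every symbol not listed under "global:".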
+
+	case $cc_basename in
+	xlf* | bgf* | bgxlf* | mpixlf*)
+	  # IBM XL Fortran 10.1 on PPC cannot create shared libs itself
+	  whole_archive_flag_spec_FC='--whole-archive$convenience --no-whole-archive'
+	  hardcode_libdir_flag_spec_FC='${wl}-rpath ${wl}$libdir'
+	  archive_cmds_FC='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib'
+	  if test "x$supports_anon_versioning" = xyes; then
+	    archive_expsym_cmds_FC='echo "{ global:" > $output_objdir/$libname.ver~
+	      cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+	      echo "local: *; };" >> $output_objdir/$libname.ver~
+	      $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib'
+	  fi
+	  ;;
+	esac
+      else
+        ld_shlibs_FC=no
+      fi
+      ;;
+
+    netbsd* | netbsdelf*-gnu)
+      if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+	archive_cmds_FC='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
+	wlarc=
+      else
+	archive_cmds_FC='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+	archive_expsym_cmds_FC='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+      fi
+      ;;
+
+    solaris*)
+      if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then
+	ld_shlibs_FC=no
+	cat <<_LT_EOF 1>&2
+
+*** Warning: Releases 2.8.* of the GNU linker cannot reliably
+*** create shared libraries on Solaris systems.  Therefore, libtool
+*** is disabling shared libraries support.  We urge you to upgrade GNU
+*** binutils to release 2.9.1 or newer.  Another option is to modify
+*** your PATH or compiler configuration so that the native linker is
+*** used, and then restart.
+
+_LT_EOF
+      elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	archive_cmds_FC='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+	archive_expsym_cmds_FC='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+      else
+	ld_shlibs_FC=no
+      fi
+      ;;
+
+    sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*)
+      case `$LD -v 2>&1` in
+        *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*)
+	ld_shlibs_FC=no
+	cat <<_LT_EOF 1>&2
+
+*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot
+*** reliably create shared libraries on SCO systems.  Therefore, libtool
+*** is disabling shared libraries support.  We urge you to upgrade GNU
+*** binutils to release 2.16.91.0.3 or newer.  Another option is to modify
+*** your PATH or compiler configuration so that the native linker is
+*** used, and then restart.
+
+_LT_EOF
+	;;
+	*)
+	  # For security reasons, it is highly recommended that you always
+	  # use absolute paths for naming shared libraries, and exclude the
+	  # DT_RUNPATH tag from executables and libraries.  But doing so
+	  # requires that you compile everything twice, which is a pain.
+	  if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	    hardcode_libdir_flag_spec_FC='${wl}-rpath ${wl}$libdir'
+	    archive_cmds_FC='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+	    archive_expsym_cmds_FC='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+	  else
+	    ld_shlibs_FC=no
+	  fi
+	;;
+      esac
+      ;;
+
+    sunos4*)
+      archive_cmds_FC='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+      wlarc=
+      hardcode_direct_FC=yes
+      hardcode_shlibpath_var_FC=no
+      ;;
+
+    *)
+      if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	archive_cmds_FC='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+	archive_expsym_cmds_FC='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+      else
+	ld_shlibs_FC=no
+      fi
+      ;;
+    esac
+
+    if test "$ld_shlibs_FC" = no; then
+      runpath_var=
+      hardcode_libdir_flag_spec_FC=
+      export_dynamic_flag_spec_FC=
+      whole_archive_flag_spec_FC=
+    fi
+  else
+    # PORTME fill in a description of your system's linker (not GNU ld)
+    case $host_os in
+    aix3*)
+      allow_undefined_flag_FC=unsupported
+      always_export_symbols_FC=yes
+      archive_expsym_cmds_FC='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname'
+      # Note: this linker hardcodes the directories in LIBPATH if there
+      # are no directories specified by -L.
+      hardcode_minus_L_FC=yes
+      if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then
+	# Neither direct hardcoding nor static linking is supported with a
+	# broken collect2.
+	hardcode_direct_FC=unsupported
+      fi
+      ;;
+
+    aix[4-9]*)
+      if test "$host_cpu" = ia64; then
+	# On IA64, the linker does run time linking by default, so we don't
+	# have to do anything special.
+	aix_use_runtimelinking=no
+	exp_sym_flag='-Bexport'
+	no_entry_flag=""
+      else
+	# If we're using GNU nm, then we don't want the "-C" option.
+	# -C means "demangle" to AIX nm, but "don't demangle" to GNU nm.
+	# Also, AIX nm treats weak defined symbols like other global
+	# defined symbols, whereas GNU nm marks them as "W".
+	if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
+	  export_symbols_cmds_FC='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+	else
+	  export_symbols_cmds_FC='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+	fi
+	aix_use_runtimelinking=no
+
+	# Test if we are trying to use run time linking or normal
+	# AIX style linking. If -brtl is somewhere in LDFLAGS, we
+	# need to do runtime linking.
+	case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*)
+	  for ld_flag in $LDFLAGS; do
+	  if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then
+	    aix_use_runtimelinking=yes
+	    break
+	  fi
+	  done
+	  ;;
+	esac
+
+	exp_sym_flag='-bexport'
+	no_entry_flag='-bnoentry'
+      fi
+
+      # When large executables or shared objects are built, AIX ld can
+      # have problems creating the table of contents.  If linking a library
+      # or program results in "error TOC overflow", add -mminimal-toc to
+      # CXXFLAGS/CFLAGS for g++/gcc.  In cases where that is not
+      # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
+
+      archive_cmds_FC=''
+      hardcode_direct_FC=yes
+      hardcode_direct_absolute_FC=yes
+      hardcode_libdir_separator_FC=':'
+      link_all_deplibs_FC=yes
+      file_list_spec_FC='${wl}-f,'
+
+      if test "$GCC" = yes; then
+	case $host_os in aix4.[012]|aix4.[012].*)
+	# We only want to do this on AIX 4.2 and lower; the check
+	# below for broken collect2 doesn't work under 4.3+.
+	  collect2name=`${CC} -print-prog-name=collect2`
+	  if test -f "$collect2name" &&
+	   strings "$collect2name" | $GREP resolve_lib_name >/dev/null
+	  then
+	  # We have reworked collect2
+	  :
+	  else
+	  # We have old collect2
+	  hardcode_direct_FC=unsupported
+	  # It fails to find uninstalled libraries when the uninstalled
+	  # path is not listed in the libpath.  Setting hardcode_minus_L
+	  # to unsupported forces relinking
+	  hardcode_minus_L_FC=yes
+	  hardcode_libdir_flag_spec_FC='-L$libdir'
+	  hardcode_libdir_separator_FC=
+	  fi
+	  ;;
+	esac
+	shared_flag='-shared'
+	if test "$aix_use_runtimelinking" = yes; then
+	  shared_flag="$shared_flag "'${wl}-G'
+	fi
+	link_all_deplibs_FC=no
+      else
+	# not using gcc
+	if test "$host_cpu" = ia64; then
+	# VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
+	# chokes on -Wl,-G. The following line is correct:
+	  shared_flag='-G'
+	else
+	  if test "$aix_use_runtimelinking" = yes; then
+	    shared_flag='${wl}-G'
+	  else
+	    shared_flag='${wl}-bM:SRE'
+	  fi
+	fi
+      fi
+
+      export_dynamic_flag_spec_FC='${wl}-bexpall'
+      # It seems that -bexpall does not export symbols beginning with
+      # underscore (_), so it is better to generate a list of symbols to export.
+      always_export_symbols_FC=yes
+      if test "$aix_use_runtimelinking" = yes; then
+	# Warning - without using the other runtime loading flags (-brtl),
+	# -berok will link without error, but may produce a broken library.
+	allow_undefined_flag_FC='-berok'
+        # Determine the default libpath from the value encoded in an
+        # empty executable.
+        if test "${lt_cv_aix_libpath+set}" = set; then
+  aix_libpath=$lt_cv_aix_libpath
+else
+  if ${lt_cv_aix_libpath__FC+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat > conftest.$ac_ext <<_ACEOF
+      program main
+
+      end
+_ACEOF
+if ac_fn_fc_try_link "$LINENO"; then :
+
+  lt_aix_libpath_sed='
+      /Import File Strings/,/^$/ {
+	  /^0/ {
+	      s/^0  *\([^ ]*\) *$/\1/
+	      p
+	  }
+      }'
+  lt_cv_aix_libpath__FC=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  # Check for a 64-bit object if we didn't find anything.
+  if test -z "$lt_cv_aix_libpath__FC"; then
+    lt_cv_aix_libpath__FC=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  fi
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+  if test -z "$lt_cv_aix_libpath__FC"; then
+    lt_cv_aix_libpath__FC="/usr/lib:/lib"
+  fi
+
+fi
+
+  aix_libpath=$lt_cv_aix_libpath__FC
+fi
+
+        hardcode_libdir_flag_spec_FC='${wl}-blibpath:$libdir:'"$aix_libpath"
+        archive_expsym_cmds_FC='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag"
+      else
+	if test "$host_cpu" = ia64; then
+	  hardcode_libdir_flag_spec_FC='${wl}-R $libdir:/usr/lib:/lib'
+	  allow_undefined_flag_FC="-z nodefs"
+	  archive_expsym_cmds_FC="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols"
+	else
+	 # Determine the default libpath from the value encoded in an
+	 # empty executable.
+	 if test "${lt_cv_aix_libpath+set}" = set; then
+  aix_libpath=$lt_cv_aix_libpath
+else
+  if ${lt_cv_aix_libpath__FC+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat > conftest.$ac_ext <<_ACEOF
+      program main
+
+      end
+_ACEOF
+if ac_fn_fc_try_link "$LINENO"; then :
+
+  lt_aix_libpath_sed='
+      /Import File Strings/,/^$/ {
+	  /^0/ {
+	      s/^0  *\([^ ]*\) *$/\1/
+	      p
+	  }
+      }'
+  lt_cv_aix_libpath__FC=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  # Check for a 64-bit object if we didn't find anything.
+  if test -z "$lt_cv_aix_libpath__FC"; then
+    lt_cv_aix_libpath__FC=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  fi
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+  if test -z "$lt_cv_aix_libpath__FC"; then
+    lt_cv_aix_libpath__FC="/usr/lib:/lib"
+  fi
+
+fi
+
+  aix_libpath=$lt_cv_aix_libpath__FC
+fi
+
+	 hardcode_libdir_flag_spec_FC='${wl}-blibpath:$libdir:'"$aix_libpath"
+	  # Warning - without using the other run time loading flags,
+	  # -berok will link without error, but may produce a broken library.
+	  no_undefined_flag_FC=' ${wl}-bernotok'
+	  allow_undefined_flag_FC=' ${wl}-berok'
+	  if test "$with_gnu_ld" = yes; then
+	    # We only use this code for GNU lds that support --whole-archive.
+	    whole_archive_flag_spec_FC='${wl}--whole-archive$convenience ${wl}--no-whole-archive'
+	  else
+	    # Exported symbols can be pulled into shared objects from archives
+	    whole_archive_flag_spec_FC='$convenience'
+	  fi
+	  archive_cmds_need_lc_FC=yes
+	  # This is similar to how AIX traditionally builds its shared libraries.
+	  archive_expsym_cmds_FC="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname'
+	fi
+      fi
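+      # Illustrative excerpt (format approximate) of what the libpath
+      # probe above parses: "dump -H" on the empty test program prints an
+      # Import File Strings table whose entry 0 is the default libpath,
+      # and lt_aix_libpath_sed keeps just that field:
+      #
+      #   ***Import File Strings***
+      #   INDEX  PATH                 BASE    MEMBER
+      #   0      /usr/lib:/lib
+      #   1      libc.a               shr.o
+      #
+      # => aix_libpath=/usr/lib:/lib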
+      ;;
+
+    amigaos*)
+      case $host_cpu in
+      powerpc)
+            # see comment about AmigaOS4 .so support
+            archive_cmds_FC='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+            archive_expsym_cmds_FC=''
+        ;;
+      m68k)
+            archive_cmds_FC='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+            hardcode_libdir_flag_spec_FC='-L$libdir'
+            hardcode_minus_L_FC=yes
+        ;;
+      esac
+      ;;
+
+    bsdi[45]*)
+      export_dynamic_flag_spec_FC=-rdynamic
+      ;;
+
+    cygwin* | mingw* | pw32* | cegcc*)
+      # When not using gcc, we currently assume that we are using
+      # Microsoft Visual C++.
+      # hardcode_libdir_flag_spec is actually meaningless, as there is
+      # no search path for DLLs.
+      case $cc_basename in
+      cl*)
+	# Native MSVC
+	hardcode_libdir_flag_spec_FC=' '
+	allow_undefined_flag_FC=unsupported
+	always_export_symbols_FC=yes
+	file_list_spec_FC='@'
+	# Tell ltmain to make .lib files, not .a files.
+	libext=lib
+	# Tell ltmain to make .dll files, not .so files.
+	shrext_cmds=".dll"
+	# FIXME: Setting linknames here is a bad hack.
+	archive_cmds_FC='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames='
+	archive_expsym_cmds_FC='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+	    sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp;
+	  else
+	    sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp;
+	  fi~
+	  $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
+	  linknames='
+	# The linker will not automatically build a static lib if we build a DLL.
+	# old_archive_from_new_cmds_FC='true'
+	enable_shared_with_static_runtimes_FC=yes
+	exclude_expsyms_FC='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
+	export_symbols_cmds_FC='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols'
+	# Don't use ranlib
+	old_postinstall_cmds_FC='chmod 644 $oldlib'
+	postlink_cmds_FC='lt_outputfile="@OUTPUT@"~
+	  lt_tool_outputfile="@TOOL_OUTPUT@"~
+	  case $lt_outputfile in
+	    *.exe|*.EXE) ;;
+	    *)
+	      lt_outputfile="$lt_outputfile.exe"
+	      lt_tool_outputfile="$lt_tool_outputfile.exe"
+	      ;;
+	  esac~
+	  if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then
+	    $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
+	    $RM "$lt_outputfile.manifest";
+	  fi'
+	;;
+      *)
+	# Assume MSVC wrapper
+	hardcode_libdir_flag_spec_FC=' '
+	allow_undefined_flag_FC=unsupported
+	# Tell ltmain to make .lib files, not .a files.
+	libext=lib
+	# Tell ltmain to make .dll files, not .so files.
+	shrext_cmds=".dll"
+	# FIXME: Setting linknames here is a bad hack.
+	archive_cmds_FC='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames='
+	# The linker will automatically build a .lib file if we build a DLL.
+	old_archive_from_new_cmds_FC='true'
+	# FIXME: Should let the user specify the lib program.
+	old_archive_cmds_FC='lib -OUT:$oldlib$oldobjs$old_deplibs'
+	enable_shared_with_static_runtimes_FC=yes
+	;;
+      esac
+      ;;
+
+    darwin* | rhapsody*)
+
+
+  archive_cmds_need_lc_FC=no
+  hardcode_direct_FC=no
+  hardcode_automatic_FC=yes
+  hardcode_shlibpath_var_FC=unsupported
+  if test "$lt_cv_ld_force_load" = "yes"; then
+    whole_archive_flag_spec_FC='`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`'
+    compiler_needs_object_FC=yes
+  else
+    whole_archive_flag_spec_FC=''
+  fi
+  link_all_deplibs_FC=yes
+  allow_undefined_flag_FC="$_lt_dar_allow_undefined"
+  case $cc_basename in
+     ifort*) _lt_dar_can_shared=yes ;;
+     *) _lt_dar_can_shared=$GCC ;;
+  esac
+  if test "$_lt_dar_can_shared" = "yes"; then
+    output_verbose_link_cmd=func_echo_all
+    archive_cmds_FC="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}"
+    module_cmds_FC="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}"
+    archive_expsym_cmds_FC="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}"
+    module_expsym_cmds_FC="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}"
+
+  else
+  ld_shlibs_FC=no
+  fi
+
+      ;;
+
+    dgux*)
+      archive_cmds_FC='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+      hardcode_libdir_flag_spec_FC='-L$libdir'
+      hardcode_shlibpath_var_FC=no
+      ;;
+
+    # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor
+    # support.  Future versions do this automatically, but an explicit c++rt0.o
+    # does not break anything, and helps significantly (at the cost of a little
+    # extra space).
+    freebsd2.2*)
+      archive_cmds_FC='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o'
+      hardcode_libdir_flag_spec_FC='-R$libdir'
+      hardcode_direct_FC=yes
+      hardcode_shlibpath_var_FC=no
+      ;;
+
+    # Unfortunately, older versions of FreeBSD 2 do not have this feature.
+    freebsd2.*)
+      archive_cmds_FC='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+      hardcode_direct_FC=yes
+      hardcode_minus_L_FC=yes
+      hardcode_shlibpath_var_FC=no
+      ;;
+
+    # FreeBSD 3 and greater use gcc -shared to build shared libraries.
+    freebsd* | dragonfly*)
+      archive_cmds_FC='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+      hardcode_libdir_flag_spec_FC='-R$libdir'
+      hardcode_direct_FC=yes
+      hardcode_shlibpath_var_FC=no
+      ;;
+
+    hpux9*)
+      if test "$GCC" = yes; then
+	archive_cmds_FC='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+      else
+	archive_cmds_FC='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+      fi
+      hardcode_libdir_flag_spec_FC='${wl}+b ${wl}$libdir'
+      hardcode_libdir_separator_FC=:
+      hardcode_direct_FC=yes
+
+      # hardcode_minus_L: not really in the search PATH,
+      # but used as the default location of the library.
+      hardcode_minus_L_FC=yes
+      export_dynamic_flag_spec_FC='${wl}-E'
+      ;;
+
+    hpux10*)
+      if test "$GCC" = yes && test "$with_gnu_ld" = no; then
+	archive_cmds_FC='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+      else
+	archive_cmds_FC='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'
+      fi
+      if test "$with_gnu_ld" = no; then
+	hardcode_libdir_flag_spec_FC='${wl}+b ${wl}$libdir'
+	hardcode_libdir_separator_FC=:
+	hardcode_direct_FC=yes
+	hardcode_direct_absolute_FC=yes
+	export_dynamic_flag_spec_FC='${wl}-E'
+	# hardcode_minus_L: not really in the search PATH,
+	# but used as the default location of the library.
+	hardcode_minus_L_FC=yes
+      fi
+      ;;
+
+    hpux11*)
+      if test "$GCC" = yes && test "$with_gnu_ld" = no; then
+	case $host_cpu in
+	hppa*64*)
+	  archive_cmds_FC='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	ia64*)
+	  archive_cmds_FC='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	*)
+	  archive_cmds_FC='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	esac
+      else
+	case $host_cpu in
+	hppa*64*)
+	  archive_cmds_FC='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	ia64*)
+	  archive_cmds_FC='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	*)
+	archive_cmds_FC='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	esac
+      fi
+      if test "$with_gnu_ld" = no; then
+	hardcode_libdir_flag_spec_FC='${wl}+b ${wl}$libdir'
+	hardcode_libdir_separator_FC=:
+
+	case $host_cpu in
+	hppa*64*|ia64*)
+	  hardcode_direct_FC=no
+	  hardcode_shlibpath_var_FC=no
+	  ;;
+	*)
+	  hardcode_direct_FC=yes
+	  hardcode_direct_absolute_FC=yes
+	  export_dynamic_flag_spec_FC='${wl}-E'
+
+	  # hardcode_minus_L: not really in the search PATH,
+	  # but used as the default location of the library.
+	  hardcode_minus_L_FC=yes
+	  ;;
+	esac
+      fi
+      ;;
+
+    irix5* | irix6* | nonstopux*)
+      if test "$GCC" = yes; then
+	archive_cmds_FC='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+	# Try to use the -exported_symbol ld option; if it does not
+	# work, assume that -exports_file does not work either and
+	# implicitly export all symbols.
+	# This should be the same for all languages, so no per-tag cache variable.
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5
+$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... " >&6; }
+if ${lt_cv_irix_exported_symbol+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  save_LDFLAGS="$LDFLAGS"
+	   LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null"
+	   cat > conftest.$ac_ext <<_ACEOF
+
+      subroutine foo
+      end
+_ACEOF
+if ac_fn_fc_try_link "$LINENO"; then :
+  lt_cv_irix_exported_symbol=yes
+else
+  lt_cv_irix_exported_symbol=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+           LDFLAGS="$save_LDFLAGS"
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5
+$as_echo "$lt_cv_irix_exported_symbol" >&6; }
+	if test "$lt_cv_irix_exported_symbol" = yes; then
+          archive_expsym_cmds_FC='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib'
+	fi
+      else
+	archive_cmds_FC='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+	archive_expsym_cmds_FC='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib'
+      fi
+      archive_cmds_need_lc_FC='no'
+      hardcode_libdir_flag_spec_FC='${wl}-rpath ${wl}$libdir'
+      hardcode_libdir_separator_FC=:
+      inherit_rpath_FC=yes
+      link_all_deplibs_FC=yes
+      ;;
+
+    netbsd* | netbsdelf*-gnu)
+      if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+	archive_cmds_FC='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'  # a.out
+      else
+	archive_cmds_FC='$LD -shared -o $lib $libobjs $deplibs $linker_flags'      # ELF
+      fi
+      hardcode_libdir_flag_spec_FC='-R$libdir'
+      hardcode_direct_FC=yes
+      hardcode_shlibpath_var_FC=no
+      ;;
+
+    newsos6)
+      archive_cmds_FC='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+      hardcode_direct_FC=yes
+      hardcode_libdir_flag_spec_FC='${wl}-rpath ${wl}$libdir'
+      hardcode_libdir_separator_FC=:
+      hardcode_shlibpath_var_FC=no
+      ;;
+
+    *nto* | *qnx*)
+      ;;
+
+    openbsd*)
+      if test -f /usr/libexec/ld.so; then
+	hardcode_direct_FC=yes
+	hardcode_shlibpath_var_FC=no
+	hardcode_direct_absolute_FC=yes
+	if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+	  archive_cmds_FC='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+	  archive_expsym_cmds_FC='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols'
+	  hardcode_libdir_flag_spec_FC='${wl}-rpath,$libdir'
+	  export_dynamic_flag_spec_FC='${wl}-E'
+	else
+	  case $host_os in
+	   openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*)
+	     archive_cmds_FC='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+	     hardcode_libdir_flag_spec_FC='-R$libdir'
+	     ;;
+	   *)
+	     archive_cmds_FC='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+	     hardcode_libdir_flag_spec_FC='${wl}-rpath,$libdir'
+	     ;;
+	  esac
+	fi
+      else
+	ld_shlibs_FC=no
+      fi
+      ;;
+
+    os2*)
+      hardcode_libdir_flag_spec_FC='-L$libdir'
+      hardcode_minus_L_FC=yes
+      allow_undefined_flag_FC=unsupported
+      archive_cmds_FC='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~echo DATA >> $output_objdir/$libname.def~echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def'
+      old_archive_from_new_cmds_FC='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def'
+      ;;
+
+    osf3*)
+      if test "$GCC" = yes; then
+	allow_undefined_flag_FC=' ${wl}-expect_unresolved ${wl}\*'
+	archive_cmds_FC='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+      else
+	allow_undefined_flag_FC=' -expect_unresolved \*'
+	archive_cmds_FC='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+      fi
+      archive_cmds_need_lc_FC='no'
+      hardcode_libdir_flag_spec_FC='${wl}-rpath ${wl}$libdir'
+      hardcode_libdir_separator_FC=:
+      ;;
+
+    osf4* | osf5*)	# as osf3* with the addition of -msym flag
+      if test "$GCC" = yes; then
+	allow_undefined_flag_FC=' ${wl}-expect_unresolved ${wl}\*'
+	archive_cmds_FC='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+	hardcode_libdir_flag_spec_FC='${wl}-rpath ${wl}$libdir'
+      else
+	allow_undefined_flag_FC=' -expect_unresolved \*'
+	archive_cmds_FC='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+	archive_expsym_cmds_FC='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~
+	$CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp'
+
+	# Both the C and C++ compiler drivers support -rpath directly
+	hardcode_libdir_flag_spec_FC='-rpath $libdir'
+      fi
+      archive_cmds_need_lc_FC='no'
+      hardcode_libdir_separator_FC=:
+      ;;
+
+    solaris*)
+      no_undefined_flag_FC=' -z defs'
+      if test "$GCC" = yes; then
+	wlarc='${wl}'
+	archive_cmds_FC='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+	archive_expsym_cmds_FC='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+	  $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+      else
+	case `$CC -V 2>&1` in
+	*"Compilers 5.0"*)
+	  wlarc=''
+	  archive_cmds_FC='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags'
+	  archive_expsym_cmds_FC='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+	  $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp'
+	  ;;
+	*)
+	  wlarc='${wl}'
+	  archive_cmds_FC='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags'
+	  archive_expsym_cmds_FC='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+	  $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+	  ;;
+	esac
+      fi
+      hardcode_libdir_flag_spec_FC='-R$libdir'
+      hardcode_shlibpath_var_FC=no
+      case $host_os in
+      solaris2.[0-5] | solaris2.[0-5].*) ;;
+      *)
+	# The compiler driver will combine and reorder linker options,
+	# but understands `-z linker_flag'.  GCC discards it without `$wl',
+	# but is careful enough not to reorder.
+	# Supported since Solaris 2.6 (maybe 2.5.1?)
+	if test "$GCC" = yes; then
+	  whole_archive_flag_spec_FC='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract'
+	else
+	  whole_archive_flag_spec_FC='-z allextract$convenience -z defaultextract'
+	fi
+	;;
+      esac
+      link_all_deplibs_FC=yes
+      ;;
+
+    sunos4*)
+      if test "x$host_vendor" = xsequent; then
+	# Use $CC to link under sequent, because it throws in some extra .o
+	# files that make .init and .fini sections work.
+	archive_cmds_FC='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags'
+      else
+	archive_cmds_FC='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags'
+      fi
+      hardcode_libdir_flag_spec_FC='-L$libdir'
+      hardcode_direct_FC=yes
+      hardcode_minus_L_FC=yes
+      hardcode_shlibpath_var_FC=no
+      ;;
+
+    sysv4)
+      case $host_vendor in
+	sni)
+	  archive_cmds_FC='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+	  hardcode_direct_FC=yes # is this really true???
+	;;
+	siemens)
+	  ## LD is ld; it makes a PLAMLIB.
+	  ## CC just makes a GrossModule.
+	  archive_cmds_FC='$LD -G -o $lib $libobjs $deplibs $linker_flags'
+	  reload_cmds_FC='$CC -r -o $output$reload_objs'
+	  hardcode_direct_FC=no
+        ;;
+	motorola)
+	  archive_cmds_FC='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+	  hardcode_direct_FC=no # Motorola manual says yes, but my tests say they lie
+	;;
+      esac
+      runpath_var='LD_RUN_PATH'
+      hardcode_shlibpath_var_FC=no
+      ;;
+
+    sysv4.3*)
+      archive_cmds_FC='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+      hardcode_shlibpath_var_FC=no
+      export_dynamic_flag_spec_FC='-Bexport'
+      ;;
+
+    sysv4*MP*)
+      if test -d /usr/nec; then
+	archive_cmds_FC='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+	hardcode_shlibpath_var_FC=no
+	runpath_var=LD_RUN_PATH
+	hardcode_runpath_var=yes
+	ld_shlibs_FC=yes
+      fi
+      ;;
+
+    sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*)
+      no_undefined_flag_FC='${wl}-z,text'
+      archive_cmds_need_lc_FC=no
+      hardcode_shlibpath_var_FC=no
+      runpath_var='LD_RUN_PATH'
+
+      if test "$GCC" = yes; then
+	archive_cmds_FC='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	archive_expsym_cmds_FC='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+      else
+	archive_cmds_FC='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	archive_expsym_cmds_FC='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+      fi
+      ;;
+
+    sysv5* | sco3.2v5* | sco5v6*)
+      # Note: We can NOT use -z defs as we might desire, because we do not
+      # link with -lc, and that would cause any symbols used from libc to
+      # always be unresolved, which means just about no library would
+      # ever link correctly.  If we're not using GNU ld we use -z text
+      # though, which does catch some bad symbols but isn't as heavy-handed
+      # as -z defs.
+      no_undefined_flag_FC='${wl}-z,text'
+      allow_undefined_flag_FC='${wl}-z,nodefs'
+      archive_cmds_need_lc_FC=no
+      hardcode_shlibpath_var_FC=no
+      hardcode_libdir_flag_spec_FC='${wl}-R,$libdir'
+      hardcode_libdir_separator_FC=':'
+      link_all_deplibs_FC=yes
+      export_dynamic_flag_spec_FC='${wl}-Bexport'
+      runpath_var='LD_RUN_PATH'
+
+      if test "$GCC" = yes; then
+	archive_cmds_FC='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	archive_expsym_cmds_FC='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+      else
+	archive_cmds_FC='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	archive_expsym_cmds_FC='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+      fi
+      ;;
+
+    uts4*)
+      archive_cmds_FC='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+      hardcode_libdir_flag_spec_FC='-L$libdir'
+      hardcode_shlibpath_var_FC=no
+      ;;
+
+    *)
+      ld_shlibs_FC=no
+      ;;
+    esac
+
+    if test x$host_vendor = xsni; then
+      case $host in
+      sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+	export_dynamic_flag_spec_FC='${wl}-Blargedynsym'
+	;;
+      esac
+    fi
+  fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_FC" >&5
+$as_echo "$ld_shlibs_FC" >&6; }
+test "$ld_shlibs_FC" = no && can_build_shared=no
+
+with_gnu_ld_FC=$with_gnu_ld
+
+
+
+
+
+
+#
+# Do we need to explicitly link libc?
+#
+case "x$archive_cmds_need_lc_FC" in
+x|xyes)
+  # Assume -lc should be added
+  archive_cmds_need_lc_FC=yes
+
+  if test "$enable_shared" = yes && test "$GCC" = yes; then
+    case $archive_cmds_FC in
+    *'~'*)
+      # FIXME: we may have to deal with multi-command sequences.
+      ;;
+    '$CC '*)
+      # Test whether the compiler implicitly links with -lc since on some
+      # systems, -lgcc has to come before -lc. If gcc already passes -lc
+      # to ld, don't add -lc before -lgcc.
+      { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5
+$as_echo_n "checking whether -lc should be explicitly linked in... " >&6; }
+if ${lt_cv_archive_cmds_need_lc_FC+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  $RM conftest*
+	echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+	if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+  (eval $ac_compile) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } 2>conftest.err; then
+	  soname=conftest
+	  lib=conftest
+	  libobjs=conftest.$ac_objext
+	  deplibs=
+	  wl=$lt_prog_compiler_wl_FC
+	  pic_flag=$lt_prog_compiler_pic_FC
+	  compiler_flags=-v
+	  linker_flags=-v
+	  verstring=
+	  output_objdir=.
+	  libname=conftest
+	  lt_save_allow_undefined_flag=$allow_undefined_flag_FC
+	  allow_undefined_flag_FC=
+	  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds_FC 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5
+  (eval $archive_cmds_FC 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+	  then
+	    lt_cv_archive_cmds_need_lc_FC=no
+	  else
+	    lt_cv_archive_cmds_need_lc_FC=yes
+	  fi
+	  allow_undefined_flag_FC=$lt_save_allow_undefined_flag
+	else
+	  cat conftest.err 1>&5
+	fi
+	$RM conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc_FC" >&5
+$as_echo "$lt_cv_archive_cmds_need_lc_FC" >&6; }
+      archive_cmds_need_lc_FC=$lt_cv_archive_cmds_need_lc_FC
+      ;;
+    esac
+  fi
+  ;;
+esac
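+# Standalone sketch of the test above (hypothetical compiler): replaying
+# the link with compiler_flags=-v makes the driver print the real link
+# line, and finding " -lc " there means libtool need not add -lc itself:
+#
+#   gfortran -shared -v conftest.o -o conftest.so 2>&1 \
+#     | grep ' -lc ' >/dev/null && need_lc=no || need_lc=yes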
+
+
+
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5
+$as_echo_n "checking dynamic linker characteristics... " >&6; }
+
+library_names_spec=
+libname_spec='lib$name'
+soname_spec=
+shrext_cmds=".so"
+postinstall_cmds=
+postuninstall_cmds=
+finish_cmds=
+finish_eval=
+shlibpath_var=
+shlibpath_overrides_runpath=unknown
+version_type=none
+dynamic_linker="$host_os ld.so"
+sys_lib_dlsearch_path_spec="/lib /usr/lib"
+need_lib_prefix=unknown
+hardcode_into_libs=no
+
+# when you set need_version to no, make sure it does not cause -set_version
+# flags to be left without arguments
+need_version=unknown
+
+case $host_os in
+aix3*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a'
+  shlibpath_var=LIBPATH
+
+  # AIX 3 has no versioning support, so we append a major version to the name.
+  soname_spec='${libname}${release}${shared_ext}$major'
+  ;;
+
+aix[4-9]*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  hardcode_into_libs=yes
+  if test "$host_cpu" = ia64; then
+    # AIX 5 supports IA64
+    library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}'
+    shlibpath_var=LD_LIBRARY_PATH
+  else
+    # With GCC up to 2.95.x, collect2 would create an import file
+    # for dependent libraries.  The import file would start with
+    # the line `#! .'.  This would cause the generated library to
+    # depend on `.', always an invalid library.  This was fixed in
+    # development snapshots of GCC prior to 3.0.
+    case $host_os in
+      aix4 | aix4.[01] | aix4.[01].*)
+      if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)'
+	   echo ' yes '
+	   echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then
+	:
+      else
+	can_build_shared=no
+      fi
+      ;;
+    esac
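+    # The check above asks the preprocessor itself for the GCC version:
+    # the " yes " line survives preprocessing only when the #if holds.
+    # The same trick in isolation (any GCC-compatible cc):
+    #
+    #   printf '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)\n yes \n#endif\n' \
+    #     | cc -E - | grep yes >/dev/null && echo fixed-collect2 || echo old-collect2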
+    # AIX (on Power*) has no versioning support, so currently we cannot
+    # hardcode the correct soname into the executable.  Perhaps versioning
+    # support can be added to collect2, so additional links could be useful
+    # in the future.
+    if test "$aix_use_runtimelinking" = yes; then
+      # If using run time linking (on AIX 4.2 or later) use lib<name>.so
+      # instead of lib<name>.a to let people know that these are not
+      # typical AIX shared libraries.
+      library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+    else
+      # We preserve .a as extension for shared libraries through AIX4.2
+      # and later when we are not doing run time linking.
+      library_names_spec='${libname}${release}.a $libname.a'
+      soname_spec='${libname}${release}${shared_ext}$major'
+    fi
+    shlibpath_var=LIBPATH
+  fi
+  ;;
+
+amigaos*)
+  case $host_cpu in
+  powerpc)
+    # Since July 2007 AmigaOS4 officially supports .so libraries.
+    # When compiling the executable, add -use-dynld -Lsobjs: to the compile line.
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+    ;;
+  m68k)
+    library_names_spec='$libname.ixlibrary $libname.a'
+    # Create ${libname}_ixlibrary.a entries in /sys/libs.
+    finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
+    ;;
+  esac
+  ;;
+
+beos*)
+  library_names_spec='${libname}${shared_ext}'
+  dynamic_linker="$host_os ld.so"
+  shlibpath_var=LIBRARY_PATH
+  ;;
+
+bsdi[45]*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
+  sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib"
+  # the default ld.so.conf also contains /usr/contrib/lib and
+  # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow
+  # libtool to hard-code these into programs
+  ;;
+
+cygwin* | mingw* | pw32* | cegcc*)
+  version_type=windows
+  shrext_cmds=".dll"
+  need_version=no
+  need_lib_prefix=no
+
+  case $GCC,$cc_basename in
+  yes,*)
+    # gcc
+    library_names_spec='$libname.dll.a'
+    # DLL is installed to $(libdir)/../bin by postinstall_cmds
+    postinstall_cmds='base_file=`basename \${file}`~
+      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~
+      dldir=$destdir/`dirname \$dlpath`~
+      test -d \$dldir || mkdir -p \$dldir~
+      $install_prog $dir/$dlname \$dldir/$dlname~
+      chmod a+x \$dldir/$dlname~
+      if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
+        eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
+      fi'
+    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+      dlpath=$dir/\$dldll~
+       $RM \$dlpath'
+    shlibpath_overrides_runpath=yes
+
+    case $host_os in
+    cygwin*)
+      # Cygwin DLLs use 'cyg' prefix rather than 'lib'
+      soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+
+      ;;
+    mingw* | cegcc*)
+      # MinGW DLLs use traditional 'lib' prefix
+      soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+      ;;
+    pw32*)
+      # pw32 DLLs use 'pw' prefix rather than 'lib'
+      library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+      ;;
+    esac
+    dynamic_linker='Win32 ld.exe'
+    ;;
+
+  *,cl*)
+    # Native MSVC
+    libname_spec='$name'
+    soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+    library_names_spec='${libname}.dll.lib'
+
+    case $build_os in
+    mingw*)
+      sys_lib_search_path_spec=
+      lt_save_ifs=$IFS
+      IFS=';'
+      for lt_path in $LIB
+      do
+        IFS=$lt_save_ifs
+        # Let DOS variable expansion print the short 8.3 style file name.
+        lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"`
+        sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path"
+      done
+      IFS=$lt_save_ifs
+      # Convert to MSYS style.
+      sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'`
+      ;;
+    cygwin*)
+      # Convert to unix form, then to dos form, then back to unix form
+      # but this time dos style (no spaces!) so that the unix form looks
+      # like /cygdrive/c/PROGRA~1:/cygdr...
+      sys_lib_search_path_spec=`cygpath --path --unix "$LIB"`
+      sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null`
+      sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+      ;;
+    *)
+      sys_lib_search_path_spec="$LIB"
+      if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then
+        # It is most probably a Windows format PATH.
+        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'`
+      else
+        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+      fi
+      # FIXME: find the short name or the path components, as spaces are
+      # common. (e.g. "Program Files" -> "PROGRA~1")
+      ;;
+    esac
+
+    # DLL is installed to $(libdir)/../bin by postinstall_cmds
+    postinstall_cmds='base_file=`basename \${file}`~
+      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~
+      dldir=$destdir/`dirname \$dlpath`~
+      test -d \$dldir || mkdir -p \$dldir~
+      $install_prog $dir/$dlname \$dldir/$dlname'
+    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+      dlpath=$dir/\$dldll~
+       $RM \$dlpath'
+    shlibpath_overrides_runpath=yes
+    dynamic_linker='Win32 link.exe'
+    ;;
+
+  *)
+    # Assume MSVC wrapper
+    library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib'
+    dynamic_linker='Win32 ld.exe'
+    ;;
+  esac
+  # FIXME: first we should search . and the directory the executable is in
+  shlibpath_var=PATH
+  ;;
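+# Illustrative sketch (not generated by libtool; the "foo" names are
+# hypothetical): under the Cygwin GCC branch above, a library "foo" yields an
+# import library plus a runtime DLL that postinstall_cmds moves next to the
+# executables:
+#
+#   $(libdir)/libfoo.dll.a         # import library (library_names_spec)
+#   $(libdir)/../bin/cygfoo-1.dll  # runtime DLL (soname_spec, 'cyg' prefix)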
+
+darwin* | rhapsody*)
+  dynamic_linker="$host_os dyld"
+  version_type=darwin
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext'
+  soname_spec='${libname}${release}${major}$shared_ext'
+  shlibpath_overrides_runpath=yes
+  shlibpath_var=DYLD_LIBRARY_PATH
+  shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`'
+
+  sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib'
+  ;;
+
+dgux*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  ;;
+
+freebsd* | dragonfly*)
+  # DragonFly does not have aout.  When/if they implement a new
+  # versioning mechanism, adjust this.
+  if test -x /usr/bin/objformat; then
+    objformat=`/usr/bin/objformat`
+  else
+    case $host_os in
+    freebsd[23].*) objformat=aout ;;
+    *) objformat=elf ;;
+    esac
+  fi
+  version_type=freebsd-$objformat
+  case $version_type in
+    freebsd-elf*)
+      library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}'
+      need_version=no
+      need_lib_prefix=no
+      ;;
+    freebsd-*)
+      library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix'
+      need_version=yes
+      ;;
+  esac
+  shlibpath_var=LD_LIBRARY_PATH
+  case $host_os in
+  freebsd2.*)
+    shlibpath_overrides_runpath=yes
+    ;;
+  freebsd3.[01]* | freebsdelf3.[01]*)
+    shlibpath_overrides_runpath=yes
+    hardcode_into_libs=yes
+    ;;
+  freebsd3.[2-9]* | freebsdelf3.[2-9]* | \
+  freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1)
+    shlibpath_overrides_runpath=no
+    hardcode_into_libs=yes
+    ;;
+  *) # from 4.6 on, and DragonFly
+    shlibpath_overrides_runpath=yes
+    hardcode_into_libs=yes
+    ;;
+  esac
+  ;;
+
+haiku*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  dynamic_linker="$host_os runtime_loader"
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib'
+  hardcode_into_libs=yes
+  ;;
+
+hpux9* | hpux10* | hpux11*)
+  # Give a soname corresponding to the major version so that dld.sl refuses to
+  # link against other versions.
+  version_type=sunos
+  need_lib_prefix=no
+  need_version=no
+  case $host_cpu in
+  ia64*)
+    shrext_cmds='.so'
+    hardcode_into_libs=yes
+    dynamic_linker="$host_os dld.so"
+    shlibpath_var=LD_LIBRARY_PATH
+    shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+    soname_spec='${libname}${release}${shared_ext}$major'
+    if test "X$HPUX_IA64_MODE" = X32; then
+      sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib"
+    else
+      sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64"
+    fi
+    sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+    ;;
+  hppa*64*)
+    shrext_cmds='.sl'
+    hardcode_into_libs=yes
+    dynamic_linker="$host_os dld.sl"
+    shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH
+    shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+    soname_spec='${libname}${release}${shared_ext}$major'
+    sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64"
+    sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+    ;;
+  *)
+    shrext_cmds='.sl'
+    dynamic_linker="$host_os dld.sl"
+    shlibpath_var=SHLIB_PATH
+    shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+    soname_spec='${libname}${release}${shared_ext}$major'
+    ;;
+  esac
+  # HP-UX runs *really* slowly unless shared libraries are mode 555, ...
+  postinstall_cmds='chmod 555 $lib'
+  # or fails outright, so override atomically:
+  install_override_mode=555
+  ;;
+
+interix[3-9]*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  ;;
+
+irix5* | irix6* | nonstopux*)
+  case $host_os in
+    nonstopux*) version_type=nonstopux ;;
+    *)
+	if test "$lt_cv_prog_gnu_ld" = yes; then
+		version_type=linux # correct to gnu/linux during the next big refactor
+	else
+		version_type=irix
+	fi ;;
+  esac
+  need_lib_prefix=no
+  need_version=no
+  soname_spec='${libname}${release}${shared_ext}$major'
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}'
+  case $host_os in
+  irix5* | nonstopux*)
+    libsuff= shlibsuff=
+    ;;
+  *)
+    case $LD in # libtool.m4 will add one of these switches to LD
+    *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ")
+      libsuff= shlibsuff= libmagic=32-bit;;
+    *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ")
+      libsuff=32 shlibsuff=N32 libmagic=N32;;
+    *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ")
+      libsuff=64 shlibsuff=64 libmagic=64-bit;;
+    *) libsuff= shlibsuff= libmagic=never-match;;
+    esac
+    ;;
+  esac
+  shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
+  shlibpath_overrides_runpath=no
+  sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}"
+  sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}"
+  hardcode_into_libs=yes
+  ;;
+
+# No shared lib support for Linux oldld, aout, or coff.
+linux*oldld* | linux*aout* | linux*coff*)
+  dynamic_linker=no
+  ;;
+
+# This must be glibc/ELF.
+linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+
+  # Some binutils ld are patched to set DT_RUNPATH
+  if ${lt_cv_shlibpath_overrides_runpath+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_shlibpath_overrides_runpath=no
+    save_LDFLAGS=$LDFLAGS
+    save_libdir=$libdir
+    eval "libdir=/foo; wl=\"$lt_prog_compiler_wl_FC\"; \
+	 LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec_FC\""
+    cat > conftest.$ac_ext <<_ACEOF
+      program main
+
+      end
+_ACEOF
+if ac_fn_fc_try_link "$LINENO"; then :
+  if  ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then :
+  lt_cv_shlibpath_overrides_runpath=yes
+fi
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+    LDFLAGS=$save_LDFLAGS
+    libdir=$save_libdir
+
+fi
+
+  shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath
+
+  # This implies no fast_install, which is unacceptable.
+  # Some rework will be needed to allow for fast_install
+  # before this can be enabled.
+  hardcode_into_libs=yes
+
+  # Append ld.so.conf contents to the search path
+  if test -f /etc/ld.so.conf; then
+    lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[	 ]*hwcap[	 ]/d;s/[:,	]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '`
+    sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra"
+  fi
+
+  # We used to test for /lib/ld.so.1 and disable shared libraries on
+  # powerpc, because MkLinux only supported shared libraries with the
+  # GNU dynamic linker.  Since this was broken with cross compilers,
+  # most powerpc-linux boxes support dynamic linking these days and
+  # people can always --disable-shared, the test was removed, and we
+  # assume the GNU/Linux dynamic linker is in use.
+  dynamic_linker='GNU/Linux ld.so'
+  ;;
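+# Illustrative sketch (not generated by libtool; the librsb values are
+# hypothetical): once the version variables are filled in, the GNU/Linux
+# specs above evaluate roughly as follows:
+#
+#   libname=librsb; release=; shared_ext=.so; versuffix=.1.2.0; major=.1
+#   eval "echo $library_names_spec"  # librsb.so.1.2.0 librsb.so.1 librsb.so
+#   eval "echo $soname_spec"         # librsb.so.1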
+
+netbsdelf*-gnu)
+  version_type=linux
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  dynamic_linker='NetBSD ld.elf_so'
+  ;;
+
+netbsd*)
+  version_type=sunos
+  need_lib_prefix=no
+  need_version=no
+  if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+    finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+    dynamic_linker='NetBSD (a.out) ld.so'
+  else
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+    soname_spec='${libname}${release}${shared_ext}$major'
+    dynamic_linker='NetBSD ld.elf_so'
+  fi
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  hardcode_into_libs=yes
+  ;;
+
+newsos6)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  ;;
+
+*nto* | *qnx*)
+  version_type=qnx
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  dynamic_linker='ldqnx.so'
+  ;;
+
+openbsd*)
+  version_type=sunos
+  sys_lib_dlsearch_path_spec="/usr/lib"
+  need_lib_prefix=no
+  # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs.
+  case $host_os in
+    openbsd3.3 | openbsd3.3.*)	need_version=yes ;;
+    *)				need_version=no  ;;
+  esac
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+  finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+    case $host_os in
+      openbsd2.[89] | openbsd2.[89].*)
+	shlibpath_overrides_runpath=no
+	;;
+      *)
+	shlibpath_overrides_runpath=yes
+	;;
+      esac
+  else
+    shlibpath_overrides_runpath=yes
+  fi
+  ;;
+
+os2*)
+  libname_spec='$name'
+  shrext_cmds=".dll"
+  need_lib_prefix=no
+  library_names_spec='$libname${shared_ext} $libname.a'
+  dynamic_linker='OS/2 ld.exe'
+  shlibpath_var=LIBPATH
+  ;;
+
+osf3* | osf4* | osf5*)
+  version_type=osf
+  need_lib_prefix=no
+  need_version=no
+  soname_spec='${libname}${release}${shared_ext}$major'
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  shlibpath_var=LD_LIBRARY_PATH
+  sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
+  sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec"
+  ;;
+
+rdos*)
+  dynamic_linker=no
+  ;;
+
+solaris*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  hardcode_into_libs=yes
+  # ldd complains unless libraries are executable
+  postinstall_cmds='chmod +x $lib'
+  ;;
+
+sunos4*)
+  version_type=sunos
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+  finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  if test "$with_gnu_ld" = yes; then
+    need_lib_prefix=no
+  fi
+  need_version=yes
+  ;;
+
+sysv4 | sysv4.3*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  case $host_vendor in
+    sni)
+      shlibpath_overrides_runpath=no
+      need_lib_prefix=no
+      runpath_var=LD_RUN_PATH
+      ;;
+    siemens)
+      need_lib_prefix=no
+      ;;
+    motorola)
+      need_lib_prefix=no
+      need_version=no
+      shlibpath_overrides_runpath=no
+      sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib'
+      ;;
+  esac
+  ;;
+
+sysv4*MP*)
+  if test -d /usr/nec; then
+    version_type=linux # correct to gnu/linux during the next big refactor
+    library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}'
+    soname_spec='$libname${shared_ext}.$major'
+    shlibpath_var=LD_LIBRARY_PATH
+  fi
+  ;;
+
+sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+  version_type=freebsd-elf
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  hardcode_into_libs=yes
+  if test "$with_gnu_ld" = yes; then
+    sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib'
+  else
+    sys_lib_search_path_spec='/usr/ccs/lib /usr/lib'
+    case $host_os in
+      sco3.2v5*)
+        sys_lib_search_path_spec="$sys_lib_search_path_spec /lib"
+	;;
+    esac
+  fi
+  sys_lib_dlsearch_path_spec='/usr/lib'
+  ;;
+
+tpf*)
+  # TPF is a cross-target only.  Preferred cross-host = GNU/Linux.
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  ;;
+
+uts4*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  ;;
+
+*)
+  dynamic_linker=no
+  ;;
+esac
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5
+$as_echo "$dynamic_linker" >&6; }
+test "$dynamic_linker" = no && can_build_shared=no
+
+variables_saved_for_relink="PATH $shlibpath_var $runpath_var"
+if test "$GCC" = yes; then
+  variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH"
+fi
+
+if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then
+  sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec"
+fi
+if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then
+  sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec"
+fi
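+# Illustrative note (not generated by libtool): because the two tests above
+# honor the lt_cv_* cache variables, the detected paths can be pre-seeded on
+# the configure command line, e.g. (hypothetical paths):
+#
+#   ./configure lt_cv_sys_lib_dlsearch_path_spec="/lib /usr/lib /opt/lib"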
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5
+$as_echo_n "checking how to hardcode library paths into programs... " >&6; }
+hardcode_action_FC=
+if test -n "$hardcode_libdir_flag_spec_FC" ||
+   test -n "$runpath_var_FC" ||
+   test "X$hardcode_automatic_FC" = "Xyes" ; then
+
+  # We can hardcode non-existent directories.
+  if test "$hardcode_direct_FC" != no &&
+     # If the only mechanism to avoid hardcoding is shlibpath_var, we
+     # have to relink; otherwise we might link with an installed library
+     # when we should be linking with a yet-to-be-installed one.
+     ## test "$_LT_TAGVAR(hardcode_shlibpath_var, FC)" != no &&
+     test "$hardcode_minus_L_FC" != no; then
+    # Linking always hardcodes the temporary library directory.
+    hardcode_action_FC=relink
+  else
+    # We can link without hardcoding, and we can hardcode non-existent dirs.
+    hardcode_action_FC=immediate
+  fi
+else
+  # We cannot hardcode anything, or else we can only hardcode existing
+  # directories.
+  hardcode_action_FC=unsupported
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action_FC" >&5
+$as_echo "$hardcode_action_FC" >&6; }
+
+if test "$hardcode_action_FC" = relink ||
+   test "$inherit_rpath_FC" = yes; then
+  # Fast installation is not supported
+  enable_fast_install=no
+elif test "$shlibpath_overrides_runpath" = yes ||
+     test "$enable_shared" = no; then
+  # Fast installation is not necessary
+  enable_fast_install=needless
+fi
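+# Illustrative sketch (not generated by libtool; the program name is
+# hypothetical): the effect of the chosen hardcode action can be inspected
+# on a built binary with the same tool the cached runpath check above uses:
+#
+#   $OBJDUMP -p rsbench | grep -E 'RPATH|RUNPATH'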
+
+
+
+
+
+
+
+  fi # test -n "$compiler"
+
+  GCC=$lt_save_GCC
+  CC=$lt_save_CC
+  CFLAGS=$lt_save_CFLAGS
+fi # test "$_lt_disable_FC" != yes
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+
+  OPENMP_CFLAGS=
+  # Check whether --enable-openmp was given.
+if test "${enable_openmp+set}" = set; then :
+  enableval=$enable_openmp;
+fi
+
+  if test "$enable_openmp" != no; then
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to support OpenMP" >&5
+$as_echo_n "checking for $CC option to support OpenMP... " >&6; }
+if ${ac_cv_prog_c_openmp+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+#ifndef _OPENMP
+ choke me
+#endif
+#include <omp.h>
+int main () { return omp_get_num_threads (); }
+
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  ac_cv_prog_c_openmp='none needed'
+else
+  ac_cv_prog_c_openmp='unsupported'
+	  for ac_option in -fopenmp -xopenmp -openmp -mp -omp -qsmp=omp -homp \
+	                   -Popenmp --openmp; do
+	    ac_save_CFLAGS=$CFLAGS
+	    CFLAGS="$CFLAGS $ac_option"
+	    cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+#ifndef _OPENMP
+ choke me
+#endif
+#include <omp.h>
+int main () { return omp_get_num_threads (); }
+
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  ac_cv_prog_c_openmp=$ac_option
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+	    CFLAGS=$ac_save_CFLAGS
+	    if test "$ac_cv_prog_c_openmp" != unsupported; then
+	      break
+	    fi
+	  done
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_c_openmp" >&5
+$as_echo "$ac_cv_prog_c_openmp" >&6; }
+    case $ac_cv_prog_c_openmp in #(
+      "none needed" | unsupported)
+	;; #(
+      *)
+	OPENMP_CFLAGS=$ac_cv_prog_c_openmp ;;
+    esac
+  fi
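+# Illustrative sketch (not generated by autoconf; mirrors the probe above,
+# file names hypothetical): the accepted flag lands in OPENMP_CFLAGS and
+# would be used like this:
+#
+#   cat > omp_probe.c <<'EOF'
+#   #include <omp.h>
+#   int main (void) { return omp_get_num_threads () > 0 ? 0 : 1; }
+#   EOF
+#   $CC $OPENMP_CFLAGS omp_probe.c -o omp_probe && ./omp_probe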
+
+
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+if test -z "$CXX"; then
+  if test -n "$CCC"; then
+    CXX=$CCC
+  else
+    if test -n "$ac_tool_prefix"; then
+  for ac_prog in xlC xlC_r7 xlC_r4 xlC_r g++ pgCC
+  do
+    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CXX+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$CXX"; then
+  ac_cv_prog_CXX="$CXX" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_CXX="$ac_tool_prefix$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+CXX=$ac_cv_prog_CXX
+if test -n "$CXX"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5
+$as_echo "$CXX" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+    test -n "$CXX" && break
+  done
+fi
+if test -z "$CXX"; then
+  ac_ct_CXX=$CXX
+  for ac_prog in xlC xlC_r7 xlC_r4 xlC_r g++ pgCC
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_CXX+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_ct_CXX"; then
+  ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_CXX="$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CXX=$ac_cv_prog_ac_ct_CXX
+if test -n "$ac_ct_CXX"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5
+$as_echo "$ac_ct_CXX" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  test -n "$ac_ct_CXX" && break
+done
+
+  if test "x$ac_ct_CXX" = x; then
+    CXX="g++"
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    CXX=$ac_ct_CXX
+  fi
+fi
+
+  fi
+fi
+# Provide some information about the compiler.
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+for ac_option in --version -v -V -qversion; do
+  { { ac_try="$ac_compiler $ac_option >&5"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_compiler $ac_option >&5") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    sed '10a\
+... rest of stderr output deleted ...
+         10q' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+  fi
+  rm -f conftest.er1 conftest.err
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+done
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5
+$as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; }
+if ${ac_cv_cxx_compiler_gnu+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+#ifndef __GNUC__
+       choke me
+#endif
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+  ac_compiler_gnu=yes
+else
+  ac_compiler_gnu=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_cxx_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5
+$as_echo "$ac_cv_cxx_compiler_gnu" >&6; }
+if test $ac_compiler_gnu = yes; then
+  GXX=yes
+else
+  GXX=
+fi
+ac_test_CXXFLAGS=${CXXFLAGS+set}
+ac_save_CXXFLAGS=$CXXFLAGS
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5
+$as_echo_n "checking whether $CXX accepts -g... " >&6; }
+if ${ac_cv_prog_cxx_g+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_save_cxx_werror_flag=$ac_cxx_werror_flag
+   ac_cxx_werror_flag=yes
+   ac_cv_prog_cxx_g=no
+   CXXFLAGS="-g"
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+  ac_cv_prog_cxx_g=yes
+else
+  CXXFLAGS=""
+      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+
+else
+  ac_cxx_werror_flag=$ac_save_cxx_werror_flag
+	 CXXFLAGS="-g"
+	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+  ac_cv_prog_cxx_g=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+   ac_cxx_werror_flag=$ac_save_cxx_werror_flag
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5
+$as_echo "$ac_cv_prog_cxx_g" >&6; }
+if test "$ac_test_CXXFLAGS" = set; then
+  CXXFLAGS=$ac_save_CXXFLAGS
+elif test $ac_cv_prog_cxx_g = yes; then
+  if test "$GXX" = yes; then
+    CXXFLAGS="-g -O2"
+  else
+    CXXFLAGS="-g"
+  fi
+else
+  if test "$GXX" = yes; then
+    CXXFLAGS="-O2"
+  else
+    CXXFLAGS=
+  fi
+fi
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+depcc="$CXX"  am_compiler_list=
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
+$as_echo_n "checking dependency style of $depcc... " >&6; }
+if ${am_cv_CXX_dependencies_compiler_type+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+  # We make a subdir and do the tests there.  Otherwise we can end up
+  # making bogus files that we don't know about and never remove.  For
+  # instance it was reported that on HP-UX the gcc test will end up
+  # making a dummy file named `D' -- because `-MD' means `put the output
+  # in D'.
+  rm -rf conftest.dir
+  mkdir conftest.dir
+  # Copy depcomp to subdir because otherwise we won't find it if we're
+  # using a relative directory.
+  cp "$am_depcomp" conftest.dir
+  cd conftest.dir
+  # We will build objects and dependencies in a subdirectory because
+  # it helps to detect inapplicable dependency modes.  For instance
+  # both Tru64's cc and ICC support -MD to output dependencies as a
+  # side effect of compilation, but ICC will put the dependencies in
+  # the current directory while Tru64 will put them in the object
+  # directory.
+  mkdir sub
+
+  am_cv_CXX_dependencies_compiler_type=none
+  if test "$am_compiler_list" = ""; then
+     am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp`
+  fi
+  am__universal=false
+  case " $depcc " in #(
+     *\ -arch\ *\ -arch\ *) am__universal=true ;;
+     esac
+
+  for depmode in $am_compiler_list; do
+    # Setup a source with many dependencies, because some compilers
+    # like to wrap large dependency lists on column 80 (with \), and
+    # we should not choose a depcomp mode which is confused by this.
+    #
+    # We need to recreate these files for each test, as the compiler may
+    # overwrite some of them when testing with obscure command lines.
+    # This happens at least with the AIX C compiler.
+    : > sub/conftest.c
+    for i in 1 2 3 4 5 6; do
+      echo '#include "conftst'$i'.h"' >> sub/conftest.c
+      # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with
+      # Solaris 8's {/usr,}/bin/sh.
+      touch sub/conftst$i.h
+    done
+    echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+    # We check with `-c' and `-o' for the sake of the "dashmstdout"
+    # mode.  It turns out that the SunPro C++ compiler does not properly
+    # handle `-M -o', and we need to detect this.  Also, some Intel
+      # versions had trouble with output in subdirs.
+    am__obj=sub/conftest.${OBJEXT-o}
+    am__minus_obj="-o $am__obj"
+    case $depmode in
+    gcc)
+      # This depmode causes a compiler race in universal mode.
+      test "$am__universal" = false || continue
+      ;;
+    nosideeffect)
+      # after this tag, mechanisms are not by side-effect, so they'll
+      # only be used when explicitly requested
+      if test "x$enable_dependency_tracking" = xyes; then
+	continue
+      else
+	break
+      fi
+      ;;
+    msvc7 | msvc7msys | msvisualcpp | msvcmsys)
+      # This compiler won't grok `-c -o', but also, the minuso test has
+      # not run yet.  These depmodes are late enough in the game, and
+      # so weak that their functioning should not be impacted.
+      am__obj=conftest.${OBJEXT-o}
+      am__minus_obj=
+      ;;
+    none) break ;;
+    esac
+    if depmode=$depmode \
+       source=sub/conftest.c object=$am__obj \
+       depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+       $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+         >/dev/null 2>conftest.err &&
+       grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+       grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+       grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+       ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+      # icc doesn't choke on unknown options; it will just issue warnings
+      # or remarks (even with -Werror).  So we grep stderr for any message
+      # that says an option was ignored or not supported.
+      # When given -MP, icc 7.0 and 7.1 complain thusly:
+      #   icc: Command line warning: ignoring option '-M'; no argument required
+      # The diagnosis changed in icc 8.0:
+      #   icc: Command line remark: option '-MP' not supported
+      if (grep 'ignoring option' conftest.err ||
+          grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+        am_cv_CXX_dependencies_compiler_type=$depmode
+        break
+      fi
+    fi
+  done
+
+  cd ..
+  rm -rf conftest.dir
+else
+  am_cv_CXX_dependencies_compiler_type=none
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5
+$as_echo "$am_cv_CXX_dependencies_compiler_type" >&6; }
+CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type
+
+ if
+  test "x$enable_dependency_tracking" != xno \
+  && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then
+  am__fastdepCXX_TRUE=
+  am__fastdepCXX_FALSE='#'
+else
+  am__fastdepCXX_TRUE='#'
+  am__fastdepCXX_FALSE=
+fi
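+# Illustrative sketch (not generated by automake; paths hypothetical): with
+# the "gcc3" depmode selected above, a depcomp run compiles and writes the
+# dependency file in one pass, mirroring the test invocation used in the
+# check:
+#
+#   depmode=gcc3 source=foo.cpp object=foo.o \
+#   depfile=.deps/foo.Po tmpdepfile=.deps/foo.TPo \
+#   ./depcomp $CXX -c -o foo.o foo.cpp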
+
+
+      if test -n "$CXX" && ( test "X$CXX" != "Xno" &&
+    ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) ||
+    (test "X$CXX" != "Xg++"))) ; then
+  ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5
+$as_echo_n "checking how to run the C++ preprocessor... " >&6; }
+if test -z "$CXXCPP"; then
+  if ${ac_cv_prog_CXXCPP+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+      # Double quotes because CXXCPP needs to be expanded
+    for CXXCPP in "$CXX -E" "/lib/cpp"
+    do
+      ac_preproc_ok=false
+for ac_cxx_preproc_warn_flag in '' yes
+do
+  # Use a header file that comes with gcc, so configuring glibc
+  # with a fresh cross-compiler works.
+  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+  # <limits.h> exists even on freestanding compilers.
+  # On the NeXT, cc -E runs the code through the compiler's parser,
+  # not just through cpp. "Syntax error" is here to catch this case.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+		     Syntax error
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"; then :
+
+else
+  # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+  # OK, works on sane cases.  Now check whether nonexistent headers
+  # can be detected and how.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"; then :
+  # Broken: success on invalid input.
+continue
+else
+  # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+  break
+fi
+
+    done
+    ac_cv_prog_CXXCPP=$CXXCPP
+
+fi
+  CXXCPP=$ac_cv_prog_CXXCPP
+else
+  ac_cv_prog_CXXCPP=$CXXCPP
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5
+$as_echo "$CXXCPP" >&6; }
+ac_preproc_ok=false
+for ac_cxx_preproc_warn_flag in '' yes
+do
+  # Use a header file that comes with gcc, so configuring glibc
+  # with a fresh cross-compiler works.
+  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+  # <limits.h> exists even on freestanding compilers.
+  # On the NeXT, cc -E runs the code through the compiler's parser,
+  # not just through cpp. "Syntax error" is here to catch this case.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+		     Syntax error
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"; then :
+
+else
+  # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+  # OK, works on sane cases.  Now check whether nonexistent headers
+  # can be detected and how.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_cxx_try_cpp "$LINENO"; then :
+  # Broken: success on invalid input.
+continue
+else
+  # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.i conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.i conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+
+else
+  { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check
+See \`config.log' for more details" "$LINENO" 5; }
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+else
+  _lt_caught_CXX_error=yes
+fi
+
+ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+archive_cmds_need_lc_CXX=no
+allow_undefined_flag_CXX=
+always_export_symbols_CXX=no
+archive_expsym_cmds_CXX=
+compiler_needs_object_CXX=no
+export_dynamic_flag_spec_CXX=
+hardcode_direct_CXX=no
+hardcode_direct_absolute_CXX=no
+hardcode_libdir_flag_spec_CXX=
+hardcode_libdir_separator_CXX=
+hardcode_minus_L_CXX=no
+hardcode_shlibpath_var_CXX=unsupported
+hardcode_automatic_CXX=no
+inherit_rpath_CXX=no
+module_cmds_CXX=
+module_expsym_cmds_CXX=
+link_all_deplibs_CXX=unknown
+old_archive_cmds_CXX=$old_archive_cmds
+reload_flag_CXX=$reload_flag
+reload_cmds_CXX=$reload_cmds
+no_undefined_flag_CXX=
+whole_archive_flag_spec_CXX=
+enable_shared_with_static_runtimes_CXX=no
+
+# Source file extension for C++ test sources.
+ac_ext=cpp
+
+# Object file extension for compiled C++ test sources.
+objext=o
+objext_CXX=$objext
+
+# No sense in running all these tests if we already determined that
+# the CXX compiler isn't working.  Some variables (like enable_shared)
+# are currently assumed to apply to all compilers on this platform,
+# and will be corrupted by setting them based on a non-working compiler.
+if test "$_lt_caught_CXX_error" != yes; then
+  # Code to be used in simple compile tests
+  lt_simple_compile_test_code="int some_variable = 0;"
+
+  # Code to be used in simple link tests
+  lt_simple_link_test_code='int main(int, char *[]) { return(0); }'
+
+  # ltmain only uses $CC for tagged configurations so make sure $CC is set.
+
+
+
+
+
+
+# If no C compiler was specified, use CC.
+LTCC=${LTCC-"$CC"}
+
+# If no C compiler flags were specified, use CFLAGS.
+LTCFLAGS=${LTCFLAGS-"$CFLAGS"}
+
+# Allow CC to be a program name with arguments.
+compiler=$CC
+
+
+  # save warnings/boilerplate of simple test code
+  ac_outfile=conftest.$ac_objext
+echo "$lt_simple_compile_test_code" >conftest.$ac_ext
+eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_compiler_boilerplate=`cat conftest.err`
+$RM conftest*
+
+  ac_outfile=conftest.$ac_objext
+echo "$lt_simple_link_test_code" >conftest.$ac_ext
+eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+_lt_linker_boilerplate=`cat conftest.err`
+$RM -r conftest*
+
+
+  # Allow CC to be a program name with arguments.
+  lt_save_CC=$CC
+  lt_save_CFLAGS=$CFLAGS
+  lt_save_LD=$LD
+  lt_save_GCC=$GCC
+  GCC=$GXX
+  lt_save_with_gnu_ld=$with_gnu_ld
+  lt_save_path_LD=$lt_cv_path_LD
+  if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then
+    lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx
+  else
+    $as_unset lt_cv_prog_gnu_ld
+  fi
+  if test -n "${lt_cv_path_LDCXX+set}"; then
+    lt_cv_path_LD=$lt_cv_path_LDCXX
+  else
+    $as_unset lt_cv_path_LD
+  fi
+  test -z "${LDCXX+set}" || LD=$LDCXX
+  CC=${CXX-"c++"}
+  CFLAGS=$CXXFLAGS
+  compiler=$CC
+  compiler_CXX=$CC
+  for cc_temp in $compiler""; do
+  case $cc_temp in
+    compile | *[\\/]compile | ccache | *[\\/]ccache ) ;;
+    distcc | *[\\/]distcc | purify | *[\\/]purify ) ;;
+    \-*) ;;
+    *) break;;
+  esac
+done
+cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
+
+
+  if test -n "$compiler"; then
+    # We don't want -fno-exception when compiling C++ code, so set the
+    # no_builtin_flag separately
+    if test "$GXX" = yes; then
+      lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin'
+    else
+      lt_prog_compiler_no_builtin_flag_CXX=
+    fi
+
+    if test "$GXX" = yes; then
+      # Set up default GNU C++ configuration
+
+
+
+# Check whether --with-gnu-ld was given.
+if test "${with_gnu_ld+set}" = set; then :
+  withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes
+else
+  with_gnu_ld=no
+fi
+
+ac_prog=ld
+if test "$GCC" = yes; then
+  # Check if gcc -print-prog-name=ld gives a path.
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5
+$as_echo_n "checking for ld used by $CC... " >&6; }
+  case $host in
+  *-*-mingw*)
+    # gcc leaves a trailing carriage return which upsets mingw
+    ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+  *)
+    ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+  esac
+  case $ac_prog in
+    # Accept absolute paths.
+    [\\/]* | ?:[\\/]*)
+      re_direlt='/[^/][^/]*/\.\./'
+      # Canonicalize the pathname of ld
+      ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'`
+      while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
+	ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
+      done
+      test -z "$LD" && LD="$ac_prog"
+      ;;
+  "")
+    # If it fails, then pretend we aren't using GCC.
+    ac_prog=ld
+    ;;
+  *)
+    # If it is relative, then search for the first ld in PATH.
+    with_gnu_ld=unknown
+    ;;
+  esac
+elif test "$with_gnu_ld" = yes; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5
+$as_echo_n "checking for GNU ld... " >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5
+$as_echo_n "checking for non-GNU ld... " >&6; }
+fi
+if ${lt_cv_path_LD+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -z "$LD"; then
+  lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+  for ac_dir in $PATH; do
+    IFS="$lt_save_ifs"
+    test -z "$ac_dir" && ac_dir=.
+    if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+      lt_cv_path_LD="$ac_dir/$ac_prog"
+      # Check to see if the program is GNU ld.  I'd rather use --version,
+      # but apparently some variants of GNU ld only accept -v.
+      # Break only if it was the GNU/non-GNU ld that we prefer.
+      case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in
+      *GNU* | *'with BFD'*)
+	test "$with_gnu_ld" != no && break
+	;;
+      *)
+	test "$with_gnu_ld" != yes && break
+	;;
+      esac
+    fi
+  done
+  IFS="$lt_save_ifs"
+else
+  lt_cv_path_LD="$LD" # Let the user override the test with a path.
+fi
+fi
+
+LD="$lt_cv_path_LD"
+if test -n "$LD"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5
+$as_echo "$LD" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5
+$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; }
+if ${lt_cv_prog_gnu_ld+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  # I'd rather use --version here, but apparently some GNU lds only accept -v.
+case `$LD -v 2>&1 </dev/null` in
+*GNU* | *'with BFD'*)
+  lt_cv_prog_gnu_ld=yes
+  ;;
+*)
+  lt_cv_prog_gnu_ld=no
+  ;;
+esac
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_gnu_ld" >&5
+$as_echo "$lt_cv_prog_gnu_ld" >&6; }
+with_gnu_ld=$lt_cv_prog_gnu_ld
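+# Illustrative note (not generated by libtool): the banner matched by the
+# case statement above looks, for a GNU ld, something like:
+#
+#   $LD -v
+#   GNU ld (GNU Binutils) 2.26    -> lt_cv_prog_gnu_ld=yes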
+
+
+
+
+
+
+
+      # Check if GNU C++ uses GNU ld as the underlying linker, since the
+      # archiving commands below assume that GNU ld is being used.
+      if test "$with_gnu_ld" = yes; then
+        archive_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
+        archive_expsym_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+
+        hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir'
+        export_dynamic_flag_spec_CXX='${wl}--export-dynamic'
+
+        # If archive_cmds runs LD, not CC, wlarc should be empty
+        # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to
+        #     investigate it a little bit more. (MM)
+        wlarc='${wl}'
+
+        # Ancient GNU ld didn't support --whole-archive et al.
+        if eval "`$CC -print-prog-name=ld` --help 2>&1" |
+	  $GREP 'no-whole-archive' > /dev/null; then
+          whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+        else
+          whole_archive_flag_spec_CXX=
+        fi
+      else
+        with_gnu_ld=no
+        wlarc=
+
+        # A generic and very simple default shared library creation
+        # command for GNU C++ for the case where it uses the native
+        # linker instead of GNU ld.  If possible, this setting should be
+        # overridden to take advantage of the native linker features on
+        # the platform it is being used on.
+        archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
+      fi
+
+      # Commands to make compiler produce verbose output that lists
+      # what "hidden" libraries, object files and flags are used when
+      # linking a shared library.
+      output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+
+    else
+      GXX=no
+      with_gnu_ld=no
+      wlarc=
+    fi
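+    # Illustrative sketch (not generated by libtool; object and soname
+    # values are hypothetical): with GNU ld and g++, the archive_cmds_CXX
+    # spec above expands roughly to:
+    #
+    #   g++ -fPIC -shared -nostdlib rsb.o ... -Wl,-soname -Wl,librsb.so.1 \
+    #     -o .libs/librsb.so.1.2.0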
+
+    # PORTME: fill in a description of your system's C++ link characteristics
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5
+$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; }
+    ld_shlibs_CXX=yes
+    case $host_os in
+      aix3*)
+        # FIXME: insert proper C++ library support
+        ld_shlibs_CXX=no
+        ;;
+      aix[4-9]*)
+        if test "$host_cpu" = ia64; then
+          # On IA64, the linker does run time linking by default, so we don't
+          # have to do anything special.
+          aix_use_runtimelinking=no
+          exp_sym_flag='-Bexport'
+          no_entry_flag=""
+        else
+          aix_use_runtimelinking=no
+
+          # Test if we are trying to use run time linking or normal
+          # AIX style linking. If -brtl is somewhere in LDFLAGS, we
+          # need to do runtime linking.
+          case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*)
+	    for ld_flag in $LDFLAGS; do
+	      case $ld_flag in
+	      *-brtl*)
+	        aix_use_runtimelinking=yes
+	        break
+	        ;;
+	      esac
+	    done
+	    ;;
+          esac
+
+          exp_sym_flag='-bexport'
+          no_entry_flag='-bnoentry'
+        fi
+
+        # When large executables or shared objects are built, AIX ld can
+        # have problems creating the table of contents.  If linking a library
+        # or program results in "error TOC overflow", add -mminimal-toc to
+        # CXXFLAGS/CFLAGS for g++/gcc.  In the cases where that is not
+        # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
+
+        archive_cmds_CXX=''
+        hardcode_direct_CXX=yes
+        hardcode_direct_absolute_CXX=yes
+        hardcode_libdir_separator_CXX=':'
+        link_all_deplibs_CXX=yes
+        file_list_spec_CXX='${wl}-f,'
+
+        if test "$GXX" = yes; then
+          case $host_os in aix4.[012]|aix4.[012].*)
+          # We only want to do this on AIX 4.2 and lower; the check
+          # below for broken collect2 doesn't work under 4.3+.
+	  collect2name=`${CC} -print-prog-name=collect2`
+	  if test -f "$collect2name" &&
+	     strings "$collect2name" | $GREP resolve_lib_name >/dev/null
+	  then
+	    # We have reworked collect2
+	    :
+	  else
+	    # We have old collect2
+	    hardcode_direct_CXX=unsupported
+	    # It fails to find uninstalled libraries when the uninstalled
+	    # path is not listed in the libpath.  Setting hardcode_minus_L
+	    # to unsupported forces relinking.
+	    hardcode_minus_L_CXX=yes
+	    hardcode_libdir_flag_spec_CXX='-L$libdir'
+	    hardcode_libdir_separator_CXX=
+	  fi
+          esac
+          shared_flag='-shared'
+	  if test "$aix_use_runtimelinking" = yes; then
+	    shared_flag="$shared_flag "'${wl}-G'
+	  fi
+        else
+          # not using gcc
+          if test "$host_cpu" = ia64; then
+	  # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
+	  # chokes on -Wl,-G. The following line is correct:
+	  shared_flag='-G'
+          else
+	    if test "$aix_use_runtimelinking" = yes; then
+	      shared_flag='${wl}-G'
+	    else
+	      shared_flag='${wl}-bM:SRE'
+	    fi
+          fi
+        fi
+
+        export_dynamic_flag_spec_CXX='${wl}-bexpall'
+        # It seems that -bexpall does not export symbols beginning with
+        # underscore (_), so it is better to generate a list of symbols to
+        # export.
+        always_export_symbols_CXX=yes
+        if test "$aix_use_runtimelinking" = yes; then
+          # Warning - without using the other runtime loading flags (-brtl),
+          # -berok will link without error, but may produce a broken library.
+          allow_undefined_flag_CXX='-berok'
+          # Determine the default libpath from the value encoded in an empty
+          # executable.
+          if test "${lt_cv_aix_libpath+set}" = set; then
+  aix_libpath=$lt_cv_aix_libpath
+else
+  if ${lt_cv_aix_libpath__CXX+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_link "$LINENO"; then :
+
+  lt_aix_libpath_sed='
+      /Import File Strings/,/^$/ {
+	  /^0/ {
+	      s/^0  *\([^ ]*\) *$/\1/
+	      p
+	  }
+      }'
+  lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  # Check for a 64-bit object if we didn't find anything.
+  if test -z "$lt_cv_aix_libpath__CXX"; then
+    lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  fi
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+  if test -z "$lt_cv_aix_libpath__CXX"; then
+    lt_cv_aix_libpath__CXX="/usr/lib:/lib"
+  fi
+
+fi
+
+  aix_libpath=$lt_cv_aix_libpath__CXX
+fi
+
+          hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath"
+
+          archive_expsym_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag"
+        else
+          if test "$host_cpu" = ia64; then
+	    hardcode_libdir_flag_spec_CXX='${wl}-R $libdir:/usr/lib:/lib'
+	    allow_undefined_flag_CXX="-z nodefs"
+	    archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols"
+          else
+	    # Determine the default libpath from the value encoded in an
+	    # empty executable.
+	    if test "${lt_cv_aix_libpath+set}" = set; then
+  aix_libpath=$lt_cv_aix_libpath
+else
+  if ${lt_cv_aix_libpath__CXX+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_link "$LINENO"; then :
+
+  lt_aix_libpath_sed='
+      /Import File Strings/,/^$/ {
+	  /^0/ {
+	      s/^0  *\([^ ]*\) *$/\1/
+	      p
+	  }
+      }'
+  lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  # Check for a 64-bit object if we didn't find anything.
+  if test -z "$lt_cv_aix_libpath__CXX"; then
+    lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+  fi
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+  if test -z "$lt_cv_aix_libpath__CXX"; then
+    lt_cv_aix_libpath__CXX="/usr/lib:/lib"
+  fi
+
+fi
+
+  aix_libpath=$lt_cv_aix_libpath__CXX
+fi
+
+	    hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath"
+	    # Warning - without using the other runtime loading flags,
+	    # -berok will link without error, but may produce a broken library.
+	    no_undefined_flag_CXX=' ${wl}-bernotok'
+	    allow_undefined_flag_CXX=' ${wl}-berok'
+	    if test "$with_gnu_ld" = yes; then
+	      # We only use this code for GNU lds that support --whole-archive.
+	      whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive'
+	    else
+	      # Exported symbols can be pulled into shared objects from archives
+	      whole_archive_flag_spec_CXX='$convenience'
+	    fi
+	    archive_cmds_need_lc_CXX=yes
+	    # This is similar to how AIX traditionally builds its shared
+	    # libraries.
+	    archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname'
+          fi
+        fi
+        ;;
+
+      beos*)
+	if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+	  allow_undefined_flag_CXX=unsupported
+	  # Joseph Beckenbach <jrb3@best.com> says some releases of gcc
+	  # support --undefined.  This deserves some investigation.  FIXME
+	  archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+	else
+	  ld_shlibs_CXX=no
+	fi
+	;;
+
+      chorus*)
+        case $cc_basename in
+          *)
+	  # FIXME: insert proper C++ library support
+	  ld_shlibs_CXX=no
+	  ;;
+        esac
+        ;;
+
+      cygwin* | mingw* | pw32* | cegcc*)
+	case $GXX,$cc_basename in
+	,cl* | no,cl*)
+	  # Native MSVC
+	  # hardcode_libdir_flag_spec is actually meaningless, as there is
+	  # no search path for DLLs.
+	  hardcode_libdir_flag_spec_CXX=' '
+	  allow_undefined_flag_CXX=unsupported
+	  always_export_symbols_CXX=yes
+	  file_list_spec_CXX='@'
+	  # Tell ltmain to make .lib files, not .a files.
+	  libext=lib
+	  # Tell ltmain to make .dll files, not .so files.
+	  shrext_cmds=".dll"
+	  # FIXME: Setting linknames here is a bad hack.
+	  archive_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames='
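+	  # If the first line of $export_symbols reads EXPORTS, that header
+	  # line is skipped; each remaining symbol is rewritten into a
+	  # "-link -EXPORT:symbol" option for the MSVC linker.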
+	  archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+	      $SED -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp;
+	    else
+	      $SED -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp;
+	    fi~
+	    $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
+	    linknames='
+	  # The linker will not automatically build a static lib if we build a DLL.
+	  # _LT_TAGVAR(old_archive_from_new_cmds, CXX)='true'
+	  enable_shared_with_static_runtimes_CXX=yes
+	  # Don't use ranlib
+	  old_postinstall_cmds_CXX='chmod 644 $oldlib'
+	  postlink_cmds_CXX='lt_outputfile="@OUTPUT@"~
+	    lt_tool_outputfile="@TOOL_OUTPUT@"~
+	    case $lt_outputfile in
+	      *.exe|*.EXE) ;;
+	      *)
+		lt_outputfile="$lt_outputfile.exe"
+		lt_tool_outputfile="$lt_tool_outputfile.exe"
+		;;
+	    esac~
+	    func_to_tool_file "$lt_outputfile"~
+	    if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then
+	      $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
+	      $RM "$lt_outputfile.manifest";
+	    fi'
+	  ;;
+	*)
+	  # g++
+	  # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless,
+	  # as there is no search path for DLLs.
+	  hardcode_libdir_flag_spec_CXX='-L$libdir'
+	  export_dynamic_flag_spec_CXX='${wl}--export-all-symbols'
+	  allow_undefined_flag_CXX=unsupported
+	  always_export_symbols_CXX=no
+	  enable_shared_with_static_runtimes_CXX=yes
+
+	  if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
+	    archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+	    # If the export-symbols file already is a .def file (1st line
+	    # is EXPORTS), use it as is; otherwise, prepend the EXPORTS header.
+	    archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
+	      cp $export_symbols $output_objdir/$soname.def;
+	    else
+	      echo EXPORTS > $output_objdir/$soname.def;
+	      cat $export_symbols >> $output_objdir/$soname.def;
+	    fi~
+	    $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+	  else
+	    ld_shlibs_CXX=no
+	  fi
+	  ;;
+	esac
+	;;
+      darwin* | rhapsody*)
+
+
+  archive_cmds_need_lc_CXX=no
+  hardcode_direct_CXX=no
+  hardcode_automatic_CXX=yes
+  hardcode_shlibpath_var_CXX=unsupported
+  if test "$lt_cv_ld_force_load" = "yes"; then
+    whole_archive_flag_spec_CXX='`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`'
+
+  else
+    whole_archive_flag_spec_CXX=''
+  fi
+  link_all_deplibs_CXX=yes
+  allow_undefined_flag_CXX="$_lt_dar_allow_undefined"
+  case $cc_basename in
+     ifort*) _lt_dar_can_shared=yes ;;
+     *) _lt_dar_can_shared=$GCC ;;
+  esac
+  if test "$_lt_dar_can_shared" = "yes"; then
+    output_verbose_link_cmd=func_echo_all
+    archive_cmds_CXX="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}"
+    module_cmds_CXX="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}"
+    archive_expsym_cmds_CXX="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}"
+    module_expsym_cmds_CXX="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}"
+       if test "$lt_cv_apple_cc_single_mod" != "yes"; then
+      archive_cmds_CXX="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}"
+      archive_expsym_cmds_CXX="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}"
+    fi
+
+  else
+  ld_shlibs_CXX=no
+  fi
+
+	;;
+
+      dgux*)
+        case $cc_basename in
+          ec++*)
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+          ghcx*)
+	    # Green Hills C++ Compiler
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+          *)
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+        esac
+        ;;
+
+      freebsd2.*)
+        # C++ shared libraries were reported to be fairly broken before
+        # the switch to ELF.
+        ld_shlibs_CXX=no
+        ;;
+
+      freebsd-elf*)
+        archive_cmds_need_lc_CXX=no
+        ;;
+
+      freebsd* | dragonfly*)
+        # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF
+        # conventions
+        ld_shlibs_CXX=yes
+        ;;
+
+      haiku*)
+        archive_cmds_CXX='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+        link_all_deplibs_CXX=yes
+        ;;
+
+      hpux9*)
+        hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir'
+        hardcode_libdir_separator_CXX=:
+        export_dynamic_flag_spec_CXX='${wl}-E'
+        hardcode_direct_CXX=yes
+        hardcode_minus_L_CXX=yes # Not in the search PATH,
+				             # but as the default
+				             # location of the library.
+
+        case $cc_basename in
+          CC*)
+            # FIXME: insert proper C++ library support
+            ld_shlibs_CXX=no
+            ;;
+          aCC*)
+            archive_cmds_CXX='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+            # Commands to make compiler produce verbose output that lists
+            # what "hidden" libraries, object files and flags are used when
+            # linking a shared library.
+            #
+            # There doesn't appear to be a way to prevent this compiler from
+            # explicitly linking system object files so we need to strip them
+            # from the output so that they don't get included in the library
+            # dependencies.
+            output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+            ;;
+          *)
+            if test "$GXX" = yes; then
+              archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+            else
+              # FIXME: insert proper C++ library support
+              ld_shlibs_CXX=no
+            fi
+            ;;
+        esac
+        ;;
+
+      hpux10*|hpux11*)
+        if test $with_gnu_ld = no; then
+	  hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir'
+	  hardcode_libdir_separator_CXX=:
+
+          case $host_cpu in
+            hppa*64*|ia64*)
+              ;;
+            *)
+	      export_dynamic_flag_spec_CXX='${wl}-E'
+              ;;
+          esac
+        fi
+        case $host_cpu in
+          hppa*64*|ia64*)
+            hardcode_direct_CXX=no
+            hardcode_shlibpath_var_CXX=no
+            ;;
+          *)
+            hardcode_direct_CXX=yes
+            hardcode_direct_absolute_CXX=yes
+            hardcode_minus_L_CXX=yes # Not in the search PATH,
+					         # but as the default
+					         # location of the library.
+            ;;
+        esac
+
+        case $cc_basename in
+          CC*)
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+          aCC*)
+	    case $host_cpu in
+	      hppa*64*)
+	        archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	        ;;
+	      ia64*)
+	        archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	        ;;
+	      *)
+	        archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	        ;;
+	    esac
+	    # Commands to make compiler produce verbose output that lists
+	    # what "hidden" libraries, object files and flags are used when
+	    # linking a shared library.
+	    #
+	    # There doesn't appear to be a way to prevent this compiler from
+	    # explicitly linking system object files so we need to strip them
+	    # from the output so that they don't get included in the library
+	    # dependencies.
+	    output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+	    ;;
+          *)
+	    if test "$GXX" = yes; then
+	      if test $with_gnu_ld = no; then
+	        case $host_cpu in
+	          hppa*64*)
+	            archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	            ;;
+	          ia64*)
+	            archive_cmds_CXX='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	            ;;
+	          *)
+	            archive_cmds_CXX='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	            ;;
+	        esac
+	      fi
+	    else
+	      # FIXME: insert proper C++ library support
+	      ld_shlibs_CXX=no
+	    fi
+	    ;;
+        esac
+        ;;
+
+      interix[3-9]*)
+	hardcode_direct_CXX=no
+	hardcode_shlibpath_var_CXX=no
+	hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir'
+	export_dynamic_flag_spec_CXX='${wl}-E'
+	# Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
+	# Instead, shared libraries are loaded at an image base (0x10000000 by
+	# default) and relocated if they conflict, which is a slow, very
+	# memory-consuming and fragmenting process.  To avoid this, we pick a random,
+	# 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link
+	# time.  Moving up from 0x10000000 also allows more sbrk(2) space.
+	archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+	archive_expsym_cmds_CXX='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+	;;
+      irix5* | irix6*)
+        case $cc_basename in
+          CC*)
+	    # SGI C++
+	    archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+
+	    # Archives containing C++ object files must be created using
+	    # "CC -ar", where "CC" is the IRIX C++ compiler.  This is
+	    # necessary to make sure instantiated templates are included
+	    # in the archive.
+	    old_archive_cmds_CXX='$CC -ar -WR,-u -o $oldlib $oldobjs'
+	    ;;
+          *)
+	    if test "$GXX" = yes; then
+	      if test "$with_gnu_ld" = no; then
+	        archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+	      else
+	        archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib'
+	      fi
+	    fi
+	    link_all_deplibs_CXX=yes
+	    ;;
+        esac
+        hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir'
+        hardcode_libdir_separator_CXX=:
+        inherit_rpath_CXX=yes
+        ;;
+
+      linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+        case $cc_basename in
+          KCC*)
+	    # Kuck and Associates, Inc. (KAI) C++ Compiler
+
+	    # KCC will only create a shared library if the output file
+	    # ends with ".so" (or ".sl" for HP-UX), so rename the library
+	    # to its proper name (with version) after linking.
+	    archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
+	    archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib'
+	    # Commands to make compiler produce verbose output that lists
+	    # what "hidden" libraries, object files and flags are used when
+	    # linking a shared library.
+	    #
+	    # There doesn't appear to be a way to prevent this compiler from
+	    # explicitly linking system object files so we need to strip them
+	    # from the output so that they don't get included in the library
+	    # dependencies.
+	    output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+
+	    hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir'
+	    export_dynamic_flag_spec_CXX='${wl}--export-dynamic'
+
+	    # Archives containing C++ object files must be created using
+	    # "CC -Bstatic", where "CC" is the KAI C++ compiler.
+	    old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs'
+	    ;;
+	  icpc* | ecpc* )
+	    # Intel C++
+	    with_gnu_ld=yes
+	    # Versions 8.0 and above of icpc choke on multiply defined symbols
+	    # if we add $predep_objects and $postdep_objects; versions 7.1 and
+	    # earlier, however, do not add the objects themselves.
+	    case `$CC -V 2>&1` in
+	      *"Version 7."*)
+	        archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
+		archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+		;;
+	      *)  # Version 8.0 or newer
+	        tmp_idyn=
+	        case $host_cpu in
+		  ia64*) tmp_idyn=' -i_dynamic';;
+		esac
+	        archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+		archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+		;;
+	    esac
+	    archive_cmds_need_lc_CXX=no
+	    hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir'
+	    export_dynamic_flag_spec_CXX='${wl}--export-dynamic'
+	    whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive'
+	    ;;
+          pgCC* | pgcpp*)
+            # Portland Group C++ compiler
+	    case `$CC -V` in
+	    *pgCC\ [1-5].* | *pgcpp\ [1-5].*)
+	      prelink_cmds_CXX='tpldir=Template.dir~
+		rm -rf $tpldir~
+		$CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~
+		compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"'
+	      old_archive_cmds_CXX='tpldir=Template.dir~
+		rm -rf $tpldir~
+		$CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~
+		$AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~
+		$RANLIB $oldlib'
+	      archive_cmds_CXX='tpldir=Template.dir~
+		rm -rf $tpldir~
+		$CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
+		$CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib'
+	      archive_expsym_cmds_CXX='tpldir=Template.dir~
+		rm -rf $tpldir~
+		$CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
+		$CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib'
+	      ;;
+	    *) # Version 6 and above use weak symbols
+	      archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib'
+	      archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib'
+	      ;;
+	    esac
+
+	    hardcode_libdir_flag_spec_CXX='${wl}--rpath ${wl}$libdir'
+	    export_dynamic_flag_spec_CXX='${wl}--export-dynamic'
+	    whole_archive_flag_spec_CXX='${wl}--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+            ;;
+	  cxx*)
+	    # Compaq C++
+	    archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
+	    archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname  -o $lib ${wl}-retain-symbols-file $wl$export_symbols'
+
+	    runpath_var=LD_RUN_PATH
+	    hardcode_libdir_flag_spec_CXX='-rpath $libdir'
+	    hardcode_libdir_separator_CXX=:
+
+	    # Commands to make compiler produce verbose output that lists
+	    # what "hidden" libraries, object files and flags are used when
+	    # linking a shared library.
+	    #
+	    # There doesn't appear to be a way to prevent this compiler from
+	    # explicitly linking system object files so we need to strip them
+	    # from the output so that they don't get included in the library
+	    # dependencies.
+	    output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed'
+	    ;;
+	  xl* | mpixl* | bgxl*)
+	    # IBM XL 8.0 on PPC, with GNU ld
+	    hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir'
+	    export_dynamic_flag_spec_CXX='${wl}--export-dynamic'
+	    archive_cmds_CXX='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+	    if test "x$supports_anon_versioning" = xyes; then
+	      archive_expsym_cmds_CXX='echo "{ global:" > $output_objdir/$libname.ver~
+		cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+		echo "local: *; };" >> $output_objdir/$libname.ver~
+		$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib'
+	    fi
+	    ;;
+	  *)
+	    case `$CC -V 2>&1 | sed 5q` in
+	    *Sun\ C*)
+	      # Sun C++ 5.9
+	      no_undefined_flag_CXX=' -zdefs'
+	      archive_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	      archive_expsym_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols'
+	      hardcode_libdir_flag_spec_CXX='-R$libdir'
+	      whole_archive_flag_spec_CXX='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+	      compiler_needs_object_CXX=yes
+
+	      # Not sure whether something based on
+	      # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1
+	      # would be better.
+	      output_verbose_link_cmd='func_echo_all'
+
+	      # Archives containing C++ object files must be created using
+	      # "CC -xar", where "CC" is the Sun C++ compiler.  This is
+	      # necessary to make sure instantiated templates are included
+	      # in the archive.
+	      old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs'
+	      ;;
+	    esac
+	    ;;
+	esac
+	;;
+
+      lynxos*)
+        # FIXME: insert proper C++ library support
+	ld_shlibs_CXX=no
+	;;
+
+      m88k*)
+        # FIXME: insert proper C++ library support
+        ld_shlibs_CXX=no
+	;;
+
+      mvs*)
+        case $cc_basename in
+          cxx*)
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+	  *)
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+	esac
+	;;
+
+      netbsd*)
+        if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+	  archive_cmds_CXX='$LD -Bshareable  -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags'
+	  wlarc=
+	  hardcode_libdir_flag_spec_CXX='-R$libdir'
+	  hardcode_direct_CXX=yes
+	  hardcode_shlibpath_var_CXX=no
+	fi
+	# Work around some broken pre-1.5 toolchains.
+	output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"'
+	;;
+
+      *nto* | *qnx*)
+        ld_shlibs_CXX=yes
+	;;
+
+      openbsd2*)
+        # C++ shared libraries are fairly broken
+	ld_shlibs_CXX=no
+	;;
+
+      openbsd*)
+	if test -f /usr/libexec/ld.so; then
+	  hardcode_direct_CXX=yes
+	  hardcode_shlibpath_var_CXX=no
+	  hardcode_direct_absolute_CXX=yes
+	  archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
+	  hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir'
+	  if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+	    archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib'
+	    export_dynamic_flag_spec_CXX='${wl}-E'
+	    whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+	  fi
+	  output_verbose_link_cmd=func_echo_all
+	else
+	  ld_shlibs_CXX=no
+	fi
+	;;
+
+      osf3* | osf4* | osf5*)
+        case $cc_basename in
+          KCC*)
+	    # Kuck and Associates, Inc. (KAI) C++ Compiler
+
+	    # KCC will only create a shared library if the output file
+	    # ends with ".so" (or ".sl" for HP-UX), so rename the library
+	    # to its proper name (with version) after linking.
+	    archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
+
+	    hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir'
+	    hardcode_libdir_separator_CXX=:
+
+	    # Archives containing C++ object files must be created using
+	    # the KAI C++ compiler.
+	    case $host in
+	      osf3*) old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ;;
+	      *) old_archive_cmds_CXX='$CC -o $oldlib $oldobjs' ;;
+	    esac
+	    ;;
+          RCC*)
+	    # Rational C++ 2.4.1
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+          cxx*)
+	    case $host in
+	      osf3*)
+	        allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*'
+	        archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && func_echo_all "${wl}-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+	        hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir'
+		;;
+	      *)
+	        allow_undefined_flag_CXX=' -expect_unresolved \*'
+	        archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+	        archive_expsym_cmds_CXX='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~
+	          echo "-hidden">> $lib.exp~
+	          $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp  `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~
+	          $RM $lib.exp'
+	        hardcode_libdir_flag_spec_CXX='-rpath $libdir'
+		;;
+	    esac
+
+	    hardcode_libdir_separator_CXX=:
+
+	    # Commands to make compiler produce verbose output that lists
+	    # what "hidden" libraries, object files and flags are used when
+	    # linking a shared library.
+	    #
+	    # There doesn't appear to be a way to prevent this compiler from
+	    # explicitly linking system object files so we need to strip them
+	    # from the output so that they don't get included in the library
+	    # dependencies.
+	    output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+	    ;;
+	  *)
+	    if test "$GXX" = yes && test "$with_gnu_ld" = no; then
+	      allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*'
+	      case $host in
+	        osf3*)
+	          archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+		  ;;
+	        *)
+	          archive_cmds_CXX='$CC -shared $pic_flag -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+		  ;;
+	      esac
+
+	      hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir'
+	      hardcode_libdir_separator_CXX=:
+
+	      # Commands to make compiler produce verbose output that lists
+	      # what "hidden" libraries, object files and flags are used when
+	      # linking a shared library.
+	      output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+
+	    else
+	      # FIXME: insert proper C++ library support
+	      ld_shlibs_CXX=no
+	    fi
+	    ;;
+        esac
+        ;;
+
+      psos*)
+        # FIXME: insert proper C++ library support
+        ld_shlibs_CXX=no
+        ;;
+
+      sunos4*)
+        case $cc_basename in
+          CC*)
+	    # Sun C++ 4.x
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+          lcc*)
+	    # Lucid
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+          *)
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+        esac
+        ;;
+
+      solaris*)
+        case $cc_basename in
+          CC* | sunCC*)
+	    # Sun C++ 4.2, 5.x and Centerline C++
+            archive_cmds_need_lc_CXX=yes
+	    no_undefined_flag_CXX=' -zdefs'
+	    archive_cmds_CXX='$CC -G${allow_undefined_flag}  -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+	    archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+	      $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+
+	    hardcode_libdir_flag_spec_CXX='-R$libdir'
+	    hardcode_shlibpath_var_CXX=no
+	    case $host_os in
+	      solaris2.[0-5] | solaris2.[0-5].*) ;;
+	      *)
+		# The compiler driver will combine and reorder linker options,
+		# but understands `-z linker_flag'.
+		# Supported since Solaris 2.6 (maybe 2.5.1?)
+		whole_archive_flag_spec_CXX='-z allextract$convenience -z defaultextract'
+	        ;;
+	    esac
+	    link_all_deplibs_CXX=yes
+
+	    output_verbose_link_cmd='func_echo_all'
+
+	    # Archives containing C++ object files must be created using
+	    # "CC -xar", where "CC" is the Sun C++ compiler.  This is
+	    # necessary to make sure instantiated templates are included
+	    # in the archive.
+	    old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs'
+	    ;;
+          gcx*)
+	    # Green Hills C++ Compiler
+	    archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
+
+	    # The C++ compiler must be used to create the archive.
+	    old_archive_cmds_CXX='$CC $LDFLAGS -archive -o $oldlib $oldobjs'
+	    ;;
+          *)
+	    # GNU C++ compiler with Solaris linker
+	    if test "$GXX" = yes && test "$with_gnu_ld" = no; then
+	      no_undefined_flag_CXX=' ${wl}-z ${wl}defs'
+	      if $CC --version | $GREP -v '^2\.7' > /dev/null; then
+	        archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
+	        archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+		  $CC -shared $pic_flag -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+
+	        # Commands to make compiler produce verbose output that lists
+	        # what "hidden" libraries, object files and flags are used when
+	        # linking a shared library.
+	        output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+	      else
+	        # g++ 2.7 appears to require `-G' NOT `-shared' on this
+	        # platform.
+	        archive_cmds_CXX='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
+	        archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+		  $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+
+	        # Commands to make compiler produce verbose output that lists
+	        # what "hidden" libraries, object files and flags are used when
+	        # linking a shared library.
+	        output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+	      fi
+
+	      hardcode_libdir_flag_spec_CXX='${wl}-R $wl$libdir'
+	      case $host_os in
+		solaris2.[0-5] | solaris2.[0-5].*) ;;
+		*)
+		  whole_archive_flag_spec_CXX='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract'
+		  ;;
+	      esac
+	    fi
+	    ;;
+        esac
+        ;;
+
+    sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*)
+      no_undefined_flag_CXX='${wl}-z,text'
+      archive_cmds_need_lc_CXX=no
+      hardcode_shlibpath_var_CXX=no
+      runpath_var='LD_RUN_PATH'
+
+      case $cc_basename in
+        CC*)
+	  archive_cmds_CXX='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+	*)
+	  archive_cmds_CXX='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	  ;;
+      esac
+      ;;
+
+      sysv5* | sco3.2v5* | sco5v6*)
+	# Note: We can NOT use -z defs as we might desire, because we do not
+	# link with -lc, and that would cause any symbols used from libc to
+	# always be unresolved, which means just about no library would
+	# ever link correctly.  If we're not using GNU ld we use -z text
+	# though, which does catch some bad symbols but isn't as heavy-handed
+	# as -z defs.
+	no_undefined_flag_CXX='${wl}-z,text'
+	allow_undefined_flag_CXX='${wl}-z,nodefs'
+	archive_cmds_need_lc_CXX=no
+	hardcode_shlibpath_var_CXX=no
+	hardcode_libdir_flag_spec_CXX='${wl}-R,$libdir'
+	hardcode_libdir_separator_CXX=':'
+	link_all_deplibs_CXX=yes
+	export_dynamic_flag_spec_CXX='${wl}-Bexport'
+	runpath_var='LD_RUN_PATH'
+
+	case $cc_basename in
+          CC*)
+	    archive_cmds_CXX='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	    archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	    old_archive_cmds_CXX='$CC -Tprelink_objects $oldobjs~
+	      '"$old_archive_cmds_CXX"
+	    reload_cmds_CXX='$CC -Tprelink_objects $reload_objs~
+	      '"$reload_cmds_CXX"
+	    ;;
+	  *)
+	    archive_cmds_CXX='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	    archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+	    ;;
+	esac
+      ;;
+
+      tandem*)
+        case $cc_basename in
+          NCC*)
+	    # NonStop-UX NCC 3.20
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+          *)
+	    # FIXME: insert proper C++ library support
+	    ld_shlibs_CXX=no
+	    ;;
+        esac
+        ;;
+
+      vxworks*)
+        # FIXME: insert proper C++ library support
+        ld_shlibs_CXX=no
+        ;;
+
+      *)
+        # FIXME: insert proper C++ library support
+        ld_shlibs_CXX=no
+        ;;
+    esac
+
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5
+$as_echo "$ld_shlibs_CXX" >&6; }
+    test "$ld_shlibs_CXX" = no && can_build_shared=no
+
+    GCC_CXX="$GXX"
+    LD_CXX="$LD"
+
+    ## CAVEAT EMPTOR:
+    ## There is no encapsulation within the following macros, do not change
+    ## the running order or otherwise move them around unless you know exactly
+    ## what you are doing...
+    # Dependencies to place before and after the object being linked:
+predep_objects_CXX=
+postdep_objects_CXX=
+predeps_CXX=
+postdeps_CXX=
+compiler_lib_search_path_CXX=
+
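+# A minimal C++ class is compiled below so that the compiler's verbose
+# link output can be parsed for the objects and libraries it adds on
+# its own; those implicit entries become the pre/post deps recorded here.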
+cat > conftest.$ac_ext <<_LT_EOF
+class Foo
+{
+public:
+  Foo (void) { a = 0; }
+private:
+  int a;
+};
+_LT_EOF
+
+
+_lt_libdeps_save_CFLAGS=$CFLAGS
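+# GCC link-time-optimization modes are switched off for this probe (and
+# CFLAGS restored afterwards), presumably so that the verbose link output
+# lists ordinary object files rather than linker-plugin intermediates.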
+case "$CC $CFLAGS " in #(
+*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;;
+*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;;
+*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;;
+esac
+
+if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+  (eval $ac_compile) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then
+  # Parse the compiler output and extract the necessary
+  # objects, libraries and library flags.
+
+  # Sentinel used to keep track of whether or not we are before
+  # the conftest object file.
+  pre_test_object_deps_done=no
+
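+  # Each token of the verbose link command is classified below: -L/-R/-l
+  # entries and object files seen before conftest.$objext are recorded as
+  # search paths/predeps, those seen after it as postdeps.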
+  for p in `eval "$output_verbose_link_cmd"`; do
+    case ${prev}${p} in
+
+    -L* | -R* | -l*)
+       # Some compilers place a space between "-{L,R}" and the path.
+       # Remove the space.
+       if test $p = "-L" ||
+          test $p = "-R"; then
+	 prev=$p
+	 continue
+       fi
+
+       # Expand the sysroot to ease extracting the directories later.
+       if test -z "$prev"; then
+         case $p in
+         -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;;
+         -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;;
+         -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;;
+         esac
+       fi
+       case $p in
+       =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;;
+       esac
+       if test "$pre_test_object_deps_done" = no; then
+	 case ${prev} in
+	 -L | -R)
+	   # Internal compiler library paths should come after those
+	   # provided by the user.  The postdeps already come after the
+	   # user-supplied libs so there is no need to process them.
+	   if test -z "$compiler_lib_search_path_CXX"; then
+	     compiler_lib_search_path_CXX="${prev}${p}"
+	   else
+	     compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} ${prev}${p}"
+	   fi
+	   ;;
+	 # The "-l" case would never come before the object being
+	 # linked, so don't bother handling this case.
+	 esac
+       else
+	 if test -z "$postdeps_CXX"; then
+	   postdeps_CXX="${prev}${p}"
+	 else
+	   postdeps_CXX="${postdeps_CXX} ${prev}${p}"
+	 fi
+       fi
+       prev=
+       ;;
+
+    *.lto.$objext) ;; # Ignore GCC LTO objects
+    *.$objext)
+       # This assumes that the test object file only shows up
+       # once in the compiler output.
+       if test "$p" = "conftest.$objext"; then
+	 pre_test_object_deps_done=yes
+	 continue
+       fi
+
+       if test "$pre_test_object_deps_done" = no; then
+	 if test -z "$predep_objects_CXX"; then
+	   predep_objects_CXX="$p"
+	 else
+	   predep_objects_CXX="$predep_objects_CXX $p"
+	 fi
+       else
+	 if test -z "$postdep_objects_CXX"; then
+	   postdep_objects_CXX="$p"
+	 else
+	   postdep_objects_CXX="$postdep_objects_CXX $p"
+	 fi
+       fi
+       ;;
+
+    *) ;; # Ignore the rest.
+
+    esac
+  done
+
+  # Clean up.
+  rm -f a.out a.exe
+else
+  echo "libtool.m4: error: problem compiling CXX test program"
+fi
+
+$RM -f conftest.$objext
+CFLAGS=$_lt_libdeps_save_CFLAGS
+
+# PORTME: override above test on systems where it is broken
+case $host_os in
+interix[3-9]*)
+  # Interix 3.5 installs completely hosed .la files for C++, so rather than
+  # hack all around it, let's just trust "g++" to DTRT.
+  predep_objects_CXX=
+  postdep_objects_CXX=
+  postdeps_CXX=
+  ;;
+
+linux*)
+  case `$CC -V 2>&1 | sed 5q` in
+  *Sun\ C*)
+    # Sun C++ 5.9
+
+    # The more standards-conforming stlport4 library is
+    # incompatible with the Cstd library. Avoid specifying
+    # it if it's in CXXFLAGS. Ignore libCrun as
+    # -library=stlport4 depends on it.
+    case " $CXX $CXXFLAGS " in
+    *" -library=stlport4 "*)
+      solaris_use_stlport4=yes
+      ;;
+    esac
+
+    if test "$solaris_use_stlport4" != yes; then
+      postdeps_CXX='-library=Cstd -library=Crun'
+    fi
+    ;;
+  esac
+  ;;
+
+solaris*)
+  case $cc_basename in
+  CC* | sunCC*)
+    # The more standards-conforming stlport4 library is
+    # incompatible with the Cstd library. Avoid specifying
+    # it if it's in CXXFLAGS. Ignore libCrun as
+    # -library=stlport4 depends on it.
+    case " $CXX $CXXFLAGS " in
+    *" -library=stlport4 "*)
+      solaris_use_stlport4=yes
+      ;;
+    esac
+
+    # Adding this requires a known-good setup of shared libraries for
+    # Sun compiler versions before 5.6, else PIC objects from an old
+    # archive will be linked into the output, leading to subtle bugs.
+    if test "$solaris_use_stlport4" != yes; then
+      postdeps_CXX='-library=Cstd -library=Crun'
+    fi
+    ;;
+  esac
+  ;;
+esac
+
+
+case " $postdeps_CXX " in
+*" -lc "*) archive_cmds_need_lc_CXX=no ;;
+esac
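+# Strip the -L prefixes from the recorded search path to obtain a plain
+# list of directories.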
+ compiler_lib_search_dirs_CXX=
+if test -n "${compiler_lib_search_path_CXX}"; then
+ compiler_lib_search_dirs_CXX=`echo " ${compiler_lib_search_path_CXX}" | ${SED} -e 's! -L! !g' -e 's!^ !!'`
+fi
+
+
+
+
+
+
+
+
+
+
+
+
+
+    lt_prog_compiler_wl_CXX=
+lt_prog_compiler_pic_CXX=
+lt_prog_compiler_static_CXX=
+
+
+  # C++ specific cases for pic, static, wl, etc.
+  if test "$GXX" = yes; then
+    lt_prog_compiler_wl_CXX='-Wl,'
+    lt_prog_compiler_static_CXX='-static'
+
+    case $host_os in
+    aix*)
+      # All AIX code is PIC.
+      if test "$host_cpu" = ia64; then
+	# AIX 5 now supports IA64 processor
+	lt_prog_compiler_static_CXX='-Bstatic'
+      fi
+      ;;
+
+    amigaos*)
+      case $host_cpu in
+      powerpc)
+            # see comment about AmigaOS4 .so support
+            lt_prog_compiler_pic_CXX='-fPIC'
+        ;;
+      m68k)
+            # FIXME: we need at least 68020 code to build shared libraries, but
+            # adding the `-m68020' flag to GCC prevents building anything better,
+            # like `-m68040'.
+            lt_prog_compiler_pic_CXX='-m68020 -resident32 -malways-restore-a4'
+        ;;
+      esac
+      ;;
+
+    beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*)
+      # PIC is the default for these OSes.
+      ;;
+    mingw* | cygwin* | os2* | pw32* | cegcc*)
+      # This hack is so that the source file can tell whether it is being
+      # built for inclusion in a dll (and should export symbols, for example).
+      # Although the cygwin gcc ignores -fPIC, we still need this for old-style
+      # (--disable-auto-import) libraries.
+      lt_prog_compiler_pic_CXX='-DDLL_EXPORT'
+      ;;
+    darwin* | rhapsody*)
+      # PIC is the default on this platform
+      # Common symbols not allowed in MH_DYLIB files
+      lt_prog_compiler_pic_CXX='-fno-common'
+      ;;
+    *djgpp*)
+      # DJGPP does not support shared libraries at all
+      lt_prog_compiler_pic_CXX=
+      ;;
+    haiku*)
+      # PIC is the default for Haiku.
+      # The "-static" flag exists, but is broken.
+      lt_prog_compiler_static_CXX=
+      ;;
+    interix[3-9]*)
+      # Interix 3.x gcc -fpic/-fPIC options generate broken code.
+      # Instead, we relocate shared libraries at runtime.
+      ;;
+    sysv4*MP*)
+      if test -d /usr/nec; then
+	lt_prog_compiler_pic_CXX=-Kconform_pic
+      fi
+      ;;
+    hpux*)
+      # PIC is the default for 64-bit PA HP-UX, but not for 32-bit
+      # PA HP-UX.  On IA64 HP-UX, PIC is the default but the pic flag
+      # sets the default TLS model and affects inlining.
+      case $host_cpu in
+      hppa*64*)
+	;;
+      *)
+	lt_prog_compiler_pic_CXX='-fPIC'
+	;;
+      esac
+      ;;
+    *qnx* | *nto*)
+      # QNX uses GNU C++, but we need to pass the -shared option too,
+      # otherwise it will core dump.
+      lt_prog_compiler_pic_CXX='-fPIC -shared'
+      ;;
+    *)
+      lt_prog_compiler_pic_CXX='-fPIC'
+      ;;
+    esac
+  else
+    case $host_os in
+      aix[4-9]*)
+	# All AIX code is PIC.
+	if test "$host_cpu" = ia64; then
+	  # AIX 5 now supports IA64 processor
+	  lt_prog_compiler_static_CXX='-Bstatic'
+	else
+	  lt_prog_compiler_static_CXX='-bnso -bI:/lib/syscalls.exp'
+	fi
+	;;
+      chorus*)
+	case $cc_basename in
+	cxch68*)
+	  # Green Hills C++ Compiler
+	  # _LT_TAGVAR(lt_prog_compiler_static, CXX)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a"
+	  ;;
+	esac
+	;;
+      mingw* | cygwin* | os2* | pw32* | cegcc*)
+	# This hack is so that the source file can tell whether it is being
+	# built for inclusion in a dll (and should export symbols, for example).
+	lt_prog_compiler_pic_CXX='-DDLL_EXPORT'
+	;;
+      dgux*)
+	case $cc_basename in
+	  ec++*)
+	    lt_prog_compiler_pic_CXX='-KPIC'
+	    ;;
+	  ghcx*)
+	    # Green Hills C++ Compiler
+	    lt_prog_compiler_pic_CXX='-pic'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      freebsd* | dragonfly*)
+	# FreeBSD uses GNU C++
+	;;
+      hpux9* | hpux10* | hpux11*)
+	case $cc_basename in
+	  CC*)
+	    lt_prog_compiler_wl_CXX='-Wl,'
+	    lt_prog_compiler_static_CXX='${wl}-a ${wl}archive'
+	    if test "$host_cpu" != ia64; then
+	      lt_prog_compiler_pic_CXX='+Z'
+	    fi
+	    ;;
+	  aCC*)
+	    lt_prog_compiler_wl_CXX='-Wl,'
+	    lt_prog_compiler_static_CXX='${wl}-a ${wl}archive'
+	    case $host_cpu in
+	    hppa*64*|ia64*)
+	      # +Z the default
+	      ;;
+	    *)
+	      lt_prog_compiler_pic_CXX='+Z'
+	      ;;
+	    esac
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      interix*)
+	# This is c89, which is MS Visual C++ (no shared libs).
+	# Anyone want to do a port?
+	;;
+      irix5* | irix6* | nonstopux*)
+	case $cc_basename in
+	  CC*)
+	    lt_prog_compiler_wl_CXX='-Wl,'
+	    lt_prog_compiler_static_CXX='-non_shared'
+	    # CC pic flag -KPIC is the default.
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+	case $cc_basename in
+	  KCC*)
+	    # KAI C++ Compiler
+	    lt_prog_compiler_wl_CXX='--backend -Wl,'
+	    lt_prog_compiler_pic_CXX='-fPIC'
+	    ;;
+	  ecpc* )
+	    # old Intel C++ for x86_64 which still supported -KPIC.
+	    lt_prog_compiler_wl_CXX='-Wl,'
+	    lt_prog_compiler_pic_CXX='-KPIC'
+	    lt_prog_compiler_static_CXX='-static'
+	    ;;
+	  icpc* )
+	    # Intel C++, used to be incompatible with GCC.
+	    # ICC 10 doesn't accept -KPIC any more.
+	    lt_prog_compiler_wl_CXX='-Wl,'
+	    lt_prog_compiler_pic_CXX='-fPIC'
+	    lt_prog_compiler_static_CXX='-static'
+	    ;;
+	  pgCC* | pgcpp*)
+	    # Portland Group C++ compiler
+	    lt_prog_compiler_wl_CXX='-Wl,'
+	    lt_prog_compiler_pic_CXX='-fpic'
+	    lt_prog_compiler_static_CXX='-Bstatic'
+	    ;;
+	  cxx*)
+	    # Compaq C++
+	    # Make sure the PIC flag is empty.  It appears that all Alpha
+	    # Linux and Compaq Tru64 Unix objects are PIC.
+	    lt_prog_compiler_pic_CXX=
+	    lt_prog_compiler_static_CXX='-non_shared'
+	    ;;
+	  xlc* | xlC* | bgxl[cC]* | mpixl[cC]*)
+	    # IBM XL 8.0, 9.0 on PPC and BlueGene
+	    lt_prog_compiler_wl_CXX='-Wl,'
+	    lt_prog_compiler_pic_CXX='-qpic'
+	    lt_prog_compiler_static_CXX='-qstaticlink'
+	    ;;
+	  *)
+	    case `$CC -V 2>&1 | sed 5q` in
+	    *Sun\ C*)
+	      # Sun C++ 5.9
+	      lt_prog_compiler_pic_CXX='-KPIC'
+	      lt_prog_compiler_static_CXX='-Bstatic'
+	      lt_prog_compiler_wl_CXX='-Qoption ld '
+	      ;;
+	    esac
+	    ;;
+	esac
+	;;
+      lynxos*)
+	;;
+      m88k*)
+	;;
+      mvs*)
+	case $cc_basename in
+	  cxx*)
+	    lt_prog_compiler_pic_CXX='-W c,exportall'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      netbsd* | netbsdelf*-gnu)
+	;;
+      *qnx* | *nto*)
+        # QNX uses GNU C++, but we need to pass the -shared option too,
+        # otherwise it will core dump.
+        lt_prog_compiler_pic_CXX='-fPIC -shared'
+        ;;
+      osf3* | osf4* | osf5*)
+	case $cc_basename in
+	  KCC*)
+	    lt_prog_compiler_wl_CXX='--backend -Wl,'
+	    ;;
+	  RCC*)
+	    # Rational C++ 2.4.1
+	    lt_prog_compiler_pic_CXX='-pic'
+	    ;;
+	  cxx*)
+	    # Digital/Compaq C++
+	    lt_prog_compiler_wl_CXX='-Wl,'
+	    # Make sure the PIC flag is empty.  It appears that all Alpha
+	    # Linux and Compaq Tru64 Unix objects are PIC.
+	    lt_prog_compiler_pic_CXX=
+	    lt_prog_compiler_static_CXX='-non_shared'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      psos*)
+	;;
+      solaris*)
+	case $cc_basename in
+	  CC* | sunCC*)
+	    # Sun C++ 4.2, 5.x and Centerline C++
+	    lt_prog_compiler_pic_CXX='-KPIC'
+	    lt_prog_compiler_static_CXX='-Bstatic'
+	    lt_prog_compiler_wl_CXX='-Qoption ld '
+	    ;;
+	  gcx*)
+	    # Green Hills C++ Compiler
+	    lt_prog_compiler_pic_CXX='-PIC'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      sunos4*)
+	case $cc_basename in
+	  CC*)
+	    # Sun C++ 4.x
+	    lt_prog_compiler_pic_CXX='-pic'
+	    lt_prog_compiler_static_CXX='-Bstatic'
+	    ;;
+	  lcc*)
+	    # Lucid
+	    lt_prog_compiler_pic_CXX='-pic'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*)
+	case $cc_basename in
+	  CC*)
+	    lt_prog_compiler_wl_CXX='-Wl,'
+	    lt_prog_compiler_pic_CXX='-KPIC'
+	    lt_prog_compiler_static_CXX='-Bstatic'
+	    ;;
+	esac
+	;;
+      tandem*)
+	case $cc_basename in
+	  NCC*)
+	    # NonStop-UX NCC 3.20
+	    lt_prog_compiler_pic_CXX='-KPIC'
+	    ;;
+	  *)
+	    ;;
+	esac
+	;;
+      vxworks*)
+	;;
+      *)
+	lt_prog_compiler_can_build_shared_CXX=no
+	;;
+    esac
+  fi
+
+case $host_os in
+  # For platforms which do not support PIC, -DPIC is meaningless:
+  *djgpp*)
+    lt_prog_compiler_pic_CXX=
+    ;;
+  *)
+    lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC"
+    ;;
+esac
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5
+$as_echo_n "checking for $compiler option to produce PIC... " >&6; }
+if ${lt_cv_prog_compiler_pic_CXX+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_prog_compiler_pic_CXX=$lt_prog_compiler_pic_CXX
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_CXX" >&5
+$as_echo "$lt_cv_prog_compiler_pic_CXX" >&6; }
+lt_prog_compiler_pic_CXX=$lt_cv_prog_compiler_pic_CXX
+
+#
+# Check to make sure the PIC flag actually works.
+#
+if test -n "$lt_prog_compiler_pic_CXX"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5
+$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... " >&6; }
+if ${lt_cv_prog_compiler_pic_works_CXX+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_prog_compiler_pic_works_CXX=no
+   ac_outfile=conftest.$ac_objext
+   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+   lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC"
+   # Insert the option either (1) after the last *FLAGS variable, or
+   # (2) before a word containing "conftest.", or (3) at the end.
+   # Note that $ac_compile itself does not contain backslashes and begins
+   # with a dollar sign (not a hyphen), so the echo should work correctly.
+   # The option is referenced via a variable to avoid confusing sed.
+   lt_compile=`echo "$ac_compile" | $SED \
+   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+   -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+   -e 's:$: $lt_compiler_flag:'`
+   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
+   (eval "$lt_compile" 2>conftest.err)
+   ac_status=$?
+   cat conftest.err >&5
+   echo "$as_me:$LINENO: \$? = $ac_status" >&5
+   if (exit $ac_status) && test -s "$ac_outfile"; then
+     # The compiler can only warn and ignore the option if not recognized
+     # So say no if there are warnings other than the usual output.
+     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp
+     $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+     if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
+       lt_cv_prog_compiler_pic_works_CXX=yes
+     fi
+   fi
+   $RM conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works_CXX" >&5
+$as_echo "$lt_cv_prog_compiler_pic_works_CXX" >&6; }
+
+if test x"$lt_cv_prog_compiler_pic_works_CXX" = xyes; then
+    case $lt_prog_compiler_pic_CXX in
+     "" | " "*) ;;
+     *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;;
+     esac
+else
+    lt_prog_compiler_pic_CXX=
+     lt_prog_compiler_can_build_shared_CXX=no
+fi
+
+fi
+
+
+
+
+
+#
+# Check to make sure the static flag actually works.
+#
+wl=$lt_prog_compiler_wl_CXX eval lt_tmp_static_flag=\"$lt_prog_compiler_static_CXX\"
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5
+$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; }
+if ${lt_cv_prog_compiler_static_works_CXX+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_prog_compiler_static_works_CXX=no
+   save_LDFLAGS="$LDFLAGS"
+   LDFLAGS="$LDFLAGS $lt_tmp_static_flag"
+   echo "$lt_simple_link_test_code" > conftest.$ac_ext
+   if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
+     # The linker can only warn and ignore the option if not recognized
+     # So say no if there are warnings
+     if test -s conftest.err; then
+       # Append any errors to the config.log.
+       cat conftest.err 1>&5
+       $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp
+       $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+       if diff conftest.exp conftest.er2 >/dev/null; then
+         lt_cv_prog_compiler_static_works_CXX=yes
+       fi
+     else
+       lt_cv_prog_compiler_static_works_CXX=yes
+     fi
+   fi
+   $RM -r conftest*
+   LDFLAGS="$save_LDFLAGS"
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works_CXX" >&5
+$as_echo "$lt_cv_prog_compiler_static_works_CXX" >&6; }
+
+if test x"$lt_cv_prog_compiler_static_works_CXX" = xyes; then
+    :
+else
+    lt_prog_compiler_static_CXX=
+fi
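+
+# The static-flag probe is the link-time analogue: roughly
+#   g++ -o conftest -static conftest.cpp
+# (assuming GNU g++); if the link fails or emits non-boilerplate diagnostics,
+# lt_prog_compiler_static_CXX is cleared rather than passed on blindly.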
+
+
+
+
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
+$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
+if ${lt_cv_prog_compiler_c_o_CXX+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_prog_compiler_c_o_CXX=no
+   $RM -r conftest 2>/dev/null
+   mkdir conftest
+   cd conftest
+   mkdir out
+   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+   lt_compiler_flag="-o out/conftest2.$ac_objext"
+   # Insert the option either (1) after the last *FLAGS variable, or
+   # (2) before a word containing "conftest.", or (3) at the end.
+   # Note that $ac_compile itself does not contain backslashes and begins
+   # with a dollar sign (not a hyphen), so the echo should work correctly.
+   lt_compile=`echo "$ac_compile" | $SED \
+   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+   -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
+   -e 's:$: $lt_compiler_flag:'`
+   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
+   (eval "$lt_compile" 2>out/conftest.err)
+   ac_status=$?
+   cat out/conftest.err >&5
+   echo "$as_me:$LINENO: \$? = $ac_status" >&5
+   if (exit $ac_status) && test -s out/conftest2.$ac_objext
+   then
+     # The compiler can only warn and ignore the option if not recognized
+     # So say no if there are warnings
+     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
+     $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
+     if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
+       lt_cv_prog_compiler_c_o_CXX=yes
+     fi
+   fi
+   chmod u+w . 2>&5
+   $RM conftest*
+   # SGI C++ compiler will create directory out/ii_files/ for
+   # template instantiation
+   test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
+   $RM out/* && rmdir out
+   cd ..
+   $RM -r conftest
+   $RM conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5
+$as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; }
+
+
+
+
+
+hard_links="nottested"
+if test "$lt_cv_prog_compiler_c_o_CXX" = no && test "$need_locks" != no; then
+  # do not overwrite the value of need_locks provided by the user
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5
+$as_echo_n "checking if we can lock with hard links... " >&6; }
+  hard_links=yes
+  $RM conftest*
+  ln conftest.a conftest.b 2>/dev/null && hard_links=no
+  touch conftest.a
+  ln conftest.a conftest.b 2>&5 || hard_links=no
+  ln conftest.a conftest.b 2>/dev/null && hard_links=no
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5
+$as_echo "$hard_links" >&6; }
+  if test "$hard_links" = no; then
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5
+$as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;}
+    need_locks=warn
+  fi
+else
+  need_locks=no
+fi
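+
+# Sketch of the probe above: 'ln conftest.a conftest.b' must fail while
+# conftest.a is absent and succeed once it exists.  If hard links are not
+# usable and the compiler cannot do '-c -o', need_locks=warn makes libtool
+# warn that parallel builds ('make -j') may be unsafe.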
+
+
+
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5
+$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; }
+
+  export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+  exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'
+  case $host_os in
+  aix[4-9]*)
+    # If we're using GNU nm, then we don't want the "-C" option.
+    # For AIX nm, -C means demangle; for GNU nm, -C means do not demangle.
+    # Also, AIX nm treats weak defined symbols like other global defined
+    # symbols, whereas GNU nm marks them as "W".
+    if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
+      export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+    else
+      export_symbols_cmds_CXX='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+    fi
+    ;;
+  pw32*)
+    export_symbols_cmds_CXX="$ltdll_cmds"
+    ;;
+  cygwin* | mingw* | cegcc*)
+    case $cc_basename in
+    cl*)
+      exclude_expsyms_CXX='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
+      ;;
+    *)
+      export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols'
+      exclude_expsyms_CXX='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'
+      ;;
+    esac
+    ;;
+  linux* | k*bsd*-gnu | gnu*)
+    link_all_deplibs_CXX=no
+    ;;
+  *)
+    export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+    ;;
+  esac
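+
+# For example, on a generic ELF target the default pipeline above turns an
+# 'nm' line such as '0000000000001120 T rsb_spmv' (hypothetical symbol) into
+# the bare name, so $export_symbols becomes a sorted, de-duplicated list.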
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5
+$as_echo "$ld_shlibs_CXX" >&6; }
+test "$ld_shlibs_CXX" = no && can_build_shared=no
+
+with_gnu_ld_CXX=$with_gnu_ld
+
+
+
+
+
+
+#
+# Do we need to explicitly link libc?
+#
+case "x$archive_cmds_need_lc_CXX" in
+x|xyes)
+  # Assume -lc should be added
+  archive_cmds_need_lc_CXX=yes
+
+  if test "$enable_shared" = yes && test "$GCC" = yes; then
+    case $archive_cmds_CXX in
+    *'~'*)
+      # FIXME: we may have to deal with multi-command sequences.
+      ;;
+    '$CC '*)
+      # Test whether the compiler implicitly links with -lc since on some
+      # systems, -lgcc has to come before -lc. If gcc already passes -lc
+      # to ld, don't add -lc before -lgcc.
+      { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5
+$as_echo_n "checking whether -lc should be explicitly linked in... " >&6; }
+if ${lt_cv_archive_cmds_need_lc_CXX+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  $RM conftest*
+	echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+
+	if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+  (eval $ac_compile) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } 2>conftest.err; then
+	  soname=conftest
+	  lib=conftest
+	  libobjs=conftest.$ac_objext
+	  deplibs=
+	  wl=$lt_prog_compiler_wl_CXX
+	  pic_flag=$lt_prog_compiler_pic_CXX
+	  compiler_flags=-v
+	  linker_flags=-v
+	  verstring=
+	  output_objdir=.
+	  libname=conftest
+	  lt_save_allow_undefined_flag=$allow_undefined_flag_CXX
+	  allow_undefined_flag_CXX=
+	  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5
+  (eval $archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+	  then
+	    lt_cv_archive_cmds_need_lc_CXX=no
+	  else
+	    lt_cv_archive_cmds_need_lc_CXX=yes
+	  fi
+	  allow_undefined_flag_CXX=$lt_save_allow_undefined_flag
+	else
+	  cat conftest.err 1>&5
+	fi
+	$RM conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc_CXX" >&5
+$as_echo "$lt_cv_archive_cmds_need_lc_CXX" >&6; }
+      archive_cmds_need_lc_CXX=$lt_cv_archive_cmds_need_lc_CXX
+      ;;
+    esac
+  fi
+  ;;
+esac
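+
+# The test above, in essence: compile a trivial object, replay
+# $archive_cmds_CXX with compiler_flags=-v, and grep the verbose output,
+# roughly like
+#   g++ -shared -v conftest.o -o conftest.so 2>&1 | grep ' -lc '
+# If the driver already passes -lc, libtool does not add it again.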
+
+
+
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5
+$as_echo_n "checking dynamic linker characteristics... " >&6; }
+
+library_names_spec=
+libname_spec='lib$name'
+soname_spec=
+shrext_cmds=".so"
+postinstall_cmds=
+postuninstall_cmds=
+finish_cmds=
+finish_eval=
+shlibpath_var=
+shlibpath_overrides_runpath=unknown
+version_type=none
+dynamic_linker="$host_os ld.so"
+sys_lib_dlsearch_path_spec="/lib /usr/lib"
+need_lib_prefix=unknown
+hardcode_into_libs=no
+
+# when you set need_version to no, make sure it does not cause -set_version
+# flags to be left without arguments
+need_version=unknown
+
+case $host_os in
+aix3*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a'
+  shlibpath_var=LIBPATH
+
+  # AIX 3 has no versioning support, so we append a major version to the name.
+  soname_spec='${libname}${release}${shared_ext}$major'
+  ;;
+
+aix[4-9]*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  hardcode_into_libs=yes
+  if test "$host_cpu" = ia64; then
+    # AIX 5 supports IA64
+    library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}'
+    shlibpath_var=LD_LIBRARY_PATH
+  else
+    # With GCC up to 2.95.x, collect2 would create an import file
+    # for dependence libraries.  The import file would start with
+    # the line `#! .'.  This would cause the generated library to
+    # depend on `.', always an invalid library.  This was fixed in
+    # development snapshots of GCC prior to 3.0.
+    case $host_os in
+      aix4 | aix4.[01] | aix4.[01].*)
+      if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)'
+	   echo ' yes '
+	   echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then
+	:
+      else
+	can_build_shared=no
+      fi
+      ;;
+    esac
+    # AIX (on Power*) has no versioning support, so currently we cannot
+    # hardcode a correct soname into the executable.  We could probably add
+    # versioning support to collect2, so additional links may become useful
+    # in the future.
+    if test "$aix_use_runtimelinking" = yes; then
+      # If using run time linking (on AIX 4.2 or later) use lib<name>.so
+      # instead of lib<name>.a to let people know that these are not
+      # typical AIX shared libraries.
+      library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+    else
+      # We preserve .a as extension for shared libraries through AIX4.2
+      # and later when we are not doing run time linking.
+      library_names_spec='${libname}${release}.a $libname.a'
+      soname_spec='${libname}${release}${shared_ext}$major'
+    fi
+    shlibpath_var=LIBPATH
+  fi
+  ;;
+
+amigaos*)
+  case $host_cpu in
+  powerpc)
+    # Since July 2007 AmigaOS4 officially supports .so libraries.
+    # When compiling the executable, add -use-dynld -Lsobjs: to the compile line.
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+    ;;
+  m68k)
+    library_names_spec='$libname.ixlibrary $libname.a'
+    # Create ${libname}_ixlibrary.a entries in /sys/libs.
+    finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
+    ;;
+  esac
+  ;;
+
+beos*)
+  library_names_spec='${libname}${shared_ext}'
+  dynamic_linker="$host_os ld.so"
+  shlibpath_var=LIBRARY_PATH
+  ;;
+
+bsdi[45]*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
+  sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib"
+  # the default ld.so.conf also contains /usr/contrib/lib and
+  # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow
+  # libtool to hard-code these into programs
+  ;;
+
+cygwin* | mingw* | pw32* | cegcc*)
+  version_type=windows
+  shrext_cmds=".dll"
+  need_version=no
+  need_lib_prefix=no
+
+  case $GCC,$cc_basename in
+  yes,*)
+    # gcc
+    library_names_spec='$libname.dll.a'
+    # DLL is installed to $(libdir)/../bin by postinstall_cmds
+    postinstall_cmds='base_file=`basename \${file}`~
+      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~
+      dldir=$destdir/`dirname \$dlpath`~
+      test -d \$dldir || mkdir -p \$dldir~
+      $install_prog $dir/$dlname \$dldir/$dlname~
+      chmod a+x \$dldir/$dlname~
+      if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
+        eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
+      fi'
+    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+      dlpath=$dir/\$dldll~
+       $RM \$dlpath'
+    shlibpath_overrides_runpath=yes
+
+    case $host_os in
+    cygwin*)
+      # Cygwin DLLs use 'cyg' prefix rather than 'lib'
+      soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+
+      ;;
+    mingw* | cegcc*)
+      # MinGW DLLs use traditional 'lib' prefix
+      soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+      ;;
+    pw32*)
+      # pw32 DLLs use 'pw' prefix rather than 'lib'
+      library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+      ;;
+    esac
+    dynamic_linker='Win32 ld.exe'
+    ;;
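+
+    # Example of the naming above (hypothetical 'libfoo', major version 1):
+    # Cygwin produces 'cygfoo-1.dll', MinGW 'libfoo-1.dll'; the sed expression
+    # 's/[.]/-/g' turns any dots in $release into dashes.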
+
+  *,cl*)
+    # Native MSVC
+    libname_spec='$name'
+    soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+    library_names_spec='${libname}.dll.lib'
+
+    case $build_os in
+    mingw*)
+      sys_lib_search_path_spec=
+      lt_save_ifs=$IFS
+      IFS=';'
+      for lt_path in $LIB
+      do
+        IFS=$lt_save_ifs
+        # Let DOS variable expansion print the short 8.3 style file name.
+        lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"`
+        sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path"
+      done
+      IFS=$lt_save_ifs
+      # Convert to MSYS style.
+      sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'`
+      ;;
+    cygwin*)
+      # Convert to unix form, then to dos form, then back to unix form
+      # but this time dos style (no spaces!) so that the unix form looks
+      # like /cygdrive/c/PROGRA~1:/cygdr...
+      sys_lib_search_path_spec=`cygpath --path --unix "$LIB"`
+      sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null`
+      sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+      ;;
+    *)
+      sys_lib_search_path_spec="$LIB"
+      if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then
+        # It is most probably a Windows format PATH.
+        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'`
+      else
+        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+      fi
+      # FIXME: find the short name or the path components, as spaces are
+      # common. (e.g. "Program Files" -> "PROGRA~1")
+      ;;
+    esac
+
+    # DLL is installed to $(libdir)/../bin by postinstall_cmds
+    postinstall_cmds='base_file=`basename \${file}`~
+      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~
+      dldir=$destdir/`dirname \$dlpath`~
+      test -d \$dldir || mkdir -p \$dldir~
+      $install_prog $dir/$dlname \$dldir/$dlname'
+    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+      dlpath=$dir/\$dldll~
+       $RM \$dlpath'
+    shlibpath_overrides_runpath=yes
+    dynamic_linker='Win32 link.exe'
+    ;;
+
+  *)
+    # Assume MSVC wrapper
+    library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib'
+    dynamic_linker='Win32 ld.exe'
+    ;;
+  esac
+  # FIXME: first we should search . and the directory the executable is in
+  shlibpath_var=PATH
+  ;;
+
+darwin* | rhapsody*)
+  dynamic_linker="$host_os dyld"
+  version_type=darwin
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext'
+  soname_spec='${libname}${release}${major}$shared_ext'
+  shlibpath_overrides_runpath=yes
+  shlibpath_var=DYLD_LIBRARY_PATH
+  shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`'
+
+  sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib'
+  ;;
+
+dgux*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  ;;
+
+freebsd* | dragonfly*)
+  # DragonFly does not have aout.  When/if they implement a new
+  # versioning mechanism, adjust this.
+  if test -x /usr/bin/objformat; then
+    objformat=`/usr/bin/objformat`
+  else
+    case $host_os in
+    freebsd[23].*) objformat=aout ;;
+    *) objformat=elf ;;
+    esac
+  fi
+  version_type=freebsd-$objformat
+  case $version_type in
+    freebsd-elf*)
+      library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}'
+      need_version=no
+      need_lib_prefix=no
+      ;;
+    freebsd-*)
+      library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix'
+      need_version=yes
+      ;;
+  esac
+  shlibpath_var=LD_LIBRARY_PATH
+  case $host_os in
+  freebsd2.*)
+    shlibpath_overrides_runpath=yes
+    ;;
+  freebsd3.[01]* | freebsdelf3.[01]*)
+    shlibpath_overrides_runpath=yes
+    hardcode_into_libs=yes
+    ;;
+  freebsd3.[2-9]* | freebsdelf3.[2-9]* | \
+  freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1)
+    shlibpath_overrides_runpath=no
+    hardcode_into_libs=yes
+    ;;
+  *) # from 4.6 on, and DragonFly
+    shlibpath_overrides_runpath=yes
+    hardcode_into_libs=yes
+    ;;
+  esac
+  ;;
+
+haiku*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  dynamic_linker="$host_os runtime_loader"
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib'
+  hardcode_into_libs=yes
+  ;;
+
+hpux9* | hpux10* | hpux11*)
+  # Give a soname corresponding to the major version so that dld.sl refuses to
+  # link against other versions.
+  version_type=sunos
+  need_lib_prefix=no
+  need_version=no
+  case $host_cpu in
+  ia64*)
+    shrext_cmds='.so'
+    hardcode_into_libs=yes
+    dynamic_linker="$host_os dld.so"
+    shlibpath_var=LD_LIBRARY_PATH
+    shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+    soname_spec='${libname}${release}${shared_ext}$major'
+    if test "X$HPUX_IA64_MODE" = X32; then
+      sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib"
+    else
+      sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64"
+    fi
+    sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+    ;;
+  hppa*64*)
+    shrext_cmds='.sl'
+    hardcode_into_libs=yes
+    dynamic_linker="$host_os dld.sl"
+    shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH
+    shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+    soname_spec='${libname}${release}${shared_ext}$major'
+    sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64"
+    sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+    ;;
+  *)
+    shrext_cmds='.sl'
+    dynamic_linker="$host_os dld.sl"
+    shlibpath_var=SHLIB_PATH
+    shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+    soname_spec='${libname}${release}${shared_ext}$major'
+    ;;
+  esac
+  # HP-UX runs *really* slowly unless shared libraries are mode 555, ...
+  postinstall_cmds='chmod 555 $lib'
+  # or fails outright, so override atomically:
+  install_override_mode=555
+  ;;
+
+interix[3-9]*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  ;;
+
+irix5* | irix6* | nonstopux*)
+  case $host_os in
+    nonstopux*) version_type=nonstopux ;;
+    *)
+	if test "$lt_cv_prog_gnu_ld" = yes; then
+		version_type=linux # correct to gnu/linux during the next big refactor
+	else
+		version_type=irix
+	fi ;;
+  esac
+  need_lib_prefix=no
+  need_version=no
+  soname_spec='${libname}${release}${shared_ext}$major'
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}'
+  case $host_os in
+  irix5* | nonstopux*)
+    libsuff= shlibsuff=
+    ;;
+  *)
+    case $LD in # libtool.m4 will add one of these switches to LD
+    *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ")
+      libsuff= shlibsuff= libmagic=32-bit;;
+    *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ")
+      libsuff=32 shlibsuff=N32 libmagic=N32;;
+    *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ")
+      libsuff=64 shlibsuff=64 libmagic=64-bit;;
+    *) libsuff= shlibsuff= libmagic=never-match;;
+    esac
+    ;;
+  esac
+  shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
+  shlibpath_overrides_runpath=no
+  sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}"
+  sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}"
+  hardcode_into_libs=yes
+  ;;
+
+# No shared lib support for Linux oldld, aout, or coff.
+linux*oldld* | linux*aout* | linux*coff*)
+  dynamic_linker=no
+  ;;
+
+# This must be glibc/ELF.
+linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+
+  # Some binutils ld are patched to set DT_RUNPATH
+  if ${lt_cv_shlibpath_overrides_runpath+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  lt_cv_shlibpath_overrides_runpath=no
+    save_LDFLAGS=$LDFLAGS
+    save_libdir=$libdir
+    eval "libdir=/foo; wl=\"$lt_prog_compiler_wl_CXX\"; \
+	 LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec_CXX\""
+    cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_link "$LINENO"; then :
+  if  ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then :
+  lt_cv_shlibpath_overrides_runpath=yes
+fi
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+    LDFLAGS=$save_LDFLAGS
+    libdir=$save_libdir
+
+fi
+
+  shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath
+
+  # This implies no fast_install, which is unacceptable.
+  # Some rework will be needed to allow for fast_install
+  # before this can be enabled.
+  hardcode_into_libs=yes
+
+  # Append ld.so.conf contents to the search path
+  if test -f /etc/ld.so.conf; then
+    lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[	 ]*hwcap[	 ]/d;s/[:,	]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '`
+    sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra"
+  fi
+
+  # We used to test for /lib/ld.so.1 and disable shared libraries on
+  # powerpc, because MkLinux only supported shared libraries with the
+  # GNU dynamic linker.  Since this was broken with cross compilers,
+  # most powerpc-linux boxes support dynamic linking these days and
+  # people can always --disable-shared, the test was removed, and we
+  # assume the GNU/Linux dynamic linker is in use.
+  dynamic_linker='GNU/Linux ld.so'
+  ;;
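+
+  # Concretely (hypothetical 'libfoo' with version suffix 1.2.3): the spec
+  # above expands to 'libfoo.so.1.2.3 libfoo.so.1 libfoo.so' with soname
+  # 'libfoo.so.1', and 'ldconfig -n $libdir' refreshes the symlinks.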
+
+netbsdelf*-gnu)
+  version_type=linux
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  dynamic_linker='NetBSD ld.elf_so'
+  ;;
+
+netbsd*)
+  version_type=sunos
+  need_lib_prefix=no
+  need_version=no
+  if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+    finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+    dynamic_linker='NetBSD (a.out) ld.so'
+  else
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
+    soname_spec='${libname}${release}${shared_ext}$major'
+    dynamic_linker='NetBSD ld.elf_so'
+  fi
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  hardcode_into_libs=yes
+  ;;
+
+newsos6)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  ;;
+
+*nto* | *qnx*)
+  version_type=qnx
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  dynamic_linker='ldqnx.so'
+  ;;
+
+openbsd*)
+  version_type=sunos
+  sys_lib_dlsearch_path_spec="/usr/lib"
+  need_lib_prefix=no
+  # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs.
+  case $host_os in
+    openbsd3.3 | openbsd3.3.*)	need_version=yes ;;
+    *)				need_version=no  ;;
+  esac
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+  finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+    case $host_os in
+      openbsd2.[89] | openbsd2.[89].*)
+	shlibpath_overrides_runpath=no
+	;;
+      *)
+	shlibpath_overrides_runpath=yes
+	;;
+      esac
+  else
+    shlibpath_overrides_runpath=yes
+  fi
+  ;;
+
+os2*)
+  libname_spec='$name'
+  shrext_cmds=".dll"
+  need_lib_prefix=no
+  library_names_spec='$libname${shared_ext} $libname.a'
+  dynamic_linker='OS/2 ld.exe'
+  shlibpath_var=LIBPATH
+  ;;
+
+osf3* | osf4* | osf5*)
+  version_type=osf
+  need_lib_prefix=no
+  need_version=no
+  soname_spec='${libname}${release}${shared_ext}$major'
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  shlibpath_var=LD_LIBRARY_PATH
+  sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
+  sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec"
+  ;;
+
+rdos*)
+  dynamic_linker=no
+  ;;
+
+solaris*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  hardcode_into_libs=yes
+  # ldd complains unless libraries are executable
+  postinstall_cmds='chmod +x $lib'
+  ;;
+
+sunos4*)
+  version_type=sunos
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+  finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  if test "$with_gnu_ld" = yes; then
+    need_lib_prefix=no
+  fi
+  need_version=yes
+  ;;
+
+sysv4 | sysv4.3*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  case $host_vendor in
+    sni)
+      shlibpath_overrides_runpath=no
+      need_lib_prefix=no
+      runpath_var=LD_RUN_PATH
+      ;;
+    siemens)
+      need_lib_prefix=no
+      ;;
+    motorola)
+      need_lib_prefix=no
+      need_version=no
+      shlibpath_overrides_runpath=no
+      sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib'
+      ;;
+  esac
+  ;;
+
+sysv4*MP*)
+  if test -d /usr/nec; then
+    version_type=linux # correct to gnu/linux during the next big refactor
+    library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}'
+    soname_spec='$libname${shared_ext}.$major'
+    shlibpath_var=LD_LIBRARY_PATH
+  fi
+  ;;
+
+sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+  version_type=freebsd-elf
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=yes
+  hardcode_into_libs=yes
+  if test "$with_gnu_ld" = yes; then
+    sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib'
+  else
+    sys_lib_search_path_spec='/usr/ccs/lib /usr/lib'
+    case $host_os in
+      sco3.2v5*)
+        sys_lib_search_path_spec="$sys_lib_search_path_spec /lib"
+	;;
+    esac
+  fi
+  sys_lib_dlsearch_path_spec='/usr/lib'
+  ;;
+
+tpf*)
+  # TPF is a cross-target only.  Preferred cross-host = GNU/Linux.
+  version_type=linux # correct to gnu/linux during the next big refactor
+  need_lib_prefix=no
+  need_version=no
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  shlibpath_var=LD_LIBRARY_PATH
+  shlibpath_overrides_runpath=no
+  hardcode_into_libs=yes
+  ;;
+
+uts4*)
+  version_type=linux # correct to gnu/linux during the next big refactor
+  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+  soname_spec='${libname}${release}${shared_ext}$major'
+  shlibpath_var=LD_LIBRARY_PATH
+  ;;
+
+*)
+  dynamic_linker=no
+  ;;
+esac
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5
+$as_echo "$dynamic_linker" >&6; }
+test "$dynamic_linker" = no && can_build_shared=no
+
+variables_saved_for_relink="PATH $shlibpath_var $runpath_var"
+if test "$GCC" = yes; then
+  variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH"
+fi
+
+if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then
+  sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec"
+fi
+if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then
+  sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec"
+fi
+
+
+
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5
+$as_echo_n "checking how to hardcode library paths into programs... " >&6; }
+hardcode_action_CXX=
+if test -n "$hardcode_libdir_flag_spec_CXX" ||
+   test -n "$runpath_var_CXX" ||
+   test "X$hardcode_automatic_CXX" = "Xyes" ; then
+
+  # We can hardcode non-existent directories.
+  if test "$hardcode_direct_CXX" != no &&
+     # If the only mechanism to avoid hardcoding is shlibpath_var, we
+     # have to relink, otherwise we might link with an installed library
+     # when we should be linking with a yet-to-be-installed one
+     ## test "$_LT_TAGVAR(hardcode_shlibpath_var, CXX)" != no &&
+     test "$hardcode_minus_L_CXX" != no; then
+    # Linking always hardcodes the temporary library directory.
+    hardcode_action_CXX=relink
+  else
+    # We can link without hardcoding, and we can hardcode nonexisting dirs.
+    hardcode_action_CXX=immediate
+  fi
+else
+  # We cannot hardcode anything, or else we can only hardcode existing
+  # directories.
+  hardcode_action_CXX=unsupported
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action_CXX" >&5
+$as_echo "$hardcode_action_CXX" >&6; }
+
+if test "$hardcode_action_CXX" = relink ||
+   test "$inherit_rpath_CXX" = yes; then
+  # Fast installation is not supported
+  enable_fast_install=no
+elif test "$shlibpath_overrides_runpath" = yes ||
+     test "$enable_shared" = no; then
+  # Fast installation is not necessary
+  enable_fast_install=needless
+fi
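+
+# Net effect: 'immediate' means the install-time run path can be hardcoded at
+# link time; 'relink' forces relinking at install time (so fast_install is
+# off); 'unsupported' leaves run-time lookup to $shlibpath_var instead.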
+
+
+
+
+
+
+
+  fi # test -n "$compiler"
+
+  CC=$lt_save_CC
+  CFLAGS=$lt_save_CFLAGS
+  LDCXX=$LD
+  LD=$lt_save_LD
+  GCC=$lt_save_GCC
+  with_gnu_ld=$lt_save_with_gnu_ld
+  lt_cv_path_LDCXX=$lt_cv_path_LD
+  lt_cv_path_LD=$lt_save_path_LD
+  lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld
+  lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld
+fi # test "$_lt_caught_CXX_error" != yes
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+# On epsilon, cc is not reloaded with modules.
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+if test -n "$ac_tool_prefix"; then
+  for ac_prog in xlc_r xlc icc pgcc gcc cc
+  do
+    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_CC+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$CC"; then
+  ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+    test -n "$CC" && break
+  done
+fi
+if test -z "$CC"; then
+  ac_ct_CC=$CC
+  for ac_prog in xlc_r xlc icc pgcc gcc cc
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_ac_ct_CC+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_ct_CC"; then
+  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_ac_ct_CC="$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+$as_echo "$ac_ct_CC" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  test -n "$ac_ct_CC" && break
+done
+
+  if test "x$ac_ct_CC" = x; then
+    CC=""
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    CC=$ac_ct_CC
+  fi
+fi
+
+
+test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "no acceptable C compiler found in \$PATH
+See \`config.log' for more details" "$LINENO" 5; }
+
+# Provide some information about the compiler.
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+for ac_option in --version -v -V -qversion; do
+  { { ac_try="$ac_compiler $ac_option >&5"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_compiler $ac_option >&5") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    sed '10a\
+... rest of stderr output deleted ...
+         10q' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+  fi
+  rm -f conftest.er1 conftest.err
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+done
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5
+$as_echo_n "checking whether we are using the GNU C compiler... " >&6; }
+if ${ac_cv_c_compiler_gnu+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+#ifndef __GNUC__
+       choke me
+#endif
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_compiler_gnu=yes
+else
+  ac_compiler_gnu=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_c_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5
+$as_echo "$ac_cv_c_compiler_gnu" >&6; }
+if test $ac_compiler_gnu = yes; then
+  GCC=yes
+else
+  GCC=
+fi
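+
+# Note the probe only checks the __GNUC__ macro:
+#   #ifndef __GNUC__
+#     choke me
+#   #endif
+# so GNU-compatible compilers (e.g. clang, or icc in GNU mode) also yield
+# GCC=yes here.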
+ac_test_CFLAGS=${CFLAGS+set}
+ac_save_CFLAGS=$CFLAGS
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
+$as_echo_n "checking whether $CC accepts -g... " >&6; }
+if ${ac_cv_prog_cc_g+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_save_c_werror_flag=$ac_c_werror_flag
+   ac_c_werror_flag=yes
+   ac_cv_prog_cc_g=no
+   CFLAGS="-g"
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_prog_cc_g=yes
+else
+  CFLAGS=""
+      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+else
+  ac_c_werror_flag=$ac_save_c_werror_flag
+	 CFLAGS="-g"
+	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_prog_cc_g=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+   ac_c_werror_flag=$ac_save_c_werror_flag
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
+$as_echo "$ac_cv_prog_cc_g" >&6; }
+if test "$ac_test_CFLAGS" = set; then
+  CFLAGS=$ac_save_CFLAGS
+elif test $ac_cv_prog_cc_g = yes; then
+  if test "$GCC" = yes; then
+    CFLAGS="-g -O2"
+  else
+    CFLAGS="-g"
+  fi
+else
+  if test "$GCC" = yes; then
+    CFLAGS="-O2"
+  else
+    CFLAGS=
+  fi
+fi
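+
+# Net effect of the block above: user-supplied flags always win (e.g.
+# './configure CFLAGS="-O3"' keeps exactly -O3); otherwise GCC gets '-g -O2',
+# a non-GCC compiler accepting -g gets '-g', and anything else gets empty
+# CFLAGS.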
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5
+$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
+if ${ac_cv_prog_cc_c89+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_cv_prog_cc_c89=no
+ac_save_CC=$CC
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <stdarg.h>
+#include <stdio.h>
+struct stat;
+/* Most of the following tests are stolen from RCS 5.7's src/conf.sh.  */
+struct buf { int x; };
+FILE * (*rcsopen) (struct buf *, struct stat *, int);
+static char *e (p, i)
+     char **p;
+     int i;
+{
+  return p[i];
+}
+static char *f (char * (*g) (char **, int), char **p, ...)
+{
+  char *s;
+  va_list v;
+  va_start (v,p);
+  s = g (p, va_arg (v,int));
+  va_end (v);
+  return s;
+}
+
+/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default.  It has
+   function prototypes and stuff, but not '\xHH' hex character constants.
+   These don't provoke an error unfortunately, instead are silently treated
+   as 'x'.  The following induces an error, until -std is added to get
+   proper ANSI mode.  Curiously '\x00'!='x' always comes out true, for an
+   array size at least.  It's necessary to write '\x00'==0 to get something
+   that's true only with -std.  */
+int osf4_cc_array ['\x00' == 0 ? 1 : -1];
+
+/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
+   inside strings and character constants.  */
+#define FOO(x) 'x'
+int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1];
+
+int test (int i, double x);
+struct s1 {int (*f) (int a);};
+struct s2 {int (*f) (double a);};
+int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int);
+int argc;
+char **argv;
+int
+main ()
+{
+return f (e, argv, 0) != argv[0]  ||  f (e, argv, 1) != argv[1];
+  ;
+  return 0;
+}
+_ACEOF
+for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \
+	-Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
+do
+  CC="$ac_save_CC $ac_arg"
+  if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_prog_cc_c89=$ac_arg
+fi
+rm -f core conftest.err conftest.$ac_objext
+  test "x$ac_cv_prog_cc_c89" != "xno" && break
+done
+rm -f conftest.$ac_ext
+CC=$ac_save_CC
+
+fi
+# AC_CACHE_VAL
+case "x$ac_cv_prog_cc_c89" in
+  x)
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+$as_echo "none needed" >&6; } ;;
+  xno)
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+$as_echo "unsupported" >&6; } ;;
+  *)
+    CC="$CC $ac_cv_prog_cc_c89"
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5
+$as_echo "$ac_cv_prog_cc_c89" >&6; } ;;
+esac
+if test "x$ac_cv_prog_cc_c89" != xno; then :
+
+fi
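+
+# Example outcomes: a modern GCC needs no option ('none needed'); an old
+# HP-UX cc is promoted to 'cc -Aa -D_HPUX_SOURCE'; the accepted option then
+# rides along in $CC for the remaining checks.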
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+depcc="$CC"   am_compiler_list=
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
+$as_echo_n "checking dependency style of $depcc... " >&6; }
+if ${am_cv_CC_dependencies_compiler_type+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+  # We make a subdir and do the tests there.  Otherwise we can end up
+  # making bogus files that we don't know about and never remove.  For
+  # instance it was reported that on HP-UX the gcc test will end up
+  # making a dummy file named `D' -- because `-MD' means `put the output
+  # in D'.
+  rm -rf conftest.dir
+  mkdir conftest.dir
+  # Copy depcomp to subdir because otherwise we won't find it if we're
+  # using a relative directory.
+  cp "$am_depcomp" conftest.dir
+  cd conftest.dir
+  # We will build objects and dependencies in a subdirectory because
+  # it helps to detect inapplicable dependency modes.  For instance
+  # both Tru64's cc and ICC support -MD to output dependencies as a
+  # side effect of compilation, but ICC will put the dependencies in
+  # the current directory while Tru64 will put them in the object
+  # directory.
+  mkdir sub
+
+  am_cv_CC_dependencies_compiler_type=none
+  if test "$am_compiler_list" = ""; then
+     am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp`
+  fi
+  am__universal=false
+  case " $depcc " in #(
+     *\ -arch\ *\ -arch\ *) am__universal=true ;;
+     esac
+
+  for depmode in $am_compiler_list; do
+    # Setup a source with many dependencies, because some compilers
+    # like to wrap large dependency lists on column 80 (with \), and
+    # we should not choose a depcomp mode which is confused by this.
+    #
+    # We need to recreate these files for each test, as the compiler may
+    # overwrite some of them when testing with obscure command lines.
+    # This happens at least with the AIX C compiler.
+    : > sub/conftest.c
+    for i in 1 2 3 4 5 6; do
+      echo '#include "conftst'$i'.h"' >> sub/conftest.c
+      # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with
+      # Solaris 8's {/usr,}/bin/sh.
+      touch sub/conftst$i.h
+    done
+    echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+    # We check with `-c' and `-o' for the sake of the "dashmstdout"
+    # mode.  It turns out that the SunPro C++ compiler does not properly
+    # handle `-M -o', and we need to detect this.  Also, some Intel
+    # versions had trouble with output in subdirs
+    am__obj=sub/conftest.${OBJEXT-o}
+    am__minus_obj="-o $am__obj"
+    case $depmode in
+    gcc)
+      # This depmode causes a compiler race in universal mode.
+      test "$am__universal" = false || continue
+      ;;
+    nosideeffect)
+      # after this tag, mechanisms are not by side-effect, so they'll
+      # only be used when explicitly requested
+      if test "x$enable_dependency_tracking" = xyes; then
+	continue
+      else
+	break
+      fi
+      ;;
+    msvc7 | msvc7msys | msvisualcpp | msvcmsys)
+      # This compiler won't grok `-c -o', but also, the minuso test has
+      # not run yet.  These depmodes are late enough in the game, and
+      # so weak that their functioning should not be impacted.
+      am__obj=conftest.${OBJEXT-o}
+      am__minus_obj=
+      ;;
+    none) break ;;
+    esac
+    if depmode=$depmode \
+       source=sub/conftest.c object=$am__obj \
+       depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+       $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+         >/dev/null 2>conftest.err &&
+       grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+       grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+       grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+       ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+      # icc doesn't choke on unknown options, it will just issue warnings
+      # or remarks (even with -Werror).  So we grep stderr for any message
+      # that says an option was ignored or not supported.
+      # When given -MP, icc 7.0 and 7.1 complain thusly:
+      #   icc: Command line warning: ignoring option '-M'; no argument required
+      # The diagnosis changed in icc 8.0:
+      #   icc: Command line remark: option '-MP' not supported
+      if (grep 'ignoring option' conftest.err ||
+          grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+        am_cv_CC_dependencies_compiler_type=$depmode
+        break
+      fi
+    fi
+  done
+
+  cd ..
+  rm -rf conftest.dir
+else
+  am_cv_CC_dependencies_compiler_type=none
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5
+$as_echo "$am_cv_CC_dependencies_compiler_type" >&6; }
+CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type
+
+ if test "x$enable_dependency_tracking" != xno \
+    && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then
+  am__fastdepCC_TRUE=
+  am__fastdepCC_FALSE='#'
+else
+  am__fastdepCC_TRUE='#'
+  am__fastdepCC_FALSE=
+fi
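+
+# With a current GCC the chosen depmode is typically 'gcc3', i.e. depcomp has
+# the compiler emit dependencies as a side effect, roughly
+#   gcc -MT sub/conftest.o -MD -MP -MF sub/conftest.TPo -c -o sub/conftest.o sub/conftest.c
+# which enables the fast-dependency (am__fastdepCC) path in the Makefiles.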
+
+
+# ... Makefile.am:45: compiling `unroll.c' with per-target flags requires `AM_PROG_CC_C_O' in `configure.ac'
+if test "x$CC" != xcc; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC and cc understand -c and -o together" >&5
+$as_echo_n "checking whether $CC and cc understand -c and -o together... " >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether cc understands -c and -o together" >&5
+$as_echo_n "checking whether cc understands -c and -o together... " >&6; }
+fi
+set dummy $CC; ac_cc=`$as_echo "$2" |
+		      sed 's/[^a-zA-Z0-9_]/_/g;s/^[0-9]/_/'`
+if eval \${ac_cv_prog_cc_${ac_cc}_c_o+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+# Make sure it works both with $CC and with simple cc.
+# We do the test twice because some compilers refuse to overwrite an
+# existing .o file with -o, though they will create one.
+ac_try='$CC -c conftest.$ac_ext -o conftest2.$ac_objext >&5'
+rm -f conftest2.*
+if { { case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_try") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } &&
+   test -f conftest2.$ac_objext && { { case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_try") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; };
+then
+  eval ac_cv_prog_cc_${ac_cc}_c_o=yes
+  if test "x$CC" != xcc; then
+    # Test first that cc exists at all.
+    if { ac_try='cc -c conftest.$ac_ext >&5'
+  { { case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_try") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; }; then
+      ac_try='cc -c conftest.$ac_ext -o conftest2.$ac_objext >&5'
+      rm -f conftest2.*
+      if { { case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_try") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } &&
+	 test -f conftest2.$ac_objext && { { case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_try") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; };
+      then
+	# cc works too.
+	:
+      else
+	# cc exists but doesn't like -o.
+	eval ac_cv_prog_cc_${ac_cc}_c_o=no
+      fi
+    fi
+  fi
+else
+  eval ac_cv_prog_cc_${ac_cc}_c_o=no
+fi
+rm -f core conftest*
+
+fi
+if eval test \$ac_cv_prog_cc_${ac_cc}_c_o = yes; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+$as_echo "#define NO_MINUS_C_MINUS_O 1" >>confdefs.h
+
+fi
+
+# FIXME: we rely on the cache variable name because
+# there is no other way.
+set dummy $CC
+am_cc=`echo $2 | sed 's/[^a-zA-Z0-9_]/_/g;s/^[0-9]/_/'`
+eval am_t=\$ac_cv_prog_cc_${am_cc}_c_o
+if test "$am_t" != yes; then
+   # Losing compiler, so override with the script.
+   # FIXME: It is wrong to rewrite CC.
+   # But if we don't then we get into trouble of one sort or another.
+   # A longer-term fix would be to have automake use am__CC in this case,
+   # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)"
+   CC="$am_aux_dir/compile $CC"
+fi
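+
+# That is, for a losing compiler CC is rewritten to "$am_aux_dir/compile cc"
+# (compiler name assumed) and the 'compile' wrapper emulates '-c -o' by
+# building in a scratch directory and moving the object into place.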
+
+
+# Run libtoolize if autoconf complains about the following.
+
+# By default we simply use the C compiler to build assembly code.
+
+test "${CCAS+set}" = set || CCAS=$CC
+test "${CCASFLAGS+set}" = set || CCASFLAGS=$CFLAGS
+
+
+
+depcc="$CCAS"   am_compiler_list=
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
+$as_echo_n "checking dependency style of $depcc... " >&6; }
+if ${am_cv_CCAS_dependencies_compiler_type+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+  # We make a subdir and do the tests there.  Otherwise we can end up
+  # making bogus files that we don't know about and never remove.  For
+  # instance it was reported that on HP-UX the gcc test will end up
+  # making a dummy file named `D' -- because `-MD' means `put the output
+  # in D'.
+  rm -rf conftest.dir
+  mkdir conftest.dir
+  # Copy depcomp to subdir because otherwise we won't find it if we're
+  # using a relative directory.
+  cp "$am_depcomp" conftest.dir
+  cd conftest.dir
+  # We will build objects and dependencies in a subdirectory because
+  # it helps to detect inapplicable dependency modes.  For instance
+  # both Tru64's cc and ICC support -MD to output dependencies as a
+  # side effect of compilation, but ICC will put the dependencies in
+  # the current directory while Tru64 will put them in the object
+  # directory.
+  mkdir sub
+
+  am_cv_CCAS_dependencies_compiler_type=none
+  if test "$am_compiler_list" = ""; then
+     am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp`
+  fi
+  am__universal=false
+
+
+  for depmode in $am_compiler_list; do
+    # Set up a source with many dependencies, because some compilers
+    # like to wrap large dependency lists on column 80 (with \), and
+    # we should not choose a depcomp mode which is confused by this.
+    #
+    # We need to recreate these files for each test, as the compiler may
+    # overwrite some of them when testing with obscure command lines.
+    # This happens at least with the AIX C compiler.
+    : > sub/conftest.c
+    for i in 1 2 3 4 5 6; do
+      echo '#include "conftst'$i'.h"' >> sub/conftest.c
+      # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with
+      # Solaris 8's {/usr,}/bin/sh.
+      touch sub/conftst$i.h
+    done
+    echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
+
+    # We check with `-c' and `-o' for the sake of the "dashmstdout"
+    # mode.  It turns out that the SunPro C++ compiler does not properly
+    # handle `-M -o', and we need to detect this.  Also, some Intel
+    # versions had trouble with output in subdirs.
+    am__obj=sub/conftest.${OBJEXT-o}
+    am__minus_obj="-o $am__obj"
+    case $depmode in
+    gcc)
+      # This depmode causes a compiler race in universal mode.
+      test "$am__universal" = false || continue
+      ;;
+    nosideeffect)
+      # After this tag, mechanisms are not by side effect, so they'll
+      # only be used when explicitly requested.
+      if test "x$enable_dependency_tracking" = xyes; then
+	continue
+      else
+	break
+      fi
+      ;;
+    msvc7 | msvc7msys | msvisualcpp | msvcmsys)
+      # This compiler won't grok `-c -o', but also, the minus-o test has
+      # not run yet.  These depmodes are late enough in the game, and
+      # so weak that their functioning should not be impacted.
+      am__obj=conftest.${OBJEXT-o}
+      am__minus_obj=
+      ;;
+    none) break ;;
+    esac
+    if depmode=$depmode \
+       source=sub/conftest.c object=$am__obj \
+       depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
+       $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
+         >/dev/null 2>conftest.err &&
+       grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
+       grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
+       grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
+       ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
+      # icc doesn't choke on unknown options; it will just issue warnings
+      # or remarks (even with -Werror).  So we grep stderr for any message
+      # that says an option was ignored or not supported.
+      # When given -MP, icc 7.0 and 7.1 complain thusly:
+      #   icc: Command line warning: ignoring option '-M'; no argument required
+      # The diagnosis changed in icc 8.0:
+      #   icc: Command line remark: option '-MP' not supported
+      if (grep 'ignoring option' conftest.err ||
+          grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
+        am_cv_CCAS_dependencies_compiler_type=$depmode
+        break
+      fi
+    fi
+  done
+
+  cd ..
+  rm -rf conftest.dir
+else
+  am_cv_CCAS_dependencies_compiler_type=none
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CCAS_dependencies_compiler_type" >&5
+$as_echo "$am_cv_CCAS_dependencies_compiler_type" >&6; }
+CCASDEPMODE=depmode=$am_cv_CCAS_dependencies_compiler_type
+
+ if
+  test "x$enable_dependency_tracking" != xno \
+  && test "$am_cv_CCAS_dependencies_compiler_type" = gcc3; then
+  am__fastdepCCAS_TRUE=
+  am__fastdepCCAS_FALSE='#'
+else
+  am__fastdepCCAS_TRUE='#'
+  am__fastdepCCAS_FALSE=
+fi
+
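+# The selected depmode is handed to the Makefiles through CCASDEPMODE.
+# With the fast `gcc3' mode enabled above, the generated compile rules
+# produce dependency files as a side effect, roughly (flag spelling is
+# the usual gcc3 one, shown for illustration only):
+#
+#   $CCAS -MT foo.o -MD -MP -MF .deps/foo.Tpo -c -o foo.o foo.S
+#   mv -f .deps/foo.Tpo .deps/foo.Po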
+
+#AC_GNU_SOURCE
+#AC_PROG_C
+#AC_PROG_INSTALL
+for ac_prog in gawk mawk nawk awk
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_AWK+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$AWK"; then
+  ac_cv_prog_AWK="$AWK" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_AWK="$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+AWK=$ac_cv_prog_AWK
+if test -n "$AWK"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5
+$as_echo "$AWK" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  test -n "$AWK" && break
+done
+
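+# As with every cached check in this script, the AWK probe honors an
+# override from the configure command line, e.g.:
+#
+#   ./configure AWK=gawk
+#   ./configure ac_cv_prog_AWK=mawk   # preset the cache variable directly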
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
+$as_echo_n "checking for grep that handles long lines and -e... " >&6; }
+if ${ac_cv_path_GREP+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -z "$GREP"; then
+  ac_path_GREP_found=false
+  # Loop through the user's path and test for each of PROGNAME-LIST
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_prog in grep ggrep; do
+    for ac_exec_ext in '' $ac_executable_extensions; do
+      ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
+      as_fn_executable_p "$ac_path_GREP" || continue
+# Check for GNU $ac_path_GREP and select it if it is found.
+case `"$ac_path_GREP" --version 2>&1` in
+*GNU*)
+  ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
+*)
+  ac_count=0
+  $as_echo_n 0123456789 >"conftest.in"
+  while :
+  do
+    cat "conftest.in" "conftest.in" >"conftest.tmp"
+    mv "conftest.tmp" "conftest.in"
+    cp "conftest.in" "conftest.nl"
+    $as_echo 'GREP' >> "conftest.nl"
+    "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+    as_fn_arith $ac_count + 1 && ac_count=$as_val
+    if test $ac_count -gt ${ac_path_GREP_max-0}; then
+      # Best one so far, save it but keep looking for a better one
+      ac_cv_path_GREP="$ac_path_GREP"
+      ac_path_GREP_max=$ac_count
+    fi
+    # 10*(2^10) chars as input seems more than enough
+    test $ac_count -gt 10 && break
+  done
+  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+      $ac_path_GREP_found && break 3
+    done
+  done
+  done
+IFS=$as_save_IFS
+  if test -z "$ac_cv_path_GREP"; then
+    as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+  fi
+else
+  ac_cv_path_GREP=$GREP
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5
+$as_echo "$ac_cv_path_GREP" >&6; }
+ GREP="$ac_cv_path_GREP"
+
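+# The loop above scores each candidate by repeatedly doubling an input
+# file until the candidate mishandles long lines, keeping the best
+# scorer.  On a system with a known-good grep the search can be skipped
+# the same way as the other checks (path illustrative):
+#
+#   ./configure GREP=/usr/xpg4/bin/grep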
+
+# The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of void *" >&5
+$as_echo_n "checking size of void *... " >&6; }
+if ${ac_cv_sizeof_void_p+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (void *))" "ac_cv_sizeof_void_p"        "$ac_includes_default"; then :
+
+else
+  if test "$ac_cv_type_void_p" = yes; then
+     { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "cannot compute sizeof (void *)
+See \`config.log' for more details" "$LINENO" 5; }
+   else
+     ac_cv_sizeof_void_p=0
+   fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_void_p" >&5
+$as_echo "$ac_cv_sizeof_void_p" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_VOID_P $ac_cv_sizeof_void_p
+_ACEOF
+
+
+# The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of char" >&5
+$as_echo_n "checking size of char... " >&6; }
+if ${ac_cv_sizeof_char+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (char))" "ac_cv_sizeof_char"        "$ac_includes_default"; then :
+
+else
+  if test "$ac_cv_type_char" = yes; then
+     { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "cannot compute sizeof (char)
+See \`config.log' for more details" "$LINENO" 5; }
+   else
+     ac_cv_sizeof_char=0
+   fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_char" >&5
+$as_echo "$ac_cv_sizeof_char" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_CHAR $ac_cv_sizeof_char
+_ACEOF
+
+
+# The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of int" >&5
+$as_echo_n "checking size of int... " >&6; }
+if ${ac_cv_sizeof_int+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (int))" "ac_cv_sizeof_int"        "$ac_includes_default"; then :
+
+else
+  if test "$ac_cv_type_int" = yes; then
+     { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "cannot compute sizeof (int)
+See \`config.log' for more details" "$LINENO" 5; }
+   else
+     ac_cv_sizeof_int=0
+   fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_int" >&5
+$as_echo "$ac_cv_sizeof_int" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_INT $ac_cv_sizeof_int
+_ACEOF
+
+
+# The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of short int" >&5
+$as_echo_n "checking size of short int... " >&6; }
+if ${ac_cv_sizeof_short_int+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (short int))" "ac_cv_sizeof_short_int"        "$ac_includes_default"; then :
+
+else
+  if test "$ac_cv_type_short_int" = yes; then
+     { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "cannot compute sizeof (short int)
+See \`config.log' for more details" "$LINENO" 5; }
+   else
+     ac_cv_sizeof_short_int=0
+   fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_short_int" >&5
+$as_echo "$ac_cv_sizeof_short_int" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_SHORT_INT $ac_cv_sizeof_short_int
+_ACEOF
+
+
+# The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long" >&5
+$as_echo_n "checking size of long... " >&6; }
+if ${ac_cv_sizeof_long+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long))" "ac_cv_sizeof_long"        "$ac_includes_default"; then :
+
+else
+  if test "$ac_cv_type_long" = yes; then
+     { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "cannot compute sizeof (long)
+See \`config.log' for more details" "$LINENO" 5; }
+   else
+     ac_cv_sizeof_long=0
+   fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long" >&5
+$as_echo "$ac_cv_sizeof_long" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_LONG $ac_cv_sizeof_long
+_ACEOF
+
+
+# The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long int" >&5
+$as_echo_n "checking size of long int... " >&6; }
+if ${ac_cv_sizeof_long_int+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long int))" "ac_cv_sizeof_long_int"        "$ac_includes_default"; then :
+
+else
+  if test "$ac_cv_type_long_int" = yes; then
+     { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "cannot compute sizeof (long int)
+See \`config.log' for more details" "$LINENO" 5; }
+   else
+     ac_cv_sizeof_long_int=0
+   fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long_int" >&5
+$as_echo "$ac_cv_sizeof_long_int" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_LONG_INT $ac_cv_sizeof_long_int
+_ACEOF
+
+
+# The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long long int" >&5
+$as_echo_n "checking size of long long int... " >&6; }
+if ${ac_cv_sizeof_long_long_int+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long long int))" "ac_cv_sizeof_long_long_int"        "$ac_includes_default"; then :
+
+else
+  if test "$ac_cv_type_long_long_int" = yes; then
+     { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "cannot compute sizeof (long long int)
+See \`config.log' for more details" "$LINENO" 5; }
+   else
+     ac_cv_sizeof_long_long_int=0
+   fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long_long_int" >&5
+$as_echo "$ac_cv_sizeof_long_long_int" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_LONG_LONG_INT $ac_cv_sizeof_long_long_int
+_ACEOF
+
+
+# The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of size_t" >&5
+$as_echo_n "checking size of size_t... " >&6; }
+if ${ac_cv_sizeof_size_t+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (size_t))" "ac_cv_sizeof_size_t"        "$ac_includes_default"; then :
+
+else
+  if test "$ac_cv_type_size_t" = yes; then
+     { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "cannot compute sizeof (size_t)
+See \`config.log' for more details" "$LINENO" 5; }
+   else
+     ac_cv_sizeof_size_t=0
+   fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_size_t" >&5
+$as_echo "$ac_cv_sizeof_size_t" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_SIZE_T $ac_cv_sizeof_size_t
+_ACEOF
+
+
+# The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of float" >&5
+$as_echo_n "checking size of float... " >&6; }
+if ${ac_cv_sizeof_float+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (float))" "ac_cv_sizeof_float"        "$ac_includes_default"; then :
+
+else
+  if test "$ac_cv_type_float" = yes; then
+     { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "cannot compute sizeof (float)
+See \`config.log' for more details" "$LINENO" 5; }
+   else
+     ac_cv_sizeof_float=0
+   fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_float" >&5
+$as_echo "$ac_cv_sizeof_float" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_FLOAT $ac_cv_sizeof_float
+_ACEOF
+
+
+# The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of double" >&5
+$as_echo_n "checking size of double... " >&6; }
+if ${ac_cv_sizeof_double+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (double))" "ac_cv_sizeof_double"        "$ac_includes_default"; then :
+
+else
+  if test "$ac_cv_type_double" = yes; then
+     { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "cannot compute sizeof (double)
+See \`config.log' for more details" "$LINENO" 5; }
+   else
+     ac_cv_sizeof_double=0
+   fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_double" >&5
+$as_echo "$ac_cv_sizeof_double" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_DOUBLE $ac_cv_sizeof_double
+_ACEOF
+
+
+# The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of long double" >&5
+$as_echo_n "checking size of long double... " >&6; }
+if ${ac_cv_sizeof_long_double+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (long double))" "ac_cv_sizeof_long_double"        "$ac_includes_default"; then :
+
+else
+  if test "$ac_cv_type_long_double" = yes; then
+     { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "cannot compute sizeof (long double)
+See \`config.log' for more details" "$LINENO" 5; }
+   else
+     ac_cv_sizeof_long_double=0
+   fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_long_double" >&5
+$as_echo "$ac_cv_sizeof_long_double" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_LONG_DOUBLE $ac_cv_sizeof_long_double
+_ACEOF
+
+
+# to use complex we need a specialized header
+# The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of complex" >&5
+$as_echo_n "checking size of complex... " >&6; }
+if ${ac_cv_sizeof_complex+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (complex))" "ac_cv_sizeof_complex"        "$ac_includes_default"; then :
+
+else
+  if test "$ac_cv_type_complex" = yes; then
+     { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "cannot compute sizeof (complex)
+See \`config.log' for more details" "$LINENO" 5; }
+   else
+     ac_cv_sizeof_complex=0
+   fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_complex" >&5
+$as_echo "$ac_cv_sizeof_complex" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_COMPLEX $ac_cv_sizeof_complex
+_ACEOF
+
+
+# The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of float complex" >&5
+$as_echo_n "checking size of float complex... " >&6; }
+if ${ac_cv_sizeof_float_complex+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (float complex))" "ac_cv_sizeof_float_complex"        "$ac_includes_default"; then :
+
+else
+  if test "$ac_cv_type_float_complex" = yes; then
+     { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "cannot compute sizeof (float complex)
+See \`config.log' for more details" "$LINENO" 5; }
+   else
+     ac_cv_sizeof_float_complex=0
+   fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_float_complex" >&5
+$as_echo "$ac_cv_sizeof_float_complex" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_FLOAT_COMPLEX $ac_cv_sizeof_float_complex
+_ACEOF
+
+
+# The cast to long int works around a bug in the HP C Compiler
+# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects
+# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'.
+# This bug is HP SR number 8606223364.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of double complex" >&5
+$as_echo_n "checking size of double complex... " >&6; }
+if ${ac_cv_sizeof_double_complex+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (double complex))" "ac_cv_sizeof_double_complex"        "$ac_includes_default"; then :
+
+else
+  if test "$ac_cv_type_double_complex" = yes; then
+     { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error 77 "cannot compute sizeof (double complex)
+See \`config.log' for more details" "$LINENO" 5; }
+   else
+     ac_cv_sizeof_double_complex=0
+   fi
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_double_complex" >&5
+$as_echo "$ac_cv_sizeof_double_complex" >&6; }
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SIZEOF_DOUBLE_COMPLEX $ac_cv_sizeof_double_complex
+_ACEOF
+
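+# Each size probe above computes its value at compile time (a binary
+# search of compile tests inside ac_fn_c_compute_int), so it also works
+# when cross-compiling.  The detected values end up in the configuration
+# header (rsb-config.h, registered further below) and can be reviewed
+# after configure has run, e.g.:
+#
+#   grep '^#define SIZEOF_' rsb-config.h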
+
+#
+
+  OPENMP_CFLAGS=
+  # Check whether --enable-openmp was given.
+if test "${enable_openmp+set}" = set; then :
+  enableval=$enable_openmp;
+fi
+
+  if test "$enable_openmp" != no; then
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to support OpenMP" >&5
+$as_echo_n "checking for $CC option to support OpenMP... " >&6; }
+if ${ac_cv_prog_c_openmp+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+#ifndef _OPENMP
+ choke me
+#endif
+#include <omp.h>
+int main () { return omp_get_num_threads (); }
+
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  ac_cv_prog_c_openmp='none needed'
+else
+  ac_cv_prog_c_openmp='unsupported'
+	  	  	  	  	  	  	                                	  	  	  	  	  	  for ac_option in -fopenmp -xopenmp -openmp -mp -omp -qsmp=omp -homp \
+                           -Popenmp --openmp; do
+	    ac_save_CFLAGS=$CFLAGS
+	    CFLAGS="$CFLAGS $ac_option"
+	    cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+#ifndef _OPENMP
+ choke me
+#endif
+#include <omp.h>
+int main () { return omp_get_num_threads (); }
+
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  ac_cv_prog_c_openmp=$ac_option
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+	    CFLAGS=$ac_save_CFLAGS
+	    if test "$ac_cv_prog_c_openmp" != unsupported; then
+	      break
+	    fi
+	  done
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_c_openmp" >&5
+$as_echo "$ac_cv_prog_c_openmp" >&6; }
+    case $ac_cv_prog_c_openmp in #(
+      "none needed" | unsupported)
+	;; #(
+      *)
+	OPENMP_CFLAGS=$ac_cv_prog_c_openmp ;;
+    esac
+  fi
+
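+# OPENMP_CFLAGS now holds whichever flag worked (or stays empty); being
+# a substituted variable, it is typically spliced into the compiler
+# flags by the generated Makefiles.  The probe itself can be steered
+# from the command line, e.g.:
+#
+#   ./configure --disable-openmp                # skip the probe
+#   ./configure ac_cv_prog_c_openmp=-fopenmp    # preset the cached flag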
+
+#
+# Extract the first word of "grep", so it can be a program name with args.
+set dummy grep; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_have_grep+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$have_grep"; then
+  ac_cv_prog_have_grep="$have_grep" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_have_grep="yes"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+  test -z "$ac_cv_prog_have_grep" && ac_cv_prog_have_grep="no"
+fi
+fi
+have_grep=$ac_cv_prog_have_grep
+if test -n "$have_grep"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_grep" >&5
+$as_echo "$have_grep" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+# Extract the first word of "sed", so it can be a program name with args.
+set dummy sed; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_have_sed+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$have_sed"; then
+  ac_cv_prog_have_sed="$have_sed" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_have_sed="yes"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+  test -z "$ac_cv_prog_have_sed" && ac_cv_prog_have_sed="no"
+fi
+fi
+have_sed=$ac_cv_prog_have_sed
+if test -n "$have_sed"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_sed" >&5
+$as_echo "$have_sed" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+for ac_prog in $OCTAVE octave
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_OCTAVE+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$OCTAVE"; then
+  ac_cv_prog_OCTAVE="$OCTAVE" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_OCTAVE="$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+OCTAVE=$ac_cv_prog_OCTAVE
+if test -n "$OCTAVE"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OCTAVE" >&5
+$as_echo "$OCTAVE" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  test -n "$OCTAVE" && break
+done
+test -n "$OCTAVE" || OCTAVE="false"
+
+for ac_prog in $DOXYGEN doxygen
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_DOXYGEN+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$DOXYGEN"; then
+  ac_cv_prog_DOXYGEN="$DOXYGEN" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_DOXYGEN="$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+DOXYGEN=$ac_cv_prog_DOXYGEN
+if test -n "$DOXYGEN"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DOXYGEN" >&5
+$as_echo "$DOXYGEN" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  test -n "$DOXYGEN" && break
+done
+test -n "$DOXYGEN" || DOXYGEN="false"
+
+for ac_prog in $HELP2MAN help2man
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_HELP2MAN+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$HELP2MAN"; then
+  ac_cv_prog_HELP2MAN="$HELP2MAN" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_HELP2MAN="$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+HELP2MAN=$ac_cv_prog_HELP2MAN
+if test -n "$HELP2MAN"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $HELP2MAN" >&5
+$as_echo "$HELP2MAN" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  test -n "$HELP2MAN" && break
+done
+test -n "$HELP2MAN" || HELP2MAN="false"
+
+for ac_prog in $M4 m4 gm4 /opt/freeware/bin/m4
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_M4+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$M4"; then
+  ac_cv_prog_M4="$M4" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_M4="$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+M4=$ac_cv_prog_M4
+if test -n "$M4"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $M4" >&5
+$as_echo "$M4" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  test -n "$M4" && break
+done
+test -n "$M4" || M4="false"
+
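+# Each optional tool above falls back to the `false' program when absent,
+# so rules that need it can test for that instead of expanding to an
+# empty command.  A hypothetical guard:
+#
+#   test "$DOXYGEN" != false && "$DOXYGEN" doc/Doxyfile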
+
+
+
+
+
+
+
+
+
+
+#
+ac_config_headers="$ac_config_headers rsb-config.h"
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is bigendian" >&5
+$as_echo_n "checking whether byte ordering is bigendian... " >&6; }
+if ${ac_cv_c_bigendian+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_cv_c_bigendian=unknown
+    # See if we're dealing with a universal compiler.
+    cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#ifndef __APPLE_CC__
+	       not a universal capable compiler
+	     #endif
+	     typedef int dummy;
+
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+	# Check for potential -arch flags.  It is not universal unless
+	# there are at least two -arch flags with different values.
+	ac_arch=
+	ac_prev=
+	for ac_word in $CC $CFLAGS $CPPFLAGS $LDFLAGS; do
+	 if test -n "$ac_prev"; then
+	   case $ac_word in
+	     i?86 | x86_64 | ppc | ppc64)
+	       if test -z "$ac_arch" || test "$ac_arch" = "$ac_word"; then
+		 ac_arch=$ac_word
+	       else
+		 ac_cv_c_bigendian=universal
+		 break
+	       fi
+	       ;;
+	   esac
+	   ac_prev=
+	 elif test "x$ac_word" = "x-arch"; then
+	   ac_prev=arch
+	 fi
+       done
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+    if test $ac_cv_c_bigendian = unknown; then
+      # See if sys/param.h defines the BYTE_ORDER macro.
+      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <sys/types.h>
+	     #include <sys/param.h>
+
+int
+main ()
+{
+#if ! (defined BYTE_ORDER && defined BIG_ENDIAN \
+		     && defined LITTLE_ENDIAN && BYTE_ORDER && BIG_ENDIAN \
+		     && LITTLE_ENDIAN)
+	      bogus endian macros
+	     #endif
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  # It does; now see whether it is defined to BIG_ENDIAN or not.
+	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <sys/types.h>
+		#include <sys/param.h>
+
+int
+main ()
+{
+#if BYTE_ORDER != BIG_ENDIAN
+		 not big endian
+		#endif
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_c_bigendian=yes
+else
+  ac_cv_c_bigendian=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+    fi
+    if test $ac_cv_c_bigendian = unknown; then
+      # See if <limits.h> defines _LITTLE_ENDIAN or _BIG_ENDIAN (e.g., Solaris).
+      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <limits.h>
+
+int
+main ()
+{
+#if ! (defined _LITTLE_ENDIAN || defined _BIG_ENDIAN)
+	      bogus endian macros
+	     #endif
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  # It does; now see whether it is defined to _BIG_ENDIAN or not.
+	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <limits.h>
+
+int
+main ()
+{
+#ifndef _BIG_ENDIAN
+		 not big endian
+		#endif
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_c_bigendian=yes
+else
+  ac_cv_c_bigendian=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+    fi
+    if test $ac_cv_c_bigendian = unknown; then
+      # Compile a test program.
+      if test "$cross_compiling" = yes; then :
+  # Try to guess by grepping values from an object file.
+	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+short int ascii_mm[] =
+		  { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 };
+		short int ascii_ii[] =
+		  { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 };
+		int use_ascii (int i) {
+		  return ascii_mm[i] + ascii_ii[i];
+		}
+		short int ebcdic_ii[] =
+		  { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 };
+		short int ebcdic_mm[] =
+		  { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 };
+		int use_ebcdic (int i) {
+		  return ebcdic_mm[i] + ebcdic_ii[i];
+		}
+		extern int foo;
+
+int
+main ()
+{
+return use_ascii (foo) == use_ebcdic (foo);
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  if grep BIGenDianSyS conftest.$ac_objext >/dev/null; then
+	      ac_cv_c_bigendian=yes
+	    fi
+	    if grep LiTTleEnDian conftest.$ac_objext >/dev/null ; then
+	      if test "$ac_cv_c_bigendian" = unknown; then
+		ac_cv_c_bigendian=no
+	      else
+		# finding both strings is unlikely to happen, but who knows?
+		ac_cv_c_bigendian=unknown
+	      fi
+	    fi
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$ac_includes_default
+int
+main ()
+{
+
+	     /* Are we little or big endian?  From Harbison&Steele.  */
+	     union
+	     {
+	       long int l;
+	       char c[sizeof (long int)];
+	     } u;
+	     u.l = 1;
+	     return u.c[sizeof (long int) - 1] == 1;
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+  ac_cv_c_bigendian=no
+else
+  ac_cv_c_bigendian=yes
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+  conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+    fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_bigendian" >&5
+$as_echo "$ac_cv_c_bigendian" >&6; }
+ case $ac_cv_c_bigendian in #(
+   yes)
+     $as_echo "#define WORDS_BIGENDIAN 1" >>confdefs.h
+;; #(
+   no)
+      ;; #(
+   universal)
+
+$as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h
+
+     ;; #(
+   *)
+     as_fn_error $? "unknown endianness
+ presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;;
+ esac
+
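+# As the failure branch above suggests, cross-builds where all three
+# probes come up empty can preset the answer; configure accepts cache
+# variables on its command line (host triple illustrative):
+#
+#   ./configure --host=powerpc64-linux-gnu ac_cv_c_bigendian=yes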
+for ac_func in vprintf
+do :
+  ac_fn_c_check_func "$LINENO" "vprintf" "ac_cv_func_vprintf"
+if test "x$ac_cv_func_vprintf" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_VPRINTF 1
+_ACEOF
+
+ac_fn_c_check_func "$LINENO" "_doprnt" "ac_cv_func__doprnt"
+if test "x$ac_cv_func__doprnt" = xyes; then :
+
+$as_echo "#define HAVE_DOPRNT 1" >>confdefs.h
+
+fi
+
+fi
+done
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5
+$as_echo_n "checking for ANSI C header files... " >&6; }
+if ${ac_cv_header_stdc+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <float.h>
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_header_stdc=yes
+else
+  ac_cv_header_stdc=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+if test $ac_cv_header_stdc = yes; then
+  # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <string.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+  $EGREP "memchr" >/dev/null 2>&1; then :
+
+else
+  ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+  # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <stdlib.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+  $EGREP "free" >/dev/null 2>&1; then :
+
+else
+  ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+  # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
+  if test "$cross_compiling" = yes; then :
+  :
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <ctype.h>
+#include <stdlib.h>
+#if ((' ' & 0x0FF) == 0x020)
+# define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
+# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
+#else
+# define ISLOWER(c) \
+		   (('a' <= (c) && (c) <= 'i') \
+		     || ('j' <= (c) && (c) <= 'r') \
+		     || ('s' <= (c) && (c) <= 'z'))
+# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c))
+#endif
+
+#define XOR(e, f) (((e) && !(f)) || (!(e) && (f)))
+int
+main ()
+{
+  int i;
+  for (i = 0; i < 256; i++)
+    if (XOR (islower (i), ISLOWER (i))
+	|| toupper (i) != TOUPPER (i))
+      return 2;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+
+else
+  ac_cv_header_stdc=no
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+  conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5
+$as_echo "$ac_cv_header_stdc" >&6; }
+if test $ac_cv_header_stdc = yes; then
+
+$as_echo "#define STDC_HEADERS 1" >>confdefs.h
+
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5
+$as_echo_n "checking for an ANSI C-conforming const... " >&6; }
+if ${ac_cv_c_const+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+#ifndef __cplusplus
+  /* Ultrix mips cc rejects this sort of thing.  */
+  typedef int charset[2];
+  const charset cs = { 0, 0 };
+  /* SunOS 4.1.1 cc rejects this.  */
+  char const *const *pcpcc;
+  char **ppc;
+  /* NEC SVR4.0.2 mips cc rejects this.  */
+  struct point {int x, y;};
+  static struct point const zero = {0,0};
+  /* AIX XL C 1.02.0.0 rejects this.
+     It does not let you subtract one const X* pointer from another in
+     an arm of an if-expression whose if-part is not a constant
+     expression */
+  const char *g = "string";
+  pcpcc = &g + (g ? g-g : 0);
+  /* HPUX 7.0 cc rejects these. */
+  ++pcpcc;
+  ppc = (char**) pcpcc;
+  pcpcc = (char const *const *) ppc;
+  { /* SCO 3.2v4 cc rejects this sort of thing.  */
+    char tx;
+    char *t = &tx;
+    char const *s = 0 ? (char *) 0 : (char const *) 0;
+
+    *t++ = 0;
+    if (s) return 0;
+  }
+  { /* Someone thinks the Sun supposedly-ANSI compiler will reject this.  */
+    int x[] = {25, 17};
+    const int *foo = &x[0];
+    ++foo;
+  }
+  { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */
+    typedef const int *iptr;
+    iptr p = 0;
+    ++p;
+  }
+  { /* AIX XL C 1.02.0.0 rejects this sort of thing, saying
+       "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */
+    struct s { int j; const int *ap[3]; } bx;
+    struct s *b = &bx; b->j = 5;
+  }
+  { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */
+    const int foo = 10;
+    if (!foo) return 0;
+  }
+  return !cs[0] && !zero.x;
+#endif
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_c_const=yes
+else
+  ac_cv_c_const=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_const" >&5
+$as_echo "$ac_cv_c_const" >&6; }
+if test $ac_cv_c_const = no; then
+
+$as_echo "#define const /**/" >>confdefs.h
+
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for inline" >&5
+$as_echo_n "checking for inline... " >&6; }
+if ${ac_cv_c_inline+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_cv_c_inline=no
+for ac_kw in inline __inline__ __inline; do
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#ifndef __cplusplus
+typedef int foo_t;
+static $ac_kw foo_t static_foo () {return 0; }
+$ac_kw foo_t foo () {return 0; }
+#endif
+
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_c_inline=$ac_kw
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+  test "$ac_cv_c_inline" != no && break
+done
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_inline" >&5
+$as_echo "$ac_cv_c_inline" >&6; }
+
+case $ac_cv_c_inline in
+  inline | yes) ;;
+  *)
+    case $ac_cv_c_inline in
+      no) ac_val=;;
+      *) ac_val=$ac_cv_c_inline;;
+    esac
+    cat >>confdefs.h <<_ACEOF
+#ifndef __cplusplus
+#define inline $ac_val
+#endif
+_ACEOF
+    ;;
+esac
+
+ac_fn_c_check_type "$LINENO" "size_t" "ac_cv_type_size_t" "$ac_includes_default"
+if test "x$ac_cv_type_size_t" = xyes; then :
+
+else
+
+cat >>confdefs.h <<_ACEOF
+#define size_t unsigned int
+_ACEOF
+
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether time.h and sys/time.h may both be included" >&5
+$as_echo_n "checking whether time.h and sys/time.h may both be included... " >&6; }
+if ${ac_cv_header_time+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <sys/types.h>
+#include <sys/time.h>
+#include <time.h>
+
+int
+main ()
+{
+if ((struct tm *) 0)
+return 0;
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_header_time=yes
+else
+  ac_cv_header_time=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_time" >&5
+$as_echo "$ac_cv_header_time" >&6; }
+if test $ac_cv_header_time = yes; then
+
+$as_echo "#define TIME_WITH_SYS_TIME 1" >>confdefs.h
+
+fi
+
+#
+ac_fn_c_check_func "$LINENO" "mlockall" "ac_cv_func_mlockall"
+if test "x$ac_cv_func_mlockall" = xyes; then :
+
+$as_echo "#define HAVE_MLOCKALL 1" >>confdefs.h
+
+fi
+
+ac_fn_c_check_func "$LINENO" "sysconf" "ac_cv_func_sysconf"
+if test "x$ac_cv_func_sysconf" = xyes; then :
+
+$as_echo "#define HAVE_SYSCONF 1" >>confdefs.h
+
+fi
+
+ac_fn_c_check_func "$LINENO" "gethostname" "ac_cv_func_gethostname"
+if test "x$ac_cv_func_gethostname" = xyes; then :
+
+$as_echo "#define HAVE_GETHOSTNAME 1" >>confdefs.h
+
+fi
+
+ac_fn_c_check_func "$LINENO" "posix_memalign" "ac_cv_func_posix_memalign"
+if test "x$ac_cv_func_posix_memalign" = xyes; then :
+
+$as_echo "#define HAVE_POSIX_MEMALIGN 1" >>confdefs.h
+
+fi
+
+ac_fn_c_check_func "$LINENO" "memalign" "ac_cv_func_memalign"
+if test "x$ac_cv_func_memalign" = xyes; then :
+
+$as_echo "#define HAVE_MEMALIGN 1" >>confdefs.h
+
+fi
+
+ac_fn_c_check_func "$LINENO" "getenv" "ac_cv_func_getenv"
+if test "x$ac_cv_func_getenv" = xyes; then :
+
+$as_echo "#define HAVE_GETENV 1" >>confdefs.h
+
+fi
+
+ac_fn_c_check_func "$LINENO" "fileno" "ac_cv_func_fileno"
+if test "x$ac_cv_func_fileno" = xyes; then :
+
+$as_echo "#define HAVE_FILENO 1" >>confdefs.h
+
+fi
+
+
+for ac_func in rand isatty
+do :
+  as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
+ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
+if eval test \"x\$"$as_ac_var"\" = x"yes"; then :
+  cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+done
+
+for ac_func in sched_getaffinity
+do :
+  ac_fn_c_check_func "$LINENO" "sched_getaffinity" "ac_cv_func_sched_getaffinity"
+if test "x$ac_cv_func_sched_getaffinity" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_SCHED_GETAFFINITY 1
+_ACEOF
+
+fi
+done
+
+for ac_func in memset memcmp strncmp strcpy
+do :
+  as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
+ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
+if eval test \"x\$"$as_ac_var"\" = x"yes"; then :
+  cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+done
+
+for ac_func in dup
+do :
+  ac_fn_c_check_func "$LINENO" "dup" "ac_cv_func_dup"
+if test "x$ac_cv_func_dup" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_DUP 1
+_ACEOF
+
+fi
+done
+
+for ac_func in fread fwrite
+do :
+  as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
+ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
+if eval test \"x\$"$as_ac_var"\" = x"yes"; then :
+  cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+done
+
+#dnl	***********************************************************************
+#dnl					THESE ARE ESSENTIAL
+#dnl	***********************************************************************
+for ac_header in libgen.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "libgen.h" "ac_cv_header_libgen_h" "$ac_includes_default"
+if test "x$ac_cv_header_libgen_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_LIBGEN_H 1
+_ACEOF
+
+fi
+
+done
+
+for ac_header in sched.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "sched.h" "ac_cv_header_sched_h" "$ac_includes_default"
+if test "x$ac_cv_header_sched_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_SCHED_H 1
+_ACEOF
+
+fi
+
+done
+
+for ac_header in dmalloc.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "dmalloc.h" "ac_cv_header_dmalloc_h" "$ac_includes_default"
+if test "x$ac_cv_header_dmalloc_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_DMALLOC_H 1
+_ACEOF
+
+fi
+
+done
+
+ac_fn_c_check_func "$LINENO" "getopt_long" "ac_cv_func_getopt_long"
+if test "x$ac_cv_func_getopt_long" = xyes; then :
+
+$as_echo "#define HAVE_GETOPT_LONG 1" >>confdefs.h
+
+fi
+
+ac_fn_c_check_func "$LINENO" "times" "ac_cv_func_times"
+if test "x$ac_cv_func_times" = xyes; then :
+
+$as_echo "#define HAVE_TIMES 1" >>confdefs.h
+
+fi
+
+ac_fn_c_check_func "$LINENO" "gettimeofday" "ac_cv_func_gettimeofday"
+if test "x$ac_cv_func_gettimeofday" = xyes; then :
+
+$as_echo "#define HAVE_GETTIMEOFDAY 1" >>confdefs.h
+
+fi
+
+ac_fn_c_check_func "$LINENO" "setenv" "ac_cv_func_setenv"
+if test "x$ac_cv_func_setenv" = xyes; then :
+
+$as_echo "#define HAVE_SETENV 1" >>confdefs.h
+
+fi
+
+for ac_header in omp.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "omp.h" "ac_cv_header_omp_h" "$ac_includes_default"
+if test "x$ac_cv_header_omp_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_OMP_H 1
+_ACEOF
+
+fi
+
+done
+
+for ac_header in getopt.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "getopt.h" "ac_cv_header_getopt_h" "$ac_includes_default"
+if test "x$ac_cv_header_getopt_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_GETOPT_H 1
+_ACEOF
+
+fi
+
+done
+
+for ac_header in malloc.h memory.h
+do :
+  as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
+  cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+for ac_header in pthread.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "pthread.h" "ac_cv_header_pthread_h" "$ac_includes_default"
+if test "x$ac_cv_header_pthread_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_PTHREAD_H 1
+_ACEOF
+
+fi
+
+done
+ for ac_header in papi.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "papi.h" "ac_cv_header_papi_h" "$ac_includes_default"
+if test "x$ac_cv_header_papi_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_PAPI_H 1
+_ACEOF
+
+fi
+
+done
+
+for ac_header in gsl/gsl_sort.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "gsl/gsl_sort.h" "ac_cv_header_gsl_gsl_sort_h" "$ac_includes_default"
+if test "x$ac_cv_header_gsl_gsl_sort_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_GSL_GSL_SORT_H 1
+_ACEOF
+
+fi
+
+done
+
+for ac_header in times.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "times.h" "ac_cv_header_times_h" "$ac_includes_default"
+if test "x$ac_cv_header_times_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_TIMES_H 1
+_ACEOF
+
+fi
+
+done
+ for ac_header in sys/utsname.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "sys/utsname.h" "ac_cv_header_sys_utsname_h" "$ac_includes_default"
+if test "x$ac_cv_header_sys_utsname_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_SYS_UTSNAME_H 1
+_ACEOF
+
+fi
+
+done
+
+for ac_header in sys/resource.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "sys/resource.h" "ac_cv_header_sys_resource_h" "$ac_includes_default"
+if test "x$ac_cv_header_sys_resource_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_SYS_RESOURCE_H 1
+_ACEOF
+
+fi
+
+done
+ for ac_header in complex.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "complex.h" "ac_cv_header_complex_h" "$ac_includes_default"
+if test "x$ac_cv_header_complex_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_COMPLEX_H 1
+_ACEOF
+
+fi
+
+done
+
+for ac_header in assert.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "assert.h" "ac_cv_header_assert_h" "$ac_includes_default"
+if test "x$ac_cv_header_assert_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_ASSERT_H 1
+_ACEOF
+
+fi
+
+done
+
+for ac_header in rpc/xdr.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "rpc/xdr.h" "ac_cv_header_rpc_xdr_h" "$ac_includes_default"
+if test "x$ac_cv_header_rpc_xdr_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_RPC_XDR_H 1
+_ACEOF
+
+fi
+
+done
+
+for ac_header in sys/mman.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "sys/mman.h" "ac_cv_header_sys_mman_h" "$ac_includes_default"
+if test "x$ac_cv_header_sys_mman_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_SYS_MMAN_H 1
+_ACEOF
+
+fi
+
+done
+ for ac_header in stdint.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "stdint.h" "ac_cv_header_stdint_h" "$ac_includes_default"
+if test "x$ac_cv_header_stdint_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_STDINT_H 1
+_ACEOF
+
+fi
+
+done
+
+for ac_header in unistd.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "unistd.h" "ac_cv_header_unistd_h" "$ac_includes_default"
+if test "x$ac_cv_header_unistd_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_UNISTD_H 1
+_ACEOF
+
+fi
+
+done
+ for ac_header in stdio.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "stdio.h" "ac_cv_header_stdio_h" "$ac_includes_default"
+if test "x$ac_cv_header_stdio_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_STDIO_H 1
+_ACEOF
+
+fi
+
+done
+ for ac_header in stdarg.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "stdarg.h" "ac_cv_header_stdarg_h" "$ac_includes_default"
+if test "x$ac_cv_header_stdarg_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_STDARG_H 1
+_ACEOF
+
+fi
+
+done
+ for ac_header in time.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "time.h" "ac_cv_header_time_h" "$ac_includes_default"
+if test "x$ac_cv_header_time_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_TIME_H 1
+_ACEOF
+
+fi
+
+done
+ for ac_header in regex.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "regex.h" "ac_cv_header_regex_h" "$ac_includes_default"
+if test "x$ac_cv_header_regex_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_REGEX_H 1
+_ACEOF
+
+fi
+
+done
+ for ac_header in string.h strings.h ctype.h
+do :
+  as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
+if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
+  cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+#ifdef __MINGW32__
+#error "You are likely using MINGW (Minimalist GNU for Windows)."
+#else
+    /* "You are likely not using MINGW (Minimalist GNU for Windows)." */
+#endif
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_we_use_mingw=no
+else
+  ac_cv_we_use_mingw=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+if test "x$ac_cv_we_use_mingw" = xyes; then
+	ac_cv_mingw_add="-D__USE_MINGW_ANSI_STDIO=1"
+      	{ $as_echo "$as_me:${as_lineno-$LINENO}: You are likely using MINGW (Minimalist GNU for Windows). Adding ${ac_cv_mingw_add} to compilation flags to avoid broken C99 support." >&5
+$as_echo "$as_me: You are likely using MINGW (Minimalist GNU for Windows). Adding ${ac_cv_mingw_add} to compilation flags to avoid broken C99 support." >&6;}
+	CFLAGS="${CFLAGS} ${ac_cv_mingw_add}"
+fi
+# rsbench-only LIBS and CFLAGS:
+RSB_RSBENCH_LIBS=
+RSB_RSBENCH_CFLAGS=
+
+# Check whether --with-math was given.
+if test "${with_math+set}" = set; then :
+  withval=$with_math; if test "x$withval" = xno; then want_math_libs= ; else want_math_libs="$withval" ; fi
+else
+  want_math_libs="-lm"
+fi
+
+for ac_header in math.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "math.h" "ac_cv_header_math_h" "$ac_includes_default"
+if test "x$ac_cv_header_math_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_MATH_H 1
+_ACEOF
+ LIBS="${LIBS} $want_math_libs"
+else
+  break
+fi
+
+done
+
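+# Example invocations for the switch above (the value is passed to the
+# linker verbatim):
+#
+#   ./configure --with-math=no      # add no math library
+#   ./configure --with-math=-lm     # the default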
+
+
+# Check whether --with-xdr was given.
+if test "${with_xdr+set}" = set; then :
+  withval=$with_xdr; if test "x$withval" = xno; then want_xdr_libs= ; else want_xdr_libs="$withval" ; fi
+else
+  want_xdr_libs=" "
+fi
+
+for ac_header in rpc/xdr.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "rpc/xdr.h" "ac_cv_header_rpc_xdr_h" "$ac_includes_default"
+if test "x$ac_cv_header_rpc_xdr_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_RPC_XDR_H 1
+_ACEOF
+
+fi
+
+done
+
+
+
+
+# Check whether --with-hwloc was given.
+if test "${with_hwloc+set}" = set; then :
+  withval=$with_hwloc; if test "x$withval" = xno; then want_hwloc_libs= ; else want_hwloc_libs="$withval"; if test "x$want_hwloc_libs" = x"yes" ; then want_hwloc_libs="-lhwloc" ; fi; enable_hwloc=yes ; fi
+else
+  want_hwloc_libs=" "
+fi
+
+for ac_header in hwloc.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "hwloc.h" "ac_cv_header_hwloc_h" "$ac_includes_default"
+if test "x$ac_cv_header_hwloc_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_HWLOC_H 1
+_ACEOF
+ if test "x$enable_hwloc" != x -a "x$want_hwloc_libs" != x ; then LIBS="${LIBS} $want_hwloc_libs"; fi;
+else
+  break
+fi
+
+done
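+# Illustrative usage of the --with-hwloc switch above (a sketch; the
+# nonstandard prefix below is a made-up example):
+#   ./configure --with-hwloc                              # probe hwloc.h, link -lhwloc
+#   ./configure --with-hwloc="-L/opt/hwloc/lib -lhwloc"   # custom location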
+
+
+if test "x${CC}" = x"xlc" -o "x${CC}" = x"xlc_r"  ; then
+      	{ $as_echo "$as_me:${as_lineno-$LINENO}: Disabling XDR support: our API was only tested on Linux." >&5
+$as_echo "$as_me: Disabling XDR support: our API was only tested on Linux." >&6;}
+	want_xdr_libs=
+	ac_cv_header_rpc_xdr_h=no
+fi
+
+if test x"$want_xdr_libs" != x"" && test "x$ac_cv_header_rpc_xdr_h" = xyes ; then
+	      	{ $as_echo "$as_me:${as_lineno-$LINENO}: Enabling xdr support." >&5
+$as_echo "$as_me: Enabling xdr support." >&6;}
+
+$as_echo "#define RSB_WANT_XDR_SUPPORT 1" >>confdefs.h
+
+	LIBS="${LIBS} $want_xdr_libs"
+	else
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: No xdr headers found." >&5
+$as_echo "$as_me: WARNING: No xdr headers found." >&2;}
+
+$as_echo "#define RSB_WANT_XDR_SUPPORT 0" >>confdefs.h
+
+fi
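+# Illustrative usage of the --with-xdr switch handled above (a sketch; the
+# library name below is only an example, since XDR may live in libc, libnsl
+# or libtirpc depending on the system):
+#   ./configure --with-xdr="-ltirpc"
+#   ./configure --with-xdr=no        # disable XDR support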
+
+
+# Check whether --with-dmalloc was given.
+if test "${with_dmalloc+set}" = set; then :
+  withval=$with_dmalloc; if test "x$withval" = xyes; then
+	enable_dmalloc=1;
+	if test x"$ac_cv_header_dmalloc_h" = xyes ; then
+		LIBS="${LIBS} -ldmalloc"
+		DMALLOC_CFLAGS="-DDMALLOC"
+	fi
+ else
+	enable_dmalloc=0
+ fi
+else
+  enable_dmalloc=0
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define RSB_WANT_DMALLOC $enable_dmalloc
+_ACEOF
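+# Illustrative usage (a sketch; the -ldmalloc/-DDMALLOC branch above only
+# takes effect if dmalloc.h was found by an earlier probe):
+#   ./configure --with-dmalloc=yes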
+
+
+
+# Check whether --with-mkl-include was given.
+if test "${with_mkl_include+set}" = set; then :
+  withval=$with_mkl_include; if test "x$withval" = xno; then MKL_INCLUDE= ; else if test "x$withval" = xyes; then MKL_INCLUDE="" ; else MKL_INCLUDE="$withval" ; fi  ; fi
+else
+  true
+fi
+
+
+
+# Check whether --with-mkl was given.
+if test "${with_mkl+set}" = set; then :
+  withval=$with_mkl; if test "x$withval" = xno; then want_mkl_libs= ; else if test "x$withval" = xyes; then want_mkl_libs="-static -L/opt/intel/mkl/lib/ia32/ -lmkl_solver -Wl,--start-group -lmkl_intel -lmkl_gnu_thread -lmkl_core -Wl,--end-group -fopenmp -lpthread" ; else want_mkl_libs="$withval" ; fi  ; fi
+else
+  true
+fi
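+# Illustrative MKL configuration (a sketch: paths and library names are
+# examples only and depend on the local installation; MKLROOT is assumed
+# to point at it):
+#   ./configure --with-mkl="-L${MKLROOT}/lib/intel64 -lmkl_rt" \
+#               --with-mkl-include="${MKLROOT}/include"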
+
+
+
+# Check whether --with-zlib was given.
+if test "${with_zlib+set}" = set; then :
+  withval=$with_zlib; if test "x$withval" = xno; then want_zlib_libs="" ; else want_zlib_libs="$withval" ; fi
+else
+  want_zlib_libs=""
+fi
+
+for ac_header in zlib.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "zlib.h" "ac_cv_header_zlib_h" "$ac_includes_default"
+if test "x$ac_cv_header_zlib_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_ZLIB_H 1
+_ACEOF
+
+fi
+
+done
+
+want_zlib_support="no"
+if test x"$want_zlib_libs" != x"" && test "x$ac_cv_header_zlib_h" = xyes ; then
+	      	{ $as_echo "$as_me:${as_lineno-$LINENO}: Enabling zlib support." >&5
+$as_echo "$as_me: Enabling zlib support." >&6;}
+
+$as_echo "#define RSB_WANT_ZLIB_SUPPORT 1" >>confdefs.h
+
+	if test x"$want_zlib_libs" = x"yes" ; then want_zlib_libs=-lz; fi
+	LIBS="${LIBS} $want_zlib_libs"
+	want_zlib_support="yes"
+	else
+
+$as_echo "#define RSB_WANT_ZLIB_SUPPORT 0" >>confdefs.h
+
+	want_zlib_support="no"
+fi
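+# Illustrative usage of the --with-zlib switch above (a sketch; "yes" is
+# rewritten to -lz by the code above):
+#   ./configure --with-zlib=yes      # link -lz, define RSB_WANT_ZLIB_SUPPORT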
+
+# Check whether --with-ompio was given.
+if test "${with_ompio+set}" = set; then :
+  withval=$with_ompio; if test "x$withval" = xno; then want_ompio="no" ; else want_ompio="yes" ; fi
+else
+  want_ompio="no"
+fi
+
+
+if test x"$want_ompio" = x"yes" ; then
+      	{ $as_echo "$as_me:${as_lineno-$LINENO}: Enabling OpenMP + fgets_unlocked() IO support." >&5
+$as_echo "$as_me: Enabling OpenMP + fgets_unlocked() IO support." >&6;}
+
+$as_echo "#define RSB_WANT_OMPIO_SUPPORT 1" >>confdefs.h
+
+	else
+
+$as_echo "#define RSB_WANT_OMPIO_SUPPORT 0" >>confdefs.h
+
+fi
+for ac_header in limits.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "limits.h" "ac_cv_header_limits_h" "$ac_includes_default"
+if test "x$ac_cv_header_limits_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_LIMITS_H 1
+_ACEOF
+ break
+else
+  break
+fi
+
+done
+
+for ac_header in signal.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "signal.h" "ac_cv_header_signal_h" "$ac_includes_default"
+if test "x$ac_cv_header_signal_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_SIGNAL_H 1
+_ACEOF
+ break
+else
+  break
+fi
+
+done
+
+# an AIX specific check
+for ac_header in sys/systemcfg.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "sys/systemcfg.h" "ac_cv_header_sys_systemcfg_h" "$ac_includes_default"
+if test "x$ac_cv_header_sys_systemcfg_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_SYS_SYSTEMCFG_H 1
+_ACEOF
+ break
+else
+  break
+fi
+
+done
+
+
+$as_echo "#define RSB_WANT_VERBOSE_MESSAGES 0" >>confdefs.h
+
+
+$as_echo "#define RSB_WANT_KERNELS_DEBUG 1" >>confdefs.h
+
+
+$as_echo "#define RSB_SORT_IN_PLACE 0" >>confdefs.h
+
+
+$as_echo "#define RSB_BLOCK_SMALL_INDICES 1" >>confdefs.h
+
+detected_memhinfo=`$srcdir/scripts/linux-sys-cache.sh`;
+
+# Check whether --with-nounroll-cflag was given.
+if test "${with_nounroll_cflag+set}" = set; then :
+  withval=$with_nounroll_cflag; if test "x$withval" = xno; then userset_nounroll_cflag="" ; else userset_nounroll_cflag="$withval" ; fi
+else
+  userset_nounroll_cflag="";
+fi
+
+default_want_int_verrbosity="0";
+# Check whether --enable-internals-error-verbosity was given.
+if test "${enable_internals_error_verbosity+set}" = set; then :
+  enableval=$enable_internals_error_verbosity; if test "x$enableval" = xno; then want_int_verrbosity="${default_want_int_verrbosity}" ; else want_int_verrbosity="$enableval" ; fi
+else
+  want_int_verrbosity="${default_want_int_verrbosity}";
+fi
+
+default_want_ext_verrbosity="0";
+# Check whether --enable-interface-error-verbosity was given.
+if test "${enable_interface_error_verbosity+set}" = set; then :
+  enableval=$enable_interface_error_verbosity; if test "x$enableval" = xno; then want_ext_verrbosity="${default_want_ext_verrbosity}" ; else want_ext_verrbosity="$enableval" ; fi
+else
+  want_ext_verrbosity="${default_want_ext_verrbosity}";
+fi
+
+default_want_io_level=0;
+# Check whether --enable-io-level was given.
+if test "${enable_io_level+set}" = set; then :
+  enableval=$enable_io_level; if test "x$enableval" = xno; then want_io_level="${default_want_io_level}" ; else want_io_level="$enableval" ; fi
+else
+  want_io_level="7";
+fi
+
+
+# Check whether --with-max-threads was given.
+if test "${with_max_threads+set}" = set; then :
+  withval=$with_max_threads; if test "x$withval" = xno; then want_max_threads="64" ; else want_max_threads="$withval" ; fi
+else
+  want_max_threads="64";
+fi
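+# Illustrative usage (a sketch): raise the compiled-in thread limit above
+# the default of 64, e.g. on a larger machine:
+#   ./configure --with-max-threads=128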
+
+
+# Check whether --with-memhinfo was given.
+if test "${with_memhinfo+set}" = set; then :
+  withval=$with_memhinfo; if test "x$withval" = xno; then memhinfo="" ; else memhinfo="$withval" ; fi
+else
+  memhinfo="";
+fi
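+# Illustrative usage (a sketch; the cache-hierarchy string format below is
+# an assumption patterned after what scripts/linux-sys-cache.sh detects):
+#   ./configure --with-memhinfo="L2:4/64/512K,L1:8/64/24K"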
+
+RSB_USER_SET_MEM_HIERARCHY_INFO="${memhinfo}"
+
+RSB_DETECTED_MEM_HIERARCHY_INFO="${detected_memhinfo}"
+
+
+# Check whether --with-ar was given.
+if test "${with_ar+set}" = set; then :
+  withval=$with_ar; if test "x$withval" = xno; then true ; else AR="$withval" ; fi
+else
+   AR="$AR"
+fi
+
+
+# Check whether --with-arflags was given.
+if test "${with_arflags+set}" = set; then :
+  withval=$with_arflags; if test "x$withval" = xno; then true ; else ARFLAGS="$withval" ; fi
+else
+   ARFLAGS="$ARFLAGS"
+fi
+
+
+# Check whether --with-m4 was given.
+if test "${with_m4+set}" = set; then :
+  withval=$with_m4; if test "x$withval" = xno; then true ; else M4="$withval" ; fi
+else
+  true;
+fi
+
+
+if test "x$enable_openmp" = x ; then
+	enable_openmp=yes
+fi
+if test "x$enable_openmp" != x"yes" && test "x$want_ompio" = x"yes"; then
+	as_fn_error $? "You must enable OpenMP if you want OpenMP-backed I/O!" "$LINENO" 5
+fi
+#dnl	***********************************************************************
+
+
+
+
+#dnl	***********************************************************************
+# the default block unrolls
+default_unrolls=1
+default_util_unrolls=16
+# the default types for macro-generated code
+blas_matrix_types="double,float,float complex,double complex"
+psblas_matrix_types="${blas_matrix_types}"
+non_blas_matrix_types="int"
+all_matrix_types="$non_blas_matrix_types,$blas_matrix_types"
+#default_types=int,double,float,float complex, double complex
+# float complex and double complex are c99 types
+default_types="double,float,float complex,double complex"
+# the default matrix operations
+blas_matrix_ops=spmv_uaua,spmv_uauz,spmv_uxua,spmv_unua,spmv_sasa,spsv_uxua,spmv_sxsa,spsv_sxsx
+psblas_matrix_ops="${blas_matrix_ops}",infty_norm,rowssums,scale
+extra_blas_matrix_ops= # 20140719 these kernels are not active at the moment
+#
+non_blas_matrix_ops=infty_norm,rowssums,scale
+all_matrix_ops="$blas_matrix_ops,$non_blas_matrix_ops$extra_blas_matrix_ops"
+#
+default_matrix_ops=spmv_uaua,spmv_uauz,spmv_uxua,spmv_unua,spmv_sasa,spsv_uxua,spmv_sxsa,spsv_sxsx,infty_norm,rowssums,scale
+#dnl	***********************************************************************
+#dnl	***********************************************************************
+row_unrolls="$default_unrolls"
+#dnl	***********************************************************************
+column_unrolls="$default_unrolls"
+#dnl	***********************************************************************
+
+$as_echo "#define RSB_WANT_SPARSE_BLAS_LEVEL_1 1" >>confdefs.h
+
+#dnl	***********************************************************************
+# Check whether --enable-matrix-types was given.
+if test "${enable_matrix_types+set}" = set; then :
+  enableval=$enable_matrix_types; want_matrix_types="$enableval"
+else
+  want_matrix_types="$default_types"
+fi
+
+#dnl	***********************************************************************
+# Check whether --enable-matrix-ops was given.
+if test "${enable_matrix_ops+set}" = set; then :
+  enableval=$enable_matrix_ops; want_matrix_ops="$enableval"
+else
+  want_matrix_ops="$default_matrix_ops"
+fi
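+# Illustrative kernel selection via the two switches above (a sketch; the
+# special values "all", "blas" and "psblas" are expanded further below):
+#   ./configure --enable-matrix-types="double,double complex"
+#   ./configure --enable-matrix-types=blas --enable-matrix-ops=psblas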
+
+#dnl	***********************************************************************
+# Check whether --enable-vector-utils-loop-unrolls was given.
+if test "${enable_vector_utils_loop_unrolls+set}" = set; then :
+  enableval=$enable_vector_utils_loop_unrolls; util_unrolls="$enableval"
+else
+  util_unrolls="$default_util_unrolls"
+fi
+
+#dnl	***********************************************************************
+# Check whether --enable-octave-testing was given.
+if test "${enable_octave_testing+set}" = set; then :
+  enableval=$enable_octave_testing; if test "x$enableval" = xno; then
+	enable_octave_testing=no
+ else
+	enable_octave_testing=yes
+ fi
+else
+  enable_octave_testing=yes
+fi
+
+#dnl	***********************************************************************
+if test "x$want_matrix_types" = xall; then
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: Enabling all matrix types." >&5
+$as_echo "$as_me: Enabling all matrix types." >&6;}
+	want_matrix_types="${all_matrix_types}";
+else
+	true;
+	if test "x$want_matrix_types" = x"blas"; then
+		{ $as_echo "$as_me:${as_lineno-$LINENO}: Enabling all matrix types for Sparse BLAS (S,C,D,Z)." >&5
+$as_echo "$as_me: Enabling all matrix types for Sparse BLAS (S,C,D,Z)." >&6;}
+		want_matrix_types="${blas_matrix_types}";
+		#if test "x$enable_octave_testing" = xyes; then want_matrix_types=${want_matrix_types},int ; fi
+	else
+	    if test "x$want_matrix_types" = x"psblas"; then
+		{ $as_echo "$as_me:${as_lineno-$LINENO}: Enabling matrix types for Parallel Sparse BLAS (PSBLAS)." >&5
+$as_echo "$as_me: Enabling matrix types for Parallel Sparse BLAS (PSBLAS)." >&6;}
+		want_matrix_types="${psblas_matrix_types}";
+		#if test "x$enable_octave_testing" = xyes; then want_matrix_types=${want_matrix_types},int ; fi
+	    else
+		true;
+	    fi
+	fi
+fi
+#dnl	***********************************************************************
+if test "x$want_matrix_ops" = xall; then
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: Enabling all of the matrix ops." >&5
+$as_echo "$as_me: Enabling all of the matrix ops." >&6;}
+	want_matrix_ops="${all_matrix_ops}";
+else
+	if test "x$want_matrix_ops" = xblas; then
+		{ $as_echo "$as_me:${as_lineno-$LINENO}: Enabling matrix ops for Sparse BLAS." >&5
+$as_echo "$as_me: Enabling matrix ops for Sparse BLAS." >&6;}
+		want_matrix_ops="${blas_matrix_ops}";
+	else
+	    if test "x$want_matrix_ops" = xpsblas; then
+		{ $as_echo "$as_me:${as_lineno-$LINENO}: Enabling matrix ops for Parallel Sparse BLAS (PSBLAS)." >&5
+$as_echo "$as_me: Enabling matrix ops for Parallel Sparse BLAS (PSBLAS)." >&6;}
+		want_matrix_ops="${psblas_matrix_ops}";
+	    else
+		true;
+	    fi
+	fi
+fi
+#dnl	***********************************************************************
+if test x"$want_matrix_types" != x"$default_types" -o x"$want_matrix_ops" != x"$default_matrix_ops" -o x"$util_unrolls" != x"$default_util_unrolls"; then
+if test x"$M4" = x"false"; then
+	as_fn_error $? "Did not specify an m4 processor, so code generation from m4 files is disabled (and so configure time specification of non default types, operations, unrolls) !" "$LINENO" 5
+fi
+fi
+#dnl	***********************************************************************
+sparse_blas_interface_default=yes
+# Check whether --enable-sparse-blas-interface was given.
+if test "${enable_sparse_blas_interface+set}" = set; then :
+  enableval=$enable_sparse_blas_interface; if test "x$enableval" = xno; then
+	sparse_blas_interface=no
+ else
+	sparse_blas_interface=yes
+ fi
+else
+  sparse_blas_interface="${sparse_blas_interface_default}"
+fi
+
+#dnl	***********************************************************************
+enable_looping_kernels=no
+
+#dnl	***********************************************************************
+
+# Check whether --with-oski was given.
+if test "${with_oski+set}" = set; then :
+  withval=$with_oski; if test "x$withval" = xno; then
+	enable_oski=no
+ else
+	enable_oski=yes
+ fi
+else
+  enable_oski=no
+fi
+
+#dnl	***********************************************************************
+#AC_ARG_WITH(papi, AC_HELP_STRING([--with-papi], [PAPI (Performance Application Programming Interface). UNFINISHED.]),
+#[if test "x$withval" = xno; then
+#	enable_papi=no
+# else
+#	enable_papi=yes
+# fi],[enable_papi=no])
+#dnl	***********************************************************************
+
+
+# Check whether --with-likwid was given.
+if test "${with_likwid+set}" = set; then :
+  withval=$with_likwid; if test "x$withval" = xno; then
+	enable_likwid=no
+ else
+	enable_likwid=yes
+ fi
+else
+  enable_likwid=no
+fi
+
+#dnl	***********************************************************************
+# Check whether --enable-allocator-wrapper was given.
+if test "${enable_allocator_wrapper+set}" = set; then :
+  enableval=$enable_allocator_wrapper; if test "x$enableval" = xno; then
+	disable_allocator_wrapper=yes
+ else
+	disable_allocator_wrapper=no
+ fi
+else
+  disable_allocator_wrapper=yes
+fi
+
+#dnl	***********************************************************************
+# Check whether --enable-alignment was given.
+if test "${enable_alignment+set}" = set; then :
+  enableval=$enable_alignment; if test "x$enableval" = xno; then
+	enable_alignment=no
+ else
+	enable_alignment=yes
+ fi
+else
+  enable_alignment=yes
+fi
+
+enable_b=yes
+enable_c=yes
+#dnl	***********************************************************************
+# Check whether --enable-librsb-stats was given.
+if test "${enable_librsb_stats+set}" = set; then :
+  enableval=$enable_librsb_stats; if test "x$enableval" = xno; then
+	enable_librsb_stats=no
+ else
+	enable_librsb_stats=yes
+ fi
+else
+  enable_librsb_stats=no
+fi
+
+#dnl	***********************************************************************
+# Check whether --enable-rsb-num-threads was given.
+if test "${enable_rsb_num_threads+set}" = set; then :
+  enableval=$enable_rsb_num_threads; if test "x$enableval" = xno; then
+	enable_rsb_num_threads=no
+ else
+	enable_rsb_num_threads=yes
+ fi
+else
+  enable_rsb_num_threads=no
+fi
+
+#dnl	***********************************************************************
+#dnl	***********************************************************************
+# Check whether --enable-fortran-module-install was given.
+if test "${enable_fortran_module_install+set}" = set; then :
+  enableval=$enable_fortran_module_install; if test "x$enableval" = xno; then
+	want_blas_sparse_mod_install=no
+ else
+	want_blas_sparse_mod_install=yes
+ fi
+else
+  want_blas_sparse_mod_install=no
+fi
+
+#dnl	***********************************************************************
+want_install_pkg_config_default=no
+# Check whether --enable-pkg-config-install was given.
+if test "${enable_pkg_config_install+set}" = set; then :
+  enableval=$enable_pkg_config_install; if test "x$enableval" = x"yes"; then
+	want_install_pkg_config=yes
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: Will install pkg-config librsb.pc file." >&5
+$as_echo "$as_me: Will install pkg-config librsb.pc file." >&6;}
+ else
+	want_install_pkg_config=no
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: Will not install pkg-config librsb.pc file (--enable-pkg-config-install to change)." >&5
+$as_echo "$as_me: Will not install pkg-config librsb.pc file (--enable-pkg-config-install to change)." >&6;}
+ fi
+else
+  want_install_pkg_config="${want_install_pkg_config_default}"
+fi
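+# Once installed, the librsb.pc file lets client builds query flags in the
+# usual way (a sketch; assumes librsb.pc is on pkg-config's search path):
+#   cc `pkg-config --cflags librsb` -o app app.c `pkg-config --libs librsb`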
+
+#dnl	***********************************************************************
+#dnl	***********************************************************************
+# Check whether --enable-doc-build was given.
+if test "${enable_doc_build+set}" = set; then :
+  enableval=$enable_doc_build; if test "x$enableval" = xno; then
+	want_build_doc=no
+ else
+	want_build_doc=yes
+ fi
+else
+  want_build_doc=no
+fi
+
+#dnl	***********************************************************************
+# Check whether --enable-shlib-linked-examples was given.
+if test "${enable_shlib_linked_examples+set}" = set; then :
+  enableval=$enable_shlib_linked_examples; if test "x$enableval" = xno; then
+	want_rsb_dl=no
+ else
+	want_rsb_dl=yes
+ fi
+else
+  want_rsb_dl=no
+fi
+
+#dnl	***********************************************************************
+# Check whether --enable-c-examples was given.
+if test "${enable_c_examples+set}" = set; then :
+  enableval=$enable_c_examples; if test "x$enableval" = xno; then
+	enable_c_examples=no
+ else
+	enable_c_examples=yes
+ fi
+else
+  enable_c_examples=yes
+fi
+
+#dnl	***********************************************************************
+# Check whether --enable-fortran-examples was given.
+if test "${enable_fortran_examples+set}" = set; then :
+  enableval=$enable_fortran_examples; if test "x$enableval" = xno; then
+	enable_fortran_examples=no
+ else
+	enable_fortran_examples=yes
+ fi
+else
+  enable_fortran_examples=yes
+fi
+
+#dnl	***********************************************************************
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C/C++ restrict keyword" >&5
+$as_echo_n "checking for C/C++ restrict keyword... " >&6; }
+if ${ac_cv_c_restrict+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_cv_c_restrict=no
+   # The order here caters to the fact that C++ does not require restrict.
+   for ac_kw in __restrict __restrict__ _Restrict restrict; do
+     cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+typedef int * int_ptr;
+	int foo (int_ptr $ac_kw ip) {
+	return ip[0];
+       }
+int
+main ()
+{
+int s[1];
+	int * $ac_kw t = s;
+	t[0] = 0;
+	return foo(t)
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_c_restrict=$ac_kw
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+     test "$ac_cv_c_restrict" != no && break
+   done
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_restrict" >&5
+$as_echo "$ac_cv_c_restrict" >&6; }
+
+ case $ac_cv_c_restrict in
+   restrict) ;;
+   no) $as_echo "#define restrict /**/" >>confdefs.h
+ ;;
+   *)  cat >>confdefs.h <<_ACEOF
+#define restrict $ac_cv_c_restrict
+_ACEOF
+ ;;
+ esac
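+# The probe above tries, in order, __restrict, __restrict__, _Restrict and
+# restrict, and #defines `restrict' in config.h to the first spelling the
+# compiler accepts (or to nothing when none is supported).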
+
+# Check whether --enable-restrict was given.
+if test "${enable_restrict+set}" = set; then :
+  enableval=$enable_restrict; if test "x$enableval" = xno; then
+	enable_restrict=no
+ else
+	enable_restrict=yes
+ fi
+else
+  enable_restrict=yes
+fi
+
+#dnl	***********************************************************************
+
+# Check whether --with-c99-flag was given.
+if test "${with_c99_flag+set}" = set; then :
+  withval=$with_c99_flag; if test "x$withval" = xno; then
+	enable_c99=no
+ else
+	enable_c99=yes
+ fi
+else
+  enable_c99=yes
+fi
+
+#dnl	***********************************************************************
+want_spsm_diagonal_check_default=yes
+# Check whether --enable-zero-division-checks-on-solve was given.
+if test "${enable_zero_division_checks_on_solve+set}" = set; then :
+  enableval=$enable_zero_division_checks_on_solve; if test "x$enableval" = xyes; then
+	want_spsm_diagonal_check=yes
+ else
+	want_spsm_diagonal_check=no
+ fi
+else
+  want_spsm_diagonal_check="${want_spsm_diagonal_check_default}"
+fi
+
+#dnl	***********************************************************************
+want_sigaction_in_rsbench=no
+# Check whether --enable-sigaction-interruptible-rsbench was given.
+if test "${enable_sigaction_interruptible_rsbench+set}" = set; then :
+  enableval=$enable_sigaction_interruptible_rsbench; if test "x$enableval" = xyes; then
+	want_sigaction_in_rsbench=yes;
+ else
+	want_sigaction_in_rsbench=no;
+ fi
+else
+  want_sigaction_in_rsbench=no;
+fi
+
+if test x$want_sigaction_in_rsbench = xno ; then
+
+$as_echo "#define RSB_WANT_ACTION_SIGNAL 1" >>confdefs.h
+
+fi
+#dnl	***********************************************************************
+enable_optimize=no
+#dnl	***********************************************************************
+# Check whether --enable-internal-headers-install was given.
+if test "${enable_internal_headers_install+set}" = set; then :
+  enableval=$enable_internal_headers_install; if test "x$enableval" = xyes; then
+ 	enable_ihi=yes;
+ 	{ $as_echo "$as_me:${as_lineno-$LINENO}: Will install also internal headers." >&5
+$as_echo "$as_me: Will install also internal headers." >&6;}
+else
+ 	enable_ihi=no;
+fi
+else
+  enable_ihi=no
+fi
+
+#dnl	***********************************************************************
+RSB_USE_ASSERT="";
+# Check whether --enable-debug-getenvs was given.
+if test "${enable_debug_getenvs+set}" = set; then :
+  enableval=$enable_debug_getenvs; if test "x$enableval" = xyes; then
+	RSB_USE_ASSERT=1;
+
+$as_echo "#define RSB_ALLOW_INTERNAL_GETENVS 1" >>confdefs.h
+
+else
+
+$as_echo "#define RSB_ALLOW_INTERNAL_GETENVS 0" >>confdefs.h
+
+fi
+else
+  true;
+fi
+
+#dnl	***********************************************************************
+RSB_USE_ASSERT="";
+# Check whether --enable-debug was given.
+if test "${enable_debug+set}" = set; then :
+  enableval=$enable_debug; if test "x$enableval" = xyes; then
+	enable_debug=yes;
+	RSB_USE_ASSERT=1;
+	want_int_verrbosity=1; # FIXME: this shall be removed from here, once the library gets stable for release!
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: With the debug switch enabled, also setting internal error verbosity level at value 1." >&5
+$as_echo "$as_me: With the debug switch enabled, also setting internal error verbosity level at value 1." >&6;}
+else
+	enable_debug=no
+fi
+else
+  enable_debug=no
+fi
+
+#dnl	***********************************************************************
+
+if test "x$sparse_blas_interface" = xyes; then
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: Will build a Sparse BLAS interface to librsb." >&5
+$as_echo "$as_me: Will build a Sparse BLAS interface to librsb." >&6;}
+
+$as_echo "#define RSB_WITH_SPARSE_BLAS_INTERFACE 1" >>confdefs.h
+
+else
+      	{ $as_echo "$as_me:${as_lineno-$LINENO}: Will not build a Sparse BLAS interface to librsb." >&5
+$as_echo "$as_me: Will not build a Sparse BLAS interface to librsb." >&6;}
+fi
+#dnl	***********************************************************************
+want_looping_kernels=0;
+if test "x$enable_looping_kernels" = xyes; then
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: Enabling looping kernels." >&5
+$as_echo "$as_me: Enabling looping kernels." >&6;}
+	want_looping_kernels=1;
+
+$as_echo "#define RSB_WANT_LOOPING_KERNELS 1" >>confdefs.h
+
+
+
+else
+	true
+fi
+#dnl	***********************************************************************
+
+# Check whether --with-papi was given.
+if test "${with_papi+set}" = set; then :
+  withval=$with_papi; if test "x$withval" = xno; then want_papi_libs= ; else enable_papi=yes; want_papi_libs="$withval" ; fi
+else
+  want_papi_libs="-lpapi"
+fi
+
+#dnl	***********************************************************************
+if test "x$enable_papi" = xyes; then
+	if test "x$ac_cv_header_papi_h" != xyes; then
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Header file <papi.h> not found, therefore we will not use it!" >&5
+$as_echo "$as_me: WARNING: Header file <papi.h> not found, therefore we will not use it!" >&2;}
+	else
+	if test "$want_papi_libs" = yes ; then want_papi_libs=-lpapi ; fi
+	RSB_RSBENCH_LIBS="${RSB_RSBENCH_LIBS} $want_papi_libs"
+	# TODO : set CFLAGS !
+      	{ $as_echo "$as_me:${as_lineno-$LINENO}: Enabling PAPI (Performance Application Programming Interface)." >&5
+$as_echo "$as_me: Enabling PAPI (Performance Application Programming Interface)." >&6;}
+	# FIXME: should differentiate RSB_WANT_PERFORMANCE_COUNTERS from RSB_HAVE_PAPI
+
+$as_echo "#define RSB_WANT_PERFORMANCE_COUNTERS 1" >>confdefs.h
+
+
+$as_echo "#define RSB_HAVE_PAPI 1" >>confdefs.h
+
+	fi
+else
+      		true
+fi
+#dnl	***********************************************************************
+if test "x$enable_likwid" = xyes; then
+	if test "x$LIKWID_LIBS" = x; then
+		LIKWID_LIBS="-llikwid"
+	fi
+  	{ $as_echo "$as_me:${as_lineno-$LINENO}: Enabling support for LIKWID (LIKWID_CFLAGS=${LIKWID_CFLAGS}) (LIKWID_LIBS=${LIKWID_LIBS})." >&5
+$as_echo "$as_me: Enabling support for LIKWID (LIKWID_CFLAGS=${LIKWID_CFLAGS}) (LIKWID_LIBS=${LIKWID_LIBS})." >&6;}
+	RSB_RSBENCH_LIBS="${RSB_RSBENCH_LIBS} ${LIKWID_LIBS}"
+	RSB_RSBENCH_CFLAGS="${RSB_RSBENCH_CFLAGS} ${LIKWID_CFLAGS}"
+
+$as_echo "#define RSB_WITH_LIKWID 1" >>confdefs.h
+
+else
+
+$as_echo "#define RSB_WITH_LIKWID 0" >>confdefs.h
+
+fi
+#dnl	***********************************************************************
+if test "x$enable_hwloc" = xyes; then
+	if test "x$want_hwloc_libs" != x; then
+		HWLOC_LIBS="$want_hwloc_libs"
+	fi
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: Enabling support for HWLOC (HWLOC_CFLAGS=${HWLOC_CFLAGS}) (HWLOC_LIBS=${HWLOC_LIBS})." >&5
+$as_echo "$as_me: Enabling support for HWLOC (HWLOC_CFLAGS=${HWLOC_CFLAGS}) (HWLOC_LIBS=${HWLOC_LIBS})." >&6;}
+	RSB_RSBENCH_LIBS="${RSB_RSBENCH_LIBS} ${HWLOC_LIBS}"
+	RSB_RSBENCH_CFLAGS="${RSB_RSBENCH_CFLAGS} ${HWLOC_CFLAGS}"
+
+$as_echo "#define RSB_WITH_HWLOC 1" >>confdefs.h
+
+else
+
+$as_echo "#define RSB_WITH_HWLOC 0" >>confdefs.h
+
+fi
+#dnl	***********************************************************************
+if test "x$disable_allocator_wrapper" = xyes; then
+
+$as_echo "#define RSB_DISABLE_ALLOCATOR_WRAPPER 1" >>confdefs.h
+
+
+$as_echo "#define RSB_WANT_ALLOCATOR_LIMITS 0" >>confdefs.h
+
+      { $as_echo "$as_me:${as_lineno-$LINENO}: Will disable memory allocators wrappers." >&5
+$as_echo "$as_me: Will disable memory allocators wrappers." >&6;}
+else
+
+$as_echo "#define RSB_WANT_ALLOCATOR_LIMITS 1" >>confdefs.h
+
+      { $as_echo "$as_me:${as_lineno-$LINENO}: Enabling memory allocators wrappers." >&5
+$as_echo "$as_me: Enabling memory allocators wrappers." >&6;}
+fi
+#dnl	***********************************************************************
+#dnl	***********************************************************************
+if test "x$enable_alignment" = xno; then
+      { $as_echo "$as_me:${as_lineno-$LINENO}: Will not enforce aligned memory chunks allocation." >&5
+$as_echo "$as_me: Will not enforce aligned memory chunks allocation." >&6;}
+else
+      { $as_echo "$as_me:${as_lineno-$LINENO}: Will enforce aligned memory chunks allocation." >&5
+$as_echo "$as_me: Will enforce aligned memory chunks allocation." >&6;}
+
+$as_echo "#define RSB_WANT_DOUBLE_ALIGNED 1" >>confdefs.h
+
+fi
+#dnl	***********************************************************************
+if test "x$enable_librsb_stats" = xyes; then
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: Enabling collection of time statistics in librsb operations (this introduces an overhead)." >&5
+$as_echo "$as_me: Enabling collection of time statistics in librsb operations (this introduces an overhead)." >&6;}
+
+$as_echo "#define RSB_WANT_LIBRSB_STATS 1" >>confdefs.h
+
+fi
+#dnl	***********************************************************************
+if test "x$enable_rsb_num_threads" = xyes; then
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: Enabling experimental RSB_NUM_THREADS environment variable." >&5
+$as_echo "$as_me: Enabling experimental RSB_NUM_THREADS environment variable." >&6;}
+
+$as_echo "#define RSB_WANT_RSB_NUM_THREADS 1" >>confdefs.h
+
+fi
+#dnl	***********************************************************************
+#dnl	***********************************************************************
+if test "x$enable_c_examples" = xno; then
+      { $as_echo "$as_me:${as_lineno-$LINENO}: Will not build C examples." >&5
+$as_echo "$as_me: Will not build C examples." >&6;}
+else
+      { $as_echo "$as_me:${as_lineno-$LINENO}: Will build C examples." >&5
+$as_echo "$as_me: Will build C examples." >&6;}
+fi
+#dnl	***********************************************************************
+if test "x$enable_fortran_examples" = xno; then
+      { $as_echo "$as_me:${as_lineno-$LINENO}: Will not build Fortran examples." >&5
+$as_echo "$as_me: Will not build Fortran examples." >&6;}
+else
+      { $as_echo "$as_me:${as_lineno-$LINENO}: Will build (experimental) Fortran examples." >&5
+$as_echo "$as_me: Will build (experimental) Fortran examples." >&6;}
+fi
+#dnl	***********************************************************************
+if test "x$enable_restrict" = xyes; then
+      { $as_echo "$as_me:${as_lineno-$LINENO}: Will use the C99 restrict keyword." >&5
+$as_echo "$as_me: Will use the C99 restrict keyword." >&6;}
+      { $as_echo "$as_me:${as_lineno-$LINENO}: Will also add the -std=c99 flag." >&5
+$as_echo "$as_me: Will also add the -std=c99 flag." >&6;}
+      enable_c99=yes
+else
+      { $as_echo "$as_me:${as_lineno-$LINENO}: Will not use the C99 restrict keyword " >&5
+$as_echo "$as_me: Will not use the C99 restrict keyword " >&6;}
+fi
+#dnl	***********************************************************************
+if test "x$want_build_doc" = xyes ; then
+      if test x"$DOXYGEN" = x"false"; then
+	      as_fn_error $? "Doxygen not detected ! Please --disable-doc-build or supply a valid DOXYGEN variable." "$LINENO" 5
+	      want_build_doc=no
+      else
+	      { $as_echo "$as_me:${as_lineno-$LINENO}: Will rebuild the documentation using \"$DOXYGEN\" as Doxygen executable." >&5
+$as_echo "$as_me: Will rebuild the documentation using \"$DOXYGEN\" as Doxygen executable." >&6;}
+	      if test x"$HELP2MAN" = x"false"; then
+      	   	{ $as_echo "$as_me:${as_lineno-$LINENO}: Program man pages will not be generated: help2man not detected." >&5
+$as_echo "$as_me: Program man pages will not be generated: help2man not detected." >&6;}
+	      fi
+      fi
+else
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: Will not use Doxygen to build documentation (--enable-doc-build to change)." >&5
+$as_echo "$as_me: Will not use Doxygen to build documentation (--enable-doc-build to change)." >&6;}
+fi
+#dnl	***********************************************************************
+no_unroll_flags=""
+#dnl	***********************************************************************
+if test xyes = xyes; then # FIXME: a dedicated flag is needed here
+	# These per-compiler flags are not guaranteed optimal, but they are the best known defaults
+
+	if test "x${CC}" = x"xlc" -o "x${CC}" = x"xlc_r"  && test "x$spigni_forte" = "x" ; then
+		# use -qnostrict to turn  off aggressive optimization (debug cases)
+		# use -q64 to enable 64 bit compilation and ar -X 64 cru ... for linking (FIXME)
+          	# -qfdpr
+          	# The compiler generates additional symbol information for use by the AIX "fdprpro" code optimizer.
+		# /opt/freeware/bin path is harmful with autotools on the ENEA grid environment, as it is the default one!
+		PATH="/bin/:$PATH"
+
+		# the following should only be used along with -q64, not without! (FIXME)
+		ARFLAGS="-X 64 cru"
+		SPCFLAGS="-q 64"
+
+		#spigni_forte="-O3 -lmass -lessl"
+		spigni_forte="-O3 -lmass -lessl -q64 -bmaxdata:0x1000000000"
+		#spigni_forte="-O3 -lmass -lessl -q64 -bmaxdata:0x70000000"
+		# FIXME : configure is not smart enough to add -X64 to ARFLAGS
+		# FIXME : CXXFLAGS too
+#		spigni_forte="-O3 -lmass -lessl"
+		restrict_flags="-qkeyword=restrict"
+		c99_flags="-qlanglvl=extc99 $restrict_flags"
+		debug_flags="-O0 -g"
+		openmp_flags="-qsmp=omp"
+		if test -f /bin/uname ; then
+			# some AFS systems (e.g.: ENEA.it grid) need this fix
+			uname_M="`/bin/uname -M`"
+		else
+			uname_M="`uname -M`"
+		fi
+		if test "x${uname_M}" = x"IBM,9118-575" ; then
+			spigni_forte="$spigni_forte -qarch=pwr5 -qtune=pwr5"
+		fi
+		if test "x${uname_M}" = x"IBM,7040-681" ; then
+			spigni_forte="$spigni_forte -qarch=pwr4 -qtune=pwr4"
+		fi
+		# verbose : 0-3
+		# profiling info: -pga
+		# -qbrowse
+		no_unroll_flags="-qunroll=no"
+		# xlc has #pragma unroll !
+		{ $as_echo "$as_me:${as_lineno-$LINENO}: Guessing the compiler is xlc." >&5
+$as_echo "$as_me: Guessing the compiler is xlc." >&6;}
+	fi
+
+        have_icc=no; # a fix to set openmp_flags correctly
+        if test "x${CC}" = x"icc" || ${CC} -V 2>&1 | grep Intel ; then have_icc=yes ; fi
+
+        if test "x${have_icc}" = x"yes" && test "x$spigni_forte" = "x" ; then
+#	if test "x${CC}" = x"icc" && test "x$spigni_forte" = "x" ; then
+		spigni_forte="-O3 -xSSE3  -no-alias-const -no-multibyte-chars -pipe "
+		# note: -tpp6 & -tpp7 and so on are old icc flags (version 11 does not support them)
+		# ipo seems to break autotools
+		# -xS ?
+		# TODO : '-ax:SSE2'  .. generate multiple paths ..
+
+		# -ax turns on the vectorizer (MMX, SSEx, ...)
+		# -mtune=..
+		restrict_flags="-restrict" # !
+		c99_flags="$restrict_flags"
+		debug_flags="-O0 -g"
+		#no_unroll_flags="-fno-unroll"
+		#20110608 icc v12 wants -unroll=0
+		no_unroll_flags="-unroll=0"
+		#openmp_flags="-openmp" # -parallel
+		openmp_flags="-qopenmp" # -parallel
+		{ $as_echo "$as_me:${as_lineno-$LINENO}: Guessing the compiler is icc." >&5
+$as_echo "$as_me: Guessing the compiler is icc." >&6;}
+		walls="-Wall"
+	fi
+
+	if test "x${CC}" = x"pgcc" && test "x$spigni_forte" = "x" ; then
+		spigni_forte="-O3 -Mvect=cachesize:automatic,fuse,prefetch,sse -Mquad -Mscalarsse -Mnoframe -Minfo=all" # O3 is same as 4
+		c99_flags="-c99 -Xa"
+		restrict_flags="$c99_flags" # !
+		debug_flags="-O0 -g"
+		no_unroll_flags="-Mnounroll"
+		openmp_flags="-mp"
+		# -Mconcur is VERY interesting .. !!
+		# -Mlist (creates a listing file)
+		# -Mprof=hwcts     Use PAPI-based profiling with hardware counters (linux86-64 only).
+		# -pg exists, -g too
+		# -Mnovect disables the vectorizer, and is the default
+		{ $as_echo "$as_me:${as_lineno-$LINENO}: Guessing the compiler is pgcc." >&5
+$as_echo "$as_me: Guessing the compiler is pgcc." >&6;}
+	fi
+
+	if test "x$ac_cv_c_compiler_gnu" = xyes && test "x$spigni_forte" = "x" ; then
+		# note that CC=icc will not imply ac_cv_c_compiler_gnu=yes !
+		# -malign-double does not make sense on 64 bit archs and triggers errors
+		#spigni_forte="-O3 -fomit-frame-pointer -ffast-math"
+		spigni_forte="-O3 -fomit-frame-pointer -mtune=native"
+		c99_flags="-std=c99" # ?
+		restrict_flags="$c99_flags" # !
+		debug_flags="-O0 -ggdb"
+		no_unroll_flags="-fno-unroll-loops"
+		openmp_flags="-fopenmp"
+		if test x != x"${OPENMP_CFLAGS}" ; then
+			openmp_flags="${OPENMP_CFLAGS}"
+		fi
+		# NOTE: -ffast-math breaks conformance to the math functions' specifications, and therefore is EVIL
+		spigni_nativo="-pipe"
+		cpuinfomn=`cat /proc/cpuinfo| grep model.name | sed s/^.*://g`
+		# FIXME : the following will fail on tcsh
+#		if test x"` $CC -v 2>&1| grep -i red.*hat`" != x ; then
+		gcc_v=`$CC --version` # will be caught on tcsh
+		if test x"` $CC -v 2>&1 | grep -i red.*hat`" != x -o x"`echo $gcc_v | grep -i red.hat`" != x; then
+			# uhm..
+#			if test x"` echo $cpuinfomn | grep Athlon `" != x ; then
+#				# fails for a
+#				# model name      : AMD Athlon(tm) 7750 Dual-Core Processor
+#				spigni_nativo="$spigni_nativo -march=athlon -mtune=athlon"
+#			fi
+			if test x"` echo $cpuinfomn | grep 'AMD Opteron.*2216\>'`" != x ; then
+				# model name      : Dual-Core AMD Opteron(tm) Processor 2216
+				spigni_nativo="$spigni_nativo -march=opteron -mtune=opteron"
+			fi
+			if test x"` echo $cpuinfomn | grep 'AMD Opteron.*2352\>'`" != x ; then
+				# Opteron barcelona are 2344-2350, but the instruction set is ok
+				# model name      : AMD Athlon(tm) 7750 Dual-Core Processor
+				spigni_nativo="$spigni_nativo -march=barcelona -mtune=barcelona"
+			fi
+			if test x"` echo $cpuinfomn | grep 'AMD Athlon.*7750'`" != x ; then
+				# this is Phenom, not Opteron Barcelona, but same instruction set
+				# model name      : AMD Athlon(tm) 7750 Dual-Core Processor
+				spigni_nativo="$spigni_nativo -march=barcelona -mtune=barcelona"
+			fi
+			if test x"` echo $cpuinfomn | grep 'AMD Athlon.*64.*X2.*Dual Core Processor 6000.'`" != x ; then
+                        	# K9 microarchitecture
+				# this is Windsor, released May 24, 2006
+				# rossini.ibspan.waw.pl
+				# model name      : AMD Athlon(tm) 64 X2 Dual Core Processor 6000+
+	                        spigni_nativo="$spigni_nativo -march=amdfam10 -mtune=amdfam10"
+			fi
+			if test x"` echo $cpuinfomn | grep 'Xeon.*EXWL3...\>'`" != x ; then
+				# Wolfdale 	31..
+				# Kentsfield 	32..
+				# Yorkfield 	33..
+				# Lynnfield 	34..
+				# Bloomfield	35..
+				spigni_nativo="$spigni_nativo -march=core2 -mtune=core2"
+			fi
+			if test x"` echo $cpuinfomn | grep 'Xeon.*X7...'`" != x ; then
+				# Tigerton series, d.c.	72..
+				# Tigerton series, q.c.	73..	1066MT/s
+				# cresco1x .. portici.enea.it
+				# model name      : Intel(R) Xeon(R) CPU           X7350  @ 2.93GHz
+				# Tulsa series		71..
+				# crescobf.brindisi.enea.it:
+				# model name      : Intel(R) Xeon(R) CPU           X7350  @ 2.93GHz
+				spigni_nativo="$spigni_nativo -march=core2 -mtune=core2"
+			fi
+			if test x"` echo $cpuinfomn | grep 'Xeon.*EXWL70..\>'`" != x ; then
+				# Paxville (Netburst)
+				spigni_nativo="$spigni_nativo -march=pentium4 -mtune=pentium4"
+			fi
+			if test x"` echo $cpuinfomn | grep 'Xeon.*EXWL50..\>'`" != x ; then
+				# Dempsey (Netburst)
+				spigni_nativo="$spigni_nativo -march=pentium4 -mtune=pentium4"
+			fi
+			if test x"` echo $cpuinfomn | grep 'Core(TM)2 Quad CPU'`" != x ; then
+				# Conroe/Allendale
+				spigni_nativo="$spigni_nativo -march=core2 -mtune=core2"
+			fi
+			if test x"` echo $cpuinfomn | grep 'Xeon.*EXWL51..\>'`" != x ; then
+				# Woodcrest (Core2)
+				spigni_nativo="$spigni_nativo -march=core2 -mtune=core2"
+			fi
+			if test x"` echo $cpuinfomn | grep 'Xeon.*EXWL52..\>'`" != x ; then
+				# Wolfdale DP
+				spigni_nativo="$spigni_nativo -march=core2 -mtune=core2"
+			fi
+			if test x"` echo $cpuinfomn | grep 'Xeon.*EXWL53..\>'`" != x ; then
+				# Clovertown series, 1333MT/s, 2x4MB L2
+				# ce1-cresco.portici.enea.it
+				# model name     : Intel(R) Xeon(R) CPU           E5335  @ 2.00GHz
+				# Clovertown series, 1333MT/s
+				# cresco2-f3.portici.enea.it
+				# model name      : Intel(R) Xeon(R) CPU           E5345  @ 2.33GHz
+				# Harpertown series	54..	, 12 MB L2
+				# Gainestown (Nehalem)s.55..	4x256kB L2, 8MB L3
+				spigni_nativo="$spigni_nativo -march=core2 -mtune=core2"
+			fi
+#			if test x"` echo $cpuinfomn | grep Opteron `" != x ; then
+#				spigni_nativo="$spigni_nativo -march=opteron -mtune=opteron"
+#			fi
+			if test x"` echo $cpuinfomn | grep 'Pentium(R).4' `" != x ; then
+				spigni_nativo="$spigni_nativo -march=pentium4 -mtune=pentium4"
+			fi
+			if test x"` echo $cpuinfomn | grep 'Pentium III (Coppermine)' `" != x ; then
+				spigni_nativo="$spigni_nativo -march=pentium3 -mtune=pentium3 -msse"
+			fi
+#			if test x"` echo $cpuinfomn | grep 'Xeon'`" != x ; then
+#				Intel(R) Xeon(TM) CPU 3.00GHz
+#				spigni_nativo="$spigni_nativo -march=pentium4 -mtune=pentium4"
+#			fi
+		else
+			spigni_nativo="-march=native -mtune=native $spigni_nativo"
+		fi
+		# NOTE : compilers like gcc version 3.4.6 20060404 (Red Hat 3.4.6-10) do not accept native switch
+		# -march=pentium3
+		# on p4: --malign=double -march=pentium4 -mfpmath=sse -msse2 -
+		walls="-Wall -Wredundant-decls -Wno-switch -Wdisabled-optimization -Wdeclaration-after-statement   "" -Wpointer-arith -Wstrict-prototypes "
+		#" -pedantic"
+		{ $as_echo "$as_me:${as_lineno-$LINENO}: Guessing the C compiler is gcc." >&5
+$as_echo "$as_me: Guessing the C compiler is gcc." >&6;}
+	fi
+else
+	true
+fi
+
+#dnl	***********************************************************************
+#dnl	GNU FORTRAN runtime
+#dnl	***********************************************************************
+if test "x$ac_cv_fc_compiler_gnu" = xyes ; then
+	LIBS="${LIBS} -lgfortran"
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: Guessing the Fortran compiler is gfortran and adding -lgfortran to LIBS (invoke with ac_cv_fc_compiler_gnu=no to prevent this)." >&5
+$as_echo "$as_me: Guessing the Fortran compiler is gfortran and adding -lgfortran to LIBS (invoke with ac_cv_fc_compiler_gnu=no to prevent this)." >&6;}
+fi
+#dnl	***********************************************************************
+#dnl	CFLAGS handling starts here
+#dnl	***********************************************************************
+if test "x$CFLAGS" = x ; then
+if test "x$enable_optimize" = xyes && test x$enable_debug != xyes ; then
+	if test "x$mio_spigni_forte" = "x" ; then
+		true;
+	else
+		spigni_forte="$mio_spigni_forte";
+		spigni_nativo="";
+	fi
+	if test "x$spigni_forte" = "x" ; then
+		spigni="-O3 -malign-double $spigni_nativo"
+		#spigni="-O3 -fomit-frame-pointer -malign-double $spigni_nativo"
+	else
+		spigni="$spigni_forte $spigni_nativo"
+	fi
+
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: Adding ${spigni} to CFLAGS." >&5
+$as_echo "$as_me: Adding ${spigni} to CFLAGS." >&6;}
+	#CFLAGS="${CFLAGS} ${spigni}"
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: Overriding CFLAGS=\"$CFLAGS\".. " >&5
+$as_echo "$as_me: Overriding CFLAGS=\"$CFLAGS\".. " >&6;}
+	CFLAGS="${spigni}"
+else
+	if test "x$enable_debug" = xyes; then
+		if test "x$debug_flags" = "x" ; then
+			CFLAGS="-O0 -g"
+		else
+			CFLAGS="${debug_flags}"
+		fi
+		{ $as_echo "$as_me:${as_lineno-$LINENO}: Optimization turned off. Debugging enabled. (CFLAGS overwritten)" >&5
+$as_echo "$as_me: Optimization turned off. Debugging enabled. (CFLAGS overwritten)" >&6;}
+		# since we are allowed to suggest flags, we do so
+		CFLAGS="${CFLAGS} ${SPCFLAGS}"
+	else
+		true;
+	fi
+	true;
+fi
+#
+fi
+#dnl	***********************************************************************
+# fix just for SP
+if test x"${SPCFLAGS}" != x ; then
+	CFLAGS="${CFLAGS} ${SPCFLAGS}"
+fi
+#dnl	***********************************************************************
+if test x"$enable_dmalloc" == x"1"  ; then
+	CFLAGS="$CFLAGS $DMALLOC_CFLAGS"
+fi
+#dnl	***********************************************************************
+if test x"$want_mkl_libs" != x""  ; then
+	      	{ $as_echo "$as_me:${as_lineno-$LINENO}: Enabling MKL support in the benchmarking program (will add \"$MKL_INCLUDE\" to compilation flags; will link to \"$want_mkl_libs\")." >&5
+$as_echo "$as_me: Enabling MKL support in the benchmarking program (will add \"$MKL_INCLUDE\" to compilation flags; will link to \"$want_mkl_libs\")." >&6;}
+
+$as_echo "#define RSB_WANT_MKL 1" >>confdefs.h
+
+	RSB_RSBENCH_LIBS="${RSB_RSBENCH_LIBS} $want_mkl_libs"
+		if test -n "$MKL_INCLUDE" ; then
+			RSB_RSBENCH_CFLAGS="$RSB_RSBENCH_CFLAGS -I $MKL_INCLUDE"
+		fi
+	else
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: Disabling MKL support in the benchmarking program." >&5
+$as_echo "$as_me: Disabling MKL support in the benchmarking program." >&6;}
+
+$as_echo "#define RSB_WANT_MKL 0" >>confdefs.h
+
+fi
+#dnl	***********************************************************************
+if test x"$enable_openmp" = x"yes"; then
+
+$as_echo "#define RSB_WANT_OMP_RECURSIVE_KERNELS 1" >>confdefs.h
+
+	if test x"$openmp_flags" != "x" ; then
+		CFLAGS="${CFLAGS} $openmp_flags"
+		if test "x$OPENMP_FCFLAGS" = "x" ; then
+			OPENMP_FCFLAGS="$openmp_flags"
+		fi
+		FCFLAGS="${FCFLAGS} ${OPENMP_FCFLAGS}"
+	else
+		{ $as_echo "$as_me:${as_lineno-$LINENO}: We do not know an appropriate OpenMP-enabling flag but assume OpenMP is active." >&5
+$as_echo "$as_me: We do not know an appropriate OpenMP-enabling flag but assume OpenMP is active." >&6;}
+		CFLAGS="${CFLAGS}"
+	fi
+else
+
+$as_echo "#define RSB_WANT_OMP_RECURSIVE_KERNELS 0" >>confdefs.h
+
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: OpenMP code disabled: 1 thread at most is allowed." >&5
+$as_echo "$as_me: OpenMP code disabled: 1 thread at most is allowed." >&6;}
+        want_max_threads="1"
+	true;
+fi
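+# Illustrative usage (a sketch; --disable-openmp is the standard autoconf
+# negation of the enable_openmp switch handled elsewhere in this script):
+#   ./configure --disable-openmp     # serial build; want_max_threads forced to 1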
+#dnl	***********************************************************************
+if test "x$enable_c99" = xyes; then
+	if test "x$c99_flags" = "x" ; then
+		{ $as_echo "$as_me:${as_lineno-$LINENO}: We do not know an appropriate C99-enabling flag." >&5
+$as_echo "$as_me: We do not know an appropriate C99-enabling flag." >&6;}
+		CFLAGS="${CFLAGS}"
+	else
+		CFLAGS="${CFLAGS} $c99_flags"
+	fi
+else
+	true;
+fi
+#dnl	***********************************************************************
+
+if test "x$enable_oski" = xyes; then
+      	{ $as_echo "$as_me:${as_lineno-$LINENO}: Looking for user-set OSKI_INCLUDE, OSKI_LUA_PATH, OSKI_PATH environment variables." >&5
+$as_echo "$as_me: Looking for user-set OSKI_INCLUDE, OSKI_LUA_PATH, OSKI_PATH environment variables." >&6;}
+	save_CFLAGS="$CFLAGS"
+	if test -n "$OSKI_INCLUDE" ; then
+		CFLAGS="$CFLAGS -I $OSKI_INCLUDE"
+	fi
+	for ac_header in oski/oski.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "oski/oski.h" "ac_cv_header_oski_oski_h" "$ac_includes_default"
+if test "x$ac_cv_header_oski_oski_h" = xyes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_OSKI_OSKI_H 1
+_ACEOF
+ true
+fi
+
+done
+
+	CFLAGS="$save_CFLAGS"
+	if test "x$ac_cv_header_oski_oski_h" != xyes; then
+	as_fn_error $? "Header file <oski/oski.h> not found, therefore we will not use it!" "$LINENO" 5
+	else
+		# FIXME: this is temporary, for my own machines
+		if test -d "~/usr/local/include/" ; then
+			CFLAGS="$CFLAGS -I ~/usr/local/include/"
+		fi
+
+		if test -n "$OSKI_INCLUDE" ; then
+			CFLAGS="$CFLAGS -I $OSKI_INCLUDE"
+		fi
+
+		if test x"$OSKI_PATH" = x && test -d "/usr/local/lib/oski" ; then
+			OSKI_PATH=/usr/local/lib/oski
+		fi
+		if test x"$OSKI_LUA_PATH" = x ; then
+			OSKI_LUA_PATH="$OSKI_PATH/?.lua"
+			else
+			true;
+		fi
+		if test x"$OSKI_LIBS" = x ; then
+			# oski-1.0.1h works in this way
+			#OSKI_LIBS=`cat $OSKI_PATH/site-modules-static.txt | tr '\n' ' '`
+			OSKI_LIBS=`cat $OSKI_PATH/site-modules-shared.txt | tr '\n' ' '`
+			# the following often fail due to the incorrect order of libs:
+			#OSKI_LIBS=`cat $OSKI_PATH/site-modules-static.txt|sed 's/^\/.*\///g;s/^'/"$OSKI_PATH\/"/g | tr '\n' ' '`
+		fi
+		if test x"$OSKI_LIBS" = x ; then
+			as_fn_error $? "No linkable libraries for OSKI ? Disable OSKI support ot try setting OSKI_LIBS by hand." "$LINENO" 5
+		fi
+		if test x"${OSKI_CFLAGS}" = x && test -d "~/usr/local/include/"; then
+			OSKI_CFLAGS="$OSKI_CFLAGS -I  /usr/local/include/"
+			OSKI_CFLAGS="$OSKI_CFLAGS -I ~/usr/local/include/"
+		fi
+		#
+		RSB_RSBENCH_LIBS="${RSB_RSBENCH_LIBS} -L${OSKI_PATH} ${OSKI_LIBS}"
+
+		# FIXME: this is temporary, for my own machines
+		if test -d "~/usr/local/lib/oski/" ; then
+			RSB_RSBENCH_LIBS="${RSB_RSBENCH_LIBS} -L ~/usr/local/lib/oski/"
+		fi
+
+		RSB_RSBENCH_CFLAGS="${RSB_RSBENCH_CFLAGS} $OSKI_CFLAGS"
+	      	{ $as_echo "$as_me:${as_lineno-$LINENO}: Enabling comparative OSKI benchmarking." >&5
+$as_echo "$as_me: Enabling comparative OSKI benchmarking." >&6;}
+
+$as_echo "#define RSB_WANT_OSKI_BENCHMARKING 1" >>confdefs.h
+
+
+cat >>confdefs.h <<_ACEOF
+#define OSKI_LUA_PATH "$OSKI_LUA_PATH"
+_ACEOF
+
+	fi
+else
+      	true;
+fi
+#dnl
+RSB_CONST_MAX_SUPPORTED_THREADS="${want_max_threads}"
+
+#dnl	***********************************************************************
+
+cat >>confdefs.h <<_ACEOF
+#define RSB_INT_ERR_VERBOSITY $want_int_verrbosity
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define RSB_OUT_ERR_VERBOSITY $want_ext_verrbosity
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define RSB_WANT_IO_LEVEL $want_io_level
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define RSB_USER_SET_MEM_HIERARCHY_INFO "$RSB_USER_SET_MEM_HIERARCHY_INFO"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define RSB_DETECTED_MEM_HIERARCHY_INFO "$RSB_DETECTED_MEM_HIERARCHY_INFO"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define RSB_CONST_MAX_SUPPORTED_THREADS $RSB_CONST_MAX_SUPPORTED_THREADS
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define CFLAGS "$CFLAGS"
+_ACEOF
+
+
+cat >>confdefs.h <<_ACEOF
+#define CC "$CC"
+_ACEOF
+
+if test x"$RSB_USE_ASSERT" != x ; then
+
+cat >>confdefs.h <<_ACEOF
+#define RSB_USE_ASSERT "$RSB_USE_ASSERT"
+_ACEOF
+
+fi
+#dnl	***********************************************************************
+WANT_MATRIX_STORAGE=""
+if test x"$enable_c" = xyes ; then
+	WANT_MATRIX_BCOO_STORAGE=BCOR
+#	WANT_MATRIX_BCOO_STORAGE=BCOR,BCOC
+	WANT_MATRIX_STORAGE="$WANT_MATRIX_STORAGE,$WANT_MATRIX_BCOO_STORAGE"
+fi
+if test x"$enable_b" = xyes ; then
+	WANT_MATRIX_BCSS_STORAGE=BCSR
+#	WANT_MATRIX_BCSS_STORAGE=BCSR,BCSC
+	WANT_MATRIX_STORAGE="$WANT_MATRIX_STORAGE,$WANT_MATRIX_BCSS_STORAGE"
+fi
+# we get rid of the comma
+WANT_MATRIX_STORAGE="`echo $WANT_MATRIX_STORAGE| sed 's/^,//g'`"
+#dnl	***********************************************************************
+if test "x${userset_nounroll_cflag}" != x ; then
+	no_unroll_flags="${userset_nounroll_cflag}"
+fi
+# for rsb_config.m4.in
+
+
+
+
+NOUNROLLCFLAGS="$no_unroll_flags"
+
+RSB_RSBENCH_LIBS="$RSB_RSBENCH_LIBS"
+
+RSB_RSBENCH_CFLAGS="$RSB_RSBENCH_CFLAGS"
+
+WANT_ROW_UNLOOP_FACTORS="$row_unrolls"
+
+WANT_LOOPING_KERNELS="$want_looping_kernels"
+
+WANT_COLUMN_UNLOOP_FACTORS="$column_unrolls"
+
+WANT_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR="$util_unrolls"
+
+WANT_HALFWORD_INDICES="yes"
+
+WANT_SPSM_DIAG_CHECK="$want_spsm_diagonal_check"
+
+WANT_TYPES="$want_matrix_types"
+
+WANT_MATRIX_BCSS_STORAGE="$WANT_MATRIX_BCSS_STORAGE"
+
+WANT_MATRIX_BCOO_STORAGE="$WANT_MATRIX_BCOO_STORAGE"
+
+WANT_MATRIX_LINKED_STORAGE="$WANT_MATRIX_LINKED_STORAGE"
+
+WANT_MATRIX_VB_STORAGE="$WANT_MATRIX_VB_STORAGE"
+
+WANT_MATRIX_STORAGE="$WANT_MATRIX_STORAGE"
+
+WANT_MATRIX_OPS="$want_matrix_ops"
+
+WANT_MATRIX_ALL_META_OPS="spmv,spsv"
+
+WANT_MATRIX_ALL_OPS="$all_matrix_ops"
+
+WANT_MATRIX_ALL_TYPES="$all_matrix_types"
+
+#dnl	***********************************************************************
+if test x = x"$ARFLAGS" ; then ARFLAGS="cru" ; fi # damn AIX ar
+ARFLAGS="$ARFLAGS"
+
+#dnl	***********************************************************************
+
+if test x"${enable_fortran_examples}" = x"yes" -a x"${FC}" = x"" ;  then
+	enable_fortran_examples=no
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: No Fortran compiler detected (FC environment variable). Will not build the Fortran examples." >&5
+$as_echo "$as_me: WARNING: No Fortran compiler detected (FC environment variable). Will not build the Fortran examples." >&2;}
+fi
+ if test x"$enable_fortran_examples" = xyes ; then
+  HAVE_FORTRAN_EXAMPLES_TRUE=
+  HAVE_FORTRAN_EXAMPLES_FALSE='#'
+else
+  HAVE_FORTRAN_EXAMPLES_TRUE='#'
+  HAVE_FORTRAN_EXAMPLES_FALSE=
+fi
+
+ if test x"$enable_c_examples" = xyes ; then
+  HAVE_C_EXAMPLES_TRUE=
+  HAVE_C_EXAMPLES_FALSE='#'
+else
+  HAVE_C_EXAMPLES_TRUE='#'
+  HAVE_C_EXAMPLES_FALSE=
+fi
+
+ if test x"$OCTAVE" != xfalse ; then
+  HAVE_OCTAVE_TRUE=
+  HAVE_OCTAVE_FALSE='#'
+else
+  HAVE_OCTAVE_TRUE='#'
+  HAVE_OCTAVE_FALSE=
+fi
+
+want_int=`echo "$want_matrix_types" | grep '\<int\>'`
+if test x"$OCTAVE" != xfalse -a x"$enable_octave_testing" = xyes ; then want_octave_testing=yes; else want_octave_testing=no; fi
+ if test x"$want_octave_testing" = x"yes" ; then
+  WANT_OCTAVE_TESTING_TRUE=
+  WANT_OCTAVE_TESTING_FALSE='#'
+else
+  WANT_OCTAVE_TESTING_TRUE='#'
+  WANT_OCTAVE_TESTING_FALSE=
+fi
+
+if test x"$OCTAVE" != xfalse -a x"$want_int" != x -a x"$enable_octave_testing" = xyes ; then want_octave_testing_and_int=yes; else want_octave_testing_and_int=no ; fi
+ if test x"$want_octave_testing_and_int" = x"yes" ; then
+  WANT_OCTAVE_TESTING_AND_INT_TRUE=
+  WANT_OCTAVE_TESTING_AND_INT_FALSE='#'
+else
+  WANT_OCTAVE_TESTING_AND_INT_TRUE='#'
+  WANT_OCTAVE_TESTING_AND_INT_FALSE=
+fi
+
+if test x"${FC}" = x"" ;  then
+if test x"${want_blas_sparse_mod_install}" = x"yes" -o x"${sparse_blas_interface}" = x"yes" ;  then
+	want_blas_sparse_mod_install=no;
+	sparse_blas_interface=no;
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: No Fortran compiler detected (FC environment variable). Will not build the BLAS interface." >&5
+$as_echo "$as_me: WARNING: No Fortran compiler detected (FC environment variable). Will not build the BLAS interface." >&2;}
+fi
+fi
+ if test x"$want_blas_sparse_mod_install" = x"yes"; then
+  WANT_BLAS_SPARSE_MOD_INSTALL_TRUE=
+  WANT_BLAS_SPARSE_MOD_INSTALL_FALSE='#'
+else
+  WANT_BLAS_SPARSE_MOD_INSTALL_TRUE='#'
+  WANT_BLAS_SPARSE_MOD_INSTALL_FALSE=
+fi
+
+ if test x"$CXX" != x ; then
+  WANT_CXX_TEST_RSBENCH_TRUE=
+  WANT_CXX_TEST_RSBENCH_FALSE='#'
+else
+  WANT_CXX_TEST_RSBENCH_TRUE='#'
+  WANT_CXX_TEST_RSBENCH_FALSE=
+fi
+
+ if test x"$DOXYGEN" != x"false" ; then
+  HAVE_DOXYGEN_TRUE=
+  HAVE_DOXYGEN_FALSE='#'
+else
+  HAVE_DOXYGEN_TRUE='#'
+  HAVE_DOXYGEN_FALSE=
+fi
+
+ if test x"$want_build_doc" = x"yes" ; then
+  WANT_BUILD_DOC_TRUE=
+  WANT_BUILD_DOC_FALSE='#'
+else
+  WANT_BUILD_DOC_TRUE='#'
+  WANT_BUILD_DOC_FALSE=
+fi
+
+ if test x"$want_install_pkg_config" = x"yes" ; then
+  HAVE_PKGCONFIG_INSTALL_TRUE=
+  HAVE_PKGCONFIG_INSTALL_FALSE='#'
+else
+  HAVE_PKGCONFIG_INSTALL_TRUE='#'
+  HAVE_PKGCONFIG_INSTALL_FALSE=
+fi
+
+ if test x"$HELP2MAN" != x"false" ; then
+  HAVE_HELP2MAN_TRUE=
+  HAVE_HELP2MAN_FALSE='#'
+else
+  HAVE_HELP2MAN_TRUE='#'
+  HAVE_HELP2MAN_FALSE=
+fi
+
+ if test x"$M4" != xfalse ; then
+  HAVE_M4_TRUE=
+  HAVE_M4_FALSE='#'
+else
+  HAVE_M4_TRUE='#'
+  HAVE_M4_FALSE=
+fi
+
+ if test x"$FC" != x ; then
+  HAVE_FC_TRUE=
+  HAVE_FC_FALSE='#'
+else
+  HAVE_FC_TRUE='#'
+  HAVE_FC_FALSE=
+fi
+
+ if test x"$sparse_blas_interface" = xyes ; then
+  HAVE_SPARSE_BLAS_INTERFACE_TRUE=
+  HAVE_SPARSE_BLAS_INTERFACE_FALSE='#'
+else
+  HAVE_SPARSE_BLAS_INTERFACE_TRUE='#'
+  HAVE_SPARSE_BLAS_INTERFACE_FALSE=
+fi
+
+ if test x"$enable_ihi" = xyes ; then
+  WANT_INTERNAL_HEADERS_INSTALL_TRUE=
+  WANT_INTERNAL_HEADERS_INSTALL_FALSE='#'
+else
+  WANT_INTERNAL_HEADERS_INSTALL_TRUE='#'
+  WANT_INTERNAL_HEADERS_INSTALL_FALSE=
+fi
+
+ if test x"$want_ompio" = x"yes" && test x"$enable_openmp" = x"yes" ; then
+  WANT_OMPIO_SUPPORT_TRUE=
+  WANT_OMPIO_SUPPORT_FALSE='#'
+else
+  WANT_OMPIO_SUPPORT_TRUE='#'
+  WANT_OMPIO_SUPPORT_FALSE=
+fi
+
+OCTAVE_FLAGS="--no-history --no-line-editing  --no-site-file --norc   --silent"
+
+#dnl	***********************************************************************
+#dnl	***********************************************************************
+ac_config_files="$ac_config_files librsb-config:librsb-config.in"
+
+ac_config_files="$ac_config_files librsb.pc:librsb.pc.in"
+
+ac_config_files="$ac_config_files rsb_config.m4 Makefile bench/Makefile doc/Makefile examples/Makefile scripts/Makefile m4/Makefile blas_sparse/Makefile"
+
+cat >confcache <<\_ACEOF
+# This file is a shell script that caches the results of configure
+# tests run on this system so they can be shared between configure
+# scripts and configure runs, see configure's option --config-cache.
+# It is not useful on other systems.  If it contains results you don't
+# want to keep, you may remove or edit it.
+#
+# config.status only pays attention to the cache file if you give it
+# the --recheck option to rerun configure.
+#
+# `ac_cv_env_foo' variables (set or unset) will be overridden when
+# loading this file, while other *unset* `ac_cv_foo' variables will be
+# assigned the following values.
+
+_ACEOF
+
+# The following way of writing the cache mishandles newlines in values,
+# but we know of no workaround that is simple, portable, and efficient.
+# So, we kill variables containing newlines.
+# Ultrix sh set writes to stderr and can't be redirected directly,
+# and sets the high bit in the cache file unless we assign to the vars.
+(
+  for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do
+    eval ac_val=\$$ac_var
+    case $ac_val in #(
+    *${as_nl}*)
+      case $ac_var in #(
+      *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
+$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+      esac
+      case $ac_var in #(
+      _ | IFS | as_nl) ;; #(
+      BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+      *) { eval $ac_var=; unset $ac_var;} ;;
+      esac ;;
+    esac
+  done
+
+  (set) 2>&1 |
+    case $as_nl`(ac_space=' '; set) 2>&1` in #(
+    *${as_nl}ac_space=\ *)
+      # `set' does not quote correctly, so add quotes: double-quote
+      # substitution turns \\\\ into \\, and sed turns \\ into \.
+      sed -n \
+	"s/'/'\\\\''/g;
+	  s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
+      ;; #(
+    *)
+      # `set' quotes correctly as required by POSIX, so do not add quotes.
+      sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+      ;;
+    esac |
+    sort
+) |
+  sed '
+     /^ac_cv_env_/b end
+     t clear
+     :clear
+     s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/
+     t end
+     s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
+     :end' >>confcache
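+# The sed above leaves cache entries in self-defaulting form, e.g. roughly
+#   ac_cv_header_stdlib_h=${ac_cv_header_stdlib_h=yes}
+# so re-reading confcache never overrides a value already set in the
+# environment; values containing braces get a "test ... || var=..." guard.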
+if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
+  if test -w "$cache_file"; then
+    if test "x$cache_file" != "x/dev/null"; then
+      { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5
+$as_echo "$as_me: updating cache $cache_file" >&6;}
+      if test ! -f "$cache_file" || test -h "$cache_file"; then
+	cat confcache >"$cache_file"
+      else
+        case $cache_file in #(
+        */* | ?:*)
+	  mv -f confcache "$cache_file"$$ &&
+	  mv -f "$cache_file"$$ "$cache_file" ;; #(
+        *)
+	  mv -f confcache "$cache_file" ;;
+	esac
+      fi
+    fi
+  else
+    { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5
+$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;}
+  fi
+fi
+rm -f confcache
+
+test "x$prefix" = xNONE && prefix=$ac_default_prefix
+# Let make expand exec_prefix.
+test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
+
+DEFS=-DHAVE_CONFIG_H
+
+ac_libobjs=
+ac_ltlibobjs=
+U=
+for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
+  # 1. Remove the extension, and $U if already installed.
+  ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
+  ac_i=`$as_echo "$ac_i" | sed "$ac_script"`
+  # 2. Prepend LIBOBJDIR.  When used with automake >= 1.10, LIBOBJDIR
+  #    will be set to the directory where LIBOBJS objects are built.
+  as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext"
+  as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo'
+done
+LIBOBJS=$ac_libobjs
+
+LTLIBOBJS=$ac_ltlibobjs
+
+
+ if test -n "$EXEEXT"; then
+  am__EXEEXT_TRUE=
+  am__EXEEXT_FALSE='#'
+else
+  am__EXEEXT_TRUE='#'
+  am__EXEEXT_FALSE=
+fi
+
+if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then
+  as_fn_error $? "conditional \"AMDEP\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then
+  as_fn_error $? "conditional \"am__fastdepCC\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then
+  as_fn_error $? "conditional \"am__fastdepCXX\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then
+  as_fn_error $? "conditional \"am__fastdepCC\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${am__fastdepCCAS_TRUE}" && test -z "${am__fastdepCCAS_FALSE}"; then
+  as_fn_error $? "conditional \"am__fastdepCCAS\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+
+if test -z "${HAVE_FORTRAN_EXAMPLES_TRUE}" && test -z "${HAVE_FORTRAN_EXAMPLES_FALSE}"; then
+  as_fn_error $? "conditional \"HAVE_FORTRAN_EXAMPLES\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${HAVE_C_EXAMPLES_TRUE}" && test -z "${HAVE_C_EXAMPLES_FALSE}"; then
+  as_fn_error $? "conditional \"HAVE_C_EXAMPLES\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${HAVE_OCTAVE_TRUE}" && test -z "${HAVE_OCTAVE_FALSE}"; then
+  as_fn_error $? "conditional \"HAVE_OCTAVE\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${WANT_OCTAVE_TESTING_TRUE}" && test -z "${WANT_OCTAVE_TESTING_FALSE}"; then
+  as_fn_error $? "conditional \"WANT_OCTAVE_TESTING\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${WANT_OCTAVE_TESTING_AND_INT_TRUE}" && test -z "${WANT_OCTAVE_TESTING_AND_INT_FALSE}"; then
+  as_fn_error $? "conditional \"WANT_OCTAVE_TESTING_AND_INT\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${WANT_BLAS_SPARSE_MOD_INSTALL_TRUE}" && test -z "${WANT_BLAS_SPARSE_MOD_INSTALL_FALSE}"; then
+  as_fn_error $? "conditional \"WANT_BLAS_SPARSE_MOD_INSTALL\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${WANT_CXX_TEST_RSBENCH_TRUE}" && test -z "${WANT_CXX_TEST_RSBENCH_FALSE}"; then
+  as_fn_error $? "conditional \"WANT_CXX_TEST_RSBENCH\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${HAVE_DOXYGEN_TRUE}" && test -z "${HAVE_DOXYGEN_FALSE}"; then
+  as_fn_error $? "conditional \"HAVE_DOXYGEN\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${WANT_BUILD_DOC_TRUE}" && test -z "${WANT_BUILD_DOC_FALSE}"; then
+  as_fn_error $? "conditional \"WANT_BUILD_DOC\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${HAVE_PKGCONFIG_INSTALL_TRUE}" && test -z "${HAVE_PKGCONFIG_INSTALL_FALSE}"; then
+  as_fn_error $? "conditional \"HAVE_PKGCONFIG_INSTALL\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${HAVE_HELP2MAN_TRUE}" && test -z "${HAVE_HELP2MAN_FALSE}"; then
+  as_fn_error $? "conditional \"HAVE_HELP2MAN\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${HAVE_M4_TRUE}" && test -z "${HAVE_M4_FALSE}"; then
+  as_fn_error $? "conditional \"HAVE_M4\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${HAVE_FC_TRUE}" && test -z "${HAVE_FC_FALSE}"; then
+  as_fn_error $? "conditional \"HAVE_FC\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${HAVE_SPARSE_BLAS_INTERFACE_TRUE}" && test -z "${HAVE_SPARSE_BLAS_INTERFACE_FALSE}"; then
+  as_fn_error $? "conditional \"HAVE_SPARSE_BLAS_INTERFACE\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${WANT_INTERNAL_HEADERS_INSTALL_TRUE}" && test -z "${WANT_INTERNAL_HEADERS_INSTALL_FALSE}"; then
+  as_fn_error $? "conditional \"WANT_INTERNAL_HEADERS_INSTALL\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+if test -z "${WANT_OMPIO_SUPPORT_TRUE}" && test -z "${WANT_OMPIO_SUPPORT_FALSE}"; then
+  as_fn_error $? "conditional \"WANT_OMPIO_SUPPORT\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
+
+: "${CONFIG_STATUS=./config.status}"
+ac_write_fail=0
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files $CONFIG_STATUS"
+{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5
+$as_echo "$as_me: creating $CONFIG_STATUS" >&6;}
+as_write_fail=0
+cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1
+#! $SHELL
+# Generated by $as_me.
+# Run this file to recreate the current configuration.
+# Compiler output produced by configure, useful for debugging
+# configure, is in config.log if it exists.
+
+debug=false
+ac_cs_recheck=false
+ac_cs_silent=false
+
+SHELL=\${CONFIG_SHELL-$SHELL}
+export SHELL
+_ASEOF
+cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1
+## -------------------- ##
+## M4sh Initialization. ##
+## -------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
+  emulate sh
+  NULLCMD=:
+  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+  # is contrary to our usage.  Disable this feature.
+  alias -g '${1+"$@"}'='"$@"'
+  setopt NO_GLOB_SUBST
+else
+  case `(set -o) 2>/dev/null` in #(
+  *posix*) :
+    set -o posix ;; #(
+  *) :
+     ;;
+esac
+fi
+
+
+as_nl='
+'
+export as_nl
+# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
+# Prefer a ksh shell builtin over an external printf program on Solaris,
+# but without wasting forks for bash or zsh.
+if test -z "$BASH_VERSION$ZSH_VERSION" \
+    && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
+  as_echo='print -r --'
+  as_echo_n='print -rn --'
+elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
+  as_echo='printf %s\n'
+  as_echo_n='printf %s'
+else
+  if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
+    as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
+    as_echo_n='/usr/ucb/echo -n'
+  else
+    as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
+    as_echo_n_body='eval
+      arg=$1;
+      case $arg in #(
+      *"$as_nl"*)
+	expr "X$arg" : "X\\(.*\\)$as_nl";
+	arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
+      esac;
+      expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
+    '
+    export as_echo_n_body
+    as_echo_n='sh -c $as_echo_n_body as_echo'
+  fi
+  export as_echo_body
+  as_echo='sh -c $as_echo_body as_echo'
+fi
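+# From here on, "$as_echo STRING" portably prints STRING plus a newline and
+# "$as_echo_n STRING" prints it without one, using whichever of the print,
+# printf, or echo fallbacks above was selected.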
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+  PATH_SEPARATOR=:
+  (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+    (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+      PATH_SEPARATOR=';'
+  }
+fi
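+# (The probe above sets PATH_SEPARATOR=';' only on DOS-like shells where a
+# ';'-separated PATH works and a ':'-separated one does not.)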
+
+
+# IFS
+# We need space, tab and new line, in precisely that order.  Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+IFS=" ""	$as_nl"
+
+# Find who we are.  Look in the path if we contain no directory separator.
+as_myself=
+case $0 in #((
+  *[\\/]* ) as_myself=$0 ;;
+  *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+  done
+IFS=$as_save_IFS
+
+     ;;
+esac
+# We did not find ourselves; most probably we were run as `sh COMMAND',
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+  as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+  $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+  exit 1
+fi
+
+# Unset variables that we do not need and which cause bugs (e.g. in
+# pre-3.0 UWIN ksh).  But do not cause bugs in bash 2.01; the "|| exit 1"
+# suppresses any "Segmentation fault" message there.  '((' could
+# trigger a bug in pdksh 5.2.14.
+for as_var in BASH_ENV ENV MAIL MAILPATH
+do eval test x\${$as_var+set} = xset \
+  && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# CDPATH.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+
+# as_fn_error STATUS ERROR [LINENO LOG_FD]
+# ----------------------------------------
+# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
+# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
+# script with STATUS, using 1 if that was 0.
+as_fn_error ()
+{
+  as_status=$1; test $as_status -eq 0 && as_status=1
+  if test "$4"; then
+    as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+    $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
+  fi
+  $as_echo "$as_me: error: $2" >&2
+  as_fn_exit $as_status
+} # as_fn_error
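+# Typical call, given that this script keeps its log open on fd 5:
+#   as_fn_error 77 "something went wrong" "$LINENO" 5
+# logs the error with its line number to fd 5, repeats it on stderr, and
+# exits with status 77 (the message here is purely illustrative).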
+
+
+# as_fn_set_status STATUS
+# -----------------------
+# Set $? to STATUS, without forking.
+as_fn_set_status ()
+{
+  return $1
+} # as_fn_set_status
+
+# as_fn_exit STATUS
+# -----------------
+# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
+as_fn_exit ()
+{
+  set +e
+  as_fn_set_status $1
+  exit $1
+} # as_fn_exit
+
+# as_fn_unset VAR
+# ---------------
+# Portably unset VAR.
+as_fn_unset ()
+{
+  { eval $1=; unset $1;}
+}
+as_unset=as_fn_unset
+# as_fn_append VAR VALUE
+# ----------------------
+# Append the text in VALUE to the end of the definition contained in VAR. Take
+# advantage of any shell optimizations that allow amortized linear growth over
+# repeated appends, instead of the typical quadratic growth present in naive
+# implementations.
+if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
+  eval 'as_fn_append ()
+  {
+    eval $1+=\$2
+  }'
+else
+  as_fn_append ()
+  {
+    eval $1=\$$1\$2
+  }
+fi # as_fn_append
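+# Usage sketch:
+#   as_fn_append ac_file_inputs " '$ac_f'"   # appends without forking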
+
+# as_fn_arith ARG...
+# ------------------
+# Perform arithmetic evaluation on the ARGs, and store the result in the
+# global $as_val. Take advantage of shells that can avoid forks. The arguments
+# must be portable across $(()) and expr.
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
+  eval 'as_fn_arith ()
+  {
+    as_val=$(( $* ))
+  }'
+else
+  as_fn_arith ()
+  {
+    as_val=`expr "$@" || test $? -eq 1`
+  }
+fi # as_fn_arith
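+# Usage sketch:
+#   as_fn_arith $# + 1   # leaves the result in $as_val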
+
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+   test "X`expr 00001 : '.*\(...\)'`" = X001; then
+  as_expr=expr
+else
+  as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+  as_basename=basename
+else
+  as_basename=false
+fi
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+  as_dirname=dirname
+else
+  as_dirname=false
+fi
+
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+	 X"$0" : 'X\(//\)$' \| \
+	 X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X/"$0" |
+    sed '/^.*\/\([^/][^/]*\)\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\/\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\/\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in #(((((
+-n*)
+  case `echo 'xy\c'` in
+  *c*) ECHO_T='	';;	# ECHO_T is single tab character.
+  xy)  ECHO_C='\c';;
+  *)   echo `echo ksh88 bug on AIX 6.1` > /dev/null
+       ECHO_T='	';;
+  esac;;
+*)
+  ECHO_N='-n';;
+esac
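+# With these set, `echo $ECHO_N "text$ECHO_C"' prints "text" without a
+# trailing newline on echoes honoring either -n or \c; ECHO_T (a tab) is
+# the fallback pad for shells that support neither.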
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+  rm -f conf$$.dir/conf$$.file
+else
+  rm -f conf$$.dir
+  mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+  if ln -s conf$$.file conf$$ 2>/dev/null; then
+    as_ln_s='ln -s'
+    # ... but there are two gotchas:
+    # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+    # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+    # In both cases, we have to default to `cp -pR'.
+    ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+      as_ln_s='cp -pR'
+  elif ln conf$$.file conf$$ 2>/dev/null; then
+    as_ln_s=ln
+  else
+    as_ln_s='cp -pR'
+  fi
+else
+  as_ln_s='cp -pR'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+
+# as_fn_mkdir_p
+# -------------
+# Create "$as_dir" as a directory, including parents if necessary.
+as_fn_mkdir_p ()
+{
+
+  case $as_dir in #(
+  -*) as_dir=./$as_dir;;
+  esac
+  test -d "$as_dir" || eval $as_mkdir_p || {
+    as_dirs=
+    while :; do
+      case $as_dir in #(
+      *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+      *) as_qdir=$as_dir;;
+      esac
+      as_dirs="'$as_qdir' $as_dirs"
+      as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+	 X"$as_dir" : 'X\(//\)[^/]' \| \
+	 X"$as_dir" : 'X\(//\)$' \| \
+	 X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)[^/].*/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+      test -d "$as_dir" && break
+    done
+    test -z "$as_dirs" || eval "mkdir $as_dirs"
+  } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
+
+
+} # as_fn_mkdir_p
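+# Usage sketch: the directory is passed via $as_dir rather than as "$1", e.g.
+#   as_dir=doc/html; as_fn_mkdir_p   # creates doc/ and doc/html as needed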
+if mkdir -p . 2>/dev/null; then
+  as_mkdir_p='mkdir -p "$as_dir"'
+else
+  test -d ./-p && rmdir ./-p
+  as_mkdir_p=false
+fi
+
+
+# as_fn_executable_p FILE
+# -----------------------
+# Test if FILE is an executable regular file.
+as_fn_executable_p ()
+{
+  test -f "$1" && test -x "$1"
+} # as_fn_executable_p
+as_test_x='test -x'
+as_executable_p=as_fn_executable_p
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
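+# Example: $as_echo "sys/time.h" | $as_tr_cpp prints SYS_TIME_H (the form
+# used in HAVE_* macro names), while $as_tr_sh would print sys_time_h.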
+
+
+exec 6>&1
+## ----------------------------------- ##
+## Main body of $CONFIG_STATUS script. ##
+## ----------------------------------- ##
+_ASEOF
+test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# Save the log message, to keep $0 and so on meaningful, and to
+# report actual input values of CONFIG_FILES etc. instead of their
+# values after options handling.
+ac_log="
+This file was extended by librsb $as_me 1.2.0-rc5, which was
+generated by GNU Autoconf 2.69.  Invocation command line was
+
+  CONFIG_FILES    = $CONFIG_FILES
+  CONFIG_HEADERS  = $CONFIG_HEADERS
+  CONFIG_LINKS    = $CONFIG_LINKS
+  CONFIG_COMMANDS = $CONFIG_COMMANDS
+  $ $0 $@
+
+on `(hostname || uname -n) 2>/dev/null | sed 1q`
+"
+
+_ACEOF
+
+case $ac_config_files in *"
+"*) set x $ac_config_files; shift; ac_config_files=$*;;
+esac
+
+case $ac_config_headers in *"
+"*) set x $ac_config_headers; shift; ac_config_headers=$*;;
+esac
+
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+# Files that config.status was made for.
+config_files="$ac_config_files"
+config_headers="$ac_config_headers"
+config_commands="$ac_config_commands"
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+ac_cs_usage="\
+\`$as_me' instantiates files and other configuration actions
+from templates according to the current configuration.  Unless the files
+and actions are specified as TAGs, all are instantiated by default.
+
+Usage: $0 [OPTION]... [TAG]...
+
+  -h, --help       print this help, then exit
+  -V, --version    print version number and configuration settings, then exit
+      --config     print configuration, then exit
+  -q, --quiet, --silent
+                   do not print progress messages
+  -d, --debug      don't remove temporary files
+      --recheck    update $as_me by reconfiguring in the same conditions
+      --file=FILE[:TEMPLATE]
+                   instantiate the configuration file FILE
+      --header=FILE[:TEMPLATE]
+                   instantiate the configuration header FILE
+
+Configuration files:
+$config_files
+
+Configuration headers:
+$config_headers
+
+Configuration commands:
+$config_commands
+
+Report bugs to <michelemartone_AT_users_DOT_sourceforge_DOT_net>."
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
+ac_cs_version="\\
+librsb config.status 1.2.0-rc5
+configured by $0, generated by GNU Autoconf 2.69,
+  with options \\"\$ac_cs_config\\"
+
+Copyright (C) 2012 Free Software Foundation, Inc.
+This config.status script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+
+ac_pwd='$ac_pwd'
+srcdir='$srcdir'
+INSTALL='$INSTALL'
+MKDIR_P='$MKDIR_P'
+AWK='$AWK'
+test -n "\$AWK" || AWK=awk
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# The default lists apply if the user does not specify any file.
+ac_need_defaults=:
+while test $# != 0
+do
+  case $1 in
+  --*=?*)
+    ac_option=`expr "X$1" : 'X\([^=]*\)='`
+    ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
+    ac_shift=:
+    ;;
+  --*=)
+    ac_option=`expr "X$1" : 'X\([^=]*\)='`
+    ac_optarg=
+    ac_shift=:
+    ;;
+  *)
+    ac_option=$1
+    ac_optarg=$2
+    ac_shift=shift
+    ;;
+  esac
+
+  case $ac_option in
+  # Handling of the options.
+  -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
+    ac_cs_recheck=: ;;
+  --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
+    $as_echo "$ac_cs_version"; exit ;;
+  --config | --confi | --conf | --con | --co | --c )
+    $as_echo "$ac_cs_config"; exit ;;
+  --debug | --debu | --deb | --de | --d | -d )
+    debug=: ;;
+  --file | --fil | --fi | --f )
+    $ac_shift
+    case $ac_optarg in
+    *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+    '') as_fn_error $? "missing file argument" ;;
+    esac
+    as_fn_append CONFIG_FILES " '$ac_optarg'"
+    ac_need_defaults=false;;
+  --header | --heade | --head | --hea )
+    $ac_shift
+    case $ac_optarg in
+    *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+    esac
+    as_fn_append CONFIG_HEADERS " '$ac_optarg'"
+    ac_need_defaults=false;;
+  --he | --h)
+    # Conflict between --help and --header
+    as_fn_error $? "ambiguous option: \`$1'
+Try \`$0 --help' for more information.";;
+  --help | --hel | -h )
+    $as_echo "$ac_cs_usage"; exit ;;
+  -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+  | -silent | --silent | --silen | --sile | --sil | --si | --s)
+    ac_cs_silent=: ;;
+
+  # This is an error.
+  -*) as_fn_error $? "unrecognized option: \`$1'
+Try \`$0 --help' for more information." ;;
+
+  *) as_fn_append ac_config_targets " $1"
+     ac_need_defaults=false ;;
+
+  esac
+  shift
+done
+
+ac_configure_extra_args=
+
+if $ac_cs_silent; then
+  exec 6>/dev/null
+  ac_configure_extra_args="$ac_configure_extra_args --silent"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+if \$ac_cs_recheck; then
+  set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+  shift
+  \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6
+  CONFIG_SHELL='$SHELL'
+  export CONFIG_SHELL
+  exec "\$@"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+exec 5>>config.log
+{
+  echo
+  sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
+## Running $as_me. ##
+_ASBOX
+  $as_echo "$ac_log"
+} >&5
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+#
+# INIT-COMMANDS
+#
+AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"
+
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+sed_quote_subst='$sed_quote_subst'
+double_quote_subst='$double_quote_subst'
+delay_variable_subst='$delay_variable_subst'
+macro_version='`$ECHO "$macro_version" | $SED "$delay_single_quote_subst"`'
+macro_revision='`$ECHO "$macro_revision" | $SED "$delay_single_quote_subst"`'
+enable_shared='`$ECHO "$enable_shared" | $SED "$delay_single_quote_subst"`'
+enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`'
+pic_mode='`$ECHO "$pic_mode" | $SED "$delay_single_quote_subst"`'
+enable_fast_install='`$ECHO "$enable_fast_install" | $SED "$delay_single_quote_subst"`'
+SHELL='`$ECHO "$SHELL" | $SED "$delay_single_quote_subst"`'
+ECHO='`$ECHO "$ECHO" | $SED "$delay_single_quote_subst"`'
+PATH_SEPARATOR='`$ECHO "$PATH_SEPARATOR" | $SED "$delay_single_quote_subst"`'
+host_alias='`$ECHO "$host_alias" | $SED "$delay_single_quote_subst"`'
+host='`$ECHO "$host" | $SED "$delay_single_quote_subst"`'
+host_os='`$ECHO "$host_os" | $SED "$delay_single_quote_subst"`'
+build_alias='`$ECHO "$build_alias" | $SED "$delay_single_quote_subst"`'
+build='`$ECHO "$build" | $SED "$delay_single_quote_subst"`'
+build_os='`$ECHO "$build_os" | $SED "$delay_single_quote_subst"`'
+SED='`$ECHO "$SED" | $SED "$delay_single_quote_subst"`'
+Xsed='`$ECHO "$Xsed" | $SED "$delay_single_quote_subst"`'
+GREP='`$ECHO "$GREP" | $SED "$delay_single_quote_subst"`'
+EGREP='`$ECHO "$EGREP" | $SED "$delay_single_quote_subst"`'
+FGREP='`$ECHO "$FGREP" | $SED "$delay_single_quote_subst"`'
+LD='`$ECHO "$LD" | $SED "$delay_single_quote_subst"`'
+NM='`$ECHO "$NM" | $SED "$delay_single_quote_subst"`'
+LN_S='`$ECHO "$LN_S" | $SED "$delay_single_quote_subst"`'
+max_cmd_len='`$ECHO "$max_cmd_len" | $SED "$delay_single_quote_subst"`'
+ac_objext='`$ECHO "$ac_objext" | $SED "$delay_single_quote_subst"`'
+exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`'
+lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`'
+lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`'
+lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`'
+lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`'
+lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`'
+reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`'
+reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`'
+OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`'
+deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`'
+file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`'
+file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`'
+want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`'
+DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`'
+sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`'
+AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`'
+AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`'
+archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`'
+STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`'
+RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`'
+old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`'
+old_postuninstall_cmds='`$ECHO "$old_postuninstall_cmds" | $SED "$delay_single_quote_subst"`'
+old_archive_cmds='`$ECHO "$old_archive_cmds" | $SED "$delay_single_quote_subst"`'
+lock_old_archive_extraction='`$ECHO "$lock_old_archive_extraction" | $SED "$delay_single_quote_subst"`'
+CC='`$ECHO "$CC" | $SED "$delay_single_quote_subst"`'
+CFLAGS='`$ECHO "$CFLAGS" | $SED "$delay_single_quote_subst"`'
+compiler='`$ECHO "$compiler" | $SED "$delay_single_quote_subst"`'
+GCC='`$ECHO "$GCC" | $SED "$delay_single_quote_subst"`'
+lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$delay_single_quote_subst"`'
+lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`'
+lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`'
+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`'
+nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`'
+lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`'
+objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`'
+MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`'
+lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`'
+need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`'
+MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`'
+DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`'
+NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`'
+LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`'
+OTOOL='`$ECHO "$OTOOL" | $SED "$delay_single_quote_subst"`'
+OTOOL64='`$ECHO "$OTOOL64" | $SED "$delay_single_quote_subst"`'
+libext='`$ECHO "$libext" | $SED "$delay_single_quote_subst"`'
+shrext_cmds='`$ECHO "$shrext_cmds" | $SED "$delay_single_quote_subst"`'
+extract_expsyms_cmds='`$ECHO "$extract_expsyms_cmds" | $SED "$delay_single_quote_subst"`'
+archive_cmds_need_lc='`$ECHO "$archive_cmds_need_lc" | $SED "$delay_single_quote_subst"`'
+enable_shared_with_static_runtimes='`$ECHO "$enable_shared_with_static_runtimes" | $SED "$delay_single_quote_subst"`'
+export_dynamic_flag_spec='`$ECHO "$export_dynamic_flag_spec" | $SED "$delay_single_quote_subst"`'
+whole_archive_flag_spec='`$ECHO "$whole_archive_flag_spec" | $SED "$delay_single_quote_subst"`'
+compiler_needs_object='`$ECHO "$compiler_needs_object" | $SED "$delay_single_quote_subst"`'
+old_archive_from_new_cmds='`$ECHO "$old_archive_from_new_cmds" | $SED "$delay_single_quote_subst"`'
+old_archive_from_expsyms_cmds='`$ECHO "$old_archive_from_expsyms_cmds" | $SED "$delay_single_quote_subst"`'
+archive_cmds='`$ECHO "$archive_cmds" | $SED "$delay_single_quote_subst"`'
+archive_expsym_cmds='`$ECHO "$archive_expsym_cmds" | $SED "$delay_single_quote_subst"`'
+module_cmds='`$ECHO "$module_cmds" | $SED "$delay_single_quote_subst"`'
+module_expsym_cmds='`$ECHO "$module_expsym_cmds" | $SED "$delay_single_quote_subst"`'
+with_gnu_ld='`$ECHO "$with_gnu_ld" | $SED "$delay_single_quote_subst"`'
+allow_undefined_flag='`$ECHO "$allow_undefined_flag" | $SED "$delay_single_quote_subst"`'
+no_undefined_flag='`$ECHO "$no_undefined_flag" | $SED "$delay_single_quote_subst"`'
+hardcode_libdir_flag_spec='`$ECHO "$hardcode_libdir_flag_spec" | $SED "$delay_single_quote_subst"`'
+hardcode_libdir_separator='`$ECHO "$hardcode_libdir_separator" | $SED "$delay_single_quote_subst"`'
+hardcode_direct='`$ECHO "$hardcode_direct" | $SED "$delay_single_quote_subst"`'
+hardcode_direct_absolute='`$ECHO "$hardcode_direct_absolute" | $SED "$delay_single_quote_subst"`'
+hardcode_minus_L='`$ECHO "$hardcode_minus_L" | $SED "$delay_single_quote_subst"`'
+hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_quote_subst"`'
+hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`'
+inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`'
+link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`'
+always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`'
+export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`'
+exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`'
+include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`'
+prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`'
+postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`'
+file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`'
+variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`'
+need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`'
+need_version='`$ECHO "$need_version" | $SED "$delay_single_quote_subst"`'
+version_type='`$ECHO "$version_type" | $SED "$delay_single_quote_subst"`'
+runpath_var='`$ECHO "$runpath_var" | $SED "$delay_single_quote_subst"`'
+shlibpath_var='`$ECHO "$shlibpath_var" | $SED "$delay_single_quote_subst"`'
+shlibpath_overrides_runpath='`$ECHO "$shlibpath_overrides_runpath" | $SED "$delay_single_quote_subst"`'
+libname_spec='`$ECHO "$libname_spec" | $SED "$delay_single_quote_subst"`'
+library_names_spec='`$ECHO "$library_names_spec" | $SED "$delay_single_quote_subst"`'
+soname_spec='`$ECHO "$soname_spec" | $SED "$delay_single_quote_subst"`'
+install_override_mode='`$ECHO "$install_override_mode" | $SED "$delay_single_quote_subst"`'
+postinstall_cmds='`$ECHO "$postinstall_cmds" | $SED "$delay_single_quote_subst"`'
+postuninstall_cmds='`$ECHO "$postuninstall_cmds" | $SED "$delay_single_quote_subst"`'
+finish_cmds='`$ECHO "$finish_cmds" | $SED "$delay_single_quote_subst"`'
+finish_eval='`$ECHO "$finish_eval" | $SED "$delay_single_quote_subst"`'
+hardcode_into_libs='`$ECHO "$hardcode_into_libs" | $SED "$delay_single_quote_subst"`'
+sys_lib_search_path_spec='`$ECHO "$sys_lib_search_path_spec" | $SED "$delay_single_quote_subst"`'
+sys_lib_dlsearch_path_spec='`$ECHO "$sys_lib_dlsearch_path_spec" | $SED "$delay_single_quote_subst"`'
+hardcode_action='`$ECHO "$hardcode_action" | $SED "$delay_single_quote_subst"`'
+enable_dlopen='`$ECHO "$enable_dlopen" | $SED "$delay_single_quote_subst"`'
+enable_dlopen_self='`$ECHO "$enable_dlopen_self" | $SED "$delay_single_quote_subst"`'
+enable_dlopen_self_static='`$ECHO "$enable_dlopen_self_static" | $SED "$delay_single_quote_subst"`'
+old_striplib='`$ECHO "$old_striplib" | $SED "$delay_single_quote_subst"`'
+striplib='`$ECHO "$striplib" | $SED "$delay_single_quote_subst"`'
+compiler_lib_search_dirs='`$ECHO "$compiler_lib_search_dirs" | $SED "$delay_single_quote_subst"`'
+predep_objects='`$ECHO "$predep_objects" | $SED "$delay_single_quote_subst"`'
+postdep_objects='`$ECHO "$postdep_objects" | $SED "$delay_single_quote_subst"`'
+predeps='`$ECHO "$predeps" | $SED "$delay_single_quote_subst"`'
+postdeps='`$ECHO "$postdeps" | $SED "$delay_single_quote_subst"`'
+compiler_lib_search_path='`$ECHO "$compiler_lib_search_path" | $SED "$delay_single_quote_subst"`'
+LD_FC='`$ECHO "$LD_FC" | $SED "$delay_single_quote_subst"`'
+LD_CXX='`$ECHO "$LD_CXX" | $SED "$delay_single_quote_subst"`'
+reload_flag_FC='`$ECHO "$reload_flag_FC" | $SED "$delay_single_quote_subst"`'
+reload_flag_CXX='`$ECHO "$reload_flag_CXX" | $SED "$delay_single_quote_subst"`'
+reload_cmds_FC='`$ECHO "$reload_cmds_FC" | $SED "$delay_single_quote_subst"`'
+reload_cmds_CXX='`$ECHO "$reload_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+old_archive_cmds_FC='`$ECHO "$old_archive_cmds_FC" | $SED "$delay_single_quote_subst"`'
+old_archive_cmds_CXX='`$ECHO "$old_archive_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+compiler_FC='`$ECHO "$compiler_FC" | $SED "$delay_single_quote_subst"`'
+compiler_CXX='`$ECHO "$compiler_CXX" | $SED "$delay_single_quote_subst"`'
+GCC_FC='`$ECHO "$GCC_FC" | $SED "$delay_single_quote_subst"`'
+GCC_CXX='`$ECHO "$GCC_CXX" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_no_builtin_flag_FC='`$ECHO "$lt_prog_compiler_no_builtin_flag_FC" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_no_builtin_flag_CXX='`$ECHO "$lt_prog_compiler_no_builtin_flag_CXX" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_pic_FC='`$ECHO "$lt_prog_compiler_pic_FC" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_pic_CXX='`$ECHO "$lt_prog_compiler_pic_CXX" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_wl_FC='`$ECHO "$lt_prog_compiler_wl_FC" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_wl_CXX='`$ECHO "$lt_prog_compiler_wl_CXX" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_static_FC='`$ECHO "$lt_prog_compiler_static_FC" | $SED "$delay_single_quote_subst"`'
+lt_prog_compiler_static_CXX='`$ECHO "$lt_prog_compiler_static_CXX" | $SED "$delay_single_quote_subst"`'
+lt_cv_prog_compiler_c_o_FC='`$ECHO "$lt_cv_prog_compiler_c_o_FC" | $SED "$delay_single_quote_subst"`'
+lt_cv_prog_compiler_c_o_CXX='`$ECHO "$lt_cv_prog_compiler_c_o_CXX" | $SED "$delay_single_quote_subst"`'
+archive_cmds_need_lc_FC='`$ECHO "$archive_cmds_need_lc_FC" | $SED "$delay_single_quote_subst"`'
+archive_cmds_need_lc_CXX='`$ECHO "$archive_cmds_need_lc_CXX" | $SED "$delay_single_quote_subst"`'
+enable_shared_with_static_runtimes_FC='`$ECHO "$enable_shared_with_static_runtimes_FC" | $SED "$delay_single_quote_subst"`'
+enable_shared_with_static_runtimes_CXX='`$ECHO "$enable_shared_with_static_runtimes_CXX" | $SED "$delay_single_quote_subst"`'
+export_dynamic_flag_spec_FC='`$ECHO "$export_dynamic_flag_spec_FC" | $SED "$delay_single_quote_subst"`'
+export_dynamic_flag_spec_CXX='`$ECHO "$export_dynamic_flag_spec_CXX" | $SED "$delay_single_quote_subst"`'
+whole_archive_flag_spec_FC='`$ECHO "$whole_archive_flag_spec_FC" | $SED "$delay_single_quote_subst"`'
+whole_archive_flag_spec_CXX='`$ECHO "$whole_archive_flag_spec_CXX" | $SED "$delay_single_quote_subst"`'
+compiler_needs_object_FC='`$ECHO "$compiler_needs_object_FC" | $SED "$delay_single_quote_subst"`'
+compiler_needs_object_CXX='`$ECHO "$compiler_needs_object_CXX" | $SED "$delay_single_quote_subst"`'
+old_archive_from_new_cmds_FC='`$ECHO "$old_archive_from_new_cmds_FC" | $SED "$delay_single_quote_subst"`'
+old_archive_from_new_cmds_CXX='`$ECHO "$old_archive_from_new_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+old_archive_from_expsyms_cmds_FC='`$ECHO "$old_archive_from_expsyms_cmds_FC" | $SED "$delay_single_quote_subst"`'
+old_archive_from_expsyms_cmds_CXX='`$ECHO "$old_archive_from_expsyms_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+archive_cmds_FC='`$ECHO "$archive_cmds_FC" | $SED "$delay_single_quote_subst"`'
+archive_cmds_CXX='`$ECHO "$archive_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+archive_expsym_cmds_FC='`$ECHO "$archive_expsym_cmds_FC" | $SED "$delay_single_quote_subst"`'
+archive_expsym_cmds_CXX='`$ECHO "$archive_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+module_cmds_FC='`$ECHO "$module_cmds_FC" | $SED "$delay_single_quote_subst"`'
+module_cmds_CXX='`$ECHO "$module_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+module_expsym_cmds_FC='`$ECHO "$module_expsym_cmds_FC" | $SED "$delay_single_quote_subst"`'
+module_expsym_cmds_CXX='`$ECHO "$module_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+with_gnu_ld_FC='`$ECHO "$with_gnu_ld_FC" | $SED "$delay_single_quote_subst"`'
+with_gnu_ld_CXX='`$ECHO "$with_gnu_ld_CXX" | $SED "$delay_single_quote_subst"`'
+allow_undefined_flag_FC='`$ECHO "$allow_undefined_flag_FC" | $SED "$delay_single_quote_subst"`'
+allow_undefined_flag_CXX='`$ECHO "$allow_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`'
+no_undefined_flag_FC='`$ECHO "$no_undefined_flag_FC" | $SED "$delay_single_quote_subst"`'
+no_undefined_flag_CXX='`$ECHO "$no_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_libdir_flag_spec_FC='`$ECHO "$hardcode_libdir_flag_spec_FC" | $SED "$delay_single_quote_subst"`'
+hardcode_libdir_flag_spec_CXX='`$ECHO "$hardcode_libdir_flag_spec_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_libdir_separator_FC='`$ECHO "$hardcode_libdir_separator_FC" | $SED "$delay_single_quote_subst"`'
+hardcode_libdir_separator_CXX='`$ECHO "$hardcode_libdir_separator_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_direct_FC='`$ECHO "$hardcode_direct_FC" | $SED "$delay_single_quote_subst"`'
+hardcode_direct_CXX='`$ECHO "$hardcode_direct_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_direct_absolute_FC='`$ECHO "$hardcode_direct_absolute_FC" | $SED "$delay_single_quote_subst"`'
+hardcode_direct_absolute_CXX='`$ECHO "$hardcode_direct_absolute_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_minus_L_FC='`$ECHO "$hardcode_minus_L_FC" | $SED "$delay_single_quote_subst"`'
+hardcode_minus_L_CXX='`$ECHO "$hardcode_minus_L_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_shlibpath_var_FC='`$ECHO "$hardcode_shlibpath_var_FC" | $SED "$delay_single_quote_subst"`'
+hardcode_shlibpath_var_CXX='`$ECHO "$hardcode_shlibpath_var_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_automatic_FC='`$ECHO "$hardcode_automatic_FC" | $SED "$delay_single_quote_subst"`'
+hardcode_automatic_CXX='`$ECHO "$hardcode_automatic_CXX" | $SED "$delay_single_quote_subst"`'
+inherit_rpath_FC='`$ECHO "$inherit_rpath_FC" | $SED "$delay_single_quote_subst"`'
+inherit_rpath_CXX='`$ECHO "$inherit_rpath_CXX" | $SED "$delay_single_quote_subst"`'
+link_all_deplibs_FC='`$ECHO "$link_all_deplibs_FC" | $SED "$delay_single_quote_subst"`'
+link_all_deplibs_CXX='`$ECHO "$link_all_deplibs_CXX" | $SED "$delay_single_quote_subst"`'
+always_export_symbols_FC='`$ECHO "$always_export_symbols_FC" | $SED "$delay_single_quote_subst"`'
+always_export_symbols_CXX='`$ECHO "$always_export_symbols_CXX" | $SED "$delay_single_quote_subst"`'
+export_symbols_cmds_FC='`$ECHO "$export_symbols_cmds_FC" | $SED "$delay_single_quote_subst"`'
+export_symbols_cmds_CXX='`$ECHO "$export_symbols_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+exclude_expsyms_FC='`$ECHO "$exclude_expsyms_FC" | $SED "$delay_single_quote_subst"`'
+exclude_expsyms_CXX='`$ECHO "$exclude_expsyms_CXX" | $SED "$delay_single_quote_subst"`'
+include_expsyms_FC='`$ECHO "$include_expsyms_FC" | $SED "$delay_single_quote_subst"`'
+include_expsyms_CXX='`$ECHO "$include_expsyms_CXX" | $SED "$delay_single_quote_subst"`'
+prelink_cmds_FC='`$ECHO "$prelink_cmds_FC" | $SED "$delay_single_quote_subst"`'
+prelink_cmds_CXX='`$ECHO "$prelink_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+postlink_cmds_FC='`$ECHO "$postlink_cmds_FC" | $SED "$delay_single_quote_subst"`'
+postlink_cmds_CXX='`$ECHO "$postlink_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+file_list_spec_FC='`$ECHO "$file_list_spec_FC" | $SED "$delay_single_quote_subst"`'
+file_list_spec_CXX='`$ECHO "$file_list_spec_CXX" | $SED "$delay_single_quote_subst"`'
+hardcode_action_FC='`$ECHO "$hardcode_action_FC" | $SED "$delay_single_quote_subst"`'
+hardcode_action_CXX='`$ECHO "$hardcode_action_CXX" | $SED "$delay_single_quote_subst"`'
+compiler_lib_search_dirs_FC='`$ECHO "$compiler_lib_search_dirs_FC" | $SED "$delay_single_quote_subst"`'
+compiler_lib_search_dirs_CXX='`$ECHO "$compiler_lib_search_dirs_CXX" | $SED "$delay_single_quote_subst"`'
+predep_objects_FC='`$ECHO "$predep_objects_FC" | $SED "$delay_single_quote_subst"`'
+predep_objects_CXX='`$ECHO "$predep_objects_CXX" | $SED "$delay_single_quote_subst"`'
+postdep_objects_FC='`$ECHO "$postdep_objects_FC" | $SED "$delay_single_quote_subst"`'
+postdep_objects_CXX='`$ECHO "$postdep_objects_CXX" | $SED "$delay_single_quote_subst"`'
+predeps_FC='`$ECHO "$predeps_FC" | $SED "$delay_single_quote_subst"`'
+predeps_CXX='`$ECHO "$predeps_CXX" | $SED "$delay_single_quote_subst"`'
+postdeps_FC='`$ECHO "$postdeps_FC" | $SED "$delay_single_quote_subst"`'
+postdeps_CXX='`$ECHO "$postdeps_CXX" | $SED "$delay_single_quote_subst"`'
+compiler_lib_search_path_FC='`$ECHO "$compiler_lib_search_path_FC" | $SED "$delay_single_quote_subst"`'
+compiler_lib_search_path_CXX='`$ECHO "$compiler_lib_search_path_CXX" | $SED "$delay_single_quote_subst"`'
+
+LTCC='$LTCC'
+LTCFLAGS='$LTCFLAGS'
+compiler='$compiler_DEFAULT'
+
+# A function that is used when there is no print builtin or printf.
+func_fallback_echo ()
+{
+  eval 'cat <<_LTECHO_EOF
+\$1
+_LTECHO_EOF'
+}
+
+# Quote evaled strings.
+for var in SHELL \
+ECHO \
+PATH_SEPARATOR \
+SED \
+GREP \
+EGREP \
+FGREP \
+LD \
+NM \
+LN_S \
+lt_SP2NL \
+lt_NL2SP \
+reload_flag \
+OBJDUMP \
+deplibs_check_method \
+file_magic_cmd \
+file_magic_glob \
+want_nocaseglob \
+DLLTOOL \
+sharedlib_from_linklib_cmd \
+AR \
+AR_FLAGS \
+archiver_list_spec \
+STRIP \
+RANLIB \
+CC \
+CFLAGS \
+compiler \
+lt_cv_sys_global_symbol_pipe \
+lt_cv_sys_global_symbol_to_cdecl \
+lt_cv_sys_global_symbol_to_c_name_address \
+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \
+nm_file_list_spec \
+lt_prog_compiler_no_builtin_flag \
+lt_prog_compiler_pic \
+lt_prog_compiler_wl \
+lt_prog_compiler_static \
+lt_cv_prog_compiler_c_o \
+need_locks \
+MANIFEST_TOOL \
+DSYMUTIL \
+NMEDIT \
+LIPO \
+OTOOL \
+OTOOL64 \
+shrext_cmds \
+export_dynamic_flag_spec \
+whole_archive_flag_spec \
+compiler_needs_object \
+with_gnu_ld \
+allow_undefined_flag \
+no_undefined_flag \
+hardcode_libdir_flag_spec \
+hardcode_libdir_separator \
+exclude_expsyms \
+include_expsyms \
+file_list_spec \
+variables_saved_for_relink \
+libname_spec \
+library_names_spec \
+soname_spec \
+install_override_mode \
+finish_eval \
+old_striplib \
+striplib \
+compiler_lib_search_dirs \
+predep_objects \
+postdep_objects \
+predeps \
+postdeps \
+compiler_lib_search_path \
+LD_FC \
+LD_CXX \
+reload_flag_FC \
+reload_flag_CXX \
+compiler_FC \
+compiler_CXX \
+lt_prog_compiler_no_builtin_flag_FC \
+lt_prog_compiler_no_builtin_flag_CXX \
+lt_prog_compiler_pic_FC \
+lt_prog_compiler_pic_CXX \
+lt_prog_compiler_wl_FC \
+lt_prog_compiler_wl_CXX \
+lt_prog_compiler_static_FC \
+lt_prog_compiler_static_CXX \
+lt_cv_prog_compiler_c_o_FC \
+lt_cv_prog_compiler_c_o_CXX \
+export_dynamic_flag_spec_FC \
+export_dynamic_flag_spec_CXX \
+whole_archive_flag_spec_FC \
+whole_archive_flag_spec_CXX \
+compiler_needs_object_FC \
+compiler_needs_object_CXX \
+with_gnu_ld_FC \
+with_gnu_ld_CXX \
+allow_undefined_flag_FC \
+allow_undefined_flag_CXX \
+no_undefined_flag_FC \
+no_undefined_flag_CXX \
+hardcode_libdir_flag_spec_FC \
+hardcode_libdir_flag_spec_CXX \
+hardcode_libdir_separator_FC \
+hardcode_libdir_separator_CXX \
+exclude_expsyms_FC \
+exclude_expsyms_CXX \
+include_expsyms_FC \
+include_expsyms_CXX \
+file_list_spec_FC \
+file_list_spec_CXX \
+compiler_lib_search_dirs_FC \
+compiler_lib_search_dirs_CXX \
+predep_objects_FC \
+predep_objects_CXX \
+postdep_objects_FC \
+postdep_objects_CXX \
+predeps_FC \
+predeps_CXX \
+postdeps_FC \
+postdeps_CXX \
+compiler_lib_search_path_FC \
+compiler_lib_search_path_CXX; do
+    case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
+    *[\\\\\\\`\\"\\\$]*)
+      eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\""
+      ;;
+    *)
+      eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
+      ;;
+    esac
+done
+
+# Double-quote double-evaled strings.
+for var in reload_cmds \
+old_postinstall_cmds \
+old_postuninstall_cmds \
+old_archive_cmds \
+extract_expsyms_cmds \
+old_archive_from_new_cmds \
+old_archive_from_expsyms_cmds \
+archive_cmds \
+archive_expsym_cmds \
+module_cmds \
+module_expsym_cmds \
+export_symbols_cmds \
+prelink_cmds \
+postlink_cmds \
+postinstall_cmds \
+postuninstall_cmds \
+finish_cmds \
+sys_lib_search_path_spec \
+sys_lib_dlsearch_path_spec \
+reload_cmds_FC \
+reload_cmds_CXX \
+old_archive_cmds_FC \
+old_archive_cmds_CXX \
+old_archive_from_new_cmds_FC \
+old_archive_from_new_cmds_CXX \
+old_archive_from_expsyms_cmds_FC \
+old_archive_from_expsyms_cmds_CXX \
+archive_cmds_FC \
+archive_cmds_CXX \
+archive_expsym_cmds_FC \
+archive_expsym_cmds_CXX \
+module_cmds_FC \
+module_cmds_CXX \
+module_expsym_cmds_FC \
+module_expsym_cmds_CXX \
+export_symbols_cmds_FC \
+export_symbols_cmds_CXX \
+prelink_cmds_FC \
+prelink_cmds_CXX \
+postlink_cmds_FC \
+postlink_cmds_CXX; do
+    case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
+    *[\\\\\\\`\\"\\\$]*)
+      eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\""
+      ;;
+    *)
+      eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
+      ;;
+    esac
+done
+
+ac_aux_dir='$ac_aux_dir'
+xsi_shell='$xsi_shell'
+lt_shell_append='$lt_shell_append'
+
+# See if we are running on zsh, and set the options which allow our
+# commands through without removal of \ escapes.
+if test -n "\${ZSH_VERSION+set}" ; then
+   setopt NO_GLOB_SUBST
+fi
+
+
+    PACKAGE='$PACKAGE'
+    VERSION='$VERSION'
+    TIMESTAMP='$TIMESTAMP'
+    RM='$RM'
+    ofile='$ofile'
+
+
+
+
+
+
+
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+
+# Handling of arguments.
+for ac_config_target in $ac_config_targets
+do
+  case $ac_config_target in
+    "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;;
+    "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;;
+    "rsb-config.h") CONFIG_HEADERS="$CONFIG_HEADERS rsb-config.h" ;;
+    "librsb-config") CONFIG_FILES="$CONFIG_FILES librsb-config:librsb-config.in" ;;
+    "librsb.pc") CONFIG_FILES="$CONFIG_FILES librsb.pc:librsb.pc.in" ;;
+    "rsb_config.m4") CONFIG_FILES="$CONFIG_FILES rsb_config.m4" ;;
+    "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
+    "bench/Makefile") CONFIG_FILES="$CONFIG_FILES bench/Makefile" ;;
+    "doc/Makefile") CONFIG_FILES="$CONFIG_FILES doc/Makefile" ;;
+    "examples/Makefile") CONFIG_FILES="$CONFIG_FILES examples/Makefile" ;;
+    "scripts/Makefile") CONFIG_FILES="$CONFIG_FILES scripts/Makefile" ;;
+    "m4/Makefile") CONFIG_FILES="$CONFIG_FILES m4/Makefile" ;;
+    "blas_sparse/Makefile") CONFIG_FILES="$CONFIG_FILES blas_sparse/Makefile" ;;
+
+  *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
+  esac
+done
+
+
+# If the user did not use the arguments to specify the items to instantiate,
+# then the envvar interface is used.  Set only those that are not already set.
+# We use the long form for the default assignment because of an extremely
+# bizarre bug on SunOS 4.1.3.
+if $ac_need_defaults; then
+  test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
+  test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers
+  test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands
+fi
+
+# Have a temporary directory for convenience.  Make it in the build tree
+# simply because there is no reason not to have it here; in addition,
+# creating and moving files from /tmp can sometimes cause problems.
+# Hook for its removal unless debugging.
+# Note that there is a small window in which the directory will not be cleaned:
+# after its creation but before its name has been assigned to `$tmp'.
+$debug ||
+{
+  tmp= ac_tmp=
+  trap 'exit_status=$?
+  : "${ac_tmp:=$tmp}"
+  { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status
+' 0
+  trap 'as_fn_exit 1' 1 2 13 15
+}
+# Create a (secure) tmp directory for tmp files.
+
+{
+  tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
+  test -d "$tmp"
+}  ||
+{
+  tmp=./conf$$-$RANDOM
+  (umask 077 && mkdir "$tmp")
+} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5
+ac_tmp=$tmp
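+# Either way, $ac_tmp now names a scratch directory created under umask 077,
+# so it is readable and writable by the current user only.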
+
+# Set up the scripts for CONFIG_FILES section.
+# No need to generate them if there are no CONFIG_FILES.
+# This happens for instance with `./config.status config.h'.
+if test -n "$CONFIG_FILES"; then
+
+
+ac_cr=`echo X | tr X '\015'`
+# On Cygwin, bash can eat \r inside `` if the user requested igncr.
+# But we know of no other shell where ac_cr would be empty at this
+# point, so we can use a bashism as a fallback.
+if test "x$ac_cr" = x; then
+  eval ac_cr=\$\'\\r\'
+fi
+ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' </dev/null 2>/dev/null`
+if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then
+  ac_cs_awk_cr='\\r'
+else
+  ac_cs_awk_cr=$ac_cr
+fi
+
+echo 'BEGIN {' >"$ac_tmp/subs1.awk" &&
+_ACEOF
+
+
+{
+  echo "cat >conf$$subs.awk <<_ACEOF" &&
+  echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' &&
+  echo "_ACEOF"
+} >conf$$subs.sh ||
+  as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'`
+ac_delim='%!_!# '
+for ac_last_try in false false false false false :; do
+  . ./conf$$subs.sh ||
+    as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+
+  ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X`
+  if test $ac_delim_n = $ac_delim_num; then
+    break
+  elif $ac_last_try; then
+    as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
+  else
+    ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+  fi
+done
+rm -f conf$$subs.sh
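+# The loop above retried with progressively longer delimiters (at most six
+# times) until no substituted value happened to contain $ac_delim itself.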
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK &&
+_ACEOF
+sed -n '
+h
+s/^/S["/; s/!.*/"]=/
+p
+g
+s/^[^!]*!//
+:repl
+t repl
+s/'"$ac_delim"'$//
+t delim
+:nl
+h
+s/\(.\{148\}\)..*/\1/
+t more1
+s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/
+p
+n
+b repl
+:more1
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t nl
+:delim
+h
+s/\(.\{148\}\)..*/\1/
+t more2
+s/["\\]/\\&/g; s/^/"/; s/$/"/
+p
+b
+:more2
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t delim
+' <conf$$subs.awk | sed '
+/^[^""]/{
+  N
+  s/\n//
+}
+' >>$CONFIG_STATUS || ac_write_fail=1
+rm -f conf$$subs.awk
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+_ACAWK
+cat >>"\$ac_tmp/subs1.awk" <<_ACAWK &&
+  for (key in S) S_is_set[key] = 1
+  FS = ""
+
+}
+{
+  line = $ 0
+  nfields = split(line, field, "@")
+  substed = 0
+  len = length(field[1])
+  for (i = 2; i < nfields; i++) {
+    key = field[i]
+    keylen = length(key)
+    if (S_is_set[key]) {
+      value = S[key]
+      line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3)
+      len += length(value) + length(field[++i])
+      substed = 1
+    } else
+      len += 1 + keylen
+  }
+
+  print line
+}
+
+_ACAWK
+_ACEOF
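+# subs1.awk, assembled by the lines just written into config.status, holds
+# an S[] table of every output variable and rewrites each @VAR@ occurrence
+# in a template line with S["VAR"]; this is the machinery config.status
+# uses to instantiate the CONFIG_FILES templates.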
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
+  sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g"
+else
+  cat
+fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \
+  || as_fn_error $? "could not setup config files machinery" "$LINENO" 5
+_ACEOF
+
+# VPATH may cause trouble with some makes, so we remove sole $(srcdir),
+# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and
+# trailing colons and then remove the whole line if VPATH becomes empty
+# (actually we leave an empty line to preserve line numbers).
+if test "x$srcdir" = x.; then
+  ac_vpsub='/^[	 ]*VPATH[	 ]*=[	 ]*/{
+h
+s///
+s/^/:/
+s/[	 ]*$/:/
+s/:\$(srcdir):/:/g
+s/:\${srcdir}:/:/g
+s/:@srcdir@:/:/g
+s/^:*//
+s/:*$//
+x
+s/\(=[	 ]*\).*/\1/
+G
+s/\n//
+s/^[^=]*=[	 ]*$//
+}'
+fi
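+# Example of the rewrite above: with srcdir=".", a template line such as
+#   VPATH = @srcdir@:extra
+# becomes "VPATH = extra"; if nothing is left, the assignment is emptied
+# (an empty line is kept so line numbers are preserved).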
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+fi # test -n "$CONFIG_FILES"
+
+# Set up the scripts for CONFIG_HEADERS section.
+# No need to generate them if there are no CONFIG_HEADERS.
+# This happens for instance with `./config.status Makefile'.
+if test -n "$CONFIG_HEADERS"; then
+cat >"$ac_tmp/defines.awk" <<\_ACAWK ||
+BEGIN {
+_ACEOF
+
+# Transform confdefs.h into an awk script `defines.awk', embedded as
+# here-document in config.status, that substitutes the proper values into
+# config.h.in to produce config.h.
+
+# Create a delimiter string that does not exist in confdefs.h, to ease
+# handling of long lines.
+ac_delim='%!_!# '
+for ac_last_try in false false :; do
+  ac_tt=`sed -n "/$ac_delim/p" confdefs.h`
+  if test -z "$ac_tt"; then
+    break
+  elif $ac_last_try; then
+    as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5
+  else
+    ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+  fi
+done
+
+# For the awk script, D is an array of macro values keyed by name,
+# likewise P contains macro parameters if any.  Preserve backslash
+# newline sequences.
+
+ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]*
+sed -n '
+s/.\{148\}/&'"$ac_delim"'/g
+t rset
+:rset
+s/^[	 ]*#[	 ]*define[	 ][	 ]*/ /
+t def
+d
+:def
+s/\\$//
+t bsnl
+s/["\\]/\\&/g
+s/^ \('"$ac_word_re"'\)\(([^()]*)\)[	 ]*\(.*\)/P["\1"]="\2"\
+D["\1"]=" \3"/p
+s/^ \('"$ac_word_re"'\)[	 ]*\(.*\)/D["\1"]=" \2"/p
+d
+:bsnl
+s/["\\]/\\&/g
+s/^ \('"$ac_word_re"'\)\(([^()]*)\)[	 ]*\(.*\)/P["\1"]="\2"\
+D["\1"]=" \3\\\\\\n"\\/p
+t cont
+s/^ \('"$ac_word_re"'\)[	 ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p
+t cont
+d
+:cont
+n
+s/.\{148\}/&'"$ac_delim"'/g
+t clear
+:clear
+s/\\$//
+t bsnlc
+s/["\\]/\\&/g; s/^/"/; s/$/"/p
+d
+:bsnlc
+s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p
+b cont
+' <confdefs.h | sed '
+s/'"$ac_delim"'/"\\\
+"/g' >>$CONFIG_STATUS || ac_write_fail=1
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+  for (key in D) D_is_set[key] = 1
+  FS = ""
+}
+/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ {
+  line = \$ 0
+  split(line, arg, " ")
+  if (arg[1] == "#") {
+    defundef = arg[2]
+    mac1 = arg[3]
+  } else {
+    defundef = substr(arg[1], 2)
+    mac1 = arg[2]
+  }
+  split(mac1, mac2, "(") #)
+  macro = mac2[1]
+  prefix = substr(line, 1, index(line, defundef) - 1)
+  if (D_is_set[macro]) {
+    # Preserve the white space surrounding the "#".
+    print prefix "define", macro P[macro] D[macro]
+    next
+  } else {
+    # Replace #undef with comments.  This is necessary, for example,
+    # in the case of _POSIX_SOURCE, which is predefined and required
+    # on some systems where configure will not decide to define it.
+    if (defundef == "undef") {
+      print "/*", prefix defundef, macro, "*/"
+      next
+    }
+  }
+}
+{ print }
+_ACAWK
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+  as_fn_error $? "could not setup config headers machinery" "$LINENO" 5
+fi # test -n "$CONFIG_HEADERS"
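+# Editor's note -- illustrative before/after for defines.awk (hypothetical
+# macro names): given D["HAVE_FOO"] = " 1", the config.h.in line
+#
+#   #undef HAVE_FOO
+#
+# is emitted as
+#
+#   #define HAVE_FOO 1
+#
+# whereas an #undef for a macro configure did not define is kept only as a
+# comment, e.g. "/* #undef HAVE_BAR */".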
+
+
+eval set X "  :F $CONFIG_FILES  :H $CONFIG_HEADERS    :C $CONFIG_COMMANDS"
+shift
+for ac_tag
+do
+  case $ac_tag in
+  :[FHLC]) ac_mode=$ac_tag; continue;;
+  esac
+  case $ac_mode$ac_tag in
+  :[FHL]*:*);;
+  :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;;
+  :[FH]-) ac_tag=-:-;;
+  :[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
+  esac
+  ac_save_IFS=$IFS
+  IFS=:
+  set x $ac_tag
+  IFS=$ac_save_IFS
+  shift
+  ac_file=$1
+  shift
+
+  case $ac_mode in
+  :L) ac_source=$1;;
+  :[FH])
+    ac_file_inputs=
+    for ac_f
+    do
+      case $ac_f in
+      -) ac_f="$ac_tmp/stdin";;
+      *) # Look for the file first in the build tree, then in the source tree
+	 # (if the path is not absolute).  The absolute path cannot be DOS-style,
+	 # because $ac_f cannot contain `:'.
+	 test -f "$ac_f" ||
+	   case $ac_f in
+	   [\\/$]*) false;;
+	   *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
+	   esac ||
+	   as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;;
+      esac
+      case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
+      as_fn_append ac_file_inputs " '$ac_f'"
+    done
+
+    # Let's still pretend it is `configure' which instantiates (i.e., don't
+    # use $as_me); people would be surprised to read:
+    #    /* config.h.  Generated by config.status.  */
+    configure_input='Generated from '`
+	  $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g'
+	`' by configure.'
+    if test x"$ac_file" != x-; then
+      configure_input="$ac_file.  $configure_input"
+      { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5
+$as_echo "$as_me: creating $ac_file" >&6;}
+    fi
+    # Neutralize special characters interpreted by sed in replacement strings.
+    case $configure_input in #(
+    *\&* | *\|* | *\\* )
+       ac_sed_conf_input=`$as_echo "$configure_input" |
+       sed 's/[\\\\&|]/\\\\&/g'`;; #(
+    *) ac_sed_conf_input=$configure_input;;
+    esac
+
+    case $ac_tag in
+    *:-:* | *:-) cat >"$ac_tmp/stdin" \
+      || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;;
+    esac
+    ;;
+  esac
+
+  ac_dir=`$as_dirname -- "$ac_file" ||
+$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+	 X"$ac_file" : 'X\(//\)[^/]' \| \
+	 X"$ac_file" : 'X\(//\)$' \| \
+	 X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$ac_file" |
+    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)[^/].*/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+  as_dir="$ac_dir"; as_fn_mkdir_p
+  ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+  ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+  # A ".." for each directory in $ac_dir_suffix.
+  ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+  case $ac_top_builddir_sub in
+  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+  esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+  .)  # We are building in place.
+    ac_srcdir=.
+    ac_top_srcdir=$ac_top_builddir_sub
+    ac_abs_top_srcdir=$ac_pwd ;;
+  [\\/]* | ?:[\\/]* )  # Absolute name.
+    ac_srcdir=$srcdir$ac_dir_suffix;
+    ac_top_srcdir=$srcdir
+    ac_abs_top_srcdir=$srcdir ;;
+  *) # Relative name.
+    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+    ac_top_srcdir=$ac_top_build_prefix$srcdir
+    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+
+  case $ac_mode in
+  :F)
+  #
+  # CONFIG_FILE
+  #
+
+  case $INSTALL in
+  [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;;
+  *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;;
+  esac
+  ac_MKDIR_P=$MKDIR_P
+  case $MKDIR_P in
+  [\\/$]* | ?:[\\/]* ) ;;
+  */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;;
+  esac
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# If the template does not know about datarootdir, expand it.
+# FIXME: This hack should be removed a few years after 2.60.
+ac_datarootdir_hack=; ac_datarootdir_seen=
+ac_sed_dataroot='
+/datarootdir/ {
+  p
+  q
+}
+/@datadir@/p
+/@docdir@/p
+/@infodir@/p
+/@localedir@/p
+/@mandir@/p'
+case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in
+*datarootdir*) ac_datarootdir_seen=yes;;
+*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
+  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
+$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+  ac_datarootdir_hack='
+  s&@datadir@&$datadir&g
+  s&@docdir@&$docdir&g
+  s&@infodir@&$infodir&g
+  s&@localedir@&$localedir&g
+  s&@mandir@&$mandir&g
+  s&\\\${datarootdir}&$datarootdir&g' ;;
+esac
+_ACEOF
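+# Editor's note -- illustrative effect of the hack above: in a template
+# that never mentions datarootdir but still uses @docdir@ and friends,
+# those tokens are substituted directly, and any literal ${datarootdir}
+# left inside their values is expanded as well, so the output never
+# references a variable the template cannot resolve.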
+
+# Neutralize VPATH when `$srcdir' = `.'.
+# Shell code in configure.ac might set extrasub.
+# FIXME: do we really want to maintain this feature?
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_sed_extra="$ac_vpsub
+$extrasub
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+:t
+/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
+s|@configure_input@|$ac_sed_conf_input|;t t
+s&@top_builddir@&$ac_top_builddir_sub&;t t
+s&@top_build_prefix@&$ac_top_build_prefix&;t t
+s&@srcdir@&$ac_srcdir&;t t
+s&@abs_srcdir@&$ac_abs_srcdir&;t t
+s&@top_srcdir@&$ac_top_srcdir&;t t
+s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
+s&@builddir@&$ac_builddir&;t t
+s&@abs_builddir@&$ac_abs_builddir&;t t
+s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
+s&@INSTALL@&$ac_INSTALL&;t t
+s&@MKDIR_P@&$ac_MKDIR_P&;t t
+$ac_datarootdir_hack
+"
+eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \
+  >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5
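+# Editor's note -- the pipeline above is where each output file is actually
+# produced: sed applies $ac_vpsub, $extrasub and the @srcdir@-style path
+# substitutions, then awk (subs.awk) fills in every remaining @VARIABLE@.
+# Schematically, with hypothetical file names:
+#
+#   sed -f paths.sed Makefile.in | awk -f subs.awk > Makefile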
+
+test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
+  { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } &&
+  { ac_out=`sed -n '/^[	 ]*datarootdir[	 ]*:*=/p' \
+      "$ac_tmp/out"`; test -z "$ac_out"; } &&
+  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined.  Please make sure it is defined" >&5
+$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined.  Please make sure it is defined" >&2;}
+
+  rm -f "$ac_tmp/stdin"
+  case $ac_file in
+  -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";;
+  *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";;
+  esac \
+  || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+ ;;
+  :H)
+  #
+  # CONFIG_HEADER
+  #
+  if test x"$ac_file" != x-; then
+    {
+      $as_echo "/* $configure_input  */" \
+      && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs"
+    } >"$ac_tmp/config.h" \
+      || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+    if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then
+      { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5
+$as_echo "$as_me: $ac_file is unchanged" >&6;}
+    else
+      rm -f "$ac_file"
+      mv "$ac_tmp/config.h" "$ac_file" \
+	|| as_fn_error $? "could not create $ac_file" "$LINENO" 5
+    fi
+  else
+    $as_echo "/* $configure_input  */" \
+      && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \
+      || as_fn_error $? "could not create -" "$LINENO" 5
+  fi
+# Compute "$ac_file"'s index in $config_headers.
+_am_arg="$ac_file"
+_am_stamp_count=1
+for _am_header in $config_headers :; do
+  case $_am_header in
+    $_am_arg | $_am_arg:* )
+      break ;;
+    * )
+      _am_stamp_count=`expr $_am_stamp_count + 1` ;;
+  esac
+done
+echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" ||
+$as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+	 X"$_am_arg" : 'X\(//\)[^/]' \| \
+	 X"$_am_arg" : 'X\(//\)$' \| \
+	 X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$_am_arg" |
+    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)[^/].*/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`/stamp-h$_am_stamp_count
+ ;;
+
+  :C)  { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5
+$as_echo "$as_me: executing $ac_file commands" >&6;}
+ ;;
+  esac
+
+
+  case $ac_file$ac_mode in
+    "depfiles":C) test x"$AMDEP_TRUE" != x"" || {
+  # Autoconf 2.62 quotes --file arguments for eval, but not when files
+  # are listed without --file.  Let's play safe and only enable the eval
+  # if we detect the quoting.
+  case $CONFIG_FILES in
+  *\'*) eval set x "$CONFIG_FILES" ;;
+  *)   set x $CONFIG_FILES ;;
+  esac
+  shift
+  for mf
+  do
+    # Strip MF so we end up with the name of the file.
+    mf=`echo "$mf" | sed -e 's/:.*$//'`
+    # Check whether this is an Automake generated Makefile or not.
+    # We used to match only the files named `Makefile.in', but
+    # some people rename them; so instead we look at the file content.
+    # Grep'ing the first line is not enough: some people post-process
+    # each Makefile.in and add a new line on top of each file to say so.
+    # Grep'ing the whole file is not good either: AIX grep has a line
+    # limit of 2048, but all seds we know understand at least 4000.
+    if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then
+      dirpart=`$as_dirname -- "$mf" ||
+$as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+	 X"$mf" : 'X\(//\)[^/]' \| \
+	 X"$mf" : 'X\(//\)$' \| \
+	 X"$mf" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$mf" |
+    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)[^/].*/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+    else
+      continue
+    fi
+    # Extract the definition of DEPDIR, am__include, and am__quote
+    # from the Makefile without running `make'.
+    DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"`
+    test -z "$DEPDIR" && continue
+    am__include=`sed -n 's/^am__include = //p' < "$mf"`
+    test -z "am__include" && continue
+    am__quote=`sed -n 's/^am__quote = //p' < "$mf"`
+    # When using ansi2knr, U may be empty or an underscore; expand it
+    U=`sed -n 's/^U = //p' < "$mf"`
+    # Find all dependency output files, they are included files with
+    # $(DEPDIR) in their names.  We invoke sed twice because it is the
+    # simplest approach to changing $(DEPDIR) to its actual value in the
+    # expansion.
+    for file in `sed -n "
+      s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \
+	 sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do
+      # Make sure the directory exists.
+      test -f "$dirpart/$file" && continue
+      fdir=`$as_dirname -- "$file" ||
+$as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+	 X"$file" : 'X\(//\)[^/]' \| \
+	 X"$file" : 'X\(//\)$' \| \
+	 X"$file" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$file" |
+    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)[^/].*/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+      as_dir=$dirpart/$fdir; as_fn_mkdir_p
+      # echo "creating $dirpart/$file"
+      echo '# dummy' > "$dirpart/$file"
+    done
+  done
+}
+ ;;
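+# Editor's note -- summary of the depfiles command above: for each
+# automake-generated Makefile it extracts DEPDIR, am__include and
+# am__quote, lists the included $(DEPDIR)/*.Po dependency files, and
+# pre-creates each missing one as a stub so the first "make" does not fail
+# on a missing include; the stub is literally
+#
+#   echo '# dummy' > .deps/foo.Po    # hypothetical file name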
+    "libtool":C)
+
+    # See if we are running on zsh, and set the options which allow our
+    # commands through without removal of \ escapes.
+    if test -n "${ZSH_VERSION+set}" ; then
+      setopt NO_GLOB_SUBST
+    fi
+
+    cfgfile="${ofile}T"
+    trap "$RM \"$cfgfile\"; exit 1" 1 2 15
+    $RM "$cfgfile"
+
+    cat <<_LT_EOF >> "$cfgfile"
+#! $SHELL
+
+# `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services.
+# Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION
+# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`:
+# NOTE: Changes made to this file will be lost: look at ltmain.sh.
+#
+#   Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005,
+#                 2006, 2007, 2008, 2009, 2010, 2011 Free Software
+#                 Foundation, Inc.
+#   Written by Gordon Matzigkeit, 1996
+#
+#   This file is part of GNU Libtool.
+#
+# GNU Libtool is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of
+# the License, or (at your option) any later version.
+#
+# As a special exception to the GNU General Public License,
+# if you distribute this file as part of a program or library that
+# is built using GNU Libtool, you may include this file under the
+# same distribution terms that you use for the rest of that program.
+#
+# GNU Libtool is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Libtool; see the file COPYING.  If not, a copy
+# can be downloaded from http://www.gnu.org/licenses/gpl.html, or
+# obtained by writing to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+
+# The names of the tagged configurations supported by this script.
+available_tags="FC CXX "
+
+# ### BEGIN LIBTOOL CONFIG
+
+# Which release of libtool.m4 was used?
+macro_version=$macro_version
+macro_revision=$macro_revision
+
+# Whether or not to build shared libraries.
+build_libtool_libs=$enable_shared
+
+# Whether or not to build static libraries.
+build_old_libs=$enable_static
+
+# What type of objects to build.
+pic_mode=$pic_mode
+
+# Whether or not to optimize for fast installation.
+fast_install=$enable_fast_install
+
+# Shell to use when invoking shell scripts.
+SHELL=$lt_SHELL
+
+# An echo program that protects backslashes.
+ECHO=$lt_ECHO
+
+# The PATH separator for the build system.
+PATH_SEPARATOR=$lt_PATH_SEPARATOR
+
+# The host system.
+host_alias=$host_alias
+host=$host
+host_os=$host_os
+
+# The build system.
+build_alias=$build_alias
+build=$build
+build_os=$build_os
+
+# A sed program that does not truncate output.
+SED=$lt_SED
+
+# Sed that helps us avoid accidentally triggering echo(1) options like -n.
+Xsed="\$SED -e 1s/^X//"
+
+# A grep program that handles long lines.
+GREP=$lt_GREP
+
+# An ERE matcher.
+EGREP=$lt_EGREP
+
+# A literal string matcher.
+FGREP=$lt_FGREP
+
+# A BSD- or MS-compatible name lister.
+NM=$lt_NM
+
+# Whether we need soft or hard links.
+LN_S=$lt_LN_S
+
+# What is the maximum length of a command?
+max_cmd_len=$max_cmd_len
+
+# Object file suffix (normally "o").
+objext=$ac_objext
+
+# Executable file suffix (normally "").
+exeext=$exeext
+
+# Whether the shell understands "unset".
+lt_unset=$lt_unset
+
+# Turn spaces into newlines.
+SP2NL=$lt_lt_SP2NL
+
+# Turn newlines into spaces.
+NL2SP=$lt_lt_NL2SP
+
+# Convert \$build file names to \$host format.
+to_host_file_cmd=$lt_cv_to_host_file_cmd
+
+# Convert \$build files to toolchain format.
+to_tool_file_cmd=$lt_cv_to_tool_file_cmd
+
+# An object symbol dumper.
+OBJDUMP=$lt_OBJDUMP
+
+# Method to check whether dependent libraries are shared objects.
+deplibs_check_method=$lt_deplibs_check_method
+
+# Command to use when deplibs_check_method = "file_magic".
+file_magic_cmd=$lt_file_magic_cmd
+
+# How to find potential files when deplibs_check_method = "file_magic".
+file_magic_glob=$lt_file_magic_glob
+
+# Find potential files using nocaseglob when deplibs_check_method = "file_magic".
+want_nocaseglob=$lt_want_nocaseglob
+
+# DLL creation program.
+DLLTOOL=$lt_DLLTOOL
+
+# Command to associate shared and link libraries.
+sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd
+
+# The archiver.
+AR=$lt_AR
+
+# Flags to create an archive.
+AR_FLAGS=$lt_AR_FLAGS
+
+# How to feed a file listing to the archiver.
+archiver_list_spec=$lt_archiver_list_spec
+
+# A symbol stripping program.
+STRIP=$lt_STRIP
+
+# Commands used to install an old-style archive.
+RANLIB=$lt_RANLIB
+old_postinstall_cmds=$lt_old_postinstall_cmds
+old_postuninstall_cmds=$lt_old_postuninstall_cmds
+
+# Whether to use a lock for old archive extraction.
+lock_old_archive_extraction=$lock_old_archive_extraction
+
+# A C compiler.
+LTCC=$lt_CC
+
+# LTCC compiler flags.
+LTCFLAGS=$lt_CFLAGS
+
+# Take the output of nm and produce a listing of raw symbols and C names.
+global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe
+
+# Transform the output of nm in a proper C declaration.
+global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl
+
+# Transform the output of nm in a C name address pair.
+global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address
+
+# Transform the output of nm in a C name address pair when lib prefix is needed.
+global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix
+
+# Specify filename containing input files for \$NM.
+nm_file_list_spec=$lt_nm_file_list_spec
+
+# The root under which to search for dependent libraries, and in which our libraries should be installed.
+lt_sysroot=$lt_sysroot
+
+# The name of the directory that contains temporary libtool files.
+objdir=$objdir
+
+# Used to examine libraries when file_magic_cmd begins with "file".
+MAGIC_CMD=$MAGIC_CMD
+
+# Must we lock files when doing compilation?
+need_locks=$lt_need_locks
+
+# Manifest tool.
+MANIFEST_TOOL=$lt_MANIFEST_TOOL
+
+# Tool to manipulate archived DWARF debug symbol files on Mac OS X.
+DSYMUTIL=$lt_DSYMUTIL
+
+# Tool to change global to local symbols on Mac OS X.
+NMEDIT=$lt_NMEDIT
+
+# Tool to manipulate fat objects and archives on Mac OS X.
+LIPO=$lt_LIPO
+
+# ldd/readelf like tool for Mach-O binaries on Mac OS X.
+OTOOL=$lt_OTOOL
+
+# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4.
+OTOOL64=$lt_OTOOL64
+
+# Old archive suffix (normally "a").
+libext=$libext
+
+# Shared library suffix (normally ".so").
+shrext_cmds=$lt_shrext_cmds
+
+# The commands to extract the exported symbol list from a shared archive.
+extract_expsyms_cmds=$lt_extract_expsyms_cmds
+
+# Variables whose values should be saved in libtool wrapper scripts and
+# restored at link time.
+variables_saved_for_relink=$lt_variables_saved_for_relink
+
+# Do we need the "lib" prefix for modules?
+need_lib_prefix=$need_lib_prefix
+
+# Do we need a version for libraries?
+need_version=$need_version
+
+# Library versioning type.
+version_type=$version_type
+
+# Shared library runtime path variable.
+runpath_var=$runpath_var
+
+# Shared library path variable.
+shlibpath_var=$shlibpath_var
+
+# Is shlibpath searched before the hard-coded library search path?
+shlibpath_overrides_runpath=$shlibpath_overrides_runpath
+
+# Format of library name prefix.
+libname_spec=$lt_libname_spec
+
+# List of archive names.  First name is the real one, the rest are links.
+# The last name is the one that the linker finds with -lNAME.
+library_names_spec=$lt_library_names_spec
+
+# The coded name of the library, if different from the real name.
+soname_spec=$lt_soname_spec
+
+# Permission mode override for installation of shared libraries.
+install_override_mode=$lt_install_override_mode
+
+# Command to use after installation of a shared archive.
+postinstall_cmds=$lt_postinstall_cmds
+
+# Command to use after uninstallation of a shared archive.
+postuninstall_cmds=$lt_postuninstall_cmds
+
+# Commands used to finish a libtool library installation in a directory.
+finish_cmds=$lt_finish_cmds
+
+# As "finish_cmds", except a single script fragment to be evaled but
+# not shown.
+finish_eval=$lt_finish_eval
+
+# Whether we should hardcode library paths into libraries.
+hardcode_into_libs=$hardcode_into_libs
+
+# Compile-time system search path for libraries.
+sys_lib_search_path_spec=$lt_sys_lib_search_path_spec
+
+# Run-time system search path for libraries.
+sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec
+
+# Whether dlopen is supported.
+dlopen_support=$enable_dlopen
+
+# Whether dlopen of programs is supported.
+dlopen_self=$enable_dlopen_self
+
+# Whether dlopen of statically linked programs is supported.
+dlopen_self_static=$enable_dlopen_self_static
+
+# Commands to strip libraries.
+old_striplib=$lt_old_striplib
+striplib=$lt_striplib
+
+
+# The linker used to build libraries.
+LD=$lt_LD
+
+# How to create reloadable object files.
+reload_flag=$lt_reload_flag
+reload_cmds=$lt_reload_cmds
+
+# Commands used to build an old-style archive.
+old_archive_cmds=$lt_old_archive_cmds
+
+# A language specific compiler.
+CC=$lt_compiler
+
+# Is the compiler the GNU compiler?
+with_gcc=$GCC
+
+# Compiler flag to turn off builtin functions.
+no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag
+
+# Additional compiler flags for building library objects.
+pic_flag=$lt_lt_prog_compiler_pic
+
+# How to pass a linker flag through the compiler.
+wl=$lt_lt_prog_compiler_wl
+
+# Compiler flag to prevent dynamic linking.
+link_static_flag=$lt_lt_prog_compiler_static
+
+# Does compiler simultaneously support -c and -o options?
+compiler_c_o=$lt_lt_cv_prog_compiler_c_o
+
+# Whether or not to add -lc for building shared libraries.
+build_libtool_need_lc=$archive_cmds_need_lc
+
+# Whether or not to disallow shared libs when runtime libs are static.
+allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes
+
+# Compiler flag to allow reflexive dlopens.
+export_dynamic_flag_spec=$lt_export_dynamic_flag_spec
+
+# Compiler flag to generate shared objects directly from archives.
+whole_archive_flag_spec=$lt_whole_archive_flag_spec
+
+# Whether the compiler copes with passing no objects directly.
+compiler_needs_object=$lt_compiler_needs_object
+
+# Create an old-style archive from a shared archive.
+old_archive_from_new_cmds=$lt_old_archive_from_new_cmds
+
+# Create a temporary old-style archive to link instead of a shared archive.
+old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds
+
+# Commands used to build a shared archive.
+archive_cmds=$lt_archive_cmds
+archive_expsym_cmds=$lt_archive_expsym_cmds
+
+# Commands used to build a loadable module if different from building
+# a shared archive.
+module_cmds=$lt_module_cmds
+module_expsym_cmds=$lt_module_expsym_cmds
+
+# Whether we are building with GNU ld or not.
+with_gnu_ld=$lt_with_gnu_ld
+
+# Flag that allows shared libraries with undefined symbols to be built.
+allow_undefined_flag=$lt_allow_undefined_flag
+
+# Flag that enforces no undefined symbols.
+no_undefined_flag=$lt_no_undefined_flag
+
+# Flag to hardcode \$libdir into a binary during linking.
+# This must work even if \$libdir does not exist.
+hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec
+
+# Whether we need a single "-rpath" flag with a separated argument.
+hardcode_libdir_separator=$lt_hardcode_libdir_separator
+
+# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes
+# DIR into the resulting binary.
+hardcode_direct=$hardcode_direct
+
+# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes
+# DIR into the resulting binary and the resulting library dependency is
+# "absolute",i.e impossible to change by setting \${shlibpath_var} if the
+# library is relocated.
+hardcode_direct_absolute=$hardcode_direct_absolute
+
+# Set to "yes" if using the -LDIR flag during linking hardcodes DIR
+# into the resulting binary.
+hardcode_minus_L=$hardcode_minus_L
+
+# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR
+# into the resulting binary.
+hardcode_shlibpath_var=$hardcode_shlibpath_var
+
+# Set to "yes" if building a shared library automatically hardcodes DIR
+# into the library and all subsequent libraries and executables linked
+# against it.
+hardcode_automatic=$hardcode_automatic
+
+# Set to yes if linker adds runtime paths of dependent libraries
+# to runtime path list.
+inherit_rpath=$inherit_rpath
+
+# Whether libtool must link a program against all its dependency libraries.
+link_all_deplibs=$link_all_deplibs
+
+# Set to "yes" if exported symbols are required.
+always_export_symbols=$always_export_symbols
+
+# The commands to list exported symbols.
+export_symbols_cmds=$lt_export_symbols_cmds
+
+# Symbols that should not be listed in the preloaded symbols.
+exclude_expsyms=$lt_exclude_expsyms
+
+# Symbols that must always be exported.
+include_expsyms=$lt_include_expsyms
+
+# Commands necessary for linking programs (against libraries) with templates.
+prelink_cmds=$lt_prelink_cmds
+
+# Commands necessary for finishing linking programs.
+postlink_cmds=$lt_postlink_cmds
+
+# Specify filename containing input files.
+file_list_spec=$lt_file_list_spec
+
+# How to hardcode a shared library path into an executable.
+hardcode_action=$hardcode_action
+
+# The directories searched by this compiler when creating a shared library.
+compiler_lib_search_dirs=$lt_compiler_lib_search_dirs
+
+# Dependencies to place before and after the objects being linked to
+# create a shared library.
+predep_objects=$lt_predep_objects
+postdep_objects=$lt_postdep_objects
+predeps=$lt_predeps
+postdeps=$lt_postdeps
+
+# The library search path used internally by the compiler when linking
+# a shared library.
+compiler_lib_search_path=$lt_compiler_lib_search_path
+
+# ### END LIBTOOL CONFIG
+
+_LT_EOF
+
+  case $host_os in
+  aix3*)
+    cat <<\_LT_EOF >> "$cfgfile"
+# AIX sometimes has problems with the GCC collect2 program.  For some
+# reason, if we set the COLLECT_NAMES environment variable, the problems
+# vanish in a puff of smoke.
+if test "X${COLLECT_NAMES+set}" != Xset; then
+  COLLECT_NAMES=
+  export COLLECT_NAMES
+fi
+_LT_EOF
+    ;;
+  esac
+
+
+ltmain="$ac_aux_dir/ltmain.sh"
+
+
+  # We use sed instead of cat because bash on DJGPP gets confused if
+  # it finds mixed CR/LF and LF-only lines.  Since sed operates in
+  # text mode, it properly converts lines to CR/LF.  This bash problem
+  # is reportedly fixed, but why not run on old versions too?
+  sed '$q' "$ltmain" >> "$cfgfile" \
+     || (rm -f "$cfgfile"; exit 1)
+
+  if test x"$xsi_shell" = xyes; then
+  sed -e '/^func_dirname ()$/,/^} # func_dirname /c\
+func_dirname ()\
+{\
+\    case ${1} in\
+\      */*) func_dirname_result="${1%/*}${2}" ;;\
+\      *  ) func_dirname_result="${3}" ;;\
+\    esac\
+} # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \
+  && mv -f "$cfgfile.tmp" "$cfgfile" \
+    || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
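+# Editor's note -- the replacement above relies on sed's range-addressed
+# "c\" (change) command: every line from the function header down to its
+# "} # func_dirname" trailer is swapped for the new body in one pass.  A
+# minimal standalone example of the technique, on a hypothetical function:
+#
+#   sed -e '/^f ()$/,/^} # f /c\
+#   f () { echo fast; } # f replacement' script.sh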
+
+
+  sed -e '/^func_basename ()$/,/^} # func_basename /c\
+func_basename ()\
+{\
+\    func_basename_result="${1##*/}"\
+} # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \
+  && mv -f "$cfgfile.tmp" "$cfgfile" \
+    || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+  sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\
+func_dirname_and_basename ()\
+{\
+\    case ${1} in\
+\      */*) func_dirname_result="${1%/*}${2}" ;;\
+\      *  ) func_dirname_result="${3}" ;;\
+\    esac\
+\    func_basename_result="${1##*/}"\
+} # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \
+  && mv -f "$cfgfile.tmp" "$cfgfile" \
+    || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+  sed -e '/^func_stripname ()$/,/^} # func_stripname /c\
+func_stripname ()\
+{\
+\    # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\
+\    # positional parameters, so assign one to ordinary parameter first.\
+\    func_stripname_result=${3}\
+\    func_stripname_result=${func_stripname_result#"${1}"}\
+\    func_stripname_result=${func_stripname_result%"${2}"}\
+} # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \
+  && mv -f "$cfgfile.tmp" "$cfgfile" \
+    || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+  sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\
+func_split_long_opt ()\
+{\
+\    func_split_long_opt_name=${1%%=*}\
+\    func_split_long_opt_arg=${1#*=}\
+} # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \
+  && mv -f "$cfgfile.tmp" "$cfgfile" \
+    || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+  sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\
+func_split_short_opt ()\
+{\
+\    func_split_short_opt_arg=${1#??}\
+\    func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\
+} # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \
+  && mv -f "$cfgfile.tmp" "$cfgfile" \
+    || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+  sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\
+func_lo2o ()\
+{\
+\    case ${1} in\
+\      *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\
+\      *)    func_lo2o_result=${1} ;;\
+\    esac\
+} # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \
+  && mv -f "$cfgfile.tmp" "$cfgfile" \
+    || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+  sed -e '/^func_xform ()$/,/^} # func_xform /c\
+func_xform ()\
+{\
+    func_xform_result=${1%.*}.lo\
+} # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \
+  && mv -f "$cfgfile.tmp" "$cfgfile" \
+    || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+  sed -e '/^func_arith ()$/,/^} # func_arith /c\
+func_arith ()\
+{\
+    func_arith_result=$(( $* ))\
+} # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \
+  && mv -f "$cfgfile.tmp" "$cfgfile" \
+    || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+  sed -e '/^func_len ()$/,/^} # func_len /c\
+func_len ()\
+{\
+    func_len_result=${#1}\
+} # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \
+  && mv -f "$cfgfile.tmp" "$cfgfile" \
+    || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+fi
+
+if test x"$lt_shell_append" = xyes; then
+  sed -e '/^func_append ()$/,/^} # func_append /c\
+func_append ()\
+{\
+    eval "${1}+=\\${2}"\
+} # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \
+  && mv -f "$cfgfile.tmp" "$cfgfile" \
+    || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+  sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\
+func_append_quoted ()\
+{\
+\    func_quote_for_eval "${2}"\
+\    eval "${1}+=\\\\ \\$func_quote_for_eval_result"\
+} # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \
+  && mv -f "$cfgfile.tmp" "$cfgfile" \
+    || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+test 0 -eq $? || _lt_function_replace_fail=:
+
+
+  # Save a `func_append' function call where possible by direct use of '+='
+  sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \
+    && mv -f "$cfgfile.tmp" "$cfgfile" \
+      || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+  test 0 -eq $? || _lt_function_replace_fail=:
+else
+  # Save a `func_append' function call even when '+=' is not available
+  sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \
+    && mv -f "$cfgfile.tmp" "$cfgfile" \
+      || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp")
+  test 0 -eq $? || _lt_function_replace_fail=:
+fi
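+# Editor's note -- illustrative effect of the two rewrites above: where the
+# shell supports "+=", a call such as
+#
+#   func_append deplibs " -lm"
+#
+# is rewritten in place to  deplibs+=" -lm",  and otherwise to the portable
+# deplibs="$deplibs -lm",  saving a shell function call either way.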
+
+if test x"$_lt_function_replace_fail" = x":"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5
+$as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;}
+fi
+
+
+   mv -f "$cfgfile" "$ofile" ||
+    (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile")
+  chmod +x "$ofile"
+
+
+    cat <<_LT_EOF >> "$ofile"
+
+# ### BEGIN LIBTOOL TAG CONFIG: FC
+
+# The linker used to build libraries.
+LD=$lt_LD_FC
+
+# How to create reloadable object files.
+reload_flag=$lt_reload_flag_FC
+reload_cmds=$lt_reload_cmds_FC
+
+# Commands used to build an old-style archive.
+old_archive_cmds=$lt_old_archive_cmds_FC
+
+# A language specific compiler.
+CC=$lt_compiler_FC
+
+# Is the compiler the GNU compiler?
+with_gcc=$GCC_FC
+
+# Compiler flag to turn off builtin functions.
+no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_FC
+
+# Additional compiler flags for building library objects.
+pic_flag=$lt_lt_prog_compiler_pic_FC
+
+# How to pass a linker flag through the compiler.
+wl=$lt_lt_prog_compiler_wl_FC
+
+# Compiler flag to prevent dynamic linking.
+link_static_flag=$lt_lt_prog_compiler_static_FC
+
+# Does compiler simultaneously support -c and -o options?
+compiler_c_o=$lt_lt_cv_prog_compiler_c_o_FC
+
+# Whether or not to add -lc for building shared libraries.
+build_libtool_need_lc=$archive_cmds_need_lc_FC
+
+# Whether or not to disallow shared libs when runtime libs are static.
+allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_FC
+
+# Compiler flag to allow reflexive dlopens.
+export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_FC
+
+# Compiler flag to generate shared objects directly from archives.
+whole_archive_flag_spec=$lt_whole_archive_flag_spec_FC
+
+# Whether the compiler copes with passing no objects directly.
+compiler_needs_object=$lt_compiler_needs_object_FC
+
+# Create an old-style archive from a shared archive.
+old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_FC
+
+# Create a temporary old-style archive to link instead of a shared archive.
+old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_FC
+
+# Commands used to build a shared archive.
+archive_cmds=$lt_archive_cmds_FC
+archive_expsym_cmds=$lt_archive_expsym_cmds_FC
+
+# Commands used to build a loadable module if different from building
+# a shared archive.
+module_cmds=$lt_module_cmds_FC
+module_expsym_cmds=$lt_module_expsym_cmds_FC
+
+# Whether we are building with GNU ld or not.
+with_gnu_ld=$lt_with_gnu_ld_FC
+
+# Flag that allows shared libraries with undefined symbols to be built.
+allow_undefined_flag=$lt_allow_undefined_flag_FC
+
+# Flag that enforces no undefined symbols.
+no_undefined_flag=$lt_no_undefined_flag_FC
+
+# Flag to hardcode \$libdir into a binary during linking.
+# This must work even if \$libdir does not exist.
+hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_FC
+
+# Whether we need a single "-rpath" flag with a separated argument.
+hardcode_libdir_separator=$lt_hardcode_libdir_separator_FC
+
+# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes
+# DIR into the resulting binary.
+hardcode_direct=$hardcode_direct_FC
+
+# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes
+# DIR into the resulting binary and the resulting library dependency is
+# "absolute",i.e impossible to change by setting \${shlibpath_var} if the
+# library is relocated.
+hardcode_direct_absolute=$hardcode_direct_absolute_FC
+
+# Set to "yes" if using the -LDIR flag during linking hardcodes DIR
+# into the resulting binary.
+hardcode_minus_L=$hardcode_minus_L_FC
+
+# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR
+# into the resulting binary.
+hardcode_shlibpath_var=$hardcode_shlibpath_var_FC
+
+# Set to "yes" if building a shared library automatically hardcodes DIR
+# into the library and all subsequent libraries and executables linked
+# against it.
+hardcode_automatic=$hardcode_automatic_FC
+
+# Set to yes if linker adds runtime paths of dependent libraries
+# to runtime path list.
+inherit_rpath=$inherit_rpath_FC
+
+# Whether libtool must link a program against all its dependency libraries.
+link_all_deplibs=$link_all_deplibs_FC
+
+# Set to "yes" if exported symbols are required.
+always_export_symbols=$always_export_symbols_FC
+
+# The commands to list exported symbols.
+export_symbols_cmds=$lt_export_symbols_cmds_FC
+
+# Symbols that should not be listed in the preloaded symbols.
+exclude_expsyms=$lt_exclude_expsyms_FC
+
+# Symbols that must always be exported.
+include_expsyms=$lt_include_expsyms_FC
+
+# Commands necessary for linking programs (against libraries) with templates.
+prelink_cmds=$lt_prelink_cmds_FC
+
+# Commands necessary for finishing linking programs.
+postlink_cmds=$lt_postlink_cmds_FC
+
+# Specify filename containing input files.
+file_list_spec=$lt_file_list_spec_FC
+
+# How to hardcode a shared library path into an executable.
+hardcode_action=$hardcode_action_FC
+
+# The directories searched by this compiler when creating a shared library.
+compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_FC
+
+# Dependencies to place before and after the objects being linked to
+# create a shared library.
+predep_objects=$lt_predep_objects_FC
+postdep_objects=$lt_postdep_objects_FC
+predeps=$lt_predeps_FC
+postdeps=$lt_postdeps_FC
+
+# The library search path used internally by the compiler when linking
+# a shared library.
+compiler_lib_search_path=$lt_compiler_lib_search_path_FC
+
+# ### END LIBTOOL TAG CONFIG: FC
+_LT_EOF
+
+
+    cat <<_LT_EOF >> "$ofile"
+
+# ### BEGIN LIBTOOL TAG CONFIG: CXX
+
+# The linker used to build libraries.
+LD=$lt_LD_CXX
+
+# How to create reloadable object files.
+reload_flag=$lt_reload_flag_CXX
+reload_cmds=$lt_reload_cmds_CXX
+
+# Commands used to build an old-style archive.
+old_archive_cmds=$lt_old_archive_cmds_CXX
+
+# A language specific compiler.
+CC=$lt_compiler_CXX
+
+# Is the compiler the GNU compiler?
+with_gcc=$GCC_CXX
+
+# Compiler flag to turn off builtin functions.
+no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX
+
+# Additional compiler flags for building library objects.
+pic_flag=$lt_lt_prog_compiler_pic_CXX
+
+# How to pass a linker flag through the compiler.
+wl=$lt_lt_prog_compiler_wl_CXX
+
+# Compiler flag to prevent dynamic linking.
+link_static_flag=$lt_lt_prog_compiler_static_CXX
+
+# Does compiler simultaneously support -c and -o options?
+compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX
+
+# Whether or not to add -lc for building shared libraries.
+build_libtool_need_lc=$archive_cmds_need_lc_CXX
+
+# Whether or not to disallow shared libs when runtime libs are static.
+allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX
+
+# Compiler flag to allow reflexive dlopens.
+export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX
+
+# Compiler flag to generate shared objects directly from archives.
+whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX
+
+# Whether the compiler copes with passing no objects directly.
+compiler_needs_object=$lt_compiler_needs_object_CXX
+
+# Create an old-style archive from a shared archive.
+old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX
+
+# Create a temporary old-style archive to link instead of a shared archive.
+old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX
+
+# Commands used to build a shared archive.
+archive_cmds=$lt_archive_cmds_CXX
+archive_expsym_cmds=$lt_archive_expsym_cmds_CXX
+
+# Commands used to build a loadable module if different from building
+# a shared archive.
+module_cmds=$lt_module_cmds_CXX
+module_expsym_cmds=$lt_module_expsym_cmds_CXX
+
+# Whether we are building with GNU ld or not.
+with_gnu_ld=$lt_with_gnu_ld_CXX
+
+# Flag that allows shared libraries with undefined symbols to be built.
+allow_undefined_flag=$lt_allow_undefined_flag_CXX
+
+# Flag that enforces no undefined symbols.
+no_undefined_flag=$lt_no_undefined_flag_CXX
+
+# Flag to hardcode \$libdir into a binary during linking.
+# This must work even if \$libdir does not exist.
+hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX
+
+# Whether we need a single "-rpath" flag with a separated argument.
+hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX
+
+# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes
+# DIR into the resulting binary.
+hardcode_direct=$hardcode_direct_CXX
+
+# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes
+# DIR into the resulting binary and the resulting library dependency is
+# "absolute",i.e impossible to change by setting \${shlibpath_var} if the
+# library is relocated.
+hardcode_direct_absolute=$hardcode_direct_absolute_CXX
+
+# Set to "yes" if using the -LDIR flag during linking hardcodes DIR
+# into the resulting binary.
+hardcode_minus_L=$hardcode_minus_L_CXX
+
+# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR
+# into the resulting binary.
+hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX
+
+# Set to "yes" if building a shared library automatically hardcodes DIR
+# into the library and all subsequent libraries and executables linked
+# against it.
+hardcode_automatic=$hardcode_automatic_CXX
+
+# Set to yes if linker adds runtime paths of dependent libraries
+# to runtime path list.
+inherit_rpath=$inherit_rpath_CXX
+
+# Whether libtool must link a program against all its dependency libraries.
+link_all_deplibs=$link_all_deplibs_CXX
+
+# Set to "yes" if exported symbols are required.
+always_export_symbols=$always_export_symbols_CXX
+
+# The commands to list exported symbols.
+export_symbols_cmds=$lt_export_symbols_cmds_CXX
+
+# Symbols that should not be listed in the preloaded symbols.
+exclude_expsyms=$lt_exclude_expsyms_CXX
+
+# Symbols that must always be exported.
+include_expsyms=$lt_include_expsyms_CXX
+
+# Commands necessary for linking programs (against libraries) with templates.
+prelink_cmds=$lt_prelink_cmds_CXX
+
+# Commands necessary for finishing linking programs.
+postlink_cmds=$lt_postlink_cmds_CXX
+
+# Specify filename containing input files.
+file_list_spec=$lt_file_list_spec_CXX
+
+# How to hardcode a shared library path into an executable.
+hardcode_action=$hardcode_action_CXX
+
+# The directories searched by this compiler when creating a shared library.
+compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_CXX
+
+# Dependencies to place before and after the objects being linked to
+# create a shared library.
+predep_objects=$lt_predep_objects_CXX
+postdep_objects=$lt_postdep_objects_CXX
+predeps=$lt_predeps_CXX
+postdeps=$lt_postdeps_CXX
+
+# The library search path used internally by the compiler when linking
+# a shared library.
+compiler_lib_search_path=$lt_compiler_lib_search_path_CXX
+
+# ### END LIBTOOL TAG CONFIG: CXX
+_LT_EOF
+
+ ;;
+    "rsb-config.h":H) sed 's/^#define /#define RSB_/g;s/ RSB_RSB_/ RSB_/g' rsb-config.h > rsb-config.h.tmp ; echo '#endif /* RSB_CONFIG_H_INCLUDED */' >> rsb-config.h.tmp ; cat $srcdir/rsb_license_header.inc $srcdir/rsb-config.h.hin rsb-config.h.tmp > rsb-config.h ; rm rsb-config.h.tmp ;;
+    "librsb-config":F) chmod +x librsb-config ;;
+
+  esac
+done # for ac_tag
+
+
+as_fn_exit 0
+_ACEOF
+ac_clean_files=$ac_clean_files_save
+
+test $ac_write_fail = 0 ||
+  as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5
+
+
+# configure is writing to config.log, and then calls config.status.
+# config.status does its own redirection, appending to config.log.
+# Unfortunately, on DOS this fails, as config.log is still kept open
+# by configure, so config.status won't be able to write to it; its
+# output is simply discarded.  So we exec the FD to /dev/null,
+# effectively closing config.log, so it can be properly (re)opened and
+# appended to by config.status.  When coming back to configure, we
+# need to make the FD available again.
+if test "$no_create" != yes; then
+  ac_cs_success=:
+  ac_config_status_args=
+  test "$silent" = yes &&
+    ac_config_status_args="$ac_config_status_args --quiet"
+  exec 5>/dev/null
+  $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false
+  exec 5>>config.log
+  # Use ||, not &&, to avoid exiting from the if with $? = 1, which
+  # would make configure fail if this is the last instruction.
+  $ac_cs_success || as_fn_exit 1
+fi
+if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
+$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
+fi
+
+#dnl	***********************************************************************
+{ $as_echo "$as_me:${as_lineno-$LINENO}: =============== Build Programs and Flags ===============================
+(you can override these at build time; e.g.: 'make CC=cc')
+	CC                     : ${CC}
+	FC (to disable, FC='') : ${FC}
+	CFLAGS                 : ${CFLAGS}
+	FCFLAGS                : ${FCFLAGS}
+	LDFLAGS                : ${LDFLAGS}
+	NOUNROLLCFLAGS         : ${NOUNROLLCFLAGS}
+	LIBS                   : ${LIBS}
+	AR                     : ${AR}
+	ARFLAGS                : ${ARFLAGS}
+	M4                     : ${M4}
+	OCTAVE                 : ${OCTAVE}
+	DOXYGEN                : ${DOXYGEN}
+	HELP2MAN               : ${HELP2MAN}
+	CXX                    : ${CXX}
+=== Additional flags affecting only the benchmark program (rsbench): ===
+	RSB_RSBENCH_LIBS             : ${RSB_RSBENCH_LIBS}
+	RSB_RSBENCH_CFLAGS           : ${RSB_RSBENCH_CFLAGS}
+========= Main code generator values, this build vs defaults ===========
+(if these differ from the defaults, you need to have M4 and run 'make cleanall' and 'make')
+	All Numerical types          : \"${all_matrix_types}\"
+	Numerical types              : \"${want_matrix_types}\" vs \"${default_types}\"
+	Build Sparse BLAS Interface  : \"${sparse_blas_interface}\" vs \"${sparse_blas_interface_default}\"
+	Util. Kernels Unroll         : \"${util_unrolls}\" vs \"${default_util_unrolls}\"
+	Triangular solve zero check  : \"${want_spsm_diagonal_check}\" vs \"${want_spsm_diagonal_check_default}\"
+============== Build Configuration, this build vs defaults =============
+(if you reconfigure and change these, you need to run 'make clean' and 'make')
+	Supported I/O functionality level   : \"${want_io_level}\" vs \"${default_want_io_level}\"
+	Interface Error Verbosity           : \"${want_ext_verrbosity}\" vs \"${default_want_ext_verrbosity}\"
+	Internals Error Verbosity           : \"${want_int_verrbosity}\" vs \"${default_want_int_verrbosity}\"
+	Memory hierarchy info, detected     : \"${RSB_DETECTED_MEM_HIERARCHY_INFO}\"
+	Memory hierarchy info, selected     : \"${RSB_USER_SET_MEM_HIERARCHY_INFO}\"
+	Maximum of Supported threads        : \"${RSB_CONST_MAX_SUPPORTED_THREADS}\"
+	Build Fortran examples              : \"$enable_fortran_examples\"
+	Build C examples                    : \"$enable_c_examples\"
+	Install Sparse BLAS Fortran modules : \"$want_blas_sparse_mod_install\"
+	Install pkg-config \"librsb.pc\" file : \"$want_install_pkg_config\"
+	Build Octave generated tester       : \"$want_octave_testing\"
+	Build Octave generated tester (int) : \"$want_octave_testing_and_int\"
+	Build HTML and man documentation    : \"$want_build_doc\"
+	gzipped matrices support via zlib   : \"${want_zlib_support}\"
+	gather elapsed time in librsb       : \"${enable_librsb_stats}\"
+" >&5
+$as_echo "$as_me: =============== Build Programs and Flags ===============================
+(you can override these at build time; e.g.: 'make CC=cc')
+	CC                     : ${CC}
+	FC (to disable, FC='') : ${FC}
+	CFLAGS                 : ${CFLAGS}
+	FCFLAGS                : ${FCFLAGS}
+	LDFLAGS                : ${LDFLAGS}
+	NOUNROLLCFLAGS         : ${NOUNROLLCFLAGS}
+	LIBS                   : ${LIBS}
+	AR                     : ${AR}
+	ARFLAGS                : ${ARFLAGS}
+	M4                     : ${M4}
+	OCTAVE                 : ${OCTAVE}
+	DOXYGEN                : ${DOXYGEN}
+	HELP2MAN               : ${HELP2MAN}
+	CXX                    : ${CXX}
+=== Additional flags affecting only the benchmark program (rsbench): ===
+	RSB_RSBENCH_LIBS             : ${RSB_RSBENCH_LIBS}
+	RSB_RSBENCH_CFLAGS           : ${RSB_RSBENCH_CFLAGS}
+========= Main code generator values, this build vs defaults ===========
+(if these differ from the defaults, you need to have M4 and run 'make cleanall' and 'make')
+	All Numerical types          : \"${all_matrix_types}\"
+	Numerical types              : \"${want_matrix_types}\" vs \"${default_types}\"
+	Build Sparse BLAS Interface  : \"${sparse_blas_interface}\" vs \"${sparse_blas_interface_default}\"
+	Util. Kernels Unroll         : \"${util_unrolls}\" vs \"${default_util_unrolls}\"
+	Triangular solve zero check  : \"${want_spsm_diagonal_check}\" vs \"${want_spsm_diagonal_check_default}\"
+============== Build Configuration, this build vs defaults =============
+(if you reconfigure and change these, you need to run 'make clean' and 'make')
+	Supported I/O functionality level   : \"${want_io_level}\" vs \"${default_want_io_level}\"
+	Interface Error Verbosity           : \"${want_ext_verrbosity}\" vs \"${default_want_ext_verrbosity}\"
+	Internals Error Verbosity           : \"${want_int_verrbosity}\" vs \"${default_want_int_verrbosity}\"
+	Memory hierarchy info, detected     : \"${RSB_DETECTED_MEM_HIERARCHY_INFO}\"
+	Memory hierarchy info, selected     : \"${RSB_USER_SET_MEM_HIERARCHY_INFO}\"
+	Maximum of Supported threads        : \"${RSB_CONST_MAX_SUPPORTED_THREADS}\"
+	Build Fortran examples              : \"$enable_fortran_examples\"
+	Build C examples                    : \"$enable_c_examples\"
+	Install Sparse BLAS Fortran modules : \"$want_blas_sparse_mod_install\"
+	Install pkg-config \"librsb.pc\" file : \"$want_install_pkg_config\"
+	Build Octave generated tester       : \"$want_octave_testing\"
+	Build Octave generated tester (int) : \"$want_octave_testing_and_int\"
+	Build HTML and man documentation    : \"$want_build_doc\"
+	gzipped matrices support via zlib   : \"${want_zlib_support}\"
+	gather elapsed time in librsb       : \"${enable_librsb_stats}\"
+" >&6;}
+
+if test x"$OCTAVE" != xfalse && test x"$want_int" != x  ; then
+{ $as_echo "$as_me:${as_lineno-$LINENO}: You seem to have GNU Octave and enabled 'int' type. This will allow an additional part of the test suite to be generated." >&5
+$as_echo "$as_me: You seem to have GNU Octave and enabled 'int' type. This will allow an additional part of the test suite to be generated." >&6;}
+else
+{ $as_echo "$as_me:${as_lineno-$LINENO}: You seem to not have GNU Octave or have disabled 'int' type. Part of the test suite will not be generated. If you want more testing capabilities, you should enable the 'int' type as well." >&5
+$as_echo "$as_me: You seem to not have GNU Octave or have disabled 'int' type. Part of the test suite will not be generated. If you want more testing capabilities, you should enable the 'int' type as well." >&6;}
+fi
+if test x"$M4" = x ; then
+	{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: No m4 implementation detected. You will not be able to generate code." >&5
+$as_echo "$as_me: WARNING: No m4 implementation detected. You will not be able to generate code." >&2;}
+else
+	true
+fi
+# FIXME: should warn the user when the 'int' type is opted out, since then there would be no GNU Octave based testing.
+
+if test x"$default_types"  != x"$want_matrix_types" ; then
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: You chose a custom matrix types selection. If you just unpacked from archive, you should issue \"make cleanall\" to delete the shipped code and then \"make\" will regenerate it by using m4." >&5
+$as_echo "$as_me: WARNING: You chose a custom matrix types selection. If you just unpacked from archive, you should issue \"make cleanall\" to delete the shipped code and then \"make\" will regenerate it by using m4." >&2;}
+fi
+if test x"$want_rsb_dl"  = x"yes" ; then
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: You chose an obsolete switch (--enable-shlib-linked-examples), which now on is disabled." >&5
+$as_echo "$as_me: WARNING: You chose an obsolete switch (--enable-shlib-linked-examples), which now on is disabled." >&2;}
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: Successfully configured librsb version \"$LIBRSB_VERSION\"." >&5
+$as_echo "$as_me: Successfully configured librsb version \"$LIBRSB_VERSION\"." >&6;}
diff --git a/configure.ac b/configure.ac
new file mode 100644
index 0000000..71d751b
--- /dev/null
+++ b/configure.ac
@@ -0,0 +1,1369 @@
+AC_PREREQ([2.54])
+################################################################################
+dnl	************************************************************************
+dnl	******* the following line shall be commented in branches/... **********
+dnl define([librsbsvnversion],[esyscmd([sh -c "basename `svn info | grep ^URL | sed 's/URL: '//g` | tr -d '\n'"])])dnl
+dnl	************************************************************************
+define([librsbtodaydate],[esyscmd([sh -c "date +'%B %d, %Y' | tr -d '\n'"])])dnl
+define([librsbsvnrevision],[esyscmd([sh -c "svnversion -n"])])dnl
+dnl	************************************************************************
+dnl	**** the following lines shall be updated at each release **************
+dnl	************************************************************************
+define([librsb_ver_major],[1])dnl
+define([librsb_ver_minor],[2])dnl
+define([librsb_ver_patch],[0])dnl
+define([librsb_ver_prers],[-rc5])dnl
+dnl	************************************************************************
+define([librsb_ver_string],librsb_ver_major.librsb_ver_minor.librsb_ver_patch)dnl
+define([librsb_librsbvernum],[librsb_ver_major"0"librsb_ver_minor"00"])dnl
+define([librsb_abi_vernum],[0:0:0])dnl
+define([librsb_lib_string],librsb_librsbvernum)dnl
+define([librsb_release],librsb_ver_string[]librsb_ver_prers)dnl
+define([librsbversion],[ifelse(librsbsvnversion,[trunk],librsbsvnrevision,librsb_release)])dnl
+dnl define([librsbversion],[ifelse(librsbsvnversion,[trunk],[trunk],librsb_ver_major.librsb_ver_minor.librsb_ver_patch[]librsb_ver_prers)])dnl
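+dnl Editor's note -- illustrative expansion of the macros above for this
+dnl release: librsb_ver_string -> 1.2.0, librsb_release -> 1.2.0-rc5, and
+dnl librsb_librsbvernum -> 1"0"2"00", which a shell assignment collapses to
+dnl the numeric version code 10200; unless the svn version is "trunk",
+dnl librsbversion therefore expands to 1.2.0-rc5.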
+AC_INIT([librsb],[librsbversion],[michelemartone_AT_users_DOT_sourceforge_DOT_net])
+dnl
+# The following are valid for both trunk and release versions.
+# They indicate the version a trunk build is loosely related to.
+LIBRSB_VER_MAJOR=librsb_ver_major
+LIBRSB_VER_MINOR=librsb_ver_minor
+LIBRSB_VER_PATCH=librsb_ver_patch
+LIBRSB_LIBRSB_VER=librsb_librsbvernum
+if test x"librsbsvnversion" = x"trunk" ; then
+LIBRSB_VER_PRERS="-trunk"
+else
+LIBRSB_VER_PRERS="librsb_ver_prers"
+fi
+LIBRSB_VER_DATE="librsbtodaydate"
+LIBRSB_VERSION="librsbversion"
+LIBRSB_MAIN_RELEASE="librsb_ver_string"
+LIBRSB_ABI_VERSION="librsb_abi_vernum"
+AC_SUBST(LIBRSB_VER_MAJOR)
+AC_SUBST(LIBRSB_VER_MINOR)
+AC_SUBST(LIBRSB_VER_PATCH)
+AC_SUBST(LIBRSB_VER_DATE)
+AC_SUBST(LIBRSB_VER_PRERS)
+AC_SUBST(LIBRSB_LIBRSB_VER)
+AC_SUBST(LIBRSB_VERSION)
+AC_SUBST(LIBRSB_MAIN_RELEASE)
+AC_SUBST(LIBRSB_ABI_VERSION)
+################################################################################
+SVN_REVISION="librsbsvnrevision"
+AH_TEMPLATE([SVN_REVISION])
+AC_DEFINE(SVN_REVISION,"librsbsvnrevision",[SVN REVISION])
+AC_SUBST(SVN_REVISION)
+AH_TEMPLATE([COPYRIGHT_STRING])
+AC_DEFINE(COPYRIGHT_STRING,"Copyright (c) 2008-2016 Michele Martone",[])
+dnl AC_SUBST([COPYRIGHT_STRING],["Copyright (c) 2008-2016 Michele Martone"])
+AC_COPYRIGHT([Copyright (c) 2008-2016, Michele Martone])
+################################################################################
+AM_INIT_AUTOMAKE
+LT_INIT
+AC_CONFIG_MACRO_DIR([m4])
+dnl AC_PROG_FC
+AC_PROG_FC(xlf2003 xlf2003_r ifort gfortran )
+m4_if(m4_version_compare(m4_defn([AC_AUTOCONF_VERSION]),[2.60]),-1, [],[AC_OPENMP()])
+AC_PROG_CXX(xlC xlC_r7 xlC_r4 xlC_r g++ pgCC )
+# on epsilon cc is not reloaded with modules
+AC_PROG_CC(xlc_r xlc icc pgcc gcc cc)
+# ... Makefile.am:45: compiling `unroll.c' with per-target flags requires `AM_PROG_CC_C_O' in `configure.ac'
+AM_PROG_CC_C_O
+# run libtoolize if autoconf complains about the following
+AC_PROG_LIBTOOL
+AM_PROG_AS
+#AC_GNU_SOURCE
+#AC_PROG_C
+#AC_PROG_INSTALL
+dnl AC_PROG_RANLIB
+AC_PROG_AWK
+AC_PROG_GREP
+AC_CHECK_SIZEOF([void *])
+AC_CHECK_SIZEOF([char])
+dnl AC_CHECK_SIZEOF([unsigned char])
+AC_CHECK_SIZEOF([int])
+dnl AC_CHECK_SIZEOF([unsigned int])
+AC_CHECK_SIZEOF([short int])
+dnl AC_CHECK_SIZEOF([short unsigned int])
+AC_CHECK_SIZEOF([long])
+AC_CHECK_SIZEOF([long int])
+dnl AC_CHECK_SIZEOF([long unsigned int])
+AC_CHECK_SIZEOF([long long int])
+dnl AC_CHECK_SIZEOF([long long unsigned int])
+AC_CHECK_SIZEOF([size_t])
+AC_CHECK_SIZEOF([float])
+AC_CHECK_SIZEOF([double])
+AC_CHECK_SIZEOF([long double])
+# to use complex we need a specialized header
+AC_CHECK_SIZEOF([complex])
+AC_CHECK_SIZEOF([float complex])
+AC_CHECK_SIZEOF([double complex])
+#
+m4_if(m4_version_compare(m4_defn([AC_AUTOCONF_VERSION]),[2.60]),-1, [],[AC_OPENMP()])
+#
+AC_CHECK_PROG([have_grep],[grep],[yes],[no])
+AC_CHECK_PROG([have_sed],[sed],[yes],[no])
+AC_CHECK_PROGS([OCTAVE],[$OCTAVE octave],[false],[])
+AC_CHECK_PROGS([DOXYGEN],[$DOXYGEN doxygen],[false],[])
+AC_CHECK_PROGS([HELP2MAN],[$HELP2MAN help2man],[false],[])
+AC_CHECK_PROGS([M4],[$M4 m4 gm4 /opt/freeware/bin/m4],[false],[])
+dnl Precious variables:
+AC_ARG_VAR([M4],[M4 macro preprocessor])
+AC_ARG_VAR([OCTAVE],[GNU Octave executable])
+AC_ARG_VAR([AR],[Library archiver program])
+AC_ARG_VAR([ARFLAGS],[Library archiver program flags])
+AC_ARG_VAR([LD],[Linker program])
+AC_ARG_VAR([DOXYGEN],[Doxygen program for generating documentation from librsb source code])
+AC_ARG_VAR([HELP2MAN],[Help2man is a program for generating man pages from program help output])
+AC_ARG_VAR([RSB_USER_SET_MEM_HIERARCHY_INFO],[Memory hierarchy info string for librsb; e.g.: L2:4/64/512K,L1:8/64/24K])
+AC_ARG_VAR([OPENMP_CFLAGS],[C compilation flags for OpenMP])
+AC_ARG_VAR([OPENMP_FCFLAGS],[Fortran compilation flags for OpenMP])
+dnl AC_ARG_VAR([SED],["sed program."])
+dnl AC_ARG_VAR([GREP],["grep program."])
+#
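+dnl The post-processing commands below turn the raw generated header into the
+dnl installed rsb-config.h: the sed expression prefixes every generated #define
+dnl with RSB_ (collapsing accidental RSB_RSB_ doubles), then the include guard
+dnl (presumably opened in rsb-config.h.hin) is closed, and the license header
+dnl plus the rsb-config.h.hin prologue are prepended.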
+AC_CONFIG_HEADERS([rsb-config.h],[sed 's/^#define /#define RSB_/g;s/ RSB_RSB_/ RSB_/g' rsb-config.h > rsb-config.h.tmp ; echo '#endif /* RSB_CONFIG_H_INCLUDED */' >> rsb-config.h.tmp ; cat $srcdir/rsb_license_header.inc $srcdir/rsb-config.h.hin rsb-config.h.tmp > rsb-config.h ; rm rsb-config.h.tmp])
+AC_C_BIGENDIAN()
+AC_FUNC_VPRINTF
+AC_HEADER_STDC
+AC_C_CONST()
+AC_C_INLINE()
+AC_TYPE_SIZE_T()
+AC_HEADER_TIME()
+dnl AC_STRUCT_TM()
+dnl AC_HEADER_STDBOOL
+dnl AC_C_VOLATILE
+dnl AC_CHECK_TYPES([ptrdiff_t])
+#
+AC_CHECK_FUNC([mlockall], AC_DEFINE([HAVE_MLOCKALL], [1], [If present, the mlockall function makes all allocations memory resident.]))
+AC_CHECK_FUNC([sysconf], AC_DEFINE([HAVE_SYSCONF], [1], [If present, the sysconf function gives lots of system info.]))
+dnl AC_CHECK_FUNC([hwloc_topology_init], AC_DEFINE([HAVE_HWLOC], [1], [If present, the hwloc_topology_init function gives lots of system info.]))
+AC_CHECK_FUNC([gethostname], AC_DEFINE([HAVE_GETHOSTNAME], [1], [If present, will give us host name.]))
+AC_CHECK_FUNC([posix_memalign], AC_DEFINE([HAVE_POSIX_MEMALIGN], [1], [The POSIX aligned memory allocator. (posix_memalign() has been available since glibc 2.1.91.)]))
+AC_CHECK_FUNC([memalign], AC_DEFINE([HAVE_MEMALIGN], [1], [This function is obsolete.]))
+AC_CHECK_FUNC([getenv], AC_DEFINE([HAVE_GETENV], [1], [Get an environment variable.]))
+AC_CHECK_FUNC([fileno], AC_DEFINE([HAVE_FILENO], [1], [fileno(): C FILE to posix file descriptor.]))
+dnl AC_CHECK_FUNC([gzdirect], AC_DEFINE([HAVE_GZDIRECT], [1], []))
+
+AC_CHECK_FUNCS([rand isatty])
+AC_CHECK_FUNCS([sched_getaffinity])
+AC_CHECK_FUNCS([memset memcmp strncmp strcpy])
+AC_CHECK_FUNCS([dup])
+dnl AC_CHECK_FUNCS([read write])
+AC_CHECK_FUNCS([fread fwrite])
+#dnl	***********************************************************************
+#dnl					THESE ARE ESSENTIAL
+#dnl	***********************************************************************
+AC_CHECK_HEADERS([libgen.h])
+AC_CHECK_HEADERS([sched.h])
+AC_CHECK_HEADERS([dmalloc.h])
+dnl	AC_CHECK_HEADERS([duma.h])
+AC_CHECK_FUNC([getopt_long], AC_DEFINE([HAVE_GETOPT_LONG], [1], [getopt_long is GNU candy]))
+AC_CHECK_FUNC([times], AC_DEFINE([HAVE_TIMES], [1], [times]))
+AC_CHECK_FUNC([gettimeofday], AC_DEFINE([HAVE_GETTIMEOFDAY], [1], [gettimeofday]))
+AC_CHECK_FUNC([setenv], AC_DEFINE([HAVE_SETENV], [1], [setenv]))
+dnl It would be nice to use alloca (the on-stack allocator), but our code is not amenable to it.
+dnl AC_CHECK_FUNC([alloca], AC_DEFINE([HAVE_ALLOCA], [1], [alloca]))
+dnl AC_CHECK_HEADERS([alloca.h])
+AC_CHECK_HEADERS([omp.h])
+AC_CHECK_HEADERS([getopt.h])
+AC_CHECK_HEADERS([malloc.h memory.h])
+AC_CHECK_HEADERS([pthread.h]) dnl unused, for now
+AC_CHECK_HEADERS([papi.h])
+AC_CHECK_HEADERS([gsl/gsl_sort.h])
+AC_CHECK_HEADERS([times.h]) dnl for times (optional)
+AC_CHECK_HEADERS([sys/utsname.h])
+AC_CHECK_HEADERS([sys/resource.h]) dnl for getrusage (optional)
+AC_CHECK_HEADERS([complex.h])
+AC_CHECK_HEADERS([assert.h])
+AC_CHECK_HEADERS([rpc/xdr.h])
+AC_CHECK_HEADERS([sys/mman.h]) dnl mlockall
+AC_CHECK_HEADERS([stdint.h])
+AC_CHECK_HEADERS([unistd.h]) dnl read write
+AC_CHECK_HEADERS([stdio.h]) dnl printf
+AC_CHECK_HEADERS([stdarg.h]) dnl vprintf
+AC_CHECK_HEADERS([time.h]) dnl 
+AC_CHECK_HEADERS([regex.h]) dnl 
+AC_CHECK_HEADERS([string.h] [strings.h] [ctype.h]) dnl 
+dnl
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [
+#ifdef __MINGW32__
+#error "You are likely using MINGW (Minimalist GNU for Windows)."
+#else
+    /* "You are likely not using MINGW (Minimalist GNU for Windows)." */
+#endif
+])], [ac_cv_we_use_mingw=no], [ac_cv_we_use_mingw=yes])
+if test "x$ac_cv_we_use_mingw" = xyes; then
+	ac_cv_mingw_add="-D__USE_MINGW_ANSI_STDIO=1"
+      	AC_MSG_NOTICE([You are likely using MINGW (Minimalist GNU for Windows). Adding ${ac_cv_mingw_add} to compilation flags to avoid broken C99 support.])
+	CFLAGS="${CFLAGS} ${ac_cv_mingw_add}"
+fi
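+dnl (Defining __USE_MINGW_ANSI_STDIO=1 switches MinGW to its C99-conformant
+dnl printf/scanf family, so that e.g. the "%zu" length modifier behaves as
+dnl expected.)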
+dnl
+dnl	In the following, we will use  AC_ARG_ENABLE  for proper options/features and  AC_ARG_WITH  for packages, environment, or compilation switches.
+# rsbench-only LIBS and CFLAGS:
+RSB_RSBENCH_LIBS=
+RSB_RSBENCH_CFLAGS=
+dnl
+AC_ARG_WITH(math, AC_HELP_STRING([--with-math], [Specify the math library]), [if test "x$withval" = xno; then want_math_libs= ; else want_math_libs="$withval" ; fi], [want_math_libs="-lm"])
+AC_CHECK_HEADERS([math.h], [LIBS="${LIBS} $want_math_libs"], [break])
+
+AC_ARG_WITH(xdr, AC_HELP_STRING([--with-xdr], [Specify XDR library.  e.g.: --with-xdr="..." ]), [if test "x$withval" = xno; then want_xdr_libs= ; else want_xdr_libs="$withval" ; fi], [want_xdr_libs=" "])
+AC_CHECK_HEADERS([rpc/xdr.h])
+
+AH_TEMPLATE([RSB_WITH_HWLOC])
+AC_ARG_WITH(hwloc, AC_HELP_STRING([--with-hwloc], [Specify the hwloc library (EXPERIMENTAL)]), [if test "x$withval" = xno; then want_hwloc_libs= ; else want_hwloc_libs="$withval"; if test "x$want_hwloc_libs" = x"yes" ; then want_hwloc_libs="-lhwloc" ; fi; enable_hwloc=yes ; fi], [want_hwloc_libs=" "])
+AC_CHECK_HEADERS([hwloc.h], [if test "x$enable_hwloc" != x -a "x$want_hwloc_libs" != x ; then LIBS="${LIBS} $want_hwloc_libs"; fi;], [break])
+ 
+if test "x${CC}" = x"xlc" -o "x${CC}" = x"xlc_r"  ; then
+      	AC_MSG_NOTICE([Disabling XDR support: our API was only tested on Linux.])
+	want_xdr_libs=
+	ac_cv_header_rpc_xdr_h=no
+fi
+
+if test x"$want_xdr_libs" != x"" && test "x$ac_cv_header_rpc_xdr_h" = xyes ; then
+	dnl use --without-xdr to disable it
+      	AC_MSG_NOTICE([Enabling xdr support.])
+	AC_DEFINE([RSB_WANT_XDR_SUPPORT],[1],[experimental.])
+	LIBS="${LIBS} $want_xdr_libs"
+	else
+	AC_MSG_WARN([No xdr headers found.])
+	AC_DEFINE([RSB_WANT_XDR_SUPPORT],[0],[experimental.])
+fi
+dnl
+AH_TEMPLATE([RSB_WANT_DMALLOC])
+AC_ARG_WITH(dmalloc, AC_HELP_STRING([--with-dmalloc], [With dmalloc (experimental).]),
+[if test "x$withval" = xyes; then
+	enable_dmalloc=1;
+	if test x"$ac_cv_header_dmalloc_h" = xyes ; then
+		LIBS="${LIBS} -ldmalloc"
+		DMALLOC_CFLAGS="-DDMALLOC"
+	fi
+ else 
+	enable_dmalloc=0
+ fi],[enable_dmalloc=0])
+AC_DEFINE_UNQUOTED([RSB_WANT_DMALLOC],[$enable_dmalloc],[])
+dnl
+dnl
+dnl
+AH_TEMPLATE([RSB_WANT_MKL])
+AC_ARG_WITH(mkl-include, AC_HELP_STRING([--with-mkl-include], [Specify the MKL (Intel Math Kernel Library) library headers path. e.g.: --with-mkl-include="/opt/intel/mkl/include". ]), [if test "x$withval" = xno; then MKL_INCLUDE= ; else if test "x$withval" = xyes; then MKL_INCLUDE="" ; else MKL_INCLUDE="$withval" ; fi  ; fi], [true] )
+
+AC_ARG_WITH(mkl, AC_HELP_STRING([--with-mkl], [Specify the MKL (Intel Math Kernel Library) library to be used with the benchmarking program. E.g.: --with-mkl="...". Include options should be specified in the MKL_INCLUDE environment variable. ]), [if test "x$withval" = xno; then want_mkl_libs= ; else if test "x$withval" = xyes; then want_mkl_libs="-static -L/opt/intel/mkl/lib/ia32/ -lmkl_solver -Wl,--start-group -lmkl_intel -lmkl_gnu_thread -lmkl_core -Wl,--end-group -fopenmp -lpthread" ; [...]
+
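+dnl An illustrative invocation (the paths and library list below are examples
+dnl and depend on the local MKL installation; treat this as a sketch):
+dnl   ./configure --with-mkl="-L/opt/intel/mkl/lib/intel64 -lmkl_rt" \
+dnl               --with-mkl-include="/opt/intel/mkl/include"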
+dnl
+dnl 
+dnl
+AC_ARG_WITH(zlib, AC_HELP_STRING([--with-zlib], [Specify Z library.  e.g.: --with-zlib="..." for reading gzip-compressed matrix files.]), [if test "x$withval" = xno; then want_zlib_libs="" ; else want_zlib_libs="$withval" ; fi], [want_zlib_libs=""])
+AC_CHECK_HEADERS([zlib.h])
+want_zlib_support="no"
+if test x"$want_zlib_libs" != x"" && test "x$ac_cv_header_zlib_h" = xyes ; then
+	dnl use --without-zlib to disable it
+      	AC_MSG_NOTICE([Enabling zlib support.])
+	AC_DEFINE([RSB_WANT_ZLIB_SUPPORT],[1],[Support for reading gzipped matrices.])
+	if test x"$want_zlib_libs" = x"yes" ; then want_zlib_libs=-lz; fi
+	LIBS="${LIBS} $want_zlib_libs"
+	want_zlib_support="yes"
+	else
+dnl	AC_MSG_WARN([No zlib headers found.])
+	AC_DEFINE([RSB_WANT_ZLIB_SUPPORT],[0],[Support for reading gzipped matrices.])
+	want_zlib_support="no"
+fi
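+dnl For instance (assuming zlib is installed), configuring with
+dnl   ./configure --with-zlib="-lz"
+dnl enables reading gzip-compressed matrix files directly.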
+dnl
+dnl 
+dnl
+AC_ARG_WITH(ompio, AC_HELP_STRING([--with-ompio], [Use OpenMP and fgets_unlocked() for parallel I/O]), [if test "x$withval" = xno; then want_ompio="no" ; else want_ompio="yes" ; fi], [want_ompio="no"])
+
+if test x"$want_ompio" = x"yes" ; then
+      	AC_MSG_NOTICE([Enabling OpenMP + fgets_unlocked() IO support.])
+	AC_DEFINE([RSB_WANT_OMPIO_SUPPORT],[1],[Support for reading matrices in parallel (Experimental, untested).])
+	else
+	AC_DEFINE([RSB_WANT_OMPIO_SUPPORT],[0],[Support for reading matrices in parallel (Experimental, untested).])
+fi
+dnl
+dnl 
+dnl
+AC_CHECK_HEADERS([limits.h], [break], [break])
+AC_CHECK_HEADERS([signal.h], [break], [break])
+dnl AC_CHECK_HEADERS([bits/sigaction.h], [], [],
+dnl [#ifdef RSB_HAVE_SIGNAL_H
+dnl # include <signal.h>
+dnl #endif
+dnl ])
+# an AIX specific check
+AC_CHECK_HEADERS([sys/systemcfg.h], [break], [break])
+AC_DEFINE([RSB_WANT_VERBOSE_MESSAGES],[0],[If set, the library will be much more verbose. Should be enabled for debugging purposes only.])
+AC_DEFINE([RSB_WANT_KERNELS_DEBUG],[1],[If set, RSB_WANT_KERNELS_DEBUG will enable comparative consistency checking of the multiplying kernels against a naive, trusted implementation.])
+AC_DEFINE([RSB_SORT_IN_PLACE],[0],[If set, sort operations will happen in place.])
+dnl AC_DEFINE([RSB_WANT_BLOCK_TRAILING_STRUCT_QUICK],[0],[This flag is still unsupported])
+AC_DEFINE([RSB_BLOCK_SMALL_INDICES],[1],[If set, the library will use smaller indices in blocks.])
+dnl
+dnl
+detected_memhinfo=`$srcdir/scripts/linux-sys-cache.sh`;
+dnl
+AC_ARG_WITH(nounroll-cflag, AC_HELP_STRING([--with-nounroll-cflag], [Specify the compiler flag that disables loop unrolling (if unset, it will be guessed).]),
+        [if test "x$withval" = xno; then userset_nounroll_cflag="" ; else userset_nounroll_cflag="$withval" ; fi] , [userset_nounroll_cflag="";])
+dnl
+default_want_int_verrbosity="0";
+AC_ARG_ENABLE(internals-error-verbosity, AC_HELP_STRING([--enable-internals-error-verbosity], [Set error verbosity level of library internal functions (RSB_INT_ERR_VERBOSITY): can be 0 (no printout at all, never), 1 (on error). Use this to debug the library itself. Experimental.]),
+        [if test "x$enableval" = xno; then want_int_verrbosity="${default_want_int_verrbosity}" ; else want_int_verrbosity="$enableval" ; fi] , [want_int_verrbosity="${default_want_int_verrbosity}";])
+dnl
+default_want_ext_verrbosity="0";
+AC_ARG_ENABLE(interface-error-verbosity, AC_HELP_STRING([--enable-interface-error-verbosity], [Set error verbosity level of library interface functions (RSB_OUT_ERR_VERBOSITY): can be 0 (no printout), 1 (printout on error, if requested), 2 (printout on error), 99 (exit on error). Use this to debug your program or to better understand the library usage.]),
+        [if test "x$enableval" = xno; then want_ext_verrbosity="${default_want_ext_verrbosity}" ; else want_ext_verrbosity="$enableval" ; fi] , [want_ext_verrbosity="${default_want_ext_verrbosity}";])
+dnl
+default_want_io_level=0;
+AC_ARG_ENABLE(io-level, AC_HELP_STRING([--enable-io-level], [Set input/output functionality level (RSB_WANT_IO_LEVEL), a number between 0 and 7, as any sum combination of 1 (standard input/output), 2 (standard error), 4 (arbitrary descriptors). (experimental).]),
+        [if test "x$enableval" = xno; then want_io_level="${default_want_io_level}" ; else want_io_level="$enableval" ; fi] , [want_io_level="7";])
+dnl
+AC_ARG_WITH(max-threads, AC_HELP_STRING([--with-max-threads], [Maximal number of supported threads (default 64).]),
+        [if test "x$withval" = xno; then want_max_threads="64" ; else want_max_threads="$withval" ; fi] , [want_max_threads="64";])
+dnl
+AC_ARG_WITH(memhinfo, AC_HELP_STRING([--with-memhinfo], [Compile with user-specified memory hierarchy information, which can be overridden by runtime detection and by reading the RSB_USER_SET_MEM_HIERARCHY_INFO environment variable at runtime.]),
+        [if test "x$withval" = xno; then memhinfo="" ; else memhinfo="$withval" ; fi] , [memhinfo="";])
+AC_SUBST([RSB_USER_SET_MEM_HIERARCHY_INFO],"${memhinfo}")
+AC_SUBST([RSB_DETECTED_MEM_HIERARCHY_INFO],"${detected_memhinfo}")
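+dnl Example (same format as the RSB_USER_SET_MEM_HIERARCHY_INFO variable above):
+dnl   ./configure --with-memhinfo="L2:4/64/512K,L1:8/64/24K"
+dnl where each level presumably reads as associativity/line size/total size.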
+dnl AC_DEFINE([RSB_USER_SET_MEM_HIERARCHY_INFO],[$memhinfo],[If not null, the library will rely on this for memory hierarchy info.])
+dnl
+dnl
+AC_ARG_WITH(ar, AC_HELP_STRING([--with-ar], [Specify the library archiver program explicitly.]), [if test "x$withval" = xno; then true ; else AR="$withval" ; fi] , [ AR="$AR" ] )
+AC_ARG_WITH(arflags, AC_HELP_STRING([--with-arflags], [Specify the library archiver program flags explicitly.]), [if test "x$withval" = xno; then true ; else ARFLAGS="$withval" ; fi] , [ ARFLAGS="$ARFLAGS" ] )
+AC_ARG_WITH(m4, AC_HELP_STRING([--with-m4], [Specify the M4 preprocessor program explicitly.]), [if test "x$withval" = xno; then true ; else M4="$withval" ; fi] , [true;] )
+dnl
+m4_if(m4_version_compare(m4_defn([AC_AUTOCONF_VERSION]),[2.60]),-1, [
+AC_ARG_WITH(openmp, AC_HELP_STRING([--with-openmp], [Use the OpenMP multithreading mechanisms.]),
+        [if test "x$withval" = xno; then enable_openmp=no ; openmp_flags= ; else openmp_flags="$withval" ; enable_openmp=yes ; fi] , [enable_openmp=yes])
+],[])
+dnl
+if test "x$enable_openmp" = x ; then
+	enable_openmp=yes
+fi
+dnl
+if test "x$enable_openmp" != x"yes" && test "x$want_ompio" = x"yes"; then
+	AC_MSG_ERROR([You must enable OpenMP if you want OpenMP-backed I/O!])
+fi
+dnl
+#dnl	***********************************************************************
+dnl define(DEFAULT_MATRIX_OPS,[spmv_uaua,spmv_sxsa,spmv_uxua,infty_norm,rowssums,spmv_sasa,spsv_sxsx,spsv_uxua])
+define(DEFAULT_MATRIX_OPS,[spmv_uaua,spmv_uauz,spmv_uxua,spmv_unua,spmv_sasa,spsv_uxua,spmv_sxsa,spsv_sxsx,infty_norm,rowssums,scale])
+dnl define(DEFAULT_MATRIX_OPS,[spmv_uaua,spmv_uauz,spmv_uxua,spmv_unua,spmv_sasa,spmv_sxsa,infty_norm,rowssums,scale])
+dnl define(DEFAULT_UNROLLS,[1,2,4])
+define(DEFAULT_UNROLLS,[1])
+define(RSB_M4_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR,[16])
+dnl define(DEFAULT_TYPES,[int,double])
+define(DEFAULT_TYPES,["double,float,float complex,double complex"])
+#dnl	***********************************************************************
+# the default block unrolls
+default_unrolls=DEFAULT_UNROLLS
+default_util_unrolls=RSB_M4_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR
+# the default types for macro-generated code
+blas_matrix_types="double,float,float complex,double complex"
+psblas_matrix_types="${blas_matrix_types}"
+non_blas_matrix_types="int"
+all_matrix_types="$non_blas_matrix_types,$blas_matrix_types"
+#default_types=int,double,float,float complex, double complex
+# float complex and double complex are c99 types
+default_types=DEFAULT_TYPES
+# the default matrix operations
+blas_matrix_ops=spmv_uaua,spmv_uauz,spmv_uxua,spmv_unua,spmv_sasa,spsv_uxua,spmv_sxsa,spsv_sxsx
+dnl
+psblas_matrix_ops="${blas_matrix_ops}",infty_norm,rowssums,scale
+dnl
+dnl extra_blas_matrix_ops=,spmv_sxsx,spmv_uxux
+extra_blas_matrix_ops= # 20140719 these kernels are not active at the moment
+#
+non_blas_matrix_ops=infty_norm,rowssums,scale
+dnl non_blas_matrix_ops=...spmm_az # 20140719 this kernel is not active at the moment
+all_matrix_ops="$blas_matrix_ops,$non_blas_matrix_ops$extra_blas_matrix_ops"
+#
+default_matrix_ops=DEFAULT_MATRIX_OPS
+#dnl	***********************************************************************
+dnl AC_ARG_WITH(block-unrolls, AC_HELP_STRING([--with-block-unrolls], [Generate unrolled kernels for blocks with specified columns/rows (default:DEFAULT_UNROLLS)(experimental)]), [default_unrolls="$withval"],[default_unrolls="$default_unrolls"])
+#dnl	***********************************************************************
+dnl AC_ARG_WITH(row-unrolls, AC_HELP_STRING([--with-row-unrolls], [Generate unrolled kernels for blocks with specified rows (default:DEFAULT_UNROLLS)(experimental)]), [row_unrolls="$withval"],[row_unrolls="$default_unrolls"])
+row_unrolls="$default_unrolls"
+#dnl	***********************************************************************
+dnl AC_ARG_WITH(column-unrolls, AC_HELP_STRING([--with-column-unrolls], [Generate unrolled kernels for blocks with specified columns (default:DEFAULT_UNROLLS) (experimental)]), [column_unrolls="$withval"],[column_unrolls="$default_unrolls"])
+column_unrolls="$default_unrolls"
+#dnl	***********************************************************************
+AC_DEFINE([RSB_WANT_SPARSE_BLAS_LEVEL_1],[1],[If set, a reference, unoptimized Sparse BLAS Level 1 interface will be functional.])
+#dnl	***********************************************************************
+AC_ARG_ENABLE(matrix-types, AC_HELP_STRING([--enable-matrix-types], [Generate kernels for specified types (default:DEFAULT_TYPES) (you can specify 'all' to get all of them, or 'blas' for Sparse BLAS ones)]), [want_matrix_types="$enableval"],[want_matrix_types="$default_types"])
+#dnl	***********************************************************************
+AC_ARG_ENABLE(matrix-ops, AC_HELP_STRING([--enable-matrix-ops], [Generate kernels for the specified matrix operations (default:DEFAULT_MATRIX_OPS) (you can specify 'all' to get all of them, 'blas' for only the Sparse BLAS ones, or 'psblas' for only the PSBLAS ones) (Experimental: the "all" setting is recommended.)]), [want_matrix_ops="$enableval"],[want_matrix_ops="$default_matrix_ops"])
+#dnl	***********************************************************************
+AC_ARG_ENABLE(vector-utils-loop-unrolls, AC_HELP_STRING([--enable-vector-utils-loop-unrolls], [Loop unrolling of generated vector utility functions (default:RSB_M4_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR)]), [util_unrolls="$enableval"],[util_unrolls="$default_util_unrolls"])
+#dnl	***********************************************************************
+AC_ARG_ENABLE(octave-testing, AC_HELP_STRING([--enable-octave-testing], [Enable GNU Octave based testing.]),
+[if test "x$enableval" = xno; then
+	enable_octave_testing=no
+ else 
+	enable_octave_testing=yes
+ fi],[enable_octave_testing=yes])
+#dnl	***********************************************************************
+if test "x$want_matrix_types" = xall; then
+	AC_MSG_NOTICE([Enabling all matrix types.])
+	want_matrix_types="${all_matrix_types}";
+else 
+	true;
+	if test "x$want_matrix_types" = x"blas"; then
+		AC_MSG_NOTICE([Enabling all matrix types for Sparse BLAS (S,C,D,Z).])
+		want_matrix_types="${blas_matrix_types}";
+		#if test "x$enable_octave_testing" = xyes; then want_matrix_types=${want_matrix_types},int ; fi
+	else 
+	    if test "x$want_matrix_types" = x"psblas"; then
+		AC_MSG_NOTICE([Enabling matrix types for Parallel Sparse BLAS (PSBLAS).])
+		want_matrix_types="${psblas_matrix_types}";
+		#if test "x$enable_octave_testing" = xyes; then want_matrix_types=${want_matrix_types},int ; fi
+	    else
+		true;
+	    fi 
+	fi
+fi
+#dnl	***********************************************************************
+if test "x$want_matrix_ops" = xall; then
+	AC_MSG_NOTICE([Enabling all of the matrix ops.])
+	want_matrix_ops="${all_matrix_ops}";
+else 
+	if test "x$want_matrix_ops" = xblas; then
+		AC_MSG_NOTICE([Enabling matrix ops for Sparse BLAS.])
+		want_matrix_ops="${blas_matrix_ops}";
+	else
+	    if test "x$want_matrix_ops" = xpsblas; then
+		AC_MSG_NOTICE([Enabling matrix ops for Parallel Sparse BLAS (PSBLAS).])
+		want_matrix_ops="${psblas_matrix_ops}";
+	    else
+		true;
+	    fi 
+	fi
+fi
+#dnl	***********************************************************************
+if test x"$want_matrix_types" != x"$default_types" -o x"$want_matrix_ops" != x"$default_matrix_ops" -o x"$util_unrolls" != x"$default_util_unrolls"; then
+if test x"$M4" = x"false"; then
+	AC_MSG_ERROR([Did not specify an m4 processor, so code generation from m4 files is disabled (and so configure time specification of non default types, operations, unrolls) !])
+fi
+fi
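+dnl In other words, a build with non-default kernels needs a working m4; a
+dnl hypothetical invocation:
+dnl   ./configure M4=/usr/bin/m4 --enable-matrix-types="double,double complex"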
+#dnl	***********************************************************************
+sparse_blas_interface_default=yes
+AC_ARG_ENABLE(sparse-blas-interface, AC_HELP_STRING([--disable-sparse-blas-interface], [Build a Sparse BLAS interface to librsb.]),
+[if test "x$enableval" = xno; then
+	sparse_blas_interface=no
+ else 
+	sparse_blas_interface=yes
+ fi],[sparse_blas_interface="${sparse_blas_interface_default}"])
+#dnl	***********************************************************************
+enable_looping_kernels=no
+AH_TEMPLATE([RSB_WANT_LOOPING_KERNELS])
+#dnl	***********************************************************************
+AC_ARG_WITH(oski, AC_HELP_STRING([--with-oski], [OSKI comparative benchmarking (WARNING: be sure to set 
+	OSKI_INCLUDE, OSKI_LUA_PATH, OSKI_PATH environment variables first). UNFINISHED.]),
+[if test "x$withval" = xno; then
+	enable_oski=no
+ else 
+	enable_oski=yes
+ fi],[enable_oski=no])
+#dnl	***********************************************************************
+#AC_ARG_WITH(papi, AC_HELP_STRING([--with-papi], [PAPI (Performance Application Programming Interface). UNFINISHED.]),
+#[if test "x$withval" = xno; then
+#	enable_papi=no
+# else 
+#	enable_papi=yes
+# fi],[enable_papi=no])
+#dnl	***********************************************************************
+AH_TEMPLATE([RSB_WITH_LIKWID])
+AC_ARG_WITH(likwid, AC_HELP_STRING([--with-likwid], [LIKWID support (will add the LIKWID_LIBS variable to LIBS and LIKWID_CFLAGS to CFLAGS). UNFINISHED.]),
+[if test "x$withval" = xno; then
+	enable_likwid=no
+ else 
+	enable_likwid=yes
+ fi],[enable_likwid=no])
+#dnl	***********************************************************************
+AC_ARG_ENABLE(allocator-wrapper, AC_HELP_STRING([--enable-allocator-wrapper], [If enabled, librsb will keep count of internal memory allocations via allocator function wrappers.]),
+[if test "x$enableval" = xno; then
+	disable_allocator_wrapper=yes
+ else 
+	disable_allocator_wrapper=no
+ fi],[disable_allocator_wrapper=yes])
+#dnl	***********************************************************************
+AC_ARG_ENABLE(alignment, AC_HELP_STRING([--disable-alignment], [By default, we allocate aligned memory. This can be disabled.]),
+[if test "x$enableval" = xno; then
+	enable_alignment=no
+ else 
+	enable_alignment=yes
+ fi],[enable_alignment=yes])
+enable_b=yes
+enable_c=yes
+#dnl	***********************************************************************
+AC_ARG_ENABLE(librsb-stats, AC_HELP_STRING([--enable-librsb-stats], [If enabled, will allow collection of time statistics in librsb operations.]),
+[if test "x$enableval" = xno; then
+	enable_librsb_stats=no
+ else 
+	enable_librsb_stats=yes
+ fi],[enable_librsb_stats=no])
+#dnl	***********************************************************************
+AC_ARG_ENABLE(rsb-num-threads, AC_HELP_STRING([--enable-rsb-num-threads], [RSB_NUM_THREADS environment variable to control number of threads (Experimental, with effect on rsb_spmv/rsb_spmm).]),
+[if test "x$enableval" = xno; then
+	enable_rsb_num_threads=no
+ else 
+	enable_rsb_num_threads=yes
+ fi],[enable_rsb_num_threads=no])
+#dnl	***********************************************************************
+dnl AC_ARG_ENABLE(fortran-interface, AC_HELP_STRING([--disable-fortran-interface], [Fortran interface.]),
+dnl [if test "x$enableval" = xno; then
+dnl	enable_fortran=no
+dnl else 
+dnl 	enable_fortran=yes
+dnl  fi],[enable_fortran=yes])
+#dnl	***********************************************************************
+AC_ARG_ENABLE(fortran-module-install, AC_HELP_STRING([--enable-fortran-module-install], [Install (compiler specific) Fortran module (blas_sparse.mod) (experimental).]),
+[if test "x$enableval" = xno; then
+	want_blas_sparse_mod_install=no
+ else 
+	want_blas_sparse_mod_install=yes
+ fi],[want_blas_sparse_mod_install=no])
+#dnl	***********************************************************************
+want_install_pkg_config_default=no
+AC_ARG_ENABLE(pkg-config-install, AC_HELP_STRING([--enable-pkg-config-install], [Install a pkg-config file (librsb.pc).]),
+[if test "x$enableval" = x"yes"; then
+	want_install_pkg_config=yes
+	AC_MSG_NOTICE([Will install the pkg-config librsb.pc file.])
+ else 
+	want_install_pkg_config=no
+	AC_MSG_NOTICE([Will not install the pkg-config librsb.pc file (--enable-pkg-config-install to change).])
+ fi],[want_install_pkg_config="${want_install_pkg_config_default}"])
+#dnl	***********************************************************************
+dnl AC_ARG_ENABLE(fortran-blas-sparse-header-install, AC_HELP_STRING([--enable-fortran-blas-sparse-header-install], [Install (non standard) Sparse BLAS Fortran header (blas_sparse.fi) (experimental).]),
+dnl [if test "x$enableval" = xno; then
+dnl dnl 	want_blas_sparse_fi_install=no
+dnl  else 
+dnl 	want_blas_sparse_fi_install=yes
+dnl fi],[want_blas_sparse_fi_install=no])
+dnl want_blas_sparse_fi_install="${sparse_blas_interface}"
+#dnl	***********************************************************************
+AC_ARG_ENABLE(doc-build, AC_HELP_STRING([--enable-doc-build], [If doxygen is detected or supplied (DOXYGEN environment variable), documentation will be rebuilt. If 'help2man' (HELP2MAN) is also present, it will be used to build additional man pages.]),
+[if test "x$enableval" = xno; then
+	want_build_doc=no
+ else 
+	want_build_doc=yes
+ fi],[want_build_doc=no])
+#dnl	***********************************************************************
+AC_ARG_ENABLE(shlib-linked-examples, AC_HELP_STRING([--enable-shlib-linked-examples], [Shared library based examples (experimental: developer only).]),
+[if test "x$enableval" = xno; then
+	want_rsb_dl=no
+ else 
+	want_rsb_dl=yes
+ fi],[want_rsb_dl=no])
+#dnl	***********************************************************************
+AC_ARG_ENABLE(c-examples, AC_HELP_STRING([--disable-c-examples], [C example programs building.]),
+[if test "x$enableval" = xno; then
+	enable_c_examples=no
+ else 
+	enable_c_examples=yes
+ fi],[enable_c_examples=yes])
+#dnl	***********************************************************************
+AC_ARG_ENABLE(fortran-examples, AC_HELP_STRING([--disable-fortran-examples], [Fortran test and example programs generation and building. (experimental)]),
+[if test "x$enableval" = xno; then
+	enable_fortran_examples=no
+ else 
+	enable_fortran_examples=yes
+ fi],[enable_fortran_examples=yes])
+#dnl	***********************************************************************
+AC_C_RESTRICT()
+AC_ARG_ENABLE(restrict, AC_HELP_STRING([--disable-restrict], [Use the restrict keyword.]),
+[if test "x$enableval" = xno; then
+	enable_restrict=no
+ else 
+	enable_restrict=yes
+ fi],[enable_restrict=yes])
+#dnl	***********************************************************************
+AC_ARG_WITH(c99-flag, AC_HELP_STRING([--with-c99-flag], [Add the -std=c99 compilation flag to CFLAGS.]),
+[if test "x$withval" = xno; then
+	enable_c99=no
+ else 
+	enable_c99=yes
+ fi],[enable_c99=yes])
+#dnl	***********************************************************************
+want_spsm_diagonal_check_default=yes
+AC_ARG_ENABLE(zero-division-checks-on-solve, AC_HELP_STRING([--enable-zero-division-checks-on-solve], [Prevents zero-division when performing triangular solution.]),
+[if test "x$enableval" = xyes; then
+	want_spsm_diagonal_check=yes
+ else 
+	want_spsm_diagonal_check=no
+ fi],[want_spsm_diagonal_check="${want_spsm_diagonal_check_default}"])
+#dnl	***********************************************************************
+want_sigaction_in_rsbench=no
+AC_ARG_ENABLE(sigaction-interruptible-rsbench, AC_HELP_STRING([--enable-sigaction-interruptible-rsbench], [rsbench will be interruptible using sigaction (non-standard; may break the build).]),
+[if test "x$enableval" = xyes; then
+	want_sigaction_in_rsbench=yes;
+ else 
+	want_sigaction_in_rsbench=no;
+ fi],[want_sigaction_in_rsbench=no;])
+if test x$want_sigaction_in_rsbench = xno ; then
+	AC_DEFINE([RSB_WANT_ACTION_SIGNAL],[1],[experimental.])
+fi
+#dnl	***********************************************************************
+enable_optimize=no
+#dnl	***********************************************************************
+AC_ARG_ENABLE(internal-headers-install, AC_HELP_STRING([--enable-internal-headers-install], [Install internal headers (only for debugging / inspection purposes, not for ordinary users).]),
+[if test "x$enableval" = xyes; then
+ 	enable_ihi=yes;
+ 	AC_MSG_NOTICE([Will also install internal headers.])
+else 
+ 	enable_ihi=no;
+fi],[enable_ihi=no])
+#dnl	***********************************************************************
+RSB_USE_ASSERT="";
+AC_ARG_ENABLE(debug-getenvs, AC_HELP_STRING([--enable-debug-getenvs], [Enable (undocumented) developer oriented getenv-based controls.]),
+[if test "x$enableval" = xyes; then
+	RSB_USE_ASSERT=1;
+	AC_DEFINE([RSB_ALLOW_INTERNAL_GETENVS],[1],[Extra (undocumented) developer oriented control switches.])
+else 
+	AC_DEFINE([RSB_ALLOW_INTERNAL_GETENVS],[0],[Extra (undocumented) developer oriented control switches.])
+fi],[true;])
+#dnl	***********************************************************************
+RSB_USE_ASSERT="";
+AC_ARG_ENABLE(debug, AC_HELP_STRING([--enable-debug], [Compile with debug flags and enable assertions and other internals. This will slow down the code considerably.]),
+[if test "x$enableval" = xyes; then
+	enable_debug=yes;
+	RSB_USE_ASSERT=1;
+dnl
+	want_int_verrbosity=1; # FIXME: this shall be removed from here, once the library gets stable for release!
+	AC_MSG_NOTICE([With the debug switch enabled, also setting internal error verbosity level at value 1.])
+dnl
+else 
+	enable_debug=no
+fi],[enable_debug=no])
+#dnl	***********************************************************************
+AH_TEMPLATE([RSB_WITH_SPARSE_BLAS_INTERFACE])
+if test "x$sparse_blas_interface" = xyes; then
+	AC_MSG_NOTICE(Will build a Sparse BLAS interface to librsb.)
+	AC_DEFINE([RSB_WITH_SPARSE_BLAS_INTERFACE],[1],[Sparse BLAS interface compilation.])
+else 
+      	AC_MSG_NOTICE([Will not build a Sparse BLAS interface to librsb.])
+fi
+#dnl	***********************************************************************
+want_looping_kernels=0;
+if test "x$enable_looping_kernels" = xyes; then
+	AC_MSG_NOTICE(Enabling looping kernels.)
+	want_looping_kernels=1;
+	AC_DEFINE([RSB_WANT_LOOPING_KERNELS],[1],[Looping kernels.])
+
+
+else 
+dnl      	AC_MSG_NOTICE([Skipping the compilation of looping kernels.])
+	true
+fi
+#dnl	***********************************************************************
+AC_ARG_WITH(papi, AC_HELP_STRING([--with-papi], [Specify the PAPI library (UNFINISHED)]), [if test "x$withval" = xno; then want_papi_libs= ; else enable_papi=yes; want_papi_libs="$withval" ; fi], [want_papi_libs="-lpapi"])
+#dnl	***********************************************************************
+if test "x$enable_papi" = xyes; then
+	if test "x$ac_cv_header_papi_h" != xyes; then
+	AC_MSG_WARN([Header file <papi.h> not found, therefore we will not use it!])
+	else
+	if test "$want_papi_libs" = yes ; then want_papi_libs=-lpapi ; fi
+	RSB_RSBENCH_LIBS="${RSB_RSBENCH_LIBS} $want_papi_libs"
+	# TODO : set CFLAGS !
+      	AC_MSG_NOTICE(Enabling PAPI (Performance Application Programming Interface).)
+	# FIXME: should differentiate RSB_WANT_PERFORMANCE_COUNTERS from RSB_HAVE_PAPI
+	AC_DEFINE([RSB_WANT_PERFORMANCE_COUNTERS],[1],[Performance Counters.])
+	AC_DEFINE([RSB_HAVE_PAPI],[1],[Performance Application Programming Interface.])
+	fi
+else 
+      	dnl AC_MSG_NOTICE(Skipping the use of PAPI (Performance Application Programming Interface).)
+	true
+fi
+#dnl	***********************************************************************
+if test "x$enable_likwid" = xyes; then
+	if test "x$LIKWID_LIBS" = x; then
+		LIKWID_LIBS="-llikwid"
+	fi
+  	AC_MSG_NOTICE(Enabling support for LIKWID (LIKWID_CFLAGS=${LIKWID_CFLAGS}) (LIKWID_LIBS=${LIKWID_LIBS}).)
+	RSB_RSBENCH_LIBS="${RSB_RSBENCH_LIBS} ${LIKWID_LIBS}"
+	RSB_RSBENCH_CFLAGS="${RSB_RSBENCH_CFLAGS} ${LIKWID_CFLAGS}"
+	AC_DEFINE([RSB_WITH_LIKWID],[1],[LIKWID marker API support.])
+else
+	AC_DEFINE([RSB_WITH_LIKWID],[0],[LIKWID marker API support.])
+fi
+#dnl	***********************************************************************
+if test "x$enable_hwloc" = xyes; then
+	if test "x$want_hwloc_libs" != x; then
+		HWLOC_LIBS="$want_hwloc_libs"
+	fi
+	AC_MSG_NOTICE(Enabling support for HWLOC (HWLOC_CFLAGS=${HWLOC_CFLAGS}) (HWLOC_LIBS=${HWLOC_LIBS}).)
+	RSB_RSBENCH_LIBS="${RSB_RSBENCH_LIBS} ${HWLOC_LIBS}"
+	RSB_RSBENCH_CFLAGS="${RSB_RSBENCH_CFLAGS} ${HWLOC_CFLAGS}"
+	AC_DEFINE([RSB_WITH_HWLOC],[1],[HWLOC API support.])
+else
+	AC_DEFINE([RSB_WITH_HWLOC],[0],[HWLOC API support.])
+fi
+#dnl	***********************************************************************
+if test "x$disable_allocator_wrapper" = xyes; then
+AC_DEFINE([RSB_DISABLE_ALLOCATOR_WRAPPER],1,[If defined, will not account for internally used memory.])
+      AC_DEFINE([RSB_WANT_ALLOCATOR_LIMITS],0,[If 1, will allow the user to set hard limits to the memory allocated by librsb. Trespass attempts will fail.])
+      AC_MSG_NOTICE(Will disable memory allocators wrappers.)
+else 
+      AC_DEFINE([RSB_WANT_ALLOCATOR_LIMITS],1,[If 1, will allow the user to set hard limits to the memory allocated by librsb. Trespass attempts will fail.])
+      AC_MSG_NOTICE(Enabling memory allocators wrappers.)
+fi
+#dnl	***********************************************************************
+#dnl	***********************************************************************
+if test "x$enable_alignment" = xno; then
+      AC_MSG_NOTICE(Will not enforce aligned memory chunks allocation.)
+else 
+      AC_MSG_NOTICE(Will enforce aligned memory chunks allocation.)
+AC_DEFINE([RSB_WANT_DOUBLE_ALIGNED],[1],[On some architectures (notably modern Intel), floating point computations on non-double-aligned data lose some clock cycles.])
+fi
+#dnl	***********************************************************************
+if test "x$enable_librsb_stats" = xyes; then
+	AC_MSG_NOTICE(Enabling collection of time statistics in librsb operations (this introduces an overhead).)
+	AC_DEFINE([RSB_WANT_LIBRSB_STATS],[1],[Enabling collection of time statistics in librsb operations (this introduces an overhead).])
+fi
+#dnl	***********************************************************************
+if test "x$enable_rsb_num_threads" = xyes; then
+	AC_MSG_NOTICE(Enabling experimental RSB_NUM_THREADS environment variable.)
+	AC_DEFINE([RSB_WANT_RSB_NUM_THREADS],[1],[Enabling experimental RSB_NUM_THREADS environment variable.])
+fi
+#dnl	***********************************************************************
+dnl if test x$enable_fortran = xno; then
+dnl       AC_MSG_NOTICE(Will not build Fortran bindings.)
+dnl else 
+dnl       AC_MSG_NOTICE(Will build Fortran bindings.)
+dnl       AC_DEFINE([RSB_WANT_EXPERIMENTAL_FORTRAN_INTERFACE],[1],[Fortran interface.])
+dnl fi
+#dnl	***********************************************************************
+if test "x$enable_c_examples" = xno; then
+      AC_MSG_NOTICE(Will not build C examples.)
+else 
+      AC_MSG_NOTICE(Will build C examples.)
+fi
+#dnl	***********************************************************************
+if test "x$enable_fortran_examples" = xno; then
+      AC_MSG_NOTICE(Will not build Fortran examples.)
+else 
+      AC_MSG_NOTICE(Will build (experimental) Fortran examples.)
+fi
+#dnl	***********************************************************************
+if test "x$enable_restrict" = xyes; then
+      AC_MSG_NOTICE([Will use the C99 restrict keyword.])
+      AC_MSG_NOTICE([Will also add the -std=c99 flag.])
+      enable_c99=yes
+else 
+      AC_MSG_NOTICE([Will not use the C99 restrict keyword ])
+fi
+#dnl	***********************************************************************
+if test "x$want_build_doc" = xyes ; then
+      if test x"$DOXYGEN" = x"false"; then
+	      AC_MSG_ERROR([Doxygen not detected ! Please --disable-doc-build or supply a valid DOXYGEN variable.])
+	      want_build_doc=no
+      else
+	      AC_MSG_NOTICE([Will rebuild the documentation using "$DOXYGEN" as Doxygen executable.])
+	      if test x"$HELP2MAN" = x"false"; then
+      	   	AC_MSG_NOTICE([Program man pages will not generated: HELPMAN not detected.])
+	      fi
+      fi
+else 
+	AC_MSG_NOTICE([Will not use Doxygen to build documentation (--enable-doc-build to change).])
+fi
+#dnl	***********************************************************************
+no_unroll_flags=""
+#dnl	***********************************************************************
+if test xyes = xyes; then # we need a new flag here for this : FIXME
+	# I am not sure whether these flags are optimal, but they are the best among those tried.
+	
+	if test "x${CC}" = x"xlc" -o "x${CC}" = x"xlc_r"  && test "x$spigni_forte" = "x" ; then
+		# use -qnostrict to turn  off aggressive optimization (debug cases)
+		# use -q64 to enable 64 bit compilation and ar -X 64 cru ... for linking (FIXME)
+          	# -qfdpr
+          	# The compiler generates additional symbol information for use by the AIX "fdprpro" code optimizer.
+		# /opt/freeware/bin path is harmful with autotools on the ENEA grid environment, as it is the default one!
+		PATH="/bin/:$PATH"
+
+		# the following should only be used along with -q64. not without! (FIXME)
+		ARFLAGS="-X 64 cru"
+		SPCFLAGS="-q 64"
+
+		#spigni_forte="-O3 -lmass -lessl"
+		spigni_forte="-O3 -lmass -lessl -q64 -bmaxdata:0x1000000000"
+		#spigni_forte="-O3 -lmass -lessl -q64 -bmaxdata:0x70000000"
+		# FIXME : configure is not smart enough to add -X64 to ARFLAGS
+		# FIXME : CXXFLAGS too
+#		spigni_forte="-O3 -lmass -lessl"
+		restrict_flags="-qkeyword=restrict"
+		c99_flags="-qlanglvl=extc99 $restrict_flags"
+		debug_flags="-O0 -g"
+		openmp_flags="-qsmp=omp"
+		if test -f /bin/uname ; then
+			# some AFS systems (e.g.: ENEA.it grid) need this fix
+			uname_M="`/bin/uname -M`"
+		else
+			uname_M="`uname -M`"
+		fi
+		if test "x${uname_M}" = x"IBM,9118-575" ; then
+			spigni_forte="$spigni_forte -qarch=pwr5 -qtune=pwr5"
+		fi
+		if test "x${uname_M}" = x"IBM,7040-681" ; then
+			spigni_forte="$spigni_forte -qarch=pwr4 -qtune=pwr4"
+		fi
+		# verbose : 0-3
+		# profiling info: -pga
+		# -qbrowse 
+		no_unroll_flags="-qunroll=no"
+		# xlc has #pragma unroll !
+		AC_MSG_NOTICE([Guessing the compiler is xlc.])
+	fi
+
+        have_icc=no; # a fix to set openmp_flags correctly (the "-V" probe catches icc invoked under another name)
+        if test "x${CC}" = x"icc" || ${CC} -V 2>&1 | grep Intel ; then have_icc=yes ; fi
+
+        if test "x${have_icc}" = x"yes" && test "x$spigni_forte" = "x" ; then
+#	if test "x${CC}" = x"icc" && test "x$spigni_forte" = "x" ; then
+		spigni_forte="-O3 -xSSE3  -no-alias-const -no-multibyte-chars -pipe "
+		# note: -tpp6 & -tpp7 and so on are old icc flags (version 11 does not support them)
+		# ipo seems to break autotools
+		# -xS ?
+		# TODO : '-ax:SSE2'  .. generate multiple paths ..
+
+		# -ax turns on the vectorizer (MMX, SSEx, ...)
+		# -mtune=..
+		restrict_flags="-restrict" # !
+		c99_flags="$restrict_flags"
+		debug_flags="-O0 -g"
+		#no_unroll_flags="-fno-unroll"
+		#20110608 icc v12 wants -unroll=0
+		no_unroll_flags="-unroll=0"
+		#openmp_flags="-openmp" # -parallel
+		openmp_flags="-qopenmp" # -parallel
+		AC_MSG_NOTICE([Guessing the compiler is icc.])
+		walls="-Wall"
+	fi
+
+	if test "x${CC}" = x"pgcc" && test "x$spigni_forte" = "x" ; then
+		spigni_forte="-O3 -Mvect=cachesize:automatic,fuse,prefetch,sse -Mquad -Mscalarsse -Mnoframe -Minfo=all" # O3 is same as 4 
+		c99_flags="-c99 -Xa"
+		restrict_flags="$c99_flags" # !
+		debug_flags="-O0 -g"
+		no_unroll_flags="-Mnounroll"
+		openmp_flags="-mp"
+		# -Mconcur is VERY interesting .. !!
+		# -Mlist (creates a listing file)
+		# -Mprof=hwcts     Use PAPI-based profiling with hardware counters (linux86-64 only).
+		# -pg exists, -g too
+		# -Mnovect disables the vectorizer, and is the default
+		AC_MSG_NOTICE([Guessing the compiler is pgcc.])
+	fi
+
+dnl	AC_MSG_NOTICE([Guessing compiler best flags...])
+	if test "x$ac_cv_c_compiler_gnu" = xyes && test "x$spigni_forte" = "x" ; then
+		# note that CC=icc will not imply ac_cv_c_compiler_gnu=yes !
+		# -malign-double does not make sense on 64 bit archs and triggers errors
+		#spigni_forte="-O3 -fomit-frame-pointer -ffast-math"
+		spigni_forte="-O3 -fomit-frame-pointer -mtune=native"
+		c99_flags="-std=c99" # ?
+		restrict_flags="$c99_flags" # !
+		debug_flags="-O0 -ggdb"
+		no_unroll_flags="-fno-unroll-loops"
+		openmp_flags="-fopenmp"
+		if test x != x"${OPENMP_CFLAGS}" ; then
+			openmp_flags="${OPENMP_CFLAGS}"
+		fi
+		# NOTE: -ffast-math disables math functions specifications, and therefore is EVIL
+		spigni_nativo="-pipe"
+		cpuinfomn=`cat /proc/cpuinfo| grep model.name | sed s/^.*://g`
+		# FIXME : the following will fail on tcsh
+#		if test x"` $CC -v 2>&1| grep -i red.*hat`" != x ; then
+		gcc_v=`$CC --version` # will be caught on tcsh
+		if test x"` $CC -v 2>&1 | grep -i red.*hat`" != x -o x"`echo $gcc_v | grep -i red.hat`" != x; then
+			# uhm..
+#			if test x"` echo $cpuinfomn | grep Athlon `" != x ; then
+#				# fails for a 
+#				# model name      : AMD Athlon(tm) 7750 Dual-Core Processor
+#				spigni_nativo="$spigni_nativo -march=athlon -mtune=athlon"
+#			fi
+			if test x"` echo $cpuinfomn | grep 'AMD Opteron.*2216\>'`" != x ; then
+				# model name      : Dual-Core AMD Opteron(tm) Processor 2216
+				spigni_nativo="$spigni_nativo -march=opteron -mtune=opteron"
+			fi
+			if test x"` echo $cpuinfomn | grep 'AMD Opteron.*2352\>'`" != x ; then
+				# Opteron Barcelona are 2344-2350, but the instruction set is ok
+				spigni_nativo="$spigni_nativo -march=barcelona -mtune=barcelona"
+			fi
+			if test x"` echo $cpuinfomn | grep 'AMD Athlon.*7750'`" != x ; then
+				# this is Phenom, not Opteron Barcelona, but same instruction set
+				# model name      : AMD Athlon(tm) 7750 Dual-Core Processor
+				spigni_nativo="$spigni_nativo -march=barcelona -mtune=barcelona"
+			fi
+			if test x"` echo $cpuinfomn | grep 'AMD Athlon.*64.*X2.*Dual Core Processor 6000.'`" != x ; then
+                        	# K9 microarchitecture
+				# this is Windsor, released May 24, 2006
+				# rossini.ibspan.waw.pl
+				# model name      : AMD Athlon(tm) 64 X2 Dual Core Processor 6000+
+	                        spigni_nativo="$spigni_nativo -march=amdfam10 -mtune=amdfam10"
+			fi
+			if test x"` echo $cpuinfomn | grep 'Xeon.*[EXWL]3...\>'`" != x ; then
+				# Wolfdale 	31..
+				# Kentsfield 	32..
+				# Yorkfield 	33..
+				# Lynnfield 	34..
+				# Bloomfield	35..
+				spigni_nativo="$spigni_nativo -march=core2 -mtune=core2"
+			fi
+			if test x"` echo $cpuinfomn | grep 'Xeon.*X7...'`" != x ; then
+				# Tigerton series, d.c.	72..
+				# Tigerton series, q.c.	73..	1066MT/s
+				# cresco1x .. portici.enea.it
+				# model name      : Intel(R) Xeon(R) CPU           X7350  @ 2.93GHz
+				# Tulsa series		71..
+				# crescobf.brindisi.enea.it:
+				# model name      : Intel(R) Xeon(R) CPU           X7350  @ 2.93GHz
+				spigni_nativo="$spigni_nativo -march=core2 -mtune=core2"
+			fi
+			if test x"` echo $cpuinfomn | grep 'Xeon.*[EXWL]70..\>'`" != x ; then
+				# Paxville (Netburst)
+				spigni_nativo="$spigni_nativo -march=pentium4 -mtune=pentium4"
+			fi
+			if test x"` echo $cpuinfomn | grep 'Xeon.*[EXWL]50..\>'`" != x ; then
+				# Dempsey (Netburst)
+				spigni_nativo="$spigni_nativo -march=pentium4 -mtune=pentium4"
+			fi
+			if test x"` echo $cpuinfomn | grep 'Core(TM)2 Quad CPU'`" != x ; then
+				# Conroe/Allendale
+				spigni_nativo="$spigni_nativo -march=core2 -mtune=core2"
+			fi
+			if test x"` echo $cpuinfomn | grep 'Xeon.*[EXWL]51..\>'`" != x ; then
+				# Woodcrest (Core2)
+				spigni_nativo="$spigni_nativo -march=core2 -mtune=core2"
+			fi
+			if test x"` echo $cpuinfomn | grep 'Xeon.*[EXWL]52..\>'`" != x ; then
+				# Wolfdale DP
+				spigni_nativo="$spigni_nativo -march=core2 -mtune=core2"
+			fi
+			if test x"` echo $cpuinfomn | grep 'Xeon.*[EXWL]53..\>'`" != x ; then
+				# Clovertown series, 1333MT/s, 2x4MB L2
+				# ce1-cresco.portici.enea.it
+				# model name     : Intel(R) Xeon(R) CPU           E5335  @ 2.00GHz
+				# Clovertown series, 1333MT/s
+				# cresco2-f3.portici.enea.it
+				# model name      : Intel(R) Xeon(R) CPU           E5345  @ 2.33GHz
+				# Harpertown series	54..	, 12 MB L2
+				# Gainestown (Nehalem)s.55..	4x256kB L2, 8MB L3
+				spigni_nativo="$spigni_nativo -march=core2 -mtune=core2"
+			fi
+#			if test x"` echo $cpuinfomn | grep Opteron `" != x ; then
+#				spigni_nativo="$spigni_nativo -march=opteron -mtune=opteron"
+#			fi
+			if test x"` echo $cpuinfomn | grep 'Pentium(R).4' `" != x ; then
+				spigni_nativo="$spigni_nativo -march=pentium4 -mtune=pentium4"
+			fi
+			if test x"` echo $cpuinfomn | grep 'Pentium III (Coppermine)' `" != x ; then
+				spigni_nativo="$spigni_nativo -march=pentium3 -mtune=pentium3 -msse"
+			fi
+#			if test x"` echo $cpuinfomn | grep 'Xeon'`" != x ; then
+#				Intel(R) Xeon(TM) CPU 3.00GHz
+#				spigni_nativo="$spigni_nativo -march=pentium4 -mtune=pentium4"
+#			fi
+		else
+			spigni_nativo="-march=native -mtune=native $spigni_nativo"
+		fi
+		# NOTE : compilers like gcc version 3.4.6 20060404 (Red Hat 3.4.6-10) do not accept native switch
+		# -march=pentium3
+		# on p4: --malign=double -march=pentium4 -mfpmath=sse -msse2 -
+		walls="-Wall -Wredundant-decls -Wno-switch -Wdisabled-optimization -Wdeclaration-after-statement   "" -Wpointer-arith -Wstrict-prototypes "
+		#" -pedantic"
+		AC_MSG_NOTICE([Guessing the C compiler is gcc.])
+	fi
+else 
+	true
+fi
+
+#dnl	***********************************************************************
+#dnl	GNU FORTRAN runtime 
+#dnl	***********************************************************************
+if test "x$ac_cv_fc_compiler_gnu" = xyes ; then
+	LIBS="${LIBS} -lgfortran"
+	AC_MSG_NOTICE([Guessing the Fortran compiler is gfortran and adding -lgfortran to LIBS (invoke with ac_cv_fc_compiler_gnu=no to prevent this).])
+fi
+#dnl	***********************************************************************
+#dnl	CFLAGS handling starts here
+#dnl	***********************************************************************
+if test "x$CFLAGS" = x ; then
+if test "x$enable_optimize" = xyes && test x$enable_debug != xyes ; then
+	if test "x$mio_spigni_forte" = "x" ; then
+		true;
+	else
+		spigni_forte="$mio_spigni_forte";
+		spigni_nativo="";
+	fi
+	if test "x$spigni_forte" = "x" ; then
+		spigni="-O3 -malign-double $spigni_nativo"
+		#spigni="-O3 -fomit-frame-pointer -malign-double $spigni_nativo"
+	else
+		spigni="$spigni_forte $spigni_nativo"
+	fi
+
+	AC_MSG_NOTICE([Adding ${spigni} to CFLAGS.])
+	#CFLAGS="${CFLAGS} ${spigni}"
+	AC_MSG_NOTICE([Overriding CFLAGS="$CFLAGS".. ])
+	CFLAGS="${spigni}"
+else 
+	if test "x$enable_debug" = xyes; then
+		if test "x$debug_flags" = "x" ; then
+			CFLAGS="-O0 -g"
+		else
+			CFLAGS="${debug_flags}"
+		fi
+		AC_MSG_NOTICE([Optimization turned off. Debugging enabled. (CFLAGS overwritten)])
+		# since we are allowed to suggest flags, we do so
+		CFLAGS="${CFLAGS} ${SPCFLAGS}"
+	else 
+		true;
+	fi
+	true;
+fi
+#
+fi
+#dnl	***********************************************************************
+# fix just for SP
+if test x"${SPCFLAGS}" != x ; then
+	CFLAGS="${CFLAGS} ${SPCFLAGS}"
+fi
+#dnl	***********************************************************************
+if test x"$enable_dmalloc" == x"1"  ; then
+	CFLAGS="$CFLAGS $DMALLOC_CFLAGS"
+fi
+#dnl	***********************************************************************
+if test x"$want_mkl_libs" != x""  ; then
+	dnl use --without-mkl to disable it
+      	AC_MSG_NOTICE([Enabling MKL support in the benchmarking program (will add "$MKL_INCLUDE" to compilation flags; will link to "$want_mkl_libs").])
+	AC_DEFINE([RSB_WANT_MKL],[1],[Enabling MKL support in the benchmarking program.])
+	RSB_RSBENCH_LIBS="${RSB_RSBENCH_LIBS} $want_mkl_libs"
+		if test -n "$MKL_INCLUDE" ; then
+			RSB_RSBENCH_CFLAGS="$RSB_RSBENCH_CFLAGS -I $MKL_INCLUDE"
+		fi
+	else
+	AC_MSG_NOTICE([Disabling MKL support in the benchmarking program.])
+	AC_DEFINE([RSB_WANT_MKL],[0],[No MKL support wanted in the benchmarking program.])
+fi
+#dnl	***********************************************************************
+if test x"$enable_openmp" = x"yes"; then
+	AC_DEFINE([RSB_WANT_OMP_RECURSIVE_KERNELS],[1],[Recursive kernels parallelized with OpenMP.])
+	if test x"$openmp_flags" != "x" ; then
+dnl		AC_DEFINE([RSB_WANT_OMP_KERNELS],[1],[Kernels parallelized with OpenMP.])
+		CFLAGS="${CFLAGS} $openmp_flags"
+		if test "x$OPENMP_FCFLAGS" = "x" ; then
+			OPENMP_FCFLAGS="$openmp_flags"
+		fi
+		FCFLAGS="${FCFLAGS} ${OPENMP_FCFLAGS}"
+	else
+		AC_MSG_NOTICE([We do not know an appropriate OpenMP-enabling flag but assume OpenMP is active.])
+		CFLAGS="${CFLAGS}"
+	fi
+else 
+	AC_DEFINE([RSB_WANT_OMP_RECURSIVE_KERNELS],[0],[Recursive kernels parallelized with OpenMP.])
+	AC_MSG_NOTICE([OpenMP code disabled: 1 thread at most is allowed.])
+        want_max_threads="1"
+	true;
+fi
+#dnl	***********************************************************************
+if test "x$enable_c99" = xyes; then
+	if test "x$c99_flags" = "x" ; then
+		AC_MSG_NOTICE([We do not know an appropriate C99-enabling flag.])
+		CFLAGS="${CFLAGS}"
+	else
+		CFLAGS="${CFLAGS} $c99_flags"
+	fi
+else 
+	true;
+fi
+#dnl	***********************************************************************
+AH_TEMPLATE([OSKI_LUA_PATH])
+if test "x$enable_oski" = xyes; then
+      	AC_MSG_NOTICE([Looking for user set OSKI_INCLUDE, OSKI_LUA_PATH, OSKI_PATH environment variables..])
+	save_CFLAGS="$CFLAGS"
+	if test -n "$OSKI_INCLUDE" ; then
+		CFLAGS="$CFLAGS -I $OSKI_INCLUDE"
+	fi
+	AC_CHECK_HEADERS([oski/oski.h], [true])
+	CFLAGS="$save_CFLAGS"
+	if test "x$ac_cv_header_oski_oski_h" != xyes; then
+dnl AC_MSG_WARN
+	AC_MSG_ERROR([Header file <oski/oski.h> not found, therefore we will not use it!])
+	else
+		# FIXME: this is temporary, for my own machines
+		if test -d "~/usr/local/include/" ; then
+			CFLAGS="$CFLAGS -I ~/usr/local/include/"
+		fi
+
+		if test -n "$OSKI_INCLUDE" ; then
+			CFLAGS="$CFLAGS -I $OSKI_INCLUDE"
+		fi
+
+		if test x"$OSKI_PATH" = x && test -d "/usr/local/lib/oski" ; then
+			OSKI_PATH=/usr/local/lib/oski
+		fi
+		if test x"$OSKI_LUA_PATH" = x ; then
+			OSKI_LUA_PATH="$OSKI_PATH/?.lua"
+			else
+			true;
+		fi
+		if test x"$OSKI_LIBS" = x ; then
+			# oski-1.0.1h works in this way
+			#OSKI_LIBS=`cat $OSKI_PATH/site-modules-static.txt | tr '\n' ' '`
+			OSKI_LIBS=`cat $OSKI_PATH/site-modules-shared.txt | tr '\n' ' '`
+			# the following often fail due to the incorrect order of libs:
+			#OSKI_LIBS=`cat $OSKI_PATH/site-modules-static.txt|sed 's/^\/.*\///g;s/^'/"$OSKI_PATH\/"/g | tr '\n' ' '`
+		fi
+		if test x"$OSKI_LIBS" = x ; then
+			AC_MSG_ERROR([No linkable libraries for OSKI? Disable OSKI support or try setting OSKI_LIBS by hand.])
+		fi
+		if test x"${OSKI_CFLAGS}" = x && test -d "~/usr/local/include/"; then
+			OSKI_CFLAGS="$OSKI_CFLAGS -I  /usr/local/include/"
+			OSKI_CFLAGS="$OSKI_CFLAGS -I ~/usr/local/include/"
+		fi
+		#
+		RSB_RSBENCH_LIBS="${RSB_RSBENCH_LIBS} -L${OSKI_PATH} ${OSKI_LIBS}"
+
+		# FIXME: this is temporary, for my own machines
+		if test -d "~/usr/local/lib/oski/" ; then
+			RSB_RSBENCH_LIBS="${RSB_RSBENCH_LIBS} -L ~/usr/local/lib/oski/"
+		fi
+
+		RSB_RSBENCH_CFLAGS="${RSB_RSBENCH_CFLAGS} $OSKI_CFLAGS"
+	      	AC_MSG_NOTICE(Enabling comparative OSKI benchmarking.)
+		AC_DEFINE([RSB_WANT_OSKI_BENCHMARKING],[1],[OSKI comparative benchmarking.])
+		AC_DEFINE_UNQUOTED([OSKI_LUA_PATH],"$OSKI_LUA_PATH",[OSKI path to installed lua modules. User set OSKI_LUA_PATH environment variable at runtime will override this one, however.])
+	fi
+else 
+      	true;
+fi
+#dnl
+AC_SUBST([RSB_CONST_MAX_SUPPORTED_THREADS],"${want_max_threads}")
+#dnl	***********************************************************************
+AC_DEFINE_UNQUOTED([RSB_INT_ERR_VERBOSITY],[$want_int_verrbosity],[Inner error verbosity (internal debug level).])
+AC_DEFINE_UNQUOTED([RSB_OUT_ERR_VERBOSITY],[$want_ext_verrbosity],[Error verbosity (often known as debug level).])
+AC_DEFINE_UNQUOTED([RSB_WANT_IO_LEVEL],[$want_io_level],[Supported input/output functionality.])
+AC_DEFINE_UNQUOTED([RSB_USER_SET_MEM_HIERARCHY_INFO],["$RSB_USER_SET_MEM_HIERARCHY_INFO"],[If not null, the library will rely on this for memory hierarchy info.])
+AC_DEFINE_UNQUOTED([RSB_DETECTED_MEM_HIERARCHY_INFO],["$RSB_DETECTED_MEM_HIERARCHY_INFO"],[If not null, the library will rely on this for memory hierarchy info, unless RSB_USER_SET_MEM_HIERARCHY_INFO is set.])
+AC_DEFINE_UNQUOTED([RSB_CONST_MAX_SUPPORTED_THREADS],[$RSB_CONST_MAX_SUPPORTED_THREADS],[Maximal number of supported threads (default 64).])
+AC_DEFINE_UNQUOTED([CFLAGS],["$CFLAGS"],[Compilation flags.])
+AC_DEFINE_UNQUOTED([CC],["$CC"],[C compiler.])
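+dnl (Baking CC and CFLAGS into rsb-config.h records the build configuration in
+dnl the installed header, presumably so that the built library can report it.)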
+if test x"$RSB_USE_ASSERT" != x ; then
+AC_DEFINE_UNQUOTED([RSB_USE_ASSERT],["$RSB_USE_ASSERT"],[If undefined, NDEBUG will be defined.])
+fi
+#dnl	***********************************************************************
+WANT_MATRIX_STORAGE=""
+if test x"$enable_c" = xyes ; then
+	WANT_MATRIX_BCOO_STORAGE=BCOR
+#	WANT_MATRIX_BCOO_STORAGE=BCOR,BCOC
+	WANT_MATRIX_STORAGE="$WANT_MATRIX_STORAGE,$WANT_MATRIX_BCOO_STORAGE"
+fi
+if test x"$enable_b" = xyes ; then
+	WANT_MATRIX_BCSS_STORAGE=BCSR
+#	WANT_MATRIX_BCSS_STORAGE=BCSR,BCSC
+	WANT_MATRIX_STORAGE="$WANT_MATRIX_STORAGE,$WANT_MATRIX_BCSS_STORAGE"
+fi
+# strip the leading comma
+WANT_MATRIX_STORAGE="`echo $WANT_MATRIX_STORAGE| sed 's/^,//g'`"
+#dnl	***********************************************************************
+if test "x${userset_nounroll_cflag}" != x ; then
+	no_unroll_flags="${userset_nounroll_cflag}"
+fi
+dnl	***********************************************************************
+# for rsb_config.m4.in
+AC_SUBST(enable_restrict)
+AC_SUBST(enable_openmp)
+AC_SUBST(CFLAGS)
+AC_SUBST(FCFLAGS)
+dnl AC_SUBST(NOUNROLLCFLAGS,"${CFLAGS} $no_unroll_flags")
+AC_SUBST(NOUNROLLCFLAGS,"$no_unroll_flags")
+AC_SUBST(RSB_RSBENCH_LIBS,"$RSB_RSBENCH_LIBS")
+AC_SUBST(RSB_RSBENCH_CFLAGS,"$RSB_RSBENCH_CFLAGS")
+AC_SUBST(WANT_ROW_UNLOOP_FACTORS,["$row_unrolls"])
+AC_SUBST(WANT_LOOPING_KERNELS,["$want_looping_kernels"])
+AC_SUBST(WANT_COLUMN_UNLOOP_FACTORS,["$column_unrolls"])
+AC_SUBST(WANT_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR,["$util_unrolls"])
+dnl AC_SUBST(WANT_HALFWORD_INDICES,["$want_halfword_indices"])
+AC_SUBST(WANT_HALFWORD_INDICES,["yes"])
+AC_SUBST(WANT_SPSM_DIAG_CHECK,["$want_spsm_diagonal_check"])
+AC_SUBST(WANT_TYPES,["$want_matrix_types"])
+AC_SUBST(WANT_MATRIX_BCSS_STORAGE,["$WANT_MATRIX_BCSS_STORAGE"])
+AC_SUBST(WANT_MATRIX_BCOO_STORAGE,["$WANT_MATRIX_BCOO_STORAGE"])
+AC_SUBST(WANT_MATRIX_LINKED_STORAGE,["$WANT_MATRIX_LINKED_STORAGE"])
+AC_SUBST(WANT_MATRIX_VB_STORAGE,["$WANT_MATRIX_VB_STORAGE"])
+AC_SUBST(WANT_MATRIX_STORAGE,["$WANT_MATRIX_STORAGE"])
+AC_SUBST(WANT_MATRIX_OPS,["$want_matrix_ops"])
+AC_SUBST(WANT_MATRIX_ALL_META_OPS,["spmv,spsv"])
+AC_SUBST(WANT_MATRIX_ALL_OPS,["$all_matrix_ops"])
+AC_SUBST(WANT_MATRIX_ALL_TYPES,["$all_matrix_types"])
+dnl AC_SUBST(OT_SRCS,"`for o in $all_matrix_ops echo $o ; done`")
+#dnl	***********************************************************************
+if test x = x"$ARFLAGS" ; then ARFLAGS="cru" ; fi # damn AIX ar
+AC_SUBST(ARFLAGS,"$ARFLAGS")
+#dnl	***********************************************************************
+
+if test x"${enable_fortran_examples}" = x"yes" -a x"${FC}" = x"" ;  then
+	enable_fortran_examples=no
+	AC_MSG_WARN([No Fortran compiler detected (FC environment variable). Will not build the Fortran examples.])
+fi
+AM_CONDITIONAL([HAVE_FORTRAN_EXAMPLES],[test x"$enable_fortran_examples" = xyes ])
+AM_CONDITIONAL([HAVE_C_EXAMPLES],[test x"$enable_c_examples" = xyes ])
+AM_CONDITIONAL([HAVE_OCTAVE],[test x"$OCTAVE" != xfalse ])
+want_int=`echo "$want_matrix_types" | grep '\<int\>'`
+if test x"$OCTAVE" != xfalse -a x"$enable_octave_testing" = xyes ; then want_octave_testing=yes; else want_octave_testing=no; fi
+AM_CONDITIONAL([WANT_OCTAVE_TESTING],[test x"$want_octave_testing" = x"yes" ])
+if test x"$OCTAVE" != xfalse -a x"$want_int" != x -a x"$enable_octave_testing" = xyes ; then want_octave_testing_and_int=yes; else want_octave_testing_and_int=no ; fi
+AM_CONDITIONAL([WANT_OCTAVE_TESTING_AND_INT],[test x"$want_octave_testing_and_int" = x"yes" ])
+if test x"${FC}" = x"" ;  then
+if test x"${want_blas_sparse_mod_install}" = x"yes" -o x"${sparse_blas_interface}" = x"yes" ;  then
+	want_blas_sparse_mod_install=no;
+	sparse_blas_interface=no;
+	AC_MSG_WARN([No Fortran compiler detected (FC environment variable). Will not build the BLAS interface.])
+fi
+fi
+AM_CONDITIONAL([WANT_BLAS_SPARSE_MOD_INSTALL],[test x"$want_blas_sparse_mod_install" = x"yes"])
+dnl AM_CONDITIONAL([WANT_BLAS_SPARSE_FI],[test x"$want_blas_sparse_fi_install" = x"yes"])
+AM_CONDITIONAL([WANT_CXX_TEST_RSBENCH],[test x"$CXX" != x ])
+AM_CONDITIONAL([HAVE_DOXYGEN],[test x"$DOXYGEN" != x"false" ])
+AM_CONDITIONAL([WANT_BUILD_DOC],[test x"$want_build_doc" = x"yes" ])
+AM_CONDITIONAL([HAVE_PKGCONFIG_INSTALL],[test x"$want_install_pkg_config" = x"yes" ])
+AM_CONDITIONAL([HAVE_HELP2MAN],[test x"$HELP2MAN" != x"false" ])
+AM_CONDITIONAL([HAVE_M4],[test x"$M4" != xfalse ])
+AM_CONDITIONAL([HAVE_FC],[test x"$FC" != x ])
+dnl AM_CONDITIONAL([WANT_DL],[test x"$want_rsb_dl" = x"yes"])
+AM_CONDITIONAL([HAVE_SPARSE_BLAS_INTERFACE],[test x"$sparse_blas_interface" = xyes ])
+AM_CONDITIONAL([WANT_INTERNAL_HEADERS_INSTALL],[test x"$enable_ihi" = xyes ])
+AM_CONDITIONAL([WANT_OMPIO_SUPPORT],[test x"$want_ompio" = x"yes" && test x"$enable_openmp" = x"yes" ])
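+# Sketch (standard automake usage, assumed rather than copied from this tree)
+# of how one of the conditionals above can be consumed in a Makefile.am:
+#   if HAVE_FORTRAN_EXAMPLES
+#   SUBDIRS += examples
+#   endif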
+AC_SUBST(OCTAVE_FLAGS,"--no-history --no-line-editing  --no-site-file --norc   --silent")
+#dnl	***********************************************************************
+dnl AC_MSG_NOTICE([
+dnl Will generate code for types in {$want_matrix_types}, for matrix ops in {$want_matrix_ops}.
+dnl Will generate code for {$row_unrolls} x {$column_unrolls}-sized blocks, for types in {$want_matrix_types} for matrix ops in {$want_matrix_ops}.
+dnl ])
+dnl AC_MSG_NOTICE([FIXME : please run `make clean` as a first thing  ])
+#dnl	***********************************************************************
+AC_CONFIG_FILES([librsb-config:librsb-config.in],[chmod +x librsb-config])
+AC_CONFIG_FILES([librsb.pc:librsb.pc.in],[])
+AC_OUTPUT([rsb_config.m4] [Makefile] [bench/Makefile] [doc/Makefile] [examples/Makefile] [scripts/Makefile] [m4/Makefile] [blas_sparse/Makefile])
+#dnl	***********************************************************************
+AC_MSG_NOTICE([dnl
+=============== Build Programs and Flags ===============================
+(you can override these at build time; e.g.: 'make CC=cc')
+dnl	ac_cv_c_compiler_gnu: ${ac_cv_c_compiler_gnu}
+	CC                     : ${CC}
+	FC (to disable, FC='') : ${FC}
+	CFLAGS                 : ${CFLAGS}
+	FCFLAGS                : ${FCFLAGS}
+	LDFLAGS                : ${LDFLAGS}
+	NOUNROLLCFLAGS         : ${NOUNROLLCFLAGS}
+	LIBS                   : ${LIBS}
+	AR                     : ${AR}
+	ARFLAGS                : ${ARFLAGS}
+	M4                     : ${M4}
+	OCTAVE                 : ${OCTAVE}
+dnl	WANT_OCTAVE_TESTING
+dnl	Octave executable            : "$OCTAVE"
+	DOXYGEN                : ${DOXYGEN}
+	HELP2MAN               : ${HELP2MAN}
+dnl	SED               : ${SED}
+dnl	HAVE_DOXYGEN                 : "$DOXYGEN"
+dnl	Doxygen executable           : "$DOXYGEN"
+dnl	HAVE_HELP2MAN                : "$HELP2MAN"
+dnl	help2man executable : "$HELP2MAN"
+dnl	HAVE_M4
+dnl	M4 executable       : "$M4"
+dnl	WANT_CXX_TEST_RSBENCH        : "$CXX"
+	CXX                    : ${CXX}
+dnl		Misc info:
+=== Additional flags affecting only the benchmark program (rsbench): ===
+	RSB_RSBENCH_LIBS             : ${RSB_RSBENCH_LIBS}
+	RSB_RSBENCH_CFLAGS           : ${RSB_RSBENCH_CFLAGS}
+dnl
+dnl		Main code generator values, predefined defaults
+dnl	All Numerical types          : ${all_matrix_types}
+dnl	Numerical types              : ${default_types}
+dnl	default_matrix_ops  : ${default_matrix_ops}
+dnl	default_unrolls	    : ${default_unrolls}
+dnl	all_matrix_ops      : ${all_matrix_ops}
+dnl	blas_matrix_ops     : ${blas_matrix_ops}
+dnl	psblas_matrix_ops   : ${psblas_matrix_ops}
+dnl	Build Sparse BLAS Interface  : "${sparse_blas_interface_default}"
+dnl	Util. Kernels Unroll         : ${default_util_unrolls}
+dnl	Triangular solve zero check  : "${want_spsm_diagonal_check_default}"
+dnl
+========= Main code generator values, this build vs defaults ===========
+(if these differ from the defaults, you need to have M4 and run 'make cleanall' and 'make')
+	All Numerical types          : "${all_matrix_types}"
+	Numerical types              : "${want_matrix_types}" vs ["${default_types}"]
+dnl	want_matrix_ops     : ${want_matrix_ops}
+dnl	row_unrolls         : ${row_unrolls}
+dnl	column_unrolls      : ${column_unrolls}
+dnl	HAVE_SPARSE_BLAS_INTERFACE:
+	Build Sparse BLAS Interface  : "${sparse_blas_interface}" vs ["${sparse_blas_interface_default}"]
+	Util. Kernels Unroll         : "${util_unrolls}" vs ["${default_util_unrolls}"]
+dnl	matrix storage      : ${WANT_MATRIX_STORAGE}
+	Triangular solve zero check  : "${want_spsm_diagonal_check}" vs ["${want_spsm_diagonal_check_default}"]
+============== Build Configuration, this build vs defaults =============
+(if you reconfigure and change these, you need to run 'make clean' and 'make')
+dnl	host_os		  : ${host_os}
+dnl	host_cpu	  : ${host_cpu}
+dnl	short indices     : ${want_halfword_indices}
+dnl	Configured I/O level : ${want_io_level}
+dnl	RSB_WANT_IO_LEVEL                   : ${want_io_level}
+	Supported I/O functionality level   : "${want_io_level}" vs "${default_want_io_level}"
+dnl	RSB_OUT_ERR_VERBOSITY               : ${want_ext_verrbosity}
+	Interface Error Verbosity           : "${want_ext_verrbosity}" vs "${default_want_ext_verrbosity}"
+dnl	RSB_INT_ERR_VERBOSITY               : ${want_int_verrbosity}
+	Internals Error Verbosity           : "${want_int_verrbosity}" vs "${default_want_int_verrbosity}"
+dnl		Host specific info:
+dnl	RSB_USER_SET_MEM_HIERARCHY_INFO     : ${RSB_USER_SET_MEM_HIERARCHY_INFO}
+dnl	RSB_DETECTED_MEM_HIERARCHY_INFO     : ${RSB_DETECTED_MEM_HIERARCHY_INFO}
+dnl	RSB_CONST_MAX_SUPPORTED_THREADS     : ${RSB_CONST_MAX_SUPPORTED_THREADS}
+	Memory hierarchy info, detected     : "${RSB_DETECTED_MEM_HIERARCHY_INFO}"
+	Memory hierarchy info, selected     : "${RSB_USER_SET_MEM_HIERARCHY_INFO}"
+	Maximum number of supported threads : "${RSB_CONST_MAX_SUPPORTED_THREADS}"
+dnl
+dnl		Configured Makefile conditionals:
+dnl	WANT_SPSM_DIAG_CHECK
+dnl	HAVE_FORTRAN_EXAMPLES
+dnl 	Build Fortran code                  : "$enable_fortran"
+	Build Fortran examples              : "$enable_fortran_examples"
+dnl	HAVE_C_EXAMPLES
+	Build C examples                    : "$enable_c_examples"
+dnl	WANT_OMPIO_SUPPORT
+dnl	Want OpenMP + I/O            : "$want_ompio"
+dnl	RSB_DISABLE_ALLOCATOR_WRAPPER: "$disable_allocator_wrapper"
+dnl	WANT_BLAS_SPARSE_MOD_INSTALL
+	Install Sparse BLAS Fortran modules : "$want_blas_sparse_mod_install"
+	Install pkg-config "librsb.pc" file : "$want_install_pkg_config"
+dnl	WANT_BLAS_SPARSE_FI
+dnl	Install Sparse BLAS Fortran headers : "$want_blas_sparse_fi_install"
+dnl	WANT_OCTAVE_TESTING                 : "$want_octave_testing"
+	Build Octave generated tester       : "$want_octave_testing"
+dnl	WANT_OCTAVE_TESTING_AND_INT
+	Build Octave generated tester (int) : "$want_octave_testing_and_int"
+	Build HTML and man documentation    : "$want_build_doc"
+	Support for gzipped matrices (zlib) : "${want_zlib_support}"
+	Gather elapsed time in librsb       : "${enable_librsb_stats}"
+dnl
+dnl	WANT_INTERNAL_HEADERS_INSTALL: "$enable_ihi"
+dnl	CPU  	          : ${CPU}
+dnl	all types ...
+])
+
+if test x"$OCTAVE" != xfalse && test x"$want_int" != x  ; then
+AC_MSG_NOTICE([You seem to have GNU Octave and the 'int' type enabled. This will allow an additional part of the test suite to be generated.])
+else
+AC_MSG_NOTICE([You seem not to have GNU Octave, or you have disabled the 'int' type. Part of the test suite will not be generated. If you want more testing capabilities, you should enable the 'int' type as well.])
+fi
+if test x"$M4" = x ; then
+	AC_MSG_WARN([No m4 implementation detected. You will not be able to generate code.])
+else
+	true
+fi
+# FIXME : should warn the user when opting out of the 'int' type, since then there would be no GNU Octave based testing.
+
+if test x"$default_types"  != x"$want_matrix_types" ; then
+AC_MSG_WARN([You chose a custom selection of matrix types. If you just unpacked the archive, you should issue "make cleanall" to delete the shipped code; "make" will then regenerate it using m4.])
+fi
+if test x"$want_rsb_dl"  = x"yes" ; then
+dnl AC_MSG_WARN([You chose dynamic linking of example executables. To execute them you will probably need to update your environment; e.g.: export LD_LIBRARY_PATH=`pwd`/:\$LD_LIBRARY_PATH .])
+AC_MSG_WARN([You chose an obsolete switch (--enable-shlib-linked-examples), which is now disabled.])
+fi
+AC_MSG_NOTICE([Successfully configured librsb version "$LIBRSB_VERSION".])
diff --git a/depcomp b/depcomp
new file mode 100755
index 0000000..25a39e6
--- /dev/null
+++ b/depcomp
@@ -0,0 +1,708 @@
+#! /bin/sh
+# depcomp - compile a program generating dependencies as side-effects
+
+scriptversion=2012-03-27.16; # UTC
+
+# Copyright (C) 1999, 2000, 2003, 2004, 2005, 2006, 2007, 2009, 2010,
+# 2011, 2012 Free Software Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Originally written by Alexandre Oliva <oliva at dcc.unicamp.br>.
+
+case $1 in
+  '')
+     echo "$0: No command.  Try '$0 --help' for more information." 1>&2
+     exit 1;
+     ;;
+  -h | --h*)
+    cat <<\EOF
+Usage: depcomp [--help] [--version] PROGRAM [ARGS]
+
+Run PROGRAM ARGS to compile a file, generating dependencies
+as side-effects.
+
+Environment variables:
+  depmode     Dependency tracking mode.
+  source      Source file read by 'PROGRAM ARGS'.
+  object      Object file output by 'PROGRAM ARGS'.
+  DEPDIR      Directory where to store dependencies.
+  depfile     Dependency file to output.
+  tmpdepfile  Temporary file to use when outputting dependencies.
+  libtool     Whether libtool is used (yes/no).
+
+Report bugs to <bug-automake at gnu.org>.
+EOF
+    exit $?
+    ;;
+  -v | --v*)
+    echo "depcomp $scriptversion"
+    exit $?
+    ;;
+esac
+
+# A tabulation character.
+tab='	'
+# A newline character.
+nl='
+'
+
+if test -z "$depmode" || test -z "$source" || test -z "$object"; then
+  echo "depcomp: Variables source, object and depmode must be set" 1>&2
+  exit 1
+fi
+
+# Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po.
+depfile=${depfile-`echo "$object" |
+  sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`}
+tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`}
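+# Worked example of the two rewrites above: with object='sub/bar.o' and DEPDIR
+# unset, depfile becomes 'sub/.deps/bar.Po' and tmpdepfile 'sub/.deps/bar.TPo'.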
+
+rm -f "$tmpdepfile"
+
+# Some modes work just like other modes, but use different flags.  We
+# parameterize here, but still list the modes in the big case below,
+# to make depend.m4 easier to write.  Note that we *cannot* use a case
+# here, because this file can only contain one case statement.
+if test "$depmode" = hp; then
+  # HP compiler uses -M and no extra arg.
+  gccflag=-M
+  depmode=gcc
+fi
+
+if test "$depmode" = dashXmstdout; then
+   # This is just like dashmstdout with a different argument.
+   dashmflag=-xM
+   depmode=dashmstdout
+fi
+
+cygpath_u="cygpath -u -f -"
+if test "$depmode" = msvcmsys; then
+   # This is just like msvisualcpp but w/o cygpath translation.
+   # Just convert the backslash-escaped backslashes to single forward
+   # slashes to satisfy depend.m4
+   cygpath_u='sed s,\\\\,/,g'
+   depmode=msvisualcpp
+fi
+
+if test "$depmode" = msvc7msys; then
+   # This is just like msvc7 but w/o cygpath translation.
+   # Just convert the backslash-escaped backslashes to single forward
+   # slashes to satisfy depend.m4
+   cygpath_u='sed s,\\\\,/,g'
+   depmode=msvc7
+fi
+
+if test "$depmode" = xlc; then
+   # IBM C/C++ Compilers xlc/xlC can output gcc-like dependency information.
+   gccflag=-qmakedep=gcc,-MF
+   depmode=gcc
+fi
+
+case "$depmode" in
+gcc3)
+## gcc 3 implements dependency tracking that does exactly what
+## we want.  Yay!  Note: for some reason libtool 1.4 doesn't like
+## it if -MD -MP comes after the -MF stuff.  Hmm.
+## Unfortunately, FreeBSD c89 acceptance of flags depends upon
+## the command line argument order; so add the flags where they
+## appear in depend2.am.  Note that the slowdown incurred here
+## affects only configure: in makefiles, %FASTDEP% shortcuts this.
+  for arg
+  do
+    case $arg in
+    -c) set fnord "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" "$arg" ;;
+    *)  set fnord "$@" "$arg" ;;
+    esac
+    shift # fnord
+    shift # $arg
+  done
+  "$@"
+  stat=$?
+  if test $stat -eq 0; then :
+  else
+    rm -f "$tmpdepfile"
+    exit $stat
+  fi
+  mv "$tmpdepfile" "$depfile"
+  ;;
+
+gcc)
+## There are various ways to get dependency output from gcc.  Here's
+## why we pick this rather obscure method:
+## - Don't want to use -MD because we'd like the dependencies to end
+##   up in a subdir.  Having to rename by hand is ugly.
+##   (We might end up doing this anyway to support other compilers.)
+## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like
+##   -MM, not -M (despite what the docs say).
+## - Using -M directly means running the compiler twice (even worse
+##   than renaming).
+  if test -z "$gccflag"; then
+    gccflag=-MD,
+  fi
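+## As an illustration, the compile below then runs roughly as:
+##   gcc ... -Wp,-MD,sub/.deps/foo.TPo -c -o sub/foo.o sub/foo.c
+## compiling normally while the preprocessor writes the dependency rules
+## into the temporary file as a side effect.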
+  "$@" -Wp,"$gccflag$tmpdepfile"
+  stat=$?
+  if test $stat -eq 0; then :
+  else
+    rm -f "$tmpdepfile"
+    exit $stat
+  fi
+  rm -f "$depfile"
+  echo "$object : \\" > "$depfile"
+  alpha=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz
+## The second -e expression handles DOS-style file names with drive letters.
+  sed -e 's/^[^:]*: / /' \
+      -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile"
+## This next piece of magic avoids the "deleted header file" problem.
+## The problem is that when a header file which appears in a .P file
+## is deleted, the dependency causes make to die (because there is
+## typically no way to rebuild the header).  We avoid this by adding
+## dummy dependencies for each header file.  Too bad gcc doesn't do
+## this for us directly.
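+## For instance, from a rule 'foo.o: foo.c foo.h' the pipeline below also
+## emits 'foo.c :' and 'foo.h :', so deleting a header later cannot break make.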
+  tr ' ' "$nl" < "$tmpdepfile" |
+## Some versions of gcc put a space before the ':'.  On the theory
+## that the space means something, we add a space to the output as
+## well.  hp depmode also adds that space, but also prefixes the VPATH
+## to the object.  Take care to not repeat it in the output.
+## Some versions of the HPUX 10.20 sed can't process this invocation
+## correctly.  Breaking it into two sed invocations is a workaround.
+    sed -e 's/^\\$//' -e '/^$/d' -e "s|.*$object$||" -e '/:$/d' \
+      | sed -e 's/$/ :/' >> "$depfile"
+  rm -f "$tmpdepfile"
+  ;;
+
+hp)
+  # This case exists only to let depend.m4 do its work.  It works by
+  # looking at the text of this script.  This case will never be run,
+  # since it is checked for above.
+  exit 1
+  ;;
+
+sgi)
+  if test "$libtool" = yes; then
+    "$@" "-Wp,-MDupdate,$tmpdepfile"
+  else
+    "$@" -MDupdate "$tmpdepfile"
+  fi
+  stat=$?
+  if test $stat -eq 0; then :
+  else
+    rm -f "$tmpdepfile"
+    exit $stat
+  fi
+  rm -f "$depfile"
+
+  if test -f "$tmpdepfile"; then  # yes, the sourcefile depend on other files
+    echo "$object : \\" > "$depfile"
+
+    # Clip off the initial element (the dependent).  Don't try to be
+    # clever and replace this with sed code, as IRIX sed won't handle
+    # lines with more than a fixed number of characters (4096 in
+    # IRIX 6.2 sed, 8192 in IRIX 6.5).  We also remove comment lines;
+    # the IRIX cc adds comments like '#:fec' to the end of the
+    # dependency line.
+    tr ' ' "$nl" < "$tmpdepfile" \
+    | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' | \
+    tr "$nl" ' ' >> "$depfile"
+    echo >> "$depfile"
+
+    # The second pass generates a dummy entry for each header file.
+    tr ' ' "$nl" < "$tmpdepfile" \
+   | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \
+   >> "$depfile"
+  else
+    # The sourcefile does not contain any dependencies, so just
+    # store a dummy comment line, to avoid errors with the Makefile
+    # "include basename.Plo" scheme.
+    echo "#dummy" > "$depfile"
+  fi
+  rm -f "$tmpdepfile"
+  ;;
+
+xlc)
+  # This case exists only to let depend.m4 do its work.  It works by
+  # looking at the text of this script.  This case will never be run,
+  # since it is checked for above.
+  exit 1
+  ;;
+
+aix)
+  # The C for AIX Compiler uses -M and outputs the dependencies
+  # in a .u file.  In older versions, this file always lives in the
+  # current directory.  Also, the AIX compiler puts '$object:' at the
+  # start of each line; $object doesn't have directory information.
+  # Version 6 uses the directory in both cases.
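+  # Illustration: when compiling sub/foo.c into sub/foo.o, the dependencies
+  # may land in 'foo.u' (older compilers) or 'sub/foo.u' (version 6), hence
+  # the multiple candidate files probed below.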
+  dir=`echo "$object" | sed -e 's|/[^/]*$|/|'`
+  test "x$dir" = "x$object" && dir=
+  base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'`
+  if test "$libtool" = yes; then
+    tmpdepfile1=$dir$base.u
+    tmpdepfile2=$base.u
+    tmpdepfile3=$dir.libs/$base.u
+    "$@" -Wc,-M
+  else
+    tmpdepfile1=$dir$base.u
+    tmpdepfile2=$dir$base.u
+    tmpdepfile3=$dir$base.u
+    "$@" -M
+  fi
+  stat=$?
+
+  if test $stat -eq 0; then :
+  else
+    rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
+    exit $stat
+  fi
+
+  for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
+  do
+    test -f "$tmpdepfile" && break
+  done
+  if test -f "$tmpdepfile"; then
+    # Each line is of the form 'foo.o: dependent.h'.
+    # Do two passes, one to just change these to
+    # '$object: dependent.h' and one to simply 'dependent.h:'.
+    sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile"
+    sed -e 's,^.*\.[a-z]*:['"$tab"' ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile"
+  else
+    # The sourcefile does not contain any dependencies, so just
+    # store a dummy comment line, to avoid errors with the Makefile
+    # "include basename.Plo" scheme.
+    echo "#dummy" > "$depfile"
+  fi
+  rm -f "$tmpdepfile"
+  ;;
+
+icc)
+  # Intel's C compiler and tcc (Tiny C Compiler) understand '-MD -MF file'.
+  # However on
+  #    $CC -MD -MF foo.d -c -o sub/foo.o sub/foo.c
+  # ICC 7.0 will fill foo.d with something like
+  #    foo.o: sub/foo.c
+  #    foo.o: sub/foo.h
+  # which is wrong.  We want
+  #    sub/foo.o: sub/foo.c
+  #    sub/foo.o: sub/foo.h
+  #    sub/foo.c:
+  #    sub/foo.h:
+  # ICC 7.1 will output
+  #    foo.o: sub/foo.c sub/foo.h
+  # and will wrap long lines using '\':
+  #    foo.o: sub/foo.c ... \
+  #     sub/foo.h ... \
+  #     ...
+  # tcc 0.9.26 (FIXME still under development at the moment of writing)
+  # will emit a similar output, but also prepend the continuation lines
+  # with horizontal tabulation characters.
+  "$@" -MD -MF "$tmpdepfile"
+  stat=$?
+  if test $stat -eq 0; then :
+  else
+    rm -f "$tmpdepfile"
+    exit $stat
+  fi
+  rm -f "$depfile"
+  # Each line is of the form 'foo.o: dependent.h',
+  # or 'foo.o: dep1.h dep2.h \', or ' dep3.h dep4.h \'.
+  # Do two passes, one to just change these to
+  # '$object: dependent.h' and one to simply 'dependent.h:'.
+  sed -e "s/^[ $tab][ $tab]*/  /" -e "s,^[^:]*:,$object :," \
+    < "$tmpdepfile" > "$depfile"
+  sed '
+    s/[ '"$tab"'][ '"$tab"']*/ /g
+    s/^ *//
+    s/ *\\*$//
+    s/^[^:]*: *//
+    /^$/d
+    /:$/d
+    s/$/ :/
+  ' < "$tmpdepfile" >> "$depfile"
+  rm -f "$tmpdepfile"
+  ;;
+
+hp2)
+  # The "hp" stanza above does not work with aCC (C++) and HP's ia64
+  # compilers, which have integrated preprocessors.  The correct option
+  # to use with these is +Maked; it writes dependencies to a file named
+  # 'foo.d', which lands next to the object file, wherever that
+  # happens to be.
+  # Much of this is similar to the tru64 case; see comments there.
+  dir=`echo "$object" | sed -e 's|/[^/]*$|/|'`
+  test "x$dir" = "x$object" && dir=
+  base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'`
+  if test "$libtool" = yes; then
+    tmpdepfile1=$dir$base.d
+    tmpdepfile2=$dir.libs/$base.d
+    "$@" -Wc,+Maked
+  else
+    tmpdepfile1=$dir$base.d
+    tmpdepfile2=$dir$base.d
+    "$@" +Maked
+  fi
+  stat=$?
+  if test $stat -eq 0; then :
+  else
+     rm -f "$tmpdepfile1" "$tmpdepfile2"
+     exit $stat
+  fi
+
+  for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2"
+  do
+    test -f "$tmpdepfile" && break
+  done
+  if test -f "$tmpdepfile"; then
+    sed -e "s,^.*\.[a-z]*:,$object:," "$tmpdepfile" > "$depfile"
+    # Add 'dependent.h:' lines.
+    sed -ne '2,${
+	       s/^ *//
+	       s/ \\*$//
+	       s/$/:/
+	       p
+	     }' "$tmpdepfile" >> "$depfile"
+  else
+    echo "#dummy" > "$depfile"
+  fi
+  rm -f "$tmpdepfile" "$tmpdepfile2"
+  ;;
+
+tru64)
+   # The Tru64 compiler uses -MD to generate dependencies as a side
+   # effect.  'cc -MD -o foo.o ...' puts the dependencies into 'foo.o.d'.
+   # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put
+   # dependencies in 'foo.d' instead, so we check for that too.
+   # Subdirectories are respected.
+   dir=`echo "$object" | sed -e 's|/[^/]*$|/|'`
+   test "x$dir" = "x$object" && dir=
+   base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'`
+
+   if test "$libtool" = yes; then
+      # With Tru64 cc, shared objects can also be used to make a
+      # static library.  This mechanism is used in libtool 1.4 series to
+      # handle both shared and static libraries in a single compilation.
+      # With libtool 1.4, dependencies were output in $dir.libs/$base.lo.d.
+      #
+      # With libtool 1.5 this exception was removed, and libtool now
+      # generates 2 separate objects for the 2 libraries.  These two
+      # compilations output dependencies in $dir.libs/$base.o.d and
+      # in $dir$base.o.d.  We have to check for both files, because
+      # one of the two compilations can be disabled.  We should prefer
+      # $dir$base.o.d over $dir.libs/$base.o.d because the latter is
+      # automatically cleaned when .libs/ is deleted, while ignoring
+      # the former would cause a distcleancheck panic.
+      tmpdepfile1=$dir.libs/$base.lo.d   # libtool 1.4
+      tmpdepfile2=$dir$base.o.d          # libtool 1.5
+      tmpdepfile3=$dir.libs/$base.o.d    # libtool 1.5
+      tmpdepfile4=$dir.libs/$base.d      # Compaq CCC V6.2-504
+      "$@" -Wc,-MD
+   else
+      tmpdepfile1=$dir$base.o.d
+      tmpdepfile2=$dir$base.d
+      tmpdepfile3=$dir$base.d
+      tmpdepfile4=$dir$base.d
+      "$@" -MD
+   fi
+
+   stat=$?
+   if test $stat -eq 0; then :
+   else
+      rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4"
+      exit $stat
+   fi
+
+   for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4"
+   do
+     test -f "$tmpdepfile" && break
+   done
+   if test -f "$tmpdepfile"; then
+      sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile"
+      sed -e 's,^.*\.[a-z]*:['"$tab"' ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile"
+   else
+      echo "#dummy" > "$depfile"
+   fi
+   rm -f "$tmpdepfile"
+   ;;
+
+msvc7)
+  if test "$libtool" = yes; then
+    showIncludes=-Wc,-showIncludes
+  else
+    showIncludes=-showIncludes
+  fi
+  "$@" $showIncludes > "$tmpdepfile"
+  stat=$?
+  grep -v '^Note: including file: ' "$tmpdepfile"
+  if test "$stat" = 0; then :
+  else
+    rm -f "$tmpdepfile"
+    exit $stat
+  fi
+  rm -f "$depfile"
+  echo "$object : \\" > "$depfile"
+  # The first sed program below extracts the file names and escapes
+  # backslashes for cygpath.  The second sed program outputs the file
+  # name when reading, but also accumulates all include files in the
+  # hold buffer in order to output them again at the end.  This only
+  # works with sed implementations that can handle large buffers.
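+  # Net effect (illustration): each 'Note: including file: ...' path is
+  # emitted once as a '<tab>path \' dependency of $object and once more, via
+  # the hold buffer, as a trailing dummy rule, like the gcc stanza does.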
+  sed < "$tmpdepfile" -n '
+/^Note: including file:  *\(.*\)/ {
+  s//\1/
+  s/\\/\\\\/g
+  p
+}' | $cygpath_u | sort -u | sed -n '
+s/ /\\ /g
+s/\(.*\)/'"$tab"'\1 \\/p
+s/.\(.*\) \\/\1:/
+H
+$ {
+  s/.*/'"$tab"'/
+  G
+  p
+}' >> "$depfile"
+  rm -f "$tmpdepfile"
+  ;;
+
+msvc7msys)
+  # This case exists only to let depend.m4 do its work.  It works by
+  # looking at the text of this script.  This case will never be run,
+  # since it is checked for above.
+  exit 1
+  ;;
+
+#nosideeffect)
+  # This comment above is used by automake to tell side-effect
+  # dependency tracking mechanisms from slower ones.
+
+dashmstdout)
+  # Important note: in order to support this mode, a compiler *must*
+  # always write the preprocessed file to stdout, regardless of -o.
+  "$@" || exit $?
+
+  # Remove the call to Libtool.
+  if test "$libtool" = yes; then
+    while test "X$1" != 'X--mode=compile'; do
+      shift
+    done
+    shift
+  fi
+
+  # Remove '-o $object'.
+  IFS=" "
+  for arg
+  do
+    case $arg in
+    -o)
+      shift
+      ;;
+    $object)
+      shift
+      ;;
+    *)
+      set fnord "$@" "$arg"
+      shift # fnord
+      shift # $arg
+      ;;
+    esac
+  done
+
+  test -z "$dashmflag" && dashmflag=-M
+  # Require at least two characters before searching for ':'
+  # in the target name.  This is to cope with DOS-style filenames:
+  # a dependency such as 'c:/foo/bar' could be seen as target 'c' otherwise.
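+  # Illustration (assuming dashmflag=-M): 'gcc -M foo.c' prints a rule such as
+  # 'foo.o: foo.c foo.h' on standard output; the sed below re-targets it onto
+  # "$object".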
+  "$@" $dashmflag |
+    sed 's:^['"$tab"' ]*[^:'"$tab"' ][^:][^:]*\:['"$tab"' ]*:'"$object"'\: :' > "$tmpdepfile"
+  rm -f "$depfile"
+  cat < "$tmpdepfile" > "$depfile"
+  tr ' ' "$nl" < "$tmpdepfile" | \
+## Some versions of the HPUX 10.20 sed can't process this invocation
+## correctly.  Breaking it into two sed invocations is a workaround.
+    sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile"
+  rm -f "$tmpdepfile"
+  ;;
+
+dashXmstdout)
+  # This case only exists to satisfy depend.m4.  It is never actually
+  # run, as this mode is specially recognized in the preamble.
+  exit 1
+  ;;
+
+makedepend)
+  "$@" || exit $?
+  # Remove any Libtool call
+  if test "$libtool" = yes; then
+    while test "X$1" != 'X--mode=compile'; do
+      shift
+    done
+    shift
+  fi
+  # X makedepend
+  shift
+  cleared=no eat=no
+  for arg
+  do
+    case $cleared in
+    no)
+      set ""; shift
+      cleared=yes ;;
+    esac
+    if test $eat = yes; then
+      eat=no
+      continue
+    fi
+    case "$arg" in
+    -D*|-I*)
+      set fnord "$@" "$arg"; shift ;;
+    # Strip any option that makedepend may not understand.  Remove
+    # the object too, otherwise makedepend will parse it as a source file.
+    -arch)
+      eat=yes ;;
+    -*|$object)
+      ;;
+    *)
+      set fnord "$@" "$arg"; shift ;;
+    esac
+  done
+  obj_suffix=`echo "$object" | sed 's/^.*\././'`
+  touch "$tmpdepfile"
+  ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@"
+  rm -f "$depfile"
+  # makedepend may prepend the VPATH from the source file name to the object.
+  # No need to regex-escape $object, excess matching of '.' is harmless.
+  sed "s|^.*\($object *:\)|\1|" "$tmpdepfile" > "$depfile"
+  sed '1,2d' "$tmpdepfile" | tr ' ' "$nl" | \
+## Some versions of the HPUX 10.20 sed can't process this invocation
+## correctly.  Breaking it into two sed invocations is a workaround.
+    sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile"
+  rm -f "$tmpdepfile" "$tmpdepfile".bak
+  ;;
+
+cpp)
+  # Important note: in order to support this mode, a compiler *must*
+  # always write the preprocessed file to stdout.
+  "$@" || exit $?
+
+  # Remove the call to Libtool.
+  if test "$libtool" = yes; then
+    while test "X$1" != 'X--mode=compile'; do
+      shift
+    done
+    shift
+  fi
+
+  # Remove '-o $object'.
+  IFS=" "
+  for arg
+  do
+    case $arg in
+    -o)
+      shift
+      ;;
+    $object)
+      shift
+      ;;
+    *)
+      set fnord "$@" "$arg"
+      shift # fnord
+      shift # $arg
+      ;;
+    esac
+  done
+
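+  # Illustration: the '-E' run below makes the preprocessor emit line markers
+  # such as '# 1 "foo.h"', from which the sed expressions recover the
+  # included file names.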
+  "$@" -E |
+    sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \
+       -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' |
+    sed '$ s: \\$::' > "$tmpdepfile"
+  rm -f "$depfile"
+  echo "$object : \\" > "$depfile"
+  cat < "$tmpdepfile" >> "$depfile"
+  sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile"
+  rm -f "$tmpdepfile"
+  ;;
+
+msvisualcpp)
+  # Important note: in order to support this mode, a compiler *must*
+  # always write the preprocessed file to stdout.
+  "$@" || exit $?
+
+  # Remove the call to Libtool.
+  if test "$libtool" = yes; then
+    while test "X$1" != 'X--mode=compile'; do
+      shift
+    done
+    shift
+  fi
+
+  IFS=" "
+  for arg
+  do
+    case "$arg" in
+    -o)
+      shift
+      ;;
+    $object)
+      shift
+      ;;
+    "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI")
+	set fnord "$@"
+	shift
+	shift
+	;;
+    *)
+	set fnord "$@" "$arg"
+	shift
+	shift
+	;;
+    esac
+  done
+  "$@" -E 2>/dev/null |
+  sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile"
+  rm -f "$depfile"
+  echo "$object : \\" > "$depfile"
+  sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::'"$tab"'\1 \\:p' >> "$depfile"
+  echo "$tab" >> "$depfile"
+  sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile"
+  rm -f "$tmpdepfile"
+  ;;
+
+msvcmsys)
+  # This case exists only to let depend.m4 do its work.  It works by
+  # looking at the text of this script.  This case will never be run,
+  # since it is checked for above.
+  exit 1
+  ;;
+
+none)
+  exec "$@"
+  ;;
+
+*)
+  echo "Unknown depmode $depmode" 1>&2
+  exit 1
+  ;;
+esac
+
+exit 0
+
+# Local Variables:
+# mode: shell-script
+# sh-indentation: 2
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
+# End:
diff --git a/do_unroll.m4 b/do_unroll.m4
new file mode 100644
index 0000000..49f497f
--- /dev/null
+++ b/do_unroll.m4
@@ -0,0 +1,1820 @@
+dnl
+dnl
+dnl	@author: Michele Martone
+dnl
+dnl	This file contains loop-unrolled kernels for many operations.
+dnl
+dnl	TODO  : eliminate all traces of the "negation" kernel, as it is no longer legal.
+dnl	FIXME : the only kernel working with transposition is spmv.
+dnl
+include(`rsb_misc.m4')dnl
+include(`wisdom.m4')dnl
+dnl 
+dnl 
+dnl  An m4 documentation macro follows
+dnl 
+dnl
+dnl 
+dnl	---------------------------------------------------------------------------	dnl
+dnl				Function name and declaration macros
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl
+dnl	RSB_M4_KERNEL_FUNCTION_NAME(TYPE,ROWS_UNROLL,COLS_UNROLL,UNROLLING,MOP,TRANSPOSITION)
+dnl	-----------------------------------------------------------------------
+dnl	Expands to the function name of the kernel specified by the macro arguments.
+dnl
+dnl
+define(`RSB_M4_KERNEL_FUNCTION_NAME',dnl
+pushdef(`type',$1)dnl
+pushdef(`rows_unroll',$2)dnl
+pushdef(`cols_unroll',$3)dnl
+pushdef(`unrolling',$4)dnl
+pushdef(`mop',$5)dnl
+pushdef(`transposition',$6)dnl
+pushdef(`citype',$7)dnl
+dnl
+dnl	FIXME : using symbolic names here is troublesome !
+dnl	
+dnl pushdef(`unrolling',ifelse($2,`l',`l',`'))dnl FIXME : this is a temporary fix (setting to `' instead of `u')
+``rsb_m'`$5'`_'RSB_M4_TYPE_CODE(`type')`_'dnl
+RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_CHARCODE($7)`_'dnl
+`_r'`$2'`_c'`$3'ifelse(`$4',l,_l,)`'RSB_M4_TRANSPOSITION_CODE(transposition)'dnl
+dnl popdef(`unrolling')
+popdef(`citype')dnl
+popdef(`transposition')dnl
+popdef(`rows_unroll')dnl
+popdef(`cols_unroll')dnl
+popdef(`unrolling')dnl
+popdef(`type')dnl
+popdef(`mop')dnl
+)dnl
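+dnl
+dnl	For illustration (the shape below is derived from the quoted fragments
+dnl	above; the actual codes come from the RSB_M4_*_CODE helper macros):
+dnl	RSB_M4_KERNEL_FUNCTION_NAME(type,2,4,`',`spmv_uaua',trans,citype)
+dnl	expands to a name like: rsb_mspmv_uaua_<typecode>_<indexcode>__r2_c4<transcode>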
+dnl
+dnl
+dnl	RSB_M4_KERNEL_FUNCTION_ARGS(TYPE,UNROLLING,MOP)
+dnl	-----------------------------------------------
+dnl	Expands to the function arguments of the kernel specified by the macro arguments.
+dnl
+define(`RSB_M4_KERNEL_FUNCTION_ARGS',`dnl
+dnl
+dnl
+pushdef(`mtype',`$1')dnl
+pushdef(`itype',`rsb_coo_idx_t')dnl
+pushdef(`unrolling',ifelse($2,`l',`l',`'))dnl FIXME : this is a temporary fix (setting to `' instead of `u')
+pushdef(`optype',$3)dnl
+dnl	not fully unrolled :
+ifelse(unrolling`'optype, `spmm_az',dnl
+`(const mtype *a, const mtype *b, mtype *c, const itype rows, const itype columns, const itype bstride, const itype cstride, const itype nrhs)')dnl
+ifelse(unrolling`'optype, `l'`spmm_az',dnl
+`(const mtype *a, const mtype *b, mtype *c, const itype rows, const itype columns, const itype bstride, const itype cstride, const itype nrhs)')dnl
+dnl	fully unrolled :
+ifelse(unrolling`'optype, `infty_norm',dnl
+`(const mtype *a, mtype *local_row_sums, const itype dummy_rows, const itype dummy_columns)')dnl
+ifelse(unrolling`'optype,`l'`infty_norm',dnl
+`(const mtype *a, mtype *local_row_sums, const itype rows, const itype columns)')dnl
+ifelse(unrolling`'optype, `rowssums',dnl
+`(const mtype *a, mtype *local_row_sums, const itype dummy_rows, const itype dummy_columns)')dnl
+ifelse(unrolling`'optype,`l'`rowssums',dnl
+`(const mtype *a, mtype *local_row_sums, const itype rows, const itype columns)')dnl
+ifelse(unrolling`'optype, `negation',dnl
+`(mtype *a, const itype dummy_rows, const itype dummy_columns)')dnl
+ifelse(unrolling`'optype,`l'`negation',dnl
+`(mtype *a, const itype rows, const itype columns)')dnl
+ifelse(unrolling`'optype, `spmv_uauz',dnl
+`(const mtype *a, const mtype *b, mtype *c, const itype dummy_rows, const itype dummy_columns)')dnl
+ifelse(unrolling`'optype,`l',dnl
+`(const mtype *a, const mtype *b, mtype *c, const itype rows, const itype columns)')dnl
+ifelse(unrolling`'optype, `spsv_uxua',dnl
+`(const mtype *a, const mtype *b, mtype *c, const itype dummy_rows, const itype dummy_columns)')dnl
+ifelse(unrolling`'optype,`l'`spsv_uxua',dnl
+`(const mtype *a, const mtype *b, mtype *c, const itype rows, const itype columns)')dnl
+ifelse(unrolling`'optype, `spmv_unua',dnl
+`(const mtype *a, const mtype *b, mtype *c, const itype dummy_rows, const itype dummy_columns)')dnl
+ifelse(unrolling`'optype,`l'`spmv_unua',dnl
+`(const mtype *a, const mtype *b, mtype *c, const itype rows, const itype columns)')dnl
+ifelse(unrolling`'optype, `spmv_uaua',dnl
+`(const mtype *a, const mtype *b, mtype *c, const itype dummy_rows, const itype dummy_columns)')dnl
+ifelse(unrolling`'optype,`l'`spmv_uaua',dnl
+`(const mtype *a, const mtype *b, mtype *c, const itype rows, const itype columns)')dnl
+ifelse(unrolling`'optype, `spmv_uxua',dnl
+`(const mtype *a, const mtype *b, mtype *c, const itype dummy_rows, const itype dummy_columns, const mtype *alphap)')dnl
+ifelse(unrolling`'optype,`l'`spmv_uxua',dnl
+`(const mtype *a, const mtype *b, mtype *c, const itype rows, const itype columns, const mtype * alphap)')dnl
+ifelse(unrolling`'optype, `spmv_uxux',dnl
+`(const mtype *a, const mtype *b, mtype *c, const itype dummy_rows, const itype dummy_columns, const mtype *alphap, const mtype *betap)')dnl
+ifelse(unrolling`'optype,`l'`spmv_uxux',dnl
+`(const mtype *a, const mtype *b, mtype *c, const itype rows, const itype columns, const mtype * alphap, const mtype *betap)')dnl
+ifelse(unrolling`'optype,`scale',dnl
+`(mtype *a, const mtype *d, const itype rows, const itype columns /* dummy rows and columns */)')dnl
+dnl `(mtype *a, const mtype *d)')dnl
+ifelse(unrolling`'optype,`l'`scale',dnl
+`(mtype *a, const mtype *d, const itype rows, const itype columns)')dnl
+popdef(`optype')dnl
+popdef(`unrolling')dnl
+popdef(`itype')dnl
+popdef(`mtype')dnl
+')dnl
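+dnl
+dnl	For example, RSB_M4_KERNEL_FUNCTION_ARGS(`double',`',`spmv_uaua') expands,
+dnl	after the mtype/itype substitutions, to:
+dnl	(const double *a, const double *b, double *c, const rsb_coo_idx_t dummy_rows, const rsb_coo_idx_t dummy_columns)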
+dnl
+dnl
+dnl	RSB_M4_KERNEL_FUNCTION_PROTOTYPE(TYPE,ROWS_UNROLL,COLS_UNROLL,UNROLLING,MOP,TRANSPOSITION)
+dnl	----------------------------------------------------------------------------
+dnl	Expands to the function prototype of the kernel specified by the macro arguments.
+dnl
+define(`RSB_M4_KERNEL_FUNCTION_PROTOTYPE',`dnl
+void RSB_M4_KERNEL_FUNCTION_NAME($1,$2,$3,$4,$5,$6,RSB_M4_DEFAULT_COORDINATE_INDEX_TYPE) RSB_M4_KERNEL_FUNCTION_ARGS($1,$4,$5)'dnl
+)dnl
+dnl
+dnl
+dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl				Function body macros
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_NEGATE_FUNCTION_BODY_UNROLLED()
+dnl	--------------------------------------
+dnl	Negates the whole matrix;
+dnl	Unrolled.
+dnl
+define(`RSB_M4_NEGATE_FUNCTION_BODY_UNROLLED',`dnl
+dnl
+	/* NOTE: it would be better to use some intrinsic here. */
+RSB_M4_DEBUGINFO(``$0'')dnl
+ifelse(RSB_M4_there_is_real_blocking,`1',`dnl
+	/* major_increment times unrolling on the major dimension, for each unroll on the minor dimension */
+')dnl
+	forloop(`major_unrolling',0,decr(major_increment),`forloop(`minor_unrolling',0,decr(minor_increment),
+	`a[(minor_unrolling*major_increment)+major_unrolling]=-a[(minor_unrolling*major_increment)+major_unrolling];
+	')'
+	)
+')dnl
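+dnl
+dnl	e.g. with major_increment=2 and minor_increment=2 the nested forloop above
+dnl	expands to: a[0]=-a[0]; a[2]=-a[2]; a[1]=-a[1]; a[3]=-a[3];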
+dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl	RSB_M4_UNROLL_L_NEGATE_FUNCTION_BODY()
+dnl	--------------------------------------
+dnl	Negates the whole matrix; 
+dnl
+define(`RSB_M4_UNROLL_L_NEGATE_FUNCTION_BODY',`dnl
+dnl
+ifelse(RSB_M4_there_is_real_blocking,`1',`dnl
+	/*!
+	 * loop fully unrolled minor_increment times on the minor dimension, major_increment times on the major one
+	 */
+')dnl
+RSB_M4_DEBUGINFO(``$0'')dnl
+	register itype minor_index,major_index;
+	for(minor_index=0;minor_index+eval(minor_increment-1)<minor_maximum;minor_index+=minor_increment)
+	{
+		for(major_index=0;major_index+eval(major_increment-1)<major_maximum;major_index+=major_increment)
+		{
+			forloop(`minor_unrolling',0,decr(minor_increment),
+ifelse(RSB_M4_there_is_real_blocking,`1',`dnl
+			`/* major_increment times unrolling on the major dimension,  */
+')dnl
+			forloop(`major_unrolling',0,decr(major_increment),
+			`a[major_maximum*(minor_index+minor_unrolling)+major_index+major_unrolling]=-a[major_maximum*(minor_index+minor_unrolling)+major_index+major_unrolling];
+			')
+			')
+		}
+		ifelse(major_increment,1,`',
+		`/* we handle the last (columns mod major_increment) columns */
+		for(;major_index<major_maximum;++major_index)
+		{forloop(`minor_unrolling',0,decr(minor_increment),`
+		a[major_maximum*(minor_index+minor_unrolling)+major_index]=-a[major_maximum*(minor_index+minor_unrolling)+major_index];
+		')
+		}
+		')
+	}
+	ifelse(minor_increment,1,`',
+	`/* we handle the last (rows mod minor_increment) rows entirely */
+	for(;minor_index<minor_maximum;++minor_index) for(major_index=0;major_index<major_maximum;++major_index)a[major_maximum*(minor_index)+major_index]=-a[major_maximum*(minor_index)+major_index];
+	')
+')dnl
+dnl
+dnl
+dnl	RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED()
+dnl	------------------------------------------
+dnl	Expands to the fully unrolled infinity norm kernel.
+dnl
+define(`RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED',`dnl
+dnl
+ifelse(RSB_M4_IS_COMPLEX_TYPE(mtype),1,`/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */')dnl
+
+`#define CABS(X)' RSB_M4_ABS(mtype,X)
+ifelse(transposition,RSB_M4_TRANS_T,`dnl
+pushdef(`transposed_major_increment',minor_increment)`'dnl
+pushdef(`transposed_minor_increment',major_increment)`'dnl
+pushdef(`transposed_minor_unrolling',major_unrolling)`'dnl
+',`dnl
+pushdef(`transposed_minor_increment',minor_increment)`'dnl
+pushdef(`transposed_major_increment',major_increment)`'dnl
+pushdef(`transposed_minor_unrolling',minor_unrolling)`'dnl
+')dnl
+
+	/* NOTE: it would be better to use some intrinsic here. */
+RSB_M4_DEBUGINFO(``$0'')dnl
+	forloop(`minor_unrolling',0,decr(transposed_minor_increment),`register mtype `sum_'minor_unrolling=0;
+	')
+ifelse(RSB_M4_there_is_real_blocking,`1',`dnl
+	/* major_increment times unrolling on the major dimension, for each unroll on the minor dimension */
+')dnl
+	forloop(`major_unrolling',0,decr(major_increment),`forloop(`minor_unrolling',0,decr(minor_increment),
+	``sum_'transposed_minor_unrolling+=CABS(RSB_M4_CONJ(a[(minor_unrolling*major_increment)+major_unrolling],mtype,transposition))<0?-RSB_M4_CONJ(a[(minor_unrolling*major_increment)+major_unrolling],mtype,transposition,k_symmetry):RSB_M4_CONJ(a[(minor_unrolling*major_increment)+major_unrolling],mtype,transposition,k_symmetry);
+	')'
+	)
+	forloop(`minor_unrolling',0,decr(transposed_minor_increment),
+	`local_row_sums[minor_unrolling]+=`sum_'minor_unrolling;
+	')
+`#undef CABS'
+dnl
+popdef(`transposed_major_increment')`'dnl
+popdef(`transposed_minor_increment')`'dnl
+
+')dnl
+dnl
+dnl	end RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED
+dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED()
+dnl	------------------------------------------
+dnl	Expands to the fully unrolled infinity norm kernel.
+dnl
+dnl	FIXME: should document this is used also for rowssums
+dnl
+define(`RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED',`dnl
+dnl
+ifelse(RSB_M4_IS_COMPLEX_TYPE(mtype),1,`/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */')dnl
+
+ifelse(mop,`rowssums',`dnl
+pushdef(`abs_or_not',`0')`'dnl
+')dnl
+dnl
+ifelse(mop,`infty_norm',`dnl
+pushdef(`abs_or_not',`1')`'dnl
+dnl`#define CABS(X)' RSB_M4_ABS(mtype,X)
+dnl pushdef(`RSB_M4_CABS_',`CABS')`'dnl
+dnl pushdef(`RSB_M4_CABS_',`RSB_M4_ABS')`'dnl
+')dnl
+dnl
+ifelse(transposition,RSB_M4_TRANS_T,`dnl
+pushdef(`transposed_major_increment',minor_increment)`'dnl
+pushdef(`transposed_minor_increment',major_increment)`'dnl
+pushdef(`transposed_minor_unrolling',major_unrolling)`'dnl
+',`dnl
+pushdef(`transposed_minor_increment',minor_increment)`'dnl
+pushdef(`transposed_major_increment',major_increment)`'dnl
+pushdef(`transposed_minor_unrolling',minor_unrolling)`'dnl
+')dnl
+dnl
+ifelse(transposition,RSB_M4_TRANS_N,`dnl
+pushdef(`transposed_row_sums_off',roff)`'dnl
+pushdef(`retransposed_row_sums_off',coff)`'dnl
+',`dnl
+pushdef(`transposed_row_sums_off',coff)`'dnl
+pushdef(`retransposed_row_sums_off',roff)`'dnl
+')dnl
+
+	/* NOTE: it would be better to use some intrinsic here. */
+RSB_M4_DEBUGINFO(``$0'')dnl
+	forloop(`minor_unrolling',0,decr(transposed_minor_increment),`register mtype `sum_'minor_unrolling=0;
+	')
+ifelse(RSB_M4_there_is_real_blocking,`1',`dnl
+	/* major_increment times unrolling on the major dimension, for each unroll on the minor dimension */
+')dnl
+dnl
+	forloop(`major_unrolling',0,decr(major_increment),`forloop(`minor_unrolling',0,decr(minor_increment),
+	``sum_'transposed_minor_unrolling += RSB_M4_ABS_IF_1(mtype,RSB_M4_CONJ(a[(minor_unrolling*major_increment)+major_unrolling],mtype,transposition),abs_or_not);
+	')'
+	)
+	forloop(`minor_unrolling',0,decr(transposed_minor_increment),
+	`local_row_sums[transposed_row_sums_off+minor_unrolling]+=`sum_'minor_unrolling;
+	')
+dnl
+ifelse(RSB_M4_IS_NOT_UNSYMMETRIC(k_symmetry),0,`dnl
+dnl	unsymmetric case: ok
+',`dnl
+dnl	symmetric case: 
+	if(roff!=coff || i!=j)
+	forloop(`minor_unrolling',0,decr(transposed_minor_increment),
+	`	row_sums[retransposed_row_sums_off+minor_unrolling+bci]+=`sum_'minor_unrolling;
+	')
+')dnl
+ifelse(mop,`infty_norm',`dnl
+dnl `#undef CABS'
+')dnl
+dnl
+popdef(`retransposed_row_sums_off')`'dnl
+popdef(`transposed_row_sums_off')`'dnl
+popdef(`abs_or_not')`'dnl
+popdef(`transposed_major_increment')`'dnl
+popdef(`transposed_minor_increment')`'dnl
+
+')dnl
+dnl
+dnl	end RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED
+dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl	RSB_M4_UNROLL_L_INFTY_NORM_FUNCTION_BODY()
+dnl	------------------------------------------
+dnl	Expands to the unrolled (with loops) infinity norm kernel.
+dnl
+define(`RSB_M4_UNROLL_L_INFTY_NORM_FUNCTION_BODY',`dnl
+dnl
+ifelse(RSB_M4_IS_COMPLEX_TYPE(mtype),1,`/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */')dnl
+dnl
+ifelse(RSB_M4_IS_BCSR(minor_increment,major_increment),`1',`dnl
+ifelse(RSB_M4_there_is_real_blocking,`1',`dnl
+	/*!
+	 * loop fully unrolled minor_increment times on the minor dimension, major_increment times on the major one
+	 */
+')dnl
+')dnl
+dnl
+RSB_M4_DEBUGINFO(``$0'')dnl
+
+`#define CABS(X)' RSB_M4_ABS(mtype,X)
+
+	register itype minor_index,major_index;
+	for(minor_index=0;minor_index+eval(minor_increment-1)<minor_maximum;minor_index+=minor_increment)
+	{
+		for(major_index=0;major_index+eval(major_increment-1)<major_maximum;major_index+=major_increment)
+		{
+			forloop(`minor_unrolling',0,decr(minor_increment),
+			`/* major_increment times unrolling on the major dimension,  */
+			forloop(`major_unrolling',0,decr(major_increment),
+			`local_row_sums[minor_index+minor_unrolling]+=CABS(RSB_M4_CONJ(a[major_maximum*(minor_index+minor_unrolling)+major_index+major_unrolling],mtype,transposition))<0? - RSB_M4_CONJ(a[major_maximum*(minor_index+minor_unrolling)+major_index+major_unrolling],mtype,transposition,k_symmetry):RSB_M4_CONJ(a[major_maximum*(minor_index+minor_unrolling)+major_index+major_unrolling],mtype,transposition,k_symmetry);
+			')
+			')
+		}
+		ifelse(major_increment,1,`',
+		`/* we handle the last (columns mod major_increment) columns */
+		for(;major_index<major_maximum;++major_index)
+		{forloop(`minor_unrolling',0,decr(minor_increment),`
+		local_row_sums[minor_index+minor_unrolling]+=RSB_M4_CONJ(a[major_maximum*(minor_index+minor_unrolling)+major_index],mtype,transposition,k_symmetry);
+		')
+		}
+		')
+	}
+`#undef CABS'
+	ifelse(minor_increment,1,`',
+	`/* we handle the last (rows mod minor_increment) rows entirely */
+	for(;minor_index<minor_maximum;++minor_index) for(major_index=0;major_index<major_maximum;++major_index)local_row_sums[minor_index]+=RSB_M4_CONJ(a[major_maximum*(minor_index)+major_index],mtype,transposition,k_symmetry);
+	')
+')dnl
+dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl
+dnl	RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED()
+dnl	-----------------------------------------
+dnl	Expands to the fully unrolled row scaling kernel.
+dnl
+define(`RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED',`dnl
+dnl
+dnl	* Apply a scaling to the rows of the matrix, or apply a scalar scaling to all the coefficients.
+dnl	* 
+dnl	* Equivalent to multiplying by a k x m matrix (the d vector is sized m) whose column i values
+dnl	* are all d[i].
+dnl
+dnl	TODO : C ORDER ONLY
+dnl
+RSB_M4_DEBUGINFO(``$0'')dnl
+ifelse(transposition,RSB_M4_TRANS_T,`
+dnl	Fortran order
+	forloop(`major_unrolling',0,decr(major_increment),`forloop(`minor_unrolling',0,decr(minor_increment),
+	`a[(minor_unrolling*major_increment)+major_unrolling]*=d[major_unrolling];
+	')'
+	)
+',`
+dnl	C order
+ifelse(RSB_M4_there_is_real_blocking,`1',`dnl
+	/* major_increment times unrolling on the major dimension, for each unroll on the minor dimension */
+')dnl
+	forloop(`major_unrolling',0,decr(major_increment),`forloop(`minor_unrolling',0,decr(minor_increment),
+	`a[(minor_unrolling*major_increment)+major_unrolling]*=d[minor_unrolling];
+	')'
+	)
+')dnl
+')dnl
+dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl	RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED_L()
+dnl	-------------------------------------------
+dnl	Expands to the unrolled (with loops) row scaling kernel.
+dnl
+define(`RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED_L',`dnl
+dnl
+RSB_M4_DEBUGINFO(``$0'')dnl
+	/*
+	* Apply a scaling to the rows of the matrix, or apply a scalar scaling to all the coefficients.
+	* 
+	* Equivalent to multiplying by a k x m matrix (the d vector is sized m) whose column i values
+	* are all d[i].
+	*/
+
+		register itype minor_index,major_index;
+		for(minor_index=0;minor_index+eval(minor_increment-1)<minor_maximum;minor_index+=minor_increment)
+		{
+			for(major_index=0;major_index+eval(major_increment-1)<major_maximum;major_index+=major_increment)
+			{
+				forloop(`minor_unrolling',0,decr(minor_increment),
+				`/* major_increment times unrolling on the major dimension,  */
+				forloop(`major_unrolling',0,decr(major_increment),
+				`a[(minor_index+minor_unrolling)*columns+major_index+major_unrolling]*=d[minor_index+minor_unrolling];
+				')
+				')
+			}
+			ifelse(major_increment,1,`',
+			`/* we handle the last (columns mod major_increment) columns */
+			for(;major_index<major_maximum;++major_index)
+			{forloop(`minor_unrolling',0,decr(minor_increment),`
+				a[(minor_index+minor_unrolling)*columns+major_index]*=d[minor_index+minor_unrolling];')
+			}
+			')
+		}
+	ifelse(minor_increment,1,`',
+		`/* we handle the last (rows mod minor_increment) rows entirely */
+		for(;minor_index<minor_maximum;++minor_index) for(major_index=0;major_index<major_maximum;++major_index)
+		a[minor_index * major_maximum + major_index ]*=d[minor_index];
+		')
+')dnl
+dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl	RSB_M4_MV_FUNCTION_BODY_UNROLLED()
+dnl	----------------------------------
+dnl	Expands to the fully unrolled matrix vector multiplication kernel.
+dnl
+dnl	TODO : This macro is unused when register blocking is on. Should unify both.
+dnl
+define(`RSB_M4_MV_FUNCTION_BODY_UNROLLED',`dnl
+dnl
+RSB_M4_DEBUGINFO(``$0'')dnl
+	forloop(`minor_unrolling',0,decr(minor_increment),`register mtype `c_'minor_unrolling=0;
+	')
+dnl	register mtype c_=0;
+ifelse(RSB_M4_there_is_real_blocking,`1',`dnl
+	/* major_increment times unrolling on the major dimension, for each unroll on the minor dimension */
+')dnl
+	forloop(`major_unrolling',0,decr(major_increment),`forloop(`minor_unrolling',0,decr(minor_increment),
+	``c_'minor_unrolling += RSB_M4_CONJ(a[(minor_unrolling*major_increment)+major_unrolling],mtype,transposition,k_symmetry)*b[major_unrolling];
+	')'
+	)
+	forloop(`minor_unrolling',0,decr(minor_increment),
+	`c[minor_unrolling]+=`c_'minor_unrolling;
+	/*c[minor_unrolling]+= alpha * `c_'minor_unrolling + beta * c[minor_unrolling];*/
+	')
+')dnl
+dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl
+dnl	RSB_M4_MV_FUNCTION_BODY_UNROLLED_REGISTER_BLOCKED()
+dnl	---------------------------------------------------
+dnl	Expands to the fully unrolled, register blocked matrix vector multiplication kernel.
+dnl
+define(`RSB_M4_MV_FUNCTION_BODY_UNROLLED_REGISTER_BLOCKED',`dnl
+dnl
+dnl	FIXME : missing documentation
+dnl
+dnl	20101112 spsv.* subkernel is supported here	
+dnl
+dnl	/* experimental RSB_M4_REGISTERS registers tuned blocking technique. */
+dnl
+pushdef(`should_merge_value_after_inner_loop',`RSB_M4_should_merge_value_after_inner_loop_inner')dnl
+dnl
+forloop(`RB',0,eval(minor_increment/RSB_M4_REGISTERS),`dnl
+ifelse(eval(RB*RSB_M4_REGISTERS<minor_increment),1,`dnl
+ifelse(eval(RB*RSB_M4_REGISTERS<minor_increment),1,`dnl
+	{
+dnl		/* using alpha and beta seems like a 10% hit for some 8x8 setups */
+dnl		/*mtype *alphap=1, *betap=0;*/
+dnl		/* beginning of a register block */',`
+')dnl
+dnl
+dnl	FIXME : new
+dnl
+dnl
+dnl ifelse(transposition,RSB_M4_TRANS_T,`
+dnl /* this is a transposed kernel */
+dnl ',`')dnl
+dnl
+pushdef(`c_unrolling_rb',`ifelse(transposition,RSB_M4_TRANS_T,`major_unrolling',`minor_unrolling_rb')')dnl
+pushdef(`b_unrolling_rb',`ifelse(transposition,RSB_M4_TRANS_T,`minor_unrolling_rb',`major_unrolling')')dnl
+pushdef(`c_increment',`ifelse(transposition,RSB_M4_TRANS_T,`major_increment',`minor_increment')')dnl
+pushdef(`b_increment',`ifelse(transposition,RSB_M4_TRANS_T,`minor_increment',`major_increment')')dnl
+dnl pushdef(`c_dest',ifelse(should_merge_value_after_inner_loop,`1',`cacc',`c[c_unrolling_rb]'))dnl
+pushdef(`c_dest',ifelse(should_merge_value_after_inner_loop,`1',`cacc',`c[c_unrolling_rb]'))dnl
+dnl
+forloop(`c_unrolling_rb',eval((RB*RSB_M4_REGISTERS)),ifelse(eval((RB+1)*RSB_M4_REGISTERS>=c_increment),1,decr(c_increment),decr(eval((RB+1)*RSB_M4_REGISTERS))),`
+dnl	register block variables declaration
+ifelse(should_merge_value_after_inner_loop,`1',`',`dnl
+		register mtype `c_'c_unrolling_rb = RSB_M4_ZERO(mtype);
+')dnl
+dnl		format(`register %s `c_'%02d=0;',mtype,c_unrolling_rb)dnl
+		')',`')dnl
+dnl	register block variables declaration
+		ifelse(eval(RB*RSB_M4_REGISTERS<minor_increment),1,`
+dnl	register block variables declaration
+ifelse(RSB_M4_there_is_real_blocking,`1',`dnl
+		/* major_increment times unrolling on the major dimension, for each unroll on the minor dimension */
+')dnl
+dnl	FIXME : THE FOLLOWING LOOP UNROLLINGS ORDER HAS INFLUENCE ON PERFORMANCE AND SHOULD BE STUDIED AT LOW LEVEL !
+forloop(`minor_unrolling_rb',eval((RB*RSB_M4_REGISTERS)),ifelse(eval((RB+1)*RSB_M4_REGISTERS>=minor_increment),1,decr(minor_increment),decr(eval((RB+1)*RSB_M4_REGISTERS))),dnl
+`
+forloop(`major_unrolling',0,decr(major_increment),`dnl
+ifelse(RSB_M4_IS_SPMV_KERNEL_MOP(mop),1,`dnl
+ifelse(should_merge_value_after_inner_loop,`1',`dnl
+`		cacc'+=RSB_M4_CONJ(a[(minor_unrolling_rb*major_increment)+major_unrolling],mtype,transposition,k_symmetry)*bn[b_unrolling_rb];
+',`dnl
+`		c_'c_unrolling_rb += RSB_M4_CONJ(a[(minor_unrolling_rb*major_increment)+major_unrolling],mtype,transposition,k_symmetry)*bn[b_unrolling_rb];
+')dnl
+',`dnl
+'ifelse(should_merge_value_after_inner_loop,`1',`dnl
+`		cacc'+=RSB_M4_CONJ(a[(minor_unrolling_rb*major_increment)+major_unrolling],mtype,transposition,k_symmetry)*b[b_unrolling_rb];
+',`dnl
+`		c_'c_unrolling_rb += RSB_M4_CONJ(a[(minor_unrolling_rb*major_increment)+major_unrolling],mtype,transposition,k_symmetry)*b[b_unrolling_rb];
+')dnl
+)dnl
+')dnl
+')dnl
+dnl
+forloop(`c_unrolling_rb',eval((RB*RSB_M4_REGISTERS)),ifelse(eval((RB+1)*RSB_M4_REGISTERS>=c_increment),1,decr(c_increment),decr(eval((RB+1)*RSB_M4_REGISTERS))),`dnl
+ifelse(should_merge_value_after_inner_loop,`1',`',`dnl
+ifelse(RSB_M4_IS_SPMX_OP_NEGATING_KERNEL_MOP(mop),`1',`dnl
+			c[c_unrolling_rb]-=`c_'c_unrolling_rb;
+')dnl
+ifelse(RSB_M4_IS_SPMX_OP_ADDING_KERNEL_MOP(mop),`1',`dnl
+			c[c_unrolling_rb]+=`c_'c_unrolling_rb;
+')dnl
+ifelse(RSB_M4_IS_SPMX_OP_SCALING_KERNEL_MOP(mop),`1',`dnl
+ifelse(RSB_M4_is_transposed_spmv,1,`dnl
+			c[c_unrolling_rb]+= `c_'c_unrolling_rb ;
+',`dnl
+			c[c_unrolling_rb]+= alpha * `c_'c_unrolling_rb ;
+')dnl
+')dnl
+ifelse(RSB_M4_IS_SPSX_KERNEL_MOP(mop),`1',`dnl
+dnl
+dnl	FIXME: an optimized implementation would hoist this out of the loop.
+dnl
+			c[c_unrolling_rb]+= `c_'c_unrolling_rb ;
+')dnl
+')dnl
+')dnl
+dnl
+	}',`')dnl
+	')
+dnl
+popdef(`should_merge_value_after_inner_loop')dnl
+popdef(`c_dest')dnl
+popdef(`b_unrolling_rb')dnl
+popdef(`c_unrolling_rb')dnl
+popdef(`c_increment')dnl
+popdef(`b_increment')dnl
+dnl
+')dnl
+dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl	RSB_M4_UNROLL_L_MV_FUNCTION_BODY()
+dnl	----------------------------------
+dnl	Expands to the unrolled (but still looped) matrix vector multiplication kernel.
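+dnl	As a sketch (hypothetical 2x2 unroll, plain adding variant), the emitted
+dnl	C is a doubly blocked loop nest plus remainder loops:
+dnl	  for(i=0;i+1<minor_maximum;i+=2)
+dnl	    for(j=0;j+1<major_maximum;j+=2)
+dnl	      c[i+0]+=a[major_maximum*(i+0)+j+0]*b[j+0]; /* ...4 such lines */
+dnl	followed by a loop over the leftover columns and one over the leftover rows.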
+dnl
+define(`RSB_M4_UNROLL_L_MV_FUNCTION_BODY',`dnl
+dnl
+RSB_M4_DEBUGINFO(``$0'')dnl
+	/*!
+	 * Loop fully unrolled minor_increment times on the minor dimension, major_increment times on the major one.
+	 * Assumes a,b,c are mtype arrays: a is minor_maximum x major_maximum, b is major_maximum x 1, c is minor_maximum x 1.
+	 * Assumes matrices are in row major (C) order.
+	 */
+	register itype minor_index,major_index;
+dnl
+pushdef(`spmv_uxux_alpha',`ifelse(RSB_M4_IS_SPMX_OP_SCALING_KERNEL_MOP(mop),`1',`alpha*',`')')dnl
+dnl
+	for(minor_index=0;minor_index+eval(minor_increment-1)<minor_maximum;minor_index+=minor_increment)
+	{
+		for(major_index=0;major_index+eval(major_increment-1)<major_maximum;major_index+=major_increment)
+		{
+			forloop(`minor_unrolling',0,decr(minor_increment),
+			`/* major_increment times unrolling on the major dimension,  */
+			forloop(`major_unrolling',0,decr(major_increment),
+			`c[minor_index+minor_unrolling]+=spmv_uxux_alpha`'RSB_M4_CONJ(a[major_maximum*(minor_index+minor_unrolling)+major_index+major_unrolling],mtype,transposition,k_symmetry)*b[major_index+major_unrolling];
+			')
+			')
+		}
+		ifelse(major_increment,1,`',
+		`/* we handle the last (columns mod major_increment) columns */
+		for(;major_index<major_maximum;++major_index)
+		{forloop(`minor_unrolling',0,decr(minor_increment),
+ifelse(RSB_M4_IS_SPMX_OP_ADDING_KERNEL_MOP(mop),1,`dnl
+		c[minor_index+minor_unrolling]+=RSB_M4_CONJ(a[major_maximum*(minor_index+minor_unrolling)+major_index],mtype,transposition,k_symmetry)*b[major_index];
+		')dnl
+ifelse(RSB_M4_IS_SPMX_OP_NEGATING_KERNEL_MOP(mop),`1',`dnl
+		c[minor_index+minor_unrolling]-=RSB_M4_CONJ(a[major_maximum*(minor_index+minor_unrolling)+major_index],mtype,transposition,k_symmetry)*b[major_index];
+		')dnl
+ifelse(RSB_M4_IS_SPMX_OP_SCALING_KERNEL_MOP(mop),`1',`dnl
+		c[minor_index+minor_unrolling]+=alpha*RSB_M4_CONJ(a[major_maximum*(minor_index+minor_unrolling)+major_index],mtype,transposition,k_symmetry)*b[major_index];
+	')dnl
+		)
+		}
+		')
+	}
+	ifelse(minor_increment,1,`',
+	`/* we handle the last (rows mod minor_increment) rows entirely */
+	for(;minor_index<minor_maximum;++minor_index) for(major_index=0;major_index<major_maximum;++major_index)c[minor_index]+=spmv_uxux_alpha`'RSB_M4_CONJ(a[major_maximum*(minor_index)+major_index],mtype,transposition,k_symmetry)*b[major_index];
+	')
+dnl
+popdef(`spmv_uxux_alpha')dnl
+dnl
+')dnl
+dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl
+dnl	RSB_M4_UNROLL_MM_FUNCTION_BODY()
+dnl	--------------------------------
+dnl	Expands to the matrix matrix multiplication kernel.
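+dnl	Schematically (adding variant), for each of the nrhs right hand sides
+dnl	it emits the fully unrolled block product:
+dnl	  for(r=0;r<nrhs;++r)
+dnl	    c[r*cstride+i] += a[i*major_increment+j] * b[r*bstride+j];
+dnl	for every (i,j) in the minor_increment x major_increment block.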
+dnl
+define(`RSB_M4_UNROLL_MM_FUNCTION_BODY',`dnl
+RSB_M4_DEBUGINFO(``$0'')dnl
+pushdef(`r',`rh')
+	/*!
+	 * `RSB_M4_UNROLL_MM_FUNCTION_BODY kernel'
+	 * Loop fully unrolled minor_increment times on the minor dimension, major_increment times on the major one.
+	 * Assumes a,b,c are mtype arrays: a is minor_increment x major_increment, b is major_increment x nrhs, c is minor_increment x nrhs.
+	 * Assumes matrices are in row major (C) order.
+	 */
+	/* WARNING : THIS IS JUST A SKETCH OF THE CODE : IT IS NOT SUPPOSED TO BE CORRECT OR FAST */
+	register itype r;
+	for(r=0;r<nrhs;++r)/* right hand side columns could have a big stride, though */
+	{
+		/* loop fully unrolled minor_increment times on the minor dimension, major_increment times on the major one */
+		/* this function will touch eval(minor_increment*major_increment)*sizeof(mtype) bytes of a matrix block */
+forloop(`minor_unrolling',0,decr(minor_increment),`dnl
+		/* major_increment times unrolling on the major dimension,  */
+forloop(`major_unrolling',0,decr(major_increment),`dnl
+		c[r*cstride+minor_unrolling]+=RSB_M4_CONJ(a[minor_unrolling*major_increment+major_unrolling],mtype,transposition,k_symmetry)*b[r*bstride+major_unrolling];
+')dnl
+')dnl
+	}
+popdef(`r')
+')dnl
+dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl
+dnl	RSB_M4_UNROLL_L_MM_FUNCTION_BODY()
+dnl	----------------------------------
+dnl	Expands to the matrix matrix multiplication kernel.
+dnl
+define(`RSB_M4_UNROLL_L_MM_FUNCTION_BODY',`dnl
+RSB_M4_DEBUGINFO(``$0'')dnl
+pushdef(`r',`rh')
+	/*!
+	 * Loop unrolled minor_increment times on the minor dimension, major_increment times on the major one.
+	 * Assumes a,b,c are mtype arrays: a is minor_maximum x major_maximum, b is major_maximum x nrhs, c is minor_maximum x nrhs.
+	 * Assumes matrices are in row major (C) order.
+	 */
+	/* WARNING : THIS IS JUST A SKETCH OF THE CODE : IT IS NOT SUPPOSED TO BE CORRECT OR FAST */
+	register itype minor_index=0,major_index=0,r=0;
+	for(r=0;r<nrhs;++r)/* right hand side columns could have a big stride, though */
+	{
+		for(minor_index=0;minor_index+eval(minor_increment-1)<minor_maximum;minor_index+=minor_increment)
+		{
+			for(major_index=0;major_index+eval(major_increment-1)<major_maximum;major_index+=major_increment)
+			{
+forloop(`minor_unrolling',0,decr(minor_increment),dnl
+				`/* major_increment times unrolling on the major dimension,  */
+forloop(`major_unrolling',0,decr(major_increment),dnl
+				`c[r*cstride+minor_index+minor_unrolling]+=RSB_M4_CONJ(a[major_maximum*(minor_index+minor_unrolling)+major_index+major_unrolling],mtype,transposition,k_symmetry)*b[r*bstride+major_index+major_unrolling];
+')dnl
+')dnl
+			}
+			ifelse(major_increment,1,`',
+			`/* we handle the last (columns mod major_increment) columns */
+			for(;major_index<major_maximum;++major_index)
+			{forloop(`minor_unrolling',0,decr(minor_increment),`
+				c[r*cstride+minor_index+minor_unrolling]+=RSB_M4_CONJ(a[major_maximum*(minor_index+minor_unrolling)+major_index],mtype,transposition,k_symmetry)*b[r*bstride+major_index];')
+			}
+			')
+		}
+		
+dnl	in the following, we omit the generation of a whole double loop if minor_increment is 1
+		ifelse(minor_increment,1,`',
+		`/* we handle the last (rows mod minor_increment) rows entirely */
+		for(;minor_index<minor_maximum;++minor_index)
+			for(major_index=0;major_index<major_maximum;++major_index)
+				c[r*cstride+minor_index]+=RSB_M4_CONJ(a[major_maximum*(minor_index)+major_index],mtype,transposition,k_symmetry)*b[r*bstride+major_index];
+			')
+	}
+popdef(`r')
+')dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl				Function body dispatcher
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl
+dnl	RSB_M4_KERNEL_FUNCTION_BODY(	r,minor_maximum,minor_increment,major_index_basename,major_maximum,major_increment,
+dnl					mtype,want_header,mop,unrolling)
+dnl	-------------------------------------------------------------------------------------------------------------------
+dnl	Expands to the function body of a particular computational kernel.
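+dnl	Dispatch summary: spsx mops currently reuse the register blocked body;
+dnl	spmv mops use the looped body under `l' unrolling, otherwise (per
+dnl	RSB_M4_WANT_BLOCKING) the register blocked or the fully unrolled one;
+dnl	spmm_az, scale, infty_norm and negation select their dedicated bodies
+dnl	below, while rowssums reuses the unrolled infinity norm body.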
+dnl
+define(`RSB_M4_KERNEL_FUNCTION_BODY',`dnl
+dnl
+pushdef(`r',$1)dnl
+pushdef(`minor_index',`$1_$3')dnl minor dimension unroll variable identifier
+pushdef(`minor_increment',$3)dnl minor dimension increment
+pushdef(`minor_maximum',$2)dnl minor dimension maximum
+pushdef(`major_index',$4_$6)dnl major dimension unroll variable identifier
+pushdef(`major_increment',$6)dnl major dimension increment
+pushdef(`major_maximum',$5)dnl major dimension maximum
+pushdef(`mtype',$7)dnl
+pushdef(`itype',`rsb_coo_idx_t')dnl
+pushdef(`want_header',$8)dnl
+pushdef(`mop',$9)dnl
+pushdef(`unrolling',`ifelse($10,`l',`l',)')dnl
+pushdef(`k_symmetry',$11)dnl
+ifelse(RSB_M4_there_is_real_blocking,`1',`dnl
+	/*!
+	 * Loop fully unrolled minor_increment times on the minor dimension, major_increment times on the major one.
+	 * Assumes a,b,c are mtype arrays: a is minor_increment x major_increment, b is major_increment x 1, c is minor_increment x 1.
+	 * Assumes matrices are in row major (C) order.
+	 */
+')dnl
+ifelse(RSB_M4_IS_SPSX_KERNEL_MOP(mop),`1',`dnl
+dnl
+dnl	FIXME : THIS IS A TEMPORARY HACK
+dnl
+RSB_M4_MV_FUNCTION_BODY_UNROLLED_REGISTER_BLOCKED(mop)dnl
+dnl RSB_M4_KERNEL_FUNCTION_BODY($1,$2,$3,$4,$5,$6,$7,$8,`spmv_uauz',$10,$11)dnl
+')dnl
+ifelse(RSB_M4_IS_SPMV_KERNEL_MOP(mop),1,`dnl
+ifelse(unrolling,`l',`dnl
+dnl	Matrix vector unroll with loop
+RSB_M4_UNROLL_L_MV_FUNCTION_BODY(mop)
+',`dnl unrolling else
+dnl	Matrix vector unroll without any loop
+dnl
+	ifelse(eval(RSB_M4_WANT_BLOCKING>=1),`1',`dnl
+RSB_M4_MV_FUNCTION_BODY_UNROLLED_REGISTER_BLOCKED(mop)',`
+RSB_M4_MV_FUNCTION_BODY_UNROLLED')
+')dnl end unrolling ifelse
+')dnl 
+dnl
+dnl same as spmv_uauz:
+dnl
+ifelse(mop,`spmm_az',`dnl
+dnl`spmm_az',`dnl mop else
+ifelse(unrolling,`l',`dnl
+dnl	Matrix - matrix unroll with loop
+RSB_M4_UNROLL_L_MM_FUNCTION_BODY
+',`dnl unrolling else
+dnl	Matrix - matrix unroll without loop
+/* WRONG */
+RSB_M4_UNROLL_MM_FUNCTION_BODY
+/* WRONG */
+')
+dnl,`unknown `unrolling' : unrolling ?')dnl end unrolling ifelse
+popdef(`spmm_az')
+dnl ',`unknown `mop' : mop ?'
+')dnl end mop ifelse
+ifelse(mop,`scale',`dnl
+ifelse(unrolling,`l',`dnl
+RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED_L
+',`dnl unrolling else
+RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED
+')
+')dnl end mop ifelse
+dnl
+ifelse(mop,`rowssums',`dnl
+ifelse(unrolling,`l',`dnl
+RSB_M4_ERROR_UNIMPLEMENTED
+',`dnl unrolling else
+RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED(mop)
+')dnl
+')dnl end mop ifelse
+dnl
+ifelse(mop,`infty_norm',`dnl
+ifelse(unrolling,`l',`dnl
+RSB_M4_UNROLL_L_INFTY_NORM_FUNCTION_BODY(mop)
+',`dnl unrolling else
+RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED(mop)
+')
+')dnl end mop ifelse
+dnl
+ifelse(mop,`negation',`dnl
+ifelse(unrolling,`l',`dnl
+RSB_M4_UNROLL_L_NEGATE_FUNCTION_BODY(mop)
+',`dnl unrolling else
+RSB_M4_NEGATE_FUNCTION_BODY_UNROLLED(mop)
+')
+')dnl end mop ifelse
+dnl
+popdef(`k_symmetry')dnl
+popdef(`unrolling')dnl
+popdef(`mop')dnl
+popdef(`want_header')dnl
+popdef(`mtype')dnl
+popdef(`itype')dnl
+popdef(`major_maximum')dnl
+popdef(`major_increment')dnl
+popdef(`major_index')dnl
+popdef(`minor_maximum')dnl
+popdef(`minor_increment')dnl
+popdef(`spmm_az')dnl
+popdef(`r')dnl
+')dnl end RSB_M4_KERNEL_FUNCTION_BODY
+dnl
+dnl
+dnl
+dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl				Function definitions
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl
+define(`RSB_M4_EXTRA_SYMMETRIC_DIAGONAL_FIXING_KERNEL',`dnl
+dnl
+dnl	UNFINISHED
+dnl
+pushdef(`r',$1)dnl
+pushdef(`minor_index',`$1_$3')dnl minor dimension unroll variable identifier
+pushdef(`minor_increment',$3)dnl minor dimension increment
+pushdef(`minor_maximum',$2)dnl minor dimension maximum
+pushdef(`major_index',$4_$6)dnl major dimension unroll variable identifier
+pushdef(`major_increment',$6)dnl major dimension increment
+pushdef(`major_maximum',$5)dnl major dimension maximum
+pushdef(`mtype',$7)dnl
+pushdef(`itype',`rsb_coo_idx_t')dnl
+pushdef(`want_header',$8)dnl
+pushdef(`mop',$9)`'dnl
+pushdef(`unrolling',`ifelse($10,`l',`l',`')')dnl
+pushdef(`transposition',$11)`'dnl
+pushdef(`k_symmetry',$12)`'dnl
+RSB_M4_DEBUGINFO(``$0'')
+ifelse(RSB_M4_want_verbose_comments,`1',`dnl
+dnl /* : UNFINISHED : FIXME */
+dnl
+/*
+	Should determine the offset (in terms of elements) to the first diagonal element.
+	Should determine the intersection length.
+	For each diagonal intersecting element, subtract the outcome of operation mop.
+	`transposition' : "transposition", `symmetry' : "k_symmetry"
+*/
+')dnl
+dnl
+ifelse(RSB_M4_IS_NOT_UNSYMMETRIC(k_symmetry),1,`pushdef(`transposition',RSB_M4_TRANS_N)')dnl
+dnl
+ifelse(RSB_M4_IS_SPMX_OP_SCALING_KERNEL_MOP(mop),`1',`dnl
+			*c -= RSB_M4_CONJ(*a,mtype,transposition,k_symmetry)**b*alpha; /* no matrix pointer advance is needed, as this is a corrective term */
+')dnl
+ifelse(RSB_M4_IS_SPMX_OP_NEGATING_KERNEL_MOP(mop),`1',`dnl
+			*c += RSB_M4_CONJ(*a,mtype,transposition,k_symmetry)**b; /* no matrix pointer advance is needed, as this is a corrective term */
+')dnl
+ifelse(RSB_M4_IS_SPMX_OP_ADDING_KERNEL_MOP(mop),`1',`dnl
+			*c -= RSB_M4_CONJ(*a,mtype,transposition,k_symmetry)**b; /* no matrix pointer advance is needed, as this is a corrective term */
+')dnl
+dnl
+dnl	FIXME : symmetry is not among the macro arguments, and this is DANGEROUS
+dnl
+ifelse(RSB_M4_IS_NOT_UNSYMMETRIC(k_symmetry),1,`popdef(`transposition')')dnl
+dnl
+dnl	UNFINISHED
+dnl
+dnl
+dnl	UNFINISHED
+dnl
+popdef(`k_symmetry')`'dnl
+popdef(`unrolling')dnl
+popdef(`mop')dnl
+popdef(`want_header')dnl
+popdef(`mtype')dnl
+popdef(`itype')dnl
+popdef(`major_maximum')dnl
+popdef(`major_increment')dnl
+popdef(`major_index')dnl
+popdef(`minor_maximum')dnl
+popdef(`minor_increment')dnl
+popdef(`spmm_az')dnl
+popdef(`r')dnl
+popdef(`transposition')dnl
+')dnl end RSB_M4_EXTRA_SYMMETRIC_DIAGONAL_FIXING_KERNEL
+dnl	
+dnl
+dnl
+dnl
+dnl
+dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl				Function definitions
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl
+dnl	RSB_M4_UNROLL_KERNEL(	r,minor_maximum,minor_increment,major_index_basename,major_maximum,major_increment,
+dnl				mtype,want_header,mop,unrolling)
+dnl	-----------------------------------------------------------------------------------------------------------
+dnl	A general macro for matrix-matrix and matrix-vector multiplication unrolled kernels.
+dnl	FIXME : THIS MACRO (WILL BE) DEPRECATED.
+dnl
+define(`RSB_M4_UNROLL_KERNEL',`dnl
+dnl
+dnl
+pushdef(`r',$1)dnl
+pushdef(`minor_index',`$1_$3')dnl minor dimension unroll variable identifier
+pushdef(`minor_increment',$3)dnl minor dimension increment
+pushdef(`minor_maximum',$2)dnl minor dimension maximum
+pushdef(`major_index',$4_$6)dnl major dimension unroll variable identifier
+pushdef(`major_increment',$6)dnl major dimension increment
+pushdef(`major_maximum',$5)dnl major dimension maximum
+pushdef(`mtype',$7)dnl
+pushdef(`itype',`rsb_coo_idx_t')dnl
+pushdef(`want_header',$8)dnl
+pushdef(`mop',$9)`'dnl
+pushdef(`unrolling',`ifelse($10,`l',`l',`')')dnl
+pushdef(`transposition',$11)`'dnl
+RSB_M4_DEBUGINFO(``$0'')dnl
+dnl
+ifelse(want_header,`h',`dnl
+dnl
+dnl Only the function header is expanded.
+dnl
+RSB_M4_KERNEL_FUNCTION_PROTOTYPE(mtype,minor_increment,major_increment,unrolling,mop,transposition);
+',`dnl
+dnl
+dnl The entire function definition is expanded.
+dnl
+RSB_M4_KERNEL_FUNCTION_PROTOTYPE(mtype,minor_increment,major_increment,unrolling,mop,transposition)
+{
+ifelse(RSB_M4_IS_SPMV_KERNEL_MOP(mop),1,`dnl
+dnl
+dnl	Matrix Vector product
+dnl
+ifelse(unrolling,`l',`dnl
+dnl
+dnl	Matrix Vector unroll with loop
+dnl
+RSB_M4_UNROLL_L_MV_FUNCTION_BODY
+',`dnl unrolling else
+dnl
+dnl	Matrix vector unroll without any loop
+dnl
+ifelse(RSB_M4_there_is_real_blocking,`1',`dnl
+	/*!
+	 * Loop fully unrolled minor_increment times on the minor dimension, major_increment times on the major one.
+	 * Assumes a,b,c are mtype arrays: a is minor_increment x major_increment, b is major_increment x 1, c is minor_increment x 1.
+	 * Assumes matrices are in row major (C) order.
+	 */
+')dnl
+	ifelse(eval(RSB_M4_WANT_BLOCKING>=1),`1',`dnl
+RSB_M4_MV_FUNCTION_BODY_UNROLLED_REGISTER_BLOCKED',`
+RSB_M4_MV_FUNCTION_BODY_UNROLLED')
+')dnl end unrolling ifelse
+')
+dnl
+ifelse(mop,`spmm_az',`dnl
+dnl
+dnl	Matrix Matrix product
+dnl
+pushdef(`spmm_az',`$1_$3')dnl minor dimension unroll variable identifier
+ifelse(unrolling,`l',`dnl
+dnl
+dnl	Matrix - matrix unroll with loop
+dnl
+RSB_M4_UNROLL_L_MM_FUNCTION_BODY
+',`dnl unrolling else
+dnl
+dnl	Matrix - matrix unroll without loop
+dnl
+RSB_M4_UNROLL_MM_FUNCTION_BODY
+')
+popdef(`spmm_az')
+')dnl
+dnl
+dnl
+ifelse(mop,`scale',`dnl
+dnl
+dnl	Matrix Scaling
+dnl
+ifelse(unrolling,`l',`dnl
+/* WARNING : THIS IS NOT LOOPED ! */
+RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED_L
+',`dnl unrolling else
+RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED
+')
+')dnl
+dnl
+ifelse(mop,`spmv_uxux',`dnl
+dnl
+dnl	Matrix Scaling
+dnl
+ifelse(unrolling,`l',`dnl
+/* WARNING : THIS IS NOT LOOPED ! */
+dnl RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED_L(mop)
+RSB_M4_UNROLL_L_MV_FUNCTION_BODY(mop)
+',`dnl unrolling else
+dnl RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED(mop)
+RSB_M4_MV_FUNCTION_BODY_UNROLLED_REGISTER_BLOCKED(mop)
+')
+')dnl
+dnl
+ifelse(mop,`rowssums',`dnl
+ifelse(unrolling,`l',`dnl
+RSB_M4_ERROR_UNIMPLEMENTED
+',`dnl unrolling else
+RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED(mop)
+')dnl
+')dnl end mop ifelse
+dnl
+ifelse(mop,`infty_norm',`dnl
+dnl
+dnl	Infinity Norm
+dnl
+ifelse(unrolling,`l',`dnl
+RSB_M4_UNROLL_L_INFTY_NORM_FUNCTION_BODY(mop)
+',`dnl unrolling else
+RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED(mop)
+')
+')dnl end mop ifelse
+dnl
+ifelse(mop,`negation',`dnl
+ifelse(unrolling,`l',`dnl
+RSB_M4_UNROLL_L_NEGATE_FUNCTION_BODY(mop)
+',`dnl unrolling else
+RSB_M4_NEGATE_FUNCTION_BODY_UNROLLED(mop)
+')
+')dnl end mop ifelse
+dnl
+dnl
+	return;
+}
+')dnl end header ifelse
+popdef(`unrolling')dnl
+popdef(`mop')dnl
+popdef(`want_header')dnl
+popdef(`mtype')dnl
+popdef(`itype')dnl
+popdef(`major_maximum')dnl
+popdef(`major_increment')dnl
+popdef(`major_index')dnl
+popdef(`minor_maximum')dnl
+popdef(`minor_increment')dnl
+popdef(`spmm_az')dnl
+popdef(`r')dnl
+popdef(`transposition')dnl
+')dnl end RSB_M4_UNROLL_KERNEL
+dnl	
+dnl	
+dnl	DOUBLE_LINEAR_KERNEL_SEARCH(MOP,TYPE,...)
+dnl	-----------------------------------------
+dnl	
+dnl	The following M4 macro generates C macro code for selecting the right completely unrolled function.
+dnl	It is not optimal, but it exists.
+dnl	
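+dnl	For illustration (hypothetical (2,2) and (4,4) unroll pairs), the
+dnl	generated C macro body is a chain of conditional expressions like:
+dnl	  ( (R)==(2) && (C)==(2) ? <2x2 kernel> : \
+dnl	    ( (R)==(4) && (C)==(4) ? <4x4 kernel> : <terminal argument> ))
+dnl	with the kernel names produced by RSB_M4_KERNEL_FUNCTION_NAME.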
+define(`DOUBLE_LINEAR_KERNEL_SEARCH',`dnl
+pushdef(`mop',$1)dnl
+pushdef(`type',$2)dnl
+pushdef(`transposition',$3)dnl
+ifelse($#,4,`$4\'
+,`dnl
+( (R)==($4) && (C)==($5)?  pushdef(`rowsu',$4)pushdef(`colsu',$5)dnl
+RSB_M4_KERNEL_FUNCTION_NAME(type,rowsu,colsu,looped,mop,transposition,RSB_M4_DEFAULT_COORDINATE_INDEX_TYPE) \
+popdef(`rowsu')popdef(`colsu') :  (DOUBLE_LINEAR_KERNEL_SEARCH(mop,type,transposition,shift(shift(shift(shift(shift($@)))))) )) dnl
+')dnl
+popdef(`transposition')dnl
+popdef(`type')dnl
+popdef(`mop')dnl
+')dnl
+dnl
+dnl
+dnl	DOUBLE_LINEAR_KERNEL_SEARCH_MACRO_IDENTIFIER(MOP,TYPE,UNROLLING)
+dnl	----------------------------------------------------------------
+dnl
+define(`DOUBLE_LINEAR_KERNEL_SEARCH_MACRO_IDENTIFIER',`dnl
+pushdef(`mop',$1)dnl
+pushdef(`type',$2)dnl
+pushdef(`unrolling',$3)dnl
+`RSB_'RSB_M4_TYPE_CODE(type)`_kernel_'mop`'dnl
+popdef(`unrolling')dnl
+popdef(`type')dnl
+popdef(`mop')dnl
+')dnl
+dnl
+dnl
+dnl	DOUBLE_LINEAR_KERNEL_SEARCH_MACRO_(mop,type,unrolling,...)
+dnl	----------------------------------------------------------
+dnl	FIXME : someday there will be a binary search macro
+dnl
+define(`DOUBLE_LINEAR_KERNEL_SEARCH_MACRO_',`dnl
+pushdef(`mop',$1)dnl
+pushdef(`type',$2)dnl
+pushdef(`unrolling',$3)dnl
+pushdef(`unrolls',shift(shift(shift($@))))dnl
+`#define' DOUBLE_LINEAR_KERNEL_SEARCH_MACRO_IDENTIFIER(mop,type,unrolling)`'(R,C) DOUBLE_LINEAR_KERNEL_SEARCH(mop,type,unrolls)
+popdef(`unrolls')dnl
+popdef(`unrolling')dnl
+popdef(`mop')dnl
+popdef(`type')dnl
+')dnl
+dnl
+dnl
+dnl	UNLOOP_R_C_PAIRS()
+dnl	------------------
+dnl
+dnl	generates a list of (R,C) pairs; the cartesian product of rowsu and colsu lists
+dnl
+define(`UNLOOP_R_C_PAIRS',`foreach(`rowsu',RSB_M4_ROWS_UNROLL,`foreach(`colsu',RSB_M4_COLUMNS_UNROLL,`rowsu,colsu,')')')dnl
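+dnl	E.g., assuming hypothetical unroll lists (1,2) and (1,2), this expands
+dnl	to the flat comma-terminated sequence 1,1,1,2,2,1,2,2, which is what
+dnl	the variadic kernel search macros above consume.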
+dnl
+dnl 
+dnl	RSB_M4_TYPES : ...
+dnl 
+define(`RSB_M4_TYPES',(WANT_TYPES))dnl
+dnl 
+dnl 
+dnl 
+dnl	RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_PRINTF_STRING(mtype)
+dnl	-------------------------------------------------------
+dnl	FIXME
+dnl
+dnl #define RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_PRINTF_STRING(mtype)
+dnl
+define(`RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_PRINTF_STRING',`dnl
+pushdef(`mtype',$1)`'dnl
+RSB_M4_MATRIX_STORAGE_PREPROCESSOR_SYMBOL(mtype)`'`_PRINTF_STRING'dnl
+popdef(`mtype')`'dnl
+')dnl
+dnl 
+dnl 
+dnl 
+dnl 
+dnl	RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_PRINTF_ARG(mtype)
+dnl	-------------------------------------------------------
+dnl	FIXME
+dnl
+dnl #define RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_PRINTF_ARG(mtype)
+dnl
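+dnl	E.g. (hypothetical argument name z):
+dnl	  RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_PRINTF_ARG(`double complex',`z')
+dnl	expands to creal(z),cimag(z), i.e. two printf arguments matching a
+dnl	two-specifier format string.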
+define(`RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_PRINTF_ARG',`dnl
+pushdef(`mtype',$1)`'dnl
+pushdef(`arg',$2)`'dnl
+ifelse(mtype,`double complex',`creal(arg),cimag(arg)',`dnl
+ifelse(mtype,`float complex',`crealf(arg),cimagf(arg)',`dnl
+ifelse(mtype,`complex',`creal(arg),cimag(arg),creal(arg),cimag(arg)',`dnl
+arg`'dnl
+')dnl
+')dnl
+')dnl
+popdef(`arg')`'dnl
+popdef(`mtype')`'dnl
+')dnl
+dnl 
+dnl 
+dnl 
+dnl 
+dnl	RSB_M4_MATRIX_STORAGE_PREPROCESSOR_STRING(matrix_storage)
+dnl	---------------------------------------------------------
+dnl	FIXME
+dnl
+dnl #define RSB_M4_MATRIX_STORAGE_PREPROCESSOR_STRING(matrix_storage)
+dnl
+define(`RSB_M4_MATRIX_STORAGE_PREPROCESSOR_STRING',`dnl
+pushdef(`matrix_storage',$1)`'dnl
+RSB_M4_MATRIX_STORAGE_PREPROCESSOR_SYMBOL(matrix_storage)`'`_STRING'dnl
+popdef(`matrix_storage')`'dnl
+')dnl
+dnl 
+dnl 
+dnl 
+dnl	RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_PREPROCESSOR_SYMBOL()
+dnl	-----------------------------------------------
+dnl
+dnl #define RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_PREPROCESSOR_SYMBOL() 
+dnl
+define(`RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_PREPROCESSOR_SYMBOL',`dnl
+pushdef(`citype',$1)`'dnl
+`RSB_COORDINATE_TYPE_'RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_CHARCODE(citype)`'dnl
+popdef(`citype')`'dnl
+')dnl
+dnl 
+dnl 
+dnl 
+dnl	RSB_M4_MATRIX_DIAGONAL_PREPROCESSOR_SYMBOL()
+dnl	-----------------------------------------------
+dnl
+dnl #define RSB_M4_MATRIX_DIAGONAL_PREPROCESSOR_SYMBOL() 
+dnl
+define(`RSB_M4_MATRIX_DIAGONAL_PREPROCESSOR_SYMBOL',`dnl
+pushdef(`k_diagonal',$1)`'dnl
+`RSB_DIAGONAL_'RSB_M4_MATRIX_DIAGONAL_CHAR(k_diagonal)`'dnl
+popdef(`k_diagonal')`'dnl
+')dnl
+dnl 
+dnl 
+dnl 
+dnl	RSB_M4_MATRIX_TRANSPOSITION_PREPROCESSOR_SYMBOL()
+dnl	-----------------------------------------------
+dnl
+dnl #define RSB_M4_MATRIX_TRANSPOSITION_PREPROCESSOR_SYMBOL() 
+dnl
+define(`RSB_M4_MATRIX_TRANSPOSITION_PREPROCESSOR_SYMBOL',`dnl
+pushdef(`transposition',$1)`'dnl
+`RSB_TRANSPOSITION_'RSB_M4_MATRIX_TRANSPOSITION_CHAR(transposition)`'dnl
+popdef(`transposition')`'dnl
+')dnl
+dnl 
+dnl 
+dnl 
+dnl	RSB_M4_MATRIX_SYMMETRY_PREPROCESSOR_SYMBOL(type)
+dnl	-----------------------------------------------
+dnl
+dnl #define RSB_M4_MATRIX_SYMMETRY_PREPROCESSOR_SYMBOL(type) 
+dnl
+define(`RSB_M4_MATRIX_SYMMETRY_PREPROCESSOR_SYMBOL',`dnl
+pushdef(`k_symmetry',$1)`'dnl
+`RSB_SYMMETRY_'touppercase(RSB_M4_CHOPSPACES(k_symmetry))`'dnl
+popdef(`k_symmetry')`'dnl
+')dnl
+dnl 
+dnl 
+dnl 
+dnl	RSB_M4_MATRIX_STORAGE_PREPROCESSOR_SYMBOL(type)
+dnl	-----------------------------------------------
+dnl	FIXME
+dnl
+dnl #define RSB_M4_MATRIX_STORAGE_PREPROCESSOR_SYMBOL(type) 
+dnl
+define(`RSB_M4_MATRIX_STORAGE_PREPROCESSOR_SYMBOL',`dnl
+pushdef(`matrix_storage',$1)`'dnl
+`RSB_MATRIX_STORAGE_'touppercase(RSB_M4_CHOPSPACES(matrix_storage))`'dnl
+popdef(`matrix_storage')`'dnl
+')dnl
+dnl 
+dnl 
+dnl 
+dnl	RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type)
+dnl	-----------------------------------------------
+dnl	Converts a type name into a preprocessor symbol used to indicate type availability.
+dnl
+dnl #define RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type) 
+dnl
+define(`RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL',`dnl
+pushdef(`type',$1)`'dnl
+`RSB_NUMERICAL_TYPE_'touppercase( RSB_M4_CHOPSPACES(type) )dnl
+popdef(`type')`'dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_MATRIX_DIAGONAL_CHAR()
+dnl	------------------------------------
+dnl	FIXME
+dnl
+define(`RSB_M4_MATRIX_DIAGONAL_CHAR',`dnl
+pushdef(`k_diagonal',$1)`'dnl
+`'touppercase(RSB_M4_CHOPSPACES(k_diagonal))`'dnl
+popdef(`k_diagonal')`'dnl
+')dnl
+dnl
+dnl
+dnl	RSB_M4_MATRIX_TRANSPOSITION_CHAR()
+dnl	------------------------------------
+dnl	FIXME
+dnl
+define(`RSB_M4_MATRIX_TRANSPOSITION_CHAR',`dnl
+pushdef(`transposition',$1)`'dnl
+`'touppercase(RSB_M4_CHOPSPACES(transposition))`'dnl
+popdef(`transposition')`'dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_DEFAULT_COORDINATE_INDEX_TYPE',`rsb_coo_idx_t')dnl
+dnl
+dnl	RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_CHARCODE_()
+dnl	------------------------------------
+dnl	FIXME !
+dnl
+define(`RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_CHARCODE_',`dnl
+pushdef(`citype',$1)`'dnl
+ifelse(citype,`rsb_coo_idx_t',`0x01')`'dnl
+ifelse(citype,`rsb_half_idx_t',`0x02')`'dnl
+popdef(`citype')`'dnl
+')dnl
+dnl
+dnl
+dnl	RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_CHARCODE()
+dnl	------------------------------------
+dnl	FIXME
+dnl
+define(`RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_CHARCODE',`dnl
+pushdef(`citype',$1)`'dnl
+ifelse(citype,`rsb_coo_idx_t',`C')`'dnl
+ifelse(citype,`rsb_half_idx_t',`H')`'dnl
+popdef(`citype')`'dnl
+')dnl
+dnl
+dnl
+dnl	RSB_M4_MATRIX_TRANSPOSITION_CHARCODE()
+dnl	------------------------------------
+dnl	FIXME
+dnl
+define(`RSB_M4_MATRIX_TRANSPOSITION_CHARCODE',`dnl
+pushdef(`transposition',$1)`'dnl
+dnl ifelse(transposition,RSB_M4_TRANS_T,`0x01 /*!< Transposed flag value, valid for \ref rsb_trans_t valued variables. */')`'dnl
+dnl ifelse(transposition,RSB_M4_TRANS_N,`0x00 /*!< Non transposed flag, valid for \ref rsb_trans_t typed variables. */')`'dnl
+dnl ifelse(transposition,RSB_M4_TRANS_C,`0x02 /*!< Conjugated transpose flag, valid for \ref rsb_trans_t typed variables. */')`'dnl
+ifelse(transposition,RSB_M4_TRANS_T,`0x54 /*!< T: Transposed flag value, valid for \ref rsb_trans_t valued variables. */')`'dnl
+ifelse(transposition,RSB_M4_TRANS_N,`0x4E /*!< N: Non transposed flag, valid for \ref rsb_trans_t typed variables. */')`'dnl
+ifelse(transposition,RSB_M4_TRANS_C,`0x43 /*!< C: Conjugated transpose flag, valid for \ref rsb_trans_t typed variables. */')`'dnl
+popdef(`transposition')`'dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_MATRIX_DIAGONAL_CHARCODE(k_diagonal)
+dnl	------------------------------------
+dnl	FIXME
+dnl
+define(`RSB_M4_MATRIX_DIAGONAL_CHARCODE',`dnl
+pushdef(`k_diagonal',$1)`'dnl
+ifelse(k_diagonal,`e',`0x01 /*  */')`'dnl
+ifelse(k_diagonal,`i',`0x02 /*  */')`'dnl FIXME : new
+popdef(`k_diagonal')`'dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_MATRIX_SYMMETRY_CHARCODE(k_symmetry)
+dnl	------------------------------------
+dnl	FIXME
+dnl
+define(`RSB_M4_MATRIX_SYMMETRY_CHARCODE',`dnl
+pushdef(`k_symmetry',$1)`'dnl
+ifelse(k_symmetry,RSB_M4_SYMBOL_SYMMETRIC,`RSB_FLAG_SYMMETRIC /*  */')`'dnl
+ifelse(k_symmetry,RSB_M4_SYMBOL_HERMITIAN,`RSB_FLAG_HERMITIAN /*  */')`'dnl FIXME : new
+ifelse(k_symmetry,RSB_M4_SYMBOL_UNSYMMETRIC,`0x00 /*  */')`'dnl
+popdef(`k_symmetry')`'dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_MATRIX_STORAGE_CHARCODE(type)
+dnl	------------------------------------
+dnl	FIXME
+dnl
+define(`RSB_M4_MATRIX_STORAGE_CHARCODE',`dnl
+pushdef(`matrix_storage',$1)`'dnl
+ifelse(matrix_storage,`BCSR',`0x01 /*  */')`'dnl
+ifelse(matrix_storage,`BCSC',`0x02 /* */ ')`'dnl
+ifelse(matrix_storage,`VBR',`0x04 /* */')`'dnl
+ifelse(matrix_storage,`VBC',`0x08 /* */')`'dnl
+ifelse(matrix_storage,`LR',`0x10 /* */')`'dnl
+ifelse(matrix_storage,`LC',`0x20 /* */')`'dnl
+ifelse(matrix_storage,`BCOR',`0x40 /* */')`'dnl
+ifelse(matrix_storage,`BCOC',`0x80 /* */')`'dnl
+dnl ifelse(matrix_storage,`LVBR',`0x10 /* */')`'dnl
+dnl ifelse(matrix_storage,`LVBC',`0x20 /* */')`'dnl
+dnl ifelse(matrix_storage,`BCOO',`0x10 /* */')`'dnl
+popdef(`matrix_storage')`'dnl
+')dnl
+dnl 
+dnl
+dnl
+dnl
+dnl	RSB_M4_TYPE_CHARCODE_ASCII_VALUE(type)
+dnl	--------------------------------------
+dnl
+define(`RSB_M4_TYPE_CHARCODE_ASCII_VALUE',`dnl
+pushdef(`type',$1)`'dnl
+ifelse(type,`long double',`76')`'dnl /* L (? compliance)*/
+ifelse(type,`double',`68')`'dnl /* D (BLAS compliance)*/
+ifelse(type,`float',`83')`'dnl /* S (BLAS compliance) */
+ifelse(type,`int',`73')`'dnl /*I*/
+ifelse(type,`unsigned int',`85')`'dnl /*U*/
+ifelse(type,`char',`104')`'dnl /* h */
+ifelse(type,`unsigned char',`72')`'dnl /*H*/
+dnl ifelse(type,`complex',`99')`'dnl /*c*/
+ifelse(type,`float complex',`67')`'dnl /*c (BLAS compliance)*/
+ifelse(type,`double complex',`90')`'dnl /*z (BLAS compliance)*/
+ifelse(type,`long double complex',`81')`'dnl /*Q (? compliance)*/
+ifelse(type,RSB_M4_INVALID_TYPE,`?')`'dnl /* invalid type */
+popdef(`type')`'dnl
+')dnl
+dnl 
+dnl	
+dnl	dnl
+dnl
+dnl
+dnl	RSB_M4_TYPE_CHARCODE(type)
+dnl	--------------------------
+dnl	Converts a type name to a one-character type code.
+dnl
+define(`RSB_M4_TYPE_CHARCODE',`dnl
+pushdef(`type',$1)`'dnl
+ifelse(type,`long double',`L')`'dnl /* L (? compliance)*/
+ifelse(type,`double',`D')`'dnl /* D (BLAS compliance)*/
+ifelse(type,`float',`S')`'dnl /* F (BLAS compliance)*/ 
+ifelse(type,`int',`I')`'dnl /* I */
+ifelse(type,`unsigned int',`U')`'dnl /*U*/
+ifelse(type,`char',`h')`'dnl /*h*/
+ifelse(type,`unsigned char',`H')`'dnl /*H*/
+ifelse(type,`float complex',`C')`'dnl /* c (BLAS compliance)*/
+ifelse(type,`double complex',`Z')`'dnl /* z (BLAS compliance)*/
+ifelse(type,`long double complex',`Q')`'dnl /* Q (? compliance)*/
+ifelse(type,RSB_M4_INVALID_TYPE,`?')`'dnl /* invalid type */
+popdef(`type')`'dnl
+')dnl
+dnl 
+dnl	
+dnl	
+dnl	DOUBLE_LINEAR_KERNEL_DISPATCHER_SEARCH(...)
+dnl	-------------------------------------------
+dnl	FIXME: REDOCUMENT
+dnl	
+dnl	The following code generates a macro for selecting the right completely unrolled function.
+dnl	It is not optimal, but it exists.
+dnl	
+define(`DOUBLE_LINEAR_KERNEL_DISPATCHER_SEARCH',`dnl
+pushdef(`mop',$1)dnl
+pushdef(`type',$2)dnl
+pushdef(`transposition',$3)dnl
+ifelse($#,4,`$4\'
+,`dnl
+( (R)==($4) && (C)==($5)?  pushdef(`rowsu',$4)pushdef(`colsu',$5)dnl
+RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_NAME(type,`BCSR',transposition,k_symmetry,rowsu,colsu,unrolling,mop,,) \
+popdef(`rowsu')popdef(`colsu') :  (DOUBLE_LINEAR_KERNEL_DISPATCHER_SEARCH(mop,type,transposition,shift(shift(shift(shift(shift($@)))))) )) dnl
+')dnl
+popdef(`type')dnl
+popdef(`mop')dnl
+popdef(`transposition')dnl
+')dnl
+dnl
+dnl
+dnl	DOUBLE_LINEAR_KERNEL_DISPATCHER_SEARCH_MACRO_IDENTIFIER(...)
+dnl	-------------------------------------------------------------
+dnl	FIXME: REDOCUMENT
+dnl
+define(`DOUBLE_LINEAR_KERNEL_DISPATCHER_SEARCH_MACRO_IDENTIFIER',`dnl
+pushdef(`mop',$1)dnl
+pushdef(`type',$2)dnl
+pushdef(`unrolling',$3)dnl
+pushdef(`matrix_storage',$4)dnl
+pushdef(`transposition',$5)dnl
+`RSB_'RSB_M4_TYPE_CODE(type)`_kernel_dispatcher_'matrix_storage`_'mop`_'unrolling`_'RSB_M4_TRANSPOSITION_CODE(transposition)`'dnl
+popdef(`matrix_storage')dnl
+popdef(`transposition')dnl
+popdef(`unrolling')dnl
+popdef(`type')dnl
+popdef(`mop')dnl
+')dnl
+dnl
+dnl
+dnl	DOUBLE_LINEAR_KERNEL_DISPATCHER_SEARCH_MACRO_(mop,type,unrolling,...)
+dnl	---------------------------------------------------------------------
+dnl	FIXME: REDOCUMENT
+dnl
+define(`DOUBLE_LINEAR_KERNEL_DISPATCHER_SEARCH_MACRO_',`dnl
+pushdef(`mop',$1)dnl
+pushdef(`type',$2)dnl
+pushdef(`unrolling',$3)dnl
+pushdef(`matrix_storage',$4)dnl
+pushdef(`transposition',$5)dnl
+pushdef(`k_symmetry',$6)dnl
+pushdef(`unrolls',shift(shift(shift(shift(shift(shift($@)))))))dnl
+/* a macro is faster than a switch construct */
+`#define' DOUBLE_LINEAR_KERNEL_DISPATCHER_SEARCH_MACRO_IDENTIFIER(mop,type,unrolling,matrix_storage,transposition)`'(R,C) DOUBLE_LINEAR_KERNEL_DISPATCHER_SEARCH(mop,type,transposition,unrolls)
+popdef(`k_symmetry')dnl
+popdef(`matrix_storage')dnl
+popdef(`transposition')dnl
+popdef(`unrolls')dnl
+popdef(`unrolling')dnl
+popdef(`mop')dnl
+popdef(`type')dnl
+')dnl
+dnl
+dnl
+dnl	KERNEL_TYPE_DISPATCHER_SEARCH_MACRO_IDENTIFIER(MOP,TYPE,UNROLLING,..)
+dnl	------------------------------------------------------------------
+dnl	FIXME: REDOCUMENT
+dnl
+define(`KERNEL_TYPE_DISPATCHER_SEARCH_MACRO_IDENTIFIER',`dnl
+pushdef(`mop',$1)dnl
+pushdef(`type',$2)dnl
+pushdef(`unrolling',$3)dnl
+pushdef(`matrix_storage',$4)dnl
+pushdef(`transposition',$5)dnl
+`RSB_type_kernel_dispatcher_'matrix_storage`_'mop`_'unrolling`_'RSB_M4_TRANSPOSITION_CODE(transposition)`'dnl
+popdef(`transposition')dnl
+popdef(`matrix_storage')dnl
+popdef(`unrolling')dnl
+popdef(`type')dnl
+popdef(`mop')dnl
+')dnl
+dnl
+dnl
+dnl	DOUBLE_LINEAR_KERNEL_DISPATCHER_TYPE_SEARCH_MACRO_(MOP,UNROLLING,..)
+dnl	-----------------------------------------------------------------
+dnl	FIXME: REDOCUMENT
+dnl
+define(`DOUBLE_LINEAR_KERNEL_DISPATCHER_TYPE_SEARCH_MACRO_',`dnl
+pushdef(`mop',$1)dnl
+pushdef(`unrolling',$2)dnl
+pushdef(`matrix_storage',$3)dnl
+pushdef(`transposition',$4)dnl
+/* a macro is faster than a switch construct */
+`#define' KERNEL_TYPE_DISPATCHER_SEARCH_MACRO_IDENTIFIER(mop,type,unrolling,matrix_storage,transposition)`'(TYPE,R,C) \
+(dnl
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+  (TYPE)==RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type) ? (void*)DOUBLE_LINEAR_KERNEL_DISPATCHER_SEARCH_MACRO_IDENTIFIER(mop,type,unrolling,matrix_storage,transposition)(R,C) : \
+dnl
+')dnl
+NULL ) dnl
+dnl
+popdef(`transposition')dnl
+popdef(`matrix_storage')dnl
+popdef(`unrolling')dnl
+popdef(`type')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_KERNEL_SIZE_DISPATCH_FUNCTION_NAME(mtype,matrix_storage,transposition,k_symmetry,unrolling,mop,
+dnl						citype,k_diagonal,uplo)
+dnl	-----------------------------------------------------------------------------
+dnl
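+dnl	The generated identifier concatenates (schematically):
+dnl	  <prefix><storage>_<mop>_<type code>_<citype char>_<unrolling>_t<T>_s<S>_d<D>_u<U>
+dnl	where <T>, <S>, <D>, <U> are the one-character transposition, symmetry,
+dnl	diagonal and uplo codes produced by the macros above.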
+define(`RSB_M4_KERNEL_SIZE_DISPATCH_FUNCTION_NAME',`dnl
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`matrix_storage',$2)dnl	
+pushdef(`transposition',$3)dnl	
+pushdef(`k_symmetry',$4)dnl	
+pushdef(`unrolling',$5)dnl	
+pushdef(`mop',$6)dnl	
+pushdef(`citype',$7)dnl	
+pushdef(`k_diagonal',$8)dnl	
+pushdef(`uplo',$9)dnl
+dnl
+dnl	FIXME : should handle citype
+dnl
+RSB_M4_PREFIX`'matrix_storage`_'mop`_'RSB_M4_TYPE_CODE(mtype)`_'RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_CHARCODE(citype)`_'unrolling`'dnl
+dnl
+dnl ifelse(RSB_M4_MEMBER(matrix_storage,`VBR',`VBC'),1,`dnl
+dnl RSB_M4_PREFIX`'matrix_storage`_'mop`_'RSB_M4_TYPE_CODE(mtype)`_'RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_CHARCODE(citype)`_'unrolling`'dnl
+dnl ')dnl
+dnl ifelse(RSB_M4_MEMBER(matrix_storage,`BCSR',`BCSC'),1,`dnl
+dnl RSB_M4_PREFIX`'matrix_storage`_'mop`_'RSB_M4_TYPE_CODE(mtype)`_'RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_CHARCODE(citype)`_'unrolling`'dnl
+dnl ')dnl
+dnl ifelse(RSB_M4_MEMBER(matrix_storage,`LR',`LC'),1,`dnl
+dnl dnl NEW
+dnl RSB_M4_PREFIX`'matrix_storage`_'mop`_'RSB_M4_TYPE_CODE(mtype)`_'RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_CHARCODE(citype)`_'unrolling`'dnl
+dnl ')dnl
+dnl ifelse(transposition,RSB_M4_TRANS_T,`_T',`_N')`'dnl
+`_t'RSB_M4_MATRIX_TRANSPOSITION_CHAR(transposition)`'dnl
+`_s'touppercase(k_symmetry)`'dnl
+`_d'touppercase(k_diagonal)`'dnl
+`_u'touppercase(uplo)`'dnl
+dnl else should give error : fixme
+popdef(`uplo')dnl
+popdef(`k_diagonal')dnl
+popdef(`citype')dnl
+popdef(`mop')dnl
+popdef(`mtype')dnl
+popdef(`matrix_storage')dnl
+popdef(`k_symmetry')dnl
+popdef(`transposition')dnl
+popdef(`unrolling')dnl
+')dnl
+dnl
+define(`RSB_M4_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR',WANT_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR)dnl
+dnl define(`RSB_M4_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR',`16')dnl
+define(`RSB_M4_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR_MEDIUM',`8')dnl
+define(`RSB_M4_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR_SMALL',`4')dnl
+dnl
+dnl	RSB_M4_SIMPLE_LOOP_UNROLL_2S_WITH_JUMP()
+dnl	----------------------------------------
+dnl	A quick and dirty way to unroll simple loops.
+dnl	Could cause infinite recursion on identifier clashes.
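+dnl	Shape of the emitted code (sketch): a guard jumps straight to the
+dnl	label anolu when fewer than _uff iterations remain; otherwise a
+dnl	step-(4*_uff) unrolled loop runs, then a step-_uff one; after anolu,
+dnl	a step-(_uff/2) loop and a final scalar loop mop up the remainder.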
+dnl
+dnl
+define(`RSB_M4_SIMPLE_LOOP_UNROLL_2S_WITH_JUMP',`dnl
+pushdef(`_ii',$1)dnl
+pushdef(`_LI',`$2')dnl
+pushdef(`_li',$3)dnl
+pushdef(`_ui',$4)dnl
+pushdef(`_st1',$5)dnl
+pushdef(`_st2',$6)dnl
+pushdef(`_uff',`eval(ifelse($7,,RSB_M4_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR,$7))')dnl
+pushdef(`_uf',`eval(4*_uff)')dnl
+pushdef(`_ff',`eval(1*_uff)')dnl
+pushdef(`_hf',`eval(_uff/2)')dnl
+{
+	_ii=_li;
+	if((_ui-_ii)<(_ff))goto anolu;
+dnl	if((_ui-_ii)<(_uf+_ff+_hf))goto nolu;
+dnl	switch(_ui%eval(_uf/4)){case }
+dnl
+for(;_ii+decr(_uf)<_ui;_ii+=_uf){`'pushdef(_LI,`_LI_ ')dnl
+forloop(`_LI_',0,decr(_uf),`_st1`'')`'forloop(`_LI_',0,decr(_uf),`_st2`'')dnl
+}`'popdef(_LI)dnl
+dnl
+for(;_ii+decr(_ff)<_ui;_ii+=_ff){`'pushdef(_LI,`_LI_ ')dnl
+forloop(`_LI_',0,decr(_ff),`_st1`'')`'forloop(`_LI_',0,decr(_ff),`_st2`'')dnl
+}`'popdef(_LI)dnl
+dnl
+anolu:
+dnl
+ifelse(eval(_hf),0,`',`dnl
+for(;_ii+decr(_hf)<_ui;_ii+=_hf){`'pushdef(_LI,`_LI_ ')dnl
+forloop(`_LI_',0,decr(_hf),`_st1`'')`'forloop(`_LI_',0,decr(_hf),`_st2`'')dnl
+}`'popdef(_LI)dnl
+')dnl
+dnl
+pushdef(`_LI_',`0')dnl
+for(     ;_ii<_ui;++_ii){`'_st1`'_st2`'}
+}`'popdef(`_LI_')dnl
+dnl
+popdef(`_ii')dnl
+popdef(`_LI')dnl
+popdef(`_li')dnl
+popdef(`_ui')dnl
+popdef(`_st2')dnl
+popdef(`_st1')dnl
+popdef(`_uf')dnl
+popdef(`_ff')dnl
+popdef(`_hf')dnl
+popdef(`_uff')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_SIMPLE_LOOP_UNROLL_5S()
+dnl	------------------------------
+dnl	A quick and dirty way to unroll simple loops.
+dnl	Could cause infinite recursion on identifier clashes.
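+dnl	Emission order (sketch): _st0 once as a prologue; per unrolled
+dnl	iteration, _uf copies of _st1, then _st3, then _uf copies of _st2,
+dnl	then _st4; finally a scalar tail loop with one _st1 and one _st2
+dnl	per leftover iteration.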
+dnl
+dnl
+define(`RSB_M4_SIMPLE_LOOP_UNROLL_5S',`dnl
+pushdef(`_ii',$1)dnl
+pushdef(`_LI',`$2')dnl
+pushdef(`_li',$3)dnl
+pushdef(`_ui',$4)dnl
+pushdef(`_st0',$5)dnl
+pushdef(`_st1',$6)dnl
+pushdef(`_st2',$8)dnl
+pushdef(`_st3',$7)dnl
+pushdef(`_st4',$9)dnl
+pushdef(`_uf',`ifelse($10,,RSB_M4_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR,$10)')dnl
+{
+_st0`'dnl
+for(_ii=_li;_ii+decr(_uf)<_ui;_ii+=_uf){
+pushdef(_LI,`_LI_ ')dnl
+forloop(`_LI_',0,decr(_uf),`dnl
+_st1`'dnl
+')dnl
+popdef(_LI)dnl
+_st3`'dnl
+pushdef(_LI,`_LI_ ')dnl
+forloop(`_LI_',0,decr(_uf),`dnl
+_st2`'dnl
+')dnl
+popdef(_LI)dnl
+_st4`'dnl
+}
+pushdef(`_LI_',`0')dnl
+for(     ;_ii<_ui;++_ii){`'_st1`'_st2`'}
+popdef(`_LI_')dnl
+}
+popdef(`_ii')dnl
+popdef(`_LI')dnl
+popdef(`_li')dnl
+popdef(`_ui')dnl
+popdef(`_st4')dnl
+popdef(`_st3')dnl
+popdef(`_st2')dnl
+popdef(`_st1')dnl
+popdef(`_st0')dnl
+popdef(`_uf')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_SIMPLE_LOOP_UNROLL_2S()
+dnl	------------------------------
+dnl	A quick and dirty way to unroll simple loops.
+dnl	Could cause infinite recursion on identifier clashes.
+dnl
+dnl
+define(`RSB_M4_SIMPLE_LOOP_UNROLL_2S',`dnl
+RSB_M4_SIMPLE_LOOP_UNROLL_5S(`$1',`$2',`$3',`$4',`',`$5',`',`$6',`',`$7')`'dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_SIMPLE_LOOP_UNROLL()
+dnl	---------------------------
+dnl	A quick and dirty way to unroll simple loops.
+dnl	Could cause infinite recursion on identifier clashes.
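+dnl	Example (hypothetical statement; expansion sketched, not verbatim):
+dnl	  RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',0,`n',`y[i+LI]+=x[i+LI];',4)
+dnl	is meant to emit a step-4 loop with the statement instantiated for
+dnl	LI=0..3, followed by a scalar cleanup loop for the remaining i<n.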
+dnl
+dnl
+define(`RSB_M4_SIMPLE_LOOP_UNROLL',`dnl
+pushdef(`_ii',$1)dnl
+pushdef(`_LI',`$2')dnl
+pushdef(`_li',$3)dnl
+pushdef(`_ui',$4)dnl
+pushdef(`_st',$5)dnl
+pushdef(`_uf',`ifelse($6,,RSB_M4_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR,$6)')dnl
+ifelse(_uf,1,`dnl
+for(_ii=_li;_ii+decr(_uf)<_ui;_ii+=_uf)
+{
+pushdef(_LI,`_LI_ ')dnl
+forloop(`_LI_',0,decr(_uf),`dnl
+_st`'dnl
+')dnl
+popdef(_LI)dnl
+}
+',`dnl
+{
+for(_ii=_li;_ii+decr(_uf)<_ui;_ii+=_uf){
+pushdef(_LI,`_LI_ ')dnl
+forloop(`_LI_',0,decr(_uf),`dnl
+_st`'dnl
+')dnl
+popdef(_LI)dnl
+}
+pushdef(`_LI_',`0')dnl
+for(     ;_ii<_ui;++_ii){ _st }
+popdef(`_LI_')dnl
+}')
+popdef(`_ii')dnl
+popdef(`_LI')dnl
+popdef(`_li')dnl
+popdef(`_ui')dnl
+popdef(`_st')dnl
+popdef(`_uf')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_SIMPLE_LOOP_UNROLL_2S_J',`dnl
+pushdef(`RSB_DO_WANT_PATCH_20101213',`0')dnl
+ifelse(RSB_DO_WANT_PATCH_20101213,`1',`RSB_M4_SIMPLE_LOOP_UNROLL_2S_WITH_JUMP($@)',`RSB_M4_SIMPLE_LOOP_UNROLL_2S($@)')dnl
+dnl
+popdef(`RSB_DO_WANT_PATCH_20101213')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_SIMPLE_UNROLL()
+dnl	----------------------
+dnl	A quick and dirty way to unroll simple loops.
+dnl	Could cause infinite recursion on identifier clashes.
+dnl	It is buggy: it doesn't support nested unrollings (FIXME).
+dnl
+dnl
+define(`RSB_M4_SIMPLE_UNROLL',`dnl
+pushdef(`_LI',`$1')dnl
+pushdef(`_li',$2)dnl
+pushdef(`_st',$4)dnl
+pushdef(`_uf',`ifelse($3,,16,$3)')dnl
+{
+pushdef(_LI,`_LI_ ')dnl
+forloop(`_LI_',_li,decr(_uf),`dnl
+_st`'dnl
+')dnl
+popdef(_LI)dnl
+}
+popdef(`_uf')dnl
+popdef(`_st')dnl
+popdef(`_li')dnl
+popdef(`_LI')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_RESTRICT
+dnl	---------------
+dnl	The 'restrict' keyword of C99.
+dnl	It asserts that pointer arguments do not alias one another, leaving room for more compiler optimizations.
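+dnl	E.g. a generated signature such as (sketch):
+dnl	  void kernel(const mtype*RSB_M4_RESTRICT a, mtype*RSB_M4_RESTRICT c);
+dnl	carries the keyword when RSB_M4_USE_RESTRICT is 1 and omits it otherwise.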
+dnl
+define(`RSB_M4_RESTRICT',`ifelse(RSB_M4_USE_RESTRICT,`1',`restrict',`')')`'dnl
+dnl
+dnl
+dnl
diff --git a/doc/Doxyfile b/doc/Doxyfile
new file mode 100644
index 0000000..17a8348
--- /dev/null
+++ b/doc/Doxyfile
@@ -0,0 +1,551 @@
+# Doxyfile 1.5.4
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+DOXYFILE_ENCODING      = UTF-8
+PROJECT_NAME           = librsb
+PROJECT_NUMBER=$(DOXYGEN_PROJECT_NUMBER)
+OUTPUT_DIRECTORY       = ./
+CREATE_SUBDIRS         = NO
+OUTPUT_LANGUAGE        = English
+BRIEF_MEMBER_DESC      = YES
+REPEAT_BRIEF           = YES
+ABBREVIATE_BRIEF       = "The $name class" \
+                         "The $name widget" \
+                         "The $name file" \
+                         is \
+                         provides \
+                         specifies \
+                         contains \
+                         represents \
+                         a \
+                         an \
+                         the
+ALWAYS_DETAILED_SEC    = NO
+INLINE_INHERITED_MEMB  = NO
+FULL_PATH_NAMES        = YES
+STRIP_FROM_PATH        = ..
+STRIP_FROM_INC_PATH    = 
+SHORT_NAMES            = NO
+JAVADOC_AUTOBRIEF      = NO
+QT_AUTOBRIEF           = NO
+MULTILINE_CPP_IS_BRIEF = NO
+#DETAILS_AT_TOP         = NO
+INHERIT_DOCS           = YES
+SEPARATE_MEMBER_PAGES  = NO
+TAB_SIZE               = 8
+#ALIASES                += "\librsb=librsb"
+ALIASES                += "rsbmtxpmessage=On success, a valid pointer (\c struct \c rsb_mtx_t*) to the newly allocated matrix structure; on error, \c NULL."
+ALIASES                += "rsbmtxpmessage_bg=On success, a valid pointer to the inner matrix structure (\c struct \c rsb_mtx_t*); on error, \c NULL."
+ALIASES                += "rsb_BLAS_get_mtx_msg=Given a valid Sparse BLAS handle, returns a pointer to the inner rsb_mtx_t structure. Then, this can be used with many of the \ref rsb.h functions. This is an experimental function, so we recommend using it with functions that do not modify the matrix (ones that take \c const \c struct \c rsb_mtx_t*mtxAp). You can use this function from either Fortran or C."
+ALIASES                += "rsb_BLAS_get_mtx_msg_todo="
+ALIASES                += "rsb_BLAS_get_mtx_msg_note=\rsb_spblasl2e_ext_msg"
+ALIASES                += "rsb_BLAS_get_mtx_msg_warn="
+ALIASES                += "rsb_BLAS_get_mtx_example=An example using Fortran: \n \code \n ...  \n USE blas_sparse \n USE rsb \n IMPLICIT NONE \n TYPE(C_PTR),TARGET :: mtxAp = C_NULL_PTR ! matrix pointer \n INTEGER :: A ! blas_sparse_matrix handle \n INTEGER, TARGET :: istat = 0 \n ... ! begin, populate and finalize A, e.g. using BLAS_duscr_begin, BLAS_duscr_insert_entries, BLAS_uscr_end\n ! get pointer to rsb structure: \n mtxAp = rsb_blas_get_mtx(A) \n ! Now one can use it with any rsb. [...]
+ALIASES                += "rsbmtxapmessage=Pointer to a \c rsb_mtx_t  matrix structure in assembly state, or \c NULL (on error)."
+#ALIASES                += "rsberrorcodestr=A const string pointer to a textual description of the specified error code (see \ref rsb_doc_error_handling)." 
+#ALIASES                += "rsberrcodemsg=#RSB_ERR_NO_ERROR on correct operation, an error code (see \ref rsb_doc_error_handling) otherwise." 
+ALIASES                += "rsberrcodemsg=#RSB_ERR_NO_ERROR on correct operation, an error code otherwise. You can use #rsb_strerror_r() or #rsb_perror() to get more information about the error." 
+ALIASES                += "rsb_va_ia_ja_decl=VA,IA,JA"
+ALIASES                += "rsb_va_rp_ja_decl=VA,RP,JA"
+ALIASES                += "rsb_rw_va_ia_ja_desc_msg=VA,IA,JA Input/output numerical values array (\c VA); row (\c IA) and column (\c JA) indices arrays." 
+ALIASES                += "rsb_ro_va_ia_ja_desc_msg=VA,IA,JA Input numerical values (\c VA) array; row (\c IA) and column (\c JA) input indices arrays." 
+ALIASES                += "rsb_wr_va_ia_ja_desc_msg=VA,IA,JA Output numerical values (\c VA) array; output row (\c IA) and column (\c JA) indices arrays." 
+ALIASES                += "rsb_wr_va_rd_ia_ja_desc_msg=VA,IA,JA Output numerical values (\c VA) array; input row (\c IA) and column (\c JA) indices arrays." 
+ALIASES                += "rsb_wr_va_ia_ja_p_desc_msg=VAp,IAp,JAp Output numerical values (\c VAp) array pointer; output row (\c IAp) and column (\c JAp) indices arrays pointers." 
+ALIASES                += "rsb_wo_va_rp_ja_desc_msg=\rsb_va_rp_ja_decl Output numerical values (\c VA) array, compressed row indices (\c RP) and column indices (\c JA) arrays." 
+ALIASES                += "rsb_wr_va_rp_ja_desc_msg=\rsb_va_rp_ja_decl Input numerical values (\c VA) array; compressed rows (\c RP) and column (\c JA) input indices arrays. Will not be freed by #rsb_mtx_free()." 
+ALIASES                += "rsb_ro_va_rp_ja_desc_msg=\rsb_va_rp_ja_decl Input numerical values (\c VA) array; compressed rows (\c RP) and column (\c JA) input indices arrays." 
+ALIASES                += "rsb_ro_va_ia_cp_desc_msg=VA,IA,CP Input numerical values (\c VA) array, input row indices (\c IA) and compressed column (\c CP) indices arrays." 
+ALIASES                += "rsb_wr_ia_ja_desc_msg=IA,JA output row (\c IA) and column (\c JA) indices arrays." 
+ALIASES                += "rsb_inp_rnz_msg=rnzp A pointer where the number of relevant nonzero elements will be written to." 
+ALIASES                += "rsb_errval_inp_param_msg=errval A valid error flag value (see #rsb_err_t)." 
+ALIASES                += "rsb_buf_inp_param_msg=buf A valid string buffer pointer where to write to." 
+ALIASES                += "rsb_buflen_inp_param_msg=buflen The string buffer length." 
+ALIASES                += "rsb_inp_frlr_msg=frA,lrA First and last row indices." 
+ALIASES                += "rsb_inp_fclc_msg=fcA,lcA First and last column indices." 
+ALIASES                += "rsb_mtx_getinfo_msg=Returns a specified matrix (numerical) property " 
+ALIASES                += "rsb_io_str_msg=iop A pointer to a #rsb_initopts  structure with library options. It may be \c NULL  (or better, #RSB_NULL_INIT_OPTIONS/#RSB_NULL_EXIT_OPTIONS) for specifying default options." 
+ALIASES                += "rsb_io_str_msg_opnp=opnp A pointer to a library option input name string (may not be \c NULL)." 
+ALIASES                += "rsb_io_str_msg_opvp=opvp A pointer to a library option input value string (may not be \c NULL)." 
+ALIASES                += "rsb_nnz_inp_param_msg=nnz The number of nonzeroes in the input arrays." 
+ALIASES                += "rsb_nnzA_inp_param_msg=nnzA The number of nonzeroes in the input arrays representing matrix \f$A\f$." 
+ALIASES                += "rsb_nnzA_inp_param_msg_i=nnzA A rough estimate of the number of nonzeroes matrix \f$A\f$ will host (used for optimizing arrays allocation). If you do not know yet, you can specify zero." 
+ALIASES                += "rsb_nrows_inp_param_msg=nr The number of rows." 
+ALIASES                += "rsb_rowmajor_B_inp_param_msg=rowmajorB #RSB_BOOL_TRUE if the dense matrix \f$B\f$ is considered stored as row major, or #RSB_BOOL_FALSE if as column major." 
+ALIASES                += "rsb_rowmajor_C_inp_param_msg=rowmajorC #RSB_BOOL_TRUE if the dense matrix \f$C\f$ is considered stored as row major, or #RSB_BOOL_FALSE if as column major." 
+ALIASES                += "rsb_ncols_inp_param_msg=nc The number of columns." 
+ALIASES                += "rsb_nrcowsp_inp_param_msg=nrp,ncp Output pointers to rows and columns count variables (can be \c NULL)." 
+ALIASES                += "rsb_nnzp_inp_param_msg=nzp Output pointer to the nonzeroes count variable (can be \c NULL)." 
+ALIASES                += "rsb_nrbows_A_sparse_inp_param_msg=brA,bcA 	Blocking parameters: \c brA  should be set to 1 or #RSB_DEFAULT_ROW_BLOCKING  (currently unused, reserved for future use);  \c bcA should be set to 1 or #RSB_DEFAULT_COL_BLOCKING  (currently unused, reserved for future use)."
+#ALIASES                += "rsb_nrbows_sparse_inp_param_msg=brA,bcA 	\c brA  Should be set to 1 or #RSB_DEFAULT_ROW_BLOCKING (reserved for future use).  \c bcA Should be set to 1 or #RSB_DEFAULT_ROW_BLOCKING (reserved for future use)."
+ALIASES                += "rsb_nrcows_A_sparse_inp_param_msg=\rsb_nrA,\rsb_ncA The number of rows and columns of the sparse matrix \f$A\f$." 
+ALIASES                += "rsb_nrcows_B_dense_inp_param_msg=nrB,ncB The number of rows and columns for the dense matrix \f$B\f$." 
+ALIASES                += "rsb_nrcows_C_dense_inp_param_msg=nrC,ncC The number of rows and columns for the dense matrix \f$C\f$." 
+ALIASES                += "rsb_miflags_inp_param_msg=miflags A valid value of matrix info flags (see #rsb_mif_t for valid values)." 
+ALIASES                += "rsb_minfop_inp_param_msg=minfop Pointer to a variable of the right type, according to the matrix info flag specification (see #rsb_mif_t)." 
+ALIASES                += "rsb_flags_inp_param_msg=\rsb_flags A valid combination of matrix storage flags (see \ref flags_section flags section)." 
+ALIASES                += "rsb_flags_getv_inp_param_msg=\rsb_flags Either #RSB_FLAG_FORTRAN_INDICES_INTERFACE or #RSB_FLAG_C_INDICES_INTERFACE		 (see \ref flags_section flags section)." 
+ALIASES                += "rsb_flags_setv_inp_param_msg=\rsb_flags Either #RSB_FLAG_FORTRAN_INDICES_INTERFACE or #RSB_FLAG_C_INDICES_INTERFACE plus either #RSB_FLAG_DUPLICATES_SUM (to sum into) or #RSB_FLAG_DUPLICATES_KEEP_LAST (to overwrite entries) (see \ref flags_section flags section)."
+ALIASES                += "rsb_flags_getcb_inp_param_msg=\rsb_flags_getv_inp_param_msg"
+ALIASES                += "rsb_flags_getrs_inp_param_msg=\rsb_flags_getv_inp_param_msg"
+ALIASES                += "rsb_flags_getcs_inp_param_msg=\rsb_flags_getv_inp_param_msg"
+ALIASES                += "rsb_flags_getco_inp_param_msg=\rsb_flags_getv_inp_param_msg"
+ALIASES                += "rsb_flags_swcoo_inp_param_msg=\rsb_flags A combination of #RSB_FLAG_C_INDICES_INTERFACE and #RSB_FLAG_FORTRAN_INDICES_INTERFACE. (see \ref flags_section flags section)." 
+ALIASES                += "rsb_flagsa_inp_param_msg=\rsb_flagsA A valid combination of matrix storage flags." 
+ALIASES                += "rsb_flags_elop_param_msg=elop_flags Elemental operation specification flags (see #rsb_elopf_t for valid choices)." 
+ALIASES                += "rsb_flags_stru_fla_msg=\rsb_flags Either #RSB_FLAG_IDENTICAL_FLAGS or a combination of other flags, e.g.: #RSB_FLAG_C_INDICES_INTERFACE, #RSB_FLAG_SYMMETRIC, #RSB_FLAG_HERMITIAN, #RSB_FLAG_TRIANGULAR, #RSB_FLAG_UPPER, #RSB_FLAG_LOWER, #RSB_FLAG_UNIT_DIAG_IMPLICIT, #RSB_FLAG_DISCARD_ZEROS. Flag #RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS is forbidden. Flag #RSB_FLAG_FORTRAN_INDICES_INTERFACE is ignored." 
+ALIASES                += "rsb_transa_mtx_msg=\f$opa( A )=A\f$ if \c transA=#RSB_TRANSPOSITION_N; \n \f$opa( A )= A ^T\f$ if \c transA=#RSB_TRANSPOSITION_T; \f$opa( A )= A ^H\f$ if \c transA=#RSB_TRANSPOSITION_C;"
+ALIASES                += "rsb_num_threads=If \c --enable-rsb-num-threads  has been specified at configure time, the \c RSB_NUM_THREADS  environment variable will override the number of executing threads specified by \c OMP_NUM_THREADS. (See also #RSB_IO_WANT_EXECUTING_THREADS)."
+ALIASES                += "rsb_transb_mtx_msg=\f$opb( B )=B\f$ if \c transB=#RSB_TRANSPOSITION_N; \n \f$opb( B )= B ^T\f$ if \c transB=#RSB_TRANSPOSITION_T; \f$opb( B )= B ^H\f$ if \c transB=#RSB_TRANSPOSITION_C;"
+ALIASES                += "rsb_transt_mtx_msg=\f$opt( T )=T\f$ if \c transT=#RSB_TRANSPOSITION_N; \n \f$opt( T )= T ^T\f$ if \c transT=#RSB_TRANSPOSITION_T; \f$opt( T )= T ^H\f$ if \c transT=#RSB_TRANSPOSITION_C;"
+#ALIASES                += "rsb_flags_idc_param_msg=\rsb_flags A valid combination of index conversion flags (that is, #RSB_FLAG_NOFLAGS and #RSB_FLAG_FORTRAN_INDICES_INTERFACE)." 
+ALIASES                += "rsb_flags_idc_param_msg=\rsb_flags A valid combination of index conversion flags (that is, #RSB_FLAG_C_INDICES_INTERFACE and #RSB_FLAG_FORTRAN_INDICES_INTERFACE) and other meaningful flags. Symmetry flags shall be the same as in the matrix in use, because symmetry expansion may happen otherwise." 
+ALIASES                += "rsb_flagsp_inp_param_msg=flagsp Output pointer to the detected structural flags variable. Will be a combination of #RSB_FLAG_LOWER, #RSB_FLAG_UPPER, #RSB_FLAG_SYMMETRIC, #RSB_FLAG_HERMITIAN." 
+ALIASES                += "rsb_flagsa_coc_param_msg=\rsb_flagsA A valid combination of index conversion and matrix storage flags and other meaningful flags.\n The encouraged base choice here is #RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS. If Fortran (1 based) indices are being used for the IA, JA arrays, then the #RSB_FLAG_FORTRAN_INDICES_INTERFACE flag should be added. If symmetric storage is desired, then #RSB_FLAG_SYMMETRIC (or #RSB_FLAG_HERMITIAN, for Hermitian matrices) is necessary, in comb [...]
+ALIASES                += "rsb_flagsa_coi_param_msg=\rsb_flagsa_coc_param_msg" 
+ALIASES                += "rsb_flagsa_csc_param_msg=\rsb_flagsa_coc_param_msg" 
+ALIASES                += "rsb_flagsa_csr_param_msg=\rsb_flagsa_coc_param_msg" 
+ALIASES                += "rsb_xren_inp_param_msg=IREN,JREN Renumbering arrays for \c IA and \c JA (sized as the rows count and columns count, respectively). If \c NULL, no renumbering will be used." 
+ALIASES                += "rsb_errvp_inp_param_msg=errvalp An optional (can be \c NULL) pointer to #rsb_err_t  where the error status will be written to." 
+ALIASES                += "rsb_yvlp_param_msg=yvlp An optional pointer (can be \c NULL). If supplied, vector length will be written here, and no vector will be read." 
+ALIASES                += "rsb_yvl_param_msg=yvl Output vector length." 
+ALIASES                += "rsb_VA_can_null_msg=VA can be \c NULL "
+ALIASES                += "rsb_IA_can_null_msg=IA can be \c NULL "
+#ALIASES                += "rsb_brows_inp_param_msg=br Should be set to 1 or #RSB_DEFAULT_ROW_BLOCKING (reserved for future use)." 
+#ALIASES                += "rsb_bcols_inp_param_msg=bc Should be set to 1 or #RSB_DEFAULT_COL_BLOCKING (reserved for future use)." 
+ALIASES                += "rsb_brptr_inp_param_msg=..." 
+ALIASES                += "rsb_bcptr_inp_param_msg=..." 
+ALIASES                += "rsb_type_param_msg=typecode A valid type code for the given (numerical array) input pointer (see \ref matrix_type_symbols_section)." 
+ALIASES                += "rsb_type_o_param_msg=typecode A valid type code for the desired output matrix (see \ref matrix_type_symbols_section)." 
+ALIASES                += "rsb_psb_trans_inp_param_msg=psbtrans   Transposition parameter value valid in the PSBLAS library." 
+ALIASES                += "rsb_trans_inp_param_msg=trans   Transposition parameter (see \ref matrix_transposition_flags_section)." 
+ALIASES                += "rsb_transa_inp_param_msg=transA Transposition parameter for \f$A\f$ (see \ref matrix_transposition_flags_section)." 
+ALIASES                += "rsb_transb_inp_param_msg=transB Transposition parameter for \f$B\f$ (see \ref matrix_transposition_flags_section)." 
+ALIASES                += "rsb_transc_inp_param_msg=transC Transposition parameter for \f$C\f$ (see \ref matrix_transposition_flags_section)." 
+ALIASES                += "rsb_transt_inp_param_msg=transT Transposition parameter for \f$T\f$ (see \ref matrix_transposition_flags_section)." 
+ALIASES                += "rsb_alpha_inp_param_msg=alphap Optional pointer (if \c NULL, will default to 1) to a numerical value (of the same type as the matrix)." 
+ALIASES                += "rsb_exp_inp_param_msg=exp The power."
+ALIASES                += "rsb_omega_inp_param_msg=omegap Pointer to numerical location(s) (of the same type as the matrix)." 
+ALIASES                += "rsb_alpha_s_inp_param_msg=alphap Optional pointer (if \c NULL, will default to 1) to a numerical value for scaling the output."
+ALIASES                += "rsb_beta_inp_param_msg=betap Optional pointer (if \c NULL, will default to 1) to a numerical value." 
+ALIASES                += "rsb_c_inp_param_msg=Cp The output vector array." 
+ALIASES                += "rsb_c_tune_inp_param_msg=\rsb_c_inp_param_msg If \c NULL, a temporary, internally allocated copy will be used." 
+ALIASES                += "rsb_x1x2_inp_param_msg=x1p,x2p Right hand side vector array pointers." 
+ALIASES                += "rsb_y1y2_inp_param_msg=y1p,y2p Output vector array pointers." 
+ALIASES                += "rsb_b_inp_param_msg=Bp The input vector array." 
+ALIASES                += "rsb_b_tune_inp_param_msg=\rsb_b_inp_param_msg If \c NULL, a temporary, internally allocated copy will be used." 
+ALIASES                += "rsb_y_out_param_msg=Yp The output vector array." 
+ALIASES                += "rsb_y_inp_param_msg=Yp The input vector array." 
+ALIASES                += "rsb_d_inp_param_msg=Dp A valid pointer to a numerical vector array \f$D\f$." 
+ALIASES                += "rsb_diagonal_inp_param_msg=diagonalvp An array sized min(rows,columns), of the same type as the matrix." 
+ALIASES                += "rsb_x_inp_param_msg=Xp The input vector array." 
+ALIASES                += "rsb_incx_inp_param_msg=incX Spacing of vector elements in each input vector array (>=1)." 
+ALIASES                += "rsb_incy_inp_param_msg=incY Spacing of vector elements in each output vector array (>=1)." 
+#ALIASES                += "rsb_lda_inp_param_msg=ldA Leading dimension of Ap." 
+ALIASES                += "rsb_ldb_inp_param_msg=ldB Leading dimension of \c Bp array." 
+ALIASES                += "rsb_ldc_inp_param_msg=ldC Leading dimension of \c Cp array." 
+ALIASES                += "rsb_filename_inp_param_msg=filename The specified matrix file name (cannot be \c NULL)." 
+ALIASES                += "rsb_filename_inv_param_msg=filename The specified vector file name (cannot be \c NULL)." 
+ALIASES                += "rsb_filename_out_param_msg=filename The specified output file name (if \c NULL, will write to standard output)." 
+ALIASES                += "rsb_order_inp_param_msg=order A flag among #RSB_FLAG_WANT_COLUMN_MAJOR_ORDER and #RSB_FLAG_WANT_ROW_MAJOR_ORDER. For contiguous vector arrays, you probably want #RSB_FLAG_WANT_COLUMN_MAJOR_ORDER." 
+ALIASES                += "rsb_nrhs_inp_param_msg=nrhs The number of right hand side vectors (cannot be \c <1)." 
+ALIASES                += "rsb_dmtx_abi_param_msg_b=Bp Array representing the dense matrix \f$B\f$." 
+ALIASES                += "rsb_dmtx_abi_param_msg_c=Cp Array representing the dense matrix \f$C\f$." 
+ALIASES                += "rsb_mtxt_abi_param_msg=mtxAp A valid \c rsb_mtx_t pointer to the matrix." 
+ALIASES                += "rsb_mtxt_abi_param_msg_a=mtxAp Valid \c rsb_mtx_t pointer to matrix \f$A\f$ representation." 
+ALIASES                += "rsb_mtxt_abi_param_msg_b=mtxBp Valid \c rsb_mtx_t pointer to matrix \f$B\f$ representation." 
+ALIASES                += "rsb_mtxt_inp_param_msg_a=mtxAp Valid \c rsb_mtx_t pointer to matrix \f$A\f$ representation." 
+ALIASES                += "rsb_mtxt_inp_param_msg_i=mtxApp Pointer to the address of an unassembled \c rsb_mtx_t matrix." 
+ALIASES                += "rsb_mtxt_inp_param_msg_b=mtxBp Valid \c rsb_mtx_t pointer to matrix \f$B\f$ representation." 
+ALIASES                += "rsb_mtxt_inp_param_msg_t=mtxTp Valid \c rsb_mtx_t pointer to matrix \f$T\f$ representation. The matrix must be triangular; that is, it must have been allocated with either #RSB_FLAG_LOWER_TRIANGULAR or #RSB_FLAG_UPPER_TRIANGULAR flags." 
+ALIASES                += "rsb_mtxtpp_inp_param_msg_b=mtxBpp Valid \c rsb_mtx_t pointer to an address for matrix \f$B\f$." 
+ALIASES                += "rsb_mtxt_inp_param_msg=mtxAp A valid const matrix pointer." 
+#ALIASES                += "rsb_mtxt_iou_param_msg=mtxAp A valid matrix pointer of type (\c rsb_mtx_t*)." 
+ALIASES                += "rsb_mtxtp_iou_param_msg=mtxAp A valid pointer to a matrix pointer (\c rsb_mtx_t**)." 
+ALIASES                += "rsb_mtxtpa_iou_param_msg=mtxBpp A valid pointer to the matrix to assign to." 
+# Rendering specific messages:
+ALIASES                += "rsb_render_rflags_inp_param_msg=rflags	The color mode; only #RSB_MARF_RGB is supported for now (1 byte per channel, 3 channels --- red, green, blue): this requires array \c pmp to be at least (3*\c pmlWidth*\c pmHeight) bytes large."
+ALIASES                += "rsb_render_pmlwidth_inp_param_msg=pmlWidth	Stride between lines (in pixels; no less than \c pmWidth)."
+ALIASES                += "rsb_render_pmwidth_inp_param_msg=pmWidth 	Pixel map width  (in pixels or points)."
+ALIASES                += "rsb_render_pmheight_inp_param_msg=pmHeight 	Pixel map height (in pixels or points)."
+ALIASES                += "rsb_render_pmp_inp_param_msg=pmp		Pixel map array pointer."
+# Autotuning specific messages:
+ALIASES                += "rsb_tune__doc_msg=The tuner works by evaluating different instances and working-thread variants.  The instance leading to the fastest operation time will be retained and given back to the user in \c *mtxOpp.  If \c nrhs==1 and \c order==#RSB_FLAG_WANT_COLUMN_MAJOR_ORDER, unitary stride vectors are assumed.  In case of error, the original input matrix shall be unaffected.  It is possible to specify the leading dimensions of \c Bp,Cp implicitly, with  \c ldB=0  and \ [...]
+ALIASES                += "rsb_tune_mtxOpp_iou_param_msg=mtxOpp		Optimal matrix structure pointer will be assigned to \c *mtxOpp (it may occur that *mtxOpp==mtxAp on output). If \c mtxOpp  is \c NULL then no data structure optimization will be attempted; rather, only a search for the optimal thread count will occur (\c tnp must not be \c NULL  then)."
+ALIASES                += "rsb_tune_sfp_iou_param_msg=sfp		Achieved speedup factor will be written to \c *sfp  (unless \c sfp==NULL)."
+ALIASES                += "rsb_tune_tnp_iou_param_msg=tnp		If \c tnp==NULL on input, the current thread count will be utilized. Otherwise, if \c *tnp>0, then *tnp will be used as a first suggestion in the optimal thread count search. If \c tnp!=NULL, on output \c *tnp  will be set to contain the optimal number of threads. Then, the user is expected to set this number of threads using e.g.: \c RSB_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_EXECUTING_THREADS,tnp,errval). Please note that this will a [...]
+ALIASES                += "rsb_tune_maxr_iou_param_msg=maxr		Optimizer rounds max count. If \c 0, it will be decided automatically; if negative, it will be treated as 1. Max is #RSB_CONST_MAX_TUNING_ROUNDS."
+ALIASES                += "rsb_tune_maxt_iou_param_msg=maxt		Maximum time (in seconds) per optimization round (does not take into account conversion time). If \c maxt<0.0 is provided, \c -ceil(maxt) will be interpreted as the number of iterations to check for each operation time sample. If \c maxt==0.0 is provided, a default choice will be made instead."
+# Sparse BLAS specific messages:
+ALIASES                += "rsb_spblas_istat_msg=\param istat  If non \c NULL, \c *istat will be set to the return code, either 0 (success) or -1 (failure).  \return This is a subroutine for Fortran, so it does not return any value.\n" 
+ALIASES                += "rsb_spblas_f_istat_msg=\param istat  The return code will be written to \c istat (this is a Fortran routine): either 0 (success) or -1 (failure).\n"
+ALIASES                += "rsb_spblas_return_msg=\return On success, 0 is returned; on error, -1. " 
+ALIASES                += "rsb_spblas_return_mtx_msg=\return A matrix handle in case of success, or -1 on error. " 
+ALIASES                += "rsb_spblas_set_mtx_msg=Will assign a valid matrix handle to \f$A\f$ in case of success, or set it to -1 on error." 
+ALIASES                += "rsb_tune_warning_doc_msg=This function is still experimental.  In case of error, although the matrix shall be unaffected, the library status may be affected (e.g.: execution thread count, default matrix subdivision)."
+ALIASES                += "rsb_tune_todo_doc_msg=In the future, autotuning functionality shall improve considerably. Need support for lightweight, threads-only optimization.  May support strided vectors in the future."
+# Sparse L1 BLAS specific messages:
+#ALIASES                += "rsb_spblasl1_msg=Sparse BLAS Level 1 is not implemented (but soon will be supported, unoptimized), so this implementation is only a stub." 
+ALIASES                += "rsb_spblasl1_common_parms_msg=\param y Array for \f$Y\f$ vector. \param x Array for \f$X\f$ vector. \param nnz Size of \f$X\f$ and \f$Y\f$ vectors.  \param indx Is the array of indices at which sparse vector \f$X\f$ will be accessed. \param index_base Specifies the contents of \c indx, either #blas_zero_base or #blas_one_base. \param incy The distance between consecutive \c y array elements." 
+ALIASES                += "rsb_spblasl1_msg=Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind."
+ALIASES                += "rsb_spblasl1_axpy_msg=Sparse vector update:  \f$Y \leftarrow \alpha X + Y\f$. \rsb_spblasl1_common_parms_msg \param alpha Will scale values of \f$X\f$ before accumulating to \f$Y\f$." 
+ALIASES                += "rsb_spblasl1_dot_msg=Sparse dot product.  Either \f$r \leftarrow X^T Y\f$ or \f$r \leftarrow X^H Y\f$, depending on \c conj. \param r Sparse dot result array. \rsb_spblasl1_common_parms_msg \param conj If #blas_conj, values of \f$X\f$ will be considered conjugated." 
+ALIASES                += "rsb_spblasl1_ga_msg=Sparse gather.  \f$X \leftarrow Y |_x\f$. \rsb_spblasl1_common_parms_msg." 
+ALIASES                += "rsb_spblasl1_gz_msg=Sparse gather and zero.  \f$X \leftarrow Y |_x; Y|_x \leftarrow 0\f$. \rsb_spblasl1_common_parms_msg." 
+ALIASES                += "rsb_spblasl1_sc_msg=Sparse scatter:  \f$Y |_x\leftarrow X\f$. \rsb_spblasl1_common_parms_msg."
+# Sparse L2 BLAS specific messages:
+ALIASES                += "rsb_spblasl2_Ap_msg=\param A A valid pointer to an empty matrix handle." 
+ALIASES                += "rsb_spblasl2_A_msg=\param A A valid matrix handle." 
+ALIASES                += "rsb_spblasl2_A_msg_ftn=\param A On success, a valid matrix handle will be written to A. \todo Shall make \c A \c intent(inout) as well. " 
+ALIASES                += "rsb_spblasl2_T_msg=\param T A valid triangular matrix handle." 
+ALIASES                += "rsb_spblasl2_val_msg=\param val Array of values." 
+ALIASES                += "rsb_spblasl2_order_msg=\param order Layout of the dense array." 
+ALIASES                += "rsb_spblasl2_transa_msg=\param transA Transposition operator for matrix \a A." 
+ALIASES                += "rsb_spblasl2_transt_msg=\param transT Transposition operator for matrix \a T." 
+ALIASES                += "rsb_spblasl2_b_msg=\param b Dense vector \a b." 
+ALIASES                += "rsb_spblasl2_c_msg=\param c Dense vector \a c." 
+ALIASES                += "rsb_spblasl2_x_msg=\param x Dense vector \a x." 
+ALIASES                += "rsb_spblasl2_y_msg=\param y Dense vector \a y." 
+ALIASES                += "rsb_spblasl2_ldb_msg=\param ldb Leading dimension of \a b." 
+ALIASES                += "rsb_spblasl2_ldc_msg=\param ldc Leading dimension of \a c." 
+ALIASES                += "rsb_spblasl2_incx_msg=\param incx Stride of \a x." 
+ALIASES                += "rsb_spblasl2_incy_msg=\param incy Stride of \a y." 
+ALIASES                += "rsb_spblasl2_nrhs_msg=\param nrhs Number of right hand side columns." 
+ALIASES                += "rsb_spblasl2_alpha_msg=\param alpha Value for \f$ \alpha \f$." 
+ALIASES                += "rsb_spblasl2_nnz_ti_msg=\param nnz Number of nonzeroes to insert." 
+ALIASES                += "rsb_spblasl2_mn_msg=\param m Is the count of rows. \param n Is the count of columns." 
+ALIASES                += "rsb_spblasl2_MN_msg=\param Mb Block rows count. \param Nb Block columns count. " 
+ALIASES                += "rsb_spblasl2_cr_begin_msg=Allocates an empty matrix (A) and leaves it in build state. \rsb_spblasl2_mn_msg" 
+ALIASES                += "rsb_spblasl2_usmx_list=#BLAS_dusmv, #BLAS_susmv, #BLAS_zusmv, #BLAS_cusmv, #BLAS_dusmm, #BLAS_susmm, #BLAS_zusmm, #BLAS_cusmm"
+ALIASES                += "rsb_spblasl2_atno=\note By setting the \c #blas_rsb_autotune_next_operation property via #BLAS_ussp (at any time) the next multiplication routine call (either of \rsb_spblasl2_usmx_list) will invoke autotuning before carrying out the actual operation. The tuning will take into account parameters like transposition, number of right hand sides, and scaling constants." 
+ALIASES                += "rsb_spblasl2_at_asm=By setting the \c #blas_rsb_spmv_autotuning_on property via #BLAS_ussp, the default number of executing threads for this matrix will be determined once, at matrix assembly time, and employed irrespective of the default threads count (different values for transposed and untransposed multiply). This can be overridden only by setting the \c RSB_NUM_THREADS environment variable. " 
+ALIASES                += "rsb_spblasl2_at=\note \rsb_spblasl2_atno \rsb_spblasl2_at_asm \see On the topic of autotuning, see also rsb_tune_spmm.\n " 
+ALIASES                += "rsb_spblasl2_cr_block_msg_warn=\note At the moment \librsb  implements neither of the blocked storage techniques (BCSR, VBR), so the matrix will be represented as RSB."
+ALIASES                += "rsb_spblasl2_cr_block_msg=Allocates an empty matrix (A) and leaves it in build state. \param k,l Are row and column dimensions when specifying a matrix as BCSR. \rsb_spblasl2_MN_msg" 
+ALIASES                += "rsb_spblasl2_cr_vbr_msg=Allocates an empty matrix (A) and leaves it in build state. \param K,L Are arrays giving row/column block sizes for a matrix specified as VBR. \rsb_spblasl2_MN_msg " 
+ALIASES                += "rsb_spblasl2_cr_insert_entry_dup_msg=By default, duplicate entries will be summed together." 
+ALIASES                += "rsb_spblasl2_cr_insert_entry_msg=Inserts an entry in a matrix, assuming it is in build state. \rsb_spblasl2_cr_insert_entry_dup_msg \rsb_spblasl2_A_msg \rsb_spblasl2_val_msg \param i,j Row and column indices." 
+ALIASES                += "rsb_spblasl2_cr_insert_entries_msg=Inserts entries in a matrix, assuming it is in build state. \rsb_spblasl2_cr_insert_entry_dup_msg \rsb_spblasl2_A_msg \rsb_spblasl2_nnz_ti_msg \rsb_spblasl2_val_msg \param indx Row indices array. \param jndx Column indices array." 
+ALIASES                += "rsb_spblasl2_cr_insert_col_msg=Inserts a whole column in a matrix, assuming it is in build state. \rsb_spblasl2_cr_insert_entry_dup_msg \rsb_spblasl2_A_msg \param j Column index. \rsb_spblasl2_nnz_ti_msg \rsb_spblasl2_val_msg \param indx Row indices array." 
+ALIASES                += "rsb_spblasl2_cr_insert_row_msg=Inserts a whole row in a matrix, assuming it is in build state. \rsb_spblasl2_cr_insert_entry_dup_msg \rsb_spblasl2_A_msg \param i Row index. \rsb_spblasl2_nnz_ti_msg \rsb_spblasl2_val_msg \param indx Column indices array." 
+ALIASES                += "rsb_spblas_correct_msg=This shall be corrected in a future release." 
+ALIASES                += "rsb_BLAS_Xuscr_block_begin=#BLAS_cuscr_block_begin, #BLAS_suscr_block_begin, #BLAS_duscr_block_begin, #BLAS_zuscr_block_begin"
+ALIASES                += "rsb_BLAS_Xuscr_begin=#BLAS_cuscr_begin, #BLAS_suscr_begin, #BLAS_duscr_begin, #BLAS_zuscr_begin"
+ALIASES                += "rsb_see_BLAS_Xuscr_block_begin=\see \rsb_BLAS_Xuscr_block_begin, \rsb_BLAS_Xuscr_begin"
+ALIASES                += "rsb_spblasl2_cr_insert_block_msg=Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the corresponding (typed) blocked \c begin function. If no blocked \c begin function was called, 1x1 (that is, no) blocking will be assumed. \rsb_spblasl2_cr_insert_entry_dup_msg \rsb_spblasl2_A_msg \rsb_spblasl2_val_msg \param row_stride,col_stride Row and column strides in accessing \c val. \param i [...]
+ALIASES                += "rsb_spblasl2_cr_insert_clique_msg=Inserts a whole clique in a matrix, assuming it is in build state. \rsb_spblasl2_cr_insert_entry_dup_msg \rsb_spblasl2_A_msg \param k,l Clique rows and columns count. \rsb_spblasl2_val_msg \param row_stride,col_stride Row/column strides in accessing the clique. \param indx,jndx Row/column indices arrays.  \warning The signature of this routine for Fortran does not agree with the standard. \rsb_spblas_correct_msg" 
+ALIASES                += "rsb_spblasl2_cr_end_msg=Makes an assembled matrix out of a matrix in build state. After this, it is no longer possible to insert nonzeroes, but computational routines can be invoked. \rsb_spblasl2_A_msg" 
+ALIASES                += "rsb_spblasl2_ds_msg=Destroys a matrix. \rsb_spblasl2_A_msg" 
+ALIASES                += "rsb_spblasl2_sp_msg=Set a matrix property. Should be called just after creation, before nonzeroes insertion. \param A The matrix to apply the property to. \param pname  The desired matrix property. \rsb_spblasl2_pl_msg " 
+ALIASES                += "rsb_spblasl2_pl_msg=For valid matrix properties, see #blas_rsb_ext_type, #blas_uplo_type, #blas_diag_type, #blas_conj_type, #blas_base_type, #blas_symmetry_type, #blas_field_type, #blas_size_type, #blas_sparsity_optimization_type."
+# Discarded: #blas_handle_type, #blas_order_type, #blas_trans_type, #blas_side_type, #blas_cmach_type, #blas_norm_type, #blas_sort_type, #blas_jrot_type, #blas_prec_type
+ALIASES                += "rsb_spblasl2_gp_msg=Get a matrix property. \param A The matrix to read the property from. \param pname  The desired matrix property. \rsb_spblasl2_pl_msg"
+ALIASES                += "rsb_spblasl2_mv_msg=Multiply by a dense vector. Either of \f$Y \leftarrow \alpha A   X + Y  ,\f$\n \f$Y \leftarrow \alpha A^T X + Y,\f$\n \f$Y \leftarrow \alpha A^H X + Y\f$, depending on the value of \c transA. \rsb_spblasl2_transa_msg \rsb_spblasl2_alpha_msg \rsb_spblasl2_A_msg \rsb_spblasl2_x_msg \rsb_spblasl2_incx_msg \rsb_spblasl2_y_msg \rsb_spblasl2_incy_msg \note  \rsb_spblasl2_at \rsb_num_threads" 
+ALIASES                += "rsb_spblasl2_sv_msg=Triangular solve, by a dense vector. Either of \f$X \leftarrow \alpha T^{-1}X,\f$ \f$X \leftarrow \alpha T^{-T}X,\f$ \f$X \leftarrow \alpha T^{-H}X\f$, depending on the value of \c transT. \rsb_spblasl2_transt_msg \rsb_spblasl2_alpha_msg \rsb_spblasl2_T_msg \rsb_spblasl2_x_msg \rsb_spblasl2_incx_msg" 
+ALIASES                += "rsb_spblasl2_mm_msg=Multiply by a dense matrix (aka multi-vector). Either of \f$C \leftarrow \alpha AB+C,\f$ \f$C \leftarrow \alpha A^T B+C,\f$ \f$C \leftarrow \alpha A^H B+C\f$, depending on the value of \c transA. \rsb_spblasl2_order_msg \rsb_spblasl2_transa_msg \rsb_spblasl2_nrhs_msg \rsb_spblasl2_A_msg \rsb_spblasl2_alpha_msg \rsb_spblasl2_b_msg \rsb_spblasl2_ldb_msg \rsb_spblasl2_c_msg \rsb_spblasl2_ldc_msg \note \rsb_spblasl2_at \rsb_num_threads" 
+ALIASES                += "rsb_spblasl2_sm_msg=Triangular solve, by a dense matrix (aka multi-vector). Either of \f$B \leftarrow \alpha T^{-1} B,\f$ \f$B \leftarrow \alpha T^{-T} B,\f$ \f$B \leftarrow \alpha T^{-H} B\f$, depending on the value of \c transT. \rsb_spblasl2_order_msg \rsb_spblasl2_transt_msg \rsb_spblasl2_nrhs_msg \rsb_spblasl2_alpha_msg \rsb_spblasl2_T_msg \rsb_spblasl2_b_msg \rsb_spblasl2_ldb_msg" 
+# Sparse L2 BLAS specific messages (extensions):
+ALIASES                += "rsb_spblasl2e_ext_msg=\note This function is an extension implemented by \librsb and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality." 
+ALIASES                += "rsb_matrixmarketonlynote_v=\note The only dense vector file format currently supported is Matrix Market. E.g.: \verbinclude vf.mtx \n \rsb_matrixmarketonlynote_t" 
+ALIASES                += "rsb_matrixmarketonlynote_m=\note The only sparse matrix file format currently supported is Matrix Market. E.g.: \verbinclude A.mtx \n \rsb_matrixmarketonlynote_t \rsb_matrixmarketonlynote_s" 
+ALIASES                += "rsb_matrixmarketonlynote_t=In the header on the first line of the above example, you can specify either \c real, \c complex, or \c pattern for the numerical type."
+ALIASES                += "rsb_matrixmarketonlynote_s=Any of \c general, \c symmetric, or \c hermitian  can be specified for the structure. In the case of \c pattern  matrices, only coordinate indices will be loaded (saving \c pattern matrices is not yet supported); in the case of \c real  matrices, one coefficient value will also be saved/loaded; in the case of \c complex  matrices, both the real and imaginary parts will be saved/loaded in addition to the indices."
+ALIASES                += "rsb_spblasl2e_usget_diag_msg=Get matrix diagonal. \f$d\leftarrow diag(A)\f$. \rsb_spblasl2_A_msg \param d Array for the diagonal entries. \rsb_spblasl2e_ext_msg" 
+ALIASES                += "rsb_spblasl2e_usrows_scale_msg=Scale a rows interval of the matrix by the specified factors. \rsb_spblasl2_A_msg \param d Rows scaling vector. \param trans Transposition parameter (if transposed, columns will be scaled).  \rsb_spblasl2e_ext_msg" 
+ALIASES                += "rsb_spblasl2e_usget_rows_sparse_msg=Get sparse rows of matrix. \rsb_spblasl2_A_msg \param VA Pointer to values. \param IA Row indices array. \param JA Column indices array. \param nnz Obtained nonzeroes count. \param fr First row. \param lr Last row.\n \rsb_spblasl2e_ext_msg" 
+ALIASES                += "rsb_spblasl2e_usget_rows_nnz_msg=Get \rsb_nnz count of matrix row interval. \rsb_spblasl2_A_msg \param fr First row. \param lr Last row. \param nnzp Pointer to the nonzeroes variable.  \rsb_spblasl2e_ext_msg" 
+ALIASES                += "rsb_spblasl2e_usget_matrix_nnz_msg=Get \rsb_nnz count of matrix. \rsb_spblasl2_A_msg \param nnz Output value pointer. \rsb_spblasl2e_ext_msg" 
+ALIASES                += "rsb_spblasl2e_usget_infinity_norm_msg=Get infinity norm of matrix.  \rsb_spblasl2_A_msg \param in Infinity norm pointer. \param trans Transposition parameter.  \rsb_spblasl2e_ext_msg" 
+ALIASES                += "rsb_spblasl2e_usset_elements_norm_msg=Set individual matrix nonzero coefficient values. The operation is pattern preserving, that is, nonzeroes must already exist. \rsb_spblasl2_A_msg \param ia Row indices array. \param ja Column indices array. \param va Values array. \param nnz Length of the \c ia,ja,va arrays.  \rsb_spblasl2e_ext_msg" 
+ALIASES                += "rsb_spblasl2e_usset_element_norm_msg=Set a single (existing) matrix nonzero coefficient \f$A_{i,j}\f$. \rsb_spblasl2_A_msg \param i Row index. \param j Column index. \param v Value pointer. \rsb_spblasl2e_ext_msg" 
+ALIASES                += "rsb_spblasl2e_usget_element_norm_msg=Get a single matrix nonzero coefficient \f$A_{i,j}\f$. \rsb_spblasl2_A_msg \param i Row index. \param j Column index. \param v Value pointer.  \rsb_spblasl2e_ext_msg" 
+# end Sparse BLAS specific messages
+ALIASES                += "librsb=\c librsb" 
+ALIASES                += "rsb_nnz=nnz" 
+ALIASES                += "rsb_nnzA=nnzA" 
+ALIASES                += "rsb_nrA=nrA" 
+ALIASES                += "rsb_ncA=ncA" 
+#ALIASES                += "rsb_m=nr" 
+#ALIASES                += "rsb_k=nc" 
+ALIASES                += "rsb_flags=flags" 
+ALIASES                += "rsb_flagsA=flagsA" 
+ALIASES                += "rsb_warn_flags_not_complete_msg=Some structural info contained in the matrix structural flags may be lost in the output data." 
+ALIASES                += "rsb_warn_unimplemented_msg=Not implemented yet. \rsb_give_me_feedback" 
+ALIASES                += "rsb_warn_configuredout_msg=Although this operation is supported, the numerical type of this particular function has been configured out at build time, so an error code will be returned. \see_readme" 
+ALIASES                += "rsb_warn_unfinished_msg=This function is still unfinished. \rsb_give_me_feedback" 
+ALIASES                += "rsb_warn_unfinished_noerr_msg=This function does not handle errors consistently!" 
+ALIASES                += "rsb_warn_errors_fatal=Failures here could be FATAL!" 
+ALIASES                += "rsb_warn_unfinished_doc_msg=Documentation for this function is incomplete!" 
+ALIASES                += "rsb_warn_unfinished_flags_doc_msg=Documentation for this function is incomplete: shall define the supported flags combinations." 
+ALIASES                += "rsb_warn_unfinished_doc_sec_msg=Documentation for this section is to be completed." 
+#ALIASES                += "rsb_todo_unfinished_msg=Shall introduce overflow check."
+ALIASES                += "rsb_todo_unfinished_msg=This function implementation is not complete."
+ALIASES                += "rsb_todo_unfinished_inc_msg=This function implementation is not complete."
+ALIASES                += "rsb_warn_untested_msg=This function is not yet in the test suite." 
+ALIASES                += "rsb_warn_not_th_tested_msg=This function has not been thoroughly tested."
+ALIASES                += "rsb_note_switch_in_place=This function is only valid if \c mtxAp has been assembled in place (that is, in the arrays that are being reclaimed), e.g. with #rsb_mtx_alloc_from_coo_inplace(). Please also note that the matrix will get freed internally and so \c mtxAp  will not be usable in any way afterwards." 
+ALIASES                += "rsb_warn_unoptimized_msg=This function is not optimized." 
+ALIASES                += "rsb_warn_soon_to_be_deprecated_msg=This function will be deprecated soon." 
+ALIASES                += "rsb_warn_soon_to_be_updated_msg=This function shall be updated and expanded soon." 
+#ALIASES                += "rsb_warn_soon_to_be_deprecated_msg =This function may get deprecated soon." 
+ALIASES                += "rsb_value_param_msg=A pointer to the computed value." 
+ALIASES                += "rsb_note_assume_nnz_sized=Assumes all three \c VA,IA,JA arrays are at least min(\c \rsb_nnzA,\c \rsb_nrA+1,\c \rsb_ncA+1) sized. The user is expected NOT to use these arrays until the matrix has been destroyed with #rsb_mtx_free(). Then, it is possible to use these arrays again." 
+ALIASES                += "rsb_mtx_alloc_coo_inplace_msg=Given as input COO arrays \c \rsb_va_ia_ja_decl, allocates and assembles an RSB matrix reusing input arrays."
+ALIASES                += "rsb_mtx_alloc_csr_inplace_msg=Given as input CSR arrays \c \rsb_va_rp_ja_decl , allocates and assembles an RSB matrix reusing input arrays."
+ALIASES                += "rsb_ret_null=Always \c NULL." 
+ALIASES                += "rsb_spsv_no_zero=If \c --enable-zero-division-checks-on-solve  was specified at configure time, attempts to solve a triangular matrix with zeroes on a diagonal will fail." 
+ALIASES                += "rsb_configure_memwrap=Only works if the memory wrapper (\c --enable-allocator-wrapper) has been specified at configure time."
+#
+ALIASES                += "see_lib_spmx=\see rsb_spmv, rsb_spmm, rsb_tune_spmm"
+ALIASES                += "see_lib_spsx=\see rsb_spsm, rsb_spsv, rsb_tune_spsm" 
+ALIASES                += "rsb_iof_macros=#RSB_REINIT_SINGLE_VALUE_GET, #RSB_REINIT_SINGLE_VALUE_SET, #RSB_REINIT_SINGLE_VALUE, #RSB_REINIT_SINGLE_VALUE_C_IOP"
+ALIASES                += "rsb_iof_param_msg=\param iof library options flags. See #rsb_opt_t for a list of valid options."
+ALIASES                += "rsb_iop_inp_param_msg=\param iop library options value input (read only) pointer." 
+ALIASES                += "rsb_iop_out_param_msg=\param iop library options value output pointer (pointed location will be updated)." 
+#ALIASES                += "rsb_iof_see=For usage with \see_lib_init_funcs or (deprecated) macros \rsb_iof_macros."
+ALIASES                += "see_lib_init_funcs=#rsb_lib_init, #rsb_lib_set_opt_str, #rsb_lib_reinit, #rsb_lib_exit, #rsb_lib_get_opt, #rsb_lib_set_opt, or (deprecated) macros \rsb_iof_macros." 
+ALIASES                += "see_lib_init=\see \see_lib_init_funcs." 
+ALIASES                += "rsb_lib_alloc_in_place=#rsb_mtx_alloc_from_coo_inplace,#rsb_mtx_alloc_from_csr_inplace"
+ALIASES                += "see_lib_alloc=\see rsb_mtx_alloc_from_coo_const, rsb_mtx_alloc_from_coo_inplace, rsb_mtx_free, rsb_mtx_clone, rsb_mtx_alloc_from_csr_const, rsb_mtx_alloc_from_csc_const, rsb_mtx_alloc_from_csr_inplace, rsb_mtx_switch_to_csr, rsb_mtx_alloc_from_coo_begin, rsb_mtx_alloc_from_coo_end" 
+ALIASES                += "see_lib_get=\see rsb_mtx_get_coo, rsb_mtx_get_csr, rsb_mtx_get_rows_sparse, rsb_mtx_get_coo_block, rsb_mtx_get_prec, rsb_mtx_get_nrm, rsb_mtx_get_vec, rsb_file_mtx_get_dims, rsb_mtx_get_vals"
+ALIASES                += "see_lib_set=\see rsb_mtx_upd_vals, rsb_mtx_set_vals"
+ALIASES                += "see_lib_conv=\see rsb_mtx_switch_to_coo,rsb_mtx_switch_to_coo"
+#ALIASES                += "rsb_version_12="
+ALIASES                += "rsb_version_12=\note Introduced in librsb-1.2."
+ALIASES                += "see_lib_gemm=\see rsb_spmsp_to_dense, rsb_sppsp, rsb_spmsp, rsb_mtx_add_to_dense"
+ALIASES                += "see_lib_rndr=\see rsb_mtx_rndr, rsb_file_mtx_rndr"
+ALIASES                += "see_lib_info=\see rsb_mtx_get_info, rsb_mtx_get_info_str, rsb_file_mtx_save, rsb_file_vec_load, rsb_file_mtx_load"
+ALIASES                += "see_lib_util=\see rsb_time, rsb_coo_sort"
+ALIASES                += "see_lib_error=\see rsb_perror, rsb_strerror_r"
+ALIASES                += "see_lib_psblas=\see rsb_psblas_trans_to_rsb_trans"
+#
+ALIASES                += "see_readme=See the main README documentation file about build time configuration."
+ALIASES                += "see_rsb_BLAS_get_mtx_msg=\see_lib_get, \see_lib_set, \see_lib_gemm, \see_lib_rndr, \see_lib_info."
+ALIASES                += "rsb_give_me_feedback=Consider contacting the author if you intend to use it." 
+#
+OPTIMIZE_OUTPUT_FOR_C  = YES
+OPTIMIZE_OUTPUT_JAVA   = NO
+BUILTIN_STL_SUPPORT    = NO
+CPP_CLI_SUPPORT        = NO
+SIP_SUPPORT            = NO
+DISTRIBUTE_GROUP_DOC   = NO
+SUBGROUPING            = YES
+TYPEDEF_HIDES_STRUCT   = NO
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+EXTRACT_ALL            = YES
+EXTRACT_PRIVATE        = YES
+EXTRACT_STATIC         = NO
+EXTRACT_LOCAL_CLASSES  = YES
+EXTRACT_LOCAL_METHODS  = NO
+EXTRACT_ANON_NSPACES   = NO
+HIDE_UNDOC_MEMBERS     = NO
+HIDE_UNDOC_CLASSES     = NO
+HIDE_FRIEND_COMPOUNDS  = NO
+HIDE_IN_BODY_DOCS      = NO
+INTERNAL_DOCS          = NO
+CASE_SENSE_NAMES       = YES
+HIDE_SCOPE_NAMES       = NO
+SHOW_INCLUDE_FILES     = NO
+INLINE_INFO            = YES
+SORT_MEMBER_DOCS       = YES
+SORT_BRIEF_DOCS        = NO
+SORT_BY_SCOPE_NAME     = NO
+GENERATE_TODOLIST      = YES
+GENERATE_TESTLIST      = YES
+GENERATE_BUGLIST       = YES
+GENERATE_DEPRECATEDLIST= YES
+ENABLED_SECTIONS       = USERDOC 
+#ENABLED_SECTIONS       = USERDOC,INNERDOC
+MAX_INITIALIZER_LINES  = 30
+SHOW_USED_FILES        = YES
+#SHOW_DIRECTORIES       = NO # 20131112 Now obsolete
+FILE_VERSION_FILTER    = 
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+QUIET                  = NO
+WARNINGS               = YES
+WARN_IF_UNDOCUMENTED   = YES
+WARN_IF_DOC_ERROR      = YES
+WARN_NO_PARAMDOC       = NO
+WARN_FORMAT            = "$file:$line: $text"
+WARN_LOGFILE           = 
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+# INPUT                  = ../
+INPUT                  = ../blas_sparse.h ../rsb.h ../rsb.F90 ../rsb_libspblas.c ../rsb_libspblas.h ../rsb_rsb.c ../rsb_types.h  ../rsb_blas_sparse.F90 ../rsb_libspblas_handle.c
+INPUT_ENCODING         = UTF-8
+FILE_PATTERNS          = *.c \
+                         *.cpp \
+                         *.c++ \
+                         *.F90 \
+                         *.F03 \
+                         *.h
+RECURSIVE              = NO
+# WARNING: to me, it seems that the following does not work!
+EXCLUDE                = .svn doc bench plots autom4te.cache scripts obsolete obsolete-moved junk ./rsb-librsb-internals.h
+EXCLUDE_SYMLINKS       = NO
+#EXCLUDE_PATTERNS       = *rsb-types.h* .svn .libs .deps *.m4 *.m *.R *.sh
+EXCLUDE_PATTERNS       = types.h .svn .libs .deps *.m4 *.m *.R *.sh config.h rsb-config.h rsb_wrecks.c
+EXCLUDE_SYMBOLS        = 
+#EXAMPLE_PATH           = ../examples
+EXAMPLE_PATH           = ../ ../examples
+EXAMPLE_PATTERNS       = 
+EXAMPLE_RECURSIVE      = NO
+IMAGE_PATH             = 
+INPUT_FILTER           = 
+FILTER_PATTERNS        = 
+FILTER_SOURCE_FILES    = NO
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+SOURCE_BROWSER         = NO
+INLINE_SOURCES         = NO
+STRIP_CODE_COMMENTS    = NO
+REFERENCED_BY_RELATION = NO
+REFERENCES_RELATION    = NO
+REFERENCES_LINK_SOURCE = NO
+USE_HTAGS              = NO
+VERBATIM_HEADERS       = NO
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+ALPHABETICAL_INDEX     = NO
+COLS_IN_ALPHA_INDEX    = 5
+IGNORE_PREFIX          = 
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+GENERATE_HTML          = YES
+HTML_OUTPUT            = html
+HTML_FILE_EXTENSION    = .html
+HTML_HEADER            = 
+HTML_FOOTER            = 
+HTML_STYLESHEET        = 
+#HTML_ALIGN_MEMBERS     = YES # 20131112 Now obsolete
+GENERATE_HTMLHELP      = NO
+HTML_DYNAMIC_SECTIONS  = NO
+CHM_FILE               = 
+HHC_LOCATION           = 
+GENERATE_CHI           = NO
+BINARY_TOC             = NO
+TOC_EXPAND             = NO
+DISABLE_INDEX          = NO
+ENUM_VALUES_PER_LINE   = 4
+GENERATE_TREEVIEW      = NO
+TREEVIEW_WIDTH         = 250
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+GENERATE_LATEX         = NO
+LATEX_OUTPUT           = latex
+LATEX_CMD_NAME         = latex
+MAKEINDEX_CMD_NAME     = makeindex
+COMPACT_LATEX          = NO
+PAPER_TYPE             = a4wide
+EXTRA_PACKAGES         = 
+LATEX_HEADER           = 
+PDF_HYPERLINKS         = NO
+USE_PDFLATEX           = YES
+LATEX_BATCHMODE        = NO
+LATEX_HIDE_INDICES     = NO
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+GENERATE_RTF           = NO
+RTF_OUTPUT             = rtf
+COMPACT_RTF            = NO
+RTF_HYPERLINKS         = NO
+RTF_STYLESHEET_FILE    = 
+RTF_EXTENSIONS_FILE    = 
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+GENERATE_MAN           = YES
+MAN_OUTPUT             = man
+MAN_EXTENSION          = .3
+MAN_LINKS              = YES
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+GENERATE_XML           = NO
+XML_OUTPUT             = xml
+XML_SCHEMA             = 
+XML_DTD                = 
+XML_PROGRAMLISTING     = YES
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+GENERATE_AUTOGEN_DEF   = NO
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+GENERATE_PERLMOD       = NO
+PERLMOD_LATEX          = NO
+PERLMOD_PRETTY         = YES
+PERLMOD_MAKEVAR_PREFIX = 
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor   
+#---------------------------------------------------------------------------
+ENABLE_PREPROCESSING   = YES
+MACRO_EXPANSION        = NO
+EXPAND_ONLY_PREDEF     = NO
+SEARCH_INCLUDES        = YES
+INCLUDE_PATH           = 
+#INCLUDE_PATH           = rsb-config.h
+INCLUDE_FILE_PATTERNS  = 
+# FIXME 20130109 the following is a workaround:
+PREDEFINED             = RSB_HAVE_CONFIG_H=1 RSB_WITH_SPARSE_BLAS_INTERFACE=1
+# PREDEFINED             = 
+EXPAND_AS_DEFINED      = 
+SKIP_FUNCTION_MACROS   = YES
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references   
+#---------------------------------------------------------------------------
+TAGFILES               = 
+GENERATE_TAGFILE       = 
+ALLEXTERNALS           = NO
+EXTERNAL_GROUPS        = NO
+PERL_PATH              = /usr/bin/perl
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool   
+#---------------------------------------------------------------------------
+CLASS_DIAGRAMS         = NO
+MSCGEN_PATH            = 
+HIDE_UNDOC_RELATIONS   = YES
+#HAVE_DOT               = YES
+HAVE_DOT               = NO
+#CLASS_GRAPH            = YES
+CLASS_GRAPH            = NO
+#COLLABORATION_GRAPH    = YES
+COLLABORATION_GRAPH    = NO
+#GROUP_GRAPHS           = YES
+GROUP_GRAPHS           = NO
+UML_LOOK               = NO
+TEMPLATE_RELATIONS     = NO
+INCLUDE_GRAPH          = YES
+INCLUDED_BY_GRAPH      = YES
+#CALL_GRAPH             = YES
+CALL_GRAPH             = NO
+CALLER_GRAPH           = NO
+GRAPHICAL_HIERARCHY    = YES
+DIRECTORY_GRAPH        = YES
+DOT_IMAGE_FORMAT       = png
+DOT_PATH               = 
+DOTFILE_DIRS           = 
+DOT_GRAPH_MAX_NODES    = 50
+MAX_DOT_GRAPH_DEPTH    = 1000
+DOT_TRANSPARENT        = YES
+DOT_MULTI_TARGETS      = NO
+GENERATE_LEGEND        = YES
+DOT_CLEANUP            = YES
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine   
+#---------------------------------------------------------------------------
+SEARCHENGINE           = NO
diff --git a/doc/Makefile.am b/doc/Makefile.am
new file mode 100644
index 0000000..49b2d80
--- /dev/null
+++ b/doc/Makefile.am
@@ -0,0 +1,70 @@
+
+all:
+       
+EXTRA_DIST=Doxyfile
+if WANT_BUILD_DOC
+dox: html/index.html
+EXTRA_DIST+=man/* html/*
+BUILT_DOC=html man
+if HAVE_HELP2MAN
+man3_MANS=man/man3/rsb* man/rsbench.3 man/librsb-config.3
+else
+man3_MANS=man/man3/rsb*
+endif
+
+if HAVE_HELP2MAN
+man/librsb-config.3: ../librsb-config
+	$(HELP2MAN) --no-info $< > $@ 
+man/rsbench.3: ../rsbench
+	$(HELP2MAN) --no-info $< > $@ 
+endif
+
+$(man3_MANS): html/index.html
+
+html/index.html: ../rsb.h
+	$(MAKE) makedox
+
+makedox:
+	DOXYGEN_PROJECT_NUMBER=$(VERSION) $(DOXYGEN) Doxyfile || echo "are you sure you have doxygen installed?"
+	mkdir -p man/man3_
+	mv man/man3/* man/man3_/
+	if find man/man3_/rsb_doc* ; then \
+	mv man/man3_/rsb_doc* man/man3/ ; \
+	sed -i s/_doc_/::/g man/man3/* ; \
+	for f in man/man3/rsb_* ; do mv $$f `echo $$f | sed s/_doc_/::/g` ; done ; \
+	for f in man/man3/rsb* ; do $(AWK) -f ../scripts/rsbmandesc.awk $$f > $$f.tmp ; mv $$f.tmp $$f ; done ; \
+	for f in man/man3/rsb* ; do sed -i 's/^\(Generated.*$$\)/librsb was written by Michele Martone; this documentation has been generated by Doxygen./g' $$f ; done ; \
+	mv  -v man/man3/rsb::rsb.3               man/man3/rsb.h.3 ; \
+	sed -i s/rsb::rsb/rsb.h/g                man/man3/rsb.h.3 ; \
+	mv  -v man/man3/rsb::sparse_blas.3       man/man3/rsb-spblas.h.3 ; \
+	sed -i s/rsb::sparse_blas/rsb-spblas.h/g man/man3/rsb-spblas.h.3 ; \
+	mv  -v man/man3/rsb::examples.3          man/man3/rsb-examples.3 ; \
+	sed -i s/rsb::examples/rsb-examples/g  man/man3/rsb-examples.3 ; \
+	sed -i 's/\\fP\([a-z]\)/\\fP \1/g'     man/man3/rsb*.3 ; \
+	for f in man/man3/rsb* ; do ../scripts/rsbmanseealso.sh man/man3/rsb* >>  $$f ; done ; \
+	rm -fR man/man3_ ; fi
+
+else
+makedox:
+	echo "Documentation building has been disabled at configure time --- skipping."
+
+install-data-local:
+	$(mkdir_p) "$(DESTDIR)$(docdir)"
+	$(mkdir_p) "$(DESTDIR)$(docdir)/html/"
+	if test -f ./html/index.html ; then $(INSTALL_DATA)  ./html/* "$(DESTDIR)$(docdir)/html/" ; fi
+
+uninstall-local:
+	cd ./html ; for f in * ; do if test -f "$(DESTDIR)$(docdir)/html/"$$f ; then  rm "$(DESTDIR)$(docdir)/html/"$$f ; fi ; done
+	if test -d "$(DESTDIR)$(docdir)/html" ; then rmdir "$(DESTDIR)$(docdir)/html" || true ; fi
+	if test -d "$(DESTDIR)$(docdir)" ; then rmdir "$(DESTDIR)$(docdir)" || true ; fi
+dox:
+EXTRA_DIST+=
+BUILT_DOC=
+man3_MANS=
+$(man3_MANS):
+endif
+
+cleanall:
+	rm -rf $(BUILT_DOC)
+
+
diff --git a/doc/Makefile.in b/doc/Makefile.in
new file mode 100644
index 0000000..b37671d
--- /dev/null
+++ b/doc/Makefile.in
@@ -0,0 +1,584 @@
+# Makefile.in generated by automake 1.11.6 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software
+# Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+VPATH = @srcdir@
+am__make_dryrun = \
+  { \
+    am__dry=no; \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        echo 'am--echo: ; @echo "AM"  OK' | $(MAKE) -f - 2>/dev/null \
+          | grep '^AM OK$$' >/dev/null || am__dry=yes;; \
+      *) \
+        for am__flg in $$MAKEFLAGS; do \
+          case $$am__flg in \
+            *=*|--*) ;; \
+            *n*) am__dry=yes; break;; \
+          esac; \
+        done;; \
+    esac; \
+    test $$am__dry = yes; \
+  }
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+@WANT_BUILD_DOC_TRUE@am__append_1 = man/* html/*
+@WANT_BUILD_DOC_FALSE@am__append_2 = 
+subdir = doc
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/rsb-config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+    *) f=$$p;; \
+  esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+  srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+  for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+  for p in $$list; do echo "$$p $$p"; done | \
+  sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+  $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+    if (++n[$$2] == $(am__install_max)) \
+      { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+    END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+  sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+  sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+  test -z "$$files" \
+    || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+    || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+         $(am__cd) "$$dir" && rm -f $$files; }; \
+  }
+man3dir = $(mandir)/man3
+am__installdirs = "$(DESTDIR)$(man3dir)"
+NROFF = nroff
+MANS = $(man3_MANS)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+ARFLAGS = @ARFLAGS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DOXYGEN = @DOXYGEN@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FC = @FC@
+FCFLAGS = @FCFLAGS@
+FGREP = @FGREP@
+GREP = @GREP@
+HELP2MAN = @HELP2MAN@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBRSB_ABI_VERSION = @LIBRSB_ABI_VERSION@
+LIBRSB_LIBRSB_VER = @LIBRSB_LIBRSB_VER@
+LIBRSB_MAIN_RELEASE = @LIBRSB_MAIN_RELEASE@
+LIBRSB_VERSION = @LIBRSB_VERSION@
+LIBRSB_VER_DATE = @LIBRSB_VER_DATE@
+LIBRSB_VER_MAJOR = @LIBRSB_VER_MAJOR@
+LIBRSB_VER_MINOR = @LIBRSB_VER_MINOR@
+LIBRSB_VER_PATCH = @LIBRSB_VER_PATCH@
+LIBRSB_VER_PRERS = @LIBRSB_VER_PRERS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+M4 = @M4@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NOUNROLLCFLAGS = @NOUNROLLCFLAGS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OCTAVE = @OCTAVE@
+OCTAVE_FLAGS = @OCTAVE_FLAGS@
+OPENMP_CFLAGS = @OPENMP_CFLAGS@
+OPENMP_FCFLAGS = @OPENMP_FCFLAGS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+RANLIB = @RANLIB@
+RSB_CONST_MAX_SUPPORTED_THREADS = @RSB_CONST_MAX_SUPPORTED_THREADS@
+RSB_DETECTED_MEM_HIERARCHY_INFO = @RSB_DETECTED_MEM_HIERARCHY_INFO@
+RSB_RSBENCH_CFLAGS = @RSB_RSBENCH_CFLAGS@
+RSB_RSBENCH_LIBS = @RSB_RSBENCH_LIBS@
+RSB_USER_SET_MEM_HIERARCHY_INFO = @RSB_USER_SET_MEM_HIERARCHY_INFO@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+SVN_REVISION = @SVN_REVISION@
+VERSION = @VERSION@
+WANT_COLUMN_UNLOOP_FACTORS = @WANT_COLUMN_UNLOOP_FACTORS@
+WANT_HALFWORD_INDICES = @WANT_HALFWORD_INDICES@
+WANT_LOOPING_KERNELS = @WANT_LOOPING_KERNELS@
+WANT_MATRIX_ALL_META_OPS = @WANT_MATRIX_ALL_META_OPS@
+WANT_MATRIX_ALL_OPS = @WANT_MATRIX_ALL_OPS@
+WANT_MATRIX_ALL_TYPES = @WANT_MATRIX_ALL_TYPES@
+WANT_MATRIX_BCOO_STORAGE = @WANT_MATRIX_BCOO_STORAGE@
+WANT_MATRIX_BCSS_STORAGE = @WANT_MATRIX_BCSS_STORAGE@
+WANT_MATRIX_LINKED_STORAGE = @WANT_MATRIX_LINKED_STORAGE@
+WANT_MATRIX_OPS = @WANT_MATRIX_OPS@
+WANT_MATRIX_STORAGE = @WANT_MATRIX_STORAGE@
+WANT_MATRIX_VB_STORAGE = @WANT_MATRIX_VB_STORAGE@
+WANT_ROW_UNLOOP_FACTORS = @WANT_ROW_UNLOOP_FACTORS@
+WANT_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR = @WANT_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR@
+WANT_SPSM_DIAG_CHECK = @WANT_SPSM_DIAG_CHECK@
+WANT_TYPES = @WANT_TYPES@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_ct_FC = @ac_ct_FC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+enable_openmp = @enable_openmp@
+enable_restrict = @enable_restrict@
+exec_prefix = @exec_prefix@
+have_grep = @have_grep@
+have_sed = @have_sed@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+EXTRA_DIST = Doxyfile $(am__append_1) $(am__append_2)
+@WANT_BUILD_DOC_FALSE@BUILT_DOC = 
+@WANT_BUILD_DOC_TRUE@BUILT_DOC = html man
+@HAVE_HELP2MAN_FALSE@@WANT_BUILD_DOC_TRUE@man3_MANS = man/man3/rsb*
+@HAVE_HELP2MAN_TRUE@@WANT_BUILD_DOC_TRUE@man3_MANS = man/man3/rsb* man/rsbench.3 man/librsb-config.3
+@WANT_BUILD_DOC_FALSE@man3_MANS = 
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in:  $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu doc/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --gnu doc/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure:  $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4):  $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+install-man3: $(man3_MANS)
+	@$(NORMAL_INSTALL)
+	@list1='$(man3_MANS)'; \
+	list2=''; \
+	test -n "$(man3dir)" \
+	  && test -n "`echo $$list1$$list2`" \
+	  || exit 0; \
+	echo " $(MKDIR_P) '$(DESTDIR)$(man3dir)'"; \
+	$(MKDIR_P) "$(DESTDIR)$(man3dir)" || exit 1; \
+	{ for i in $$list1; do echo "$$i"; done;  \
+	if test -n "$$list2"; then \
+	  for i in $$list2; do echo "$$i"; done \
+	    | sed -n '/\.3[a-z]*$$/p'; \
+	fi; \
+	} | while read p; do \
+	  if test -f $$p; then d=; else d="$(srcdir)/"; fi; \
+	  echo "$$d$$p"; echo "$$p"; \
+	done | \
+	sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^3][0-9a-z]*$$,3,;x' \
+	      -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \
+	sed 'N;N;s,\n, ,g' | { \
+	list=; while read file base inst; do \
+	  if test "$$base" = "$$inst"; then list="$$list $$file"; else \
+	    echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man3dir)/$$inst'"; \
+	    $(INSTALL_DATA) "$$file" "$(DESTDIR)$(man3dir)/$$inst" || exit $$?; \
+	  fi; \
+	done; \
+	for i in $$list; do echo "$$i"; done | $(am__base_list) | \
+	while read files; do \
+	  test -z "$$files" || { \
+	    echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man3dir)'"; \
+	    $(INSTALL_DATA) $$files "$(DESTDIR)$(man3dir)" || exit $$?; }; \
+	done; }
+
+uninstall-man3:
+	@$(NORMAL_UNINSTALL)
+	@list='$(man3_MANS)'; test -n "$(man3dir)" || exit 0; \
+	files=`{ for i in $$list; do echo "$$i"; done; \
+	} | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^3][0-9a-z]*$$,3,;x' \
+	      -e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \
+	dir='$(DESTDIR)$(man3dir)'; $(am__uninstall_files_from_dir)
+tags: TAGS
+TAGS:
+
+ctags: CTAGS
+CTAGS:
+
+
+distdir: $(DISTFILES)
+	@list='$(MANS)'; if test -n "$$list"; then \
+	  list=`for p in $$list; do \
+	    if test -f $$p; then d=; else d="$(srcdir)/"; fi; \
+	    if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \
+	  if test -n "$$list" && \
+	    grep 'ab help2man is required to generate this page' $$list >/dev/null; then \
+	    echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \
+	    grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/         /' >&2; \
+	    echo "       to fix them, install help2man, remove and regenerate the man pages;" >&2; \
+	    echo "       typically \`make maintainer-clean' will remove them" >&2; \
+	    exit 1; \
+	  else :; fi; \
+	else :; fi
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(MANS)
+installdirs:
+	for dir in "$(DESTDIR)$(man3dir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+@WANT_BUILD_DOC_TRUE@uninstall-local:
+@WANT_BUILD_DOC_TRUE@install-data-local:
+clean: clean-am
+
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-data-local install-man
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man: install-man3
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-local uninstall-man
+
+uninstall-man: uninstall-man3
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic clean-libtool \
+	distclean distclean-generic distclean-libtool distdir dvi \
+	dvi-am html html-am info info-am install install-am \
+	install-data install-data-am install-data-local install-dvi \
+	install-dvi-am install-exec install-exec-am install-html \
+	install-html-am install-info install-info-am install-man \
+	install-man3 install-pdf install-pdf-am install-ps \
+	install-ps-am install-strip installcheck installcheck-am \
+	installdirs maintainer-clean maintainer-clean-generic \
+	mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \
+	ps ps-am uninstall uninstall-am uninstall-local uninstall-man \
+	uninstall-man3
+
+
+all:
+@WANT_BUILD_DOC_TRUE@dox: html/index.html
+
+@HAVE_HELP2MAN_TRUE@@WANT_BUILD_DOC_TRUE@man/librsb-config.3: ../librsb-config
+@HAVE_HELP2MAN_TRUE@@WANT_BUILD_DOC_TRUE@	$(HELP2MAN) --no-info $< > $@
+@HAVE_HELP2MAN_TRUE@@WANT_BUILD_DOC_TRUE@man/rsbench.3: ../rsbench
+@HAVE_HELP2MAN_TRUE@@WANT_BUILD_DOC_TRUE@	$(HELP2MAN) --no-info $< > $@
+
+@WANT_BUILD_DOC_TRUE@$(man3_MANS): html/index.html
+
+@WANT_BUILD_DOC_TRUE@html/index.html: ../rsb.h
+@WANT_BUILD_DOC_TRUE@	$(MAKE) makedox
+
+@WANT_BUILD_DOC_TRUE@makedox:
+@WANT_BUILD_DOC_TRUE@	DOXYGEN_PROJECT_NUMBER=$(VERSION) $(DOXYGEN) Doxyfile || echo "are you sure you have doxygen installed ?"
+@WANT_BUILD_DOC_TRUE@	mkdir -p man/man3_
+@WANT_BUILD_DOC_TRUE@	mv man/man3/* man/man3_/
+@WANT_BUILD_DOC_TRUE@	if find man/man3_/rsb_doc* ; then \
+@WANT_BUILD_DOC_TRUE@	mv man/man3_/rsb_doc* man/man3/ ; \
+@WANT_BUILD_DOC_TRUE@	sed -i s/_doc_/::/g man/man3/* ; \
+@WANT_BUILD_DOC_TRUE@	for f in man/man3/rsb_* ; do mv $$f `echo $$f | sed s/_doc_/::/g` ; done ; \
+@WANT_BUILD_DOC_TRUE@	for f in man/man3/rsb* ; do $(AWK) -f ../scripts/rsbmandesc.awk $$f > $$f.tmp ; mv $$f.tmp $$f ; done ; \
+@WANT_BUILD_DOC_TRUE@	for f in man/man3/rsb* ; do sed -i 's/^\(Generated.*$$\)/librsb was written by Michele Martone; this documentation has been generated by Doxygen./g' $$f ; done ; \
+@WANT_BUILD_DOC_TRUE@	mv  -v man/man3/rsb::rsb.3               man/man3/rsb.h.3 ; \
+@WANT_BUILD_DOC_TRUE@	sed -i s/rsb::rsb/rsb.h/g                man/man3/rsb.h.3 ; \
+@WANT_BUILD_DOC_TRUE@	mv  -v man/man3/rsb::sparse_blas.3       man/man3/rsb-spblas.h.3 ; \
+@WANT_BUILD_DOC_TRUE@	sed -i s/rsb::sparse_blas/rsb-spblas.h/g man/man3/rsb-spblas.h.3 ; \
+@WANT_BUILD_DOC_TRUE@	mv  -v man/man3/rsb::examples.3          man/man3/rsb-examples.3 ; \
+@WANT_BUILD_DOC_TRUE@	sed -i s/rsb::examples/rsb-examples/g  man/man3/rsb-examples.3 ; \
+@WANT_BUILD_DOC_TRUE@	sed -i 's/\\fP\([a-z]\)/\\fP \1/g'     man/man3/rsb*.3 ; \
+@WANT_BUILD_DOC_TRUE@	for f in man/man3/rsb* ; do ../scripts/rsbmanseealso.sh man/man3/rsb* >>  $$f ; done ; \
+@WANT_BUILD_DOC_TRUE@	rm -fR man/man3_ ; fi
+
+@WANT_BUILD_DOC_FALSE@makedox:
+@WANT_BUILD_DOC_FALSE@	echo "Documentation building has been disabled at configure time --- skipping."
+
+@WANT_BUILD_DOC_FALSE@install-data-local:
+@WANT_BUILD_DOC_FALSE@	$(mkdir_p) "$(DESTDIR)$(docdir)"
+@WANT_BUILD_DOC_FALSE@	$(mkdir_p) "$(DESTDIR)$(docdir)/html/"
+@WANT_BUILD_DOC_FALSE@	if test -f ./html/index.html ; then $(INSTALL_DATA)  ./html/* "$(DESTDIR)$(docdir)/html/" ; fi
+
+@WANT_BUILD_DOC_FALSE@uninstall-local:
+@WANT_BUILD_DOC_FALSE@	cd ./html ; for f in * ; do if test -f "$(DESTDIR)$(docdir)/html/"$$f ; then  rm "$(DESTDIR)$(docdir)/html/"$$f ; fi ; done
+@WANT_BUILD_DOC_FALSE@	if test -d "$(DESTDIR)$(docdir)/html" ; then rmdir "$(DESTDIR)$(docdir)/html" || true ; fi
+@WANT_BUILD_DOC_FALSE@	if test -d "$(DESTDIR)$(docdir)" ; then rmdir "$(DESTDIR)$(docdir)" || true ; fi
+@WANT_BUILD_DOC_FALSE@dox:
+@WANT_BUILD_DOC_FALSE@$(man3_MANS):
+
+cleanall:
+	rm -rf $(BUILT_DOC)
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/doc/html/annotated.html b/doc/html/annotated.html
new file mode 100644
index 0000000..eca8108
--- /dev/null
+++ b/doc/html/annotated.html
@@ -0,0 +1,128 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Data Structures</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="header">
+  <div class="headertitle">
+<div class="title">Data Structures</div>  </div>
+</div><!--header-->
+<div class="contents">
+<div class="textblock">Here are the data structures with brief descriptions:</div><div class="directory">
+<div class="levels">[detail level <span onclick="javascript:toggleLevel(1);">1</span><span onclick="javascript:toggleLevel(2);">2</span>]</div><table class="directory">
+<tr id="row_0_" class="even"><td class="entry"><img id="arr_0_" src="ftv2mnode.png" alt="o" width="16" height="22" onclick="toggleFolder('0_')"/><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="classblas__sparse.html" target="_self">blas_sparse</a></td><td class="desc"></td></tr>
+<tr id="row_0_0_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfaceblas__sparse_1_1rsb__blas__get__mtx.html" target="_self">rsb_blas_get_mtx</a></td><td class="desc"></td></tr>
+<tr id="row_0_1_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfaceblas__sparse_1_1uscr__insert__block.html" target="_self">uscr_insert_block</a></td><td class="desc">Inserts a dense block</td></tr>
+<tr id="row_0_2_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfaceblas__sparse_1_1uscr__insert__clique.html" target="_self">uscr_insert_clique</a></td><td class="desc">Inserts a clique</td></tr>
+<tr id="row_0_3_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfaceblas__sparse_1_1uscr__insert__col.html" target="_self">uscr_insert_col</a></td><td class="desc">Inserts a sparse column</td></tr>
+<tr id="row_0_4_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfaceblas__sparse_1_1uscr__insert__entries.html" target="_self">uscr_insert_entries</a></td><td class="desc">Inserts multiple entries</td></tr>
+<tr id="row_0_5_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfaceblas__sparse_1_1uscr__insert__entry.html" target="_self">uscr_insert_entry</a></td><td class="desc">A Sparse BLAS interface for RSB</td></tr>
+<tr id="row_0_6_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfaceblas__sparse_1_1uscr__insert__row.html" target="_self">uscr_insert_row</a></td><td class="desc">Inserts a sparse row</td></tr>
+<tr id="row_0_7_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfaceblas__sparse_1_1usmm.html" target="_self">usmm</a></td><td class="desc">Multiplication : c <- beta c + alpha A b</td></tr>
+<tr id="row_0_8_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfaceblas__sparse_1_1usmv.html" target="_self">usmv</a></td><td class="desc">Multiplication : c <- beta c + alpha A b</td></tr>
+<tr id="row_0_9_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfaceblas__sparse_1_1ussm.html" target="_self">ussm</a></td><td class="desc">Triangular solve: b <- alpha A^-1 b</td></tr>
+<tr id="row_0_10_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2lastnode.png" alt="\" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfaceblas__sparse_1_1ussv.html" target="_self">ussv</a></td><td class="desc">Triangular solve: b <- alpha A^-1 b</td></tr>
+<tr id="row_1_" class="even"><td class="entry"><img id="arr_1_" src="ftv2mnode.png" alt="o" width="16" height="22" onclick="toggleFolder('1_')"/><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="classrsb.html" target="_self">rsb</a></td><td class="desc"></td></tr>
+<tr id="row_1_0_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__coo__sort.html" target="_self">rsb_coo_sort</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaa09eca432d5bb8c57fcff5d9ab98dfb8">rsb_coo_sort</a></td></tr>
+<tr id="row_1_1_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__file__mtx__get__dims.html" target="_self">rsb_file_mtx_get_dims</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaa79f69918eafbd8f737b7866a00a0330">rsb_file_mtx_get_dims</a></td></tr>
+<tr id="row_1_2_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__file__mtx__load.html" target="_self">rsb_file_mtx_load</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga00833b0cf57da8e430f9d0e2b5375bb3">rsb_file_mtx_load</a></td></tr>
+<tr id="row_1_3_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__file__mtx__rndr.html" target="_self">rsb_file_mtx_rndr</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga4b45a74b985f5cbd869bc9a540951771">rsb_file_mtx_rndr</a></td></tr>
+<tr id="row_1_4_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__file__mtx__save.html" target="_self">rsb_file_mtx_save</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad911ac7528c95c874d02cb17e6b76c54">rsb_file_mtx_save</a></td></tr>
+<tr id="row_1_5_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__file__vec__load.html" target="_self">rsb_file_vec_load</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad071e0373a08f74ee7ae910e9e4fd140">rsb_file_vec_load</a></td></tr>
+<tr id="row_1_6_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__file__vec__save.html" target="_self">rsb_file_vec_save</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gac4b2a63cdfe1cd4083b1561ee4bea696">rsb_file_vec_save</a></td></tr>
+<tr id="row_1_7_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__lib__exit.html" target="_self">rsb_lib_exit</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a></td></tr>
+<tr id="row_1_8_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__lib__get__opt.html" target="_self">rsb_lib_get_opt</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="rsb__rsb_8c.html#a96a28efc32dd050d2a74208b3ad2f227">rsb_lib_get_opt</a></td></tr>
+<tr id="row_1_9_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__lib__init.html" target="_self">rsb_lib_init</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a></td></tr>
+<tr id="row_1_10_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__lib__reinit.html" target="_self">rsb_lib_reinit</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit</a></td></tr>
+<tr id="row_1_11_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__lib__set__opt.html" target="_self">rsb_lib_set_opt</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="rsb__rsb_8c.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_lib_set_opt</a></td></tr>
+<tr id="row_1_12_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__lib__set__opt__str.html" target="_self">rsb_lib_set_opt_str</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga4670aa682e70f82d5039c600e426a368">rsb_lib_set_opt_str</a></td></tr>
+<tr id="row_1_13_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__add__to__dense.html" target="_self">rsb_mtx_add_to_dense</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaf30a70ea183d30d216f700782fc01524">rsb_mtx_add_to_dense</a></td></tr>
+<tr id="row_1_14_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__begin.html" target="_self">rsb_mtx_alloc_from_coo_begin</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gafca80e53d47a7ec3eb116e755fe47c58">rsb_mtx_alloc_from_coo_begin [...]
+<tr id="row_1_15_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__const.html" target="_self">rsb_mtx_alloc_from_coo_const</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_f [...]
+<tr id="row_1_16_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__end.html" target="_self">rsb_mtx_alloc_from_coo_end</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab583fbefa0a66e9d30dac034480c2d86">rsb_mtx_alloc_from_coo_end</a></ [...]
+<tr id="row_1_17_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__inplace.html" target="_self">rsb_mtx_alloc_from_coo_inplace</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_mtx_all [...]
+<tr id="row_1_18_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csc__const.html" target="_self">rsb_mtx_alloc_from_csc_const</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaebf57d9e5263f41eb6163581ffc141aa">rsb_mtx_alloc_from_csc_const [...]
+<tr id="row_1_19_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csr__const.html" target="_self">rsb_mtx_alloc_from_csr_const</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga13d417f776654fd159f274e56191573e">rsb_mtx_alloc_f [...]
+<tr id="row_1_20_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csr__inplace.html" target="_self">rsb_mtx_alloc_from_csr_inplace</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga60121166daf00968ba717931f04ea455">rsb_mtx_alloc_from_csr_i [...]
+<tr id="row_1_21_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__clone.html" target="_self">rsb_mtx_clone</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a></td></tr>
+<tr id="row_1_22_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__free.html" target="_self">rsb_mtx_free</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a></td></tr>
+<tr id="row_1_23_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__get__coo.html" target="_self">rsb_mtx_get_coo</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaac3c6c033733a8101b9ccf56f8fc7112">rsb_mtx_get_coo</a></td></tr>
+<tr id="row_1_24_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__get__coo__block.html" target="_self">rsb_mtx_get_coo_block</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb_mtx_get_coo_block</a></td></tr>
+<tr id="row_1_25_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__get__csr.html" target="_self">rsb_mtx_get_csr</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga4adca460f50bc1ad7d9ffdfda2273b87">rsb_mtx_get_csr</a></td></tr>
+<tr id="row_1_26_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__get__info.html" target="_self">rsb_mtx_get_info</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad9a3eacd54fb7043464006cd57866edf">rsb_mtx_get_info</a></td></tr>
+<tr id="row_1_27_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__get__info__str.html" target="_self">rsb_mtx_get_info_str</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str</a></td></tr>
+<tr id="row_1_28_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__get__nrm.html" target="_self">rsb_mtx_get_nrm</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga6a645ce89fd167d72c92cdcfbcd8ed81">rsb_mtx_get_nrm</a></td></tr>
+<tr id="row_1_29_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__get__prec.html" target="_self">rsb_mtx_get_prec</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gadaee12cc24dac7f8ebc68efd3d09c819">rsb_mtx_get_prec</a></td></tr>
+<tr id="row_1_30_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__get__rows__sparse.html" target="_self">rsb_mtx_get_rows_sparse</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaa01c4a69db732f99e8a960ee8c9afa23">rsb_mtx_get_rows_sparse</a></td></tr>
+<tr id="row_1_31_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__get__vals.html" target="_self">rsb_mtx_get_vals</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad8f1aa9ac5081edd789374e7bb82697f">rsb_mtx_get_vals</a></td></tr>
+<tr id="row_1_32_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__get__vec.html" target="_self">rsb_mtx_get_vec</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad0b2352cea6b7512b466d1c51327fcf8">rsb_mtx_get_vec</a></td></tr>
+<tr id="row_1_33_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__rndr.html" target="_self">rsb_mtx_rndr</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab0702d7080d1699162e4201bc70cc5ee">rsb_mtx_rndr</a></td></tr>
+<tr id="row_1_34_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__set__vals.html" target="_self">rsb_mtx_set_vals</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab8069ad6d5a67bc8a726131891e98c46">rsb_mtx_set_vals</a></td></tr>
+<tr id="row_1_35_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__switch__to__coo.html" target="_self">rsb_mtx_switch_to_coo</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gadf75c148fe661486ab0d8140657b8d9a">rsb_mtx_switch_to_coo</a></td></tr>
+<tr id="row_1_36_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__switch__to__csr.html" target="_self">rsb_mtx_switch_to_csr</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga3c46a4942a6acb90063d721b6446e78e">rsb_mtx_switch_to_csr</a></td></tr>
+<tr id="row_1_37_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__mtx__upd__vals.html" target="_self">rsb_mtx_upd_vals</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals</a></td></tr>
+<tr id="row_1_38_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__perror.html" target="_self">rsb_perror</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror</a></td></tr>
+<tr id="row_1_39_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__psblas__trans__to__rsb__trans.html" target="_self">rsb_psblas_trans_to_rsb_trans</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga7459601f0d54bd95549959b9749fedde">rsb_psblas_tr [...]
+<tr id="row_1_40_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__spmm.html" target="_self">rsb_spmm</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga3ec8d721b5333aae6ea9b03eb0039285">rsb_spmm</a></td></tr>
+<tr id="row_1_41_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__spmsp.html" target="_self">rsb_spmsp</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga8813ccbbb1065ac76bfe22c42feafa05">rsb_spmsp</a></td></tr>
+<tr id="row_1_42_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__spmsp__to__dense.html" target="_self">rsb_spmsp_to_dense</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga74d97612d4af70244c886b9eadd90a0e">rsb_spmsp_to_dense</a></td></tr>
+<tr id="row_1_43_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__spmv.html" target="_self">rsb_spmv</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga4a16a82d289c76a437915db449553d4d">rsb_spmv</a></td></tr>
+<tr id="row_1_44_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__sppsp.html" target="_self">rsb_sppsp</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga30823d02e577e59da4ccff6baaeb8ea1">rsb_sppsp</a></td></tr>
+<tr id="row_1_45_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__spsm.html" target="_self">rsb_spsm</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga48e6f3844605fffac9f622f05afa6043">rsb_spsm</a></td></tr>
+<tr id="row_1_46_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__spsv.html" target="_self">rsb_spsv</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga9b044332b720d3f8083ae792068fb04a">rsb_spsv</a></td></tr>
+<tr id="row_1_47_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__strerror__r.html" target="_self">rsb_strerror_r</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r</a></td></tr>
+<tr id="row_1_48_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__time.html" target="_self">rsb_time</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a></td></tr>
+<tr id="row_1_49_" class="even"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__tune__spmm.html" target="_self">rsb_tune_spmm</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a></td></tr>
+<tr id="row_1_50_"><td class="entry"><img src="ftv2vertline.png" alt="|" width="16" height="22" /><img src="ftv2lastnode.png" alt="\" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="interfacersb_1_1rsb__tune__spsm.html" target="_self">rsb_tune_spsm</a></td><td class="desc">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga8d7a05bbc165bd6ac20e8e23487a5871">rsb_tune_spsm</a></td></tr>
+<tr id="row_2_" class="even"><td class="entry"><img src="ftv2lastnode.png" alt="\" width="16" height="22" /><img src="ftv2cl.png" alt="C" width="24" height="22" /><a class="el" href="structrsb__initopts.html" target="_self">rsb_initopts</a></td><td class="desc">A structure specifying library (initialization) options, to be used with the <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit()</a> function. <br/>
+</td></tr>
+</table>
+</div><!-- directory -->
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
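
The annotated index above catalogues the Fortran ISO C BINDING wrappers around librsb's native C API declared in rsb.h. For orientation, here is a minimal C sketch of the corresponding native call sequence (init, build from COO, multiply, free); the 3x3 diagonal data are made up for illustration and error handling is abbreviated, so rsb.h remains the authoritative reference for the prototypes.

/* Minimal sketch of the rsb.h C calls that the Fortran interfaces
 * listed above bind to; illustrative data, abbreviated error handling. */
#include <rsb.h>

int main(void)
{
	const rsb_coo_idx_t IA[] = {0, 1, 2}, JA[] = {0, 1, 2}; /* 3x3 diagonal */
	const double VA[] = {1.0, 2.0, 3.0};
	const double X[] = {1.0, 1.0, 1.0};
	double Y[] = {0.0, 0.0, 0.0};
	const double alpha = 1.0, beta = 0.0;
	struct rsb_mtx_t *mtxAp = NULL;
	rsb_err_t errval = RSB_ERR_NO_ERROR;

	if (rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
		return 1;
	/* brA/bcA blocking parameters left at 0 (defaults) */
	mtxAp = rsb_mtx_alloc_from_coo_const(VA, IA, JA, 3,
			RSB_NUMERICAL_TYPE_DOUBLE, 3, 3, 0, 0,
			RSB_FLAG_NOFLAGS, &errval);
	if (mtxAp == NULL)
		return 1;
	/* Y := alpha * A * X + beta * Y */
	errval = rsb_spmv(RSB_TRANSPOSITION_N, &alpha, mtxAp, X, 1, &beta, Y, 1);
	rsb_mtx_free(mtxAp);
	rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
	return (errval == RSB_ERR_NO_ERROR) ? 0 : 1;
}
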
diff --git a/doc/html/bc_s.png b/doc/html/bc_s.png
new file mode 100644
index 0000000..25e3beb
Binary files /dev/null and b/doc/html/bc_s.png differ
diff --git a/doc/html/bdwn.png b/doc/html/bdwn.png
new file mode 100644
index 0000000..940a0b9
Binary files /dev/null and b/doc/html/bdwn.png differ
diff --git a/doc/html/blas__sparse_8h.html b/doc/html/blas__sparse_8h.html
new file mode 100644
index 0000000..26dd4c8
--- /dev/null
+++ b/doc/html/blas__sparse_8h.html
@@ -0,0 +1,1768 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: blas_sparse.h File Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="files.html"><span>File List</span></a></li>
+      <li><a href="globals.html"><span>Globals</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#define-members">Macros</a> |
+<a href="#typedef-members">Typedefs</a> |
+<a href="#enum-members">Enumerations</a> |
+<a href="#func-members">Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">blas_sparse.h File Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>This file specifies the Sparse BLAS interface to librsb. Supported types :(float,double,float complex,double complex) . Unsupported types:() . Level 1 ops :(dot,axpy,ga,gz,sc) . Level 2 ops :(mv,sv) . Level 3 ops :(mm,sm) .  
+<a href="#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="define-members"></a>
+Macros</h2></td></tr>
+<tr class="memitem:aab00e94b9818e92bb03c32f7ec677932"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="blas__sparse_8h.html#aab00e94b9818e92bb03c32f7ec677932">BLAS_ENUM_H</a></td></tr>
+<tr class="memitem:a6719ae77dfef6d6dd0790e34a65c1924"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="blas__sparse_8h.html#a6719ae77dfef6d6dd0790e34a65c1924">BLAS_ussp</a>   rsb_wp__BLAS_ussp</td></tr>
+<tr class="memitem:a5eec91b6d95962811bd9cb4e37266214"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="blas__sparse_8h.html#a5eec91b6d95962811bd9cb4e37266214">BLAS_usgp</a>   rsb_wp__BLAS_usgp</td></tr>
+</table><table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="typedef-members"></a>
+Typedefs</h2></td></tr>
+<tr class="memitem:a6f56456b01e0cc6b25b81201aa67c163"><td class="memItemLeft" align="right" valign="top">typedef int </td><td class="memItemRight" valign="bottom"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a></td></tr>
+</table><table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="enum-members"></a>
+Enumerations</h2></td></tr>
+<tr class="memitem:a9e6ec9e515f9d9b7e47110ae5f6ea04e"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea635ab08ac28ae417e25c0d163c40f19d">blas_rowmajor</a> =  101, 
+<a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e">blas_colmajor</a> =  102, 
+<a class="el" href="rsb__libspblas_8c.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea635ab08ac28ae417e25c0d163c40f19d">blas_rowmajor</a> =  101, 
+<a class="el" href="rsb__libspblas_8c.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e">blas_colmajor</a> =  102, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea635ab08ac28ae417e25c0d163c40f19d">blas_rowmajor</a> =  101, 
+<a class="el" href="rsb__libspblas_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e">blas_colmajor</a> =  102
+<br/>
+ }</td></tr>
+<tr class="memitem:a23e5e138364c80074ac014a3dfd346b7"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f">blas_no_trans</a> =  111, 
+<a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54">blas_trans</a> =  112, 
+<a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c">blas_conj_trans</a> =  113, 
+<a class="el" href="rsb__libspblas_8c.html#a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f">blas_no_trans</a> =  111, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54">blas_trans</a> =  112, 
+<a class="el" href="rsb__libspblas_8c.html#a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c">blas_conj_trans</a> =  113, 
+<a class="el" href="rsb__libspblas_8h.html#a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f">blas_no_trans</a> =  111, 
+<a class="el" href="rsb__libspblas_8h.html#a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54">blas_trans</a> =  112, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c">blas_conj_trans</a> =  113
+<br/>
+ }</td></tr>
+<tr class="memitem:acc2b26a405868ca1bd8a18e0eb62e820"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="blas__sparse_8h.html#acc2b26a405868ca1bd8a18e0eb62e820">blas_uplo_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07">blas_upper</a> =  121, 
+<a class="el" href="blas__sparse_8h.html#acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26">blas_lower</a> =  122, 
+<a class="el" href="rsb__libspblas_8c.html#acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07">blas_upper</a> =  121, 
+<a class="el" href="rsb__libspblas_8c.html#acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26">blas_lower</a> =  122, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07">blas_upper</a> =  121, 
+<a class="el" href="rsb__libspblas_8h.html#acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26">blas_lower</a> =  122
+<br/>
+ }</td></tr>
+<tr class="memitem:ad7b35ac9114bfe21e15d011bf878b164"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="blas__sparse_8h.html#ad7b35ac9114bfe21e15d011bf878b164">blas_diag_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#ad7b35ac9114bfe21e15d011bf878b164a3e6acad666ce6b542ac341569b83624f">blas_non_unit_diag</a> =  131, 
+<a class="el" href="blas__sparse_8h.html#ad7b35ac9114bfe21e15d011bf878b164a2f5e42e04fbce66ae47fe91d9a31b52c">blas_unit_diag</a> =  132, 
+<a class="el" href="rsb__libspblas_8c.html#ad7b35ac9114bfe21e15d011bf878b164a3e6acad666ce6b542ac341569b83624f">blas_non_unit_diag</a> =  131, 
+<a class="el" href="rsb__libspblas_8c.html#ad7b35ac9114bfe21e15d011bf878b164a2f5e42e04fbce66ae47fe91d9a31b52c">blas_unit_diag</a> =  132, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#ad7b35ac9114bfe21e15d011bf878b164a3e6acad666ce6b542ac341569b83624f">blas_non_unit_diag</a> =  131, 
+<a class="el" href="rsb__libspblas_8h.html#ad7b35ac9114bfe21e15d011bf878b164a2f5e42e04fbce66ae47fe91d9a31b52c">blas_unit_diag</a> =  132
+<br/>
+ }</td></tr>
+<tr class="memitem:ac10de4d3a9ae38c876ec94ee7929e695"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="blas__sparse_8h.html#ac10de4d3a9ae38c876ec94ee7929e695">blas_side_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181">blas_left_side</a> =  141, 
+<a class="el" href="blas__sparse_8h.html#ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9">blas_right_side</a> =  142, 
+<a class="el" href="rsb__libspblas_8c.html#ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181">blas_left_side</a> =  141, 
+<a class="el" href="rsb__libspblas_8c.html#ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9">blas_right_side</a> =  142, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181">blas_left_side</a> =  141, 
+<a class="el" href="rsb__libspblas_8h.html#ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9">blas_right_side</a> =  142
+<br/>
+ }</td></tr>
+<tr class="memitem:a6ef40f4bf16a7f484390a20fdb55d3aa"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aa">blas_cmach_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa450c812108b1c81a0f6ef65c51f64d7b">blas_base</a> =  151, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa16a1c297dab1551cf40bbe5210395f10">blas_t</a> =  152, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3d5fea2fad72607b2368ace39fa89280">blas_rnd</a> =  153, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa640085acde3bcb1c78c42e9b5838c714">blas_ieee</a> =  154, 
+<br/>
+  <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaad93796f5d1a8bc7bb4d9512dc312e8df">blas_emin</a> =  155, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa26e73a26ce9e06149fff858bdfb5f363">blas_emax</a> =  156, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3e407f69dd9a70e04e91602a3d76ae4a">blas_eps</a> =  157, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaaadf329e788494c80e522348ef1210d9f">blas_prec</a> =  158, 
+<br/>
+  <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa4159c63ae4ee2275d8e09d02ecb85428">blas_underflow</a> =  159, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa51424a153ba5a72b4fb5018732bbaa02">blas_overflow</a> =  160, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa0a3cdfdc2ddd9ce036017d4c57aa941a">blas_sfmin</a> =  161, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa450c812108b1c81a0f6ef65c51f64d7b">blas_base</a> =  151, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa16a1c297dab1551cf40bbe5210395f10">blas_t</a> =  152, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3d5fea2fad72607b2368ace39fa89280">blas_rnd</a> =  153, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa640085acde3bcb1c78c42e9b5838c714">blas_ieee</a> =  154, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaad93796f5d1a8bc7bb4d9512dc312e8df">blas_emin</a> =  155, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa26e73a26ce9e06149fff858bdfb5f363">blas_emax</a> =  156, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3e407f69dd9a70e04e91602a3d76ae4a">blas_eps</a> =  157, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaaadf329e788494c80e522348ef1210d9f">blas_prec</a> =  158, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa4159c63ae4ee2275d8e09d02ecb85428">blas_underflow</a> =  159, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa51424a153ba5a72b4fb5018732bbaa02">blas_overflow</a> =  160, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa0a3cdfdc2ddd9ce036017d4c57aa941a">blas_sfmin</a> =  161, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa450c812108b1c81a0f6ef65c51f64d7b">blas_base</a> =  151, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa16a1c297dab1551cf40bbe5210395f10">blas_t</a> =  152, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3d5fea2fad72607b2368ace39fa89280">blas_rnd</a> =  153, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa640085acde3bcb1c78c42e9b5838c714">blas_ieee</a> =  154, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaad93796f5d1a8bc7bb4d9512dc312e8df">blas_emin</a> =  155, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa26e73a26ce9e06149fff858bdfb5f363">blas_emax</a> =  156, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3e407f69dd9a70e04e91602a3d76ae4a">blas_eps</a> =  157, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaaadf329e788494c80e522348ef1210d9f">blas_prec</a> =  158, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa4159c63ae4ee2275d8e09d02ecb85428">blas_underflow</a> =  159, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa51424a153ba5a72b4fb5018732bbaa02">blas_overflow</a> =  160, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa0a3cdfdc2ddd9ce036017d4c57aa941a">blas_sfmin</a> =  161
+<br/>
+ }</td></tr>
+<tr class="memitem:a07072da9995d9196d9176f56c784952b"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952b">blas_norm_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952bab661151b14ab3c58c0b3d335528db250">blas_one_norm</a> =  171, 
+<a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba6f826b18a3a197b97b228961fdab47b3">blas_real_one_norm</a> =  172, 
+<a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba57c558d28842a2b7b90df3a796fde77e">blas_two_norm</a> =  173, 
+<a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba607f3142e766379f65fecd8964e9a8ed">blas_frobenius_norm</a> =  174, 
+<br/>
+  <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952bab50c138192cb933e81550900a44d187c">blas_inf_norm</a> =  175, 
+<a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba1ff3a55280960c17e59d37500ab4eec5">blas_real_inf_norm</a> =  176, 
+<a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba6a806e7014a17f2b175780210e43d0cf">blas_max_norm</a> =  177, 
+<a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952bae48280621b0adfec78d7a180c1026953">blas_real_max_norm</a> =  178, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952bab661151b14ab3c58c0b3d335528db250">blas_one_norm</a> =  171, 
+<a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba6f826b18a3a197b97b228961fdab47b3">blas_real_one_norm</a> =  172, 
+<a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba57c558d28842a2b7b90df3a796fde77e">blas_two_norm</a> =  173, 
+<a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba607f3142e766379f65fecd8964e9a8ed">blas_frobenius_norm</a> =  174, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952bab50c138192cb933e81550900a44d187c">blas_inf_norm</a> =  175, 
+<a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba1ff3a55280960c17e59d37500ab4eec5">blas_real_inf_norm</a> =  176, 
+<a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba6a806e7014a17f2b175780210e43d0cf">blas_max_norm</a> =  177, 
+<a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952bae48280621b0adfec78d7a180c1026953">blas_real_max_norm</a> =  178, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952bab661151b14ab3c58c0b3d335528db250">blas_one_norm</a> =  171, 
+<a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba6f826b18a3a197b97b228961fdab47b3">blas_real_one_norm</a> =  172, 
+<a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba57c558d28842a2b7b90df3a796fde77e">blas_two_norm</a> =  173, 
+<a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba607f3142e766379f65fecd8964e9a8ed">blas_frobenius_norm</a> =  174, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952bab50c138192cb933e81550900a44d187c">blas_inf_norm</a> =  175, 
+<a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba1ff3a55280960c17e59d37500ab4eec5">blas_real_inf_norm</a> =  176, 
+<a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba6a806e7014a17f2b175780210e43d0cf">blas_max_norm</a> =  177, 
+<a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952bae48280621b0adfec78d7a180c1026953">blas_real_max_norm</a> =  178
+<br/>
+ }</td></tr>
+<tr class="memitem:a4a9825e92ac3a85e524c58283ac42c14"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="blas__sparse_8h.html#a4a9825e92ac3a85e524c58283ac42c14">blas_sort_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7">blas_increasing_order</a> =  181, 
+<a class="el" href="blas__sparse_8h.html#a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4">blas_decreasing_order</a> =  182, 
+<a class="el" href="rsb__libspblas_8c.html#a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7">blas_increasing_order</a> =  181, 
+<a class="el" href="rsb__libspblas_8c.html#a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4">blas_decreasing_order</a> =  182, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7">blas_increasing_order</a> =  181, 
+<a class="el" href="rsb__libspblas_8h.html#a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4">blas_decreasing_order</a> =  182
+<br/>
+ }</td></tr>
+<tr class="memitem:a125c156d54359fba48a6b9cf2a2d0a07"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1">blas_conj</a> =  191, 
+<a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a809495dc4e17c4b059c009bc90f00bf7">blas_no_conj</a> =  192, 
+<a class="el" href="rsb__libspblas_8c.html#a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1">blas_conj</a> =  191, 
+<a class="el" href="rsb__libspblas_8c.html#a125c156d54359fba48a6b9cf2a2d0a07a809495dc4e17c4b059c009bc90f00bf7">blas_no_conj</a> =  192, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1">blas_conj</a> =  191, 
+<a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a809495dc4e17c4b059c009bc90f00bf7">blas_no_conj</a> =  192
+<br/>
+ }</td></tr>
+<tr class="memitem:abdf3d2dd2387ff18e265347d2dfc1f04"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="blas__sparse_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04">blas_jrot_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a3c18fddd1929b245ab4b948b63d57b0a">blas_jrot_inner</a> =  201, 
+<a class="el" href="blas__sparse_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a1ab4a6e0e69cdaa540b3415617e1ea05">blas_jrot_outer</a> =  202, 
+<a class="el" href="blas__sparse_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a85c43836ee3a19a39f41d2001761e042">blas_jrot_sorted</a> =  203, 
+<a class="el" href="rsb__libspblas_8c.html#abdf3d2dd2387ff18e265347d2dfc1f04a3c18fddd1929b245ab4b948b63d57b0a">blas_jrot_inner</a> =  201, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#abdf3d2dd2387ff18e265347d2dfc1f04a1ab4a6e0e69cdaa540b3415617e1ea05">blas_jrot_outer</a> =  202, 
+<a class="el" href="rsb__libspblas_8c.html#abdf3d2dd2387ff18e265347d2dfc1f04a85c43836ee3a19a39f41d2001761e042">blas_jrot_sorted</a> =  203, 
+<a class="el" href="rsb__libspblas_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a3c18fddd1929b245ab4b948b63d57b0a">blas_jrot_inner</a> =  201, 
+<a class="el" href="rsb__libspblas_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a1ab4a6e0e69cdaa540b3415617e1ea05">blas_jrot_outer</a> =  202, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a85c43836ee3a19a39f41d2001761e042">blas_jrot_sorted</a> =  203
+<br/>
+ }</td></tr>
+<tr class="memitem:a8970170b9fd2a64eb18d9509ea624475"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475">blas_prec_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475aab04803ec917ea9ae8b4d40ed1cdc7c4">blas_prec_single</a> =  211, 
+<a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475af5e092268082a0306216cbad6d3d8b8a">blas_prec_double</a> =  212, 
+<a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475a63c139aa91e4f496acd6cfb85385d7d4">blas_prec_indigenous</a> =  213, 
+<a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475a2138d39c899dac6396f817c6cfdc91d9">blas_prec_extra</a> =  214, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a8970170b9fd2a64eb18d9509ea624475aab04803ec917ea9ae8b4d40ed1cdc7c4">blas_prec_single</a> =  211, 
+<a class="el" href="rsb__libspblas_8c.html#a8970170b9fd2a64eb18d9509ea624475af5e092268082a0306216cbad6d3d8b8a">blas_prec_double</a> =  212, 
+<a class="el" href="rsb__libspblas_8c.html#a8970170b9fd2a64eb18d9509ea624475a63c139aa91e4f496acd6cfb85385d7d4">blas_prec_indigenous</a> =  213, 
+<a class="el" href="rsb__libspblas_8c.html#a8970170b9fd2a64eb18d9509ea624475a2138d39c899dac6396f817c6cfdc91d9">blas_prec_extra</a> =  214, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a8970170b9fd2a64eb18d9509ea624475aab04803ec917ea9ae8b4d40ed1cdc7c4">blas_prec_single</a> =  211, 
+<a class="el" href="rsb__libspblas_8h.html#a8970170b9fd2a64eb18d9509ea624475af5e092268082a0306216cbad6d3d8b8a">blas_prec_double</a> =  212, 
+<a class="el" href="rsb__libspblas_8h.html#a8970170b9fd2a64eb18d9509ea624475a63c139aa91e4f496acd6cfb85385d7d4">blas_prec_indigenous</a> =  213, 
+<a class="el" href="rsb__libspblas_8h.html#a8970170b9fd2a64eb18d9509ea624475a2138d39c899dac6396f817c6cfdc91d9">blas_prec_extra</a> =  214
+<br/>
+ }</td></tr>
+<tr class="memitem:a3fe740ad5a139d723de260d638987e9e"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9ea86431e076106ab9784bc5b203d4aa3e0">blas_zero_base</a> =  221, 
+<a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> =  222, 
+<a class="el" href="rsb__libspblas_8c.html#a3fe740ad5a139d723de260d638987e9ea86431e076106ab9784bc5b203d4aa3e0">blas_zero_base</a> =  221, 
+<a class="el" href="rsb__libspblas_8c.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> =  222, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea86431e076106ab9784bc5b203d4aa3e0">blas_zero_base</a> =  221, 
+<a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> =  222
+<br/>
+ }</td></tr>
+<tr class="memitem:a7da08ccc1c4c7f5ff40768d502a6e63b"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63b">blas_symmetry_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866">blas_general</a> =  231, 
+<a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0">blas_symmetric</a> =  232, 
+<a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c">blas_hermitian</a> =  233, 
+<a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a">blas_triangular</a> =  234, 
+<br/>
+  <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">blas_lower_triangular</a> =  235, 
+<a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">blas_upper_triangular</a> =  236, 
+<a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a> =  237, 
+<a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">blas_upper_symmetric</a> =  238, 
+<br/>
+  <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">blas_lower_hermitian</a> =  239, 
+<a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">blas_upper_hermitian</a> =  240, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866">blas_general</a> =  231, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0">blas_symmetric</a> =  232, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c">blas_hermitian</a> =  233, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a">blas_triangular</a> =  234, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">blas_lower_triangular</a> =  235, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">blas_upper_triangular</a> =  236, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a> =  237, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">blas_upper_symmetric</a> =  238, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">blas_lower_hermitian</a> =  239, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">blas_upper_hermitian</a> =  240, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866">blas_general</a> =  231, 
+<a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0">blas_symmetric</a> =  232, 
+<a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c">blas_hermitian</a> =  233, 
+<a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a">blas_triangular</a> =  234, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">blas_lower_triangular</a> =  235, 
+<a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">blas_upper_triangular</a> =  236, 
+<a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a> =  237, 
+<a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">blas_upper_symmetric</a> =  238, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">blas_lower_hermitian</a> =  239, 
+<a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">blas_upper_hermitian</a> =  240
+<br/>
+ }</td></tr>
+<tr class="memitem:a09d8be749e909b403b1563f0ca84aef8"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8">blas_field_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8a1a77ce97fa91f37a776fe3af3f0589d8">blas_complex</a> =  241, 
+<a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8adf886a38a73b1de541eb9d32adb50a4d">blas_real</a> =  242, 
+<a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8a28a1eb1d9dde753641767cb33f7d5852">blas_double_precision</a> =  243, 
+<a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8aa4e3065824f579e62b15ba908e625df6">blas_single_precision</a> =  244, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a09d8be749e909b403b1563f0ca84aef8a1a77ce97fa91f37a776fe3af3f0589d8">blas_complex</a> =  241, 
+<a class="el" href="rsb__libspblas_8c.html#a09d8be749e909b403b1563f0ca84aef8adf886a38a73b1de541eb9d32adb50a4d">blas_real</a> =  242, 
+<a class="el" href="rsb__libspblas_8c.html#a09d8be749e909b403b1563f0ca84aef8a28a1eb1d9dde753641767cb33f7d5852">blas_double_precision</a> =  243, 
+<a class="el" href="rsb__libspblas_8c.html#a09d8be749e909b403b1563f0ca84aef8aa4e3065824f579e62b15ba908e625df6">blas_single_precision</a> =  244, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a09d8be749e909b403b1563f0ca84aef8a1a77ce97fa91f37a776fe3af3f0589d8">blas_complex</a> =  241, 
+<a class="el" href="rsb__libspblas_8h.html#a09d8be749e909b403b1563f0ca84aef8adf886a38a73b1de541eb9d32adb50a4d">blas_real</a> =  242, 
+<a class="el" href="rsb__libspblas_8h.html#a09d8be749e909b403b1563f0ca84aef8a28a1eb1d9dde753641767cb33f7d5852">blas_double_precision</a> =  243, 
+<a class="el" href="rsb__libspblas_8h.html#a09d8be749e909b403b1563f0ca84aef8aa4e3065824f579e62b15ba908e625df6">blas_single_precision</a> =  244
+<br/>
+ }</td></tr>
+<tr class="memitem:a540f6a907f9f5e49d84a65c530e598c6"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="blas__sparse_8h.html#a540f6a907f9f5e49d84a65c530e598c6">blas_size_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23">blas_num_rows</a> =  251, 
+<a class="el" href="blas__sparse_8h.html#a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b">blas_num_cols</a> =  252, 
+<a class="el" href="blas__sparse_8h.html#a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd">blas_num_nonzeros</a> =  253, 
+<a class="el" href="rsb__libspblas_8c.html#a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23">blas_num_rows</a> =  251, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b">blas_num_cols</a> =  252, 
+<a class="el" href="rsb__libspblas_8c.html#a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd">blas_num_nonzeros</a> =  253, 
+<a class="el" href="rsb__libspblas_8h.html#a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23">blas_num_rows</a> =  251, 
+<a class="el" href="rsb__libspblas_8h.html#a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b">blas_num_cols</a> =  252, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd">blas_num_nonzeros</a> =  253
+<br/>
+ }</td></tr>
+<tr class="memitem:a7cb10fb1b47b79ef278d6f09d571bd06"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06">blas_handle_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_invalid_handle</a> =  261, 
+<a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a0af06bd9167e03014cc95fffaa2901e5">blas_new_handle</a> =  262, 
+<a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a711ecc7da9546cf3ac76a29e297f3eb0">blas_open_handle</a> =  263, 
+<a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">blas_valid_handle</a> =  264, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_invalid_handle</a> =  261, 
+<a class="el" href="rsb__libspblas_8c.html#a7cb10fb1b47b79ef278d6f09d571bd06a0af06bd9167e03014cc95fffaa2901e5">blas_new_handle</a> =  262, 
+<a class="el" href="rsb__libspblas_8c.html#a7cb10fb1b47b79ef278d6f09d571bd06a711ecc7da9546cf3ac76a29e297f3eb0">blas_open_handle</a> =  263, 
+<a class="el" href="rsb__libspblas_8c.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">blas_valid_handle</a> =  264, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_invalid_handle</a> =  261, 
+<a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a0af06bd9167e03014cc95fffaa2901e5">blas_new_handle</a> =  262, 
+<a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a711ecc7da9546cf3ac76a29e297f3eb0">blas_open_handle</a> =  263, 
+<a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">blas_valid_handle</a> =  264
+<br/>
+ }</td></tr>
+<tr class="memitem:a3f95e19247de0359b56de195704e05a5"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5">blas_sparsity_optimization_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79">blas_regular</a> =  271, 
+<a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66">blas_irregular</a> =  272, 
+<a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa">blas_block</a> =  273, 
+<a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5">blas_unassembled</a> =  274, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79">blas_regular</a> =  271, 
+<a class="el" href="rsb__libspblas_8c.html#a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66">blas_irregular</a> =  272, 
+<a class="el" href="rsb__libspblas_8c.html#a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa">blas_block</a> =  273, 
+<a class="el" href="rsb__libspblas_8c.html#a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5">blas_unassembled</a> =  274, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79">blas_regular</a> =  271, 
+<a class="el" href="rsb__libspblas_8h.html#a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66">blas_irregular</a> =  272, 
+<a class="el" href="rsb__libspblas_8h.html#a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa">blas_block</a> =  273, 
+<a class="el" href="rsb__libspblas_8h.html#a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5">blas_unassembled</a> =  274
+<br/>
+ }</td></tr>
+<tr class="memitem:aee94244609acd12511418bfbf0a77729"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729">blas_rsb_ext_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0">blas_rsb_spmv_autotuning_on</a> =  6660, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7">blas_rsb_spmv_autotuning_off</a> =  6661, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff">blas_rsb_spmv_n_autotuning_on</a> =  6662, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0">blas_rsb_spmv_n_autotuning_off</a> =  6663, 
+<br/>
+  <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8">blas_rsb_spmv_t_autotuning_on</a> =  6664, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e">blas_rsb_spmv_t_autotuning_off</a> =  6665, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a> =  6666, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b">blas_rsb_rep_rsb</a> =  9995, 
+<br/>
+  <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878">blas_rsb_rep_csr</a> =  9996, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60">blas_rsb_rep_coo</a> =  9997, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f">blas_rsb_duplicates_ovw</a> =  9998, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a">blas_rsb_duplicates_sum</a> =  9999, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0">blas_rsb_spmv_autotuning_on</a> =  6660, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7">blas_rsb_spmv_autotuning_off</a> =  6661, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff">blas_rsb_spmv_n_autotuning_on</a> =  6662, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0">blas_rsb_spmv_n_autotuning_off</a> =  6663, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8">blas_rsb_spmv_t_autotuning_on</a> =  6664, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e">blas_rsb_spmv_t_autotuning_off</a> =  6665, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a> =  6666, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b">blas_rsb_rep_rsb</a> =  9995, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878">blas_rsb_rep_csr</a> =  9996, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60">blas_rsb_rep_coo</a> =  9997, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f">blas_rsb_duplicates_ovw</a> =  9998, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a">blas_rsb_duplicates_sum</a> =  9999, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0">blas_rsb_spmv_autotuning_on</a> =  6660, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7">blas_rsb_spmv_autotuning_off</a> =  6661, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff">blas_rsb_spmv_n_autotuning_on</a> =  6662, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0">blas_rsb_spmv_n_autotuning_off</a> =  6663, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8">blas_rsb_spmv_t_autotuning_on</a> =  6664, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e">blas_rsb_spmv_t_autotuning_off</a> =  6665, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a> =  6666, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b">blas_rsb_rep_rsb</a> =  9995, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878">blas_rsb_rep_csr</a> =  9996, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60">blas_rsb_rep_coo</a> =  9997, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f">blas_rsb_duplicates_ovw</a> =  9998, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a">blas_rsb_duplicates_sum</a> =  9999
+<br/>
+ }</td></tr>
+</table>
+<table class="memberdecls">
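
[Editor's note: the enumerations above are the descriptor constants of the
Sparse BLAS interface; the blas_rsb_ext_type values (6660..9999) are
librsb-specific extensions. A minimal sketch of how such constants are
consumed, assuming the standard property routines BLAS_ussp() (set) and
BLAS_usgp() (get), which are declared further down in blas_sparse.h and do
not appear in this excerpt:

    #include <blas_sparse.h>

    /* Hypothetical helper: open a double-precision handle and set
     * descriptor properties on it; properties are set after
     * BLAS_duscr_begin() and before assembly is finalized. */
    static blas_sparse_matrix begin_lower_tri(int n)
    {
        blas_sparse_matrix A = BLAS_duscr_begin(n, n);
        BLAS_ussp(A, blas_lower_triangular);       /* blas_symmetry_type */
        BLAS_ussp(A, blas_one_base);               /* blas_base_type     */
        BLAS_ussp(A, blas_rsb_spmv_autotuning_on); /* librsb extension   */
        return A; /* finalize later with BLAS_duscr_end() */
    }

BLAS_usgp(A, blas_num_nonzeros) would read a blas_size_type property back
from the same handle.]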
+<tr class="heading"><td colspan="2"><h2><a name="func-members"></a>
+Functions</h2></td></tr>
+<tr class="memitem:ga88a22a58b50ce89708abb232e4cbffcd"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga88a22a58b50ce89708abb232e4cbffcd">BLAS_susdot</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> conj, int nnz, const float *x, const int *indx, const float *y, int incy, float *r, enum <a class="el" href="blas__sparse_8 [...]
+<tr class="memitem:ga3d4d6df66fbbdfb8585770ce2ce37e6b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3d4d6df66fbbdfb8585770ce2ce37e6b">blas_susdot_</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> *conj, int *nnz, const float *x, const int *indx, const float *y, int *incy, float *r, enum <a class="el" href="blas__spa [...]
+<tr class="memitem:ga2ff8ae1b5a89cdb1bfd23b7b27635614"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2ff8ae1b5a89cdb1bfd23b7b27635614">BLAS_dusdot</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> conj, int nnz, const double *x, const int *indx, const double *y, int incy, double *r, enum <a class="el" href="blas__spars [...]
+<tr class="memitem:ga891919cc22b2f9db6b26c857e2080b48"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga891919cc22b2f9db6b26c857e2080b48">blas_dusdot_</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> *conj, int *nnz, const double *x, const int *indx, const double *y, int *incy, double *r, enum <a class="el" href="blas__ [...]
+<tr class="memitem:gae02711e85989d740894aa260028cab15"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae02711e85989d740894aa260028cab15">BLAS_cusdot</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> conj, int nnz, const void *x, const int *indx, const void *y, int incy, void *r, enum <a class="el" href="blas__sparse_8h.h [...]
+<tr class="memitem:ga6805ad5c8346534e68b436708920d135"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6805ad5c8346534e68b436708920d135">blas_cusdot_</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> *conj, int *nnz, const void *x, const int *indx, const void *y, int *incy, void *r, enum <a class="el" href="blas__sparse [...]
+<tr class="memitem:ga1baea6bd05a2117418d333f5365e34df"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1baea6bd05a2117418d333f5365e34df">BLAS_zusdot</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> conj, int nnz, const void *x, const int *indx, const void *y, int incy, void *r, enum <a class="el" href="blas__sparse_8h.h [...]
+<tr class="memitem:gaa9f54b685570087469d21462d089ef7d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa9f54b685570087469d21462d089ef7d">blas_zusdot_</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> *conj, int *nnz, const void *x, const int *indx, const void *y, int *incy, void *r, enum <a class="el" href="blas__sparse [...]
+<tr class="memitem:gaeedaef37cd7591d8b15bc7e8ee049414"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaeedaef37cd7591d8b15bc7e8ee049414">BLAS_susaxpy</a> (int nnz, float alpha, const float *x, const int *indx, float *y, int incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga863f07d7735eaa4fc0c6dbe1be09974e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga863f07d7735eaa4fc0c6dbe1be09974e">blas_susaxpy_</a> (int *nnz, float *alpha, const float *x, const int *indx, float *y, int *incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga31b475fb2cc3f50775a5b6db930ab570"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga31b475fb2cc3f50775a5b6db930ab570">BLAS_dusaxpy</a> (int nnz, double alpha, const double *x, const int *indx, double *y, int incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga90f1fe9fa99b947c8096befdbfb49fb3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga90f1fe9fa99b947c8096befdbfb49fb3">blas_dusaxpy_</a> (int *nnz, double *alpha, const double *x, const int *indx, double *y, int *incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gafaf15e2530cd078b260bb744e00487cb"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafaf15e2530cd078b260bb744e00487cb">BLAS_cusaxpy</a> (int nnz, const void *alpha, const void *x, const int *indx, void *y, int incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:gac6189fef9b94289f2b8a5b6b7287b50b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac6189fef9b94289f2b8a5b6b7287b50b">blas_cusaxpy_</a> (int *nnz, const void *alpha, const void *x, const int *indx, void *y, int *incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga20f8bb20cf00554547342750d80b2197"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga20f8bb20cf00554547342750d80b2197">BLAS_zusaxpy</a> (int nnz, const void *alpha, const void *x, const int *indx, void *y, int incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga58ad4724155b0cef43cdb7d95f879d8c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga58ad4724155b0cef43cdb7d95f879d8c">blas_zusaxpy_</a> (int *nnz, const void *alpha, const void *x, const int *indx, void *y, int *incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga40cdf6b61694154efa1ba8d180381827"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga40cdf6b61694154efa1ba8d180381827">BLAS_susga</a> (int nnz, const float *y, int incy, float *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga69bea2986de886f37a493464b1006456"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga69bea2986de886f37a493464b1006456">blas_susga_</a> (int *nnz, const float *y, int *incy, float *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gaa253fd591971e664e48e058e85855882"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa253fd591971e664e48e058e85855882">BLAS_dusga</a> (int nnz, const double *y, int incy, double *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga21d8b0bd816bfd21371f70ca82ee9d9c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga21d8b0bd816bfd21371f70ca82ee9d9c">blas_dusga_</a> (int *nnz, const double *y, int *incy, double *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga71f2df0176e5f44bf482ea2386ac5fac"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga71f2df0176e5f44bf482ea2386ac5fac">BLAS_cusga</a> (int nnz, const void *y, int incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga284485bb91904fe1324257ba1ab3a982"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga284485bb91904fe1324257ba1ab3a982">blas_cusga_</a> (int *nnz, const void *y, int *incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga2a29ab06d610d011109dd0c3da94992f"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2a29ab06d610d011109dd0c3da94992f">BLAS_zusga</a> (int nnz, const void *y, int incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga245af9e95488dece29876354c6e91fed"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga245af9e95488dece29876354c6e91fed">blas_zusga_</a> (int *nnz, const void *y, int *incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga2c53b81e979cbae6a5d198509f6d905a"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2c53b81e979cbae6a5d198509f6d905a">BLAS_susgz</a> (int nnz, float *y, int incy, float *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga74964bd95bd8945b13c7fe2c7f559e5c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga74964bd95bd8945b13c7fe2c7f559e5c">blas_susgz_</a> (int *nnz, float *y, int *incy, float *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga0b26bd51a324ee09433dbfa995396344"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga0b26bd51a324ee09433dbfa995396344">BLAS_dusgz</a> (int nnz, double *y, int incy, double *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:gadd448e0d4a33417634e6232c77d8a82a"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gadd448e0d4a33417634e6232c77d8a82a">blas_dusgz_</a> (int *nnz, double *y, int *incy, double *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga2a4c72eb85493e921f4d40e18edb83ef"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2a4c72eb85493e921f4d40e18edb83ef">BLAS_cusgz</a> (int nnz, void *y, int incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga32fdcc497a0db0ba36b413725ddc8c13"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga32fdcc497a0db0ba36b413725ddc8c13">blas_cusgz_</a> (int *nnz, void *y, int *incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga0d52a140d65ab78ee0c515c445b42451"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga0d52a140d65ab78ee0c515c445b42451">BLAS_zusgz</a> (int nnz, void *y, int incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga5a6be1c191d51a622b99fe1b9a776bdc"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5a6be1c191d51a622b99fe1b9a776bdc">blas_zusgz_</a> (int *nnz, void *y, int *incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gad58ff27808df2287b9cc77f6ed4d55ff"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad58ff27808df2287b9cc77f6ed4d55ff">BLAS_sussc</a> (int nnz, const float *x, float *y, int incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga3f88389831294ad45b84ec31313fbc15"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3f88389831294ad45b84ec31313fbc15">blas_sussc_</a> (int *nnz, const float *x, float *y, int *incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gac71029e615c6c893b54e2f9395a536a4"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac71029e615c6c893b54e2f9395a536a4">BLAS_dussc</a> (int nnz, const double *x, double *y, int incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga98ac28de307a8713020edd41be98d455"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga98ac28de307a8713020edd41be98d455">blas_dussc_</a> (int *nnz, const double *x, double *y, int *incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga1b93628d321fbb77a50f98b467a3ff84"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1b93628d321fbb77a50f98b467a3ff84">BLAS_cussc</a> (int nnz, const void *x, void *y, int incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:gafc77b392db05fc22122d4639595cccb3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafc77b392db05fc22122d4639595cccb3">blas_cussc_</a> (int *nnz, const void *x, void *y, int *incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gaad333ae644010e3b059190b98528c79d"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaad333ae644010e3b059190b98528c79d">BLAS_zussc</a> (int nnz, const void *x, void *y, int incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:gab89e9860df0ed52620651cfc607a987a"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gab89e9860df0ed52620651cfc607a987a">blas_zussc_</a> (int *nnz, const void *x, void *y, int *incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gafb4d039eb5319613ed30db7fb323278c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, float alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A,  [...]
+<tr class="memitem:ga651b1d1df5c964dbb21c1a5b14d7878b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga651b1d1df5c964dbb21c1a5b14d7878b">blas_susmv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, float *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> [...]
+<tr class="memitem:ga9a8f45ddd3c890a296239b212f0c033b"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, double alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, [...]
+<tr class="memitem:ga7172d1d1d0f3310ceaf9ecd1d128407b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga7172d1d1d0f3310ceaf9ecd1d128407b">blas_dusmv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, double *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a [...]
+<tr class="memitem:ga9ec2e63176f2d6b11ee48bb523b4f7c7"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</ [...]
+<tr class="memitem:ga3d60593a2a4ea8c081590b392c39419d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3d60593a2a4ea8c081590b392c39419d">blas_cusmv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matri [...]
+<tr class="memitem:ga1ee2eb4be4c1e0565051fe04ca7415a2"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</ [...]
+<tr class="memitem:ga6747bd2d7930018d8693a97a3eb2865c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6747bd2d7930018d8693a97a3eb2865c">blas_zusmv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matri [...]
+<tr class="memitem:gafc9acf48136458baa6ace90355e7abb2"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafc9acf48136458baa6ace90355e7abb2">BLAS_sussv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, float alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> T,  [...]
+<tr class="memitem:ga3b63c0a83f8088e60c8e609b451354f0"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3b63c0a83f8088e60c8e609b451354f0">blas_sussv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, float *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> [...]
+<tr class="memitem:gade1bbec9b8263a2a5e76112f1042576b"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gade1bbec9b8263a2a5e76112f1042576b">BLAS_dussv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, double alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> T, [...]
+<tr class="memitem:ga36f989895809beaafaa57bb5ab41347f"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga36f989895809beaafaa57bb5ab41347f">blas_dussv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, double *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a [...]
+<tr class="memitem:ga4c327ba1fa391b550f2fc5580ad49bdf"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4c327ba1fa391b550f2fc5580ad49bdf">BLAS_cussv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</ [...]
+<tr class="memitem:ga39b0ab077486c1fc3766d68ae9048447"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga39b0ab077486c1fc3766d68ae9048447">blas_cussv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matri [...]
+<tr class="memitem:ga7c1e740064369d0029cd627643eb841a"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga7c1e740064369d0029cd627643eb841a">BLAS_zussv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</ [...]
+<tr class="memitem:ga5d14a5df82e93614e8c524f6d20bb5c5"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5d14a5df82e93614e8c524f6d20bb5c5">blas_zussv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matri [...]
+<tr class="memitem:ga29c11c0c304637e89852359b0f8b10b5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga29c11c0c304637e89852359b0f8b10b5">BLAS_susmm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, int nrh [...]
+<tr class="memitem:ga2c1da8c4c1473a930ebfaa62f360ca8e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2c1da8c4c1473a930ebfaa62f360ca8e">blas_susmm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, int [...]
+<tr class="memitem:gaeeddeb634efe4448a31d62fb547362f6"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaeeddeb634efe4448a31d62fb547362f6">BLAS_dusmm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, int nrh [...]
+<tr class="memitem:gaa6f99d27ec6f88cca6c6cfac1e8ce7e3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa6f99d27ec6f88cca6c6cfac1e8ce7e3">blas_dusmm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, int [...]
+<tr class="memitem:ga8c87639294b57d2893cd29f64902a64d"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8c87639294b57d2893cd29f64902a64d">BLAS_cusmm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, int nrh [...]
+<tr class="memitem:ga2dc070f4b09c4b37d89ab9a0fb16352b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2dc070f4b09c4b37d89ab9a0fb16352b">blas_cusmm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, int [...]
+<tr class="memitem:ga88138db4545610d234d18d42237f36ee"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga88138db4545610d234d18d42237f36ee">BLAS_zusmm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, int nrh [...]
+<tr class="memitem:gaf7018fb638e25fe8b149d0cab4e844c0"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf7018fb638e25fe8b149d0cab4e844c0">blas_zusmm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, int [...]
+<tr class="memitem:ga3d7835bb3621aaf70787d72f86355f8d"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3d7835bb3621aaf70787d72f86355f8d">BLAS_sussm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, int nrh [...]
+<tr class="memitem:ga916f5af1f63f33a3a084accaf2dfd6f1"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga916f5af1f63f33a3a084accaf2dfd6f1">blas_sussm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, int [...]
+<tr class="memitem:gaad6ff4b3cce242f76362e6ad8a947713"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaad6ff4b3cce242f76362e6ad8a947713">BLAS_dussm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, int nrh [...]
+<tr class="memitem:ga4b93f6ef00d1aa3197a45a7e492edcd6"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4b93f6ef00d1aa3197a45a7e492edcd6">blas_dussm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, int [...]
+<tr class="memitem:gad864666e842f7d0878b1fb9d57e80c28"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad864666e842f7d0878b1fb9d57e80c28">BLAS_cussm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, int nrh [...]
+<tr class="memitem:gac3d8f0b6742566cbbadf6b18c9aa40b5"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac3d8f0b6742566cbbadf6b18c9aa40b5">blas_cussm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, int [...]
+<tr class="memitem:ga8602eae41f9e5248ff086087abe68bdf"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8602eae41f9e5248ff086087abe68bdf">BLAS_zussm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, int nrh [...]
+<tr class="memitem:ga60f808ded982233be9a4faaa5fb75db3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga60f808ded982233be9a4faaa5fb75db3">blas_zussm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, int [...]
+<tr class="memitem:gad9fe50c2e7a26e6ef83dfd3ea4cfcdd5"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad9fe50c2e7a26e6ef83dfd3ea4cfcdd5">BLAS_suscr_begin</a> (int m, int n)</td></tr>
+<tr class="memitem:gad2f7ede753754c2474d5460a92bba99e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad2f7ede753754c2474d5460a92bba99e">blas_suscr_begin_</a> (int *m, int *n, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gac931dcb1129ee3016ab82602c3d14fee"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac931dcb1129ee3016ab82602c3d14fee">BLAS_duscr_begin</a> (int m, int n)</td></tr>
+<tr class="memitem:gad7d5969e9edee49441fc89d22715e60d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad7d5969e9edee49441fc89d22715e60d">blas_duscr_begin_</a> (int *m, int *n, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga3483c364b4afec22621e46059b166247"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3483c364b4afec22621e46059b166247">BLAS_cuscr_begin</a> (int m, int n)</td></tr>
+<tr class="memitem:gaf4d21720c592de22cfd4139517d9d255"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf4d21720c592de22cfd4139517d9d255">blas_cuscr_begin_</a> (int *m, int *n, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga52b67393ad16e3d40e74fcdba88c7da4"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga52b67393ad16e3d40e74fcdba88c7da4">BLAS_zuscr_begin</a> (int m, int n)</td></tr>
+<tr class="memitem:gae0246836bd8d4b8697c6674998397f3a"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae0246836bd8d4b8697c6674998397f3a">blas_zuscr_begin_</a> (int *m, int *n, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga11c5559450e186c2a86d714f564411f3"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga11c5559450e186c2a86d714f564411f3">BLAS_suscr_block_begin</a> (int Mb, int Nb, int k, int l)</td></tr>
+<tr class="memitem:ga0067882e19affabebf581452a7c05252"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga0067882e19affabebf581452a7c05252">blas_suscr_block_begin_</a> (int *Mb, int *Nb, int *k, int *l, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gac0ca32cd2c78c8553d6d6b324e06ef59"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">BLAS_duscr_block_begin</a> (int Mb, int Nb, int k, int l)</td></tr>
+<tr class="memitem:ga864facf0316453a27af4b7024a11453b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga864facf0316453a27af4b7024a11453b">blas_duscr_block_begin_</a> (int *Mb, int *Nb, int *k, int *l, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga050218d0fa552a3e2c2d5452f876d9b5"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a> (int Mb, int Nb, int k, int l)</td></tr>
+<tr class="memitem:ga967bfc819ed66559e96ae55a6826d1f8"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga967bfc819ed66559e96ae55a6826d1f8">blas_cuscr_block_begin_</a> (int *Mb, int *Nb, int *k, int *l, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga5a261b2d1cc996c2a982ff8469faf286"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286">BLAS_zuscr_block_begin</a> (int Mb, int Nb, int k, int l)</td></tr>
+<tr class="memitem:ga62c3bd7ba1a96f82055478d40af67370"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga62c3bd7ba1a96f82055478d40af67370">blas_zuscr_block_begin_</a> (int *Mb, int *Nb, int *k, int *l, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gae7e006a448094a70204be60f24cdf1a3"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae7e006a448094a70204be60f24cdf1a3">BLAS_suscr_variable_block_begin</a> (int Mb, int Nb, const int *K, const int *L)</td></tr>
+<tr class="memitem:gaab267e13449c999ad8a8e3e358f4b2ed"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaab267e13449c999ad8a8e3e358f4b2ed">blas_suscr_variable_block_begin_</a> (int *Mb, int *Nb, const int *K, const int *L, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gae0c3c6dc5503e21afb8192efb0f66edd"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae0c3c6dc5503e21afb8192efb0f66edd">BLAS_duscr_variable_block_begin</a> (int Mb, int Nb, const int *K, const int *L)</td></tr>
+<tr class="memitem:ga12c7c1bdd46724147dbbd9b38dd2028e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga12c7c1bdd46724147dbbd9b38dd2028e">blas_duscr_variable_block_begin_</a> (int *Mb, int *Nb, const int *K, const int *L, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga95174fcf3bfbef91ab6b3b85fc90b128"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga95174fcf3bfbef91ab6b3b85fc90b128">BLAS_cuscr_variable_block_begin</a> (int Mb, int Nb, const int *K, const int *L)</td></tr>
+<tr class="memitem:ga24a2dac4570e6021fdcc5c84b52fb5bb"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga24a2dac4570e6021fdcc5c84b52fb5bb">blas_cuscr_variable_block_begin_</a> (int *Mb, int *Nb, const int *K, const int *L, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gaa582b369a0233027349f8f844cce7622"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa582b369a0233027349f8f844cce7622">BLAS_zuscr_variable_block_begin</a> (int Mb, int Nb, const int *K, const int *L)</td></tr>
+<tr class="memitem:gaa51253d1c144c8aa744b2e13742fec40"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa51253d1c144c8aa744b2e13742fec40">blas_zuscr_variable_block_begin_</a> (int *Mb, int *Nb, const int *K, const int *L, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga7176a90049256cb0e0fe45db66f57dd2"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga7176a90049256cb0e0fe45db66f57dd2">BLAS_suscr_end</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:ga5822f3be35eeb550c323de69ec9933d3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5822f3be35eeb550c323de69ec9933d3">blas_suscr_end_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga5d9ce97bf054b1e3750eaae5d4e6c335"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5d9ce97bf054b1e3750eaae5d4e6c335">BLAS_duscr_end</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:ga2120eb06b87f0e85d03a368e5bc55485"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2120eb06b87f0e85d03a368e5bc55485">blas_duscr_end_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gac2b5eccd5cf442b5e2e79201d62ca2b5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac2b5eccd5cf442b5e2e79201d62ca2b5">BLAS_cuscr_end</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:gaa78d3bef027e5a29ab5e5dd6188bcd75"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa78d3bef027e5a29ab5e5dd6188bcd75">blas_cuscr_end_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gad6315d71f6f7abf8b82c89c70d6abbf3"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">BLAS_zuscr_end</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:ga6c23466b531e84f472d5fa75228cb895"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6c23466b531e84f472d5fa75228cb895">blas_zuscr_end_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga26e2c422895e5df8492bdb561cab4a54"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga26e2c422895e5df8492bdb561cab4a54">BLAS_suscr_insert_entry</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, float val, int i, int j)</td></tr>
+<tr class="memitem:ga9b3085c739330bca518e8ef371f7d3b1"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9b3085c739330bca518e8ef371f7d3b1">blas_suscr_insert_entry_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, float *val, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga346ff5263bf0b3a5d7dda94e2000130c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga346ff5263bf0b3a5d7dda94e2000130c">BLAS_duscr_insert_entry</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, double val, int i, int j)</td></tr>
+<tr class="memitem:ga29c2f202a144845cc1d32c8d65bd5c5f"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga29c2f202a144845cc1d32c8d65bd5c5f">blas_duscr_insert_entry_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, double *val, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:gaa39564978ebda8a88f8d19e3e060bc4d"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa39564978ebda8a88f8d19e3e060bc4d">BLAS_cuscr_insert_entry</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *val, int i, int j)</td></tr>
+<tr class="memitem:ga6d735497bdd3bbafbb6168cb0fde5103"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6d735497bdd3bbafbb6168cb0fde5103">blas_cuscr_insert_entry_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *val, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga1ffe345c537b53ac5839da21b236d87c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1ffe345c537b53ac5839da21b236d87c">BLAS_zuscr_insert_entry</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *val, int i, int j)</td></tr>
+<tr class="memitem:gaad6627231dc4230affa318726ff3f345"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaad6627231dc4230affa318726ff3f345">blas_zuscr_insert_entry_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *val, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:gac6158601459aabebc22795864a2a62ba"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac6158601459aabebc22795864a2a62ba">BLAS_suscr_insert_entries</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int nnz, const float *val, const int *indx, const int *jndx)</td></tr>
+<tr class="memitem:ga9119b49fd049bcaa310bccb36fcda664"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9119b49fd049bcaa310bccb36fcda664">blas_suscr_insert_entries_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, const float *val, const int *indx, const int *jndx, int *istat)</td></tr>
+<tr class="memitem:gae0683bc8f0af5dd3e53b964190f9e1b4"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae0683bc8f0af5dd3e53b964190f9e1b4">BLAS_duscr_insert_entries</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int nnz, const double *val, const int *indx, const int *jndx)</td></tr>
+<tr class="memitem:gac2c1a4c7b2cebca56aedbad7a002e15f"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac2c1a4c7b2cebca56aedbad7a002e15f">blas_duscr_insert_entries_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, const double *val, const int *indx, const int *jndx, int *istat)</td></tr>
+<tr class="memitem:ga5af752a3fcb2898412f576eee7d9d618"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5af752a3fcb2898412f576eee7d9d618">BLAS_cuscr_insert_entries</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int nnz, const void *val, const int *indx, const int *jndx)</td></tr>
+<tr class="memitem:ga3deb906fcd5f9b9221b5865541c57d18"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3deb906fcd5f9b9221b5865541c57d18">blas_cuscr_insert_entries_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, const void *val, const int *indx, const int *jndx, int *istat)</td></tr>
+<tr class="memitem:gaacc9c9e5c95df4ea6656ad93f1f09666"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaacc9c9e5c95df4ea6656ad93f1f09666">BLAS_zuscr_insert_entries</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int nnz, const void *val, const int *indx, const int *jndx)</td></tr>
+<tr class="memitem:gad9ad3afc16fc0181117004fd46ff78ae"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad9ad3afc16fc0181117004fd46ff78ae">blas_zuscr_insert_entries_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, const void *val, const int *indx, const int *jndx, int *istat)</td></tr>
+<tr class="memitem:ga547d271038794dfc797aecc70e294761"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga547d271038794dfc797aecc70e294761">BLAS_suscr_insert_col</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int j, int nnz, const float *val, const int *indx)</td></tr>
+<tr class="memitem:ga2d8c691851acf099c25eff1a4c2885c1"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2d8c691851acf099c25eff1a4c2885c1">blas_suscr_insert_col_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *j, int *nnz, const float *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga8ee73d3b27bdc68e12c85ba281a337be"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8ee73d3b27bdc68e12c85ba281a337be">BLAS_duscr_insert_col</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int j, int nnz, const double *val, const int *indx)</td></tr>
+<tr class="memitem:ga5645393bb00d715d882e8e2d55c3f0d1"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5645393bb00d715d882e8e2d55c3f0d1">blas_duscr_insert_col_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *j, int *nnz, const double *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga589495aa8acd4eac99ef9132bc4062c9"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga589495aa8acd4eac99ef9132bc4062c9">BLAS_cuscr_insert_col</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int j, int nnz, const void *val, const int *indx)</td></tr>
+<tr class="memitem:ga1aadf4dc810ff6eb123a1bf9c859efe8"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1aadf4dc810ff6eb123a1bf9c859efe8">blas_cuscr_insert_col_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *j, int *nnz, const void *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga00cfdd3669b146b25d42a32f104ff8a3"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga00cfdd3669b146b25d42a32f104ff8a3">BLAS_zuscr_insert_col</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int j, int nnz, const void *val, const int *indx)</td></tr>
+<tr class="memitem:ga10a2dc6a5399459c83282bda757f5096"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga10a2dc6a5399459c83282bda757f5096">blas_zuscr_insert_col_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *j, int *nnz, const void *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga9b815fa125e3c84a6e6a6ead2c9ef87b"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9b815fa125e3c84a6e6a6ead2c9ef87b">BLAS_suscr_insert_row</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int nnz, const float *val, const int *indx)</td></tr>
+<tr class="memitem:ga71080ddbf0e0e602c7bc36993a6c88ca"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga71080ddbf0e0e602c7bc36993a6c88ca">blas_suscr_insert_row_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *nnz, const float *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:gac3472ca6b036771a68d6f5f01387e482"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac3472ca6b036771a68d6f5f01387e482">BLAS_duscr_insert_row</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int nnz, const double *val, const int *indx)</td></tr>
+<tr class="memitem:gaa72e5450302fa424dcd6cfae0bad872d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa72e5450302fa424dcd6cfae0bad872d">blas_duscr_insert_row_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *nnz, const double *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga77929c94cee3278cc7594a3f1377f5f8"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga77929c94cee3278cc7594a3f1377f5f8">BLAS_cuscr_insert_row</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int nnz, const void *val, const int *indx)</td></tr>
+<tr class="memitem:gad4acfbfdf33a5682ac657add0292711d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad4acfbfdf33a5682ac657add0292711d">blas_cuscr_insert_row_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *nnz, const void *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:gab52e13dc7c61fc48e593276f04cb2d30"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gab52e13dc7c61fc48e593276f04cb2d30">BLAS_zuscr_insert_row</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int nnz, const void *val, const int *indx)</td></tr>
+<tr class="memitem:gaf871e29bfce399dedbebe2aa9c7831df"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf871e29bfce399dedbebe2aa9c7831df">blas_zuscr_insert_row_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *nnz, const void *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga6e567e79f675ed861c8f446d0e7a78f5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6e567e79f675ed861c8f446d0e7a78f5">BLAS_suscr_insert_clique</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int k, const int l, const float *val, const int row_stride, const int col_stride, const int *indx, con [...]
+<tr class="memitem:gafcee9667fc445e32012c960fca7e698d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafcee9667fc445e32012c960fca7e698d">blas_suscr_insert_clique_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *k, const int *l, const float *val, const int *row_stride, const int *col_stride, const int *in [...]
+<tr class="memitem:ga290547e34be3648b2fe6a7378e59a7ec"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga290547e34be3648b2fe6a7378e59a7ec">BLAS_duscr_insert_clique</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int k, const int l, const double *val, const int row_stride, const int col_stride, const int *indx, co [...]
+<tr class="memitem:ga1f7870f8a1114b94444c721c933e8bef"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1f7870f8a1114b94444c721c933e8bef">blas_duscr_insert_clique_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *k, const int *l, const double *val, const int *row_stride, const int *col_stride, const int *i [...]
+<tr class="memitem:gaf089aaac5d65a4e38130b25d5ba2ba27"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf089aaac5d65a4e38130b25d5ba2ba27">BLAS_cuscr_insert_clique</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int k, const int l, const void *val, const int row_stride, const int col_stride, const int *indx, cons [...]
+<tr class="memitem:ga06acafbf28371b1ad8a75a85173261e6"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga06acafbf28371b1ad8a75a85173261e6">blas_cuscr_insert_clique_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *k, const int *l, const void *val, const int *row_stride, const int *col_stride, const int *ind [...]
+<tr class="memitem:ga52519d2caa1070b0c80ac3c6cb104d92"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga52519d2caa1070b0c80ac3c6cb104d92">BLAS_zuscr_insert_clique</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int k, const int l, const void *val, const int row_stride, const int col_stride, const int *indx, cons [...]
+<tr class="memitem:ga8c3430083655b74988536d823e40c723"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8c3430083655b74988536d823e40c723">blas_zuscr_insert_clique_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *k, const int *l, const void *val, const int *row_stride, const int *col_stride, const int *ind [...]
+<tr class="memitem:gaa682b478ac48e12d4a091977e8c45768"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa682b478ac48e12d4a091977e8c45768">BLAS_suscr_insert_block</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const float *val, int row_stride, int col_stride, int i, int j)</td></tr>
+<tr class="memitem:ga61080e2828351bd1585deb2713ed8a29"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga61080e2828351bd1585deb2713ed8a29">blas_suscr_insert_block_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const float *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga5d35aa3e27cdbf8a50db5b47ff5e0892"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5d35aa3e27cdbf8a50db5b47ff5e0892">BLAS_duscr_insert_block</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const double *val, int row_stride, int col_stride, int i, int j)</td></tr>
+<tr class="memitem:ga38012bbc4e99df72fb95409a4860ead7"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga38012bbc4e99df72fb95409a4860ead7">blas_duscr_insert_block_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const double *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga42054351f49850f079733143b2af87fb"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga42054351f49850f079733143b2af87fb">BLAS_cuscr_insert_block</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *val, int row_stride, int col_stride, int i, int j)</td></tr>
+<tr class="memitem:ga527ae15ee9e003d948494d9fcdad5dba"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga527ae15ee9e003d948494d9fcdad5dba">blas_cuscr_insert_block_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga573ee2ea89db4a133b8729abbb1223f0"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga573ee2ea89db4a133b8729abbb1223f0">BLAS_zuscr_insert_block</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *val, int row_stride, int col_stride, int i, int j)</td></tr>
+<tr class="memitem:gac3837cd5c7b2e8ac11c6c0e5cff8914c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac3837cd5c7b2e8ac11c6c0e5cff8914c">blas_zuscr_insert_block_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga2ff68116b5ae79c37bf335096de973c0"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2ff68116b5ae79c37bf335096de973c0">BLAS_uscr_end</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:ga60974067bf5367a9a3c6eaa9f6f8f4ab"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga60974067bf5367a9a3c6eaa9f6f8f4ab">blas_uscr_end_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga8b0cca8196f40f7b55084a978b40717f"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8b0cca8196f40f7b55084a978b40717f">BLAS_usds</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:gae4db91cffaf71632bd41b7423c64b757"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae4db91cffaf71632bd41b7423c64b757">blas_usds_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gae671b9fc06140680a8c104ef4f0f54f0"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae671b9fc06140680a8c104ef4f0f54f0">BLAS_susrows_scale</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const float *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</ [...]
+<tr class="memitem:ga9de54361f778577330c6c5ece88a63c3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9de54361f778577330c6c5ece88a63c3">blas_susrows_scale_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const float *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_typ [...]
+<tr class="memitem:ga01917c64887638dfb5226be1f87d964a"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga01917c64887638dfb5226be1f87d964a">BLAS_dusrows_scale</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const double *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type< [...]
+<tr class="memitem:ga9f09f9d05e01d5b354ce234781e3945a"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9f09f9d05e01d5b354ce234781e3945a">blas_dusrows_scale_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const double *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_ty [...]
+<tr class="memitem:gafc79de03622ceeb2e0b4343fe5904a36"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafc79de03622ceeb2e0b4343fe5904a36">BLAS_cusrows_scale</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a [...]
+<tr class="memitem:gae09ac29c14cede27a8d6a2be2687453e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae09ac29c14cede27a8d6a2be2687453e">blas_cusrows_scale_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type [...]
+<tr class="memitem:gad551879cdde6d16d9dd5b9edc647c667"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad551879cdde6d16d9dd5b9edc647c667">BLAS_zusrows_scale</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a [...]
+<tr class="memitem:ga806bb32c4231e4cd9d833370484ad369"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga806bb32c4231e4cd9d833370484ad369">blas_zusrows_scale_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type [...]
+<tr class="memitem:ga1113eda1c806ca3631fefde07624fbd6"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1113eda1c806ca3631fefde07624fbd6">BLAS_susget_diag</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, float *d)</td></tr>
+<tr class="memitem:ga0444e8a4b321bf1488fb496bdf3116d2"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga0444e8a4b321bf1488fb496bdf3116d2">blas_susget_diag_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, float *d, int *istat)</td></tr>
+<tr class="memitem:ga35b70a7c3083b791cf1b94cb20ef57be"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga35b70a7c3083b791cf1b94cb20ef57be">BLAS_dusget_diag</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, double *d)</td></tr>
+<tr class="memitem:ga7cfde04c833adeb887db75f4b2e104dd"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga7cfde04c833adeb887db75f4b2e104dd">blas_dusget_diag_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, double *d, int *istat)</td></tr>
+<tr class="memitem:ga4ec4b6dce3701c5803efa6b7455e1504"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4ec4b6dce3701c5803efa6b7455e1504">BLAS_cusget_diag</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *d)</td></tr>
+<tr class="memitem:ga4865a8fda031074a0d91cf5c548584b9"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4865a8fda031074a0d91cf5c548584b9">blas_cusget_diag_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *d, int *istat)</td></tr>
+<tr class="memitem:gad175937c05d3d05d3aa7fa35eb3028ec"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad175937c05d3d05d3aa7fa35eb3028ec">BLAS_zusget_diag</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *d)</td></tr>
+<tr class="memitem:ga73feb9adc685f7ff1d66763b0801a0f9"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga73feb9adc685f7ff1d66763b0801a0f9">blas_zusget_diag_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *d, int *istat)</td></tr>
+<tr class="memitem:gad84dbcdeda549e1b0361f7ade7a38b13"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad84dbcdeda549e1b0361f7ade7a38b13">BLAS_susget_rows_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int fr, int lr, int *nnzp)</td></tr>
+<tr class="memitem:ga1a8c39f41962e3be6ac84ea3be73f7a0"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1a8c39f41962e3be6ac84ea3be73f7a0">blas_susget_rows_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *fr, int *lr, int *nnzp, int *istat)</td></tr>
+<tr class="memitem:gab866cf0951b576a47da3864d668919f5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gab866cf0951b576a47da3864d668919f5">BLAS_dusget_rows_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int fr, int lr, int *nnzp)</td></tr>
+<tr class="memitem:gac09a79789dc8b79d2e5a375732703103"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac09a79789dc8b79d2e5a375732703103">blas_dusget_rows_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *fr, int *lr, int *nnzp, int *istat)</td></tr>
+<tr class="memitem:gac50e955d6e2bff77e2c3ac2146c77aaf"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac50e955d6e2bff77e2c3ac2146c77aaf">BLAS_cusget_rows_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int fr, int lr, int *nnzp)</td></tr>
+<tr class="memitem:ga9e11da08762387d8a7a885665298e815"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9e11da08762387d8a7a885665298e815">blas_cusget_rows_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *fr, int *lr, int *nnzp, int *istat)</td></tr>
+<tr class="memitem:gadeb3cbe1cc6987763a55665bcdb8aef5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gadeb3cbe1cc6987763a55665bcdb8aef5">BLAS_zusget_rows_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int fr, int lr, int *nnzp)</td></tr>
+<tr class="memitem:ga50cba1e236b63775110d6d1b292417da"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga50cba1e236b63775110d6d1b292417da">blas_zusget_rows_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *fr, int *lr, int *nnzp, int *istat)</td></tr>
+<tr class="memitem:ga8f78343207ff584d2d78789bd90e5533"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8f78343207ff584d2d78789bd90e5533">BLAS_susget_rows_sparse</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, float *VA, int *IA, int *JA, int *nnz, int fr, int lr)</td></tr>
+<tr class="memitem:ga0977f63d781215c826aa5a0ea2df9f47"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga0977f63d781215c826aa5a0ea2df9f47">blas_susget_rows_sparse_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, float *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)</td></tr>
+<tr class="memitem:ga498d143bae71d800dc35e2f1ee071359"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga498d143bae71d800dc35e2f1ee071359">BLAS_dusget_rows_sparse</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, double *VA, int *IA, int *JA, int *nnz, int fr, int lr)</td></tr>
+<tr class="memitem:gaf2e6ab2c5cbd23a7690bbe8e26794033"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf2e6ab2c5cbd23a7690bbe8e26794033">blas_dusget_rows_sparse_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, double *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)</td></tr>
+<tr class="memitem:ga23f0c1852e05a426d24d2eb1bcae168b"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga23f0c1852e05a426d24d2eb1bcae168b">BLAS_cusget_rows_sparse</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *VA, int *IA, int *JA, int *nnz, int fr, int lr)</td></tr>
+<tr class="memitem:ga04751c01dcfb6730a33eaa91f403dd09"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga04751c01dcfb6730a33eaa91f403dd09">blas_cusget_rows_sparse_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)</td></tr>
+<tr class="memitem:gaf9d44fc73526a4fdf9627424626bf4a5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf9d44fc73526a4fdf9627424626bf4a5">BLAS_zusget_rows_sparse</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *VA, int *IA, int *JA, int *nnz, int fr, int lr)</td></tr>
+<tr class="memitem:ga63f072aa25f7f7f8ac1ac4e32aae0c2e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga63f072aa25f7f7f8ac1ac4e32aae0c2e">blas_zusget_rows_sparse_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)</td></tr>
+<tr class="memitem:gafc031d78d0274c81039c2448a403cd10"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafc031d78d0274c81039c2448a403cd10">BLAS_susget_matrix_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int *nnz)</td></tr>
+<tr class="memitem:ga039a9d4da3423ea71726242e1c1251e7"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga039a9d4da3423ea71726242e1c1251e7">blas_susget_matrix_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, int *istat)</td></tr>
+<tr class="memitem:ga441bff94fdc50b9bf6e180d36f51c3ce"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga441bff94fdc50b9bf6e180d36f51c3ce">BLAS_dusget_matrix_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int *nnz)</td></tr>
+<tr class="memitem:ga3a4bc573dc07849e7a72ecb2d2f0c31d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3a4bc573dc07849e7a72ecb2d2f0c31d">blas_dusget_matrix_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, int *istat)</td></tr>
+<tr class="memitem:gafe27f3044269d37cadb569fc6796ac01"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafe27f3044269d37cadb569fc6796ac01">BLAS_cusget_matrix_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int *nnz)</td></tr>
+<tr class="memitem:ga19e30bb70673342b4d6308bd9cf46884"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga19e30bb70673342b4d6308bd9cf46884">blas_cusget_matrix_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, int *istat)</td></tr>
+<tr class="memitem:ga85e15d7a3331e8ed4d702908477e2896"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga85e15d7a3331e8ed4d702908477e2896">BLAS_zusget_matrix_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int *nnz)</td></tr>
+<tr class="memitem:ga9bdd048dea68ecbd8fd712349d4fbf13"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9bdd048dea68ecbd8fd712349d4fbf13">blas_zusget_matrix_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, int *istat)</td></tr>
+<tr class="memitem:gafc49f44b76021677000bebe7d7fe133b"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafc49f44b76021677000bebe7d7fe133b">BLAS_susget_infinity_norm</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, float *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type [...]
+<tr class="memitem:gaffaaf5b49e850adda0163b6bc082077d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaffaaf5b49e850adda0163b6bc082077d">blas_susget_infinity_norm_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, float *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_t [...]
+<tr class="memitem:ga39b4e25d5d5ce080f8dd994856e41fd0"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga39b4e25d5d5ce080f8dd994856e41fd0">BLAS_dusget_infinity_norm</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, double *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_typ [...]
+<tr class="memitem:ga15c7a93ed41a5488c0ef814d2061214a"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga15c7a93ed41a5488c0ef814d2061214a">blas_dusget_infinity_norm_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, double *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_ [...]
+<tr class="memitem:ga65e5bef193bd5a2d47e80bff7eebed8e"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga65e5bef193bd5a2d47e80bff7eebed8e">BLAS_cusget_infinity_norm</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type< [...]
+<tr class="memitem:gacefa288104224e6c8f069f4001dacc08"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gacefa288104224e6c8f069f4001dacc08">blas_cusget_infinity_norm_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_ty [...]
+<tr class="memitem:ga286c2cf2c749c80c8b71ff2f4bdb1566"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga286c2cf2c749c80c8b71ff2f4bdb1566">BLAS_zusget_infinity_norm</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type< [...]
+<tr class="memitem:ga01b88a27714ca87085421fd9a4f3e479"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga01b88a27714ca87085421fd9a4f3e479">blas_zusget_infinity_norm_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_ty [...]
+<tr class="memitem:gad3e05b01efa2857c0938ada63f30cadf"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad3e05b01efa2857c0938ada63f30cadf">BLAS_susset_elements</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int *ia, const int *ja, const float *va, int nnz)</td></tr>
+<tr class="memitem:gac0abb530fc46d610bf56e7fb1ef42c6c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac0abb530fc46d610bf56e7fb1ef42c6c">blas_susset_elements_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *ia, const int *ja, const float *va, int *nnz, int *istat)</td></tr>
+<tr class="memitem:gae34ff937437af99d317739192e2783da"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae34ff937437af99d317739192e2783da">BLAS_dusset_elements</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int *ia, const int *ja, const double *va, int nnz)</td></tr>
+<tr class="memitem:ga8e2acb49dac4221d1554c30238bd6747"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8e2acb49dac4221d1554c30238bd6747">blas_dusset_elements_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *ia, const int *ja, const double *va, int *nnz, int *istat)</td></tr>
+<tr class="memitem:ga3b358be87656e2d8065e1d30dd8060f4"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3b358be87656e2d8065e1d30dd8060f4">BLAS_cusset_elements</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int *ia, const int *ja, const void *va, int nnz)</td></tr>
+<tr class="memitem:ga38398053da29e668ee440e55f675532b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga38398053da29e668ee440e55f675532b">blas_cusset_elements_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *ia, const int *ja, const void *va, int *nnz, int *istat)</td></tr>
+<tr class="memitem:gac542af7517c9f667122e8bdc408487b3"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac542af7517c9f667122e8bdc408487b3">BLAS_zusset_elements</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int *ia, const int *ja, const void *va, int nnz)</td></tr>
+<tr class="memitem:ga156a8d0225d9761cd58e15e026b9ba2e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga156a8d0225d9761cd58e15e026b9ba2e">blas_zusset_elements_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *ia, const int *ja, const void *va, int *nnz, int *istat)</td></tr>
+<tr class="memitem:gaf17e549ec8cf353144ac1e3a1f080f46"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf17e549ec8cf353144ac1e3a1f080f46">BLAS_susset_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, float *v)</td></tr>
+<tr class="memitem:gab8c3e5745870d4399382051dcedad144"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gab8c3e5745870d4399382051dcedad144">blas_susset_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, float *v, int *istat)</td></tr>
+<tr class="memitem:gac8aa3ed1e29f2555519421290d236d0c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac8aa3ed1e29f2555519421290d236d0c">BLAS_dusset_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, double *v)</td></tr>
+<tr class="memitem:gab50cd8a5a6a5d866789628da0c9141a2"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gab50cd8a5a6a5d866789628da0c9141a2">blas_dusset_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, double *v, int *istat)</td></tr>
+<tr class="memitem:ga778acfebd02199f440b890b0176af19c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga778acfebd02199f440b890b0176af19c">BLAS_cusset_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, void *v)</td></tr>
+<tr class="memitem:ga4a32533889a4ed82a21f457d1253317d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4a32533889a4ed82a21f457d1253317d">blas_cusset_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, void *v, int *istat)</td></tr>
+<tr class="memitem:gaca954a070d476342e254587fc2faa7fd"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaca954a070d476342e254587fc2faa7fd">BLAS_zusset_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, void *v)</td></tr>
+<tr class="memitem:ga52efe19f0972fa51ac6329cf717b676c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga52efe19f0972fa51ac6329cf717b676c">blas_zusset_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, void *v, int *istat)</td></tr>
+<tr class="memitem:gad86989cd1f58003617f3db251b6fc0f1"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad86989cd1f58003617f3db251b6fc0f1">BLAS_susget_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, float *v)</td></tr>
+<tr class="memitem:gaac53e141083bc9871d81b587e5f785c1"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaac53e141083bc9871d81b587e5f785c1">blas_susget_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, float *v, int *istat)</td></tr>
+<tr class="memitem:gacf35fa073f6cc991efe75f6a012a9a04"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gacf35fa073f6cc991efe75f6a012a9a04">BLAS_dusget_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, double *v)</td></tr>
+<tr class="memitem:ga6443c32b223693698a8a0f0198ae4bee"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6443c32b223693698a8a0f0198ae4bee">blas_dusget_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, double *v, int *istat)</td></tr>
+<tr class="memitem:ga4c7eae1cfcd8cafc16f31b169c4a7514"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4c7eae1cfcd8cafc16f31b169c4a7514">BLAS_cusget_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, void *v)</td></tr>
+<tr class="memitem:ga104bc9ee1e6ce32012933e822019ecf0"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga104bc9ee1e6ce32012933e822019ecf0">blas_cusget_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, void *v, int *istat)</td></tr>
+<tr class="memitem:ga27417bc0d923f7288ed736837492275c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga27417bc0d923f7288ed736837492275c">BLAS_zusget_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, void *v)</td></tr>
+<tr class="memitem:ga845cca2b512e38b467fc0d4b93d660b7"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga845cca2b512e38b467fc0d4b93d660b7">blas_zusget_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, void *v, int *istat)</td></tr>
+<tr class="memitem:ga89577a4a63cc8659f1d463fb819bc002"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int pname)</td></tr>
+<tr class="memitem:ga852f4a68eef6963708d11f37e975b178"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int pname)</td></tr>
+<tr class="memitem:a7769e3aac9ffdba04f29dd1f8f57daa4"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="blas__sparse_8h.html#a7769e3aac9ffdba04f29dd1f8f57daa4">rsb_load_spblas_matrix_file_as_matrix_market</a> (const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *filename, <a class="el [...]
+<tr class="memitem:gac4d8c73e5d9faa85209bcc4e885d4ff1"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac4d8c73e5d9faa85209bcc4e885d4ff1">rsb_blas_get_mtx</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>This file specifies the Sparse BLAS interface to librsb. Supported types :(float,double,float complex,double complex) . Unsupported types:() . Level 1 ops :(dot,axpy,ga,gz,sc) . Level 2 ops :(mv,sv) . Level 3 ops :(mm,sm) . </p>
+<dl class="section author"><dt>Author</dt><dd>Michele Martone </dd></dl>
+</div><h2>Macro Definition Documentation</h2>
+<a class="anchor" id="aab00e94b9818e92bb03c32f7ec677932"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define BLAS_ENUM_H</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a5eec91b6d95962811bd9cb4e37266214"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define BLAS_usgp   rsb_wp__BLAS_usgp</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a6719ae77dfef6d6dd0790e34a65c1924"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define BLAS_ussp   rsb_wp__BLAS_ussp</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<h2>Typedef Documentation</h2>
+<a class="anchor" id="a6f56456b01e0cc6b25b81201aa67c163"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">typedef int <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>the sparse matrix descriptor type </p>
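+<p>Being a plain integer, the descriptor is copied and passed by value; a hypothetical helper illustrating the convention (names other than the BLAS calls are placeholders):</p>
+<pre class="fragment">
+/* hypothetical wrapper: y := y + alpha * A * x, for a double-precision A */
+static void my_spmv(blas_sparse_matrix A, double alpha, const double *x, double *y)
+{
+    BLAS_dusmv(blas_no_trans, alpha, A, x, 1, y, 1);  /* handle passed by value */
+}
+</pre>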
+
+</div>
+</div>
+<h2>Enumeration Type Documentation</h2>
+<a class="anchor" id="a3fe740ad5a139d723de260d638987e9e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Index base (valid at matrix build/modify time). </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a3fe740ad5a139d723de260d638987e9ea86431e076106ab9784bc5b203d4aa3e0"></a>blas_zero_base</em> </td><td>
+<p>Zero based indices (default when matrix created using the C interface). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad"></a>blas_one_base</em> </td><td>
+<p>One based indices (default when matrix created using the Fortran interface). </p>
+</td></tr>
+</table>
+</dd>
+</dl>
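+<p>A sketch of requesting one based indexing at build time (assuming the property is set with <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> between the <em>uscr_begin</em> and <em>uscr_end</em> calls; arrays and dimensions are placeholders):</p>
+<pre class="fragment">
+blas_sparse_matrix A = BLAS_duscr_begin(n, n);
+BLAS_ussp(A, blas_one_base);                   /* ia[]/ja[] entries are 1-based */
+BLAS_duscr_insert_entries(A, nnz, va, ia, ja);
+BLAS_duscr_end(A);
+</pre>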
+
+</div>
+</div>
+<a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aa"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aa">blas_cmach_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Unused/Unsupported. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa450c812108b1c81a0f6ef65c51f64d7b"></a>blas_base</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa16a1c297dab1551cf40bbe5210395f10"></a>blas_t</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa3d5fea2fad72607b2368ace39fa89280"></a>blas_rnd</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa640085acde3bcb1c78c42e9b5838c714"></a>blas_ieee</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaad93796f5d1a8bc7bb4d9512dc312e8df"></a>blas_emin</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa26e73a26ce9e06149fff858bdfb5f363"></a>blas_emax</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa3e407f69dd9a70e04e91602a3d76ae4a"></a>blas_eps</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaaadf329e788494c80e522348ef1210d9f"></a>blas_prec</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa4159c63ae4ee2275d8e09d02ecb85428"></a>blas_underflow</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa51424a153ba5a72b4fb5018732bbaa02"></a>blas_overflow</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa0a3cdfdc2ddd9ce036017d4c57aa941a"></a>blas_sfmin</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a125c156d54359fba48a6b9cf2a2d0a07"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Unused/Unsupported. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1"></a>blas_conj</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a125c156d54359fba48a6b9cf2a2d0a07a809495dc4e17c4b059c009bc90f00bf7"></a>blas_no_conj</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="ad7b35ac9114bfe21e15d011bf878b164"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#ad7b35ac9114bfe21e15d011bf878b164">blas_diag_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Specifies (<a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a>) or inquires (<a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a>) whether the diagonal of a matrix is (implicitly) unit or not. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="ad7b35ac9114bfe21e15d011bf878b164a3e6acad666ce6b542ac341569b83624f"></a>blas_non_unit_diag</em> </td><td>
+<p>Non unit diagonal matrix (the default). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ad7b35ac9114bfe21e15d011bf878b164a2f5e42e04fbce66ae47fe91d9a31b52c"></a>blas_unit_diag</em> </td><td>
+<p>Unit diagonal matrix. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
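+<p>A sketch of declaring an implicitly unit diagonal on a triangular matrix at build time, then inquiring about it (arrays and dimensions are placeholders; <code>blas_lower_triangular</code> is the usual Sparse BLAS triangularity property):</p>
+<pre class="fragment">
+blas_sparse_matrix T = BLAS_duscr_begin(n, n);
+BLAS_ussp(T, blas_lower_triangular);           /* only the strict lower part is stored... */
+BLAS_ussp(T, blas_unit_diag);                  /* ...the unit diagonal is implicit */
+BLAS_duscr_insert_entries(T, nnz, va, ia, ja);
+BLAS_duscr_end(T);
+if (BLAS_usgp(T, blas_unit_diag) == 1)
+    ;                                          /* the inquiry confirms the property */
+</pre>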
+
+</div>
+</div>
+<a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8">blas_field_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Numerical field type; can be used with <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> to inquire about a matrix's numerical type (1 is returned on success, 0 on failure). </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8a1a77ce97fa91f37a776fe3af3f0589d8"></a>blas_complex</em> </td><td>
+<p>Will succeed if matrix is of 'C' or 'Z' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8adf886a38a73b1de541eb9d32adb50a4d"></a>blas_real</em> </td><td>
+<p>Will succeed if matrix is of 'S' or 'D' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8a28a1eb1d9dde753641767cb33f7d5852"></a>blas_double_precision</em> </td><td>
+<p>Will succeed if matrix is of 'D' or 'Z' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8aa4e3065824f579e62b15ba908e625df6"></a>blas_single_precision</em> </td><td>
+<p>Will succeed if matrix is of 'S' or 'C' type. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
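+<p>Example of branching on a descriptor's numerical type via inquiry (1 means the property holds):</p>
+<pre class="fragment">
+if (BLAS_usgp(A, blas_complex) == 1)
+    ;  /* A holds 'C' or 'Z' data */
+else if (BLAS_usgp(A, blas_double_precision) == 1)
+    ;  /* not complex, double precision: A holds 'D' data */
+</pre>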
+
+</div>
+</div>
+<a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06">blas_handle_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The following values are not fully implemented; they are usable with <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a>. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7"></a>blas_invalid_handle</em> </td><td>
+<p>Used to check whether the handle is invalid. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06a0af06bd9167e03014cc95fffaa2901e5"></a>blas_new_handle</em> </td><td>
+<p>Will give 1 if the handle is new. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06a711ecc7da9546cf3ac76a29e297f3eb0"></a>blas_open_handle</em> </td><td>
+<p>Will give 1 if the handle is open. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6"></a>blas_valid_handle</em> </td><td>
+<p>Will give 1 if the handle is valid (that is, after <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d9ce97bf054b1e3750eaae5d4e6c335">BLAS_duscr_end</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">BLAS_zuscr_end</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#gac2b5eccd5cf442b5e2e79201d62ca2b5">BLAS_cuscr_end</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">BLAS_zuscr_end</a [...]
+</td></tr>
+</table>
+</dd>
+</dl>
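+<p>A sketch of the handle states across a descriptor's life cycle (assuming the inquiry returns 1 when the state matches; arrays and dimensions are placeholders):</p>
+<pre class="fragment">
+blas_sparse_matrix A = BLAS_duscr_begin(n, n);
+BLAS_usgp(A, blas_open_handle);                /* expected 1: assembly in progress */
+BLAS_duscr_insert_entries(A, nnz, va, ia, ja);
+BLAS_duscr_end(A);
+BLAS_usgp(A, blas_valid_handle);               /* expected 1: assembled and usable */
+</pre>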
+
+</div>
+</div>
+<a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04">blas_jrot_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Unused/Unsupported. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04a3c18fddd1929b245ab4b948b63d57b0a"></a>blas_jrot_inner</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04a1ab4a6e0e69cdaa540b3415617e1ea05"></a>blas_jrot_outer</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04a85c43836ee3a19a39f41d2001761e042"></a>blas_jrot_sorted</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a07072da9995d9196d9176f56c784952b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952b">blas_norm_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Unused/Unsupported. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952bab661151b14ab3c58c0b3d335528db250"></a>blas_one_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba6f826b18a3a197b97b228961fdab47b3"></a>blas_real_one_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba57c558d28842a2b7b90df3a796fde77e"></a>blas_two_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba607f3142e766379f65fecd8964e9a8ed"></a>blas_frobenius_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952bab50c138192cb933e81550900a44d187c"></a>blas_inf_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba1ff3a55280960c17e59d37500ab4eec5"></a>blas_real_inf_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba6a806e7014a17f2b175780210e43d0cf"></a>blas_max_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952bae48280621b0adfec78d7a180c1026953"></a>blas_real_max_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a9e6ec9e515f9d9b7e47110ae5f6ea04e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Used to specify a dense array's element layout. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a9e6ec9e515f9d9b7e47110ae5f6ea04ea635ab08ac28ae417e25c0d163c40f19d"></a>blas_rowmajor</em> </td><td>
+<p>Row major. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e"></a>blas_colmajor</em> </td><td>
+<p>Column major. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
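+<p>The layout flag tells the multi-vector (<em>mm</em>/<em>sm</em>) routines how the dense operands are stored; a sketch assuming the standard <code>BLAS_dusmm(order, transa, nrhs, alpha, A, b, ldb, c, ldc)</code> signature:</p>
+<pre class="fragment">
+/* B and C are n x nrhs dense blocks stored row by row, so the leading dimension is nrhs */
+BLAS_dusmm(blas_rowmajor, blas_no_trans, nrhs, 1.0, A, B, nrhs, C, nrhs);
+</pre>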
+
+</div>
+</div>
+<a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475">blas_prec_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Unused/Unsupported. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475aab04803ec917ea9ae8b4d40ed1cdc7c4"></a>blas_prec_single</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475af5e092268082a0306216cbad6d3d8b8a"></a>blas_prec_double</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475a63c139aa91e4f496acd6cfb85385d7d4"></a>blas_prec_indigenous</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475a2138d39c899dac6396f817c6cfdc91d9"></a>blas_prec_extra</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="aee94244609acd12511418bfbf0a77729"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729">blas_rsb_ext_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Properties suitable to be used with <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a>. None of these are in the Sparse BLAS standard. </p>
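+<p>For instance, thread autotuning of the multiply kernels can be requested on an assembled matrix before a long series of multiplies (a sketch; the property values are documented below):</p>
+<pre class="fragment">
+BLAS_ussp(A, blas_rsb_spmv_autotuning_on);   /* enable thread autotuning for the usmv kernels */
+/* ... many BLAS_dusmv() calls on A ... */
+BLAS_ussp(A, blas_rsb_spmv_autotuning_off);  /* disable further autotuning */
+</pre>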
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0"></a>blas_rsb_spmv_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. As an extension to t [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7"></a>blas_rsb_spmv_autotuning_off</em> </td><td>
+<p>Turn off executing threads autotuning for <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a class="el" h [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff"></a>blas_rsb_spmv_n_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for untransposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a  [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0"></a>blas_rsb_spmv_n_autotuning_off</em> </td><td>
+<p>Turn off executing threads autotuning for untransposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a  [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8"></a>blas_rsb_spmv_t_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for transposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a cl [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e"></a>blas_rsb_spmv_t_autotuning_off</em> </td><td>
+<p>Turn off executing threads autotuning for transposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a cl [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4"></a>blas_rsb_autotune_next_operation</em> </td><td>
+<p>Turn on executing threads autotuning for the next operation among <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv< [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b"></a>blas_rsb_rep_rsb</em> </td><td>
+<p>Request/check for RSB representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878"></a>blas_rsb_rep_csr</em> </td><td>
+<p>Request/check for CSR representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60"></a>blas_rsb_rep_coo</em> </td><td>
+<p>Request/check for COO representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f"></a>blas_rsb_duplicates_ovw</em> </td><td>
+<p>Request/check for duplicate nonzeroes overwriting policy. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a"></a>blas_rsb_duplicates_sum</em> </td><td>
+<p>Request/check for duplicate nonzeroes summation policy. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0"></a>blas_rsb_spmv_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. As an extension to t [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7"></a>blas_rsb_spmv_autotuning_off</em> </td><td>
+<p>Turn off executing threads autotuning for <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a class="el" h [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff"></a>blas_rsb_spmv_n_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for untransposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a  [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0"></a>blas_rsb_spmv_n_autotuning_off</em> </td><td>
+<p>Turn on executing threads autotuning for untransposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a  [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8"></a>blas_rsb_spmv_t_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for transposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a cl [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e"></a>blas_rsb_spmv_t_autotuning_off</em> </td><td>
+<p>Turn off executing threads autotuning for transposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a cl [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4"></a>blas_rsb_autotune_next_operation</em> </td><td>
+<p>Turn on executing threads autotuning for the next operation among <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv< [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b"></a>blas_rsb_rep_rsb</em> </td><td>
+<p>Request/check for RSB representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878"></a>blas_rsb_rep_csr</em> </td><td>
+<p>Request/check for CSR representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60"></a>blas_rsb_rep_coo</em> </td><td>
+<p>Request/check for COO representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f"></a>blas_rsb_duplicates_ovw</em> </td><td>
+<p>Request/check for duplicate nonzeroes overwriting policy. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a"></a>blas_rsb_duplicates_sum</em> </td><td>
+<p>Request/check for duplicate nonzeroes summation policy. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0"></a>blas_rsb_spmv_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. As an extension to t [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7"></a>blas_rsb_spmv_autotuning_off</em> </td><td>
+<p>Turn off executing threads autotuning for <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a class="el" h [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff"></a>blas_rsb_spmv_n_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for untransposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a  [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0"></a>blas_rsb_spmv_n_autotuning_off</em> </td><td>
+<p>Turn on executing threads autotuning for untransposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a  [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8"></a>blas_rsb_spmv_t_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for transposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a cl [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e"></a>blas_rsb_spmv_t_autotuning_off</em> </td><td>
+<p>Turn on executing threads autotuning for transposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a cl [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4"></a>blas_rsb_autotune_next_operation</em> </td><td>
+<p>Turn on executing threads autotuning for the next operation among <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv< [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b"></a>blas_rsb_rep_rsb</em> </td><td>
+<p>Request/check for RSB representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878"></a>blas_rsb_rep_csr</em> </td><td>
+<p>Request/check for CSR representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60"></a>blas_rsb_rep_coo</em> </td><td>
+<p>Request/check for COO representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f"></a>blas_rsb_duplicates_ovw</em> </td><td>
+<p>Request/check for duplicate nonzeroes overwriting policy. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a"></a>blas_rsb_duplicates_sum</em> </td><td>
+<p>Request/check for duplicate nonzeroes summation policy. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
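+<p>These <code>librsb</code> extension properties are set with <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a>. A minimal C sketch (hypothetical 2 x 2 matrix, zero-based indices; library initialization and error checking omitted; it is assumed here that the duplicates policy is set before insertion and the autotuning request on the assembled handle):</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+
+void extension_properties_sketch(void)
+{
+	blas_sparse_matrix A = BLAS_duscr_begin(2, 2); /* double precision, 2 x 2 */
+	BLAS_ussp(A, blas_rsb_duplicates_sum);         /* sum duplicate nonzeroes */
+	BLAS_duscr_insert_entry(A, 1.0, 0, 0);         /* inserted twice ...      */
+	BLAS_duscr_insert_entry(A, 1.0, 0, 0);         /* ... assembles to 2.0    */
+	BLAS_duscr_end(A);                             /* assemble the matrix     */
+	BLAS_ussp(A, blas_rsb_spmv_autotuning_on);     /* request SpMV autotuning */
+	BLAS_usds(A);                                  /* destroy the matrix      */
+}
+</pre>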
+<a class="anchor" id="ac10de4d3a9ae38c876ec94ee7929e695"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#ac10de4d3a9ae38c876ec94ee7929e695">blas_side_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Unused/Unsupported. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181"></a>blas_left_side</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9"></a>blas_right_side</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181"></a>blas_left_side</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9"></a>blas_right_side</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181"></a>blas_left_side</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9"></a>blas_right_side</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a540f6a907f9f5e49d84a65c530e598c6">blas_size_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Quantities that can be obtained via <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a>. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23"></a>blas_num_rows</em> </td><td>
+<p>Get the matrix rows count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b"></a>blas_num_cols</em> </td><td>
+<p>Get the matrix columns count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd"></a>blas_num_nonzeros</em> </td><td>
+<p>Get the matrix nonzeros count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23"></a>blas_num_rows</em> </td><td>
+<p>Get the matrix rows count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b"></a>blas_num_cols</em> </td><td>
+<p>Get the matrix columns count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd"></a>blas_num_nonzeros</em> </td><td>
+<p>Get the matrix nonzeros count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23"></a>blas_num_rows</em> </td><td>
+<p>Get the matrix rows count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b"></a>blas_num_cols</em> </td><td>
+<p>Get the matrix columns count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd"></a>blas_num_nonzeros</em> </td><td>
+<p>Get the matrix nonzeros count. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
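+<p>A minimal C sketch of querying these quantities with <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a>, assuming (as in the C interface) that the queried value is the return value; the handle <code>A</code> is hypothetical:</p>
+<pre class="fragment">
+#include &lt;stdio.h&gt;
+#include &lt;blas_sparse.h&gt;
+
+void print_matrix_quantities(blas_sparse_matrix A) /* A: an assembled matrix */
+{
+	const int nr  = BLAS_usgp(A, blas_num_rows);     /* rows count      */
+	const int nc  = BLAS_usgp(A, blas_num_cols);     /* columns count   */
+	const int nnz = BLAS_usgp(A, blas_num_nonzeros); /* nonzeroes count */
+	printf("%d x %d matrix with %d nonzeroes\n", nr, nc, nnz);
+}
+</pre>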
+<a class="anchor" id="a4a9825e92ac3a85e524c58283ac42c14"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a4a9825e92ac3a85e524c58283ac42c14">blas_sort_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Unused/Unsupported. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7"></a>blas_increasing_order</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4"></a>blas_decreasing_order</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7"></a>blas_increasing_order</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4"></a>blas_decreasing_order</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7"></a>blas_increasing_order</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4"></a>blas_decreasing_order</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a3f95e19247de0359b56de195704e05a5"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5">blas_sparsity_optimization_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The following are usable with <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79"></a>blas_regular</em> </td><td>
+<p>Querying via BLAS_usgp will give 0. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66"></a>blas_irregular</em> </td><td>
+<p>Querying via BLAS_usgp will give 1. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa"></a>blas_block</em> </td><td>
+<p>Querying via BLAS_usgp will give 0. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5"></a>blas_unassembled</em> </td><td>
+<p>Complementary to <a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">blas_valid_handle</a>. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79"></a>blas_regular</em> </td><td>
+<p>Will give 0. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66"></a>blas_irregular</em> </td><td>
+<p>Will give 1. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa"></a>blas_block</em> </td><td>
+<p>Will give 0. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5"></a>blas_unassembled</em> </td><td>
+<p>Complementary to <a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">blas_valid_handle</a>. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79"></a>blas_regular</em> </td><td>
+<p>Will give 0. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66"></a>blas_irregular</em> </td><td>
+<p>Will give 1. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa"></a>blas_block</em> </td><td>
+<p>Will give 0. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5"></a>blas_unassembled</em> </td><td>
+<p>Complementary to <a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">blas_valid_handle</a>. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63b">blas_symmetry_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Symmetry properties. Unless specified otherwise, valid for both <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> and <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a>. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866"></a>blas_general</em> </td><td>
+<p>General unsymmetric matrix (default). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0"></a>blas_symmetric</em> </td><td>
+<p>Symmetric matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">blas_upper_symmetric</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c"></a>blas_hermitian</em> </td><td>
+<p>Hermitian matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">blas_lower_hermitian</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">blas_upper_hermitian</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a"></a>blas_triangular</em> </td><td>
+<p>Triangular matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">blas_lower_triangular</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">blas_upper_triangular</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae"></a>blas_lower_triangular</em> </td><td>
+<p>Lower triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533"></a>blas_upper_triangular</em> </td><td>
+<p>Upper triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487"></a>blas_lower_symmetric</em> </td><td>
+<p>Lower symmetric matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de"></a>blas_upper_symmetric</em> </td><td>
+<p>Upper symmetric matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1"></a>blas_lower_hermitian</em> </td><td>
+<p>Lower hermitian matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe"></a>blas_upper_hermitian</em> </td><td>
+<p>Upper hermitian matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866"></a>blas_general</em> </td><td>
+<p>General unsymmetric matrix (default). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0"></a>blas_symmetric</em> </td><td>
+<p>Symmetric matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">blas_upper_symmetric</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c"></a>blas_hermitian</em> </td><td>
+<p>Hermitian matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">blas_lower_hermitian</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">blas_upper_hermitian</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a"></a>blas_triangular</em> </td><td>
+<p>Triangular matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">blas_lower_triangular</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">blas_upper_triangular</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae"></a>blas_lower_triangular</em> </td><td>
+<p>Lower triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533"></a>blas_upper_triangular</em> </td><td>
+<p>Upper triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487"></a>blas_lower_symmetric</em> </td><td>
+<p>Lower symmetric matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de"></a>blas_upper_symmetric</em> </td><td>
+<p>Upper symmetric matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1"></a>blas_lower_hermitian</em> </td><td>
+<p>Lower hermitian matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe"></a>blas_upper_hermitian</em> </td><td>
+<p>Upper hermitian matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866"></a>blas_general</em> </td><td>
+<p>General unsymmetric matrix (default). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0"></a>blas_symmetric</em> </td><td>
+<p>Symmetric matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">blas_upper_symmetric</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c"></a>blas_hermitian</em> </td><td>
+<p>Hermitian matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">blas_lower_hermitian</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">blas_upper_hermitian</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a"></a>blas_triangular</em> </td><td>
+<p>Triangular matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">blas_lower_triangular</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">blas_upper_triangular</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae"></a>blas_lower_triangular</em> </td><td>
+<p>Lower triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533"></a>blas_upper_triangular</em> </td><td>
+<p>Upper triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487"></a>blas_lower_symmetric</em> </td><td>
+<p>Lower symmetric matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de"></a>blas_upper_symmetric</em> </td><td>
+<p>Upper symmetric matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1"></a>blas_lower_hermitian</em> </td><td>
+<p>Lower hermitian matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe"></a>blas_upper_hermitian</em> </td><td>
+<p>Upper hermitian matrix. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
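+<p>A minimal C sketch of declaring symmetry at build time (hypothetical 2 x 2 matrix; only the lower triangle is inserted; error checking omitted):</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+
+void symmetry_sketch(void)
+{
+	blas_sparse_matrix A = BLAS_duscr_begin(2, 2);
+	BLAS_ussp(A, blas_lower_symmetric);    /* declare before inserting */
+	BLAS_duscr_insert_entry(A, 4.0, 0, 0); /* diagonal                 */
+	BLAS_duscr_insert_entry(A, 1.0, 1, 0); /* strictly lower part      */
+	BLAS_duscr_insert_entry(A, 3.0, 1, 1); /* diagonal                 */
+	BLAS_duscr_end(A);
+	/* BLAS_usgp(A, blas_symmetric) can now be used to check symmetry. */
+	BLAS_usds(A);
+}
+</pre>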
+<a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Used to specify a transposition operator to a matrix operand. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f"></a>blas_no_trans</em> </td><td>
+<p>No transposition. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54"></a>blas_trans</em> </td><td>
+<p>Transposition. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c"></a>blas_conj_trans</em> </td><td>
+<p>Transposition and conjugation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f"></a>blas_no_trans</em> </td><td>
+<p>No transposition. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54"></a>blas_trans</em> </td><td>
+<p>Transposition. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c"></a>blas_conj_trans</em> </td><td>
+<p>Transposition and conjugation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f"></a>blas_no_trans</em> </td><td>
+<p>No transposition. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54"></a>blas_trans</em> </td><td>
+<p>Transposition. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c"></a>blas_conj_trans</em> </td><td>
+<p>Transposition and conjugation. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
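+<p>A minimal C sketch of the operator's effect in a multiply, following the standard <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a> signature (the handle <code>A</code> is assumed to be an assembled 2 x 2 double-precision matrix):</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+
+void transposition_sketch(blas_sparse_matrix A) /* A: assembled, 2 x 2 */
+{
+	double x[2] = { 1.0, 1.0 };
+	double y[2] = { 0.0, 0.0 };
+	/* y := y + 1.0 * A * x   (no transposition) */
+	BLAS_dusmv(blas_no_trans, 1.0, A, x, 1, y, 1);
+	/* y := y + 1.0 * A^T * x (transposed operand) */
+	BLAS_dusmv(blas_trans, 1.0, A, x, 1, y, 1);
+	/* blas_conj_trans additionally conjugates (for complex types). */
+}
+</pre>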
+<a class="anchor" id="acc2b26a405868ca1bd8a18e0eb62e820"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#acc2b26a405868ca1bd8a18e0eb62e820">blas_uplo_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Specifies (<a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a>) or inquires about (<a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a>) the upper or lower triangularity of a matrix. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07"></a>blas_upper</em> </td><td>
+<p>Upper triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26"></a>blas_lower</em> </td><td>
+<p>Lower triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07"></a>blas_upper</em> </td><td>
+<p>Upper triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26"></a>blas_lower</em> </td><td>
+<p>Lower triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07"></a>blas_upper</em> </td><td>
+<p>Upper triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26"></a>blas_lower</em> </td><td>
+<p>Lower triangular matrix. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
+<h2>Function Documentation</h2>
+<a class="anchor" id="a7769e3aac9ffdba04f29dd1f8f57daa4"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> rsb_load_spblas_matrix_file_as_matrix_market </td>
+          <td>(</td>
+          <td class="paramtype">const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> * </td>
+          <td class="paramname"><em>filename</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> </td>
+          <td class="paramname"><em>typecode</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Loads a BLAS Sparse matrix from a Matrix Market file. This is a <code>librsb</code> extension.</p>
+<p>Sets either the blas_upper_triangular, blas_lower_triangular, blas_upper_hermitian, blas_lower_hermitian, blas_upper_symmetric, or blas_lower_symmetric property, according to the loaded file.</p>
+
+</div>
+</div>
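+<p>A minimal C usage sketch (hypothetical file name <code>"A.mtx"</code>; typecode macro <code>RSB_NUMERICAL_TYPE_DOUBLE</code> from <code>&lt;rsb.h&gt;</code>; it is assumed here that an invalid handle is returned on failure; library initialization omitted):</p>
+<pre class="fragment">
+#include &lt;rsb.h&gt;         /* rsb_type_t, RSB_NUMERICAL_TYPE_DOUBLE */
+#include &lt;blas_sparse.h&gt; /* blas_sparse_matrix, BLAS_usds         */
+
+void load_sketch(void)
+{
+	blas_sparse_matrix A = rsb_load_spblas_matrix_file_as_matrix_market(
+		"A.mtx", RSB_NUMERICAL_TYPE_DOUBLE);
+	if (A != blas_invalid_handle)
+	{
+		/* ... use A with the usual Sparse BLAS calls ... */
+		BLAS_usds(A); /* free the matrix */
+	}
+}
+</pre>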
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:21 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/classblas__sparse.html b/doc/html/classblas__sparse.html
new file mode 100644
index 0000000..5468898
--- /dev/null
+++ b/doc/html/classblas__sparse.html
@@ -0,0 +1,5523 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: blas_sparse Module Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#nested-classes">Data Structures</a> |
+<a href="#pub-methods">Public Member Functions</a> |
+<a href="#pub-attribs">Data Fields</a>  </div>
+  <div class="headertitle">
+<div class="title">blas_sparse Module Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="nested-classes"></a>
+Data Structures</h2></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1rsb__blas__get__mtx.html">rsb_blas_get_mtx</a></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__block.html">uscr_insert_block</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">inserts a dense block  <a href="interfaceblas__sparse_1_1uscr__insert__block.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__clique.html">uscr_insert_clique</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">inserts a clique  <a href="interfaceblas__sparse_1_1uscr__insert__clique.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__col.html">uscr_insert_col</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">inserts a sparse column  <a href="interfaceblas__sparse_1_1uscr__insert__col.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__entries.html">uscr_insert_entries</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">inserts multiple entries  <a href="interfaceblas__sparse_1_1uscr__insert__entries.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__entry.html">uscr_insert_entry</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">A Sparse BLAS interface for RSB.  <a href="interfaceblas__sparse_1_1uscr__insert__entry.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__row.html">uscr_insert_row</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">inserts a sparse row  <a href="interfaceblas__sparse_1_1uscr__insert__row.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1usmm.html">usmm</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">multiplication : c <- beta c + alpha A b  <a href="interfaceblas__sparse_1_1usmm.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1usmv.html">usmv</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">multiplication : c <- beta c + alpha A b  <a href="interfaceblas__sparse_1_1usmv.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1ussm.html">ussm</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">triangular solve: b <- alpha A^-1 b  <a href="interfaceblas__sparse_1_1ussm.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1ussv.html">ussv</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">triangular solve: b <- alpha A^-1 b  <a href="interfaceblas__sparse_1_1ussv.html#details">More...</a><br/></td></tr>
+</table><table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a8a3b6cd055048ab5e15b1b18be291f32"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a8a3b6cd055048ab5e15b1b18be291f32">usds</a> (A, istat)</td></tr>
+<tr class="memdesc:a8a3b6cd055048ab5e15b1b18be291f32"><td class="mdescLeft"> </td><td class="mdescRight">Destroys a matrix.  <a href="#a8a3b6cd055048ab5e15b1b18be291f32"></a><br/></td></tr>
+<tr class="memitem:a48f1e1b82322910d45a1b2455421745f"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a48f1e1b82322910d45a1b2455421745f">uscr_end</a> (A, istat)</td></tr>
+<tr class="memdesc:a48f1e1b82322910d45a1b2455421745f"><td class="mdescLeft"> </td><td class="mdescRight">Makes an assembled matrix out of a matrix in build state. After this, it is not possible anymore to insert nonzeroes, but computational routines.  <a href="#a48f1e1b82322910d45a1b2455421745f"></a><br/></td></tr>
+<tr class="memitem:a1e0eb1ccd8ffbf49baefe455a248f7fe"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a1e0eb1ccd8ffbf49baefe455a248f7fe">usgp</a> (A, pname, istat)</td></tr>
+<tr class="memdesc:a1e0eb1ccd8ffbf49baefe455a248f7fe"><td class="mdescLeft"> </td><td class="mdescRight">Get a matrix property.  <a href="#a1e0eb1ccd8ffbf49baefe455a248f7fe"></a><br/></td></tr>
+<tr class="memitem:a469df92a4d25a9554fb1d79cdac1de84"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a469df92a4d25a9554fb1d79cdac1de84">ussp</a> (A, pname, istat)</td></tr>
+<tr class="memdesc:a469df92a4d25a9554fb1d79cdac1de84"><td class="mdescLeft"> </td><td class="mdescRight">Set a matrix property. Should be called just after creation, before nonzeroes insertion.  <a href="#a469df92a4d25a9554fb1d79cdac1de84"></a><br/></td></tr>
+<tr class="memitem:ae78739e1ebe48fe8b9752a43cd5c15a0"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#ae78739e1ebe48fe8b9752a43cd5c15a0">suscr_begin</a> (m, n, A, istat)</td></tr>
+<tr class="memdesc:ae78739e1ebe48fe8b9752a43cd5c15a0"><td class="mdescLeft"> </td><td class="mdescRight">Allocates an empty matrix (A) and leaves it in build state.  <a href="#ae78739e1ebe48fe8b9752a43cd5c15a0"></a><br/></td></tr>
+<tr class="memitem:acf14608f8b0375ca133b7f850bde3b50"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#acf14608f8b0375ca133b7f850bde3b50">duscr_begin</a> (m, n, A, istat)</td></tr>
+<tr class="memdesc:acf14608f8b0375ca133b7f850bde3b50"><td class="mdescLeft"> </td><td class="mdescRight">Allocates an empty matrix (A) and leaves it in build state.  <a href="#acf14608f8b0375ca133b7f850bde3b50"></a><br/></td></tr>
+<tr class="memitem:af4e9f97f85799c5e8f60c78d40d906f3"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#af4e9f97f85799c5e8f60c78d40d906f3">cuscr_begin</a> (m, n, A, istat)</td></tr>
+<tr class="memdesc:af4e9f97f85799c5e8f60c78d40d906f3"><td class="mdescLeft"> </td><td class="mdescRight">Allocates an empty matrix (A) and leaves it in build state.  <a href="#af4e9f97f85799c5e8f60c78d40d906f3"></a><br/></td></tr>
+<tr class="memitem:a9ec8326625fe0762e3e6e523260d2655"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a9ec8326625fe0762e3e6e523260d2655">zuscr_begin</a> (m, n, A, istat)</td></tr>
+<tr class="memdesc:a9ec8326625fe0762e3e6e523260d2655"><td class="mdescLeft"> </td><td class="mdescRight">Allocates an empty matrix (A) and leaves it in build state.  <a href="#a9ec8326625fe0762e3e6e523260d2655"></a><br/></td></tr>
+<tr class="memitem:a8ccdce913bf1b8a1d30b6889611143cb"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a8ccdce913bf1b8a1d30b6889611143cb">suscr_block_begin</a> (Mb, Nb, k, l, A, istat)</td></tr>
+<tr class="memdesc:a8ccdce913bf1b8a1d30b6889611143cb"><td class="mdescLeft"> </td><td class="mdescRight">Allocates an empty matrix (A) and leaves it in build state.  <a href="#a8ccdce913bf1b8a1d30b6889611143cb"></a><br/></td></tr>
+<tr class="memitem:ab33c2f497f0a53213f38cd8449ab4349"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#ab33c2f497f0a53213f38cd8449ab4349">duscr_block_begin</a> (Mb, Nb, k, l, A, istat)</td></tr>
+<tr class="memdesc:ab33c2f497f0a53213f38cd8449ab4349"><td class="mdescLeft"> </td><td class="mdescRight">Allocates an empty matrix (A) and leaves it in build state.  <a href="#ab33c2f497f0a53213f38cd8449ab4349"></a><br/></td></tr>
+<tr class="memitem:a6085ddf99c2459e051a6106e4a2c4785"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a6085ddf99c2459e051a6106e4a2c4785">cuscr_block_begin</a> (Mb, Nb, k, l, A, istat)</td></tr>
+<tr class="memdesc:a6085ddf99c2459e051a6106e4a2c4785"><td class="mdescLeft"> </td><td class="mdescRight">Allocates an empty matrix (A) and leaves it in build state.  <a href="#a6085ddf99c2459e051a6106e4a2c4785"></a><br/></td></tr>
+<tr class="memitem:a5fbd2bae9f3849fda1be4691ca3df5ea"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a5fbd2bae9f3849fda1be4691ca3df5ea">zuscr_block_begin</a> (Mb, Nb, k, l, A, istat)</td></tr>
+<tr class="memdesc:a5fbd2bae9f3849fda1be4691ca3df5ea"><td class="mdescLeft"> </td><td class="mdescRight">Allocates an empty matrix (A) and leaves it in build state.  <a href="#a5fbd2bae9f3849fda1be4691ca3df5ea"></a><br/></td></tr>
+<tr class="memitem:aab5942faf7f9fe31f9dfd13143f37dc7"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#aab5942faf7f9fe31f9dfd13143f37dc7">suscr_variable_block_begin</a> (Mb, Nb, K, L, A, istat)</td></tr>
+<tr class="memdesc:aab5942faf7f9fe31f9dfd13143f37dc7"><td class="mdescLeft"> </td><td class="mdescRight">Allocates an empty matrix (A) and leaves it in build state.  <a href="#aab5942faf7f9fe31f9dfd13143f37dc7"></a><br/></td></tr>
+<tr class="memitem:ab1fd9e9f8cdd5f79134873fd6af47c28"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#ab1fd9e9f8cdd5f79134873fd6af47c28">duscr_variable_block_begin</a> (Mb, Nb, K, L, A, istat)</td></tr>
+<tr class="memdesc:ab1fd9e9f8cdd5f79134873fd6af47c28"><td class="mdescLeft"> </td><td class="mdescRight">Allocates an empty matrix (A) and leaves it in build state.  <a href="#ab1fd9e9f8cdd5f79134873fd6af47c28"></a><br/></td></tr>
+<tr class="memitem:abd5c88929ed1c7133169c401881fa1c7"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#abd5c88929ed1c7133169c401881fa1c7">cuscr_variable_block_begin</a> (Mb, Nb, K, L, A, istat)</td></tr>
+<tr class="memdesc:abd5c88929ed1c7133169c401881fa1c7"><td class="mdescLeft"> </td><td class="mdescRight">Allocates an empty matrix (A) and leaves it in build state.  <a href="#abd5c88929ed1c7133169c401881fa1c7"></a><br/></td></tr>
+<tr class="memitem:a700e8b151004b9c8829a1fe4fd331465"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a700e8b151004b9c8829a1fe4fd331465">zuscr_variable_block_begin</a> (Mb, Nb, K, L, A, istat)</td></tr>
+<tr class="memdesc:a700e8b151004b9c8829a1fe4fd331465"><td class="mdescLeft"> </td><td class="mdescRight">Allocates an empty matrix (A) and leaves it in build state.  <a href="#a700e8b151004b9c8829a1fe4fd331465"></a><br/></td></tr>
+<tr class="memitem:a38d9574e6360fcaa6035eaf9518001d8"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a38d9574e6360fcaa6035eaf9518001d8">suscr_end</a> (A, istat)</td></tr>
+<tr class="memdesc:a38d9574e6360fcaa6035eaf9518001d8"><td class="mdescLeft"> </td><td class="mdescRight">Makes an assembled matrix out of a matrix in build state. After this, it is not possible anymore to insert nonzeroes, but computational routines.  <a href="#a38d9574e6360fcaa6035eaf9518001d8"></a><br/></td></tr>
+<tr class="memitem:a88d066acac28b6fe7c7cdc9e6941ff8f"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a88d066acac28b6fe7c7cdc9e6941ff8f">duscr_end</a> (A, istat)</td></tr>
+<tr class="memdesc:a88d066acac28b6fe7c7cdc9e6941ff8f"><td class="mdescLeft"> </td><td class="mdescRight">Makes an assembled matrix out of a matrix in build state. After this, it is not possible anymore to insert nonzeroes, but computational routines.  <a href="#a88d066acac28b6fe7c7cdc9e6941ff8f"></a><br/></td></tr>
+<tr class="memitem:a9878426469b215a78642e5245a054203"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a9878426469b215a78642e5245a054203">cuscr_end</a> (A, istat)</td></tr>
+<tr class="memdesc:a9878426469b215a78642e5245a054203"><td class="mdescLeft"> </td><td class="mdescRight">Makes an assembled matrix out of a matrix in build state. After this, it is not possible anymore to insert nonzeroes, but computational routines.  <a href="#a9878426469b215a78642e5245a054203"></a><br/></td></tr>
+<tr class="memitem:a5f00b912397c8dc3ee87fecdf4cf98aa"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a5f00b912397c8dc3ee87fecdf4cf98aa">zuscr_end</a> (A, istat)</td></tr>
+<tr class="memdesc:a5f00b912397c8dc3ee87fecdf4cf98aa"><td class="mdescLeft"> </td><td class="mdescRight">Makes an assembled matrix out of a matrix in build state. After this, it is not possible anymore to insert nonzeroes, but computational routines.  <a href="#a5f00b912397c8dc3ee87fecdf4cf98aa"></a><br/></td></tr>
+<tr class="memitem:a26a40430bf4de9b01eaf9dacf999dea6"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a26a40430bf4de9b01eaf9dacf999dea6">suscr_insert_entry</a> (A, val, i, j, istat)</td></tr>
+<tr class="memdesc:a26a40430bf4de9b01eaf9dacf999dea6"><td class="mdescLeft"> </td><td class="mdescRight">Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#a26a40430bf4de9b01eaf9dacf999dea6"></a><br/></td></tr>
+<tr class="memitem:ae3706fcae9dcbf6ebe96335717823939"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#ae3706fcae9dcbf6ebe96335717823939">duscr_insert_entry</a> (A, val, i, j, istat)</td></tr>
+<tr class="memdesc:ae3706fcae9dcbf6ebe96335717823939"><td class="mdescLeft"> </td><td class="mdescRight">Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#ae3706fcae9dcbf6ebe96335717823939"></a><br/></td></tr>
+<tr class="memitem:a4bee5ce9a9bb94863469797ca22d44b0"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a4bee5ce9a9bb94863469797ca22d44b0">cuscr_insert_entry</a> (A, val, i, j, istat)</td></tr>
+<tr class="memdesc:a4bee5ce9a9bb94863469797ca22d44b0"><td class="mdescLeft"> </td><td class="mdescRight">Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#a4bee5ce9a9bb94863469797ca22d44b0"></a><br/></td></tr>
+<tr class="memitem:af003c4713fb7a39896ad1537fec94ea9"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#af003c4713fb7a39896ad1537fec94ea9">zuscr_insert_entry</a> (A, val, i, j, istat)</td></tr>
+<tr class="memdesc:af003c4713fb7a39896ad1537fec94ea9"><td class="mdescLeft"> </td><td class="mdescRight">Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#af003c4713fb7a39896ad1537fec94ea9"></a><br/></td></tr>
+<tr class="memitem:a5c4a2d0b9164fb232c102426693ccfd1"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a5c4a2d0b9164fb232c102426693ccfd1">suscr_insert_entries</a> (A, nnz, val, indx, jndx, istat)</td></tr>
+<tr class="memdesc:a5c4a2d0b9164fb232c102426693ccfd1"><td class="mdescLeft"> </td><td class="mdescRight">Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#a5c4a2d0b9164fb232c102426693ccfd1"></a><br/></td></tr>
+<tr class="memitem:a87f44b33cf81a30af58fe9a299ea78a3"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a87f44b33cf81a30af58fe9a299ea78a3">duscr_insert_entries</a> (A, nnz, val, indx, jndx, istat)</td></tr>
+<tr class="memdesc:a87f44b33cf81a30af58fe9a299ea78a3"><td class="mdescLeft"> </td><td class="mdescRight">Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#a87f44b33cf81a30af58fe9a299ea78a3"></a><br/></td></tr>
+<tr class="memitem:a6ee075639a028bfbb8d8c3652bb3c147"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a6ee075639a028bfbb8d8c3652bb3c147">cuscr_insert_entries</a> (A, nnz, val, indx, jndx, istat)</td></tr>
+<tr class="memdesc:a6ee075639a028bfbb8d8c3652bb3c147"><td class="mdescLeft"> </td><td class="mdescRight">Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#a6ee075639a028bfbb8d8c3652bb3c147"></a><br/></td></tr>
+<tr class="memitem:ad28c55a5ed7b359a30a2538a45878e08"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#ad28c55a5ed7b359a30a2538a45878e08">zuscr_insert_entries</a> (A, nnz, val, indx, jndx, istat)</td></tr>
+<tr class="memdesc:ad28c55a5ed7b359a30a2538a45878e08"><td class="mdescLeft"> </td><td class="mdescRight">Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#ad28c55a5ed7b359a30a2538a45878e08"></a><br/></td></tr>
+<tr class="memitem:a183a3ff9aa5af1dcedc5cf7bd4918b5e"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a183a3ff9aa5af1dcedc5cf7bd4918b5e">suscr_insert_col</a> (A, j, nnz, val, indx, istat)</td></tr>
+<tr class="memdesc:a183a3ff9aa5af1dcedc5cf7bd4918b5e"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#a183a3ff9aa5af1dcedc5cf7bd4918b5e"></a><br/></td></tr>
+<tr class="memitem:a1ed0bf47156c5d299ef678b71aec7ef0"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a1ed0bf47156c5d299ef678b71aec7ef0">duscr_insert_col</a> (A, j, nnz, val, indx, istat)</td></tr>
+<tr class="memdesc:a1ed0bf47156c5d299ef678b71aec7ef0"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#a1ed0bf47156c5d299ef678b71aec7ef0"></a><br/></td></tr>
+<tr class="memitem:a847661e819534c083984a453a1e282ea"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a847661e819534c083984a453a1e282ea">cuscr_insert_col</a> (A, j, nnz, val, indx, istat)</td></tr>
+<tr class="memdesc:a847661e819534c083984a453a1e282ea"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#a847661e819534c083984a453a1e282ea"></a><br/></td></tr>
+<tr class="memitem:a3a2dcc960e33dbae28abc3f1fdd52e66"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a3a2dcc960e33dbae28abc3f1fdd52e66">zuscr_insert_col</a> (A, j, nnz, val, indx, istat)</td></tr>
+<tr class="memdesc:a3a2dcc960e33dbae28abc3f1fdd52e66"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#a3a2dcc960e33dbae28abc3f1fdd52e66"></a><br/></td></tr>
+<tr class="memitem:a9ec4465da954f0761c7edfd78d2be717"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a9ec4465da954f0761c7edfd78d2be717">suscr_insert_row</a> (A, i, nnz, val, indx, istat)</td></tr>
+<tr class="memdesc:a9ec4465da954f0761c7edfd78d2be717"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#a9ec4465da954f0761c7edfd78d2be717"></a><br/></td></tr>
+<tr class="memitem:a055df1b4ef9aa7e7937bb1dfe1f228b9"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a055df1b4ef9aa7e7937bb1dfe1f228b9">duscr_insert_row</a> (A, i, nnz, val, indx, istat)</td></tr>
+<tr class="memdesc:a055df1b4ef9aa7e7937bb1dfe1f228b9"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#a055df1b4ef9aa7e7937bb1dfe1f228b9"></a><br/></td></tr>
+<tr class="memitem:aad23b1379a471af392fa33066fd66140"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#aad23b1379a471af392fa33066fd66140">cuscr_insert_row</a> (A, i, nnz, val, indx, istat)</td></tr>
+<tr class="memdesc:aad23b1379a471af392fa33066fd66140"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#aad23b1379a471af392fa33066fd66140"></a><br/></td></tr>
+<tr class="memitem:a7114c5a5b9487634711034c693b5e9b3"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a7114c5a5b9487634711034c693b5e9b3">zuscr_insert_row</a> (A, i, nnz, val, indx, istat)</td></tr>
+<tr class="memdesc:a7114c5a5b9487634711034c693b5e9b3"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#a7114c5a5b9487634711034c693b5e9b3"></a><br/></td></tr>
+<tr class="memitem:a508ee5b058f7c6a1a3d21d3f706cddd4"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a508ee5b058f7c6a1a3d21d3f706cddd4">suscr_insert_clique</a> (A, k, l, val, row_stride, col_stride, indx, jndx, istat)</td></tr>
+<tr class="memdesc:a508ee5b058f7c6a1a3d21d3f706cddd4"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole clique in a matrix, assuming this is in build state. By default, duplicate entries will be summed together.  <a href="#a508ee5b058f7c6a1a3d21d3f706cddd4"></a><br/></td></tr>
+<tr class="memitem:a533a7082811ea859d079b5e9513ce1b4"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a533a7082811ea859d079b5e9513ce1b4">duscr_insert_clique</a> (A, k, l, val, row_stride, col_stride, indx, jndx, istat)</td></tr>
+<tr class="memdesc:a533a7082811ea859d079b5e9513ce1b4"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole clique in a matrix, assuming this is in build state. By default, duplicate entries will be summed together.  <a href="#a533a7082811ea859d079b5e9513ce1b4"></a><br/></td></tr>
+<tr class="memitem:af9a2f1bf6543dccc8b5bb1b5d0f35636"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#af9a2f1bf6543dccc8b5bb1b5d0f35636">cuscr_insert_clique</a> (A, k, l, val, row_stride, col_stride, indx, jndx, istat)</td></tr>
+<tr class="memdesc:af9a2f1bf6543dccc8b5bb1b5d0f35636"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole clique in a matrix, assuming this is in build state. By default, duplicate entries will be summed together.  <a href="#af9a2f1bf6543dccc8b5bb1b5d0f35636"></a><br/></td></tr>
+<tr class="memitem:a0b7e474844552d62f72e18bac4592ced"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a0b7e474844552d62f72e18bac4592ced">zuscr_insert_clique</a> (A, k, l, val, row_stride, col_stride, indx, jndx, istat)</td></tr>
+<tr class="memdesc:a0b7e474844552d62f72e18bac4592ced"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole clique in a matrix, assuming this is in build state. By default, duplicate entries will be summed together.  <a href="#a0b7e474844552d62f72e18bac4592ced"></a><br/></td></tr>
+<tr class="memitem:a1f4709630ab2be2247580eb1fbb48472"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a1f4709630ab2be2247580eb1fbb48472">suscr_insert_block</a> (A, val, row_stride, col_stride, i, j, istat)</td></tr>
+<tr class="memdesc:a1f4709630ab2be2247580eb1fbb48472"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the (type) corresponding matrix blocked <code>begin</code> function. If not called a blocked <code>begin</code> function, will assume 1x1 (that is, no) blocking. By default, duplicate entries will be summed together.  <a href="#a1f4709630ab2be2247580eb1 [...]
+<tr class="memitem:a40b43e04b282dd6f6ad11f51701a9b81"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a40b43e04b282dd6f6ad11f51701a9b81">duscr_insert_block</a> (A, val, row_stride, col_stride, i, j, istat)</td></tr>
+<tr class="memdesc:a40b43e04b282dd6f6ad11f51701a9b81"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the (type) corresponding matrix blocked <code>begin</code> function. If not called a blocked <code>begin</code> function, will assume 1x1 (that is, no) blocking. By default, duplicate entries will be summed together.  <a href="#a40b43e04b282dd6f6ad11f51 [...]
+<tr class="memitem:add06f42953fc4dff9dafc487f58172ee"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#add06f42953fc4dff9dafc487f58172ee">cuscr_insert_block</a> (A, val, row_stride, col_stride, i, j, istat)</td></tr>
+<tr class="memdesc:add06f42953fc4dff9dafc487f58172ee"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the (type) corresponding matrix blocked <code>begin</code> function. If not called a blocked <code>begin</code> function, will assume 1x1 (that is, no) blocking. By default, duplicate entries will be summed together.  <a href="#add06f42953fc4dff9dafc487 [...]
+<tr class="memitem:a0ea88b095d147ffe96d05c5d53b4480a"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a0ea88b095d147ffe96d05c5d53b4480a">zuscr_insert_block</a> (A, val, row_stride, col_stride, i, j, istat)</td></tr>
+<tr class="memdesc:a0ea88b095d147ffe96d05c5d53b4480a"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the (type) corresponding matrix blocked <code>begin</code> function. If not called a blocked <code>begin</code> function, will assume 1x1 (that is, no) blocking. By default, duplicate entries will be summed together.  <a href="#a0ea88b095d147ffe96d05c5d [...]
+<tr class="memitem:afb6e4dbb50553fa86818408d9db6d7c3"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#afb6e4dbb50553fa86818408d9db6d7c3">susmv</a> (transA, alpha, A, x, incx, y, incy, istat)</td></tr>
+<tr class="memdesc:afb6e4dbb50553fa86818408d9db6d7c3"><td class="mdescLeft"> </td><td class="mdescRight">Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>.  <a href="#afb6e4dbb50553fa86818408d9db6d7c3"></a>< [...]
+<tr class="memitem:a9fd3bf400531b8277a082b0663491329"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a9fd3bf400531b8277a082b0663491329">dusmv</a> (transA, alpha, A, x, incx, y, incy, istat)</td></tr>
+<tr class="memdesc:a9fd3bf400531b8277a082b0663491329"><td class="mdescLeft"> </td><td class="mdescRight">Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>.  <a href="#a9fd3bf400531b8277a082b0663491329"></a>< [...]
+<tr class="memitem:a437ce36d8520ffeadfbb6e6f9885b9f3"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a437ce36d8520ffeadfbb6e6f9885b9f3">cusmv</a> (transA, alpha, A, x, incx, y, incy, istat)</td></tr>
+<tr class="memdesc:a437ce36d8520ffeadfbb6e6f9885b9f3"><td class="mdescLeft"> </td><td class="mdescRight">Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>.  <a href="#a437ce36d8520ffeadfbb6e6f9885b9f3"></a>< [...]
+<tr class="memitem:af67f81abcb78cc03000257888e47f517"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#af67f81abcb78cc03000257888e47f517">zusmv</a> (transA, alpha, A, x, incx, y, incy, istat)</td></tr>
+<tr class="memdesc:af67f81abcb78cc03000257888e47f517"><td class="mdescLeft"> </td><td class="mdescRight">Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>.  <a href="#af67f81abcb78cc03000257888e47f517"></a>< [...]
+<tr class="memitem:ab21d16c7bda69becec8edf113b62dee0"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#ab21d16c7bda69becec8edf113b62dee0">sussv</a> (transT, alpha, T, x, incx, istat)</td></tr>
+<tr class="memdesc:ab21d16c7bda69becec8edf113b62dee0"><td class="mdescLeft"> </td><td class="mdescRight">Triangular solve, by a dense vector. Either of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>.  <a href="#ab21d16c7bda69becec8edf113b62dee0" [...]
+<tr class="memitem:aae591d7a08af50e34313f347d888779d"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#aae591d7a08af50e34313f347d888779d">dussv</a> (transT, alpha, T, x, incx, istat)</td></tr>
+<tr class="memdesc:aae591d7a08af50e34313f347d888779d"><td class="mdescLeft"> </td><td class="mdescRight">Triangular solve, by a dense vector. Either of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>.  <a href="#aae591d7a08af50e34313f347d888779d" [...]
+<tr class="memitem:a8f2db2c64bbd1ecd032fb7a103e30c97"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a8f2db2c64bbd1ecd032fb7a103e30c97">cussv</a> (transT, alpha, T, x, incx, istat)</td></tr>
+<tr class="memdesc:a8f2db2c64bbd1ecd032fb7a103e30c97"><td class="mdescLeft"> </td><td class="mdescRight">Triangular solve, by a dense vector. Either of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>.  <a href="#a8f2db2c64bbd1ecd032fb7a103e30c97" [...]
+<tr class="memitem:a2331da0465b9a3298f8b6dd1c3c7c150"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a2331da0465b9a3298f8b6dd1c3c7c150">zussv</a> (transT, alpha, T, x, incx, istat)</td></tr>
+<tr class="memdesc:a2331da0465b9a3298f8b6dd1c3c7c150"><td class="mdescLeft"> </td><td class="mdescRight">Triangular solve, by a dense vector. Either of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>.  <a href="#a2331da0465b9a3298f8b6dd1c3c7c150" [...]
+<tr class="memitem:a03977fef75f9ee8773400c08153069d5"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a03977fef75f9ee8773400c08153069d5">susmm</a> (order, transA, nrhs, alpha, A, b, ldb, c, ldc, istat)</td></tr>
+<tr class="memdesc:a03977fef75f9ee8773400c08153069d5"><td class="mdescLeft"> </td><td class="mdescRight">Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>.  <a href="#a03977fef75f9ee8773400c0815 [...]
+<tr class="memitem:ae717638ebcf6e277f2621fd8eae75249"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#ae717638ebcf6e277f2621fd8eae75249">dusmm</a> (order, transA, nrhs, alpha, A, b, ldb, c, ldc, istat)</td></tr>
+<tr class="memdesc:ae717638ebcf6e277f2621fd8eae75249"><td class="mdescLeft"> </td><td class="mdescRight">Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>.  <a href="#ae717638ebcf6e277f2621fd8ea [...]
+<tr class="memitem:a4f7e8d071d2309ed60cb9d588fd749b6"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a4f7e8d071d2309ed60cb9d588fd749b6">cusmm</a> (order, transA, nrhs, alpha, A, b, ldb, c, ldc, istat)</td></tr>
+<tr class="memdesc:a4f7e8d071d2309ed60cb9d588fd749b6"><td class="mdescLeft"> </td><td class="mdescRight">Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>.  <a href="#a4f7e8d071d2309ed60cb9d588f [...]
+<tr class="memitem:ae1048833494ef86cd0d74648989599db"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#ae1048833494ef86cd0d74648989599db">zusmm</a> (order, transA, nrhs, alpha, A, b, ldb, c, ldc, istat)</td></tr>
+<tr class="memdesc:ae1048833494ef86cd0d74648989599db"><td class="mdescLeft"> </td><td class="mdescRight">Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>.  <a href="#ae1048833494ef86cd0d7464898 [...]
+<tr class="memitem:a617ef412adc547e3f050610874549889"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a617ef412adc547e3f050610874549889">sussm</a> (order, transT, nrhs, alpha, T, b, ldb, istat)</td></tr>
+<tr class="memdesc:a617ef412adc547e3f050610874549889"><td class="mdescLeft"> </td><td class="mdescRight">Triangular solve, by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>.  <a href="#a617ef412adc [...]
+<tr class="memitem:a0eb97f56a6467e87ce06f8be8a50e88d"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a0eb97f56a6467e87ce06f8be8a50e88d">dussm</a> (order, transT, nrhs, alpha, T, b, ldb, istat)</td></tr>
+<tr class="memdesc:a0eb97f56a6467e87ce06f8be8a50e88d"><td class="mdescLeft"> </td><td class="mdescRight">Triangular solve, by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>.  <a href="#a0eb97f56a64 [...]
+<tr class="memitem:abc00bd143edf8a993e7d79a1d8baf636"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#abc00bd143edf8a993e7d79a1d8baf636">cussm</a> (order, transT, nrhs, alpha, T, b, ldb, istat)</td></tr>
+<tr class="memdesc:abc00bd143edf8a993e7d79a1d8baf636"><td class="mdescLeft"> </td><td class="mdescRight">Triangular solve, by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>.  <a href="#abc00bd143ed [...]
+<tr class="memitem:a772211da8da7a031fe7845be6a2dd403"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a772211da8da7a031fe7845be6a2dd403">zussm</a> (order, transT, nrhs, alpha, T, b, ldb, istat)</td></tr>
+<tr class="memdesc:a772211da8da7a031fe7845be6a2dd403"><td class="mdescLeft"> </td><td class="mdescRight">Triangular solve, by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>.  <a href="#a772211da8da [...]
+</table><table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-attribs"></a>
+Data Fields</h2></td></tr>
+<tr class="memitem:a0e333ba9a5cc3b014697d0a12d08f6b2"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a0e333ba9a5cc3b014697d0a12d08f6b2">blas_sparse_const_success</a> = 0</td></tr>
+<tr class="memitem:a5d97ddcd53d2bba670233f5335b44f55"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a5d97ddcd53d2bba670233f5335b44f55">blas_sparse_const_failure</a> = -1</td></tr>
+<tr class="memitem:aae79119fabe06a887f461eda50c97d0a"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#aae79119fabe06a887f461eda50c97d0a">blas_sparse_const_not_available</a> = -9999</td></tr>
+<tr class="memitem:aa1d4df9e25dcab40269247450e1b3e4e"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#aa1d4df9e25dcab40269247450e1b3e4e">blas_rowmajor</a> = 101</td></tr>
+<tr class="memitem:af4ede8e7f0445be25841733354b747bd"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#af4ede8e7f0445be25841733354b747bd">blas_colmajor</a> = 102</td></tr>
+<tr class="memitem:a5b700c1a472d7d12decf3d7d7fd244c2"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a5b700c1a472d7d12decf3d7d7fd244c2">blas_no_trans</a> = 111</td></tr>
+<tr class="memitem:a12f06635d9f1c40722ad4bd757e737bb"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a12f06635d9f1c40722ad4bd757e737bb">blas_trans</a> = 112</td></tr>
+<tr class="memitem:a7b8d414b608929ba0abced46c98889d6"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a7b8d414b608929ba0abced46c98889d6">blas_conj_trans</a> = 113</td></tr>
+<tr class="memitem:a8be70a15dda0ebf3b782b66e72f924d2"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a8be70a15dda0ebf3b782b66e72f924d2">blas_upper</a> = 121</td></tr>
+<tr class="memitem:a67e376dc6a7cc769ee24415dc2a8d9d1"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a67e376dc6a7cc769ee24415dc2a8d9d1">blas_lower</a> = 122</td></tr>
+<tr class="memitem:a8e76732b3c06d9fc27669bda37f24ed6"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a8e76732b3c06d9fc27669bda37f24ed6">blas_non_unit_diag</a> = 131</td></tr>
+<tr class="memitem:af1e902c099efbedd09c7ce65b4772626"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#af1e902c099efbedd09c7ce65b4772626">blas_unit_diag</a> = 132</td></tr>
+<tr class="memitem:a5059846f8eba839bb1afc32abac380e4"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a5059846f8eba839bb1afc32abac380e4">blas_left_side</a> = 141</td></tr>
+<tr class="memitem:a6285c5cc8fe45bb73ece03c7900d5a18"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a6285c5cc8fe45bb73ece03c7900d5a18">blas_right_side</a> = 142</td></tr>
+<tr class="memitem:ac6324bd9c488f6ad4c176fd05a5c1a94"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#ac6324bd9c488f6ad4c176fd05a5c1a94">blas_base</a> = 151</td></tr>
+<tr class="memitem:a6acca6eab87ec90dcf71b8c7b40aaa8f"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a6acca6eab87ec90dcf71b8c7b40aaa8f">blas_t</a> = 152</td></tr>
+<tr class="memitem:af75f815c459344a5a38cd0794b93504a"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#af75f815c459344a5a38cd0794b93504a">blas_rnd</a> = 153</td></tr>
+<tr class="memitem:a114a6fabae21d32477af3acee15b9d5d"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a114a6fabae21d32477af3acee15b9d5d">blas_ieee</a> = 154</td></tr>
+<tr class="memitem:a150f1864fd1bf514a9769914490a23ba"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a150f1864fd1bf514a9769914490a23ba">blas_emin</a> = 155</td></tr>
+<tr class="memitem:a18b1555d2c4e1d8b3a8d38bc7105c3fa"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a18b1555d2c4e1d8b3a8d38bc7105c3fa">blas_emax</a> = 156</td></tr>
+<tr class="memitem:acbf407624c42cad4f1ea47776977d160"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#acbf407624c42cad4f1ea47776977d160">blas_eps</a> = 157</td></tr>
+<tr class="memitem:a8c3f3a997d8d96f470d44d1f34e3ed39"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a8c3f3a997d8d96f470d44d1f34e3ed39">blas_prec</a> = 158</td></tr>
+<tr class="memitem:ae9f01b90527ebe6b178d4c73a46bbf25"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#ae9f01b90527ebe6b178d4c73a46bbf25">blas_underflow</a> = 159</td></tr>
+<tr class="memitem:ad8f2f29c92552e53910e5c92feb3567d"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#ad8f2f29c92552e53910e5c92feb3567d">blas_overflow</a> = 160</td></tr>
+<tr class="memitem:a0b1596744fa5acb891d4908588249c54"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a0b1596744fa5acb891d4908588249c54">blas_sfmin</a> = 161</td></tr>
+<tr class="memitem:a05c66c0c87c72e39580258418c46341f"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a05c66c0c87c72e39580258418c46341f">blas_one_norm</a> = 171</td></tr>
+<tr class="memitem:aca85a61f11b3c36113209d61a89e4957"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#aca85a61f11b3c36113209d61a89e4957">blas_real_one_norm</a> = 172</td></tr>
+<tr class="memitem:abb84b2b7195d878e71760bdad596d693"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#abb84b2b7195d878e71760bdad596d693">blas_two_norm</a> = 173</td></tr>
+<tr class="memitem:af1dbfea000291bde9fe93507f62a31ba"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#af1dbfea000291bde9fe93507f62a31ba">blas_frobenius_norm</a> = 174</td></tr>
+<tr class="memitem:afb9ba15096a7184519256ec2923fda49"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#afb9ba15096a7184519256ec2923fda49">blas_inf_norm</a> = 175</td></tr>
+<tr class="memitem:a0a60f070ff9a1a864af39e4489c93e31"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a0a60f070ff9a1a864af39e4489c93e31">blas_real_inf_norm</a> = 176</td></tr>
+<tr class="memitem:a487afe34859579523bba7b4851e106c6"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a487afe34859579523bba7b4851e106c6">blas_max_norm</a> = 177</td></tr>
+<tr class="memitem:afecc0bd8ce11628fe2bbe55b9244c295"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#afecc0bd8ce11628fe2bbe55b9244c295">blas_real_max_norm</a> = 178</td></tr>
+<tr class="memitem:ae972ff04001d8bbcc52f242134af52d8"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#ae972ff04001d8bbcc52f242134af52d8">blas_increasing_order</a> = 181</td></tr>
+<tr class="memitem:aedf0364e33ddfd0ee88e93acf3683cf7"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#aedf0364e33ddfd0ee88e93acf3683cf7">blas_decreasing_order</a> = 182</td></tr>
+<tr class="memitem:a1964a262e04c046d0f97c7de7cf1d916"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a1964a262e04c046d0f97c7de7cf1d916">blas_conj</a> = 191</td></tr>
+<tr class="memitem:aabedcf272e063a48f7e310ce04784b17"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#aabedcf272e063a48f7e310ce04784b17">blas_no_conj</a> = 192</td></tr>
+<tr class="memitem:a2fc184f889f72b17fdb2ba6266c25b02"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a2fc184f889f72b17fdb2ba6266c25b02">blas_jrot_inner</a> = 201</td></tr>
+<tr class="memitem:ac526dad147c751bb8b175edd47d29c22"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#ac526dad147c751bb8b175edd47d29c22">blas_jrot_outer</a> = 202</td></tr>
+<tr class="memitem:a0612a82431d61a1b6c4c5030e65c5e31"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a0612a82431d61a1b6c4c5030e65c5e31">blas_jrot_sorted</a> = 203</td></tr>
+<tr class="memitem:ab378aeb6aa39b2495f084cd31e32e5a6"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#ab378aeb6aa39b2495f084cd31e32e5a6">blas_prec_single</a> = 211</td></tr>
+<tr class="memitem:a6f0692e06d3b42828813a7a0a9ec59bb"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a6f0692e06d3b42828813a7a0a9ec59bb">blas_prec_double</a> = 212</td></tr>
+<tr class="memitem:a71c278e64f30229d19b46376c4385669"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a71c278e64f30229d19b46376c4385669">blas_prec_indigenous</a> = 213</td></tr>
+<tr class="memitem:a305383f56368c35429bfd9e7ca23a0f5"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a305383f56368c35429bfd9e7ca23a0f5">blas_prec_extra</a> = 214</td></tr>
+<tr class="memitem:a8c89ffd8863d708e55c5330d11c772f2"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a8c89ffd8863d708e55c5330d11c772f2">blas_zero_base</a> = 221</td></tr>
+<tr class="memitem:a60fbe98d827ebea9e7c431e7698bc462"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a60fbe98d827ebea9e7c431e7698bc462">blas_one_base</a> = 222</td></tr>
+<tr class="memitem:a6dd42fe3a5c74d293855e6ed0825cc67"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a6dd42fe3a5c74d293855e6ed0825cc67">blas_general</a> = 231</td></tr>
+<tr class="memitem:a13161955ecb9fc2ce12963cc319c93d2"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a13161955ecb9fc2ce12963cc319c93d2">blas_symmetric</a> = 232</td></tr>
+<tr class="memitem:aaab0006bc8bcddf6cba32a69d3ddbf95"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#aaab0006bc8bcddf6cba32a69d3ddbf95">blas_hermitian</a> = 233</td></tr>
+<tr class="memitem:a04619b8ef6be6983ded4e3c22fce63b8"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a04619b8ef6be6983ded4e3c22fce63b8">blas_triangular</a> = 234</td></tr>
+<tr class="memitem:a280772883a7487fa68aabd98e4a49342"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a280772883a7487fa68aabd98e4a49342">blas_lower_triangular</a> = 235</td></tr>
+<tr class="memitem:a97b523912445087a965737cb8cfd69af"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a97b523912445087a965737cb8cfd69af">blas_upper_triangular</a> = 236</td></tr>
+<tr class="memitem:ad474a894be1f45a6937c2a880963b1c7"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#ad474a894be1f45a6937c2a880963b1c7">blas_lower_symmetric</a> = 237</td></tr>
+<tr class="memitem:afd82b7f277c54dfa83ab44ea6ed89fb1"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#afd82b7f277c54dfa83ab44ea6ed89fb1">blas_upper_symmetric</a> = 238</td></tr>
+<tr class="memitem:a101f5eab06d45474d64bff200d2387ec"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a101f5eab06d45474d64bff200d2387ec">blas_lower_hermitian</a> = 239</td></tr>
+<tr class="memitem:a4fcd2dfde7722199b9125542622c8c4a"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a4fcd2dfde7722199b9125542622c8c4a">blas_upper_hermitian</a> = 240</td></tr>
+<tr class="memitem:ac72dca9b25a744006fb7e2b272958494"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#ac72dca9b25a744006fb7e2b272958494">blas_complex</a> = 241</td></tr>
+<tr class="memitem:a0d365ccd71fdedaa5cf30a46f34bcf37"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a0d365ccd71fdedaa5cf30a46f34bcf37">blas_real</a> = 242</td></tr>
+<tr class="memitem:a9c54c439abc55e509b4a7ec35f6faa4e"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a9c54c439abc55e509b4a7ec35f6faa4e">blas_double_precision</a> = 243</td></tr>
+<tr class="memitem:a0badb7c2679a5d0ba4e90f599b678768"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a0badb7c2679a5d0ba4e90f599b678768">blas_single_precision</a> = 244</td></tr>
+<tr class="memitem:afe9e1f52ba336f041e1e750b3a989510"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#afe9e1f52ba336f041e1e750b3a989510">blas_num_rows</a> = 251</td></tr>
+<tr class="memitem:a726b73c19dae30439aa65988fa5b5dd1"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a726b73c19dae30439aa65988fa5b5dd1">blas_num_cols</a> = 252</td></tr>
+<tr class="memitem:ab01ecdd54a1f10e944e446e0efed3bc3"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#ab01ecdd54a1f10e944e446e0efed3bc3">blas_num_nonzeros</a> = 253</td></tr>
+<tr class="memitem:aa9ee2ffde87e203fd37719979e7b546d"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#aa9ee2ffde87e203fd37719979e7b546d">blas_invalid_handle</a> = 261</td></tr>
+<tr class="memitem:a817d9813d36f7a7abea44e3b781e21ba"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a817d9813d36f7a7abea44e3b781e21ba">blas_new_handle</a> = 262</td></tr>
+<tr class="memitem:a0936a1798a61b56c52c116e428a4e6b7"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a0936a1798a61b56c52c116e428a4e6b7">blas_open_handle</a> = 263</td></tr>
+<tr class="memitem:a83704034c72929f3b1df534034b2786d"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a83704034c72929f3b1df534034b2786d">blas_valid_handle</a> = 264</td></tr>
+<tr class="memitem:a21d31433d4f29a6fd54c214b7a26c7d4"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a21d31433d4f29a6fd54c214b7a26c7d4">blas_regular</a> = 271</td></tr>
+<tr class="memitem:a9893aa4d547b371f6ba59a8615aa752e"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a9893aa4d547b371f6ba59a8615aa752e">blas_irregular</a> = 272</td></tr>
+<tr class="memitem:aef5b352231bcff68b28b97742899558e"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#aef5b352231bcff68b28b97742899558e">blas_block</a> = 273</td></tr>
+<tr class="memitem:a50d0da49cbf6822ed5e9a8ff81faf6d5"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a50d0da49cbf6822ed5e9a8ff81faf6d5">blas_unassembled</a> = 274</td></tr>
+<tr class="memitem:afab5c86162fcf329199b0666f33cde96"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#afab5c86162fcf329199b0666f33cde96">blas_rsb_spmv_autotuning_on</a> = 6660</td></tr>
+<tr class="memitem:a8cc6d8c9036cb66051cc1cfb7c739b5e"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a8cc6d8c9036cb66051cc1cfb7c739b5e">blas_rsb_spmv_autotuning_off</a> = 6661</td></tr>
+<tr class="memitem:abd8e06d35f2c4c3a6ecc1eb315548c43"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#abd8e06d35f2c4c3a6ecc1eb315548c43">blas_rsb_spmv_n_autotuning_on</a> = 6662</td></tr>
+<tr class="memitem:aefa9f681506ee4ceb578f11b9a0e664c"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#aefa9f681506ee4ceb578f11b9a0e664c">blas_rsb_spmv_n_autotuning_off</a> = 6663</td></tr>
+<tr class="memitem:a7bfbab78e4c5a789e2d76274a2fbc96c"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a7bfbab78e4c5a789e2d76274a2fbc96c">blas_rsb_spmv_t_autotuning_on</a> = 6664</td></tr>
+<tr class="memitem:a56dc72776b8dcdc43f0cebbdc93dcd21"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a56dc72776b8dcdc43f0cebbdc93dcd21">blas_rsb_spmv_t_autotuning_off</a> = 6665</td></tr>
+<tr class="memitem:acf0fe16da38fc03226e462dc6104cc68"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#acf0fe16da38fc03226e462dc6104cc68">blas_rsb_autotune_next_operation</a> = 6666</td></tr>
+<tr class="memitem:a9cd9a2263c79534384bef6bf27e65787"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a9cd9a2263c79534384bef6bf27e65787">blas_rsb_rep_rsb</a> = 9995</td></tr>
+<tr class="memitem:a9fe6012ccac0890c7f7a8500e77e9ff7"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a9fe6012ccac0890c7f7a8500e77e9ff7">blas_rsb_rep_csr</a> = 9996</td></tr>
+<tr class="memitem:abb6a552efaab32ed9687f2e2df895783"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#abb6a552efaab32ed9687f2e2df895783">blas_rsb_rep_coo</a> = 9997</td></tr>
+<tr class="memitem:a10ebafcdf3cc36cf0471ba20ffcd2980"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a10ebafcdf3cc36cf0471ba20ffcd2980">blas_rsb_duplicates_ovw</a> = 9998</td></tr>
+<tr class="memitem:a508fec1d9853698fd08c239dd08a7291"><td class="memItemLeft" align="right" valign="top">integer, parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html#a508fec1d9853698fd08c239dd08a7291">blas_rsb_duplicates_sum</a> = 9999</td></tr>
+</table>
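+<p>As a brief illustration of the calling sequence summarized above, here is a minimal assembly-and-multiply sketch. It is not part of the generated reference: it assumes <code>use blas_sparse</code>, a <code>duscr_begin</code> analogous to the <code>cuscr_begin</code> documented below, and a handle-releasing routine <code>usds</code> as in the Sparse BLAS standard.</p>
+<pre class="fragment">
+      program sketch
+        use blas_sparse
+        implicit none
+        integer :: A, istat
+        real(kind(1.d0)) :: x(2), y(2)
+        x = (/ 1.d0, 2.d0 /)
+        y = 0.d0
+        call duscr_begin(2, 2, A, istat)                ! matrix handle A, build state
+        call duscr_insert_entry(A, 11.d0, 1, 1, istat)  ! duplicates would be summed
+        call duscr_insert_entry(A, 22.d0, 2, 2, istat)
+        call duscr_end(A, istat)                        ! matrix assembled
+        call dusmv(blas_no_trans, 1.d0, A, x, 1, y, 1, istat) ! y = 1*A*x + y
+        call usds(A, istat)                             ! assumed: releases the handle
+      end program sketch
+</pre>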
+<h2>Member Function Documentation</h2>
+<a class="anchor" id="af4e9f97f85799c5e8f60c78d40d906f3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::cuscr_begin </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>m</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>n</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">m</td><td>Is the count of rows. </td></tr>
+    <tr><td class="paramname">n</td><td>Is the count of columns.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+    <tr><td class="paramname">A</td><td>On success, a valid matrix handle will be written to A. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="todo"><dt><b><a class="el" href="todo.html#_todo000005">Todo:</a></b></dt><dd>Shall make <code>A</code> <code>intent(inout)</code> as well. </dd></dl>
+
+</div>
+</div>
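+<p>A minimal call sketch (values are illustrative), checking <code>istat</code> against the module's status constants:</p>
+<pre class="fragment">
+      integer :: A, istat
+      call cuscr_begin(4, 4, A, istat)   ! 4 x 4 single-complex matrix, build state
+      if (istat .ne. blas_sparse_const_success) stop 'cuscr_begin failed'
+</pre>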
+<a class="anchor" id="a6085ddf99c2459e051a6106e4a2c4785"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::cuscr_block_begin </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">k,l</td><td>Are row and column dimensions when specifying a matrix as BCSR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+    <tr><td class="paramname">A</td><td>On success, a valid matrix handle will be written to A. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="todo"><dt><b><a class="el" href="todo.html#_todo000009">Todo:</a></b></dt><dd>Shall make <code>A</code> <code>intent(inout)</code> as well. </dd></dl>
+
+</div>
+</div>
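+<p>For example, a 6 x 6 matrix consisting of a 2 x 2 grid of 3 x 3 blocks could be begun as follows (a sketch; values are illustrative):</p>
+<pre class="fragment">
+      integer :: A, istat
+      call cuscr_block_begin(2, 2, 3, 3, A, istat)  ! Mb=2, Nb=2 blocks, each k=3 by l=3
+</pre>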
+<a class="anchor" id="a9878426469b215a78642e5245a054203"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::cuscr_end </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Makes an assembled matrix out of a matrix in build state. After this, it is no longer possible to insert nonzeroes, but computational routines can be called on the matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
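+<p>A sketch of ending the assembly phase, assuming <code>A</code> was created by one of the <code>cuscr_*_begin</code> routines and populated by the <code>cuscr_insert_*</code> routines:</p>
+<pre class="fragment">
+      call cuscr_end(A, istat)           ! no further insertions allowed afterwards
+      if (istat .ne. blas_sparse_const_success) stop 'assembly failed'
+</pre>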
+<a class="anchor" id="add06f42953fc4dff9dafc487f58172ee"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::cuscr_insert_block </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the corresponding (typed) blocked <code>begin</code> function. If no blocked <code>begin</code> function was called, 1x1 (that is, no) blocking is assumed. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row and column strides in accessing <code>val</code>. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Block row/column indices. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">BLAS_duscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286" [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
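+<p>A minimal usage sketch (not part of the generated reference; <code>cuscr_block_begin</code> is linked above, while <code>cuscr_end</code> is assumed by analogy with <code>duscr_end</code>): insert one 2x2 complex block, stored column-major in <code>val</code>, at block position (1,1).</p>
+<pre class="fragment">
+program block_sketch
+  use blas_sparse
+  implicit none
+  integer :: A, istat
+  complex(kind(1.e0)) :: val(4)
+  call cuscr_block_begin(2, 2, 2, 2, A, istat)   ! 2x2 grid of 2x2 blocks: a 4x4 matrix
+  val = (/ (1.e0,0.e0), (2.e0,0.e0), (3.e0,0.e0), (4.e0,0.e0) /)
+  ! Column-major block storage: row stride 1, column stride 2.
+  call cuscr_insert_block(A, val, 1, 2, 1, 1, istat)
+  call cuscr_end(A, istat)
+end program block_sketch
+</pre>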
+<a class="anchor" id="af9a2f1bf6543dccc8b5bb1b5d0f35636"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::cuscr_insert_clique </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole clique in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">k,l</td><td>Clique rows and columns count. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row/columns stride in accessing the clique. </td></tr>
+    <tr><td class="paramname">indx,jndx</td><td>Row/column indices arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a847661e819534c083984a453a1e282ea"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::cuscr_insert_col </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a6ee075639a028bfbb8d8c3652bb3c147"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::cuscr_insert_entries </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">jndx</td><td>Column indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
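+<p>A minimal usage sketch (not part of the generated reference; <code>cuscr_begin</code> and <code>cuscr_end</code> are assumed by analogy with the documented d-typed variants): build a 3x3 complex matrix from COO-style arrays.</p>
+<pre class="fragment">
+program entries_sketch
+  use blas_sparse
+  implicit none
+  integer :: A, istat
+  integer, parameter :: nnz = 3
+  complex(kind(1.e0)) :: val(nnz)
+  integer :: indx(nnz), jndx(nnz)
+  call cuscr_begin(3, 3, A, istat)
+  val  = (/ (1.e0,0.e0), (2.e0,1.e0), (3.e0,0.e0) /)
+  indx = (/ 1, 2, 3 /)   ! row indices
+  jndx = (/ 1, 2, 3 /)   ! column indices
+  call cuscr_insert_entries(A, nnz, val, indx, jndx, istat)
+  call cuscr_end(A, istat)
+end program entries_sketch
+</pre>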
+<a class="anchor" id="a4bee5ce9a9bb94863469797ca22d44b0"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::cuscr_insert_entry </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Row and column indices.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="aad23b1379a471af392fa33066fd66140"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::cuscr_insert_row </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row index.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
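+<p>A minimal usage sketch (not part of the generated reference; <code>cuscr_begin</code>/<code>cuscr_end</code> are assumed as above): insert row 2 of a matrix in build state, with nonzeroes at columns 1 and 3.</p>
+<pre class="fragment">
+program row_sketch
+  use blas_sparse
+  implicit none
+  integer :: A, istat
+  complex(kind(1.e0)) :: val(2)
+  integer :: indx(2)
+  call cuscr_begin(3, 3, A, istat)
+  val  = (/ (5.e0,0.e0), (7.e0,0.e0) /)
+  indx = (/ 1, 3 /)   ! column indices of the entries in row 2
+  call cuscr_insert_row(A, 2, 2, val, indx, istat)
+  call cuscr_end(A, istat)
+end program row_sketch
+</pre>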
+<a class="anchor" id="abd5c88929ed1c7133169c401881fa1c7"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::cuscr_variable_block_begin </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>K</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>L</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">K,L</td><td>Are arrays specifying row/column block sizes when specifying a matrix as VBR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+    <tr><td class="paramname">A</td><td>On success, a valid matrix handle will be written to A. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="todo"><dt><b><a class="el" href="todo.html#_todo000013">Todo:</a></b></dt><dd>Shall make <code>A</code> <code>intent(inout)</code> as well. </dd></dl>
+
+</div>
+</div>
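+<p>A minimal usage sketch (not part of the generated reference; <code>cuscr_insert_entry</code> and <code>cuscr_end</code> are documented in this module): a VBR layout with two block rows of sizes 1 and 2 and two block columns of sizes 2 and 1, i.e. a 3x3 matrix overall.</p>
+<pre class="fragment">
+program vbr_sketch
+  use blas_sparse
+  implicit none
+  integer :: A, istat
+  integer :: K(2), L(2)
+  K = (/ 1, 2 /)   ! row block sizes: 1+2 = 3 rows
+  L = (/ 2, 1 /)   ! column block sizes: 2+1 = 3 columns
+  call cuscr_variable_block_begin(2, 2, K, L, A, istat)
+  call cuscr_insert_entry(A, (1.e0,0.e0), 1, 1, istat)
+  call cuscr_end(A, istat)
+end program vbr_sketch
+</pre>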
+<a class="anchor" id="a4f7e8d071d2309ed60cb9d588fd749b6"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::cusmm </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>c</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldc</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>. </td></tr>
+    <tr><td class="paramname">c</td><td>Dense vector <em>c</em>. </td></tr>
+    <tr><td class="paramname">ldc</td><td>Leading dimension of <em>c</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="classblas__sparse.html#acf0fe16da38fc03226e462dc6104cc68">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d0 [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
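+<p>A minimal usage sketch (not part of the generated reference; <code>blas_colmajor</code> and <code>blas_no_trans</code> are assumed from the <code>blas_enum</code> constants): assemble a small identity-like matrix and compute C = alpha*A*B + C with two right-hand sides.</p>
+<pre class="fragment">
+program spmm_sketch
+  use blas_sparse
+  implicit none
+  integer :: A, istat
+  integer, parameter :: n = 2, nrhs = 2
+  complex(kind(1.e0)) :: alpha, b(n*nrhs), c(n*nrhs)
+  call cuscr_begin(n, n, A, istat)
+  call cuscr_insert_entry(A, (1.e0,0.e0), 1, 1, istat)
+  call cuscr_insert_entry(A, (1.e0,0.e0), 2, 2, istat)
+  call cuscr_end(A, istat)
+  alpha = (1.e0,0.e0)
+  b = (1.e0,0.e0)   ! B: n x nrhs, column-major, leading dimension n
+  c = (0.e0,0.e0)
+  call cusmm(blas_colmajor, blas_no_trans, nrhs, alpha, A, b, n, c, n, istat)
+end program spmm_sketch
+</pre>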
+<a class="anchor" id="a437ce36d8520ffeadfbb6e6f9885b9f3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::cusmv </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>. </td></tr>
+    <tr><td class="paramname">y</td><td>Dense vector <em>y</em>. </td></tr>
+    <tr><td class="paramname">incy</td><td>Stride of <em>y</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="classblas__sparse.html#acf0fe16da38fc03226e462dc6104cc68">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d0 [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
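+<p>A minimal usage sketch (not part of the generated reference; <code>blas_no_trans</code> is assumed from the <code>blas_enum</code> constants): assemble a small diagonal matrix and compute y = alpha*A*x + y with unit strides.</p>
+<pre class="fragment">
+program spmv_sketch
+  use blas_sparse
+  implicit none
+  integer :: A, istat
+  complex(kind(1.e0)) :: alpha, x(2), y(2)
+  call cuscr_begin(2, 2, A, istat)
+  call cuscr_insert_entry(A, (2.e0,0.e0), 1, 1, istat)
+  call cuscr_insert_entry(A, (3.e0,0.e0), 2, 2, istat)
+  call cuscr_end(A, istat)
+  alpha = (1.e0,0.e0)
+  x = (1.e0,0.e0)
+  y = (0.e0,0.e0)
+  call cusmv(blas_no_trans, alpha, A, x, 1, y, 1, istat)   ! y = alpha*A*x + y
+  if (istat /= 0) print *, 'cusmv failed'
+end program spmv_sketch
+</pre>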
+<a class="anchor" id="abc00bd143edf8a993e7d79a1d8baf636"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::cussm </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Triangular solve, by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a8f2db2c64bbd1ecd032fb7a103e30c97"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::cussv </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Triangular solve, by a dense vector. Either of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
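+<p>A minimal usage sketch (not part of the generated reference; the <code>ussp</code> call and the <code>blas_lower_triangular</code> property are assumptions based on the property-setting interface mentioned above): build a lower triangular handle and solve in place, overwriting x.</p>
+<pre class="fragment">
+program trsv_sketch
+  use blas_sparse
+  implicit none
+  integer :: T, istat
+  complex(kind(1.e0)) :: alpha, x(2)
+  call cuscr_begin(2, 2, T, istat)
+  call ussp(T, blas_lower_triangular, istat)   ! assumed property-setting call
+  call cuscr_insert_entry(T, (2.e0,0.e0), 1, 1, istat)
+  call cuscr_insert_entry(T, (1.e0,0.e0), 2, 1, istat)
+  call cuscr_insert_entry(T, (4.e0,0.e0), 2, 2, istat)
+  call cuscr_end(T, istat)
+  alpha = (1.e0,0.e0)
+  x = (1.e0,0.e0)
+  call cussv(blas_no_trans, alpha, T, x, 1, istat)   ! x overwritten with the solution
+end program trsv_sketch
+</pre>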
+<a class="anchor" id="acf14608f8b0375ca133b7f850bde3b50"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::duscr_begin </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>m</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>n</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">m</td><td>Is the count of rows. </td></tr>
+    <tr><td class="paramname">n</td><td>Is the count of columns.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+    <tr><td class="paramname">A</td><td>On success, a valid matrix handle will be written to A. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="todo"><dt><b><a class="el" href="todo.html#_todo000004">Todo:</a></b></dt><dd>Shall make <code>A</code> <code>intent(inout)</code> as well. </dd></dl>
+
+</div>
+</div>
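+<p>A minimal end-to-end sketch in double precision (not part of the generated reference; <code>dusmv</code> is referenced above, while <code>usds</code> as the handle-destroying routine is an assumption): create, fill, assemble, and multiply a 2x2 diagonal matrix.</p>
+<pre class="fragment">
+program lifecycle_sketch
+  use blas_sparse
+  implicit none
+  integer :: A, istat
+  real(kind(1.d0)) :: x(2), y(2)
+  call duscr_begin(2, 2, A, istat)
+  call duscr_insert_entry(A, 1.d0, 1, 1, istat)
+  call duscr_insert_entry(A, 2.d0, 2, 2, istat)
+  call duscr_end(A, istat)            ! assemble: the matrix leaves build state
+  x = 1.d0
+  y = 0.d0
+  call dusmv(blas_no_trans, 1.d0, A, x, 1, y, 1, istat)   ! y = A*x
+  call usds(A, istat)                 ! release the handle (assumed name)
+end program lifecycle_sketch
+</pre>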
+<a class="anchor" id="ab33c2f497f0a53213f38cd8449ab4349"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::duscr_block_begin </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">k,l</td><td>Are row and column dimensions when specifying a matrix as BCSR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+    <tr><td class="paramname">A</td><td>On success, a valid matrix handle will be written to A. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="todo"><dt><b><a class="el" href="todo.html#_todo000008">Todo:</a></b></dt><dd>Shall make <code>A</code> <code>intent(inout)</code> as well. </dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="a88d066acac28b6fe7c7cdc9e6941ff8f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::duscr_end </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Makes an assembled matrix out of a matrix in build state. After this, it is no longer possible to insert nonzeroes; computational routines may now be called on the matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a40b43e04b282dd6f6ad11f51701a9b81"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::duscr_insert_block </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the blocked <code>begin</code> function corresponding to the type. If no blocked <code>begin</code> function has been called, 1x1 blocking (that is, no blocking) will be assumed. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row and column strides in accessing <code>val</code>. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Block row/column indices. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">BLAS_duscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286" [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a533a7082811ea859d079b5e9513ce1b4"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::duscr_insert_clique </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole clique in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">k,l</td><td>Clique rows and columns count. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row/columns stride in accessing the clique. </td></tr>
+    <tr><td class="paramname">indx,jndx</td><td>Row/column indices arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a1ed0bf47156c5d299ef678b71aec7ef0"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::duscr_insert_col </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a87f44b33cf81a30af58fe9a299ea78a3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::duscr_insert_entries </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">jndx</td><td>Column indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="ae3706fcae9dcbf6ebe96335717823939"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::duscr_insert_entry </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Row and column indices.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a055df1b4ef9aa7e7937bb1dfe1f228b9"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::duscr_insert_row </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row index.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="ab1fd9e9f8cdd5f79134873fd6af47c28"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::duscr_variable_block_begin </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>K</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>L</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">K,L</td><td>Are arrays specifying row/column block sizes when specifying a matrix as VBR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+    <tr><td class="paramname">A</td><td>On success, a valid matrix handle will be written to A. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="todo"><dt><b><a class="el" href="todo.html#_todo000012">Todo:</a></b></dt><dd>Shall make <code>A</code> <code>intent(inout)</code> as well. </dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ae717638ebcf6e277f2621fd8eae75249"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::dusmm </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>c</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldc</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>. </td></tr>
+    <tr><td class="paramname">c</td><td>Dense vector <em>c</em>. </td></tr>
+    <tr><td class="paramname">ldc</td><td>Leading dimension of <em>c</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="classblas__sparse.html#acf0fe16da38fc03226e462dc6104cc68">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d0 [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
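+<p>A minimal sketch, assuming <code>use blas_sparse</code> and the <code>blas_colmajor</code>/<code>blas_no_trans</code> enumeration constants, multiplying two right-hand sides by a 2x2 identity:</p>
+<pre class="fragment">
+      program dusmm_example
+        use blas_sparse
+        implicit none
+        integer :: A, istat
+        real(kind(1.d0)) :: b(4), c(4)
+        call duscr_begin(2, 2, A, istat)
+        call duscr_insert_entry(A, 1.d0, 1, 1, istat)
+        call duscr_insert_entry(A, 1.d0, 2, 2, istat)
+        call uscr_end(A, istat)
+        b = 1.d0
+        c = 0.d0
+        ! c := 1.0 * A * b + c, nrhs = 2, column-major, ldb = ldc = 2
+        call dusmm(blas_colmajor, blas_no_trans, 2, 1.d0, A, b, 2, c, 2, istat)
+        call usds(A, istat)
+      end program dusmm_example
+</pre>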
+</div>
+</div>
+<a class="anchor" id="a9fd3bf400531b8277a082b0663491329"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::dusmv </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>. </td></tr>
+    <tr><td class="paramname">y</td><td>Dense vector <em>y</em>. </td></tr>
+    <tr><td class="paramname">incy</td><td>Stride of <em>y</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="classblas__sparse.html#acf0fe16da38fc03226e462dc6104cc68">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d0 [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
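+<p>A self-contained sketch, assuming <code>use blas_sparse</code> and the <code>blas_no_trans</code> constant: a 2x2 identity is built, multiplied against, and destroyed:</p>
+<pre class="fragment">
+      program dusmv_example
+        use blas_sparse
+        implicit none
+        integer :: A, istat, i
+        real(kind(1.d0)) :: x(2), y(2)
+        call duscr_begin(2, 2, A, istat)           ! empty matrix, build state
+        do i = 1, 2
+          call duscr_insert_entry(A, 1.d0, i, i, istat)
+        end do
+        call uscr_end(A, istat)                    ! assemble
+        x = (/ 1.d0, 2.d0 /)
+        y = 0.d0
+        call dusmv(blas_no_trans, 1.d0, A, x, 1, y, 1, istat) ! y := A x + y
+        call usds(A, istat)                        ! destroy
+      end program dusmv_example
+</pre>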
+</div>
+</div>
+<a class="anchor" id="a0eb97f56a6467e87ce06f8be8a50e88d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::dussm </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Triangular solve, by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
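+<p>A sketch, assuming <code>use blas_sparse</code>, the <code>blas_colmajor</code>/<code>blas_no_trans</code>/<code>blas_lower_triangular</code> constants, and that triangularity is declared via the module's <code>ussp</code> (an assumption based on the BLAS_ussp documentation referenced above):</p>
+<pre class="fragment">
+      program dussm_example
+        use blas_sparse
+        implicit none
+        integer :: T, istat
+        real(kind(1.d0)) :: b(4)
+        call duscr_begin(2, 2, T, istat)
+        call ussp(T, blas_lower_triangular, istat) ! declare triangularity
+        call duscr_insert_entry(T, 1.d0, 1, 1, istat)
+        call duscr_insert_entry(T, 1.d0, 2, 2, istat)
+        call uscr_end(T, istat)
+        b = 1.d0
+        ! b := 1.0 * T**(-1) * b, nrhs = 2, column-major, ldb = 2
+        call dussm(blas_colmajor, blas_no_trans, 2, 1.d0, T, b, 2, istat)
+        call usds(T, istat)
+      end program dussm_example
+</pre>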
+</div>
+</div>
+<a class="anchor" id="aae591d7a08af50e34313f347d888779d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::dussv </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Triangular solve, by a dense vector. Either of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
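+<p>A self-contained sketch, under the same assumptions as above (module <code>ussp</code> and the <code>blas_lower_triangular</code> constant), solving a 2x2 lower triangular system in place:</p>
+<pre class="fragment">
+      program dussv_example
+        use blas_sparse
+        implicit none
+        integer :: T, istat
+        real(kind(1.d0)) :: x(2)
+        call duscr_begin(2, 2, T, istat)
+        call ussp(T, blas_lower_triangular, istat)  ! declare triangularity
+        call duscr_insert_entry(T, 2.d0, 1, 1, istat)
+        call duscr_insert_entry(T, 1.d0, 2, 1, istat)
+        call duscr_insert_entry(T, 2.d0, 2, 2, istat)
+        call uscr_end(T, istat)
+        x = (/ 2.d0, 5.d0 /)
+        call dussv(blas_no_trans, 1.d0, T, x, 1, istat)  ! x := T**(-1) x
+        call usds(T, istat)
+      end program dussv_example
+</pre>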
+</div>
+</div>
+<a class="anchor" id="ae78739e1ebe48fe8b9752a43cd5c15a0"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::suscr_begin </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>m</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>n</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">m</td><td>Is the count of rows. </td></tr>
+    <tr><td class="paramname">n</td><td>Is the count of columns.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+    <tr><td class="paramname">A</td><td>On success, a valid matrix handle will be written to A. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="todo"><dt><b><a class="el" href="todo.html#_todo000003">Todo:</a></b></dt><dd>Shall make <code>A</code> <code>intent(inout)</code> as well. </dd></dl>
+
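+<p>A minimal lifecycle sketch, assuming <code>use blas_sparse</code>: create, populate, assemble, use, destroy:</p>
+<pre class="fragment">
+      program suscr_example
+        use blas_sparse
+        implicit none
+        integer :: A, istat
+        call suscr_begin(3, 3, A, istat)              ! 3 rows, 3 columns
+        call suscr_insert_entry(A, 1.e0, 1, 1, istat) ! while in build state
+        call suscr_end(A, istat)                      ! assemble
+        ! ... computational routines (e.g. susmv) may be called here ...
+        call usds(A, istat)                           ! free the handle
+      end program suscr_example
+</pre>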
+</div>
+</div>
+<a class="anchor" id="a8ccdce913bf1b8a1d30b6889611143cb"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::suscr_block_begin </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">k,l</td><td>Are row and column dimensions when specifying a matrix as BCSR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+    <tr><td class="paramname">A</td><td>On success, a valid matrix handle will be written to A. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="todo"><dt><b><a class="el" href="todo.html#_todo000007">Todo:</a></b></dt><dd>Shall make <code>A</code> <code>intent(inout)</code> as well. </dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="a38d9574e6360fcaa6035eaf9518001d8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::suscr_end </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Makes an assembled matrix out of a matrix in build state. After this, it is no longer possible to insert nonzeroes; however, computational routines can be called on the matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a1f4709630ab2be2247580eb1fbb48472"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::suscr_insert_block </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the corresponding (typed) blocked <code>begin</code> function. If no blocked <code>begin</code> function was called, 1x1 blocking (that is, no blocking) is assumed. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row and column strides in accessing <code>val</code>. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Block row/column indices. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">BLAS_duscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286" [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
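+<p>A sketch, assuming <code>use blas_sparse</code> and that <code>val</code> is stored column-major (so <code>row_stride</code>=1, <code>col_stride</code>=2, an assumed stride convention):</p>
+<pre class="fragment">
+      program block_example
+        use blas_sparse
+        implicit none
+        integer :: A, istat
+        real(kind(1.e0)) :: val(4)
+        ! 2 x 2 blocked matrix of 2 x 2 blocks (4 x 4 overall)
+        call suscr_block_begin(2, 2, 2, 2, A, istat)
+        val = (/ 1.e0, 2.e0, 3.e0, 4.e0 /)  ! [1 3; 2 4], column-major
+        ! insert as block (1,1); row_stride = 1, col_stride = 2
+        call suscr_insert_block(A, val, 1, 2, 1, 1, istat)
+        call suscr_end(A, istat)
+        call usds(A, istat)
+      end program block_example
+</pre>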
+</div>
+</div>
+<a class="anchor" id="a508ee5b058f7c6a1a3d21d3f706cddd4"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::suscr_insert_clique </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole clique in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">k,l</td><td>Clique rows and columns count. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row/columns stride in accessing the clique. </td></tr>
+    <tr><td class="paramname">indx,jndx</td><td>Row/column indices arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
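+<p>A sketch, assuming <code>use blas_sparse</code> and a column-major <code>val</code> layout (an assumed stride convention), inserting a 2x2 clique at rows (1,3) and columns (2,4):</p>
+<pre class="fragment">
+      program clique_example
+        use blas_sparse
+        implicit none
+        integer :: A, istat
+        integer :: indx(2), jndx(2)
+        real(kind(1.e0)) :: val(4)
+        call suscr_begin(4, 4, A, istat)
+        indx = (/ 1, 3 /)                    ! clique row indices
+        jndx = (/ 2, 4 /)                    ! clique column indices
+        val  = (/ 1.e0, 2.e0, 3.e0, 4.e0 /)  ! 2 x 2 values, column-major
+        call suscr_insert_clique(A, 2, 2, val, 1, 2, indx, jndx, istat)
+        call suscr_end(A, istat)
+        call usds(A, istat)
+      end program clique_example
+</pre>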
+</div>
+</div>
+<a class="anchor" id="a183a3ff9aa5af1dcedc5cf7bd4918b5e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::suscr_insert_col </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a5c4a2d0b9164fb232c102426693ccfd1"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::suscr_insert_entries </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">jndx</td><td>Column indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
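+<p>A sketch, assuming <code>use blas_sparse</code>, inserting three coordinate-format entries into a build-state matrix:</p>
+<pre class="fragment">
+      program entries_example
+        use blas_sparse
+        implicit none
+        integer :: A, istat
+        integer :: indx(3), jndx(3)
+        real(kind(1.e0)) :: val(3)
+        call suscr_begin(3, 3, A, istat)
+        val  = (/ 11.e0, 22.e0, 33.e0 /)
+        indx = (/ 1, 2, 3 /)   ! row indices
+        jndx = (/ 1, 2, 3 /)   ! column indices
+        call suscr_insert_entries(A, 3, val, indx, jndx, istat)
+        call suscr_end(A, istat)
+        call usds(A, istat)
+      end program entries_example
+</pre>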
+</div>
+</div>
+<a class="anchor" id="a26a40430bf4de9b01eaf9dacf999dea6"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::suscr_insert_entry </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Row and column indices.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a9ec4465da954f0761c7edfd78d2be717"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::suscr_insert_row </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row index.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
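+<p>A sketch, assuming <code>use blas_sparse</code>, inserting row 2 with two nonzeroes (at columns 1 and 3):</p>
+<pre class="fragment">
+      program row_example
+        use blas_sparse
+        implicit none
+        integer :: A, istat
+        integer :: indx(2)
+        real(kind(1.e0)) :: val(2)
+        call suscr_begin(3, 3, A, istat)
+        val  = (/ 4.e0, 5.e0 /)
+        indx = (/ 1, 3 /)      ! column indices of the nonzeroes in row 2
+        call suscr_insert_row(A, 2, 2, val, indx, istat)
+        call suscr_end(A, istat)
+        call usds(A, istat)
+      end program row_example
+</pre>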
+</div>
+</div>
+<a class="anchor" id="aab5942faf7f9fe31f9dfd13143f37dc7"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::suscr_variable_block_begin </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>K</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>L</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">K,L</td><td>Are arrays specifying row/column block sizes when specifying a matrix as VBR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+    <tr><td class="paramname">A</td><td>On success, a valid matrix handle will be written to A. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="todo"><dt><b><a class="el" href="todo.html#_todo000011">Todo:</a></b></dt><dd>Shall make <code>A</code> <code>intent(inout)</code> as well. </dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="a03977fef75f9ee8773400c08153069d5"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::susmm </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>c</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldc</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>. </td></tr>
+    <tr><td class="paramname">c</td><td>Dense vector <em>c</em>. </td></tr>
+    <tr><td class="paramname">ldc</td><td>Leading dimension of <em>c</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="classblas__sparse.html#acf0fe16da38fc03226e462dc6104cc68">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d0 [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="afb6e4dbb50553fa86818408d9db6d7c3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::susmv </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>. </td></tr>
+    <tr><td class="paramname">y</td><td>Dense vector <em>y</em>. </td></tr>
+    <tr><td class="paramname">incy</td><td>Stride of <em>y</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="classblas__sparse.html#acf0fe16da38fc03226e462dc6104cc68">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d0 [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a617ef412adc547e3f050610874549889"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::sussm </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Triangular solve, by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="ab21d16c7bda69becec8edf113b62dee0"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::sussv </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Triangular solve, by a dense vector. Either of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a48f1e1b82322910d45a1b2455421745f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_end </td>
+          <td>(</td>
+          <td class="paramtype">integer, intent(in) </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Makes an assembled matrix out of a matrix in build state. After this, it is no longer possible to insert nonzeroes; only computational routines may be used on the matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
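+<p>A hedged sketch of the build/assembly state machine, using only routines documented on this page (the program wrapper is illustrative): insertion is legal only between a <code>*uscr_begin</code> call and <code>uscr_end</code>, computation only afterwards:</p>
+<pre class="fragment">
+program assemble_sketch
+  use blas_sparse
+  implicit none
+  integer :: A, istat
+  call zuscr_begin(3, 3, A, istat)                       ! build state
+  call zuscr_insert_entry(A, (1.d0,0.d0), 1, 1, istat)   ! insertion allowed
+  call uscr_end(A, istat)                                ! now assembled:
+  ! from here on only computational routines (e.g. zusmv) may use A;
+  ! further zuscr_insert_* calls on A would be invalid.
+  call usds(A, istat)                                    ! release the handle
+end program assemble_sketch
+</pre>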
+</div>
+</div>
+<a class="anchor" id="a8a3b6cd055048ab5e15b1b18be291f32"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::usds </td>
+          <td>(</td>
+          <td class="paramtype">integer, intent(in) </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Destroys a matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a1e0eb1ccd8ffbf49baefe455a248f7fe"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::usgp </td>
+          <td>(</td>
+          <td class="paramtype">integer, intent(in) </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(in) </td>
+          <td class="paramname"><em>pname</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Get a matrix property. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A is the matrix to apply the property. </td></tr>
+    <tr><td class="paramname">pname</td><td>The desired matrix property. For valid matrix properties, see <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729">blas_rsb_ext_type</a>, <a class="el" href="rsb__libspblas_8h.html#acc2b26a405868ca1bd8a18e0eb62e820">blas_uplo_type</a>, <a class="el" href="rsb__libspblas_8h.html#ad7b35ac9114bfe21e15d011bf878b164">blas_diag_type</a>, <a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_t [...]
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a469df92a4d25a9554fb1d79cdac1de84"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::ussp </td>
+          <td>(</td>
+          <td class="paramtype">integer, intent(in) </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(in) </td>
+          <td class="paramname"><em>pname</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Set a matrix property. Should be called just after creation and before inserting any nonzeroes. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A is the matrix to apply the property. </td></tr>
+    <tr><td class="paramname">pname</td><td>The desired matrix property. For valid matrix properties, see <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729">blas_rsb_ext_type</a>, <a class="el" href="rsb__libspblas_8h.html#acc2b26a405868ca1bd8a18e0eb62e820">blas_uplo_type</a>, <a class="el" href="rsb__libspblas_8h.html#ad7b35ac9114bfe21e15d011bf878b164">blas_diag_type</a>, <a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_t [...]
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
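+<p>A hedged sketch of the required call order, using property constants listed under Field Documentation below (the program wrapper is illustrative, not taken from the distribution):</p>
+<pre class="fragment">
+program ussp_sketch
+  use blas_sparse
+  implicit none
+  integer :: A, istat
+  call zuscr_begin(4, 4, A, istat)
+  ! Properties must be set before inserting any nonzeroes.
+  call ussp(A, blas_lower_triangular, istat)
+  call ussp(A, blas_non_unit_diag, istat)
+  call zuscr_insert_entry(A, (1.d0,0.d0), 1, 1, istat)
+  call uscr_end(A, istat)
+  call usds(A, istat)
+end program ussp_sketch
+</pre>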
+</div>
+</div>
+<a class="anchor" id="a9ec8326625fe0762e3e6e523260d2655"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::zuscr_begin </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>m</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>n</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">m</td><td>Is the count of rows. </td></tr>
+    <tr><td class="paramname">n</td><td>Is the count of columns.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+    <tr><td class="paramname">A</td><td>On success, a valid matrix handle will be written to A. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="todo"><dt><b><a class="el" href="todo.html#_todo000006">Todo:</a></b></dt><dd>Shall make <code>A</code> <code>intent(inout)</code> as well. </dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="a5fbd2bae9f3849fda1be4691ca3df5ea"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::zuscr_block_begin </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">k,l</td><td>Are row and column dimensions when specifying a matrix as BCSR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+    <tr><td class="paramname">A</td><td>On success, a valid matrix handle will be written to A. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="todo"><dt><b><a class="el" href="todo.html#_todo000010">Todo:</a></b></dt><dd>Shall make <code>A</code> <code>intent(inout)</code> as well. </dd></dl>
+
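+<p>A hedged creation sketch, under the assumed BCSR convention that a matrix declared with <code>Mb</code> x <code>Nb</code> blocks of size <code>k</code> x <code>l</code> has <code>Mb*k</code> rows and <code>Nb*l</code> columns overall:</p>
+<pre class="fragment">
+program block_begin_sketch
+  use blas_sparse
+  implicit none
+  integer :: A, istat
+  ! A 2x2 grid of 3x3 blocks: a 6x6 matrix overall (assumed convention).
+  call zuscr_block_begin(2, 2, 3, 3, A, istat)
+  if (istat /= 0) stop 'creation failed'
+  ! ... insert blocks with zuscr_insert_block, then call uscr_end(A, istat) ...
+  call usds(A, istat)
+end program block_begin_sketch
+</pre>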
+</div>
+</div>
+<a class="anchor" id="a5f00b912397c8dc3ee87fecdf4cf98aa"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::zuscr_end </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Makes an assembled matrix out of a matrix in build state. After this, it is no longer possible to insert nonzeroes; only computational routines may be used on the matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a0ea88b095d147ffe96d05c5d53b4480a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::zuscr_insert_block </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the corresponding (typed) blocked <code>begin</code> function; if no blocked <code>begin</code> function has been called, 1x1 (that is, no) blocking is assumed. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row and column strides in accessing <code>val</code>. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Block row/column indices. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">BLAS_duscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286" [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
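+<p>A hedged sketch of the stride convention, under the assumption that element (r,c) of the block is read from <code>val(1 + (r-1)*row_stride + (c-1)*col_stride)</code>; a column-major packed block then uses <code>row_stride=1</code> and <code>col_stride</code> equal to the block row count:</p>
+<pre class="fragment">
+program insert_block_sketch
+  use blas_sparse
+  implicit none
+  integer :: A, istat
+  complex(kind(1.d0)) :: val(4)
+  call zuscr_block_begin(2, 2, 2, 2, A, istat)  ! 2x2 grid of 2x2 blocks
+  ! Column-major packing of the block [[1,2],[3,4]] (assumed stride mapping):
+  ! val = (a11, a21, a12, a22), row_stride=1, col_stride=2.
+  val = (/ (1.d0,0.d0), (3.d0,0.d0), (2.d0,0.d0), (4.d0,0.d0) /)
+  call zuscr_insert_block(A, val, 1, 2, 1, 1, istat)  ! block position (1,1)
+  call uscr_end(A, istat)
+  call usds(A, istat)
+end program insert_block_sketch
+</pre>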
+</div>
+</div>
+<a class="anchor" id="a0b7e474844552d62f72e18bac4592ced"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::zuscr_insert_clique </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole clique in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">k,l</td><td>Clique rows and columns count. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row/columns stride in accessing the clique. </td></tr>
+    <tr><td class="paramname">indx,jndx</td><td>Row/column indices arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
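+<p>A hedged sketch: a clique is a dense k x l submatrix scattered to arbitrary row and column index sets, with the same (assumed) stride convention for <code>val</code> as in block insertion:</p>
+<pre class="fragment">
+program insert_clique_sketch
+  use blas_sparse
+  implicit none
+  integer :: A, istat
+  complex(kind(1.d0)) :: val(4)
+  integer :: indx(2), jndx(2)
+  call zuscr_begin(5, 5, A, istat)
+  ! A 2x2 clique scattered to rows (1,4) and columns (2,5),
+  ! packed column-major: row_stride=1, col_stride=2 (assumed mapping).
+  val  = (/ (1.d0,0.d0), (3.d0,0.d0), (2.d0,0.d0), (4.d0,0.d0) /)
+  indx = (/ 1, 4 /)
+  jndx = (/ 2, 5 /)
+  call zuscr_insert_clique(A, 2, 2, val, 1, 2, indx, jndx, istat)
+  call uscr_end(A, istat)
+  call usds(A, istat)
+end program insert_clique_sketch
+</pre>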
+</div>
+</div>
+<a class="anchor" id="a3a2dcc960e33dbae28abc3f1fdd52e66"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::zuscr_insert_col </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="ad28c55a5ed7b359a30a2538a45878e08"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::zuscr_insert_entries </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">jndx</td><td>Column indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
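+<p>A hedged sketch of coordinate-format batch insertion, using only routines documented on this page (the program wrapper is illustrative):</p>
+<pre class="fragment">
+program insert_entries_sketch
+  use blas_sparse
+  implicit none
+  integer :: A, istat
+  complex(kind(1.d0)) :: val(3)
+  integer :: indx(3), jndx(3)
+  call zuscr_begin(3, 3, A, istat)
+  ! Three coordinate-format nonzeroes; duplicate (i,j) pairs would be summed.
+  val  = (/ (1.d0,0.d0), (2.d0,0.d0), (3.d0,0.d0) /)
+  indx = (/ 1, 2, 3 /)
+  jndx = (/ 1, 3, 2 /)
+  call zuscr_insert_entries(A, 3, val, indx, jndx, istat)
+  call uscr_end(A, istat)
+  call usds(A, istat)
+end program insert_entries_sketch
+</pre>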
+</div>
+</div>
+<a class="anchor" id="af003c4713fb7a39896ad1537fec94ea9"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::zuscr_insert_entry </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Row and column indices.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a7114c5a5b9487634711034c693b5e9b3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::zuscr_insert_row </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row index.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
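+<p>A hedged sketch (illustrative program wrapper); <code>indx</code> carries the column positions of the row's nonzeroes:</p>
+<pre class="fragment">
+program insert_row_sketch
+  use blas_sparse
+  implicit none
+  integer :: A, istat
+  complex(kind(1.d0)) :: val(2)
+  integer :: indx(2)
+  call zuscr_begin(4, 4, A, istat)
+  ! Two nonzeroes in row 2, at columns 1 and 3.
+  val  = (/ (1.d0,0.d0), (2.d0,0.d0) /)
+  indx = (/ 1, 3 /)
+  call zuscr_insert_row(A, 2, 2, val, indx, istat)
+  call uscr_end(A, istat)
+  call usds(A, istat)
+end program insert_row_sketch
+</pre>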
+</div>
+</div>
+<a class="anchor" id="a700e8b151004b9c8829a1fe4fd331465"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::zuscr_variable_block_begin </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>K</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>L</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">K,L</td><td>Are arrays specifying row/column block sizes when specifying a matrix as VBR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+    <tr><td class="paramname">A</td><td>On success, a valid matrix handle will be written to A. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="todo"><dt><b><a class="el" href="todo.html#_todo000014">Todo:</a></b></dt><dd>Shall make <code>A</code> <code>intent(inout)</code> as well. </dd></dl>
+
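+<p>A hedged sketch, under the assumed VBR convention that the overall dimensions are the sums of the block-size arrays (<code>sum(K)</code> rows and <code>sum(L)</code> columns):</p>
+<pre class="fragment">
+program vbr_begin_sketch
+  use blas_sparse
+  implicit none
+  integer :: A, istat
+  integer :: K(2), L(3)
+  ! 2 block rows of heights 1 and 3; 3 block columns of widths 2, 1, 1:
+  ! a 4x4 matrix overall (assumed VBR convention).
+  K = (/ 1, 3 /)
+  L = (/ 2, 1, 1 /)
+  call zuscr_variable_block_begin(2, 3, K, L, A, istat)
+  if (istat /= 0) stop 'creation failed'
+  ! ... insert with zuscr_insert_block, then call uscr_end(A, istat) ...
+  call usds(A, istat)
+end program vbr_begin_sketch
+</pre>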
+</div>
+</div>
+<a class="anchor" id="ae1048833494ef86cd0d74648989599db"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::zusmm </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>c</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldc</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>. </td></tr>
+    <tr><td class="paramname">c</td><td>Dense vector <em>c</em>. </td></tr>
+    <tr><td class="paramname">ldc</td><td>Leading dimension of <em>c</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="classblas__sparse.html#acf0fe16da38fc03226e462dc6104cc68">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d0 [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
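+<p>A hedged sketch of C &lt;- alpha*A*B + C with two right-hand sides stored column-major (<code>blas_colmajor</code> is listed under Field Documentation; the diagonal test matrix is illustrative):</p>
+<pre class="fragment">
+program zusmm_sketch
+  use blas_sparse
+  implicit none
+  integer :: A, istat
+  integer, parameter :: n = 2, nrhs = 2
+  complex(kind(1.d0)) :: b(n*nrhs), c(n*nrhs)
+  call zuscr_begin(n, n, A, istat)
+  call zuscr_insert_entry(A, (2.d0,0.d0), 1, 1, istat)
+  call zuscr_insert_entry(A, (3.d0,0.d0), 2, 2, istat)
+  call uscr_end(A, istat)
+  b = (1.d0, 0.d0)   ! both right-hand side columns all ones
+  c = (0.d0, 0.d0)
+  ! ldb and ldc are the leading dimensions of b and c (here n).
+  call zusmm(blas_colmajor, blas_no_trans, nrhs, (1.d0,0.d0), A, b, n, c, n, istat)
+  print *, c         ! expected: (2,0) (3,0) (2,0) (3,0)
+  call usds(A, istat)
+end program zusmm_sketch
+</pre>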
+</div>
+</div>
+<a class="anchor" id="af67f81abcb78cc03000257888e47f517"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::zusmv </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>. </td></tr>
+    <tr><td class="paramname">y</td><td>Dense vector <em>y</em>. </td></tr>
+    <tr><td class="paramname">incy</td><td>Stride of <em>y</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="classblas__sparse.html#acf0fe16da38fc03226e462dc6104cc68">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d0 [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
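+<p>A hedged sketch of a conjugate-transposed multiply, y &lt;- alpha*A<sup>H</sup>*x + y, selected through <code>blas_conj_trans</code> (illustrative program wrapper):</p>
+<pre class="fragment">
+program zusmv_sketch
+  use blas_sparse
+  implicit none
+  integer :: A, istat
+  complex(kind(1.d0)) :: x(2), y(2)
+  call zuscr_begin(2, 2, A, istat)
+  call zuscr_insert_entry(A, (0.d0,1.d0), 1, 2, istat)  ! A(1,2) = i
+  call uscr_end(A, istat)
+  x = (/ (1.d0,0.d0), (1.d0,0.d0) /)
+  y = (0.d0, 0.d0)
+  call zusmv(blas_conj_trans, (1.d0,0.d0), A, x, 1, y, 1, istat)
+  print *, y   ! expected: (0,0) (0,-1), since conj(A)(2,1)**T carries conj(i) = -i
+  call usds(A, istat)
+end program zusmv_sketch
+</pre>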
+</div>
+</div>
+<a class="anchor" id="a772211da8da7a031fe7845be6a2dd403"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::zussm </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Triangular solve, by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
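+<p>A hedged sketch of an in-place multi-vector triangular solve, B &lt;- alpha*T<sup>-1</sup>*B, with two right-hand sides stored column-major (illustrative program wrapper):</p>
+<pre class="fragment">
+program zussm_sketch
+  use blas_sparse
+  implicit none
+  integer :: T, istat
+  integer, parameter :: n = 2, nrhs = 2
+  complex(kind(1.d0)) :: b(n*nrhs)
+  call zuscr_begin(n, n, T, istat)
+  call ussp(T, blas_lower_triangular, istat)
+  call zuscr_insert_entry(T, (2.d0,0.d0), 1, 1, istat)
+  call zuscr_insert_entry(T, (1.d0,0.d0), 2, 1, istat)
+  call zuscr_insert_entry(T, (2.d0,0.d0), 2, 2, istat)
+  call uscr_end(T, istat)
+  ! Two right-hand sides, column-major, ldb = n; solved in place.
+  b = (/ (2.d0,0.d0), (3.d0,0.d0), (4.d0,0.d0), (6.d0,0.d0) /)
+  call zussm(blas_colmajor, blas_no_trans, nrhs, (1.d0,0.d0), T, b, n, istat)
+  print *, b   ! expected columns: (1,1) and (2,2)
+  call usds(T, istat)
+end program zussm_sketch
+</pre>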
+</div>
+</div>
+<a class="anchor" id="a2331da0465b9a3298f8b6dd1c3c7c150"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::zussv </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Triangular solve, by a dense vector. Either of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<h2>Field Documentation</h2>
+<a class="anchor" id="ac6324bd9c488f6ad4c176fd05a5c1a94"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_base = 151</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="aef5b352231bcff68b28b97742899558e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_block = 273</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="af4ede8e7f0445be25841733354b747bd"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_colmajor = 102</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="ac72dca9b25a744006fb7e2b272958494"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_complex = 241</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a1964a262e04c046d0f97c7de7cf1d916"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_conj = 191</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a7b8d414b608929ba0abced46c98889d6"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_conj_trans = 113</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="aedf0364e33ddfd0ee88e93acf3683cf7"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_decreasing_order = 182</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a9c54c439abc55e509b4a7ec35f6faa4e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_double_precision = 243</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a18b1555d2c4e1d8b3a8d38bc7105c3fa"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_emax = 156</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a150f1864fd1bf514a9769914490a23ba"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_emin = 155</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="acbf407624c42cad4f1ea47776977d160"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_eps = 157</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="af1dbfea000291bde9fe93507f62a31ba"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_frobenius_norm = 174</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a6dd42fe3a5c74d293855e6ed0825cc67"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_general = 231</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="aaab0006bc8bcddf6cba32a69d3ddbf95"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_hermitian = 233</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a114a6fabae21d32477af3acee15b9d5d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_ieee = 154</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="ae972ff04001d8bbcc52f242134af52d8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_increasing_order = 181</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="afb9ba15096a7184519256ec2923fda49"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_inf_norm = 175</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="aa9ee2ffde87e203fd37719979e7b546d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_invalid_handle = 261</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a9893aa4d547b371f6ba59a8615aa752e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_irregular = 272</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a2fc184f889f72b17fdb2ba6266c25b02"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_jrot_inner = 201</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="ac526dad147c751bb8b175edd47d29c22"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_jrot_outer = 202</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a0612a82431d61a1b6c4c5030e65c5e31"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_jrot_sorted = 203</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a5059846f8eba839bb1afc32abac380e4"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_left_side = 141</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a67e376dc6a7cc769ee24415dc2a8d9d1"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_lower = 122</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a101f5eab06d45474d64bff200d2387ec"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_lower_hermitian = 239</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="ad474a894be1f45a6937c2a880963b1c7"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_lower_symmetric = 237</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a280772883a7487fa68aabd98e4a49342"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_lower_triangular = 235</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a487afe34859579523bba7b4851e106c6"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_max_norm = 177</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a817d9813d36f7a7abea44e3b781e21ba"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_new_handle = 262</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="aabedcf272e063a48f7e310ce04784b17"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_no_conj = 192</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a5b700c1a472d7d12decf3d7d7fd244c2"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_no_trans = 111</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a8e76732b3c06d9fc27669bda37f24ed6"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_non_unit_diag = 131</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a726b73c19dae30439aa65988fa5b5dd1"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_num_cols = 252</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="ab01ecdd54a1f10e944e446e0efed3bc3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_num_nonzeros = 253</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="afe9e1f52ba336f041e1e750b3a989510"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_num_rows = 251</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a60fbe98d827ebea9e7c431e7698bc462"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_one_base = 222</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a05c66c0c87c72e39580258418c46341f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_one_norm = 171</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a0936a1798a61b56c52c116e428a4e6b7"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_open_handle = 263</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="ad8f2f29c92552e53910e5c92feb3567d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_overflow = 160</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a8c3f3a997d8d96f470d44d1f34e3ed39"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_prec = 158</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a6f0692e06d3b42828813a7a0a9ec59bb"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_prec_double = 212</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a305383f56368c35429bfd9e7ca23a0f5"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_prec_extra = 214</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a71c278e64f30229d19b46376c4385669"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_prec_indigenous = 213</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="ab378aeb6aa39b2495f084cd31e32e5a6"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_prec_single = 211</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a0d365ccd71fdedaa5cf30a46f34bcf37"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_real = 242</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a0a60f070ff9a1a864af39e4489c93e31"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_real_inf_norm = 176</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="afecc0bd8ce11628fe2bbe55b9244c295"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_real_max_norm = 178</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="aca85a61f11b3c36113209d61a89e4957"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_real_one_norm = 172</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a21d31433d4f29a6fd54c214b7a26c7d4"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_regular = 271</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a6285c5cc8fe45bb73ece03c7900d5a18"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_right_side = 142</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="af75f815c459344a5a38cd0794b93504a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_rnd = 153</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="aa1d4df9e25dcab40269247450e1b3e4e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_rowmajor = 101</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="acf0fe16da38fc03226e462dc6104cc68"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_rsb_autotune_next_operation = 6666</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a10ebafcdf3cc36cf0471ba20ffcd2980"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_rsb_duplicates_ovw = 9998</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a508fec1d9853698fd08c239dd08a7291"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_rsb_duplicates_sum = 9999</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="abb6a552efaab32ed9687f2e2df895783"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_rsb_rep_coo = 9997</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a9fe6012ccac0890c7f7a8500e77e9ff7"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_rsb_rep_csr = 9996</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a9cd9a2263c79534384bef6bf27e65787"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_rsb_rep_rsb = 9995</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a8cc6d8c9036cb66051cc1cfb7c739b5e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_rsb_spmv_autotuning_off = 6661</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="afab5c86162fcf329199b0666f33cde96"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_rsb_spmv_autotuning_on = 6660</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="aefa9f681506ee4ceb578f11b9a0e664c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_rsb_spmv_n_autotuning_off = 6663</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="abd8e06d35f2c4c3a6ecc1eb315548c43"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_rsb_spmv_n_autotuning_on = 6662</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a56dc72776b8dcdc43f0cebbdc93dcd21"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_rsb_spmv_t_autotuning_off = 6665</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a7bfbab78e4c5a789e2d76274a2fbc96c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_rsb_spmv_t_autotuning_on = 6664</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a0b1596744fa5acb891d4908588249c54"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_sfmin = 161</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a0badb7c2679a5d0ba4e90f599b678768"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_single_precision = 244</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a5d97ddcd53d2bba670233f5335b44f55"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_sparse_const_failure = -1</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="aae79119fabe06a887f461eda50c97d0a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_sparse_const_not_available = -9999</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a0e333ba9a5cc3b014697d0a12d08f6b2"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_sparse_const_success = 0</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a13161955ecb9fc2ce12963cc319c93d2"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_symmetric = 232</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a6acca6eab87ec90dcf71b8c7b40aaa8f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_t = 152</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a12f06635d9f1c40722ad4bd757e737bb"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_trans = 112</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a04619b8ef6be6983ded4e3c22fce63b8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_triangular = 234</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="abb84b2b7195d878e71760bdad596d693"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_two_norm = 173</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a50d0da49cbf6822ed5e9a8ff81faf6d5"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_unassembled = 274</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="ae9f01b90527ebe6b178d4c73a46bbf25"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_underflow = 159</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="af1e902c099efbedd09c7ce65b4772626"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_unit_diag = 132</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a8be70a15dda0ebf3b782b66e72f924d2"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_upper = 121</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a4fcd2dfde7722199b9125542622c8c4a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_upper_hermitian = 240</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="afd82b7f277c54dfa83ab44ea6ed89fb1"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_upper_symmetric = 238</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a97b523912445087a965737cb8cfd69af"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_upper_triangular = 236</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a83704034c72929f3b1df534034b2786d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_valid_handle = 264</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a8c89ffd8863d708e55c5330d11c772f2"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer, parameter blas_sparse::blas_zero_base = 221</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this module was generated from the following file:<ul>
+<li><a class="el" href="rsb__blas__sparse_8F90.html">rsb_blas_sparse.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:24 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
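
[Editorial note on the page above: the named constants are the Fortran mirrors of the Sparse BLAS enumerations — matrix properties such as blas_lower_symmetric, index bases such as blas_one_base, transposition flags such as blas_no_trans — plus librsb-specific extensions in the blas_rsb_* group. Purely as orientation, here is a minimal hypothetical sketch of how such constants are consumed; it is not part of this diff, the entry points duscr_begin/ussp/usmv/usds are assumed from the Sparse BLAS standard, and argument orders should be verified against blas_sparse_proto.h and rsb_blas_sparse.F90 before use.]

      ! Hypothetical sketch (assumed entry points: duscr_begin, ussp,
      ! uscr_insert_entry, uscr_end, usmv, usds); verify signatures
      ! against rsb_blas_sparse.F90 before relying on this.
      PROGRAM blas_sparse_sketch
        USE blas_sparse
        IMPLICIT NONE
        INTEGER :: A, istat
        REAL(KIND(1.d0)) :: x(2) = (/ 1.d0, 1.d0 /)
        REAL(KIND(1.d0)) :: y(2) = (/ 0.d0, 0.d0 /)
        CALL duscr_begin(2, 2, A, istat)              ! 2x2, double precision
        CALL ussp(A, blas_lower_symmetric, istat)     ! property constant from this page
        CALL uscr_insert_entry(A, 4.d0, 1, 1, istat)  ! one-based indices (blas_one_base)
        CALL uscr_insert_entry(A, 2.d0, 2, 1, istat)  ! lower triangle only
        CALL uscr_insert_entry(A, 3.d0, 2, 2, istat)
        CALL uscr_end(A, istat)                       ! assemble the handle
        CALL usmv(blas_no_trans, 1.d0, A, x, 1, y, 1, istat) ! y := y + A*x
        CALL usds(A, istat)                           ! release the handle
      END PROGRAM

[Judging from their grouping on this page, the blas_rsb_* values (e.g. blas_rsb_spmv_autotuning_on, blas_rsb_duplicates_sum) appear intended for the same ussp-style property mechanism rather than for separate entry points.]
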
diff --git a/doc/html/classes.html b/doc/html/classes.html
new file mode 100644
index 0000000..c602de1
--- /dev/null
+++ b/doc/html/classes.html
@@ -0,0 +1,83 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Data Structure Index</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="header">
+  <div class="headertitle">
+<div class="title">Data Structure Index</div>  </div>
+</div><!--header-->
+<div class="contents">
+<div class="qindex"><a class="qindex" href="#letter_B">B</a> | <a class="qindex" href="#letter_R">R</a> | <a class="qindex" href="#letter_U">U</a></div>
+<table style="margin: 10px; white-space: nowrap;" align="center" width="95%" border="0" cellspacing="0" cellpadding="0">
+<tr><td rowspan="2" valign="bottom"><a name="letter_B"></a><table border="0" cellspacing="0" cellpadding="0"><tr><td><div class="ah">  B  </div></td></tr></table>
+</td><td valign="top"><a class="el" href="interfacersb_1_1rsb__lib__exit.html">rsb::rsb_lib_exit</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__free.html">rsb::rsb_mtx_free</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__upd__vals.html">rsb::rsb_mtx_upd_vals</a>   </td><td valign="top"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__clique.html">blas_sparse::uscr_insert_clique</a> [...]
+<tr><td valign="top"><a class="el" href="interfacersb_1_1rsb__lib__get__opt.html">rsb::rsb_lib_get_opt</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__get__coo.html">rsb::rsb_mtx_get_coo</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__perror.html">rsb::rsb_perror</a>   </td><td valign="top"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__col.html">blas_sparse::uscr_insert_col</a> & [...]
+<tr><td valign="top"><a class="el" href="classblas__sparse.html">blas_sparse</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__lib__init.html">rsb::rsb_lib_init</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__get__coo__block.html">rsb::rsb_mtx_get_coo_block</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__psblas__trans__to__rsb__trans.html">rsb::rsb_psblas_trans_to_rsb_trans</a>& [...]
+<tr><td rowspan="2" valign="bottom"><a name="letter_R"></a><table border="0" cellspacing="0" cellpadding="0"><tr><td><div class="ah">  R  </div></td></tr></table>
+</td><td valign="top"><a class="el" href="interfacersb_1_1rsb__lib__reinit.html">rsb::rsb_lib_reinit</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__get__csr.html">rsb::rsb_mtx_get_csr</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__spmm.html">rsb::rsb_spmm</a>   </td><td valign="top"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__entry.html">blas_sparse::uscr_insert_entry</a> &#1 [...]
+<tr><td valign="top"><a class="el" href="interfacersb_1_1rsb__lib__set__opt.html">rsb::rsb_lib_set_opt</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__get__info.html">rsb::rsb_mtx_get_info</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__spmsp.html">rsb::rsb_spmsp</a>   </td><td valign="top"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__row.html">blas_sparse::uscr_insert_row</a> & [...]
+<tr><td valign="top"><a class="el" href="classrsb.html">rsb</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__lib__set__opt__str.html">rsb::rsb_lib_set_opt_str</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__get__info__str.html">rsb::rsb_mtx_get_info_str</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__spmsp__to__dense.html">rsb::rsb_spmsp_to_dense</a>   </td><td v [...]
+<tr><td valign="top"><a class="el" href="interfaceblas__sparse_1_1rsb__blas__get__mtx.html">blas_sparse::rsb_blas_get_mtx</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__add__to__dense.html">rsb::rsb_mtx_add_to_dense</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__get__nrm.html">rsb::rsb_mtx_get_nrm</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__spmv.html">rsb::rsb_spmv< [...]
+<tr><td valign="top"><a class="el" href="interfacersb_1_1rsb__coo__sort.html">rsb::rsb_coo_sort</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__begin.html">rsb::rsb_mtx_alloc_from_coo_begin</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__get__prec.html">rsb::rsb_mtx_get_prec</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__sppsp.html">rsb::rsb_sppsp</a>&# [...]
+<tr><td valign="top"><a class="el" href="interfacersb_1_1rsb__file__mtx__get__dims.html">rsb::rsb_file_mtx_get_dims</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__const.html">rsb::rsb_mtx_alloc_from_coo_const</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__get__rows__sparse.html">rsb::rsb_mtx_get_rows_sparse</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rs [...]
+<tr><td valign="top"><a class="el" href="interfacersb_1_1rsb__file__mtx__load.html">rsb::rsb_file_mtx_load</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__end.html">rsb::rsb_mtx_alloc_from_coo_end</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__get__vals.html">rsb::rsb_mtx_get_vals</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__spsv.html">rsb::rsb_spsv< [...]
+<tr><td valign="top"><a class="el" href="interfacersb_1_1rsb__file__mtx__rndr.html">rsb::rsb_file_mtx_rndr</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__inplace.html">rsb::rsb_mtx_alloc_from_coo_inplace</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__get__vec.html">rsb::rsb_mtx_get_vec</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__strerror__r.html">r [...]
+<tr><td valign="top"><a class="el" href="interfacersb_1_1rsb__file__mtx__save.html">rsb::rsb_file_mtx_save</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csc__const.html">rsb::rsb_mtx_alloc_from_csc_const</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__rndr.html">rsb::rsb_mtx_rndr</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__time.html">rsb::rsb_time</a>&# [...]
+<tr><td valign="top"><a class="el" href="interfacersb_1_1rsb__file__vec__load.html">rsb::rsb_file_vec_load</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csr__const.html">rsb::rsb_mtx_alloc_from_csr_const</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__set__vals.html">rsb::rsb_mtx_set_vals</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__tune__spmm.html">rsb: [...]
+<tr><td valign="top"><a class="el" href="interfacersb_1_1rsb__file__vec__save.html">rsb::rsb_file_vec_save</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csr__inplace.html">rsb::rsb_mtx_alloc_from_csr_inplace</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__switch__to__coo.html">rsb::rsb_mtx_switch_to_coo</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__tune__ [...]
+<tr><td valign="top"><a class="el" href="structrsb__initopts.html">rsb_initopts</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__clone.html">rsb::rsb_mtx_clone</a>   </td><td valign="top"><a class="el" href="interfacersb_1_1rsb__mtx__switch__to__csr.html">rsb::rsb_mtx_switch_to_csr</a>   </td><td rowspan="2" valign="bottom"><a name="letter_U"></a><table border="0" cellspacing="0" cellpadding="0"><tr><td><div class="ah" [...]
+</td><td></td></tr>
+<tr><td></td><td></td><td></td><td></td></tr>
+<tr><td></td><td></td><td></td><td valign="top"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__block.html">blas_sparse::uscr_insert_block</a>   </td><td></td></tr>
+<tr><td></td><td></td><td></td><td></td><td></td></tr>
+</table>
+<div class="qindex"><a class="qindex" href="#letter_B">B</a> | <a class="qindex" href="#letter_R">R</a> | <a class="qindex" href="#letter_U">U</a></div>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
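
[Editorial note on the index above: the rsb entries are thin ISO C BINDING wrappers around the corresponding librsb C functions, documented on the classrsb.html page that follows. Purely as a hypothetical sketch — the C_PTR plumbing and the NULL-options arguments are assumptions to verify against the rsb module source — a minimal init/free/exit skeleton might look like this.]

      ! Hypothetical sketch of the rsb module's ISO C BINDING wrappers;
      ! check argument kinds and return types against rsb.F90 /
      ! classrsb.html before use.
      PROGRAM rsb_sketch
        USE rsb
        USE ISO_C_BINDING
        IMPLICIT NONE
        INTEGER(C_INT) :: istat
        TYPE(C_PTR) :: mtxAp = C_NULL_PTR
        istat = rsb_lib_init(C_NULL_PTR)       ! NULL: default init options
        ! ... allocate mtxAp via rsb_mtx_alloc_from_coo_const, operate
        !     with rsb_spmv / rsb_spsv, then tear down:
        IF (C_ASSOCIATED(mtxAp)) mtxAp = rsb_mtx_free(mtxAp) ! returns C_NULL_PTR
        istat = rsb_lib_exit(C_NULL_PTR)
        IF (istat .NE. rsb_err_no_error) STOP 1
      END PROGRAM
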
diff --git a/doc/html/classrsb.html b/doc/html/classrsb.html
new file mode 100644
index 0000000..bc5548f
--- /dev/null
+++ b/doc/html/classrsb.html
@@ -0,0 +1,2099 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb Module Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#nested-classes">Data Structures</a> |
+<a href="#pub-attribs">Data Fields</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb Module Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="nested-classes"></a>
+Data Structures</h2></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__coo__sort.html">rsb_coo_sort</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaa09eca432d5bb8c57fcff5d9ab98dfb8">rsb_coo_sort</a>.  <a href="interfacersb_1_1rsb__coo__sort.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__file__mtx__get__dims.html">rsb_file_mtx_get_dims</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaa79f69918eafbd8f737b7866a00a0330">rsb_file_mtx_get_dims</a>.  <a href="interfacersb_1_1rsb__file__mtx__get__dims.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__file__mtx__load.html">rsb_file_mtx_load</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga00833b0cf57da8e430f9d0e2b5375bb3">rsb_file_mtx_load</a>.  <a href="interfacersb_1_1rsb__file__mtx__load.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__file__mtx__rndr.html">rsb_file_mtx_rndr</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga4b45a74b985f5cbd869bc9a540951771">rsb_file_mtx_rndr</a>.  <a href="interfacersb_1_1rsb__file__mtx__rndr.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__file__mtx__save.html">rsb_file_mtx_save</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad911ac7528c95c874d02cb17e6b76c54">rsb_file_mtx_save</a>.  <a href="interfacersb_1_1rsb__file__mtx__save.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__file__vec__load.html">rsb_file_vec_load</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad071e0373a08f74ee7ae910e9e4fd140">rsb_file_vec_load</a>.  <a href="interfacersb_1_1rsb__file__vec__load.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__file__vec__save.html">rsb_file_vec_save</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gac4b2a63cdfe1cd4083b1561ee4bea696">rsb_file_vec_save</a>.  <a href="interfacersb_1_1rsb__file__vec__save.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__lib__exit.html">rsb_lib_exit</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a>.  <a href="interfacersb_1_1rsb__lib__exit.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__lib__get__opt.html">rsb_lib_get_opt</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="rsb__rsb_8c.html#a96a28efc32dd050d2a74208b3ad2f227">rsb_lib_get_opt</a>.  <a href="interfacersb_1_1rsb__lib__get__opt.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__lib__init.html">rsb_lib_init</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>.  <a href="interfacersb_1_1rsb__lib__init.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__lib__reinit.html">rsb_lib_reinit</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit</a>.  <a href="interfacersb_1_1rsb__lib__reinit.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__lib__set__opt.html">rsb_lib_set_opt</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="rsb__rsb_8c.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_lib_set_opt</a>.  <a href="interfacersb_1_1rsb__lib__set__opt.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__lib__set__opt__str.html">rsb_lib_set_opt_str</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga4670aa682e70f82d5039c600e426a368">rsb_lib_set_opt_str</a>.  <a href="interfacersb_1_1rsb__lib__set__opt__str.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__add__to__dense.html">rsb_mtx_add_to_dense</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaf30a70ea183d30d216f700782fc01524">rsb_mtx_add_to_dense</a>.  <a href="interfacersb_1_1rsb__mtx__add__to__dense.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__begin.html">rsb_mtx_alloc_from_coo_begin</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gafca80e53d47a7ec3eb116e755fe47c58">rsb_mtx_alloc_from_coo_begin</a>.  <a href="interfacersb_1_1rsb__mtx__alloc__from__coo__begin.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__const.html">rsb_mtx_alloc_from_coo_const</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_from_coo_const</a>.  <a href="interfacersb_1_1rsb__mtx__alloc__from__coo__const.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__end.html">rsb_mtx_alloc_from_coo_end</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab583fbefa0a66e9d30dac034480c2d86">rsb_mtx_alloc_from_coo_end</a>.  <a href="interfacersb_1_1rsb__mtx__alloc__from__coo__end.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__inplace.html">rsb_mtx_alloc_from_coo_inplace</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_mtx_alloc_from_coo_inplace</a>.  <a href="interfacersb_1_1rsb__mtx__alloc__from__coo__inplace.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csc__const.html">rsb_mtx_alloc_from_csc_const</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaebf57d9e5263f41eb6163581ffc141aa">rsb_mtx_alloc_from_csc_const</a>.  <a href="interfacersb_1_1rsb__mtx__alloc__from__csc__const.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csr__const.html">rsb_mtx_alloc_from_csr_const</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga13d417f776654fd159f274e56191573e">rsb_mtx_alloc_from_csr_const</a>.  <a href="interfacersb_1_1rsb__mtx__alloc__from__csr__const.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csr__inplace.html">rsb_mtx_alloc_from_csr_inplace</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga60121166daf00968ba717931f04ea455">rsb_mtx_alloc_from_csr_inplace</a>.  <a href="interfacersb_1_1rsb__mtx__alloc__from__csr__inplace.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__clone.html">rsb_mtx_clone</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a>.  <a href="interfacersb_1_1rsb__mtx__clone.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__free.html">rsb_mtx_free</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a>.  <a href="interfacersb_1_1rsb__mtx__free.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__coo.html">rsb_mtx_get_coo</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaac3c6c033733a8101b9ccf56f8fc7112">rsb_mtx_get_coo</a>.  <a href="interfacersb_1_1rsb__mtx__get__coo.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__coo__block.html">rsb_mtx_get_coo_block</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb_mtx_get_coo_block</a>.  <a href="interfacersb_1_1rsb__mtx__get__coo__block.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__csr.html">rsb_mtx_get_csr</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga4adca460f50bc1ad7d9ffdfda2273b87">rsb_mtx_get_csr</a>.  <a href="interfacersb_1_1rsb__mtx__get__csr.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__info.html">rsb_mtx_get_info</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad9a3eacd54fb7043464006cd57866edf">rsb_mtx_get_info</a>.  <a href="interfacersb_1_1rsb__mtx__get__info.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__info__str.html">rsb_mtx_get_info_str</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str</a>.  <a href="interfacersb_1_1rsb__mtx__get__info__str.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__nrm.html">rsb_mtx_get_nrm</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga6a645ce89fd167d72c92cdcfbcd8ed81">rsb_mtx_get_nrm</a>.  <a href="interfacersb_1_1rsb__mtx__get__nrm.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__prec.html">rsb_mtx_get_prec</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gadaee12cc24dac7f8ebc68efd3d09c819">rsb_mtx_get_prec</a>.  <a href="interfacersb_1_1rsb__mtx__get__prec.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__rows__sparse.html">rsb_mtx_get_rows_sparse</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaa01c4a69db732f99e8a960ee8c9afa23">rsb_mtx_get_rows_sparse</a>.  <a href="interfacersb_1_1rsb__mtx__get__rows__sparse.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__vals.html">rsb_mtx_get_vals</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad8f1aa9ac5081edd789374e7bb82697f">rsb_mtx_get_vals</a>.  <a href="interfacersb_1_1rsb__mtx__get__vals.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__vec.html">rsb_mtx_get_vec</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad0b2352cea6b7512b466d1c51327fcf8">rsb_mtx_get_vec</a>.  <a href="interfacersb_1_1rsb__mtx__get__vec.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__rndr.html">rsb_mtx_rndr</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab0702d7080d1699162e4201bc70cc5ee">rsb_mtx_rndr</a>.  <a href="interfacersb_1_1rsb__mtx__rndr.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__set__vals.html">rsb_mtx_set_vals</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab8069ad6d5a67bc8a726131891e98c46">rsb_mtx_set_vals</a>.  <a href="interfacersb_1_1rsb__mtx__set__vals.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__switch__to__coo.html">rsb_mtx_switch_to_coo</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gadf75c148fe661486ab0d8140657b8d9a">rsb_mtx_switch_to_coo</a>.  <a href="interfacersb_1_1rsb__mtx__switch__to__coo.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__switch__to__csr.html">rsb_mtx_switch_to_csr</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga3c46a4942a6acb90063d721b6446e78e">rsb_mtx_switch_to_csr</a>.  <a href="interfacersb_1_1rsb__mtx__switch__to__csr.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__upd__vals.html">rsb_mtx_upd_vals</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals</a>.  <a href="interfacersb_1_1rsb__mtx__upd__vals.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__perror.html">rsb_perror</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror</a>.  <a href="interfacersb_1_1rsb__perror.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__psblas__trans__to__rsb__trans.html">rsb_psblas_trans_to_rsb_trans</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga7459601f0d54bd95549959b9749fedde">rsb_psblas_trans_to_rsb_trans</a>.  <a href="interfacersb_1_1rsb__psblas__trans__to__rsb__trans.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__spmm.html">rsb_spmm</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga3ec8d721b5333aae6ea9b03eb0039285">rsb_spmm</a>.  <a href="interfacersb_1_1rsb__spmm.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__spmsp.html">rsb_spmsp</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga8813ccbbb1065ac76bfe22c42feafa05">rsb_spmsp</a>.  <a href="interfacersb_1_1rsb__spmsp.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__spmsp__to__dense.html">rsb_spmsp_to_dense</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga74d97612d4af70244c886b9eadd90a0e">rsb_spmsp_to_dense</a>.  <a href="interfacersb_1_1rsb__spmsp__to__dense.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__spmv.html">rsb_spmv</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga4a16a82d289c76a437915db449553d4d">rsb_spmv</a>.  <a href="interfacersb_1_1rsb__spmv.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__sppsp.html">rsb_sppsp</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga30823d02e577e59da4ccff6baaeb8ea1">rsb_sppsp</a>.  <a href="interfacersb_1_1rsb__sppsp.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__spsm.html">rsb_spsm</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga48e6f3844605fffac9f622f05afa6043">rsb_spsm</a>.  <a href="interfacersb_1_1rsb__spsm.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__spsv.html">rsb_spsv</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga9b044332b720d3f8083ae792068fb04a">rsb_spsv</a>.  <a href="interfacersb_1_1rsb__spsv.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__strerror__r.html">rsb_strerror_r</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r</a>.  <a href="interfacersb_1_1rsb__strerror__r.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__time.html">rsb_time</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a>.  <a href="interfacersb_1_1rsb__time.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__tune__spmm.html">rsb_tune_spmm</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>.  <a href="interfacersb_1_1rsb__tune__spmm.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__tune__spsm.html">rsb_tune_spsm</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga8d7a05bbc165bd6ac20e8e23487a5871">rsb_tune_spsm</a>.  <a href="interfacersb_1_1rsb__tune__spsm.html#details">More...</a><br/></td></tr>
+</table><table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-attribs"></a>
+Data Fields</h2></td></tr>
+<tr class="memitem:a2f418e43e861a006b5aea1d55913fee2"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a2f418e43e861a006b5aea1d55913fee2">rsb_err_no_error</a> = -INT(Z"0000", C_INT)</td></tr>
+<tr class="memdesc:a2f418e43e861a006b5aea1d55913fee2"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>.  <a href="#a2f418e43e861a006b5aea1d55913fee2"></a><br/></td></tr>
+<tr class="memitem:abe86debd990b7989427a98378c0c2ea4"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#abe86debd990b7989427a98378c0c2ea4">rsb_err_generic_error</a> = -INT(Z"0001", C_INT)</td></tr>
+<tr class="memdesc:abe86debd990b7989427a98378c0c2ea4"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#ad46ebc803d7cad695babdc7d8c709828">RSB_ERR_GENERIC_ERROR</a>.  <a href="#abe86debd990b7989427a98378c0c2ea4"></a><br/></td></tr>
+<tr class="memitem:aa9069fa99bea2127f31ac62365b19bcd"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#aa9069fa99bea2127f31ac62365b19bcd">rsb_err_unsupported_operation</a> = -INT(Z"0002", C_INT)</td></tr>
+<tr class="memdesc:aa9069fa99bea2127f31ac62365b19bcd"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#ab4f407e7c8364bee51cc77546d6f0922">RSB_ERR_UNSUPPORTED_OPERATION</a>.  <a href="#aa9069fa99bea2127f31ac62365b19bcd"></a><br/></td></tr>
+<tr class="memitem:ab8643c59b36b245e6f59ce00e10ad17f"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#ab8643c59b36b245e6f59ce00e10ad17f">rsb_err_unsupported_type</a> = -INT(Z"0004", C_INT)</td></tr>
+<tr class="memdesc:ab8643c59b36b245e6f59ce00e10ad17f"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#afdf2ab3912960ee19f23e7d585371548">RSB_ERR_UNSUPPORTED_TYPE</a>.  <a href="#ab8643c59b36b245e6f59ce00e10ad17f"></a><br/></td></tr>
+<tr class="memitem:a48a68ee015ab06c1b72e26659479cd9e"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a48a68ee015ab06c1b72e26659479cd9e">rsb_err_unsupported_format</a> = -INT(Z"0008", C_INT)</td></tr>
+<tr class="memdesc:a48a68ee015ab06c1b72e26659479cd9e"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#ac00cd41eab18a0d2b9323b401029dd73">RSB_ERR_UNSUPPORTED_FORMAT</a>.  <a href="#a48a68ee015ab06c1b72e26659479cd9e"></a><br/></td></tr>
+<tr class="memitem:a76c59842ba7bef3a5e0cfe577b45e3af"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a76c59842ba7bef3a5e0cfe577b45e3af">rsb_err_internal_error</a> = -INT(Z"0010", C_INT)</td></tr>
+<tr class="memdesc:a76c59842ba7bef3a5e0cfe577b45e3af"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a8e650a7e3b5c5aa1fb9763b0f1498126">RSB_ERR_INTERNAL_ERROR</a>.  <a href="#a76c59842ba7bef3a5e0cfe577b45e3af"></a><br/></td></tr>
+<tr class="memitem:a05f3d2c8888332697f182ea6d8ab66b0"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a05f3d2c8888332697f182ea6d8ab66b0">rsb_err_badargs</a> = -INT(Z"0020", C_INT)</td></tr>
+<tr class="memdesc:a05f3d2c8888332697f182ea6d8ab66b0"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#af0b262c6c554403269234219b3aec409">RSB_ERR_BADARGS</a>.  <a href="#a05f3d2c8888332697f182ea6d8ab66b0"></a><br/></td></tr>
+<tr class="memitem:a0abffcaa259b8f2cbf1b025c4c179fb0"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a0abffcaa259b8f2cbf1b025c4c179fb0">rsb_err_enomem</a> = -INT(Z"0040", C_INT)</td></tr>
+<tr class="memdesc:a0abffcaa259b8f2cbf1b025c4c179fb0"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a538215b32e908646c979a2e446ae5467">RSB_ERR_ENOMEM</a>.  <a href="#a0abffcaa259b8f2cbf1b025c4c179fb0"></a><br/></td></tr>
+<tr class="memitem:a4405be6ac615c1db2c161185d455374c"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a4405be6ac615c1db2c161185d455374c">rsb_err_unimplemented_yet</a> = -INT(Z"0100", C_INT)</td></tr>
+<tr class="memdesc:a4405be6ac615c1db2c161185d455374c"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a0bd20d0f68cf911bf9dfda495d8e12db">RSB_ERR_UNIMPLEMENTED_YET</a>.  <a href="#a4405be6ac615c1db2c161185d455374c"></a><br/></td></tr>
+<tr class="memitem:a20784aca964572d033d9f79a08b8842d"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a20784aca964572d033d9f79a08b8842d">rsb_err_limits</a> = -INT(Z"0200", C_INT)</td></tr>
+<tr class="memdesc:a20784aca964572d033d9f79a08b8842d"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a3d7758ee9127e0c93c9075402999d154">RSB_ERR_LIMITS</a>.  <a href="#a20784aca964572d033d9f79a08b8842d"></a><br/></td></tr>
+<tr class="memitem:ac81e797f7f250fb3d2c20f2a46360838"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#ac81e797f7f250fb3d2c20f2a46360838">rsb_err_unsupported_feature</a> = -INT(Z"0400", C_INT)</td></tr>
+<tr class="memdesc:ac81e797f7f250fb3d2c20f2a46360838"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#accf836c8eb3145e9ab4fd277d6911764">RSB_ERR_UNSUPPORTED_FEATURE</a>.  <a href="#ac81e797f7f250fb3d2c20f2a46360838"></a><br/></td></tr>
+<tr class="memitem:aed70b921cdbe20cc81d03c9b9c7aab38"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#aed70b921cdbe20cc81d03c9b9c7aab38">rsb_err_no_user_configuration</a> = -INT(Z"0800", C_INT)</td></tr>
+<tr class="memdesc:aed70b921cdbe20cc81d03c9b9c7aab38"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a5ab0f86009e1f934b25b23fc4837b9b0">RSB_ERR_NO_USER_CONFIGURATION</a>.  <a href="#aed70b921cdbe20cc81d03c9b9c7aab38"></a><br/></td></tr>
+<tr class="memitem:a4e124cfacc5e0492952ccda10905206a"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a4e124cfacc5e0492952ccda10905206a">rsb_err_corrupt_input_data</a> = -INT(Z"01000", C_INT)</td></tr>
+<tr class="memdesc:a4e124cfacc5e0492952ccda10905206a"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a14103828be5eb82e40d3b772ce54abda">RSB_ERR_CORRUPT_INPUT_DATA</a>.  <a href="#a4e124cfacc5e0492952ccda10905206a"></a><br/></td></tr>
+<tr class="memitem:aa0868e7080760845d911eae040df8c44"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#aa0868e7080760845d911eae040df8c44">rsb_err_failed_memhier_detection</a> = -INT(Z"02000", C_INT)</td></tr>
+<tr class="memdesc:aa0868e7080760845d911eae040df8c44"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a3cacb604d0ad892e195c7c97eda18dba">RSB_ERR_FAILED_MEMHIER_DETECTION</a>.  <a href="#aa0868e7080760845d911eae040df8c44"></a><br/></td></tr>
+<tr class="memitem:a65da259a04a3b6b09b1e67d2aae53108"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a65da259a04a3b6b09b1e67d2aae53108">rsb_err_could_not_honour_externally_allocation_flags</a> = -INT(Z"04000", C_INT)</td></tr>
+<tr class="memdesc:a65da259a04a3b6b09b1e67d2aae53108"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a935de71c3acc5714ad539d65288e2593">RSB_ERR_COULD_NOT_HONOUR_EXTERNALLY_ALLOCATION_FLAGS</a>.  <a href="#a65da259a04a3b6b09b1e67d2aae53108"></a><br/></td></tr>
+<tr class="memitem:a8dec384225c4700df1b201b6dbc5aa60"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a8dec384225c4700df1b201b6dbc5aa60">rsb_err_no_stream_output_configured_out</a> = -INT(Z"08000", C_INT)</td></tr>
+<tr class="memdesc:a8dec384225c4700df1b201b6dbc5aa60"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a9d7fe7c0e3fabfba57bf2318459ed18a">RSB_ERR_NO_STREAM_OUTPUT_CONFIGURED_OUT</a>.  <a href="#a8dec384225c4700df1b201b6dbc5aa60"></a><br/></td></tr>
+<tr class="memitem:a018c06fd82826d0b56fdec98da22da17"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a018c06fd82826d0b56fdec98da22da17">rsb_err_invalid_numerical_data</a> = -INT(Z"010000", C_INT)</td></tr>
+<tr class="memdesc:a018c06fd82826d0b56fdec98da22da17"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a8d504baa13048da05bb71235e2c8d181">RSB_ERR_INVALID_NUMERICAL_DATA</a>.  <a href="#a018c06fd82826d0b56fdec98da22da17"></a><br/></td></tr>
+<tr class="memitem:a3534459ee186379f45444c289df70175"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a3534459ee186379f45444c289df70175">rsb_err_memory_leak</a> = -INT(Z"020000", C_INT)</td></tr>
+<tr class="memdesc:a3534459ee186379f45444c289df70175"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a1b63053f52d6426b726a05b206a3862a">RSB_ERR_MEMORY_LEAK</a>.  <a href="#a3534459ee186379f45444c289df70175"></a><br/></td></tr>
+<tr class="memitem:a65dbcb1d6e6347e5b7e85b5aa49db90c"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a65dbcb1d6e6347e5b7e85b5aa49db90c">rsb_flag_noflags</a> = INT(Z"0000000", C_INT)</td></tr>
+<tr class="memdesc:a65dbcb1d6e6347e5b7e85b5aa49db90c"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a0ea7640214ee34c87e483c475b15827d">RSB_FLAG_NOFLAGS</a>.  <a href="#a65dbcb1d6e6347e5b7e85b5aa49db90c"></a><br/></td></tr>
+<tr class="memitem:a8ca3ae90c2f8e0923f80f04e53ad2c37"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a8ca3ae90c2f8e0923f80f04e53ad2c37">rsb_flag_fortran_indices_interface</a> = INT(Z"0000001", C_INT)</td></tr>
+<tr class="memdesc:a8ca3ae90c2f8e0923f80f04e53ad2c37"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a8ccb4d7203ce7707f9d13bd6c5ef4169">RSB_FLAG_FORTRAN_INDICES_INTERFACE</a>.  <a href="#a8ca3ae90c2f8e0923f80f04e53ad2c37"></a><br/></td></tr>
+<tr class="memitem:a0cd8d81bf275bfdc685080e0d855fbb1"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a0cd8d81bf275bfdc685080e0d855fbb1">rsb_flag_c_indices_interface</a> = INT(Z"0000000", C_INT)</td></tr>
+<tr class="memdesc:a0cd8d81bf275bfdc685080e0d855fbb1"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a49a9315ba7e702e323eadca04d0d735a">RSB_FLAG_C_INDICES_INTERFACE</a>.  <a href="#a0cd8d81bf275bfdc685080e0d855fbb1"></a><br/></td></tr>
+<tr class="memitem:ae2c87798ff9cee8bdc0eaacdec62a5d0"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#ae2c87798ff9cee8bdc0eaacdec62a5d0">rsb_flag_use_halfword_indices</a> = INT(Z"0000002", C_INT)</td></tr>
+<tr class="memdesc:ae2c87798ff9cee8bdc0eaacdec62a5d0"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a693ed0d053ad81ca2ad6dc383afa0586">RSB_FLAG_USE_HALFWORD_INDICES</a>.  <a href="#ae2c87798ff9cee8bdc0eaacdec62a5d0"></a><br/></td></tr>
+<tr class="memitem:a68ace12ecb8cbcc9a7c686b2b9665c29"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a68ace12ecb8cbcc9a7c686b2b9665c29">rsb_flag_want_row_major_order</a> = INT(Z"0000000", C_INT)</td></tr>
+<tr class="memdesc:a68ace12ecb8cbcc9a7c686b2b9665c29"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a7e9ef3a7ae3c22ab5c76d36b3ac482cc">RSB_FLAG_WANT_ROW_MAJOR_ORDER</a>.  <a href="#a68ace12ecb8cbcc9a7c686b2b9665c29"></a><br/></td></tr>
+<tr class="memitem:a8786a38b2ca41b926b8ef6092a55b8a6"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a8786a38b2ca41b926b8ef6092a55b8a6">rsb_flag_want_column_major_order</a> = INT(Z"04000000", C_INT)</td></tr>
+<tr class="memdesc:a8786a38b2ca41b926b8ef6092a55b8a6"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a6ed7790c2f7129a6e051b8167c48a43c">RSB_FLAG_WANT_COLUMN_MAJOR_ORDER</a>.  <a href="#a8786a38b2ca41b926b8ef6092a55b8a6"></a><br/></td></tr>
+<tr class="memitem:ade2657fb3c17b519cc4332eac06046d3"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#ade2657fb3c17b519cc4332eac06046d3">rsb_flag_sorted_input</a> = INT(Z"0000004", C_INT)</td></tr>
+<tr class="memdesc:ade2657fb3c17b519cc4332eac06046d3"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a726fa64beccf21ae1b70149b88c3affb">RSB_FLAG_SORTED_INPUT</a>.  <a href="#ade2657fb3c17b519cc4332eac06046d3"></a><br/></td></tr>
+<tr class="memitem:a3ea9a964debcbac70d35e964666f7a1c"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a3ea9a964debcbac70d35e964666f7a1c">rsb_flag_triangular</a> = INT(Z"0000008", C_INT)</td></tr>
+<tr class="memdesc:a3ea9a964debcbac70d35e964666f7a1c"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#adca72e259846399da3512fcb062ad518">RSB_FLAG_TRIANGULAR</a>.  <a href="#a3ea9a964debcbac70d35e964666f7a1c"></a><br/></td></tr>
+<tr class="memitem:a59dd2ec96582af74d563f8c9f1f44409"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a59dd2ec96582af74d563f8c9f1f44409">rsb_flag_lower</a> = INT(Z"0000010", C_INT)</td></tr>
+<tr class="memdesc:a59dd2ec96582af74d563f8c9f1f44409"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>.  <a href="#a59dd2ec96582af74d563f8c9f1f44409"></a><br/></td></tr>
+<tr class="memitem:a9d9497934ece76bcf860a2a563056eca"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a9d9497934ece76bcf860a2a563056eca">rsb_flag_upper</a> = INT(Z"0000020", C_INT)</td></tr>
+<tr class="memdesc:a9d9497934ece76bcf860a2a563056eca"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">RSB_FLAG_UPPER</a>.  <a href="#a9d9497934ece76bcf860a2a563056eca"></a><br/></td></tr>
+<tr class="memitem:a3e5c32923f3e360e980311315a27dc7d"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a3e5c32923f3e360e980311315a27dc7d">rsb_flag_unit_diag_implicit</a> = INT(Z"0000040", C_INT)</td></tr>
+<tr class="memdesc:a3e5c32923f3e360e980311315a27dc7d"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a4af24812309eb471c861ba618cb996f2">RSB_FLAG_UNIT_DIAG_IMPLICIT</a>.  <a href="#a3e5c32923f3e360e980311315a27dc7d"></a><br/></td></tr>
+<tr class="memitem:a9fda0eb0c128c193ba7d05bab64d7e90"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a9fda0eb0c128c193ba7d05bab64d7e90">rsb_flag_want_coo_storage</a> = INT(Z"0000100", C_INT)</td></tr>
+<tr class="memdesc:a9fda0eb0c128c193ba7d05bab64d7e90"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a0ee1c6081692a3ca98ee7ea0c7648ec8">RSB_FLAG_WANT_COO_STORAGE</a>.  <a href="#a9fda0eb0c128c193ba7d05bab64d7e90"></a><br/></td></tr>
+<tr class="memitem:ad6870000c6da71ba7e07676e9d9c5e42"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#ad6870000c6da71ba7e07676e9d9c5e42">rsb_flag_duplicates_keep_last</a> = INT(Z"0000000", C_INT)</td></tr>
+<tr class="memdesc:ad6870000c6da71ba7e07676e9d9c5e42"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#aff85f26964888f838aa97eb371ce5da3">RSB_FLAG_DUPLICATES_KEEP_LAST</a>.  <a href="#ad6870000c6da71ba7e07676e9d9c5e42"></a><br/></td></tr>
+<tr class="memitem:aa1ca91fa56bb36b6eebbf47de8ccb1be"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#aa1ca91fa56bb36b6eebbf47de8ccb1be">rsb_flag_duplicates_default_handle</a> = INT(Z"0000000", C_INT)</td></tr>
+<tr class="memdesc:aa1ca91fa56bb36b6eebbf47de8ccb1be"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a7fee489042762b3b22d8184c592a9e52">RSB_FLAG_DUPLICATES_DEFAULT_HANDLE</a>.  <a href="#aa1ca91fa56bb36b6eebbf47de8ccb1be"></a><br/></td></tr>
+<tr class="memitem:a4e8c5001e9a26a86faefe9bd26989040"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a4e8c5001e9a26a86faefe9bd26989040">rsb_flag_duplicates_sum</a> = INT(Z"0000200", C_INT)</td></tr>
+<tr class="memdesc:a4e8c5001e9a26a86faefe9bd26989040"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#afd1b39c625f4249cd32fccea38957f97">RSB_FLAG_DUPLICATES_SUM</a>.  <a href="#a4e8c5001e9a26a86faefe9bd26989040"></a><br/></td></tr>
+<tr class="memitem:a95b0cf20f4422b337c41f2388a59fb0b"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a95b0cf20f4422b337c41f2388a59fb0b">rsb_flag_discard_zeros</a> = INT(Z"0000400", C_INT)</td></tr>
+<tr class="memdesc:a95b0cf20f4422b337c41f2388a59fb0b"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#abf243a6f15925734e143703c4ad33512">RSB_FLAG_DISCARD_ZEROS</a>.  <a href="#a95b0cf20f4422b337c41f2388a59fb0b"></a><br/></td></tr>
+<tr class="memitem:a7a5366fbd6cd1814d44b1ab1068f88de"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a7a5366fbd6cd1814d44b1ab1068f88de">rsb_flag_quad_partitioning</a> = INT(Z"0002000", C_INT)</td></tr>
+<tr class="memdesc:a7a5366fbd6cd1814d44b1ab1068f88de"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a5ca428920608e6dd6fcc4e9a4fa8ee70">RSB_FLAG_QUAD_PARTITIONING</a>.  <a href="#a7a5366fbd6cd1814d44b1ab1068f88de"></a><br/></td></tr>
+<tr class="memitem:a8ad70221bf6a5f4b458f6b700b6af8df"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a8ad70221bf6a5f4b458f6b700b6af8df">rsb_flag_want_bcss_storage</a> = INT(Z"0004000", C_INT)</td></tr>
+<tr class="memdesc:a8ad70221bf6a5f4b458f6b700b6af8df"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a12c780564b9c8db7f8104cc5952a490f">RSB_FLAG_WANT_BCSS_STORAGE</a>.  <a href="#a8ad70221bf6a5f4b458f6b700b6af8df"></a><br/></td></tr>
+<tr class="memitem:a25e0432a471ab3fca4105d40ce2e8f1e"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a25e0432a471ab3fca4105d40ce2e8f1e">rsb_flag_assembled_in_coo_arrays</a> = INT(Z"0040000", C_INT)</td></tr>
+<tr class="memdesc:a25e0432a471ab3fca4105d40ce2e8f1e"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#adce7e20015d4a549bb8c44a00a80fc7e">RSB_FLAG_ASSEMBLED_IN_COO_ARRAYS</a>.  <a href="#a25e0432a471ab3fca4105d40ce2e8f1e"></a><br/></td></tr>
+<tr class="memitem:a6d6b68525e01bb7d91eb814216c0b5bf"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a6d6b68525e01bb7d91eb814216c0b5bf">rsb_flag_experimental_in_place_permutation_sort</a> = INT(Z"0080000", C_INT)</td></tr>
+<tr class="memdesc:a6d6b68525e01bb7d91eb814216c0b5bf"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a1d3b9bd7a31257cc8116be3dee0125b5">RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT</a>.  <a href="#a6d6b68525e01bb7d91eb814216c0b5bf"></a><br/></td></tr>
+<tr class="memitem:a8325109ecda447aa1e93e8d747673f4c"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a8325109ecda447aa1e93e8d747673f4c">rsb_flag_symmetric</a> = INT(Z"0400000", C_INT)</td></tr>
+<tr class="memdesc:a8325109ecda447aa1e93e8d747673f4c"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a183c4b8ead89e452d1c204c92b3f8f61">RSB_FLAG_SYMMETRIC</a>.  <a href="#a8325109ecda447aa1e93e8d747673f4c"></a><br/></td></tr>
+<tr class="memitem:a613fa635312f361ef115b68803807908"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a613fa635312f361ef115b68803807908">rsb_flag_hermitian</a> = INT(Z"0800000", C_INT)</td></tr>
+<tr class="memdesc:a613fa635312f361ef115b68803807908"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#ae3e1d6090dd2912acba58b4bc0530ab7">RSB_FLAG_HERMITIAN</a>.  <a href="#a613fa635312f361ef115b68803807908"></a><br/></td></tr>
+<tr class="memitem:aff989c5cb6fa62c7ed25a72f30d6a864"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#aff989c5cb6fa62c7ed25a72f30d6a864">rsb_flag_recursive_more_leaves_than_threads</a> = INT(Z"01000000", C_INT)</td></tr>
+<tr class="memdesc:aff989c5cb6fa62c7ed25a72f30d6a864"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a54d04b341465bf3dadc62ad99d55f8ca">RSB_FLAG_RECURSIVE_MORE_LEAVES_THAN_THREADS</a>.  <a href="#aff989c5cb6fa62c7ed25a72f30d6a864"></a><br/></td></tr>
+<tr class="memitem:abce4dd43d8147cb6fe505bda474e535c"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#abce4dd43d8147cb6fe505bda474e535c">rsb_flag_recursive_subdivide_more_on_diag</a> = INT(Z"08000000", C_INT)</td></tr>
+<tr class="memdesc:abce4dd43d8147cb6fe505bda474e535c"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#ad8e75dfa2b78fa82cdd31665a375d257">RSB_FLAG_RECURSIVE_SUBDIVIDE_MORE_ON_DIAG</a>.  <a href="#abce4dd43d8147cb6fe505bda474e535c"></a><br/></td></tr>
+<tr class="memitem:ab8f28a0d2ec93bf0c85ef1f30fc51e24"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#ab8f28a0d2ec93bf0c85ef1f30fc51e24">rsb_flag_externally_allocated_arrays</a> = INT(Z"040000000", C_INT)</td></tr>
+<tr class="memdesc:ab8f28a0d2ec93bf0c85ef1f30fc51e24"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a6abc0e23c782b817e2ef96d8294f990d">RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS</a>.  <a href="#ab8f28a0d2ec93bf0c85ef1f30fc51e24"></a><br/></td></tr>
+<tr class="memitem:a9d39857a6f2ae454fd20d5bcc03ef17c"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a9d39857a6f2ae454fd20d5bcc03ef17c">rsb_flag_use_csr_reserved</a> = INT(Z"0200000", C_INT)</td></tr>
+<tr class="memdesc:a9d39857a6f2ae454fd20d5bcc03ef17c"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a45ae263259390619ea303a5fbe2640f2">RSB_FLAG_USE_CSR_RESERVED</a>.  <a href="#a9d39857a6f2ae454fd20d5bcc03ef17c"></a><br/></td></tr>
+<tr class="memitem:ad27c22510fec7c8367bd34bf800cbd84"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#ad27c22510fec7c8367bd34bf800cbd84">rsb_flag_default_storage_flags</a> = (<a class="el" href="rsb_8h.html#a12c780564b9c8db7f8104cc5952a490f">RSB_FLAG_WANT_BCSS_STORAGE</a>+<a class="el" href="rsb_8h.html#a0ee1c6081692a3ca98ee7ea0c7648ec8">RSB_FLAG_WANT_COO_STORAGE</a>)</td></tr>
+<tr class="memdesc:ad27c22510fec7c8367bd34bf800cbd84"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#aa83897e25c1235a780ed7fe317c78555">RSB_FLAG_DEFAULT_STORAGE_FLAGS</a>.  <a href="#ad27c22510fec7c8367bd34bf800cbd84"></a><br/></td></tr>
+<tr class="memitem:aa1d8e9f835115cdac082812d5f74b6d4"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#aa1d8e9f835115cdac082812d5f74b6d4">rsb_flag_default_coo_matrix_flags</a> = <a class="el" href="rsb_8h.html#a0ee1c6081692a3ca98ee7ea0c7648ec8">RSB_FLAG_WANT_COO_STORAGE</a></td></tr>
+<tr class="memdesc:aa1d8e9f835115cdac082812d5f74b6d4"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a6b21a3edf4231070a10223f1a9ae1dc4">RSB_FLAG_DEFAULT_COO_MATRIX_FLAGS</a>.  <a href="#aa1d8e9f835115cdac082812d5f74b6d4"></a><br/></td></tr>
+<tr class="memitem:a83848ae1b266eea31f4462821f8bc51b"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a83848ae1b266eea31f4462821f8bc51b">rsb_flag_default_csr_matrix_flags</a> = <a class="el" href="rsb_8h.html#a12c780564b9c8db7f8104cc5952a490f">RSB_FLAG_WANT_BCSS_STORAGE</a></td></tr>
+<tr class="memdesc:a83848ae1b266eea31f4462821f8bc51b"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a8c90a9ad92722ffbbf1bfcadb805c520">RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS</a>.  <a href="#a83848ae1b266eea31f4462821f8bc51b"></a><br/></td></tr>
+<tr class="memitem:aba933b2d9b4534fa69226910ed84bd4c"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#aba933b2d9b4534fa69226910ed84bd4c">rsb_flag_default_rsb_matrix_flags</a> = (<a class="el" href="rsb_8h.html#a5ca428920608e6dd6fcc4e9a4fa8ee70">RSB_FLAG_QUAD_PARTITIONING</a>+<a class="el" href="rsb_8h.html#a693ed0d053ad81ca2ad6dc383afa0586">RSB_FLAG_USE_HALFWORD_INDICES</a>+< [...]
+<tr class="memdesc:aba933b2d9b4534fa69226910ed84bd4c"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a17c314e28220f3b81aed9cc7d79f97e4">RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS</a>.  <a href="#aba933b2d9b4534fa69226910ed84bd4c"></a><br/></td></tr>
+<tr class="memitem:a16cc953b0faf8ba964ba79930b51f93c"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a16cc953b0faf8ba964ba79930b51f93c">rsb_flag_default_matrix_flags</a> = <a class="el" href="rsb_8h.html#a17c314e28220f3b81aed9cc7d79f97e4">RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS</a></td></tr>
+<tr class="memdesc:a16cc953b0faf8ba964ba79930b51f93c"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#acac4b9c09a3fd6be63e511fc5042038f">RSB_FLAG_DEFAULT_MATRIX_FLAGS</a>.  <a href="#a16cc953b0faf8ba964ba79930b51f93c"></a><br/></td></tr>
+<tr class="memitem:a2af139858170575356808c746b4a564a"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a2af139858170575356808c746b4a564a">rsb_flag_identical_flags</a> = <a class="el" href="rsb_8h.html#a0ea7640214ee34c87e483c475b15827d">RSB_FLAG_NOFLAGS</a></td></tr>
+<tr class="memdesc:a2af139858170575356808c746b4a564a"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#aacf404fe630d480353ce767fd27ba097">RSB_FLAG_IDENTICAL_FLAGS</a>.  <a href="#a2af139858170575356808c746b4a564a"></a><br/></td></tr>
+<tr class="memitem:a163680fba55484e1d4e4c9a436ebc93b"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a163680fba55484e1d4e4c9a436ebc93b">rsb_flag_lower_hermitian</a> = (<a class="el" href="rsb_8h.html#ae3e1d6090dd2912acba58b4bc0530ab7">RSB_FLAG_HERMITIAN</a> + <a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>)</td></tr>
+<tr class="memdesc:a163680fba55484e1d4e4c9a436ebc93b"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#aa06dcddcdd4f42fe2eeda8eb6168bd2d">RSB_FLAG_LOWER_HERMITIAN</a>.  <a href="#a163680fba55484e1d4e4c9a436ebc93b"></a><br/></td></tr>
+<tr class="memitem:a22eedbec9d19115a8658438f1c7cc496"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a22eedbec9d19115a8658438f1c7cc496">rsb_flag_upper_hermitian</a> = (<a class="el" href="rsb_8h.html#ae3e1d6090dd2912acba58b4bc0530ab7">RSB_FLAG_HERMITIAN</a> + <a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">RSB_FLAG_UPPER</a>)</td></tr>
+<tr class="memdesc:a22eedbec9d19115a8658438f1c7cc496"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a0565be78af9bac79d07376d501237b00">RSB_FLAG_UPPER_HERMITIAN</a>.  <a href="#a22eedbec9d19115a8658438f1c7cc496"></a><br/></td></tr>
+<tr class="memitem:a7c3f1e6d9f61f9944a08efab6a00fe2f"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a7c3f1e6d9f61f9944a08efab6a00fe2f">rsb_flag_lower_triangular</a> = (<a class="el" href="rsb_8h.html#adca72e259846399da3512fcb062ad518">RSB_FLAG_TRIANGULAR</a> + <a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>)</td></tr>
+<tr class="memdesc:a7c3f1e6d9f61f9944a08efab6a00fe2f"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#aed7916ce610549fc75aa0c3e2d2ae1b9">RSB_FLAG_LOWER_TRIANGULAR</a>.  <a href="#a7c3f1e6d9f61f9944a08efab6a00fe2f"></a><br/></td></tr>
+<tr class="memitem:ac3802654bb13df88bb2e7f371b12e5ea"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#ac3802654bb13df88bb2e7f371b12e5ea">rsb_flag_upper_triangular</a> = (<a class="el" href="rsb_8h.html#adca72e259846399da3512fcb062ad518">RSB_FLAG_TRIANGULAR</a> + <a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">RSB_FLAG_UPPER</a>)</td></tr>
+<tr class="memdesc:ac3802654bb13df88bb2e7f371b12e5ea"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a9168d244582c1a4c57a9ec93d9432539">RSB_FLAG_UPPER_TRIANGULAR</a>.  <a href="#ac3802654bb13df88bb2e7f371b12e5ea"></a><br/></td></tr>
+<tr class="memitem:a1b31d44601cedab86c51a6ed2a8b0ca4"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a1b31d44601cedab86c51a6ed2a8b0ca4">rsb_flag_lower_symmetric</a> = (<a class="el" href="rsb_8h.html#a183c4b8ead89e452d1c204c92b3f8f61">RSB_FLAG_SYMMETRIC</a> + <a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>)</td></tr>
+<tr class="memdesc:a1b31d44601cedab86c51a6ed2a8b0ca4"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a6933030c784596e3c8dbbbd8daf62805">RSB_FLAG_LOWER_SYMMETRIC</a>.  <a href="#a1b31d44601cedab86c51a6ed2a8b0ca4"></a><br/></td></tr>
+<tr class="memitem:a509eea3e97b56833df24cb9d2b064e26"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a509eea3e97b56833df24cb9d2b064e26">rsb_flag_diagonal</a> = (<a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">RSB_FLAG_UPPER</a> + <a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>)</td></tr>
+<tr class="memdesc:a509eea3e97b56833df24cb9d2b064e26"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#abccb47886fb3f8352e4e6ad801fd8efa">RSB_FLAG_DIAGONAL</a>.  <a href="#a509eea3e97b56833df24cb9d2b064e26"></a><br/></td></tr>
+<tr class="memitem:ab17822f489868813f38ba9609245ae55"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#ab17822f489868813f38ba9609245ae55">rsb_flag_upper_symmetric</a> = (<a class="el" href="rsb_8h.html#a183c4b8ead89e452d1c204c92b3f8f61">RSB_FLAG_SYMMETRIC</a> + <a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">RSB_FLAG_UPPER</a>)</td></tr>
+<tr class="memdesc:ab17822f489868813f38ba9609245ae55"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a3c2701b010fa2928685f3253a0ff1a99">RSB_FLAG_UPPER_SYMMETRIC</a>.  <a href="#ab17822f489868813f38ba9609245ae55"></a><br/></td></tr>
+<tr class="memitem:a7baa8d692038856c55489d2382f09e5d"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a7baa8d692038856c55489d2382f09e5d">rsb_flag_use_halfword_indices_csr</a> = (<a class="el" href="rsb_8h.html#a693ed0d053ad81ca2ad6dc383afa0586">RSB_FLAG_USE_HALFWORD_INDICES</a>+<a class="el" href="rsb_8h.html#a45ae263259390619ea303a5fbe2640f2">RSB_FLAG_USE_CSR_RESERVED</a>)</ [...]
+<tr class="memdesc:a7baa8d692038856c55489d2382f09e5d"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a1b1cf74b08234e3c7c7d463e7c4acea1">RSB_FLAG_USE_HALFWORD_INDICES_CSR</a>.  <a href="#a7baa8d692038856c55489d2382f09e5d"></a><br/></td></tr>
+<tr class="memitem:a6ff989a0fe4da2a71e72091fcb30a334"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a6ff989a0fe4da2a71e72091fcb30a334">rsb_flag_use_halfword_indices_coo</a> = (<a class="el" href="rsb_8h.html#a693ed0d053ad81ca2ad6dc383afa0586">RSB_FLAG_USE_HALFWORD_INDICES</a>+<a class="el" href="rsb_8h.html#a0ee1c6081692a3ca98ee7ea0c7648ec8">RSB_FLAG_WANT_COO_STORAGE</a>)</ [...]
+<tr class="memdesc:a6ff989a0fe4da2a71e72091fcb30a334"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a3051409699970a0df3acfee8cf70b9aa">RSB_FLAG_USE_HALFWORD_INDICES_COO</a>.  <a href="#a6ff989a0fe4da2a71e72091fcb30a334"></a><br/></td></tr>
+<tr class="memitem:abf74a30d663a24ff5fde624217bfea37"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#abf74a30d663a24ff5fde624217bfea37">rsb_flag_mutually_exclusive_switches</a> = (<a class="el" href="rsb_8h.html#a3051409699970a0df3acfee8cf70b9aa">RSB_FLAG_USE_HALFWORD_INDICES_COO</a>+<a class="el" href="rsb_8h.html#a1b1cf74b08234e3c7c7d463e7c4acea1">RSB_FLAG_USE_HALFWORD_IND [...]
+<tr class="memdesc:abf74a30d663a24ff5fde624217bfea37"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a6f4335cce5234a69e06188bcad418091">RSB_FLAG_MUTUALLY_EXCLUSIVE_SWITCHES</a>.  <a href="#abf74a30d663a24ff5fde624217bfea37"></a><br/></td></tr>
+<tr class="memitem:a89c7627f24fecaf23ead8300f671314f"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a89c7627f24fecaf23ead8300f671314f">rsb_transposition_n</a> = INT(Z"04E", C_INT)</td></tr>
+<tr class="memitem:a5c11d5b2aa58a9c9067ec914265cd28f"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a5c11d5b2aa58a9c9067ec914265cd28f">rsb_transposition_t</a> = INT(Z"054", C_INT)</td></tr>
+<tr class="memitem:a2e308172e38ee4453d556792acbe464c"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a2e308172e38ee4453d556792acbe464c">rsb_transposition_c</a> = INT(Z"043", C_INT)</td></tr>
+<tr class="memitem:a43c72bf61ae0f1961908e27c7dd76f01"><td class="memItemLeft" align="right" valign="top">integer(c_signed_char), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a43c72bf61ae0f1961908e27c7dd76f01">rsb_numerical_type_same_type</a> = 1</td></tr>
+<tr class="memitem:a31d8f196938e468a3891fb80f1decc1f"><td class="memItemLeft" align="right" valign="top">integer(c_signed_char), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a31d8f196938e468a3891fb80f1decc1f">rsb_numerical_type_int</a> = ICHAR('I')</td></tr>
+<tr class="memitem:af833bb7a31acb188d33424c3c16bd4cd"><td class="memItemLeft" align="right" valign="top">integer(c_signed_char), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#af833bb7a31acb188d33424c3c16bd4cd">rsb_numerical_type_double</a> = ICHAR('D')</td></tr>
+<tr class="memitem:ac18d8381c23b54ccd523e7b4e50af04a"><td class="memItemLeft" align="right" valign="top">integer(c_signed_char), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#ac18d8381c23b54ccd523e7b4e50af04a">rsb_numerical_type_float</a> = ICHAR('S')</td></tr>
+<tr class="memitem:ace3d848255b280a0531407c19fffaec7"><td class="memItemLeft" align="right" valign="top">integer(c_signed_char), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#ace3d848255b280a0531407c19fffaec7">rsb_numerical_type_float_complex</a> = ICHAR('C')</td></tr>
+<tr class="memitem:a1865b95dcc4fac4f0fe21dfe8c4ef036"><td class="memItemLeft" align="right" valign="top">integer(c_signed_char), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a1865b95dcc4fac4f0fe21dfe8c4ef036">rsb_numerical_type_double_complex</a> = ICHAR('Z')</td></tr>
+<tr class="memitem:abf4365a254c637b59b5f84dcef03c4e6"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#abf4365a254c637b59b5f84dcef03c4e6">rsb_io_want_verbose_init</a> = INT(Z"0000001", C_INT)</td></tr>
+<tr class="memdesc:abf4365a254c637b59b5f84dcef03c4e6"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a3a0e460ef74cf3b2edf102c1aaa73d8a">RSB_IO_WANT_VERBOSE_INIT</a>.  <a href="#abf4365a254c637b59b5f84dcef03c4e6"></a><br/></td></tr>
+<tr class="memitem:a33d3ac5b6383e375f2239b780af50d3f"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a33d3ac5b6383e375f2239b780af50d3f">rsb_io_want_verbose_exit</a> = INT(Z"0000002", C_INT)</td></tr>
+<tr class="memdesc:a33d3ac5b6383e375f2239b780af50d3f"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a53498790997d5ef408751f9e19994532">RSB_IO_WANT_VERBOSE_EXIT</a>.  <a href="#a33d3ac5b6383e375f2239b780af50d3f"></a><br/></td></tr>
+<tr class="memitem:a72c4b7daa9a9ba1c7887bb05dfb96b2c"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a72c4b7daa9a9ba1c7887bb05dfb96b2c">rsb_io_want_output_stream</a> = INT(Z"0000003", C_INT)</td></tr>
+<tr class="memdesc:a72c4b7daa9a9ba1c7887bb05dfb96b2c"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ae398997ce8253b813f2bbb5834e9670f">RSB_IO_WANT_OUTPUT_STREAM</a>.  <a href="#a72c4b7daa9a9ba1c7887bb05dfb96b2c"></a><br/></td></tr>
+<tr class="memitem:ae4176512451ec7387ee2fbaec0c7f861"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#ae4176512451ec7387ee2fbaec0c7f861">rsb_io_want_sort_method</a> = INT(Z"0000004", C_INT)</td></tr>
+<tr class="memdesc:ae4176512451ec7387ee2fbaec0c7f861"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a8fd1736c99255474630bee80d4924673">RSB_IO_WANT_SORT_METHOD</a>.  <a href="#ae4176512451ec7387ee2fbaec0c7f861"></a><br/></td></tr>
+<tr class="memitem:aaf22b4c404442175bc58dc513bf13a89"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#aaf22b4c404442175bc58dc513bf13a89">rsb_io_want_cache_blocking_method</a> = INT(Z"0000005", C_INT)</td></tr>
+<tr class="memdesc:aaf22b4c404442175bc58dc513bf13a89"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a26f34783677f687b1e857de76a22fdd7">RSB_IO_WANT_CACHE_BLOCKING_METHOD</a>.  <a href="#aaf22b4c404442175bc58dc513bf13a89"></a><br/></td></tr>
+<tr class="memitem:ad5a1220ce0e7d5c4ce517150de22d80b"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#ad5a1220ce0e7d5c4ce517150de22d80b">rsb_io_want_subdivision_multiplier</a> = INT(Z"0000006", C_INT)</td></tr>
+<tr class="memdesc:ad5a1220ce0e7d5c4ce517150de22d80b"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6aae67087e45854502f7c54e0065ed9a3a">RSB_IO_WANT_SUBDIVISION_MULTIPLIER</a>.  <a href="#ad5a1220ce0e7d5c4ce517150de22d80b"></a><br/></td></tr>
+<tr class="memitem:a90cf14925f34712589430925a0abb92e"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a90cf14925f34712589430925a0abb92e">rsb_io_want_verbose_errors</a> = INT(Z"0000007", C_INT)</td></tr>
+<tr class="memdesc:a90cf14925f34712589430925a0abb92e"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a0681bef1f3aca28448c14c4ed7eb4001">RSB_IO_WANT_VERBOSE_ERRORS</a>.  <a href="#a90cf14925f34712589430925a0abb92e"></a><br/></td></tr>
+<tr class="memitem:a81a7107ceaa5d934eced8144f7de2338"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a81a7107ceaa5d934eced8144f7de2338">rsb_io_want_bounded_box_computation</a> = INT(Z"0000008", C_INT)</td></tr>
+<tr class="memdesc:a81a7107ceaa5d934eced8144f7de2338"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a3d3a5bf255dfc8719f6553e8ac4ecd53">RSB_IO_WANT_BOUNDED_BOX_COMPUTATION</a>.  <a href="#a81a7107ceaa5d934eced8144f7de2338"></a><br/></td></tr>
+<tr class="memitem:a0c15802bcd77b9b98a0968beffaee9cc"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a0c15802bcd77b9b98a0968beffaee9cc">rsb_io_want_executing_threads</a> = INT(Z"0000009", C_INT)</td></tr>
+<tr class="memdesc:a0c15802bcd77b9b98a0968beffaee9cc"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ad9150d4d5672d1835185d6e2286d92f4">RSB_IO_WANT_EXECUTING_THREADS</a>.  <a href="#a0c15802bcd77b9b98a0968beffaee9cc"></a><br/></td></tr>
+<tr class="memitem:a191f5492907ae4beca111b361955a791"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a191f5492907ae4beca111b361955a791">rsb_io_want_extra_verbose_interface</a> = INT(Z"0000010", C_INT)</td></tr>
+<tr class="memdesc:a191f5492907ae4beca111b361955a791"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a56c0c6849135ce5fa9edd7907ab3e0cb">RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE</a>.  <a href="#a191f5492907ae4beca111b361955a791"></a><br/></td></tr>
+<tr class="memitem:a81327bb47b51d6c50e12c02171c8c3fe"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a81327bb47b51d6c50e12c02171c8c3fe">rsb_io_want_memory_hierarchy_info_string</a> = INT(Z"0000011", C_INT)</td></tr>
+<tr class="memdesc:a81327bb47b51d6c50e12c02171c8c3fe"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a574d237ad4bb16d884bb46e5a6670d0d">RSB_IO_WANT_MEMORY_HIERARCHY_INFO_STRING</a>.  <a href="#a81327bb47b51d6c50e12c02171c8c3fe"></a><br/></td></tr>
+<tr class="memitem:aed7dc0ecede60b677144e8aba46d28b9"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#aed7dc0ecede60b677144e8aba46d28b9">rsb_io_want_is_initialized_marker</a> = INT(Z"0000012", C_INT)</td></tr>
+<tr class="memdesc:aed7dc0ecede60b677144e8aba46d28b9"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ad901e7c7c31f4b9118bb313db549ea3b">RSB_IO_WANT_IS_INITIALIZED_MARKER</a>.  <a href="#aed7dc0ecede60b677144e8aba46d28b9"></a><br/></td></tr>
+<tr class="memitem:a658556e8116b0ff18bc19302fb66449a"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a658556e8116b0ff18bc19302fb66449a">rsb_io_want_mem_alloc_cnt</a> = INT(Z"0000013", C_INT)</td></tr>
+<tr class="memdesc:a658556e8116b0ff18bc19302fb66449a"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ad74c3b62aa359b12e7287e7238792e0f">RSB_IO_WANT_MEM_ALLOC_CNT</a>.  <a href="#a658556e8116b0ff18bc19302fb66449a"></a><br/></td></tr>
+<tr class="memitem:a512361fe2c126a7baa412e4b680d8a2f"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a512361fe2c126a7baa412e4b680d8a2f">rsb_io_want_mem_alloc_tot</a> = INT(Z"0000014", C_INT)</td></tr>
+<tr class="memdesc:a512361fe2c126a7baa412e4b680d8a2f"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a87d7018453cb3179349f12f9e4667b24">RSB_IO_WANT_MEM_ALLOC_TOT</a>.  <a href="#a512361fe2c126a7baa412e4b680d8a2f"></a><br/></td></tr>
+<tr class="memitem:aa89d96645cdd1a902fdfb0377a0a5ea2"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#aa89d96645cdd1a902fdfb0377a0a5ea2">rsb_io_want_leaf_level_multivec</a> = INT(Z"0000015", C_INT)</td></tr>
+<tr class="memdesc:aa89d96645cdd1a902fdfb0377a0a5ea2"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a1584d16b27130ebda9f7fefa1d89afa5">RSB_IO_WANT_LEAF_LEVEL_MULTIVEC</a>.  <a href="#aa89d96645cdd1a902fdfb0377a0a5ea2"></a><br/></td></tr>
+<tr class="memitem:afa4f68bc0184148f7790351c28cbae50"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#afa4f68bc0184148f7790351c28cbae50">rsb_io_want_max_memory_allocations</a> = INT(Z"0000016", C_INT)</td></tr>
+<tr class="memdesc:afa4f68bc0184148f7790351c28cbae50"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a124bff2579d966823c2371e304656f84">RSB_IO_WANT_MAX_MEMORY_ALLOCATIONS</a>.  <a href="#afa4f68bc0184148f7790351c28cbae50"></a><br/></td></tr>
+<tr class="memitem:a0ceca511d93a29126225dd783af190d2"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a0ceca511d93a29126225dd783af190d2">rsb_io_want_max_memory_allocated</a> = INT(Z"0000017", C_INT)</td></tr>
+<tr class="memdesc:a0ceca511d93a29126225dd783af190d2"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ab053d73dfb6ce061b9d95a2f7e908dc9">RSB_IO_WANT_MAX_MEMORY_ALLOCATED</a>.  <a href="#a0ceca511d93a29126225dd783af190d2"></a><br/></td></tr>
+<tr class="memitem:a565392da24b3006eaeaf5c0d1c5a424d"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a565392da24b3006eaeaf5c0d1c5a424d">rsb_io_want_librsb_etime</a> = INT(Z"0000018", C_INT)</td></tr>
+<tr class="memdesc:a565392da24b3006eaeaf5c0d1c5a424d"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ae900da85e3fc1f46083ee0abf34db1d9">RSB_IO_WANT_LIBRSB_ETIME</a>.  <a href="#a565392da24b3006eaeaf5c0d1c5a424d"></a><br/></td></tr>
+<tr class="memitem:ac95404408be9bc2045e8455881d21377"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#ac95404408be9bc2045e8455881d21377">rsb_io_want_verbose_tuning</a> = INT(Z"0000019", C_INT)</td></tr>
+<tr class="memdesc:ac95404408be9bc2045e8455881d21377"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a2164b61bd47cf53a3c8d287b419ab591">RSB_IO_WANT_VERBOSE_TUNING</a>.  <a href="#ac95404408be9bc2045e8455881d21377"></a><br/></td></tr>
+<tr class="memitem:a21ae01944a05b24822a824390789b1ee"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a21ae01944a05b24822a824390789b1ee">rsb_extf_norm_one</a> = INT(Z"000001001", C_INT)</td></tr>
+<tr class="memdesc:a21ae01944a05b24822a824390789b1ee"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a4c48a7a285045f4614a83c50ad740508">RSB_EXTF_NORM_ONE</a>.  <a href="#a21ae01944a05b24822a824390789b1ee"></a><br/></td></tr>
+<tr class="memitem:afb2e1af58af877281f96f6a2aeb77c99"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#afb2e1af58af877281f96f6a2aeb77c99">rsb_extf_norm_two</a> = INT(Z"000001002", C_INT)</td></tr>
+<tr class="memdesc:afb2e1af58af877281f96f6a2aeb77c99"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9af5f5082e70a6193ebcf3ea7ba7365eef">RSB_EXTF_NORM_TWO</a>.  <a href="#afb2e1af58af877281f96f6a2aeb77c99"></a><br/></td></tr>
+<tr class="memitem:a396ba7496087621b292a7e2e68e976c8"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a396ba7496087621b292a7e2e68e976c8">rsb_extf_norm_inf</a> = INT(Z"000001003", C_INT)</td></tr>
+<tr class="memdesc:a396ba7496087621b292a7e2e68e976c8"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a0a6cb081d0345b5bb6290ae534e3502f">RSB_EXTF_NORM_INF</a>.  <a href="#a396ba7496087621b292a7e2e68e976c8"></a><br/></td></tr>
+<tr class="memitem:a5228e51b964240df80dba35826a1a6c9"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a5228e51b964240df80dba35826a1a6c9">rsb_extf_sums_row</a> = INT(Z"000001004", C_INT)</td></tr>
+<tr class="memdesc:a5228e51b964240df80dba35826a1a6c9"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9af9b17f6ad2d8be781b003836f0403fe5">RSB_EXTF_SUMS_ROW</a>.  <a href="#a5228e51b964240df80dba35826a1a6c9"></a><br/></td></tr>
+<tr class="memitem:a7aff705dacd272bad5d692b2775d5c93"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a7aff705dacd272bad5d692b2775d5c93">rsb_extf_sums_col</a> = INT(Z"000001005", C_INT)</td></tr>
+<tr class="memdesc:a7aff705dacd272bad5d692b2775d5c93"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a1878f79ae6f00f0b846a2fae397ffe4e">RSB_EXTF_SUMS_COL</a>.  <a href="#a7aff705dacd272bad5d692b2775d5c93"></a><br/></td></tr>
+<tr class="memitem:ae6d4323a95cd3284314c787dfb05c854"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#ae6d4323a95cd3284314c787dfb05c854">rsb_extf_asums_row</a> = INT(Z"000001006", C_INT)</td></tr>
+<tr class="memdesc:ae6d4323a95cd3284314c787dfb05c854"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a8e8061312124af555196c7277102ca54">RSB_EXTF_ASUMS_ROW</a>.  <a href="#ae6d4323a95cd3284314c787dfb05c854"></a><br/></td></tr>
+<tr class="memitem:af6f1f5ccf7d0c80b61bce19f5c64acc0"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#af6f1f5ccf7d0c80b61bce19f5c64acc0">rsb_extf_asums_col</a> = INT(Z"000001007", C_INT)</td></tr>
+<tr class="memdesc:af6f1f5ccf7d0c80b61bce19f5c64acc0"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a26a147a4fe29284c1a3ca18ed3824ada">RSB_EXTF_ASUMS_COL</a>.  <a href="#af6f1f5ccf7d0c80b61bce19f5c64acc0"></a><br/></td></tr>
+<tr class="memitem:a4019120043663ffa9e39b9e042d1e13a"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a4019120043663ffa9e39b9e042d1e13a">rsb_extf_diag</a> = INT(Z"000000004", C_INT)</td></tr>
+<tr class="memdesc:a4019120043663ffa9e39b9e042d1e13a"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a5c7c241fb262968d5b7c42e63e5c1ea1">RSB_EXTF_DIAG</a>.  <a href="#a4019120043663ffa9e39b9e042d1e13a"></a><br/></td></tr>
+<tr class="memitem:a9aa6c9b3d7034de75ebca4a5c1eba668"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a9aa6c9b3d7034de75ebca4a5c1eba668">rsb_marf_rgb</a> = INT(Z"000000001", C_INT)</td></tr>
+<tr class="memdesc:a9aa6c9b3d7034de75ebca4a5c1eba668"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a53604f78febc54c616282c66bca02daf">RSB_MARF_RGB</a>.  <a href="#a9aa6c9b3d7034de75ebca4a5c1eba668"></a><br/></td></tr>
+<tr class="memitem:aa1f0a3a95206057e5be739fd9b114e12"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#aa1f0a3a95206057e5be739fd9b114e12">rsb_marf_eps_s</a> = INT(Z"000000010", C_INT)</td></tr>
+<tr class="memdesc:aa1f0a3a95206057e5be739fd9b114e12"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a8055e62d2824131421d22de1a0256f79">RSB_MARF_EPS_S</a>.  <a href="#aa1f0a3a95206057e5be739fd9b114e12"></a><br/></td></tr>
+<tr class="memitem:a448f95924a27a7bc591db9590b62d6b5"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a448f95924a27a7bc591db9590b62d6b5">rsb_marf_eps_b</a> = INT(Z"000000020", C_INT)</td></tr>
+<tr class="memdesc:a448f95924a27a7bc591db9590b62d6b5"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a77106fe2435306ef028060d0eb7dca14">RSB_MARF_EPS_B</a>.  <a href="#a448f95924a27a7bc591db9590b62d6b5"></a><br/></td></tr>
+<tr class="memitem:a862ec78887803b5649251bd70bd7cba0"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a862ec78887803b5649251bd70bd7cba0">rsb_marf_eps</a> = INT(Z"000000030", C_INT)</td></tr>
+<tr class="memdesc:a862ec78887803b5649251bd70bd7cba0"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a2d332e6ed899c019e54ab4e540c82fd8">RSB_MARF_EPS</a>.  <a href="#a862ec78887803b5649251bd70bd7cba0"></a><br/></td></tr>
+<tr class="memitem:aa7582e5c9cddf8ad409485cbfa6ebac4"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#aa7582e5c9cddf8ad409485cbfa6ebac4">rsb_marf_eps_l</a> = INT(Z"000000070", C_INT)</td></tr>
+<tr class="memdesc:aa7582e5c9cddf8ad409485cbfa6ebac4"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a3562195777ed886282bd6287551a235c">RSB_MARF_EPS_L</a>.  <a href="#aa7582e5c9cddf8ad409485cbfa6ebac4"></a><br/></td></tr>
+<tr class="memitem:a912caf1dfbc9eecd804ec0e9b330809f"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a912caf1dfbc9eecd804ec0e9b330809f">rsb_mif_index_storage_in_bytes__to__size_t</a> = INT(Z"000000001", C_INT)</td></tr>
+<tr class="memdesc:a912caf1dfbc9eecd804ec0e9b330809f"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa21c25054ec5c5a88f951d68457132858">RSB_MIF_INDEX_STORAGE_IN_BYTES__TO__SIZE_T</a>.  <a href="#a912caf1dfbc9eecd804ec0e9b330809f"></a><br/></td></tr>
+<tr class="memitem:a728a103d20814d978ac073fc51791897"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a728a103d20814d978ac073fc51791897">rsb_mif_index_storage_in_bytes_per_nnz__to__rsb_real_t</a> = INT(Z"000000002", C_INT)</td></tr>
+<tr class="memdesc:a728a103d20814d978ac073fc51791897"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa6662a0302f39b67aa567f7c023cfe065">RSB_MIF_INDEX_STORAGE_IN_BYTES_PER_NNZ__TO__RSB_REAL_T</a>.  <a href="#a728a103d20814d978ac073fc51791897"></a><br/></td></tr>
+<tr class="memitem:adcdc1cf3fe0032524c482bc2be4b4b7d"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#adcdc1cf3fe0032524c482bc2be4b4b7d">rsb_mif_matrix_rows__to__rsb_coo_index_t</a> = INT(Z"000000004", C_INT)</td></tr>
+<tr class="memdesc:adcdc1cf3fe0032524c482bc2be4b4b7d"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fae9b21eeea628145e87690a5968a5c954">RSB_MIF_MATRIX_ROWS__TO__RSB_COO_INDEX_T</a>.  <a href="#adcdc1cf3fe0032524c482bc2be4b4b7d"></a><br/></td></tr>
+<tr class="memitem:a0cb66e8ecfec31c29be967b928caf767"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a0cb66e8ecfec31c29be967b928caf767">rsb_mif_matrix_cols__to__rsb_coo_index_t</a> = INT(Z"000000008", C_INT)</td></tr>
+<tr class="memdesc:a0cb66e8ecfec31c29be967b928caf767"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8faa75c11724776205763e381cebb7059d0">RSB_MIF_MATRIX_COLS__TO__RSB_COO_INDEX_T</a>.  <a href="#a0cb66e8ecfec31c29be967b928caf767"></a><br/></td></tr>
+<tr class="memitem:ae3d4d4559c433e7ac5dd51f63bd1933f"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#ae3d4d4559c433e7ac5dd51f63bd1933f">rsb_mif_matrix_nnz__to__rsb_nnz_index_t</a> = INT(Z"000000010", C_INT)</td></tr>
+<tr class="memdesc:ae3d4d4559c433e7ac5dd51f63bd1933f"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa4c02a263fffec5ad80552c8ce3cc782c">RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T</a>.  <a href="#ae3d4d4559c433e7ac5dd51f63bd1933f"></a><br/></td></tr>
+<tr class="memitem:a4f193b007e217530bf2a45c65d58673f"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a4f193b007e217530bf2a45c65d58673f">rsb_mif_total_size__to__size_t</a> = INT(Z"000000020", C_INT)</td></tr>
+<tr class="memdesc:a4f193b007e217530bf2a45c65d58673f"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa16df07735a83056772b8bde7359e957f">RSB_MIF_TOTAL_SIZE__TO__SIZE_T</a>.  <a href="#a4f193b007e217530bf2a45c65d58673f"></a><br/></td></tr>
+<tr class="memitem:ad0e9b8ffe63a338a7e03ad62d3a4b046"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#ad0e9b8ffe63a338a7e03ad62d3a4b046">rsb_mif_matrix_flags__to__rsb_flags_t</a> = INT(Z"000000040", C_INT)</td></tr>
+<tr class="memdesc:ad0e9b8ffe63a338a7e03ad62d3a4b046"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa7a9e06fbef26bddc97005eea246c478e">RSB_MIF_MATRIX_FLAGS__TO__RSB_FLAGS_T</a>.  <a href="#ad0e9b8ffe63a338a7e03ad62d3a4b046"></a><br/></td></tr>
+<tr class="memitem:a008a647728ce9aa30846a913c0c620f9"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a008a647728ce9aa30846a913c0c620f9">rsb_mif_matrix_typecode__to__rsb_type_t</a> = INT(Z"000000080", C_INT)</td></tr>
+<tr class="memdesc:a008a647728ce9aa30846a913c0c620f9"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa0ee69c4f0e9ac9a8ee4614a295b7be93">RSB_MIF_MATRIX_TYPECODE__TO__RSB_TYPE_T</a>.  <a href="#a008a647728ce9aa30846a913c0c620f9"></a><br/></td></tr>
+<tr class="memitem:a24db597e798fc524428ff052bd5ee3bb"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a24db597e798fc524428ff052bd5ee3bb">rsb_mif_matrix_info__to__char_p</a> = INT(Z"000000100", C_INT)</td></tr>
+<tr class="memdesc:a24db597e798fc524428ff052bd5ee3bb"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa693bf11ea0f96ef79238ab422fcb3f81">RSB_MIF_MATRIX_INFO__TO__CHAR_P</a>.  <a href="#a24db597e798fc524428ff052bd5ee3bb"></a><br/></td></tr>
+<tr class="memitem:ac954dfff99410e7223094406be0f19f9"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#ac954dfff99410e7223094406be0f19f9">rsb_mif_leaves_count__to__rsb_blk_index_t</a> = INT(Z"000000200", C_INT)</td></tr>
+<tr class="memdesc:ac954dfff99410e7223094406be0f19f9"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa6256658253071990797f06872811074f">RSB_MIF_LEAVES_COUNT__TO__RSB_BLK_INDEX_T</a>.  <a href="#ac954dfff99410e7223094406be0f19f9"></a><br/></td></tr>
+<tr class="memitem:ab8b26221a9c42a654a835a8f505cdd6d"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#ab8b26221a9c42a654a835a8f505cdd6d">rsb_elopf_mul</a> = INT(Z"000000001", C_INT)</td></tr>
+<tr class="memdesc:ab8b26221a9c42a654a835a8f505cdd6d"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea5d853af7a6db57bc49cdbf7a53927e8a">RSB_ELOPF_MUL</a>.  <a href="#ab8b26221a9c42a654a835a8f505cdd6d"></a><br/></td></tr>
+<tr class="memitem:a0fb14e2ce3e4033c5f2075a823a358e2"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a0fb14e2ce3e4033c5f2075a823a358e2">rsb_elopf_div</a> = INT(Z"000000002", C_INT)</td></tr>
+<tr class="memdesc:a0fb14e2ce3e4033c5f2075a823a358e2"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea5665d0891b6ec738013ae7925de01969">RSB_ELOPF_DIV</a>.  <a href="#a0fb14e2ce3e4033c5f2075a823a358e2"></a><br/></td></tr>
+<tr class="memitem:a97685d0c5f78c8e996b85689f58309ba"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a97685d0c5f78c8e996b85689f58309ba">rsb_elopf_pow</a> = INT(Z"000000004", C_INT)</td></tr>
+<tr class="memdesc:a97685d0c5f78c8e996b85689f58309ba"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadeae2cbeab6782b6e02b069568ec44cb94a">RSB_ELOPF_POW</a>.  <a href="#a97685d0c5f78c8e996b85689f58309ba"></a><br/></td></tr>
+<tr class="memitem:af632e8ad15e51d2fcdad5f81d22aecab"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#af632e8ad15e51d2fcdad5f81d22aecab">rsb_elopf_neg</a> = INT(Z"000000008", C_INT)</td></tr>
+<tr class="memdesc:af632e8ad15e51d2fcdad5f81d22aecab"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea299b987e6a560bf0bec0432859a959e4">RSB_ELOPF_NEG</a>.  <a href="#af632e8ad15e51d2fcdad5f81d22aecab"></a><br/></td></tr>
+<tr class="memitem:a036bcb7f9a4156f984d9bfe8f7829c9c"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a036bcb7f9a4156f984d9bfe8f7829c9c">rsb_elopf_scale_rows</a> = INT(Z"000000010", C_INT)</td></tr>
+<tr class="memdesc:a036bcb7f9a4156f984d9bfe8f7829c9c"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea884b319e42b1f2d70543e26c300a4287">RSB_ELOPF_SCALE_ROWS</a>.  <a href="#a036bcb7f9a4156f984d9bfe8f7829c9c"></a><br/></td></tr>
+<tr class="memitem:a0b36e2eac438cae0c3cb2171aa89a580"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a0b36e2eac438cae0c3cb2171aa89a580">rsb_elopf_scale_cols</a> = INT(Z"000000020", C_INT)</td></tr>
+<tr class="memdesc:a0b36e2eac438cae0c3cb2171aa89a580"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadeafadcdf204c627d95c3dde82ee0c5608e">RSB_ELOPF_SCALE_COLS</a>.  <a href="#a0b36e2eac438cae0c3cb2171aa89a580"></a><br/></td></tr>
+<tr class="memitem:acc73315ac4e1af0fc6c90c3d189a1c2a"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#acc73315ac4e1af0fc6c90c3d189a1c2a">rsb_elopf_scale_rows_real</a> = INT(Z"000000040", C_INT)</td></tr>
+<tr class="memdesc:acc73315ac4e1af0fc6c90c3d189a1c2a"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea445dc5c113f761b58356e93e1b2bbfb5">RSB_ELOPF_SCALE_ROWS_REAL</a>.  <a href="#acc73315ac4e1af0fc6c90c3d189a1c2a"></a><br/></td></tr>
+<tr class="memitem:a224d2a379853ca7bad32b5921437f531"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a224d2a379853ca7bad32b5921437f531">rsb_elopf_scale_cols_real</a> = INT(Z"000000080", C_INT)</td></tr>
+<tr class="memdesc:a224d2a379853ca7bad32b5921437f531"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea3a56f10b068d68e579bf4b01f8347f3f">RSB_ELOPF_SCALE_COLS_REAL</a>.  <a href="#a224d2a379853ca7bad32b5921437f531"></a><br/></td></tr>
+<tr class="memitem:aa3e1b0443ca75f7f78983737770ee95a"><td class="memItemLeft" align="right" valign="top">integer(c_int), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#aa3e1b0443ca75f7f78983737770ee95a">rsb_precf_ilu0</a> = INT(Z"000000001", C_INT)</td></tr>
+<tr class="memdesc:aa3e1b0443ca75f7f78983737770ee95a"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a56bb6be11af9a5a0ed9aaa8774ab6db9">RSB_PRECF_ILU0</a>.  <a href="#aa3e1b0443ca75f7f78983737770ee95a"></a><br/></td></tr>
+<tr class="memitem:a313effa5a93a26ea72326e6c89bdaf82"><td class="memItemLeft" align="right" valign="top">type(c_ptr), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#a313effa5a93a26ea72326e6c89bdaf82">rsb_null_init_options</a> = C_NULL_PTR</td></tr>
+<tr class="memdesc:a313effa5a93a26ea72326e6c89bdaf82"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#add105c42e570c5c269680d437f8c51e2">RSB_NULL_INIT_OPTIONS</a>.  <a href="#a313effa5a93a26ea72326e6c89bdaf82"></a><br/></td></tr>
+<tr class="memitem:ad1a0a65364c48d23f9c82ef83c97c420"><td class="memItemLeft" align="right" valign="top">type(c_ptr), parameter </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html#ad1a0a65364c48d23f9c82ef83c97c420">rsb_null_exit_options</a> = C_NULL_PTR</td></tr>
+<tr class="memdesc:ad1a0a65364c48d23f9c82ef83c97c420"><td class="mdescLeft"> </td><td class="mdescRight">See <a class="el" href="rsb_8h.html#a2234a5e51156de6c95c3f8c2951ae09f">RSB_NULL_EXIT_OPTIONS</a>.  <a href="#ad1a0a65364c48d23f9c82ef83c97c420"></a><br/></td></tr>
+</table>
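+<p>The parameters above mirror, one by one, the C macros declared in <a class="el" href="rsb_8h.html">rsb.h</a>; each <code>rsb_mif_*</code> name also encodes the C type written through the output pointer of the matrix info query. A minimal C sketch of the library life cycle these constants support, assuming the <code>rsb_lib_init()</code>, <code>rsb_mtx_get_info()</code>, <code>rsb_mtx_free()</code> and <code>rsb_lib_exit()</code> entry points of rsb.h (the matrix assembly step is elided):</p>
+<pre class="fragment">
+#include &lt;rsb.h&gt;
+#include &lt;stdio.h&gt;
+
+int main(void)
+{
+    size_t bytes = 0;
+    struct rsb_mtx_t *mtxAp = NULL;
+
+    if (rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
+        return 1;
+    /* ... assemble mtxAp here (see the RSB_FLAG_* sketch further below) ... */
+    if (mtxAp != NULL &amp;&amp;
+        rsb_mtx_get_info(mtxAp, RSB_MIF_TOTAL_SIZE__TO__SIZE_T, &amp;bytes) == RSB_ERR_NO_ERROR)
+        printf("matrix occupies %zu bytes\n", bytes);
+    if (mtxAp != NULL)
+        rsb_mtx_free(mtxAp);
+    return rsb_lib_exit(RSB_NULL_EXIT_OPTIONS) == RSB_ERR_NO_ERROR ? 0 : 1;
+}
+</pre>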
+<h2>Field Documentation</h2>
+<a class="anchor" id="a0fb14e2ce3e4033c5f2075a823a358e2"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_elopf_div = INT(Z"000000002", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea5665d0891b6ec738013ae7925de01969">RSB_ELOPF_DIV</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="ab8b26221a9c42a654a835a8f505cdd6d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_elopf_mul = INT(Z"000000001", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea5d853af7a6db57bc49cdbf7a53927e8a">RSB_ELOPF_MUL</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="af632e8ad15e51d2fcdad5f81d22aecab"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_elopf_neg = INT(Z"000000008", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea299b987e6a560bf0bec0432859a959e4">RSB_ELOPF_NEG</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a97685d0c5f78c8e996b85689f58309ba"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_elopf_pow = INT(Z"000000004", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadeae2cbeab6782b6e02b069568ec44cb94a">RSB_ELOPF_POW</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a0b36e2eac438cae0c3cb2171aa89a580"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_elopf_scale_cols = INT(Z"000000020", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadeafadcdf204c627d95c3dde82ee0c5608e">RSB_ELOPF_SCALE_COLS</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a224d2a379853ca7bad32b5921437f531"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_elopf_scale_cols_real = INT(Z"000000080", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea3a56f10b068d68e579bf4b01f8347f3f">RSB_ELOPF_SCALE_COLS_REAL</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a036bcb7f9a4156f984d9bfe8f7829c9c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_elopf_scale_rows = INT(Z"000000010", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea884b319e42b1f2d70543e26c300a4287">RSB_ELOPF_SCALE_ROWS</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="acc73315ac4e1af0fc6c90c3d189a1c2a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_elopf_scale_rows_real = INT(Z"000000040", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea445dc5c113f761b58356e93e1b2bbfb5">RSB_ELOPF_SCALE_ROWS_REAL</a>. </p>
+
+</div>
+</div>
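+<p>The <code>rsb_elopf_*</code> values above select an elemental update applied in place to the matrix; a short C sketch for a <code>double</code>-typed matrix, assuming the <code>rsb_mtx_upd_vals()</code> entry point as declared in rsb.h, scaling each row and then multiplying the whole matrix by a scalar:</p>
+<pre class="fragment">
+#include &lt;rsb.h&gt;
+
+/* rowsc must hold one scaling factor per row of mtxAp. */
+rsb_err_t scale_demo(struct rsb_mtx_t *mtxAp, const double *rowsc)
+{
+    const double two = 2.0;
+    rsb_err_t errval;
+
+    /* A(i,:) *= rowsc[i] */
+    errval = rsb_mtx_upd_vals(mtxAp, RSB_ELOPF_SCALE_ROWS, rowsc);
+    if (errval != RSB_ERR_NO_ERROR)
+        return errval;
+    /* A *= 2 */
+    return rsb_mtx_upd_vals(mtxAp, RSB_ELOPF_MUL, &amp;two);
+}
+</pre>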
+<a class="anchor" id="a05f3d2c8888332697f182ea6d8ab66b0"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_err_badargs = -INT(Z"0020", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#af0b262c6c554403269234219b3aec409">RSB_ERR_BADARGS</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a4e124cfacc5e0492952ccda10905206a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_err_corrupt_input_data = -INT(Z"01000", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a14103828be5eb82e40d3b772ce54abda">RSB_ERR_CORRUPT_INPUT_DATA</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a65da259a04a3b6b09b1e67d2aae53108"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_err_could_not_honour_externally_allocation_flags = -INT(Z"04000", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a935de71c3acc5714ad539d65288e2593">RSB_ERR_COULD_NOT_HONOUR_EXTERNALLY_ALLOCATION_FLAGS</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a0abffcaa259b8f2cbf1b025c4c179fb0"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_err_enomem = -INT(Z"0040", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a538215b32e908646c979a2e446ae5467">RSB_ERR_ENOMEM</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="aa0868e7080760845d911eae040df8c44"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_err_failed_memhier_detection = -INT(Z"02000", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a3cacb604d0ad892e195c7c97eda18dba">RSB_ERR_FAILED_MEMHIER_DETECTION</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="abe86debd990b7989427a98378c0c2ea4"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_err_generic_error = -INT(Z"0001", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#ad46ebc803d7cad695babdc7d8c709828">RSB_ERR_GENERIC_ERROR</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a76c59842ba7bef3a5e0cfe577b45e3af"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_err_internal_error = -INT(Z"0010", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a8e650a7e3b5c5aa1fb9763b0f1498126">RSB_ERR_INTERNAL_ERROR</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a018c06fd82826d0b56fdec98da22da17"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_err_invalid_numerical_data = -INT(Z"010000", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a8d504baa13048da05bb71235e2c8d181">RSB_ERR_INVALID_NUMERICAL_DATA</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a20784aca964572d033d9f79a08b8842d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_err_limits = -INT(Z"0200", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a3d7758ee9127e0c93c9075402999d154">RSB_ERR_LIMITS</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a3534459ee186379f45444c289df70175"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_err_memory_leak = -INT(Z"020000", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a1b63053f52d6426b726a05b206a3862a">RSB_ERR_MEMORY_LEAK</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a2f418e43e861a006b5aea1d55913fee2"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_err_no_error = -INT(Z"0000", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a8dec384225c4700df1b201b6dbc5aa60"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_err_no_stream_output_configured_out = -INT(Z"08000", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a9d7fe7c0e3fabfba57bf2318459ed18a">RSB_ERR_NO_STREAM_OUTPUT_CONFIGURED_OUT</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="aed70b921cdbe20cc81d03c9b9c7aab38"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_err_no_user_configuration = -INT(Z"0800", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a5ab0f86009e1f934b25b23fc4837b9b0">RSB_ERR_NO_USER_CONFIGURATION</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a4405be6ac615c1db2c161185d455374c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_err_unimplemented_yet = -INT(Z"0100", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a0bd20d0f68cf911bf9dfda495d8e12db">RSB_ERR_UNIMPLEMENTED_YET</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="ac81e797f7f250fb3d2c20f2a46360838"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_err_unsupported_feature = -INT(Z"0400", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#accf836c8eb3145e9ab4fd277d6911764">RSB_ERR_UNSUPPORTED_FEATURE</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a48a68ee015ab06c1b72e26659479cd9e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_err_unsupported_format = -INT(Z"0008", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#ac00cd41eab18a0d2b9323b401029dd73">RSB_ERR_UNSUPPORTED_FORMAT</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="aa9069fa99bea2127f31ac62365b19bcd"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_err_unsupported_operation = -INT(Z"0002", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#ab4f407e7c8364bee51cc77546d6f0922">RSB_ERR_UNSUPPORTED_OPERATION</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="ab8643c59b36b245e6f59ce00e10ad17f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_err_unsupported_type = -INT(Z"0004", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#afdf2ab3912960ee19f23e7d585371548">RSB_ERR_UNSUPPORTED_TYPE</a>. </p>
+
+</div>
+</div>
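+<p>Each <code>rsb_err_*</code> value above negates a distinct bit, with <code>rsb_err_no_error</code> alone being zero, so any nonzero return value indicates failure. A C sketch decoding a return value to text, assuming the <code>rsb_strerror_r()</code> entry point of rsb.h:</p>
+<pre class="fragment">
+#include &lt;rsb.h&gt;
+#include &lt;stdio.h&gt;
+
+void report(rsb_err_t errval)
+{
+    char msg[128];
+
+    if (errval == RSB_ERR_NO_ERROR)
+        return; /* success: nothing to report */
+    if (rsb_strerror_r(errval, msg, sizeof(msg)) == RSB_ERR_NO_ERROR)
+        fprintf(stderr, "librsb: %s\n", msg);
+    else
+        fprintf(stderr, "librsb: unrecognized error code\n");
+}
+</pre>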
+<a class="anchor" id="af6f1f5ccf7d0c80b61bce19f5c64acc0"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_extf_asums_col = INT(Z"000001007", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a26a147a4fe29284c1a3ca18ed3824ada">RSB_EXTF_ASUMS_COL</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="ae6d4323a95cd3284314c787dfb05c854"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_extf_asums_row = INT(Z"000001006", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a8e8061312124af555196c7277102ca54">RSB_EXTF_ASUMS_ROW</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a4019120043663ffa9e39b9e042d1e13a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_extf_diag = INT(Z"000000004", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a5c7c241fb262968d5b7c42e63e5c1ea1">RSB_EXTF_DIAG</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a396ba7496087621b292a7e2e68e976c8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_extf_norm_inf = INT(Z"000001003", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a0a6cb081d0345b5bb6290ae534e3502f">RSB_EXTF_NORM_INF</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a21ae01944a05b24822a824390789b1ee"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_extf_norm_one = INT(Z"000001001", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a4c48a7a285045f4614a83c50ad740508">RSB_EXTF_NORM_ONE</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="afb2e1af58af877281f96f6a2aeb77c99"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_extf_norm_two = INT(Z"000001002", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9af5f5082e70a6193ebcf3ea7ba7365eef">RSB_EXTF_NORM_TWO</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a7aff705dacd272bad5d692b2775d5c93"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_extf_sums_col = INT(Z"000001005", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a1878f79ae6f00f0b846a2fae397ffe4e">RSB_EXTF_SUMS_COL</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a5228e51b964240df80dba35826a1a6c9"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_extf_sums_row = INT(Z"000001004", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9af9b17f6ad2d8be781b003836f0403fe5">RSB_EXTF_SUMS_ROW</a>. </p>
+
+</div>
+</div>
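+<p>The <code>rsb_extf_*</code> values above select which quantity an extraction call computes: the norms are returned through a scalar output, the diagonal and the row/column (absolute) sums through a vector output. A C sketch, assuming the <code>rsb_mtx_get_nrm()</code> and <code>rsb_mtx_get_vec()</code> entry points of rsb.h and a <code>double</code>-typed matrix:</p>
+<pre class="fragment">
+#include &lt;rsb.h&gt;
+
+/* diag must hold min(rows,cols) entries; normp one entry. */
+rsb_err_t norm_and_diag(const struct rsb_mtx_t *mtxAp, double *normp, double *diag)
+{
+    rsb_err_t errval;
+
+    errval = rsb_mtx_get_nrm(mtxAp, normp, RSB_EXTF_NORM_INF);
+    if (errval != RSB_ERR_NO_ERROR)
+        return errval;
+    return rsb_mtx_get_vec(mtxAp, diag, RSB_EXTF_DIAG);
+}
+</pre>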
+<a class="anchor" id="a25e0432a471ab3fca4105d40ce2e8f1e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_assembled_in_coo_arrays = INT(Z"0040000", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#adce7e20015d4a549bb8c44a00a80fc7e">RSB_FLAG_ASSEMBLED_IN_COO_ARRAYS</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a0cd8d81bf275bfdc685080e0d855fbb1"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_c_indices_interface = INT(Z"0000000", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a49a9315ba7e702e323eadca04d0d735a">RSB_FLAG_C_INDICES_INTERFACE</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="aa1d8e9f835115cdac082812d5f74b6d4"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_default_coo_matrix_flags = <a class="el" href="rsb_8h.html#a0ee1c6081692a3ca98ee7ea0c7648ec8">RSB_FLAG_WANT_COO_STORAGE</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a6b21a3edf4231070a10223f1a9ae1dc4">RSB_FLAG_DEFAULT_COO_MATRIX_FLAGS</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a83848ae1b266eea31f4462821f8bc51b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_default_csr_matrix_flags = <a class="el" href="rsb_8h.html#a12c780564b9c8db7f8104cc5952a490f">RSB_FLAG_WANT_BCSS_STORAGE</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a8c90a9ad92722ffbbf1bfcadb805c520">RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a16cc953b0faf8ba964ba79930b51f93c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_default_matrix_flags = <a class="el" href="rsb_8h.html#a17c314e28220f3b81aed9cc7d79f97e4">RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#acac4b9c09a3fd6be63e511fc5042038f">RSB_FLAG_DEFAULT_MATRIX_FLAGS</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="aba933b2d9b4534fa69226910ed84bd4c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_default_rsb_matrix_flags = (<a class="el" href="rsb_8h.html#a5ca428920608e6dd6fcc4e9a4fa8ee70">RSB_FLAG_QUAD_PARTITIONING</a>+<a class="el" href="rsb_8h.html#a693ed0d053ad81ca2ad6dc383afa0586">RSB_FLAG_USE_HALFWORD_INDICES</a>+<a class="el" href="rsb_8h.html#a0ee1c6081692a3ca98ee7ea0c7648ec8">RSB_FLAG_WANT_COO_STORAGE</a>+<a class="el" href="rsb_8h.html#a12c780564b9c8db7f8104cc5952a490f">RSB_FLAG_WANT_BCSS_STORAGE</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a17c314e28220f3b81aed9cc7d79f97e4">RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="ad27c22510fec7c8367bd34bf800cbd84"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_default_storage_flags = (<a class="el" href="rsb_8h.html#a12c780564b9c8db7f8104cc5952a490f">RSB_FLAG_WANT_BCSS_STORAGE</a>+<a class="el" href="rsb_8h.html#a0ee1c6081692a3ca98ee7ea0c7648ec8">RSB_FLAG_WANT_COO_STORAGE</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#aa83897e25c1235a780ed7fe317c78555">RSB_FLAG_DEFAULT_STORAGE_FLAGS</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a509eea3e97b56833df24cb9d2b064e26"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_diagonal = (<a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">RSB_FLAG_UPPER</a> + <a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#abccb47886fb3f8352e4e6ad801fd8efa">RSB_FLAG_DIAGONAL</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a95b0cf20f4422b337c41f2388a59fb0b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_discard_zeros = INT(Z"0000400", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#abf243a6f15925734e143703c4ad33512">RSB_FLAG_DISCARD_ZEROS</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="aa1ca91fa56bb36b6eebbf47de8ccb1be"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_duplicates_default_handle = INT(Z"0000000", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a7fee489042762b3b22d8184c592a9e52">RSB_FLAG_DUPLICATES_DEFAULT_HANDLE</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="ad6870000c6da71ba7e07676e9d9c5e42"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_duplicates_keep_last = INT(Z"0000000", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#aff85f26964888f838aa97eb371ce5da3">RSB_FLAG_DUPLICATES_KEEP_LAST</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a4e8c5001e9a26a86faefe9bd26989040"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_duplicates_sum = INT(Z"0000200", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#afd1b39c625f4249cd32fccea38957f97">RSB_FLAG_DUPLICATES_SUM</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a6d6b68525e01bb7d91eb814216c0b5bf"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_experimental_in_place_permutation_sort = INT(Z"0080000", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a1d3b9bd7a31257cc8116be3dee0125b5">RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="ab8f28a0d2ec93bf0c85ef1f30fc51e24"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_externally_allocated_arrays = INT(Z"040000000", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a6abc0e23c782b817e2ef96d8294f990d">RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a8ca3ae90c2f8e0923f80f04e53ad2c37"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_fortran_indices_interface = INT(Z"0000001", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a8ccb4d7203ce7707f9d13bd6c5ef4169">RSB_FLAG_FORTRAN_INDICES_INTERFACE</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a613fa635312f361ef115b68803807908"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_hermitian = INT(Z"0800000", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#ae3e1d6090dd2912acba58b4bc0530ab7">RSB_FLAG_HERMITIAN</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a2af139858170575356808c746b4a564a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_identical_flags = <a class="el" href="rsb_8h.html#a0ea7640214ee34c87e483c475b15827d">RSB_FLAG_NOFLAGS</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#aacf404fe630d480353ce767fd27ba097">RSB_FLAG_IDENTICAL_FLAGS</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a59dd2ec96582af74d563f8c9f1f44409"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_lower = INT(Z"0000010", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a163680fba55484e1d4e4c9a436ebc93b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_lower_hermitian = (<a class="el" href="rsb_8h.html#ae3e1d6090dd2912acba58b4bc0530ab7">RSB_FLAG_HERMITIAN</a> + <a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#aa06dcddcdd4f42fe2eeda8eb6168bd2d">RSB_FLAG_LOWER_HERMITIAN</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a1b31d44601cedab86c51a6ed2a8b0ca4"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_lower_symmetric = (<a class="el" href="rsb_8h.html#a183c4b8ead89e452d1c204c92b3f8f61">RSB_FLAG_SYMMETRIC</a> + <a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a6933030c784596e3c8dbbbd8daf62805">RSB_FLAG_LOWER_SYMMETRIC</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a7c3f1e6d9f61f9944a08efab6a00fe2f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_lower_triangular = (<a class="el" href="rsb_8h.html#adca72e259846399da3512fcb062ad518">RSB_FLAG_TRIANGULAR</a> + <a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#aed7916ce610549fc75aa0c3e2d2ae1b9">RSB_FLAG_LOWER_TRIANGULAR</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="abf74a30d663a24ff5fde624217bfea37"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_mutually_exclusive_switches = (<a class="el" href="rsb_8h.html#a3051409699970a0df3acfee8cf70b9aa">RSB_FLAG_USE_HALFWORD_INDICES_COO</a>+<a class="el" href="rsb_8h.html#a1b1cf74b08234e3c7c7d463e7c4acea1">RSB_FLAG_USE_HALFWORD_INDICES_CSR</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a6f4335cce5234a69e06188bcad418091">RSB_FLAG_MUTUALLY_EXCLUSIVE_SWITCHES</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a65dbcb1d6e6347e5b7e85b5aa49db90c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_noflags = INT(Z"0000000", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a0ea7640214ee34c87e483c475b15827d">RSB_FLAG_NOFLAGS</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a7a5366fbd6cd1814d44b1ab1068f88de"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_quad_partitioning = INT(Z"0002000", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a5ca428920608e6dd6fcc4e9a4fa8ee70">RSB_FLAG_QUAD_PARTITIONING</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="aff989c5cb6fa62c7ed25a72f30d6a864"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_recursive_more_leaves_than_threads = INT(Z"01000000", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a54d04b341465bf3dadc62ad99d55f8ca">RSB_FLAG_RECURSIVE_MORE_LEAVES_THAN_THREADS</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="abce4dd43d8147cb6fe505bda474e535c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_recursive_subdivide_more_on_diag = INT(Z"08000000", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#ad8e75dfa2b78fa82cdd31665a375d257">RSB_FLAG_RECURSIVE_SUBDIVIDE_MORE_ON_DIAG</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="ade2657fb3c17b519cc4332eac06046d3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_sorted_input = INT(Z"0000004", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a726fa64beccf21ae1b70149b88c3affb">RSB_FLAG_SORTED_INPUT</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a8325109ecda447aa1e93e8d747673f4c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_symmetric = INT(Z"0400000", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a183c4b8ead89e452d1c204c92b3f8f61">RSB_FLAG_SYMMETRIC</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a3ea9a964debcbac70d35e964666f7a1c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_triangular = INT(Z"0000008", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#adca72e259846399da3512fcb062ad518">RSB_FLAG_TRIANGULAR</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a3e5c32923f3e360e980311315a27dc7d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_unit_diag_implicit = INT(Z"0000040", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a4af24812309eb471c861ba618cb996f2">RSB_FLAG_UNIT_DIAG_IMPLICIT</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a9d9497934ece76bcf860a2a563056eca"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_upper = INT(Z"0000020", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">RSB_FLAG_UPPER</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a22eedbec9d19115a8658438f1c7cc496"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_upper_hermitian = (<a class="el" href="rsb_8h.html#ae3e1d6090dd2912acba58b4bc0530ab7">RSB_FLAG_HERMITIAN</a> + <a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">RSB_FLAG_UPPER</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a0565be78af9bac79d07376d501237b00">RSB_FLAG_UPPER_HERMITIAN</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="ab17822f489868813f38ba9609245ae55"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_upper_symmetric = (<a class="el" href="rsb_8h.html#a183c4b8ead89e452d1c204c92b3f8f61">RSB_FLAG_SYMMETRIC</a> + <a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">RSB_FLAG_UPPER</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a3c2701b010fa2928685f3253a0ff1a99">RSB_FLAG_UPPER_SYMMETRIC</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="ac3802654bb13df88bb2e7f371b12e5ea"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_upper_triangular = (<a class="el" href="rsb_8h.html#adca72e259846399da3512fcb062ad518">RSB_FLAG_TRIANGULAR</a> + <a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">RSB_FLAG_UPPER</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a9168d244582c1a4c57a9ec93d9432539">RSB_FLAG_UPPER_TRIANGULAR</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a9d39857a6f2ae454fd20d5bcc03ef17c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_use_csr_reserved = INT(Z"0200000", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a45ae263259390619ea303a5fbe2640f2">RSB_FLAG_USE_CSR_RESERVED</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="ae2c87798ff9cee8bdc0eaacdec62a5d0"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_use_halfword_indices = INT(Z"0000002", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a693ed0d053ad81ca2ad6dc383afa0586">RSB_FLAG_USE_HALFWORD_INDICES</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a6ff989a0fe4da2a71e72091fcb30a334"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_use_halfword_indices_coo = (<a class="el" href="rsb_8h.html#a693ed0d053ad81ca2ad6dc383afa0586">RSB_FLAG_USE_HALFWORD_INDICES</a>+<a class="el" href="rsb_8h.html#a0ee1c6081692a3ca98ee7ea0c7648ec8">RSB_FLAG_WANT_COO_STORAGE</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a3051409699970a0df3acfee8cf70b9aa">RSB_FLAG_USE_HALFWORD_INDICES_COO</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a7baa8d692038856c55489d2382f09e5d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_use_halfword_indices_csr = (<a class="el" href="rsb_8h.html#a693ed0d053ad81ca2ad6dc383afa0586">RSB_FLAG_USE_HALFWORD_INDICES</a>+<a class="el" href="rsb_8h.html#a45ae263259390619ea303a5fbe2640f2">RSB_FLAG_USE_CSR_RESERVED</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a1b1cf74b08234e3c7c7d463e7c4acea1">RSB_FLAG_USE_HALFWORD_INDICES_CSR</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a8ad70221bf6a5f4b458f6b700b6af8df"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_want_bcss_storage = INT(Z"0004000", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a12c780564b9c8db7f8104cc5952a490f">RSB_FLAG_WANT_BCSS_STORAGE</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a8786a38b2ca41b926b8ef6092a55b8a6"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_want_column_major_order = INT(Z"04000000", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a6ed7790c2f7129a6e051b8167c48a43c">RSB_FLAG_WANT_COLUMN_MAJOR_ORDER</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a9fda0eb0c128c193ba7d05bab64d7e90"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_want_coo_storage = INT(Z"0000100", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a0ee1c6081692a3ca98ee7ea0c7648ec8">RSB_FLAG_WANT_COO_STORAGE</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a68ace12ecb8cbcc9a7c686b2b9665c29"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_flag_want_row_major_order = INT(Z"0000000", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a7e9ef3a7ae3c22ab5c76d36b3ac482cc">RSB_FLAG_WANT_ROW_MAJOR_ORDER</a>. </p>
+
+</div>
+</div>
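+<p>Since each <code>rsb_flag_*</code> value above occupies its own bit (the composite parameters are built by addition in Fortran; the corresponding C macros are OR-ed), flags can be combined freely at assembly time. A C sketch building a small lower-symmetric COO matrix, requesting duplicate summation and discarding of explicit zeros, assuming the <code>rsb_mtx_alloc_from_coo_const()</code> entry point of rsb.h:</p>
+<pre class="fragment">
+#include &lt;rsb.h&gt;
+
+/* Returns an assembled 3x3 matrix, or NULL with the cause in *errvalp. */
+struct rsb_mtx_t *assemble_demo(rsb_err_t *errvalp)
+{
+    const double VA[] = { 4.0, 1.0, 4.0, 1.0, 4.0 };
+    const rsb_coo_idx_t IA[] = { 0, 1, 1, 2, 2 };
+    const rsb_coo_idx_t JA[] = { 0, 0, 1, 1, 2 };
+
+    /* 3x3, 5 nonzeroes, double precision, lower triangle stored. */
+    return rsb_mtx_alloc_from_coo_const(
+        VA, IA, JA, 5, RSB_NUMERICAL_TYPE_DOUBLE, 3, 3,
+        1, 1, /* brA, bcA: no blocking requested */
+        RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS | RSB_FLAG_LOWER_SYMMETRIC
+          | RSB_FLAG_DUPLICATES_SUM | RSB_FLAG_DISCARD_ZEROS,
+        errvalp);
+}
+</pre>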
+<a class="anchor" id="a81a7107ceaa5d934eced8144f7de2338"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_io_want_bounded_box_computation = INT(Z"0000008", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a3d3a5bf255dfc8719f6553e8ac4ecd53">RSB_IO_WANT_BOUNDED_BOX_COMPUTATION</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="aaf22b4c404442175bc58dc513bf13a89"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_io_want_cache_blocking_method = INT(Z"0000005", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a26f34783677f687b1e857de76a22fdd7">RSB_IO_WANT_CACHE_BLOCKING_METHOD</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a0c15802bcd77b9b98a0968beffaee9cc"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_io_want_executing_threads = INT(Z"0000009", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ad9150d4d5672d1835185d6e2286d92f4">RSB_IO_WANT_EXECUTING_THREADS</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a191f5492907ae4beca111b361955a791"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_io_want_extra_verbose_interface = INT(Z"0000010", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a56c0c6849135ce5fa9edd7907ab3e0cb">RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="aed7dc0ecede60b677144e8aba46d28b9"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_io_want_is_initialized_marker = INT(Z"0000012", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ad901e7c7c31f4b9118bb313db549ea3b">RSB_IO_WANT_IS_INITIALIZED_MARKER</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="aa89d96645cdd1a902fdfb0377a0a5ea2"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_io_want_leaf_level_multivec = INT(Z"0000015", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a1584d16b27130ebda9f7fefa1d89afa5">RSB_IO_WANT_LEAF_LEVEL_MULTIVEC</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a565392da24b3006eaeaf5c0d1c5a424d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_io_want_librsb_etime = INT(Z"0000018", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ae900da85e3fc1f46083ee0abf34db1d9">RSB_IO_WANT_LIBRSB_ETIME</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a0ceca511d93a29126225dd783af190d2"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_io_want_max_memory_allocated = INT(Z"0000017", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ab053d73dfb6ce061b9d95a2f7e908dc9">RSB_IO_WANT_MAX_MEMORY_ALLOCATED</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="afa4f68bc0184148f7790351c28cbae50"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_io_want_max_memory_allocations = INT(Z"0000016", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a124bff2579d966823c2371e304656f84">RSB_IO_WANT_MAX_MEMORY_ALLOCATIONS</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a658556e8116b0ff18bc19302fb66449a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_io_want_mem_alloc_cnt = INT(Z"0000013", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ad74c3b62aa359b12e7287e7238792e0f">RSB_IO_WANT_MEM_ALLOC_CNT</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a512361fe2c126a7baa412e4b680d8a2f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_io_want_mem_alloc_tot = INT(Z"0000014", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a87d7018453cb3179349f12f9e4667b24">RSB_IO_WANT_MEM_ALLOC_TOT</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a81327bb47b51d6c50e12c02171c8c3fe"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_io_want_memory_hierarchy_info_string = INT(Z"0000011", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a574d237ad4bb16d884bb46e5a6670d0d">RSB_IO_WANT_MEMORY_HIERARCHY_INFO_STRING</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a72c4b7daa9a9ba1c7887bb05dfb96b2c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_io_want_output_stream = INT(Z"0000003", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ae398997ce8253b813f2bbb5834e9670f">RSB_IO_WANT_OUTPUT_STREAM</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="ae4176512451ec7387ee2fbaec0c7f861"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_io_want_sort_method = INT(Z"0000004", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a8fd1736c99255474630bee80d4924673">RSB_IO_WANT_SORT_METHOD</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="ad5a1220ce0e7d5c4ce517150de22d80b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_io_want_subdivision_multiplier = INT(Z"0000006", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6aae67087e45854502f7c54e0065ed9a3a">RSB_IO_WANT_SUBDIVISION_MULTIPLIER</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a90cf14925f34712589430925a0abb92e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_io_want_verbose_errors = INT(Z"0000007", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a0681bef1f3aca28448c14c4ed7eb4001">RSB_IO_WANT_VERBOSE_ERRORS</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a33d3ac5b6383e375f2239b780af50d3f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_io_want_verbose_exit = INT(Z"0000002", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a53498790997d5ef408751f9e19994532">RSB_IO_WANT_VERBOSE_EXIT</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="abf4365a254c637b59b5f84dcef03c4e6"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_io_want_verbose_init = INT(Z"0000001", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a3a0e460ef74cf3b2edf102c1aaa73d8a">RSB_IO_WANT_VERBOSE_INIT</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="ac95404408be9bc2045e8455881d21377"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_io_want_verbose_tuning = INT(Z"0000019", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a2164b61bd47cf53a3c8d287b419ab591">RSB_IO_WANT_VERBOSE_TUNING</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a862ec78887803b5649251bd70bd7cba0"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_marf_eps = INT(Z"000000030", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a2d332e6ed899c019e54ab4e540c82fd8">RSB_MARF_EPS</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a448f95924a27a7bc591db9590b62d6b5"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_marf_eps_b = INT(Z"000000020", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a77106fe2435306ef028060d0eb7dca14">RSB_MARF_EPS_B</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="aa7582e5c9cddf8ad409485cbfa6ebac4"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_marf_eps_l = INT(Z"000000070", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a3562195777ed886282bd6287551a235c">RSB_MARF_EPS_L</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="aa1f0a3a95206057e5be739fd9b114e12"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_marf_eps_s = INT(Z"000000010", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a8055e62d2824131421d22de1a0256f79">RSB_MARF_EPS_S</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a9aa6c9b3d7034de75ebca4a5c1eba668"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_marf_rgb = INT(Z"000000001", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a53604f78febc54c616282c66bca02daf">RSB_MARF_RGB</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a912caf1dfbc9eecd804ec0e9b330809f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_mif_index_storage_in_bytes__to__size_t = INT(Z"000000001", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa21c25054ec5c5a88f951d68457132858">RSB_MIF_INDEX_STORAGE_IN_BYTES__TO__SIZE_T</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a728a103d20814d978ac073fc51791897"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_mif_index_storage_in_bytes_per_nnz__to__rsb_real_t = INT(Z"000000002", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa6662a0302f39b67aa567f7c023cfe065">RSB_MIF_INDEX_STORAGE_IN_BYTES_PER_NNZ__TO__RSB_REAL_T</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="ac954dfff99410e7223094406be0f19f9"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_mif_leaves_count__to__rsb_blk_index_t = INT(Z"000000200", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa6256658253071990797f06872811074f">RSB_MIF_LEAVES_COUNT__TO__RSB_BLK_INDEX_T</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a0cb66e8ecfec31c29be967b928caf767"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_mif_matrix_cols__to__rsb_coo_index_t = INT(Z"000000008", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8faa75c11724776205763e381cebb7059d0">RSB_MIF_MATRIX_COLS__TO__RSB_COO_INDEX_T</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="ad0e9b8ffe63a338a7e03ad62d3a4b046"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_mif_matrix_flags__to__rsb_flags_t = INT(Z"000000040", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa7a9e06fbef26bddc97005eea246c478e">RSB_MIF_MATRIX_FLAGS__TO__RSB_FLAGS_T</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a24db597e798fc524428ff052bd5ee3bb"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_mif_matrix_info__to__char_p = INT(Z"000000100", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa693bf11ea0f96ef79238ab422fcb3f81">RSB_MIF_MATRIX_INFO__TO__CHAR_P</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="ae3d4d4559c433e7ac5dd51f63bd1933f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_mif_matrix_nnz__to__rsb_nnz_index_t = INT(Z"000000010", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa4c02a263fffec5ad80552c8ce3cc782c">RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="adcdc1cf3fe0032524c482bc2be4b4b7d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_mif_matrix_rows__to__rsb_coo_index_t = INT(Z"000000004", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fae9b21eeea628145e87690a5968a5c954">RSB_MIF_MATRIX_ROWS__TO__RSB_COO_INDEX_T</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a008a647728ce9aa30846a913c0c620f9"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_mif_matrix_typecode__to__rsb_type_t = INT(Z"000000080", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa0ee69c4f0e9ac9a8ee4614a295b7be93">RSB_MIF_MATRIX_TYPECODE__TO__RSB_TYPE_T</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a4f193b007e217530bf2a45c65d58673f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_mif_total_size__to__size_t = INT(Z"000000020", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa16df07735a83056772b8bde7359e957f">RSB_MIF_TOTAL_SIZE__TO__SIZE_T</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="ad1a0a65364c48d23f9c82ef83c97c420"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">type(c_ptr), parameter rsb::rsb_null_exit_options = C_NULL_PTR</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a2234a5e51156de6c95c3f8c2951ae09f">RSB_NULL_EXIT_OPTIONS</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a313effa5a93a26ea72326e6c89bdaf82"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">type(c_ptr), parameter rsb::rsb_null_init_options = C_NULL_PTR</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#add105c42e570c5c269680d437f8c51e2">RSB_NULL_INIT_OPTIONS</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="af833bb7a31acb188d33424c3c16bd4cd"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_signed_char), parameter rsb::rsb_numerical_type_double = ICHAR('D')</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a1865b95dcc4fac4f0fe21dfe8c4ef036"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_signed_char), parameter rsb::rsb_numerical_type_double_complex = ICHAR('Z')</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="ac18d8381c23b54ccd523e7b4e50af04a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_signed_char), parameter rsb::rsb_numerical_type_float = ICHAR('S')</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="ace3d848255b280a0531407c19fffaec7"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_signed_char), parameter rsb::rsb_numerical_type_float_complex = ICHAR('C')</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a31d8f196938e468a3891fb80f1decc1f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_signed_char), parameter rsb::rsb_numerical_type_int = ICHAR('I')</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a43c72bf61ae0f1961908e27c7dd76f01"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_signed_char), parameter rsb::rsb_numerical_type_same_type = 1</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="aa3e1b0443ca75f7f78983737770ee95a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_precf_ilu0 = INT(Z"000000001", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>See <a class="el" href="rsb_8h.html#a56bb6be11af9a5a0ed9aaa8774ab6db9">RSB_PRECF_ILU0</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a2e308172e38ee4453d556792acbe464c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_transposition_c = INT(Z"043", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a89c7627f24fecaf23ead8300f671314f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_transposition_n = INT(Z"04E", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a5c11d5b2aa58a9c9067ec914265cd28f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">integer(c_int), parameter rsb::rsb_transposition_t = INT(Z"054", C_INT)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
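+<p>All of the above parameters are ISO-C-BINDING mirrors of constants from the C interface (see the linked <code>rsb.h</code> entries). The following is a minimal, non-normative C sketch of how such constants are typically passed to the library; the choice of <code>RSB_IO_WANT_VERBOSE_TUNING</code> and its value here are assumptions made for illustration only:</p>
+<pre class="fragment">
+#include &lt;rsb.h&gt;
+
+int main(void)
+{
+	rsb_int_t vt = 1; /* assumed option value, for illustration only */
+
+	/* initialize the library with default options */
+	if (rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
+		return 1;
+	/* set one of the RSB_IO_WANT_* options listed above */
+	if (rsb_lib_set_opt(RSB_IO_WANT_VERBOSE_TUNING, &amp;vt) != RSB_ERR_NO_ERROR)
+		return 1;
+	/* finalize the library */
+	return (rsb_lib_exit(RSB_NULL_EXIT_OPTIONS) == RSB_ERR_NO_ERROR) ? 0 : 1;
+}
+</pre>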
+<hr/>The documentation for this module was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/closed.png b/doc/html/closed.png
new file mode 100644
index 0000000..98cc2c9
Binary files /dev/null and b/doc/html/closed.png differ
diff --git a/doc/html/deprecated.html b/doc/html/deprecated.html
new file mode 100644
index 0000000..f2f051e
--- /dev/null
+++ b/doc/html/deprecated.html
@@ -0,0 +1,91 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Deprecated List</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li class="current"><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="header">
+  <div class="headertitle">
+<div class="title">Deprecated List </div>  </div>
+</div><!--header-->
+<div class="contents">
+<div class="textblock"><dl class="reflist">
+<dt><a class="anchor" id="_deprecated000011"></a>Global <a class="el" href="rsb_8h.html#a97106c8db99424b5b69cd6be5bf59937">rsb_file_mtx_get_dimensions</a>  </dt>
+<dd><a class="el" href="rsb_8h.html#a97106c8db99424b5b69cd6be5bf59937">rsb_file_mtx_get_dimensions</a> has been deprecated: use <a class="el" href="group__rsb__doc__rsb.html#gaa79f69918eafbd8f737b7866a00a0330">rsb_file_mtx_get_dims</a>.  </dd>
+<dt><a class="anchor" id="_deprecated000006"></a>Global <a class="el" href="rsb_8h.html#a191af7bdb17d4b0abb3a195c11e56c3b">rsb_file_mtx_render</a>  </dt>
+<dd><a class="el" href="rsb_8h.html#a191af7bdb17d4b0abb3a195c11e56c3b">rsb_file_mtx_render</a> has been deprecated: use <a class="el" href="group__rsb__doc__rsb.html#ga4b45a74b985f5cbd869bc9a540951771">rsb_file_mtx_rndr</a>.  </dd>
+<dt><a class="anchor" id="_deprecated000005"></a>Global <a class="el" href="rsb_8h.html#a552fe79778c824e8d88ddfd0d9c58586">rsb_mtx_get_norm</a>  </dt>
+<dd><a class="el" href="rsb_8h.html#a552fe79778c824e8d88ddfd0d9c58586">rsb_mtx_get_norm</a> has been deprecated: use <a class="el" href="group__rsb__doc__rsb.html#ga6a645ce89fd167d72c92cdcfbcd8ed81">rsb_mtx_get_nrm</a>.  </dd>
+<dt><a class="anchor" id="_deprecated000008"></a>Global <a class="el" href="rsb_8h.html#a8ba1704fe1f07cb9abe856d9a1a20ea9">rsb_mtx_get_preconditioner</a>  </dt>
+<dd><a class="el" href="rsb_8h.html#a8ba1704fe1f07cb9abe856d9a1a20ea9">rsb_mtx_get_preconditioner</a> has been deprecated: use <a class="el" href="group__rsb__doc__rsb.html#gadaee12cc24dac7f8ebc68efd3d09c819">rsb_mtx_get_prec</a>.  </dd>
+<dt><a class="anchor" id="_deprecated000010"></a>Global <a class="el" href="rsb_8h.html#af08b72a410e54fd7db6dcb12db232aec">rsb_mtx_get_values</a>  </dt>
+<dd><a class="el" href="rsb_8h.html#af08b72a410e54fd7db6dcb12db232aec">rsb_mtx_get_values</a> has been deprecated: use <a class="el" href="group__rsb__doc__rsb.html#gad8f1aa9ac5081edd789374e7bb82697f">rsb_mtx_get_vals</a>.  </dd>
+<dt><a class="anchor" id="_deprecated000009"></a>Global <a class="el" href="rsb_8h.html#a5b622f80450cdef4f8a06742eacbb045">rsb_mtx_set_values</a>  </dt>
+<dd><a class="el" href="rsb_8h.html#a5b622f80450cdef4f8a06742eacbb045">rsb_mtx_set_values</a> has been deprecated: use <a class="el" href="group__rsb__doc__rsb.html#gab8069ad6d5a67bc8a726131891e98c46">rsb_mtx_set_vals</a>.  </dd>
+<dt><a class="anchor" id="_deprecated000007"></a>Global <a class="el" href="rsb_8h.html#a40d40562867aceec2899cdddf79b3086">rsb_mtx_upd_values</a>  </dt>
+<dd><a class="el" href="rsb_8h.html#a40d40562867aceec2899cdddf79b3086">rsb_mtx_upd_values</a> has been deprecated: use <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals</a>.  </dd>
+<dt><a class="anchor" id="_deprecated000001"></a>Global <a class="el" href="rsb_8h.html#afeb783fe4dca5762623a621b7095dd01">RSB_REINIT_SINGLE_VALUE</a>  (IOF, IOP, IOS, ERRVAL)</dt>
+<dd>This macro has been deprecated and will be removed in a future version: use <a class="el" href="rsb__rsb_8c.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_lib_set_opt</a> or <a class="el" href="rsb__rsb_8c.html#a96a28efc32dd050d2a74208b3ad2f227">rsb_lib_get_opt</a> instead.  </dd>
+<dt><a class="anchor" id="_deprecated000002"></a>Global <a class="el" href="rsb_8h.html#aa0ca08a816983bc6294317d0e22e0509">RSB_REINIT_SINGLE_VALUE_C_IOP</a>  (IOF, IOP, IOS, ERRVAL)</dt>
+<dd>This macro has been deprecated and will be removed in a future version: use <a class="el" href="rsb__rsb_8c.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_lib_set_opt</a> instead.  </dd>
+<dt><a class="anchor" id="_deprecated000004"></a>Global <a class="el" href="rsb_8h.html#ae6f837f13f6413a163f2c6b0c02dadf2">RSB_REINIT_SINGLE_VALUE_GET</a>  (IOF, IOP, ERRVAL)</dt>
+<dd>This macro has been deprecated and will be removed in a future version: use <a class="el" href="rsb__rsb_8c.html#a96a28efc32dd050d2a74208b3ad2f227">rsb_lib_get_opt</a> instead.  </dd>
+<dt><a class="anchor" id="_deprecated000003"></a>Global <a class="el" href="rsb_8h.html#a20da3b07d4c17771762413010816e36e">RSB_REINIT_SINGLE_VALUE_SET</a>  (IOF, IOP, ERRVAL)</dt>
+<dd>This macro has been deprecated and will be removed in a future version: use <a class="el" href="rsb__rsb_8c.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_lib_set_opt</a> instead. </dd>
+</dl>
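+<p>As a non-normative illustration of the migration suggested above, this C sketch replaces the deprecated <code>RSB_REINIT_SINGLE_VALUE_SET</code> / <code>RSB_REINIT_SINGLE_VALUE_GET</code> macros with the corresponding function calls (the library is assumed to be already initialized; the option flag and value chosen here are assumptions for the example only):</p>
+<pre class="fragment">
+#include &lt;rsb.h&gt;
+
+/* Was: RSB_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_VERBOSE_TUNING, &amp;val, errval); */
+rsb_err_t set_and_reread_verbose_tuning(void)
+{
+	rsb_int_t val = 1; /* assumed option value, for illustration only */
+	rsb_err_t errval = rsb_lib_set_opt(RSB_IO_WANT_VERBOSE_TUNING, &amp;val);
+
+	if (errval == RSB_ERR_NO_ERROR)
+		errval = rsb_lib_get_opt(RSB_IO_WANT_VERBOSE_TUNING, &amp;val); /* read it back */
+	return errval;
+}
+</pre>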
+</div></div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:22 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/doxygen.css b/doc/html/doxygen.css
new file mode 100644
index 0000000..0c559a0
--- /dev/null
+++ b/doc/html/doxygen.css
@@ -0,0 +1,1163 @@
+/* The standard CSS for doxygen */
+
+body, table, div, p, dl {
+	font: 400 14px/19px Roboto,sans-serif;
+}
+
+/* @group Heading Levels */
+
+h1 {
+	font-size: 150%;
+}
+
+.title {
+	font-size: 150%;
+	font-weight: bold;
+	margin: 10px 2px;
+}
+
+h2 {
+	border-bottom: 1px solid #879ECB;
+	color: #354C7B;
+	font-size: 150%;
+	font-weight: normal;
+	margin-top: 1.75em;
+	padding-top: 8px;
+	padding-bottom: 4px;
+	width: 100%;
+}
+
+h3 {
+	font-size: 100%;
+}
+
+h1, h2, h3, h4, h5, h6 {
+	-webkit-transition: text-shadow 0.5s linear;
+	-moz-transition: text-shadow 0.5s linear;
+	-ms-transition: text-shadow 0.5s linear;
+	-o-transition: text-shadow 0.5s linear;
+	transition: text-shadow 0.5s linear;
+	margin-right: 15px;
+}
+
+h1.glow, h2.glow, h3.glow, h4.glow, h5.glow, h6.glow {
+	text-shadow: 0 0 15px cyan;
+}
+
+dt {
+	font-weight: bold;
+}
+
+div.multicol {
+	-moz-column-gap: 1em;
+	-webkit-column-gap: 1em;
+	-moz-column-count: 3;
+	-webkit-column-count: 3;
+}
+
+p.startli, p.startdd, p.starttd {
+	margin-top: 2px;
+}
+
+p.endli {
+	margin-bottom: 0px;
+}
+
+p.enddd {
+	margin-bottom: 4px;
+}
+
+p.endtd {
+	margin-bottom: 2px;
+}
+
+/* @end */
+
+caption {
+	font-weight: bold;
+}
+
+span.legend {
+        font-size: 70%;
+        text-align: center;
+}
+
+h3.version {
+        font-size: 90%;
+        text-align: center;
+}
+
+div.qindex, div.navtab{
+	background-color: #EBEFF6;
+	border: 1px solid #A3B4D7;
+	text-align: center;
+}
+
+div.qindex, div.navpath {
+	width: 100%;
+	line-height: 140%;
+}
+
+div.navtab {
+	margin-right: 15px;
+}
+
+/* @group Link Styling */
+
+a {
+	color: #3D578C;
+	font-weight: normal;
+	text-decoration: none;
+}
+
+.contents a:visited {
+	color: #4665A2;
+}
+
+a:hover {
+	text-decoration: underline;
+}
+
+a.qindex {
+	font-weight: bold;
+}
+
+a.qindexHL {
+	font-weight: bold;
+	background-color: #9CAFD4;
+	color: #ffffff;
+	border: 1px double #869DCA;
+}
+
+.contents a.qindexHL:visited {
+        color: #ffffff;
+}
+
+a.el {
+	font-weight: bold;
+}
+
+a.elRef {
+}
+
+a.code, a.code:visited {
+	color: #4665A2; 
+}
+
+a.codeRef, a.codeRef:visited {
+	color: #4665A2; 
+}
+
+/* @end */
+
+dl.el {
+	margin-left: -1cm;
+}
+
+pre.fragment {
+        border: 1px solid #C4CFE5;
+        background-color: #FBFCFD;
+        padding: 4px 6px;
+        margin: 4px 8px 4px 2px;
+        overflow: auto;
+        word-wrap: break-word;
+        font-size:  9pt;
+        line-height: 125%;
+        font-family: monospace, fixed;
+        font-size: 105%;
+}
+
+div.fragment {
+        padding: 4px;
+        margin: 4px;
+	background-color: #FBFCFD;
+	border: 1px solid #C4CFE5;
+}
+
+div.line {
+	font-family: monospace, fixed;
+        font-size: 13px;
+	min-height: 13px;
+	line-height: 1.0;
+	text-wrap: unrestricted;
+	white-space: -moz-pre-wrap; /* Moz */
+	white-space: -pre-wrap;     /* Opera 4-6 */
+	white-space: -o-pre-wrap;   /* Opera 7 */
+	white-space: pre-wrap;      /* CSS3  */
+	word-wrap: break-word;      /* IE 5.5+ */
+	text-indent: -53px;
+	padding-left: 53px;
+	padding-bottom: 0px;
+	margin: 0px;
+	-webkit-transition-property: background-color, box-shadow;
+	-webkit-transition-duration: 0.5s;
+	-moz-transition-property: background-color, box-shadow;
+	-moz-transition-duration: 0.5s;
+	-ms-transition-property: background-color, box-shadow;
+	-ms-transition-duration: 0.5s;
+	-o-transition-property: background-color, box-shadow;
+	-o-transition-duration: 0.5s;
+	transition-property: background-color, box-shadow;
+	transition-duration: 0.5s;
+}
+
+div.line.glow {
+	background-color: cyan;
+	box-shadow: 0 0 10px cyan;
+}
+
+
+span.lineno {
+	padding-right: 4px;
+	text-align: right;
+	border-right: 2px solid #0F0;
+	background-color: #E8E8E8;
+        white-space: pre;
+}
+span.lineno a {
+	background-color: #D8D8D8;
+}
+
+span.lineno a:hover {
+	background-color: #C8C8C8;
+}
+
+div.ah {
+	background-color: black;
+	font-weight: bold;
+	color: #ffffff;
+	margin-bottom: 3px;
+	margin-top: 3px;
+	padding: 0.2em;
+	border: solid thin #333;
+	border-radius: 0.5em;
+	-webkit-border-radius: .5em;
+	-moz-border-radius: .5em;
+	box-shadow: 2px 2px 3px #999;
+	-webkit-box-shadow: 2px 2px 3px #999;
+	-moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px;
+	background-image: -webkit-gradient(linear, left top, left bottom, from(#eee), to(#000),color-stop(0.3, #444));
+	background-image: -moz-linear-gradient(center top, #eee 0%, #444 40%, #000);
+}
+
+div.groupHeader {
+	margin-left: 16px;
+	margin-top: 12px;
+	font-weight: bold;
+}
+
+div.groupText {
+	margin-left: 16px;
+	font-style: italic;
+}
+
+body {
+	background-color: white;
+	color: black;
+        margin: 0;
+}
+
+div.contents {
+	margin-top: 10px;
+	margin-left: 12px;
+	margin-right: 8px;
+}
+
+td.indexkey {
+	background-color: #EBEFF6;
+	font-weight: bold;
+	border: 1px solid #C4CFE5;
+	margin: 2px 0px 2px 0;
+	padding: 2px 10px;
+        white-space: nowrap;
+        vertical-align: top;
+}
+
+td.indexvalue {
+	background-color: #EBEFF6;
+	border: 1px solid #C4CFE5;
+	padding: 2px 10px;
+	margin: 2px 0px;
+}
+
+tr.memlist {
+	background-color: #EEF1F7;
+}
+
+p.formulaDsp {
+	text-align: center;
+}
+
+img.formulaDsp {
+	
+}
+
+img.formulaInl {
+	vertical-align: middle;
+}
+
+div.center {
+	text-align: center;
+        margin-top: 0px;
+        margin-bottom: 0px;
+        padding: 0px;
+}
+
+div.center img {
+	border: 0px;
+}
+
+address.footer {
+	text-align: right;
+	padding-right: 12px;
+}
+
+img.footer {
+	border: 0px;
+	vertical-align: middle;
+}
+
+/* @group Code Colorization */
+
+span.keyword {
+	color: #008000
+}
+
+span.keywordtype {
+	color: #604020
+}
+
+span.keywordflow {
+	color: #e08000
+}
+
+span.comment {
+	color: #800000
+}
+
+span.preprocessor {
+	color: #806020
+}
+
+span.stringliteral {
+	color: #002080
+}
+
+span.charliteral {
+	color: #008080
+}
+
+span.vhdldigit { 
+	color: #ff00ff 
+}
+
+span.vhdlchar { 
+	color: #000000 
+}
+
+span.vhdlkeyword { 
+	color: #700070 
+}
+
+span.vhdllogic { 
+	color: #ff0000 
+}
+
+blockquote {
+        background-color: #F7F8FB;
+        border-left: 2px solid #9CAFD4;
+        margin: 0 24px 0 4px;
+        padding: 0 12px 0 16px;
+}
+
+/* @end */
+
+/*
+.search {
+	color: #003399;
+	font-weight: bold;
+}
+
+form.search {
+	margin-bottom: 0px;
+	margin-top: 0px;
+}
+
+input.search {
+	font-size: 75%;
+	color: #000080;
+	font-weight: normal;
+	background-color: #e8eef2;
+}
+*/
+
+td.tiny {
+	font-size: 75%;
+}
+
+.dirtab {
+	padding: 4px;
+	border-collapse: collapse;
+	border: 1px solid #A3B4D7;
+}
+
+th.dirtab {
+	background: #EBEFF6;
+	font-weight: bold;
+}
+
+hr {
+	height: 0px;
+	border: none;
+	border-top: 1px solid #4A6AAA;
+}
+
+hr.footer {
+	height: 1px;
+}
+
+/* @group Member Descriptions */
+
+table.memberdecls {
+	border-spacing: 0px;
+	padding: 0px;
+}
+
+.memberdecls td, .fieldtable tr {
+	-webkit-transition-property: background-color, box-shadow;
+	-webkit-transition-duration: 0.5s;
+	-moz-transition-property: background-color, box-shadow;
+	-moz-transition-duration: 0.5s;
+	-ms-transition-property: background-color, box-shadow;
+	-ms-transition-duration: 0.5s;
+	-o-transition-property: background-color, box-shadow;
+	-o-transition-duration: 0.5s;
+	transition-property: background-color, box-shadow;
+	transition-duration: 0.5s;
+}
+
+.memberdecls td.glow, .fieldtable tr.glow {
+	background-color: cyan;
+	box-shadow: 0 0 15px cyan;
+}
+
+.mdescLeft, .mdescRight,
+.memItemLeft, .memItemRight,
+.memTemplItemLeft, .memTemplItemRight, .memTemplParams {
+	background-color: #F9FAFC;
+	border: none;
+	margin: 4px;
+	padding: 1px 0 0 8px;
+}
+
+.mdescLeft, .mdescRight {
+	padding: 0px 8px 4px 8px;
+	color: #555;
+}
+
+.memItemLeft, .memItemRight, .memTemplParams {
+	border-bottom: 1px solid #DEE4F0;
+}
+
+.memItemLeft, .memTemplItemLeft {
+        white-space: nowrap;
+}
+
+.memItemRight {
+	width: 100%;
+}
+
+.memTemplParams {
+	color: #4665A2;
+        white-space: nowrap;
+}
+
+/* @end */
+
+/* @group Member Details */
+
+/* Styles for detailed member documentation */
+
+.memtemplate {
+	font-size: 80%;
+	color: #4665A2;
+	font-weight: normal;
+	margin-left: 9px;
+}
+
+.memnav {
+	background-color: #EBEFF6;
+	border: 1px solid #A3B4D7;
+	text-align: center;
+	margin: 2px;
+	margin-right: 15px;
+	padding: 2px;
+}
+
+.mempage {
+	width: 100%;
+}
+
+.memitem {
+	padding: 0;
+	margin-bottom: 10px;
+	margin-right: 5px;
+        -webkit-transition: box-shadow 0.5s linear;
+        -moz-transition: box-shadow 0.5s linear;
+        -ms-transition: box-shadow 0.5s linear;
+        -o-transition: box-shadow 0.5s linear;
+        transition: box-shadow 0.5s linear;
+        display: table !important;
+        width: 100%;
+}
+
+.memitem.glow {
+         box-shadow: 0 0 15px cyan;
+}
+
+.memname {
+        font-weight: bold;
+        margin-left: 6px;
+}
+
+.memname td {
+	vertical-align: bottom;
+}
+
+.memproto, dl.reflist dt {
+        border-top: 1px solid #A8B8D9;
+        border-left: 1px solid #A8B8D9;
+        border-right: 1px solid #A8B8D9;
+        padding: 6px 0px 6px 0px;
+        color: #253555;
+        font-weight: bold;
+        text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9);
+        background-image:url('nav_f.png');
+        background-repeat:repeat-x;
+        background-color: #E2E8F2;
+        /* opera specific markup */
+        box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
+        border-top-right-radius: 4px;
+        border-top-left-radius: 4px;
+        /* firefox specific markup */
+        -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px;
+        -moz-border-radius-topright: 4px;
+        -moz-border-radius-topleft: 4px;
+        /* webkit specific markup */
+        -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
+        -webkit-border-top-right-radius: 4px;
+        -webkit-border-top-left-radius: 4px;
+
+}
+
+.memdoc, dl.reflist dd {
+        border-bottom: 1px solid #A8B8D9;      
+        border-left: 1px solid #A8B8D9;      
+        border-right: 1px solid #A8B8D9; 
+        padding: 6px 10px 2px 10px;
+        background-color: #FBFCFD;
+        border-top-width: 0;
+        background-image:url('nav_g.png');
+        background-repeat:repeat-x;
+        background-color: #FFFFFF;
+        /* opera specific markup */
+        border-bottom-left-radius: 4px;
+        border-bottom-right-radius: 4px;
+        box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
+        /* firefox specific markup */
+        -moz-border-radius-bottomleft: 4px;
+        -moz-border-radius-bottomright: 4px;
+        -moz-box-shadow: rgba(0, 0, 0, 0.15) 5px 5px 5px;
+        /* webkit specific markup */
+        -webkit-border-bottom-left-radius: 4px;
+        -webkit-border-bottom-right-radius: 4px;
+        -webkit-box-shadow: 5px 5px 5px rgba(0, 0, 0, 0.15);
+}
+
+dl.reflist dt {
+        padding: 5px;
+}
+
+dl.reflist dd {
+        margin: 0px 0px 10px 0px;
+        padding: 5px;
+}
+
+.paramkey {
+	text-align: right;
+}
+
+.paramtype {
+	white-space: nowrap;
+}
+
+.paramname {
+	color: #602020;
+	white-space: nowrap;
+}
+.paramname em {
+	font-style: normal;
+}
+.paramname code {
+        line-height: 14px;
+}
+
+.params, .retval, .exception, .tparams {
+        margin-left: 0px;
+        padding-left: 0px;
+}       
+
+.params .paramname, .retval .paramname {
+        font-weight: bold;
+        vertical-align: top;
+}
+        
+.params .paramtype {
+        font-style: italic;
+        vertical-align: top;
+}       
+        
+.params .paramdir {
+        font-family: "courier new",courier,monospace;
+        vertical-align: top;
+}
+
+table.mlabels {
+	border-spacing: 0px;
+}
+
+td.mlabels-left {
+	width: 100%;
+	padding: 0px;
+}
+
+td.mlabels-right {
+	vertical-align: bottom;
+	padding: 0px;
+	white-space: nowrap;
+}
+
+span.mlabels {
+        margin-left: 8px;
+}
+
+span.mlabel {
+        background-color: #728DC1;
+        border-top:1px solid #5373B4;
+        border-left:1px solid #5373B4;
+        border-right:1px solid #C4CFE5;
+        border-bottom:1px solid #C4CFE5;
+	text-shadow: none;
+        color: white;
+        margin-right: 4px;
+        padding: 2px 3px;
+        border-radius: 3px;
+        font-size: 7pt;
+	white-space: nowrap;
+}
+
+
+
+/* @end */
+
+/* these are for tree view when not used as main index */
+
+div.directory {
+        margin: 10px 0px;
+        border-top: 1px solid #A8B8D9;
+        border-bottom: 1px solid #A8B8D9;
+        width: 100%;
+}
+
+.directory table {
+        border-collapse:collapse;
+}
+
+.directory td {
+        margin: 0px;
+        padding: 0px;
+	vertical-align: top;
+}
+
+.directory td.entry {
+        white-space: nowrap;
+        padding-right: 6px;
+}
+
+.directory td.entry a {
+        outline:none;
+}
+
+.directory td.entry a img {
+        border: none;
+}
+
+.directory td.desc {
+        width: 100%;
+        padding-left: 6px;
+	padding-right: 6px;
+	padding-top: 3px;
+	border-left: 1px solid rgba(0,0,0,0.05);
+}
+
+.directory tr.even {
+	padding-left: 6px;
+	background-color: #F7F8FB;
+}
+
+.directory img {
+	vertical-align: -30%;
+}
+
+.directory .levels {
+        white-space: nowrap;
+        width: 100%;
+        text-align: right;
+        font-size: 9pt;
+}
+
+.directory .levels span {
+        cursor: pointer;
+        padding-left: 2px;
+        padding-right: 2px;
+	color: #3D578C;
+}
+
+div.dynheader {
+        margin-top: 8px;
+	-webkit-touch-callout: none;
+	-webkit-user-select: none;
+	-khtml-user-select: none;
+	-moz-user-select: none;
+	-ms-user-select: none;
+	user-select: none;
+}
+
+address {
+	font-style: normal;
+	color: #2A3D61;
+}
+
+table.doxtable {
+	border-collapse:collapse;
+        margin-top: 4px;
+        margin-bottom: 4px;
+}
+
+table.doxtable td, table.doxtable th {
+	border: 1px solid #2D4068;
+	padding: 3px 7px 2px;
+}
+
+table.doxtable th {
+	background-color: #374F7F;
+	color: #FFFFFF;
+	font-size: 110%;
+	padding-bottom: 4px;
+	padding-top: 5px;
+}
+
+table.fieldtable {
+        width: 100%;
+        margin-bottom: 10px;
+        border: 1px solid #A8B8D9;
+        border-spacing: 0px;
+        -moz-border-radius: 4px;
+        -webkit-border-radius: 4px;
+        border-radius: 4px;
+        -moz-box-shadow: rgba(0, 0, 0, 0.15) 2px 2px 2px;
+        -webkit-box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15);
+        box-shadow: 2px 2px 2px rgba(0, 0, 0, 0.15);
+}
+
+.fieldtable td, .fieldtable th {
+        padding: 3px 7px 2px;
+}
+
+.fieldtable td.fieldtype, .fieldtable td.fieldname {
+        white-space: nowrap;
+        border-right: 1px solid #A8B8D9;
+        border-bottom: 1px solid #A8B8D9;
+        vertical-align: top;
+}
+
+.fieldtable td.fielddoc {
+        border-bottom: 1px solid #A8B8D9;
+        width: 100%;
+}
+
+.fieldtable tr:last-child td {
+        border-bottom: none;
+}
+
+.fieldtable th {
+        background-image:url('nav_f.png');
+        background-repeat:repeat-x;
+        background-color: #E2E8F2;
+        font-size: 90%;
+        color: #253555;
+        padding-bottom: 4px;
+        padding-top: 5px;
+        text-align:left;
+        -moz-border-radius-topleft: 4px;
+        -moz-border-radius-topright: 4px;
+        -webkit-border-top-left-radius: 4px;
+        -webkit-border-top-right-radius: 4px;
+        border-top-left-radius: 4px;
+        border-top-right-radius: 4px;
+        border-bottom: 1px solid #A8B8D9;
+}
+
+
+.tabsearch {
+	top: 0px;
+	left: 10px;
+	height: 36px;
+	background-image: url('tab_b.png');
+	z-index: 101;
+	overflow: hidden;
+	font-size: 13px;
+}
+
+.navpath ul
+{
+	font-size: 11px;
+	background-image:url('tab_b.png');
+	background-repeat:repeat-x;
+	height:30px;
+	line-height:30px;
+	color:#8AA0CC;
+	border:solid 1px #C2CDE4;
+	overflow:hidden;
+	margin:0px;
+	padding:0px;
+}
+
+.navpath li
+{
+	list-style-type:none;
+	float:left;
+	padding-left:10px;
+	padding-right:15px;
+	background-image:url('bc_s.png');
+	background-repeat:no-repeat;
+	background-position:right;
+	color:#364D7C;
+}
+
+.navpath li.navelem a
+{
+	height:32px;
+	display:block;
+	text-decoration: none;
+	outline: none;
+	font-family: 'Lucida Grande',Geneva,Helvetica,Arial,sans-serif;
+}
+
+.navpath li.navelem a:hover
+{
+	color:#6884BD;
+}
+
+.navpath li.footer
+{
+        list-style-type:none;
+        float:right;
+        padding-left:10px;
+        padding-right:15px;
+        background-image:none;
+        background-repeat:no-repeat;
+        background-position:right;
+        color:#364D7C;
+        font-size: 8pt;
+}
+
+
+div.summary
+{
+	float: right;
+	font-size: 8pt;
+	padding-right: 5px;
+	width: 50%;
+	text-align: right;
+}       
+
+div.summary a
+{
+	white-space: nowrap;
+}
+
+div.ingroups
+{
+	font-size: 8pt;
+	width: 50%;
+	text-align: left;
+}
+
+div.ingroups a
+{
+	white-space: nowrap;
+}
+
+div.header
+{
+        background-image:url('nav_h.png');
+        background-repeat:repeat-x;
+	background-color: #F9FAFC;
+	margin:  0px;
+	border-bottom: 1px solid #C4CFE5;
+}
+
+div.headertitle
+{
+	padding: 5px 5px 5px 10px;
+}
+
+dl
+{
+        padding: 0 0 0 10px;
+}
+
+/* dl.note, dl.warning, dl.attention, dl.pre, dl.post, dl.invariant, dl.deprecated, dl.todo, dl.test, dl.bug */
+dl.section
+{
+	margin-left: 0px;
+	padding-left: 0px;
+}
+
+dl.note
+{
+        margin-left:-7px;
+        padding-left: 3px;
+        border-left:4px solid;
+        border-color: #D0C000;
+}
+
+dl.warning, dl.attention
+{
+        margin-left:-7px;
+        padding-left: 3px;
+        border-left:4px solid;
+        border-color: #FF0000;
+}
+
+dl.pre, dl.post, dl.invariant
+{
+        margin-left:-7px;
+        padding-left: 3px;
+        border-left:4px solid;
+        border-color: #00D000;
+}
+
+dl.deprecated
+{
+        margin-left:-7px;
+        padding-left: 3px;
+        border-left:4px solid;
+        border-color: #505050;
+}
+
+dl.todo
+{
+        margin-left:-7px;
+        padding-left: 3px;
+        border-left:4px solid;
+        border-color: #00C0E0;
+}
+
+dl.test
+{
+        margin-left:-7px;
+        padding-left: 3px;
+        border-left:4px solid;
+        border-color: #3030E0;
+}
+
+dl.bug
+{
+        margin-left:-7px;
+        padding-left: 3px;
+        border-left:4px solid;
+        border-color: #C08050;
+}
+
+dl.section dd {
+	margin-bottom: 6px;
+}
+
+
+#projectlogo
+{
+	text-align: center;
+	vertical-align: bottom;
+	border-collapse: separate;
+}
+ 
+#projectlogo img
+{ 
+	border: 0px none;
+}
+ 
+#projectname
+{
+	font: 300% Tahoma, Arial,sans-serif;
+	margin: 0px;
+	padding: 2px 0px;
+}
+    
+#projectbrief
+{
+	font: 120% Tahoma, Arial,sans-serif;
+	margin: 0px;
+	padding: 0px;
+}
+
+#projectnumber
+{
+	font: 50% Tahoma, Arial,sans-serif;
+	margin: 0px;
+	padding: 0px;
+}
+
+#titlearea
+{
+	padding: 0px;
+	margin: 0px;
+	width: 100%;
+	border-bottom: 1px solid #5373B4;
+}
+
+.image
+{
+        text-align: center;
+}
+
+.dotgraph
+{
+        text-align: center;
+}
+
+.mscgraph
+{
+        text-align: center;
+}
+
+.caption
+{
+	font-weight: bold;
+}
+
+div.zoom
+{
+	border: 1px solid #90A5CE;
+}
+
+dl.citelist {
+        margin-bottom:50px;
+}
+
+dl.citelist dt {
+        color:#334975;
+        float:left;
+        font-weight:bold;
+        margin-right:10px;
+        padding:5px;
+}
+
+dl.citelist dd {
+        margin:2px 0;
+        padding:5px 0;
+}
+
+div.toc {
+        padding: 14px 25px;
+        background-color: #F4F6FA;
+        border: 1px solid #D8DFEE;
+        border-radius: 7px 7px 7px 7px;
+        float: right;
+        height: auto;
+        margin: 0 20px 10px 10px;
+        width: 200px;
+}
+
+div.toc li {
+        background: url("bdwn.png") no-repeat scroll 0 5px transparent;
+        font: 10px/1.2 Verdana,DejaVu Sans,Geneva,sans-serif;
+        margin-top: 5px;
+        padding-left: 10px;
+        padding-top: 2px;
+}
+
+div.toc h3 {
+        font: bold 12px/1.2 Arial,FreeSans,sans-serif;
+	color: #4665A2;
+        border-bottom: 0 none;
+        margin: 0;
+}
+
+div.toc ul {
+        list-style: none outside none;
+        border: medium none;
+        padding: 0px;
+}       
+
+div.toc li.level1 {
+        margin-left: 0px;
+}
+
+div.toc li.level2 {
+        margin-left: 15px;
+}
+
+div.toc li.level3 {
+        margin-left: 30px;
+}
+
+div.toc li.level4 {
+        margin-left: 45px;
+}
+
+.inherit_header {
+        font-weight: bold;
+        color: gray;
+        cursor: pointer;
+	-webkit-touch-callout: none;
+	-webkit-user-select: none;
+	-khtml-user-select: none;
+	-moz-user-select: none;
+	-ms-user-select: none;
+	user-select: none;
+}
+
+.inherit_header td {
+        padding: 6px 0px 2px 5px;
+}
+
+.inherit {
+        display: none;
+}
+
+tr.heading h2 {
+        margin-top: 12px;
+        margin-bottom: 4px;
+}
+
+@media print
+{
+  #top { display: none; }
+  #side-nav { display: none; }
+  #nav-path { display: none; }
+  body { overflow:visible; }
+  h1, h2, h3, h4, h5, h6 { page-break-after: avoid; }
+  .summary { display: none; }
+  .memitem { page-break-inside: avoid; }
+  #doc-content
+  {
+    margin-left:0 !important;
+    height:auto !important;
+    width:auto !important;
+    overflow:inherit;
+    display:inline;
+  }
+}
+
diff --git a/doc/html/doxygen.png b/doc/html/doxygen.png
new file mode 100644
index 0000000..3ff17d8
Binary files /dev/null and b/doc/html/doxygen.png differ
diff --git a/doc/html/dynsections.js b/doc/html/dynsections.js
new file mode 100644
index 0000000..116542f
--- /dev/null
+++ b/doc/html/dynsections.js
@@ -0,0 +1,78 @@
+function toggleVisibility(linkObj)
+{
+ var base = $(linkObj).attr('id');
+ var summary = $('#'+base+'-summary');
+ var content = $('#'+base+'-content');
+ var trigger = $('#'+base+'-trigger');
+ var src=$(trigger).attr('src');
+ if (content.is(':visible')===true) {
+   content.hide();
+   summary.show();
+   $(linkObj).addClass('closed').removeClass('opened');
+   $(trigger).attr('src',src.substring(0,src.length-8)+'closed.png');
+ } else {
+   content.show();
+   summary.hide();
+   $(linkObj).removeClass('closed').addClass('opened');
+   $(trigger).attr('src',src.substring(0,src.length-10)+'open.png');
+ } 
+ return false;
+}
+
+function updateStripes()
+{
+  $('table.directory tr').
+       removeClass('even').filter(':visible:even').addClass('even');
+}
+function toggleLevel(level)
+{
+  $('table.directory tr').each(function(){ 
+    var l = this.id.split('_').length-1;
+    var i = $('#img'+this.id.substring(3));
+    var a = $('#arr'+this.id.substring(3));
+    if (l<level+1) {
+      i.attr('src','ftv2folderopen.png');
+      a.attr('src','ftv2mnode.png');
+      $(this).show();
+    } else if (l==level+1) {
+      i.attr('src','ftv2folderclosed.png');
+      a.attr('src','ftv2pnode.png');
+      $(this).show();
+    } else {
+      $(this).hide();
+    }
+  });
+  updateStripes();
+}
+function toggleFolder(id) 
+{
+  var n = $('[id^=row_'+id+']');
+  var i = $('[id^=img_'+id+']');
+  var a = $('[id^=arr_'+id+']');
+  var c = n.slice(1);
+  if (c.filter(':first').is(':visible')===true) {
+    i.attr('src','ftv2folderclosed.png');
+    a.attr('src','ftv2pnode.png');
+    c.hide();
+  } else {
+    i.attr('src','ftv2folderopen.png');
+    a.attr('src','ftv2mnode.png');
+    c.show();
+  }
+  updateStripes();
+}
+
+function toggleInherit(id)
+{
+  var rows = $('tr.inherit.'+id);
+  var img = $('tr.inherit_header.'+id+' img');
+  var src = $(img).attr('src');
+  if (rows.filter(':first').is(':visible')===true) {
+    rows.css('display','none');
+    $(img).attr('src',src.substring(0,src.length-8)+'closed.png');
+  } else {
+    rows.css('display','table-row'); // using show() causes jump in firefox
+    $(img).attr('src',src.substring(0,src.length-10)+'open.png');
+  }
+}
+
diff --git a/doc/html/files.html b/doc/html/files.html
new file mode 100644
index 0000000..4e90975
--- /dev/null
+++ b/doc/html/files.html
@@ -0,0 +1,75 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: File List</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li class="current"><a href="files.html"><span>File List</span></a></li>
+      <li><a href="globals.html"><span>Globals</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="header">
+  <div class="headertitle">
+<div class="title">File List</div>  </div>
+</div><!--header-->
+<div class="contents">
+<div class="textblock">Here is a list of all files with brief descriptions:</div><div class="directory">
+<table class="directory">
+<tr id="row_0_" class="even"><td class="entry"><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2doc.png" alt="*" width="24" height="22" /><a class="el" href="blas__sparse_8h.html" target="_self">blas_sparse.h</a></td><td class="desc">This file specifies the Sparse BLAS interface to librsb. Supported types: (float, double, float complex, double complex). Unsupported types: (). Level 1 ops: (dot, axpy, ga, gz, sc). Level 2 ops: (mv, sv). Level 3 ops: (mm, sm)</td></tr>
+<tr id="row_1_"><td class="entry"><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2doc.png" alt="*" width="24" height="22" /><a class="el" href="rsb_8F90.html" target="_self">rsb.F90</a></td><td class="desc">Header file automatically generated from <<a class="el" href="rsb_8h.html" title="This file declares the user interface functions and data structures for the librsb library...">rsb.h</a>>, offering ISO-C-BINDING interfaces to <<a class="el" href="rsb_8h.html">rsb.h</a>>'s functions</td></tr>
+<tr id="row_2_" class="even"><td class="entry"><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2doc.png" alt="*" width="24" height="22" /><a class="el" href="rsb_8h.html" target="_self">rsb.h</a></td><td class="desc">This file declares the user interface functions and data structures for the <code>librsb</code> library</td></tr>
+<tr id="row_3_"><td class="entry"><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2doc.png" alt="*" width="24" height="22" /><a class="el" href="rsb__blas__sparse_8F90.html" target="_self">rsb_blas_sparse.F90</a></td><td class="desc">This file implements the Fortran Sparse BLAS interface to <code>librsb</code> </td></tr>
+<tr id="row_4_" class="even"><td class="entry"><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2doc.png" alt="*" width="24" height="22" /><a class="el" href="rsb__libspblas_8c.html" target="_self">rsb_libspblas.c</a></td><td class="desc">This file implements Sparse BLAS for librsb. Supported types: (float, double, float complex, double complex). Unsupported types: (). Level 1 ops: (dot, axpy, ga, gz, sc). Level 2 ops: (mv, sv). Level 3 ops: (mm, sm)</td></tr>
+<tr id="row_5_"><td class="entry"><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2doc.png" alt="*" width="24" height="22" /><a class="el" href="rsb__libspblas_8h.html" target="_self">rsb_libspblas.h</a></td><td class="desc">This file specifies the Sparse BLAS interface to librsb. Supported types: (float, double, float complex, double complex). Unsupported types: (). Level 1 ops: (dot, axpy, ga, gz, sc). Level 2 ops: (mv, sv). Level 3 ops: (mm, sm)</td></tr>
+<tr id="row_6_" class="even"><td class="entry"><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2doc.png" alt="*" width="24" height="22" /><a class="el" href="rsb__libspblas__handle_8c.html" target="_self">rsb_libspblas_handle.c</a></td><td class="desc"></td></tr>
+<tr id="row_7_"><td class="entry"><img src="ftv2node.png" alt="o" width="16" height="22" /><img src="ftv2doc.png" alt="*" width="24" height="22" /><a class="el" href="rsb__rsb_8c.html" target="_self">rsb_rsb.c</a></td><td class="desc">Implementation of the library user interface</td></tr>
+<tr id="row_8_" class="even"><td class="entry"><img src="ftv2lastnode.png" alt="\" width="16" height="22" /><img src="ftv2doc.png" alt="*" width="24" height="22" /><a class="el" href="rsb__types_8h.html" target="_self">rsb_types.h</a></td><td class="desc">Macros and constants which are type specific. <br/>
+ Here reside declarations related to the supported matrix numerical types, and other declarations according to the build-time options. <br/>
+ If you wish to use this library with different matrix numerical types, you shall regenerate the library source code accordingly; see the README file for how to do this. <br/>
+ Only a small part of these declarations is needed by the user (see <a class="el" href="rsb__types_8h.html#matrix_type_symbols_section">matrix_type_symbols_section</a>). <br/>
+ Therefore, only the declarations which are commented are actually meant to be used in function calls; please regard the remaining ones as internal</td></tr>
+</table>
+</div><!-- directory -->
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
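
The two Sparse BLAS files listed above (rsb_libspblas.h declares, rsb_libspblas.c implements) follow the standard handle-based cycle: begin creation, insert entries, end assembly, operate, destroy. A minimal sketch in C, not taken from this diff (matrix contents and handle names are illustrative; it assumes a build with double-precision support):

    /* Sketch: assemble a 2x2 double matrix and do y <- y + A*x via the
     * Sparse BLAS interface declared in blas_sparse.h / rsb_libspblas.h. */
    #include <rsb.h>         /* rsb_lib_init(), rsb_lib_exit() */
    #include <blas_sparse.h> /* BLAS_duscr_*(), BLAS_dusmv(), BLAS_usds() */

    int main(void)
    {
        const int nz = 2, I[] = {0, 1}, J[] = {0, 1};     /* coordinates */
        const double VA[] = {1.0, 2.0}, x[] = {1.0, 1.0};
        double y[] = {0.0, 0.0};
        blas_sparse_matrix A;

        if (rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
            return 1;
        A = BLAS_duscr_begin(2, 2);                    /* begin creation */
        BLAS_duscr_insert_entries(A, nz, VA, I, J);    /* insert entries */
        BLAS_duscr_end(A);                             /* end assembly   */
        BLAS_dusmv(blas_no_trans, 1.0, A, x, 1, y, 1); /* y += 1.0*A*x   */
        BLAS_usds(A);                                  /* destroy handle */
        return rsb_lib_exit(RSB_NULL_EXIT_OPTIONS) != RSB_ERR_NO_ERROR;
    }

The s/c/z-prefixed variants indexed later in this diff work the same way on the other supported numerical types.
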
diff --git a/doc/html/form_0.png b/doc/html/form_0.png
new file mode 100644
index 0000000..d4acc39
Binary files /dev/null and b/doc/html/form_0.png differ
diff --git a/doc/html/form_1.png b/doc/html/form_1.png
new file mode 100644
index 0000000..e14f9ce
Binary files /dev/null and b/doc/html/form_1.png differ
diff --git a/doc/html/form_10.png b/doc/html/form_10.png
new file mode 100644
index 0000000..6bd12e6
Binary files /dev/null and b/doc/html/form_10.png differ
diff --git a/doc/html/form_11.png b/doc/html/form_11.png
new file mode 100644
index 0000000..2430c11
Binary files /dev/null and b/doc/html/form_11.png differ
diff --git a/doc/html/form_12.png b/doc/html/form_12.png
new file mode 100644
index 0000000..146eff1
Binary files /dev/null and b/doc/html/form_12.png differ
diff --git a/doc/html/form_13.png b/doc/html/form_13.png
new file mode 100644
index 0000000..50d4bba
Binary files /dev/null and b/doc/html/form_13.png differ
diff --git a/doc/html/form_14.png b/doc/html/form_14.png
new file mode 100644
index 0000000..b61eaab
Binary files /dev/null and b/doc/html/form_14.png differ
diff --git a/doc/html/form_15.png b/doc/html/form_15.png
new file mode 100644
index 0000000..7362e94
Binary files /dev/null and b/doc/html/form_15.png differ
diff --git a/doc/html/form_16.png b/doc/html/form_16.png
new file mode 100644
index 0000000..1180664
Binary files /dev/null and b/doc/html/form_16.png differ
diff --git a/doc/html/form_17.png b/doc/html/form_17.png
new file mode 100644
index 0000000..594b053
Binary files /dev/null and b/doc/html/form_17.png differ
diff --git a/doc/html/form_18.png b/doc/html/form_18.png
new file mode 100644
index 0000000..b132929
Binary files /dev/null and b/doc/html/form_18.png differ
diff --git a/doc/html/form_19.png b/doc/html/form_19.png
new file mode 100644
index 0000000..2300f4d
Binary files /dev/null and b/doc/html/form_19.png differ
diff --git a/doc/html/form_2.png b/doc/html/form_2.png
new file mode 100644
index 0000000..e59c391
Binary files /dev/null and b/doc/html/form_2.png differ
diff --git a/doc/html/form_20.png b/doc/html/form_20.png
new file mode 100644
index 0000000..d70f11f
Binary files /dev/null and b/doc/html/form_20.png differ
diff --git a/doc/html/form_21.png b/doc/html/form_21.png
new file mode 100644
index 0000000..8d57347
Binary files /dev/null and b/doc/html/form_21.png differ
diff --git a/doc/html/form_22.png b/doc/html/form_22.png
new file mode 100644
index 0000000..4d02d03
Binary files /dev/null and b/doc/html/form_22.png differ
diff --git a/doc/html/form_23.png b/doc/html/form_23.png
new file mode 100644
index 0000000..dbb15c1
Binary files /dev/null and b/doc/html/form_23.png differ
diff --git a/doc/html/form_24.png b/doc/html/form_24.png
new file mode 100644
index 0000000..fe56437
Binary files /dev/null and b/doc/html/form_24.png differ
diff --git a/doc/html/form_25.png b/doc/html/form_25.png
new file mode 100644
index 0000000..a2aedaa
Binary files /dev/null and b/doc/html/form_25.png differ
diff --git a/doc/html/form_26.png b/doc/html/form_26.png
new file mode 100644
index 0000000..c2ee163
Binary files /dev/null and b/doc/html/form_26.png differ
diff --git a/doc/html/form_27.png b/doc/html/form_27.png
new file mode 100644
index 0000000..910372b
Binary files /dev/null and b/doc/html/form_27.png differ
diff --git a/doc/html/form_28.png b/doc/html/form_28.png
new file mode 100644
index 0000000..5a2ed80
Binary files /dev/null and b/doc/html/form_28.png differ
diff --git a/doc/html/form_29.png b/doc/html/form_29.png
new file mode 100644
index 0000000..c43f35c
Binary files /dev/null and b/doc/html/form_29.png differ
diff --git a/doc/html/form_3.png b/doc/html/form_3.png
new file mode 100644
index 0000000..333671f
Binary files /dev/null and b/doc/html/form_3.png differ
diff --git a/doc/html/form_30.png b/doc/html/form_30.png
new file mode 100644
index 0000000..40dab25
Binary files /dev/null and b/doc/html/form_30.png differ
diff --git a/doc/html/form_31.png b/doc/html/form_31.png
new file mode 100644
index 0000000..ac6778d
Binary files /dev/null and b/doc/html/form_31.png differ
diff --git a/doc/html/form_32.png b/doc/html/form_32.png
new file mode 100644
index 0000000..44ec2a4
Binary files /dev/null and b/doc/html/form_32.png differ
diff --git a/doc/html/form_33.png b/doc/html/form_33.png
new file mode 100644
index 0000000..68e52ff
Binary files /dev/null and b/doc/html/form_33.png differ
diff --git a/doc/html/form_34.png b/doc/html/form_34.png
new file mode 100644
index 0000000..af22473
Binary files /dev/null and b/doc/html/form_34.png differ
diff --git a/doc/html/form_35.png b/doc/html/form_35.png
new file mode 100644
index 0000000..9fd81bc
Binary files /dev/null and b/doc/html/form_35.png differ
diff --git a/doc/html/form_36.png b/doc/html/form_36.png
new file mode 100644
index 0000000..31ad283
Binary files /dev/null and b/doc/html/form_36.png differ
diff --git a/doc/html/form_37.png b/doc/html/form_37.png
new file mode 100644
index 0000000..dbecdee
Binary files /dev/null and b/doc/html/form_37.png differ
diff --git a/doc/html/form_38.png b/doc/html/form_38.png
new file mode 100644
index 0000000..c433d59
Binary files /dev/null and b/doc/html/form_38.png differ
diff --git a/doc/html/form_39.png b/doc/html/form_39.png
new file mode 100644
index 0000000..59a6dfb
Binary files /dev/null and b/doc/html/form_39.png differ
diff --git a/doc/html/form_4.png b/doc/html/form_4.png
new file mode 100644
index 0000000..4ce0ac8
Binary files /dev/null and b/doc/html/form_4.png differ
diff --git a/doc/html/form_40.png b/doc/html/form_40.png
new file mode 100644
index 0000000..fd637e9
Binary files /dev/null and b/doc/html/form_40.png differ
diff --git a/doc/html/form_41.png b/doc/html/form_41.png
new file mode 100644
index 0000000..817cf50
Binary files /dev/null and b/doc/html/form_41.png differ
diff --git a/doc/html/form_42.png b/doc/html/form_42.png
new file mode 100644
index 0000000..c1d791e
Binary files /dev/null and b/doc/html/form_42.png differ
diff --git a/doc/html/form_43.png b/doc/html/form_43.png
new file mode 100644
index 0000000..0cee8b7
Binary files /dev/null and b/doc/html/form_43.png differ
diff --git a/doc/html/form_44.png b/doc/html/form_44.png
new file mode 100644
index 0000000..2eb0f8b
Binary files /dev/null and b/doc/html/form_44.png differ
diff --git a/doc/html/form_45.png b/doc/html/form_45.png
new file mode 100644
index 0000000..8d57347
Binary files /dev/null and b/doc/html/form_45.png differ
diff --git a/doc/html/form_46.png b/doc/html/form_46.png
new file mode 100644
index 0000000..ef7cd44
Binary files /dev/null and b/doc/html/form_46.png differ
diff --git a/doc/html/form_47.png b/doc/html/form_47.png
new file mode 100644
index 0000000..e5db560
Binary files /dev/null and b/doc/html/form_47.png differ
diff --git a/doc/html/form_48.png b/doc/html/form_48.png
new file mode 100644
index 0000000..392c4cb
Binary files /dev/null and b/doc/html/form_48.png differ
diff --git a/doc/html/form_49.png b/doc/html/form_49.png
new file mode 100644
index 0000000..7116e0c
Binary files /dev/null and b/doc/html/form_49.png differ
diff --git a/doc/html/form_5.png b/doc/html/form_5.png
new file mode 100644
index 0000000..a931550
Binary files /dev/null and b/doc/html/form_5.png differ
diff --git a/doc/html/form_50.png b/doc/html/form_50.png
new file mode 100644
index 0000000..9c32e9d
Binary files /dev/null and b/doc/html/form_50.png differ
diff --git a/doc/html/form_6.png b/doc/html/form_6.png
new file mode 100644
index 0000000..bd29a7b
Binary files /dev/null and b/doc/html/form_6.png differ
diff --git a/doc/html/form_7.png b/doc/html/form_7.png
new file mode 100644
index 0000000..e46fa00
Binary files /dev/null and b/doc/html/form_7.png differ
diff --git a/doc/html/form_8.png b/doc/html/form_8.png
new file mode 100644
index 0000000..92a2eb5
Binary files /dev/null and b/doc/html/form_8.png differ
diff --git a/doc/html/form_9.png b/doc/html/form_9.png
new file mode 100644
index 0000000..baca358
Binary files /dev/null and b/doc/html/form_9.png differ
diff --git a/doc/html/formula.repository b/doc/html/formula.repository
new file mode 100644
index 0000000..48b5968
--- /dev/null
+++ b/doc/html/formula.repository
@@ -0,0 +1,51 @@
+\form#0:$r \leftarrow X^T Y,$
+\form#1:$r \leftarrow X^H Y$
+\form#2:$Y$
+\form#3:$X$
+\form#4:$Y \leftarrow \alpha X + Y$
+\form#5:$X \leftarrow Y |_x$
+\form#6:$X \leftarrow Y |_x;Y|_x\leftarrow 0$
+\form#7:$Y |_x\leftarrow X$
+\form#8:$Y \leftarrow \alpha A X + Y ,$
+\form#9:$Y \leftarrow \alpha A^T X + Y,$
+\form#10:$Y \leftarrow \alpha A^H X + Y$
+\form#11:$ \alpha $
+\form#12:$X \leftarrow \alpha T^{-1}X,$
+\form#13:$X \leftarrow \alpha T^{-T}X,$
+\form#14:$X \leftarrow \alpha T^{-H}X$
+\form#15:$C \leftarrow \alpha AB+C,$
+\form#16:$C \leftarrow \alpha A^T B+C,$
+\form#17:$C \leftarrow \alpha A^H B+C$
+\form#18:$B \leftarrow \alpha T^{-1} B,$
+\form#19:$B \leftarrow \alpha T^{-T} B,$
+\form#20:$B \leftarrow \alpha T^{-H} B$
+\form#21:$A$
+\form#22:$d\leftarrow diag(A)$
+\form#23:$A_{i,j}$
+\form#24:$B$
+\form#25:$opa(A)$
+\form#26:$Y \leftarrow \beta Y + \alpha \cdot opa(A) \cdot X $
+\form#27:$opa( A )=A$
+\form#28:$opa( A )= A ^T$
+\form#29:$opa( A )= A ^H$
+\form#30:$Y \leftarrow \alpha \cdot opt( T )^{-1} \cdot X $
+\form#31:$T$
+\form#32:$opt( T )=T$
+\form#33:$opt( T )= T ^T$
+\form#34:$opt( T )= T ^H$
+\form#35:$Y \leftarrow \alpha \cdot opt( T )^{-1} \cdot B $
+\form#36:$ A \leftarrow op (A,\Omega) $
+\form#37:$op$
+\form#38:$C \leftarrow \alpha\cdot transA(A) + \beta\cdot transB(B) $
+\form#39:$opb( B )=B$
+\form#40:$opb( B )= B ^T$
+\form#41:$opb( B )= B ^H$
+\form#42:$C \leftarrow \alpha \cdot opa(A) \cdot \beta \cdot opb(B) $
+\form#43:$\alpha=1.0$
+\form#44:$\beta=1.0$
+\form#45:${A}$
+\form#46:$B \leftarrow B + \alpha {A} $
+\form#47:$ C \leftarrow \beta\cdot C + \alpha\cdot opa(A) \cdot B $
+\form#48:$C \leftarrow \alpha opa(A) \cdot \beta \cdot opb(B) $
+\form#49:$C$
+\form#50:$D$
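
Taken together, forms #8 through #20 above encode the level-2/3 operation semantics of the interface. A compact LaTeX restatement, with op(.) ranging over identity, transpose, and conjugate transpose as selected by the trans argument:

    % Summary of forms #8--#20 from the repository above:
    \begin{align*}
      \text{(mv)} \quad & Y \leftarrow \alpha\, op(A)\, X + Y \\
      \text{(sv)} \quad & X \leftarrow \alpha\, op(T)^{-1} X \\
      \text{(mm)} \quad & C \leftarrow \alpha\, op(A)\, B + C \\
      \text{(sm)} \quad & B \leftarrow \alpha\, op(T)^{-1} B
    \end{align*}
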
diff --git a/doc/html/ftv2blank.png b/doc/html/ftv2blank.png
new file mode 100644
index 0000000..63c605b
Binary files /dev/null and b/doc/html/ftv2blank.png differ
diff --git a/doc/html/ftv2cl.png b/doc/html/ftv2cl.png
new file mode 100644
index 0000000..132f657
Binary files /dev/null and b/doc/html/ftv2cl.png differ
diff --git a/doc/html/ftv2doc.png b/doc/html/ftv2doc.png
new file mode 100644
index 0000000..17edabf
Binary files /dev/null and b/doc/html/ftv2doc.png differ
diff --git a/doc/html/ftv2folderclosed.png b/doc/html/ftv2folderclosed.png
new file mode 100644
index 0000000..bb8ab35
Binary files /dev/null and b/doc/html/ftv2folderclosed.png differ
diff --git a/doc/html/ftv2folderopen.png b/doc/html/ftv2folderopen.png
new file mode 100644
index 0000000..d6c7f67
Binary files /dev/null and b/doc/html/ftv2folderopen.png differ
diff --git a/doc/html/ftv2lastnode.png b/doc/html/ftv2lastnode.png
new file mode 100644
index 0000000..63c605b
Binary files /dev/null and b/doc/html/ftv2lastnode.png differ
diff --git a/doc/html/ftv2link.png b/doc/html/ftv2link.png
new file mode 100644
index 0000000..17edabf
Binary files /dev/null and b/doc/html/ftv2link.png differ
diff --git a/doc/html/ftv2mlastnode.png b/doc/html/ftv2mlastnode.png
new file mode 100644
index 0000000..0b63f6d
Binary files /dev/null and b/doc/html/ftv2mlastnode.png differ
diff --git a/doc/html/ftv2mnode.png b/doc/html/ftv2mnode.png
new file mode 100644
index 0000000..0b63f6d
Binary files /dev/null and b/doc/html/ftv2mnode.png differ
diff --git a/doc/html/ftv2mo.png b/doc/html/ftv2mo.png
new file mode 100644
index 0000000..4bfb80f
Binary files /dev/null and b/doc/html/ftv2mo.png differ
diff --git a/doc/html/ftv2node.png b/doc/html/ftv2node.png
new file mode 100644
index 0000000..63c605b
Binary files /dev/null and b/doc/html/ftv2node.png differ
diff --git a/doc/html/ftv2ns.png b/doc/html/ftv2ns.png
new file mode 100644
index 0000000..72e3d71
Binary files /dev/null and b/doc/html/ftv2ns.png differ
diff --git a/doc/html/ftv2plastnode.png b/doc/html/ftv2plastnode.png
new file mode 100644
index 0000000..c6ee22f
Binary files /dev/null and b/doc/html/ftv2plastnode.png differ
diff --git a/doc/html/ftv2pnode.png b/doc/html/ftv2pnode.png
new file mode 100644
index 0000000..c6ee22f
Binary files /dev/null and b/doc/html/ftv2pnode.png differ
diff --git a/doc/html/ftv2splitbar.png b/doc/html/ftv2splitbar.png
new file mode 100644
index 0000000..fe895f2
Binary files /dev/null and b/doc/html/ftv2splitbar.png differ
diff --git a/doc/html/ftv2vertline.png b/doc/html/ftv2vertline.png
new file mode 100644
index 0000000..63c605b
Binary files /dev/null and b/doc/html/ftv2vertline.png differ
diff --git a/doc/html/functions.html b/doc/html/functions.html
new file mode 100644
index 0000000..64edb08
--- /dev/null
+++ b/doc/html/functions.html
@@ -0,0 +1,83 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Data Fields</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li class="current"><a href="functions.html"><span>All</span></a></li>
+      <li><a href="functions_func.html"><span>Functions</span></a></li>
+      <li><a href="functions_vars.html"><span>Variables</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li class="current"><a href="functions.html#index_a"><span>a</span></a></li>
+      <li><a href="functions_0x62.html#index_b"><span>b</span></a></li>
+      <li><a href="functions_0x63.html#index_c"><span>c</span></a></li>
+      <li><a href="functions_0x64.html#index_d"><span>d</span></a></li>
+      <li><a href="functions_0x6b.html#index_k"><span>k</span></a></li>
+      <li><a href="functions_0x6e.html#index_n"><span>n</span></a></li>
+      <li><a href="functions_0x72.html#index_r"><span>r</span></a></li>
+      <li><a href="functions_0x73.html#index_s"><span>s</span></a></li>
+      <li><a href="functions_0x75.html#index_u"><span>u</span></a></li>
+      <li><a href="functions_0x76.html#index_v"><span>v</span></a></li>
+      <li><a href="functions_0x7a.html#index_z"><span>z</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+<div class="textblock">Here is a list of all struct and union fields with links to the structures/unions they belong to:</div>
+
+<h3><a class="anchor" id="index_a"></a>- a -</h3><ul>
+<li>action
+: <a class="el" href="structrsb__initopts.html#ad087930c58602fd3c0761f5af3aae7ce">rsb_initopts</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/functions_0x62.html b/doc/html/functions_0x62.html
new file mode 100644
index 0000000..e02a59f
--- /dev/null
+++ b/doc/html/functions_0x62.html
@@ -0,0 +1,329 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Data Fields</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li class="current"><a href="functions.html"><span>All</span></a></li>
+      <li><a href="functions_func.html"><span>Functions</span></a></li>
+      <li><a href="functions_vars.html"><span>Variables</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li><a href="functions.html#index_a"><span>a</span></a></li>
+      <li class="current"><a href="functions_0x62.html#index_b"><span>b</span></a></li>
+      <li><a href="functions_0x63.html#index_c"><span>c</span></a></li>
+      <li><a href="functions_0x64.html#index_d"><span>d</span></a></li>
+      <li><a href="functions_0x6b.html#index_k"><span>k</span></a></li>
+      <li><a href="functions_0x6e.html#index_n"><span>n</span></a></li>
+      <li><a href="functions_0x72.html#index_r"><span>r</span></a></li>
+      <li><a href="functions_0x73.html#index_s"><span>s</span></a></li>
+      <li><a href="functions_0x75.html#index_u"><span>u</span></a></li>
+      <li><a href="functions_0x76.html#index_v"><span>v</span></a></li>
+      <li><a href="functions_0x7a.html#index_z"><span>z</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+<div class="textblock">Here is a list of all struct and union fields with links to the structures/unions they belong to:</div>
+
+<h3><a class="anchor" id="index_b"></a>- b -</h3><ul>
+<li>blas_base
+: <a class="el" href="classblas__sparse.html#ac6324bd9c488f6ad4c176fd05a5c1a94">blas_sparse</a>
+</li>
+<li>blas_block
+: <a class="el" href="classblas__sparse.html#aef5b352231bcff68b28b97742899558e">blas_sparse</a>
+</li>
+<li>blas_colmajor
+: <a class="el" href="classblas__sparse.html#af4ede8e7f0445be25841733354b747bd">blas_sparse</a>
+</li>
+<li>blas_complex
+: <a class="el" href="classblas__sparse.html#ac72dca9b25a744006fb7e2b272958494">blas_sparse</a>
+</li>
+<li>blas_conj
+: <a class="el" href="classblas__sparse.html#a1964a262e04c046d0f97c7de7cf1d916">blas_sparse</a>
+</li>
+<li>blas_conj_trans
+: <a class="el" href="classblas__sparse.html#a7b8d414b608929ba0abced46c98889d6">blas_sparse</a>
+</li>
+<li>blas_decreasing_order
+: <a class="el" href="classblas__sparse.html#aedf0364e33ddfd0ee88e93acf3683cf7">blas_sparse</a>
+</li>
+<li>blas_double_precision
+: <a class="el" href="classblas__sparse.html#a9c54c439abc55e509b4a7ec35f6faa4e">blas_sparse</a>
+</li>
+<li>blas_emax
+: <a class="el" href="classblas__sparse.html#a18b1555d2c4e1d8b3a8d38bc7105c3fa">blas_sparse</a>
+</li>
+<li>blas_emin
+: <a class="el" href="classblas__sparse.html#a150f1864fd1bf514a9769914490a23ba">blas_sparse</a>
+</li>
+<li>blas_eps
+: <a class="el" href="classblas__sparse.html#acbf407624c42cad4f1ea47776977d160">blas_sparse</a>
+</li>
+<li>blas_frobenius_norm
+: <a class="el" href="classblas__sparse.html#af1dbfea000291bde9fe93507f62a31ba">blas_sparse</a>
+</li>
+<li>blas_general
+: <a class="el" href="classblas__sparse.html#a6dd42fe3a5c74d293855e6ed0825cc67">blas_sparse</a>
+</li>
+<li>blas_hermitian
+: <a class="el" href="classblas__sparse.html#aaab0006bc8bcddf6cba32a69d3ddbf95">blas_sparse</a>
+</li>
+<li>blas_ieee
+: <a class="el" href="classblas__sparse.html#a114a6fabae21d32477af3acee15b9d5d">blas_sparse</a>
+</li>
+<li>blas_increasing_order
+: <a class="el" href="classblas__sparse.html#ae972ff04001d8bbcc52f242134af52d8">blas_sparse</a>
+</li>
+<li>blas_inf_norm
+: <a class="el" href="classblas__sparse.html#afb9ba15096a7184519256ec2923fda49">blas_sparse</a>
+</li>
+<li>blas_invalid_handle
+: <a class="el" href="classblas__sparse.html#aa9ee2ffde87e203fd37719979e7b546d">blas_sparse</a>
+</li>
+<li>blas_irregular
+: <a class="el" href="classblas__sparse.html#a9893aa4d547b371f6ba59a8615aa752e">blas_sparse</a>
+</li>
+<li>blas_jrot_inner
+: <a class="el" href="classblas__sparse.html#a2fc184f889f72b17fdb2ba6266c25b02">blas_sparse</a>
+</li>
+<li>blas_jrot_outer
+: <a class="el" href="classblas__sparse.html#ac526dad147c751bb8b175edd47d29c22">blas_sparse</a>
+</li>
+<li>blas_jrot_sorted
+: <a class="el" href="classblas__sparse.html#a0612a82431d61a1b6c4c5030e65c5e31">blas_sparse</a>
+</li>
+<li>blas_left_side
+: <a class="el" href="classblas__sparse.html#a5059846f8eba839bb1afc32abac380e4">blas_sparse</a>
+</li>
+<li>blas_lower
+: <a class="el" href="classblas__sparse.html#a67e376dc6a7cc769ee24415dc2a8d9d1">blas_sparse</a>
+</li>
+<li>blas_lower_hermitian
+: <a class="el" href="classblas__sparse.html#a101f5eab06d45474d64bff200d2387ec">blas_sparse</a>
+</li>
+<li>blas_lower_symmetric
+: <a class="el" href="classblas__sparse.html#ad474a894be1f45a6937c2a880963b1c7">blas_sparse</a>
+</li>
+<li>blas_lower_triangular
+: <a class="el" href="classblas__sparse.html#a280772883a7487fa68aabd98e4a49342">blas_sparse</a>
+</li>
+<li>blas_max_norm
+: <a class="el" href="classblas__sparse.html#a487afe34859579523bba7b4851e106c6">blas_sparse</a>
+</li>
+<li>blas_new_handle
+: <a class="el" href="classblas__sparse.html#a817d9813d36f7a7abea44e3b781e21ba">blas_sparse</a>
+</li>
+<li>blas_no_conj
+: <a class="el" href="classblas__sparse.html#aabedcf272e063a48f7e310ce04784b17">blas_sparse</a>
+</li>
+<li>blas_no_trans
+: <a class="el" href="classblas__sparse.html#a5b700c1a472d7d12decf3d7d7fd244c2">blas_sparse</a>
+</li>
+<li>blas_non_unit_diag
+: <a class="el" href="classblas__sparse.html#a8e76732b3c06d9fc27669bda37f24ed6">blas_sparse</a>
+</li>
+<li>blas_num_cols
+: <a class="el" href="classblas__sparse.html#a726b73c19dae30439aa65988fa5b5dd1">blas_sparse</a>
+</li>
+<li>blas_num_nonzeros
+: <a class="el" href="classblas__sparse.html#ab01ecdd54a1f10e944e446e0efed3bc3">blas_sparse</a>
+</li>
+<li>blas_num_rows
+: <a class="el" href="classblas__sparse.html#afe9e1f52ba336f041e1e750b3a989510">blas_sparse</a>
+</li>
+<li>blas_one_base
+: <a class="el" href="classblas__sparse.html#a60fbe98d827ebea9e7c431e7698bc462">blas_sparse</a>
+</li>
+<li>blas_one_norm
+: <a class="el" href="classblas__sparse.html#a05c66c0c87c72e39580258418c46341f">blas_sparse</a>
+</li>
+<li>blas_open_handle
+: <a class="el" href="classblas__sparse.html#a0936a1798a61b56c52c116e428a4e6b7">blas_sparse</a>
+</li>
+<li>blas_overflow
+: <a class="el" href="classblas__sparse.html#ad8f2f29c92552e53910e5c92feb3567d">blas_sparse</a>
+</li>
+<li>blas_prec
+: <a class="el" href="classblas__sparse.html#a8c3f3a997d8d96f470d44d1f34e3ed39">blas_sparse</a>
+</li>
+<li>blas_prec_double
+: <a class="el" href="classblas__sparse.html#a6f0692e06d3b42828813a7a0a9ec59bb">blas_sparse</a>
+</li>
+<li>blas_prec_extra
+: <a class="el" href="classblas__sparse.html#a305383f56368c35429bfd9e7ca23a0f5">blas_sparse</a>
+</li>
+<li>blas_prec_indigenous
+: <a class="el" href="classblas__sparse.html#a71c278e64f30229d19b46376c4385669">blas_sparse</a>
+</li>
+<li>blas_prec_single
+: <a class="el" href="classblas__sparse.html#ab378aeb6aa39b2495f084cd31e32e5a6">blas_sparse</a>
+</li>
+<li>blas_real
+: <a class="el" href="classblas__sparse.html#a0d365ccd71fdedaa5cf30a46f34bcf37">blas_sparse</a>
+</li>
+<li>blas_real_inf_norm
+: <a class="el" href="classblas__sparse.html#a0a60f070ff9a1a864af39e4489c93e31">blas_sparse</a>
+</li>
+<li>blas_real_max_norm
+: <a class="el" href="classblas__sparse.html#afecc0bd8ce11628fe2bbe55b9244c295">blas_sparse</a>
+</li>
+<li>blas_real_one_norm
+: <a class="el" href="classblas__sparse.html#aca85a61f11b3c36113209d61a89e4957">blas_sparse</a>
+</li>
+<li>blas_regular
+: <a class="el" href="classblas__sparse.html#a21d31433d4f29a6fd54c214b7a26c7d4">blas_sparse</a>
+</li>
+<li>blas_right_side
+: <a class="el" href="classblas__sparse.html#a6285c5cc8fe45bb73ece03c7900d5a18">blas_sparse</a>
+</li>
+<li>blas_rnd
+: <a class="el" href="classblas__sparse.html#af75f815c459344a5a38cd0794b93504a">blas_sparse</a>
+</li>
+<li>blas_rowmajor
+: <a class="el" href="classblas__sparse.html#aa1d4df9e25dcab40269247450e1b3e4e">blas_sparse</a>
+</li>
+<li>blas_rsb_autotune_next_operation
+: <a class="el" href="classblas__sparse.html#acf0fe16da38fc03226e462dc6104cc68">blas_sparse</a>
+</li>
+<li>blas_rsb_duplicates_ovw
+: <a class="el" href="classblas__sparse.html#a10ebafcdf3cc36cf0471ba20ffcd2980">blas_sparse</a>
+</li>
+<li>blas_rsb_duplicates_sum
+: <a class="el" href="classblas__sparse.html#a508fec1d9853698fd08c239dd08a7291">blas_sparse</a>
+</li>
+<li>blas_rsb_rep_coo
+: <a class="el" href="classblas__sparse.html#abb6a552efaab32ed9687f2e2df895783">blas_sparse</a>
+</li>
+<li>blas_rsb_rep_csr
+: <a class="el" href="classblas__sparse.html#a9fe6012ccac0890c7f7a8500e77e9ff7">blas_sparse</a>
+</li>
+<li>blas_rsb_rep_rsb
+: <a class="el" href="classblas__sparse.html#a9cd9a2263c79534384bef6bf27e65787">blas_sparse</a>
+</li>
+<li>blas_rsb_spmv_autotuning_off
+: <a class="el" href="classblas__sparse.html#a8cc6d8c9036cb66051cc1cfb7c739b5e">blas_sparse</a>
+</li>
+<li>blas_rsb_spmv_autotuning_on
+: <a class="el" href="classblas__sparse.html#afab5c86162fcf329199b0666f33cde96">blas_sparse</a>
+</li>
+<li>blas_rsb_spmv_n_autotuning_off
+: <a class="el" href="classblas__sparse.html#aefa9f681506ee4ceb578f11b9a0e664c">blas_sparse</a>
+</li>
+<li>blas_rsb_spmv_n_autotuning_on
+: <a class="el" href="classblas__sparse.html#abd8e06d35f2c4c3a6ecc1eb315548c43">blas_sparse</a>
+</li>
+<li>blas_rsb_spmv_t_autotuning_off
+: <a class="el" href="classblas__sparse.html#a56dc72776b8dcdc43f0cebbdc93dcd21">blas_sparse</a>
+</li>
+<li>blas_rsb_spmv_t_autotuning_on
+: <a class="el" href="classblas__sparse.html#a7bfbab78e4c5a789e2d76274a2fbc96c">blas_sparse</a>
+</li>
+<li>blas_sfmin
+: <a class="el" href="classblas__sparse.html#a0b1596744fa5acb891d4908588249c54">blas_sparse</a>
+</li>
+<li>blas_single_precision
+: <a class="el" href="classblas__sparse.html#a0badb7c2679a5d0ba4e90f599b678768">blas_sparse</a>
+</li>
+<li>blas_sparse_const_failure
+: <a class="el" href="classblas__sparse.html#a5d97ddcd53d2bba670233f5335b44f55">blas_sparse</a>
+</li>
+<li>blas_sparse_const_not_available
+: <a class="el" href="classblas__sparse.html#aae79119fabe06a887f461eda50c97d0a">blas_sparse</a>
+</li>
+<li>blas_sparse_const_success
+: <a class="el" href="classblas__sparse.html#a0e333ba9a5cc3b014697d0a12d08f6b2">blas_sparse</a>
+</li>
+<li>blas_symmetric
+: <a class="el" href="classblas__sparse.html#a13161955ecb9fc2ce12963cc319c93d2">blas_sparse</a>
+</li>
+<li>blas_t
+: <a class="el" href="classblas__sparse.html#a6acca6eab87ec90dcf71b8c7b40aaa8f">blas_sparse</a>
+</li>
+<li>blas_trans
+: <a class="el" href="classblas__sparse.html#a12f06635d9f1c40722ad4bd757e737bb">blas_sparse</a>
+</li>
+<li>blas_triangular
+: <a class="el" href="classblas__sparse.html#a04619b8ef6be6983ded4e3c22fce63b8">blas_sparse</a>
+</li>
+<li>blas_two_norm
+: <a class="el" href="classblas__sparse.html#abb84b2b7195d878e71760bdad596d693">blas_sparse</a>
+</li>
+<li>blas_unassembled
+: <a class="el" href="classblas__sparse.html#a50d0da49cbf6822ed5e9a8ff81faf6d5">blas_sparse</a>
+</li>
+<li>blas_underflow
+: <a class="el" href="classblas__sparse.html#ae9f01b90527ebe6b178d4c73a46bbf25">blas_sparse</a>
+</li>
+<li>blas_unit_diag
+: <a class="el" href="classblas__sparse.html#af1e902c099efbedd09c7ce65b4772626">blas_sparse</a>
+</li>
+<li>blas_upper
+: <a class="el" href="classblas__sparse.html#a8be70a15dda0ebf3b782b66e72f924d2">blas_sparse</a>
+</li>
+<li>blas_upper_hermitian
+: <a class="el" href="classblas__sparse.html#a4fcd2dfde7722199b9125542622c8c4a">blas_sparse</a>
+</li>
+<li>blas_upper_symmetric
+: <a class="el" href="classblas__sparse.html#afd82b7f277c54dfa83ab44ea6ed89fb1">blas_sparse</a>
+</li>
+<li>blas_upper_triangular
+: <a class="el" href="classblas__sparse.html#a97b523912445087a965737cb8cfd69af">blas_sparse</a>
+</li>
+<li>blas_valid_handle
+: <a class="el" href="classblas__sparse.html#a83704034c72929f3b1df534034b2786d">blas_sparse</a>
+</li>
+<li>blas_zero_base
+: <a class="el" href="classblas__sparse.html#a8c89ffd8863d708e55c5330d11c772f2">blas_sparse</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
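
The blas_* names indexed above are the Sparse BLAS property and descriptor constants. In the C binding they are typically passed to the property set/get routines between creation and assembly; a hedged sketch (the handle A and the chosen properties are illustrative, not from this diff):

    /* Sketch: declare structural properties on an open handle A,
     * i.e. after BLAS_duscr_begin() and before BLAS_duscr_end(). */
    BLAS_ussp(A, blas_lower_triangular); /* lower-triangular structure */
    BLAS_ussp(A, blas_unit_diag);        /* implicit unit diagonal     */
    /* After assembly, properties can be queried back: */
    int nnz = BLAS_usgp(A, blas_num_nonzeros);
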
diff --git a/doc/html/functions_0x63.html b/doc/html/functions_0x63.html
new file mode 100644
index 0000000..f10a016
--- /dev/null
+++ b/doc/html/functions_0x63.html
@@ -0,0 +1,132 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Data Fields</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li class="current"><a href="functions.html"><span>All</span></a></li>
+      <li><a href="functions_func.html"><span>Functions</span></a></li>
+      <li><a href="functions_vars.html"><span>Variables</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li><a href="functions.html#index_a"><span>a</span></a></li>
+      <li><a href="functions_0x62.html#index_b"><span>b</span></a></li>
+      <li class="current"><a href="functions_0x63.html#index_c"><span>c</span></a></li>
+      <li><a href="functions_0x64.html#index_d"><span>d</span></a></li>
+      <li><a href="functions_0x6b.html#index_k"><span>k</span></a></li>
+      <li><a href="functions_0x6e.html#index_n"><span>n</span></a></li>
+      <li><a href="functions_0x72.html#index_r"><span>r</span></a></li>
+      <li><a href="functions_0x73.html#index_s"><span>s</span></a></li>
+      <li><a href="functions_0x75.html#index_u"><span>u</span></a></li>
+      <li><a href="functions_0x76.html#index_v"><span>v</span></a></li>
+      <li><a href="functions_0x7a.html#index_z"><span>z</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+<div class="textblock">Here is a list of all struct and union fields with links to the structures/unions they belong to:</div>
+
+<h3><a class="anchor" id="index_c"></a>- c -</h3><ul>
+<li>cuscr_begin()
+: <a class="el" href="classblas__sparse.html#af4e9f97f85799c5e8f60c78d40d906f3">blas_sparse</a>
+</li>
+<li>cuscr_block_begin()
+: <a class="el" href="classblas__sparse.html#a6085ddf99c2459e051a6106e4a2c4785">blas_sparse</a>
+</li>
+<li>cuscr_end()
+: <a class="el" href="classblas__sparse.html#a9878426469b215a78642e5245a054203">blas_sparse</a>
+</li>
+<li>cuscr_insert_block()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__block.html#af239abd22080f58d5cf0ea2dfdd78953">blas_sparse::uscr_insert_block</a>
+, <a class="el" href="classblas__sparse.html#add06f42953fc4dff9dafc487f58172ee">blas_sparse</a>
+</li>
+<li>cuscr_insert_clique()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__clique.html#af3f921a0867dbd20c1499660a2b78376">blas_sparse::uscr_insert_clique</a>
+, <a class="el" href="classblas__sparse.html#af9a2f1bf6543dccc8b5bb1b5d0f35636">blas_sparse</a>
+</li>
+<li>cuscr_insert_col()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__col.html#a380ea4ffed92a6cf0e73a50952fc6a64">blas_sparse::uscr_insert_col</a>
+, <a class="el" href="classblas__sparse.html#a847661e819534c083984a453a1e282ea">blas_sparse</a>
+</li>
+<li>cuscr_insert_entries()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__entries.html#a5035e49b6a0d45c9aee959667fd567b2">blas_sparse::uscr_insert_entries</a>
+, <a class="el" href="classblas__sparse.html#a6ee075639a028bfbb8d8c3652bb3c147">blas_sparse</a>
+</li>
+<li>cuscr_insert_entry()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__entry.html#aea33478c2c2911daf74c478ded2ed39e">blas_sparse::uscr_insert_entry</a>
+, <a class="el" href="classblas__sparse.html#a4bee5ce9a9bb94863469797ca22d44b0">blas_sparse</a>
+</li>
+<li>cuscr_insert_row()
+: <a class="el" href="classblas__sparse.html#aad23b1379a471af392fa33066fd66140">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1uscr__insert__row.html#a59a678b947de912694a162cafb171100">blas_sparse::uscr_insert_row</a>
+</li>
+<li>cuscr_variable_block_begin()
+: <a class="el" href="classblas__sparse.html#abd5c88929ed1c7133169c401881fa1c7">blas_sparse</a>
+</li>
+<li>cusmm()
+: <a class="el" href="interfaceblas__sparse_1_1usmm.html#a0a2303f12cfe05ba01cdb52b751d5f33">blas_sparse::usmm</a>
+, <a class="el" href="classblas__sparse.html#a4f7e8d071d2309ed60cb9d588fd749b6">blas_sparse</a>
+</li>
+<li>cusmv()
+: <a class="el" href="classblas__sparse.html#a437ce36d8520ffeadfbb6e6f9885b9f3">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1usmv.html#a43d3541d816401bb2581913cfa2070bb">blas_sparse::usmv</a>
+</li>
+<li>cussm()
+: <a class="el" href="classblas__sparse.html#abc00bd143edf8a993e7d79a1d8baf636">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1ussm.html#a19ec9206f2a3e66ccfddff2be3fb55ad">blas_sparse::ussm</a>
+</li>
+<li>cussv()
+: <a class="el" href="classblas__sparse.html#a8f2db2c64bbd1ecd032fb7a103e30c97">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1ussv.html#a8e0df43045904452d698c18dbb8b33a1">blas_sparse::ussv</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/functions_0x64.html b/doc/html/functions_0x64.html
new file mode 100644
index 0000000..16682a8
--- /dev/null
+++ b/doc/html/functions_0x64.html
@@ -0,0 +1,132 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Data Fields</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li class="current"><a href="functions.html"><span>All</span></a></li>
+      <li><a href="functions_func.html"><span>Functions</span></a></li>
+      <li><a href="functions_vars.html"><span>Variables</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li><a href="functions.html#index_a"><span>a</span></a></li>
+      <li><a href="functions_0x62.html#index_b"><span>b</span></a></li>
+      <li><a href="functions_0x63.html#index_c"><span>c</span></a></li>
+      <li class="current"><a href="functions_0x64.html#index_d"><span>d</span></a></li>
+      <li><a href="functions_0x6b.html#index_k"><span>k</span></a></li>
+      <li><a href="functions_0x6e.html#index_n"><span>n</span></a></li>
+      <li><a href="functions_0x72.html#index_r"><span>r</span></a></li>
+      <li><a href="functions_0x73.html#index_s"><span>s</span></a></li>
+      <li><a href="functions_0x75.html#index_u"><span>u</span></a></li>
+      <li><a href="functions_0x76.html#index_v"><span>v</span></a></li>
+      <li><a href="functions_0x7a.html#index_z"><span>z</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+<div class="textblock">Here is a list of all struct and union fields with links to the structures/unions they belong to:</div>
+
+<h3><a class="anchor" id="index_d"></a>- d -</h3><ul>
+<li>duscr_begin()
+: <a class="el" href="classblas__sparse.html#acf14608f8b0375ca133b7f850bde3b50">blas_sparse</a>
+</li>
+<li>duscr_block_begin()
+: <a class="el" href="classblas__sparse.html#ab33c2f497f0a53213f38cd8449ab4349">blas_sparse</a>
+</li>
+<li>duscr_end()
+: <a class="el" href="classblas__sparse.html#a88d066acac28b6fe7c7cdc9e6941ff8f">blas_sparse</a>
+</li>
+<li>duscr_insert_block()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__block.html#aa74a6aa929703b1221d125dabf0610a9">blas_sparse::uscr_insert_block</a>
+, <a class="el" href="classblas__sparse.html#a40b43e04b282dd6f6ad11f51701a9b81">blas_sparse</a>
+</li>
+<li>duscr_insert_clique()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__clique.html#a24491a9b6aeae9698aacf9c649fabcfc">blas_sparse::uscr_insert_clique</a>
+, <a class="el" href="classblas__sparse.html#a533a7082811ea859d079b5e9513ce1b4">blas_sparse</a>
+</li>
+<li>duscr_insert_col()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__col.html#a8dfa301a73cd1bf09b66a0b2e9c704a8">blas_sparse::uscr_insert_col</a>
+, <a class="el" href="classblas__sparse.html#a1ed0bf47156c5d299ef678b71aec7ef0">blas_sparse</a>
+</li>
+<li>duscr_insert_entries()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__entries.html#a6d994adf4a26516c4bbd08020a923a5a">blas_sparse::uscr_insert_entries</a>
+, <a class="el" href="classblas__sparse.html#a87f44b33cf81a30af58fe9a299ea78a3">blas_sparse</a>
+</li>
+<li>duscr_insert_entry()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__entry.html#ac1bd26e50082f7eb1123a59794ae3f1c">blas_sparse::uscr_insert_entry</a>
+, <a class="el" href="classblas__sparse.html#ae3706fcae9dcbf6ebe96335717823939">blas_sparse</a>
+</li>
+<li>duscr_insert_row()
+: <a class="el" href="classblas__sparse.html#a055df1b4ef9aa7e7937bb1dfe1f228b9">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1uscr__insert__row.html#a14fd80441fbbc200a36de62c86f12538">blas_sparse::uscr_insert_row</a>
+</li>
+<li>duscr_variable_block_begin()
+: <a class="el" href="classblas__sparse.html#ab1fd9e9f8cdd5f79134873fd6af47c28">blas_sparse</a>
+</li>
+<li>dusmm()
+: <a class="el" href="interfaceblas__sparse_1_1usmm.html#a444e03055975d19e0907fdc774d6419f">blas_sparse::usmm</a>
+, <a class="el" href="classblas__sparse.html#ae717638ebcf6e277f2621fd8eae75249">blas_sparse</a>
+</li>
+<li>dusmv()
+: <a class="el" href="classblas__sparse.html#a9fd3bf400531b8277a082b0663491329">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1usmv.html#ab356fcfdebfd118dd5e6165e66a3dc70">blas_sparse::usmv</a>
+</li>
+<li>dussm()
+: <a class="el" href="classblas__sparse.html#a0eb97f56a6467e87ce06f8be8a50e88d">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1ussm.html#a25c815e459c07efcba93c29b156136c0">blas_sparse::ussm</a>
+</li>
+<li>dussv()
+: <a class="el" href="classblas__sparse.html#aae591d7a08af50e34313f347d888779d">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1ussv.html#a45d49f6f9887a808109bbb4467efb1dc">blas_sparse::ussv</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
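
The d-prefixed routines above are the double-precision instances; the s/c/z pages cover the remaining types. Once a handle has been assembled with a triangular property declared, a solve is a single call; a sketch (the handle T and vector contents are illustrative):

    /* Sketch: in-place triangular solve xv <- 1.0 * T^{-1} * xv, where
     * T carries a triangular property (e.g. blas_lower_triangular). */
    double xv[2] = {1.0, 3.0};
    BLAS_dussv(blas_no_trans, 1.0, T, xv, 1);
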
diff --git a/doc/html/functions_0x6b.html b/doc/html/functions_0x6b.html
new file mode 100644
index 0000000..d7cc50b
--- /dev/null
+++ b/doc/html/functions_0x6b.html
@@ -0,0 +1,83 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Data Fields</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li class="current"><a href="functions.html"><span>All</span></a></li>
+      <li><a href="functions_func.html"><span>Functions</span></a></li>
+      <li><a href="functions_vars.html"><span>Variables</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li><a href="functions.html#index_a"><span>a</span></a></li>
+      <li><a href="functions_0x62.html#index_b"><span>b</span></a></li>
+      <li><a href="functions_0x63.html#index_c"><span>c</span></a></li>
+      <li><a href="functions_0x64.html#index_d"><span>d</span></a></li>
+      <li class="current"><a href="functions_0x6b.html#index_k"><span>k</span></a></li>
+      <li><a href="functions_0x6e.html#index_n"><span>n</span></a></li>
+      <li><a href="functions_0x72.html#index_r"><span>r</span></a></li>
+      <li><a href="functions_0x73.html#index_s"><span>s</span></a></li>
+      <li><a href="functions_0x75.html#index_u"><span>u</span></a></li>
+      <li><a href="functions_0x76.html#index_v"><span>v</span></a></li>
+      <li><a href="functions_0x7a.html#index_z"><span>z</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+<div class="textblock">Here is a list of all struct and union fields with links to the structures/unions they belong to:</div>
+
+<h3><a class="anchor" id="index_k"></a>- k -</h3><ul>
+<li>keys
+: <a class="el" href="structrsb__initopts.html#a4319168f5f1183d3ea65960e7111e7ee">rsb_initopts</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/functions_0x6e.html b/doc/html/functions_0x6e.html
new file mode 100644
index 0000000..f65d059
--- /dev/null
+++ b/doc/html/functions_0x6e.html
@@ -0,0 +1,83 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Data Fields</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li class="current"><a href="functions.html"><span>All</span></a></li>
+      <li><a href="functions_func.html"><span>Functions</span></a></li>
+      <li><a href="functions_vars.html"><span>Variables</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li><a href="functions.html#index_a"><span>a</span></a></li>
+      <li><a href="functions_0x62.html#index_b"><span>b</span></a></li>
+      <li><a href="functions_0x63.html#index_c"><span>c</span></a></li>
+      <li><a href="functions_0x64.html#index_d"><span>d</span></a></li>
+      <li><a href="functions_0x6b.html#index_k"><span>k</span></a></li>
+      <li class="current"><a href="functions_0x6e.html#index_n"><span>n</span></a></li>
+      <li><a href="functions_0x72.html#index_r"><span>r</span></a></li>
+      <li><a href="functions_0x73.html#index_s"><span>s</span></a></li>
+      <li><a href="functions_0x75.html#index_u"><span>u</span></a></li>
+      <li><a href="functions_0x76.html#index_v"><span>v</span></a></li>
+      <li><a href="functions_0x7a.html#index_z"><span>z</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+<div class="textblock">Here is a list of all struct and union fields with links to the structures/unions they belong to:</div>
+
+<h3><a class="anchor" id="index_n"></a>- n -</h3><ul>
+<li>n_pairs
+: <a class="el" href="structrsb__initopts.html#a15ff3b060d88b3d937b5e028647c0af2">rsb_initopts</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
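
The rsb_initopts fields indexed across these pages (action, keys, n_pairs, plus the values array) form the key/value option list accepted by rsb_lib_init(). A hedged sketch — the option key and thread count are illustrative only; the available keys are listed in rsb.h:

    /* Sketch: request a thread count at library initialization via an
     * rsb_initopts key/value pair. */
    enum rsb_opt_t keys[] = { RSB_IO_WANT_EXECUTING_THREADS };
    rsb_int_t nt = 4;                 /* illustrative value           */
    void *values[] = { &nt };
    struct rsb_initopts io;
    io.keys = keys;                   /* option identifiers           */
    io.values = values;               /* matching value pointers      */
    io.n_pairs = 1;                   /* number of key/value pairs    */
    io.action = RSB_IO_SPECIFIER_SET; /* set (vs. get) these options  */
    rsb_lib_init(&io);
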
diff --git a/doc/html/functions_0x72.html b/doc/html/functions_0x72.html
new file mode 100644
index 0000000..da3473a
--- /dev/null
+++ b/doc/html/functions_0x72.html
@@ -0,0 +1,602 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Data Fields</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li class="current"><a href="functions.html"><span>All</span></a></li>
+      <li><a href="functions_func.html"><span>Functions</span></a></li>
+      <li><a href="functions_vars.html"><span>Variables</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li><a href="functions.html#index_a"><span>a</span></a></li>
+      <li><a href="functions_0x62.html#index_b"><span>b</span></a></li>
+      <li><a href="functions_0x63.html#index_c"><span>c</span></a></li>
+      <li><a href="functions_0x64.html#index_d"><span>d</span></a></li>
+      <li><a href="functions_0x6b.html#index_k"><span>k</span></a></li>
+      <li><a href="functions_0x6e.html#index_n"><span>n</span></a></li>
+      <li class="current"><a href="functions_0x72.html#index_r"><span>r</span></a></li>
+      <li><a href="functions_0x73.html#index_s"><span>s</span></a></li>
+      <li><a href="functions_0x75.html#index_u"><span>u</span></a></li>
+      <li><a href="functions_0x76.html#index_v"><span>v</span></a></li>
+      <li><a href="functions_0x7a.html#index_z"><span>z</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+<div class="textblock">Here is a list of all struct and union fields with links to the structures/unions they belong to:</div>
+
+<h3><a class="anchor" id="index_r"></a>- r -</h3><ul>
+<li>rsb_blas_get_mtx()
+: <a class="el" href="interfaceblas__sparse_1_1rsb__blas__get__mtx.html#ab7287586fccf2ade719b9a0b0585fb6a">blas_sparse::rsb_blas_get_mtx</a>
+</li>
+<li>rsb_coo_sort()
+: <a class="el" href="interfacersb_1_1rsb__coo__sort.html#a5712d2c61081ca75f636c2474c7d815e">rsb::rsb_coo_sort</a>
+</li>
+<li>rsb_elopf_div
+: <a class="el" href="classrsb.html#a0fb14e2ce3e4033c5f2075a823a358e2">rsb</a>
+</li>
+<li>rsb_elopf_mul
+: <a class="el" href="classrsb.html#ab8b26221a9c42a654a835a8f505cdd6d">rsb</a>
+</li>
+<li>rsb_elopf_neg
+: <a class="el" href="classrsb.html#af632e8ad15e51d2fcdad5f81d22aecab">rsb</a>
+</li>
+<li>rsb_elopf_pow
+: <a class="el" href="classrsb.html#a97685d0c5f78c8e996b85689f58309ba">rsb</a>
+</li>
+<li>rsb_elopf_scale_cols
+: <a class="el" href="classrsb.html#a0b36e2eac438cae0c3cb2171aa89a580">rsb</a>
+</li>
+<li>rsb_elopf_scale_cols_real
+: <a class="el" href="classrsb.html#a224d2a379853ca7bad32b5921437f531">rsb</a>
+</li>
+<li>rsb_elopf_scale_rows
+: <a class="el" href="classrsb.html#a036bcb7f9a4156f984d9bfe8f7829c9c">rsb</a>
+</li>
+<li>rsb_elopf_scale_rows_real
+: <a class="el" href="classrsb.html#acc73315ac4e1af0fc6c90c3d189a1c2a">rsb</a>
+</li>
+<li>rsb_err_badargs
+: <a class="el" href="classrsb.html#a05f3d2c8888332697f182ea6d8ab66b0">rsb</a>
+</li>
+<li>rsb_err_corrupt_input_data
+: <a class="el" href="classrsb.html#a4e124cfacc5e0492952ccda10905206a">rsb</a>
+</li>
+<li>rsb_err_could_not_honour_externally_allocation_flags
+: <a class="el" href="classrsb.html#a65da259a04a3b6b09b1e67d2aae53108">rsb</a>
+</li>
+<li>rsb_err_enomem
+: <a class="el" href="classrsb.html#a0abffcaa259b8f2cbf1b025c4c179fb0">rsb</a>
+</li>
+<li>rsb_err_failed_memhier_detection
+: <a class="el" href="classrsb.html#aa0868e7080760845d911eae040df8c44">rsb</a>
+</li>
+<li>rsb_err_generic_error
+: <a class="el" href="classrsb.html#abe86debd990b7989427a98378c0c2ea4">rsb</a>
+</li>
+<li>rsb_err_internal_error
+: <a class="el" href="classrsb.html#a76c59842ba7bef3a5e0cfe577b45e3af">rsb</a>
+</li>
+<li>rsb_err_invalid_numerical_data
+: <a class="el" href="classrsb.html#a018c06fd82826d0b56fdec98da22da17">rsb</a>
+</li>
+<li>rsb_err_limits
+: <a class="el" href="classrsb.html#a20784aca964572d033d9f79a08b8842d">rsb</a>
+</li>
+<li>rsb_err_memory_leak
+: <a class="el" href="classrsb.html#a3534459ee186379f45444c289df70175">rsb</a>
+</li>
+<li>rsb_err_no_error
+: <a class="el" href="classrsb.html#a2f418e43e861a006b5aea1d55913fee2">rsb</a>
+</li>
+<li>rsb_err_no_stream_output_configured_out
+: <a class="el" href="classrsb.html#a8dec384225c4700df1b201b6dbc5aa60">rsb</a>
+</li>
+<li>rsb_err_no_user_configuration
+: <a class="el" href="classrsb.html#aed70b921cdbe20cc81d03c9b9c7aab38">rsb</a>
+</li>
+<li>rsb_err_unimplemented_yet
+: <a class="el" href="classrsb.html#a4405be6ac615c1db2c161185d455374c">rsb</a>
+</li>
+<li>rsb_err_unsupported_feature
+: <a class="el" href="classrsb.html#ac81e797f7f250fb3d2c20f2a46360838">rsb</a>
+</li>
+<li>rsb_err_unsupported_format
+: <a class="el" href="classrsb.html#a48a68ee015ab06c1b72e26659479cd9e">rsb</a>
+</li>
+<li>rsb_err_unsupported_operation
+: <a class="el" href="classrsb.html#aa9069fa99bea2127f31ac62365b19bcd">rsb</a>
+</li>
+<li>rsb_err_unsupported_type
+: <a class="el" href="classrsb.html#ab8643c59b36b245e6f59ce00e10ad17f">rsb</a>
+</li>
+<li>rsb_extf_asums_col
+: <a class="el" href="classrsb.html#af6f1f5ccf7d0c80b61bce19f5c64acc0">rsb</a>
+</li>
+<li>rsb_extf_asums_row
+: <a class="el" href="classrsb.html#ae6d4323a95cd3284314c787dfb05c854">rsb</a>
+</li>
+<li>rsb_extf_diag
+: <a class="el" href="classrsb.html#a4019120043663ffa9e39b9e042d1e13a">rsb</a>
+</li>
+<li>rsb_extf_norm_inf
+: <a class="el" href="classrsb.html#a396ba7496087621b292a7e2e68e976c8">rsb</a>
+</li>
+<li>rsb_extf_norm_one
+: <a class="el" href="classrsb.html#a21ae01944a05b24822a824390789b1ee">rsb</a>
+</li>
+<li>rsb_extf_norm_two
+: <a class="el" href="classrsb.html#afb2e1af58af877281f96f6a2aeb77c99">rsb</a>
+</li>
+<li>rsb_extf_sums_col
+: <a class="el" href="classrsb.html#a7aff705dacd272bad5d692b2775d5c93">rsb</a>
+</li>
+<li>rsb_extf_sums_row
+: <a class="el" href="classrsb.html#a5228e51b964240df80dba35826a1a6c9">rsb</a>
+</li>
+<li>rsb_file_mtx_get_dims()
+: <a class="el" href="interfacersb_1_1rsb__file__mtx__get__dims.html#ac465d04ed5f480a291981ae4a853257f">rsb::rsb_file_mtx_get_dims</a>
+</li>
+<li>rsb_file_mtx_load()
+: <a class="el" href="interfacersb_1_1rsb__file__mtx__load.html#a7a8a1195bbef16b39f8e68a4286e2ea1">rsb::rsb_file_mtx_load</a>
+</li>
+<li>rsb_file_mtx_rndr()
+: <a class="el" href="interfacersb_1_1rsb__file__mtx__rndr.html#a325a0bceb4ab80ba11a3f7e99235936d">rsb::rsb_file_mtx_rndr</a>
+</li>
+<li>rsb_file_mtx_save()
+: <a class="el" href="interfacersb_1_1rsb__file__mtx__save.html#a819ab03ec355b43ecde9c6b43336d991">rsb::rsb_file_mtx_save</a>
+</li>
+<li>rsb_file_vec_load()
+: <a class="el" href="interfacersb_1_1rsb__file__vec__load.html#a13774316ce6035da0ff647ca917d0d33">rsb::rsb_file_vec_load</a>
+</li>
+<li>rsb_file_vec_save()
+: <a class="el" href="interfacersb_1_1rsb__file__vec__save.html#a1a4271ef2990d373fb92e4d438a36678">rsb::rsb_file_vec_save</a>
+</li>
+<li>rsb_flag_assembled_in_coo_arrays
+: <a class="el" href="classrsb.html#a25e0432a471ab3fca4105d40ce2e8f1e">rsb</a>
+</li>
+<li>rsb_flag_c_indices_interface
+: <a class="el" href="classrsb.html#a0cd8d81bf275bfdc685080e0d855fbb1">rsb</a>
+</li>
+<li>rsb_flag_default_coo_matrix_flags
+: <a class="el" href="classrsb.html#aa1d8e9f835115cdac082812d5f74b6d4">rsb</a>
+</li>
+<li>rsb_flag_default_csr_matrix_flags
+: <a class="el" href="classrsb.html#a83848ae1b266eea31f4462821f8bc51b">rsb</a>
+</li>
+<li>rsb_flag_default_matrix_flags
+: <a class="el" href="classrsb.html#a16cc953b0faf8ba964ba79930b51f93c">rsb</a>
+</li>
+<li>rsb_flag_default_rsb_matrix_flags
+: <a class="el" href="classrsb.html#aba933b2d9b4534fa69226910ed84bd4c">rsb</a>
+</li>
+<li>rsb_flag_default_storage_flags
+: <a class="el" href="classrsb.html#ad27c22510fec7c8367bd34bf800cbd84">rsb</a>
+</li>
+<li>rsb_flag_diagonal
+: <a class="el" href="classrsb.html#a509eea3e97b56833df24cb9d2b064e26">rsb</a>
+</li>
+<li>rsb_flag_discard_zeros
+: <a class="el" href="classrsb.html#a95b0cf20f4422b337c41f2388a59fb0b">rsb</a>
+</li>
+<li>rsb_flag_duplicates_default_handle
+: <a class="el" href="classrsb.html#aa1ca91fa56bb36b6eebbf47de8ccb1be">rsb</a>
+</li>
+<li>rsb_flag_duplicates_keep_last
+: <a class="el" href="classrsb.html#ad6870000c6da71ba7e07676e9d9c5e42">rsb</a>
+</li>
+<li>rsb_flag_duplicates_sum
+: <a class="el" href="classrsb.html#a4e8c5001e9a26a86faefe9bd26989040">rsb</a>
+</li>
+<li>rsb_flag_experimental_in_place_permutation_sort
+: <a class="el" href="classrsb.html#a6d6b68525e01bb7d91eb814216c0b5bf">rsb</a>
+</li>
+<li>rsb_flag_externally_allocated_arrays
+: <a class="el" href="classrsb.html#ab8f28a0d2ec93bf0c85ef1f30fc51e24">rsb</a>
+</li>
+<li>rsb_flag_fortran_indices_interface
+: <a class="el" href="classrsb.html#a8ca3ae90c2f8e0923f80f04e53ad2c37">rsb</a>
+</li>
+<li>rsb_flag_hermitian
+: <a class="el" href="classrsb.html#a613fa635312f361ef115b68803807908">rsb</a>
+</li>
+<li>rsb_flag_identical_flags
+: <a class="el" href="classrsb.html#a2af139858170575356808c746b4a564a">rsb</a>
+</li>
+<li>rsb_flag_lower
+: <a class="el" href="classrsb.html#a59dd2ec96582af74d563f8c9f1f44409">rsb</a>
+</li>
+<li>rsb_flag_lower_hermitian
+: <a class="el" href="classrsb.html#a163680fba55484e1d4e4c9a436ebc93b">rsb</a>
+</li>
+<li>rsb_flag_lower_symmetric
+: <a class="el" href="classrsb.html#a1b31d44601cedab86c51a6ed2a8b0ca4">rsb</a>
+</li>
+<li>rsb_flag_lower_triangular
+: <a class="el" href="classrsb.html#a7c3f1e6d9f61f9944a08efab6a00fe2f">rsb</a>
+</li>
+<li>rsb_flag_mutually_exclusive_switches
+: <a class="el" href="classrsb.html#abf74a30d663a24ff5fde624217bfea37">rsb</a>
+</li>
+<li>rsb_flag_noflags
+: <a class="el" href="classrsb.html#a65dbcb1d6e6347e5b7e85b5aa49db90c">rsb</a>
+</li>
+<li>rsb_flag_quad_partitioning
+: <a class="el" href="classrsb.html#a7a5366fbd6cd1814d44b1ab1068f88de">rsb</a>
+</li>
+<li>rsb_flag_recursive_more_leaves_than_threads
+: <a class="el" href="classrsb.html#aff989c5cb6fa62c7ed25a72f30d6a864">rsb</a>
+</li>
+<li>rsb_flag_recursive_subdivide_more_on_diag
+: <a class="el" href="classrsb.html#abce4dd43d8147cb6fe505bda474e535c">rsb</a>
+</li>
+<li>rsb_flag_sorted_input
+: <a class="el" href="classrsb.html#ade2657fb3c17b519cc4332eac06046d3">rsb</a>
+</li>
+<li>rsb_flag_symmetric
+: <a class="el" href="classrsb.html#a8325109ecda447aa1e93e8d747673f4c">rsb</a>
+</li>
+<li>rsb_flag_triangular
+: <a class="el" href="classrsb.html#a3ea9a964debcbac70d35e964666f7a1c">rsb</a>
+</li>
+<li>rsb_flag_unit_diag_implicit
+: <a class="el" href="classrsb.html#a3e5c32923f3e360e980311315a27dc7d">rsb</a>
+</li>
+<li>rsb_flag_upper
+: <a class="el" href="classrsb.html#a9d9497934ece76bcf860a2a563056eca">rsb</a>
+</li>
+<li>rsb_flag_upper_hermitian
+: <a class="el" href="classrsb.html#a22eedbec9d19115a8658438f1c7cc496">rsb</a>
+</li>
+<li>rsb_flag_upper_symmetric
+: <a class="el" href="classrsb.html#ab17822f489868813f38ba9609245ae55">rsb</a>
+</li>
+<li>rsb_flag_upper_triangular
+: <a class="el" href="classrsb.html#ac3802654bb13df88bb2e7f371b12e5ea">rsb</a>
+</li>
+<li>rsb_flag_use_csr_reserved
+: <a class="el" href="classrsb.html#a9d39857a6f2ae454fd20d5bcc03ef17c">rsb</a>
+</li>
+<li>rsb_flag_use_halfword_indices
+: <a class="el" href="classrsb.html#ae2c87798ff9cee8bdc0eaacdec62a5d0">rsb</a>
+</li>
+<li>rsb_flag_use_halfword_indices_coo
+: <a class="el" href="classrsb.html#a6ff989a0fe4da2a71e72091fcb30a334">rsb</a>
+</li>
+<li>rsb_flag_use_halfword_indices_csr
+: <a class="el" href="classrsb.html#a7baa8d692038856c55489d2382f09e5d">rsb</a>
+</li>
+<li>rsb_flag_want_bcss_storage
+: <a class="el" href="classrsb.html#a8ad70221bf6a5f4b458f6b700b6af8df">rsb</a>
+</li>
+<li>rsb_flag_want_column_major_order
+: <a class="el" href="classrsb.html#a8786a38b2ca41b926b8ef6092a55b8a6">rsb</a>
+</li>
+<li>rsb_flag_want_coo_storage
+: <a class="el" href="classrsb.html#a9fda0eb0c128c193ba7d05bab64d7e90">rsb</a>
+</li>
+<li>rsb_flag_want_row_major_order
+: <a class="el" href="classrsb.html#a68ace12ecb8cbcc9a7c686b2b9665c29">rsb</a>
+</li>
+<li>rsb_io_want_bounded_box_computation
+: <a class="el" href="classrsb.html#a81a7107ceaa5d934eced8144f7de2338">rsb</a>
+</li>
+<li>rsb_io_want_cache_blocking_method
+: <a class="el" href="classrsb.html#aaf22b4c404442175bc58dc513bf13a89">rsb</a>
+</li>
+<li>rsb_io_want_executing_threads
+: <a class="el" href="classrsb.html#a0c15802bcd77b9b98a0968beffaee9cc">rsb</a>
+</li>
+<li>rsb_io_want_extra_verbose_interface
+: <a class="el" href="classrsb.html#a191f5492907ae4beca111b361955a791">rsb</a>
+</li>
+<li>rsb_io_want_is_initialized_marker
+: <a class="el" href="classrsb.html#aed7dc0ecede60b677144e8aba46d28b9">rsb</a>
+</li>
+<li>rsb_io_want_leaf_level_multivec
+: <a class="el" href="classrsb.html#aa89d96645cdd1a902fdfb0377a0a5ea2">rsb</a>
+</li>
+<li>rsb_io_want_librsb_etime
+: <a class="el" href="classrsb.html#a565392da24b3006eaeaf5c0d1c5a424d">rsb</a>
+</li>
+<li>rsb_io_want_max_memory_allocated
+: <a class="el" href="classrsb.html#a0ceca511d93a29126225dd783af190d2">rsb</a>
+</li>
+<li>rsb_io_want_max_memory_allocations
+: <a class="el" href="classrsb.html#afa4f68bc0184148f7790351c28cbae50">rsb</a>
+</li>
+<li>rsb_io_want_mem_alloc_cnt
+: <a class="el" href="classrsb.html#a658556e8116b0ff18bc19302fb66449a">rsb</a>
+</li>
+<li>rsb_io_want_mem_alloc_tot
+: <a class="el" href="classrsb.html#a512361fe2c126a7baa412e4b680d8a2f">rsb</a>
+</li>
+<li>rsb_io_want_memory_hierarchy_info_string
+: <a class="el" href="classrsb.html#a81327bb47b51d6c50e12c02171c8c3fe">rsb</a>
+</li>
+<li>rsb_io_want_output_stream
+: <a class="el" href="classrsb.html#a72c4b7daa9a9ba1c7887bb05dfb96b2c">rsb</a>
+</li>
+<li>rsb_io_want_sort_method
+: <a class="el" href="classrsb.html#ae4176512451ec7387ee2fbaec0c7f861">rsb</a>
+</li>
+<li>rsb_io_want_subdivision_multiplier
+: <a class="el" href="classrsb.html#ad5a1220ce0e7d5c4ce517150de22d80b">rsb</a>
+</li>
+<li>rsb_io_want_verbose_errors
+: <a class="el" href="classrsb.html#a90cf14925f34712589430925a0abb92e">rsb</a>
+</li>
+<li>rsb_io_want_verbose_exit
+: <a class="el" href="classrsb.html#a33d3ac5b6383e375f2239b780af50d3f">rsb</a>
+</li>
+<li>rsb_io_want_verbose_init
+: <a class="el" href="classrsb.html#abf4365a254c637b59b5f84dcef03c4e6">rsb</a>
+</li>
+<li>rsb_io_want_verbose_tuning
+: <a class="el" href="classrsb.html#ac95404408be9bc2045e8455881d21377">rsb</a>
+</li>
+<li>rsb_lib_exit()
+: <a class="el" href="interfacersb_1_1rsb__lib__exit.html#a7f978ab1fb36092abb76da1d2abefe44">rsb::rsb_lib_exit</a>
+</li>
+<li>rsb_lib_get_opt()
+: <a class="el" href="interfacersb_1_1rsb__lib__get__opt.html#ab87dc671d4ddb02ef1ff4a438c30f5a1">rsb::rsb_lib_get_opt</a>
+</li>
+<li>rsb_lib_init()
+: <a class="el" href="interfacersb_1_1rsb__lib__init.html#a769d6bd7b5f59ebf378fd9d956a10970">rsb::rsb_lib_init</a>
+</li>
+<li>rsb_lib_reinit()
+: <a class="el" href="interfacersb_1_1rsb__lib__reinit.html#af222e6ea5e86eb4dd635aed1b435cec2">rsb::rsb_lib_reinit</a>
+</li>
+<li>rsb_lib_set_opt()
+: <a class="el" href="interfacersb_1_1rsb__lib__set__opt.html#a676b1645de5c3b9c3b14c87ceb12ce3e">rsb::rsb_lib_set_opt</a>
+</li>
+<li>rsb_lib_set_opt_str()
+: <a class="el" href="interfacersb_1_1rsb__lib__set__opt__str.html#af0f406c52171320cf25529324b064db8">rsb::rsb_lib_set_opt_str</a>
+</li>
+<li>rsb_marf_eps
+: <a class="el" href="classrsb.html#a862ec78887803b5649251bd70bd7cba0">rsb</a>
+</li>
+<li>rsb_marf_eps_b
+: <a class="el" href="classrsb.html#a448f95924a27a7bc591db9590b62d6b5">rsb</a>
+</li>
+<li>rsb_marf_eps_l
+: <a class="el" href="classrsb.html#aa7582e5c9cddf8ad409485cbfa6ebac4">rsb</a>
+</li>
+<li>rsb_marf_eps_s
+: <a class="el" href="classrsb.html#aa1f0a3a95206057e5be739fd9b114e12">rsb</a>
+</li>
+<li>rsb_marf_rgb
+: <a class="el" href="classrsb.html#a9aa6c9b3d7034de75ebca4a5c1eba668">rsb</a>
+</li>
+<li>rsb_mif_index_storage_in_bytes__to__size_t
+: <a class="el" href="classrsb.html#a912caf1dfbc9eecd804ec0e9b330809f">rsb</a>
+</li>
+<li>rsb_mif_index_storage_in_bytes_per_nnz__to__rsb_real_t
+: <a class="el" href="classrsb.html#a728a103d20814d978ac073fc51791897">rsb</a>
+</li>
+<li>rsb_mif_leaves_count__to__rsb_blk_index_t
+: <a class="el" href="classrsb.html#ac954dfff99410e7223094406be0f19f9">rsb</a>
+</li>
+<li>rsb_mif_matrix_cols__to__rsb_coo_index_t
+: <a class="el" href="classrsb.html#a0cb66e8ecfec31c29be967b928caf767">rsb</a>
+</li>
+<li>rsb_mif_matrix_flags__to__rsb_flags_t
+: <a class="el" href="classrsb.html#ad0e9b8ffe63a338a7e03ad62d3a4b046">rsb</a>
+</li>
+<li>rsb_mif_matrix_info__to__char_p
+: <a class="el" href="classrsb.html#a24db597e798fc524428ff052bd5ee3bb">rsb</a>
+</li>
+<li>rsb_mif_matrix_nnz__to__rsb_nnz_index_t
+: <a class="el" href="classrsb.html#ae3d4d4559c433e7ac5dd51f63bd1933f">rsb</a>
+</li>
+<li>rsb_mif_matrix_rows__to__rsb_coo_index_t
+: <a class="el" href="classrsb.html#adcdc1cf3fe0032524c482bc2be4b4b7d">rsb</a>
+</li>
+<li>rsb_mif_matrix_typecode__to__rsb_type_t
+: <a class="el" href="classrsb.html#a008a647728ce9aa30846a913c0c620f9">rsb</a>
+</li>
+<li>rsb_mif_total_size__to__size_t
+: <a class="el" href="classrsb.html#a4f193b007e217530bf2a45c65d58673f">rsb</a>
+</li>
+<li>rsb_mtx_add_to_dense()
+: <a class="el" href="interfacersb_1_1rsb__mtx__add__to__dense.html#a073fda633ce6ad1ac0128e7e80cc7a1a">rsb::rsb_mtx_add_to_dense</a>
+</li>
+<li>rsb_mtx_alloc_from_coo_begin()
+: <a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__begin.html#acdbe2149810598ad743510fb43850063">rsb::rsb_mtx_alloc_from_coo_begin</a>
+</li>
+<li>rsb_mtx_alloc_from_coo_const()
+: <a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__const.html#a343f0c34a21b70af5723b84f906f04ed">rsb::rsb_mtx_alloc_from_coo_const</a>
+</li>
+<li>rsb_mtx_alloc_from_coo_end()
+: <a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__end.html#acb65cf6cb8c6965a1f19f3a77a3bb635">rsb::rsb_mtx_alloc_from_coo_end</a>
+</li>
+<li>rsb_mtx_alloc_from_coo_inplace()
+: <a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__inplace.html#a79dd14c8140e12f2f4490a488468c406">rsb::rsb_mtx_alloc_from_coo_inplace</a>
+</li>
+<li>rsb_mtx_alloc_from_csc_const()
+: <a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csc__const.html#ae148c3e7567302bb4b8312482f47b057">rsb::rsb_mtx_alloc_from_csc_const</a>
+</li>
+<li>rsb_mtx_alloc_from_csr_const()
+: <a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csr__const.html#ac4d4d18a8a83b5790b9472750ef96e0e">rsb::rsb_mtx_alloc_from_csr_const</a>
+</li>
+<li>rsb_mtx_alloc_from_csr_inplace()
+: <a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csr__inplace.html#ae987825747d0697dd1fa7b7b67a8b509">rsb::rsb_mtx_alloc_from_csr_inplace</a>
+</li>
+<li>rsb_mtx_clone()
+: <a class="el" href="interfacersb_1_1rsb__mtx__clone.html#a9d94feec5e252fa47ee272dc6dc9d896">rsb::rsb_mtx_clone</a>
+</li>
+<li>rsb_mtx_free()
+: <a class="el" href="interfacersb_1_1rsb__mtx__free.html#a09a2dbae0d1161971139c2156cbef16d">rsb::rsb_mtx_free</a>
+</li>
+<li>rsb_mtx_get_coo()
+: <a class="el" href="interfacersb_1_1rsb__mtx__get__coo.html#ad56231129b3d2be969605ab3c43020fe">rsb::rsb_mtx_get_coo</a>
+</li>
+<li>rsb_mtx_get_coo_block()
+: <a class="el" href="interfacersb_1_1rsb__mtx__get__coo__block.html#a15cad9939688f96a5164b0ed6873bf00">rsb::rsb_mtx_get_coo_block</a>
+</li>
+<li>rsb_mtx_get_csr()
+: <a class="el" href="interfacersb_1_1rsb__mtx__get__csr.html#a26e57debd9264300f0436440df805625">rsb::rsb_mtx_get_csr</a>
+</li>
+<li>rsb_mtx_get_info()
+: <a class="el" href="interfacersb_1_1rsb__mtx__get__info.html#a279563ac765d73fed65942786f0b56f3">rsb::rsb_mtx_get_info</a>
+</li>
+<li>rsb_mtx_get_info_str()
+: <a class="el" href="interfacersb_1_1rsb__mtx__get__info__str.html#a36f95acdfcb25020c8ef5cc3e46f65f5">rsb::rsb_mtx_get_info_str</a>
+</li>
+<li>rsb_mtx_get_nrm()
+: <a class="el" href="interfacersb_1_1rsb__mtx__get__nrm.html#ac31aeb4a3fa773f965833de0a7f430f8">rsb::rsb_mtx_get_nrm</a>
+</li>
+<li>rsb_mtx_get_prec()
+: <a class="el" href="interfacersb_1_1rsb__mtx__get__prec.html#a348e683f8b908ee70aa854c80803aafc">rsb::rsb_mtx_get_prec</a>
+</li>
+<li>rsb_mtx_get_rows_sparse()
+: <a class="el" href="interfacersb_1_1rsb__mtx__get__rows__sparse.html#a048ca91f617db2fd2e8fbd250068829b">rsb::rsb_mtx_get_rows_sparse</a>
+</li>
+<li>rsb_mtx_get_vals()
+: <a class="el" href="interfacersb_1_1rsb__mtx__get__vals.html#a5f501a125d2fd5b4138c81dce37a427e">rsb::rsb_mtx_get_vals</a>
+</li>
+<li>rsb_mtx_get_vec()
+: <a class="el" href="interfacersb_1_1rsb__mtx__get__vec.html#a8da26f8850a32ea89255ba5c946b9be3">rsb::rsb_mtx_get_vec</a>
+</li>
+<li>rsb_mtx_rndr()
+: <a class="el" href="interfacersb_1_1rsb__mtx__rndr.html#acedd2acc7f1393e056d36cbea3c4cdaa">rsb::rsb_mtx_rndr</a>
+</li>
+<li>rsb_mtx_set_vals()
+: <a class="el" href="interfacersb_1_1rsb__mtx__set__vals.html#a67f48229a8cc61f12c2dd6ca7c3d3d44">rsb::rsb_mtx_set_vals</a>
+</li>
+<li>rsb_mtx_switch_to_coo()
+: <a class="el" href="interfacersb_1_1rsb__mtx__switch__to__coo.html#a4054c2c9dbf8dd8ad06a551f7eadf23f">rsb::rsb_mtx_switch_to_coo</a>
+</li>
+<li>rsb_mtx_switch_to_csr()
+: <a class="el" href="interfacersb_1_1rsb__mtx__switch__to__csr.html#ab085734a0dd0cbdad2bcff6e62718379">rsb::rsb_mtx_switch_to_csr</a>
+</li>
+<li>rsb_mtx_upd_vals()
+: <a class="el" href="interfacersb_1_1rsb__mtx__upd__vals.html#abaf717d65f4cce3f643dbad78dd43f5d">rsb::rsb_mtx_upd_vals</a>
+</li>
+<li>rsb_null_exit_options
+: <a class="el" href="classrsb.html#ad1a0a65364c48d23f9c82ef83c97c420">rsb</a>
+</li>
+<li>rsb_null_init_options
+: <a class="el" href="classrsb.html#a313effa5a93a26ea72326e6c89bdaf82">rsb</a>
+</li>
+<li>rsb_numerical_type_double
+: <a class="el" href="classrsb.html#af833bb7a31acb188d33424c3c16bd4cd">rsb</a>
+</li>
+<li>rsb_numerical_type_double_complex
+: <a class="el" href="classrsb.html#a1865b95dcc4fac4f0fe21dfe8c4ef036">rsb</a>
+</li>
+<li>rsb_numerical_type_float
+: <a class="el" href="classrsb.html#ac18d8381c23b54ccd523e7b4e50af04a">rsb</a>
+</li>
+<li>rsb_numerical_type_float_complex
+: <a class="el" href="classrsb.html#ace3d848255b280a0531407c19fffaec7">rsb</a>
+</li>
+<li>rsb_numerical_type_int
+: <a class="el" href="classrsb.html#a31d8f196938e468a3891fb80f1decc1f">rsb</a>
+</li>
+<li>rsb_numerical_type_same_type
+: <a class="el" href="classrsb.html#a43c72bf61ae0f1961908e27c7dd76f01">rsb</a>
+</li>
+<li>rsb_perror()
+: <a class="el" href="interfacersb_1_1rsb__perror.html#a72bfc792fff96e8db48fafdd91669751">rsb::rsb_perror</a>
+</li>
+<li>rsb_precf_ilu0
+: <a class="el" href="classrsb.html#aa3e1b0443ca75f7f78983737770ee95a">rsb</a>
+</li>
+<li>rsb_psblas_trans_to_rsb_trans()
+: <a class="el" href="interfacersb_1_1rsb__psblas__trans__to__rsb__trans.html#af3f97f3e696d1309ab86da4a3e0f6de8">rsb::rsb_psblas_trans_to_rsb_trans</a>
+</li>
+<li>rsb_spmm()
+: <a class="el" href="interfacersb_1_1rsb__spmm.html#a7af958e6026d556fc5e4e994514d8ac1">rsb::rsb_spmm</a>
+</li>
+<li>rsb_spmsp()
+: <a class="el" href="interfacersb_1_1rsb__spmsp.html#a935b754474aa9edc1234f1efc16c8e3b">rsb::rsb_spmsp</a>
+</li>
+<li>rsb_spmsp_to_dense()
+: <a class="el" href="interfacersb_1_1rsb__spmsp__to__dense.html#ad1e0ae6df6ef2842d5fff69204253568">rsb::rsb_spmsp_to_dense</a>
+</li>
+<li>rsb_spmv()
+: <a class="el" href="interfacersb_1_1rsb__spmv.html#ad340345701bc3e8b0d26f56820ff2842">rsb::rsb_spmv</a>
+</li>
+<li>rsb_sppsp()
+: <a class="el" href="interfacersb_1_1rsb__sppsp.html#a157e6aec78681df74866193b32b76101">rsb::rsb_sppsp</a>
+</li>
+<li>rsb_spsm()
+: <a class="el" href="interfacersb_1_1rsb__spsm.html#a1e87e26c84faeac8cda8e6ed3cf77e35">rsb::rsb_spsm</a>
+</li>
+<li>rsb_spsv()
+: <a class="el" href="interfacersb_1_1rsb__spsv.html#aaaa26b35783e2a125255025de14c18e0">rsb::rsb_spsv</a>
+</li>
+<li>rsb_strerror_r()
+: <a class="el" href="interfacersb_1_1rsb__strerror__r.html#a67a031bb42f0e21ddc01ce3c5f12400f">rsb::rsb_strerror_r</a>
+</li>
+<li>rsb_time()
+: <a class="el" href="interfacersb_1_1rsb__time.html#a60b407a11a393bd8b6106dab907c5e92">rsb::rsb_time</a>
+</li>
+<li>rsb_transposition_c
+: <a class="el" href="classrsb.html#a2e308172e38ee4453d556792acbe464c">rsb</a>
+</li>
+<li>rsb_transposition_n
+: <a class="el" href="classrsb.html#a89c7627f24fecaf23ead8300f671314f">rsb</a>
+</li>
+<li>rsb_transposition_t
+: <a class="el" href="classrsb.html#a5c11d5b2aa58a9c9067ec914265cd28f">rsb</a>
+</li>
+<li>rsb_tune_spmm()
+: <a class="el" href="interfacersb_1_1rsb__tune__spmm.html#a50a0bd8eb0673e0bac6375f25e719c81">rsb::rsb_tune_spmm</a>
+</li>
+<li>rsb_tune_spsm()
+: <a class="el" href="interfacersb_1_1rsb__tune__spsm.html#a6cac8625fd1e15c4686b56faa31cf663">rsb::rsb_tune_spsm</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
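The "r" index above gathers the core rsb.h entry points (rsb_lib_init, rsb_mtx_alloc_from_coo_const, rsb_spmv, rsb_mtx_free, rsb_lib_exit) together with the error, flag, type, and transposition constants they take. As a minimal sketch of how they combine — assuming a build with double-precision support and default blocking (the brA/bcA arguments passed as 0), not quoting any upstream example:

    #include <rsb.h>
    #include <stdio.h>

    int main(void)
    {
        rsb_err_t errval = RSB_ERR_NO_ERROR;
        struct rsb_mtx_t *mtxAp = NULL;
        /* a 2x2 matrix with three nonzeroes, in COO form */
        const double VA[] = { 1.0, 2.0, 3.0 };
        const rsb_coo_idx_t IA[] = { 0, 1, 1 }, JA[] = { 0, 0, 1 };
        const double X[] = { 1.0, 1.0 }, alpha = 1.0, beta = 0.0;
        double Y[] = { 0.0, 0.0 };

        if (rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
            return 1;
        mtxAp = rsb_mtx_alloc_from_coo_const(VA, IA, JA, 3,
                RSB_NUMERICAL_TYPE_DOUBLE, 2, 2, 0, 0,
                RSB_FLAG_NOFLAGS, &errval);
        if (mtxAp == NULL)
            return 1;
        /* Y <- alpha * A * X + beta * Y */
        rsb_spmv(RSB_TRANSPOSITION_N, &alpha, mtxAp, X, 1, &beta, Y, 1);
        printf("Y = [ %g %g ]\n", Y[0], Y[1]);
        rsb_mtx_free(mtxAp);
        rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
        return 0;
    }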
diff --git a/doc/html/functions_0x73.html b/doc/html/functions_0x73.html
new file mode 100644
index 0000000..83fcd5a
--- /dev/null
+++ b/doc/html/functions_0x73.html
@@ -0,0 +1,132 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Data Fields</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li class="current"><a href="functions.html"><span>All</span></a></li>
+      <li><a href="functions_func.html"><span>Functions</span></a></li>
+      <li><a href="functions_vars.html"><span>Variables</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li><a href="functions.html#index_a"><span>a</span></a></li>
+      <li><a href="functions_0x62.html#index_b"><span>b</span></a></li>
+      <li><a href="functions_0x63.html#index_c"><span>c</span></a></li>
+      <li><a href="functions_0x64.html#index_d"><span>d</span></a></li>
+      <li><a href="functions_0x6b.html#index_k"><span>k</span></a></li>
+      <li><a href="functions_0x6e.html#index_n"><span>n</span></a></li>
+      <li><a href="functions_0x72.html#index_r"><span>r</span></a></li>
+      <li class="current"><a href="functions_0x73.html#index_s"><span>s</span></a></li>
+      <li><a href="functions_0x75.html#index_u"><span>u</span></a></li>
+      <li><a href="functions_0x76.html#index_v"><span>v</span></a></li>
+      <li><a href="functions_0x7a.html#index_z"><span>z</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+<div class="textblock">Here is a list of all struct and union fields with links to the structures/unions they belong to:</div>
+
+<h3><a class="anchor" id="index_s"></a>- s -</h3><ul>
+<li>suscr_begin()
+: <a class="el" href="classblas__sparse.html#ae78739e1ebe48fe8b9752a43cd5c15a0">blas_sparse</a>
+</li>
+<li>suscr_block_begin()
+: <a class="el" href="classblas__sparse.html#a8ccdce913bf1b8a1d30b6889611143cb">blas_sparse</a>
+</li>
+<li>suscr_end()
+: <a class="el" href="classblas__sparse.html#a38d9574e6360fcaa6035eaf9518001d8">blas_sparse</a>
+</li>
+<li>suscr_insert_block()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__block.html#ab52411aa7d878e2fc62abc2983b9871f">blas_sparse::uscr_insert_block</a>
+, <a class="el" href="classblas__sparse.html#a1f4709630ab2be2247580eb1fbb48472">blas_sparse</a>
+</li>
+<li>suscr_insert_clique()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__clique.html#ae4d7b020b0c50e575aa6a80b44ab8a53">blas_sparse::uscr_insert_clique</a>
+, <a class="el" href="classblas__sparse.html#a508ee5b058f7c6a1a3d21d3f706cddd4">blas_sparse</a>
+</li>
+<li>suscr_insert_col()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__col.html#a46d6cb6bd1b38c5c75eef95cb9ce4135">blas_sparse::uscr_insert_col</a>
+, <a class="el" href="classblas__sparse.html#a183a3ff9aa5af1dcedc5cf7bd4918b5e">blas_sparse</a>
+</li>
+<li>suscr_insert_entries()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__entries.html#a68b943e8b2dfb946299b80b38397a05d">blas_sparse::uscr_insert_entries</a>
+, <a class="el" href="classblas__sparse.html#a5c4a2d0b9164fb232c102426693ccfd1">blas_sparse</a>
+</li>
+<li>suscr_insert_entry()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__entry.html#a3b1e4b0dddeb275de32edeafda52990f">blas_sparse::uscr_insert_entry</a>
+, <a class="el" href="classblas__sparse.html#a26a40430bf4de9b01eaf9dacf999dea6">blas_sparse</a>
+</li>
+<li>suscr_insert_row()
+: <a class="el" href="classblas__sparse.html#a9ec4465da954f0761c7edfd78d2be717">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1uscr__insert__row.html#ad625073be16e7d5ebe9a66f73f9da15c">blas_sparse::uscr_insert_row</a>
+</li>
+<li>suscr_variable_block_begin()
+: <a class="el" href="classblas__sparse.html#aab5942faf7f9fe31f9dfd13143f37dc7">blas_sparse</a>
+</li>
+<li>susmm()
+: <a class="el" href="interfaceblas__sparse_1_1usmm.html#abeab18a2f4b8c597aad8e7e618d12bfc">blas_sparse::usmm</a>
+, <a class="el" href="classblas__sparse.html#a03977fef75f9ee8773400c08153069d5">blas_sparse</a>
+</li>
+<li>susmv()
+: <a class="el" href="classblas__sparse.html#afb6e4dbb50553fa86818408d9db6d7c3">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1usmv.html#a94abb35b1f09c96790c08bbcc6adedb9">blas_sparse::usmv</a>
+</li>
+<li>sussm()
+: <a class="el" href="classblas__sparse.html#a617ef412adc547e3f050610874549889">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1ussm.html#a30729d148522f306da9f787961ddeae6">blas_sparse::ussm</a>
+</li>
+<li>sussv()
+: <a class="el" href="classblas__sparse.html#ab21d16c7bda69becec8edf113b62dee0">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1ussv.html#a3c65b4e4dcd66663b1424378932549c8">blas_sparse::ussv</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
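The "s" entries above are the single-precision Sparse BLAS routines: suscr_begin()/suscr_end() bracket the insertion calls, susmv() multiplies, and usds() releases the handle. A short C sketch against the blas_sparse.h binding, assuming the standard BLAS_-prefixed names and enums (blas_invalid_handle, blas_no_trans) rather than anything librsb-specific:

    #include <blas_sparse.h>

    int spmv_demo(void)
    {
        const float x[2] = { 1.0f, 1.0f };
        float y[2] = { 0.0f, 0.0f };
        blas_sparse_matrix A = BLAS_suscr_begin(2, 2); /* begin a 2x2 matrix */

        if (A == blas_invalid_handle)
            return -1;
        BLAS_suscr_insert_entry(A, 1.0f, 0, 0); /* A(0,0) = 1 */
        BLAS_suscr_insert_entry(A, 2.0f, 1, 1); /* A(1,1) = 2 */
        BLAS_suscr_end(A);                      /* assemble the handle */
        BLAS_susmv(blas_no_trans, 1.0f, A, x, 1, y, 1); /* y <- 1*A*x + y */
        BLAS_usds(A);                           /* destroy the handle */
        return 0;
    }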
diff --git a/doc/html/functions_0x75.html b/doc/html/functions_0x75.html
new file mode 100644
index 0000000..d060ec2
--- /dev/null
+++ b/doc/html/functions_0x75.html
@@ -0,0 +1,92 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Data Fields</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li class="current"><a href="functions.html"><span>All</span></a></li>
+      <li><a href="functions_func.html"><span>Functions</span></a></li>
+      <li><a href="functions_vars.html"><span>Variables</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li><a href="functions.html#index_a"><span>a</span></a></li>
+      <li><a href="functions_0x62.html#index_b"><span>b</span></a></li>
+      <li><a href="functions_0x63.html#index_c"><span>c</span></a></li>
+      <li><a href="functions_0x64.html#index_d"><span>d</span></a></li>
+      <li><a href="functions_0x6b.html#index_k"><span>k</span></a></li>
+      <li><a href="functions_0x6e.html#index_n"><span>n</span></a></li>
+      <li><a href="functions_0x72.html#index_r"><span>r</span></a></li>
+      <li><a href="functions_0x73.html#index_s"><span>s</span></a></li>
+      <li class="current"><a href="functions_0x75.html#index_u"><span>u</span></a></li>
+      <li><a href="functions_0x76.html#index_v"><span>v</span></a></li>
+      <li><a href="functions_0x7a.html#index_z"><span>z</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+<div class="textblock">Here is a list of all struct and union fields with links to the structures/unions they belong to:</div>
+
+<h3><a class="anchor" id="index_u"></a>- u -</h3><ul>
+<li>uscr_end()
+: <a class="el" href="classblas__sparse.html#a48f1e1b82322910d45a1b2455421745f">blas_sparse</a>
+</li>
+<li>usds()
+: <a class="el" href="classblas__sparse.html#a8a3b6cd055048ab5e15b1b18be291f32">blas_sparse</a>
+</li>
+<li>usgp()
+: <a class="el" href="classblas__sparse.html#a1e0eb1ccd8ffbf49baefe455a248f7fe">blas_sparse</a>
+</li>
+<li>ussp()
+: <a class="el" href="classblas__sparse.html#a469df92a4d25a9554fb1d79cdac1de84">blas_sparse</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
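Of the "u" entries above, ussp() and usgp() set and get a matrix property on a handle that is still being built, while uscr_end() assembles it and usds() destroys it. A hedged sketch, assuming the standard Sparse BLAS property enum blas_lower_triangular from blas_enum.h:

    #include <blas_sparse.h>

    int property_demo(void)
    {
        int is_lower;
        blas_sparse_matrix A = BLAS_duscr_begin(3, 3);

        BLAS_ussp(A, blas_lower_triangular);   /* declare before inserting */
        BLAS_duscr_insert_entry(A, 1.0, 0, 0);
        BLAS_duscr_end(A);
        is_lower = BLAS_usgp(A, blas_lower_triangular); /* query it back */
        BLAS_usds(A);
        return is_lower; /* nonzero if the property is set */
    }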
diff --git a/doc/html/functions_0x76.html b/doc/html/functions_0x76.html
new file mode 100644
index 0000000..e3f6546
--- /dev/null
+++ b/doc/html/functions_0x76.html
@@ -0,0 +1,83 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Data Fields</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li class="current"><a href="functions.html"><span>All</span></a></li>
+      <li><a href="functions_func.html"><span>Functions</span></a></li>
+      <li><a href="functions_vars.html"><span>Variables</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li><a href="functions.html#index_a"><span>a</span></a></li>
+      <li><a href="functions_0x62.html#index_b"><span>b</span></a></li>
+      <li><a href="functions_0x63.html#index_c"><span>c</span></a></li>
+      <li><a href="functions_0x64.html#index_d"><span>d</span></a></li>
+      <li><a href="functions_0x6b.html#index_k"><span>k</span></a></li>
+      <li><a href="functions_0x6e.html#index_n"><span>n</span></a></li>
+      <li><a href="functions_0x72.html#index_r"><span>r</span></a></li>
+      <li><a href="functions_0x73.html#index_s"><span>s</span></a></li>
+      <li><a href="functions_0x75.html#index_u"><span>u</span></a></li>
+      <li class="current"><a href="functions_0x76.html#index_v"><span>v</span></a></li>
+      <li><a href="functions_0x7a.html#index_z"><span>z</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+<div class="textblock">Here is a list of all struct and union fields with links to the structures/unions they belong to:</div>
+
+<h3><a class="anchor" id="index_v"></a>- v -</h3><ul>
+<li>values
+: <a class="el" href="structrsb__initopts.html#a0a64d546db2c6445e4a33068cffa6694">rsb_initopts</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
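The lone "v" entry, values, is a field of struct rsb_initopts: an array of pointers paired one-to-one with the keys array and consumed by rsb_lib_init()/rsb_lib_reinit(). A sketch of how the field is filled, assuming the rsb.h layout (keys, values, n_pairs, action) and the RSB_IO_SPECIFIER_SET action macro:

    #include <rsb.h>

    int set_threads(rsb_int_t nthreads)
    {
        enum rsb_opt_t keys[] = { RSB_IO_WANT_EXECUTING_THREADS };
        void *values[] = { &nthreads }; /* one pointer per key */
        struct rsb_initopts io;

        io.keys = keys;
        io.values = values; /* the 'values' field documented above */
        io.n_pairs = 1;
        io.action = RSB_IO_SPECIFIER_SET;
        return (rsb_lib_reinit(&io) == RSB_ERR_NO_ERROR) ? 0 : -1;
    }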
diff --git a/doc/html/functions_0x7a.html b/doc/html/functions_0x7a.html
new file mode 100644
index 0000000..f154f3f
--- /dev/null
+++ b/doc/html/functions_0x7a.html
@@ -0,0 +1,132 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Data Fields</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li class="current"><a href="functions.html"><span>All</span></a></li>
+      <li><a href="functions_func.html"><span>Functions</span></a></li>
+      <li><a href="functions_vars.html"><span>Variables</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li><a href="functions.html#index_a"><span>a</span></a></li>
+      <li><a href="functions_0x62.html#index_b"><span>b</span></a></li>
+      <li><a href="functions_0x63.html#index_c"><span>c</span></a></li>
+      <li><a href="functions_0x64.html#index_d"><span>d</span></a></li>
+      <li><a href="functions_0x6b.html#index_k"><span>k</span></a></li>
+      <li><a href="functions_0x6e.html#index_n"><span>n</span></a></li>
+      <li><a href="functions_0x72.html#index_r"><span>r</span></a></li>
+      <li><a href="functions_0x73.html#index_s"><span>s</span></a></li>
+      <li><a href="functions_0x75.html#index_u"><span>u</span></a></li>
+      <li><a href="functions_0x76.html#index_v"><span>v</span></a></li>
+      <li class="current"><a href="functions_0x7a.html#index_z"><span>z</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+<div class="textblock">Here is a list of all struct and union fields with links to the structures/unions they belong to:</div>
+
+<h3><a class="anchor" id="index_z"></a>- z -</h3><ul>
+<li>zuscr_begin()
+: <a class="el" href="classblas__sparse.html#a9ec8326625fe0762e3e6e523260d2655">blas_sparse</a>
+</li>
+<li>zuscr_block_begin()
+: <a class="el" href="classblas__sparse.html#a5fbd2bae9f3849fda1be4691ca3df5ea">blas_sparse</a>
+</li>
+<li>zuscr_end()
+: <a class="el" href="classblas__sparse.html#a5f00b912397c8dc3ee87fecdf4cf98aa">blas_sparse</a>
+</li>
+<li>zuscr_insert_block()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__block.html#ad4e920769d6a259d1b2fae20e6fb2853">blas_sparse::uscr_insert_block</a>
+, <a class="el" href="classblas__sparse.html#a0ea88b095d147ffe96d05c5d53b4480a">blas_sparse</a>
+</li>
+<li>zuscr_insert_clique()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__clique.html#af457ebe2c2b2112ba6cdb94e9bb53928">blas_sparse::uscr_insert_clique</a>
+, <a class="el" href="classblas__sparse.html#a0b7e474844552d62f72e18bac4592ced">blas_sparse</a>
+</li>
+<li>zuscr_insert_col()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__col.html#a397d0fb9bd1ba1bddc0eaeb4d3e47a5c">blas_sparse::uscr_insert_col</a>
+, <a class="el" href="classblas__sparse.html#a3a2dcc960e33dbae28abc3f1fdd52e66">blas_sparse</a>
+</li>
+<li>zuscr_insert_entries()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__entries.html#ae73d20580b844428d7ca4834e578d448">blas_sparse::uscr_insert_entries</a>
+, <a class="el" href="classblas__sparse.html#ad28c55a5ed7b359a30a2538a45878e08">blas_sparse</a>
+</li>
+<li>zuscr_insert_entry()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__entry.html#a3eae411ca3d10ec5dfddbdb53a5a7d4d">blas_sparse::uscr_insert_entry</a>
+, <a class="el" href="classblas__sparse.html#af003c4713fb7a39896ad1537fec94ea9">blas_sparse</a>
+</li>
+<li>zuscr_insert_row()
+: <a class="el" href="classblas__sparse.html#a7114c5a5b9487634711034c693b5e9b3">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1uscr__insert__row.html#adf35f583386e093b7805b732d52aa95b">blas_sparse::uscr_insert_row</a>
+</li>
+<li>zuscr_variable_block_begin()
+: <a class="el" href="classblas__sparse.html#a700e8b151004b9c8829a1fe4fd331465">blas_sparse</a>
+</li>
+<li>zusmm()
+: <a class="el" href="interfaceblas__sparse_1_1usmm.html#a76c548fa7c494d5e8aba03ab1dc4bc39">blas_sparse::usmm</a>
+, <a class="el" href="classblas__sparse.html#ae1048833494ef86cd0d74648989599db">blas_sparse</a>
+</li>
+<li>zusmv()
+: <a class="el" href="classblas__sparse.html#af67f81abcb78cc03000257888e47f517">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1usmv.html#a34dd1627794af46998a243677e1bbaa8">blas_sparse::usmv</a>
+</li>
+<li>zussm()
+: <a class="el" href="classblas__sparse.html#a772211da8da7a031fe7845be6a2dd403">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1ussm.html#ab838e61a288bbd7b055ea37222d2c567">blas_sparse::ussm</a>
+</li>
+<li>zussv()
+: <a class="el" href="classblas__sparse.html#a2331da0465b9a3298f8b6dd1c3c7c150">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1ussv.html#a9ff3d54dd856f144f7f22e9d6e5d3135">blas_sparse::ussv</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
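The "z" entries mirror the "s" ones in double-complex precision; the practical difference in the C binding is that complex scalars and values travel by address (const void *) instead of by value. A hedged sketch using C99 complex types:

    #include <blas_sparse.h>
    #include <complex.h>

    int zdemo(void)
    {
        const double complex one = 1.0;
        const double complex x[2] = { 1.0, 1.0 };
        double complex y[2] = { 0.0, 0.0 };
        blas_sparse_matrix A = BLAS_zuscr_begin(2, 2);

        BLAS_zuscr_insert_entry(A, &one, 0, 0); /* value passed by pointer */
        BLAS_zuscr_insert_entry(A, &one, 1, 1);
        BLAS_zuscr_end(A);
        BLAS_zusmv(blas_no_trans, &one, A, x, 1, y, 1); /* y <- A*x + y */
        BLAS_usds(A);
        return 0;
    }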
diff --git a/doc/html/functions_func.html b/doc/html/functions_func.html
new file mode 100644
index 0000000..00492be
--- /dev/null
+++ b/doc/html/functions_func.html
@@ -0,0 +1,471 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Data Fields - Functions</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li><a href="functions.html"><span>All</span></a></li>
+      <li class="current"><a href="functions_func.html"><span>Functions</span></a></li>
+      <li><a href="functions_vars.html"><span>Variables</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li><a href="#index_c"><span>c</span></a></li>
+      <li><a href="#index_d"><span>d</span></a></li>
+      <li><a href="#index_r"><span>r</span></a></li>
+      <li><a href="#index_s"><span>s</span></a></li>
+      <li><a href="#index_u"><span>u</span></a></li>
+      <li><a href="#index_z"><span>z</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+ 
+
+<h3><a class="anchor" id="index_c"></a>- c -</h3><ul>
+<li>cuscr_begin()
+: <a class="el" href="classblas__sparse.html#af4e9f97f85799c5e8f60c78d40d906f3">blas_sparse</a>
+</li>
+<li>cuscr_block_begin()
+: <a class="el" href="classblas__sparse.html#a6085ddf99c2459e051a6106e4a2c4785">blas_sparse</a>
+</li>
+<li>cuscr_end()
+: <a class="el" href="classblas__sparse.html#a9878426469b215a78642e5245a054203">blas_sparse</a>
+</li>
+<li>cuscr_insert_block()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__block.html#af239abd22080f58d5cf0ea2dfdd78953">blas_sparse::uscr_insert_block</a>
+, <a class="el" href="classblas__sparse.html#add06f42953fc4dff9dafc487f58172ee">blas_sparse</a>
+</li>
+<li>cuscr_insert_clique()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__clique.html#af3f921a0867dbd20c1499660a2b78376">blas_sparse::uscr_insert_clique</a>
+, <a class="el" href="classblas__sparse.html#af9a2f1bf6543dccc8b5bb1b5d0f35636">blas_sparse</a>
+</li>
+<li>cuscr_insert_col()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__col.html#a380ea4ffed92a6cf0e73a50952fc6a64">blas_sparse::uscr_insert_col</a>
+, <a class="el" href="classblas__sparse.html#a847661e819534c083984a453a1e282ea">blas_sparse</a>
+</li>
+<li>cuscr_insert_entries()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__entries.html#a5035e49b6a0d45c9aee959667fd567b2">blas_sparse::uscr_insert_entries</a>
+, <a class="el" href="classblas__sparse.html#a6ee075639a028bfbb8d8c3652bb3c147">blas_sparse</a>
+</li>
+<li>cuscr_insert_entry()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__entry.html#aea33478c2c2911daf74c478ded2ed39e">blas_sparse::uscr_insert_entry</a>
+, <a class="el" href="classblas__sparse.html#a4bee5ce9a9bb94863469797ca22d44b0">blas_sparse</a>
+</li>
+<li>cuscr_insert_row()
+: <a class="el" href="classblas__sparse.html#aad23b1379a471af392fa33066fd66140">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1uscr__insert__row.html#a59a678b947de912694a162cafb171100">blas_sparse::uscr_insert_row</a>
+</li>
+<li>cuscr_variable_block_begin()
+: <a class="el" href="classblas__sparse.html#abd5c88929ed1c7133169c401881fa1c7">blas_sparse</a>
+</li>
+<li>cusmm()
+: <a class="el" href="interfaceblas__sparse_1_1usmm.html#a0a2303f12cfe05ba01cdb52b751d5f33">blas_sparse::usmm</a>
+, <a class="el" href="classblas__sparse.html#a4f7e8d071d2309ed60cb9d588fd749b6">blas_sparse</a>
+</li>
+<li>cusmv()
+: <a class="el" href="classblas__sparse.html#a437ce36d8520ffeadfbb6e6f9885b9f3">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1usmv.html#a43d3541d816401bb2581913cfa2070bb">blas_sparse::usmv</a>
+</li>
+<li>cussm()
+: <a class="el" href="classblas__sparse.html#abc00bd143edf8a993e7d79a1d8baf636">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1ussm.html#a19ec9206f2a3e66ccfddff2be3fb55ad">blas_sparse::ussm</a>
+</li>
+<li>cussv()
+: <a class="el" href="classblas__sparse.html#a8f2db2c64bbd1ecd032fb7a103e30c97">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1ussv.html#a8e0df43045904452d698c18dbb8b33a1">blas_sparse::ussv</a>
+</li>
+</ul>
+
+
+<h3><a class="anchor" id="index_d"></a>- d -</h3><ul>
+<li>duscr_begin()
+: <a class="el" href="classblas__sparse.html#acf14608f8b0375ca133b7f850bde3b50">blas_sparse</a>
+</li>
+<li>duscr_block_begin()
+: <a class="el" href="classblas__sparse.html#ab33c2f497f0a53213f38cd8449ab4349">blas_sparse</a>
+</li>
+<li>duscr_end()
+: <a class="el" href="classblas__sparse.html#a88d066acac28b6fe7c7cdc9e6941ff8f">blas_sparse</a>
+</li>
+<li>duscr_insert_block()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__block.html#aa74a6aa929703b1221d125dabf0610a9">blas_sparse::uscr_insert_block</a>
+, <a class="el" href="classblas__sparse.html#a40b43e04b282dd6f6ad11f51701a9b81">blas_sparse</a>
+</li>
+<li>duscr_insert_clique()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__clique.html#a24491a9b6aeae9698aacf9c649fabcfc">blas_sparse::uscr_insert_clique</a>
+, <a class="el" href="classblas__sparse.html#a533a7082811ea859d079b5e9513ce1b4">blas_sparse</a>
+</li>
+<li>duscr_insert_col()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__col.html#a8dfa301a73cd1bf09b66a0b2e9c704a8">blas_sparse::uscr_insert_col</a>
+, <a class="el" href="classblas__sparse.html#a1ed0bf47156c5d299ef678b71aec7ef0">blas_sparse</a>
+</li>
+<li>duscr_insert_entries()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__entries.html#a6d994adf4a26516c4bbd08020a923a5a">blas_sparse::uscr_insert_entries</a>
+, <a class="el" href="classblas__sparse.html#a87f44b33cf81a30af58fe9a299ea78a3">blas_sparse</a>
+</li>
+<li>duscr_insert_entry()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__entry.html#ac1bd26e50082f7eb1123a59794ae3f1c">blas_sparse::uscr_insert_entry</a>
+, <a class="el" href="classblas__sparse.html#ae3706fcae9dcbf6ebe96335717823939">blas_sparse</a>
+</li>
+<li>duscr_insert_row()
+: <a class="el" href="classblas__sparse.html#a055df1b4ef9aa7e7937bb1dfe1f228b9">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1uscr__insert__row.html#a14fd80441fbbc200a36de62c86f12538">blas_sparse::uscr_insert_row</a>
+</li>
+<li>duscr_variable_block_begin()
+: <a class="el" href="classblas__sparse.html#ab1fd9e9f8cdd5f79134873fd6af47c28">blas_sparse</a>
+</li>
+<li>dusmm()
+: <a class="el" href="interfaceblas__sparse_1_1usmm.html#a444e03055975d19e0907fdc774d6419f">blas_sparse::usmm</a>
+, <a class="el" href="classblas__sparse.html#ae717638ebcf6e277f2621fd8eae75249">blas_sparse</a>
+</li>
+<li>dusmv()
+: <a class="el" href="classblas__sparse.html#a9fd3bf400531b8277a082b0663491329">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1usmv.html#ab356fcfdebfd118dd5e6165e66a3dc70">blas_sparse::usmv</a>
+</li>
+<li>dussm()
+: <a class="el" href="classblas__sparse.html#a0eb97f56a6467e87ce06f8be8a50e88d">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1ussm.html#a25c815e459c07efcba93c29b156136c0">blas_sparse::ussm</a>
+</li>
+<li>dussv()
+: <a class="el" href="classblas__sparse.html#aae591d7a08af50e34313f347d888779d">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1ussv.html#a45d49f6f9887a808109bbb4467efb1dc">blas_sparse::ussv</a>
+</li>
+</ul>
+
+
+<h3><a class="anchor" id="index_r"></a>- r -</h3><ul>
+<li>rsb_blas_get_mtx()
+: <a class="el" href="interfaceblas__sparse_1_1rsb__blas__get__mtx.html#ab7287586fccf2ade719b9a0b0585fb6a">blas_sparse::rsb_blas_get_mtx</a>
+</li>
+<li>rsb_coo_sort()
+: <a class="el" href="interfacersb_1_1rsb__coo__sort.html#a5712d2c61081ca75f636c2474c7d815e">rsb::rsb_coo_sort</a>
+</li>
+<li>rsb_file_mtx_get_dims()
+: <a class="el" href="interfacersb_1_1rsb__file__mtx__get__dims.html#ac465d04ed5f480a291981ae4a853257f">rsb::rsb_file_mtx_get_dims</a>
+</li>
+<li>rsb_file_mtx_load()
+: <a class="el" href="interfacersb_1_1rsb__file__mtx__load.html#a7a8a1195bbef16b39f8e68a4286e2ea1">rsb::rsb_file_mtx_load</a>
+</li>
+<li>rsb_file_mtx_rndr()
+: <a class="el" href="interfacersb_1_1rsb__file__mtx__rndr.html#a325a0bceb4ab80ba11a3f7e99235936d">rsb::rsb_file_mtx_rndr</a>
+</li>
+<li>rsb_file_mtx_save()
+: <a class="el" href="interfacersb_1_1rsb__file__mtx__save.html#a819ab03ec355b43ecde9c6b43336d991">rsb::rsb_file_mtx_save</a>
+</li>
+<li>rsb_file_vec_load()
+: <a class="el" href="interfacersb_1_1rsb__file__vec__load.html#a13774316ce6035da0ff647ca917d0d33">rsb::rsb_file_vec_load</a>
+</li>
+<li>rsb_file_vec_save()
+: <a class="el" href="interfacersb_1_1rsb__file__vec__save.html#a1a4271ef2990d373fb92e4d438a36678">rsb::rsb_file_vec_save</a>
+</li>
+<li>rsb_lib_exit()
+: <a class="el" href="interfacersb_1_1rsb__lib__exit.html#a7f978ab1fb36092abb76da1d2abefe44">rsb::rsb_lib_exit</a>
+</li>
+<li>rsb_lib_get_opt()
+: <a class="el" href="interfacersb_1_1rsb__lib__get__opt.html#ab87dc671d4ddb02ef1ff4a438c30f5a1">rsb::rsb_lib_get_opt</a>
+</li>
+<li>rsb_lib_init()
+: <a class="el" href="interfacersb_1_1rsb__lib__init.html#a769d6bd7b5f59ebf378fd9d956a10970">rsb::rsb_lib_init</a>
+</li>
+<li>rsb_lib_reinit()
+: <a class="el" href="interfacersb_1_1rsb__lib__reinit.html#af222e6ea5e86eb4dd635aed1b435cec2">rsb::rsb_lib_reinit</a>
+</li>
+<li>rsb_lib_set_opt()
+: <a class="el" href="interfacersb_1_1rsb__lib__set__opt.html#a676b1645de5c3b9c3b14c87ceb12ce3e">rsb::rsb_lib_set_opt</a>
+</li>
+<li>rsb_lib_set_opt_str()
+: <a class="el" href="interfacersb_1_1rsb__lib__set__opt__str.html#af0f406c52171320cf25529324b064db8">rsb::rsb_lib_set_opt_str</a>
+</li>
+<li>rsb_mtx_add_to_dense()
+: <a class="el" href="interfacersb_1_1rsb__mtx__add__to__dense.html#a073fda633ce6ad1ac0128e7e80cc7a1a">rsb::rsb_mtx_add_to_dense</a>
+</li>
+<li>rsb_mtx_alloc_from_coo_begin()
+: <a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__begin.html#acdbe2149810598ad743510fb43850063">rsb::rsb_mtx_alloc_from_coo_begin</a>
+</li>
+<li>rsb_mtx_alloc_from_coo_const()
+: <a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__const.html#a343f0c34a21b70af5723b84f906f04ed">rsb::rsb_mtx_alloc_from_coo_const</a>
+</li>
+<li>rsb_mtx_alloc_from_coo_end()
+: <a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__end.html#acb65cf6cb8c6965a1f19f3a77a3bb635">rsb::rsb_mtx_alloc_from_coo_end</a>
+</li>
+<li>rsb_mtx_alloc_from_coo_inplace()
+: <a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__inplace.html#a79dd14c8140e12f2f4490a488468c406">rsb::rsb_mtx_alloc_from_coo_inplace</a>
+</li>
+<li>rsb_mtx_alloc_from_csc_const()
+: <a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csc__const.html#ae148c3e7567302bb4b8312482f47b057">rsb::rsb_mtx_alloc_from_csc_const</a>
+</li>
+<li>rsb_mtx_alloc_from_csr_const()
+: <a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csr__const.html#ac4d4d18a8a83b5790b9472750ef96e0e">rsb::rsb_mtx_alloc_from_csr_const</a>
+</li>
+<li>rsb_mtx_alloc_from_csr_inplace()
+: <a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csr__inplace.html#ae987825747d0697dd1fa7b7b67a8b509">rsb::rsb_mtx_alloc_from_csr_inplace</a>
+</li>
+<li>rsb_mtx_clone()
+: <a class="el" href="interfacersb_1_1rsb__mtx__clone.html#a9d94feec5e252fa47ee272dc6dc9d896">rsb::rsb_mtx_clone</a>
+</li>
+<li>rsb_mtx_free()
+: <a class="el" href="interfacersb_1_1rsb__mtx__free.html#a09a2dbae0d1161971139c2156cbef16d">rsb::rsb_mtx_free</a>
+</li>
+<li>rsb_mtx_get_coo()
+: <a class="el" href="interfacersb_1_1rsb__mtx__get__coo.html#ad56231129b3d2be969605ab3c43020fe">rsb::rsb_mtx_get_coo</a>
+</li>
+<li>rsb_mtx_get_coo_block()
+: <a class="el" href="interfacersb_1_1rsb__mtx__get__coo__block.html#a15cad9939688f96a5164b0ed6873bf00">rsb::rsb_mtx_get_coo_block</a>
+</li>
+<li>rsb_mtx_get_csr()
+: <a class="el" href="interfacersb_1_1rsb__mtx__get__csr.html#a26e57debd9264300f0436440df805625">rsb::rsb_mtx_get_csr</a>
+</li>
+<li>rsb_mtx_get_info()
+: <a class="el" href="interfacersb_1_1rsb__mtx__get__info.html#a279563ac765d73fed65942786f0b56f3">rsb::rsb_mtx_get_info</a>
+</li>
+<li>rsb_mtx_get_info_str()
+: <a class="el" href="interfacersb_1_1rsb__mtx__get__info__str.html#a36f95acdfcb25020c8ef5cc3e46f65f5">rsb::rsb_mtx_get_info_str</a>
+</li>
+<li>rsb_mtx_get_nrm()
+: <a class="el" href="interfacersb_1_1rsb__mtx__get__nrm.html#ac31aeb4a3fa773f965833de0a7f430f8">rsb::rsb_mtx_get_nrm</a>
+</li>
+<li>rsb_mtx_get_prec()
+: <a class="el" href="interfacersb_1_1rsb__mtx__get__prec.html#a348e683f8b908ee70aa854c80803aafc">rsb::rsb_mtx_get_prec</a>
+</li>
+<li>rsb_mtx_get_rows_sparse()
+: <a class="el" href="interfacersb_1_1rsb__mtx__get__rows__sparse.html#a048ca91f617db2fd2e8fbd250068829b">rsb::rsb_mtx_get_rows_sparse</a>
+</li>
+<li>rsb_mtx_get_vals()
+: <a class="el" href="interfacersb_1_1rsb__mtx__get__vals.html#a5f501a125d2fd5b4138c81dce37a427e">rsb::rsb_mtx_get_vals</a>
+</li>
+<li>rsb_mtx_get_vec()
+: <a class="el" href="interfacersb_1_1rsb__mtx__get__vec.html#a8da26f8850a32ea89255ba5c946b9be3">rsb::rsb_mtx_get_vec</a>
+</li>
+<li>rsb_mtx_rndr()
+: <a class="el" href="interfacersb_1_1rsb__mtx__rndr.html#acedd2acc7f1393e056d36cbea3c4cdaa">rsb::rsb_mtx_rndr</a>
+</li>
+<li>rsb_mtx_set_vals()
+: <a class="el" href="interfacersb_1_1rsb__mtx__set__vals.html#a67f48229a8cc61f12c2dd6ca7c3d3d44">rsb::rsb_mtx_set_vals</a>
+</li>
+<li>rsb_mtx_switch_to_coo()
+: <a class="el" href="interfacersb_1_1rsb__mtx__switch__to__coo.html#a4054c2c9dbf8dd8ad06a551f7eadf23f">rsb::rsb_mtx_switch_to_coo</a>
+</li>
+<li>rsb_mtx_switch_to_csr()
+: <a class="el" href="interfacersb_1_1rsb__mtx__switch__to__csr.html#ab085734a0dd0cbdad2bcff6e62718379">rsb::rsb_mtx_switch_to_csr</a>
+</li>
+<li>rsb_mtx_upd_vals()
+: <a class="el" href="interfacersb_1_1rsb__mtx__upd__vals.html#abaf717d65f4cce3f643dbad78dd43f5d">rsb::rsb_mtx_upd_vals</a>
+</li>
+<li>rsb_perror()
+: <a class="el" href="interfacersb_1_1rsb__perror.html#a72bfc792fff96e8db48fafdd91669751">rsb::rsb_perror</a>
+</li>
+<li>rsb_psblas_trans_to_rsb_trans()
+: <a class="el" href="interfacersb_1_1rsb__psblas__trans__to__rsb__trans.html#af3f97f3e696d1309ab86da4a3e0f6de8">rsb::rsb_psblas_trans_to_rsb_trans</a>
+</li>
+<li>rsb_spmm()
+: <a class="el" href="interfacersb_1_1rsb__spmm.html#a7af958e6026d556fc5e4e994514d8ac1">rsb::rsb_spmm</a>
+</li>
+<li>rsb_spmsp()
+: <a class="el" href="interfacersb_1_1rsb__spmsp.html#a935b754474aa9edc1234f1efc16c8e3b">rsb::rsb_spmsp</a>
+</li>
+<li>rsb_spmsp_to_dense()
+: <a class="el" href="interfacersb_1_1rsb__spmsp__to__dense.html#ad1e0ae6df6ef2842d5fff69204253568">rsb::rsb_spmsp_to_dense</a>
+</li>
+<li>rsb_spmv()
+: <a class="el" href="interfacersb_1_1rsb__spmv.html#ad340345701bc3e8b0d26f56820ff2842">rsb::rsb_spmv</a>
+</li>
+<li>rsb_sppsp()
+: <a class="el" href="interfacersb_1_1rsb__sppsp.html#a157e6aec78681df74866193b32b76101">rsb::rsb_sppsp</a>
+</li>
+<li>rsb_spsm()
+: <a class="el" href="interfacersb_1_1rsb__spsm.html#a1e87e26c84faeac8cda8e6ed3cf77e35">rsb::rsb_spsm</a>
+</li>
+<li>rsb_spsv()
+: <a class="el" href="interfacersb_1_1rsb__spsv.html#aaaa26b35783e2a125255025de14c18e0">rsb::rsb_spsv</a>
+</li>
+<li>rsb_strerror_r()
+: <a class="el" href="interfacersb_1_1rsb__strerror__r.html#a67a031bb42f0e21ddc01ce3c5f12400f">rsb::rsb_strerror_r</a>
+</li>
+<li>rsb_time()
+: <a class="el" href="interfacersb_1_1rsb__time.html#a60b407a11a393bd8b6106dab907c5e92">rsb::rsb_time</a>
+</li>
+<li>rsb_tune_spmm()
+: <a class="el" href="interfacersb_1_1rsb__tune__spmm.html#a50a0bd8eb0673e0bac6375f25e719c81">rsb::rsb_tune_spmm</a>
+</li>
+<li>rsb_tune_spsm()
+: <a class="el" href="interfacersb_1_1rsb__tune__spsm.html#a6cac8625fd1e15c4686b56faa31cf663">rsb::rsb_tune_spsm</a>
+</li>
+</ul>
+
+
+<h3><a class="anchor" id="index_s"></a>- s -</h3><ul>
+<li>suscr_begin()
+: <a class="el" href="classblas__sparse.html#ae78739e1ebe48fe8b9752a43cd5c15a0">blas_sparse</a>
+</li>
+<li>suscr_block_begin()
+: <a class="el" href="classblas__sparse.html#a8ccdce913bf1b8a1d30b6889611143cb">blas_sparse</a>
+</li>
+<li>suscr_end()
+: <a class="el" href="classblas__sparse.html#a38d9574e6360fcaa6035eaf9518001d8">blas_sparse</a>
+</li>
+<li>suscr_insert_block()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__block.html#ab52411aa7d878e2fc62abc2983b9871f">blas_sparse::uscr_insert_block</a>
+, <a class="el" href="classblas__sparse.html#a1f4709630ab2be2247580eb1fbb48472">blas_sparse</a>
+</li>
+<li>suscr_insert_clique()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__clique.html#ae4d7b020b0c50e575aa6a80b44ab8a53">blas_sparse::uscr_insert_clique</a>
+, <a class="el" href="classblas__sparse.html#a508ee5b058f7c6a1a3d21d3f706cddd4">blas_sparse</a>
+</li>
+<li>suscr_insert_col()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__col.html#a46d6cb6bd1b38c5c75eef95cb9ce4135">blas_sparse::uscr_insert_col</a>
+, <a class="el" href="classblas__sparse.html#a183a3ff9aa5af1dcedc5cf7bd4918b5e">blas_sparse</a>
+</li>
+<li>suscr_insert_entries()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__entries.html#a68b943e8b2dfb946299b80b38397a05d">blas_sparse::uscr_insert_entries</a>
+, <a class="el" href="classblas__sparse.html#a5c4a2d0b9164fb232c102426693ccfd1">blas_sparse</a>
+</li>
+<li>suscr_insert_entry()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__entry.html#a3b1e4b0dddeb275de32edeafda52990f">blas_sparse::uscr_insert_entry</a>
+, <a class="el" href="classblas__sparse.html#a26a40430bf4de9b01eaf9dacf999dea6">blas_sparse</a>
+</li>
+<li>suscr_insert_row()
+: <a class="el" href="classblas__sparse.html#a9ec4465da954f0761c7edfd78d2be717">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1uscr__insert__row.html#ad625073be16e7d5ebe9a66f73f9da15c">blas_sparse::uscr_insert_row</a>
+</li>
+<li>suscr_variable_block_begin()
+: <a class="el" href="classblas__sparse.html#aab5942faf7f9fe31f9dfd13143f37dc7">blas_sparse</a>
+</li>
+<li>susmm()
+: <a class="el" href="interfaceblas__sparse_1_1usmm.html#abeab18a2f4b8c597aad8e7e618d12bfc">blas_sparse::usmm</a>
+, <a class="el" href="classblas__sparse.html#a03977fef75f9ee8773400c08153069d5">blas_sparse</a>
+</li>
+<li>susmv()
+: <a class="el" href="classblas__sparse.html#afb6e4dbb50553fa86818408d9db6d7c3">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1usmv.html#a94abb35b1f09c96790c08bbcc6adedb9">blas_sparse::usmv</a>
+</li>
+<li>sussm()
+: <a class="el" href="classblas__sparse.html#a617ef412adc547e3f050610874549889">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1ussm.html#a30729d148522f306da9f787961ddeae6">blas_sparse::ussm</a>
+</li>
+<li>sussv()
+: <a class="el" href="classblas__sparse.html#ab21d16c7bda69becec8edf113b62dee0">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1ussv.html#a3c65b4e4dcd66663b1424378932549c8">blas_sparse::ussv</a>
+</li>
+</ul>
+
+
+<h3><a class="anchor" id="index_u"></a>- u -</h3><ul>
+<li>uscr_end()
+: <a class="el" href="classblas__sparse.html#a48f1e1b82322910d45a1b2455421745f">blas_sparse</a>
+</li>
+<li>usds()
+: <a class="el" href="classblas__sparse.html#a8a3b6cd055048ab5e15b1b18be291f32">blas_sparse</a>
+</li>
+<li>usgp()
+: <a class="el" href="classblas__sparse.html#a1e0eb1ccd8ffbf49baefe455a248f7fe">blas_sparse</a>
+</li>
+<li>ussp()
+: <a class="el" href="classblas__sparse.html#a469df92a4d25a9554fb1d79cdac1de84">blas_sparse</a>
+</li>
+</ul>
+
+
+<h3><a class="anchor" id="index_z"></a>- z -</h3><ul>
+<li>zuscr_begin()
+: <a class="el" href="classblas__sparse.html#a9ec8326625fe0762e3e6e523260d2655">blas_sparse</a>
+</li>
+<li>zuscr_block_begin()
+: <a class="el" href="classblas__sparse.html#a5fbd2bae9f3849fda1be4691ca3df5ea">blas_sparse</a>
+</li>
+<li>zuscr_end()
+: <a class="el" href="classblas__sparse.html#a5f00b912397c8dc3ee87fecdf4cf98aa">blas_sparse</a>
+</li>
+<li>zuscr_insert_block()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__block.html#ad4e920769d6a259d1b2fae20e6fb2853">blas_sparse::uscr_insert_block</a>
+, <a class="el" href="classblas__sparse.html#a0ea88b095d147ffe96d05c5d53b4480a">blas_sparse</a>
+</li>
+<li>zuscr_insert_clique()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__clique.html#af457ebe2c2b2112ba6cdb94e9bb53928">blas_sparse::uscr_insert_clique</a>
+, <a class="el" href="classblas__sparse.html#a0b7e474844552d62f72e18bac4592ced">blas_sparse</a>
+</li>
+<li>zuscr_insert_col()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__col.html#a397d0fb9bd1ba1bddc0eaeb4d3e47a5c">blas_sparse::uscr_insert_col</a>
+, <a class="el" href="classblas__sparse.html#a3a2dcc960e33dbae28abc3f1fdd52e66">blas_sparse</a>
+</li>
+<li>zuscr_insert_entries()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__entries.html#ae73d20580b844428d7ca4834e578d448">blas_sparse::uscr_insert_entries</a>
+, <a class="el" href="classblas__sparse.html#ad28c55a5ed7b359a30a2538a45878e08">blas_sparse</a>
+</li>
+<li>zuscr_insert_entry()
+: <a class="el" href="interfaceblas__sparse_1_1uscr__insert__entry.html#a3eae411ca3d10ec5dfddbdb53a5a7d4d">blas_sparse::uscr_insert_entry</a>
+, <a class="el" href="classblas__sparse.html#af003c4713fb7a39896ad1537fec94ea9">blas_sparse</a>
+</li>
+<li>zuscr_insert_row()
+: <a class="el" href="classblas__sparse.html#a7114c5a5b9487634711034c693b5e9b3">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1uscr__insert__row.html#adf35f583386e093b7805b732d52aa95b">blas_sparse::uscr_insert_row</a>
+</li>
+<li>zuscr_variable_block_begin()
+: <a class="el" href="classblas__sparse.html#a700e8b151004b9c8829a1fe4fd331465">blas_sparse</a>
+</li>
+<li>zusmm()
+: <a class="el" href="interfaceblas__sparse_1_1usmm.html#a76c548fa7c494d5e8aba03ab1dc4bc39">blas_sparse::usmm</a>
+, <a class="el" href="classblas__sparse.html#ae1048833494ef86cd0d74648989599db">blas_sparse</a>
+</li>
+<li>zusmv()
+: <a class="el" href="classblas__sparse.html#af67f81abcb78cc03000257888e47f517">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1usmv.html#a34dd1627794af46998a243677e1bbaa8">blas_sparse::usmv</a>
+</li>
+<li>zussm()
+: <a class="el" href="classblas__sparse.html#a772211da8da7a031fe7845be6a2dd403">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1ussm.html#ab838e61a288bbd7b055ea37222d2c567">blas_sparse::ussm</a>
+</li>
+<li>zussv()
+: <a class="el" href="classblas__sparse.html#a2331da0465b9a3298f8b6dd1c3c7c150">blas_sparse</a>
+, <a class="el" href="interfaceblas__sparse_1_1ussv.html#a9ff3d54dd856f144f7f22e9d6e5d3135">blas_sparse::ussv</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
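[Note: the rsb_* functions indexed in the page above form the core of librsb's C API (rsb.h). As a point of reference only, a minimal sketch of how they combine, assuming the documented 1.2 interface; block-size arguments are passed as 0 for the library default, and error handling is abbreviated:

    /* Minimal sketch of the rsb.h calls indexed above (assumed 1.2 API). */
    #include <rsb.h>
    #include <stdio.h>

    int main(void)
    {
        rsb_err_t errval = RSB_ERR_NO_ERROR;
        const rsb_coo_idx_t IA[] = {0, 1, 2}, JA[] = {0, 1, 2};
        const double VA[] = {1.0, 2.0, 3.0};          /* 3x3 diagonal, COO */
        const double X[]  = {1.0, 1.0, 1.0};
        double Y[]        = {0.0, 0.0, 0.0};
        const double alpha = 1.0, beta = 0.0;
        struct rsb_mtx_t *mtxAp = NULL;

        if (rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
            return 1;
        /* Assemble an RSB matrix from COO triplets (0,0 = default blocking). */
        mtxAp = rsb_mtx_alloc_from_coo_const(VA, IA, JA, 3,
                RSB_NUMERICAL_TYPE_DOUBLE, 3, 3, 0, 0,
                RSB_FLAG_NOFLAGS, &errval);
        if (mtxAp == NULL)
            return 1;
        /* Y := alpha * A * X + beta * Y */
        errval = rsb_spmv(RSB_TRANSPOSITION_N, &alpha, mtxAp, X, 1, &beta, Y, 1);
        if (errval != RSB_ERR_NO_ERROR)
            rsb_perror(NULL, errval);                 /* print the error */
        rsb_mtx_free(mtxAp);
        rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
        return 0;
    }
]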
diff --git a/doc/html/functions_vars.html b/doc/html/functions_vars.html
new file mode 100644
index 0000000..e19e4f4
--- /dev/null
+++ b/doc/html/functions_vars.html
@@ -0,0 +1,78 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Data Fields - Variables</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li><a href="functions.html"><span>All</span></a></li>
+      <li><a href="functions_func.html"><span>Functions</span></a></li>
+      <li class="current"><a href="functions_vars.html"><span>Variables</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li class="current"><a href="functions_vars.html#index_a"><span>a</span></a></li>
+      <li><a href="functions_vars_0x62.html#index_b"><span>b</span></a></li>
+      <li><a href="functions_vars_0x6b.html#index_k"><span>k</span></a></li>
+      <li><a href="functions_vars_0x6e.html#index_n"><span>n</span></a></li>
+      <li><a href="functions_vars_0x72.html#index_r"><span>r</span></a></li>
+      <li><a href="functions_vars_0x76.html#index_v"><span>v</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+ 
+
+<h3><a class="anchor" id="index_a"></a>- a -</h3><ul>
+<li>action
+: <a class="el" href="structrsb__initopts.html#ad087930c58602fd3c0761f5af3aae7ce">rsb_initopts</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/functions_vars_0x62.html b/doc/html/functions_vars_0x62.html
new file mode 100644
index 0000000..064df8e
--- /dev/null
+++ b/doc/html/functions_vars_0x62.html
@@ -0,0 +1,324 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Data Fields - Variables</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li><a href="functions.html"><span>All</span></a></li>
+      <li><a href="functions_func.html"><span>Functions</span></a></li>
+      <li class="current"><a href="functions_vars.html"><span>Variables</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li><a href="functions_vars.html#index_a"><span>a</span></a></li>
+      <li class="current"><a href="functions_vars_0x62.html#index_b"><span>b</span></a></li>
+      <li><a href="functions_vars_0x6b.html#index_k"><span>k</span></a></li>
+      <li><a href="functions_vars_0x6e.html#index_n"><span>n</span></a></li>
+      <li><a href="functions_vars_0x72.html#index_r"><span>r</span></a></li>
+      <li><a href="functions_vars_0x76.html#index_v"><span>v</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+ 
+
+<h3><a class="anchor" id="index_b"></a>- b -</h3><ul>
+<li>blas_base
+: <a class="el" href="classblas__sparse.html#ac6324bd9c488f6ad4c176fd05a5c1a94">blas_sparse</a>
+</li>
+<li>blas_block
+: <a class="el" href="classblas__sparse.html#aef5b352231bcff68b28b97742899558e">blas_sparse</a>
+</li>
+<li>blas_colmajor
+: <a class="el" href="classblas__sparse.html#af4ede8e7f0445be25841733354b747bd">blas_sparse</a>
+</li>
+<li>blas_complex
+: <a class="el" href="classblas__sparse.html#ac72dca9b25a744006fb7e2b272958494">blas_sparse</a>
+</li>
+<li>blas_conj
+: <a class="el" href="classblas__sparse.html#a1964a262e04c046d0f97c7de7cf1d916">blas_sparse</a>
+</li>
+<li>blas_conj_trans
+: <a class="el" href="classblas__sparse.html#a7b8d414b608929ba0abced46c98889d6">blas_sparse</a>
+</li>
+<li>blas_decreasing_order
+: <a class="el" href="classblas__sparse.html#aedf0364e33ddfd0ee88e93acf3683cf7">blas_sparse</a>
+</li>
+<li>blas_double_precision
+: <a class="el" href="classblas__sparse.html#a9c54c439abc55e509b4a7ec35f6faa4e">blas_sparse</a>
+</li>
+<li>blas_emax
+: <a class="el" href="classblas__sparse.html#a18b1555d2c4e1d8b3a8d38bc7105c3fa">blas_sparse</a>
+</li>
+<li>blas_emin
+: <a class="el" href="classblas__sparse.html#a150f1864fd1bf514a9769914490a23ba">blas_sparse</a>
+</li>
+<li>blas_eps
+: <a class="el" href="classblas__sparse.html#acbf407624c42cad4f1ea47776977d160">blas_sparse</a>
+</li>
+<li>blas_frobenius_norm
+: <a class="el" href="classblas__sparse.html#af1dbfea000291bde9fe93507f62a31ba">blas_sparse</a>
+</li>
+<li>blas_general
+: <a class="el" href="classblas__sparse.html#a6dd42fe3a5c74d293855e6ed0825cc67">blas_sparse</a>
+</li>
+<li>blas_hermitian
+: <a class="el" href="classblas__sparse.html#aaab0006bc8bcddf6cba32a69d3ddbf95">blas_sparse</a>
+</li>
+<li>blas_ieee
+: <a class="el" href="classblas__sparse.html#a114a6fabae21d32477af3acee15b9d5d">blas_sparse</a>
+</li>
+<li>blas_increasing_order
+: <a class="el" href="classblas__sparse.html#ae972ff04001d8bbcc52f242134af52d8">blas_sparse</a>
+</li>
+<li>blas_inf_norm
+: <a class="el" href="classblas__sparse.html#afb9ba15096a7184519256ec2923fda49">blas_sparse</a>
+</li>
+<li>blas_invalid_handle
+: <a class="el" href="classblas__sparse.html#aa9ee2ffde87e203fd37719979e7b546d">blas_sparse</a>
+</li>
+<li>blas_irregular
+: <a class="el" href="classblas__sparse.html#a9893aa4d547b371f6ba59a8615aa752e">blas_sparse</a>
+</li>
+<li>blas_jrot_inner
+: <a class="el" href="classblas__sparse.html#a2fc184f889f72b17fdb2ba6266c25b02">blas_sparse</a>
+</li>
+<li>blas_jrot_outer
+: <a class="el" href="classblas__sparse.html#ac526dad147c751bb8b175edd47d29c22">blas_sparse</a>
+</li>
+<li>blas_jrot_sorted
+: <a class="el" href="classblas__sparse.html#a0612a82431d61a1b6c4c5030e65c5e31">blas_sparse</a>
+</li>
+<li>blas_left_side
+: <a class="el" href="classblas__sparse.html#a5059846f8eba839bb1afc32abac380e4">blas_sparse</a>
+</li>
+<li>blas_lower
+: <a class="el" href="classblas__sparse.html#a67e376dc6a7cc769ee24415dc2a8d9d1">blas_sparse</a>
+</li>
+<li>blas_lower_hermitian
+: <a class="el" href="classblas__sparse.html#a101f5eab06d45474d64bff200d2387ec">blas_sparse</a>
+</li>
+<li>blas_lower_symmetric
+: <a class="el" href="classblas__sparse.html#ad474a894be1f45a6937c2a880963b1c7">blas_sparse</a>
+</li>
+<li>blas_lower_triangular
+: <a class="el" href="classblas__sparse.html#a280772883a7487fa68aabd98e4a49342">blas_sparse</a>
+</li>
+<li>blas_max_norm
+: <a class="el" href="classblas__sparse.html#a487afe34859579523bba7b4851e106c6">blas_sparse</a>
+</li>
+<li>blas_new_handle
+: <a class="el" href="classblas__sparse.html#a817d9813d36f7a7abea44e3b781e21ba">blas_sparse</a>
+</li>
+<li>blas_no_conj
+: <a class="el" href="classblas__sparse.html#aabedcf272e063a48f7e310ce04784b17">blas_sparse</a>
+</li>
+<li>blas_no_trans
+: <a class="el" href="classblas__sparse.html#a5b700c1a472d7d12decf3d7d7fd244c2">blas_sparse</a>
+</li>
+<li>blas_non_unit_diag
+: <a class="el" href="classblas__sparse.html#a8e76732b3c06d9fc27669bda37f24ed6">blas_sparse</a>
+</li>
+<li>blas_num_cols
+: <a class="el" href="classblas__sparse.html#a726b73c19dae30439aa65988fa5b5dd1">blas_sparse</a>
+</li>
+<li>blas_num_nonzeros
+: <a class="el" href="classblas__sparse.html#ab01ecdd54a1f10e944e446e0efed3bc3">blas_sparse</a>
+</li>
+<li>blas_num_rows
+: <a class="el" href="classblas__sparse.html#afe9e1f52ba336f041e1e750b3a989510">blas_sparse</a>
+</li>
+<li>blas_one_base
+: <a class="el" href="classblas__sparse.html#a60fbe98d827ebea9e7c431e7698bc462">blas_sparse</a>
+</li>
+<li>blas_one_norm
+: <a class="el" href="classblas__sparse.html#a05c66c0c87c72e39580258418c46341f">blas_sparse</a>
+</li>
+<li>blas_open_handle
+: <a class="el" href="classblas__sparse.html#a0936a1798a61b56c52c116e428a4e6b7">blas_sparse</a>
+</li>
+<li>blas_overflow
+: <a class="el" href="classblas__sparse.html#ad8f2f29c92552e53910e5c92feb3567d">blas_sparse</a>
+</li>
+<li>blas_prec
+: <a class="el" href="classblas__sparse.html#a8c3f3a997d8d96f470d44d1f34e3ed39">blas_sparse</a>
+</li>
+<li>blas_prec_double
+: <a class="el" href="classblas__sparse.html#a6f0692e06d3b42828813a7a0a9ec59bb">blas_sparse</a>
+</li>
+<li>blas_prec_extra
+: <a class="el" href="classblas__sparse.html#a305383f56368c35429bfd9e7ca23a0f5">blas_sparse</a>
+</li>
+<li>blas_prec_indigenous
+: <a class="el" href="classblas__sparse.html#a71c278e64f30229d19b46376c4385669">blas_sparse</a>
+</li>
+<li>blas_prec_single
+: <a class="el" href="classblas__sparse.html#ab378aeb6aa39b2495f084cd31e32e5a6">blas_sparse</a>
+</li>
+<li>blas_real
+: <a class="el" href="classblas__sparse.html#a0d365ccd71fdedaa5cf30a46f34bcf37">blas_sparse</a>
+</li>
+<li>blas_real_inf_norm
+: <a class="el" href="classblas__sparse.html#a0a60f070ff9a1a864af39e4489c93e31">blas_sparse</a>
+</li>
+<li>blas_real_max_norm
+: <a class="el" href="classblas__sparse.html#afecc0bd8ce11628fe2bbe55b9244c295">blas_sparse</a>
+</li>
+<li>blas_real_one_norm
+: <a class="el" href="classblas__sparse.html#aca85a61f11b3c36113209d61a89e4957">blas_sparse</a>
+</li>
+<li>blas_regular
+: <a class="el" href="classblas__sparse.html#a21d31433d4f29a6fd54c214b7a26c7d4">blas_sparse</a>
+</li>
+<li>blas_right_side
+: <a class="el" href="classblas__sparse.html#a6285c5cc8fe45bb73ece03c7900d5a18">blas_sparse</a>
+</li>
+<li>blas_rnd
+: <a class="el" href="classblas__sparse.html#af75f815c459344a5a38cd0794b93504a">blas_sparse</a>
+</li>
+<li>blas_rowmajor
+: <a class="el" href="classblas__sparse.html#aa1d4df9e25dcab40269247450e1b3e4e">blas_sparse</a>
+</li>
+<li>blas_rsb_autotune_next_operation
+: <a class="el" href="classblas__sparse.html#acf0fe16da38fc03226e462dc6104cc68">blas_sparse</a>
+</li>
+<li>blas_rsb_duplicates_ovw
+: <a class="el" href="classblas__sparse.html#a10ebafcdf3cc36cf0471ba20ffcd2980">blas_sparse</a>
+</li>
+<li>blas_rsb_duplicates_sum
+: <a class="el" href="classblas__sparse.html#a508fec1d9853698fd08c239dd08a7291">blas_sparse</a>
+</li>
+<li>blas_rsb_rep_coo
+: <a class="el" href="classblas__sparse.html#abb6a552efaab32ed9687f2e2df895783">blas_sparse</a>
+</li>
+<li>blas_rsb_rep_csr
+: <a class="el" href="classblas__sparse.html#a9fe6012ccac0890c7f7a8500e77e9ff7">blas_sparse</a>
+</li>
+<li>blas_rsb_rep_rsb
+: <a class="el" href="classblas__sparse.html#a9cd9a2263c79534384bef6bf27e65787">blas_sparse</a>
+</li>
+<li>blas_rsb_spmv_autotuning_off
+: <a class="el" href="classblas__sparse.html#a8cc6d8c9036cb66051cc1cfb7c739b5e">blas_sparse</a>
+</li>
+<li>blas_rsb_spmv_autotuning_on
+: <a class="el" href="classblas__sparse.html#afab5c86162fcf329199b0666f33cde96">blas_sparse</a>
+</li>
+<li>blas_rsb_spmv_n_autotuning_off
+: <a class="el" href="classblas__sparse.html#aefa9f681506ee4ceb578f11b9a0e664c">blas_sparse</a>
+</li>
+<li>blas_rsb_spmv_n_autotuning_on
+: <a class="el" href="classblas__sparse.html#abd8e06d35f2c4c3a6ecc1eb315548c43">blas_sparse</a>
+</li>
+<li>blas_rsb_spmv_t_autotuning_off
+: <a class="el" href="classblas__sparse.html#a56dc72776b8dcdc43f0cebbdc93dcd21">blas_sparse</a>
+</li>
+<li>blas_rsb_spmv_t_autotuning_on
+: <a class="el" href="classblas__sparse.html#a7bfbab78e4c5a789e2d76274a2fbc96c">blas_sparse</a>
+</li>
+<li>blas_sfmin
+: <a class="el" href="classblas__sparse.html#a0b1596744fa5acb891d4908588249c54">blas_sparse</a>
+</li>
+<li>blas_single_precision
+: <a class="el" href="classblas__sparse.html#a0badb7c2679a5d0ba4e90f599b678768">blas_sparse</a>
+</li>
+<li>blas_sparse_const_failure
+: <a class="el" href="classblas__sparse.html#a5d97ddcd53d2bba670233f5335b44f55">blas_sparse</a>
+</li>
+<li>blas_sparse_const_not_available
+: <a class="el" href="classblas__sparse.html#aae79119fabe06a887f461eda50c97d0a">blas_sparse</a>
+</li>
+<li>blas_sparse_const_success
+: <a class="el" href="classblas__sparse.html#a0e333ba9a5cc3b014697d0a12d08f6b2">blas_sparse</a>
+</li>
+<li>blas_symmetric
+: <a class="el" href="classblas__sparse.html#a13161955ecb9fc2ce12963cc319c93d2">blas_sparse</a>
+</li>
+<li>blas_t
+: <a class="el" href="classblas__sparse.html#a6acca6eab87ec90dcf71b8c7b40aaa8f">blas_sparse</a>
+</li>
+<li>blas_trans
+: <a class="el" href="classblas__sparse.html#a12f06635d9f1c40722ad4bd757e737bb">blas_sparse</a>
+</li>
+<li>blas_triangular
+: <a class="el" href="classblas__sparse.html#a04619b8ef6be6983ded4e3c22fce63b8">blas_sparse</a>
+</li>
+<li>blas_two_norm
+: <a class="el" href="classblas__sparse.html#abb84b2b7195d878e71760bdad596d693">blas_sparse</a>
+</li>
+<li>blas_unassembled
+: <a class="el" href="classblas__sparse.html#a50d0da49cbf6822ed5e9a8ff81faf6d5">blas_sparse</a>
+</li>
+<li>blas_underflow
+: <a class="el" href="classblas__sparse.html#ae9f01b90527ebe6b178d4c73a46bbf25">blas_sparse</a>
+</li>
+<li>blas_unit_diag
+: <a class="el" href="classblas__sparse.html#af1e902c099efbedd09c7ce65b4772626">blas_sparse</a>
+</li>
+<li>blas_upper
+: <a class="el" href="classblas__sparse.html#a8be70a15dda0ebf3b782b66e72f924d2">blas_sparse</a>
+</li>
+<li>blas_upper_hermitian
+: <a class="el" href="classblas__sparse.html#a4fcd2dfde7722199b9125542622c8c4a">blas_sparse</a>
+</li>
+<li>blas_upper_symmetric
+: <a class="el" href="classblas__sparse.html#afd82b7f277c54dfa83ab44ea6ed89fb1">blas_sparse</a>
+</li>
+<li>blas_upper_triangular
+: <a class="el" href="classblas__sparse.html#a97b523912445087a965737cb8cfd69af">blas_sparse</a>
+</li>
+<li>blas_valid_handle
+: <a class="el" href="classblas__sparse.html#a83704034c72929f3b1df534034b2786d">blas_sparse</a>
+</li>
+<li>blas_zero_base
+: <a class="el" href="classblas__sparse.html#a8c89ffd8863d708e55c5330d11c772f2">blas_sparse</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
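[Note: the blas_* constants indexed above mirror the BLAS Technical Forum's Sparse BLAS enumerations (cf. blas_enum.h in this patch). A hedged sketch of the matching C binding from blas_sparse.h, assuming the standard Forum calling sequence that librsb implements:

    /* Sketch of the Sparse BLAS C binding (assumed standard interface). */
    #include <blas_sparse.h>

    int spmv_example(void)
    {
        const double x[2] = {1.0, 1.0};
        double y[2]       = {0.0, 0.0};
        blas_sparse_matrix A = BLAS_duscr_begin(2, 2);  /* 2x2, double */
        if (A == blas_invalid_handle)
            return -1;
        BLAS_duscr_insert_entry(A, 4.0, 0, 0);          /* A(0,0) = 4 */
        BLAS_duscr_insert_entry(A, 2.0, 1, 1);          /* A(1,1) = 2 */
        if (BLAS_duscr_end(A) != 0)                     /* finish assembly */
            return -1;
        /* y := 1.0 * A * x + y */
        BLAS_dusmv(blas_no_trans, 1.0, A, x, 1, y, 1);
        BLAS_usds(A);                                   /* release handle */
        return 0;
    }
]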
diff --git a/doc/html/functions_vars_0x6b.html b/doc/html/functions_vars_0x6b.html
new file mode 100644
index 0000000..3a5662f
--- /dev/null
+++ b/doc/html/functions_vars_0x6b.html
@@ -0,0 +1,78 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Data Fields - Variables</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li><a href="functions.html"><span>All</span></a></li>
+      <li><a href="functions_func.html"><span>Functions</span></a></li>
+      <li class="current"><a href="functions_vars.html"><span>Variables</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li><a href="functions_vars.html#index_a"><span>a</span></a></li>
+      <li><a href="functions_vars_0x62.html#index_b"><span>b</span></a></li>
+      <li class="current"><a href="functions_vars_0x6b.html#index_k"><span>k</span></a></li>
+      <li><a href="functions_vars_0x6e.html#index_n"><span>n</span></a></li>
+      <li><a href="functions_vars_0x72.html#index_r"><span>r</span></a></li>
+      <li><a href="functions_vars_0x76.html#index_v"><span>v</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+ 
+
+<h3><a class="anchor" id="index_k"></a>- k -</h3><ul>
+<li>keys
+: <a class="el" href="structrsb__initopts.html#a4319168f5f1183d3ea65960e7111e7ee">rsb_initopts</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/functions_vars_0x6e.html b/doc/html/functions_vars_0x6e.html
new file mode 100644
index 0000000..364310f
--- /dev/null
+++ b/doc/html/functions_vars_0x6e.html
@@ -0,0 +1,78 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Data Fields - Variables</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li><a href="functions.html"><span>All</span></a></li>
+      <li><a href="functions_func.html"><span>Functions</span></a></li>
+      <li class="current"><a href="functions_vars.html"><span>Variables</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li><a href="functions_vars.html#index_a"><span>a</span></a></li>
+      <li><a href="functions_vars_0x62.html#index_b"><span>b</span></a></li>
+      <li><a href="functions_vars_0x6b.html#index_k"><span>k</span></a></li>
+      <li class="current"><a href="functions_vars_0x6e.html#index_n"><span>n</span></a></li>
+      <li><a href="functions_vars_0x72.html#index_r"><span>r</span></a></li>
+      <li><a href="functions_vars_0x76.html#index_v"><span>v</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+ 
+
+<h3><a class="anchor" id="index_n"></a>- n -</h3><ul>
+<li>n_pairs
+: <a class="el" href="structrsb__initopts.html#a15ff3b060d88b3d937b5e028647c0af2">rsb_initopts</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/functions_vars_0x72.html b/doc/html/functions_vars_0x72.html
new file mode 100644
index 0000000..14aeb2c
--- /dev/null
+++ b/doc/html/functions_vars_0x72.html
@@ -0,0 +1,441 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Data Fields - Variables</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li><a href="functions.html"><span>All</span></a></li>
+      <li><a href="functions_func.html"><span>Functions</span></a></li>
+      <li class="current"><a href="functions_vars.html"><span>Variables</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li><a href="functions_vars.html#index_a"><span>a</span></a></li>
+      <li><a href="functions_vars_0x62.html#index_b"><span>b</span></a></li>
+      <li><a href="functions_vars_0x6b.html#index_k"><span>k</span></a></li>
+      <li><a href="functions_vars_0x6e.html#index_n"><span>n</span></a></li>
+      <li class="current"><a href="functions_vars_0x72.html#index_r"><span>r</span></a></li>
+      <li><a href="functions_vars_0x76.html#index_v"><span>v</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+ 
+
+<h3><a class="anchor" id="index_r"></a>- r -</h3><ul>
+<li>rsb_elopf_div
+: <a class="el" href="classrsb.html#a0fb14e2ce3e4033c5f2075a823a358e2">rsb</a>
+</li>
+<li>rsb_elopf_mul
+: <a class="el" href="classrsb.html#ab8b26221a9c42a654a835a8f505cdd6d">rsb</a>
+</li>
+<li>rsb_elopf_neg
+: <a class="el" href="classrsb.html#af632e8ad15e51d2fcdad5f81d22aecab">rsb</a>
+</li>
+<li>rsb_elopf_pow
+: <a class="el" href="classrsb.html#a97685d0c5f78c8e996b85689f58309ba">rsb</a>
+</li>
+<li>rsb_elopf_scale_cols
+: <a class="el" href="classrsb.html#a0b36e2eac438cae0c3cb2171aa89a580">rsb</a>
+</li>
+<li>rsb_elopf_scale_cols_real
+: <a class="el" href="classrsb.html#a224d2a379853ca7bad32b5921437f531">rsb</a>
+</li>
+<li>rsb_elopf_scale_rows
+: <a class="el" href="classrsb.html#a036bcb7f9a4156f984d9bfe8f7829c9c">rsb</a>
+</li>
+<li>rsb_elopf_scale_rows_real
+: <a class="el" href="classrsb.html#acc73315ac4e1af0fc6c90c3d189a1c2a">rsb</a>
+</li>
+<li>rsb_err_badargs
+: <a class="el" href="classrsb.html#a05f3d2c8888332697f182ea6d8ab66b0">rsb</a>
+</li>
+<li>rsb_err_corrupt_input_data
+: <a class="el" href="classrsb.html#a4e124cfacc5e0492952ccda10905206a">rsb</a>
+</li>
+<li>rsb_err_could_not_honour_externally_allocation_flags
+: <a class="el" href="classrsb.html#a65da259a04a3b6b09b1e67d2aae53108">rsb</a>
+</li>
+<li>rsb_err_enomem
+: <a class="el" href="classrsb.html#a0abffcaa259b8f2cbf1b025c4c179fb0">rsb</a>
+</li>
+<li>rsb_err_failed_memhier_detection
+: <a class="el" href="classrsb.html#aa0868e7080760845d911eae040df8c44">rsb</a>
+</li>
+<li>rsb_err_generic_error
+: <a class="el" href="classrsb.html#abe86debd990b7989427a98378c0c2ea4">rsb</a>
+</li>
+<li>rsb_err_internal_error
+: <a class="el" href="classrsb.html#a76c59842ba7bef3a5e0cfe577b45e3af">rsb</a>
+</li>
+<li>rsb_err_invalid_numerical_data
+: <a class="el" href="classrsb.html#a018c06fd82826d0b56fdec98da22da17">rsb</a>
+</li>
+<li>rsb_err_limits
+: <a class="el" href="classrsb.html#a20784aca964572d033d9f79a08b8842d">rsb</a>
+</li>
+<li>rsb_err_memory_leak
+: <a class="el" href="classrsb.html#a3534459ee186379f45444c289df70175">rsb</a>
+</li>
+<li>rsb_err_no_error
+: <a class="el" href="classrsb.html#a2f418e43e861a006b5aea1d55913fee2">rsb</a>
+</li>
+<li>rsb_err_no_stream_output_configured_out
+: <a class="el" href="classrsb.html#a8dec384225c4700df1b201b6dbc5aa60">rsb</a>
+</li>
+<li>rsb_err_no_user_configuration
+: <a class="el" href="classrsb.html#aed70b921cdbe20cc81d03c9b9c7aab38">rsb</a>
+</li>
+<li>rsb_err_unimplemented_yet
+: <a class="el" href="classrsb.html#a4405be6ac615c1db2c161185d455374c">rsb</a>
+</li>
+<li>rsb_err_unsupported_feature
+: <a class="el" href="classrsb.html#ac81e797f7f250fb3d2c20f2a46360838">rsb</a>
+</li>
+<li>rsb_err_unsupported_format
+: <a class="el" href="classrsb.html#a48a68ee015ab06c1b72e26659479cd9e">rsb</a>
+</li>
+<li>rsb_err_unsupported_operation
+: <a class="el" href="classrsb.html#aa9069fa99bea2127f31ac62365b19bcd">rsb</a>
+</li>
+<li>rsb_err_unsupported_type
+: <a class="el" href="classrsb.html#ab8643c59b36b245e6f59ce00e10ad17f">rsb</a>
+</li>
+<li>rsb_extf_asums_col
+: <a class="el" href="classrsb.html#af6f1f5ccf7d0c80b61bce19f5c64acc0">rsb</a>
+</li>
+<li>rsb_extf_asums_row
+: <a class="el" href="classrsb.html#ae6d4323a95cd3284314c787dfb05c854">rsb</a>
+</li>
+<li>rsb_extf_diag
+: <a class="el" href="classrsb.html#a4019120043663ffa9e39b9e042d1e13a">rsb</a>
+</li>
+<li>rsb_extf_norm_inf
+: <a class="el" href="classrsb.html#a396ba7496087621b292a7e2e68e976c8">rsb</a>
+</li>
+<li>rsb_extf_norm_one
+: <a class="el" href="classrsb.html#a21ae01944a05b24822a824390789b1ee">rsb</a>
+</li>
+<li>rsb_extf_norm_two
+: <a class="el" href="classrsb.html#afb2e1af58af877281f96f6a2aeb77c99">rsb</a>
+</li>
+<li>rsb_extf_sums_col
+: <a class="el" href="classrsb.html#a7aff705dacd272bad5d692b2775d5c93">rsb</a>
+</li>
+<li>rsb_extf_sums_row
+: <a class="el" href="classrsb.html#a5228e51b964240df80dba35826a1a6c9">rsb</a>
+</li>
+<li>rsb_flag_assembled_in_coo_arrays
+: <a class="el" href="classrsb.html#a25e0432a471ab3fca4105d40ce2e8f1e">rsb</a>
+</li>
+<li>rsb_flag_c_indices_interface
+: <a class="el" href="classrsb.html#a0cd8d81bf275bfdc685080e0d855fbb1">rsb</a>
+</li>
+<li>rsb_flag_default_coo_matrix_flags
+: <a class="el" href="classrsb.html#aa1d8e9f835115cdac082812d5f74b6d4">rsb</a>
+</li>
+<li>rsb_flag_default_csr_matrix_flags
+: <a class="el" href="classrsb.html#a83848ae1b266eea31f4462821f8bc51b">rsb</a>
+</li>
+<li>rsb_flag_default_matrix_flags
+: <a class="el" href="classrsb.html#a16cc953b0faf8ba964ba79930b51f93c">rsb</a>
+</li>
+<li>rsb_flag_default_rsb_matrix_flags
+: <a class="el" href="classrsb.html#aba933b2d9b4534fa69226910ed84bd4c">rsb</a>
+</li>
+<li>rsb_flag_default_storage_flags
+: <a class="el" href="classrsb.html#ad27c22510fec7c8367bd34bf800cbd84">rsb</a>
+</li>
+<li>rsb_flag_diagonal
+: <a class="el" href="classrsb.html#a509eea3e97b56833df24cb9d2b064e26">rsb</a>
+</li>
+<li>rsb_flag_discard_zeros
+: <a class="el" href="classrsb.html#a95b0cf20f4422b337c41f2388a59fb0b">rsb</a>
+</li>
+<li>rsb_flag_duplicates_default_handle
+: <a class="el" href="classrsb.html#aa1ca91fa56bb36b6eebbf47de8ccb1be">rsb</a>
+</li>
+<li>rsb_flag_duplicates_keep_last
+: <a class="el" href="classrsb.html#ad6870000c6da71ba7e07676e9d9c5e42">rsb</a>
+</li>
+<li>rsb_flag_duplicates_sum
+: <a class="el" href="classrsb.html#a4e8c5001e9a26a86faefe9bd26989040">rsb</a>
+</li>
+<li>rsb_flag_experimental_in_place_permutation_sort
+: <a class="el" href="classrsb.html#a6d6b68525e01bb7d91eb814216c0b5bf">rsb</a>
+</li>
+<li>rsb_flag_externally_allocated_arrays
+: <a class="el" href="classrsb.html#ab8f28a0d2ec93bf0c85ef1f30fc51e24">rsb</a>
+</li>
+<li>rsb_flag_fortran_indices_interface
+: <a class="el" href="classrsb.html#a8ca3ae90c2f8e0923f80f04e53ad2c37">rsb</a>
+</li>
+<li>rsb_flag_hermitian
+: <a class="el" href="classrsb.html#a613fa635312f361ef115b68803807908">rsb</a>
+</li>
+<li>rsb_flag_identical_flags
+: <a class="el" href="classrsb.html#a2af139858170575356808c746b4a564a">rsb</a>
+</li>
+<li>rsb_flag_lower
+: <a class="el" href="classrsb.html#a59dd2ec96582af74d563f8c9f1f44409">rsb</a>
+</li>
+<li>rsb_flag_lower_hermitian
+: <a class="el" href="classrsb.html#a163680fba55484e1d4e4c9a436ebc93b">rsb</a>
+</li>
+<li>rsb_flag_lower_symmetric
+: <a class="el" href="classrsb.html#a1b31d44601cedab86c51a6ed2a8b0ca4">rsb</a>
+</li>
+<li>rsb_flag_lower_triangular
+: <a class="el" href="classrsb.html#a7c3f1e6d9f61f9944a08efab6a00fe2f">rsb</a>
+</li>
+<li>rsb_flag_mutually_exclusive_switches
+: <a class="el" href="classrsb.html#abf74a30d663a24ff5fde624217bfea37">rsb</a>
+</li>
+<li>rsb_flag_noflags
+: <a class="el" href="classrsb.html#a65dbcb1d6e6347e5b7e85b5aa49db90c">rsb</a>
+</li>
+<li>rsb_flag_quad_partitioning
+: <a class="el" href="classrsb.html#a7a5366fbd6cd1814d44b1ab1068f88de">rsb</a>
+</li>
+<li>rsb_flag_recursive_more_leaves_than_threads
+: <a class="el" href="classrsb.html#aff989c5cb6fa62c7ed25a72f30d6a864">rsb</a>
+</li>
+<li>rsb_flag_recursive_subdivide_more_on_diag
+: <a class="el" href="classrsb.html#abce4dd43d8147cb6fe505bda474e535c">rsb</a>
+</li>
+<li>rsb_flag_sorted_input
+: <a class="el" href="classrsb.html#ade2657fb3c17b519cc4332eac06046d3">rsb</a>
+</li>
+<li>rsb_flag_symmetric
+: <a class="el" href="classrsb.html#a8325109ecda447aa1e93e8d747673f4c">rsb</a>
+</li>
+<li>rsb_flag_triangular
+: <a class="el" href="classrsb.html#a3ea9a964debcbac70d35e964666f7a1c">rsb</a>
+</li>
+<li>rsb_flag_unit_diag_implicit
+: <a class="el" href="classrsb.html#a3e5c32923f3e360e980311315a27dc7d">rsb</a>
+</li>
+<li>rsb_flag_upper
+: <a class="el" href="classrsb.html#a9d9497934ece76bcf860a2a563056eca">rsb</a>
+</li>
+<li>rsb_flag_upper_hermitian
+: <a class="el" href="classrsb.html#a22eedbec9d19115a8658438f1c7cc496">rsb</a>
+</li>
+<li>rsb_flag_upper_symmetric
+: <a class="el" href="classrsb.html#ab17822f489868813f38ba9609245ae55">rsb</a>
+</li>
+<li>rsb_flag_upper_triangular
+: <a class="el" href="classrsb.html#ac3802654bb13df88bb2e7f371b12e5ea">rsb</a>
+</li>
+<li>rsb_flag_use_csr_reserved
+: <a class="el" href="classrsb.html#a9d39857a6f2ae454fd20d5bcc03ef17c">rsb</a>
+</li>
+<li>rsb_flag_use_halfword_indices
+: <a class="el" href="classrsb.html#ae2c87798ff9cee8bdc0eaacdec62a5d0">rsb</a>
+</li>
+<li>rsb_flag_use_halfword_indices_coo
+: <a class="el" href="classrsb.html#a6ff989a0fe4da2a71e72091fcb30a334">rsb</a>
+</li>
+<li>rsb_flag_use_halfword_indices_csr
+: <a class="el" href="classrsb.html#a7baa8d692038856c55489d2382f09e5d">rsb</a>
+</li>
+<li>rsb_flag_want_bcss_storage
+: <a class="el" href="classrsb.html#a8ad70221bf6a5f4b458f6b700b6af8df">rsb</a>
+</li>
+<li>rsb_flag_want_column_major_order
+: <a class="el" href="classrsb.html#a8786a38b2ca41b926b8ef6092a55b8a6">rsb</a>
+</li>
+<li>rsb_flag_want_coo_storage
+: <a class="el" href="classrsb.html#a9fda0eb0c128c193ba7d05bab64d7e90">rsb</a>
+</li>
+<li>rsb_flag_want_row_major_order
+: <a class="el" href="classrsb.html#a68ace12ecb8cbcc9a7c686b2b9665c29">rsb</a>
+</li>
+<li>rsb_io_want_bounded_box_computation
+: <a class="el" href="classrsb.html#a81a7107ceaa5d934eced8144f7de2338">rsb</a>
+</li>
+<li>rsb_io_want_cache_blocking_method
+: <a class="el" href="classrsb.html#aaf22b4c404442175bc58dc513bf13a89">rsb</a>
+</li>
+<li>rsb_io_want_executing_threads
+: <a class="el" href="classrsb.html#a0c15802bcd77b9b98a0968beffaee9cc">rsb</a>
+</li>
+<li>rsb_io_want_extra_verbose_interface
+: <a class="el" href="classrsb.html#a191f5492907ae4beca111b361955a791">rsb</a>
+</li>
+<li>rsb_io_want_is_initialized_marker
+: <a class="el" href="classrsb.html#aed7dc0ecede60b677144e8aba46d28b9">rsb</a>
+</li>
+<li>rsb_io_want_leaf_level_multivec
+: <a class="el" href="classrsb.html#aa89d96645cdd1a902fdfb0377a0a5ea2">rsb</a>
+</li>
+<li>rsb_io_want_librsb_etime
+: <a class="el" href="classrsb.html#a565392da24b3006eaeaf5c0d1c5a424d">rsb</a>
+</li>
+<li>rsb_io_want_max_memory_allocated
+: <a class="el" href="classrsb.html#a0ceca511d93a29126225dd783af190d2">rsb</a>
+</li>
+<li>rsb_io_want_max_memory_allocations
+: <a class="el" href="classrsb.html#afa4f68bc0184148f7790351c28cbae50">rsb</a>
+</li>
+<li>rsb_io_want_mem_alloc_cnt
+: <a class="el" href="classrsb.html#a658556e8116b0ff18bc19302fb66449a">rsb</a>
+</li>
+<li>rsb_io_want_mem_alloc_tot
+: <a class="el" href="classrsb.html#a512361fe2c126a7baa412e4b680d8a2f">rsb</a>
+</li>
+<li>rsb_io_want_memory_hierarchy_info_string
+: <a class="el" href="classrsb.html#a81327bb47b51d6c50e12c02171c8c3fe">rsb</a>
+</li>
+<li>rsb_io_want_output_stream
+: <a class="el" href="classrsb.html#a72c4b7daa9a9ba1c7887bb05dfb96b2c">rsb</a>
+</li>
+<li>rsb_io_want_sort_method
+: <a class="el" href="classrsb.html#ae4176512451ec7387ee2fbaec0c7f861">rsb</a>
+</li>
+<li>rsb_io_want_subdivision_multiplier
+: <a class="el" href="classrsb.html#ad5a1220ce0e7d5c4ce517150de22d80b">rsb</a>
+</li>
+<li>rsb_io_want_verbose_errors
+: <a class="el" href="classrsb.html#a90cf14925f34712589430925a0abb92e">rsb</a>
+</li>
+<li>rsb_io_want_verbose_exit
+: <a class="el" href="classrsb.html#a33d3ac5b6383e375f2239b780af50d3f">rsb</a>
+</li>
+<li>rsb_io_want_verbose_init
+: <a class="el" href="classrsb.html#abf4365a254c637b59b5f84dcef03c4e6">rsb</a>
+</li>
+<li>rsb_io_want_verbose_tuning
+: <a class="el" href="classrsb.html#ac95404408be9bc2045e8455881d21377">rsb</a>
+</li>
+<li>rsb_marf_eps
+: <a class="el" href="classrsb.html#a862ec78887803b5649251bd70bd7cba0">rsb</a>
+</li>
+<li>rsb_marf_eps_b
+: <a class="el" href="classrsb.html#a448f95924a27a7bc591db9590b62d6b5">rsb</a>
+</li>
+<li>rsb_marf_eps_l
+: <a class="el" href="classrsb.html#aa7582e5c9cddf8ad409485cbfa6ebac4">rsb</a>
+</li>
+<li>rsb_marf_eps_s
+: <a class="el" href="classrsb.html#aa1f0a3a95206057e5be739fd9b114e12">rsb</a>
+</li>
+<li>rsb_marf_rgb
+: <a class="el" href="classrsb.html#a9aa6c9b3d7034de75ebca4a5c1eba668">rsb</a>
+</li>
+<li>rsb_mif_index_storage_in_bytes__to__size_t
+: <a class="el" href="classrsb.html#a912caf1dfbc9eecd804ec0e9b330809f">rsb</a>
+</li>
+<li>rsb_mif_index_storage_in_bytes_per_nnz__to__rsb_real_t
+: <a class="el" href="classrsb.html#a728a103d20814d978ac073fc51791897">rsb</a>
+</li>
+<li>rsb_mif_leaves_count__to__rsb_blk_index_t
+: <a class="el" href="classrsb.html#ac954dfff99410e7223094406be0f19f9">rsb</a>
+</li>
+<li>rsb_mif_matrix_cols__to__rsb_coo_index_t
+: <a class="el" href="classrsb.html#a0cb66e8ecfec31c29be967b928caf767">rsb</a>
+</li>
+<li>rsb_mif_matrix_flags__to__rsb_flags_t
+: <a class="el" href="classrsb.html#ad0e9b8ffe63a338a7e03ad62d3a4b046">rsb</a>
+</li>
+<li>rsb_mif_matrix_info__to__char_p
+: <a class="el" href="classrsb.html#a24db597e798fc524428ff052bd5ee3bb">rsb</a>
+</li>
+<li>rsb_mif_matrix_nnz__to__rsb_nnz_index_t
+: <a class="el" href="classrsb.html#ae3d4d4559c433e7ac5dd51f63bd1933f">rsb</a>
+</li>
+<li>rsb_mif_matrix_rows__to__rsb_coo_index_t
+: <a class="el" href="classrsb.html#adcdc1cf3fe0032524c482bc2be4b4b7d">rsb</a>
+</li>
+<li>rsb_mif_matrix_typecode__to__rsb_type_t
+: <a class="el" href="classrsb.html#a008a647728ce9aa30846a913c0c620f9">rsb</a>
+</li>
+<li>rsb_mif_total_size__to__size_t
+: <a class="el" href="classrsb.html#a4f193b007e217530bf2a45c65d58673f">rsb</a>
+</li>
+<li>rsb_null_exit_options
+: <a class="el" href="classrsb.html#ad1a0a65364c48d23f9c82ef83c97c420">rsb</a>
+</li>
+<li>rsb_null_init_options
+: <a class="el" href="classrsb.html#a313effa5a93a26ea72326e6c89bdaf82">rsb</a>
+</li>
+<li>rsb_numerical_type_double
+: <a class="el" href="classrsb.html#af833bb7a31acb188d33424c3c16bd4cd">rsb</a>
+</li>
+<li>rsb_numerical_type_double_complex
+: <a class="el" href="classrsb.html#a1865b95dcc4fac4f0fe21dfe8c4ef036">rsb</a>
+</li>
+<li>rsb_numerical_type_float
+: <a class="el" href="classrsb.html#ac18d8381c23b54ccd523e7b4e50af04a">rsb</a>
+</li>
+<li>rsb_numerical_type_float_complex
+: <a class="el" href="classrsb.html#ace3d848255b280a0531407c19fffaec7">rsb</a>
+</li>
+<li>rsb_numerical_type_int
+: <a class="el" href="classrsb.html#a31d8f196938e468a3891fb80f1decc1f">rsb</a>
+</li>
+<li>rsb_numerical_type_same_type
+: <a class="el" href="classrsb.html#a43c72bf61ae0f1961908e27c7dd76f01">rsb</a>
+</li>
+<li>rsb_precf_ilu0
+: <a class="el" href="classrsb.html#aa3e1b0443ca75f7f78983737770ee95a">rsb</a>
+</li>
+<li>rsb_transposition_c
+: <a class="el" href="classrsb.html#a2e308172e38ee4453d556792acbe464c">rsb</a>
+</li>
+<li>rsb_transposition_n
+: <a class="el" href="classrsb.html#a89c7627f24fecaf23ead8300f671314f">rsb</a>
+</li>
+<li>rsb_transposition_t
+: <a class="el" href="classrsb.html#a5c11d5b2aa58a9c9067ec914265cd28f">rsb</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
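[Note: the rsb_err_* values indexed above are the error codes returned by most rsb_* calls, and rsb_strerror_r() (indexed on the functions page earlier in this patch) renders them as text. A small sketch, assuming the documented signature:

    /* Sketch: turning rsb_err_* codes into messages (assumed 1.2 API). */
    #include <rsb.h>
    #include <stdio.h>

    void report(rsb_err_t errval)
    {
        char buf[256];
        if (errval != RSB_ERR_NO_ERROR &&
            rsb_strerror_r(errval, buf, sizeof(buf)) == RSB_ERR_NO_ERROR)
            fprintf(stderr, "librsb: %s\n", buf);
    }
]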
diff --git a/doc/html/functions_vars_0x76.html b/doc/html/functions_vars_0x76.html
new file mode 100644
index 0000000..266d445
--- /dev/null
+++ b/doc/html/functions_vars_0x76.html
@@ -0,0 +1,78 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Data Fields - Variables</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li><a href="functions.html"><span>All</span></a></li>
+      <li><a href="functions_func.html"><span>Functions</span></a></li>
+      <li class="current"><a href="functions_vars.html"><span>Variables</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li><a href="functions_vars.html#index_a"><span>a</span></a></li>
+      <li><a href="functions_vars_0x62.html#index_b"><span>b</span></a></li>
+      <li><a href="functions_vars_0x6b.html#index_k"><span>k</span></a></li>
+      <li><a href="functions_vars_0x6e.html#index_n"><span>n</span></a></li>
+      <li><a href="functions_vars_0x72.html#index_r"><span>r</span></a></li>
+      <li class="current"><a href="functions_vars_0x76.html#index_v"><span>v</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+ 
+
+<h3><a class="anchor" id="index_v"></a>- v -</h3><ul>
+<li>values
+: <a class="el" href="structrsb__initopts.html#a0a64d546db2c6445e4a33068cffa6694">rsb_initopts</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
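[Note: the action, keys, n_pairs and values members indexed across these functions_vars pages all belong to struct rsb_initopts, which batches option key/value pairs for library (re)initialization. A hypothetical sketch; the field names come from the pages above, while RSB_IO_SPECIFIER_SET and rsb_lib_reinit() are assumed from the documented API:

    /* Hypothetical sketch: setting one rsb_initopts key/value pair. */
    #include <rsb.h>

    rsb_err_t set_threads(rsb_int_t nt)
    {
        enum rsb_opt_t keys[] = { RSB_IO_WANT_EXECUTING_THREADS };
        void *values[]        = { &nt };
        struct rsb_initopts io;
        io.action  = RSB_IO_SPECIFIER_SET;  /* set, rather than query */
        io.keys    = keys;
        io.values  = values;
        io.n_pairs = 1;
        return rsb_lib_reinit(&io);
    }
]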
diff --git a/doc/html/globals.html b/doc/html/globals.html
new file mode 100644
index 0000000..11d8658
--- /dev/null
+++ b/doc/html/globals.html
@@ -0,0 +1,1729 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Globals</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="files.html"><span>File List</span></a></li>
+      <li class="current"><a href="globals.html"><span>Globals</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li class="current"><a href="globals.html"><span>All</span></a></li>
+      <li><a href="globals_func.html"><span>Functions</span></a></li>
+      <li><a href="globals_type.html"><span>Typedefs</span></a></li>
+      <li><a href="globals_enum.html"><span>Enumerations</span></a></li>
+      <li><a href="globals_eval.html"><span>Enumerator</span></a></li>
+      <li><a href="globals_defs.html"><span>Macros</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li class="current"><a href="globals.html#index_b"><span>b</span></a></li>
+      <li><a href="globals_0x72.html#index_r"><span>r</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+<div class="textblock">Here is a list of all functions, variables, defines, enums, and typedefs with links to the files they belong to:</div>
+
+<h3><a class="anchor" id="index_b"></a>- b -</h3><ul>
+<li>blas_base
+: <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa450c812108b1c81a0f6ef65c51f64d7b">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa450c812108b1c81a0f6ef65c51f64d7b">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa450c812108b1c81a0f6ef65c51f64d7b">rsb_libspblas.h</a>
+</li>
+<li>blas_base_type
+: <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a3fe740ad5a139d723de260d638987e9e">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9e">rsb_libspblas.h</a>
+</li>
+<li>blas_block
+: <a class="el" href="rsb__libspblas_8c.html#a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa">blas_sparse.h</a>
+</li>
+<li>blas_cmach_type
+: <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aa">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aa">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aa">rsb_libspblas.h</a>
+</li>
+<li>blas_colmajor
+: <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e">rsb_libspblas.h</a>
+</li>
+<li>blas_complex
+: <a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8a1a77ce97fa91f37a776fe3af3f0589d8">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a09d8be749e909b403b1563f0ca84aef8a1a77ce97fa91f37a776fe3af3f0589d8">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a09d8be749e909b403b1563f0ca84aef8a1a77ce97fa91f37a776fe3af3f0589d8">rsb_libspblas.h</a>
+</li>
+<li>blas_conj
+: <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1">rsb_libspblas.h</a>
+</li>
+<li>blas_conj_trans
+: <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c">rsb_libspblas.h</a>
+</li>
+<li>blas_conj_type
+: <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a125c156d54359fba48a6b9cf2a2d0a07">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusaxpy()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gafaf15e2530cd078b260bb744e00487cb">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafaf15e2530cd078b260bb744e00487cb">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafaf15e2530cd078b260bb744e00487cb">rsb_libspblas.h</a>
+</li>
+<li>blas_cusaxpy_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac6189fef9b94289f2b8a5b6b7287b50b">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac6189fef9b94289f2b8a5b6b7287b50b">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac6189fef9b94289f2b8a5b6b7287b50b">blas_sparse.h</a>
+</li>
+<li>BLAS_cuscr_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga3483c364b4afec22621e46059b166247">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3483c364b4afec22621e46059b166247">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3483c364b4afec22621e46059b166247">rsb_libspblas.h</a>
+</li>
+<li>blas_cuscr_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaf4d21720c592de22cfd4139517d9d255">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf4d21720c592de22cfd4139517d9d255">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf4d21720c592de22cfd4139517d9d255">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cuscr_block_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">rsb_libspblas.h</a>
+</li>
+<li>blas_cuscr_block_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga967bfc819ed66559e96ae55a6826d1f8">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga967bfc819ed66559e96ae55a6826d1f8">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga967bfc819ed66559e96ae55a6826d1f8">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cuscr_end()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac2b5eccd5cf442b5e2e79201d62ca2b5">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac2b5eccd5cf442b5e2e79201d62ca2b5">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac2b5eccd5cf442b5e2e79201d62ca2b5">rsb_libspblas.h</a>
+</li>
+<li>blas_cuscr_end_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaa78d3bef027e5a29ab5e5dd6188bcd75">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa78d3bef027e5a29ab5e5dd6188bcd75">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa78d3bef027e5a29ab5e5dd6188bcd75">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cuscr_insert_block()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga42054351f49850f079733143b2af87fb">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga42054351f49850f079733143b2af87fb">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga42054351f49850f079733143b2af87fb">rsb_libspblas.h</a>
+</li>
+<li>blas_cuscr_insert_block_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga527ae15ee9e003d948494d9fcdad5dba">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga527ae15ee9e003d948494d9fcdad5dba">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga527ae15ee9e003d948494d9fcdad5dba">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cuscr_insert_clique()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaf089aaac5d65a4e38130b25d5ba2ba27">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf089aaac5d65a4e38130b25d5ba2ba27">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf089aaac5d65a4e38130b25d5ba2ba27">rsb_libspblas.h</a>
+</li>
+<li>blas_cuscr_insert_clique_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga06acafbf28371b1ad8a75a85173261e6">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga06acafbf28371b1ad8a75a85173261e6">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga06acafbf28371b1ad8a75a85173261e6">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cuscr_insert_col()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga589495aa8acd4eac99ef9132bc4062c9">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga589495aa8acd4eac99ef9132bc4062c9">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga589495aa8acd4eac99ef9132bc4062c9">rsb_libspblas.h</a>
+</li>
+<li>blas_cuscr_insert_col_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga1aadf4dc810ff6eb123a1bf9c859efe8">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1aadf4dc810ff6eb123a1bf9c859efe8">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1aadf4dc810ff6eb123a1bf9c859efe8">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cuscr_insert_entries()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga5af752a3fcb2898412f576eee7d9d618">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5af752a3fcb2898412f576eee7d9d618">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5af752a3fcb2898412f576eee7d9d618">rsb_libspblas.h</a>
+</li>
+<li>blas_cuscr_insert_entries_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga3deb906fcd5f9b9221b5865541c57d18">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3deb906fcd5f9b9221b5865541c57d18">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3deb906fcd5f9b9221b5865541c57d18">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cuscr_insert_entry()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaa39564978ebda8a88f8d19e3e060bc4d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa39564978ebda8a88f8d19e3e060bc4d">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa39564978ebda8a88f8d19e3e060bc4d">rsb_libspblas.h</a>
+</li>
+<li>blas_cuscr_insert_entry_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga6d735497bdd3bbafbb6168cb0fde5103">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6d735497bdd3bbafbb6168cb0fde5103">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6d735497bdd3bbafbb6168cb0fde5103">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cuscr_insert_row()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga77929c94cee3278cc7594a3f1377f5f8">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga77929c94cee3278cc7594a3f1377f5f8">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga77929c94cee3278cc7594a3f1377f5f8">rsb_libspblas.h</a>
+</li>
+<li>blas_cuscr_insert_row_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad4acfbfdf33a5682ac657add0292711d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad4acfbfdf33a5682ac657add0292711d">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad4acfbfdf33a5682ac657add0292711d">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cuscr_variable_block_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga95174fcf3bfbef91ab6b3b85fc90b128">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga95174fcf3bfbef91ab6b3b85fc90b128">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga95174fcf3bfbef91ab6b3b85fc90b128">rsb_libspblas.h</a>
+</li>
+<li>blas_cuscr_variable_block_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga24a2dac4570e6021fdcc5c84b52fb5bb">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga24a2dac4570e6021fdcc5c84b52fb5bb">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga24a2dac4570e6021fdcc5c84b52fb5bb">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusdot()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gae02711e85989d740894aa260028cab15">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae02711e85989d740894aa260028cab15">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae02711e85989d740894aa260028cab15">rsb_libspblas.h</a>
+</li>
+<li>blas_cusdot_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga6805ad5c8346534e68b436708920d135">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6805ad5c8346534e68b436708920d135">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6805ad5c8346534e68b436708920d135">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusga()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga71f2df0176e5f44bf482ea2386ac5fac">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga71f2df0176e5f44bf482ea2386ac5fac">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga71f2df0176e5f44bf482ea2386ac5fac">rsb_libspblas.h</a>
+</li>
+<li>blas_cusga_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga284485bb91904fe1324257ba1ab3a982">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga284485bb91904fe1324257ba1ab3a982">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga284485bb91904fe1324257ba1ab3a982">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusget_diag()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga4ec4b6dce3701c5803efa6b7455e1504">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4ec4b6dce3701c5803efa6b7455e1504">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4ec4b6dce3701c5803efa6b7455e1504">rsb_libspblas.h</a>
+</li>
+<li>blas_cusget_diag_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga4865a8fda031074a0d91cf5c548584b9">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4865a8fda031074a0d91cf5c548584b9">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4865a8fda031074a0d91cf5c548584b9">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusget_element()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga4c7eae1cfcd8cafc16f31b169c4a7514">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4c7eae1cfcd8cafc16f31b169c4a7514">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4c7eae1cfcd8cafc16f31b169c4a7514">rsb_libspblas.h</a>
+</li>
+<li>blas_cusget_element_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga104bc9ee1e6ce32012933e822019ecf0">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga104bc9ee1e6ce32012933e822019ecf0">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga104bc9ee1e6ce32012933e822019ecf0">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusget_infinity_norm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga65e5bef193bd5a2d47e80bff7eebed8e">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga65e5bef193bd5a2d47e80bff7eebed8e">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga65e5bef193bd5a2d47e80bff7eebed8e">rsb_libspblas.h</a>
+</li>
+<li>blas_cusget_infinity_norm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gacefa288104224e6c8f069f4001dacc08">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gacefa288104224e6c8f069f4001dacc08">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gacefa288104224e6c8f069f4001dacc08">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusget_matrix_nnz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gafe27f3044269d37cadb569fc6796ac01">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafe27f3044269d37cadb569fc6796ac01">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafe27f3044269d37cadb569fc6796ac01">rsb_libspblas.h</a>
+</li>
+<li>blas_cusget_matrix_nnz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga19e30bb70673342b4d6308bd9cf46884">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga19e30bb70673342b4d6308bd9cf46884">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga19e30bb70673342b4d6308bd9cf46884">blas_sparse.h</a>
+</li>
+<li>BLAS_cusget_rows_nnz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac50e955d6e2bff77e2c3ac2146c77aaf">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac50e955d6e2bff77e2c3ac2146c77aaf">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac50e955d6e2bff77e2c3ac2146c77aaf">rsb_libspblas.h</a>
+</li>
+<li>blas_cusget_rows_nnz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga9e11da08762387d8a7a885665298e815">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9e11da08762387d8a7a885665298e815">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9e11da08762387d8a7a885665298e815">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusget_rows_sparse()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga23f0c1852e05a426d24d2eb1bcae168b">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga23f0c1852e05a426d24d2eb1bcae168b">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga23f0c1852e05a426d24d2eb1bcae168b">rsb_libspblas.h</a>
+</li>
+<li>blas_cusget_rows_sparse_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga04751c01dcfb6730a33eaa91f403dd09">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga04751c01dcfb6730a33eaa91f403dd09">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga04751c01dcfb6730a33eaa91f403dd09">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusgz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga2a4c72eb85493e921f4d40e18edb83ef">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2a4c72eb85493e921f4d40e18edb83ef">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2a4c72eb85493e921f4d40e18edb83ef">rsb_libspblas.h</a>
+</li>
+<li>blas_cusgz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga32fdcc497a0db0ba36b413725ddc8c13">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga32fdcc497a0db0ba36b413725ddc8c13">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga32fdcc497a0db0ba36b413725ddc8c13">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusmm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga8c87639294b57d2893cd29f64902a64d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8c87639294b57d2893cd29f64902a64d">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8c87639294b57d2893cd29f64902a64d">rsb_libspblas.h</a>
+</li>
+<li>blas_cusmm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga2dc070f4b09c4b37d89ab9a0fb16352b">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2dc070f4b09c4b37d89ab9a0fb16352b">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2dc070f4b09c4b37d89ab9a0fb16352b">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusmv()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">rsb_libspblas.h</a>
+</li>
+<li>blas_cusmv_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga3d60593a2a4ea8c081590b392c39419d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3d60593a2a4ea8c081590b392c39419d">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3d60593a2a4ea8c081590b392c39419d">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusrows_scale()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gafc79de03622ceeb2e0b4343fe5904a36">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafc79de03622ceeb2e0b4343fe5904a36">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafc79de03622ceeb2e0b4343fe5904a36">rsb_libspblas.h</a>
+</li>
+<li>blas_cusrows_scale_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gae09ac29c14cede27a8d6a2be2687453e">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae09ac29c14cede27a8d6a2be2687453e">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae09ac29c14cede27a8d6a2be2687453e">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cussc()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga1b93628d321fbb77a50f98b467a3ff84">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1b93628d321fbb77a50f98b467a3ff84">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1b93628d321fbb77a50f98b467a3ff84">rsb_libspblas.h</a>
+</li>
+<li>blas_cussc_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gafc77b392db05fc22122d4639595cccb3">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafc77b392db05fc22122d4639595cccb3">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafc77b392db05fc22122d4639595cccb3">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusset_element()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga778acfebd02199f440b890b0176af19c">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga778acfebd02199f440b890b0176af19c">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga778acfebd02199f440b890b0176af19c">rsb_libspblas.h</a>
+</li>
+<li>blas_cusset_element_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga4a32533889a4ed82a21f457d1253317d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4a32533889a4ed82a21f457d1253317d">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4a32533889a4ed82a21f457d1253317d">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusset_elements()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga3b358be87656e2d8065e1d30dd8060f4">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3b358be87656e2d8065e1d30dd8060f4">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3b358be87656e2d8065e1d30dd8060f4">rsb_libspblas.h</a>
+</li>
+<li>blas_cusset_elements_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga38398053da29e668ee440e55f675532b">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga38398053da29e668ee440e55f675532b">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga38398053da29e668ee440e55f675532b">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cussm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad864666e842f7d0878b1fb9d57e80c28">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad864666e842f7d0878b1fb9d57e80c28">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad864666e842f7d0878b1fb9d57e80c28">rsb_libspblas.h</a>
+</li>
+<li>blas_cussm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac3d8f0b6742566cbbadf6b18c9aa40b5">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac3d8f0b6742566cbbadf6b18c9aa40b5">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac3d8f0b6742566cbbadf6b18c9aa40b5">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cussv()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga4c327ba1fa391b550f2fc5580ad49bdf">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4c327ba1fa391b550f2fc5580ad49bdf">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4c327ba1fa391b550f2fc5580ad49bdf">rsb_libspblas.h</a>
+</li>
+<li>blas_cussv_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga39b0ab077486c1fc3766d68ae9048447">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga39b0ab077486c1fc3766d68ae9048447">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga39b0ab077486c1fc3766d68ae9048447">rsb_libspblas.h</a>
+</li>
+<li>blas_decreasing_order
+: <a class="el" href="blas__sparse_8h.html#a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4">rsb_libspblas.h</a>
+</li>
+<li>blas_diag_type
+: <a class="el" href="blas__sparse_8h.html#ad7b35ac9114bfe21e15d011bf878b164">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#ad7b35ac9114bfe21e15d011bf878b164">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#ad7b35ac9114bfe21e15d011bf878b164">rsb_libspblas.h</a>
+</li>
+<li>blas_double_precision
+: <a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8a28a1eb1d9dde753641767cb33f7d5852">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a09d8be749e909b403b1563f0ca84aef8a28a1eb1d9dde753641767cb33f7d5852">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a09d8be749e909b403b1563f0ca84aef8a28a1eb1d9dde753641767cb33f7d5852">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusaxpy()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga31b475fb2cc3f50775a5b6db930ab570">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga31b475fb2cc3f50775a5b6db930ab570">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga31b475fb2cc3f50775a5b6db930ab570">rsb_libspblas.h</a>
+</li>
+<li>blas_dusaxpy_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga90f1fe9fa99b947c8096befdbfb49fb3">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga90f1fe9fa99b947c8096befdbfb49fb3">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga90f1fe9fa99b947c8096befdbfb49fb3">rsb_libspblas.h</a>
+</li>
+<li>BLAS_duscr_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac931dcb1129ee3016ab82602c3d14fee">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac931dcb1129ee3016ab82602c3d14fee">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac931dcb1129ee3016ab82602c3d14fee">rsb_libspblas.h</a>
+</li>
+<li>blas_duscr_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad7d5969e9edee49441fc89d22715e60d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad7d5969e9edee49441fc89d22715e60d">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad7d5969e9edee49441fc89d22715e60d">rsb_libspblas.h</a>
+</li>
+<li>BLAS_duscr_block_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">rsb_libspblas.h</a>
+</li>
+<li>blas_duscr_block_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga864facf0316453a27af4b7024a11453b">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga864facf0316453a27af4b7024a11453b">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga864facf0316453a27af4b7024a11453b">rsb_libspblas.h</a>
+</li>
+<li>BLAS_duscr_end()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d9ce97bf054b1e3750eaae5d4e6c335">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d9ce97bf054b1e3750eaae5d4e6c335">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d9ce97bf054b1e3750eaae5d4e6c335">rsb_libspblas.h</a>
+</li>
+<li>blas_duscr_end_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga2120eb06b87f0e85d03a368e5bc55485">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2120eb06b87f0e85d03a368e5bc55485">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2120eb06b87f0e85d03a368e5bc55485">rsb_libspblas.h</a>
+</li>
+<li>BLAS_duscr_insert_block()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d35aa3e27cdbf8a50db5b47ff5e0892">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d35aa3e27cdbf8a50db5b47ff5e0892">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d35aa3e27cdbf8a50db5b47ff5e0892">rsb_libspblas.h</a>
+</li>
+<li>blas_duscr_insert_block_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga38012bbc4e99df72fb95409a4860ead7">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga38012bbc4e99df72fb95409a4860ead7">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga38012bbc4e99df72fb95409a4860ead7">rsb_libspblas.h</a>
+</li>
+<li>BLAS_duscr_insert_clique()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga290547e34be3648b2fe6a7378e59a7ec">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga290547e34be3648b2fe6a7378e59a7ec">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga290547e34be3648b2fe6a7378e59a7ec">rsb_libspblas.h</a>
+</li>
+<li>blas_duscr_insert_clique_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga1f7870f8a1114b94444c721c933e8bef">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1f7870f8a1114b94444c721c933e8bef">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1f7870f8a1114b94444c721c933e8bef">rsb_libspblas.h</a>
+</li>
+<li>BLAS_duscr_insert_col()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga8ee73d3b27bdc68e12c85ba281a337be">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8ee73d3b27bdc68e12c85ba281a337be">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8ee73d3b27bdc68e12c85ba281a337be">rsb_libspblas.h</a>
+</li>
+<li>blas_duscr_insert_col_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga5645393bb00d715d882e8e2d55c3f0d1">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5645393bb00d715d882e8e2d55c3f0d1">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5645393bb00d715d882e8e2d55c3f0d1">rsb_libspblas.h</a>
+</li>
+<li>BLAS_duscr_insert_entries()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gae0683bc8f0af5dd3e53b964190f9e1b4">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae0683bc8f0af5dd3e53b964190f9e1b4">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae0683bc8f0af5dd3e53b964190f9e1b4">rsb_libspblas.h</a>
+</li>
+<li>blas_duscr_insert_entries_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac2c1a4c7b2cebca56aedbad7a002e15f">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac2c1a4c7b2cebca56aedbad7a002e15f">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac2c1a4c7b2cebca56aedbad7a002e15f">rsb_libspblas.h</a>
+</li>
+<li>BLAS_duscr_insert_entry()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga346ff5263bf0b3a5d7dda94e2000130c">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga346ff5263bf0b3a5d7dda94e2000130c">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga346ff5263bf0b3a5d7dda94e2000130c">rsb_libspblas.h</a>
+</li>
+<li>blas_duscr_insert_entry_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga29c2f202a144845cc1d32c8d65bd5c5f">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga29c2f202a144845cc1d32c8d65bd5c5f">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga29c2f202a144845cc1d32c8d65bd5c5f">rsb_libspblas.h</a>
+</li>
+<li>BLAS_duscr_insert_row()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac3472ca6b036771a68d6f5f01387e482">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac3472ca6b036771a68d6f5f01387e482">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac3472ca6b036771a68d6f5f01387e482">rsb_libspblas.h</a>
+</li>
+<li>blas_duscr_insert_row_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaa72e5450302fa424dcd6cfae0bad872d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa72e5450302fa424dcd6cfae0bad872d">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa72e5450302fa424dcd6cfae0bad872d">rsb_libspblas.h</a>
+</li>
+<li>BLAS_duscr_variable_block_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gae0c3c6dc5503e21afb8192efb0f66edd">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae0c3c6dc5503e21afb8192efb0f66edd">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae0c3c6dc5503e21afb8192efb0f66edd">rsb_libspblas.h</a>
+</li>
+<li>blas_duscr_variable_block_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga12c7c1bdd46724147dbbd9b38dd2028e">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga12c7c1bdd46724147dbbd9b38dd2028e">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga12c7c1bdd46724147dbbd9b38dd2028e">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusdot()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga2ff8ae1b5a89cdb1bfd23b7b27635614">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2ff8ae1b5a89cdb1bfd23b7b27635614">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2ff8ae1b5a89cdb1bfd23b7b27635614">rsb_libspblas.h</a>
+</li>
+<li>blas_dusdot_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga891919cc22b2f9db6b26c857e2080b48">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga891919cc22b2f9db6b26c857e2080b48">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga891919cc22b2f9db6b26c857e2080b48">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusga()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaa253fd591971e664e48e058e85855882">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa253fd591971e664e48e058e85855882">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa253fd591971e664e48e058e85855882">rsb_libspblas.h</a>
+</li>
+<li>blas_dusga_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga21d8b0bd816bfd21371f70ca82ee9d9c">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga21d8b0bd816bfd21371f70ca82ee9d9c">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga21d8b0bd816bfd21371f70ca82ee9d9c">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusget_diag()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga35b70a7c3083b791cf1b94cb20ef57be">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga35b70a7c3083b791cf1b94cb20ef57be">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga35b70a7c3083b791cf1b94cb20ef57be">rsb_libspblas.h</a>
+</li>
+<li>blas_dusget_diag_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga7cfde04c833adeb887db75f4b2e104dd">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga7cfde04c833adeb887db75f4b2e104dd">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga7cfde04c833adeb887db75f4b2e104dd">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusget_element()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gacf35fa073f6cc991efe75f6a012a9a04">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gacf35fa073f6cc991efe75f6a012a9a04">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gacf35fa073f6cc991efe75f6a012a9a04">rsb_libspblas.h</a>
+</li>
+<li>blas_dusget_element_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga6443c32b223693698a8a0f0198ae4bee">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6443c32b223693698a8a0f0198ae4bee">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6443c32b223693698a8a0f0198ae4bee">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusget_infinity_norm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga39b4e25d5d5ce080f8dd994856e41fd0">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga39b4e25d5d5ce080f8dd994856e41fd0">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga39b4e25d5d5ce080f8dd994856e41fd0">rsb_libspblas.h</a>
+</li>
+<li>blas_dusget_infinity_norm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga15c7a93ed41a5488c0ef814d2061214a">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga15c7a93ed41a5488c0ef814d2061214a">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga15c7a93ed41a5488c0ef814d2061214a">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusget_matrix_nnz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga441bff94fdc50b9bf6e180d36f51c3ce">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga441bff94fdc50b9bf6e180d36f51c3ce">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga441bff94fdc50b9bf6e180d36f51c3ce">rsb_libspblas.h</a>
+</li>
+<li>blas_dusget_matrix_nnz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga3a4bc573dc07849e7a72ecb2d2f0c31d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3a4bc573dc07849e7a72ecb2d2f0c31d">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3a4bc573dc07849e7a72ecb2d2f0c31d">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusget_rows_nnz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gab866cf0951b576a47da3864d668919f5">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gab866cf0951b576a47da3864d668919f5">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gab866cf0951b576a47da3864d668919f5">rsb_libspblas.h</a>
+</li>
+<li>blas_dusget_rows_nnz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac09a79789dc8b79d2e5a375732703103">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac09a79789dc8b79d2e5a375732703103">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac09a79789dc8b79d2e5a375732703103">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusget_rows_sparse()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga498d143bae71d800dc35e2f1ee071359">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga498d143bae71d800dc35e2f1ee071359">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga498d143bae71d800dc35e2f1ee071359">rsb_libspblas.h</a>
+</li>
+<li>blas_dusget_rows_sparse_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaf2e6ab2c5cbd23a7690bbe8e26794033">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf2e6ab2c5cbd23a7690bbe8e26794033">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf2e6ab2c5cbd23a7690bbe8e26794033">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusgz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga0b26bd51a324ee09433dbfa995396344">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga0b26bd51a324ee09433dbfa995396344">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga0b26bd51a324ee09433dbfa995396344">rsb_libspblas.h</a>
+</li>
+<li>blas_dusgz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gadd448e0d4a33417634e6232c77d8a82a">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gadd448e0d4a33417634e6232c77d8a82a">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gadd448e0d4a33417634e6232c77d8a82a">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusmm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaeeddeb634efe4448a31d62fb547362f6">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaeeddeb634efe4448a31d62fb547362f6">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaeeddeb634efe4448a31d62fb547362f6">rsb_libspblas.h</a>
+</li>
+<li>blas_dusmm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaa6f99d27ec6f88cca6c6cfac1e8ce7e3">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa6f99d27ec6f88cca6c6cfac1e8ce7e3">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa6f99d27ec6f88cca6c6cfac1e8ce7e3">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusmv()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">rsb_libspblas.h</a>
+</li>
+<li>blas_dusmv_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga7172d1d1d0f3310ceaf9ecd1d128407b">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga7172d1d1d0f3310ceaf9ecd1d128407b">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga7172d1d1d0f3310ceaf9ecd1d128407b">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusrows_scale()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga01917c64887638dfb5226be1f87d964a">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga01917c64887638dfb5226be1f87d964a">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga01917c64887638dfb5226be1f87d964a">rsb_libspblas.h</a>
+</li>
+<li>blas_dusrows_scale_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga9f09f9d05e01d5b354ce234781e3945a">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9f09f9d05e01d5b354ce234781e3945a">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9f09f9d05e01d5b354ce234781e3945a">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dussc()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac71029e615c6c893b54e2f9395a536a4">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac71029e615c6c893b54e2f9395a536a4">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac71029e615c6c893b54e2f9395a536a4">rsb_libspblas.h</a>
+</li>
+<li>blas_dussc_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga98ac28de307a8713020edd41be98d455">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga98ac28de307a8713020edd41be98d455">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga98ac28de307a8713020edd41be98d455">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusset_element()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac8aa3ed1e29f2555519421290d236d0c">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac8aa3ed1e29f2555519421290d236d0c">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac8aa3ed1e29f2555519421290d236d0c">rsb_libspblas.h</a>
+</li>
+<li>blas_dusset_element_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gab50cd8a5a6a5d866789628da0c9141a2">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gab50cd8a5a6a5d866789628da0c9141a2">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gab50cd8a5a6a5d866789628da0c9141a2">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusset_elements()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gae34ff937437af99d317739192e2783da">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae34ff937437af99d317739192e2783da">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae34ff937437af99d317739192e2783da">rsb_libspblas.h</a>
+</li>
+<li>blas_dusset_elements_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga8e2acb49dac4221d1554c30238bd6747">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8e2acb49dac4221d1554c30238bd6747">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8e2acb49dac4221d1554c30238bd6747">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dussm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaad6ff4b3cce242f76362e6ad8a947713">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaad6ff4b3cce242f76362e6ad8a947713">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaad6ff4b3cce242f76362e6ad8a947713">rsb_libspblas.h</a>
+</li>
+<li>blas_dussm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga4b93f6ef00d1aa3197a45a7e492edcd6">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4b93f6ef00d1aa3197a45a7e492edcd6">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4b93f6ef00d1aa3197a45a7e492edcd6">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dussv()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gade1bbec9b8263a2a5e76112f1042576b">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gade1bbec9b8263a2a5e76112f1042576b">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gade1bbec9b8263a2a5e76112f1042576b">rsb_libspblas.h</a>
+</li>
+<li>blas_dussv_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga36f989895809beaafaa57bb5ab41347f">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga36f989895809beaafaa57bb5ab41347f">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga36f989895809beaafaa57bb5ab41347f">rsb_libspblas.h</a>
+</li>
+<li>blas_emax
+: <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa26e73a26ce9e06149fff858bdfb5f363">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa26e73a26ce9e06149fff858bdfb5f363">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa26e73a26ce9e06149fff858bdfb5f363">rsb_libspblas.h</a>
+</li>
+<li>blas_emin
+: <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaad93796f5d1a8bc7bb4d9512dc312e8df">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaad93796f5d1a8bc7bb4d9512dc312e8df">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaad93796f5d1a8bc7bb4d9512dc312e8df">rsb_libspblas.h</a>
+</li>
+<li>BLAS_ENUM_H
+: <a class="el" href="blas__sparse_8h.html#aab00e94b9818e92bb03c32f7ec677932">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#aab00e94b9818e92bb03c32f7ec677932">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#aab00e94b9818e92bb03c32f7ec677932">rsb_libspblas.h</a>
+</li>
+<li>blas_eps
+: <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3e407f69dd9a70e04e91602a3d76ae4a">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3e407f69dd9a70e04e91602a3d76ae4a">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3e407f69dd9a70e04e91602a3d76ae4a">rsb_libspblas.h</a>
+</li>
+<li>blas_field_type
+: <a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a09d8be749e909b403b1563f0ca84aef8">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a09d8be749e909b403b1563f0ca84aef8">rsb_libspblas.h</a>
+</li>
+<li>blas_frobenius_norm
+: <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba607f3142e766379f65fecd8964e9a8ed">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba607f3142e766379f65fecd8964e9a8ed">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba607f3142e766379f65fecd8964e9a8ed">rsb_libspblas.h</a>
+</li>
+<li>blas_general
+: <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866">rsb_libspblas.h</a>
+</li>
+<li>blas_handle_type
+: <a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7cb10fb1b47b79ef278d6f09d571bd06">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06">rsb_libspblas.h</a>
+</li>
+<li>blas_hermitian
+: <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c">rsb_libspblas.h</a>
+</li>
+<li>blas_ieee
+: <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa640085acde3bcb1c78c42e9b5838c714">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa640085acde3bcb1c78c42e9b5838c714">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa640085acde3bcb1c78c42e9b5838c714">rsb_libspblas.h</a>
+</li>
+<li>blas_increasing_order
+: <a class="el" href="blas__sparse_8h.html#a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7">rsb_libspblas.h</a>
+</li>
+<li>blas_inf_norm
+: <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952bab50c138192cb933e81550900a44d187c">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952bab50c138192cb933e81550900a44d187c">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952bab50c138192cb933e81550900a44d187c">rsb_libspblas.h</a>
+</li>
+<li>blas_invalid_handle
+: <a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">rsb_libspblas.h</a>
+</li>
+<li>blas_irregular
+: <a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66">rsb_libspblas.h</a>
+</li>
+<li>blas_jrot_inner
+: <a class="el" href="blas__sparse_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a3c18fddd1929b245ab4b948b63d57b0a">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#abdf3d2dd2387ff18e265347d2dfc1f04a3c18fddd1929b245ab4b948b63d57b0a">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a3c18fddd1929b245ab4b948b63d57b0a">rsb_libspblas.h</a>
+</li>
+<li>blas_jrot_outer
+: <a class="el" href="blas__sparse_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a1ab4a6e0e69cdaa540b3415617e1ea05">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#abdf3d2dd2387ff18e265347d2dfc1f04a1ab4a6e0e69cdaa540b3415617e1ea05">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a1ab4a6e0e69cdaa540b3415617e1ea05">rsb_libspblas.h</a>
+</li>
+<li>blas_jrot_sorted
+: <a class="el" href="blas__sparse_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a85c43836ee3a19a39f41d2001761e042">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#abdf3d2dd2387ff18e265347d2dfc1f04a85c43836ee3a19a39f41d2001761e042">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a85c43836ee3a19a39f41d2001761e042">rsb_libspblas.h</a>
+</li>
+<li>blas_jrot_type
+: <a class="el" href="blas__sparse_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#abdf3d2dd2387ff18e265347d2dfc1f04">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04">rsb_libspblas.h</a>
+</li>
+<li>blas_left_side
+: <a class="el" href="blas__sparse_8h.html#ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181">rsb_libspblas.h</a>
+</li>
+<li>blas_lower
+: <a class="el" href="blas__sparse_8h.html#acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26">rsb_libspblas.h</a>
+</li>
+<li>blas_lower_hermitian
+: <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">rsb_libspblas.h</a>
+</li>
+<li>blas_lower_symmetric
+: <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">rsb_libspblas.h</a>
+</li>
+<li>blas_lower_triangular
+: <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">rsb_libspblas.h</a>
+</li>
+<li>blas_max_norm
+: <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba6a806e7014a17f2b175780210e43d0cf">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba6a806e7014a17f2b175780210e43d0cf">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba6a806e7014a17f2b175780210e43d0cf">rsb_libspblas.h</a>
+</li>
+<li>blas_new_handle
+: <a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a0af06bd9167e03014cc95fffaa2901e5">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7cb10fb1b47b79ef278d6f09d571bd06a0af06bd9167e03014cc95fffaa2901e5">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a0af06bd9167e03014cc95fffaa2901e5">rsb_libspblas.h</a>
+</li>
+<li>blas_no_conj
+: <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a809495dc4e17c4b059c009bc90f00bf7">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a125c156d54359fba48a6b9cf2a2d0a07a809495dc4e17c4b059c009bc90f00bf7">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a809495dc4e17c4b059c009bc90f00bf7">rsb_libspblas.h</a>
+</li>
+<li>blas_no_trans
+: <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f">rsb_libspblas.h</a>
+</li>
+<li>blas_non_unit_diag
+: <a class="el" href="blas__sparse_8h.html#ad7b35ac9114bfe21e15d011bf878b164a3e6acad666ce6b542ac341569b83624f">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#ad7b35ac9114bfe21e15d011bf878b164a3e6acad666ce6b542ac341569b83624f">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#ad7b35ac9114bfe21e15d011bf878b164a3e6acad666ce6b542ac341569b83624f">rsb_libspblas.h</a>
+</li>
+<li>blas_norm_type
+: <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952b">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952b">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952b">rsb_libspblas.h</a>
+</li>
+<li>blas_num_cols
+: <a class="el" href="blas__sparse_8h.html#a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b">rsb_libspblas.h</a>
+</li>
+<li>blas_num_nonzeros
+: <a class="el" href="blas__sparse_8h.html#a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd">rsb_libspblas.h</a>
+</li>
+<li>blas_num_rows
+: <a class="el" href="blas__sparse_8h.html#a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23">rsb_libspblas.h</a>
+</li>
+<li>blas_one_base
+: <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">rsb_libspblas.h</a>
+</li>
+<li>blas_one_norm
+: <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952bab661151b14ab3c58c0b3d335528db250">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952bab661151b14ab3c58c0b3d335528db250">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952bab661151b14ab3c58c0b3d335528db250">rsb_libspblas.h</a>
+</li>
+<li>blas_open_handle
+: <a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a711ecc7da9546cf3ac76a29e297f3eb0">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7cb10fb1b47b79ef278d6f09d571bd06a711ecc7da9546cf3ac76a29e297f3eb0">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a711ecc7da9546cf3ac76a29e297f3eb0">rsb_libspblas.h</a>
+</li>
+<li>blas_order_type
+: <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">rsb_libspblas.h</a>
+</li>
+<li>blas_overflow
+: <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa51424a153ba5a72b4fb5018732bbaa02">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa51424a153ba5a72b4fb5018732bbaa02">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa51424a153ba5a72b4fb5018732bbaa02">rsb_libspblas.h</a>
+</li>
+<li>blas_prec
+: <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaaadf329e788494c80e522348ef1210d9f">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaaadf329e788494c80e522348ef1210d9f">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaaadf329e788494c80e522348ef1210d9f">rsb_libspblas.h</a>
+</li>
+<li>blas_prec_double
+: <a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475af5e092268082a0306216cbad6d3d8b8a">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a8970170b9fd2a64eb18d9509ea624475af5e092268082a0306216cbad6d3d8b8a">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a8970170b9fd2a64eb18d9509ea624475af5e092268082a0306216cbad6d3d8b8a">rsb_libspblas.h</a>
+</li>
+<li>blas_prec_extra
+: <a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475a2138d39c899dac6396f817c6cfdc91d9">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a8970170b9fd2a64eb18d9509ea624475a2138d39c899dac6396f817c6cfdc91d9">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a8970170b9fd2a64eb18d9509ea624475a2138d39c899dac6396f817c6cfdc91d9">rsb_libspblas.h</a>
+</li>
+<li>blas_prec_indigenous
+: <a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475a63c139aa91e4f496acd6cfb85385d7d4">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a8970170b9fd2a64eb18d9509ea624475a63c139aa91e4f496acd6cfb85385d7d4">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a8970170b9fd2a64eb18d9509ea624475a63c139aa91e4f496acd6cfb85385d7d4">rsb_libspblas.h</a>
+</li>
+<li>blas_prec_single
+: <a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475aab04803ec917ea9ae8b4d40ed1cdc7c4">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a8970170b9fd2a64eb18d9509ea624475aab04803ec917ea9ae8b4d40ed1cdc7c4">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a8970170b9fd2a64eb18d9509ea624475aab04803ec917ea9ae8b4d40ed1cdc7c4">rsb_libspblas.h</a>
+</li>
+<li>blas_prec_type
+: <a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a8970170b9fd2a64eb18d9509ea624475">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a8970170b9fd2a64eb18d9509ea624475">rsb_libspblas.h</a>
+</li>
+<li>blas_real
+: <a class="el" href="rsb__libspblas_8c.html#a09d8be749e909b403b1563f0ca84aef8adf886a38a73b1de541eb9d32adb50a4d">rsb_libspblas.c</a>
+, <a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8adf886a38a73b1de541eb9d32adb50a4d">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#a09d8be749e909b403b1563f0ca84aef8adf886a38a73b1de541eb9d32adb50a4d">rsb_libspblas.h</a>
+</li>
+<li>blas_real_inf_norm
+: <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba1ff3a55280960c17e59d37500ab4eec5">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba1ff3a55280960c17e59d37500ab4eec5">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba1ff3a55280960c17e59d37500ab4eec5">rsb_libspblas.c</a>
+</li>
+<li>blas_real_max_norm
+: <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952bae48280621b0adfec78d7a180c1026953">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952bae48280621b0adfec78d7a180c1026953">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952bae48280621b0adfec78d7a180c1026953">rsb_libspblas.h</a>
+</li>
+<li>blas_real_one_norm
+: <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba6f826b18a3a197b97b228961fdab47b3">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba6f826b18a3a197b97b228961fdab47b3">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba6f826b18a3a197b97b228961fdab47b3">blas_sparse.h</a>
+</li>
+<li>blas_regular
+: <a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79">rsb_libspblas.h</a>
+</li>
+<li>blas_right_side
+: <a class="el" href="blas__sparse_8h.html#ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9">rsb_libspblas.h</a>
+</li>
+<li>blas_rnd
+: <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3d5fea2fad72607b2368ace39fa89280">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3d5fea2fad72607b2368ace39fa89280">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3d5fea2fad72607b2368ace39fa89280">rsb_libspblas.h</a>
+</li>
+<li>blas_rowmajor
+: <a class="el" href="rsb__libspblas_8c.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea635ab08ac28ae417e25c0d163c40f19d">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea635ab08ac28ae417e25c0d163c40f19d">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea635ab08ac28ae417e25c0d163c40f19d">blas_sparse.h</a>
+</li>
+<li>blas_rsb_autotune_next_operation
+: <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">rsb_libspblas.c</a>
+</li>
+<li>blas_rsb_duplicates_ovw
+: <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f">rsb_libspblas.h</a>
+</li>
+<li>blas_rsb_duplicates_sum
+: <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a">rsb_libspblas.h</a>
+</li>
+<li>blas_rsb_ext_type
+: <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729">blas_sparse.h</a>
+</li>
+<li>blas_rsb_rep_coo
+: <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60">rsb_libspblas.c</a>
+</li>
+<li>blas_rsb_rep_csr
+: <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878">rsb_libspblas.h</a>
+</li>
+<li>blas_rsb_rep_rsb
+: <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b">rsb_libspblas.h</a>
+</li>
+<li>blas_rsb_spmv_autotuning_off
+: <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7">rsb_libspblas.h</a>
+</li>
+<li>blas_rsb_spmv_autotuning_on
+: <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0">rsb_libspblas.c</a>
+</li>
+<li>blas_rsb_spmv_n_autotuning_off
+: <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0">rsb_libspblas.h</a>
+</li>
+<li>blas_rsb_spmv_n_autotuning_on
+: <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff">rsb_libspblas.h</a>
+</li>
+<li>blas_rsb_spmv_t_autotuning_off
+: <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e">blas_sparse.h</a>
+</li>
+<li>blas_rsb_spmv_t_autotuning_on
+: <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8">rsb_libspblas.h</a>
+</li>
+<li>blas_sfmin
+: <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa0a3cdfdc2ddd9ce036017d4c57aa941a">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa0a3cdfdc2ddd9ce036017d4c57aa941a">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa0a3cdfdc2ddd9ce036017d4c57aa941a">rsb_libspblas.h</a>
+</li>
+<li>blas_side_type
+: <a class="el" href="blas__sparse_8h.html#ac10de4d3a9ae38c876ec94ee7929e695">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#ac10de4d3a9ae38c876ec94ee7929e695">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#ac10de4d3a9ae38c876ec94ee7929e695">rsb_libspblas.h</a>
+</li>
+<li>blas_single_precision
+: <a class="el" href="rsb__libspblas_8c.html#a09d8be749e909b403b1563f0ca84aef8aa4e3065824f579e62b15ba908e625df6">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a09d8be749e909b403b1563f0ca84aef8aa4e3065824f579e62b15ba908e625df6">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8aa4e3065824f579e62b15ba908e625df6">blas_sparse.h</a>
+</li>
+<li>blas_size_type
+: <a class="el" href="blas__sparse_8h.html#a540f6a907f9f5e49d84a65c530e598c6">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#a540f6a907f9f5e49d84a65c530e598c6">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a540f6a907f9f5e49d84a65c530e598c6">rsb_libspblas.c</a>
+</li>
+<li>blas_sort_type
+: <a class="el" href="blas__sparse_8h.html#a4a9825e92ac3a85e524c58283ac42c14">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a4a9825e92ac3a85e524c58283ac42c14">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a4a9825e92ac3a85e524c58283ac42c14">rsb_libspblas.h</a>
+</li>
+<li>blas_sparse_matrix
+: <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6f56456b01e0cc6b25b81201aa67c163">rsb_libspblas.h</a>
+</li>
+<li>blas_sparsity_optimization_type
+: <a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#a3f95e19247de0359b56de195704e05a5">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a3f95e19247de0359b56de195704e05a5">rsb_libspblas.c</a>
+</li>
+<li>BLAS_susaxpy()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaeedaef37cd7591d8b15bc7e8ee049414">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaeedaef37cd7591d8b15bc7e8ee049414">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaeedaef37cd7591d8b15bc7e8ee049414">rsb_libspblas.h</a>
+</li>
+<li>blas_susaxpy_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga863f07d7735eaa4fc0c6dbe1be09974e">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga863f07d7735eaa4fc0c6dbe1be09974e">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga863f07d7735eaa4fc0c6dbe1be09974e">rsb_libspblas.h</a>
+</li>
+<li>BLAS_suscr_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad9fe50c2e7a26e6ef83dfd3ea4cfcdd5">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad9fe50c2e7a26e6ef83dfd3ea4cfcdd5">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad9fe50c2e7a26e6ef83dfd3ea4cfcdd5">blas_sparse.h</a>
+</li>
+<li>blas_suscr_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad2f7ede753754c2474d5460a92bba99e">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad2f7ede753754c2474d5460a92bba99e">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad2f7ede753754c2474d5460a92bba99e">rsb_libspblas.h</a>
+</li>
+<li>BLAS_suscr_block_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga11c5559450e186c2a86d714f564411f3">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga11c5559450e186c2a86d714f564411f3">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga11c5559450e186c2a86d714f564411f3">rsb_libspblas.h</a>
+</li>
+<li>blas_suscr_block_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga0067882e19affabebf581452a7c05252">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga0067882e19affabebf581452a7c05252">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga0067882e19affabebf581452a7c05252">rsb_libspblas.h</a>
+</li>
+<li>BLAS_suscr_end()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga7176a90049256cb0e0fe45db66f57dd2">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga7176a90049256cb0e0fe45db66f57dd2">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga7176a90049256cb0e0fe45db66f57dd2">blas_sparse.h</a>
+</li>
+<li>blas_suscr_end_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga5822f3be35eeb550c323de69ec9933d3">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5822f3be35eeb550c323de69ec9933d3">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5822f3be35eeb550c323de69ec9933d3">rsb_libspblas.c</a>
+</li>
+<li>BLAS_suscr_insert_block()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaa682b478ac48e12d4a091977e8c45768">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa682b478ac48e12d4a091977e8c45768">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa682b478ac48e12d4a091977e8c45768">rsb_libspblas.h</a>
+</li>
+<li>blas_suscr_insert_block_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga61080e2828351bd1585deb2713ed8a29">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga61080e2828351bd1585deb2713ed8a29">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga61080e2828351bd1585deb2713ed8a29">rsb_libspblas.h</a>
+</li>
+<li>BLAS_suscr_insert_clique()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga6e567e79f675ed861c8f446d0e7a78f5">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6e567e79f675ed861c8f446d0e7a78f5">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6e567e79f675ed861c8f446d0e7a78f5">blas_sparse.h</a>
+</li>
+<li>blas_suscr_insert_clique_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gafcee9667fc445e32012c960fca7e698d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafcee9667fc445e32012c960fca7e698d">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafcee9667fc445e32012c960fca7e698d">rsb_libspblas.c</a>
+</li>
+<li>BLAS_suscr_insert_col()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga547d271038794dfc797aecc70e294761">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga547d271038794dfc797aecc70e294761">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga547d271038794dfc797aecc70e294761">rsb_libspblas.h</a>
+</li>
+<li>blas_suscr_insert_col_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga2d8c691851acf099c25eff1a4c2885c1">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2d8c691851acf099c25eff1a4c2885c1">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2d8c691851acf099c25eff1a4c2885c1">rsb_libspblas.h</a>
+</li>
+<li>BLAS_suscr_insert_entries()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac6158601459aabebc22795864a2a62ba">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac6158601459aabebc22795864a2a62ba">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac6158601459aabebc22795864a2a62ba">rsb_libspblas.h</a>
+</li>
+<li>blas_suscr_insert_entries_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga9119b49fd049bcaa310bccb36fcda664">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9119b49fd049bcaa310bccb36fcda664">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9119b49fd049bcaa310bccb36fcda664">rsb_libspblas.c</a>
+</li>
+<li>BLAS_suscr_insert_entry()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga26e2c422895e5df8492bdb561cab4a54">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga26e2c422895e5df8492bdb561cab4a54">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga26e2c422895e5df8492bdb561cab4a54">rsb_libspblas.h</a>
+</li>
+<li>blas_suscr_insert_entry_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga9b3085c739330bca518e8ef371f7d3b1">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9b3085c739330bca518e8ef371f7d3b1">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9b3085c739330bca518e8ef371f7d3b1">rsb_libspblas.h</a>
+</li>
+<li>BLAS_suscr_insert_row()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga9b815fa125e3c84a6e6a6ead2c9ef87b">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9b815fa125e3c84a6e6a6ead2c9ef87b">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9b815fa125e3c84a6e6a6ead2c9ef87b">blas_sparse.h</a>
+</li>
+<li>blas_suscr_insert_row_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga71080ddbf0e0e602c7bc36993a6c88ca">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga71080ddbf0e0e602c7bc36993a6c88ca">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga71080ddbf0e0e602c7bc36993a6c88ca">rsb_libspblas.h</a>
+</li>
+<li>BLAS_suscr_variable_block_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gae7e006a448094a70204be60f24cdf1a3">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae7e006a448094a70204be60f24cdf1a3">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae7e006a448094a70204be60f24cdf1a3">rsb_libspblas.h</a>
+</li>
+<li>blas_suscr_variable_block_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaab267e13449c999ad8a8e3e358f4b2ed">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaab267e13449c999ad8a8e3e358f4b2ed">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaab267e13449c999ad8a8e3e358f4b2ed">rsb_libspblas.h</a>
+</li>
+<li>BLAS_susdot()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga88a22a58b50ce89708abb232e4cbffcd">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga88a22a58b50ce89708abb232e4cbffcd">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga88a22a58b50ce89708abb232e4cbffcd">blas_sparse.h</a>
+</li>
+<li>blas_susdot_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga3d4d6df66fbbdfb8585770ce2ce37e6b">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3d4d6df66fbbdfb8585770ce2ce37e6b">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3d4d6df66fbbdfb8585770ce2ce37e6b">rsb_libspblas.c</a>
+</li>
+<li>BLAS_susga()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga40cdf6b61694154efa1ba8d180381827">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga40cdf6b61694154efa1ba8d180381827">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga40cdf6b61694154efa1ba8d180381827">rsb_libspblas.h</a>
+</li>
+<li>blas_susga_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga69bea2986de886f37a493464b1006456">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga69bea2986de886f37a493464b1006456">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga69bea2986de886f37a493464b1006456">rsb_libspblas.h</a>
+</li>
+<li>BLAS_susget_diag()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga1113eda1c806ca3631fefde07624fbd6">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1113eda1c806ca3631fefde07624fbd6">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1113eda1c806ca3631fefde07624fbd6">blas_sparse.h</a>
+</li>
+<li>blas_susget_diag_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga0444e8a4b321bf1488fb496bdf3116d2">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga0444e8a4b321bf1488fb496bdf3116d2">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga0444e8a4b321bf1488fb496bdf3116d2">rsb_libspblas.h</a>
+</li>
+<li>BLAS_susget_element()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad86989cd1f58003617f3db251b6fc0f1">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad86989cd1f58003617f3db251b6fc0f1">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad86989cd1f58003617f3db251b6fc0f1">blas_sparse.h</a>
+</li>
+<li>blas_susget_element_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaac53e141083bc9871d81b587e5f785c1">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaac53e141083bc9871d81b587e5f785c1">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaac53e141083bc9871d81b587e5f785c1">rsb_libspblas.h</a>
+</li>
+<li>BLAS_susget_infinity_norm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gafc49f44b76021677000bebe7d7fe133b">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafc49f44b76021677000bebe7d7fe133b">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafc49f44b76021677000bebe7d7fe133b">rsb_libspblas.h</a>
+</li>
+<li>blas_susget_infinity_norm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaffaaf5b49e850adda0163b6bc082077d">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaffaaf5b49e850adda0163b6bc082077d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaffaaf5b49e850adda0163b6bc082077d">rsb_libspblas.h</a>
+</li>
+<li>BLAS_susget_matrix_nnz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gafc031d78d0274c81039c2448a403cd10">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafc031d78d0274c81039c2448a403cd10">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafc031d78d0274c81039c2448a403cd10">rsb_libspblas.c</a>
+</li>
+<li>blas_susget_matrix_nnz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga039a9d4da3423ea71726242e1c1251e7">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga039a9d4da3423ea71726242e1c1251e7">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga039a9d4da3423ea71726242e1c1251e7">rsb_libspblas.c</a>
+</li>
+<li>BLAS_susget_rows_nnz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad84dbcdeda549e1b0361f7ade7a38b13">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad84dbcdeda549e1b0361f7ade7a38b13">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad84dbcdeda549e1b0361f7ade7a38b13">rsb_libspblas.h</a>
+</li>
+<li>blas_susget_rows_nnz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga1a8c39f41962e3be6ac84ea3be73f7a0">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1a8c39f41962e3be6ac84ea3be73f7a0">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1a8c39f41962e3be6ac84ea3be73f7a0">rsb_libspblas.h</a>
+</li>
+<li>BLAS_susget_rows_sparse()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga8f78343207ff584d2d78789bd90e5533">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8f78343207ff584d2d78789bd90e5533">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8f78343207ff584d2d78789bd90e5533">blas_sparse.h</a>
+</li>
+<li>blas_susget_rows_sparse_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga0977f63d781215c826aa5a0ea2df9f47">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga0977f63d781215c826aa5a0ea2df9f47">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga0977f63d781215c826aa5a0ea2df9f47">blas_sparse.h</a>
+</li>
+<li>BLAS_susgz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga2c53b81e979cbae6a5d198509f6d905a">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2c53b81e979cbae6a5d198509f6d905a">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2c53b81e979cbae6a5d198509f6d905a">rsb_libspblas.h</a>
+</li>
+<li>blas_susgz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga74964bd95bd8945b13c7fe2c7f559e5c">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga74964bd95bd8945b13c7fe2c7f559e5c">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga74964bd95bd8945b13c7fe2c7f559e5c">blas_sparse.h</a>
+</li>
+<li>BLAS_susmm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga29c11c0c304637e89852359b0f8b10b5">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga29c11c0c304637e89852359b0f8b10b5">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga29c11c0c304637e89852359b0f8b10b5">rsb_libspblas.c</a>
+</li>
+<li>blas_susmm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga2c1da8c4c1473a930ebfaa62f360ca8e">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2c1da8c4c1473a930ebfaa62f360ca8e">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2c1da8c4c1473a930ebfaa62f360ca8e">blas_sparse.h</a>
+</li>
+<li>BLAS_susmv()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">rsb_libspblas.c</a>
+</li>
+<li>blas_susmv_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga651b1d1df5c964dbb21c1a5b14d7878b">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga651b1d1df5c964dbb21c1a5b14d7878b">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga651b1d1df5c964dbb21c1a5b14d7878b">rsb_libspblas.h</a>
+</li>
+<li>BLAS_susrows_scale()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gae671b9fc06140680a8c104ef4f0f54f0">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae671b9fc06140680a8c104ef4f0f54f0">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae671b9fc06140680a8c104ef4f0f54f0">blas_sparse.h</a>
+</li>
+<li>blas_susrows_scale_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga9de54361f778577330c6c5ece88a63c3">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9de54361f778577330c6c5ece88a63c3">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9de54361f778577330c6c5ece88a63c3">rsb_libspblas.c</a>
+</li>
+<li>BLAS_sussc()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad58ff27808df2287b9cc77f6ed4d55ff">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad58ff27808df2287b9cc77f6ed4d55ff">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad58ff27808df2287b9cc77f6ed4d55ff">rsb_libspblas.h</a>
+</li>
+<li>blas_sussc_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga3f88389831294ad45b84ec31313fbc15">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3f88389831294ad45b84ec31313fbc15">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3f88389831294ad45b84ec31313fbc15">blas_sparse.h</a>
+</li>
+<li>BLAS_susset_element()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaf17e549ec8cf353144ac1e3a1f080f46">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf17e549ec8cf353144ac1e3a1f080f46">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf17e549ec8cf353144ac1e3a1f080f46">rsb_libspblas.c</a>
+</li>
+<li>blas_susset_element_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gab8c3e5745870d4399382051dcedad144">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gab8c3e5745870d4399382051dcedad144">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gab8c3e5745870d4399382051dcedad144">rsb_libspblas.c</a>
+</li>
+<li>BLAS_susset_elements()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad3e05b01efa2857c0938ada63f30cadf">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad3e05b01efa2857c0938ada63f30cadf">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad3e05b01efa2857c0938ada63f30cadf">rsb_libspblas.h</a>
+</li>
+<li>blas_susset_elements_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac0abb530fc46d610bf56e7fb1ef42c6c">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0abb530fc46d610bf56e7fb1ef42c6c">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0abb530fc46d610bf56e7fb1ef42c6c">rsb_libspblas.h</a>
+</li>
+<li>BLAS_sussm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga3d7835bb3621aaf70787d72f86355f8d">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3d7835bb3621aaf70787d72f86355f8d">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3d7835bb3621aaf70787d72f86355f8d">blas_sparse.h</a>
+</li>
+<li>blas_sussm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga916f5af1f63f33a3a084accaf2dfd6f1">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga916f5af1f63f33a3a084accaf2dfd6f1">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga916f5af1f63f33a3a084accaf2dfd6f1">rsb_libspblas.c</a>
+</li>
+<li>BLAS_sussv()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gafc9acf48136458baa6ace90355e7abb2">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafc9acf48136458baa6ace90355e7abb2">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafc9acf48136458baa6ace90355e7abb2">rsb_libspblas.h</a>
+</li>
+<li>blas_sussv_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga3b63c0a83f8088e60c8e609b451354f0">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3b63c0a83f8088e60c8e609b451354f0">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3b63c0a83f8088e60c8e609b451354f0">rsb_libspblas.c</a>
+</li>
+<li>blas_symmetric
+: <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0">rsb_libspblas.c</a>
+, <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0">rsb_libspblas.h</a>
+</li>
+<li>blas_symmetry_type
+: <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63b">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63b">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63b">rsb_libspblas.c</a>
+</li>
+<li>blas_t
+: <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa16a1c297dab1551cf40bbe5210395f10">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa16a1c297dab1551cf40bbe5210395f10">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa16a1c297dab1551cf40bbe5210395f10">blas_sparse.h</a>
+</li>
+<li>blas_trans
+: <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54">rsb_libspblas.c</a>
+</li>
+<li>blas_trans_type
+: <a class="el" href="rsb__libspblas_8c.html#a23e5e138364c80074ac014a3dfd346b7">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a23e5e138364c80074ac014a3dfd346b7">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_sparse.h</a>
+</li>
+<li>blas_triangular
+: <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a">rsb_libspblas.h</a>
+</li>
+<li>blas_two_norm
+: <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba57c558d28842a2b7b90df3a796fde77e">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba57c558d28842a2b7b90df3a796fde77e">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba57c558d28842a2b7b90df3a796fde77e">blas_sparse.h</a>
+</li>
+<li>blas_unassembled
+: <a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5">rsb_libspblas.h</a>
+</li>
+<li>blas_underflow
+: <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa4159c63ae4ee2275d8e09d02ecb85428">rsb_libspblas.c</a>
+, <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa4159c63ae4ee2275d8e09d02ecb85428">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa4159c63ae4ee2275d8e09d02ecb85428">rsb_libspblas.h</a>
+</li>
+<li>blas_unit_diag
+: <a class="el" href="blas__sparse_8h.html#ad7b35ac9114bfe21e15d011bf878b164a2f5e42e04fbce66ae47fe91d9a31b52c">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#ad7b35ac9114bfe21e15d011bf878b164a2f5e42e04fbce66ae47fe91d9a31b52c">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#ad7b35ac9114bfe21e15d011bf878b164a2f5e42e04fbce66ae47fe91d9a31b52c">rsb_libspblas.c</a>
+</li>
+<li>blas_uplo_type
+: <a class="el" href="rsb__libspblas_8c.html#acc2b26a405868ca1bd8a18e0eb62e820">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#acc2b26a405868ca1bd8a18e0eb62e820">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#acc2b26a405868ca1bd8a18e0eb62e820">blas_sparse.h</a>
+</li>
+<li>blas_upper
+: <a class="el" href="blas__sparse_8h.html#acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07">rsb_libspblas.c</a>
+</li>
+<li>blas_upper_hermitian
+: <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">blas_sparse.h</a>
+</li>
+<li>blas_upper_symmetric
+: <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">rsb_libspblas.c</a>
+</li>
+<li>blas_upper_triangular
+: <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">blas_sparse.h</a>
+</li>
+<li>BLAS_uscr_end()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga2ff68116b5ae79c37bf335096de973c0">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2ff68116b5ae79c37bf335096de973c0">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2ff68116b5ae79c37bf335096de973c0">rsb_libspblas.c</a>
+</li>
+<li>blas_uscr_end_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga60974067bf5367a9a3c6eaa9f6f8f4ab">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga60974067bf5367a9a3c6eaa9f6f8f4ab">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga60974067bf5367a9a3c6eaa9f6f8f4ab">blas_sparse.h</a>
+</li>
+<li>BLAS_usds()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga8b0cca8196f40f7b55084a978b40717f">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8b0cca8196f40f7b55084a978b40717f">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8b0cca8196f40f7b55084a978b40717f">rsb_libspblas.h</a>
+</li>
+<li>blas_usds_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gae4db91cffaf71632bd41b7423c64b757">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae4db91cffaf71632bd41b7423c64b757">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae4db91cffaf71632bd41b7423c64b757">rsb_libspblas.c</a>
+</li>
+<li>BLAS_usgp
+: <a class="el" href="rsb__libspblas_8h.html#a5eec91b6d95962811bd9cb4e37266214">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">rsb_libspblas.c</a>
+</li>
+<li>blas_usgp_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga2cb97e106eb117547157a8fc61491b91">rsb_libspblas.c</a>
+</li>
+<li>BLAS_ussp
+: <a class="el" href="rsb__libspblas_8h.html#a6719ae77dfef6d6dd0790e34a65c1924">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#a6719ae77dfef6d6dd0790e34a65c1924">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">rsb_libspblas.c</a>
+</li>
+<li>blas_ussp_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga5ea0303be1db6c9dd73c03bba6dc6158">rsb_libspblas.c</a>
+</li>
+<li>blas_valid_handle
+: <a class="el" href="rsb__libspblas_8c.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">rsb_libspblas.c</a>
+, <a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">rsb_libspblas.h</a>
+</li>
+<li>blas_zero_base
+: <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea86431e076106ab9784bc5b203d4aa3e0">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a3fe740ad5a139d723de260d638987e9ea86431e076106ab9784bc5b203d4aa3e0">rsb_libspblas.c</a>
+, <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9ea86431e076106ab9784bc5b203d4aa3e0">blas_sparse.h</a>
+</li>
+<li>BLAS_zusaxpy()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga20f8bb20cf00554547342750d80b2197">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga20f8bb20cf00554547342750d80b2197">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga20f8bb20cf00554547342750d80b2197">rsb_libspblas.h</a>
+</li>
+<li>blas_zusaxpy_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga58ad4724155b0cef43cdb7d95f879d8c">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga58ad4724155b0cef43cdb7d95f879d8c">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga58ad4724155b0cef43cdb7d95f879d8c">rsb_libspblas.c</a>
+</li>
+<li>BLAS_zuscr_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga52b67393ad16e3d40e74fcdba88c7da4">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga52b67393ad16e3d40e74fcdba88c7da4">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga52b67393ad16e3d40e74fcdba88c7da4">rsb_libspblas.h</a>
+</li>
+<li>blas_zuscr_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gae0246836bd8d4b8697c6674998397f3a">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae0246836bd8d4b8697c6674998397f3a">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae0246836bd8d4b8697c6674998397f3a">blas_sparse.h</a>
+</li>
+<li>BLAS_zuscr_block_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286">rsb_libspblas.c</a>
+</li>
+<li>blas_zuscr_block_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga62c3bd7ba1a96f82055478d40af67370">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga62c3bd7ba1a96f82055478d40af67370">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga62c3bd7ba1a96f82055478d40af67370">rsb_libspblas.c</a>
+</li>
+<li>BLAS_zuscr_end()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">rsb_libspblas.h</a>
+</li>
+<li>blas_zuscr_end_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga6c23466b531e84f472d5fa75228cb895">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6c23466b531e84f472d5fa75228cb895">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6c23466b531e84f472d5fa75228cb895">blas_sparse.h</a>
+</li>
+<li>BLAS_zuscr_insert_block()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga573ee2ea89db4a133b8729abbb1223f0">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga573ee2ea89db4a133b8729abbb1223f0">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga573ee2ea89db4a133b8729abbb1223f0">blas_sparse.h</a>
+</li>
+<li>blas_zuscr_insert_block_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac3837cd5c7b2e8ac11c6c0e5cff8914c">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac3837cd5c7b2e8ac11c6c0e5cff8914c">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac3837cd5c7b2e8ac11c6c0e5cff8914c">rsb_libspblas.h</a>
+</li>
+<li>BLAS_zuscr_insert_clique()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga52519d2caa1070b0c80ac3c6cb104d92">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga52519d2caa1070b0c80ac3c6cb104d92">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga52519d2caa1070b0c80ac3c6cb104d92">blas_sparse.h</a>
+</li>
+<li>blas_zuscr_insert_clique_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga8c3430083655b74988536d823e40c723">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8c3430083655b74988536d823e40c723">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8c3430083655b74988536d823e40c723">rsb_libspblas.c</a>
+</li>
+<li>BLAS_zuscr_insert_col()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga00cfdd3669b146b25d42a32f104ff8a3">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga00cfdd3669b146b25d42a32f104ff8a3">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga00cfdd3669b146b25d42a32f104ff8a3">rsb_libspblas.c</a>
+</li>
+<li>blas_zuscr_insert_col_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga10a2dc6a5399459c83282bda757f5096">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga10a2dc6a5399459c83282bda757f5096">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga10a2dc6a5399459c83282bda757f5096">rsb_libspblas.h</a>
+</li>
+<li>BLAS_zuscr_insert_entries()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaacc9c9e5c95df4ea6656ad93f1f09666">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaacc9c9e5c95df4ea6656ad93f1f09666">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaacc9c9e5c95df4ea6656ad93f1f09666">blas_sparse.h</a>
+</li>
+<li>blas_zuscr_insert_entries_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad9ad3afc16fc0181117004fd46ff78ae">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad9ad3afc16fc0181117004fd46ff78ae">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad9ad3afc16fc0181117004fd46ff78ae">blas_sparse.h</a>
+</li>
+<li>BLAS_zuscr_insert_entry()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ffe345c537b53ac5839da21b236d87c">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ffe345c537b53ac5839da21b236d87c">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ffe345c537b53ac5839da21b236d87c">blas_sparse.h</a>
+</li>
+<li>blas_zuscr_insert_entry_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaad6627231dc4230affa318726ff3f345">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaad6627231dc4230affa318726ff3f345">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaad6627231dc4230affa318726ff3f345">blas_sparse.h</a>
+</li>
+<li>BLAS_zuscr_insert_row()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gab52e13dc7c61fc48e593276f04cb2d30">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gab52e13dc7c61fc48e593276f04cb2d30">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gab52e13dc7c61fc48e593276f04cb2d30">blas_sparse.h</a>
+</li>
+<li>blas_zuscr_insert_row_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaf871e29bfce399dedbebe2aa9c7831df">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf871e29bfce399dedbebe2aa9c7831df">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf871e29bfce399dedbebe2aa9c7831df">rsb_libspblas.c</a>
+</li>
+<li>BLAS_zuscr_variable_block_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaa582b369a0233027349f8f844cce7622">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa582b369a0233027349f8f844cce7622">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa582b369a0233027349f8f844cce7622">blas_sparse.h</a>
+</li>
+<li>blas_zuscr_variable_block_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaa51253d1c144c8aa744b2e13742fec40">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa51253d1c144c8aa744b2e13742fec40">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa51253d1c144c8aa744b2e13742fec40">rsb_libspblas.c</a>
+</li>
+<li>BLAS_zusdot()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga1baea6bd05a2117418d333f5365e34df">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1baea6bd05a2117418d333f5365e34df">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1baea6bd05a2117418d333f5365e34df">rsb_libspblas.h</a>
+</li>
+<li>blas_zusdot_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaa9f54b685570087469d21462d089ef7d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa9f54b685570087469d21462d089ef7d">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa9f54b685570087469d21462d089ef7d">rsb_libspblas.c</a>
+</li>
+<li>BLAS_zusga()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga2a29ab06d610d011109dd0c3da94992f">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2a29ab06d610d011109dd0c3da94992f">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2a29ab06d610d011109dd0c3da94992f">rsb_libspblas.c</a>
+</li>
+<li>blas_zusga_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga245af9e95488dece29876354c6e91fed">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga245af9e95488dece29876354c6e91fed">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga245af9e95488dece29876354c6e91fed">rsb_libspblas.c</a>
+</li>
+<li>BLAS_zusget_diag()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad175937c05d3d05d3aa7fa35eb3028ec">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad175937c05d3d05d3aa7fa35eb3028ec">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad175937c05d3d05d3aa7fa35eb3028ec">rsb_libspblas.h</a>
+</li>
+<li>blas_zusget_diag_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga73feb9adc685f7ff1d66763b0801a0f9">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga73feb9adc685f7ff1d66763b0801a0f9">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga73feb9adc685f7ff1d66763b0801a0f9">rsb_libspblas.h</a>
+</li>
+<li>BLAS_zusget_element()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga27417bc0d923f7288ed736837492275c">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga27417bc0d923f7288ed736837492275c">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga27417bc0d923f7288ed736837492275c">blas_sparse.h</a>
+</li>
+<li>blas_zusget_element_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga845cca2b512e38b467fc0d4b93d660b7">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga845cca2b512e38b467fc0d4b93d660b7">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga845cca2b512e38b467fc0d4b93d660b7">rsb_libspblas.h</a>
+</li>
+<li>BLAS_zusget_infinity_norm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga286c2cf2c749c80c8b71ff2f4bdb1566">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga286c2cf2c749c80c8b71ff2f4bdb1566">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga286c2cf2c749c80c8b71ff2f4bdb1566">rsb_libspblas.h</a>
+</li>
+<li>blas_zusget_infinity_norm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga01b88a27714ca87085421fd9a4f3e479">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga01b88a27714ca87085421fd9a4f3e479">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga01b88a27714ca87085421fd9a4f3e479">rsb_libspblas.c</a>
+</li>
+<li>BLAS_zusget_matrix_nnz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga85e15d7a3331e8ed4d702908477e2896">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga85e15d7a3331e8ed4d702908477e2896">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga85e15d7a3331e8ed4d702908477e2896">rsb_libspblas.h</a>
+</li>
+<li>blas_zusget_matrix_nnz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga9bdd048dea68ecbd8fd712349d4fbf13">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9bdd048dea68ecbd8fd712349d4fbf13">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9bdd048dea68ecbd8fd712349d4fbf13">rsb_libspblas.h</a>
+</li>
+<li>BLAS_zusget_rows_nnz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gadeb3cbe1cc6987763a55665bcdb8aef5">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gadeb3cbe1cc6987763a55665bcdb8aef5">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gadeb3cbe1cc6987763a55665bcdb8aef5">blas_sparse.h</a>
+</li>
+<li>blas_zusget_rows_nnz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga50cba1e236b63775110d6d1b292417da">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga50cba1e236b63775110d6d1b292417da">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga50cba1e236b63775110d6d1b292417da">rsb_libspblas.h</a>
+</li>
+<li>BLAS_zusget_rows_sparse()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaf9d44fc73526a4fdf9627424626bf4a5">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf9d44fc73526a4fdf9627424626bf4a5">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf9d44fc73526a4fdf9627424626bf4a5">rsb_libspblas.h</a>
+</li>
+<li>blas_zusget_rows_sparse_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga63f072aa25f7f7f8ac1ac4e32aae0c2e">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga63f072aa25f7f7f8ac1ac4e32aae0c2e">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga63f072aa25f7f7f8ac1ac4e32aae0c2e">blas_sparse.h</a>
+</li>
+<li>BLAS_zusgz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga0d52a140d65ab78ee0c515c445b42451">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga0d52a140d65ab78ee0c515c445b42451">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga0d52a140d65ab78ee0c515c445b42451">rsb_libspblas.c</a>
+</li>
+<li>blas_zusgz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a6be1c191d51a622b99fe1b9a776bdc">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a6be1c191d51a622b99fe1b9a776bdc">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a6be1c191d51a622b99fe1b9a776bdc">rsb_libspblas.c</a>
+</li>
+<li>BLAS_zusmm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga88138db4545610d234d18d42237f36ee">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga88138db4545610d234d18d42237f36ee">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga88138db4545610d234d18d42237f36ee">blas_sparse.h</a>
+</li>
+<li>blas_zusmm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaf7018fb638e25fe8b149d0cab4e844c0">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf7018fb638e25fe8b149d0cab4e844c0">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf7018fb638e25fe8b149d0cab4e844c0">rsb_libspblas.h</a>
+</li>
+<li>BLAS_zusmv()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">rsb_libspblas.h</a>
+</li>
+<li>blas_zusmv_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga6747bd2d7930018d8693a97a3eb2865c">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6747bd2d7930018d8693a97a3eb2865c">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6747bd2d7930018d8693a97a3eb2865c">rsb_libspblas.h</a>
+</li>
+<li>BLAS_zusrows_scale()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad551879cdde6d16d9dd5b9edc647c667">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad551879cdde6d16d9dd5b9edc647c667">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad551879cdde6d16d9dd5b9edc647c667">blas_sparse.h</a>
+</li>
+<li>blas_zusrows_scale_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga806bb32c4231e4cd9d833370484ad369">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga806bb32c4231e4cd9d833370484ad369">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga806bb32c4231e4cd9d833370484ad369">rsb_libspblas.c</a>
+</li>
+<li>BLAS_zussc()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaad333ae644010e3b059190b98528c79d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaad333ae644010e3b059190b98528c79d">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaad333ae644010e3b059190b98528c79d">rsb_libspblas.c</a>
+</li>
+<li>blas_zussc_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gab89e9860df0ed52620651cfc607a987a">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gab89e9860df0ed52620651cfc607a987a">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gab89e9860df0ed52620651cfc607a987a">rsb_libspblas.h</a>
+</li>
+<li>BLAS_zusset_element()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaca954a070d476342e254587fc2faa7fd">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaca954a070d476342e254587fc2faa7fd">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaca954a070d476342e254587fc2faa7fd">rsb_libspblas.h</a>
+</li>
+<li>blas_zusset_element_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga52efe19f0972fa51ac6329cf717b676c">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga52efe19f0972fa51ac6329cf717b676c">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga52efe19f0972fa51ac6329cf717b676c">blas_sparse.h</a>
+</li>
+<li>BLAS_zusset_elements()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac542af7517c9f667122e8bdc408487b3">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac542af7517c9f667122e8bdc408487b3">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac542af7517c9f667122e8bdc408487b3">rsb_libspblas.h</a>
+</li>
+<li>blas_zusset_elements_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga156a8d0225d9761cd58e15e026b9ba2e">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga156a8d0225d9761cd58e15e026b9ba2e">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga156a8d0225d9761cd58e15e026b9ba2e">rsb_libspblas.h</a>
+</li>
+<li>BLAS_zussm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga8602eae41f9e5248ff086087abe68bdf">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8602eae41f9e5248ff086087abe68bdf">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8602eae41f9e5248ff086087abe68bdf">rsb_libspblas.c</a>
+</li>
+<li>blas_zussm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga60f808ded982233be9a4faaa5fb75db3">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga60f808ded982233be9a4faaa5fb75db3">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga60f808ded982233be9a4faaa5fb75db3">blas_sparse.h</a>
+</li>
+<li>BLAS_zussv()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga7c1e740064369d0029cd627643eb841a">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga7c1e740064369d0029cd627643eb841a">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga7c1e740064369d0029cd627643eb841a">blas_sparse.h</a>
+</li>
+<li>blas_zussv_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d14a5df82e93614e8c524f6d20bb5c5">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d14a5df82e93614e8c524f6d20bb5c5">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d14a5df82e93614e8c524f6d20bb5c5">rsb_libspblas.c</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/globals_0x72.html b/doc/html/globals_0x72.html
new file mode 100644
index 0000000..eae8698
--- /dev/null
+++ b/doc/html/globals_0x72.html
@@ -0,0 +1,966 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Globals</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="files.html"><span>File List</span></a></li>
+      <li class="current"><a href="globals.html"><span>Globals</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li class="current"><a href="globals.html"><span>All</span></a></li>
+      <li><a href="globals_func.html"><span>Functions</span></a></li>
+      <li><a href="globals_type.html"><span>Typedefs</span></a></li>
+      <li><a href="globals_enum.html"><span>Enumerations</span></a></li>
+      <li><a href="globals_eval.html"><span>Enumerator</span></a></li>
+      <li><a href="globals_defs.html"><span>Macros</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li><a href="globals.html#index_b"><span>b</span></a></li>
+      <li class="current"><a href="globals_0x72.html#index_r"><span>r</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+<div class="textblock">Here is a list of all functions, variables, defines, enums, and typedefs with links to the files they belong to:</div>
+
+<h3><a class="anchor" id="index_r"></a>- r -</h3><ul>
+<li>rsb_blas_get_mtx()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac4d8c73e5d9faa85209bcc4e885d4ff1">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac4d8c73e5d9faa85209bcc4e885d4ff1">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac4d8c73e5d9faa85209bcc4e885d4ff1">rsb_libspblas.h</a>
+</li>
+<li>rsb_blk_idx_t
+: <a class="el" href="rsb_8h.html#ac6a4411e32793f5c150c6ab3c6f7e14e">rsb.h</a>
+</li>
+<li>RSB_BOOL_FALSE
+: <a class="el" href="rsb_8h.html#ad396755fe9a1d81991d5ac238058db18">rsb.h</a>
+</li>
+<li>rsb_bool_t
+: <a class="el" href="rsb_8h.html#aeeac94f4bf43460df839c8decd897523">rsb.h</a>
+</li>
+<li>RSB_BOOL_TRUE
+: <a class="el" href="rsb_8h.html#af580e920b9f507028d3b7d34b4dadd6f">rsb.h</a>
+</li>
+<li>RSB_CHAR_AS_TRANSPOSITION
+: <a class="el" href="rsb__types_8h.html#a6ea10439ed32405f43a9f5e6c9b64787">rsb_types.h</a>
+</li>
+<li>RSB_CHAR_BIT
+: <a class="el" href="rsb_8h.html#a5749695a0fccd6348d669c6790185a68">rsb.h</a>
+</li>
+<li>rsb_char_t
+: <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb.h</a>
+</li>
+<li>RSB_CONST_MAX_TUNING_ROUNDS
+: <a class="el" href="rsb__types_8h.html#aef1f5467f82116857e5003daa0f75ccd">rsb_types.h</a>
+</li>
+<li>rsb_coo_idx_t
+: <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb.h</a>
+</li>
+<li>rsb_coo_sort()
+: <a class="el" href="group__rsb__doc__rsb.html#gaa09eca432d5bb8c57fcff5d9ab98dfb8">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gaa09eca432d5bb8c57fcff5d9ab98dfb8">rsb_rsb.c</a>
+</li>
+<li>RSB_DEFAULT_BLOCKING
+: <a class="el" href="rsb_8h.html#a3579d00f3b97cd569707f7c62e462322">rsb.h</a>
+</li>
+<li>RSB_DEFAULT_COL_BLOCKING
+: <a class="el" href="rsb_8h.html#a0f7e634867763b3cc1faaa3ba8e106db">rsb.h</a>
+</li>
+<li>RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE
+: <a class="el" href="rsb__types_8h.html#ae7da5c374c2384c32084fc50ede06a4e">rsb_types.h</a>
+</li>
+<li>RSB_DEFAULT_POSSIBLY_INTEGER_TYPE
+: <a class="el" href="rsb__types_8h.html#ab2ec9d6e0af8a10a032d597423fef559">rsb_types.h</a>
+</li>
+<li>RSB_DEFAULT_POSSIBLY_INTEGER_TYPE_STRING
+: <a class="el" href="rsb__types_8h.html#a2a35f3f9a39d1b2016cf6aae4bfbf3e4">rsb_types.h</a>
+</li>
+<li>RSB_DEFAULT_ROW_BLOCKING
+: <a class="el" href="rsb_8h.html#a7be45869842d6ecc5646740350d27d26">rsb.h</a>
+</li>
+<li>RSB_DEFAULT_SYMMETRY
+: <a class="el" href="rsb__types_8h.html#a898310ae6ad07802d6d261b6053cc3c5">rsb_types.h</a>
+</li>
+<li>RSB_DEFAULT_TRANSPOSITION
+: <a class="el" href="rsb__types_8h.html#a2fb899b07173e590c8a13ae2b32ca383">rsb_types.h</a>
+</li>
+<li>RSB_DEFAULT_TYPE
+: <a class="el" href="rsb__types_8h.html#aa5e96f00841ec8f4f3ca1ff0bf1b5bbd">rsb_types.h</a>
+</li>
+<li>RSB_DEFAULT_TYPE_STRING
+: <a class="el" href="rsb__types_8h.html#acf1cad553e2bb07697c34bc5a6123ca1">rsb_types.h</a>
+</li>
+<li>RSB_DO_FLAG_ADD
+: <a class="el" href="rsb_8h.html#a3949d8af584a0e0e0a17e96d28b8d078">rsb.h</a>
+</li>
+<li>RSB_DO_FLAG_DEL
+: <a class="el" href="rsb_8h.html#aee33ededde2130f79f6c84966f1a180b">rsb.h</a>
+</li>
+<li>RSB_DO_FLAG_FILTERONLY
+: <a class="el" href="rsb_8h.html#a70e87c7a0afaf9b27650d252086559f7">rsb.h</a>
+</li>
+<li>RSB_DO_FLAG_FILTEROUT
+: <a class="el" href="rsb_8h.html#a23beda4691d4e83e6d3984960dc9f422">rsb.h</a>
+</li>
+<li>RSB_DO_FLAG_HAS
+: <a class="el" href="rsb_8h.html#ad155950ce44eddd61911184bccba86ab">rsb.h</a>
+</li>
+<li>RSB_DO_FLAG_HAS_INTERSECTION
+: <a class="el" href="rsb_8h.html#a116d0af2caf6bddd358035597a260244">rsb.h</a>
+</li>
+<li>RSB_ELOPF_DIV
+: <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea5665d0891b6ec738013ae7925de01969">rsb.h</a>
+</li>
+<li>RSB_ELOPF_MUL
+: <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea5d853af7a6db57bc49cdbf7a53927e8a">rsb.h</a>
+</li>
+<li>RSB_ELOPF_NEG
+: <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea299b987e6a560bf0bec0432859a959e4">rsb.h</a>
+</li>
+<li>RSB_ELOPF_POW
+: <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadeae2cbeab6782b6e02b069568ec44cb94a">rsb.h</a>
+</li>
+<li>RSB_ELOPF_SCALE_COLS
+: <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadeafadcdf204c627d95c3dde82ee0c5608e">rsb.h</a>
+</li>
+<li>RSB_ELOPF_SCALE_COLS_REAL
+: <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea3a56f10b068d68e579bf4b01f8347f3f">rsb.h</a>
+</li>
+<li>RSB_ELOPF_SCALE_ROWS
+: <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea884b319e42b1f2d70543e26c300a4287">rsb.h</a>
+</li>
+<li>RSB_ELOPF_SCALE_ROWS_REAL
+: <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea445dc5c113f761b58356e93e1b2bbfb5">rsb.h</a>
+</li>
+<li>rsb_elopf_t
+: <a class="el" href="group__rsb__doc__rsb.html#ga16c86c65a187bfbe94ecfdb87b97cade">rsb.h</a>
+</li>
+<li>RSB_ERR_BADARGS
+: <a class="el" href="rsb_8h.html#af0b262c6c554403269234219b3aec409">rsb.h</a>
+</li>
+<li>RSB_ERR_CAST
+: <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">rsb.h</a>
+</li>
+<li>RSB_ERR_CORRUPT_INPUT_DATA
+: <a class="el" href="rsb_8h.html#a14103828be5eb82e40d3b772ce54abda">rsb.h</a>
+</li>
+<li>RSB_ERR_COULD_NOT_HONOUR_EXTERNALLY_ALLOCATION_FLAGS
+: <a class="el" href="rsb_8h.html#a935de71c3acc5714ad539d65288e2593">rsb.h</a>
+</li>
+<li>RSB_ERR_ENOMEM
+: <a class="el" href="rsb_8h.html#a538215b32e908646c979a2e446ae5467">rsb.h</a>
+</li>
+<li>RSB_ERR_FAILED_MEMHIER_DETECTION
+: <a class="el" href="rsb_8h.html#a3cacb604d0ad892e195c7c97eda18dba">rsb.h</a>
+</li>
+<li>RSB_ERR_FORTRAN_ERROR
+: <a class="el" href="rsb_8h.html#a40628c24058f45a481e18b6ad491bf1b">rsb.h</a>
+</li>
+<li>RSB_ERR_GENERIC_ERROR
+: <a class="el" href="rsb_8h.html#ad46ebc803d7cad695babdc7d8c709828">rsb.h</a>
+</li>
+<li>RSB_ERR_INTERNAL_ERROR
+: <a class="el" href="rsb_8h.html#a8e650a7e3b5c5aa1fb9763b0f1498126">rsb.h</a>
+</li>
+<li>RSB_ERR_INVALID_NUMERICAL_DATA
+: <a class="el" href="rsb_8h.html#a8d504baa13048da05bb71235e2c8d181">rsb.h</a>
+</li>
+<li>RSB_ERR_LIMITS
+: <a class="el" href="rsb_8h.html#a3d7758ee9127e0c93c9075402999d154">rsb.h</a>
+</li>
+<li>RSB_ERR_MEMORY_LEAK
+: <a class="el" href="rsb_8h.html#a1b63053f52d6426b726a05b206a3862a">rsb.h</a>
+</li>
+<li>RSB_ERR_NO_ERROR
+: <a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">rsb.h</a>
+</li>
+<li>RSB_ERR_NO_STREAM_OUTPUT_CONFIGURED_OUT
+: <a class="el" href="rsb_8h.html#a9d7fe7c0e3fabfba57bf2318459ed18a">rsb.h</a>
+</li>
+<li>RSB_ERR_NO_USER_CONFIGURATION
+: <a class="el" href="rsb_8h.html#a5ab0f86009e1f934b25b23fc4837b9b0">rsb.h</a>
+</li>
+<li>rsb_err_t
+: <a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb.h</a>
+</li>
+<li>RSB_ERR_TO_PROGRAM_ERROR
+: <a class="el" href="rsb_8h.html#a9738e6b8b638ca234acd92b49c6ac1db">rsb.h</a>
+</li>
+<li>RSB_ERR_UNIMPLEMENTED_YET
+: <a class="el" href="rsb_8h.html#a0bd20d0f68cf911bf9dfda495d8e12db">rsb.h</a>
+</li>
+<li>RSB_ERR_UNSUPPORTED_FEATURE
+: <a class="el" href="rsb_8h.html#accf836c8eb3145e9ab4fd277d6911764">rsb.h</a>
+</li>
+<li>RSB_ERR_UNSUPPORTED_FORMAT
+: <a class="el" href="rsb_8h.html#ac00cd41eab18a0d2b9323b401029dd73">rsb.h</a>
+</li>
+<li>RSB_ERR_UNSUPPORTED_OPERATION
+: <a class="el" href="rsb_8h.html#ab4f407e7c8364bee51cc77546d6f0922">rsb.h</a>
+</li>
+<li>RSB_ERR_UNSUPPORTED_TYPE
+: <a class="el" href="rsb_8h.html#afdf2ab3912960ee19f23e7d585371548">rsb.h</a>
+</li>
+<li>RSB_ERRS_UNSUPPORTED_FEATURES
+: <a class="el" href="rsb_8h.html#a4d8eb05488b681b75449f64c418b8893">rsb.h</a>
+</li>
+<li>RSB_EXPOSE_NEW_GENERAL_INTERFACE
+: <a class="el" href="rsb__rsb_8c.html#a6a77bece998693a0c9bc500d444eb8a3">rsb_rsb.c</a>
+</li>
+<li>RSB_EXTF_ASUMS_COL
+: <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a26a147a4fe29284c1a3ca18ed3824ada">rsb.h</a>
+</li>
+<li>RSB_EXTF_ASUMS_ROW
+: <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a8e8061312124af555196c7277102ca54">rsb.h</a>
+</li>
+<li>RSB_EXTF_DIAG
+: <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a5c7c241fb262968d5b7c42e63e5c1ea1">rsb.h</a>
+</li>
+<li>RSB_EXTF_NORM_INF
+: <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a0a6cb081d0345b5bb6290ae534e3502f">rsb.h</a>
+</li>
+<li>RSB_EXTF_NORM_ONE
+: <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a4c48a7a285045f4614a83c50ad740508">rsb.h</a>
+</li>
+<li>RSB_EXTF_NORM_TWO
+: <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9af5f5082e70a6193ebcf3ea7ba7365eef">rsb.h</a>
+</li>
+<li>RSB_EXTF_SUMS_COL
+: <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a1878f79ae6f00f0b846a2fae397ffe4e">rsb.h</a>
+</li>
+<li>RSB_EXTF_SUMS_ROW
+: <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9af9b17f6ad2d8be781b003836f0403fe5">rsb.h</a>
+</li>
+<li>rsb_extff_t
+: <a class="el" href="group__rsb__doc__rsb.html#ga14750ca720fd92a2be879a59ae36dfe9">rsb.h</a>
+</li>
+<li>rsb_file_mtx_get_dimensions
+: <a class="el" href="rsb_8h.html#a97106c8db99424b5b69cd6be5bf59937">rsb.h</a>
+</li>
+<li>rsb_file_mtx_get_dims()
+: <a class="el" href="group__rsb__doc__rsb.html#gaa79f69918eafbd8f737b7866a00a0330">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gaa79f69918eafbd8f737b7866a00a0330">rsb_rsb.c</a>
+</li>
+<li>rsb_file_mtx_load()
+: <a class="el" href="group__rsb__doc__rsb.html#ga00833b0cf57da8e430f9d0e2b5375bb3">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga00833b0cf57da8e430f9d0e2b5375bb3">rsb_rsb.c</a>
+</li>
+<li>rsb_file_mtx_render
+: <a class="el" href="rsb_8h.html#a191af7bdb17d4b0abb3a195c11e56c3b">rsb.h</a>
+</li>
+<li>rsb_file_mtx_rndr()
+: <a class="el" href="group__rsb__doc__rsb.html#ga4b45a74b985f5cbd869bc9a540951771">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga4b45a74b985f5cbd869bc9a540951771">rsb_rsb.c</a>
+</li>
+<li>rsb_file_mtx_save()
+: <a class="el" href="group__rsb__doc__rsb.html#gad911ac7528c95c874d02cb17e6b76c54">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gad911ac7528c95c874d02cb17e6b76c54">rsb_rsb.c</a>
+</li>
+<li>rsb_file_vec_load()
+: <a class="el" href="group__rsb__doc__rsb.html#gad071e0373a08f74ee7ae910e9e4fd140">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gad071e0373a08f74ee7ae910e9e4fd140">rsb_rsb.c</a>
+</li>
+<li>rsb_file_vec_save()
+: <a class="el" href="group__rsb__doc__rsb.html#gac4b2a63cdfe1cd4083b1561ee4bea696">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gac4b2a63cdfe1cd4083b1561ee4bea696">rsb_rsb.c</a>
+</li>
+<li>RSB_FLAG_ASSEMBLED_IN_COO_ARRAYS
+: <a class="el" href="rsb_8h.html#adce7e20015d4a549bb8c44a00a80fc7e">rsb.h</a>
+</li>
+<li>RSB_FLAG_C_INDICES_INTERFACE
+: <a class="el" href="rsb_8h.html#a49a9315ba7e702e323eadca04d0d735a">rsb.h</a>
+</li>
+<li>RSB_FLAG_DEFAULT_COO_MATRIX_FLAGS
+: <a class="el" href="rsb_8h.html#a6b21a3edf4231070a10223f1a9ae1dc4">rsb.h</a>
+</li>
+<li>RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS
+: <a class="el" href="rsb_8h.html#a8c90a9ad92722ffbbf1bfcadb805c520">rsb.h</a>
+</li>
+<li>RSB_FLAG_DEFAULT_MATRIX_FLAGS
+: <a class="el" href="rsb_8h.html#acac4b9c09a3fd6be63e511fc5042038f">rsb.h</a>
+</li>
+<li>RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS
+: <a class="el" href="rsb_8h.html#a17c314e28220f3b81aed9cc7d79f97e4">rsb.h</a>
+</li>
+<li>RSB_FLAG_DEFAULT_STORAGE_FLAGS
+: <a class="el" href="rsb_8h.html#aa83897e25c1235a780ed7fe317c78555">rsb.h</a>
+</li>
+<li>RSB_FLAG_DIAGONAL
+: <a class="el" href="rsb_8h.html#abccb47886fb3f8352e4e6ad801fd8efa">rsb.h</a>
+</li>
+<li>RSB_FLAG_DISCARD_ZEROS
+: <a class="el" href="rsb_8h.html#abf243a6f15925734e143703c4ad33512">rsb.h</a>
+</li>
+<li>RSB_FLAG_DUPLICATES_DEFAULT_HANDLE
+: <a class="el" href="rsb_8h.html#a7fee489042762b3b22d8184c592a9e52">rsb.h</a>
+</li>
+<li>RSB_FLAG_DUPLICATES_KEEP_LAST
+: <a class="el" href="rsb_8h.html#aff85f26964888f838aa97eb371ce5da3">rsb.h</a>
+</li>
+<li>RSB_FLAG_DUPLICATES_SUM
+: <a class="el" href="rsb_8h.html#afd1b39c625f4249cd32fccea38957f97">rsb.h</a>
+</li>
+<li>RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT
+: <a class="el" href="rsb_8h.html#a1d3b9bd7a31257cc8116be3dee0125b5">rsb.h</a>
+</li>
+<li>RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS
+: <a class="el" href="rsb_8h.html#a6abc0e23c782b817e2ef96d8294f990d">rsb.h</a>
+</li>
+<li>RSB_FLAG_FORTRAN_INDICES_INTERFACE
+: <a class="el" href="rsb_8h.html#a8ccb4d7203ce7707f9d13bd6c5ef4169">rsb.h</a>
+</li>
+<li>RSB_FLAG_HERMITIAN
+: <a class="el" href="rsb_8h.html#ae3e1d6090dd2912acba58b4bc0530ab7">rsb.h</a>
+</li>
+<li>RSB_FLAG_IDENTICAL_FLAGS
+: <a class="el" href="rsb_8h.html#aacf404fe630d480353ce767fd27ba097">rsb.h</a>
+</li>
+<li>RSB_FLAG_LOWER
+: <a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">rsb.h</a>
+</li>
+<li>RSB_FLAG_LOWER_HERMITIAN
+: <a class="el" href="rsb_8h.html#aa06dcddcdd4f42fe2eeda8eb6168bd2d">rsb.h</a>
+</li>
+<li>RSB_FLAG_LOWER_SYMMETRIC
+: <a class="el" href="rsb_8h.html#a6933030c784596e3c8dbbbd8daf62805">rsb.h</a>
+</li>
+<li>RSB_FLAG_LOWER_TRIANGULAR
+: <a class="el" href="rsb_8h.html#aed7916ce610549fc75aa0c3e2d2ae1b9">rsb.h</a>
+</li>
+<li>RSB_FLAG_MUTUALLY_EXCLUSIVE_SWITCHES
+: <a class="el" href="rsb_8h.html#a6f4335cce5234a69e06188bcad418091">rsb.h</a>
+</li>
+<li>RSB_FLAG_NOFLAGS
+: <a class="el" href="rsb_8h.html#a0ea7640214ee34c87e483c475b15827d">rsb.h</a>
+</li>
+<li>RSB_FLAG_QUAD_PARTITIONING
+: <a class="el" href="rsb_8h.html#a5ca428920608e6dd6fcc4e9a4fa8ee70">rsb.h</a>
+</li>
+<li>RSB_FLAG_RECURSIVE_MORE_LEAVES_THAN_THREADS
+: <a class="el" href="rsb_8h.html#a54d04b341465bf3dadc62ad99d55f8ca">rsb.h</a>
+</li>
+<li>RSB_FLAG_RECURSIVE_SUBDIVIDE_MORE_ON_DIAG
+: <a class="el" href="rsb_8h.html#ad8e75dfa2b78fa82cdd31665a375d257">rsb.h</a>
+</li>
+<li>RSB_FLAG_SORTED_INPUT
+: <a class="el" href="rsb_8h.html#a726fa64beccf21ae1b70149b88c3affb">rsb.h</a>
+</li>
+<li>RSB_FLAG_SYMMETRIC
+: <a class="el" href="rsb_8h.html#a183c4b8ead89e452d1c204c92b3f8f61">rsb.h</a>
+</li>
+<li>RSB_FLAG_TRIANGULAR
+: <a class="el" href="rsb_8h.html#adca72e259846399da3512fcb062ad518">rsb.h</a>
+</li>
+<li>RSB_FLAG_UNIT_DIAG_IMPLICIT
+: <a class="el" href="rsb_8h.html#a4af24812309eb471c861ba618cb996f2">rsb.h</a>
+</li>
+<li>RSB_FLAG_UPPER
+: <a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">rsb.h</a>
+</li>
+<li>RSB_FLAG_UPPER_HERMITIAN
+: <a class="el" href="rsb_8h.html#a0565be78af9bac79d07376d501237b00">rsb.h</a>
+</li>
+<li>RSB_FLAG_UPPER_SYMMETRIC
+: <a class="el" href="rsb_8h.html#a3c2701b010fa2928685f3253a0ff1a99">rsb.h</a>
+</li>
+<li>RSB_FLAG_UPPER_TRIANGULAR
+: <a class="el" href="rsb_8h.html#a9168d244582c1a4c57a9ec93d9432539">rsb.h</a>
+</li>
+<li>RSB_FLAG_USE_CSR_RESERVED
+: <a class="el" href="rsb_8h.html#a45ae263259390619ea303a5fbe2640f2">rsb.h</a>
+</li>
+<li>RSB_FLAG_USE_HALFWORD_INDICES
+: <a class="el" href="rsb_8h.html#a693ed0d053ad81ca2ad6dc383afa0586">rsb.h</a>
+</li>
+<li>RSB_FLAG_USE_HALFWORD_INDICES_COO
+: <a class="el" href="rsb_8h.html#a3051409699970a0df3acfee8cf70b9aa">rsb.h</a>
+</li>
+<li>RSB_FLAG_USE_HALFWORD_INDICES_CSR
+: <a class="el" href="rsb_8h.html#a1b1cf74b08234e3c7c7d463e7c4acea1">rsb.h</a>
+</li>
+<li>RSB_FLAG_WANT_BCSS_STORAGE
+: <a class="el" href="rsb_8h.html#a12c780564b9c8db7f8104cc5952a490f">rsb.h</a>
+</li>
+<li>RSB_FLAG_WANT_COLUMN_MAJOR_ORDER
+: <a class="el" href="rsb_8h.html#a6ed7790c2f7129a6e051b8167c48a43c">rsb.h</a>
+</li>
+<li>RSB_FLAG_WANT_COO_STORAGE
+: <a class="el" href="rsb_8h.html#a0ee1c6081692a3ca98ee7ea0c7648ec8">rsb.h</a>
+</li>
+<li>RSB_FLAG_WANT_ROW_MAJOR_ORDER
+: <a class="el" href="rsb_8h.html#a7e9ef3a7ae3c22ab5c76d36b3ac482cc">rsb.h</a>
+</li>
+<li>rsb_flags_t
+: <a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb.h</a>
+</li>
+<li>RSB_HALF_MAX_SIGNED
+: <a class="el" href="rsb_8h.html#ab16e9407330a11d4163be1cc586990b3">rsb.h</a>
+</li>
+<li>RSB_HAVE_RSB_KERNELS
+: <a class="el" href="rsb__blas__sparse_8F90.html#a151b6b061725a39255ee4de3db2faf8e">rsb_blas_sparse.F90</a>
+</li>
+<li>RSB_HAVE_TYPE_DOUBLE
+: <a class="el" href="rsb__types_8h.html#a50018495517829b14797a568788e1526">rsb_types.h</a>
+</li>
+<li>RSB_HAVE_TYPE_DOUBLE_COMPLEX
+: <a class="el" href="rsb__types_8h.html#a922101e7269ccc3184935c451b606a2c">rsb_types.h</a>
+</li>
+<li>RSB_HAVE_TYPE_FLOAT
+: <a class="el" href="rsb__types_8h.html#a82f77f519ff60dffac284034c12d2635">rsb_types.h</a>
+</li>
+<li>RSB_HAVE_TYPE_FLOAT_COMPLEX
+: <a class="el" href="rsb__types_8h.html#a782af474ca5eba101233fc265965fbbb">rsb_types.h</a>
+</li>
+<li>RSB_HEADER_VERSION_STRING
+: <a class="el" href="rsb__types_8h.html#a8bc9584f994ecb2639ee548156562aae">rsb_types.h</a>
+</li>
+<li>rsb_int_t
+: <a class="el" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb.h</a>
+</li>
+<li>RSB_INTERFACE_RETURN_ERR
+: <a class="el" href="rsb__rsb_8c.html#a85dee9bd15f321bfac4a8f055f072d1b">rsb_rsb.c</a>
+</li>
+<li>RSB_INTERFACE_RETURN_MTX
+: <a class="el" href="rsb__rsb_8c.html#abd0f924354130cfb2cbe4b8345dbc6fd">rsb_rsb.c</a>
+</li>
+<li>RSB_INTERFACE_RETURN_MTX_ERRP
+: <a class="el" href="rsb__rsb_8c.html#af6ebbe2e678aef616abb33526b312f65">rsb_rsb.c</a>
+</li>
+<li>RSB_INTERFACE_RETURN_VAL
+: <a class="el" href="rsb__rsb_8c.html#a619e228eb1a40cb1ae303be5ca6fa2ed">rsb_rsb.c</a>
+</li>
+<li>RSB_INVALID_COO_IDX_VAL
+: <a class="el" href="rsb_8h.html#a88e6b599d650b509b54d4fe7c3008b12">rsb.h</a>
+</li>
+<li>RSB_INVALID_NNZ_IDX_VAL
+: <a class="el" href="rsb_8h.html#a20253111f2fa6a4bc0c75fe7e6430890">rsb.h</a>
+</li>
+<li>RSB_IO_SPECIFIER_GET
+: <a class="el" href="rsb_8h.html#afd8b1de2977b2d810f9c615195d9acec">rsb.h</a>
+</li>
+<li>RSB_IO_SPECIFIER_SET
+: <a class="el" href="rsb_8h.html#aef619407815752dc767cfd6870b72101">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_BOUNDED_BOX_COMPUTATION
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a3d3a5bf255dfc8719f6553e8ac4ecd53">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_CACHE_BLOCKING_METHOD
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a26f34783677f687b1e857de76a22fdd7">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_EXECUTING_THREADS
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ad9150d4d5672d1835185d6e2286d92f4">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a56c0c6849135ce5fa9edd7907ab3e0cb">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_IS_INITIALIZED_MARKER
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ad901e7c7c31f4b9118bb313db549ea3b">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_LEAF_LEVEL_MULTIVEC
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a1584d16b27130ebda9f7fefa1d89afa5">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_LIBRSB_ETIME
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ae900da85e3fc1f46083ee0abf34db1d9">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_MAX_MEMORY_ALLOCATED
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ab053d73dfb6ce061b9d95a2f7e908dc9">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_MAX_MEMORY_ALLOCATIONS
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a124bff2579d966823c2371e304656f84">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_MEM_ALLOC_CNT
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ad74c3b62aa359b12e7287e7238792e0f">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_MEM_ALLOC_TOT
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a87d7018453cb3179349f12f9e4667b24">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_MEMORY_HIERARCHY_INFO_STRING
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a574d237ad4bb16d884bb46e5a6670d0d">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_OUTPUT_STREAM
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ae398997ce8253b813f2bbb5834e9670f">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_SORT_METHOD
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a8fd1736c99255474630bee80d4924673">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_SUBDIVISION_MULTIPLIER
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6aae67087e45854502f7c54e0065ed9a3a">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_VERBOSE_ERRORS
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a0681bef1f3aca28448c14c4ed7eb4001">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_VERBOSE_EXIT
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a53498790997d5ef408751f9e19994532">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_VERBOSE_INIT
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a3a0e460ef74cf3b2edf102c1aaa73d8a">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_VERBOSE_TUNING
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a2164b61bd47cf53a3c8d287b419ab591">rsb.h</a>
+</li>
+<li>RSB_IS_SIGNED
+: <a class="el" href="rsb_8h.html#af7d43df61fa72c8971cece701ae53a22">rsb.h</a>
+</li>
+<li>rsb_lib_exit()
+: <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_rsb.c</a>
+</li>
+<li>rsb_lib_get_opt()
+: <a class="el" href="rsb_8h.html#a96a28efc32dd050d2a74208b3ad2f227">rsb.h</a>
+, <a class="el" href="rsb__rsb_8c.html#a96a28efc32dd050d2a74208b3ad2f227">rsb_rsb.c</a>
+</li>
+<li>rsb_lib_init()
+: <a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_rsb.c</a>
+</li>
+<li>rsb_lib_reinit()
+: <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_rsb.c</a>
+</li>
+<li>rsb_lib_set_opt()
+: <a class="el" href="rsb__rsb_8c.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_rsb.c</a>
+, <a class="el" href="rsb_8h.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb.h</a>
+</li>
+<li>rsb_lib_set_opt_str()
+: <a class="el" href="group__rsb__doc__rsb.html#ga4670aa682e70f82d5039c600e426a368">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga4670aa682e70f82d5039c600e426a368">rsb_rsb.c</a>
+</li>
+<li>RSB_LIBRSB_VER
+: <a class="el" href="rsb__types_8h.html#a08fbe9d2c97a5b73bdad3dbe1402c83b">rsb_types.h</a>
+</li>
+<li>RSB_LIBRSB_VER_DATE
+: <a class="el" href="rsb__types_8h.html#ae26b1dec914b2cf2f233c07d2f4815d1">rsb_types.h</a>
+</li>
+<li>RSB_LIBRSB_VER_MAJOR
+: <a class="el" href="rsb__types_8h.html#a7fd4e640e7aa86fdce8f3d25ac230b5c">rsb_types.h</a>
+</li>
+<li>RSB_LIBRSB_VER_MINOR
+: <a class="el" href="rsb__types_8h.html#af8d3f63778c3120b14c3126259872cfe">rsb_types.h</a>
+</li>
+<li>RSB_LIBRSB_VER_PATCH
+: <a class="el" href="rsb__types_8h.html#ab3384c84112fe759dc57c5dd206a0cde">rsb_types.h</a>
+</li>
+<li>RSB_LIBRSB_VER_STRING
+: <a class="el" href="rsb__types_8h.html#af66941d5b1f1595c29f9c7e131d22242">rsb_types.h</a>
+</li>
+<li>rsb_load_spblas_matrix_file_as_matrix_market()
+: <a class="el" href="blas__sparse_8h.html#a7769e3aac9ffdba04f29dd1f8f57daa4">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7769e3aac9ffdba04f29dd1f8f57daa4">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas__handle_8c.html#a7769e3aac9ffdba04f29dd1f8f57daa4">rsb_libspblas_handle.c</a>
+</li>
+<li>RSB_MARF_EPS
+: <a class="el" href="rsb_8h.html#a2d332e6ed899c019e54ab4e540c82fd8">rsb.h</a>
+</li>
+<li>RSB_MARF_EPS_B
+: <a class="el" href="rsb_8h.html#a77106fe2435306ef028060d0eb7dca14">rsb.h</a>
+</li>
+<li>RSB_MARF_EPS_L
+: <a class="el" href="rsb_8h.html#a3562195777ed886282bd6287551a235c">rsb.h</a>
+</li>
+<li>RSB_MARF_EPS_S
+: <a class="el" href="rsb_8h.html#a8055e62d2824131421d22de1a0256f79">rsb.h</a>
+</li>
+<li>RSB_MARF_RGB
+: <a class="el" href="rsb_8h.html#a53604f78febc54c616282c66bca02daf">rsb.h</a>
+</li>
+<li>rsb_marf_t
+: <a class="el" href="rsb_8h.html#aa8f24976a4e4bdf8403ab433564c2005">rsb.h</a>
+</li>
+<li>RSB_MARKER_COO_VALUE
+: <a class="el" href="rsb_8h.html#af88edb77d90929bf6cef617ab862d2bc">rsb.h</a>
+</li>
+<li>RSB_MARKER_NNZ_VALUE
+: <a class="el" href="rsb_8h.html#a967c5aae0dc536668ed67d810378e7fc">rsb.h</a>
+</li>
+<li>RSB_MAX_MATRIX_DIM
+: <a class="el" href="rsb_8h.html#a318a92d60883f6ade7345459074374f5">rsb.h</a>
+</li>
+<li>RSB_MAX_MATRIX_NNZ
+: <a class="el" href="rsb_8h.html#a63c69ef30355064d818326768674c9b2">rsb.h</a>
+</li>
+<li>RSB_MAX_SIGNED
+: <a class="el" href="rsb_8h.html#a465659728318d495a364e906806ffae7">rsb.h</a>
+</li>
+<li>RSB_MAX_UNSIGNED
+: <a class="el" href="rsb_8h.html#a9ea900484e72f4876b3fd8d9f402ea39">rsb.h</a>
+</li>
+<li>RSB_MAX_VALUE_FOR_TYPE
+: <a class="el" href="rsb_8h.html#a0ad77b7888128f3e1b144b48e6e93b87">rsb.h</a>
+</li>
+<li>RSB_MIF_INDEX_STORAGE_IN_BYTES__TO__SIZE_T
+: <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa21c25054ec5c5a88f951d68457132858">rsb.h</a>
+</li>
+<li>RSB_MIF_INDEX_STORAGE_IN_BYTES_PER_NNZ__TO__RSB_REAL_T
+: <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa6662a0302f39b67aa567f7c023cfe065">rsb.h</a>
+</li>
+<li>RSB_MIF_LEAVES_COUNT__TO__RSB_BLK_INDEX_T
+: <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa6256658253071990797f06872811074f">rsb.h</a>
+</li>
+<li>RSB_MIF_MATRIX_COLS__TO__RSB_COO_INDEX_T
+: <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8faa75c11724776205763e381cebb7059d0">rsb.h</a>
+</li>
+<li>RSB_MIF_MATRIX_FLAGS__TO__RSB_FLAGS_T
+: <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa7a9e06fbef26bddc97005eea246c478e">rsb.h</a>
+</li>
+<li>RSB_MIF_MATRIX_INFO__TO__CHAR_P
+: <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa693bf11ea0f96ef79238ab422fcb3f81">rsb.h</a>
+</li>
+<li>RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T
+: <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa4c02a263fffec5ad80552c8ce3cc782c">rsb.h</a>
+</li>
+<li>RSB_MIF_MATRIX_ROWS__TO__RSB_COO_INDEX_T
+: <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fae9b21eeea628145e87690a5968a5c954">rsb.h</a>
+</li>
+<li>RSB_MIF_MATRIX_TYPECODE__TO__RSB_TYPE_T
+: <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa0ee69c4f0e9ac9a8ee4614a295b7be93">rsb.h</a>
+</li>
+<li>rsb_mif_t
+: <a class="el" href="group__rsb__doc__rsb.html#ga211914bd1afe8044a70dc864f3c1fc8f">rsb.h</a>
+</li>
+<li>RSB_MIF_TOTAL_SIZE__TO__SIZE_T
+: <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa16df07735a83056772b8bde7359e957f">rsb.h</a>
+</li>
+<li>RSB_MIN_MATRIX_DIM
+: <a class="el" href="rsb_8h.html#abaccfe39f69712cebf501c9d55b1a4b8">rsb.h</a>
+</li>
+<li>RSB_MIN_MATRIX_NNZ
+: <a class="el" href="rsb_8h.html#a425f78c0a49004e45df20db728f8196d">rsb.h</a>
+</li>
+<li>rsb_mtx_add_to_dense()
+: <a class="el" href="group__rsb__doc__rsb.html#gaf30a70ea183d30d216f700782fc01524">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gaf30a70ea183d30d216f700782fc01524">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_alloc_from_coo_begin()
+: <a class="el" href="group__rsb__doc__rsb.html#gafca80e53d47a7ec3eb116e755fe47c58">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gafca80e53d47a7ec3eb116e755fe47c58">rsb.h</a>
+</li>
+<li>rsb_mtx_alloc_from_coo_const()
+: <a class="el" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_alloc_from_coo_end()
+: <a class="el" href="group__rsb__doc__rsb.html#gab583fbefa0a66e9d30dac034480c2d86">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gab583fbefa0a66e9d30dac034480c2d86">rsb.h</a>
+</li>
+<li>rsb_mtx_alloc_from_coo_inplace()
+: <a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_alloc_from_csc_const()
+: <a class="el" href="group__rsb__doc__rsb.html#gaebf57d9e5263f41eb6163581ffc141aa">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gaebf57d9e5263f41eb6163581ffc141aa">rsb.h</a>
+</li>
+<li>rsb_mtx_alloc_from_csr_const()
+: <a class="el" href="group__rsb__doc__rsb.html#ga13d417f776654fd159f274e56191573e">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga13d417f776654fd159f274e56191573e">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_alloc_from_csr_inplace()
+: <a class="el" href="group__rsb__doc__rsb.html#ga60121166daf00968ba717931f04ea455">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga60121166daf00968ba717931f04ea455">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_clone()
+: <a class="el" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_free()
+: <a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_get_coo()
+: <a class="el" href="group__rsb__doc__rsb.html#gaac3c6c033733a8101b9ccf56f8fc7112">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gaac3c6c033733a8101b9ccf56f8fc7112">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_get_coo_block()
+: <a class="el" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_get_csr()
+: <a class="el" href="group__rsb__doc__rsb.html#ga4adca460f50bc1ad7d9ffdfda2273b87">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga4adca460f50bc1ad7d9ffdfda2273b87">rsb.h</a>
+</li>
+<li>rsb_mtx_get_info()
+: <a class="el" href="group__rsb__doc__rsb.html#gad9a3eacd54fb7043464006cd57866edf">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gad9a3eacd54fb7043464006cd57866edf">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_get_info_str()
+: <a class="el" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb.h</a>
+</li>
+<li>rsb_mtx_get_norm
+: <a class="el" href="rsb_8h.html#a552fe79778c824e8d88ddfd0d9c58586">rsb.h</a>
+</li>
+<li>rsb_mtx_get_nrm()
+: <a class="el" href="group__rsb__doc__rsb.html#ga6a645ce89fd167d72c92cdcfbcd8ed81">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga6a645ce89fd167d72c92cdcfbcd8ed81">rsb.h</a>
+</li>
+<li>rsb_mtx_get_prec()
+: <a class="el" href="group__rsb__doc__rsb.html#gadaee12cc24dac7f8ebc68efd3d09c819">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gadaee12cc24dac7f8ebc68efd3d09c819">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_get_preconditioner
+: <a class="el" href="rsb_8h.html#a8ba1704fe1f07cb9abe856d9a1a20ea9">rsb.h</a>
+</li>
+<li>rsb_mtx_get_rows_sparse()
+: <a class="el" href="group__rsb__doc__rsb.html#gaa01c4a69db732f99e8a960ee8c9afa23">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gaa01c4a69db732f99e8a960ee8c9afa23">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_get_vals()
+: <a class="el" href="group__rsb__doc__rsb.html#gad8f1aa9ac5081edd789374e7bb82697f">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gad8f1aa9ac5081edd789374e7bb82697f">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_get_values
+: <a class="el" href="rsb_8h.html#af08b72a410e54fd7db6dcb12db232aec">rsb.h</a>
+</li>
+<li>rsb_mtx_get_vec()
+: <a class="el" href="group__rsb__doc__rsb.html#gad0b2352cea6b7512b466d1c51327fcf8">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gad0b2352cea6b7512b466d1c51327fcf8">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_rndr()
+: <a class="el" href="group__rsb__doc__rsb.html#gab0702d7080d1699162e4201bc70cc5ee">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gab0702d7080d1699162e4201bc70cc5ee">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_set_vals()
+: <a class="el" href="group__rsb__doc__rsb.html#gab8069ad6d5a67bc8a726131891e98c46">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gab8069ad6d5a67bc8a726131891e98c46">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_set_values
+: <a class="el" href="rsb_8h.html#a5b622f80450cdef4f8a06742eacbb045">rsb.h</a>
+</li>
+<li>rsb_mtx_switch_to_coo()
+: <a class="el" href="group__rsb__doc__rsb.html#gadf75c148fe661486ab0d8140657b8d9a">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gadf75c148fe661486ab0d8140657b8d9a">rsb.h</a>
+</li>
+<li>rsb_mtx_switch_to_csr()
+: <a class="el" href="group__rsb__doc__rsb.html#ga3c46a4942a6acb90063d721b6446e78e">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga3c46a4942a6acb90063d721b6446e78e">rsb.h</a>
+</li>
+<li>rsb_mtx_upd_vals()
+: <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb.h</a>
+</li>
+<li>rsb_mtx_upd_values
+: <a class="el" href="rsb_8h.html#a40d40562867aceec2899cdddf79b3086">rsb.h</a>
+</li>
+<li>RSB_NNZ_BLK_MAX
+: <a class="el" href="rsb_8h.html#af576621f0846e0b9a999ea21641e13c8">rsb.h</a>
+</li>
+<li>rsb_nnz_idx_t
+: <a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb.h</a>
+</li>
+<li>RSB_NULL_EXIT_OPTIONS
+: <a class="el" href="rsb_8h.html#a2234a5e51156de6c95c3f8c2951ae09f">rsb.h</a>
+</li>
+<li>RSB_NULL_INIT_OPTIONS
+: <a class="el" href="rsb_8h.html#add105c42e570c5c269680d437f8c51e2">rsb.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_DEFAULT
+: <a class="el" href="rsb__types_8h.html#a56fc5ef14266266227797621e0a1e217">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_DEFAULT_INTEGER
+: <a class="el" href="rsb__types_8h.html#a70b99562829107b4fe1f529aacd4729a">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_DOUBLE
+: <a class="el" href="rsb__types_8h.html#a7849bc51eadedaa51a1b27569be89d86">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX
+: <a class="el" href="rsb__types_8h.html#a51ca2ff55d0c852f659f5c76ecd536cd">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_FIRST_BLAS
+: <a class="el" href="rsb__types_8h.html#ac51619f9cbe0a9a4cbc55e0451bfb59d">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_FLOAT
+: <a class="el" href="rsb__types_8h.html#a7628cd01c7e84e4ada529b3412d118b3">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_FLOAT_COMPLEX
+: <a class="el" href="rsb__types_8h.html#ac46f79bff4499a5e8b6075150ecabf69">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_FORTRAN_DOUBLE
+: <a class="el" href="rsb__types_8h.html#af465e222cfdede5b5df9a26a35b5e115">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_FORTRAN_DOUBLE_COMPLEX
+: <a class="el" href="rsb__types_8h.html#a2fc48337d7c3ac2cd4e9e509c73edbf9">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_FORTRAN_FLOAT
+: <a class="el" href="rsb__types_8h.html#a262db8d5b52285bd503cc1e60039135a">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_FORTRAN_FLOAT_COMPLEX
+: <a class="el" href="rsb__types_8h.html#a3bab97530d248482496ac20667e102f4">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_FORTRAN_INT
+: <a class="el" href="rsb__types_8h.html#a16d646278df635b6e4fc57c43241fb98">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_FORTRAN_SAME_TYPE
+: <a class="el" href="rsb__types_8h.html#a17195a2481a24153b99f2be1f0577ff1">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_INVALID_TYPE
+: <a class="el" href="rsb__types_8h.html#ac418f097835ff41e0baaf5635d21b6f9">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_SAME_TYPE
+: <a class="el" href="rsb__types_8h.html#a532c3e9733221d59bac99cb1f795d266">rsb_types.h</a>
+</li>
+<li>rsb_opt_t
+: <a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6">rsb.h</a>
+</li>
+<li>rsb_perror()
+: <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb.h</a>
+</li>
+<li>RSB_PRECF_ILU0
+: <a class="el" href="rsb_8h.html#a56bb6be11af9a5a0ed9aaa8774ab6db9">rsb.h</a>
+</li>
+<li>rsb_precf_t
+: <a class="el" href="rsb_8h.html#a528640277b196f7cfce2016cffbdd340">rsb.h</a>
+</li>
+<li>RSB_PROGRAM_ERROR
+: <a class="el" href="rsb_8h.html#a7f6f859f61b0855e5389e1bc98829bd4">rsb.h</a>
+</li>
+<li>RSB_PROGRAM_SUCCESS
+: <a class="el" href="rsb_8h.html#a61f8a9ebc9bced69076389ba3cd2cce8">rsb.h</a>
+</li>
+<li>rsb_psblas_trans_to_rsb_trans()
+: <a class="el" href="group__rsb__doc__rsb.html#ga7459601f0d54bd95549959b9749fedde">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga7459601f0d54bd95549959b9749fedde">rsb_rsb.c</a>
+</li>
+<li>rsb_real_t
+: <a class="el" href="rsb_8h.html#ab6fedd060aee0dd9f61f0438987a99a9">rsb.h</a>
+</li>
+<li>RSB_REINIT_SINGLE_VALUE
+: <a class="el" href="rsb_8h.html#afeb783fe4dca5762623a621b7095dd01">rsb.h</a>
+</li>
+<li>RSB_REINIT_SINGLE_VALUE_C_IOP
+: <a class="el" href="rsb_8h.html#aa0ca08a816983bc6294317d0e22e0509">rsb.h</a>
+</li>
+<li>RSB_REINIT_SINGLE_VALUE_GET
+: <a class="el" href="rsb_8h.html#ae6f837f13f6413a163f2c6b0c02dadf2">rsb.h</a>
+</li>
+<li>RSB_REINIT_SINGLE_VALUE_SET
+: <a class="el" href="rsb_8h.html#a20da3b07d4c17771762413010816e36e">rsb.h</a>
+</li>
+<li>RSB_ROWS_TRANSPOSITIONS_ARRAY
+: <a class="el" href="rsb__types_8h.html#a9fcc01fb97c5b5482be8ab4cd7c2ee33">rsb_types.h</a>
+</li>
+<li>RSB_SIZEOF
+: <a class="el" href="group__rsb__doc__rsb.html#ga68e662dcfb6981c1efc8eb03ef327182">rsb.h</a>
+</li>
+<li>rsb_spmm()
+: <a class="el" href="group__rsb__doc__rsb.html#ga3ec8d721b5333aae6ea9b03eb0039285">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga3ec8d721b5333aae6ea9b03eb0039285">rsb_rsb.c</a>
+</li>
+<li>rsb_spmsp()
+: <a class="el" href="group__rsb__doc__rsb.html#ga8813ccbbb1065ac76bfe22c42feafa05">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga8813ccbbb1065ac76bfe22c42feafa05">rsb_rsb.c</a>
+</li>
+<li>rsb_spmsp_to_dense()
+: <a class="el" href="group__rsb__doc__rsb.html#ga74d97612d4af70244c886b9eadd90a0e">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga74d97612d4af70244c886b9eadd90a0e">rsb_rsb.c</a>
+</li>
+<li>rsb_spmv()
+: <a class="el" href="group__rsb__doc__rsb.html#ga4a16a82d289c76a437915db449553d4d">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga4a16a82d289c76a437915db449553d4d">rsb.h</a>
+</li>
+<li>rsb_sppsp()
+: <a class="el" href="group__rsb__doc__rsb.html#ga30823d02e577e59da4ccff6baaeb8ea1">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga30823d02e577e59da4ccff6baaeb8ea1">rsb_rsb.c</a>
+</li>
+<li>rsb_spsm()
+: <a class="el" href="group__rsb__doc__rsb.html#ga48e6f3844605fffac9f622f05afa6043">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga48e6f3844605fffac9f622f05afa6043">rsb_rsb.c</a>
+</li>
+<li>rsb_spsv()
+: <a class="el" href="group__rsb__doc__rsb.html#ga9b044332b720d3f8083ae792068fb04a">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga9b044332b720d3f8083ae792068fb04a">rsb_rsb.c</a>
+</li>
+<li>rsb_strerror_r()
+: <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb.h</a>
+</li>
+<li>rsb_time()
+: <a class="el" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_rsb.c</a>
+</li>
+<li>rsb_time_t
+: <a class="el" href="rsb_8h.html#ab7a0af874a2765e9271a63ee4acf3d5d">rsb.h</a>
+</li>
+<li>rsb_trans_t
+: <a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb.h</a>
+</li>
+<li>RSB_TRANSPOSITION_C
+: <a class="el" href="rsb__types_8h.html#abd3aaf223656dece97dee2107e485217">rsb_types.h</a>
+</li>
+<li>RSB_TRANSPOSITION_N
+: <a class="el" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">rsb_types.h</a>
+</li>
+<li>RSB_TRANSPOSITION_T
+: <a class="el" href="rsb__types_8h.html#a37f8cea71946de2f832bdb9d438d5edf">rsb_types.h</a>
+</li>
+<li>rsb_tune_spmm()
+: <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb.h</a>
+</li>
+<li>rsb_tune_spsm()
+: <a class="el" href="group__rsb__doc__rsb.html#ga8d7a05bbc165bd6ac20e8e23487a5871">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga8d7a05bbc165bd6ac20e8e23487a5871">rsb_rsb.c</a>
+</li>
+<li>RSB_TYPE_INDEX_DOUBLE
+: <a class="el" href="rsb__types_8h.html#a4abf98873753295350143ca544b79db3">rsb_types.h</a>
+</li>
+<li>RSB_TYPE_INDEX_DOUBLE_COMPLEX
+: <a class="el" href="rsb__types_8h.html#a1a13d13b3c7f84e7fc8ca1df3878a07d">rsb_types.h</a>
+</li>
+<li>RSB_TYPE_INDEX_FLOAT
+: <a class="el" href="rsb__types_8h.html#a8d5222339367566d624a1e678d116d0d">rsb_types.h</a>
+</li>
+<li>RSB_TYPE_INDEX_FLOAT_COMPLEX
+: <a class="el" href="rsb__types_8h.html#a8445bf2e852a4b20d178ae4b475f4552">rsb_types.h</a>
+</li>
+<li>rsb_type_t
+: <a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb.h</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
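For orientation: the index page that closes above is dominated by the rsb.h operation entries (rsb_spmv(), rsb_spsv(), rsb_tune_spmm(), RSB_TRANSPOSITION_N, ...). A minimal, self-contained C sketch of the calling sequence those symbols document follows. It is illustrative only and assumes a librsb 1.2 installation providing rsb.h; the setup/teardown calls not visible in this excerpt (rsb_lib_init(), rsb_mtx_alloc_from_coo_const(), rsb_mtx_free(), rsb_lib_exit()) are the documented librsb 1.2 API.

    /* sketch: build a 3x3 COO matrix and multiply it, per the rsb.h index above */
    #include <rsb.h>
    #include <stdio.h>

    int main(void)
    {
        rsb_err_t errval = RSB_ERR_NO_ERROR;
        struct rsb_mtx_t *mtxAp = NULL;
        const rsb_coo_idx_t IA[] = {0, 1, 2}, JA[] = {0, 1, 2};
        const double VA[] = {1.0, 2.0, 3.0};   /* diagonal entries, COO layout */
        const double X[] = {1.0, 1.0, 1.0};
        double Y[] = {0.0, 0.0, 0.0};
        const double alpha = 1.0, beta = 0.0;

        if (rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
            return 1;
        mtxAp = rsb_mtx_alloc_from_coo_const(VA, IA, JA, 3,
                RSB_NUMERICAL_TYPE_DOUBLE, 3, 3,
                RSB_DEFAULT_ROW_BLOCKING, RSB_DEFAULT_COL_BLOCKING,
                RSB_FLAG_NOFLAGS, &errval);
        if (mtxAp == NULL)
            return 1;
        /* Y := alpha * A * X + beta * Y, with A untransposed */
        errval = rsb_spmv(RSB_TRANSPOSITION_N, &alpha, mtxAp, X, 1, &beta, Y, 1);
        printf("Y[0] = %g\n", Y[0]);           /* expected: 1.0 */
        rsb_mtx_free(mtxAp);
        rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
        return (errval == RSB_ERR_NO_ERROR) ? 0 : 1;
    }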
diff --git a/doc/html/globals_defs.html b/doc/html/globals_defs.html
new file mode 100644
index 0000000..5deea45
--- /dev/null
+++ b/doc/html/globals_defs.html
@@ -0,0 +1,580 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Globals</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="files.html"><span>File List</span></a></li>
+      <li class="current"><a href="globals.html"><span>Globals</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li><a href="globals.html"><span>All</span></a></li>
+      <li><a href="globals_func.html"><span>Functions</span></a></li>
+      <li><a href="globals_type.html"><span>Typedefs</span></a></li>
+      <li><a href="globals_enum.html"><span>Enumerations</span></a></li>
+      <li><a href="globals_eval.html"><span>Enumerator</span></a></li>
+      <li class="current"><a href="globals_defs.html"><span>Macros</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li><a href="#index_b"><span>b</span></a></li>
+      <li><a href="#index_r"><span>r</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+&#160;
+
+<h3><a class="anchor" id="index_b"></a>- b -</h3><ul>
+<li>BLAS_ENUM_H
+: <a class="el" href="blas__sparse_8h.html#aab00e94b9818e92bb03c32f7ec677932">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#aab00e94b9818e92bb03c32f7ec677932">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#aab00e94b9818e92bb03c32f7ec677932">rsb_libspblas.h</a>
+</li>
+<li>BLAS_usgp
+: <a class="el" href="rsb__libspblas_8h.html#a5eec91b6d95962811bd9cb4e37266214">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#a5eec91b6d95962811bd9cb4e37266214">blas_sparse.h</a>
+</li>
+<li>BLAS_ussp
+: <a class="el" href="blas__sparse_8h.html#a6719ae77dfef6d6dd0790e34a65c1924">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6719ae77dfef6d6dd0790e34a65c1924">rsb_libspblas.h</a>
+</li>
+</ul>
+
+
+<h3><a class="anchor" id="index_r"></a>- r -</h3><ul>
+<li>RSB_BOOL_FALSE
+: <a class="el" href="rsb_8h.html#ad396755fe9a1d81991d5ac238058db18">rsb.h</a>
+</li>
+<li>RSB_BOOL_TRUE
+: <a class="el" href="rsb_8h.html#af580e920b9f507028d3b7d34b4dadd6f">rsb.h</a>
+</li>
+<li>RSB_CHAR_AS_TRANSPOSITION
+: <a class="el" href="rsb__types_8h.html#a6ea10439ed32405f43a9f5e6c9b64787">rsb_types.h</a>
+</li>
+<li>RSB_CHAR_BIT
+: <a class="el" href="rsb_8h.html#a5749695a0fccd6348d669c6790185a68">rsb.h</a>
+</li>
+<li>RSB_CONST_MAX_TUNING_ROUNDS
+: <a class="el" href="rsb__types_8h.html#aef1f5467f82116857e5003daa0f75ccd">rsb_types.h</a>
+</li>
+<li>RSB_DEFAULT_BLOCKING
+: <a class="el" href="rsb_8h.html#a3579d00f3b97cd569707f7c62e462322">rsb.h</a>
+</li>
+<li>RSB_DEFAULT_COL_BLOCKING
+: <a class="el" href="rsb_8h.html#a0f7e634867763b3cc1faaa3ba8e106db">rsb.h</a>
+</li>
+<li>RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE
+: <a class="el" href="rsb__types_8h.html#ae7da5c374c2384c32084fc50ede06a4e">rsb_types.h</a>
+</li>
+<li>RSB_DEFAULT_POSSIBLY_INTEGER_TYPE
+: <a class="el" href="rsb__types_8h.html#ab2ec9d6e0af8a10a032d597423fef559">rsb_types.h</a>
+</li>
+<li>RSB_DEFAULT_POSSIBLY_INTEGER_TYPE_STRING
+: <a class="el" href="rsb__types_8h.html#a2a35f3f9a39d1b2016cf6aae4bfbf3e4">rsb_types.h</a>
+</li>
+<li>RSB_DEFAULT_ROW_BLOCKING
+: <a class="el" href="rsb_8h.html#a7be45869842d6ecc5646740350d27d26">rsb.h</a>
+</li>
+<li>RSB_DEFAULT_SYMMETRY
+: <a class="el" href="rsb__types_8h.html#a898310ae6ad07802d6d261b6053cc3c5">rsb_types.h</a>
+</li>
+<li>RSB_DEFAULT_TRANSPOSITION
+: <a class="el" href="rsb__types_8h.html#a2fb899b07173e590c8a13ae2b32ca383">rsb_types.h</a>
+</li>
+<li>RSB_DEFAULT_TYPE
+: <a class="el" href="rsb__types_8h.html#aa5e96f00841ec8f4f3ca1ff0bf1b5bbd">rsb_types.h</a>
+</li>
+<li>RSB_DEFAULT_TYPE_STRING
+: <a class="el" href="rsb__types_8h.html#acf1cad553e2bb07697c34bc5a6123ca1">rsb_types.h</a>
+</li>
+<li>RSB_DO_FLAG_ADD
+: <a class="el" href="rsb_8h.html#a3949d8af584a0e0e0a17e96d28b8d078">rsb.h</a>
+</li>
+<li>RSB_DO_FLAG_DEL
+: <a class="el" href="rsb_8h.html#aee33ededde2130f79f6c84966f1a180b">rsb.h</a>
+</li>
+<li>RSB_DO_FLAG_FILTERONLY
+: <a class="el" href="rsb_8h.html#a70e87c7a0afaf9b27650d252086559f7">rsb.h</a>
+</li>
+<li>RSB_DO_FLAG_FILTEROUT
+: <a class="el" href="rsb_8h.html#a23beda4691d4e83e6d3984960dc9f422">rsb.h</a>
+</li>
+<li>RSB_DO_FLAG_HAS
+: <a class="el" href="rsb_8h.html#ad155950ce44eddd61911184bccba86ab">rsb.h</a>
+</li>
+<li>RSB_DO_FLAG_HAS_INTERSECTION
+: <a class="el" href="rsb_8h.html#a116d0af2caf6bddd358035597a260244">rsb.h</a>
+</li>
+<li>RSB_ERR_BADARGS
+: <a class="el" href="rsb_8h.html#af0b262c6c554403269234219b3aec409">rsb.h</a>
+</li>
+<li>RSB_ERR_CAST
+: <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">rsb.h</a>
+</li>
+<li>RSB_ERR_CORRUPT_INPUT_DATA
+: <a class="el" href="rsb_8h.html#a14103828be5eb82e40d3b772ce54abda">rsb.h</a>
+</li>
+<li>RSB_ERR_COULD_NOT_HONOUR_EXTERNALLY_ALLOCATION_FLAGS
+: <a class="el" href="rsb_8h.html#a935de71c3acc5714ad539d65288e2593">rsb.h</a>
+</li>
+<li>RSB_ERR_ENOMEM
+: <a class="el" href="rsb_8h.html#a538215b32e908646c979a2e446ae5467">rsb.h</a>
+</li>
+<li>RSB_ERR_FAILED_MEMHIER_DETECTION
+: <a class="el" href="rsb_8h.html#a3cacb604d0ad892e195c7c97eda18dba">rsb.h</a>
+</li>
+<li>RSB_ERR_FORTRAN_ERROR
+: <a class="el" href="rsb_8h.html#a40628c24058f45a481e18b6ad491bf1b">rsb.h</a>
+</li>
+<li>RSB_ERR_GENERIC_ERROR
+: <a class="el" href="rsb_8h.html#ad46ebc803d7cad695babdc7d8c709828">rsb.h</a>
+</li>
+<li>RSB_ERR_INTERNAL_ERROR
+: <a class="el" href="rsb_8h.html#a8e650a7e3b5c5aa1fb9763b0f1498126">rsb.h</a>
+</li>
+<li>RSB_ERR_INVALID_NUMERICAL_DATA
+: <a class="el" href="rsb_8h.html#a8d504baa13048da05bb71235e2c8d181">rsb.h</a>
+</li>
+<li>RSB_ERR_LIMITS
+: <a class="el" href="rsb_8h.html#a3d7758ee9127e0c93c9075402999d154">rsb.h</a>
+</li>
+<li>RSB_ERR_MEMORY_LEAK
+: <a class="el" href="rsb_8h.html#a1b63053f52d6426b726a05b206a3862a">rsb.h</a>
+</li>
+<li>RSB_ERR_NO_ERROR
+: <a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">rsb.h</a>
+</li>
+<li>RSB_ERR_NO_STREAM_OUTPUT_CONFIGURED_OUT
+: <a class="el" href="rsb_8h.html#a9d7fe7c0e3fabfba57bf2318459ed18a">rsb.h</a>
+</li>
+<li>RSB_ERR_NO_USER_CONFIGURATION
+: <a class="el" href="rsb_8h.html#a5ab0f86009e1f934b25b23fc4837b9b0">rsb.h</a>
+</li>
+<li>RSB_ERR_TO_PROGRAM_ERROR
+: <a class="el" href="rsb_8h.html#a9738e6b8b638ca234acd92b49c6ac1db">rsb.h</a>
+</li>
+<li>RSB_ERR_UNIMPLEMENTED_YET
+: <a class="el" href="rsb_8h.html#a0bd20d0f68cf911bf9dfda495d8e12db">rsb.h</a>
+</li>
+<li>RSB_ERR_UNSUPPORTED_FEATURE
+: <a class="el" href="rsb_8h.html#accf836c8eb3145e9ab4fd277d6911764">rsb.h</a>
+</li>
+<li>RSB_ERR_UNSUPPORTED_FORMAT
+: <a class="el" href="rsb_8h.html#ac00cd41eab18a0d2b9323b401029dd73">rsb.h</a>
+</li>
+<li>RSB_ERR_UNSUPPORTED_OPERATION
+: <a class="el" href="rsb_8h.html#ab4f407e7c8364bee51cc77546d6f0922">rsb.h</a>
+</li>
+<li>RSB_ERR_UNSUPPORTED_TYPE
+: <a class="el" href="rsb_8h.html#afdf2ab3912960ee19f23e7d585371548">rsb.h</a>
+</li>
+<li>RSB_ERRS_UNSUPPORTED_FEATURES
+: <a class="el" href="rsb_8h.html#a4d8eb05488b681b75449f64c418b8893">rsb.h</a>
+</li>
+<li>RSB_EXPOSE_NEW_GENERAL_INTERFACE
+: <a class="el" href="rsb__rsb_8c.html#a6a77bece998693a0c9bc500d444eb8a3">rsb_rsb.c</a>
+</li>
+<li>rsb_file_mtx_get_dimensions
+: <a class="el" href="rsb_8h.html#a97106c8db99424b5b69cd6be5bf59937">rsb.h</a>
+</li>
+<li>rsb_file_mtx_render
+: <a class="el" href="rsb_8h.html#a191af7bdb17d4b0abb3a195c11e56c3b">rsb.h</a>
+</li>
+<li>RSB_FLAG_ASSEMBLED_IN_COO_ARRAYS
+: <a class="el" href="rsb_8h.html#adce7e20015d4a549bb8c44a00a80fc7e">rsb.h</a>
+</li>
+<li>RSB_FLAG_C_INDICES_INTERFACE
+: <a class="el" href="rsb_8h.html#a49a9315ba7e702e323eadca04d0d735a">rsb.h</a>
+</li>
+<li>RSB_FLAG_DEFAULT_COO_MATRIX_FLAGS
+: <a class="el" href="rsb_8h.html#a6b21a3edf4231070a10223f1a9ae1dc4">rsb.h</a>
+</li>
+<li>RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS
+: <a class="el" href="rsb_8h.html#a8c90a9ad92722ffbbf1bfcadb805c520">rsb.h</a>
+</li>
+<li>RSB_FLAG_DEFAULT_MATRIX_FLAGS
+: <a class="el" href="rsb_8h.html#acac4b9c09a3fd6be63e511fc5042038f">rsb.h</a>
+</li>
+<li>RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS
+: <a class="el" href="rsb_8h.html#a17c314e28220f3b81aed9cc7d79f97e4">rsb.h</a>
+</li>
+<li>RSB_FLAG_DEFAULT_STORAGE_FLAGS
+: <a class="el" href="rsb_8h.html#aa83897e25c1235a780ed7fe317c78555">rsb.h</a>
+</li>
+<li>RSB_FLAG_DIAGONAL
+: <a class="el" href="rsb_8h.html#abccb47886fb3f8352e4e6ad801fd8efa">rsb.h</a>
+</li>
+<li>RSB_FLAG_DISCARD_ZEROS
+: <a class="el" href="rsb_8h.html#abf243a6f15925734e143703c4ad33512">rsb.h</a>
+</li>
+<li>RSB_FLAG_DUPLICATES_DEFAULT_HANDLE
+: <a class="el" href="rsb_8h.html#a7fee489042762b3b22d8184c592a9e52">rsb.h</a>
+</li>
+<li>RSB_FLAG_DUPLICATES_KEEP_LAST
+: <a class="el" href="rsb_8h.html#aff85f26964888f838aa97eb371ce5da3">rsb.h</a>
+</li>
+<li>RSB_FLAG_DUPLICATES_SUM
+: <a class="el" href="rsb_8h.html#afd1b39c625f4249cd32fccea38957f97">rsb.h</a>
+</li>
+<li>RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT
+: <a class="el" href="rsb_8h.html#a1d3b9bd7a31257cc8116be3dee0125b5">rsb.h</a>
+</li>
+<li>RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS
+: <a class="el" href="rsb_8h.html#a6abc0e23c782b817e2ef96d8294f990d">rsb.h</a>
+</li>
+<li>RSB_FLAG_FORTRAN_INDICES_INTERFACE
+: <a class="el" href="rsb_8h.html#a8ccb4d7203ce7707f9d13bd6c5ef4169">rsb.h</a>
+</li>
+<li>RSB_FLAG_HERMITIAN
+: <a class="el" href="rsb_8h.html#ae3e1d6090dd2912acba58b4bc0530ab7">rsb.h</a>
+</li>
+<li>RSB_FLAG_IDENTICAL_FLAGS
+: <a class="el" href="rsb_8h.html#aacf404fe630d480353ce767fd27ba097">rsb.h</a>
+</li>
+<li>RSB_FLAG_LOWER
+: <a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">rsb.h</a>
+</li>
+<li>RSB_FLAG_LOWER_HERMITIAN
+: <a class="el" href="rsb_8h.html#aa06dcddcdd4f42fe2eeda8eb6168bd2d">rsb.h</a>
+</li>
+<li>RSB_FLAG_LOWER_SYMMETRIC
+: <a class="el" href="rsb_8h.html#a6933030c784596e3c8dbbbd8daf62805">rsb.h</a>
+</li>
+<li>RSB_FLAG_LOWER_TRIANGULAR
+: <a class="el" href="rsb_8h.html#aed7916ce610549fc75aa0c3e2d2ae1b9">rsb.h</a>
+</li>
+<li>RSB_FLAG_MUTUALLY_EXCLUSIVE_SWITCHES
+: <a class="el" href="rsb_8h.html#a6f4335cce5234a69e06188bcad418091">rsb.h</a>
+</li>
+<li>RSB_FLAG_NOFLAGS
+: <a class="el" href="rsb_8h.html#a0ea7640214ee34c87e483c475b15827d">rsb.h</a>
+</li>
+<li>RSB_FLAG_QUAD_PARTITIONING
+: <a class="el" href="rsb_8h.html#a5ca428920608e6dd6fcc4e9a4fa8ee70">rsb.h</a>
+</li>
+<li>RSB_FLAG_RECURSIVE_MORE_LEAVES_THAN_THREADS
+: <a class="el" href="rsb_8h.html#a54d04b341465bf3dadc62ad99d55f8ca">rsb.h</a>
+</li>
+<li>RSB_FLAG_RECURSIVE_SUBDIVIDE_MORE_ON_DIAG
+: <a class="el" href="rsb_8h.html#ad8e75dfa2b78fa82cdd31665a375d257">rsb.h</a>
+</li>
+<li>RSB_FLAG_SORTED_INPUT
+: <a class="el" href="rsb_8h.html#a726fa64beccf21ae1b70149b88c3affb">rsb.h</a>
+</li>
+<li>RSB_FLAG_SYMMETRIC
+: <a class="el" href="rsb_8h.html#a183c4b8ead89e452d1c204c92b3f8f61">rsb.h</a>
+</li>
+<li>RSB_FLAG_TRIANGULAR
+: <a class="el" href="rsb_8h.html#adca72e259846399da3512fcb062ad518">rsb.h</a>
+</li>
+<li>RSB_FLAG_UNIT_DIAG_IMPLICIT
+: <a class="el" href="rsb_8h.html#a4af24812309eb471c861ba618cb996f2">rsb.h</a>
+</li>
+<li>RSB_FLAG_UPPER
+: <a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">rsb.h</a>
+</li>
+<li>RSB_FLAG_UPPER_HERMITIAN
+: <a class="el" href="rsb_8h.html#a0565be78af9bac79d07376d501237b00">rsb.h</a>
+</li>
+<li>RSB_FLAG_UPPER_SYMMETRIC
+: <a class="el" href="rsb_8h.html#a3c2701b010fa2928685f3253a0ff1a99">rsb.h</a>
+</li>
+<li>RSB_FLAG_UPPER_TRIANGULAR
+: <a class="el" href="rsb_8h.html#a9168d244582c1a4c57a9ec93d9432539">rsb.h</a>
+</li>
+<li>RSB_FLAG_USE_CSR_RESERVED
+: <a class="el" href="rsb_8h.html#a45ae263259390619ea303a5fbe2640f2">rsb.h</a>
+</li>
+<li>RSB_FLAG_USE_HALFWORD_INDICES
+: <a class="el" href="rsb_8h.html#a693ed0d053ad81ca2ad6dc383afa0586">rsb.h</a>
+</li>
+<li>RSB_FLAG_USE_HALFWORD_INDICES_COO
+: <a class="el" href="rsb_8h.html#a3051409699970a0df3acfee8cf70b9aa">rsb.h</a>
+</li>
+<li>RSB_FLAG_USE_HALFWORD_INDICES_CSR
+: <a class="el" href="rsb_8h.html#a1b1cf74b08234e3c7c7d463e7c4acea1">rsb.h</a>
+</li>
+<li>RSB_FLAG_WANT_BCSS_STORAGE
+: <a class="el" href="rsb_8h.html#a12c780564b9c8db7f8104cc5952a490f">rsb.h</a>
+</li>
+<li>RSB_FLAG_WANT_COLUMN_MAJOR_ORDER
+: <a class="el" href="rsb_8h.html#a6ed7790c2f7129a6e051b8167c48a43c">rsb.h</a>
+</li>
+<li>RSB_FLAG_WANT_COO_STORAGE
+: <a class="el" href="rsb_8h.html#a0ee1c6081692a3ca98ee7ea0c7648ec8">rsb.h</a>
+</li>
+<li>RSB_FLAG_WANT_ROW_MAJOR_ORDER
+: <a class="el" href="rsb_8h.html#a7e9ef3a7ae3c22ab5c76d36b3ac482cc">rsb.h</a>
+</li>
+<li>RSB_HALF_MAX_SIGNED
+: <a class="el" href="rsb_8h.html#ab16e9407330a11d4163be1cc586990b3">rsb.h</a>
+</li>
+<li>RSB_HAVE_RSB_KERNELS
+: <a class="el" href="rsb__blas__sparse_8F90.html#a151b6b061725a39255ee4de3db2faf8e">rsb_blas_sparse.F90</a>
+</li>
+<li>RSB_HAVE_TYPE_DOUBLE
+: <a class="el" href="rsb__types_8h.html#a50018495517829b14797a568788e1526">rsb_types.h</a>
+</li>
+<li>RSB_HAVE_TYPE_DOUBLE_COMPLEX
+: <a class="el" href="rsb__types_8h.html#a922101e7269ccc3184935c451b606a2c">rsb_types.h</a>
+</li>
+<li>RSB_HAVE_TYPE_FLOAT
+: <a class="el" href="rsb__types_8h.html#a82f77f519ff60dffac284034c12d2635">rsb_types.h</a>
+</li>
+<li>RSB_HAVE_TYPE_FLOAT_COMPLEX
+: <a class="el" href="rsb__types_8h.html#a782af474ca5eba101233fc265965fbbb">rsb_types.h</a>
+</li>
+<li>RSB_HEADER_VERSION_STRING
+: <a class="el" href="rsb__types_8h.html#a8bc9584f994ecb2639ee548156562aae">rsb_types.h</a>
+</li>
+<li>RSB_INTERFACE_RETURN_ERR
+: <a class="el" href="rsb__rsb_8c.html#a85dee9bd15f321bfac4a8f055f072d1b">rsb_rsb.c</a>
+</li>
+<li>RSB_INTERFACE_RETURN_MTX
+: <a class="el" href="rsb__rsb_8c.html#abd0f924354130cfb2cbe4b8345dbc6fd">rsb_rsb.c</a>
+</li>
+<li>RSB_INTERFACE_RETURN_MTX_ERRP
+: <a class="el" href="rsb__rsb_8c.html#af6ebbe2e678aef616abb33526b312f65">rsb_rsb.c</a>
+</li>
+<li>RSB_INTERFACE_RETURN_VAL
+: <a class="el" href="rsb__rsb_8c.html#a619e228eb1a40cb1ae303be5ca6fa2ed">rsb_rsb.c</a>
+</li>
+<li>RSB_INVALID_COO_IDX_VAL
+: <a class="el" href="rsb_8h.html#a88e6b599d650b509b54d4fe7c3008b12">rsb.h</a>
+</li>
+<li>RSB_INVALID_NNZ_IDX_VAL
+: <a class="el" href="rsb_8h.html#a20253111f2fa6a4bc0c75fe7e6430890">rsb.h</a>
+</li>
+<li>RSB_IO_SPECIFIER_GET
+: <a class="el" href="rsb_8h.html#afd8b1de2977b2d810f9c615195d9acec">rsb.h</a>
+</li>
+<li>RSB_IO_SPECIFIER_SET
+: <a class="el" href="rsb_8h.html#aef619407815752dc767cfd6870b72101">rsb.h</a>
+</li>
+<li>RSB_IS_SIGNED
+: <a class="el" href="rsb_8h.html#af7d43df61fa72c8971cece701ae53a22">rsb.h</a>
+</li>
+<li>RSB_LIBRSB_VER
+: <a class="el" href="rsb__types_8h.html#a08fbe9d2c97a5b73bdad3dbe1402c83b">rsb_types.h</a>
+</li>
+<li>RSB_LIBRSB_VER_DATE
+: <a class="el" href="rsb__types_8h.html#ae26b1dec914b2cf2f233c07d2f4815d1">rsb_types.h</a>
+</li>
+<li>RSB_LIBRSB_VER_MAJOR
+: <a class="el" href="rsb__types_8h.html#a7fd4e640e7aa86fdce8f3d25ac230b5c">rsb_types.h</a>
+</li>
+<li>RSB_LIBRSB_VER_MINOR
+: <a class="el" href="rsb__types_8h.html#af8d3f63778c3120b14c3126259872cfe">rsb_types.h</a>
+</li>
+<li>RSB_LIBRSB_VER_PATCH
+: <a class="el" href="rsb__types_8h.html#ab3384c84112fe759dc57c5dd206a0cde">rsb_types.h</a>
+</li>
+<li>RSB_LIBRSB_VER_STRING
+: <a class="el" href="rsb__types_8h.html#af66941d5b1f1595c29f9c7e131d22242">rsb_types.h</a>
+</li>
+<li>RSB_MARF_EPS
+: <a class="el" href="rsb_8h.html#a2d332e6ed899c019e54ab4e540c82fd8">rsb.h</a>
+</li>
+<li>RSB_MARF_EPS_B
+: <a class="el" href="rsb_8h.html#a77106fe2435306ef028060d0eb7dca14">rsb.h</a>
+</li>
+<li>RSB_MARF_EPS_L
+: <a class="el" href="rsb_8h.html#a3562195777ed886282bd6287551a235c">rsb.h</a>
+</li>
+<li>RSB_MARF_EPS_S
+: <a class="el" href="rsb_8h.html#a8055e62d2824131421d22de1a0256f79">rsb.h</a>
+</li>
+<li>RSB_MARF_RGB
+: <a class="el" href="rsb_8h.html#a53604f78febc54c616282c66bca02daf">rsb.h</a>
+</li>
+<li>RSB_MARKER_COO_VALUE
+: <a class="el" href="rsb_8h.html#af88edb77d90929bf6cef617ab862d2bc">rsb.h</a>
+</li>
+<li>RSB_MARKER_NNZ_VALUE
+: <a class="el" href="rsb_8h.html#a967c5aae0dc536668ed67d810378e7fc">rsb.h</a>
+</li>
+<li>RSB_MAX_MATRIX_DIM
+: <a class="el" href="rsb_8h.html#a318a92d60883f6ade7345459074374f5">rsb.h</a>
+</li>
+<li>RSB_MAX_MATRIX_NNZ
+: <a class="el" href="rsb_8h.html#a63c69ef30355064d818326768674c9b2">rsb.h</a>
+</li>
+<li>RSB_MAX_SIGNED
+: <a class="el" href="rsb_8h.html#a465659728318d495a364e906806ffae7">rsb.h</a>
+</li>
+<li>RSB_MAX_UNSIGNED
+: <a class="el" href="rsb_8h.html#a9ea900484e72f4876b3fd8d9f402ea39">rsb.h</a>
+</li>
+<li>RSB_MAX_VALUE_FOR_TYPE
+: <a class="el" href="rsb_8h.html#a0ad77b7888128f3e1b144b48e6e93b87">rsb.h</a>
+</li>
+<li>RSB_MIN_MATRIX_DIM
+: <a class="el" href="rsb_8h.html#abaccfe39f69712cebf501c9d55b1a4b8">rsb.h</a>
+</li>
+<li>RSB_MIN_MATRIX_NNZ
+: <a class="el" href="rsb_8h.html#a425f78c0a49004e45df20db728f8196d">rsb.h</a>
+</li>
+<li>rsb_mtx_get_norm
+: <a class="el" href="rsb_8h.html#a552fe79778c824e8d88ddfd0d9c58586">rsb.h</a>
+</li>
+<li>rsb_mtx_get_preconditioner
+: <a class="el" href="rsb_8h.html#a8ba1704fe1f07cb9abe856d9a1a20ea9">rsb.h</a>
+</li>
+<li>rsb_mtx_get_values
+: <a class="el" href="rsb_8h.html#af08b72a410e54fd7db6dcb12db232aec">rsb.h</a>
+</li>
+<li>rsb_mtx_set_values
+: <a class="el" href="rsb_8h.html#a5b622f80450cdef4f8a06742eacbb045">rsb.h</a>
+</li>
+<li>rsb_mtx_upd_values
+: <a class="el" href="rsb_8h.html#a40d40562867aceec2899cdddf79b3086">rsb.h</a>
+</li>
+<li>RSB_NNZ_BLK_MAX
+: <a class="el" href="rsb_8h.html#af576621f0846e0b9a999ea21641e13c8">rsb.h</a>
+</li>
+<li>RSB_NULL_EXIT_OPTIONS
+: <a class="el" href="rsb_8h.html#a2234a5e51156de6c95c3f8c2951ae09f">rsb.h</a>
+</li>
+<li>RSB_NULL_INIT_OPTIONS
+: <a class="el" href="rsb_8h.html#add105c42e570c5c269680d437f8c51e2">rsb.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_DEFAULT
+: <a class="el" href="rsb__types_8h.html#a56fc5ef14266266227797621e0a1e217">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_DEFAULT_INTEGER
+: <a class="el" href="rsb__types_8h.html#a70b99562829107b4fe1f529aacd4729a">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_DOUBLE
+: <a class="el" href="rsb__types_8h.html#a7849bc51eadedaa51a1b27569be89d86">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX
+: <a class="el" href="rsb__types_8h.html#a51ca2ff55d0c852f659f5c76ecd536cd">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_FIRST_BLAS
+: <a class="el" href="rsb__types_8h.html#ac51619f9cbe0a9a4cbc55e0451bfb59d">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_FLOAT
+: <a class="el" href="rsb__types_8h.html#a7628cd01c7e84e4ada529b3412d118b3">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_FLOAT_COMPLEX
+: <a class="el" href="rsb__types_8h.html#ac46f79bff4499a5e8b6075150ecabf69">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_FORTRAN_DOUBLE
+: <a class="el" href="rsb__types_8h.html#af465e222cfdede5b5df9a26a35b5e115">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_FORTRAN_DOUBLE_COMPLEX
+: <a class="el" href="rsb__types_8h.html#a2fc48337d7c3ac2cd4e9e509c73edbf9">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_FORTRAN_FLOAT
+: <a class="el" href="rsb__types_8h.html#a262db8d5b52285bd503cc1e60039135a">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_FORTRAN_FLOAT_COMPLEX
+: <a class="el" href="rsb__types_8h.html#a3bab97530d248482496ac20667e102f4">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_FORTRAN_INT
+: <a class="el" href="rsb__types_8h.html#a16d646278df635b6e4fc57c43241fb98">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_FORTRAN_SAME_TYPE
+: <a class="el" href="rsb__types_8h.html#a17195a2481a24153b99f2be1f0577ff1">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_INVALID_TYPE
+: <a class="el" href="rsb__types_8h.html#ac418f097835ff41e0baaf5635d21b6f9">rsb_types.h</a>
+</li>
+<li>RSB_NUMERICAL_TYPE_SAME_TYPE
+: <a class="el" href="rsb__types_8h.html#a532c3e9733221d59bac99cb1f795d266">rsb_types.h</a>
+</li>
+<li>RSB_PRECF_ILU0
+: <a class="el" href="rsb_8h.html#a56bb6be11af9a5a0ed9aaa8774ab6db9">rsb.h</a>
+</li>
+<li>RSB_PROGRAM_ERROR
+: <a class="el" href="rsb_8h.html#a7f6f859f61b0855e5389e1bc98829bd4">rsb.h</a>
+</li>
+<li>RSB_PROGRAM_SUCCESS
+: <a class="el" href="rsb_8h.html#a61f8a9ebc9bced69076389ba3cd2cce8">rsb.h</a>
+</li>
+<li>RSB_REINIT_SINGLE_VALUE
+: <a class="el" href="rsb_8h.html#afeb783fe4dca5762623a621b7095dd01">rsb.h</a>
+</li>
+<li>RSB_REINIT_SINGLE_VALUE_C_IOP
+: <a class="el" href="rsb_8h.html#aa0ca08a816983bc6294317d0e22e0509">rsb.h</a>
+</li>
+<li>RSB_REINIT_SINGLE_VALUE_GET
+: <a class="el" href="rsb_8h.html#ae6f837f13f6413a163f2c6b0c02dadf2">rsb.h</a>
+</li>
+<li>RSB_REINIT_SINGLE_VALUE_SET
+: <a class="el" href="rsb_8h.html#a20da3b07d4c17771762413010816e36e">rsb.h</a>
+</li>
+<li>RSB_ROWS_TRANSPOSITIONS_ARRAY
+: <a class="el" href="rsb__types_8h.html#a9fcc01fb97c5b5482be8ab4cd7c2ee33">rsb_types.h</a>
+</li>
+<li>RSB_SIZEOF
+: <a class="el" href="group__rsb__doc__rsb.html#ga68e662dcfb6981c1efc8eb03ef327182">rsb.h</a>
+</li>
+<li>RSB_TRANSPOSITION_C
+: <a class="el" href="rsb__types_8h.html#abd3aaf223656dece97dee2107e485217">rsb_types.h</a>
+</li>
+<li>RSB_TRANSPOSITION_N
+: <a class="el" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">rsb_types.h</a>
+</li>
+<li>RSB_TRANSPOSITION_T
+: <a class="el" href="rsb__types_8h.html#a37f8cea71946de2f832bdb9d438d5edf">rsb_types.h</a>
+</li>
+<li>RSB_TYPE_INDEX_DOUBLE
+: <a class="el" href="rsb__types_8h.html#a4abf98873753295350143ca544b79db3">rsb_types.h</a>
+</li>
+<li>RSB_TYPE_INDEX_DOUBLE_COMPLEX
+: <a class="el" href="rsb__types_8h.html#a1a13d13b3c7f84e7fc8ca1df3878a07d">rsb_types.h</a>
+</li>
+<li>RSB_TYPE_INDEX_FLOAT
+: <a class="el" href="rsb__types_8h.html#a8d5222339367566d624a1e678d116d0d">rsb_types.h</a>
+</li>
+<li>RSB_TYPE_INDEX_FLOAT_COMPLEX
+: <a class="el" href="rsb__types_8h.html#a8445bf2e852a4b20d178ae4b475f4552">rsb_types.h</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
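The macros page ending above groups the rsb_flags_t bit constants (RSB_FLAG_*) with the macros that manipulate them (RSB_DO_FLAG_ADD, RSB_DO_FLAG_DEL, RSB_DO_FLAG_HAS) and the RSB_ERR_* codes. A short sketch of how these compose, using only names that appear in the index; the assumption that RSB_FLAG_LOWER_TRIANGULAR includes the plain RSB_FLAG_TRIANGULAR bit follows the rsb.h flag layout.

    /* sketch: composing rsb_flags_t values with the RSB_DO_FLAG_* macros */
    #include <rsb.h>
    #include <stdio.h>

    int main(void)
    {
        rsb_flags_t flagsA = RSB_FLAG_NOFLAGS;
        char errbuf[128];

        /* request a lower triangular matrix with an implicit unit diagonal */
        RSB_DO_FLAG_ADD(flagsA, RSB_FLAG_LOWER_TRIANGULAR);
        RSB_DO_FLAG_ADD(flagsA, RSB_FLAG_UNIT_DIAG_IMPLICIT);

        /* RSB_FLAG_LOWER_TRIANGULAR implies the plain triangular bit */
        if (RSB_DO_FLAG_HAS(flagsA, RSB_FLAG_TRIANGULAR))
            printf("triangular flag set\n");

        RSB_DO_FLAG_DEL(flagsA, RSB_FLAG_UNIT_DIAG_IMPLICIT);

        /* the RSB_ERR_* codes indexed above decode via rsb_strerror_r() */
        rsb_strerror_r(RSB_ERR_BADARGS, errbuf, sizeof(errbuf));
        printf("%s\n", errbuf);
        return 0;
    }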
diff --git a/doc/html/globals_enum.html b/doc/html/globals_enum.html
new file mode 100644
index 0000000..a9e6807
--- /dev/null
+++ b/doc/html/globals_enum.html
@@ -0,0 +1,180 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Globals</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="files.html"><span>File List</span></a></li>
+      <li class="current"><a href="globals.html"><span>Globals</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li><a href="globals.html"><span>All</span></a></li>
+      <li><a href="globals_func.html"><span>Functions</span></a></li>
+      <li><a href="globals_type.html"><span>Typedefs</span></a></li>
+      <li class="current"><a href="globals_enum.html"><span>Enumerations</span></a></li>
+      <li><a href="globals_eval.html"><span>Enumerator</span></a></li>
+      <li><a href="globals_defs.html"><span>Macros</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li><a href="#index_b"><span>b</span></a></li>
+      <li><a href="#index_r"><span>r</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+&#160;
+
+<h3><a class="anchor" id="index_b"></a>- b -</h3><ul>
+<li>blas_base_type
+: <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a3fe740ad5a139d723de260d638987e9e">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9e">rsb_libspblas.h</a>
+</li>
+<li>blas_cmach_type
+: <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aa">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aa">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aa">rsb_libspblas.h</a>
+</li>
+<li>blas_conj_type
+: <a class="el" href="rsb__libspblas_8c.html#a125c156d54359fba48a6b9cf2a2d0a07">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_sparse.h</a>
+</li>
+<li>blas_diag_type
+: <a class="el" href="blas__sparse_8h.html#ad7b35ac9114bfe21e15d011bf878b164">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#ad7b35ac9114bfe21e15d011bf878b164">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#ad7b35ac9114bfe21e15d011bf878b164">rsb_libspblas.h</a>
+</li>
+<li>blas_field_type
+: <a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a09d8be749e909b403b1563f0ca84aef8">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a09d8be749e909b403b1563f0ca84aef8">rsb_libspblas.h</a>
+</li>
+<li>blas_handle_type
+: <a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7cb10fb1b47b79ef278d6f09d571bd06">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06">rsb_libspblas.h</a>
+</li>
+<li>blas_jrot_type
+: <a class="el" href="blas__sparse_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#abdf3d2dd2387ff18e265347d2dfc1f04">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04">rsb_libspblas.h</a>
+</li>
+<li>blas_norm_type
+: <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952b">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952b">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952b">rsb_libspblas.h</a>
+</li>
+<li>blas_order_type
+: <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">rsb_libspblas.h</a>
+</li>
+<li>blas_prec_type
+: <a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a8970170b9fd2a64eb18d9509ea624475">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a8970170b9fd2a64eb18d9509ea624475">rsb_libspblas.h</a>
+</li>
+<li>blas_rsb_ext_type
+: <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729">blas_sparse.h</a>
+</li>
+<li>blas_side_type
+: <a class="el" href="blas__sparse_8h.html#ac10de4d3a9ae38c876ec94ee7929e695">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#ac10de4d3a9ae38c876ec94ee7929e695">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#ac10de4d3a9ae38c876ec94ee7929e695">rsb_libspblas.c</a>
+</li>
+<li>blas_size_type
+: <a class="el" href="rsb__libspblas_8h.html#a540f6a907f9f5e49d84a65c530e598c6">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a540f6a907f9f5e49d84a65c530e598c6">rsb_libspblas.c</a>
+, <a class="el" href="blas__sparse_8h.html#a540f6a907f9f5e49d84a65c530e598c6">blas_sparse.h</a>
+</li>
+<li>blas_sort_type
+: <a class="el" href="blas__sparse_8h.html#a4a9825e92ac3a85e524c58283ac42c14">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#a4a9825e92ac3a85e524c58283ac42c14">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a4a9825e92ac3a85e524c58283ac42c14">rsb_libspblas.c</a>
+</li>
+<li>blas_sparsity_optimization_type
+: <a class="el" href="rsb__libspblas_8h.html#a3f95e19247de0359b56de195704e05a5">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a3f95e19247de0359b56de195704e05a5">rsb_libspblas.c</a>
+, <a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5">blas_sparse.h</a>
+</li>
+<li>blas_symmetry_type
+: <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63b">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63b">rsb_libspblas.c</a>
+, <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63b">blas_sparse.h</a>
+</li>
+<li>blas_trans_type
+: <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a23e5e138364c80074ac014a3dfd346b7">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a23e5e138364c80074ac014a3dfd346b7">rsb_libspblas.h</a>
+</li>
+<li>blas_uplo_type
+: <a class="el" href="rsb__libspblas_8h.html#acc2b26a405868ca1bd8a18e0eb62e820">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#acc2b26a405868ca1bd8a18e0eb62e820">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#acc2b26a405868ca1bd8a18e0eb62e820">rsb_libspblas.c</a>
+</li>
+</ul>
+
+
+<h3><a class="anchor" id="index_r"></a>- r -</h3><ul>
+<li>rsb_elopf_t
+: <a class="el" href="group__rsb__doc__rsb.html#ga16c86c65a187bfbe94ecfdb87b97cade">rsb.h</a>
+</li>
+<li>rsb_extff_t
+: <a class="el" href="group__rsb__doc__rsb.html#ga14750ca720fd92a2be879a59ae36dfe9">rsb.h</a>
+</li>
+<li>rsb_mif_t
+: <a class="el" href="group__rsb__doc__rsb.html#ga211914bd1afe8044a70dc864f3c1fc8f">rsb.h</a>
+</li>
+<li>rsb_opt_t
+: <a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6">rsb.h</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
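The enumerations page closing above indexes the Sparse BLAS enum types (blas_trans_type, blas_base_type, blas_symmetry_type, ...) shared by blas_sparse.h and rsb_libspblas.h. A hedged sketch of the standard Sparse BLAS creation/multiply/destroy cycle those enums parameterize; it assumes the zero-based default index base of the C Sparse BLAS binding, and that librsb's implementation is initialized via rsb_lib_init().

    /* sketch: Sparse BLAS handle life cycle, double precision */
    #include <rsb.h>            /* rsb_lib_init()/rsb_lib_exit() */
    #include <blas_sparse.h>
    #include <stdio.h>

    int main(void)
    {
        blas_sparse_matrix A = blas_invalid_handle;
        const int IA[] = {0, 1, 2}, JA[] = {0, 1, 2};  /* assumes zero base */
        const double VA[] = {1.0, 2.0, 3.0};
        const double X[] = {1.0, 1.0, 1.0};
        double Y[] = {0.0, 0.0, 0.0};

        if (rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
            return 1;
        A = BLAS_duscr_begin(3, 3);                 /* begin a 3x3 matrix */
        BLAS_duscr_insert_entries(A, 3, VA, IA, JA);
        BLAS_duscr_end(A);                          /* assemble */
        /* Y := 1.0 * A * X + Y; blas_no_trans is a blas_trans_type value */
        BLAS_dusmv(blas_no_trans, 1.0, A, X, 1, Y, 1);
        printf("Y[2] = %g\n", Y[2]);                /* expected: 3.0 */
        BLAS_usds(A);                               /* release the handle */
        rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
        return 0;
    }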
diff --git a/doc/html/globals_eval.html b/doc/html/globals_eval.html
new file mode 100644
index 0000000..1d9360c
--- /dev/null
+++ b/doc/html/globals_eval.html
@@ -0,0 +1,474 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Globals</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="files.html"><span>File List</span></a></li>
+      <li class="current"><a href="globals.html"><span>Globals</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li><a href="globals.html"><span>All</span></a></li>
+      <li><a href="globals_func.html"><span>Functions</span></a></li>
+      <li><a href="globals_type.html"><span>Typedefs</span></a></li>
+      <li><a href="globals_enum.html"><span>Enumerations</span></a></li>
+      <li class="current"><a href="globals_eval.html"><span>Enumerator</span></a></li>
+      <li><a href="globals_defs.html"><span>Macros</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li class="current"><a href="globals_eval.html#index_b"><span>b</span></a></li>
+      <li><a href="globals_eval_0x72.html#index_r"><span>r</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+&#160;
+
+<h3><a class="anchor" id="index_b"></a>- b -</h3><ul>
+<li>blas_base
+: <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa450c812108b1c81a0f6ef65c51f64d7b">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa450c812108b1c81a0f6ef65c51f64d7b">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa450c812108b1c81a0f6ef65c51f64d7b">rsb_libspblas.h</a>
+</li>
+<li>blas_block
+: <a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa">rsb_libspblas.h</a>
+</li>
+<li>blas_colmajor
+: <a class="el" href="rsb__libspblas_8c.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e">blas_sparse.h</a>
+</li>
+<li>blas_complex
+: <a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8a1a77ce97fa91f37a776fe3af3f0589d8">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a09d8be749e909b403b1563f0ca84aef8a1a77ce97fa91f37a776fe3af3f0589d8">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a09d8be749e909b403b1563f0ca84aef8a1a77ce97fa91f37a776fe3af3f0589d8">rsb_libspblas.h</a>
+</li>
+<li>blas_conj
+: <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1">rsb_libspblas.h</a>
+</li>
+<li>blas_conj_trans
+: <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c">rsb_libspblas.h</a>
+</li>
+<li>blas_decreasing_order
+: <a class="el" href="blas__sparse_8h.html#a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4">rsb_libspblas.h</a>
+</li>
+<li>blas_double_precision
+: <a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8a28a1eb1d9dde753641767cb33f7d5852">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a09d8be749e909b403b1563f0ca84aef8a28a1eb1d9dde753641767cb33f7d5852">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a09d8be749e909b403b1563f0ca84aef8a28a1eb1d9dde753641767cb33f7d5852">rsb_libspblas.h</a>
+</li>
+<li>blas_emax
+: <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa26e73a26ce9e06149fff858bdfb5f363">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa26e73a26ce9e06149fff858bdfb5f363">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa26e73a26ce9e06149fff858bdfb5f363">rsb_libspblas.h</a>
+</li>
+<li>blas_emin
+: <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaad93796f5d1a8bc7bb4d9512dc312e8df">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaad93796f5d1a8bc7bb4d9512dc312e8df">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaad93796f5d1a8bc7bb4d9512dc312e8df">rsb_libspblas.h</a>
+</li>
+<li>blas_eps
+: <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3e407f69dd9a70e04e91602a3d76ae4a">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3e407f69dd9a70e04e91602a3d76ae4a">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3e407f69dd9a70e04e91602a3d76ae4a">blas_sparse.h</a>
+</li>
+<li>blas_frobenius_norm
+: <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba607f3142e766379f65fecd8964e9a8ed">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba607f3142e766379f65fecd8964e9a8ed">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba607f3142e766379f65fecd8964e9a8ed">rsb_libspblas.h</a>
+</li>
+<li>blas_general
+: <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866">rsb_libspblas.h</a>
+</li>
+<li>blas_hermitian
+: <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c">rsb_libspblas.h</a>
+</li>
+<li>blas_ieee
+: <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa640085acde3bcb1c78c42e9b5838c714">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa640085acde3bcb1c78c42e9b5838c714">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa640085acde3bcb1c78c42e9b5838c714">rsb_libspblas.h</a>
+</li>
+<li>blas_increasing_order
+: <a class="el" href="blas__sparse_8h.html#a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7">rsb_libspblas.h</a>
+</li>
+<li>blas_inf_norm
+: <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952bab50c138192cb933e81550900a44d187c">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952bab50c138192cb933e81550900a44d187c">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952bab50c138192cb933e81550900a44d187c">rsb_libspblas.h</a>
+</li>
+<li>blas_invalid_handle
+: <a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">rsb_libspblas.h</a>
+</li>
+<li>blas_irregular
+: <a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66">rsb_libspblas.h</a>
+</li>
+<li>blas_jrot_inner
+: <a class="el" href="blas__sparse_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a3c18fddd1929b245ab4b948b63d57b0a">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#abdf3d2dd2387ff18e265347d2dfc1f04a3c18fddd1929b245ab4b948b63d57b0a">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a3c18fddd1929b245ab4b948b63d57b0a">rsb_libspblas.h</a>
+</li>
+<li>blas_jrot_outer
+: <a class="el" href="blas__sparse_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a1ab4a6e0e69cdaa540b3415617e1ea05">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#abdf3d2dd2387ff18e265347d2dfc1f04a1ab4a6e0e69cdaa540b3415617e1ea05">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a1ab4a6e0e69cdaa540b3415617e1ea05">rsb_libspblas.h</a>
+</li>
+<li>blas_jrot_sorted
+: <a class="el" href="blas__sparse_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a85c43836ee3a19a39f41d2001761e042">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#abdf3d2dd2387ff18e265347d2dfc1f04a85c43836ee3a19a39f41d2001761e042">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a85c43836ee3a19a39f41d2001761e042">rsb_libspblas.h</a>
+</li>
+<li>blas_left_side
+: <a class="el" href="blas__sparse_8h.html#ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181">rsb_libspblas.h</a>
+</li>
+<li>blas_lower
+: <a class="el" href="blas__sparse_8h.html#acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26">rsb_libspblas.h</a>
+</li>
+<li>blas_lower_hermitian
+: <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">rsb_libspblas.h</a>
+</li>
+<li>blas_lower_symmetric
+: <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">rsb_libspblas.h</a>
+</li>
+<li>blas_lower_triangular
+: <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">rsb_libspblas.h</a>
+</li>
+<li>blas_max_norm
+: <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba6a806e7014a17f2b175780210e43d0cf">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba6a806e7014a17f2b175780210e43d0cf">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba6a806e7014a17f2b175780210e43d0cf">rsb_libspblas.h</a>
+</li>
+<li>blas_new_handle
+: <a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a0af06bd9167e03014cc95fffaa2901e5">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7cb10fb1b47b79ef278d6f09d571bd06a0af06bd9167e03014cc95fffaa2901e5">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a0af06bd9167e03014cc95fffaa2901e5">rsb_libspblas.h</a>
+</li>
+<li>blas_no_conj
+: <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a809495dc4e17c4b059c009bc90f00bf7">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a125c156d54359fba48a6b9cf2a2d0a07a809495dc4e17c4b059c009bc90f00bf7">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a809495dc4e17c4b059c009bc90f00bf7">rsb_libspblas.h</a>
+</li>
+<li>blas_no_trans
+: <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f">rsb_libspblas.h</a>
+</li>
+<li>blas_non_unit_diag
+: <a class="el" href="blas__sparse_8h.html#ad7b35ac9114bfe21e15d011bf878b164a3e6acad666ce6b542ac341569b83624f">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#ad7b35ac9114bfe21e15d011bf878b164a3e6acad666ce6b542ac341569b83624f">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#ad7b35ac9114bfe21e15d011bf878b164a3e6acad666ce6b542ac341569b83624f">rsb_libspblas.h</a>
+</li>
+<li>blas_num_cols
+: <a class="el" href="blas__sparse_8h.html#a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b">rsb_libspblas.h</a>
+</li>
+<li>blas_num_nonzeros
+: <a class="el" href="blas__sparse_8h.html#a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd">rsb_libspblas.h</a>
+</li>
+<li>blas_num_rows
+: <a class="el" href="blas__sparse_8h.html#a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23">rsb_libspblas.h</a>
+</li>
+<li>blas_one_base
+: <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">rsb_libspblas.h</a>
+</li>
+<li>blas_one_norm
+: <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952bab661151b14ab3c58c0b3d335528db250">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952bab661151b14ab3c58c0b3d335528db250">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952bab661151b14ab3c58c0b3d335528db250">rsb_libspblas.h</a>
+</li>
+<li>blas_open_handle
+: <a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a711ecc7da9546cf3ac76a29e297f3eb0">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7cb10fb1b47b79ef278d6f09d571bd06a711ecc7da9546cf3ac76a29e297f3eb0">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a711ecc7da9546cf3ac76a29e297f3eb0">rsb_libspblas.h</a>
+</li>
+<li>blas_overflow
+: <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa51424a153ba5a72b4fb5018732bbaa02">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa51424a153ba5a72b4fb5018732bbaa02">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa51424a153ba5a72b4fb5018732bbaa02">rsb_libspblas.h</a>
+</li>
+<li>blas_prec
+: <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaaadf329e788494c80e522348ef1210d9f">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaaadf329e788494c80e522348ef1210d9f">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaaadf329e788494c80e522348ef1210d9f">rsb_libspblas.h</a>
+</li>
+<li>blas_prec_double
+: <a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475af5e092268082a0306216cbad6d3d8b8a">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a8970170b9fd2a64eb18d9509ea624475af5e092268082a0306216cbad6d3d8b8a">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a8970170b9fd2a64eb18d9509ea624475af5e092268082a0306216cbad6d3d8b8a">rsb_libspblas.h</a>
+</li>
+<li>blas_prec_extra
+: <a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475a2138d39c899dac6396f817c6cfdc91d9">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a8970170b9fd2a64eb18d9509ea624475a2138d39c899dac6396f817c6cfdc91d9">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a8970170b9fd2a64eb18d9509ea624475a2138d39c899dac6396f817c6cfdc91d9">rsb_libspblas.h</a>
+</li>
+<li>blas_prec_indigenous
+: <a class="el" href="rsb__libspblas_8c.html#a8970170b9fd2a64eb18d9509ea624475a63c139aa91e4f496acd6cfb85385d7d4">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a8970170b9fd2a64eb18d9509ea624475a63c139aa91e4f496acd6cfb85385d7d4">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475a63c139aa91e4f496acd6cfb85385d7d4">blas_sparse.h</a>
+</li>
+<li>blas_prec_single
+: <a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475aab04803ec917ea9ae8b4d40ed1cdc7c4">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a8970170b9fd2a64eb18d9509ea624475aab04803ec917ea9ae8b4d40ed1cdc7c4">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a8970170b9fd2a64eb18d9509ea624475aab04803ec917ea9ae8b4d40ed1cdc7c4">rsb_libspblas.h</a>
+</li>
+<li>blas_real
+: <a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8adf886a38a73b1de541eb9d32adb50a4d">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a09d8be749e909b403b1563f0ca84aef8adf886a38a73b1de541eb9d32adb50a4d">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a09d8be749e909b403b1563f0ca84aef8adf886a38a73b1de541eb9d32adb50a4d">rsb_libspblas.h</a>
+</li>
+<li>blas_real_inf_norm
+: <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba1ff3a55280960c17e59d37500ab4eec5">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba1ff3a55280960c17e59d37500ab4eec5">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba1ff3a55280960c17e59d37500ab4eec5">rsb_libspblas.h</a>
+</li>
+<li>blas_real_max_norm
+: <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952bae48280621b0adfec78d7a180c1026953">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952bae48280621b0adfec78d7a180c1026953">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952bae48280621b0adfec78d7a180c1026953">blas_sparse.h</a>
+</li>
+<li>blas_real_one_norm
+: <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba6f826b18a3a197b97b228961fdab47b3">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba6f826b18a3a197b97b228961fdab47b3">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba6f826b18a3a197b97b228961fdab47b3">rsb_libspblas.c</a>
+</li>
+<li>blas_regular
+: <a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79">rsb_libspblas.h</a>
+</li>
+<li>blas_right_side
+: <a class="el" href="blas__sparse_8h.html#ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9">rsb_libspblas.h</a>
+</li>
+<li>blas_rnd
+: <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3d5fea2fad72607b2368ace39fa89280">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3d5fea2fad72607b2368ace39fa89280">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3d5fea2fad72607b2368ace39fa89280">blas_sparse.h</a>
+</li>
+<li>blas_rowmajor
+: <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea635ab08ac28ae417e25c0d163c40f19d">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea635ab08ac28ae417e25c0d163c40f19d">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea635ab08ac28ae417e25c0d163c40f19d">rsb_libspblas.c</a>
+</li>
+<li>blas_rsb_autotune_next_operation
+: <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">rsb_libspblas.h</a>
+</li>
+<li>blas_rsb_duplicates_ovw
+: <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f">rsb_libspblas.h</a>
+</li>
+<li>blas_rsb_duplicates_sum
+: <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a">rsb_libspblas.c</a>
+</li>
+<li>blas_rsb_rep_coo
+: <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60">rsb_libspblas.h</a>
+</li>
+<li>blas_rsb_rep_csr
+: <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878">rsb_libspblas.c</a>
+, <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878">rsb_libspblas.h</a>
+</li>
+<li>blas_rsb_rep_rsb
+: <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b">blas_sparse.h</a>
+</li>
+<li>blas_rsb_spmv_autotuning_off
+: <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7">blas_sparse.h</a>
+</li>
+<li>blas_rsb_spmv_autotuning_on
+: <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0">rsb_libspblas.c</a>
+</li>
+<li>blas_rsb_spmv_n_autotuning_off
+: <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0">blas_sparse.h</a>
+</li>
+<li>blas_rsb_spmv_n_autotuning_on
+: <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff">rsb_libspblas.c</a>
+</li>
+<li>blas_rsb_spmv_t_autotuning_off
+: <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e">blas_sparse.h</a>
+</li>
+<li>blas_rsb_spmv_t_autotuning_on
+: <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8">rsb_libspblas.c</a>
+</li>
+<li>blas_sfmin
+: <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa0a3cdfdc2ddd9ce036017d4c57aa941a">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa0a3cdfdc2ddd9ce036017d4c57aa941a">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa0a3cdfdc2ddd9ce036017d4c57aa941a">rsb_libspblas.h</a>
+</li>
+<li>blas_single_precision
+: <a class="el" href="rsb__libspblas_8c.html#a09d8be749e909b403b1563f0ca84aef8aa4e3065824f579e62b15ba908e625df6">rsb_libspblas.c</a>
+, <a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8aa4e3065824f579e62b15ba908e625df6">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#a09d8be749e909b403b1563f0ca84aef8aa4e3065824f579e62b15ba908e625df6">rsb_libspblas.h</a>
+</li>
+<li>blas_symmetric
+: <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0">rsb_libspblas.c</a>
+</li>
+<li>blas_t
+: <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa16a1c297dab1551cf40bbe5210395f10">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa16a1c297dab1551cf40bbe5210395f10">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa16a1c297dab1551cf40bbe5210395f10">rsb_libspblas.h</a>
+</li>
+<li>blas_trans
+: <a class="el" href="rsb__libspblas_8h.html#a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54">rsb_libspblas.c</a>
+, <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54">blas_sparse.h</a>
+</li>
+<li>blas_triangular
+: <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a">rsb_libspblas.c</a>
+, <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a">blas_sparse.h</a>
+</li>
+<li>blas_two_norm
+: <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba57c558d28842a2b7b90df3a796fde77e">rsb_libspblas.c</a>
+, <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba57c558d28842a2b7b90df3a796fde77e">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba57c558d28842a2b7b90df3a796fde77e">rsb_libspblas.h</a>
+</li>
+<li>blas_unassembled
+: <a class="el" href="rsb__libspblas_8h.html#a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5">rsb_libspblas.c</a>
+, <a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5">blas_sparse.h</a>
+</li>
+<li>blas_underflow
+: <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa4159c63ae4ee2275d8e09d02ecb85428">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa4159c63ae4ee2275d8e09d02ecb85428">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa4159c63ae4ee2275d8e09d02ecb85428">blas_sparse.h</a>
+</li>
+<li>blas_unit_diag
+: <a class="el" href="blas__sparse_8h.html#ad7b35ac9114bfe21e15d011bf878b164a2f5e42e04fbce66ae47fe91d9a31b52c">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#ad7b35ac9114bfe21e15d011bf878b164a2f5e42e04fbce66ae47fe91d9a31b52c">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#ad7b35ac9114bfe21e15d011bf878b164a2f5e42e04fbce66ae47fe91d9a31b52c">rsb_libspblas.h</a>
+</li>
+<li>blas_upper
+: <a class="el" href="rsb__libspblas_8c.html#acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07">blas_sparse.h</a>
+</li>
+<li>blas_upper_hermitian
+: <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">blas_sparse.h</a>
+</li>
+<li>blas_upper_symmetric
+: <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">rsb_libspblas.c</a>
+, <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">rsb_libspblas.h</a>
+</li>
+<li>blas_upper_triangular
+: <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">rsb_libspblas.h</a>
+, <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">blas_sparse.h</a>
+</li>
+<li>blas_valid_handle
+: <a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">rsb_libspblas.c</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">rsb_libspblas.h</a>
+</li>
+<li>blas_zero_base
+: <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9ea86431e076106ab9784bc5b203d4aa3e0">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea86431e076106ab9784bc5b203d4aa3e0">rsb_libspblas.h</a>
+, <a class="el" href="rsb__libspblas_8c.html#a3fe740ad5a139d723de260d638987e9ea86431e076106ab9784bc5b203d4aa3e0">rsb_libspblas.c</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
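
(For orientation while reading these generated index pages: the blas_* enumerators listed above are the arguments consumed by the Sparse BLAS calls indexed further below. The following is a minimal C sketch of the usual create/insert/assemble/multiply cycle, assuming the standard Sparse BLAS interface that librsb implements — BLAS_duscr_begin(), BLAS_duscr_insert_entries(), BLAS_duscr_end(), BLAS_dusmv(), BLAS_usds() — together with the RSB_NULL_INIT_OPTIONS / RSB_NULL_EXIT_OPTIONS macros from rsb.h. It is illustrative only and not part of this patch.)

    #include <rsb.h>          /* rsb_lib_init / rsb_lib_exit */
    #include <blas_sparse.h>  /* the Sparse BLAS interface indexed here */

    int main(void)
    {
        /* 2x2 lower triangular matrix A = [1 0; 2 3], in COO form */
        const int nr = 2, nc = 2, nnz = 3;
        const int IA[] = { 0, 1, 1 }, JA[] = { 0, 0, 1 };
        const double VA[] = { 1.0, 2.0, 3.0 };
        const double x[] = { 1.0, 1.0 };
        double y[] = { 0.0, 0.0 };
        blas_sparse_matrix A;

        if (rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
            return 1;
        A = BLAS_duscr_begin(nr, nc);                  /* begin assembly */
        BLAS_duscr_insert_entries(A, nnz, VA, IA, JA); /* insert COO data */
        BLAS_duscr_end(A);                             /* assemble */
        /* y <- 1.0 * A * x + y; blas_no_trans is one of the
         * blas_trans_type enumerators indexed on these pages */
        BLAS_dusmv(blas_no_trans, 1.0, A, x, 1, y, 1);
        BLAS_usds(A);                                  /* release handle */
        rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
        return 0;
    }
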
diff --git a/doc/html/globals_eval_0x72.html b/doc/html/globals_eval_0x72.html
new file mode 100644
index 0000000..7cd13be
--- /dev/null
+++ b/doc/html/globals_eval_0x72.html
@@ -0,0 +1,209 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Globals</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="files.html"><span>File List</span></a></li>
+      <li class="current"><a href="globals.html"><span>Globals</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li><a href="globals.html"><span>All</span></a></li>
+      <li><a href="globals_func.html"><span>Functions</span></a></li>
+      <li><a href="globals_type.html"><span>Typedefs</span></a></li>
+      <li><a href="globals_enum.html"><span>Enumerations</span></a></li>
+      <li class="current"><a href="globals_eval.html"><span>Enumerator</span></a></li>
+      <li><a href="globals_defs.html"><span>Macros</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li><a href="globals_eval.html#index_b"><span>b</span></a></li>
+      <li class="current"><a href="globals_eval_0x72.html#index_r"><span>r</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+ 
+
+<h3><a class="anchor" id="index_r"></a>- r -</h3><ul>
+<li>RSB_ELOPF_DIV
+: <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea5665d0891b6ec738013ae7925de01969">rsb.h</a>
+</li>
+<li>RSB_ELOPF_MUL
+: <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea5d853af7a6db57bc49cdbf7a53927e8a">rsb.h</a>
+</li>
+<li>RSB_ELOPF_NEG
+: <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea299b987e6a560bf0bec0432859a959e4">rsb.h</a>
+</li>
+<li>RSB_ELOPF_POW
+: <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadeae2cbeab6782b6e02b069568ec44cb94a">rsb.h</a>
+</li>
+<li>RSB_ELOPF_SCALE_COLS
+: <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadeafadcdf204c627d95c3dde82ee0c5608e">rsb.h</a>
+</li>
+<li>RSB_ELOPF_SCALE_COLS_REAL
+: <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea3a56f10b068d68e579bf4b01f8347f3f">rsb.h</a>
+</li>
+<li>RSB_ELOPF_SCALE_ROWS
+: <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea884b319e42b1f2d70543e26c300a4287">rsb.h</a>
+</li>
+<li>RSB_ELOPF_SCALE_ROWS_REAL
+: <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea445dc5c113f761b58356e93e1b2bbfb5">rsb.h</a>
+</li>
+<li>RSB_EXTF_ASUMS_COL
+: <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a26a147a4fe29284c1a3ca18ed3824ada">rsb.h</a>
+</li>
+<li>RSB_EXTF_ASUMS_ROW
+: <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a8e8061312124af555196c7277102ca54">rsb.h</a>
+</li>
+<li>RSB_EXTF_DIAG
+: <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a5c7c241fb262968d5b7c42e63e5c1ea1">rsb.h</a>
+</li>
+<li>RSB_EXTF_NORM_INF
+: <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a0a6cb081d0345b5bb6290ae534e3502f">rsb.h</a>
+</li>
+<li>RSB_EXTF_NORM_ONE
+: <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a4c48a7a285045f4614a83c50ad740508">rsb.h</a>
+</li>
+<li>RSB_EXTF_NORM_TWO
+: <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9af5f5082e70a6193ebcf3ea7ba7365eef">rsb.h</a>
+</li>
+<li>RSB_EXTF_SUMS_COL
+: <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a1878f79ae6f00f0b846a2fae397ffe4e">rsb.h</a>
+</li>
+<li>RSB_EXTF_SUMS_ROW
+: <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9af9b17f6ad2d8be781b003836f0403fe5">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_BOUNDED_BOX_COMPUTATION
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a3d3a5bf255dfc8719f6553e8ac4ecd53">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_CACHE_BLOCKING_METHOD
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a26f34783677f687b1e857de76a22fdd7">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_EXECUTING_THREADS
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ad9150d4d5672d1835185d6e2286d92f4">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a56c0c6849135ce5fa9edd7907ab3e0cb">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_IS_INITIALIZED_MARKER
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ad901e7c7c31f4b9118bb313db549ea3b">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_LEAF_LEVEL_MULTIVEC
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a1584d16b27130ebda9f7fefa1d89afa5">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_LIBRSB_ETIME
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ae900da85e3fc1f46083ee0abf34db1d9">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_MAX_MEMORY_ALLOCATED
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ab053d73dfb6ce061b9d95a2f7e908dc9">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_MAX_MEMORY_ALLOCATIONS
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a124bff2579d966823c2371e304656f84">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_MEM_ALLOC_CNT
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ad74c3b62aa359b12e7287e7238792e0f">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_MEM_ALLOC_TOT
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a87d7018453cb3179349f12f9e4667b24">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_MEMORY_HIERARCHY_INFO_STRING
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a574d237ad4bb16d884bb46e5a6670d0d">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_OUTPUT_STREAM
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ae398997ce8253b813f2bbb5834e9670f">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_SORT_METHOD
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a8fd1736c99255474630bee80d4924673">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_SUBDIVISION_MULTIPLIER
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6aae67087e45854502f7c54e0065ed9a3a">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_VERBOSE_ERRORS
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a0681bef1f3aca28448c14c4ed7eb4001">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_VERBOSE_EXIT
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a53498790997d5ef408751f9e19994532">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_VERBOSE_INIT
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a3a0e460ef74cf3b2edf102c1aaa73d8a">rsb.h</a>
+</li>
+<li>RSB_IO_WANT_VERBOSE_TUNING
+: <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a2164b61bd47cf53a3c8d287b419ab591">rsb.h</a>
+</li>
+<li>RSB_MIF_INDEX_STORAGE_IN_BYTES__TO__SIZE_T
+: <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa21c25054ec5c5a88f951d68457132858">rsb.h</a>
+</li>
+<li>RSB_MIF_INDEX_STORAGE_IN_BYTES_PER_NNZ__TO__RSB_REAL_T
+: <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa6662a0302f39b67aa567f7c023cfe065">rsb.h</a>
+</li>
+<li>RSB_MIF_LEAVES_COUNT__TO__RSB_BLK_INDEX_T
+: <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa6256658253071990797f06872811074f">rsb.h</a>
+</li>
+<li>RSB_MIF_MATRIX_COLS__TO__RSB_COO_INDEX_T
+: <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8faa75c11724776205763e381cebb7059d0">rsb.h</a>
+</li>
+<li>RSB_MIF_MATRIX_FLAGS__TO__RSB_FLAGS_T
+: <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa7a9e06fbef26bddc97005eea246c478e">rsb.h</a>
+</li>
+<li>RSB_MIF_MATRIX_INFO__TO__CHAR_P
+: <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa693bf11ea0f96ef79238ab422fcb3f81">rsb.h</a>
+</li>
+<li>RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T
+: <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa4c02a263fffec5ad80552c8ce3cc782c">rsb.h</a>
+</li>
+<li>RSB_MIF_MATRIX_ROWS__TO__RSB_COO_INDEX_T
+: <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fae9b21eeea628145e87690a5968a5c954">rsb.h</a>
+</li>
+<li>RSB_MIF_MATRIX_TYPECODE__TO__RSB_TYPE_T
+: <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa0ee69c4f0e9ac9a8ee4614a295b7be93">rsb.h</a>
+</li>
+<li>RSB_MIF_TOTAL_SIZE__TO__SIZE_T
+: <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa16df07735a83056772b8bde7359e957f">rsb.h</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
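
(The RSB_ELOPF_*, RSB_EXTF_*, RSB_IO_WANT_* and RSB_MIF_* enumerators indexed in the file just added belong to librsb's native rsb.h interface rather than to the Sparse BLAS one. As a rough sketch of three of them in use — assuming rsb_mtx_alloc_from_coo_const(), rsb_mtx_upd_vals(), rsb_mtx_get_nrm() and rsb_mtx_get_info() behave as in the rsb.h documentation group referenced above; again illustrative, not part of this patch:)

    #include <rsb.h>
    #include <stdio.h>

    int main(void)
    {
        rsb_err_t errval = RSB_ERR_NO_ERROR;
        const rsb_coo_idx_t IA[] = { 0, 1, 1 }, JA[] = { 0, 0, 1 };
        const double VA[] = { 1.0, 2.0, 3.0 };
        const double two = 2.0;
        double norm = 0.0;
        rsb_nnz_idx_t nnz = 0;
        struct rsb_mtx_t *mtxAp = NULL;

        rsb_lib_init(RSB_NULL_INIT_OPTIONS);
        mtxAp = rsb_mtx_alloc_from_coo_const(VA, IA, JA, 3,
                RSB_NUMERICAL_TYPE_DOUBLE, 2, 2,
                0, 0,                 /* 0,0: default blocking */
                RSB_FLAG_NOFLAGS, &errval);
        /* RSB_ELOPF_MUL: scale every nonzero by *omegap (here, 2.0) */
        rsb_mtx_upd_vals(mtxAp, RSB_ELOPF_MUL, &two);
        /* RSB_EXTF_NORM_INF: extract the matrix infinity norm */
        rsb_mtx_get_nrm(mtxAp, &norm, RSB_EXTF_NORM_INF);
        /* RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T: query nonzero count */
        rsb_mtx_get_info(mtxAp, RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T, &nnz);
        printf("inf-norm %g, %d nonzeroes\n", norm, (int)nnz);
        rsb_mtx_free(mtxAp);
        rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
        return 0;
    }
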
diff --git a/doc/html/globals_func.html b/doc/html/globals_func.html
new file mode 100644
index 0000000..81eeede
--- /dev/null
+++ b/doc/html/globals_func.html
@@ -0,0 +1,1230 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Globals</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="files.html"><span>File List</span></a></li>
+      <li class="current"><a href="globals.html"><span>Globals</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li><a href="globals.html"><span>All</span></a></li>
+      <li class="current"><a href="globals_func.html"><span>Functions</span></a></li>
+      <li><a href="globals_type.html"><span>Typedefs</span></a></li>
+      <li><a href="globals_enum.html"><span>Enumerations</span></a></li>
+      <li><a href="globals_eval.html"><span>Enumerator</span></a></li>
+      <li><a href="globals_defs.html"><span>Macros</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li class="current"><a href="globals_func.html#index_b"><span>b</span></a></li>
+      <li><a href="globals_func_0x72.html#index_r"><span>r</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+ 
+
+<h3><a class="anchor" id="index_b"></a>- b -</h3><ul>
+<li>BLAS_cusaxpy()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gafaf15e2530cd078b260bb744e00487cb">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafaf15e2530cd078b260bb744e00487cb">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafaf15e2530cd078b260bb744e00487cb">rsb_libspblas.h</a>
+</li>
+<li>blas_cusaxpy_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac6189fef9b94289f2b8a5b6b7287b50b">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac6189fef9b94289f2b8a5b6b7287b50b">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac6189fef9b94289f2b8a5b6b7287b50b">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cuscr_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga3483c364b4afec22621e46059b166247">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3483c364b4afec22621e46059b166247">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3483c364b4afec22621e46059b166247">blas_sparse.h</a>
+</li>
+<li>blas_cuscr_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaf4d21720c592de22cfd4139517d9d255">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf4d21720c592de22cfd4139517d9d255">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf4d21720c592de22cfd4139517d9d255">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cuscr_block_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">rsb_libspblas.h</a>
+</li>
+<li>blas_cuscr_block_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga967bfc819ed66559e96ae55a6826d1f8">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga967bfc819ed66559e96ae55a6826d1f8">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga967bfc819ed66559e96ae55a6826d1f8">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cuscr_end()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac2b5eccd5cf442b5e2e79201d62ca2b5">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac2b5eccd5cf442b5e2e79201d62ca2b5">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac2b5eccd5cf442b5e2e79201d62ca2b5">rsb_libspblas.h</a>
+</li>
+<li>blas_cuscr_end_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaa78d3bef027e5a29ab5e5dd6188bcd75">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa78d3bef027e5a29ab5e5dd6188bcd75">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa78d3bef027e5a29ab5e5dd6188bcd75">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cuscr_insert_block()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga42054351f49850f079733143b2af87fb">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga42054351f49850f079733143b2af87fb">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga42054351f49850f079733143b2af87fb">rsb_libspblas.h</a>
+</li>
+<li>blas_cuscr_insert_block_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga527ae15ee9e003d948494d9fcdad5dba">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga527ae15ee9e003d948494d9fcdad5dba">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga527ae15ee9e003d948494d9fcdad5dba">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cuscr_insert_clique()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaf089aaac5d65a4e38130b25d5ba2ba27">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf089aaac5d65a4e38130b25d5ba2ba27">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf089aaac5d65a4e38130b25d5ba2ba27">blas_sparse.h</a>
+</li>
+<li>blas_cuscr_insert_clique_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga06acafbf28371b1ad8a75a85173261e6">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga06acafbf28371b1ad8a75a85173261e6">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga06acafbf28371b1ad8a75a85173261e6">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cuscr_insert_col()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga589495aa8acd4eac99ef9132bc4062c9">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga589495aa8acd4eac99ef9132bc4062c9">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga589495aa8acd4eac99ef9132bc4062c9">rsb_libspblas.h</a>
+</li>
+<li>blas_cuscr_insert_col_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga1aadf4dc810ff6eb123a1bf9c859efe8">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1aadf4dc810ff6eb123a1bf9c859efe8">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1aadf4dc810ff6eb123a1bf9c859efe8">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cuscr_insert_entries()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga5af752a3fcb2898412f576eee7d9d618">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5af752a3fcb2898412f576eee7d9d618">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5af752a3fcb2898412f576eee7d9d618">rsb_libspblas.h</a>
+</li>
+<li>blas_cuscr_insert_entries_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga3deb906fcd5f9b9221b5865541c57d18">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3deb906fcd5f9b9221b5865541c57d18">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3deb906fcd5f9b9221b5865541c57d18">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cuscr_insert_entry()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaa39564978ebda8a88f8d19e3e060bc4d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa39564978ebda8a88f8d19e3e060bc4d">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa39564978ebda8a88f8d19e3e060bc4d">rsb_libspblas.h</a>
+</li>
+<li>blas_cuscr_insert_entry_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga6d735497bdd3bbafbb6168cb0fde5103">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6d735497bdd3bbafbb6168cb0fde5103">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6d735497bdd3bbafbb6168cb0fde5103">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cuscr_insert_row()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga77929c94cee3278cc7594a3f1377f5f8">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga77929c94cee3278cc7594a3f1377f5f8">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga77929c94cee3278cc7594a3f1377f5f8">rsb_libspblas.h</a>
+</li>
+<li>blas_cuscr_insert_row_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad4acfbfdf33a5682ac657add0292711d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad4acfbfdf33a5682ac657add0292711d">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad4acfbfdf33a5682ac657add0292711d">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cuscr_variable_block_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga95174fcf3bfbef91ab6b3b85fc90b128">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga95174fcf3bfbef91ab6b3b85fc90b128">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga95174fcf3bfbef91ab6b3b85fc90b128">rsb_libspblas.h</a>
+</li>
+<li>blas_cuscr_variable_block_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga24a2dac4570e6021fdcc5c84b52fb5bb">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga24a2dac4570e6021fdcc5c84b52fb5bb">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga24a2dac4570e6021fdcc5c84b52fb5bb">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusdot()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gae02711e85989d740894aa260028cab15">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae02711e85989d740894aa260028cab15">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae02711e85989d740894aa260028cab15">rsb_libspblas.h</a>
+</li>
+<li>blas_cusdot_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga6805ad5c8346534e68b436708920d135">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6805ad5c8346534e68b436708920d135">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6805ad5c8346534e68b436708920d135">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusga()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga71f2df0176e5f44bf482ea2386ac5fac">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga71f2df0176e5f44bf482ea2386ac5fac">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga71f2df0176e5f44bf482ea2386ac5fac">rsb_libspblas.h</a>
+</li>
+<li>blas_cusga_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga284485bb91904fe1324257ba1ab3a982">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga284485bb91904fe1324257ba1ab3a982">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga284485bb91904fe1324257ba1ab3a982">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusget_diag()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga4ec4b6dce3701c5803efa6b7455e1504">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4ec4b6dce3701c5803efa6b7455e1504">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4ec4b6dce3701c5803efa6b7455e1504">rsb_libspblas.h</a>
+</li>
+<li>blas_cusget_diag_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga4865a8fda031074a0d91cf5c548584b9">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4865a8fda031074a0d91cf5c548584b9">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4865a8fda031074a0d91cf5c548584b9">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusget_element()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga4c7eae1cfcd8cafc16f31b169c4a7514">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4c7eae1cfcd8cafc16f31b169c4a7514">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4c7eae1cfcd8cafc16f31b169c4a7514">rsb_libspblas.h</a>
+</li>
+<li>blas_cusget_element_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga104bc9ee1e6ce32012933e822019ecf0">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga104bc9ee1e6ce32012933e822019ecf0">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga104bc9ee1e6ce32012933e822019ecf0">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusget_infinity_norm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga65e5bef193bd5a2d47e80bff7eebed8e">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga65e5bef193bd5a2d47e80bff7eebed8e">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga65e5bef193bd5a2d47e80bff7eebed8e">rsb_libspblas.h</a>
+</li>
+<li>blas_cusget_infinity_norm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gacefa288104224e6c8f069f4001dacc08">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gacefa288104224e6c8f069f4001dacc08">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gacefa288104224e6c8f069f4001dacc08">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusget_matrix_nnz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gafe27f3044269d37cadb569fc6796ac01">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafe27f3044269d37cadb569fc6796ac01">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafe27f3044269d37cadb569fc6796ac01">rsb_libspblas.h</a>
+</li>
+<li>blas_cusget_matrix_nnz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga19e30bb70673342b4d6308bd9cf46884">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga19e30bb70673342b4d6308bd9cf46884">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga19e30bb70673342b4d6308bd9cf46884">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusget_rows_nnz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac50e955d6e2bff77e2c3ac2146c77aaf">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac50e955d6e2bff77e2c3ac2146c77aaf">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac50e955d6e2bff77e2c3ac2146c77aaf">rsb_libspblas.h</a>
+</li>
+<li>blas_cusget_rows_nnz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga9e11da08762387d8a7a885665298e815">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9e11da08762387d8a7a885665298e815">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9e11da08762387d8a7a885665298e815">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusget_rows_sparse()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga23f0c1852e05a426d24d2eb1bcae168b">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga23f0c1852e05a426d24d2eb1bcae168b">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga23f0c1852e05a426d24d2eb1bcae168b">rsb_libspblas.h</a>
+</li>
+<li>blas_cusget_rows_sparse_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga04751c01dcfb6730a33eaa91f403dd09">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga04751c01dcfb6730a33eaa91f403dd09">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga04751c01dcfb6730a33eaa91f403dd09">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusgz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga2a4c72eb85493e921f4d40e18edb83ef">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2a4c72eb85493e921f4d40e18edb83ef">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2a4c72eb85493e921f4d40e18edb83ef">rsb_libspblas.h</a>
+</li>
+<li>blas_cusgz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga32fdcc497a0db0ba36b413725ddc8c13">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga32fdcc497a0db0ba36b413725ddc8c13">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga32fdcc497a0db0ba36b413725ddc8c13">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusmm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga8c87639294b57d2893cd29f64902a64d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8c87639294b57d2893cd29f64902a64d">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8c87639294b57d2893cd29f64902a64d">rsb_libspblas.h</a>
+</li>
+<li>blas_cusmm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga2dc070f4b09c4b37d89ab9a0fb16352b">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2dc070f4b09c4b37d89ab9a0fb16352b">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2dc070f4b09c4b37d89ab9a0fb16352b">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusmv()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">blas_sparse.h</a>
+</li>
+<li>blas_cusmv_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga3d60593a2a4ea8c081590b392c39419d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3d60593a2a4ea8c081590b392c39419d">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3d60593a2a4ea8c081590b392c39419d">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusrows_scale()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gafc79de03622ceeb2e0b4343fe5904a36">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafc79de03622ceeb2e0b4343fe5904a36">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafc79de03622ceeb2e0b4343fe5904a36">rsb_libspblas.h</a>
+</li>
+<li>blas_cusrows_scale_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gae09ac29c14cede27a8d6a2be2687453e">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae09ac29c14cede27a8d6a2be2687453e">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae09ac29c14cede27a8d6a2be2687453e">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cussc()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga1b93628d321fbb77a50f98b467a3ff84">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1b93628d321fbb77a50f98b467a3ff84">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1b93628d321fbb77a50f98b467a3ff84">rsb_libspblas.h</a>
+</li>
+<li>blas_cussc_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gafc77b392db05fc22122d4639595cccb3">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafc77b392db05fc22122d4639595cccb3">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafc77b392db05fc22122d4639595cccb3">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusset_element()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga778acfebd02199f440b890b0176af19c">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga778acfebd02199f440b890b0176af19c">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga778acfebd02199f440b890b0176af19c">rsb_libspblas.h</a>
+</li>
+<li>blas_cusset_element_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga4a32533889a4ed82a21f457d1253317d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4a32533889a4ed82a21f457d1253317d">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4a32533889a4ed82a21f457d1253317d">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cusset_elements()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga3b358be87656e2d8065e1d30dd8060f4">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3b358be87656e2d8065e1d30dd8060f4">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3b358be87656e2d8065e1d30dd8060f4">rsb_libspblas.h</a>
+</li>
+<li>blas_cusset_elements_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga38398053da29e668ee440e55f675532b">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga38398053da29e668ee440e55f675532b">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga38398053da29e668ee440e55f675532b">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cussm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad864666e842f7d0878b1fb9d57e80c28">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad864666e842f7d0878b1fb9d57e80c28">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad864666e842f7d0878b1fb9d57e80c28">rsb_libspblas.h</a>
+</li>
+<li>blas_cussm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac3d8f0b6742566cbbadf6b18c9aa40b5">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac3d8f0b6742566cbbadf6b18c9aa40b5">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac3d8f0b6742566cbbadf6b18c9aa40b5">rsb_libspblas.h</a>
+</li>
+<li>BLAS_cussv()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga4c327ba1fa391b550f2fc5580ad49bdf">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4c327ba1fa391b550f2fc5580ad49bdf">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4c327ba1fa391b550f2fc5580ad49bdf">rsb_libspblas.h</a>
+</li>
+<li>blas_cussv_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga39b0ab077486c1fc3766d68ae9048447">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga39b0ab077486c1fc3766d68ae9048447">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga39b0ab077486c1fc3766d68ae9048447">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusaxpy()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga31b475fb2cc3f50775a5b6db930ab570">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga31b475fb2cc3f50775a5b6db930ab570">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga31b475fb2cc3f50775a5b6db930ab570">rsb_libspblas.h</a>
+</li>
+<li>blas_dusaxpy_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga90f1fe9fa99b947c8096befdbfb49fb3">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga90f1fe9fa99b947c8096befdbfb49fb3">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga90f1fe9fa99b947c8096befdbfb49fb3">rsb_libspblas.h</a>
+</li>
+<li>BLAS_duscr_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac931dcb1129ee3016ab82602c3d14fee">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac931dcb1129ee3016ab82602c3d14fee">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac931dcb1129ee3016ab82602c3d14fee">rsb_libspblas.h</a>
+</li>
+<li>blas_duscr_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad7d5969e9edee49441fc89d22715e60d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad7d5969e9edee49441fc89d22715e60d">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad7d5969e9edee49441fc89d22715e60d">rsb_libspblas.h</a>
+</li>
+<li>BLAS_duscr_block_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">rsb_libspblas.h</a>
+</li>
+<li>blas_duscr_block_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga864facf0316453a27af4b7024a11453b">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga864facf0316453a27af4b7024a11453b">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga864facf0316453a27af4b7024a11453b">rsb_libspblas.h</a>
+</li>
+<li>BLAS_duscr_end()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d9ce97bf054b1e3750eaae5d4e6c335">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d9ce97bf054b1e3750eaae5d4e6c335">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d9ce97bf054b1e3750eaae5d4e6c335">rsb_libspblas.h</a>
+</li>
+<li>blas_duscr_end_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga2120eb06b87f0e85d03a368e5bc55485">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2120eb06b87f0e85d03a368e5bc55485">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2120eb06b87f0e85d03a368e5bc55485">rsb_libspblas.h</a>
+</li>
+<li>BLAS_duscr_insert_block()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d35aa3e27cdbf8a50db5b47ff5e0892">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d35aa3e27cdbf8a50db5b47ff5e0892">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d35aa3e27cdbf8a50db5b47ff5e0892">rsb_libspblas.h</a>
+</li>
+<li>blas_duscr_insert_block_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga38012bbc4e99df72fb95409a4860ead7">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga38012bbc4e99df72fb95409a4860ead7">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga38012bbc4e99df72fb95409a4860ead7">rsb_libspblas.h</a>
+</li>
+<li>BLAS_duscr_insert_clique()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga290547e34be3648b2fe6a7378e59a7ec">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga290547e34be3648b2fe6a7378e59a7ec">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga290547e34be3648b2fe6a7378e59a7ec">rsb_libspblas.h</a>
+</li>
+<li>blas_duscr_insert_clique_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga1f7870f8a1114b94444c721c933e8bef">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1f7870f8a1114b94444c721c933e8bef">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1f7870f8a1114b94444c721c933e8bef">rsb_libspblas.h</a>
+</li>
+<li>BLAS_duscr_insert_col()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga8ee73d3b27bdc68e12c85ba281a337be">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8ee73d3b27bdc68e12c85ba281a337be">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8ee73d3b27bdc68e12c85ba281a337be">rsb_libspblas.h</a>
+</li>
+<li>blas_duscr_insert_col_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga5645393bb00d715d882e8e2d55c3f0d1">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5645393bb00d715d882e8e2d55c3f0d1">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5645393bb00d715d882e8e2d55c3f0d1">rsb_libspblas.h</a>
+</li>
+<li>BLAS_duscr_insert_entries()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gae0683bc8f0af5dd3e53b964190f9e1b4">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae0683bc8f0af5dd3e53b964190f9e1b4">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae0683bc8f0af5dd3e53b964190f9e1b4">rsb_libspblas.h</a>
+</li>
+<li>blas_duscr_insert_entries_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac2c1a4c7b2cebca56aedbad7a002e15f">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac2c1a4c7b2cebca56aedbad7a002e15f">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac2c1a4c7b2cebca56aedbad7a002e15f">rsb_libspblas.h</a>
+</li>
+<li>BLAS_duscr_insert_entry()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga346ff5263bf0b3a5d7dda94e2000130c">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga346ff5263bf0b3a5d7dda94e2000130c">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga346ff5263bf0b3a5d7dda94e2000130c">rsb_libspblas.h</a>
+</li>
+<li>blas_duscr_insert_entry_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga29c2f202a144845cc1d32c8d65bd5c5f">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga29c2f202a144845cc1d32c8d65bd5c5f">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga29c2f202a144845cc1d32c8d65bd5c5f">rsb_libspblas.h</a>
+</li>
+<li>BLAS_duscr_insert_row()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac3472ca6b036771a68d6f5f01387e482">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac3472ca6b036771a68d6f5f01387e482">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac3472ca6b036771a68d6f5f01387e482">rsb_libspblas.h</a>
+</li>
+<li>blas_duscr_insert_row_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaa72e5450302fa424dcd6cfae0bad872d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa72e5450302fa424dcd6cfae0bad872d">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa72e5450302fa424dcd6cfae0bad872d">rsb_libspblas.h</a>
+</li>
+<li>BLAS_duscr_variable_block_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gae0c3c6dc5503e21afb8192efb0f66edd">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae0c3c6dc5503e21afb8192efb0f66edd">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae0c3c6dc5503e21afb8192efb0f66edd">rsb_libspblas.h</a>
+</li>
+<li>blas_duscr_variable_block_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga12c7c1bdd46724147dbbd9b38dd2028e">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga12c7c1bdd46724147dbbd9b38dd2028e">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga12c7c1bdd46724147dbbd9b38dd2028e">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusdot()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga2ff8ae1b5a89cdb1bfd23b7b27635614">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2ff8ae1b5a89cdb1bfd23b7b27635614">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2ff8ae1b5a89cdb1bfd23b7b27635614">rsb_libspblas.h</a>
+</li>
+<li>blas_dusdot_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga891919cc22b2f9db6b26c857e2080b48">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga891919cc22b2f9db6b26c857e2080b48">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga891919cc22b2f9db6b26c857e2080b48">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusga()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaa253fd591971e664e48e058e85855882">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa253fd591971e664e48e058e85855882">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa253fd591971e664e48e058e85855882">rsb_libspblas.h</a>
+</li>
+<li>blas_dusga_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga21d8b0bd816bfd21371f70ca82ee9d9c">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga21d8b0bd816bfd21371f70ca82ee9d9c">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga21d8b0bd816bfd21371f70ca82ee9d9c">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusget_diag()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga35b70a7c3083b791cf1b94cb20ef57be">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga35b70a7c3083b791cf1b94cb20ef57be">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga35b70a7c3083b791cf1b94cb20ef57be">rsb_libspblas.h</a>
+</li>
+<li>blas_dusget_diag_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga7cfde04c833adeb887db75f4b2e104dd">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga7cfde04c833adeb887db75f4b2e104dd">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga7cfde04c833adeb887db75f4b2e104dd">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusget_element()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gacf35fa073f6cc991efe75f6a012a9a04">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gacf35fa073f6cc991efe75f6a012a9a04">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gacf35fa073f6cc991efe75f6a012a9a04">rsb_libspblas.h</a>
+</li>
+<li>blas_dusget_element_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga6443c32b223693698a8a0f0198ae4bee">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6443c32b223693698a8a0f0198ae4bee">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6443c32b223693698a8a0f0198ae4bee">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusget_infinity_norm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga39b4e25d5d5ce080f8dd994856e41fd0">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga39b4e25d5d5ce080f8dd994856e41fd0">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga39b4e25d5d5ce080f8dd994856e41fd0">rsb_libspblas.h</a>
+</li>
+<li>blas_dusget_infinity_norm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga15c7a93ed41a5488c0ef814d2061214a">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga15c7a93ed41a5488c0ef814d2061214a">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga15c7a93ed41a5488c0ef814d2061214a">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusget_matrix_nnz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga441bff94fdc50b9bf6e180d36f51c3ce">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga441bff94fdc50b9bf6e180d36f51c3ce">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga441bff94fdc50b9bf6e180d36f51c3ce">rsb_libspblas.h</a>
+</li>
+<li>blas_dusget_matrix_nnz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga3a4bc573dc07849e7a72ecb2d2f0c31d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3a4bc573dc07849e7a72ecb2d2f0c31d">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3a4bc573dc07849e7a72ecb2d2f0c31d">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusget_rows_nnz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gab866cf0951b576a47da3864d668919f5">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gab866cf0951b576a47da3864d668919f5">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gab866cf0951b576a47da3864d668919f5">rsb_libspblas.h</a>
+</li>
+<li>blas_dusget_rows_nnz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac09a79789dc8b79d2e5a375732703103">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac09a79789dc8b79d2e5a375732703103">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac09a79789dc8b79d2e5a375732703103">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusget_rows_sparse()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga498d143bae71d800dc35e2f1ee071359">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga498d143bae71d800dc35e2f1ee071359">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga498d143bae71d800dc35e2f1ee071359">rsb_libspblas.h</a>
+</li>
+<li>blas_dusget_rows_sparse_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaf2e6ab2c5cbd23a7690bbe8e26794033">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf2e6ab2c5cbd23a7690bbe8e26794033">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf2e6ab2c5cbd23a7690bbe8e26794033">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusgz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga0b26bd51a324ee09433dbfa995396344">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga0b26bd51a324ee09433dbfa995396344">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga0b26bd51a324ee09433dbfa995396344">rsb_libspblas.h</a>
+</li>
+<li>blas_dusgz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gadd448e0d4a33417634e6232c77d8a82a">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gadd448e0d4a33417634e6232c77d8a82a">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gadd448e0d4a33417634e6232c77d8a82a">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusmm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaeeddeb634efe4448a31d62fb547362f6">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaeeddeb634efe4448a31d62fb547362f6">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaeeddeb634efe4448a31d62fb547362f6">rsb_libspblas.h</a>
+</li>
+<li>blas_dusmm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaa6f99d27ec6f88cca6c6cfac1e8ce7e3">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa6f99d27ec6f88cca6c6cfac1e8ce7e3">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa6f99d27ec6f88cca6c6cfac1e8ce7e3">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusmv()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">rsb_libspblas.h</a>
+</li>
+<li>blas_dusmv_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga7172d1d1d0f3310ceaf9ecd1d128407b">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga7172d1d1d0f3310ceaf9ecd1d128407b">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga7172d1d1d0f3310ceaf9ecd1d128407b">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusrows_scale()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga01917c64887638dfb5226be1f87d964a">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga01917c64887638dfb5226be1f87d964a">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga01917c64887638dfb5226be1f87d964a">rsb_libspblas.h</a>
+</li>
+<li>blas_dusrows_scale_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga9f09f9d05e01d5b354ce234781e3945a">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9f09f9d05e01d5b354ce234781e3945a">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9f09f9d05e01d5b354ce234781e3945a">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dussc()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac71029e615c6c893b54e2f9395a536a4">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac71029e615c6c893b54e2f9395a536a4">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac71029e615c6c893b54e2f9395a536a4">rsb_libspblas.h</a>
+</li>
+<li>blas_dussc_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga98ac28de307a8713020edd41be98d455">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga98ac28de307a8713020edd41be98d455">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga98ac28de307a8713020edd41be98d455">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusset_element()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac8aa3ed1e29f2555519421290d236d0c">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac8aa3ed1e29f2555519421290d236d0c">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac8aa3ed1e29f2555519421290d236d0c">rsb_libspblas.h</a>
+</li>
+<li>blas_dusset_element_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gab50cd8a5a6a5d866789628da0c9141a2">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gab50cd8a5a6a5d866789628da0c9141a2">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gab50cd8a5a6a5d866789628da0c9141a2">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dusset_elements()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gae34ff937437af99d317739192e2783da">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae34ff937437af99d317739192e2783da">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae34ff937437af99d317739192e2783da">rsb_libspblas.h</a>
+</li>
+<li>blas_dusset_elements_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga8e2acb49dac4221d1554c30238bd6747">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8e2acb49dac4221d1554c30238bd6747">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8e2acb49dac4221d1554c30238bd6747">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dussm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaad6ff4b3cce242f76362e6ad8a947713">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaad6ff4b3cce242f76362e6ad8a947713">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaad6ff4b3cce242f76362e6ad8a947713">rsb_libspblas.h</a>
+</li>
+<li>blas_dussm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga4b93f6ef00d1aa3197a45a7e492edcd6">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4b93f6ef00d1aa3197a45a7e492edcd6">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga4b93f6ef00d1aa3197a45a7e492edcd6">rsb_libspblas.h</a>
+</li>
+<li>BLAS_dussv()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gade1bbec9b8263a2a5e76112f1042576b">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gade1bbec9b8263a2a5e76112f1042576b">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gade1bbec9b8263a2a5e76112f1042576b">rsb_libspblas.h</a>
+</li>
+<li>blas_dussv_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga36f989895809beaafaa57bb5ab41347f">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga36f989895809beaafaa57bb5ab41347f">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga36f989895809beaafaa57bb5ab41347f">rsb_libspblas.h</a>
+</li>
+<li>BLAS_susaxpy()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaeedaef37cd7591d8b15bc7e8ee049414">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaeedaef37cd7591d8b15bc7e8ee049414">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaeedaef37cd7591d8b15bc7e8ee049414">rsb_libspblas.h</a>
+</li>
+<li>blas_susaxpy_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga863f07d7735eaa4fc0c6dbe1be09974e">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga863f07d7735eaa4fc0c6dbe1be09974e">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga863f07d7735eaa4fc0c6dbe1be09974e">rsb_libspblas.h</a>
+</li>
+<li>BLAS_suscr_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad9fe50c2e7a26e6ef83dfd3ea4cfcdd5">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad9fe50c2e7a26e6ef83dfd3ea4cfcdd5">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad9fe50c2e7a26e6ef83dfd3ea4cfcdd5">rsb_libspblas.h</a>
+</li>
+<li>blas_suscr_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad2f7ede753754c2474d5460a92bba99e">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad2f7ede753754c2474d5460a92bba99e">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad2f7ede753754c2474d5460a92bba99e">rsb_libspblas.h</a>
+</li>
+<li>BLAS_suscr_block_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga11c5559450e186c2a86d714f564411f3">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga11c5559450e186c2a86d714f564411f3">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga11c5559450e186c2a86d714f564411f3">rsb_libspblas.c</a>
+</li>
+<li>blas_suscr_block_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga0067882e19affabebf581452a7c05252">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga0067882e19affabebf581452a7c05252">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga0067882e19affabebf581452a7c05252">rsb_libspblas.h</a>
+</li>
+<li>BLAS_suscr_end()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga7176a90049256cb0e0fe45db66f57dd2">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga7176a90049256cb0e0fe45db66f57dd2">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga7176a90049256cb0e0fe45db66f57dd2">rsb_libspblas.h</a>
+</li>
+<li>blas_suscr_end_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga5822f3be35eeb550c323de69ec9933d3">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5822f3be35eeb550c323de69ec9933d3">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5822f3be35eeb550c323de69ec9933d3">rsb_libspblas.h</a>
+</li>
+<li>BLAS_suscr_insert_block()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaa682b478ac48e12d4a091977e8c45768">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa682b478ac48e12d4a091977e8c45768">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa682b478ac48e12d4a091977e8c45768">rsb_libspblas.c</a>
+</li>
+<li>blas_suscr_insert_block_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga61080e2828351bd1585deb2713ed8a29">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga61080e2828351bd1585deb2713ed8a29">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga61080e2828351bd1585deb2713ed8a29">rsb_libspblas.h</a>
+</li>
+<li>BLAS_suscr_insert_clique()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga6e567e79f675ed861c8f446d0e7a78f5">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6e567e79f675ed861c8f446d0e7a78f5">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6e567e79f675ed861c8f446d0e7a78f5">rsb_libspblas.h</a>
+</li>
+<li>blas_suscr_insert_clique_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gafcee9667fc445e32012c960fca7e698d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafcee9667fc445e32012c960fca7e698d">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafcee9667fc445e32012c960fca7e698d">rsb_libspblas.h</a>
+</li>
+<li>BLAS_suscr_insert_col()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga547d271038794dfc797aecc70e294761">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga547d271038794dfc797aecc70e294761">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga547d271038794dfc797aecc70e294761">rsb_libspblas.c</a>
+</li>
+<li>blas_suscr_insert_col_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga2d8c691851acf099c25eff1a4c2885c1">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2d8c691851acf099c25eff1a4c2885c1">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2d8c691851acf099c25eff1a4c2885c1">rsb_libspblas.h</a>
+</li>
+<li>BLAS_suscr_insert_entries()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac6158601459aabebc22795864a2a62ba">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac6158601459aabebc22795864a2a62ba">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac6158601459aabebc22795864a2a62ba">rsb_libspblas.h</a>
+</li>
+<li>blas_suscr_insert_entries_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga9119b49fd049bcaa310bccb36fcda664">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9119b49fd049bcaa310bccb36fcda664">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9119b49fd049bcaa310bccb36fcda664">rsb_libspblas.h</a>
+</li>
+<li>BLAS_suscr_insert_entry()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga26e2c422895e5df8492bdb561cab4a54">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga26e2c422895e5df8492bdb561cab4a54">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga26e2c422895e5df8492bdb561cab4a54">rsb_libspblas.h</a>
+</li>
+<li>blas_suscr_insert_entry_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga9b3085c739330bca518e8ef371f7d3b1">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9b3085c739330bca518e8ef371f7d3b1">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9b3085c739330bca518e8ef371f7d3b1">rsb_libspblas.h</a>
+</li>
+<li>BLAS_suscr_insert_row()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga9b815fa125e3c84a6e6a6ead2c9ef87b">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9b815fa125e3c84a6e6a6ead2c9ef87b">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9b815fa125e3c84a6e6a6ead2c9ef87b">rsb_libspblas.h</a>
+</li>
+<li>blas_suscr_insert_row_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga71080ddbf0e0e602c7bc36993a6c88ca">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga71080ddbf0e0e602c7bc36993a6c88ca">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga71080ddbf0e0e602c7bc36993a6c88ca">rsb_libspblas.h</a>
+</li>
+<li>BLAS_suscr_variable_block_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gae7e006a448094a70204be60f24cdf1a3">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae7e006a448094a70204be60f24cdf1a3">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae7e006a448094a70204be60f24cdf1a3">rsb_libspblas.c</a>
+</li>
+<li>blas_suscr_variable_block_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaab267e13449c999ad8a8e3e358f4b2ed">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaab267e13449c999ad8a8e3e358f4b2ed">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaab267e13449c999ad8a8e3e358f4b2ed">rsb_libspblas.h</a>
+</li>
+<li>BLAS_susdot()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga88a22a58b50ce89708abb232e4cbffcd">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga88a22a58b50ce89708abb232e4cbffcd">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga88a22a58b50ce89708abb232e4cbffcd">rsb_libspblas.h</a>
+</li>
+<li>blas_susdot_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga3d4d6df66fbbdfb8585770ce2ce37e6b">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3d4d6df66fbbdfb8585770ce2ce37e6b">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3d4d6df66fbbdfb8585770ce2ce37e6b">rsb_libspblas.h</a>
+</li>
+<li>BLAS_susga()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga40cdf6b61694154efa1ba8d180381827">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga40cdf6b61694154efa1ba8d180381827">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga40cdf6b61694154efa1ba8d180381827">rsb_libspblas.c</a>
+</li>
+<li>blas_susga_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga69bea2986de886f37a493464b1006456">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga69bea2986de886f37a493464b1006456">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga69bea2986de886f37a493464b1006456">rsb_libspblas.h</a>
+</li>
+<li>BLAS_susget_diag()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga1113eda1c806ca3631fefde07624fbd6">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1113eda1c806ca3631fefde07624fbd6">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1113eda1c806ca3631fefde07624fbd6">rsb_libspblas.h</a>
+</li>
+<li>blas_susget_diag_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga0444e8a4b321bf1488fb496bdf3116d2">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga0444e8a4b321bf1488fb496bdf3116d2">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga0444e8a4b321bf1488fb496bdf3116d2">rsb_libspblas.h</a>
+</li>
+<li>BLAS_susget_element()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad86989cd1f58003617f3db251b6fc0f1">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad86989cd1f58003617f3db251b6fc0f1">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad86989cd1f58003617f3db251b6fc0f1">rsb_libspblas.c</a>
+</li>
+<li>blas_susget_element_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaac53e141083bc9871d81b587e5f785c1">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaac53e141083bc9871d81b587e5f785c1">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaac53e141083bc9871d81b587e5f785c1">rsb_libspblas.h</a>
+</li>
+<li>BLAS_susget_infinity_norm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gafc49f44b76021677000bebe7d7fe133b">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafc49f44b76021677000bebe7d7fe133b">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafc49f44b76021677000bebe7d7fe133b">rsb_libspblas.h</a>
+</li>
+<li>blas_susget_infinity_norm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaffaaf5b49e850adda0163b6bc082077d">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaffaaf5b49e850adda0163b6bc082077d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaffaaf5b49e850adda0163b6bc082077d">rsb_libspblas.h</a>
+</li>
+<li>BLAS_susget_matrix_nnz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gafc031d78d0274c81039c2448a403cd10">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafc031d78d0274c81039c2448a403cd10">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafc031d78d0274c81039c2448a403cd10">rsb_libspblas.h</a>
+</li>
+<li>blas_susget_matrix_nnz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga039a9d4da3423ea71726242e1c1251e7">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga039a9d4da3423ea71726242e1c1251e7">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga039a9d4da3423ea71726242e1c1251e7">rsb_libspblas.h</a>
+</li>
+<li>BLAS_susget_rows_nnz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad84dbcdeda549e1b0361f7ade7a38b13">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad84dbcdeda549e1b0361f7ade7a38b13">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad84dbcdeda549e1b0361f7ade7a38b13">rsb_libspblas.h</a>
+</li>
+<li>blas_susget_rows_nnz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga1a8c39f41962e3be6ac84ea3be73f7a0">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1a8c39f41962e3be6ac84ea3be73f7a0">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1a8c39f41962e3be6ac84ea3be73f7a0">rsb_libspblas.c</a>
+</li>
+<li>BLAS_susget_rows_sparse()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga8f78343207ff584d2d78789bd90e5533">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8f78343207ff584d2d78789bd90e5533">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8f78343207ff584d2d78789bd90e5533">rsb_libspblas.h</a>
+</li>
+<li>blas_susget_rows_sparse_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga0977f63d781215c826aa5a0ea2df9f47">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga0977f63d781215c826aa5a0ea2df9f47">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga0977f63d781215c826aa5a0ea2df9f47">rsb_libspblas.h</a>
+</li>
+<li>BLAS_susgz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga2c53b81e979cbae6a5d198509f6d905a">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2c53b81e979cbae6a5d198509f6d905a">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2c53b81e979cbae6a5d198509f6d905a">rsb_libspblas.h</a>
+</li>
+<li>blas_susgz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga74964bd95bd8945b13c7fe2c7f559e5c">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga74964bd95bd8945b13c7fe2c7f559e5c">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga74964bd95bd8945b13c7fe2c7f559e5c">rsb_libspblas.c</a>
+</li>
+<li>BLAS_susmm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga29c11c0c304637e89852359b0f8b10b5">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga29c11c0c304637e89852359b0f8b10b5">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga29c11c0c304637e89852359b0f8b10b5">rsb_libspblas.h</a>
+</li>
+<li>blas_susmm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga2c1da8c4c1473a930ebfaa62f360ca8e">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2c1da8c4c1473a930ebfaa62f360ca8e">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2c1da8c4c1473a930ebfaa62f360ca8e">rsb_libspblas.h</a>
+</li>
+<li>BLAS_susmv()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">rsb_libspblas.h</a>
+</li>
+<li>blas_susmv_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga651b1d1df5c964dbb21c1a5b14d7878b">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga651b1d1df5c964dbb21c1a5b14d7878b">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga651b1d1df5c964dbb21c1a5b14d7878b">rsb_libspblas.h</a>
+</li>
+<li>BLAS_susrows_scale()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gae671b9fc06140680a8c104ef4f0f54f0">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae671b9fc06140680a8c104ef4f0f54f0">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae671b9fc06140680a8c104ef4f0f54f0">blas_sparse.h</a>
+</li>
+<li>blas_susrows_scale_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga9de54361f778577330c6c5ece88a63c3">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9de54361f778577330c6c5ece88a63c3">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9de54361f778577330c6c5ece88a63c3">blas_sparse.h</a>
+</li>
+<li>BLAS_sussc()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad58ff27808df2287b9cc77f6ed4d55ff">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad58ff27808df2287b9cc77f6ed4d55ff">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad58ff27808df2287b9cc77f6ed4d55ff">rsb_libspblas.h</a>
+</li>
+<li>blas_sussc_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga3f88389831294ad45b84ec31313fbc15">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3f88389831294ad45b84ec31313fbc15">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3f88389831294ad45b84ec31313fbc15">blas_sparse.h</a>
+</li>
+<li>BLAS_susset_element()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaf17e549ec8cf353144ac1e3a1f080f46">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf17e549ec8cf353144ac1e3a1f080f46">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf17e549ec8cf353144ac1e3a1f080f46">rsb_libspblas.c</a>
+</li>
+<li>blas_susset_element_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gab8c3e5745870d4399382051dcedad144">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gab8c3e5745870d4399382051dcedad144">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gab8c3e5745870d4399382051dcedad144">blas_sparse.h</a>
+</li>
+<li>BLAS_susset_elements()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad3e05b01efa2857c0938ada63f30cadf">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad3e05b01efa2857c0938ada63f30cadf">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad3e05b01efa2857c0938ada63f30cadf">rsb_libspblas.c</a>
+</li>
+<li>blas_susset_elements_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac0abb530fc46d610bf56e7fb1ef42c6c">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0abb530fc46d610bf56e7fb1ef42c6c">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0abb530fc46d610bf56e7fb1ef42c6c">rsb_libspblas.h</a>
+</li>
+<li>BLAS_sussm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga3d7835bb3621aaf70787d72f86355f8d">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3d7835bb3621aaf70787d72f86355f8d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3d7835bb3621aaf70787d72f86355f8d">rsb_libspblas.h</a>
+</li>
+<li>blas_sussm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga916f5af1f63f33a3a084accaf2dfd6f1">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga916f5af1f63f33a3a084accaf2dfd6f1">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga916f5af1f63f33a3a084accaf2dfd6f1">blas_sparse.h</a>
+</li>
+<li>BLAS_sussv()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gafc9acf48136458baa6ace90355e7abb2">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafc9acf48136458baa6ace90355e7abb2">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gafc9acf48136458baa6ace90355e7abb2">rsb_libspblas.c</a>
+</li>
+<li>blas_sussv_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga3b63c0a83f8088e60c8e609b451354f0">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3b63c0a83f8088e60c8e609b451354f0">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga3b63c0a83f8088e60c8e609b451354f0">rsb_libspblas.h</a>
+</li>
+<li>BLAS_uscr_end()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga2ff68116b5ae79c37bf335096de973c0">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2ff68116b5ae79c37bf335096de973c0">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2ff68116b5ae79c37bf335096de973c0">rsb_libspblas.c</a>
+</li>
+<li>blas_uscr_end_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga60974067bf5367a9a3c6eaa9f6f8f4ab">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga60974067bf5367a9a3c6eaa9f6f8f4ab">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga60974067bf5367a9a3c6eaa9f6f8f4ab">blas_sparse.h</a>
+</li>
+<li>BLAS_usds()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga8b0cca8196f40f7b55084a978b40717f">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8b0cca8196f40f7b55084a978b40717f">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8b0cca8196f40f7b55084a978b40717f">blas_sparse.h</a>
+</li>
+<li>blas_usds_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gae4db91cffaf71632bd41b7423c64b757">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae4db91cffaf71632bd41b7423c64b757">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae4db91cffaf71632bd41b7423c64b757">rsb_libspblas.c</a>
+</li>
+<li>BLAS_usgp()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">rsb_libspblas.c</a>
+</li>
+<li>blas_usgp_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga2cb97e106eb117547157a8fc61491b91">rsb_libspblas.c</a>
+</li>
+<li>BLAS_ussp()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">rsb_libspblas.h</a>
+</li>
+<li>blas_ussp_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga5ea0303be1db6c9dd73c03bba6dc6158">rsb_libspblas.c</a>
+</li>
+<li>BLAS_zusaxpy()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga20f8bb20cf00554547342750d80b2197">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga20f8bb20cf00554547342750d80b2197">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga20f8bb20cf00554547342750d80b2197">blas_sparse.h</a>
+</li>
+<li>blas_zusaxpy_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga58ad4724155b0cef43cdb7d95f879d8c">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga58ad4724155b0cef43cdb7d95f879d8c">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga58ad4724155b0cef43cdb7d95f879d8c">rsb_libspblas.h</a>
+</li>
+<li>BLAS_zuscr_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga52b67393ad16e3d40e74fcdba88c7da4">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga52b67393ad16e3d40e74fcdba88c7da4">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga52b67393ad16e3d40e74fcdba88c7da4">rsb_libspblas.h</a>
+</li>
+<li>blas_zuscr_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gae0246836bd8d4b8697c6674998397f3a">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae0246836bd8d4b8697c6674998397f3a">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gae0246836bd8d4b8697c6674998397f3a">blas_sparse.h</a>
+</li>
+<li>BLAS_zuscr_block_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286">rsb_libspblas.h</a>
+</li>
+<li>blas_zuscr_block_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga62c3bd7ba1a96f82055478d40af67370">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga62c3bd7ba1a96f82055478d40af67370">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga62c3bd7ba1a96f82055478d40af67370">rsb_libspblas.c</a>
+</li>
+<li>BLAS_zuscr_end()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">blas_sparse.h</a>
+</li>
+<li>blas_zuscr_end_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga6c23466b531e84f472d5fa75228cb895">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6c23466b531e84f472d5fa75228cb895">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6c23466b531e84f472d5fa75228cb895">rsb_libspblas.c</a>
+</li>
+<li>BLAS_zuscr_insert_block()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga573ee2ea89db4a133b8729abbb1223f0">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga573ee2ea89db4a133b8729abbb1223f0">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga573ee2ea89db4a133b8729abbb1223f0">rsb_libspblas.c</a>
+</li>
+<li>blas_zuscr_insert_block_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac3837cd5c7b2e8ac11c6c0e5cff8914c">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac3837cd5c7b2e8ac11c6c0e5cff8914c">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac3837cd5c7b2e8ac11c6c0e5cff8914c">rsb_libspblas.c</a>
+</li>
+<li>BLAS_zuscr_insert_clique()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga52519d2caa1070b0c80ac3c6cb104d92">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga52519d2caa1070b0c80ac3c6cb104d92">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga52519d2caa1070b0c80ac3c6cb104d92">rsb_libspblas.h</a>
+</li>
+<li>blas_zuscr_insert_clique_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga8c3430083655b74988536d823e40c723">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8c3430083655b74988536d823e40c723">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8c3430083655b74988536d823e40c723">blas_sparse.h</a>
+</li>
+<li>BLAS_zuscr_insert_col()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga00cfdd3669b146b25d42a32f104ff8a3">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga00cfdd3669b146b25d42a32f104ff8a3">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga00cfdd3669b146b25d42a32f104ff8a3">rsb_libspblas.h</a>
+</li>
+<li>blas_zuscr_insert_col_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga10a2dc6a5399459c83282bda757f5096">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga10a2dc6a5399459c83282bda757f5096">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga10a2dc6a5399459c83282bda757f5096">rsb_libspblas.c</a>
+</li>
+<li>BLAS_zuscr_insert_entries()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaacc9c9e5c95df4ea6656ad93f1f09666">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaacc9c9e5c95df4ea6656ad93f1f09666">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaacc9c9e5c95df4ea6656ad93f1f09666">rsb_libspblas.h</a>
+</li>
+<li>blas_zuscr_insert_entries_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad9ad3afc16fc0181117004fd46ff78ae">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad9ad3afc16fc0181117004fd46ff78ae">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad9ad3afc16fc0181117004fd46ff78ae">rsb_libspblas.h</a>
+</li>
+<li>BLAS_zuscr_insert_entry()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ffe345c537b53ac5839da21b236d87c">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ffe345c537b53ac5839da21b236d87c">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ffe345c537b53ac5839da21b236d87c">rsb_libspblas.c</a>
+</li>
+<li>blas_zuscr_insert_entry_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaad6627231dc4230affa318726ff3f345">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaad6627231dc4230affa318726ff3f345">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaad6627231dc4230affa318726ff3f345">rsb_libspblas.c</a>
+</li>
+<li>BLAS_zuscr_insert_row()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gab52e13dc7c61fc48e593276f04cb2d30">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gab52e13dc7c61fc48e593276f04cb2d30">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gab52e13dc7c61fc48e593276f04cb2d30">rsb_libspblas.h</a>
+</li>
+<li>blas_zuscr_insert_row_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaf871e29bfce399dedbebe2aa9c7831df">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf871e29bfce399dedbebe2aa9c7831df">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf871e29bfce399dedbebe2aa9c7831df">rsb_libspblas.c</a>
+</li>
+<li>BLAS_zuscr_variable_block_begin()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaa582b369a0233027349f8f844cce7622">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa582b369a0233027349f8f844cce7622">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa582b369a0233027349f8f844cce7622">rsb_libspblas.h</a>
+</li>
+<li>blas_zuscr_variable_block_begin_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaa51253d1c144c8aa744b2e13742fec40">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa51253d1c144c8aa744b2e13742fec40">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa51253d1c144c8aa744b2e13742fec40">blas_sparse.h</a>
+</li>
+<li>BLAS_zusdot()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga1baea6bd05a2117418d333f5365e34df">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1baea6bd05a2117418d333f5365e34df">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1baea6bd05a2117418d333f5365e34df">blas_sparse.h</a>
+</li>
+<li>blas_zusdot_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaa9f54b685570087469d21462d089ef7d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa9f54b685570087469d21462d089ef7d">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaa9f54b685570087469d21462d089ef7d">rsb_libspblas.h</a>
+</li>
+<li>BLAS_zusga()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga2a29ab06d610d011109dd0c3da94992f">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2a29ab06d610d011109dd0c3da94992f">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga2a29ab06d610d011109dd0c3da94992f">rsb_libspblas.h</a>
+</li>
+<li>blas_zusga_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga245af9e95488dece29876354c6e91fed">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga245af9e95488dece29876354c6e91fed">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga245af9e95488dece29876354c6e91fed">rsb_libspblas.h</a>
+</li>
+<li>BLAS_zusget_diag()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad175937c05d3d05d3aa7fa35eb3028ec">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad175937c05d3d05d3aa7fa35eb3028ec">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad175937c05d3d05d3aa7fa35eb3028ec">blas_sparse.h</a>
+</li>
+<li>blas_zusget_diag_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga73feb9adc685f7ff1d66763b0801a0f9">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga73feb9adc685f7ff1d66763b0801a0f9">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga73feb9adc685f7ff1d66763b0801a0f9">blas_sparse.h</a>
+</li>
+<li>BLAS_zusget_element()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga27417bc0d923f7288ed736837492275c">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga27417bc0d923f7288ed736837492275c">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga27417bc0d923f7288ed736837492275c">rsb_libspblas.h</a>
+</li>
+<li>blas_zusget_element_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga845cca2b512e38b467fc0d4b93d660b7">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga845cca2b512e38b467fc0d4b93d660b7">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga845cca2b512e38b467fc0d4b93d660b7">rsb_libspblas.h</a>
+</li>
+<li>BLAS_zusget_infinity_norm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga286c2cf2c749c80c8b71ff2f4bdb1566">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga286c2cf2c749c80c8b71ff2f4bdb1566">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga286c2cf2c749c80c8b71ff2f4bdb1566">rsb_libspblas.h</a>
+</li>
+<li>blas_zusget_infinity_norm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga01b88a27714ca87085421fd9a4f3e479">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga01b88a27714ca87085421fd9a4f3e479">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga01b88a27714ca87085421fd9a4f3e479">rsb_libspblas.h</a>
+</li>
+<li>BLAS_zusget_matrix_nnz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga85e15d7a3331e8ed4d702908477e2896">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga85e15d7a3331e8ed4d702908477e2896">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga85e15d7a3331e8ed4d702908477e2896">rsb_libspblas.h</a>
+</li>
+<li>blas_zusget_matrix_nnz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga9bdd048dea68ecbd8fd712349d4fbf13">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9bdd048dea68ecbd8fd712349d4fbf13">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9bdd048dea68ecbd8fd712349d4fbf13">rsb_libspblas.c</a>
+</li>
+<li>BLAS_zusget_rows_nnz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gadeb3cbe1cc6987763a55665bcdb8aef5">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gadeb3cbe1cc6987763a55665bcdb8aef5">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gadeb3cbe1cc6987763a55665bcdb8aef5">blas_sparse.h</a>
+</li>
+<li>blas_zusget_rows_nnz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga50cba1e236b63775110d6d1b292417da">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga50cba1e236b63775110d6d1b292417da">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga50cba1e236b63775110d6d1b292417da">rsb_libspblas.h</a>
+</li>
+<li>BLAS_zusget_rows_sparse()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaf9d44fc73526a4fdf9627424626bf4a5">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf9d44fc73526a4fdf9627424626bf4a5">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf9d44fc73526a4fdf9627424626bf4a5">blas_sparse.h</a>
+</li>
+<li>blas_zusget_rows_sparse_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga63f072aa25f7f7f8ac1ac4e32aae0c2e">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga63f072aa25f7f7f8ac1ac4e32aae0c2e">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga63f072aa25f7f7f8ac1ac4e32aae0c2e">rsb_libspblas.c</a>
+</li>
+<li>BLAS_zusgz()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga0d52a140d65ab78ee0c515c445b42451">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga0d52a140d65ab78ee0c515c445b42451">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga0d52a140d65ab78ee0c515c445b42451">rsb_libspblas.c</a>
+</li>
+<li>blas_zusgz_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a6be1c191d51a622b99fe1b9a776bdc">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a6be1c191d51a622b99fe1b9a776bdc">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a6be1c191d51a622b99fe1b9a776bdc">rsb_libspblas.h</a>
+</li>
+<li>BLAS_zusmm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga88138db4545610d234d18d42237f36ee">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga88138db4545610d234d18d42237f36ee">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga88138db4545610d234d18d42237f36ee">rsb_libspblas.c</a>
+</li>
+<li>blas_zusmm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaf7018fb638e25fe8b149d0cab4e844c0">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf7018fb638e25fe8b149d0cab4e844c0">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaf7018fb638e25fe8b149d0cab4e844c0">rsb_libspblas.c</a>
+</li>
+<li>BLAS_zusmv()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">rsb_libspblas.h</a>
+</li>
+<li>blas_zusmv_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga6747bd2d7930018d8693a97a3eb2865c">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6747bd2d7930018d8693a97a3eb2865c">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga6747bd2d7930018d8693a97a3eb2865c">rsb_libspblas.h</a>
+</li>
+<li>BLAS_zusrows_scale()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gad551879cdde6d16d9dd5b9edc647c667">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad551879cdde6d16d9dd5b9edc647c667">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gad551879cdde6d16d9dd5b9edc647c667">blas_sparse.h</a>
+</li>
+<li>blas_zusrows_scale_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga806bb32c4231e4cd9d833370484ad369">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga806bb32c4231e4cd9d833370484ad369">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga806bb32c4231e4cd9d833370484ad369">blas_sparse.h</a>
+</li>
+<li>BLAS_zussc()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaad333ae644010e3b059190b98528c79d">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaad333ae644010e3b059190b98528c79d">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaad333ae644010e3b059190b98528c79d">rsb_libspblas.c</a>
+</li>
+<li>blas_zussc_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gab89e9860df0ed52620651cfc607a987a">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gab89e9860df0ed52620651cfc607a987a">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gab89e9860df0ed52620651cfc607a987a">blas_sparse.h</a>
+</li>
+<li>BLAS_zusset_element()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gaca954a070d476342e254587fc2faa7fd">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaca954a070d476342e254587fc2faa7fd">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gaca954a070d476342e254587fc2faa7fd">blas_sparse.h</a>
+</li>
+<li>blas_zusset_element_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga52efe19f0972fa51ac6329cf717b676c">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga52efe19f0972fa51ac6329cf717b676c">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga52efe19f0972fa51ac6329cf717b676c">blas_sparse.h</a>
+</li>
+<li>BLAS_zusset_elements()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac542af7517c9f667122e8bdc408487b3">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac542af7517c9f667122e8bdc408487b3">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac542af7517c9f667122e8bdc408487b3">rsb_libspblas.h</a>
+</li>
+<li>blas_zusset_elements_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga156a8d0225d9761cd58e15e026b9ba2e">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga156a8d0225d9761cd58e15e026b9ba2e">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga156a8d0225d9761cd58e15e026b9ba2e">rsb_libspblas.c</a>
+</li>
+<li>BLAS_zussm()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga8602eae41f9e5248ff086087abe68bdf">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8602eae41f9e5248ff086087abe68bdf">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga8602eae41f9e5248ff086087abe68bdf">rsb_libspblas.h</a>
+</li>
+<li>blas_zussm_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga60f808ded982233be9a4faaa5fb75db3">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga60f808ded982233be9a4faaa5fb75db3">rsb_libspblas.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga60f808ded982233be9a4faaa5fb75db3">blas_sparse.h</a>
+</li>
+<li>BLAS_zussv()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga7c1e740064369d0029cd627643eb841a">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga7c1e740064369d0029cd627643eb841a">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga7c1e740064369d0029cd627643eb841a">rsb_libspblas.h</a>
+</li>
+<li>blas_zussv_()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d14a5df82e93614e8c524f6d20bb5c5">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d14a5df82e93614e8c524f6d20bb5c5">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d14a5df82e93614e8c524f6d20bb5c5">rsb_libspblas.h</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
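The index above pairs each C-binding routine (e.g. BLAS_dusmv()) with a Fortran-callable variant carrying a trailing underscore (e.g. blas_dusmv_()); both resolve to the same documentation anchor in blas_sparse.h, rsb_libspblas.c and rsb_libspblas.h. For orientation, a minimal sketch of how the indexed double-precision creation, multiply and destroy routines chain together; this is a sketch following the Sparse BLAS interface these headers declare, not code taken from this patch, and it assumes rsb_lib_init()/rsb_lib_exit() bracketing is required before any Sparse BLAS call, with error checking abbreviated:

  #include <rsb.h>         /* rsb_lib_init()/rsb_lib_exit() */
  #include <blas_sparse.h> /* the BLAS_* routines indexed above */

  int main(void)
  {
      /* a 2x2 diagonal matrix in coordinate (COO) form */
      const int nz = 2;
      const int IA[] = {0, 1}, JA[] = {0, 1};
      const double VA[] = {1.0, 2.0};
      const double x[] = {1.0, 1.0};
      double y[] = {0.0, 0.0};
      blas_sparse_matrix A;

      if (rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
          return 1;

      A = BLAS_duscr_begin(2, 2);                   /* begin construction */
      BLAS_duscr_insert_entries(A, nz, VA, IA, JA); /* insert COO triples */
      BLAS_duscr_end(A);                            /* finalize the matrix */

      BLAS_dusmv(blas_no_trans, 1.0, A, x, 1, y, 1); /* y <- 1*A*x + y */

      BLAS_usds(A);                                 /* release the handle */
      rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
      return 0;
  }

The single-precision and complex variants indexed above (BLAS_sus*, BLAS_cus*, BLAS_zus*) follow the same call sequence with the corresponding value type.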
diff --git a/doc/html/globals_func_0x72.html b/doc/html/globals_func_0x72.html
new file mode 100644
index 0000000..f3e6c07
--- /dev/null
+++ b/doc/html/globals_func_0x72.html
@@ -0,0 +1,288 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Globals</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="files.html"><span>File List</span></a></li>
+      <li class="current"><a href="globals.html"><span>Globals</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li><a href="globals.html"><span>All</span></a></li>
+      <li class="current"><a href="globals_func.html"><span>Functions</span></a></li>
+      <li><a href="globals_type.html"><span>Typedefs</span></a></li>
+      <li><a href="globals_enum.html"><span>Enumerations</span></a></li>
+      <li><a href="globals_eval.html"><span>Enumerator</span></a></li>
+      <li><a href="globals_defs.html"><span>Macros</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow4" class="tabs3">
+    <ul class="tablist">
+      <li><a href="globals_func.html#index_b"><span>b</span></a></li>
+      <li class="current"><a href="globals_func_0x72.html#index_r"><span>r</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+ 
+
+<h3><a class="anchor" id="index_r"></a>- r -</h3><ul>
+<li>rsb_blas_get_mtx()
+: <a class="el" href="group__rsb__doc__sparse__blas.html#gac4d8c73e5d9faa85209bcc4e885d4ff1">blas_sparse.h</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac4d8c73e5d9faa85209bcc4e885d4ff1">rsb_libspblas.c</a>
+, <a class="el" href="group__rsb__doc__sparse__blas.html#gac4d8c73e5d9faa85209bcc4e885d4ff1">rsb_libspblas.h</a>
+</li>
+<li>rsb_coo_sort()
+: <a class="el" href="group__rsb__doc__rsb.html#gaa09eca432d5bb8c57fcff5d9ab98dfb8">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gaa09eca432d5bb8c57fcff5d9ab98dfb8">rsb_rsb.c</a>
+</li>
+<li>rsb_file_mtx_get_dims()
+: <a class="el" href="group__rsb__doc__rsb.html#gaa79f69918eafbd8f737b7866a00a0330">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gaa79f69918eafbd8f737b7866a00a0330">rsb_rsb.c</a>
+</li>
+<li>rsb_file_mtx_load()
+: <a class="el" href="group__rsb__doc__rsb.html#ga00833b0cf57da8e430f9d0e2b5375bb3">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga00833b0cf57da8e430f9d0e2b5375bb3">rsb_rsb.c</a>
+</li>
+<li>rsb_file_mtx_rndr()
+: <a class="el" href="group__rsb__doc__rsb.html#ga4b45a74b985f5cbd869bc9a540951771">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga4b45a74b985f5cbd869bc9a540951771">rsb_rsb.c</a>
+</li>
+<li>rsb_file_mtx_save()
+: <a class="el" href="group__rsb__doc__rsb.html#gad911ac7528c95c874d02cb17e6b76c54">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gad911ac7528c95c874d02cb17e6b76c54">rsb_rsb.c</a>
+</li>
+<li>rsb_file_vec_load()
+: <a class="el" href="group__rsb__doc__rsb.html#gad071e0373a08f74ee7ae910e9e4fd140">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gad071e0373a08f74ee7ae910e9e4fd140">rsb_rsb.c</a>
+</li>
+<li>rsb_file_vec_save()
+: <a class="el" href="group__rsb__doc__rsb.html#gac4b2a63cdfe1cd4083b1561ee4bea696">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gac4b2a63cdfe1cd4083b1561ee4bea696">rsb_rsb.c</a>
+</li>
+<li>rsb_lib_exit()
+: <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_rsb.c</a>
+</li>
+<li>rsb_lib_get_opt()
+: <a class="el" href="rsb_8h.html#a96a28efc32dd050d2a74208b3ad2f227">rsb.h</a>
+, <a class="el" href="rsb__rsb_8c.html#a96a28efc32dd050d2a74208b3ad2f227">rsb_rsb.c</a>
+</li>
+<li>rsb_lib_init()
+: <a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_rsb.c</a>
+</li>
+<li>rsb_lib_reinit()
+: <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_rsb.c</a>
+</li>
+<li>rsb_lib_set_opt()
+: <a class="el" href="rsb_8h.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb.h</a>
+, <a class="el" href="rsb__rsb_8c.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_rsb.c</a>
+</li>
+<li>rsb_lib_set_opt_str()
+: <a class="el" href="group__rsb__doc__rsb.html#ga4670aa682e70f82d5039c600e426a368">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga4670aa682e70f82d5039c600e426a368">rsb_rsb.c</a>
+</li>
+<li>rsb_load_spblas_matrix_file_as_matrix_market()
+: <a class="el" href="rsb__libspblas__handle_8c.html#a7769e3aac9ffdba04f29dd1f8f57daa4">rsb_libspblas_handle.c</a>
+, <a class="el" href="blas__sparse_8h.html#a7769e3aac9ffdba04f29dd1f8f57daa4">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#a7769e3aac9ffdba04f29dd1f8f57daa4">rsb_libspblas.h</a>
+</li>
+<li>rsb_mtx_add_to_dense()
+: <a class="el" href="group__rsb__doc__rsb.html#gaf30a70ea183d30d216f700782fc01524">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gaf30a70ea183d30d216f700782fc01524">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_alloc_from_coo_begin()
+: <a class="el" href="group__rsb__doc__rsb.html#gafca80e53d47a7ec3eb116e755fe47c58">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gafca80e53d47a7ec3eb116e755fe47c58">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_alloc_from_coo_const()
+: <a class="el" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_alloc_from_coo_end()
+: <a class="el" href="group__rsb__doc__rsb.html#gab583fbefa0a66e9d30dac034480c2d86">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gab583fbefa0a66e9d30dac034480c2d86">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_alloc_from_coo_inplace()
+: <a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_alloc_from_csc_const()
+: <a class="el" href="group__rsb__doc__rsb.html#gaebf57d9e5263f41eb6163581ffc141aa">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gaebf57d9e5263f41eb6163581ffc141aa">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_alloc_from_csr_const()
+: <a class="el" href="group__rsb__doc__rsb.html#ga13d417f776654fd159f274e56191573e">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga13d417f776654fd159f274e56191573e">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_alloc_from_csr_inplace()
+: <a class="el" href="group__rsb__doc__rsb.html#ga60121166daf00968ba717931f04ea455">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga60121166daf00968ba717931f04ea455">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_clone()
+: <a class="el" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_free()
+: <a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_get_coo()
+: <a class="el" href="group__rsb__doc__rsb.html#gaac3c6c033733a8101b9ccf56f8fc7112">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gaac3c6c033733a8101b9ccf56f8fc7112">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_get_coo_block()
+: <a class="el" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_get_csr()
+: <a class="el" href="group__rsb__doc__rsb.html#ga4adca460f50bc1ad7d9ffdfda2273b87">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga4adca460f50bc1ad7d9ffdfda2273b87">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_get_info()
+: <a class="el" href="group__rsb__doc__rsb.html#gad9a3eacd54fb7043464006cd57866edf">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gad9a3eacd54fb7043464006cd57866edf">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_get_info_str()
+: <a class="el" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_get_nrm()
+: <a class="el" href="group__rsb__doc__rsb.html#ga6a645ce89fd167d72c92cdcfbcd8ed81">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga6a645ce89fd167d72c92cdcfbcd8ed81">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_get_prec()
+: <a class="el" href="group__rsb__doc__rsb.html#gadaee12cc24dac7f8ebc68efd3d09c819">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gadaee12cc24dac7f8ebc68efd3d09c819">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_get_rows_sparse()
+: <a class="el" href="group__rsb__doc__rsb.html#gaa01c4a69db732f99e8a960ee8c9afa23">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gaa01c4a69db732f99e8a960ee8c9afa23">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_get_vals()
+: <a class="el" href="group__rsb__doc__rsb.html#gad8f1aa9ac5081edd789374e7bb82697f">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gad8f1aa9ac5081edd789374e7bb82697f">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_get_vec()
+: <a class="el" href="group__rsb__doc__rsb.html#gad0b2352cea6b7512b466d1c51327fcf8">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gad0b2352cea6b7512b466d1c51327fcf8">rsb.h</a>
+</li>
+<li>rsb_mtx_rndr()
+: <a class="el" href="group__rsb__doc__rsb.html#gab0702d7080d1699162e4201bc70cc5ee">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gab0702d7080d1699162e4201bc70cc5ee">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_set_vals()
+: <a class="el" href="group__rsb__doc__rsb.html#gab8069ad6d5a67bc8a726131891e98c46">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gab8069ad6d5a67bc8a726131891e98c46">rsb.h</a>
+</li>
+<li>rsb_mtx_switch_to_coo()
+: <a class="el" href="group__rsb__doc__rsb.html#gadf75c148fe661486ab0d8140657b8d9a">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gadf75c148fe661486ab0d8140657b8d9a">rsb_rsb.c</a>
+</li>
+<li>rsb_mtx_switch_to_csr()
+: <a class="el" href="group__rsb__doc__rsb.html#ga3c46a4942a6acb90063d721b6446e78e">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga3c46a4942a6acb90063d721b6446e78e">rsb.h</a>
+</li>
+<li>rsb_mtx_upd_vals()
+: <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb.h</a>
+</li>
+<li>rsb_perror()
+: <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb.h</a>
+</li>
+<li>rsb_psblas_trans_to_rsb_trans()
+: <a class="el" href="group__rsb__doc__rsb.html#ga7459601f0d54bd95549959b9749fedde">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga7459601f0d54bd95549959b9749fedde">rsb_rsb.c</a>
+</li>
+<li>rsb_spmm()
+: <a class="el" href="group__rsb__doc__rsb.html#ga3ec8d721b5333aae6ea9b03eb0039285">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga3ec8d721b5333aae6ea9b03eb0039285">rsb_rsb.c</a>
+</li>
+<li>rsb_spmsp()
+: <a class="el" href="group__rsb__doc__rsb.html#ga8813ccbbb1065ac76bfe22c42feafa05">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga8813ccbbb1065ac76bfe22c42feafa05">rsb.h</a>
+</li>
+<li>rsb_spmsp_to_dense()
+: <a class="el" href="group__rsb__doc__rsb.html#ga74d97612d4af70244c886b9eadd90a0e">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga74d97612d4af70244c886b9eadd90a0e">rsb.h</a>
+</li>
+<li>rsb_spmv()
+: <a class="el" href="group__rsb__doc__rsb.html#ga4a16a82d289c76a437915db449553d4d">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga4a16a82d289c76a437915db449553d4d">rsb.h</a>
+</li>
+<li>rsb_sppsp()
+: <a class="el" href="group__rsb__doc__rsb.html#ga30823d02e577e59da4ccff6baaeb8ea1">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga30823d02e577e59da4ccff6baaeb8ea1">rsb.h</a>
+</li>
+<li>rsb_spsm()
+: <a class="el" href="group__rsb__doc__rsb.html#ga48e6f3844605fffac9f622f05afa6043">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga48e6f3844605fffac9f622f05afa6043">rsb.h</a>
+</li>
+<li>rsb_spsv()
+: <a class="el" href="group__rsb__doc__rsb.html#ga9b044332b720d3f8083ae792068fb04a">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga9b044332b720d3f8083ae792068fb04a">rsb_rsb.c</a>
+</li>
+<li>rsb_strerror_r()
+: <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_rsb.c</a>
+</li>
+<li>rsb_time()
+: <a class="el" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_rsb.c</a>
+</li>
+<li>rsb_tune_spmm()
+: <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb.h</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_rsb.c</a>
+</li>
+<li>rsb_tune_spsm()
+: <a class="el" href="group__rsb__doc__rsb.html#ga8d7a05bbc165bd6ac20e8e23487a5871">rsb_rsb.c</a>
+, <a class="el" href="group__rsb__doc__rsb.html#ga8d7a05bbc165bd6ac20e8e23487a5871">rsb.h</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/globals_type.html b/doc/html/globals_type.html
new file mode 100644
index 0000000..afefa70
--- /dev/null
+++ b/doc/html/globals_type.html
@@ -0,0 +1,112 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Globals</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="files.html"><span>File List</span></a></li>
+      <li class="current"><a href="globals.html"><span>Globals</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow3" class="tabs2">
+    <ul class="tablist">
+      <li><a href="globals.html"><span>All</span></a></li>
+      <li><a href="globals_func.html"><span>Functions</span></a></li>
+      <li class="current"><a href="globals_type.html"><span>Typedefs</span></a></li>
+      <li><a href="globals_enum.html"><span>Enumerations</span></a></li>
+      <li><a href="globals_eval.html"><span>Enumerator</span></a></li>
+      <li><a href="globals_defs.html"><span>Macros</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="contents">
+ <ul>
+<li>blas_sparse_matrix
+: <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse.h</a>
+, <a class="el" href="rsb__libspblas_8h.html#a6f56456b01e0cc6b25b81201aa67c163">rsb_libspblas.h</a>
+</li>
+<li>rsb_blk_idx_t
+: <a class="el" href="rsb_8h.html#ac6a4411e32793f5c150c6ab3c6f7e14e">rsb.h</a>
+</li>
+<li>rsb_bool_t
+: <a class="el" href="rsb_8h.html#aeeac94f4bf43460df839c8decd897523">rsb.h</a>
+</li>
+<li>rsb_char_t
+: <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb.h</a>
+</li>
+<li>rsb_coo_idx_t
+: <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb.h</a>
+</li>
+<li>rsb_err_t
+: <a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb.h</a>
+</li>
+<li>rsb_flags_t
+: <a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb.h</a>
+</li>
+<li>rsb_int_t
+: <a class="el" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb.h</a>
+</li>
+<li>rsb_marf_t
+: <a class="el" href="rsb_8h.html#aa8f24976a4e4bdf8403ab433564c2005">rsb.h</a>
+</li>
+<li>rsb_nnz_idx_t
+: <a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb.h</a>
+</li>
+<li>rsb_precf_t
+: <a class="el" href="rsb_8h.html#a528640277b196f7cfce2016cffbdd340">rsb.h</a>
+</li>
+<li>rsb_real_t
+: <a class="el" href="rsb_8h.html#ab6fedd060aee0dd9f61f0438987a99a9">rsb.h</a>
+</li>
+<li>rsb_time_t
+: <a class="el" href="rsb_8h.html#ab7a0af874a2765e9271a63ee4acf3d5d">rsb.h</a>
+</li>
+<li>rsb_trans_t
+: <a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb.h</a>
+</li>
+<li>rsb_type_t
+: <a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb.h</a>
+</li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:26 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/group__rsb__doc__examples.html b/doc/html/group__rsb__doc__examples.html
new file mode 100644
index 0000000..9f545ce
--- /dev/null
+++ b/doc/html/group__rsb__doc__examples.html
@@ -0,0 +1,1713 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Example programs and code</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="header">
+  <div class="headertitle">
+<div class="title">Example programs and code</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>Examples of usage of <code>librsb</code>.  
+<a href="#details">More...</a></p>
+<p>Examples of usage of <code>librsb</code>. </p>
+<pre class="fragment">    The following fully working example programs illustrate correct ways of using the library.
+   The script shown below should be sufficient to build them.
+</pre> <div class="fragment"><div class="line"><span class="preprocessor">#!/bin/bash</span></div>
+<div class="line"><span class="preprocessor"></span><span class="preprocessor"># Script to build the librsb example programs.</span></div>
+<div class="line"><span class="preprocessor"></span></div>
+<div class="line">LIBRSB_CONFIG=${LIBRSB_CONFIG:-librsb-config}</div>
+<div class="line"></div>
+<div class="line"><span class="keywordflow">for</span> s in *.c</div>
+<div class="line"><span class="keywordflow">do</span></div>
+<div class="line">        p=${s/.c/}</div>
+<div class="line">        rm -f $p </div>
+<div class="line">        CFLAGS=`${LIBRSB_CONFIG} --I_opts`</div>
+<div class="line">        LDFLAGS=`${LIBRSB_CONFIG} --<span class="keyword">static</span> --ldflags --extra_libs`</div>
+<div class="line">        CC=`${LIBRSB_CONFIG} --cc`</div>
+<div class="line">        cmd=<span class="stringliteral">"$CC $CFLAGS $s $LDFLAGS -o $p"</span></div>
+<div class="line">        echo $cmd</div>
+<div class="line">        $cmd</div>
+<div class="line">done</div>
+<div class="line"></div>
+<div class="line"><span class="preprocessor"># replace false with true if you have built the Fortran modules and installed them in the include directory.</span></div>
+<div class="line"><span class="preprocessor"></span><span class="keywordflow">if</span> false ; then</div>
+<div class="line"><span class="keywordflow">for</span> s in *.F90</div>
+<div class="line"><span class="keywordflow">do</span></div>
+<div class="line">        p=${s/.F90/}</div>
+<div class="line">        rm -f $p </div>
+<div class="line">        CFLAGS=`${LIBRSB_CONFIG} --I_opts`</div>
+<div class="line">        LDFLAGS=`${LIBRSB_CONFIG} --<span class="keyword">static</span> --ldflags --extra_libs`</div>
+<div class="line">        FC=`${LIBRSB_CONFIG} --fc`</div>
+<div class="line">        cmd=<span class="stringliteral">"$FC $CFLAGS $s $LDFLAGS -o $p"</span></div>
+<div class="line">        echo $cmd</div>
+<div class="line">        $cmd</div>
+<div class="line">done</div>
+<div class="line">fi</div>
+<div class="line"></div>
+</div><!-- fragment --><div class="fragment"><div class="line"><span class="comment">/*</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">Copyright (C) 2008-2015 Michele Martone</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">This file is part of librsb.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">librsb is free software; you can redistribute it and/or modify it</span></div>
+<div class="line"><span class="comment">under the terms of the GNU Lesser General Public License as published</span></div>
+<div class="line"><span class="comment">by the Free Software Foundation; either version 3 of the License, or</span></div>
+<div class="line"><span class="comment">(at your option) any later version.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">librsb is distributed in the hope that it will be useful, but WITHOUT</span></div>
+<div class="line"><span class="comment">ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or</span></div>
+<div class="line"><span class="comment">FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public</span></div>
+<div class="line"><span class="comment">License for more details.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">You should have received a copy of the GNU Lesser General Public</span></div>
+<div class="line"><span class="comment">License along with librsb; see the file COPYING.</span></div>
+<div class="line"><span class="comment">If not, see <http://www.gnu.org/licenses/>.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">*/</span><span class="comment"></span></div>
+<div class="line"><span class="comment">/*!</span></div>
+<div class="line"><span class="comment"> \ingroup rsb_doc_examples</span></div>
+<div class="line"><span class="comment"> @file</span></div>
+<div class="line"><span class="comment"> @author Michele Martone</span></div>
+<div class="line"><span class="comment"> @brief This is a first "hello RSB" example program.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment"> \include hello.c</span></div>
+<div class="line"><span class="comment">*/</span></div>
+<div class="line"><span class="preprocessor">#include <<a class="code" href="rsb_8h.html" title="This file declares the user interface functions and data structures for the librsb library...">rsb.h</a>></span>        <span class="comment">/* librsb header to include */</span></div>
+<div class="line"><span class="preprocessor">#include <stdio.h></span>      <span class="comment">/* printf() */</span></div>
+<div class="line"></div>
+<div class="line"><span class="keywordtype">int</span> main(<span class="keyword">const</span> <span class="keywordtype">int</span> argc, <span class="keywordtype">char</span> * <span class="keyword">const</span> argv[])</div>
+<div class="line">{<span class="comment"></span></div>
+<div class="line"><span class="comment">        /*!</span></div>
+<div class="line"><span class="comment">          A Hello-RSB program.</span></div>
+<div class="line"><span class="comment">         </span></div>
+<div class="line"><span class="comment">          This program shows how to use the rsb.h interface correctly to:</span></div>
+<div class="line"><span class="comment">         </span></div>
+<div class="line"><span class="comment">          - initialize the library using #rsb_lib_init()</span></div>
+<div class="line"><span class="comment">          - set library options using #rsb_lib_set_opt()</span></div>
+<div class="line"><span class="comment">          - revert such changes </span></div>
+<div class="line"><span class="comment">          - allocate (build) a single sparse matrix in the RSB format</span></div>
+<div class="line"><span class="comment">            using #rsb_mtx_alloc_from_coo_const()</span></div>
+<div class="line"><span class="comment">          - prints information obtained via #rsb_mtx_get_info_str()</span></div>
+<div class="line"><span class="comment">          - multiply the matrix times a vector using #rsb_spmv()</span></div>
+<div class="line"><span class="comment">          - deallocate the matrix using #rsb_mtx_free() </span></div>
+<div class="line"><span class="comment">          - finalize the library using #rsb_lib_exit(RSB_NULL_EXIT_OPTIONS) </span></div>
+<div class="line"><span class="comment">         </span></div>
+<div class="line"><span class="comment">          In this example, we use #RSB_DEFAULT_TYPE as matrix type.</span></div>
+<div class="line"><span class="comment">          This type depends on what was configured at library build time.</span></div>
+<div class="line"><span class="comment">         * */</span></div>
+<div class="line">        <span class="keyword">struct </span>rsb_mtx_t *mtxAp = NULL; <span class="comment">/* matrix structure pointer */</span></div>
+<div class="line">        <span class="keyword">const</span> <span class="keywordtype">int</span> bs = <a class="code" href="rsb_8h.html#a3579d00f3b97cd569707f7c62e462322">RSB_DEFAULT_BLOCKING</a>;</div>
+<div class="line">        <span class="keyword">const</span> <span class="keywordtype">int</span> brA = bs, bcA = bs;</div>
+<div class="line">        <span class="keyword">const</span> <a class="code" href="rsb__types_8h.html#aa5e96f00841ec8f4f3ca1ff0bf1b5bbd">RSB_DEFAULT_TYPE</a> one = 1;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode = <a class="code" href="rsb__types_8h.html#a56fc5ef14266266227797621e0a1e217">RSB_NUMERICAL_TYPE_DEFAULT</a>;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> errval = <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>;</div>
+<div class="line">        <span class="keyword">const</span> <a class="code" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> nnzA = 4;           <span class="comment">/* matrix nonzeroes count */</span></div>
+<div class="line">        <span class="keyword">const</span> <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> nrA = 3;            <span class="comment">/* matrix rows count */</span></div>
+<div class="line">        <span class="keyword">const</span> <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> ncA = 3;            <span class="comment">/* matrix columns count */</span></div>
+<div class="line">        <span class="comment">/* nonzero row indices coordinates: */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> IA[] = {0,1,2,2};</div>
+<div class="line">        <span class="comment">/* nonzero column indices coordinates: */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> JA[] = {0,1,2,2};</div>
+<div class="line">        <a class="code" href="rsb__types_8h.html#aa5e96f00841ec8f4f3ca1ff0bf1b5bbd">RSB_DEFAULT_TYPE</a> VA[] = {11,22,32,1};<span class="comment">/* values of nonzeroes */</span></div>
+<div class="line">        <a class="code" href="rsb__types_8h.html#aa5e96f00841ec8f4f3ca1ff0bf1b5bbd">RSB_DEFAULT_TYPE</a> X[] = { 0, 0, 0 };     <span class="comment">/* X vector's array */</span></div>
+<div class="line">        <span class="keyword">const</span> <a class="code" href="rsb__types_8h.html#aa5e96f00841ec8f4f3ca1ff0bf1b5bbd">RSB_DEFAULT_TYPE</a> B[] = { -1, -2, -5 }; <span class="comment">/* B vector's array */</span></div>
+<div class="line">        <span class="keywordtype">char</span> ib[200];</div>
+<div class="line"></div>
+<div class="line">        printf(<span class="stringliteral">"Hello, RSB!\n"</span>);</div>
+<div class="line">        printf(<span class="stringliteral">"Initializing the library...\n"</span>);</div>
+<div class="line">        <span class="keywordflow">if</span>((errval = <a class="code" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>(<a class="code" href="rsb_8h.html#add105c42e570c5c269680d437f8c51e2">RSB_NULL_INIT_OPTIONS</a>)) != </div>
+<div class="line">                        <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)</div>
+<div class="line">        {</div>
+<div class="line">                printf(<span class="stringliteral">"Error initializing the library!\n"</span>);</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly initialized the library.\n"</span>);</div>
+<div class="line"></div>
+<div class="line">        printf(<span class="stringliteral">"Attempting to set the"</span></div>
+<div class="line">               <span class="stringliteral">" RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE library option.\n"</span>);</div>
+<div class="line">        {</div>
+<div class="line">                <a class="code" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a> evi=1; </div>
+<div class="line">                <span class="comment">/* Setting a single optional library parameter. */</span></div>
+<div class="line">                errval = <a class="code" href="rsb_8h.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_lib_set_opt</a>(</div>
+<div class="line">                        <a class="code" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a56c0c6849135ce5fa9edd7907ab3e0cb">RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE</a>, &evi);</div>
+<div class="line">                <span class="keywordflow">if</span>(errval != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)</div>
+<div class="line">                {</div>
+<div class="line">                        <span class="keywordtype">char</span> errbuf[256];</div>
+<div class="line">                        <a class="code" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r</a>(errval,&errbuf[0],<span class="keyword">sizeof</span>(errbuf));</div>
+<div class="line">                        printf(<span class="stringliteral">"Failed setting the"</span></div>
+<div class="line">                        <span class="stringliteral">" RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE"</span></div>
+<div class="line">                        <span class="stringliteral">" library option (reason string:\n%s).\n"</span>,errbuf);</div>
+<div class="line">                        <span class="keywordflow">if</span>(errval&<a class="code" href="rsb_8h.html#a4d8eb05488b681b75449f64c418b8893">RSB_ERRS_UNSUPPORTED_FEATURES</a>)</div>
+<div class="line">                        {</div>
+<div class="line">                          printf(<span class="stringliteral">"This error may be safely ignored.\n"</span>);</div>
+<div class="line">                        }</div>
+<div class="line">                        <span class="keywordflow">else</span></div>
+<div class="line">                        {</div>
+<div class="line">                          printf(<span class="stringliteral">"Some unexpected error occurred!\n"</span>);</div>
+<div class="line">                          <span class="keywordflow">goto</span> err;</div>
+<div class="line">                        }</div>
+<div class="line">                }</div>
+<div class="line">                <span class="keywordflow">else</span></div>
+<div class="line">                {</div>
+<div class="line">                        printf(<span class="stringliteral">"Setting back the "</span></div>
+<div class="line">                                <span class="stringliteral">"RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE"</span></div>
+<div class="line">                                <span class="stringliteral">" library option.\n"</span>);</div>
+<div class="line">                        evi = 0;</div>
+<div class="line">                        errval = <a class="code" href="rsb_8h.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_lib_set_opt</a>(<a class="code" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a56c0c6849135ce5fa9edd7907ab3e0cb">RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE</a>,</div>
+<div class="line">                                        &evi);</div>
+<div class="line">                        errval = <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>;</div>
+<div class="line">                }</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        mtxAp = <a class="code" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_from_coo_const</a>(</div>
+<div class="line">                VA,IA,JA,nnzA,typecode,nrA,ncA,brA,bcA,</div>
+<div class="line">                <a class="code" href="rsb_8h.html#a0ea7640214ee34c87e483c475b15827d">RSB_FLAG_NOFLAGS</a>    <span class="comment">/* default format will be chosen */</span></div>
+<div class="line">                |<a class="code" href="rsb_8h.html#afd1b39c625f4249cd32fccea38957f97">RSB_FLAG_DUPLICATES_SUM</a><span class="comment">/* duplicates will be summed */</span></div>
+<div class="line">                        ,&errval);</div>
+<div class="line">        <span class="keywordflow">if</span>((!mtxAp) || (errval != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>))</div>
+<div class="line">        {</div>
+<div class="line">                printf(<span class="stringliteral">"Error while allocating the matrix!\n"</span>);</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly allocated a matrix.\n"</span>);</div>
+<div class="line">        printf(<span class="stringliteral">"Summary information of the matrix:\n"</span>);</div>
+<div class="line">        <span class="comment">/* print out the matrix summary information  */</span></div>
+<div class="line">        <a class="code" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str</a>(mtxAp,<span class="stringliteral">"RSB_MIF_MATRIX_INFO__TO__CHAR_P"</span>,</div>
+<div class="line">                        ib,<span class="keyword">sizeof</span>(ib));</div>
+<div class="line">        printf(<span class="stringliteral">"%s"</span>,ib);</div>
+<div class="line">        printf(<span class="stringliteral">"\n"</span>);</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">if</span>((errval = </div>
+<div class="line">                <a class="code" href="group__rsb__doc__rsb.html#ga4a16a82d289c76a437915db449553d4d">rsb_spmv</a>(<a class="code" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a>,&one,mtxAp,B,1,&one,X,1))</div>
+<div class="line">                        != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> )</div>
+<div class="line">        {</div>
+<div class="line">                printf(<span class="stringliteral">"Error performing a multiplication!\n"</span>);</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly performed a SPMV.\n"</span>);</div>
+<div class="line">        <a class="code" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a>(mtxAp);</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly freed the matrix.\n"</span>);</div>
+<div class="line">        <span class="keywordflow">if</span>((errval = <a class="code" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a>(<a class="code" href="rsb_8h.html#a2234a5e51156de6c95c3f8c2951ae09f">RSB_NULL_EXIT_OPTIONS</a>))</div>
+<div class="line">                        != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)</div>
+<div class="line">        {</div>
+<div class="line">                printf(<span class="stringliteral">"Error finalizing the library!\n"</span>);</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly finalized the library.\n"</span>);</div>
+<div class="line">        printf(<span class="stringliteral">"Program terminating with no error.\n"</span>);</div>
+<div class="line">        <span class="keywordflow">return</span> 0;</div>
+<div class="line">err:</div>
+<div class="line">        <a class="code" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror</a>(NULL,errval);</div>
+<div class="line">        printf(<span class="stringliteral">"Program terminating with error.\n"</span>);</div>
+<div class="line">        <span class="keywordflow">return</span> -1;</div>
+<div class="line">}</div>
+<div class="line"></div>
+</div><!-- fragment --> <div class="fragment"><div class="line"><span class="comment">/*</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">Copyright (C) 2008-2015 Michele Martone</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">This file is part of librsb.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">librsb is free software; you can redistribute it and/or modify it</span></div>
+<div class="line"><span class="comment">under the terms of the GNU Lesser General Public License as published</span></div>
+<div class="line"><span class="comment">by the Free Software Foundation; either version 3 of the License, or</span></div>
+<div class="line"><span class="comment">(at your option) any later version.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">librsb is distributed in the hope that it will be useful, but WITHOUT</span></div>
+<div class="line"><span class="comment">ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or</span></div>
+<div class="line"><span class="comment">FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public</span></div>
+<div class="line"><span class="comment">License for more details.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">You should have received a copy of the GNU Lesser General Public</span></div>
+<div class="line"><span class="comment">License along with librsb; see the file COPYING.</span></div>
+<div class="line"><span class="comment">If not, see <http://www.gnu.org/licenses/>.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">*/</span><span class="comment"></span></div>
+<div class="line"><span class="comment">/*!</span></div>
+<div class="line"><span class="comment"> \ingroup rsb_doc_examples</span></div>
+<div class="line"><span class="comment"> @file</span></div>
+<div class="line"><span class="comment"> @author Michele Martone</span></div>
+<div class="line"><span class="comment"> @brief This is a first "hello RSB" example program using </span></div>
+<div class="line"><span class="comment">        a Sparse BLAS interface.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment"> \include hello-spblas.c</span></div>
+<div class="line"><span class="comment">*/</span></div>
+<div class="line"><span class="preprocessor">#include <<a class="code" href="rsb_8h.html" title="This file declares the user interface functions and data structures for the librsb library...">rsb.h</a>></span>        <span class="comment">/* for rsb_lib_init */</span></div>
+<div class="line"><span class="preprocessor">#include <<a class="code" href="blas__sparse_8h.html" title="This file specifies the Sparse BLAS interface to librsb. Supported types :(float,double,float complex,double complex) . Unsupported types:() . Level 1 ops :(dot,axpy,ga,gz,sc) . Level 2 ops :(mv,sv) . Level 3 ops :(mm,sm) .">blas_sparse.h</a>></span>        <span class="comment">/* Sparse BLAS on the top of librsb */</span></div>
+<div class="line"><span class="preprocessor">#include <stdio.h></span>      <span class="comment">/* printf */</span></div>
+<div class="line"></div>
+<div class="line"><span class="keywordtype">int</span> main(<span class="keyword">const</span> <span class="keywordtype">int</span> argc, <span class="keywordtype">char</span> * <span class="keyword">const</span> argv[])</div>
+<div class="line">{<span class="comment"></span></div>
+<div class="line"><span class="comment">        /*!</span></div>
+<div class="line"><span class="comment">         * A Hello/Sparse BLAS program.</span></div>
+<div class="line"><span class="comment">         *</span></div>
+<div class="line"><span class="comment">         * This program shows how to use the blas_sparse.h</span></div>
+<div class="line"><span class="comment">         * interface correctly to:</span></div>
+<div class="line"><span class="comment">         *</span></div>
+<div class="line"><span class="comment">         * - initialize the library using #rsb_lib_init()</span></div>
+<div class="line"><span class="comment">         * - allocate (build) a single sparse matrix in the RSB</span></div>
+<div class="line"><span class="comment">         *   format using #BLAS_duscr_begin()/#BLAS_duscr_insert_entries()</span></div>
+<div class="line"><span class="comment">         *   /#BLAS_duscr_end()</span></div>
+<div class="line"><span class="comment">         * - extract one matrix element with #BLAS_dusget_element()</span></div>
+<div class="line"><span class="comment">         * - multiply the matrix times a vector using #BLAS_dusmv()</span></div>
+<div class="line"><span class="comment">         * - deallocate the matrix using #BLAS_usds() </span></div>
+<div class="line"><span class="comment">         * - finalize the library using</span></div>
+<div class="line"><span class="comment">         *   #rsb_lib_exit(#RSB_NULL_EXIT_OPTIONS) </span></div>
+<div class="line"><span class="comment">        */</span></div>
+<div class="line"><span class="preprocessor">#ifndef RSB_NUMERICAL_TYPE_DOUBLE   </span></div>
+<div class="line"><span class="preprocessor"></span>        printf(<span class="stringliteral">"'double' type configured out."</span></div>
+<div class="line">        <span class="stringliteral">" Please reconfigure the library with it and recompile.\n"</span>);</div>
+<div class="line">        <span class="keywordflow">return</span> 0;</div>
+<div class="line"><span class="preprocessor">#else </span><span class="comment">/* RSB_NUMERICAL_TYPE_DOUBLE */</span><span class="preprocessor"></span></div>
+<div class="line"><span class="preprocessor"></span>        <a class="code" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A = <a class="code" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_invalid_handle</a>; <span class="comment">/* handle for A */</span></div>
+<div class="line">        <span class="keyword">const</span> <span class="keywordtype">int</span> nnz = 4;      <span class="comment">/* number of nonzeroes of matrix A */</span></div>
+<div class="line">        <span class="keyword">const</span> <span class="keywordtype">int</span>  nr = 3;      <span class="comment">/* number of A's rows */</span></div>
+<div class="line">        <span class="keyword">const</span> <span class="keywordtype">int</span>  nc = 3;      <span class="comment">/* number of A's columns */</span></div>
+<div class="line">        <span class="comment">/* A's nonzero elements row indices (coordinates): */</span></div>
+<div class="line">        <span class="keywordtype">int</span>   IA[] = { 0, 1, 2, 2 };</div>
+<div class="line">        <span class="comment">/* A's nonzero elements column indices (coordinates): */</span></div>
+<div class="line">        <span class="keywordtype">int</span>   JA[] = { 0, 1, 0, 2 };</div>
+<div class="line">        <span class="comment">/* A's nonzero values (matrix coefficients): */</span></div>
+<div class="line">        <span class="keywordtype">double</span> VA[] = { 11.0, 22.0, 13.0, 33.0  };</div>
+<div class="line">        <span class="comment">/* the X vector's array: */</span></div>
+<div class="line">        <span class="keywordtype">double</span> X[] = { 0.0, 0.0, 0.0 };</div>
+<div class="line">        <span class="comment">/* the B vector's array: */</span></div>
+<div class="line">        <span class="keywordtype">double</span> B[] = { -1.0, -2.0, -2.0 };</div>
+<div class="line">        <span class="comment">/* the (known) result array: */</span></div>
+<div class="line">        <span class="keywordtype">double</span> AB[] = { 11.0+26.0, 44.0, 66.0+13.0 };</div>
+<div class="line">        <span class="comment">/* rsb error variable: */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> errval = <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>;</div>
+<div class="line">        <span class="keywordtype">int</span> i;</div>
+<div class="line"></div>
+<div class="line">        printf(<span class="stringliteral">"Hello, RSB!\n"</span>);</div>
+<div class="line">        <span class="comment">/* initialize the library */</span></div>
+<div class="line">        <span class="keywordflow">if</span>((errval = <a class="code" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>(<a class="code" href="rsb_8h.html#add105c42e570c5c269680d437f8c51e2">RSB_NULL_INIT_OPTIONS</a>)) </div>
+<div class="line">                        != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly initialized the library.\n"</span>);</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* initialize a matrix descriptor */</span></div>
+<div class="line">        A = <a class="code" href="group__rsb__doc__sparse__blas.html#gac931dcb1129ee3016ab82602c3d14fee">BLAS_duscr_begin</a>(nr,nc);</div>
+<div class="line">        <span class="keywordflow">if</span>( A == <a class="code" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_invalid_handle</a> )</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        </div>
+<div class="line">        <span class="comment">/* specify properties (e.g.: symmetry)*/</span></div>
+<div class="line">        <span class="keywordflow">if</span>( <a class="code" href="blas__sparse_8h.html#a6719ae77dfef6d6dd0790e34a65c1924">BLAS_ussp</a>(A,<a class="code" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a>) != 0 )</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* get properties (e.g.: symmetry) */</span></div>
+<div class="line">        <span class="keywordflow">if</span>( <a class="code" href="blas__sparse_8h.html#a5eec91b6d95962811bd9cb4e37266214">BLAS_usgp</a>(A,<a class="code" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a>) != 1 )</div>
+<div class="line">        {</div>
+<div class="line">                printf(<span class="stringliteral">"Symmetry property non set ?!\n"</span>);</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* insert the nonzeroes (here, all at once) */</span></div>
+<div class="line">        <span class="keywordflow">if</span>( <a class="code" href="group__rsb__doc__sparse__blas.html#gae0683bc8f0af5dd3e53b964190f9e1b4">BLAS_duscr_insert_entries</a>(A, nnz, VA, IA, JA)</div>
+<div class="line">                        == <a class="code" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_invalid_handle</a>)</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* finalize (allocate) the matrix build  */</span></div>
+<div class="line">        <span class="keywordflow">if</span>( <a class="code" href="group__rsb__doc__sparse__blas.html#ga5d9ce97bf054b1e3750eaae5d4e6c335">BLAS_duscr_end</a>(A) == <a class="code" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_invalid_handle</a> )</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly allocated a matrix.\n"</span>);</div>
+<div class="line"></div>
+<div class="line">        VA[0] = 0.0;</div>
+<div class="line">        <span class="keywordflow">if</span>( <a class="code" href="group__rsb__doc__sparse__blas.html#gacf35fa073f6cc991efe75f6a012a9a04">BLAS_dusget_element</a>(A, IA[0], JA[0], &VA[0]) )</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* a check */</span></div>
+<div class="line">        <span class="keywordflow">if</span>( VA[0] != 11.0 )</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* compute X = X + (-1) * A * B   */</span></div>
+<div class="line">        <span class="keywordflow">if</span>(<a class="code" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>(<a class="code" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f">blas_no_trans</a>,-1,A,B,1,X,1))</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">for</span>( i = 0 ; i < nc; ++i )</div>
+<div class="line">                <span class="keywordflow">if</span>( X[i] != AB[i] )</div>
+<div class="line">                {</div>
+<div class="line">                        printf(<span class="stringliteral">"Computed SPMV result seems wrong. Terminating.\n"</span>);</div>
+<div class="line">                        <span class="keywordflow">goto</span> err;</div>
+<div class="line">                }</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly performed a SPMV.\n"</span>);</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* deallocate matrix A */</span></div>
+<div class="line">        <span class="keywordflow">if</span>( <a class="code" href="group__rsb__doc__sparse__blas.html#ga8b0cca8196f40f7b55084a978b40717f">BLAS_usds</a>(A) )</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly freed the matrix.\n"</span>);</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* finalize the library */</span></div>
+<div class="line">        <span class="keywordflow">if</span>((errval = <a class="code" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a>(<a class="code" href="rsb_8h.html#a2234a5e51156de6c95c3f8c2951ae09f">RSB_NULL_EXIT_OPTIONS</a>))</div>
+<div class="line">                        != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly finalized the library.\n"</span>);</div>
+<div class="line">        printf(<span class="stringliteral">"Program terminating with no error.\n"</span>);</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">return</span> 0;</div>
+<div class="line">err:</div>
+<div class="line">        <a class="code" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror</a>(NULL,errval);</div>
+<div class="line">        printf(<span class="stringliteral">"Program terminating with error.\n"</span>);</div>
+<div class="line">        <span class="keywordflow">return</span> -1;</div>
+<div class="line"><span class="preprocessor">#endif </span><span class="comment">/* RSB_NUMERICAL_TYPE_DOUBLE */</span><span class="preprocessor"></span></div>
+<div class="line"><span class="preprocessor"></span>}</div>
+<div class="line"></div>
+</div><!-- fragment --> <div class="fragment"><div class="line"><span class="comment">/*</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">Copyright (C) 2008-2015 Michele Martone</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">This file is part of librsb.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">librsb is free software; you can redistribute it and/or modify it</span></div>
+<div class="line"><span class="comment">under the terms of the GNU Lesser General Public License as published</span></div>
+<div class="line"><span class="comment">by the Free Software Foundation; either version 3 of the License, or</span></div>
+<div class="line"><span class="comment">(at your option) any later version.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">librsb is distributed in the hope that it will be useful, but WITHOUT</span></div>
+<div class="line"><span class="comment">ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or</span></div>
+<div class="line"><span class="comment">FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public</span></div>
+<div class="line"><span class="comment">License for more details.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">You should have received a copy of the GNU Lesser General Public</span></div>
+<div class="line"><span class="comment">License along with librsb; see the file COPYING.</span></div>
+<div class="line"><span class="comment">If not, see <http://www.gnu.org/licenses/>.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">*/</span><span class="comment"></span></div>
+<div class="line"><span class="comment">/*!</span></div>
+<div class="line"><span class="comment"> \ingroup rsb_doc_examples</span></div>
+<div class="line"><span class="comment"> @file</span></div>
+<div class="line"><span class="comment"> @author Michele Martone</span></div>
+<div class="line"><span class="comment"> @brief This is a first "hello RSB" example program using </span></div>
+<div class="line"><span class="comment">        a Sparse BLAS interface.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment"> \include hello-spblas.c</span></div>
+<div class="line"><span class="comment">*/</span></div>
+<div class="line"><span class="preprocessor">#include <<a class="code" href="rsb_8h.html" title="This file declares the user interface functions and data structures for the librsb library...">rsb.h</a>></span>        <span class="comment">/* for rsb_lib_init */</span></div>
+<div class="line"><span class="preprocessor">#include <<a class="code" href="blas__sparse_8h.html" title="This file specifies the Sparse BLAS interface to librsb. Supported types :(float,double,float complex,double complex) . Unsupported types:() . Level 1 ops :(dot,axpy,ga,gz,sc) . Level 2 ops :(mv,sv) . Level 3 ops :(mm,sm) .">blas_sparse.h</a>></span>        <span class="comment">/* Sparse BLAS on the top of librsb */</span></div>
+<div class="line"><span class="preprocessor">#include <stdio.h></span>      <span class="comment">/* printf */</span></div>
+<div class="line"></div>
+<div class="line"><span class="keywordtype">int</span> main(<span class="keyword">const</span> <span class="keywordtype">int</span> argc, <span class="keywordtype">char</span> * <span class="keyword">const</span> argv[])</div>
+<div class="line">{<span class="comment"></span></div>
+<div class="line"><span class="comment">        /*!</span></div>
+<div class="line"><span class="comment">         * A Hello/Sparse BLAS program.</span></div>
+<div class="line"><span class="comment">         *</span></div>
+<div class="line"><span class="comment">         * This program shows how to use the blas_sparse.h</span></div>
+<div class="line"><span class="comment">         * interface correctly to:</span></div>
+<div class="line"><span class="comment">         *</span></div>
+<div class="line"><span class="comment">         * - initialize the library using #rsb_lib_init()</span></div>
+<div class="line"><span class="comment">         * - allocate (build) a single sparse matrix in the RSB</span></div>
+<div class="line"><span class="comment">         *   format using #BLAS_duscr_begin()/#BLAS_duscr_insert_entries()</span></div>
+<div class="line"><span class="comment">         *   /#BLAS_duscr_end()</span></div>
+<div class="line"><span class="comment">         * - extract one matrix element with #BLAS_dusget_element()</span></div>
+<div class="line"><span class="comment">         * - multiply the matrix times a vector using #BLAS_dusmv()</span></div>
+<div class="line"><span class="comment">         * - deallocate the matrix using #BLAS_usds() </span></div>
+<div class="line"><span class="comment">         * - finalize the library using</span></div>
+<div class="line"><span class="comment">         *   #rsb_lib_exit(#RSB_NULL_EXIT_OPTIONS) </span></div>
+<div class="line"><span class="comment">        */</span></div>
+<div class="line"><span class="preprocessor">#ifndef RSB_NUMERICAL_TYPE_DOUBLE   </span></div>
+<div class="line"><span class="preprocessor"></span>        printf(<span class="stringliteral">"'double' type configured out."</span></div>
+<div class="line">        <span class="stringliteral">" Please reconfigure the library with it and recompile.\n"</span>);</div>
+<div class="line">        <span class="keywordflow">return</span> 0;</div>
+<div class="line"><span class="preprocessor">#else </span><span class="comment">/* RSB_NUMERICAL_TYPE_DOUBLE */</span><span class="preprocessor"></span></div>
+<div class="line"><span class="preprocessor"></span>        <a class="code" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A = <a class="code" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_invalid_handle</a>; <span class="comment">/* handle for A */</span></div>
+<div class="line">        <span class="keyword">const</span> <span class="keywordtype">int</span> nnz = 4;      <span class="comment">/* number of nonzeroes of matrix A */</span></div>
+<div class="line">        <span class="keyword">const</span> <span class="keywordtype">int</span>  nr = 3;      <span class="comment">/* number of A's rows */</span></div>
+<div class="line">        <span class="keyword">const</span> <span class="keywordtype">int</span>  nc = 3;      <span class="comment">/* number of A's columns */</span></div>
+<div class="line">        <span class="comment">/* A's nonzero elements row indices (coordinates): */</span></div>
+<div class="line">        <span class="keywordtype">int</span>   IA[] = { 0, 1, 2, 2 };</div>
+<div class="line">        <span class="comment">/* A's nonzero elements column indices (coordinates): */</span></div>
+<div class="line">        <span class="keywordtype">int</span>   JA[] = { 0, 1, 0, 2 };</div>
+<div class="line">        <span class="comment">/* A's nonzero values (matrix coefficients): */</span></div>
+<div class="line">        <span class="keywordtype">double</span> VA[] = { 11.0, 22.0, 13.0, 33.0  };</div>
+<div class="line">        <span class="comment">/* the X vector's array: */</span></div>
+<div class="line">        <span class="keywordtype">double</span> X[] = { 0.0, 0.0, 0.0 };</div>
+<div class="line">        <span class="comment">/* the B vector's array: */</span></div>
+<div class="line">        <span class="keywordtype">double</span> B[] = { -1.0, -2.0, -2.0 };</div>
+<div class="line">        <span class="comment">/* the (known) result array: */</span></div>
+<div class="line">        <span class="keywordtype">double</span> AB[] = { 11.0+26.0, 44.0, 66.0+13.0 };</div>
+<div class="line">        <span class="comment">/* rsb error variable: */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> errval = <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>;</div>
+<div class="line">        <span class="keywordtype">int</span> i;</div>
+<div class="line"></div>
+<div class="line">        printf(<span class="stringliteral">"Hello, RSB!\n"</span>);</div>
+<div class="line">        <span class="comment">/* initialize the library */</span></div>
+<div class="line">        <span class="keywordflow">if</span>((errval = <a class="code" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>(<a class="code" href="rsb_8h.html#add105c42e570c5c269680d437f8c51e2">RSB_NULL_INIT_OPTIONS</a>)) </div>
+<div class="line">                        != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly initialized the library.\n"</span>);</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* initialize a matrix descriptor */</span></div>
+<div class="line">        A = <a class="code" href="group__rsb__doc__sparse__blas.html#gac931dcb1129ee3016ab82602c3d14fee">BLAS_duscr_begin</a>(nr,nc);</div>
+<div class="line">        <span class="keywordflow">if</span>( A == <a class="code" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_invalid_handle</a> )</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        </div>
+<div class="line">        <span class="comment">/* specify properties (e.g.: symmetry)*/</span></div>
+<div class="line">        <span class="keywordflow">if</span>( <a class="code" href="blas__sparse_8h.html#a6719ae77dfef6d6dd0790e34a65c1924">BLAS_ussp</a>(A,<a class="code" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a>) != 0 )</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* get properties (e.g.: symmetry) */</span></div>
+<div class="line">        <span class="keywordflow">if</span>( <a class="code" href="blas__sparse_8h.html#a5eec91b6d95962811bd9cb4e37266214">BLAS_usgp</a>(A,<a class="code" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a>) != 1 )</div>
+<div class="line">        {</div>
+<div class="line">                printf(<span class="stringliteral">"Symmetry property non set ?!\n"</span>);</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* insert the nonzeroes (here, all at once) */</span></div>
+<div class="line">        <span class="keywordflow">if</span>( <a class="code" href="group__rsb__doc__sparse__blas.html#gae0683bc8f0af5dd3e53b964190f9e1b4">BLAS_duscr_insert_entries</a>(A, nnz, VA, IA, JA)</div>
+<div class="line">                        == <a class="code" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_invalid_handle</a>)</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* finalize (allocate) the matrix build  */</span></div>
+<div class="line">        <span class="keywordflow">if</span>( <a class="code" href="group__rsb__doc__sparse__blas.html#ga5d9ce97bf054b1e3750eaae5d4e6c335">BLAS_duscr_end</a>(A) == <a class="code" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_invalid_handle</a> )</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly allocated a matrix.\n"</span>);</div>
+<div class="line"></div>
+<div class="line">        VA[0] = 0.0;</div>
+<div class="line">        <span class="keywordflow">if</span>( <a class="code" href="group__rsb__doc__sparse__blas.html#gacf35fa073f6cc991efe75f6a012a9a04">BLAS_dusget_element</a>(A, IA[0], JA[0], &VA[0]) )</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* a check */</span></div>
+<div class="line">        <span class="keywordflow">if</span>( VA[0] != 11.0 )</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* compute X = X + (-1) * A * B   */</span></div>
+<div class="line">        <span class="keywordflow">if</span>(<a class="code" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>(<a class="code" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f">blas_no_trans</a>,-1,A,B,1,X,1))</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">for</span>( i = 0 ; i < nc; ++i )</div>
+<div class="line">                <span class="keywordflow">if</span>( X[i] != AB[i] )</div>
+<div class="line">                {</div>
+<div class="line">                        printf(<span class="stringliteral">"Computed SPMV result seems wrong. Terminating.\n"</span>);</div>
+<div class="line">                        <span class="keywordflow">goto</span> err;</div>
+<div class="line">                }</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly performed a SPMV.\n"</span>);</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* deallocate matrix A */</span></div>
+<div class="line">        <span class="keywordflow">if</span>( <a class="code" href="group__rsb__doc__sparse__blas.html#ga8b0cca8196f40f7b55084a978b40717f">BLAS_usds</a>(A) )</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly freed the matrix.\n"</span>);</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* finalize the library */</span></div>
+<div class="line">        <span class="keywordflow">if</span>((errval = <a class="code" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a>(<a class="code" href="rsb_8h.html#a2234a5e51156de6c95c3f8c2951ae09f">RSB_NULL_EXIT_OPTIONS</a>))</div>
+<div class="line">                        != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly finalized the library.\n"</span>);</div>
+<div class="line">        printf(<span class="stringliteral">"Program terminating with no error.\n"</span>);</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">return</span> 0;</div>
+<div class="line">err:</div>
+<div class="line">        <a class="code" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror</a>(NULL,errval);</div>
+<div class="line">        printf(<span class="stringliteral">"Program terminating with error.\n"</span>);</div>
+<div class="line">        <span class="keywordflow">return</span> -1;</div>
+<div class="line"><span class="preprocessor">#endif </span><span class="comment">/* RSB_NUMERICAL_TYPE_DOUBLE */</span><span class="preprocessor"></span></div>
+<div class="line"><span class="preprocessor"></span>}</div>
+<div class="line"></div>
+</div><!-- fragment --> <div class="fragment"><div class="line"><span class="comment">/*</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">Copyright (C) 2008-2015 Michele Martone</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">This file is part of librsb.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">librsb is free software; you can redistribute it and/or modify it</span></div>
+<div class="line"><span class="comment">under the terms of the GNU Lesser General Public License as published</span></div>
+<div class="line"><span class="comment">by the Free Software Foundation; either version 3 of the License, or</span></div>
+<div class="line"><span class="comment">(at your option) any later version.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">librsb is distributed in the hope that it will be useful, but WITHOUT</span></div>
+<div class="line"><span class="comment">ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or</span></div>
+<div class="line"><span class="comment">FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public</span></div>
+<div class="line"><span class="comment">License for more details.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">You should have received a copy of the GNU Lesser General Public</span></div>
+<div class="line"><span class="comment">License along with librsb; see the file COPYING.</span></div>
+<div class="line"><span class="comment">If not, see <http://www.gnu.org/licenses/>.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">*/</span><span class="comment"></span></div>
+<div class="line"><span class="comment">/*!</span></div>
+<div class="line"><span class="comment"> \ingroup rsb_doc_examples</span></div>
+<div class="line"><span class="comment"> @file</span></div>
+<div class="line"><span class="comment"> @author Michele Martone</span></div>
+<div class="line"><span class="comment"> @brief This is a first "RSB autotuning" example program.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment"> \include autotuning.c</span></div>
+<div class="line"><span class="comment">*/</span></div>
+<div class="line"><span class="preprocessor">#include <<a class="code" href="rsb_8h.html" title="This file declares the user interface functions and data structures for the librsb library...">rsb.h</a>></span>        <span class="comment">/* librsb header to include */</span></div>
+<div class="line"><span class="preprocessor">#include <stdio.h></span>      <span class="comment">/* printf() */</span></div>
+<div class="line"><span class="preprocessor">#include <ctype.h></span>      <span class="comment">/* isdigit() */</span></div>
+<div class="line"><span class="preprocessor">#include <stdlib.h></span>     <span class="comment">/* atoi() */</span></div>
+<div class="line"><span class="comment">/* #include "rsb_internals.h" */</span></div>
+<div class="line"></div>
+<div class="line"><span class="keywordtype">int</span> tune_from_file(<span class="keywordtype">char</span> * <span class="keyword">const</span> filename, <a class="code" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a> wvat)</div>
+<div class="line">{</div>
+<div class="line">        <span class="keyword">struct </span>rsb_mtx_t *mtxMp = NULL;</div>
+<div class="line">        <span class="comment">/* spmv specific variables */</span></div>
+<div class="line">        <span class="keyword">const</span> <a class="code" href="rsb__types_8h.html#aa5e96f00841ec8f4f3ca1ff0bf1b5bbd">RSB_DEFAULT_TYPE</a> alpha = 1;</div>
+<div class="line">        <span class="keyword">const</span> <a class="code" href="rsb__types_8h.html#aa5e96f00841ec8f4f3ca1ff0bf1b5bbd">RSB_DEFAULT_TYPE</a> beta = 1;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> order = <a class="code" href="rsb_8h.html#a6ed7790c2f7129a6e051b8167c48a43c">RSB_FLAG_WANT_COLUMN_MAJOR_ORDER</a>;</div>
+<div class="line">        <span class="keyword">const</span> <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> nrhs = 2;  <span class="comment">/* number of right hand sides */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> transA = <a class="code" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a>; <span class="comment">/* transposition */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> ldB = 0;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> ldC = 0;</div>
+<div class="line">        <span class="comment">/* misc variables */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> errval = <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#ab7a0af874a2765e9271a63ee4acf3d5d">rsb_time_t</a> dt;</div>
+<div class="line">        <span class="keywordtype">char</span> ib[200];</div>
+<div class="line">        <span class="keyword">const</span> <span class="keywordtype">char</span>*is = <span class="stringliteral">"RSB_MIF_MATRIX_INFO__TO__CHAR_P"</span>;</div>
+<div class="line">        <span class="comment">/* misc variables */</span></div>
+<div class="line">        <span class="comment">/* input autotuning variables */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a> oitmax = 1 <span class="comment">/*15*/</span>;    <span class="comment">/* auto-tune iterations */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#ab7a0af874a2765e9271a63ee4acf3d5d">rsb_time_t</a> tmax = 0.1;  <span class="comment">/* time per autotune operation */</span></div>
+<div class="line">        <span class="comment">/* output autotuning variables */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> flagsA = <a class="code" href="rsb_8h.html#a0ea7640214ee34c87e483c475b15827d">RSB_FLAG_NOFLAGS</a>;</div>
+<div class="line">        <span class="keywordtype">int</span> ione = 1;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecodea [] = RSB_MATRIX_SPBLAS_TYPE_CODES_ARRAY;</div>
+<div class="line">        <span class="keywordtype">int</span> typecodei;</div>
+<div class="line"></div>
+<div class="line">        errval = <a class="code" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>(<a class="code" href="rsb_8h.html#add105c42e570c5c269680d437f8c51e2">RSB_NULL_INIT_OPTIONS</a>);</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">if</span>( (errval) != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> )</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line"></div>
+<div class="line">        errval = <a class="code" href="rsb_8h.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_lib_set_opt</a>(<a class="code" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a2164b61bd47cf53a3c8d287b419ab591">RSB_IO_WANT_VERBOSE_TUNING</a>, &wvat );</div>
+<div class="line">        </div>
+<div class="line">        <span class="comment">/*</span></div>
+<div class="line"><span class="comment">        errval = rsb_lib_set_opt(RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE, &ione);</span></div>
+<div class="line"><span class="comment">        */</span></div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">if</span>( (errval) != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> )</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line"></div>
+<div class="line">        printf(<span class="stringliteral">"Loading matrix from file \"%s\".\n"</span>,filename);</div>
+<div class="line"></div>
+<div class="line">        mtxMp = <a class="code" href="group__rsb__doc__rsb.html#ga00833b0cf57da8e430f9d0e2b5375bb3">rsb_file_mtx_load</a>(filename, flagsA, typecodea[0], &errval);</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">if</span>( (errval) != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> )</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">for</span>( typecodei = 0 ; typecodei < RSB_IMPLEMENTED_TYPES; ++typecodei )</div>
+<div class="line">        {</div>
+<div class="line">                <a class="code" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode = typecodea[typecodei];</div>
+<div class="line">                <span class="keyword">struct </span>rsb_mtx_t *mtxAp = NULL;</div>
+<div class="line">                <span class="keyword">struct </span>rsb_mtx_t *mtxOp = NULL;</div>
+<div class="line">                <a class="code" href="rsb_8h.html#ab6fedd060aee0dd9f61f0438987a99a9">rsb_real_t</a> sf = 0.0;</div>
+<div class="line">                <a class="code" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a> tn = 0;</div>
+<div class="line"></div>
+<div class="line">                sf = 0.0;</div>
+<div class="line">                tn = 0;</div>
+<div class="line"></div>
+<div class="line">                printf(<span class="stringliteral">"Considering %c clone.\n"</span>,typecode);</div>
+<div class="line">                </div>
+<div class="line">                errval = <a class="code" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a>(&mtxAp, typecode, transA, NULL, mtxMp,</div>
+<div class="line">                                flagsA);</div>
+<div class="line"></div>
+<div class="line">                <span class="keywordflow">if</span>( (errval) != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> )</div>
+<div class="line">                        <span class="keywordflow">goto</span> err;</div>
+<div class="line"></div>
+<div class="line">                printf(<span class="stringliteral">"Base matrix:\n"</span>);</div>
+<div class="line">                <a class="code" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str</a>(mtxAp,is,ib,<span class="keyword">sizeof</span>(ib));</div>
+<div class="line">                printf(<span class="stringliteral">"%s\n\n"</span>,ib);</div>
+<div class="line"></div>
+<div class="line">                dt = -<a class="code" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a>();</div>
+<div class="line">                errval = <a class="code" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>(NULL, &sf, &tn, oitmax, tmax, transA,</div>
+<div class="line">                     &alpha, mtxAp, nrhs, order, NULL, ldB, &beta, NULL, ldC);</div>
+<div class="line"></div>
+<div class="line">                dt += <a class="code" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a>();</div>
+<div class="line">                <span class="keywordflow">if</span>(tn == 0)</div>
+<div class="line">                printf(<span class="stringliteral">"After %lfs, autotuning routine did not find a better"</span></div>
+<div class="line">                        <span class="stringliteral">" threads count configuration.\n"</span>,dt);</div>
+<div class="line">                <span class="keywordflow">else</span></div>
+<div class="line">                printf(<span class="stringliteral">"After %lfs, thread autotuning declared speedup of %lg x,"</span></div>
+<div class="line">                        <span class="stringliteral">" when using threads count of %d.\n"</span>,dt,sf,tn);</div>
+<div class="line">                printf(<span class="stringliteral">"\n"</span>);</div>
+<div class="line"></div>
+<div class="line"></div>
+<div class="line">                dt = -<a class="code" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a>();</div>
+<div class="line"></div>
+<div class="line">                mtxOp = mtxAp;</div>
+<div class="line">                errval = <a class="code" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>(&mtxAp, &sf, &tn, oitmax, tmax, transA,</div>
+<div class="line">                        &alpha, NULL, nrhs, order, NULL, ldB, &beta, NULL, ldC);</div>
+<div class="line">                <span class="keywordflow">if</span>( (errval) != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> )</div>
+<div class="line">                        <span class="keywordflow">goto</span> err;</div>
+<div class="line"></div>
+<div class="line">                dt += <a class="code" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a>();</div>
+<div class="line">                <span class="keywordflow">if</span>( mtxOp == mtxAp )</div>
+<div class="line">                {</div>
+<div class="line">                        printf(<span class="stringliteral">"After %lfs, global autotuning found old matrix optimal,"</span></div>
+<div class="line">                        <span class="stringliteral">" with declared speedup %lg x when using %d threads\n"</span>,dt,sf,tn);</div>
+<div class="line">                }</div>
+<div class="line">                <span class="keywordflow">else</span></div>
+<div class="line">                {</div>
+<div class="line">                        printf(<span class="stringliteral">"After %lfs, global autotuning declared speedup of %lg x,"</span></div>
+<div class="line">                        <span class="stringliteral">" when using threads count of %d and a new matrix:\n"</span>,dt,sf,tn);</div>
+<div class="line">                        <a class="code" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str</a>(mtxAp,is,ib,<span class="keyword">sizeof</span>(ib));</div>
+<div class="line">                        printf(<span class="stringliteral">"%s\n"</span>,ib);</div>
+<div class="line">                }</div>
+<div class="line">                printf(<span class="stringliteral">"\n"</span>);</div>
+<div class="line"></div>
+<div class="line">                <span class="comment">/* user is expected to:</span></div>
+<div class="line"><span class="comment">                errval = rsb_lib_set_opt(RSB_IO_WANT_EXECUTING_THREADS,&tn);</span></div>
+<div class="line"><span class="comment">                and use mtxAp in SpMV.</span></div>
+<div class="line"><span class="comment">                */</span></div>
+<div class="line">                <a class="code" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a>(mtxAp);</div>
+<div class="line">                mtxAp = NULL;</div>
+<div class="line">        }</div>
+<div class="line">        <a class="code" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a>(mtxMp);</div>
+<div class="line">        mtxMp = NULL;</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">goto</span> ret;</div>
+<div class="line">ret:</div>
+<div class="line">        <span class="keywordflow">return</span> 0;</div>
+<div class="line">err:</div>
+<div class="line">        <a class="code" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror</a>(NULL,errval);</div>
+<div class="line">        printf(<span class="stringliteral">"Program terminating with error.\n"</span>);</div>
+<div class="line">        <span class="keywordflow">return</span> -1;</div>
+<div class="line">}</div>
+<div class="line"></div>
+<div class="line"><span class="keywordtype">int</span> main(<span class="keyword">const</span> <span class="keywordtype">int</span> argc, <span class="keywordtype">char</span> * <span class="keyword">const</span> argv[])</div>
+<div class="line">{<span class="comment"></span></div>
+<div class="line"><span class="comment">        /*!</span></div>
+<div class="line"><span class="comment">         Autotuning example.</span></div>
+<div class="line"><span class="comment">         */</span></div>
+<div class="line">        <span class="comment">/* matrix variables */</span></div>
+<div class="line">        <span class="keyword">struct </span>rsb_mtx_t *mtxAp = NULL; <span class="comment">/* matrix structure pointer */</span></div>
+<div class="line">        <span class="keyword">const</span> <span class="keywordtype">int</span> bs = <a class="code" href="rsb_8h.html#a3579d00f3b97cd569707f7c62e462322">RSB_DEFAULT_BLOCKING</a>;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> nrA = 500; <span class="comment">/* number of rows */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> ncA = 500; <span class="comment">/* number of cols */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode = <a class="code" href="rsb__types_8h.html#a56fc5ef14266266227797621e0a1e217">RSB_NUMERICAL_TYPE_DEFAULT</a>;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> rd = 1; <span class="comment">/* every rd rows one is non empty */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> cd = 4; <span class="comment">/* every cd cols one is non empty */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> nnzA = (nrA/rd)*(ncA/cd); <span class="comment">/* nonzeroes */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a>*IA = NULL;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a>*JA = NULL;</div>
+<div class="line">        <a class="code" href="rsb__types_8h.html#aa5e96f00841ec8f4f3ca1ff0bf1b5bbd">RSB_DEFAULT_TYPE</a>*VA = NULL;</div>
+<div class="line">        <span class="comment">/* spmv specific variables */</span></div>
+<div class="line">        <span class="keyword">const</span> <a class="code" href="rsb__types_8h.html#aa5e96f00841ec8f4f3ca1ff0bf1b5bbd">RSB_DEFAULT_TYPE</a> alpha = 1;</div>
+<div class="line">        <span class="keyword">const</span> <a class="code" href="rsb__types_8h.html#aa5e96f00841ec8f4f3ca1ff0bf1b5bbd">RSB_DEFAULT_TYPE</a> beta = 1;</div>
+<div class="line">        <a class="code" href="rsb__types_8h.html#aa5e96f00841ec8f4f3ca1ff0bf1b5bbd">RSB_DEFAULT_TYPE</a>*Cp = NULL;</div>
+<div class="line">        <a class="code" href="rsb__types_8h.html#aa5e96f00841ec8f4f3ca1ff0bf1b5bbd">RSB_DEFAULT_TYPE</a>*Bp = NULL;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> order = <a class="code" href="rsb_8h.html#a6ed7790c2f7129a6e051b8167c48a43c">RSB_FLAG_WANT_COLUMN_MAJOR_ORDER</a>;</div>
+<div class="line">        <span class="keyword">const</span> <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> nrhs = 2;  <span class="comment">/* number of right hand sides */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> transA = <a class="code" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a>; <span class="comment">/* transposition */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> ldB = nrA;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> ldC = ncA;</div>
+<div class="line">        <span class="comment">/* misc variables */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> errval = <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>;</div>
+<div class="line">        <span class="keywordtype">size_t</span> so = <span class="keyword">sizeof</span>(<a class="code" href="rsb__types_8h.html#aa5e96f00841ec8f4f3ca1ff0bf1b5bbd">RSB_DEFAULT_TYPE</a>);</div>
+<div class="line">        <span class="keywordtype">size_t</span> si = <span class="keyword">sizeof</span>(<a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a>);</div>
+<div class="line">        <a class="code" href="rsb_8h.html#ab7a0af874a2765e9271a63ee4acf3d5d">rsb_time_t</a> dt,odt;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a> t,tt = 100;   <span class="comment">/* will repeat spmv tt times */</span></div>
+<div class="line">        <span class="keywordtype">char</span> ib[200];</div>
+<div class="line">        <span class="keyword">const</span> <span class="keywordtype">char</span>*is = <span class="stringliteral">"RSB_MIF_MATRIX_INFO__TO__CHAR_P"</span>;</div>
+<div class="line">        <span class="comment">/* misc counters */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> ci; </div>
+<div class="line">        <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> ri;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> ni;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a> nrhsi;</div>
+<div class="line">        <span class="comment">/* misc variables */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#ab7a0af874a2765e9271a63ee4acf3d5d">rsb_time_t</a> etime = 0.0;</div>
+<div class="line">        <span class="comment">/* input autotuning variables */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a> oitmax = 15;  <span class="comment">/* auto-tune iterations */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#ab7a0af874a2765e9271a63ee4acf3d5d">rsb_time_t</a> tmax = 0.1;  <span class="comment">/* time per autotune operation */</span></div>
+<div class="line">        <span class="comment">/* input/output autotuning variables */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a> tn = 0;       <span class="comment">/* threads number */</span></div>
+<div class="line">        <span class="comment">/* output autotuning variables */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#ab6fedd060aee0dd9f61f0438987a99a9">rsb_real_t</a> sf = 0.0;    <span class="comment">/* speedup factor obtained from auto tuning */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a> wvat = 1;     <span class="comment">/* want verbose autotuning; see documentation</span></div>
+<div class="line"><span class="comment">                                   of RSB_IO_WANT_VERBOSE_TUNING */</span></div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">if</span>(argc > 1 && !isdigit(argv[1][0]) )</div>
+<div class="line">                <span class="keywordflow">return</span> tune_from_file(argv[1],wvat);</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">if</span>(argc > 1)</div>
+<div class="line">        {</div>
+<div class="line">                nrA = ncA = atoi(argv[1]);</div>
+<div class="line">                <span class="keywordflow">if</span> ( nrA < <a class="code" href="rsb_8h.html#abaccfe39f69712cebf501c9d55b1a4b8">RSB_MIN_MATRIX_DIM</a> || (nrA > (<a class="code" href="rsb_8h.html#a318a92d60883f6ade7345459074374f5">RSB_MAX_MATRIX_DIM</a>) ))</div>
+<div class="line">                        <span class="keywordflow">goto</span> err;</div>
+<div class="line"></div>
+<div class="line">                nnzA = (nrA/rd)*(ncA/cd);</div>
+<div class="line">                ldB = nrA;</div>
+<div class="line">                ldC = ncA;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        printf(<span class="stringliteral">"Creating %d x %d matrix with %d nonzeroes.\n"</span>,nrA,ncA,nnzA);</div>
+<div class="line"></div>
+<div class="line">        IA = calloc(nnzA, si);</div>
+<div class="line">        JA = calloc(nnzA, si);</div>
+<div class="line">        VA = calloc(nnzA, so);</div>
+<div class="line">        Bp = calloc(nrhs*ncA ,so);</div>
+<div class="line">        Cp = calloc(nrhs*nrA ,so);</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">if</span>( ! ( VA && IA && JA && Bp && Cp ) )</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">for</span>(nrhsi=0;nrhsi<nrhs;++nrhsi)</div>
+<div class="line">                <span class="keywordflow">for</span>(ci=0;ci<ncA/cd;++ci)</div>
+<div class="line">                        Bp[nrhsi*ldC+ci] = 1.0;</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">for</span>(nrhsi=0;nrhsi<nrhs;++nrhsi)</div>
+<div class="line">                <span class="keywordflow">for</span>(ri=0;ri<nrA/rd;++ri)</div>
+<div class="line">                        Cp[nrhsi*ldC+ri] = 1.0;</div>
+<div class="line"></div>
+<div class="line">        ni = 0;</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">for</span>(ci=0;ci<ncA/cd;++ci)</div>
+<div class="line">                <span class="keywordflow">for</span>(ri=0;ri<nrA/rd;++ri)</div>
+<div class="line">                {</div>
+<div class="line">                        VA[ni] = nrA * ri + ci,</div>
+<div class="line">                        IA[ni] = ri;</div>
+<div class="line">                        JA[ni] = ci;</div>
+<div class="line">                        ni++;</div>
+<div class="line">                }</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">if</span>((errval = <a class="code" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>(<a class="code" href="rsb_8h.html#add105c42e570c5c269680d437f8c51e2">RSB_NULL_INIT_OPTIONS</a>))</div>
+<div class="line">                        != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>) <span class="keywordflow">goto</span> err;</div>
+<div class="line"></div>
+<div class="line">        errval = <a class="code" href="rsb_8h.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_lib_set_opt</a>(<a class="code" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a2164b61bd47cf53a3c8d287b419ab591">RSB_IO_WANT_VERBOSE_TUNING</a>, &wvat );</div>
+<div class="line"></div>
+<div class="line">        mtxAp = <a class="code" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_from_coo_const</a>(</div>
+<div class="line">                VA,IA,JA,nnzA,typecode,nrA,ncA,bs,bs,</div>
+<div class="line">                <a class="code" href="rsb_8h.html#a0ea7640214ee34c87e483c475b15827d">RSB_FLAG_NOFLAGS</a>,&errval);</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* VA, IA, JA are not necessary anymore */</span></div>
+<div class="line">        free(VA);</div>
+<div class="line">        free(IA);</div>
+<div class="line">        free(JA);</div>
+<div class="line">        VA = NULL;</div>
+<div class="line">        IA = NULL;</div>
+<div class="line">        JA = NULL;</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">if</span>((!mtxAp) || (errval != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>))</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line"></div>
+<div class="line">        printf(<span class="stringliteral">"Allocated matrix of %zd nonzeroes:\n"</span>,(<span class="keywordtype">size_t</span>)nnzA);</div>
+<div class="line">        <a class="code" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str</a>(mtxAp,is,ib,<span class="keyword">sizeof</span>(ib));</div>
+<div class="line">        printf(<span class="stringliteral">"%s\n\n"</span>,ib);</div>
+<div class="line"></div>
+<div class="line">        dt = - <a class="code" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a>();</div>
+<div class="line">        <span class="keywordflow">for</span>(t=0;t<tt;++t)</div>
+<div class="line">                <span class="comment">/* </span></div>
+<div class="line"><span class="comment">                   If nrhs == 1, the following is equivalent to</span></div>
+<div class="line"><span class="comment">                   rsb_spmv(transA,&alpha,mtxAp,Bp,1,&beta,Cp,1);</span></div>
+<div class="line"><span class="comment">                */</span></div>
+<div class="line">                <a class="code" href="group__rsb__doc__rsb.html#ga3ec8d721b5333aae6ea9b03eb0039285">rsb_spmm</a>(transA,&alpha,mtxAp,nrhs,order,Bp,ldB,&beta,Cp,ldC);</div>
+<div class="line">        dt += <a class="code" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a>();</div>
+<div class="line">        odt = dt;</div>
+<div class="line">        printf(<span class="stringliteral">"Before auto-tuning, %d multiplications took %lfs.\n"</span>,tt,dt);</div>
+<div class="line"></div>
+<div class="line">        printf(<span class="stringliteral">"Threads autotuning (may take more than %lfs)...\n"</span>,</div>
+<div class="line">                        oitmax*tmax);</div>
+<div class="line">        dt = -<a class="code" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a>();</div>
+<div class="line">        errval = <a class="code" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>(NULL, &sf, &tn, oitmax, tmax, transA,</div>
+<div class="line">                        &alpha, mtxAp, nrhs, order, Bp, ldB, &beta, Cp, ldC);</div>
+<div class="line">        dt += <a class="code" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a>();</div>
+<div class="line">        <span class="keywordflow">if</span>(errval != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">if</span>(tn == 0)</div>
+<div class="line">        printf(<span class="stringliteral">"After %lfs, autotuning routine did not find a better"</span></div>
+<div class="line">                        <span class="stringliteral">" threads count configuration.\n"</span>,dt);</div>
+<div class="line">        <span class="keywordflow">else</span></div>
+<div class="line">        printf(<span class="stringliteral">"After %lfs, autotuning routine declared speedup of %lg x,"</span></div>
+<div class="line">                        <span class="stringliteral">" when using threads count of %d.\n"</span>,dt,sf,tn);</div>
+<div class="line"></div>
+<div class="line">        errval = <a class="code" href="rsb_8h.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_lib_set_opt</a>(<a class="code" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ad9150d4d5672d1835185d6e2286d92f4">RSB_IO_WANT_EXECUTING_THREADS</a>,&tn);</div>
+<div class="line">        <span class="keywordflow">if</span>(errval != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line"></div>
+<div class="line">        <a class="code" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str</a>(mtxAp,is,ib,<span class="keyword">sizeof</span>(ib));</div>
+<div class="line">        printf(<span class="stringliteral">"%s\n"</span>,ib);</div>
+<div class="line"></div>
+<div class="line">        dt = -<a class="code" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a>();</div>
+<div class="line">        <span class="keywordflow">for</span>(t=0;t<tt;++t)</div>
+<div class="line">                <span class="comment">/*rsb_spmv(transA,&alpha,mtxAp,Bp,1,&beta,Cp,1);*/</span></div>
+<div class="line">                <a class="code" href="group__rsb__doc__rsb.html#ga3ec8d721b5333aae6ea9b03eb0039285">rsb_spmm</a>(transA,&alpha,mtxAp,nrhs,order,Bp,ldB,&beta,Cp,ldC);</div>
+<div class="line">        dt += <a class="code" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a>();</div>
+<div class="line">        printf(<span class="stringliteral">"After threads auto-tuning, %d multiplications took %lfs"</span></div>
+<div class="line">                        <span class="stringliteral">"  --  effective speedup of %lg x\n"</span>,tt,dt,odt/dt);</div>
+<div class="line">        odt = dt;</div>
+<div class="line"></div>
+<div class="line"></div>
+<div class="line">        tn = 0; <span class="comment">/* this will restore default threads count */</span></div>
+<div class="line">        errval = <a class="code" href="rsb_8h.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_lib_set_opt</a>(<a class="code" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ad9150d4d5672d1835185d6e2286d92f4">RSB_IO_WANT_EXECUTING_THREADS</a>,&tn);</div>
+<div class="line">        <span class="keywordflow">if</span>(errval != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        errval = <a class="code" href="rsb_8h.html#a96a28efc32dd050d2a74208b3ad2f227">rsb_lib_get_opt</a>(<a class="code" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ad9150d4d5672d1835185d6e2286d92f4">RSB_IO_WANT_EXECUTING_THREADS</a>,&tn);</div>
+<div class="line">        <span class="keywordflow">if</span>(errval != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line"></div>
+<div class="line">        printf(<span class="stringliteral">"Matrix autotuning (may take more than %lfs; using %d"</span></div>
+<div class="line">                        <span class="stringliteral">" threads )...\n"</span>, oitmax*tmax, tn);</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* A negative tn will request also threads autotuning: */</span></div>
+<div class="line">        <span class="comment">/* tn = -tn; */</span></div>
+<div class="line"></div>
+<div class="line">        dt = -<a class="code" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a>();</div>
+<div class="line">        errval = <a class="code" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>(&mtxAp, &sf, &tn, oitmax, tmax, transA,</div>
+<div class="line">                        &alpha,  NULL, nrhs, order, Bp, ldB, &beta, Cp, ldC);</div>
+<div class="line">        dt += <a class="code" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a>();</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">if</span>(errval != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">if</span>(tn == 0)</div>
+<div class="line">        printf(<span class="stringliteral">"After %lfs, autotuning routine did not find a better"</span></div>
+<div class="line">                        <span class="stringliteral">" threads count configuration.\n"</span>,dt);</div>
+<div class="line">        <span class="keywordflow">else</span></div>
+<div class="line">        printf(<span class="stringliteral">"After %lfs, autotuning routine declared speedup of %lg x,"</span></div>
+<div class="line">                        <span class="stringliteral">" when using threads count of %d.\n"</span>,dt,sf,tn);</div>
+<div class="line"></div>
+<div class="line">        <a class="code" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str</a>(mtxAp,is,ib,<span class="keyword">sizeof</span>(ib));</div>
+<div class="line">        printf(<span class="stringliteral">"%s\n"</span>,ib);</div>
+<div class="line"></div>
+<div class="line">        dt = -<a class="code" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a>();</div>
+<div class="line">        <span class="keywordflow">for</span>(t=0;t<tt;++t)</div>
+<div class="line">                <span class="comment">/*rsb_spmv(transA,&alpha,mtxAp,Bp,1,&beta,Cp,1);*/</span></div>
+<div class="line">                <a class="code" href="group__rsb__doc__rsb.html#ga3ec8d721b5333aae6ea9b03eb0039285">rsb_spmm</a>(transA,&alpha,mtxAp,nrhs,order,Bp,ldB,&beta,Cp,ldC);</div>
+<div class="line">        dt += <a class="code" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a>();</div>
+<div class="line">        printf(<span class="stringliteral">"After threads auto-tuning, %d multiplications took %lfs"</span></div>
+<div class="line">                        <span class="stringliteral">"  --  further speedup of %lg x\n"</span>,tt,dt,odt/dt);</div>
+<div class="line"></div>
+<div class="line">        <a class="code" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a>(mtxAp);</div>
+<div class="line">        free(Cp);</div>
+<div class="line">        free(Bp);</div>
+<div class="line"></div>
+<div class="line"></div>
+<div class="line">        errval = <a class="code" href="rsb_8h.html#a96a28efc32dd050d2a74208b3ad2f227">rsb_lib_get_opt</a>(<a class="code" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ae900da85e3fc1f46083ee0abf34db1d9">RSB_IO_WANT_LIBRSB_ETIME</a>,&etime);</div>
+<div class="line">        <span class="keywordflow">if</span>(errval == <a class="code" href="rsb_8h.html#accf836c8eb3145e9ab4fd277d6911764">RSB_ERR_UNSUPPORTED_FEATURE</a>)</div>
+<div class="line">        {</div>
+<div class="line">                printf(<span class="stringliteral">"librsb timer-based profiling is not supported in "</span></div>
+<div class="line">                <span class="stringliteral">"this build. If you wish to have it, re-configure librsb "</span></div>
+<div class="line">                <span class="stringliteral">"with its support. So you can safely ignore the error you"</span></div>
+<div class="line">                <span class="stringliteral">" might just have seen printed out on screen.\n"</span>);</div>
+<div class="line">                errval = <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>;</div>
+<div class="line">        }</div>
+<div class="line">        <span class="keywordflow">else</span></div>
+<div class="line">        <span class="keywordflow">if</span>(etime) <span class="comment">/* This will only work if enabled at configure time. */</span></div>
+<div class="line">                printf(<span class="stringliteral">"Elapsed program time is %5.2lfs\n"</span>,etime);</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">if</span>((errval = <a class="code" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a>(<a class="code" href="rsb_8h.html#a2234a5e51156de6c95c3f8c2951ae09f">RSB_NULL_EXIT_OPTIONS</a>))</div>
+<div class="line">                        !=<a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        <span class="keywordflow">return</span> 0;</div>
+<div class="line">err:</div>
+<div class="line">        <a class="code" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror</a>(NULL,errval);</div>
+<div class="line">        printf(<span class="stringliteral">"Program terminating with error.\n"</span>);</div>
+<div class="line">        <span class="keywordflow">return</span> -1;</div>
+<div class="line">}</div>
+</div><!-- fragment --> <div class="fragment"><div class="line"><span class="comment">/*</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">Copyright (C) 2008-2015 Michele Martone</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">This file is part of librsb.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">librsb is free software; you can redistribute it and/or modify it</span></div>
+<div class="line"><span class="comment">under the terms of the GNU Lesser General Public License as published</span></div>
+<div class="line"><span class="comment">by the Free Software Foundation; either version 3 of the License, or</span></div>
+<div class="line"><span class="comment">(at your option) any later version.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">librsb is distributed in the hope that it will be useful, but WITHOUT</span></div>
+<div class="line"><span class="comment">ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or</span></div>
+<div class="line"><span class="comment">FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public</span></div>
+<div class="line"><span class="comment">License for more details.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">You should have received a copy of the GNU Lesser General Public</span></div>
+<div class="line"><span class="comment">License along with librsb; see the file COPYING.</span></div>
+<div class="line"><span class="comment">If not, see <http://www.gnu.org/licenses/>.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">*/</span><span class="comment"></span></div>
+<div class="line"><span class="comment">/*!</span></div>
+<div class="line"><span class="comment"> \ingroup rsb_doc_examples</span></div>
+<div class="line"><span class="comment"> @file</span></div>
+<div class="line"><span class="comment"> @author Michele Martone</span></div>
+<div class="line"><span class="comment"> @brief This is an example program using a Sparse BLAS interface</span></div>
+<div class="line"><span class="comment">        and reading from file using the RSB library.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment"> \include io-spblas.c</span></div>
+<div class="line"><span class="comment">*/</span></div>
+<div class="line"><span class="preprocessor">#include <<a class="code" href="rsb_8h.html" title="This file declares the user interface functions and data structures for the librsb library...">rsb.h</a>></span>        <span class="comment">/* for rsb_lib_init */</span></div>
+<div class="line"><span class="preprocessor">#include <<a class="code" href="blas__sparse_8h.html" title="This file specifies the Sparse BLAS interface to librsb. Supported types :(float,double,float complex,double complex) . Unsupported types:() . Level 1 ops :(dot,axpy,ga,gz,sc) . Level 2 ops :(mv,sv) . Level 3 ops :(mm,sm) .">blas_sparse.h</a>></span></div>
+<div class="line"><span class="preprocessor">#include <stdio.h></span></div>
+<div class="line">        </div>
+<div class="line"><span class="keywordtype">int</span> main(<span class="keyword">const</span> <span class="keywordtype">int</span> argc, <span class="keywordtype">char</span> * <span class="keyword">const</span> argv[])</div>
+<div class="line">{</div>
+<div class="line"><span class="preprocessor">#ifndef RSB_NUMERICAL_TYPE_DOUBLE   </span></div>
+<div class="line"><span class="preprocessor"></span>        printf(<span class="stringliteral">"Skipping a test because of 'double' type opted out.\n"</span>);</div>
+<div class="line">        <span class="keywordflow">return</span> 0;</div>
+<div class="line"><span class="preprocessor">#else </span><span class="comment">/* RSB_NUMERICAL_TYPE_DOUBLE */</span><span class="preprocessor"></span></div>
+<div class="line"><span class="preprocessor"></span>        <a class="code" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A = <a class="code" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_invalid_handle</a>;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode = <a class="code" href="rsb__types_8h.html#a7849bc51eadedaa51a1b27569be89d86">RSB_NUMERICAL_TYPE_DOUBLE</a>;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> * filename = argc > 1 ? argv[1] : <span class="stringliteral">"../pd.mtx"</span>;</div>
+<div class="line"></div>
+<div class="line">        printf(<span class="stringliteral">"Hello, RSB!\n"</span>);</div>
+<div class="line">        <span class="keywordflow">if</span>((<a class="code" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror</a>(NULL,</div>
+<div class="line">                <a class="code" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>(<a class="code" href="rsb_8h.html#add105c42e570c5c269680d437f8c51e2">RSB_NULL_INIT_OPTIONS</a>)))!=<a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)</div>
+<div class="line">        {</div>
+<div class="line">                printf(<span class="stringliteral">"Error while initializing the library.\n"</span>);</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        printf(<span class="stringliteral">"Correctly initialized the library.\n"</span>);</div>
+<div class="line"></div>
+<div class="line">        A = <a class="code" href="blas__sparse_8h.html#a7769e3aac9ffdba04f29dd1f8f57daa4">rsb_load_spblas_matrix_file_as_matrix_market</a>(filename,</div>
+<div class="line">                        typecode );</div>
+<div class="line">        <span class="keywordflow">if</span>( A == <a class="code" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_invalid_handle</a> )</div>
+<div class="line">        {</div>
+<div class="line">                printf(<span class="stringliteral">"Error while loading matrix %s from file.\n"</span>,</div>
+<div class="line">                                filename);</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        printf(<span class="stringliteral">"Correctly loaded and allocated a matrix"</span></div>
+<div class="line">                        <span class="stringliteral">" from file %s.\n"</span>,filename);</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">if</span>( <a class="code" href="blas__sparse_8h.html#a5eec91b6d95962811bd9cb4e37266214">BLAS_usgp</a>(A,<a class="code" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0">blas_symmetric</a>) == 1 )</div>
+<div class="line">                printf(<span class="stringliteral">"Matrix is symmetric\n"</span>);</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">if</span>( <a class="code" href="blas__sparse_8h.html#a5eec91b6d95962811bd9cb4e37266214">BLAS_usgp</a>(A,<a class="code" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c">blas_hermitian</a>) == 1 )</div>
+<div class="line">                printf(<span class="stringliteral">"Matrix is hermitian\n"</span>);</div>
+<div class="line"></div>
+<div class="line">        printf(<span class="stringliteral">"Now SPMV with NULL vectors will be attempted,"</span></div>
+<div class="line">                        <span class="stringliteral">" resulting in an error (so don't worry).\n"</span>);</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">if</span>(<a class="code" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>(<a class="code" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f">blas_no_trans</a>,-1,A,NULL,1,NULL,1))</div>
+<div class="line">        {</div>
+<div class="line">                printf(<span class="stringliteral">"Correctly detected an error condition.\n"</span>);</div>
+<div class="line">                <span class="keywordflow">goto</span> okerr;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        printf(<span class="stringliteral">"No error detected ?\nIf you see this line printed out,"</span></div>
+<div class="line">                <span class="stringliteral">" please report as a bug, because the above NULL pointers"</span></div>
+<div class="line">                <span class="stringliteral">" should have been detected\n"</span>);</div>
+<div class="line">        <span class="keywordflow">return</span> -1;</div>
+<div class="line"></div>
+<div class="line">okerr:</div>
+<div class="line">        printf(<span class="stringliteral">"Program correctly recovered from intentional"</span></div>
+<div class="line">                        <span class="stringliteral">" error condition.\n"</span>);</div>
+<div class="line">        <span class="keywordflow">if</span>(<a class="code" href="group__rsb__doc__sparse__blas.html#ga8b0cca8196f40f7b55084a978b40717f">BLAS_usds</a>(A))</div>
+<div class="line">        {</div>
+<div class="line">                printf(<span class="stringliteral">"Error while freeing the matrix!\n"</span>);</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        printf(<span class="stringliteral">"Correctly freed the matrix.\n"</span>);</div>
+<div class="line">err:</div>
+<div class="line">        <span class="keywordflow">if</span>(<a class="code" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror</a>(NULL,</div>
+<div class="line">                <a class="code" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a>(<a class="code" href="rsb_8h.html#a2234a5e51156de6c95c3f8c2951ae09f">RSB_NULL_EXIT_OPTIONS</a>))!=<a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)</div>
+<div class="line">        {</div>
+<div class="line">                printf(<span class="stringliteral">"Failed finalizing the library.\n"</span>);</div>
+<div class="line">                <span class="keywordflow">goto</span> ferr;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        printf(<span class="stringliteral">"Correctly finalized the library.\n"</span>);</div>
+<div class="line">        <span class="keywordflow">return</span> 0;</div>
+<div class="line">ferr:</div>
+<div class="line">        <span class="keywordflow">return</span> -1;</div>
+<div class="line"><span class="preprocessor">#endif </span><span class="comment">/* RSB_NUMERICAL_TYPE_DOUBLE */</span><span class="preprocessor"></span></div>
+<div class="line"><span class="preprocessor"></span>}</div>
+<div class="line"></div>
+</div><!-- fragment --> <div class="fragment"><div class="line"><span class="comment">/*</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">Copyright (C) 2008-2015 Michele Martone</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">This file is part of librsb.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">librsb is free software; you can redistribute it and/or modify it</span></div>
+<div class="line"><span class="comment">under the terms of the GNU Lesser General Public License as published</span></div>
+<div class="line"><span class="comment">by the Free Software Foundation; either version 3 of the License, or</span></div>
+<div class="line"><span class="comment">(at your option) any later version.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">librsb is distributed in the hope that it will be useful, but WITHOUT</span></div>
+<div class="line"><span class="comment">ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or</span></div>
+<div class="line"><span class="comment">FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public</span></div>
+<div class="line"><span class="comment">License for more details.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">You should have received a copy of the GNU Lesser General Public</span></div>
+<div class="line"><span class="comment">License along with librsb; see the file COPYING.</span></div>
+<div class="line"><span class="comment">If not, see <http://www.gnu.org/licenses/>.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">*/</span><span class="comment"></span></div>
+<div class="line"><span class="comment">/*!</span></div>
+<div class="line"><span class="comment"> @file</span></div>
+<div class="line"><span class="comment"> @author Michele Martone</span></div>
+<div class="line"><span class="comment"> @brief A toy program showing instantiation, transposition and other</span></div>
+<div class="line"><span class="comment"> operations on a single matrix.</span></div>
+<div class="line"><span class="comment"> \ingroup rsb_doc_examples</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment"> \include transpose.c</span></div>
+<div class="line"><span class="comment">*/</span></div>
+<div class="line"><span class="preprocessor">#include <<a class="code" href="rsb_8h.html" title="This file declares the user interface functions and data structures for the librsb library...">rsb.h</a>></span></div>
+<div class="line"><span class="preprocessor">#include <stdio.h></span>      <span class="comment">/* printf */</span></div>
+<div class="line"></div>
+<div class="line"><span class="keywordtype">int</span> main(<span class="keyword">const</span> <span class="keywordtype">int</span> argc, <span class="keywordtype">char</span> * <span class="keyword">const</span> argv[])</div>
+<div class="line">{</div>
+<div class="line">        <span class="keyword">struct </span>rsb_mtx_t *mtxAp = NULL;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#ac6a4411e32793f5c150c6ab3c6f7e14e">rsb_blk_idx_t</a> brA = <a class="code" href="rsb_8h.html#a3579d00f3b97cd569707f7c62e462322">RSB_DEFAULT_BLOCKING</a>, bcA=<a class="code" href="rsb_8h.html#a3579d00f3b97cd569707f7c62e462322">RSB_DEFAULT_BLOCKING</a>;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> errval = <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> nnzA = 4;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a>  nrA = 3;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a>  ncA = 3;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a>    IA[] = { 0, 1, 2, 0 };</div>
+<div class="line">        <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a>    JA[] = { 0, 1, 2, 2 };</div>
+<div class="line">        <a class="code" href="rsb__types_8h.html#aa5e96f00841ec8f4f3ca1ff0bf1b5bbd">RSB_DEFAULT_TYPE</a> VA[] = { 11, 22, 33, 13 };</div>
+<div class="line">        <a class="code" href="rsb__types_8h.html#aa5e96f00841ec8f4f3ca1ff0bf1b5bbd">RSB_DEFAULT_TYPE</a> XV[] = { 0,0,0,0,0,0 };</div>
+<div class="line">        <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a>  vl = 0;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode = <a class="code" href="rsb__types_8h.html#a56fc5ef14266266227797621e0a1e217">RSB_NUMERICAL_TYPE_DEFAULT</a>;</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* library initialization */</span></div>
+<div class="line">        <span class="keywordflow">if</span>(<a class="code" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>(<a class="code" href="rsb_8h.html#add105c42e570c5c269680d437f8c51e2">RSB_NULL_INIT_OPTIONS</a>)!=<a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">return</span> -1;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* allocation */</span></div>
+<div class="line">        mtxAp = <a class="code" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_from_coo_const</a>(</div>
+<div class="line">                        VA,IA,JA,nnzA,typecode,nrA,ncA,</div>
+<div class="line">                        brA,bcA,<a class="code" href="rsb_8h.html#a0ea7640214ee34c87e483c475b15827d">RSB_FLAG_NOFLAGS</a>,NULL);</div>
+<div class="line">        <span class="keywordflow">if</span>(!mtxAp)</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">return</span> -1;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* printout */</span></div>
+<div class="line">        <span class="keywordflow">if</span>(<a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>!=(errval = <a class="code" href="group__rsb__doc__rsb.html#gad911ac7528c95c874d02cb17e6b76c54">rsb_file_mtx_save</a>(mtxAp,NULL)))</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">if</span>(errval != <a class="code" href="rsb_8h.html#accf836c8eb3145e9ab4fd277d6911764">RSB_ERR_UNSUPPORTED_FEATURE</a>)</div>
+<div class="line">                        <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        </div>
+<div class="line">        <span class="comment">/* matrix transposition */</span></div>
+<div class="line">        <span class="keywordflow">if</span>( <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> != (errval =</div>
+<div class="line">                <a class="code" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a>(&mtxAp,<a class="code" href="rsb__types_8h.html#a532c3e9733221d59bac99cb1f795d266">RSB_NUMERICAL_TYPE_SAME_TYPE</a>,</div>
+<div class="line">                <a class="code" href="rsb__types_8h.html#a37f8cea71946de2f832bdb9d438d5edf">RSB_TRANSPOSITION_T</a>,NULL,mtxAp,<a class="code" href="rsb_8h.html#aacf404fe630d480353ce767fd27ba097">RSB_FLAG_IDENTICAL_FLAGS</a>)))</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* printout */</span></div>
+<div class="line">        <span class="keywordflow">if</span>(<a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>!=(errval = <a class="code" href="group__rsb__doc__rsb.html#gad911ac7528c95c874d02cb17e6b76c54">rsb_file_mtx_save</a>(mtxAp,NULL)))</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">if</span>(errval != <a class="code" href="rsb_8h.html#accf836c8eb3145e9ab4fd277d6911764">RSB_ERR_UNSUPPORTED_FEATURE</a>)</div>
+<div class="line">                        <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <a class="code" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a>(mtxAp);</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* doing the same after load from file */</span></div>
+<div class="line">        mtxAp = <a class="code" href="group__rsb__doc__rsb.html#ga00833b0cf57da8e430f9d0e2b5375bb3">rsb_file_mtx_load</a>(<span class="stringliteral">"../pd.mtx"</span>,</div>
+<div class="line">                <a class="code" href="rsb_8h.html#a0ea7640214ee34c87e483c475b15827d">RSB_FLAG_NOFLAGS</a>,typecode,NULL);</div>
+<div class="line">        <span class="keywordflow">if</span>(!mtxAp)</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">return</span> -1;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* printout */</span></div>
+<div class="line">        <span class="keywordflow">if</span>(<a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>!=(errval = <a class="code" href="group__rsb__doc__rsb.html#gad911ac7528c95c874d02cb17e6b76c54">rsb_file_mtx_save</a>(mtxAp,NULL)))</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">if</span>(errval != <a class="code" href="rsb_8h.html#accf836c8eb3145e9ab4fd277d6911764">RSB_ERR_UNSUPPORTED_FEATURE</a>)</div>
+<div class="line">                        <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* one can see dimensions in advance, also */</span></div>
+<div class="line">        <span class="keywordflow">if</span>(<a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>!=(errval =</div>
+<div class="line">                <a class="code" href="group__rsb__doc__rsb.html#gaa79f69918eafbd8f737b7866a00a0330">rsb_file_mtx_get_dims</a>(<span class="stringliteral">"../pd.mtx"</span>,&nrA,&ncA,&nnzA,NULL)))</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">if</span>(errval != <a class="code" href="rsb_8h.html#accf836c8eb3145e9ab4fd277d6911764">RSB_ERR_UNSUPPORTED_FEATURE</a>)</div>
+<div class="line">                        <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* A matrix can be rendered to Postscript. */</span></div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">if</span>(<a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>!=(errval =</div>
+<div class="line">                <a class="code" href="group__rsb__doc__rsb.html#gab0702d7080d1699162e4201bc70cc5ee">rsb_mtx_rndr</a>(<span class="stringliteral">"pd.eps"</span>,mtxAp,512,512,<a class="code" href="rsb_8h.html#a77106fe2435306ef028060d0eb7dca14">RSB_MARF_EPS_B</a>)))</div>
+<div class="line">                        <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <a class="code" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a>(mtxAp);</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* also vectors can be loaded */</span></div>
+<div class="line">        <span class="keywordflow">if</span>(<a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>!=(errval = </div>
+<div class="line">                <a class="code" href="group__rsb__doc__rsb.html#gad071e0373a08f74ee7ae910e9e4fd140">rsb_file_vec_load</a>(<span class="stringliteral">"../vf.mtx"</span>,typecode,NULL,&vl )))</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        <span class="comment">/* we expecy vf.mtx to be 6 rows long */</span></div>
+<div class="line">        <span class="keywordflow">if</span>( vl != 6 )</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">if</span>(<a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>!=(errval = </div>
+<div class="line">                <a class="code" href="group__rsb__doc__rsb.html#gad071e0373a08f74ee7ae910e9e4fd140">rsb_file_vec_load</a>(<span class="stringliteral">"../vf.mtx"</span>,typecode,XV, NULL )))</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* matrices can be rendered from file to a pixelmap as well */</span></div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordtype">unsigned</span> <span class="keywordtype">char</span> pixmap[3*2*2];</div>
+<div class="line"></div>
+<div class="line">                <span class="keywordflow">if</span>(<a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>!=(errval =</div>
+<div class="line">                <a class="code" href="group__rsb__doc__rsb.html#ga4b45a74b985f5cbd869bc9a540951771">rsb_file_mtx_rndr</a>(pixmap,<span class="stringliteral">"../pd.mtx"</span>,2,2,2,<a class="code" href="rsb_8h.html#a53604f78febc54c616282c66bca02daf">RSB_MARF_RGB</a>)))</div>
+<div class="line">                        <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">if</span>(<a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> != <a class="code" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a>(<a class="code" href="rsb_8h.html#a2234a5e51156de6c95c3f8c2951ae09f">RSB_NULL_EXIT_OPTIONS</a>))</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        <span class="keywordflow">return</span> 0;</div>
+<div class="line">err:</div>
+<div class="line">        <a class="code" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror</a>(NULL,errval);</div>
+<div class="line">        <span class="keywordflow">return</span> -1;</div>
+<div class="line">}</div>
+<div class="line"></div>
+</div><!-- fragment --> <div class="fragment"><div class="line"><span class="comment">/*</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">Copyright (C) 2008-2015 Michele Martone</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">This file is part of librsb.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">librsb is free software; you can redistribute it and/or modify it</span></div>
+<div class="line"><span class="comment">under the terms of the GNU Lesser General Public License as published</span></div>
+<div class="line"><span class="comment">by the Free Software Foundation; either version 3 of the License, or</span></div>
+<div class="line"><span class="comment">(at your option) any later version.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">librsb is distributed in the hope that it will be useful, but WITHOUT</span></div>
+<div class="line"><span class="comment">ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or</span></div>
+<div class="line"><span class="comment">FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public</span></div>
+<div class="line"><span class="comment">License for more details.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">You should have received a copy of the GNU Lesser General Public</span></div>
+<div class="line"><span class="comment">License along with librsb; see the file COPYING.</span></div>
+<div class="line"><span class="comment">If not, see <http://www.gnu.org/licenses/>.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">*/</span><span class="comment"></span></div>
+<div class="line"><span class="comment">/*!</span></div>
+<div class="line"><span class="comment"> @file</span></div>
+<div class="line"><span class="comment"> @author Michele Martone</span></div>
+<div class="line"><span class="comment"> @brief A toy program implementing the power method</span></div>
+<div class="line"><span class="comment">        for computing matrix eigenvalues.</span></div>
+<div class="line"><span class="comment"> \ingroup rsb_doc_examples</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment"> \include power.c</span></div>
+<div class="line"><span class="comment">*/</span></div>
+<div class="line"></div>
+<div class="line"><span class="preprocessor">#include <stdio.h></span>      <span class="comment">// printf</span></div>
+<div class="line"><span class="preprocessor">#include <math.h></span>       <span class="comment">// sqrt</span></div>
+<div class="line"><span class="preprocessor">#include <stdlib.h></span>     <span class="comment">// calloc</span></div>
+<div class="line"><span class="preprocessor">#include <<a class="code" href="rsb_8h.html" title="This file declares the user interface functions and data structures for the librsb library...">rsb.h</a>></span></div>
+<div class="line"></div>
+<div class="line"><span class="keywordtype">int</span> main(<span class="keyword">const</span> <span class="keywordtype">int</span> argc, <span class="keywordtype">char</span> * <span class="keyword">const</span> argv[])</div>
+<div class="line">{</div>
+<div class="line">        <span class="keywordtype">int</span> WANT_VERBOSE = 0;</div>
+<div class="line">        <span class="keyword">struct </span>rsb_mtx_t *mtxAp = NULL;</div>
+<div class="line">        <span class="keyword">const</span> <span class="keywordtype">int</span> bs = <a class="code" href="rsb_8h.html#a3579d00f3b97cd569707f7c62e462322">RSB_DEFAULT_BLOCKING</a>;</div>
+<div class="line">        <span class="keywordtype">int</span> i;</div>
+<div class="line">        <span class="keyword">const</span> <span class="keywordtype">int</span> br = bs, bc = bs; <span class="comment">/* bs x bs blocked */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> errval = 0;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> nnzA = 4;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a>  nrA = 3;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a>  ncA = 3;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a> it = 0, maxit = 100;</div>
+<div class="line">        <span class="keyword">const</span> <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a>    IA[] = { 0, 1, 2, 0 };</div>
+<div class="line">        <span class="keyword">const</span> <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a>    JA[] = { 0, 1, 2, 2 };</div>
+<div class="line">        <span class="keyword">const</span> <a class="code" href="rsb__types_8h.html#ae7da5c374c2384c32084fc50ede06a4e">RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE</a> VA[] = { 11, 22, 33, 13 };</div>
+<div class="line">        <span class="keyword">const</span> <a class="code" href="rsb__types_8h.html#ae7da5c374c2384c32084fc50ede06a4e">RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE</a> ZERO = 0;</div>
+<div class="line"></div>
+<div class="line">        <a class="code" href="rsb__types_8h.html#ae7da5c374c2384c32084fc50ede06a4e">RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE</a> norm = 0.0, <span class="comment">/* nu */</span></div>
+<div class="line">        oldnorm = 1.0, <span class="comment">/* oldnorm */</span></div>
+<div class="line">        *b1 = NULL, *b2 = NULL,</div>
+<div class="line">        *bnow = NULL, *bnext = NULL;<span class="comment">/* b1 and b2 aliases */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode = <a class="code" href="rsb__types_8h.html#ac51619f9cbe0a9a4cbc55e0451bfb59d">RSB_NUMERICAL_TYPE_FIRST_BLAS</a>;</div>
+<div class="line">        <span class="keywordtype">size_t</span> ds = 0;</div>
+<div class="line">        <span class="comment">/* tolerance */</span></div>
+<div class="line">        <span class="keyword">const</span> <a class="code" href="rsb__types_8h.html#ae7da5c374c2384c32084fc50ede06a4e">RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE</a> tol = 1e-14;</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* library initialization */</span></div>
+<div class="line">        <span class="keywordflow">if</span>(<a class="code" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>(<a class="code" href="rsb_8h.html#add105c42e570c5c269680d437f8c51e2">RSB_NULL_INIT_OPTIONS</a>)!=<a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)</div>
+<div class="line">                <span class="keywordflow">return</span> -1;</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* allocation */</span></div>
+<div class="line">        mtxAp = <a class="code" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_from_coo_const</a>(VA,IA,JA,nnzA,</div>
+<div class="line">                        typecode,nrA,ncA,br,bc,<a class="code" href="rsb_8h.html#a0ea7640214ee34c87e483c475b15827d">RSB_FLAG_NOFLAGS</a>,NULL);</div>
+<div class="line">        <span class="keywordflow">if</span>(!mtxAp)</div>
+<div class="line">                <span class="keywordflow">return</span> -1;</div>
+<div class="line"></div>
+<div class="line">        ds = (nrA)*<span class="keyword">sizeof</span>(<a class="code" href="rsb__types_8h.html#ae7da5c374c2384c32084fc50ede06a4e">RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE</a>);</div>
+<div class="line">        b1 = calloc(1,ds);</div>
+<div class="line">        b2 = calloc(1,ds);</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">if</span>(! (b1 && b2))</div>
+<div class="line">        {</div>
+<div class="line">                errval = <a class="code" href="rsb_8h.html#a538215b32e908646c979a2e446ae5467">RSB_ERR_ENOMEM</a>;</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">for</span>( i = 0; i < nrA; ++i )</div>
+<div class="line">                b1[i] = 1;</div>
+<div class="line"></div>
+<div class="line">        bnow = b1, bnext = b2;<span class="comment">/* b,b' */</span></div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">while</span>( fabs(norm-oldnorm) > tol && it<maxit )</div>
+<div class="line">        {</div>
+<div class="line">                ++ it;</div>
+<div class="line">                oldnorm = norm;</div>
+<div class="line">                <span class="comment">/* b'<-Ab */</span></div>
+<div class="line">                <span class="keywordflow">if</span>(( <a class="code" href="group__rsb__doc__rsb.html#ga4a16a82d289c76a437915db449553d4d">rsb_spmv</a>(<a class="code" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a>,NULL,mtxAp,bnow,</div>
+<div class="line">                        1,&ZERO,bnext,1)) != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> )</div>
+<div class="line">                        <span class="keywordflow">goto</span> err;</div>
+<div class="line">                <span class="comment">/* nu<-||Ab||^2 */</span></div>
+<div class="line">                norm = 0;</div>
+<div class="line">                <span class="keywordflow">for</span>(i=0;i<nrA;++i) </div>
+<div class="line">                        norm += bnext[i]*bnext[i];</div>
+<div class="line">                <span class="comment">/* nu<-||Ab|| */</span></div>
+<div class="line">                norm = sqrt(norm);</div>
+<div class="line">                norm = 1.0/norm;</div>
+<div class="line">                <span class="comment">/* b'<- Ab / ||Ab|| */</span></div>
+<div class="line">                <span class="keywordflow">for</span>(i=0;i<nrA;++i)</div>
+<div class="line">                        bnext[i] *= norm;</div>
+<div class="line">                norm = 1.0/norm;</div>
+<div class="line">                printf(<span class="stringliteral">"it:%d norm:%lg norm diff:%lg\n"</span>,it,norm,norm-oldnorm);</div>
+<div class="line"></div>
+<div class="line">                {<span class="keywordtype">void</span> *tmp=bnow;bnow=bnext;bnext=tmp;<span class="comment">/* pointers swap */</span>}</div>
+<div class="line">                <span class="keywordflow">if</span>(WANT_VERBOSE)</div>
+<div class="line">                {</div>
+<div class="line">                        printf(<span class="stringliteral">"norm:%lg\n"</span>,norm);</div>
+<div class="line">                        <span class="keywordflow">if</span>(isinf(norm))</div>
+<div class="line">                        <span class="comment">/* isinf is a C99 feature (need correct</span></div>
+<div class="line"><span class="comment">                         * compilation flags) */</span></div>
+<div class="line">                                <span class="keywordflow">goto</span> err;</div>
+<div class="line"></div>
+<div class="line">                        <span class="keywordflow">for</span>(i=0;i<2;++i)</div>
+<div class="line">                                printf(<span class="stringliteral">"x[%d]=%lg\n"</span>,i,((<span class="keywordtype">double</span>*)bnext)[i]);</div>
+<div class="line">                }</div>
+<div class="line">        }</div>
+<div class="line">        <span class="comment">/* the biggest eigenvalue should be in bnow */</span></div>
+<div class="line"></div>
+<div class="line">        <a class="code" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a>(mtxAp);</div>
+<div class="line">        free(b1);</div>
+<div class="line">        free(b2);</div>
+<div class="line">        <span class="keywordflow">if</span>(<a class="code" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a>(<a class="code" href="rsb_8h.html#a2234a5e51156de6c95c3f8c2951ae09f">RSB_NULL_EXIT_OPTIONS</a>)!=<a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        <span class="keywordflow">if</span>( it == maxit )</div>
+<div class="line">        {</div>
+<div class="line">                printf(<span class="stringliteral">"ERROR: hit iterations limit without convergence!"</span>);</div>
+<div class="line">                errval=<a class="code" href="rsb_8h.html#ad46ebc803d7cad695babdc7d8c709828">RSB_ERR_GENERIC_ERROR</a>;</div>
+<div class="line">        }</div>
+<div class="line">        <span class="keywordflow">return</span> 0;</div>
+<div class="line">err:</div>
+<div class="line">        <a class="code" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror</a>(NULL,errval);</div>
+<div class="line">        <span class="keywordflow">return</span> -1;</div>
+<div class="line">}</div>
+<div class="line"></div>
+</div><!-- fragment --> <div class="fragment"><div class="line"><span class="comment">! </span></div>
+<div class="line"><span class="comment">! Copyright (C) 2008-2016 Michele Martone</span></div>
+<div class="line"><span class="comment">! </span></div>
+<div class="line"><span class="comment">! This file is part of librsb.</span></div>
+<div class="line"><span class="comment">! </span></div>
+<div class="line"><span class="comment">! librsb is free software; you can redistribute it and/or modify it</span></div>
+<div class="line"><span class="comment">! under the terms of the GNU Lesser General Public License as published</span></div>
+<div class="line"><span class="comment">! by the Free Software Foundation; either version 3 of the License, or</span></div>
+<div class="line"><span class="comment">! (at your option) any later version.</span></div>
+<div class="line"><span class="comment">! </span></div>
+<div class="line"><span class="comment">! librsb is distributed in the hope that it will be useful, but WITHOUT</span></div>
+<div class="line"><span class="comment">! ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or</span></div>
+<div class="line"><span class="comment">! FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public</span></div>
+<div class="line"><span class="comment">! License for more details.</span></div>
+<div class="line"><span class="comment">! </span></div>
+<div class="line"><span class="comment">! You should have received a copy of the GNU Lesser General Public</span></div>
+<div class="line"><span class="comment">! License along with librsb; see the file COPYING.</span></div>
+<div class="line"><span class="comment">! If not, see <http://www.gnu.org/licenses/>.</span></div>
+<div class="line"><span class="comment">! </span></div>
+<div class="line"></div>
+<div class="line"><span class="keyword">      SUBROUTINE </span>blas_sparse_mod_example(res)</div>
+<div class="line">      <span class="keywordtype">USE </span><a class="code" href="classblas__sparse.html">blas_sparse</a></div>
+<div class="line">      <span class="keywordtype">USE </span><a class="code" href="classrsb.html">rsb</a> <span class="comment">! For the second part of the example</span></div>
+<div class="line">      <span class="keywordtype">IMPLICIT NONE</span></div>
+<div class="line">      <span class="keywordtype">INTEGER</span> :: res, istat = 0, i</div>
+<div class="line">      <span class="keywordtype">TYPE(</span>c_ptr<span class="keywordtype">)</span>,<span class="keywordtype">TARGET</span> :: mtxap = c_null_ptr <span class="comment">! matrix pointer</span></div>
+<div class="line">      <span class="keywordtype">INTEGER</span> :: a</div>
+<div class="line">      <span class="keywordtype">INTEGER</span>,<span class="keywordtype">PARAMETER</span> :: transn = <a class="code" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f">blas_no_trans</a></div>
+<div class="line">      <span class="keywordtype">INTEGER</span>,<span class="keywordtype">PARAMETER</span> :: incx = 1</div>
+<div class="line">      <span class="keywordtype">INTEGER</span>,<span class="keywordtype">PARAMETER</span> :: incy = 1</div>
+<div class="line">      <span class="keywordtype">REAL(KIND=8)</span>,<span class="keywordtype">PARAMETER</span> :: alpha = 3</div>
+<div class="line"><span class="comment">! Symmetric (declared via lower triangle) matrix based example, e.g.:</span></div>
+<div class="line"><span class="comment">! 1 0</span></div>
+<div class="line"><span class="comment">! 1 1</span></div>
+<div class="line">      <span class="comment">! declaration of VA,IA,JA </span></div>
+<div class="line">      <span class="comment">!INTEGER,PARAMETER :: nr = 100</span></div>
+<div class="line">      <span class="keywordtype">INTEGER</span>,<span class="keywordtype">PARAMETER</span> :: nr = 20</div>
+<div class="line">      <span class="keywordtype">INTEGER</span>,<span class="keywordtype">PARAMETER</span> :: nc = nr</div>
+<div class="line">      <span class="keywordtype">INTEGER</span>,<span class="keywordtype">PARAMETER</span> :: nnz = (nr*(nr+1))/2 <span class="comment">! half the square</span></div>
+<div class="line">      <span class="keywordtype">INTEGER</span> :: nt = 0</div>
+<div class="line">      <span class="keywordtype">INTEGER</span> :: ic, ir</div>
+<div class="line">      <span class="keywordtype">INTEGER</span>,<span class="keywordtype">PARAMETER</span> :: ia(nnz) = (/ (((ir), ic=1,ir), ir=1,nr ) /) <span class="comment">! (/1, 2, 2/)</span></div>
+<div class="line">      <span class="keywordtype">INTEGER</span>,<span class="keywordtype">PARAMETER</span> :: ja(nnz) = (/ (((ic), ic=1,ir), ir=1,nr ) /) <span class="comment">! (/1, 1, 2/)</span></div>
+<div class="line">      <span class="keywordtype">REAL(KIND=8)</span>,<span class="keywordtype">PARAMETER</span> :: va(nnz) = (/ ((1, ic=1,ir), ir=1,nr ) /) <span class="comment">! (/1, 1, 1/)</span></div>
+<div class="line">      <span class="keywordtype">REAL(KIND=8)</span> :: x(nc) = (/((1), ir=1,nc)/) <span class="comment">! reference x ! (/1, 1/)</span></div>
+<div class="line">      <span class="keywordtype">REAL(KIND=8)</span>,<span class="keywordtype">PARAMETER</span> :: cy(nr) = (/((alpha+alpha*nr), ir=1,nr)/) <span class="comment">! reference cy after ! (/9, 9/)</span></div>
+<div class="line">      <span class="keywordtype">REAL(KIND=8)</span> :: y(nr) = (/((alpha), ir=1,nr)/) <span class="comment">! y will be overwritten ! (/3, 3/)</span></div>
+<div class="line">      <span class="comment">! First example part: pure blas_sparse code.</span></div>
+<div class="line">      res = 0</div>
+<div class="line">      CALL <a class="code" href="classblas__sparse.html#acf14608f8b0375ca133b7f850bde3b50">duscr_begin</a>(nr,nc,a,res)</div>
+<div class="line">      <span class="keywordflow">IF</span> (res.NE.0) goto 9999</div>
+<div class="line">      CALL <a class="code" href="classblas__sparse.html#a469df92a4d25a9554fb1d79cdac1de84">ussp</a>(a,<a class="code" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a>,istat)</div>
+<div class="line">      <span class="keywordflow">IF</span> (istat.NE.0) goto 9997</div>
+<div class="line">      CALL <a class="code" href="classblas__sparse.html#a469df92a4d25a9554fb1d79cdac1de84">ussp</a>(a,<a class="code" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0">blas_rsb_spmv_autotuning_on</a>,istat) <span class="comment">! (experimental) turns auto-tuning + thread setting on</span></div>
+<div class="line">      <span class="keywordflow">IF</span> (istat.NE.0) print *,<span class="stringliteral">"autotuning returned nonzero:"</span>, istat &</div>
+<div class="line">       &,<span class="stringliteral">" ...did you enable autotuning ?"</span></div>
+<div class="line">      <span class="comment">!</span></div>
+<div class="line">      <span class="comment">! First style example </span></div>
+<div class="line">      CALL <a class="code" href="interfaceblas__sparse_1_1uscr__insert__entries.html">uscr_insert_entries</a>(a,nnz,va,ia,ja,istat)</div>
+<div class="line">      <span class="keywordflow">IF</span> (istat.NE.0) goto 9997</div>
+<div class="line">      CALL <a class="code" href="classblas__sparse.html#a48f1e1b82322910d45a1b2455421745f">uscr_end</a>(a,istat)</div>
+<div class="line">      <span class="keywordflow">IF</span> (istat.NE.0) goto 9997</div>
+<div class="line">      <span class="comment">! CALL ussp(A,blas_rsb_duplicates_sum,istat)</span></div>
+<div class="line">      <span class="comment">! CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat) ! uncomment this to activate add of coefficients to pattern</span></div>
+<div class="line">      CALL <a class="code" href="classblas__sparse.html#a1e0eb1ccd8ffbf49baefe455a248f7fe">usgp</a>(a,<a class="code" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0">blas_rsb_spmv_autotuning_on</a>,nt)  <span class="comment">! (experimental)</span></div>
+<div class="line">      <span class="keywordflow">IF</span> (nt.NE.0) print*,<span class="stringliteral">"autotuner chose "</span>,nt,<span class="stringliteral">" threads"</span></div>
+<div class="line">      CALL <a class="code" href="classblas__sparse.html#a469df92a4d25a9554fb1d79cdac1de84">ussp</a>(a,<a class="code" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7">blas_rsb_spmv_autotuning_off</a>,istat) <span class="comment">! (experimental) turns auto-tuning + thread setting off</span></div>
+<div class="line">      <span class="keywordflow">IF</span> (istat.NE.0) goto 9997</div>
+<div class="line"></div>
+<div class="line">      CALL <a class="code" href="interfaceblas__sparse_1_1usmv.html">usmv</a>(transn,alpha,a,x,incx,y,incy,istat)</div>
+<div class="line">      <span class="keywordflow">IF</span> (istat.NE.0) goto 9997</div>
+<div class="line">      <span class="comment">!</span></div>
+<div class="line">      <span class="keywordflow">DO</span> i = 1, nr</div>
+<div class="line">            <span class="keywordflow">IF</span> (y(i).NE.cy(i)) print *, <span class="stringliteral">"first check results are not ok"</span></div>
+<div class="line">            <span class="keywordflow">IF</span> (y(i).NE.cy(i)) goto 9997</div>
+<div class="line">      <span class="keywordflow">END DO</span></div>
+<div class="line">      <span class="comment">!</span></div>
+<div class="line">      y(:) = alpha <span class="comment">! reset</span></div>
+<div class="line">      <span class="comment">!</span></div>
+<div class="line">      <span class="comment">! Second style example </span></div>
+<div class="line">      CALL <a class="code" href="classblas__sparse.html#a469df92a4d25a9554fb1d79cdac1de84">ussp</a>(a,<a class="code" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a>,istat) <span class="comment">! (experimental) turns auto-tuning + thread setting on</span></div>
+<div class="line">      <span class="keywordflow">IF</span> (istat.NE.0) goto 9997</div>
+<div class="line">      CALL <a class="code" href="interfaceblas__sparse_1_1usmv.html">usmv</a>(transn,alpha,a,x,incx,y,incy,istat)</div>
+<div class="line">      CALL <a class="code" href="interfaceblas__sparse_1_1usmm.html">usmm</a>(<a class="code" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e">blas_colmajor</a>,transn,1, alpha,a,x,nr,y,nc,istat) <span class="comment">! Equivalent to the above (as long as incx=incy=1).</span></div>
+<div class="line">      CALL <a class="code" href="interfaceblas__sparse_1_1usmm.html">usmm</a>(<a class="code" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e">blas_colmajor</a>,transn,1,-alpha,a,x,nr,y,nc,istat) <span class="comment">! Subtract the last usmm call contribution.</span></div>
+<div class="line">      <span class="keywordflow">IF</span> (istat.NE.0) goto 9997</div>
+<div class="line">      <span class="comment">!</span></div>
+<div class="line">      <span class="keywordflow">DO</span> i = 1, nr</div>
+<div class="line">            <span class="keywordflow">IF</span> (y(i).NE.cy(i)) print *,<span class="stringliteral">"second check results are not ok"</span></div>
+<div class="line">            <span class="keywordflow">IF</span> (y(i).NE.cy(i)) goto 9997</div>
+<div class="line">      <span class="keywordflow">END DO</span></div>
+<div class="line">      <span class="comment">!</span></div>
+<div class="line">      print *, <span class="stringliteral">"check results are ok"</span></div>
+<div class="line">      </div>
+<div class="line">      <span class="comment">! Second part of the example: access to the rsb.h interface via</span></div>
+<div class="line">      <span class="comment">! the ISO C Binding interface.</span></div>
+<div class="line">      mtxap = <a class="code" href="interfaceblas__sparse_1_1rsb__blas__get__mtx.html">rsb_blas_get_mtx</a>(a) <span class="comment">! get pointer to rsb structure (as in the rsb.h API)</span></div>
+<div class="line">      <span class="keywordflow">IF</span>(nr.LT.5) istat = <a class="code" href="interfacersb_1_1rsb__file__mtx__save.html">rsb_file_mtx_save</a>(mtxap,c_null_ptr) <span class="comment">! write to stdout (only if matrix small enough)</span></div>
+<div class="line"></div>
+<div class="line">      goto 9998</div>
+<div class="line">9997      res = -1</div>
+<div class="line">9998      <span class="keywordflow">CONTINUE</span></div>
+<div class="line">      CALL <a class="code" href="classblas__sparse.html#a8a3b6cd055048ab5e15b1b18be291f32">usds</a>(a,istat)</div>
+<div class="line">      <span class="keywordflow">IF</span> (istat.NE.0) res = -1</div>
+<div class="line">9999      <span class="keywordflow">CONTINUE</span></div>
+<div class="line"><span class="keyword">      end SUBROUTINE </span>blas_sparse_mod_example</div>
+<div class="line"></div>
+<div class="line">      <span class="keyword">PROGRAM</span> main</div>
+<div class="line">      <span class="keywordtype">USE </span><a class="code" href="classrsb.html">rsb</a><span class="keywordtype">, ONLY</span>: rsb_lib_init, rsb_lib_exit, C_PTR, C_NULL_PTR,&</div>
+<div class="line">       & rsb_io_want_extra_verbose_interface,rsb_io_want_verbose_tuning,&</div>
+<div class="line">       & <a class="code" href="interfacersb_1_1rsb__lib__set__opt.html">rsb_lib_set_opt</a></div>
+<div class="line">      <span class="keywordtype">USE </span>iso_c_binding</div>
+<div class="line">      <span class="keywordtype">IMPLICIT NONE</span></div>
+<div class="line">      <span class="keywordtype">INTEGER</span> :: res = 0, passed = 0, failed = 0</div>
+<div class="line">      <span class="comment">!TYPE(C_PTR),PARAMETER :: EO = RSB_NULL_EXIT_OPTIONS</span></div>
+<div class="line">      <span class="comment">!TYPE(C_PTR),PARAMETER :: IO = RSB_NULL_INIT_OPTIONS</span></div>
+<div class="line">      <span class="comment">! Note: using C_NULL_PTR instead of the previous lines becase of http://gcc.gnu.org/bugzilla/show_bug.cgi?id=59411</span></div>
+<div class="line">      <span class="keywordtype">TYPE(</span>c_ptr<span class="keywordtype">)</span>,<span class="keywordtype">PARAMETER</span> :: eo = c_null_ptr</div>
+<div class="line">      <span class="keywordtype">TYPE(</span>c_ptr<span class="keywordtype">)</span>,<span class="keywordtype">PARAMETER</span> :: io = c_null_ptr</div>
+<div class="line">      <span class="keywordtype">INTEGER</span>,<span class="keywordtype">TARGET</span>::ione=1</div>
+<div class="line">      res = <a class="code" href="interfacersb_1_1rsb__lib__init.html">rsb_lib_init</a>(io)</div>
+<div class="line">      res = <a class="code" href="interfacersb_1_1rsb__lib__set__opt.html">rsb_lib_set_opt</a>(rsb_io_want_verbose_tuning,c_loc(ione))</div>
+<div class="line">      </div>
+<div class="line">      CALL blas_sparse_mod_example(res)</div>
+<div class="line">      <span class="keywordflow">IF</span> (res.LT.0) failed = failed + 1</div>
+<div class="line">      <span class="keywordflow">IF</span> (res.EQ.0) passed = passed + 1</div>
+<div class="line"></div>
+<div class="line">      res = <a class="code" href="interfacersb_1_1rsb__lib__exit.html">rsb_lib_exit</a>(eo)</div>
+<div class="line">      </div>
+<div class="line">      print *, <span class="stringliteral">"FAILED:"</span>, failed</div>
+<div class="line">      print *, <span class="stringliteral">"PASSED:"</span>, passed</div>
+<div class="line">      <span class="keywordflow">IF</span> (failed .GT. 0) <span class="keywordflow">THEN</span></div>
+<div class="line">       stop 1</div>
+<div class="line">      <span class="keywordflow">END IF</span></div>
+<div class="line"><span class="keyword">      END PROGRAM</span></div>
+</div><!-- fragment --> <div class="fragment"><div class="line"><span class="comment">! </span></div>
+<div class="line"><span class="comment">! Copyright (C) 2008-2016 Michele Martone</span></div>
+<div class="line"><span class="comment">! </span></div>
+<div class="line"><span class="comment">! This file is part of librsb.</span></div>
+<div class="line"><span class="comment">! </span></div>
+<div class="line"><span class="comment">! librsb is free software; you can redistribute it and/or modify it</span></div>
+<div class="line"><span class="comment">! under the terms of the GNU Lesser General Public License as published</span></div>
+<div class="line"><span class="comment">! by the Free Software Foundation; either version 3 of the License, or</span></div>
+<div class="line"><span class="comment">! (at your option) any later version.</span></div>
+<div class="line"><span class="comment">! </span></div>
+<div class="line"><span class="comment">! librsb is distributed in the hope that it will be useful, but WITHOUT</span></div>
+<div class="line"><span class="comment">! ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or</span></div>
+<div class="line"><span class="comment">! FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public</span></div>
+<div class="line"><span class="comment">! License for more details.</span></div>
+<div class="line"><span class="comment">! </span></div>
+<div class="line"><span class="comment">! You should have received a copy of the GNU Lesser General Public</span></div>
+<div class="line"><span class="comment">! License along with librsb; see the file COPYING.</span></div>
+<div class="line"><span class="comment">! If not, see <http://www.gnu.org/licenses/>.</span></div>
+<div class="line"><span class="comment">! </span></div>
+<div class="line"><span class="keyword">      SUBROUTINE </span>rsb_mod_example1(res)</div>
+<div class="line">      <span class="keywordtype">USE </span><a class="code" href="classrsb.html">rsb</a></div>
+<div class="line">      <span class="keywordtype">USE </span>iso_c_binding</div>
+<div class="line">      <span class="keywordtype">IMPLICIT NONE</span></div>
+<div class="line">      <span class="keywordtype">INTEGER</span> ::res</div>
+<div class="line">      <span class="keywordtype">INTEGER</span>,<span class="keywordtype">TARGET</span> :: istat = 0, i</div>
+<div class="line">      <span class="keywordtype">INTEGER</span> :: transt = rsb_transposition_n <span class="comment">! Please note that this interface is unfinished</span></div>
+<div class="line">      <span class="keywordtype">INTEGER</span> :: incx = 1, incy = 1</div>
+<div class="line">      <span class="keywordtype">REAL(KIND=8)</span>,<span class="keywordtype">TARGET</span> :: alpha = 3, beta = 1</div>
+<div class="line"><span class="comment">! 1 1</span></div>
+<div class="line"><span class="comment">! 1 1</span></div>
+<div class="line">      <span class="comment">! declaration of VA,IA,JA </span></div>
+<div class="line">      <span class="keywordtype">INTEGER</span> :: nnz = 4</div>
+<div class="line">      <span class="keywordtype">INTEGER</span> :: nr = 2</div>
+<div class="line">      <span class="keywordtype">INTEGER</span> :: nc = 2</div>
+<div class="line">      <span class="keywordtype">INTEGER</span> :: nrhs = 1</div>
+<div class="line">      <span class="keywordtype">INTEGER</span> :: order = rsb_flag_want_column_major_order <span class="comment">! rhs layout</span></div>
+<div class="line">      <span class="keywordtype">INTEGER</span> :: flags = rsb_flag_noflags </div>
+<div class="line">      <span class="keywordtype">INTEGER</span>,<span class="keywordtype">TARGET</span> :: ia(4) = (/0, 1, 1,0/)</div>
+<div class="line">      <span class="keywordtype">INTEGER</span>,<span class="keywordtype">TARGET</span> :: ja(4) = (/0, 0, 1,1/)</div>
+<div class="line">      <span class="keywordtype">REAL(KIND=8)</span>,<span class="keywordtype">TARGET</span> :: va(4) = (/1,1,1,1/)</div>
+<div class="line">      <span class="keywordtype">REAL(KIND=8)</span>,<span class="keywordtype">TARGET</span> :: x(2) = (/1, 1/)<span class="comment">! reference x </span></div>
+<div class="line">      <span class="keywordtype">REAL(KIND=8)</span>,<span class="keywordtype">TARGET</span> :: cy(2) = (/9, 9/)<span class="comment">! reference cy after </span></div>
+<div class="line">      <span class="keywordtype">REAL(KIND=8)</span>,<span class="keywordtype">TARGET</span> :: y(2) = (/3, 3/)<span class="comment">! y will be overwritten</span></div>
+<div class="line">      <span class="keywordtype">TYPE(</span>c_ptr<span class="keywordtype">)</span>,<span class="keywordtype">TARGET</span> :: mtxap = c_null_ptr <span class="comment">! matrix pointer</span></div>
+<div class="line">      <span class="keywordtype">REAL(KIND=8)</span> :: tmax = 2.0 <span class="comment">! tuning max time</span></div>
+<div class="line">      <span class="keywordtype">INTEGER</span> :: titmax = 2 <span class="comment">! tuning max iterations</span></div>
+<div class="line">      <span class="keywordtype">INTEGER</span>,<span class="keywordtype">TARGET</span> :: ont = 0     <span class="comment">! optimal number of threads</span></div>
+<div class="line"></div>
+<div class="line">      res = 0</div>
+<div class="line">      mtxap = <a class="code" href="interfacersb_1_1rsb__mtx__alloc__from__coo__const.html">rsb_mtx_alloc_from_coo_const</a>(c_loc(va),c_loc(ia),c_loc(ja)&</div>
+<div class="line">       &,nnz,&</div>
+<div class="line">       & rsb_numerical_type_double,nr,nc,1,1,flags,c_loc(istat))</div>
+<div class="line"></div>
+<div class="line">      <span class="keywordflow">IF</span> (istat.NE.rsb_err_no_error) goto 9997</div>
+<div class="line"></div>
+<div class="line">      istat = <a class="code" href="interfacersb_1_1rsb__file__mtx__save.html">rsb_file_mtx_save</a>(mtxap,c_null_ptr)</div>
+<div class="line"></div>
+<div class="line">      <span class="comment">! Structure autotuning:</span></div>
+<div class="line">      istat = <a class="code" href="interfacersb_1_1rsb__tune__spmm.html">rsb_tune_spmm</a>(c_loc(mtxap),c_null_ptr,c_null_ptr,titmax,&</div>
+<div class="line">       & tmax,&</div>
+<div class="line">       & transt,c_loc(alpha),c_null_ptr,nrhs,order,c_loc(x),nr,&</div>
+<div class="line">       & c_loc(beta),c_loc(y),nc)</div>
+<div class="line"></div>
+<div class="line">      <span class="keywordflow">IF</span> (istat.NE.rsb_err_no_error) goto 9997</div>
+<div class="line"></div>
+<div class="line">      <span class="comment">! Thread count autotuning:</span></div>
+<div class="line">      istat = <a class="code" href="interfacersb_1_1rsb__tune__spmm.html">rsb_tune_spmm</a>(c_null_ptr,c_null_ptr,c_loc(ont),titmax,&</div>
+<div class="line">       & tmax,&</div>
+<div class="line">       & transt,c_loc(alpha),mtxap,nrhs,order,c_loc(x),nr,c_loc(beta),&</div>
+<div class="line">       & c_loc(y),nc)</div>
+<div class="line">      print *, <span class="stringliteral">"Optimal number of threads:"</span>, ont</div>
+<div class="line"></div>
+<div class="line">      y(:) = (/3, 3/)<span class="comment">! reference y </span></div>
+<div class="line">      <span class="keywordflow">IF</span> (istat.NE.rsb_err_no_error) goto 9997</div>
+<div class="line">      </div>
+<div class="line">      istat = <a class="code" href="interfacersb_1_1rsb__file__mtx__save.html">rsb_file_mtx_save</a>(mtxap,c_null_ptr)</div>
+<div class="line">      <span class="keywordflow">IF</span> (istat.NE.rsb_err_no_error) goto 9997</div>
+<div class="line"></div>
+<div class="line">      istat = <a class="code" href="interfacersb_1_1rsb__spmv.html">rsb_spmv</a>(transt,c_loc(alpha),mtxap,c_loc(x),incx,&</div>
+<div class="line">       & c_loc(beta),c_loc(y),incy)</div>
+<div class="line">      <span class="keywordflow">IF</span> (istat.NE.rsb_err_no_error) goto 9997</div>
+<div class="line">      <span class="keywordflow">DO</span> i = 1, 2</div>
+<div class="line">            <span class="keywordflow">IF</span> (y(i).NE.cy(i)) print *, </div>
+<div class="line"><span class="stringliteral">"type=d dims=2x2 sym=g diag=g &      &blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"</span></div>
+<div class="line">            <span class="keywordflow">IF</span> (y(i).NE.cy(i)) goto 9997</div>
+<div class="line">      <span class="keywordflow">END DO</span></div>
+<div class="line">      print*,</div>
+<div class="line"><span class="stringliteral">"type=d dims=2x2 sym=g diag=g blocks=1x1 usmv alpha= 3&       & beta= 1 incx=1 incy=1 trans=n is ok"</span></div>
+<div class="line">      goto 9998</div>
+<div class="line">9997      res = -1</div>
+<div class="line">9998      <span class="keywordflow">CONTINUE</span></div>
+<div class="line">      mtxap = <a class="code" href="interfacersb_1_1rsb__mtx__free.html">rsb_mtx_free</a>(mtxap)</div>
+<div class="line">      <span class="keywordflow">IF</span> (istat.NE.rsb_err_no_error) res = -1 </div>
+<div class="line"><span class="comment">! 9999      CONTINUE</span></div>
+<div class="line">      istat = <a class="code" href="interfacersb_1_1rsb__perror.html">rsb_perror</a>(c_null_ptr,istat)</div>
+<div class="line"><span class="keyword">      end SUBROUTINE </span>rsb_mod_example1</div>
+<div class="line"></div>
+<div class="line"><span class="keyword">      SUBROUTINE </span>rsb_mod_example2(res)</div>
+<div class="line">      <span class="keywordtype">USE </span><a class="code" href="classrsb.html">rsb</a></div>
+<div class="line">      <span class="keywordtype">USE </span>iso_c_binding</div>
+<div class="line">      <span class="keywordtype">IMPLICIT NONE</span></div>
+<div class="line">      <span class="keywordtype">INTEGER</span>,<span class="keywordtype">TARGET</span> :: errval</div>
+<div class="line">      <span class="keywordtype">INTEGER</span> :: res</div>
+<div class="line">      <span class="keywordtype">INTEGER</span> :: transt = rsb_transposition_n  <span class="comment">! no transposition</span></div>
+<div class="line">      <span class="keywordtype">INTEGER</span> :: incx = 1, incb = 1        <span class="comment">! X, B vectors increment</span></div>
+<div class="line">      <span class="keywordtype">REAL(KIND=8)</span>,<span class="keywordtype">TARGET</span> :: alpha = 3,beta = 1</div>
+<div class="line">      <span class="keywordtype">INTEGER</span> :: nnza = 4, nra = 3, nca = 3     <span class="comment">! nonzeroes, rows, columns of matrix A</span></div>
+<div class="line">      <span class="keywordtype">INTEGER</span>,<span class="keywordtype">TARGET</span> :: ia(4) = (/1, 2, 3, 3/)  <span class="comment">! row    indices</span></div>
+<div class="line">      <span class="keywordtype">INTEGER</span>,<span class="keywordtype">TARGET</span> :: ja(4) = (/1, 2, 1, 3/)  <span class="comment">! column indices</span></div>
+<div class="line">      <span class="keywordtype">INTEGER(C_SIGNED_CHAR)</span> :: typecode = rsb_numerical_type_double</div>
+<div class="line">      <span class="keywordtype">INTEGER</span> :: flags =rsb_flag_default_matrix_flags+rsb_flag_symmetric</div>
+<div class="line">      <span class="keywordtype">REAL(KIND=8)</span>,<span class="keywordtype">TARGET</span> :: va(4) = (/11.0, 22.0, 13.0, 33.0/) <span class="comment">! coefficients</span></div>
+<div class="line">      <span class="keywordtype">REAL(KIND=8)</span>,<span class="keywordtype">TARGET</span> :: x(3) = (/   0,    0,    0/)</div>
+<div class="line">      <span class="keywordtype">REAL(KIND=8)</span>,<span class="keywordtype">TARGET</span> :: b(3) = (/-1.0, -2.0, -2.0/)</div>
+<div class="line">      <span class="keywordtype">TYPE(</span>c_ptr<span class="keywordtype">)</span>,<span class="keywordtype">TARGET</span>  :: mtxap = c_null_ptr</div>
+<div class="line">      <span class="keywordtype">TYPE(</span>c_ptr<span class="keywordtype">)</span>  :: mtxapp = c_null_ptr</div>
+<div class="line">      <span class="keywordtype">REAL(KIND=8)</span>,<span class="keywordtype">TARGET</span> :: etime = 0.0</div>
+<div class="line">      <span class="comment">!TYPE(C_PTR),PARAMETER :: EO = RSB_NULL_EXIT_OPTIONS</span></div>
+<div class="line">      <span class="comment">!TYPE(C_PTR),PARAMETER :: IO = RSB_NULL_INIT_OPTIONS</span></div>
+<div class="line">      <span class="comment">! Note: using C_NULL_PTR instead of the previous lines becase of http://gcc.gnu.org/bugzilla/show_bug.cgi?id=59411</span></div>
+<div class="line">      <span class="keywordtype">TYPE(</span>c_ptr<span class="keywordtype">)</span>,<span class="keywordtype">PARAMETER</span> :: eo = c_null_ptr</div>
+<div class="line">      <span class="keywordtype">TYPE(</span>c_ptr<span class="keywordtype">)</span>,<span class="keywordtype">PARAMETER</span> :: io = c_null_ptr</div>
+<div class="line"></div>
+<div class="line">      errval = <a class="code" href="interfacersb_1_1rsb__lib__init.html">rsb_lib_init</a>(io)                <span class="comment">! librsb initialization</span></div>
+<div class="line">      <span class="keywordflow">IF</span> (errval.NE.rsb_err_no_error) &</div>
+<div class="line">       & stop <span class="stringliteral">"error calling rsb_lib_init"</span></div>
+<div class="line"><span class="preprocessor">#if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 5)</span></div>
+<div class="line"><span class="preprocessor"></span><span class="preprocessor">#define RSB_SKIP_BECAUSE_OLD_COMPILER 1</span></div>
+<div class="line"><span class="preprocessor"></span><span class="preprocessor">#endif</span></div>
+<div class="line"><span class="preprocessor"></span><span class="preprocessor">#ifndef RSB_SKIP_BECAUSE_OLD_COMPILER</span></div>
+<div class="line"><span class="preprocessor"></span>      mtxap = <a class="code" href="interfacersb_1_1rsb__mtx__alloc__from__coo__begin.html">rsb_mtx_alloc_from_coo_begin</a>(nnza,typecode,nra,nca,flags,&</div>
+<div class="line">       & c_loc(errval)) <span class="comment">! begin matrix creation</span></div>
+<div class="line">      errval = <a class="code" href="interfacersb_1_1rsb__mtx__set__vals.html">rsb_mtx_set_vals</a>(mtxap,&</div>
+<div class="line">       & c_loc(va),c_loc(ia),c_loc(ja),nnza,flags) <span class="comment">! insert some nonzeroes</span></div>
+<div class="line">      mtxapp = c_loc(mtxap) <span class="comment">! Old compilers like e.g.: Gfortran 4.4.7 will NOT compile this.</span></div>
+<div class="line">      <span class="keywordflow">IF</span> (errval.NE.rsb_err_no_error) &</div>
+<div class="line">       & stop <span class="stringliteral">"error calling rsb_mtx_set_vals"</span></div>
+<div class="line">      errval = <a class="code" href="interfacersb_1_1rsb__mtx__alloc__from__coo__end.html">rsb_mtx_alloc_from_coo_end</a>(mtxapp)                   <span class="comment">! end matrix creation</span></div>
+<div class="line">      <span class="keywordflow">IF</span> (errval.NE.rsb_err_no_error) &</div>
+<div class="line">       & stop <span class="stringliteral">"error calling rsb_mtx_alloc_from_coo_end"</span></div>
+<div class="line">      errval = <a class="code" href="interfacersb_1_1rsb__spmv.html">rsb_spmv</a>(transt,c_loc(alpha),mtxap,c_loc(x),&</div>
+<div class="line">       & incx,c_loc(beta),c_loc(b),incb) <span class="comment">! X := X + (3) * A * B </span></div>
+<div class="line">      <span class="keywordflow">IF</span> (errval.NE.rsb_err_no_error)&</div>
+<div class="line">       & stop <span class="stringliteral">"error calling rsb_spmv"</span></div>
+<div class="line">      mtxap = <a class="code" href="interfacersb_1_1rsb__mtx__free.html">rsb_mtx_free</a>(mtxap)                                 <span class="comment">! destroy matrix</span></div>
+<div class="line"></div>
+<div class="line">      <span class="comment">! The following is optional and depends on configure options, so it is allowed to fail</span></div>
+<div class="line">      errval = <a class="code" href="interfacersb_1_1rsb__lib__get__opt.html">rsb_lib_get_opt</a>(rsb_io_want_librsb_etime,c_loc(etime))</div>
+<div class="line">      <span class="keywordflow">IF</span> (errval.EQ.rsb_err_no_error)&</div>
+<div class="line">       & print*,<span class="stringliteral">"Time spent in librsb is:"</span>,etime</div>
+<div class="line">      <span class="comment">! IF (errval.NE.0)STOP "error calling rsb_lib_get_opt" </span></div>
+<div class="line">      errval = rsb_err_no_error</div>
+<div class="line"></div>
+<div class="line">      <span class="keywordflow">IF</span> (errval.NE.rsb_err_no_error) &</div>
+<div class="line">       & stop <span class="stringliteral">"error calling rsb_mtx_free"</span></div>
+<div class="line"><span class="preprocessor">#else</span></div>
+<div class="line"><span class="preprocessor"></span>      print*,<span class="stringliteral">"You have an old Fortran compiler not supporting C_LOC."</span></div>
+<div class="line">      print*,<span class="stringliteral">"Skipping a part of the test"</span></div>
+<div class="line"><span class="preprocessor">#endif</span></div>
+<div class="line"><span class="preprocessor"></span>      errval=<a class="code" href="interfacersb_1_1rsb__lib__exit.html">rsb_lib_exit</a>(eo)                 <span class="comment">! librsb finalization</span></div>
+<div class="line">      <span class="keywordflow">IF</span> (errval.NE.rsb_err_no_error)&</div>
+<div class="line">       & stop <span class="stringliteral">"error calling rsb_lib_exit"</span></div>
+<div class="line">      print *, <span class="stringliteral">"rsb module fortran test is ok"</span></div>
+<div class="line">      res = errval</div>
+<div class="line"><span class="keyword">      end SUBROUTINE </span>rsb_mod_example2</div>
+<div class="line"></div>
+<div class="line">      <span class="keyword">PROGRAM</span> main</div>
+<div class="line">      <span class="keywordtype">USE </span><a class="code" href="classrsb.html">rsb</a></div>
+<div class="line">      <span class="keywordtype">IMPLICIT NONE</span></div>
+<div class="line">      <span class="keywordtype">INTEGER</span> :: res = rsb_err_no_error, passed = 0, failed = 0</div>
+<div class="line">      <span class="comment">!TYPE(C_PTR),PARAMETER :: EO = RSB_NULL_EXIT_OPTIONS</span></div>
+<div class="line">      <span class="comment">!TYPE(C_PTR),PARAMETER :: IO = RSB_NULL_INIT_OPTIONS</span></div>
+<div class="line">      <span class="comment">! Note: using C_NULL_PTR instead of the previous lines becase of http://gcc.gnu.org/bugzilla/show_bug.cgi?id=59411</span></div>
+<div class="line">      <span class="keywordtype">TYPE(</span>c_ptr<span class="keywordtype">)</span>,<span class="keywordtype">PARAMETER</span> :: eo = c_null_ptr</div>
+<div class="line">      <span class="keywordtype">TYPE(</span>c_ptr<span class="keywordtype">)</span>,<span class="keywordtype">PARAMETER</span> :: io = c_null_ptr</div>
+<div class="line"></div>
+<div class="line">      res = <a class="code" href="interfacersb_1_1rsb__lib__init.html">rsb_lib_init</a>(io)</div>
+<div class="line">      </div>
+<div class="line">      CALL rsb_mod_example1(res)</div>
+<div class="line">      <span class="keywordflow">IF</span> (res.LT.0) failed = failed + 1</div>
+<div class="line">      <span class="keywordflow">IF</span> (res.EQ.0) passed = passed + 1</div>
+<div class="line"></div>
+<div class="line">      res = <a class="code" href="interfacersb_1_1rsb__lib__exit.html">rsb_lib_exit</a>(eo)</div>
+<div class="line"></div>
+<div class="line">      CALL rsb_mod_example2(res)</div>
+<div class="line">      <span class="keywordflow">IF</span> (res.LT.0) failed = failed + 1</div>
+<div class="line">      <span class="keywordflow">IF</span> (res.EQ.0) passed = passed + 1</div>
+<div class="line">      </div>
+<div class="line">      print *, <span class="stringliteral">"FAILED:"</span>, failed</div>
+<div class="line">      print *, <span class="stringliteral">"PASSED:"</span>, passed</div>
+<div class="line">      <span class="keywordflow">IF</span> (failed.GT.0) <span class="keywordflow">THEN</span></div>
+<div class="line">       stop 1</div>
+<div class="line">      <span class="keywordflow">END IF</span></div>
+<div class="line"><span class="keyword">      END PROGRAM</span></div>
+<div class="line"></div>
+</div><!-- fragment --> </div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:23 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/group__rsb__doc__rsb.html b/doc/html/group__rsb__doc__rsb.html
new file mode 100644
index 0000000..93fce8a
--- /dev/null
+++ b/doc/html/group__rsb__doc__rsb.html
@@ -0,0 +1,3742 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: The librsb library interface (rsb.h, rsb.F90)</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#define-members">Macros</a> |
+<a href="#enum-members">Enumerations</a> |
+<a href="#func-members">Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">The librsb library interface (rsb.h, rsb.F90)</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>The reference documentation of the <code>librsb</code> library comes in both HTML and Unix man pages formats. The following sections/man pages are available: <a class="el" href="group__rsb__doc__rsb.html">The librsb library interface (rsb.h, rsb.F90)</a> ; <a class="el" href="group__rsb__doc__sparse__blas.html">The Sparse BLAS interface to librsb (blas_sparse.h, rsb_blas_sparse.F90)</a> ; <a class="el" href="group__rsb__doc__examples.html">Example programs and code</a>.  
+<a href="#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="define-members"></a>
+Macros</h2></td></tr>
+<tr class="memitem:ga68e662dcfb6981c1efc8eb03ef327182"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga68e662dcfb6981c1efc8eb03ef327182">RSB_SIZEOF</a>(TYPE)   RSB_NUMERICAL_TYPE_SIZE(TYPE)</td></tr>
+</table><table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="enum-members"></a>
+Enumerations</h2></td></tr>
+<tr class="memitem:gae0bada88731b01751401847d60110fb6"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6">rsb_opt_t</a> { <br/>
+  <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a3a0e460ef74cf3b2edf102c1aaa73d8a">RSB_IO_WANT_VERBOSE_INIT</a> = 0x000001, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a53498790997d5ef408751f9e19994532">RSB_IO_WANT_VERBOSE_EXIT</a> = 0x000002, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ae398997ce8253b813f2bbb5834e9670f">RSB_IO_WANT_OUTPUT_STREAM</a> = 0x000003, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a8fd1736c99255474630bee80d4924673">RSB_IO_WANT_SORT_METHOD</a> = 0x000004, 
+<br/>
+  <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a26f34783677f687b1e857de76a22fdd7">RSB_IO_WANT_CACHE_BLOCKING_METHOD</a> = 0x000005, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6aae67087e45854502f7c54e0065ed9a3a">RSB_IO_WANT_SUBDIVISION_MULTIPLIER</a> = 0x000006, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a0681bef1f3aca28448c14c4ed7eb4001">RSB_IO_WANT_VERBOSE_ERRORS</a> = 0x000007, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a3d3a5bf255dfc8719f6553e8ac4ecd53">RSB_IO_WANT_BOUNDED_BOX_COMPUTATION</a> = 0x000008, 
+<br/>
+  <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ad9150d4d5672d1835185d6e2286d92f4">RSB_IO_WANT_EXECUTING_THREADS</a> = 0x000009, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a56c0c6849135ce5fa9edd7907ab3e0cb">RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE</a> = 0x000010, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a574d237ad4bb16d884bb46e5a6670d0d">RSB_IO_WANT_MEMORY_HIERARCHY_INFO_STRING</a> = 0x000011, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ad901e7c7c31f4b9118bb313db549ea3b">RSB_IO_WANT_IS_INITIALIZED_MARKER</a> = 0x000012, 
+<br/>
+  <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ad74c3b62aa359b12e7287e7238792e0f">RSB_IO_WANT_MEM_ALLOC_CNT</a> = 0x000013, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a87d7018453cb3179349f12f9e4667b24">RSB_IO_WANT_MEM_ALLOC_TOT</a> = 0x000014, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a1584d16b27130ebda9f7fefa1d89afa5">RSB_IO_WANT_LEAF_LEVEL_MULTIVEC</a> = 0x000015, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a124bff2579d966823c2371e304656f84">RSB_IO_WANT_MAX_MEMORY_ALLOCATIONS</a> = 0x000016, 
+<br/>
+  <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ab053d73dfb6ce061b9d95a2f7e908dc9">RSB_IO_WANT_MAX_MEMORY_ALLOCATED</a> = 0x000017, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ae900da85e3fc1f46083ee0abf34db1d9">RSB_IO_WANT_LIBRSB_ETIME</a> = 0x000018, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a2164b61bd47cf53a3c8d287b419ab591">RSB_IO_WANT_VERBOSE_TUNING</a> = 0x000019
+<br/>
+ }</td></tr>
+<tr class="memdesc:gae0bada88731b01751401847d60110fb6"><td class="mdescLeft"> </td><td class="mdescRight">library option values for <a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4670aa682e70f82d5039c600e426a368">rsb_lib_set_opt_str</a>, <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit</a>, <a class="el" href="group__rsb__doc__rsb.html# [...]
+<tr class="memitem:ga14750ca720fd92a2be879a59ae36dfe9"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga14750ca720fd92a2be879a59ae36dfe9">rsb_extff_t</a> { <br/>
+  <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a4c48a7a285045f4614a83c50ad740508">RSB_EXTF_NORM_ONE</a> = 0x00001001, 
+<a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9af5f5082e70a6193ebcf3ea7ba7365eef">RSB_EXTF_NORM_TWO</a> = 0x00001002, 
+<a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a0a6cb081d0345b5bb6290ae534e3502f">RSB_EXTF_NORM_INF</a> = 0x00001003, 
+<a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9af9b17f6ad2d8be781b003836f0403fe5">RSB_EXTF_SUMS_ROW</a> = 0x00001004, 
+<br/>
+  <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a1878f79ae6f00f0b846a2fae397ffe4e">RSB_EXTF_SUMS_COL</a> = 0x00001005, 
+<a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a8e8061312124af555196c7277102ca54">RSB_EXTF_ASUMS_ROW</a> = 0x00001006, 
+<a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a26a147a4fe29284c1a3ca18ed3824ada">RSB_EXTF_ASUMS_COL</a> = 0x00001007, 
+<a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a5c7c241fb262968d5b7c42e63e5c1ea1">RSB_EXTF_DIAG</a> = 0x00000004
+<br/>
+ }</td></tr>
+<tr class="memdesc:ga14750ca720fd92a2be879a59ae36dfe9"><td class="mdescLeft"> </td><td class="mdescRight">Extraction filter flags, to be used with <a class="el" href="group__rsb__doc__rsb.html#ga6a645ce89fd167d72c92cdcfbcd8ed81">rsb_mtx_get_nrm()</a>/<a class="el" href="group__rsb__doc__rsb.html#gad0b2352cea6b7512b466d1c51327fcf8">rsb_mtx_get_vec()</a>.  <a href="group__rsb__doc__rsb.html#ga14750ca720fd92a2be879a59ae36dfe9">More...</a><br/></td></tr>
+<tr class="memitem:ga211914bd1afe8044a70dc864f3c1fc8f"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga211914bd1afe8044a70dc864f3c1fc8f">rsb_mif_t</a> { <br/>
+  <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa21c25054ec5c5a88f951d68457132858">RSB_MIF_INDEX_STORAGE_IN_BYTES__TO__SIZE_T</a> = 0x00000001, 
+<a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa6662a0302f39b67aa567f7c023cfe065">RSB_MIF_INDEX_STORAGE_IN_BYTES_PER_NNZ__TO__RSB_REAL_T</a> = 0x00000002, 
+<a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fae9b21eeea628145e87690a5968a5c954">RSB_MIF_MATRIX_ROWS__TO__RSB_COO_INDEX_T</a> = 0x00000004, 
+<a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8faa75c11724776205763e381cebb7059d0">RSB_MIF_MATRIX_COLS__TO__RSB_COO_INDEX_T</a> = 0x00000008, 
+<br/>
+  <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa4c02a263fffec5ad80552c8ce3cc782c">RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T</a> = 0x00000010, 
+<a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa16df07735a83056772b8bde7359e957f">RSB_MIF_TOTAL_SIZE__TO__SIZE_T</a> = 0x00000020, 
+<a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa7a9e06fbef26bddc97005eea246c478e">RSB_MIF_MATRIX_FLAGS__TO__RSB_FLAGS_T</a> = 0x00000040, 
+<a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa0ee69c4f0e9ac9a8ee4614a295b7be93">RSB_MIF_MATRIX_TYPECODE__TO__RSB_TYPE_T</a> = 0x00000080, 
+<br/>
+  <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa693bf11ea0f96ef79238ab422fcb3f81">RSB_MIF_MATRIX_INFO__TO__CHAR_P</a> = 0x00000100, 
+<a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa6256658253071990797f06872811074f">RSB_MIF_LEAVES_COUNT__TO__RSB_BLK_INDEX_T</a> = 0x00000200
+<br/>
+ }</td></tr>
+<tr class="memdesc:ga211914bd1afe8044a70dc864f3c1fc8f"><td class="mdescLeft"> </td><td class="mdescRight">Flags for getting matrix informations via <a class="el" href="group__rsb__doc__rsb.html#gad9a3eacd54fb7043464006cd57866edf">rsb_mtx_get_info()</a>/<a class="el" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str()</a>.  <a href="group__rsb__doc__rsb.html#ga211914bd1afe8044a70dc864f3c1fc8f">More...</a><br/></td></tr>
+<tr class="memitem:ga16c86c65a187bfbe94ecfdb87b97cade"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga16c86c65a187bfbe94ecfdb87b97cade">rsb_elopf_t</a> { <br/>
+  <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea5d853af7a6db57bc49cdbf7a53927e8a">RSB_ELOPF_MUL</a> = 0x00000001, 
+<a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea5665d0891b6ec738013ae7925de01969">RSB_ELOPF_DIV</a> = 0x00000002, 
+<a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadeae2cbeab6782b6e02b069568ec44cb94a">RSB_ELOPF_POW</a> = 0x00000004, 
+<a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea299b987e6a560bf0bec0432859a959e4">RSB_ELOPF_NEG</a> = 0x00000008, 
+<br/>
+  <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea884b319e42b1f2d70543e26c300a4287">RSB_ELOPF_SCALE_ROWS</a> = 0x00000010, 
+<a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadeafadcdf204c627d95c3dde82ee0c5608e">RSB_ELOPF_SCALE_COLS</a> = 0x00000020, 
+<a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea445dc5c113f761b58356e93e1b2bbfb5">RSB_ELOPF_SCALE_ROWS_REAL</a> = 0x00000040, 
+<a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea3a56f10b068d68e579bf4b01f8347f3f">RSB_ELOPF_SCALE_COLS_REAL</a> = 0x00000080
+<br/>
+ }</td></tr>
+<tr class="memdesc:ga16c86c65a187bfbe94ecfdb87b97cade"><td class="mdescLeft"> </td><td class="mdescRight">Flags for specifying a particular elemental/row-wise operation with <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals()</a>.  <a href="group__rsb__doc__rsb.html#ga16c86c65a187bfbe94ecfdb87b97cade">More...</a><br/></td></tr>
+</table><table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="func-members"></a>
+Functions</h2></td></tr>
+<tr class="memitem:gaf2b874d9f117ee6a6899634472b17946"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a> (struct <a class="el" href="structrsb__initopts.html">rsb_initopts</a> *iop)</td></tr>
+<tr class="memitem:ga4670aa682e70f82d5039c600e426a368"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga4670aa682e70f82d5039c600e426a368">rsb_lib_set_opt_str</a> (const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *opnp, const <a class="el" href="rsb_8h.html#a10ec0af478bcc [...]
+<tr class="memitem:ga1707f8b0c28805f692146cf2fb28ae70"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit</a> (struct <a class="el" href="structrsb__initopts.html">rsb_initopts</a> *iop)</td></tr>
+<tr class="memitem:ga86db30487afe975ed18a7aa6ee0db81d"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a> (struct <a class="el" href="structrsb__initopts.html">rsb_initopts</a> *iop)</td></tr>
+<tr class="memitem:ga86c1b0d0586f817ee31ca1caa3fee9be"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_from_coo_const</a> (const void *VA, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</ [...]
+<tr class="memitem:ga3b7f9a461377de348b33a873f2e1893f"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_mtx_alloc_from_coo_inplace</a> (void *VA, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *JA, <a class [...]
+<tr class="memitem:gab64a020286a8b58d23d84d4512bd9132"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a> (struct rsb_mtx_t *mtxAp)</td></tr>
+<tr class="memitem:gae181671ba19191caa5a282cbde4fdfc5"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a> (struct rsb_mtx_t **mtxBpp, <a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode, <a class="el" href="rsb_8h.html#a4 [...]
+<tr class="memitem:ga4a16a82d289c76a437915db449553d4d"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga4a16a82d289c76a437915db449553d4d">rsb_spmv</a> (<a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> transA, const void *alphap, const struct rsb_mtx_t *mtxAp, const void *Xp,  [...]
+<tr class="memitem:ga9b044332b720d3f8083ae792068fb04a"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga9b044332b720d3f8083ae792068fb04a">rsb_spsv</a> (<a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> transT, const void *alphap, const struct rsb_mtx_t *mtxTp, const void *Xp,  [...]
+<tr class="memitem:ga48e6f3844605fffac9f622f05afa6043"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga48e6f3844605fffac9f622f05afa6043">rsb_spsm</a> (<a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> transT, const void *alphap, const struct rsb_mtx_t *mtxTp, <a class="el" hr [...]
+<tr class="memitem:gaa09eca432d5bb8c57fcff5d9ab98dfb8"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gaa09eca432d5bb8c57fcff5d9ab98dfb8">rsb_coo_sort</a> (void *VA, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b43952 [...]
+<tr class="memitem:gaa79f69918eafbd8f737b7866a00a0330"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gaa79f69918eafbd8f737b7866a00a0330">rsb_file_mtx_get_dims</a> (const char *filename, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *nrp, <a class="el" href="rsb_8h.html# [...]
+<tr class="memitem:gab660cf8aff876ae88b59c7a22ddfc912"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror</a> (void *stream, <a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> errval)</td></tr>
+<tr class="memitem:ga28710b8dade48738ea8e075aa1a3d262"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r</a> (<a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> errval, <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678e [...]
+<tr class="memitem:ga2d7533a97c97b215090d69c2d9235412"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals</a> (struct rsb_mtx_t *mtxAp, enum <a class="el" href="group__rsb__doc__rsb.html#ga16c86c65a187bfbe94ecfdb87b97cade">rsb_elopf_t</a> elop_flags, const void [...]
+<tr class="memitem:gab8069ad6d5a67bc8a726131891e98c46"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gab8069ad6d5a67bc8a726131891e98c46">rsb_mtx_set_vals</a> (struct rsb_mtx_t *mtxAp, const void *VA, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, const <a clas [...]
+<tr class="memitem:gad8f1aa9ac5081edd789374e7bb82697f"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gad8f1aa9ac5081edd789374e7bb82697f">rsb_mtx_get_vals</a> (const struct rsb_mtx_t *mtxAp, void *VA, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, const <a clas [...]
+<tr class="memitem:gad911ac7528c95c874d02cb17e6b76c54"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gad911ac7528c95c874d02cb17e6b76c54">rsb_file_mtx_save</a> (const struct rsb_mtx_t *mtxAp, const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *filename)</td></tr>
+<tr class="memitem:gac4b2a63cdfe1cd4083b1561ee4bea696"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gac4b2a63cdfe1cd4083b1561ee4bea696">rsb_file_vec_save</a> (const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *filename, <a class="el" href="rsb_8h.html#ac0f6a03345c8874f6 [...]
+<tr class="memitem:gad071e0373a08f74ee7ae910e9e4fd140"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gad071e0373a08f74ee7ae910e9e4fd140">rsb_file_vec_load</a> (const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *filename, <a class="el" href="rsb_8h.html#ac0f6a03345c8874f6 [...]
+<tr class="memitem:ga00833b0cf57da8e430f9d0e2b5375bb3"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga00833b0cf57da8e430f9d0e2b5375bb3">rsb_file_mtx_load</a> (const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *filename, <a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> flagsA, <a class="el" href="r [...]
+<tr class="memitem:ga30823d02e577e59da4ccff6baaeb8ea1"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga30823d02e577e59da4ccff6baaeb8ea1">rsb_sppsp</a> (<a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode, <a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> transA, const void *alphap, const struct rsb [...]
+<tr class="memitem:ga8813ccbbb1065ac76bfe22c42feafa05"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga8813ccbbb1065ac76bfe22c42feafa05">rsb_spmsp</a> (<a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode, <a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> transA, const void *alphap, const struct rsb [...]
+<tr class="memitem:gaf30a70ea183d30d216f700782fc01524"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gaf30a70ea183d30d216f700782fc01524">rsb_mtx_add_to_dense</a> (const void *alphap, const struct rsb_mtx_t *mtxAp, <a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> ldB, <a cl [...]
+<tr class="memitem:ga7459601f0d54bd95549959b9749fedde"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga7459601f0d54bd95549959b9749fedde">rsb_psblas_trans_to_rsb_trans</a> (const char psbtrans)</td></tr>
+<tr class="memitem:ga13d417f776654fd159f274e56191573e"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga13d417f776654fd159f274e56191573e">rsb_mtx_alloc_from_csr_const</a> (const void *VA, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *RP, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</ [...]
+<tr class="memitem:gaebf57d9e5263f41eb6163581ffc141aa"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gaebf57d9e5263f41eb6163581ffc141aa">rsb_mtx_alloc_from_csc_const</a> (const void *VA, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</ [...]
+<tr class="memitem:ga60121166daf00968ba717931f04ea455"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga60121166daf00968ba717931f04ea455">rsb_mtx_alloc_from_csr_inplace</a> (void *VA, <a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> *RP, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *JA, <a class [...]
+<tr class="memitem:ga3c46a4942a6acb90063d721b6446e78e"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga3c46a4942a6acb90063d721b6446e78e">rsb_mtx_switch_to_csr</a> (struct rsb_mtx_t *mtxAp, void **VAp, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> **IAp, <a class="el" hr [...]
+<tr class="memitem:gaac3c6c033733a8101b9ccf56f8fc7112"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gaac3c6c033733a8101b9ccf56f8fc7112">rsb_mtx_get_coo</a> (const struct rsb_mtx_t *mtxAp, void *VA, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, <a class="el" href=" [...]
+<tr class="memitem:ga4adca460f50bc1ad7d9ffdfda2273b87"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga4adca460f50bc1ad7d9ffdfda2273b87">rsb_mtx_get_csr</a> (<a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode, const struct rsb_mtx_t *mtxAp, void *VA, <a class="el" href [...]
+<tr class="memitem:gaa01c4a69db732f99e8a960ee8c9afa23"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gaa01c4a69db732f99e8a960ee8c9afa23">rsb_mtx_get_rows_sparse</a> (<a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> transA, const void *alphap, const struct rsb_mtx_t *mtxAp, v [...]
+<tr class="memitem:ga68115178d85cd28c645058deb0aa6379"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb_mtx_get_coo_block</a> (const struct rsb_mtx_t *mtxAp, void *VA, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, <a class="el"  [...]
+<tr class="memitem:ga3ec8d721b5333aae6ea9b03eb0039285"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga3ec8d721b5333aae6ea9b03eb0039285">rsb_spmm</a> (<a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> transA, const void *alphap, const struct rsb_mtx_t *mtxAp, <a class="el" hr [...]
+<tr class="memitem:ga74d97612d4af70244c886b9eadd90a0e"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga74d97612d4af70244c886b9eadd90a0e">rsb_spmsp_to_dense</a> (<a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode, <a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055 [...]
+<tr class="memitem:gab0702d7080d1699162e4201bc70cc5ee"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gab0702d7080d1699162e4201bc70cc5ee">rsb_mtx_rndr</a> (const char *filename, const struct rsb_mtx_t *mtxAp, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> pmWidth, <a clas [...]
+<tr class="memitem:ga4b45a74b985f5cbd869bc9a540951771"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga4b45a74b985f5cbd869bc9a540951771">rsb_file_mtx_rndr</a> (void *pmp, const char *filename, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> pmlWidth, <a class="el" href="r [...]
+<tr class="memitem:gadf75c148fe661486ab0d8140657b8d9a"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gadf75c148fe661486ab0d8140657b8d9a">rsb_mtx_switch_to_coo</a> (struct rsb_mtx_t *mtxAp, void **VAp, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> **IAp, <a class="el" hr [...]
+<tr class="memitem:gadaee12cc24dac7f8ebc68efd3d09c819"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gadaee12cc24dac7f8ebc68efd3d09c819">rsb_mtx_get_prec</a> (void *opdp, const struct rsb_mtx_t *mtxAp, <a class="el" href="rsb_8h.html#a528640277b196f7cfce2016cffbdd340">rsb_precf_t</a> prec_flags, const void * [...]
+<tr class="memitem:gad9a3eacd54fb7043464006cd57866edf"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gad9a3eacd54fb7043464006cd57866edf">rsb_mtx_get_info</a> (const struct rsb_mtx_t *mtxAp, enum <a class="el" href="group__rsb__doc__rsb.html#ga211914bd1afe8044a70dc864f3c1fc8f">rsb_mif_t</a> miflags, void *min [...]
+<tr class="memitem:ga2b7d51b9822f73d2fe7fcf5b9d0be1e9"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str</a> (const struct rsb_mtx_t *mtxAp, const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *mis, void *minfop, size_t  [...]
+<tr class="memitem:ga6a645ce89fd167d72c92cdcfbcd8ed81"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga6a645ce89fd167d72c92cdcfbcd8ed81">rsb_mtx_get_nrm</a> (const struct rsb_mtx_t *mtxAp, void *Np, enum <a class="el" href="group__rsb__doc__rsb.html#ga14750ca720fd92a2be879a59ae36dfe9">rsb_extff_t</a> flags)< [...]
+<tr class="memitem:gad0b2352cea6b7512b466d1c51327fcf8"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gad0b2352cea6b7512b466d1c51327fcf8">rsb_mtx_get_vec</a> (const struct rsb_mtx_t *mtxAp, void *Dp, enum <a class="el" href="group__rsb__doc__rsb.html#ga14750ca720fd92a2be879a59ae36dfe9">rsb_extff_t</a> flags)< [...]
+<tr class="memitem:ga6677d4e20c00bdf4ebf53567246f5693"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#ab7a0af874a2765e9271a63ee4acf3d5d">rsb_time_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a> (void)</td></tr>
+<tr class="memitem:gafca80e53d47a7ec3eb116e755fe47c58"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gafca80e53d47a7ec3eb116e755fe47c58">rsb_mtx_alloc_from_coo_begin</a> (<a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> nnzA, <a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode, <a class="el" hre [...]
+<tr class="memitem:gab583fbefa0a66e9d30dac034480c2d86"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gab583fbefa0a66e9d30dac034480c2d86">rsb_mtx_alloc_from_coo_end</a> (struct rsb_mtx_t **mtxApp)</td></tr>
+<tr class="memitem:ga8c11024d248e2e686476fd9e89aa7c15"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a> (struct rsb_mtx_t **mtxOpp, <a class="el" href="rsb_8h.html#ab6fedd060aee0dd9f61f0438987a99a9">rsb_real_t</a> *sfp, <a class="el" href="rsb_8h.html#aefcdc [...]
+<tr class="memitem:ga8d7a05bbc165bd6ac20e8e23487a5871"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga8d7a05bbc165bd6ac20e8e23487a5871">rsb_tune_spsm</a> (struct rsb_mtx_t **mtxOpp, <a class="el" href="rsb_8h.html#ab6fedd060aee0dd9f61f0438987a99a9">rsb_real_t</a> *sfp, <a class="el" href="rsb_8h.html#aefcdc [...]
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<p>The reference documentation of the <code>librsb</code> library comes in both HTML and Unix man page formats. The following sections/man pages are available: <a class="el" href="group__rsb__doc__rsb.html">The librsb library interface (rsb.h, rsb.F90)</a> ; <a class="el" href="group__rsb__doc__sparse__blas.html">The Sparse BLAS interface to librsb (blas_sparse.h, rsb_blas_sparse.F90)</a> ; <a class="el" href="group__rsb__doc__examples.html">Example programs and code</a>. </p>
+<p>In general, users of this library are interested in high-performance sparse matrix computations on cache-based shared-memory parallel computers. For this, <code>librsb</code> offers a native C interface (here documented) and a Fortran one (in <a class="el" href="rsb_8F90.html">rsb.F90</a>, equivalent to the C declaration headers from <a class="el" href="rsb_8h.html">rsb.h</a>), in addition to the Sparse BLAS one (both C and Fortran, documented).</p>
+<p>Configuration, build, and installation instructions are contained in the <code>README</code> file distributed in the sources archive.</p>
+<p><b> Typical program structure </b></p>
+<ul>
+<li>initialize <code>librsb</code> with <a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init()</a> </li>
+<li>(in any order) allocate matrices (e.g.: with <a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_mtx_alloc_from_coo_inplace()</a> or others); do any computation with them (e.g.: <a class="el" href="group__rsb__doc__rsb.html#ga4a16a82d289c76a437915db449553d4d">rsb_spmv()</a>, <a class="el" href="group__rsb__doc__rsb.html#ga9b044332b720d3f8083ae792068fb04a">rsb_spsv()</a> ); convert matrices (e.g.: with <a class="el" href="group__rsb__doc__rsb.html# [...]
+<li>finalize <code>librsb</code> with <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit()</a> (a minimal sketch follows this list)</li>
+</ul>
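+<p>A minimal sketch of this structure (a hypothetical 1x1 matrix of <code>double</code> type, assuming that type was enabled at configure time; error checking omitted):</p>
+<pre class="fragment">rsb_err_t errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS);   /* initialize the library */
+const double VA[] = { 1.0 };                               /* one nonzero value ...   */
+const rsb_coo_idx_t IA[] = { 0 }, JA[] = { 0 };            /* ... at row 0, column 0  */
+struct rsb_mtx_t *mtxAp = rsb_mtx_alloc_from_coo_const(VA, IA, JA, 1,
+        RSB_NUMERICAL_TYPE_DOUBLE, 1, 1, 1, 1, RSB_FLAG_NOFLAGS, &amp;errval);
+const double alpha = 1.0, beta = 0.0, X[] = { 1.0 }; double Y[] = { 0.0 };
+errval = rsb_spmv(RSB_TRANSPOSITION_N, &amp;alpha, mtxAp, X, 1, &amp;beta, Y, 1); /* Y := alpha*A*X + beta*Y */
+rsb_mtx_free(mtxAp);                                       /* free the matrix         */
+errval = rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);              /* finalize the library    */
+</pre>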
+<p><b> Important usage notes </b></p>
+<p><b> General program structure </b> Before calling any <code>librsb</code> function, a program is required to initialize <code>librsb</code>'s internal status. This is done by calling <a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init()</a>. Afterwards, any <code>librsb</code> function can be safely used. When <code>librsb</code> functions are not intended to be called anymore, a program may call <a class="el" href="group__rsb__doc__rsb.html [...]
+<p><b> Manipulating matrices and vectors </b> In order to use <code>librsb</code>, the user is not required to explicitly use any of <code>librsb</code>'s data structures: their manipulation is performed by <code>librsb</code> functions. Therefore, knowledge of <code>librsb</code>'s matrix type (<code>rsb_mtx_t</code>) is not necessary at all: this structure is intended to be used as an opaque container.</p>
+<p>On the contrary, arrays for numerical vectors (or more generally, dense matrices) are expected to be managed by the user: <code>librsb</code> does not furnish any specific vector type. Computational functions treat dense vectors/matrices as simple arrays of a specified type; see the <a class="el" href="group__rsb__doc__examples.html">Example programs and code</a>.</p>
+<p><b> Computational functions </b> This library can be configured at build time to support a custom subset of numerical types. To keep the programming interface compact, the computational functions are not replicated for each numerical type. Instead, the type is specified by the user via a type flag. For instance, matrix assembly functions (e.g.: <a class="el" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_from_coo_const [...]
+<p><b> Memory management </b></p>
+<p>Matrix structures (<code>rsb_mtx_t</code>) allocated by <code>librsb</code> shall be freed only via <a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free()</a> .</p>
+<p><b> Benchmarking </b></p>
+<p>If you want to benchmark this library, there are different possibilities: </p>
+<div class="fragment"><div class="line"><span class="preprocessor">#!/bin/sh</span></div>
+<div class="line"><span class="preprocessor"></span></div>
+<div class="line"><span class="preprocessor"># systematic comparative benchmark, mostly for dense matrices</span></div>
+<div class="line"><span class="preprocessor"></span><span class="preprocessor"># (with Intel MKL, if linked) benchmark comparing   </span></div>
+<div class="line"><span class="preprocessor"></span><span class="preprocessor"># produces a number of plots systematically</span></div>
+<div class="line"><span class="preprocessor"></span>bench/dense.sh</div>
+<div class="line"></div>
+<div class="line"><span class="preprocessor"># the benchmark command; assumes A.mtx is a file in Matrix Market format</span></div>
+<div class="line"><span class="preprocessor"></span>./rsbench -oa -Ob -f A.mtx -qH -R -n1 -t100 --verbose -TD --compare-competitors </div>
+<div class="line"></div>
+<div class="line"><span class="preprocessor"># rsbench is very flexible tool; see the help for it:</span></div>
+<div class="line"><span class="preprocessor"></span>./rsbench -oa -Ob --help</div>
+</div><!-- fragment --><p><b> Tuning and Customization </b></p>
+<p>There are different <code>./configure</code> options you may look at for tuning or customizing the library. </p>
+<h2>Macro Definition Documentation</h2>
+<a class="anchor" id="ga68e662dcfb6981c1efc8eb03ef327182"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_SIZEOF</td>
+          <td>(</td>
+          <td class="paramtype"> </td>
+          <td class="paramname">TYPE</td><td>)</td>
+          <td>   RSB_NUMERICAL_TYPE_SIZE(TYPE)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Use the <a class="el" href="group__rsb__doc__rsb.html#ga68e662dcfb6981c1efc8eb03ef327182">RSB_SIZEOF</a> macro to get the size (in bytes) of a type supported by the library (e.g.: when allocating numerical vectors). </p>
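+<p>For instance, a minimal allocation sketch for a hypothetical entry count <code>n</code> (assuming the <code>double</code> type was enabled at configure time, so that the <code>RSB_NUMERICAL_TYPE_DOUBLE</code> type code is available):</p>
+<pre class="fragment">/* room for n numerical entries of type code 'D' (double) */
+void *Xp = calloc(n, RSB_SIZEOF(RSB_NUMERICAL_TYPE_DOUBLE));
+</pre>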
+
+</div>
+</div>
+<h2>Enumeration Type Documentation</h2>
+<a class="anchor" id="ga16c86c65a187bfbe94ecfdb87b97cade"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="group__rsb__doc__rsb.html#ga16c86c65a187bfbe94ecfdb87b97cade">rsb_elopf_t</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Flags for specifying a particular elemental/row-wise operation with <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals()</a>. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="gga16c86c65a187bfbe94ecfdb87b97cadea5d853af7a6db57bc49cdbf7a53927e8a"></a>RSB_ELOPF_MUL</em> </td><td>
+<p>Elemental multiplication of the matrix by a specified scalar (usable with <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals()</a>, binary operation). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="gga16c86c65a187bfbe94ecfdb87b97cadea5665d0891b6ec738013ae7925de01969"></a>RSB_ELOPF_DIV</em> </td><td>
+<p>Elemental division by a specified scalar (usable with <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals()</a>, binary operation). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="gga16c86c65a187bfbe94ecfdb87b97cadeae2cbeab6782b6e02b069568ec44cb94a"></a>RSB_ELOPF_POW</em> </td><td>
+<p>Elemental power to a specified scalar (usable with <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals()</a>, binary operation). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="gga16c86c65a187bfbe94ecfdb87b97cadea299b987e6a560bf0bec0432859a959e4"></a>RSB_ELOPF_NEG</em> </td><td>
+<p>Elemental negation (usable with <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals()</a>, unary operation). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="gga16c86c65a187bfbe94ecfdb87b97cadea884b319e42b1f2d70543e26c300a4287"></a>RSB_ELOPF_SCALE_ROWS</em> </td><td>
+<p>Row wise scaling by a specified scaling vector (usable with <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals()</a>, binary operation). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="gga16c86c65a187bfbe94ecfdb87b97cadeafadcdf204c627d95c3dde82ee0c5608e"></a>RSB_ELOPF_SCALE_COLS</em> </td><td>
+<p>Column wise scaling by a specified scaling vector (usable with <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals()</a>, binary operation). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="gga16c86c65a187bfbe94ecfdb87b97cadea445dc5c113f761b58356e93e1b2bbfb5"></a>RSB_ELOPF_SCALE_ROWS_REAL</em> </td><td>
+<p>Row wise scaling by a specified scaling vector. If matrix is of a complex type, the argument is expected to be of the corresponding real type (assumed that that type has been enabled). (usable with <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals()</a>, binary operation). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="gga16c86c65a187bfbe94ecfdb87b97cadea3a56f10b068d68e579bf4b01f8347f3f"></a>RSB_ELOPF_SCALE_COLS_REAL</em> </td><td>
+<p>Column wise scaling by a specified scaling vector. If matrix is of a complex type, the argument is expected to be of the corresponding real type (assumed that that type has been enabled). (usable with <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals()</a>, binary operation). </p>
+</td></tr>
+</table>
+</dd>
+</dl>
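+<p>A minimal usage sketch (assuming <code>mtxAp</code> points to an already assembled matrix of <code>double</code> type):</p>
+<pre class="fragment">const double two = 2.0;
+rsb_err_t errval = rsb_mtx_upd_vals(mtxAp, RSB_ELOPF_MUL, &amp;two); /* scale each nonzero by 2.0 */
+</pre>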
+
+</div>
+</div>
+<a class="anchor" id="ga14750ca720fd92a2be879a59ae36dfe9"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="group__rsb__doc__rsb.html#ga14750ca720fd92a2be879a59ae36dfe9">rsb_extff_t</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Extraction filter flags, to be used with <a class="el" href="group__rsb__doc__rsb.html#ga6a645ce89fd167d72c92cdcfbcd8ed81">rsb_mtx_get_nrm()</a>/<a class="el" href="group__rsb__doc__rsb.html#gad0b2352cea6b7512b466d1c51327fcf8">rsb_mtx_get_vec()</a>. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="gga14750ca720fd92a2be879a59ae36dfe9a4c48a7a285045f4614a83c50ad740508"></a>RSB_EXTF_NORM_ONE</em> </td><td>
+<p><a class="el" href="group__rsb__doc__rsb.html#ga6a645ce89fd167d72c92cdcfbcd8ed81">rsb_mtx_get_nrm()</a> flag value for computing the one-norm. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="gga14750ca720fd92a2be879a59ae36dfe9af5f5082e70a6193ebcf3ea7ba7365eef"></a>RSB_EXTF_NORM_TWO</em> </td><td>
+<p><a class="el" href="group__rsb__doc__rsb.html#ga6a645ce89fd167d72c92cdcfbcd8ed81">rsb_mtx_get_nrm()</a> flag value for computing the two-norm (Frobenius norm). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="gga14750ca720fd92a2be879a59ae36dfe9a0a6cb081d0345b5bb6290ae534e3502f"></a>RSB_EXTF_NORM_INF</em> </td><td>
+<p><a class="el" href="group__rsb__doc__rsb.html#ga6a645ce89fd167d72c92cdcfbcd8ed81">rsb_mtx_get_nrm()</a> flag value for computing the infinity-norm. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="gga14750ca720fd92a2be879a59ae36dfe9af9b17f6ad2d8be781b003836f0403fe5"></a>RSB_EXTF_SUMS_ROW</em> </td><td>
+<p><a class="el" href="group__rsb__doc__rsb.html#gad0b2352cea6b7512b466d1c51327fcf8">rsb_mtx_get_vec()</a> flag value for computing the sum along each row. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="gga14750ca720fd92a2be879a59ae36dfe9a1878f79ae6f00f0b846a2fae397ffe4e"></a>RSB_EXTF_SUMS_COL</em> </td><td>
+<p><a class="el" href="group__rsb__doc__rsb.html#gad0b2352cea6b7512b466d1c51327fcf8">rsb_mtx_get_vec()</a> flag value for computing the sum along each column. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="gga14750ca720fd92a2be879a59ae36dfe9a8e8061312124af555196c7277102ca54"></a>RSB_EXTF_ASUMS_ROW</em> </td><td>
+<p><a class="el" href="group__rsb__doc__rsb.html#gad0b2352cea6b7512b466d1c51327fcf8">rsb_mtx_get_vec()</a> flag value for computing the absolute values sum, along each row. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="gga14750ca720fd92a2be879a59ae36dfe9a26a147a4fe29284c1a3ca18ed3824ada"></a>RSB_EXTF_ASUMS_COL</em> </td><td>
+<p><a class="el" href="group__rsb__doc__rsb.html#gad0b2352cea6b7512b466d1c51327fcf8">rsb_mtx_get_vec()</a> flag value for computing the absolute values sum, along each column. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="gga14750ca720fd92a2be879a59ae36dfe9a5c7c241fb262968d5b7c42e63e5c1ea1"></a>RSB_EXTF_DIAG</em> </td><td>
+<p><a class="el" href="group__rsb__doc__rsb.html#gad0b2352cea6b7512b466d1c51327fcf8">rsb_mtx_get_vec()</a> flag value for extracting the diagonal submatrix. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
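+<p>A minimal sketch (assuming <code>mtxAp</code> points to a matrix of <code>double</code> type, so that the norm is written back as a <code>double</code>):</p>
+<pre class="fragment">double nrmA = 0.0;
+rsb_err_t errval = rsb_mtx_get_nrm(mtxAp, &amp;nrmA, RSB_EXTF_NORM_INF); /* infinity-norm of A */
+</pre>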
+
+</div>
+</div>
+<a class="anchor" id="ga211914bd1afe8044a70dc864f3c1fc8f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="group__rsb__doc__rsb.html#ga211914bd1afe8044a70dc864f3c1fc8f">rsb_mif_t</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Flags for getting matrix information via <a class="el" href="group__rsb__doc__rsb.html#gad9a3eacd54fb7043464006cd57866edf">rsb_mtx_get_info()</a>/<a class="el" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str()</a>. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="gga211914bd1afe8044a70dc864f3c1fc8fa21c25054ec5c5a88f951d68457132858"></a>RSB_MIF_INDEX_STORAGE_IN_BYTES__TO__SIZE_T</em> </td><td>
+<p>Index storage occupation, in bytes. (size_t) </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="gga211914bd1afe8044a70dc864f3c1fc8fa6662a0302f39b67aa567f7c023cfe065"></a>RSB_MIF_INDEX_STORAGE_IN_BYTES_PER_NNZ__TO__RSB_REAL_T</em> </td><td>
+<p>Index storage occupation per nnz, in bytes. (<a class="el" href="rsb_8h.html#ab6fedd060aee0dd9f61f0438987a99a9">rsb_real_t</a>) </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="gga211914bd1afe8044a70dc864f3c1fc8fae9b21eeea628145e87690a5968a5c954"></a>RSB_MIF_MATRIX_ROWS__TO__RSB_COO_INDEX_T</em> </td><td>
+<p>Rows count (<a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a>) </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="gga211914bd1afe8044a70dc864f3c1fc8faa75c11724776205763e381cebb7059d0"></a>RSB_MIF_MATRIX_COLS__TO__RSB_COO_INDEX_T</em> </td><td>
+<p>Columns count (<a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a>) </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="gga211914bd1afe8044a70dc864f3c1fc8fa4c02a263fffec5ad80552c8ce3cc782c"></a>RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T</em> </td><td>
+<p>Nonzeroes count (<a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a>) </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="gga211914bd1afe8044a70dc864f3c1fc8fa16df07735a83056772b8bde7359e957f"></a>RSB_MIF_TOTAL_SIZE__TO__SIZE_T</em> </td><td>
+<p>Total size, in bytes (size_t) </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="gga211914bd1afe8044a70dc864f3c1fc8fa7a9e06fbef26bddc97005eea246c478e"></a>RSB_MIF_MATRIX_FLAGS__TO__RSB_FLAGS_T</em> </td><td>
+<p>Matrix flags (<a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a>) </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="gga211914bd1afe8044a70dc864f3c1fc8fa0ee69c4f0e9ac9a8ee4614a295b7be93"></a>RSB_MIF_MATRIX_TYPECODE__TO__RSB_TYPE_T</em> </td><td>
+<p>Matrix type code (<a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a>) </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="gga211914bd1afe8044a70dc864f3c1fc8fa693bf11ea0f96ef79238ab422fcb3f81"></a>RSB_MIF_MATRIX_INFO__TO__CHAR_P</em> </td><td>
+<p>Matrix info string, only for <a class="el" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str()</a> (<a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a>*) </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="gga211914bd1afe8044a70dc864f3c1fc8fa6256658253071990797f06872811074f"></a>RSB_MIF_LEAVES_COUNT__TO__RSB_BLK_INDEX_T</em> </td><td>
+<p>Leaf submatrices count (<a class="el" href="rsb_8h.html#ac6a4411e32793f5c150c6ab3c6f7e14e">rsb_blk_idx_t</a>) </p>
+</td></tr>
+</table>
+</dd>
+</dl>
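+<p>A minimal sketch of querying the nonzeroes count (assuming <code>mtxAp</code> points to an assembled matrix):</p>
+<pre class="fragment">rsb_nnz_idx_t nnzA = 0;
+rsb_err_t errval = rsb_mtx_get_info(mtxAp, RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T, &amp;nnzA);
+</pre>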
+
+</div>
+</div>
+<a class="anchor" id="gae0bada88731b01751401847d60110fb6"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6">rsb_opt_t</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Library option values for <a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4670aa682e70f82d5039c600e426a368">rsb_lib_set_opt_str</a>, <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit</a>, <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a>, <a class="el" href="rsb__rsb_8c.html#a96a28efc32dd050d [...]
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="ggae0bada88731b01751401847d60110fb6a3a0e460ef74cf3b2edf102c1aaa73d8a"></a>RSB_IO_WANT_VERBOSE_INIT</em> </td><td>
+<p><a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a3a0e460ef74cf3b2edf102c1aaa73d8a">RSB_IO_WANT_VERBOSE_INIT</a> prompts for a verbose initialization of the library: messages will be written to the file descriptor (<code>FILE*</code>) pointed by the value pointer when calling <a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ggae0bada88731b01751401847d60110fb6a53498790997d5ef408751f9e19994532"></a>RSB_IO_WANT_VERBOSE_EXIT</em> </td><td>
+<p><a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a53498790997d5ef408751f9e19994532">RSB_IO_WANT_VERBOSE_EXIT</a> prompts for a verbose finalization of the library: messages will be written to the file descriptor (<code>FILE*</code>) pointed by the value pointer when calling <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a>. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ggae0bada88731b01751401847d60110fb6ae398997ce8253b813f2bbb5834e9670f"></a>RSB_IO_WANT_OUTPUT_STREAM</em> </td><td>
+<p>Specifies the default output stream. Output (debug info) will be written to the file descriptor (<code>FILE*</code>) pointed by the value pointer. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ggae0bada88731b01751401847d60110fb6a8fd1736c99255474630bee80d4924673"></a>RSB_IO_WANT_SORT_METHOD</em> </td><td>
+<p>Specifies the default sorting method. Specified as a pointed integer (<a class="el" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a>) number, in {[0],1}. (internal) </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ggae0bada88731b01751401847d60110fb6a26f34783677f687b1e857de76a22fdd7"></a>RSB_IO_WANT_CACHE_BLOCKING_METHOD</em> </td><td>
+<p>Specifies the default cache blocking method. Specified as a pointed integer (<a class="el" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a>) number, in {-1,[0],1}. (internal) </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ggae0bada88731b01751401847d60110fb6aae67087e45854502f7c54e0065ed9a3a"></a>RSB_IO_WANT_SUBDIVISION_MULTIPLIER</em> </td><td>
+<p>Specifies a multiplier for finer (if &gt;1.0) or coarser (if &lt;1.0) subdivisions. Specified as a pointed (<a class="el" href="rsb_8h.html#ab6fedd060aee0dd9f61f0438987a99a9">rsb_real_t</a>) number, in {..,[1.0],..}. (internal) </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ggae0bada88731b01751401847d60110fb6a0681bef1f3aca28448c14c4ed7eb4001"></a>RSB_IO_WANT_VERBOSE_ERRORS</em> </td><td>
+<p>Prompts for a verbose error reporting: messages will be written to the file descriptor (<code>FILE*</code>) pointed by the value pointer. Only meaningful if an interface error verbosity greater than 0 was set at configure time. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ggae0bada88731b01751401847d60110fb6a3d3a5bf255dfc8719f6553e8ac4ecd53"></a>RSB_IO_WANT_BOUNDED_BOX_COMPUTATION</em> </td><td>
+<p>Prompts for bounded box computation, for a smoother submatrices locking; pointed <a class="el" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a> in {0,[1]}. (internal). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ggae0bada88731b01751401847d60110fb6ad9150d4d5672d1835185d6e2286d92f4"></a>RSB_IO_WANT_EXECUTING_THREADS</em> </td><td>
+<p>Specifies the number of desired executing threads; pointed <a class="el" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a> in {[0],1,..}. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ggae0bada88731b01751401847d60110fb6a56c0c6849135ce5fa9edd7907ab3e0cb"></a>RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE</em> </td><td>
+<p>Specifies the level of interface verbosity; if setting, pointed <a class="el" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a> values should be in {[0],1,..}. Support may be enabled or disabled at build time via the <code>--enable-internals-error-verbosity</code> configure option. If disabled, only getting is supported and yields -1, but setting is not supported and the <a class="el" href="rsb_8h.html#a9d7fe7c0e3fabfba57bf2318459ed18a">RSB_ERR_NO_STREAM_OUTPUT_C [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ggae0bada88731b01751401847d60110fb6a574d237ad4bb16d884bb46e5a6670d0d"></a>RSB_IO_WANT_MEMORY_HIERARCHY_INFO_STRING</em> </td><td>
+<p>Specifies a custom memory hierarchy info string; pointed <code>const</code> <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a>*; (may point to a NULL string pointer). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ggae0bada88731b01751401847d60110fb6ad901e7c7c31f4b9118bb313db549ea3b"></a>RSB_IO_WANT_IS_INITIALIZED_MARKER</em> </td><td>
+<p>Used for getting whether the library has been initialized (<a class="el" href="rsb_8h.html#af580e920b9f507028d3b7d34b4dadd6f">RSB_BOOL_TRUE</a>) or not (<a class="el" href="rsb_8h.html#ad396755fe9a1d81991d5ac238058db18">RSB_BOOL_FALSE</a>) ; pointed <code>const</code> <a class="el" href="rsb_8h.html#aeeac94f4bf43460df839c8decd897523">rsb_bool_t</a>*; (this is NOT for general users). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ggae0bada88731b01751401847d60110fb6ad74c3b62aa359b12e7287e7238792e0f"></a>RSB_IO_WANT_MEM_ALLOC_CNT</em> </td><td>
+<p>Used for getting the count of memory allocations performed by librsb employing librsb's memory allocation wrapper (if disabled, will return zero); pointed <code>const</code> <code>size_t*</code>; (this is for debugging purposes). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ggae0bada88731b01751401847d60110fb6a87d7018453cb3179349f12f9e4667b24"></a>RSB_IO_WANT_MEM_ALLOC_TOT</em> </td><td>
+<p>Used for getting the total amount of memory allocated by librsb employing librsb's memory allocation wrapper (if disabled, will return zero); pointed <code>const</code> <code>size_t*</code>; (this is for debugging purposes). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ggae0bada88731b01751401847d60110fb6a1584d16b27130ebda9f7fefa1d89afa5"></a>RSB_IO_WANT_LEAF_LEVEL_MULTIVEC</em> </td><td>
+<p>Specifies whether the default multi-vector ops shall act at a leaf level (default value of 0 is yes). Specified as a pointed integer (<a class="el" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a>) number, in {-1,[0]}. (internal) </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ggae0bada88731b01751401847d60110fb6a124bff2579d966823c2371e304656f84"></a>RSB_IO_WANT_MAX_MEMORY_ALLOCATIONS</em> </td><td>
+<p>Specifies an upper limit to the count of allocated memory areas (default value of 0 means no limit). Specified as a pointed <code>size_t</code>. Only works if the memory wrapper (<code>--enable-allocator-wrapper</code>) has been specified at configure time. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ggae0bada88731b01751401847d60110fb6ab053d73dfb6ce061b9d95a2f7e908dc9"></a>RSB_IO_WANT_MAX_MEMORY_ALLOCATED</em> </td><td>
+<p>Specifies an upper limit to the amount of allocated memory (default value of 0 means no limit). Specified as a pointed <code>size_t</code>. Only works if the memory wrapper (<code>--enable-allocator-wrapper</code>) has been specified at configure time. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ggae0bada88731b01751401847d60110fb6ae900da85e3fc1f46083ee0abf34db1d9"></a>RSB_IO_WANT_LIBRSB_ETIME</em> </td><td>
+<p>Represents time spent in librsb. Specified as a pointed <a class="el" href="rsb_8h.html#ab7a0af874a2765e9271a63ee4acf3d5d">rsb_time_t</a>. Only works if statistics collection (<code>--enable-librsb-stats</code>) was specified at configure time. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ggae0bada88731b01751401847d60110fb6a2164b61bd47cf53a3c8d287b419ab591"></a>RSB_IO_WANT_VERBOSE_TUNING</em> </td><td>
+<p>Auto tuning verbosity level for rsb_tune_spmm/rsb_tune_spsm. If 0, no verbosity; if 1, verbose; if 2, verbose with trace files being dumped. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
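+<p>A sketch of passing one such option at initialization time (member names as documented for the <a class="el" href="structrsb__initopts.html">rsb_initopts</a> structure; the <code>RSB_IO_SPECIFIER_SET</code> action macro is assumed to come from <a class="el" href="rsb_8h.html">rsb.h</a>):</p>
+<pre class="fragment">enum rsb_opt_t keys[] = { RSB_IO_WANT_EXECUTING_THREADS };
+rsb_int_t wet = 4;             /* desired executing threads */
+void *values[] = { &amp;wet };
+struct rsb_initopts io;
+io.keys = keys; io.values = values; io.n_pairs = 1; io.action = RSB_IO_SPECIFIER_SET;
+rsb_err_t errval = rsb_lib_init(&amp;io);
+</pre>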
+
+</div>
+</div>
+<h2>Function Documentation</h2>
+<a class="anchor" id="gaa09eca432d5bb8c57fcff5d9ab98dfb8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_coo_sort </td>
+          <td>(</td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> </td>
+          <td class="paramname"><em>nnzA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>nrA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>ncA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td>
+          <td class="paramname"><em>flagsA</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sorts in row-major order the given COO input arrays representing a sparse matrix <img class="formulaInl" alt="$A$" src="form_21.png"/>.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">VA,IA,JA</td><td>Output numerical values (<code>VA</code>) array; output row (<code>IA</code>) and column (<code>JA</code>) indices arrays. </td></tr>
+    <tr><td class="paramname">nnzA</td><td>The number of nonzeroes in the input arrays representing matrix <img class="formulaInl" alt="$A$" src="form_21.png"/>. </td></tr>
+    <tr><td class="paramname">nrA,ncA</td><td>The number of rows and columns of the sparse matrix <img class="formulaInl" alt="$A$" src="form_21.png"/>. </td></tr>
+    <tr><td class="paramname">typecode</td><td>A valid type code for the given (numerical array) input pointer (see <a class="el" href="rsb__types_8h.html#matrix_type_symbols_section">matrix_type_symbols_section</a>). </td></tr>
+    <tr><td class="paramname">flagsA</td><td>A valid combination of matrix storage flags. If unsure, use <a class="el" href="rsb_8h.html#a0ea7640214ee34c87e483c475b15827d">RSB_FLAG_NOFLAGS</a>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a>, <a class="el" href="group__rsb__doc__rsb.html#gaa09eca432d5bb8c57fcff5d9ab98dfb8">rsb_coo_sort</a></dd></dl>
+<dl class="section note"><dt>Note</dt><dd>By invoking with swapped <code>IA</code> and <code>JA</code> (and swapping <code>nrA</code> and <code>ncA</code> as well) one can obtain column major order.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gaa79f69918eafbd8f737b7866a00a0330"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_file_mtx_get_dims </td>
+          <td>(</td>
+          <td class="paramtype">const char * </td>
+          <td class="paramname"><em>filename</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>nrp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>ncp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>nzp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> * </td>
+          <td class="paramname"><em>flagsp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Reads structural information (dimensions, structural flags) for a matrix file into user-specified (and optionally <code>NULL</code>) variables.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">filename</td><td>The specified matrix file name (cannot be <code>NULL</code>). </td></tr>
+    <tr><td class="paramname">nrp,ncp</td><td>Output pointers to rows and columns count variables (can be <code>NULL</code>). </td></tr>
+    <tr><td class="paramname">nzp</td><td>Output pointer to the nonzeroes count variable (can be <code>NULL</code>). </td></tr>
+    <tr><td class="paramname">flagsp</td><td>Output pointer to the detected structural flags variable. Will be a combination of <a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>, <a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">RSB_FLAG_UPPER</a>, <a class="el" href="rsb_8h.html#a183c4b8ead89e452d1c204c92b3f8f61">RSB_FLAG_SYMMETRIC</a>, <a class="el" href="rsb_8h.html#ae3e1d6090dd2912acba58b4bc0530ab7">RSB_FLAG_HERMITIAN</a>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error. If read dimensions are illegal (see <a class="el" hr [...]
+<dl class="section note"><dt>Note</dt><dd>The only sparse matrix file format currently supported is Matrix Market. E.g.: <pre class="fragment">%%MatrixMarket matrix coordinate real symmetric
+%
+% A Hilbert Matrix of order 3, so with 3 rows, 3 columns, and 6 nonzeroes.
+%
+3 3 6
+1 1 1.0
+2 1 0.5
+2 2 0.33
+3 1 0.33
+3 2 0.25
+3 3 0.2
+</pre> In the above example header on the first line, you can specify <code>real</code>, <code>complex</code>, or <code>pattern</code> for the numerical type. Any of <code>general</code>, <code>symmetric</code>, or <code>hermitian</code> can be specified for the structure. In case of <code>pattern</code> matrices, only coordinate indices will be loaded (saving <code>pattern</code> matrices is not yet supported); in case of <code>real</code> matrices, also one coefficient value will [...]
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gaac3c6c033733a8101b9ccf56f8fc7112">rsb_mtx_get_coo</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4adca460f50bc1ad7d9ffdfda2273b87">rsb_mtx_get_csr</a>, <a class="el" href="group__rsb__doc__rsb.html#gaa01c4a69db732f99e8a960ee8c9afa23">rsb_mtx_get_rows_sparse</a>, <a class="el" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb_mtx_get_coo_block</a>, <a class="el" href="g [...]
+
+</div>
+</div>
+<a class="anchor" id="ga00833b0cf57da8e430f9d0e2b5375bb3"></a>
+<div class="memitem">
+<div class="memproto">
+<table class="mlabels">
+  <tr>
+  <td class="mlabels-left">
+      <table class="memname">
+        <tr>
+          <td class="memname">struct rsb_mtx_t* rsb_file_mtx_load </td>
+          <td>(</td>
+          <td class="paramtype">const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> * </td>
+          <td class="paramname"><em>filename</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td>
+          <td class="paramname"><em>flagsA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> * </td>
+          <td class="paramname"><em>errvalp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+  </td>
+  <td class="mlabels-right">
+<span class="mlabels"><span class="mlabel">read</span></span>  </td>
+  </tr>
+</table>
+</div><div class="memdoc">
+<p>Loads a sparse matrix from the specified matrix file, assembling it in the format specified by flags, using the numerical type representation as specified by the user.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">filename</td><td>The specified matrix file name (cannot be <code>NULL</code>). </td></tr>
+    <tr><td class="paramname">flagsA</td><td>A valid combination of matrix storage flags. </td></tr>
+    <tr><td class="paramname">typecode</td><td>A valid type code for the given (numerical array) input pointer (see <a class="el" href="rsb__types_8h.html#matrix_type_symbols_section">matrix_type_symbols_section</a>). </td></tr>
+    <tr><td class="paramname">errvalp</td><td>An optional (can be <code>NULL</code>) pointer to <a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> where the error status will be written to. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, a valid pointer (<code>struct</code> <code>rsb_mtx_t*</code>) to the newly allocated matrix structure; on error, <code>NULL</code>.</dd></dl>
+<dl class="section note"><dt>Note</dt><dd>The only sparse matrix file format currently supported is Matrix Market. E.g.: <pre class="fragment">%%MatrixMarket matrix coordinate real symmetric
+%
+% A Hilbert Matrix of order 3, so with 3 rows, 3 columns, and 6 nonzeroes.
+%
+3 3 6
+1 1 1.0
+2 1 0.5
+2 2 0.33
+3 1 0.33
+3 2 0.25
+3 3 0.2
+</pre> In the above example header on the first line, you can specify <code>real</code>, <code>complex</code>, or <code>pattern</code> for the numerical type. Any of <code>general</code>, <code>symmetric</code>, or <code>hermitian</code> can be specified for the structure. In case of <code>pattern</code> matrices, only coordinate indices will be loaded (saving <code>pattern</code> matrices is not yet supported); in case of <code>real</code> matrices, also one coefficient value will [...]
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gad9a3eacd54fb7043464006cd57866edf">rsb_mtx_get_info</a>, <a class="el" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str</a>, <a class="el" href="group__rsb__doc__rsb.html#gad911ac7528c95c874d02cb17e6b76c54">rsb_file_mtx_save</a>, <a class="el" href="group__rsb__doc__rsb.html#gad071e0373a08f74ee7ae910e9e4fd140">rsb_file_vec_load</a>, <a class="el" href="group [...]
+
+</div>
+</div>
+<a class="anchor" id="ga4b45a74b985f5cbd869bc9a540951771"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_file_mtx_rndr </td>
+          <td>(</td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>pmp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const char * </td>
+          <td class="paramname"><em>filename</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>pmlWidth</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>pmWidth</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>pmHeight</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#aa8f24976a4e4bdf8403ab433564c2005">rsb_marf_t</a> </td>
+          <td class="paramname"><em>rflags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Renders the matrix contained in a matrix file as a pixel map.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">pmp</td><td>Pixel map array pointer. </td></tr>
+    <tr><td class="paramname">filename</td><td>The specified matrix file name (cannot be <code>NULL</code>). </td></tr>
+    <tr><td class="paramname">pmlWidth</td><td>stride between lines (in pixels; no less than <code>pmWidth</code>). </td></tr>
+    <tr><td class="paramname">pmWidth</td><td>Pixel map width (in pixels or points). </td></tr>
+    <tr><td class="paramname">pmHeight</td><td>Pixel map height (in pixels or points). </td></tr>
+    <tr><td class="paramname">rflags</td><td>The color mode; only <a class="el" href="rsb_8h.html#a53604f78febc54c616282c66bca02daf">RSB_MARF_RGB</a> is supported for now (1 byte per channel, 3 channels — red, green, blue): this requires array <code>pmp</code> to be at least (3*<code>pmlWidth*<code>pmHeight</code>)</code> bytes large. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>This function has not been thoroughly tested.</dd></dl>
+<dl class="section note"><dt>Note</dt><dd>At the time being, <code>pmlWidth</code> is required to be equal to <code>pmWidth</code>. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gab0702d7080d1699162e4201bc70cc5ee">rsb_mtx_rndr</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4b45a74b985f5cbd869bc9a540951771">rsb_file_mtx_rndr</a></dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gad911ac7528c95c874d02cb17e6b76c54"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_file_mtx_save </td>
+          <td>(</td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> * </td>
+          <td class="paramname"><em>filename</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Saves the given matrix to the specified matrix file.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">filename</td><td>The specified output file name (if <code>NULL</code>, will write to standard output). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Some structural info contained in the matrix structural flags may be lost in the output data.</dd></dl>
+<dl class="section note"><dt>Note</dt><dd>The only sparse matrix file format currently supported is Matrix Market. E.g.: <pre class="fragment">%%MatrixMarket matrix coordinate real symmetric
+%
+% A Hilbert Matrix of order 3, so with 3 rows, 3 columns, and 6 nonzeroes.
+%
+3 3 6
+1 1 1.0
+2 1 0.5
+2 2 0.33
+3 1 0.33
+3 2 0.25
+3 3 0.2
+</pre> In the above example header on the first line, you can specify <code>real</code>, <code>complex</code>, or <code>pattern</code> for the numerical type. Any of <code>general</code>, <code>symmetric</code>, or <code>hermitian</code> can be specified for the structure. In case of <code>pattern</code> matrices, only coordinate indices will be loaded (saving <code>pattern</code> matrices is not yet supported); in case of <code>real</code> matrices, also one coefficient value will [...]
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gad9a3eacd54fb7043464006cd57866edf">rsb_mtx_get_info</a>, <a class="el" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str</a>, <a class="el" href="group__rsb__doc__rsb.html#gad911ac7528c95c874d02cb17e6b76c54">rsb_file_mtx_save</a>, <a class="el" href="group__rsb__doc__rsb.html#gad071e0373a08f74ee7ae910e9e4fd140">rsb_file_vec_load</a>, <a class="el" href="group [...]
+
+</div>
+</div>
+<a class="anchor" id="gad071e0373a08f74ee7ae910e9e4fd140"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_file_vec_load </td>
+          <td>(</td>
+          <td class="paramtype">const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> * </td>
+          <td class="paramname"><em>filename</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>Yp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>yvlp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Loads a dense vector from the specified file, using the numerical type representation as specified by the user. This function is intended to be called in two steps: first with <code>Yp=NULL</code>, in order to write the vector length to <code>*yvlp</code>; then, with <code>yvlp=NULL</code>, to get <code>Yp</code> written.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">filename</td><td>The specified vector file name (cannot be <code>NULL</code>). </td></tr>
+    <tr><td class="paramname">typecode</td><td>A valid type code for the given (numerical array) input pointer (see <a class="el" href="rsb__types_8h.html#matrix_type_symbols_section">matrix_type_symbols_section</a>). </td></tr>
+    <tr><td class="paramname">Yp</td><td>The input array vector. </td></tr>
+    <tr><td class="paramname">yvlp</td><td>An optional pointer (can be <code>NULL</code>). If supplied, vector length will be written here, and no vector will be read. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error.</dd></dl>
+<dl class="section note"><dt>Note</dt><dd>The only dense vector file format currently supported is Matrix Market. E.g.: <pre class="fragment">%%MatrixMarket matrix array complex general
+6           1
+11.000000000000000E+000 12.000000000000000E+000 
+21.000000000000000E+000 22.000000000000000E+000 
+31.000000000000000E+000 32.000000000000000E+000 
+41.000000000000000E+000 42.000000000000000E+000 
+51.000000000000000E+000 52.000000000000000E+000 
+61.000000000000000E+000 62.000000000000000E+000 
+</pre> In the above example header on the first line, you can specify <code>real</code>, <code>complex</code>, or <code>pattern</code> for the numerical type. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gad9a3eacd54fb7043464006cd57866edf">rsb_mtx_get_info</a>, <a class="el" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str</a>, <a class="el" href="group__rsb__doc__rsb.html#gad911ac7528c95c874d02cb17e6b76c54">rsb_file_mtx_save</a>, <a class="el" href="group__rsb__doc__rsb.html#gad071e0373a08f74ee7ae910e9e4fd140">rsb_file_vec_load</a>, <a class="el" href="group [...]
+
+</div>
+</div>
+<a class="anchor" id="gac4b2a63cdfe1cd4083b1561ee4bea696"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_file_vec_save </td>
+          <td>(</td>
+          <td class="paramtype">const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> * </td>
+          <td class="paramname"><em>filename</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>Yp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>yvl</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Saves a dense vector to the specified file, using the numerical type representation as specified by the user. This function assumes <code>Yp!=NULL</code> and <code>yvl>0</code>.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">filename</td><td>The specified vector file name (cannot be <code>NULL</code>). </td></tr>
+    <tr><td class="paramname">typecode</td><td>A valid type code for the given (numerical array) input pointer (see <a class="el" href="rsb__types_8h.html#matrix_type_symbols_section">matrix_type_symbols_section</a>). </td></tr>
+    <tr><td class="paramname">Yp</td><td>The output array vector. </td></tr>
+    <tr><td class="paramname">yvl</td><td>Output vector length. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error.</dd></dl>
+<dl class="section note"><dt>Note</dt><dd>The only dense vector file format currently supported is Matrix Market. E.g.: <pre class="fragment">%%MatrixMarket matrix array complex general
+6           1
+11.000000000000000E+000 12.000000000000000E+000 
+21.000000000000000E+000 22.000000000000000E+000 
+31.000000000000000E+000 32.000000000000000E+000 
+41.000000000000000E+000 42.000000000000000E+000 
+51.000000000000000E+000 52.000000000000000E+000 
+61.000000000000000E+000 62.000000000000000E+000 
+</pre> In the header on the first line of the above example, you can specify <code>real</code>, <code>complex</code>, or <code>pattern</code> as the numerical type. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gad9a3eacd54fb7043464006cd57866edf">rsb_mtx_get_info</a>, <a class="el" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str</a>, <a class="el" href="group__rsb__doc__rsb.html#gad911ac7528c95c874d02cb17e6b76c54">rsb_file_mtx_save</a>, <a class="el" href="group__rsb__doc__rsb.html#gad071e0373a08f74ee7ae910e9e4fd140">rsb_file_vec_load</a>, <a class="el" href="group [...]
+
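+<p>For illustration, a minimal sketch of saving a vector follows (not part of the reference text; <code>RSB_NUMERICAL_TYPE_DOUBLE</code> is assumed to be among the type codes declared in <code>rsb_types.h</code>):</p>
+<pre class="fragment">/* Hedged sketch: write a 3-element double vector to "y.mtx"
+   in Matrix Market array format. */
+#include &lt;rsb.h&gt;
+int main(void)
+{
+    const double y[3] = { 1.0, 2.0, 3.0 };
+    rsb_err_t errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS);
+    if (errval != RSB_ERR_NO_ERROR) return 1;
+    errval = rsb_file_vec_save("y.mtx", RSB_NUMERICAL_TYPE_DOUBLE, y, 3);
+    if (errval != RSB_ERR_NO_ERROR)
+        rsb_perror(NULL, errval); /* print error diagnostics */
+    return rsb_lib_exit(RSB_NULL_EXIT_OPTIONS) == RSB_ERR_NO_ERROR ? 0 : 1;
+}
+</pre>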
+</div>
+</div>
+<a class="anchor" id="ga86db30487afe975ed18a7aa6ee0db81d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_lib_exit </td>
+          <td>(</td>
+          <td class="paramtype">struct <a class="el" href="structrsb__initopts.html">rsb_initopts</a> * </td>
+          <td class="paramname"><em>iop</em></td><td>)</td>
+          <td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Finalize <code>librsb</code>. <br/>
+ <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a> should be called after having freed all matrices. <br/>
+ If not all of the data structures were properly deallocated before, this function may still attempt finalizing the library and return the <a class="el" href="rsb_8h.html#a1b63053f52d6426b726a05b206a3862a">RSB_ERR_MEMORY_LEAK</a> error code (this depends on the <code>--enable-allocator-wrapper</code> configure-time option). Any allocated memory will be lost (<code>librsb</code> does not keep track of allocated matrices). <br/>
+ Internal library state will be cleared. After this call, it is legal to initialize the library again, by calling <a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init()</a>. <br/>
+ On an error, the library state may be inconsistent, so it is advisable to terminate program execution rather than forcing a new initialization with <a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init()</a>. <br/>
+ Parameter <code>iop</code> is reserved for future use; for now it is safe to pass <a class="el" href="rsb_8h.html#a2234a5e51156de6c95c3f8c2951ae09f">RSB_NULL_EXIT_OPTIONS</a>.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">iop</td><td>A pointer to a <a class="el" href="structrsb__initopts.html" title="A structure specifying library (initialization) options, to be used with the rsb_lib_reinit() functio...">rsb_initopts</a> structure with library options. It may be <code>NULL</code> (or better, <a class="el" href="rsb_8h.html#add105c42e570c5c269680d437f8c51e2">RSB_NULL_INIT_OPTIONS</a>/<a class="el" href="rsb_8h.html#a2234a5e51156de6c95c3f8c2951ae09f">RSB_NULL_EXIT_OPTIONS</a>)  [...]
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4670aa682e70f82d5039c600e426a368">rsb_lib_set_opt_str</a>, <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit</a>, <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a>, <a class="el" href="rsb__rsb_8c.html#a [...]
+
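+<p>As an illustration (a sketch, not from the reference text), one may check specifically for the memory-leak diagnostic at finalization:</p>
+<pre class="fragment">/* Sketch: given struct rsb_mtx_t * mtxAp and rsb_err_t errval,
+   free matrices first, then finalize and inspect the result. */
+rsb_mtx_free(mtxAp);                 /* free each allocated matrix */
+errval = rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
+if (errval == RSB_ERR_MEMORY_LEAK)
+    ; /* some allocation was not released before finalization */
+else if (errval != RSB_ERR_NO_ERROR)
+    rsb_perror(NULL, errval);
+</pre>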
+</div>
+</div>
+<a class="anchor" id="gaf2b874d9f117ee6a6899634472b17946"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_lib_init </td>
+          <td>(</td>
+          <td class="paramtype">struct <a class="el" href="structrsb__initopts.html">rsb_initopts</a> * </td>
+          <td class="paramname"><em>iop</em></td><td>)</td>
+          <td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>This is the library initialization function. <br/>
+ It must be called only once before using any other library function. <br/>
+ It is allowed to call it again after <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit()</a>. <br/>
+ To fine-tune the library behaviour, one may specify a number of options via the <code>iop</code> parameter. <br/>
+ Options may be specified also after <a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init()</a> by calling <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit()</a>. <br/>
+ One may call <a class="el" href="rsb_8h.html#ae6f837f13f6413a163f2c6b0c02dadf2">RSB_REINIT_SINGLE_VALUE_GET</a> with flag <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ad901e7c7c31f4b9118bb313db549ea3b">RSB_IO_WANT_IS_INITIALIZED_MARKER</a> to verify whether the library has been initialized or not.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">iop</td><td>A pointer to a <a class="el" href="structrsb__initopts.html" title="A structure specifying library (initialization) options, to be used with the rsb_lib_reinit() functio...">rsb_initopts</a> structure with library options. It may be <code>NULL</code> (or better, <a class="el" href="rsb_8h.html#add105c42e570c5c269680d437f8c51e2">RSB_NULL_INIT_OPTIONS</a>/<a class="el" href="rsb_8h.html#a2234a5e51156de6c95c3f8c2951ae09f">RSB_NULL_EXIT_OPTIONS</a>)  [...]
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4670aa682e70f82d5039c600e426a368">rsb_lib_set_opt_str</a>, <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit</a>, <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a>, <a class="el" href="rsb__rsb_8c.html#a [...]
+
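+<p>A minimal initialization/finalization skeleton (an illustrative sketch, not from the reference text):</p>
+<pre class="fragment">#include &lt;rsb.h&gt;
+int main(void)
+{
+    rsb_err_t errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS); /* default options */
+    if (errval != RSB_ERR_NO_ERROR)
+        return 1;
+    /* ... allocate matrices, compute, free matrices ... */
+    return rsb_lib_exit(RSB_NULL_EXIT_OPTIONS) == RSB_ERR_NO_ERROR ? 0 : 1;
+}
+</pre>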
+</div>
+</div>
+<a class="anchor" id="ga1707f8b0c28805f692146cf2fb28ae70"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_lib_reinit </td>
+          <td>(</td>
+          <td class="paramtype">struct <a class="el" href="structrsb__initopts.html">rsb_initopts</a> * </td>
+          <td class="paramname"><em>iop</em></td><td>)</td>
+          <td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Changes the library operation options which were set at initialization time, either by the user or by default. <br/>
+ Not all options may be supported, depending on build time library settings. <br/>
+ If an unsupported option was specified, an appropriate error (e.g.: <a class="el" href="rsb_8h.html#ab4f407e7c8364bee51cc77546d6f0922">RSB_ERR_UNSUPPORTED_OPERATION</a>) will be returned. <br/>
+ On the first error, option processing is interrupted and the remaining options (if any) are not processed. <br/>
+ Program execution may continue safely even if an error code is returned (that is, library status should be consistent). <br/>
+</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">iop</td><td>A pointer to a <a class="el" href="structrsb__initopts.html" title="A structure specifying library (initialization) options, to be used with the rsb_lib_reinit() functio...">rsb_initopts</a> structure with library options. It may be <code>NULL</code> (or better, <a class="el" href="rsb_8h.html#add105c42e570c5c269680d437f8c51e2">RSB_NULL_INIT_OPTIONS</a>/<a class="el" href="rsb_8h.html#a2234a5e51156de6c95c3f8c2951ae09f">RSB_NULL_EXIT_OPTIONS</a>)  [...]
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4670aa682e70f82d5039c600e426a368">rsb_lib_set_opt_str</a>, <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit</a>, <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a>, <a class="el" href="rsb__rsb_8c.html#a [...]
+
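+<p>An illustrative sketch of changing one option at run time. The <code>struct rsb_initopts</code> field names (<code>keys</code>, <code>values</code>, <code>n_pairs</code>, <code>action</code>) and the identifiers <code>RSB_IO_SPECIFIER_SET</code> and <code>RSB_IO_WANT_VERBOSE_TUNING</code> are assumptions to be checked against the installed <code>rsb.h</code>:</p>
+<pre class="fragment">/* Sketch under the above assumptions: request verbose auto-tuning. */
+enum rsb_opt_t keys[1] = { RSB_IO_WANT_VERBOSE_TUNING }; /* assumed identifier */
+rsb_int_t vtv = 1;
+void * values[1] = { &amp;vtv };
+struct rsb_initopts io;
+rsb_err_t errval = RSB_ERR_NO_ERROR;
+io.keys = keys;       /* option identifiers */
+io.values = values;   /* pointers to the corresponding values */
+io.n_pairs = 1;       /* one key/value pair */
+io.action = RSB_IO_SPECIFIER_SET; /* set (rather than get) the values */
+errval = rsb_lib_reinit(&amp;io);
+</pre>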
+</div>
+</div>
+<a class="anchor" id="ga4670aa682e70f82d5039c600e426a368"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_lib_set_opt_str </td>
+          <td>(</td>
+          <td class="paramtype">const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> * </td>
+          <td class="paramname"><em>opnp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> * </td>
+          <td class="paramname"><em>opvp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Specifies individual library options in order to fine-tune the library behaviour. Both the option name and the value shall be expressed as strings, identical to their preprocessor identifiers (see <a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6" title="library option values for rsb_lib_init, rsb_lib_set_opt_str, rsb_lib_reinit, rsb_lib_exit, rsb_lib_get_opt, rsb_lib_set_opt, or (deprecated) macros RSB_REINIT_SINGLE_VALUE_GET, RSB_REINIT_SINGLE_VALUE_SE [...]
+</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">opnp</td><td>A pointer to a library option input name string (may not be <code>NULL</code>). </td></tr>
+    <tr><td class="paramname">opvp</td><td>A pointer to a library option input value string (may not be <code>NULL</code>). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error.</dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4670aa682e70f82d5039c600e426a368">rsb_lib_set_opt_str</a>, <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit</a>, <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a>, <a class="el" href="rsb__rsb_8c.html#a [...]
+
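+<p>An illustrative sketch (the option name used here, <code>RSB_IO_WANT_VERBOSE_TUNING</code>, is an assumed example of the preprocessor identifiers mentioned above):</p>
+<pre class="fragment">/* Sketch: both the option name and its value are passed as strings. */
+rsb_err_t errval = rsb_lib_set_opt_str("RSB_IO_WANT_VERBOSE_TUNING", "1");
+if (errval != RSB_ERR_NO_ERROR)
+    rsb_perror(NULL, errval);
+</pre>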
+</div>
+</div>
+<a class="anchor" id="gaf30a70ea183d30d216f700782fc01524"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_mtx_add_to_dense </td>
+          <td>(</td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> </td>
+          <td class="paramname"><em>ldB</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> </td>
+          <td class="paramname"><em>nrB</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> </td>
+          <td class="paramname"><em>ncB</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#aeeac94f4bf43460df839c8decd897523">rsb_bool_t</a> </td>
+          <td class="paramname"><em>rowmajorB</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>Bp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Dense matrix B is updated by adding scaled sparse matrix <img class="formulaInl" alt="${A}$" src="form_45.png"/> to it: <img class="formulaInl" alt="$B \leftarrow B + \alpha {A} $" src="form_46.png"/></p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">alphap</td><td>Optional pointer (if <code>NULL</code>, will default to 1) to a numerical value (of the same type as matrix). </td></tr>
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">ldB</td><td>Leading dimension of <code>Bp</code> array. </td></tr>
+    <tr><td class="paramname">nrB,ncB</td><td>The number of rows and columns for the dense matrix <img class="formulaInl" alt="$B$" src="form_24.png"/>. </td></tr>
+    <tr><td class="paramname">rowmajorB</td><td><a class="el" href="rsb_8h.html#af580e920b9f507028d3b7d34b4dadd6f">RSB_BOOL_TRUE</a> if the dense matrix <img class="formulaInl" alt="$B$" src="form_24.png"/> is considered stored as row major, or <a class="el" href="rsb_8h.html#ad396755fe9a1d81991d5ac238058db18">RSB_BOOL_FALSE</a> if as column major. </td></tr>
+    <tr><td class="paramname">Bp</td><td>Array representing the dense matrix <img class="formulaInl" alt="$B$" src="form_24.png"/>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>This function has not been thoroughly tested.</dd></dl>
+<dl class="section note"><dt>Note</dt><dd>Please note that it suffices to 'transpose' <code>Bp's</code> description parameters to get <img class="formulaInl" alt="$A$" src="form_21.png"/> transposed summed in. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga74d97612d4af70244c886b9eadd90a0e">rsb_spmsp_to_dense</a>, <a class="el" href="group__rsb__doc__rsb.html#ga30823d02e577e59da4ccff6baaeb8ea1">rsb_sppsp</a>, <a class="el" href="group__rsb__doc__rsb.html#ga8813ccbbb1065ac76bfe22c42feafa05">rsb_spmsp</a>, <a class="el" href="group__rsb__doc__rsb.html#gaf30a70ea183d30d216f700782fc01524">rsb_mtx_add_to_dense</a></dd></dl>
+
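+<p>An illustrative sketch (not from the reference text) adding twice a 2x2 diagonal sparse matrix to a row-major dense 2x2 array; <code>RSB_NUMERICAL_TYPE_DOUBLE</code> is assumed from <code>rsb_types.h</code>:</p>
+<pre class="fragment">/* Sketch: B &lt;- B + 2*A, with A = diag(1,1) stored as RSB. */
+const rsb_coo_idx_t IA[2] = { 0, 1 }, JA[2] = { 0, 1 };
+const double VA[2] = { 1.0, 1.0 };
+double B[4] = { 0.0, 0.0, 0.0, 0.0 }; /* 2x2, row major, ldB=2 */
+const double alpha = 2.0;
+rsb_err_t errval = RSB_ERR_NO_ERROR;
+struct rsb_mtx_t * mtxAp = rsb_mtx_alloc_from_coo_const(VA, IA, JA, 2,
+    RSB_NUMERICAL_TYPE_DOUBLE, 2, 2, 1, 1,
+    RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS, &amp;errval);
+errval = rsb_mtx_add_to_dense(&amp;alpha, mtxAp, 2, 2, 2, RSB_BOOL_TRUE, B);
+rsb_mtx_free(mtxAp);
+</pre>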
+</div>
+</div>
+<a class="anchor" id="gafca80e53d47a7ec3eb116e755fe47c58"></a>
+<div class="memitem">
+<div class="memproto">
+<table class="mlabels">
+  <tr>
+  <td class="mlabels-left">
+      <table class="memname">
+        <tr>
+          <td class="memname">struct rsb_mtx_t* rsb_mtx_alloc_from_coo_begin </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> </td>
+          <td class="paramname"><em>nnzA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>nrA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>ncA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td>
+          <td class="paramname"><em>flagsA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> * </td>
+          <td class="paramname"><em>errvalp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+  </td>
+  <td class="mlabels-right">
+<span class="mlabels"><span class="mlabel">read</span></span>  </td>
+  </tr>
+</table>
+</div><div class="memdoc">
+<p>Creates an empty matrix structure in assembly state. The user then populates it using <a class="el" href="group__rsb__doc__rsb.html#gab8069ad6d5a67bc8a726131891e98c46">rsb_mtx_set_vals()</a> repeatedly; then assembles it with <a class="el" href="group__rsb__doc__rsb.html#gab583fbefa0a66e9d30dac034480c2d86">rsb_mtx_alloc_from_coo_end()</a>.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">nnzA</td><td>A rough estimate of the number of nonzeroes matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> will host (used for optimizing arrays allocation). If you do not know yet, you can specify zero. </td></tr>
+    <tr><td class="paramname">typecode</td><td>A valid type code for the given (numerical array) input pointer (see <a class="el" href="rsb__types_8h.html#matrix_type_symbols_section">matrix_type_symbols_section</a>). </td></tr>
+    <tr><td class="paramname">nrA,ncA</td><td>The number of rows and columns of the sparse matrix <img class="formulaInl" alt="$A$" src="form_21.png"/>. </td></tr>
+    <tr><td class="paramname">flagsA</td><td>A valid combination of index conversion and matrix storage flags and other meaningful flags. The encouraged base choice here is <a class="el" href="rsb_8h.html#a17c314e28220f3b81aed9cc7d79f97e4">RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS</a>. If Fortran (1 based) indices are being used for the IA, JA arrays, then the <a class="el" href="rsb_8h.html#a8ccb4d7203ce7707f9d13bd6c5ef4169">RSB_FLAG_FORTRAN_INDICES_INTERFACE</a> flag should be added. If symmet [...]
+    <tr><td class="paramname">errvalp</td><td>An optional (can be <code>NULL</code>) pointer to <a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> where the error status will be written to. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>Pointer to a <code>rsb_mtx_t</code> matrix structure in assembly state, or <code>NULL</code> (on error). </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>This function has not been thoroughly tested. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_from_coo_const</a>, <a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_mtx_alloc_from_coo_inplace</a>, <a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a>, <a class="el" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a>, <a class="el [...]
+
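+<p>An illustrative sketch of the whole assembly cycle (the argument order of <code>rsb_mtx_set_vals()</code> shown here is an assumption to be checked against <code>rsb.h</code>):</p>
+<pre class="fragment">/* Sketch: assemble a 2x2 diagonal matrix incrementally. */
+rsb_err_t errval = RSB_ERR_NO_ERROR;
+struct rsb_mtx_t * mtxAp = rsb_mtx_alloc_from_coo_begin(2,
+    RSB_NUMERICAL_TYPE_DOUBLE, 2, 2,
+    RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS, &amp;errval);
+const rsb_coo_idx_t IA[2] = { 0, 1 }, JA[2] = { 0, 1 };
+const double VA[2] = { 1.0, 2.0 };
+errval = rsb_mtx_set_vals(mtxAp, VA, IA, JA, 2, RSB_FLAG_NOFLAGS);
+errval = rsb_mtx_alloc_from_coo_end(&amp;mtxAp); /* mtxAp may change address */
+/* ... use the matrix ... */
+rsb_mtx_free(mtxAp);
+</pre>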
+</div>
+</div>
+<a class="anchor" id="ga86c1b0d0586f817ee31ca1caa3fee9be"></a>
+<div class="memitem">
+<div class="memproto">
+<table class="mlabels">
+  <tr>
+  <td class="mlabels-left">
+      <table class="memname">
+        <tr>
+          <td class="memname">struct rsb_mtx_t* rsb_mtx_alloc_from_coo_const </td>
+          <td>(</td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> </td>
+          <td class="paramname"><em>nnzA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>nrA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>ncA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac6a4411e32793f5c150c6ab3c6f7e14e">rsb_blk_idx_t</a> </td>
+          <td class="paramname"><em>brA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac6a4411e32793f5c150c6ab3c6f7e14e">rsb_blk_idx_t</a> </td>
+          <td class="paramname"><em>bcA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td>
+          <td class="paramname"><em>flagsA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> * </td>
+          <td class="paramname"><em>errvalp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+  </td>
+  <td class="mlabels-right">
+<span class="mlabels"><span class="mlabel">read</span></span>  </td>
+  </tr>
+</table>
+</div><div class="memdoc">
+<p>Given the input COO arrays <code>VA</code>, <code>IA</code>, <code>JA</code>, allocates and assembles an RSB matrix (stored in separate arrays).</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">VA,IA,JA</td><td>Input numerical values (<code>VA</code>) array; row (<code>IA</code>) and column (<code>JA</code>) input indices arrays. </td></tr>
+    <tr><td class="paramname">nnzA</td><td>The number of nonzeroes in the input arrays representing matrix <img class="formulaInl" alt="$A$" src="form_21.png"/>. </td></tr>
+    <tr><td class="paramname">typecode</td><td>A valid type code for the given (numerical array) input pointer (see <a class="el" href="rsb__types_8h.html#matrix_type_symbols_section">matrix_type_symbols_section</a>). </td></tr>
+    <tr><td class="paramname">nrA,ncA</td><td>The number of rows and columns of the sparse matrix <img class="formulaInl" alt="$A$" src="form_21.png"/>. </td></tr>
+    <tr><td class="paramname">brA,bcA</td><td>Blocking parameters: <code>brA</code> should be set to 1 or <a class="el" href="rsb_8h.html#a7be45869842d6ecc5646740350d27d26">RSB_DEFAULT_ROW_BLOCKING</a> (currently unused, reserved for future use); <code>bcA</code> should be set to 1 or <a class="el" href="rsb_8h.html#a7be45869842d6ecc5646740350d27d26">RSB_DEFAULT_ROW_BLOCKING</a> (currently unused, reserved for future use). </td></tr>
+    <tr><td class="paramname">flagsA</td><td>A valid combination of index conversion and matrix storage flags and other meaningful flags. The encouraged base choice here is <a class="el" href="rsb_8h.html#a17c314e28220f3b81aed9cc7d79f97e4">RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS</a>. If Fortran (1 based) indices are being used for the IA, JA arrays, then the <a class="el" href="rsb_8h.html#a8ccb4d7203ce7707f9d13bd6c5ef4169">RSB_FLAG_FORTRAN_INDICES_INTERFACE</a> flag should be added. If symmet [...]
+    <tr><td class="paramname">errvalp</td><td>An optional (can be <code>NULL</code>) pointer to <a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> where the error status will be written to. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, a valid pointer (<code>struct</code> <code>rsb_mtx_t*</code>) to the newly allocated matrix structure; on error, <code>NULL</code>. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_from_coo_const</a>, <a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_mtx_alloc_from_coo_inplace</a>, <a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a>, <a class="el" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a>, <a class="el [...]
+
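+<p>An illustrative sketch (the type code macro is assumed from <code>rsb_types.h</code>):</p>
+<pre class="fragment">/* Sketch: assemble a 3x3 matrix with three nonzeroes from COO arrays. */
+const rsb_coo_idx_t IA[3] = { 0, 1, 2 }, JA[3] = { 0, 1, 2 };
+const double VA[3] = { 11.0, 22.0, 33.0 };
+rsb_err_t errval = RSB_ERR_NO_ERROR;
+struct rsb_mtx_t * mtxAp = rsb_mtx_alloc_from_coo_const(VA, IA, JA, 3,
+    RSB_NUMERICAL_TYPE_DOUBLE, 3, 3, 1, 1,
+    RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS, &amp;errval);
+if (mtxAp == NULL)
+    rsb_perror(NULL, errval);
+</pre>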
+</div>
+</div>
+<a class="anchor" id="gab583fbefa0a66e9d30dac034480c2d86"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_mtx_alloc_from_coo_end </td>
+          <td>(</td>
+          <td class="paramtype">struct rsb_mtx_t ** </td>
+          <td class="paramname"><em>mtxApp</em></td><td>)</td>
+          <td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Assembles the RSB arrays of a matrix in assembly state, i.e. created with <a class="el" href="group__rsb__doc__rsb.html#gafca80e53d47a7ec3eb116e755fe47c58">rsb_mtx_alloc_from_coo_begin()</a> and populated with <a class="el" href="group__rsb__doc__rsb.html#gab8069ad6d5a67bc8a726131891e98c46">rsb_mtx_set_vals()</a>. After assembly, any operation on the matrix is allowed.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">mtxApp</td><td><code>rsb_mtx_t</code> pointer to an unassembled matrix address. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>This function has not been thoroughly tested. </dd></dl>
+<dl class="section note"><dt>Note</dt><dd>Note that the memory location of the matrix will be changed by this call, and the (old) <code>*mtxApp</code> address value will be not valid anymore. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_from_coo_const</a>, <a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_mtx_alloc_from_coo_inplace</a>, <a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a>, <a class="el" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a>, <a class="el [...]
+
+</div>
+</div>
+<a class="anchor" id="ga3b7f9a461377de348b33a873f2e1893f"></a>
+<div class="memitem">
+<div class="memproto">
+<table class="mlabels">
+  <tr>
+  <td class="mlabels-left">
+      <table class="memname">
+        <tr>
+          <td class="memname">struct rsb_mtx_t* rsb_mtx_alloc_from_coo_inplace </td>
+          <td>(</td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> </td>
+          <td class="paramname"><em>nnzA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>nrA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>ncA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac6a4411e32793f5c150c6ab3c6f7e14e">rsb_blk_idx_t</a> </td>
+          <td class="paramname"><em>brA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac6a4411e32793f5c150c6ab3c6f7e14e">rsb_blk_idx_t</a> </td>
+          <td class="paramname"><em>bcA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td>
+          <td class="paramname"><em>flagsA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> * </td>
+          <td class="paramname"><em>errvalp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+  </td>
+  <td class="mlabels-right">
+<span class="mlabels"><span class="mlabel">read</span></span>  </td>
+  </tr>
+</table>
+</div><div class="memdoc">
+<p>Given the input COO arrays <code>VA</code>, <code>IA</code>, <code>JA</code>, allocates and assembles an RSB matrix reusing the input arrays (no separate copy is made). <br/>
+ Assumes all three arrays <code>VA</code>, <code>IA</code>, <code>JA</code> are sized at least min(<code>nnzA</code>,<code>nrA+1</code>,<code>ncA+1</code>). The user must NOT access these arrays until the matrix has been destroyed with <a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free()</a>; after that, the arrays may be used again.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">VA,IA,JA</td><td>Input/output numerical values array (<code>VA</code>); row (<code>IA</code>) and column (<code>JA</code>) indices arrays. </td></tr>
+    <tr><td class="paramname">nnzA</td><td>The number of nonzeroes in the input arrays representing matrix <img class="formulaInl" alt="$A$" src="form_21.png"/>. </td></tr>
+    <tr><td class="paramname">typecode</td><td>A valid type code for the given (numerical array) input pointer (see <a class="el" href="rsb__types_8h.html#matrix_type_symbols_section">matrix_type_symbols_section</a>). </td></tr>
+    <tr><td class="paramname">nrA,ncA</td><td>The number of rows and columns of the sparse matrix <img class="formulaInl" alt="$A$" src="form_21.png"/>. </td></tr>
+    <tr><td class="paramname">brA,bcA</td><td>Blocking parameters: <code>brA</code> should be set to 1 or <a class="el" href="rsb_8h.html#a7be45869842d6ecc5646740350d27d26">RSB_DEFAULT_ROW_BLOCKING</a> (currently unused, reserved for future use); <code>bcA</code> should be set to 1 or <a class="el" href="rsb_8h.html#a7be45869842d6ecc5646740350d27d26">RSB_DEFAULT_ROW_BLOCKING</a> (currently unused, reserved for future use). </td></tr>
+    <tr><td class="paramname">flagsA</td><td>A valid combination of index conversion and matrix storage flags and other meaningful flags. The encouraged base choice here is <a class="el" href="rsb_8h.html#a17c314e28220f3b81aed9cc7d79f97e4">RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS</a>. If Fortran (1 based) indices are being used for the IA, JA arrays, then the <a class="el" href="rsb_8h.html#a8ccb4d7203ce7707f9d13bd6c5ef4169">RSB_FLAG_FORTRAN_INDICES_INTERFACE</a> flag should be added. If symmet [...]
+    <tr><td class="paramname">errvalp</td><td>An optional (can be <code>NULL</code>) pointer to <a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> where the error status will be written to. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, a valid pointer (<code>struct</code> <code>rsb_mtx_t*</code>) to the newly allocated matrix structure; on error, <code>NULL</code>.</dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_from_coo_const</a>, <a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_mtx_alloc_from_coo_inplace</a>, <a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a>, <a class="el" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a>, <a class="el [...]
+
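+<p>An illustrative sketch; note that the arrays are sized to satisfy the requirement stated above and must not be touched until <code>rsb_mtx_free()</code>:</p>
+<pre class="fragment">/* Sketch: 2x2 matrix, 3 nonzeroes; arrays sized 3 &gt;= nrA+1. */
+rsb_coo_idx_t IA[3] = { 0, 1, 1 }, JA[3] = { 0, 0, 1 };
+double VA[3] = { 11.0, 21.0, 22.0 };
+rsb_err_t errval = RSB_ERR_NO_ERROR;
+struct rsb_mtx_t * mtxAp = rsb_mtx_alloc_from_coo_inplace(VA, IA, JA, 3,
+    RSB_NUMERICAL_TYPE_DOUBLE, 2, 2, 1, 1,
+    RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS, &amp;errval);
+/* VA, IA, JA are owned by the matrix from here on. */
+rsb_mtx_free(mtxAp); /* after this, VA, IA, JA may be reused */
+</pre>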
+</div>
+</div>
+<a class="anchor" id="gaebf57d9e5263f41eb6163581ffc141aa"></a>
+<div class="memitem">
+<div class="memproto">
+<table class="mlabels">
+  <tr>
+  <td class="mlabels-left">
+      <table class="memname">
+        <tr>
+          <td class="memname">struct rsb_mtx_t* rsb_mtx_alloc_from_csc_const </td>
+          <td>(</td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>CP</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> </td>
+          <td class="paramname"><em>nnzA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>nrA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>ncA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac6a4411e32793f5c150c6ab3c6f7e14e">rsb_blk_idx_t</a> </td>
+          <td class="paramname"><em>brA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac6a4411e32793f5c150c6ab3c6f7e14e">rsb_blk_idx_t</a> </td>
+          <td class="paramname"><em>bcA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td>
+          <td class="paramname"><em>flagsA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> * </td>
+          <td class="paramname"><em>errvalp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+  </td>
+  <td class="mlabels-right">
+<span class="mlabels"><span class="mlabel">read</span></span>  </td>
+  </tr>
+</table>
+</div><div class="memdoc">
+<p>Given input read-only CSC format arrays, allocates and assembles an RSB matrix (stored in separate arrays).</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">VA,IA,CP</td><td>Input numerical values (<code>VA</code>) array, input row indices (<code>IA</code>) and compressed column (<code>CP</code>) indices arrays. </td></tr>
+    <tr><td class="paramname">nnzA</td><td>The number of nonzeroes in the input arrays representing matrix <img class="formulaInl" alt="$A$" src="form_21.png"/>. </td></tr>
+    <tr><td class="paramname">typecode</td><td>A valid type code for the given (numerical array) input pointer (see <a class="el" href="rsb__types_8h.html#matrix_type_symbols_section">matrix_type_symbols_section</a>). </td></tr>
+    <tr><td class="paramname">nrA,ncA</td><td>The number of rows and columns of the sparse matrix <img class="formulaInl" alt="$A$" src="form_21.png"/>. </td></tr>
+    <tr><td class="paramname">brA,bcA</td><td>Blocking parameters: <code>brA</code> should be set to 1 or <a class="el" href="rsb_8h.html#a7be45869842d6ecc5646740350d27d26">RSB_DEFAULT_ROW_BLOCKING</a> (currently unused, reserved for future use); <code>bcA</code> should be set to 1 or <a class="el" href="rsb_8h.html#a7be45869842d6ecc5646740350d27d26">RSB_DEFAULT_ROW_BLOCKING</a> (currently unused, reserved for future use). </td></tr>
+    <tr><td class="paramname">flagsA</td><td>A valid combination of index conversion and matrix storage flags and other meaningful flags. The encouraged base choice here is <a class="el" href="rsb_8h.html#a17c314e28220f3b81aed9cc7d79f97e4">RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS</a>. If Fortran (1 based) indices are being used for the IA, JA arrays, then the <a class="el" href="rsb_8h.html#a8ccb4d7203ce7707f9d13bd6c5ef4169">RSB_FLAG_FORTRAN_INDICES_INTERFACE</a> flag should be added. If symmet [...]
+    <tr><td class="paramname">errvalp</td><td>An optional (can be <code>NULL</code>) pointer to <a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> where the error status will be written to. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, a valid pointer (<code>struct</code> <code>rsb_mtx_t*</code>) to the newly allocated matrix structure; on error, <code>NULL</code>. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_from_coo_const</a>, <a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_mtx_alloc_from_coo_inplace</a>, <a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a>, <a class="el" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a>, <a class="el [...]
+
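+<p>An illustrative sketch (the type code macro is assumed from <code>rsb_types.h</code>):</p>
+<pre class="fragment">/* Sketch: 2x2 diagonal matrix in CSC form (CP has ncA+1 entries). */
+const rsb_coo_idx_t IA[2] = { 0, 1 };        /* row indices */
+const rsb_coo_idx_t CP[3] = { 0, 1, 2 };     /* column pointers */
+const double VA[2] = { 11.0, 22.0 };
+rsb_err_t errval = RSB_ERR_NO_ERROR;
+struct rsb_mtx_t * mtxAp = rsb_mtx_alloc_from_csc_const(VA, IA, CP, 2,
+    RSB_NUMERICAL_TYPE_DOUBLE, 2, 2, 1, 1,
+    RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS, &amp;errval);
+</pre>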
+</div>
+</div>
+<a class="anchor" id="ga13d417f776654fd159f274e56191573e"></a>
+<div class="memitem">
+<div class="memproto">
+<table class="mlabels">
+  <tr>
+  <td class="mlabels-left">
+      <table class="memname">
+        <tr>
+          <td class="memname">struct rsb_mtx_t* rsb_mtx_alloc_from_csr_const </td>
+          <td>(</td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>RP</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> </td>
+          <td class="paramname"><em>nnzA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>nrA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>ncA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac6a4411e32793f5c150c6ab3c6f7e14e">rsb_blk_idx_t</a> </td>
+          <td class="paramname"><em>brA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac6a4411e32793f5c150c6ab3c6f7e14e">rsb_blk_idx_t</a> </td>
+          <td class="paramname"><em>bcA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td>
+          <td class="paramname"><em>flagsA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> * </td>
+          <td class="paramname"><em>errvalp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+  </td>
+  <td class="mlabels-right">
+<span class="mlabels"><span class="mlabel">read</span></span>  </td>
+  </tr>
+</table>
+</div><div class="memdoc">
+<p>Given input read-only CSR format arrays, allocates and assembles an RSB matrix (stored in separate arrays).</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">VA,RP,JA</td><td>Input numerical values (<code>VA</code>) array; compressed rows (<code>RP</code>) and column (<code>JA</code>) input indices arrays. </td></tr>
+    <tr><td class="paramname">nnzA</td><td>The number of nonzeroes in the input arrays representing matrix <img class="formulaInl" alt="$A$" src="form_21.png"/>. </td></tr>
+    <tr><td class="paramname">typecode</td><td>A valid type code for the given (numerical array) input pointer (see <a class="el" href="rsb__types_8h.html#matrix_type_symbols_section">matrix_type_symbols_section</a>). </td></tr>
+    <tr><td class="paramname">nrA,ncA</td><td>The number of rows and columns of the sparse matrix <img class="formulaInl" alt="$A$" src="form_21.png"/>. </td></tr>
+    <tr><td class="paramname">brA,bcA</td><td>Blocking parameters: <code>brA</code> should be set to 1 or <a class="el" href="rsb_8h.html#a7be45869842d6ecc5646740350d27d26">RSB_DEFAULT_ROW_BLOCKING</a> (currently unused, reserved for future use); <code>bcA</code> should be set to 1 or <a class="el" href="rsb_8h.html#a7be45869842d6ecc5646740350d27d26">RSB_DEFAULT_ROW_BLOCKING</a> (currently unused, reserved for future use). </td></tr>
+    <tr><td class="paramname">flagsA</td><td>A valid combination of index conversion and matrix storage flags and other meaningful flags. The encouraged base choice here is <a class="el" href="rsb_8h.html#a17c314e28220f3b81aed9cc7d79f97e4">RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS</a>. If Fortran (1 based) indices are being used for the IA, JA arrays, then the <a class="el" href="rsb_8h.html#a8ccb4d7203ce7707f9d13bd6c5ef4169">RSB_FLAG_FORTRAN_INDICES_INTERFACE</a> flag should be added. If symmet [...]
+    <tr><td class="paramname">errvalp</td><td>An optional (can be <code>NULL</code>) pointer to <a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> where the error status will be written to. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, a valid pointer (<code>struct</code> <code>rsb_mtx_t*</code>) to the newly allocated matrix structure; on error, <code>NULL</code>. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_from_coo_const</a>, <a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_mtx_alloc_from_coo_inplace</a>, <a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a>, <a class="el" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a>, <a class="el [...]
+
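+<p>An illustrative sketch (the type code macro is assumed from <code>rsb_types.h</code>):</p>
+<pre class="fragment">/* Sketch: 2x2 diagonal matrix in CSR form (RP has nrA+1 entries). */
+const rsb_coo_idx_t RP[3] = { 0, 1, 2 };     /* row pointers */
+const rsb_coo_idx_t JA[2] = { 0, 1 };        /* column indices */
+const double VA[2] = { 11.0, 22.0 };
+rsb_err_t errval = RSB_ERR_NO_ERROR;
+struct rsb_mtx_t * mtxAp = rsb_mtx_alloc_from_csr_const(VA, RP, JA, 2,
+    RSB_NUMERICAL_TYPE_DOUBLE, 2, 2, 1, 1,
+    RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS, &amp;errval);
+</pre>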
+</div>
+</div>
+<a class="anchor" id="ga60121166daf00968ba717931f04ea455"></a>
+<div class="memitem">
+<div class="memproto">
+<table class="mlabels">
+  <tr>
+  <td class="mlabels-left">
+      <table class="memname">
+        <tr>
+          <td class="memname">struct rsb_mtx_t* rsb_mtx_alloc_from_csr_inplace </td>
+          <td>(</td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> * </td>
+          <td class="paramname"><em>RP</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> </td>
+          <td class="paramname"><em>nnzA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>nrA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>ncA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac6a4411e32793f5c150c6ab3c6f7e14e">rsb_blk_idx_t</a> </td>
+          <td class="paramname"><em>brA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac6a4411e32793f5c150c6ab3c6f7e14e">rsb_blk_idx_t</a> </td>
+          <td class="paramname"><em>bcA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td>
+          <td class="paramname"><em>flagsA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> * </td>
+          <td class="paramname"><em>errvalp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+  </td>
+  <td class="mlabels-right">
+<span class="mlabels"><span class="mlabel">read</span></span>  </td>
+  </tr>
+</table>
+</div><div class="memdoc">
+<p>Given the input CSR arrays <code>VA</code>, <code>RP</code>, <code>JA</code>, allocates and assembles an RSB matrix reusing the input arrays (no separate copy is made). <br/>
+ Assumes all three arrays <code>VA</code>, <code>RP</code>, <code>JA</code> are sized at least min(<code>nnzA</code>,<code>nrA+1</code>,<code>ncA+1</code>). The user must NOT access these arrays until the matrix has been destroyed with <a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free()</a>; after that, the arrays may be used again.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">VA,RP,JA</td><td>Input numerical values (<code>VA</code>) array; compressed rows (<code>RP</code>) and column (<code>JA</code>) input indices arrays. Will not be freed by <a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free()</a>. </td></tr>
+    <tr><td class="paramname">nnzA</td><td>The number of nonzeroes in the input arrays representing matrix <img class="formulaInl" alt="$A$" src="form_21.png"/>. </td></tr>
+    <tr><td class="paramname">typecode</td><td>A valid type code for the given (numerical array) input pointer (see <a class="el" href="rsb__types_8h.html#matrix_type_symbols_section">matrix_type_symbols_section</a>). </td></tr>
+    <tr><td class="paramname">nrA,ncA</td><td>The number of rows and columns of the sparse matrix <img class="formulaInl" alt="$A$" src="form_21.png"/>. </td></tr>
+    <tr><td class="paramname">brA,bcA</td><td>Blocking parameters: <code>brA</code> should be set to 1 or <a class="el" href="rsb_8h.html#a7be45869842d6ecc5646740350d27d26">RSB_DEFAULT_ROW_BLOCKING</a> (currently unused, reserved for future use); <code>bcA</code> should be set to 1 or <a class="el" href="rsb_8h.html#a7be45869842d6ecc5646740350d27d26">RSB_DEFAULT_ROW_BLOCKING</a> (currently unused, reserved for future use). </td></tr>
+    <tr><td class="paramname">flagsA</td><td>A valid combination of index conversion and matrix storage flags and other meaningful flags. The encouraged base choice here is <a class="el" href="rsb_8h.html#a17c314e28220f3b81aed9cc7d79f97e4">RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS</a>. If Fortran (1 based) indices are being used for the IA, JA arrays, then the <a class="el" href="rsb_8h.html#a8ccb4d7203ce7707f9d13bd6c5ef4169">RSB_FLAG_FORTRAN_INDICES_INTERFACE</a> flag should be added. If symmet [...]
+    <tr><td class="paramname">errvalp</td><td>An optional (can be <code>NULL</code>) pointer to <a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> where the error status will be written to. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, a valid pointer (<code>struct</code> <code>rsb_mtx_t*</code>) to the newly allocated matrix structure; on error, <code>NULL</code>. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_from_coo_const</a>, <a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_mtx_alloc_from_coo_inplace</a>, <a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a>, <a class="el" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a>, <a class="el [...]
+
+</div>
+</div>
+<a class="anchor" id="gae181671ba19191caa5a282cbde4fdfc5"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_mtx_clone </td>
+          <td>(</td>
+          <td class="paramtype">struct rsb_mtx_t ** </td>
+          <td class="paramname"><em>mtxBpp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td>
+          <td class="paramname"><em>flags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>This function clones a given matrix, allocating a fresh data structure or overwriting an existing one. <br/>
+ Target type (specified by <code>typecode</code>) can be different from that of the matrix. If <code>alphap=NULL</code>, the cloned matrix will not be scaled. <br/>
+ The new structure will be completely separate and independent of the original one. <br/>
+ Examples: </p>
+<div class="fragment"><div class="line"><span class="comment">// will clone the matrix exactly</span></div>
+<div class="line">errval = <a class="code" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a>(&mtxBp,<a class="code" href="rsb__types_8h.html#a532c3e9733221d59bac99cb1f795d266">RSB_NUMERICAL_TYPE_SAME_TYPE</a>,<a class="code" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a>,NULL,mtxAp,<a class="code" href="rsb_8h.html#aacf404fe630d480353ce767fd27ba097">RSB_FLAG_IDENTICAL_FLAGS</a>);</div>
+<div class="line"><span class="comment">// will clone the transpose of the matrix</span></div>
+<div class="line">errval = <a class="code" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a>(&mtxBp,<a class="code" href="rsb__types_8h.html#a532c3e9733221d59bac99cb1f795d266">RSB_NUMERICAL_TYPE_SAME_TYPE</a>,<a class="code" href="rsb__types_8h.html#a37f8cea71946de2f832bdb9d438d5edf">RSB_TRANSPOSITION_T</a>,NULL,mtxAp,<a class="code" href="rsb_8h.html#aacf404fe630d480353ce767fd27ba097">RSB_FLAG_IDENTICAL_FLAGS</a>);</div>
+<div class="line"><span class="comment">// will clone the lower triangle of the matrix</span></div>
+<div class="line">errval = <a class="code" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a>(&mtxBp,<a class="code" href="rsb__types_8h.html#a532c3e9733221d59bac99cb1f795d266">RSB_NUMERICAL_TYPE_SAME_TYPE</a>,<a class="code" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a>,NULL,mtxAp,<a class="code" href="rsb_8h.html#adca72e259846399da3512fcb062ad518">RSB_FLAG_TRIANGULAR</a>|<a class="code" href="rsb_8h.html#aca1c [...]
+</div><!-- fragment --><dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">mtxBpp</td><td>Valid <code>rsb_mtx_t</code> pointer to an address for matrix <img class="formulaInl" alt="$B$" src="form_24.png"/>. If <code>*mtxBpp==NULL</code>, a fresh clone will be assigned there; if not, the existing matrix structure will be freed and allocated to host the new one. The case <code>*mtxBpp==mtxAp</code> is supported. </td></tr>
+    <tr><td class="paramname">typecode</td><td>A valid type code for the desired output matrix (see <a class="el" href="rsb__types_8h.html#matrix_type_symbols_section">matrix_type_symbols_section</a>). </td></tr>
+    <tr><td class="paramname">transA</td><td>Transposition parameter for <img class="formulaInl" alt="$A$" src="form_21.png"/> (see <a class="el" href="rsb__types_8h.html#matrix_transposition_flags_section">matrix_transposition_flags_section</a>). </td></tr>
+    <tr><td class="paramname">alphap</td><td>Optional pointer (if <code>NULL</code>, will default to 1) to a numerical value for scaling the output. Of the type code of <code>mtxAp</code>. </td></tr>
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">flags</td><td>Either <a class="el" href="rsb_8h.html#aacf404fe630d480353ce767fd27ba097">RSB_FLAG_IDENTICAL_FLAGS</a> or a combination of other flags, e.g.: <a class="el" href="rsb_8h.html#a49a9315ba7e702e323eadca04d0d735a">RSB_FLAG_C_INDICES_INTERFACE</a>, <a class="el" href="rsb_8h.html#a183c4b8ead89e452d1c204c92b3f8f61">RSB_FLAG_SYMMETRIC</a>, <a class="el" href="rsb_8h.html#ae3e1d6090dd2912acba58b4bc0530ab7">RSB_FLAG_HERMITIAN</a>, <a class="el" href="rsb [...]
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error.</dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_from_coo_const</a>, <a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_mtx_alloc_from_coo_inplace</a>, <a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a>, <a class="el" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a>, <a class="el [...]
+
+</div>
+</div>
+<a class="anchor" id="gab64a020286a8b58d23d84d4512bd9132"></a>
+<div class="memitem">
+<div class="memproto">
+<table class="mlabels">
+  <tr>
+  <td class="mlabels-left">
+      <table class="memname">
+        <tr>
+          <td class="memname">struct rsb_mtx_t* rsb_mtx_free </td>
+          <td>(</td>
+          <td class="paramtype">struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em></td><td>)</td>
+          <td></td>
+        </tr>
+      </table>
+  </td>
+  <td class="mlabels-right">
+<span class="mlabels"><span class="mlabel">read</span></span>  </td>
+  </tr>
+</table>
+</div><div class="memdoc">
+<p>Frees a previously allocated sparse matrix structure. <br/>
+ If the matrix has the <a class="el" href="rsb_8h.html#a6abc0e23c782b817e2ef96d8294f990d">RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS</a> flag, the three main data arrays VA,IA,JA will not be freed by <a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a> (see <a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_mtx_alloc_from_coo_inplace</a>,<a class="el" href="group__rsb__doc__rsb.html#ga60121166daf00968ba71 [...]
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>Always <code>NULL</code>.</dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_from_coo_const</a>, <a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_mtx_alloc_from_coo_inplace</a>, <a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a>, <a class="el" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a>, <a class="el [...]
+
+</div>
+</div>
+<a class="anchor" id="gaac3c6c033733a8101b9ccf56f8fc7112"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_mtx_get_coo </td>
+          <td>(</td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td>
+          <td class="paramname"><em>flags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Returns the matrix converted to a coordinate (COO) storage format. <br/>
+ Elements will be stored in no particular order. <br/>
+ If there are structural or fill-in zero elements, these will be skipped. <br/>
+ Writes as many entries as there are nonzeroes; use <a class="el" href="group__rsb__doc__rsb.html#gad9a3eacd54fb7043464006cd57866edf">rsb_mtx_get_info</a>(mtxAp,<a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa4c02a263fffec5ad80552c8ce3cc782c">RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T</a>,&nnz) beforehand to find out how many, so that the arrays can be allocated correctly.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">VA,IA,JA</td><td>Output numerical values (<code>VA</code>) array; output row (<code>IA</code>) and column (<code>JA</code>) indices arrays. </td></tr>
+    <tr><td class="paramname">flags</td><td>Either <a class="el" href="rsb_8h.html#a8ccb4d7203ce7707f9d13bd6c5ef4169">RSB_FLAG_FORTRAN_INDICES_INTERFACE</a> or <a class="el" href="rsb_8h.html#a49a9315ba7e702e323eadca04d0d735a">RSB_FLAG_C_INDICES_INTERFACE</a> (see <a class="el" href="rsb_8h.html#flags_section">flags_section</a> flags section). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error.</dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gaac3c6c033733a8101b9ccf56f8fc7112">rsb_mtx_get_coo</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4adca460f50bc1ad7d9ffdfda2273b87">rsb_mtx_get_csr</a>, <a class="el" href="group__rsb__doc__rsb.html#gaa01c4a69db732f99e8a960ee8c9afa23">rsb_mtx_get_rows_sparse</a>, <a class="el" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb_mtx_get_coo_block</a>, <a class="el" href="g [...]
+
+</div>
+</div>
+<a class="anchor" id="ga68115178d85cd28c645058deb0aa6379"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_mtx_get_coo_block </td>
+          <td>(</td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>frA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>lrA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>fcA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>lcA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>IREN</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>JREN</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> * </td>
+          <td class="paramname"><em>rnzp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td>
+          <td class="paramname"><em>flags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Writes in COO format the specified submatrix. Works in two stages: first the user invokes it with <code>VA</code>,IA,JA set to <code>NULL</code> to get <code>*rnzp</code>. Then the <code>VA</code>,IA,JA arrays can be allocated and the function called again, this time with <code>rnzp=NULL</code> but with the <code>VA</code>,IA,JA array pointers non-<code>NULL</code> (or at least one of them).</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">VA,IA,JA</td><td>Output numerical values (<code>VA</code>) array; output row (<code>IA</code>) and column (<code>JA</code>) indices arrays. </td></tr>
+    <tr><td class="paramname">frA,lrA</td><td>First and last row indices. </td></tr>
+    <tr><td class="paramname">fcA,lcA</td><td>First and last column indices. </td></tr>
+    <tr><td class="paramname">IREN,JREN</td><td>Renumbering arrays for <code>IA</code> and <code>JA</code> (respectively rows count and columns count sized). If <code>NULL</code>, no renumbering will be used. </td></tr>
+    <tr><td class="paramname">rnzp</td><td>A pointer where the number of relevant nonzero elements will be written to. </td></tr>
+    <tr><td class="paramname">flags</td><td>Either <a class="el" href="rsb_8h.html#a8ccb4d7203ce7707f9d13bd6c5ef4169">RSB_FLAG_FORTRAN_INDICES_INTERFACE</a> or <a class="el" href="rsb_8h.html#a49a9315ba7e702e323eadca04d0d735a">RSB_FLAG_C_INDICES_INTERFACE</a> (see <a class="el" href="rsb_8h.html#flags_section">flags_section</a> flags section). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error. Examples: <div class="fragment"><div class="line"><s [...]
+<div class="line">errval=<a class="code" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb_mtx_get_coo_block</a>(mtxAp,NULL,NULL,NULL,frA,lrA,fcA,lcA,NULL,NULL,&rnz,flags )</div>
+<div class="line"><span class="comment">// allocate VA, IA, JA to rnz elements</span></div>
+<div class="line">...</div>
+<div class="line"><span class="comment">// get the  rnz  values then</span></div>
+<div class="line">errval=<a class="code" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb_mtx_get_coo_block</a>(mtxAp,  VA,  IA,  JA,frA,lrA,fcA,lcA,NULL,NULL,NULL,flags )</div>
+</div><!-- fragment --></dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Expect this function to change soon (e.g.: have scaling parameters, etc.). Contact the author if you intend to use it. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gaac3c6c033733a8101b9ccf56f8fc7112">rsb_mtx_get_coo</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4adca460f50bc1ad7d9ffdfda2273b87">rsb_mtx_get_csr</a>, <a class="el" href="group__rsb__doc__rsb.html#gaa01c4a69db732f99e8a960ee8c9afa23">rsb_mtx_get_rows_sparse</a>, <a class="el" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb_mtx_get_coo_block</a>, <a class="el" href="g [...]
+
+</div>
+</div>
+<a class="anchor" id="ga4adca460f50bc1ad7d9ffdfda2273b87"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_mtx_get_csr </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> * </td>
+          <td class="paramname"><em>RP</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td>
+          <td class="paramname"><em>flags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Fills the given arrays with the matrix expressed in the CSR format.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">typecode</td><td>A valid type code for the given (numerical array) input pointer (see <a class="el" href="rsb__types_8h.html#matrix_type_symbols_section">matrix_type_symbols_section</a>). </td></tr>
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">VA,RP,JA</td><td>Output numerical values (<code>VA</code>) array, compressed row indices (<code>RP</code>) and column indices (<code>JA</code>) arrays. </td></tr>
+    <tr><td class="paramname">flags</td><td>Either <a class="el" href="rsb_8h.html#a8ccb4d7203ce7707f9d13bd6c5ef4169">RSB_FLAG_FORTRAN_INDICES_INTERFACE</a> or <a class="el" href="rsb_8h.html#a49a9315ba7e702e323eadca04d0d735a">RSB_FLAG_C_INDICES_INTERFACE</a> (see <a class="el" href="rsb_8h.html#flags_section">flags_section</a> flags section). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error.</dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gaac3c6c033733a8101b9ccf56f8fc7112">rsb_mtx_get_coo</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4adca460f50bc1ad7d9ffdfda2273b87">rsb_mtx_get_csr</a>, <a class="el" href="group__rsb__doc__rsb.html#gaa01c4a69db732f99e8a960ee8c9afa23">rsb_mtx_get_rows_sparse</a>, <a class="el" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb_mtx_get_coo_block</a>, <a class="el" href="g [...]
+
+</div>
+</div>
+<a class="anchor" id="gad9a3eacd54fb7043464006cd57866edf"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_mtx_get_info </td>
+          <td>(</td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="group__rsb__doc__rsb.html#ga211914bd1afe8044a70dc864f3c1fc8f">rsb_mif_t</a> </td>
+          <td class="paramname"><em>miflags</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>minfop</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Returns a specified matrix (numerical) property.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">miflags</td><td>A valid value of matrix info flags (see <a class="el" href="group__rsb__doc__rsb.html#ga211914bd1afe8044a70dc864f3c1fc8f" title="Flags for getting matrix informations via rsb_mtx_get_info()/rsb_mtx_get_info_str().">rsb_mif_t</a> for valid values). </td></tr>
+    <tr><td class="paramname">minfop</td><td>Pointer to a variable of the right type, according to the matrix info flag specification (see <a class="el" href="group__rsb__doc__rsb.html#ga211914bd1afe8044a70dc864f3c1fc8f" title="Flags for getting matrix informations via rsb_mtx_get_info()/rsb_mtx_get_info_str().">rsb_mif_t</a>).</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>This function has not been thoroughly tested. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gad9a3eacd54fb7043464006cd57866edf">rsb_mtx_get_info</a>, <a class="el" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str</a>, <a class="el" href="group__rsb__doc__rsb.html#gad911ac7528c95c874d02cb17e6b76c54">rsb_file_mtx_save</a>, <a class="el" href="group__rsb__doc__rsb.html#gad071e0373a08f74ee7ae910e9e4fd140">rsb_file_vec_load</a>, <a class="el" href="group [...]
+
+</div>
+</div>
+<a class="anchor" id="ga2b7d51b9822f73d2fe7fcf5b9d0be1e9"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_mtx_get_info_str </td>
+          <td>(</td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> * </td>
+          <td class="paramname"><em>mis</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>minfop</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">size_t </td>
+          <td class="paramname"><em>buflen</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Returns a specified matrix (numerical) property, via a string form query.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">mis</td><td>A string specifying any identifier among the matrix info ones. See <a class="el" href="group__rsb__doc__rsb.html#ga211914bd1afe8044a70dc864f3c1fc8f" title="Flags for getting matrix informations via rsb_mtx_get_info()/rsb_mtx_get_info_str().">rsb_mif_t</a> for a list of valid identifiers that can be supplied in string form. </td></tr>
+    <tr><td class="paramname">minfop</td><td>Pointer to a variable of the right type, according to the matrix info flag specification (see <a class="el" href="group__rsb__doc__rsb.html#ga211914bd1afe8044a70dc864f3c1fc8f" title="Flags for getting matrix informations via rsb_mtx_get_info()/rsb_mtx_get_info_str().">rsb_mif_t</a>). </td></tr>
+    <tr><td class="paramname">buflen</td><td>If greater than 0, <code>minfop</code> will be treated as a string of length <code>buflen</code> and filled with the desired value via the standard <code>snprintf()</code> function.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error.</dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gad9a3eacd54fb7043464006cd57866edf">rsb_mtx_get_info</a>, <a class="el" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str</a>, <a class="el" href="group__rsb__doc__rsb.html#gad911ac7528c95c874d02cb17e6b76c54">rsb_file_mtx_save</a>, <a class="el" href="group__rsb__doc__rsb.html#gad071e0373a08f74ee7ae910e9e4fd140">rsb_file_vec_load</a>, <a class="el" href="group [...]
+
+</div>
+</div>
+<a class="anchor" id="ga6a645ce89fd167d72c92cdcfbcd8ed81"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_mtx_get_nrm </td>
+          <td>(</td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>Np</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="group__rsb__doc__rsb.html#ga14750ca720fd92a2be879a59ae36dfe9">rsb_extff_t</a> </td>
+          <td class="paramname"><em>flags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Computes a matrix norm (either the 1-norm, the 2-norm, or the infinity norm).</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">Np</td><td>Points to a scalar value which will be overwritten with the selected norm. </td></tr>
+    <tr><td class="paramname">flags</td><td>Either <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a4c48a7a285045f4614a83c50ad740508">RSB_EXTF_NORM_ONE</a> or <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9af5f5082e70a6193ebcf3ea7ba7365eef">RSB_EXTF_NORM_TWO</a> or <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a0a6cb081d0345b5bb6290ae534e3502f">RSB_EXTF_NORM_INF</a>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<p>In case of a complex type, only the real part will be written to <code>Np</code>.</p>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gaac3c6c033733a8101b9ccf56f8fc7112">rsb_mtx_get_coo</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4adca460f50bc1ad7d9ffdfda2273b87">rsb_mtx_get_csr</a>, <a class="el" href="group__rsb__doc__rsb.html#gaa01c4a69db732f99e8a960ee8c9afa23">rsb_mtx_get_rows_sparse</a>, <a class="el" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb_mtx_get_coo_block</a>, <a class="el" href="g [...]
+
+</div>
+</div>
+<a class="anchor" id="gadaee12cc24dac7f8ebc68efd3d09c819"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_mtx_get_prec </td>
+          <td>(</td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>opdp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a528640277b196f7cfce2016cffbdd340">rsb_precf_t</a> </td>
+          <td class="paramname"><em>prec_flags</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>ipdp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Computes a simple preconditioner out of <code>mtxAp</code>.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">opdp</td><td>Preconditioner data pointer (output). </td></tr>
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">prec_flags</td><td>Valid preconditioner request flags (currently, only <a class="el" href="rsb_8h.html#a56bb6be11af9a5a0ed9aaa8774ab6db9">RSB_PRECF_ILU0</a> is supported; for it, <code>*opdp</code> will be overwritten with two <code>rsb_mtx_t</code> pointers, respectively a lower and an upper matrix.). </td></tr>
+    <tr><td class="paramname">ipdp</td><td>Preconditioner data pointer (input).</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error.</dd></dl>
+<dl class="section note"><dt>Note</dt><dd>Matrix should be square, have at least two rows, and have at least one nonzero. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gaac3c6c033733a8101b9ccf56f8fc7112">rsb_mtx_get_coo</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4adca460f50bc1ad7d9ffdfda2273b87">rsb_mtx_get_csr</a>, <a class="el" href="group__rsb__doc__rsb.html#gaa01c4a69db732f99e8a960ee8c9afa23">rsb_mtx_get_rows_sparse</a>, <a class="el" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb_mtx_get_coo_block</a>, <a class="el" href="g [...]
+
+</div>
+</div>
+<a class="anchor" id="gaa01c4a69db732f99e8a960ee8c9afa23"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_mtx_get_rows_sparse </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>frA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>lrA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> * </td>
+          <td class="paramname"><em>rnzp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td>
+          <td class="paramname"><em>flags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Writes to the given COO arrays the specified submatrix.</p>
+<p>Invoke with <code>VA</code>,IA,JA set to <code>NULL</code> in order to get the nonzeroes count written to <code>*rnzp</code>, and know how large the arrays should be.</p>
+<p>IA can be <code>NULL</code> (in this case it will be ignored). The written rows are ordered.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">VA,IA,JA</td><td>Output numerical values (<code>VA</code>) array; input row (<code>IA</code>) and column (<code>JA</code>) indices arrays. </td></tr>
+    <tr><td class="paramname">frA,lrA</td><td>First and last row indices. </td></tr>
+    <tr><td class="paramname">rnzp</td><td>A pointer where the number of relevant nonzero elements will be written to. </td></tr>
+    <tr><td class="paramname">alphap</td><td>Optional pointer (if <code>NULL</code>, will default to 1) to a numerical value (of the same type as matrix). </td></tr>
+    <tr><td class="paramname">transA</td><td>Transposition parameter for <img class="formulaInl" alt="$A$" src="form_21.png"/> (see <a class="el" href="rsb__types_8h.html#matrix_transposition_flags_section">matrix_transposition_flags_section</a>). </td></tr>
+    <tr><td class="paramname">flags</td><td>Either <a class="el" href="rsb_8h.html#a8ccb4d7203ce7707f9d13bd6c5ef4169">RSB_FLAG_FORTRAN_INDICES_INTERFACE</a> or <a class="el" href="rsb_8h.html#a49a9315ba7e702e323eadca04d0d735a">RSB_FLAG_C_INDICES_INTERFACE</a> (see <a class="el" href="rsb_8h.html#flags_section">flags_section</a> flags section). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gaac3c6c033733a8101b9ccf56f8fc7112">rsb_mtx_get_coo</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4adca460f50bc1ad7d9ffdfda2273b87">rsb_mtx_get_csr</a>, <a class="el" href="group__rsb__doc__rsb.html#gaa01c4a69db732f99e8a960ee8c9afa23">rsb_mtx_get_rows_sparse</a>, <a class="el" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb_mtx_get_coo_block</a>, <a class="el" href="g [...]
+
+</div>
+</div>
+<a class="anchor" id="gad8f1aa9ac5081edd789374e7bb82697f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_mtx_get_vals </td>
+          <td>(</td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td>
+          <td class="paramname"><em>flags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Gets the specified matrix elements, if found. Please note that unlike <a class="el" href="group__rsb__doc__rsb.html#gab8069ad6d5a67bc8a726131891e98c46">rsb_mtx_set_vals</a>, the matrix has to be fully assembled here.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">VA,IA,JA</td><td>Output numerical values (<code>VA</code>) array; input row (<code>IA</code>) and column (<code>JA</code>) indices arrays. </td></tr>
+    <tr><td class="paramname">nnz</td><td>The number of nonzeroes in the input arrays. </td></tr>
+    <tr><td class="paramname">flags</td><td>Either <a class="el" href="rsb_8h.html#a8ccb4d7203ce7707f9d13bd6c5ef4169">RSB_FLAG_FORTRAN_INDICES_INTERFACE</a> or <a class="el" href="rsb_8h.html#a49a9315ba7e702e323eadca04d0d735a">RSB_FLAG_C_INDICES_INTERFACE</a> (see <a class="el" href="rsb_8h.html#flags_section">flags_section</a> flags section). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error.</dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gaac3c6c033733a8101b9ccf56f8fc7112">rsb_mtx_get_coo</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4adca460f50bc1ad7d9ffdfda2273b87">rsb_mtx_get_csr</a>, <a class="el" href="group__rsb__doc__rsb.html#gaa01c4a69db732f99e8a960ee8c9afa23">rsb_mtx_get_rows_sparse</a>, <a class="el" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb_mtx_get_coo_block</a>, <a class="el" href="g [...]
+
+</div>
+</div>
+<a class="anchor" id="gad0b2352cea6b7512b466d1c51327fcf8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_mtx_get_vec </td>
+          <td>(</td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>Dp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="group__rsb__doc__rsb.html#ga14750ca720fd92a2be879a59ae36dfe9">rsb_extff_t</a> </td>
+          <td class="paramname"><em>flags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Overwrites the supplied array with the requested vector quantity.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">Dp</td><td>A valid pointer to a numerical vector array <img class="formulaInl" alt="$D$" src="form_50.png"/>. </td></tr>
+    <tr><td class="paramname">flags</td><td>Either one of the different extraction filter flags (e.g.: <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a5c7c241fb262968d5b7c42e63e5c1ea1">RSB_EXTF_DIAG</a>, <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9af9b17f6ad2d8be781b003836f0403fe5">RSB_EXTF_SUMS_ROW</a>, ...) . </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gaac3c6c033733a8101b9ccf56f8fc7112">rsb_mtx_get_coo</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4adca460f50bc1ad7d9ffdfda2273b87">rsb_mtx_get_csr</a>, <a class="el" href="group__rsb__doc__rsb.html#gaa01c4a69db732f99e8a960ee8c9afa23">rsb_mtx_get_rows_sparse</a>, <a class="el" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb_mtx_get_coo_block</a>, <a class="el" href="g [...]
+
+</div>
+</div>
+<a class="anchor" id="gab0702d7080d1699162e4201bc70cc5ee"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_mtx_rndr </td>
+          <td>(</td>
+          <td class="paramtype">const char * </td>
+          <td class="paramname"><em>filename</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>pmWidth</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>pmHeight</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#aa8f24976a4e4bdf8403ab433564c2005">rsb_marf_t</a> </td>
+          <td class="paramname"><em>rflags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Renders a matrix to a file. Currently, only Encapsulated PostScript (EPS) is supported.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">filename</td><td>The specified output file name (if <code>NULL</code>, will write to standard output). </td></tr>
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">pmWidth</td><td>Pixel map width (in pixels or points). </td></tr>
+    <tr><td class="paramname">pmHeight</td><td>Pixel map height (in pixels or points). </td></tr>
+    <tr><td class="paramname">rflags</td><td>The color mode; only <a class="el" href="rsb_8h.html#a53604f78febc54c616282c66bca02daf">RSB_MARF_RGB</a> is supported for now (1 byte per channel, 3 channels — red, green, blue): this requires array <code>pmp</code> to be at least (3*<code>pmlWidth*<code>pmHeight</code>)</code> bytes large.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gab0702d7080d1699162e4201bc70cc5ee">rsb_mtx_rndr</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4b45a74b985f5cbd869bc9a540951771">rsb_file_mtx_rndr</a></dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gab8069ad6d5a67bc8a726131891e98c46"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_mtx_set_vals </td>
+          <td>(</td>
+          <td class="paramtype">struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> * </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td>
+          <td class="paramname"><em>flags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Updates the specified matrix elements, if found in the nonzero pattern.</p>
+<p>In the special case of a matrix in assembly state (that is, one that has been created as empty with <a class="el" href="group__rsb__doc__rsb.html#gafca80e53d47a7ec3eb116e755fe47c58">rsb_mtx_alloc_from_coo_begin()</a> and not yet assembled with <a class="el" href="group__rsb__doc__rsb.html#gab583fbefa0a66e9d30dac034480c2d86">rsb_mtx_alloc_from_coo_end()</a>), all the supplied matrix elements will be accepted, whether already present or not.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">VA,IA,JA</td><td>Input numerical values (<code>VA</code>) array; row (<code>IA</code>) and column (<code>JA</code>) input indices arrays. </td></tr>
+    <tr><td class="paramname">nnz</td><td>The number of nonzeroes in the input arrays. </td></tr>
+    <tr><td class="paramname">flags</td><td>Either <a class="el" href="rsb_8h.html#a8ccb4d7203ce7707f9d13bd6c5ef4169">RSB_FLAG_FORTRAN_INDICES_INTERFACE</a> or <a class="el" href="rsb_8h.html#a49a9315ba7e702e323eadca04d0d735a">RSB_FLAG_C_INDICES_INTERFACE</a> plus either <a class="el" href="rsb_8h.html#afd1b39c625f4249cd32fccea38957f97">RSB_FLAG_DUPLICATES_SUM</a> (to sum into) or <a class="el" href="rsb_8h.html#aff85f26964888f838aa97eb371ce5da3">RSB_FLAG_DUPLICATES_KEEP_LAST</a> (to ove [...]
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error.</dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals</a>, <a class="el" href="group__rsb__doc__rsb.html#gab8069ad6d5a67bc8a726131891e98c46">rsb_mtx_set_vals</a></dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gadf75c148fe661486ab0d8140657b8d9a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_mtx_switch_to_coo </td>
+          <td>(</td>
+          <td class="paramtype">struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void ** </td>
+          <td class="paramname"><em>VAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> ** </td>
+          <td class="paramname"><em>IAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> ** </td>
+          <td class="paramname"><em>JAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td>
+          <td class="paramname"><em>flags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Switches a matrix to COO arrays in place.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">VAp,IAp,JAp</td><td>Output numerical values (<code>VAp</code>) array pointer; output row (<code>IAp</code>) and column (<code>JAp</code>) indices arrays pointers. </td></tr>
+    <tr><td class="paramname">flags</td><td>A combination of <a class="el" href="rsb_8h.html#a49a9315ba7e702e323eadca04d0d735a">RSB_FLAG_C_INDICES_INTERFACE</a>,<a class="el" href="rsb_8h.html#a8ccb4d7203ce7707f9d13bd6c5ef4169">RSB_FLAG_FORTRAN_INDICES_INTERFACE</a>,<a class="el" href="rsb_8h.html#a8ccb4d7203ce7707f9d13bd6c5ef4169">RSB_FLAG_FORTRAN_INDICES_INTERFACE</a>. (see <a class="el" href="rsb_8h.html#flags_section">flags_section</a> flags section). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error.</dd></dl>
+<dl class="section note"><dt>Note</dt><dd>This function is only valid if <code>mtxAp</code> has been assembled in place (that is, in the arrays that are being reclaimed), so with e.g.: <a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_mtx_alloc_from_coo_inplace()</a>. Please also note that the matrix will get freed internally and so <code>mtxAp</code> will not be usable in any way afterwards. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>This function has not been thoroughly tested. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gadf75c148fe661486ab0d8140657b8d9a">rsb_mtx_switch_to_coo</a>,<a class="el" href="group__rsb__doc__rsb.html#gadf75c148fe661486ab0d8140657b8d9a">rsb_mtx_switch_to_coo</a></dd></dl>
+
+</div>
+</div>
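+<p>A sketch of reclaiming the COO arrays (only valid for a matrix assembled in place; variable names are illustrative):</p>
+<pre class="fragment">
+void *VA = NULL;
+rsb_coo_idx_t *IA = NULL, *JA = NULL;
+rsb_err_t errval = rsb_mtx_switch_to_coo(mtxAp, &VA, &IA, &JA,
+        RSB_FLAG_C_INDICES_INTERFACE);
+/* on success the arrays are usable again; mtxAp has been freed */
+</pre>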
+<a class="anchor" id="ga3c46a4942a6acb90063d721b6446e78e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_mtx_switch_to_csr </td>
+          <td>(</td>
+          <td class="paramtype">struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void ** </td>
+          <td class="paramname"><em>VAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> ** </td>
+          <td class="paramname"><em>IAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> ** </td>
+          <td class="paramname"><em>JAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td>
+          <td class="paramname"><em>flags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Switches the matrix to the CSR format, in-place.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">VAp,IAp,JAp</td><td>Output numerical values (<code>VAp</code>) array pointer; output row (<code>IAp</code>) and column (<code>JAp</code>) indices arrays pointers. </td></tr>
+    <tr><td class="paramname">flags</td><td>A valid combination of index conversion flags (that is, <a class="el" href="rsb_8h.html#a49a9315ba7e702e323eadca04d0d735a">RSB_FLAG_C_INDICES_INTERFACE</a> and <a class="el" href="rsb_8h.html#a8ccb4d7203ce7707f9d13bd6c5ef4169">RSB_FLAG_FORTRAN_INDICES_INTERFACE</a>) and other meaningful flags. Symmetry flags shall be the same as in the matrix in use, because symmetry expansion may happen otherwise. Flags <a class="el" href="rsb_8h.html#a6abc0e2 [...]
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error.</dd></dl>
+<dl class="section note"><dt>Note</dt><dd>This function is only valid if <code>mtxAp</code> has been assembled in place (that is, in the arrays that are being reclaimed), so with e.g.: <a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_mtx_alloc_from_coo_inplace()</a>. Please also note that the matrix will get freed internally and so <code>mtxAp</code> will not be usable in any way afterwards. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>This function has not been thoroughly tested. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gadf75c148fe661486ab0d8140657b8d9a">rsb_mtx_switch_to_coo</a>,<a class="el" href="group__rsb__doc__rsb.html#gadf75c148fe661486ab0d8140657b8d9a">rsb_mtx_switch_to_coo</a></dd></dl>
+
+</div>
+</div>
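+<p>Analogously, a sketch for the CSR case (again, only for an in-place assembled matrix):</p>
+<pre class="fragment">
+void *VA = NULL;
+rsb_coo_idx_t *RP = NULL, *JA = NULL; /* RP: CSR row pointers array */
+rsb_err_t errval = rsb_mtx_switch_to_csr(mtxAp, &VA, &RP, &JA,
+        RSB_FLAG_C_INDICES_INTERFACE);
+/* mtxAp has been freed internally and must not be used afterwards */
+</pre>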
+<a class="anchor" id="ga2d7533a97c97b215090d69c2d9235412"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_mtx_upd_vals </td>
+          <td>(</td>
+          <td class="paramtype">struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="group__rsb__doc__rsb.html#ga16c86c65a187bfbe94ecfdb87b97cade">rsb_elopf_t</a> </td>
+          <td class="paramname"><em>elop_flags</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>omegap</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p><img class="formulaInl" alt="$ A \leftarrow op (A,\Omega) $" src="form_36.png"/> Updates the matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> by applying either a rowwise or an elemental operation <img class="formulaInl" alt="$op$" src="form_37.png"/>, which is determined by <code>elop_flags</code>. If an unary operation is selected, <code>omegap</code> can be <code>NULL</code>.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">elop_flags</td><td>Elemental operation specification flags (see <a class="el" href="group__rsb__doc__rsb.html#ga16c86c65a187bfbe94ecfdb87b97cade" title="Flags for specifying a particular elemental/row-wise operation with rsb_mtx_upd_vals().">rsb_elopf_t</a> for valid choices). </td></tr>
+    <tr><td class="paramname">omegap</td><td>Pointer to a numerical location(s) (of the same type as matrix). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error.</dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals</a>, <a class="el" href="group__rsb__doc__rsb.html#gab8069ad6d5a67bc8a726131891e98c46">rsb_mtx_set_vals</a></dd></dl>
+
+</div>
+</div>
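+<p>For instance, a sketch scaling all stored elements by a constant (assuming a matrix of type double and the <a class="el" href="group__rsb__doc__rsb.html#ga16c86c65a187bfbe94ecfdb87b97cade">rsb_elopf_t</a> value RSB_ELOPF_MUL):</p>
+<pre class="fragment">
+const double omega = 2.0;
+rsb_err_t errval = rsb_mtx_upd_vals(mtxAp, RSB_ELOPF_MUL, &omega);
+</pre>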
+<a class="anchor" id="gab660cf8aff876ae88b59c7a22ddfc912"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_perror </td>
+          <td>(</td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>stream</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td>
+          <td class="paramname"><em>errval</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Prints to the specified <code>stream</code> a string corresponding to the error code (using <code>fprintf</code> from <code><stdio.h></code>). If <code>stream==NULL</code>, it will print to the default output stream; see <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ae398997ce8253b813f2bbb5834e9670f">RSB_IO_WANT_OUTPUT_STREAM</a>.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">stream</td><td>A <code></code>(FILE*) pointer, as declared in <code><stdio.h></code>; can be <code>NULL</code>. </td></tr>
+    <tr><td class="paramname">errval</td><td>A valid error flag value (see <a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a>). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror</a>, <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r</a></dd></dl>
+
+</div>
+</div>
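+<p>A typical error reporting pattern (a sketch; assumes <code><rsb.h></code> and <code><stdio.h></code> are included):</p>
+<pre class="fragment">
+rsb_err_t errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS);
+if (errval != RSB_ERR_NO_ERROR)
+        rsb_perror(stderr, errval); /* stderr is a valid (FILE*) stream */
+</pre>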
+<a class="anchor" id="ga7459601f0d54bd95549959b9749fedde"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> rsb_psblas_trans_to_rsb_trans </td>
+          <td>(</td>
+          <td class="paramtype">const char </td>
+          <td class="paramname"><em>psbtrans</em></td><td>)</td>
+          <td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>"Translates" a PSBLAS transposition value character to a <code>librsb</code> one. <br/>
+ See the PSBLAS library website/documentation for valid input values.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">psbtrans</td><td>Transposition parameter value valid in the PSBLAS library. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A valid transposition code; that is <a class="el" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a> for 'N', <a class="el" href="rsb__types_8h.html#a37f8cea71946de2f832bdb9d438d5edf">RSB_TRANSPOSITION_T</a> for 'T', RSB_TRANSPOSITION_C for 'C', (See <a class="el" href="rsb__types_8h.html#matrix_transposition_flags_section">matrix_transposition_flags_section</a>). </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga7459601f0d54bd95549959b9749fedde">rsb_psblas_trans_to_rsb_trans</a></dd></dl>
+
+</div>
+</div>
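+<p>For example:</p>
+<pre class="fragment">
+rsb_trans_t transA = rsb_psblas_trans_to_rsb_trans('N'); /* yields RSB_TRANSPOSITION_N */
+</pre>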
+<a class="anchor" id="ga3ec8d721b5333aae6ea9b03eb0039285"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_spmm </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>Bp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> </td>
+          <td class="paramname"><em>ldB</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>betap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>Cp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> </td>
+          <td class="paramname"><em>ldC</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Updates a dense matrix with the product of a sparse matrix by a dense matrix; that is, computes <img class="formulaInl" alt="$ C \leftarrow \beta\cdot C + \alpha\cdot opa(A) \cdot B $" src="form_47.png"/>.</p>
+<p><img class="formulaInl" alt="$opa( A )=A$" src="form_27.png"/> if <code>transA=<a class="el" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a></code>; <img class="formulaInl" alt="$opa( A )= A ^T$" src="form_28.png"/> if <code>transA=<a class="el" href="rsb__types_8h.html#a37f8cea71946de2f832bdb9d438d5edf">RSB_TRANSPOSITION_T</a></code>; <img class="formulaInl" alt="$opa( A )= A ^H$" src="form_29.png"/> if <code>transA=<a class="el" href="rsb__types_8 [...]
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transA</td><td>Transposition parameter for <img class="formulaInl" alt="$A$" src="form_21.png"/> (see <a class="el" href="rsb__types_8h.html#matrix_transposition_flags_section">matrix_transposition_flags_section</a>). </td></tr>
+    <tr><td class="paramname">alphap</td><td>Optional pointer (if <code>NULL</code>, will default to 1) to a numerical value (of the same type as matrix). </td></tr>
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>The number of right hand side vectors (cannot be <code><1</code>). </td></tr>
+    <tr><td class="paramname">order</td><td>A flag among <a class="el" href="rsb_8h.html#a6ed7790c2f7129a6e051b8167c48a43c">RSB_FLAG_WANT_COLUMN_MAJOR_ORDER</a> and <a class="el" href="rsb_8h.html#a7e9ef3a7ae3c22ab5c76d36b3ac482cc">RSB_FLAG_WANT_ROW_MAJOR_ORDER</a>. For contiguous vector arrays, you probably want <a class="el" href="rsb_8h.html#a6ed7790c2f7129a6e051b8167c48a43c">RSB_FLAG_WANT_COLUMN_MAJOR_ORDER</a>. </td></tr>
+    <tr><td class="paramname">Bp</td><td>The input vector array. </td></tr>
+    <tr><td class="paramname">ldB</td><td>Leading dimension of <code>Bp</code> array. </td></tr>
+    <tr><td class="paramname">betap</td><td>Optional pointer (if <code>NULL</code>, will default to 1) to a numerical value. </td></tr>
+    <tr><td class="paramname">Cp</td><td>The output vector array. </td></tr>
+    <tr><td class="paramname">ldC</td><td>Leading dimension of <code>Cp</code> array. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga4a16a82d289c76a437915db449553d4d">rsb_spmv</a>, <a class="el" href="group__rsb__doc__rsb.html#ga3ec8d721b5333aae6ea9b03eb0039285">rsb_spmm</a>, <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a></dd></dl>
+
+</div>
+</div>
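+<p>A minimal call sketch, assuming an already assembled <code>nrA x ncA</code> matrix <code>mtxAp</code> of type double; the operand arrays and leading dimensions (<code>Bp</code>, <code>Cp</code>, <code>ldB</code>, <code>ldC</code>) are illustrative and caller-allocated:</p>
+<pre class="fragment">
+const rsb_coo_idx_t nrhs = 2;
+const double alpha = 1.0, beta = 0.0;
+/* column major operands: Bp is ncA x nrhs (ldB >= ncA),
+   Cp is nrA x nrhs (ldC >= nrA) */
+rsb_err_t errval = rsb_spmm(RSB_TRANSPOSITION_N, &alpha, mtxAp, nrhs,
+        RSB_FLAG_WANT_COLUMN_MAJOR_ORDER, Bp, ldB, &beta, Cp, ldC);
+if (errval != RSB_ERR_NO_ERROR)
+        rsb_perror(NULL, errval);
+</pre>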
+<a class="anchor" id="ga8813ccbbb1065ac76bfe22c42feafa05"></a>
+<div class="memitem">
+<div class="memproto">
+<table class="mlabels">
+  <tr>
+  <td class="mlabels-left">
+      <table class="memname">
+        <tr>
+          <td class="memname">struct rsb_mtx_t* rsb_spmsp </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> </td>
+          <td class="paramname"><em>transB</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>betap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxBp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> * </td>
+          <td class="paramname"><em>errvalp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+  </td>
+  <td class="mlabels-right">
+<span class="mlabels"><span class="mlabel">read</span></span>  </td>
+  </tr>
+</table>
+</div><div class="memdoc">
+<p>Computes the weighted product of two sparse matrices into a new sparse matrix (also known as the SpGEMM operation): <img class="formulaInl" alt="$C \leftarrow \alpha \cdot opa(A) \cdot \beta \cdot opb(B) $" src="form_42.png"/>. Symmetry/Hermitian flags are ignored by this operation.</p>
+<p><img class="formulaInl" alt="$opa( A )=A$" src="form_27.png"/> if <code>transA=<a class="el" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a></code>; <img class="formulaInl" alt="$opa( A )= A ^T$" src="form_28.png"/> if <code>transA=<a class="el" href="rsb__types_8h.html#a37f8cea71946de2f832bdb9d438d5edf">RSB_TRANSPOSITION_T</a></code>; <img class="formulaInl" alt="$opa( A )= A ^H$" src="form_29.png"/> if <code>transA=<a class="el" href="rsb__types_8 [...]
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">typecode</td><td>A valid type code for the given (numerical array) input pointer (see <a class="el" href="rsb__types_8h.html#matrix_type_symbols_section">matrix_type_symbols_section</a>). </td></tr>
+    <tr><td class="paramname">transA</td><td>Transposition parameter for <img class="formulaInl" alt="$A$" src="form_21.png"/> (see <a class="el" href="rsb__types_8h.html#matrix_transposition_flags_section">matrix_transposition_flags_section</a>). </td></tr>
+    <tr><td class="paramname">alphap</td><td>Optional pointer (if <code>NULL</code>, will default to 1) to a numerical value (of the same type as matrix). </td></tr>
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">transB</td><td>Transposition parameter for <img class="formulaInl" alt="$B$" src="form_24.png"/> (see <a class="el" href="rsb__types_8h.html#matrix_transposition_flags_section">matrix_transposition_flags_section</a>). </td></tr>
+    <tr><td class="paramname">betap</td><td>Optional pointer (if <code>NULL</code>, will default to 1) to a numerical value. </td></tr>
+    <tr><td class="paramname">mtxBp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$B$" src="form_24.png"/> representation. </td></tr>
+    <tr><td class="paramname">errvalp</td><td>An optional (can be <code>NULL</code>) pointer to <a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> where the error status will be written to. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, a valid pointer (<code>struct</code> <code>rsb_mtx_t*</code>) to the newly allocated matrix structure; on error, <code>NULL</code>.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Parameters <code>alphap</code>,betap,transA,transB are not yet taken in consideration. The following defaults are valid: <img class="formulaInl" alt="$\alpha=1.0$" src="form_43.png"/> and <img class="formulaInl" alt="$\beta=1.0$" src="form_44.png"/>, and <code>transA=transB=<a class="el" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a></code>.</dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga74d97612d4af70244c886b9eadd90a0e">rsb_spmsp_to_dense</a>, <a class="el" href="group__rsb__doc__rsb.html#ga30823d02e577e59da4ccff6baaeb8ea1">rsb_sppsp</a>, <a class="el" href="group__rsb__doc__rsb.html#ga8813ccbbb1065ac76bfe22c42feafa05">rsb_spmsp</a>, <a class="el" href="group__rsb__doc__rsb.html#gaf30a70ea183d30d216f700782fc01524">rsb_mtx_add_to_dense</a></dd></dl>
+
+</div>
+</div>
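+<p>A sketch forming <code>C = A * B</code> as a new sparse matrix (per the warning above, <code>alphap</code>/<code>betap</code>/transposition arguments currently keep their defaults; <code>mtxAp</code> and <code>mtxBp</code> are assumed conformant operands):</p>
+<pre class="fragment">
+rsb_err_t errval = RSB_ERR_NO_ERROR;
+struct rsb_mtx_t *mtxCp = rsb_spmsp(RSB_NUMERICAL_TYPE_DOUBLE,
+        RSB_TRANSPOSITION_N, NULL, mtxAp,
+        RSB_TRANSPOSITION_N, NULL, mtxBp, &errval);
+if (mtxCp == NULL)
+        rsb_perror(NULL, errval);
+</pre>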
+<a class="anchor" id="ga74d97612d4af70244c886b9eadd90a0e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_spmsp_to_dense </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> </td>
+          <td class="paramname"><em>transB</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>betap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxBp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> </td>
+          <td class="paramname"><em>ldC</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> </td>
+          <td class="paramname"><em>nrC</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> </td>
+          <td class="paramname"><em>ncC</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#aeeac94f4bf43460df839c8decd897523">rsb_bool_t</a> </td>
+          <td class="paramname"><em>rowmajorC</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>Cp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Computes the product of two sparse matrices and adds it to a dense matrix: <img class="formulaInl" alt="$C \leftarrow \alpha opa(A) \cdot \beta \cdot opb(B) $" src="form_48.png"/>.</p>
+<p><img class="formulaInl" alt="$opa( A )=A$" src="form_27.png"/> if <code>transA=<a class="el" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a></code>; <img class="formulaInl" alt="$opa( A )= A ^T$" src="form_28.png"/> if <code>transA=<a class="el" href="rsb__types_8h.html#a37f8cea71946de2f832bdb9d438d5edf">RSB_TRANSPOSITION_T</a></code>; <img class="formulaInl" alt="$opa( A )= A ^H$" src="form_29.png"/> if <code>transA=<a class="el" href="rsb__types_8 [...]
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">typecode</td><td>A valid type code for the given (numerical array) input pointer (see <a class="el" href="rsb__types_8h.html#matrix_type_symbols_section">matrix_type_symbols_section</a>). </td></tr>
+    <tr><td class="paramname">transA</td><td>Transposition parameter for <img class="formulaInl" alt="$A$" src="form_21.png"/> (see <a class="el" href="rsb__types_8h.html#matrix_transposition_flags_section">matrix_transposition_flags_section</a>). </td></tr>
+    <tr><td class="paramname">alphap</td><td>Optional pointer (if <code>NULL</code>, will default to 1) to a numerical value (of the same type as matrix). </td></tr>
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">transB</td><td>Transposition parameter for <img class="formulaInl" alt="$B$" src="form_24.png"/> (see <a class="el" href="rsb__types_8h.html#matrix_transposition_flags_section">matrix_transposition_flags_section</a>). </td></tr>
+    <tr><td class="paramname">betap</td><td>Optional pointer (if <code>NULL</code>, will default to 1) to a numerical value. </td></tr>
+    <tr><td class="paramname">mtxBp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$B$" src="form_24.png"/> representation. </td></tr>
+    <tr><td class="paramname">ldC</td><td>Leading dimension of <code>Cp</code> array. </td></tr>
+    <tr><td class="paramname">nrC,ncC</td><td>The number of rows and columns for the dense matrix <img class="formulaInl" alt="$C$" src="form_49.png"/>. </td></tr>
+    <tr><td class="paramname">rowmajorC</td><td><a class="el" href="rsb_8h.html#af580e920b9f507028d3b7d34b4dadd6f">RSB_BOOL_TRUE</a> if the dense matrix <img class="formulaInl" alt="$C$" src="form_49.png"/> is considered stored as row major, or <a class="el" href="rsb_8h.html#ad396755fe9a1d81991d5ac238058db18">RSB_BOOL_FALSE</a> if as column major. </td></tr>
+    <tr><td class="paramname">Cp</td><td>Array representing the dense matrix <img class="formulaInl" alt="$C$" src="form_49.png"/>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Parameters <code>alphap</code>,betap,transA,transB are not yet taken in consideration. The following defaults are valid: <img class="formulaInl" alt="$\alpha=1.0$" src="form_43.png"/> and <img class="formulaInl" alt="$\beta=1.0$" src="form_44.png"/>, and <code>transA=transB=<a class="el" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a></code>.</dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga74d97612d4af70244c886b9eadd90a0e">rsb_spmsp_to_dense</a>, <a class="el" href="group__rsb__doc__rsb.html#ga30823d02e577e59da4ccff6baaeb8ea1">rsb_sppsp</a>, <a class="el" href="group__rsb__doc__rsb.html#ga8813ccbbb1065ac76bfe22c42feafa05">rsb_spmsp</a>, <a class="el" href="group__rsb__doc__rsb.html#gaf30a70ea183d30d216f700782fc01524">rsb_mtx_add_to_dense</a></dd></dl>
+
+</div>
+</div>
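+<p>A call sketch, assuming sparse operands <code>mtxAp</code> (<code>nrC x k</code>) and <code>mtxBp</code> (<code>k x ncC</code>) and a caller-allocated, pre-initialized dense array <code>Cp</code> (row major here, with <code>ldC >= ncC</code>); all names are illustrative:</p>
+<pre class="fragment">
+rsb_err_t errval = rsb_spmsp_to_dense(RSB_NUMERICAL_TYPE_DOUBLE,
+        RSB_TRANSPOSITION_N, NULL, mtxAp,
+        RSB_TRANSPOSITION_N, NULL, mtxBp,
+        ldC, nrC, ncC, RSB_BOOL_TRUE, Cp);
+</pre>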
+<a class="anchor" id="ga4a16a82d289c76a437915db449553d4d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_spmv </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>Xp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>incX</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>betap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>Yp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>incY</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Multiplies a sparse matrix <img class="formulaInl" alt="$opa(A)$" src="form_25.png"/> by a vector <img class="formulaInl" alt="$X$" src="form_3.png"/>, updating vector <img class="formulaInl" alt="$Y$" src="form_2.png"/>. <br/>
+ Computes <img class="formulaInl" alt="$Y \leftarrow \beta Y + \alpha \cdot opa(A) \cdot X $" src="form_26.png"/>. <br/>
+ It is not allowed to supply the same array for <code>Xp</code> and <code>Yp</code> (that is, <code>Xp==Yp</code>). <br/>
+</p>
+<p><img class="formulaInl" alt="$opa( A )=A$" src="form_27.png"/> if <code>transA=<a class="el" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a></code>; <img class="formulaInl" alt="$opa( A )= A ^T$" src="form_28.png"/> if <code>transA=<a class="el" href="rsb__types_8h.html#a37f8cea71946de2f832bdb9d438d5edf">RSB_TRANSPOSITION_T</a></code>; <img class="formulaInl" alt="$opa( A )= A ^H$" src="form_29.png"/> if <code>transA=<a class="el" href="rsb__types_8 [...]
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transA</td><td>Transposition parameter for <img class="formulaInl" alt="$A$" src="form_21.png"/> (see <a class="el" href="rsb__types_8h.html#matrix_transposition_flags_section">matrix_transposition_flags_section</a>). </td></tr>
+    <tr><td class="paramname">alphap</td><td>Optional pointer (if <code>NULL</code>, will default to 1) to a numerical value (of the same type as matrix). </td></tr>
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">Xp</td><td>The input vector array. </td></tr>
+    <tr><td class="paramname">incX</td><td>Spacing of vector elements in each input vector array (>=1). </td></tr>
+    <tr><td class="paramname">betap</td><td>Optional pointer (if <code>NULL</code>, will default to 1) to a numerical value. </td></tr>
+    <tr><td class="paramname">Yp</td><td>The output array vector. </td></tr>
+    <tr><td class="paramname">incY</td><td>Spacing of vector elements in each output vector array (>=1). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga4a16a82d289c76a437915db449553d4d">rsb_spmv</a>, <a class="el" href="group__rsb__doc__rsb.html#ga3ec8d721b5333aae6ea9b03eb0039285">rsb_spmm</a>, <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a></dd></dl>
+
+</div>
+</div>
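+<p>A minimal end-to-end sketch computing <code>Y = A * X</code> for a 2x2 diagonal matrix (values are illustrative; error handling is omitted for brevity):</p>
+<pre class="fragment">
+#include <rsb.h>
+/* matrix [1 0; 0 2] in COO form */
+const rsb_coo_idx_t IA[] = { 0, 1 }, JA[] = { 0, 1 };
+const double VA[] = { 1.0, 2.0 }, X[] = { 1.0, 1.0 };
+double Y[] = { 0.0, 0.0 };
+const double alpha = 1.0, beta = 0.0;
+rsb_err_t errval = RSB_ERR_NO_ERROR;
+struct rsb_mtx_t *mtxAp = NULL;
+rsb_lib_init(RSB_NULL_INIT_OPTIONS);
+mtxAp = rsb_mtx_alloc_from_coo_const(VA, IA, JA, 2, RSB_NUMERICAL_TYPE_DOUBLE,
+        2, 2, 1, 1, RSB_FLAG_NOFLAGS, &errval);
+errval = rsb_spmv(RSB_TRANSPOSITION_N, &alpha, mtxAp, X, 1, &beta, Y, 1);
+rsb_mtx_free(mtxAp);
+rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
+</pre>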
+<a class="anchor" id="ga30823d02e577e59da4ccff6baaeb8ea1"></a>
+<div class="memitem">
+<div class="memproto">
+<table class="mlabels">
+  <tr>
+  <td class="mlabels-left">
+      <table class="memname">
+        <tr>
+          <td class="memname">struct rsb_mtx_t* rsb_sppsp </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> </td>
+          <td class="paramname"><em>transB</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>betap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxBp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> * </td>
+          <td class="paramname"><em>errvalp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+  </td>
+  <td class="mlabels-right">
+<span class="mlabels"><span class="mlabel">read</span></span>  </td>
+  </tr>
+</table>
+</div><div class="memdoc">
+<p>Computes the weighted sum of two sparse matrices, returning a new matrix: <img class="formulaInl" alt="$C \leftarrow \alpha\cdot transA(A) + \beta\cdot transB(B) $" src="form_38.png"/>. Symmetry flags are ignored in this operation.</p>
+<p><img class="formulaInl" alt="$opa( A )=A$" src="form_27.png"/> if <code>transA=<a class="el" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a></code>; <img class="formulaInl" alt="$opa( A )= A ^T$" src="form_28.png"/> if <code>transA=<a class="el" href="rsb__types_8h.html#a37f8cea71946de2f832bdb9d438d5edf">RSB_TRANSPOSITION_T</a></code>; <img class="formulaInl" alt="$opa( A )= A ^H$" src="form_29.png"/> if <code>transA=<a class="el" href="rsb__types_8 [...]
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">typecode</td><td>A valid type code for the given (numerical array) input pointer (see <a class="el" href="rsb__types_8h.html#matrix_type_symbols_section">matrix_type_symbols_section</a>). </td></tr>
+    <tr><td class="paramname">transA</td><td>Transposition parameter for <img class="formulaInl" alt="$A$" src="form_21.png"/> (see <a class="el" href="rsb__types_8h.html#matrix_transposition_flags_section">matrix_transposition_flags_section</a>). </td></tr>
+    <tr><td class="paramname">alphap</td><td>Optional pointer (if <code>NULL</code>, will default to 1) to a numerical value (of the same type as matrix). </td></tr>
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">transB</td><td>Transposition parameter for <img class="formulaInl" alt="$B$" src="form_24.png"/> (see <a class="el" href="rsb__types_8h.html#matrix_transposition_flags_section">matrix_transposition_flags_section</a>). </td></tr>
+    <tr><td class="paramname">betap</td><td>Optional pointer (if <code>NULL</code>, will default to 1) to a numerical value. </td></tr>
+    <tr><td class="paramname">mtxBp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$B$" src="form_24.png"/> representation. </td></tr>
+    <tr><td class="paramname">errvalp</td><td>An optional (can be <code>NULL</code>) pointer to <a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> where the error status will be written to. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, a valid pointer (<code>struct</code> <code>rsb_mtx_t*</code>) to the newly allocated matrix structure; on error, <code>NULL</code>.</dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga74d97612d4af70244c886b9eadd90a0e">rsb_spmsp_to_dense</a>, <a class="el" href="group__rsb__doc__rsb.html#ga30823d02e577e59da4ccff6baaeb8ea1">rsb_sppsp</a>, <a class="el" href="group__rsb__doc__rsb.html#ga8813ccbbb1065ac76bfe22c42feafa05">rsb_spmsp</a>, <a class="el" href="group__rsb__doc__rsb.html#gaf30a70ea183d30d216f700782fc01524">rsb_mtx_add_to_dense</a></dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>This function has not been thoroughly tested. </dd>
+<dd>
+This function is not optimized.</dd></dl>
+
+</div>
+</div>
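+<p>A sketch summing two equally sized sparse matrices into a newly allocated one (<code>mtxAp</code> and <code>mtxBp</code> are assumed valid):</p>
+<pre class="fragment">
+rsb_err_t errval = RSB_ERR_NO_ERROR;
+struct rsb_mtx_t *mtxCp = rsb_sppsp(RSB_NUMERICAL_TYPE_DOUBLE,
+        RSB_TRANSPOSITION_N, NULL, mtxAp,
+        RSB_TRANSPOSITION_N, NULL, mtxBp, &errval);
+if (mtxCp == NULL)
+        rsb_perror(NULL, errval);
+</pre>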
+<a class="anchor" id="ga48e6f3844605fffac9f622f05afa6043"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_spsm </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxTp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>betap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>Bp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> </td>
+          <td class="paramname"><em>ldB</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>Cp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> </td>
+          <td class="paramname"><em>ldC</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Computes <img class="formulaInl" alt="$Y \leftarrow \alpha \cdot opt( T )^{-1} \cdot B $" src="form_35.png"/>, with upper or lower triangular <img class="formulaInl" alt="$T$" src="form_31.png"/>.</p>
+<p><img class="formulaInl" alt="$opt( T )=T$" src="form_32.png"/> if <code>transT=<a class="el" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a></code>; <img class="formulaInl" alt="$opt( T )= T ^T$" src="form_33.png"/> if <code>transT=<a class="el" href="rsb__types_8h.html#a37f8cea71946de2f832bdb9d438d5edf">RSB_TRANSPOSITION_T</a></code>; <img class="formulaInl" alt="$opt( T )= T ^H$" src="form_34.png"/> if <code>transT=<a class="el" href="rsb__types_8 [...]
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transT</td><td>Transposition parameter for <img class="formulaInl" alt="$T$" src="form_31.png"/> (see <a class="el" href="rsb__types_8h.html#matrix_transposition_flags_section">matrix_transposition_flags_section</a>). </td></tr>
+    <tr><td class="paramname">alphap</td><td>Optional pointer (if <code>NULL</code>, will default to 1) to a numerical value (of the same type as matrix). </td></tr>
+    <tr><td class="paramname">mtxTp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$T$" src="form_31.png"/> representation. The matrix must be triangular; that is, it must have been allocated with either <a class="el" href="rsb_8h.html#aed7916ce610549fc75aa0c3e2d2ae1b9">RSB_FLAG_LOWER_TRIANGULAR</a> or <a class="el" href="rsb_8h.html#a9168d244582c1a4c57a9ec93d9432539">RSB_FLAG_UPPER_TRIANGULAR</a> flags. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>The number of right hand side vectors (cannot be <code><1</code>). </td></tr>
+    <tr><td class="paramname">order</td><td>A flag among <a class="el" href="rsb_8h.html#a6ed7790c2f7129a6e051b8167c48a43c">RSB_FLAG_WANT_COLUMN_MAJOR_ORDER</a> and <a class="el" href="rsb_8h.html#a7e9ef3a7ae3c22ab5c76d36b3ac482cc">RSB_FLAG_WANT_ROW_MAJOR_ORDER</a>. For contiguous vector arrays, you probably want <a class="el" href="rsb_8h.html#a6ed7790c2f7129a6e051b8167c48a43c">RSB_FLAG_WANT_COLUMN_MAJOR_ORDER</a>. </td></tr>
+    <tr><td class="paramname">betap</td><td>Optional pointer (if <code>NULL</code>, will default to 1) to a numerical value. </td></tr>
+    <tr><td class="paramname">Bp</td><td>The input vector array. </td></tr>
+    <tr><td class="paramname">ldB</td><td>Leading dimension of <code>Bp</code> array. </td></tr>
+    <tr><td class="paramname">Cp</td><td>The output vector array. </td></tr>
+    <tr><td class="paramname">ldC</td><td>Leading dimension of <code>Cp</code> array. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga48e6f3844605fffac9f622f05afa6043">rsb_spsm</a>, <a class="el" href="group__rsb__doc__rsb.html#ga9b044332b720d3f8083ae792068fb04a">rsb_spsv</a>, <a class="el" href="group__rsb__doc__rsb.html#ga8d7a05bbc165bd6ac20e8e23487a5871">rsb_tune_spsm</a></dd></dl>
+
+</div>
+</div>
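+<p>A multi-vector triangular solve sketch (assuming <code>mtxTp</code> was assembled with <a class="el" href="rsb_8h.html#aed7916ce610549fc75aa0c3e2d2ae1b9">RSB_FLAG_LOWER_TRIANGULAR</a>; array names and leading dimensions are illustrative):</p>
+<pre class="fragment">
+const double alpha = 1.0;
+/* Bp holds the right hand sides, Cp receives the solutions; column major */
+rsb_err_t errval = rsb_spsm(RSB_TRANSPOSITION_N, &alpha, mtxTp, nrhs,
+        RSB_FLAG_WANT_COLUMN_MAJOR_ORDER, NULL /* betap: default */,
+        Bp, ldB, Cp, ldC);
+</pre>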
+<a class="anchor" id="ga9b044332b720d3f8083ae792068fb04a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_spsv </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxTp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>Xp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>incX</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>Yp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>incY</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Computes <img class="formulaInl" alt="$Y \leftarrow \alpha \cdot opt( T )^{-1} \cdot X $" src="form_30.png"/>, with upper or lower triangular <img class="formulaInl" alt="$T$" src="form_31.png"/>. It is allowed to supply same <code>Xp</code> and <code>Yp</code> (that is, <code>Xp==Yp</code>).</p>
+<p><img class="formulaInl" alt="$opt( T )=T$" src="form_32.png"/> if <code>transT=<a class="el" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a></code>; <img class="formulaInl" alt="$opt( T )= T ^T$" src="form_33.png"/> if <code>transT=<a class="el" href="rsb__types_8h.html#a37f8cea71946de2f832bdb9d438d5edf">RSB_TRANSPOSITION_T</a></code>; <img class="formulaInl" alt="$opt( T )= T ^H$" src="form_34.png"/> if <code>transT=<a class="el" href="rsb__types_8 [...]
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transT</td><td>Transposition parameter for <img class="formulaInl" alt="$T$" src="form_31.png"/> (see <a class="el" href="rsb__types_8h.html#matrix_transposition_flags_section">matrix_transposition_flags_section</a>). </td></tr>
+    <tr><td class="paramname">alphap</td><td>Optional pointer (if <code>NULL</code>, will default to 1) to a numerical value (of the same type as matrix). </td></tr>
+    <tr><td class="paramname">mtxTp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$T$" src="form_31.png"/> representation. The matrix must be triangular; that is, it must have been allocated with either <a class="el" href="rsb_8h.html#aed7916ce610549fc75aa0c3e2d2ae1b9">RSB_FLAG_LOWER_TRIANGULAR</a> or <a class="el" href="rsb_8h.html#a9168d244582c1a4c57a9ec93d9432539">RSB_FLAG_UPPER_TRIANGULAR</a> flags. </td></tr>
+    <tr><td class="paramname">Xp</td><td>The input vector array. </td></tr>
+    <tr><td class="paramname">incX</td><td>Spacing of vector elements in each input vector array (>=1). </td></tr>
+    <tr><td class="paramname">Yp</td><td>The output array vector. </td></tr>
+    <tr><td class="paramname">incY</td><td>Spacing of vector elements in each output vector array (>=1). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error. If <code>–enable-zero-division-checks-on-solve [...]
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga48e6f3844605fffac9f622f05afa6043">rsb_spsm</a>, <a class="el" href="group__rsb__doc__rsb.html#ga9b044332b720d3f8083ae792068fb04a">rsb_spsv</a>, <a class="el" href="group__rsb__doc__rsb.html#ga8d7a05bbc165bd6ac20e8e23487a5871">rsb_tune_spsm</a></dd></dl>
+
+</div>
+</div>
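+<p>A sketch solving <code>T y = x</code> in place (recall that <code>Xp==Yp</code> is allowed here; <code>mtxTp</code> is assumed triangular):</p>
+<pre class="fragment">
+const double alpha = 1.0;
+/* X holds the right hand side on entry and the solution on exit */
+rsb_err_t errval = rsb_spsv(RSB_TRANSPOSITION_N, &alpha, mtxTp, X, 1, X, 1);
+</pre>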
+<a class="anchor" id="ga28710b8dade48738ea8e075aa1a3d262"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_strerror_r </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td>
+          <td class="paramname"><em>errval</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> * </td>
+          <td class="paramname"><em>buf</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">size_t </td>
+          <td class="paramname"><em>buflen</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Writes a textual description of an error code to the specified string buffer. No more than <code>buflen</code> characters will be written (including the terminating <code>NUL</code> character).</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">errval</td><td>A valid error flag value (see <a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a>). </td></tr>
+    <tr><td class="paramname">buf</td><td>A valid string buffer pointer where to write to. </td></tr>
+    <tr><td class="paramname">buflen</td><td>The string buffer length.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror</a>, <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r</a></dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga6677d4e20c00bdf4ebf53567246f5693"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#ab7a0af874a2765e9271a63ee4acf3d5d">rsb_time_t</a> rsb_time </td>
+          <td>(</td>
+          <td class="paramtype">void </td>
+          <td class="paramname"></td><td>)</td>
+          <td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Returns the current time in seconds. This function is meant to be used for computing wall clock time intervals (e.g.: for benchmarking purposes). The user should not rely on this function for absolute time computations.</p>
+<dl class="section return"><dt>Returns</dt><dd>A value for the current time, in seconds. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a>, <a class="el" href="group__rsb__doc__rsb.html#gaa09eca432d5bb8c57fcff5d9ab98dfb8">rsb_coo_sort</a></dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga8c11024d248e2e686476fd9e89aa7c15"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_tune_spmm </td>
+          <td>(</td>
+          <td class="paramtype">struct rsb_mtx_t ** </td>
+          <td class="paramname"><em>mtxOpp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ab6fedd060aee0dd9f61f0438987a99a9">rsb_real_t</a> * </td>
+          <td class="paramname"><em>sfp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a> * </td>
+          <td class="paramname"><em>tnp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a> </td>
+          <td class="paramname"><em>maxr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ab7a0af874a2765e9271a63ee4acf3d5d">rsb_time_t</a> </td>
+          <td class="paramname"><em>maxt</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>Bp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> </td>
+          <td class="paramname"><em>ldB</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>betap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>Cp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> </td>
+          <td class="paramname"><em>ldC</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>An auto-tuner: optimizes either the matrix instance, the thread count, or both for the <a class="el" href="group__rsb__doc__rsb.html#ga3ec8d721b5333aae6ea9b03eb0039285">rsb_spmm</a> operation.</p>
+<p>The tuner works by evaluating different matrix instances and thread count variants. The instance leading to the fastest operation time will be retained and given back to the user in <code>*mtxOpp</code>. If <code>nrhs==1</code> and <code>order==<a class="el" href="rsb_8h.html#a6ed7790c2f7129a6e051b8167c48a43c">RSB_FLAG_WANT_COLUMN_MAJOR_ORDER</a></code>, unitary stride vectors are assumed. In case of error, the original input matrix shall be unaffected. It is possible to specify the leading di [...]
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">mtxOpp</td><td>Optimal matrix structure pointer will be assigned to <code>*mtxOpp</code> (it may occur that *mtxOpp==mtxAp on output). If <code>mtxOpp</code> is <code>NULL</code> then no data structure optimization will be attempted; rather, only optimal threads search will occur (<code>tnp</code> must be not <code>NULL</code> then). </td></tr>
+    <tr><td class="paramname">sfp</td><td>Achieved speedup factor will be written to <code>*sfp</code> (unless <code>sfp==NULL</code>). </td></tr>
+    <tr><td class="paramname">tnp</td><td>If <code>tnp==NULL</code> on input, the current thread count will be utilized. Otherwise, if <code>*tnp>0</code>, then *tnp will be used as first suggestion in optimal thread count searching. If <code>tnp!=NULL</code> ,on output <code>*tnp</code> will be set to contain the optimal number of threads. Then, the user is expected to set this number of threads using e.g.: <code><a class="el" href="rsb_8h.html#a20da3b07d4c17771762413010816e36e">RSB_ [...]
+    <tr><td class="paramname">maxr</td><td>Optimizer rounds max count. If <code><1</code>, will be treated as 1; if 0 will be decided automatically. Max is <a class="el" href="rsb__types_8h.html#aef1f5467f82116857e5003daa0f75ccd">RSB_CONST_MAX_TUNING_ROUNDS</a>. </td></tr>
+    <tr><td class="paramname">maxt</td><td>Maximum time (in seconds) per optimization round (does not take in account conversion time). If <code>maxt<0.0</code> is provided, <code>-ceil</code>(maxt) will be interpreted as number of iterations to check for each operation time sample. If <code>maxt==0.0</code> is provided, a default choice will be made instead. </td></tr>
+    <tr><td class="paramname">transA</td><td>Transposition parameter for <img class="formulaInl" alt="$A$" src="form_21.png"/> (see <a class="el" href="rsb__types_8h.html#matrix_transposition_flags_section">matrix_transposition_flags_section</a>). </td></tr>
+    <tr><td class="paramname">alphap</td><td>Optional pointer (if <code>NULL</code>, will default to 1) to a numerical value (of the same type as matrix). </td></tr>
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>The number of right hand side vectors (cannot be <code><1</code>). </td></tr>
+    <tr><td class="paramname">order</td><td>A flag among <a class="el" href="rsb_8h.html#a6ed7790c2f7129a6e051b8167c48a43c">RSB_FLAG_WANT_COLUMN_MAJOR_ORDER</a> and <a class="el" href="rsb_8h.html#a7e9ef3a7ae3c22ab5c76d36b3ac482cc">RSB_FLAG_WANT_ROW_MAJOR_ORDER</a>. For contiguous vector arrays, you probably want <a class="el" href="rsb_8h.html#a6ed7790c2f7129a6e051b8167c48a43c">RSB_FLAG_WANT_COLUMN_MAJOR_ORDER</a>. </td></tr>
+    <tr><td class="paramname">Bp</td><td>The input vector array. If <code>NULL</code>, a temporary, internally allocated copy will be used. </td></tr>
+    <tr><td class="paramname">ldB</td><td>Leading dimension of <code>Bp</code> array. </td></tr>
+    <tr><td class="paramname">betap</td><td>Optional pointer (if <code>NULL</code>, will default to 1) to a numerical value. </td></tr>
+    <tr><td class="paramname">Cp</td><td>The output vector array. If <code>NULL</code>, a temporary, internally allocated copy will be used. </td></tr>
+    <tr><td class="paramname">ldC</td><td>Leading dimension of <code>Cp</code> array. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error.</dd></dl>
+<p>Examples: </p>
+<div class="fragment"><div class="line"><span class="comment">// obtain best thread count for mtxAp:</span></div>
+<div class="line">errval = <a class="code" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>(NULL  ,&sf,&tn ,maxr,maxt,transA,&alpha,mtxAp,nrhs,order,Bp,ldB,&beta,Cp,ldC);</div>
+<div class="line"></div>
+<div class="line"><span class="comment">// obtain best thread count for mtxAp; Bp and Cp will be allocated by the tuner:</span></div>
+<div class="line">errval = <a class="code" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>(NULL  ,&sf,&tn ,maxr,maxt,transA,&alpha,mtxAp,nrhs,order,NULL,0,&beta,NULL,0);</div>
+<div class="line"></div>
+<div class="line"><span class="comment">// obtain best clone of mtxAp (for current thread count):</span></div>
+<div class="line">assert(mtxOp == NULL && mtxAp != NULL);</div>
+<div class="line">errval = <a class="code" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>(&mtxOp,&sf,NULL,maxr,maxt,transA,&alpha,mtxAp,nrhs,order,Bp,ldB,&beta,Cp,ldC);</div>
+<div class="line"></div>
+<div class="line"><span class="comment">// obtain best clone of mtxAp and best thread count:</span></div>
+<div class="line">assert(mtxOp == NULL && mtxAp != NULL);</div>
+<div class="line">errval = <a class="code" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>(&mtxOp,&sf,&tn ,maxr,maxt,transA,&alpha,mtxAp,nrhs,order,Bp,ldB,&beta,Cp,ldC);</div>
+<div class="line"></div>
+<div class="line"><span class="comment">// replace mtxAp with best clone (if any):</span></div>
+<div class="line">errval = <a class="code" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>(&mtxAp,&sf,NULL,maxr,maxt,transA,&alpha,NULL ,nrhs,order,Bp,ldB,&beta,Cp,ldC);</div>
+<div class="line"></div>
+<div class="line"><span class="comment">// replace mtxAp with best clone (if any) and obtain best thread count:</span></div>
+<div class="line">errval = <a class="code" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>(&mtxAp,&sf,&tn ,maxr,maxt,transA,&alpha,NULL ,nrhs,order,Bp,ldB,&beta,Cp,ldC);</div>
+<div class="line"></div>
+<div class="line"><span class="comment">// illegal call:</span></div>
+<div class="line">assert(mtxOp != NULL && mtxAp != NULL);</div>
+<div class="line">errval = <a class="code" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>(&mtxOp,&sf,&tn ,maxr,maxt,transA,&alpha,mtxAp,nrhs,order,Bp,ldB,&beta,Cp,ldC);</div>
+</div><!-- fragment --><dl class="section warning"><dt>Warning</dt><dd>This function is still experimental. In case of error, although the matrix shall be unaffected, the library status may be affected (e.g.: execution thread count, default matrix subdivision). </dd></dl>
+<dl class="todo"><dt><b><a class="el" href="todo.html#_todo000001">Todo:</a></b></dt><dd>In the future, autotuning functionality shall improve considerably. Need support for lightweight, threads-only optimization. May support strided vectors in the future. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga4a16a82d289c76a437915db449553d4d">rsb_spmv</a>, <a class="el" href="group__rsb__doc__rsb.html#ga3ec8d721b5333aae6ea9b03eb0039285">rsb_spmm</a>, <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a></dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga8d7a05bbc165bd6ac20e8e23487a5871"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_tune_spsm </td>
+          <td>(</td>
+          <td class="paramtype">struct rsb_mtx_t ** </td>
+          <td class="paramname"><em>mtxOpp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ab6fedd060aee0dd9f61f0438987a99a9">rsb_real_t</a> * </td>
+          <td class="paramname"><em>sfp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a> * </td>
+          <td class="paramname"><em>tnp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a> </td>
+          <td class="paramname"><em>maxr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ab7a0af874a2765e9271a63ee4acf3d5d">rsb_time_t</a> </td>
+          <td class="paramname"><em>maxt</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const struct rsb_mtx_t * </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>Bp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> </td>
+          <td class="paramname"><em>ldB</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>betap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>Cp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> </td>
+          <td class="paramname"><em>ldC</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>An auto-tuner: optimizes either the matrix instance, the thread count, or both for the <a class="el" href="group__rsb__doc__rsb.html#ga48e6f3844605fffac9f622f05afa6043">rsb_spsm</a> operation.</p>
+<p>The tuner works by evaluating different matrix instances and thread count variants. The instance leading to the fastest operation time will be retained and given back to the user in <code>*mtxOpp</code>. If <code>nrhs==1</code> and <code>order==<a class="el" href="rsb_8h.html#a6ed7790c2f7129a6e051b8167c48a43c">RSB_FLAG_WANT_COLUMN_MAJOR_ORDER</a></code>, unitary stride vectors are assumed. In case of error, the original input matrix shall be unaffected. It is possible to specify the leading di [...]
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">mtxOpp</td><td>Optimal matrix structure pointer will be assigned to <code>*mtxOpp</code> (it may occur that *mtxOpp==mtxAp on output). If <code>mtxOpp</code> is <code>NULL</code> then no data structure optimization will be attempted; rather, only optimal threads search will occur (<code>tnp</code> must be not <code>NULL</code> then). </td></tr>
+    <tr><td class="paramname">sfp</td><td>Achieved speedup factor will be written to <code>*sfp</code> (unless <code>sfp==NULL</code>). </td></tr>
+    <tr><td class="paramname">tnp</td><td>If <code>tnp==NULL</code> on input, the current thread count will be utilized. Otherwise, if <code>*tnp>0</code>, then *tnp will be used as first suggestion in optimal thread count searching. If <code>tnp!=NULL</code> ,on output <code>*tnp</code> will be set to contain the optimal number of threads. Then, the user is expected to set this number of threads using e.g.: <code><a class="el" href="rsb_8h.html#a20da3b07d4c17771762413010816e36e">RSB_ [...]
+    <tr><td class="paramname">maxr</td><td>Optimizer rounds max count. If <code><1</code>, will be treated as 1; if 0 will be decided automatically. Max is <a class="el" href="rsb__types_8h.html#aef1f5467f82116857e5003daa0f75ccd">RSB_CONST_MAX_TUNING_ROUNDS</a>. </td></tr>
+    <tr><td class="paramname">maxt</td><td>Maximum time (in seconds) per optimization round (does not take in account conversion time). If <code>maxt<0.0</code> is provided, <code>-ceil</code>(maxt) will be interpreted as number of iterations to check for each operation time sample. If <code>maxt==0.0</code> is provided, a default choice will be made instead. </td></tr>
+    <tr><td class="paramname">transA</td><td>Transposition parameter for <img class="formulaInl" alt="$A$" src="form_21.png"/> (see <a class="el" href="rsb__types_8h.html#matrix_transposition_flags_section">matrix_transposition_flags_section</a>). </td></tr>
+    <tr><td class="paramname">alphap</td><td>Optional pointer (if <code>NULL</code>, will default to 1) to a numerical value (of the same type as matrix). </td></tr>
+    <tr><td class="paramname">mtxAp</td><td>Valid <code>rsb_mtx_t</code> pointer to matrix <img class="formulaInl" alt="$A$" src="form_21.png"/> representation. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>The number of right hand side vectors (cannot be <code><1</code>). </td></tr>
+    <tr><td class="paramname">order</td><td>A flag among <a class="el" href="rsb_8h.html#a6ed7790c2f7129a6e051b8167c48a43c">RSB_FLAG_WANT_COLUMN_MAJOR_ORDER</a> and <a class="el" href="rsb_8h.html#a7e9ef3a7ae3c22ab5c76d36b3ac482cc">RSB_FLAG_WANT_ROW_MAJOR_ORDER</a>. For contiguous vector arrays, you probably want <a class="el" href="rsb_8h.html#a6ed7790c2f7129a6e051b8167c48a43c">RSB_FLAG_WANT_COLUMN_MAJOR_ORDER</a>. </td></tr>
+    <tr><td class="paramname">Bp</td><td>The input vector array. If <code>NULL</code>, a temporary, internally allocated copy will be used. </td></tr>
+    <tr><td class="paramname">ldB</td><td>Leading dimension of <code>Bp</code> array. </td></tr>
+    <tr><td class="paramname">betap</td><td>Optional pointer (if <code>NULL</code>, will default to 1) to a numerical value. </td></tr>
+    <tr><td class="paramname">Cp</td><td>The output vector array. If <code>NULL</code>, a temporary, internally allocated copy will be used. </td></tr>
+    <tr><td class="paramname">ldC</td><td>Leading dimension of <code>Cp</code> array. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> on correct operation, an error code otherwise. You can use <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a> to get more information about the error.</dd></dl>
+<p>If <code>--enable-zero-division-checks-on-solve</code> was specified at configure time, attempts to solve a triangular matrix with zeroes on a diagonal will fail. </p>
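+<p>Example (a minimal sketch by analogy with the <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a> examples above; variable declarations are assumed as there):</p>
+<div class="fragment"><div class="line"><span class="comment">// obtain best thread count for the triangular solve with mtxAp:</span></div>
+<div class="line">errval = <a class="code" href="group__rsb__doc__rsb.html#ga8d7a05bbc165bd6ac20e8e23487a5871">rsb_tune_spsm</a>(NULL  ,&sf,&tn ,maxr,maxt,transA,&alpha,mtxAp,nrhs,order,Bp,ldB,&beta,Cp,ldC);</div>
+<div class="line"></div>
+<div class="line"><span class="comment">// replace mtxAp with a clone tuned for rsb_spsm (if any was found):</span></div>
+<div class="line">errval = <a class="code" href="group__rsb__doc__rsb.html#ga8d7a05bbc165bd6ac20e8e23487a5871">rsb_tune_spsm</a>(&mtxAp,&sf,NULL,maxr,maxt,transA,&alpha,NULL ,nrhs,order,Bp,ldB,&beta,Cp,ldC);</div>
+</div><!-- fragment -->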
+<dl class="section warning"><dt>Warning</dt><dd>This function is still experimental. In case of error, although the matrix shall be unaffected, the library status may be affected (e.g.: execution thread count, default matrix subdivision). </dd></dl>
+<dl class="todo"><dt><b><a class="el" href="todo.html#_todo000002">Todo:</a></b></dt><dd>In the future, autotuning functionality shall improve considerably. Need support for lightweight, threads-only optimization. May support strided vectors in the future. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#ga48e6f3844605fffac9f622f05afa6043">rsb_spsm</a>, <a class="el" href="group__rsb__doc__rsb.html#ga9b044332b720d3f8083ae792068fb04a">rsb_spsv</a>, <a class="el" href="group__rsb__doc__rsb.html#ga8d7a05bbc165bd6ac20e8e23487a5871">rsb_tune_spsm</a> </dd>
+<dd>
+<a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a></dd></dl>
+
+</div>
+</div>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:23 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/group__rsb__doc__sparse__blas.html b/doc/html/group__rsb__doc__sparse__blas.html
new file mode 100644
index 0000000..1906091
--- /dev/null
+++ b/doc/html/group__rsb__doc__sparse__blas.html
@@ -0,0 +1,14445 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: The Sparse BLAS interface to librsb (blas_sparse.h, rsb_blas_sparse.F90)</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#func-members">Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">The Sparse BLAS interface to librsb (blas_sparse.h, rsb_blas_sparse.F90)</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>A Sparse BLAS interface (see <a href="http://www.netlib.org/blas/blast-forum/">http://www.netlib.org/blas/blast-forum/</a>) to <code>librsb</code>. Level 1 (vector-vector operations) is supported in a basic way. Level 2 (sparse matrix-dense vector operations) is supported fully. Level 3 (sparse matrix-dense matrix operations) is supported as a wrapper around Level 2.  
+<a href="#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="func-members"></a>
+Functions</h2></td></tr>
+<tr class="memitem:ga88a22a58b50ce89708abb232e4cbffcd"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga88a22a58b50ce89708abb232e4cbffcd">BLAS_susdot</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> conj, int nnz, const float *x, const int *indx, const float *y, int incy, float *r, enum <a class="el" href="blas__sparse_8 [...]
+<tr class="memitem:ga3d4d6df66fbbdfb8585770ce2ce37e6b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3d4d6df66fbbdfb8585770ce2ce37e6b">blas_susdot_</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> *conj, int *nnz, const float *x, const int *indx, const float *y, int *incy, float *r, enum <a class="el" href="blas__spa [...]
+<tr class="memitem:ga2ff8ae1b5a89cdb1bfd23b7b27635614"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2ff8ae1b5a89cdb1bfd23b7b27635614">BLAS_dusdot</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> conj, int nnz, const double *x, const int *indx, const double *y, int incy, double *r, enum <a class="el" href="blas__spars [...]
+<tr class="memitem:ga891919cc22b2f9db6b26c857e2080b48"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga891919cc22b2f9db6b26c857e2080b48">blas_dusdot_</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> *conj, int *nnz, const double *x, const int *indx, const double *y, int *incy, double *r, enum <a class="el" href="blas__ [...]
+<tr class="memitem:gae02711e85989d740894aa260028cab15"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae02711e85989d740894aa260028cab15">BLAS_cusdot</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> conj, int nnz, const void *x, const int *indx, const void *y, int incy, void *r, enum <a class="el" href="blas__sparse_8h.h [...]
+<tr class="memitem:ga6805ad5c8346534e68b436708920d135"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6805ad5c8346534e68b436708920d135">blas_cusdot_</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> *conj, int *nnz, const void *x, const int *indx, const void *y, int *incy, void *r, enum <a class="el" href="blas__sparse [...]
+<tr class="memitem:ga1baea6bd05a2117418d333f5365e34df"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1baea6bd05a2117418d333f5365e34df">BLAS_zusdot</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> conj, int nnz, const void *x, const int *indx, const void *y, int incy, void *r, enum <a class="el" href="blas__sparse_8h.h [...]
+<tr class="memitem:gaa9f54b685570087469d21462d089ef7d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa9f54b685570087469d21462d089ef7d">blas_zusdot_</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> *conj, int *nnz, const void *x, const int *indx, const void *y, int *incy, void *r, enum <a class="el" href="blas__sparse [...]
+<tr class="memitem:gaeedaef37cd7591d8b15bc7e8ee049414"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaeedaef37cd7591d8b15bc7e8ee049414">BLAS_susaxpy</a> (int nnz, float alpha, const float *x, const int *indx, float *y, int incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga863f07d7735eaa4fc0c6dbe1be09974e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga863f07d7735eaa4fc0c6dbe1be09974e">blas_susaxpy_</a> (int *nnz, float *alpha, const float *x, const int *indx, float *y, int *incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga31b475fb2cc3f50775a5b6db930ab570"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga31b475fb2cc3f50775a5b6db930ab570">BLAS_dusaxpy</a> (int nnz, double alpha, const double *x, const int *indx, double *y, int incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga90f1fe9fa99b947c8096befdbfb49fb3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga90f1fe9fa99b947c8096befdbfb49fb3">blas_dusaxpy_</a> (int *nnz, double *alpha, const double *x, const int *indx, double *y, int *incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gafaf15e2530cd078b260bb744e00487cb"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafaf15e2530cd078b260bb744e00487cb">BLAS_cusaxpy</a> (int nnz, const void *alpha, const void *x, const int *indx, void *y, int incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:gac6189fef9b94289f2b8a5b6b7287b50b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac6189fef9b94289f2b8a5b6b7287b50b">blas_cusaxpy_</a> (int *nnz, const void *alpha, const void *x, const int *indx, void *y, int *incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga20f8bb20cf00554547342750d80b2197"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga20f8bb20cf00554547342750d80b2197">BLAS_zusaxpy</a> (int nnz, const void *alpha, const void *x, const int *indx, void *y, int incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga58ad4724155b0cef43cdb7d95f879d8c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga58ad4724155b0cef43cdb7d95f879d8c">blas_zusaxpy_</a> (int *nnz, const void *alpha, const void *x, const int *indx, void *y, int *incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga40cdf6b61694154efa1ba8d180381827"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga40cdf6b61694154efa1ba8d180381827">BLAS_susga</a> (int nnz, const float *y, int incy, float *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga69bea2986de886f37a493464b1006456"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga69bea2986de886f37a493464b1006456">blas_susga_</a> (int *nnz, const float *y, int *incy, float *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gaa253fd591971e664e48e058e85855882"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa253fd591971e664e48e058e85855882">BLAS_dusga</a> (int nnz, const double *y, int incy, double *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga21d8b0bd816bfd21371f70ca82ee9d9c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga21d8b0bd816bfd21371f70ca82ee9d9c">blas_dusga_</a> (int *nnz, const double *y, int *incy, double *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga71f2df0176e5f44bf482ea2386ac5fac"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga71f2df0176e5f44bf482ea2386ac5fac">BLAS_cusga</a> (int nnz, const void *y, int incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga284485bb91904fe1324257ba1ab3a982"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga284485bb91904fe1324257ba1ab3a982">blas_cusga_</a> (int *nnz, const void *y, int *incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga2a29ab06d610d011109dd0c3da94992f"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2a29ab06d610d011109dd0c3da94992f">BLAS_zusga</a> (int nnz, const void *y, int incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga245af9e95488dece29876354c6e91fed"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga245af9e95488dece29876354c6e91fed">blas_zusga_</a> (int *nnz, const void *y, int *incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga2c53b81e979cbae6a5d198509f6d905a"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2c53b81e979cbae6a5d198509f6d905a">BLAS_susgz</a> (int nnz, float *y, int incy, float *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga74964bd95bd8945b13c7fe2c7f559e5c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga74964bd95bd8945b13c7fe2c7f559e5c">blas_susgz_</a> (int *nnz, float *y, int *incy, float *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga0b26bd51a324ee09433dbfa995396344"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga0b26bd51a324ee09433dbfa995396344">BLAS_dusgz</a> (int nnz, double *y, int incy, double *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:gadd448e0d4a33417634e6232c77d8a82a"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gadd448e0d4a33417634e6232c77d8a82a">blas_dusgz_</a> (int *nnz, double *y, int *incy, double *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga2a4c72eb85493e921f4d40e18edb83ef"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2a4c72eb85493e921f4d40e18edb83ef">BLAS_cusgz</a> (int nnz, void *y, int incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga32fdcc497a0db0ba36b413725ddc8c13"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga32fdcc497a0db0ba36b413725ddc8c13">blas_cusgz_</a> (int *nnz, void *y, int *incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga0d52a140d65ab78ee0c515c445b42451"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga0d52a140d65ab78ee0c515c445b42451">BLAS_zusgz</a> (int nnz, void *y, int incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga5a6be1c191d51a622b99fe1b9a776bdc"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5a6be1c191d51a622b99fe1b9a776bdc">blas_zusgz_</a> (int *nnz, void *y, int *incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gad58ff27808df2287b9cc77f6ed4d55ff"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad58ff27808df2287b9cc77f6ed4d55ff">BLAS_sussc</a> (int nnz, const float *x, float *y, int incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga3f88389831294ad45b84ec31313fbc15"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3f88389831294ad45b84ec31313fbc15">blas_sussc_</a> (int *nnz, const float *x, float *y, int *incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gac71029e615c6c893b54e2f9395a536a4"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac71029e615c6c893b54e2f9395a536a4">BLAS_dussc</a> (int nnz, const double *x, double *y, int incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga98ac28de307a8713020edd41be98d455"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga98ac28de307a8713020edd41be98d455">blas_dussc_</a> (int *nnz, const double *x, double *y, int *incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga1b93628d321fbb77a50f98b467a3ff84"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1b93628d321fbb77a50f98b467a3ff84">BLAS_cussc</a> (int nnz, const void *x, void *y, int incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:gafc77b392db05fc22122d4639595cccb3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafc77b392db05fc22122d4639595cccb3">blas_cussc_</a> (int *nnz, const void *x, void *y, int *incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gaad333ae644010e3b059190b98528c79d"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaad333ae644010e3b059190b98528c79d">BLAS_zussc</a> (int nnz, const void *x, void *y, int incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:gab89e9860df0ed52620651cfc607a987a"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gab89e9860df0ed52620651cfc607a987a">blas_zussc_</a> (int *nnz, const void *x, void *y, int *incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gafb4d039eb5319613ed30db7fb323278c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, float alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A,  [...]
+<tr class="memitem:ga651b1d1df5c964dbb21c1a5b14d7878b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga651b1d1df5c964dbb21c1a5b14d7878b">blas_susmv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, float *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> [...]
+<tr class="memitem:ga9a8f45ddd3c890a296239b212f0c033b"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, double alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, [...]
+<tr class="memitem:ga7172d1d1d0f3310ceaf9ecd1d128407b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga7172d1d1d0f3310ceaf9ecd1d128407b">blas_dusmv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, double *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a [...]
+<tr class="memitem:ga9ec2e63176f2d6b11ee48bb523b4f7c7"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</ [...]
+<tr class="memitem:ga3d60593a2a4ea8c081590b392c39419d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3d60593a2a4ea8c081590b392c39419d">blas_cusmv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matri [...]
+<tr class="memitem:ga1ee2eb4be4c1e0565051fe04ca7415a2"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</ [...]
+<tr class="memitem:ga6747bd2d7930018d8693a97a3eb2865c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6747bd2d7930018d8693a97a3eb2865c">blas_zusmv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matri [...]
+<tr class="memitem:gafc9acf48136458baa6ace90355e7abb2"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafc9acf48136458baa6ace90355e7abb2">BLAS_sussv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, float alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> T,  [...]
+<tr class="memitem:ga3b63c0a83f8088e60c8e609b451354f0"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3b63c0a83f8088e60c8e609b451354f0">blas_sussv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, float *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> [...]
+<tr class="memitem:gade1bbec9b8263a2a5e76112f1042576b"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gade1bbec9b8263a2a5e76112f1042576b">BLAS_dussv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, double alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> T, [...]
+<tr class="memitem:ga36f989895809beaafaa57bb5ab41347f"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga36f989895809beaafaa57bb5ab41347f">blas_dussv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, double *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a [...]
+<tr class="memitem:ga4c327ba1fa391b550f2fc5580ad49bdf"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4c327ba1fa391b550f2fc5580ad49bdf">BLAS_cussv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</ [...]
+<tr class="memitem:ga39b0ab077486c1fc3766d68ae9048447"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga39b0ab077486c1fc3766d68ae9048447">blas_cussv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matri [...]
+<tr class="memitem:ga7c1e740064369d0029cd627643eb841a"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga7c1e740064369d0029cd627643eb841a">BLAS_zussv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</ [...]
+<tr class="memitem:ga5d14a5df82e93614e8c524f6d20bb5c5"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5d14a5df82e93614e8c524f6d20bb5c5">blas_zussv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matri [...]
+<tr class="memitem:ga29c11c0c304637e89852359b0f8b10b5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga29c11c0c304637e89852359b0f8b10b5">BLAS_susmm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, int nrh [...]
+<tr class="memitem:ga2c1da8c4c1473a930ebfaa62f360ca8e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2c1da8c4c1473a930ebfaa62f360ca8e">blas_susmm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, int [...]
+<tr class="memitem:gaeeddeb634efe4448a31d62fb547362f6"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaeeddeb634efe4448a31d62fb547362f6">BLAS_dusmm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, int nrh [...]
+<tr class="memitem:gaa6f99d27ec6f88cca6c6cfac1e8ce7e3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa6f99d27ec6f88cca6c6cfac1e8ce7e3">blas_dusmm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, int [...]
+<tr class="memitem:ga8c87639294b57d2893cd29f64902a64d"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8c87639294b57d2893cd29f64902a64d">BLAS_cusmm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, int nrh [...]
+<tr class="memitem:ga2dc070f4b09c4b37d89ab9a0fb16352b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2dc070f4b09c4b37d89ab9a0fb16352b">blas_cusmm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, int [...]
+<tr class="memitem:ga88138db4545610d234d18d42237f36ee"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga88138db4545610d234d18d42237f36ee">BLAS_zusmm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, int nrh [...]
+<tr class="memitem:gaf7018fb638e25fe8b149d0cab4e844c0"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf7018fb638e25fe8b149d0cab4e844c0">blas_zusmm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, int [...]
+<tr class="memitem:ga3d7835bb3621aaf70787d72f86355f8d"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3d7835bb3621aaf70787d72f86355f8d">BLAS_sussm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, int nrh [...]
+<tr class="memitem:ga916f5af1f63f33a3a084accaf2dfd6f1"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga916f5af1f63f33a3a084accaf2dfd6f1">blas_sussm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, int [...]
+<tr class="memitem:gaad6ff4b3cce242f76362e6ad8a947713"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaad6ff4b3cce242f76362e6ad8a947713">BLAS_dussm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, int nrh [...]
+<tr class="memitem:ga4b93f6ef00d1aa3197a45a7e492edcd6"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4b93f6ef00d1aa3197a45a7e492edcd6">blas_dussm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, int [...]
+<tr class="memitem:gad864666e842f7d0878b1fb9d57e80c28"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad864666e842f7d0878b1fb9d57e80c28">BLAS_cussm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, int nrh [...]
+<tr class="memitem:gac3d8f0b6742566cbbadf6b18c9aa40b5"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac3d8f0b6742566cbbadf6b18c9aa40b5">blas_cussm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, int [...]
+<tr class="memitem:ga8602eae41f9e5248ff086087abe68bdf"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8602eae41f9e5248ff086087abe68bdf">BLAS_zussm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, int nrh [...]
+<tr class="memitem:ga60f808ded982233be9a4faaa5fb75db3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga60f808ded982233be9a4faaa5fb75db3">blas_zussm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, int [...]
+<tr class="memitem:gad9fe50c2e7a26e6ef83dfd3ea4cfcdd5"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad9fe50c2e7a26e6ef83dfd3ea4cfcdd5">BLAS_suscr_begin</a> (int m, int n)</td></tr>
+<tr class="memitem:gad2f7ede753754c2474d5460a92bba99e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad2f7ede753754c2474d5460a92bba99e">blas_suscr_begin_</a> (int *m, int *n, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gac931dcb1129ee3016ab82602c3d14fee"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac931dcb1129ee3016ab82602c3d14fee">BLAS_duscr_begin</a> (int m, int n)</td></tr>
+<tr class="memitem:gad7d5969e9edee49441fc89d22715e60d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad7d5969e9edee49441fc89d22715e60d">blas_duscr_begin_</a> (int *m, int *n, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga3483c364b4afec22621e46059b166247"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3483c364b4afec22621e46059b166247">BLAS_cuscr_begin</a> (int m, int n)</td></tr>
+<tr class="memitem:gaf4d21720c592de22cfd4139517d9d255"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf4d21720c592de22cfd4139517d9d255">blas_cuscr_begin_</a> (int *m, int *n, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga52b67393ad16e3d40e74fcdba88c7da4"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga52b67393ad16e3d40e74fcdba88c7da4">BLAS_zuscr_begin</a> (int m, int n)</td></tr>
+<tr class="memitem:gae0246836bd8d4b8697c6674998397f3a"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae0246836bd8d4b8697c6674998397f3a">blas_zuscr_begin_</a> (int *m, int *n, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga11c5559450e186c2a86d714f564411f3"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga11c5559450e186c2a86d714f564411f3">BLAS_suscr_block_begin</a> (int Mb, int Nb, int k, int l)</td></tr>
+<tr class="memitem:ga0067882e19affabebf581452a7c05252"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga0067882e19affabebf581452a7c05252">blas_suscr_block_begin_</a> (int *Mb, int *Nb, int *k, int *l, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gac0ca32cd2c78c8553d6d6b324e06ef59"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">BLAS_duscr_block_begin</a> (int Mb, int Nb, int k, int l)</td></tr>
+<tr class="memitem:ga864facf0316453a27af4b7024a11453b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga864facf0316453a27af4b7024a11453b">blas_duscr_block_begin_</a> (int *Mb, int *Nb, int *k, int *l, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga050218d0fa552a3e2c2d5452f876d9b5"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a> (int Mb, int Nb, int k, int l)</td></tr>
+<tr class="memitem:ga967bfc819ed66559e96ae55a6826d1f8"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga967bfc819ed66559e96ae55a6826d1f8">blas_cuscr_block_begin_</a> (int *Mb, int *Nb, int *k, int *l, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga5a261b2d1cc996c2a982ff8469faf286"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286">BLAS_zuscr_block_begin</a> (int Mb, int Nb, int k, int l)</td></tr>
+<tr class="memitem:ga62c3bd7ba1a96f82055478d40af67370"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga62c3bd7ba1a96f82055478d40af67370">blas_zuscr_block_begin_</a> (int *Mb, int *Nb, int *k, int *l, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gae7e006a448094a70204be60f24cdf1a3"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae7e006a448094a70204be60f24cdf1a3">BLAS_suscr_variable_block_begin</a> (int Mb, int Nb, const int *K, const int *L)</td></tr>
+<tr class="memitem:gaab267e13449c999ad8a8e3e358f4b2ed"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaab267e13449c999ad8a8e3e358f4b2ed">blas_suscr_variable_block_begin_</a> (int *Mb, int *Nb, const int *K, const int *L, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gae0c3c6dc5503e21afb8192efb0f66edd"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae0c3c6dc5503e21afb8192efb0f66edd">BLAS_duscr_variable_block_begin</a> (int Mb, int Nb, const int *K, const int *L)</td></tr>
+<tr class="memitem:ga12c7c1bdd46724147dbbd9b38dd2028e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga12c7c1bdd46724147dbbd9b38dd2028e">blas_duscr_variable_block_begin_</a> (int *Mb, int *Nb, const int *K, const int *L, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga95174fcf3bfbef91ab6b3b85fc90b128"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga95174fcf3bfbef91ab6b3b85fc90b128">BLAS_cuscr_variable_block_begin</a> (int Mb, int Nb, const int *K, const int *L)</td></tr>
+<tr class="memitem:ga24a2dac4570e6021fdcc5c84b52fb5bb"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga24a2dac4570e6021fdcc5c84b52fb5bb">blas_cuscr_variable_block_begin_</a> (int *Mb, int *Nb, const int *K, const int *L, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gaa582b369a0233027349f8f844cce7622"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa582b369a0233027349f8f844cce7622">BLAS_zuscr_variable_block_begin</a> (int Mb, int Nb, const int *K, const int *L)</td></tr>
+<tr class="memitem:gaa51253d1c144c8aa744b2e13742fec40"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa51253d1c144c8aa744b2e13742fec40">blas_zuscr_variable_block_begin_</a> (int *Mb, int *Nb, const int *K, const int *L, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga7176a90049256cb0e0fe45db66f57dd2"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga7176a90049256cb0e0fe45db66f57dd2">BLAS_suscr_end</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:ga5822f3be35eeb550c323de69ec9933d3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5822f3be35eeb550c323de69ec9933d3">blas_suscr_end_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga5d9ce97bf054b1e3750eaae5d4e6c335"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5d9ce97bf054b1e3750eaae5d4e6c335">BLAS_duscr_end</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:ga2120eb06b87f0e85d03a368e5bc55485"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2120eb06b87f0e85d03a368e5bc55485">blas_duscr_end_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gac2b5eccd5cf442b5e2e79201d62ca2b5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac2b5eccd5cf442b5e2e79201d62ca2b5">BLAS_cuscr_end</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:gaa78d3bef027e5a29ab5e5dd6188bcd75"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa78d3bef027e5a29ab5e5dd6188bcd75">blas_cuscr_end_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gad6315d71f6f7abf8b82c89c70d6abbf3"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">BLAS_zuscr_end</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:ga6c23466b531e84f472d5fa75228cb895"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6c23466b531e84f472d5fa75228cb895">blas_zuscr_end_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga26e2c422895e5df8492bdb561cab4a54"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga26e2c422895e5df8492bdb561cab4a54">BLAS_suscr_insert_entry</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, float val, int i, int j)</td></tr>
+<tr class="memitem:ga9b3085c739330bca518e8ef371f7d3b1"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9b3085c739330bca518e8ef371f7d3b1">blas_suscr_insert_entry_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, float *val, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga346ff5263bf0b3a5d7dda94e2000130c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga346ff5263bf0b3a5d7dda94e2000130c">BLAS_duscr_insert_entry</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, double val, int i, int j)</td></tr>
+<tr class="memitem:ga29c2f202a144845cc1d32c8d65bd5c5f"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga29c2f202a144845cc1d32c8d65bd5c5f">blas_duscr_insert_entry_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, double *val, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:gaa39564978ebda8a88f8d19e3e060bc4d"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa39564978ebda8a88f8d19e3e060bc4d">BLAS_cuscr_insert_entry</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *val, int i, int j)</td></tr>
+<tr class="memitem:ga6d735497bdd3bbafbb6168cb0fde5103"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6d735497bdd3bbafbb6168cb0fde5103">blas_cuscr_insert_entry_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *val, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga1ffe345c537b53ac5839da21b236d87c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1ffe345c537b53ac5839da21b236d87c">BLAS_zuscr_insert_entry</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *val, int i, int j)</td></tr>
+<tr class="memitem:gaad6627231dc4230affa318726ff3f345"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaad6627231dc4230affa318726ff3f345">blas_zuscr_insert_entry_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *val, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:gac6158601459aabebc22795864a2a62ba"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac6158601459aabebc22795864a2a62ba">BLAS_suscr_insert_entries</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int nnz, const float *val, const int *indx, const int *jndx)</td></tr>
+<tr class="memitem:ga9119b49fd049bcaa310bccb36fcda664"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9119b49fd049bcaa310bccb36fcda664">blas_suscr_insert_entries_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, const float *val, const int *indx, const int *jndx, int *istat)</td></tr>
+<tr class="memitem:gae0683bc8f0af5dd3e53b964190f9e1b4"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae0683bc8f0af5dd3e53b964190f9e1b4">BLAS_duscr_insert_entries</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int nnz, const double *val, const int *indx, const int *jndx)</td></tr>
+<tr class="memitem:gac2c1a4c7b2cebca56aedbad7a002e15f"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac2c1a4c7b2cebca56aedbad7a002e15f">blas_duscr_insert_entries_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, const double *val, const int *indx, const int *jndx, int *istat)</td></tr>
+<tr class="memitem:ga5af752a3fcb2898412f576eee7d9d618"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5af752a3fcb2898412f576eee7d9d618">BLAS_cuscr_insert_entries</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int nnz, const void *val, const int *indx, const int *jndx)</td></tr>
+<tr class="memitem:ga3deb906fcd5f9b9221b5865541c57d18"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3deb906fcd5f9b9221b5865541c57d18">blas_cuscr_insert_entries_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, const void *val, const int *indx, const int *jndx, int *istat)</td></tr>
+<tr class="memitem:gaacc9c9e5c95df4ea6656ad93f1f09666"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaacc9c9e5c95df4ea6656ad93f1f09666">BLAS_zuscr_insert_entries</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int nnz, const void *val, const int *indx, const int *jndx)</td></tr>
+<tr class="memitem:gad9ad3afc16fc0181117004fd46ff78ae"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad9ad3afc16fc0181117004fd46ff78ae">blas_zuscr_insert_entries_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, const void *val, const int *indx, const int *jndx, int *istat)</td></tr>
+<tr class="memitem:ga547d271038794dfc797aecc70e294761"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga547d271038794dfc797aecc70e294761">BLAS_suscr_insert_col</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int j, int nnz, const float *val, const int *indx)</td></tr>
+<tr class="memitem:ga2d8c691851acf099c25eff1a4c2885c1"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2d8c691851acf099c25eff1a4c2885c1">blas_suscr_insert_col_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *j, int *nnz, const float *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga8ee73d3b27bdc68e12c85ba281a337be"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8ee73d3b27bdc68e12c85ba281a337be">BLAS_duscr_insert_col</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int j, int nnz, const double *val, const int *indx)</td></tr>
+<tr class="memitem:ga5645393bb00d715d882e8e2d55c3f0d1"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5645393bb00d715d882e8e2d55c3f0d1">blas_duscr_insert_col_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *j, int *nnz, const double *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga589495aa8acd4eac99ef9132bc4062c9"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga589495aa8acd4eac99ef9132bc4062c9">BLAS_cuscr_insert_col</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int j, int nnz, const void *val, const int *indx)</td></tr>
+<tr class="memitem:ga1aadf4dc810ff6eb123a1bf9c859efe8"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1aadf4dc810ff6eb123a1bf9c859efe8">blas_cuscr_insert_col_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *j, int *nnz, const void *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga00cfdd3669b146b25d42a32f104ff8a3"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga00cfdd3669b146b25d42a32f104ff8a3">BLAS_zuscr_insert_col</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int j, int nnz, const void *val, const int *indx)</td></tr>
+<tr class="memitem:ga10a2dc6a5399459c83282bda757f5096"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga10a2dc6a5399459c83282bda757f5096">blas_zuscr_insert_col_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *j, int *nnz, const void *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga9b815fa125e3c84a6e6a6ead2c9ef87b"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9b815fa125e3c84a6e6a6ead2c9ef87b">BLAS_suscr_insert_row</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int nnz, const float *val, const int *indx)</td></tr>
+<tr class="memitem:ga71080ddbf0e0e602c7bc36993a6c88ca"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga71080ddbf0e0e602c7bc36993a6c88ca">blas_suscr_insert_row_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *nnz, const float *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:gac3472ca6b036771a68d6f5f01387e482"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac3472ca6b036771a68d6f5f01387e482">BLAS_duscr_insert_row</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int nnz, const double *val, const int *indx)</td></tr>
+<tr class="memitem:gaa72e5450302fa424dcd6cfae0bad872d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa72e5450302fa424dcd6cfae0bad872d">blas_duscr_insert_row_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *nnz, const double *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga77929c94cee3278cc7594a3f1377f5f8"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga77929c94cee3278cc7594a3f1377f5f8">BLAS_cuscr_insert_row</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int nnz, const void *val, const int *indx)</td></tr>
+<tr class="memitem:gad4acfbfdf33a5682ac657add0292711d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad4acfbfdf33a5682ac657add0292711d">blas_cuscr_insert_row_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *nnz, const void *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:gab52e13dc7c61fc48e593276f04cb2d30"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gab52e13dc7c61fc48e593276f04cb2d30">BLAS_zuscr_insert_row</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int nnz, const void *val, const int *indx)</td></tr>
+<tr class="memitem:gaf871e29bfce399dedbebe2aa9c7831df"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf871e29bfce399dedbebe2aa9c7831df">blas_zuscr_insert_row_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *nnz, const void *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga6e567e79f675ed861c8f446d0e7a78f5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6e567e79f675ed861c8f446d0e7a78f5">BLAS_suscr_insert_clique</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int k, const int l, const float *val, const int row_stride, const int col_stride, const int *indx, con [...]
+<tr class="memitem:gafcee9667fc445e32012c960fca7e698d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafcee9667fc445e32012c960fca7e698d">blas_suscr_insert_clique_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *k, const int *l, const float *val, const int *row_stride, const int *col_stride, const int *in [...]
+<tr class="memitem:ga290547e34be3648b2fe6a7378e59a7ec"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga290547e34be3648b2fe6a7378e59a7ec">BLAS_duscr_insert_clique</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int k, const int l, const double *val, const int row_stride, const int col_stride, const int *indx, co [...]
+<tr class="memitem:ga1f7870f8a1114b94444c721c933e8bef"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1f7870f8a1114b94444c721c933e8bef">blas_duscr_insert_clique_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *k, const int *l, const double *val, const int *row_stride, const int *col_stride, const int *i [...]
+<tr class="memitem:gaf089aaac5d65a4e38130b25d5ba2ba27"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf089aaac5d65a4e38130b25d5ba2ba27">BLAS_cuscr_insert_clique</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int k, const int l, const void *val, const int row_stride, const int col_stride, const int *indx, cons [...]
+<tr class="memitem:ga06acafbf28371b1ad8a75a85173261e6"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga06acafbf28371b1ad8a75a85173261e6">blas_cuscr_insert_clique_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *k, const int *l, const void *val, const int *row_stride, const int *col_stride, const int *ind [...]
+<tr class="memitem:ga52519d2caa1070b0c80ac3c6cb104d92"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga52519d2caa1070b0c80ac3c6cb104d92">BLAS_zuscr_insert_clique</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int k, const int l, const void *val, const int row_stride, const int col_stride, const int *indx, cons [...]
+<tr class="memitem:ga8c3430083655b74988536d823e40c723"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8c3430083655b74988536d823e40c723">blas_zuscr_insert_clique_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *k, const int *l, const void *val, const int *row_stride, const int *col_stride, const int *ind [...]
+<tr class="memitem:gaa682b478ac48e12d4a091977e8c45768"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa682b478ac48e12d4a091977e8c45768">BLAS_suscr_insert_block</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const float *val, int row_stride, int col_stride, int i, int j)</td></tr>
+<tr class="memitem:ga61080e2828351bd1585deb2713ed8a29"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga61080e2828351bd1585deb2713ed8a29">blas_suscr_insert_block_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const float *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga5d35aa3e27cdbf8a50db5b47ff5e0892"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5d35aa3e27cdbf8a50db5b47ff5e0892">BLAS_duscr_insert_block</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const double *val, int row_stride, int col_stride, int i, int j)</td></tr>
+<tr class="memitem:ga38012bbc4e99df72fb95409a4860ead7"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga38012bbc4e99df72fb95409a4860ead7">blas_duscr_insert_block_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const double *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga42054351f49850f079733143b2af87fb"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga42054351f49850f079733143b2af87fb">BLAS_cuscr_insert_block</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *val, int row_stride, int col_stride, int i, int j)</td></tr>
+<tr class="memitem:ga527ae15ee9e003d948494d9fcdad5dba"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga527ae15ee9e003d948494d9fcdad5dba">blas_cuscr_insert_block_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga573ee2ea89db4a133b8729abbb1223f0"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga573ee2ea89db4a133b8729abbb1223f0">BLAS_zuscr_insert_block</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *val, int row_stride, int col_stride, int i, int j)</td></tr>
+<tr class="memitem:gac3837cd5c7b2e8ac11c6c0e5cff8914c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac3837cd5c7b2e8ac11c6c0e5cff8914c">blas_zuscr_insert_block_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga2ff68116b5ae79c37bf335096de973c0"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2ff68116b5ae79c37bf335096de973c0">BLAS_uscr_end</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:ga60974067bf5367a9a3c6eaa9f6f8f4ab"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga60974067bf5367a9a3c6eaa9f6f8f4ab">blas_uscr_end_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga8b0cca8196f40f7b55084a978b40717f"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8b0cca8196f40f7b55084a978b40717f">BLAS_usds</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:gae4db91cffaf71632bd41b7423c64b757"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae4db91cffaf71632bd41b7423c64b757">blas_usds_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gae671b9fc06140680a8c104ef4f0f54f0"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae671b9fc06140680a8c104ef4f0f54f0">BLAS_susrows_scale</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const float *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</ [...]
+<tr class="memitem:ga9de54361f778577330c6c5ece88a63c3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9de54361f778577330c6c5ece88a63c3">blas_susrows_scale_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const float *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_typ [...]
+<tr class="memitem:ga01917c64887638dfb5226be1f87d964a"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga01917c64887638dfb5226be1f87d964a">BLAS_dusrows_scale</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const double *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type< [...]
+<tr class="memitem:ga9f09f9d05e01d5b354ce234781e3945a"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9f09f9d05e01d5b354ce234781e3945a">blas_dusrows_scale_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const double *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_ty [...]
+<tr class="memitem:gafc79de03622ceeb2e0b4343fe5904a36"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafc79de03622ceeb2e0b4343fe5904a36">BLAS_cusrows_scale</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a [...]
+<tr class="memitem:gae09ac29c14cede27a8d6a2be2687453e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae09ac29c14cede27a8d6a2be2687453e">blas_cusrows_scale_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type [...]
+<tr class="memitem:gad551879cdde6d16d9dd5b9edc647c667"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad551879cdde6d16d9dd5b9edc647c667">BLAS_zusrows_scale</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a [...]
+<tr class="memitem:ga806bb32c4231e4cd9d833370484ad369"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga806bb32c4231e4cd9d833370484ad369">blas_zusrows_scale_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type [...]
+<tr class="memitem:ga1113eda1c806ca3631fefde07624fbd6"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1113eda1c806ca3631fefde07624fbd6">BLAS_susget_diag</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, float *d)</td></tr>
+<tr class="memitem:ga0444e8a4b321bf1488fb496bdf3116d2"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga0444e8a4b321bf1488fb496bdf3116d2">blas_susget_diag_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, float *d, int *istat)</td></tr>
+<tr class="memitem:ga35b70a7c3083b791cf1b94cb20ef57be"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga35b70a7c3083b791cf1b94cb20ef57be">BLAS_dusget_diag</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, double *d)</td></tr>
+<tr class="memitem:ga7cfde04c833adeb887db75f4b2e104dd"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga7cfde04c833adeb887db75f4b2e104dd">blas_dusget_diag_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, double *d, int *istat)</td></tr>
+<tr class="memitem:ga4ec4b6dce3701c5803efa6b7455e1504"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4ec4b6dce3701c5803efa6b7455e1504">BLAS_cusget_diag</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *d)</td></tr>
+<tr class="memitem:ga4865a8fda031074a0d91cf5c548584b9"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4865a8fda031074a0d91cf5c548584b9">blas_cusget_diag_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *d, int *istat)</td></tr>
+<tr class="memitem:gad175937c05d3d05d3aa7fa35eb3028ec"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad175937c05d3d05d3aa7fa35eb3028ec">BLAS_zusget_diag</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *d)</td></tr>
+<tr class="memitem:ga73feb9adc685f7ff1d66763b0801a0f9"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga73feb9adc685f7ff1d66763b0801a0f9">blas_zusget_diag_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *d, int *istat)</td></tr>
+<tr class="memitem:gad84dbcdeda549e1b0361f7ade7a38b13"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad84dbcdeda549e1b0361f7ade7a38b13">BLAS_susget_rows_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int fr, int lr, int *nnzp)</td></tr>
+<tr class="memitem:ga1a8c39f41962e3be6ac84ea3be73f7a0"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1a8c39f41962e3be6ac84ea3be73f7a0">blas_susget_rows_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *fr, int *lr, int *nnzp, int *istat)</td></tr>
+<tr class="memitem:gab866cf0951b576a47da3864d668919f5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gab866cf0951b576a47da3864d668919f5">BLAS_dusget_rows_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int fr, int lr, int *nnzp)</td></tr>
+<tr class="memitem:gac09a79789dc8b79d2e5a375732703103"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac09a79789dc8b79d2e5a375732703103">blas_dusget_rows_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *fr, int *lr, int *nnzp, int *istat)</td></tr>
+<tr class="memitem:gac50e955d6e2bff77e2c3ac2146c77aaf"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac50e955d6e2bff77e2c3ac2146c77aaf">BLAS_cusget_rows_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int fr, int lr, int *nnzp)</td></tr>
+<tr class="memitem:ga9e11da08762387d8a7a885665298e815"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9e11da08762387d8a7a885665298e815">blas_cusget_rows_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *fr, int *lr, int *nnzp, int *istat)</td></tr>
+<tr class="memitem:gadeb3cbe1cc6987763a55665bcdb8aef5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gadeb3cbe1cc6987763a55665bcdb8aef5">BLAS_zusget_rows_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int fr, int lr, int *nnzp)</td></tr>
+<tr class="memitem:ga50cba1e236b63775110d6d1b292417da"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga50cba1e236b63775110d6d1b292417da">blas_zusget_rows_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *fr, int *lr, int *nnzp, int *istat)</td></tr>
+<tr class="memitem:ga8f78343207ff584d2d78789bd90e5533"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8f78343207ff584d2d78789bd90e5533">BLAS_susget_rows_sparse</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, float *VA, int *IA, int *JA, int *nnz, int fr, int lr)</td></tr>
+<tr class="memitem:ga0977f63d781215c826aa5a0ea2df9f47"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga0977f63d781215c826aa5a0ea2df9f47">blas_susget_rows_sparse_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, float *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)</td></tr>
+<tr class="memitem:ga498d143bae71d800dc35e2f1ee071359"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga498d143bae71d800dc35e2f1ee071359">BLAS_dusget_rows_sparse</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, double *VA, int *IA, int *JA, int *nnz, int fr, int lr)</td></tr>
+<tr class="memitem:gaf2e6ab2c5cbd23a7690bbe8e26794033"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf2e6ab2c5cbd23a7690bbe8e26794033">blas_dusget_rows_sparse_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, double *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)</td></tr>
+<tr class="memitem:ga23f0c1852e05a426d24d2eb1bcae168b"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga23f0c1852e05a426d24d2eb1bcae168b">BLAS_cusget_rows_sparse</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *VA, int *IA, int *JA, int *nnz, int fr, int lr)</td></tr>
+<tr class="memitem:ga04751c01dcfb6730a33eaa91f403dd09"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga04751c01dcfb6730a33eaa91f403dd09">blas_cusget_rows_sparse_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)</td></tr>
+<tr class="memitem:gaf9d44fc73526a4fdf9627424626bf4a5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf9d44fc73526a4fdf9627424626bf4a5">BLAS_zusget_rows_sparse</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *VA, int *IA, int *JA, int *nnz, int fr, int lr)</td></tr>
+<tr class="memitem:ga63f072aa25f7f7f8ac1ac4e32aae0c2e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga63f072aa25f7f7f8ac1ac4e32aae0c2e">blas_zusget_rows_sparse_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)</td></tr>
+<tr class="memitem:gafc031d78d0274c81039c2448a403cd10"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafc031d78d0274c81039c2448a403cd10">BLAS_susget_matrix_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int *nnz)</td></tr>
+<tr class="memitem:ga039a9d4da3423ea71726242e1c1251e7"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga039a9d4da3423ea71726242e1c1251e7">blas_susget_matrix_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, int *istat)</td></tr>
+<tr class="memitem:ga441bff94fdc50b9bf6e180d36f51c3ce"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga441bff94fdc50b9bf6e180d36f51c3ce">BLAS_dusget_matrix_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int *nnz)</td></tr>
+<tr class="memitem:ga3a4bc573dc07849e7a72ecb2d2f0c31d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3a4bc573dc07849e7a72ecb2d2f0c31d">blas_dusget_matrix_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, int *istat)</td></tr>
+<tr class="memitem:gafe27f3044269d37cadb569fc6796ac01"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafe27f3044269d37cadb569fc6796ac01">BLAS_cusget_matrix_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int *nnz)</td></tr>
+<tr class="memitem:ga19e30bb70673342b4d6308bd9cf46884"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga19e30bb70673342b4d6308bd9cf46884">blas_cusget_matrix_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, int *istat)</td></tr>
+<tr class="memitem:ga85e15d7a3331e8ed4d702908477e2896"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga85e15d7a3331e8ed4d702908477e2896">BLAS_zusget_matrix_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int *nnz)</td></tr>
+<tr class="memitem:ga9bdd048dea68ecbd8fd712349d4fbf13"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9bdd048dea68ecbd8fd712349d4fbf13">blas_zusget_matrix_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, int *istat)</td></tr>
+<tr class="memitem:gafc49f44b76021677000bebe7d7fe133b"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafc49f44b76021677000bebe7d7fe133b">BLAS_susget_infinity_norm</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, float *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type [...]
+<tr class="memitem:gaffaaf5b49e850adda0163b6bc082077d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaffaaf5b49e850adda0163b6bc082077d">blas_susget_infinity_norm_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, float *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_t [...]
+<tr class="memitem:ga39b4e25d5d5ce080f8dd994856e41fd0"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga39b4e25d5d5ce080f8dd994856e41fd0">BLAS_dusget_infinity_norm</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, double *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_typ [...]
+<tr class="memitem:ga15c7a93ed41a5488c0ef814d2061214a"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga15c7a93ed41a5488c0ef814d2061214a">blas_dusget_infinity_norm_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, double *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_ [...]
+<tr class="memitem:ga65e5bef193bd5a2d47e80bff7eebed8e"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga65e5bef193bd5a2d47e80bff7eebed8e">BLAS_cusget_infinity_norm</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type< [...]
+<tr class="memitem:gacefa288104224e6c8f069f4001dacc08"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gacefa288104224e6c8f069f4001dacc08">blas_cusget_infinity_norm_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_ty [...]
+<tr class="memitem:ga286c2cf2c749c80c8b71ff2f4bdb1566"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga286c2cf2c749c80c8b71ff2f4bdb1566">BLAS_zusget_infinity_norm</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type< [...]
+<tr class="memitem:ga01b88a27714ca87085421fd9a4f3e479"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga01b88a27714ca87085421fd9a4f3e479">blas_zusget_infinity_norm_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_ty [...]
+<tr class="memitem:gad3e05b01efa2857c0938ada63f30cadf"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad3e05b01efa2857c0938ada63f30cadf">BLAS_susset_elements</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int *ia, const int *ja, const float *va, int nnz)</td></tr>
+<tr class="memitem:gac0abb530fc46d610bf56e7fb1ef42c6c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac0abb530fc46d610bf56e7fb1ef42c6c">blas_susset_elements_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *ia, const int *ja, const float *va, int *nnz, int *istat)</td></tr>
+<tr class="memitem:gae34ff937437af99d317739192e2783da"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae34ff937437af99d317739192e2783da">BLAS_dusset_elements</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int *ia, const int *ja, const double *va, int nnz)</td></tr>
+<tr class="memitem:ga8e2acb49dac4221d1554c30238bd6747"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8e2acb49dac4221d1554c30238bd6747">blas_dusset_elements_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *ia, const int *ja, const double *va, int *nnz, int *istat)</td></tr>
+<tr class="memitem:ga3b358be87656e2d8065e1d30dd8060f4"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3b358be87656e2d8065e1d30dd8060f4">BLAS_cusset_elements</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int *ia, const int *ja, const void *va, int nnz)</td></tr>
+<tr class="memitem:ga38398053da29e668ee440e55f675532b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga38398053da29e668ee440e55f675532b">blas_cusset_elements_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *ia, const int *ja, const void *va, int *nnz, int *istat)</td></tr>
+<tr class="memitem:gac542af7517c9f667122e8bdc408487b3"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac542af7517c9f667122e8bdc408487b3">BLAS_zusset_elements</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int *ia, const int *ja, const void *va, int nnz)</td></tr>
+<tr class="memitem:ga156a8d0225d9761cd58e15e026b9ba2e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga156a8d0225d9761cd58e15e026b9ba2e">blas_zusset_elements_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *ia, const int *ja, const void *va, int *nnz, int *istat)</td></tr>
+<tr class="memitem:gaf17e549ec8cf353144ac1e3a1f080f46"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf17e549ec8cf353144ac1e3a1f080f46">BLAS_susset_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, float *v)</td></tr>
+<tr class="memitem:gab8c3e5745870d4399382051dcedad144"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gab8c3e5745870d4399382051dcedad144">blas_susset_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, float *v, int *istat)</td></tr>
+<tr class="memitem:gac8aa3ed1e29f2555519421290d236d0c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac8aa3ed1e29f2555519421290d236d0c">BLAS_dusset_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, double *v)</td></tr>
+<tr class="memitem:gab50cd8a5a6a5d866789628da0c9141a2"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gab50cd8a5a6a5d866789628da0c9141a2">blas_dusset_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, double *v, int *istat)</td></tr>
+<tr class="memitem:ga778acfebd02199f440b890b0176af19c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga778acfebd02199f440b890b0176af19c">BLAS_cusset_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, void *v)</td></tr>
+<tr class="memitem:ga4a32533889a4ed82a21f457d1253317d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4a32533889a4ed82a21f457d1253317d">blas_cusset_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, void *v, int *istat)</td></tr>
+<tr class="memitem:gaca954a070d476342e254587fc2faa7fd"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaca954a070d476342e254587fc2faa7fd">BLAS_zusset_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, void *v)</td></tr>
+<tr class="memitem:ga52efe19f0972fa51ac6329cf717b676c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga52efe19f0972fa51ac6329cf717b676c">blas_zusset_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, void *v, int *istat)</td></tr>
+<tr class="memitem:gad86989cd1f58003617f3db251b6fc0f1"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad86989cd1f58003617f3db251b6fc0f1">BLAS_susget_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, float *v)</td></tr>
+<tr class="memitem:gaac53e141083bc9871d81b587e5f785c1"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaac53e141083bc9871d81b587e5f785c1">blas_susget_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, float *v, int *istat)</td></tr>
+<tr class="memitem:gacf35fa073f6cc991efe75f6a012a9a04"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gacf35fa073f6cc991efe75f6a012a9a04">BLAS_dusget_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, double *v)</td></tr>
+<tr class="memitem:ga6443c32b223693698a8a0f0198ae4bee"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6443c32b223693698a8a0f0198ae4bee">blas_dusget_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, double *v, int *istat)</td></tr>
+<tr class="memitem:ga4c7eae1cfcd8cafc16f31b169c4a7514"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4c7eae1cfcd8cafc16f31b169c4a7514">BLAS_cusget_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, void *v)</td></tr>
+<tr class="memitem:ga104bc9ee1e6ce32012933e822019ecf0"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga104bc9ee1e6ce32012933e822019ecf0">blas_cusget_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, void *v, int *istat)</td></tr>
+<tr class="memitem:ga27417bc0d923f7288ed736837492275c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga27417bc0d923f7288ed736837492275c">BLAS_zusget_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, void *v)</td></tr>
+<tr class="memitem:ga845cca2b512e38b467fc0d4b93d660b7"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga845cca2b512e38b467fc0d4b93d660b7">blas_zusget_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, void *v, int *istat)</td></tr>
+<tr class="memitem:ga852f4a68eef6963708d11f37e975b178"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int pname)</td></tr>
+<tr class="memitem:ga2cb97e106eb117547157a8fc61491b91"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2cb97e106eb117547157a8fc61491b91">blas_usgp_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *pname, int *istat)</td></tr>
+<tr class="memitem:ga5ea0303be1db6c9dd73c03bba6dc6158"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5ea0303be1db6c9dd73c03bba6dc6158">blas_ussp_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *pname, int *istat)</td></tr>
+<tr class="memitem:ga89577a4a63cc8659f1d463fb819bc002"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int pname)</td></tr>
+<tr class="memitem:gac4d8c73e5d9faa85209bcc4e885d4ff1"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac4d8c73e5d9faa85209bcc4e885d4ff1">rsb_blas_get_mtx</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<p>A Sparse BLAS interface (see <a href="http://www.netlib.org/blas/blast-forum/">http://www.netlib.org/blas/blast-forum/</a>) to <code>librsb</code>. Level 1 (vector-vector operations) is supported in a basic way. Level 2 (sparse matrix-dense vector operations) is supported fully. Level 3 (sparse matrix-dense matrix operations) is supported as a wrapper around Level 2. </p>
+<p>We also implement a number of useful extra functions as custom extensions, giving access to other <code>librsb</code> functionality.</p>
+<p>The usage pattern of this interface matches that of the Sparse BLAS standard, except for the required initialization/finalization of <code>librsb</code>. The Sparse BLAS interface is also available for Fortran: see <a class="el" href="rsb__blas__sparse_8F90.html">rsb_blas_sparse.F90</a>.</p>
+<p>The user should be aware of the following: </p>
+<ul>
+<li>Because this Sparse BLAS implementation is built around <code>librsb</code>, initialization with <a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init()</a> and finalization with <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit()</a> is necessary. Inclusion of the <code><a class="el" href="rsb_8h.html" title="This file declares the user interface functions and data structures for the librsb library [...]
+<li><code>librsb</code> lets users enable or disable support for individual BLAS numerical types at configure/build time. Hence, while all the interface functions are always included in the Sparse BLAS header file, functions for types left out of the build will return an error code. Be sure to have configured the library correctly at configure time (and see the <a class="el" href="blas__sparse_8h.html">blas_sparse.h</a> header file for the types configured in the current build). </li>
+<li>According to the standard, the C functions for complex types accept scalar values by reference rather than by value; the equivalent functions for the other types do not, which may cause confusion. Be careful. </li>
+<li>Error checking is weak: for instance, passing a function the handle of a matrix of mismatching type is incorrect, but will not be detected as an error. </li>
+<li>VBR- and BCSR-style constructors are supported as per the standard, although they serve as interfaces to <code>librsb's</code> own matrix representation. </li>
+<li>Here we list both the C and the Fortran functions. However, the Fortran functions are declared and documented using C notation. We may provide better documentation in a subsequent release. </li>
+<li>Each identifier documented here suffixed by <code>_</code> (e.g.: <a class="el" href="group__rsb__doc__sparse__blas.html#ga3d4d6df66fbbdfb8585770ce2ce37e6b">blas_susdot_()</a>) can be used from Fortran with that suffix stripped from the name (so in this case, <code>blas_susdot</code>). We will provide a proper fix for this inconvenience in a subsequent release. </li>
+<li>Each Fortran program using <code>librsb's</code> Sparse BLAS Implementation shall <code>use</code> modules <code><a class="el" href="classblas__sparse.html">blas_sparse</a></code> and <code>rsb</code>. </li>
+<li>Fortran programs also have to call <a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init()</a> and <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit()</a>, e.g.: <pre class="fragment">        USE blas_sparse             ! module implementing the Sparse BLAS on top of librsb
+        USE rsb                     ! rsb module
+        ...
+        INTEGER :: istat            ! integer variable
+        ...
+        istat = rsb_lib_init(RSB_NULL_INIT_OPTIONS) ! please note that this is not part of Sparse BLAS but it is needed by librsb
+        if(istat.NE.0)STOP          ! a value different than zero signals an error
+        ...
+        ! code calling Sparse BLAS routines
+        ...
+        istat = rsb_lib_exit(RSB_NULL_EXIT_OPTIONS) ! please note that this is not part of Sparse BLAS but it is needed by librsb
+        if(istat.NE.0)STOP          ! a value different than zero signals an error
+        ...
+</pre> </li>
+<li>For Fortran, more procedures exist, although they are not documented here. According to the Sparse BLAS (<a href="http://www.netlib.org/blas/blast-forum/">http://www.netlib.org/blas/blast-forum/</a>), for almost every subroutine whose identifier is prefixed with <code>blas_X</code> (with <code>X</code> being one of S,D,C,Z), a corresponding generic modern Fortran version exists. Please note that not all procedure identifier prefixes include the type character.</li>
+</ul>
+<p>E.g.: </p>
+<div class="fragment"><div class="line">! the following code (<span class="charliteral">'d'</span> stays <span class="keywordflow">for</span> <span class="stringliteral">'double precision'</span>):</div>
+<div class="line">CALL blas_duscr_begin(nr,nc,A,istat)</div>
+<div class="line">CALL blas_ussp(A,<a class="code" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a>,istat)</div>
+<div class="line">CALL blas_duscr_insert_entries(A,nnz,VA,IA,JA,istat)</div>
+<div class="line">CALL blas_duscr_end(A,istat)</div>
+<div class="line">CALL blas_dusmv(transT,alpha,A,X,incX,B,incB,istat) </div>
+<div class="line">CALL blas_dusds(A,istat)</div>
+<div class="line">! is equivalent to:</div>
+<div class="line">CALL duscr_begin(nr,nc,A,istat) ! here, <span class="stringliteral">'d'</span> must be retained for avoiding ambiguity</div>
+<div class="line">CALL ussp(A,<a class="code" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a>,istat)</div>
+<div class="line">CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)</div>
+<div class="line">CALL uscr_end(A,istat)</div>
+<div class="line">CALL usmv(transT,alpha,A,X,incX,B,incB,istat) </div>
+<div class="line">CALL usds(A,istat)</div>
+</div><!-- fragment --> <h2>Function Documentation</h2>
+<a class="anchor" id="gafaf15e2530cd078b260bb744e00487cb"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cusaxpy </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> </td>
+          <td class="paramname"><em>index_base</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse vector update: <img class="formulaInl" alt="$Y \leftarrow \alpha X + Y$" src="form_4.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Will scale values of <img class="formulaInl" alt="$X$" src="form_3.png"/> before accumulating to <img class="formulaInl" alt="$Y$" src="form_2.png"/>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
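+<p>For illustration, the following is a minimal C sketch of such an update (not part of the original documentation; it assumes a build configured with complex type support, C99 <code>complex.h</code>, and uses hypothetical data values):</p>
+<pre class="fragment">#include &lt;complex.h&gt;
+#include &lt;rsb.h&gt;
+#include &lt;blas_sparse.h&gt;
+
+int main(void)
+{
+    /* y := alpha*x + y, with x sparse and y dense */
+    const float complex alpha = 2.0f + 0.0f*I;
+    const float complex x[] = { 1.0f + 1.0f*I, 3.0f - 1.0f*I }; /* sparse values */
+    const int indx[] = { 0, 2 };   /* positions of x's entries in y (0-based) */
+    float complex y[4] = { 0 };
+
+    if (rsb_lib_init(RSB_NULL_INIT_OPTIONS) != 0) return 1;
+    /* note: the complex variants take alpha by reference */
+    if (BLAS_cusaxpy(2, &amp;alpha, x, indx, y, 1, blas_zero_base) != 0) return 1;
+    return rsb_lib_exit(RSB_NULL_EXIT_OPTIONS) != 0;
+}
+</pre>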
+<a class="anchor" id="gac6189fef9b94289f2b8a5b6b7287b50b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cusaxpy_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> * </td>
+          <td class="paramname"><em>index_base</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse vector update: <img class="formulaInl" alt="$Y \leftarrow \alpha X + Y$" src="form_4.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Will scale values of <img class="formulaInl" alt="$X$" src="form_3.png"/> before accumulating to <img class="formulaInl" alt="$Y$" src="form_2.png"/>.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga3483c364b4afec22621e46059b166247"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> BLAS_cuscr_begin </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>m</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>n</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">m</td><td>Is the count of rows. </td></tr>
+    <tr><td class="paramname">n</td><td>Is the count of columns.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error.</dd></dl>
+
+</div>
+</div>
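+<p>As a usage illustration, here is a sketch of the typical life cycle around this call (not part of the original documentation; the data values are hypothetical, and 0-based indices are assumed, as is the C default):</p>
+<pre class="fragment">const int nr = 3, nc = 3, nnz = 3;
+const float complex VA[] = { 1, 2, 3 };            /* nonzero values */
+const int IA[] = { 0, 1, 2 }, JA[] = { 0, 1, 2 };  /* their coordinates */
+blas_sparse_matrix A;
+
+A = BLAS_cuscr_begin(nr, nc);                  /* matrix enters build state */
+if (A == -1) { /* handle error */ }
+BLAS_cuscr_insert_entries(A, nnz, VA, IA, JA); /* insert the nonzeroes */
+BLAS_cuscr_end(A);                             /* assemble the matrix */
+/* ... computational routines may now be called on A ... */
+BLAS_usds(A);                                  /* release the matrix */
+</pre>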
+<a class="anchor" id="gaf4d21720c592de22cfd4139517d9d255"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cuscr_begin_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>m</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>n</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">m</td><td>Is the count of rows. </td></tr>
+    <tr><td class="paramname">n</td><td>Is the count of columns.</td></tr>
+    <tr><td class="paramname">A</td><td>A valid pointer to an empty matrix handle. </td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value. Will assign a valid matrix handle to <img class="formulaInl" alt="$A$" src="form_21.png"/> in case of success, or set it to -1 on error.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga050218d0fa552a3e2c2d5452f876d9b5"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> BLAS_cuscr_block_begin </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>l</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">k,l</td><td>Are row and column dimensions when specifying a matrix as BCSR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga967bfc819ed66559e96ae55a6826d1f8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cuscr_block_begin_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">k,l</td><td>Are row and column dimensions when specifying a matrix as BCSR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+    <tr><td class="paramname">A</td><td>A valid pointer to an empty matrix handle. </td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value. Will assign a valid matrix handle to <img class="formulaInl" alt="$A$" src="form_21.png"/> in case of success, or set it to -1 on error.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gac2b5eccd5cf442b5e2e79201d62ca2b5"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cuscr_end </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em></td><td>)</td>
+          <td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Makes an assembled matrix out of a matrix in build state. After this, it is no longer possible to insert nonzeroes, but computational routines can be invoked. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gaa78d3bef027e5a29ab5e5dd6188bcd75"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cuscr_end_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Makes an assembled matrix out of a matrix in build state. After this, it is no longer possible to insert nonzeroes, but computational routines can be invoked. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga42054351f49850f079733143b2af87fb"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cuscr_insert_block </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>j</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the corresponding (typed) blocked <code>begin</code> function. If no blocked <code>begin</code> function was called, 1x1 blocking (that is, no blocking) is assumed. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row and column strides in accessing <code>val</code>. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Block row/column indices. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">BLAS_duscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286" [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
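+<p>For illustration, a sketch of inserting one 2x2 block (not part of the original documentation; it assumes <code>A</code> was created with a hypothetical <code>BLAS_cuscr_block_begin(Mb, Nb, 2, 2)</code> call, that <code>val</code> holds the block row-major, and that the strides count elements of <code>val</code>):</p>
+<pre class="fragment">const float complex val[] = { 1, 2,    /* block stored row-major:    */
+                              3, 4 };  /* row_stride=2, col_stride=1 */
+
+/* insert the block at block coordinates (i,j) = (0,0) */
+if (BLAS_cuscr_insert_block(A, val, 2, 1, 0, 0) != 0)
+    { /* handle error */ }
+</pre>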
+<a class="anchor" id="ga527ae15ee9e003d948494d9fcdad5dba"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cuscr_insert_block_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the corresponding (typed) blocked <code>begin</code> function. If no blocked <code>begin</code> function was called, 1x1 blocking (that is, no blocking) is assumed. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row and column strides in accessing <code>val</code>. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Block row/column indices. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">BLAS_duscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286" [...]
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gaf089aaac5d65a4e38130b25d5ba2ba27"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cuscr_insert_clique </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>jndx</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole clique in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">k,l</td><td>Clique rows and columns count. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row/columns stride in accessing the clique. </td></tr>
+    <tr><td class="paramname">indx,jndx</td><td>Row/column indices arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
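+<p>For illustration, a sketch of inserting a 2x2 clique touching rows {0,2} and columns {1,3} (not part of the original documentation; it assumes row-major storage of <code>val</code> with element-counting strides, and hypothetical data values):</p>
+<pre class="fragment">const int k = 2, l = 2;
+const float complex val[] = { 1, 2,    /* clique values, row-major:  */
+                              3, 4 };  /* row_stride=2, col_stride=1 */
+const int indx[] = { 0, 2 };           /* target row indices    */
+const int jndx[] = { 1, 3 };           /* target column indices */
+
+if (BLAS_cuscr_insert_clique(A, k, l, val, 2, 1, indx, jndx) != 0)
+    { /* handle error */ }
+</pre>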
+<a class="anchor" id="ga06acafbf28371b1ad8a75a85173261e6"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cuscr_insert_clique_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole clique in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">k,l</td><td>Clique rows and columns count. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row/columns stride in accessing the clique. </td></tr>
+    <tr><td class="paramname">indx,jndx</td><td>Row/column indices arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga589495aa8acd4eac99ef9132bc4062c9"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cuscr_insert_col </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
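+<p>For illustration, a sketch of inserting a single column (not part of the original documentation; the data values and indices are hypothetical, 0-based, and <code>A</code> is assumed to be in build state):</p>
+<pre class="fragment">const float complex val[] = { 5, 7 };  /* two nonzeroes ...   */
+const int indx[] = { 0, 2 };           /* ... at rows 0 and 2 */
+
+if (BLAS_cuscr_insert_col(A, 1, 2, val, indx) != 0)  /* column j = 1 */
+    { /* handle error */ }
+</pre>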
+<a class="anchor" id="ga1aadf4dc810ff6eb123a1bf9c859efe8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cuscr_insert_col_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga5af752a3fcb2898412f576eee7d9d618"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cuscr_insert_entries </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>jndx</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">jndx</td><td>Column indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
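+<p>For illustration, a sketch assembling a matrix from COO-style triplets in one call (again assuming <code>BLAS_cuscr_begin</code>/<code>BLAS_cuscr_end</code> and zero-based indices):</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+
+/* Assemble a 2x2 complex matrix from three (value,row,column) triplets. */
+int build_from_triplets(void)
+{
+    const float complex val[] = { 1.0f + 1.0f*I, 2.0f, 4.0f - 1.0f*I };
+    const int indx[] = { 0, 0, 1 };          /* row indices    */
+    const int jndx[] = { 0, 1, 1 };          /* column indices */
+    blas_sparse_matrix A = BLAS_cuscr_begin(2, 2);
+
+    if (A == -1)
+        return -1;
+    if (BLAS_cuscr_insert_entries(A, 3, val, indx, jndx) != 0)
+        return -1;
+    return BLAS_cuscr_end(A);                /* 0 on success, -1 on error */
+}
+</pre>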
+<a class="anchor" id="ga3deb906fcd5f9b9221b5865541c57d18"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cuscr_insert_entries_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">jndx</td><td>Column indices array.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gaa39564978ebda8a88f8d19e3e060bc4d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cuscr_insert_entry </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>j</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Row and column indices.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
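+<p>A sketch of entry-wise assembly, e.g. filling the diagonal of a matrix in build state one coefficient at a time (the value is passed by address, as the prototype above shows):</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+
+/* Insert a unit diagonal into an n x n matrix in build state. */
+int insert_unit_diagonal(blas_sparse_matrix A, int n)
+{
+    int i;
+    for (i = 0; i &lt; n; ++i) {
+        const float complex one = 1.0f;
+        if (BLAS_cuscr_insert_entry(A, &amp;one, i, i) != 0)
+            return -1;
+    }
+    return 0;
+}
+</pre>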
+<a class="anchor" id="ga6d735497bdd3bbafbb6168cb0fde5103"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cuscr_insert_entry_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Row and column indices.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga77929c94cee3278cc7594a3f1377f5f8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cuscr_insert_row </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row index.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
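+<p>This mirrors <code>BLAS_cuscr_insert_col</code> above, with <code>indx</code> now holding column indices; a brief sketch:</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+
+/* Insert row 1 of a matrix in build state: two nonzeroes,
+   in columns 0 and 2. Returns 0 on success, -1 on error. */
+int insert_second_row(blas_sparse_matrix A)
+{
+    const float complex val[] = { 5.0f, 6.0f + 1.0f*I };
+    const int indx[] = { 0, 2 };             /* column indices */
+    return BLAS_cuscr_insert_row(A, 1, 2, val, indx);
+}
+</pre>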
+<a class="anchor" id="gad4acfbfdf33a5682ac657add0292711d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cuscr_insert_row_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row index.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga95174fcf3bfbef91ab6b3b85fc90b128"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> BLAS_cuscr_variable_block_begin </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>K</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>L</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">K,L</td><td>Are arrays specifying row/column block sizes when specifying a matrix as VBR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error.</dd></dl>
+
+</div>
+</div>
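+<p>A sketch of beginning a VBR matrix: two block rows of heights {2,3} and two block columns of widths {1,4}, i.e. a 5x5 matrix overall:</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+
+/* Begin assembly of a variable-block-row (VBR) matrix with
+   Mb = 2 block rows and Nb = 2 block columns. */
+blas_sparse_matrix begin_vbr(void)
+{
+    const int K[] = { 2, 3 };                /* row block sizes    */
+    const int L[] = { 1, 4 };                /* column block sizes */
+    return BLAS_cuscr_variable_block_begin(2, 2, K, L);
+}
+</pre>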
+<a class="anchor" id="ga24a2dac4570e6021fdcc5c84b52fb5bb"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cuscr_variable_block_begin_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>K</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>L</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">K,L</td><td>Are arrays specifying row/column block sizes when specifying a matrix as VBR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+    <tr><td class="paramname">A</td><td>A valid pointer to an empty matrix handle. </td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value. Will assign a valid matrix handle to <img class="formulaInl" alt="$A$" src="form_21.png"/> in case of success, or set it to -1 on error.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gae02711e85989d740894aa260028cab15"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cusdot </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> </td>
+          <td class="paramname"><em>conj</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>r</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> </td>
+          <td class="paramname"><em>index_base</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse dot product. <img class="formulaInl" alt="$r \leftarrow X^T Y,$" src="form_0.png"/> <img class="formulaInl" alt="$r \leftarrow X^H Y$" src="form_1.png"/> </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">r</td><td>Sparse dot result array. </td></tr>
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements. </td></tr>
+    <tr><td class="paramname">conj</td><td>If <a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1">blas_conj</a>, values of X will be considered conjugated.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
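+<p>A sketch of the sparse dot product: a sparse <code>x</code> with three nonzeroes is dotted against a dense, unit-stride <code>y</code> (zero-based <code>indx</code> assumed):</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+
+/* r = x^T y, with x stored in compressed (value,index) form.
+   Returns 0 on success, -1 on error. */
+int sparse_dot(void)
+{
+    const float complex x[] = { 1.0f, 2.0f + 1.0f*I, 3.0f };
+    const int indx[] = { 0, 2, 4 };
+    const float complex y[] = { 1, 1, 1, 1, 1 };
+    float complex r = 0;
+    return BLAS_cusdot(blas_no_conj, 3, x, indx, y, 1, &amp;r, blas_zero_base);
+}
+</pre>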
+<a class="anchor" id="ga6805ad5c8346534e68b436708920d135"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cusdot_ </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> * </td>
+          <td class="paramname"><em>conj</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>r</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> * </td>
+          <td class="paramname"><em>index_base</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse dot product. <img class="formulaInl" alt="$r \leftarrow X^T Y,$" src="form_0.png"/> <img class="formulaInl" alt="$r \leftarrow X^H Y$" src="form_1.png"/> </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">r</td><td>Sparse dot result array. </td></tr>
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements. </td></tr>
+    <tr><td class="paramname">conj</td><td>If <a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1">blas_conj</a>, values of X will be considered conjugated.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga71f2df0176e5f44bf482ea2386ac5fac"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cusga </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> </td>
+          <td class="paramname"><em>index_base</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse gather. <img class="formulaInl" alt="$X \leftarrow Y |_x$" src="form_5.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
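+<p>A sketch of the gather: three elements of a dense <code>y</code> are collected into the packed array <code>x</code> (zero-based indices assumed):</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+
+/* x[k] = y[indx[k]], for k = 0..2. Returns 0 on success, -1 on error. */
+int gather_three(void)
+{
+    const float complex y[] = { 10, 20, 30, 40 };
+    const int indx[] = { 0, 2, 3 };
+    float complex x[3];
+    return BLAS_cusga(3, y, 1, x, indx, blas_zero_base);
+}
+</pre>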
+<a class="anchor" id="ga284485bb91904fe1324257ba1ab3a982"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cusga_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> * </td>
+          <td class="paramname"><em>index_base</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse gather. <img class="formulaInl" alt="$X \leftarrow Y |_x$" src="form_5.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga4ec4b6dce3701c5803efa6b7455e1504"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cusget_diag </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>d</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get matrix diagonal. <img class="formulaInl" alt="$d\leftarrow diag(A)$" src="form_22.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">d</td><td>Array for the diagonal entries. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
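+<p>A sketch of diagonal extraction; the destination array is assumed to hold min(rows,columns) values of the matrix's numerical type:</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+#include &lt;stdlib.h&gt;
+
+/* Fetch the diagonal of an assembled n x n matrix A.
+   Returns 0 on success, -1 on error. */
+int fetch_diagonal(blas_sparse_matrix A, int n)
+{
+    int rc = -1;
+    float complex *d = malloc(n * sizeof(*d));
+    if (d != NULL)
+        rc = BLAS_cusget_diag(A, d);
+    free(d);
+    return rc;
+}
+</pre>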
+<a class="anchor" id="ga4865a8fda031074a0d91cf5c548584b9"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cusget_diag_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>d</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get matrix diagonal. <img class="formulaInl" alt="$d\leftarrow diag(A)$" src="form_22.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">d</td><td>Array for the diagonal entries. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga4c7eae1cfcd8cafc16f31b169c4a7514"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cusget_element </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>v</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get a single matrix nonzero coefficient <img class="formulaInl" alt="$A_{i,j}$" src="form_23.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">v</td><td>Value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
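+<p>A sketch of reading back one coefficient from an assembled matrix:</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+
+/* Read A(i,j) into *v; returns 0 on success, -1 on error. */
+int read_coefficient(blas_sparse_matrix A, int i, int j, float complex *v)
+{
+    return BLAS_cusget_element(A, i, j, v);
+}
+</pre>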
+<a class="anchor" id="ga104bc9ee1e6ce32012933e822019ecf0"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cusget_element_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>v</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get a single matrix nonzero coefficient <img class="formulaInl" alt="$A_{i,j}$" src="form_23.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">v</td><td>Value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga65e5bef193bd5a2d47e80bff7eebed8e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cusget_infinity_norm </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>in</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>trans</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get infinity norm of matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">in</td><td>Infinity norm pointer. </td></tr>
+    <tr><td class="paramname">trans</td><td>Transposition parameter. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
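+<p>A sketch of the norm query; the destination is assumed here to be of the matrix's numerical type, written through the <code>void</code> pointer (the norm itself is a real quantity):</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+
+/* Infinity norm of A, untransposed. Returns 0 on success, -1 on error. */
+int infinity_norm(blas_sparse_matrix A, float complex *in)
+{
+    return BLAS_cusget_infinity_norm(A, in, blas_no_trans);
+}
+</pre>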
+<a class="anchor" id="gacefa288104224e6c8f069f4001dacc08"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cusget_infinity_norm_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>in</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>trans</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get infinity norm of matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">in</td><td>Infinity norm pointer. </td></tr>
+    <tr><td class="paramname">trans</td><td>Transposition parameter. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gafe27f3044269d37cadb569fc6796ac01"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cusget_matrix_nnz </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get nnz count of matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Output value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
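+<p>A sketch of querying the stored nonzero count, e.g. to size extraction buffers:</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+
+/* Return the number of stored nonzeroes of A, or -1 on error. */
+int count_nonzeroes(blas_sparse_matrix A)
+{
+    int nnz = 0;
+    if (BLAS_cusget_matrix_nnz(A, &amp;nnz) != 0)
+        return -1;
+    return nnz;
+}
+</pre>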
+<a class="anchor" id="ga19e30bb70673342b4d6308bd9cf46884"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cusget_matrix_nnz_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get nnz count of matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Output value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gac50e955d6e2bff77e2c3ac2146c77aaf"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cusget_rows_nnz </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>fr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>lr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnzp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get nnz count of matrix row interval. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">fr</td><td>First row. </td></tr>
+    <tr><td class="paramname">lr</td><td>Last row. </td></tr>
+    <tr><td class="paramname">nnzp</td><td>Pointer to the nonzeroes variable. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
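+<p>A sketch of counting the nonzeroes in a row interval, typically as a first step before <code>BLAS_cusget_rows_sparse</code> (see the next routine):</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+
+/* Number of nonzeroes stored in rows fr..lr of A, or -1 on error. */
+int rows_nonzeroes(blas_sparse_matrix A, int fr, int lr)
+{
+    int nnzp = 0;
+    if (BLAS_cusget_rows_nnz(A, fr, lr, &amp;nnzp) != 0)
+        return -1;
+    return nnzp;
+}
+</pre>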
+<a class="anchor" id="ga9e11da08762387d8a7a885665298e815"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cusget_rows_nnz_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>fr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>lr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnzp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get nnz count of matrix row interval. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">fr</td><td>First row. </td></tr>
+    <tr><td class="paramname">lr</td><td>Last row. </td></tr>
+    <tr><td class="paramname">nnzp</td><td>Pointer to the nonzeroes variable. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga23f0c1852e05a426d24d2eb1bcae168b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cusget_rows_sparse </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>fr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>lr</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get sparse rows of matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">VA</td><td>pointer to values. </td></tr>
+    <tr><td class="paramname">IA</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">JA</td><td>Column indices array. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Obtained nonzeroes. </td></tr>
+    <tr><td class="paramname">fr</td><td>first row. </td></tr>
+    <tr><td class="paramname">lr</td><td>Last row. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
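+<p>A sketch of the two-step extraction: size the COO buffers with <code>BLAS_cusget_rows_nnz</code> (above), then fill them:</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+#include &lt;stdlib.h&gt;
+
+/* Extract rows fr..lr of A in COO form. Returns 0 on success, -1 on error. */
+int extract_rows(blas_sparse_matrix A, int fr, int lr)
+{
+    int nnz = 0, rc = -1;
+    float complex *VA = NULL;
+    int *IA = NULL, *JA = NULL;
+
+    if (BLAS_cusget_rows_nnz(A, fr, lr, &amp;nnz) != 0)
+        return -1;
+    VA = malloc(nnz * sizeof(*VA));
+    IA = malloc(nnz * sizeof(*IA));
+    JA = malloc(nnz * sizeof(*JA));
+    if (VA &amp;&amp; IA &amp;&amp; JA)
+        rc = BLAS_cusget_rows_sparse(A, VA, IA, JA, &amp;nnz, fr, lr);
+    free(VA); free(IA); free(JA);
+    return rc;
+}
+</pre>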
+<a class="anchor" id="ga04751c01dcfb6730a33eaa91f403dd09"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cusget_rows_sparse_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>fr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>lr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get the sparse rows of a matrix, i.e. the nonzeroes of the row interval from <code>fr</code> to <code>lr</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">VA</td><td>pointer to values. </td></tr>
+    <tr><td class="paramname">IA</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">JA</td><td>Column indices array. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Obtained nonzeroes. </td></tr>
+    <tr><td class="paramname">fr</td><td>first row. </td></tr>
+    <tr><td class="paramname">lr</td><td>Last row. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga2a4c72eb85493e921f4d40e18edb83ef"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cusgz </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> </td>
+          <td class="paramname"><em>index_base</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse gather and zero. <img class="formulaInl" alt="$X \leftarrow Y |_x;Y|_x\leftarrow 0$" src="form_6.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga32fdcc497a0db0ba36b413725ddc8c13"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cusgz_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> * </td>
+          <td class="paramname"><em>index_base</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse gather and zero. <img class="formulaInl" alt="$X \leftarrow Y |_x;Y|_x\leftarrow 0$" src="form_6.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga8c87639294b57d2893cd29f64902a64d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cusmm </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>c</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>ldc</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>. </td></tr>
+    <tr><td class="paramname">c</td><td>Dense vector <em>c</em>. </td></tr>
+    <tr><td class="paramname">ldc</td><td>Leading dimension of <em>c</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rs [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga2dc070f4b09c4b37d89ab9a0fb16352b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cusmm_ </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> * </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>c</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>ldc</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>. </td></tr>
+    <tr><td class="paramname">c</td><td>Dense vector <em>c</em>. </td></tr>
+    <tr><td class="paramname">ldc</td><td>Leading dimension of <em>c</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rs [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga9ec2e63176f2d6b11ee48bb523b4f7c7"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cusmv </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>. </td></tr>
+    <tr><td class="paramname">y</td><td>Dense vector <em>y</em>. </td></tr>
+    <tr><td class="paramname">incy</td><td>Stride of <em>y</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rs [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga3d60593a2a4ea8c081590b392c39419d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cusmv_ </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>. </td></tr>
+    <tr><td class="paramname">y</td><td>Dense vector <em>y</em>. </td></tr>
+    <tr><td class="paramname">incy</td><td>Stride of <em>y</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rs [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gafc79de03622ceeb2e0b4343fe5904a36"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cusrows_scale </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>d</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>trans</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Scale the rows of the matrix by the corresponding entries of a scaling vector. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">d</td><td>Rows scaling vector. </td></tr>
+    <tr><td class="paramname">trans</td><td>Transposition parameter (if transposed will scale columns). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gae09ac29c14cede27a8d6a2be2687453e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cusrows_scale_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>d</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>trans</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Scale the rows of the matrix by the corresponding entries of a scaling vector. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">d</td><td>Rows scaling vector. </td></tr>
+    <tr><td class="paramname">trans</td><td>Transposition parameter (if transposed will scale columns). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga1b93628d321fbb77a50f98b467a3ff84"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cussc </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> </td>
+          <td class="paramname"><em>index_base</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse scatter: <img class="formulaInl" alt="$Y |_x\leftarrow X$" src="form_7.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gafc77b392db05fc22122d4639595cccb3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cussc_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> * </td>
+          <td class="paramname"><em>index_base</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse scatter: <img class="formulaInl" alt="$Y |_x\leftarrow X$" src="form_7.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga778acfebd02199f440b890b0176af19c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cusset_element </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>v</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Set a single (existing) matrix nonzero coefficient <img class="formulaInl" alt="$A_{i,j}$" src="form_23.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">v</td><td>Value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga4a32533889a4ed82a21f457d1253317d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cusset_element_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>v</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Set a single (existing) matrix nonzero coefficient <img class="formulaInl" alt="$A_{i,j}$" src="form_23.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">v</td><td>Value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga3b358be87656e2d8065e1d30dd8060f4"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cusset_elements </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>ia</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>ja</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>va</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Set the values of individual matrix nonzero coefficients. The operation is pattern-preserving, that is, the nonzeroes must already exist. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">ia</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">ja</td><td>Column indices array. </td></tr>
+    <tr><td class="paramname">va</td><td>Values array. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Length of the <code>ia</code>,ja,va arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality..</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga38398053da29e668ee440e55f675532b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cusset_elements_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>ia</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>ja</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>va</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Set the values of individual matrix nonzero coefficients. The operation is pattern-preserving, that is, the nonzeroes must already exist. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">ia</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">ja</td><td>Column indices array. </td></tr>
+    <tr><td class="paramname">va</td><td>Values array. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Length of the <code>ia</code>,ja,va arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality..</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gad864666e842f7d0878b1fb9d57e80c28"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cussm </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>ldb</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Triangular solve by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gac3d8f0b6742566cbbadf6b18c9aa40b5"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cussm_ </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> * </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Triangular solve by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga4c327ba1fa391b550f2fc5580ad49bdf"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_cussv </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incx</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Triangular solve by a dense vector. Either of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
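+<p>For illustration, a minimal C sketch of calling <code>BLAS_cussv</code> follows; the wrapper name <code>solve_with_T</code> and the data are hypothetical, and the handle <code>T</code> is assumed to be a previously assembled triangular matrix.</p>
+<pre class="fragment">
+#include &lt;complex.h&gt;       /* C99 complex types */
+#include "blas_sparse.h"
+
+/* Sketch: solve x := alpha * inv(T) * x in place, with unit stride. */
+int solve_with_T(blas_sparse_matrix T, float complex x[])
+{
+    float complex alpha = 1.0f;   /* no extra scaling */
+    return BLAS_cussv(blas_no_trans, &amp;alpha, T, x, 1);
+}
+</pre>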
+<a class="anchor" id="ga39b0ab077486c1fc3766d68ae9048447"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_cussv_ </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Triangular solve by a dense vector. Either of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga31b475fb2cc3f50775a5b6db930ab570"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_dusaxpy </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> </td>
+          <td class="paramname"><em>index_base</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse vector update: <img class="formulaInl" alt="$Y \leftarrow \alpha X + Y$" src="form_4.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Will scale values of <img class="formulaInl" alt="$X$" src="form_3.png"/> before accumulating to <img class="formulaInl" alt="$Y$" src="form_2.png"/>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
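+<p>A minimal C sketch of a sparse AXPY follows; the values and indices are hypothetical, and zero-based indexing is selected explicitly through <code>blas_zero_base</code>.</p>
+<pre class="fragment">
+#include "blas_sparse.h"
+
+/* Sketch: y := 2*x + y, where x holds 3 nonzeroes of a sparse vector. */
+void sparse_axpy_example(void)
+{
+    const double x[]    = {1.0, 2.0, 3.0};   /* values of X             */
+    const int    indx[] = {0, 2, 5};         /* positions of X within Y */
+    double y[6] = {0.0};                     /* dense Y, six elements   */
+    BLAS_dusaxpy(3, 2.0, x, indx, y, 1, blas_zero_base);
+    /* y is now {2, 0, 4, 0, 0, 6} */
+}
+</pre>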
+<a class="anchor" id="ga90f1fe9fa99b947c8096befdbfb49fb3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_dusaxpy_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> * </td>
+          <td class="paramname"><em>index_base</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse vector update: <img class="formulaInl" alt="$Y \leftarrow \alpha X + Y$" src="form_4.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Will scale values of <img class="formulaInl" alt="$X$" src="form_3.png"/> before accumulating to <img class="formulaInl" alt="$Y$" src="form_2.png"/>.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gac931dcb1129ee3016ab82602c3d14fee"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> BLAS_duscr_begin </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>m</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>n</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">m</td><td>Is the count of rows. </td></tr>
+    <tr><td class="paramname">n</td><td>Is the count of columns.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error.</dd></dl>
+
+</div>
+</div>
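+<p>A minimal sketch of starting the build cycle follows; the 100x100 dimensions and the wrapper name are hypothetical.</p>
+<pre class="fragment">
+#include "blas_sparse.h"
+
+/* Sketch: allocate an empty 100x100 double-precision matrix. */
+blas_sparse_matrix make_empty(void)
+{
+    blas_sparse_matrix A = BLAS_duscr_begin(100, 100);
+    if (A == -1) {
+        /* creation failed; handle the error here */
+    }
+    return A; /* in build state: insert entries, then call BLAS_duscr_end() */
+}
+</pre>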
+<a class="anchor" id="gad7d5969e9edee49441fc89d22715e60d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_duscr_begin_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>m</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>n</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">m</td><td>Is the count of rows. </td></tr>
+    <tr><td class="paramname">n</td><td>Is the count of columns.</td></tr>
+    <tr><td class="paramname">A</td><td>A valid pointer to an empty matrix handle. </td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value. Will assign a valid matrix handle to <img class="formulaInl" alt="$A$" src="form_21.png"/> in case of success, or set it to -1 on error.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gac0ca32cd2c78c8553d6d6b324e06ef59"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> BLAS_duscr_block_begin </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>l</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">k,l</td><td>Are row and column dimensions when specifying a matrix as BCSR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error.</dd></dl>
+
+</div>
+</div>
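+<p>A sketch of declaring a BCSR layout follows; with the hypothetical arguments below, a 3x3 grid of 2x2 blocks yields a 6x6 matrix overall.</p>
+<pre class="fragment">
+#include "blas_sparse.h"
+
+/* Sketch: Mb=3 block rows, Nb=3 block columns, blocks of size k=2 by l=2. */
+blas_sparse_matrix make_blocked(void)
+{
+    return BLAS_duscr_block_begin(3, 3, 2, 2);
+}
+</pre>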
+<a class="anchor" id="ga864facf0316453a27af4b7024a11453b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_duscr_block_begin_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">k,l</td><td>Are row and column dimensions when specifying a matrix as BCSR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+    <tr><td class="paramname">A</td><td>A valid pointer to an empty matrix handle. </td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value. Will assign a valid matrix handle to <img class="formulaInl" alt="$A$" src="form_21.png"/> in case of success, or set it to -1 on error.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga5d9ce97bf054b1e3750eaae5d4e6c335"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_duscr_end </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em></td><td>)</td>
+          <td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Makes an assembled matrix out of a matrix in build state. After this, it is no longer possible to insert nonzeroes, but computational routines can be invoked on the matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
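+<p>Putting begin, insertion and end together, a minimal sketch of the whole build cycle follows; the 2x2 matrix content is hypothetical and zero-based indexing is assumed.</p>
+<pre class="fragment">
+#include "blas_sparse.h"
+
+/* Sketch: assemble the 2x2 matrix [[1,2],[0,3]]; returns -1 on failure. */
+blas_sparse_matrix build_small(void)
+{
+    const double val[]  = {1.0, 2.0, 3.0};
+    const int    indx[] = {0, 0, 1};   /* row indices    */
+    const int    jndx[] = {0, 1, 1};   /* column indices */
+    blas_sparse_matrix A = BLAS_duscr_begin(2, 2);
+    if (A == -1)
+        return -1;
+    if (BLAS_duscr_insert_entries(A, 3, val, indx, jndx) != 0)
+        return -1;
+    if (BLAS_duscr_end(A) != 0)
+        return -1;
+    return A; /* assembled: computational routines may now be invoked */
+}
+</pre>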
+<a class="anchor" id="ga2120eb06b87f0e85d03a368e5bc55485"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_duscr_end_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Makes an assembled matrix out of a matrix in build state. After this, it is no longer possible to insert nonzeroes, but computational routines can be invoked on the matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga5d35aa3e27cdbf8a50db5b47ff5e0892"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_duscr_insert_block </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>j</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the corresponding (by type) blocked <code>begin</code> function. If no blocked <code>begin</code> function was called, 1x1 blocking (that is, no blocking) is assumed. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row and column strides in accessing <code>val</code>. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Block row/column indices. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">BLAS_duscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286" [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
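+<p>A sketch of inserting one dense block follows; the block content and position are hypothetical, and <code>A</code> is assumed to come from a 2x2-blocked <code>begin</code> call and to still be in build state.</p>
+<pre class="fragment">
+#include "blas_sparse.h"
+
+/* Sketch: insert a row-major 2x2 block at block position (i=0, j=1);
+   row_stride=2 and col_stride=1 describe the row-major layout of val. */
+int insert_one_block(blas_sparse_matrix A)
+{
+    const double val[] = {1.0, 2.0,    /* first block row  */
+                          3.0, 4.0};   /* second block row */
+    return BLAS_duscr_insert_block(A, val, 2, 1, 0, 1);
+}
+</pre>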
+<a class="anchor" id="ga38012bbc4e99df72fb95409a4860ead7"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_duscr_insert_block_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the corresponding (by type) blocked <code>begin</code> function. If no blocked <code>begin</code> function was called, 1x1 blocking (that is, no blocking) is assumed. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row and column strides in accessing <code>val</code>. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Block row/column indices. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">BLAS_duscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286" [...]
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga290547e34be3648b2fe6a7378e59a7ec"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_duscr_insert_clique </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>jndx</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole clique in a matrix, assuming this is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">k,l</td><td>Clique rows and columns count. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row/columns stride in accessing the clique. </td></tr>
+    <tr><td class="paramname">indx,jndx</td><td>Row/column indices arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
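+<p>A sketch of a clique insertion follows; the indices and values are hypothetical and zero-based indexing is assumed.</p>
+<pre class="fragment">
+#include "blas_sparse.h"
+
+/* Sketch: insert a 2x2 clique touching rows {1,3} and columns {0,2},
+   with val laid out row-major (row_stride=2, col_stride=1). */
+int insert_one_clique(blas_sparse_matrix A)
+{
+    const double val[]  = {1.0, 2.0,
+                           3.0, 4.0};
+    const int    indx[] = {1, 3};   /* clique row indices    */
+    const int    jndx[] = {0, 2};   /* clique column indices */
+    return BLAS_duscr_insert_clique(A, 2, 2, val, 2, 1, indx, jndx);
+}
+</pre>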
+<a class="anchor" id="ga1f7870f8a1114b94444c721c933e8bef"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_duscr_insert_clique_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole clique in a matrix, assuming this is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">k,l</td><td>Clique rows and columns count. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row/columns stride in accessing the clique. </td></tr>
+    <tr><td class="paramname">indx,jndx</td><td>Row/column indices arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga8ee73d3b27bdc68e12c85ba281a337be"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_duscr_insert_col </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
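+<p>A sketch of a column insertion follows; the column number, values and row indices are hypothetical.</p>
+<pre class="fragment">
+#include "blas_sparse.h"
+
+/* Sketch: column 0 of A receives 2.0 at row 1 and 5.0 at row 4. */
+int insert_one_col(blas_sparse_matrix A)
+{
+    const double val[]  = {2.0, 5.0};
+    const int    indx[] = {1, 4};   /* row indices of the two nonzeroes */
+    return BLAS_duscr_insert_col(A, 0, 2, val, indx);
+}
+</pre>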
+<a class="anchor" id="ga5645393bb00d715d882e8e2d55c3f0d1"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_duscr_insert_col_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gae0683bc8f0af5dd3e53b964190f9e1b4"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_duscr_insert_entries </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>jndx</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">jndx</td><td>Column indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
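+<p>A sketch of a batched insertion follows; the three coordinate-format triplets are hypothetical.</p>
+<pre class="fragment">
+#include "blas_sparse.h"
+
+/* Sketch: insert three (row, column, value) triplets in one call. */
+int insert_triplets(blas_sparse_matrix A)
+{
+    const double val[]  = {1.0, 2.0, 3.0};
+    const int    indx[] = {0, 1, 2};   /* row indices    */
+    const int    jndx[] = {0, 1, 2};   /* column indices */
+    return BLAS_duscr_insert_entries(A, 3, val, indx, jndx);
+}
+</pre>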
+<a class="anchor" id="gac2c1a4c7b2cebca56aedbad7a002e15f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_duscr_insert_entries_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">jndx</td><td>Column indices array.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga346ff5263bf0b3a5d7dda94e2000130c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_duscr_insert_entry </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>j</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Row and column indices.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
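+<p>A sketch of a single-entry insertion follows; the value and coordinates are hypothetical.</p>
+<pre class="fragment">
+#include "blas_sparse.h"
+
+/* Sketch: place the value 4.2 at row 3, column 5 of a matrix in build state. */
+int insert_one(blas_sparse_matrix A)
+{
+    return BLAS_duscr_insert_entry(A, 4.2, 3, 5);
+}
+</pre>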
+<a class="anchor" id="ga29c2f202a144845cc1d32c8d65bd5c5f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_duscr_insert_entry_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Row and column indices.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gac3472ca6b036771a68d6f5f01387e482"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_duscr_insert_row </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row index.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
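+<p>A sketch of a row insertion follows; the row number, values and column indices are hypothetical.</p>
+<pre class="fragment">
+#include "blas_sparse.h"
+
+/* Sketch: row 2 of A receives 1.5 at column 0 and 2.5 at column 3. */
+int insert_one_row(blas_sparse_matrix A)
+{
+    const double val[]  = {1.5, 2.5};
+    const int    indx[] = {0, 3};   /* column indices of the two nonzeroes */
+    return BLAS_duscr_insert_row(A, 2, 2, val, indx);
+}
+</pre>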
+<a class="anchor" id="gaa72e5450302fa424dcd6cfae0bad872d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_duscr_insert_row_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row index.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gae0c3c6dc5503e21afb8192efb0f66edd"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> BLAS_duscr_variable_block_begin </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>K</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>L</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">K,L</td><td>Are arrays specifying row/column block sizes when specifying a matrix as VBR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error.</dd></dl>
+
+</div>
+</div>
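+<p>A sketch of declaring a VBR layout follows; the block size arrays are hypothetical.</p>
+<pre class="fragment">
+#include "blas_sparse.h"
+
+/* Sketch: two block rows of heights {1,2} and two block columns of
+   widths {2,1}, i.e. a 3x3 matrix overall. */
+blas_sparse_matrix make_vbr(void)
+{
+    const int K[] = {1, 2};   /* per-block-row heights   */
+    const int L[] = {2, 1};   /* per-block-column widths */
+    return BLAS_duscr_variable_block_begin(2, 2, K, L);
+}
+</pre>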
+<a class="anchor" id="ga12c7c1bdd46724147dbbd9b38dd2028e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_duscr_variable_block_begin_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>K</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>L</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">K,L</td><td>Are arrays specifying row/column block sizes when specifying a matrix as VBR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+    <tr><td class="paramname">A</td><td>A valid pointer to an empty matrix handle. </td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value. Will assign a valid matrix handle to <img class="formulaInl" alt="$A$" src="form_21.png"/> in case of success, or set it to -1 on error.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga2ff8ae1b5a89cdb1bfd23b7b27635614"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_dusdot </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> </td>
+          <td class="paramname"><em>conj</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>r</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> </td>
+          <td class="paramname"><em>index_base</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse dot product: <img class="formulaInl" alt="$r \leftarrow X^T Y,$" src="form_0.png"/> or <img class="formulaInl" alt="$r \leftarrow X^H Y$" src="form_1.png"/>, depending on the value of <code>conj</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">r</td><td>Sparse dot result array. </td></tr>
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements. </td></tr>
+    <tr><td class="paramname">conj</td><td>If <a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1">blas_conj</a>, values of X will be considered conjugated.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
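+<p>A minimal usage sketch (not part of the original documentation; values are illustrative and error handling is omitted):</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+
+/* r &lt;- X^T Y, with sparse X stored as (values, 0-based indices) */
+const double x[]  = {1.0, 2.0};
+const int  indx[] = {0, 2};
+const double y[]  = {4.0, 5.0, 6.0};
+double r = 0.0;
+BLAS_dusdot(blas_no_conj, 2, x, indx, y, 1, &amp;r, blas_zero_base);
+/* r == 1.0*4.0 + 2.0*6.0 == 16.0 */
+</pre>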
+</div>
+</div>
+<a class="anchor" id="ga891919cc22b2f9db6b26c857e2080b48"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_dusdot_ </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> * </td>
+          <td class="paramname"><em>conj</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>r</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> * </td>
+          <td class="paramname"><em>index_base</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse dot product: <img class="formulaInl" alt="$r \leftarrow X^T Y,$" src="form_0.png"/> or <img class="formulaInl" alt="$r \leftarrow X^H Y$" src="form_1.png"/>, depending on the value of <code>conj</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">r</td><td>Sparse dot result array. </td></tr>
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements. </td></tr>
+    <tr><td class="paramname">conj</td><td>If <a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1">blas_conj</a>, values of X will be considered conjugated.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gaa253fd591971e664e48e058e85855882"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_dusga </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> </td>
+          <td class="paramname"><em>index_base</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse gather. <img class="formulaInl" alt="$X \leftarrow Y |_x$" src="form_5.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
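+<p>A minimal usage sketch (not part of the original documentation; values are illustrative):</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+
+/* gather y[0] and y[2] into the compressed vector x */
+const double y[]  = {4.0, 5.0, 6.0};
+double x[2];
+const int  indx[] = {0, 2};
+BLAS_dusga(2, y, 1, x, indx, blas_zero_base);
+/* x == {4.0, 6.0}; y is left unchanged */
+</pre>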
+</div>
+</div>
+<a class="anchor" id="ga21d8b0bd816bfd21371f70ca82ee9d9c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_dusga_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> * </td>
+          <td class="paramname"><em>index_base</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse gather. <img class="formulaInl" alt="$X \leftarrow Y |_x$" src="form_5.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga35b70a7c3083b791cf1b94cb20ef57be"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_dusget_diag </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>d</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get matrix diagonal. <img class="formulaInl" alt="$d\leftarrow diag(A)$" src="form_22.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">d</td><td>Array for the diagonal entries. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
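+<p>A minimal usage sketch (not part of the original documentation; it assumes <code>A</code> is a previously assembled <code>blas_sparse_matrix</code> handle of a 3x3 matrix):</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+
+/* d must hold at least as many entries as the shorter matrix dimension */
+double d[3];
+if (BLAS_dusget_diag(A, d) != 0)
+    { /* handle error */ }
+</pre>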
+</div>
+</div>
+<a class="anchor" id="ga7cfde04c833adeb887db75f4b2e104dd"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_dusget_diag_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>d</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get matrix diagonal. <img class="formulaInl" alt="$d\leftarrow diag(A)$" src="form_22.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">d</td><td>Array for the diagonal entries. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gacf35fa073f6cc991efe75f6a012a9a04"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_dusget_element </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>v</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get a single matrix nonzero coefficient <img class="formulaInl" alt="$A_{i,j}$" src="form_23.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">v</td><td>Value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
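+<p>A minimal usage sketch (not part of the original documentation; <code>A</code> is assumed to be a valid handle, and 0-based indices are assumed for the C binding):</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+#include &lt;stdio.h&gt;
+
+double v = 0.0;
+if (BLAS_dusget_element(A, 0, 0, &amp;v) == 0)
+    printf("A(0,0) = %g\n", v);
+</pre>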
+</div>
+</div>
+<a class="anchor" id="ga6443c32b223693698a8a0f0198ae4bee"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_dusget_element_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>v</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get a single matrix nonzero coefficient <img class="formulaInl" alt="$A_{i,j}$" src="form_23.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">v</td><td>Value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga39b4e25d5d5ce080f8dd994856e41fd0"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_dusget_infinity_norm </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>in</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>trans</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get infinity norm of matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">in</td><td>Infinity norm pointer. </td></tr>
+    <tr><td class="paramname">trans</td><td>Transposition parameter. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
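+<p>A minimal usage sketch (not part of the original documentation; <code>A</code> is assumed to be a valid handle):</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+
+double inf_norm = 0.0;
+/* infinity norm of the untransposed matrix */
+BLAS_dusget_infinity_norm(A, &amp;inf_norm, blas_no_trans);
+</pre>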
+</div>
+</div>
+<a class="anchor" id="ga15c7a93ed41a5488c0ef814d2061214a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_dusget_infinity_norm_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>in</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>trans</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get infinity norm of matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">in</td><td>Infinity norm pointer. </td></tr>
+    <tr><td class="paramname">trans</td><td>Transposition parameter. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga441bff94fdc50b9bf6e180d36f51c3ce"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_dusget_matrix_nnz </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get nnz count of matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Output value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
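+<p>A minimal usage sketch (not part of the original documentation; <code>A</code> is assumed to be a valid handle):</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+
+int nnz = 0;
+if (BLAS_dusget_matrix_nnz(A, &amp;nnz) == 0)
+    { /* nnz now holds the nonzeroes count of A */ }
+</pre>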
+</div>
+</div>
+<a class="anchor" id="ga3a4bc573dc07849e7a72ecb2d2f0c31d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_dusget_matrix_nnz_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get nnz count of matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Output value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gab866cf0951b576a47da3864d668919f5"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_dusget_rows_nnz </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>fr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>lr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnzp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get nnz count of matrix row interval. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">fr</td><td>First row. </td></tr>
+    <tr><td class="paramname">lr</td><td>Last row. </td></tr>
+    <tr><td class="paramname">nnzp</td><td>Pointer to the nonzeroes variable. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
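+<p>A minimal usage sketch (not part of the original documentation; <code>A</code> is assumed valid, and the row interval bounds are assumed to follow the handle's index base):</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+
+int fr = 0, lr = 1, nnzp = 0;
+/* count the nonzeroes stored in rows fr..lr */
+BLAS_dusget_rows_nnz(A, fr, lr, &amp;nnzp);
+</pre>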
+</div>
+</div>
+<a class="anchor" id="gac09a79789dc8b79d2e5a375732703103"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_dusget_rows_nnz_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>fr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>lr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnzp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get nnz count of matrix row interval. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">fr</td><td>First row. </td></tr>
+    <tr><td class="paramname">lr</td><td>Last row. </td></tr>
+    <tr><td class="paramname">nnzp</td><td>Pointer to the nonzeroes variable. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga498d143bae71d800dc35e2f1ee071359"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_dusget_rows_sparse </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>fr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>lr</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get sparse rows of matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">VA</td><td>pointer to values. </td></tr>
+    <tr><td class="paramname">IA</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">JA</td><td>Column indices array. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Obtained nonzeroes. </td></tr>
+    <tr><td class="paramname">fr</td><td>first row. </td></tr>
+    <tr><td class="paramname">lr</td><td>Last row. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
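+<p>A minimal usage sketch (not part of the original documentation; <code>A</code> is assumed valid, and <a class="el" href="#gab866cf0951b576a47da3864d668919f5">BLAS_dusget_rows_nnz</a> is used to size the output arrays; allocation failure checks omitted):</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+#include &lt;stdlib.h&gt;
+
+int fr = 0, lr = 1, nnz = 0;
+BLAS_dusget_rows_nnz(A, fr, lr, &amp;nnz);
+double *VA = malloc(nnz * sizeof *VA);
+int    *IA = malloc(nnz * sizeof *IA);
+int    *JA = malloc(nnz * sizeof *JA);
+BLAS_dusget_rows_sparse(A, VA, IA, JA, &amp;nnz, fr, lr);
+/* ... use the COO triplets, then free(VA); free(IA); free(JA); ... */
+</pre>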
+</div>
+</div>
+<a class="anchor" id="gaf2e6ab2c5cbd23a7690bbe8e26794033"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_dusget_rows_sparse_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>fr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>lr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get sparse rows of matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">VA</td><td>pointer to values. </td></tr>
+    <tr><td class="paramname">IA</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">JA</td><td>Column indices array. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Obtained nonzeroes. </td></tr>
+    <tr><td class="paramname">fr</td><td>first row. </td></tr>
+    <tr><td class="paramname">lr</td><td>Last row. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga0b26bd51a324ee09433dbfa995396344"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_dusgz </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> </td>
+          <td class="paramname"><em>index_base</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse gather and zero. <img class="formulaInl" alt="$X \leftarrow Y |_x;Y|_x\leftarrow 0$" src="form_6.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
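+<p>A minimal usage sketch (not part of the original documentation; values are illustrative):</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+
+/* gather y[0] and y[2] into x, zeroing those positions in y */
+double y[] = {4.0, 5.0, 6.0};
+double x[2];
+const int indx[] = {0, 2};
+BLAS_dusgz(2, y, 1, x, indx, blas_zero_base);
+/* x == {4.0, 6.0}; y == {0.0, 5.0, 0.0} */
+</pre>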
+</div>
+</div>
+<a class="anchor" id="gadd448e0d4a33417634e6232c77d8a82a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_dusgz_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> * </td>
+          <td class="paramname"><em>index_base</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse gather and zero. <img class="formulaInl" alt="$X \leftarrow Y |_x;Y|_x\leftarrow 0$" src="form_6.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gaeeddeb634efe4448a31d62fb547362f6"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_dusmm </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>c</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>ldc</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>. </td></tr>
+    <tr><td class="paramname">c</td><td>Dense vector <em>c</em>. </td></tr>
+    <tr><td class="paramname">ldc</td><td>Leading dimension of <em>c</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rs [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
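+<p>A minimal usage sketch (not part of the original documentation; <code>A</code> is assumed to be an assembled 3x3 handle, and <code>B</code>, <code>C</code> are illustrative column-major 3x2 arrays):</p>
+<pre class="fragment">
+#include &lt;blas_sparse.h&gt;
+
+const int nrhs = 2;
+double B[3*2] = {1,0,0, 0,1,0}; /* two right-hand sides, column-major */
+double C[3*2] = {0};
+/* C &lt;- 1.0*A*B + C; ldb and ldc are the column strides (3 here) */
+BLAS_dusmm(blas_colmajor, blas_no_trans, nrhs, 1.0, A, B, 3, C, 3);
+</pre>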
+</div>
+</div>
+<a class="anchor" id="gaa6f99d27ec6f88cca6c6cfac1e8ce7e3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_dusmm_ </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> * </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>c</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>ldc</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>. </td></tr>
+    <tr><td class="paramname">c</td><td>Dense vector <em>c</em>. </td></tr>
+    <tr><td class="paramname">ldc</td><td>Leading dimension of <em>c</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rs [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga9a8f45ddd3c890a296239b212f0c033b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_dusmv </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>. </td></tr>
+    <tr><td class="paramname">y</td><td>Dense vector <em>y</em>. </td></tr>
+    <tr><td class="paramname">incy</td><td>Stride of <em>y</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rs [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga7172d1d1d0f3310ceaf9ecd1d128407b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_dusmv_ </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>. </td></tr>
+    <tr><td class="paramname">y</td><td>Dense vector <em>y</em>. </td></tr>
+    <tr><td class="paramname">incy</td><td>Stride of <em>y</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rs [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga01917c64887638dfb5226be1f87d964a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_dusrows_scale </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>d</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>trans</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Scale the rows of the matrix, each by the corresponding entry of a scaling vector. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">d</td><td>Rows scaling vector. </td></tr>
+    <tr><td class="paramname">trans</td><td>Transposition parameter (if transposed will scale columns). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga9f09f9d05e01d5b354ce234781e3945a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_dusrows_scale_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>d</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>trans</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Scale the rows of the matrix, each by the corresponding entry of a scaling vector. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">d</td><td>Rows scaling vector. </td></tr>
+    <tr><td class="paramname">trans</td><td>Transposition parameter (if transposed will scale columns). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gac71029e615c6c893b54e2f9395a536a4"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_dussc </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> </td>
+          <td class="paramname"><em>index_base</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse scatter: <img class="formulaInl" alt="$Y |_x\leftarrow X$" src="form_7.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga98ac28de307a8713020edd41be98d455"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_dussc_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> * </td>
+          <td class="paramname"><em>index_base</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse scatter: <img class="formulaInl" alt="$Y |_x\leftarrow X$" src="form_7.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gac8aa3ed1e29f2555519421290d236d0c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_dusset_element </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>v</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Set a single (existing) matrix nonzero coefficient <img class="formulaInl" alt="$A_{i,j}$" src="form_23.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">v</td><td>Value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gab50cd8a5a6a5d866789628da0c9141a2"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_dusset_element_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>v</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Set a single (existing) matrix nonzero coefficient <img class="formulaInl" alt="$A_{i,j}$" src="form_23.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">v</td><td>Value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gae34ff937437af99d317739192e2783da"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_dusset_elements </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>ia</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>ja</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>va</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Set the values of individual matrix nonzero coefficients. The operation is pattern preserving; that is, the nonzeroes must already exist. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">ia</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">ja</td><td>Column indices array. </td></tr>
+    <tr><td class="paramname">va</td><td>Values array. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Length of the <code>ia</code>,ja,va arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality..</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga8e2acb49dac4221d1554c30238bd6747"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_dusset_elements_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>ia</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>ja</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const double * </td>
+          <td class="paramname"><em>va</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Set the values of individual matrix nonzero coefficients. The operation is pattern preserving; that is, the nonzeroes must already exist. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">ia</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">ja</td><td>Column indices array. </td></tr>
+    <tr><td class="paramname">va</td><td>Values array. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Length of the <code>ia</code>,ja,va arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality..</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gaad6ff4b3cce242f76362e6ad8a947713"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_dussm </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>ldb</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Triangular solve, by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga4b93f6ef00d1aa3197a45a7e492edcd6"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_dussm_ </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> * </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Triangular solve, by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gade1bbec9b8263a2a5e76112f1042576b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_dussv </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incx</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Triangular solve, by a dense vector. Either of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga36f989895809beaafaa57bb5ab41347f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_dussv_ </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">double * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Triangular solve, by a dense vector. Either of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gaeedaef37cd7591d8b15bc7e8ee049414"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_susaxpy </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> </td>
+          <td class="paramname"><em>index_base</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse vector update: <img class="formulaInl" alt="$Y \leftarrow \alpha X + Y$" src="form_4.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Will scale values of <img class="formulaInl" alt="$X$" src="form_3.png"/> before accumulating to <img class="formulaInl" alt="$Y$" src="form_2.png"/>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga863f07d7735eaa4fc0c6dbe1be09974e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_susaxpy_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> * </td>
+          <td class="paramname"><em>index_base</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse vector update: <img class="formulaInl" alt="$Y \leftarrow \alpha X + Y$" src="form_4.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Will scale values of <img class="formulaInl" alt="$X$" src="form_3.png"/> before accumulating to <img class="formulaInl" alt="$Y$" src="form_2.png"/>.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gad9fe50c2e7a26e6ef83dfd3ea4cfcdd5"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> BLAS_suscr_begin </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>m</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>n</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">m</td><td>Is the count of rows. </td></tr>
+    <tr><td class="paramname">n</td><td>Is the count of columns.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gad2f7ede753754c2474d5460a92bba99e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_suscr_begin_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>m</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>n</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">m</td><td>Is the count of rows. </td></tr>
+    <tr><td class="paramname">n</td><td>Is the count of columns.</td></tr>
+    <tr><td class="paramname">A</td><td>A valid pointer to an empty matrix handle. </td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value. Will assign a valid matrix handle to <img class="formulaInl" alt="$A$" src="form_21.png"/> in case of success, or set it to -1 on error.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga11c5559450e186c2a86d714f564411f3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> BLAS_suscr_block_begin </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>l</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">k,l</td><td>Are row and column dimensions when specifying a matrix as BCSR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga0067882e19affabebf581452a7c05252"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_suscr_block_begin_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">k,l</td><td>Are row and column dimensions when specifying a matrix as BCSR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+    <tr><td class="paramname">A</td><td>A valid pointer to an empty matrix handle. </td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value. Will assign a valid matrix handle to <img class="formulaInl" alt="$A$" src="form_21.png"/> in case of success, or set it to -1 on error.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga7176a90049256cb0e0fe45db66f57dd2"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_suscr_end </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em></td><td>)</td>
+          <td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Makes an assembled matrix out of a matrix in build state. After this, it is no longer possible to insert nonzeroes; only computational routines may be called on the matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga5822f3be35eeb550c323de69ec9933d3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_suscr_end_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Makes an assembled matrix out of a matrix in build state. After this, it is no longer possible to insert nonzeroes; only computational routines may be called on the matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gaa682b478ac48e12d4a091977e8c45768"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_suscr_insert_block </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>j</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the corresponding typed blocked <code>begin</code> function; if no blocked <code>begin</code> function was called, 1x1 blocking (that is, no blocking) is assumed. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row and column strides in accessing <code>val</code>. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Block row/column indices. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">BLAS_duscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286" [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
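+<p>A minimal sketch of blocked insertion. Assumptions not stated above: <code>BLAS_suscr_block_begin</code> takes <code>(Mb, Nb, k, l)</code>, block storage is row-major, and block indices are zero-based; sizes and values are made up.</p>
+<pre class="fragment">
+blas_sparse_matrix A = BLAS_suscr_block_begin(2, 2, 2, 2); /* 2x2 grid of 2x2 blocks   */
+const float blk[4] = { 1.0f, 2.0f, 3.0f, 4.0f };           /* one 2x2 block, row-major */
+BLAS_suscr_insert_block(A, blk, 2, 1, 0, 0);               /* row_stride=2, col_stride=1,
+                                                              written at block (0,0)   */
+BLAS_suscr_end(A);
+</pre>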
+<a class="anchor" id="ga61080e2828351bd1585deb2713ed8a29"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_suscr_insert_block_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the corresponding typed blocked <code>begin</code> function; if no blocked <code>begin</code> function was called, 1x1 blocking (that is, no blocking) is assumed. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row and column strides in accessing <code>val</code>. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Block row/column indices. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">BLAS_duscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286" [...]
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga6e567e79f675ed861c8f446d0e7a78f5"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_suscr_insert_clique </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>jndx</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole clique in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">k,l</td><td>Clique rows and columns count. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row/columns stride in accessing the clique. </td></tr>
+    <tr><td class="paramname">indx,jndx</td><td>Row/column indices arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
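+<p>A minimal sketch of clique insertion, assuming a handle <code>A</code> created earlier and still in build state, zero-based indices, and row-major clique storage; all values are made up.</p>
+<pre class="fragment">
+const float cv[4] = { 1.0f, 2.0f, 3.0f, 4.0f };      /* 2x2 clique, row-major       */
+const int   ri[2] = { 0, 2 };                        /* rows receiving the clique   */
+const int   ci[2] = { 1, 3 };                        /* columns receiving the clique*/
+BLAS_suscr_insert_clique(A, 2, 2, cv, 2, 1, ri, ci); /* row_stride=2, col_stride=1  */
+</pre>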
+<a class="anchor" id="gafcee9667fc445e32012c960fca7e698d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_suscr_insert_clique_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole clique in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">k,l</td><td>Clique rows and columns count. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row/columns stride in accessing the clique. </td></tr>
+    <tr><td class="paramname">indx,jndx</td><td>Row/column indices arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga547d271038794dfc797aecc70e294761"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_suscr_insert_col </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
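+<p>A minimal sketch, assuming a handle <code>A</code> still in build state and zero-based indices; values are made up.</p>
+<pre class="fragment">
+const float cval[2] = { 5.0f, 6.0f };
+const int   rows[2] = { 0, 3 };
+BLAS_suscr_insert_col(A, 1, 2, cval, rows); /* column 1 gets entries at rows 0 and 3 */
+</pre>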
+<a class="anchor" id="ga2d8c691851acf099c25eff1a4c2885c1"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_suscr_insert_col_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gac6158601459aabebc22795864a2a62ba"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_suscr_insert_entries </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>jndx</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">jndx</td><td>Column indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
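+<p>A minimal sketch of batch (COO-style) insertion, assuming a handle <code>A</code> still in build state and zero-based indices; values are made up.</p>
+<pre class="fragment">
+const float v[3] = { 1.0f, 2.0f, 3.0f };
+const int   I[3] = { 0, 1, 2 }; /* row indices    */
+const int   J[3] = { 0, 1, 2 }; /* column indices */
+BLAS_suscr_insert_entries(A, 3, v, I, J);
+</pre>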
+<a class="anchor" id="ga9119b49fd049bcaa310bccb36fcda664"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_suscr_insert_entries_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">jndx</td><td>Column indices array.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga26e2c422895e5df8492bdb561cab4a54"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_suscr_insert_entry </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>j</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Row and column indices.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
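+<p>A minimal sketch illustrating the default duplicate handling, assuming a handle <code>A</code> still in build state and zero-based indices.</p>
+<pre class="fragment">
+BLAS_suscr_insert_entry(A, 1.0f, 0, 0);
+BLAS_suscr_insert_entry(A, 2.0f, 0, 0); /* duplicates are summed: A(0,0) becomes 3 after assembly */
+</pre>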
+<a class="anchor" id="ga9b3085c739330bca518e8ef371f7d3b1"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_suscr_insert_entry_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Row and column indices.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga9b815fa125e3c84a6e6a6ead2c9ef87b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_suscr_insert_row </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row index.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
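+<p>A minimal sketch, assuming a handle <code>A</code> still in build state and zero-based indices; values are made up.</p>
+<pre class="fragment">
+const float rval[2] = { 7.0f, 8.0f };
+const int   cols[2] = { 0, 3 };
+BLAS_suscr_insert_row(A, 2, 2, rval, cols); /* row 2 gets entries at columns 0 and 3 */
+</pre>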
+<a class="anchor" id="ga71080ddbf0e0e602c7bc36993a6c88ca"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_suscr_insert_row_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row index.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gae7e006a448094a70204be60f24cdf1a3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> BLAS_suscr_variable_block_begin </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>K</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>L</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">K,L</td><td>Are arrays specifying row/column block sizes when specifying a matrix as VBR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error.</dd></dl>
+
+</div>
+</div>
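+<p>A minimal sketch of creating a variable-block (VBR) matrix; the block-size arrays are made up for illustration.</p>
+<pre class="fragment">
+const int K[2] = { 1, 3 }; /* row block sizes: a 1-row block and a 3-row block    */
+const int L[2] = { 2, 2 }; /* column block sizes: two 2-column blocks             */
+blas_sparse_matrix A = BLAS_suscr_variable_block_begin(2, 2, K, L); /* 4x4 overall */
+/* ... insert entries, then BLAS_suscr_end(A) ... */
+</pre>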
+<a class="anchor" id="gaab267e13449c999ad8a8e3e358f4b2ed"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_suscr_variable_block_begin_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>K</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>L</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">K,L</td><td>Are arrays specifying row/column block sizes when specifying a matrix as VBR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+    <tr><td class="paramname">A</td><td>A valid pointer to an empty matrix handle. </td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value. Will assign a valid matrix handle to <img class="formulaInl" alt="$A$" src="form_21.png"/> in case of success, or set it to -1 on error.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga88a22a58b50ce89708abb232e4cbffcd"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_susdot </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> </td>
+          <td class="paramname"><em>conj</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>r</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> </td>
+          <td class="paramname"><em>index_base</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse dot product. <img class="formulaInl" alt="$r \leftarrow X^T Y,$" src="form_0.png"/> or <img class="formulaInl" alt="$r \leftarrow X^H Y$" src="form_1.png"/> when conjugation is requested via <code>conj</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">r</td><td>Sparse dot result array. </td></tr>
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements. </td></tr>
+    <tr><td class="paramname">conj</td><td>If <a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1">blas_conj</a>, values of X will be considered conjugated.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
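+<p>A minimal sketch, assuming zero-based index base and unit <code>y</code> stride; values are made up.</p>
+<pre class="fragment">
+const float x[2]    = { 1.0f, 2.0f };             /* compressed sparse vector values */
+const int   indx[2] = { 0, 2 };                   /* positions of x within y         */
+const float y[4]    = { 1.0f, 1.0f, 1.0f, 1.0f }; /* dense vector                    */
+float r = 0.0f;
+BLAS_susdot(blas_no_conj, 2, x, indx, y, 1, &amp;r, blas_zero_base); /* r == 3       */
+</pre>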
+<a class="anchor" id="ga3d4d6df66fbbdfb8585770ce2ce37e6b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_susdot_ </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> * </td>
+          <td class="paramname"><em>conj</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>r</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> * </td>
+          <td class="paramname"><em>index_base</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse dot product. <img class="formulaInl" alt="$r \leftarrow X^T Y,$" src="form_0.png"/> or <img class="formulaInl" alt="$r \leftarrow X^H Y$" src="form_1.png"/> when conjugation is requested via <code>conj</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">r</td><td>Sparse dot result array. </td></tr>
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements. </td></tr>
+    <tr><td class="paramname">conj</td><td>If <a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1">blas_conj</a>, values of X will be considered conjugated.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga40cdf6b61694154efa1ba8d180381827"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_susga </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> </td>
+          <td class="paramname"><em>index_base</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse gather. <img class="formulaInl" alt="$X \leftarrow Y |_x$" src="form_5.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
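+<p>A minimal sketch, assuming zero-based index base and unit <code>y</code> stride; values are made up.</p>
+<pre class="fragment">
+const float y[4]    = { 10.0f, 20.0f, 30.0f, 40.0f };
+const int   indx[2] = { 0, 2 };
+float       x[2];
+BLAS_susga(2, y, 1, x, indx, blas_zero_base); /* gathers x = { 10, 30 } */
+</pre>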
+<a class="anchor" id="ga69bea2986de886f37a493464b1006456"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_susga_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> * </td>
+          <td class="paramname"><em>index_base</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse gather. <img class="formulaInl" alt="$X \leftarrow Y |_x$" src="form_5.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga1113eda1c806ca3631fefde07624fbd6"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_susget_diag </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>d</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get matrix diagonal. <img class="formulaInl" alt="$d\leftarrow diag(A)$" src="form_22.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">d</td><td>Array for the diagonal entries. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
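+<p>A minimal sketch, assuming an assembled 4x4 handle <code>A</code>; the size is made up.</p>
+<pre class="fragment">
+float d[4]; /* one slot per diagonal entry of the 4x4 matrix */
+if (BLAS_susget_diag(A, d) != 0)
+    { /* handle error */ }
+</pre>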
+<a class="anchor" id="ga0444e8a4b321bf1488fb496bdf3116d2"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_susget_diag_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>d</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get matrix diagonal. <img class="formulaInl" alt="$d\leftarrow diag(A)$" src="form_22.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">d</td><td>Array for the diagonal entries. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gad86989cd1f58003617f3db251b6fc0f1"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_susget_element </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>v</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get a single matrix nonzero coefficient <img class="formulaInl" alt="$A_{i,j}$" src="form_23.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">v</td><td>Value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
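+<p>A minimal sketch, assuming an assembled handle <code>A</code> and zero-based indices.</p>
+<pre class="fragment">
+float v = 0.0f;
+if (BLAS_susget_element(A, 1, 2, &amp;v) == 0)
+    { /* v now holds the coefficient at row 1, column 2 */ }
+</pre>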
+<a class="anchor" id="gaac53e141083bc9871d81b587e5f785c1"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_susget_element_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>v</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get a single matrix nonzero coefficient <img class="formulaInl" alt="$A_{i,j}$" src="form_23.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">v</td><td>Value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gafc49f44b76021677000bebe7d7fe133b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_susget_infinity_norm </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>in</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>trans</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get infinity norm of matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">in</td><td>Infinity norm pointer. </td></tr>
+    <tr><td class="paramname">trans</td><td>Transposition parameter. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gaffaaf5b49e850adda0163b6bc082077d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_susget_infinity_norm_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>in</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>trans</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get infinity norm of matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">in</td><td>Infinity norm pointer. </td></tr>
+    <tr><td class="paramname">trans</td><td>Transposition parameter. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gafc031d78d0274c81039c2448a403cd10"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_susget_matrix_nnz </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get nnz count of matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Output value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga039a9d4da3423ea71726242e1c1251e7"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_susget_matrix_nnz_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get nnz count of matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Output value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gad84dbcdeda549e1b0361f7ade7a38b13"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_susget_rows_nnz </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>fr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>lr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnzp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get nnz count of matrix row interval. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">fr</td><td>First row. </td></tr>
+    <tr><td class="paramname">lr</td><td>Last row. </td></tr>
+    <tr><td class="paramname">nnzp</td><td>Pointer to the nonzeroes variable. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga1a8c39f41962e3be6ac84ea3be73f7a0"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_susget_rows_nnz_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>fr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>lr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnzp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get nnz count of matrix row interval. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">fr</td><td>First row. </td></tr>
+    <tr><td class="paramname">lr</td><td>Last row. </td></tr>
+    <tr><td class="paramname">nnzp</td><td>Pointer to the nonzeroes variable. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga8f78343207ff584d2d78789bd90e5533"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_susget_rows_sparse </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>fr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>lr</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get sparse rows of matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">VA</td><td>pointer to values. </td></tr>
+    <tr><td class="paramname">IA</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">JA</td><td>Column indices array. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Obtained nonzeroes. </td></tr>
+    <tr><td class="paramname">fr</td><td>first row. </td></tr>
+    <tr><td class="paramname">lr</td><td>Last row. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga0977f63d781215c826aa5a0ea2df9f47"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_susget_rows_sparse_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>fr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>lr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get sparse rows of matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">VA</td><td>pointer to values. </td></tr>
+    <tr><td class="paramname">IA</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">JA</td><td>Column indices array. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Obtained nonzeroes. </td></tr>
+    <tr><td class="paramname">fr</td><td>first row. </td></tr>
+    <tr><td class="paramname">lr</td><td>Last row. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga2c53b81e979cbae6a5d198509f6d905a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_susgz </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> </td>
+          <td class="paramname"><em>index_base</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse gather and zero. <img class="formulaInl" alt="$X \leftarrow Y |_x;Y|_x\leftarrow 0$" src="form_6.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga74964bd95bd8945b13c7fe2c7f559e5c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_susgz_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> * </td>
+          <td class="paramname"><em>index_base</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse gather and zero. <img class="formulaInl" alt="$X \leftarrow Y |_x;Y|_x\leftarrow 0$" src="form_6.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga29c11c0c304637e89852359b0f8b10b5"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_susmm </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>c</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>ldc</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>. </td></tr>
+    <tr><td class="paramname">c</td><td>Dense vector <em>c</em>. </td></tr>
+    <tr><td class="paramname">ldc</td><td>Leading dimension of <em>c</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rs [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga2c1da8c4c1473a930ebfaa62f360ca8e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_susmm_ </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> * </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>c</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>ldc</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>. </td></tr>
+    <tr><td class="paramname">c</td><td>Dense vector <em>c</em>. </td></tr>
+    <tr><td class="paramname">ldc</td><td>Leading dimension of <em>c</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rs [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gafb4d039eb5319613ed30db7fb323278c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_susmv </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>. </td></tr>
+    <tr><td class="paramname">y</td><td>Dense vector <em>y</em>. </td></tr>
+    <tr><td class="paramname">incy</td><td>Stride of <em>y</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rs [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga651b1d1df5c964dbb21c1a5b14d7878b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_susmv_ </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>. </td></tr>
+    <tr><td class="paramname">y</td><td>Dense vector <em>y</em>. </td></tr>
+    <tr><td class="paramname">incy</td><td>Stride of <em>y</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rs [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gae671b9fc06140680a8c104ef4f0f54f0"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_susrows_scale </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>d</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>trans</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Scale the rows of the matrix by the specified scaling factors. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">d</td><td>Rows scaling vector. </td></tr>
+    <tr><td class="paramname">trans</td><td>Transposition parameter (if transposed will scale columns). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga9de54361f778577330c6c5ece88a63c3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_susrows_scale_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>d</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>trans</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Scale the rows of the matrix by the specified scaling factors. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">d</td><td>Rows scaling vector. </td></tr>
+    <tr><td class="paramname">trans</td><td>Transposition parameter (if transposed will scale columns). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gad58ff27808df2287b9cc77f6ed4d55ff"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_sussc </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> </td>
+          <td class="paramname"><em>index_base</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse scatter: <img class="formulaInl" alt="$Y |_x\leftarrow X$" src="form_7.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga3f88389831294ad45b84ec31313fbc15"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_sussc_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> * </td>
+          <td class="paramname"><em>index_base</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse scatter: <img class="formulaInl" alt="$Y |_x\leftarrow X$" src="form_7.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gaf17e549ec8cf353144ac1e3a1f080f46"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_susset_element </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>v</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Set a single (existing) matrix nonzero coefficient <img class="formulaInl" alt="$A_{i,j}$" src="form_23.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">v</td><td>Value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gab8c3e5745870d4399382051dcedad144"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_susset_element_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>v</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Set a single (existing) matrix nonzero coefficient <img class="formulaInl" alt="$A_{i,j}$" src="form_23.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">v</td><td>Value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gad3e05b01efa2857c0938ada63f30cadf"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_susset_elements </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>ia</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>ja</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>va</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Set individual matrix nonzero coefficient values. The operation is pattern preserving, that is, the nonzeroes must already exist. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">ia</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">ja</td><td>Column indices array. </td></tr>
+    <tr><td class="paramname">va</td><td>Values array. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Length of the <code>ia</code>,ja,va arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality..</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gac0abb530fc46d610bf56e7fb1ef42c6c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_susset_elements_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>ia</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>ja</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const float * </td>
+          <td class="paramname"><em>va</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Set individual matrix nonzero coefficient values. The operation is pattern preserving, that is, the nonzeroes must already exist. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">ia</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">ja</td><td>Column indices array. </td></tr>
+    <tr><td class="paramname">va</td><td>Values array. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Length of the <code>ia</code>,ja,va arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality..</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga3d7835bb3621aaf70787d72f86355f8d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_sussm </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>ldb</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Triangular solve, by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga916f5af1f63f33a3a084accaf2dfd6f1"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_sussm_ </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> * </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Triangular solve, by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gafc9acf48136458baa6ace90355e7abb2"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_sussv </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incx</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Triangular solve, by a dense vector. Either of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga3b63c0a83f8088e60c8e609b451354f0"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_sussv_ </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">float * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Triangular solve, by a dense vector. Either of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga2ff68116b5ae79c37bf335096de973c0"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_uscr_end </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em></td><td>)</td>
+          <td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Makes an assembled matrix out of a matrix in build state. After this, it is no longer possible to insert nonzeroes; only computational routines may be invoked on the matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga60974067bf5367a9a3c6eaa9f6f8f4ab"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_uscr_end_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Makes an assembled matrix out of a matrix in build state. After this, it is no longer possible to insert nonzeroes; only computational routines may be invoked on the matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga8b0cca8196f40f7b55084a978b40717f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_usds </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em></td><td>)</td>
+          <td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Destroys a matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gae4db91cffaf71632bd41b7423c64b757"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_usds_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Destroys a matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga852f4a68eef6963708d11f37e975b178"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_usgp </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>pname</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get a matrix property. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A is the matrix to apply the property. </td></tr>
+    <tr><td class="paramname">pname</td><td>The desired matrix property. For valid matrix properties, see <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729">blas_rsb_ext_type</a>, <a class="el" href="rsb__libspblas_8h.html#acc2b26a405868ca1bd8a18e0eb62e820">blas_uplo_type</a>, <a class="el" href="rsb__libspblas_8h.html#ad7b35ac9114bfe21e15d011bf878b164">blas_diag_type</a>, <a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_t [...]
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga2cb97e106eb117547157a8fc61491b91"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_usgp_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>pname</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get a matrix property. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A is the matrix to apply the property. </td></tr>
+    <tr><td class="paramname">pname</td><td>The desired matrix property. For valid matrix properties, see <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729">blas_rsb_ext_type</a>, <a class="el" href="rsb__libspblas_8h.html#acc2b26a405868ca1bd8a18e0eb62e820">blas_uplo_type</a>, <a class="el" href="rsb__libspblas_8h.html#ad7b35ac9114bfe21e15d011bf878b164">blas_diag_type</a>, <a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_t [...]
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga89577a4a63cc8659f1d463fb819bc002"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_ussp </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>pname</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Set a matrix property. Should be called just after creation and before inserting any nonzeroes. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A is the matrix to apply the property. </td></tr>
+    <tr><td class="paramname">pname</td><td>The desired matrix property. For valid matrix properties, see <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729">blas_rsb_ext_type</a>, <a class="el" href="rsb__libspblas_8h.html#acc2b26a405868ca1bd8a18e0eb62e820">blas_uplo_type</a>, <a class="el" href="rsb__libspblas_8h.html#ad7b35ac9114bfe21e15d011bf878b164">blas_diag_type</a>, <a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_t [...]
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga5ea0303be1db6c9dd73c03bba6dc6158"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_ussp_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>pname</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Set a matrix property. Should be called just after creation and before inserting any nonzeroes. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A is the matrix to apply the property. </td></tr>
+    <tr><td class="paramname">pname</td><td>The desired matrix property. For valid matrix properties, see <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729">blas_rsb_ext_type</a>, <a class="el" href="rsb__libspblas_8h.html#acc2b26a405868ca1bd8a18e0eb62e820">blas_uplo_type</a>, <a class="el" href="rsb__libspblas_8h.html#ad7b35ac9114bfe21e15d011bf878b164">blas_diag_type</a>, <a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_t [...]
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga20f8bb20cf00554547342750d80b2197"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zusaxpy </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> </td>
+          <td class="paramname"><em>index_base</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse vector update: <img class="formulaInl" alt="$Y \leftarrow \alpha X + Y$" src="form_4.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Will scale values of <img class="formulaInl" alt="$X$" src="form_3.png"/> before accumulating to <img class="formulaInl" alt="$Y$" src="form_2.png"/>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga58ad4724155b0cef43cdb7d95f879d8c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zusaxpy_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> * </td>
+          <td class="paramname"><em>index_base</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse vector update: <img class="formulaInl" alt="$Y \leftarrow \alpha X + Y$" src="form_4.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Will scale values of <img class="formulaInl" alt="$X$" src="form_3.png"/> before accumulating to <img class="formulaInl" alt="$Y$" src="form_2.png"/>.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga52b67393ad16e3d40e74fcdba88c7da4"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> BLAS_zuscr_begin </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>m</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>n</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">m</td><td>Is the count of rows. </td></tr>
+    <tr><td class="paramname">n</td><td>Is the count of columns.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gae0246836bd8d4b8697c6674998397f3a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zuscr_begin_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>m</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>n</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">m</td><td>Is the count of rows. </td></tr>
+    <tr><td class="paramname">n</td><td>Is the count of columns.</td></tr>
+    <tr><td class="paramname">A</td><td>A valid pointer to an empty matrix handle. </td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value. Will assign a valid matrix handle to <img class="formulaInl" alt="$A$" src="form_21.png"/> in case of success, or set it to -1 on error.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga5a261b2d1cc996c2a982ff8469faf286"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> BLAS_zuscr_block_begin </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>l</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">k,l</td><td>Are row and column dimensions when specifying a matrix as BCSR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga62c3bd7ba1a96f82055478d40af67370"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zuscr_block_begin_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">k,l</td><td>Are row and column dimensions when specifying a matrix as BCSR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+    <tr><td class="paramname">A</td><td>A valid pointer to an empty matrix handle. </td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value. Will assign a valid matrix handle to <img class="formulaInl" alt="$A$" src="form_21.png"/> in case of success, or set it to -1 on error.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gad6315d71f6f7abf8b82c89c70d6abbf3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zuscr_end </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em></td><td>)</td>
+          <td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Makes an assembled matrix out of a matrix in build state. After this, it is no longer possible to insert nonzeroes; only computational routines may be invoked on the matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga6c23466b531e84f472d5fa75228cb895"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zuscr_end_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Makes an assembled matrix out of a matrix in build state. After this, it is no longer possible to insert nonzeroes; only computational routines may be invoked on the matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga573ee2ea89db4a133b8729abbb1223f0"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zuscr_insert_block </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>j</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the corresponding (typed) blocked <code>begin</code> function. If no blocked <code>begin</code> function was called, 1x1 blocking (that is, no blocking) is assumed. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row and column strides in accessing <code>val</code>. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Block row/column indices. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">BLAS_duscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286" [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gac3837cd5c7b2e8ac11c6c0e5cff8914c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zuscr_insert_block_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the corresponding (typed) blocked <code>begin</code> function. If no blocked <code>begin</code> function was called, 1x1 blocking (that is, no blocking) is assumed. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row and column strides in accessing <code>val</code>. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Block row/column indices. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">BLAS_duscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286" [...]
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga52519d2caa1070b0c80ac3c6cb104d92"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zuscr_insert_clique </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>jndx</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole clique in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">k,l</td><td>Clique rows and columns count. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row/columns stride in accessing the clique. </td></tr>
+    <tr><td class="paramname">indx,jndx</td><td>Row/column indices arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
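+<p>A minimal C sketch (the helper name is illustrative; zero-based indices and row-major clique storage are assumed, error checking elided):</p>
+<pre class="fragment">#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+
+/* Insert a 2x2 clique into a matrix in build state: value
+   val[r*row_stride + c*col_stride] goes to (indx[r], jndx[c]). */
+void insert_clique_sketch(blas_sparse_matrix A)
+{
+    const double complex val[4] = { 1, 2, 3, 4 };
+    const int indx[2] = { 0, 2 }; /* clique row indices */
+    const int jndx[2] = { 1, 3 }; /* clique column indices */
+    BLAS_zuscr_insert_clique(A, 2, 2, val, 2, 1, indx, jndx);
+}
+</pre>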
+<a class="anchor" id="ga8c3430083655b74988536d823e40c723"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zuscr_insert_clique_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole clique in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">k,l</td><td>Clique rows and columns count. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row/columns stride in accessing the clique. </td></tr>
+    <tr><td class="paramname">indx,jndx</td><td>Row/column indices arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga00cfdd3669b146b25d42a32f104ff8a3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zuscr_insert_col </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
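+<p>A minimal C sketch (illustrative helper name; zero-based indices assumed, error checking elided):</p>
+<pre class="fragment">#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+
+/* Insert three nonzeroes into column 1 of a matrix in build state;
+   indx[k] is the row index of val[k]. */
+void insert_col_sketch(blas_sparse_matrix A)
+{
+    const double complex val[3] = { 1, 2, 3 };
+    const int indx[3] = { 0, 2, 4 };
+    BLAS_zuscr_insert_col(A, 1, 3, val, indx);
+}
+</pre>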
+<a class="anchor" id="ga10a2dc6a5399459c83282bda757f5096"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zuscr_insert_col_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gaacc9c9e5c95df4ea6656ad93f1f09666"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zuscr_insert_entries </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>jndx</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">jndx</td><td>Column indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
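+<p>A minimal C sketch of coordinate-format insertion (illustrative helper name; zero-based indices assumed, error checking elided):</p>
+<pre class="fragment">#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+
+/* Insert three entries at once: val[k] goes to (indx[k], jndx[k]). */
+void insert_entries_sketch(blas_sparse_matrix A)
+{
+    const double complex val[3] = { 1.0, 2.0, 3.0 };
+    const int indx[3] = { 0, 1, 2 }; /* row indices */
+    const int jndx[3] = { 0, 1, 2 }; /* column indices */
+    BLAS_zuscr_insert_entries(A, 3, val, indx, jndx);
+}
+</pre>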
+<a class="anchor" id="gad9ad3afc16fc0181117004fd46ff78ae"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zuscr_insert_entries_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">jndx</td><td>Column indices array.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga1ffe345c537b53ac5839da21b236d87c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zuscr_insert_entry </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>j</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Row and column indices.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
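+<p>A minimal C sketch of the whole build cycle around this routine (zero-based indices and C99 <code>double complex</code> values assumed; error checking elided):</p>
+<pre class="fragment">#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+
+/* Create a 4x4 complex matrix, insert a single entry, assemble it. */
+void insert_entry_sketch(void)
+{
+    blas_sparse_matrix A = BLAS_zuscr_begin(4, 4);
+    const double complex v = 1.0 + 2.0 * I;
+    BLAS_zuscr_insert_entry(A, &amp;v, 0, 0);
+    BLAS_zuscr_end(A); /* leave build state */
+    BLAS_usds(A);      /* release the handle */
+}
+</pre>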
+<a class="anchor" id="gaad6627231dc4230affa318726ff3f345"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zuscr_insert_entry_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Row and column indices.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gab52e13dc7c61fc48e593276f04cb2d30"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zuscr_insert_row </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row index.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
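+<p>A minimal C sketch (illustrative helper name; zero-based indices assumed, error checking elided):</p>
+<pre class="fragment">#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+
+/* Insert two nonzeroes into row 2 of a matrix in build state;
+   indx[k] is the column index of val[k]. */
+void insert_row_sketch(blas_sparse_matrix A)
+{
+    const double complex val[2] = { 1, 2 };
+    const int indx[2] = { 0, 3 };
+    BLAS_zuscr_insert_row(A, 2, 2, val, indx);
+}
+</pre>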
+<a class="anchor" id="gaf871e29bfce399dedbebe2aa9c7831df"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zuscr_insert_row_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row index.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gaa582b369a0233027349f8f844cce7622"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> BLAS_zuscr_variable_block_begin </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>K</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>L</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">K,L</td><td>Are arrays specifying row/column block sizes when specifying a matrix as VBR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>A matrix handle in case of success, or -1 on error.</dd></dl>
+
+</div>
+</div>
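+<p>A minimal C sketch (error checking elided; the block partitioning shown is an arbitrary example):</p>
+<pre class="fragment">#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+
+/* A VBR matrix with 2x2 blocks of varying size: K and L give the
+   per-block row/column sizes, so this declares a 3x5 matrix. */
+void vbr_begin_sketch(void)
+{
+    const int K[2] = { 1, 2 }; /* block row sizes: 1+2 = 3 rows */
+    const int L[2] = { 2, 3 }; /* block column sizes: 2+3 = 5 columns */
+    blas_sparse_matrix A = BLAS_zuscr_variable_block_begin(2, 2, K, L);
+    const double complex v = 1.0;
+    BLAS_zuscr_insert_entry(A, &amp;v, 0, 0);
+    BLAS_zuscr_end(A);
+    BLAS_usds(A);
+}
+</pre>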
+<a class="anchor" id="gaa51253d1c144c8aa744b2e13742fec40"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zuscr_variable_block_begin_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>Mb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>Nb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>K</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>L</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Allocates an empty matrix (A) and leaves it in build state. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">K,L</td><td>Are arrays specifying row/column block sizes when specifying a matrix as VBR. </td></tr>
+    <tr><td class="paramname">Mb</td><td>Block rows count. </td></tr>
+    <tr><td class="paramname">Nb</td><td>Block columns count.</td></tr>
+    <tr><td class="paramname">A</td><td>A valid pointer to an empty matrix handle. </td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value. Will assign a valid matrix handle to <img class="formulaInl" alt="$A$" src="form_21.png"/> in case of success, or set it to -1 on error.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga1baea6bd05a2117418d333f5365e34df"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zusdot </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> </td>
+          <td class="paramname"><em>conj</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>r</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> </td>
+          <td class="paramname"><em>index_base</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse dot product. <img class="formulaInl" alt="$r \leftarrow X^T Y,$" src="form_0.png"/> <img class="formulaInl" alt="$r \leftarrow X^H Y$" src="form_1.png"/> </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">r</td><td>Sparse dot result array. </td></tr>
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements. </td></tr>
+    <tr><td class="paramname">conj</td><td>If <a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1">blas_conj</a>, values of X will be considered conjugated.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
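+<p>A minimal C sketch (zero-based indices assumed; error checking elided):</p>
+<pre class="fragment">#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+
+/* Dot product of a sparse vector x (2 nonzeroes at positions 0 and 3)
+   with a dense vector y: r = x[0]*y[0] + x[1]*y[3]. */
+void usdot_sketch(void)
+{
+    const double complex x[2] = { 1, 2 };
+    const int indx[2] = { 0, 3 };
+    const double complex y[4] = { 1, 1, 1, 1 };
+    double complex r = 0;
+    BLAS_zusdot(blas_no_conj, 2, x, indx, y, 1, &amp;r, blas_zero_base);
+}
+</pre>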
+<a class="anchor" id="gaa9f54b685570087469d21462d089ef7d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zusdot_ </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> * </td>
+          <td class="paramname"><em>conj</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>r</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> * </td>
+          <td class="paramname"><em>index_base</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse dot product. <img class="formulaInl" alt="$r \leftarrow X^T Y,$" src="form_0.png"/> <img class="formulaInl" alt="$r \leftarrow X^H Y$" src="form_1.png"/> </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">r</td><td>Sparse dot result array. </td></tr>
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements. </td></tr>
+    <tr><td class="paramname">conj</td><td>If <a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1">blas_conj</a>, values of X will be considered conjugated.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga2a29ab06d610d011109dd0c3da94992f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zusga </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> </td>
+          <td class="paramname"><em>index_base</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse gather. <img class="formulaInl" alt="$X \leftarrow Y |_x$" src="form_5.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
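+<p>A minimal C sketch (zero-based indices assumed; error checking elided):</p>
+<pre class="fragment">#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+
+/* Gather y[indx[k]] into the packed array x: x becomes { 20, 40 }. */
+void usga_sketch(void)
+{
+    const double complex y[4] = { 10, 20, 30, 40 };
+    const int indx[2] = { 1, 3 };
+    double complex x[2];
+    BLAS_zusga(2, y, 1, x, indx, blas_zero_base);
+}
+</pre>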
+<a class="anchor" id="ga245af9e95488dece29876354c6e91fed"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zusga_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> * </td>
+          <td class="paramname"><em>index_base</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse gather. <img class="formulaInl" alt="$X \leftarrow Y |_x$" src="form_5.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gad175937c05d3d05d3aa7fa35eb3028ec"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zusget_diag </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>d</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get matrix diagonal. <img class="formulaInl" alt="$d\leftarrow diag(A)$" src="form_22.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">d</td><td>Array for the diagonal entries. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
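+<p>A minimal C sketch (the output array length is assumed to be min(rows, columns), here a square matrix of order <code>n</code>; error checking elided):</p>
+<pre class="fragment">#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+#include &lt;stdlib.h&gt;
+
+/* Extract the diagonal of an assembled n x n matrix. */
+void get_diag_sketch(blas_sparse_matrix A, int n)
+{
+    double complex *d = malloc(n * sizeof(*d));
+    if (d != NULL &amp;&amp; BLAS_zusget_diag(A, d) == 0) {
+        /* ... use d[0] .. d[n-1] ... */
+    }
+    free(d);
+}
+</pre>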
+<a class="anchor" id="ga73feb9adc685f7ff1d66763b0801a0f9"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zusget_diag_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>d</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get matrix diagonal. <img class="formulaInl" alt="$d\leftarrow diag(A)$" src="form_22.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">d</td><td>Array for the diagonal entries. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga27417bc0d923f7288ed736837492275c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zusget_element </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>v</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get a single matrix nonzero coefficient <img class="formulaInl" alt="$A_{i,j}$" src="form_23.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">v</td><td>Value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
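+<p>A minimal C sketch (zero-based indices assumed):</p>
+<pre class="fragment">#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+#include &lt;stdio.h&gt;
+
+/* Read back coefficient A(0,0) from an assembled matrix. */
+void get_element_sketch(blas_sparse_matrix A)
+{
+    double complex v;
+    if (BLAS_zusget_element(A, 0, 0, &amp;v) == 0)
+        printf("A(0,0) = %g%+gi\n", creal(v), cimag(v));
+}
+</pre>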
+<a class="anchor" id="ga845cca2b512e38b467fc0d4b93d660b7"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zusget_element_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>v</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get a single matrix nonzero coefficient <img class="formulaInl" alt="$A_{i,j}$" src="form_23.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">v</td><td>Value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga286c2cf2c749c80c8b71ff2f4bdb1566"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zusget_infinity_norm </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>in</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>trans</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get infinity norm of matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">in</td><td>Infinity norm pointer. </td></tr>
+    <tr><td class="paramname">trans</td><td>Transposition parameter. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
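+<p>A minimal C sketch (the norm of a complex matrix is real; a <code>double complex</code> output slot is assumed here to match the <code>void*</code> interface):</p>
+<pre class="fragment">#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+
+/* Infinity norm of an assembled matrix (untransposed). */
+void get_inf_norm_sketch(blas_sparse_matrix A)
+{
+    double complex nrm = 0;
+    if (BLAS_zusget_infinity_norm(A, &amp;nrm, blas_no_trans) == 0) {
+        /* ... use creal(nrm) ... */
+    }
+}
+</pre>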
+<a class="anchor" id="ga01b88a27714ca87085421fd9a4f3e479"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zusget_infinity_norm_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>in</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>trans</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get infinity norm of matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">in</td><td>Infinity norm pointer. </td></tr>
+    <tr><td class="paramname">trans</td><td>Transposition parameter. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga85e15d7a3331e8ed4d702908477e2896"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zusget_matrix_nnz </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get nnz count of matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Output value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
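+<p>A minimal C sketch:</p>
+<pre class="fragment">#include &lt;blas_sparse.h&gt;
+#include &lt;stdio.h&gt;
+
+/* Query the nonzero count of an assembled matrix. */
+void get_nnz_sketch(blas_sparse_matrix A)
+{
+    int nnz = 0;
+    if (BLAS_zusget_matrix_nnz(A, &amp;nnz) == 0)
+        printf("nnz = %d\n", nnz);
+}
+</pre>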
+<a class="anchor" id="ga9bdd048dea68ecbd8fd712349d4fbf13"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zusget_matrix_nnz_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get nnz count of matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Output value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gadeb3cbe1cc6987763a55665bcdb8aef5"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zusget_rows_nnz </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>fr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>lr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnzp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get nnz count of matrix row interval. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">fr</td><td>First row. </td></tr>
+    <tr><td class="paramname">lr</td><td>Last row. </td></tr>
+    <tr><td class="paramname">nnzp</td><td>Pointer to the nonzeroes variable. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
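+<p>A minimal sketch (illustrative; the helper name is hypothetical): counting the nonzeroes stored in a row interval of an assembled matrix handle <code>A</code>.</p>
+<pre class="fragment">#include &lt;blas_sparse.h&gt;
+
+/* Hypothetical helper: nonzeroes in rows fr..lr, or -1 on error. */
+int nnz_in_rows(blas_sparse_matrix A, int fr, int lr)
+{
+    int nnz = 0;
+    return (BLAS_zusget_rows_nnz(A, fr, lr, &amp;nnz) == 0) ? nnz : -1;
+}
+</pre>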
+<a class="anchor" id="ga50cba1e236b63775110d6d1b292417da"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zusget_rows_nnz_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>fr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>lr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnzp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get nnz count of matrix row interval. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">fr</td><td>First row. </td></tr>
+    <tr><td class="paramname">lr</td><td>Last row. </td></tr>
+    <tr><td class="paramname">nnzp</td><td>Pointer to the nonzeroes variable. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gaf9d44fc73526a4fdf9627424626bf4a5"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zusget_rows_sparse </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>fr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>lr</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get sparse rows of matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">VA</td><td>pointer to values. </td></tr>
+    <tr><td class="paramname">IA</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">JA</td><td>Column indices array. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Obtained nonzeroes. </td></tr>
+    <tr><td class="paramname">fr</td><td>first row. </td></tr>
+    <tr><td class="paramname">lr</td><td>Last row. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
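+<p>A usage sketch (assumptions: <code>A</code> is assembled and holds double-precision complex values; the helper name <code>extract_rows</code> is hypothetical). Buffers are sized with <code>BLAS_zusget_rows_nnz</code> before extraction.</p>
+<pre class="fragment">#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+#include &lt;stdlib.h&gt;
+
+/* Hypothetical helper: extract rows fr..lr of A in coordinate form. */
+int extract_rows(blas_sparse_matrix A, int fr, int lr)
+{
+    int nnz = 0, res = -1;
+    if (BLAS_zusget_rows_nnz(A, fr, lr, &amp;nnz) != 0)
+        return -1;
+    double complex *VA = malloc(nnz * sizeof(*VA));
+    int *IA = malloc(nnz * sizeof(*IA));
+    int *JA = malloc(nnz * sizeof(*JA));
+    if (VA &amp;&amp; IA &amp;&amp; JA)
+        res = BLAS_zusget_rows_sparse(A, VA, IA, JA, &amp;nnz, fr, lr);
+    /* ... on success, VA/IA/JA hold nnz entries of rows fr..lr ... */
+    free(VA); free(IA); free(JA);
+    return res;
+}
+</pre>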
+<a class="anchor" id="ga63f072aa25f7f7f8ac1ac4e32aae0c2e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zusget_rows_sparse_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>fr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>lr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Get sparse rows of matrix. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">VA</td><td>pointer to values. </td></tr>
+    <tr><td class="paramname">IA</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">JA</td><td>Column indices array. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Obtained nonzeroes. </td></tr>
+    <tr><td class="paramname">fr</td><td>first row. </td></tr>
+    <tr><td class="paramname">lr</td><td>Last row. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga0d52a140d65ab78ee0c515c445b42451"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zusgz </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> </td>
+          <td class="paramname"><em>index_base</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse gather and zero. <img class="formulaInl" alt="$X \leftarrow Y |_x;Y|_x\leftarrow 0$" src="form_6.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
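+<p>A small sketch of the gather-and-zero semantics with one-based indices (the function name and values are chosen only for illustration):</p>
+<pre class="fragment">#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+
+void gather_zero_sketch(void) /* hypothetical example function */
+{
+    double complex y[] = { 1, 2, 3, 4, 5 };
+    double complex x[2];
+    const int indx[] = { 1, 4 }; /* one-based positions within y */
+
+    BLAS_zusgz(2, y, 1, x, indx, blas_one_base);
+    /* now x == {1, 4} and y == {0, 2, 3, 0, 5} */
+}
+</pre>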
+<a class="anchor" id="ga5a6be1c191d51a622b99fe1b9a776bdc"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zusgz_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> * </td>
+          <td class="paramname"><em>index_base</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse gather and zero. <img class="formulaInl" alt="$X \leftarrow Y |_x;Y|_x\leftarrow 0$" src="form_6.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga88138db4545610d234d18d42237f36ee"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zusmm </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>c</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>ldc</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>. </td></tr>
+    <tr><td class="paramname">c</td><td>Dense vector <em>c</em>. </td></tr>
+    <tr><td class="paramname">ldc</td><td>Leading dimension of <em>c</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rs [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
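+<p>A minimal invocation sketch (dimensions are assumptions: <code>A</code> is m x k, <code>B</code> is k x nrhs and <code>C</code> is m x nrhs, both column-major with full leading dimensions; the helper name is hypothetical):</p>
+<pre class="fragment">#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+
+/* Hypothetical helper: C &lt;- alpha*A*B + C, column-major operands. */
+void usmm_sketch(blas_sparse_matrix A, int m, int k, int nrhs,
+                 const double complex *B, double complex *C)
+{
+    const double complex alpha = 1.0;
+    BLAS_zusmm(blas_colmajor, blas_no_trans, nrhs, &amp;alpha, A, B, k, C, m);
+}
+</pre>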
+<a class="anchor" id="gaf7018fb638e25fe8b149d0cab4e844c0"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zusmm_ </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> * </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>c</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>ldc</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>. </td></tr>
+    <tr><td class="paramname">c</td><td>Dense vector <em>c</em>. </td></tr>
+    <tr><td class="paramname">ldc</td><td>Leading dimension of <em>c</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rs [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga1ee2eb4be4c1e0565051fe04ca7415a2"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zusmv </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>. </td></tr>
+    <tr><td class="paramname">y</td><td>Dense vector <em>y</em>. </td></tr>
+    <tr><td class="paramname">incy</td><td>Stride of <em>y</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rs [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
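+<p>A minimal sketch of <code>y &lt;- alpha*A*x + y</code> with unit strides (vector lengths are assumed consistent with <code>A</code>; the helper name is hypothetical):</p>
+<pre class="fragment">#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+
+/* Hypothetical helper: multiply an assembled matrix by a dense vector. */
+int usmv_sketch(blas_sparse_matrix A, const double complex *x, double complex *y)
+{
+    const double complex alpha = 1.0;
+    return BLAS_zusmv(blas_no_trans, &amp;alpha, A, x, 1, y, 1); /* 0 or -1 */
+}
+</pre>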
+<a class="anchor" id="ga6747bd2d7930018d8693a97a3eb2865c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zusmv_ </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>. </td></tr>
+    <tr><td class="paramname">y</td><td>Dense vector <em>y</em>. </td></tr>
+    <tr><td class="paramname">incy</td><td>Stride of <em>y</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rs [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gad551879cdde6d16d9dd5b9edc647c667"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zusrows_scale </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>d</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>trans</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Scale the rows of the matrix by the given scaling vector. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">d</td><td>Rows scaling vector. </td></tr>
+    <tr><td class="paramname">trans</td><td>Transposition parameter (if transposed will scale columns). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
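+<p>A sketch (hypothetical helper; <code>d</code> is assumed to hold one factor per row of <code>A</code>):</p>
+<pre class="fragment">#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+
+/* Hypothetical helper: scale row i of A by d[i]. */
+int scale_rows_sketch(blas_sparse_matrix A, const double complex *d)
+{
+    return BLAS_zusrows_scale(A, d, blas_no_trans); /* 0 or -1 */
+}
+</pre>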
+<a class="anchor" id="ga806bb32c4231e4cd9d833370484ad369"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zusrows_scale_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>d</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>trans</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Scale the rows of the matrix by the given scaling vector. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">d</td><td>Rows scaling vector. </td></tr>
+    <tr><td class="paramname">trans</td><td>Transposition parameter (if transposed will scale columns). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gaad333ae644010e3b059190b98528c79d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zussc </td>
+          <td>(</td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> </td>
+          <td class="paramname"><em>index_base</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse scatter: <img class="formulaInl" alt="$Y |_x\leftarrow X$" src="form_7.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
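+<p>A small sketch of the scatter semantics with one-based indices (the function name and values are for illustration only):</p>
+<pre class="fragment">#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+
+void scatter_sketch(void) /* hypothetical example function */
+{
+    const double complex x[] = { 10, 20 };
+    double complex y[5] = { 0 };
+    const int indx[] = { 2, 5 }; /* one-based positions within y */
+
+    BLAS_zussc(2, x, y, 1, indx, blas_one_base);
+    /* now y == {0, 10, 0, 0, 20} */
+}
+</pre>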
+<a class="anchor" id="gab89e9860df0ed52620651cfc607a987a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zussc_ </td>
+          <td>(</td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> * </td>
+          <td class="paramname"><em>index_base</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sparse scatter: <img class="formulaInl" alt="$Y |_x\leftarrow X$" src="form_7.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">y</td><td>Array for <img class="formulaInl" alt="$Y$" src="form_2.png"/> vector. </td></tr>
+    <tr><td class="paramname">x</td><td>Array for <img class="formulaInl" alt="$X$" src="form_3.png"/> vector. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Size of <img class="formulaInl" alt="$X$" src="form_3.png"/><em>and</em> <em><img class="formulaInl" alt="$Y$" src="form_2.png"/></em> vectors. </td></tr>
+    <tr><td class="paramname">indx</td><td>Is the array of indices at which sparse vector <img class="formulaInl" alt="$X$" src="form_3.png"/> will be accessed. </td></tr>
+    <tr><td class="paramname">index_base</td><td>Specifies the contents of <code>indx</code>, either <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> or <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a>. </td></tr>
+    <tr><td class="paramname">incy</td><td>The distance between consecutive <code>y</code> array elements..</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+<dl class="section warning"><dt>Warning</dt><dd>Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gaca954a070d476342e254587fc2faa7fd"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zusset_element </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>v</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Set a single (existing) matrix nonzero coefficient <img class="formulaInl" alt="$A_{i,j}$" src="form_23.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">v</td><td>Value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
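+<p>A sketch (hypothetical helper): overwriting an already-present nonzero; positions outside the pattern make the call fail.</p>
+<pre class="fragment">#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+
+/* Hypothetical helper: set the existing nonzero A(i,j) to v. */
+int set_entry_sketch(blas_sparse_matrix A, int i, int j, double complex v)
+{
+    return BLAS_zusset_element(A, i, j, &amp;v); /* 0 on success, -1 on error */
+}
+</pre>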
+<a class="anchor" id="ga52efe19f0972fa51ac6329cf717b676c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zusset_element_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>v</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Set a single (existing) matrix nonzero coefficient <img class="formulaInl" alt="$A_{i,j}$" src="form_23.png"/>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">v</td><td>Value pointer. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gac542af7517c9f667122e8bdc408487b3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zusset_elements </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>ia</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>ja</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>va</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nnz</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Set the values of individual matrix nonzero coefficients. The operation is pattern preserving, that is, the nonzeroes must already exist. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">ia</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">ja</td><td>Column indices array. </td></tr>
+    <tr><td class="paramname">va</td><td>Values array. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Length of the <code>ia</code>,ja,va arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality..</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
+</div>
+</div>
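+<p>A sketch (hypothetical helper) of the batched, pattern-preserving update:</p>
+<pre class="fragment">#include &lt;blas_sparse.h&gt;
+#include &lt;complex.h&gt;
+
+/* Hypothetical helper: overwrite nnz existing nonzeroes at (ia[k], ja[k]). */
+int set_entries_sketch(blas_sparse_matrix A, const int *ia, const int *ja,
+                       const double complex *va, int nnz)
+{
+    return BLAS_zusset_elements(A, ia, ja, va, nnz); /* 0 or -1 */
+}
+</pre>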
+<a class="anchor" id="ga156a8d0225d9761cd58e15e026b9ba2e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zusset_elements_ </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>ia</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const int * </td>
+          <td class="paramname"><em>ja</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>va</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Set the values of individual matrix nonzero coefficients. The operation is pattern preserving, that is, the nonzeroes must already exist. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">ia</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">ja</td><td>Column indices array. </td></tr>
+    <tr><td class="paramname">va</td><td>Values array. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Length of the <code>ia</code>,ja,va arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality..</dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga8602eae41f9e5248ff086087abe68bdf"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zussm </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>ldb</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Triangular solve, by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
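+<p>For illustration, a minimal usage sketch (assumptions: <code>T</code> is a valid double complex triangular handle of order <code>n</code>, and <code>B</code> is a column-major block with leading dimension <code>n</code>): </p>
+<pre class="fragment">/* Sketch: overwrite the n x nrhs block B with alpha * T^{-1} * B. */
+#include <blas_sparse.h>
+#include <complex.h>
+
+int solve_block(blas_sparse_matrix T, double complex *B, int n, int nrhs)
+{
+	const double complex alpha = 1.0;
+	return BLAS_zussm(blas_colmajor, blas_no_trans, nrhs, &alpha, T, B, n);
+}
+</pre>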
+</div>
+</div>
+<a class="anchor" id="ga60f808ded982233be9a4faaa5fb75db3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zussm_ </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> * </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Triangular solve, by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ga7c1e740064369d0029cd627643eb841a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">int BLAS_zussv </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int </td>
+          <td class="paramname"><em>incx</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Triangular solve, by a dense vector. Either of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1.</dd></dl>
+
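+<p>For illustration, a minimal usage sketch (assumptions: <code>T</code> is a valid double complex triangular handle and <code>x</code> is a contiguous vector, i.e. of unit stride): </p>
+<pre class="fragment">/* Sketch: overwrite x with alpha * T^{-H} * x (conjugate transposed solve). */
+#include <blas_sparse.h>
+#include <complex.h>
+
+int solve_conj_transposed(blas_sparse_matrix T, double complex *x)
+{
+	const double complex alpha = 1.0;
+	return BLAS_zussv(blas_conj_trans, &alpha, T, x, 1);
+}
+</pre>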
+</div>
+</div>
+<a class="anchor" id="ga5d14a5df82e93614e8c524f6d20bb5c5"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void blas_zussv_ </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> * </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> * </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">int * </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Triangular solve, by a dense vector. Either of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>.</td></tr>
+    <tr><td class="paramname">istat</td><td>If non <code>NULL</code>, <code>*istat</code> will be set to the return code, either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>This is a subroutine for Fortran, so it does not return any value.</dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="gac4d8c73e5d9faa85209bcc4e885d4ff1"></a>
+<div class="memitem">
+<div class="memproto">
+<table class="mlabels">
+  <tr>
+  <td class="mlabels-left">
+      <table class="memname">
+        <tr>
+          <td class="memname">struct rsb_mtx_t* rsb_blas_get_mtx </td>
+          <td>(</td>
+          <td class="paramtype"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td>
+          <td class="paramname"><em>A</em></td><td>)</td>
+          <td></td>
+        </tr>
+      </table>
+  </td>
+  <td class="mlabels-right">
+<span class="mlabels"><span class="mlabel">read</span></span>  </td>
+  </tr>
+</table>
+</div><div class="memdoc">
+<p>Given a valid Sparse BLAS handle, returns a pointer to the inner rsb_mtx_t structure. This can then be used with many of the <a class="el" href="rsb_8h.html">rsb.h</a> functions. This is an experimental function, so we recommend using it only with functions that do not modify the matrix (those taking a <code>const struct rsb_mtx_t*</code> argument). You can use this function from either Fortran or C.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, a valid pointer to the inner matrix structure (<code>struct</code> <code>rsb_mtx_t*</code>); on error, <code>NULL</code>.</dd></dl>
+<p><br/>
+</p>
+<p>An example using Fortran: </p>
+<div class="fragment"><div class="line">...  </div>
+<div class="line">USE <a class="code" href="classblas__sparse.html">blas_sparse</a> </div>
+<div class="line">USE <a class="code" href="classrsb.html">rsb</a> </div>
+<div class="line">IMPLICIT NONE </div>
+<div class="line">TYPE(C_PTR),TARGET :: mtxAp = C_NULL_PTR ! matrix pointer </div>
+<div class="line">INTEGER :: A ! <a class="code" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> handle </div>
+<div class="line">INTEGER, TARGET :: istat = 0 </div>
+<div class="line">... ! begin, populate and finalize A, e.g. <span class="keyword">using</span> <a class="code" href="group__rsb__doc__sparse__blas.html#gac931dcb1129ee3016ab82602c3d14fee">BLAS_duscr_begin</a>, <a class="code" href="group__rsb__doc__sparse__blas.html#gae0683bc8f0af5dd3e53b964190f9e1b4">BLAS_duscr_insert_entries</a>, <a class="code" href="group__rsb__doc__sparse__blas.html#ga2ff68116b5ae79c37bf335096de973c0">BLAS_uscr_end</a></div>
+<div class="line">! <span class="keyword">get</span> pointer to <a class="code" href="classrsb.html">rsb</a> structure: </div>
+<div class="line">mtxAp = <a class="code" href="group__rsb__doc__sparse__blas.html#gac4d8c73e5d9faa85209bcc4e885d4ff1">rsb_blas_get_mtx</a>(A) </div>
+<div class="line">! Now one can use it with any <a class="code" href="classrsb.html">rsb</a>.h/<a class="code" href="classrsb.html">rsb</a>.F90 <span class="keyword">function</span>, e.g.: </div>
+<div class="line">istat = <a class="code" href="group__rsb__doc__rsb.html#gad911ac7528c95c874d02cb17e6b76c54">rsb_file_mtx_save</a>(mtxAp, C_NULL_PTR) ! write to stdout </div>
+</div><!-- fragment --> <dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__rsb.html#gaac3c6c033733a8101b9ccf56f8fc7112">rsb_mtx_get_coo</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4adca460f50bc1ad7d9ffdfda2273b87">rsb_mtx_get_csr</a>, <a class="el" href="group__rsb__doc__rsb.html#gaa01c4a69db732f99e8a960ee8c9afa23">rsb_mtx_get_rows_sparse</a>, <a class="el" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb_mtx_get_coo_block</a [...]
+<dd>
+<a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals</a>, <a class="el" href="group__rsb__doc__rsb.html#gab8069ad6d5a67bc8a726131891e98c46">rsb_mtx_set_vals</a>, </dd>
+<dd>
+<a class="el" href="group__rsb__doc__rsb.html#ga74d97612d4af70244c886b9eadd90a0e">rsb_spmsp_to_dense</a>, <a class="el" href="group__rsb__doc__rsb.html#ga30823d02e577e59da4ccff6baaeb8ea1">rsb_sppsp</a>, <a class="el" href="group__rsb__doc__rsb.html#ga8813ccbbb1065ac76bfe22c42feafa05">rsb_spmsp</a>, <a class="el" href="group__rsb__doc__rsb.html#gaf30a70ea183d30d216f700782fc01524">rsb_mtx_add_to_dense</a>, </dd>
+<dd>
+<a class="el" href="group__rsb__doc__rsb.html#gab0702d7080d1699162e4201bc70cc5ee">rsb_mtx_rndr</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4b45a74b985f5cbd869bc9a540951771">rsb_file_mtx_rndr</a>, </dd>
+<dd>
+<a class="el" href="group__rsb__doc__rsb.html#gad9a3eacd54fb7043464006cd57866edf">rsb_mtx_get_info</a>, <a class="el" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str</a>, <a class="el" href="group__rsb__doc__rsb.html#gad911ac7528c95c874d02cb17e6b76c54">rsb_file_mtx_save</a>, <a class="el" href="group__rsb__doc__rsb.html#gad071e0373a08f74ee7ae910e9e4fd140">rsb_file_vec_load</a>, <a class="el" href="group__rsb__doc__rsb.html#ga00833b0cf57da8e430f9d0 [...]
+<dl class="section note"><dt>Note</dt><dd>This function is an extension implemented by <code>librsb</code> and thus it is not part of the standard. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality.</dd></dl>
+
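+<p>A corresponding C sketch (illustrative only; it assumes <code>A</code> is a valid, finalized handle and uses a non-modifying <a class="el" href="rsb_8h.html">rsb.h</a> function, as recommended above): </p>
+<pre class="fragment">#include <rsb.h>
+#include <blas_sparse.h>
+#include <stdio.h>
+
+void dump_matrix(blas_sparse_matrix A)
+{
+	struct rsb_mtx_t *mtxAp = rsb_blas_get_mtx(A);
+	if (mtxAp != NULL)
+		rsb_file_mtx_save(mtxAp, NULL); /* NULL filename: write to stdout */
+	else
+		fprintf(stderr, "invalid handle\n");
+}
+</pre>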
+</div>
+</div>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:24 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/index.html b/doc/html/index.html
new file mode 100644
index 0000000..338eeed
--- /dev/null
+++ b/doc/html/index.html
@@ -0,0 +1,1082 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Main Page</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li class="current"><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="header">
+  <div class="headertitle">
+<div class="title">librsb Documentation</div>  </div>
+</div><!--header-->
+<div class="contents">
+<div class="textblock"><p>A sparse matrix library implementing the `Recursive Sparse Blocks' (<b>RSB</b>) matrix storage.</p>
+<p>This is the documentation for the application programming interface (API) of the <em>`<code>librsb'</code> library</em>. <br/>
+ In order to use <code>librsb</code>, there is no need for the user to know the RSB layout and algorithms: this documentation should be sufficient. <br/>
+ This library is dual-interfaced; it supports a native (`RSB') interface (with identifiers prefixed by `rsb_' or `RSB_') and a (mostly complete) Sparse BLAS interface, built as a wrapper around the RSB interface. <br/>
+ Many computationally intensive operations are implemented with thread parallelism, by using OpenMP. <br/>
+ Thread parallelism can be turned off at configure time, if desired, or limited at execution time. <br/>
+ Many of the computational kernel source code files (mostly internals) were automatically generated. <br/>
+ This user documentation concerns the end user API only; that is, neither the internals, nor the code generator. <br/>
+</p>
+<p>You should consult the remaining documentation (e.g. the README file, code comments) to find information about how to modify the generator or the library internals.</p>
+<p>This library is research software and, as such, still <b>experimental</b>. For a first approach, we suggest going through the <a class="el" href="group__rsb__doc__examples.html">Example programs and code</a> documentation section, or the <a class="el" href="index.html#examples_section">quick start examples</a> section on this page.</p>
+<p><br/>
+ Information about the supported matrix types and matrix operations resides in the <a class="el" href="rsb__types_8h.html">rsb_types.h </a> file.</p>
+<p>A C/C++ user can use the native API of RSB by including the <a class="el" href="rsb_8h.html">rsb.h </a> header. The same interface is available in Fortran via the ISO C Binding interface, specified in <a class="el" href="rsb_8F90.html">rsb.F90</a>. <br/>
+</p>
+<p>The C header file for the <a class="el" href="group__rsb__doc__sparse__blas.html">The Sparse BLAS interface to librsb (blas_sparse.h, rsb_blas_sparse.F90)</a> is <a class="el" href="blas__sparse_8h.html">blas_sparse.h</a>.</p>
+<dl class="section author"><dt>Author</dt><dd>Michele Martone < michelemartone AT users DOT sourceforge DOT net ></dd></dl>
+<p>Contents of the README file : </p>
+<pre class="fragment">
+================================================================================
+
+ librsb README file 
+ 
+================================================================================
+	librsb - Recursive Sparse Blocks  Matrix computations library
+
+ A library for sparse matrix computations featuring the Recursive Sparse Blocks
+ (RSB) matrix format. This format allows cache efficient and multi-threaded
+ (that is, shared memory parallel) operations on large sparse matrices.
+ It provides the most common operations necessary to iterative solvers, like
+ matrix-vector multiplication, triangular solution, rows/columns scaling, 
+ diagonal extraction / setting, blocks extraction, norm computation, formats
+ conversion.  The RSB format is especially well suited for symmetric and
+ transposed multiplication variants.
+ Most numerical kernels code is auto generated, and the supported numerical
+ types can be chosen by the user at build time.
+ This library is dual-interfaced: it can be used via the native (`RSB') 
+ interface (with identifiers prefixed by `rsb_' or `RSB_'), and a Sparse BLAS
+ one (`BLAS_').
+ The `RSB' interface can be used from C (rsb.h header) or via modern Fortran
+ ISO-C-BINDING ("rsb" module).
+ The Sparse BLAS interface is usable from C via the blas_sparse.h header, and
+ from Fortran via the "blas_sparse" module.
+
+================================================================================
+
+ This (README) is the first document you should read about librsb.
+ It contains basic instructions to generate, compile, install, and use librsb.
+ The reference documentation for programming with librsb is contained in the
+ ./doc/ source package subdirectory and when installed, placed in the
+ appropriate system directories as both Unix man pages (./doc/man/) and HTML
+ (./doc/html/).
+ If you are a user of a previous version of librsb, see the NEWS file listing
+ the changes.
+ After having read this file you are welcome to ask questions to the author.
+
+--------------------------------------------------------------------------------
+		INTRODUCTION
+--------------------------------------------------------------------------------
+
+ librsb is a library for sparse matrix algebra computations.
+ It is stand-alone: does not require any other library to build or work.
+ It is shared memory parallel, using the OpenMP standard.
+ It focuses on high performance and provides build options.
+ A part of the library code is automatically generated from templates and
+ macros, on the basis of the numerical types a user wishes to have supported.
+ The configure script options (self documented --- not documented here) provide
+ many build time options, especially with respect to debug and additional 
+ verbosity.
+
+   		INTRODUCTION
+   		MAIN ASPECTS, FEATURES
+   		QUICK INSTALL AND TESTING
+   		LIBRARY CONFIGURATION, GENERATION, BUILD 
+   		INSTALLATION, USAGE
+   		EXECUTION AND ENVIRONMENT VARIABLES
+   		DOCUMENTATION, EXAMPLES AND PROGRAMMING GUIDELINES
+   		CONFIGURE, BUILD AND BENCHMARK EXAMPLE
+   		COMPATIBILITY
+   		FAQ
+   		POSSIBLE / POTENTIAL FUTURE FEATURES / ENHANCEMENTS
+   		ABOUT THE INTERNALS
+   		BUGS
+   		CONTACTS
+   		CREDITS
+   		LICENSE
+
+--------------------------------------------------------------------------------
+		MAIN ASPECTS, FEATURES
+--------------------------------------------------------------------------------
+
+ * very efficient (see the website for benchmark performance results)
+ * threads/structure autotuning feature for additional performance
+ * support for multiple numerical data types which can be turned
+   on/off individually (e.g.:double, float, int, char, complex, double complex)
+   at configure time
+ * a sparse BLAS interface for matrix assembly, computation, destruction
+ * a code generator for its inner CSR, COO computational kernels
+ * based on a recursive memory layout of submatrices
+ * enough functionality to implement the most common iterative methods 
+ * basic index types overflow checks and input sanitizing
+ * parallel matrix assembly and conversion routines
+ * auxiliary functions for matrix I/O (using the "Matrix Market" format:
+   real, integer, complex and pattern are supported)
+ * implemented as a building block for solvers like e.g. PSBLAS
+ * dual implementation of kernels: with "full word" and "half word" indices
+ * thread level (shared memory) parallelism by using OpenMP
+ * basic (unoptimized) sparse matrices multiplication and summation
+ * interactive usage possible by using the "sparsersb" plugin for GNU Octave 
+ * complete with examples and a test suite
+ * see the NEWS text file for a list of changes from version to version
+
+--------------------------------------------------------------------------------
+		QUICK INSTALL AND TESTING EXAMPLE
+--------------------------------------------------------------------------------
+	
+	# unpack the archives or get them from the repositories
+	./autogen.sh	# only necessary if  configure  file does not exist
+	./configure --prefix=$HOME/local/librsb/
+        # see also ./configure --help for many other options
+	# librsb has been configured
+	make help	# provides information
+	make		# build the library and test programs
+	# librsb has been built
+        make  qtests	# perform brief sanity tests
+        make qqtests	# the same, but with less output
+        make  tests	# perform extended sanity tests
+	ls examples/*.c   # here are editable examples; build them with 'make'
+	ls examples/*.F90 # here are editable examples; build them with 'make'
+	make install	# install to $HOME/local/librsb/
+	# librsb has been installed; now you can write your own programs
+
+	# for instance, try using one of the librsb examples as a model: 
+	mkdir -p ~/rsb-test/ && cp examples/hello.c ~/rsb-test/myrsb.c
+	# adapt hello.c to your needs and recompile:
+	cd ~/rsb-test/
+	export PATH=$PATH:$HOME/local/librsb/bin/
+	gcc `librsb-config --I_opts` -c myrsb.c
+ 	gcc -o myrsb myrsb.o `librsb-config --static --ldflags --extra_libs`
+ 	./myrsb         # run your program
+
+--------------------------------------------------------------------------------
+ 		LIBRARY CONFIGURATION, GENERATION, BUILD 
+--------------------------------------------------------------------------------
+
+ This library consists of C code (C 99), partially generated by M4 macros.
+ The user wishing to build librsb can specify different initial parameters 
+ determining the supported matrix operations, inner explicit loop unrolling
+ factors, available numerical data types and code variations.
+ These parameters have to be specified to the  ./configure  script.
+
+ The M4 macros are used at build time to generate specialized C code.
+ If building from repository sources, an M4 preprocessor is required.
+ Otherwise, it is necessary only when specifying ./configure  options affecting
+ code generation (see ./configure --help).
+ The M4 preprocessor executable can be specified explicitly to ./configure
+ with the M4 environment variable or via the --with-m4 option.
+ After invoking ./configure  and before running 'make' it is possible to invoke
+ 'make cleanall' to make sure that auto-generated code is deleted first.
+ 
+ At configure time, it is very important that the configure script is able to
+ detect the system cache memory hierarchy parameters.
+ In the case it fails, you are encouraged to specify cache parameters by 
+ re-running ./configure  and setting the --with-memhinfo  option.
+ For instance:
+    --with-memhinfo=L2:4/64/512K,L1:8/64/24K 
+ These values need not be exact: they can be approximate.
+ Yet they may be critical to library performance; for this reason you are
+ allowed to override this default in a variety of ways.
+ Read further to get a description of the memory hierarchy info string format.
+
+ If you want to build Fortran examples, be sure of invoking ./configure with the
+ --enable-fortran-examples option.  You can specify the desired Fortran compiler
+ and compilation flags via the FC and FCFLAGS variables.
+
+ Set the CPPFLAGS variable at configure time to provide additional compilation
+ flags; e.g. to let configure detect necessary headers in a non-standard location.
+ Similarly, the LDFLAGS variable can be set to contain link time options; so 
+ you can use it to specify libraries to be linked to librsb examples.
+ Invoke ./configure --help  for details of other relevant environment variables.
+ 
+ After ./configure  you will see information about the current build options
+ and if satisfied, invoke 'make' to build the library and the examples.
+
+ To check the library's consistency, run:
+
+   make qtests # takes a short time
+or
+   make tests  # takes longer, more complete
+ 
+ If these tests terminate with an error code, it is highly likely that it has
+ been caused by a bug in librsb, so please tell us (see BUGS).
+
+--------------------------------------------------------------------------------
+		INSTALLATION, USAGE
+--------------------------------------------------------------------------------
+ 
+ Once built, the library can be installed with:
+
+	su -c 'make install'	#'make install' installs the library system-wide
+
+ This installs header files, binary library files, and the librsb-config
+ program.
+ Then, application C programs should include the rsb.h header file with
+	#include <rsb.h>
+ and be compiled using include options as generated by the output of 
+  	`librsb-config --I_opts`.
+
+ To link to the librsb.a static library file and its dependencies one can use 
+ the output of `librsb-config --static --ldflags --extra_libs`.
+ 
+ Only static libraries are built currently.
+
+ If you wish to use the library without installing it in the system directories,
+ make sure to include the <rsb.h> header file and link to the librsb.a library
+ and all the necessary additional libraries.  
+
+ Users of pkg-config can manually copy the librsb.pc file to the appropriate
+ directory to use pkg-config in a way similar to librsb-config.
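+
+ As a minimal illustrative sketch (not a complete application), the following
+ C program only initializes and finalizes the library; compile and link it
+ with the librsb-config based flags shown above:
+
+	/* minimal.c */
+	#include <rsb.h>
+
+	int main(void)
+	{
+		if (rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
+			return 1;
+		/* ... assemble matrices and compute here ... */
+		return (rsb_lib_exit(RSB_NULL_EXIT_OPTIONS) == RSB_ERR_NO_ERROR)
+			? 0 : 1;
+	}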
+
+--------------------------------------------------------------------------------
+		EXECUTION AND ENVIRONMENT VARIABLES
+--------------------------------------------------------------------------------
+ 
+ By default, the only environment variable read by librsb is
+ RSB_USER_SET_MEM_HIERARCHY_INFO; it overrides configure-time and
+ auto-detected settings about the memory hierarchy.
+
+ Its value is specified as <n> concatenated strings of the form:
+	 L<l>:<a_l>/<b_l>/<c_l>
+ These strings are separated by a comma (","), and each of them is made
+ up of substrings where:
+   <n> is the height of the cache memory hierarchy, from 1 upwards.
+   <l> is the cache level, from 1 upwards.
+   <a_l> is the cache associativity
+   <b_l> is the cache block size (cache line length)
+   <c_l> is the cache capacity (size)
+
+ The <a_l>, <b_l>, <c_l> substrings consist of an integer number with an
+ optional multiplier character among {K,M,G} (to specify respectively 2^10,
+ 2^20 or 2^30).
+ Any value is permitted, as long as it is positive. Higher level cache
+ capacities are required to be larger than lower level ones.
+ Example strings and usage in the BASH shell:
+  RSB_USER_SET_MEM_HIERARCHY_INFO="L2:4/64/512K,L1:8/64/32K"  <your program>
+  RSB_USER_SET_MEM_HIERARCHY_INFO="L1:8/128/2M"  <your program>
+
+ You may explicitly set this environment variable to fine-tune the library
+ operation.
+ If not doing so, runtime detection will be attempted; if this fails,
+ a configure-time detected value will be used.
+ In some cases the configure time detection fails (e.g.: on very recent
+ systems); this is not a fault of librsb but rather of the underlying
+ environment.
+
+ A default value for this memory hierarchy info string can be set at configure
+ time by using the  --with-memhinfo  configure option.
+
+ If you don't know values for these parameters, you can run the
+  ./scripts/linux-sys-cache.sh 
+ script to try to get a guess on a Linux system.
+ On other systems, please consult the available documentation.
+ E.g.: On Mac OS 10.6 it is possible to get this information by invoking
+  "sysctl -a | grep cache".
+  
+ The librsb library achieves parallelism by using OpenMP.
+ Even though librsb does not directly read any OpenMP environment variable,
+ it is still affected by them (e.g. the OMP_NUM_THREADS environment variable
+ specifying the number of parallel threads).
+ Please consult your compiler's OpenMP implementation documentation
+ for more information.
+
+--------------------------------------------------------------------------------
+		DOCUMENTATION, EXAMPLES AND PROGRAMMING GUIDELINES
+--------------------------------------------------------------------------------
+
+ The API is entirely specified in the <rsb.h> header file. This is the only
+ header file the application developer should ever include to use the library.
+ 
+ The complete API documentation is generated by the doxygen tool in the doc
+ directory in both HTML and man formats, and gets installed with 'make install'.
+ If you wish not to use doxygen (or don't have it) you can skip documentation
+ generation by adding the "DOXYGEN=false" argument to ./configure .
+
+ There are a number of working example programs in the "examples" directory.
+
+ The library only declares symbols prefixed by `rsb_'.
+ These symbols include those declared in rsb.h, as well as internal,
+ undocumented service functions and variables.
+ Therefore, to avoid name clashes, you should avoid declaring `rsb_' prefixed
+ identifiers in programs using librsb.  
+
+ If configure has been invoked with the --enable-sparse-blas-interface, then
+ the corresponding `BLAS_' and `blas_' prefixed symbols will also be built.
+
+ If after building the library, you find that it exports symbols with different
+ prefixes (besides the system specific, compiler-generated symbols), please 
+ report this to us -- it is a bug.
+
+--------------------------------------------------------------------------------
+	CONFIGURE, BUILD AND BENCHMARK EXAMPLE
+--------------------------------------------------------------------------------
+
+ First configure and build with reasonable options, such as (gcc, 64 bit):
+
+  export MKLROOT=/opt/intel/mkl
+  ./configure --disable-debug CC=gcc FC=gfortran CFLAGS=-O3 \
+    --with-mkl="-static -L${MKLROOT}/lib/intel64 \
+    -Wl,--start-group,-lmkl_intel_lp64,-lmkl_gnu_thread,-lmkl_core,--end-group \
+    -fopenmp -lpthread"                        \
+    --with-memhinfo=L2:4/64/512K,L1:8/64/24K   \
+    --with-mkl-include=/opt/intel/mkl/include/ \
+    --prefix=/opt/librsb-optimized/            \
+    --enable-matrix-types="double,double complex"
+
+ Or (icc, 64 bit):
+
+  export MKLROOT=/opt/intel/mkl
+ ./configure --disable-debug CC=icc FC=ifort CFLAGS=-O3 \
+ --with-mkl="-static -L${MKLROOT}/lib/intel64 -openmp -lpthread \
+ -Wl,--start-group,-lmkl_intel_lp64,-lmkl_intel_thread,-lmkl_core,--end-group" \
+ --with-memhinfo=L2:4/64/512K,L1:8/64/24K   \
+ --with-mkl-include=/opt/intel/mkl/include/ \
+ --prefix=/opt/librsb-optimized/            \
+ --enable-matrix-types="double,double complex"
+
+  or (32 bit):
+
+  ./configure --disable-debug CC=gcc FC=gfortran CFLAGS=-O3 \
+   --with-memhinfo=L2:4/64/512K,L1:8/64/24K     \
+   --with-mkl="-static -L/opt/intel/mkl/lib/ia32/ -lmkl_solver \
+   -Wl,--start-group,-lmkl_intel,-lmkl_gnu_thread,-lmkl_core,--end-group \
+   -fopenmp -lpthread" \
+   --with-mkl-include=/opt/intel/mkl/include/   \
+   --prefix=/opt/librsb-optimized/              \
+   --enable-matrix-types="double,double complex"
+
+and then
+
+  make       # builds library and test programs
+  make tests # optional
+
+ In the above example, optional use of the MKL library is configured in.
+ However, librsb does not use MKL in any way: it is only used by the
+ "rsbench" test program.
+
+ Say you want to benchmark the library with a quick SpMV speed test.
+ You have a valid Matrix Market file containing a matrix, A.mtx,
+ and you want to benchmark librsb with it on 1 and 4 cores, performing
+ 100 sparse matrix-vector multiply iterations.
+ Then do a serial test first:
+ ./rsbench -oa -Ob -f A.mtx -qH -R -n1 -t100 --verbose 
+ and then a parallel test:
+ OMP_NUM_THREADS=4 ./rsbench -oa -Ob -f A.mtx -qH -R -n1,4 -t100 --verbose
+
+ You can add option --compare-competitors to enable comparisons to the MKL,
+ provided it has been configured in.
+ If not specifying a type (argument to the -T option), the default will be
+ used.
+ If configured in at build time, choices may be -T D (where D is the BLAS
+ prefix for "double"), -T Z (Z stands for "double complex") and so on.
+ You can specify "-T :" to mean all of the configured types.
+ Output of 'rsbench' shall be easy to understand or parse.
+
+ For more options and configure information, invoke:
+
+ ./rsbench --help
+
+ To get the built in defaults, invoke the following:
+ ./rsbench -oa -Ob --help
+ ./rsbench --help
+ ./rsbench --version
+ ./rsbench -I
+ ./rsbench -C
+
+ An example Matrix Market matrix file contents:
+
+%%MatrixMarket matrix coordinate pattern general
+% This is a comment.
+% See other examples in the distributed *.mtx files.
+2 2 3
+1 1
+2 1
+2 2
+
+--------------------------------------------------------------------------------
+		COMPATIBILITY
+--------------------------------------------------------------------------------
+ 
+ This library has been built and tested on Unix machines.
+ Microsoft Windows users might try building librsb under the Cygwin environment.
+
+ Some tricks may have to be used on IBM AIX. For instance, adding the
+ --without-xdr or the --without-zlib switch to ./configure.
+ Your mileage may vary.
+ AIX's "make" program may give problems; use the GNU version "gmake" instead;
+ the same shall be done with the M4 interpreter.
+
+ This library was developed mostly on Debian Linux and using only free software.
+
+--------------------------------------------------------------------------------
+		FAQ
+--------------------------------------------------------------------------------
+
+ Q: Can you provide me good configure defaults for an optimized build ?
+ A: Default './configure' options are appropriate for an optimized build.
+    You will need to choose good compilation flags.
+    A good starting point for gcc is ./configure CC=gcc CFLAGS='-O3'. 
+    For more, consult your compiler documentation (e.g. man gcc, man icc),
+    and learn about the best flags for your specific platform.
+    Stripping your executable (make install-strip for librsb's rsbench) may
+    help.
+
+ Q: I am a beginner and I wish librsb to be very verbose when I invoke
+    library interface functions incorrectly.
+    Can you provide me good configure defaults for such a "debug" build ?
+ A: Yes: ./scripts/configure_for_debug.sh
+
+ Q: I have machine X, compiler Y, compiling flags Z; is SpMV performance P with
+    matrix M good ?
+ A: In general, hard to tell. However you can `make hinfo.log' and send me 
+    (see CONTACTS) the hinfo.log file and your matrix in Matrix Market format
+    (well, please don't send matrices by email but rather upload them
+    somewhere on the web and send a URL to them).
+    The hinfo.log file will contain useful compile and machine information.
+    Then I *may* get an idea about the performance you should get with that
+    matrix on that computer.
+
+ Q: What is the Sparse BLAS ?
+ A: It's a programming interface specification:
+    [sparseblas_2001]:
+    BLAS Technical Forum Standard, Chapter 3, Sparse BLAS
+    http://www.netlib.org/blas/blast-forum/chapter3.pdf
+    [dhp_2002]:
+    An Overview of the Sparse Basic Linear Algebra Subprograms:
+     The New Standard from the BLAS Technical Forum
+    IAIN S. DUFF, CERFACS and Rutherford Appleton Laboratory
+    MICHAEL A. HEROUX, Sandia National Laboratories
+    ROLDAN POZO, National Institute of Standards and Technology
+    [dv_2002]:
+    Algorithm 818:
+     A Reference Model Implementation of the Sparse BLAS in Fortran 95
+    IAIN S. DUFF, CERFACS, France and Atlas Centre, RAL, England
+    CHRISTOF VÖMEL, CERFACS, France
+
+ Q: Is there an easy way to profile librsb usage in my application ?
+ A: Yes: build with --enable-librsb-stats and extract time elapsed in librsb
+    via e.g.: RSB_REINIT_SINGLE_VALUE_GET(RSB_IO_WANT_LIBRSB_ETIME,&dt,errval).
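+    For instance, a sketch (assuming a build configured with
+    --enable-librsb-stats; rsb_time_t and rsb_err_t are declared in rsb.h,
+    and printf needs stdio.h):
+	rsb_time_t dt = 0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_REINIT_SINGLE_VALUE_GET(RSB_IO_WANT_LIBRSB_ETIME,&dt,errval);
+	if (errval == RSB_ERR_NO_ERROR)
+		printf("time spent in librsb: %lg s\n", dt);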
+
+ Q: Why another sparse matrix library ?
+ A: This library is the fruit of the author's PhD work, focused on researching
+    improved multi threaded and cache friendly matrix storage schemes for
+    PSBLAS.
+
+ Q: What are the key features of this library when compared to other ones ?
+ A: Recursive storage, a code generator, parallel BLAS operations
+    (including matrix assembly, matrix-matrix multiplication, transposed
+     matrix-vector multiply), a battery of tests, a Sparse BLAS
+     interface and a free software licensing.
+ 
+ Q: How do I detect librsb from my package's configure script ?
+ A: Add to your configure.ac:
+    AH_TEMPLATE([HAVE_LIBRSB])
+    AC_CHECK_FUNC([rsb_lib_init],AC_DEFINE([HAVE_LIBRSB],[1],[librsb detected]))
+    then rerun autoconf and invoke configure as:
+    ./configure	CFLAGS=`librsb-config   --cflags` \
+                LDFLAGS=`librsb-config  --ldflags --extra_libs`
+   
+ Q: How is correctness checked in the librsb test suite ?
+ A: Different linear system generators and tester programs are used to
+    brute-force-test as many routines and input combinations as possible.
+    See 'make tests'; and run/edit the following tester programs if you are
+    curious:
+    test -f sbtc && ./sbtc||true # Sparse BLAS checker (C interface based)
+    test -f sbtf && ./sbtf||true # Sparse BLAS checker (Fortran interface, opt.)
+    ./rsbench -Q 10.0 # 10 seconds brute-test
+
+ Q: Why did you write the library in C and not in C++ ?
+ A: Mainly...
+    Because C can be easily interfaced with C++ and Fortran.
+    Because using a debugger under full fledged C++ is a headache.
+    Because of the C's 'restrict' keyword.
+    
+ Q: Why did you use C and not Fortran ?
+ A: This library is slightly system-oriented, and interfacing with system
+    calls is much easier in C. Also, C's pointer arithmetic plays a crucial role.
+
+ Q: Is there a quick and easy way to perform an artificial performance
+    test with huge matrices without having to program ?
+ A: Sure. The following lines generate matrices of a specified dimension.
+    You can play with them by changing the matrix size, for instance. 
+    ./rsbench  -oa -Ob -qH -R --dense 1                    --verbose
+    ./rsbench  -oa -Ob -qH -R --dense 1024                 --verbose
+    ./rsbench  -oa -Ob -qH -R --lower 1024 --as-symmetric  --verbose
+    ./rsbench  -oa -Ob -qH -R --dense 1000 --gen-lband 10 --gen-uband 3
+    ./rsbench  -oa -Ob -qH -R --generate-diagonal 1000
+
+ Q: I've found a bug! What should I do ?
+ A: First please make sure it is really a bug: read the documentation, check,
+    double check.
+    Then you can write a description of the problem, with a minimal program
+    source code and data to replicate it.
+    Then you can jump to the CONTACTS details section.
+
+ Q: Is it possible to build matrices of, say, long double or 
+    long double complex or int or short int ?
+ A: Yes, it's not a problem. You should invoke the configure script accordingly,
+    e.g.: --enable-matrix-types="long double".
+    If this breaks code compilation, feel free to contact the author
+    (see the CONTACTS section).
+
+ Q: Is there a way to compare the performance of this library to some other
+    high performance libraries ?
+ A: If you build rsbench with support for the Intel MKL library, then you
+    can do performance comparisons with e.g.:
+    # ./rsbench -oa -Ob -qH -R --gen-diag 100 --compare-competitors --verbose
+    or use the following script:
+    # bench/dense.sh ' '
+    Or even better, check out the --write-performance-record feature ; for 
+    details see the output of:
+    # rsbench -oa -Ob --help
+
+ Q: Is there a non-threaded (serial) version of librsb ?
+ A: Yes: you can configure the library to work serially (with no OpenMP).
+    See ./configure --help. 
+
+ Q: Is this library thread-safe ?
+ A: Probably yes: no static buffers are being used, and reentrant C standard
+    library functions are invoked.
+
+ Q: Does the librsb library run on GPUs or Intel MIC ?
+ A: It has been built on Intel MIC once, but not tested.
+
+ Q: I built and compiled the code without enabling any BLAS type (S,D,C,Z), 
+     and both `make qtests' and `make tests' ran successfully outside the
+     ./examples directory, but `make tests' breaks within ./examples directory.
+ A: Well, the tests passed because testing of the examples was simply skipped.
+    The example programs need at least one of these types to work.
+
+ Q: At build time I get many "unused variable" warnings. Why ? 
+ A: librsb accommodates many code generation and build time configuration
+    options. Some combinations may turn off compilation of certain parts of the
+    code, leading some variables to be unused.
+
+ Q: Are there papers to read about the RSB format and algorithms ?
+ A: Yes, the following:
+
+    Michele Martone
+    Efficient Multithreaded Untransposed, Transposed or Symmetric Sparse
+    Matrix-Vector Multiplication with the Recursive Sparse Blocks Format
+    Parallel Computing 40(7): 251-270 (2014)
+    http://dx.doi.org/10.1016/j.parco.2014.03.008
+
+    Michele Martone
+    Cache and Energy Efficiency of Sparse Matrix-Vector Multiplication for
+    Different BLAS Numerical Types with the RSB Format
+    Proceedings of the ParCo 2013 conference, September 2013, Munich, Germany
+    PARCO 2013: 193-202
+    http://dx.doi.org/10.3233/978-1-61499-381-0-193
+
+    Michele Martone, Marcin Paprzycki, Salvatore Filippone: An Improved Sparse
+    Matrix-Vector Multiply Based on Recursive Sparse Blocks Layout.
+    LSSC 2011: 606-613
+    http://dx.doi.org/10.1007/978-3-642-29843-1_69
+
+    Michele Martone, Salvatore Filippone, Salvatore Tucci, Marcin Paprzycki,
+    Maria Ganzha: Utilizing Recursive Storage in Sparse Matrix-Vector
+    Multiplication - Preliminary Considerations. CATA 2010: 300-305
+    
+    Michele Martone, Salvatore Filippone, Marcin Paprzycki, Salvatore Tucci:
+    Assembling Recursively Stored Sparse Matrices. IMCSIT 2010: 317-325
+    http://www.proceedings2010.imcsit.org/pliks/205.pdf
+
+    Michele Martone, Salvatore Filippone, Pawel Gepner, Marcin Paprzycki,
+    Salvatore Tucci: Use of Hybrid Recursive CSR/COO Data Structures in Sparse
+    Matrices-Vector Multiplication. IMCSIT 2010: 327-335
+    http://dx.doi.org/10.1109/SYNASC.2010.72
+
+    Michele Martone, Salvatore Filippone, Marcin Paprzycki, Salvatore Tucci:
+    On BLAS Operations with Recursively Stored Sparse Matrices.
+    SYNASC 2010: 49-56
+    http://dx.doi.org/10.1109/SYNASC.2010.72
+
+    Michele Martone, Salvatore Filippone, Marcin Paprzycki, Salvatore Tucci:
+    On the Usage of 16 Bit Indices in Recursively Stored Sparse Matrices.
+    SYNASC 2010: 57-64
+    http://dx.doi.org/10.1109/SYNASC.2010.77
+
+ Q: I have M4-related problems on IBM SP5/SP6 (my M4 preprocessor tries to
+    regenerate code but it fails). What should I do ?
+ A: A fix is to use a GNU M4 implementation 
+    e.g.: M4=/opt/freeware/bin/m4 ./configure ...
+    e.g.: M4=gm4 ./configure ...
+    or execute:
+    touch *.h ; touch *.c ; make
+    Or "./configure; make"  the library on a different machine, then build 
+    a sources archive with `make dist', and use it on the original machine.
+   
+--------------------------------------------------------------------------------
+	POSSIBLE / POTENTIAL FUTURE FEATURES / ENHANCEMENTS
+--------------------------------------------------------------------------------
+
+ * auxiliary functions for numerical vectors
+ * CSC,BCSR,BCSC and other formats
+ * (optional) loop unrolled kernels for BCSR/BCSC
+ * performance prediction/estimation facilities (experimental)
+ * types of the blocks, nonzeroes, and coordinates indices can be user specified
+ * a code generator for BCSR, BCSC, VBR, VBC kernels
+ * full support for BCSR, BCSC storages 
+ * automatic matrix blocking selection (for BCSR/BCSC) 
+ * an arbitrary subset of block size kernels can be specified to be generated
+ * full support for VBR,VBC storages
+ * recursive storage variants of blocked formats (non uniform blocking)
+ * more auto-tuning and prediction control
+ * use of assembly functions or intrinsics
+ * the use of context variables (scenarios with multiple libraries using
+   librsb completely independently at the same time are not supported)
+ * enhanced in-place matrix assembly functions (useful for really huge matrices)
+
+--------------------------------------------------------------------------------
+   		ABOUT THE INTERNALS
+--------------------------------------------------------------------------------
+
+ The following good practices are being followed during development of librsb.
+
+ - only symbols beginning with `rsb_' or `blas_' are being exported.
+ - internal functions are usually prefixed by `rsb__'.
+ - no library internal function shall call any API function.
+
+ If by using/inspecting the code you notice any of the above is being violated,
+ please report about it.
+
+--------------------------------------------------------------------------------
+		BUGS
+--------------------------------------------------------------------------------
+
+ If you encounter any bug (e.g.: mismatch of library/program behaviour and
+ documentation), please let me know about it by sending me (see CONTACTS) all
+ relevant information (code snippet, originating data/matrix, config.log), in
+ such a way that I can replicate the bug behaviour on my machines.
+ If the bug occurred when using rsb interfaced to some proprietary library,
+ please make sure the bug is in librsb.
+
+ It may be of great help to you to build the library with the debug compile
+ options on (e.g.: CFLAGS='-O0 -ggdb'), and with appropriate library verbosity
+ levels (--enable-internals-error-verbosity, --enable-interface-error-verbosity
+ and --enable-io-level  options to configure) to better understand the program 
+ behaviour before sending a report.
+
+ Make sure you have the latest version of the library when reporting a bug. 
+
+--------------------------------------------------------------------------------
+		CONTACTS
+--------------------------------------------------------------------------------
+
+ You are welcome to contact the librsb author:
+
+  Michele Martone < michelemartone AT users DOT sourceforge DOT net >
+ 
+ Please specify "librsb" in the "Subject:" line of your emails.
+
+ More information and downloads on  http://sourceforge.net/projects/librsb
+
+ Mailing list: https://lists.sourceforge.net/lists/listinfo/librsb-users
+ 
+--------------------------------------------------------------------------------
+		CREDITS	(in alphabetical order)
+--------------------------------------------------------------------------------
+
+For librsb-1.2:
+ Marco Atzeri provided testing, patches to build librsb under cygwin and
+ spotted a few bugs.
+ Mu-Chu Lee provided a patch to fix sorting code crashing with > 10^9 nnz.
+
+For librsb-1.1:
+ Gilles Gouaillardet provided a patch for OpenMP-encapsulated I/O.
+ Marco Restelli provided with testing and detailed comments and suggestions.
+
+For librsb-1.0:
+ Francis Casson helped with testing and documentation reviewing during the first
+ release.
+ Nitya Hariharan helped revising early versions of the documentation.
+
+--------------------------------------------------------------------------------
+		LICENSE
+--------------------------------------------------------------------------------
+
+ This software is distributed under the terms of the GNU Lesser General Public
+ License version 3 (LGPLv3) or later.
+ See the COPYING file for a copy of the LGPLv3.
+
+ librsb is free software.
+ To support it, consider writing "thank you" to the author and acknowledging use
+ of librsb in your publications. That would be much appreciated.
+
+--------------------------------------------------------------------------------
+</pre><p><a class="anchor" id="examples_section"></a></p>
+<p>For a quick startup, consider the following two programs.</p>
+<p>The first, using the internal RSB interface: </p>
+<div class="fragment"><div class="line"><span class="comment">/*</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">Copyright (C) 2008-2015 Michele Martone</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">This file is part of librsb.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">librsb is free software; you can redistribute it and/or modify it</span></div>
+<div class="line"><span class="comment">under the terms of the GNU Lesser General Public License as published</span></div>
+<div class="line"><span class="comment">by the Free Software Foundation; either version 3 of the License, or</span></div>
+<div class="line"><span class="comment">(at your option) any later version.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">librsb is distributed in the hope that it will be useful, but WITHOUT</span></div>
+<div class="line"><span class="comment">ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or</span></div>
+<div class="line"><span class="comment">FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public</span></div>
+<div class="line"><span class="comment">License for more details.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">You should have received a copy of the GNU Lesser General Public</span></div>
+<div class="line"><span class="comment">License along with librsb; see the file COPYING.</span></div>
+<div class="line"><span class="comment">If not, see <http://www.gnu.org/licenses/>.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">*/</span><span class="comment"></span></div>
+<div class="line"><span class="comment">/*!</span></div>
+<div class="line"><span class="comment"> \ingroup rsb_doc_examples</span></div>
+<div class="line"><span class="comment"> @file</span></div>
+<div class="line"><span class="comment"> @author Michele Martone</span></div>
+<div class="line"><span class="comment"> @brief This is a first "hello RSB" example program.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment"> \include hello.c</span></div>
+<div class="line"><span class="comment">*/</span></div>
+<div class="line"><span class="preprocessor">#include <<a class="code" href="rsb_8h.html" title="This file declares the user interface functions and data structures for the librsb library...">rsb.h</a>></span>        <span class="comment">/* librsb header to include */</span></div>
+<div class="line"><span class="preprocessor">#include <stdio.h></span>      <span class="comment">/* printf() */</span></div>
+<div class="line"></div>
+<div class="line"><span class="keywordtype">int</span> main(<span class="keyword">const</span> <span class="keywordtype">int</span> argc, <span class="keywordtype">char</span> * <span class="keyword">const</span> argv[])</div>
+<div class="line">{<span class="comment"></span></div>
+<div class="line"><span class="comment">        /*!</span></div>
+<div class="line"><span class="comment">          A Hello-RSB program.</span></div>
+<div class="line"><span class="comment">         </span></div>
+<div class="line"><span class="comment">          This program shows how to use the rsb.h interface correctly to:</span></div>
+<div class="line"><span class="comment">         </span></div>
+<div class="line"><span class="comment">          - initialize the library using #rsb_lib_init()</span></div>
+<div class="line"><span class="comment">          - set library options using #rsb_lib_set_opt()</span></div>
+<div class="line"><span class="comment">          - revert such changes </span></div>
+<div class="line"><span class="comment">          - allocate (build) a single sparse matrix in the RSB format</span></div>
+<div class="line"><span class="comment">            using #rsb_mtx_alloc_from_coo_const()</span></div>
+<div class="line"><span class="comment">          - prints information obtained via #rsb_mtx_get_info_str()</span></div>
+<div class="line"><span class="comment">          - multiply the matrix times a vector using #rsb_spmv()</span></div>
+<div class="line"><span class="comment">          - deallocate the matrix using #rsb_mtx_free() </span></div>
+<div class="line"><span class="comment">          - finalize the library using #rsb_lib_exit(RSB_NULL_EXIT_OPTIONS) </span></div>
+<div class="line"><span class="comment">         </span></div>
+<div class="line"><span class="comment">          In this example, we use #RSB_DEFAULT_TYPE as matrix type.</span></div>
+<div class="line"><span class="comment">          This type depends on what was configured at library build time.</span></div>
+<div class="line"><span class="comment">         * */</span></div>
+<div class="line">        <span class="keyword">struct </span>rsb_mtx_t *mtxAp = NULL; <span class="comment">/* matrix structure pointer */</span></div>
+<div class="line">        <span class="keyword">const</span> <span class="keywordtype">int</span> bs = <a class="code" href="rsb_8h.html#a3579d00f3b97cd569707f7c62e462322">RSB_DEFAULT_BLOCKING</a>;</div>
+<div class="line">        <span class="keyword">const</span> <span class="keywordtype">int</span> brA = bs, bcA = bs;</div>
+<div class="line">        <span class="keyword">const</span> <a class="code" href="rsb__types_8h.html#aa5e96f00841ec8f4f3ca1ff0bf1b5bbd">RSB_DEFAULT_TYPE</a> one = 1;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode = <a class="code" href="rsb__types_8h.html#a56fc5ef14266266227797621e0a1e217">RSB_NUMERICAL_TYPE_DEFAULT</a>;</div>
+<div class="line">        <a class="code" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> errval = <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>;</div>
+<div class="line">        <span class="keyword">const</span> <a class="code" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> nnzA = 4;           <span class="comment">/* matrix nonzeroes count */</span></div>
+<div class="line">        <span class="keyword">const</span> <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> nrA = 3;            <span class="comment">/* matrix rows count */</span></div>
+<div class="line">        <span class="keyword">const</span> <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> ncA = 3;            <span class="comment">/* matrix columns count */</span></div>
+<div class="line">        <span class="comment">/* nonzero row indices coordinates: */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> IA[] = {0,1,2,2};</div>
+<div class="line">        <span class="comment">/* nonzero column indices coordinates: */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> JA[] = {0,1,2,2};</div>
+<div class="line">        <a class="code" href="rsb__types_8h.html#aa5e96f00841ec8f4f3ca1ff0bf1b5bbd">RSB_DEFAULT_TYPE</a> VA[] = {11,22,32,1};<span class="comment">/* values of nonzeroes */</span></div>
+<div class="line">        <a class="code" href="rsb__types_8h.html#aa5e96f00841ec8f4f3ca1ff0bf1b5bbd">RSB_DEFAULT_TYPE</a> X[] = { 0, 0, 0 };     <span class="comment">/* X vector's array */</span></div>
+<div class="line">        <span class="keyword">const</span> <a class="code" href="rsb__types_8h.html#aa5e96f00841ec8f4f3ca1ff0bf1b5bbd">RSB_DEFAULT_TYPE</a> B[] = { -1, -2, -5 }; <span class="comment">/* B vector's array */</span></div>
+<div class="line">        <span class="keywordtype">char</span> ib[200];</div>
+<div class="line"></div>
+<div class="line">        printf(<span class="stringliteral">"Hello, RSB!\n"</span>);</div>
+<div class="line">        printf(<span class="stringliteral">"Initializing the library...\n"</span>);</div>
+<div class="line">        <span class="keywordflow">if</span>((errval = <a class="code" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>(<a class="code" href="rsb_8h.html#add105c42e570c5c269680d437f8c51e2">RSB_NULL_INIT_OPTIONS</a>)) != </div>
+<div class="line">                        <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)</div>
+<div class="line">        {</div>
+<div class="line">                printf(<span class="stringliteral">"Error initializing the library!\n"</span>);</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly initialized the library.\n"</span>);</div>
+<div class="line"></div>
+<div class="line">        printf(<span class="stringliteral">"Attempting to set the"</span></div>
+<div class="line">               <span class="stringliteral">" RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE library option.\n"</span>);</div>
+<div class="line">        {</div>
+<div class="line">                <a class="code" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a> evi=1; </div>
+<div class="line">                <span class="comment">/* Setting a single optional library parameter. */</span></div>
+<div class="line">                errval = <a class="code" href="rsb_8h.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_lib_set_opt</a>(</div>
+<div class="line">                        <a class="code" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a56c0c6849135ce5fa9edd7907ab3e0cb">RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE</a>, &evi);</div>
+<div class="line">                <span class="keywordflow">if</span>(errval != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)</div>
+<div class="line">                {</div>
+<div class="line">                        <span class="keywordtype">char</span> errbuf[256];</div>
+<div class="line">                        <a class="code" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r</a>(errval,&errbuf[0],<span class="keyword">sizeof</span>(errbuf));</div>
+<div class="line">                        printf(<span class="stringliteral">"Failed setting the"</span></div>
+<div class="line">                        <span class="stringliteral">" RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE"</span></div>
+<div class="line">                        <span class="stringliteral">" library option (reason string:\n%s).\n"</span>,errbuf);</div>
+<div class="line">                        <span class="keywordflow">if</span>(errval&<a class="code" href="rsb_8h.html#a4d8eb05488b681b75449f64c418b8893">RSB_ERRS_UNSUPPORTED_FEATURES</a>)</div>
+<div class="line">                        {</div>
+<div class="line">                          printf(<span class="stringliteral">"This error may be safely ignored.\n"</span>);</div>
+<div class="line">                        }</div>
+<div class="line">                        <span class="keywordflow">else</span></div>
+<div class="line">                        {</div>
+<div class="line">                          printf(<span class="stringliteral">"Some unexpected error occurred!\n"</span>);</div>
+<div class="line">                          <span class="keywordflow">goto</span> err;</div>
+<div class="line">                        }</div>
+<div class="line">                }</div>
+<div class="line">                <span class="keywordflow">else</span></div>
+<div class="line">                {</div>
+<div class="line">                        printf(<span class="stringliteral">"Setting back the "</span></div>
+<div class="line">                                <span class="stringliteral">"RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE"</span></div>
+<div class="line">                                <span class="stringliteral">" library option.\n"</span>);</div>
+<div class="line">                        evi = 0;</div>
+<div class="line">                        errval = <a class="code" href="rsb_8h.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_lib_set_opt</a>(<a class="code" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a56c0c6849135ce5fa9edd7907ab3e0cb">RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE</a>,</div>
+<div class="line">                                        &evi);</div>
+<div class="line">                        errval = <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>;</div>
+<div class="line">                }</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        mtxAp = <a class="code" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_from_coo_const</a>(</div>
+<div class="line">                VA,IA,JA,nnzA,typecode,nrA,ncA,brA,bcA,</div>
+<div class="line">                <a class="code" href="rsb_8h.html#a0ea7640214ee34c87e483c475b15827d">RSB_FLAG_NOFLAGS</a>    <span class="comment">/* default format will be chosen */</span></div>
+<div class="line">                |<a class="code" href="rsb_8h.html#afd1b39c625f4249cd32fccea38957f97">RSB_FLAG_DUPLICATES_SUM</a><span class="comment">/* duplicates will be summed */</span></div>
+<div class="line">                        ,&errval);</div>
+<div class="line">        <span class="keywordflow">if</span>((!mtxAp) || (errval != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>))</div>
+<div class="line">        {</div>
+<div class="line">                printf(<span class="stringliteral">"Error while allocating the matrix!\n"</span>);</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly allocated a matrix.\n"</span>);</div>
+<div class="line">        printf(<span class="stringliteral">"Summary information of the matrix:\n"</span>);</div>
+<div class="line">        <span class="comment">/* print out the matrix summary information  */</span></div>
+<div class="line">        <a class="code" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str</a>(mtxAp,<span class="stringliteral">"RSB_MIF_MATRIX_INFO__TO__CHAR_P"</span>,</div>
+<div class="line">                        ib,<span class="keyword">sizeof</span>(ib));</div>
+<div class="line">        printf(<span class="stringliteral">"%s"</span>,ib);</div>
+<div class="line">        printf(<span class="stringliteral">"\n"</span>);</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">if</span>((errval = </div>
+<div class="line">                <a class="code" href="group__rsb__doc__rsb.html#ga4a16a82d289c76a437915db449553d4d">rsb_spmv</a>(<a class="code" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a>,&one,mtxAp,B,1,&one,X,1))</div>
+<div class="line">                        != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a> )</div>
+<div class="line">        {</div>
+<div class="line">                printf(<span class="stringliteral">"Error performing a multiplication!\n"</span>);</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly performed a SPMV.\n"</span>);</div>
+<div class="line">        <a class="code" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a>(mtxAp);</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly freed the matrix.\n"</span>);</div>
+<div class="line">        <span class="keywordflow">if</span>((errval = <a class="code" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a>(<a class="code" href="rsb_8h.html#a2234a5e51156de6c95c3f8c2951ae09f">RSB_NULL_EXIT_OPTIONS</a>))</div>
+<div class="line">                        != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)</div>
+<div class="line">        {</div>
+<div class="line">                printf(<span class="stringliteral">"Error finalizing the library!\n"</span>);</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly finalized the library.\n"</span>);</div>
+<div class="line">        printf(<span class="stringliteral">"Program terminating with no error.\n"</span>);</div>
+<div class="line">        <span class="keywordflow">return</span> 0;</div>
+<div class="line">err:</div>
+<div class="line">        <a class="code" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror</a>(NULL,errval);</div>
+<div class="line">        printf(<span class="stringliteral">"Program terminating with error.\n"</span>);</div>
+<div class="line">        <span class="keywordflow">return</span> -1;</div>
+<div class="line">}</div>
+<div class="line"></div>
+</div><!-- fragment --><p>And the second, using the Sparse BLAS interface: </p>
+<div class="fragment"><div class="line"><span class="comment">/*</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">Copyright (C) 2008-2015 Michele Martone</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">This file is part of librsb.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">librsb is free software; you can redistribute it and/or modify it</span></div>
+<div class="line"><span class="comment">under the terms of the GNU Lesser General Public License as published</span></div>
+<div class="line"><span class="comment">by the Free Software Foundation; either version 3 of the License, or</span></div>
+<div class="line"><span class="comment">(at your option) any later version.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">librsb is distributed in the hope that it will be useful, but WITHOUT</span></div>
+<div class="line"><span class="comment">ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or</span></div>
+<div class="line"><span class="comment">FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public</span></div>
+<div class="line"><span class="comment">License for more details.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">You should have received a copy of the GNU Lesser General Public</span></div>
+<div class="line"><span class="comment">License along with librsb; see the file COPYING.</span></div>
+<div class="line"><span class="comment">If not, see <http://www.gnu.org/licenses/>.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment">*/</span><span class="comment"></span></div>
+<div class="line"><span class="comment">/*!</span></div>
+<div class="line"><span class="comment"> \ingroup rsb_doc_examples</span></div>
+<div class="line"><span class="comment"> @file</span></div>
+<div class="line"><span class="comment"> @author Michele Martone</span></div>
+<div class="line"><span class="comment"> @brief This is a first "hello RSB" example program using </span></div>
+<div class="line"><span class="comment">        a Sparse BLAS interface.</span></div>
+<div class="line"><span class="comment"></span></div>
+<div class="line"><span class="comment"> \include hello-spblas.c</span></div>
+<div class="line"><span class="comment">*/</span></div>
+<div class="line"><span class="preprocessor">#include <<a class="code" href="rsb_8h.html" title="This file declares the user interface functions and data structures for the librsb library...">rsb.h</a>></span>        <span class="comment">/* for rsb_lib_init */</span></div>
+<div class="line"><span class="preprocessor">#include <<a class="code" href="blas__sparse_8h.html" title="This file specifies the Sparse BLAS interface to librsb. Supported types :(float,double,float complex,double complex) . Unsupported types:() . Level 1 ops :(dot,axpy,ga,gz,sc) . Level 2 ops :(mv,sv) . Level 3 ops :(mm,sm) .">blas_sparse.h</a>></span>        <span class="comment">/* Sparse BLAS on the top of librsb */</span></div>
+<div class="line"><span class="preprocessor">#include <stdio.h></span>      <span class="comment">/* printf */</span></div>
+<div class="line"></div>
+<div class="line"><span class="keywordtype">int</span> main(<span class="keyword">const</span> <span class="keywordtype">int</span> argc, <span class="keywordtype">char</span> * <span class="keyword">const</span> argv[])</div>
+<div class="line">{<span class="comment"></span></div>
+<div class="line"><span class="comment">        /*!</span></div>
+<div class="line"><span class="comment">         * A Hello/Sparse BLAS program.</span></div>
+<div class="line"><span class="comment">         *</span></div>
+<div class="line"><span class="comment">         * This program shows how to use the blas_sparse.h</span></div>
+<div class="line"><span class="comment">         * interface correctly to:</span></div>
+<div class="line"><span class="comment">         *</span></div>
+<div class="line"><span class="comment">         * - initialize the library using #rsb_lib_init()</span></div>
+<div class="line"><span class="comment">         * - allocate (build) a single sparse matrix in the RSB</span></div>
+<div class="line"><span class="comment">         *   format using #BLAS_duscr_begin()/#BLAS_duscr_insert_entries()</span></div>
+<div class="line"><span class="comment">         *   /#BLAS_duscr_end()</span></div>
+<div class="line"><span class="comment">         * - extract one matrix element with #BLAS_dusget_element()</span></div>
+<div class="line"><span class="comment">         * - multiply the matrix times a vector using #BLAS_dusmv()</span></div>
+<div class="line"><span class="comment">         * - deallocate the matrix using #BLAS_usds() </span></div>
+<div class="line"><span class="comment">         * - finalize the library using</span></div>
+<div class="line"><span class="comment">         *   #rsb_lib_exit(#RSB_NULL_EXIT_OPTIONS) </span></div>
+<div class="line"><span class="comment">        */</span></div>
+<div class="line"><span class="preprocessor">#ifndef RSB_NUMERICAL_TYPE_DOUBLE   </span></div>
+<div class="line"><span class="preprocessor"></span>        printf(<span class="stringliteral">"'double' type configured out."</span></div>
+<div class="line">        <span class="stringliteral">" Please reconfigure the library with it and recompile.\n"</span>);</div>
+<div class="line">        <span class="keywordflow">return</span> 0;</div>
+<div class="line"><span class="preprocessor">#else </span><span class="comment">/* RSB_NUMERICAL_TYPE_DOUBLE */</span><span class="preprocessor"></span></div>
+<div class="line"><span class="preprocessor"></span>        <a class="code" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A = <a class="code" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_invalid_handle</a>; <span class="comment">/* handle for A */</span></div>
+<div class="line">        <span class="keyword">const</span> <span class="keywordtype">int</span> nnz = 4;      <span class="comment">/* number of nonzeroes of matrix A */</span></div>
+<div class="line">        <span class="keyword">const</span> <span class="keywordtype">int</span>  nr = 3;      <span class="comment">/* number of A's rows */</span></div>
+<div class="line">        <span class="keyword">const</span> <span class="keywordtype">int</span>  nc = 3;      <span class="comment">/* number of A's columns */</span></div>
+<div class="line">        <span class="comment">/* A's nonzero elements row indices (coordinates): */</span></div>
+<div class="line">        <span class="keywordtype">int</span>   IA[] = { 0, 1, 2, 2 };</div>
+<div class="line">        <span class="comment">/* A's nonzero elements column indices (coordinates): */</span></div>
+<div class="line">        <span class="keywordtype">int</span>   JA[] = { 0, 1, 0, 2 };</div>
+<div class="line">        <span class="comment">/* A's nonzero values (matrix coefficients): */</span></div>
+<div class="line">        <span class="keywordtype">double</span> VA[] = { 11.0, 22.0, 13.0, 33.0  };</div>
+<div class="line">        <span class="comment">/* the X vector's array: */</span></div>
+<div class="line">        <span class="keywordtype">double</span> X[] = { 0.0, 0.0, 0.0 };</div>
+<div class="line">        <span class="comment">/* the B vector's array: */</span></div>
+<div class="line">        <span class="keywordtype">double</span> B[] = { -1.0, -2.0, -2.0 };</div>
+<div class="line">        <span class="comment">/* the (known) result array: */</span></div>
+<div class="line">        <span class="keywordtype">double</span> AB[] = { 11.0+26.0, 44.0, 66.0+13.0 };</div>
+<div class="line">        <span class="comment">/* rsb error variable: */</span></div>
+<div class="line">        <a class="code" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> errval = <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>;</div>
+<div class="line">        <span class="keywordtype">int</span> i;</div>
+<div class="line"></div>
+<div class="line">        printf(<span class="stringliteral">"Hello, RSB!\n"</span>);</div>
+<div class="line">        <span class="comment">/* initialize the library */</span></div>
+<div class="line">        <span class="keywordflow">if</span>((errval = <a class="code" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>(<a class="code" href="rsb_8h.html#add105c42e570c5c269680d437f8c51e2">RSB_NULL_INIT_OPTIONS</a>)) </div>
+<div class="line">                        != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly initialized the library.\n"</span>);</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* initialize a matrix descriptor */</span></div>
+<div class="line">        A = <a class="code" href="group__rsb__doc__sparse__blas.html#gac931dcb1129ee3016ab82602c3d14fee">BLAS_duscr_begin</a>(nr,nc);</div>
+<div class="line">        <span class="keywordflow">if</span>( A == <a class="code" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_invalid_handle</a> )</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        </div>
+<div class="line">        <span class="comment">/* specify properties (e.g.: symmetry)*/</span></div>
+<div class="line">        <span class="keywordflow">if</span>( <a class="code" href="blas__sparse_8h.html#a6719ae77dfef6d6dd0790e34a65c1924">BLAS_ussp</a>(A,<a class="code" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a>) != 0 )</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* get properties (e.g.: symmetry) */</span></div>
+<div class="line">        <span class="keywordflow">if</span>( <a class="code" href="blas__sparse_8h.html#a5eec91b6d95962811bd9cb4e37266214">BLAS_usgp</a>(A,<a class="code" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a>) != 1 )</div>
+<div class="line">        {</div>
+<div class="line">                printf(<span class="stringliteral">"Symmetry property non set ?!\n"</span>);</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* insert the nonzeroes (here, all at once) */</span></div>
+<div class="line">        <span class="keywordflow">if</span>( <a class="code" href="group__rsb__doc__sparse__blas.html#gae0683bc8f0af5dd3e53b964190f9e1b4">BLAS_duscr_insert_entries</a>(A, nnz, VA, IA, JA)</div>
+<div class="line">                        == <a class="code" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_invalid_handle</a>)</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* finalize (allocate) the matrix build  */</span></div>
+<div class="line">        <span class="keywordflow">if</span>( <a class="code" href="group__rsb__doc__sparse__blas.html#ga5d9ce97bf054b1e3750eaae5d4e6c335">BLAS_duscr_end</a>(A) == <a class="code" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_invalid_handle</a> )</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly allocated a matrix.\n"</span>);</div>
+<div class="line"></div>
+<div class="line">        VA[0] = 0.0;</div>
+<div class="line">        <span class="keywordflow">if</span>( <a class="code" href="group__rsb__doc__sparse__blas.html#gacf35fa073f6cc991efe75f6a012a9a04">BLAS_dusget_element</a>(A, IA[0], JA[0], &VA[0]) )</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* a check */</span></div>
+<div class="line">        <span class="keywordflow">if</span>( VA[0] != 11.0 )</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* compute X = X + (-1) * A * B   */</span></div>
+<div class="line">        <span class="keywordflow">if</span>(<a class="code" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>(<a class="code" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f">blas_no_trans</a>,-1,A,B,1,X,1))</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">for</span>( i = 0 ; i < nc; ++i )</div>
+<div class="line">                <span class="keywordflow">if</span>( X[i] != AB[i] )</div>
+<div class="line">                {</div>
+<div class="line">                        printf(<span class="stringliteral">"Computed SPMV result seems wrong. Terminating.\n"</span>);</div>
+<div class="line">                        <span class="keywordflow">goto</span> err;</div>
+<div class="line">                }</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly performed a SPMV.\n"</span>);</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* deallocate matrix A */</span></div>
+<div class="line">        <span class="keywordflow">if</span>( <a class="code" href="group__rsb__doc__sparse__blas.html#ga8b0cca8196f40f7b55084a978b40717f">BLAS_usds</a>(A) )</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly freed the matrix.\n"</span>);</div>
+<div class="line"></div>
+<div class="line">        <span class="comment">/* finalize the library */</span></div>
+<div class="line">        <span class="keywordflow">if</span>((errval = <a class="code" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a>(<a class="code" href="rsb_8h.html#a2234a5e51156de6c95c3f8c2951ae09f">RSB_NULL_EXIT_OPTIONS</a>))</div>
+<div class="line">                        != <a class="code" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)</div>
+<div class="line">        {</div>
+<div class="line">                <span class="keywordflow">goto</span> err;</div>
+<div class="line">        }</div>
+<div class="line">        printf(<span class="stringliteral">"Correctly finalized the library.\n"</span>);</div>
+<div class="line">        printf(<span class="stringliteral">"Program terminating with no error.\n"</span>);</div>
+<div class="line"></div>
+<div class="line">        <span class="keywordflow">return</span> 0;</div>
+<div class="line">err:</div>
+<div class="line">        <a class="code" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror</a>(NULL,errval);</div>
+<div class="line">        printf(<span class="stringliteral">"Program terminating with error.\n"</span>);</div>
+<div class="line">        <span class="keywordflow">return</span> -1;</div>
+<div class="line"><span class="preprocessor">#endif </span><span class="comment">/* RSB_NUMERICAL_TYPE_DOUBLE */</span><span class="preprocessor"></span></div>
+<div class="line"><span class="preprocessor"></span>}</div>
+<div class="line"></div>
+</div><!-- fragment --><p>For more, see the <a class="el" href="group__rsb__doc__examples.html">Example programs and code</a> section. </p>
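+<p>On a typical installation (an assumption; paths and extra libraries vary), either program above can be built with an invocation like <code>cc -o hello hello.c -lrsb</code>, or with the compile and link flags reported by the <code>librsb-config</code> script, where installed. Note that in the first example the coordinate arrays list position (2,2) twice (values 32 and 1); since the matrix is built with <code>RSB_FLAG_DUPLICATES_SUM</code>, the two values are summed to 33 at assembly, and the subsequent <code>rsb_spmv()</code> call computes X = 1*A*B + 1*X. </p>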
+</div></div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfaceblas__sparse_1_1rsb__blas__get__mtx.html b/doc/html/interfaceblas__sparse_1_1rsb__blas__get__mtx.html
new file mode 100644
index 0000000..15b09fc
--- /dev/null
+++ b/doc/html/interfaceblas__sparse_1_1rsb__blas__get__mtx.html
@@ -0,0 +1,89 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: blas_sparse::rsb_blas_get_mtx Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classblas__sparse.html">blas_sparse</a></li><li class="navelem"><a class="el" href="interfaceblas__sparse_1_1rsb__blas__get__mtx.html">rsb_blas_get_mtx</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">blas_sparse::rsb_blas_get_mtx Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:ab7287586fccf2ade719b9a0b0585fb6a"><td class="memItemLeft" align="right" valign="top">TYPE(C_PTR) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1rsb__blas__get__mtx.html#ab7287586fccf2ade719b9a0b0585fb6a">rsb_blas_get_mtx</a> (A)</td></tr>
+</table>
+<h2>Constructor & Destructor Documentation</h2>
+<a class="anchor" id="ab7287586fccf2ade719b9a0b0585fb6a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">TYPE(C_PTR) function blas_sparse::rsb_blas_get_mtx::rsb_blas_get_mtx </td>
+          <td>(</td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>A</em></td><td>)</td>
+          <td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
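+<p>Given a Sparse BLAS matrix handle (the <code>integer(c_int)</code> argument <em>A</em>), returns a C pointer (<code>TYPE(C_PTR)</code>) to the underlying <code>rsb_mtx_t</code> structure, so that the same matrix can also be accessed through the <code>rsb.h</code> interface. </p>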
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb__blas__sparse_8F90.html">rsb_blas_sparse.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:24 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfaceblas__sparse_1_1uscr__insert__block.html b/doc/html/interfaceblas__sparse_1_1uscr__insert__block.html
new file mode 100644
index 0000000..6ff88c9
--- /dev/null
+++ b/doc/html/interfaceblas__sparse_1_1uscr__insert__block.html
@@ -0,0 +1,395 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: blas_sparse::uscr_insert_block Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classblas__sparse.html">blas_sparse</a></li><li class="navelem"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__block.html">uscr_insert_block</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">blas_sparse::uscr_insert_block Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>Inserts a dense block.  
+ <a href="interfaceblas__sparse_1_1uscr__insert__block.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:ab52411aa7d878e2fc62abc2983b9871f"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__block.html#ab52411aa7d878e2fc62abc2983b9871f">suscr_insert_block</a> (A, val, row_stride, col_stride, i, j, istat)</td></tr>
+<tr class="memdesc:ab52411aa7d878e2fc62abc2983b9871f"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the (type) corresponding matrix blocked <code>begin</code> function. If not called a blocked <code>begin</code> function, will assume 1x1 (that is, no) blocking. By default, duplicate entries will be summed together.  <a href="#ab52411aa7d878e2fc62abc29 [...]
+<tr class="memitem:aa74a6aa929703b1221d125dabf0610a9"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__block.html#aa74a6aa929703b1221d125dabf0610a9">duscr_insert_block</a> (A, val, row_stride, col_stride, i, j, istat)</td></tr>
+<tr class="memdesc:aa74a6aa929703b1221d125dabf0610a9"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the (type) corresponding matrix blocked <code>begin</code> function. If not called a blocked <code>begin</code> function, will assume 1x1 (that is, no) blocking. By default, duplicate entries will be summed together.  <a href="#aa74a6aa929703b1221d125da [...]
+<tr class="memitem:af239abd22080f58d5cf0ea2dfdd78953"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__block.html#af239abd22080f58d5cf0ea2dfdd78953">cuscr_insert_block</a> (A, val, row_stride, col_stride, i, j, istat)</td></tr>
+<tr class="memdesc:af239abd22080f58d5cf0ea2dfdd78953"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the (type) corresponding matrix blocked <code>begin</code> function. If not called a blocked <code>begin</code> function, will assume 1x1 (that is, no) blocking. By default, duplicate entries will be summed together.  <a href="#af239abd22080f58d5cf0ea2d [...]
+<tr class="memitem:ad4e920769d6a259d1b2fae20e6fb2853"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__block.html#ad4e920769d6a259d1b2fae20e6fb2853">zuscr_insert_block</a> (A, val, row_stride, col_stride, i, j, istat)</td></tr>
+<tr class="memdesc:ad4e920769d6a259d1b2fae20e6fb2853"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the (type) corresponding matrix blocked <code>begin</code> function. If not called a blocked <code>begin</code> function, will assume 1x1 (that is, no) blocking. By default, duplicate entries will be summed together.  <a href="#ad4e920769d6a259d1b2fae20 [...]
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>inserts a dense block </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
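+<p>A minimal usage sketch of the double precision variant follows. It assumes a valid handle <code>A</code> obtained from a blocked <code>begin</code> call with 2x2 blocking (elided here), and a column-major layout of <code>val</code>; the strides and indices below are illustrative assumptions, not prescribed values: </p>
+<div class="fragment"><div class="line"><span class="comment">! inside a program unit with:  use blas_sparse</span></div>
+<div class="line"><span class="comment">! A is assumed to be a valid handle created with 2x2 blocking (begin call elided)</span></div>
+<div class="line">integer :: A, istat</div>
+<div class="line"><span class="comment">! a hypothetical 2x2 block, stored column by column:</span></div>
+<div class="line">real(kind(1.d0)) :: val(4) = (/ 11.d0, 21.d0, 12.d0, 22.d0 /)</div>
+<div class="line"><span class="comment">! insert it at block position (1,1); column-major val: row_stride=1, col_stride=2</span></div>
+<div class="line">call uscr_insert_block(A, val, 1, 2, 1, 1, istat)</div>
+<div class="line">if (istat .ne. 0) print *, <span class="stringliteral">"uscr_insert_block failed"</span></div>
+</div><!-- fragment -->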
+</div><h2>Member Function Documentation</h2>
+<a class="anchor" id="af239abd22080f58d5cf0ea2dfdd78953"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_block::cuscr_insert_block </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the (type) corresponding matrix blocked <code>begin</code> function. If no blocked <code>begin</code> function was called, 1x1 (that is, no) blocking is assumed. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row and column strides in accessing <code>val</code>. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Block row/column indices. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">BLAS_duscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286" [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="aa74a6aa929703b1221d125dabf0610a9"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_block::duscr_insert_block </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the (type) corresponding matrix blocked <code>begin</code> function. If no blocked <code>begin</code> function was called, 1x1 (that is, no) blocking is assumed. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row and column strides in accessing <code>val</code>. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Block row/column indices. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">BLAS_duscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286" [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="ab52411aa7d878e2fc62abc2983b9871f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_block::suscr_insert_block </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the (type) corresponding matrix blocked <code>begin</code> function. If no blocked <code>begin</code> function was called, 1x1 (that is, no) blocking is assumed. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row and column strides in accessing <code>val</code>. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Block row/column indices. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">BLAS_duscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286" [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="ad4e920769d6a259d1b2fae20e6fb2853"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_block::zuscr_insert_block </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole block in a matrix, assuming it is in build state. The block size is assumed to be the one specified when calling the (type) corresponding matrix blocked <code>begin</code> function. If no blocked <code>begin</code> function was called, 1x1 (that is, no) blocking is assumed. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row and column strides in accessing <code>val</code>. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Block row/column indices. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release. </dd></dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">BLAS_duscr_block_begin</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286" [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb__blas__sparse_8F90.html">rsb_blas_sparse.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:24 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfaceblas__sparse_1_1uscr__insert__clique.html b/doc/html/interfaceblas__sparse_1_1uscr__insert__clique.html
new file mode 100644
index 0000000..ce3bb18
--- /dev/null
+++ b/doc/html/interfaceblas__sparse_1_1uscr__insert__clique.html
@@ -0,0 +1,443 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: blas_sparse::uscr_insert_clique Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classblas__sparse.html">blas_sparse</a></li><li class="navelem"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__clique.html">uscr_insert_clique</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">blas_sparse::uscr_insert_clique Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>Inserts a clique.  
+ <a href="interfaceblas__sparse_1_1uscr__insert__clique.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:ae4d7b020b0c50e575aa6a80b44ab8a53"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__clique.html#ae4d7b020b0c50e575aa6a80b44ab8a53">suscr_insert_clique</a> (A, k, l, val, row_stride, col_stride, indx, jndx, istat)</td></tr>
+<tr class="memdesc:ae4d7b020b0c50e575aa6a80b44ab8a53"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole clique in a matrix, assuming this is in build state. By default, duplicate entries will be summed together.  <a href="#ae4d7b020b0c50e575aa6a80b44ab8a53"></a><br/></td></tr>
+<tr class="memitem:a24491a9b6aeae9698aacf9c649fabcfc"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__clique.html#a24491a9b6aeae9698aacf9c649fabcfc">duscr_insert_clique</a> (A, k, l, val, row_stride, col_stride, indx, jndx, istat)</td></tr>
+<tr class="memdesc:a24491a9b6aeae9698aacf9c649fabcfc"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole clique in a matrix, assuming this is in build state. By default, duplicate entries will be summed together.  <a href="#a24491a9b6aeae9698aacf9c649fabcfc"></a><br/></td></tr>
+<tr class="memitem:af3f921a0867dbd20c1499660a2b78376"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__clique.html#af3f921a0867dbd20c1499660a2b78376">cuscr_insert_clique</a> (A, k, l, val, row_stride, col_stride, indx, jndx, istat)</td></tr>
+<tr class="memdesc:af3f921a0867dbd20c1499660a2b78376"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole clique in a matrix, assuming this is in build state. By default, duplicate entries will be summed together.  <a href="#af3f921a0867dbd20c1499660a2b78376"></a><br/></td></tr>
+<tr class="memitem:af457ebe2c2b2112ba6cdb94e9bb53928"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__clique.html#af457ebe2c2b2112ba6cdb94e9bb53928">zuscr_insert_clique</a> (A, k, l, val, row_stride, col_stride, indx, jndx, istat)</td></tr>
+<tr class="memdesc:af457ebe2c2b2112ba6cdb94e9bb53928"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole clique in a matrix, assuming this is in build state. By default, duplicate entries will be summed together.  <a href="#af457ebe2c2b2112ba6cdb94e9bb53928"></a><br/></td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>inserts a clique </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+</div><h2>Member Function Documentation</h2>
+<a class="anchor" id="af3f921a0867dbd20c1499660a2b78376"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_clique::cuscr_insert_clique </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole clique in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">k,l</td><td>Clique rows and columns count. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row/columns stride in accessing the clique. </td></tr>
+    <tr><td class="paramname">indx,jndx</td><td>Row/column indices arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a24491a9b6aeae9698aacf9c649fabcfc"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_clique::duscr_insert_clique </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole clique in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">k,l</td><td>Clique rows and columns count. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row/columns stride in accessing the clique. </td></tr>
+    <tr><td class="paramname">indx,jndx</td><td>Row/column indices arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="ae4d7b020b0c50e575aa6a80b44ab8a53"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_clique::suscr_insert_clique </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole clique in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">k,l</td><td>Clique rows and columns count. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row/columns stride in accessing the clique. </td></tr>
+    <tr><td class="paramname">indx,jndx</td><td>Row/column indices arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="af457ebe2c2b2112ba6cdb94e9bb53928"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_clique::zuscr_insert_clique </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>k</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>l</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>row_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>col_stride</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole clique in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">k,l</td><td>Clique rows and columns count. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">row_stride,col_stride</td><td>Row/columns stride in accessing the clique. </td></tr>
+    <tr><td class="paramname">indx,jndx</td><td>Row/column indices arrays. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section warning"><dt>Warning</dt><dd>Signature of this routine for Fortran does not agree to the standard. This shall be corrected in a future release.</dd></dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
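+<p>A minimal Fortran sketch for the double-precision variant of this interface (an illustration, not from the librsb sources; <code>duscr_begin</code>/<code>uscr_end</code> are assumed from elsewhere in this module, and the sizes are made up). A k-by-l clique stored column-major in <code>val</code> uses a row stride of 1 and a column stride of k:</p>
+<pre class="fragment">
+      program insert_clique_sketch
+        use blas_sparse
+        implicit none
+        integer, parameter :: k = 2, l = 2
+        integer :: A, istat
+        real(kind(1.d0)) :: val(k*l) = (/ 1.d0, 2.d0, 3.d0, 4.d0 /)
+        integer :: indx(k) = (/ 1, 3 /)   ! clique row indices
+        integer :: jndx(l) = (/ 2, 4 /)   ! clique column indices
+        call duscr_begin(4, 4, A, istat)  ! assumed: 4x4 matrix, build state
+        if (istat /= 0) stop 'begin failed'
+        ! val(i + (j-1)*k) is placed at position (indx(i), jndx(j))
+        call duscr_insert_clique(A, k, l, val, 1, k, indx, jndx, istat)
+        if (istat /= 0) stop 'insert failed'
+        call uscr_end(A, istat)           ! assumed: finalize assembly
+      end program insert_clique_sketch
+</pre>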
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb__blas__sparse_8F90.html">rsb_blas_sparse.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:24 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfaceblas__sparse_1_1uscr__insert__col.html b/doc/html/interfaceblas__sparse_1_1uscr__insert__col.html
new file mode 100644
index 0000000..83a77bd
--- /dev/null
+++ b/doc/html/interfaceblas__sparse_1_1uscr__insert__col.html
@@ -0,0 +1,367 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: blas_sparse::uscr_insert_col Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classblas__sparse.html">blas_sparse</a></li><li class="navelem"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__col.html">uscr_insert_col</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">blas_sparse::uscr_insert_col Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>Inserts a sparse column.  
+ <a href="interfaceblas__sparse_1_1uscr__insert__col.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a46d6cb6bd1b38c5c75eef95cb9ce4135"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__col.html#a46d6cb6bd1b38c5c75eef95cb9ce4135">suscr_insert_col</a> (A, j, nnz, val, indx, istat)</td></tr>
+<tr class="memdesc:a46d6cb6bd1b38c5c75eef95cb9ce4135"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#a46d6cb6bd1b38c5c75eef95cb9ce4135"></a><br/></td></tr>
+<tr class="memitem:a8dfa301a73cd1bf09b66a0b2e9c704a8"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__col.html#a8dfa301a73cd1bf09b66a0b2e9c704a8">duscr_insert_col</a> (A, j, nnz, val, indx, istat)</td></tr>
+<tr class="memdesc:a8dfa301a73cd1bf09b66a0b2e9c704a8"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#a8dfa301a73cd1bf09b66a0b2e9c704a8"></a><br/></td></tr>
+<tr class="memitem:a380ea4ffed92a6cf0e73a50952fc6a64"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__col.html#a380ea4ffed92a6cf0e73a50952fc6a64">cuscr_insert_col</a> (A, j, nnz, val, indx, istat)</td></tr>
+<tr class="memdesc:a380ea4ffed92a6cf0e73a50952fc6a64"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#a380ea4ffed92a6cf0e73a50952fc6a64"></a><br/></td></tr>
+<tr class="memitem:a397d0fb9bd1ba1bddc0eaeb4d3e47a5c"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__col.html#a397d0fb9bd1ba1bddc0eaeb4d3e47a5c">zuscr_insert_col</a> (A, j, nnz, val, indx, istat)</td></tr>
+<tr class="memdesc:a397d0fb9bd1ba1bddc0eaeb4d3e47a5c"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#a397d0fb9bd1ba1bddc0eaeb4d3e47a5c"></a><br/></td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>inserts a sparse column </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+</div><h2>Member Function Documentation</h2>
+<a class="anchor" id="a380ea4ffed92a6cf0e73a50952fc6a64"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_col::cuscr_insert_col </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a8dfa301a73cd1bf09b66a0b2e9c704a8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_col::duscr_insert_col </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a46d6cb6bd1b38c5c75eef95cb9ce4135"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_col::suscr_insert_col </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a397d0fb9bd1ba1bddc0eaeb4d3e47a5c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_col::zuscr_insert_col </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole column in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">j</td><td>Column index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
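+<p>A minimal Fortran sketch for the double-precision variant of this interface (an illustration, not from the librsb sources; <code>duscr_begin</code>/<code>uscr_end</code> are assumed from elsewhere in this module, and the sizes and indices are made up):</p>
+<pre class="fragment">
+      program insert_col_sketch
+        use blas_sparse
+        implicit none
+        integer, parameter :: nnz = 2
+        integer :: A, istat
+        real(kind(1.d0)) :: val(nnz) = (/ 5.d0, 6.d0 /)
+        integer :: indx(nnz) = (/ 1, 4 /)  ! row indices within the column
+        call duscr_begin(4, 4, A, istat)   ! assumed: 4x4 matrix, build state
+        if (istat /= 0) stop 'begin failed'
+        call duscr_insert_col(A, 2, nnz, val, indx, istat)  ! fill column 2
+        if (istat /= 0) stop 'insert failed'
+        call uscr_end(A, istat)            ! assumed: finalize assembly
+      end program insert_col_sketch
+</pre>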
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb__blas__sparse_8F90.html">rsb_blas_sparse.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:24 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfaceblas__sparse_1_1uscr__insert__entries.html b/doc/html/interfaceblas__sparse_1_1uscr__insert__entries.html
new file mode 100644
index 0000000..1f757d3
--- /dev/null
+++ b/doc/html/interfaceblas__sparse_1_1uscr__insert__entries.html
@@ -0,0 +1,367 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: blas_sparse::uscr_insert_entries Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classblas__sparse.html">blas_sparse</a></li><li class="navelem"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__entries.html">uscr_insert_entries</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">blas_sparse::uscr_insert_entries Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>Inserts multiple entries.  
+ <a href="interfaceblas__sparse_1_1uscr__insert__entries.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a68b943e8b2dfb946299b80b38397a05d"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__entries.html#a68b943e8b2dfb946299b80b38397a05d">suscr_insert_entries</a> (A, nnz, val, indx, jndx, istat)</td></tr>
+<tr class="memdesc:a68b943e8b2dfb946299b80b38397a05d"><td class="mdescLeft"> </td><td class="mdescRight">Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#a68b943e8b2dfb946299b80b38397a05d"></a><br/></td></tr>
+<tr class="memitem:a6d994adf4a26516c4bbd08020a923a5a"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__entries.html#a6d994adf4a26516c4bbd08020a923a5a">duscr_insert_entries</a> (A, nnz, val, indx, jndx, istat)</td></tr>
+<tr class="memdesc:a6d994adf4a26516c4bbd08020a923a5a"><td class="mdescLeft"> </td><td class="mdescRight">Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#a6d994adf4a26516c4bbd08020a923a5a"></a><br/></td></tr>
+<tr class="memitem:a5035e49b6a0d45c9aee959667fd567b2"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__entries.html#a5035e49b6a0d45c9aee959667fd567b2">cuscr_insert_entries</a> (A, nnz, val, indx, jndx, istat)</td></tr>
+<tr class="memdesc:a5035e49b6a0d45c9aee959667fd567b2"><td class="mdescLeft"> </td><td class="mdescRight">Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#a5035e49b6a0d45c9aee959667fd567b2"></a><br/></td></tr>
+<tr class="memitem:ae73d20580b844428d7ca4834e578d448"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__entries.html#ae73d20580b844428d7ca4834e578d448">zuscr_insert_entries</a> (A, nnz, val, indx, jndx, istat)</td></tr>
+<tr class="memdesc:ae73d20580b844428d7ca4834e578d448"><td class="mdescLeft"> </td><td class="mdescRight">Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#ae73d20580b844428d7ca4834e578d448"></a><br/></td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>inserts multiple entries </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+</div><h2>Member Function Documentation</h2>
+<a class="anchor" id="a5035e49b6a0d45c9aee959667fd567b2"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_entries::cuscr_insert_entries </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">jndx</td><td>Column indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a6d994adf4a26516c4bbd08020a923a5a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_entries::duscr_insert_entries </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">jndx</td><td>Column indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a68b943e8b2dfb946299b80b38397a05d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_entries::suscr_insert_entries </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">jndx</td><td>Column indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="ae73d20580b844428d7ca4834e578d448"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_entries::zuscr_insert_entries </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>jndx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts entries in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row indices array. </td></tr>
+    <tr><td class="paramname">jndx</td><td>Column indices array.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
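+<p>A minimal Fortran sketch for the double-precision variant of this interface (an illustration, not from the librsb sources; <code>duscr_begin</code>/<code>uscr_end</code> are assumed from elsewhere in this module, and the data is made up). The triples (indx(n), jndx(n), val(n)) are inserted coordinate-style:</p>
+<pre class="fragment">
+      program insert_entries_sketch
+        use blas_sparse
+        implicit none
+        integer, parameter :: nnz = 3
+        integer :: A, istat
+        real(kind(1.d0)) :: val(nnz) = (/ 1.d0, 2.d0, 3.d0 /)
+        integer :: indx(nnz) = (/ 1, 2, 3 /)  ! row indices
+        integer :: jndx(nnz) = (/ 1, 2, 3 /)  ! column indices
+        call duscr_begin(3, 3, A, istat)      ! assumed: 3x3 matrix, build state
+        if (istat /= 0) stop 'begin failed'
+        call duscr_insert_entries(A, nnz, val, indx, jndx, istat)
+        if (istat /= 0) stop 'insert failed'
+        call uscr_end(A, istat)               ! assumed: finalize assembly
+      end program insert_entries_sketch
+</pre>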
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb__blas__sparse_8F90.html">rsb_blas_sparse.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:24 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfaceblas__sparse_1_1uscr__insert__entry.html b/doc/html/interfaceblas__sparse_1_1uscr__insert__entry.html
new file mode 100644
index 0000000..abdb320
--- /dev/null
+++ b/doc/html/interfaceblas__sparse_1_1uscr__insert__entry.html
@@ -0,0 +1,340 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: blas_sparse::uscr_insert_entry Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classblas__sparse.html">blas_sparse</a></li><li class="navelem"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__entry.html">uscr_insert_entry</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">blas_sparse::uscr_insert_entry Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>Inserts a single entry.  
+ <a href="interfaceblas__sparse_1_1uscr__insert__entry.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a3b1e4b0dddeb275de32edeafda52990f"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__entry.html#a3b1e4b0dddeb275de32edeafda52990f">suscr_insert_entry</a> (A, val, i, j, istat)</td></tr>
+<tr class="memdesc:a3b1e4b0dddeb275de32edeafda52990f"><td class="mdescLeft"> </td><td class="mdescRight">Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#a3b1e4b0dddeb275de32edeafda52990f"></a><br/></td></tr>
+<tr class="memitem:ac1bd26e50082f7eb1123a59794ae3f1c"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__entry.html#ac1bd26e50082f7eb1123a59794ae3f1c">duscr_insert_entry</a> (A, val, i, j, istat)</td></tr>
+<tr class="memdesc:ac1bd26e50082f7eb1123a59794ae3f1c"><td class="mdescLeft"> </td><td class="mdescRight">Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#ac1bd26e50082f7eb1123a59794ae3f1c"></a><br/></td></tr>
+<tr class="memitem:aea33478c2c2911daf74c478ded2ed39e"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__entry.html#aea33478c2c2911daf74c478ded2ed39e">cuscr_insert_entry</a> (A, val, i, j, istat)</td></tr>
+<tr class="memdesc:aea33478c2c2911daf74c478ded2ed39e"><td class="mdescLeft"> </td><td class="mdescRight">Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#aea33478c2c2911daf74c478ded2ed39e"></a><br/></td></tr>
+<tr class="memitem:a3eae411ca3d10ec5dfddbdb53a5a7d4d"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__entry.html#a3eae411ca3d10ec5dfddbdb53a5a7d4d">zuscr_insert_entry</a> (A, val, i, j, istat)</td></tr>
+<tr class="memdesc:a3eae411ca3d10ec5dfddbdb53a5a7d4d"><td class="mdescLeft"> </td><td class="mdescRight">Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#a3eae411ca3d10ec5dfddbdb53a5a7d4d"></a><br/></td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>A Sparse BLAS interface for RSB. </p>
+<p>Inserts a single entry. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
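+<p>A minimal usage sketch (an illustrative fragment, assuming the companion routines <code>duscr_begin</code>, <code>uscr_end</code> and <code>usds</code> of this module, as in the Sparse BLAS Fortran bindings):</p>
+<pre class="fragment">
+      program insert_entry_sketch
+        use blas_sparse
+        implicit none
+        integer :: A, istat
+        ! begin assembly of a 4 x 4 double precision matrix
+        call duscr_begin(4, 4, A, istat)
+        ! insert the value 2.5 at row 1, column 2 (matrix in build state);
+        ! a second insertion at (1,2) would be summed with it by default
+        call uscr_insert_entry(A, 2.5d0, 1, 2, istat)
+        if (istat .ne. 0) stop 'insertion failed'
+        ! end assembly, then release the handle
+        call uscr_end(A, istat)
+        call usds(A, istat)
+      end program insert_entry_sketch
+</pre>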
+</div><h2>Member Function Documentation</h2>
+<a class="anchor" id="aea33478c2c2911daf74c478ded2ed39e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_entry::cuscr_insert_entry </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Row and column indices.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="ac1bd26e50082f7eb1123a59794ae3f1c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_entry::duscr_insert_entry </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Row and column indices.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a3b1e4b0dddeb275de32edeafda52990f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_entry::suscr_insert_entry </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Row and column indices.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a3eae411ca3d10ec5dfddbdb53a5a7d4d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_entry::zuscr_insert_entry </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>j</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts an entry in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">i,j</td><td>Row and column indices.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb__blas__sparse_8F90.html">rsb_blas_sparse.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:24 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfaceblas__sparse_1_1uscr__insert__row.html b/doc/html/interfaceblas__sparse_1_1uscr__insert__row.html
new file mode 100644
index 0000000..82c0e05
--- /dev/null
+++ b/doc/html/interfaceblas__sparse_1_1uscr__insert__row.html
@@ -0,0 +1,367 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: blas_sparse::uscr_insert_row Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classblas__sparse.html">blas_sparse</a></li><li class="navelem"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__row.html">uscr_insert_row</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">blas_sparse::uscr_insert_row Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>Inserts a sparse row.  
+ <a href="interfaceblas__sparse_1_1uscr__insert__row.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:ad625073be16e7d5ebe9a66f73f9da15c"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__row.html#ad625073be16e7d5ebe9a66f73f9da15c">suscr_insert_row</a> (A, i, nnz, val, indx, istat)</td></tr>
+<tr class="memdesc:ad625073be16e7d5ebe9a66f73f9da15c"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#ad625073be16e7d5ebe9a66f73f9da15c"></a><br/></td></tr>
+<tr class="memitem:a14fd80441fbbc200a36de62c86f12538"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__row.html#a14fd80441fbbc200a36de62c86f12538">duscr_insert_row</a> (A, i, nnz, val, indx, istat)</td></tr>
+<tr class="memdesc:a14fd80441fbbc200a36de62c86f12538"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#a14fd80441fbbc200a36de62c86f12538"></a><br/></td></tr>
+<tr class="memitem:a59a678b947de912694a162cafb171100"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__row.html#a59a678b947de912694a162cafb171100">cuscr_insert_row</a> (A, i, nnz, val, indx, istat)</td></tr>
+<tr class="memdesc:a59a678b947de912694a162cafb171100"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#a59a678b947de912694a162cafb171100"></a><br/></td></tr>
+<tr class="memitem:adf35f583386e093b7805b732d52aa95b"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__row.html#adf35f583386e093b7805b732d52aa95b">zuscr_insert_row</a> (A, i, nnz, val, indx, istat)</td></tr>
+<tr class="memdesc:adf35f583386e093b7805b732d52aa95b"><td class="mdescLeft"> </td><td class="mdescRight">Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together.  <a href="#adf35f583386e093b7805b732d52aa95b"></a><br/></td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>inserts a sparse row </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
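+<p>A minimal usage sketch (an illustrative fragment, assuming a double precision matrix handle <code>A</code> created with <code>duscr_begin</code> and still in build state):</p>
+<pre class="fragment">
+      ! insert row 2, holding 3 nonzeroes at columns 1, 3 and 4
+      call uscr_insert_row(A, 2, 3, (/ 1.0d0, 2.0d0, 3.0d0 /), (/ 1, 3, 4 /), istat)
+      if (istat .ne. 0) stop 'row insertion failed'
+</pre>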
+</div><h2>Member Function Documentation</h2>
+<a class="anchor" id="a59a678b947de912694a162cafb171100"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_row::cuscr_insert_row </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row index.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a14fd80441fbbc200a36de62c86f12538"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_row::duscr_insert_row </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row index.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="ad625073be16e7d5ebe9a66f73f9da15c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_row::suscr_insert_row </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row index.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="adf35f583386e093b7805b732d52aa95b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::uscr_insert_row::zuscr_insert_row </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>i</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>val</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, dimension (:) </td>
+          <td class="paramname"><em>indx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Inserts a whole row in a matrix, assuming it is in build state. By default, duplicate entries will be summed together. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">i</td><td>Row index. </td></tr>
+    <tr><td class="paramname">nnz</td><td>Number of nonzeroes to insert. </td></tr>
+    <tr><td class="paramname">val</td><td>Array of values. </td></tr>
+    <tr><td class="paramname">indx</td><td>Row index.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb__blas__sparse_8F90.html">rsb_blas_sparse.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:24 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfaceblas__sparse_1_1usmm.html b/doc/html/interfaceblas__sparse_1_1usmm.html
new file mode 100644
index 0000000..5713bc1
--- /dev/null
+++ b/doc/html/interfaceblas__sparse_1_1usmm.html
@@ -0,0 +1,503 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: blas_sparse::usmm Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classblas__sparse.html">blas_sparse</a></li><li class="navelem"><a class="el" href="interfaceblas__sparse_1_1usmm.html">usmm</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">blas_sparse::usmm Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>Multiplication: c <- alpha A b + c  
+ <a href="interfaceblas__sparse_1_1usmm.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:abeab18a2f4b8c597aad8e7e618d12bfc"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1usmm.html#abeab18a2f4b8c597aad8e7e618d12bfc">susmm</a> (order, transA, nrhs, alpha, A, b, ldb, c, ldc, istat)</td></tr>
+<tr class="memdesc:abeab18a2f4b8c597aad8e7e618d12bfc"><td class="mdescLeft"> </td><td class="mdescRight">Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>.  <a href="#abeab18a2f4b8c597aad8e7e618 [...]
+<tr class="memitem:a444e03055975d19e0907fdc774d6419f"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1usmm.html#a444e03055975d19e0907fdc774d6419f">dusmm</a> (order, transA, nrhs, alpha, A, b, ldb, c, ldc, istat)</td></tr>
+<tr class="memdesc:a444e03055975d19e0907fdc774d6419f"><td class="mdescLeft"> </td><td class="mdescRight">Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>.  <a href="#a444e03055975d19e0907fdc774 [...]
+<tr class="memitem:a0a2303f12cfe05ba01cdb52b751d5f33"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1usmm.html#a0a2303f12cfe05ba01cdb52b751d5f33">cusmm</a> (order, transA, nrhs, alpha, A, b, ldb, c, ldc, istat)</td></tr>
+<tr class="memdesc:a0a2303f12cfe05ba01cdb52b751d5f33"><td class="mdescLeft"> </td><td class="mdescRight">Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>.  <a href="#a0a2303f12cfe05ba01cdb52b75 [...]
+<tr class="memitem:a76c548fa7c494d5e8aba03ab1dc4bc39"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1usmm.html#a76c548fa7c494d5e8aba03ab1dc4bc39">zusmm</a> (order, transA, nrhs, alpha, A, b, ldb, c, ldc, istat)</td></tr>
+<tr class="memdesc:a76c548fa7c494d5e8aba03ab1dc4bc39"><td class="mdescLeft"> </td><td class="mdescRight">Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>.  <a href="#a76c548fa7c494d5e8aba03ab1d [...]
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>multiplication : c <- beta c + alpha A b </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
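+<p>A minimal usage sketch (an illustrative fragment, assuming an assembled n x n double precision handle <code>A</code> and the <code>blas_colmajor</code> and <code>blas_no_trans</code> enum values of this module):</p>
+<pre class="fragment">
+      integer, parameter :: n = 4, nrhs = 2
+      real(kind(1.d0)) :: b(n*nrhs), c(n*nrhs)
+      integer :: istat
+      b = 1.0d0
+      c = 0.0d0
+      ! computes c = 1.0*A*b + c; arrays are column-major, so ldb = ldc = n
+      call usmm(blas_colmajor, blas_no_trans, nrhs, 1.0d0, A, b, n, c, n, istat)
+</pre>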
+</div><h2>Member Function Documentation</h2>
+<a class="anchor" id="a0a2303f12cfe05ba01cdb52b751d5f33"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::usmm::cusmm </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>c</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldc</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>. </td></tr>
+    <tr><td class="paramname">c</td><td>Dense vector <em>c</em>. </td></tr>
+    <tr><td class="paramname">ldc</td><td>Leading dimension of <em>c</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="classblas__sparse.html#acf0fe16da38fc03226e462dc6104cc68">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d0 [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
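+<p>A sketch of the autotuning request described in the note above (hedged: it assumes <code>ussp</code> is this module's counterpart of <code>BLAS_ussp</code>, as in the Sparse BLAS Fortran bindings):</p>
+<pre class="fragment">
+      ! mark handle A so that the next multiply call triggers autotuning
+      call ussp(A, blas_rsb_autotune_next_operation, istat)
+      ! this call (with the usual arguments) is then autotuned
+      call cusmm(order, transA, nrhs, alpha, A, b, ldb, c, ldc, istat)
+</pre>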
+
+</div>
+</div>
+<a class="anchor" id="a444e03055975d19e0907fdc774d6419f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::usmm::dusmm </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>c</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldc</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>. </td></tr>
+    <tr><td class="paramname">c</td><td>Dense vector <em>c</em>. </td></tr>
+    <tr><td class="paramname">ldc</td><td>Leading dimension of <em>c</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="classblas__sparse.html#acf0fe16da38fc03226e462dc6104cc68">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d0 [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="abeab18a2f4b8c597aad8e7e618d12bfc"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::usmm::susmm </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>c</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldc</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>. </td></tr>
+    <tr><td class="paramname">c</td><td>Dense vector <em>c</em>. </td></tr>
+    <tr><td class="paramname">ldc</td><td>Leading dimension of <em>c</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="classblas__sparse.html#acf0fe16da38fc03226e462dc6104cc68">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d0 [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a76c548fa7c494d5e8aba03ab1dc4bc39"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::usmm::zusmm </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>c</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldc</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Multiply by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$C \leftarrow \alpha AB+C,$" src="form_15.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^T B+C,$" src="form_16.png"/> <img class="formulaInl" alt="$C \leftarrow \alpha A^H B+C$" src="form_17.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>. </td></tr>
+    <tr><td class="paramname">c</td><td>Dense vector <em>c</em>. </td></tr>
+    <tr><td class="paramname">ldc</td><td>Leading dimension of <em>c</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="classblas__sparse.html#acf0fe16da38fc03226e462dc6104cc68">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d0 [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb__blas__sparse_8F90.html">rsb_blas_sparse.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfaceblas__sparse_1_1usmv.html b/doc/html/interfaceblas__sparse_1_1usmv.html
new file mode 100644
index 0000000..356cc60
--- /dev/null
+++ b/doc/html/interfaceblas__sparse_1_1usmv.html
@@ -0,0 +1,447 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: blas_sparse::usmv Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classblas__sparse.html">blas_sparse</a></li><li class="navelem"><a class="el" href="interfaceblas__sparse_1_1usmv.html">usmv</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">blas_sparse::usmv Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>Multiplication: y <- alpha A x + y  
+ <a href="interfaceblas__sparse_1_1usmv.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a94abb35b1f09c96790c08bbcc6adedb9"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1usmv.html#a94abb35b1f09c96790c08bbcc6adedb9">susmv</a> (transA, alpha, A, x, incx, y, incy, istat)</td></tr>
+<tr class="memdesc:a94abb35b1f09c96790c08bbcc6adedb9"><td class="mdescLeft"> </td><td class="mdescRight">Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>.  <a href="#a94abb35b1f09c96790c08bbcc6adedb9"></a>< [...]
+<tr class="memitem:ab356fcfdebfd118dd5e6165e66a3dc70"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1usmv.html#ab356fcfdebfd118dd5e6165e66a3dc70">dusmv</a> (transA, alpha, A, x, incx, y, incy, istat)</td></tr>
+<tr class="memdesc:ab356fcfdebfd118dd5e6165e66a3dc70"><td class="mdescLeft"> </td><td class="mdescRight">Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>.  <a href="#ab356fcfdebfd118dd5e6165e66a3dc70"></a>< [...]
+<tr class="memitem:a43d3541d816401bb2581913cfa2070bb"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1usmv.html#a43d3541d816401bb2581913cfa2070bb">cusmv</a> (transA, alpha, A, x, incx, y, incy, istat)</td></tr>
+<tr class="memdesc:a43d3541d816401bb2581913cfa2070bb"><td class="mdescLeft"> </td><td class="mdescRight">Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>.  <a href="#a43d3541d816401bb2581913cfa2070bb"></a>< [...]
+<tr class="memitem:a34dd1627794af46998a243677e1bbaa8"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1usmv.html#a34dd1627794af46998a243677e1bbaa8">zusmv</a> (transA, alpha, A, x, incx, y, incy, istat)</td></tr>
+<tr class="memdesc:a34dd1627794af46998a243677e1bbaa8"><td class="mdescLeft"> </td><td class="mdescRight">Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>.  <a href="#a34dd1627794af46998a243677e1bbaa8"></a>< [...]
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>multiplication : c <- beta c + alpha A b </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
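+<p>As a quick illustration (an editor's sketch, not generated from the upstream sources): assuming <code>A</code> already holds a valid matrix handle created beforehand and the <code>blas_sparse</code> module with its <code>blas_enum</code> constants is in scope, a double-precision multiply via the documented <code>dusmv</code> signature looks like this.</p>
+<pre class="fragment">
+program usmv_sketch
+  use blas_sparse              ! Sparse BLAS Fortran module shipped with librsb
+  implicit none
+  integer :: A, istat
+  real(kind(1.d0)) :: alpha, x(4), y(4)
+  ! A is assumed to be a valid matrix handle created beforehand
+  ! (the creation calls are documented elsewhere and omitted here).
+  alpha = 1.d0; x = 1.d0; y = 0.d0
+  ! y <- alpha A x + y; blas_no_trans selects the untransposed variant.
+  call dusmv(blas_no_trans, alpha, A, x, 1, y, 1, istat)
+  if (istat .ne. 0) stop 'dusmv failed'
+end program usmv_sketch
+</pre>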
+</div><h2>Member Function Documentation</h2>
+<a class="anchor" id="a43d3541d816401bb2581913cfa2070bb"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::usmv::cusmv </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>. </td></tr>
+    <tr><td class="paramname">y</td><td>Dense vector <em>y</em>. </td></tr>
+    <tr><td class="paramname">incy</td><td>Stride of <em>y</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="classblas__sparse.html#acf0fe16da38fc03226e462dc6104cc68">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d0 [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
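+<p>The autotuning note above can be exercised as follows (an editor's sketch within a setup like the one in the interface description; the Fortran <code>ussp</code> call and its <code>istat</code> argument are assumed here to mirror the documented C-style <code>BLAS_ussp</code>):</p>
+<pre class="fragment">
+! Request autotuning for the next multiplication on handle A (assumed valid),
+! then perform the multiply that gets tuned.
+call ussp(A, blas_rsb_autotune_next_operation, istat)
+call cusmv(blas_no_trans, alpha, A, x, 1, y, 1, istat)
+</pre>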
+
+</div>
+</div>
+<a class="anchor" id="ab356fcfdebfd118dd5e6165e66a3dc70"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::usmv::dusmv </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>. </td></tr>
+    <tr><td class="paramname">y</td><td>Dense vector <em>y</em>. </td></tr>
+    <tr><td class="paramname">incy</td><td>Stride of <em>y</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="classblas__sparse.html#acf0fe16da38fc03226e462dc6104cc68">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d0 [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a94abb35b1f09c96790c08bbcc6adedb9"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::usmv::susmv </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>. </td></tr>
+    <tr><td class="paramname">y</td><td>Dense vector <em>y</em>. </td></tr>
+    <tr><td class="paramname">incy</td><td>Stride of <em>y</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="classblas__sparse.html#acf0fe16da38fc03226e462dc6104cc68">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d0 [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a34dd1627794af46998a243677e1bbaa8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::usmv::zusmv </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>A</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>y</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incy</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Multiply by a dense vector. Either of <img class="formulaInl" alt="$Y \leftarrow \alpha A X + Y ,$" src="form_8.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^T X + Y,$" src="form_9.png"/> <img class="formulaInl" alt="$Y \leftarrow \alpha A^H X + Y$" src="form_10.png"/>, depending on the value of <code>transA</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transA</td><td>Transposition operator for matrix <em>A</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">A</td><td>A valid matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>. </td></tr>
+    <tr><td class="paramname">y</td><td>Dense vector <em>y</em>. </td></tr>
+    <tr><td class="paramname">incy</td><td>Stride of <em>y</em>. </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section note"><dt>Note</dt><dd></dd>
+<dd>
+</dd>
+<dd>
+By setting the <code><a class="el" href="classblas__sparse.html#acf0fe16da38fc03226e462dc6104cc68">blas_rsb_autotune_next_operation</a></code> property via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (at any time) the next multiplication routine call (either of <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d0 [...]
+<dl class="section see"><dt>See Also</dt><dd>On the topic of autotuning, see also <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. If <code>–enable-rsb-num-threads</code> has been specified at configure time, the <code>RSB_NUM_THREADS</code> environment variable will override the number of executing threads specified by <code>OMP_NUM_THREADS</code>. (See also <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847 [...]
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb__blas__sparse_8F90.html">rsb_blas_sparse.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfaceblas__sparse_1_1ussm.html b/doc/html/interfaceblas__sparse_1_1ussm.html
new file mode 100644
index 0000000..d03fd88
--- /dev/null
+++ b/doc/html/interfaceblas__sparse_1_1ussm.html
@@ -0,0 +1,423 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: blas_sparse::ussm Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classblas__sparse.html">blas_sparse</a></li><li class="navelem"><a class="el" href="interfaceblas__sparse_1_1ussm.html">ussm</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">blas_sparse::ussm Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>triangular solve: B <- alpha T^-1 B  
+ <a href="interfaceblas__sparse_1_1ussm.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a30729d148522f306da9f787961ddeae6"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1ussm.html#a30729d148522f306da9f787961ddeae6">sussm</a> (order, transT, nrhs, alpha, T, b, ldb, istat)</td></tr>
+<tr class="memdesc:a30729d148522f306da9f787961ddeae6"><td class="mdescLeft"> </td><td class="mdescRight">Triangular solve, by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>.  <a href="#a30729d14852 [...]
+<tr class="memitem:a25c815e459c07efcba93c29b156136c0"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1ussm.html#a25c815e459c07efcba93c29b156136c0">dussm</a> (order, transT, nrhs, alpha, T, b, ldb, istat)</td></tr>
+<tr class="memdesc:a25c815e459c07efcba93c29b156136c0"><td class="mdescLeft"> </td><td class="mdescRight">Triangular solve, by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>.  <a href="#a25c815e459c [...]
+<tr class="memitem:a19ec9206f2a3e66ccfddff2be3fb55ad"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1ussm.html#a19ec9206f2a3e66ccfddff2be3fb55ad">cussm</a> (order, transT, nrhs, alpha, T, b, ldb, istat)</td></tr>
+<tr class="memdesc:a19ec9206f2a3e66ccfddff2be3fb55ad"><td class="mdescLeft"> </td><td class="mdescRight">Triangular solve, by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>.  <a href="#a19ec9206f2a [...]
+<tr class="memitem:ab838e61a288bbd7b055ea37222d2c567"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1ussm.html#ab838e61a288bbd7b055ea37222d2c567">zussm</a> (order, transT, nrhs, alpha, T, b, ldb, istat)</td></tr>
+<tr class="memdesc:ab838e61a288bbd7b055ea37222d2c567"><td class="mdescLeft"> </td><td class="mdescRight">Triangular solve, by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>.  <a href="#ab838e61a288 [...]
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>triangular solve: b <- alpha A^-1 b </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
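+<p>A minimal sketch (editor's illustration, not part of the upstream page): assuming <code>T</code> is a valid triangular matrix handle and <code>b</code> holds <code>nrhs</code> right-hand-side columns with leading dimension <code>ldb</code>, a double-precision solve via the documented <code>dussm</code> signature is:</p>
+<pre class="fragment">
+program ussm_sketch
+  use blas_sparse              ! Sparse BLAS Fortran module shipped with librsb
+  implicit none
+  integer :: T, istat
+  integer, parameter :: nrhs = 2, ldb = 4
+  real(kind(1.d0)) :: alpha, b(ldb*nrhs)
+  ! T is assumed to be a valid triangular matrix handle created beforehand.
+  alpha = 1.d0; b = 1.d0
+  ! B <- alpha T^-1 B, column-major layout, untransposed.
+  call dussm(blas_colmajor, blas_no_trans, nrhs, alpha, T, b, ldb, istat)
+  if (istat .ne. 0) stop 'dussm failed'
+end program ussm_sketch
+</pre>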
+</div><h2>Member Function Documentation</h2>
+<a class="anchor" id="a19ec9206f2a3e66ccfddff2be3fb55ad"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::ussm::cussm </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Triangular solve, by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a25c815e459c07efcba93c29b156136c0"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::ussm::dussm </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Triangular solve, by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a30729d148522f306da9f787961ddeae6"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::ussm::sussm </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Triangular solve, by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="ab838e61a288bbd7b055ea37222d2c567"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::ussm::zussm </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>b</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>ldb</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Triangular solve, by a dense matrix (aka multi-vector). Either of <img class="formulaInl" alt="$B \leftarrow \alpha T^{-1} B,$" src="form_18.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-T} B,$" src="form_19.png"/> <img class="formulaInl" alt="$B \leftarrow \alpha T^{-H} B$" src="form_20.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">order</td><td>layour of the dense array. </td></tr>
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">nrhs</td><td>Number of right hand side columns. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">b</td><td>Dense vector <em>b</em>. </td></tr>
+    <tr><td class="paramname">ldb</td><td>Leading dimension of <em>b</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb__blas__sparse_8F90.html">rsb_blas_sparse.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfaceblas__sparse_1_1ussv.html b/doc/html/interfaceblas__sparse_1_1ussv.html
new file mode 100644
index 0000000..82610a4
--- /dev/null
+++ b/doc/html/interfaceblas__sparse_1_1ussv.html
@@ -0,0 +1,367 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: blas_sparse::ussv Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classblas__sparse.html">blas_sparse</a></li><li class="navelem"><a class="el" href="interfaceblas__sparse_1_1ussv.html">ussv</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">blas_sparse::ussv Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>triangular solve: x <- alpha T^-1 x  
+ <a href="interfaceblas__sparse_1_1ussv.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a3c65b4e4dcd66663b1424378932549c8"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1ussv.html#a3c65b4e4dcd66663b1424378932549c8">sussv</a> (transT, alpha, T, x, incx, istat)</td></tr>
+<tr class="memdesc:a3c65b4e4dcd66663b1424378932549c8"><td class="mdescLeft"> </td><td class="mdescRight">Triangular solve, by a dense vector. Either of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>.  <a href="#a3c65b4e4dcd66663b1424378932549c8" [...]
+<tr class="memitem:a45d49f6f9887a808109bbb4467efb1dc"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1ussv.html#a45d49f6f9887a808109bbb4467efb1dc">dussv</a> (transT, alpha, T, x, incx, istat)</td></tr>
+<tr class="memdesc:a45d49f6f9887a808109bbb4467efb1dc"><td class="mdescLeft"> </td><td class="mdescRight">Triangular solve, by a dense vector. Either of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>.  <a href="#a45d49f6f9887a808109bbb4467efb1dc" [...]
+<tr class="memitem:a8e0df43045904452d698c18dbb8b33a1"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1ussv.html#a8e0df43045904452d698c18dbb8b33a1">cussv</a> (transT, alpha, T, x, incx, istat)</td></tr>
+<tr class="memdesc:a8e0df43045904452d698c18dbb8b33a1"><td class="mdescLeft"> </td><td class="mdescRight">Triangular solve, by a dense vector. Either of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>.  <a href="#a8e0df43045904452d698c18dbb8b33a1" [...]
+<tr class="memitem:a9ff3d54dd856f144f7f22e9d6e5d3135"><td class="memItemLeft" align="right" valign="top">subroutine </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1ussv.html#a9ff3d54dd856f144f7f22e9d6e5d3135">zussv</a> (transT, alpha, T, x, incx, istat)</td></tr>
+<tr class="memdesc:a9ff3d54dd856f144f7f22e9d6e5d3135"><td class="mdescLeft"> </td><td class="mdescRight">Triangular solve, by a dense vector. Either of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>.  <a href="#a9ff3d54dd856f144f7f22e9d6e5d3135" [...]
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>triangular solve: b <- alpha A^-1 b </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
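+<p>A minimal sketch (editor's illustration, not part of the upstream page): assuming <code>T</code> is a valid triangular matrix handle, a double-precision in-place solve via the documented <code>dussv</code> signature is:</p>
+<pre class="fragment">
+program ussv_sketch
+  use blas_sparse              ! Sparse BLAS Fortran module shipped with librsb
+  implicit none
+  integer :: T, istat
+  real(kind(1.d0)) :: alpha, x(4)
+  ! T is assumed to be a valid triangular matrix handle created beforehand.
+  alpha = 1.d0; x = 1.d0
+  ! x <- alpha T^-1 x, unit stride, untransposed.
+  call dussv(blas_no_trans, alpha, T, x, 1, istat)
+  if (istat .ne. 0) stop 'dussv failed'
+end program ussv_sketch
+</pre>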
+</div><h2>Member Function Documentation</h2>
+<a class="anchor" id="a8e0df43045904452d698c18dbb8b33a1"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::ussv::cussv </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Triangular solve, by a dense vector. Either of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a45d49f6f9887a808109bbb4467efb1dc"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::ussv::dussv </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Triangular solve, by a dense vector. Either of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a3c65b4e4dcd66663b1424378932549c8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::ussv::sussv </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(kind(1.e0)), dimension (:) </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Triangular solve, by a dense vector. Either of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a9ff3d54dd856f144f7f22e9d6e5d3135"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">subroutine blas_sparse::ussv::zussv </td>
+          <td>(</td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)) </td>
+          <td class="paramname"><em>alpha</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>T</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">complex(kind(1.d0)), dimension (:) </td>
+          <td class="paramname"><em>x</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer </td>
+          <td class="paramname"><em>incx</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer, intent(out) </td>
+          <td class="paramname"><em>istat</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p>Triangular solve of a dense vector: one of <img class="formulaInl" alt="$X \leftarrow \alpha T^{-1}X,$" src="form_12.png"/> <img class="formulaInl" alt="$X \leftarrow \alpha T^{-T}X,$" src="form_13.png"/> or <img class="formulaInl" alt="$X \leftarrow \alpha T^{-H}X$" src="form_14.png"/>, depending on the value of <code>transT</code>. </p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">transT</td><td>Transposition operator for matrix <em>T</em>. </td></tr>
+    <tr><td class="paramname">alpha</td><td>Value for <img class="formulaInl" alt="$ \alpha $" src="form_11.png"/>. </td></tr>
+    <tr><td class="paramname">T</td><td>A valid triangular matrix handle. </td></tr>
+    <tr><td class="paramname">x</td><td>Dense vector <em>x</em>. </td></tr>
+    <tr><td class="paramname">incx</td><td>Stride of <em>x</em>.</td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section return"><dt>Returns</dt><dd>On success, 0 is returned; on error, -1. </dd></dl>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">istat</td><td>The return code will be written to <code>istat</code> (this is a Fortran routine): either 0 (success) or -1 (failure). </td></tr>
+  </table>
+  </dd>
+</dl>
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb__blas__sparse_8F90.html">rsb_blas_sparse.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfacersb_1_1rsb__coo__sort.html b/doc/html/interfacersb_1_1rsb__coo__sort.html
new file mode 100644
index 0000000..52eafdc
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__coo__sort.html
@@ -0,0 +1,140 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_coo_sort Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__coo__sort.html">rsb_coo_sort</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_coo_sort Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaa09eca432d5bb8c57fcff5d9ab98dfb8">rsb_coo_sort</a>.  
+ <a href="interfacersb_1_1rsb__coo__sort.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a5712d2c61081ca75f636c2474c7d815e"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__coo__sort.html#a5712d2c61081ca75f636c2474c7d815e">rsb_coo_sort</a> (VA, IA, JA, nnzA, nrA, ncA, typecode, flagsA)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaa09eca432d5bb8c57fcff5d9ab98dfb8">rsb_coo_sort</a>. </p>
+</div><h2>Member Function Documentation</h2>
+<a class="anchor" id="a5712d2c61081ca75f636c2474c7d815e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_coo_sort::rsb_coo_sort </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>nnzA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>nrA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>ncA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_signed_char) </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>flagsA</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
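
The member documentation above is empty, so a hedged usage sketch follows; the 'D' typecode (double precision) and the 0 flags value (taken to mean no flags, hence 0-based indices) are assumptions carried over from the C interface:

    program coo_sort_sketch
      use iso_c_binding
      use rsb
      implicit none
      integer(c_int), parameter :: nnz = 4, nr = 3, nc = 3
      real(c_double), target :: VA(nnz) = (/ 11d0, 22d0, 32d0, 13d0 /)
      integer(c_int), target :: IA(nnz) = (/ 0, 1, 2, 0 /)  ! 0-based rows
      integer(c_int), target :: JA(nnz) = (/ 0, 1, 1, 2 /)  ! 0-based columns
      integer(c_int) :: res
      ! Sort the coordinate triples in place:
      res = rsb_coo_sort(c_loc(VA), c_loc(IA), c_loc(JA), nnz, nr, nc, &
        &   int(ichar('D'), c_signed_char), 0_c_int)
    end program coo_sort_sketch
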
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfacersb_1_1rsb__file__mtx__get__dims.html b/doc/html/interfacersb_1_1rsb__file__mtx__get__dims.html
new file mode 100644
index 0000000..76fc691
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__file__mtx__get__dims.html
@@ -0,0 +1,122 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_file_mtx_get_dims Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__file__mtx__get__dims.html">rsb_file_mtx_get_dims</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_file_mtx_get_dims Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaa79f69918eafbd8f737b7866a00a0330">rsb_file_mtx_get_dims</a>.  
+ <a href="interfacersb_1_1rsb__file__mtx__get__dims.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:ac465d04ed5f480a291981ae4a853257f"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__file__mtx__get__dims.html#ac465d04ed5f480a291981ae4a853257f">rsb_file_mtx_get_dims</a> (filename, nrp, ncp, nzp, flagsp)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaa79f69918eafbd8f737b7866a00a0330">rsb_file_mtx_get_dims</a>. </p>
+</div><h2>Member Function Documentation</h2>
+<a class="anchor" id="ac465d04ed5f480a291981ae4a853257f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_file_mtx_get_dims::rsb_file_mtx_get_dims </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>filename</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>nrp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>ncp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>nzp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>flagsp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
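
A hedged sketch of querying a Matrix Market file's dimensions follows; the filename must be passed as a NUL-terminated C string, A.mtx is the sample matrix shipped with librsb, and passing c_null_ptr for the unwanted flags output is assumed to be accepted, as in the C API:

    program dims_sketch
      use iso_c_binding
      use rsb
      implicit none
      character(len=*), parameter :: path = 'A.mtx'
      character(kind=c_char), target :: fn(len(path)+1)
      integer(c_int), target :: nr, nc, nz
      integer(c_int) :: res
      fn = transfer(path//c_null_char, fn)      ! NUL-terminated C string
      res = rsb_file_mtx_get_dims(c_loc(fn), c_loc(nr), c_loc(nc), &
        &   c_loc(nz), c_null_ptr)              ! flags output not needed here
    end program dims_sketch
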
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfacersb_1_1rsb__file__mtx__load.html b/doc/html/interfacersb_1_1rsb__file__mtx__load.html
new file mode 100644
index 0000000..4b9954d
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__file__mtx__load.html
@@ -0,0 +1,116 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_file_mtx_load Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__file__mtx__load.html">rsb_file_mtx_load</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_file_mtx_load Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga00833b0cf57da8e430f9d0e2b5375bb3">rsb_file_mtx_load</a>.  
+ <a href="interfacersb_1_1rsb__file__mtx__load.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a7a8a1195bbef16b39f8e68a4286e2ea1"><td class="memItemLeft" align="right" valign="top">TYPE(C_PTR) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__file__mtx__load.html#a7a8a1195bbef16b39f8e68a4286e2ea1">rsb_file_mtx_load</a> (filename, flagsA, typecode, errvalp)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga00833b0cf57da8e430f9d0e2b5375bb3">rsb_file_mtx_load</a>. </p>
+</div><h2>Member Function Documentation</h2>
+<a class="anchor" id="a7a8a1195bbef16b39f8e68a4286e2ea1"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">TYPE(C_PTR) function rsb::rsb_file_mtx_load::rsb_file_mtx_load </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>filename</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>flagsA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_signed_char) </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>errvalp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
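
A hedged load sketch follows; it initializes the library first (see rsb_lib_init below) and reuses the 'D' typecode and 0 flags assumptions from the rsb_coo_sort sketch above:

    program load_sketch
      use iso_c_binding
      use rsb
      implicit none
      character(len=*), parameter :: path = 'A.mtx'
      character(kind=c_char), target :: fn(len(path)+1)
      integer(c_int), target :: errval
      integer(c_int) :: res
      type(c_ptr) :: mtxAp
      res = rsb_lib_init(c_null_ptr)            ! see rsb_lib_init below
      fn = transfer(path//c_null_char, fn)
      mtxAp = rsb_file_mtx_load(c_loc(fn), 0_c_int, &
        &   int(ichar('D'), c_signed_char), c_loc(errval))
      if (.not. c_associated(mtxAp)) stop 'rsb_file_mtx_load failed'
    end program load_sketch
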
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfacersb_1_1rsb__file__mtx__rndr.html b/doc/html/interfacersb_1_1rsb__file__mtx__rndr.html
new file mode 100644
index 0000000..0e1216f
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__file__mtx__rndr.html
@@ -0,0 +1,128 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_file_mtx_rndr Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__file__mtx__rndr.html">rsb_file_mtx_rndr</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_file_mtx_rndr Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga4b45a74b985f5cbd869bc9a540951771">rsb_file_mtx_rndr</a>.  
+ <a href="interfacersb_1_1rsb__file__mtx__rndr.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a325a0bceb4ab80ba11a3f7e99235936d"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__file__mtx__rndr.html#a325a0bceb4ab80ba11a3f7e99235936d">rsb_file_mtx_rndr</a> (pmp, filename, pmlWidth, pmWidth, pmHeight, rflags)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga4b45a74b985f5cbd869bc9a540951771">rsb_file_mtx_rndr</a>. </p>
+</div><h2>Member Function Documentation</h2>
+<a class="anchor" id="a325a0bceb4ab80ba11a3f7e99235936d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_file_mtx_rndr::rsb_file_mtx_rndr </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>pmp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>filename</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>pmlWidth</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>pmWidth</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>pmHeight</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>rflags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfacersb_1_1rsb__file__mtx__save.html b/doc/html/interfacersb_1_1rsb__file__mtx__save.html
new file mode 100644
index 0000000..f751746
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__file__mtx__save.html
@@ -0,0 +1,104 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_file_mtx_save Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__file__mtx__save.html">rsb_file_mtx_save</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_file_mtx_save Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad911ac7528c95c874d02cb17e6b76c54">rsb_file_mtx_save</a>.  
+ <a href="interfacersb_1_1rsb__file__mtx__save.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a819ab03ec355b43ecde9c6b43336d991"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__file__mtx__save.html#a819ab03ec355b43ecde9c6b43336d991">rsb_file_mtx_save</a> (mtxAp, filename)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad911ac7528c95c874d02cb17e6b76c54">rsb_file_mtx_save</a>. </p>
+</div><h2>Member Function Documentation</h2>
+<a class="anchor" id="a819ab03ec355b43ecde9c6b43336d991"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_file_mtx_save::rsb_file_mtx_save </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>filename</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
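
Continuing the load sketch above, saving a handle back to Matrix Market format is a single call; out.mtx is a hypothetical output path, again passed as a NUL-terminated C string:

    ! Fragment continuing load_sketch above, with mtxAp a valid handle:
    character(kind=c_char), target :: ofn(8)   ! len('out.mtx') + NUL
    integer(c_int) :: sres
    ofn = transfer('out.mtx'//c_null_char, ofn)
    sres = rsb_file_mtx_save(mtxAp, c_loc(ofn))
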
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfacersb_1_1rsb__file__vec__load.html b/doc/html/interfacersb_1_1rsb__file__vec__load.html
new file mode 100644
index 0000000..b12ab85
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__file__vec__load.html
@@ -0,0 +1,116 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_file_vec_load Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__file__vec__load.html">rsb_file_vec_load</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_file_vec_load Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad071e0373a08f74ee7ae910e9e4fd140">rsb_file_vec_load</a>.  
+ <a href="interfacersb_1_1rsb__file__vec__load.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a13774316ce6035da0ff647ca917d0d33"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__file__vec__load.html#a13774316ce6035da0ff647ca917d0d33">rsb_file_vec_load</a> (filename, typecode, Yp, yvlp)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad071e0373a08f74ee7ae910e9e4fd140">rsb_file_vec_load</a>. </p>
+</div><h2>Member Function Documentation</h2>
+<a class="anchor" id="a13774316ce6035da0ff647ca917d0d33"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_file_vec_load::rsb_file_vec_load </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>filename</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_signed_char) </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>Yp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>yvlp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
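
A hedged two-pass sketch follows; in the C documentation a null Yp makes rsb_file_vec_load report the vector length through yvlp, and the binding is assumed to behave the same; y.mtx is a hypothetical vector file:

    program vec_load_sketch
      use iso_c_binding
      use rsb
      implicit none
      character(len=*), parameter :: path = 'y.mtx'   ! hypothetical file
      character(kind=c_char), target :: fn(len(path)+1)
      integer(c_int), target :: yvl
      real(c_double), allocatable, target :: y(:)
      integer(c_int) :: res
      fn = transfer(path//c_null_char, fn)
      ! Pass 1: query the length only (Yp is null):
      res = rsb_file_vec_load(c_loc(fn), int(ichar('D'), c_signed_char), &
        &   c_null_ptr, c_loc(yvl))
      allocate(y(yvl))
      ! Pass 2: load the values:
      res = rsb_file_vec_load(c_loc(fn), int(ichar('D'), c_signed_char), &
        &   c_loc(y), c_null_ptr)
    end program vec_load_sketch
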
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfacersb_1_1rsb__file__vec__save.html b/doc/html/interfacersb_1_1rsb__file__vec__save.html
new file mode 100644
index 0000000..7f1ab30
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__file__vec__save.html
@@ -0,0 +1,116 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_file_vec_save Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__file__vec__save.html">rsb_file_vec_save</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_file_vec_save Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gac4b2a63cdfe1cd4083b1561ee4bea696">rsb_file_vec_save</a>.  
+ <a href="interfacersb_1_1rsb__file__vec__save.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a1a4271ef2990d373fb92e4d438a36678"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__file__vec__save.html#a1a4271ef2990d373fb92e4d438a36678">rsb_file_vec_save</a> (filename, typecode, Yp, yvl)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gac4b2a63cdfe1cd4083b1561ee4bea696">rsb_file_vec_save</a>. </p>
+</div><h2>Member Function Documentation</h2>
+<a class="anchor" id="a1a4271ef2990d373fb92e4d438a36678"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_file_vec_save::rsb_file_vec_save </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>filename</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_signed_char) </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>Yp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>yvl</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfacersb_1_1rsb__lib__exit.html b/doc/html/interfacersb_1_1rsb__lib__exit.html
new file mode 100644
index 0000000..841293c
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__lib__exit.html
@@ -0,0 +1,94 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_lib_exit Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__lib__exit.html">rsb_lib_exit</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_lib_exit Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a>.  
+ <a href="interfacersb_1_1rsb__lib__exit.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a7f978ab1fb36092abb76da1d2abefe44"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__lib__exit.html#a7f978ab1fb36092abb76da1d2abefe44">rsb_lib_exit</a> (iop)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a>. </p>
+</div><h2>Member Function Documentation</h2>
+<a class="anchor" id="a7f978ab1fb36092abb76da1d2abefe44"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_lib_exit::rsb_lib_exit </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>iop</em></td><td>)</td>
+          <td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfacersb_1_1rsb__lib__get__opt.html b/doc/html/interfacersb_1_1rsb__lib__get__opt.html
new file mode 100644
index 0000000..1021a4b
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__lib__get__opt.html
@@ -0,0 +1,104 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_lib_get_opt Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__lib__get__opt.html">rsb_lib_get_opt</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_lib_get_opt Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="rsb__rsb_8c.html#a96a28efc32dd050d2a74208b3ad2f227">rsb_lib_get_opt</a>.  
+ <a href="interfacersb_1_1rsb__lib__get__opt.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:ab87dc671d4ddb02ef1ff4a438c30f5a1"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__lib__get__opt.html#ab87dc671d4ddb02ef1ff4a438c30f5a1">rsb_lib_get_opt</a> (iof, iop)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="rsb__rsb_8c.html#a96a28efc32dd050d2a74208b3ad2f227">rsb_lib_get_opt</a>. </p>
+</div><h2>Member Function Documentation</h2>
+<a class="anchor" id="ab87dc671d4ddb02ef1ff4a438c30f5a1"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_lib_get_opt::rsb_lib_get_opt </td>
+          <td>(</td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>iof</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>iop</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
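
A hedged sketch of reading one library option follows; it assumes the RSB_IO_WANT_EXECUTING_THREADS constant from rsb.h is re-exported by the rsb module (if not, the corresponding integer value from rsb.h would be needed):

    program get_opt_sketch
      use iso_c_binding
      use rsb
      implicit none
      integer(c_int), target :: nt
      integer(c_int) :: res
      res = rsb_lib_init(c_null_ptr)
      ! Read the current number of executing threads into nt:
      res = rsb_lib_get_opt(RSB_IO_WANT_EXECUTING_THREADS, c_loc(nt))
      print *, 'executing threads:', nt
      res = rsb_lib_exit(c_null_ptr)
    end program get_opt_sketch
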
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfacersb_1_1rsb__lib__init.html b/doc/html/interfacersb_1_1rsb__lib__init.html
new file mode 100644
index 0000000..4b84d44
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__lib__init.html
@@ -0,0 +1,94 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_lib_init Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__lib__init.html">rsb_lib_init</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_lib_init Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>.  
+ <a href="interfacersb_1_1rsb__lib__init.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a769d6bd7b5f59ebf378fd9d956a10970"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__lib__init.html#a769d6bd7b5f59ebf378fd9d956a10970">rsb_lib_init</a> (iop)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>. </p>
+</div><h2>Member Function Documentation</h2>
+<a class="anchor" id="a769d6bd7b5f59ebf378fd9d956a10970"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_lib_init::rsb_lib_init </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>iop</em></td><td>)</td>
+          <td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
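
A minimal lifecycle sketch follows; passing c_null_ptr as iop is assumed to select the default options, mirroring the RSB_NULL_INIT_OPTIONS and RSB_NULL_EXIT_OPTIONS null pointers of the C API:

    program lifecycle_sketch
      use iso_c_binding
      use rsb
      implicit none
      integer(c_int) :: res
      res = rsb_lib_init(c_null_ptr)   ! ~ RSB_NULL_INIT_OPTIONS
      if (res .ne. 0) stop 'rsb_lib_init failed'
      ! ... build and use matrices (see e.g. rsb_file_mtx_load above) ...
      res = rsb_lib_exit(c_null_ptr)   ! ~ RSB_NULL_EXIT_OPTIONS
    end program lifecycle_sketch
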
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfacersb_1_1rsb__lib__reinit.html b/doc/html/interfacersb_1_1rsb__lib__reinit.html
new file mode 100644
index 0000000..42f6f85
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__lib__reinit.html
@@ -0,0 +1,94 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_lib_reinit Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__lib__reinit.html">rsb_lib_reinit</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_lib_reinit Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit</a>.  
+ <a href="interfacersb_1_1rsb__lib__reinit.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:af222e6ea5e86eb4dd635aed1b435cec2"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__lib__reinit.html#af222e6ea5e86eb4dd635aed1b435cec2">rsb_lib_reinit</a> (iop)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit</a>. </p>
+</div><h2>Member Function Documentation</h2>
+<a class="anchor" id="af222e6ea5e86eb4dd635aed1b435cec2"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_lib_reinit::rsb_lib_reinit </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>iop</em></td><td>)</td>
+          <td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfacersb_1_1rsb__lib__set__opt.html b/doc/html/interfacersb_1_1rsb__lib__set__opt.html
new file mode 100644
index 0000000..23fbdfe
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__lib__set__opt.html
@@ -0,0 +1,104 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_lib_set_opt Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__lib__set__opt.html">rsb_lib_set_opt</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_lib_set_opt Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="rsb__rsb_8c.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_lib_set_opt</a>.  
+ <a href="interfacersb_1_1rsb__lib__set__opt.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a676b1645de5c3b9c3b14c87ceb12ce3e"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__lib__set__opt.html#a676b1645de5c3b9c3b14c87ceb12ce3e">rsb_lib_set_opt</a> (iof, iop)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="rsb__rsb_8c.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_lib_set_opt</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a676b1645de5c3b9c3b14c87ceb12ce3e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_lib_set_opt::rsb_lib_set_opt </td>
+          <td>(</td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>iof</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>iop</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
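
(A hedged usage sketch for the page above: iof selects an option id, iop points at
the new value. The fragment assumes USE rsb and USE iso_c_binding (for C_INT and
C_LOC) as in the previous sketch, and that the RSB_IO_* option ids of the C header
are reachable from Fortran; the specific id shown is illustrative.)

    INTEGER(C_INT)         :: errval
    INTEGER(C_INT), TARGET :: nthreads = 4_c_int
    ! Assumed option id; c_loc passes a C pointer to the new value.
    errval = rsb_lib_set_opt(RSB_IO_WANT_EXECUTING_THREADS, c_loc(nthreads))
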
diff --git a/doc/html/interfacersb_1_1rsb__lib__set__opt__str.html b/doc/html/interfacersb_1_1rsb__lib__set__opt__str.html
new file mode 100644
index 0000000..813e04a
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__lib__set__opt__str.html
@@ -0,0 +1,104 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_lib_set_opt_str Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__lib__set__opt__str.html">rsb_lib_set_opt_str</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_lib_set_opt_str Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga4670aa682e70f82d5039c600e426a368">rsb_lib_set_opt_str</a>.  
+ <a href="interfacersb_1_1rsb__lib__set__opt__str.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:af0f406c52171320cf25529324b064db8"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__lib__set__opt__str.html#af0f406c52171320cf25529324b064db8">rsb_lib_set_opt_str</a> (opnp, opvp)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga4670aa682e70f82d5039c600e426a368">rsb_lib_set_opt_str</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="af0f406c52171320cf25529324b064db8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_lib_set_opt_str::rsb_lib_set_opt_str </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>opnp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>opvp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
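
(Sketch for the page above: both opnp and opvp are C pointers to NUL-terminated
strings. The option name shown is illustrative, C_NULL_CHAR comes from
iso_c_binding, and taking C_LOC of a CHARACTER scalar with LEN > 1 relies on
Fortran 2008 semantics.)

    CHARACTER(KIND=C_CHAR, LEN=64), TARGET :: opn, opv
    INTEGER(C_INT) :: errval
    opn = "RSB_IO_WANT_VERBOSE_TUNING" // C_NULL_CHAR  ! illustrative option name
    opv = "1" // C_NULL_CHAR
    errval = rsb_lib_set_opt_str(c_loc(opn), c_loc(opv))
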
diff --git a/doc/html/interfacersb_1_1rsb__mtx__add__to__dense.html b/doc/html/interfacersb_1_1rsb__mtx__add__to__dense.html
new file mode 100644
index 0000000..f390b2a
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__add__to__dense.html
@@ -0,0 +1,134 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_add_to_dense Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__add__to__dense.html">rsb_mtx_add_to_dense</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_add_to_dense Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaf30a70ea183d30d216f700782fc01524">rsb_mtx_add_to_dense</a>.  
+ <a href="interfacersb_1_1rsb__mtx__add__to__dense.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a073fda633ce6ad1ac0128e7e80cc7a1a"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__add__to__dense.html#a073fda633ce6ad1ac0128e7e80cc7a1a">rsb_mtx_add_to_dense</a> (alphap, mtxAp, ldB, nrB, ncB, rowmajorB, Bp)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaf30a70ea183d30d216f700782fc01524">rsb_mtx_add_to_dense</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a073fda633ce6ad1ac0128e7e80cc7a1a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_mtx_add_to_dense::rsb_mtx_add_to_dense </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>ldB</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>nrB</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>ncB</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>rowmajorB</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>Bp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
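
(Sketch for the page above: accumulate a sparse matrix into a dense array, roughly
B := B + alpha*A. The handle mtxAp is assumed to come from one of the constructors
documented below, and rowmajorB = 0 is assumed to mean column-major, Fortran's
natural layout.)

    INTEGER(C_INT), PARAMETER :: nrB = 10_c_int, ncB = 10_c_int, ldB = nrB
    REAL(C_DOUBLE), TARGET    :: alpha = 1.0_c_double
    REAL(C_DOUBLE), TARGET    :: B(ldB, ncB)
    TYPE(C_PTR)               :: mtxAp   ! previously assembled matrix handle
    INTEGER(C_INT)            :: errval
    B = 0.0_c_double
    errval = rsb_mtx_add_to_dense(c_loc(alpha), mtxAp, ldB, nrB, ncB, &
           & 0_c_int, c_loc(B))
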
diff --git a/doc/html/interfacersb_1_1rsb__mtx__alloc__from__coo__begin.html b/doc/html/interfacersb_1_1rsb__mtx__alloc__from__coo__begin.html
new file mode 100644
index 0000000..34e6159
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__alloc__from__coo__begin.html
@@ -0,0 +1,128 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_alloc_from_coo_begin Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__begin.html">rsb_mtx_alloc_from_coo_begin</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_alloc_from_coo_begin Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gafca80e53d47a7ec3eb116e755fe47c58">rsb_mtx_alloc_from_coo_begin</a>.  
+ <a href="interfacersb_1_1rsb__mtx__alloc__from__coo__begin.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:acdbe2149810598ad743510fb43850063"><td class="memItemLeft" align="right" valign="top">TYPE(C_PTR) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__begin.html#acdbe2149810598ad743510fb43850063">rsb_mtx_alloc_from_coo_begin</a> (nnzA, typecode, nrA, ncA, flagsA, errvalp)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gafca80e53d47a7ec3eb116e755fe47c58">rsb_mtx_alloc_from_coo_begin</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="acdbe2149810598ad743510fb43850063"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">TYPE(C_PTR) function rsb::rsb_mtx_alloc_from_coo_begin::rsb_mtx_alloc_from_coo_begin </td>
+          <td>(</td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>nnzA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_signed_char) </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>nrA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>ncA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>flagsA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>errvalp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
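
(Sketch of the two-phase assembly this interface starts; it is finished by
rsb_mtx_alloc_from_coo_end, documented a few pages below. The rsb_mtx_set_vals
call, zero-based default indexing, flagsA = 0 and typecode 'D' for double-precision
entries are assumptions about the binding.)

    INTEGER(C_INT), PARAMETER :: nnzA = 4_c_int, nrA = 4_c_int, ncA = 4_c_int
    INTEGER(C_INT), PARAMETER :: flagsA = 0_c_int   ! assumed: default flags
    INTEGER(C_SIGNED_CHAR), PARAMETER :: typecode = ICHAR('D', C_SIGNED_CHAR)
    INTEGER(C_INT), TARGET :: IA(nnzA) = (/0,1,2,3/), JA(nnzA) = (/0,1,2,3/)
    REAL(C_DOUBLE), TARGET :: VA(nnzA) = (/1.0_c_double, 2.0_c_double, &
                                         & 3.0_c_double, 4.0_c_double/)
    INTEGER(C_INT), TARGET :: errval
    TYPE(C_PTR),    TARGET :: mtxAp
    mtxAp  = rsb_mtx_alloc_from_coo_begin(nnzA, typecode, nrA, ncA, flagsA, &
           & c_loc(errval))
    errval = rsb_mtx_set_vals(mtxAp, c_loc(VA), c_loc(IA), c_loc(JA), nnzA, flagsA)
    errval = rsb_mtx_alloc_from_coo_end(c_loc(mtxAp)) ! pointer to the handle itself
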
diff --git a/doc/html/interfacersb_1_1rsb__mtx__alloc__from__coo__const.html b/doc/html/interfacersb_1_1rsb__mtx__alloc__from__coo__const.html
new file mode 100644
index 0000000..8908414
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__alloc__from__coo__const.html
@@ -0,0 +1,158 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_alloc_from_coo_const Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__const.html">rsb_mtx_alloc_from_coo_const</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_alloc_from_coo_const Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_from_coo_const</a>.  
+ <a href="interfacersb_1_1rsb__mtx__alloc__from__coo__const.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a343f0c34a21b70af5723b84f906f04ed"><td class="memItemLeft" align="right" valign="top">TYPE(C_PTR) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__const.html#a343f0c34a21b70af5723b84f906f04ed">rsb_mtx_alloc_from_coo_const</a> (VA, IA, JA, nnzA, typecode, nrA, ncA, brA, bcA, flagsA, errvalp)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_from_coo_const</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a343f0c34a21b70af5723b84f906f04ed"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">TYPE(C_PTR) function rsb::rsb_mtx_alloc_from_coo_const::rsb_mtx_alloc_from_coo_const </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>nnzA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_signed_char) </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>nrA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>ncA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>brA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>bcA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>flagsA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>errvalp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
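
(Sketch for the page above, reusing the arrays declared in the previous sketch: the
whole COO triplet set is handed over in one call. brA and bcA are blocking
parameters; passing 0 is assumed to leave the blocking choice to the library.)

    mtxAp = rsb_mtx_alloc_from_coo_const(c_loc(VA), c_loc(IA), c_loc(JA), nnzA, &
          & typecode, nrA, ncA, 0_c_int, 0_c_int, flagsA, c_loc(errval))
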
diff --git a/doc/html/interfacersb_1_1rsb__mtx__alloc__from__coo__end.html b/doc/html/interfacersb_1_1rsb__mtx__alloc__from__coo__end.html
new file mode 100644
index 0000000..ab240cc
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__alloc__from__coo__end.html
@@ -0,0 +1,94 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_alloc_from_coo_end Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__end.html">rsb_mtx_alloc_from_coo_end</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_alloc_from_coo_end Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab583fbefa0a66e9d30dac034480c2d86">rsb_mtx_alloc_from_coo_end</a>.  
+ <a href="interfacersb_1_1rsb__mtx__alloc__from__coo__end.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:acb65cf6cb8c6965a1f19f3a77a3bb635"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__end.html#acb65cf6cb8c6965a1f19f3a77a3bb635">rsb_mtx_alloc_from_coo_end</a> (mtxApp)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab583fbefa0a66e9d30dac034480c2d86">rsb_mtx_alloc_from_coo_end</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="acb65cf6cb8c6965a1f19f3a77a3bb635"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_mtx_alloc_from_coo_end::rsb_mtx_alloc_from_coo_end </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxApp</em></td><td>)</td>
+          <td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/interfacersb_1_1rsb__mtx__alloc__from__coo__inplace.html b/doc/html/interfacersb_1_1rsb__mtx__alloc__from__coo__inplace.html
new file mode 100644
index 0000000..920de02
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__alloc__from__coo__inplace.html
@@ -0,0 +1,158 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_alloc_from_coo_inplace Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__inplace.html">rsb_mtx_alloc_from_coo_inplace</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_alloc_from_coo_inplace Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_mtx_alloc_from_coo_inplace</a>.  
+ <a href="interfacersb_1_1rsb__mtx__alloc__from__coo__inplace.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a79dd14c8140e12f2f4490a488468c406"><td class="memItemLeft" align="right" valign="top">TYPE(C_PTR) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__inplace.html#a79dd14c8140e12f2f4490a488468c406">rsb_mtx_alloc_from_coo_inplace</a> (VA, IA, JA, nnzA, typecode, nrA, ncA, brA, bcA, flagsA, errvalp)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_mtx_alloc_from_coo_inplace</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a79dd14c8140e12f2f4490a488468c406"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">TYPE(C_PTR) function rsb::rsb_mtx_alloc_from_coo_inplace::rsb_mtx_alloc_from_coo_inplace </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>nnzA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_signed_char) </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>nrA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>ncA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>brA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>bcA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>flagsA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>errvalp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
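
(Same argument list as rsb_mtx_alloc_from_coo_const above; the difference, as the
name suggests, is assumed to be that the caller's VA/IA/JA buffers become the
matrix storage, so they are clobbered and must stay allocated while the matrix
lives.)

    mtxAp = rsb_mtx_alloc_from_coo_inplace(c_loc(VA), c_loc(IA), c_loc(JA), nnzA, &
          & typecode, nrA, ncA, 0_c_int, 0_c_int, flagsA, c_loc(errval))
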
diff --git a/doc/html/interfacersb_1_1rsb__mtx__alloc__from__csc__const.html b/doc/html/interfacersb_1_1rsb__mtx__alloc__from__csc__const.html
new file mode 100644
index 0000000..5893d7e
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__alloc__from__csc__const.html
@@ -0,0 +1,158 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_alloc_from_csc_const Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csc__const.html">rsb_mtx_alloc_from_csc_const</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_alloc_from_csc_const Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaebf57d9e5263f41eb6163581ffc141aa">rsb_mtx_alloc_from_csc_const</a>.  
+ <a href="interfacersb_1_1rsb__mtx__alloc__from__csc__const.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:ae148c3e7567302bb4b8312482f47b057"><td class="memItemLeft" align="right" valign="top">TYPE(C_PTR) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csc__const.html#ae148c3e7567302bb4b8312482f47b057">rsb_mtx_alloc_from_csc_const</a> (VA, IA, CP, nnzA, typecode, nrA, ncA, brA, bcA, flagsA, errvalp)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaebf57d9e5263f41eb6163581ffc141aa">rsb_mtx_alloc_from_csc_const</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="ae148c3e7567302bb4b8312482f47b057"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">TYPE(C_PTR) function rsb::rsb_mtx_alloc_from_csc_const::rsb_mtx_alloc_from_csc_const </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>CP</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>nnzA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_signed_char) </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>nrA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>ncA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>brA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>bcA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>flagsA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>errvalp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
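
(Sketch for the CSC constructor above: IA now holds the row index of each entry and
CP is assumed to hold the ncA+1 column pointers; the remaining arguments match the
COO sketches.)

    INTEGER(C_INT), TARGET :: CP(ncA + 1)
    ! ... fill VA, IA and CP with a valid CSC representation ...
    mtxAp = rsb_mtx_alloc_from_csc_const(c_loc(VA), c_loc(IA), c_loc(CP), nnzA, &
          & typecode, nrA, ncA, 0_c_int, 0_c_int, flagsA, c_loc(errval))
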
diff --git a/doc/html/interfacersb_1_1rsb__mtx__alloc__from__csr__const.html b/doc/html/interfacersb_1_1rsb__mtx__alloc__from__csr__const.html
new file mode 100644
index 0000000..6d142cb
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__alloc__from__csr__const.html
@@ -0,0 +1,158 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_alloc_from_csr_const Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csr__const.html">rsb_mtx_alloc_from_csr_const</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_alloc_from_csr_const Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga13d417f776654fd159f274e56191573e">rsb_mtx_alloc_from_csr_const</a>.  
+ <a href="interfacersb_1_1rsb__mtx__alloc__from__csr__const.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:ac4d4d18a8a83b5790b9472750ef96e0e"><td class="memItemLeft" align="right" valign="top">TYPE(C_PTR) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csr__const.html#ac4d4d18a8a83b5790b9472750ef96e0e">rsb_mtx_alloc_from_csr_const</a> (VA, RP, JA, nnzA, typecode, nrA, ncA, brA, bcA, flagsA, errvalp)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga13d417f776654fd159f274e56191573e">rsb_mtx_alloc_from_csr_const</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="ac4d4d18a8a83b5790b9472750ef96e0e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">TYPE(C_PTR) function rsb::rsb_mtx_alloc_from_csr_const::rsb_mtx_alloc_from_csr_const </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>RP</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>nnzA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_signed_char) </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>nrA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>ncA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>brA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>bcA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>flagsA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>errvalp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
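
(Sketch for the CSR constructor above, the row-wise counterpart of the CSC page:
RP is assumed to hold the nrA+1 row pointers and JA the column index of each
entry.)

    INTEGER(C_INT), TARGET :: RP(nrA + 1)
    ! ... fill VA, RP and JA with a valid CSR representation ...
    mtxAp = rsb_mtx_alloc_from_csr_const(c_loc(VA), c_loc(RP), c_loc(JA), nnzA, &
          & typecode, nrA, ncA, 0_c_int, 0_c_int, flagsA, c_loc(errval))
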
diff --git a/doc/html/interfacersb_1_1rsb__mtx__alloc__from__csr__inplace.html b/doc/html/interfacersb_1_1rsb__mtx__alloc__from__csr__inplace.html
new file mode 100644
index 0000000..780c71f
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__alloc__from__csr__inplace.html
@@ -0,0 +1,158 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_alloc_from_csr_inplace Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csr__inplace.html">rsb_mtx_alloc_from_csr_inplace</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_alloc_from_csr_inplace Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga60121166daf00968ba717931f04ea455">rsb_mtx_alloc_from_csr_inplace</a>.  
+ <a href="interfacersb_1_1rsb__mtx__alloc__from__csr__inplace.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:ae987825747d0697dd1fa7b7b67a8b509"><td class="memItemLeft" align="right" valign="top">TYPE(C_PTR) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csr__inplace.html#ae987825747d0697dd1fa7b7b67a8b509">rsb_mtx_alloc_from_csr_inplace</a> (VA, RP, JA, nnzA, typecode, nrA, ncA, brA, bcA, flagsA, errvalp)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga60121166daf00968ba717931f04ea455">rsb_mtx_alloc_from_csr_inplace</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="ae987825747d0697dd1fa7b7b67a8b509"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">TYPE(C_PTR) function rsb::rsb_mtx_alloc_from_csr_inplace::rsb_mtx_alloc_from_csr_inplace </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>RP</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>nnzA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_signed_char) </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>nrA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>ncA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>brA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>bcA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>flagsA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>errvalp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
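
The page above gives only the binding's signature, so here is a minimal Fortran sketch (not part of the committed HTML) of a call to rsb_mtx_alloc_from_csr_inplace. The typecode 'D' (double), the zero flags value, the 0-based indices, and the brA=bcA=1 blocking are assumptions carried over from rsb.h conventions; error checking and rsb_lib_init are omitted.

    program csr_inplace_sketch
      use rsb              ! module generated from rsb.F90; assumed in scope
      use iso_c_binding
      implicit none
      integer(c_int), parameter :: nrA = 2, ncA = 2, nnzA = 3
      real(c_double), target :: VA(nnzA) = (/ 1d0, 2d0, 3d0 /)
      integer(c_int), target :: RP(nrA+1) = (/ 0, 1, 3 /)  ! 0-based row pointers
      integer(c_int), target :: JA(nnzA)  = (/ 0, 0, 1 /)  ! 0-based column indices
      integer(c_int), target :: errval
      type(c_ptr) :: mtxAp
      ! rsb_lib_init(...) assumed to have been called already.
      mtxAp = rsb_mtx_alloc_from_csr_inplace(c_loc(VA), c_loc(RP), c_loc(JA), &
        nnzA, int(ichar('D'), c_signed_char), nrA, ncA, 1_c_int, 1_c_int, &
        0_c_int, c_loc(errval))   ! brA=bcA=1: blocking assumed ignored; flags 0
      mtxAp = rsb_mtx_free(mtxAp)
    end program csr_inplace_sketch

Because the arrays are adopted in place, they must remain valid for the handle's lifetime; the exact ownership rules are in the C documentation linked from this page.
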
diff --git a/doc/html/interfacersb_1_1rsb__mtx__clone.html b/doc/html/interfacersb_1_1rsb__mtx__clone.html
new file mode 100644
index 0000000..add4d76
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__clone.html
@@ -0,0 +1,128 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_clone Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__clone.html">rsb_mtx_clone</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_clone Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a>.  
+ <a href="interfacersb_1_1rsb__mtx__clone.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a9d94feec5e252fa47ee272dc6dc9d896"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__clone.html#a9d94feec5e252fa47ee272dc6dc9d896">rsb_mtx_clone</a> (mtxBpp, typecode, transA, alphap, mtxAp, flags)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a9d94feec5e252fa47ee272dc6dc9d896"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_mtx_clone::rsb_mtx_clone </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxBpp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_signed_char) </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>flags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
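
For the clone interface, a hedged sketch (again, not from the generated page): a C_NULL_PTR target as mtxBpp requests a fresh copy, ichar('N') matches the C constant RSB_TRANSPOSITION_N, and alpha = 1 leaves the values unscaled; these conventions are assumptions from the C API.

    subroutine clone_sketch(mtxAp)
      use rsb
      use iso_c_binding
      implicit none
      type(c_ptr), intent(in) :: mtxAp   ! source matrix, built elsewhere
      type(c_ptr), target :: mtxBp
      real(c_double), target :: alpha
      integer(c_int) :: istat
      mtxBp = c_null_ptr                 ! NULL: allocate a new clone
      alpha = 1d0                        ! identity scaling
      istat = rsb_mtx_clone(c_loc(mtxBp), int(ichar('D'), c_signed_char), &
        int(ichar('N'), c_int), c_loc(alpha), mtxAp, 0_c_int)
    end subroutine clone_sketch
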
diff --git a/doc/html/interfacersb_1_1rsb__mtx__free.html b/doc/html/interfacersb_1_1rsb__mtx__free.html
new file mode 100644
index 0000000..e0bf165
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__free.html
@@ -0,0 +1,94 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_free Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__free.html">rsb_mtx_free</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_free Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a>.  
+ <a href="interfacersb_1_1rsb__mtx__free.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a09a2dbae0d1161971139c2156cbef16d"><td class="memItemLeft" align="right" valign="top">TYPE(C_PTR) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__free.html#a09a2dbae0d1161971139c2156cbef16d">rsb_mtx_free</a> (mtxAp)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a09a2dbae0d1161971139c2156cbef16d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">TYPE(C_PTR) function rsb::rsb_mtx_free::rsb_mtx_free </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em></td><td>)</td>
+          <td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
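
rsb_mtx_free is the destructor for handles produced by the rsb_mtx_alloc_* family; a one-line sketch, assuming the rsb module is in scope:

    subroutine free_sketch(mtxAp)
      use rsb
      use iso_c_binding
      implicit none
      type(c_ptr), intent(inout) :: mtxAp
      mtxAp = rsb_mtx_free(mtxAp)  ! reassigning keeps the dead handle from dangling
    end subroutine free_sketch
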
diff --git a/doc/html/interfacersb_1_1rsb__mtx__get__coo.html b/doc/html/interfacersb_1_1rsb__mtx__get__coo.html
new file mode 100644
index 0000000..4269e1e
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__get__coo.html
@@ -0,0 +1,122 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_get_coo Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__get__coo.html">rsb_mtx_get_coo</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_get_coo Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaac3c6c033733a8101b9ccf56f8fc7112">rsb_mtx_get_coo</a>.  
+ <a href="interfacersb_1_1rsb__mtx__get__coo.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:ad56231129b3d2be969605ab3c43020fe"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__coo.html#ad56231129b3d2be969605ab3c43020fe">rsb_mtx_get_coo</a> (mtxAp, VA, IA, JA, flags)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaac3c6c033733a8101b9ccf56f8fc7112">rsb_mtx_get_coo</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="ad56231129b3d2be969605ab3c43020fe"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_mtx_get_coo::rsb_mtx_get_coo </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>flags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
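
A sketch of extracting a whole matrix in COO form: the caller sizes VA/IA/JA to the handle's nonzero count (obtainable via rsb_mtx_get_info), and the zero flags value, meaning default 0-based indices, is an assumption from the C API.

    subroutine get_coo_sketch(mtxAp, nnzA)
      use rsb
      use iso_c_binding
      implicit none
      type(c_ptr), intent(in) :: mtxAp
      integer(c_int), intent(in) :: nnzA           ! the handle's nonzero count
      real(c_double), target :: VA(nnzA)
      integer(c_int), target :: IA(nnzA), JA(nnzA)
      integer(c_int) :: istat
      istat = rsb_mtx_get_coo(mtxAp, c_loc(VA), c_loc(IA), c_loc(JA), 0_c_int)
    end subroutine get_coo_sketch
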
diff --git a/doc/html/interfacersb_1_1rsb__mtx__get__coo__block.html b/doc/html/interfacersb_1_1rsb__mtx__get__coo__block.html
new file mode 100644
index 0000000..6d11077
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__get__coo__block.html
@@ -0,0 +1,164 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_get_coo_block Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__get__coo__block.html">rsb_mtx_get_coo_block</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_get_coo_block Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb_mtx_get_coo_block</a>.  
+ <a href="interfacersb_1_1rsb__mtx__get__coo__block.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a15cad9939688f96a5164b0ed6873bf00"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__coo__block.html#a15cad9939688f96a5164b0ed6873bf00">rsb_mtx_get_coo_block</a> (mtxAp, VA, IA, JA, frA, lrA, fcA, lcA, IREN, JREN, rnzp, flags)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb_mtx_get_coo_block</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a15cad9939688f96a5164b0ed6873bf00"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_mtx_get_coo_block::rsb_mtx_get_coo_block </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>frA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>lrA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>fcA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>lcA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>IREN</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>JREN</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>rnzp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>flags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
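
The block variant extracts the submatrix rows frA..lrA, columns fcA..lcA. The probe-first idiom sketched below (NULL output arrays to learn the block's nonzero count through rnzp) is assumed from the C documentation of this function, not from this page.

    subroutine get_block_sketch(mtxAp, frA, lrA, fcA, lcA)
      use rsb
      use iso_c_binding
      implicit none
      type(c_ptr), intent(in) :: mtxAp
      integer(c_int), intent(in) :: frA, lrA, fcA, lcA
      integer(c_int), target :: rnz
      integer(c_int) :: istat
      ! First pass: NULL arrays, no renumbering (IREN/JREN NULL), count only.
      istat = rsb_mtx_get_coo_block(mtxAp, c_null_ptr, c_null_ptr, c_null_ptr, &
        frA, lrA, fcA, lcA, c_null_ptr, c_null_ptr, c_loc(rnz), 0_c_int)
      ! Second pass (omitted): allocate VA/IA/JA of size rnz and call again.
    end subroutine get_block_sketch
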
diff --git a/doc/html/interfacersb_1_1rsb__mtx__get__csr.html b/doc/html/interfacersb_1_1rsb__mtx__get__csr.html
new file mode 100644
index 0000000..d96a17d
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__get__csr.html
@@ -0,0 +1,128 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_get_csr Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__get__csr.html">rsb_mtx_get_csr</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_get_csr Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga4adca460f50bc1ad7d9ffdfda2273b87">rsb_mtx_get_csr</a>.  
+ <a href="interfacersb_1_1rsb__mtx__get__csr.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a26e57debd9264300f0436440df805625"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__csr.html#a26e57debd9264300f0436440df805625">rsb_mtx_get_csr</a> (typecode, mtxAp, VA, RP, JA, flags)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga4adca460f50bc1ad7d9ffdfda2273b87">rsb_mtx_get_csr</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a26e57debd9264300f0436440df805625"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_mtx_get_csr::rsb_mtx_get_csr </td>
+          <td>(</td>
+          <td class="paramtype">integer(c_signed_char) </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>RP</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>flags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
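
The CSR extractor mirrors the COO one but fills row pointers instead of row indices; in this sketch the typecode 'D' and zero flags are again assumed conventions, and the arrays are caller-sized.

    subroutine get_csr_sketch(mtxAp, nrA, nnzA)
      use rsb
      use iso_c_binding
      implicit none
      type(c_ptr), intent(in) :: mtxAp
      integer(c_int), intent(in) :: nrA, nnzA
      real(c_double), target :: VA(nnzA)
      integer(c_int), target :: RP(nrA+1), JA(nnzA)  ! RP gets nrA+1 row pointers
      integer(c_int) :: istat
      istat = rsb_mtx_get_csr(int(ichar('D'), c_signed_char), mtxAp, &
        c_loc(VA), c_loc(RP), c_loc(JA), 0_c_int)
    end subroutine get_csr_sketch
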
diff --git a/doc/html/interfacersb_1_1rsb__mtx__get__info.html b/doc/html/interfacersb_1_1rsb__mtx__get__info.html
new file mode 100644
index 0000000..7dae084
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__get__info.html
@@ -0,0 +1,110 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_get_info Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__get__info.html">rsb_mtx_get_info</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_get_info Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad9a3eacd54fb7043464006cd57866edf">rsb_mtx_get_info</a>.  
+ <a href="interfacersb_1_1rsb__mtx__get__info.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a279563ac765d73fed65942786f0b56f3"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__info.html#a279563ac765d73fed65942786f0b56f3">rsb_mtx_get_info</a> (mtxAp, miflags, minfop)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad9a3eacd54fb7043464006cd57866edf">rsb_mtx_get_info</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a279563ac765d73fed65942786f0b56f3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_mtx_get_info::rsb_mtx_get_info </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>miflags</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>minfop</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
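
rsb_mtx_get_info fetches one scalar datum selected by miflags (a value of the rsb_mtx_info_t enumeration in rsb.h); the output type must match the queried field, which is why the sketch leaves the selector to the caller rather than hard-coding an enum value.

    subroutine get_info_sketch(mtxAp, miflags)
      use rsb
      use iso_c_binding
      implicit none
      type(c_ptr), intent(in) :: mtxAp
      integer(c_int), intent(in) :: miflags  ! one of rsb.h's rsb_mtx_info_t values
      integer(c_int), target :: info         ! type must match the queried field
      integer(c_int) :: istat
      istat = rsb_mtx_get_info(mtxAp, miflags, c_loc(info))
    end subroutine get_info_sketch
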
diff --git a/doc/html/interfacersb_1_1rsb__mtx__get__info__str.html b/doc/html/interfacersb_1_1rsb__mtx__get__info__str.html
new file mode 100644
index 0000000..e78b70f
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__get__info__str.html
@@ -0,0 +1,116 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_get_info_str Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__get__info__str.html">rsb_mtx_get_info_str</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_get_info_str Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str</a>.  
+ <a href="interfacersb_1_1rsb__mtx__get__info__str.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a36f95acdfcb25020c8ef5cc3e46f65f5"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__info__str.html#a36f95acdfcb25020c8ef5cc3e46f65f5">rsb_mtx_get_info_str</a> (mtxAp, mis, minfop, buflen)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a36f95acdfcb25020c8ef5cc3e46f65f5"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_mtx_get_info_str::rsb_mtx_get_info_str </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mis</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>minfop</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_size_t) </td>
+          <td class="paramname"><em>buflen</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
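
The string variant takes the query by name: mis points to a NUL-terminated identifier (one of the RSB_MIF_* names from rsb.h, per the C docs) and the answer is formatted into a caller buffer of buflen bytes. A sketch, with the string preparation left to the caller:

    subroutine get_info_str_sketch(mtxAp, mis)
      use rsb
      use iso_c_binding
      implicit none
      type(c_ptr), intent(in) :: mtxAp
      type(c_ptr), intent(in) :: mis               ! NUL-terminated query name
      character(kind=c_char), target :: buf(256)   ! receives the formatted answer
      integer(c_int) :: istat
      istat = rsb_mtx_get_info_str(mtxAp, mis, c_loc(buf), &
        int(size(buf), c_size_t))
    end subroutine get_info_str_sketch
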
diff --git a/doc/html/interfacersb_1_1rsb__mtx__get__nrm.html b/doc/html/interfacersb_1_1rsb__mtx__get__nrm.html
new file mode 100644
index 0000000..2c9895a
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__get__nrm.html
@@ -0,0 +1,110 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_get_nrm Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__get__nrm.html">rsb_mtx_get_nrm</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_get_nrm Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga6a645ce89fd167d72c92cdcfbcd8ed81">rsb_mtx_get_nrm</a>.  
+ <a href="interfacersb_1_1rsb__mtx__get__nrm.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:ac31aeb4a3fa773f965833de0a7f430f8"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__nrm.html#ac31aeb4a3fa773f965833de0a7f430f8">rsb_mtx_get_nrm</a> (mtxAp, Np, flags)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga6a645ce89fd167d72c92cdcfbcd8ed81">rsb_mtx_get_nrm</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="ac31aeb4a3fa773f965833de0a7f430f8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_mtx_get_nrm::rsb_mtx_get_nrm </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>Np</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>flags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
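
Here flags does not carry index options but selects the norm (an rsb_extff_t value from rsb.h, e.g. the one-norm or infinity-norm constants); Np receives a scalar of the matrix's numerical type. A sketch with the selector passed through:

    subroutine get_nrm_sketch(mtxAp, norm_flag)
      use rsb
      use iso_c_binding
      implicit none
      type(c_ptr), intent(in) :: mtxAp
      integer(c_int), intent(in) :: norm_flag  ! an rsb_extff_t norm selector
      real(c_double), target :: nrm            ! must match the matrix typecode
      integer(c_int) :: istat
      istat = rsb_mtx_get_nrm(mtxAp, c_loc(nrm), norm_flag)
    end subroutine get_nrm_sketch
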
diff --git a/doc/html/interfacersb_1_1rsb__mtx__get__prec.html b/doc/html/interfacersb_1_1rsb__mtx__get__prec.html
new file mode 100644
index 0000000..d53b0dc
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__get__prec.html
@@ -0,0 +1,116 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_get_prec Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__get__prec.html">rsb_mtx_get_prec</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_get_prec Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gadaee12cc24dac7f8ebc68efd3d09c819">rsb_mtx_get_prec</a>.  
+ <a href="interfacersb_1_1rsb__mtx__get__prec.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a348e683f8b908ee70aa854c80803aafc"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__prec.html#a348e683f8b908ee70aa854c80803aafc">rsb_mtx_get_prec</a> (opdp, mtxAp, prec_flags, ipdp)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gadaee12cc24dac7f8ebc68efd3d09c819">rsb_mtx_get_prec</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a348e683f8b908ee70aa854c80803aafc"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_mtx_get_prec::rsb_mtx_get_prec </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>opdp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>prec_flags</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>ipdp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
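
rsb_mtx_get_prec builds preconditioner data from the matrix; with the ILU-0 flag documented in rsb.h, opdp is said to receive two factor handles. The sketch below encodes that reading as an assumption (two-element c_ptr array, NULL ipdp), so treat it as illustrative only.

    subroutine get_prec_sketch(mtxAp, prec_flags)
      use rsb
      use iso_c_binding
      implicit none
      type(c_ptr), intent(in) :: mtxAp
      integer(c_int), intent(in) :: prec_flags  ! e.g. rsb.h's RSB_PRECF_ILU0
      type(c_ptr), target :: lup(2)             ! assumed: receives L and U handles
      integer(c_int) :: istat
      istat = rsb_mtx_get_prec(c_loc(lup), mtxAp, prec_flags, c_null_ptr)
    end subroutine get_prec_sketch
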
diff --git a/doc/html/interfacersb_1_1rsb__mtx__get__rows__sparse.html b/doc/html/interfacersb_1_1rsb__mtx__get__rows__sparse.html
new file mode 100644
index 0000000..7f7eb8b
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__get__rows__sparse.html
@@ -0,0 +1,152 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_get_rows_sparse Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__get__rows__sparse.html">rsb_mtx_get_rows_sparse</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_get_rows_sparse Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaa01c4a69db732f99e8a960ee8c9afa23">rsb_mtx_get_rows_sparse</a>.  
+ <a href="interfacersb_1_1rsb__mtx__get__rows__sparse.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a048ca91f617db2fd2e8fbd250068829b"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__rows__sparse.html#a048ca91f617db2fd2e8fbd250068829b">rsb_mtx_get_rows_sparse</a> (transA, alphap, mtxAp, VA, IA, JA, frA, lrA, rnzp, flags)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaa01c4a69db732f99e8a960ee8c9afa23">rsb_mtx_get_rows_sparse</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a048ca91f617db2fd2e8fbd250068829b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_mtx_get_rows_sparse::rsb_mtx_get_rows_sparse </td>
+          <td>(</td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>frA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>lrA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>rnzp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>flags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
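
This interface extracts rows frA..lrA in COO form, optionally transposed and scaled by alphap. As with the block extractor, the count-first pass with NULL arrays is an idiom assumed from the C documentation; ichar('N') again stands in for RSB_TRANSPOSITION_N.

    subroutine get_rows_sketch(mtxAp, frA, lrA)
      use rsb
      use iso_c_binding
      implicit none
      type(c_ptr), intent(in) :: mtxAp
      integer(c_int), intent(in) :: frA, lrA
      integer(c_int), target :: rnz
      real(c_double), target :: alpha
      integer(c_int) :: istat
      alpha = 1d0
      ! Probe pass: NULL VA/IA/JA, only the rows' nonzero count lands in rnz.
      istat = rsb_mtx_get_rows_sparse(int(ichar('N'), c_int), c_loc(alpha), &
        mtxAp, c_null_ptr, c_null_ptr, c_null_ptr, frA, lrA, c_loc(rnz), 0_c_int)
      ! A second pass with rnz-sized arrays (omitted) fetches the entries.
    end subroutine get_rows_sketch
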
diff --git a/doc/html/interfacersb_1_1rsb__mtx__get__vals.html b/doc/html/interfacersb_1_1rsb__mtx__get__vals.html
new file mode 100644
index 0000000..a77eb4b
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__get__vals.html
@@ -0,0 +1,128 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_get_vals Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__get__vals.html">rsb_mtx_get_vals</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_get_vals Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad8f1aa9ac5081edd789374e7bb82697f">rsb_mtx_get_vals</a>.  
+ <a href="interfacersb_1_1rsb__mtx__get__vals.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a5f501a125d2fd5b4138c81dce37a427e"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__vals.html#a5f501a125d2fd5b4138c81dce37a427e">rsb_mtx_get_vals</a> (mtxAp, VA, IA, JA, nnz, flags)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad8f1aa9ac5081edd789374e7bb82697f">rsb_mtx_get_vals</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a5f501a125d2fd5b4138c81dce37a427e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_mtx_get_vals::rsb_mtx_get_vals </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>flags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
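A minimal Fortran sketch of the interface documented above. Assumptions,
here and in the sketches after the following interface pages: the rsb
module from rsb.F90 and ISO_C_BINDING are USEd, mtxAp holds a matrix
handle created earlier (e.g. via an rsb_mtx_alloc_* interface), and a
literal 0 stands in for an RSB_FLAG_* constant; these are fragments, not
complete programs.

    type(c_ptr)            :: mtxAp          ! valid handle, created earlier
    integer(c_int)         :: errval
    integer(c_int), target :: IA(2), JA(2)   ! coordinates of entries to read
    real(c_double), target :: VA(2)          ! receives the values
    IA = (/ 0, 1 /)                          ! 0-based, as in the C API
    JA = (/ 0, 1 /)
    errval = rsb_mtx_get_vals(mtxAp, c_loc(VA), c_loc(IA), c_loc(JA), &
                              2_c_int, 0_c_int)

On success errval equals RSB_ERR_NO_ERROR (zero in the C API).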
diff --git a/doc/html/interfacersb_1_1rsb__mtx__get__vec.html b/doc/html/interfacersb_1_1rsb__mtx__get__vec.html
new file mode 100644
index 0000000..969ca6a
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__get__vec.html
@@ -0,0 +1,110 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_get_vec Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__get__vec.html">rsb_mtx_get_vec</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_get_vec Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad0b2352cea6b7512b466d1c51327fcf8">rsb_mtx_get_vec</a>.  
+ <a href="interfacersb_1_1rsb__mtx__get__vec.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a8da26f8850a32ea89255ba5c946b9be3"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__vec.html#a8da26f8850a32ea89255ba5c946b9be3">rsb_mtx_get_vec</a> (mtxAp, Dp, flags)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad0b2352cea6b7512b466d1c51327fcf8">rsb_mtx_get_vec</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a8da26f8850a32ea89255ba5c946b9be3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_mtx_get_vec::rsb_mtx_get_vec </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>Dp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>flags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
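Under the same sketch conventions, extracting a vector from the matrix;
RSB_EXTF_DIAG is the C API's diagonal-extraction constant and is assumed
to be mirrored by the rsb module:

    integer(c_int)         :: errval
    real(c_double), target :: D(100)   ! room for min(nrA,ncA) diagonal entries
    errval = rsb_mtx_get_vec(mtxAp, c_loc(D), RSB_EXTF_DIAG)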
diff --git a/doc/html/interfacersb_1_1rsb__mtx__rndr.html b/doc/html/interfacersb_1_1rsb__mtx__rndr.html
new file mode 100644
index 0000000..5d19d20
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__rndr.html
@@ -0,0 +1,122 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_rndr Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__rndr.html">rsb_mtx_rndr</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_rndr Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab0702d7080d1699162e4201bc70cc5ee">rsb_mtx_rndr</a>.  
+ <a href="interfacersb_1_1rsb__mtx__rndr.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:acedd2acc7f1393e056d36cbea3c4cdaa"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__rndr.html#acedd2acc7f1393e056d36cbea3c4cdaa">rsb_mtx_rndr</a> (filename, mtxAp, pmWidth, pmHeight, rflags)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab0702d7080d1699162e4201bc70cc5ee">rsb_mtx_rndr</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="acedd2acc7f1393e056d36cbea3c4cdaa"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_mtx_rndr::rsb_mtx_rndr </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>filename</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>pmWidth</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>pmHeight</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>rflags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
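A sketch of rendering a matrix to a pixmap file under the same
conventions; the filename travels behind a c_ptr as a NUL-terminated C
string, and RSB_MARF_EPS is the C API's Encapsulated PostScript constant,
assumed to be available from the rsb module:

    character(kind=c_char), target :: fname(6) = &
        (/ 'A', '.', 'e', 'p', 's', c_null_char /)
    integer(c_int) :: errval
    errval = rsb_mtx_rndr(c_loc(fname), mtxAp, 512_c_int, 512_c_int, &
                          RSB_MARF_EPS)   ! 512x512 rendering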
diff --git a/doc/html/interfacersb_1_1rsb__mtx__set__vals.html b/doc/html/interfacersb_1_1rsb__mtx__set__vals.html
new file mode 100644
index 0000000..af391cc
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__set__vals.html
@@ -0,0 +1,128 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_set_vals Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__set__vals.html">rsb_mtx_set_vals</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_set_vals Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab8069ad6d5a67bc8a726131891e98c46">rsb_mtx_set_vals</a>.  
+ <a href="interfacersb_1_1rsb__mtx__set__vals.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a67f48229a8cc61f12c2dd6ca7c3d3d44"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__set__vals.html#a67f48229a8cc61f12c2dd6ca7c3d3d44">rsb_mtx_set_vals</a> (mtxAp, VA, IA, JA, nnz, flags)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab8069ad6d5a67bc8a726131891e98c46">rsb_mtx_set_vals</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a67f48229a8cc61f12c2dd6ca7c3d3d44"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_mtx_set_vals::rsb_mtx_set_vals </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>VA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>IA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>JA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>nnz</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>flags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
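The writing counterpart of rsb_mtx_get_vals takes the same argument
layout, with VA now supplying the values to store; per the C
documentation it updates positions already present in the sparsity
pattern. Same sketch conventions:

    integer(c_int), target :: IA(2) = (/ 0, 1 /), JA(2) = (/ 0, 1 /)
    real(c_double), target :: VA(2) = (/ 1.5d0, -2.5d0 /)
    integer(c_int)         :: errval
    errval = rsb_mtx_set_vals(mtxAp, c_loc(VA), c_loc(IA), c_loc(JA), &
                              2_c_int, 0_c_int)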
diff --git a/doc/html/interfacersb_1_1rsb__mtx__switch__to__coo.html b/doc/html/interfacersb_1_1rsb__mtx__switch__to__coo.html
new file mode 100644
index 0000000..41e8ade
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__switch__to__coo.html
@@ -0,0 +1,122 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_switch_to_coo Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__switch__to__coo.html">rsb_mtx_switch_to_coo</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_switch_to_coo Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gadf75c148fe661486ab0d8140657b8d9a">rsb_mtx_switch_to_coo</a>.  
+ <a href="interfacersb_1_1rsb__mtx__switch__to__coo.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a4054c2c9dbf8dd8ad06a551f7eadf23f"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__switch__to__coo.html#a4054c2c9dbf8dd8ad06a551f7eadf23f">rsb_mtx_switch_to_coo</a> (mtxAp, VAp, IAp, JAp, flags)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gadf75c148fe661486ab0d8140657b8d9a">rsb_mtx_switch_to_coo</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a4054c2c9dbf8dd8ad06a551f7eadf23f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_mtx_switch_to_coo::rsb_mtx_switch_to_coo </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>VAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>IAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>JAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>flags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
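In the C API the VAp/IAp/JAp arguments of rsb_mtx_switch_to_coo are
output pointers (void ** and rsb_coo_idx_t **): the matrix hands its
storage back as COO arrays and the handle is consumed. Hence the
pointer-to-c_ptr idiom in this sketch (conventions as above):

    type(c_ptr), target :: VAp, IAp, JAp   ! receive the COO arrays
    integer(c_int)      :: errval
    errval = rsb_mtx_switch_to_coo(mtxAp, c_loc(VAp), c_loc(IAp), &
                                   c_loc(JAp), 0_c_int)
    ! mtxAp is not a valid handle after success; the returned pointers
    ! can be mapped to Fortran arrays with c_f_pointer.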
diff --git a/doc/html/interfacersb_1_1rsb__mtx__switch__to__csr.html b/doc/html/interfacersb_1_1rsb__mtx__switch__to__csr.html
new file mode 100644
index 0000000..8d17c7a
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__switch__to__csr.html
@@ -0,0 +1,122 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_switch_to_csr Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__switch__to__csr.html">rsb_mtx_switch_to_csr</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_switch_to_csr Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga3c46a4942a6acb90063d721b6446e78e">rsb_mtx_switch_to_csr</a>.  
+ <a href="interfacersb_1_1rsb__mtx__switch__to__csr.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:ab085734a0dd0cbdad2bcff6e62718379"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__switch__to__csr.html#ab085734a0dd0cbdad2bcff6e62718379">rsb_mtx_switch_to_csr</a> (mtxAp, VAp, IAp, JAp, flags)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga3c46a4942a6acb90063d721b6446e78e">rsb_mtx_switch_to_csr</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="ab085734a0dd0cbdad2bcff6e62718379"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_mtx_switch_to_csr::rsb_mtx_switch_to_csr </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>VAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>IAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>JAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>flags</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
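rsb_mtx_switch_to_csr has the same shape as the COO variant above, except
that on success IAp receives the nrA+1 CSR row pointers instead of
per-entry row indices; against the previous sketch, the call itself is a
one-line change:

    errval = rsb_mtx_switch_to_csr(mtxAp, c_loc(VAp), c_loc(IAp), &
                                   c_loc(JAp), 0_c_int)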
diff --git a/doc/html/interfacersb_1_1rsb__mtx__upd__vals.html b/doc/html/interfacersb_1_1rsb__mtx__upd__vals.html
new file mode 100644
index 0000000..76b3045
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__mtx__upd__vals.html
@@ -0,0 +1,110 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_mtx_upd_vals Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__mtx__upd__vals.html">rsb_mtx_upd_vals</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_mtx_upd_vals Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals</a>.  
+ <a href="interfacersb_1_1rsb__mtx__upd__vals.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:abaf717d65f4cce3f643dbad78dd43f5d"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__upd__vals.html#abaf717d65f4cce3f643dbad78dd43f5d">rsb_mtx_upd_vals</a> (mtxAp, elop_flags, omegap)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="abaf717d65f4cce3f643dbad78dd43f5d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_mtx_upd_vals::rsb_mtx_upd_vals </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>elop_flags</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>omegap</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
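rsb_mtx_upd_vals applies an element-wise operation across all stored
values, with omegap pointing at the scalar operand; RSB_ELOPF_MUL is the
C API's multiply-by-scalar constant, assumed to be mirrored by the rsb
module. Same sketch conventions:

    real(c_double), target :: omega = 0.5d0
    integer(c_int)         :: errval
    errval = rsb_mtx_upd_vals(mtxAp, RSB_ELOPF_MUL, c_loc(omega))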
diff --git a/doc/html/interfacersb_1_1rsb__perror.html b/doc/html/interfacersb_1_1rsb__perror.html
new file mode 100644
index 0000000..733aa70
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__perror.html
@@ -0,0 +1,104 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_perror Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__perror.html">rsb_perror</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_perror Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror</a>.  
+ <a href="interfacersb_1_1rsb__perror.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a72bfc792fff96e8db48fafdd91669751"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__perror.html#a72bfc792fff96e8db48fafdd91669751">rsb_perror</a> (stream, errval)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a72bfc792fff96e8db48fafdd91669751"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_perror::rsb_perror </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>stream</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>errval</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
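A sketch of routing an error code from any of the above calls through
rsb_perror; passing c_null_ptr as the stream is assumed to select
librsb's default output, matching the C API's NULL-stream behaviour:

    if (errval /= 0) then   ! 0 is RSB_ERR_NO_ERROR in the C API
      errval = rsb_perror(c_null_ptr, errval)
    end if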
diff --git a/doc/html/interfacersb_1_1rsb__psblas__trans__to__rsb__trans.html b/doc/html/interfacersb_1_1rsb__psblas__trans__to__rsb__trans.html
new file mode 100644
index 0000000..d2c0b8f
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__psblas__trans__to__rsb__trans.html
@@ -0,0 +1,94 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_psblas_trans_to_rsb_trans Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__psblas__trans__to__rsb__trans.html">rsb_psblas_trans_to_rsb_trans</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_psblas_trans_to_rsb_trans Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga7459601f0d54bd95549959b9749fedde">rsb_psblas_trans_to_rsb_trans</a>.  
+ <a href="interfacersb_1_1rsb__psblas__trans__to__rsb__trans.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:af3f97f3e696d1309ab86da4a3e0f6de8"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__psblas__trans__to__rsb__trans.html#af3f97f3e696d1309ab86da4a3e0f6de8">rsb_psblas_trans_to_rsb_trans</a> (psbtrans)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga7459601f0d54bd95549959b9749fedde">rsb_psblas_trans_to_rsb_trans</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="af3f97f3e696d1309ab86da4a3e0f6de8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_psblas_trans_to_rsb_trans::rsb_psblas_trans_to_rsb_trans </td>
+          <td>(</td>
+          <td class="paramtype">character(c_char) </td>
+          <td class="paramname"><em>psbtrans</em></td><td>)</td>
+          <td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
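This helper maps PSBLAS-style transposition characters onto librsb
transposition codes, so PSBLAS-facing code can feed the result straight
into the rsb_spm* interfaces:

    integer(c_int) :: transA
    transA = rsb_psblas_trans_to_rsb_trans('N')   ! 'N', 'T' or 'C'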
diff --git a/doc/html/interfacersb_1_1rsb__spmm.html b/doc/html/interfacersb_1_1rsb__spmm.html
new file mode 100644
index 0000000..ac4b87d
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__spmm.html
@@ -0,0 +1,152 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_spmm Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__spmm.html">rsb_spmm</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_spmm Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga3ec8d721b5333aae6ea9b03eb0039285">rsb_spmm</a>.  
+ <a href="interfacersb_1_1rsb__spmm.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a7af958e6026d556fc5e4e994514d8ac1"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__spmm.html#a7af958e6026d556fc5e4e994514d8ac1">rsb_spmm</a> (transA, alphap, mtxAp, nrhs, order, Bp, ldB, betap, Cp, ldC)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga3ec8d721b5333aae6ea9b03eb0039285">rsb_spmm</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a7af958e6026d556fc5e4e994514d8ac1"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_spmm::rsb_spmm </td>
+          <td>(</td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>Bp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>ldB</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>betap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>Cp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>ldC</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
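rsb_spmm computes the multi-right-hand-side product
C := alpha * op(A) * B + beta * C. In this sketch RSB_TRANSPOSITION_N and
RSB_FLAG_WANT_COLUMN_MAJOR_ORDER are the C API's names for op(A) = A and
column-major dense operands, assumed to be exported by the rsb module;
conventions as above:

    integer(c_int), parameter :: nrhs = 2, ldB = 100, ldC = 100
    real(c_double), target    :: alpha = 1.0d0, beta = 0.0d0
    real(c_double), target    :: B(ldB,nrhs), C(ldC,nrhs)
    integer(c_int)            :: errval
    errval = rsb_spmm(RSB_TRANSPOSITION_N, c_loc(alpha), mtxAp, nrhs, &
                      RSB_FLAG_WANT_COLUMN_MAJOR_ORDER, c_loc(B), ldB, &
                      c_loc(beta), c_loc(C), ldC)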
diff --git a/doc/html/interfacersb_1_1rsb__spmsp.html b/doc/html/interfacersb_1_1rsb__spmsp.html
new file mode 100644
index 0000000..fb4550d
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__spmsp.html
@@ -0,0 +1,140 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_spmsp Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__spmsp.html">rsb_spmsp</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_spmsp Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga8813ccbbb1065ac76bfe22c42feafa05">rsb_spmsp</a>.  
+ <a href="interfacersb_1_1rsb__spmsp.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a935b754474aa9edc1234f1efc16c8e3b"><td class="memItemLeft" align="right" valign="top">TYPE(C_PTR) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__spmsp.html#a935b754474aa9edc1234f1efc16c8e3b">rsb_spmsp</a> (typecode, transA, alphap, mtxAp, transB, betap, mtxBp, errvalp)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga8813ccbbb1065ac76bfe22c42feafa05">rsb_spmsp</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a935b754474aa9edc1234f1efc16c8e3b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">TYPE(C_PTR) function rsb::rsb_spmsp::rsb_spmsp </td>
+          <td>(</td>
+          <td class="paramtype">integer(c_signed_char) </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>transB</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>betap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxBp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>errvalp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
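Unlike the interfaces above, rsb_spmsp returns a new handle (the sparse
product of the two operands) rather than an error code, reporting errors
through errvalp instead. A hedged sketch: the typecode is passed as the
character code for double precision, though the rsb module presumably
also exports named RSB_NUMERICAL_TYPE_* constants, and errvalp is assumed
to point at a C-int-sized rsb_err_t:

    type(c_ptr)            :: mtxBp, mtxCp   ! mtxBp created earlier
    integer(c_int), target :: errval
    real(c_double), target :: alpha = 1.0d0, beta = 1.0d0
    mtxCp = rsb_spmsp(ichar('D', c_signed_char), RSB_TRANSPOSITION_N, &
                      c_loc(alpha), mtxAp, RSB_TRANSPOSITION_N, &
                      c_loc(beta), mtxBp, c_loc(errval))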
diff --git a/doc/html/interfacersb_1_1rsb__spmsp__to__dense.html b/doc/html/interfacersb_1_1rsb__spmsp__to__dense.html
new file mode 100644
index 0000000..df59db6
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__spmsp__to__dense.html
@@ -0,0 +1,164 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_spmsp_to_dense Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__spmsp__to__dense.html">rsb_spmsp_to_dense</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_spmsp_to_dense Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga74d97612d4af70244c886b9eadd90a0e">rsb_spmsp_to_dense</a>.  
+ <a href="interfacersb_1_1rsb__spmsp__to__dense.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:ad1e0ae6df6ef2842d5fff69204253568"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__spmsp__to__dense.html#ad1e0ae6df6ef2842d5fff69204253568">rsb_spmsp_to_dense</a> (typecode, transA, alphap, mtxAp, transB, betap, mtxBp, ldC, nrC, ncC, rowmajorC, Cp)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga74d97612d4af70244c886b9eadd90a0e">rsb_spmsp_to_dense</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="ad1e0ae6df6ef2842d5fff69204253568"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_spmsp_to_dense::rsb_spmsp_to_dense </td>
+          <td>(</td>
+          <td class="paramtype">integer(c_signed_char) </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>transB</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>betap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxBp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>ldC</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>nrC</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>ncC</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>rowmajorC</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>Cp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
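
For orientation, rsb_spmsp_to_dense accumulates the product of two sparse matrices into a caller-supplied dense array. Below is a minimal Fortran sketch of a call through the interface documented above; it assumes mtxA and mtxB are valid double-precision librsb matrix handles created elsewhere, and that the typecode and transposition arguments take the ASCII codes of 'D' and 'N', as the corresponding macros in rsb.h do.

    ! Hypothetical usage sketch: dense c from sparse A times B.
    subroutine demo_spmsp_to_dense(mtxA, mtxB, nr, nc, c, istat)
      use iso_c_binding
      use rsb                              ! MODULE rsb, generated in rsb.F90
      implicit none
      type(c_ptr), intent(in)     :: mtxA, mtxB   ! assumed valid handles
      integer(c_int), intent(in)  :: nr, nc
      real(c_double), target, intent(inout) :: c(nr, nc)
      integer(c_int), intent(out) :: istat
      real(c_double), target :: alpha, beta
      alpha = 1.0_c_double; beta = 1.0_c_double
      ! ldC = nr, rowmajorC = 0: c() is a column-major Fortran array
      ! (the 0/1 encoding of rowmajorC is an assumption here).
      istat = rsb_spmsp_to_dense(int(ichar('D'), c_signed_char),      &
                 int(ichar('N'), c_int), c_loc(alpha), mtxA,          &
                 int(ichar('N'), c_int), c_loc(beta),  mtxB,          &
                 nr, nr, nc, 0_c_int, c_loc(c(1,1)))
    end subroutine demo_spmsp_to_dense
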
diff --git a/doc/html/interfacersb_1_1rsb__spmv.html b/doc/html/interfacersb_1_1rsb__spmv.html
new file mode 100644
index 0000000..65980e8
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__spmv.html
@@ -0,0 +1,140 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_spmv Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__spmv.html">rsb_spmv</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_spmv Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga4a16a82d289c76a437915db449553d4d">rsb_spmv</a>.  
+ <a href="interfacersb_1_1rsb__spmv.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:ad340345701bc3e8b0d26f56820ff2842"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__spmv.html#ad340345701bc3e8b0d26f56820ff2842">rsb_spmv</a> (transA, alphap, mtxAp, Xp, incX, betap, Yp, incY)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga4a16a82d289c76a437915db449553d4d">rsb_spmv</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="ad340345701bc3e8b0d26f56820ff2842"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_spmv::rsb_spmv </td>
+          <td>(</td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>Xp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>incX</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>betap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>Yp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>incY</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
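
rsb_spmv above binds the sparse matrix-vector multiply, updating the vector behind Yp from Xp with the usual alpha/beta scaling and incX/incY strides. A minimal sketch under the same assumptions as before (valid double-precision handle, ASCII 'N' transposition code):

    ! Hypothetical usage sketch: y := alpha*A*x + beta*y, unit strides.
    subroutine demo_spmv(mtxA, x, y, istat)
      use iso_c_binding
      use rsb
      implicit none
      type(c_ptr), intent(in)                           :: mtxA
      real(c_double), target, contiguous, intent(in)    :: x(:)
      real(c_double), target, contiguous, intent(inout) :: y(:)
      integer(c_int), intent(out)                       :: istat
      real(c_double), target :: alpha, beta
      alpha = 1.0_c_double; beta = 1.0_c_double         ! y := A*x + y
      istat = rsb_spmv(int(ichar('N'), c_int), c_loc(alpha), mtxA,     &
                       c_loc(x(1)), 1_c_int, c_loc(beta), c_loc(y(1)), &
                       1_c_int)
    end subroutine demo_spmv
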
diff --git a/doc/html/interfacersb_1_1rsb__sppsp.html b/doc/html/interfacersb_1_1rsb__sppsp.html
new file mode 100644
index 0000000..38d973b
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__sppsp.html
@@ -0,0 +1,140 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_sppsp Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__sppsp.html">rsb_sppsp</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_sppsp Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga30823d02e577e59da4ccff6baaeb8ea1">rsb_sppsp</a>.  
+ <a href="interfacersb_1_1rsb__sppsp.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a157e6aec78681df74866193b32b76101"><td class="memItemLeft" align="right" valign="top">TYPE(C_PTR) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__sppsp.html#a157e6aec78681df74866193b32b76101">rsb_sppsp</a> (typecode, transA, alphap, mtxAp, transB, betap, mtxBp, errvalp)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga30823d02e577e59da4ccff6baaeb8ea1">rsb_sppsp</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a157e6aec78681df74866193b32b76101"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">TYPE(C_PTR) function rsb::rsb_sppsp::rsb_sppsp </td>
+          <td>(</td>
+          <td class="paramtype">integer(c_signed_char) </td>
+          <td class="paramname"><em>typecode</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>transB</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>betap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxBp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>errvalp</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
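
Where the previous interfaces fill caller-supplied output, rsb_sppsp allocates: the TYPE(C_PTR) it returns is the handle of a new sparse matrix holding the scaled (and possibly transposed) sum of the two operands, with the status code delivered through errvalp. A sketch under the same typecode/transposition assumptions:

    ! Hypothetical sketch: mtxC = alpha*A + beta*B as a new librsb handle.
    function demo_sppsp(mtxA, mtxB, istat) result(mtxC)
      use iso_c_binding
      use rsb
      implicit none
      type(c_ptr), intent(in)             :: mtxA, mtxB
      integer(c_int), target, intent(out) :: istat
      type(c_ptr)                         :: mtxC
      real(c_double), target :: alpha, beta
      alpha = 1.0_c_double; beta = 1.0_c_double
      mtxC = rsb_sppsp(int(ichar('D'), c_signed_char),             &
                       int(ichar('N'), c_int), c_loc(alpha), mtxA, &
                       int(ichar('N'), c_int), c_loc(beta),  mtxB, &
                       c_loc(istat))
    end function demo_sppsp
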
diff --git a/doc/html/interfacersb_1_1rsb__spsm.html b/doc/html/interfacersb_1_1rsb__spsm.html
new file mode 100644
index 0000000..e1ed29f
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__spsm.html
@@ -0,0 +1,152 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_spsm Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__spsm.html">rsb_spsm</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_spsm Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga48e6f3844605fffac9f622f05afa6043">rsb_spsm</a>.  
+ <a href="interfacersb_1_1rsb__spsm.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a1e87e26c84faeac8cda8e6ed3cf77e35"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__spsm.html#a1e87e26c84faeac8cda8e6ed3cf77e35">rsb_spsm</a> (transT, alphap, mtxTp, nrhs, order, betap, Bp, ldB, Cp, ldC)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga48e6f3844605fffac9f622f05afa6043">rsb_spsm</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a1e87e26c84faeac8cda8e6ed3cf77e35"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_spsm::rsb_spsm </td>
+          <td>(</td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxTp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>betap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>Bp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>ldB</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>Cp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>ldC</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
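
rsb_spsm is the block triangular solve: nrhs right-hand sides stored as the columns of B, with order selecting their row- or column-major layout and ldB/ldC the leading dimensions. The sketch below leaves the order flag to the caller, since librsb's named constants for it do not appear in this hunk:

    ! Hypothetical sketch: solve op(T) X = alpha*B for nrhs right-hand sides.
    subroutine demo_spsm(mtxT, nrhs, order, b, c, istat)
      use iso_c_binding
      use rsb
      implicit none
      type(c_ptr), intent(in)    :: mtxT         ! assumed triangular handle
      integer(c_int), intent(in) :: nrhs, order  ! order: a librsb layout flag
      real(c_double), target, contiguous, intent(in)    :: b(:,:)
      real(c_double), target, contiguous, intent(inout) :: c(:,:)
      integer(c_int), intent(out) :: istat
      real(c_double), target :: alpha, beta
      alpha = 1.0_c_double; beta = 0.0_c_double
      istat = rsb_spsm(int(ichar('N'), c_int), c_loc(alpha), mtxT, nrhs, &
                       order, c_loc(beta), c_loc(b(1,1)),                &
                       int(size(b,1), c_int), c_loc(c(1,1)),             &
                       int(size(c,1), c_int))
    end subroutine demo_spsm
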
diff --git a/doc/html/interfacersb_1_1rsb__spsv.html b/doc/html/interfacersb_1_1rsb__spsv.html
new file mode 100644
index 0000000..74dde17
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__spsv.html
@@ -0,0 +1,134 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_spsv Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__spsv.html">rsb_spsv</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_spsv Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga9b044332b720d3f8083ae792068fb04a">rsb_spsv</a>.  
+ <a href="interfacersb_1_1rsb__spsv.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:aaaa26b35783e2a125255025de14c18e0"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__spsv.html#aaaa26b35783e2a125255025de14c18e0">rsb_spsv</a> (transT, alphap, mtxTp, Xp, incX, Yp, incY)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga9b044332b720d3f8083ae792068fb04a">rsb_spsv</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="aaaa26b35783e2a125255025de14c18e0"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_spsv::rsb_spsv </td>
+          <td>(</td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>transT</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxTp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>Xp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>incX</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>Yp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>incY</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
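
rsb_spsv is the single-vector form of the triangular solve above, with the same strided-vector conventions as rsb_spmv:

    ! Hypothetical sketch: y from op(T) y = alpha*x, unit strides.
    subroutine demo_spsv(mtxT, x, y, istat)
      use iso_c_binding
      use rsb
      implicit none
      type(c_ptr), intent(in)                           :: mtxT
      real(c_double), target, contiguous, intent(in)    :: x(:)
      real(c_double), target, contiguous, intent(inout) :: y(:)
      integer(c_int), intent(out)                       :: istat
      real(c_double), target :: alpha
      alpha = 1.0_c_double
      istat = rsb_spsv(int(ichar('N'), c_int), c_loc(alpha), mtxT, &
                       c_loc(x(1)), 1_c_int, c_loc(y(1)), 1_c_int)
    end subroutine demo_spsv
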
diff --git a/doc/html/interfacersb_1_1rsb__strerror__r.html b/doc/html/interfacersb_1_1rsb__strerror__r.html
new file mode 100644
index 0000000..79db03f
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__strerror__r.html
@@ -0,0 +1,110 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_strerror_r Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__strerror__r.html">rsb_strerror_r</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_strerror_r Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r</a>.  
+ <a href="interfacersb_1_1rsb__strerror__r.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a67a031bb42f0e21ddc01ce3c5f12400f"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__strerror__r.html#a67a031bb42f0e21ddc01ce3c5f12400f">rsb_strerror_r</a> (errval, buf, buflen)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a67a031bb42f0e21ddc01ce3c5f12400f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_strerror_r::rsb_strerror_r </td>
+          <td>(</td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>errval</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>buf</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_size_t) </td>
+          <td class="paramname"><em>buflen</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
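
rsb_strerror_r translates an error code into a NUL-terminated message written into a caller-supplied buffer, so the Fortran caller passes the address and capacity of a c_char array:

    ! Hypothetical sketch: print the librsb message for an error code.
    subroutine demo_strerror(errval)
      use iso_c_binding
      use rsb
      implicit none
      integer(c_int), intent(in)     :: errval
      character(kind=c_char), target :: buf(256)
      integer(c_int) :: istat
      integer        :: n
      buf = c_null_char
      istat = rsb_strerror_r(errval, c_loc(buf(1)), &
                             int(size(buf), c_size_t))
      do n = 1, size(buf)          ! stop at the C string terminator
        if (buf(n) == c_null_char) exit
      end do
      if (n > 1) write (*, '(*(A))') buf(1:n-1)
    end subroutine demo_strerror
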
diff --git a/doc/html/interfacersb_1_1rsb__time.html b/doc/html/interfacersb_1_1rsb__time.html
new file mode 100644
index 0000000..dd12568
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__time.html
@@ -0,0 +1,93 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_time Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__time.html">rsb_time</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_time Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a>.  
+ <a href="interfacersb_1_1rsb__time.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a60b407a11a393bd8b6106dab907c5e92"><td class="memItemLeft" align="right" valign="top">REAL(C_DOUBLE) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__time.html#a60b407a11a393bd8b6106dab907c5e92">rsb_time</a> ()</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a60b407a11a393bd8b6106dab907c5e92"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">REAL(C_DOUBLE) function rsb::rsb_time::rsb_time </td>
+          <td>(</td>
+          <td class="paramname"></td><td>)</td>
+          <td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
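
rsb_time is the one argument-free interface in this set; it returns a REAL(C_DOUBLE) timestamp (presumably wall-clock seconds, as in the underlying C function), so bracketing a region with two calls measures elapsed time:

    ! Hypothetical sketch: time a region with two rsb_time() readings.
    subroutine demo_timed_region()
      use iso_c_binding
      use rsb
      implicit none
      real(c_double) :: t0, dt
      t0 = rsb_time()
      ! ... the operation being timed goes here ...
      dt = rsb_time() - t0
      write (*, '(A,ES12.5,A)') 'elapsed: ', dt, ' s'
    end subroutine demo_timed_region
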
diff --git a/doc/html/interfacersb_1_1rsb__tune__spmm.html b/doc/html/interfacersb_1_1rsb__tune__spmm.html
new file mode 100644
index 0000000..a1e396d
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__tune__spmm.html
@@ -0,0 +1,182 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_tune_spmm Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__tune__spmm.html">rsb_tune_spmm</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_tune_spmm Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>.  
+ <a href="interfacersb_1_1rsb__tune__spmm.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a50a0bd8eb0673e0bac6375f25e719c81"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__tune__spmm.html#a50a0bd8eb0673e0bac6375f25e719c81">rsb_tune_spmm</a> (mtxOpp, sfp, tnp, maxr, maxt, transA, alphap, mtxAp, nrhs, order, Bp, ldB, betap, Cp, ldC)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a50a0bd8eb0673e0bac6375f25e719c81"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_tune_spmm::rsb_tune_spmm </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxOpp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>sfp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>tnp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>maxr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(c_double) </td>
+          <td class="paramname"><em>maxt</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>Bp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>ldB</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>betap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>Cp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>ldC</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
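
rsb_tune_spmm binds librsb's SpMM autotuner. A thread-count estimate can be requested without restructuring the matrix or supplying dense operands, provided the underlying C call's tolerance for NULL pointers and zero limits carries over to these bindings, which is an assumption in the sketch below:

    ! Hypothetical sketch: ask the autotuner for a thread-count suggestion.
    subroutine demo_tune_spmm(mtxA, nrhs, order, istat)
      use iso_c_binding
      use rsb
      implicit none
      type(c_ptr), intent(in)     :: mtxA
      integer(c_int), intent(in)  :: nrhs, order
      integer(c_int), intent(out) :: istat
      real(c_double), target :: alpha, beta, sf
      integer(c_int), target :: tn
      alpha = 1.0_c_double; beta = 1.0_c_double
      sf = 0.0_c_double; tn = 0_c_int
      ! Assumptions: mtxOpp = NULL leaves the matrix unchanged; Bp/Cp = NULL
      ! lets the tuner use temporary operands; maxr = 0 and maxt = 0.0
      ! request default tuning effort.
      istat = rsb_tune_spmm(c_null_ptr, c_loc(sf), c_loc(tn), 0_c_int,   &
                            0.0_c_double, int(ichar('N'), c_int),        &
                            c_loc(alpha), mtxA, nrhs, order, c_null_ptr, &
                            0_c_int, c_loc(beta), c_null_ptr, 0_c_int)
      ! On success, tn should hold a suggested thread count and sf an
      ! estimated speedup factor.
    end subroutine demo_tune_spmm

The rsb_tune_spsm interface documented next takes an identical argument list.
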
diff --git a/doc/html/interfacersb_1_1rsb__tune__spsm.html b/doc/html/interfacersb_1_1rsb__tune__spsm.html
new file mode 100644
index 0000000..3daf726
--- /dev/null
+++ b/doc/html/interfacersb_1_1rsb__tune__spsm.html
@@ -0,0 +1,182 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb::rsb_tune_spsm Interface Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+<div id="nav-path" class="navpath">
+  <ul>
+<li class="navelem"><a class="el" href="classrsb.html">rsb</a></li><li class="navelem"><a class="el" href="interfacersb_1_1rsb__tune__spsm.html">rsb_tune_spsm</a></li>  </ul>
+</div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb::rsb_tune_spsm Interface Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga8d7a05bbc165bd6ac20e8e23487a5871">rsb_tune_spsm</a>.  
+ <a href="interfacersb_1_1rsb__tune__spsm.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:a6cac8625fd1e15c4686b56faa31cf663"><td class="memItemLeft" align="right" valign="top">INTEGER(C_INT) function </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__tune__spsm.html#a6cac8625fd1e15c4686b56faa31cf663">rsb_tune_spsm</a> (mtxOpp, sfp, tnp, maxr, maxt, transA, alphap, mtxAp, nrhs, order, Bp, ldB, betap, Cp, ldC)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga8d7a05bbc165bd6ac20e8e23487a5871">rsb_tune_spsm</a>. </p>
+</div><h2>Constructor &amp; Destructor Documentation</h2>
+<a class="anchor" id="a6cac8625fd1e15c4686b56faa31cf663"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">INTEGER(C_INT) function rsb::rsb_tune_spsm::rsb_tune_spsm </td>
+          <td>(</td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxOpp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>sfp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>tnp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>maxr</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">real(c_double) </td>
+          <td class="paramname"><em>maxt</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>transA</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>alphap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>mtxAp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>nrhs</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>order</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>Bp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>ldB</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>betap</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">type(c_ptr) </td>
+          <td class="paramname"><em>Cp</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">integer(c_int) </td>
+          <td class="paramname"><em>ldC</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<hr/>The documentation for this interface was generated from the following file:<ul>
+<li><a class="el" href="rsb_8F90.html">rsb.F90</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/modules.html b/doc/html/modules.html
new file mode 100644
index 0000000..1cdd77e
--- /dev/null
+++ b/doc/html/modules.html
@@ -0,0 +1,59 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Modules</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li class="current"><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="header">
+  <div class="headertitle">
+<div class="title">Modules</div>  </div>
+</div><!--header-->
+<div class="contents">
+<div class="textblock">Here is a list of all modules:</div><div class="directory">
+<table class="directory">
+<tr id="row_0_" class="even"><td class="entry"><img src="ftv2node.png" alt="o" width="16" height="22" /><a class="el" href="group__rsb__doc__rsb.html" target="_self">The librsb library interface (rsb.h, rsb.F90)</a></td><td class="desc">The reference documentation of the <code>librsb</code> library comes in both HTML and Unix man pages formats. The following sections/man pages are available: <a class="el" href="group__rsb__doc__rsb.html">The librsb library interface (rsb.h, rsb.F90)</a>  [...]
+<tr id="row_1_"><td class="entry"><img src="ftv2node.png" alt="o" width="16" height="22" /><a class="el" href="group__rsb__doc__examples.html" target="_self">Example programs and code</a></td><td class="desc">Examples of usage of <code>librsb</code> </td></tr>
+<tr id="row_2_" class="even"><td class="entry"><img src="ftv2lastnode.png" alt="\" width="16" height="22" /><a class="el" href="group__rsb__doc__sparse__blas.html" target="_self">The Sparse BLAS interface to librsb (blas_sparse.h, rsb_blas_sparse.F90)</a></td><td class="desc">A Sparse BLAS interface (see <a href="http://www.netlib.org/blas/blast-forum/">http://www.netlib.org/blas/blast-forum/</a>) to <code>librsb</code>. Level 1 (vector-vector operations) is supported in a basic way. Lev [...]
+</table>
+</div><!-- directory -->
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/nav_f.png b/doc/html/nav_f.png
new file mode 100644
index 0000000..72a58a5
Binary files /dev/null and b/doc/html/nav_f.png differ
diff --git a/doc/html/nav_g.png b/doc/html/nav_g.png
new file mode 100644
index 0000000..8c6c13e
Binary files /dev/null and b/doc/html/nav_g.png differ
diff --git a/doc/html/nav_h.png b/doc/html/nav_h.png
new file mode 100644
index 0000000..33389b1
Binary files /dev/null and b/doc/html/nav_h.png differ
diff --git a/doc/html/open.png b/doc/html/open.png
new file mode 100644
index 0000000..30f75c7
Binary files /dev/null and b/doc/html/open.png differ
diff --git a/doc/html/pages.html b/doc/html/pages.html
new file mode 100644
index 0000000..f6f4573
--- /dev/null
+++ b/doc/html/pages.html
@@ -0,0 +1,58 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Related Pages</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li class="current"><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="header">
+  <div class="headertitle">
+<div class="title">Related Pages</div>  </div>
+</div><!--header-->
+<div class="contents">
+<div class="textblock">Here is a list of all related documentation pages:</div><div class="directory">
+<table class="directory">
+<tr id="row_0_" class="even"><td class="entry"><img src="ftv2node.png" alt="o" width="16" height="22" /><a class="el" href="todo.html" target="_self">Todo List</a></td><td class="desc"></td></tr>
+<tr id="row_1_"><td class="entry"><img src="ftv2lastnode.png" alt="\" width="16" height="22" /><a class="el" href="deprecated.html" target="_self">Deprecated List</a></td><td class="desc"></td></tr>
+</table>
+</div><!-- directory -->
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/rsb_8F90.html b/doc/html/rsb_8F90.html
new file mode 100644
index 0000000..7dc7543
--- /dev/null
+++ b/doc/html/rsb_8F90.html
@@ -0,0 +1,172 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb.F90 File Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="files.html"><span>File List</span></a></li>
+      <li><a href="globals.html"><span>Globals</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#nested-classes">Data Structures</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb.F90 File Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>Header file automatically generated from <<a class="el" href="rsb_8h.html" title="This file declares the user interface functions and data structures for the librsb library...">rsb.h</a>>, offering ISO-C-BINDING interfaces to <<a class="el" href="rsb_8h.html" title="This file declares the user interface functions and data structures for the librsb library...">rsb.h</a>>'s functions. Defines <code>MODULE</code> <code>rsb</code>. For examples of usage, see Fortran examples i [...]
+<a href="#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="nested-classes"></a>
+Data Structures</h2></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">module  </td><td class="memItemRight" valign="bottom"><a class="el" href="classrsb.html">rsb</a></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__strerror__r.html">rsb::rsb_strerror_r</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r</a>.  <a href="interfacersb_1_1rsb__strerror__r.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__perror.html">rsb::rsb_perror</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror</a>.  <a href="interfacersb_1_1rsb__perror.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__lib__init.html">rsb::rsb_lib_init</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>.  <a href="interfacersb_1_1rsb__lib__init.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__lib__reinit.html">rsb::rsb_lib_reinit</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit</a>.  <a href="interfacersb_1_1rsb__lib__reinit.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__lib__set__opt__str.html">rsb::rsb_lib_set_opt_str</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga4670aa682e70f82d5039c600e426a368">rsb_lib_set_opt_str</a>.  <a href="interfacersb_1_1rsb__lib__set__opt__str.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__lib__set__opt.html">rsb::rsb_lib_set_opt</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="rsb__rsb_8c.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_lib_set_opt</a>.  <a href="interfacersb_1_1rsb__lib__set__opt.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__lib__get__opt.html">rsb::rsb_lib_get_opt</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="rsb__rsb_8c.html#a96a28efc32dd050d2a74208b3ad2f227">rsb_lib_get_opt</a>.  <a href="interfacersb_1_1rsb__lib__get__opt.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__lib__exit.html">rsb::rsb_lib_exit</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a>.  <a href="interfacersb_1_1rsb__lib__exit.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__begin.html">rsb::rsb_mtx_alloc_from_coo_begin</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gafca80e53d47a7ec3eb116e755fe47c58">rsb_mtx_alloc_from_coo_begin</a>.  <a href="interfacersb_1_1rsb__mtx__alloc__from__coo__begin.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__end.html">rsb::rsb_mtx_alloc_from_coo_end</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab583fbefa0a66e9d30dac034480c2d86">rsb_mtx_alloc_from_coo_end</a>.  <a href="interfacersb_1_1rsb__mtx__alloc__from__coo__end.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csr__const.html">rsb::rsb_mtx_alloc_from_csr_const</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga13d417f776654fd159f274e56191573e">rsb_mtx_alloc_from_csr_const</a>.  <a href="interfacersb_1_1rsb__mtx__alloc__from__csr__const.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csc__const.html">rsb::rsb_mtx_alloc_from_csc_const</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaebf57d9e5263f41eb6163581ffc141aa">rsb_mtx_alloc_from_csc_const</a>.  <a href="interfacersb_1_1rsb__mtx__alloc__from__csc__const.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__csr__inplace.html">rsb::rsb_mtx_alloc_from_csr_inplace</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga60121166daf00968ba717931f04ea455">rsb_mtx_alloc_from_csr_inplace</a>.  <a href="interfacersb_1_1rsb__mtx__alloc__from__csr__inplace.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__const.html">rsb::rsb_mtx_alloc_from_coo_const</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_from_coo_const</a>.  <a href="interfacersb_1_1rsb__mtx__alloc__from__coo__const.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__alloc__from__coo__inplace.html">rsb::rsb_mtx_alloc_from_coo_inplace</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_mtx_alloc_from_coo_inplace</a>.  <a href="interfacersb_1_1rsb__mtx__alloc__from__coo__inplace.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__clone.html">rsb::rsb_mtx_clone</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a>.  <a href="interfacersb_1_1rsb__mtx__clone.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__free.html">rsb::rsb_mtx_free</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a>.  <a href="interfacersb_1_1rsb__mtx__free.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__nrm.html">rsb::rsb_mtx_get_nrm</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga6a645ce89fd167d72c92cdcfbcd8ed81">rsb_mtx_get_nrm</a>.  <a href="interfacersb_1_1rsb__mtx__get__nrm.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__vec.html">rsb::rsb_mtx_get_vec</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad0b2352cea6b7512b466d1c51327fcf8">rsb_mtx_get_vec</a>.  <a href="interfacersb_1_1rsb__mtx__get__vec.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__rndr.html">rsb::rsb_mtx_rndr</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab0702d7080d1699162e4201bc70cc5ee">rsb_mtx_rndr</a>.  <a href="interfacersb_1_1rsb__mtx__rndr.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__file__mtx__rndr.html">rsb::rsb_file_mtx_rndr</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga4b45a74b985f5cbd869bc9a540951771">rsb_file_mtx_rndr</a>.  <a href="interfacersb_1_1rsb__file__mtx__rndr.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__spmv.html">rsb::rsb_spmv</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga4a16a82d289c76a437915db449553d4d">rsb_spmv</a>.  <a href="interfacersb_1_1rsb__spmv.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__spmm.html">rsb::rsb_spmm</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga3ec8d721b5333aae6ea9b03eb0039285">rsb_spmm</a>.  <a href="interfacersb_1_1rsb__spmm.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__spsv.html">rsb::rsb_spsv</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga9b044332b720d3f8083ae792068fb04a">rsb_spsv</a>.  <a href="interfacersb_1_1rsb__spsv.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__spsm.html">rsb::rsb_spsm</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga48e6f3844605fffac9f622f05afa6043">rsb_spsm</a>.  <a href="interfacersb_1_1rsb__spsm.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__add__to__dense.html">rsb::rsb_mtx_add_to_dense</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaf30a70ea183d30d216f700782fc01524">rsb_mtx_add_to_dense</a>.  <a href="interfacersb_1_1rsb__mtx__add__to__dense.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__sppsp.html">rsb::rsb_sppsp</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga30823d02e577e59da4ccff6baaeb8ea1">rsb_sppsp</a>.  <a href="interfacersb_1_1rsb__sppsp.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__spmsp.html">rsb::rsb_spmsp</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga8813ccbbb1065ac76bfe22c42feafa05">rsb_spmsp</a>.  <a href="interfacersb_1_1rsb__spmsp.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__spmsp__to__dense.html">rsb::rsb_spmsp_to_dense</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga74d97612d4af70244c886b9eadd90a0e">rsb_spmsp_to_dense</a>.  <a href="interfacersb_1_1rsb__spmsp__to__dense.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__switch__to__coo.html">rsb::rsb_mtx_switch_to_coo</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gadf75c148fe661486ab0d8140657b8d9a">rsb_mtx_switch_to_coo</a>.  <a href="interfacersb_1_1rsb__mtx__switch__to__coo.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__switch__to__csr.html">rsb::rsb_mtx_switch_to_csr</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga3c46a4942a6acb90063d721b6446e78e">rsb_mtx_switch_to_csr</a>.  <a href="interfacersb_1_1rsb__mtx__switch__to__csr.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__coo.html">rsb::rsb_mtx_get_coo</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaac3c6c033733a8101b9ccf56f8fc7112">rsb_mtx_get_coo</a>.  <a href="interfacersb_1_1rsb__mtx__get__coo.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__csr.html">rsb::rsb_mtx_get_csr</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga4adca460f50bc1ad7d9ffdfda2273b87">rsb_mtx_get_csr</a>.  <a href="interfacersb_1_1rsb__mtx__get__csr.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__rows__sparse.html">rsb::rsb_mtx_get_rows_sparse</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaa01c4a69db732f99e8a960ee8c9afa23">rsb_mtx_get_rows_sparse</a>.  <a href="interfacersb_1_1rsb__mtx__get__rows__sparse.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__coo__block.html">rsb::rsb_mtx_get_coo_block</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb_mtx_get_coo_block</a>.  <a href="interfacersb_1_1rsb__mtx__get__coo__block.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__info.html">rsb::rsb_mtx_get_info</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad9a3eacd54fb7043464006cd57866edf">rsb_mtx_get_info</a>.  <a href="interfacersb_1_1rsb__mtx__get__info.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__info__str.html">rsb::rsb_mtx_get_info_str</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str</a>.  <a href="interfacersb_1_1rsb__mtx__get__info__str.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__upd__vals.html">rsb::rsb_mtx_upd_vals</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals</a>.  <a href="interfacersb_1_1rsb__mtx__upd__vals.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__prec.html">rsb::rsb_mtx_get_prec</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gadaee12cc24dac7f8ebc68efd3d09c819">rsb_mtx_get_prec</a>.  <a href="interfacersb_1_1rsb__mtx__get__prec.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__set__vals.html">rsb::rsb_mtx_set_vals</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gab8069ad6d5a67bc8a726131891e98c46">rsb_mtx_set_vals</a>.  <a href="interfacersb_1_1rsb__mtx__set__vals.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__mtx__get__vals.html">rsb::rsb_mtx_get_vals</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad8f1aa9ac5081edd789374e7bb82697f">rsb_mtx_get_vals</a>.  <a href="interfacersb_1_1rsb__mtx__get__vals.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__tune__spmm.html">rsb::rsb_tune_spmm</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>.  <a href="interfacersb_1_1rsb__tune__spmm.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__tune__spsm.html">rsb::rsb_tune_spsm</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga8d7a05bbc165bd6ac20e8e23487a5871">rsb_tune_spsm</a>.  <a href="interfacersb_1_1rsb__tune__spsm.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__psblas__trans__to__rsb__trans.html">rsb::rsb_psblas_trans_to_rsb_trans</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga7459601f0d54bd95549959b9749fedde">rsb_psblas_trans_to_rsb_trans</a>.  <a href="interfacersb_1_1rsb__psblas__trans__to__rsb__trans.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__file__mtx__save.html">rsb::rsb_file_mtx_save</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad911ac7528c95c874d02cb17e6b76c54">rsb_file_mtx_save</a>.  <a href="interfacersb_1_1rsb__file__mtx__save.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__file__mtx__load.html">rsb::rsb_file_mtx_load</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga00833b0cf57da8e430f9d0e2b5375bb3">rsb_file_mtx_load</a>.  <a href="interfacersb_1_1rsb__file__mtx__load.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__file__vec__load.html">rsb::rsb_file_vec_load</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gad071e0373a08f74ee7ae910e9e4fd140">rsb_file_vec_load</a>.  <a href="interfacersb_1_1rsb__file__vec__load.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__file__vec__save.html">rsb::rsb_file_vec_save</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gac4b2a63cdfe1cd4083b1561ee4bea696">rsb_file_vec_save</a>.  <a href="interfacersb_1_1rsb__file__vec__save.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__file__mtx__get__dims.html">rsb::rsb_file_mtx_get_dims</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaa79f69918eafbd8f737b7866a00a0330">rsb_file_mtx_get_dims</a>.  <a href="interfacersb_1_1rsb__file__mtx__get__dims.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__coo__sort.html">rsb::rsb_coo_sort</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#gaa09eca432d5bb8c57fcff5d9ab98dfb8">rsb_coo_sort</a>.  <a href="interfacersb_1_1rsb__coo__sort.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacersb_1_1rsb__time.html">rsb::rsb_time</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">ISO C BINDING interface to <a class="el" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a>.  <a href="interfacersb_1_1rsb__time.html#details">More...</a><br/></td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>Header file automatically generated from <<a class="el" href="rsb_8h.html" title="This file declares the user interface functions and data structures for the librsb library...">rsb.h</a>>, offering ISO-C-BINDING interfaces to <<a class="el" href="rsb_8h.html" title="This file declares the user interface functions and data structures for the librsb library...">rsb.h</a>>'s functions. Defines <code>MODULE</code> <code>rsb</code>. For examples of usage, [...]
+</div></div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:21 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
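
[The Fortran interfaces listed above bind one-to-one, via ISO_C_BINDING, to the C functions of <rsb.h>. A minimal sketch of the underlying C call sequence, assuming a 2x2 double-precision matrix in COO form; RSB_NUMERICAL_TYPE_DOUBLE and RSB_TRANSPOSITION_N are librsb constants not shown on this page.]

#include <rsb.h>
#include <stdio.h>

int main(void)
{
	const rsb_coo_idx_t IA[] = { 0, 1 }, JA[] = { 0, 1 };  /* COO coordinates */
	const double VA[] = { 1.0, 2.0 };                      /* nonzero values */
	const double X[] = { 1.0, 1.0 }, alpha = 1.0, beta = 0.0;
	double Y[] = { 0.0, 0.0 };
	struct rsb_mtx_t *mtxAp = NULL;
	rsb_err_t errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS);

	if (errval != RSB_ERR_NO_ERROR)
		return RSB_ERR_TO_PROGRAM_ERROR(errval);
	mtxAp = rsb_mtx_alloc_from_coo_const(VA, IA, JA, 2, RSB_NUMERICAL_TYPE_DOUBLE,
			2, 2, RSB_DEFAULT_BLOCKING, RSB_DEFAULT_BLOCKING,
			RSB_FLAG_NOFLAGS, &errval);
	if (mtxAp == NULL)
		return RSB_ERR_TO_PROGRAM_ERROR(errval);
	errval = rsb_spmv(RSB_TRANSPOSITION_N, &alpha, mtxAp, X, 1, &beta, Y, 1); /* Y := alpha*A*X + beta*Y */
	printf("Y = [ %g %g ]\n", Y[0], Y[1]);
	rsb_mtx_free(mtxAp);                                   /* free the matrix */
	return RSB_ERR_TO_PROGRAM_ERROR(rsb_lib_exit(RSB_NULL_EXIT_OPTIONS));
}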
diff --git a/doc/html/rsb_8h.html b/doc/html/rsb_8h.html
new file mode 100644
index 0000000..a09b156
--- /dev/null
+++ b/doc/html/rsb_8h.html
@@ -0,0 +1,2280 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb.h File Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="files.html"><span>File List</span></a></li>
+      <li><a href="globals.html"><span>Globals</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#nested-classes">Data Structures</a> |
+<a href="#define-members">Macros</a> |
+<a href="#typedef-members">Typedefs</a> |
+<a href="#enum-members">Enumerations</a> |
+<a href="#func-members">Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb.h File Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>This file declares the user interface functions and data structures for the <code>librsb</code> library.  
+<a href="#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="nested-classes"></a>
+Data Structures</h2></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">struct  </td><td class="memItemRight" valign="bottom"><a class="el" href="structrsb__initopts.html">rsb_initopts</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">A structure specifying library (initialization) options, to be used with the <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit()</a> function. <br/>
+  <a href="structrsb__initopts.html#details">More...</a><br/></td></tr>
+</table><table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="define-members"></a>
+Macros</h2></td></tr>
+<tr class="memitem:a43e6277fc54647f36c97956e4c92062d"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(E)   (-(E))</td></tr>
+<tr class="memitem:a1ad3f986b2e84249785751bf42ff3f8a"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x000)</td></tr>
+<tr class="memitem:ad46ebc803d7cad695babdc7d8c709828"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#ad46ebc803d7cad695babdc7d8c709828">RSB_ERR_GENERIC_ERROR</a>   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x001)</td></tr>
+<tr class="memitem:ab4f407e7c8364bee51cc77546d6f0922"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#ab4f407e7c8364bee51cc77546d6f0922">RSB_ERR_UNSUPPORTED_OPERATION</a>   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x002)</td></tr>
+<tr class="memitem:afdf2ab3912960ee19f23e7d585371548"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#afdf2ab3912960ee19f23e7d585371548">RSB_ERR_UNSUPPORTED_TYPE</a>   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x004)</td></tr>
+<tr class="memitem:ac00cd41eab18a0d2b9323b401029dd73"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#ac00cd41eab18a0d2b9323b401029dd73">RSB_ERR_UNSUPPORTED_FORMAT</a>   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x008)</td></tr>
+<tr class="memitem:a8e650a7e3b5c5aa1fb9763b0f1498126"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a8e650a7e3b5c5aa1fb9763b0f1498126">RSB_ERR_INTERNAL_ERROR</a>   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x010)</td></tr>
+<tr class="memitem:af0b262c6c554403269234219b3aec409"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#af0b262c6c554403269234219b3aec409">RSB_ERR_BADARGS</a>   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x020)</td></tr>
+<tr class="memitem:a538215b32e908646c979a2e446ae5467"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a538215b32e908646c979a2e446ae5467">RSB_ERR_ENOMEM</a>   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x040)</td></tr>
+<tr class="memitem:a0bd20d0f68cf911bf9dfda495d8e12db"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a0bd20d0f68cf911bf9dfda495d8e12db">RSB_ERR_UNIMPLEMENTED_YET</a>   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x100)</td></tr>
+<tr class="memitem:a3d7758ee9127e0c93c9075402999d154"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a3d7758ee9127e0c93c9075402999d154">RSB_ERR_LIMITS</a>   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x200)</td></tr>
+<tr class="memitem:a40628c24058f45a481e18b6ad491bf1b"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a40628c24058f45a481e18b6ad491bf1b">RSB_ERR_FORTRAN_ERROR</a>   <a class="el" href="rsb_8h.html#ad46ebc803d7cad695babdc7d8c709828">RSB_ERR_GENERIC_ERROR</a></td></tr>
+<tr class="memitem:accf836c8eb3145e9ab4fd277d6911764"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#accf836c8eb3145e9ab4fd277d6911764">RSB_ERR_UNSUPPORTED_FEATURE</a>   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x400)</td></tr>
+<tr class="memitem:a5ab0f86009e1f934b25b23fc4837b9b0"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a5ab0f86009e1f934b25b23fc4837b9b0">RSB_ERR_NO_USER_CONFIGURATION</a>   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x800)</td></tr>
+<tr class="memitem:a14103828be5eb82e40d3b772ce54abda"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a14103828be5eb82e40d3b772ce54abda">RSB_ERR_CORRUPT_INPUT_DATA</a>   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x1000)</td></tr>
+<tr class="memitem:a3cacb604d0ad892e195c7c97eda18dba"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a3cacb604d0ad892e195c7c97eda18dba">RSB_ERR_FAILED_MEMHIER_DETECTION</a>   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x2000)</td></tr>
+<tr class="memitem:a935de71c3acc5714ad539d65288e2593"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a935de71c3acc5714ad539d65288e2593">RSB_ERR_COULD_NOT_HONOUR_EXTERNALLY_ALLOCATION_FLAGS</a>   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x4000)</td></tr>
+<tr class="memitem:a9d7fe7c0e3fabfba57bf2318459ed18a"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a9d7fe7c0e3fabfba57bf2318459ed18a">RSB_ERR_NO_STREAM_OUTPUT_CONFIGURED_OUT</a>   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x8000)</td></tr>
+<tr class="memitem:a8d504baa13048da05bb71235e2c8d181"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a8d504baa13048da05bb71235e2c8d181">RSB_ERR_INVALID_NUMERICAL_DATA</a>   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x10000)</td></tr>
+<tr class="memitem:a1b63053f52d6426b726a05b206a3862a"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a1b63053f52d6426b726a05b206a3862a">RSB_ERR_MEMORY_LEAK</a>   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x20000)</td></tr>
+<tr class="memitem:a4d8eb05488b681b75449f64c418b8893"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a4d8eb05488b681b75449f64c418b8893">RSB_ERRS_UNSUPPORTED_FEATURES</a>   (<a class="el" href="rsb_8h.html#accf836c8eb3145e9ab4fd277d6911764">RSB_ERR_UNSUPPORTED_FEATURE</a>|<a class="el" href="rsb_8h.html#a9d7fe7c0e3fabfba57bf2318459ed18a">RSB_ERR_NO_STREAM_OUTPUT_CONFIGURED_OUT</a> [...]
+<tr class="memitem:a61f8a9ebc9bced69076389ba3cd2cce8"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a61f8a9ebc9bced69076389ba3cd2cce8">RSB_PROGRAM_SUCCESS</a>   (0)</td></tr>
+<tr class="memitem:a7f6f859f61b0855e5389e1bc98829bd4"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a7f6f859f61b0855e5389e1bc98829bd4">RSB_PROGRAM_ERROR</a>   (-1)</td></tr>
+<tr class="memitem:a9738e6b8b638ca234acd92b49c6ac1db"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a9738e6b8b638ca234acd92b49c6ac1db">RSB_ERR_TO_PROGRAM_ERROR</a>(E)   ((E)==(<a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)?RSB_PROGRAM_SUCCESS:RSB_PROGRAM_ERROR)</td></tr>
+<tr class="memitem:afeb783fe4dca5762623a621b7095dd01"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#afeb783fe4dca5762623a621b7095dd01">RSB_REINIT_SINGLE_VALUE</a>(IOF, IOP, IOS, ERRVAL)   { enum <a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6">rsb_opt_t</a> keys[]={IOF}; void*values[]={(IOP)}; struct <a class="el" href="structrsb__initopts.html"> [...]
+<tr class="memitem:aa0ca08a816983bc6294317d0e22e0509"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#aa0ca08a816983bc6294317d0e22e0509">RSB_REINIT_SINGLE_VALUE_C_IOP</a>(IOF, IOP, IOS, ERRVAL)   { enum <a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6">rsb_opt_t</a> keys[]={IOF}; const void*values[]={(IOP)}; struct <a class="el" href="structrsb__ini [...]
+<tr class="memitem:a20da3b07d4c17771762413010816e36e"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a20da3b07d4c17771762413010816e36e">RSB_REINIT_SINGLE_VALUE_SET</a>(IOF, IOP, ERRVAL)   <a class="el" href="rsb_8h.html#afeb783fe4dca5762623a621b7095dd01">RSB_REINIT_SINGLE_VALUE</a>(IOF,IOP,<a class="el" href="rsb_8h.html#aef619407815752dc767cfd6870b72101">RSB_IO_SPECIFIER_SET</a> [...]
+<tr class="memitem:ae6f837f13f6413a163f2c6b0c02dadf2"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#ae6f837f13f6413a163f2c6b0c02dadf2">RSB_REINIT_SINGLE_VALUE_GET</a>(IOF, IOP, ERRVAL)   <a class="el" href="rsb_8h.html#afeb783fe4dca5762623a621b7095dd01">RSB_REINIT_SINGLE_VALUE</a>(IOF,IOP,<a class="el" href="rsb_8h.html#afd8b1de2977b2d810f9c615195d9acec">RSB_IO_SPECIFIER_GET</a> [...]
+<tr class="memitem:afd8b1de2977b2d810f9c615195d9acec"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#afd8b1de2977b2d810f9c615195d9acec">RSB_IO_SPECIFIER_GET</a>   1</td></tr>
+<tr class="memitem:aef619407815752dc767cfd6870b72101"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#aef619407815752dc767cfd6870b72101">RSB_IO_SPECIFIER_SET</a>   0</td></tr>
+<tr class="memitem:add105c42e570c5c269680d437f8c51e2"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#add105c42e570c5c269680d437f8c51e2">RSB_NULL_INIT_OPTIONS</a>   NULL</td></tr>
+<tr class="memitem:a2234a5e51156de6c95c3f8c2951ae09f"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a2234a5e51156de6c95c3f8c2951ae09f">RSB_NULL_EXIT_OPTIONS</a>   NULL</td></tr>
+<tr class="memitem:a552fe79778c824e8d88ddfd0d9c58586"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a552fe79778c824e8d88ddfd0d9c58586">rsb_mtx_get_norm</a>   <a class="el" href="group__rsb__doc__rsb.html#ga6a645ce89fd167d72c92cdcfbcd8ed81">rsb_mtx_get_nrm</a></td></tr>
+<tr class="memitem:a191af7bdb17d4b0abb3a195c11e56c3b"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a191af7bdb17d4b0abb3a195c11e56c3b">rsb_file_mtx_render</a>   <a class="el" href="group__rsb__doc__rsb.html#ga4b45a74b985f5cbd869bc9a540951771">rsb_file_mtx_rndr</a></td></tr>
+<tr class="memitem:a40d40562867aceec2899cdddf79b3086"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a40d40562867aceec2899cdddf79b3086">rsb_mtx_upd_values</a>   <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals</a></td></tr>
+<tr class="memitem:a56bb6be11af9a5a0ed9aaa8774ab6db9"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a56bb6be11af9a5a0ed9aaa8774ab6db9">RSB_PRECF_ILU0</a>   0x00000001</td></tr>
+<tr class="memitem:a8ba1704fe1f07cb9abe856d9a1a20ea9"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a8ba1704fe1f07cb9abe856d9a1a20ea9">rsb_mtx_get_preconditioner</a>   <a class="el" href="group__rsb__doc__rsb.html#gadaee12cc24dac7f8ebc68efd3d09c819">rsb_mtx_get_prec</a></td></tr>
+<tr class="memitem:a5b622f80450cdef4f8a06742eacbb045"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a5b622f80450cdef4f8a06742eacbb045">rsb_mtx_set_values</a>   <a class="el" href="group__rsb__doc__rsb.html#gab8069ad6d5a67bc8a726131891e98c46">rsb_mtx_set_vals</a></td></tr>
+<tr class="memitem:af08b72a410e54fd7db6dcb12db232aec"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#af08b72a410e54fd7db6dcb12db232aec">rsb_mtx_get_values</a>   <a class="el" href="group__rsb__doc__rsb.html#gad8f1aa9ac5081edd789374e7bb82697f">rsb_mtx_get_vals</a></td></tr>
+<tr class="memitem:a97106c8db99424b5b69cd6be5bf59937"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a97106c8db99424b5b69cd6be5bf59937">rsb_file_mtx_get_dimensions</a>   <a class="el" href="group__rsb__doc__rsb.html#gaa79f69918eafbd8f737b7866a00a0330">rsb_file_mtx_get_dims</a></td></tr>
+<tr class="memitem:ga68e662dcfb6981c1efc8eb03ef327182"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga68e662dcfb6981c1efc8eb03ef327182">RSB_SIZEOF</a>(TYPE)   RSB_NUMERICAL_TYPE_SIZE(TYPE)</td></tr>
+<tr><td colspan="2"><div class="groupHeader">Other constants</div></td></tr>
+<tr><td colspan="2"><div class="groupText"><p>Other constants for some typedefs. </p>
+</div></td></tr>
+<tr class="memitem:af580e920b9f507028d3b7d34b4dadd6f"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#af580e920b9f507028d3b7d34b4dadd6f">RSB_BOOL_TRUE</a>   1</td></tr>
+<tr class="memitem:ad396755fe9a1d81991d5ac238058db18"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#ad396755fe9a1d81991d5ac238058db18">RSB_BOOL_FALSE</a>   0</td></tr>
+<tr class="memitem:a3949d8af584a0e0e0a17e96d28b8d078"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a3949d8af584a0e0e0a17e96d28b8d078">RSB_DO_FLAG_ADD</a>(V, F)   (V) |=  (F)</td></tr>
+<tr class="memitem:aee33ededde2130f79f6c84966f1a180b"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#aee33ededde2130f79f6c84966f1a180b">RSB_DO_FLAG_DEL</a>(V, F)   (V) &= ~(F)</td></tr>
+<tr class="memitem:a23beda4691d4e83e6d3984960dc9f422"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a23beda4691d4e83e6d3984960dc9f422">RSB_DO_FLAG_FILTEROUT</a>(V, F)   ((V) & ~(F))</td></tr>
+<tr class="memitem:a70e87c7a0afaf9b27650d252086559f7"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a70e87c7a0afaf9b27650d252086559f7">RSB_DO_FLAG_FILTERONLY</a>(V, F)   ((V) & (F))</td></tr>
+<tr class="memitem:ad155950ce44eddd61911184bccba86ab"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#ad155950ce44eddd61911184bccba86ab">RSB_DO_FLAG_HAS</a>(V, F)   ((((V)&(F))==(F))?RSB_BOOL_TRUE:RSB_BOOL_FALSE)</td></tr>
+<tr class="memitem:a116d0af2caf6bddd358035597a260244"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a116d0af2caf6bddd358035597a260244">RSB_DO_FLAG_HAS_INTERSECTION</a>(V, F)   (((V)&(F))?RSB_BOOL_TRUE:RSB_BOOL_FALSE)</td></tr>
+<tr class="memitem:a7be45869842d6ecc5646740350d27d26"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a7be45869842d6ecc5646740350d27d26">RSB_DEFAULT_ROW_BLOCKING</a>   1</td></tr>
+<tr class="memitem:a0f7e634867763b3cc1faaa3ba8e106db"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a0f7e634867763b3cc1faaa3ba8e106db">RSB_DEFAULT_COL_BLOCKING</a>   1</td></tr>
+<tr class="memitem:a3579d00f3b97cd569707f7c62e462322"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a3579d00f3b97cd569707f7c62e462322">RSB_DEFAULT_BLOCKING</a>   1</td></tr>
+<tr class="memitem:af7d43df61fa72c8971cece701ae53a22"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#af7d43df61fa72c8971cece701ae53a22">RSB_IS_SIGNED</a>(T)   (((T)0) > (((T)-1)))</td></tr>
+<tr class="memitem:a9ea900484e72f4876b3fd8d9f402ea39"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a9ea900484e72f4876b3fd8d9f402ea39">RSB_MAX_UNSIGNED</a>(T)   ((T)-1)</td></tr>
+<tr class="memitem:a5749695a0fccd6348d669c6790185a68"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a5749695a0fccd6348d669c6790185a68">RSB_CHAR_BIT</a>   8	/* bits per byte; if not 8, librsb compilation should fail */</td></tr>
+<tr class="memitem:ab16e9407330a11d4163be1cc586990b3"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#ab16e9407330a11d4163be1cc586990b3">RSB_HALF_MAX_SIGNED</a>(T)   ((T)1 << (sizeof(T)*<a class="el" href="rsb_8h.html#a5749695a0fccd6348d669c6790185a68">RSB_CHAR_BIT</a>-2))</td></tr>
+<tr class="memitem:a465659728318d495a364e906806ffae7"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a465659728318d495a364e906806ffae7">RSB_MAX_SIGNED</a>(T)   (<a class="el" href="rsb_8h.html#ab16e9407330a11d4163be1cc586990b3">RSB_HALF_MAX_SIGNED</a>(T) - 1 + <a class="el" href="rsb_8h.html#ab16e9407330a11d4163be1cc586990b3">RSB_HALF_MAX_SIGNED</a>(T))</td></tr>
+<tr class="memitem:a0ad77b7888128f3e1b144b48e6e93b87"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a0ad77b7888128f3e1b144b48e6e93b87">RSB_MAX_VALUE_FOR_TYPE</a>(T)   (<a class="el" href="rsb_8h.html#af7d43df61fa72c8971cece701ae53a22">RSB_IS_SIGNED</a>(T)?<a class="el" href="rsb_8h.html#a465659728318d495a364e906806ffae7">RSB_MAX_SIGNED</a>(T):<a class="el" href="rsb_8h.html#a9ea [...]
+<tr class="memitem:abaccfe39f69712cebf501c9d55b1a4b8"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#abaccfe39f69712cebf501c9d55b1a4b8">RSB_MIN_MATRIX_DIM</a>   0 /*!> Minimum allowed matrix dimension. */</td></tr>
+<tr class="memitem:a425f78c0a49004e45df20db728f8196d"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a425f78c0a49004e45df20db728f8196d">RSB_MIN_MATRIX_NNZ</a>   0 /*!> Minimum allowed matrix nonzeroes count. */</td></tr>
+<tr class="memitem:af576621f0846e0b9a999ea21641e13c8"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#af576621f0846e0b9a999ea21641e13c8">RSB_NNZ_BLK_MAX</a>   255 /* Dense block maximal allowed size (still unused, for now internal) */</td></tr>
+<tr class="memitem:a318a92d60883f6ade7345459074374f5"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a318a92d60883f6ade7345459074374f5">RSB_MAX_MATRIX_DIM</a>   (<a class="el" href="rsb_8h.html#a0ad77b7888128f3e1b144b48e6e93b87">RSB_MAX_VALUE_FOR_TYPE</a>(<a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a>)-<a class="el" href="rsb_8h.html#af576621 [...]
+<tr class="memitem:a63c69ef30355064d818326768674c9b2"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a63c69ef30355064d818326768674c9b2">RSB_MAX_MATRIX_NNZ</a>   (<a class="el" href="rsb_8h.html#a0ad77b7888128f3e1b144b48e6e93b87">RSB_MAX_VALUE_FOR_TYPE</a>(<a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a>)-<a class="el" href="rsb_8h.html#af576621 [...]
+<tr class="memitem:af88edb77d90929bf6cef617ab862d2bc"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#af88edb77d90929bf6cef617ab862d2bc">RSB_MARKER_COO_VALUE</a>   (<a class="el" href="rsb_8h.html#a318a92d60883f6ade7345459074374f5">RSB_MAX_MATRIX_DIM</a>+1)		/* */</td></tr>
+<tr class="memitem:a967c5aae0dc536668ed67d810378e7fc"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a967c5aae0dc536668ed67d810378e7fc">RSB_MARKER_NNZ_VALUE</a>   (<a class="el" href="rsb_8h.html#a63c69ef30355064d818326768674c9b2">RSB_MAX_MATRIX_NNZ</a>+1)		/* */</td></tr>
+<tr class="memitem:a88e6b599d650b509b54d4fe7c3008b12"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a88e6b599d650b509b54d4fe7c3008b12">RSB_INVALID_COO_IDX_VAL</a>   ((<a class="el" href="rsb_8h.html#af88edb77d90929bf6cef617ab862d2bc">RSB_MARKER_COO_VALUE</a>)+1)	/*< A value which is illegal for any #<a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_id [...]
+<tr class="memitem:a20253111f2fa6a4bc0c75fe7e6430890"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a20253111f2fa6a4bc0c75fe7e6430890">RSB_INVALID_NNZ_IDX_VAL</a>   ((<a class="el" href="rsb_8h.html#a967c5aae0dc536668ed67d810378e7fc">RSB_MARKER_NNZ_VALUE</a>)+1)	/*< A value which is illegal for any #<a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_id [...]
+<tr><td colspan="2"><div class="groupHeader">Matrix assembly flags</div></td></tr>
+<tr><td colspan="2"><div class="groupText"><p><a class="anchor" id="rsb_mtx_t"></a> struct rsb_mtx_t declaration is in a separate, internal include file</p>
+<p><a class="anchor" id="flags_section"></a> These are flags which could be combined to specify the assembly of sparse matrices and in various matrix-related operations. <br/>
+ If unsure what flags to use to a function, <a class="el" href="rsb_8h.html#a0ea7640214ee34c87e483c475b15827d">RSB_FLAG_NOFLAGS</a> shall be a good default in most cases. </p>
+</div></td></tr>
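+<tr><td colspan="2"><div class="groupText"><p>A brief usage sketch (illustrative only; it assumes COO arrays <code>VA</code>, <code>IA</code>, <code>JA</code> with <code>nnzA</code> entries, dimensions <code>nrA</code> x <code>ncA</code>, and a valid <code>typecode</code> are already set up): flags are plain integers, combined with bitwise OR before being passed to an assembly function such as <a class="el" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_from_coo_const</a>:</p>
+<pre class="fragment">rsb_err_t errval = RSB_ERR_NO_ERROR;
+rsb_flags_t flagsA = RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS | RSB_FLAG_DUPLICATES_SUM; /* sum duplicate entries */
+struct rsb_mtx_t *mtxAp = rsb_mtx_alloc_from_coo_const(VA, IA, JA, nnzA, typecode, nrA, ncA,
+	RSB_DEFAULT_ROW_BLOCKING, RSB_DEFAULT_COL_BLOCKING, /* blocking parameters: reserved, use defaults */
+	flagsA, &amp;errval);
+</pre></div></td></tr>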
+<tr class="memitem:aa83897e25c1235a780ed7fe317c78555"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#aa83897e25c1235a780ed7fe317c78555">RSB_FLAG_DEFAULT_STORAGE_FLAGS</a>   (<a class="el" href="rsb_8h.html#a12c780564b9c8db7f8104cc5952a490f">RSB_FLAG_WANT_BCSS_STORAGE</a>|<a class="el" href="rsb_8h.html#a0ee1c6081692a3ca98ee7ea0c7648ec8">RSB_FLAG_WANT_COO_STORAGE</a>)</td></tr>
+<tr class="memitem:a6b21a3edf4231070a10223f1a9ae1dc4"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a6b21a3edf4231070a10223f1a9ae1dc4">RSB_FLAG_DEFAULT_COO_MATRIX_FLAGS</a>   <a class="el" href="rsb_8h.html#a0ee1c6081692a3ca98ee7ea0c7648ec8">RSB_FLAG_WANT_COO_STORAGE</a></td></tr>
+<tr class="memitem:a8c90a9ad92722ffbbf1bfcadb805c520"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a8c90a9ad92722ffbbf1bfcadb805c520">RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS</a>   <a class="el" href="rsb_8h.html#a12c780564b9c8db7f8104cc5952a490f">RSB_FLAG_WANT_BCSS_STORAGE</a></td></tr>
+<tr class="memitem:a17c314e28220f3b81aed9cc7d79f97e4"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a17c314e28220f3b81aed9cc7d79f97e4">RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS</a>   (<a class="el" href="rsb_8h.html#a5ca428920608e6dd6fcc4e9a4fa8ee70">RSB_FLAG_QUAD_PARTITIONING</a>|<a class="el" href="rsb_8h.html#a693ed0d053ad81ca2ad6dc383afa0586">RSB_FLAG_USE_HALFWORD_INDICES</a>|<a cla [...]
+<tr class="memitem:acac4b9c09a3fd6be63e511fc5042038f"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#acac4b9c09a3fd6be63e511fc5042038f">RSB_FLAG_DEFAULT_MATRIX_FLAGS</a>   <a class="el" href="rsb_8h.html#a17c314e28220f3b81aed9cc7d79f97e4">RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS</a></td></tr>
+<tr class="memitem:a0ea7640214ee34c87e483c475b15827d"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a0ea7640214ee34c87e483c475b15827d">RSB_FLAG_NOFLAGS</a>   0x000000</td></tr>
+<tr class="memitem:aacf404fe630d480353ce767fd27ba097"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#aacf404fe630d480353ce767fd27ba097">RSB_FLAG_IDENTICAL_FLAGS</a>   <a class="el" href="rsb_8h.html#a0ea7640214ee34c87e483c475b15827d">RSB_FLAG_NOFLAGS</a></td></tr>
+<tr class="memitem:a8ccb4d7203ce7707f9d13bd6c5ef4169"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a8ccb4d7203ce7707f9d13bd6c5ef4169">RSB_FLAG_FORTRAN_INDICES_INTERFACE</a>   0x000001</td></tr>
+<tr class="memitem:a49a9315ba7e702e323eadca04d0d735a"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a49a9315ba7e702e323eadca04d0d735a">RSB_FLAG_C_INDICES_INTERFACE</a>   0x000000</td></tr>
+<tr class="memitem:a693ed0d053ad81ca2ad6dc383afa0586"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a693ed0d053ad81ca2ad6dc383afa0586">RSB_FLAG_USE_HALFWORD_INDICES</a>   0x000002</td></tr>
+<tr class="memitem:a7e9ef3a7ae3c22ab5c76d36b3ac482cc"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a7e9ef3a7ae3c22ab5c76d36b3ac482cc">RSB_FLAG_WANT_ROW_MAJOR_ORDER</a>   0x000000</td></tr>
+<tr class="memitem:a6ed7790c2f7129a6e051b8167c48a43c"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a6ed7790c2f7129a6e051b8167c48a43c">RSB_FLAG_WANT_COLUMN_MAJOR_ORDER</a>   0x4000000</td></tr>
+<tr class="memitem:a726fa64beccf21ae1b70149b88c3affb"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a726fa64beccf21ae1b70149b88c3affb">RSB_FLAG_SORTED_INPUT</a>   0x000004</td></tr>
+<tr class="memitem:adca72e259846399da3512fcb062ad518"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#adca72e259846399da3512fcb062ad518">RSB_FLAG_TRIANGULAR</a>   0x000008</td></tr>
+<tr class="memitem:aca1c9530dfb366137304d196eb0393c1"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>   0x000010</td></tr>
+<tr class="memitem:a7e66ba39d7ea80c4be17bc524a21056f"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">RSB_FLAG_UPPER</a>   0x000020</td></tr>
+<tr class="memitem:a4af24812309eb471c861ba618cb996f2"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a4af24812309eb471c861ba618cb996f2">RSB_FLAG_UNIT_DIAG_IMPLICIT</a>   0x000040</td></tr>
+<tr class="memitem:a0ee1c6081692a3ca98ee7ea0c7648ec8"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a0ee1c6081692a3ca98ee7ea0c7648ec8">RSB_FLAG_WANT_COO_STORAGE</a>   0x000100</td></tr>
+<tr class="memitem:aff85f26964888f838aa97eb371ce5da3"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#aff85f26964888f838aa97eb371ce5da3">RSB_FLAG_DUPLICATES_KEEP_LAST</a>   0x000000</td></tr>
+<tr class="memitem:a7fee489042762b3b22d8184c592a9e52"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a7fee489042762b3b22d8184c592a9e52">RSB_FLAG_DUPLICATES_DEFAULT_HANDLE</a>   0x000000</td></tr>
+<tr class="memitem:afd1b39c625f4249cd32fccea38957f97"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#afd1b39c625f4249cd32fccea38957f97">RSB_FLAG_DUPLICATES_SUM</a>   0x000200</td></tr>
+<tr class="memitem:abf243a6f15925734e143703c4ad33512"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#abf243a6f15925734e143703c4ad33512">RSB_FLAG_DISCARD_ZEROS</a>   0x000400</td></tr>
+<tr class="memitem:a5ca428920608e6dd6fcc4e9a4fa8ee70"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a5ca428920608e6dd6fcc4e9a4fa8ee70">RSB_FLAG_QUAD_PARTITIONING</a>   0x002000</td></tr>
+<tr class="memitem:a12c780564b9c8db7f8104cc5952a490f"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a12c780564b9c8db7f8104cc5952a490f">RSB_FLAG_WANT_BCSS_STORAGE</a>   0x004000</td></tr>
+<tr class="memitem:adce7e20015d4a549bb8c44a00a80fc7e"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#adce7e20015d4a549bb8c44a00a80fc7e">RSB_FLAG_ASSEMBLED_IN_COO_ARRAYS</a>   0x040000</td></tr>
+<tr class="memitem:a1d3b9bd7a31257cc8116be3dee0125b5"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a1d3b9bd7a31257cc8116be3dee0125b5">RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT</a>   0x080000</td></tr>
+<tr class="memitem:a183c4b8ead89e452d1c204c92b3f8f61"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a183c4b8ead89e452d1c204c92b3f8f61">RSB_FLAG_SYMMETRIC</a>   0x400000</td></tr>
+<tr class="memitem:ae3e1d6090dd2912acba58b4bc0530ab7"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#ae3e1d6090dd2912acba58b4bc0530ab7">RSB_FLAG_HERMITIAN</a>   0x800000</td></tr>
+<tr class="memitem:a54d04b341465bf3dadc62ad99d55f8ca"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a54d04b341465bf3dadc62ad99d55f8ca">RSB_FLAG_RECURSIVE_MORE_LEAVES_THAN_THREADS</a>   0x1000000</td></tr>
+<tr class="memitem:aa06dcddcdd4f42fe2eeda8eb6168bd2d"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#aa06dcddcdd4f42fe2eeda8eb6168bd2d">RSB_FLAG_LOWER_HERMITIAN</a>   (<a class="el" href="rsb_8h.html#ae3e1d6090dd2912acba58b4bc0530ab7">RSB_FLAG_HERMITIAN</a> | <a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>)</td></tr>
+<tr class="memitem:a0565be78af9bac79d07376d501237b00"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a0565be78af9bac79d07376d501237b00">RSB_FLAG_UPPER_HERMITIAN</a>   (<a class="el" href="rsb_8h.html#ae3e1d6090dd2912acba58b4bc0530ab7">RSB_FLAG_HERMITIAN</a> | <a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">RSB_FLAG_UPPER</a>)</td></tr>
+<tr class="memitem:aed7916ce610549fc75aa0c3e2d2ae1b9"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#aed7916ce610549fc75aa0c3e2d2ae1b9">RSB_FLAG_LOWER_TRIANGULAR</a>   (<a class="el" href="rsb_8h.html#adca72e259846399da3512fcb062ad518">RSB_FLAG_TRIANGULAR</a> | <a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>)</td></tr>
+<tr class="memitem:a9168d244582c1a4c57a9ec93d9432539"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a9168d244582c1a4c57a9ec93d9432539">RSB_FLAG_UPPER_TRIANGULAR</a>   (<a class="el" href="rsb_8h.html#adca72e259846399da3512fcb062ad518">RSB_FLAG_TRIANGULAR</a> | <a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">RSB_FLAG_UPPER</a>)</td></tr>
+<tr class="memitem:a6933030c784596e3c8dbbbd8daf62805"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a6933030c784596e3c8dbbbd8daf62805">RSB_FLAG_LOWER_SYMMETRIC</a>   (<a class="el" href="rsb_8h.html#a183c4b8ead89e452d1c204c92b3f8f61">RSB_FLAG_SYMMETRIC</a> | <a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>)</td></tr>
+<tr class="memitem:abccb47886fb3f8352e4e6ad801fd8efa"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#abccb47886fb3f8352e4e6ad801fd8efa">RSB_FLAG_DIAGONAL</a>   (<a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">RSB_FLAG_UPPER</a> | <a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>)</td></tr>
+<tr class="memitem:a3c2701b010fa2928685f3253a0ff1a99"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a3c2701b010fa2928685f3253a0ff1a99">RSB_FLAG_UPPER_SYMMETRIC</a>   (<a class="el" href="rsb_8h.html#a183c4b8ead89e452d1c204c92b3f8f61">RSB_FLAG_SYMMETRIC</a> | <a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">RSB_FLAG_UPPER</a>)</td></tr>
+<tr class="memitem:ad8e75dfa2b78fa82cdd31665a375d257"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#ad8e75dfa2b78fa82cdd31665a375d257">RSB_FLAG_RECURSIVE_SUBDIVIDE_MORE_ON_DIAG</a>   0x8000000</td></tr>
+<tr class="memitem:a6abc0e23c782b817e2ef96d8294f990d"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a6abc0e23c782b817e2ef96d8294f990d">RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS</a>   0x40000000</td></tr>
+<tr class="memitem:a45ae263259390619ea303a5fbe2640f2"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a45ae263259390619ea303a5fbe2640f2">RSB_FLAG_USE_CSR_RESERVED</a>   0x200000</td></tr>
+<tr class="memitem:a1b1cf74b08234e3c7c7d463e7c4acea1"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a1b1cf74b08234e3c7c7d463e7c4acea1">RSB_FLAG_USE_HALFWORD_INDICES_CSR</a>   (<a class="el" href="rsb_8h.html#a693ed0d053ad81ca2ad6dc383afa0586">RSB_FLAG_USE_HALFWORD_INDICES</a>|<a class="el" href="rsb_8h.html#a45ae263259390619ea303a5fbe2640f2">RSB_FLAG_USE_CSR_RESERVED</a>)</td></tr>
+<tr class="memitem:a3051409699970a0df3acfee8cf70b9aa"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a3051409699970a0df3acfee8cf70b9aa">RSB_FLAG_USE_HALFWORD_INDICES_COO</a>   (<a class="el" href="rsb_8h.html#a693ed0d053ad81ca2ad6dc383afa0586">RSB_FLAG_USE_HALFWORD_INDICES</a>|<a class="el" href="rsb_8h.html#a0ee1c6081692a3ca98ee7ea0c7648ec8">RSB_FLAG_WANT_COO_STORAGE</a>)</td></tr>
+<tr class="memitem:a6f4335cce5234a69e06188bcad418091"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a6f4335cce5234a69e06188bcad418091">RSB_FLAG_MUTUALLY_EXCLUSIVE_SWITCHES</a>   (<a class="el" href="rsb_8h.html#a3051409699970a0df3acfee8cf70b9aa">RSB_FLAG_USE_HALFWORD_INDICES_COO</a>|<a class="el" href="rsb_8h.html#a1b1cf74b08234e3c7c7d463e7c4acea1">RSB_FLAG_USE_HALFWORD_INDICES_ [...]
+<tr><td colspan="2"><div class="groupHeader">Matrix rendering flags</div></td></tr>
+<tr><td colspan="2"><div class="groupText"><p><a class="anchor" id="marf_section"></a> These are flags which could be combined to specify rendering options to <a class="el" href="group__rsb__doc__rsb.html#gab0702d7080d1699162e4201bc70cc5ee">rsb_mtx_rndr</a> and <a class="el" href="group__rsb__doc__rsb.html#ga4b45a74b985f5cbd869bc9a540951771">rsb_file_mtx_rndr</a>. </p>
+</div></td></tr>
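+<tr><td colspan="2"><div class="groupText"><p>For instance (a minimal sketch, assuming an already assembled matrix <code>mtxAp</code> and the prototype's (filename, matrix, width, height, flags) argument order), a matrix can be rendered to an Encapsulated PostScript file:</p>
+<pre class="fragment">rsb_err_t errval = rsb_mtx_rndr("mtxAp.eps", mtxAp, 512, 512, RSB_MARF_EPS); /* 512x512 EPS plot */
+</pre></div></td></tr>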
+<tr class="memitem:a53604f78febc54c616282c66bca02daf"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a53604f78febc54c616282c66bca02daf">RSB_MARF_RGB</a>   0x00000001</td></tr>
+<tr class="memitem:a8055e62d2824131421d22de1a0256f79"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a8055e62d2824131421d22de1a0256f79">RSB_MARF_EPS_S</a>   0x00000010</td></tr>
+<tr class="memitem:a77106fe2435306ef028060d0eb7dca14"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a77106fe2435306ef028060d0eb7dca14">RSB_MARF_EPS_B</a>   0x00000020</td></tr>
+<tr class="memitem:a2d332e6ed899c019e54ab4e540c82fd8"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a2d332e6ed899c019e54ab4e540c82fd8">RSB_MARF_EPS</a>   0x00000030</td></tr>
+<tr class="memitem:a3562195777ed886282bd6287551a235c"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a3562195777ed886282bd6287551a235c">RSB_MARF_EPS_L</a>   0x00000070</td></tr>
+</table><table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="typedef-members"></a>
+Typedefs</h2></td></tr>
+<tr class="memitem:aa8f24976a4e4bdf8403ab433564c2005"><td class="memItemLeft" align="right" valign="top">typedef <a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#aa8f24976a4e4bdf8403ab433564c2005">rsb_marf_t</a></td></tr>
+<tr class="memitem:a528640277b196f7cfce2016cffbdd340"><td class="memItemLeft" align="right" valign="top">typedef <a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a528640277b196f7cfce2016cffbdd340">rsb_precf_t</a></td></tr>
+<tr><td colspan="2"><div class="groupHeader">Type definitions</div></td></tr>
+<tr><td colspan="2"><div class="groupText"><p><a class="anchor" id="definitions_section"></a> These are definitions of <code>librsb</code> base types. </p>
+</div></td></tr>
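+<tr><td colspan="2"><div class="groupText"><p>A minimal sketch of these base types in use (variable names are illustrative only):</p>
+<pre class="fragment">rsb_coo_idx_t IA[] = {0,1,2}, JA[] = {0,1,2}; /* coordinate (row/column) indices */
+rsb_nnz_idx_t nnzA = 3;                       /* nonzeroes count */
+rsb_err_t errval = RSB_ERR_NO_ERROR;          /* error status value */
+rsb_time_t t0 = rsb_time();                   /* wall clock time, in seconds */
+</pre></div></td></tr>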
+<tr class="memitem:ac6a4411e32793f5c150c6ab3c6f7e14e"><td class="memItemLeft" align="right" valign="top">typedef signed int </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#ac6a4411e32793f5c150c6ab3c6f7e14e">rsb_blk_idx_t</a></td></tr>
+<tr class="memitem:a4874ba61df0ff15b4395278496f83a5d"><td class="memItemLeft" align="right" valign="top">typedef signed int </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a></td></tr>
+<tr class="memitem:a46b3366e54a5b4dda754a6ace22264df"><td class="memItemLeft" align="right" valign="top">typedef signed int </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a></td></tr>
+<tr class="memitem:a569215d6312bf658b32d3e89cf2e0715"><td class="memItemLeft" align="right" valign="top">typedef signed int </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a></td></tr>
+<tr class="memitem:ac0f6a03345c8874f6e50f0ed033d984b"><td class="memItemLeft" align="right" valign="top">typedef char </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a></td></tr>
+<tr class="memitem:a640e84bcc5268cd92d5d31fd6ac321b8"><td class="memItemLeft" align="right" valign="top">typedef signed int </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a></td></tr>
+<tr class="memitem:aefcdc7de885ab34a89a0d36470e11deb"><td class="memItemLeft" align="right" valign="top">typedef signed int </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a></td></tr>
+<tr class="memitem:aeeac94f4bf43460df839c8decd897523"><td class="memItemLeft" align="right" valign="top">typedef <a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#aeeac94f4bf43460df839c8decd897523">rsb_bool_t</a></td></tr>
+<tr class="memitem:a46095ea7e61e1d1ec0ad055cf0291901"><td class="memItemLeft" align="right" valign="top">typedef <a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a></td></tr>
+<tr class="memitem:ab6fedd060aee0dd9f61f0438987a99a9"><td class="memItemLeft" align="right" valign="top">typedef double </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#ab6fedd060aee0dd9f61f0438987a99a9">rsb_real_t</a></td></tr>
+<tr class="memitem:a10ec0af478bcccdab11545b106678ef6"><td class="memItemLeft" align="right" valign="top">typedef char </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a></td></tr>
+<tr class="memitem:ab7a0af874a2765e9271a63ee4acf3d5d"><td class="memItemLeft" align="right" valign="top">typedef <a class="el" href="rsb_8h.html#ab6fedd060aee0dd9f61f0438987a99a9">rsb_real_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#ab7a0af874a2765e9271a63ee4acf3d5d">rsb_time_t</a></td></tr>
+</table><table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="enum-members"></a>
+Enumerations</h2></td></tr>
+<tr class="memitem:gae0bada88731b01751401847d60110fb6"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6">rsb_opt_t</a> { <br/>
+  <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a3a0e460ef74cf3b2edf102c1aaa73d8a">RSB_IO_WANT_VERBOSE_INIT</a> = 0x000001, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a53498790997d5ef408751f9e19994532">RSB_IO_WANT_VERBOSE_EXIT</a> = 0x000002, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ae398997ce8253b813f2bbb5834e9670f">RSB_IO_WANT_OUTPUT_STREAM</a> = 0x000003, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a8fd1736c99255474630bee80d4924673">RSB_IO_WANT_SORT_METHOD</a> = 0x000004, 
+<br/>
+  <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a26f34783677f687b1e857de76a22fdd7">RSB_IO_WANT_CACHE_BLOCKING_METHOD</a> = 0x000005, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6aae67087e45854502f7c54e0065ed9a3a">RSB_IO_WANT_SUBDIVISION_MULTIPLIER</a> = 0x000006, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a0681bef1f3aca28448c14c4ed7eb4001">RSB_IO_WANT_VERBOSE_ERRORS</a> = 0x000007, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a3d3a5bf255dfc8719f6553e8ac4ecd53">RSB_IO_WANT_BOUNDED_BOX_COMPUTATION</a> = 0x000008, 
+<br/>
+  <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ad9150d4d5672d1835185d6e2286d92f4">RSB_IO_WANT_EXECUTING_THREADS</a> = 0x000009, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a56c0c6849135ce5fa9edd7907ab3e0cb">RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE</a> = 0x000010, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a574d237ad4bb16d884bb46e5a6670d0d">RSB_IO_WANT_MEMORY_HIERARCHY_INFO_STRING</a> = 0x000011, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ad901e7c7c31f4b9118bb313db549ea3b">RSB_IO_WANT_IS_INITIALIZED_MARKER</a> = 0x000012, 
+<br/>
+  <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ad74c3b62aa359b12e7287e7238792e0f">RSB_IO_WANT_MEM_ALLOC_CNT</a> = 0x000013, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a87d7018453cb3179349f12f9e4667b24">RSB_IO_WANT_MEM_ALLOC_TOT</a> = 0x000014, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a1584d16b27130ebda9f7fefa1d89afa5">RSB_IO_WANT_LEAF_LEVEL_MULTIVEC</a> = 0x000015, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a124bff2579d966823c2371e304656f84">RSB_IO_WANT_MAX_MEMORY_ALLOCATIONS</a> = 0x000016, 
+<br/>
+  <a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ab053d73dfb6ce061b9d95a2f7e908dc9">RSB_IO_WANT_MAX_MEMORY_ALLOCATED</a> = 0x000017, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6ae900da85e3fc1f46083ee0abf34db1d9">RSB_IO_WANT_LIBRSB_ETIME</a> = 0x000018, 
+<a class="el" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a2164b61bd47cf53a3c8d287b419ab591">RSB_IO_WANT_VERBOSE_TUNING</a> = 0x000019
+<br/>
+ }</td></tr>
+<tr class="memdesc:gae0bada88731b01751401847d60110fb6"><td class="mdescLeft"> </td><td class="mdescRight">library option values for <a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4670aa682e70f82d5039c600e426a368">rsb_lib_set_opt_str</a>, <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit</a>, <a class="el" href="group__rsb__doc__rsb.html# [...]
+<tr class="memitem:ga14750ca720fd92a2be879a59ae36dfe9"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga14750ca720fd92a2be879a59ae36dfe9">rsb_extff_t</a> { <br/>
+  <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a4c48a7a285045f4614a83c50ad740508">RSB_EXTF_NORM_ONE</a> = 0x00001001, 
+<a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9af5f5082e70a6193ebcf3ea7ba7365eef">RSB_EXTF_NORM_TWO</a> = 0x00001002, 
+<a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a0a6cb081d0345b5bb6290ae534e3502f">RSB_EXTF_NORM_INF</a> = 0x00001003, 
+<a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9af9b17f6ad2d8be781b003836f0403fe5">RSB_EXTF_SUMS_ROW</a> = 0x00001004, 
+<br/>
+  <a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a1878f79ae6f00f0b846a2fae397ffe4e">RSB_EXTF_SUMS_COL</a> = 0x00001005, 
+<a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a8e8061312124af555196c7277102ca54">RSB_EXTF_ASUMS_ROW</a> = 0x00001006, 
+<a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a26a147a4fe29284c1a3ca18ed3824ada">RSB_EXTF_ASUMS_COL</a> = 0x00001007, 
+<a class="el" href="group__rsb__doc__rsb.html#gga14750ca720fd92a2be879a59ae36dfe9a5c7c241fb262968d5b7c42e63e5c1ea1">RSB_EXTF_DIAG</a> = 0x00000004
+<br/>
+ }</td></tr>
+<tr class="memdesc:ga14750ca720fd92a2be879a59ae36dfe9"><td class="mdescLeft"> </td><td class="mdescRight">Extraction filter flags, to be used with <a class="el" href="group__rsb__doc__rsb.html#ga6a645ce89fd167d72c92cdcfbcd8ed81">rsb_mtx_get_nrm()</a>/<a class="el" href="group__rsb__doc__rsb.html#gad0b2352cea6b7512b466d1c51327fcf8">rsb_mtx_get_vec()</a>.  <a href="group__rsb__doc__rsb.html#ga14750ca720fd92a2be879a59ae36dfe9">More...</a><br/></td></tr>
+<tr class="memitem:ga211914bd1afe8044a70dc864f3c1fc8f"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga211914bd1afe8044a70dc864f3c1fc8f">rsb_mif_t</a> { <br/>
+  <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa21c25054ec5c5a88f951d68457132858">RSB_MIF_INDEX_STORAGE_IN_BYTES__TO__SIZE_T</a> = 0x00000001, 
+<a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa6662a0302f39b67aa567f7c023cfe065">RSB_MIF_INDEX_STORAGE_IN_BYTES_PER_NNZ__TO__RSB_REAL_T</a> = 0x00000002, 
+<a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fae9b21eeea628145e87690a5968a5c954">RSB_MIF_MATRIX_ROWS__TO__RSB_COO_INDEX_T</a> = 0x00000004, 
+<a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8faa75c11724776205763e381cebb7059d0">RSB_MIF_MATRIX_COLS__TO__RSB_COO_INDEX_T</a> = 0x00000008, 
+<br/>
+  <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa4c02a263fffec5ad80552c8ce3cc782c">RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T</a> = 0x00000010, 
+<a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa16df07735a83056772b8bde7359e957f">RSB_MIF_TOTAL_SIZE__TO__SIZE_T</a> = 0x00000020, 
+<a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa7a9e06fbef26bddc97005eea246c478e">RSB_MIF_MATRIX_FLAGS__TO__RSB_FLAGS_T</a> = 0x00000040, 
+<a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa0ee69c4f0e9ac9a8ee4614a295b7be93">RSB_MIF_MATRIX_TYPECODE__TO__RSB_TYPE_T</a> = 0x00000080, 
+<br/>
+  <a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa693bf11ea0f96ef79238ab422fcb3f81">RSB_MIF_MATRIX_INFO__TO__CHAR_P</a> = 0x00000100, 
+<a class="el" href="group__rsb__doc__rsb.html#gga211914bd1afe8044a70dc864f3c1fc8fa6256658253071990797f06872811074f">RSB_MIF_LEAVES_COUNT__TO__RSB_BLK_INDEX_T</a> = 0x00000200
+<br/>
+ }</td></tr>
+<tr class="memdesc:ga211914bd1afe8044a70dc864f3c1fc8f"><td class="mdescLeft"> </td><td class="mdescRight">Flags for getting matrix informations via <a class="el" href="group__rsb__doc__rsb.html#gad9a3eacd54fb7043464006cd57866edf">rsb_mtx_get_info()</a>/<a class="el" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str()</a>.  <a href="group__rsb__doc__rsb.html#ga211914bd1afe8044a70dc864f3c1fc8f">More...</a><br/></td></tr>
+<tr class="memitem:ga16c86c65a187bfbe94ecfdb87b97cade"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga16c86c65a187bfbe94ecfdb87b97cade">rsb_elopf_t</a> { <br/>
+  <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea5d853af7a6db57bc49cdbf7a53927e8a">RSB_ELOPF_MUL</a> = 0x00000001, 
+<a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea5665d0891b6ec738013ae7925de01969">RSB_ELOPF_DIV</a> = 0x00000002, 
+<a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadeae2cbeab6782b6e02b069568ec44cb94a">RSB_ELOPF_POW</a> = 0x00000004, 
+<a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea299b987e6a560bf0bec0432859a959e4">RSB_ELOPF_NEG</a> = 0x00000008, 
+<br/>
+  <a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea884b319e42b1f2d70543e26c300a4287">RSB_ELOPF_SCALE_ROWS</a> = 0x00000010, 
+<a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadeafadcdf204c627d95c3dde82ee0c5608e">RSB_ELOPF_SCALE_COLS</a> = 0x00000020, 
+<a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea445dc5c113f761b58356e93e1b2bbfb5">RSB_ELOPF_SCALE_ROWS_REAL</a> = 0x00000040, 
+<a class="el" href="group__rsb__doc__rsb.html#gga16c86c65a187bfbe94ecfdb87b97cadea3a56f10b068d68e579bf4b01f8347f3f">RSB_ELOPF_SCALE_COLS_REAL</a> = 0x00000080
+<br/>
+ }</td></tr>
+<tr class="memdesc:ga16c86c65a187bfbe94ecfdb87b97cade"><td class="mdescLeft"> </td><td class="mdescRight">Flags for specifying a particular elemental/row-wise operation with <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals()</a>.  <a href="group__rsb__doc__rsb.html#ga16c86c65a187bfbe94ecfdb87b97cade">More...</a><br/></td></tr>
+</table><table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="func-members"></a>
+Functions</h2></td></tr>
+<tr class="memitem:ga28710b8dade48738ea8e075aa1a3d262"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r</a> (<a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> errval, <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678e [...]
+<tr class="memitem:gab660cf8aff876ae88b59c7a22ddfc912"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror</a> (void *stream, <a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> errval)</td></tr>
+<tr class="memitem:gaf2b874d9f117ee6a6899634472b17946"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a> (struct <a class="el" href="structrsb__initopts.html">rsb_initopts</a> *iop)</td></tr>
+<tr class="memitem:ga1707f8b0c28805f692146cf2fb28ae70"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit</a> (struct <a class="el" href="structrsb__initopts.html">rsb_initopts</a> *iop)</td></tr>
+<tr class="memitem:ga4670aa682e70f82d5039c600e426a368"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga4670aa682e70f82d5039c600e426a368">rsb_lib_set_opt_str</a> (const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *opnp, const <a class="el" href="rsb_8h.html#a10ec0af478bcc [...]
+<tr class="memitem:a2a08c5a23f3999fe8cf36440680e4a05"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_lib_set_opt</a> (enum <a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6">rsb_opt_t</a> iof, const void *iop)</td></tr>
+<tr class="memitem:a96a28efc32dd050d2a74208b3ad2f227"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb_8h.html#a96a28efc32dd050d2a74208b3ad2f227">rsb_lib_get_opt</a> (enum <a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6">rsb_opt_t</a> iof, void *iop)</td></tr>
+<tr class="memitem:ga86db30487afe975ed18a7aa6ee0db81d"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a> (struct <a class="el" href="structrsb__initopts.html">rsb_initopts</a> *iop)</td></tr>
+<tr class="memitem:gafca80e53d47a7ec3eb116e755fe47c58"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gafca80e53d47a7ec3eb116e755fe47c58">rsb_mtx_alloc_from_coo_begin</a> (<a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> nnzA, <a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode, <a class="el" hre [...]
+<tr class="memitem:gab583fbefa0a66e9d30dac034480c2d86"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gab583fbefa0a66e9d30dac034480c2d86">rsb_mtx_alloc_from_coo_end</a> (struct rsb_mtx_t **mtxApp)</td></tr>
+<tr class="memitem:ga13d417f776654fd159f274e56191573e"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga13d417f776654fd159f274e56191573e">rsb_mtx_alloc_from_csr_const</a> (const void *VA, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *RP, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</ [...]
+<tr class="memitem:gaebf57d9e5263f41eb6163581ffc141aa"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gaebf57d9e5263f41eb6163581ffc141aa">rsb_mtx_alloc_from_csc_const</a> (const void *VA, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</ [...]
+<tr class="memitem:ga60121166daf00968ba717931f04ea455"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga60121166daf00968ba717931f04ea455">rsb_mtx_alloc_from_csr_inplace</a> (void *VA, <a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> *RP, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *JA, <a class [...]
+<tr class="memitem:ga86c1b0d0586f817ee31ca1caa3fee9be"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_from_coo_const</a> (const void *VA, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</ [...]
+<tr class="memitem:ga3b7f9a461377de348b33a873f2e1893f"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_mtx_alloc_from_coo_inplace</a> (void *VA, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *JA, <a class [...]
+<tr class="memitem:gae181671ba19191caa5a282cbde4fdfc5"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a> (struct rsb_mtx_t **mtxBpp, <a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode, <a class="el" href="rsb_8h.html#a4 [...]
+<tr class="memitem:gab64a020286a8b58d23d84d4512bd9132"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a> (struct rsb_mtx_t *mtxAp)</td></tr>
+<tr class="memitem:ga6a645ce89fd167d72c92cdcfbcd8ed81"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga6a645ce89fd167d72c92cdcfbcd8ed81">rsb_mtx_get_nrm</a> (const struct rsb_mtx_t *mtxAp, void *Np, enum <a class="el" href="group__rsb__doc__rsb.html#ga14750ca720fd92a2be879a59ae36dfe9">rsb_extff_t</a> flags)< [...]
+<tr class="memitem:gad0b2352cea6b7512b466d1c51327fcf8"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gad0b2352cea6b7512b466d1c51327fcf8">rsb_mtx_get_vec</a> (const struct rsb_mtx_t *mtxAp, void *Dp, enum <a class="el" href="group__rsb__doc__rsb.html#ga14750ca720fd92a2be879a59ae36dfe9">rsb_extff_t</a> flags)< [...]
+<tr class="memitem:gab0702d7080d1699162e4201bc70cc5ee"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gab0702d7080d1699162e4201bc70cc5ee">rsb_mtx_rndr</a> (const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *filename, const struct rsb_mtx_t *mtxAp, <a class="el" href="rsb_ [...]
+<tr class="memitem:ga4b45a74b985f5cbd869bc9a540951771"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga4b45a74b985f5cbd869bc9a540951771">rsb_file_mtx_rndr</a> (void *pmp, const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *filename, <a class="el" href="rsb_8h.html#a4874ba [...]
+<tr class="memitem:ga4a16a82d289c76a437915db449553d4d"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga4a16a82d289c76a437915db449553d4d">rsb_spmv</a> (<a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> transA, const void *alphap, const struct rsb_mtx_t *mtxAp, const void *Xp,  [...]
+<tr class="memitem:ga3ec8d721b5333aae6ea9b03eb0039285"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga3ec8d721b5333aae6ea9b03eb0039285">rsb_spmm</a> (<a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> transA, const void *alphap, const struct rsb_mtx_t *mtxAp, <a class="el" hr [...]
+<tr class="memitem:ga9b044332b720d3f8083ae792068fb04a"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga9b044332b720d3f8083ae792068fb04a">rsb_spsv</a> (<a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> transT, const void *alphap, const struct rsb_mtx_t *mtxTp, const void *Xp,  [...]
+<tr class="memitem:ga48e6f3844605fffac9f622f05afa6043"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga48e6f3844605fffac9f622f05afa6043">rsb_spsm</a> (<a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> transT, const void *alphap, const struct rsb_mtx_t *mtxTp, <a class="el" hr [...]
+<tr class="memitem:gaf30a70ea183d30d216f700782fc01524"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gaf30a70ea183d30d216f700782fc01524">rsb_mtx_add_to_dense</a> (const void *alphap, const struct rsb_mtx_t *mtxAp, <a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> ldB, <a cl [...]
+<tr class="memitem:ga30823d02e577e59da4ccff6baaeb8ea1"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga30823d02e577e59da4ccff6baaeb8ea1">rsb_sppsp</a> (<a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode, <a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> transA, const void *alphap, const struct rsb [...]
+<tr class="memitem:ga8813ccbbb1065ac76bfe22c42feafa05"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga8813ccbbb1065ac76bfe22c42feafa05">rsb_spmsp</a> (<a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode, <a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> transA, const void *alphap, const struct rsb [...]
+<tr class="memitem:ga74d97612d4af70244c886b9eadd90a0e"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga74d97612d4af70244c886b9eadd90a0e">rsb_spmsp_to_dense</a> (<a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode, <a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055 [...]
+<tr class="memitem:gadf75c148fe661486ab0d8140657b8d9a"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gadf75c148fe661486ab0d8140657b8d9a">rsb_mtx_switch_to_coo</a> (struct rsb_mtx_t *mtxAp, void **VAp, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> **IAp, <a class="el" hr [...]
+<tr class="memitem:ga3c46a4942a6acb90063d721b6446e78e"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga3c46a4942a6acb90063d721b6446e78e">rsb_mtx_switch_to_csr</a> (struct rsb_mtx_t *mtxAp, void **VAp, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> **IAp, <a class="el" hr [...]
+<tr class="memitem:gaac3c6c033733a8101b9ccf56f8fc7112"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gaac3c6c033733a8101b9ccf56f8fc7112">rsb_mtx_get_coo</a> (const struct rsb_mtx_t *mtxAp, void *VA, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, <a class="el" href=" [...]
+<tr class="memitem:ga4adca460f50bc1ad7d9ffdfda2273b87"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga4adca460f50bc1ad7d9ffdfda2273b87">rsb_mtx_get_csr</a> (<a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode, const struct rsb_mtx_t *mtxAp, void *VA, <a class="el" href [...]
+<tr class="memitem:gaa01c4a69db732f99e8a960ee8c9afa23"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gaa01c4a69db732f99e8a960ee8c9afa23">rsb_mtx_get_rows_sparse</a> (<a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> transA, const void *alphap, const struct rsb_mtx_t *mtxAp, v [...]
+<tr class="memitem:ga68115178d85cd28c645058deb0aa6379"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb_mtx_get_coo_block</a> (const struct rsb_mtx_t *mtxAp, void *VA, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, <a class="el"  [...]
+<tr class="memitem:gad9a3eacd54fb7043464006cd57866edf"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gad9a3eacd54fb7043464006cd57866edf">rsb_mtx_get_info</a> (const struct rsb_mtx_t *mtxAp, enum <a class="el" href="group__rsb__doc__rsb.html#ga211914bd1afe8044a70dc864f3c1fc8f">rsb_mif_t</a> miflags, void *min [...]
+<tr class="memitem:ga2b7d51b9822f73d2fe7fcf5b9d0be1e9"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str</a> (const struct rsb_mtx_t *mtxAp, const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *mis, void *minfop, size_t  [...]
+<tr class="memitem:ga2d7533a97c97b215090d69c2d9235412"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals</a> (struct rsb_mtx_t *mtxAp, enum <a class="el" href="group__rsb__doc__rsb.html#ga16c86c65a187bfbe94ecfdb87b97cade">rsb_elopf_t</a> elop_flags, const void [...]
+<tr class="memitem:gadaee12cc24dac7f8ebc68efd3d09c819"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gadaee12cc24dac7f8ebc68efd3d09c819">rsb_mtx_get_prec</a> (void *opdp, const struct rsb_mtx_t *mtxAp, <a class="el" href="rsb_8h.html#a528640277b196f7cfce2016cffbdd340">rsb_precf_t</a> prec_flags, const void * [...]
+<tr class="memitem:gab8069ad6d5a67bc8a726131891e98c46"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gab8069ad6d5a67bc8a726131891e98c46">rsb_mtx_set_vals</a> (struct rsb_mtx_t *mtxAp, const void *VA, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, const <a clas [...]
+<tr class="memitem:gad8f1aa9ac5081edd789374e7bb82697f"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gad8f1aa9ac5081edd789374e7bb82697f">rsb_mtx_get_vals</a> (const struct rsb_mtx_t *mtxAp, void *VA, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, const <a clas [...]
+<tr class="memitem:ga8c11024d248e2e686476fd9e89aa7c15"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a> (struct rsb_mtx_t **mtxOpp, <a class="el" href="rsb_8h.html#ab6fedd060aee0dd9f61f0438987a99a9">rsb_real_t</a> *sfp, <a class="el" href="rsb_8h.html#aefcdc [...]
+<tr class="memitem:ga8d7a05bbc165bd6ac20e8e23487a5871"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga8d7a05bbc165bd6ac20e8e23487a5871">rsb_tune_spsm</a> (struct rsb_mtx_t **mtxOpp, <a class="el" href="rsb_8h.html#ab6fedd060aee0dd9f61f0438987a99a9">rsb_real_t</a> *sfp, <a class="el" href="rsb_8h.html#aefcdc [...]
+<tr class="memitem:ga7459601f0d54bd95549959b9749fedde"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga7459601f0d54bd95549959b9749fedde">rsb_psblas_trans_to_rsb_trans</a> (const char psbtrans)</td></tr>
+<tr class="memitem:gad911ac7528c95c874d02cb17e6b76c54"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gad911ac7528c95c874d02cb17e6b76c54">rsb_file_mtx_save</a> (const struct rsb_mtx_t *mtxAp, const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *filename)</td></tr>
+<tr class="memitem:ga00833b0cf57da8e430f9d0e2b5375bb3"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga00833b0cf57da8e430f9d0e2b5375bb3">rsb_file_mtx_load</a> (const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *filename, <a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> flagsA, <a class="el" href="r [...]
+<tr class="memitem:gad071e0373a08f74ee7ae910e9e4fd140"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gad071e0373a08f74ee7ae910e9e4fd140">rsb_file_vec_load</a> (const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *filename, <a class="el" href="rsb_8h.html#ac0f6a03345c8874f6 [...]
+<tr class="memitem:gac4b2a63cdfe1cd4083b1561ee4bea696"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gac4b2a63cdfe1cd4083b1561ee4bea696">rsb_file_vec_save</a> (const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *filename, <a class="el" href="rsb_8h.html#ac0f6a03345c8874f6 [...]
+<tr class="memitem:gaa79f69918eafbd8f737b7866a00a0330"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gaa79f69918eafbd8f737b7866a00a0330">rsb_file_mtx_get_dims</a> (const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *filename, <a class="el" href="rsb_8h.html#a4874ba61df0ff [...]
+<tr class="memitem:gaa09eca432d5bb8c57fcff5d9ab98dfb8"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gaa09eca432d5bb8c57fcff5d9ab98dfb8">rsb_coo_sort</a> (void *VA, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b43952 [...]
+<tr class="memitem:ga6677d4e20c00bdf4ebf53567246f5693"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#ab7a0af874a2765e9271a63ee4acf3d5d">rsb_time_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a> (void)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>This file declares the user interface functions and data structures for the <code>librsb</code> library. </p>
+<dl class="section author"><dt>Author</dt><dd>Michele Martone </dd></dl>
+</div><h2>Macro Definition Documentation</h2>
+<a class="anchor" id="ad396755fe9a1d81991d5ac238058db18"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_BOOL_FALSE   0</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A "false" value for <a class="el" href="rsb_8h.html#aeeac94f4bf43460df839c8decd897523">rsb_bool_t</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="af580e920b9f507028d3b7d34b4dadd6f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_BOOL_TRUE   1</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A "true" value for <a class="el" href="rsb_8h.html#aeeac94f4bf43460df839c8decd897523">rsb_bool_t</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a5749695a0fccd6348d669c6790185a68"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_CHAR_BIT   8	/* bits per byte; if not 8, librsb compilation should fail */</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a3579d00f3b97cd569707f7c62e462322"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_DEFAULT_BLOCKING   1</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A safe value for column blocking (reserved for future use). </p>
+
+</div>
+</div>
+<a class="anchor" id="a0f7e634867763b3cc1faaa3ba8e106db"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_DEFAULT_COL_BLOCKING   1</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Reserved for future use. </p>
+
+</div>
+</div>
+<a class="anchor" id="a7be45869842d6ecc5646740350d27d26"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_DEFAULT_ROW_BLOCKING   1</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Reserved for future use. </p>
+
+</div>
+</div>
+<a class="anchor" id="a3949d8af584a0e0e0a17e96d28b8d078"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_DO_FLAG_ADD</td>
+          <td>(</td>
+          <td class="paramtype"> </td>
+          <td class="paramname">V, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"> </td>
+          <td class="paramname">F </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td>   (V) |=  (F)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The flag variable <code>V</code> gets OR-ed with flag <code>F</code> (the bits of <code>F</code> are set in <code>V</code>). </p>
+
+</div>
+</div>
+<a class="anchor" id="aee33ededde2130f79f6c84966f1a180b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_DO_FLAG_DEL</td>
+          <td>(</td>
+          <td class="paramtype"> </td>
+          <td class="paramname">V, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"> </td>
+          <td class="paramname">F </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td>   (V) &= ~(F)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The flag variable <code>V</code> gets AND-ed with the complement of flag <code>F</code> (the bits of <code>F</code> are cleared from <code>V</code>). </p>
+
+</div>
+</div>
+<a class="anchor" id="a70e87c7a0afaf9b27650d252086559f7"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_DO_FLAG_FILTERONLY</td>
+          <td>(</td>
+          <td class="paramtype"> </td>
+          <td class="paramname">V, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"> </td>
+          <td class="paramname">F </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td>   ((V) & (F))</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The value of flag variable <code>V</code> AND-ed with flag <code>F</code> (only the bits of <code>F</code> are retained). </p>
+
+</div>
+</div>
+<a class="anchor" id="a23beda4691d4e83e6d3984960dc9f422"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_DO_FLAG_FILTEROUT</td>
+          <td>(</td>
+          <td class="paramtype"> </td>
+          <td class="paramname">V, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"> </td>
+          <td class="paramname">F </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td>   ((V) & ~(F))</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The value of flag variable <code>V</code> AND-ed with the complement of flag <code>F</code> (the bits of <code>F</code> are filtered out). </p>
+
+</div>
+</div>
+<a class="anchor" id="ad155950ce44eddd61911184bccba86ab"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_DO_FLAG_HAS</td>
+          <td>(</td>
+          <td class="paramtype"> </td>
+          <td class="paramname">V, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"> </td>
+          <td class="paramname">F </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td>   ((((V)&(F))==(F))?RSB_BOOL_TRUE:RSB_BOOL_FALSE)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Presence check for all bits of flag <code>F</code>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a116d0af2caf6bddd358035597a260244"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_DO_FLAG_HAS_INTERSECTION</td>
+          <td>(</td>
+          <td class="paramtype"> </td>
+          <td class="paramname">V, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"> </td>
+          <td class="paramname">F </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td>   (((V)&(F))?RSB_BOOL_TRUE:RSB_BOOL_FALSE)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Presence check for any bit of flag <code>F</code>. </p>
+
+</div>
+</div>
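+<p>As a minimal sketch, the six flag macros above compose as follows (the flag values are the documented ones; the function name is illustrative only):</p>
+<pre class="fragment">
+#include &lt;rsb.h&gt;
+
+/* build a flags word, then query, clear and filter individual bits */
+static rsb_flags_t flags_demo(void)
+{
+	rsb_flags_t flagsA = RSB_FLAG_NOFLAGS;
+	RSB_DO_FLAG_ADD(flagsA, RSB_FLAG_SYMMETRIC | RSB_FLAG_LOWER); /* set bits       */
+	if (RSB_DO_FLAG_HAS(flagsA, RSB_FLAG_LOWER))                  /* all bits set ? */
+		RSB_DO_FLAG_DEL(flagsA, RSB_FLAG_SYMMETRIC);          /* clear bits     */
+	return RSB_DO_FLAG_FILTEROUT(flagsA, RSB_FLAG_UPPER);         /* value w/o bits */
+}
+</pre>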
+<a class="anchor" id="af0b262c6c554403269234219b3aec409"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_ERR_BADARGS   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x020)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The user supplied some invalid or corrupt data as an argument. </p>
+
+</div>
+</div>
+<a class="anchor" id="a43e6277fc54647f36c97956e4c92062d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_ERR_CAST</td>
+          <td>(</td>
+          <td class="paramtype"> </td>
+          <td class="paramname">E</td><td>)</td>
+          <td>   (-(E))</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A macro for the error code value. </p>
+
+</div>
+</div>
+<a class="anchor" id="a14103828be5eb82e40d3b772ce54abda"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_ERR_CORRUPT_INPUT_DATA   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x1000)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>User supplied data (e.g.: from file) was corrupt. </p>
+
+</div>
+</div>
+<a class="anchor" id="a935de71c3acc5714ad539d65288e2593"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_ERR_COULD_NOT_HONOUR_EXTERNALLY_ALLOCATION_FLAGS   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x4000)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>User gave flags for an in place assembly in a copy-based function. </p>
+
+</div>
+</div>
+<a class="anchor" id="a538215b32e908646c979a2e446ae5467"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_ERR_ENOMEM   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x040)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>There is not enough dynamic memory to perform the requested operation. </p>
+
+</div>
+</div>
+<a class="anchor" id="a3cacb604d0ad892e195c7c97eda18dba"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_ERR_FAILED_MEMHIER_DETECTION   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x2000)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Memory hierarchy information could not be detected. You can bypass this by setting a meaningful <code>RSB_USER_SET_MEM_HIERARCHY_INFO</code> environment variable. </p>
+
+</div>
+</div>
+<a class="anchor" id="a40628c24058f45a481e18b6ad491bf1b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_ERR_FORTRAN_ERROR   <a class="el" href="rsb_8h.html#ad46ebc803d7cad695babdc7d8c709828">RSB_ERR_GENERIC_ERROR</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A Fortran specific error occurred. </p>
+
+</div>
+</div>
+<a class="anchor" id="ad46ebc803d7cad695babdc7d8c709828"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_ERR_GENERIC_ERROR   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x001)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>An unspecified, generic error occurred. </p>
+
+</div>
+</div>
+<a class="anchor" id="a8e650a7e3b5c5aa1fb9763b0f1498126"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_ERR_INTERNAL_ERROR   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x010)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>An error occurred which does not appear to be caused by the user (an internal error). </p>
+
+</div>
+</div>
+<a class="anchor" id="a8d504baa13048da05bb71235e2c8d181"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_ERR_INVALID_NUMERICAL_DATA   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x10000)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>User gave some input with invalid numerical data. </p>
+
+</div>
+</div>
+<a class="anchor" id="a3d7758ee9127e0c93c9075402999d154"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_ERR_LIMITS   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x200)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The requested operation could not be executed: internal limits would be exceeded or an index overflow would occur. </p>
+
+</div>
+</div>
+<a class="anchor" id="a1b63053f52d6426b726a05b206a3862a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_ERR_MEMORY_LEAK   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x20000)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Probable memory leak (user did not deallocate librsb structures before calling <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit()</a>). </p>
+
+</div>
+</div>
+<a class="anchor" id="a1ad3f986b2e84249785751bf42ff3f8a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_ERR_NO_ERROR   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x000)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>No error occurred (success). This is the return value indicating successful operation for most functions. </p>
+
+</div>
+</div>
+<a class="anchor" id="a9d7fe7c0e3fabfba57bf2318459ed18a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_ERR_NO_STREAM_OUTPUT_CONFIGURED_OUT   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x8000)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The user requested writing to a file stream, but this feature was configured out at build time. </p>
+
+</div>
+</div>
+<a class="anchor" id="a5ab0f86009e1f934b25b23fc4837b9b0"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_ERR_NO_USER_CONFIGURATION   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x800)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A file containing user-set configuration was not present. </p>
+
+</div>
+</div>
+<a class="anchor" id="a9738e6b8b638ca234acd92b49c6ac1db"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_ERR_TO_PROGRAM_ERROR</td>
+          <td>(</td>
+          <td class="paramtype"> </td>
+          <td class="paramname">E</td><td>)</td>
+          <td>   ((E)==(<a class="el" href="rsb_8h.html#a1ad3f986b2e84249785751bf42ff3f8a">RSB_ERR_NO_ERROR</a>)?RSB_PROGRAM_SUCCESS:RSB_PROGRAM_ERROR)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Converts an error code into a program exit status (int): <code>RSB_PROGRAM_SUCCESS</code> on success, <code>RSB_PROGRAM_ERROR</code> otherwise. </p>
+
+</div>
+</div>
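+<p>A hedged sketch of the intended use of this macro: mapping the library error code of the enclosing program to a process exit status (all identifiers used are documented in this header):</p>
+<pre class="fragment">
+#include &lt;rsb.h&gt;
+
+int main(void)
+{
+	rsb_err_t errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS);
+	if (errval != RSB_ERR_NO_ERROR)
+		return RSB_ERR_TO_PROGRAM_ERROR(errval); /* yields RSB_PROGRAM_ERROR  */
+	errval = rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
+	return RSB_ERR_TO_PROGRAM_ERROR(errval);         /* RSB_PROGRAM_SUCCESS if ok */
+}
+</pre>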
+<a class="anchor" id="a0bd20d0f68cf911bf9dfda495d8e12db"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_ERR_UNIMPLEMENTED_YET   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x100)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The requested operation was not implemented yet in this code revision (but probably will be, someday). </p>
+
+</div>
+</div>
+<a class="anchor" id="accf836c8eb3145e9ab4fd277d6911764"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_ERR_UNSUPPORTED_FEATURE   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x400)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The requested feature (e.g.: blocking) is not available because it was opted out or not configured at build time. </p>
+
+</div>
+</div>
+<a class="anchor" id="ac00cd41eab18a0d2b9323b401029dd73"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_ERR_UNSUPPORTED_FORMAT   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x008)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The user requested to use a matrix storage format which is not supported (e.g.: was opted out at build time). </p>
+
+</div>
+</div>
+<a class="anchor" id="ab4f407e7c8364bee51cc77546d6f0922"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_ERR_UNSUPPORTED_OPERATION   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x002)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The user requested an operation which is not supported (e.g.: was opted out at build time). </p>
+
+</div>
+</div>
+<a class="anchor" id="afdf2ab3912960ee19f23e7d585371548"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_ERR_UNSUPPORTED_TYPE   <a class="el" href="rsb_8h.html#a43e6277fc54647f36c97956e4c92062d">RSB_ERR_CAST</a>(0x004)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The user requested to use a type which is not supported (e.g.: was opted out at build time). </p>
+
+</div>
+</div>
+<a class="anchor" id="a4d8eb05488b681b75449f64c418b8893"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_ERRS_UNSUPPORTED_FEATURES   (<a class="el" href="rsb_8h.html#accf836c8eb3145e9ab4fd277d6911764">RSB_ERR_UNSUPPORTED_FEATURE</a>|<a class="el" href="rsb_8h.html#a9d7fe7c0e3fabfba57bf2318459ed18a">RSB_ERR_NO_STREAM_OUTPUT_CONFIGURED_OUT</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A collation of the "unsupported"-type error flags. </p>
+
+</div>
+</div>
+<a class="anchor" id="a97106c8db99424b5b69cd6be5bf59937"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define rsb_file_mtx_get_dimensions   <a class="el" href="group__rsb__doc__rsb.html#gaa79f69918eafbd8f737b7866a00a0330">rsb_file_mtx_get_dims</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<dl class="deprecated"><dt><b><a class="el" href="deprecated.html#_deprecated000011">Deprecated:</a></b></dt><dd><a class="el" href="rsb_8h.html#a97106c8db99424b5b69cd6be5bf59937">rsb_file_mtx_get_dimensions</a> has been deprecated: use <a class="el" href="group__rsb__doc__rsb.html#gaa79f69918eafbd8f737b7866a00a0330">rsb_file_mtx_get_dims</a>. </dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="a191af7bdb17d4b0abb3a195c11e56c3b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define rsb_file_mtx_render   <a class="el" href="group__rsb__doc__rsb.html#ga4b45a74b985f5cbd869bc9a540951771">rsb_file_mtx_rndr</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<dl class="deprecated"><dt><b><a class="el" href="deprecated.html#_deprecated000006">Deprecated:</a></b></dt><dd><a class="el" href="rsb_8h.html#a191af7bdb17d4b0abb3a195c11e56c3b">rsb_file_mtx_render</a> has been deprecated: use <a class="el" href="group__rsb__doc__rsb.html#ga4b45a74b985f5cbd869bc9a540951771">rsb_file_mtx_rndr</a>. </dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="adce7e20015d4a549bb8c44a00a80fc7e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_ASSEMBLED_IN_COO_ARRAYS   0x040000</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>If set, the matrix will be fitted into the three input COO arrays after conversion. </p>
+
+</div>
+</div>
+<a class="anchor" id="a49a9315ba7e702e323eadca04d0d735a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_C_INDICES_INTERFACE   0x000000</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>If set, the input/output coordinate indices will be assumed to be 0-based (default). </p>
+
+</div>
+</div>
+<a class="anchor" id="a6b21a3edf4231070a10223f1a9ae1dc4"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_DEFAULT_COO_MATRIX_FLAGS   <a class="el" href="rsb_8h.html#a0ee1c6081692a3ca98ee7ea0c7648ec8">RSB_FLAG_WANT_COO_STORAGE</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A flag combination specifying a pure COO matrix. </p>
+
+</div>
+</div>
+<a class="anchor" id="a8c90a9ad92722ffbbf1bfcadb805c520"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS   <a class="el" href="rsb_8h.html#a12c780564b9c8db7f8104cc5952a490f">RSB_FLAG_WANT_BCSS_STORAGE</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A flag combination specifying a pure CSR matrix. </p>
+
+</div>
+</div>
+<a class="anchor" id="acac4b9c09a3fd6be63e511fc5042038f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_DEFAULT_MATRIX_FLAGS   <a class="el" href="rsb_8h.html#a17c314e28220f3b81aed9cc7d79f97e4">RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A flag combination specifying a matrix in a default, supported format. </p>
+
+</div>
+</div>
+<a class="anchor" id="a17c314e28220f3b81aed9cc7d79f97e4"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS   (<a class="el" href="rsb_8h.html#a5ca428920608e6dd6fcc4e9a4fa8ee70">RSB_FLAG_QUAD_PARTITIONING</a>|<a class="el" href="rsb_8h.html#a693ed0d053ad81ca2ad6dc383afa0586">RSB_FLAG_USE_HALFWORD_INDICES</a>|<a class="el" href="rsb_8h.html#a0ee1c6081692a3ca98ee7ea0c7648ec8">RSB_FLAG_WANT_COO_STORAGE</a>|<a class="el" href="rsb_8h.html#a12c780564b9c8db7f8104cc5952a490f">RSB_FLAG_WANT_BCSS_STORAGE</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A flag combination specifying a pure RSB matrix. </p>
+
+</div>
+</div>
+<a class="anchor" id="aa83897e25c1235a780ed7fe317c78555"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_DEFAULT_STORAGE_FLAGS   (<a class="el" href="rsb_8h.html#a12c780564b9c8db7f8104cc5952a490f">RSB_FLAG_WANT_BCSS_STORAGE</a>|<a class="el" href="rsb_8h.html#a0ee1c6081692a3ca98ee7ea0c7648ec8">RSB_FLAG_WANT_COO_STORAGE</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Default storage flags. </p>
+
+</div>
+</div>
+<a class="anchor" id="abccb47886fb3f8352e4e6ad801fd8efa"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_DIAGONAL   (<a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">RSB_FLAG_UPPER</a> | <a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Combined flags for a diagonal matrix. </p>
+
+</div>
+</div>
+<a class="anchor" id="abf243a6f15925734e143703c4ad33512"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_DISCARD_ZEROS   0x000400</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>If set, explicit zeros will not be inserted. </p>
+<dl class="section warning"><dt>Warning</dt><dd>This flag is active by default. </dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="a7fee489042762b3b22d8184c592a9e52"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_DUPLICATES_DEFAULT_HANDLE   0x000000</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The default handling of duplicate nonzeroes. </p>
+
+</div>
+</div>
+<a class="anchor" id="aff85f26964888f838aa97eb371ce5da3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_DUPLICATES_KEEP_LAST   0x000000</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Keep the last nonzero duplicate, at matrix assembly time. </p>
+
+</div>
+</div>
+<a class="anchor" id="afd1b39c625f4249cd32fccea38957f97"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_DUPLICATES_SUM   0x000200</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Compute and keep the sum of nonzero duplicates, at matrix assembly time. </p>
+
+</div>
+</div>
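+<p>A hedged assembly sketch: with <code>RSB_FLAG_DUPLICATES_SUM</code>, the two coincident entries below are summed (<code>rsb_mtx_alloc_from_coo_const</code> and <code>RSB_NUMERICAL_TYPE_DOUBLE</code> are assumed as declared in this documentation set):</p>
+<pre class="fragment">
+#include &lt;rsb.h&gt;
+
+/* the two (0,0) duplicates are summed at assembly time: the entry becomes 3.0 */
+static struct rsb_mtx_t * duplicates_demo(rsb_err_t *errvalp)
+{
+	static const double VA[] = { 1.0, 2.0, 4.0 };
+	static const rsb_coo_idx_t IA[] = { 0, 0, 1 }, JA[] = { 0, 0, 1 };
+	return rsb_mtx_alloc_from_coo_const(VA, IA, JA, 3,
+		RSB_NUMERICAL_TYPE_DOUBLE, 2, 2,
+		RSB_DEFAULT_BLOCKING, RSB_DEFAULT_BLOCKING,
+		RSB_FLAG_DUPLICATES_SUM, errvalp);
+}
+</pre>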
+<a class="anchor" id="a1d3b9bd7a31257cc8116be3dee0125b5"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT   0x080000</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a6abc0e23c782b817e2ef96d8294f990d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS   0x40000000</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>If set, the input COO arrays to the assembly functions will not be freed at matrix destruction time. </p>
+<dl class="section warning"><dt>Warning</dt><dd>Please do NOT use this flag, for the default memory allocation handling is still not specified. Instead, use the in place allocation functions: <a class="el" href="group__rsb__doc__rsb.html#ga60121166daf00968ba717931f04ea455">rsb_mtx_alloc_from_csr_inplace()</a> and <a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_mtx_alloc_from_coo_inplace()</a>. </dd></dl>
+
+</div>
+</div>
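+<p>A hedged sketch of the recommended alternative named in the warning above (<code>rsb_mtx_alloc_from_coo_inplace</code> and <code>rsb_mtx_free</code> are assumed as declared in this documentation set):</p>
+<pre class="fragment">
+#include &lt;rsb.h&gt;
+
+/* in-place assembly: the matrix borrows VA/IA/JA instead of copying them */
+static void inplace_demo(void)
+{
+	double VA[] = { 1.0, 2.0 };
+	rsb_coo_idx_t IA[] = { 0, 1 }, JA[] = { 0, 1 };
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t *mtxAp = rsb_mtx_alloc_from_coo_inplace(VA, IA, JA, 2,
+		RSB_NUMERICAL_TYPE_DOUBLE, 2, 2,
+		RSB_DEFAULT_BLOCKING, RSB_DEFAULT_BLOCKING,
+		RSB_FLAG_NOFLAGS, &errval);
+	rsb_mtx_free(mtxAp); /* frees the structure; the arrays stay with the caller */
+}
+</pre>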
+<a class="anchor" id="a8ccb4d7203ce7707f9d13bd6c5ef4169"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_FORTRAN_INDICES_INTERFACE   0x000001</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>If set, the input/output coordinate indices will be assumed to be 1-based. </p>
+
+</div>
+</div>
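+<p>A hedged sketch contrasting the two index bases (allocation call as in the previous sketches):</p>
+<pre class="fragment">
+#include &lt;rsb.h&gt;
+
+/* the same single-entry 1x1 matrix, addressed with 1-based (Fortran) indices */
+static struct rsb_mtx_t * one_based_demo(rsb_err_t *errvalp)
+{
+	static const double VA[] = { 9.0 };
+	static const rsb_coo_idx_t IA[] = { 1 }, JA[] = { 1 }; /* {0},{0} in C numbering */
+	return rsb_mtx_alloc_from_coo_const(VA, IA, JA, 1,
+		RSB_NUMERICAL_TYPE_DOUBLE, 1, 1,
+		RSB_DEFAULT_BLOCKING, RSB_DEFAULT_BLOCKING,
+		RSB_FLAG_FORTRAN_INDICES_INTERFACE, errvalp);
+}
+</pre>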
+<a class="anchor" id="ae3e1d6090dd2912acba58b4bc0530ab7"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_HERMITIAN   0x800000</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>If set, the input matrix will be treated as Hermitian (stored as a lower triangular one). </p>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>,<a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">RSB_FLAG_UPPER</a>. </dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="aacf404fe630d480353ce767fd27ba097"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_IDENTICAL_FLAGS   <a class="el" href="rsb_8h.html#a0ea7640214ee34c87e483c475b15827d">RSB_FLAG_NOFLAGS</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The identical-flags value (used in the cloning function <a class="el" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a> to keep the flags of the source matrix). </p>
+
+</div>
+</div>
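+<p>A hedged usage sketch for this value (signature of <a class="el" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a> as declared in this documentation set; <code>mtxAp</code> stands for an already assembled matrix; <code>RSB_NUMERICAL_TYPE_SAME_TYPE</code> and <code>RSB_TRANSPOSITION_N</code> are assumed from the librsb type definitions):</p>
+<pre class="fragment">
+/* clone mtxAp into mtxBp, keeping numerical type, orientation and flags */
+struct rsb_mtx_t *mtxBp = NULL;
+rsb_err_t errval = rsb_mtx_clone(&mtxBp, RSB_NUMERICAL_TYPE_SAME_TYPE,
+	RSB_TRANSPOSITION_N, NULL, mtxAp, RSB_FLAG_IDENTICAL_FLAGS);
+</pre>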
+<a class="anchor" id="aca1c9530dfb366137304d196eb0393c1"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_LOWER   0x000010</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>If set, the matrix will be stored as lower (triangular or symmetric). </p>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="rsb_8h.html#adca72e259846399da3512fcb062ad518">RSB_FLAG_TRIANGULAR</a>,<a class="el" href="rsb_8h.html#a183c4b8ead89e452d1c204c92b3f8f61">RSB_FLAG_SYMMETRIC</a>,<a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">RSB_FLAG_UPPER</a>. </dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="aa06dcddcdd4f42fe2eeda8eb6168bd2d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_LOWER_HERMITIAN   (<a class="el" href="rsb_8h.html#ae3e1d6090dd2912acba58b4bc0530ab7">RSB_FLAG_HERMITIAN</a> | <a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Combined flags for a lower hermitian matrix. </p>
+
+</div>
+</div>
+<a class="anchor" id="a6933030c784596e3c8dbbbd8daf62805"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_LOWER_SYMMETRIC   (<a class="el" href="rsb_8h.html#a183c4b8ead89e452d1c204c92b3f8f61">RSB_FLAG_SYMMETRIC</a> | <a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Combined flags for a symmetric, lower-stored matrix. </p>
+
+</div>
+</div>
+<a class="anchor" id="aed7916ce610549fc75aa0c3e2d2ae1b9"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_LOWER_TRIANGULAR   (<a class="el" href="rsb_8h.html#adca72e259846399da3512fcb062ad518">RSB_FLAG_TRIANGULAR</a> | <a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Combined flags for a lower triangular matrix. </p>
+
+</div>
+</div>
+<a class="anchor" id="a6f4335cce5234a69e06188bcad418091"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_MUTUALLY_EXCLUSIVE_SWITCHES   (<a class="el" href="rsb_8h.html#a3051409699970a0df3acfee8cf70b9aa">RSB_FLAG_USE_HALFWORD_INDICES_COO</a>|<a class="el" href="rsb_8h.html#a1b1cf74b08234e3c7c7d463e7c4acea1">RSB_FLAG_USE_HALFWORD_INDICES_CSR</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A combination of flags which is forbidden (so don't use it). </p>
+
+</div>
+</div>
+<a class="anchor" id="a0ea7640214ee34c87e483c475b15827d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_NOFLAGS   0x000000</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The null (empty) flag. </p>
+
+</div>
+</div>
+<a class="anchor" id="a5ca428920608e6dd6fcc4e9a4fa8ee70"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_QUAD_PARTITIONING   0x002000</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>If set, the matrix will be organized as a quad tree of submatrices. </p>
+
+</div>
+</div>
+<a class="anchor" id="a54d04b341465bf3dadc62ad99d55f8ca"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_RECURSIVE_MORE_LEAVES_THAN_THREADS   0x1000000</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>If set, recursive subdivision will continue until there are at least as many leaf submatrices as active threads. </p>
+
+</div>
+</div>
+<a class="anchor" id="ad8e75dfa2b78fa82cdd31665a375d257"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_RECURSIVE_SUBDIVIDE_MORE_ON_DIAG   0x8000000</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>If set, the matrix will be subdivided at a finer grain on diagonal blocks. </p>
+
+</div>
+</div>
+<a class="anchor" id="a726fa64beccf21ae1b70149b88c3affb"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_SORTED_INPUT   0x000004</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>If set, the code will assume the input nonzeroes to be already sorted. </p>
+
+</div>
+</div>
+<a class="anchor" id="a183c4b8ead89e452d1c204c92b3f8f61"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_SYMMETRIC   0x400000</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>If set, the input matrix will be treated as symmetric (stored as a lower triangular one by default). </p>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>,<a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>. </dd></dl>
+
+</div>
+</div>
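+<p>A hedged sketch: with symmetric flags only one triangle is supplied, and operations behave as on the full matrix (allocation call as in the sketches above):</p>
+<pre class="fragment">
+#include &lt;rsb.h&gt;
+
+/* 2x2 symmetric matrix [4 1; 1 3]: only (0,0), (1,0), (1,1) are supplied */
+static struct rsb_mtx_t * symmetric_demo(rsb_err_t *errvalp)
+{
+	static const double VA[] = { 4.0, 1.0, 3.0 };
+	static const rsb_coo_idx_t IA[] = { 0, 1, 1 }, JA[] = { 0, 0, 1 };
+	return rsb_mtx_alloc_from_coo_const(VA, IA, JA, 3,
+		RSB_NUMERICAL_TYPE_DOUBLE, 2, 2,
+		RSB_DEFAULT_BLOCKING, RSB_DEFAULT_BLOCKING,
+		RSB_FLAG_LOWER_SYMMETRIC, errvalp);
+}
+</pre>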
+<a class="anchor" id="adca72e259846399da3512fcb062ad518"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_TRIANGULAR   0x000008</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>If set, the matrix is considered triangular. </p>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a>,<a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">RSB_FLAG_UPPER</a>. </dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="a4af24812309eb471c861ba618cb996f2"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_UNIT_DIAG_IMPLICIT   0x000040</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>If set, the (whole super-)matrix will not store the diagonal, which will be assumed to be unitary. </p>
+
+</div>
+</div>
+<a class="anchor" id="a7e66ba39d7ea80c4be17bc524a21056f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_UPPER   0x000020</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>If set, the matrix will be stored as upper (triangular or symmetric). </p>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="rsb_8h.html#aca1c9530dfb366137304d196eb0393c1">RSB_FLAG_LOWER</a> </dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="a0565be78af9bac79d07376d501237b00"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_UPPER_HERMITIAN   (<a class="el" href="rsb_8h.html#ae3e1d6090dd2912acba58b4bc0530ab7">RSB_FLAG_HERMITIAN</a> | <a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">RSB_FLAG_UPPER</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Combined flags for an upper hermitian matrix. </p>
+
+</div>
+</div>
+<a class="anchor" id="a3c2701b010fa2928685f3253a0ff1a99"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_UPPER_SYMMETRIC   (<a class="el" href="rsb_8h.html#a183c4b8ead89e452d1c204c92b3f8f61">RSB_FLAG_SYMMETRIC</a> | <a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">RSB_FLAG_UPPER</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Combined flags for a symmetric, upper-stored matrix. </p>
+
+</div>
+</div>
+<a class="anchor" id="a9168d244582c1a4c57a9ec93d9432539"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_UPPER_TRIANGULAR   (<a class="el" href="rsb_8h.html#adca72e259846399da3512fcb062ad518">RSB_FLAG_TRIANGULAR</a> | <a class="el" href="rsb_8h.html#a7e66ba39d7ea80c4be17bc524a21056f">RSB_FLAG_UPPER</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Combined flags for an upper triangular matrix. </p>
+
+</div>
+</div>
+<a class="anchor" id="a45ae263259390619ea303a5fbe2640f2"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_USE_CSR_RESERVED   0x200000</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a693ed0d053ad81ca2ad6dc383afa0586"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_USE_HALFWORD_INDICES   0x000002</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>If set, the matrix will internally use a half-word (16-bit) type for indices. </p>
+
+</div>
+</div>
+<a class="anchor" id="a3051409699970a0df3acfee8cf70b9aa"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_USE_HALFWORD_INDICES_COO   (<a class="el" href="rsb_8h.html#a693ed0d053ad81ca2ad6dc383afa0586">RSB_FLAG_USE_HALFWORD_INDICES</a>|<a class="el" href="rsb_8h.html#a0ee1c6081692a3ca98ee7ea0c7648ec8">RSB_FLAG_WANT_COO_STORAGE</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Combined flags for half word COO. </p>
+
+</div>
+</div>
+<a class="anchor" id="a1b1cf74b08234e3c7c7d463e7c4acea1"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_USE_HALFWORD_INDICES_CSR   (<a class="el" href="rsb_8h.html#a693ed0d053ad81ca2ad6dc383afa0586">RSB_FLAG_USE_HALFWORD_INDICES</a>|<a class="el" href="rsb_8h.html#a45ae263259390619ea303a5fbe2640f2">RSB_FLAG_USE_CSR_RESERVED</a>)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a12c780564b9c8db7f8104cc5952a490f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_WANT_BCSS_STORAGE   0x004000</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>If set, the block partitioning will be fixed (BCSS: BCSR or BCSC, but no VBR). </p>
+
+</div>
+</div>
+<a class="anchor" id="a6ed7790c2f7129a6e051b8167c48a43c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_WANT_COLUMN_MAJOR_ORDER   0x4000000</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Used to request column-major operand ordering in multi-vector (dense matrix) operations. </p>
+
+</div>
+</div>
+<a class="anchor" id="a0ee1c6081692a3ca98ee7ea0c7648ec8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_WANT_COO_STORAGE   0x000100</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>If set, the matrix will use COO storage, where necessary. </p>
+
+</div>
+</div>
+<a class="anchor" id="a7e9ef3a7ae3c22ab5c76d36b3ac482cc"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_FLAG_WANT_ROW_MAJOR_ORDER   0x000000</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Used to request row-major operand ordering in multi-vector (dense matrix) operations (the default). </p>
+
+</div>
+</div>
+<a class="anchor" id="ab16e9407330a11d4163be1cc586990b3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_HALF_MAX_SIGNED</td>
+          <td>(</td>
+          <td class="paramtype"> </td>
+          <td class="paramname">T</td><td>)</td>
+          <td>   ((T)1 << (sizeof(T)*<a class="el" href="rsb_8h.html#a5749695a0fccd6348d669c6790185a68">RSB_CHAR_BIT</a>-2))</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a88e6b599d650b509b54d4fe7c3008b12"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_INVALID_COO_IDX_VAL   ((<a class="el" href="rsb_8h.html#af88edb77d90929bf6cef617ab862d2bc">RSB_MARKER_COO_VALUE</a>)+1)	/*< A value which is illegal for any #<a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> variable. */</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a20253111f2fa6a4bc0c75fe7e6430890"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_INVALID_NNZ_IDX_VAL   ((<a class="el" href="rsb_8h.html#a967c5aae0dc536668ed67d810378e7fc">RSB_MARKER_NNZ_VALUE</a>)+1)	/*< A value which is illegal for any #<a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> variable. */</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="afd8b1de2977b2d810f9c615195d9acec"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_IO_SPECIFIER_GET   1</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Specifies to <a class="el" href="rsb_8h.html#afeb783fe4dca5762623a621b7095dd01">RSB_REINIT_SINGLE_VALUE</a> that a given <a class="el" href="structrsb__initopts.html" title="A structure specifying library (initialization) options, to be used with the rsb_lib_reinit() functio...">rsb_initopts</a> is going to be read (get) by the user. </p>
+
+</div>
+</div>
+<a class="anchor" id="aef619407815752dc767cfd6870b72101"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_IO_SPECIFIER_SET   0</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Specifies to <a class="el" href="rsb_8h.html#afeb783fe4dca5762623a621b7095dd01">RSB_REINIT_SINGLE_VALUE</a> that a given <a class="el" href="structrsb__initopts.html" title="A structure specifying library (initialization) options, to be used with the rsb_lib_reinit() functio...">rsb_initopts</a> is going to be set by the user. </p>
+
+</div>
+</div>
+<a class="anchor" id="af7d43df61fa72c8971cece701ae53a22"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_IS_SIGNED</td>
+          <td>(</td>
+          <td class="paramtype"> </td>
+          <td class="paramname">T</td><td>)</td>
+          <td>   (((T)0) > (((T)-1)))</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a2d332e6ed899c019e54ab4e540c82fd8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_MARF_EPS   0x00000030</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p><a class="el" href="rsb_8h.html#aa8f24976a4e4bdf8403ab433564c2005">rsb_marf_t</a> Flag value for requesting an Encapsulated Postscript rendering of a matrix (spy plot + blocks). </p>
+
+</div>
+</div>
+<a class="anchor" id="a77106fe2435306ef028060d0eb7dca14"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_MARF_EPS_B   0x00000020</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p><a class="el" href="rsb_8h.html#aa8f24976a4e4bdf8403ab433564c2005">rsb_marf_t</a> Flag value for requesting an Encapsulated Postscript rendering of a matrix (blocks plot). </p>
+
+</div>
+</div>
+<a class="anchor" id="a3562195777ed886282bd6287551a235c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_MARF_EPS_L   0x00000070</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p><a class="el" href="rsb_8h.html#aa8f24976a4e4bdf8403ab433564c2005">rsb_marf_t</a> Flag value for requesting an Encapsulated Postscript rendering of a matrix (spy plot + blocks + labels). </p>
+
+</div>
+</div>
+<a class="anchor" id="a8055e62d2824131421d22de1a0256f79"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_MARF_EPS_S   0x00000010</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p><a class="el" href="rsb_8h.html#aa8f24976a4e4bdf8403ab433564c2005">rsb_marf_t</a> Flag value for requesting an Encapsulated Postscript rendering of a matrix (spy plot). </p>
+
+</div>
+</div>
+<a class="anchor" id="a53604f78febc54c616282c66bca02daf"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_MARF_RGB   0x00000001</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p><a class="el" href="rsb_8h.html#aa8f24976a4e4bdf8403ab433564c2005">rsb_marf_t</a> Flag value for requesting an RGB rendering of a matrix. </p>
+
+</div>
+</div>
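+<p>A hedged rendering sketch (<code>rsb_mtx_rndr</code> is assumed as declared in this documentation set; <code>mtxAp</code> stands for an assembled matrix):</p>
+<pre class="fragment">
+/* request a 512x512 Encapsulated PostScript spy plot of mtxAp in "spy.eps" */
+rsb_err_t errval = rsb_mtx_rndr("spy.eps", mtxAp, 512, 512, RSB_MARF_EPS_S);
+</pre>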
+<a class="anchor" id="af88edb77d90929bf6cef617ab862d2bc"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_MARKER_COO_VALUE   (<a class="el" href="rsb_8h.html#a318a92d60883f6ade7345459074374f5">RSB_MAX_MATRIX_DIM</a>+1)		/* */</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a967c5aae0dc536668ed67d810378e7fc"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_MARKER_NNZ_VALUE   (<a class="el" href="rsb_8h.html#a63c69ef30355064d818326768674c9b2">RSB_MAX_MATRIX_NNZ</a>+1)		/* */</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a318a92d60883f6ade7345459074374f5"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_MAX_MATRIX_DIM   (<a class="el" href="rsb_8h.html#a0ad77b7888128f3e1b144b48e6e93b87">RSB_MAX_VALUE_FOR_TYPE</a>(<a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a>)-<a class="el" href="rsb_8h.html#af576621f0846e0b9a999ea21641e13c8">RSB_NNZ_BLK_MAX</a>-255) /*!> Maximum allowed matrix dimension. */</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a63c69ef30355064d818326768674c9b2"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_MAX_MATRIX_NNZ   (<a class="el" href="rsb_8h.html#a0ad77b7888128f3e1b144b48e6e93b87">RSB_MAX_VALUE_FOR_TYPE</a>(<a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a>)-<a class="el" href="rsb_8h.html#af576621f0846e0b9a999ea21641e13c8">RSB_NNZ_BLK_MAX</a>) /*!> Maximum allowed matrix nonzeroes count. */</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a465659728318d495a364e906806ffae7"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_MAX_SIGNED</td>
+          <td>(</td>
+          <td class="paramtype"> </td>
+          <td class="paramname">T</td><td>)</td>
+          <td>   (<a class="el" href="rsb_8h.html#ab16e9407330a11d4163be1cc586990b3">RSB_HALF_MAX_SIGNED</a>(T) - 1 + <a class="el" href="rsb_8h.html#ab16e9407330a11d4163be1cc586990b3">RSB_HALF_MAX_SIGNED</a>(T))</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a9ea900484e72f4876b3fd8d9f402ea39"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_MAX_UNSIGNED</td>
+          <td>(</td>
+          <td class="paramtype"> </td>
+          <td class="paramname">T</td><td>)</td>
+          <td>   ((T)-1)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a0ad77b7888128f3e1b144b48e6e93b87"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_MAX_VALUE_FOR_TYPE</td>
+          <td>(</td>
+          <td class="paramtype"> </td>
+          <td class="paramname">T</td><td>)</td>
+          <td>   (<a class="el" href="rsb_8h.html#af7d43df61fa72c8971cece701ae53a22">RSB_IS_SIGNED</a>(T)?<a class="el" href="rsb_8h.html#a465659728318d495a364e906806ffae7">RSB_MAX_SIGNED</a>(T):<a class="el" href="rsb_8h.html#a9ea900484e72f4876b3fd8d9f402ea39">RSB_MAX_UNSIGNED</a>(T))</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
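+<p>A small sketch of the limit macros above (they expand to compile-time constants; the printed values assume a typical build where <code>rsb_coo_idx_t</code> is a signed integer type):</p>
+<pre class="fragment">
+#include &lt;stdio.h&gt;
+#include &lt;rsb.h&gt;
+
+int main(void)
+{
+	/* RSB_MAX_VALUE_FOR_TYPE dispatches on signedness via RSB_IS_SIGNED */
+	printf("rsb_coo_idx_t signed: %d\n", RSB_IS_SIGNED(rsb_coo_idx_t));
+	printf("max rsb_coo_idx_t:    %ld\n", (long)RSB_MAX_VALUE_FOR_TYPE(rsb_coo_idx_t));
+	printf("max unsigned int:     %lu\n", (unsigned long)RSB_MAX_VALUE_FOR_TYPE(unsigned int));
+	return 0;
+}
+</pre>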
+<a class="anchor" id="abaccfe39f69712cebf501c9d55b1a4b8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_MIN_MATRIX_DIM   0 /*!> Minimum allowed matrix dimension. */</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a425f78c0a49004e45df20db728f8196d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_MIN_MATRIX_NNZ   0 /*!> Minimum allowed matrix nonzeroes count. */</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a552fe79778c824e8d88ddfd0d9c58586"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define rsb_mtx_get_norm   <a class="el" href="group__rsb__doc__rsb.html#ga6a645ce89fd167d72c92cdcfbcd8ed81">rsb_mtx_get_nrm</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<dl class="deprecated"><dt><b><a class="el" href="deprecated.html#_deprecated000005">Deprecated:</a></b></dt><dd><a class="el" href="rsb_8h.html#a552fe79778c824e8d88ddfd0d9c58586">rsb_mtx_get_norm</a> has been deprecated: use <a class="el" href="group__rsb__doc__rsb.html#ga6a645ce89fd167d72c92cdcfbcd8ed81">rsb_mtx_get_nrm</a> . </dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="a8ba1704fe1f07cb9abe856d9a1a20ea9"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define rsb_mtx_get_preconditioner   <a class="el" href="group__rsb__doc__rsb.html#gadaee12cc24dac7f8ebc68efd3d09c819">rsb_mtx_get_prec</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<dl class="deprecated"><dt><b><a class="el" href="deprecated.html#_deprecated000008">Deprecated:</a></b></dt><dd><a class="el" href="rsb_8h.html#a8ba1704fe1f07cb9abe856d9a1a20ea9">rsb_mtx_get_preconditioner</a> has been deprecated: use <a class="el" href="group__rsb__doc__rsb.html#gadaee12cc24dac7f8ebc68efd3d09c819">rsb_mtx_get_prec</a>. </dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="af08b72a410e54fd7db6dcb12db232aec"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define rsb_mtx_get_values   <a class="el" href="group__rsb__doc__rsb.html#gad8f1aa9ac5081edd789374e7bb82697f">rsb_mtx_get_vals</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<dl class="deprecated"><dt><b><a class="el" href="deprecated.html#_deprecated000010">Deprecated:</a></b></dt><dd><a class="el" href="rsb_8h.html#af08b72a410e54fd7db6dcb12db232aec">rsb_mtx_get_values</a> has been deprecated: use <a class="el" href="group__rsb__doc__rsb.html#gad8f1aa9ac5081edd789374e7bb82697f">rsb_mtx_get_vals</a>. </dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="a5b622f80450cdef4f8a06742eacbb045"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define rsb_mtx_set_values   <a class="el" href="group__rsb__doc__rsb.html#gab8069ad6d5a67bc8a726131891e98c46">rsb_mtx_set_vals</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<dl class="deprecated"><dt><b><a class="el" href="deprecated.html#_deprecated000009">Deprecated:</a></b></dt><dd><a class="el" href="rsb_8h.html#a5b622f80450cdef4f8a06742eacbb045">rsb_mtx_set_values</a> has been deprecated: use <a class="el" href="group__rsb__doc__rsb.html#gab8069ad6d5a67bc8a726131891e98c46">rsb_mtx_set_vals</a>. </dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="a40d40562867aceec2899cdddf79b3086"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define rsb_mtx_upd_values   <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<dl class="deprecated"><dt><b><a class="el" href="deprecated.html#_deprecated000007">Deprecated:</a></b></dt><dd><a class="el" href="rsb_8h.html#a40d40562867aceec2899cdddf79b3086">rsb_mtx_upd_values</a> has been deprecated: use <a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals</a>. </dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="af576621f0846e0b9a999ea21641e13c8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_NNZ_BLK_MAX   255 /* Dense block maximal allowed size (still unused, for now internal) */</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a2234a5e51156de6c95c3f8c2951ae09f"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_NULL_EXIT_OPTIONS   NULL</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A valid value for specifying default (null) options to <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit()</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="add105c42e570c5c269680d437f8c51e2"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_NULL_INIT_OPTIONS   NULL</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A valid value for specifying default (null) options to <a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init()</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a56bb6be11af9a5a0ed9aaa8774ab6db9"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_PRECF_ILU0   0x00000001</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>ILU-0 preconditioner request to <a class="el" href="group__rsb__doc__rsb.html#gadaee12cc24dac7f8ebc68efd3d09c819">rsb_mtx_get_prec()</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="a7f6f859f61b0855e5389e1bc98829bd4"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_PROGRAM_ERROR   (-1)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Program error code (int). </p>
+
+</div>
+</div>
+<a class="anchor" id="a61f8a9ebc9bced69076389ba3cd2cce8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_PROGRAM_SUCCESS   (0)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Program success return code (int). </p>
+
+</div>
+</div>
+<a class="anchor" id="afeb783fe4dca5762623a621b7095dd01"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_REINIT_SINGLE_VALUE</td>
+          <td>(</td>
+          <td class="paramtype"> </td>
+          <td class="paramname">IOF, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"> </td>
+          <td class="paramname">IOP, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"> </td>
+          <td class="paramname">IOS, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"> </td>
+          <td class="paramname">ERRVAL </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td>   { enum <a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6">rsb_opt_t</a> keys[]={IOF}; void*values[]={(IOP)}; struct <a class="el" href="structrsb__initopts.html">rsb_initopts</a> io; io.action=(IOS); io.keys=keys; io.values=values; io.n_pairs=1; ERRVAL=<a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit</a>(&io); }</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A handy macro for invoking <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit()</a> with a single get/set specifier. An appropriate I/O flag is supplied as first parameter; a valid pointer (according to the flag) should be passed as second parameter; either <a class="el" href="rsb_8h.html#aef619407815752dc767cfd6870b72101">RSB_IO_SPECIFIER_SET</a> or <a class="el" href="rsb_8h.html#afd8b1de2977b2d810f9c615195d9acec">RSB_IO_SPECIFIER_GET</a> should be passed as third parameter; a <a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> variable as fourth one, in order to detect any error. </p>
+<dl class="deprecated"><dt><b><a class="el" href="deprecated.html#_deprecated000001">Deprecated:</a></b></dt><dd>This macro has been deprecated and will be removed in a future version: use <a class="el" href="rsb__rsb_8c.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_lib_set_opt</a> or <a class="el" href="rsb__rsb_8c.html#a96a28efc32dd050d2a74208b3ad2f227">rsb_lib_get_opt</a> instead. </dd></dl>
+
+</div>
+</div>
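+<p>A getter sketch via this macro, matching the expansion shown above (<code>RSB_IO_WANT_EXECUTING_THREADS</code> is assumed here to be one of the <code>rsb_opt_t</code> values):</p>
+<pre class="fragment">
+rsb_err_t errval = RSB_ERR_NO_ERROR;
+rsb_int_t nt = 0;
+/* expands to a block passing one key/value pair to rsb_lib_reinit() */
+RSB_REINIT_SINGLE_VALUE(RSB_IO_WANT_EXECUTING_THREADS, &amp;nt, RSB_IO_SPECIFIER_GET, errval);
+if(errval != RSB_ERR_NO_ERROR) { /* handle the error */ }
+</pre>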
+<a class="anchor" id="aa0ca08a816983bc6294317d0e22e0509"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_REINIT_SINGLE_VALUE_C_IOP</td>
+          <td>(</td>
+          <td class="paramtype"> </td>
+          <td class="paramname">IOF, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"> </td>
+          <td class="paramname">IOP, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"> </td>
+          <td class="paramname">IOS, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"> </td>
+          <td class="paramname">ERRVAL </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td>   { enum <a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6">rsb_opt_t</a> keys[]={IOF}; const void*values[]={(IOP)}; struct <a class="el" href="structrsb__initopts.html">rsb_initopts</a> io; io.action=(IOS); io.keys=keys; (io.values)=(void**)values; io.n_pairs=1; ERRVAL=<a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit</a>(&io); }</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Like <a class="el" href="rsb_8h.html#afeb783fe4dca5762623a621b7095dd01">RSB_REINIT_SINGLE_VALUE</a>, but considering <code>IOP</code> <code>const</code>. </p>
+<dl class="deprecated"><dt><b><a class="el" href="deprecated.html#_deprecated000002">Deprecated:</a></b></dt><dd>This macro has been deprecated and will be removed in a future version: use <a class="el" href="rsb__rsb_8c.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_lib_set_opt</a> instead. </dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="ae6f837f13f6413a163f2c6b0c02dadf2"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_REINIT_SINGLE_VALUE_GET</td>
+          <td>(</td>
+          <td class="paramtype"> </td>
+          <td class="paramname">IOF, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"> </td>
+          <td class="paramname">IOP, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"> </td>
+          <td class="paramname">ERRVAL </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td>   <a class="el" href="rsb_8h.html#afeb783fe4dca5762623a621b7095dd01">RSB_REINIT_SINGLE_VALUE</a>(IOF,IOP,<a class="el" href="rsb_8h.html#afd8b1de2977b2d810f9c615195d9acec">RSB_IO_SPECIFIER_GET</a>,ERRVAL)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A handy macro for invoking <a class="el" href="rsb_8h.html#afeb783fe4dca5762623a621b7095dd01">RSB_REINIT_SINGLE_VALUE</a> with a single get specifier. An appropriate I/O flag is supplied as first parameter; a valid pointer (according to the flag) should be passed as second parameter; a <a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> variable as third one, in order to detect any error. </p>
+<dl class="deprecated"><dt><b><a class="el" href="deprecated.html#_deprecated000004">Deprecated:</a></b></dt><dd>This macro has been deprecated and will be removed in a future version: use <a class="el" href="rsb__rsb_8c.html#a96a28efc32dd050d2a74208b3ad2f227">rsb_lib_get_opt</a> instead. </dd></dl>
+
+</div>
+</div>
+<a class="anchor" id="a20da3b07d4c17771762413010816e36e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_REINIT_SINGLE_VALUE_SET</td>
+          <td>(</td>
+          <td class="paramtype"> </td>
+          <td class="paramname">IOF, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"> </td>
+          <td class="paramname">IOP, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"> </td>
+          <td class="paramname">ERRVAL </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td>   <a class="el" href="rsb_8h.html#afeb783fe4dca5762623a621b7095dd01">RSB_REINIT_SINGLE_VALUE</a>(IOF,IOP,<a class="el" href="rsb_8h.html#aef619407815752dc767cfd6870b72101">RSB_IO_SPECIFIER_SET</a>,ERRVAL)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A handy macro for invoking <a class="el" href="rsb_8h.html#afeb783fe4dca5762623a621b7095dd01">RSB_REINIT_SINGLE_VALUE</a> with a single set specifier. An appropriate I/O flag is supplied as first parameter; a valid pointer (according to the flag) should be passed as second parameter; a <a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> variable as third one, in order to detect any error. </p>
+<dl class="deprecated"><dt><b><a class="el" href="deprecated.html#_deprecated000003">Deprecated:</a></b></dt><dd>This macro has been deprecated and will be removed in a future version: use <a class="el" href="rsb__rsb_8c.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_lib_set_opt</a> instead. </dd></dl>
+
+</div>
+</div>
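+<p>A setter sketch via this macro (<code>RSB_IO_WANT_EXECUTING_THREADS</code> and its <code>rsb_int_t</code> value type are assumptions taken from the <code>rsb_opt_t</code> value list):</p>
+<pre class="fragment">
+rsb_err_t errval = RSB_ERR_NO_ERROR;
+rsb_int_t nt = 4; /* illustrative thread count */
+RSB_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_EXECUTING_THREADS, &amp;nt, errval);
+if(errval != RSB_ERR_NO_ERROR) { /* handle the error */ }
+</pre>
+<p>As the deprecation notes above say, new code should call <a class="el" href="rsb__rsb_8c.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_lib_set_opt</a> directly instead.</p>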
+<h2>Typedef Documentation</h2>
+<a class="anchor" id="ac6a4411e32793f5c150c6ab3c6f7e14e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">typedef signed int <a class="el" href="rsb_8h.html#ac6a4411e32793f5c150c6ab3c6f7e14e">rsb_blk_idx_t</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The block arrays index type.</p>
+<p>Could be an unsigned type. Should not overflow when indexing matrix blocks by block coordinates. </p>
+
+</div>
+</div>
+<a class="anchor" id="aeeac94f4bf43460df839c8decd897523"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">typedef <a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> <a class="el" href="rsb_8h.html#aeeac94f4bf43460df839c8decd897523">rsb_bool_t</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A boolean type. </p>
+
+</div>
+</div>
+<a class="anchor" id="a10ec0af478bcccdab11545b106678ef6"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">typedef char <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A type for character strings. </p>
+
+</div>
+</div>
+<a class="anchor" id="a4874ba61df0ff15b4395278496f83a5d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">typedef signed int <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The coordinate arrays index type.</p>
+<p>Should not overflow when indexing matrix elements by coordinates. Legal values when specifying a matrix size should be within <a class="el" href="rsb_8h.html#abaccfe39f69712cebf501c9d55b1a4b8">RSB_MIN_MATRIX_DIM</a> and <a class="el" href="rsb_8h.html#a318a92d60883f6ade7345459074374f5">RSB_MAX_MATRIX_DIM</a> </p>
+
+</div>
+</div>
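+<p>A declaration sketch for coordinate (COO) arrays using these index types (values are illustrative; such arrays can then feed a constructor like <code>rsb_mtx_alloc_from_coo_const()</code>):</p>
+<pre class="fragment">
+const rsb_coo_idx_t IA[] = { 0, 1, 2 };   /* row coordinates    */
+const rsb_coo_idx_t JA[] = { 0, 1, 2 };   /* column coordinates */
+const double VA[] = { 11.0, 22.0, 33.0 }; /* numerical values   */
+const rsb_nnz_idx_t nnzA = 3;             /* nonzeroes count    */
+const rsb_coo_idx_t nrA = 3, ncA = 3;     /* matrix dimensions  */
+</pre>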
+<a class="anchor" id="a640e84bcc5268cd92d5d31fd6ac321b8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">typedef signed int <a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A type specific to error flags. Should be >= 4 bytes.</p>
+<p>A textual description of an error value may be obtained via <a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r()</a> or <a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror()</a>. </p>
+
+</div>
+</div>
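+<p>An error-reporting sketch (the <code>rsb_strerror_r(errval,buf,buflen)</code> argument order is an assumption here; see its entry in the rsb group for the authoritative prototype):</p>
+<pre class="fragment">
+char errbuf[256];
+rsb_err_t errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS);
+if(errval != RSB_ERR_NO_ERROR)
+{
+  /* translate the error value into text (argument order assumed) */
+  rsb_strerror_r(errval, errbuf, sizeof(errbuf));
+  /* ... report errbuf ... */
+}
+</pre>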
+<a class="anchor" id="a569215d6312bf658b32d3e89cf2e0715"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">typedef signed int <a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A type for specifying matrix assembly or coordinate conversion option flags. Should be >= 4 bytes. See <a class="el" href="rsb_8h.html#flags_section">flags_section</a> for possible values. </p>
+
+</div>
+</div>
+<a class="anchor" id="aefcdc7de885ab34a89a0d36470e11deb"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">typedef signed int <a class="el" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>An integer type declaration for interface functions. Should always be 'int'. A signed integer type. </p>
+
+</div>
+</div>
+<a class="anchor" id="aa8f24976a4e4bdf8403ab433564c2005"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">typedef <a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> <a class="el" href="rsb_8h.html#aa8f24976a4e4bdf8403ab433564c2005">rsb_marf_t</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Matrix rendering flags (see <a class="el" href="rsb_8h.html#marf_section">marf_section</a> for possible values). </p>
+
+</div>
+</div>
+<a class="anchor" id="a46b3366e54a5b4dda754a6ace22264df"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">typedef signed int <a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The nnz counter index type.</p>
+<p>Should not overflow when indexing matrix elements. On most common archs sizeof(long)>=sizeof(int). Legal values when specifying a matrix size should be within <a class="el" href="rsb_8h.html#a425f78c0a49004e45df20db728f8196d">RSB_MIN_MATRIX_NNZ</a> and <a class="el" href="rsb_8h.html#a63c69ef30355064d818326768674c9b2">RSB_MAX_MATRIX_NNZ</a> </p>
+
+</div>
+</div>
+<a class="anchor" id="a528640277b196f7cfce2016cffbdd340"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">typedef <a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> <a class="el" href="rsb_8h.html#a528640277b196f7cfce2016cffbdd340">rsb_precf_t</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Basic preconditioner flags to be used with <a class="el" href="group__rsb__doc__rsb.html#gadaee12cc24dac7f8ebc68efd3d09c819">rsb_mtx_get_prec()</a>. </p>
+
+</div>
+</div>
+<a class="anchor" id="ab6fedd060aee0dd9f61f0438987a99a9"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">typedef double <a class="el" href="rsb_8h.html#ab6fedd060aee0dd9f61f0438987a99a9">rsb_real_t</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A floating point numerical type. </p>
+
+</div>
+</div>
+<a class="anchor" id="ab7a0af874a2765e9271a63ee4acf3d5d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">typedef <a class="el" href="rsb_8h.html#ab6fedd060aee0dd9f61f0438987a99a9">rsb_real_t</a> <a class="el" href="rsb_8h.html#ab7a0af874a2765e9271a63ee4acf3d5d">rsb_time_t</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A floating point numerical type for time measurements with <a class="el" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time()</a>. </p>
+
+</div>
+</div>
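+<p>A small timing sketch with this type (assuming only that <a class="el" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time()</a> returns the current time in seconds):</p>
+<pre class="fragment">
+rsb_time_t dt = -rsb_time(); /* time before the operation */
+/* ... timed librsb operation ... */
+dt += rsb_time();            /* dt now holds the elapsed seconds */
+</pre>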
+<a class="anchor" id="a46095ea7e61e1d1ec0ad055cf0291901"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">typedef <a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> <a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The type for specifying transposition (See <a class="el" href="rsb__types_8h.html#matrix_transposition_flags_section">matrix_transposition_flags_section</a>) </p>
+
+</div>
+</div>
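+<p>A transposition-selection sketch (the <code>rsb_spmv()</code> prototype used here is assumed from the rsb group documentation; <code>RSB_TRANSPOSITION_N</code> comes from <code>rsb_types.h</code>):</p>
+<pre class="fragment">
+/* given an assembled struct rsb_mtx_t *mtxAp and conforming vectors x, y: */
+const rsb_trans_t transA = RSB_TRANSPOSITION_N; /* no transposition */
+const double alpha = 1.0, beta = 0.0;
+/* y &lt;- beta * y + alpha * op(A) * x, with op() selected by transA */
+rsb_err_t errval = rsb_spmv(transA, &amp;alpha, mtxAp, x, 1, &amp;beta, y, 1);
+</pre>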
+<a class="anchor" id="ac0f6a03345c8874f6e50f0ed033d984b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">typedef char <a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A type for specifying numerical type codes (See <a class="el" href="rsb__types_8h.html#matrix_type_symbols_section">matrix_type_symbols_section</a> for a list of valid values). </p>
+
+</div>
+</div>
+<h2>Function Documentation</h2>
+<a class="anchor" id="a96a28efc32dd050d2a74208b3ad2f227"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_lib_get_opt </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6">rsb_opt_t</a> </td>
+          <td class="paramname"><em>iof</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>iop</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Gets the value of a library option. The value selected by the request flag <code>iof</code> will be fetched from the library's internal state and <code>*iop</code> will be updated accordingly.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">iof</td><td>library options flags. See <a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6" title="library option values for rsb_lib_init, rsb_lib_set_opt_str, rsb_lib_reinit, rsb_lib_exit, rsb_lib_get_opt, rsb_lib_set_opt, or (deprecated) macros RSB_REINIT_SINGLE_VALUE_GET, RSB_REINIT_SINGLE_VALUE_SET, RSB_REINIT_SINGLE_VALUE, RSB_REINIT_SINGLE_VALUE_C_IOP..">rsb_opt_t</a> for a list of valid options. </td></tr>
+    <tr><td class="paramname">iop</td><td>library options value output pointer (pointed location will be updated). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="rsb_8h.html#ae6f837f13f6413a163f2c6b0c02dadf2">RSB_REINIT_SINGLE_VALUE_GET</a>, <a class="el" href="rsb_8h.html#a20da3b07d4c17771762413010816e36e">RSB_REINIT_SINGLE_VALUE_SET</a>, <a class="el" href="rsb_8h.html#afeb783fe4dca5762623a621b7095dd01">RSB_REINIT_SINGLE_VALUE</a>, <a class="el" href="rsb_8h.html#aa0ca08a816983bc6294317d0e22e0509">RSB_REINIT_SINGLE_VALUE_C_IOP</a> </dd>
+<dd>
+<a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4670aa682e70f82d5039c600e426a368">rsb_lib_set_opt_str</a>, <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit</a>, <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a>, <a class="el" href="rsb__rsb_8c.html#a96a28efc32dd050d2a74208b3ad2f227">rsb_lib_get [...]
+
+</div>
+</div>
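+<p>A query sketch for this function (<code>RSB_IO_WANT_EXECUTING_THREADS</code> is assumed here to be one of the <code>rsb_opt_t</code> values):</p>
+<pre class="fragment">
+rsb_int_t nt = 0;
+rsb_err_t errval = rsb_lib_get_opt(RSB_IO_WANT_EXECUTING_THREADS, &amp;nt);
+if(errval == RSB_ERR_NO_ERROR)
+{
+  /* nt now holds the number of executing threads */
+}
+</pre>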
+<a class="anchor" id="a2a08c5a23f3999fe8cf36440680e4a05"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_lib_set_opt </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6">rsb_opt_t</a> </td>
+          <td class="paramname"><em>iof</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>iop</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sets the value of a library option. The value selected by the request flag <code>iof</code> will be read from <code>*iop</code> and used to update the corresponding option in the library's internal state.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">iof</td><td>library options flags. See <a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6" title="library option values for rsb_lib_init, rsb_lib_set_opt_str, rsb_lib_reinit, rsb_lib_exit, rsb_lib_get_opt, rsb_lib_set_opt, or (deprecated) macros RSB_REINIT_SINGLE_VALUE_GET, RSB_REINIT_SINGLE_VALUE_SET, RSB_REINIT_SINGLE_VALUE, RSB_REINIT_SINGLE_VALUE_C_IOP..">rsb_opt_t</a> for a list of valid options. </td></tr>
+    <tr><td class="paramname">iop</td><td>library options value output pointer (pointed location will be updated). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="rsb_8h.html#ae6f837f13f6413a163f2c6b0c02dadf2">RSB_REINIT_SINGLE_VALUE_GET</a>, <a class="el" href="rsb_8h.html#a20da3b07d4c17771762413010816e36e">RSB_REINIT_SINGLE_VALUE_SET</a>, <a class="el" href="rsb_8h.html#afeb783fe4dca5762623a621b7095dd01">RSB_REINIT_SINGLE_VALUE</a>, <a class="el" href="rsb_8h.html#aa0ca08a816983bc6294317d0e22e0509">RSB_REINIT_SINGLE_VALUE_C_IOP</a> </dd>
+<dd>
+<a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4670aa682e70f82d5039c600e426a368">rsb_lib_set_opt_str</a>, <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit</a>, <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a>, <a class="el" href="rsb__rsb_8c.html#a96a28efc32dd050d2a74208b3ad2f227">rsb_lib_get [...]
+
+</div>
+</div>
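+<p>A corresponding setter sketch (note the <code>const void *</code> parameter: the pointed value is only read):</p>
+<pre class="fragment">
+const rsb_int_t nt = 2; /* requested executing threads (illustrative value) */
+rsb_err_t errval = rsb_lib_set_opt(RSB_IO_WANT_EXECUTING_THREADS, &amp;nt);
+if(errval != RSB_ERR_NO_ERROR)
+{
+  /* handle the error, e.g. via rsb_perror() */
+}
+</pre>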
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:22 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/rsb__blas__sparse_8F90.html b/doc/html/rsb__blas__sparse_8F90.html
new file mode 100644
index 0000000..ea9b26c
--- /dev/null
+++ b/doc/html/rsb__blas__sparse_8F90.html
@@ -0,0 +1,109 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb_blas_sparse.F90 File Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="files.html"><span>File List</span></a></li>
+      <li><a href="globals.html"><span>Globals</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#nested-classes">Data Structures</a> |
+<a href="#define-members">Macros</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb_blas_sparse.F90 File Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>This file implements the Fortran Sparse BLAS interface to <code>librsb</code>.  
+<a href="#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="nested-classes"></a>
+Data Structures</h2></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">module  </td><td class="memItemRight" valign="bottom"><a class="el" href="classblas__sparse.html">blas_sparse</a></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__entry.html">blas_sparse::uscr_insert_entry</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">A Sparse BLAS interface for RSB.  <a href="interfaceblas__sparse_1_1uscr__insert__entry.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__entries.html">blas_sparse::uscr_insert_entries</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">inserts multiple entries  <a href="interfaceblas__sparse_1_1uscr__insert__entries.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__col.html">blas_sparse::uscr_insert_col</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">inserts a sparse column  <a href="interfaceblas__sparse_1_1uscr__insert__col.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__row.html">blas_sparse::uscr_insert_row</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">inserts a sparse row  <a href="interfaceblas__sparse_1_1uscr__insert__row.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__clique.html">blas_sparse::uscr_insert_clique</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">inserts a clique  <a href="interfaceblas__sparse_1_1uscr__insert__clique.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1uscr__insert__block.html">blas_sparse::uscr_insert_block</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">inserts a dense block  <a href="interfaceblas__sparse_1_1uscr__insert__block.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1usmv.html">blas_sparse::usmv</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">multiplication : c <- beta c + alpha A b  <a href="interfaceblas__sparse_1_1usmv.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1ussv.html">blas_sparse::ussv</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">triangular solve: b <- alpha A^-1 b  <a href="interfaceblas__sparse_1_1ussv.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1usmm.html">blas_sparse::usmm</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">multiplication : c <- beta c + alpha A b  <a href="interfaceblas__sparse_1_1usmm.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1ussm.html">blas_sparse::ussm</a></td></tr>
+<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">triangular solve: b <- alpha A^-1 b  <a href="interfaceblas__sparse_1_1ussm.html#details">More...</a><br/></td></tr>
+<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">interface  </td><td class="memItemRight" valign="bottom"><a class="el" href="interfaceblas__sparse_1_1rsb__blas__get__mtx.html">blas_sparse::rsb_blas_get_mtx</a></td></tr>
+</table><table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="define-members"></a>
+Macros</h2></td></tr>
+<tr class="memitem:a151b6b061725a39255ee4de3db2faf8e"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__blas__sparse_8F90.html#a151b6b061725a39255ee4de3db2faf8e">RSB_HAVE_RSB_KERNELS</a>   1</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>This file implements the Fortran Sparse BLAS interface to <code>librsb</code>. </p>
+</div><h2>Macro Definition Documentation</h2>
+<a class="anchor" id="a151b6b061725a39255ee4de3db2faf8e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_HAVE_RSB_KERNELS   1</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:22 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/rsb__libspblas_8c.html b/doc/html/rsb__libspblas_8c.html
new file mode 100644
index 0000000..1673cb8
--- /dev/null
+++ b/doc/html/rsb__libspblas_8c.html
@@ -0,0 +1,1695 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb_libspblas.c File Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="files.html"><span>File List</span></a></li>
+      <li><a href="globals.html"><span>Globals</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#define-members">Macros</a> |
+<a href="#enum-members">Enumerations</a> |
+<a href="#func-members">Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb_libspblas.c File Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>This file implements Sparse BLAS for librsb. Supported types: (float, double, float complex, double complex). Unsupported types: (). Level 1 ops: (dot, axpy, ga, gz, sc). Level 2 ops: (mv, sv). Level 3 ops: (mm, sm).  
+<a href="#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="define-members"></a>
+Macros</h2></td></tr>
+<tr class="memitem:aab00e94b9818e92bb03c32f7ec677932"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8c.html#aab00e94b9818e92bb03c32f7ec677932">BLAS_ENUM_H</a></td></tr>
+</table><table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="enum-members"></a>
+Enumerations</h2></td></tr>
+<tr class="memitem:a9e6ec9e515f9d9b7e47110ae5f6ea04e"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8c.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea635ab08ac28ae417e25c0d163c40f19d">blas_rowmajor</a> =  101, 
+<a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e">blas_colmajor</a> =  102, 
+<a class="el" href="rsb__libspblas_8c.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea635ab08ac28ae417e25c0d163c40f19d">blas_rowmajor</a> =  101, 
+<a class="el" href="rsb__libspblas_8c.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e">blas_colmajor</a> =  102, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea635ab08ac28ae417e25c0d163c40f19d">blas_rowmajor</a> =  101, 
+<a class="el" href="rsb__libspblas_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e">blas_colmajor</a> =  102
+<br/>
+ }</td></tr>
+<tr class="memitem:a23e5e138364c80074ac014a3dfd346b7"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8c.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f">blas_no_trans</a> =  111, 
+<a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54">blas_trans</a> =  112, 
+<a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c">blas_conj_trans</a> =  113, 
+<a class="el" href="rsb__libspblas_8c.html#a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f">blas_no_trans</a> =  111, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54">blas_trans</a> =  112, 
+<a class="el" href="rsb__libspblas_8c.html#a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c">blas_conj_trans</a> =  113, 
+<a class="el" href="rsb__libspblas_8h.html#a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f">blas_no_trans</a> =  111, 
+<a class="el" href="rsb__libspblas_8h.html#a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54">blas_trans</a> =  112, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c">blas_conj_trans</a> =  113
+<br/>
+ }</td></tr>
+<tr class="memitem:acc2b26a405868ca1bd8a18e0eb62e820"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8c.html#acc2b26a405868ca1bd8a18e0eb62e820">blas_uplo_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07">blas_upper</a> =  121, 
+<a class="el" href="blas__sparse_8h.html#acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26">blas_lower</a> =  122, 
+<a class="el" href="rsb__libspblas_8c.html#acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07">blas_upper</a> =  121, 
+<a class="el" href="rsb__libspblas_8c.html#acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26">blas_lower</a> =  122, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07">blas_upper</a> =  121, 
+<a class="el" href="rsb__libspblas_8h.html#acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26">blas_lower</a> =  122
+<br/>
+ }</td></tr>
+<tr class="memitem:ad7b35ac9114bfe21e15d011bf878b164"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8c.html#ad7b35ac9114bfe21e15d011bf878b164">blas_diag_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#ad7b35ac9114bfe21e15d011bf878b164a3e6acad666ce6b542ac341569b83624f">blas_non_unit_diag</a> =  131, 
+<a class="el" href="blas__sparse_8h.html#ad7b35ac9114bfe21e15d011bf878b164a2f5e42e04fbce66ae47fe91d9a31b52c">blas_unit_diag</a> =  132, 
+<a class="el" href="rsb__libspblas_8c.html#ad7b35ac9114bfe21e15d011bf878b164a3e6acad666ce6b542ac341569b83624f">blas_non_unit_diag</a> =  131, 
+<a class="el" href="rsb__libspblas_8c.html#ad7b35ac9114bfe21e15d011bf878b164a2f5e42e04fbce66ae47fe91d9a31b52c">blas_unit_diag</a> =  132, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#ad7b35ac9114bfe21e15d011bf878b164a3e6acad666ce6b542ac341569b83624f">blas_non_unit_diag</a> =  131, 
+<a class="el" href="rsb__libspblas_8h.html#ad7b35ac9114bfe21e15d011bf878b164a2f5e42e04fbce66ae47fe91d9a31b52c">blas_unit_diag</a> =  132
+<br/>
+ }</td></tr>
+<tr class="memitem:ac10de4d3a9ae38c876ec94ee7929e695"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8c.html#ac10de4d3a9ae38c876ec94ee7929e695">blas_side_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181">blas_left_side</a> =  141, 
+<a class="el" href="blas__sparse_8h.html#ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9">blas_right_side</a> =  142, 
+<a class="el" href="rsb__libspblas_8c.html#ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181">blas_left_side</a> =  141, 
+<a class="el" href="rsb__libspblas_8c.html#ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9">blas_right_side</a> =  142, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181">blas_left_side</a> =  141, 
+<a class="el" href="rsb__libspblas_8h.html#ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9">blas_right_side</a> =  142
+<br/>
+ }</td></tr>
+<tr class="memitem:a6ef40f4bf16a7f484390a20fdb55d3aa"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aa">blas_cmach_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa450c812108b1c81a0f6ef65c51f64d7b">blas_base</a> =  151, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa16a1c297dab1551cf40bbe5210395f10">blas_t</a> =  152, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3d5fea2fad72607b2368ace39fa89280">blas_rnd</a> =  153, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa640085acde3bcb1c78c42e9b5838c714">blas_ieee</a> =  154, 
+<br/>
+  <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaad93796f5d1a8bc7bb4d9512dc312e8df">blas_emin</a> =  155, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa26e73a26ce9e06149fff858bdfb5f363">blas_emax</a> =  156, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3e407f69dd9a70e04e91602a3d76ae4a">blas_eps</a> =  157, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaaadf329e788494c80e522348ef1210d9f">blas_prec</a> =  158, 
+<br/>
+  <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa4159c63ae4ee2275d8e09d02ecb85428">blas_underflow</a> =  159, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa51424a153ba5a72b4fb5018732bbaa02">blas_overflow</a> =  160, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa0a3cdfdc2ddd9ce036017d4c57aa941a">blas_sfmin</a> =  161, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa450c812108b1c81a0f6ef65c51f64d7b">blas_base</a> =  151, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa16a1c297dab1551cf40bbe5210395f10">blas_t</a> =  152, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3d5fea2fad72607b2368ace39fa89280">blas_rnd</a> =  153, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa640085acde3bcb1c78c42e9b5838c714">blas_ieee</a> =  154, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaad93796f5d1a8bc7bb4d9512dc312e8df">blas_emin</a> =  155, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa26e73a26ce9e06149fff858bdfb5f363">blas_emax</a> =  156, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3e407f69dd9a70e04e91602a3d76ae4a">blas_eps</a> =  157, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaaadf329e788494c80e522348ef1210d9f">blas_prec</a> =  158, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa4159c63ae4ee2275d8e09d02ecb85428">blas_underflow</a> =  159, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa51424a153ba5a72b4fb5018732bbaa02">blas_overflow</a> =  160, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa0a3cdfdc2ddd9ce036017d4c57aa941a">blas_sfmin</a> =  161, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa450c812108b1c81a0f6ef65c51f64d7b">blas_base</a> =  151, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa16a1c297dab1551cf40bbe5210395f10">blas_t</a> =  152, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3d5fea2fad72607b2368ace39fa89280">blas_rnd</a> =  153, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa640085acde3bcb1c78c42e9b5838c714">blas_ieee</a> =  154, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaad93796f5d1a8bc7bb4d9512dc312e8df">blas_emin</a> =  155, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa26e73a26ce9e06149fff858bdfb5f363">blas_emax</a> =  156, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3e407f69dd9a70e04e91602a3d76ae4a">blas_eps</a> =  157, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaaadf329e788494c80e522348ef1210d9f">blas_prec</a> =  158, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa4159c63ae4ee2275d8e09d02ecb85428">blas_underflow</a> =  159, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa51424a153ba5a72b4fb5018732bbaa02">blas_overflow</a> =  160, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa0a3cdfdc2ddd9ce036017d4c57aa941a">blas_sfmin</a> =  161
+<br/>
+ }</td></tr>
+<tr class="memitem:a07072da9995d9196d9176f56c784952b"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952b">blas_norm_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952bab661151b14ab3c58c0b3d335528db250">blas_one_norm</a> =  171, 
+<a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba6f826b18a3a197b97b228961fdab47b3">blas_real_one_norm</a> =  172, 
+<a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba57c558d28842a2b7b90df3a796fde77e">blas_two_norm</a> =  173, 
+<a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba607f3142e766379f65fecd8964e9a8ed">blas_frobenius_norm</a> =  174, 
+<br/>
+  <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952bab50c138192cb933e81550900a44d187c">blas_inf_norm</a> =  175, 
+<a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba1ff3a55280960c17e59d37500ab4eec5">blas_real_inf_norm</a> =  176, 
+<a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba6a806e7014a17f2b175780210e43d0cf">blas_max_norm</a> =  177, 
+<a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952bae48280621b0adfec78d7a180c1026953">blas_real_max_norm</a> =  178, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952bab661151b14ab3c58c0b3d335528db250">blas_one_norm</a> =  171, 
+<a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba6f826b18a3a197b97b228961fdab47b3">blas_real_one_norm</a> =  172, 
+<a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba57c558d28842a2b7b90df3a796fde77e">blas_two_norm</a> =  173, 
+<a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba607f3142e766379f65fecd8964e9a8ed">blas_frobenius_norm</a> =  174, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952bab50c138192cb933e81550900a44d187c">blas_inf_norm</a> =  175, 
+<a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba1ff3a55280960c17e59d37500ab4eec5">blas_real_inf_norm</a> =  176, 
+<a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba6a806e7014a17f2b175780210e43d0cf">blas_max_norm</a> =  177, 
+<a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952bae48280621b0adfec78d7a180c1026953">blas_real_max_norm</a> =  178, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952bab661151b14ab3c58c0b3d335528db250">blas_one_norm</a> =  171, 
+<a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba6f826b18a3a197b97b228961fdab47b3">blas_real_one_norm</a> =  172, 
+<a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba57c558d28842a2b7b90df3a796fde77e">blas_two_norm</a> =  173, 
+<a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba607f3142e766379f65fecd8964e9a8ed">blas_frobenius_norm</a> =  174, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952bab50c138192cb933e81550900a44d187c">blas_inf_norm</a> =  175, 
+<a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba1ff3a55280960c17e59d37500ab4eec5">blas_real_inf_norm</a> =  176, 
+<a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba6a806e7014a17f2b175780210e43d0cf">blas_max_norm</a> =  177, 
+<a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952bae48280621b0adfec78d7a180c1026953">blas_real_max_norm</a> =  178
+<br/>
+ }</td></tr>
+<tr class="memitem:a4a9825e92ac3a85e524c58283ac42c14"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8c.html#a4a9825e92ac3a85e524c58283ac42c14">blas_sort_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7">blas_increasing_order</a> =  181, 
+<a class="el" href="blas__sparse_8h.html#a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4">blas_decreasing_order</a> =  182, 
+<a class="el" href="rsb__libspblas_8c.html#a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7">blas_increasing_order</a> =  181, 
+<a class="el" href="rsb__libspblas_8c.html#a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4">blas_decreasing_order</a> =  182, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7">blas_increasing_order</a> =  181, 
+<a class="el" href="rsb__libspblas_8h.html#a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4">blas_decreasing_order</a> =  182
+<br/>
+ }</td></tr>
+<tr class="memitem:a125c156d54359fba48a6b9cf2a2d0a07"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8c.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1">blas_conj</a> =  191, 
+<a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a809495dc4e17c4b059c009bc90f00bf7">blas_no_conj</a> =  192, 
+<a class="el" href="rsb__libspblas_8c.html#a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1">blas_conj</a> =  191, 
+<a class="el" href="rsb__libspblas_8c.html#a125c156d54359fba48a6b9cf2a2d0a07a809495dc4e17c4b059c009bc90f00bf7">blas_no_conj</a> =  192, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1">blas_conj</a> =  191, 
+<a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a809495dc4e17c4b059c009bc90f00bf7">blas_no_conj</a> =  192
+<br/>
+ }</td></tr>
+<tr class="memitem:abdf3d2dd2387ff18e265347d2dfc1f04"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8c.html#abdf3d2dd2387ff18e265347d2dfc1f04">blas_jrot_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a3c18fddd1929b245ab4b948b63d57b0a">blas_jrot_inner</a> =  201, 
+<a class="el" href="blas__sparse_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a1ab4a6e0e69cdaa540b3415617e1ea05">blas_jrot_outer</a> =  202, 
+<a class="el" href="blas__sparse_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a85c43836ee3a19a39f41d2001761e042">blas_jrot_sorted</a> =  203, 
+<a class="el" href="rsb__libspblas_8c.html#abdf3d2dd2387ff18e265347d2dfc1f04a3c18fddd1929b245ab4b948b63d57b0a">blas_jrot_inner</a> =  201, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#abdf3d2dd2387ff18e265347d2dfc1f04a1ab4a6e0e69cdaa540b3415617e1ea05">blas_jrot_outer</a> =  202, 
+<a class="el" href="rsb__libspblas_8c.html#abdf3d2dd2387ff18e265347d2dfc1f04a85c43836ee3a19a39f41d2001761e042">blas_jrot_sorted</a> =  203, 
+<a class="el" href="rsb__libspblas_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a3c18fddd1929b245ab4b948b63d57b0a">blas_jrot_inner</a> =  201, 
+<a class="el" href="rsb__libspblas_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a1ab4a6e0e69cdaa540b3415617e1ea05">blas_jrot_outer</a> =  202, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a85c43836ee3a19a39f41d2001761e042">blas_jrot_sorted</a> =  203
+<br/>
+ }</td></tr>
+<tr class="memitem:a8970170b9fd2a64eb18d9509ea624475"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8c.html#a8970170b9fd2a64eb18d9509ea624475">blas_prec_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475aab04803ec917ea9ae8b4d40ed1cdc7c4">blas_prec_single</a> =  211, 
+<a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475af5e092268082a0306216cbad6d3d8b8a">blas_prec_double</a> =  212, 
+<a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475a63c139aa91e4f496acd6cfb85385d7d4">blas_prec_indigenous</a> =  213, 
+<a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475a2138d39c899dac6396f817c6cfdc91d9">blas_prec_extra</a> =  214, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a8970170b9fd2a64eb18d9509ea624475aab04803ec917ea9ae8b4d40ed1cdc7c4">blas_prec_single</a> =  211, 
+<a class="el" href="rsb__libspblas_8c.html#a8970170b9fd2a64eb18d9509ea624475af5e092268082a0306216cbad6d3d8b8a">blas_prec_double</a> =  212, 
+<a class="el" href="rsb__libspblas_8c.html#a8970170b9fd2a64eb18d9509ea624475a63c139aa91e4f496acd6cfb85385d7d4">blas_prec_indigenous</a> =  213, 
+<a class="el" href="rsb__libspblas_8c.html#a8970170b9fd2a64eb18d9509ea624475a2138d39c899dac6396f817c6cfdc91d9">blas_prec_extra</a> =  214, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a8970170b9fd2a64eb18d9509ea624475aab04803ec917ea9ae8b4d40ed1cdc7c4">blas_prec_single</a> =  211, 
+<a class="el" href="rsb__libspblas_8h.html#a8970170b9fd2a64eb18d9509ea624475af5e092268082a0306216cbad6d3d8b8a">blas_prec_double</a> =  212, 
+<a class="el" href="rsb__libspblas_8h.html#a8970170b9fd2a64eb18d9509ea624475a63c139aa91e4f496acd6cfb85385d7d4">blas_prec_indigenous</a> =  213, 
+<a class="el" href="rsb__libspblas_8h.html#a8970170b9fd2a64eb18d9509ea624475a2138d39c899dac6396f817c6cfdc91d9">blas_prec_extra</a> =  214
+<br/>
+ }</td></tr>
+<tr class="memitem:a3fe740ad5a139d723de260d638987e9e"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8c.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9ea86431e076106ab9784bc5b203d4aa3e0">blas_zero_base</a> =  221, 
+<a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> =  222, 
+<a class="el" href="rsb__libspblas_8c.html#a3fe740ad5a139d723de260d638987e9ea86431e076106ab9784bc5b203d4aa3e0">blas_zero_base</a> =  221, 
+<a class="el" href="rsb__libspblas_8c.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> =  222, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea86431e076106ab9784bc5b203d4aa3e0">blas_zero_base</a> =  221, 
+<a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> =  222
+<br/>
+ }</td></tr>
+<tr class="memitem:a7da08ccc1c4c7f5ff40768d502a6e63b"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63b">blas_symmetry_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866">blas_general</a> =  231, 
+<a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0">blas_symmetric</a> =  232, 
+<a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c">blas_hermitian</a> =  233, 
+<a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a">blas_triangular</a> =  234, 
+<br/>
+  <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">blas_lower_triangular</a> =  235, 
+<a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">blas_upper_triangular</a> =  236, 
+<a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a> =  237, 
+<a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">blas_upper_symmetric</a> =  238, 
+<br/>
+  <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">blas_lower_hermitian</a> =  239, 
+<a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">blas_upper_hermitian</a> =  240, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866">blas_general</a> =  231, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0">blas_symmetric</a> =  232, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c">blas_hermitian</a> =  233, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a">blas_triangular</a> =  234, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">blas_lower_triangular</a> =  235, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">blas_upper_triangular</a> =  236, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a> =  237, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">blas_upper_symmetric</a> =  238, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">blas_lower_hermitian</a> =  239, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">blas_upper_hermitian</a> =  240, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866">blas_general</a> =  231, 
+<a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0">blas_symmetric</a> =  232, 
+<a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c">blas_hermitian</a> =  233, 
+<a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a">blas_triangular</a> =  234, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">blas_lower_triangular</a> =  235, 
+<a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">blas_upper_triangular</a> =  236, 
+<a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a> =  237, 
+<a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">blas_upper_symmetric</a> =  238, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">blas_lower_hermitian</a> =  239, 
+<a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">blas_upper_hermitian</a> =  240
+<br/>
+ }</td></tr>
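
The blas_symmetry_type values describe the structural class of a handle. A sketch, again assuming the standard BLAS_ussp, marking a matrix lower triangular during construction so that triangular-solve routines may later be applied to it:

    #include <blas_sparse.h>

    /* Declare A lower triangular (value 235 above) before the
       *uscr_end call that finalises assembly. */
    void mark_lower_triangular(blas_sparse_matrix A)
    {
        BLAS_ussp(A, blas_lower_triangular);
    }
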
+<tr class="memitem:a09d8be749e909b403b1563f0ca84aef8"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8c.html#a09d8be749e909b403b1563f0ca84aef8">blas_field_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8a1a77ce97fa91f37a776fe3af3f0589d8">blas_complex</a> =  241, 
+<a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8adf886a38a73b1de541eb9d32adb50a4d">blas_real</a> =  242, 
+<a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8a28a1eb1d9dde753641767cb33f7d5852">blas_double_precision</a> =  243, 
+<a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8aa4e3065824f579e62b15ba908e625df6">blas_single_precision</a> =  244, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a09d8be749e909b403b1563f0ca84aef8a1a77ce97fa91f37a776fe3af3f0589d8">blas_complex</a> =  241, 
+<a class="el" href="rsb__libspblas_8c.html#a09d8be749e909b403b1563f0ca84aef8adf886a38a73b1de541eb9d32adb50a4d">blas_real</a> =  242, 
+<a class="el" href="rsb__libspblas_8c.html#a09d8be749e909b403b1563f0ca84aef8a28a1eb1d9dde753641767cb33f7d5852">blas_double_precision</a> =  243, 
+<a class="el" href="rsb__libspblas_8c.html#a09d8be749e909b403b1563f0ca84aef8aa4e3065824f579e62b15ba908e625df6">blas_single_precision</a> =  244, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a09d8be749e909b403b1563f0ca84aef8a1a77ce97fa91f37a776fe3af3f0589d8">blas_complex</a> =  241, 
+<a class="el" href="rsb__libspblas_8h.html#a09d8be749e909b403b1563f0ca84aef8adf886a38a73b1de541eb9d32adb50a4d">blas_real</a> =  242, 
+<a class="el" href="rsb__libspblas_8h.html#a09d8be749e909b403b1563f0ca84aef8a28a1eb1d9dde753641767cb33f7d5852">blas_double_precision</a> =  243, 
+<a class="el" href="rsb__libspblas_8h.html#a09d8be749e909b403b1563f0ca84aef8aa4e3065824f579e62b15ba908e625df6">blas_single_precision</a> =  244
+<br/>
+ }</td></tr>
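
The blas_field_type values classify the numeric field of a handle. A sketch under the assumption, taken from the standard, that BLAS_usgp (the property-get counterpart of BLAS_ussp) reports whether a named classification holds:

    #include <blas_sparse.h>

    /* Assumed semantics: BLAS_usgp returns nonzero when the queried
       classification applies to A. */
    int is_double_precision(blas_sparse_matrix A)
    {
        return BLAS_usgp(A, blas_double_precision); /* = 243 above */
    }
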
+<tr class="memitem:a540f6a907f9f5e49d84a65c530e598c6"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8c.html#a540f6a907f9f5e49d84a65c530e598c6">blas_size_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23">blas_num_rows</a> =  251, 
+<a class="el" href="blas__sparse_8h.html#a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b">blas_num_cols</a> =  252, 
+<a class="el" href="blas__sparse_8h.html#a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd">blas_num_nonzeros</a> =  253, 
+<a class="el" href="rsb__libspblas_8c.html#a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23">blas_num_rows</a> =  251, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b">blas_num_cols</a> =  252, 
+<a class="el" href="rsb__libspblas_8c.html#a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd">blas_num_nonzeros</a> =  253, 
+<a class="el" href="rsb__libspblas_8h.html#a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23">blas_num_rows</a> =  251, 
+<a class="el" href="rsb__libspblas_8h.html#a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b">blas_num_cols</a> =  252, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd">blas_num_nonzeros</a> =  253
+<br/>
+ }</td></tr>
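
The blas_size_type values name integer quantities of a handle. A sketch, assuming the standard BLAS_usgp returns the value of the named quantity:

    #include <blas_sparse.h>

    /* blas_num_rows / blas_num_cols / blas_num_nonzeros (251-253
       above) select which dimension BLAS_usgp reports. */
    int nnz_of(blas_sparse_matrix A)
    {
        return BLAS_usgp(A, blas_num_nonzeros);
    }
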
+<tr class="memitem:a7cb10fb1b47b79ef278d6f09d571bd06"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8c.html#a7cb10fb1b47b79ef278d6f09d571bd06">blas_handle_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_invalid_handle</a> =  261, 
+<a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a0af06bd9167e03014cc95fffaa2901e5">blas_new_handle</a> =  262, 
+<a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a711ecc7da9546cf3ac76a29e297f3eb0">blas_open_handle</a> =  263, 
+<a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">blas_valid_handle</a> =  264, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_invalid_handle</a> =  261, 
+<a class="el" href="rsb__libspblas_8c.html#a7cb10fb1b47b79ef278d6f09d571bd06a0af06bd9167e03014cc95fffaa2901e5">blas_new_handle</a> =  262, 
+<a class="el" href="rsb__libspblas_8c.html#a7cb10fb1b47b79ef278d6f09d571bd06a711ecc7da9546cf3ac76a29e297f3eb0">blas_open_handle</a> =  263, 
+<a class="el" href="rsb__libspblas_8c.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">blas_valid_handle</a> =  264, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_invalid_handle</a> =  261, 
+<a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a0af06bd9167e03014cc95fffaa2901e5">blas_new_handle</a> =  262, 
+<a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a711ecc7da9546cf3ac76a29e297f3eb0">blas_open_handle</a> =  263, 
+<a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">blas_valid_handle</a> =  264
+<br/>
+ }</td></tr>
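
The blas_handle_type values describe handle states; per the standard, a failed creation yields blas_invalid_handle, which is the conventional error test. A sketch using BLAS_duscr_begin from the function list further below:

    #include <blas_sparse.h>
    #include <stdio.h>

    /* blas_invalid_handle (= 261 above) flags a failed creation. */
    blas_sparse_matrix try_create(int m, int n)
    {
        blas_sparse_matrix A = BLAS_duscr_begin(m, n);
        if (A == blas_invalid_handle)
            fprintf(stderr, "matrix creation failed\n");
        return A;
    }
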
+<tr class="memitem:a3f95e19247de0359b56de195704e05a5"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8c.html#a3f95e19247de0359b56de195704e05a5">blas_sparsity_optimization_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79">blas_regular</a> =  271, 
+<a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66">blas_irregular</a> =  272, 
+<a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa">blas_block</a> =  273, 
+<a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5">blas_unassembled</a> =  274, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79">blas_regular</a> =  271, 
+<a class="el" href="rsb__libspblas_8c.html#a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66">blas_irregular</a> =  272, 
+<a class="el" href="rsb__libspblas_8c.html#a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa">blas_block</a> =  273, 
+<a class="el" href="rsb__libspblas_8c.html#a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5">blas_unassembled</a> =  274, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79">blas_regular</a> =  271, 
+<a class="el" href="rsb__libspblas_8h.html#a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66">blas_irregular</a> =  272, 
+<a class="el" href="rsb__libspblas_8h.html#a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa">blas_block</a> =  273, 
+<a class="el" href="rsb__libspblas_8h.html#a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5">blas_unassembled</a> =  274
+<br/>
+ }</td></tr>
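
The blas_sparsity_optimization_type values are optimization hints. A sketch under the assumption, modelled on the standard's other properties, that they are passed through BLAS_ussp during construction:

    #include <blas_sparse.h>

    /* Assumed usage: advise the implementation that A's nonzero
       pattern is irregular (value 272 above). */
    void hint_irregular(blas_sparse_matrix A)
    {
        BLAS_ussp(A, blas_irregular);
    }
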
+<tr class="memitem:aee94244609acd12511418bfbf0a77729"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729">blas_rsb_ext_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0">blas_rsb_spmv_autotuning_on</a> =  6660, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7">blas_rsb_spmv_autotuning_off</a> =  6661, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff">blas_rsb_spmv_n_autotuning_on</a> =  6662, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0">blas_rsb_spmv_n_autotuning_off</a> =  6663, 
+<br/>
+  <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8">blas_rsb_spmv_t_autotuning_on</a> =  6664, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e">blas_rsb_spmv_t_autotuning_off</a> =  6665, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a> =  6666, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b">blas_rsb_rep_rsb</a> =  9995, 
+<br/>
+  <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878">blas_rsb_rep_csr</a> =  9996, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60">blas_rsb_rep_coo</a> =  9997, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f">blas_rsb_duplicates_ovw</a> =  9998, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a">blas_rsb_duplicates_sum</a> =  9999, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0">blas_rsb_spmv_autotuning_on</a> =  6660, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7">blas_rsb_spmv_autotuning_off</a> =  6661, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff">blas_rsb_spmv_n_autotuning_on</a> =  6662, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0">blas_rsb_spmv_n_autotuning_off</a> =  6663, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8">blas_rsb_spmv_t_autotuning_on</a> =  6664, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e">blas_rsb_spmv_t_autotuning_off</a> =  6665, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a> =  6666, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b">blas_rsb_rep_rsb</a> =  9995, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878">blas_rsb_rep_csr</a> =  9996, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60">blas_rsb_rep_coo</a> =  9997, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f">blas_rsb_duplicates_ovw</a> =  9998, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a">blas_rsb_duplicates_sum</a> =  9999, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0">blas_rsb_spmv_autotuning_on</a> =  6660, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7">blas_rsb_spmv_autotuning_off</a> =  6661, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff">blas_rsb_spmv_n_autotuning_on</a> =  6662, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0">blas_rsb_spmv_n_autotuning_off</a> =  6663, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8">blas_rsb_spmv_t_autotuning_on</a> =  6664, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e">blas_rsb_spmv_t_autotuning_off</a> =  6665, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a> =  6666, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b">blas_rsb_rep_rsb</a> =  9995, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878">blas_rsb_rep_csr</a> =  9996, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60">blas_rsb_rep_coo</a> =  9997, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f">blas_rsb_duplicates_ovw</a> =  9998, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a">blas_rsb_duplicates_sum</a> =  9999
+<br/>
+ }</td></tr>
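
The blas_rsb_ext_type values are librsb-specific extensions beyond the standard. A sketch under the assumption that, like standard properties, they are set with BLAS_ussp; here requesting that librsb autotune the next multiply performed on A:

    #include <blas_sparse.h>

    /* Assumed usage of the librsb extension (value 6666 above):
       autotune the storage layout at the next operation on A. */
    void request_autotuning(blas_sparse_matrix A)
    {
        BLAS_ussp(A, blas_rsb_autotune_next_operation);
    }
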
+</table><table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="func-members"></a>
+Functions</h2></td></tr>
+<tr class="memitem:ga88a22a58b50ce89708abb232e4cbffcd"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga88a22a58b50ce89708abb232e4cbffcd">BLAS_susdot</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> conj, int nnz, const float *x, const int *indx, const float *y, int incy, float *r, enum <a class="el" href="blas__sparse_8 [...]
+<tr class="memitem:ga3d4d6df66fbbdfb8585770ce2ce37e6b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3d4d6df66fbbdfb8585770ce2ce37e6b">blas_susdot_</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> *conj, int *nnz, const float *x, const int *indx, const float *y, int *incy, float *r, enum <a class="el" href="blas__spa [...]
+<tr class="memitem:ga2ff8ae1b5a89cdb1bfd23b7b27635614"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2ff8ae1b5a89cdb1bfd23b7b27635614">BLAS_dusdot</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> conj, int nnz, const double *x, const int *indx, const double *y, int incy, double *r, enum <a class="el" href="blas__spars [...]
+<tr class="memitem:ga891919cc22b2f9db6b26c857e2080b48"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga891919cc22b2f9db6b26c857e2080b48">blas_dusdot_</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> *conj, int *nnz, const double *x, const int *indx, const double *y, int *incy, double *r, enum <a class="el" href="blas__ [...]
+<tr class="memitem:gae02711e85989d740894aa260028cab15"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae02711e85989d740894aa260028cab15">BLAS_cusdot</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> conj, int nnz, const void *x, const int *indx, const void *y, int incy, void *r, enum <a class="el" href="blas__sparse_8h.h [...]
+<tr class="memitem:ga6805ad5c8346534e68b436708920d135"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6805ad5c8346534e68b436708920d135">blas_cusdot_</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> *conj, int *nnz, const void *x, const int *indx, const void *y, int *incy, void *r, enum <a class="el" href="blas__sparse [...]
+<tr class="memitem:ga1baea6bd05a2117418d333f5365e34df"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1baea6bd05a2117418d333f5365e34df">BLAS_zusdot</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> conj, int nnz, const void *x, const int *indx, const void *y, int incy, void *r, enum <a class="el" href="blas__sparse_8h.h [...]
+<tr class="memitem:gaa9f54b685570087469d21462d089ef7d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa9f54b685570087469d21462d089ef7d">blas_zusdot_</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> *conj, int *nnz, const void *x, const int *indx, const void *y, int *incy, void *r, enum <a class="el" href="blas__sparse [...]
+<tr class="memitem:gaeedaef37cd7591d8b15bc7e8ee049414"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaeedaef37cd7591d8b15bc7e8ee049414">BLAS_susaxpy</a> (int nnz, float alpha, const float *x, const int *indx, float *y, int incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga863f07d7735eaa4fc0c6dbe1be09974e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga863f07d7735eaa4fc0c6dbe1be09974e">blas_susaxpy_</a> (int *nnz, float *alpha, const float *x, const int *indx, float *y, int *incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga31b475fb2cc3f50775a5b6db930ab570"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga31b475fb2cc3f50775a5b6db930ab570">BLAS_dusaxpy</a> (int nnz, double alpha, const double *x, const int *indx, double *y, int incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga90f1fe9fa99b947c8096befdbfb49fb3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga90f1fe9fa99b947c8096befdbfb49fb3">blas_dusaxpy_</a> (int *nnz, double *alpha, const double *x, const int *indx, double *y, int *incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gafaf15e2530cd078b260bb744e00487cb"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafaf15e2530cd078b260bb744e00487cb">BLAS_cusaxpy</a> (int nnz, const void *alpha, const void *x, const int *indx, void *y, int incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:gac6189fef9b94289f2b8a5b6b7287b50b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac6189fef9b94289f2b8a5b6b7287b50b">blas_cusaxpy_</a> (int *nnz, const void *alpha, const void *x, const int *indx, void *y, int *incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga20f8bb20cf00554547342750d80b2197"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga20f8bb20cf00554547342750d80b2197">BLAS_zusaxpy</a> (int nnz, const void *alpha, const void *x, const int *indx, void *y, int incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga58ad4724155b0cef43cdb7d95f879d8c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga58ad4724155b0cef43cdb7d95f879d8c">blas_zusaxpy_</a> (int *nnz, const void *alpha, const void *x, const int *indx, void *y, int *incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga40cdf6b61694154efa1ba8d180381827"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga40cdf6b61694154efa1ba8d180381827">BLAS_susga</a> (int nnz, const float *y, int incy, float *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga69bea2986de886f37a493464b1006456"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga69bea2986de886f37a493464b1006456">blas_susga_</a> (int *nnz, const float *y, int *incy, float *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gaa253fd591971e664e48e058e85855882"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa253fd591971e664e48e058e85855882">BLAS_dusga</a> (int nnz, const double *y, int incy, double *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga21d8b0bd816bfd21371f70ca82ee9d9c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga21d8b0bd816bfd21371f70ca82ee9d9c">blas_dusga_</a> (int *nnz, const double *y, int *incy, double *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga71f2df0176e5f44bf482ea2386ac5fac"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga71f2df0176e5f44bf482ea2386ac5fac">BLAS_cusga</a> (int nnz, const void *y, int incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga284485bb91904fe1324257ba1ab3a982"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga284485bb91904fe1324257ba1ab3a982">blas_cusga_</a> (int *nnz, const void *y, int *incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga2a29ab06d610d011109dd0c3da94992f"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2a29ab06d610d011109dd0c3da94992f">BLAS_zusga</a> (int nnz, const void *y, int incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga245af9e95488dece29876354c6e91fed"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga245af9e95488dece29876354c6e91fed">blas_zusga_</a> (int *nnz, const void *y, int *incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga2c53b81e979cbae6a5d198509f6d905a"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2c53b81e979cbae6a5d198509f6d905a">BLAS_susgz</a> (int nnz, float *y, int incy, float *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga74964bd95bd8945b13c7fe2c7f559e5c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga74964bd95bd8945b13c7fe2c7f559e5c">blas_susgz_</a> (int *nnz, float *y, int *incy, float *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga0b26bd51a324ee09433dbfa995396344"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga0b26bd51a324ee09433dbfa995396344">BLAS_dusgz</a> (int nnz, double *y, int incy, double *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:gadd448e0d4a33417634e6232c77d8a82a"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gadd448e0d4a33417634e6232c77d8a82a">blas_dusgz_</a> (int *nnz, double *y, int *incy, double *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga2a4c72eb85493e921f4d40e18edb83ef"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2a4c72eb85493e921f4d40e18edb83ef">BLAS_cusgz</a> (int nnz, void *y, int incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga32fdcc497a0db0ba36b413725ddc8c13"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga32fdcc497a0db0ba36b413725ddc8c13">blas_cusgz_</a> (int *nnz, void *y, int *incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga0d52a140d65ab78ee0c515c445b42451"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga0d52a140d65ab78ee0c515c445b42451">BLAS_zusgz</a> (int nnz, void *y, int incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga5a6be1c191d51a622b99fe1b9a776bdc"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5a6be1c191d51a622b99fe1b9a776bdc">blas_zusgz_</a> (int *nnz, void *y, int *incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gad58ff27808df2287b9cc77f6ed4d55ff"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad58ff27808df2287b9cc77f6ed4d55ff">BLAS_sussc</a> (int nnz, const float *x, float *y, int incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga3f88389831294ad45b84ec31313fbc15"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3f88389831294ad45b84ec31313fbc15">blas_sussc_</a> (int *nnz, const float *x, float *y, int *incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gac71029e615c6c893b54e2f9395a536a4"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac71029e615c6c893b54e2f9395a536a4">BLAS_dussc</a> (int nnz, const double *x, double *y, int incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga98ac28de307a8713020edd41be98d455"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga98ac28de307a8713020edd41be98d455">blas_dussc_</a> (int *nnz, const double *x, double *y, int *incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga1b93628d321fbb77a50f98b467a3ff84"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1b93628d321fbb77a50f98b467a3ff84">BLAS_cussc</a> (int nnz, const void *x, void *y, int incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:gafc77b392db05fc22122d4639595cccb3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafc77b392db05fc22122d4639595cccb3">blas_cussc_</a> (int *nnz, const void *x, void *y, int *incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gaad333ae644010e3b059190b98528c79d"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaad333ae644010e3b059190b98528c79d">BLAS_zussc</a> (int nnz, const void *x, void *y, int incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:gab89e9860df0ed52620651cfc607a987a"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gab89e9860df0ed52620651cfc607a987a">blas_zussc_</a> (int *nnz, const void *x, void *y, int *incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gafb4d039eb5319613ed30db7fb323278c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, float alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A,  [...]
+<tr class="memitem:ga651b1d1df5c964dbb21c1a5b14d7878b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga651b1d1df5c964dbb21c1a5b14d7878b">blas_susmv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, float *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> [...]
+<tr class="memitem:ga9a8f45ddd3c890a296239b212f0c033b"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, double alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, [...]
+<tr class="memitem:ga7172d1d1d0f3310ceaf9ecd1d128407b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga7172d1d1d0f3310ceaf9ecd1d128407b">blas_dusmv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, double *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a [...]
+<tr class="memitem:ga9ec2e63176f2d6b11ee48bb523b4f7c7"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</ [...]
+<tr class="memitem:ga3d60593a2a4ea8c081590b392c39419d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3d60593a2a4ea8c081590b392c39419d">blas_cusmv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matri [...]
+<tr class="memitem:ga1ee2eb4be4c1e0565051fe04ca7415a2"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</ [...]
+<tr class="memitem:ga6747bd2d7930018d8693a97a3eb2865c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6747bd2d7930018d8693a97a3eb2865c">blas_zusmv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matri [...]
+<tr class="memitem:gafc9acf48136458baa6ace90355e7abb2"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafc9acf48136458baa6ace90355e7abb2">BLAS_sussv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, float alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> T,  [...]
+<tr class="memitem:ga3b63c0a83f8088e60c8e609b451354f0"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3b63c0a83f8088e60c8e609b451354f0">blas_sussv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, float *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> [...]
+<tr class="memitem:gade1bbec9b8263a2a5e76112f1042576b"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gade1bbec9b8263a2a5e76112f1042576b">BLAS_dussv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, double alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> T, [...]
+<tr class="memitem:ga36f989895809beaafaa57bb5ab41347f"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga36f989895809beaafaa57bb5ab41347f">blas_dussv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, double *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a [...]
+<tr class="memitem:ga4c327ba1fa391b550f2fc5580ad49bdf"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4c327ba1fa391b550f2fc5580ad49bdf">BLAS_cussv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</ [...]
+<tr class="memitem:ga39b0ab077486c1fc3766d68ae9048447"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga39b0ab077486c1fc3766d68ae9048447">blas_cussv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matri [...]
+<tr class="memitem:ga7c1e740064369d0029cd627643eb841a"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga7c1e740064369d0029cd627643eb841a">BLAS_zussv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</ [...]
+<tr class="memitem:ga5d14a5df82e93614e8c524f6d20bb5c5"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5d14a5df82e93614e8c524f6d20bb5c5">blas_zussv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matri [...]
+<tr class="memitem:ga29c11c0c304637e89852359b0f8b10b5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga29c11c0c304637e89852359b0f8b10b5">BLAS_susmm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, int nrh [...]
+<tr class="memitem:ga2c1da8c4c1473a930ebfaa62f360ca8e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2c1da8c4c1473a930ebfaa62f360ca8e">blas_susmm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, int [...]
+<tr class="memitem:gaeeddeb634efe4448a31d62fb547362f6"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaeeddeb634efe4448a31d62fb547362f6">BLAS_dusmm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, int nrh [...]
+<tr class="memitem:gaa6f99d27ec6f88cca6c6cfac1e8ce7e3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa6f99d27ec6f88cca6c6cfac1e8ce7e3">blas_dusmm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, int [...]
+<tr class="memitem:ga8c87639294b57d2893cd29f64902a64d"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8c87639294b57d2893cd29f64902a64d">BLAS_cusmm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, int nrh [...]
+<tr class="memitem:ga2dc070f4b09c4b37d89ab9a0fb16352b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2dc070f4b09c4b37d89ab9a0fb16352b">blas_cusmm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, int [...]
+<tr class="memitem:ga88138db4545610d234d18d42237f36ee"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga88138db4545610d234d18d42237f36ee">BLAS_zusmm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, int nrh [...]
+<tr class="memitem:gaf7018fb638e25fe8b149d0cab4e844c0"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf7018fb638e25fe8b149d0cab4e844c0">blas_zusmm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, int [...]
+<tr class="memitem:ga3d7835bb3621aaf70787d72f86355f8d"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3d7835bb3621aaf70787d72f86355f8d">BLAS_sussm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, int nrh [...]
+<tr class="memitem:ga916f5af1f63f33a3a084accaf2dfd6f1"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga916f5af1f63f33a3a084accaf2dfd6f1">blas_sussm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, int [...]
+<tr class="memitem:gaad6ff4b3cce242f76362e6ad8a947713"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaad6ff4b3cce242f76362e6ad8a947713">BLAS_dussm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, int nrh [...]
+<tr class="memitem:ga4b93f6ef00d1aa3197a45a7e492edcd6"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4b93f6ef00d1aa3197a45a7e492edcd6">blas_dussm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, int [...]
+<tr class="memitem:gad864666e842f7d0878b1fb9d57e80c28"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad864666e842f7d0878b1fb9d57e80c28">BLAS_cussm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, int nrh [...]
+<tr class="memitem:gac3d8f0b6742566cbbadf6b18c9aa40b5"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac3d8f0b6742566cbbadf6b18c9aa40b5">blas_cussm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, int [...]
+<tr class="memitem:ga8602eae41f9e5248ff086087abe68bdf"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8602eae41f9e5248ff086087abe68bdf">BLAS_zussm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, int nrh [...]
+<tr class="memitem:ga60f808ded982233be9a4faaa5fb75db3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga60f808ded982233be9a4faaa5fb75db3">blas_zussm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, int [...]
+<tr class="memitem:gad9fe50c2e7a26e6ef83dfd3ea4cfcdd5"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad9fe50c2e7a26e6ef83dfd3ea4cfcdd5">BLAS_suscr_begin</a> (int m, int n)</td></tr>
+<tr class="memitem:gad2f7ede753754c2474d5460a92bba99e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad2f7ede753754c2474d5460a92bba99e">blas_suscr_begin_</a> (int *m, int *n, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gac931dcb1129ee3016ab82602c3d14fee"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac931dcb1129ee3016ab82602c3d14fee">BLAS_duscr_begin</a> (int m, int n)</td></tr>
+<tr class="memitem:gad7d5969e9edee49441fc89d22715e60d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad7d5969e9edee49441fc89d22715e60d">blas_duscr_begin_</a> (int *m, int *n, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga3483c364b4afec22621e46059b166247"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3483c364b4afec22621e46059b166247">BLAS_cuscr_begin</a> (int m, int n)</td></tr>
+<tr class="memitem:gaf4d21720c592de22cfd4139517d9d255"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf4d21720c592de22cfd4139517d9d255">blas_cuscr_begin_</a> (int *m, int *n, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga52b67393ad16e3d40e74fcdba88c7da4"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga52b67393ad16e3d40e74fcdba88c7da4">BLAS_zuscr_begin</a> (int m, int n)</td></tr>
+<tr class="memitem:gae0246836bd8d4b8697c6674998397f3a"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae0246836bd8d4b8697c6674998397f3a">blas_zuscr_begin_</a> (int *m, int *n, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga11c5559450e186c2a86d714f564411f3"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga11c5559450e186c2a86d714f564411f3">BLAS_suscr_block_begin</a> (int Mb, int Nb, int k, int l)</td></tr>
+<tr class="memitem:ga0067882e19affabebf581452a7c05252"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga0067882e19affabebf581452a7c05252">blas_suscr_block_begin_</a> (int *Mb, int *Nb, int *k, int *l, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gac0ca32cd2c78c8553d6d6b324e06ef59"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">BLAS_duscr_block_begin</a> (int Mb, int Nb, int k, int l)</td></tr>
+<tr class="memitem:ga864facf0316453a27af4b7024a11453b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga864facf0316453a27af4b7024a11453b">blas_duscr_block_begin_</a> (int *Mb, int *Nb, int *k, int *l, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga050218d0fa552a3e2c2d5452f876d9b5"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a> (int Mb, int Nb, int k, int l)</td></tr>
+<tr class="memitem:ga967bfc819ed66559e96ae55a6826d1f8"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga967bfc819ed66559e96ae55a6826d1f8">blas_cuscr_block_begin_</a> (int *Mb, int *Nb, int *k, int *l, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga5a261b2d1cc996c2a982ff8469faf286"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286">BLAS_zuscr_block_begin</a> (int Mb, int Nb, int k, int l)</td></tr>
+<tr class="memitem:ga62c3bd7ba1a96f82055478d40af67370"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga62c3bd7ba1a96f82055478d40af67370">blas_zuscr_block_begin_</a> (int *Mb, int *Nb, int *k, int *l, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gae7e006a448094a70204be60f24cdf1a3"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae7e006a448094a70204be60f24cdf1a3">BLAS_suscr_variable_block_begin</a> (int Mb, int Nb, const int *K, const int *L)</td></tr>
+<tr class="memitem:gaab267e13449c999ad8a8e3e358f4b2ed"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaab267e13449c999ad8a8e3e358f4b2ed">blas_suscr_variable_block_begin_</a> (int *Mb, int *Nb, const int *K, const int *L, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gae0c3c6dc5503e21afb8192efb0f66edd"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae0c3c6dc5503e21afb8192efb0f66edd">BLAS_duscr_variable_block_begin</a> (int Mb, int Nb, const int *K, const int *L)</td></tr>
+<tr class="memitem:ga12c7c1bdd46724147dbbd9b38dd2028e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga12c7c1bdd46724147dbbd9b38dd2028e">blas_duscr_variable_block_begin_</a> (int *Mb, int *Nb, const int *K, const int *L, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga95174fcf3bfbef91ab6b3b85fc90b128"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga95174fcf3bfbef91ab6b3b85fc90b128">BLAS_cuscr_variable_block_begin</a> (int Mb, int Nb, const int *K, const int *L)</td></tr>
+<tr class="memitem:ga24a2dac4570e6021fdcc5c84b52fb5bb"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga24a2dac4570e6021fdcc5c84b52fb5bb">blas_cuscr_variable_block_begin_</a> (int *Mb, int *Nb, const int *K, const int *L, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gaa582b369a0233027349f8f844cce7622"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa582b369a0233027349f8f844cce7622">BLAS_zuscr_variable_block_begin</a> (int Mb, int Nb, const int *K, const int *L)</td></tr>
+<tr class="memitem:gaa51253d1c144c8aa744b2e13742fec40"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa51253d1c144c8aa744b2e13742fec40">blas_zuscr_variable_block_begin_</a> (int *Mb, int *Nb, const int *K, const int *L, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga7176a90049256cb0e0fe45db66f57dd2"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga7176a90049256cb0e0fe45db66f57dd2">BLAS_suscr_end</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:ga5822f3be35eeb550c323de69ec9933d3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5822f3be35eeb550c323de69ec9933d3">blas_suscr_end_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga5d9ce97bf054b1e3750eaae5d4e6c335"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5d9ce97bf054b1e3750eaae5d4e6c335">BLAS_duscr_end</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:ga2120eb06b87f0e85d03a368e5bc55485"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2120eb06b87f0e85d03a368e5bc55485">blas_duscr_end_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gac2b5eccd5cf442b5e2e79201d62ca2b5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac2b5eccd5cf442b5e2e79201d62ca2b5">BLAS_cuscr_end</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:gaa78d3bef027e5a29ab5e5dd6188bcd75"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa78d3bef027e5a29ab5e5dd6188bcd75">blas_cuscr_end_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gad6315d71f6f7abf8b82c89c70d6abbf3"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">BLAS_zuscr_end</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:ga6c23466b531e84f472d5fa75228cb895"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6c23466b531e84f472d5fa75228cb895">blas_zuscr_end_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga26e2c422895e5df8492bdb561cab4a54"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga26e2c422895e5df8492bdb561cab4a54">BLAS_suscr_insert_entry</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, float val, int i, int j)</td></tr>
+<tr class="memitem:ga9b3085c739330bca518e8ef371f7d3b1"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9b3085c739330bca518e8ef371f7d3b1">blas_suscr_insert_entry_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, float *val, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga346ff5263bf0b3a5d7dda94e2000130c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga346ff5263bf0b3a5d7dda94e2000130c">BLAS_duscr_insert_entry</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, double val, int i, int j)</td></tr>
+<tr class="memitem:ga29c2f202a144845cc1d32c8d65bd5c5f"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga29c2f202a144845cc1d32c8d65bd5c5f">blas_duscr_insert_entry_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, double *val, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:gaa39564978ebda8a88f8d19e3e060bc4d"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa39564978ebda8a88f8d19e3e060bc4d">BLAS_cuscr_insert_entry</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *val, int i, int j)</td></tr>
+<tr class="memitem:ga6d735497bdd3bbafbb6168cb0fde5103"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6d735497bdd3bbafbb6168cb0fde5103">blas_cuscr_insert_entry_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *val, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga1ffe345c537b53ac5839da21b236d87c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1ffe345c537b53ac5839da21b236d87c">BLAS_zuscr_insert_entry</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *val, int i, int j)</td></tr>
+<tr class="memitem:gaad6627231dc4230affa318726ff3f345"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaad6627231dc4230affa318726ff3f345">blas_zuscr_insert_entry_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *val, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:gac6158601459aabebc22795864a2a62ba"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac6158601459aabebc22795864a2a62ba">BLAS_suscr_insert_entries</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int nnz, const float *val, const int *indx, const int *jndx)</td></tr>
+<tr class="memitem:ga9119b49fd049bcaa310bccb36fcda664"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9119b49fd049bcaa310bccb36fcda664">blas_suscr_insert_entries_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, const float *val, const int *indx, const int *jndx, int *istat)</td></tr>
+<tr class="memitem:gae0683bc8f0af5dd3e53b964190f9e1b4"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae0683bc8f0af5dd3e53b964190f9e1b4">BLAS_duscr_insert_entries</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int nnz, const double *val, const int *indx, const int *jndx)</td></tr>
+<tr class="memitem:gac2c1a4c7b2cebca56aedbad7a002e15f"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac2c1a4c7b2cebca56aedbad7a002e15f">blas_duscr_insert_entries_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, const double *val, const int *indx, const int *jndx, int *istat)</td></tr>
+<tr class="memitem:ga5af752a3fcb2898412f576eee7d9d618"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5af752a3fcb2898412f576eee7d9d618">BLAS_cuscr_insert_entries</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int nnz, const void *val, const int *indx, const int *jndx)</td></tr>
+<tr class="memitem:ga3deb906fcd5f9b9221b5865541c57d18"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3deb906fcd5f9b9221b5865541c57d18">blas_cuscr_insert_entries_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, const void *val, const int *indx, const int *jndx, int *istat)</td></tr>
+<tr class="memitem:gaacc9c9e5c95df4ea6656ad93f1f09666"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaacc9c9e5c95df4ea6656ad93f1f09666">BLAS_zuscr_insert_entries</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int nnz, const void *val, const int *indx, const int *jndx)</td></tr>
+<tr class="memitem:gad9ad3afc16fc0181117004fd46ff78ae"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad9ad3afc16fc0181117004fd46ff78ae">blas_zuscr_insert_entries_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, const void *val, const int *indx, const int *jndx, int *istat)</td></tr>
+<tr class="memitem:ga547d271038794dfc797aecc70e294761"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga547d271038794dfc797aecc70e294761">BLAS_suscr_insert_col</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int j, int nnz, const float *val, const int *indx)</td></tr>
+<tr class="memitem:ga2d8c691851acf099c25eff1a4c2885c1"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2d8c691851acf099c25eff1a4c2885c1">blas_suscr_insert_col_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *j, int *nnz, const float *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga8ee73d3b27bdc68e12c85ba281a337be"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8ee73d3b27bdc68e12c85ba281a337be">BLAS_duscr_insert_col</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int j, int nnz, const double *val, const int *indx)</td></tr>
+<tr class="memitem:ga5645393bb00d715d882e8e2d55c3f0d1"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5645393bb00d715d882e8e2d55c3f0d1">blas_duscr_insert_col_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *j, int *nnz, const double *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga589495aa8acd4eac99ef9132bc4062c9"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga589495aa8acd4eac99ef9132bc4062c9">BLAS_cuscr_insert_col</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int j, int nnz, const void *val, const int *indx)</td></tr>
+<tr class="memitem:ga1aadf4dc810ff6eb123a1bf9c859efe8"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1aadf4dc810ff6eb123a1bf9c859efe8">blas_cuscr_insert_col_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *j, int *nnz, const void *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga00cfdd3669b146b25d42a32f104ff8a3"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga00cfdd3669b146b25d42a32f104ff8a3">BLAS_zuscr_insert_col</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int j, int nnz, const void *val, const int *indx)</td></tr>
+<tr class="memitem:ga10a2dc6a5399459c83282bda757f5096"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga10a2dc6a5399459c83282bda757f5096">blas_zuscr_insert_col_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *j, int *nnz, const void *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga9b815fa125e3c84a6e6a6ead2c9ef87b"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9b815fa125e3c84a6e6a6ead2c9ef87b">BLAS_suscr_insert_row</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int nnz, const float *val, const int *indx)</td></tr>
+<tr class="memitem:ga71080ddbf0e0e602c7bc36993a6c88ca"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga71080ddbf0e0e602c7bc36993a6c88ca">blas_suscr_insert_row_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *nnz, const float *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:gac3472ca6b036771a68d6f5f01387e482"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac3472ca6b036771a68d6f5f01387e482">BLAS_duscr_insert_row</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int nnz, const double *val, const int *indx)</td></tr>
+<tr class="memitem:gaa72e5450302fa424dcd6cfae0bad872d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa72e5450302fa424dcd6cfae0bad872d">blas_duscr_insert_row_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *nnz, const double *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga77929c94cee3278cc7594a3f1377f5f8"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga77929c94cee3278cc7594a3f1377f5f8">BLAS_cuscr_insert_row</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int nnz, const void *val, const int *indx)</td></tr>
+<tr class="memitem:gad4acfbfdf33a5682ac657add0292711d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad4acfbfdf33a5682ac657add0292711d">blas_cuscr_insert_row_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *nnz, const void *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:gab52e13dc7c61fc48e593276f04cb2d30"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gab52e13dc7c61fc48e593276f04cb2d30">BLAS_zuscr_insert_row</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int nnz, const void *val, const int *indx)</td></tr>
+<tr class="memitem:gaf871e29bfce399dedbebe2aa9c7831df"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf871e29bfce399dedbebe2aa9c7831df">blas_zuscr_insert_row_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *nnz, const void *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga6e567e79f675ed861c8f446d0e7a78f5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6e567e79f675ed861c8f446d0e7a78f5">BLAS_suscr_insert_clique</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int k, const int l, const float *val, const int row_stride, const int col_stride, const int *indx, con [...]
+<tr class="memitem:gafcee9667fc445e32012c960fca7e698d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafcee9667fc445e32012c960fca7e698d">blas_suscr_insert_clique_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *k, const int *l, const float *val, const int *row_stride, const int *col_stride, const int *in [...]
+<tr class="memitem:ga290547e34be3648b2fe6a7378e59a7ec"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga290547e34be3648b2fe6a7378e59a7ec">BLAS_duscr_insert_clique</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int k, const int l, const double *val, const int row_stride, const int col_stride, const int *indx, co [...]
+<tr class="memitem:ga1f7870f8a1114b94444c721c933e8bef"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1f7870f8a1114b94444c721c933e8bef">blas_duscr_insert_clique_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *k, const int *l, const double *val, const int *row_stride, const int *col_stride, const int *i [...]
+<tr class="memitem:gaf089aaac5d65a4e38130b25d5ba2ba27"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf089aaac5d65a4e38130b25d5ba2ba27">BLAS_cuscr_insert_clique</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int k, const int l, const void *val, const int row_stride, const int col_stride, const int *indx, cons [...]
+<tr class="memitem:ga06acafbf28371b1ad8a75a85173261e6"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga06acafbf28371b1ad8a75a85173261e6">blas_cuscr_insert_clique_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *k, const int *l, const void *val, const int *row_stride, const int *col_stride, const int *ind [...]
+<tr class="memitem:ga52519d2caa1070b0c80ac3c6cb104d92"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga52519d2caa1070b0c80ac3c6cb104d92">BLAS_zuscr_insert_clique</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int k, const int l, const void *val, const int row_stride, const int col_stride, const int *indx, cons [...]
+<tr class="memitem:ga8c3430083655b74988536d823e40c723"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8c3430083655b74988536d823e40c723">blas_zuscr_insert_clique_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *k, const int *l, const void *val, const int *row_stride, const int *col_stride, const int *ind [...]
+<tr class="memitem:gaa682b478ac48e12d4a091977e8c45768"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa682b478ac48e12d4a091977e8c45768">BLAS_suscr_insert_block</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const float *val, int row_stride, int col_stride, int i, int j)</td></tr>
+<tr class="memitem:ga61080e2828351bd1585deb2713ed8a29"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga61080e2828351bd1585deb2713ed8a29">blas_suscr_insert_block_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const float *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga5d35aa3e27cdbf8a50db5b47ff5e0892"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5d35aa3e27cdbf8a50db5b47ff5e0892">BLAS_duscr_insert_block</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const double *val, int row_stride, int col_stride, int i, int j)</td></tr>
+<tr class="memitem:ga38012bbc4e99df72fb95409a4860ead7"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga38012bbc4e99df72fb95409a4860ead7">blas_duscr_insert_block_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const double *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga42054351f49850f079733143b2af87fb"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga42054351f49850f079733143b2af87fb">BLAS_cuscr_insert_block</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *val, int row_stride, int col_stride, int i, int j)</td></tr>
+<tr class="memitem:ga527ae15ee9e003d948494d9fcdad5dba"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga527ae15ee9e003d948494d9fcdad5dba">blas_cuscr_insert_block_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga573ee2ea89db4a133b8729abbb1223f0"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga573ee2ea89db4a133b8729abbb1223f0">BLAS_zuscr_insert_block</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *val, int row_stride, int col_stride, int i, int j)</td></tr>
+<tr class="memitem:gac3837cd5c7b2e8ac11c6c0e5cff8914c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac3837cd5c7b2e8ac11c6c0e5cff8914c">blas_zuscr_insert_block_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga2ff68116b5ae79c37bf335096de973c0"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2ff68116b5ae79c37bf335096de973c0">BLAS_uscr_end</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:ga60974067bf5367a9a3c6eaa9f6f8f4ab"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga60974067bf5367a9a3c6eaa9f6f8f4ab">blas_uscr_end_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga8b0cca8196f40f7b55084a978b40717f"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8b0cca8196f40f7b55084a978b40717f">BLAS_usds</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:gae4db91cffaf71632bd41b7423c64b757"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae4db91cffaf71632bd41b7423c64b757">blas_usds_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gae671b9fc06140680a8c104ef4f0f54f0"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae671b9fc06140680a8c104ef4f0f54f0">BLAS_susrows_scale</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const float *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</ [...]
+<tr class="memitem:ga9de54361f778577330c6c5ece88a63c3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9de54361f778577330c6c5ece88a63c3">blas_susrows_scale_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const float *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_typ [...]
+<tr class="memitem:ga01917c64887638dfb5226be1f87d964a"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga01917c64887638dfb5226be1f87d964a">BLAS_dusrows_scale</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const double *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type< [...]
+<tr class="memitem:ga9f09f9d05e01d5b354ce234781e3945a"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9f09f9d05e01d5b354ce234781e3945a">blas_dusrows_scale_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const double *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_ty [...]
+<tr class="memitem:gafc79de03622ceeb2e0b4343fe5904a36"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafc79de03622ceeb2e0b4343fe5904a36">BLAS_cusrows_scale</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a [...]
+<tr class="memitem:gae09ac29c14cede27a8d6a2be2687453e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae09ac29c14cede27a8d6a2be2687453e">blas_cusrows_scale_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type [...]
+<tr class="memitem:gad551879cdde6d16d9dd5b9edc647c667"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad551879cdde6d16d9dd5b9edc647c667">BLAS_zusrows_scale</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a [...]
+<tr class="memitem:ga806bb32c4231e4cd9d833370484ad369"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga806bb32c4231e4cd9d833370484ad369">blas_zusrows_scale_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type [...]
+<tr class="memitem:ga1113eda1c806ca3631fefde07624fbd6"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1113eda1c806ca3631fefde07624fbd6">BLAS_susget_diag</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, float *d)</td></tr>
+<tr class="memitem:ga0444e8a4b321bf1488fb496bdf3116d2"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga0444e8a4b321bf1488fb496bdf3116d2">blas_susget_diag_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, float *d, int *istat)</td></tr>
+<tr class="memitem:ga35b70a7c3083b791cf1b94cb20ef57be"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga35b70a7c3083b791cf1b94cb20ef57be">BLAS_dusget_diag</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, double *d)</td></tr>
+<tr class="memitem:ga7cfde04c833adeb887db75f4b2e104dd"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga7cfde04c833adeb887db75f4b2e104dd">blas_dusget_diag_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, double *d, int *istat)</td></tr>
+<tr class="memitem:ga4ec4b6dce3701c5803efa6b7455e1504"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4ec4b6dce3701c5803efa6b7455e1504">BLAS_cusget_diag</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *d)</td></tr>
+<tr class="memitem:ga4865a8fda031074a0d91cf5c548584b9"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4865a8fda031074a0d91cf5c548584b9">blas_cusget_diag_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *d, int *istat)</td></tr>
+<tr class="memitem:gad175937c05d3d05d3aa7fa35eb3028ec"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad175937c05d3d05d3aa7fa35eb3028ec">BLAS_zusget_diag</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *d)</td></tr>
+<tr class="memitem:ga73feb9adc685f7ff1d66763b0801a0f9"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga73feb9adc685f7ff1d66763b0801a0f9">blas_zusget_diag_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *d, int *istat)</td></tr>
+<tr class="memitem:gad84dbcdeda549e1b0361f7ade7a38b13"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad84dbcdeda549e1b0361f7ade7a38b13">BLAS_susget_rows_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int fr, int lr, int *nnzp)</td></tr>
+<tr class="memitem:ga1a8c39f41962e3be6ac84ea3be73f7a0"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1a8c39f41962e3be6ac84ea3be73f7a0">blas_susget_rows_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *fr, int *lr, int *nnzp, int *istat)</td></tr>
+<tr class="memitem:gab866cf0951b576a47da3864d668919f5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gab866cf0951b576a47da3864d668919f5">BLAS_dusget_rows_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int fr, int lr, int *nnzp)</td></tr>
+<tr class="memitem:gac09a79789dc8b79d2e5a375732703103"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac09a79789dc8b79d2e5a375732703103">blas_dusget_rows_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *fr, int *lr, int *nnzp, int *istat)</td></tr>
+<tr class="memitem:gac50e955d6e2bff77e2c3ac2146c77aaf"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac50e955d6e2bff77e2c3ac2146c77aaf">BLAS_cusget_rows_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int fr, int lr, int *nnzp)</td></tr>
+<tr class="memitem:ga9e11da08762387d8a7a885665298e815"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9e11da08762387d8a7a885665298e815">blas_cusget_rows_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *fr, int *lr, int *nnzp, int *istat)</td></tr>
+<tr class="memitem:gadeb3cbe1cc6987763a55665bcdb8aef5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gadeb3cbe1cc6987763a55665bcdb8aef5">BLAS_zusget_rows_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int fr, int lr, int *nnzp)</td></tr>
+<tr class="memitem:ga50cba1e236b63775110d6d1b292417da"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga50cba1e236b63775110d6d1b292417da">blas_zusget_rows_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *fr, int *lr, int *nnzp, int *istat)</td></tr>
+<tr class="memitem:ga8f78343207ff584d2d78789bd90e5533"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8f78343207ff584d2d78789bd90e5533">BLAS_susget_rows_sparse</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, float *VA, int *IA, int *JA, int *nnz, int fr, int lr)</td></tr>
+<tr class="memitem:ga0977f63d781215c826aa5a0ea2df9f47"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga0977f63d781215c826aa5a0ea2df9f47">blas_susget_rows_sparse_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, float *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)</td></tr>
+<tr class="memitem:ga498d143bae71d800dc35e2f1ee071359"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga498d143bae71d800dc35e2f1ee071359">BLAS_dusget_rows_sparse</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, double *VA, int *IA, int *JA, int *nnz, int fr, int lr)</td></tr>
+<tr class="memitem:gaf2e6ab2c5cbd23a7690bbe8e26794033"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf2e6ab2c5cbd23a7690bbe8e26794033">blas_dusget_rows_sparse_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, double *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)</td></tr>
+<tr class="memitem:ga23f0c1852e05a426d24d2eb1bcae168b"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga23f0c1852e05a426d24d2eb1bcae168b">BLAS_cusget_rows_sparse</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *VA, int *IA, int *JA, int *nnz, int fr, int lr)</td></tr>
+<tr class="memitem:ga04751c01dcfb6730a33eaa91f403dd09"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga04751c01dcfb6730a33eaa91f403dd09">blas_cusget_rows_sparse_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)</td></tr>
+<tr class="memitem:gaf9d44fc73526a4fdf9627424626bf4a5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf9d44fc73526a4fdf9627424626bf4a5">BLAS_zusget_rows_sparse</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *VA, int *IA, int *JA, int *nnz, int fr, int lr)</td></tr>
+<tr class="memitem:ga63f072aa25f7f7f8ac1ac4e32aae0c2e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga63f072aa25f7f7f8ac1ac4e32aae0c2e">blas_zusget_rows_sparse_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)</td></tr>
+<tr class="memitem:gafc031d78d0274c81039c2448a403cd10"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafc031d78d0274c81039c2448a403cd10">BLAS_susget_matrix_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int *nnz)</td></tr>
+<tr class="memitem:ga039a9d4da3423ea71726242e1c1251e7"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga039a9d4da3423ea71726242e1c1251e7">blas_susget_matrix_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, int *istat)</td></tr>
+<tr class="memitem:ga441bff94fdc50b9bf6e180d36f51c3ce"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga441bff94fdc50b9bf6e180d36f51c3ce">BLAS_dusget_matrix_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int *nnz)</td></tr>
+<tr class="memitem:ga3a4bc573dc07849e7a72ecb2d2f0c31d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3a4bc573dc07849e7a72ecb2d2f0c31d">blas_dusget_matrix_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, int *istat)</td></tr>
+<tr class="memitem:gafe27f3044269d37cadb569fc6796ac01"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafe27f3044269d37cadb569fc6796ac01">BLAS_cusget_matrix_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int *nnz)</td></tr>
+<tr class="memitem:ga19e30bb70673342b4d6308bd9cf46884"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga19e30bb70673342b4d6308bd9cf46884">blas_cusget_matrix_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, int *istat)</td></tr>
+<tr class="memitem:ga85e15d7a3331e8ed4d702908477e2896"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga85e15d7a3331e8ed4d702908477e2896">BLAS_zusget_matrix_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int *nnz)</td></tr>
+<tr class="memitem:ga9bdd048dea68ecbd8fd712349d4fbf13"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9bdd048dea68ecbd8fd712349d4fbf13">blas_zusget_matrix_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, int *istat)</td></tr>
+<tr class="memitem:gafc49f44b76021677000bebe7d7fe133b"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafc49f44b76021677000bebe7d7fe133b">BLAS_susget_infinity_norm</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, float *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type [...]
+<tr class="memitem:gaffaaf5b49e850adda0163b6bc082077d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaffaaf5b49e850adda0163b6bc082077d">blas_susget_infinity_norm_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, float *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_t [...]
+<tr class="memitem:ga39b4e25d5d5ce080f8dd994856e41fd0"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga39b4e25d5d5ce080f8dd994856e41fd0">BLAS_dusget_infinity_norm</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, double *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_typ [...]
+<tr class="memitem:ga15c7a93ed41a5488c0ef814d2061214a"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga15c7a93ed41a5488c0ef814d2061214a">blas_dusget_infinity_norm_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, double *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_ [...]
+<tr class="memitem:ga65e5bef193bd5a2d47e80bff7eebed8e"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga65e5bef193bd5a2d47e80bff7eebed8e">BLAS_cusget_infinity_norm</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type< [...]
+<tr class="memitem:gacefa288104224e6c8f069f4001dacc08"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gacefa288104224e6c8f069f4001dacc08">blas_cusget_infinity_norm_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_ty [...]
+<tr class="memitem:ga286c2cf2c749c80c8b71ff2f4bdb1566"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga286c2cf2c749c80c8b71ff2f4bdb1566">BLAS_zusget_infinity_norm</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type< [...]
+<tr class="memitem:ga01b88a27714ca87085421fd9a4f3e479"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga01b88a27714ca87085421fd9a4f3e479">blas_zusget_infinity_norm_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_ty [...]
+<tr class="memitem:gad3e05b01efa2857c0938ada63f30cadf"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad3e05b01efa2857c0938ada63f30cadf">BLAS_susset_elements</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int *ia, const int *ja, const float *va, int nnz)</td></tr>
+<tr class="memitem:gac0abb530fc46d610bf56e7fb1ef42c6c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac0abb530fc46d610bf56e7fb1ef42c6c">blas_susset_elements_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *ia, const int *ja, const float *va, int *nnz, int *istat)</td></tr>
+<tr class="memitem:gae34ff937437af99d317739192e2783da"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae34ff937437af99d317739192e2783da">BLAS_dusset_elements</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int *ia, const int *ja, const double *va, int nnz)</td></tr>
+<tr class="memitem:ga8e2acb49dac4221d1554c30238bd6747"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8e2acb49dac4221d1554c30238bd6747">blas_dusset_elements_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *ia, const int *ja, const double *va, int *nnz, int *istat)</td></tr>
+<tr class="memitem:ga3b358be87656e2d8065e1d30dd8060f4"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3b358be87656e2d8065e1d30dd8060f4">BLAS_cusset_elements</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int *ia, const int *ja, const void *va, int nnz)</td></tr>
+<tr class="memitem:ga38398053da29e668ee440e55f675532b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga38398053da29e668ee440e55f675532b">blas_cusset_elements_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *ia, const int *ja, const void *va, int *nnz, int *istat)</td></tr>
+<tr class="memitem:gac542af7517c9f667122e8bdc408487b3"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac542af7517c9f667122e8bdc408487b3">BLAS_zusset_elements</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int *ia, const int *ja, const void *va, int nnz)</td></tr>
+<tr class="memitem:ga156a8d0225d9761cd58e15e026b9ba2e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga156a8d0225d9761cd58e15e026b9ba2e">blas_zusset_elements_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *ia, const int *ja, const void *va, int *nnz, int *istat)</td></tr>
+<tr class="memitem:gaf17e549ec8cf353144ac1e3a1f080f46"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf17e549ec8cf353144ac1e3a1f080f46">BLAS_susset_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, float *v)</td></tr>
+<tr class="memitem:gab8c3e5745870d4399382051dcedad144"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gab8c3e5745870d4399382051dcedad144">blas_susset_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, float *v, int *istat)</td></tr>
+<tr class="memitem:gac8aa3ed1e29f2555519421290d236d0c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac8aa3ed1e29f2555519421290d236d0c">BLAS_dusset_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, double *v)</td></tr>
+<tr class="memitem:gab50cd8a5a6a5d866789628da0c9141a2"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gab50cd8a5a6a5d866789628da0c9141a2">blas_dusset_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, double *v, int *istat)</td></tr>
+<tr class="memitem:ga778acfebd02199f440b890b0176af19c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga778acfebd02199f440b890b0176af19c">BLAS_cusset_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, void *v)</td></tr>
+<tr class="memitem:ga4a32533889a4ed82a21f457d1253317d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4a32533889a4ed82a21f457d1253317d">blas_cusset_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, void *v, int *istat)</td></tr>
+<tr class="memitem:gaca954a070d476342e254587fc2faa7fd"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaca954a070d476342e254587fc2faa7fd">BLAS_zusset_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, void *v)</td></tr>
+<tr class="memitem:ga52efe19f0972fa51ac6329cf717b676c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga52efe19f0972fa51ac6329cf717b676c">blas_zusset_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, void *v, int *istat)</td></tr>
+<tr class="memitem:gad86989cd1f58003617f3db251b6fc0f1"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad86989cd1f58003617f3db251b6fc0f1">BLAS_susget_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, float *v)</td></tr>
+<tr class="memitem:gaac53e141083bc9871d81b587e5f785c1"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaac53e141083bc9871d81b587e5f785c1">blas_susget_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, float *v, int *istat)</td></tr>
+<tr class="memitem:gacf35fa073f6cc991efe75f6a012a9a04"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gacf35fa073f6cc991efe75f6a012a9a04">BLAS_dusget_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, double *v)</td></tr>
+<tr class="memitem:ga6443c32b223693698a8a0f0198ae4bee"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6443c32b223693698a8a0f0198ae4bee">blas_dusget_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, double *v, int *istat)</td></tr>
+<tr class="memitem:ga4c7eae1cfcd8cafc16f31b169c4a7514"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4c7eae1cfcd8cafc16f31b169c4a7514">BLAS_cusget_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, void *v)</td></tr>
+<tr class="memitem:ga104bc9ee1e6ce32012933e822019ecf0"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga104bc9ee1e6ce32012933e822019ecf0">blas_cusget_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, void *v, int *istat)</td></tr>
+<tr class="memitem:ga27417bc0d923f7288ed736837492275c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga27417bc0d923f7288ed736837492275c">BLAS_zusget_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, void *v)</td></tr>
+<tr class="memitem:ga845cca2b512e38b467fc0d4b93d660b7"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga845cca2b512e38b467fc0d4b93d660b7">blas_zusget_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, void *v, int *istat)</td></tr>
+<tr class="memitem:ga852f4a68eef6963708d11f37e975b178"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int pname)</td></tr>
+<tr class="memitem:ga2cb97e106eb117547157a8fc61491b91"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2cb97e106eb117547157a8fc61491b91">blas_usgp_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *pname, int *istat)</td></tr>
+<tr class="memitem:ga5ea0303be1db6c9dd73c03bba6dc6158"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5ea0303be1db6c9dd73c03bba6dc6158">blas_ussp_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *pname, int *istat)</td></tr>
+<tr class="memitem:ga89577a4a63cc8659f1d463fb819bc002"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int pname)</td></tr>
+<tr class="memitem:gac4d8c73e5d9faa85209bcc4e885d4ff1"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac4d8c73e5d9faa85209bcc4e885d4ff1">rsb_blas_get_mtx</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>This file implements Sparse BLAS for librsb. Supported types :(float,double,float complex,double complex) . Unsupported types:() . Level 1 ops :(dot,axpy,ga,gz,sc) . Level 2 ops :(mv,sv) . Level 3 ops :(mm,sm) . </p>
+<dl class="section author"><dt>Author</dt><dd>Michele Martone </dd></dl>
+</div><h2>Macro Definition Documentation</h2>
+<a class="anchor" id="aab00e94b9818e92bb03c32f7ec677932"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define BLAS_ENUM_H</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<h2>Enumeration Type Documentation</h2>
+<a class="anchor" id="a3fe740ad5a139d723de260d638987e9e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Index base (valid at matrix build/modify time). </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a3fe740ad5a139d723de260d638987e9ea86431e076106ab9784bc5b203d4aa3e0"></a>blas_zero_base</em> </td><td>
+<p>Zero based indices (default when matrix created using the C interface). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad"></a>blas_one_base</em> </td><td>
+<p>Zero based indices (default when matrix created using the Fortran interface). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3fe740ad5a139d723de260d638987e9ea86431e076106ab9784bc5b203d4aa3e0"></a>blas_zero_base</em> </td><td>
+<p>Zero based indices (default when matrix created using the C interface). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad"></a>blas_one_base</em> </td><td>
+<p>Zero based indices (default when matrix created using the Fortran interface). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3fe740ad5a139d723de260d638987e9ea86431e076106ab9784bc5b203d4aa3e0"></a>blas_zero_base</em> </td><td>
+<p>Zero based indices (default when matrix created using the C interface). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad"></a>blas_one_base</em> </td><td>
+<p>Zero based indices (default when matrix created using the Fortran interface). </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
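+<p>A brief sketch (error checks omitted) of requesting one based indices on a matrix under assembly, under the assumption that <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> is called before any entry is inserted:</p>
+<pre class="fragment">
+blas_sparse_matrix A = BLAS_duscr_begin(2, 2);
+BLAS_ussp(A, blas_one_base);                /* Fortran-style numbering from here on */
+const int ia[] = { 1, 2 }, ja[] = { 1, 2 }; /* first row/column is index 1 */
+const double va[] = { 1.0, 2.0 };
+BLAS_duscr_insert_entries(A, 2, va, ia, ja);
+BLAS_duscr_end(A);
+</pre>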
+</div>
+</div>
+<a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aa"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aa">blas_cmach_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Unused/Unsupported. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa450c812108b1c81a0f6ef65c51f64d7b"></a>blas_base</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa16a1c297dab1551cf40bbe5210395f10"></a>blas_t</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa3d5fea2fad72607b2368ace39fa89280"></a>blas_rnd</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa640085acde3bcb1c78c42e9b5838c714"></a>blas_ieee</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaad93796f5d1a8bc7bb4d9512dc312e8df"></a>blas_emin</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa26e73a26ce9e06149fff858bdfb5f363"></a>blas_emax</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa3e407f69dd9a70e04e91602a3d76ae4a"></a>blas_eps</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaaadf329e788494c80e522348ef1210d9f"></a>blas_prec</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa4159c63ae4ee2275d8e09d02ecb85428"></a>blas_underflow</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa51424a153ba5a72b4fb5018732bbaa02"></a>blas_overflow</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa0a3cdfdc2ddd9ce036017d4c57aa941a"></a>blas_sfmin</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa450c812108b1c81a0f6ef65c51f64d7b"></a>blas_base</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa16a1c297dab1551cf40bbe5210395f10"></a>blas_t</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa3d5fea2fad72607b2368ace39fa89280"></a>blas_rnd</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa640085acde3bcb1c78c42e9b5838c714"></a>blas_ieee</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaad93796f5d1a8bc7bb4d9512dc312e8df"></a>blas_emin</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa26e73a26ce9e06149fff858bdfb5f363"></a>blas_emax</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa3e407f69dd9a70e04e91602a3d76ae4a"></a>blas_eps</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaaadf329e788494c80e522348ef1210d9f"></a>blas_prec</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa4159c63ae4ee2275d8e09d02ecb85428"></a>blas_underflow</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa51424a153ba5a72b4fb5018732bbaa02"></a>blas_overflow</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa0a3cdfdc2ddd9ce036017d4c57aa941a"></a>blas_sfmin</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa450c812108b1c81a0f6ef65c51f64d7b"></a>blas_base</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa16a1c297dab1551cf40bbe5210395f10"></a>blas_t</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa3d5fea2fad72607b2368ace39fa89280"></a>blas_rnd</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa640085acde3bcb1c78c42e9b5838c714"></a>blas_ieee</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaad93796f5d1a8bc7bb4d9512dc312e8df"></a>blas_emin</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa26e73a26ce9e06149fff858bdfb5f363"></a>blas_emax</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa3e407f69dd9a70e04e91602a3d76ae4a"></a>blas_eps</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaaadf329e788494c80e522348ef1210d9f"></a>blas_prec</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa4159c63ae4ee2275d8e09d02ecb85428"></a>blas_underflow</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa51424a153ba5a72b4fb5018732bbaa02"></a>blas_overflow</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa0a3cdfdc2ddd9ce036017d4c57aa941a"></a>blas_sfmin</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a125c156d54359fba48a6b9cf2a2d0a07"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Unused/Unsupported. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1"></a>blas_conj</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a125c156d54359fba48a6b9cf2a2d0a07a809495dc4e17c4b059c009bc90f00bf7"></a>blas_no_conj</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1"></a>blas_conj</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a125c156d54359fba48a6b9cf2a2d0a07a809495dc4e17c4b059c009bc90f00bf7"></a>blas_no_conj</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1"></a>blas_conj</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a125c156d54359fba48a6b9cf2a2d0a07a809495dc4e17c4b059c009bc90f00bf7"></a>blas_no_conj</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="ad7b35ac9114bfe21e15d011bf878b164"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#ad7b35ac9114bfe21e15d011bf878b164">blas_diag_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Specifies (<a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a>) or inquires (<a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a>) whether the diagonal of a matrix is (implicitly) unit or not. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="ad7b35ac9114bfe21e15d011bf878b164a3e6acad666ce6b542ac341569b83624f"></a>blas_non_unit_diag</em> </td><td>
+<p>Unit diagional matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ad7b35ac9114bfe21e15d011bf878b164a2f5e42e04fbce66ae47fe91d9a31b52c"></a>blas_unit_diag</em> </td><td>
+<p>Non unit diagional matrix (the default). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ad7b35ac9114bfe21e15d011bf878b164a3e6acad666ce6b542ac341569b83624f"></a>blas_non_unit_diag</em> </td><td>
+<p>Unit diagional matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ad7b35ac9114bfe21e15d011bf878b164a2f5e42e04fbce66ae47fe91d9a31b52c"></a>blas_unit_diag</em> </td><td>
+<p>Non unit diagional matrix (the default). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ad7b35ac9114bfe21e15d011bf878b164a3e6acad666ce6b542ac341569b83624f"></a>blas_non_unit_diag</em> </td><td>
+<p>Unit diagional matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ad7b35ac9114bfe21e15d011bf878b164a2f5e42e04fbce66ae47fe91d9a31b52c"></a>blas_unit_diag</em> </td><td>
+<p>Non unit diagional matrix (the default). </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
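+<p>For example, a sketch under the assumption that the property is set at assembly time, before entries are inserted:</p>
+<pre class="fragment">
+const int n = 3;                      /* illustrative matrix order */
+blas_sparse_matrix T = BLAS_duscr_begin(n, n);
+BLAS_ussp(T, blas_lower_triangular);  /* declare triangular structure */
+BLAS_ussp(T, blas_unit_diag);         /* diagonal is implicitly unit */
+/* ... insert strictly lower triangular entries, then BLAS_duscr_end(T) ... */
+if (BLAS_usgp(T, blas_unit_diag) == 1)
+    { /* the unit diagonal property is set */ }
+</pre>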
+</div>
+</div>
+<a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8">blas_field_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Numerical field type; can be used with <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> to inquire about a matrix's numerical type (1 will be returned in case of success, 0 in case of failure). </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8a1a77ce97fa91f37a776fe3af3f0589d8"></a>blas_complex</em> </td><td>
+<p>Will succeed if matrix is of 'C' or 'Z' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8adf886a38a73b1de541eb9d32adb50a4d"></a>blas_real</em> </td><td>
+<p>Will succeed if matrix is of 'S' or 'D' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8a28a1eb1d9dde753641767cb33f7d5852"></a>blas_double_precision</em> </td><td>
+<p>Will succeed if matrix is of 'D' or 'Z' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8aa4e3065824f579e62b15ba908e625df6"></a>blas_single_precision</em> </td><td>
+<p>Will succeed if matrix is of 'S' or 'C' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8a1a77ce97fa91f37a776fe3af3f0589d8"></a>blas_complex</em> </td><td>
+<p>Will succeed if matrix is of 'C' or 'Z' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8adf886a38a73b1de541eb9d32adb50a4d"></a>blas_real</em> </td><td>
+<p>Will succeed if matrix is of 'S' or 'D' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8a28a1eb1d9dde753641767cb33f7d5852"></a>blas_double_precision</em> </td><td>
+<p>Will succeed if matrix is of 'D' or 'Z' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8aa4e3065824f579e62b15ba908e625df6"></a>blas_single_precision</em> </td><td>
+<p>Will succeed if matrix is of 'S' or 'C' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8a1a77ce97fa91f37a776fe3af3f0589d8"></a>blas_complex</em> </td><td>
+<p>Will succeed if matrix is of 'C' or 'Z' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8adf886a38a73b1de541eb9d32adb50a4d"></a>blas_real</em> </td><td>
+<p>Will succeed if matrix is of 'S' or 'D' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8a28a1eb1d9dde753641767cb33f7d5852"></a>blas_double_precision</em> </td><td>
+<p>Will succeed if matrix is of 'D' or 'Z' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8aa4e3065824f579e62b15ba908e625df6"></a>blas_single_precision</em> </td><td>
+<p>Will succeed if matrix is of 'S' or 'C' type. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
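+<p>A short inquiry sketch, assuming <code>A</code> is a valid matrix handle and <code>&lt;stdio.h&gt;</code> is included:</p>
+<pre class="fragment">
+if (BLAS_usgp(A, blas_double_precision) == 1 &amp;&amp; BLAS_usgp(A, blas_real) == 1)
+    printf("A holds real double precision ('D' type) values\n");
+</pre>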
+</div>
+</div>
+<a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06">blas_handle_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The following are not fully implemented; they are usable with <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a>. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7"></a>blas_invalid_handle</em> </td><td>
+<p>Used to check whether the handle is invalid. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06a0af06bd9167e03014cc95fffaa2901e5"></a>blas_new_handle</em> </td><td>
+<p>Will give 1 if the handle is new. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06a711ecc7da9546cf3ac76a29e297f3eb0"></a>blas_open_handle</em> </td><td>
+<p>Will give 1 if the handle is open. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6"></a>blas_valid_handle</em> </td><td>
+<p>Will give 1 if the handle is valid (that is, after <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d9ce97bf054b1e3750eaae5d4e6c335">BLAS_duscr_end</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">BLAS_zuscr_end</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#gac2b5eccd5cf442b5e2e79201d62ca2b5">BLAS_cuscr_end</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">BLAS_zuscr_end</a [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7"></a>blas_invalid_handle</em> </td><td>
+<p>Used to check whether the handle is invalid. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06a0af06bd9167e03014cc95fffaa2901e5"></a>blas_new_handle</em> </td><td>
+<p>Will give 1 if the handle is new. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06a711ecc7da9546cf3ac76a29e297f3eb0"></a>blas_open_handle</em> </td><td>
+<p>will give 1 if the handle is open. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6"></a>blas_valid_handle</em> </td><td>
+<p>Will give 1 if the handle is valid (that is, after <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d9ce97bf054b1e3750eaae5d4e6c335">BLAS_duscr_end</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">BLAS_zuscr_end</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#gac2b5eccd5cf442b5e2e79201d62ca2b5">BLAS_cuscr_end</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">BLAS_zuscr_end</a [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7"></a>blas_invalid_handle</em> </td><td>
+<p>Used to check whether the handle is invalid. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06a0af06bd9167e03014cc95fffaa2901e5"></a>blas_new_handle</em> </td><td>
+<p>Will give 1 if the handle is new. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06a711ecc7da9546cf3ac76a29e297f3eb0"></a>blas_open_handle</em> </td><td>
+<p>will give 1 if the handle is open. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6"></a>blas_valid_handle</em> </td><td>
+<p>Will give 1 if the handle is valid (that is, after <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d9ce97bf054b1e3750eaae5d4e6c335">BLAS_duscr_end</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">BLAS_zuscr_end</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#gac2b5eccd5cf442b5e2e79201d62ca2b5">BLAS_cuscr_end</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">BLAS_zuscr_end</a [...]
+</td></tr>
+</table>
+</dd>
+</dl>
+
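+<p>A brief sketch of checking a handle's state (under the assumption that the inquiry returns 1 when the named state holds):</p>
+<pre class="fragment">
+blas_sparse_matrix A = BLAS_duscr_begin(2, 2);
+/* ... BLAS_duscr_insert_entries(A, ...) ... */
+BLAS_duscr_end(A);
+if (BLAS_usgp(A, blas_valid_handle) == 1)
+    { /* A is usable by computational routines until BLAS_usds(A) */ }
+</pre>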
+</div>
+</div>
+<a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04">blas_jrot_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Unused/Unsupported. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04a3c18fddd1929b245ab4b948b63d57b0a"></a>blas_jrot_inner</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04a1ab4a6e0e69cdaa540b3415617e1ea05"></a>blas_jrot_outer</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04a85c43836ee3a19a39f41d2001761e042"></a>blas_jrot_sorted</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04a3c18fddd1929b245ab4b948b63d57b0a"></a>blas_jrot_inner</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04a1ab4a6e0e69cdaa540b3415617e1ea05"></a>blas_jrot_outer</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04a85c43836ee3a19a39f41d2001761e042"></a>blas_jrot_sorted</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04a3c18fddd1929b245ab4b948b63d57b0a"></a>blas_jrot_inner</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04a1ab4a6e0e69cdaa540b3415617e1ea05"></a>blas_jrot_outer</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04a85c43836ee3a19a39f41d2001761e042"></a>blas_jrot_sorted</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a07072da9995d9196d9176f56c784952b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952b">blas_norm_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Unused/Unsupported. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952bab661151b14ab3c58c0b3d335528db250"></a>blas_one_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba6f826b18a3a197b97b228961fdab47b3"></a>blas_real_one_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba57c558d28842a2b7b90df3a796fde77e"></a>blas_two_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba607f3142e766379f65fecd8964e9a8ed"></a>blas_frobenius_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952bab50c138192cb933e81550900a44d187c"></a>blas_inf_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba1ff3a55280960c17e59d37500ab4eec5"></a>blas_real_inf_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba6a806e7014a17f2b175780210e43d0cf"></a>blas_max_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952bae48280621b0adfec78d7a180c1026953"></a>blas_real_max_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952bab661151b14ab3c58c0b3d335528db250"></a>blas_one_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba6f826b18a3a197b97b228961fdab47b3"></a>blas_real_one_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba57c558d28842a2b7b90df3a796fde77e"></a>blas_two_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba607f3142e766379f65fecd8964e9a8ed"></a>blas_frobenius_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952bab50c138192cb933e81550900a44d187c"></a>blas_inf_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba1ff3a55280960c17e59d37500ab4eec5"></a>blas_real_inf_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba6a806e7014a17f2b175780210e43d0cf"></a>blas_max_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952bae48280621b0adfec78d7a180c1026953"></a>blas_real_max_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952bab661151b14ab3c58c0b3d335528db250"></a>blas_one_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba6f826b18a3a197b97b228961fdab47b3"></a>blas_real_one_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba57c558d28842a2b7b90df3a796fde77e"></a>blas_two_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba607f3142e766379f65fecd8964e9a8ed"></a>blas_frobenius_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952bab50c138192cb933e81550900a44d187c"></a>blas_inf_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba1ff3a55280960c17e59d37500ab4eec5"></a>blas_real_inf_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba6a806e7014a17f2b175780210e43d0cf"></a>blas_max_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952bae48280621b0adfec78d7a180c1026953"></a>blas_real_max_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a9e6ec9e515f9d9b7e47110ae5f6ea04e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Used to specify the element layout of a dense array. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a9e6ec9e515f9d9b7e47110ae5f6ea04ea635ab08ac28ae417e25c0d163c40f19d"></a>blas_rowmajor</em> </td><td>
+<p>Row major. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e"></a>blas_colmajor</em> </td><td>
+<p>Column major. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a9e6ec9e515f9d9b7e47110ae5f6ea04ea635ab08ac28ae417e25c0d163c40f19d"></a>blas_rowmajor</em> </td><td>
+<p>Row major. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e"></a>blas_colmajor</em> </td><td>
+<p>Column major. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a9e6ec9e515f9d9b7e47110ae5f6ea04ea635ab08ac28ae417e25c0d163c40f19d"></a>blas_rowmajor</em> </td><td>
+<p>Row major. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e"></a>blas_colmajor</em> </td><td>
+<p>Column major. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
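+<p>For instance, a row major multi-vector multiply sketch (assuming <code>A</code> is an assembled matrix and <code>B</code>, <code>C</code> are row major dense arrays with <code>nrhs</code> columns, so the leading dimension equals <code>nrhs</code>):</p>
+<pre class="fragment">
+/* C := C + 1.0 * A * B, dense operands laid out row by row */
+BLAS_dusmm(blas_rowmajor, blas_no_trans, nrhs, 1.0, A, B, nrhs, C, nrhs);
+</pre>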
+</div>
+</div>
+<a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475">blas_prec_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Unused/Unsupported. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475aab04803ec917ea9ae8b4d40ed1cdc7c4"></a>blas_prec_single</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475af5e092268082a0306216cbad6d3d8b8a"></a>blas_prec_double</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475a63c139aa91e4f496acd6cfb85385d7d4"></a>blas_prec_indigenous</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475a2138d39c899dac6396f817c6cfdc91d9"></a>blas_prec_extra</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475aab04803ec917ea9ae8b4d40ed1cdc7c4"></a>blas_prec_single</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475af5e092268082a0306216cbad6d3d8b8a"></a>blas_prec_double</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475a63c139aa91e4f496acd6cfb85385d7d4"></a>blas_prec_indigenous</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475a2138d39c899dac6396f817c6cfdc91d9"></a>blas_prec_extra</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475aab04803ec917ea9ae8b4d40ed1cdc7c4"></a>blas_prec_single</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475af5e092268082a0306216cbad6d3d8b8a"></a>blas_prec_double</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475a63c139aa91e4f496acd6cfb85385d7d4"></a>blas_prec_indigenous</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475a2138d39c899dac6396f817c6cfdc91d9"></a>blas_prec_extra</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="aee94244609acd12511418bfbf0a77729"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729">blas_rsb_ext_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Properties suitable to be used with <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a>. None of these are in the Sparse BLAS standard; they are librsb extensions. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0"></a>blas_rsb_spmv_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. As an extension to t [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7"></a>blas_rsb_spmv_autotuning_off</em> </td><td>
+<p>Turn off executing threads autotuning for <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a class="el" h [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff"></a>blas_rsb_spmv_n_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for untransposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a  [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0"></a>blas_rsb_spmv_n_autotuning_off</em> </td><td>
+<p>Turn off executing threads autotuning for untransposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a  [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8"></a>blas_rsb_spmv_t_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for transposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a cl [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e"></a>blas_rsb_spmv_t_autotuning_off</em> </td><td>
+<p>Turn off executing threads autotuning for transposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a cl [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4"></a>blas_rsb_autotune_next_operation</em> </td><td>
+<p>Turn on executing threads autotuning for the next operation among <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv< [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b"></a>blas_rsb_rep_rsb</em> </td><td>
+<p>Request/check for RSB representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878"></a>blas_rsb_rep_csr</em> </td><td>
+<p>Request/check for CSR representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60"></a>blas_rsb_rep_coo</em> </td><td>
+<p>Request/check for COO representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f"></a>blas_rsb_duplicates_ovw</em> </td><td>
+<p>Request/check for duplicate nonzeroes overwriting policy. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a"></a>blas_rsb_duplicates_sum</em> </td><td>
+<p>Request/check for duplicate nonzeroes summation policy. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0"></a>blas_rsb_spmv_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. As an extension to t [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7"></a>blas_rsb_spmv_autotuning_off</em> </td><td>
+<p>Turn off executing threads autotuning for <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a class="el" h [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff"></a>blas_rsb_spmv_n_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for untransposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a  [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0"></a>blas_rsb_spmv_n_autotuning_off</em> </td><td>
+<p>Turn on executing threads autotuning for untransposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a  [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8"></a>blas_rsb_spmv_t_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for transposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a cl [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e"></a>blas_rsb_spmv_t_autotuning_off</em> </td><td>
+<p>Turn on executing threads autotuning for transposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a cl [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4"></a>blas_rsb_autotune_next_operation</em> </td><td>
+<p>Turn on executing threads autotuning for the next operation among <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv< [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b"></a>blas_rsb_rep_rsb</em> </td><td>
+<p>Request/check for RSB representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878"></a>blas_rsb_rep_csr</em> </td><td>
+<p>Request/check for CSR representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60"></a>blas_rsb_rep_coo</em> </td><td>
+<p>Request/check for COO representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f"></a>blas_rsb_duplicates_ovw</em> </td><td>
+<p>Request/check for duplicate nonzeroes overwriting policy. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a"></a>blas_rsb_duplicates_sum</em> </td><td>
+<p>Request/check for duplicate nonzeroes summation policy. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0"></a>blas_rsb_spmv_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. As an extension to t [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7"></a>blas_rsb_spmv_autotuning_off</em> </td><td>
+<p>Turn off executing threads autotuning for <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a class="el" h [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff"></a>blas_rsb_spmv_n_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for untransposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a  [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0"></a>blas_rsb_spmv_n_autotuning_off</em> </td><td>
+<p>Turn on executing threads autotuning for untransposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a  [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8"></a>blas_rsb_spmv_t_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for transposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a cl [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e"></a>blas_rsb_spmv_t_autotuning_off</em> </td><td>
+<p>Turn on executing threads autotuning for transposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a cl [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4"></a>blas_rsb_autotune_next_operation</em> </td><td>
+<p>Turn on executing threads autotuning for the next operation among <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv< [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b"></a>blas_rsb_rep_rsb</em> </td><td>
+<p>Request/check for RSB representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878"></a>blas_rsb_rep_csr</em> </td><td>
+<p>Request/check for CSR representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60"></a>blas_rsb_rep_coo</em> </td><td>
+<p>Request/check for COO representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f"></a>blas_rsb_duplicates_ovw</em> </td><td>
+<p>Request/check for duplicate nonzeroes overwriting policy. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a"></a>blas_rsb_duplicates_sum</em> </td><td>
+<p>Request/check for duplicate nonzeroes summation policy. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
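+<p>A brief sketch of using these extension properties (some only take effect at assembly time; return-code checks omitted):</p>
+<pre class="fragment">
+BLAS_ussp(A, blas_rsb_duplicates_sum);      /* sum duplicate entries on insertion */
+BLAS_ussp(A, blas_rsb_spmv_autotuning_on);  /* autotune thread count for usmv calls */
+if (BLAS_usgp(A, blas_rsb_rep_csr) == 1)
+    { /* the matrix is held in CSR form */ }
+</pre>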
+</div>
+</div>
+<a class="anchor" id="ac10de4d3a9ae38c876ec94ee7929e695"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#ac10de4d3a9ae38c876ec94ee7929e695">blas_side_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Unused/Unsupported. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181"></a>blas_left_side</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9"></a>blas_right_side</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181"></a>blas_left_side</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9"></a>blas_right_side</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181"></a>blas_left_side</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9"></a>blas_right_side</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a540f6a907f9f5e49d84a65c530e598c6">blas_size_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Quantities that can be obtained via <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a>. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23"></a>blas_num_rows</em> </td><td>
+<p>Get the matrix rows count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b"></a>blas_num_cols</em> </td><td>
+<p>Get the matrix columns count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd"></a>blas_num_nonzeros</em> </td><td>
+<p>Get the matrix nonzeros count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23"></a>blas_num_rows</em> </td><td>
+<p>Get the matrix rows count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b"></a>blas_num_cols</em> </td><td>
+<p>Get the matrix columns count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd"></a>blas_num_nonzeros</em> </td><td>
+<p>Get the matrix nonzeros count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23"></a>blas_num_rows</em> </td><td>
+<p>Get the matrix rows count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b"></a>blas_num_cols</em> </td><td>
+<p>Get the matrix columns count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd"></a>blas_num_nonzeros</em> </td><td>
+<p>Get the matrix nonzeros count. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
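+<p>A minimal inquiry sketch (assuming <code>A</code> is a valid handle; here the <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> return value is taken to carry the requested quantity):</p>
+<pre class="fragment">
+/* Sketch: read back the dimensions and nonzeroes count of matrix A. */
+int nr  = BLAS_usgp(A, blas_num_rows);
+int nc  = BLAS_usgp(A, blas_num_cols);
+int nnz = BLAS_usgp(A, blas_num_nonzeros);
+printf("A is %d x %d with %d nonzeroes\n", nr, nc, nnz); /* needs &lt;stdio.h&gt; */
+</pre>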
+
+</div>
+</div>
+<a class="anchor" id="a4a9825e92ac3a85e524c58283ac42c14"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a4a9825e92ac3a85e524c58283ac42c14">blas_sort_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Unused/Unsupported. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7"></a>blas_increasing_order</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4"></a>blas_decreasing_order</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7"></a>blas_increasing_order</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4"></a>blas_decreasing_order</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7"></a>blas_increasing_order</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4"></a>blas_decreasing_order</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a3f95e19247de0359b56de195704e05a5"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5">blas_sparsity_optimization_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The following are usable with <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79"></a>blas_regular</em> </td><td>
+<p>Will give 0. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66"></a>blas_irregular</em> </td><td>
+<p>Will give 1. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa"></a>blas_block</em> </td><td>
+<p>Will give 0. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5"></a>blas_unassembled</em> </td><td>
+<p>Complementary to <a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">blas_valid_handle</a>. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79"></a>blas_regular</em> </td><td>
+<p>Will give 0. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66"></a>blas_irregular</em> </td><td>
+<p>Will give 1. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa"></a>blas_block</em> </td><td>
+<p>Will give 0. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5"></a>blas_unassembled</em> </td><td>
+<p>Complementary to <a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">blas_valid_handle</a>. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79"></a>blas_regular</em> </td><td>
+<p>Will give 0. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66"></a>blas_irregular</em> </td><td>
+<p>Will give 1. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa"></a>blas_block</em> </td><td>
+<p>Will give 0. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5"></a>blas_unassembled</em> </td><td>
+<p>Complementary to <a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">blas_valid_handle</a>. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
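+<p>A corresponding inquiry sketch (assuming <code>A</code> is a valid handle; the expected return values follow the enumerator notes above):</p>
+<pre class="fragment">
+/* Sketch: per the documentation above, blas_irregular gives 1, the others 0. */
+int irr = BLAS_usgp(A, blas_irregular); /* expected: 1 */
+int reg = BLAS_usgp(A, blas_regular);   /* expected: 0 */
+</pre>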
+
+</div>
+</div>
+<a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63b">blas_symmetry_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Symmetry properties. If not specified otherwise, valid for both <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> and <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a>. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866"></a>blas_general</em> </td><td>
+<p>General unsymmetric matrix (default). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0"></a>blas_symmetric</em> </td><td>
+<p>Symmetric matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">blas_upper_symmetric</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c"></a>blas_hermitian</em> </td><td>
+<p>Hermitian matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">blas_lower_hermitian</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">blas_upper_hermitian</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a"></a>blas_triangular</em> </td><td>
+<p>Triangular matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">blas_lower_triangular</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">blas_upper_triangular</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae"></a>blas_lower_triangular</em> </td><td>
+<p>Lower triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533"></a>blas_upper_triangular</em> </td><td>
+<p>Upper triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487"></a>blas_lower_symmetric</em> </td><td>
+<p>Lower symmetric matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de"></a>blas_upper_symmetric</em> </td><td>
+<p>Upper symmetric matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1"></a>blas_lower_hermitian</em> </td><td>
+<p>Lower hermitian matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe"></a>blas_upper_hermitian</em> </td><td>
+<p>Upper hermitian matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866"></a>blas_general</em> </td><td>
+<p>General unsymmetric matrix (default). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0"></a>blas_symmetric</em> </td><td>
+<p>Symmetric matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">blas_upper_symmetric</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c"></a>blas_hermitian</em> </td><td>
+<p>Hermitian matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">blas_lower_hermitian</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">blas_upper_hermitian</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a"></a>blas_triangular</em> </td><td>
+<p>Triangular matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">blas_lower_triangular</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">blas_upper_triangular</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae"></a>blas_lower_triangular</em> </td><td>
+<p>Lower triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533"></a>blas_upper_triangular</em> </td><td>
+<p>Upper triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487"></a>blas_lower_symmetric</em> </td><td>
+<p>Lower symmetric matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de"></a>blas_upper_symmetric</em> </td><td>
+<p>Upper symmetric matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1"></a>blas_lower_hermitian</em> </td><td>
+<p>Lower hermitian matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe"></a>blas_upper_hermitian</em> </td><td>
+<p>Upper hermitian matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866"></a>blas_general</em> </td><td>
+<p>General unsymmetric matrix (default). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0"></a>blas_symmetric</em> </td><td>
+<p>Symmetric matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">blas_upper_symmetric</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c"></a>blas_hermitian</em> </td><td>
+<p>Hermitian matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">blas_lower_hermitian</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">blas_upper_hermitian</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a"></a>blas_triangular</em> </td><td>
+<p>Triangular matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">blas_lower_triangular</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">blas_upper_triangular</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae"></a>blas_lower_triangular</em> </td><td>
+<p>Lower triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533"></a>blas_upper_triangular</em> </td><td>
+<p>Upper triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487"></a>blas_lower_symmetric</em> </td><td>
+<p>Lower symmetric matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de"></a>blas_upper_symmetric</em> </td><td>
+<p>Upper symmetric matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1"></a>blas_lower_hermitian</em> </td><td>
+<p>Lower hermitian matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe"></a>blas_upper_hermitian</em> </td><td>
+<p>Upper hermitian matrix. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
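+<p>A build-time sketch (hypothetical 2x2 values; assuming zero-based indices and that the property is set via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> before assembly ends):</p>
+<pre class="fragment">
+/* Sketch: declare A lower symmetric so only the lower triangle is inserted. */
+blas_sparse_matrix A = BLAS_duscr_begin(2, 2);
+BLAS_ussp(A, blas_lower_symmetric);     /* property from this enum     */
+BLAS_duscr_insert_entry(A, 4.0, 0, 0);
+BLAS_duscr_insert_entry(A, 1.0, 1, 0);  /* the (0,1) entry is implied  */
+BLAS_duscr_insert_entry(A, 3.0, 1, 1);
+BLAS_duscr_end(A);                      /* assemble                    */
+</pre>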
+
+</div>
+</div>
+<a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Used to specify a transposition operator to a matrix operand. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f"></a>blas_no_trans</em> </td><td>
+<p>No transposition. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54"></a>blas_trans</em> </td><td>
+<p>Transposition. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c"></a>blas_conj_trans</em> </td><td>
+<p>Transposition and conjugation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f"></a>blas_no_trans</em> </td><td>
+<p>No transposition. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54"></a>blas_trans</em> </td><td>
+<p>Transposition. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c"></a>blas_conj_trans</em> </td><td>
+<p>Transposition and conjugation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f"></a>blas_no_trans</em> </td><td>
+<p>No transposition. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54"></a>blas_trans</em> </td><td>
+<p>Transposition. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c"></a>blas_conj_trans</em> </td><td>
+<p>Transposition and conjugation. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
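+<p>An operator-selection sketch (assuming <code>A</code>, <code>x</code> and <code>y</code> have consistent dimensions):</p>
+<pre class="fragment">
+/* Sketch: y := y + 1.0 * A^T * x, with transposition chosen via this enum. */
+BLAS_dusmv(blas_trans, 1.0, A, x, 1, y, 1);
+/* blas_no_trans uses A itself; blas_conj_trans its conjugate transpose. */
+</pre>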
+
+</div>
+</div>
+<a class="anchor" id="acc2b26a405868ca1bd8a18e0eb62e820"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#acc2b26a405868ca1bd8a18e0eb62e820">blas_uplo_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Specifies (<a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a>) or queries (<a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a>) the upper or lower triangularity of a matrix. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07"></a>blas_upper</em> </td><td>
+<p>Upper triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26"></a>blas_lower</em> </td><td>
+<p>Lower triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07"></a>blas_upper</em> </td><td>
+<p>Upper triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26"></a>blas_lower</em> </td><td>
+<p>Lower triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07"></a>blas_upper</em> </td><td>
+<p>Upper triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26"></a>blas_lower</em> </td><td>
+<p>Lower triangular matrix. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
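+<p>An inquiry sketch (assuming <code>A</code> is a valid handle, and assuming a non-zero <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> result signals that the property holds):</p>
+<pre class="fragment">
+/* Sketch: branch on whether A was marked lower triangular. */
+if (BLAS_usgp(A, blas_lower))
+{
+	/* ... take a lower-triangular code path ... */
+}
+</pre>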
+
+</div>
+</div>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:22 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/rsb__libspblas_8h.html b/doc/html/rsb__libspblas_8h.html
new file mode 100644
index 0000000..d29e486
--- /dev/null
+++ b/doc/html/rsb__libspblas_8h.html
@@ -0,0 +1,1768 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb_libspblas.h File Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="files.html"><span>File List</span></a></li>
+      <li><a href="globals.html"><span>Globals</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#define-members">Macros</a> |
+<a href="#typedef-members">Typedefs</a> |
+<a href="#enum-members">Enumerations</a> |
+<a href="#func-members">Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb_libspblas.h File Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>This file specifies the Sparse BLAS interface to librsb. Supported types: (float, double, float complex, double complex). Unsupported types: (). Level 1 ops: (dot, axpy, ga, gz, sc). Level 2 ops: (mv, sv). Level 3 ops: (mm, sm).  
+<a href="#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="define-members"></a>
+Macros</h2></td></tr>
+<tr class="memitem:aab00e94b9818e92bb03c32f7ec677932"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8h.html#aab00e94b9818e92bb03c32f7ec677932">BLAS_ENUM_H</a></td></tr>
+<tr class="memitem:a6719ae77dfef6d6dd0790e34a65c1924"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8h.html#a6719ae77dfef6d6dd0790e34a65c1924">BLAS_ussp</a>   rsb_wp__BLAS_ussp</td></tr>
+<tr class="memitem:a5eec91b6d95962811bd9cb4e37266214"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8h.html#a5eec91b6d95962811bd9cb4e37266214">BLAS_usgp</a>   rsb_wp__BLAS_usgp</td></tr>
+</table><table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="typedef-members"></a>
+Typedefs</h2></td></tr>
+<tr class="memitem:a6f56456b01e0cc6b25b81201aa67c163"><td class="memItemLeft" align="right" valign="top">typedef int </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a></td></tr>
+</table><table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="enum-members"></a>
+Enumerations</h2></td></tr>
+<tr class="memitem:a9e6ec9e515f9d9b7e47110ae5f6ea04e"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea635ab08ac28ae417e25c0d163c40f19d">blas_rowmajor</a> =  101, 
+<a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e">blas_colmajor</a> =  102, 
+<a class="el" href="rsb__libspblas_8c.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea635ab08ac28ae417e25c0d163c40f19d">blas_rowmajor</a> =  101, 
+<a class="el" href="rsb__libspblas_8c.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e">blas_colmajor</a> =  102, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea635ab08ac28ae417e25c0d163c40f19d">blas_rowmajor</a> =  101, 
+<a class="el" href="rsb__libspblas_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e">blas_colmajor</a> =  102
+<br/>
+ }</td></tr>
+<tr class="memitem:a23e5e138364c80074ac014a3dfd346b7"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f">blas_no_trans</a> =  111, 
+<a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54">blas_trans</a> =  112, 
+<a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c">blas_conj_trans</a> =  113, 
+<a class="el" href="rsb__libspblas_8c.html#a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f">blas_no_trans</a> =  111, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54">blas_trans</a> =  112, 
+<a class="el" href="rsb__libspblas_8c.html#a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c">blas_conj_trans</a> =  113, 
+<a class="el" href="rsb__libspblas_8h.html#a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f">blas_no_trans</a> =  111, 
+<a class="el" href="rsb__libspblas_8h.html#a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54">blas_trans</a> =  112, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c">blas_conj_trans</a> =  113
+<br/>
+ }</td></tr>
+<tr class="memitem:acc2b26a405868ca1bd8a18e0eb62e820"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8h.html#acc2b26a405868ca1bd8a18e0eb62e820">blas_uplo_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07">blas_upper</a> =  121, 
+<a class="el" href="blas__sparse_8h.html#acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26">blas_lower</a> =  122, 
+<a class="el" href="rsb__libspblas_8c.html#acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07">blas_upper</a> =  121, 
+<a class="el" href="rsb__libspblas_8c.html#acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26">blas_lower</a> =  122, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07">blas_upper</a> =  121, 
+<a class="el" href="rsb__libspblas_8h.html#acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26">blas_lower</a> =  122
+<br/>
+ }</td></tr>
+<tr class="memitem:ad7b35ac9114bfe21e15d011bf878b164"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8h.html#ad7b35ac9114bfe21e15d011bf878b164">blas_diag_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#ad7b35ac9114bfe21e15d011bf878b164a3e6acad666ce6b542ac341569b83624f">blas_non_unit_diag</a> =  131, 
+<a class="el" href="blas__sparse_8h.html#ad7b35ac9114bfe21e15d011bf878b164a2f5e42e04fbce66ae47fe91d9a31b52c">blas_unit_diag</a> =  132, 
+<a class="el" href="rsb__libspblas_8c.html#ad7b35ac9114bfe21e15d011bf878b164a3e6acad666ce6b542ac341569b83624f">blas_non_unit_diag</a> =  131, 
+<a class="el" href="rsb__libspblas_8c.html#ad7b35ac9114bfe21e15d011bf878b164a2f5e42e04fbce66ae47fe91d9a31b52c">blas_unit_diag</a> =  132, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#ad7b35ac9114bfe21e15d011bf878b164a3e6acad666ce6b542ac341569b83624f">blas_non_unit_diag</a> =  131, 
+<a class="el" href="rsb__libspblas_8h.html#ad7b35ac9114bfe21e15d011bf878b164a2f5e42e04fbce66ae47fe91d9a31b52c">blas_unit_diag</a> =  132
+<br/>
+ }</td></tr>
+<tr class="memitem:ac10de4d3a9ae38c876ec94ee7929e695"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8h.html#ac10de4d3a9ae38c876ec94ee7929e695">blas_side_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181">blas_left_side</a> =  141, 
+<a class="el" href="blas__sparse_8h.html#ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9">blas_right_side</a> =  142, 
+<a class="el" href="rsb__libspblas_8c.html#ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181">blas_left_side</a> =  141, 
+<a class="el" href="rsb__libspblas_8c.html#ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9">blas_right_side</a> =  142, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181">blas_left_side</a> =  141, 
+<a class="el" href="rsb__libspblas_8h.html#ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9">blas_right_side</a> =  142
+<br/>
+ }</td></tr>
+<tr class="memitem:a6ef40f4bf16a7f484390a20fdb55d3aa"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aa">blas_cmach_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa450c812108b1c81a0f6ef65c51f64d7b">blas_base</a> =  151, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa16a1c297dab1551cf40bbe5210395f10">blas_t</a> =  152, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3d5fea2fad72607b2368ace39fa89280">blas_rnd</a> =  153, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa640085acde3bcb1c78c42e9b5838c714">blas_ieee</a> =  154, 
+<br/>
+  <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaad93796f5d1a8bc7bb4d9512dc312e8df">blas_emin</a> =  155, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa26e73a26ce9e06149fff858bdfb5f363">blas_emax</a> =  156, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3e407f69dd9a70e04e91602a3d76ae4a">blas_eps</a> =  157, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaaadf329e788494c80e522348ef1210d9f">blas_prec</a> =  158, 
+<br/>
+  <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa4159c63ae4ee2275d8e09d02ecb85428">blas_underflow</a> =  159, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa51424a153ba5a72b4fb5018732bbaa02">blas_overflow</a> =  160, 
+<a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa0a3cdfdc2ddd9ce036017d4c57aa941a">blas_sfmin</a> =  161, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa450c812108b1c81a0f6ef65c51f64d7b">blas_base</a> =  151, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa16a1c297dab1551cf40bbe5210395f10">blas_t</a> =  152, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3d5fea2fad72607b2368ace39fa89280">blas_rnd</a> =  153, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa640085acde3bcb1c78c42e9b5838c714">blas_ieee</a> =  154, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaad93796f5d1a8bc7bb4d9512dc312e8df">blas_emin</a> =  155, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa26e73a26ce9e06149fff858bdfb5f363">blas_emax</a> =  156, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3e407f69dd9a70e04e91602a3d76ae4a">blas_eps</a> =  157, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaaadf329e788494c80e522348ef1210d9f">blas_prec</a> =  158, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa4159c63ae4ee2275d8e09d02ecb85428">blas_underflow</a> =  159, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa51424a153ba5a72b4fb5018732bbaa02">blas_overflow</a> =  160, 
+<a class="el" href="rsb__libspblas_8c.html#a6ef40f4bf16a7f484390a20fdb55d3aaa0a3cdfdc2ddd9ce036017d4c57aa941a">blas_sfmin</a> =  161, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa450c812108b1c81a0f6ef65c51f64d7b">blas_base</a> =  151, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa16a1c297dab1551cf40bbe5210395f10">blas_t</a> =  152, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3d5fea2fad72607b2368ace39fa89280">blas_rnd</a> =  153, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa640085acde3bcb1c78c42e9b5838c714">blas_ieee</a> =  154, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaad93796f5d1a8bc7bb4d9512dc312e8df">blas_emin</a> =  155, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa26e73a26ce9e06149fff858bdfb5f363">blas_emax</a> =  156, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa3e407f69dd9a70e04e91602a3d76ae4a">blas_eps</a> =  157, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaaadf329e788494c80e522348ef1210d9f">blas_prec</a> =  158, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa4159c63ae4ee2275d8e09d02ecb85428">blas_underflow</a> =  159, 
+<a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa51424a153ba5a72b4fb5018732bbaa02">blas_overflow</a> =  160, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aaa0a3cdfdc2ddd9ce036017d4c57aa941a">blas_sfmin</a> =  161
+<br/>
+ }</td></tr>
+<tr class="memitem:a07072da9995d9196d9176f56c784952b"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952b">blas_norm_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952bab661151b14ab3c58c0b3d335528db250">blas_one_norm</a> =  171, 
+<a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba6f826b18a3a197b97b228961fdab47b3">blas_real_one_norm</a> =  172, 
+<a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba57c558d28842a2b7b90df3a796fde77e">blas_two_norm</a> =  173, 
+<a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba607f3142e766379f65fecd8964e9a8ed">blas_frobenius_norm</a> =  174, 
+<br/>
+  <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952bab50c138192cb933e81550900a44d187c">blas_inf_norm</a> =  175, 
+<a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba1ff3a55280960c17e59d37500ab4eec5">blas_real_inf_norm</a> =  176, 
+<a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952ba6a806e7014a17f2b175780210e43d0cf">blas_max_norm</a> =  177, 
+<a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952bae48280621b0adfec78d7a180c1026953">blas_real_max_norm</a> =  178, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952bab661151b14ab3c58c0b3d335528db250">blas_one_norm</a> =  171, 
+<a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba6f826b18a3a197b97b228961fdab47b3">blas_real_one_norm</a> =  172, 
+<a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba57c558d28842a2b7b90df3a796fde77e">blas_two_norm</a> =  173, 
+<a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba607f3142e766379f65fecd8964e9a8ed">blas_frobenius_norm</a> =  174, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952bab50c138192cb933e81550900a44d187c">blas_inf_norm</a> =  175, 
+<a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba1ff3a55280960c17e59d37500ab4eec5">blas_real_inf_norm</a> =  176, 
+<a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952ba6a806e7014a17f2b175780210e43d0cf">blas_max_norm</a> =  177, 
+<a class="el" href="rsb__libspblas_8c.html#a07072da9995d9196d9176f56c784952bae48280621b0adfec78d7a180c1026953">blas_real_max_norm</a> =  178, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952bab661151b14ab3c58c0b3d335528db250">blas_one_norm</a> =  171, 
+<a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba6f826b18a3a197b97b228961fdab47b3">blas_real_one_norm</a> =  172, 
+<a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba57c558d28842a2b7b90df3a796fde77e">blas_two_norm</a> =  173, 
+<a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba607f3142e766379f65fecd8964e9a8ed">blas_frobenius_norm</a> =  174, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952bab50c138192cb933e81550900a44d187c">blas_inf_norm</a> =  175, 
+<a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba1ff3a55280960c17e59d37500ab4eec5">blas_real_inf_norm</a> =  176, 
+<a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952ba6a806e7014a17f2b175780210e43d0cf">blas_max_norm</a> =  177, 
+<a class="el" href="rsb__libspblas_8h.html#a07072da9995d9196d9176f56c784952bae48280621b0adfec78d7a180c1026953">blas_real_max_norm</a> =  178
+<br/>
+ }</td></tr>
+<tr class="memitem:a4a9825e92ac3a85e524c58283ac42c14"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8h.html#a4a9825e92ac3a85e524c58283ac42c14">blas_sort_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7">blas_increasing_order</a> =  181, 
+<a class="el" href="blas__sparse_8h.html#a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4">blas_decreasing_order</a> =  182, 
+<a class="el" href="rsb__libspblas_8c.html#a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7">blas_increasing_order</a> =  181, 
+<a class="el" href="rsb__libspblas_8c.html#a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4">blas_decreasing_order</a> =  182, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7">blas_increasing_order</a> =  181, 
+<a class="el" href="rsb__libspblas_8h.html#a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4">blas_decreasing_order</a> =  182
+<br/>
+ }</td></tr>
+<tr class="memitem:a125c156d54359fba48a6b9cf2a2d0a07"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1">blas_conj</a> =  191, 
+<a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a809495dc4e17c4b059c009bc90f00bf7">blas_no_conj</a> =  192, 
+<a class="el" href="rsb__libspblas_8c.html#a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1">blas_conj</a> =  191, 
+<a class="el" href="rsb__libspblas_8c.html#a125c156d54359fba48a6b9cf2a2d0a07a809495dc4e17c4b059c009bc90f00bf7">blas_no_conj</a> =  192, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1">blas_conj</a> =  191, 
+<a class="el" href="rsb__libspblas_8h.html#a125c156d54359fba48a6b9cf2a2d0a07a809495dc4e17c4b059c009bc90f00bf7">blas_no_conj</a> =  192
+<br/>
+ }</td></tr>
+<tr class="memitem:abdf3d2dd2387ff18e265347d2dfc1f04"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04">blas_jrot_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a3c18fddd1929b245ab4b948b63d57b0a">blas_jrot_inner</a> =  201, 
+<a class="el" href="blas__sparse_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a1ab4a6e0e69cdaa540b3415617e1ea05">blas_jrot_outer</a> =  202, 
+<a class="el" href="blas__sparse_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a85c43836ee3a19a39f41d2001761e042">blas_jrot_sorted</a> =  203, 
+<a class="el" href="rsb__libspblas_8c.html#abdf3d2dd2387ff18e265347d2dfc1f04a3c18fddd1929b245ab4b948b63d57b0a">blas_jrot_inner</a> =  201, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#abdf3d2dd2387ff18e265347d2dfc1f04a1ab4a6e0e69cdaa540b3415617e1ea05">blas_jrot_outer</a> =  202, 
+<a class="el" href="rsb__libspblas_8c.html#abdf3d2dd2387ff18e265347d2dfc1f04a85c43836ee3a19a39f41d2001761e042">blas_jrot_sorted</a> =  203, 
+<a class="el" href="rsb__libspblas_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a3c18fddd1929b245ab4b948b63d57b0a">blas_jrot_inner</a> =  201, 
+<a class="el" href="rsb__libspblas_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a1ab4a6e0e69cdaa540b3415617e1ea05">blas_jrot_outer</a> =  202, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04a85c43836ee3a19a39f41d2001761e042">blas_jrot_sorted</a> =  203
+<br/>
+ }</td></tr>
+<tr class="memitem:a8970170b9fd2a64eb18d9509ea624475"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8h.html#a8970170b9fd2a64eb18d9509ea624475">blas_prec_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475aab04803ec917ea9ae8b4d40ed1cdc7c4">blas_prec_single</a> =  211, 
+<a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475af5e092268082a0306216cbad6d3d8b8a">blas_prec_double</a> =  212, 
+<a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475a63c139aa91e4f496acd6cfb85385d7d4">blas_prec_indigenous</a> =  213, 
+<a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475a2138d39c899dac6396f817c6cfdc91d9">blas_prec_extra</a> =  214, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a8970170b9fd2a64eb18d9509ea624475aab04803ec917ea9ae8b4d40ed1cdc7c4">blas_prec_single</a> =  211, 
+<a class="el" href="rsb__libspblas_8c.html#a8970170b9fd2a64eb18d9509ea624475af5e092268082a0306216cbad6d3d8b8a">blas_prec_double</a> =  212, 
+<a class="el" href="rsb__libspblas_8c.html#a8970170b9fd2a64eb18d9509ea624475a63c139aa91e4f496acd6cfb85385d7d4">blas_prec_indigenous</a> =  213, 
+<a class="el" href="rsb__libspblas_8c.html#a8970170b9fd2a64eb18d9509ea624475a2138d39c899dac6396f817c6cfdc91d9">blas_prec_extra</a> =  214, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a8970170b9fd2a64eb18d9509ea624475aab04803ec917ea9ae8b4d40ed1cdc7c4">blas_prec_single</a> =  211, 
+<a class="el" href="rsb__libspblas_8h.html#a8970170b9fd2a64eb18d9509ea624475af5e092268082a0306216cbad6d3d8b8a">blas_prec_double</a> =  212, 
+<a class="el" href="rsb__libspblas_8h.html#a8970170b9fd2a64eb18d9509ea624475a63c139aa91e4f496acd6cfb85385d7d4">blas_prec_indigenous</a> =  213, 
+<a class="el" href="rsb__libspblas_8h.html#a8970170b9fd2a64eb18d9509ea624475a2138d39c899dac6396f817c6cfdc91d9">blas_prec_extra</a> =  214
+<br/>
+ }</td></tr>
+<tr class="memitem:a3fe740ad5a139d723de260d638987e9e"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9ea86431e076106ab9784bc5b203d4aa3e0">blas_zero_base</a> =  221, 
+<a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> =  222, 
+<a class="el" href="rsb__libspblas_8c.html#a3fe740ad5a139d723de260d638987e9ea86431e076106ab9784bc5b203d4aa3e0">blas_zero_base</a> =  221, 
+<a class="el" href="rsb__libspblas_8c.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> =  222, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea86431e076106ab9784bc5b203d4aa3e0">blas_zero_base</a> =  221, 
+<a class="el" href="rsb__libspblas_8h.html#a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad">blas_one_base</a> =  222
+<br/>
+ }</td></tr>
+<tr class="memitem:a7da08ccc1c4c7f5ff40768d502a6e63b"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63b">blas_symmetry_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866">blas_general</a> =  231, 
+<a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0">blas_symmetric</a> =  232, 
+<a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c">blas_hermitian</a> =  233, 
+<a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a">blas_triangular</a> =  234, 
+<br/>
+  <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">blas_lower_triangular</a> =  235, 
+<a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">blas_upper_triangular</a> =  236, 
+<a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a> =  237, 
+<a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">blas_upper_symmetric</a> =  238, 
+<br/>
+  <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">blas_lower_hermitian</a> =  239, 
+<a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">blas_upper_hermitian</a> =  240, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866">blas_general</a> =  231, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0">blas_symmetric</a> =  232, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c">blas_hermitian</a> =  233, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a">blas_triangular</a> =  234, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">blas_lower_triangular</a> =  235, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">blas_upper_triangular</a> =  236, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a> =  237, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">blas_upper_symmetric</a> =  238, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">blas_lower_hermitian</a> =  239, 
+<a class="el" href="rsb__libspblas_8c.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">blas_upper_hermitian</a> =  240, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866">blas_general</a> =  231, 
+<a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0">blas_symmetric</a> =  232, 
+<a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c">blas_hermitian</a> =  233, 
+<a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a">blas_triangular</a> =  234, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">blas_lower_triangular</a> =  235, 
+<a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">blas_upper_triangular</a> =  236, 
+<a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a> =  237, 
+<a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">blas_upper_symmetric</a> =  238, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">blas_lower_hermitian</a> =  239, 
+<a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">blas_upper_hermitian</a> =  240
+<br/>
+ }</td></tr>
+<tr class="memitem:a09d8be749e909b403b1563f0ca84aef8"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8h.html#a09d8be749e909b403b1563f0ca84aef8">blas_field_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8a1a77ce97fa91f37a776fe3af3f0589d8">blas_complex</a> =  241, 
+<a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8adf886a38a73b1de541eb9d32adb50a4d">blas_real</a> =  242, 
+<a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8a28a1eb1d9dde753641767cb33f7d5852">blas_double_precision</a> =  243, 
+<a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8aa4e3065824f579e62b15ba908e625df6">blas_single_precision</a> =  244, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a09d8be749e909b403b1563f0ca84aef8a1a77ce97fa91f37a776fe3af3f0589d8">blas_complex</a> =  241, 
+<a class="el" href="rsb__libspblas_8c.html#a09d8be749e909b403b1563f0ca84aef8adf886a38a73b1de541eb9d32adb50a4d">blas_real</a> =  242, 
+<a class="el" href="rsb__libspblas_8c.html#a09d8be749e909b403b1563f0ca84aef8a28a1eb1d9dde753641767cb33f7d5852">blas_double_precision</a> =  243, 
+<a class="el" href="rsb__libspblas_8c.html#a09d8be749e909b403b1563f0ca84aef8aa4e3065824f579e62b15ba908e625df6">blas_single_precision</a> =  244, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a09d8be749e909b403b1563f0ca84aef8a1a77ce97fa91f37a776fe3af3f0589d8">blas_complex</a> =  241, 
+<a class="el" href="rsb__libspblas_8h.html#a09d8be749e909b403b1563f0ca84aef8adf886a38a73b1de541eb9d32adb50a4d">blas_real</a> =  242, 
+<a class="el" href="rsb__libspblas_8h.html#a09d8be749e909b403b1563f0ca84aef8a28a1eb1d9dde753641767cb33f7d5852">blas_double_precision</a> =  243, 
+<a class="el" href="rsb__libspblas_8h.html#a09d8be749e909b403b1563f0ca84aef8aa4e3065824f579e62b15ba908e625df6">blas_single_precision</a> =  244
+<br/>
+ }</td></tr>
+<tr class="memitem:a540f6a907f9f5e49d84a65c530e598c6"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8h.html#a540f6a907f9f5e49d84a65c530e598c6">blas_size_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23">blas_num_rows</a> =  251, 
+<a class="el" href="blas__sparse_8h.html#a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b">blas_num_cols</a> =  252, 
+<a class="el" href="blas__sparse_8h.html#a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd">blas_num_nonzeros</a> =  253, 
+<a class="el" href="rsb__libspblas_8c.html#a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23">blas_num_rows</a> =  251, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b">blas_num_cols</a> =  252, 
+<a class="el" href="rsb__libspblas_8c.html#a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd">blas_num_nonzeros</a> =  253, 
+<a class="el" href="rsb__libspblas_8h.html#a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23">blas_num_rows</a> =  251, 
+<a class="el" href="rsb__libspblas_8h.html#a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b">blas_num_cols</a> =  252, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd">blas_num_nonzeros</a> =  253
+<br/>
+ }</td></tr>
+<tr class="memitem:a7cb10fb1b47b79ef278d6f09d571bd06"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06">blas_handle_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_invalid_handle</a> =  261, 
+<a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a0af06bd9167e03014cc95fffaa2901e5">blas_new_handle</a> =  262, 
+<a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a711ecc7da9546cf3ac76a29e297f3eb0">blas_open_handle</a> =  263, 
+<a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">blas_valid_handle</a> =  264, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_invalid_handle</a> =  261, 
+<a class="el" href="rsb__libspblas_8c.html#a7cb10fb1b47b79ef278d6f09d571bd06a0af06bd9167e03014cc95fffaa2901e5">blas_new_handle</a> =  262, 
+<a class="el" href="rsb__libspblas_8c.html#a7cb10fb1b47b79ef278d6f09d571bd06a711ecc7da9546cf3ac76a29e297f3eb0">blas_open_handle</a> =  263, 
+<a class="el" href="rsb__libspblas_8c.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">blas_valid_handle</a> =  264, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7">blas_invalid_handle</a> =  261, 
+<a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a0af06bd9167e03014cc95fffaa2901e5">blas_new_handle</a> =  262, 
+<a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06a711ecc7da9546cf3ac76a29e297f3eb0">blas_open_handle</a> =  263, 
+<a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">blas_valid_handle</a> =  264
+<br/>
+ }</td></tr>
+<tr class="memitem:a3f95e19247de0359b56de195704e05a5"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8h.html#a3f95e19247de0359b56de195704e05a5">blas_sparsity_optimization_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79">blas_regular</a> =  271, 
+<a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66">blas_irregular</a> =  272, 
+<a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa">blas_block</a> =  273, 
+<a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5">blas_unassembled</a> =  274, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79">blas_regular</a> =  271, 
+<a class="el" href="rsb__libspblas_8c.html#a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66">blas_irregular</a> =  272, 
+<a class="el" href="rsb__libspblas_8c.html#a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa">blas_block</a> =  273, 
+<a class="el" href="rsb__libspblas_8c.html#a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5">blas_unassembled</a> =  274, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79">blas_regular</a> =  271, 
+<a class="el" href="rsb__libspblas_8h.html#a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66">blas_irregular</a> =  272, 
+<a class="el" href="rsb__libspblas_8h.html#a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa">blas_block</a> =  273, 
+<a class="el" href="rsb__libspblas_8h.html#a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5">blas_unassembled</a> =  274
+<br/>
+ }</td></tr>
+<tr class="memitem:aee94244609acd12511418bfbf0a77729"><td class="memItemLeft" align="right" valign="top">enum  </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729">blas_rsb_ext_type</a> { <br/>
+  <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0">blas_rsb_spmv_autotuning_on</a> =  6660, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7">blas_rsb_spmv_autotuning_off</a> =  6661, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff">blas_rsb_spmv_n_autotuning_on</a> =  6662, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0">blas_rsb_spmv_n_autotuning_off</a> =  6663, 
+<br/>
+  <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8">blas_rsb_spmv_t_autotuning_on</a> =  6664, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e">blas_rsb_spmv_t_autotuning_off</a> =  6665, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a> =  6666, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b">blas_rsb_rep_rsb</a> =  9995, 
+<br/>
+  <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878">blas_rsb_rep_csr</a> =  9996, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60">blas_rsb_rep_coo</a> =  9997, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f">blas_rsb_duplicates_ovw</a> =  9998, 
+<a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a">blas_rsb_duplicates_sum</a> =  9999, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0">blas_rsb_spmv_autotuning_on</a> =  6660, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7">blas_rsb_spmv_autotuning_off</a> =  6661, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff">blas_rsb_spmv_n_autotuning_on</a> =  6662, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0">blas_rsb_spmv_n_autotuning_off</a> =  6663, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8">blas_rsb_spmv_t_autotuning_on</a> =  6664, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e">blas_rsb_spmv_t_autotuning_off</a> =  6665, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a> =  6666, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b">blas_rsb_rep_rsb</a> =  9995, 
+<br/>
+  <a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878">blas_rsb_rep_csr</a> =  9996, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60">blas_rsb_rep_coo</a> =  9997, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f">blas_rsb_duplicates_ovw</a> =  9998, 
+<a class="el" href="rsb__libspblas_8c.html#aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a">blas_rsb_duplicates_sum</a> =  9999, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0">blas_rsb_spmv_autotuning_on</a> =  6660, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7">blas_rsb_spmv_autotuning_off</a> =  6661, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff">blas_rsb_spmv_n_autotuning_on</a> =  6662, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0">blas_rsb_spmv_n_autotuning_off</a> =  6663, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8">blas_rsb_spmv_t_autotuning_on</a> =  6664, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e">blas_rsb_spmv_t_autotuning_off</a> =  6665, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4">blas_rsb_autotune_next_operation</a> =  6666, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b">blas_rsb_rep_rsb</a> =  9995, 
+<br/>
+  <a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878">blas_rsb_rep_csr</a> =  9996, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60">blas_rsb_rep_coo</a> =  9997, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f">blas_rsb_duplicates_ovw</a> =  9998, 
+<a class="el" href="rsb__libspblas_8h.html#aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a">blas_rsb_duplicates_sum</a> =  9999
+<br/>
+ }</td></tr>
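The blas_rsb_* values extend the standard enums with librsb-specific matrix properties. As a minimal sketch of how such a value is meant to be applied, assuming the standard Sparse BLAS property-setting routine BLAS_ussp() (part of this interface, though outside this excerpt):

    #include <blas_sparse.h>

    /* Ask librsb to sum duplicate entries and to autotune the next
       operation on A; properties are set on the matrix handle,
       typically between the uscr_begin and uscr_end calls. */
    void set_props(blas_sparse_matrix A)
    {
        BLAS_ussp(A, blas_rsb_duplicates_sum);
        BLAS_ussp(A, blas_rsb_autotune_next_operation);
    }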
+</table><table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="func-members"></a>
+Functions</h2></td></tr>
+<tr class="memitem:ga88a22a58b50ce89708abb232e4cbffcd"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga88a22a58b50ce89708abb232e4cbffcd">BLAS_susdot</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> conj, int nnz, const float *x, const int *indx, const float *y, int incy, float *r, enum <a class="el" href="blas__sparse_8 [...]
+<tr class="memitem:ga3d4d6df66fbbdfb8585770ce2ce37e6b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3d4d6df66fbbdfb8585770ce2ce37e6b">blas_susdot_</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> *conj, int *nnz, const float *x, const int *indx, const float *y, int *incy, float *r, enum <a class="el" href="blas__spa [...]
+<tr class="memitem:ga2ff8ae1b5a89cdb1bfd23b7b27635614"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2ff8ae1b5a89cdb1bfd23b7b27635614">BLAS_dusdot</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> conj, int nnz, const double *x, const int *indx, const double *y, int incy, double *r, enum <a class="el" href="blas__spars [...]
+<tr class="memitem:ga891919cc22b2f9db6b26c857e2080b48"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga891919cc22b2f9db6b26c857e2080b48">blas_dusdot_</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> *conj, int *nnz, const double *x, const int *indx, const double *y, int *incy, double *r, enum <a class="el" href="blas__ [...]
+<tr class="memitem:gae02711e85989d740894aa260028cab15"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae02711e85989d740894aa260028cab15">BLAS_cusdot</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> conj, int nnz, const void *x, const int *indx, const void *y, int incy, void *r, enum <a class="el" href="blas__sparse_8h.h [...]
+<tr class="memitem:ga6805ad5c8346534e68b436708920d135"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6805ad5c8346534e68b436708920d135">blas_cusdot_</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> *conj, int *nnz, const void *x, const int *indx, const void *y, int *incy, void *r, enum <a class="el" href="blas__sparse [...]
+<tr class="memitem:ga1baea6bd05a2117418d333f5365e34df"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1baea6bd05a2117418d333f5365e34df">BLAS_zusdot</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> conj, int nnz, const void *x, const int *indx, const void *y, int incy, void *r, enum <a class="el" href="blas__sparse_8h.h [...]
+<tr class="memitem:gaa9f54b685570087469d21462d089ef7d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa9f54b685570087469d21462d089ef7d">blas_zusdot_</a> (enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a> *conj, int *nnz, const void *x, const int *indx, const void *y, int *incy, void *r, enum <a class="el" href="blas__sparse [...]
+<tr class="memitem:gaeedaef37cd7591d8b15bc7e8ee049414"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaeedaef37cd7591d8b15bc7e8ee049414">BLAS_susaxpy</a> (int nnz, float alpha, const float *x, const int *indx, float *y, int incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga863f07d7735eaa4fc0c6dbe1be09974e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga863f07d7735eaa4fc0c6dbe1be09974e">blas_susaxpy_</a> (int *nnz, float *alpha, const float *x, const int *indx, float *y, int *incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga31b475fb2cc3f50775a5b6db930ab570"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga31b475fb2cc3f50775a5b6db930ab570">BLAS_dusaxpy</a> (int nnz, double alpha, const double *x, const int *indx, double *y, int incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga90f1fe9fa99b947c8096befdbfb49fb3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga90f1fe9fa99b947c8096befdbfb49fb3">blas_dusaxpy_</a> (int *nnz, double *alpha, const double *x, const int *indx, double *y, int *incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gafaf15e2530cd078b260bb744e00487cb"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafaf15e2530cd078b260bb744e00487cb">BLAS_cusaxpy</a> (int nnz, const void *alpha, const void *x, const int *indx, void *y, int incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:gac6189fef9b94289f2b8a5b6b7287b50b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac6189fef9b94289f2b8a5b6b7287b50b">blas_cusaxpy_</a> (int *nnz, const void *alpha, const void *x, const int *indx, void *y, int *incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga20f8bb20cf00554547342750d80b2197"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga20f8bb20cf00554547342750d80b2197">BLAS_zusaxpy</a> (int nnz, const void *alpha, const void *x, const int *indx, void *y, int incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga58ad4724155b0cef43cdb7d95f879d8c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga58ad4724155b0cef43cdb7d95f879d8c">blas_zusaxpy_</a> (int *nnz, const void *alpha, const void *x, const int *indx, void *y, int *incy, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga40cdf6b61694154efa1ba8d180381827"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga40cdf6b61694154efa1ba8d180381827">BLAS_susga</a> (int nnz, const float *y, int incy, float *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga69bea2986de886f37a493464b1006456"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga69bea2986de886f37a493464b1006456">blas_susga_</a> (int *nnz, const float *y, int *incy, float *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gaa253fd591971e664e48e058e85855882"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa253fd591971e664e48e058e85855882">BLAS_dusga</a> (int nnz, const double *y, int incy, double *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga21d8b0bd816bfd21371f70ca82ee9d9c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga21d8b0bd816bfd21371f70ca82ee9d9c">blas_dusga_</a> (int *nnz, const double *y, int *incy, double *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga71f2df0176e5f44bf482ea2386ac5fac"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga71f2df0176e5f44bf482ea2386ac5fac">BLAS_cusga</a> (int nnz, const void *y, int incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga284485bb91904fe1324257ba1ab3a982"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga284485bb91904fe1324257ba1ab3a982">blas_cusga_</a> (int *nnz, const void *y, int *incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga2a29ab06d610d011109dd0c3da94992f"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2a29ab06d610d011109dd0c3da94992f">BLAS_zusga</a> (int nnz, const void *y, int incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga245af9e95488dece29876354c6e91fed"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga245af9e95488dece29876354c6e91fed">blas_zusga_</a> (int *nnz, const void *y, int *incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga2c53b81e979cbae6a5d198509f6d905a"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2c53b81e979cbae6a5d198509f6d905a">BLAS_susgz</a> (int nnz, float *y, int incy, float *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga74964bd95bd8945b13c7fe2c7f559e5c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga74964bd95bd8945b13c7fe2c7f559e5c">blas_susgz_</a> (int *nnz, float *y, int *incy, float *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga0b26bd51a324ee09433dbfa995396344"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga0b26bd51a324ee09433dbfa995396344">BLAS_dusgz</a> (int nnz, double *y, int incy, double *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:gadd448e0d4a33417634e6232c77d8a82a"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gadd448e0d4a33417634e6232c77d8a82a">blas_dusgz_</a> (int *nnz, double *y, int *incy, double *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga2a4c72eb85493e921f4d40e18edb83ef"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2a4c72eb85493e921f4d40e18edb83ef">BLAS_cusgz</a> (int nnz, void *y, int incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga32fdcc497a0db0ba36b413725ddc8c13"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga32fdcc497a0db0ba36b413725ddc8c13">blas_cusgz_</a> (int *nnz, void *y, int *incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga0d52a140d65ab78ee0c515c445b42451"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga0d52a140d65ab78ee0c515c445b42451">BLAS_zusgz</a> (int nnz, void *y, int incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga5a6be1c191d51a622b99fe1b9a776bdc"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5a6be1c191d51a622b99fe1b9a776bdc">blas_zusgz_</a> (int *nnz, void *y, int *incy, void *x, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gad58ff27808df2287b9cc77f6ed4d55ff"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad58ff27808df2287b9cc77f6ed4d55ff">BLAS_sussc</a> (int nnz, const float *x, float *y, int incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga3f88389831294ad45b84ec31313fbc15"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3f88389831294ad45b84ec31313fbc15">blas_sussc_</a> (int *nnz, const float *x, float *y, int *incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gac71029e615c6c893b54e2f9395a536a4"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac71029e615c6c893b54e2f9395a536a4">BLAS_dussc</a> (int nnz, const double *x, double *y, int incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:ga98ac28de307a8713020edd41be98d455"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga98ac28de307a8713020edd41be98d455">blas_dussc_</a> (int *nnz, const double *x, double *y, int *incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:ga1b93628d321fbb77a50f98b467a3ff84"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1b93628d321fbb77a50f98b467a3ff84">BLAS_cussc</a> (int nnz, const void *x, void *y, int incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:gafc77b392db05fc22122d4639595cccb3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafc77b392db05fc22122d4639595cccb3">blas_cussc_</a> (int *nnz, const void *x, void *y, int *incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gaad333ae644010e3b059190b98528c79d"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaad333ae644010e3b059190b98528c79d">BLAS_zussc</a> (int nnz, const void *x, void *y, int incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> index_base)</td></tr>
+<tr class="memitem:gab89e9860df0ed52620651cfc607a987a"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gab89e9860df0ed52620651cfc607a987a">blas_zussc_</a> (int *nnz, const void *x, void *y, int *incy, const int *indx, enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a> *index_base, int *istat)</td></tr>
+<tr class="memitem:gafb4d039eb5319613ed30db7fb323278c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, float alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A,  [...]
+<tr class="memitem:ga651b1d1df5c964dbb21c1a5b14d7878b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga651b1d1df5c964dbb21c1a5b14d7878b">blas_susmv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, float *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> [...]
+<tr class="memitem:ga9a8f45ddd3c890a296239b212f0c033b"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, double alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, [...]
+<tr class="memitem:ga7172d1d1d0f3310ceaf9ecd1d128407b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga7172d1d1d0f3310ceaf9ecd1d128407b">blas_dusmv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, double *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a [...]
+<tr class="memitem:ga9ec2e63176f2d6b11ee48bb523b4f7c7"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</ [...]
+<tr class="memitem:ga3d60593a2a4ea8c081590b392c39419d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3d60593a2a4ea8c081590b392c39419d">blas_cusmv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matri [...]
+<tr class="memitem:ga1ee2eb4be4c1e0565051fe04ca7415a2"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</ [...]
+<tr class="memitem:ga6747bd2d7930018d8693a97a3eb2865c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6747bd2d7930018d8693a97a3eb2865c">blas_zusmv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matri [...]
+<tr class="memitem:gafc9acf48136458baa6ace90355e7abb2"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafc9acf48136458baa6ace90355e7abb2">BLAS_sussv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, float alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> T,  [...]
+<tr class="memitem:ga3b63c0a83f8088e60c8e609b451354f0"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3b63c0a83f8088e60c8e609b451354f0">blas_sussv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, float *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> [...]
+<tr class="memitem:gade1bbec9b8263a2a5e76112f1042576b"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gade1bbec9b8263a2a5e76112f1042576b">BLAS_dussv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, double alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> T, [...]
+<tr class="memitem:ga36f989895809beaafaa57bb5ab41347f"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga36f989895809beaafaa57bb5ab41347f">blas_dussv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, double *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a [...]
+<tr class="memitem:ga4c327ba1fa391b550f2fc5580ad49bdf"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4c327ba1fa391b550f2fc5580ad49bdf">BLAS_cussv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</ [...]
+<tr class="memitem:ga39b0ab077486c1fc3766d68ae9048447"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga39b0ab077486c1fc3766d68ae9048447">blas_cussv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matri [...]
+<tr class="memitem:ga7c1e740064369d0029cd627643eb841a"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga7c1e740064369d0029cd627643eb841a">BLAS_zussv</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</ [...]
+<tr class="memitem:ga5d14a5df82e93614e8c524f6d20bb5c5"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5d14a5df82e93614e8c524f6d20bb5c5">blas_zussv_</a> (enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, const void *alpha, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matri [...]
+<tr class="memitem:ga29c11c0c304637e89852359b0f8b10b5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga29c11c0c304637e89852359b0f8b10b5">BLAS_susmm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, int nrh [...]
+<tr class="memitem:ga2c1da8c4c1473a930ebfaa62f360ca8e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2c1da8c4c1473a930ebfaa62f360ca8e">blas_susmm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, int [...]
+<tr class="memitem:gaeeddeb634efe4448a31d62fb547362f6"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaeeddeb634efe4448a31d62fb547362f6">BLAS_dusmm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, int nrh [...]
+<tr class="memitem:gaa6f99d27ec6f88cca6c6cfac1e8ce7e3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa6f99d27ec6f88cca6c6cfac1e8ce7e3">blas_dusmm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, int [...]
+<tr class="memitem:ga8c87639294b57d2893cd29f64902a64d"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8c87639294b57d2893cd29f64902a64d">BLAS_cusmm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, int nrh [...]
+<tr class="memitem:ga2dc070f4b09c4b37d89ab9a0fb16352b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2dc070f4b09c4b37d89ab9a0fb16352b">blas_cusmm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, int [...]
+<tr class="memitem:ga88138db4545610d234d18d42237f36ee"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga88138db4545610d234d18d42237f36ee">BLAS_zusmm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transA, int nrh [...]
+<tr class="memitem:gaf7018fb638e25fe8b149d0cab4e844c0"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf7018fb638e25fe8b149d0cab4e844c0">blas_zusmm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transA, int [...]
+<tr class="memitem:ga3d7835bb3621aaf70787d72f86355f8d"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3d7835bb3621aaf70787d72f86355f8d">BLAS_sussm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, int nrh [...]
+<tr class="memitem:ga916f5af1f63f33a3a084accaf2dfd6f1"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga916f5af1f63f33a3a084accaf2dfd6f1">blas_sussm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, int [...]
+<tr class="memitem:gaad6ff4b3cce242f76362e6ad8a947713"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaad6ff4b3cce242f76362e6ad8a947713">BLAS_dussm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, int nrh [...]
+<tr class="memitem:ga4b93f6ef00d1aa3197a45a7e492edcd6"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4b93f6ef00d1aa3197a45a7e492edcd6">blas_dussm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, int [...]
+<tr class="memitem:gad864666e842f7d0878b1fb9d57e80c28"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad864666e842f7d0878b1fb9d57e80c28">BLAS_cussm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, int nrh [...]
+<tr class="memitem:gac3d8f0b6742566cbbadf6b18c9aa40b5"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac3d8f0b6742566cbbadf6b18c9aa40b5">blas_cussm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, int [...]
+<tr class="memitem:ga8602eae41f9e5248ff086087abe68bdf"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8602eae41f9e5248ff086087abe68bdf">BLAS_zussm</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> transT, int nrh [...]
+<tr class="memitem:ga60f808ded982233be9a4faaa5fb75db3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga60f808ded982233be9a4faaa5fb75db3">blas_zussm_</a> (enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a> *order, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a> *transT, int [...]
+<tr class="memitem:gad9fe50c2e7a26e6ef83dfd3ea4cfcdd5"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad9fe50c2e7a26e6ef83dfd3ea4cfcdd5">BLAS_suscr_begin</a> (int m, int n)</td></tr>
+<tr class="memitem:gad2f7ede753754c2474d5460a92bba99e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad2f7ede753754c2474d5460a92bba99e">blas_suscr_begin_</a> (int *m, int *n, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gac931dcb1129ee3016ab82602c3d14fee"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac931dcb1129ee3016ab82602c3d14fee">BLAS_duscr_begin</a> (int m, int n)</td></tr>
+<tr class="memitem:gad7d5969e9edee49441fc89d22715e60d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad7d5969e9edee49441fc89d22715e60d">blas_duscr_begin_</a> (int *m, int *n, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga3483c364b4afec22621e46059b166247"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3483c364b4afec22621e46059b166247">BLAS_cuscr_begin</a> (int m, int n)</td></tr>
+<tr class="memitem:gaf4d21720c592de22cfd4139517d9d255"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf4d21720c592de22cfd4139517d9d255">blas_cuscr_begin_</a> (int *m, int *n, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga52b67393ad16e3d40e74fcdba88c7da4"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga52b67393ad16e3d40e74fcdba88c7da4">BLAS_zuscr_begin</a> (int m, int n)</td></tr>
+<tr class="memitem:gae0246836bd8d4b8697c6674998397f3a"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae0246836bd8d4b8697c6674998397f3a">blas_zuscr_begin_</a> (int *m, int *n, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga11c5559450e186c2a86d714f564411f3"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga11c5559450e186c2a86d714f564411f3">BLAS_suscr_block_begin</a> (int Mb, int Nb, int k, int l)</td></tr>
+<tr class="memitem:ga0067882e19affabebf581452a7c05252"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga0067882e19affabebf581452a7c05252">blas_suscr_block_begin_</a> (int *Mb, int *Nb, int *k, int *l, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gac0ca32cd2c78c8553d6d6b324e06ef59"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac0ca32cd2c78c8553d6d6b324e06ef59">BLAS_duscr_block_begin</a> (int Mb, int Nb, int k, int l)</td></tr>
+<tr class="memitem:ga864facf0316453a27af4b7024a11453b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga864facf0316453a27af4b7024a11453b">blas_duscr_block_begin_</a> (int *Mb, int *Nb, int *k, int *l, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga050218d0fa552a3e2c2d5452f876d9b5"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga050218d0fa552a3e2c2d5452f876d9b5">BLAS_cuscr_block_begin</a> (int Mb, int Nb, int k, int l)</td></tr>
+<tr class="memitem:ga967bfc819ed66559e96ae55a6826d1f8"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga967bfc819ed66559e96ae55a6826d1f8">blas_cuscr_block_begin_</a> (int *Mb, int *Nb, int *k, int *l, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga5a261b2d1cc996c2a982ff8469faf286"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5a261b2d1cc996c2a982ff8469faf286">BLAS_zuscr_block_begin</a> (int Mb, int Nb, int k, int l)</td></tr>
+<tr class="memitem:ga62c3bd7ba1a96f82055478d40af67370"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga62c3bd7ba1a96f82055478d40af67370">blas_zuscr_block_begin_</a> (int *Mb, int *Nb, int *k, int *l, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gae7e006a448094a70204be60f24cdf1a3"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae7e006a448094a70204be60f24cdf1a3">BLAS_suscr_variable_block_begin</a> (int Mb, int Nb, const int *K, const int *L)</td></tr>
+<tr class="memitem:gaab267e13449c999ad8a8e3e358f4b2ed"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaab267e13449c999ad8a8e3e358f4b2ed">blas_suscr_variable_block_begin_</a> (int *Mb, int *Nb, const int *K, const int *L, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gae0c3c6dc5503e21afb8192efb0f66edd"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae0c3c6dc5503e21afb8192efb0f66edd">BLAS_duscr_variable_block_begin</a> (int Mb, int Nb, const int *K, const int *L)</td></tr>
+<tr class="memitem:ga12c7c1bdd46724147dbbd9b38dd2028e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga12c7c1bdd46724147dbbd9b38dd2028e">blas_duscr_variable_block_begin_</a> (int *Mb, int *Nb, const int *K, const int *L, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga95174fcf3bfbef91ab6b3b85fc90b128"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga95174fcf3bfbef91ab6b3b85fc90b128">BLAS_cuscr_variable_block_begin</a> (int Mb, int Nb, const int *K, const int *L)</td></tr>
+<tr class="memitem:ga24a2dac4570e6021fdcc5c84b52fb5bb"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga24a2dac4570e6021fdcc5c84b52fb5bb">blas_cuscr_variable_block_begin_</a> (int *Mb, int *Nb, const int *K, const int *L, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gaa582b369a0233027349f8f844cce7622"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa582b369a0233027349f8f844cce7622">BLAS_zuscr_variable_block_begin</a> (int Mb, int Nb, const int *K, const int *L)</td></tr>
+<tr class="memitem:gaa51253d1c144c8aa744b2e13742fec40"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa51253d1c144c8aa744b2e13742fec40">blas_zuscr_variable_block_begin_</a> (int *Mb, int *Nb, const int *K, const int *L, <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga7176a90049256cb0e0fe45db66f57dd2"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga7176a90049256cb0e0fe45db66f57dd2">BLAS_suscr_end</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:ga5822f3be35eeb550c323de69ec9933d3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5822f3be35eeb550c323de69ec9933d3">blas_suscr_end_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga5d9ce97bf054b1e3750eaae5d4e6c335"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5d9ce97bf054b1e3750eaae5d4e6c335">BLAS_duscr_end</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:ga2120eb06b87f0e85d03a368e5bc55485"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2120eb06b87f0e85d03a368e5bc55485">blas_duscr_end_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gac2b5eccd5cf442b5e2e79201d62ca2b5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac2b5eccd5cf442b5e2e79201d62ca2b5">BLAS_cuscr_end</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:gaa78d3bef027e5a29ab5e5dd6188bcd75"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa78d3bef027e5a29ab5e5dd6188bcd75">blas_cuscr_end_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gad6315d71f6f7abf8b82c89c70d6abbf3"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">BLAS_zuscr_end</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:ga6c23466b531e84f472d5fa75228cb895"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6c23466b531e84f472d5fa75228cb895">blas_zuscr_end_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga26e2c422895e5df8492bdb561cab4a54"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga26e2c422895e5df8492bdb561cab4a54">BLAS_suscr_insert_entry</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, float val, int i, int j)</td></tr>
+<tr class="memitem:ga9b3085c739330bca518e8ef371f7d3b1"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9b3085c739330bca518e8ef371f7d3b1">blas_suscr_insert_entry_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, float *val, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga346ff5263bf0b3a5d7dda94e2000130c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga346ff5263bf0b3a5d7dda94e2000130c">BLAS_duscr_insert_entry</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, double val, int i, int j)</td></tr>
+<tr class="memitem:ga29c2f202a144845cc1d32c8d65bd5c5f"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga29c2f202a144845cc1d32c8d65bd5c5f">blas_duscr_insert_entry_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, double *val, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:gaa39564978ebda8a88f8d19e3e060bc4d"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa39564978ebda8a88f8d19e3e060bc4d">BLAS_cuscr_insert_entry</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *val, int i, int j)</td></tr>
+<tr class="memitem:ga6d735497bdd3bbafbb6168cb0fde5103"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6d735497bdd3bbafbb6168cb0fde5103">blas_cuscr_insert_entry_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *val, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga1ffe345c537b53ac5839da21b236d87c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1ffe345c537b53ac5839da21b236d87c">BLAS_zuscr_insert_entry</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *val, int i, int j)</td></tr>
+<tr class="memitem:gaad6627231dc4230affa318726ff3f345"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaad6627231dc4230affa318726ff3f345">blas_zuscr_insert_entry_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *val, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:gac6158601459aabebc22795864a2a62ba"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac6158601459aabebc22795864a2a62ba">BLAS_suscr_insert_entries</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int nnz, const float *val, const int *indx, const int *jndx)</td></tr>
+<tr class="memitem:ga9119b49fd049bcaa310bccb36fcda664"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9119b49fd049bcaa310bccb36fcda664">blas_suscr_insert_entries_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, const float *val, const int *indx, const int *jndx, int *istat)</td></tr>
+<tr class="memitem:gae0683bc8f0af5dd3e53b964190f9e1b4"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae0683bc8f0af5dd3e53b964190f9e1b4">BLAS_duscr_insert_entries</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int nnz, const double *val, const int *indx, const int *jndx)</td></tr>
+<tr class="memitem:gac2c1a4c7b2cebca56aedbad7a002e15f"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac2c1a4c7b2cebca56aedbad7a002e15f">blas_duscr_insert_entries_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, const double *val, const int *indx, const int *jndx, int *istat)</td></tr>
+<tr class="memitem:ga5af752a3fcb2898412f576eee7d9d618"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5af752a3fcb2898412f576eee7d9d618">BLAS_cuscr_insert_entries</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int nnz, const void *val, const int *indx, const int *jndx)</td></tr>
+<tr class="memitem:ga3deb906fcd5f9b9221b5865541c57d18"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3deb906fcd5f9b9221b5865541c57d18">blas_cuscr_insert_entries_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, const void *val, const int *indx, const int *jndx, int *istat)</td></tr>
+<tr class="memitem:gaacc9c9e5c95df4ea6656ad93f1f09666"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaacc9c9e5c95df4ea6656ad93f1f09666">BLAS_zuscr_insert_entries</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int nnz, const void *val, const int *indx, const int *jndx)</td></tr>
+<tr class="memitem:gad9ad3afc16fc0181117004fd46ff78ae"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad9ad3afc16fc0181117004fd46ff78ae">blas_zuscr_insert_entries_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, const void *val, const int *indx, const int *jndx, int *istat)</td></tr>
+<tr class="memitem:ga547d271038794dfc797aecc70e294761"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga547d271038794dfc797aecc70e294761">BLAS_suscr_insert_col</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int j, int nnz, const float *val, const int *indx)</td></tr>
+<tr class="memitem:ga2d8c691851acf099c25eff1a4c2885c1"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2d8c691851acf099c25eff1a4c2885c1">blas_suscr_insert_col_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *j, int *nnz, const float *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga8ee73d3b27bdc68e12c85ba281a337be"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8ee73d3b27bdc68e12c85ba281a337be">BLAS_duscr_insert_col</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int j, int nnz, const double *val, const int *indx)</td></tr>
+<tr class="memitem:ga5645393bb00d715d882e8e2d55c3f0d1"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5645393bb00d715d882e8e2d55c3f0d1">blas_duscr_insert_col_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *j, int *nnz, const double *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga589495aa8acd4eac99ef9132bc4062c9"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga589495aa8acd4eac99ef9132bc4062c9">BLAS_cuscr_insert_col</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int j, int nnz, const void *val, const int *indx)</td></tr>
+<tr class="memitem:ga1aadf4dc810ff6eb123a1bf9c859efe8"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1aadf4dc810ff6eb123a1bf9c859efe8">blas_cuscr_insert_col_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *j, int *nnz, const void *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga00cfdd3669b146b25d42a32f104ff8a3"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga00cfdd3669b146b25d42a32f104ff8a3">BLAS_zuscr_insert_col</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int j, int nnz, const void *val, const int *indx)</td></tr>
+<tr class="memitem:ga10a2dc6a5399459c83282bda757f5096"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga10a2dc6a5399459c83282bda757f5096">blas_zuscr_insert_col_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *j, int *nnz, const void *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga9b815fa125e3c84a6e6a6ead2c9ef87b"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9b815fa125e3c84a6e6a6ead2c9ef87b">BLAS_suscr_insert_row</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int nnz, const float *val, const int *indx)</td></tr>
+<tr class="memitem:ga71080ddbf0e0e602c7bc36993a6c88ca"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga71080ddbf0e0e602c7bc36993a6c88ca">blas_suscr_insert_row_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *nnz, const float *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:gac3472ca6b036771a68d6f5f01387e482"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac3472ca6b036771a68d6f5f01387e482">BLAS_duscr_insert_row</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int nnz, const double *val, const int *indx)</td></tr>
+<tr class="memitem:gaa72e5450302fa424dcd6cfae0bad872d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa72e5450302fa424dcd6cfae0bad872d">blas_duscr_insert_row_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *nnz, const double *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga77929c94cee3278cc7594a3f1377f5f8"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga77929c94cee3278cc7594a3f1377f5f8">BLAS_cuscr_insert_row</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int nnz, const void *val, const int *indx)</td></tr>
+<tr class="memitem:gad4acfbfdf33a5682ac657add0292711d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad4acfbfdf33a5682ac657add0292711d">blas_cuscr_insert_row_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *nnz, const void *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:gab52e13dc7c61fc48e593276f04cb2d30"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gab52e13dc7c61fc48e593276f04cb2d30">BLAS_zuscr_insert_row</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int nnz, const void *val, const int *indx)</td></tr>
+<tr class="memitem:gaf871e29bfce399dedbebe2aa9c7831df"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf871e29bfce399dedbebe2aa9c7831df">blas_zuscr_insert_row_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *nnz, const void *val, const int *indx, int *istat)</td></tr>
+<tr class="memitem:ga6e567e79f675ed861c8f446d0e7a78f5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6e567e79f675ed861c8f446d0e7a78f5">BLAS_suscr_insert_clique</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int k, const int l, const float *val, const int row_stride, const int col_stride, const int *indx, con [...]
+<tr class="memitem:gafcee9667fc445e32012c960fca7e698d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafcee9667fc445e32012c960fca7e698d">blas_suscr_insert_clique_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *k, const int *l, const float *val, const int *row_stride, const int *col_stride, const int *in [...]
+<tr class="memitem:ga290547e34be3648b2fe6a7378e59a7ec"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga290547e34be3648b2fe6a7378e59a7ec">BLAS_duscr_insert_clique</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int k, const int l, const double *val, const int row_stride, const int col_stride, const int *indx, co [...]
+<tr class="memitem:ga1f7870f8a1114b94444c721c933e8bef"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1f7870f8a1114b94444c721c933e8bef">blas_duscr_insert_clique_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *k, const int *l, const double *val, const int *row_stride, const int *col_stride, const int *i [...]
+<tr class="memitem:gaf089aaac5d65a4e38130b25d5ba2ba27"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf089aaac5d65a4e38130b25d5ba2ba27">BLAS_cuscr_insert_clique</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int k, const int l, const void *val, const int row_stride, const int col_stride, const int *indx, cons [...]
+<tr class="memitem:ga06acafbf28371b1ad8a75a85173261e6"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga06acafbf28371b1ad8a75a85173261e6">blas_cuscr_insert_clique_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *k, const int *l, const void *val, const int *row_stride, const int *col_stride, const int *ind [...]
+<tr class="memitem:ga52519d2caa1070b0c80ac3c6cb104d92"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga52519d2caa1070b0c80ac3c6cb104d92">BLAS_zuscr_insert_clique</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int k, const int l, const void *val, const int row_stride, const int col_stride, const int *indx, cons [...]
+<tr class="memitem:ga8c3430083655b74988536d823e40c723"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8c3430083655b74988536d823e40c723">blas_zuscr_insert_clique_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *k, const int *l, const void *val, const int *row_stride, const int *col_stride, const int *ind [...]
+<tr class="memitem:gaa682b478ac48e12d4a091977e8c45768"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaa682b478ac48e12d4a091977e8c45768">BLAS_suscr_insert_block</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const float *val, int row_stride, int col_stride, int i, int j)</td></tr>
+<tr class="memitem:ga61080e2828351bd1585deb2713ed8a29"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga61080e2828351bd1585deb2713ed8a29">blas_suscr_insert_block_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const float *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga5d35aa3e27cdbf8a50db5b47ff5e0892"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga5d35aa3e27cdbf8a50db5b47ff5e0892">BLAS_duscr_insert_block</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const double *val, int row_stride, int col_stride, int i, int j)</td></tr>
+<tr class="memitem:ga38012bbc4e99df72fb95409a4860ead7"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga38012bbc4e99df72fb95409a4860ead7">blas_duscr_insert_block_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const double *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga42054351f49850f079733143b2af87fb"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga42054351f49850f079733143b2af87fb">BLAS_cuscr_insert_block</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *val, int row_stride, int col_stride, int i, int j)</td></tr>
+<tr class="memitem:ga527ae15ee9e003d948494d9fcdad5dba"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga527ae15ee9e003d948494d9fcdad5dba">blas_cuscr_insert_block_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga573ee2ea89db4a133b8729abbb1223f0"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga573ee2ea89db4a133b8729abbb1223f0">BLAS_zuscr_insert_block</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *val, int row_stride, int col_stride, int i, int j)</td></tr>
+<tr class="memitem:gac3837cd5c7b2e8ac11c6c0e5cff8914c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac3837cd5c7b2e8ac11c6c0e5cff8914c">blas_zuscr_insert_block_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)</td></tr>
+<tr class="memitem:ga2ff68116b5ae79c37bf335096de973c0"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga2ff68116b5ae79c37bf335096de973c0">BLAS_uscr_end</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:ga60974067bf5367a9a3c6eaa9f6f8f4ab"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga60974067bf5367a9a3c6eaa9f6f8f4ab">blas_uscr_end_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:ga8b0cca8196f40f7b55084a978b40717f"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8b0cca8196f40f7b55084a978b40717f">BLAS_usds</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+<tr class="memitem:gae4db91cffaf71632bd41b7423c64b757"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae4db91cffaf71632bd41b7423c64b757">blas_usds_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *istat)</td></tr>
+<tr class="memitem:gae671b9fc06140680a8c104ef4f0f54f0"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae671b9fc06140680a8c104ef4f0f54f0">BLAS_susrows_scale</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const float *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</ [...]
+<tr class="memitem:ga9de54361f778577330c6c5ece88a63c3"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9de54361f778577330c6c5ece88a63c3">blas_susrows_scale_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const float *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_typ [...]
+<tr class="memitem:ga01917c64887638dfb5226be1f87d964a"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga01917c64887638dfb5226be1f87d964a">BLAS_dusrows_scale</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const double *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type< [...]
+<tr class="memitem:ga9f09f9d05e01d5b354ce234781e3945a"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9f09f9d05e01d5b354ce234781e3945a">blas_dusrows_scale_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const double *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_ty [...]
+<tr class="memitem:gafc79de03622ceeb2e0b4343fe5904a36"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafc79de03622ceeb2e0b4343fe5904a36">BLAS_cusrows_scale</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a [...]
+<tr class="memitem:gae09ac29c14cede27a8d6a2be2687453e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae09ac29c14cede27a8d6a2be2687453e">blas_cusrows_scale_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type [...]
+<tr class="memitem:gad551879cdde6d16d9dd5b9edc647c667"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad551879cdde6d16d9dd5b9edc647c667">BLAS_zusrows_scale</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const void *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a [...]
+<tr class="memitem:ga806bb32c4231e4cd9d833370484ad369"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga806bb32c4231e4cd9d833370484ad369">blas_zusrows_scale_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const void *d, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type [...]
+<tr class="memitem:ga1113eda1c806ca3631fefde07624fbd6"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1113eda1c806ca3631fefde07624fbd6">BLAS_susget_diag</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, float *d)</td></tr>
+<tr class="memitem:ga0444e8a4b321bf1488fb496bdf3116d2"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga0444e8a4b321bf1488fb496bdf3116d2">blas_susget_diag_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, float *d, int *istat)</td></tr>
+<tr class="memitem:ga35b70a7c3083b791cf1b94cb20ef57be"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga35b70a7c3083b791cf1b94cb20ef57be">BLAS_dusget_diag</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, double *d)</td></tr>
+<tr class="memitem:ga7cfde04c833adeb887db75f4b2e104dd"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga7cfde04c833adeb887db75f4b2e104dd">blas_dusget_diag_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, double *d, int *istat)</td></tr>
+<tr class="memitem:ga4ec4b6dce3701c5803efa6b7455e1504"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4ec4b6dce3701c5803efa6b7455e1504">BLAS_cusget_diag</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *d)</td></tr>
+<tr class="memitem:ga4865a8fda031074a0d91cf5c548584b9"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4865a8fda031074a0d91cf5c548584b9">blas_cusget_diag_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *d, int *istat)</td></tr>
+<tr class="memitem:gad175937c05d3d05d3aa7fa35eb3028ec"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad175937c05d3d05d3aa7fa35eb3028ec">BLAS_zusget_diag</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *d)</td></tr>
+<tr class="memitem:ga73feb9adc685f7ff1d66763b0801a0f9"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga73feb9adc685f7ff1d66763b0801a0f9">blas_zusget_diag_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *d, int *istat)</td></tr>
+<tr class="memitem:gad84dbcdeda549e1b0361f7ade7a38b13"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad84dbcdeda549e1b0361f7ade7a38b13">BLAS_susget_rows_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int fr, int lr, int *nnzp)</td></tr>
+<tr class="memitem:ga1a8c39f41962e3be6ac84ea3be73f7a0"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga1a8c39f41962e3be6ac84ea3be73f7a0">blas_susget_rows_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *fr, int *lr, int *nnzp, int *istat)</td></tr>
+<tr class="memitem:gab866cf0951b576a47da3864d668919f5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gab866cf0951b576a47da3864d668919f5">BLAS_dusget_rows_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int fr, int lr, int *nnzp)</td></tr>
+<tr class="memitem:gac09a79789dc8b79d2e5a375732703103"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac09a79789dc8b79d2e5a375732703103">blas_dusget_rows_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *fr, int *lr, int *nnzp, int *istat)</td></tr>
+<tr class="memitem:gac50e955d6e2bff77e2c3ac2146c77aaf"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac50e955d6e2bff77e2c3ac2146c77aaf">BLAS_cusget_rows_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int fr, int lr, int *nnzp)</td></tr>
+<tr class="memitem:ga9e11da08762387d8a7a885665298e815"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9e11da08762387d8a7a885665298e815">blas_cusget_rows_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *fr, int *lr, int *nnzp, int *istat)</td></tr>
+<tr class="memitem:gadeb3cbe1cc6987763a55665bcdb8aef5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gadeb3cbe1cc6987763a55665bcdb8aef5">BLAS_zusget_rows_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int fr, int lr, int *nnzp)</td></tr>
+<tr class="memitem:ga50cba1e236b63775110d6d1b292417da"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga50cba1e236b63775110d6d1b292417da">blas_zusget_rows_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *fr, int *lr, int *nnzp, int *istat)</td></tr>
+<tr class="memitem:ga8f78343207ff584d2d78789bd90e5533"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8f78343207ff584d2d78789bd90e5533">BLAS_susget_rows_sparse</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, float *VA, int *IA, int *JA, int *nnz, int fr, int lr)</td></tr>
+<tr class="memitem:ga0977f63d781215c826aa5a0ea2df9f47"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga0977f63d781215c826aa5a0ea2df9f47">blas_susget_rows_sparse_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, float *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)</td></tr>
+<tr class="memitem:ga498d143bae71d800dc35e2f1ee071359"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga498d143bae71d800dc35e2f1ee071359">BLAS_dusget_rows_sparse</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, double *VA, int *IA, int *JA, int *nnz, int fr, int lr)</td></tr>
+<tr class="memitem:gaf2e6ab2c5cbd23a7690bbe8e26794033"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf2e6ab2c5cbd23a7690bbe8e26794033">blas_dusget_rows_sparse_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, double *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)</td></tr>
+<tr class="memitem:ga23f0c1852e05a426d24d2eb1bcae168b"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga23f0c1852e05a426d24d2eb1bcae168b">BLAS_cusget_rows_sparse</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *VA, int *IA, int *JA, int *nnz, int fr, int lr)</td></tr>
+<tr class="memitem:ga04751c01dcfb6730a33eaa91f403dd09"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga04751c01dcfb6730a33eaa91f403dd09">blas_cusget_rows_sparse_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)</td></tr>
+<tr class="memitem:gaf9d44fc73526a4fdf9627424626bf4a5"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf9d44fc73526a4fdf9627424626bf4a5">BLAS_zusget_rows_sparse</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *VA, int *IA, int *JA, int *nnz, int fr, int lr)</td></tr>
+<tr class="memitem:ga63f072aa25f7f7f8ac1ac4e32aae0c2e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga63f072aa25f7f7f8ac1ac4e32aae0c2e">blas_zusget_rows_sparse_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)</td></tr>
+<tr class="memitem:gafc031d78d0274c81039c2448a403cd10"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafc031d78d0274c81039c2448a403cd10">BLAS_susget_matrix_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int *nnz)</td></tr>
+<tr class="memitem:ga039a9d4da3423ea71726242e1c1251e7"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga039a9d4da3423ea71726242e1c1251e7">blas_susget_matrix_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, int *istat)</td></tr>
+<tr class="memitem:ga441bff94fdc50b9bf6e180d36f51c3ce"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga441bff94fdc50b9bf6e180d36f51c3ce">BLAS_dusget_matrix_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int *nnz)</td></tr>
+<tr class="memitem:ga3a4bc573dc07849e7a72ecb2d2f0c31d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3a4bc573dc07849e7a72ecb2d2f0c31d">blas_dusget_matrix_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, int *istat)</td></tr>
+<tr class="memitem:gafe27f3044269d37cadb569fc6796ac01"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafe27f3044269d37cadb569fc6796ac01">BLAS_cusget_matrix_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int *nnz)</td></tr>
+<tr class="memitem:ga19e30bb70673342b4d6308bd9cf46884"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga19e30bb70673342b4d6308bd9cf46884">blas_cusget_matrix_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, int *istat)</td></tr>
+<tr class="memitem:ga85e15d7a3331e8ed4d702908477e2896"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga85e15d7a3331e8ed4d702908477e2896">BLAS_zusget_matrix_nnz</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int *nnz)</td></tr>
+<tr class="memitem:ga9bdd048dea68ecbd8fd712349d4fbf13"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga9bdd048dea68ecbd8fd712349d4fbf13">blas_zusget_matrix_nnz_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *nnz, int *istat)</td></tr>
+<tr class="memitem:gafc49f44b76021677000bebe7d7fe133b"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gafc49f44b76021677000bebe7d7fe133b">BLAS_susget_infinity_norm</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, float *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type [...]
+<tr class="memitem:gaffaaf5b49e850adda0163b6bc082077d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaffaaf5b49e850adda0163b6bc082077d">blas_susget_infinity_norm_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, float *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_t [...]
+<tr class="memitem:ga39b4e25d5d5ce080f8dd994856e41fd0"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga39b4e25d5d5ce080f8dd994856e41fd0">BLAS_dusget_infinity_norm</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, double *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_typ [...]
+<tr class="memitem:ga15c7a93ed41a5488c0ef814d2061214a"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga15c7a93ed41a5488c0ef814d2061214a">blas_dusget_infinity_norm_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, double *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_ [...]
+<tr class="memitem:ga65e5bef193bd5a2d47e80bff7eebed8e"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga65e5bef193bd5a2d47e80bff7eebed8e">BLAS_cusget_infinity_norm</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type< [...]
+<tr class="memitem:gacefa288104224e6c8f069f4001dacc08"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gacefa288104224e6c8f069f4001dacc08">blas_cusget_infinity_norm_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_ty [...]
+<tr class="memitem:ga286c2cf2c749c80c8b71ff2f4bdb1566"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga286c2cf2c749c80c8b71ff2f4bdb1566">BLAS_zusget_infinity_norm</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, void *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type< [...]
+<tr class="memitem:ga01b88a27714ca87085421fd9a4f3e479"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga01b88a27714ca87085421fd9a4f3e479">blas_zusget_infinity_norm_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, void *in, enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_ty [...]
+<tr class="memitem:gad3e05b01efa2857c0938ada63f30cadf"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad3e05b01efa2857c0938ada63f30cadf">BLAS_susset_elements</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int *ia, const int *ja, const float *va, int nnz)</td></tr>
+<tr class="memitem:gac0abb530fc46d610bf56e7fb1ef42c6c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac0abb530fc46d610bf56e7fb1ef42c6c">blas_susset_elements_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *ia, const int *ja, const float *va, int *nnz, int *istat)</td></tr>
+<tr class="memitem:gae34ff937437af99d317739192e2783da"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gae34ff937437af99d317739192e2783da">BLAS_dusset_elements</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int *ia, const int *ja, const double *va, int nnz)</td></tr>
+<tr class="memitem:ga8e2acb49dac4221d1554c30238bd6747"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga8e2acb49dac4221d1554c30238bd6747">blas_dusset_elements_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *ia, const int *ja, const double *va, int *nnz, int *istat)</td></tr>
+<tr class="memitem:ga3b358be87656e2d8065e1d30dd8060f4"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga3b358be87656e2d8065e1d30dd8060f4">BLAS_cusset_elements</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int *ia, const int *ja, const void *va, int nnz)</td></tr>
+<tr class="memitem:ga38398053da29e668ee440e55f675532b"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga38398053da29e668ee440e55f675532b">blas_cusset_elements_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *ia, const int *ja, const void *va, int *nnz, int *istat)</td></tr>
+<tr class="memitem:gac542af7517c9f667122e8bdc408487b3"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac542af7517c9f667122e8bdc408487b3">BLAS_zusset_elements</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, const int *ia, const int *ja, const void *va, int nnz)</td></tr>
+<tr class="memitem:ga156a8d0225d9761cd58e15e026b9ba2e"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga156a8d0225d9761cd58e15e026b9ba2e">blas_zusset_elements_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, const int *ia, const int *ja, const void *va, int *nnz, int *istat)</td></tr>
+<tr class="memitem:gaf17e549ec8cf353144ac1e3a1f080f46"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaf17e549ec8cf353144ac1e3a1f080f46">BLAS_susset_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, float *v)</td></tr>
+<tr class="memitem:gab8c3e5745870d4399382051dcedad144"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gab8c3e5745870d4399382051dcedad144">blas_susset_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, float *v, int *istat)</td></tr>
+<tr class="memitem:gac8aa3ed1e29f2555519421290d236d0c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac8aa3ed1e29f2555519421290d236d0c">BLAS_dusset_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, double *v)</td></tr>
+<tr class="memitem:gab50cd8a5a6a5d866789628da0c9141a2"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gab50cd8a5a6a5d866789628da0c9141a2">blas_dusset_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, double *v, int *istat)</td></tr>
+<tr class="memitem:ga778acfebd02199f440b890b0176af19c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga778acfebd02199f440b890b0176af19c">BLAS_cusset_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, void *v)</td></tr>
+<tr class="memitem:ga4a32533889a4ed82a21f457d1253317d"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4a32533889a4ed82a21f457d1253317d">blas_cusset_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, void *v, int *istat)</td></tr>
+<tr class="memitem:gaca954a070d476342e254587fc2faa7fd"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaca954a070d476342e254587fc2faa7fd">BLAS_zusset_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, void *v)</td></tr>
+<tr class="memitem:ga52efe19f0972fa51ac6329cf717b676c"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga52efe19f0972fa51ac6329cf717b676c">blas_zusset_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, void *v, int *istat)</td></tr>
+<tr class="memitem:gad86989cd1f58003617f3db251b6fc0f1"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gad86989cd1f58003617f3db251b6fc0f1">BLAS_susget_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, float *v)</td></tr>
+<tr class="memitem:gaac53e141083bc9871d81b587e5f785c1"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gaac53e141083bc9871d81b587e5f785c1">blas_susget_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, float *v, int *istat)</td></tr>
+<tr class="memitem:gacf35fa073f6cc991efe75f6a012a9a04"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gacf35fa073f6cc991efe75f6a012a9a04">BLAS_dusget_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, double *v)</td></tr>
+<tr class="memitem:ga6443c32b223693698a8a0f0198ae4bee"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga6443c32b223693698a8a0f0198ae4bee">blas_dusget_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, double *v, int *istat)</td></tr>
+<tr class="memitem:ga4c7eae1cfcd8cafc16f31b169c4a7514"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga4c7eae1cfcd8cafc16f31b169c4a7514">BLAS_cusget_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, void *v)</td></tr>
+<tr class="memitem:ga104bc9ee1e6ce32012933e822019ecf0"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga104bc9ee1e6ce32012933e822019ecf0">blas_cusget_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, void *v, int *istat)</td></tr>
+<tr class="memitem:ga27417bc0d923f7288ed736837492275c"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga27417bc0d923f7288ed736837492275c">BLAS_zusget_element</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int i, int j, void *v)</td></tr>
+<tr class="memitem:ga845cca2b512e38b467fc0d4b93d660b7"><td class="memItemLeft" align="right" valign="top">void </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga845cca2b512e38b467fc0d4b93d660b7">blas_zusget_element_</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> *A, int *i, int *j, void *v, int *istat)</td></tr>
+<tr class="memitem:ga89577a4a63cc8659f1d463fb819bc002"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int pname)</td></tr>
+<tr class="memitem:ga852f4a68eef6963708d11f37e975b178"><td class="memItemLeft" align="right" valign="top">int </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A, int pname)</td></tr>
+<tr class="memitem:a7769e3aac9ffdba04f29dd1f8f57daa4"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas_8h.html#a7769e3aac9ffdba04f29dd1f8f57daa4">rsb_load_spblas_matrix_file_as_matrix_market</a> (const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *filename, <a class=" [...]
+<tr class="memitem:gac4d8c73e5d9faa85209bcc4e885d4ff1"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__sparse__blas.html#gac4d8c73e5d9faa85209bcc4e885d4ff1">rsb_blas_get_mtx</a> (<a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> A)</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>This file specifies the Sparse BLAS interface to librsb. Supported types :(float,double,float complex,double complex) . Unsupported types:() . Level 1 ops :(dot,axpy,ga,gz,sc) . Level 2 ops :(mv,sv) . Level 3 ops :(mm,sm) . </p>
+<dl class="section author"><dt>Author</dt><dd>Michele Martone </dd></dl>
+</div><h2>Macro Definition Documentation</h2>
+<a class="anchor" id="aab00e94b9818e92bb03c32f7ec677932"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define BLAS_ENUM_H</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a5eec91b6d95962811bd9cb4e37266214"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define BLAS_usgp   rsb_wp__BLAS_usgp</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a6719ae77dfef6d6dd0790e34a65c1924"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define BLAS_ussp   rsb_wp__BLAS_ussp</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<h2>Typedef Documentation</h2>
+<a class="anchor" id="a6f56456b01e0cc6b25b81201aa67c163"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">typedef int <a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The sparse matrix descriptor type. </p>
+
+</div>
+</div>
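+<p>Since the descriptor is a plain integer handle, it can be copied and passed by value; only the library-side object it names carries state. A brief sketch, reusing the construction calls assumed above: </p>
+<pre class="fragment">
+blas_sparse_matrix A = BLAS_duscr_begin(10, 10); /* obtain a fresh handle */
+blas_sparse_matrix B = A;                        /* B names the same matrix */
+/* ... insert entries, BLAS_duscr_end(A), then use A or B interchangeably ... */
+BLAS_usds(A);                                    /* both handles are now stale */
+</pre>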
+<h2>Enumeration Type Documentation</h2>
+<a class="anchor" id="a3fe740ad5a139d723de260d638987e9e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a3fe740ad5a139d723de260d638987e9e">blas_base_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Index base (valid at matrix build/modify time). </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a3fe740ad5a139d723de260d638987e9ea86431e076106ab9784bc5b203d4aa3e0"></a>blas_zero_base</em> </td><td>
+<p>Zero-based indices (the default when the matrix is created using the C interface). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad"></a>blas_one_base</em> </td><td>
+<p>One-based indices (the default when the matrix is created using the Fortran interface). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3fe740ad5a139d723de260d638987e9ea86431e076106ab9784bc5b203d4aa3e0"></a>blas_zero_base</em> </td><td>
+<p>Zero based indices (default when matrix created using the C interface). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad"></a>blas_one_base</em> </td><td>
+<p>Zero based indices (default when matrix created using the Fortran interface). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3fe740ad5a139d723de260d638987e9ea86431e076106ab9784bc5b203d4aa3e0"></a>blas_zero_base</em> </td><td>
+<p>Zero based indices (default when matrix created using the C interface). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3fe740ad5a139d723de260d638987e9ea7afb2ddbf81bc727135963e14a2c62ad"></a>blas_one_base</em> </td><td>
+<p>Zero based indices (default when matrix created using the Fortran interface). </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
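+<p>For example, a matrix assembled through the C interface from one-based (Fortran-style) triplets can be declared as such right after creation; a sketch using the single-precision constructors assumed from the Sparse BLAS standard, with error checks omitted: </p>
+<pre class="fragment">
+blas_sparse_matrix A = BLAS_suscr_begin(3, 3);
+BLAS_ussp(A, blas_one_base);            /* the indices below are one-based */
+BLAS_suscr_insert_entry(A, 1.0f, 1, 1); /* first row, first column */
+BLAS_suscr_insert_entry(A, 2.0f, 3, 3); /* last row, last column */
+BLAS_suscr_end(A);
+</pre>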
+<a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aa"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a6ef40f4bf16a7f484390a20fdb55d3aa">blas_cmach_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Unused/Unsupported. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa450c812108b1c81a0f6ef65c51f64d7b"></a>blas_base</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa16a1c297dab1551cf40bbe5210395f10"></a>blas_t</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa3d5fea2fad72607b2368ace39fa89280"></a>blas_rnd</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa640085acde3bcb1c78c42e9b5838c714"></a>blas_ieee</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaad93796f5d1a8bc7bb4d9512dc312e8df"></a>blas_emin</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa26e73a26ce9e06149fff858bdfb5f363"></a>blas_emax</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa3e407f69dd9a70e04e91602a3d76ae4a"></a>blas_eps</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaaadf329e788494c80e522348ef1210d9f"></a>blas_prec</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa4159c63ae4ee2275d8e09d02ecb85428"></a>blas_underflow</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa51424a153ba5a72b4fb5018732bbaa02"></a>blas_overflow</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa0a3cdfdc2ddd9ce036017d4c57aa941a"></a>blas_sfmin</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa450c812108b1c81a0f6ef65c51f64d7b"></a>blas_base</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa16a1c297dab1551cf40bbe5210395f10"></a>blas_t</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa3d5fea2fad72607b2368ace39fa89280"></a>blas_rnd</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa640085acde3bcb1c78c42e9b5838c714"></a>blas_ieee</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaad93796f5d1a8bc7bb4d9512dc312e8df"></a>blas_emin</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa26e73a26ce9e06149fff858bdfb5f363"></a>blas_emax</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa3e407f69dd9a70e04e91602a3d76ae4a"></a>blas_eps</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaaadf329e788494c80e522348ef1210d9f"></a>blas_prec</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa4159c63ae4ee2275d8e09d02ecb85428"></a>blas_underflow</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa51424a153ba5a72b4fb5018732bbaa02"></a>blas_overflow</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa0a3cdfdc2ddd9ce036017d4c57aa941a"></a>blas_sfmin</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa450c812108b1c81a0f6ef65c51f64d7b"></a>blas_base</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa16a1c297dab1551cf40bbe5210395f10"></a>blas_t</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa3d5fea2fad72607b2368ace39fa89280"></a>blas_rnd</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa640085acde3bcb1c78c42e9b5838c714"></a>blas_ieee</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaad93796f5d1a8bc7bb4d9512dc312e8df"></a>blas_emin</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa26e73a26ce9e06149fff858bdfb5f363"></a>blas_emax</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa3e407f69dd9a70e04e91602a3d76ae4a"></a>blas_eps</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaaadf329e788494c80e522348ef1210d9f"></a>blas_prec</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa4159c63ae4ee2275d8e09d02ecb85428"></a>blas_underflow</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa51424a153ba5a72b4fb5018732bbaa02"></a>blas_overflow</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a6ef40f4bf16a7f484390a20fdb55d3aaa0a3cdfdc2ddd9ce036017d4c57aa941a"></a>blas_sfmin</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a125c156d54359fba48a6b9cf2a2d0a07"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a125c156d54359fba48a6b9cf2a2d0a07">blas_conj_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Unused/Unsupported. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1"></a>blas_conj</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a125c156d54359fba48a6b9cf2a2d0a07a809495dc4e17c4b059c009bc90f00bf7"></a>blas_no_conj</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1"></a>blas_conj</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a125c156d54359fba48a6b9cf2a2d0a07a809495dc4e17c4b059c009bc90f00bf7"></a>blas_no_conj</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a125c156d54359fba48a6b9cf2a2d0a07a818f88453d90ea14a38c3f24ab9c47b1"></a>blas_conj</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a125c156d54359fba48a6b9cf2a2d0a07a809495dc4e17c4b059c009bc90f00bf7"></a>blas_no_conj</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="ad7b35ac9114bfe21e15d011bf878b164"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#ad7b35ac9114bfe21e15d011bf878b164">blas_diag_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Specifies (<a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a>) or inquiries (<a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a>) whether the diagonal of a matrix is (implicitly) unitary or not. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="ad7b35ac9114bfe21e15d011bf878b164a3e6acad666ce6b542ac341569b83624f"></a>blas_non_unit_diag</em> </td><td>
+<p>Non-unit diagonal matrix (the default). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ad7b35ac9114bfe21e15d011bf878b164a2f5e42e04fbce66ae47fe91d9a31b52c"></a>blas_unit_diag</em> </td><td>
+<p>Unit diagonal matrix (the diagonal is implicitly taken as all ones). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ad7b35ac9114bfe21e15d011bf878b164a3e6acad666ce6b542ac341569b83624f"></a>blas_non_unit_diag</em> </td><td>
+<p>Unit diagional matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ad7b35ac9114bfe21e15d011bf878b164a2f5e42e04fbce66ae47fe91d9a31b52c"></a>blas_unit_diag</em> </td><td>
+<p>Non unit diagional matrix (the default). </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ad7b35ac9114bfe21e15d011bf878b164a3e6acad666ce6b542ac341569b83624f"></a>blas_non_unit_diag</em> </td><td>
+<p>Unit diagional matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ad7b35ac9114bfe21e15d011bf878b164a2f5e42e04fbce66ae47fe91d9a31b52c"></a>blas_unit_diag</em> </td><td>
+<p>Non unit diagional matrix (the default). </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
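+<p>A typical use is to mark a triangular factor as having an implicit unit diagonal before assembly ends, so that the diagonal need not be stored. A sketch, assuming the <code>blas_lower_triangular</code> property from the Sparse BLAS standard and omitting error checks: </p>
+<pre class="fragment">
+int n = 4;
+blas_sparse_matrix L = BLAS_duscr_begin(n, n);
+BLAS_ussp(L, blas_lower_triangular); /* only the strictly lower part is inserted */
+BLAS_ussp(L, blas_unit_diag);        /* the diagonal is implicitly all ones */
+/* ... insert strictly lower triangular entries ... */
+BLAS_duscr_end(L);
+/* BLAS_usgp(L, blas_unit_diag) now reports the property as set */
+</pre>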
+</div>
+<a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a09d8be749e909b403b1563f0ca84aef8">blas_field_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Numerical field type; can be used with <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> to inquire about a matrix's numerical type (1 is returned on success, 0 on failure). </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8a1a77ce97fa91f37a776fe3af3f0589d8"></a>blas_complex</em> </td><td>
+<p>Will succeed if matrix is of 'C' or 'Z' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8adf886a38a73b1de541eb9d32adb50a4d"></a>blas_real</em> </td><td>
+<p>Will succeed if matrix is of 'S' or 'D' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8a28a1eb1d9dde753641767cb33f7d5852"></a>blas_double_precision</em> </td><td>
+<p>Will succeed if matrix is of 'D' or 'Z' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8aa4e3065824f579e62b15ba908e625df6"></a>blas_single_precision</em> </td><td>
+<p>Will succeed if matrix is of 'S' or 'C' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8a1a77ce97fa91f37a776fe3af3f0589d8"></a>blas_complex</em> </td><td>
+<p>Will succeed if matrix is of 'C' or 'Z' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8adf886a38a73b1de541eb9d32adb50a4d"></a>blas_real</em> </td><td>
+<p>Will succeed if matrix is of 'S' or 'D' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8a28a1eb1d9dde753641767cb33f7d5852"></a>blas_double_precision</em> </td><td>
+<p>Will succeed if matrix is of 'D' or 'Z' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8aa4e3065824f579e62b15ba908e625df6"></a>blas_single_precision</em> </td><td>
+<p>Will succeed if matrix is of 'S' or 'C' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8a1a77ce97fa91f37a776fe3af3f0589d8"></a>blas_complex</em> </td><td>
+<p>Will succeed if matrix is of 'C' or 'Z' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8adf886a38a73b1de541eb9d32adb50a4d"></a>blas_real</em> </td><td>
+<p>Will succeed if matrix is of 'S' or 'D' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8a28a1eb1d9dde753641767cb33f7d5852"></a>blas_double_precision</em> </td><td>
+<p>Will succeed if matrix is of 'D' or 'Z' type. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a09d8be749e909b403b1563f0ca84aef8aa4e3065824f579e62b15ba908e625df6"></a>blas_single_precision</em> </td><td>
+<p>Will succeed if matrix is of 'S' or 'C' type. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
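+<p>For example, given a valid descriptor <code>A</code> of unknown numerical type, one can branch on precision and field; a minimal sketch: </p>
+<pre class="fragment">
+if (BLAS_usgp(A, blas_double_precision) == 1)
+    { /* 'D' or 'Z' typed: use double precision buffers */ }
+else if (BLAS_usgp(A, blas_single_precision) == 1)
+    { /* 'S' or 'C' typed: use single precision buffers */ }
+if (BLAS_usgp(A, blas_complex) == 1)
+    { /* 'C' or 'Z' typed: values are complex */ }
+</pre>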
+<a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06">blas_handle_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The following are not fully implemented; they are usable with <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a>. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7"></a>blas_invalid_handle</em> </td><td>
+<p>Used to check whether the handle is invalid. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06a0af06bd9167e03014cc95fffaa2901e5"></a>blas_new_handle</em> </td><td>
+<p>Will give 1 if the handle is new. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06a711ecc7da9546cf3ac76a29e297f3eb0"></a>blas_open_handle</em> </td><td>
+<p>Will give 1 if the handle is open. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6"></a>blas_valid_handle</em> </td><td>
+<p>Will give 1 if the handle is valid (that is, after <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d9ce97bf054b1e3750eaae5d4e6c335">BLAS_duscr_end</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">BLAS_zuscr_end</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#gac2b5eccd5cf442b5e2e79201d62ca2b5">BLAS_cuscr_end</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">BLAS_zuscr_end</a [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7"></a>blas_invalid_handle</em> </td><td>
+<p>Used to check whether the handle is invalid. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06a0af06bd9167e03014cc95fffaa2901e5"></a>blas_new_handle</em> </td><td>
+<p>Will give 1 if the handle is new. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06a711ecc7da9546cf3ac76a29e297f3eb0"></a>blas_open_handle</em> </td><td>
+<p>will give 1 if the handle is open. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6"></a>blas_valid_handle</em> </td><td>
+<p>Will give 1 if the handle is valid (that is, after <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d9ce97bf054b1e3750eaae5d4e6c335">BLAS_duscr_end</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">BLAS_zuscr_end</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#gac2b5eccd5cf442b5e2e79201d62ca2b5">BLAS_cuscr_end</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">BLAS_zuscr_end</a [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06a51022d3d696b9aee38d51040a5b01da7"></a>blas_invalid_handle</em> </td><td>
+<p>Used to check whether the handle is invalid. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06a0af06bd9167e03014cc95fffaa2901e5"></a>blas_new_handle</em> </td><td>
+<p>Will give 1 if the handle is new. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06a711ecc7da9546cf3ac76a29e297f3eb0"></a>blas_open_handle</em> </td><td>
+<p>will give 1 if the handle is open. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6"></a>blas_valid_handle</em> </td><td>
+<p>Will give 1 if the handle is valid (that is, after <a class="el" href="group__rsb__doc__sparse__blas.html#ga5d9ce97bf054b1e3750eaae5d4e6c335">BLAS_duscr_end</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">BLAS_zuscr_end</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#gac2b5eccd5cf442b5e2e79201d62ca2b5">BLAS_cuscr_end</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#gad6315d71f6f7abf8b82c89c70d6abbf3">BLAS_zuscr_end</a [...]
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
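+<p>For example, one can check whether a descriptor <code>A</code> refers to a fully assembled matrix before operating on it; a sketch: </p>
+<pre class="fragment">
+if (BLAS_usgp(A, blas_valid_handle) == 1)
+    { /* assembly finished: multiply/solve routines may be used */ }
+if (BLAS_usgp(A, blas_open_handle) == 1)
+    { /* still under construction: only insertion calls are legal */ }
+</pre>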
+<a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#abdf3d2dd2387ff18e265347d2dfc1f04">blas_jrot_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Unused/Unsupported. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04a3c18fddd1929b245ab4b948b63d57b0a"></a>blas_jrot_inner</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04a1ab4a6e0e69cdaa540b3415617e1ea05"></a>blas_jrot_outer</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04a85c43836ee3a19a39f41d2001761e042"></a>blas_jrot_sorted</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04a3c18fddd1929b245ab4b948b63d57b0a"></a>blas_jrot_inner</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04a1ab4a6e0e69cdaa540b3415617e1ea05"></a>blas_jrot_outer</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04a85c43836ee3a19a39f41d2001761e042"></a>blas_jrot_sorted</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04a3c18fddd1929b245ab4b948b63d57b0a"></a>blas_jrot_inner</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04a1ab4a6e0e69cdaa540b3415617e1ea05"></a>blas_jrot_outer</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="abdf3d2dd2387ff18e265347d2dfc1f04a85c43836ee3a19a39f41d2001761e042"></a>blas_jrot_sorted</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a07072da9995d9196d9176f56c784952b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a07072da9995d9196d9176f56c784952b">blas_norm_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Unused/Unsupported. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952bab661151b14ab3c58c0b3d335528db250"></a>blas_one_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba6f826b18a3a197b97b228961fdab47b3"></a>blas_real_one_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba57c558d28842a2b7b90df3a796fde77e"></a>blas_two_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba607f3142e766379f65fecd8964e9a8ed"></a>blas_frobenius_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952bab50c138192cb933e81550900a44d187c"></a>blas_inf_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba1ff3a55280960c17e59d37500ab4eec5"></a>blas_real_inf_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba6a806e7014a17f2b175780210e43d0cf"></a>blas_max_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952bae48280621b0adfec78d7a180c1026953"></a>blas_real_max_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952bab661151b14ab3c58c0b3d335528db250"></a>blas_one_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba6f826b18a3a197b97b228961fdab47b3"></a>blas_real_one_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba57c558d28842a2b7b90df3a796fde77e"></a>blas_two_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba607f3142e766379f65fecd8964e9a8ed"></a>blas_frobenius_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952bab50c138192cb933e81550900a44d187c"></a>blas_inf_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba1ff3a55280960c17e59d37500ab4eec5"></a>blas_real_inf_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba6a806e7014a17f2b175780210e43d0cf"></a>blas_max_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952bae48280621b0adfec78d7a180c1026953"></a>blas_real_max_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952bab661151b14ab3c58c0b3d335528db250"></a>blas_one_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba6f826b18a3a197b97b228961fdab47b3"></a>blas_real_one_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba57c558d28842a2b7b90df3a796fde77e"></a>blas_two_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba607f3142e766379f65fecd8964e9a8ed"></a>blas_frobenius_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952bab50c138192cb933e81550900a44d187c"></a>blas_inf_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba1ff3a55280960c17e59d37500ab4eec5"></a>blas_real_inf_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952ba6a806e7014a17f2b175780210e43d0cf"></a>blas_max_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a07072da9995d9196d9176f56c784952bae48280621b0adfec78d7a180c1026953"></a>blas_real_max_norm</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a9e6ec9e515f9d9b7e47110ae5f6ea04e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a9e6ec9e515f9d9b7e47110ae5f6ea04e">blas_order_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Used to specify a dense array's element layout. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a9e6ec9e515f9d9b7e47110ae5f6ea04ea635ab08ac28ae417e25c0d163c40f19d"></a>blas_rowmajor</em> </td><td>
+<p>Row major. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e"></a>blas_colmajor</em> </td><td>
+<p>Column major. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a9e6ec9e515f9d9b7e47110ae5f6ea04ea635ab08ac28ae417e25c0d163c40f19d"></a>blas_rowmajor</em> </td><td>
+<p>Row major. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e"></a>blas_colmajor</em> </td><td>
+<p>Column major. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a9e6ec9e515f9d9b7e47110ae5f6ea04ea635ab08ac28ae417e25c0d163c40f19d"></a>blas_rowmajor</em> </td><td>
+<p>Row major. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a9e6ec9e515f9d9b7e47110ae5f6ea04ea69ab6bcac9a135e7321dc4ca72616d1e"></a>blas_colmajor</em> </td><td>
+<p>Column major. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
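+<p>These values describe the storage of the dense (multi-)vector operands of routines such as the Level 3 multiply; a sketch, assuming the standard Sparse BLAS <code>BLAS_dusmm(order, transa, nrhs, alpha, A, b, ldb, c, ldc)</code> signature and an assembled n x n double-precision descriptor <code>A</code>: </p>
+<pre class="fragment">
+enum { n = 4, nrhs = 2 };
+double b[n * nrhs], c[n * nrhs]; /* n x nrhs, row-major: leading dimension nrhs */
+/* ... fill b, zero c ... then c := c + 1.0 * A * b, row by row */
+BLAS_dusmm(blas_rowmajor, blas_no_trans, nrhs, 1.0, A, b, nrhs, c, nrhs);
+</pre>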
+<a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a8970170b9fd2a64eb18d9509ea624475">blas_prec_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Unused/Unsupported. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475aab04803ec917ea9ae8b4d40ed1cdc7c4"></a>blas_prec_single</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475af5e092268082a0306216cbad6d3d8b8a"></a>blas_prec_double</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475a63c139aa91e4f496acd6cfb85385d7d4"></a>blas_prec_indigenous</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475a2138d39c899dac6396f817c6cfdc91d9"></a>blas_prec_extra</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475aab04803ec917ea9ae8b4d40ed1cdc7c4"></a>blas_prec_single</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475af5e092268082a0306216cbad6d3d8b8a"></a>blas_prec_double</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475a63c139aa91e4f496acd6cfb85385d7d4"></a>blas_prec_indigenous</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475a2138d39c899dac6396f817c6cfdc91d9"></a>blas_prec_extra</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475aab04803ec917ea9ae8b4d40ed1cdc7c4"></a>blas_prec_single</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475af5e092268082a0306216cbad6d3d8b8a"></a>blas_prec_double</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475a63c139aa91e4f496acd6cfb85385d7d4"></a>blas_prec_indigenous</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a8970170b9fd2a64eb18d9509ea624475a2138d39c899dac6396f817c6cfdc91d9"></a>blas_prec_extra</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="aee94244609acd12511418bfbf0a77729"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#aee94244609acd12511418bfbf0a77729">blas_rsb_ext_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Properties suitable for use with <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a>/<a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a>. None of these belong to the Sparse BLAS standard. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0"></a>blas_rsb_spmv_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. As an extension to t [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7"></a>blas_rsb_spmv_autotuning_off</em> </td><td>
+<p>Turn off executing threads autotuning for <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a class="el" h [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff"></a>blas_rsb_spmv_n_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for untransposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a  [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0"></a>blas_rsb_spmv_n_autotuning_off</em> </td><td>
+<p>Turn off executing threads autotuning for untransposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a  [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8"></a>blas_rsb_spmv_t_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for transposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a cl [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e"></a>blas_rsb_spmv_t_autotuning_off</em> </td><td>
+<p>Turn off executing threads autotuning for transposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a cl [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4"></a>blas_rsb_autotune_next_operation</em> </td><td>
+<p>Turn on executing threads autotuning for the next operation among <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv< [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b"></a>blas_rsb_rep_rsb</em> </td><td>
+<p>Request/check for RSB representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878"></a>blas_rsb_rep_csr</em> </td><td>
+<p>Request/check for CSR representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60"></a>blas_rsb_rep_coo</em> </td><td>
+<p>Request/check for COO representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f"></a>blas_rsb_duplicates_ovw</em> </td><td>
+<p>Request/check for duplicate nonzeroes overwriting policy. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a"></a>blas_rsb_duplicates_sum</em> </td><td>
+<p>Request/check for duplicate nonzeroes summation policy. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0"></a>blas_rsb_spmv_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. As an extension to t [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7"></a>blas_rsb_spmv_autotuning_off</em> </td><td>
+<p>Turn off executing threads autotuning for <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a class="el" h [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff"></a>blas_rsb_spmv_n_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for untransposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a  [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0"></a>blas_rsb_spmv_n_autotuning_off</em> </td><td>
+<p>Turn on executing threads autotuning for untransposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a  [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8"></a>blas_rsb_spmv_t_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for transposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a cl [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e"></a>blas_rsb_spmv_t_autotuning_off</em> </td><td>
+<p>Turn on executing threads autotuning for transposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a cl [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4"></a>blas_rsb_autotune_next_operation</em> </td><td>
+<p>Turn on executing threads autotuning for the next operation among <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv< [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b"></a>blas_rsb_rep_rsb</em> </td><td>
+<p>Request/check for RSB representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878"></a>blas_rsb_rep_csr</em> </td><td>
+<p>Request/check for CSR representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60"></a>blas_rsb_rep_coo</em> </td><td>
+<p>Request/check for COO representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f"></a>blas_rsb_duplicates_ovw</em> </td><td>
+<p>Request/check for duplicate nonzeroes overwriting policy. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a"></a>blas_rsb_duplicates_sum</em> </td><td>
+<p>Request/check for duplicate nonzeroes summation policy. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729ae05ed12240987c33f90ee6cf012985a0"></a>blas_rsb_spmv_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. As an extension to t [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729addbb7f37e0069794e22567ce9c58a1a7"></a>blas_rsb_spmv_autotuning_off</em> </td><td>
+<p>Turn off executing threads autotuning for <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a class="el" h [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a7b8fe3214f5ed2fde6bd413a7e2153ff"></a>blas_rsb_spmv_n_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for untransposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a  [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729aebe2d6eaa16b58c2bd5d90498aaecdd0"></a>blas_rsb_spmv_n_autotuning_off</em> </td><td>
+<p>Turn on executing threads autotuning for untransposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a  [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a6d13e1d64ef564d7a4e6de11fe7484c8"></a>blas_rsb_spmv_t_autotuning_on</em> </td><td>
+<p>Turn on executing threads autotuning for transposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a cl [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729aac849884a1c55b7817c5dd4656730d7e"></a>blas_rsb_spmv_t_autotuning_off</em> </td><td>
+<p>Turn on executing threads autotuning for transposed <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv</a>. See <a cl [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729ac88f907f61c86a61837a37274e2f97d4"></a>blas_rsb_autotune_next_operation</em> </td><td>
+<p>Turn on executing threads autotuning for the next operation among <a class="el" href="group__rsb__doc__sparse__blas.html#ga9a8f45ddd3c890a296239b212f0c033b">BLAS_dusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga1ee2eb4be4c1e0565051fe04ca7415a2">BLAS_zusmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#gafb4d039eb5319613ed30db7fb323278c">BLAS_susmv</a>, <a class="el" href="group__rsb__doc__sparse__blas.html#ga9ec2e63176f2d6b11ee48bb523b4f7c7">BLAS_cusmv< [...]
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729af05068a213770fb2574e849bf1f1879b"></a>blas_rsb_rep_rsb</em> </td><td>
+<p>Request/check for RSB representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729abd17fdf950b653b674e0cb8680a70878"></a>blas_rsb_rep_csr</em> </td><td>
+<p>Request/check for CSR representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a388de22eadedb827f56db0eb3eea4c60"></a>blas_rsb_rep_coo</em> </td><td>
+<p>Request/check for COO representation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a5b36997b0ce8ea4238c6561c676f9c5f"></a>blas_rsb_duplicates_ovw</em> </td><td>
+<p>Request/check for duplicate nonzeroes overwriting policy. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="aee94244609acd12511418bfbf0a77729a13a96aa1440f1b35ed55518094f8bb6a"></a>blas_rsb_duplicates_sum</em> </td><td>
+<p>Request/check for duplicate nonzeroes summation policy. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
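+<p>Being ordinary property values, these extensions are enabled with <code>BLAS_ussp</code> and checked with <code>BLAS_usgp</code>. A sketch, for a valid double-precision descriptor <code>A</code> and conformant vectors <code>x</code>, <code>y</code>, assuming the 1/0 return convention used by the other <code>BLAS_usgp</code> properties: </p>
+<pre class="fragment">
+BLAS_ussp(A, blas_rsb_autotune_next_operation); /* tune threads on the next multiply */
+BLAS_dusmv(blas_no_trans, 1.0, A, x, 1, y, 1);  /* this call triggers the autotuning */
+if (BLAS_usgp(A, blas_rsb_rep_csr) == 1)
+    { /* the matrix is currently held in CSR form */ }
+</pre>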
+<a class="anchor" id="ac10de4d3a9ae38c876ec94ee7929e695"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#ac10de4d3a9ae38c876ec94ee7929e695">blas_side_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Unused/Unsupported. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181"></a>blas_left_side</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9"></a>blas_right_side</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181"></a>blas_left_side</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9"></a>blas_right_side</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ac10de4d3a9ae38c876ec94ee7929e695a76ed6c5405d254fff23870d7d8e4a181"></a>blas_left_side</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="ac10de4d3a9ae38c876ec94ee7929e695a7a52dfeb6e7d5c0c4e24325a6f803ee9"></a>blas_right_side</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a540f6a907f9f5e49d84a65c530e598c6">blas_size_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Quantities that can be obtained via <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a>. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23"></a>blas_num_rows</em> </td><td>
+<p>Get the matrix row count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b"></a>blas_num_cols</em> </td><td>
+<p>Get the matrix column count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd"></a>blas_num_nonzeros</em> </td><td>
+<p>Get the matrix nonzero count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23"></a>blas_num_rows</em> </td><td>
+<p>Get the matrix rows count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b"></a>blas_num_cols</em> </td><td>
+<p>Get the matrix columns count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd"></a>blas_num_nonzeros</em> </td><td>
+<p>Get the matrix nonzeros count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6ace8432d9075d2530ffc0b474c5560e23"></a>blas_num_rows</em> </td><td>
+<p>Get the matrix rows count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6a9b8cd10d952e1405feeac4f81e3b9f4b"></a>blas_num_cols</em> </td><td>
+<p>Get the matrix columns count. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a540f6a907f9f5e49d84a65c530e598c6a1968168c04860e3bb4ed8eb3232084fd"></a>blas_num_nonzeros</em> </td><td>
+<p>Get the matrix nonzeros count. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
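+<p>For example, the dimensions of a valid descriptor <code>A</code> can be read directly off the return value of <code>BLAS_usgp</code>; a sketch (assumes stdio.h for <code>printf</code>): </p>
+<pre class="fragment">
+int nr  = BLAS_usgp(A, blas_num_rows);     /* row count        */
+int nc  = BLAS_usgp(A, blas_num_cols);     /* column count     */
+int nnz = BLAS_usgp(A, blas_num_nonzeros); /* stored nonzeroes */
+printf("%d x %d matrix with %d nonzeroes\n", nr, nc, nnz);
+</pre>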
+<a class="anchor" id="a4a9825e92ac3a85e524c58283ac42c14"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a4a9825e92ac3a85e524c58283ac42c14">blas_sort_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Unused/Unsupported. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7"></a>blas_increasing_order</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4"></a>blas_decreasing_order</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7"></a>blas_increasing_order</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4"></a>blas_decreasing_order</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a4a9825e92ac3a85e524c58283ac42c14a5998f1d9bb992284c2bf02a0fc7482d7"></a>blas_increasing_order</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a4a9825e92ac3a85e524c58283ac42c14a390d680413dcc00c62f886c2ed3061e4"></a>blas_decreasing_order</em> </td><td>
+<p>Unsupported. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
+</div>
+</div>
+<a class="anchor" id="a3f95e19247de0359b56de195704e05a5"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a3f95e19247de0359b56de195704e05a5">blas_sparsity_optimization_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The following are usable with <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79"></a>blas_regular</em> </td><td>
+<p>Will give 0. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66"></a>blas_irregular</em> </td><td>
+<p>Will give 1. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa"></a>blas_block</em> </td><td>
+<p>Will give 0. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5"></a>blas_unassembled</em> </td><td>
+<p>Complementary to <a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">blas_valid_handle</a>. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79"></a>blas_regular</em> </td><td>
+<p>Will give 0. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66"></a>blas_irregular</em> </td><td>
+<p>Will give 1. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa"></a>blas_block</em> </td><td>
+<p>Will give 0. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5"></a>blas_unassembled</em> </td><td>
+<p>Complementary to <a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">blas_valid_handle</a>. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a1bff0036f6afe5490a50f5f421094c79"></a>blas_regular</em> </td><td>
+<p>Will give 0. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a8fe944fe8839b6c6c5224a9f46dcce66"></a>blas_irregular</em> </td><td>
+<p>Will give 1. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5aba3193a82dc25762807807af65704faa"></a>blas_block</em> </td><td>
+<p>Will give 0. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a3f95e19247de0359b56de195704e05a5a3de8f2747369cc9f6226280c26bfbbb5"></a>blas_unassembled</em> </td><td>
+<p>Complementary to <a class="el" href="rsb__libspblas_8h.html#a7cb10fb1b47b79ef278d6f09d571bd06aafa0149e23b7fa4f0cfdd3ada6c056b6">blas_valid_handle</a>. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
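+<p>A short illustrative sketch of querying these properties (assuming <code>A</code> is a valid handle):</p>
+<div class="fragment"><div class="line">/* As documented above: blas_regular and blas_block give 0, blas_irregular gives 1. */</div>
+<div class="line">int r = BLAS_usgp(A, blas_regular);   /* 0 */</div>
+<div class="line">int i = BLAS_usgp(A, blas_irregular); /* 1 */</div>
+</div><!-- fragment -->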
+</div>
+</div>
+<a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63b">blas_symmetry_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Symmetry properties. If not specified otherwise, valid for both <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a> and <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a>. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866"></a>blas_general</em> </td><td>
+<p>General unsymmetric matrix (default). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0"></a>blas_symmetric</em> </td><td>
+<p>Symmetric matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">blas_upper_symmetric</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c"></a>blas_hermitian</em> </td><td>
+<p>Hermitian matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">blas_lower_hermitian</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">blas_upper_hermitian</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a"></a>blas_triangular</em> </td><td>
+<p>Triangular matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">blas_lower_triangular</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">blas_upper_triangular</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae"></a>blas_lower_triangular</em> </td><td>
+<p>Lower triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533"></a>blas_upper_triangular</em> </td><td>
+<p>Upper triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487"></a>blas_lower_symmetric</em> </td><td>
+<p>Lower symmetric matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de"></a>blas_upper_symmetric</em> </td><td>
+<p>Upper symmetric matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1"></a>blas_lower_hermitian</em> </td><td>
+<p>Lower hermitian matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe"></a>blas_upper_hermitian</em> </td><td>
+<p>Upper hermitian matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866"></a>blas_general</em> </td><td>
+<p>General unsymmetric matrix (default). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0"></a>blas_symmetric</em> </td><td>
+<p>Symmetric matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">blas_upper_symmetric</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c"></a>blas_hermitian</em> </td><td>
+<p>Hermitian matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">blas_lower_hermitian</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">blas_upper_hermitian</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a"></a>blas_triangular</em> </td><td>
+<p>Triangular matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">blas_lower_triangular</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">blas_upper_triangular</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae"></a>blas_lower_triangular</em> </td><td>
+<p>Lower triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533"></a>blas_upper_triangular</em> </td><td>
+<p>Upper triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487"></a>blas_lower_symmetric</em> </td><td>
+<p>Lower symmetric matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de"></a>blas_upper_symmetric</em> </td><td>
+<p>Upper symmetric matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1"></a>blas_lower_hermitian</em> </td><td>
+<p>Lower hermitian matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe"></a>blas_upper_hermitian</em> </td><td>
+<p>Upper hermitian matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bab56e7dc16e197ab01f48a53c3f866866"></a>blas_general</em> </td><td>
+<p>General unsymmetric matrix (default). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad8275c5afad7cf56a0c1da44051a5fc0"></a>blas_symmetric</em> </td><td>
+<p>Symmetric matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487">blas_lower_symmetric</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de">blas_upper_symmetric</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba1bf962e848b3f29d03d2bc341186a16c"></a>blas_hermitian</em> </td><td>
+<p>Hermitian matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1">blas_lower_hermitian</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe">blas_upper_hermitian</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba45e6e3b446cf4d266e48af8bfa37950a"></a>blas_triangular</em> </td><td>
+<p>Triangular matrix (either <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae">blas_lower_triangular</a> or <a class="el" href="rsb__libspblas_8h.html#a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533">blas_upper_triangular</a>). For <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a> only. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad026fa6623368578424b2dd64d12ebae"></a>blas_lower_triangular</em> </td><td>
+<p>Lower triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba12253fe914708e9c2b21f4e80b462533"></a>blas_upper_triangular</em> </td><td>
+<p>Upper triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63baba96b7c19a0ccfe3be9d78cb27690487"></a>blas_lower_symmetric</em> </td><td>
+<p>Lower symmetric matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba2ac709e94a120358c4bbd620ec8888de"></a>blas_upper_symmetric</em> </td><td>
+<p>Upper symmetric matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63bad06a1fd032c0bb2cbbe628d96ca800f1"></a>blas_lower_hermitian</em> </td><td>
+<p>Lower hermitian matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a7da08ccc1c4c7f5ff40768d502a6e63ba5377bb47a85d04c507e8883f8c17d1fe"></a>blas_upper_hermitian</em> </td><td>
+<p>Upper hermitian matrix. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
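+<p>A hedged sketch of declaring a symmetry property on a matrix under construction (assumptions for illustration: <code>A</code> was obtained from e.g. <code>BLAS_duscr_begin</code> and has not been assembled yet):</p>
+<div class="fragment"><div class="line">/* Declare A lower symmetric: only its lower triangle will be supplied. */</div>
+<div class="line">BLAS_ussp(A, blas_lower_symmetric);</div>
+<div class="line">/* After assembly, the symmetry class can be queried: */</div>
+<div class="line">int is_sym = BLAS_usgp(A, blas_symmetric);</div>
+</div><!-- fragment -->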
+</div>
+</div>
+<a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#a23e5e138364c80074ac014a3dfd346b7">blas_trans_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Used to specify a transposition operator for a matrix operand. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f"></a>blas_no_trans</em> </td><td>
+<p>No transposition. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54"></a>blas_trans</em> </td><td>
+<p>Transposition. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c"></a>blas_conj_trans</em> </td><td>
+<p>Transposition and conjugation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f"></a>blas_no_trans</em> </td><td>
+<p>No transposition. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54"></a>blas_trans</em> </td><td>
+<p>Transposition. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c"></a>blas_conj_trans</em> </td><td>
+<p>Transposition and conjugation. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a60c827bef60beeea296c26486e28d85f"></a>blas_no_trans</em> </td><td>
+<p>No transposition. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a8b18010e436e35f6f39065868dd47e54"></a>blas_trans</em> </td><td>
+<p>Transposition. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="a23e5e138364c80074ac014a3dfd346b7a6660afe668077b67cdce26fd0ae1469c"></a>blas_conj_trans</em> </td><td>
+<p>Transposition and conjugation. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
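+<p>A minimal sketch of passing a transposition operator to a Level 2 multiply (assuming <code>A</code> is a valid handle and <code>x</code>, <code>y</code> are consistently sized arrays of doubles; error handling omitted):</p>
+<div class="fragment"><div class="line">/* y := y + alpha * A^T * x */</div>
+<div class="line">const double alpha = 1.0;</div>
+<div class="line">BLAS_dusmv(blas_trans, alpha, A, x, 1, y, 1);</div>
+</div><!-- fragment -->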
+</div>
+</div>
+<a class="anchor" id="acc2b26a405868ca1bd8a18e0eb62e820"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="blas__sparse_8h.html#acc2b26a405868ca1bd8a18e0eb62e820">blas_uplo_type</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Specifies (via <a class="el" href="group__rsb__doc__sparse__blas.html#ga89577a4a63cc8659f1d463fb819bc002">BLAS_ussp</a>) or inquires about (via <a class="el" href="group__rsb__doc__sparse__blas.html#ga852f4a68eef6963708d11f37e975b178">BLAS_usgp</a>) the upper or lower triangularity of a matrix. </p>
+<dl><dt><b>Enumerator: </b></dt><dd><table border="0" cellspacing="2" cellpadding="0">
+<tr><td valign="top"><em><a class="anchor" id="acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07"></a>blas_upper</em> </td><td>
+<p>Upper triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26"></a>blas_lower</em> </td><td>
+<p>Lower triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07"></a>blas_upper</em> </td><td>
+<p>Upper triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26"></a>blas_lower</em> </td><td>
+<p>Lower triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="acc2b26a405868ca1bd8a18e0eb62e820aad135195d29df942d45a5f10ed2bdf07"></a>blas_upper</em> </td><td>
+<p>Upper triangular matrix. </p>
+</td></tr>
+<tr><td valign="top"><em><a class="anchor" id="acc2b26a405868ca1bd8a18e0eb62e820a9f11b29c19db99b1caf0563867bfbc26"></a>blas_lower</em> </td><td>
+<p>Lower triangular matrix. </p>
+</td></tr>
+</table>
+</dd>
+</dl>
+
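+<p>A brief sketch of an inquiry (assuming <code>A</code> is a valid handle):</p>
+<div class="fragment"><div class="line">/* Nonzero if A has been marked as upper triangular. */</div>
+<div class="line">int is_upper = BLAS_usgp(A, blas_upper);</div>
+</div><!-- fragment -->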
+</div>
+</div>
+<h2>Function Documentation</h2>
+<a class="anchor" id="a7769e3aac9ffdba04f29dd1f8f57daa4"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> rsb_load_spblas_matrix_file_as_matrix_market </td>
+          <td>(</td>
+          <td class="paramtype">const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> * </td>
+          <td class="paramname"><em>filename</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> </td>
+          <td class="paramname"><em>typecode</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Loads a BLAS Sparse matrix from a Matrix Market file. This is a <code>librsb</code> extension.</p>
+<p>Sets the blas_upper_triangular, blas_lower_triangular, blas_upper_hermitian, blas_lower_hermitian, blas_upper_symmetric, or blas_lower_symmetric property, according to the loaded file.</p>
+
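+<p>A minimal usage sketch (assumptions for illustration: a Matrix Market file named "A.mtx" exists, the library has been initialized, and <code>RSB_NUMERICAL_TYPE_DOUBLE</code> is used as the type code; comparing against <code>blas_invalid_handle</code> follows the usual Sparse BLAS convention):</p>
+<div class="fragment"><div class="line">#include &lt;rsb.h&gt;</div>
+<div class="line">#include &lt;blas_sparse.h&gt;</div>
+<div class="line">blas_sparse_matrix A = rsb_load_spblas_matrix_file_as_matrix_market("A.mtx", RSB_NUMERICAL_TYPE_DOUBLE);</div>
+<div class="line">if (A == blas_invalid_handle) { /* loading failed */ }</div>
+</div><!-- fragment -->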
+</div>
+</div>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:22 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/rsb__libspblas__handle_8c.html b/doc/html/rsb__libspblas__handle_8c.html
new file mode 100644
index 0000000..6ade5a1
--- /dev/null
+++ b/doc/html/rsb__libspblas__handle_8c.html
@@ -0,0 +1,94 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb_libspblas_handle.c File Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="files.html"><span>File List</span></a></li>
+      <li><a href="globals.html"><span>Globals</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#func-members">Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb_libspblas_handle.c File Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="func-members"></a>
+Functions</h2></td></tr>
+<tr class="memitem:a7769e3aac9ffdba04f29dd1f8f57daa4"><td class="memItemLeft" align="right" valign="top"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__libspblas__handle_8c.html#a7769e3aac9ffdba04f29dd1f8f57daa4">rsb_load_spblas_matrix_file_as_matrix_market</a> (const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *filename, <a [...]
+</table>
+<h2>Function Documentation</h2>
+<a class="anchor" id="a7769e3aac9ffdba04f29dd1f8f57daa4"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="blas__sparse_8h.html#a6f56456b01e0cc6b25b81201aa67c163">blas_sparse_matrix</a> rsb_load_spblas_matrix_file_as_matrix_market </td>
+          <td>(</td>
+          <td class="paramtype">const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> * </td>
+          <td class="paramname"><em>filename</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"><a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> </td>
+          <td class="paramname"><em>typecode</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Loads a BLAS Sparse matrix from a Matrix Market file. This is a <code>librsb</code> extension.</p>
+<p>Sets the blas_upper_triangular, blas_lower_triangular, blas_upper_hermitian, blas_lower_hermitian, blas_upper_symmetric, or blas_lower_symmetric property, according to the loaded file.</p>
+
+</div>
+</div>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:22 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/rsb__rsb_8c.html b/doc/html/rsb__rsb_8c.html
new file mode 100644
index 0000000..09f2a16
--- /dev/null
+++ b/doc/html/rsb__rsb_8c.html
@@ -0,0 +1,300 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb_rsb.c File Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="files.html"><span>File List</span></a></li>
+      <li><a href="globals.html"><span>Globals</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#define-members">Macros</a> |
+<a href="#func-members">Functions</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb_rsb.c File Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>Implementation of the library user interface.  
+<a href="#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="define-members"></a>
+Macros</h2></td></tr>
+<tr class="memitem:af6ebbe2e678aef616abb33526b312f65"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__rsb_8c.html#af6ebbe2e678aef616abb33526b312f65">RSB_INTERFACE_RETURN_MTX_ERRP</a>(MTXAP, ERRVAL, ERRVALP)</td></tr>
+<tr class="memitem:abd0f924354130cfb2cbe4b8345dbc6fd"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__rsb_8c.html#abd0f924354130cfb2cbe4b8345dbc6fd">RSB_INTERFACE_RETURN_MTX</a>(MTXAP)   RSB_INTERFACE_ENDCMD return MTXAP;</td></tr>
+<tr class="memitem:a85dee9bd15f321bfac4a8f055f072d1b"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__rsb_8c.html#a85dee9bd15f321bfac4a8f055f072d1b">RSB_INTERFACE_RETURN_ERR</a>(ERRVAL)   RSB_INTERFACE_ENDCMD RSB_DO_ERR_RETURN_INTERFACE(ERRVAL)</td></tr>
+<tr class="memitem:a619e228eb1a40cb1ae303be5ca6fa2ed"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__rsb_8c.html#a619e228eb1a40cb1ae303be5ca6fa2ed">RSB_INTERFACE_RETURN_VAL</a>(VAL)   RSB_INTERFACE_ENDCMD {return (VAL);}</td></tr>
+<tr class="memitem:a6a77bece998693a0c9bc500d444eb8a3"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__rsb_8c.html#a6a77bece998693a0c9bc500d444eb8a3">RSB_EXPOSE_NEW_GENERAL_INTERFACE</a>   1	/* temporary (internals) to delimit the new interface which supersedes the deprecated one */</td></tr>
+</table><table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="func-members"></a>
+Functions</h2></td></tr>
+<tr class="memitem:gaf2b874d9f117ee6a6899634472b17946"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a> (struct <a class="el" href="structrsb__initopts.html">rsb_initopts</a> *iop)</td></tr>
+<tr class="memitem:a2a08c5a23f3999fe8cf36440680e4a05"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__rsb_8c.html#a2a08c5a23f3999fe8cf36440680e4a05">rsb_lib_set_opt</a> (enum <a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6">rsb_opt_t</a> iof, const void *iop)</td></tr>
+<tr class="memitem:a96a28efc32dd050d2a74208b3ad2f227"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__rsb_8c.html#a96a28efc32dd050d2a74208b3ad2f227">rsb_lib_get_opt</a> (enum <a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6">rsb_opt_t</a> iof, void *iop)</td></tr>
+<tr class="memitem:ga4670aa682e70f82d5039c600e426a368"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga4670aa682e70f82d5039c600e426a368">rsb_lib_set_opt_str</a> (const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *opnp, const <a class="el" href="rsb_8h.html#a10ec0af478bcc [...]
+<tr class="memitem:ga1707f8b0c28805f692146cf2fb28ae70"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit</a> (struct <a class="el" href="structrsb__initopts.html">rsb_initopts</a> *iop)</td></tr>
+<tr class="memitem:ga86db30487afe975ed18a7aa6ee0db81d"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a> (struct <a class="el" href="structrsb__initopts.html">rsb_initopts</a> *iop)</td></tr>
+<tr class="memitem:ga86c1b0d0586f817ee31ca1caa3fee9be"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga86c1b0d0586f817ee31ca1caa3fee9be">rsb_mtx_alloc_from_coo_const</a> (const void *VA, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</ [...]
+<tr class="memitem:ga3b7f9a461377de348b33a873f2e1893f"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga3b7f9a461377de348b33a873f2e1893f">rsb_mtx_alloc_from_coo_inplace</a> (void *VA, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *JA, <a class [...]
+<tr class="memitem:gab64a020286a8b58d23d84d4512bd9132"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gab64a020286a8b58d23d84d4512bd9132">rsb_mtx_free</a> (struct rsb_mtx_t *mtxAp)</td></tr>
+<tr class="memitem:gae181671ba19191caa5a282cbde4fdfc5"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gae181671ba19191caa5a282cbde4fdfc5">rsb_mtx_clone</a> (struct rsb_mtx_t **mtxBpp, <a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode, <a class="el" href="rsb_8h.html#a4 [...]
+<tr class="memitem:ga4a16a82d289c76a437915db449553d4d"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga4a16a82d289c76a437915db449553d4d">rsb_spmv</a> (<a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> transA, const void *alphap, const struct rsb_mtx_t *mtxAp, const void *Xp,  [...]
+<tr class="memitem:ga9b044332b720d3f8083ae792068fb04a"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga9b044332b720d3f8083ae792068fb04a">rsb_spsv</a> (<a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> transT, const void *alphap, const struct rsb_mtx_t *mtxTp, const void *Xp,  [...]
+<tr class="memitem:ga48e6f3844605fffac9f622f05afa6043"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga48e6f3844605fffac9f622f05afa6043">rsb_spsm</a> (<a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> transT, const void *alphap, const struct rsb_mtx_t *mtxTp, <a class="el" hr [...]
+<tr class="memitem:gaa09eca432d5bb8c57fcff5d9ab98dfb8"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gaa09eca432d5bb8c57fcff5d9ab98dfb8">rsb_coo_sort</a> (void *VA, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b43952 [...]
+<tr class="memitem:gaa79f69918eafbd8f737b7866a00a0330"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gaa79f69918eafbd8f737b7866a00a0330">rsb_file_mtx_get_dims</a> (const char *filename, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *nrp, <a class="el" href="rsb_8h.html# [...]
+<tr class="memitem:gab660cf8aff876ae88b59c7a22ddfc912"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gab660cf8aff876ae88b59c7a22ddfc912">rsb_perror</a> (void *stream, <a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> errval)</td></tr>
+<tr class="memitem:ga28710b8dade48738ea8e075aa1a3d262"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga28710b8dade48738ea8e075aa1a3d262">rsb_strerror_r</a> (<a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> errval, <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678e [...]
+<tr class="memitem:ga2d7533a97c97b215090d69c2d9235412"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga2d7533a97c97b215090d69c2d9235412">rsb_mtx_upd_vals</a> (struct rsb_mtx_t *mtxAp, enum <a class="el" href="group__rsb__doc__rsb.html#ga16c86c65a187bfbe94ecfdb87b97cade">rsb_elopf_t</a> elop_flags, const void [...]
+<tr class="memitem:gab8069ad6d5a67bc8a726131891e98c46"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gab8069ad6d5a67bc8a726131891e98c46">rsb_mtx_set_vals</a> (struct rsb_mtx_t *mtxAp, const void *VA, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, const <a clas [...]
+<tr class="memitem:gad8f1aa9ac5081edd789374e7bb82697f"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gad8f1aa9ac5081edd789374e7bb82697f">rsb_mtx_get_vals</a> (const struct rsb_mtx_t *mtxAp, void *VA, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, const <a clas [...]
+<tr class="memitem:gad911ac7528c95c874d02cb17e6b76c54"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gad911ac7528c95c874d02cb17e6b76c54">rsb_file_mtx_save</a> (const struct rsb_mtx_t *mtxAp, const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *filename)</td></tr>
+<tr class="memitem:gac4b2a63cdfe1cd4083b1561ee4bea696"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gac4b2a63cdfe1cd4083b1561ee4bea696">rsb_file_vec_save</a> (const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *filename, <a class="el" href="rsb_8h.html#ac0f6a03345c8874f6 [...]
+<tr class="memitem:gad071e0373a08f74ee7ae910e9e4fd140"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gad071e0373a08f74ee7ae910e9e4fd140">rsb_file_vec_load</a> (const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *filename, <a class="el" href="rsb_8h.html#ac0f6a03345c8874f6 [...]
+<tr class="memitem:ga00833b0cf57da8e430f9d0e2b5375bb3"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga00833b0cf57da8e430f9d0e2b5375bb3">rsb_file_mtx_load</a> (const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *filename, <a class="el" href="rsb_8h.html#a569215d6312bf658b32d3e89cf2e0715">rsb_flags_t</a> flagsA, <a class="el" href="r [...]
+<tr class="memitem:ga30823d02e577e59da4ccff6baaeb8ea1"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga30823d02e577e59da4ccff6baaeb8ea1">rsb_sppsp</a> (<a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode, <a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> transA, const void *alphap, const struct rsb [...]
+<tr class="memitem:ga8813ccbbb1065ac76bfe22c42feafa05"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga8813ccbbb1065ac76bfe22c42feafa05">rsb_spmsp</a> (<a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode, <a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> transA, const void *alphap, const struct rsb [...]
+<tr class="memitem:gaf30a70ea183d30d216f700782fc01524"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gaf30a70ea183d30d216f700782fc01524">rsb_mtx_add_to_dense</a> (const void *alphap, const struct rsb_mtx_t *mtxAp, <a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> ldB, <a cl [...]
+<tr class="memitem:ga7459601f0d54bd95549959b9749fedde"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga7459601f0d54bd95549959b9749fedde">rsb_psblas_trans_to_rsb_trans</a> (const char psbtrans)</td></tr>
+<tr class="memitem:ga13d417f776654fd159f274e56191573e"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga13d417f776654fd159f274e56191573e">rsb_mtx_alloc_from_csr_const</a> (const void *VA, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *RP, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</ [...]
+<tr class="memitem:gaebf57d9e5263f41eb6163581ffc141aa"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gaebf57d9e5263f41eb6163581ffc141aa">rsb_mtx_alloc_from_csc_const</a> (const void *VA, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, const <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</ [...]
+<tr class="memitem:ga60121166daf00968ba717931f04ea455"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga60121166daf00968ba717931f04ea455">rsb_mtx_alloc_from_csr_inplace</a> (void *VA, <a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> *RP, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *JA, <a class [...]
+<tr class="memitem:ga3c46a4942a6acb90063d721b6446e78e"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga3c46a4942a6acb90063d721b6446e78e">rsb_mtx_switch_to_csr</a> (struct rsb_mtx_t *mtxAp, void **VAp, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> **IAp, <a class="el" hr [...]
+<tr class="memitem:gaac3c6c033733a8101b9ccf56f8fc7112"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gaac3c6c033733a8101b9ccf56f8fc7112">rsb_mtx_get_coo</a> (const struct rsb_mtx_t *mtxAp, void *VA, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, <a class="el" href=" [...]
+<tr class="memitem:ga4adca460f50bc1ad7d9ffdfda2273b87"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga4adca460f50bc1ad7d9ffdfda2273b87">rsb_mtx_get_csr</a> (<a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode, const struct rsb_mtx_t *mtxAp, void *VA, <a class="el" href [...]
+<tr class="memitem:gaa01c4a69db732f99e8a960ee8c9afa23"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gaa01c4a69db732f99e8a960ee8c9afa23">rsb_mtx_get_rows_sparse</a> (<a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> transA, const void *alphap, const struct rsb_mtx_t *mtxAp, v [...]
+<tr class="memitem:ga68115178d85cd28c645058deb0aa6379"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga68115178d85cd28c645058deb0aa6379">rsb_mtx_get_coo_block</a> (const struct rsb_mtx_t *mtxAp, void *VA, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> *IA, <a class="el"  [...]
+<tr class="memitem:ga3ec8d721b5333aae6ea9b03eb0039285"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga3ec8d721b5333aae6ea9b03eb0039285">rsb_spmm</a> (<a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> transA, const void *alphap, const struct rsb_mtx_t *mtxAp, <a class="el" hr [...]
+<tr class="memitem:ga74d97612d4af70244c886b9eadd90a0e"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga74d97612d4af70244c886b9eadd90a0e">rsb_spmsp_to_dense</a> (<a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode, <a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055 [...]
+<tr class="memitem:gab0702d7080d1699162e4201bc70cc5ee"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gab0702d7080d1699162e4201bc70cc5ee">rsb_mtx_rndr</a> (const char *filename, const struct rsb_mtx_t *mtxAp, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> pmWidth, <a clas [...]
+<tr class="memitem:ga4b45a74b985f5cbd869bc9a540951771"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga4b45a74b985f5cbd869bc9a540951771">rsb_file_mtx_rndr</a> (void *pmp, const char *filename, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> pmlWidth, <a class="el" href="r [...]
+<tr class="memitem:gadf75c148fe661486ab0d8140657b8d9a"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gadf75c148fe661486ab0d8140657b8d9a">rsb_mtx_switch_to_coo</a> (struct rsb_mtx_t *mtxAp, void **VAp, <a class="el" href="rsb_8h.html#a4874ba61df0ff15b4395278496f83a5d">rsb_coo_idx_t</a> **IAp, <a class="el" hr [...]
+<tr class="memitem:gadaee12cc24dac7f8ebc68efd3d09c819"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gadaee12cc24dac7f8ebc68efd3d09c819">rsb_mtx_get_prec</a> (void *opdp, const struct rsb_mtx_t *mtxAp, <a class="el" href="rsb_8h.html#a528640277b196f7cfce2016cffbdd340">rsb_precf_t</a> prec_flags, const void * [...]
+<tr class="memitem:gad9a3eacd54fb7043464006cd57866edf"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gad9a3eacd54fb7043464006cd57866edf">rsb_mtx_get_info</a> (const struct rsb_mtx_t *mtxAp, enum <a class="el" href="group__rsb__doc__rsb.html#ga211914bd1afe8044a70dc864f3c1fc8f">rsb_mif_t</a> miflags, void *min [...]
+<tr class="memitem:ga2b7d51b9822f73d2fe7fcf5b9d0be1e9"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga2b7d51b9822f73d2fe7fcf5b9d0be1e9">rsb_mtx_get_info_str</a> (const struct rsb_mtx_t *mtxAp, const <a class="el" href="rsb_8h.html#a10ec0af478bcccdab11545b106678ef6">rsb_char_t</a> *mis, void *minfop, size_t  [...]
+<tr class="memitem:ga6a645ce89fd167d72c92cdcfbcd8ed81"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga6a645ce89fd167d72c92cdcfbcd8ed81">rsb_mtx_get_nrm</a> (const struct rsb_mtx_t *mtxAp, void *Np, enum <a class="el" href="group__rsb__doc__rsb.html#ga14750ca720fd92a2be879a59ae36dfe9">rsb_extff_t</a> flags)< [...]
+<tr class="memitem:gad0b2352cea6b7512b466d1c51327fcf8"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gad0b2352cea6b7512b466d1c51327fcf8">rsb_mtx_get_vec</a> (const struct rsb_mtx_t *mtxAp, void *Dp, enum <a class="el" href="group__rsb__doc__rsb.html#ga14750ca720fd92a2be879a59ae36dfe9">rsb_extff_t</a> flags)< [...]
+<tr class="memitem:ga6677d4e20c00bdf4ebf53567246f5693"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#ab7a0af874a2765e9271a63ee4acf3d5d">rsb_time_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga6677d4e20c00bdf4ebf53567246f5693">rsb_time</a> (void)</td></tr>
+<tr class="memitem:gafca80e53d47a7ec3eb116e755fe47c58"><td class="memItemLeft" align="right" valign="top">struct rsb_mtx_t * </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gafca80e53d47a7ec3eb116e755fe47c58">rsb_mtx_alloc_from_coo_begin</a> (<a class="el" href="rsb_8h.html#a46b3366e54a5b4dda754a6ace22264df">rsb_nnz_idx_t</a> nnzA, <a class="el" href="rsb_8h.html#ac0f6a03345c8874f6e50f0ed033d984b">rsb_type_t</a> typecode, <a class="el" hre [...]
+<tr class="memitem:gab583fbefa0a66e9d30dac034480c2d86"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#gab583fbefa0a66e9d30dac034480c2d86">rsb_mtx_alloc_from_coo_end</a> (struct rsb_mtx_t **mtxApp)</td></tr>
+<tr class="memitem:ga8c11024d248e2e686476fd9e89aa7c15"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a> (struct rsb_mtx_t **mtxOpp, <a class="el" href="rsb_8h.html#ab6fedd060aee0dd9f61f0438987a99a9">rsb_real_t</a> *sfp, <a class="el" href="rsb_8h.html#aefcdc [...]
+<tr class="memitem:ga8d7a05bbc165bd6ac20e8e23487a5871"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="group__rsb__doc__rsb.html#ga8d7a05bbc165bd6ac20e8e23487a5871">rsb_tune_spsm</a> (struct rsb_mtx_t **mtxOpp, <a class="el" href="rsb_8h.html#ab6fedd060aee0dd9f61f0438987a99a9">rsb_real_t</a> *sfp, <a class="el" href="rsb_8h.html#aefcdc [...]
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>Implementation of the library user interface. </p>
+<dl class="section author"><dt>Author</dt><dd>Michele Martone </dd></dl>
+</div><h2>Macro Definition Documentation</h2>
+<a class="anchor" id="a6a77bece998693a0c9bc500d444eb8a3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_EXPOSE_NEW_GENERAL_INTERFACE   1	/* temporary (internals) to delimit the new interface which supersedes the deprecated one */</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="a85dee9bd15f321bfac4a8f055f072d1b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_INTERFACE_RETURN_ERR</td>
+          <td>(</td>
+          <td class="paramtype"> </td>
+          <td class="paramname">ERRVAL</td><td>)</td>
+          <td>   RSB_INTERFACE_ENDCMD RSB_DO_ERR_RETURN_INTERFACE(ERRVAL)</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="abd0f924354130cfb2cbe4b8345dbc6fd"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_INTERFACE_RETURN_MTX</td>
+          <td>(</td>
+          <td class="paramtype"> </td>
+          <td class="paramname">MTXAP</td><td>)</td>
+          <td>   RSB_INTERFACE_ENDCMD return MTXAP;</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<a class="anchor" id="af6ebbe2e678aef616abb33526b312f65"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_INTERFACE_RETURN_MTX_ERRP</td>
+          <td>(</td>
+          <td class="paramtype"> </td>
+          <td class="paramname">MTXAP, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"> </td>
+          <td class="paramname">ERRVAL, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype"> </td>
+          <td class="paramname">ERRVALP </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<b>Value:</b><div class="fragment"><div class="line">RSB_INTERFACE_ENDCMD \</div>
+<div class="line">        RSB_CONDITIONAL_ERRPSET(ERRVALP,ERRVAL) RSB_DO_MTX_RETURN_INTERFACE(MTXAP,ERRVAL);</div>
+</div><!-- fragment -->
+</div>
+</div>
+<a class="anchor" id="a619e228eb1a40cb1ae303be5ca6fa2ed"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_INTERFACE_RETURN_VAL</td>
+          <td>(</td>
+          <td class="paramtype"> </td>
+          <td class="paramname">VAL</td><td>)</td>
+          <td>   RSB_INTERFACE_ENDCMD {return (VAL);}</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+</div>
+</div>
+<h2>Function Documentation</h2>
+<a class="anchor" id="a96a28efc32dd050d2a74208b3ad2f227"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_lib_get_opt </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6">rsb_opt_t</a> </td>
+          <td class="paramname"><em>iof</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">void * </td>
+          <td class="paramname"><em>iop</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Gets the value of a library option. The value selected by the request flag <code>iof</code> will be fetched from the library internal state and <code>*iop</code> will be updated accordingly.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">iof</td><td>library options flags. See <a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6" title="library option values for rsb_lib_init, rsb_lib_set_opt_str, rsb_lib_reinit, rsb_lib_exit, rsb_lib_get_opt, rsb_lib_set_opt, or (deprecated) macros RSB_REINIT_SINGLE_VALUE_GET, RSB_REINIT_SINGLE_VALUE_SET, RSB_REINIT_SINGLE_VALUE, RSB_REINIT_SINGLE_VALUE_C_IOP..">rsb_opt_t</a> for a list of valid options. </td></tr>
+    <tr><td class="paramname">iop</td><td>library options value output pointer (pointed location will be updated). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="rsb_8h.html#ae6f837f13f6413a163f2c6b0c02dadf2">RSB_REINIT_SINGLE_VALUE_GET</a>, <a class="el" href="rsb_8h.html#a20da3b07d4c17771762413010816e36e">RSB_REINIT_SINGLE_VALUE_SET</a>, <a class="el" href="rsb_8h.html#afeb783fe4dca5762623a621b7095dd01">RSB_REINIT_SINGLE_VALUE</a>, <a class="el" href="rsb_8h.html#aa0ca08a816983bc6294317d0e22e0509">RSB_REINIT_SINGLE_VALUE_C_IOP</a> </dd>
+<dd>
+<a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4670aa682e70f82d5039c600e426a368">rsb_lib_set_opt_str</a>, <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit</a>, <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a>, <a class="el" href="rsb__rsb_8c.html#a96a28efc32dd050d2a74208b3ad2f227">rsb_lib_get [...]
+
+</div>
+</div>
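+<p>An illustrative usage sketch (not part of the upstream reference text; it assumes the library has already been initialized via <code>rsb_lib_init</code>, and uses the <code>rsb_int_t</code>-valued option key <code>RSB_IO_WANT_EXECUTING_THREADS</code>): </p>
+<pre class="fragment">
+rsb_int_t wet = 0; /* output location: the pointed location will be updated */
+rsb_err_t errval = rsb_lib_get_opt(RSB_IO_WANT_EXECUTING_THREADS, &wet);
+if (errval != RSB_ERR_NO_ERROR)
+        ; /* handle the error here */
+</pre>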
+<a class="anchor" id="a2a08c5a23f3999fe8cf36440680e4a05"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#a640e84bcc5268cd92d5d31fd6ac321b8">rsb_err_t</a> rsb_lib_set_opt </td>
+          <td>(</td>
+          <td class="paramtype">enum <a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6">rsb_opt_t</a> </td>
+          <td class="paramname"><em>iof</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const void * </td>
+          <td class="paramname"><em>iop</em> </td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Sets the value of a library option. The value selected by the request flag <code>iof</code> will be read from <code>*iop</code> and used to update the corresponding option in the library internal state.</p>
+<dl class="params"><dt>Parameters</dt><dd>
+  <table class="params">
+    <tr><td class="paramname">iof</td><td>library options flags. See <a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6" title="library option values for rsb_lib_init, rsb_lib_set_opt_str, rsb_lib_reinit, rsb_lib_exit, rsb_lib_get_opt, rsb_lib_set_opt, or (deprecated) macros RSB_REINIT_SINGLE_VALUE_GET, RSB_REINIT_SINGLE_VALUE_SET, RSB_REINIT_SINGLE_VALUE, RSB_REINIT_SINGLE_VALUE_C_IOP..">rsb_opt_t</a> for a list of valid options. </td></tr>
+    <tr><td class="paramname">iop</td><td>library options value output pointer (pointed location will be updated). </td></tr>
+  </table>
+  </dd>
+</dl>
+<dl class="section see"><dt>See Also</dt><dd><a class="el" href="rsb_8h.html#ae6f837f13f6413a163f2c6b0c02dadf2">RSB_REINIT_SINGLE_VALUE_GET</a>, <a class="el" href="rsb_8h.html#a20da3b07d4c17771762413010816e36e">RSB_REINIT_SINGLE_VALUE_SET</a>, <a class="el" href="rsb_8h.html#afeb783fe4dca5762623a621b7095dd01">RSB_REINIT_SINGLE_VALUE</a>, <a class="el" href="rsb_8h.html#aa0ca08a816983bc6294317d0e22e0509">RSB_REINIT_SINGLE_VALUE_C_IOP</a> </dd>
+<dd>
+<a class="el" href="group__rsb__doc__rsb.html#gaf2b874d9f117ee6a6899634472b17946">rsb_lib_init</a>, <a class="el" href="group__rsb__doc__rsb.html#ga4670aa682e70f82d5039c600e426a368">rsb_lib_set_opt_str</a>, <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit</a>, <a class="el" href="group__rsb__doc__rsb.html#ga86db30487afe975ed18a7aa6ee0db81d">rsb_lib_exit</a>, <a class="el" href="rsb__rsb_8c.html#a96a28efc32dd050d2a74208b3ad2f227">rsb_lib_get [...]
+
+</div>
+</div>
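+<p>Correspondingly, a minimal sketch of setting an option (same assumptions as the <code>rsb_lib_get_opt</code> sketch above; here the value is read from the pointed location): </p>
+<pre class="fragment">
+rsb_int_t wet = 2; /* request two executing threads */
+rsb_err_t errval = rsb_lib_set_opt(RSB_IO_WANT_EXECUTING_THREADS, &wet);
+</pre>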
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:22 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/rsb__types_8h.html b/doc/html/rsb__types_8h.html
new file mode 100644
index 0000000..9242043
--- /dev/null
+++ b/doc/html/rsb__types_8h.html
@@ -0,0 +1,742 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb_types.h File Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li class="current"><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="files.html"><span>File List</span></a></li>
+      <li><a href="globals.html"><span>Globals</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#define-members">Macros</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb_types.h File Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>Macros and constants, which are type specific. <br/>
+ Here reside declarations related to the supported matrix numerical types, and other declarations depending on the build-time options. <br/>
+ If you wish to use this library with different matrix numerical types, you shall regenerate the library source code accordingly; see the README file for how to do this. <br/>
+ Only a small part of these declarations is needed by the user (see <a class="el" href="rsb__types_8h.html#matrix_type_symbols_section">matrix_type_symbols_section</a>). <br/>
+ Therefore, only the commented declarations are actually meant to be used in functions; please regard the remaining ones as internal.  
+<a href="#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="define-members"></a>
+Macros</h2></td></tr>
+<tr class="memitem:af66941d5b1f1595c29f9c7e131d22242"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#af66941d5b1f1595c29f9c7e131d22242">RSB_LIBRSB_VER_STRING</a>   "1.2.0"</td></tr>
+<tr class="memitem:a8bc9584f994ecb2639ee548156562aae"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a8bc9584f994ecb2639ee548156562aae">RSB_HEADER_VERSION_STRING</a>   "librsb version 1.2.0-rc2 - June 30, 2015"</td></tr>
+<tr class="memitem:a7fd4e640e7aa86fdce8f3d25ac230b5c"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a7fd4e640e7aa86fdce8f3d25ac230b5c">RSB_LIBRSB_VER_MAJOR</a>   1</td></tr>
+<tr class="memitem:af8d3f63778c3120b14c3126259872cfe"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#af8d3f63778c3120b14c3126259872cfe">RSB_LIBRSB_VER_MINOR</a>   2</td></tr>
+<tr class="memitem:ab3384c84112fe759dc57c5dd206a0cde"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#ab3384c84112fe759dc57c5dd206a0cde">RSB_LIBRSB_VER_PATCH</a>   0</td></tr>
+<tr class="memitem:a08fbe9d2c97a5b73bdad3dbe1402c83b"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a08fbe9d2c97a5b73bdad3dbe1402c83b">RSB_LIBRSB_VER</a>   10200</td></tr>
+<tr class="memitem:ae26b1dec914b2cf2f233c07d2f4815d1"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#ae26b1dec914b2cf2f233c07d2f4815d1">RSB_LIBRSB_VER_DATE</a>   RSB_M4_WANT_RSB_LIBRSB_VER_DATE</td></tr>
+<tr class="memitem:a50018495517829b14797a568788e1526"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a50018495517829b14797a568788e1526">RSB_HAVE_TYPE_DOUBLE</a>   1</td></tr>
+<tr class="memitem:a82f77f519ff60dffac284034c12d2635"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a82f77f519ff60dffac284034c12d2635">RSB_HAVE_TYPE_FLOAT</a>   1</td></tr>
+<tr class="memitem:a782af474ca5eba101233fc265965fbbb"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a782af474ca5eba101233fc265965fbbb">RSB_HAVE_TYPE_FLOAT_COMPLEX</a>   1</td></tr>
+<tr class="memitem:a922101e7269ccc3184935c451b606a2c"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a922101e7269ccc3184935c451b606a2c">RSB_HAVE_TYPE_DOUBLE_COMPLEX</a>   1</td></tr>
+<tr class="memitem:aa5e96f00841ec8f4f3ca1ff0bf1b5bbd"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#aa5e96f00841ec8f4f3ca1ff0bf1b5bbd">RSB_DEFAULT_TYPE</a>   double</td></tr>
+<tr class="memitem:ab2ec9d6e0af8a10a032d597423fef559"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#ab2ec9d6e0af8a10a032d597423fef559">RSB_DEFAULT_POSSIBLY_INTEGER_TYPE</a>   double</td></tr>
+<tr class="memitem:ae7da5c374c2384c32084fc50ede06a4e"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#ae7da5c374c2384c32084fc50ede06a4e">RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE</a>   float</td></tr>
+<tr class="memitem:acf1cad553e2bb07697c34bc5a6123ca1"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#acf1cad553e2bb07697c34bc5a6123ca1">RSB_DEFAULT_TYPE_STRING</a>   "double"</td></tr>
+<tr class="memitem:a2a35f3f9a39d1b2016cf6aae4bfbf3e4"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a2a35f3f9a39d1b2016cf6aae4bfbf3e4">RSB_DEFAULT_POSSIBLY_INTEGER_TYPE_STRING</a>   "double"</td></tr>
+<tr class="memitem:a898310ae6ad07802d6d261b6053cc3c5"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a898310ae6ad07802d6d261b6053cc3c5">RSB_DEFAULT_SYMMETRY</a>   RSB_SYMMETRY_U</td></tr>
+<tr class="memitem:a2fb899b07173e590c8a13ae2b32ca383"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a2fb899b07173e590c8a13ae2b32ca383">RSB_DEFAULT_TRANSPOSITION</a>   <a class="el" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a></td></tr>
+<tr class="memitem:a9fcc01fb97c5b5482be8ab4cd7c2ee33"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a9fcc01fb97c5b5482be8ab4cd7c2ee33">RSB_ROWS_TRANSPOSITIONS_ARRAY</a>   {<a class="el" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a>, <a class="el" href="rsb__types_8h.html#a37f8cea71946de2f832bdb9d438d5edf">RSB_TRANSPOSITION_T</a>, <a c [...]
+<tr class="memitem:a4abf98873753295350143ca544b79db3"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a4abf98873753295350143ca544b79db3">RSB_TYPE_INDEX_DOUBLE</a>   0</td></tr>
+<tr class="memitem:a8d5222339367566d624a1e678d116d0d"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a8d5222339367566d624a1e678d116d0d">RSB_TYPE_INDEX_FLOAT</a>   1</td></tr>
+<tr class="memitem:a8445bf2e852a4b20d178ae4b475f4552"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a8445bf2e852a4b20d178ae4b475f4552">RSB_TYPE_INDEX_FLOAT_COMPLEX</a>   2</td></tr>
+<tr class="memitem:a1a13d13b3c7f84e7fc8ca1df3878a07d"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a1a13d13b3c7f84e7fc8ca1df3878a07d">RSB_TYPE_INDEX_DOUBLE_COMPLEX</a>   3</td></tr>
+<tr><td colspan="2"><div class="groupHeader">Values for valid matrix transposition flags.</div></td></tr>
+<tr><td colspan="2"><div class="groupText"><p><a class="anchor" id="matrix_transposition_flags_section"></a>The Hermitian flag will act as simple transposed, for non complex types. </p>
+</div></td></tr>
+<tr class="memitem:a9673f34330af77b1c0fd4a585e0c62cc"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a>   0x4E</td></tr>
+<tr class="memitem:a37f8cea71946de2f832bdb9d438d5edf"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a37f8cea71946de2f832bdb9d438d5edf">RSB_TRANSPOSITION_T</a>   0x54</td></tr>
+<tr class="memitem:abd3aaf223656dece97dee2107e485217"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#abd3aaf223656dece97dee2107e485217">RSB_TRANSPOSITION_C</a>   0x43</td></tr>
+<tr><td colspan="2"><div class="groupHeader">Valid symbol values for matrix numerical type specification -- type codes -- (type \see #rsb_type_t).</div></td></tr>
+<tr><td colspan="2"><div class="groupText"><p><a class="anchor" id="matrix_type_symbols_section"></a></p>
+</div></td></tr>
+<tr class="memitem:a532c3e9733221d59bac99cb1f795d266"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a532c3e9733221d59bac99cb1f795d266">RSB_NUMERICAL_TYPE_SAME_TYPE</a>   1</td></tr>
+<tr class="memitem:a7849bc51eadedaa51a1b27569be89d86"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a7849bc51eadedaa51a1b27569be89d86">RSB_NUMERICAL_TYPE_DOUBLE</a>   'D'</td></tr>
+<tr class="memitem:a532c3e9733221d59bac99cb1f795d266"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a532c3e9733221d59bac99cb1f795d266">RSB_NUMERICAL_TYPE_SAME_TYPE</a>   1</td></tr>
+<tr class="memitem:a7628cd01c7e84e4ada529b3412d118b3"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a7628cd01c7e84e4ada529b3412d118b3">RSB_NUMERICAL_TYPE_FLOAT</a>   'S'</td></tr>
+<tr class="memitem:a532c3e9733221d59bac99cb1f795d266"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a532c3e9733221d59bac99cb1f795d266">RSB_NUMERICAL_TYPE_SAME_TYPE</a>   1</td></tr>
+<tr class="memitem:ac46f79bff4499a5e8b6075150ecabf69"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#ac46f79bff4499a5e8b6075150ecabf69">RSB_NUMERICAL_TYPE_FLOAT_COMPLEX</a>   'C'</td></tr>
+<tr class="memitem:a532c3e9733221d59bac99cb1f795d266"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a532c3e9733221d59bac99cb1f795d266">RSB_NUMERICAL_TYPE_SAME_TYPE</a>   1</td></tr>
+<tr class="memitem:a51ca2ff55d0c852f659f5c76ecd536cd"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a51ca2ff55d0c852f659f5c76ecd536cd">RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX</a>   'Z'</td></tr>
+<tr class="memitem:a17195a2481a24153b99f2be1f0577ff1"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a17195a2481a24153b99f2be1f0577ff1">RSB_NUMERICAL_TYPE_FORTRAN_SAME_TYPE</a>   1</td></tr>
+<tr class="memitem:a16d646278df635b6e4fc57c43241fb98"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a16d646278df635b6e4fc57c43241fb98">RSB_NUMERICAL_TYPE_FORTRAN_INT</a>   ICHAR('I')</td></tr>
+<tr class="memitem:af465e222cfdede5b5df9a26a35b5e115"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#af465e222cfdede5b5df9a26a35b5e115">RSB_NUMERICAL_TYPE_FORTRAN_DOUBLE</a>   ICHAR('D')</td></tr>
+<tr class="memitem:a262db8d5b52285bd503cc1e60039135a"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a262db8d5b52285bd503cc1e60039135a">RSB_NUMERICAL_TYPE_FORTRAN_FLOAT</a>   ICHAR('S')</td></tr>
+<tr class="memitem:a3bab97530d248482496ac20667e102f4"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a3bab97530d248482496ac20667e102f4">RSB_NUMERICAL_TYPE_FORTRAN_FLOAT_COMPLEX</a>   ICHAR('C')</td></tr>
+<tr class="memitem:a2fc48337d7c3ac2cd4e9e509c73edbf9"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a2fc48337d7c3ac2cd4e9e509c73edbf9">RSB_NUMERICAL_TYPE_FORTRAN_DOUBLE_COMPLEX</a>   ICHAR('Z')</td></tr>
+<tr class="memitem:a56fc5ef14266266227797621e0a1e217"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a56fc5ef14266266227797621e0a1e217">RSB_NUMERICAL_TYPE_DEFAULT</a>   <a class="el" href="rsb__types_8h.html#a7849bc51eadedaa51a1b27569be89d86">RSB_NUMERICAL_TYPE_DOUBLE</a></td></tr>
+<tr class="memitem:a70b99562829107b4fe1f529aacd4729a"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a70b99562829107b4fe1f529aacd4729a">RSB_NUMERICAL_TYPE_DEFAULT_INTEGER</a>   <a class="el" href="rsb__types_8h.html#a7849bc51eadedaa51a1b27569be89d86">RSB_NUMERICAL_TYPE_DOUBLE</a></td></tr>
+<tr class="memitem:ac418f097835ff41e0baaf5635d21b6f9"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#ac418f097835ff41e0baaf5635d21b6f9">RSB_NUMERICAL_TYPE_INVALID_TYPE</a>   '?'</td></tr>
+<tr class="memitem:ac51619f9cbe0a9a4cbc55e0451bfb59d"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#ac51619f9cbe0a9a4cbc55e0451bfb59d">RSB_NUMERICAL_TYPE_FIRST_BLAS</a>   <a class="el" href="rsb__types_8h.html#a7628cd01c7e84e4ada529b3412d118b3">RSB_NUMERICAL_TYPE_FLOAT</a></td></tr>
+<tr class="memitem:a6ea10439ed32405f43a9f5e6c9b64787"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#a6ea10439ed32405f43a9f5e6c9b64787">RSB_CHAR_AS_TRANSPOSITION</a>(TRANSC)</td></tr>
+<tr><td colspan="2"><div class="groupHeader">Miscellaneous constants.</div></td></tr>
+<tr class="memitem:aef1f5467f82116857e5003daa0f75ccd"><td class="memItemLeft" align="right" valign="top">#define </td><td class="memItemRight" valign="bottom"><a class="el" href="rsb__types_8h.html#aef1f5467f82116857e5003daa0f75ccd">RSB_CONST_MAX_TUNING_ROUNDS</a>   16</td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>Macros and constants, which are type specific. <br/>
+ Here reside declarations related to supported matrix numerical types, and other declarations according to the build time options. <br/>
+ If you wish to use this library with different matrix numerical types, you shall regenerate the library source code accordingly; see the README file how to do this. <br/>
+ Only a small part of these declarations is needed to the user (see <a class="el" href="rsb__types_8h.html#matrix_type_symbols_section">matrix_type_symbols_section</a>). <br/>
+ Therefore, only the declarations which are commented are actually meant to be used in functions; please regard the remaining ones as internal. </p>
+</div><h2>Macro Definition Documentation</h2>
+<a class="anchor" id="a6ea10439ed32405f43a9f5e6c9b64787"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_CHAR_AS_TRANSPOSITION</td>
+          <td>(</td>
+          <td class="paramtype"> </td>
+          <td class="paramname">TRANSC</td><td>)</td>
+          <td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<b>Value:</b><div class="fragment"><div class="line">(                                                                                                               \</div>
+<div class="line">                (TRANSC) == (<span class="charliteral">'N'</span>) ? (<a class="code" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a>) :             \</div>
+<div class="line">                (TRANSC) == (<span class="charliteral">'n'</span>) ? (<a class="code" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a>) :             \</div>
+<div class="line">                (TRANSC) == (<span class="charliteral">'T'</span>) ? (<a class="code" href="rsb__types_8h.html#a37f8cea71946de2f832bdb9d438d5edf">RSB_TRANSPOSITION_T</a>) :             \</div>
+<div class="line">                (TRANSC) == (<span class="charliteral">'t'</span>) ? (<a class="code" href="rsb__types_8h.html#a37f8cea71946de2f832bdb9d438d5edf">RSB_TRANSPOSITION_T</a>) :             \</div>
+<div class="line">                (TRANSC) == (<span class="charliteral">'C'</span>) ? (<a class="code" href="rsb__types_8h.html#abd3aaf223656dece97dee2107e485217">RSB_TRANSPOSITION_C</a>) :             \</div>
+<div class="line">                (TRANSC) == (<span class="charliteral">'c'</span>) ? (<a class="code" href="rsb__types_8h.html#abd3aaf223656dece97dee2107e485217">RSB_TRANSPOSITION_C</a>) :             \</div>
+<div class="line">                <span class="charliteral">'?'</span>                                                                                             \</div>
+<div class="line">)</div>
+</div><!-- fragment --><p>Get the right transposition flag out of one of the characters n, t, c (in either case). </p>
+
+</div>
+</div>
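+<p>For illustration (a sketch, not from the header documentation itself), the macro maps BLAS-style character arguments onto <code>rsb_trans_t</code> values: </p>
+<pre class="fragment">
+rsb_trans_t transA = RSB_CHAR_AS_TRANSPOSITION('t'); /* yields RSB_TRANSPOSITION_T (0x54) */
+rsb_trans_t transB = RSB_CHAR_AS_TRANSPOSITION('x'); /* unrecognized character: yields '?' */
+</pre>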
+<a class="anchor" id="aef1f5467f82116857e5003daa0f75ccd"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_CONST_MAX_TUNING_ROUNDS   16</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Maximal number of tuning rounds in one invocation of rsb_tune_spmm/rsb_tune_spsm. </p>
+
+</div>
+</div>
+<a class="anchor" id="ae7da5c374c2384c32084fc50ede06a4e"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE   float</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The default numerical type, a BLAS one if possible (can be used for declarations). </p>
+
+</div>
+</div>
+<a class="anchor" id="ab2ec9d6e0af8a10a032d597423fef559"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_DEFAULT_POSSIBLY_INTEGER_TYPE   double</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The default numerical type, an integer one if possible (can be used for declarations). </p>
+
+</div>
+</div>
+<a class="anchor" id="a2a35f3f9a39d1b2016cf6aae4bfbf3e4"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_DEFAULT_POSSIBLY_INTEGER_TYPE_STRING   "double"</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A string specifying the name of the default possibly integer type. </p>
+
+</div>
+</div>
+<a class="anchor" id="a898310ae6ad07802d6d261b6053cc3c5"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_DEFAULT_SYMMETRY   RSB_SYMMETRY_U</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The default symmetry flag. </p>
+
+</div>
+</div>
+<a class="anchor" id="a2fb899b07173e590c8a13ae2b32ca383"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_DEFAULT_TRANSPOSITION   <a class="el" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The default transposition flag (no transposition). </p>
+
+</div>
+</div>
+<a class="anchor" id="aa5e96f00841ec8f4f3ca1ff0bf1b5bbd"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_DEFAULT_TYPE   double</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The default numerical matrix type (can be used for declarations), used in the example programs. </p>
+
+</div>
+</div>
+<a class="anchor" id="acf1cad553e2bb07697c34bc5a6123ca1"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_DEFAULT_TYPE_STRING   "double"</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A string specifying the name of the default type. </p>
+
+</div>
+</div>
+<a class="anchor" id="a50018495517829b14797a568788e1526"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_HAVE_TYPE_DOUBLE   1</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Type double is supported, so RSB_HAVE_TYPE_DOUBLE is defined. </p>
+
+</div>
+</div>
+<a class="anchor" id="a922101e7269ccc3184935c451b606a2c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_HAVE_TYPE_DOUBLE_COMPLEX   1</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Type double complex is supported, so RSB_HAVE_TYPE_DOUBLE_COMPLEX is defined. </p>
+
+</div>
+</div>
+<a class="anchor" id="a82f77f519ff60dffac284034c12d2635"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_HAVE_TYPE_FLOAT   1</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Type float is supported, so RSB_HAVE_TYPE_FLOAT is defined. </p>
+
+</div>
+</div>
+<a class="anchor" id="a782af474ca5eba101233fc265965fbbb"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_HAVE_TYPE_FLOAT_COMPLEX   1</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Type float complex is supported, so RSB_HAVE_TYPE_FLOAT_COMPLEX is defined. </p>
+
+</div>
+</div>
+<a class="anchor" id="a8bc9584f994ecb2639ee548156562aae"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_HEADER_VERSION_STRING   "librsb version 1.2.0-rc2 - June 30, 2015"</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Library header version string. </p>
+
+</div>
+</div>
+<a class="anchor" id="a08fbe9d2c97a5b73bdad3dbe1402c83b"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_LIBRSB_VER   10200</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Version number. </p>
+
+</div>
+</div>
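+<p>A typical compile-time use of this macro (an illustrative sketch) is guarding code that requires at least a given librsb interface version: </p>
+<pre class="fragment">
+#if defined(RSB_LIBRSB_VER) && (RSB_LIBRSB_VER >= 10200)
+/* code relying on the librsb 1.2 interface */
+#endif
+</pre>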
+<a class="anchor" id="ae26b1dec914b2cf2f233c07d2f4815d1"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_LIBRSB_VER_DATE   RSB_M4_WANT_RSB_LIBRSB_VER_DATE</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Version release date. </p>
+
+</div>
+</div>
+<a class="anchor" id="a7fd4e640e7aa86fdce8f3d25ac230b5c"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_LIBRSB_VER_MAJOR   1</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Major version. </p>
+
+</div>
+</div>
+<a class="anchor" id="af8d3f63778c3120b14c3126259872cfe"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_LIBRSB_VER_MINOR   2</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Minor version. </p>
+
+</div>
+</div>
+<a class="anchor" id="ab3384c84112fe759dc57c5dd206a0cde"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_LIBRSB_VER_PATCH   0</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Patch version. </p>
+
+</div>
+</div>
+<a class="anchor" id="af66941d5b1f1595c29f9c7e131d22242"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_LIBRSB_VER_STRING   "1.2.0"</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Library version string. </p>
+
+</div>
+</div>
+<a class="anchor" id="a56fc5ef14266266227797621e0a1e217"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_NUMERICAL_TYPE_DEFAULT   <a class="el" href="rsb__types_8h.html#a7849bc51eadedaa51a1b27569be89d86">RSB_NUMERICAL_TYPE_DOUBLE</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A default numerical matrix type. </p>
+
+</div>
+</div>
+<a class="anchor" id="a70b99562829107b4fe1f529aacd4729a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_NUMERICAL_TYPE_DEFAULT_INTEGER   <a class="el" href="rsb__types_8h.html#a7849bc51eadedaa51a1b27569be89d86">RSB_NUMERICAL_TYPE_DOUBLE</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A default numerical matrix type; if possible, an integer one. </p>
+
+</div>
+</div>
+<a class="anchor" id="a7849bc51eadedaa51a1b27569be89d86"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_NUMERICAL_TYPE_DOUBLE   'D'</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Character code for type double. </p>
+
+</div>
+</div>
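+<p>Type codes like this one are what <code>rsb_type_t</code>-typed arguments consume; e.g. (a sketch, with the COO arrays <code>VA</code>, <code>IA</code>, <code>JA</code> and the sizes assumed to be declared and filled elsewhere): </p>
+<pre class="fragment">
+rsb_err_t errval = RSB_ERR_NO_ERROR;
+struct rsb_mtx_t *mtxAp = rsb_mtx_alloc_from_coo_const(
+        VA, IA, JA, nnzA,          /* double-typed COO arrays and their length */
+        RSB_NUMERICAL_TYPE_DOUBLE, /* the 'D' type code */
+        nrA, ncA, 0, 0,            /* dimensions; 0,0 requests default blocking */
+        RSB_FLAG_NOFLAGS, &errval);
+</pre>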
+<a class="anchor" id="a51ca2ff55d0c852f659f5c76ecd536cd"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX   'Z'</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Character code for type double complex. </p>
+
+</div>
+</div>
+<a class="anchor" id="ac51619f9cbe0a9a4cbc55e0451bfb59d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_NUMERICAL_TYPE_FIRST_BLAS   <a class="el" href="rsb__types_8h.html#a7628cd01c7e84e4ada529b3412d118b3">RSB_NUMERICAL_TYPE_FLOAT</a></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A default numerical matrix type; if possible, a non-integer one. If no such type is configured in, then the invalid type. </p>
+
+</div>
+</div>
+<a class="anchor" id="a7628cd01c7e84e4ada529b3412d118b3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_NUMERICAL_TYPE_FLOAT   'S'</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Character code for type float. </p>
+
+</div>
+</div>
+<a class="anchor" id="ac46f79bff4499a5e8b6075150ecabf69"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_NUMERICAL_TYPE_FLOAT_COMPLEX   'C'</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Character code for type float complex. </p>
+
+</div>
+</div>
+<a class="anchor" id="af465e222cfdede5b5df9a26a35b5e115"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_NUMERICAL_TYPE_FORTRAN_DOUBLE   ICHAR('D')</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Character code for type double, to be used (only) from Fortran. </p>
+
+</div>
+</div>
+<a class="anchor" id="a2fc48337d7c3ac2cd4e9e509c73edbf9"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_NUMERICAL_TYPE_FORTRAN_DOUBLE_COMPLEX   ICHAR('Z')</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Character code for type double complex, to be used (only) from Fortran. </p>
+
+</div>
+</div>
+<a class="anchor" id="a262db8d5b52285bd503cc1e60039135a"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_NUMERICAL_TYPE_FORTRAN_FLOAT   ICHAR('S')</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Character code for type float, to be used (only) from Fortran. </p>
+
+</div>
+</div>
+<a class="anchor" id="a3bab97530d248482496ac20667e102f4"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_NUMERICAL_TYPE_FORTRAN_FLOAT_COMPLEX   ICHAR('C')</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Character code for type float complex, to be used (only) from Fortran. </p>
+
+</div>
+</div>
+<a class="anchor" id="a16d646278df635b6e4fc57c43241fb98"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_NUMERICAL_TYPE_FORTRAN_INT   ICHAR('I')</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>Character code for type int, to be used (only) from Fortran. </p>
+
+</div>
+</div>
+<a class="anchor" id="a17195a2481a24153b99f2be1f0577ff1"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_NUMERICAL_TYPE_FORTRAN_SAME_TYPE   1</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A bogus type flag specifying that no type conversion is requested. </p>
+
+</div>
+</div>
+<a class="anchor" id="ac418f097835ff41e0baaf5635d21b6f9"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_NUMERICAL_TYPE_INVALID_TYPE   '?'</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>By definition, an invalid type code. </p>
+
+</div>
+</div>
+<a class="anchor" id="a532c3e9733221d59bac99cb1f795d266"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_NUMERICAL_TYPE_SAME_TYPE   1</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>A bogus type flag specifying that no type conversion is requested. </p>
+
+</div>
+</div>
+<a class="anchor" id="a532c3e9733221d59bac99cb1f795d266"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_NUMERICAL_TYPE_SAME_TYPE   1</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>a bogus type flag for specifying no type conversion </p>
+
+</div>
+</div>
+<a class="anchor" id="a532c3e9733221d59bac99cb1f795d266"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_NUMERICAL_TYPE_SAME_TYPE   1</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>a bogus type flag for specifying no type conversion </p>
+
+</div>
+</div>
+<a class="anchor" id="a532c3e9733221d59bac99cb1f795d266"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_NUMERICAL_TYPE_SAME_TYPE   1</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>a bogus type flag for specifying no type conversion </p>
+
+</div>
+</div>
+<a class="anchor" id="a9fcc01fb97c5b5482be8ab4cd7c2ee33"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_ROWS_TRANSPOSITIONS_ARRAY   {<a class="el" href="rsb__types_8h.html#a9673f34330af77b1c0fd4a585e0c62cc">RSB_TRANSPOSITION_N</a>, <a class="el" href="rsb__types_8h.html#a37f8cea71946de2f832bdb9d438d5edf">RSB_TRANSPOSITION_T</a>, <a class="el" href="rsb__types_8h.html#abd3aaf223656dece97dee2107e485217">RSB_TRANSPOSITION_C</a>, RSB_INVALID_TRANS }</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>An array with transposition constants. </p>
+
+</div>
+</div>
+<a class="anchor" id="abd3aaf223656dece97dee2107e485217"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_TRANSPOSITION_C   0x43</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>C: Conjugate transpose flag, valid for <a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> typed variables. </p>
+
+</div>
+</div>
+<a class="anchor" id="a9673f34330af77b1c0fd4a585e0c62cc"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_TRANSPOSITION_N   0x4E</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>N: Non transposed flag, valid for <a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> typed variables. </p>
+
+</div>
+</div>
+<a class="anchor" id="a37f8cea71946de2f832bdb9d438d5edf"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_TRANSPOSITION_T   0x54</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>T: Transposed flag, valid for <a class="el" href="rsb_8h.html#a46095ea7e61e1d1ec0ad055cf0291901">rsb_trans_t</a> typed variables. </p>
+
+</div>
+</div>
+<a class="anchor" id="a4abf98873753295350143ca544b79db3"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_TYPE_INDEX_DOUBLE   0</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>This preprocessor index can be used to address the double-related arrays. </p>
+
+</div>
+</div>
+<a class="anchor" id="a1a13d13b3c7f84e7fc8ca1df3878a07d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_TYPE_INDEX_DOUBLE_COMPLEX   3</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>This preprocessor index can be used to address the double complex-related arrays. </p>
+
+</div>
+</div>
+<a class="anchor" id="a8d5222339367566d624a1e678d116d0d"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_TYPE_INDEX_FLOAT   1</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>This preprocessor index can be used to address the float-related arrays. </p>
+
+</div>
+</div>
+<a class="anchor" id="a8445bf2e852a4b20d178ae4b475f4552"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">#define RSB_TYPE_INDEX_FLOAT_COMPLEX   2</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>This preprocessor index can be used to address the float complex-related arrays. </p>
+
+</div>
+</div>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:22 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/structrsb__initopts.html b/doc/html/structrsb__initopts.html
new file mode 100644
index 0000000..037e40c
--- /dev/null
+++ b/doc/html/structrsb__initopts.html
@@ -0,0 +1,147 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: rsb_initopts Struct Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li class="current"><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+  <div id="navrow2" class="tabs2">
+    <ul class="tablist">
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="functions.html"><span>Data Fields</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="header">
+  <div class="summary">
+<a href="#pub-attribs">Data Fields</a>  </div>
+  <div class="headertitle">
+<div class="title">rsb_initopts Struct Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p>A structure specifying library (initialization) options, to be used with the <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit()</a> function. <br/>
+ <a href="structrsb__initopts.html#details">More...</a></p>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2><a name="pub-attribs"></a>
+Data Fields</h2></td></tr>
+<tr class="memitem:a4319168f5f1183d3ea65960e7111e7ee"><td class="memItemLeft" align="right" valign="top">enum <a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6">rsb_opt_t</a> * </td><td class="memItemRight" valign="bottom"><a class="el" href="structrsb__initopts.html#a4319168f5f1183d3ea65960e7111e7ee">keys</a></td></tr>
+<tr class="memitem:a0a64d546db2c6445e4a33068cffa6694"><td class="memItemLeft" align="right" valign="top">void ** </td><td class="memItemRight" valign="bottom"><a class="el" href="structrsb__initopts.html#a0a64d546db2c6445e4a33068cffa6694">values</a></td></tr>
+<tr class="memitem:a15ff3b060d88b3d937b5e028647c0af2"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="structrsb__initopts.html#a15ff3b060d88b3d937b5e028647c0af2">n_pairs</a></td></tr>
+<tr class="memitem:ad087930c58602fd3c0761f5af3aae7ce"><td class="memItemLeft" align="right" valign="top"><a class="el" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="structrsb__initopts.html#ad087930c58602fd3c0761f5af3aae7ce">action</a></td></tr>
+</table>
+<a name="details" id="details"></a><h2>Detailed Description</h2>
+<div class="textblock"><p>A structure specifying library (initialization) options, to be used with the <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit()</a> function. <br/>
+. </p>
+<p>The structure specifies, for <code>i=0</code>,..,n_pairs-1 , a list of (key,value) pairs, stored respectively as (<code>keys</code>[i],values[i]). <br/>
+ Each flag specifies the type and possible range of values it accepts. <br/>
+ The structure may he used to set or query various library parameters.</p>
+<p>Example: </p>
+<div class="fragment"><div class="line"><span class="keyword">const</span> <span class="keywordtype">int</span> max_io=10; <span class="comment">// the number of different options we want to set</span></div>
+<div class="line"><span class="keyword">struct </span><a class="code" href="structrsb__initopts.html" title="A structure specifying library (initialization) options, to be used with the rsb_lib_reinit() functio...">rsb_initopts</a> io={NULL,NULL,0,<a class="code" href="rsb_8h.html#aef619407815752dc767cfd6870b72101">RSB_IO_SPECIFIER_SET</a>},</div>
+<div class="line">*iop=&io; <span class="comment">// pointer to the options structure</span></div>
+<div class="line"><span class="keywordtype">void</span> * io_values[max_io]; <span class="comment">// an array of pointers to max_io different option values (we shall set)</span></div>
+<div class="line"><span class="keyword">enum</span> <a class="code" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6" title="library option values for rsb_lib_init, rsb_lib_set_opt_str, rsb_lib_reinit, rsb_lib_exit, rsb_lib_get_opt, rsb_lib_set_opt, or (deprecated) macros RSB_REINIT_SINGLE_VALUE_GET, RSB_REINIT_SINGLE_VALUE_SET, RSB_REINIT_SINGLE_VALUE, RSB_REINIT_SINGLE_VALUE_C_IOP..">rsb_opt_t</a> io_keys[max_io]; <span class="comment">// an array of max_io flag value [...]
+<div class="line">io.<a class="code" href="structrsb__initopts.html#a4319168f5f1183d3ea65960e7111e7ee">keys</a>=io_keys; <span class="comment">// io.keys will now point to io_keys as its keys array</span></div>
+<div class="line">io.<a class="code" href="structrsb__initopts.html#a0a64d546db2c6445e4a33068cffa6694">values</a>=io_values; <span class="comment">// io.values will now point to io_keys as its values array</span></div>
+<div class="line">io.<a class="code" href="structrsb__initopts.html#a15ff3b060d88b3d937b5e028647c0af2">n_pairs</a>=0; <span class="comment">// we have 0 pairs specified so far</span></div>
+<div class="line">io.<a class="code" href="structrsb__initopts.html#a4319168f5f1183d3ea65960e7111e7ee">keys</a>[io.<a class="code" href="structrsb__initopts.html#a15ff3b060d88b3d937b5e028647c0af2">n_pairs</a>]=<a class="code" href="group__rsb__doc__rsb.html#ggae0bada88731b01751401847d60110fb6a3d3a5bf255dfc8719f6553e8ac4ecd53">RSB_IO_WANT_BOUNDED_BOX_COMPUTATION</a>; <span class="comment">// the first (at index 0) option we want to specify is RSB_IO_WANT_BOUNDED_BOX_COMPUTATION</span></div>
+<div class="line">io.<a class="code" href="structrsb__initopts.html#a0a64d546db2c6445e4a33068cffa6694">values</a>[io.<a class="code" href="structrsb__initopts.html#a15ff3b060d88b3d937b5e028647c0af2">n_pairs</a>]=1; <span class="comment">// the value we want to set the RSB_IO_WANT_BOUNDED_BOX_COMPUTATION option to</span></div>
+<div class="line">io.<a class="code" href="structrsb__initopts.html#a15ff3b060d88b3d937b5e028647c0af2">n_pairs</a>++; <span class="comment">// io.n_pairs is set to 1: we have one option set, so even if we have (max_io-io.n_pairs) left, only the first will be read</span></div>
+<div class="line">... <span class="comment">// we are free to specify other option (type, value) pairs</span></div>
+</div><!-- fragment --> </div><h2>Field Documentation</h2>
+<a class="anchor" id="ad087930c58602fd3c0761f5af3aae7ce"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a> rsb_initopts::action</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The action we are requesting (either one of <a class="el" href="rsb_8h.html#afd8b1de2977b2d810f9c615195d9acec">RSB_IO_SPECIFIER_GET</a> or <a class="el" href="rsb_8h.html#aef619407815752dc767cfd6870b72101">RSB_IO_SPECIFIER_SET</a>) </p>
+
+</div>
+</div>
+<a class="anchor" id="a4319168f5f1183d3ea65960e7111e7ee"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">enum <a class="el" href="group__rsb__doc__rsb.html#gae0bada88731b01751401847d60110fb6">rsb_opt_t</a>* rsb_initopts::keys</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>An array of option key flags; each key determines the type of the corresponding value. </p>
+
+</div>
+</div>
+<a class="anchor" id="a15ff3b060d88b3d937b5e028647c0af2"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="rsb_8h.html#aefcdc7de885ab34a89a0d36470e11deb">rsb_int_t</a> rsb_initopts::n_pairs</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>The number of (key,value) pairs stored in the <code>keys</code> and <code>values</code> arrays. </p>
+
+</div>
+</div>
+<a class="anchor" id="a0a64d546db2c6445e4a33068cffa6694"></a>
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname">void** rsb_initopts::values</td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+<p>An array of pointers to option values; the pointed-to type is determined by the corresponding key flag. </p>
+
+</div>
+</div>
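+<p>Once filled as in the example above, such a structure would typically be passed to <a class="el" href="group__rsb__doc__rsb.html#ga1707f8b0c28805f692146cf2fb28ae70">rsb_lib_reinit()</a> via its options pointer (a usage sketch; error checking omitted): <code>errval = rsb_lib_reinit(iop);</code></p>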
+<hr/>The documentation for this struct was generated from the following file:<ul>
+<li><a class="el" href="rsb_8h.html">rsb.h</a></li>
+</ul>
+</div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:25 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/html/sync_off.png b/doc/html/sync_off.png
new file mode 100644
index 0000000..3b443fc
Binary files /dev/null and b/doc/html/sync_off.png differ
diff --git a/doc/html/sync_on.png b/doc/html/sync_on.png
new file mode 100644
index 0000000..e08320f
Binary files /dev/null and b/doc/html/sync_on.png differ
diff --git a/doc/html/tab_a.png b/doc/html/tab_a.png
new file mode 100644
index 0000000..3b725c4
Binary files /dev/null and b/doc/html/tab_a.png differ
diff --git a/doc/html/tab_b.png b/doc/html/tab_b.png
new file mode 100644
index 0000000..258c141
Binary files /dev/null and b/doc/html/tab_b.png differ
diff --git a/doc/html/tab_h.png b/doc/html/tab_h.png
new file mode 100644
index 0000000..4ca9102
Binary files /dev/null and b/doc/html/tab_h.png differ
diff --git a/doc/html/tab_s.png b/doc/html/tab_s.png
new file mode 100644
index 0000000..ab478c9
Binary files /dev/null and b/doc/html/tab_s.png differ
diff --git a/doc/html/tabs.css b/doc/html/tabs.css
new file mode 100644
index 0000000..9cf578f
--- /dev/null
+++ b/doc/html/tabs.css
@@ -0,0 +1,60 @@
+.tabs, .tabs2, .tabs3 {
+    background-image: url('tab_b.png');
+    width: 100%;
+    z-index: 101;
+    font-size: 13px;
+    font-family: 'Lucida Grande',Geneva,Helvetica,Arial,sans-serif;
+}
+
+.tabs2 {
+    font-size: 10px;
+}
+.tabs3 {
+    font-size: 9px;
+}
+
+.tablist {
+    margin: 0;
+    padding: 0;
+    display: table;
+}
+
+.tablist li {
+    float: left;
+    display: table-cell;
+    background-image: url('tab_b.png');
+    line-height: 36px;
+    list-style: none;
+}
+
+.tablist a {
+    display: block;
+    padding: 0 20px;
+    font-weight: bold;
+    background-image:url('tab_s.png');
+    background-repeat:no-repeat;
+    background-position:right;
+    color: #283A5D;
+    text-shadow: 0px 1px 1px rgba(255, 255, 255, 0.9);
+    text-decoration: none;
+    outline: none;
+}
+
+.tabs3 .tablist a {
+    padding: 0 10px;
+}
+
+.tablist a:hover {
+    background-image: url('tab_h.png');
+    background-repeat:repeat-x;
+    color: #fff;
+    text-shadow: 0px 1px 1px rgba(0, 0, 0, 1.0);
+    text-decoration: none;
+}
+
+.tablist li.current a {
+    background-image: url('tab_a.png');
+    background-repeat:repeat-x;
+    color: #fff;
+    text-shadow: 0px 1px 1px rgba(0, 0, 0, 1.0);
+}
diff --git a/doc/html/todo.html b/doc/html/todo.html
new file mode 100644
index 0000000..965c3bc
--- /dev/null
+++ b/doc/html/todo.html
@@ -0,0 +1,82 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<title>librsb: Todo List</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <td style="padding-left: 0.5em;">
+   <div id="projectname">librsb
+    <span id="projectnumber">1.2.0-rc5</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.1.2 -->
+  <div id="navrow1" class="tabs">
+    <ul class="tablist">
+      <li><a href="index.html"><span>Main Page</span></a></li>
+      <li class="current"><a href="pages.html"><span>Related Pages</span></a></li>
+      <li><a href="modules.html"><span>Modules</span></a></li>
+      <li><a href="annotated.html"><span>Data Structures</span></a></li>
+      <li><a href="files.html"><span>Files</span></a></li>
+    </ul>
+  </div>
+</div><!-- top -->
+<div class="header">
+  <div class="headertitle">
+<div class="title">Todo List </div>  </div>
+</div><!--header-->
+<div class="contents">
+<div class="textblock"><dl class="reflist">
+<dt><a class="anchor" id="_todo000005"></a>Global <a class="el" href="classblas__sparse.html#af4e9f97f85799c5e8f60c78d40d906f3">blas_sparse::cuscr_begin</a>  (m, n, A, istat)</dt>
+<dd>Shall make <code>A</code> <code>intent(inout)</code> as well.  </dd>
+<dt><a class="anchor" id="_todo000009"></a>Global <a class="el" href="classblas__sparse.html#a6085ddf99c2459e051a6106e4a2c4785">blas_sparse::cuscr_block_begin</a>  (Mb, Nb, k, l, A, istat)</dt>
+<dd>Shall make <code>A</code> <code>intent(inout)</code> as well.  </dd>
+<dt><a class="anchor" id="_todo000013"></a>Global <a class="el" href="classblas__sparse.html#abd5c88929ed1c7133169c401881fa1c7">blas_sparse::cuscr_variable_block_begin</a>  (Mb, Nb, K, L, A, istat)</dt>
+<dd>Shall make <code>A</code> <code>intent(inout)</code> as well.  </dd>
+<dt><a class="anchor" id="_todo000004"></a>Global <a class="el" href="classblas__sparse.html#acf14608f8b0375ca133b7f850bde3b50">blas_sparse::duscr_begin</a>  (m, n, A, istat)</dt>
+<dd>Shall make <code>A</code> <code>intent(inout)</code> as well.  </dd>
+<dt><a class="anchor" id="_todo000008"></a>Global <a class="el" href="classblas__sparse.html#ab33c2f497f0a53213f38cd8449ab4349">blas_sparse::duscr_block_begin</a>  (Mb, Nb, k, l, A, istat)</dt>
+<dd>Shall make <code>A</code> <code>intent(inout)</code> as well.  </dd>
+<dt><a class="anchor" id="_todo000012"></a>Global <a class="el" href="classblas__sparse.html#ab1fd9e9f8cdd5f79134873fd6af47c28">blas_sparse::duscr_variable_block_begin</a>  (Mb, Nb, K, L, A, istat)</dt>
+<dd>Shall make <code>A</code> <code>intent(inout)</code> as well.  </dd>
+<dt><a class="anchor" id="_todo000003"></a>Global <a class="el" href="classblas__sparse.html#ae78739e1ebe48fe8b9752a43cd5c15a0">blas_sparse::suscr_begin</a>  (m, n, A, istat)</dt>
+<dd>Shall make <code>A</code> <code>intent(inout)</code> as well.  </dd>
+<dt><a class="anchor" id="_todo000007"></a>Global <a class="el" href="classblas__sparse.html#a8ccdce913bf1b8a1d30b6889611143cb">blas_sparse::suscr_block_begin</a>  (Mb, Nb, k, l, A, istat)</dt>
+<dd>Shall make <code>A</code> <code>intent(inout)</code> as well.  </dd>
+<dt><a class="anchor" id="_todo000011"></a>Global <a class="el" href="classblas__sparse.html#aab5942faf7f9fe31f9dfd13143f37dc7">blas_sparse::suscr_variable_block_begin</a>  (Mb, Nb, K, L, A, istat)</dt>
+<dd>Shall make <code>A</code> <code>intent(inout)</code> as well.  </dd>
+<dt><a class="anchor" id="_todo000006"></a>Global <a class="el" href="classblas__sparse.html#a9ec8326625fe0762e3e6e523260d2655">blas_sparse::zuscr_begin</a>  (m, n, A, istat)</dt>
+<dd>Shall make <code>A</code> <code>intent(inout)</code> as well.  </dd>
+<dt><a class="anchor" id="_todo000010"></a>Global <a class="el" href="classblas__sparse.html#a5fbd2bae9f3849fda1be4691ca3df5ea">blas_sparse::zuscr_block_begin</a>  (Mb, Nb, k, l, A, istat)</dt>
+<dd>Shall make <code>A</code> <code>intent(inout)</code> as well.  </dd>
+<dt><a class="anchor" id="_todo000014"></a>Global <a class="el" href="classblas__sparse.html#a700e8b151004b9c8829a1fe4fd331465">blas_sparse::zuscr_variable_block_begin</a>  (Mb, Nb, K, L, A, istat)</dt>
+<dd>Shall make <code>A</code> <code>intent(inout)</code> as well.  </dd>
+<dt><a class="anchor" id="_todo000001"></a>Global <a class="el" href="group__rsb__doc__rsb.html#ga8c11024d248e2e686476fd9e89aa7c15">rsb_tune_spmm</a>  (struct rsb_mtx_t **mtxOpp, rsb_real_t *sfp, rsb_int_t *tnp, rsb_int_t maxr, rsb_time_t maxt, rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t *mtxAp, rsb_coo_idx_t nrhs, rsb_flags_t order, const void *Bp, rsb_nnz_idx_t ldB, const void *betap, void *Cp, rsb_nnz_idx_t ldC)</dt>
+<dd>Autotuning functionality shall improve considerably in the future; support for lightweight, threads-only optimization is needed, and strided vectors may eventually be supported.  </dd>
+<dt><a class="anchor" id="_todo000002"></a>Global <a class="el" href="group__rsb__doc__rsb.html#ga8d7a05bbc165bd6ac20e8e23487a5871">rsb_tune_spsm</a>  (struct rsb_mtx_t **mtxOpp, rsb_real_t *sfp, rsb_int_t *tnp, rsb_int_t maxr, rsb_time_t maxt, rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t *mtxAp, rsb_coo_idx_t nrhs, rsb_flags_t order, const void *Bp, rsb_nnz_idx_t ldB, const void *betap, void *Cp, rsb_nnz_idx_t ldC)</dt>
+<dd>Autotuning functionality shall improve considerably in the future; support for lightweight, threads-only optimization is needed, and strided vectors may eventually be supported. </dd>
+</dl>
+</div></div><!-- contents -->
+<!-- start footer part -->
+<hr class="footer"/><address class="footer"><small>
+Generated on Fri Sep 2 2016 11:50:22 for librsb by  <a href="http://www.doxygen.org/index.html">
+<img class="footer" src="doxygen.png" alt="doxygen"/>
+</a> 1.8.1.2
+</small></address>
+</body>
+</html>
diff --git a/doc/man/librsb-config.3 b/doc/man/librsb-config.3
new file mode 100644
index 0000000..70f50ff
--- /dev/null
+++ b/doc/man/librsb-config.3
@@ -0,0 +1,63 @@
+.\" DO NOT MODIFY THIS FILE!  It was generated by help2man 1.40.10.
+.TH LIBRSB-CONFIG "1" "September 2016" "librsb-config 1.2.0" "User Commands"
+.SH NAME
+librsb-config \- manual page for librsb-config 1.2.0
+.SH SYNOPSIS
+.B librsb-config
+[\fIOPTION\fR] ...
+.SH DESCRIPTION
+Known values for OPTION are:
+.TP
+\fB\-\-prefix\fR
+print librsb prefix
+.TP
+\fB\-\-libdir\fR
+print path to directory containing library
+.TP
+\fB\-\-libs\fR
+print library linking information
+.TP
+\fB\-\-extra_libs\fR
+print extra linking information (e.g.: dependency libs)
+.TP
+\fB\-\-ccopts\fR
+print compiler options
+.TP
+\fB\-\-cc\fR
+print C compiler
+.TP
+\fB\-\-fc\fR
+print Fortran compiler
+.TP
+\fB\-\-cxx\fR
+print C++ compiler
+.TP
+\fB\-\-cppflags\fR
+print preprocessor flags
+.TP
+\fB\-\-cflags\fR
+print preprocessor flags, I_opts, and compiler options
+.TP
+\fB\-\-I_opts\fR
+print "\-I" include options
+.TP
+\fB\-\-L_opts\fR
+print linker "\-L" flags for dynamic linking
+.TP
+\fB\-\-R_opts\fR
+print dynamic linker "\-R" or "\-rpath" flags
+.TP
+\fB\-\-ldopts\fR
+print linker options
+.TP
+\fB\-\-ldflags\fR
+print linker flags (ldopts, L_opts, R_opts, and libs)
+.TP
+\fB\-\-static\fR
+revise subsequent outputs for static linking
+.TP
+\fB\-\-help\fR
+print this help and exit
+.TP
+\fB\-\-version\fR
+print version information
diff --git a/doc/man/man3/rsb-examples.3 b/doc/man/man3/rsb-examples.3
new file mode 100644
index 0000000..7d09daf
--- /dev/null
+++ b/doc/man/man3/rsb-examples.3
@@ -0,0 +1,1739 @@
+.TH "Example programs and code" 3 "Fri Sep 2 2016" "Version 1.2.0-rc5" "librsb" \" -*- nroff -*-
+.ad l
+.nh
+.SH NAME
+librsb \- Example programs and code
+.SH DESCRIPTION
+.PP
+Examples of usage of \fClibrsb\fP\&. 
+
+.PP
+.nf
+   The following fully working example programs illustrate correct ways of using the library.
+   The script displayed here should be sufficient to build them.
+.fi
+.PP
+ 
+.PP
+.nf
+#!/bin/bash
+# Script to build the librsb example programs\&.
+
+LIBRSB_CONFIG=${LIBRSB_CONFIG:-librsb-config}
+
+for s in *\&.c
+do
+        p=${s/\&.c/}
+        rm -f $p 
+        CFLAGS=`${LIBRSB_CONFIG} --I_opts`
+        LDFLAGS=`${LIBRSB_CONFIG} --static --ldflags --extra_libs`
+        CC=`${LIBRSB_CONFIG} --cc`
+        cmd="$CC $CFLAGS $s $LDFLAGS -o $p"
+        echo $cmd
+        $cmd
+done
+
+# replace false with true if you have built the Fortran modules and installed them in the include directory\&.
+if false ; then
+for s in *\&.F90
+do
+        p=${s/\&.F90/}
+        rm -f $p 
+        CFLAGS=`${LIBRSB_CONFIG} --I_opts`
+        LDFLAGS=`${LIBRSB_CONFIG} --static --ldflags --extra_libs`
+        FC=`${LIBRSB_CONFIG} --fc`
+        cmd="$FC $CFLAGS $s $LDFLAGS -o $p"
+        echo $cmd
+        $cmd
+done
+fi
+
+
+.fi
+.PP
+.PP
+.PP
+.nf
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb\&.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version\&.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE\&.  See the GNU Lesser General Public
+License for more details\&.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING\&.
+If not, see <http://www\&.gnu\&.org/licenses/>\&.
+
+*/
+/*!
+ \ingroup rsb-examples
+ @file
+ @author Michele Martone
+ @brief This is a first 'hello RSB' example program\&.
+
+ \include hello\&.c
+*/
+#include <rsb\&.h> /* librsb header to include */
+#include <stdio\&.h>       /* printf() */
+
+int main(const int argc, char * const argv[])
+{
+        /*!
+          A Hello-RSB program\&.
+         
+          This program shows how to use the rsb\&.h interface correctly to:
+         
+          - initialize the library using #rsb_lib_init()
+          - set library options using #rsb_lib_set_opt()
+          - revert such changes 
+          - allocate (build) a single sparse matrix in the RSB format
+            using #rsb_mtx_alloc_from_coo_const()
+          - print information obtained via #rsb_mtx_get_info_str()
+          - multiply the matrix times a vector using #rsb_spmv()
+          - deallocate the matrix using #rsb_mtx_free() 
+          - finalize the library using #rsb_lib_exit(RSB_NULL_EXIT_OPTIONS) 
+         
+          In this example, we use #RSB_DEFAULT_TYPE as matrix type\&.
+          This type depends on what was configured at library build time\&.
+         * */
+        struct rsb_mtx_t *mtxAp = NULL; /* matrix structure pointer */
+        const int bs = RSB_DEFAULT_BLOCKING;
+        const int brA = bs, bcA = bs;
+        const RSB_DEFAULT_TYPE one = 1;
+        rsb_type_t typecode = RSB_NUMERICAL_TYPE_DEFAULT;
+        rsb_err_t errval = RSB_ERR_NO_ERROR;
+        const rsb_nnz_idx_t nnzA = 4;           /* matrix nonzeroes count */
+        const rsb_coo_idx_t nrA = 3;            /* matrix rows count */
+        const rsb_coo_idx_t ncA = 3;            /* matrix columns count */
+        /* nonzero row indices coordinates: */
+        rsb_coo_idx_t IA[] = {0,1,2,2};
+        /* nonzero column indices coordinates: */
+        rsb_coo_idx_t JA[] = {0,1,2,2};
+        RSB_DEFAULT_TYPE VA[] = {11,22,32,1};/* values of nonzeroes */
+        RSB_DEFAULT_TYPE X[] = { 0, 0, 0 };     /* X vector's array */
+        const RSB_DEFAULT_TYPE B[] = { -1, -2, -5 }; /* B vector's array */
+        char ib[200];
+
+        printf("Hello, RSB!\n");
+        printf("Initializing the library\&.\&.\&.\n");
+        if((errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS)) != 
+                        RSB_ERR_NO_ERROR)
+        {
+                printf("Error initializing the library!\n");
+                goto err;
+        }
+        printf("Correctly initialized the library\&.\n");
+
+        printf("Attempting to set the"
+               " RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE library option\&.\n");
+        {
+                rsb_int_t evi=1; 
+                /* Setting a single optional library parameter\&. */
+                errval = rsb_lib_set_opt(
+                        RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE, &evi);
+                if(errval != RSB_ERR_NO_ERROR)
+                {
+                        char errbuf[256];
+                        rsb_strerror_r(errval,&errbuf[0],sizeof(errbuf));
+                        printf("Failed setting the"
+                        " RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE"
+                        " library option (reason string:\n%s)\&.\n",errbuf);
+                        if(errval&RSB_ERRS_UNSUPPORTED_FEATURES)
+                        {
+                          printf("This error may be safely ignored\&.\n");
+                        }
+                        else
+                        {
+                          printf("Some unexpected error occurred!\n");
+                          goto err;
+                        }
+                }
+                else
+                {
+                        printf("Setting back the "
+                                "RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE"
+                                " library option\&.\n");
+                        evi = 0;
+                        errval = rsb_lib_set_opt(RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE,
+                                        &evi);
+                        errval = RSB_ERR_NO_ERROR;
+                }
+        }
+
+        mtxAp = rsb_mtx_alloc_from_coo_const(
+                VA,IA,JA,nnzA,typecode,nrA,ncA,brA,bcA,
+                RSB_FLAG_NOFLAGS    /* default format will be chosen */
+                |RSB_FLAG_DUPLICATES_SUM/* duplicates will be summed */
+                        ,&errval);
+        if((!mtxAp) || (errval != RSB_ERR_NO_ERROR))
+        {
+                printf("Error while allocating the matrix!\n");
+                goto err;
+        }
+        printf("Correctly allocated a matrix\&.\n");
+        printf("Summary information of the matrix:\n");
+        /* print out the matrix summary information  */
+        rsb_mtx_get_info_str(mtxAp,"RSB_MIF_MATRIX_INFO__TO__CHAR_P",
+                        ib,sizeof(ib));
+        printf("%s",ib);
+        printf("\n");
+
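+        /* Compute X := one * A * B + one * X;
+         * B is the input vector, X is updated in place\&. */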
+        if((errval = 
+                rsb_spmv(RSB_TRANSPOSITION_N,&one,mtxAp,B,1,&one,X,1))
+                        != RSB_ERR_NO_ERROR )
+        {
+                printf("Error performing a multiplication!\n");
+                goto err;
+        }
+        printf("Correctly performed a SPMV\&.\n");
+        rsb_mtx_free(mtxAp);
+        printf("Correctly freed the matrix\&.\n");
+        if((errval = rsb_lib_exit(RSB_NULL_EXIT_OPTIONS))
+                        != RSB_ERR_NO_ERROR)
+        {
+                printf("Error finalizing the library!\n");
+                goto err;
+        }
+        printf("Correctly finalized the library\&.\n");
+        printf("Program terminating with no error\&.\n");
+        return 0;
+err:
+        rsb_perror(NULL,errval);
+        printf("Program terminating with error\&.\n");
+        return -1;
+}
+
+.fi
+.PP
+ 
+.PP
+.nf
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb\&.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version\&.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE\&.  See the GNU Lesser General Public
+License for more details\&.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING\&.
+If not, see <http://www\&.gnu\&.org/licenses/>\&.
+
+*/
+/*!
+ \ingroup rsb-examples
+ @file
+ @author Michele Martone
+ @brief This is a first 'hello RSB' example program using 
+        a Sparse BLAS interface\&.
+
+ \include hello-spblas\&.c
+*/
+#include <rsb\&.h> /* for rsb_lib_init */
+#include <blas_sparse\&.h> /* Sparse BLAS on the top of librsb */
+#include <stdio\&.h>       /* printf */
+
+int main(const int argc, char * const argv[])
+{
+        /*!
+         * A Hello/Sparse BLAS program\&.
+         *
+         * This program shows how to use the blas_sparse\&.h
+         * interface correctly to:
+         *
+         * - initialize the library using #rsb_lib_init()
+         * - allocate (build) a single sparse matrix in the RSB
+         *   format using #BLAS_duscr_begin()/#BLAS_duscr_insert_entries()
+         *   /#BLAS_duscr_end()
+         * - extract one matrix element with #BLAS_dusget_element()
+         * - multiply the matrix times a vector using #BLAS_dusmv()
+         * - deallocate the matrix using #BLAS_usds() 
+         * - finalize the library using
+         *   #rsb_lib_exit(#RSB_NULL_EXIT_OPTIONS) 
+        */
+#ifndef RSB_NUMERICAL_TYPE_DOUBLE   
+        printf("'double' type configured out\&."
+        " Please reconfigure the library with it and recompile\&.\n");
+        return 0;
+#else /* RSB_NUMERICAL_TYPE_DOUBLE */
+        blas_sparse_matrix A = blas_invalid_handle; /* handle for A */
+        const int nnz = 4;      /* number of nonzeroes of matrix A */
+        const int  nr = 3;      /* number of A's rows */
+        const int  nc = 3;      /* number of A's columns */
+        /* A's nonzero elements row indices (coordinates): */
+        int   IA[] = { 0, 1, 2, 2 };
+        /* A's nonzero elements column indices (coordinates): */
+        int   JA[] = { 0, 1, 0, 2 };
+        /* A's nonzero values (matrix coefficients): */
+        double VA[] = { 11\&.0, 22\&.0, 13\&.0, 33\&.0  };
+        /* the X vector's array: */
+        double X[] = { 0\&.0, 0\&.0, 0\&.0 };
+        /* the B vector's array: */
+        double B[] = { -1\&.0, -2\&.0, -2\&.0 };
+        /* the (known) result array: */
+        double AB[] = { 11\&.0+26\&.0, 44\&.0, 66\&.0+13\&.0 };
+        /* rsb error variable: */
+        rsb_err_t errval = RSB_ERR_NO_ERROR;
+        int i;
+
+        printf("Hello, RSB!\n");
+        /* initialize the library */
+        if((errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS)) 
+                        != RSB_ERR_NO_ERROR)
+        {
+                goto err;
+        }
+        printf("Correctly initialized the library\&.\n");
+
+        /* initialize a matrix descriptor */
+        A = BLAS_duscr_begin(nr,nc);
+        if( A == blas_invalid_handle )
+        {
+                goto err;
+        }
+        
+        /* specify properties (e\&.g\&.: symmetry)*/
+        if( BLAS_ussp(A,blas_lower_symmetric) != 0 )
+        {
+                goto err;
+        }
+
+        /* get properties (e\&.g\&.: symmetry) */
+        if( BLAS_usgp(A,blas_lower_symmetric) != 1 )
+        {
+                printf("Symmetry property not set ?!\n");
+                goto err;
+        }
+
+        /* insert the nonzeroes (here, all at once) */
+        if( BLAS_duscr_insert_entries(A, nnz, VA, IA, JA)
+                        == blas_invalid_handle)
+        {
+                goto err;
+        }
+
+        /* finalize (allocate) the matrix build  */
+        if( BLAS_duscr_end(A) == blas_invalid_handle )
+        {
+                goto err;
+        }
+        printf("Correctly allocated a matrix\&.\n");
+
+        VA[0] = 0\&.0;
+        if( BLAS_dusget_element(A, IA[0], JA[0], &VA[0]) )
+        {
+                goto err;
+        }
+
+        /* a check */
+        if( VA[0] != 11\&.0 )
+        {
+                goto err;
+        }
+
+        /* compute X = X + (-1) * A * B   */
+        if(BLAS_dusmv(blas_no_trans,-1,A,B,1,X,1))
+        {
+                goto err;
+        }
+
+        for( i = 0 ; i < nc; ++i )
+                if( X[i] != AB[i] )
+                {
+                        printf("Computed SPMV result seems wrong\&. Terminating\&.\n");
+                        goto err;
+                }
+        printf("Correctly performed a SPMV\&.\n");
+
+        /* deallocate matrix A */
+        if( BLAS_usds(A) )
+        {
+                goto err;
+        }
+        printf("Correctly freed the matrix\&.\n");
+
+        /* finalize the library */
+        if((errval = rsb_lib_exit(RSB_NULL_EXIT_OPTIONS))
+                        != RSB_ERR_NO_ERROR)
+        {
+                goto err;
+        }
+        printf("Correctly finalized the library\&.\n");
+        printf("Program terminating with no error\&.\n");
+
+        return 0;
+err:
+        rsb_perror(NULL,errval);
+        printf("Program terminating with error\&.\n");
+        return -1;
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+}
+
+
+.fi
+.PP
+ 
+.PP
+.nf
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb\&.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version\&.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE\&.  See the GNU Lesser General Public
+License for more details\&.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING\&.
+If not, see <http://www\&.gnu\&.org/licenses/>\&.
+
+*/
+/*!
+ \ingroup rsb-examples
+ @file
+ @author Michele Martone
+ @brief This is a first 'RSB autotuning' example program\&.
+
+ \include autotuning\&.c
+*/
+#include <rsb\&.h> /* librsb header to include */
+#include <stdio\&.h>       /* printf() */
+#include <ctype\&.h>       /* isdigit() */
+#include <stdlib\&.h>      /* atoi() */
+/* #include "rsb_internals\&.h" */
+
+int tune_from_file(char * const filename, rsb_int_t wvat)
+{
+        struct rsb_mtx_t *mtxMp = NULL;
+        /* spmv specific variables */
+        const RSB_DEFAULT_TYPE alpha = 1;
+        const RSB_DEFAULT_TYPE beta = 1;
+        rsb_flags_t order = RSB_FLAG_WANT_COLUMN_MAJOR_ORDER;
+        const rsb_coo_idx_t nrhs = 2;  /* number of right hand sides */
+        rsb_trans_t transA = RSB_TRANSPOSITION_N; /* transposition */
+        rsb_nnz_idx_t ldB = 0;
+        rsb_nnz_idx_t ldC = 0;
+        /* misc variables */
+        rsb_err_t errval = RSB_ERR_NO_ERROR;
+        rsb_time_t dt;
+        char ib[200];
+        const char*is = "RSB_MIF_MATRIX_INFO__TO__CHAR_P";
+        /* misc variables */
+        /* input autotuning variables */
+        rsb_int_t oitmax = 1 /*15*/;    /* auto-tune iterations */
+        rsb_time_t tmax = 0\&.1;   /* time per autotune operation */
+        /* output autotuning variables */
+        rsb_flags_t flagsA = RSB_FLAG_NOFLAGS;
+        int ione = 1;
+        rsb_type_t typecodea [] = RSB_MATRIX_SPBLAS_TYPE_CODES_ARRAY;
+        int typecodei;
+
+        errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS);
+
+        if( (errval) != RSB_ERR_NO_ERROR )
+                goto err;
+
+        errval = rsb_lib_set_opt(RSB_IO_WANT_VERBOSE_TUNING, &wvat );
+        
+        /*
+        errval = rsb_lib_set_opt(RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE, &ione);
+        */
+
+        if( (errval) != RSB_ERR_NO_ERROR )
+                goto err;
+
+        printf("Loading matrix from file '%s'\&.\n",filename);
+
+        mtxMp = rsb_file_mtx_load(filename, flagsA, typecodea[0], &errval);
+
+        if( (errval) != RSB_ERR_NO_ERROR )
+                goto err;
+
+        for( typecodei = 0 ; typecodei < RSB_IMPLEMENTED_TYPES; ++typecodei )
+        {
+                rsb_type_t typecode = typecodea[typecodei];
+                struct rsb_mtx_t *mtxAp = NULL;
+                struct rsb_mtx_t *mtxOp = NULL;
+                rsb_real_t sf = 0\&.0;
+                rsb_int_t tn = 0;
+
+                sf = 0\&.0;
+                tn = 0;
+
+                printf("Considering %c clone\&.\n",typecode);
+                
+                errval = rsb_mtx_clone(&mtxAp, typecode, transA, NULL, mtxMp,
+                                flagsA);
+
+                if( (errval) != RSB_ERR_NO_ERROR )
+                        goto err;
+
+                printf("Base matrix:\n");
+                rsb_mtx_get_info_str(mtxAp,is,ib,sizeof(ib));
+                printf("%s\n\n",ib);
+
+                dt = -rsb_time();
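+                /* Passing NULL as the first argument asks for a
+                 * threads-count estimate only; the variant below, which
+                 * passes &mtxAp, may also replace the matrix itself\&. */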
+                errval = rsb_tune_spmm(NULL, &sf, &tn, oitmax, tmax, transA,
+                     &alpha, mtxAp, nrhs, order, NULL, ldB, &beta, NULL, ldC);
+
+                dt += rsb_time();
+                if(tn == 0)
+                printf("After %lfs, autotuning routine did not find a better"
+                        " threads count configuration\&.\n",dt);
+                else
+                printf("After %lfs, thread autotuning declared speedup of %lg x,"
+                        " when using threads count of %d\&.\n",dt,sf,tn);
+                printf("\n");
+
+
+                dt = -rsb_time();
+
+                mtxOp = mtxAp;
+                errval = rsb_tune_spmm(&mtxAp, &sf, &tn, oitmax, tmax, transA,
+                        &alpha, NULL, nrhs, order, NULL, ldB, &beta, NULL, ldC);
+                if( (errval) != RSB_ERR_NO_ERROR )
+                        goto err;
+
+                dt += rsb_time();
+                if( mtxOp == mtxAp )
+                {
+                        printf("After %lfs, global autotuning found old matrix optimal,"
+                        " with declared speedup %lg x when using %d threads\n",dt,sf,tn);
+                }
+                else
+                {
+                        printf("After %lfs, global autotuning declared speedup of %lg x,"
+                        " when using threads count of %d and a new matrix:\n",dt,sf,tn);
+                        rsb_mtx_get_info_str(mtxAp,is,ib,sizeof(ib));
+                        printf("%s\n",ib);
+                }
+                printf("\n");
+
+                /* user is expected to:
+                errval = rsb_lib_set_opt(RSB_IO_WANT_EXECUTING_THREADS,&tn);
+                and use mtxAp in SpMV\&.
+                */
+                rsb_mtx_free(mtxAp);
+                mtxAp = NULL;
+        }
+        rsb_mtx_free(mtxMp);
+        mtxMp = NULL;
+
+        goto ret;
+ret:
+        return 0;
+err:
+        rsb_perror(NULL,errval);
+        printf("Program terminating with error\&.\n");
+        return -1;
+}
+
+int main(const int argc, char * const argv[])
+{
+        /*!
+         Autotuning example\&.
+         */
+        /* matrix variables */
+        struct rsb_mtx_t *mtxAp = NULL; /* matrix structure pointer */
+        const int bs = RSB_DEFAULT_BLOCKING;
+        rsb_coo_idx_t nrA = 500; /* number of rows */
+        rsb_coo_idx_t ncA = 500; /* number of cols */
+        rsb_type_t typecode = RSB_NUMERICAL_TYPE_DEFAULT;
+        rsb_coo_idx_t rd = 1; /* every rd rows one is non empty */
+        rsb_coo_idx_t cd = 4; /* every cd cols one is non empty */
+        rsb_nnz_idx_t nnzA = (nrA/rd)*(ncA/cd); /* nonzeroes */
+        rsb_coo_idx_t*IA = NULL;
+        rsb_coo_idx_t*JA = NULL;
+        RSB_DEFAULT_TYPE*VA = NULL;
+        /* spmv specific variables */
+        const RSB_DEFAULT_TYPE alpha = 1;
+        const RSB_DEFAULT_TYPE beta = 1;
+        RSB_DEFAULT_TYPE*Cp = NULL;
+        RSB_DEFAULT_TYPE*Bp = NULL;
+        rsb_flags_t order = RSB_FLAG_WANT_COLUMN_MAJOR_ORDER;
+        const rsb_coo_idx_t nrhs = 2;  /* number of right hand sides */
+        rsb_trans_t transA = RSB_TRANSPOSITION_N; /* transposition */
+        rsb_nnz_idx_t ldB = nrA;
+        rsb_nnz_idx_t ldC = ncA;
+        /* misc variables */
+        rsb_err_t errval = RSB_ERR_NO_ERROR;
+        size_t so = sizeof(RSB_DEFAULT_TYPE);
+        size_t si = sizeof(rsb_coo_idx_t);
+        rsb_time_t dt,odt;
+        rsb_int_t t,tt = 100;   /* will repeat spmv tt times */
+        char ib[200];
+        const char*is = "RSB_MIF_MATRIX_INFO__TO__CHAR_P";
+        /* misc counters */
+        rsb_coo_idx_t ci; 
+        rsb_coo_idx_t ri;
+        rsb_coo_idx_t ni;
+        rsb_int_t nrhsi;
+        /* misc variables */
+        rsb_time_t etime = 0\&.0;
+        /* input autotuning variables */
+        rsb_int_t oitmax = 15;  /* auto-tune iterations */
+        rsb_time_t tmax = 0\&.1;   /* time per autotune operation */
+        /* input/output autotuning variables */
+        rsb_int_t tn = 0;       /* threads number */
+        /* output autotuning variables */
+        rsb_real_t sf = 0\&.0;     /* speedup factor obtained from auto tuning */
+        rsb_int_t wvat = 1;     /* want verbose autotuning; see documentation
+                                   of RSB_IO_WANT_VERBOSE_TUNING */
+
+        if(argc > 1 && !isdigit(argv[1][0]) )
+                return tune_from_file(argv[1],wvat);
+
+        if(argc > 1)
+        {
+                nrA = ncA = atoi(argv[1]);
+                if ( nrA < RSB_MIN_MATRIX_DIM || (nrA > (RSB_MAX_MATRIX_DIM) ))
+                        goto err;
+
+                nnzA = (nrA/rd)*(ncA/cd);
+                ldB = nrA;
+                ldC = ncA;
+        }
+
+        printf("Creating %d x %d matrix with %d nonzeroes\&.\n",nrA,ncA,nnzA);
+
+        IA = calloc(nnzA, si);
+        JA = calloc(nnzA, si);
+        VA = calloc(nnzA, so);
+        Bp = calloc(nrhs*ncA ,so);
+        Cp = calloc(nrhs*nrA ,so);
+
+        if( ! ( VA && IA && JA && Bp && Cp ) )
+                goto err;
+
+        for(nrhsi=0;nrhsi<nrhs;++nrhsi)
+                for(ci=0;ci<ncA/cd;++ci)
+                        Bp[nrhsi*ldB+ci] = 1\&.0;
+
+        for(nrhsi=0;nrhsi<nrhs;++nrhsi)
+                for(ri=0;ri<nrA/rd;++ri)
+                        Cp[nrhsi*ldC+ri] = 1\&.0;
+
+        ni = 0;
+
+        for(ci=0;ci<ncA/cd;++ci)
+                for(ri=0;ri<nrA/rd;++ri)
+                {
+                        VA[ni] = nrA * ri + ci;
+                        IA[ni] = ri;
+                        JA[ni] = ci;
+                        ni++;
+                }
+
+        if((errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS))
+                        != RSB_ERR_NO_ERROR) goto err;
+
+        errval = rsb_lib_set_opt(RSB_IO_WANT_VERBOSE_TUNING, &wvat );
+
+        mtxAp = rsb_mtx_alloc_from_coo_const(
+                VA,IA,JA,nnzA,typecode,nrA,ncA,bs,bs,
+                RSB_FLAG_NOFLAGS,&errval);
+
+        /* VA, IA, JA are not necessary anymore */
+        free(VA);
+        free(IA);
+        free(JA);
+        VA = NULL;
+        IA = NULL;
+        JA = NULL;
+
+        if((!mtxAp) || (errval != RSB_ERR_NO_ERROR))
+                goto err;
+
+        printf("Allocated matrix of %zd nonzeroes:\n",(size_t)nnzA);
+        rsb_mtx_get_info_str(mtxAp,is,ib,sizeof(ib));
+        printf("%s\n\n",ib);
+
+        dt = - rsb_time();
+        for(t=0;t<tt;++t)
+                /* 
+                   If nrhs == 1, the following is equivalent to
+                   rsb_spmv(transA,&alpha,mtxAp,Bp,1,&beta,Cp,1);
+                */
+                rsb_spmm(transA,&alpha,mtxAp,nrhs,order,Bp,ldB,&beta,Cp,ldC);
+        dt += rsb_time();
+        odt = dt;
+        printf("Before auto-tuning, %d multiplications took %lfs\&.\n",tt,dt);
+
+        printf("Threads autotuning (may take more than %lfs)\&.\&.\&.\n",
+                        oitmax*tmax);
+        dt = -rsb_time();
+        errval = rsb_tune_spmm(NULL, &sf, &tn, oitmax, tmax, transA,
+                        &alpha, mtxAp, nrhs, order, Bp, ldB, &beta, Cp, ldC);
+        dt += rsb_time();
+        if(errval != RSB_ERR_NO_ERROR)
+                goto err;
+
+        if(tn == 0)
+        printf("After %lfs, autotuning routine did not find a better"
+                        " threads count configuration\&.\n",dt);
+        else
+        printf("After %lfs, autotuning routine declared speedup of %lg x,"
+                        " when using threads count of %d\&.\n",dt,sf,tn);
+
+        errval = rsb_lib_set_opt(RSB_IO_WANT_EXECUTING_THREADS,&tn);
+        if(errval != RSB_ERR_NO_ERROR)
+                goto err;
+
+        rsb_mtx_get_info_str(mtxAp,is,ib,sizeof(ib));
+        printf("%s\n",ib);
+
+        dt = -rsb_time();
+        for(t=0;t<tt;++t)
+                /*rsb_spmv(transA,&alpha,mtxAp,Bp,1,&beta,Cp,1);*/
+                rsb_spmm(transA,&alpha,mtxAp,nrhs,order,Bp,ldB,&beta,Cp,ldC);
+        dt += rsb_time();
+        printf("After threads auto-tuning, %d multiplications took %lfs"
+                        "  --  effective speedup of %lg x\n",tt,dt,odt/dt);
+        odt = dt;
+
+
+        tn = 0; /* this will restore default threads count */
+        errval = rsb_lib_set_opt(RSB_IO_WANT_EXECUTING_THREADS,&tn);
+        if(errval != RSB_ERR_NO_ERROR)
+                goto err;
+        errval = rsb_lib_get_opt(RSB_IO_WANT_EXECUTING_THREADS,&tn);
+        if(errval != RSB_ERR_NO_ERROR)
+                goto err;
+
+        printf("Matrix autotuning (may take more than %lfs; using %d"
+                        " threads )\&.\&.\&.\n", oitmax*tmax, tn);
+
+        /* A negative tn will request also threads autotuning: */
+        /* tn = -tn; */
+
+        dt = -rsb_time();
+        errval = rsb_tune_spmm(&mtxAp, &sf, &tn, oitmax, tmax, transA,
+                        &alpha,  NULL, nrhs, order, Bp, ldB, &beta, Cp, ldC);
+        dt += rsb_time();
+
+        if(errval != RSB_ERR_NO_ERROR)
+                goto err;
+
+        if(tn == 0)
+        printf("After %lfs, autotuning routine did not find a better"
+                        " threads count configuration\&.\n",dt);
+        else
+        printf("After %lfs, autotuning routine declared speedup of %lg x,"
+                        " when using threads count of %d\&.\n",dt,sf,tn);
+
+        rsb_mtx_get_info_str(mtxAp,is,ib,sizeof(ib));
+        printf("%s\n",ib);
+
+        dt = -rsb_time();
+        for(t=0;t<tt;++t)
+                /*rsb_spmv(transA,&alpha,mtxAp,Bp,1,&beta,Cp,1);*/
+                rsb_spmm(transA,&alpha,mtxAp,nrhs,order,Bp,ldB,&beta,Cp,ldC);
+        dt += rsb_time();
+        printf("After matrix auto-tuning, %d multiplications took %lfs"
+                        "  --  further speedup of %lg x\n",tt,dt,odt/dt);
+
+        rsb_mtx_free(mtxAp);
+        free(Cp);
+        free(Bp);
+
+
+        errval = rsb_lib_get_opt(RSB_IO_WANT_LIBRSB_ETIME,&etime);
+        if(errval == RSB_ERR_UNSUPPORTED_FEATURE)
+        {
+                printf("librsb timer-based profiling is not supported in "
+                "this build\&. If you wish to have it, re-configure librsb "
+                "with its support\&. So you can safely ignore the error you"
+                " might just have seen printed out on screen\&.\n");
+                errval = RSB_ERR_NO_ERROR;
+        }
+        else
+        if(etime) /* This will only work if enabled at configure time\&. */
+                printf("Elapsed program time is %5\&.2lfs\n",etime);
+
+        if((errval = rsb_lib_exit(RSB_NULL_EXIT_OPTIONS))
+                        !=RSB_ERR_NO_ERROR)
+                goto err;
+        return 0;
+err:
+        rsb_perror(NULL,errval);
+        printf("Program terminating with error\&.\n");
+        return -1;
+}
+
+.fi
+.PP
+ 
+.PP
+.nf
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb\&.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version\&.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE\&.  See the GNU Lesser General Public
+License for more details\&.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING\&.
+If not, see <http://www\&.gnu\&.org/licenses/>\&.
+
+*/
+/*!
+ \ingroup rsb-examples
+ @file
+ @author Michele Martone
+ @brief This is an example program using a Sparse BLAS interface
+        and reading from file using the RSB library\&.
+
+ \include io-spblas\&.c
+*/
+#include <rsb\&.h> /* for rsb_lib_init */
+#include <blas_sparse\&.h>
+#include <stdio\&.h>
+        
+int main(const int argc, char * const argv[])
+{
+#ifndef RSB_NUMERICAL_TYPE_DOUBLE   
+        printf("Skipping a test because of 'double' type opted out\&.\n");
+        return 0;
+#else /* RSB_NUMERICAL_TYPE_DOUBLE */
+        blas_sparse_matrix A = blas_invalid_handle;
+        rsb_type_t typecode = RSB_NUMERICAL_TYPE_DOUBLE;
+        rsb_char_t * filename = argc > 1 ? argv[1] : "\&.\&./pd\&.mtx";
+
+        printf("Hello, RSB!\n");
+        if((rsb_perror(NULL,
+                rsb_lib_init(RSB_NULL_INIT_OPTIONS)))!=RSB_ERR_NO_ERROR)
+        {
+                printf("Error while initializing the library\&.\n");
+                goto err;
+        }
+
+        printf("Correctly initialized the library\&.\n");
+
+        A = rsb_load_spblas_matrix_file_as_matrix_market(filename,
+                        typecode );
+        if( A == blas_invalid_handle )
+        {
+                printf("Error while loading matrix %s from file\&.\n",
+                                filename);
+                goto err;
+        }
+
+        printf("Correctly loaded and allocated a matrix"
+                        " from file %s\&.\n",filename);
+
+        if( BLAS_usgp(A,blas_symmetric) == 1 )
+                printf("Matrix is symmetric\n");
+
+        if( BLAS_usgp(A,blas_hermitian) == 1 )
+                printf("Matrix is hermitian\n");
+
+        printf("Now SPMV with NULL vectors will be attempted,"
+                        " resulting in an error (so don't worry)\&.\n");
+
+        if(BLAS_dusmv(blas_no_trans,-1,A,NULL,1,NULL,1))
+        {
+                printf("Correctly detected an error condition\&.\n");
+                goto okerr;
+        }
+
+        printf("No error detected ?\nIf you see this line printed out,"
+                " please report as a bug, because the above NULL pointers"
+                " should have been detected\n");
+        return -1;
+
+okerr:
+        printf("Program correctly recovered from intentional"
+                        " error condition\&.\n");
+        if(BLAS_usds(A))
+        {
+                printf("Error while freeing the matrix!\n");
+                goto err;
+        }
+
+        printf("Correctly freed the matrix\&.\n");
+err:
+        if(rsb_perror(NULL,
+                rsb_lib_exit(RSB_NULL_EXIT_OPTIONS))!=RSB_ERR_NO_ERROR)
+        {
+                printf("Failed finalizing the library\&.\n");
+                goto ferr;
+        }
+
+        printf("Correctly finalized the library\&.\n");
+        return 0;
+ferr:
+        return -1;
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+}
+
+
+.fi
+.PP
+ 
+.PP
+.nf
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb\&.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version\&.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE\&.  See the GNU Lesser General Public
+License for more details\&.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING\&.
+If not, see <http://www\&.gnu\&.org/licenses/>\&.
+
+*/
+/*!
+ @file
+ @author Michele Martone
+ @brief A toy program showing instantiation, transposition and other
+ operations on a single matrix\&.
+ \ingroup rsb-examples
+
+ \include transpose\&.c
+*/
+#include <rsb\&.h>
+#include <stdio\&.h>       /* printf */
+
+int main(const int argc, char * const argv[])
+{
+        struct rsb_mtx_t *mtxAp = NULL;
+        rsb_blk_idx_t brA = RSB_DEFAULT_BLOCKING, bcA=RSB_DEFAULT_BLOCKING;
+        rsb_err_t errval = RSB_ERR_NO_ERROR;
+        rsb_nnz_idx_t nnzA = 4;
+        rsb_coo_idx_t  nrA = 3;
+        rsb_coo_idx_t  ncA = 3;
+        rsb_coo_idx_t    IA[] = { 0, 1, 2, 0 };
+        rsb_coo_idx_t    JA[] = { 0, 1, 2, 2 };
+        RSB_DEFAULT_TYPE VA[] = { 11, 22, 33, 13 };
+        RSB_DEFAULT_TYPE XV[] = { 0,0,0,0,0,0 };
+        rsb_coo_idx_t  vl = 0;
+        rsb_type_t typecode = RSB_NUMERICAL_TYPE_DEFAULT;
+
+        /* library initialization */
+        if(rsb_lib_init(RSB_NULL_INIT_OPTIONS)!=RSB_ERR_NO_ERROR)
+        {
+                return -1;
+        }
+
+        /* allocation */
+        mtxAp = rsb_mtx_alloc_from_coo_const(
+                        VA,IA,JA,nnzA,typecode,nrA,ncA,
+                        brA,bcA,RSB_FLAG_NOFLAGS,NULL);
+        if(!mtxAp)
+        {
+                return -1;
+        }
+
+        /* printout */
+        if(RSB_ERR_NO_ERROR!=(errval = rsb_file_mtx_save(mtxAp,NULL)))
+        {
+                if(errval != RSB_ERR_UNSUPPORTED_FEATURE)
+                        goto err;
+        }
+        
+        /* matrix transposition */
+        if( RSB_ERR_NO_ERROR != (errval =
+                rsb_mtx_clone(&mtxAp,RSB_NUMERICAL_TYPE_SAME_TYPE,
+                RSB_TRANSPOSITION_T,NULL,mtxAp,RSB_FLAG_IDENTICAL_FLAGS)))
+        {
+                goto err;
+        }
+
+        /* printout */
+        if(RSB_ERR_NO_ERROR!=(errval = rsb_file_mtx_save(mtxAp,NULL)))
+        {
+                if(errval != RSB_ERR_UNSUPPORTED_FEATURE)
+                        goto err;
+        }
+
+        rsb_mtx_free(mtxAp);
+
+        /* doing the same after load from file */
+        mtxAp = rsb_file_mtx_load("\&.\&./pd\&.mtx",
+                RSB_FLAG_NOFLAGS,typecode,NULL);
+        if(!mtxAp)
+        {
+                return -1;
+        }
+
+        /* printout */
+        if(RSB_ERR_NO_ERROR!=(errval = rsb_file_mtx_save(mtxAp,NULL)))
+        {
+                if(errval != RSB_ERR_UNSUPPORTED_FEATURE)
+                        goto err;
+        }
+
+        /* one can see dimensions in advance, also */
+        if(RSB_ERR_NO_ERROR!=(errval =
+                rsb_file_mtx_get_dims("\&.\&./pd\&.mtx",&nrA,&ncA,&nnzA,NULL)))
+        {
+                if(errval != RSB_ERR_UNSUPPORTED_FEATURE)
+                        goto err;
+        }
+
+        /* A matrix can be rendered to Postscript\&. */
+        {
+                if(RSB_ERR_NO_ERROR!=(errval =
+                rsb_mtx_rndr("pd\&.eps",mtxAp,512,512,RSB_MARF_EPS_B)))
+                        goto err;
+        }
+
+        rsb_mtx_free(mtxAp);
+
+        /* also vectors can be loaded */
+        if(RSB_ERR_NO_ERROR!=(errval = 
+                rsb_file_vec_load("\&.\&./vf\&.mtx",typecode,NULL,&vl )))
+                goto err;
+        /* we expect vf\&.mtx to be 6 rows long */
+        if( vl != 6 )
+        {
+                goto err;
+        }
+
+        if(RSB_ERR_NO_ERROR!=(errval = 
+                rsb_file_vec_load("\&.\&./vf\&.mtx",typecode,XV, NULL )))
+                goto err;
+
+        /* matrices can be rendered from file to a pixelmap as well */
+        {
+                unsigned char pixmap[3*2*2];
+
+                if(RSB_ERR_NO_ERROR!=(errval =
+                rsb_file_mtx_rndr(pixmap,"\&.\&./pd\&.mtx",2,2,2,RSB_MARF_RGB)))
+                        goto err;
+        }
+
+        if(RSB_ERR_NO_ERROR != rsb_lib_exit(RSB_NULL_EXIT_OPTIONS))
+        {
+                goto err;
+        }
+        return 0;
+err:
+        rsb_perror(NULL,errval);
+        return -1;
+}
+
+
+.fi
+.PP
+ 
+.PP
+.nf
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb\&.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version\&.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE\&.  See the GNU Lesser General Public
+License for more details\&.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING\&.
+If not, see <http://www\&.gnu\&.org/licenses/>\&.
+
+*/
+/*!
+ @file
+ @author Michele Martone
+ @brief A toy program implementing the power method
+        for computing matrix eigenvalues\&.
+ \ingroup rsb-examples
+
+ \include power\&.c
+*/
+
+#include <stdio\&.h>       // printf
+#include <math\&.h>        // sqrt
+#include <stdlib\&.h>      // calloc
+#include <rsb\&.h>
+
+int main(const int argc, char * const argv[])
+{
+        int WANT_VERBOSE = 0;
+        struct rsb_mtx_t *mtxAp = NULL;
+        const int bs = RSB_DEFAULT_BLOCKING;
+        int i;
+        const int br = bs, bc = bs; /* bs x bs blocked */
+        rsb_err_t errval = 0;
+        rsb_nnz_idx_t nnzA = 4;
+        rsb_coo_idx_t  nrA = 3;
+        rsb_coo_idx_t  ncA = 3;
+        rsb_int_t it = 0, maxit = 100;
+        const rsb_coo_idx_t    IA[] = { 0, 1, 2, 0 };
+        const rsb_coo_idx_t    JA[] = { 0, 1, 2, 2 };
+        const RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE VA[] = { 11, 22, 33, 13 };
+        const RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE ZERO = 0;
+
+        RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE norm = 0\&.0, /* nu */
+        oldnorm = 1\&.0, /* oldnorm */
+        *b1 = NULL, *b2 = NULL,
+        *bnow = NULL, *bnext = NULL;/* b1 and b2 aliases */
+        rsb_type_t typecode = RSB_NUMERICAL_TYPE_FIRST_BLAS;
+        size_t ds = 0;
+        /* tolerance */
+        const RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE tol = 1e-14;
+
+        /* library initialization */
+        if(rsb_lib_init(RSB_NULL_INIT_OPTIONS)!=RSB_ERR_NO_ERROR)
+                return -1;
+
+        /* allocation */
+        mtxAp = rsb_mtx_alloc_from_coo_const(VA,IA,JA,nnzA,
+                        typecode,nrA,ncA,br,bc,RSB_FLAG_NOFLAGS,NULL);
+        if(!mtxAp)
+                return -1;
+
+        ds = (nrA)*sizeof(RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE);
+        b1 = calloc(1,ds);
+        b2 = calloc(1,ds);
+
+        if(! (b1 && b2))
+        {
+                errval = RSB_ERR_ENOMEM;
+                goto err;
+        }
+
+        for( i = 0; i < nrA; ++i )
+                b1[i] = 1;
+
+        bnow = b1, bnext = b2;/* b,b' */
+
+        while( fabs(norm-oldnorm) > tol && it<maxit )
+        {
+                ++ it;
+                oldnorm = norm;
+                /* b'<-Ab */
+                if(( rsb_spmv(RSB_TRANSPOSITION_N,NULL,mtxAp,bnow,
+                        1,&ZERO,bnext,1)) != RSB_ERR_NO_ERROR )
+                        goto err;
+                /* nu<-||Ab||^2 */
+                norm = 0;
+                for(i=0;i<nrA;++i) 
+                        norm += bnext[i]*bnext[i];
+                /* nu<-||Ab|| */
+                norm = sqrt(norm);
+                norm = 1\&.0/norm;
+                /* b'<- Ab / ||Ab|| */
+                for(i=0;i<nrA;++i)
+                        bnext[i] *= norm;
+                norm = 1\&.0/norm;
+                printf("it:%d norm:%lg norm diff:%lg\n",it,norm,norm-oldnorm);
+
+                {void *tmp=bnow;bnow=bnext;bnext=tmp;/* pointers swap */}
+                if(WANT_VERBOSE)
+                {
+                        printf("norm:%lg\n",norm);
+                        if(isinf(norm))
+                        /* isinf is a C99 feature (need correct
+                         * compilation flags) */
+                                goto err;
+
+                        for(i=0;i<2;++i)
+                                printf("x[%d]=%lg\n",i,((double*)bnext)[i]);
+                }
+        }
+        /* norm now holds the dominant eigenvalue estimate; bnow holds the eigenvector estimate */
+
+        rsb_mtx_free(mtxAp);
+        free(b1);
+        free(b2);
+        if(rsb_lib_exit(RSB_NULL_EXIT_OPTIONS)!=RSB_ERR_NO_ERROR)
+                goto err;
+        if( it == maxit )
+        {
+                printf("ERROR: hit iterations limit without convergence!\n");
+                errval=RSB_ERR_GENERIC_ERROR;
+        }
+        return 0;
+err:
+        rsb_perror(NULL,errval);
+        return -1;
+}
+
+
+.fi
+.PP
+ 
+.PP
+.nf
+! 
+! Copyright (C) 2008-2016 Michele Martone
+! 
+! This file is part of librsb\&.
+! 
+! librsb is free software; you can redistribute it and/or modify it
+! under the terms of the GNU Lesser General Public License as published
+! by the Free Software Foundation; either version 3 of the License, or
+! (at your option) any later version\&.
+! 
+! librsb is distributed in the hope that it will be useful, but WITHOUT
+! ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+! FITNESS FOR A PARTICULAR PURPOSE\&.  See the GNU Lesser General Public
+! License for more details\&.
+! 
+! You should have received a copy of the GNU Lesser General Public
+! License along with librsb; see the file COPYING\&.
+! If not, see <http://www\&.gnu\&.org/licenses/>\&.
+! 
+
+      SUBROUTINE blas_sparse_mod_example(res)
+      USE blas_sparse
+      USE rsb ! For the second part of the example
+      IMPLICIT NONE
+      INTEGER :: res, istat = 0, i
+      TYPE(c_ptr),TARGET :: mtxap = c_null_ptr ! matrix pointer
+      INTEGER :: a
+      INTEGER,PARAMETER :: transn = blas_no_trans
+      INTEGER,PARAMETER :: incx = 1
+      INTEGER,PARAMETER :: incy = 1
+      REAL(KIND=8),PARAMETER :: alpha = 3
+! Symmetric (declared via lower triangle) matrix based example, e\&.g\&.:
+! 1 0
+! 1 1
+      ! declaration of VA,IA,JA 
+      !INTEGER,PARAMETER :: nr = 100
+      INTEGER,PARAMETER :: nr = 20
+      INTEGER,PARAMETER :: nc = nr
+      INTEGER,PARAMETER :: nnz = (nr*(nr+1))/2 ! half the square
+      INTEGER :: nt = 0
+      INTEGER :: ic, ir
+      INTEGER,PARAMETER :: ia(nnz) = (/ (((ir), ic=1,ir), ir=1,nr ) /) ! (/1, 2, 2/)
+      INTEGER,PARAMETER :: ja(nnz) = (/ (((ic), ic=1,ir), ir=1,nr ) /) ! (/1, 1, 2/)
+      REAL(KIND=8),PARAMETER :: va(nnz) = (/ ((1, ic=1,ir), ir=1,nr ) /) ! (/1, 1, 1/)
+      REAL(KIND=8) :: x(nc) = (/((1), ir=1,nc)/) ! reference x ! (/1, 1/)
+      REAL(KIND=8),PARAMETER :: cy(nr) = (/((alpha+alpha*nr), ir=1,nr)/) ! reference cy after ! (/9, 9/)
+      REAL(KIND=8) :: y(nr) = (/((alpha), ir=1,nr)/) ! y will be overwritten ! (/3, 3/)
+      ! First example part: pure blas_sparse code\&.
+      res = 0
+      CALL duscr_begin(nr,nc,a,res)
+      IF (res\&.NE\&.0) goto 9999
+      CALL ussp(a,blas_lower_symmetric,istat)
+      IF (istat\&.NE\&.0) goto 9997
+      CALL ussp(a,blas_rsb_spmv_autotuning_on,istat) ! (experimental) turns auto-tuning + thread setting on
+      IF (istat\&.NE\&.0) print *,'autotuning returned nonzero:', istat &
+       &,' \&.\&.\&.did you enable autotuning ?'
+      !
+      ! First style example 
+      CALL uscr_insert_entries(a,nnz,va,ia,ja,istat)
+      IF (istat\&.NE\&.0) goto 9997
+      CALL uscr_end(a,istat)
+      IF (istat\&.NE\&.0) goto 9997
+      ! CALL ussp(A,blas_rsb_duplicates_sum,istat)
+      ! CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat) ! uncomment this to activate add of coefficients to pattern
+      CALL usgp(a,blas_rsb_spmv_autotuning_on,nt)  ! (experimental)
+      IF (nt\&.NE\&.0) print*,'autotuner chose ',nt,' threads'
+      CALL ussp(a,blas_rsb_spmv_autotuning_off,istat) ! (experimental) turns auto-tuning + thread setting off
+      IF (istat\&.NE\&.0) goto 9997
+
+      CALL usmv(transn,alpha,a,x,incx,y,incy,istat)
+      IF (istat\&.NE\&.0) goto 9997
+      !
+      DO i = 1, nr
+            IF (y(i)\&.NE\&.cy(i)) print *, 'first check results are not ok'
+            IF (y(i)\&.NE\&.cy(i)) goto 9997
+      END DO
+      !
+      y(:) = alpha ! reset
+      !
+      ! Second style example 
+      CALL ussp(a,blas_rsb_autotune_next_operation,istat) ! (experimental) turns auto-tuning + thread setting on
+      IF (istat\&.NE\&.0) goto 9997
+      CALL usmv(transn,alpha,a,x,incx,y,incy,istat)
+      CALL usmm(blas_colmajor,transn,1, alpha,a,x,nr,y,nc,istat) ! Equivalent to the above (as long as incx=incy=1)\&.
+      CALL usmm(blas_colmajor,transn,1,-alpha,a,x,nr,y,nc,istat) ! Subtract the last usmm call contribution\&.
+      IF (istat\&.NE\&.0) goto 9997
+      !
+      DO i = 1, nr
+            IF (y(i)\&.NE\&.cy(i)) print *,'second check results are not ok'
+            IF (y(i)\&.NE\&.cy(i)) goto 9997
+      END DO
+      !
+      print *, 'check results are ok'
+      
+      ! Second part of the example: access to the rsb\&.h interface via
+      ! the ISO C Binding interface\&.
+      mtxap = rsb_blas_get_mtx(a) ! get pointer to rsb structure (as in the rsb\&.h API)
+      IF(nr\&.LT\&.5) istat = rsb_file_mtx_save(mtxap,c_null_ptr) ! write to stdout (only if matrix small enough)
+
+      goto 9998
+9997      res = -1
+9998      CONTINUE
+      CALL usds(a,istat)
+      IF (istat\&.NE\&.0) res = -1
+9999      CONTINUE
+      end SUBROUTINE blas_sparse_mod_example
+
+      PROGRAM main
+      USE rsb, ONLY: rsb_lib_init, rsb_lib_exit, C_PTR, C_NULL_PTR,&
+       & rsb_io_want_extra_verbose_interface,rsb_io_want_verbose_tuning,&
+       & rsb_lib_set_opt
+      USE iso_c_binding
+      IMPLICIT NONE
+      INTEGER :: res = 0, passed = 0, failed = 0
+      !TYPE(C_PTR),PARAMETER :: EO = RSB_NULL_EXIT_OPTIONS
+      !TYPE(C_PTR),PARAMETER :: IO = RSB_NULL_INIT_OPTIONS
+      ! Note: using C_NULL_PTR instead of the previous lines because of http://gcc\&.gnu\&.org/bugzilla/show_bug\&.cgi?id=59411
+      TYPE(c_ptr),PARAMETER :: eo = c_null_ptr
+      TYPE(c_ptr),PARAMETER :: io = c_null_ptr
+      INTEGER,TARGET::ione=1
+      res = rsb_lib_init(io)
+      res = rsb_lib_set_opt(rsb_io_want_verbose_tuning,c_loc(ione))
+      
+      CALL blas_sparse_mod_example(res)
+      IF (res\&.LT\&.0) failed = failed + 1
+      IF (res\&.EQ\&.0) passed = passed + 1
+
+      res = rsb_lib_exit(eo)
+      
+      print *, 'FAILED:', failed
+      print *, 'PASSED:', passed
+      IF (failed \&.GT\&. 0) THEN
+       stop 1
+      END IF
+      END PROGRAM
+
+.fi
+.PP
+ 
+.PP
+.nf
+! 
+! Copyright (C) 2008-2016 Michele Martone
+! 
+! This file is part of librsb\&.
+! 
+! librsb is free software; you can redistribute it and/or modify it
+! under the terms of the GNU Lesser General Public License as published
+! by the Free Software Foundation; either version 3 of the License, or
+! (at your option) any later version\&.
+! 
+! librsb is distributed in the hope that it will be useful, but WITHOUT
+! ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+! FITNESS FOR A PARTICULAR PURPOSE\&.  See the GNU Lesser General Public
+! License for more details\&.
+! 
+! You should have received a copy of the GNU Lesser General Public
+! License along with librsb; see the file COPYING\&.
+! If not, see <http://www\&.gnu\&.org/licenses/>\&.
+! 
+      SUBROUTINE rsb_mod_example1(res)
+      USE rsb
+      USE iso_c_binding
+      IMPLICIT NONE
+      INTEGER ::res
+      INTEGER,TARGET :: istat = 0, i
+      INTEGER :: transt = rsb_transposition_n ! Please note that this interface is unfinished
+      INTEGER :: incx = 1, incy = 1
+      REAL(KIND=8),TARGET :: alpha = 3, beta = 1
+! 1 1
+! 1 1
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz = 4
+      INTEGER :: nr = 2
+      INTEGER :: nc = 2
+      INTEGER :: nrhs = 1
+      INTEGER :: order = rsb_flag_want_column_major_order ! rhs layout
+      INTEGER :: flags = rsb_flag_noflags 
+      INTEGER,TARGET :: ia(4) = (/0, 1, 1,0/)
+      INTEGER,TARGET :: ja(4) = (/0, 0, 1,1/)
+      REAL(KIND=8),TARGET :: va(4) = (/1,1,1,1/)
+      REAL(KIND=8),TARGET :: x(2) = (/1, 1/)! reference x 
+      REAL(KIND=8),TARGET :: cy(2) = (/9, 9/)! reference cy after 
+      REAL(KIND=8),TARGET :: y(2) = (/3, 3/)! y will be overwritten
+      TYPE(c_ptr),TARGET :: mtxap = c_null_ptr ! matrix pointer
+      REAL(KIND=8) :: tmax = 2\&.0 ! tuning max time
+      INTEGER :: titmax = 2 ! tuning max iterations
+      INTEGER,TARGET :: ont = 0     ! optimal number of threads
+
+      res = 0
+      mtxap = rsb_mtx_alloc_from_coo_const(c_loc(va),c_loc(ia),c_loc(ja)&
+       &,nnz,&
+       & rsb_numerical_type_double,nr,nc,1,1,flags,c_loc(istat))
+
+      IF (istat\&.NE\&.rsb_err_no_error) goto 9997
+
+      istat = rsb_file_mtx_save(mtxap,c_null_ptr)
+
+      ! Structure autotuning:
+      istat = rsb_tune_spmm(c_loc(mtxap),c_null_ptr,c_null_ptr,titmax,&
+       & tmax,&
+       & transt,c_loc(alpha),c_null_ptr,nrhs,order,c_loc(x),nr,&
+       & c_loc(beta),c_loc(y),nc)
+
+      IF (istat\&.NE\&.rsb_err_no_error) goto 9997
+
+      ! Thread count autotuning:
+      istat = rsb_tune_spmm(c_null_ptr,c_null_ptr,c_loc(ont),titmax,&
+       & tmax,&
+       & transt,c_loc(alpha),mtxap,nrhs,order,c_loc(x),nr,c_loc(beta),&
+       & c_loc(y),nc)
+      print *, 'Optimal number of threads:', ont
+
+      y(:) = (/3, 3/)! reference y 
+      IF (istat\&.NE\&.rsb_err_no_error) goto 9997
+      
+      istat = rsb_file_mtx_save(mtxap,c_null_ptr)
+      IF (istat\&.NE\&.rsb_err_no_error) goto 9997
+
+      istat = rsb_spmv(transt,c_loc(alpha),mtxap,c_loc(x),incx,&
+       & c_loc(beta),c_loc(y),incy)
+      IF (istat\&.NE\&.rsb_err_no_error) goto 9997
+      DO i = 1, 2
+            IF (y(i)\&.NE\&.cy(i)) print *, &
+'type=d dims=2x2 sym=g diag=g &
+      &blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok'
+            IF (y(i)\&.NE\&.cy(i)) goto 9997
+      END DO
+      print*, &
+'type=d dims=2x2 sym=g diag=g blocks=1x1 usmv alpha= 3&
+       & beta= 1 incx=1 incy=1 trans=n is ok'
+      goto 9998
+9997      res = -1
+9998      CONTINUE
+      mtxap = rsb_mtx_free(mtxap)
+      IF (istat\&.NE\&.rsb_err_no_error) res = -1 
+! 9999      CONTINUE
+      istat = rsb_perror(c_null_ptr,istat)
+      end SUBROUTINE rsb_mod_example1
+
+      SUBROUTINE rsb_mod_example2(res)
+      USE rsb
+      USE iso_c_binding
+      IMPLICIT NONE
+      INTEGER,TARGET :: errval
+      INTEGER :: res
+      INTEGER :: transt = rsb_transposition_n  ! no transposition
+      INTEGER :: incx = 1, incb = 1        ! X, B vectors increment
+      REAL(KIND=8),TARGET :: alpha = 3,beta = 1
+      INTEGER :: nnza = 4, nra = 3, nca = 3     ! nonzeroes, rows, columns of matrix A
+      INTEGER,TARGET :: ia(4) = (/1, 2, 3, 3/)  ! row    indices
+      INTEGER,TARGET :: ja(4) = (/1, 2, 1, 3/)  ! column indices
+      INTEGER(C_SIGNED_CHAR) :: typecode = rsb_numerical_type_double
+      INTEGER :: flags =rsb_flag_default_matrix_flags+rsb_flag_symmetric
+      REAL(KIND=8),TARGET :: va(4) = (/11\&.0, 22\&.0, 13\&.0, 33\&.0/) ! coefficients
+      REAL(KIND=8),TARGET :: x(3) = (/   0,    0,    0/)
+      REAL(KIND=8),TARGET :: b(3) = (/-1\&.0, -2\&.0, -2\&.0/)
+      TYPE(c_ptr),TARGET  :: mtxap = c_null_ptr
+      TYPE(c_ptr)  :: mtxapp = c_null_ptr
+      REAL(KIND=8),TARGET :: etime = 0\&.0
+      !TYPE(C_PTR),PARAMETER :: EO = RSB_NULL_EXIT_OPTIONS
+      !TYPE(C_PTR),PARAMETER :: IO = RSB_NULL_INIT_OPTIONS
+      ! Note: using C_NULL_PTR instead of the previous lines because of http://gcc\&.gnu\&.org/bugzilla/show_bug\&.cgi?id=59411
+      TYPE(c_ptr),PARAMETER :: eo = c_null_ptr
+      TYPE(c_ptr),PARAMETER :: io = c_null_ptr
+
+      errval = rsb_lib_init(io)                ! librsb initialization
+      IF (errval\&.NE\&.rsb_err_no_error) &
+       & stop 'error calling rsb_lib_init'
+#if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 5)
+#define RSB_SKIP_BECAUSE_OLD_COMPILER 1
+#endif
+#ifndef RSB_SKIP_BECAUSE_OLD_COMPILER
+      mtxap = rsb_mtx_alloc_from_coo_begin(nnza,typecode,nra,nca,flags,&
+       & c_loc(errval)) ! begin matrix creation
+      errval = rsb_mtx_set_vals(mtxap,&
+       & c_loc(va),c_loc(ia),c_loc(ja),nnza,flags) ! insert some nonzeroes
+      mtxapp = c_loc(mtxap) ! Old compilers like e\&.g\&.: Gfortran 4\&.4\&.7 will NOT compile this\&.
+      IF (errval\&.NE\&.rsb_err_no_error) &
+       & stop 'error calling rsb_mtx_set_vals'
+      errval = rsb_mtx_alloc_from_coo_end(mtxapp)                   ! end matrix creation
+      IF (errval\&.NE\&.rsb_err_no_error) &
+       & stop 'error calling rsb_mtx_alloc_from_coo_end'
+      errval = rsb_spmv(transt,c_loc(alpha),mtxap,c_loc(x),&
+       & incx,c_loc(beta),c_loc(b),incb) ! X := X + (3) * A * B 
+      IF (errval\&.NE\&.rsb_err_no_error)&
+       & stop 'error calling rsb_spmv'
+      mtxap = rsb_mtx_free(mtxap)                                 ! destroy matrix
+
+      ! The following is optional and depends on configure options, so it is allowed to fail
+      errval = rsb_lib_get_opt(rsb_io_want_librsb_etime,c_loc(etime))
+      IF (errval\&.EQ\&.rsb_err_no_error)&
+       & print*,'Time spent in librsb is:',etime
+      ! IF (errval\&.NE\&.0)STOP 'error calling rsb_lib_get_opt' 
+      errval = rsb_err_no_error
+
+      IF (errval\&.NE\&.rsb_err_no_error) &
+       & stop 'error calling rsb_mtx_free'
+#else
+      print*,'You have an old Fortran compiler not supporting C_LOC\&.'
+      print*,'Skipping a part of the test'
+#endif
+      errval=rsb_lib_exit(eo)                 ! librsb finalization
+      IF (errval\&.NE\&.rsb_err_no_error)&
+       & stop 'error calling rsb_lib_exit'
+      print *, 'rsb module fortran test is ok'
+      res = errval
+      end SUBROUTINE rsb_mod_example2
+
+      PROGRAM main
+      USE rsb
+      IMPLICIT NONE
+      INTEGER :: res = rsb_err_no_error, passed = 0, failed = 0
+      !TYPE(C_PTR),PARAMETER :: EO = RSB_NULL_EXIT_OPTIONS
+      !TYPE(C_PTR),PARAMETER :: IO = RSB_NULL_INIT_OPTIONS
+      ! Note: using C_NULL_PTR instead of the previous lines because of http://gcc\&.gnu\&.org/bugzilla/show_bug\&.cgi?id=59411
+      TYPE(c_ptr),PARAMETER :: eo = c_null_ptr
+      TYPE(c_ptr),PARAMETER :: io = c_null_ptr
+
+      res = rsb_lib_init(io)
+      
+      CALL rsb_mod_example1(res)
+      IF (res\&.LT\&.0) failed = failed + 1
+      IF (res\&.EQ\&.0) passed = passed + 1
+
+      res = rsb_lib_exit(eo)
+
+      CALL rsb_mod_example2(res)
+      IF (res\&.LT\&.0) failed = failed + 1
+      IF (res\&.EQ\&.0) passed = passed + 1
+      
+      print *, 'FAILED:', failed
+      print *, 'PASSED:', passed
+      IF (failed\&.GT\&.0) THEN
+       stop 1
+      END IF
+      END PROGRAM
+
+
+.fi
+.PP
+ 
+.SH "Author"
+.PP 
+librsb was written by Michele Martone; this documentation has been generated by Doxygen.
+.SH "SEE ALSO"
+.B rsb-examples
+.B rsb.h
+.B rsb-spblas.h
diff --git a/doc/man/man3/rsb-spblas.h.3 b/doc/man/man3/rsb-spblas.h.3
new file mode 100644
index 0000000..b5066ed
--- /dev/null
+++ b/doc/man/man3/rsb-spblas.h.3
@@ -0,0 +1,7053 @@
+.TH "The Sparse BLAS interface to librsb (blas_sparse.h, rsb_blas_sparse.F90)" 3 "Fri Sep 2 2016" "Version 1.2.0-rc5" "librsb" \" -*- nroff -*-
+.ad l
+.nh
+.SH NAME
+librsb - 
+The Sparse BLAS interface to librsb (blas_sparse.h, rsb_blas_sparse.F90) \- 
+.SH DESCRIPTION
+.PP
+A Sparse BLAS interface (see http://www.netlib.org/blas/blast-forum/) to \fClibrsb\fP\&. Level 1 (vector-vector operations) is supported in a basic way\&. Level 2 (sparse matrix-dense vector operations) is supported fully\&. Level 3 (sparse matrix-dense matrix operations) is supported as a wrapper around Level 2\&.  
+
+.SS "Functions"
+
+.in +1c
+.ti -1c
+.RI "int \fBBLAS_susdot\fP (enum \fBblas_conj_type\fP conj, int nnz, const float *x, const int *indx, const float *y, int incy, float *r, enum \fBblas_base_type\fP index_base)"
+.br
+.ti -1c
+.RI "void \fBblas_susdot_\fP (enum \fBblas_conj_type\fP *conj, int *nnz, const float *x, const int *indx, const float *y, int *incy, float *r, enum \fBblas_base_type\fP *index_base, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_dusdot\fP (enum \fBblas_conj_type\fP conj, int nnz, const double *x, const int *indx, const double *y, int incy, double *r, enum \fBblas_base_type\fP index_base)"
+.br
+.ti -1c
+.RI "void \fBblas_dusdot_\fP (enum \fBblas_conj_type\fP *conj, int *nnz, const double *x, const int *indx, const double *y, int *incy, double *r, enum \fBblas_base_type\fP *index_base, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cusdot\fP (enum \fBblas_conj_type\fP conj, int nnz, const void *x, const int *indx, const void *y, int incy, void *r, enum \fBblas_base_type\fP index_base)"
+.br
+.ti -1c
+.RI "void \fBblas_cusdot_\fP (enum \fBblas_conj_type\fP *conj, int *nnz, const void *x, const int *indx, const void *y, int *incy, void *r, enum \fBblas_base_type\fP *index_base, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zusdot\fP (enum \fBblas_conj_type\fP conj, int nnz, const void *x, const int *indx, const void *y, int incy, void *r, enum \fBblas_base_type\fP index_base)"
+.br
+.ti -1c
+.RI "void \fBblas_zusdot_\fP (enum \fBblas_conj_type\fP *conj, int *nnz, const void *x, const int *indx, const void *y, int *incy, void *r, enum \fBblas_base_type\fP *index_base, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_susaxpy\fP (int nnz, float alpha, const float *x, const int *indx, float *y, int incy, enum \fBblas_base_type\fP index_base)"
+.br
+.ti -1c
+.RI "void \fBblas_susaxpy_\fP (int *nnz, float *alpha, const float *x, const int *indx, float *y, int *incy, enum \fBblas_base_type\fP *index_base, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_dusaxpy\fP (int nnz, double alpha, const double *x, const int *indx, double *y, int incy, enum \fBblas_base_type\fP index_base)"
+.br
+.ti -1c
+.RI "void \fBblas_dusaxpy_\fP (int *nnz, double *alpha, const double *x, const int *indx, double *y, int *incy, enum \fBblas_base_type\fP *index_base, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cusaxpy\fP (int nnz, const void *alpha, const void *x, const int *indx, void *y, int incy, enum \fBblas_base_type\fP index_base)"
+.br
+.ti -1c
+.RI "void \fBblas_cusaxpy_\fP (int *nnz, const void *alpha, const void *x, const int *indx, void *y, int *incy, enum \fBblas_base_type\fP *index_base, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zusaxpy\fP (int nnz, const void *alpha, const void *x, const int *indx, void *y, int incy, enum \fBblas_base_type\fP index_base)"
+.br
+.ti -1c
+.RI "void \fBblas_zusaxpy_\fP (int *nnz, const void *alpha, const void *x, const int *indx, void *y, int *incy, enum \fBblas_base_type\fP *index_base, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_susga\fP (int nnz, const float *y, int incy, float *x, const int *indx, enum \fBblas_base_type\fP index_base)"
+.br
+.ti -1c
+.RI "void \fBblas_susga_\fP (int *nnz, const float *y, int *incy, float *x, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_dusga\fP (int nnz, const double *y, int incy, double *x, const int *indx, enum \fBblas_base_type\fP index_base)"
+.br
+.ti -1c
+.RI "void \fBblas_dusga_\fP (int *nnz, const double *y, int *incy, double *x, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cusga\fP (int nnz, const void *y, int incy, void *x, const int *indx, enum \fBblas_base_type\fP index_base)"
+.br
+.ti -1c
+.RI "void \fBblas_cusga_\fP (int *nnz, const void *y, int *incy, void *x, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zusga\fP (int nnz, const void *y, int incy, void *x, const int *indx, enum \fBblas_base_type\fP index_base)"
+.br
+.ti -1c
+.RI "void \fBblas_zusga_\fP (int *nnz, const void *y, int *incy, void *x, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_susgz\fP (int nnz, float *y, int incy, float *x, const int *indx, enum \fBblas_base_type\fP index_base)"
+.br
+.ti -1c
+.RI "void \fBblas_susgz_\fP (int *nnz, float *y, int *incy, float *x, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_dusgz\fP (int nnz, double *y, int incy, double *x, const int *indx, enum \fBblas_base_type\fP index_base)"
+.br
+.ti -1c
+.RI "void \fBblas_dusgz_\fP (int *nnz, double *y, int *incy, double *x, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cusgz\fP (int nnz, void *y, int incy, void *x, const int *indx, enum \fBblas_base_type\fP index_base)"
+.br
+.ti -1c
+.RI "void \fBblas_cusgz_\fP (int *nnz, void *y, int *incy, void *x, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zusgz\fP (int nnz, void *y, int incy, void *x, const int *indx, enum \fBblas_base_type\fP index_base)"
+.br
+.ti -1c
+.RI "void \fBblas_zusgz_\fP (int *nnz, void *y, int *incy, void *x, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_sussc\fP (int nnz, const float *x, float *y, int incy, const int *indx, enum \fBblas_base_type\fP index_base)"
+.br
+.ti -1c
+.RI "void \fBblas_sussc_\fP (int *nnz, const float *x, float *y, int *incy, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_dussc\fP (int nnz, const double *x, double *y, int incy, const int *indx, enum \fBblas_base_type\fP index_base)"
+.br
+.ti -1c
+.RI "void \fBblas_dussc_\fP (int *nnz, const double *x, double *y, int *incy, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cussc\fP (int nnz, const void *x, void *y, int incy, const int *indx, enum \fBblas_base_type\fP index_base)"
+.br
+.ti -1c
+.RI "void \fBblas_cussc_\fP (int *nnz, const void *x, void *y, int *incy, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zussc\fP (int nnz, const void *x, void *y, int incy, const int *indx, enum \fBblas_base_type\fP index_base)"
+.br
+.ti -1c
+.RI "void \fBblas_zussc_\fP (int *nnz, const void *x, void *y, int *incy, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_susmv\fP (enum \fBblas_trans_type\fP transA, float alpha, \fBblas_sparse_matrix\fP A, const float *x, int incx, float *y, int incy)"
+.br
+.ti -1c
+.RI "void \fBblas_susmv_\fP (enum \fBblas_trans_type\fP *transA, float *alpha, \fBblas_sparse_matrix\fP *A, const float *x, int *incx, float *y, int *incy, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_dusmv\fP (enum \fBblas_trans_type\fP transA, double alpha, \fBblas_sparse_matrix\fP A, const double *x, int incx, double *y, int incy)"
+.br
+.ti -1c
+.RI "void \fBblas_dusmv_\fP (enum \fBblas_trans_type\fP *transA, double *alpha, \fBblas_sparse_matrix\fP *A, const double *x, int *incx, double *y, int *incy, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cusmv\fP (enum \fBblas_trans_type\fP transA, const void *alpha, \fBblas_sparse_matrix\fP A, const void *x, int incx, void *y, int incy)"
+.br
+.ti -1c
+.RI "void \fBblas_cusmv_\fP (enum \fBblas_trans_type\fP *transA, const void *alpha, \fBblas_sparse_matrix\fP *A, const void *x, int *incx, void *y, int *incy, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zusmv\fP (enum \fBblas_trans_type\fP transA, const void *alpha, \fBblas_sparse_matrix\fP A, const void *x, int incx, void *y, int incy)"
+.br
+.ti -1c
+.RI "void \fBblas_zusmv_\fP (enum \fBblas_trans_type\fP *transA, const void *alpha, \fBblas_sparse_matrix\fP *A, const void *x, int *incx, void *y, int *incy, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_sussv\fP (enum \fBblas_trans_type\fP transT, float alpha, \fBblas_sparse_matrix\fP T, float *x, int incx)"
+.br
+.ti -1c
+.RI "void \fBblas_sussv_\fP (enum \fBblas_trans_type\fP *transT, float *alpha, \fBblas_sparse_matrix\fP *T, float *x, int *incx, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_dussv\fP (enum \fBblas_trans_type\fP transT, double alpha, \fBblas_sparse_matrix\fP T, double *x, int incx)"
+.br
+.ti -1c
+.RI "void \fBblas_dussv_\fP (enum \fBblas_trans_type\fP *transT, double *alpha, \fBblas_sparse_matrix\fP *T, double *x, int *incx, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cussv\fP (enum \fBblas_trans_type\fP transT, const void *alpha, \fBblas_sparse_matrix\fP T, void *x, int incx)"
+.br
+.ti -1c
+.RI "void \fBblas_cussv_\fP (enum \fBblas_trans_type\fP *transT, const void *alpha, \fBblas_sparse_matrix\fP *T, void *x, int *incx, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zussv\fP (enum \fBblas_trans_type\fP transT, const void *alpha, \fBblas_sparse_matrix\fP T, void *x, int incx)"
+.br
+.ti -1c
+.RI "void \fBblas_zussv_\fP (enum \fBblas_trans_type\fP *transT, const void *alpha, \fBblas_sparse_matrix\fP *T, void *x, int *incx, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_susmm\fP (enum \fBblas_order_type\fP order, enum \fBblas_trans_type\fP transA, int nrhs, float alpha, \fBblas_sparse_matrix\fP A, const float *b, int ldb, float *c, int ldc)"
+.br
+.ti -1c
+.RI "void \fBblas_susmm_\fP (enum \fBblas_order_type\fP *order, enum \fBblas_trans_type\fP *transA, int *nrhs, float *alpha, \fBblas_sparse_matrix\fP *A, const float *b, int *ldb, float *c, int *ldc, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_dusmm\fP (enum \fBblas_order_type\fP order, enum \fBblas_trans_type\fP transA, int nrhs, double alpha, \fBblas_sparse_matrix\fP A, const double *b, int ldb, double *c, int ldc)"
+.br
+.ti -1c
+.RI "void \fBblas_dusmm_\fP (enum \fBblas_order_type\fP *order, enum \fBblas_trans_type\fP *transA, int *nrhs, double *alpha, \fBblas_sparse_matrix\fP *A, const double *b, int *ldb, double *c, int *ldc, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cusmm\fP (enum \fBblas_order_type\fP order, enum \fBblas_trans_type\fP transA, int nrhs, const void *alpha, \fBblas_sparse_matrix\fP A, const void *b, int ldb, void *c, int ldc)"
+.br
+.ti -1c
+.RI "void \fBblas_cusmm_\fP (enum \fBblas_order_type\fP *order, enum \fBblas_trans_type\fP *transA, int *nrhs, const void *alpha, \fBblas_sparse_matrix\fP *A, const void *b, int *ldb, void *c, int *ldc, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zusmm\fP (enum \fBblas_order_type\fP order, enum \fBblas_trans_type\fP transA, int nrhs, const void *alpha, \fBblas_sparse_matrix\fP A, const void *b, int ldb, void *c, int ldc)"
+.br
+.ti -1c
+.RI "void \fBblas_zusmm_\fP (enum \fBblas_order_type\fP *order, enum \fBblas_trans_type\fP *transA, int *nrhs, const void *alpha, \fBblas_sparse_matrix\fP *A, const void *b, int *ldb, void *c, int *ldc, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_sussm\fP (enum \fBblas_order_type\fP order, enum \fBblas_trans_type\fP transT, int nrhs, float alpha, \fBblas_sparse_matrix\fP T, float *b, int ldb)"
+.br
+.ti -1c
+.RI "void \fBblas_sussm_\fP (enum \fBblas_order_type\fP *order, enum \fBblas_trans_type\fP *transT, int *nrhs, float *alpha, \fBblas_sparse_matrix\fP *T, float *b, int *ldb, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_dussm\fP (enum \fBblas_order_type\fP order, enum \fBblas_trans_type\fP transT, int nrhs, double alpha, \fBblas_sparse_matrix\fP T, double *b, int ldb)"
+.br
+.ti -1c
+.RI "void \fBblas_dussm_\fP (enum \fBblas_order_type\fP *order, enum \fBblas_trans_type\fP *transT, int *nrhs, double *alpha, \fBblas_sparse_matrix\fP *T, double *b, int *ldb, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cussm\fP (enum \fBblas_order_type\fP order, enum \fBblas_trans_type\fP transT, int nrhs, const void *alpha, \fBblas_sparse_matrix\fP T, void *b, int ldb)"
+.br
+.ti -1c
+.RI "void \fBblas_cussm_\fP (enum \fBblas_order_type\fP *order, enum \fBblas_trans_type\fP *transT, int *nrhs, const void *alpha, \fBblas_sparse_matrix\fP *T, void *b, int *ldb, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zussm\fP (enum \fBblas_order_type\fP order, enum \fBblas_trans_type\fP transT, int nrhs, const void *alpha, \fBblas_sparse_matrix\fP T, void *b, int ldb)"
+.br
+.ti -1c
+.RI "void \fBblas_zussm_\fP (enum \fBblas_order_type\fP *order, enum \fBblas_trans_type\fP *transT, int *nrhs, const void *alpha, \fBblas_sparse_matrix\fP *T, void *b, int *ldb, int *istat)"
+.br
+.ti -1c
+.RI "\fBblas_sparse_matrix\fP \fBBLAS_suscr_begin\fP (int m, int n)"
+.br
+.ti -1c
+.RI "void \fBblas_suscr_begin_\fP (int *m, int *n, \fBblas_sparse_matrix\fP *A, int *istat)"
+.br
+.ti -1c
+.RI "\fBblas_sparse_matrix\fP \fBBLAS_duscr_begin\fP (int m, int n)"
+.br
+.ti -1c
+.RI "void \fBblas_duscr_begin_\fP (int *m, int *n, \fBblas_sparse_matrix\fP *A, int *istat)"
+.br
+.ti -1c
+.RI "\fBblas_sparse_matrix\fP \fBBLAS_cuscr_begin\fP (int m, int n)"
+.br
+.ti -1c
+.RI "void \fBblas_cuscr_begin_\fP (int *m, int *n, \fBblas_sparse_matrix\fP *A, int *istat)"
+.br
+.ti -1c
+.RI "\fBblas_sparse_matrix\fP \fBBLAS_zuscr_begin\fP (int m, int n)"
+.br
+.ti -1c
+.RI "void \fBblas_zuscr_begin_\fP (int *m, int *n, \fBblas_sparse_matrix\fP *A, int *istat)"
+.br
+.ti -1c
+.RI "\fBblas_sparse_matrix\fP \fBBLAS_suscr_block_begin\fP (int Mb, int Nb, int k, int l)"
+.br
+.ti -1c
+.RI "void \fBblas_suscr_block_begin_\fP (int *Mb, int *Nb, int *k, int *l, \fBblas_sparse_matrix\fP *A, int *istat)"
+.br
+.ti -1c
+.RI "\fBblas_sparse_matrix\fP \fBBLAS_duscr_block_begin\fP (int Mb, int Nb, int k, int l)"
+.br
+.ti -1c
+.RI "void \fBblas_duscr_block_begin_\fP (int *Mb, int *Nb, int *k, int *l, \fBblas_sparse_matrix\fP *A, int *istat)"
+.br
+.ti -1c
+.RI "\fBblas_sparse_matrix\fP \fBBLAS_cuscr_block_begin\fP (int Mb, int Nb, int k, int l)"
+.br
+.ti -1c
+.RI "void \fBblas_cuscr_block_begin_\fP (int *Mb, int *Nb, int *k, int *l, \fBblas_sparse_matrix\fP *A, int *istat)"
+.br
+.ti -1c
+.RI "\fBblas_sparse_matrix\fP \fBBLAS_zuscr_block_begin\fP (int Mb, int Nb, int k, int l)"
+.br
+.ti -1c
+.RI "void \fBblas_zuscr_block_begin_\fP (int *Mb, int *Nb, int *k, int *l, \fBblas_sparse_matrix\fP *A, int *istat)"
+.br
+.ti -1c
+.RI "\fBblas_sparse_matrix\fP \fBBLAS_suscr_variable_block_begin\fP (int Mb, int Nb, const int *K, const int *L)"
+.br
+.ti -1c
+.RI "void \fBblas_suscr_variable_block_begin_\fP (int *Mb, int *Nb, const int *K, const int *L, \fBblas_sparse_matrix\fP *A, int *istat)"
+.br
+.ti -1c
+.RI "\fBblas_sparse_matrix\fP \fBBLAS_duscr_variable_block_begin\fP (int Mb, int Nb, const int *K, const int *L)"
+.br
+.ti -1c
+.RI "void \fBblas_duscr_variable_block_begin_\fP (int *Mb, int *Nb, const int *K, const int *L, \fBblas_sparse_matrix\fP *A, int *istat)"
+.br
+.ti -1c
+.RI "\fBblas_sparse_matrix\fP \fBBLAS_cuscr_variable_block_begin\fP (int Mb, int Nb, const int *K, const int *L)"
+.br
+.ti -1c
+.RI "void \fBblas_cuscr_variable_block_begin_\fP (int *Mb, int *Nb, const int *K, const int *L, \fBblas_sparse_matrix\fP *A, int *istat)"
+.br
+.ti -1c
+.RI "\fBblas_sparse_matrix\fP \fBBLAS_zuscr_variable_block_begin\fP (int Mb, int Nb, const int *K, const int *L)"
+.br
+.ti -1c
+.RI "void \fBblas_zuscr_variable_block_begin_\fP (int *Mb, int *Nb, const int *K, const int *L, \fBblas_sparse_matrix\fP *A, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_suscr_end\fP (\fBblas_sparse_matrix\fP A)"
+.br
+.ti -1c
+.RI "void \fBblas_suscr_end_\fP (\fBblas_sparse_matrix\fP *A, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_duscr_end\fP (\fBblas_sparse_matrix\fP A)"
+.br
+.ti -1c
+.RI "void \fBblas_duscr_end_\fP (\fBblas_sparse_matrix\fP *A, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cuscr_end\fP (\fBblas_sparse_matrix\fP A)"
+.br
+.ti -1c
+.RI "void \fBblas_cuscr_end_\fP (\fBblas_sparse_matrix\fP *A, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zuscr_end\fP (\fBblas_sparse_matrix\fP A)"
+.br
+.ti -1c
+.RI "void \fBblas_zuscr_end_\fP (\fBblas_sparse_matrix\fP *A, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_suscr_insert_entry\fP (\fBblas_sparse_matrix\fP A, float val, int i, int j)"
+.br
+.ti -1c
+.RI "void \fBblas_suscr_insert_entry_\fP (\fBblas_sparse_matrix\fP *A, float *val, int *i, int *j, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_duscr_insert_entry\fP (\fBblas_sparse_matrix\fP A, double val, int i, int j)"
+.br
+.ti -1c
+.RI "void \fBblas_duscr_insert_entry_\fP (\fBblas_sparse_matrix\fP *A, double *val, int *i, int *j, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cuscr_insert_entry\fP (\fBblas_sparse_matrix\fP A, const void *val, int i, int j)"
+.br
+.ti -1c
+.RI "void \fBblas_cuscr_insert_entry_\fP (\fBblas_sparse_matrix\fP *A, const void *val, int *i, int *j, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zuscr_insert_entry\fP (\fBblas_sparse_matrix\fP A, const void *val, int i, int j)"
+.br
+.ti -1c
+.RI "void \fBblas_zuscr_insert_entry_\fP (\fBblas_sparse_matrix\fP *A, const void *val, int *i, int *j, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_suscr_insert_entries\fP (\fBblas_sparse_matrix\fP A, int nnz, const float *val, const int *indx, const int *jndx)"
+.br
+.ti -1c
+.RI "void \fBblas_suscr_insert_entries_\fP (\fBblas_sparse_matrix\fP *A, int *nnz, const float *val, const int *indx, const int *jndx, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_duscr_insert_entries\fP (\fBblas_sparse_matrix\fP A, int nnz, const double *val, const int *indx, const int *jndx)"
+.br
+.ti -1c
+.RI "void \fBblas_duscr_insert_entries_\fP (\fBblas_sparse_matrix\fP *A, int *nnz, const double *val, const int *indx, const int *jndx, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cuscr_insert_entries\fP (\fBblas_sparse_matrix\fP A, int nnz, const void *val, const int *indx, const int *jndx)"
+.br
+.ti -1c
+.RI "void \fBblas_cuscr_insert_entries_\fP (\fBblas_sparse_matrix\fP *A, int *nnz, const void *val, const int *indx, const int *jndx, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zuscr_insert_entries\fP (\fBblas_sparse_matrix\fP A, int nnz, const void *val, const int *indx, const int *jndx)"
+.br
+.ti -1c
+.RI "void \fBblas_zuscr_insert_entries_\fP (\fBblas_sparse_matrix\fP *A, int *nnz, const void *val, const int *indx, const int *jndx, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_suscr_insert_col\fP (\fBblas_sparse_matrix\fP A, int j, int nnz, const float *val, const int *indx)"
+.br
+.ti -1c
+.RI "void \fBblas_suscr_insert_col_\fP (\fBblas_sparse_matrix\fP *A, int *j, int *nnz, const float *val, const int *indx, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_duscr_insert_col\fP (\fBblas_sparse_matrix\fP A, int j, int nnz, const double *val, const int *indx)"
+.br
+.ti -1c
+.RI "void \fBblas_duscr_insert_col_\fP (\fBblas_sparse_matrix\fP *A, int *j, int *nnz, const double *val, const int *indx, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cuscr_insert_col\fP (\fBblas_sparse_matrix\fP A, int j, int nnz, const void *val, const int *indx)"
+.br
+.ti -1c
+.RI "void \fBblas_cuscr_insert_col_\fP (\fBblas_sparse_matrix\fP *A, int *j, int *nnz, const void *val, const int *indx, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zuscr_insert_col\fP (\fBblas_sparse_matrix\fP A, int j, int nnz, const void *val, const int *indx)"
+.br
+.ti -1c
+.RI "void \fBblas_zuscr_insert_col_\fP (\fBblas_sparse_matrix\fP *A, int *j, int *nnz, const void *val, const int *indx, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_suscr_insert_row\fP (\fBblas_sparse_matrix\fP A, int i, int nnz, const float *val, const int *indx)"
+.br
+.ti -1c
+.RI "void \fBblas_suscr_insert_row_\fP (\fBblas_sparse_matrix\fP *A, int *i, int *nnz, const float *val, const int *indx, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_duscr_insert_row\fP (\fBblas_sparse_matrix\fP A, int i, int nnz, const double *val, const int *indx)"
+.br
+.ti -1c
+.RI "void \fBblas_duscr_insert_row_\fP (\fBblas_sparse_matrix\fP *A, int *i, int *nnz, const double *val, const int *indx, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cuscr_insert_row\fP (\fBblas_sparse_matrix\fP A, int i, int nnz, const void *val, const int *indx)"
+.br
+.ti -1c
+.RI "void \fBblas_cuscr_insert_row_\fP (\fBblas_sparse_matrix\fP *A, int *i, int *nnz, const void *val, const int *indx, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zuscr_insert_row\fP (\fBblas_sparse_matrix\fP A, int i, int nnz, const void *val, const int *indx)"
+.br
+.ti -1c
+.RI "void \fBblas_zuscr_insert_row_\fP (\fBblas_sparse_matrix\fP *A, int *i, int *nnz, const void *val, const int *indx, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_suscr_insert_clique\fP (\fBblas_sparse_matrix\fP A, const int k, const int l, const float *val, const int row_stride, const int col_stride, const int *indx, const int *jndx)"
+.br
+.ti -1c
+.RI "void \fBblas_suscr_insert_clique_\fP (\fBblas_sparse_matrix\fP *A, const int *k, const int *l, const float *val, const int *row_stride, const int *col_stride, const int *indx, const int *jndx, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_duscr_insert_clique\fP (\fBblas_sparse_matrix\fP A, const int k, const int l, const double *val, const int row_stride, const int col_stride, const int *indx, const int *jndx)"
+.br
+.ti -1c
+.RI "void \fBblas_duscr_insert_clique_\fP (\fBblas_sparse_matrix\fP *A, const int *k, const int *l, const double *val, const int *row_stride, const int *col_stride, const int *indx, const int *jndx, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cuscr_insert_clique\fP (\fBblas_sparse_matrix\fP A, const int k, const int l, const void *val, const int row_stride, const int col_stride, const int *indx, const int *jndx)"
+.br
+.ti -1c
+.RI "void \fBblas_cuscr_insert_clique_\fP (\fBblas_sparse_matrix\fP *A, const int *k, const int *l, const void *val, const int *row_stride, const int *col_stride, const int *indx, const int *jndx, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zuscr_insert_clique\fP (\fBblas_sparse_matrix\fP A, const int k, const int l, const void *val, const int row_stride, const int col_stride, const int *indx, const int *jndx)"
+.br
+.ti -1c
+.RI "void \fBblas_zuscr_insert_clique_\fP (\fBblas_sparse_matrix\fP *A, const int *k, const int *l, const void *val, const int *row_stride, const int *col_stride, const int *indx, const int *jndx, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_suscr_insert_block\fP (\fBblas_sparse_matrix\fP A, const float *val, int row_stride, int col_stride, int i, int j)"
+.br
+.ti -1c
+.RI "void \fBblas_suscr_insert_block_\fP (\fBblas_sparse_matrix\fP *A, const float *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_duscr_insert_block\fP (\fBblas_sparse_matrix\fP A, const double *val, int row_stride, int col_stride, int i, int j)"
+.br
+.ti -1c
+.RI "void \fBblas_duscr_insert_block_\fP (\fBblas_sparse_matrix\fP *A, const double *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cuscr_insert_block\fP (\fBblas_sparse_matrix\fP A, const void *val, int row_stride, int col_stride, int i, int j)"
+.br
+.ti -1c
+.RI "void \fBblas_cuscr_insert_block_\fP (\fBblas_sparse_matrix\fP *A, const void *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zuscr_insert_block\fP (\fBblas_sparse_matrix\fP A, const void *val, int row_stride, int col_stride, int i, int j)"
+.br
+.ti -1c
+.RI "void \fBblas_zuscr_insert_block_\fP (\fBblas_sparse_matrix\fP *A, const void *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_uscr_end\fP (\fBblas_sparse_matrix\fP A)"
+.br
+.ti -1c
+.RI "void \fBblas_uscr_end_\fP (\fBblas_sparse_matrix\fP *A, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_usds\fP (\fBblas_sparse_matrix\fP A)"
+.br
+.ti -1c
+.RI "void \fBblas_usds_\fP (\fBblas_sparse_matrix\fP *A, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_susrows_scale\fP (\fBblas_sparse_matrix\fP A, const float *d, enum \fBblas_trans_type\fP trans)"
+.br
+.ti -1c
+.RI "void \fBblas_susrows_scale_\fP (\fBblas_sparse_matrix\fP *A, const float *d, enum \fBblas_trans_type\fP *trans, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_dusrows_scale\fP (\fBblas_sparse_matrix\fP A, const double *d, enum \fBblas_trans_type\fP trans)"
+.br
+.ti -1c
+.RI "void \fBblas_dusrows_scale_\fP (\fBblas_sparse_matrix\fP *A, const double *d, enum \fBblas_trans_type\fP *trans, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cusrows_scale\fP (\fBblas_sparse_matrix\fP A, const void *d, enum \fBblas_trans_type\fP trans)"
+.br
+.ti -1c
+.RI "void \fBblas_cusrows_scale_\fP (\fBblas_sparse_matrix\fP *A, const void *d, enum \fBblas_trans_type\fP *trans, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zusrows_scale\fP (\fBblas_sparse_matrix\fP A, const void *d, enum \fBblas_trans_type\fP trans)"
+.br
+.ti -1c
+.RI "void \fBblas_zusrows_scale_\fP (\fBblas_sparse_matrix\fP *A, const void *d, enum \fBblas_trans_type\fP *trans, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_susget_diag\fP (\fBblas_sparse_matrix\fP A, float *d)"
+.br
+.ti -1c
+.RI "void \fBblas_susget_diag_\fP (\fBblas_sparse_matrix\fP *A, float *d, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_dusget_diag\fP (\fBblas_sparse_matrix\fP A, double *d)"
+.br
+.ti -1c
+.RI "void \fBblas_dusget_diag_\fP (\fBblas_sparse_matrix\fP *A, double *d, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cusget_diag\fP (\fBblas_sparse_matrix\fP A, void *d)"
+.br
+.ti -1c
+.RI "void \fBblas_cusget_diag_\fP (\fBblas_sparse_matrix\fP *A, void *d, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zusget_diag\fP (\fBblas_sparse_matrix\fP A, void *d)"
+.br
+.ti -1c
+.RI "void \fBblas_zusget_diag_\fP (\fBblas_sparse_matrix\fP *A, void *d, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_susget_rows_nnz\fP (\fBblas_sparse_matrix\fP A, int fr, int lr, int *nnzp)"
+.br
+.ti -1c
+.RI "void \fBblas_susget_rows_nnz_\fP (\fBblas_sparse_matrix\fP *A, int *fr, int *lr, int *nnzp, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_dusget_rows_nnz\fP (\fBblas_sparse_matrix\fP A, int fr, int lr, int *nnzp)"
+.br
+.ti -1c
+.RI "void \fBblas_dusget_rows_nnz_\fP (\fBblas_sparse_matrix\fP *A, int *fr, int *lr, int *nnzp, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cusget_rows_nnz\fP (\fBblas_sparse_matrix\fP A, int fr, int lr, int *nnzp)"
+.br
+.ti -1c
+.RI "void \fBblas_cusget_rows_nnz_\fP (\fBblas_sparse_matrix\fP *A, int *fr, int *lr, int *nnzp, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zusget_rows_nnz\fP (\fBblas_sparse_matrix\fP A, int fr, int lr, int *nnzp)"
+.br
+.ti -1c
+.RI "void \fBblas_zusget_rows_nnz_\fP (\fBblas_sparse_matrix\fP *A, int *fr, int *lr, int *nnzp, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_susget_rows_sparse\fP (\fBblas_sparse_matrix\fP A, float *VA, int *IA, int *JA, int *nnz, int fr, int lr)"
+.br
+.ti -1c
+.RI "void \fBblas_susget_rows_sparse_\fP (\fBblas_sparse_matrix\fP *A, float *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_dusget_rows_sparse\fP (\fBblas_sparse_matrix\fP A, double *VA, int *IA, int *JA, int *nnz, int fr, int lr)"
+.br
+.ti -1c
+.RI "void \fBblas_dusget_rows_sparse_\fP (\fBblas_sparse_matrix\fP *A, double *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cusget_rows_sparse\fP (\fBblas_sparse_matrix\fP A, void *VA, int *IA, int *JA, int *nnz, int fr, int lr)"
+.br
+.ti -1c
+.RI "void \fBblas_cusget_rows_sparse_\fP (\fBblas_sparse_matrix\fP *A, void *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zusget_rows_sparse\fP (\fBblas_sparse_matrix\fP A, void *VA, int *IA, int *JA, int *nnz, int fr, int lr)"
+.br
+.ti -1c
+.RI "void \fBblas_zusget_rows_sparse_\fP (\fBblas_sparse_matrix\fP *A, void *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_susget_matrix_nnz\fP (\fBblas_sparse_matrix\fP A, int *nnz)"
+.br
+.ti -1c
+.RI "void \fBblas_susget_matrix_nnz_\fP (\fBblas_sparse_matrix\fP *A, int *nnz, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_dusget_matrix_nnz\fP (\fBblas_sparse_matrix\fP A, int *nnz)"
+.br
+.ti -1c
+.RI "void \fBblas_dusget_matrix_nnz_\fP (\fBblas_sparse_matrix\fP *A, int *nnz, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cusget_matrix_nnz\fP (\fBblas_sparse_matrix\fP A, int *nnz)"
+.br
+.ti -1c
+.RI "void \fBblas_cusget_matrix_nnz_\fP (\fBblas_sparse_matrix\fP *A, int *nnz, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zusget_matrix_nnz\fP (\fBblas_sparse_matrix\fP A, int *nnz)"
+.br
+.ti -1c
+.RI "void \fBblas_zusget_matrix_nnz_\fP (\fBblas_sparse_matrix\fP *A, int *nnz, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_susget_infinity_norm\fP (\fBblas_sparse_matrix\fP A, float *in, enum \fBblas_trans_type\fP trans)"
+.br
+.ti -1c
+.RI "void \fBblas_susget_infinity_norm_\fP (\fBblas_sparse_matrix\fP *A, float *in, enum \fBblas_trans_type\fP *trans, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_dusget_infinity_norm\fP (\fBblas_sparse_matrix\fP A, double *in, enum \fBblas_trans_type\fP trans)"
+.br
+.ti -1c
+.RI "void \fBblas_dusget_infinity_norm_\fP (\fBblas_sparse_matrix\fP *A, double *in, enum \fBblas_trans_type\fP *trans, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cusget_infinity_norm\fP (\fBblas_sparse_matrix\fP A, void *in, enum \fBblas_trans_type\fP trans)"
+.br
+.ti -1c
+.RI "void \fBblas_cusget_infinity_norm_\fP (\fBblas_sparse_matrix\fP *A, void *in, enum \fBblas_trans_type\fP *trans, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zusget_infinity_norm\fP (\fBblas_sparse_matrix\fP A, void *in, enum \fBblas_trans_type\fP trans)"
+.br
+.ti -1c
+.RI "void \fBblas_zusget_infinity_norm_\fP (\fBblas_sparse_matrix\fP *A, void *in, enum \fBblas_trans_type\fP *trans, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_susset_elements\fP (\fBblas_sparse_matrix\fP A, const int *ia, const int *ja, const float *va, int nnz)"
+.br
+.ti -1c
+.RI "void \fBblas_susset_elements_\fP (\fBblas_sparse_matrix\fP *A, const int *ia, const int *ja, const float *va, int *nnz, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_dusset_elements\fP (\fBblas_sparse_matrix\fP A, const int *ia, const int *ja, const double *va, int nnz)"
+.br
+.ti -1c
+.RI "void \fBblas_dusset_elements_\fP (\fBblas_sparse_matrix\fP *A, const int *ia, const int *ja, const double *va, int *nnz, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cusset_elements\fP (\fBblas_sparse_matrix\fP A, const int *ia, const int *ja, const void *va, int nnz)"
+.br
+.ti -1c
+.RI "void \fBblas_cusset_elements_\fP (\fBblas_sparse_matrix\fP *A, const int *ia, const int *ja, const void *va, int *nnz, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zusset_elements\fP (\fBblas_sparse_matrix\fP A, const int *ia, const int *ja, const void *va, int nnz)"
+.br
+.ti -1c
+.RI "void \fBblas_zusset_elements_\fP (\fBblas_sparse_matrix\fP *A, const int *ia, const int *ja, const void *va, int *nnz, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_susset_element\fP (\fBblas_sparse_matrix\fP A, int i, int j, float *v)"
+.br
+.ti -1c
+.RI "void \fBblas_susset_element_\fP (\fBblas_sparse_matrix\fP *A, int *i, int *j, float *v, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_dusset_element\fP (\fBblas_sparse_matrix\fP A, int i, int j, double *v)"
+.br
+.ti -1c
+.RI "void \fBblas_dusset_element_\fP (\fBblas_sparse_matrix\fP *A, int *i, int *j, double *v, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cusset_element\fP (\fBblas_sparse_matrix\fP A, int i, int j, void *v)"
+.br
+.ti -1c
+.RI "void \fBblas_cusset_element_\fP (\fBblas_sparse_matrix\fP *A, int *i, int *j, void *v, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zusset_element\fP (\fBblas_sparse_matrix\fP A, int i, int j, void *v)"
+.br
+.ti -1c
+.RI "void \fBblas_zusset_element_\fP (\fBblas_sparse_matrix\fP *A, int *i, int *j, void *v, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_susget_element\fP (\fBblas_sparse_matrix\fP A, int i, int j, float *v)"
+.br
+.ti -1c
+.RI "void \fBblas_susget_element_\fP (\fBblas_sparse_matrix\fP *A, int *i, int *j, float *v, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_dusget_element\fP (\fBblas_sparse_matrix\fP A, int i, int j, double *v)"
+.br
+.ti -1c
+.RI "void \fBblas_dusget_element_\fP (\fBblas_sparse_matrix\fP *A, int *i, int *j, double *v, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_cusget_element\fP (\fBblas_sparse_matrix\fP A, int i, int j, void *v)"
+.br
+.ti -1c
+.RI "void \fBblas_cusget_element_\fP (\fBblas_sparse_matrix\fP *A, int *i, int *j, void *v, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_zusget_element\fP (\fBblas_sparse_matrix\fP A, int i, int j, void *v)"
+.br
+.ti -1c
+.RI "void \fBblas_zusget_element_\fP (\fBblas_sparse_matrix\fP *A, int *i, int *j, void *v, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_usgp\fP (\fBblas_sparse_matrix\fP A, int pname)"
+.br
+.ti -1c
+.RI "void \fBblas_usgp_\fP (\fBblas_sparse_matrix\fP *A, int *pname, int *istat)"
+.br
+.ti -1c
+.RI "void \fBblas_ussp_\fP (\fBblas_sparse_matrix\fP *A, int *pname, int *istat)"
+.br
+.ti -1c
+.RI "int \fBBLAS_ussp\fP (\fBblas_sparse_matrix\fP A, int pname)"
+.br
+.ti -1c
+.RI "struct rsb_mtx_t * \fBrsb_blas_get_mtx\fP (\fBblas_sparse_matrix\fP A)"
+.br
+.in -1c
+.SH "Detailed Description"
+.PP 
+A Sparse BLAS interface (see http://www.netlib.org/blas/blast-forum/) to \fClibrsb\fP\&. Level 1 (vector-vector operations) is supported in a basic way\&. Level 2 (sparse matrix-dense vector operations) is supported fully\&. Level 3 (sparse matrix-dense matrix operations) is supported as a wrapper around Level 2\&. 
+
+We also implement a number of useful extra functions as custom extensions, giving access to other \fClibrsb\fP functionality\&.
+.PP
+The usage pattern of this interface matches that of the Sparse BLAS standard, except for the necessity of initializing and finalizing \fClibrsb\fP\&. The Sparse BLAS interface is also available for Fortran: see \fBrsb_blas_sparse\&.F90\fP\&.
+.PP
+The user should be aware of the following: 
+.PD 0
+
+.IP "\(bu" 2
+Because this Sparse BLAS implementation is built around \fClibrsb\fP, initialization with \fBrsb_lib_init()\fP and finalization with \fBrsb_lib_exit()\fP is necessary\&. Inclusion of the \fC\fBrsb\&.h\fP\fP header is necessary\&. 
+.IP "\(bu" 2
+\fClibrsb\fP lets users enable or disable support for individual BLAS numerical types at configure/build time\&. Hence, while all the interface functions are always declared in the Sparse BLAS header file, they may return an error code if the corresponding type was not enabled\&. Be sure to have configured the library correctly at configure time (and see the \fBblas_sparse\&.h\fP header file for the types enabled in the current build)\&. 
+.IP "\(bu" 2
+According to the standard, the C functions for complex types accept scalar values by reference rather than by value; the equivalent functions for the other types do not, which may cause confusion\&. Be careful\&. 
+.IP "\(bu" 2
+Error checking is weak; for instance, passing a function a matrix handle of mismatching type will not be detected as an error, although it is incorrect\&. 
+.IP "\(bu" 2
+According to the standard, VBR- and BCSR-style constructors are supported, although they are merely interfaces to \fClibrsb's\fP own matrix representation\&. 
+.IP "\(bu" 2
+Here we list both the Fortran and the C functions\&. However, the Fortran functions are declared and documented with the C notation\&. We may provide better documentation in a subsequent release\&. 
+.IP "\(bu" 2
+Each identifier documented here suffixed by \fC_\fP (e\&.g\&.: \fBblas_susdot_()\fP) can be used from Fortran with the name stripped of that suffix (so in this case, \fCblas_susdot\fP)\&. We will provide a proper fix to this inconvenience in a subsequent release\&. 
+.IP "\(bu" 2
+Each Fortran program using \fClibrsb's\fP Sparse BLAS Implementation shall \fCuse\fP modules \fC\fBblas_sparse\fP\fP and \fCrsb\fP\&. 
+.IP "\(bu" 2
+Also Fortran programs have to call \fBrsb_lib_init()\fP and \fBrsb_lib_exit()\fP e\&.g\&.: 
+.PP
+.nf
+        USE blas_sparse             ! module implementing the Sparse BLAS on the top of librsb
+        USE rsb                     ! rsb module
+        ...
+        INTEGER :: istat            ! integer variable
+        ...
+        istat = rsb_lib_init(RSB_NULL_INIT_OPTIONS) ! please note that this is not part of Sparse BLAS but it is needed by librsb
+        if(istat.NE.0)STOP          ! a value different than zero signals an error
+        ...
+        ! code calling Sparse BLAS routines
+        ...
+        istat = rsb_lib_exit(RSB_NULL_EXIT_OPTIONS) ! please note that this is not part of Sparse BLAS but it is needed by librsb
+        if(istat.NE.0)STOP          ! a value different than zero signals an error
+        ...
+
+.fi
+.PP
+ 
+.IP "\(bu" 2
+For Fortran, more procedures exist, although they are not documented here\&. According to the Sparse BLAS (http://www.netlib.org/blas/blast-forum/), for almost every subroutine whose identifier is prefixed with \fCblas_X\fP (with \fCX\fP being one of S,D,C,Z), a corresponding generic modern Fortran version exists\&. Please note that not all procedure identifier prefixes include the type character\&.
+.PP
+E\&.g\&.: 
+.PP
+.nf
+! the following code ('d' stands for 'double precision'):
+CALL blas_duscr_begin(nr,nc,A,istat)
+CALL blas_ussp(A,blas_lower_symmetric,istat)
+CALL blas_duscr_insert_entries(A,nnz,VA,IA,JA,istat)
+CALL blas_duscr_end(A,istat)
+CALL blas_dusmv(transT,alpha,A,X,incX,B,incB,istat) 
+CALL blas_dusds(A,istat)
+! is equivalent to:
+CALL duscr_begin(nr,nc,A,istat) ! here, 'd' must be retained to avoid ambiguity
+CALL ussp(A,blas_lower_symmetric,istat)
+CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+CALL uscr_end(A,istat)
+CALL usmv(transT,alpha,A,X,incX,B,incB,istat) 
+CALL usds(A,istat)
+
+.fi
+.PP
+ 
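+.PP
+For C, a minimal skeleton analogous to the Fortran fragment above might look as follows\&. This is an editorial sketch, not a program shipped with \fClibrsb\fP; it assumes the \fCdouble\fP type was enabled at configure time, and uses the C default zero-based indices:
+.PP
+.nf
+#include <rsb\&.h>         /* rsb_lib_init/rsb_lib_exit: librsb's own, not Sparse BLAS */
+#include <blas_sparse\&.h> /* the Sparse BLAS interface */
+#include <stdio\&.h>
+
+int main(void)
+{
+        blas_sparse_matrix A = -1; /* -1 is the error value of the handle-returning calls */
+        const int nnz = 3, nr = 2, nc = 2;
+        const double va[] = { 1\&.0, 1\&.0, 1\&.0 };  /* three nonzeroes of a 2x2 matrix */
+        const int ia[] = { 0, 1, 1 }, ja[] = { 0, 0, 1 };
+        const double x[] = { 1\&.0, 1\&.0 };
+        double y[] = { 0\&.0, 0\&.0 };
+
+        if(rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
+                return -1;
+        if((A = BLAS_duscr_begin(nr, nc)) == -1)
+                return -1;
+        if(BLAS_duscr_insert_entries(A, nnz, va, ia, ja) != 0)
+                return -1;
+        if(BLAS_duscr_end(A) != 0) /* assembled: no more insertions from now on */
+                return -1;
+        if(BLAS_dusmv(blas_no_trans, 2\&.0, A, x, 1, y, 1) != 0) /* y := y + 2 A x */
+                return -1;
+        printf("y = [ %lg %lg ]\n", y[0], y[1]);
+        if(BLAS_usds(A) != 0) /* release the matrix handle */
+                return -1;
+        if(rsb_lib_exit(RSB_NULL_EXIT_OPTIONS) != RSB_ERR_NO_ERROR)
+                return -1;
+        return 0;
+}
+.fi
+.PP
+ 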
+.SH "Function Documentation"
+.PP 
+.SS "int BLAS_cusaxpy (intnnz, const void *alpha, const void *x, const int *indx, void *y, intincy, enum \fBblas_base_type\fP index_base)"
+Sparse vector update: $Y \leftarrow \alpha X + Y$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&. 
+.br
+\fIalpha\fP Will scale values of $X$ before accumulating to $Y$\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
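+.PP
+As a hedged usage sketch (sizes and values made up for illustration)\&. Note that, the type being complex, \fCalpha\fP is passed by reference:
+.PP
+.nf
+#include <complex\&.h>     /* C99 complex types */
+#include <blas_sparse\&.h>
+
+/* y := 2 x + y, with x a packed sparse vector of 3 entries */
+void cusaxpy_sketch(float complex y[5])
+{
+        const int nnz = 3;
+        const float complex alpha = 2\&.0f;
+        const float complex x[] = { 1\&.0f, 2\&.0f, 3\&.0f }; /* packed values of X */
+        const int indx[] = { 0, 2, 4 };                  /* indices of X into y */
+
+        (void)BLAS_cusaxpy(nnz, &alpha, x, indx, y, 1, blas_zero_base);
+}
+.fi
+.PP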
+.SS "void blas_cusaxpy_ (int *nnz, const void *alpha, const void *x, const int *indx, void *y, int *incy, enum \fBblas_base_type\fP *index_base, int *istat)"
+Sparse vector update: $Y \leftarrow \alpha X + Y$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&. 
+.br
+\fIalpha\fP Will scale values of $X$ before accumulating to $Y$\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "\fBblas_sparse_matrix\fP BLAS_cuscr_begin (intm, intn)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIm\fP Is the count of rows\&. 
+.br
+\fIn\fP Is the count of columns\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+A matrix handle in case of success, or -1 on error\&.
+.RE
+.PP
+
+.SS "void blas_cuscr_begin_ (int *m, int *n, \fBblas_sparse_matrix\fP *A, int *istat)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIm\fP Is the count of rows\&. 
+.br
+\fIn\fP Is the count of columns\&.
+.br
+\fIA\fP A valid pointer to an empty matrix handle\&. 
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&. Will assign a valid matrix handle to $A$ in case of success, or set it to -1 on error\&.
+.RE
+.PP
+
+.SS "\fBblas_sparse_matrix\fP BLAS_cuscr_block_begin (intMb, intNb, intk, intl)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIk,l\fP Are row and column dimensions when specifying a matrix as BCSR\&. 
+.br
+\fIMb\fP Block rows count\&. 
+.br
+\fINb\fP Block columns count\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+A matrix handle in case of success, or -1 on error\&.
+.RE
+.PP
+
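+.PP
+For illustration (dimensions hypothetical), beginning a 4x4 complex matrix made of 2x2 blocks, to be filled via \fBBLAS_cuscr_insert_block\fP and assembled via \fBBLAS_cuscr_end\fP, could look like:
+.PP
+.nf
+/* 2 block rows and 2 block columns of 2x2 blocks each: a 4x4 matrix overall */
+blas_sparse_matrix A = BLAS_cuscr_block_begin(2, 2, 2, 2); /* Mb, Nb, k, l */
+if(A == -1)
+        return -1; /* allocation failed */
+.fi
+.PP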
+.SS "void blas_cuscr_block_begin_ (int *Mb, int *Nb, int *k, int *l, \fBblas_sparse_matrix\fP *A, int *istat)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIk,l\fP Are row and column dimensions when specifying a matrix as BCSR\&. 
+.br
+\fIMb\fP Block rows count\&. 
+.br
+\fINb\fP Block columns count\&.
+.br
+\fIA\fP A valid pointer to an empty matrix handle\&. 
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&. Will assign a valid matrix handle to $A$ in case of success, or set it to -1 on error\&.
+.RE
+.PP
+
+.SS "int BLAS_cuscr_end (\fBblas_sparse_matrix\fPA)"
+Makes an assembled matrix out of a matrix in build state\&. After this, it is no longer possible to insert nonzeroes, but computational routines can be invoked\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_cuscr_end_ (\fBblas_sparse_matrix\fP *A, int *istat)"
+Makes an assembled matrix out of a matrix in build state\&. After this, it is no longer possible to insert nonzeroes, but computational routines can be invoked\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_cuscr_insert_block (\fBblas_sparse_matrix\fPA, const void *val, introw_stride, intcol_stride, inti, intj)"
+Inserts a whole block in a matrix, assuming it is in build state\&. The block size is assumed to be the one specified when calling the (type) corresponding matrix blocked \fCbegin\fP function\&. If no blocked \fCbegin\fP function was called, 1x1 blocking (that is, no blocking) will be assumed\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIrow_stride,col_stride\fP Row and column strides in accessing \fCval\fP\&. 
+.br
+\fIi,j\fP Block row/column indices\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Signature of this routine for Fortran does not agree with the standard\&. This shall be corrected in a future release\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBBLAS_cuscr_block_begin\fP, \fBBLAS_cuscr_block_begin\fP, \fBBLAS_duscr_block_begin\fP, \fBBLAS_zuscr_block_begin\fP, \fBBLAS_cuscr_begin\fP, \fBBLAS_suscr_begin\fP, \fBBLAS_duscr_begin\fP, \fBBLAS_zuscr_begin\fP\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
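+.PP
+A minimal sketch of blocked insertion (illustrative only; row-major access of the block via row stride 2 and column stride 1, and zero-based block indices, are assumptions):
+.PP
+.nf
+#include <blas_sparse.h>
+#include <complex.h>
+
+/* Insert one dense 2 x 2 block into a matrix of 2 x 2 blocks. */
+int block_example(void)
+{
+    const float complex blk[] = { 1.0f, 0.0f,
+                                  0.0f, 2.0f };
+    blas_sparse_matrix A = BLAS_cuscr_block_begin(2, 2, 2, 2);
+    if (A == -1)
+        return -1;
+    if (BLAS_cuscr_insert_block(A, blk, 2, 1, 0, 0) != 0)
+        return -1;
+    return BLAS_cuscr_end(A);
+}
+.fi
+.PP
+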
+.SS "void blas_cuscr_insert_block_ (\fBblas_sparse_matrix\fP *A, const void *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)"
+Inserts a whole block in a matrix, assuming it is in build state\&. The block size is assumed to be the one specified when calling the corresponding (typed) blocked \fCbegin\fP function\&. If no blocked \fCbegin\fP function was called, 1x1 blocking (that is, no blocking) will be assumed\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIrow_stride,col_stride\fP Row and column strides in accessing \fCval\fP\&. 
+.br
+\fIi,j\fP Block row/column indices\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Signature of this routine for Fortran does not agree with the standard\&. This shall be corrected in a future release\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBBLAS_cuscr_block_begin\fP, \fBBLAS_suscr_block_begin\fP, \fBBLAS_duscr_block_begin\fP, \fBBLAS_zuscr_block_begin\fP, \fBBLAS_cuscr_begin\fP, \fBBLAS_suscr_begin\fP, \fBBLAS_duscr_begin\fP, \fBBLAS_zuscr_begin\fP\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_cuscr_insert_clique (\fBblas_sparse_matrix\fPA, const intk, const intl, const void *val, const introw_stride, const intcol_stride, const int *indx, const int *jndx)"
+Inserts a whole clique in a matrix, assuming this is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIk,l\fP Clique rows and columns count\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIrow_stride,col_stride\fP Row/columns stride in accessing the clique\&. 
+.br
+\fIindx,jndx\fP Row/column indices arrays\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Signature of this routine for Fortran does not agree with the standard\&. This shall be corrected in a future release\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_cuscr_insert_clique_ (\fBblas_sparse_matrix\fP *A, const int *k, const int *l, const void *val, const int *row_stride, const int *col_stride, const int *indx, const int *jndx, int *istat)"
+Inserts a whole clique in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIk,l\fP Clique rows and columns count\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIrow_stride,col_stride\fP Row/columns stride in accessing the clique\&. 
+.br
+\fIindx,jndx\fP Row/column indices arrays\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Signature of this routine for Fortran does not agree with the standard\&. This shall be corrected in a future release\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_cuscr_insert_col (\fBblas_sparse_matrix\fPA, intj, intnnz, const void *val, const int *indx)"
+Inserts a whole column in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Row indices array\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_cuscr_insert_col_ (\fBblas_sparse_matrix\fP *A, int *j, int *nnz, const void *val, const int *indx, int *istat)"
+Inserts a whole column in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Row indices array\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_cuscr_insert_entries (\fBblas_sparse_matrix\fPA, intnnz, const void *val, const int *indx, const int *jndx)"
+Inserts entries in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Row indices array\&. 
+.br
+\fIjndx\fP Column indices array\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_cuscr_insert_entries_ (\fBblas_sparse_matrix\fP *A, int *nnz, const void *val, const int *indx, const int *jndx, int *istat)"
+Inserts entries in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Row indices array\&. 
+.br
+\fIjndx\fP Column indices array\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_cuscr_insert_entry (\fBblas_sparse_matrix\fPA, const void *val, inti, intj)"
+Inserts an entry in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIval\fP Pointer to the value to insert\&. 
+.br
+\fIi,j\fP Row and column indices\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_cuscr_insert_entry_ (\fBblas_sparse_matrix\fP *A, const void *val, int *i, int *j, int *istat)"
+Inserts an entry in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIval\fP Pointer to the value to insert\&. 
+.br
+\fIi,j\fP Row and column indices\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_cuscr_insert_row (\fBblas_sparse_matrix\fPA, inti, intnnz, const void *val, const int *indx)"
+Inserts a whole row in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Column indices array\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_cuscr_insert_row_ (\fBblas_sparse_matrix\fP *A, int *i, int *nnz, const void *val, const int *indx, int *istat)"
+Inserts a whole row in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Column indices array\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "\fBblas_sparse_matrix\fP BLAS_cuscr_variable_block_begin (intMb, intNb, const int *K, const int *L)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIK,L\fP Are arrays specifying row/column block sizes when specifying a matrix as VBR\&. 
+.br
+\fIMb\fP Block rows count\&. 
+.br
+\fINb\fP Block columns count\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+A matrix handle in case of success, or -1 on error\&.
+.RE
+.PP
+
+.SS "void blas_cuscr_variable_block_begin_ (int *Mb, int *Nb, const int *K, const int *L, \fBblas_sparse_matrix\fP *A, int *istat)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIK,L\fP Are arrays specifying row/column block sizes when specifying a matrix as VBR\&. 
+.br
+\fIMb\fP Block rows count\&. 
+.br
+\fINb\fP Block columns count\&.
+.br
+\fIA\fP A valid pointer to an empty matrix handle\&. 
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&. Will assign a valid matrix handle to $A$ in case of success, or set it to -1 on error\&.
+.RE
+.PP
+
+.SS "int BLAS_cusdot (enum \fBblas_conj_type\fP conj, intnnz, const void *x, const int *indx, const void *y, intincy, void *r, enum \fBblas_base_type\fP index_base)"
+Sparse dot product\&. $r \leftarrow X^T Y,$ $r \leftarrow X^H Y$ 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIr\fP Sparse dot result array\&. 
+.br
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of the $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&. 
+.br
+\fIconj\fP If \fBblas_conj\fP, values of X will be considered conjugated\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
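+.PP
+For example (a sketch, assuming zero-based indexing via \fBblas_zero_base\fP):
+.PP
+.nf
+#include <blas_sparse.h>
+#include <complex.h>
+
+/* r <- X^T Y, with X held sparse as (values, indices) and Y dense. */
+void dot_example(void)
+{
+    const int nnz = 2;
+    const float complex x[] = { 1.0f, 2.0f };
+    const int indx[] = { 0, 3 };
+    const float complex y[] = { 4.0f, 3.0f, 2.0f, 1.0f };
+    float complex r = 0.0f;
+    BLAS_cusdot(blas_no_conj, nnz, x, indx, y, 1, &r, blas_zero_base);
+    /* here r == 1*4 + 2*1 == 6 */
+}
+.fi
+.PP
+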
+.SS "void blas_cusdot_ (enum \fBblas_conj_type\fP *conj, int *nnz, const void *x, const int *indx, const void *y, int *incy, void *r, enum \fBblas_base_type\fP *index_base, int *istat)"
+Sparse dot product\&. $r \leftarrow X^T Y,$ $r \leftarrow X^H Y$ 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIr\fP Sparse dot result array\&. 
+.br
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of the $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&. 
+.br
+\fIconj\fP If \fBblas_conj\fP, values of X will be considered conjugated\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "int BLAS_cusga (intnnz, const void *y, intincy, void *x, const int *indx, enum \fBblas_base_type\fP index_base)"
+Sparse gather\&. $X \leftarrow Y |_x$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of the $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "void blas_cusga_ (int *nnz, const void *y, int *incy, void *x, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+Sparse gather\&. $X \leftarrow Y |_x$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of the $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "int BLAS_cusget_diag (\fBblas_sparse_matrix\fPA, void *d)"
+Get matrix diagonal\&. $d\leftarrow diag(A)$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fId\fP Array for the diagonal entries\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_cusget_diag_ (\fBblas_sparse_matrix\fP *A, void *d, int *istat)"
+Get matrix diagonal\&. $d\leftarrow diag(A)$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fId\fP Array for the diagonal entries\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_cusget_element (\fBblas_sparse_matrix\fPA, inti, intj, void *v)"
+Get a single matrix nonzero coefficient $A_{i,j}$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fIv\fP Value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_cusget_element_ (\fBblas_sparse_matrix\fP *A, int *i, int *j, void *v, int *istat)"
+Get a single matrix nonzero coefficient $A_{i,j}$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fIv\fP Value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_cusget_infinity_norm (\fBblas_sparse_matrix\fPA, void *in, enum \fBblas_trans_type\fP trans)"
+Get infinity norm of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIin\fP Infinity norm pointer\&. 
+.br
+\fItrans\fP Transposition parameter\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_cusget_infinity_norm_ (\fBblas_sparse_matrix\fP *A, void *in, enum \fBblas_trans_type\fP *trans, int *istat)"
+Get infinity norm of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIin\fP Infinity norm pointer\&. 
+.br
+\fItrans\fP Transposition parameter\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_cusget_matrix_nnz (\fBblas_sparse_matrix\fPA, int *nnz)"
+Get nnz count of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fInnz\fP Output value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_cusget_matrix_nnz_ (\fBblas_sparse_matrix\fP *A, int *nnz, int *istat)"
+Get nnz count of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fInnz\fP Output value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_cusget_rows_nnz (\fBblas_sparse_matrix\fPA, intfr, intlr, int *nnzp)"
+Get nnz count of matrix row interval\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIfr\fP First row\&. 
+.br
+\fIlr\fP Last row\&. 
+.br
+\fInnzp\fP Pointer to the nonzeroes variable\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_cusget_rows_nnz_ (\fBblas_sparse_matrix\fP *A, int *fr, int *lr, int *nnzp, int *istat)"
+Get nnz count of matrix row interval\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIfr\fP First row\&. 
+.br
+\fIlr\fP Last row\&. 
+.br
+\fInnzp\fP Pointer to the nonzeroes variable\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_cusget_rows_sparse (\fBblas_sparse_matrix\fPA, void *VA, int *IA, int *JA, int *nnz, intfr, intlr)"
+Get sparse rows of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIVA\fP Pointer to values\&. 
+.br
+\fIIA\fP Row indices array\&. 
+.br
+\fIJA\fP Column indices array\&. 
+.br
+\fInnz\fP Number of nonzeroes obtained\&. 
+.br
+\fIfr\fP First row\&. 
+.br
+\fIlr\fP Last row\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
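+.PP
+The two extension calls are naturally combined: first query the nonzero count of the row interval, then size the coordinate output arrays accordingly\&. A sketch (error handling abbreviated; zero-based rows assumed):
+.PP
+.nf
+#include <stdlib.h>
+#include <blas_sparse.h>
+#include <complex.h>
+
+/* Extract rows fr..lr of A in coordinate (COO) form. */
+int extract_rows(blas_sparse_matrix A, int fr, int lr)
+{
+    int nnz = 0, res = -1;
+    if (BLAS_cusget_rows_nnz(A, fr, lr, &nnz) != 0)
+        return -1;
+    float complex *VA = malloc(nnz * sizeof(*VA));
+    int *IA = malloc(nnz * sizeof(*IA));
+    int *JA = malloc(nnz * sizeof(*JA));
+    if (VA && IA && JA)
+        res = BLAS_cusget_rows_sparse(A, VA, IA, JA, &nnz, fr, lr);
+    free(VA); free(IA); free(JA);
+    return res;
+}
+.fi
+.PP
+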
+.SS "void blas_cusget_rows_sparse_ (\fBblas_sparse_matrix\fP *A, void *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)"
+Get sparse rows of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIVA\fP Pointer to values\&. 
+.br
+\fIIA\fP Row indices array\&. 
+.br
+\fIJA\fP Column indices array\&. 
+.br
+\fInnz\fP Number of nonzeroes obtained\&. 
+.br
+\fIfr\fP First row\&. 
+.br
+\fIlr\fP Last row\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_cusgz (intnnz, void *y, intincy, void *x, const int *indx, enum \fBblas_base_type\fP index_base)"
+Sparse gather and zero\&. $X \leftarrow Y |_x;Y|_x\leftarrow 0$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of the $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "void blas_cusgz_ (int *nnz, void *y, int *incy, void *x, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+Sparse gather and zero\&. $X \leftarrow Y |_x;Y|_x\leftarrow 0$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of the $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "int BLAS_cusmm (enum \fBblas_order_type\fP order, enum \fBblas_trans_type\fP transA, intnrhs, const void *alpha, \fBblas_sparse_matrix\fPA, const void *b, intldb, void *c, intldc)"
+Multiply by a dense matrix (aka multi-vector)\&. Either of $C \leftarrow \alpha AB+C,$ $C \leftarrow \alpha A^T B+C,$ $C \leftarrow \alpha A^H B+C$, depending on the value of \fCtransA\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIorder\fP Layout of the dense array\&. 
+.br
+\fItransA\fP Transposition operator for matrix \fIA\fP\&. 
+.br
+\fInrhs\fP Number of right hand side columns\&. 
+.br
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIb\fP Dense vector \fIb\fP\&. 
+.br
+\fIldb\fP Leading dimension of \fIb\fP\&. 
+.br
+\fIc\fP Dense vector \fIc\fP\&. 
+.br
+\fIldc\fP Leading dimension of \fIc\fP\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+.PP
+.PP
+By setting the \fC\fBblas_rsb_autotune_next_operation\fP\fP property via \fBBLAS_ussp\fP (at any time) the next multiplication routine call (either of \fBBLAS_dusmv\fP, \fBBLAS_susmv\fP, \fBBLAS_zusmv\fP, \fBBLAS_cusmv\fP, \fBBLAS_dusmm\fP, \fBBLAS_susmm\fP, \fBBLAS_zusmm\fP, \fBBLAS_cusmm\fP) will invoke autotuning before carrying out the effective operation\&. The tuning will take into account parameters like transposition, number of right hand sides, and scaling constants\&. By setting  [...]
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+On the topic of autotuning, see also \fBrsb_tune_spmm\fP\&. If \fC--enable-rsb-num-threads\fP has been specified at configure time, the \fCRSB_NUM_THREADS\fP environment variable will override the number of executing threads specified by \fCOMP_NUM_THREADS\fP\&. (See also \fBRSB_IO_WANT_EXECUTING_THREADS\fP)\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
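+.PP
+A sketch of a call with two right hand sides (assuming a 4 x 4 assembled handle and column-major dense operands with leading dimension 4):
+.PP
+.nf
+#include <blas_sparse.h>
+#include <complex.h>
+
+/* C <- alpha * A * B + C, with B and C being 4 x 2, column-major. */
+void mm_example(blas_sparse_matrix A)
+{
+    const float complex alpha = 1.0f;
+    float complex b[4 * 2] = { 0 }, c[4 * 2] = { 0 };
+    b[0] = 1.0f;  /* B(0,0) */
+    b[4] = 1.0f;  /* B(0,1): second column starts at ldb == 4 */
+    BLAS_cusmm(blas_colmajor, blas_no_trans, 2, &alpha, A, b, 4, c, 4);
+}
+.fi
+.PP
+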
+.SS "void blas_cusmm_ (enum \fBblas_order_type\fP *order, enum \fBblas_trans_type\fP *transA, int *nrhs, const void *alpha, \fBblas_sparse_matrix\fP *A, const void *b, int *ldb, void *c, int *ldc, int *istat)"
+Multiply by a dense matrix (aka multi-vector)\&. Either of $C \leftarrow \alpha AB+C,$ $C \leftarrow \alpha A^T B+C,$ $C \leftarrow \alpha A^H B+C$, depending on the value of \fCtransA\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIorder\fP Layout of the dense array\&. 
+.br
+\fItransA\fP Transposition operator for matrix \fIA\fP\&. 
+.br
+\fInrhs\fP Number of right hand side columns\&. 
+.br
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIb\fP Dense vector \fIb\fP\&. 
+.br
+\fIldb\fP Leading dimension of \fIb\fP\&. 
+.br
+\fIc\fP Dense vector \fIc\fP\&. 
+.br
+\fIldc\fP Leading dimension of \fIc\fP\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+.PP
+.PP
+By setting the \fC\fBblas_rsb_autotune_next_operation\fP\fP property via \fBBLAS_ussp\fP (at any time) the next multiplication routine call (either of \fBBLAS_dusmv\fP, \fBBLAS_susmv\fP, \fBBLAS_zusmv\fP, \fBBLAS_cusmv\fP, \fBBLAS_dusmm\fP, \fBBLAS_susmm\fP, \fBBLAS_zusmm\fP, \fBBLAS_cusmm\fP) will invoke autotuning before carrying out the effective operation\&. The tuning will take into account parameters like transposition, number of right hand sides, and scaling constants\&. By setting  [...]
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+On the topic of autotuning, see also \fBrsb_tune_spmm\fP\&. If \fC--enable-rsb-num-threads\fP has been specified at configure time, the \fCRSB_NUM_THREADS\fP environment variable will override the number of executing threads specified by \fCOMP_NUM_THREADS\fP\&. (See also \fBRSB_IO_WANT_EXECUTING_THREADS\fP)\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_cusmv (enum \fBblas_trans_type\fP transA, const void *alpha, \fBblas_sparse_matrix\fPA, const void *x, intincx, void *y, intincy)"
+Multiply by a dense vector\&. Either of $Y \leftarrow \alpha A X + Y ,$ $Y \leftarrow \alpha A^T X + Y,$ $Y \leftarrow \alpha A^H X + Y$, depending on the value of \fCtransA\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fItransA\fP Transposition operator for matrix \fIA\fP\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIx\fP Dense vector \fIx\fP\&. 
+.br
+\fIincx\fP Stride of \fIx\fP\&. 
+.br
+\fIy\fP Dense vector \fIy\fP\&. 
+.br
+\fIincy\fP Stride of \fIy\fP\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+.PP
+.PP
+By setting the \fC\fBblas_rsb_autotune_next_operation\fP\fP property via \fBBLAS_ussp\fP (at any time) the next multiplication routine call (either of \fBBLAS_dusmv\fP, \fBBLAS_susmv\fP, \fBBLAS_zusmv\fP, \fBBLAS_cusmv\fP, \fBBLAS_dusmm\fP, \fBBLAS_susmm\fP, \fBBLAS_zusmm\fP, \fBBLAS_cusmm\fP) will invoke autotuning before carrying out the effective operation\&. The tuning will take into account parameters like transposition, number of right hand sides, and scaling constants\&. By setting  [...]
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+On the topic of autotuning, see also \fBrsb_tune_spmm\fP\&. If \fC--enable-rsb-num-threads\fP has been specified at configure time, the \fCRSB_NUM_THREADS\fP environment variable will override the number of executing threads specified by \fCOMP_NUM_THREADS\fP\&. (See also \fBRSB_IO_WANT_EXECUTING_THREADS\fP)\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
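+.PP
+A minimal sketch (assuming a 2 x 2 assembled handle and unit strides):
+.PP
+.nf
+#include <blas_sparse.h>
+#include <complex.h>
+
+/* y <- alpha * A * x + y */
+void mv_example(blas_sparse_matrix A)
+{
+    const float complex alpha = 1.0f;
+    const float complex x[] = { 1.0f, 1.0f };
+    float complex y[] = { 0.0f, 0.0f };
+    BLAS_cusmv(blas_no_trans, &alpha, A, x, 1, y, 1);
+}
+.fi
+.PP
+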
+.SS "void blas_cusmv_ (enum \fBblas_trans_type\fP *transA, const void *alpha, \fBblas_sparse_matrix\fP *A, const void *x, int *incx, void *y, int *incy, int *istat)"
+Multiply by a dense vector\&. Either of $Y \leftarrow \alpha A X + Y ,$ $Y \leftarrow \alpha A^T X + Y,$ $Y \leftarrow \alpha A^H X + Y$, depending on the value of \fCtransA\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fItransA\fP Transposition operator for matrix \fIA\fP\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIx\fP Dense vector \fIx\fP\&. 
+.br
+\fIincx\fP Stride of \fIx\fP\&. 
+.br
+\fIy\fP Dense vector \fIy\fP\&. 
+.br
+\fIincy\fP Stride of \fIy\fP\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+.PP
+.PP
+By setting the \fC\fBblas_rsb_autotune_next_operation\fP\fP property via \fBBLAS_ussp\fP (at any time) the next multiplication routine call (either of \fBBLAS_dusmv\fP, \fBBLAS_susmv\fP, \fBBLAS_zusmv\fP, \fBBLAS_cusmv\fP, \fBBLAS_dusmm\fP, \fBBLAS_susmm\fP, \fBBLAS_zusmm\fP, \fBBLAS_cusmm\fP) will invoke autotuning before carrying out the effective operation\&. The tuning will take into account parameters like transposition, number of right hand sides, and scaling constants\&. By setting  [...]
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+On the topic of autotuning, see also \fBrsb_tune_spmm\fP\&. If \fC--enable-rsb-num-threads\fP has been specified at configure time, the \fCRSB_NUM_THREADS\fP environment variable will override the number of executing threads specified by \fCOMP_NUM_THREADS\fP\&. (See also \fBRSB_IO_WANT_EXECUTING_THREADS\fP)\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_cusrows_scale (\fBblas_sparse_matrix\fPA, const void *d, enum \fBblas_trans_type\fP trans)"
+Scale rows interval of matrix by specified factor\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fId\fP Rows scaling vector\&. 
+.br
+\fItrans\fP Transposition parameter (if transposed will scale columns)\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_cusrows_scale_ (\fBblas_sparse_matrix\fP *A, const void *d, enum \fBblas_trans_type\fP *trans, int *istat)"
+Scale the rows of the matrix by the factors in the vector \fCd\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fId\fP Rows scaling vector\&. 
+.br
+\fItrans\fP Transposition parameter (if transposed will scale columns)\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_cussc (intnnz, const void *x, void *y, intincy, const int *indx, enum \fBblas_base_type\fP index_base)"
+Sparse scatter: $Y |_x\leftarrow X$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of the $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
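+.PP
+For example (a sketch, assuming zero-based indices):
+.PP
+.nf
+#include <blas_sparse.h>
+#include <complex.h>
+
+/* Scatter the compressed entries of X into the dense vector Y. */
+void sc_example(void)
+{
+    const int nnz = 2;
+    const float complex x[] = { 5.0f, 7.0f };
+    const int indx[] = { 1, 3 };
+    float complex y[4] = { 0 };
+    BLAS_cussc(nnz, x, y, 1, indx, blas_zero_base);
+    /* now y == { 0, 5, 0, 7 } */
+}
+.fi
+.PP
+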
+.SS "void blas_cussc_ (int *nnz, const void *x, void *y, int *incy, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+Sparse scatter: $Y |_x\leftarrow X$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of the $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "int BLAS_cusset_element (\fBblas_sparse_matrix\fPA, inti, intj, void *v)"
+Set a single (existing) matrix nonzero coefficient $A_{i,j}$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fIv\fP Value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_cusset_element_ (\fBblas_sparse_matrix\fP *A, int *i, int *j, void *v, int *istat)"
+Set a single (existing) matrix nonzero coefficient $A_{i,j}$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fIv\fP Value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_cusset_elements (\fBblas_sparse_matrix\fPA, const int *ia, const int *ja, const void *va, intnnz)"
+Set individual matrix nonzero coefficients values\&. The operation is pattern preserving, that is, nonzeroes must already exist\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIia\fP Row indices array\&. 
+.br
+\fIja\fP Column indices array\&. 
+.br
+\fIva\fP Values array\&. 
+.br
+\fInnz\fP Length of the \fCia\fP, \fCja\fP and \fCva\fP arrays\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
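+.PP
+For instance, a pattern-preserving update of two existing coefficients might look as follows (a sketch; the positions must already be nonzeroes of \fCA\fP):
+.PP
+.nf
+#include <blas_sparse.h>
+#include <complex.h>
+
+/* Overwrite A(0,0) and A(1,1), which must already exist in A. */
+int update_example(blas_sparse_matrix A)
+{
+    const int ia[] = { 0, 1 }, ja[] = { 0, 1 };
+    const float complex va[] = { 9.0f, 8.0f };
+    return BLAS_cusset_elements(A, ia, ja, va, 2);
+}
+.fi
+.PP
+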
+.SS "void blas_cusset_elements_ (\fBblas_sparse_matrix\fP *A, const int *ia, const int *ja, const void *va, int *nnz, int *istat)"
+Set the values of individual matrix nonzero coefficients\&. The operation is pattern preserving, that is, nonzeroes must already exist\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIia\fP Row indices array\&. 
+.br
+\fIja\fP Column indices array\&. 
+.br
+\fIva\fP Values array\&. 
+.br
+\fInnz\fP Length of the \fCia\fP, \fCja\fP and \fCva\fP arrays\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_cussm (enum \fBblas_order_type\fP order, enum \fBblas_trans_type\fP transT, intnrhs, const void *alpha, \fBblas_sparse_matrix\fPT, void *b, intldb)"
+Triangular solve, by a dense matrix (aka multi-vector)\&. Either of $B \leftarrow \alpha T^{-1} B,$ $B \leftarrow \alpha T^{-T} B,$ $B \leftarrow \alpha T^{-H} B$, depending on the value of \fCtransT\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIorder\fP Layout of the dense array\&. 
+.br
+\fItransT\fP Transposition operator for matrix \fIT\fP\&. 
+.br
+\fInrhs\fP Number of right hand side columns\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIT\fP A valid triangular matrix handle\&. 
+.br
+\fIb\fP Dense vector \fIb\fP\&. 
+.br
+\fIldb\fP Leading dimension of \fIb\fP\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_cussm_ (enum \fBblas_order_type\fP *order, enum \fBblas_trans_type\fP *transT, int *nrhs, const void *alpha, \fBblas_sparse_matrix\fP *T, void *b, int *ldb, int *istat)"
+Triangular solve, by a dense matrix (aka multi-vector)\&. Either of $B \leftarrow \alpha T^{-1} B,$ $B \leftarrow \alpha T^{-T} B,$ $B \leftarrow \alpha T^{-H} B$, depending on the value of \fCtransT\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIorder\fP Layout of the dense array\&. 
+.br
+\fItransT\fP Transposition operator for matrix \fIT\fP\&. 
+.br
+\fInrhs\fP Number of right hand side columns\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIT\fP A valid triangular matrix handle\&. 
+.br
+\fIb\fP Dense vector \fIb\fP\&. 
+.br
+\fIldb\fP Leading dimension of \fIb\fP\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_cussv (enum \fBblas_trans_type\fP transT, const void *alpha, \fBblas_sparse_matrix\fPT, void *x, intincx)"
+Triangular solve, by a dense vector\&. Either of $X \leftarrow \alpha T^{-1}X,$ $X \leftarrow \alpha T^{-T}X,$ $X \leftarrow \alpha T^{-H}X$, depending on the value of \fCtransT\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fItransT\fP Transposition operator for matrix \fIT\fP\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIT\fP A valid triangular matrix handle\&. 
+.br
+\fIx\fP Dense vector \fIx\fP\&. 
+.br
+\fIincx\fP Stride of \fIx\fP\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
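+.PP
+A sketch of an in-place solve (assuming \fCT\fP is assembled and was marked triangular at build time, e\&.g\&. via \fBBLAS_ussp\fP):
+.PP
+.nf
+#include <blas_sparse.h>
+#include <complex.h>
+
+/* x <- alpha * T^{-1} x, overwriting x. */
+void sv_example(blas_sparse_matrix T)
+{
+    const float complex alpha = 1.0f;
+    float complex x[] = { 1.0f, 2.0f };
+    BLAS_cussv(blas_no_trans, &alpha, T, x, 1);
+}
+.fi
+.PP
+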
+.SS "void blas_cussv_ (enum \fBblas_trans_type\fP *transT, const void *alpha, \fBblas_sparse_matrix\fP *T, void *x, int *incx, int *istat)"
+Triangular solve, by a dense vector\&. Either of $X \leftarrow \alpha T^{-1}X,$ $X \leftarrow \alpha T^{-T}X,$ $X \leftarrow \alpha T^{-H}X$, depending on the value of \fCtransT\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fItransT\fP Transposition operator for matrix \fIT\fP\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIT\fP A valid triangular matrix handle\&. 
+.br
+\fIx\fP Dense vector \fIx\fP\&. 
+.br
+\fIincx\fP Stride of \fIx\fP\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_dusaxpy (intnnz, doublealpha, const double *x, const int *indx, double *y, intincy, enum \fBblas_base_type\fP index_base)"
+Sparse vector update: $Y \leftarrow \alpha X + Y$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of the $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&. 
+.br
+\fIalpha\fP Will scale values of $X$ before accumulating to $Y$\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
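+.PP
+For example (a sketch, assuming zero-based indices):
+.PP
+.nf
+#include <blas_sparse.h>
+
+/* y <- alpha * x + y: only the indexed entries of y change. */
+void axpy_example(void)
+{
+    const int nnz = 2;
+    const double x[] = { 1.0, 2.0 };
+    const int indx[] = { 0, 2 };
+    double y[] = { 10.0, 10.0, 10.0 };
+    BLAS_dusaxpy(nnz, 2.0, x, indx, y, 1, blas_zero_base);
+    /* now y == { 12.0, 10.0, 14.0 } */
+}
+.fi
+.PP
+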
+.SS "void blas_dusaxpy_ (int *nnz, double *alpha, const double *x, const int *indx, double *y, int *incy, enum \fBblas_base_type\fP *index_base, int *istat)"
+Sparse vector update: $Y \leftarrow \alpha X + Y$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of the $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&. 
+.br
+\fIalpha\fP Will scale values of $X$ before accumulating to $Y$\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "\fBblas_sparse_matrix\fP BLAS_duscr_begin (intm, intn)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIm\fP Is the count of rows\&. 
+.br
+\fIn\fP Is the count of columns\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+A matrix handle in case of success, or -1 on error\&.
+.RE
+.PP
+
+.SS "void blas_duscr_begin_ (int *m, int *n, \fBblas_sparse_matrix\fP *A, int *istat)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIm\fP Is the count of rows\&. 
+.br
+\fIn\fP Is the count of columns\&.
+.br
+\fIA\fP A valid pointer to an empty matrix handle\&. 
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&. Will assign a valid matrix handle to $A$ in case of success, or set it to -1 on error\&.
+.RE
+.PP
+
+.SS "\fBblas_sparse_matrix\fP BLAS_duscr_block_begin (intMb, intNb, intk, intl)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIk,l\fP Are row and column dimensions when specifying a matrix as BCSR\&. 
+.br
+\fIMb\fP Block rows count\&. 
+.br
+\fINb\fP Block columns count\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+A matrix handle in case of success, or -1 on error\&.
+.RE
+.PP
+
+.SS "void blas_duscr_block_begin_ (int *Mb, int *Nb, int *k, int *l, \fBblas_sparse_matrix\fP *A, int *istat)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIk,l\fP Are row and column dimensions when specifying a matrix as BCSR\&. 
+.br
+\fIMb\fP Block rows count\&. 
+.br
+\fINb\fP Block columns count\&.
+.br
+\fIA\fP A valid pointer to an empty matrix handle\&. 
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&. Will assign a valid matrix handle to $A$ in case of success, or set it to -1 on error\&.
+.RE
+.PP
+
+.SS "int BLAS_duscr_end (\fBblas_sparse_matrix\fPA)"
+Makes an assembled matrix out of a matrix in build state\&. After this, it is not possible anymore to insert nonzeroes, but computational routines\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_duscr_end_ (\fBblas_sparse_matrix\fP *A, int *istat)"
+Makes an assembled matrix out of a matrix in build state\&. After this, it is no longer possible to insert nonzeroes, but computational routines can be invoked\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_duscr_insert_block (\fBblas_sparse_matrix\fPA, const double *val, introw_stride, intcol_stride, inti, intj)"
+Inserts a whole block in a matrix, assuming it is in build state\&. The block size is assumed to be the one specified when calling the (type) corresponding matrix blocked \fCbegin\fP function\&. If not called a blocked \fCbegin\fP function, will assume 1x1 (that is, no) blocking\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIrow_stride,col_stride\fP Row and column strides in accessing \fCval\fP\&. 
+.br
+\fIi,j\fP Block row/column indices\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Signature of this routine for Fortran does not agree with the standard\&. This shall be corrected in a future release\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBBLAS_cuscr_block_begin\fP, \fBBLAS_suscr_block_begin\fP, \fBBLAS_duscr_block_begin\fP, \fBBLAS_zuscr_block_begin\fP, \fBBLAS_cuscr_begin\fP, \fBBLAS_suscr_begin\fP, \fBBLAS_duscr_begin\fP, \fBBLAS_zuscr_begin\fP\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_duscr_insert_block_ (\fBblas_sparse_matrix\fP *A, const double *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)"
+Inserts a whole block in a matrix, assuming it is in build state\&. The block size is assumed to be the one specified when calling the corresponding (typed) blocked \fCbegin\fP function\&. If no blocked \fCbegin\fP function was called, 1x1 blocking (that is, no blocking) will be assumed\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIrow_stride,col_stride\fP Row and column strides in accessing \fCval\fP\&. 
+.br
+\fIi,j\fP Block row/column indices\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Signature of this routine for Fortran does not agree with the standard\&. This shall be corrected in a future release\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBBLAS_cuscr_block_begin\fP, \fBBLAS_suscr_block_begin\fP, \fBBLAS_duscr_block_begin\fP, \fBBLAS_zuscr_block_begin\fP, \fBBLAS_cuscr_begin\fP, \fBBLAS_suscr_begin\fP, \fBBLAS_duscr_begin\fP, \fBBLAS_zuscr_begin\fP\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_duscr_insert_clique (\fBblas_sparse_matrix\fPA, const intk, const intl, const double *val, const introw_stride, const intcol_stride, const int *indx, const int *jndx)"
+Inserts a whole clique in a matrix, assuming this is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIk,l\fP Clique rows and columns count\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIrow_stride,col_stride\fP Row/columns stride in accessing the clique\&. 
+.br
+\fIindx,jndx\fP Row/column indices arrays\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Signature of this routine for Fortran does not agree with the standard\&. This shall be corrected in a future release\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_duscr_insert_clique_ (\fBblas_sparse_matrix\fP *A, const int *k, const int *l, const double *val, const int *row_stride, const int *col_stride, const int *indx, const int *jndx, int *istat)"
+Inserts a whole clique in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIk,l\fP Clique rows and columns count\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIrow_stride,col_stride\fP Row/columns stride in accessing the clique\&. 
+.br
+\fIindx,jndx\fP Row/column indices arrays\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Signature of this routine for Fortran does not agree with the standard\&. This shall be corrected in a future release\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_duscr_insert_col (\fBblas_sparse_matrix\fPA, intj, intnnz, const double *val, const int *indx)"
+Inserts a whole column in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Row indices array\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_duscr_insert_col_ (\fBblas_sparse_matrix\fP *A, int *j, int *nnz, const double *val, const int *indx, int *istat)"
+Inserts a whole column in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Row indices array\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_duscr_insert_entries (\fBblas_sparse_matrix\fPA, intnnz, const double *val, const int *indx, const int *jndx)"
+Inserts entries in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Row indices array\&. 
+.br
+\fIjndx\fP Column indices array\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_duscr_insert_entries_ (\fBblas_sparse_matrix\fP *A, int *nnz, const double *val, const int *indx, const int *jndx, int *istat)"
+Inserts entries in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Row indices array\&. 
+.br
+\fIjndx\fP Column indices array\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_duscr_insert_entry (\fBblas_sparse_matrix\fPA, doubleval, inti, intj)"
+Inserts an entry in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIval\fP Value to insert\&. 
+.br
+\fIi,j\fP Row and column indices\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_duscr_insert_entry_ (\fBblas_sparse_matrix\fP *A, double *val, int *i, int *j, int *istat)"
+Inserts an entry in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIval\fP Pointer to the value to insert\&. 
+.br
+\fIi,j\fP Row and column indices\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_duscr_insert_row (\fBblas_sparse_matrix\fPA, inti, intnnz, const double *val, const int *indx)"
+Inserts a whole row in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Column indices array\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_duscr_insert_row_ (\fBblas_sparse_matrix\fP *A, int *i, int *nnz, const double *val, const int *indx, int *istat)"
+Inserts a whole row in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Column indices array\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "\fBblas_sparse_matrix\fP BLAS_duscr_variable_block_begin (intMb, intNb, const int *K, const int *L)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIK,L\fP Are arrays specifying row/column block sizes when specifying a matrix as VBR\&. 
+.br
+\fIMb\fP Block rows count\&. 
+.br
+\fINb\fP Block columns count\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+A matrix handle in case of success, or -1 on error\&.
+.RE
+.PP
+
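+.PP
+For instance (a sketch; here \fCK\fP and \fCL\fP are read as per-block-row and per-block-column sizes, as described above):
+.PP
+.nf
+#include <blas_sparse.h>
+
+/* A 3 x 3 matrix partitioned as a 2 x 2 grid of variable blocks:
+   block rows of sizes 2 and 1, block columns of sizes 2 and 1. */
+blas_sparse_matrix vbr_example(void)
+{
+    const int K[] = { 2, 1 };
+    const int L[] = { 2, 1 };
+    return BLAS_duscr_variable_block_begin(2, 2, K, L);
+}
+.fi
+.PP
+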
+.SS "void blas_duscr_variable_block_begin_ (int *Mb, int *Nb, const int *K, const int *L, \fBblas_sparse_matrix\fP *A, int *istat)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIK,L\fP Are arrays specifying row/column block sizes when specifying a matrix as VBR\&. 
+.br
+\fIMb\fP Block rows count\&. 
+.br
+\fINb\fP Block columns count\&.
+.br
+\fIA\fP A valid pointer to an empty matrix handle\&. 
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&. Will assign a valid matrix handle to $A$ in case of success, or set it to -1 on error\&.
+.RE
+.PP
+
+.SS "int BLAS_dusdot (enum \fBblas_conj_type\fP conj, intnnz, const double *x, const int *indx, const double *y, intincy, double *r, enum \fBblas_base_type\fP index_base)"
+Sparse dot product\&. $r \leftarrow X^T Y,$ $r \leftarrow X^H Y$ 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIr\fP Sparse dot result array\&. 
+.br
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&. 
+.br
+\fIconj\fP If \fBblas_conj\fP, values of X will be considered conjugated\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
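+.PP
+\fBExample:\fP
+.RS 4
+An illustrative sketch (not from the standard text; error checking omitted, 0-based indices) of a sparse dot product:
+.PP
+.nf
+const double x[]    = { 1.0, 2.0 };           /* compressed values of sparse X */
+const int    indx[] = { 0, 3 };               /* X is nonzero at positions 0 and 3 */
+const double y[]    = { 4.0, 0.0, 0.0, 5.0 }; /* dense Y */
+double r = 0.0;
+BLAS_dusdot(blas_no_conj, 2, x, indx, y, 1, &r, blas_zero_base);
+/* r == 1.0*4.0 + 2.0*5.0 == 14.0 */
+.fi
+.RE
+.PP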
+.SS "void blas_dusdot_ (enum \fBblas_conj_type\fP *conj, int *nnz, const double *x, const int *indx, const double *y, int *incy, double *r, enum \fBblas_base_type\fP *index_base, int *istat)"
+Sparse dot product\&. $r \leftarrow X^T Y,$ $r \leftarrow X^H Y$ 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIr\fP Sparse dot result array\&. 
+.br
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&. 
+.br
+\fIconj\fP If \fBblas_conj\fP, values of X will be considered conjugated\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "int BLAS_dusga (intnnz, const double *y, intincy, double *x, const int *indx, enum \fBblas_base_type\fP index_base)"
+Sparse gather\&. $X \leftarrow Y |_x$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
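+.PP
+\fBExample:\fP
+.RS 4
+An illustrative sketch (assumed usage; 0-based indices) gathering two entries of a dense vector into compressed form:
+.PP
+.nf
+const double y[]    = { 4.0, 5.0, 6.0, 7.0 }; /* dense Y */
+const int    indx[] = { 1, 3 };               /* positions to gather */
+double x[2];                                  /* compressed X */
+BLAS_dusga(2, y, 1, x, indx, blas_zero_base);
+/* x == { 5.0, 7.0 } */
+.fi
+.RE
+.PP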
+.SS "void blas_dusga_ (int *nnz, const double *y, int *incy, double *x, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+Sparse gather\&. $X \leftarrow Y |_x$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "int BLAS_dusget_diag (\fBblas_sparse_matrix\fPA, double *d)"
+Get matrix diagonal\&. $d\leftarrow diag(A)$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fId\fP Array for the diagonal entries\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_dusget_diag_ (\fBblas_sparse_matrix\fP *A, double *d, int *istat)"
+Get matrix diagonal\&. $d\leftarrow diag(A)$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fId\fP Array for the diagonal entries\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_dusget_element (\fBblas_sparse_matrix\fPA, inti, intj, double *v)"
+Get a single matrix nonzero coefficient $A_{i,j}$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fIv\fP Value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_dusget_element_ (\fBblas_sparse_matrix\fP *A, int *i, int *j, double *v, int *istat)"
+Get a single matrix nonzero coefficient $A_{i,j}$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fIv\fP Value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_dusget_infinity_norm (\fBblas_sparse_matrix\fPA, double *in, enum \fBblas_trans_type\fP trans)"
+Get infinity norm of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIin\fP Infinity norm pointer\&. 
+.br
+\fItrans\fP Transposition parameter\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_dusget_infinity_norm_ (\fBblas_sparse_matrix\fP *A, double *in, enum \fBblas_trans_type\fP *trans, int *istat)"
+Get infinity norm of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIin\fP Infinity norm pointer\&. 
+.br
+\fItrans\fP Transposition parameter\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_dusget_matrix_nnz (\fBblas_sparse_matrix\fPA, int *nnz)"
+Get nnz count of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fInnz\fP Output value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_dusget_matrix_nnz_ (\fBblas_sparse_matrix\fP *A, int *nnz, int *istat)"
+Get nnz count of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fInnz\fP Output value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_dusget_rows_nnz (\fBblas_sparse_matrix\fPA, intfr, intlr, int *nnzp)"
+Get nnz count of matrix row interval\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIfr\fP First row\&. 
+.br
+\fIlr\fP Last row\&. 
+.br
+\fInnzp\fP Pointer to the nonzeroes variable\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_dusget_rows_nnz_ (\fBblas_sparse_matrix\fP *A, int *fr, int *lr, int *nnzp, int *istat)"
+Get nnz count of matrix row interval\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIfr\fP First row\&. 
+.br
+\fIlr\fP Last row\&. 
+.br
+\fInnzp\fP Pointer to the nonzeroes variable\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_dusget_rows_sparse (\fBblas_sparse_matrix\fPA, double *VA, int *IA, int *JA, int *nnz, intfr, intlr)"
+Get sparse rows of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIVA\fP Pointer to values\&. 
+.br
+\fIIA\fP Row indices array\&. 
+.br
+\fIJA\fP Column indices array\&. 
+.br
+\fInnz\fP Obtained nonzeroes\&. 
+.br
+\fIfr\fP First row\&. 
+.br
+\fIlr\fP Last row\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
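+.PP
+\fBExample:\fP
+.RS 4
+An illustrative sketch (assumed usage; \fCA\fP is an already assembled handle, error checking omitted) extracting rows 0 to 1 in coordinate form, sizing the output arrays via \fBBLAS_dusget_rows_nnz\fP first:
+.PP
+.nf
+#include <stdlib.h>
+int nnz = 0;
+BLAS_dusget_rows_nnz(A, 0, 1, &nnz);      /* count entries in rows 0..1 */
+double *VA = malloc(nnz * sizeof(*VA));
+int    *IA = malloc(nnz * sizeof(*IA));
+int    *JA = malloc(nnz * sizeof(*JA));
+BLAS_dusget_rows_sparse(A, VA, IA, JA, &nnz, 0, 1);
+/* ... use VA/IA/JA, then free them ... */
+.fi
+.RE
+.PP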
+.SS "void blas_dusget_rows_sparse_ (\fBblas_sparse_matrix\fP *A, double *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)"
+Get sparse rows of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIVA\fP Pointer to values\&. 
+.br
+\fIIA\fP Row indices array\&. 
+.br
+\fIJA\fP Column indices array\&. 
+.br
+\fInnz\fP Obtained nonzeroes\&. 
+.br
+\fIfr\fP First row\&. 
+.br
+\fIlr\fP Last row\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_dusgz (intnnz, double *y, intincy, double *x, const int *indx, enum \fBblas_base_type\fP index_base)"
+Sparse gather and zero\&. $X \leftarrow Y |_x;Y|_x\leftarrow 0$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "void blas_dusgz_ (int *nnz, double *y, int *incy, double *x, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+Sparse gather and zero\&. $X \leftarrow Y |_x;Y|_x\leftarrow 0$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "int BLAS_dusmm (enum \fBblas_order_type\fP order, enum \fBblas_trans_type\fP transA, intnrhs, doublealpha, \fBblas_sparse_matrix\fPA, const double *b, intldb, double *c, intldc)"
+Multiply by a dense matrix (aka multi-vector)\&. Either of $C \leftarrow \alpha AB+C,$ $C \leftarrow \alpha A^T B+C,$ $C \leftarrow \alpha A^H B+C$, depending on the value of \fCtransA\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIorder\fP Layout of the dense array\&. 
+.br
+\fItransA\fP Transposition operator for matrix \fIA\fP\&. 
+.br
+\fInrhs\fP Number of right hand side columns\&. 
+.br
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIb\fP Dense vector \fIb\fP\&. 
+.br
+\fIldb\fP Leading dimension of \fIb\fP\&. 
+.br
+\fIc\fP Dense vector \fIc\fP\&. 
+.br
+\fIldc\fP Leading dimension of \fIc\fP\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+.PP
+.PP
+By setting the \fC\fBblas_rsb_autotune_next_operation\fP\fP property via \fBBLAS_ussp\fP (at any time) the next multiplication routine call (either of \fBBLAS_dusmv\fP, \fBBLAS_susmv\fP, \fBBLAS_zusmv\fP, \fBBLAS_cusmv\fP, \fBBLAS_dusmm\fP, \fBBLAS_susmm\fP, \fBBLAS_zusmm\fP, \fBBLAS_cusmm\fP) will invoke autotuning before carrying out the effective operation\&. The tuning will take into account parameters like transposition, number of right hand sides, and scaling constants\&. By setting  [...]
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+On the topic of autotuning, see also \fBrsb_tune_spmm\fP\&. If \fC--enable-rsb-num-threads\fP has been specified at configure time, the \fCRSB_NUM_THREADS\fP environment variable will override the number of executing threads specified by \fCOMP_NUM_THREADS\fP\&. (See also \fBRSB_IO_WANT_EXECUTING_THREADS\fP)\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
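+.PP
+\fBExample:\fP
+.RS 4
+An illustrative sketch (assumed usage; \fCA\fP is an already assembled 2 x 2 handle as in the example under \fBBLAS_dusmv\fP below, error checking omitted) requesting autotuning for the next multiply:
+.PP
+.nf
+double B[4] = { 1.0, 1.0, 1.0, 1.0 };           /* 2 x 2, column-major */
+double C[4] = { 0.0, 0.0, 0.0, 0.0 };
+BLAS_ussp(A, blas_rsb_autotune_next_operation); /* librsb extension property */
+BLAS_dusmm(blas_colmajor, blas_no_trans, 2, 1.0, A, B, 2, C, 2);
+/* C <- 1.0 * A * B + C, autotuned before execution */
+.fi
+.RE
+.PP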
+.SS "void blas_dusmm_ (enum \fBblas_order_type\fP *order, enum \fBblas_trans_type\fP *transA, int *nrhs, double *alpha, \fBblas_sparse_matrix\fP *A, const double *b, int *ldb, double *c, int *ldc, int *istat)"
+Multiply by a dense matrix (aka multi-vector)\&. Either of $C \leftarrow \alpha AB+C,$ $C \leftarrow \alpha A^T B+C,$ $C \leftarrow \alpha A^H B+C$, depending on the value of \fCtransA\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIorder\fP Layout of the dense array\&. 
+.br
+\fItransA\fP Transposition operator for matrix \fIA\fP\&. 
+.br
+\fInrhs\fP Number of right hand side columns\&. 
+.br
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIb\fP Dense vector \fIb\fP\&. 
+.br
+\fIldb\fP Leading dimension of \fIb\fP\&. 
+.br
+\fIc\fP Dense vector \fIc\fP\&. 
+.br
+\fIldc\fP Leading dimension of \fIc\fP\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+.PP
+.PP
+By setting the \fC\fBblas_rsb_autotune_next_operation\fP\fP property via \fBBLAS_ussp\fP (at any time) the next multiplication routine call (either of \fBBLAS_dusmv\fP, \fBBLAS_susmv\fP, \fBBLAS_zusmv\fP, \fBBLAS_cusmv\fP, \fBBLAS_dusmm\fP, \fBBLAS_susmm\fP, \fBBLAS_zusmm\fP, \fBBLAS_cusmm\fP) will invoke autotuning before carrying out the effective operation\&. The tuning will take into account parameters like transposition, number of right hand sides, and scaling constants\&. By setting  [...]
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+On the topic of autotuning, see also \fBrsb_tune_spmm\fP\&. If \fC--enable-rsb-num-threads\fP has been specified at configure time, the \fCRSB_NUM_THREADS\fP environment variable will override the number of executing threads specified by \fCOMP_NUM_THREADS\fP\&. (See also \fBRSB_IO_WANT_EXECUTING_THREADS\fP)\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_dusmv (enum \fBblas_trans_type\fP transA, doublealpha, \fBblas_sparse_matrix\fPA, const double *x, intincx, double *y, intincy)"
+Multiply by a dense vector\&. Either of $Y \leftarrow \alpha A X + Y ,$ $Y \leftarrow \alpha A^T X + Y,$ $Y \leftarrow \alpha A^H X + Y$, depending on the value of \fCtransA\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fItransA\fP Transposition operator for matrix \fIA\fP\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIx\fP Dense vector \fIx\fP\&. 
+.br
+\fIincx\fP Stride of \fIx\fP\&. 
+.br
+\fIy\fP Dense vector \fIy\fP\&. 
+.br
+\fIincy\fP Stride of \fIy\fP\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+.PP
+.PP
+By setting the \fC\fBblas_rsb_autotune_next_operation\fP\fP property via \fBBLAS_ussp\fP (at any time) the next multiplication routine call (either of \fBBLAS_dusmv\fP, \fBBLAS_susmv\fP, \fBBLAS_zusmv\fP, \fBBLAS_cusmv\fP, \fBBLAS_dusmm\fP, \fBBLAS_susmm\fP, \fBBLAS_zusmm\fP, \fBBLAS_cusmm\fP) will invoke autotuning before carrying out the effective operation\&. The tuning will take into account parameters like transposition, number of right hand sides, and scaling constants\&. By setting  [...]
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+On the topic of autotuning, see also \fBrsb_tune_spmm\fP\&. If \fC--enable-rsb-num-threads\fP has been specified at configure time, the \fCRSB_NUM_THREADS\fP environment variable will override the number of executing threads specified by \fCOMP_NUM_THREADS\fP\&. (See also \fBRSB_IO_WANT_EXECUTING_THREADS\fP)\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
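+.PP
+\fBExample:\fP
+.RS 4
+A complete illustrative sketch (assumed usage, not from the standard text; error checking omitted) building a small matrix and multiplying by it:
+.PP
+.nf
+#include <rsb.h>         /* rsb_lib_init() */
+#include <blas_sparse.h>
+
+int main(void)
+{
+    rsb_lib_init(RSB_NULL_INIT_OPTIONS);           /* initialize librsb first */
+    blas_sparse_matrix A = BLAS_duscr_begin(2, 2); /* build state */
+    const double va[] = { 11.0, 22.0 };
+    const int ia[] = { 0, 1 }, ja[] = { 0, 1 };    /* diagonal pattern */
+    BLAS_duscr_insert_entries(A, 2, va, ia, ja);
+    BLAS_duscr_end(A);                             /* assemble */
+    const double x[] = { 1.0, 1.0 };
+    double y[] = { 0.0, 0.0 };
+    BLAS_dusmv(blas_no_trans, 1.0, A, x, 1, y, 1); /* y <- 1.0*A*x + y */
+    BLAS_usds(A);                                  /* release the handle */
+    return 0;
+}
+.fi
+.RE
+.PP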
+.SS "void blas_dusmv_ (enum \fBblas_trans_type\fP *transA, double *alpha, \fBblas_sparse_matrix\fP *A, const double *x, int *incx, double *y, int *incy, int *istat)"
+Multiply by a dense vector\&. Either of $Y \leftarrow \alpha A X + Y ,$ $Y \leftarrow \alpha A^T X + Y,$ $Y \leftarrow \alpha A^H X + Y$, depending on the value of \fCtransA\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fItransA\fP Transposition operator for matrix \fIA\fP\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIx\fP Dense vector \fIx\fP\&. 
+.br
+\fIincx\fP Stride of \fIx\fP\&. 
+.br
+\fIy\fP Dense vector \fIy\fP\&. 
+.br
+\fIincy\fP Stride of \fIy\fP\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+.PP
+.PP
+By setting the \fC\fBblas_rsb_autotune_next_operation\fP\fP property via \fBBLAS_ussp\fP (at any time) the next multiplication routine call (either of \fBBLAS_dusmv\fP, \fBBLAS_susmv\fP, \fBBLAS_zusmv\fP, \fBBLAS_cusmv\fP, \fBBLAS_dusmm\fP, \fBBLAS_susmm\fP, \fBBLAS_zusmm\fP, \fBBLAS_cusmm\fP) will invoke autotuning before carrying out the effective operation\&. The tuning will take into account parameters like transposition, number of right hand sides, and scaling constants\&. By setting  [...]
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+On the topic of autotuning, see also \fBrsb_tune_spmm\fP\&. If \fC--enable-rsb-num-threads\fP has been specified at configure time, the \fCRSB_NUM_THREADS\fP environment variable will override the number of executing threads specified by \fCOMP_NUM_THREADS\fP\&. (See also \fBRSB_IO_WANT_EXECUTING_THREADS\fP)\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_dusrows_scale (\fBblas_sparse_matrix\fPA, const double *d, enum \fBblas_trans_type\fP trans)"
+Scale rows interval of matrix by specified factor\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fId\fP Rows scaling vector\&. 
+.br
+\fItrans\fP Transposition parameter (if transposed, columns will be scaled instead)\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_dusrows_scale_ (\fBblas_sparse_matrix\fP *A, const double *d, enum \fBblas_trans_type\fP *trans, int *istat)"
+Scale the rows of the matrix by the specified scaling factors\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fId\fP Rows scaling vector\&. 
+.br
+\fItrans\fP Transposition parameter (if transposed, columns will be scaled instead)\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_dussc (intnnz, const double *x, double *y, intincy, const int *indx, enum \fBblas_base_type\fP index_base)"
+Sparse scatter: $Y |_x\leftarrow X$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "void blas_dussc_ (int *nnz, const double *x, double *y, int *incy, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+Sparse scatter: $Y |_x\leftarrow X$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "int BLAS_dusset_element (\fBblas_sparse_matrix\fPA, inti, intj, double *v)"
+Set a single (existing) matrix nonzero coefficient $A_{i,j}$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fIv\fP Value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_dusset_element_ (\fBblas_sparse_matrix\fP *A, int *i, int *j, double *v, int *istat)"
+Set a single (existing) matrix nonzero coefficient $A_{i,j}$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fIv\fP Value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_dusset_elements (\fBblas_sparse_matrix\fPA, const int *ia, const int *ja, const double *va, intnnz)"
+Set individual matrix nonzero coefficients values\&. The operation is pattern preserving, that is, nonzeroes must already exist\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIia\fP Row indices array\&. 
+.br
+\fIja\fP Column indices array\&. 
+.br
+\fIva\fP Values array\&. 
+.br
+\fInnz\fP Length of the \fCia\fP, \fCja\fP, \fCva\fP arrays\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
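+.PP
+\fBExample:\fP
+.RS 4
+An illustrative sketch (assumed usage; \fCA\fP is an already assembled handle whose pattern contains the touched entries):
+.PP
+.nf
+const int    ia[] = { 0, 1 };
+const int    ja[] = { 0, 1 };
+const double va[] = { 3.0, 4.0 };
+BLAS_dusset_elements(A, ia, ja, va, 2); /* overwrite entries (0,0) and (1,1) */
+.fi
+.RE
+.PP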
+.SS "void blas_dusset_elements_ (\fBblas_sparse_matrix\fP *A, const int *ia, const int *ja, const double *va, int *nnz, int *istat)"
+Set individual matrix nonzero coefficient values\&. The operation is pattern preserving, that is, nonzeroes must already exist\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIia\fP Row indices array\&. 
+.br
+\fIja\fP Column indices array\&. 
+.br
+\fIva\fP Values array\&. 
+.br
+\fInnz\fP Length of the \fCia\fP, \fCja\fP, \fCva\fP arrays\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_dussm (enum \fBblas_order_type\fP order, enum \fBblas_trans_type\fP transT, intnrhs, doublealpha, \fBblas_sparse_matrix\fPT, double *b, intldb)"
+Triangular solve, by a dense matrix (aka multi-vector)\&. Either of $B \leftarrow \alpha T^{-1} B,$ $B \leftarrow \alpha T^{-T} B,$ $B \leftarrow \alpha T^{-H} B$, depending on the value of \fCtransT\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIorder\fP Layout of the dense array\&. 
+.br
+\fItransT\fP Transposition operator for matrix \fIT\fP\&. 
+.br
+\fInrhs\fP Number of right hand side columns\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIT\fP A valid triangular matrix handle\&. 
+.br
+\fIb\fP Dense vector \fIb\fP\&. 
+.br
+\fIldb\fP Leading dimension of \fIb\fP\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_dussm_ (enum \fBblas_order_type\fP *order, enum \fBblas_trans_type\fP *transT, int *nrhs, double *alpha, \fBblas_sparse_matrix\fP *T, double *b, int *ldb, int *istat)"
+Triangular solve, by a dense matrix (aka multi-vector)\&. Either of $B \leftarrow \alpha T^{-1} B,$ $B \leftarrow \alpha T^{-T} B,$ $B \leftarrow \alpha T^{-H} B$, depending on the value of \fCtransT\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIorder\fP Layout of the dense array\&. 
+.br
+\fItransT\fP Transposition operator for matrix \fIT\fP\&. 
+.br
+\fInrhs\fP Number of right hand side columns\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIT\fP A valid triangular matrix handle\&. 
+.br
+\fIb\fP Dense vector \fIb\fP\&. 
+.br
+\fIldb\fP Leading dimension of \fIb\fP\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_dussv (enum \fBblas_trans_type\fP transT, doublealpha, \fBblas_sparse_matrix\fPT, double *x, intincx)"
+Triangular solve, by a dense vector\&. Either of $X \leftarrow \alpha T^{-1}X,$ $X \leftarrow \alpha T^{-T}X,$ $X \leftarrow \alpha T^{-H}X$, depending on the value of \fCtransT\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fItransT\fP Transposition operator for matrix \fIT\fP\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIT\fP A valid triangular matrix handle\&. 
+.br
+\fIx\fP Dense vector \fIx\fP\&. 
+.br
+\fIincx\fP Stride of \fIx\fP\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
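+.PP
+\fBExample:\fP
+.RS 4
+An illustrative sketch (assumed usage, not from the standard text; error checking omitted) marking \fCT\fP as lower triangular via \fBBLAS_ussp\fP at build time and then solving in place:
+.PP
+.nf
+blas_sparse_matrix T = BLAS_duscr_begin(2, 2);
+BLAS_ussp(T, blas_lower_triangular);              /* declare T lower triangular */
+const double tv[] = { 2.0, 1.0, 4.0 };
+const int ti[] = { 0, 1, 1 }, tj[] = { 0, 0, 1 }; /* entries (0,0),(1,0),(1,1) */
+BLAS_duscr_insert_entries(T, 3, tv, ti, tj);
+BLAS_duscr_end(T);                                /* assemble */
+double x[] = { 2.0, 5.0 };                        /* right-hand side, overwritten */
+BLAS_dussv(blas_no_trans, 1.0, T, x, 1);          /* x <- T^{-1} x == { 1.0, 1.0 } */
+BLAS_usds(T);
+.fi
+.RE
+.PP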
+.SS "void blas_dussv_ (enum \fBblas_trans_type\fP *transT, double *alpha, \fBblas_sparse_matrix\fP *T, double *x, int *incx, int *istat)"
+Triangular solve, by a dense vector\&. Either of $X \leftarrow \alpha T^{-1}X,$ $X \leftarrow \alpha T^{-T}X,$ $X \leftarrow \alpha T^{-H}X$, depending on the value of \fCtransT\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fItransT\fP Transposition operator for matrix \fIT\fP\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIT\fP A valid triangular matrix handle\&. 
+.br
+\fIx\fP Dense vector \fIx\fP\&. 
+.br
+\fIincx\fP Stride of \fIx\fP\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_susaxpy (intnnz, floatalpha, const float *x, const int *indx, float *y, intincy, enum \fBblas_base_type\fP index_base)"
+Sparse vector update: $Y \leftarrow \alpha X + Y$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&. 
+.br
+\fIalpha\fP Will scale values of $X$ before accumulating to $Y$\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
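+.PP
+\fBExample:\fP
+.RS 4
+An illustrative sketch (assumed usage; 0-based indices) accumulating a scaled sparse vector into a dense one:
+.PP
+.nf
+const float x[]    = { 1.0f, 3.0f };       /* compressed values of sparse X */
+const int   indx[] = { 0, 2 };             /* X is nonzero at positions 0 and 2 */
+float y[]          = { 1.0f, 1.0f, 1.0f };
+BLAS_susaxpy(2, 2.0f, x, indx, y, 1, blas_zero_base);
+/* y == { 3.0f, 1.0f, 7.0f } */
+.fi
+.RE
+.PP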
+.SS "void blas_susaxpy_ (int *nnz, float *alpha, const float *x, const int *indx, float *y, int *incy, enum \fBblas_base_type\fP *index_base, int *istat)"
+Sparse vector update: $Y \leftarrow \alpha X + Y$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&. 
+.br
+\fIalpha\fP Will scale values of $X$ before accumulating to $Y$\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "\fBblas_sparse_matrix\fP BLAS_suscr_begin (intm, intn)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIm\fP Is the count of rows\&. 
+.br
+\fIn\fP Is the count of columns\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+A matrix handle in case of success, or -1 on error\&.
+.RE
+.PP
+
+.SS "void blas_suscr_begin_ (int *m, int *n, \fBblas_sparse_matrix\fP *A, int *istat)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIm\fP Is the count of rows\&. 
+.br
+\fIn\fP Is the count of columns\&.
+.br
+\fIA\fP A valid pointer to an empty matrix handle\&. 
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&. Will assign a valid matrix handle to $A$ in case of success, or set it to -1 on error\&.
+.RE
+.PP
+
+.SS "\fBblas_sparse_matrix\fP BLAS_suscr_block_begin (intMb, intNb, intk, intl)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIk,l\fP Are row and column dimensions when specifying a matrix as BCSR\&. 
+.br
+\fIMb\fP Block rows count\&. 
+.br
+\fINb\fP Block columns count\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+A matrix handle in case of success, or -1 on error\&.
+.RE
+.PP
+
+.SS "void blas_suscr_block_begin_ (int *Mb, int *Nb, int *k, int *l, \fBblas_sparse_matrix\fP *A, int *istat)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIk,l\fP Are row and column dimensions when specifying a matrix as BCSR\&. 
+.br
+\fIMb\fP Block rows count\&. 
+.br
+\fINb\fP Block columns count\&.
+.br
+\fIA\fP A valid pointer to an empty matrix handle\&. 
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&. Will assign a valid matrix handle to $A$ in case of success, or set it to -1 on error\&.
+.RE
+.PP
+
+.SS "int BLAS_suscr_end (\fBblas_sparse_matrix\fPA)"
+Makes an assembled matrix out of a matrix in build state\&. After this, it is not possible anymore to insert nonzeroes, but computational routines\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_suscr_end_ (\fBblas_sparse_matrix\fP *A, int *istat)"
+Makes an assembled matrix out of a matrix in build state\&. After this, it is no longer possible to insert nonzeroes, but computational routines may be invoked\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_suscr_insert_block (\fBblas_sparse_matrix\fPA, const float *val, introw_stride, intcol_stride, inti, intj)"
+Inserts a whole block in a matrix, assuming it is in build state\&. The block size is assumed to be the one specified when calling the (type) corresponding matrix blocked \fCbegin\fP function\&. If not called a blocked \fCbegin\fP function, will assume 1x1 (that is, no) blocking\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIrow_stride,col_stride\fP Row and column strides in accessing \fCval\fP\&. 
+.br
+\fIi,j\fP Block row/column indices\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Signature of this routine for Fortran does not agree with the standard\&. This shall be corrected in a future release\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBBLAS_cuscr_block_begin\fP, \fBBLAS_cuscr_block_begin\fP, \fBBLAS_duscr_block_begin\fP, \fBBLAS_zuscr_block_begin\fP, \fBBLAS_cuscr_begin\fP, \fBBLAS_suscr_begin\fP, \fBBLAS_duscr_begin\fP, \fBBLAS_zuscr_begin\fP\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
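+.PP
+\fBExample:\fP
+.RS 4
+An illustrative sketch (assumed usage; the row-major stride interpretation shown here is an assumption, error checking omitted) inserting the leading 2 x 2 block of a blocked matrix:
+.PP
+.nf
+blas_sparse_matrix A = BLAS_suscr_block_begin(2, 2, 2, 2); /* 2 x 2 blocks of size 2 x 2 */
+const float blk[] = { 1.0f, 2.0f,   /* row-major 2 x 2 block:     */
+                      3.0f, 4.0f }; /* row_stride 2, col_stride 1 */
+BLAS_suscr_insert_block(A, blk, 2, 1, 0, 0);               /* block row 0, block column 0 */
+BLAS_suscr_end(A);                                         /* assemble */
+.fi
+.RE
+.PP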
+.SS "void blas_suscr_insert_block_ (\fBblas_sparse_matrix\fP *A, const float *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)"
+Inserts a whole block in a matrix, assuming it is in build state\&. The block size is assumed to be the one specified when calling the corresponding (typed) blocked \fCbegin\fP function\&. If no blocked \fCbegin\fP function has been called, 1x1 blocking (that is, no blocking) is assumed\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIrow_stride,col_stride\fP Row and column strides in accessing \fCval\fP\&. 
+.br
+\fIi,j\fP Block row/column indices\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Signature of this routine for Fortran does not agree with the standard\&. This shall be corrected in a future release\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBBLAS_cuscr_block_begin\fP, \fBBLAS_cuscr_block_begin\fP, \fBBLAS_duscr_block_begin\fP, \fBBLAS_zuscr_block_begin\fP, \fBBLAS_cuscr_begin\fP, \fBBLAS_suscr_begin\fP, \fBBLAS_duscr_begin\fP, \fBBLAS_zuscr_begin\fP\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_suscr_insert_clique (\fBblas_sparse_matrix\fPA, const intk, const intl, const float *val, const introw_stride, const intcol_stride, const int *indx, const int *jndx)"
+Inserts a whole clique in a matrix, assuming this is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIk,l\fP Clique rows and columns count\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIrow_stride,col_stride\fP Row and column strides in accessing the clique\&. 
+.br
+\fIindx,jndx\fP Row/column indices arrays\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Signature of this routine for Fortran does not agree with the standard\&. This shall be corrected in a future release\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_suscr_insert_clique_ (\fBblas_sparse_matrix\fP *A, const int *k, const int *l, const float *val, const int *row_stride, const int *col_stride, const int *indx, const int *jndx, int *istat)"
+Inserts a whole clique in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIk,l\fP Clique rows and columns count\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIrow_stride,col_stride\fP Row and column strides in accessing the clique\&. 
+.br
+\fIindx,jndx\fP Row/column indices arrays\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Signature of this routine for Fortran does not agree with the standard\&. This shall be corrected in a future release\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_suscr_insert_col (\fBblas_sparse_matrix\fPA, intj, intnnz, const float *val, const int *indx)"
+Inserts a whole column in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Row indices array\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_suscr_insert_col_ (\fBblas_sparse_matrix\fP *A, int *j, int *nnz, const float *val, const int *indx, int *istat)"
+Inserts a whole column in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Row indices array\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_suscr_insert_entries (\fBblas_sparse_matrix\fPA, intnnz, const float *val, const int *indx, const int *jndx)"
+Inserts entries in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Row indices array\&. 
+.br
+\fIjndx\fP Column indices array\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_suscr_insert_entries_ (\fBblas_sparse_matrix\fP *A, int *nnz, const float *val, const int *indx, const int *jndx, int *istat)"
+Inserts entries in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Row indices array\&. 
+.br
+\fIjndx\fP Column indices array\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_suscr_insert_entry (\fBblas_sparse_matrix\fPA, floatval, inti, intj)"
+Inserts an entry in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIval\fP Value to insert\&. 
+.br
+\fIi,j\fP Row and column indices\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_suscr_insert_entry_ (\fBblas_sparse_matrix\fP *A, float *val, int *i, int *j, int *istat)"
+Inserts an entry in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIval\fP Value to insert\&. 
+.br
+\fIi,j\fP Row and column indices\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_suscr_insert_row (\fBblas_sparse_matrix\fPA, inti, intnnz, const float *val, const int *indx)"
+Inserts a whole row in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Column indices array\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_suscr_insert_row_ (\fBblas_sparse_matrix\fP *A, int *i, int *nnz, const float *val, const int *indx, int *istat)"
+Inserts a whole row in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Column indices array\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "\fBblas_sparse_matrix\fP BLAS_suscr_variable_block_begin (intMb, intNb, const int *K, const int *L)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIK,L\fP Are arrays specifying row/column block sizes when specifying a matrix as VBR\&. 
+.br
+\fIMb\fP Block rows count\&. 
+.br
+\fINb\fP Block columns count\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+A matrix handle in case of success, or -1 on error\&.
+.RE
+.PP
+
+.SS "void blas_suscr_variable_block_begin_ (int *Mb, int *Nb, const int *K, const int *L, \fBblas_sparse_matrix\fP *A, int *istat)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIK,L\fP Are arrays specifying row/column block sizes when specifying a matrix as VBR\&. 
+.br
+\fIMb\fP Block rows count\&. 
+.br
+\fINb\fP Block columns count\&.
+.br
+\fIA\fP A valid pointer to an empty matrix handle\&. 
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&. Will assign a valid matrix handle to $A$ in case of success, or set it to -1 on error\&.
+.RE
+.PP
+
+.SS "int BLAS_susdot (enum \fBblas_conj_type\fP conj, intnnz, const float *x, const int *indx, const float *y, intincy, float *r, enum \fBblas_base_type\fP index_base)"
+Sparse dot product\&. $r \leftarrow X^T Y,$ $r \leftarrow X^H Y$ 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIr\fP Sparse dot result array\&. 
+.br
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&. 
+.br
+\fIconj\fP If \fBblas_conj\fP, values of X will be considered conjugated\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "void blas_susdot_ (enum \fBblas_conj_type\fP *conj, int *nnz, const float *x, const int *indx, const float *y, int *incy, float *r, enum \fBblas_base_type\fP *index_base, int *istat)"
+Sparse dot product\&. $r \leftarrow X^T Y,$ $r \leftarrow X^H Y$ 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIr\fP Sparse dot result array\&. 
+.br
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&. 
+.br
+\fIconj\fP If \fBblas_conj\fP, values of X will be considered conjugated\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "int BLAS_susga (intnnz, const float *y, intincy, float *x, const int *indx, enum \fBblas_base_type\fP index_base)"
+Sparse gather\&. $X \leftarrow Y |_x$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "void blas_susga_ (int *nnz, const float *y, int *incy, float *x, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+Sparse gather\&. $X \leftarrow Y |_x$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "int BLAS_susget_diag (\fBblas_sparse_matrix\fPA, float *d)"
+Get matrix diagonal\&. $d\leftarrow diag(A)$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fId\fP Array for the diagonal entries\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_susget_diag_ (\fBblas_sparse_matrix\fP *A, float *d, int *istat)"
+Get matrix diagonal\&. $d\leftarrow diag(A)$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fId\fP Array for the diagonal entries\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_susget_element (\fBblas_sparse_matrix\fPA, inti, intj, float *v)"
+Get a single matrix nonzero coefficient $A_{i,j}$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fIv\fP Value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_susget_element_ (\fBblas_sparse_matrix\fP *A, int *i, int *j, float *v, int *istat)"
+Get a single matrix nonzero coefficient $A_{i,j}$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fIv\fP Value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_susget_infinity_norm (\fBblas_sparse_matrix\fPA, float *in, enum \fBblas_trans_type\fP trans)"
+Get infinity norm of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIin\fP Infinity norm pointer\&. 
+.br
+\fItrans\fP Transposition parameter\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_susget_infinity_norm_ (\fBblas_sparse_matrix\fP *A, float *in, enum \fBblas_trans_type\fP *trans, int *istat)"
+Get infinity norm of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIin\fP Infinity norm pointer\&. 
+.br
+\fItrans\fP Transposition parameter\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_susget_matrix_nnz (\fBblas_sparse_matrix\fPA, int *nnz)"
+Get nnz count of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fInnz\fP Output value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_susget_matrix_nnz_ (\fBblas_sparse_matrix\fP *A, int *nnz, int *istat)"
+Get nnz count of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fInnz\fP Output value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_susget_rows_nnz (\fBblas_sparse_matrix\fPA, intfr, intlr, int *nnzp)"
+Get nnz count of matrix row interval\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIfr\fP First row\&. 
+.br
+\fIlr\fP Last row\&. 
+.br
+\fInnzp\fP Pointer to the nonzeroes variable\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_susget_rows_nnz_ (\fBblas_sparse_matrix\fP *A, int *fr, int *lr, int *nnzp, int *istat)"
+Get nnz count of matrix row interval\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIfr\fP First row\&. 
+.br
+\fIlr\fP Last row\&. 
+.br
+\fInnzp\fP Pointer to the nonzeroes variable\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_susget_rows_sparse (\fBblas_sparse_matrix\fPA, float *VA, int *IA, int *JA, int *nnz, intfr, intlr)"
+Get sparse rows of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIVA\fP Pointer to values\&. 
+.br
+\fIIA\fP Row indices array\&. 
+.br
+\fIJA\fP Column indices array\&. 
+.br
+\fInnz\fP Obtained nonzeroes\&. 
+.br
+\fIfr\fP First row\&. 
+.br
+\fIlr\fP Last row\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
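+\fBExample:\fP
+.RS 4
+A C sketch combining \fBBLAS_susget_rows_nnz\fP and \fBBLAS_susget_rows_sparse\fP to extract a row interval in coordinate form (illustrative; the helper name \fCget_rows_coo\fP is hypothetical and error paths are simplified)\&.
+.RE
+.PP
+.nf
+#include <blas_sparse.h>
+#include <stdlib.h>
+
+/* Extract rows fr..lr of A as COO triplets; the caller frees the arrays. */
+int get_rows_coo(blas_sparse_matrix A, int fr, int lr,
+                 float **VA, int **IA, int **JA, int *nnz)
+{
+    if (BLAS_susget_rows_nnz(A, fr, lr, nnz) != 0)
+        return -1;
+    *VA = malloc(sizeof(**VA) * (size_t)(*nnz));
+    *IA = malloc(sizeof(**IA) * (size_t)(*nnz));
+    *JA = malloc(sizeof(**JA) * (size_t)(*nnz));
+    if (*VA == NULL || *IA == NULL || *JA == NULL)
+        return -1; /* cleanup of partial allocations omitted for brevity */
+    return BLAS_susget_rows_sparse(A, *VA, *IA, *JA, nnz, fr, lr);
+}
+.fi
+.PP
+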
+.SS "void blas_susget_rows_sparse_ (\fBblas_sparse_matrix\fP *A, float *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)"
+Get sparse rows of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIVA\fP Pointer to values\&. 
+.br
+\fIIA\fP Row indices array\&. 
+.br
+\fIJA\fP Column indices array\&. 
+.br
+\fInnz\fP Obtained nonzeroes\&. 
+.br
+\fIfr\fP First row\&. 
+.br
+\fIlr\fP Last row\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_susgz (intnnz, float *y, intincy, float *x, const int *indx, enum \fBblas_base_type\fP index_base)"
+Sparse gather and zero\&. $X \leftarrow Y |_x;Y|_x\leftarrow 0$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
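+\fBExample:\fP
+.RS 4
+A C sketch of the gather-and-zero semantics on small arrays (illustrative; values chosen only for demonstration)\&.
+.RE
+.PP
+.nf
+#include <blas_sparse.h>
+
+/* Gather three entries of y into x, zeroing them in y. */
+void gather_and_zero(void)
+{
+    float y[6] = { 1, 2, 3, 4, 5, 6 };
+    float x[3];
+    const int indx[3] = { 0, 2, 4 }; /* 0-based positions in y */
+
+    /* Afterwards: x = {1,3,5} and y = {0,2,0,4,0,6}. */
+    BLAS_susgz(3, y, 1, x, indx, blas_zero_base);
+}
+.fi
+.PP
+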
+.SS "void blas_susgz_ (int *nnz, float *y, int *incy, float *x, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+Sparse gather and zero\&. $X \leftarrow Y |_x;Y|_x\leftarrow 0$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "int BLAS_susmm (enum \fBblas_order_type\fP order, enum \fBblas_trans_type\fP transA, intnrhs, floatalpha, \fBblas_sparse_matrix\fPA, const float *b, intldb, float *c, intldc)"
+Multiply by a dense matrix (aka multi-vector)\&. Either of $C \leftarrow \alpha AB+C,$ $C \leftarrow \alpha A^T B+C,$ $C \leftarrow \alpha A^H B+C$, depending on the value of \fCtransA\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIorder\fP Layout of the dense array\&. 
+.br
+\fItransA\fP Transposition operator for matrix \fIA\fP\&. 
+.br
+\fInrhs\fP Number of right hand side columns\&. 
+.br
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIb\fP Dense vector \fIb\fP\&. 
+.br
+\fIldb\fP Leading dimension of \fIb\fP\&. 
+.br
+\fIc\fP Dense vector \fIc\fP\&. 
+.br
+\fIldc\fP Leading dimension of \fIc\fP\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+.PP
+.PP
+By setting the \fC\fBblas_rsb_autotune_next_operation\fP\fP property via \fBBLAS_ussp\fP (at any time) the next multiplication routine call (either of \fBBLAS_dusmv\fP, \fBBLAS_susmv\fP, \fBBLAS_zusmv\fP, \fBBLAS_cusmv\fP, \fBBLAS_dusmm\fP, \fBBLAS_susmm\fP, \fBBLAS_zusmm\fP, \fBBLAS_cusmm\fP) will invoke autotuning before carrying out the effective operation\&. The tuning will take into account parameters like transposition, number of right hand sides, and scaling constants\&. By setting [...]
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+On the topic of autotuning, see also \fBrsb_tune_spmm\fP\&. If \fC--enable-rsb-num-threads\fP has been specified at configure time, the \fCRSB_NUM_THREADS\fP environment variable will override the number of executing threads specified by \fCOMP_NUM_THREADS\fP\&. (See also \fBRSB_IO_WANT_EXECUTING_THREADS\fP)\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
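+\fBExample:\fP
+.RS 4
+A C sketch of a row-major multiply by \fCnrhs\fP right hand side columns (illustrative; the helper name \fCspmm_accumulate\fP is hypothetical)\&.
+.RE
+.PP
+.nf
+#include <blas_sparse.h>
+
+/* C <- alpha A B + C; in row-major layout the leading dimension of
+ * B and C is their column count, here nrhs. */
+void spmm_accumulate(blas_sparse_matrix A, int nrhs, float alpha,
+                     const float *B, float *C)
+{
+    BLAS_susmm(blas_rowmajor, blas_no_trans, nrhs, alpha, A,
+               B, nrhs, C, nrhs);
+}
+.fi
+.PP
+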
+.SS "void blas_susmm_ (enum \fBblas_order_type\fP *order, enum \fBblas_trans_type\fP *transA, int *nrhs, float *alpha, \fBblas_sparse_matrix\fP *A, const float *b, int *ldb, float *c, int *ldc, int *istat)"
+Multiply by a dense matrix (aka multi-vector)\&. Either of $C \leftarrow \alpha AB+C,$ $C \leftarrow \alpha A^T B+C,$ $C \leftarrow \alpha A^H B+C$, depending on the value of \fCtransA\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIorder\fP Layout of the dense array\&. 
+.br
+\fItransA\fP Transposition operator for matrix \fIA\fP\&. 
+.br
+\fInrhs\fP Number of right hand side columns\&. 
+.br
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIb\fP Dense vector \fIb\fP\&. 
+.br
+\fIldb\fP Leading dimension of \fIb\fP\&. 
+.br
+\fIc\fP Dense vector \fIc\fP\&. 
+.br
+\fIldc\fP Leading dimension of \fIc\fP\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+.PP
+.PP
+By setting the \fC\fBblas_rsb_autotune_next_operation\fP\fP property via \fBBLAS_ussp\fP (at any time) the next multiplication routine call (either of \fBBLAS_dusmv\fP, \fBBLAS_susmv\fP, \fBBLAS_zusmv\fP, \fBBLAS_cusmv\fP, \fBBLAS_dusmm\fP, \fBBLAS_susmm\fP, \fBBLAS_zusmm\fP, \fBBLAS_cusmm\fP) will invoke autotuning before carrying out the effective operation\&. The tuning will take into account parameters like transposition, number of right hand sides, and scaling constants\&. By setting [...]
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+On the topic of autotuning, see also \fBrsb_tune_spmm\fP\&. If \fC--enable-rsb-num-threads\fP has been specified at configure time, the \fCRSB_NUM_THREADS\fP environment variable will override the number of executing threads specified by \fCOMP_NUM_THREADS\fP\&. (See also \fBRSB_IO_WANT_EXECUTING_THREADS\fP)\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_susmv (enum \fBblas_trans_type\fP transA, floatalpha, \fBblas_sparse_matrix\fPA, const float *x, intincx, float *y, intincy)"
+Multiply by a dense vector\&. Either of $Y \leftarrow \alpha A X + Y ,$ $Y \leftarrow \alpha A^T X + Y,$ $Y \leftarrow \alpha A^H X + Y$, depending on the value of \fCtransA\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fItransA\fP Transposition operator for matrix \fIA\fP\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIx\fP Dense vector \fIx\fP\&. 
+.br
+\fIincx\fP Stride of \fIx\fP\&. 
+.br
+\fIy\fP Dense vector \fIy\fP\&. 
+.br
+\fIincy\fP Stride of \fIy\fP\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+.PP
+.PP
+By setting the \fC\fBblas_rsb_autotune_next_operation\fP\fP property via \fBBLAS_ussp\fP (at any time) the next multiplication routine call (either of \fBBLAS_dusmv\fP, \fBBLAS_susmv\fP, \fBBLAS_zusmv\fP, \fBBLAS_cusmv\fP, \fBBLAS_dusmm\fP, \fBBLAS_susmm\fP, \fBBLAS_zusmm\fP, \fBBLAS_cusmm\fP) will invoke autotuning before carrying out the effective operation\&. The tuning will take into account parameters like transposition, number of right hand sides, and scaling constants\&. By setting [...]
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+On the topic of autotuning, see also \fBrsb_tune_spmm\fP\&. If \fC--enable-rsb-num-threads\fP has been specified at configure time, the \fCRSB_NUM_THREADS\fP environment variable will override the number of executing threads specified by \fCOMP_NUM_THREADS\fP\&. (See also \fBRSB_IO_WANT_EXECUTING_THREADS\fP)\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
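+\fBExample:\fP
+.RS 4
+A C sketch of untransposed and transposed multiplication on unit-stride vectors (illustrative; names are hypothetical and a square \fCA\fP is assumed so the same \fCx\fP fits both calls)\&.
+.RE
+.PP
+.nf
+#include <blas_sparse.h>
+
+/* y <- alpha A x + y, then yt <- alpha A^T x + yt. */
+void spmv_both(blas_sparse_matrix A, float alpha, const float *x,
+               float *y, float *yt)
+{
+    BLAS_susmv(blas_no_trans, alpha, A, x, 1, y, 1);
+    BLAS_susmv(blas_trans, alpha, A, x, 1, yt, 1);
+}
+.fi
+.PP
+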
+.SS "void blas_susmv_ (enum \fBblas_trans_type\fP *transA, float *alpha, \fBblas_sparse_matrix\fP *A, const float *x, int *incx, float *y, int *incy, int *istat)"
+Multiply by a dense vector\&. Either of $Y \leftarrow \alpha A X + Y ,$ $Y \leftarrow \alpha A^T X + Y,$ $Y \leftarrow \alpha A^H X + Y$, depending on the value of \fCtransA\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fItransA\fP Transposition operator for matrix \fIA\fP\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIx\fP Dense vector \fIx\fP\&. 
+.br
+\fIincx\fP Stride of \fIx\fP\&. 
+.br
+\fIy\fP Dense vector \fIy\fP\&. 
+.br
+\fIincy\fP Stride of \fIy\fP\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+.PP
+.PP
+By setting the \fC\fBblas_rsb_autotune_next_operation\fP\fP property via \fBBLAS_ussp\fP (at any time) the next multiplication routine call (either of \fBBLAS_dusmv\fP, \fBBLAS_susmv\fP, \fBBLAS_zusmv\fP, \fBBLAS_cusmv\fP, \fBBLAS_dusmm\fP, \fBBLAS_susmm\fP, \fBBLAS_zusmm\fP, \fBBLAS_cusmm\fP) will invoke autotuning before carrying out the effective operation\&. The tuning will take into account parameters like transposition, number of right hand sides, and scaling constants\&. By setting [...]
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+On the topic of autotuning, see also \fBrsb_tune_spmm\fP\&. If \fC--enable-rsb-num-threads\fP has been specified at configure time, the \fCRSB_NUM_THREADS\fP environment variable will override the number of executing threads specified by \fCOMP_NUM_THREADS\fP\&. (See also \fBRSB_IO_WANT_EXECUTING_THREADS\fP)\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_susrows_scale (\fBblas_sparse_matrix\fPA, const float *d, enum \fBblas_trans_type\fP trans)"
+Scale the rows of a matrix by the specified factors\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fId\fP Row scaling vector\&. 
+.br
+\fItrans\fP Transposition parameter (if transposed, columns are scaled instead)\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
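+\fBExample:\fP
+.RS 4
+A C sketch of scaling each row by its own factor (illustrative; \fCd\fP must hold one factor per row)\&.
+.RE
+.PP
+.nf
+#include <blas_sparse.h>
+
+/* Scale row i of A by d[i]; with blas_trans, columns would be scaled. */
+int scale_rows(blas_sparse_matrix A, const float *d)
+{
+    return BLAS_susrows_scale(A, d, blas_no_trans);
+}
+.fi
+.PP
+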
+.SS "void blas_susrows_scale_ (\fBblas_sparse_matrix\fP *A, const float *d, enum \fBblas_trans_type\fP *trans, int *istat)"
+Scale the rows of a matrix by the specified factors\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fId\fP Row scaling vector\&. 
+.br
+\fItrans\fP Transposition parameter (if transposed, columns are scaled instead)\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_sussc (intnnz, const float *x, float *y, intincy, const int *indx, enum \fBblas_base_type\fP index_base)"
+Sparse scatter: $Y |_x\leftarrow X$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "void blas_sussc_ (int *nnz, const float *x, float *y, int *incy, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+Sparse scatter: $Y |_x\leftarrow X$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "int BLAS_susset_element (\fBblas_sparse_matrix\fPA, inti, intj, float *v)"
+Set a single (existing) matrix nonzero coefficient $A_{i,j}$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fIv\fP Value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_susset_element_ (\fBblas_sparse_matrix\fP *A, int *i, int *j, float *v, int *istat)"
+Set a single (existing) matrix nonzero coefficient $A_{i,j}$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fIv\fP Value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_susset_elements (\fBblas_sparse_matrix\fPA, const int *ia, const int *ja, const float *va, intnnz)"
+Set individual matrix nonzero coefficient values\&. The operation is pattern preserving, that is, nonzeroes must already exist\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIia\fP Row indices array\&. 
+.br
+\fIja\fP Column indices array\&. 
+.br
+\fIva\fP Values array\&. 
+.br
+\fInnz\fP Length of the \fCia\fP, \fCja\fP, \fCva\fP arrays\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
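+\fBExample:\fP
+.RS 4
+A C sketch of a pattern-preserving batch update (illustrative; the listed coordinates must already be nonzeroes of \fCA\fP)\&.
+.RE
+.PP
+.nf
+#include <blas_sparse.h>
+
+/* Overwrite two existing nonzeroes of an assembled matrix. */
+int overwrite_entries(blas_sparse_matrix A)
+{
+    const int ia[2] = { 0, 1 };
+    const int ja[2] = { 0, 1 };
+    const float va[2] = { 10.0f, 20.0f };
+
+    return BLAS_susset_elements(A, ia, ja, va, 2);
+}
+.fi
+.PP
+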
+.SS "void blas_susset_elements_ (\fBblas_sparse_matrix\fP *A, const int *ia, const int *ja, const float *va, int *nnz, int *istat)"
+Set individual matrix nonzero coefficient values\&. The operation is pattern preserving, that is, nonzeroes must already exist\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIia\fP Row indices array\&. 
+.br
+\fIja\fP Column indices array\&. 
+.br
+\fIva\fP Values array\&. 
+.br
+\fInnz\fP Length of the \fCia\fP, \fCja\fP, \fCva\fP arrays\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_sussm (enum \fBblas_order_type\fP order, enum \fBblas_trans_type\fP transT, intnrhs, floatalpha, \fBblas_sparse_matrix\fPT, float *b, intldb)"
+Triangular solve, by a dense matrix (aka multi-vector)\&. Either of $B \leftarrow \alpha T^{-1} B,$ $B \leftarrow \alpha T^{-T} B,$ $B \leftarrow \alpha T^{-H} B$, depending on the value of \fCtransT\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIorder\fP Layout of the dense array\&. 
+.br
+\fItransT\fP Transposition operator for matrix \fIT\fP\&. 
+.br
+\fInrhs\fP Number of right hand side columns\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIT\fP A valid triangular matrix handle\&. 
+.br
+\fIb\fP Dense vector \fIb\fP\&. 
+.br
+\fIldb\fP Leading dimension of \fIb\fP\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_sussm_ (enum \fBblas_order_type\fP *order, enum \fBblas_trans_type\fP *transT, int *nrhs, float *alpha, \fBblas_sparse_matrix\fP *T, float *b, int *ldb, int *istat)"
+Triangular solve, by a dense matrix (aka multi-vector)\&. Either of $B \leftarrow \alpha T^{-1} B,$ $B \leftarrow \alpha T^{-T} B,$ $B \leftarrow \alpha T^{-H} B$, depending on the value of \fCtransT\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIorder\fP Layout of the dense array\&. 
+.br
+\fItransT\fP Transposition operator for matrix \fIT\fP\&. 
+.br
+\fInrhs\fP Number of right hand side columns\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIT\fP A valid triangular matrix handle\&. 
+.br
+\fIb\fP Dense vector \fIb\fP\&. 
+.br
+\fIldb\fP Leading dimension of \fIb\fP\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_sussv (enum \fBblas_trans_type\fP transT, floatalpha, \fBblas_sparse_matrix\fPT, float *x, intincx)"
+Triangular solve, by a dense vector\&. Either of $X \leftarrow \alpha T^{-1}X,$ $X \leftarrow \alpha T^{-T}X,$ $X \leftarrow \alpha T^{-H}X$, depending on the value of \fCtransT\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fItransT\fP Transposition operator for matrix \fIT\fP\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIT\fP A valid triangular matrix handle\&. 
+.br
+\fIx\fP Dense vector \fIx\fP\&. 
+.br
+\fIincx\fP Stride of \fIx\fP\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
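+\fBExample:\fP
+.RS 4
+A C sketch of a complete build-and-solve cycle for a small lower triangular system (illustrative; assumes the library has already been initialized, e\&.g\&. via librsb's \fCrsb_lib_init\fP)\&.
+.RE
+.PP
+.nf
+#include <blas_sparse.h>
+
+/* Assemble T = [2 0; 1 4] and solve T x = (2,9) in place. */
+int solve_lower(void)
+{
+    blas_sparse_matrix T;
+    const int ia[3] = { 0, 1, 1 }, ja[3] = { 0, 0, 1 };
+    const float va[3] = { 2.0f, 1.0f, 4.0f };
+    float x[2] = { 2.0f, 9.0f }; /* right hand side, overwritten */
+
+    T = BLAS_suscr_begin(2, 2);
+    if (T == -1)
+        return -1;
+    BLAS_ussp(T, blas_lower_triangular); /* set before insertion */
+    BLAS_suscr_insert_entries(T, 3, va, ia, ja);
+    BLAS_uscr_end(T);
+    /* x <- 1.0 T^{-1} x; the solution is x = {1, 2}. */
+    BLAS_sussv(blas_no_trans, 1.0f, T, x, 1);
+    return BLAS_usds(T);
+}
+.fi
+.PP
+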
+.SS "void blas_sussv_ (enum \fBblas_trans_type\fP *transT, float *alpha, \fBblas_sparse_matrix\fP *T, float *x, int *incx, int *istat)"
+Triangular solve, by a dense vector\&. Either of $X \leftarrow \alpha T^{-1}X,$ $X \leftarrow \alpha T^{-T}X,$ $X \leftarrow \alpha T^{-H}X$, depending on the value of \fCtransT\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fItransT\fP Transposition operator for matrix \fIT\fP\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIT\fP A valid triangular matrix handle\&. 
+.br
+\fIx\fP Dense vector \fIx\fP\&. 
+.br
+\fIincx\fP Stride of \fIx\fP\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_uscr_end (\fBblas_sparse_matrix\fPA)"
+Makes an assembled matrix out of a matrix in build state\&. After this, it is no longer possible to insert nonzeroes; only computational routines may be invoked on the matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_uscr_end_ (\fBblas_sparse_matrix\fP *A, int *istat)"
+Makes an assembled matrix out of a matrix in build state\&. After this, it is no longer possible to insert nonzeroes; only computational routines may be invoked on the matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_usds (\fBblas_sparse_matrix\fPA)"
+Destroys a matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_usds_ (\fBblas_sparse_matrix\fP *A, int *istat)"
+Destroys a matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_usgp (\fBblas_sparse_matrix\fPA, intpname)"
+Get a matrix property\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP The matrix whose property is queried\&. 
+.br
+\fIpname\fP The desired matrix property\&. For valid matrix properties, see \fBblas_rsb_ext_type\fP, \fBblas_uplo_type\fP, \fBblas_diag_type\fP, \fBblas_conj_type\fP, \fBblas_base_type\fP, \fBblas_symmetry_type\fP, \fBblas_field_type\fP, \fBblas_size_type\fP, \fBblas_sparsity_optimization_type\fP\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_usgp_ (\fBblas_sparse_matrix\fP *A, int *pname, int *istat)"
+Get a matrix property\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP The matrix whose property is queried\&. 
+.br
+\fIpname\fP The desired matrix property\&. For valid matrix properties, see \fBblas_rsb_ext_type\fP, \fBblas_uplo_type\fP, \fBblas_diag_type\fP, \fBblas_conj_type\fP, \fBblas_base_type\fP, \fBblas_symmetry_type\fP, \fBblas_field_type\fP, \fBblas_size_type\fP, \fBblas_sparsity_optimization_type\fP\&. 
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_ussp (\fBblas_sparse_matrix\fPA, intpname)"
+Set a matrix property\&. Should be called just after creation, before nonzeroes insertion\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP The matrix to which the property is applied\&. 
+.br
+\fIpname\fP The desired matrix property\&. For valid matrix properties, see \fBblas_rsb_ext_type\fP, \fBblas_uplo_type\fP, \fBblas_diag_type\fP, \fBblas_conj_type\fP, \fBblas_base_type\fP, \fBblas_symmetry_type\fP, \fBblas_field_type\fP, \fBblas_size_type\fP, \fBblas_sparsity_optimization_type\fP\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
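+\fBExample:\fP
+.RS 4
+A C sketch of requesting autotuning for the next multiplication via the \fBblas_rsb_autotune_next_operation\fP extension property described elsewhere in this page (illustrative; the helper name is hypothetical)\&.
+.RE
+.PP
+.nf
+#include <blas_sparse.h>
+
+/* The next usmv/usmm call on A will autotune first, then multiply. */
+void tuned_spmv(blas_sparse_matrix A, const float *x, float *y)
+{
+    BLAS_ussp(A, blas_rsb_autotune_next_operation);
+    BLAS_susmv(blas_no_trans, 1.0f, A, x, 1, y, 1);
+}
+.fi
+.PP
+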
+.SS "void blas_ussp_ (\fBblas_sparse_matrix\fP *A, int *pname, int *istat)"
+Set a matrix property\&. Should be called just after creation, before nonzeroes insertion\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP The matrix to which the property is applied\&. 
+.br
+\fIpname\fP The desired matrix property\&. For valid matrix properties, see \fBblas_rsb_ext_type\fP, \fBblas_uplo_type\fP, \fBblas_diag_type\fP, \fBblas_conj_type\fP, \fBblas_base_type\fP, \fBblas_symmetry_type\fP, \fBblas_field_type\fP, \fBblas_size_type\fP, \fBblas_sparsity_optimization_type\fP\&. 
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_zusaxpy (intnnz, const void *alpha, const void *x, const int *indx, void *y, intincy, enum \fBblas_base_type\fP index_base)"
+Sparse vector update: $Y \leftarrow \alpha X + Y$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&. 
+.br
+\fIalpha\fP Will scale values of $X$ before accumulating to $Y$\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "void blas_zusaxpy_ (int *nnz, const void *alpha, const void *x, const int *indx, void *y, int *incy, enum \fBblas_base_type\fP *index_base, int *istat)"
+Sparse vector update: $Y \leftarrow \alpha X + Y$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&. 
+.br
+\fIalpha\fP Will scale values of $X$ before accumulating to $Y$\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "\fBblas_sparse_matrix\fP BLAS_zuscr_begin (intm, intn)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIm\fP Is the count of rows\&. 
+.br
+\fIn\fP Is the count of columns\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+A matrix handle in case of success, or -1 on error\&.
+.RE
+.PP
+
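+\fBExample:\fP
+.RS 4
+A C sketch of the create/insert/assemble cycle for a double complex matrix (illustrative; C99 \fCcomplex\&.h\fP types are assumed behind the \fCvoid *\fP value arguments)\&.
+.RE
+.PP
+.nf
+#include <blas_sparse.h>
+#include <complex.h>
+
+/* Build and assemble a 2x2 matrix with two diagonal entries. */
+blas_sparse_matrix build_z_matrix(void)
+{
+    blas_sparse_matrix A;
+    const int ia[2] = { 0, 1 }, ja[2] = { 0, 1 };
+    const double complex va[2] = { 1.0 + 1.0*I, 2.0 - 1.0*I };
+
+    A = BLAS_zuscr_begin(2, 2); /* build state */
+    if (A == -1)
+        return A;
+    BLAS_zuscr_insert_entries(A, 2, va, ia, ja);
+    if (BLAS_zuscr_end(A) != 0) /* assemble */
+        return -1;
+    return A;
+}
+.fi
+.PP
+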
+.SS "void blas_zuscr_begin_ (int *m, int *n, \fBblas_sparse_matrix\fP *A, int *istat)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIm\fP Is the count of rows\&. 
+.br
+\fIn\fP Is the count of columns\&.
+.br
+\fIA\fP A valid pointer to an empty matrix handle\&. 
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&. Will assign a valid matrix handle to $A$ in case of success, or set it to -1 on error\&.
+.RE
+.PP
+
+.SS "\fBblas_sparse_matrix\fP BLAS_zuscr_block_begin (intMb, intNb, intk, intl)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIk,l\fP Are row and column dimensions when specifying a matrix as BCSR\&. 
+.br
+\fIMb\fP Block rows count\&. 
+.br
+\fINb\fP Block columns count\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+A matrix handle in case of success, or -1 on error\&.
+.RE
+.PP
+
+.SS "void blas_zuscr_block_begin_ (int *Mb, int *Nb, int *k, int *l, \fBblas_sparse_matrix\fP *A, int *istat)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIk,l\fP Are row and column dimensions when specifying a matrix as BCSR\&. 
+.br
+\fIMb\fP Block rows count\&. 
+.br
+\fINb\fP Block columns count\&.
+.br
+\fIA\fP A valid pointer to an empty matrix handle\&. 
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&. Will assign a valid matrix handle to $A$ in case of success, or set it to -1 on error\&.
+.RE
+.PP
+
+.SS "int BLAS_zuscr_end (\fBblas_sparse_matrix\fPA)"
+Makes an assembled matrix out of a matrix in build state\&. After this, it is no longer possible to insert nonzeroes; only computational routines may be invoked on the matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_zuscr_end_ (\fBblas_sparse_matrix\fP *A, int *istat)"
+Makes an assembled matrix out of a matrix in build state\&. After this, it is no longer possible to insert nonzeroes; only computational routines may be invoked on the matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_zuscr_insert_block (\fBblas_sparse_matrix\fPA, const void *val, introw_stride, intcol_stride, inti, intj)"
+Inserts a whole block in a matrix, assuming it is in build state\&. The block size is assumed to be the one specified when calling the corresponding (by type) blocked \fCbegin\fP function\&. If no blocked \fCbegin\fP function was called, 1x1 blocking (that is, none) is assumed\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIrow_stride,col_stride\fP Row and column strides in accessing \fCval\fP\&. 
+.br
+\fIi,j\fP Block row/column indices\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+The signature of this routine for Fortran does not agree with the standard\&. This shall be corrected in a future release\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBBLAS_cuscr_block_begin\fP, \fBBLAS_cuscr_block_begin\fP, \fBBLAS_duscr_block_begin\fP, \fBBLAS_zuscr_block_begin\fP, \fBBLAS_cuscr_begin\fP, \fBBLAS_suscr_begin\fP, \fBBLAS_duscr_begin\fP, \fBBLAS_zuscr_begin\fP\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
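+\fBExample:\fP
+.RS 4
+A C sketch of inserting one dense 2x2 block into a matrix created with \fBBLAS_zuscr_block_begin\fP (illustrative; the helper name is hypothetical and the row-major stride interpretation is an assumption)\&.
+.RE
+.PP
+.nf
+#include <blas_sparse.h>
+#include <complex.h>
+
+/* A 2x2 grid of 2x2 blocks: a 4x4 matrix overall. */
+blas_sparse_matrix build_blocked(void)
+{
+    blas_sparse_matrix A = BLAS_zuscr_block_begin(2, 2, 2, 2);
+    const double complex blk[4] = { 1, 2, 3, 4 };
+
+    if (A == -1)
+        return A;
+    /* Row stride 2, column stride 1: blk is read as row-major. */
+    BLAS_zuscr_insert_block(A, blk, 2, 1, 0, 0);
+    BLAS_zuscr_end(A);
+    return A;
+}
+.fi
+.PP
+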
+.SS "void blas_zuscr_insert_block_ (\fBblas_sparse_matrix\fP *A, const void *val, int *row_stride, int *col_stride, int *i, int *j, int *istat)"
+Inserts a whole block in a matrix, assuming it is in build state\&. The block size is assumed to be the one specified when calling the corresponding (by type) blocked \fCbegin\fP function\&. If no blocked \fCbegin\fP function was called, 1x1 blocking (that is, none) is assumed\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIrow_stride,col_stride\fP Row and column strides in accessing \fCval\fP\&. 
+.br
+\fIi,j\fP Block row/column indices\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+The signature of this routine for Fortran does not agree with the standard\&. This shall be corrected in a future release\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBBLAS_cuscr_block_begin\fP, \fBBLAS_cuscr_block_begin\fP, \fBBLAS_duscr_block_begin\fP, \fBBLAS_zuscr_block_begin\fP, \fBBLAS_cuscr_begin\fP, \fBBLAS_suscr_begin\fP, \fBBLAS_duscr_begin\fP, \fBBLAS_zuscr_begin\fP\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_zuscr_insert_clique (\fBblas_sparse_matrix\fPA, const intk, const intl, const void *val, const introw_stride, const intcol_stride, const int *indx, const int *jndx)"
+Inserts a whole clique in a matrix, assuming this is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIk,l\fP Clique rows and columns count\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIrow_stride,col_stride\fP Row/columns stride in accessing the clique\&. 
+.br
+\fIindx,jndx\fP Row/column indices arrays\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+The signature of this routine for Fortran does not agree with the standard\&. This shall be corrected in a future release\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_zuscr_insert_clique_ (\fBblas_sparse_matrix\fP *A, const int *k, const int *l, const void *val, const int *row_stride, const int *col_stride, const int *indx, const int *jndx, int *istat)"
+Inserts a whole clique in a matrix, assuming this is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIk,l\fP Clique rows and columns count\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIrow_stride,col_stride\fP Row/columns stride in accessing the clique\&. 
+.br
+\fIindx,jndx\fP Row/column indices arrays\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+The signature of this routine for Fortran does not agree with the standard\&. This shall be corrected in a future release\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_zuscr_insert_col (\fBblas_sparse_matrix\fPA, intj, intnnz, const void *val, const int *indx)"
+Inserts a whole column in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Row indices array\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_zuscr_insert_col_ (\fBblas_sparse_matrix\fP *A, int *j, int *nnz, const void *val, const int *indx, int *istat)"
+Inserts a whole column in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Row indices array\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_zuscr_insert_entries (\fBblas_sparse_matrix\fPA, intnnz, const void *val, const int *indx, const int *jndx)"
+Inserts entries in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Row indices array\&. 
+.br
+\fIjndx\fP Column indices array\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_zuscr_insert_entries_ (\fBblas_sparse_matrix\fP *A, int *nnz, const void *val, const int *indx, const int *jndx, int *istat)"
+Inserts entries in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Row indices array\&. 
+.br
+\fIjndx\fP Column indices array\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_zuscr_insert_entry (\fBblas_sparse_matrix\fPA, const void *val, inti, intj)"
+Inserts an entry in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIi,j\fP Row and column indices\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_zuscr_insert_entry_ (\fBblas_sparse_matrix\fP *A, const void *val, int *i, int *j, int *istat)"
+Inserts an entry in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIi,j\fP Row and column indices\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_zuscr_insert_row (\fBblas_sparse_matrix\fPA, inti, intnnz, const void *val, const int *indx)"
+Inserts a whole row in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Column indices array\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_zuscr_insert_row_ (\fBblas_sparse_matrix\fP *A, int *i, int *nnz, const void *val, const int *indx, int *istat)"
+Inserts a whole row in a matrix, assuming it is in build state\&. By default, duplicate entries will be summed together\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fInnz\fP Number of nonzeroes to insert\&. 
+.br
+\fIval\fP Array of values\&. 
+.br
+\fIindx\fP Column indices array\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "\fBblas_sparse_matrix\fP BLAS_zuscr_variable_block_begin (intMb, intNb, const int *K, const int *L)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIK,L\fP Are arrays specifying row/column block sizes when specifying a matrix as VBR\&. 
+.br
+\fIMb\fP Block rows count\&. 
+.br
+\fINb\fP Block columns count\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+A matrix handle in case of success, or -1 on error\&.
+.RE
+.PP
+
+.SS "void blas_zuscr_variable_block_begin_ (int *Mb, int *Nb, const int *K, const int *L, \fBblas_sparse_matrix\fP *A, int *istat)"
+Allocates an empty matrix (A) and leaves it in build state\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIK,L\fP Are arrays specifying row/column block sizes when specifying a matrix as VBR\&. 
+.br
+\fIMb\fP Block rows count\&. 
+.br
+\fINb\fP Block columns count\&.
+.br
+\fIA\fP A valid pointer to an empty matrix handle\&. 
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&. Will assign a valid matrix handle to $A$ in case of success, or set it to -1 on error\&.
+.RE
+.PP
+
+.SS "int BLAS_zusdot (enum \fBblas_conj_type\fP conj, intnnz, const void *x, const int *indx, const void *y, intincy, void *r, enum \fBblas_base_type\fP index_base)"
+Sparse dot product\&. $r \leftarrow X^T Y,$ $r \leftarrow X^H Y$ 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIr\fP Sparse dot result array\&. 
+.br
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&. 
+.br
+\fIconj\fP If \fBblas_conj\fP, values of X will be considered conjugated\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
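+\fBExample:\fP
+.RS 4
+A C sketch of a conjugated sparse dot product with a worked result (illustrative)\&.
+.RE
+.PP
+.nf
+#include <blas_sparse.h>
+#include <complex.h>
+
+/* r <- X^H Y over the sparse pattern indx. */
+double complex sparse_dot(void)
+{
+    const double complex x[2] = { 1.0 + 1.0*I, 2.0 };
+    const double complex y[4] = { 1.0, 2.0, 3.0, 4.0 };
+    const int indx[2] = { 0, 2 }; /* x[k] pairs with y[indx[k]] */
+    double complex r = 0.0;
+
+    BLAS_zusdot(blas_conj, 2, x, indx, y, 1, &r, blas_zero_base);
+    return r; /* conj(1+i)*1 + conj(2)*3 = 7 - i */
+}
+.fi
+.PP
+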
+.SS "void blas_zusdot_ (enum \fBblas_conj_type\fP *conj, int *nnz, const void *x, const int *indx, const void *y, int *incy, void *r, enum \fBblas_base_type\fP *index_base, int *istat)"
+Sparse dot product\&. $r \leftarrow X^T Y,$ $r \leftarrow X^H Y$ 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIr\fP Sparse dot result array\&. 
+.br
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&. 
+.br
+\fIconj\fP If \fBblas_conj\fP, values of X will be considered conjugated\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "int BLAS_zusga (intnnz, const void *y, intincy, void *x, const int *indx, enum \fBblas_base_type\fP index_base)"
+Sparse gather\&. $X \leftarrow Y |_x$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "void blas_zusga_ (int *nnz, const void *y, int *incy, void *x, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+Sparse gather\&. $X \leftarrow Y |_x$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "int BLAS_zusget_diag (\fBblas_sparse_matrix\fPA, void *d)"
+Get matrix diagonal\&. $d\leftarrow diag(A)$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fId\fP Array for the diagonal entries\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
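+\fBExample:\fP
+.RS 4
+A C sketch of extracting the diagonal of an n x n assembled handle (illustrative; the caller frees the returned array)\&.
+.RE
+.PP
+.nf
+#include <blas_sparse.h>
+#include <complex.h>
+#include <stdlib.h>
+
+/* Return a freshly allocated copy of diag(A), or NULL on failure. */
+double complex *get_diag(blas_sparse_matrix A, int n)
+{
+    double complex *d = malloc(sizeof(*d) * (size_t)n);
+
+    if (d != NULL && BLAS_zusget_diag(A, d) != 0) {
+        free(d);
+        d = NULL;
+    }
+    return d;
+}
+.fi
+.PP
+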
+.SS "void blas_zusget_diag_ (\fBblas_sparse_matrix\fP *A, void *d, int *istat)"
+Get matrix diagonal\&. $d\leftarrow diag(A)$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fId\fP Array for the diagonal entries\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_zusget_element (\fBblas_sparse_matrix\fPA, inti, intj, void *v)"
+Get a single matrix nonzero coefficient $A_{i,j}$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fIv\fP Value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_zusget_element_ (\fBblas_sparse_matrix\fP *A, int *i, int *j, void *v, int *istat)"
+Get a single matrix nonzero coefficient $A_{i,j}$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fIv\fP Value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_zusget_infinity_norm (\fBblas_sparse_matrix\fPA, void *in, enum \fBblas_trans_type\fP trans)"
+Get infinity norm of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIin\fP Infinity norm pointer\&. 
+.br
+\fItrans\fP Transposition parameter\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_zusget_infinity_norm_ (\fBblas_sparse_matrix\fP *A, void *in, enum \fBblas_trans_type\fP *trans, int *istat)"
+Get infinity norm of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIin\fP Infinity norm pointer\&. 
+.br
+\fItrans\fP Transposition parameter\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_zusget_matrix_nnz (\fBblas_sparse_matrix\fPA, int *nnz)"
+Get nnz count of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fInnz\fP Output value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_zusget_matrix_nnz_ (\fBblas_sparse_matrix\fP *A, int *nnz, int *istat)"
+Get nnz count of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fInnz\fP Output value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_zusget_rows_nnz (\fBblas_sparse_matrix\fPA, intfr, intlr, int *nnzp)"
+Get nnz count of matrix row interval\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIfr\fP First row\&. 
+.br
+\fIlr\fP Last row\&. 
+.br
+\fInnzp\fP Pointer to the nonzeroes variable\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_zusget_rows_nnz_ (\fBblas_sparse_matrix\fP *A, int *fr, int *lr, int *nnzp, int *istat)"
+Get nnz count of matrix row interval\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIfr\fP First row\&. 
+.br
+\fIlr\fP Last row\&. 
+.br
+\fInnzp\fP Pointer to the nonzeroes variable\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_zusget_rows_sparse (\fBblas_sparse_matrix\fPA, void *VA, int *IA, int *JA, int *nnz, intfr, intlr)"
+Get sparse rows of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIVA\fP Pointer to values\&. 
+.br
+\fIIA\fP Row indices array\&. 
+.br
+\fIJA\fP Column indices array\&. 
+.br
+\fInnz\fP Obtained nonzeroes\&. 
+.br
+\fIfr\fP First row\&. 
+.br
+\fIlr\fP Last row\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_zusget_rows_sparse_ (\fBblas_sparse_matrix\fP *A, void *VA, int *IA, int *JA, int *nnz, int *fr, int *lr, int *istat)"
+Get sparse rows of matrix\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIVA\fP Pointer to values\&. 
+.br
+\fIIA\fP Row indices array\&. 
+.br
+\fIJA\fP Column indices array\&. 
+.br
+\fInnz\fP Obtained nonzeroes\&. 
+.br
+\fIfr\fP First row\&. 
+.br
+\fIlr\fP Last row\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_zusgz (intnnz, void *y, intincy, void *x, const int *indx, enum \fBblas_base_type\fP index_base)"
+Sparse gather and zero\&. $X \leftarrow Y |_x;Y|_x\leftarrow 0$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "void blas_zusgz_ (int *nnz, void *y, int *incy, void *x, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+Sparse gather and zero\&. $X \leftarrow Y |_x;Y|_x\leftarrow 0$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP Is the array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "int BLAS_zusmm (enum \fBblas_order_type\fP order, enum \fBblas_trans_type\fP transA, intnrhs, const void *alpha, \fBblas_sparse_matrix\fPA, const void *b, intldb, void *c, intldc)"
+Multiply by a dense matrix (aka multi-vector)\&. Either of $C \leftarrow \alpha AB+C,$ $C \leftarrow \alpha A^T B+C,$ $C \leftarrow \alpha A^H B+C$, depending on the value of \fCtransA\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIorder\fP Layout of the dense array\&. 
+.br
+\fItransA\fP Transposition operator for matrix \fIA\fP\&. 
+.br
+\fInrhs\fP Number of right hand side columns\&. 
+.br
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIb\fP Dense vector \fIb\fP\&. 
+.br
+\fIldb\fP Leading dimension of \fIb\fP\&. 
+.br
+\fIc\fP Dense vector \fIc\fP\&. 
+.br
+\fIldc\fP Leading dimension of \fIc\fP\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+.PP
+.PP
+By setting the \fC\fBblas_rsb_autotune_next_operation\fP\fP property via \fBBLAS_ussp\fP (at any time) the next multiplication routine call (either of \fBBLAS_dusmv\fP, \fBBLAS_susmv\fP, \fBBLAS_zusmv\fP, \fBBLAS_cusmv\fP, \fBBLAS_dusmm\fP, \fBBLAS_susmm\fP, \fBBLAS_zusmm\fP, \fBBLAS_cusmm\fP) will invoke autotuning before carrying out the effective operation\&. The tuning will take in account parameters like transposition, number of right hand sides, and scaling constants\&. By setting  [...]
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+On the topic of autotuning, see also \fBrsb_tune_spmm\fP\&. If \fC--enable-rsb-num-threads\fP has been specified at configure time, the \fCRSB_NUM_THREADS\fP environment variable will override the number of executing threads specified by \fCOMP_NUM_THREADS\fP\&. (See also \fBRSB_IO_WANT_EXECUTING_THREADS\fP)\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
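+A minimal C sketch of \fCC <- alpha*A*B + C\fP with row-major dense arrays (assumptions: \fCA\fP is an already assembled m x k double complex handle, \fCB\fP is k x nrhs, \fCC\fP is m x nrhs; error handling omitted):
+.PP
+.nf
+#include <blas_sparse.h>
+#include <complex.h>
+
+void usmm_example(blas_sparse_matrix A, int nrhs,
+                  const double complex *B, double complex *C)
+{
+    const double complex alpha = 1.0;
+
+    /* row-major: the leading dimensions equal the row lengths (nrhs) */
+    BLAS_zusmm(blas_rowmajor, blas_no_trans, nrhs, &alpha, A,
+               B, nrhs, C, nrhs);
+}
+.fi
+.PP
+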
+.SS "void blas_zusmm_ (enum \fBblas_order_type\fP *order, enum \fBblas_trans_type\fP *transA, int *nrhs, const void *alpha, \fBblas_sparse_matrix\fP *A, const void *b, int *ldb, void *c, int *ldc, int *istat)"
+Multiply by a dense matrix (aka multi-vector)\&. Either of $C \leftarrow \alpha AB+C,$ $C \leftarrow \alpha A^T B+C,$ $C \leftarrow \alpha A^H B+C$, depending on the value of \fCtransA\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIorder\fP Layout of the dense array\&. 
+.br
+\fItransA\fP Transposition operator for matrix \fIA\fP\&. 
+.br
+\fInrhs\fP Number of right hand side columns\&. 
+.br
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIb\fP Dense vector \fIb\fP\&. 
+.br
+\fIldb\fP Leading dimension of \fIb\fP\&. 
+.br
+\fIc\fP Dense vector \fIc\fP\&. 
+.br
+\fIldc\fP Leading dimension of \fIc\fP\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+.PP
+.PP
+By setting the \fC\fBblas_rsb_autotune_next_operation\fP\fP property via \fBBLAS_ussp\fP (at any time) the next multiplication routine call (either of \fBBLAS_dusmv\fP, \fBBLAS_susmv\fP, \fBBLAS_zusmv\fP, \fBBLAS_cusmv\fP, \fBBLAS_dusmm\fP, \fBBLAS_susmm\fP, \fBBLAS_zusmm\fP, \fBBLAS_cusmm\fP) will invoke autotuning before carrying out the effective operation\&. The tuning will take in account parameters like transposition, number of right hand sides, and scaling constants\&. By setting  [...]
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+On the topic of autotuning, see also \fBrsb_tune_spmm\fP\&. If \fC--enable-rsb-num-threads\fP has been specified at configure time, the \fCRSB_NUM_THREADS\fP environment variable will override the number of executing threads specified by \fCOMP_NUM_THREADS\fP\&. (See also \fBRSB_IO_WANT_EXECUTING_THREADS\fP)\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_zusmv (enum \fBblas_trans_type\fP transA, const void *alpha, \fBblas_sparse_matrix\fPA, const void *x, intincx, void *y, intincy)"
+Multiply by a dense vector\&. Either of $Y \leftarrow \alpha A X + Y ,$ $Y \leftarrow \alpha A^T X + Y,$ $Y \leftarrow \alpha A^H X + Y$, depending on the value of \fCtransA\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fItransA\fP Transposition operator for matrix \fIA\fP\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIx\fP Dense vector \fIx\fP\&. 
+.br
+\fIincx\fP Stride of \fIx\fP\&. 
+.br
+\fIy\fP Dense vector \fIy\fP\&. 
+.br
+\fIincy\fP Stride of \fIy\fP\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+.PP
+.PP
+By setting the \fC\fBblas_rsb_autotune_next_operation\fP\fP property via \fBBLAS_ussp\fP (at any time) the next multiplication routine call (either of \fBBLAS_dusmv\fP, \fBBLAS_susmv\fP, \fBBLAS_zusmv\fP, \fBBLAS_cusmv\fP, \fBBLAS_dusmm\fP, \fBBLAS_susmm\fP, \fBBLAS_zusmm\fP, \fBBLAS_cusmm\fP) will invoke autotuning before carrying out the effective operation\&. The tuning will take in account parameters like transposition, number of right hand sides, and scaling constants\&. By setting  [...]
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+On the topic of autotuning, see also \fBrsb_tune_spmm\fP\&. If \fC--enable-rsb-num-threads\fP has been specified at configure time, the \fCRSB_NUM_THREADS\fP environment variable will override the number of executing threads specified by \fCOMP_NUM_THREADS\fP\&. (See also \fBRSB_IO_WANT_EXECUTING_THREADS\fP)\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
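+A minimal C sketch of \fCy <- alpha*A*x + y\fP (assumptions: \fCA\fP is an already assembled double complex handle, \fCx\fP and \fCy\fP are conforming dense arrays with unit stride):
+.PP
+.nf
+#include <blas_sparse.h>
+#include <complex.h>
+
+void usmv_example(blas_sparse_matrix A,
+                  const double complex *x, double complex *y)
+{
+    const double complex alpha = 1.0;
+
+    BLAS_zusmv(blas_no_trans, &alpha, A, x, 1, y, 1);
+}
+.fi
+.PP
+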
+.SS "void blas_zusmv_ (enum \fBblas_trans_type\fP *transA, const void *alpha, \fBblas_sparse_matrix\fP *A, const void *x, int *incx, void *y, int *incy, int *istat)"
+Multiply by a dense vector\&. Either of $Y \leftarrow \alpha A X + Y ,$ $Y \leftarrow \alpha A^T X + Y,$ $Y \leftarrow \alpha A^H X + Y$, depending on the value of \fCtransA\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fItransA\fP Transposition operator for matrix \fIA\fP\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIx\fP Dense vector \fIx\fP\&. 
+.br
+\fIincx\fP Stride of \fIx\fP\&. 
+.br
+\fIy\fP Dense vector \fIy\fP\&. 
+.br
+\fIincy\fP Stride of \fIy\fP\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+.PP
+.PP
+By setting the \fC\fBblas_rsb_autotune_next_operation\fP\fP property via \fBBLAS_ussp\fP (at any time) the next multiplication routine call (either of \fBBLAS_dusmv\fP, \fBBLAS_susmv\fP, \fBBLAS_zusmv\fP, \fBBLAS_cusmv\fP, \fBBLAS_dusmm\fP, \fBBLAS_susmm\fP, \fBBLAS_zusmm\fP, \fBBLAS_cusmm\fP) will invoke autotuning before carrying out the effective operation\&. The tuning will take in account parameters like transposition, number of right hand sides, and scaling constants\&. By setting  [...]
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+On the topic of autotuning, see also \fBrsb_tune_spmm\fP\&. If \fC--enable-rsb-num-threads\fP has been specified at configure time, the \fCRSB_NUM_THREADS\fP environment variable will override the number of executing threads specified by \fCOMP_NUM_THREADS\fP\&. (See also \fBRSB_IO_WANT_EXECUTING_THREADS\fP)\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_zusrows_scale (\fBblas_sparse_matrix\fPA, const void *d, enum \fBblas_trans_type\fP trans)"
+Scale the rows of the matrix by the entries of a scaling vector\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fId\fP Rows scaling vector\&. 
+.br
+\fItrans\fP Transposition parameter (if transposed will scale columns)\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
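+A minimal C sketch of row scaling (assumptions: \fCA\fP is an already assembled m x n double complex handle and \fCd\fP holds m scaling factors):
+.PP
+.nf
+#include <blas_sparse.h>
+#include <complex.h>
+
+void rows_scale_example(blas_sparse_matrix A, const double complex *d)
+{
+    /* blas_no_trans scales rows; blas_trans would scale columns */
+    BLAS_zusrows_scale(A, d, blas_no_trans);
+}
+.fi
+.PP
+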
+.SS "void blas_zusrows_scale_ (\fBblas_sparse_matrix\fP *A, const void *d, enum \fBblas_trans_type\fP *trans, int *istat)"
+Scale the rows of the matrix by the entries of a scaling vector\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fId\fP Rows scaling vector\&. 
+.br
+\fItrans\fP Transposition parameter (if transposed will scale columns)\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_zussc (intnnz, const void *x, void *y, intincy, const int *indx, enum \fBblas_base_type\fP index_base)"
+Sparse scatter: $Y |_x\leftarrow X$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP The array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
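+A minimal C sketch scattering a sparse \fCx\fP into a dense \fCy\fP (assumptions: double complex data, 0-based indices):
+.PP
+.nf
+#include <blas_sparse.h>
+#include <complex.h>
+
+void scatter_example(void)
+{
+    const double complex x[3] = { 7, 8, 9 };
+    double complex y[6] = { 0 };
+    const int indx[3] = { 1, 3, 5 }; /* 0-based: blas_zero_base */
+
+    /* y|_x <- x : writes y[1], y[3], y[5] */
+    BLAS_zussc(3, x, y, 1, indx, blas_zero_base);
+}
+.fi
+.PP
+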
+.SS "void blas_zussc_ (int *nnz, const void *x, void *y, int *incy, const int *indx, enum \fBblas_base_type\fP *index_base, int *istat)"
+Sparse scatter: $Y |_x\leftarrow X$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIy\fP Array for $Y$ vector\&. 
+.br
+\fIx\fP Array for $X$ vector\&. 
+.br
+\fInnz\fP Size of $X$ and $Y$ vectors\&. 
+.br
+\fIindx\fP The array of indices at which sparse vector $X$ will be accessed\&. 
+.br
+\fIindex_base\fP Specifies the contents of \fCindx\fP, either \fBblas_zero_base\fP or \fBblas_one_base\fP\&. 
+.br
+\fIincy\fP The distance between consecutive \fCy\fP array elements\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Sparse BLAS Level 1 has been implemented and is working, although not with performance in mind\&.
+.RE
+.PP
+
+.SS "int BLAS_zusset_element (\fBblas_sparse_matrix\fPA, inti, intj, void *v)"
+Set a single (existing) matrix nonzero coefficient $A_{i,j}$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fIv\fP Value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_zusset_element_ (\fBblas_sparse_matrix\fP *A, int *i, int *j, void *v, int *istat)"
+Set a single (existing) matrix nonzero coefficient $A_{i,j}$\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIi\fP Row index\&. 
+.br
+\fIj\fP Column index\&. 
+.br
+\fIv\fP Value pointer\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_zusset_elements (\fBblas_sparse_matrix\fPA, const int *ia, const int *ja, const void *va, intnnz)"
+Set individual matrix nonzero coefficient values\&. The operation is pattern preserving, that is, nonzeroes must already exist\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIia\fP Row indices array\&. 
+.br
+\fIja\fP Column indices array\&. 
+.br
+\fIva\fP Values array\&. 
+.br
+\fInnz\fP Length of the \fCia\fP, \fCja\fP, \fCva\fP arrays\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
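+A minimal C sketch updating two already existing nonzeroes (assumptions: handle \fCA\fP was assembled with entries at the listed coordinates, using the same index base):
+.PP
+.nf
+#include <blas_sparse.h>
+#include <complex.h>
+
+void set_elements_example(blas_sparse_matrix A)
+{
+    const int ia[2] = { 0, 1 };
+    const int ja[2] = { 0, 1 };
+    const double complex va[2] = { 4.0, 5.0 + 1.0*I };
+
+    /* pattern preserving: fails if a coordinate is not a nonzero */
+    if (BLAS_zusset_elements(A, ia, ja, va, 2) != 0)
+        return; /* error */
+}
+.fi
+.PP
+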
+.SS "void blas_zusset_elements_ (\fBblas_sparse_matrix\fP *A, const int *ia, const int *ja, const void *va, int *nnz, int *istat)"
+Set individual matrix nonzero coefficient values\&. The operation is pattern preserving, that is, nonzeroes must already exist\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.br
+\fIia\fP Row indices array\&. 
+.br
+\fIja\fP Column indices array\&. 
+.br
+\fIva\fP Values array\&. 
+.br
+\fInnz\fP Length of the \fCia\fP, \fCja\fP, \fCva\fP arrays\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+\fBParameters:\fP
+.RS 4
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_zussm (enum \fBblas_order_type\fP order, enum \fBblas_trans_type\fP transT, intnrhs, const void *alpha, \fBblas_sparse_matrix\fPT, void *b, intldb)"
+Triangular solve, by a dense matrix (aka multi-vector)\&. Either of $B \leftarrow \alpha T^{-1} B,$ $B \leftarrow \alpha T^{-T} B,$ $B \leftarrow \alpha T^{-H} B$, depending on the value of \fCtransT\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIorder\fP Layout of the dense array\&. 
+.br
+\fItransT\fP Transposition operator for matrix \fIT\fP\&. 
+.br
+\fInrhs\fP Number of right hand side columns\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIT\fP A valid triangular matrix handle\&. 
+.br
+\fIb\fP Dense vector \fIb\fP\&. 
+.br
+\fIldb\fP Leading dimension of \fIb\fP\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
+.SS "void blas_zussm_ (enum \fBblas_order_type\fP *order, enum \fBblas_trans_type\fP *transT, int *nrhs, const void *alpha, \fBblas_sparse_matrix\fP *T, void *b, int *ldb, int *istat)"
+Triangular solve, by a dense matrix (aka multi-vector)\&. Either of $B \leftarrow \alpha T^{-1} B,$ $B \leftarrow \alpha T^{-T} B,$ $B \leftarrow \alpha T^{-H} B$, depending on the value of \fCtransT\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fIorder\fP Layout of the dense array\&. 
+.br
+\fItransT\fP Transposition operator for matrix \fIT\fP\&. 
+.br
+\fInrhs\fP Number of right hand side columns\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIT\fP A valid triangular matrix handle\&. 
+.br
+\fIb\fP Dense vector \fIb\fP\&. 
+.br
+\fIldb\fP Leading dimension of \fIb\fP\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "int BLAS_zussv (enum \fBblas_trans_type\fP transT, const void *alpha, \fBblas_sparse_matrix\fPT, void *x, intincx)"
+Triangular solve, by a dense vector\&. Either of $X \leftarrow \alpha T^{-1}X,$ $X \leftarrow \alpha T^{-T}X,$ $X \leftarrow \alpha T^{-H}X$, depending on the value of \fCtransT\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fItransT\fP Transposition operator for matrix \fIT\fP\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIT\fP A valid triangular matrix handle\&. 
+.br
+\fIx\fP Dense vector \fIx\fP\&. 
+.br
+\fIincx\fP Stride of \fIx\fP\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, 0 is returned; on error, -1\&.
+.RE
+.PP
+
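+A minimal C sketch of an in-place triangular solve \fCx <- alpha*inv(T)*x\fP (assumptions: \fCT\fP was assembled as triangular, e\&.g\&. with the \fCblas_lower_triangular\fP property set via \fBBLAS_ussp\fP before insertion):
+.PP
+.nf
+#include <blas_sparse.h>
+#include <complex.h>
+
+void ussv_example(blas_sparse_matrix T, double complex *x)
+{
+    const double complex alpha = 1.0;
+
+    BLAS_zussv(blas_no_trans, &alpha, T, x, 1);
+}
+.fi
+.PP
+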
+.SS "void blas_zussv_ (enum \fBblas_trans_type\fP *transT, const void *alpha, \fBblas_sparse_matrix\fP *T, void *x, int *incx, int *istat)"
+Triangular solve, by a dense vector\&. Either of $X \leftarrow \alpha T^{-1}X,$ $X \leftarrow \alpha T^{-T}X,$ $X \leftarrow \alpha T^{-H}X$, depending on the value of \fCtransT\fP\&. 
+.PP
+\fBParameters:\fP
+.RS 4
+\fItransT\fP Transposition operator for matrix \fIT\fP\&. 
+.br
+\fIalpha\fP Value for $ \alpha $\&. 
+.br
+\fIT\fP A valid triangular matrix handle\&. 
+.br
+\fIx\fP Dense vector \fIx\fP\&. 
+.br
+\fIincx\fP Stride of \fIx\fP\&.
+.br
+\fIistat\fP If non \fCNULL\fP, \fC*istat\fP will be set to the return code, either 0 (success) or -1 (failure)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+This is a subroutine for Fortran, so it does not return any value\&.
+.RE
+.PP
+
+.SS "struct rsb_mtx_t* rsb_blas_get_mtx (\fBblas_sparse_matrix\fPA)\fC [read]\fP"
+Given a valid Sparse BLAS handle, returns a pointer to the inner rsb_mtx_t structure\&. This pointer can then be used with many of the \fBrsb\&.h\fP functions\&. This is an experimental function, so we recommend using it only with functions that do not modify the matrix (those taking a \fCconst\fP \fCstruct\fP \fCrsb_mtx_t*mtxAp\fP)\&. You can use this function from either Fortran or C\&.
+.PP
+\fBParameters:\fP
+.RS 4
+\fIA\fP A valid matrix handle\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, a valid pointer to the inner matrix structure (\fCstruct\fP \fCrsb_mtx_t*\fP); on error, \fCNULL\fP\&.
+.RE
+.PP
+
+.br
+.PP
+An example using Fortran: 
+.PP
+.nf
+\&.\&.\&.  
+USE blas_sparse 
+USE rsb 
+IMPLICIT NONE 
+TYPE(C_PTR),TARGET :: mtxAp = C_NULL_PTR ! matrix pointer 
+INTEGER :: A ! blas_sparse_matrix handle 
+INTEGER, TARGET :: istat = 0 
+\&.\&.\&. ! begin, populate and finalize A, e\&.g\&. using BLAS_duscr_begin, BLAS_duscr_insert_entries, BLAS_uscr_end
+! get pointer to rsb structure: 
+mtxAp = rsb_blas_get_mtx(A) 
+! Now one can use it with any rsb\&.h/rsb\&.F90 function, e\&.g\&.: 
+istat = rsb_file_mtx_save(mtxAp, C_NULL_PTR) ! write to stdout 
+
+.fi
+.PP
+ 
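+.PP
+An equivalent minimal sketch in C (assumptions: \fCA\fP was assembled beforehand; error handling omitted):
+.PP
+.nf
+#include <rsb.h>
+#include <blas_sparse.h>
+
+void get_mtx_example(blas_sparse_matrix A)
+{
+    /* pointer to the inner structure; owned by the handle,
+       so do not pass it to rsb_mtx_free() */
+    struct rsb_mtx_t *mtxAp = rsb_blas_get_mtx(A);
+
+    if (mtxAp != NULL)
+        rsb_file_mtx_save(mtxAp, NULL); /* write to standard output */
+}
+.fi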
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_get_coo\fP, \fBrsb_mtx_get_csr\fP, \fBrsb_mtx_get_rows_sparse\fP, \fBrsb_mtx_get_coo_block\fP, \fBrsb_mtx_get_prec\fP, \fBrsb_mtx_get_nrm\fP, \fBrsb_mtx_get_vec\fP, \fBrsb_file_mtx_get_dims\fP, \fBrsb_mtx_get_vals\fP, 
+.PP
+\fBrsb_mtx_upd_vals\fP, \fBrsb_mtx_set_vals\fP, 
+.PP
+\fBrsb_spmsp_to_dense\fP, \fBrsb_sppsp\fP, \fBrsb_spmsp\fP, \fBrsb_mtx_add_to_dense\fP, 
+.PP
+\fBrsb_mtx_rndr\fP, \fBrsb_file_mtx_rndr\fP, 
+.PP
+\fBrsb_mtx_get_info\fP, \fBrsb_mtx_get_info_str\fP, \fBrsb_file_mtx_save\fP, \fBrsb_file_vec_load\fP, \fBrsb_file_mtx_load\fP\&.
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is an extension implemented by \fClibrsb\fP and thus it is not part of the standard\&. Do not rely on it, as it may change! Please contact the library maintainers if you need its functionality\&.
+.RE
+.PP
+
+.SH "Author"
+.PP 
+librsb was written by Michele Martone; this documentation has been generated by Doxygen.
+.SH "SEE ALSO"
+.B rsb-examples
+.B rsb.h
+.B rsb-spblas.h
diff --git a/doc/man/man3/rsb.h.3 b/doc/man/man3/rsb.h.3
new file mode 100644
index 0000000..39fc658
--- /dev/null
+++ b/doc/man/man3/rsb.h.3
@@ -0,0 +1,2097 @@
+.TH "The librsb library interface (rsb.h, rsb.F90)" 3 "Fri Sep 2 2016" "Version 1.2.0-rc5" "librsb" \" -*- nroff -*-
+.ad l
+.nh
+.SH NAME
+librsb - 
+The librsb library interface (rsb.h, rsb.F90) \- 
+.SH DESCRIPTION
+.PP
+The reference documentation of the \fClibrsb\fP library comes in both HTML and Unix man pages formats\&. The following sections/man pages are available: \fBThe librsb library interface (rsb\&.h, rsb\&.F90)\fP ; \fBThe Sparse BLAS interface to librsb (blas_sparse\&.h, rsb_blas_sparse\&.F90)\fP ; \fBExample programs and code\fP\&.  
+
+.SS "Macros"
+
+.in +1c
+.ti -1c
+.RI "#define \fBRSB_SIZEOF\fP(TYPE)   RSB_NUMERICAL_TYPE_SIZE(TYPE)"
+.br
+.in -1c
+.SS "Enumerations"
+
+.in +1c
+.ti -1c
+.RI "enum \fBrsb_opt_t\fP { \fBRSB_IO_WANT_VERBOSE_INIT\fP = 0x000001, \fBRSB_IO_WANT_VERBOSE_EXIT\fP = 0x000002, \fBRSB_IO_WANT_OUTPUT_STREAM\fP = 0x000003, \fBRSB_IO_WANT_SORT_METHOD\fP = 0x000004, \fBRSB_IO_WANT_CACHE_BLOCKING_METHOD\fP = 0x000005, \fBRSB_IO_WANT_SUBDIVISION_MULTIPLIER\fP = 0x000006, \fBRSB_IO_WANT_VERBOSE_ERRORS\fP = 0x000007, \fBRSB_IO_WANT_BOUNDED_BOX_COMPUTATION\fP = 0x000008, \fBRSB_IO_WANT_EXECUTING_THREADS\fP = 0x000009, \fBRSB_IO_WANT_EXTRA_VERBOSE_INTERFACE\f [...]
+.br
+.RI "\fIlibrary option values for \fBrsb_lib_init\fP, \fBrsb_lib_set_opt_str\fP, \fBrsb_lib_reinit\fP, \fBrsb_lib_exit\fP, \fBrsb_lib_get_opt\fP, \fBrsb_lib_set_opt\fP, or (deprecated) macros \fBRSB_REINIT_SINGLE_VALUE_GET\fP, \fBRSB_REINIT_SINGLE_VALUE_SET\fP, \fBRSB_REINIT_SINGLE_VALUE\fP, \fBRSB_REINIT_SINGLE_VALUE_C_IOP\fP\&.\&. \fP"
+.ti -1c
+.RI "enum \fBrsb_extff_t\fP { \fBRSB_EXTF_NORM_ONE\fP = 0x00001001, \fBRSB_EXTF_NORM_TWO\fP = 0x00001002, \fBRSB_EXTF_NORM_INF\fP = 0x00001003, \fBRSB_EXTF_SUMS_ROW\fP = 0x00001004, \fBRSB_EXTF_SUMS_COL\fP = 0x00001005, \fBRSB_EXTF_ASUMS_ROW\fP = 0x00001006, \fBRSB_EXTF_ASUMS_COL\fP = 0x00001007, \fBRSB_EXTF_DIAG\fP = 0x00000004 }"
+.br
+.RI "\fIExtraction filter flags, to be used with \fBrsb_mtx_get_nrm()\fP/\fBrsb_mtx_get_vec()\fP\&. \fP"
+.ti -1c
+.RI "enum \fBrsb_mif_t\fP { \fBRSB_MIF_INDEX_STORAGE_IN_BYTES__TO__SIZE_T\fP = 0x00000001, \fBRSB_MIF_INDEX_STORAGE_IN_BYTES_PER_NNZ__TO__RSB_REAL_T\fP = 0x00000002, \fBRSB_MIF_MATRIX_ROWS__TO__RSB_COO_INDEX_T\fP = 0x00000004, \fBRSB_MIF_MATRIX_COLS__TO__RSB_COO_INDEX_T\fP = 0x00000008, \fBRSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T\fP = 0x00000010, \fBRSB_MIF_TOTAL_SIZE__TO__SIZE_T\fP = 0x00000020, \fBRSB_MIF_MATRIX_FLAGS__TO__RSB_FLAGS_T\fP = 0x00000040, \fBRSB_MIF_MATRIX_TYPECODE__TO__RSB [...]
+.br
+.RI "\fIFlags for getting matrix informations via \fBrsb_mtx_get_info()\fP/\fBrsb_mtx_get_info_str()\fP\&. \fP"
+.ti -1c
+.RI "enum \fBrsb_elopf_t\fP { \fBRSB_ELOPF_MUL\fP = 0x00000001, \fBRSB_ELOPF_DIV\fP = 0x00000002, \fBRSB_ELOPF_POW\fP = 0x00000004, \fBRSB_ELOPF_NEG\fP = 0x00000008, \fBRSB_ELOPF_SCALE_ROWS\fP = 0x00000010, \fBRSB_ELOPF_SCALE_COLS\fP = 0x00000020, \fBRSB_ELOPF_SCALE_ROWS_REAL\fP = 0x00000040, \fBRSB_ELOPF_SCALE_COLS_REAL\fP = 0x00000080 }"
+.br
+.RI "\fIFlags for specifying a particular elemental/row-wise operation with \fBrsb_mtx_upd_vals()\fP\&. \fP"
+.in -1c
+.SS "Functions"
+
+.in +1c
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_lib_init\fP (struct \fBrsb_initopts\fP *iop)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_lib_set_opt_str\fP (const \fBrsb_char_t\fP *opnp, const \fBrsb_char_t\fP *opvp)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_lib_reinit\fP (struct \fBrsb_initopts\fP *iop)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_lib_exit\fP (struct \fBrsb_initopts\fP *iop)"
+.br
+.ti -1c
+.RI "struct rsb_mtx_t * \fBrsb_mtx_alloc_from_coo_const\fP (const void *VA, const \fBrsb_coo_idx_t\fP *IA, const \fBrsb_coo_idx_t\fP *JA, \fBrsb_nnz_idx_t\fP nnzA, \fBrsb_type_t\fP typecode, \fBrsb_coo_idx_t\fP nrA, \fBrsb_coo_idx_t\fP ncA, \fBrsb_blk_idx_t\fP brA, \fBrsb_blk_idx_t\fP bcA, \fBrsb_flags_t\fP flagsA, \fBrsb_err_t\fP *errvalp)"
+.br
+.ti -1c
+.RI "struct rsb_mtx_t * \fBrsb_mtx_alloc_from_coo_inplace\fP (void *VA, \fBrsb_coo_idx_t\fP *IA, \fBrsb_coo_idx_t\fP *JA, \fBrsb_nnz_idx_t\fP nnzA, \fBrsb_type_t\fP typecode, \fBrsb_coo_idx_t\fP nrA, \fBrsb_coo_idx_t\fP ncA, \fBrsb_blk_idx_t\fP brA, \fBrsb_blk_idx_t\fP bcA, \fBrsb_flags_t\fP flagsA, \fBrsb_err_t\fP *errvalp)"
+.br
+.ti -1c
+.RI "struct rsb_mtx_t * \fBrsb_mtx_free\fP (struct rsb_mtx_t *mtxAp)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_mtx_clone\fP (struct rsb_mtx_t **mtxBpp, \fBrsb_type_t\fP typecode, \fBrsb_trans_t\fP transA, const void *alphap, const struct rsb_mtx_t *mtxAp, \fBrsb_flags_t\fP flags)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_spmv\fP (\fBrsb_trans_t\fP transA, const void *alphap, const struct rsb_mtx_t *mtxAp, const void *Xp, \fBrsb_coo_idx_t\fP incX, const void *betap, void *Yp, \fBrsb_coo_idx_t\fP incY)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_spsv\fP (\fBrsb_trans_t\fP transT, const void *alphap, const struct rsb_mtx_t *mtxTp, const void *Xp, \fBrsb_coo_idx_t\fP incX, void *Yp, \fBrsb_coo_idx_t\fP incY)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_spsm\fP (\fBrsb_trans_t\fP transT, const void *alphap, const struct rsb_mtx_t *mtxTp, \fBrsb_coo_idx_t\fP nrhs, \fBrsb_flags_t\fP order, const void *betap, const void *Bp, \fBrsb_nnz_idx_t\fP ldB, void *Cp, \fBrsb_nnz_idx_t\fP ldC)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_coo_sort\fP (void *VA, \fBrsb_coo_idx_t\fP *IA, \fBrsb_coo_idx_t\fP *JA, \fBrsb_nnz_idx_t\fP nnzA, \fBrsb_coo_idx_t\fP nrA, \fBrsb_coo_idx_t\fP ncA, \fBrsb_type_t\fP typecode, \fBrsb_flags_t\fP flagsA)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_file_mtx_get_dims\fP (const char *filename, \fBrsb_coo_idx_t\fP *nrp, \fBrsb_coo_idx_t\fP *ncp, \fBrsb_coo_idx_t\fP *nzp, \fBrsb_flags_t\fP *flagsp)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_perror\fP (void *stream, \fBrsb_err_t\fP errval)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_strerror_r\fP (\fBrsb_err_t\fP errval, \fBrsb_char_t\fP *buf, size_t buflen)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_mtx_upd_vals\fP (struct rsb_mtx_t *mtxAp, enum \fBrsb_elopf_t\fP elop_flags, const void *omegap)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_mtx_set_vals\fP (struct rsb_mtx_t *mtxAp, const void *VA, const \fBrsb_coo_idx_t\fP *IA, const \fBrsb_coo_idx_t\fP *JA, \fBrsb_nnz_idx_t\fP nnz, \fBrsb_flags_t\fP flags)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_mtx_get_vals\fP (const struct rsb_mtx_t *mtxAp, void *VA, const \fBrsb_coo_idx_t\fP *IA, const \fBrsb_coo_idx_t\fP *JA, \fBrsb_nnz_idx_t\fP nnz, \fBrsb_flags_t\fP flags)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_file_mtx_save\fP (const struct rsb_mtx_t *mtxAp, const \fBrsb_char_t\fP *filename)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_file_vec_save\fP (const \fBrsb_char_t\fP *filename, \fBrsb_type_t\fP typecode, const void *Yp, \fBrsb_coo_idx_t\fP yvl)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_file_vec_load\fP (const \fBrsb_char_t\fP *filename, \fBrsb_type_t\fP typecode, void *Yp, \fBrsb_coo_idx_t\fP *yvlp)"
+.br
+.ti -1c
+.RI "struct rsb_mtx_t * \fBrsb_file_mtx_load\fP (const \fBrsb_char_t\fP *filename, \fBrsb_flags_t\fP flagsA, \fBrsb_type_t\fP typecode, \fBrsb_err_t\fP *errvalp)"
+.br
+.ti -1c
+.RI "struct rsb_mtx_t * \fBrsb_sppsp\fP (\fBrsb_type_t\fP typecode, \fBrsb_trans_t\fP transA, const void *alphap, const struct rsb_mtx_t *mtxAp, \fBrsb_trans_t\fP transB, const void *betap, const struct rsb_mtx_t *mtxBp, \fBrsb_err_t\fP *errvalp)"
+.br
+.ti -1c
+.RI "struct rsb_mtx_t * \fBrsb_spmsp\fP (\fBrsb_type_t\fP typecode, \fBrsb_trans_t\fP transA, const void *alphap, const struct rsb_mtx_t *mtxAp, \fBrsb_trans_t\fP transB, const void *betap, const struct rsb_mtx_t *mtxBp, \fBrsb_err_t\fP *errvalp)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_mtx_add_to_dense\fP (const void *alphap, const struct rsb_mtx_t *mtxAp, \fBrsb_nnz_idx_t\fP ldB, \fBrsb_nnz_idx_t\fP nrB, \fBrsb_nnz_idx_t\fP ncB, \fBrsb_bool_t\fP rowmajorB, void *Bp)"
+.br
+.ti -1c
+.RI "\fBrsb_trans_t\fP \fBrsb_psblas_trans_to_rsb_trans\fP (const char psbtrans)"
+.br
+.ti -1c
+.RI "struct rsb_mtx_t * \fBrsb_mtx_alloc_from_csr_const\fP (const void *VA, const \fBrsb_coo_idx_t\fP *RP, const \fBrsb_coo_idx_t\fP *JA, \fBrsb_nnz_idx_t\fP nnzA, \fBrsb_type_t\fP typecode, \fBrsb_coo_idx_t\fP nrA, \fBrsb_coo_idx_t\fP ncA, \fBrsb_blk_idx_t\fP brA, \fBrsb_blk_idx_t\fP bcA, \fBrsb_flags_t\fP flagsA, \fBrsb_err_t\fP *errvalp)"
+.br
+.ti -1c
+.RI "struct rsb_mtx_t * \fBrsb_mtx_alloc_from_csc_const\fP (const void *VA, const \fBrsb_coo_idx_t\fP *IA, const \fBrsb_coo_idx_t\fP *CP, \fBrsb_nnz_idx_t\fP nnzA, \fBrsb_type_t\fP typecode, \fBrsb_coo_idx_t\fP nrA, \fBrsb_coo_idx_t\fP ncA, \fBrsb_blk_idx_t\fP brA, \fBrsb_blk_idx_t\fP bcA, \fBrsb_flags_t\fP flagsA, \fBrsb_err_t\fP *errvalp)"
+.br
+.ti -1c
+.RI "struct rsb_mtx_t * \fBrsb_mtx_alloc_from_csr_inplace\fP (void *VA, \fBrsb_nnz_idx_t\fP *RP, \fBrsb_coo_idx_t\fP *JA, \fBrsb_nnz_idx_t\fP nnzA, \fBrsb_type_t\fP typecode, \fBrsb_coo_idx_t\fP nrA, \fBrsb_coo_idx_t\fP ncA, \fBrsb_blk_idx_t\fP brA, \fBrsb_blk_idx_t\fP bcA, \fBrsb_flags_t\fP flagsA, \fBrsb_err_t\fP *errvalp)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_mtx_switch_to_csr\fP (struct rsb_mtx_t *mtxAp, void **VAp, \fBrsb_coo_idx_t\fP **IAp, \fBrsb_coo_idx_t\fP **JAp, \fBrsb_flags_t\fP flags)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_mtx_get_coo\fP (const struct rsb_mtx_t *mtxAp, void *VA, \fBrsb_coo_idx_t\fP *IA, \fBrsb_coo_idx_t\fP *JA, \fBrsb_flags_t\fP flags)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_mtx_get_csr\fP (\fBrsb_type_t\fP typecode, const struct rsb_mtx_t *mtxAp, void *VA, \fBrsb_nnz_idx_t\fP *RP, \fBrsb_coo_idx_t\fP *JA, \fBrsb_flags_t\fP flags)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_mtx_get_rows_sparse\fP (\fBrsb_trans_t\fP transA, const void *alphap, const struct rsb_mtx_t *mtxAp, void *VA, \fBrsb_coo_idx_t\fP *IA, \fBrsb_coo_idx_t\fP *JA, \fBrsb_coo_idx_t\fP frA, \fBrsb_coo_idx_t\fP lrA, \fBrsb_nnz_idx_t\fP *rnzp, \fBrsb_flags_t\fP flags)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_mtx_get_coo_block\fP (const struct rsb_mtx_t *mtxAp, void *VA, \fBrsb_coo_idx_t\fP *IA, \fBrsb_coo_idx_t\fP *JA, \fBrsb_coo_idx_t\fP frA, \fBrsb_coo_idx_t\fP lrA, \fBrsb_coo_idx_t\fP fcA, \fBrsb_coo_idx_t\fP lcA, \fBrsb_coo_idx_t\fP *IREN, \fBrsb_coo_idx_t\fP *JREN, \fBrsb_nnz_idx_t\fP *rnzp, \fBrsb_flags_t\fP flags)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_spmm\fP (\fBrsb_trans_t\fP transA, const void *alphap, const struct rsb_mtx_t *mtxAp, \fBrsb_coo_idx_t\fP nrhs, \fBrsb_flags_t\fP order, const void *Bp, \fBrsb_nnz_idx_t\fP ldB, const void *betap, void *Cp, \fBrsb_nnz_idx_t\fP ldC)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_spmsp_to_dense\fP (\fBrsb_type_t\fP typecode, \fBrsb_trans_t\fP transA, const void *alphap, const struct rsb_mtx_t *mtxAp, \fBrsb_trans_t\fP transB, const void *betap, const struct rsb_mtx_t *mtxBp, \fBrsb_nnz_idx_t\fP ldC, \fBrsb_nnz_idx_t\fP nrC, \fBrsb_nnz_idx_t\fP ncC, \fBrsb_bool_t\fP rowmajorC, void *Cp)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_mtx_rndr\fP (const char *filename, const struct rsb_mtx_t *mtxAp, \fBrsb_coo_idx_t\fP pmWidth, \fBrsb_coo_idx_t\fP pmHeight, \fBrsb_marf_t\fP rflags)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_file_mtx_rndr\fP (void *pmp, const char *filename, \fBrsb_coo_idx_t\fP pmlWidth, \fBrsb_coo_idx_t\fP pmWidth, \fBrsb_coo_idx_t\fP pmHeight, \fBrsb_marf_t\fP rflags)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_mtx_switch_to_coo\fP (struct rsb_mtx_t *mtxAp, void **VAp, \fBrsb_coo_idx_t\fP **IAp, \fBrsb_coo_idx_t\fP **JAp, \fBrsb_flags_t\fP flags)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_mtx_get_prec\fP (void *opdp, const struct rsb_mtx_t *mtxAp, \fBrsb_precf_t\fP prec_flags, const void *ipdp)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_mtx_get_info\fP (const struct rsb_mtx_t *mtxAp, enum \fBrsb_mif_t\fP miflags, void *minfop)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_mtx_get_info_str\fP (const struct rsb_mtx_t *mtxAp, const \fBrsb_char_t\fP *mis, void *minfop, size_t buflen)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_mtx_get_nrm\fP (const struct rsb_mtx_t *mtxAp, void *Np, enum \fBrsb_extff_t\fP flags)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_mtx_get_vec\fP (const struct rsb_mtx_t *mtxAp, void *Dp, enum \fBrsb_extff_t\fP flags)"
+.br
+.ti -1c
+.RI "\fBrsb_time_t\fP \fBrsb_time\fP (void)"
+.br
+.ti -1c
+.RI "struct rsb_mtx_t * \fBrsb_mtx_alloc_from_coo_begin\fP (\fBrsb_nnz_idx_t\fP nnzA, \fBrsb_type_t\fP typecode, \fBrsb_coo_idx_t\fP nrA, \fBrsb_coo_idx_t\fP ncA, \fBrsb_flags_t\fP flagsA, \fBrsb_err_t\fP *errvalp)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_mtx_alloc_from_coo_end\fP (struct rsb_mtx_t **mtxApp)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_tune_spmm\fP (struct rsb_mtx_t **mtxOpp, \fBrsb_real_t\fP *sfp, \fBrsb_int_t\fP *tnp, \fBrsb_int_t\fP maxr, \fBrsb_time_t\fP maxt, \fBrsb_trans_t\fP transA, const void *alphap, const struct rsb_mtx_t *mtxAp, \fBrsb_coo_idx_t\fP nrhs, \fBrsb_flags_t\fP order, const void *Bp, \fBrsb_nnz_idx_t\fP ldB, const void *betap, void *Cp, \fBrsb_nnz_idx_t\fP ldC)"
+.br
+.ti -1c
+.RI "\fBrsb_err_t\fP \fBrsb_tune_spsm\fP (struct rsb_mtx_t **mtxOpp, \fBrsb_real_t\fP *sfp, \fBrsb_int_t\fP *tnp, \fBrsb_int_t\fP maxr, \fBrsb_time_t\fP maxt, \fBrsb_trans_t\fP transA, const void *alphap, const struct rsb_mtx_t *mtxAp, \fBrsb_coo_idx_t\fP nrhs, \fBrsb_flags_t\fP order, const void *Bp, \fBrsb_nnz_idx_t\fP ldB, const void *betap, void *Cp, \fBrsb_nnz_idx_t\fP ldC)"
+.br
+.in -1c
+.SH "Detailed Description"
+.PP 
+The reference documentation of the \fClibrsb\fP library comes in both HTML and Unix man pages formats\&. The following sections/man pages are available: \fBThe librsb library interface (rsb\&.h, rsb\&.F90)\fP ; \fBThe Sparse BLAS interface to librsb (blas_sparse\&.h, rsb_blas_sparse\&.F90)\fP ; \fBExample programs and code\fP\&. 
+
+In general, users of this library are interested in high-performance sparse matrix computations on cache-based shared-memory parallel computers\&. For this, \fClibrsb\fP offers a native C interface (here documented) and a Fortran one (in \fBrsb\&.F90\fP, equivalent to the C declaration headers from \fBrsb\&.h\fP), in addition to the Sparse BLAS one (both C and Fortran, documented)\&.
+.PP
+Configuration, build, and installation instructions are contained in the \fCREADME\fP file distributed in the sources archive\&.
+.PP
+\fB Typical program structure \fP
+.PP
+.PD 0
+.IP "\(bu" 2
+initialize \fClibrsb\fP with \fBrsb_lib_init()\fP 
+.IP "\(bu" 2
+(in any order) allocate matrices (e\&.g\&.: with \fBrsb_mtx_alloc_from_coo_inplace()\fP or others); do any computation with them (e\&.g\&.: \fBrsb_spmv()\fP, \fBrsb_spsv()\fP ); convert matrices (e\&.g\&.: with \fBrsb_mtx_switch_to_coo()\fP ); free matrices (\fBrsb_mtx_free()\fP ) 
+.IP "\(bu" 2
+finalize \fClibrsb\fP with \fBrsb_lib_exit()\fP (see the sketch after this list)
+.PP
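+A minimal C sketch of this structure (assumptions: double precision, a 1 x 1 matrix with one nonzero; error handling mostly omitted):
+.PP
+.nf
+#include <rsb.h>
+
+int main(void)
+{
+    const double VA[1] = { 1.0 };
+    const rsb_coo_idx_t IA[1] = { 0 }, JA[1] = { 0 };
+    const double x[1] = { 1.0 }, alpha = 1.0, beta = 1.0;
+    double y[1] = { 0.0 };
+    struct rsb_mtx_t *mtxAp = NULL;
+
+    if (rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
+        return 1;
+    mtxAp = rsb_mtx_alloc_from_coo_const(VA, IA, JA, 1,
+        RSB_NUMERICAL_TYPE_DOUBLE, 1, 1, 0, 0, /* 0,0: default blocking */
+        RSB_FLAG_NOFLAGS, NULL);
+    rsb_spmv(RSB_TRANSPOSITION_N, &alpha, mtxAp, x, 1, &beta, y, 1);
+    rsb_mtx_free(mtxAp);
+    return rsb_lib_exit(RSB_NULL_EXIT_OPTIONS) == RSB_ERR_NO_ERROR ? 0 : 1;
+}
+.fi
+.PP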
+\fB Important usage notes \fP
+.PP
+\fB General program structure \fP Before calling any \fClibrsb\fP function, a program is required to initialize \fClibrsb's\fP internal status\&. This is done by calling \fBrsb_lib_init()\fP \&. Afterwards, any \fClibrsb\fP function can be safely used\&. When \fClibrsb\fP functions are not intended to be called anymore, a program may call \fBrsb_lib_exit()\fP to free any resource\&. Then, \fBrsb_lib_init()\fP should be called for further usage of \fClibrsb\fP\&.
+.PP
+\fB Manipulating matrices and vectors \fP In order to use \fClibrsb\fP, the user is not required to use explicitly any of \fClibrsb's\fP data structures: their manipulation is to be performed by \fClibrsb\fP functions\&. Therefore, knowledge of \fClibrsb's\fP matrix type (\fCrsb_mtx_t\fP) is not necessary at all: this structure is intended to be used as an opaque container\&.
+.PP
+On the contrary, arrays for numerical vectors (or more generally, dense matrices) are expected to be managed by the user: \fClibrsb\fP does not furnish any specific vector type\&. Computational functions treat dense vectors/matrices as simple arrays of a specified type; see the \fBExample programs and code\fP \&.
+.PP
+\fB Computational functions \fP This library can be configured at build time to support a custom subset of numerical types\&. To keep the programming interface compact, it has been decided to not replicate the computational functions to each numerical type\&. Instead, the type is expected to be specified by the user via a type flag\&. For instance, matrix assembly functions (e\&.g\&.: \fBrsb_mtx_alloc_from_coo_const()\fP ) accept a type information and keep it stored in the matrix struct [...]
+.PP
+\fB Memory management \fP
+.PP
+Matrix structures (\fCrsb_mtx_t\fP) allocated by \fClibrsb\fP shall be freed only via \fBrsb_mtx_free()\fP \&.
+.PP
+\fB Benchmarking \fP
+.PP
+If you want to benchmark this library, there are several possibilities: 
+.PP
+.nf
+#!/bin/sh
+
+# systematic comparative benchmark, mostly for dense matrices
+# (comparing against Intel MKL, if linked);
+# produces a number of plots systematically
+bench/dense\&.sh
+
+# the benchmark command; assumes A\&.mtx is a file in Matrix Market format
+\&./rsbench -oa -Ob -f A\&.mtx -qH -R -n1 -t100 --verbose -TD --compare-competitors 
+
+# rsbench is a very flexible tool; see its help:
+\&./rsbench -oa -Ob --help
+
+.fi
+.PP
+.PP
+\fB Tuning and Customization \fP
+.PP
+There are several \fC\&./configure\fP options you may look at for tuning or customizing the library\&. 
+.SH "Macro Definition Documentation"
+.PP 
+.SS "#define RSB_SIZEOF(TYPE)   RSB_NUMERICAL_TYPE_SIZE(TYPE)"
+Use \fBRSB_SIZEOF\fP macro to get the size (in bytes) of a type supported by the library (e\&.g\&.: when allocating numerical vectors)\&. 
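+.PP
+For instance, a minimal sketch allocating a numerical vector (assumption: the double type was enabled at configure time):
+.PP
+.nf
+#include <rsb.h>
+#include <stdlib.h>
+
+void *alloc_vector(size_t n)
+{
+    /* RSB_SIZEOF yields the per-element size, in bytes, of the type */
+    return calloc(n, RSB_SIZEOF(RSB_NUMERICAL_TYPE_DOUBLE));
+}
+.fi
+.PP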
+.SH "Enumeration Type Documentation"
+.PP 
+.SS "enum \fBrsb_elopf_t\fP"
+
+.PP
+Flags for specifying a particular elemental/row-wise operation with \fBrsb_mtx_upd_vals()\fP\&. 
+.PP
+\fBEnumerator: \fP
+.in +1c
+.TP
+\fB\fIRSB_ELOPF_MUL \fP\fP
+Elemental multiplication of the matrix by a specified scalar (usable with \fBrsb_mtx_upd_vals()\fP, binary operation)\&. 
+.TP
+\fB\fIRSB_ELOPF_DIV \fP\fP
+Elemental division by a specified scalar (usable with \fBrsb_mtx_upd_vals()\fP, binary operation)\&. 
+.TP
+\fB\fIRSB_ELOPF_POW \fP\fP
+Elemental power to a specified scalar (usable with \fBrsb_mtx_upd_vals()\fP, binary operation)\&. 
+.TP
+\fB\fIRSB_ELOPF_NEG \fP\fP
+Elemental negation (usable with \fBrsb_mtx_upd_vals()\fP, unary operation)\&. 
+.TP
+\fB\fIRSB_ELOPF_SCALE_ROWS \fP\fP
+Row-wise scaling by a specified scaling vector (usable with \fBrsb_mtx_upd_vals()\fP, binary operation)\&. 
+.TP
+\fB\fIRSB_ELOPF_SCALE_COLS \fP\fP
+Column-wise scaling by a specified scaling vector (usable with \fBrsb_mtx_upd_vals()\fP, binary operation)\&. 
+.TP
+\fB\fIRSB_ELOPF_SCALE_ROWS_REAL \fP\fP
+Row-wise scaling by a specified scaling vector\&. If the matrix is of a complex type, the argument is expected to be of the corresponding real type (assuming that type has been enabled)\&. (usable with \fBrsb_mtx_upd_vals()\fP, binary operation)\&. 
+.TP
+\fB\fIRSB_ELOPF_SCALE_COLS_REAL \fP\fP
+Column-wise scaling by a specified scaling vector\&. If the matrix is of a complex type, the argument is expected to be of the corresponding real type (assuming that type has been enabled)\&. (usable with \fBrsb_mtx_upd_vals()\fP, binary operation)\&. 
+.SS "enum \fBrsb_extff_t\fP"
+
+.PP
+Extraction filter flags, to be used with \fBrsb_mtx_get_nrm()\fP/\fBrsb_mtx_get_vec()\fP\&. 
+.PP
+\fBEnumerator: \fP
+.in +1c
+.TP
+\fB\fIRSB_EXTF_NORM_ONE \fP\fP
+\fBrsb_mtx_get_nrm()\fP flag value for computing the one-norm\&. 
+.TP
+\fB\fIRSB_EXTF_NORM_TWO \fP\fP
+\fBrsb_mtx_get_nrm()\fP flag value for computing the two-norm (Frobenius norm)\&. 
+.TP
+\fB\fIRSB_EXTF_NORM_INF \fP\fP
+\fBrsb_mtx_get_nrm()\fP flag value for computing the infinity-norm\&. 
+.TP
+\fB\fIRSB_EXTF_SUMS_ROW \fP\fP
+\fBrsb_mtx_get_vec()\fP flag value for computing the sum along each row\&. 
+.TP
+\fB\fIRSB_EXTF_SUMS_COL \fP\fP
+\fBrsb_mtx_get_vec()\fP flag value for computing the sum along each column\&. 
+.TP
+\fB\fIRSB_EXTF_ASUMS_ROW \fP\fP
+\fBrsb_mtx_get_vec()\fP flag value for computing the sum of absolute values along each row\&. 
+.TP
+\fB\fIRSB_EXTF_ASUMS_COL \fP\fP
+\fBrsb_mtx_get_vec()\fP flag value for computing the sum of absolute values along each column\&. 
+.TP
+\fB\fIRSB_EXTF_DIAG \fP\fP
+\fBrsb_mtx_get_vec()\fP flag value for extracting the diagonal submatrix\&. 
+.SS "enum \fBrsb_mif_t\fP"
+
+.PP
+Flags for getting matrix information via \fBrsb_mtx_get_info()\fP/\fBrsb_mtx_get_info_str()\fP\&. 
+.PP
+\fBEnumerator: \fP
+.in +1c
+.TP
+\fB\fIRSB_MIF_INDEX_STORAGE_IN_BYTES__TO__SIZE_T \fP\fP
+Index storage occupation, in bytes\&. (size_t) 
+.TP
+\fB\fIRSB_MIF_INDEX_STORAGE_IN_BYTES_PER_NNZ__TO__RSB_REAL_T \fP\fP
+Index storage occupation per nnz, in bytes\&. (\fBrsb_real_t\fP) 
+.TP
+\fB\fIRSB_MIF_MATRIX_ROWS__TO__RSB_COO_INDEX_T \fP\fP
+Rows count (\fBrsb_coo_idx_t\fP) 
+.TP
+\fB\fIRSB_MIF_MATRIX_COLS__TO__RSB_COO_INDEX_T \fP\fP
+Columns count (\fBrsb_coo_idx_t\fP) 
+.TP
+\fB\fIRSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T \fP\fP
+Nonzeroes count (\fBrsb_nnz_idx_t\fP) 
+.TP
+\fB\fIRSB_MIF_TOTAL_SIZE__TO__SIZE_T \fP\fP
+Total size, in bytes (size_t) 
+.TP
+\fB\fIRSB_MIF_MATRIX_FLAGS__TO__RSB_FLAGS_T \fP\fP
+Matrix flags (\fBrsb_flags_t\fP) 
+.TP
+\fB\fIRSB_MIF_MATRIX_TYPECODE__TO__RSB_TYPE_T \fP\fP
+Matrix type code (\fBrsb_type_t\fP) 
+.TP
+\fB\fIRSB_MIF_MATRIX_INFO__TO__CHAR_P \fP\fP
+Matrix info string, only for \fBrsb_mtx_get_info_str()\fP (\fBrsb_char_t\fP*) 
+.TP
+\fB\fIRSB_MIF_LEAVES_COUNT__TO__RSB_BLK_INDEX_T \fP\fP
+Leaf submatrices count (\fBrsb_blk_idx_t\fP) 
+.SS "enum \fBrsb_opt_t\fP"
+
+.PP
+Library option values for \fBrsb_lib_init\fP, \fBrsb_lib_set_opt_str\fP, \fBrsb_lib_reinit\fP, \fBrsb_lib_exit\fP, \fBrsb_lib_get_opt\fP, \fBrsb_lib_set_opt\fP, or (deprecated) macros \fBRSB_REINIT_SINGLE_VALUE_GET\fP, \fBRSB_REINIT_SINGLE_VALUE_SET\fP, \fBRSB_REINIT_SINGLE_VALUE\fP, \fBRSB_REINIT_SINGLE_VALUE_C_IOP\fP\&. 
+.PP
+\fBEnumerator: \fP
+.in +1c
+.TP
+\fB\fIRSB_IO_WANT_VERBOSE_INIT \fP\fP
+\fBRSB_IO_WANT_VERBOSE_INIT\fP prompts for a verbose initialization of the library: messages will be written to the file descriptor (\fCFILE*\fP) pointed by the value pointer when calling \fBrsb_lib_init\fP\&. 
+.TP
+\fB\fIRSB_IO_WANT_VERBOSE_EXIT \fP\fP
+\fBRSB_IO_WANT_VERBOSE_EXIT\fP prompts for a verbose finalization of the library: messages will be written to the file descriptor (\fCFILE*\fP) pointed by the value pointer when calling \fBrsb_lib_exit\fP\&. 
+.TP
+\fB\fIRSB_IO_WANT_OUTPUT_STREAM \fP\fP
+Specifies the default output stream\&. Output (debug info) info will be written to the file descriptor (\fCFILE*\fP) pointed by the value pointer\&. 
+.TP
+\fB\fIRSB_IO_WANT_SORT_METHOD \fP\fP
+Specifies the default sorting method\&. Specified as a pointed integer (\fBrsb_int_t\fP) number, in {[0],1}\&. (internal) 
+.TP
+\fB\fIRSB_IO_WANT_CACHE_BLOCKING_METHOD \fP\fP
+Specifies the default cache blocking method\&. Specified as a pointed integer (\fBrsb_int_t\fP) number, in {-1,[0],1}\&. (internal) 
+.TP
+\fB\fIRSB_IO_WANT_SUBDIVISION_MULTIPLIER \fP\fP
+Specifies a multiplier for finer (if >1\&.0) or coarser (if <1\&.0) subdivisions\&. Specified as a pointed (\fBrsb_real_t\fP) number, in {\&.\&.,[1\&.0],\&.\&.}\&. (internal) 
+.TP
+\fB\fIRSB_IO_WANT_VERBOSE_ERRORS \fP\fP
+Prompts for a verbose error reporting: messages will be written to the file descriptor (\fCFILE*\fP) pointed by the value pointer\&. Only meaningful if an interface error verbosity greater than 0 was set at configure time\&. 
+.TP
+\fB\fIRSB_IO_WANT_BOUNDED_BOX_COMPUTATION \fP\fP
+Prompts for bounded box computation, for smoother submatrices locking; pointed \fBrsb_int_t\fP in {0,[1]}\&. (internal)\&. 
+.TP
+\fB\fIRSB_IO_WANT_EXECUTING_THREADS \fP\fP
+Specifies the number of desired executing threads; pointed \fBrsb_int_t\fP in {[0],1,\&.\&.}\&. 
+.TP
+\fB\fIRSB_IO_WANT_EXTRA_VERBOSE_INTERFACE \fP\fP
+Specifies the level of interface verbosity; if setting, pointed \fBrsb_int_t\fP values should be in {[0],1,\&.\&.}\&. Support may be enabled or disabled at build time via the \fC--enable-internals-error-verbosity\fP configure option\&. If disabled, only getting is supported and yields -1, but setting is not supported and the \fBRSB_ERR_NO_STREAM_OUTPUT_CONFIGURED_OUT\fP error will be returned\&. 
+.TP
+\fB\fIRSB_IO_WANT_MEMORY_HIERARCHY_INFO_STRING \fP\fP
+Specifies a custom memory hierarchy info string; pointed \fCconst\fP \fBrsb_char_t\fP*; (may point to a NULL string pointer)\&. 
+.TP
+\fB\fIRSB_IO_WANT_IS_INITIALIZED_MARKER \fP\fP
+Used for getting whether the library has been initialized (\fBRSB_BOOL_TRUE\fP) or not (\fBRSB_BOOL_FALSE\fP) ; pointed \fCconst\fP \fBrsb_bool_t\fP*; (this is NOT for general users)\&. 
+.TP
+\fB\fIRSB_IO_WANT_MEM_ALLOC_CNT \fP\fP
+Used for getting the count of memory allocations performed by librsb through its memory allocation wrapper (if disabled, will return zero); pointed \fCconst\fP \fCsize_t*\fP; (this is for debugging purposes)\&. 
+.TP
+\fB\fIRSB_IO_WANT_MEM_ALLOC_TOT \fP\fP
+Used for getting the total amount of memory allocated by librsb through its memory allocation wrapper (if disabled, will return zero); pointed \fCconst\fP \fCsize_t*\fP; (this is for debugging purposes)\&. 
+.TP
+\fB\fIRSB_IO_WANT_LEAF_LEVEL_MULTIVEC \fP\fP
+Specifies whether the default multi-vector ops shall act at a leaf level (default value of 0 is yes)\&. Specified as a pointed integer (\fBrsb_int_t\fP) number, in {-1,[0]}\&. (internal) 
+.TP
+\fB\fIRSB_IO_WANT_MAX_MEMORY_ALLOCATIONS \fP\fP
+Specifies an upper limit to the count of allocated memory areas (default value of 0 means no limit)\&. Specified as a pointed \fCsize_t\fP\&. Only works if the memory wrapper (\fC--enable-allocator-wrapper\fP) has been specified at configure time\&. 
+.TP
+\fB\fIRSB_IO_WANT_MAX_MEMORY_ALLOCATED \fP\fP
+Specifies an upper limit to the amount of allocated memory (default value of 0 means no limit)\&. Specified as a pointed \fCsize_t\fP\&. Only works if the memory wrapper (\fC--enable-allocator-wrapper\fP) has been specified at configure time\&. 
+.TP
+\fB\fIRSB_IO_WANT_LIBRSB_ETIME \fP\fP
+Represents time spent in librsb\&. Specified as a pointed \fBrsb_time_t\fP\&. Only works if statistics collection (\fC--enable-librsb-stats\fP) was specified at configure time\&. 
+.TP
+\fB\fIRSB_IO_WANT_VERBOSE_TUNING \fP\fP
+Auto tuning verbosity level for rsb_tune_spmm/rsb_tune_spsm\&. If 0, no verbosity; if 1, verbose; if 2, verbose with trace files being dumped\&. 
+.SH "Function Documentation"
+.PP 
+.SS "\fBrsb_err_t\fP rsb_coo_sort (void *VA, \fBrsb_coo_idx_t\fP *IA, \fBrsb_coo_idx_t\fP *JA, \fBrsb_nnz_idx_t\fP nnzA, \fBrsb_coo_idx_t\fP nrA, \fBrsb_coo_idx_t\fP ncA, \fBrsb_type_t\fP typecode, \fBrsb_flags_t\fP flagsA)"
+Sorts the given COO input arrays, representing a sparse matrix $A$, in row-major order\&.
+.PP
+\fBParameters:\fP
+.RS 4
+\fIVA,IA,JA\fP Input/output numerical values (\fCVA\fP) array; input/output row (\fCIA\fP) and column (\fCJA\fP) indices arrays\&. 
+.br
+\fInnzA\fP The number of nonzeroes in the input arrays representing matrix $A$\&. 
+.br
+\fInrA,ncA\fP The number of rows and columns of the sparse matrix $A$\&. 
+.br
+\fItypecode\fP A valid type code for the given (numerical array) input pointer (see \fBmatrix_type_symbols_section\fP)\&. 
+.br
+\fIflagsA\fP A valid combination of matrix storage flags\&. If unsure, use \fBRSB_FLAG_NOFLAGS\fP\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_time\fP, \fBrsb_coo_sort\fP
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+By invoking with swapped \fCIA\fP and \fCJA\fP (and swapping \fCnrA\fP and \fCncA\fP as well) one can obtain column major order\&.
+.RE
+.PP
+
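+A minimal C sketch sorting three double-precision COO entries in place (error handling omitted):
+.PP
+.nf
+#include <rsb.h>
+
+void sort_example(void)
+{
+    double VA[3]        = { 9.0, 8.0, 7.0 };
+    rsb_coo_idx_t IA[3] = { 2, 0, 1 };
+    rsb_coo_idx_t JA[3] = { 0, 0, 0 };
+
+    /* sorts the three arrays row-major, in place */
+    rsb_coo_sort(VA, IA, JA, 3, 3, 1,
+                 RSB_NUMERICAL_TYPE_DOUBLE, RSB_FLAG_NOFLAGS);
+    /* for column-major order, swap IA/JA and the nrA/ncA arguments */
+}
+.fi
+.PP
+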
+.SS "\fBrsb_err_t\fP rsb_file_mtx_get_dims (const char *filename, \fBrsb_coo_idx_t\fP *nrp, \fBrsb_coo_idx_t\fP *ncp, \fBrsb_coo_idx_t\fP *nzp, \fBrsb_flags_t\fP *flagsp)"
+Reads structural information (dimensions, structural flags) for a matrix file into user-specified (and optionally \fCNULL\fP) variables\&.
+.PP
+\fBParameters:\fP
+.RS 4
+\fIfilename\fP The specified matrix file name (cannot be \fCNULL\fP)\&. 
+.br
+\fInrp,ncp\fP Output pointers to rows and columns count variables (can be \fCNULL\fP)\&. 
+.br
+\fInzp\fP Output pointer to the nonzeroes count variable (can be \fCNULL\fP)\&. 
+.br
+\fIflagsp\fP Output pointer to the detected structural flags variable\&. Will be a combination of \fBRSB_FLAG_LOWER\fP, \fBRSB_FLAG_UPPER\fP, \fBRSB_FLAG_SYMMETRIC\fP, \fBRSB_FLAG_HERMITIAN\fP\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&. If read dimensions are illegal (see \fBrsb_coo_idx_t\fP,\fBrsb_nnz_idx_t\fP), \fBRSB_ERR_LIMITS\fP will be returned\&.
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+The only sparse matrix file format currently supported is Matrix Market\&. E\&.g\&.: 
+.PP
+.nf
+%%MatrixMarket matrix coordinate real symmetric
+%
+% A Hilbert Matrix of order 3, so with 3 rows, 3 columns, and 6 nonzeroes.
+%
+3 3 6
+1 1 1.0
+2 1 0.5
+2 2 0.33
+3 1 0.33
+3 2 0.25
+3 3 0.2
+
+.fi
+.PP
+ In the above example header on the first line, you can specify either \fCreal\fP or \fCcomplex\fP or \fCpattern\fP for the numerical type\&. Either \fCgeneral\fP, \fCsymmetric\fP, \fChermitian\fP can be specified for the structure\&. In case of \fCpattern\fP matrices, only coordinate indices will be loaded (saving \fCpattern\fP matrices is not yet supported); in case of \fCreal\fP matrices, also one coefficient value will be saved/loaded; in the case of \fCcomplex\fP matrices, both the  [...]
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_get_coo\fP, \fBrsb_mtx_get_csr\fP, \fBrsb_mtx_get_rows_sparse\fP, \fBrsb_mtx_get_coo_block\fP, \fBrsb_mtx_get_prec\fP, \fBrsb_mtx_get_nrm\fP, \fBrsb_mtx_get_vec\fP, \fBrsb_file_mtx_get_dims\fP, \fBrsb_mtx_get_vals\fP
+.RE
+.PP
+
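+A minimal C sketch querying a Matrix Market file (assumption: a file named \fCA\&.mtx\fP exists):
+.PP
+.nf
+#include <rsb.h>
+
+void dims_example(void)
+{
+    rsb_coo_idx_t nr = 0, nc = 0, nz = 0;
+    rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+
+    if (rsb_file_mtx_get_dims("A.mtx", &nr, &nc, &nz, &flags)
+            == RSB_ERR_NO_ERROR)
+    {
+        /* nr x nc matrix with nz nonzeroes; flags report structure */
+    }
+}
+.fi
+.PP
+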
+.SS "struct rsb_mtx_t* rsb_file_mtx_load (const \fBrsb_char_t\fP *filename, \fBrsb_flags_t\fP flagsA, \fBrsb_type_t\fP typecode, \fBrsb_err_t\fP *errvalp)\fC [read]\fP"
+Loads a sparse matrix from the specified matrix file, assembling it in the format specified by flags, using the numerical type representation as specified by the user\&.
+.PP
+\fBParameters:\fP
+.RS 4
+\fIfilename\fP The specified matrix file name (cannot be \fCNULL\fP)\&. 
+.br
+\fIflagsA\fP A valid combination of matrix storage flags\&. 
+.br
+\fItypecode\fP A valid type code for the given (numerical array) input pointer (see \fBmatrix_type_symbols_section\fP)\&. 
+.br
+\fIerrvalp\fP An optional (can be \fCNULL\fP) pointer to \fBrsb_err_t\fP where the error status will be written to\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, a valid pointer (\fCstruct\fP \fCrsb_mtx_t*\fP) to the newly allocated matrix structure; on error, \fCNULL\fP\&.
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+The only sparse matrix file format currently supported is Matrix Market\&. E\&.g\&.: 
+.PP
+.nf
+%%MatrixMarket matrix coordinate real symmetric
+%
+% A Hilbert Matrix of order 3, so with 3 rows, 3 columns, and 6 nonzeroes.
+%
+3 3 6
+1 1 1.0
+2 1 0.5
+2 2 0.33
+3 1 0.33
+3 2 0.25
+3 3 0.2
+
+.fi
+.PP
+ In the above example header on the first line, you can specify either \fCreal\fP or \fCcomplex\fP or \fCpattern\fP for the numerical type\&. Either \fCgeneral\fP, \fCsymmetric\fP, \fChermitian\fP can be specified for the structure\&. In case of \fCpattern\fP matrices, only coordinate indices will be loaded (saving \fCpattern\fP matrices is not yet supported); in case of \fCreal\fP matrices, also one coefficient value will be saved/loaded; in the case of \fCcomplex\fP matrices, both the  [...]
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_get_info\fP, \fBrsb_mtx_get_info_str\fP, \fBrsb_file_mtx_save\fP, \fBrsb_file_vec_load\fP, \fBrsb_file_mtx_load\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_file_mtx_rndr (void *pmp, const char *filename, \fBrsb_coo_idx_t\fP pmlWidth, \fBrsb_coo_idx_t\fP pmWidth, \fBrsb_coo_idx_t\fP pmHeight, \fBrsb_marf_t\fP rflags)"
+Renders as pixel map the matrix contained in a matrix file\&.
+.PP
+\fBParameters:\fP
+.RS 4
+\fIpmp\fP Pixel map array pointer\&. 
+.br
+\fIfilename\fP The specified matrix file name (cannot be \fCNULL\fP)\&. 
+.br
+\fIpmlWidth\fP Stride between lines (in pixels; no less than \fCpmWidth\fP)\&. 
+.br
+\fIpmWidth\fP Pixel map width (in pixels or points)\&. 
+.br
+\fIpmHeight\fP Pixel map height (in pixels or points)\&. 
+.br
+\fIrflags\fP The color mode; only \fBRSB_MARF_RGB\fP is supported for now (1 byte per channel, 3 channels --- red, green, blue): this requires array \fCpmp\fP to be at least (3*\fCpmlWidth\fP*\fCpmHeight\fP) bytes large\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+This function has not been thoroughly tested\&.
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+At the time being, \fCpmlWidth\fP is required to be equal to \fCpmWidth\fP\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_rndr\fP, \fBrsb_file_mtx_rndr\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_file_mtx_save (const struct rsb_mtx_t *mtxAp, const \fBrsb_char_t\fP *filename)"
+Saves the given matrix to the specified matrix file\&.
+.PP
+\fBParameters:\fP
+.RS 4
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fIfilename\fP The specified output file name (if \fCNULL\fP, will write to standard output)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Some structural info contained in the matrix structural flags may be lost in the output data\&.
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+The only sparse matrix file format currently supported is Matrix Market\&. E\&.g\&.: 
+.PP
+.nf
+%%MatrixMarket matrix coordinate real symmetric
+%
+% A Hilbert Matrix of order 3, so with 3 rows, 3 columns, and 6 nonzeroes.
+%
+3 3 6
+1 1 1.0
+2 1 0.5
+2 2 0.33
+3 1 0.33
+3 2 0.25
+3 3 0.2
+
+.fi
+.PP
+ In the above example header on the first line, you can specify either \fCreal\fP or \fCcomplex\fP or \fCpattern\fP for the numerical type\&. Either \fCgeneral\fP, \fCsymmetric\fP, \fChermitian\fP can be specified for the structure\&. In case of \fCpattern\fP matrices, only coordinate indices will be loaded (saving \fCpattern\fP matrices is not yet supported); in case of \fCreal\fP matrices, also one coefficient value will be saved/loaded; in the case of \fCcomplex\fP matrices, both the  [...]
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_get_info\fP, \fBrsb_mtx_get_info_str\fP, \fBrsb_file_mtx_save\fP, \fBrsb_file_vec_load\fP, \fBrsb_file_mtx_load\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_file_vec_load (const \fBrsb_char_t\fP *filename, \fBrsb_type_t\fP typecode, void *Yp, \fBrsb_coo_idx_t\fP *yvlp)"
+Loads a dense vector from the specified file, using the numerical type representation as specified by the user\&. This function is intended to be called in two steps: first with \fCYp=NULL\fP, in order to write the vector length to \fC*yvlp\fP ; then, with \fCyvlp=NULL\fP, to get \fCYp\fP written\&.
+.PP
+\fBParameters:\fP
+.RS 4
+\fIfilename\fP The specified vector file name (cannot be \fCNULL\fP)\&. 
+.br
+\fItypecode\fP A valid type code for the given (numerical array) input pointer (see \fBmatrix_type_symbols_section\fP)\&. 
+.br
+\fIYp\fP The input array vector\&. 
+.br
+\fIyvlp\fP An optional pointer (can be \fCNULL\fP)\&. If supplied, vector length will be written here, and no vector will be read\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&.
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+The only dense vector file format currently supported is Matrix Market\&. E\&.g\&.: 
+.PP
+.nf
+%%MatrixMarket matrix array complex general
+6           1
+11.000000000000000E+000 12.000000000000000E+000 
+21.000000000000000E+000 22.000000000000000E+000 
+31.000000000000000E+000 32.000000000000000E+000 
+41.000000000000000E+000 42.000000000000000E+000 
+51.000000000000000E+000 52.000000000000000E+000 
+61.000000000000000E+000 62.000000000000000E+000 
+
+.fi
+.PP
+ In the above example header on the first line, you can specify either \fCreal\fP or \fCcomplex\fP for the numerical type\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_get_info\fP, \fBrsb_mtx_get_info_str\fP, \fBrsb_file_mtx_save\fP, \fBrsb_file_vec_load\fP, \fBrsb_file_mtx_load\fP
+.RE
+.PP
+
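+A minimal C sketch of the two-step pattern described above (double precision; error handling omitted):
+.PP
+.nf
+#include <rsb.h>
+#include <stdlib.h>
+
+double *load_vec_example(const char *filename)
+{
+    rsb_coo_idx_t yvl = 0;
+    double *Yp = NULL;
+
+    /* first call: Yp==NULL, only the vector length is written to yvl */
+    rsb_file_vec_load(filename, RSB_NUMERICAL_TYPE_DOUBLE, NULL, &yvl);
+    Yp = malloc(yvl * sizeof(*Yp));
+    /* second call: yvlp==NULL, the vector itself is written to Yp */
+    rsb_file_vec_load(filename, RSB_NUMERICAL_TYPE_DOUBLE, Yp, NULL);
+    return Yp;
+}
+.fi
+.PP
+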
+.SS "\fBrsb_err_t\fP rsb_file_vec_save (const \fBrsb_char_t\fP *filename, \fBrsb_type_t\fP typecode, const void *Yp, \fBrsb_coo_idx_t\fP yvl)"
+Saves a dense vector to the specified file, using the numerical type representation as specified by the user\&. This function assumes \fCYp!=NULL\fP and \fCyvl>0\fP\&.
+.PP
+\fBParameters:\fP
+.RS 4
+\fIfilename\fP The specified vector file name (cannot be \fCNULL\fP)\&. 
+.br
+\fItypecode\fP A valid type code for the given (numerical array) input pointer (see \fBmatrix_type_symbols_section\fP)\&. 
+.br
+\fIYp\fP The output array vector\&. 
+.br
+\fIyvl\fP Output vector length\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&.
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+The only dense vector file format currently supported is Matrix Market\&. E\&.g\&.: 
+.PP
+.nf
+%%MatrixMarket matrix array complex general
+6           1
+11.000000000000000E+000 12.000000000000000E+000 
+21.000000000000000E+000 22.000000000000000E+000 
+31.000000000000000E+000 32.000000000000000E+000 
+41.000000000000000E+000 42.000000000000000E+000 
+51.000000000000000E+000 52.000000000000000E+000 
+61.000000000000000E+000 62.000000000000000E+000 
+
+.fi
+.PP
+ In the above example header on the first line, you can specify either \fCreal\fP or \fCcomplex\fP or \fCpattern\fP for the numerical type\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_get_info\fP, \fBrsb_mtx_get_info_str\fP, \fBrsb_file_mtx_save\fP, \fBrsb_file_vec_load\fP, \fBrsb_file_mtx_load\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_lib_exit (struct \fBrsb_initopts\fP *iop)"
+Finalize \fClibrsb\fP\&. 
+.br
+ \fBrsb_lib_exit\fP should be called after having freed all matrices\&. 
+.br
+ If not all of the data structures were properly deallocated before, this function may still attempt finalizing the library and return the \fBRSB_ERR_MEMORY_LEAK\fP error code (this depends on the \fC--enable-allocator-wrapper\fP configure time option)\&. Any allocated memory will be lost (\fClibrsb\fP does not keep track of allocated matrices)\&. 
+.br
+ Internal library state will be cleared\&. After this call, it is legal to initialize the library again, by calling \fBrsb_lib_init()\fP\&. 
+.br
+ On an error, the library state may be inconsistent, so it is advisable to terminate program execution rather than forcing a new initialization with \fBrsb_lib_init()\fP\&. 
+.br
+ Parameter \fCiop\fP is reserved for future use; for now it is safe to pass \fBRSB_NULL_EXIT_OPTIONS\fP\&.
+.PP
+\fBParameters:\fP
+.RS 4
+\fIiop\fP A pointer to a \fBrsb_initopts\fP structure with library options\&. It may be \fCNULL\fP (or better, \fBRSB_NULL_INIT_OPTIONS\fP/\fBRSB_NULL_EXIT_OPTIONS\fP) for specifying default options\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_lib_init\fP, \fBrsb_lib_set_opt_str\fP, \fBrsb_lib_reinit\fP, \fBrsb_lib_exit\fP, \fBrsb_lib_get_opt\fP, \fBrsb_lib_set_opt\fP, or (deprecated) macros \fBRSB_REINIT_SINGLE_VALUE_GET\fP, \fBRSB_REINIT_SINGLE_VALUE_SET\fP, \fBRSB_REINIT_SINGLE_VALUE\fP, \fBRSB_REINIT_SINGLE_VALUE_C_IOP\fP\&.
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_lib_init (struct \fBrsb_initopts\fP *iop)"
+This is the library initialization function\&. 
+.br
+ It must be called only once before using any other library function\&. 
+.br
+ It is allowed to call it again after \fBrsb_lib_exit()\fP\&. 
+.br
+ To fine-tune the library behaviour, one may specify a number of options via the \fCiop\fP parameter\&. 
+.br
+ Options may be specified also after \fBrsb_lib_init()\fP by calling \fBrsb_lib_reinit()\fP\&. 
+.br
+ One may call \fBRSB_REINIT_SINGLE_VALUE_GET\fP with flag \fBRSB_IO_WANT_IS_INITIALIZED_MARKER\fP to verify whether the library has been initialized or not\&.
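+.PP
+A minimal sketch of initialization and finalization with default options (error handling shortened):
+.PP
+.nf
+rsb_err_t errval = RSB_ERR_NO_ERROR;
+errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS);
+if (errval != RSB_ERR_NO_ERROR)
+	return EXIT_FAILURE;
+// ... allocate matrices, compute, free all matrices ...
+errval = rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
+.fi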
+.PP
+\fBParameters:\fP
+.RS 4
+\fIiop\fP A pointer to a \fBrsb_initopts\fP structure with library options\&. It may be \fCNULL\fP (or better, \fBRSB_NULL_INIT_OPTIONS\fP/\fBRSB_NULL_EXIT_OPTIONS\fP) for specifying default options\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_lib_init\fP, \fBrsb_lib_set_opt_str\fP, \fBrsb_lib_reinit\fP, \fBrsb_lib_exit\fP, \fBrsb_lib_get_opt\fP, \fBrsb_lib_set_opt\fP, or (deprecated) macros \fBRSB_REINIT_SINGLE_VALUE_GET\fP, \fBRSB_REINIT_SINGLE_VALUE_SET\fP, \fBRSB_REINIT_SINGLE_VALUE\fP, \fBRSB_REINIT_SINGLE_VALUE_C_IOP\fP\&.
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_lib_reinit (struct \fBrsb_initopts\fP *iop)"
+Changes the library operation options which were set at initialization time either by a user or as defaults\&. 
+.br
+ Not all options may be supported, depending on build time library settings\&. 
+.br
+ If an unsupported option was specified, an appropriate error (e\&.g\&.: \fBRSB_ERR_UNSUPPORTED_OPERATION\fP) will be returned\&. 
+.br
+ On the first error, option processing is interrupted and the remaining options (if any) are not processed\&. 
+.br
+ Program execution may continue safely even if an error code is returned (that is, library status should be consistent)\&. 
+.br
+.PP
+\fBParameters:\fP
+.RS 4
+\fIiop\fP A pointer to a \fBrsb_initopts\fP structure with library options\&. It may be \fCNULL\fP (or better, \fBRSB_NULL_INIT_OPTIONS\fP/\fBRSB_NULL_EXIT_OPTIONS\fP) for specifying default options\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_lib_init\fP, \fBrsb_lib_set_opt_str\fP, \fBrsb_lib_reinit\fP, \fBrsb_lib_exit\fP, \fBrsb_lib_get_opt\fP, \fBrsb_lib_set_opt\fP, or (deprecated) macros \fBRSB_REINIT_SINGLE_VALUE_GET\fP, \fBRSB_REINIT_SINGLE_VALUE_SET\fP, \fBRSB_REINIT_SINGLE_VALUE\fP, \fBRSB_REINIT_SINGLE_VALUE_C_IOP\fP\&.
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_lib_set_opt_str (const \fBrsb_char_t\fP *opnp, const \fBrsb_char_t\fP *opvp)"
+Specifies individual library options in order to fine-tune the library behaviour\&. Both the option name and the value shall be expressed as strings, identical to their preprocessor identifiers (see \fBrsb_opt_t\fP )\&. The \fCopnp\fP string will be translated internally to the corresponding request flag values, and the passed value will be parsed out of the \fCopvp\fP string\&. 
+.br
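+.PP
+For example, a plausible call (the option name is the string form of an \fBrsb_opt_t\fP identifier; the value is parsed out of the string):
+.PP
+.nf
+// request e.g. four executing threads via the string interface
+errval = rsb_lib_set_opt_str("RSB_IO_WANT_EXECUTING_THREADS", "4");
+.fi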
+.PP
+\fBParameters:\fP
+.RS 4
+\fIopnp\fP A pointer to a library option input name string (may not be \fCNULL\fP)\&. 
+.br
+\fIopvp\fP A pointer to a library option input value string (may not be \fCNULL\fP)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&.
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_lib_init\fP, \fBrsb_lib_set_opt_str\fP, \fBrsb_lib_reinit\fP, \fBrsb_lib_exit\fP, \fBrsb_lib_get_opt\fP, \fBrsb_lib_set_opt\fP, or (deprecated) macros \fBRSB_REINIT_SINGLE_VALUE_GET\fP, \fBRSB_REINIT_SINGLE_VALUE_SET\fP, \fBRSB_REINIT_SINGLE_VALUE\fP, \fBRSB_REINIT_SINGLE_VALUE_C_IOP\fP\&.
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_mtx_add_to_dense (const void *alphap, const struct rsb_mtx_t *mtxAp, \fBrsb_nnz_idx_t\fP ldB, \fBrsb_nnz_idx_t\fP nrB, \fBrsb_nnz_idx_t\fP ncB, \fBrsb_bool_t\fP rowmajorB, void *Bp)"
+Dense matrix B is updated by adding scaled sparse matrix ${A}$ to it: $B \leftarrow B + \alpha {A} $
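+.PP
+A minimal sketch, assuming a double typed \fCmtxAp\fP and a previously allocated row major \fCnrB\fP x \fCncB\fP dense array \fCBp\fP:
+.PP
+.nf
+// B <- B + 2.0 * A ; the leading dimension equals ncB for row major storage
+const double alpha = 2.0;
+errval = rsb_mtx_add_to_dense(&alpha, mtxAp, ncB, nrB, ncB, RSB_BOOL_TRUE, Bp);
+.fi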
+.PP
+\fBParameters:\fP
+.RS 4
+\fIalphap\fP Optional pointer (if \fCNULL\fP, will default to 1) to a numerical value (of the same type as matrix)\&. 
+.br
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fIldB\fP Leading dimension of \fCBp\fP array\&. 
+.br
+\fInrB,ncB\fP The number of rows and columns for the dense matrix $B$\&. 
+.br
+\fIrowmajorB\fP \fBRSB_BOOL_TRUE\fP if the dense matrix $B$ is considered stored as row major, or \fBRSB_BOOL_FALSE\fP if as column major\&. 
+.br
+\fIBp\fP Array representing the dense matrix $B$\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+This function has not been thoroughly tested\&.
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+Please note that it suffices to 'transpose' \fCBp\fP's description parameters to get $A$ summed in transposed\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_spmsp_to_dense\fP, \fBrsb_sppsp\fP, \fBrsb_spmsp\fP, \fBrsb_mtx_add_to_dense\fP
+.RE
+.PP
+
+.SS "struct rsb_mtx_t* rsb_mtx_alloc_from_coo_begin (\fBrsb_nnz_idx_t\fP nnzA, \fBrsb_type_t\fP typecode, \fBrsb_coo_idx_t\fP nrA, \fBrsb_coo_idx_t\fP ncA, \fBrsb_flags_t\fP flagsA, \fBrsb_err_t\fP *errvalp)\fC [read]\fP"
+Creates an empty matrix structure in assembly state\&. The user then populates it using \fBrsb_mtx_set_vals()\fP repeatedly; then assembles it with \fBrsb_mtx_alloc_from_coo_end()\fP\&.
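+.PP
+A minimal sketch of the whole assembly workflow, on a hypothetical 2 x 2 double matrix with C (0 based) indices:
+.PP
+.nf
+struct rsb_mtx_t *mtxAp = NULL;
+rsb_err_t errval = RSB_ERR_NO_ERROR;
+const double VA[] = {11.0, 22.0};
+const rsb_coo_idx_t IA[] = {0, 1}, JA[] = {0, 1};
+mtxAp = rsb_mtx_alloc_from_coo_begin(2, RSB_NUMERICAL_TYPE_DOUBLE,
+	2, 2, RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS, &errval);
+// populate (this call may be repeated), then assemble
+errval = rsb_mtx_set_vals(mtxAp, VA, IA, JA, 2,
+	RSB_FLAG_C_INDICES_INTERFACE | RSB_FLAG_DUPLICATES_SUM);
+errval = rsb_mtx_alloc_from_coo_end(&mtxAp);
+.fi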
+.PP
+\fBParameters:\fP
+.RS 4
+\fInnzA\fP A rough estimate of the number of nonzeroes matrix $A$ will host (used for optimizing arrays allocation)\&. If you do not know yet, you can specify zero\&. 
+.br
+\fItypecode\fP A valid type code for the given (numerical array) input pointer (see \fBmatrix_type_symbols_section\fP)\&. 
+.br
+\fInrA,ncA\fP The number of rows and columns of the sparse matrix $A$\&. 
+.br
+\fIflagsA\fP A valid combination of index conversion and matrix storage flags and other meaningful flags\&. The encouraged base choice here is \fBRSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS\fP\&. If Fortran (1 based) indices are being used for the IA, JA arrays, then the \fBRSB_FLAG_FORTRAN_INDICES_INTERFACE\fP flag should be added\&. If symmetric storage is desired, then \fBRSB_FLAG_SYMMETRIC\fP (or \fBRSB_FLAG_HERMITIAN\fP, for Hermitian matrices) is necessary, in combination with either \fBRSB_FLAG_LOWER\fP or \fBRSB_FLAG_UPPER\fP (to specify which triangle is being provided)\&. 
+.br
+\fIerrvalp\fP An optional (can be \fCNULL\fP) pointer to \fBrsb_err_t\fP where the error status will be written to\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+Pointer to a \fCrsb_mtx_t\fP matrix structure in assembly state, or \fCNULL\fP (on error)\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+This function has not been thoroughly tested\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_alloc_from_coo_const\fP, \fBrsb_mtx_alloc_from_coo_inplace\fP, \fBrsb_mtx_free\fP, \fBrsb_mtx_clone\fP, \fBrsb_mtx_alloc_from_csr_const\fP, \fBrsb_mtx_alloc_from_csc_const\fP, \fBrsb_mtx_alloc_from_csr_inplace\fP, \fBrsb_mtx_switch_to_csr\fP, \fBrsb_mtx_alloc_from_coo_begin\fP, \fBrsb_mtx_alloc_from_coo_end\fP
+.RE
+.PP
+
+.SS "struct rsb_mtx_t* rsb_mtx_alloc_from_coo_const (const void *VA, const \fBrsb_coo_idx_t\fP *IA, const \fBrsb_coo_idx_t\fP *JA, \fBrsb_nnz_idx_t\fP nnzA, \fBrsb_type_t\fP typecode, \fBrsb_coo_idx_t\fP nrA, \fBrsb_coo_idx_t\fP ncA, \fBrsb_blk_idx_t\fP brA, \fBrsb_blk_idx_t\fP bcA, \fBrsb_flags_t\fP flagsA, \fBrsb_err_t\fP *errvalp)\fC [read]\fP"
+Given as input COO arrays \fCVA\fP,IA,JA, allocates and assembles an RSB matrix using separate arrays\&.
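+.PP
+A minimal sketch, assembling a hypothetical 3 x 3 double matrix with C (0 based) indices:
+.PP
+.nf
+const double VA[] = {11.0, 22.0, 33.0};
+const rsb_coo_idx_t IA[] = {0, 1, 2}, JA[] = {0, 1, 2};
+rsb_err_t errval = RSB_ERR_NO_ERROR;
+struct rsb_mtx_t *mtxAp = rsb_mtx_alloc_from_coo_const(VA, IA, JA, 3,
+	RSB_NUMERICAL_TYPE_DOUBLE, 3, 3, 1, 1,
+	RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS, &errval);
+.fi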
+.PP
+\fBParameters:\fP
+.RS 4
+\fIVA,IA,JA\fP Input numerical values (\fCVA\fP) array; row (\fCIA\fP) and column (\fCJA\fP) input indices arrays\&. 
+.br
+\fInnzA\fP The number of nonzeroes in the input arrays representing matrix $A$\&. 
+.br
+\fItypecode\fP A valid type code for the given (numerical array) input pointer (see \fBmatrix_type_symbols_section\fP)\&. 
+.br
+\fInrA,ncA\fP The number of rows and columns of the sparse matrix $A$\&. 
+.br
+\fIbrA,bcA\fP Blocking parameters: \fCbrA\fP should be set to 1 or \fBRSB_DEFAULT_ROW_BLOCKING\fP (currently unused, reserved for future use); \fCbcA\fP should be set to 1 or \fBRSB_DEFAULT_COL_BLOCKING\fP (currently unused, reserved for future use)\&. 
+.br
+\fIflagsA\fP A valid combination of index conversion and matrix storage flags and other meaningful flags\&. The encouraged base choice here is \fBRSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS\fP\&. If Fortran (1 based) indices are being used for the IA, JA arrays, then the \fBRSB_FLAG_FORTRAN_INDICES_INTERFACE\fP flag should be added\&. If symmetric storage is desired, then \fBRSB_FLAG_SYMMETRIC\fP (or \fBRSB_FLAG_HERMITIAN\fP, for Hermitian matrices) is necessary, in combination with either \fBRSB_FLAG_LOWER\fP or \fBRSB_FLAG_UPPER\fP (to specify which triangle is being provided)\&. 
+.br
+\fIerrvalp\fP An optional (can be \fCNULL\fP) pointer to \fBrsb_err_t\fP where the error status will be written to\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, a valid pointer (\fCstruct\fP \fCrsb_mtx_t*\fP) to the newly allocated matrix structure; on error, \fCNULL\fP\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_alloc_from_coo_const\fP, \fBrsb_mtx_alloc_from_coo_inplace\fP, \fBrsb_mtx_free\fP, \fBrsb_mtx_clone\fP, \fBrsb_mtx_alloc_from_csr_const\fP, \fBrsb_mtx_alloc_from_csc_const\fP, \fBrsb_mtx_alloc_from_csr_inplace\fP, \fBrsb_mtx_switch_to_csr\fP, \fBrsb_mtx_alloc_from_coo_begin\fP, \fBrsb_mtx_alloc_from_coo_end\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_mtx_alloc_from_coo_end (struct rsb_mtx_t **mtxApp)"
+Assembles RSB arrays for a matrix in build state created with \fBrsb_mtx_alloc_from_coo_begin()\fP and populated with \fBrsb_mtx_set_vals()\fP\&. After assembly, any operation on the matrix is allowed\&.
+.PP
+\fBParameters:\fP
+.RS 4
+\fImtxApp\fP \fCrsb_mtx_t\fP pointer to an unassembled matrix address\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+This function has not been thoroughly tested\&. 
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+Note that the memory location of the matrix will be changed by this call, and the (old) \fC*mtxApp\fP address value will no longer be valid\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_alloc_from_coo_const\fP, \fBrsb_mtx_alloc_from_coo_inplace\fP, \fBrsb_mtx_free\fP, \fBrsb_mtx_clone\fP, \fBrsb_mtx_alloc_from_csr_const\fP, \fBrsb_mtx_alloc_from_csc_const\fP, \fBrsb_mtx_alloc_from_csr_inplace\fP, \fBrsb_mtx_switch_to_csr\fP, \fBrsb_mtx_alloc_from_coo_begin\fP, \fBrsb_mtx_alloc_from_coo_end\fP
+.RE
+.PP
+
+.SS "struct rsb_mtx_t* rsb_mtx_alloc_from_coo_inplace (void *VA, \fBrsb_coo_idx_t\fP *IA, \fBrsb_coo_idx_t\fP *JA, \fBrsb_nnz_idx_t\fP nnzA, \fBrsb_type_t\fP typecode, \fBrsb_coo_idx_t\fP nrA, \fBrsb_coo_idx_t\fP ncA, \fBrsb_blk_idx_t\fP brA, \fBrsb_blk_idx_t\fP bcA, \fBrsb_flags_t\fP flagsA, \fBrsb_err_t\fP *errvalp)\fC [read]\fP"
+Given as input COO arrays \fCVA\fP,IA,JA, allocates and assembles an RSB matrix reusing input arrays\&. 
+.br
+ Assumes all three \fCVA\fP,IA,JA arrays are at least max(\fCnnzA\fP,\fCnrA+1\fP,\fCncA+1\fP) sized\&. The user is expected NOT to use these arrays until the matrix has been destroyed with \fBrsb_mtx_free()\fP\&. Then, it is possible to use these arrays again\&.
+.PP
+\fBParameters:\fP
+.RS 4
+\fIVA,IA,JA\fP Input/output numerical values array (\fCVA\fP); row (\fCIA\fP) and column (\fCJA\fP) indices arrays\&. 
+.br
+\fInnzA\fP The number of nonzeroes in the input arrays representing matrix $A$\&. 
+.br
+\fItypecode\fP A valid type code for the given (numerical array) input pointer (see \fBmatrix_type_symbols_section\fP)\&. 
+.br
+\fInrA,ncA\fP The number of rows and columns of the sparse matrix $A$\&. 
+.br
+\fIbrA,bcA\fP Blocking parameters: \fCbrA\fP should be set to 1 or \fBRSB_DEFAULT_ROW_BLOCKING\fP (currently unused, reserved for future use); \fCbcA\fP should be set to 1 or \fBRSB_DEFAULT_COL_BLOCKING\fP (currently unused, reserved for future use)\&. 
+.br
+\fIflagsA\fP A valid combination of index conversion and matrix storage flags and other meaningful flags\&. The encouraged base choice here is \fBRSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS\fP\&. If Fortran (1 based) indices are being used for the IA, JA arrays, then the \fBRSB_FLAG_FORTRAN_INDICES_INTERFACE\fP flag should be added\&. If symmetric storage is desired, then \fBRSB_FLAG_SYMMETRIC\fP (or \fBRSB_FLAG_HERMITIAN\fP, for Hermitian matrices) is necessary, in combination with either \fBRSB_FLAG_LOWER\fP or \fBRSB_FLAG_UPPER\fP (to specify which triangle is being provided)\&. 
+.br
+\fIerrvalp\fP An optional (can be \fCNULL\fP) pointer to \fBrsb_err_t\fP where the error status will be written to\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, a valid pointer (\fCstruct\fP \fCrsb_mtx_t*\fP) to the newly allocated matrix structure; on error, \fCNULL\fP\&.
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_alloc_from_coo_const\fP, \fBrsb_mtx_alloc_from_coo_inplace\fP, \fBrsb_mtx_free\fP, \fBrsb_mtx_clone\fP, \fBrsb_mtx_alloc_from_csr_const\fP, \fBrsb_mtx_alloc_from_csc_const\fP, \fBrsb_mtx_alloc_from_csr_inplace\fP, \fBrsb_mtx_switch_to_csr\fP, \fBrsb_mtx_alloc_from_coo_begin\fP, \fBrsb_mtx_alloc_from_coo_end\fP
+.RE
+.PP
+
+.SS "struct rsb_mtx_t* rsb_mtx_alloc_from_csc_const (const void *VA, const \fBrsb_coo_idx_t\fP *IA, const \fBrsb_coo_idx_t\fP *CP, \fBrsb_nnz_idx_t\fP nnzA, \fBrsb_type_t\fP typecode, \fBrsb_coo_idx_t\fP nrA, \fBrsb_coo_idx_t\fP ncA, \fBrsb_blk_idx_t\fP brA, \fBrsb_blk_idx_t\fP bcA, \fBrsb_flags_t\fP flagsA, \fBrsb_err_t\fP *errvalp)\fC [read]\fP"
+Given input read only CSC format arrays, allocates and assembles an RSB matrix (stored in separate arrays)\&.
+.PP
+\fBParameters:\fP
+.RS 4
+\fIVA,IA,CP\fP Input numerical values (\fCVA\fP) array, input row indices (\fCIA\fP) and compressed column (\fCCP\fP) indices arrays\&. 
+.br
+\fInnzA\fP The number of nonzeroes in the input arrays representing matrix $A$\&. 
+.br
+\fItypecode\fP A valid type code for the given (numerical array) input pointer (see \fBmatrix_type_symbols_section\fP)\&. 
+.br
+\fInrA,ncA\fP The number of rows and columns of the sparse matrix $A$\&. 
+.br
+\fIbrA,bcA\fP Blocking parameters: \fCbrA\fP should be set to 1 or \fBRSB_DEFAULT_ROW_BLOCKING\fP (currently unused, reserved for future use); \fCbcA\fP should be set to 1 or \fBRSB_DEFAULT_COL_BLOCKING\fP (currently unused, reserved for future use)\&. 
+.br
+\fIflagsA\fP A valid combination of index conversion and matrix storage flags and other meaningful flags\&. The encouraged base choice here is \fBRSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS\fP\&. If Fortran (1 based) indices are being used for the IA, JA arrays, then the \fBRSB_FLAG_FORTRAN_INDICES_INTERFACE\fP flag should be added\&. If symmetric storage is desired, then \fBRSB_FLAG_SYMMETRIC\fP (or \fBRSB_FLAG_HERMITIAN\fP, for Hermitian matrices) is necessary, in combination with either \fBRSB_FLAG_LOWER\fP or \fBRSB_FLAG_UPPER\fP (to specify which triangle is being provided)\&. 
+.br
+\fIerrvalp\fP An optional (can be \fCNULL\fP) pointer to \fBrsb_err_t\fP where the error status will be written to\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, a valid pointer (\fCstruct\fP \fCrsb_mtx_t*\fP) to the newly allocated matrix structure; on error, \fCNULL\fP\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_alloc_from_coo_const\fP, \fBrsb_mtx_alloc_from_coo_inplace\fP, \fBrsb_mtx_free\fP, \fBrsb_mtx_clone\fP, \fBrsb_mtx_alloc_from_csr_const\fP, \fBrsb_mtx_alloc_from_csc_const\fP, \fBrsb_mtx_alloc_from_csr_inplace\fP, \fBrsb_mtx_switch_to_csr\fP, \fBrsb_mtx_alloc_from_coo_begin\fP, \fBrsb_mtx_alloc_from_coo_end\fP
+.RE
+.PP
+
+.SS "struct rsb_mtx_t* rsb_mtx_alloc_from_csr_const (const void *VA, const \fBrsb_coo_idx_t\fP *RP, const \fBrsb_coo_idx_t\fP *JA, \fBrsb_nnz_idx_t\fP nnzA, \fBrsb_type_t\fP typecode, \fBrsb_coo_idx_t\fP nrA, \fBrsb_coo_idx_t\fP ncA, \fBrsb_blk_idx_t\fP brA, \fBrsb_blk_idx_t\fP bcA, \fBrsb_flags_t\fP flagsA, \fBrsb_err_t\fP *errvalp)\fC [read]\fP"
+Given input read only CSR format arrays, allocates and assembles an RSB matrix (stored in separate arrays)\&.
+.PP
+\fBParameters:\fP
+.RS 4
+\fIVA,RP,JA\fP Input numerical values (\fCVA\fP) array; compressed rows (\fCRP\fP) and column (\fCJA\fP) input indices arrays\&. 
+.br
+\fInnzA\fP The number of nonzeroes in the input arrays representing matrix $A$\&. 
+.br
+\fItypecode\fP A valid type code for the given (numerical array) input pointer (see \fBmatrix_type_symbols_section\fP)\&. 
+.br
+\fInrA,ncA\fP The number of rows and columns of the sparse matrix $A$\&. 
+.br
+\fIbrA,bcA\fP Blocking parameters: \fCbrA\fP should be set to 1 or \fBRSB_DEFAULT_ROW_BLOCKING\fP (currently unused, reserved for future use); \fCbcA\fP should be set to 1 or \fBRSB_DEFAULT_COL_BLOCKING\fP (currently unused, reserved for future use)\&. 
+.br
+\fIflagsA\fP A valid combination of index conversion and matrix storage flags and other meaningful flags\&. The encouraged base choice here is \fBRSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS\fP\&. If Fortran (1 based) indices are being used for the IA, JA arrays, then the \fBRSB_FLAG_FORTRAN_INDICES_INTERFACE\fP flag should be added\&. If symmetric storage is desired, then \fBRSB_FLAG_SYMMETRIC\fP (or \fBRSB_FLAG_HERMITIAN\fP, for Hermitian matrices) is necessary, in combination with either \fBRSB_FLAG_LOWER\fP or \fBRSB_FLAG_UPPER\fP (to specify which triangle is being provided)\&. 
+.br
+\fIerrvalp\fP An optional (can be \fCNULL\fP) pointer to \fBrsb_err_t\fP where the error status will be written to\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, a valid pointer (\fCstruct\fP \fCrsb_mtx_t*\fP) to the newly allocated matrix structure; on error, \fCNULL\fP\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_alloc_from_coo_const\fP, \fBrsb_mtx_alloc_from_coo_inplace\fP, \fBrsb_mtx_free\fP, \fBrsb_mtx_clone\fP, \fBrsb_mtx_alloc_from_csr_const\fP, \fBrsb_mtx_alloc_from_csc_const\fP, \fBrsb_mtx_alloc_from_csr_inplace\fP, \fBrsb_mtx_switch_to_csr\fP, \fBrsb_mtx_alloc_from_coo_begin\fP, \fBrsb_mtx_alloc_from_coo_end\fP
+.RE
+.PP
+
+.SS "struct rsb_mtx_t* rsb_mtx_alloc_from_csr_inplace (void *VA, \fBrsb_nnz_idx_t\fP *RP, \fBrsb_coo_idx_t\fP *JA, \fBrsb_nnz_idx_t\fP nnzA, \fBrsb_type_t\fP typecode, \fBrsb_coo_idx_t\fP nrA, \fBrsb_coo_idx_t\fP ncA, \fBrsb_blk_idx_t\fP brA, \fBrsb_blk_idx_t\fP bcA, \fBrsb_flags_t\fP flagsA, \fBrsb_err_t\fP *errvalp)\fC [read]\fP"
+Given as input CSR arrays \fCVA\fP,RP,JA , allocates and assembles an RSB matrix reusing input arrays\&. 
+.br
+ Assumes all three \fCVA\fP,RP,JA arrays are at least max(\fCnnzA\fP,\fCnrA+1\fP,\fCncA+1\fP) sized\&. The user is expected NOT to use these arrays until the matrix has been destroyed with \fBrsb_mtx_free()\fP\&. Then, it is possible to use these arrays again\&.
+.PP
+\fBParameters:\fP
+.RS 4
+\fIVA,RP,JA\fP Input numerical values (\fCVA\fP) array; compressed rows (\fCRP\fP) and column (\fCJA\fP) input indices arrays\&. Will not be freed by \fBrsb_mtx_free()\fP\&. 
+.br
+\fInnzA\fP The number of nonzeroes in the input arrays representing matrix $A$\&. 
+.br
+\fItypecode\fP A valid type code for the given (numerical array) input pointer (see \fBmatrix_type_symbols_section\fP)\&. 
+.br
+\fInrA,ncA\fP The number of rows and columns of the sparse matrix $A$\&. 
+.br
+\fIbrA,bcA\fP Blocking parameters: \fCbrA\fP should be set to 1 or \fBRSB_DEFAULT_ROW_BLOCKING\fP (currently unused, reserved for future use); \fCbcA\fP should be set to 1 or \fBRSB_DEFAULT_COL_BLOCKING\fP (currently unused, reserved for future use)\&. 
+.br
+\fIflagsA\fP A valid combination of index conversion and matrix storage flags and other meaningful flags\&. The encouraged base choice here is \fBRSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS\fP\&. If Fortran (1 based) indices are being used for the IA, JA arrays, then the \fBRSB_FLAG_FORTRAN_INDICES_INTERFACE\fP flag should be added\&. If symmetric storage is desired, then \fBRSB_FLAG_SYMMETRIC\fP (or \fBRSB_FLAG_HERMITIAN\fP, for Hermitian matrices) is necessary, in combination with either \fBRSB_FLAG_LOWER\fP or \fBRSB_FLAG_UPPER\fP (to specify which triangle is being provided)\&. 
+.br
+\fIerrvalp\fP An optional (can be \fCNULL\fP) pointer to \fBrsb_err_t\fP where the error status will be written to\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, a valid pointer (\fCstruct\fP \fCrsb_mtx_t*\fP) to the newly allocated matrix structure; on error, \fCNULL\fP\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_alloc_from_coo_const\fP, \fBrsb_mtx_alloc_from_coo_inplace\fP, \fBrsb_mtx_free\fP, \fBrsb_mtx_clone\fP, \fBrsb_mtx_alloc_from_csr_const\fP, \fBrsb_mtx_alloc_from_csc_const\fP, \fBrsb_mtx_alloc_from_csr_inplace\fP, \fBrsb_mtx_switch_to_csr\fP, \fBrsb_mtx_alloc_from_coo_begin\fP, \fBrsb_mtx_alloc_from_coo_end\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_mtx_clone (struct rsb_mtx_t **mtxBpp, \fBrsb_type_t\fP typecode, \fBrsb_trans_t\fP transA, const void *alphap, const struct rsb_mtx_t *mtxAp, \fBrsb_flags_t\fP flags)"
+This function clones a given matrix, allocating a fresh data structure or overwriting an existing one\&. 
+.br
+ Target type (specified by \fCtypecode\fP) can be different from that in the matrix\&. If \fCalphap=NULL\fP, the cloned matrix will not be scaled\&. 
+.br
+ This new structure will be completely separated and independent from the original one\&. 
+.br
+ Examples: 
+.PP
+.nf
+// will clone the matrix exactly
+errval = rsb_mtx_clone(&mtxBp,RSB_NUMERICAL_TYPE_SAME_TYPE,RSB_TRANSPOSITION_N,NULL,mtxAp,RSB_FLAG_IDENTICAL_FLAGS);
+// will clone the transpose of the matrix
+errval = rsb_mtx_clone(&mtxBp,RSB_NUMERICAL_TYPE_SAME_TYPE,RSB_TRANSPOSITION_T,NULL,mtxAp,RSB_FLAG_IDENTICAL_FLAGS);
+// will clone the lower triangle of the matrix
+errval = rsb_mtx_clone(&mtxBp,RSB_NUMERICAL_TYPE_SAME_TYPE,RSB_TRANSPOSITION_N,NULL,mtxAp,RSB_FLAG_TRIANGULAR|RSB_FLAG_LOWER);
+
+.fi
+.PP
+.PP
+\fBParameters:\fP
+.RS 4
+\fImtxBpp\fP Valid \fCrsb_mtx_t\fP pointer to an address for matrix $B$\&. If \fC*mtxBpp==NULL\fP, a fresh clone will be assigned there; if not, the existing matrix structure will be freed and allocated to host the new one\&. The case \fC*mtxBpp==mtxAp\fP is supported\&. 
+.br
+\fItypecode\fP A valid type code for the desired output matrix (see \fBmatrix_type_symbols_section\fP)\&. 
+.br
+\fItransA\fP Transposition parameter for $A$ (see \fBmatrix_transposition_flags_section\fP)\&. 
+.br
+\fIalphap\fP Optional pointer (if \fCNULL\fP, will default to 1) to a numerical value for scaling the output\&. Of the type code of \fCmtxAp\fP\&. 
+.br
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fIflags\fP Either \fBRSB_FLAG_IDENTICAL_FLAGS\fP or a combination of other flags, e\&.g\&.: \fBRSB_FLAG_C_INDICES_INTERFACE\fP, \fBRSB_FLAG_SYMMETRIC\fP, \fBRSB_FLAG_HERMITIAN\fP, \fBRSB_FLAG_TRIANGULAR\fP, \fBRSB_FLAG_UPPER\fP, \fBRSB_FLAG_LOWER\fP, \fBRSB_FLAG_UNIT_DIAG_IMPLICIT\fP, \fBRSB_FLAG_DISCARD_ZEROS\fP\&. Flag \fBRSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS\fP is forbidden\&. Flag \fBRSB_FLAG_FORTRAN_INDICES_INTERFACE\fP is ignored\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&.
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_alloc_from_coo_const\fP, \fBrsb_mtx_alloc_from_coo_inplace\fP, \fBrsb_mtx_free\fP, \fBrsb_mtx_clone\fP, \fBrsb_mtx_alloc_from_csr_const\fP, \fBrsb_mtx_alloc_from_csc_const\fP, \fBrsb_mtx_alloc_from_csr_inplace\fP, \fBrsb_mtx_switch_to_csr\fP, \fBrsb_mtx_alloc_from_coo_begin\fP, \fBrsb_mtx_alloc_from_coo_end\fP
+.RE
+.PP
+
+.SS "struct rsb_mtx_t* rsb_mtx_free (struct rsb_mtx_t *mtxAp)\fC [read]\fP"
+Frees a previously allocated sparse matrix structure\&. 
+.br
+ If the matrix has the \fBRSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS\fP flag, the three main data arrays VA,IA,JA will not be freed by \fBrsb_mtx_free\fP (see \fBrsb_mtx_alloc_from_coo_inplace\fP, \fBrsb_mtx_alloc_from_csr_inplace\fP)\&.
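+.PP
+Since this function always returns \fCNULL\fP, a common idiom is to clear the handle in the same statement:
+.PP
+.nf
+// free the matrix and invalidate the handle at once
+mtxAp = rsb_mtx_free(mtxAp);
+.fi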
+.PP
+\fBParameters:\fP
+.RS 4
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+Always \fCNULL\fP\&.
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_alloc_from_coo_const\fP, \fBrsb_mtx_alloc_from_coo_inplace\fP, \fBrsb_mtx_free\fP, \fBrsb_mtx_clone\fP, \fBrsb_mtx_alloc_from_csr_const\fP, \fBrsb_mtx_alloc_from_csc_const\fP, \fBrsb_mtx_alloc_from_csr_inplace\fP, \fBrsb_mtx_switch_to_csr\fP, \fBrsb_mtx_alloc_from_coo_begin\fP, \fBrsb_mtx_alloc_from_coo_end\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_mtx_get_coo (const struct rsb_mtx_t *mtxAp, void *VA, \fBrsb_coo_idx_t\fP *IA, \fBrsb_coo_idx_t\fP *JA, \fBrsb_flags_t\fP flags)"
+Returns the matrix converted to a coordinate (COO) storage format\&. 
+.br
+ Elements will be stored in no particular order\&. 
+.br
+ If there are structural or fill-in zero elements, these will be skipped\&. 
+.br
+ Writes as many entries as there are nonzeroes; use \fBrsb_mtx_get_info\fP(mtxAp,\fBRSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T\fP,&nnz) to find out how many, in order to allocate the arrays correctly\&.
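+.PP
+A minimal sketch, assuming a double typed matrix:
+.PP
+.nf
+rsb_err_t errval = RSB_ERR_NO_ERROR;
+rsb_nnz_idx_t nnz = 0;
+errval = rsb_mtx_get_info(mtxAp, RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T, &nnz);
+double *VA = malloc(sizeof(double) * nnz);
+rsb_coo_idx_t *IA = malloc(sizeof(rsb_coo_idx_t) * nnz);
+rsb_coo_idx_t *JA = malloc(sizeof(rsb_coo_idx_t) * nnz);
+errval = rsb_mtx_get_coo(mtxAp, VA, IA, JA, RSB_FLAG_C_INDICES_INTERFACE);
+.fi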
+.PP
+\fBParameters:\fP
+.RS 4
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fIVA,IA,JA\fP Output numerical values (\fCVA\fP) array; output row (\fCIA\fP) and column (\fCJA\fP) indices arrays\&. 
+.br
+\fIflags\fP Either \fBRSB_FLAG_FORTRAN_INDICES_INTERFACE\fP or \fBRSB_FLAG_C_INDICES_INTERFACE\fP (see \fBflags_section\fP flags section)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&.
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_get_coo\fP, \fBrsb_mtx_get_csr\fP, \fBrsb_mtx_get_rows_sparse\fP, \fBrsb_mtx_get_coo_block\fP, \fBrsb_mtx_get_prec\fP, \fBrsb_mtx_get_nrm\fP, \fBrsb_mtx_get_vec\fP, \fBrsb_file_mtx_get_dims\fP, \fBrsb_mtx_get_vals\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_mtx_get_coo_block (const struct rsb_mtx_t *mtxAp, void *VA, \fBrsb_coo_idx_t\fP *IA, \fBrsb_coo_idx_t\fP *JA, \fBrsb_coo_idx_t\fP frA, \fBrsb_coo_idx_t\fP lrA, \fBrsb_coo_idx_t\fP fcA, \fBrsb_coo_idx_t\fP lcA, \fBrsb_coo_idx_t\fP *IREN, \fBrsb_coo_idx_t\fP *JREN, \fBrsb_nnz_idx_t\fP *rnzp, \fBrsb_flags_t\fP flags)"
+Writes the specified submatrix in COO format\&. Works in two stages: first the user invokes it with \fCVA\fP,IA,JA set to \fCNULL\fP to get \fC*rnzp\fP\&. Then the \fCVA\fP,IA,JA arrays can be allocated, and the function called again, this time with \fCrnzp=NULL\fP but the \fCVA\fP,IA,JA array pointers non \fCNULL\fP (or at least, one of them)\&.
+.PP
+\fBParameters:\fP
+.RS 4
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fIVA,IA,JA\fP Output numerical values (\fCVA\fP) array; output row (\fCIA\fP) and column (\fCJA\fP) indices arrays\&. 
+.br
+\fIfrA,lrA\fP First and last row indices\&. 
+.br
+\fIfcA,lcA\fP First and last column indices\&. 
+.br
+\fIIREN,JREN\fP Renumbering arrays for \fCIA\fP and \fCJA\fP (respectively rows count and columns count sized)\&. If \fCNULL\fP, no renumbering will be used\&. 
+.br
+\fIrnzp\fP A pointer where the number of relevant nonzero elements will be written to\&. 
+.br
+\fIflags\fP Either \fBRSB_FLAG_FORTRAN_INDICES_INTERFACE\fP or \fBRSB_FLAG_C_INDICES_INTERFACE\fP (see \fBflags_section\fP flags section)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&. Examples: 
+.PP
+.nf
+// get nnz count first
+errval=rsb_mtx_get_coo_block(mtxAp,NULL,NULL,NULL,frA,lrA,fcA,lcA,NULL,NULL,&rnz,flags )
+// allocate VA, IA, JA to rnz elements
+\&.\&.\&.
+// get the  rnz  values then
+errval=rsb_mtx_get_coo_block(mtxAp,  VA,  IA,  JA,frA,lrA,fcA,lcA,NULL,NULL,NULL,flags )
+
+.fi
+.PP
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Expect this function to change soon (e\&.g\&.: have scaling parameters, etc\&.)\&. Contact the author if you intend to use it\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_get_coo\fP, \fBrsb_mtx_get_csr\fP, \fBrsb_mtx_get_rows_sparse\fP, \fBrsb_mtx_get_coo_block\fP, \fBrsb_mtx_get_prec\fP, \fBrsb_mtx_get_nrm\fP, \fBrsb_mtx_get_vec\fP, \fBrsb_file_mtx_get_dims\fP, \fBrsb_mtx_get_vals\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_mtx_get_csr (\fBrsb_type_t\fP typecode, const struct rsb_mtx_t *mtxAp, void *VA, \fBrsb_nnz_idx_t\fP *RP, \fBrsb_coo_idx_t\fP *JA, \fBrsb_flags_t\fP flags)"
+Fills the given arrays with the matrix expressed in the CSR format\&.
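+.PP
+A minimal sketch, assuming a double typed matrix and arrays sized per CSR conventions (\fCRP\fP holding \fCnrA+1\fP entries, \fCVA\fP and \fCJA\fP one entry per nonzero):
+.PP
+.nf
+errval = rsb_mtx_get_csr(RSB_NUMERICAL_TYPE_DOUBLE, mtxAp, VA, RP, JA,
+	RSB_FLAG_C_INDICES_INTERFACE);
+.fi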
+.PP
+\fBParameters:\fP
+.RS 4
+\fItypecode\fP A valid type code for the given (numerical array) input pointer (see \fBmatrix_type_symbols_section\fP)\&. 
+.br
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fIVA,RP,JA\fP Output numerical values (\fCVA\fP) array, compressed row indices (\fCRP\fP) and column indices (\fCJA\fP) arrays\&. 
+.br
+\fIflags\fP Either \fBRSB_FLAG_FORTRAN_INDICES_INTERFACE\fP or \fBRSB_FLAG_C_INDICES_INTERFACE\fP (see \fBflags_section\fP flags section)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&.
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_get_coo\fP, \fBrsb_mtx_get_csr\fP, \fBrsb_mtx_get_rows_sparse\fP, \fBrsb_mtx_get_coo_block\fP, \fBrsb_mtx_get_prec\fP, \fBrsb_mtx_get_nrm\fP, \fBrsb_mtx_get_vec\fP, \fBrsb_file_mtx_get_dims\fP, \fBrsb_mtx_get_vals\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_mtx_get_info (const struct rsb_mtx_t *mtxAp, enum \fBrsb_mif_t\fP miflags, void *minfop)"
+Returns a specified matrix (numerical) property\&.
+.PP
+\fBParameters:\fP
+.RS 4
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fImiflags\fP A valid value of matrix info flags (see \fBrsb_mif_t\fP for valid values)\&. 
+.br
+\fIminfop\fP Pointer to a variable of the right type, according to the matrix info flag specification (see \fBrsb_mif_t\fP)\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+This function has not been thoroughly tested\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_get_info\fP, \fBrsb_mtx_get_info_str\fP, \fBrsb_file_mtx_save\fP, \fBrsb_file_vec_load\fP, \fBrsb_file_mtx_load\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_mtx_get_info_str (const struct rsb_mtx_t *mtxAp, const \fBrsb_char_t\fP *mis, void *minfop, size_tbuflen)"
+Returns a specified matrix (numerical) property, via a string form query\&.
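+.PP
+A plausible sketch, querying the nonzeroes count via its \fBrsb_mif_t\fP identifier in string form (with \fCbuflen=0\fP, so that \fCminfop\fP receives the binary value rather than a string):
+.PP
+.nf
+rsb_nnz_idx_t nnz = 0;
+errval = rsb_mtx_get_info_str(mtxAp,
+	"RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T", &nnz, 0);
+.fi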
+.PP
+\fBParameters:\fP
+.RS 4
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fImis\fP A string specifying any identifier among the matrix info ones\&. See \fBrsb_mif_t\fP for a list of valid identifiers that can be supplied in string form\&. 
+.br
+\fIminfop\fP Pointer to a variable of the right type, according to the matrix info flag specification (see \fBrsb_mif_t\fP)\&. 
+.br
+\fIbuflen\fP If greater than 0, \fCminfop\fP will be treated as a string of length \fCbuflen\fP and filled with the desired value via the standard \fCsnprintf()\fP function\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&.
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_get_info\fP, \fBrsb_mtx_get_info_str\fP, \fBrsb_file_mtx_save\fP, \fBrsb_file_vec_load\fP, \fBrsb_file_mtx_load\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_mtx_get_nrm (const struct rsb_mtx_t *mtxAp, void *Np, enum \fBrsb_extff_t\fP flags)"
+Computes a matrix norm (either the infinity norm, the 2-norm, or the 1-norm)\&.
+.PP
+\fBParameters:\fP
+.RS 4
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fINp\fP Points to a scalar value which will be overwritten with the selected norm\&. 
+.br
+\fIflags\fP Either \fBRSB_EXTF_NORM_ONE\fP or \fBRSB_EXTF_NORM_TWO\fP or \fBRSB_EXTF_NORM_INF\fP\&.
+.RE
+.PP
+In case of a complex type, only the real part will be written to \fCNp\fP\&.
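+.PP
+A minimal sketch, assuming a double typed matrix:
+.PP
+.nf
+double nrmA = 0.0;
+// infinity norm of A
+errval = rsb_mtx_get_nrm(mtxAp, &nrmA, RSB_EXTF_NORM_INF);
+.fi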
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_get_coo\fP, \fBrsb_mtx_get_csr\fP, \fBrsb_mtx_get_rows_sparse\fP, \fBrsb_mtx_get_coo_block\fP, \fBrsb_mtx_get_prec\fP, \fBrsb_mtx_get_nrm\fP, \fBrsb_mtx_get_vec\fP, \fBrsb_file_mtx_get_dims\fP, \fBrsb_mtx_get_vals\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_mtx_get_prec (void *opdp, const struct rsb_mtx_t *mtxAp, \fBrsb_precf_t\fP prec_flags, const void *ipdp)"
+A function computing a simple preconditioner out of \fCmtxAp\fP\&.
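+.PP
+A plausible sketch for the \fBRSB_PRECF_ILU0\fP case, where \fCLU\fP is a hypothetical two pointer output array filled as described for \fCprec_flags\fP below:
+.PP
+.nf
+struct rsb_mtx_t *LU[2] = {NULL, NULL};
+errval = rsb_mtx_get_prec(LU, mtxAp, RSB_PRECF_ILU0, NULL);
+// LU[0] and LU[1] now point to the lower and upper factors
+.fi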
+.PP
+\fBParameters:\fP
+.RS 4
+\fIopdp\fP Preconditioner data pointer (output)\&. 
+.br
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fIprec_flags\fP Valid preconditioner request flags (currently, only \fBRSB_PRECF_ILU0\fP is supported; for it, \fC*opdp\fP will be overwritten with two \fCrsb_mtx_t\fP pointers, respectively a lower and an upper matrix\&.)\&. 
+.br
+\fIipdp\fP Preconditioner data pointer (input)\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&.
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+Matrix should be square, have at least two rows, and have at least one nonzero\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_get_coo\fP, \fBrsb_mtx_get_csr\fP, \fBrsb_mtx_get_rows_sparse\fP, \fBrsb_mtx_get_coo_block\fP, \fBrsb_mtx_get_prec\fP, \fBrsb_mtx_get_nrm\fP, \fBrsb_mtx_get_vec\fP, \fBrsb_file_mtx_get_dims\fP, \fBrsb_mtx_get_vals\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_mtx_get_rows_sparse (\fBrsb_trans_t\fP transA, const void *alphap, const struct rsb_mtx_t *mtxAp, void *VA, \fBrsb_coo_idx_t\fP *IA, \fBrsb_coo_idx_t\fP *JA, \fBrsb_coo_idx_t\fP frA, \fBrsb_coo_idx_t\fP lrA, \fBrsb_nnz_idx_t\fP *rnzp, \fBrsb_flags_t\fP flags)"
+Writes to the given COO arrays the specified submatrix\&.
+.PP
+Invoke with \fCVA\fP,IA,JA set to \fCNULL\fP in order to get the nonzeroes count written to \fC*rnzp\fP, and know how large the arrays should be\&.
+.PP
+IA can be \fCNULL\fP (in this case it will be ignored)\&. The written rows are ordered\&.
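+.PP
+A minimal sketch of the two-step usage, extracting rows \fCfrA\fP to \fClrA\fP with C indices:
+.PP
+.nf
+rsb_nnz_idx_t rnz = 0;
+// first count the nonzeroes in the row range
+errval = rsb_mtx_get_rows_sparse(RSB_TRANSPOSITION_N, NULL, mtxAp,
+	NULL, NULL, NULL, frA, lrA, &rnz, RSB_FLAG_C_INDICES_INTERFACE);
+// allocate VA, IA, JA with rnz elements each, then extract
+errval = rsb_mtx_get_rows_sparse(RSB_TRANSPOSITION_N, NULL, mtxAp,
+	VA, IA, JA, frA, lrA, &rnz, RSB_FLAG_C_INDICES_INTERFACE);
+.fi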
+.PP
+\fBParameters:\fP
+.RS 4
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fIVA,IA,JA\fP Output numerical values (\fCVA\fP) array; input row (\fCIA\fP) and column (\fCJA\fP) indices arrays\&. 
+.br
+\fIfrA,lrA\fP First and last row indices\&. 
+.br
+\fIrnzp\fP A pointer where the number of relevant nonzero elements will be written to\&. 
+.br
+\fIalphap\fP Optional pointer (if \fCNULL\fP, will default to 1) to a numerical value (of the same type as matrix)\&. 
+.br
+\fItransA\fP Transposition parameter for $A$ (see \fBmatrix_transposition_flags_section\fP)\&. 
+.br
+\fIflags\fP Either \fBRSB_FLAG_FORTRAN_INDICES_INTERFACE\fP or \fBRSB_FLAG_C_INDICES_INTERFACE\fP (see \fBflags_section\fP flags section)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_get_coo\fP, \fBrsb_mtx_get_csr\fP, \fBrsb_mtx_get_rows_sparse\fP, \fBrsb_mtx_get_coo_block\fP, \fBrsb_mtx_get_prec\fP, \fBrsb_mtx_get_nrm\fP, \fBrsb_mtx_get_vec\fP, \fBrsb_file_mtx_get_dims\fP, \fBrsb_mtx_get_vals\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_mtx_get_vals (const struct rsb_mtx_t *mtxAp, void *VA, const \fBrsb_coo_idx_t\fP *IA, const \fBrsb_coo_idx_t\fP *JA, \fBrsb_nnz_idx_t\fP nnz, \fBrsb_flags_t\fP flags)"
+Gets the specified matrix elements, if found\&. Please note that unlike \fBrsb_mtx_set_vals\fP, the matrix has to be fully assembled here\&.
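+.PP
+A minimal sketch, reading back two entries of a double typed matrix:
+.PP
+.nf
+double OV[2];
+const rsb_coo_idx_t IA[] = {0, 1}, JA[] = {0, 1};
+// fetch A(0,0) and A(1,1), with C (0 based) indices
+errval = rsb_mtx_get_vals(mtxAp, OV, IA, JA, 2, RSB_FLAG_C_INDICES_INTERFACE);
+.fi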
+.PP
+\fBParameters:\fP
+.RS 4
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fIVA,IA,JA\fP Output numerical values (\fCVA\fP) array; input row (\fCIA\fP) and column (\fCJA\fP) indices arrays\&. 
+.br
+\fInnz\fP The number of nonzeroes in the input arrays\&. 
+.br
+\fIflags\fP Either \fBRSB_FLAG_FORTRAN_INDICES_INTERFACE\fP or \fBRSB_FLAG_C_INDICES_INTERFACE\fP (see \fBflags_section\fP flags section)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&.
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_get_coo\fP, \fBrsb_mtx_get_csr\fP, \fBrsb_mtx_get_rows_sparse\fP, \fBrsb_mtx_get_coo_block\fP, \fBrsb_mtx_get_prec\fP, \fBrsb_mtx_get_nrm\fP, \fBrsb_mtx_get_vec\fP, \fBrsb_file_mtx_get_dims\fP, \fBrsb_mtx_get_vals\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_mtx_get_vec (const struct rsb_mtx_t *mtxAp, void *Dp, enum \fBrsb_extff_t\fP flags)"
+Will overwrite a supplied array with a specific vector quantity\&.
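+.PP
+A minimal sketch, extracting the diagonal of a double typed matrix with \fCnrA\fP rows:
+.PP
+.nf
+double *Dp = malloc(sizeof(double) * nrA);
+errval = rsb_mtx_get_vec(mtxAp, Dp, RSB_EXTF_DIAG);
+.fi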
+.PP
+\fBParameters:\fP
+.RS 4
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fIDp\fP A valid pointer to a numerical vector array $D$\&. 
+.br
+\fIflags\fP Either one of the different extraction filter flags (e\&.g\&.: \fBRSB_EXTF_DIAG\fP, \fBRSB_EXTF_SUMS_ROW\fP, \&.\&.\&.) \&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_get_coo\fP, \fBrsb_mtx_get_csr\fP, \fBrsb_mtx_get_rows_sparse\fP, \fBrsb_mtx_get_coo_block\fP, \fBrsb_mtx_get_prec\fP, \fBrsb_mtx_get_nrm\fP, \fBrsb_mtx_get_vec\fP, \fBrsb_file_mtx_get_dims\fP, \fBrsb_mtx_get_vals\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_mtx_rndr (const char *filename, const struct rsb_mtx_t *mtxAp, \fBrsb_coo_idx_t\fP pmWidth, \fBrsb_coo_idx_t\fP pmHeight, \fBrsb_marf_t\fP rflags)"
+Renders a matrix to a file\&. Currently, only Encapsulated Postscript (EPS) is supported\&.
+.PP
+\fBParameters:\fP
+.RS 4
+\fIfilename\fP The specified output file name (if \fCNULL\fP, will write to standard output)\&. 
+.br
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fIpmWidth\fP Pixel map width (in pixels or points)\&. 
+.br
+\fIpmHeight\fP Pixel map height (in pixels or points)\&. 
+.br
+\fIrflags\fP The color mode; only \fBRSB_MARF_RGB\fP is supported for now (1 byte per channel, 3 channels --- red, green, blue)\&.
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_rndr\fP, \fBrsb_file_mtx_rndr\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_mtx_set_vals (struct rsb_mtx_t *mtxAp, const void *VA, const \fBrsb_coo_idx_t\fP *IA, const \fBrsb_coo_idx_t\fP *JA, \fBrsb_nnz_idx_t\fP nnz, \fBrsb_flags_t\fP flags)"
+Updates the specified matrix elements, if found in the nonzero pattern\&.
+.PP
+In the special case of a matrix in assembly state (that is, one that has been created as empty with \fBrsb_mtx_alloc_from_coo_begin()\fP and not yet assembled with \fBrsb_mtx_alloc_from_coo_end()\fP) all the supplied matrix elements will be accepted, whether already present or not\&.
+.PP
+\fBParameters:\fP
+.RS 4
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fIVA,IA,JA\fP Input numerical values (\fCVA\fP) array; row (\fCIA\fP) and column (\fCJA\fP) input indices arrays\&. 
+.br
+\fInnz\fP The number of nonzeroes in the input arrays\&. 
+.br
+\fIflags\fP Either \fBRSB_FLAG_FORTRAN_INDICES_INTERFACE\fP or \fBRSB_FLAG_C_INDICES_INTERFACE\fP plus either \fBRSB_FLAG_DUPLICATES_SUM\fP (to sum into) or \fBRSB_FLAG_DUPLICATES_KEEP_LAST\fP (to overwrite entries) (see \fBflags_section\fP flags section)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&.
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_upd_vals\fP, \fBrsb_mtx_set_vals\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_mtx_switch_to_coo (struct rsb_mtx_t *mtxAp, void **VAp, \fBrsb_coo_idx_t\fP **IAp, \fBrsb_coo_idx_t\fP **JAp, \fBrsb_flags_t\fP flags)"
+Switches a matrix to COO arrays in place\&.
+.PP
+\fBParameters:\fP
+.RS 4
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fIVAp,IAp,JAp\fP Output numerical values (\fCVAp\fP) array pointer; output row (\fCIAp\fP) and column (\fCJAp\fP) indices arrays pointers\&. 
+.br
+\fIflags\fP Either \fBRSB_FLAG_C_INDICES_INTERFACE\fP or \fBRSB_FLAG_FORTRAN_INDICES_INTERFACE\fP (see \fBflags_section\fP flags section)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&.
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is only valid if \fCmtxAp\fP has been assembled in place (that is, in the arrays that are being reclaimed), so with e\&.g\&.: \fBrsb_mtx_alloc_from_coo_inplace()\fP\&. Please also note that the matrix will get freed internally and so \fCmtxAp\fP will not be usable in any way afterwards\&. 
+.RE
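+.PP
+A minimal sketch, assuming \fCmtxAp\fP was assembled in place as required:
+.PP
+.nf
+void *VA = NULL;
+rsb_coo_idx_t *IA = NULL, *JA = NULL;
+errval = rsb_mtx_switch_to_coo(mtxAp, &VA, &IA, &JA,
+	RSB_FLAG_C_INDICES_INTERFACE);
+// mtxAp must not be used anymore; VA, IA, JA hold the COO data
+.fi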
+.PP
+\fBWarning:\fP
+.RS 4
+This function has not been thoroughly tested\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_switch_to_coo\fP, \fBrsb_mtx_switch_to_csr\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_mtx_switch_to_csr (struct rsb_mtx_t *mtxAp, void **VAp, \fBrsb_coo_idx_t\fP **IAp, \fBrsb_coo_idx_t\fP **JAp, \fBrsb_flags_t\fP flags)"
+Switches the matrix to the CSR format, in-place\&.
+.PP
+\fBParameters:\fP
+.RS 4
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fIVAp,IAp,JAp\fP Output numerical values (\fCVAp\fP) array pointer; output row (\fCIAp\fP) and column (\fCJAp\fP) indices arrays pointers\&. 
+.br
+\fIflags\fP A valid combination of index conversion flags (that is, \fBRSB_FLAG_C_INDICES_INTERFACE\fP and \fBRSB_FLAG_FORTRAN_INDICES_INTERFACE\fP) and other meaningful flags\&. Symmetry flags shall be the same as in the matrix in use, because symmetry expansion may happen otherwise\&. Flags \fBRSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS\fP are forbidden\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&.
+.RE
+.PP
+\fBNote:\fP
+.RS 4
+This function is only valid if \fCmtxAp\fP has been assembled in place (that is, in the arrays that are being reclaimed), so with e\&.g\&.: \fBrsb_mtx_alloc_from_coo_inplace()\fP\&. Please also note that the matrix will get freed internally and so \fCmtxAp\fP will not be usable in any way afterwards\&. 
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+This function has not been thoroughly tested\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_switch_to_coo\fP, \fBrsb_mtx_switch_to_csr\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_mtx_upd_vals (struct rsb_mtx_t *mtxAp, enum \fBrsb_elopf_t\fP elop_flags, const void *omegap)"
+Updates the matrix $A$ by applying either a rowwise or an elemental operation $op$, determined by \fCelop_flags\fP: $ A \leftarrow op(A,\Omega) $\&. If a unary operation is selected, \fComegap\fP can be \fCNULL\fP\&.
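+.PP
+A minimal sketch, scaling every entry of a double typed matrix (assuming the elemental multiply flag \fBRSB_ELOPF_MUL\fP from \fBrsb_elopf_t\fP):
+.PP
+.nf
+// A <- 2.0 * A, elementwise multiplication
+const double omega = 2.0;
+errval = rsb_mtx_upd_vals(mtxAp, RSB_ELOPF_MUL, &omega);
+.fi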
+.PP
+\fBParameters:\fP
+.RS 4
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fIelop_flags\fP Elemental operation specification flags (see \fBrsb_elopf_t\fP for valid choices)\&. 
+.br
+\fIomegap\fP Pointer to a numerical location(s) (of the same type as matrix)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&.
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_mtx_upd_vals\fP, \fBrsb_mtx_set_vals\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_perror (void *stream, \fBrsb_err_t\fP errval)"
+Prints out to the specified \fCstream\fP a string corresponding to the error code (using \fC<stdio\&.h>\fP's \fCfprintf\fP)\&. If \fCstream==NULL\fP, will print out to the default output stream; see \fBRSB_IO_WANT_OUTPUT_STREAM\fP\&.
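+.PP
+A typical usage sketch:
+.PP
+.nf
+if (errval != RSB_ERR_NO_ERROR)
+	rsb_perror(NULL, errval); // report on the default output stream
+.fi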
+.PP
+\fBParameters:\fP
+.RS 4
+\fIstream\fP A \fC\fP(FILE*) pointer, as declared in \fC<stdio\&.h>\fP; can be \fCNULL\fP\&. 
+.br
+\fIerrval\fP A valid error flag value (see \fBrsb_err_t\fP)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_perror\fP, \fBrsb_strerror_r\fP
+.RE
+.PP
+
+.SS "\fBrsb_trans_t\fP rsb_psblas_trans_to_rsb_trans (const charpsbtrans)"
+'Translates' a PSBLAS transposition value character to a \fClibrsb\fP one\&. 
+.br
+ See the PSBLAS library website/documentation for valid input values\&.
+.PP
+\fBParameters:\fP
+.RS 4
+\fIpsbtrans\fP Transposition parameter value valid in the PSBLAS library\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+A valid transposition code; that is, \fBRSB_TRANSPOSITION_N\fP for 'N', \fBRSB_TRANSPOSITION_T\fP for 'T', \fBRSB_TRANSPOSITION_C\fP for 'C' (see \fBmatrix_transposition_flags_section\fP)\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_psblas_trans_to_rsb_trans\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_spmm (\fBrsb_trans_t\fP transA, const void *alphap, const struct rsb_mtx_t *mtxAp, \fBrsb_coo_idx_t\fP nrhs, \fBrsb_flags_t\fP order, const void *Bp, \fBrsb_nnz_idx_t\fP ldB, const void *betap, void *Cp, \fBrsb_nnz_idx_t\fP ldC)"
+Updates a dense matrix with the product of sparse matrix by dense matrix; that is, computes $ C \leftarrow \beta\cdot C + \alpha\cdot opa(A) \cdot B $\&.
+.PP
+$opa( A )=A$ if \fCtransA=\fBRSB_TRANSPOSITION_N\fP\fP; $opa( A )= A ^T$ if \fCtransA=\fBRSB_TRANSPOSITION_T\fP\fP; $opa( A )= A ^H$ if \fCtransA=\fBRSB_TRANSPOSITION_C\fP\fP; If \fC--enable-rsb-num-threads\fP has been specified at configure time, the \fCRSB_NUM_THREADS\fP environment variable will override the number of executing threads specified by \fCOMP_NUM_THREADS\fP\&. (See also \fBRSB_IO_WANT_EXECUTING_THREADS\fP)\&.
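+.PP
+A minimal sketch, assuming double typed operands and previously allocated column major \fCBp\fP and \fCCp\fP arrays:
+.PP
+.nf
+// C <- alpha * A * B + beta * C, with nrhs columns in B and C
+const double alpha = 1.0, beta = 1.0;
+errval = rsb_spmm(RSB_TRANSPOSITION_N, &alpha, mtxAp, nrhs,
+	RSB_FLAG_WANT_COLUMN_MAJOR_ORDER, Bp, ldB, &beta, Cp, ldC);
+.fi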
+.PP
+\fBParameters:\fP
+.RS 4
+\fItransA\fP Transposition parameter for $A$ (see \fBmatrix_transposition_flags_section\fP)\&. 
+.br
+\fIalphap\fP Optional pointer (if \fCNULL\fP, will default to 1) to a numerical value (of the same type as matrix)\&. 
+.br
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fInrhs\fP The number of right hand side vectors (cannot be \fC<1\fP)\&. 
+.br
+\fIorder\fP A flag among \fBRSB_FLAG_WANT_COLUMN_MAJOR_ORDER\fP and \fBRSB_FLAG_WANT_ROW_MAJOR_ORDER\fP\&. For contiguous vector arrays, you probably want \fBRSB_FLAG_WANT_COLUMN_MAJOR_ORDER\fP\&. 
+.br
+\fIBp\fP The input vector array\&. 
+.br
+\fIldB\fP Leading dimension of \fCBp\fP array\&. 
+.br
+\fIbetap\fP Optional pointer (if \fCNULL\fP, will default to 1) to a numerical value\&. 
+.br
+\fICp\fP The output vector array\&. 
+.br
+\fIldC\fP Leading dimension of \fCCp\fP array\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_spmv\fP, \fBrsb_spmm\fP, \fBrsb_tune_spmm\fP
+.RE
+.PP
+
+.SS "struct rsb_mtx_t* rsb_spmsp (\fBrsb_type_t\fP typecode, \fBrsb_trans_t\fP transA, const void *alphap, const struct rsb_mtx_t *mtxAp, \fBrsb_trans_t\fP transB, const void *betap, const struct rsb_mtx_t *mtxBp, \fBrsb_err_t\fP *errvalp)\fC [read]\fP"
+Computes the weighted product of two sparse matrices in a new sparse matrix (also known as the SpGEMM operation): $C \leftarrow \alpha \cdot opa(A) \cdot \beta \cdot opb(B) $\&. Symmetry/Hermitian flags are ignored by this operation\&.
+.PP
+$opa( A )=A$ if \fCtransA=\fBRSB_TRANSPOSITION_N\fP\fP; $opa( A )= A ^T$ if \fCtransA=\fBRSB_TRANSPOSITION_T\fP\fP; $opa( A )= A ^H$ if \fCtransA=\fBRSB_TRANSPOSITION_C\fP\fP; $opb( B )=B$ if \fCtransB=\fBRSB_TRANSPOSITION_N\fP\fP; $opb( B )= B ^T$ if \fCtransB=\fBRSB_TRANSPOSITION_T\fP\fP; $opb( B )= B ^H$ if \fCtransB=\fBRSB_TRANSPOSITION_C\fP\fP;
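+.PP
+A minimal sketch, with default scaling and no transposition (per the Warning below):
+.PP
+.nf
+// C <- A * B, as a newly allocated sparse matrix
+rsb_err_t errval = RSB_ERR_NO_ERROR;
+struct rsb_mtx_t *mtxCp = rsb_spmsp(RSB_NUMERICAL_TYPE_DOUBLE,
+	RSB_TRANSPOSITION_N, NULL, mtxAp, RSB_TRANSPOSITION_N, NULL,
+	mtxBp, &errval);
+.fi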
+.PP
+\fBParameters:\fP
+.RS 4
+\fItypecode\fP A valid type code for the given (numerical array) input pointer (see \fBmatrix_type_symbols_section\fP)\&. 
+.br
+\fItransA\fP Transposition parameter for $A$ (see \fBmatrix_transposition_flags_section\fP)\&. 
+.br
+\fIalphap\fP Optional pointer (if \fCNULL\fP, will default to 1) to a numerical value (of the same type as matrix)\&. 
+.br
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fItransB\fP Transposition parameter for $B$ (see \fBmatrix_transposition_flags_section\fP)\&. 
+.br
+\fIbetap\fP Optional pointer (if \fCNULL\fP, will default to 1) to a numerical value\&. 
+.br
+\fImtxBp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $B$ representation\&. 
+.br
+\fIerrvalp\fP An optional (can be \fCNULL\fP) pointer to \fBrsb_err_t\fP where the error status will be written to\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, a valid pointer (\fCstruct\fP \fCrsb_mtx_t*\fP) to the newly allocated matrix structure; on error, \fCNULL\fP\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Parameters \fCalphap\fP,betap,transA,transB are not yet taken into consideration\&. The following defaults apply: $\alpha=1.0$ and $\beta=1.0$, and \fCtransA=transB=\fBRSB_TRANSPOSITION_N\fP\fP\&.
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_spmsp_to_dense\fP, \fBrsb_sppsp\fP, \fBrsb_spmsp\fP, \fBrsb_mtx_add_to_dense\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_spmsp_to_dense (\fBrsb_type_t\fP typecode, \fBrsb_trans_t\fP transA, const void *alphap, const struct rsb_mtx_t *mtxAp, \fBrsb_trans_t\fP transB, const void *betap, const struct rsb_mtx_t *mtxBp, \fBrsb_nnz_idx_t\fP ldC, \fBrsb_nnz_idx_t\fP nrC, \fBrsb_nnz_idx_t\fP ncC, \fBrsb_bool_t\fP rowmajorC, void *Cp)"
+Computes the weighted product of two sparse matrices, writing the result into a dense matrix: $C \leftarrow \alpha \cdot opa(A) \cdot \beta \cdot opb(B) $\&.
+.PP
+$opa( A )=A$ if \fCtransA=\fBRSB_TRANSPOSITION_N\fP\fP; $opa( A )= A ^T$ if \fCtransA=\fBRSB_TRANSPOSITION_T\fP\fP; $opa( A )= A ^H$ if \fCtransA=\fBRSB_TRANSPOSITION_C\fP\fP; $opb( B )=B$ if \fCtransB=\fBRSB_TRANSPOSITION_N\fP\fP; $opb( B )= B ^T$ if \fCtransB=\fBRSB_TRANSPOSITION_T\fP\fP; $opb( B )= B ^H$ if \fCtransB=\fBRSB_TRANSPOSITION_C\fP\fP;
+.PP
+\fBParameters:\fP
+.RS 4
+\fItypecode\fP A valid type code for the given (numerical array) input pointer (see \fBmatrix_type_symbols_section\fP)\&. 
+.br
+\fItransA\fP Transposition parameter for $A$ (see \fBmatrix_transposition_flags_section\fP)\&. 
+.br
+\fIalphap\fP Optional pointer (if \fCNULL\fP, will default to 1) to a numerical value (of the same type as matrix)\&. 
+.br
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fItransB\fP Transposition parameter for $B$ (see \fBmatrix_transposition_flags_section\fP)\&. 
+.br
+\fIbetap\fP Optional pointer (if \fCNULL\fP, will default to 1) to a numerical value\&. 
+.br
+\fImtxBp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $B$ representation\&. 
+.br
+\fIldC\fP Leading dimension of \fCCp\fP array\&. 
+.br
+\fInrC,ncC\fP The number of rows and columns for the dense matrix $C$\&. 
+.br
+\fIrowmajorC\fP \fBRSB_BOOL_TRUE\fP if the dense matrix $C$ is considered stored as row major, or \fBRSB_BOOL_FALSE\fP if as column major\&. 
+.br
+\fICp\fP Array representing the dense matrix $C$\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&.
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+Parameters \fCalphap\fP,betap,transA,transB are not yet taken into consideration\&. The following defaults apply: $\alpha=1.0$ and $\beta=1.0$, and \fCtransA=transB=\fBRSB_TRANSPOSITION_N\fP\fP\&.
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_spmsp_to_dense\fP, \fBrsb_sppsp\fP, \fBrsb_spmsp\fP, \fBrsb_mtx_add_to_dense\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_spmv (\fBrsb_trans_t\fP transA, const void *alphap, const struct rsb_mtx_t *mtxAp, const void *Xp, \fBrsb_coo_idx_t\fP incX, const void *betap, void *Yp, \fBrsb_coo_idx_t\fP incY)"
+Multiplies a sparse matrix $opa(A)$ by a vector $X$, updating vector $Y$\&. 
+.br
+ Computes $Y \leftarrow \beta Y + \alpha \cdot opa(A) \cdot X $\&. 
+.br
+ It is not allowed to supply the same \fCXp\fP and \fCYp\fP (that is, \fCXp==Yp\fP)\&. 
+.br
+.PP
+$opa( A )=A$ if \fCtransA=\fBRSB_TRANSPOSITION_N\fP\fP; $opa( A )= A ^T$ if \fCtransA=\fBRSB_TRANSPOSITION_T\fP\fP; $opa( A )= A ^H$ if \fCtransA=\fBRSB_TRANSPOSITION_C\fP\fP; If \fC--enable-rsb-num-threads\fP has been specified at configure time, the \fCRSB_NUM_THREADS\fP environment variable will override the number of executing threads specified by \fCOMP_NUM_THREADS\fP\&. (See also \fBRSB_IO_WANT_EXECUTING_THREADS\fP)\&.
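+.PP
+A minimal sketch, assuming double typed operands and unit strides:
+.PP
+.nf
+// Y <- 1.0 * A * X + 0.0 * Y
+const double alpha = 1.0, beta = 0.0;
+errval = rsb_spmv(RSB_TRANSPOSITION_N, &alpha, mtxAp, Xp, 1, &beta, Yp, 1);
+.fi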
+.PP
+\fBParameters:\fP
+.RS 4
+\fItransA\fP Transposition parameter for $A$ (see \fBmatrix_transposition_flags_section\fP)\&. 
+.br
+\fIalphap\fP Optional pointer (if \fCNULL\fP, will default to 1) to a numerical value (of the same type as matrix)\&. 
+.br
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fIXp\fP The input vector array\&. 
+.br
+\fIincX\fP Spacing of vector elements in each input vector array (>=1)\&. 
+.br
+\fIbetap\fP Optional pointer (if \fCNULL\fP, will default to 1) to a numerical value\&. 
+.br
+\fIYp\fP The output array vector\&. 
+.br
+\fIincY\fP Spacing of vector elements in each output vector array (>=1)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_spmv\fP, \fBrsb_spmm\fP, \fBrsb_tune_spmm\fP
+.RE
+.PP
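+For illustration, a minimal call sketch follows (the hypothetical \fCmtxAp\fP is assumed to be a valid \fCdouble\fP matrix, and \fCXp\fP, \fCYp\fP suitably sized arrays):
+.PP
+.nf
+const double alpha = 1.0, beta = 1.0;
+// Y <- beta * Y + alpha * A * X, with unit strides:
+errval = rsb_spmv(RSB_TRANSPOSITION_N, &alpha, mtxAp, Xp, 1, &beta, Yp, 1);
+.fi
+.PP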
+
+.SS "struct rsb_mtx_t* rsb_sppsp (\fBrsb_type_t\fP typecode, \fBrsb_trans_t\fP transA, const void *alphap, const struct rsb_mtx_t *mtxAp, \fBrsb_trans_t\fP transB, const void *betap, const struct rsb_mtx_t *mtxBp, \fBrsb_err_t\fP *errvalp)\fC [read]\fP"
+Computes the weighted sum of two sparse matrices, returning a new matrix: $C \leftarrow \alpha\cdot opa(A) + \beta\cdot opb(B) $\&. Symmetry flags are ignored in this operation\&.
+.PP
+$opa( A )=A$ if \fCtransA=\fBRSB_TRANSPOSITION_N\fP\fP; $opa( A )= A ^T$ if \fCtransA=\fBRSB_TRANSPOSITION_T\fP\fP; $opa( A )= A ^H$ if \fCtransA=\fBRSB_TRANSPOSITION_C\fP\fP; $opb( B )=B$ if \fCtransB=\fBRSB_TRANSPOSITION_N\fP\fP; $opb( B )= B ^T$ if \fCtransB=\fBRSB_TRANSPOSITION_T\fP\fP; $opb( B )= B ^H$ if \fCtransB=\fBRSB_TRANSPOSITION_C\fP\fP;
+.PP
+\fBParameters:\fP
+.RS 4
+\fItypecode\fP A valid type code for the given (numerical array) input pointer (see \fBmatrix_type_symbols_section\fP)\&. 
+.br
+\fItransA\fP Transposition parameter for $A$ (see \fBmatrix_transposition_flags_section\fP)\&. 
+.br
+\fIalphap\fP Optional pointer (if \fCNULL\fP, will default to 1) to a numerical value (of the same type as matrix)\&. 
+.br
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fItransB\fP Transposition parameter for $B$ (see \fBmatrix_transposition_flags_section\fP)\&. 
+.br
+\fIbetap\fP Optional pointer (if \fCNULL\fP, will default to 1) to a numerical value\&. 
+.br
+\fImtxBp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $B$ representation\&. 
+.br
+\fIerrvalp\fP An optional (can be \fCNULL\fP) pointer to \fBrsb_err_t\fP where the error status will be written to\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+On success, a valid pointer (\fCstruct\fP \fCrsb_mtx_t*\fP) to the newly allocated matrix structure; on error, \fCNULL\fP\&.
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_spmsp_to_dense\fP, \fBrsb_sppsp\fP, \fBrsb_spmsp\fP, \fBrsb_mtx_add_to_dense\fP
+.RE
+.PP
+\fBWarning:\fP
+.RS 4
+This function has not been thoroughly tested\&. 
+.PP
+This function is not optimized\&.
+.RE
+.PP
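+For illustration, a minimal call sketch follows (the hypothetical \fCmtxAp\fP and \fCmtxBp\fP are assumed to be valid \fCdouble\fP matrices of matching dimensions):
+.PP
+.nf
+const double alpha = 1.0, beta = 1.0;
+struct rsb_mtx_t *mtxCp = NULL;
+// C <- alpha * A + beta * B, as a newly allocated matrix:
+mtxCp = rsb_sppsp(RSB_NUMERICAL_TYPE_DOUBLE, RSB_TRANSPOSITION_N, &alpha, mtxAp,
+        RSB_TRANSPOSITION_N, &beta, mtxBp, &errval);
+if( mtxCp == NULL )
+        rsb_perror(NULL, errval);
+.fi
+.PP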
+
+.SS "\fBrsb_err_t\fP rsb_spsm (\fBrsb_trans_t\fP transT, const void *alphap, const struct rsb_mtx_t *mtxTp, \fBrsb_coo_idx_t\fP nrhs, \fBrsb_flags_t\fP order, const void *betap, const void *Bp, \fBrsb_nnz_idx_t\fP ldB, void *Cp, \fBrsb_nnz_idx_t\fP ldC)"
+Computes $C \leftarrow \alpha \cdot opt( T )^{-1} \cdot B $, with upper or lower triangular $T$\&.
+.PP
+$opt( T )=T$ if \fCtransT=\fBRSB_TRANSPOSITION_N\fP\fP; $opt( T )= T ^T$ if \fCtransT=\fBRSB_TRANSPOSITION_T\fP\fP; $opt( T )= T ^H$ if \fCtransT=\fBRSB_TRANSPOSITION_C\fP\fP;
+.PP
+\fBParameters:\fP
+.RS 4
+\fItransT\fP Transposition parameter for $T$ (see \fBmatrix_transposition_flags_section\fP)\&. 
+.br
+\fIalphap\fP Optional pointer (if \fCNULL\fP, will default to 1) to a numerical value (of the same type as matrix)\&. 
+.br
+\fImtxTp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $T$ representation\&. The matrix must be triangular; that is, it must have been allocated with either \fBRSB_FLAG_LOWER_TRIANGULAR\fP or \fBRSB_FLAG_UPPER_TRIANGULAR\fP flags\&. 
+.br
+\fInrhs\fP The number of right hand side vectors (cannot be \fC<1\fP)\&. 
+.br
+\fIorder\fP A flag among \fBRSB_FLAG_WANT_COLUMN_MAJOR_ORDER\fP and \fBRSB_FLAG_WANT_ROW_MAJOR_ORDER\fP\&. For contiguous vector arrays, you probably want \fBRSB_FLAG_WANT_COLUMN_MAJOR_ORDER\fP\&. 
+.br
+\fIbetap\fP Optional pointer (if \fCNULL\fP, will default to 1) to a numerical value\&. 
+.br
+\fIBp\fP The input vector array\&. 
+.br
+\fIldB\fP Leading dimension of \fCBp\fP array\&. 
+.br
+\fICp\fP The output vector array\&. 
+.br
+\fIldC\fP Leading dimension of \fCCp\fP array\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_spsm\fP, \fBrsb_spsv\fP, \fBrsb_tune_spsm\fP
+.RE
+.PP
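+For illustration, a minimal call sketch follows (the hypothetical \fCmtxTp\fP is assumed to be a valid triangular \fCdouble\fP matrix of order \fCn\fP, and \fCBp\fP, \fCCp\fP to point to \fCn*nrhs\fP column major arrays):
+.PP
+.nf
+const double alpha = 1.0;
+// solve T * C = alpha * B for nrhs right hand sides (betap left NULL):
+errval = rsb_spsm(RSB_TRANSPOSITION_N, &alpha, mtxTp, nrhs,
+        RSB_FLAG_WANT_COLUMN_MAJOR_ORDER, NULL, Bp, n, Cp, n);
+.fi
+.PP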
+
+.SS "\fBrsb_err_t\fP rsb_spsv (\fBrsb_trans_t\fP transT, const void *alphap, const struct rsb_mtx_t *mtxTp, const void *Xp, \fBrsb_coo_idx_t\fP incX, void *Yp, \fBrsb_coo_idx_t\fP incY)"
+Computes $Y \leftarrow \alpha \cdot opt( T )^{-1} \cdot X $, with upper or lower triangular $T$\&. It is allowed to supply same \fCXp\fP and \fCYp\fP (that is, \fCXp==Yp\fP)\&.
+.PP
+$opt( T )=T$ if \fCtransT=\fBRSB_TRANSPOSITION_N\fP\fP; $opt( T )= T ^T$ if \fCtransT=\fBRSB_TRANSPOSITION_T\fP\fP; $opt( T )= T ^H$ if \fCtransT=\fBRSB_TRANSPOSITION_C\fP\fP;
+.PP
+\fBParameters:\fP
+.RS 4
+\fItransT\fP Transposition parameter for $T$ (see \fBmatrix_transposition_flags_section\fP)\&. 
+.br
+\fIalphap\fP Optional pointer (if \fCNULL\fP, will default to 1) to a numerical value (of the same type as matrix)\&. 
+.br
+\fImtxTp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $T$ representation\&. The matrix must be triangular; that is, it must have been allocated with either \fBRSB_FLAG_LOWER_TRIANGULAR\fP or \fBRSB_FLAG_UPPER_TRIANGULAR\fP flags\&. 
+.br
+\fIXp\fP The input vector array\&. 
+.br
+\fIincX\fP Spacing of vector elements in each input vector array (>=1)\&. 
+.br
+\fIYp\fP The output array vector\&. 
+.br
+\fIincY\fP Spacing of vector elements in each output vector array (>=1)\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&. If \fC--enable-zero-division-checks-on-solve\fP was specified at configure time, attempts to solve a triangular matrix with zeroes on a diagonal will fail\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_spsm\fP, \fBrsb_spsv\fP, \fBrsb_tune_spsm\fP
+.RE
+.PP
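+For illustration, a minimal in place call sketch follows (the hypothetical \fCmtxTp\fP is assumed to be a valid triangular \fCdouble\fP matrix, and \fCYp\fP a suitably sized array; \fCXp==Yp\fP is allowed here):
+.PP
+.nf
+const double alpha = 1.0;
+// Y <- alpha * inv(T) * Y, in place, with unit strides:
+errval = rsb_spsv(RSB_TRANSPOSITION_N, &alpha, mtxTp, Yp, 1, Yp, 1);
+.fi
+.PP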
+
+.SS "\fBrsb_err_t\fP rsb_strerror_r (\fBrsb_err_t\fP errval, \fBrsb_char_t\fP *buf, size_tbuflen)"
+Writes a textual description of an error code in the specified string buffer\&. No more than buflen characters will be written (comprehensive of the terminting \fCNUL\fP character)\&.
+.PP
+\fBParameters:\fP
+.RS 4
+\fIerrval\fP A valid error flag value (see \fBrsb_err_t\fP)\&. 
+.br
+\fIbuf\fP A valid string buffer pointer where to write to\&. 
+.br
+\fIbuflen\fP The string buffer length\&.
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_perror\fP, \fBrsb_strerror_r\fP
+.RE
+.PP
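+For illustration, a minimal usage sketch follows:
+.PP
+.nf
+char errbuf[256];
+// fetch a textual description of errval, then print it:
+if( rsb_strerror_r(errval, errbuf, sizeof(errbuf)) == RSB_ERR_NO_ERROR )
+        puts(errbuf);
+.fi
+.PP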
+
+.SS "\fBrsb_time_t\fP rsb_time (void)"
+Returns the current time in seconds\&. This function is meant to be used for computing wall clock time intervals (e\&.g\&.: for benchmarking purposes)\&. The user should not rely on this function for absolute time computations\&.
+.PP
+\fBReturns:\fP
+.RS 4
+A value for the current time, in seconds\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_time\fP, \fBrsb_coo_sort\fP
+.RE
+.PP
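+For illustration, the usual wall clock interval idiom (as also used by the example programs):
+.PP
+.nf
+rsb_time_t dt;
+dt = - rsb_time();
+// ... operation to be timed ...
+dt += rsb_time();
+// dt now holds the elapsed seconds
+.fi
+.PP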
+
+.SS "\fBrsb_err_t\fP rsb_tune_spmm (struct rsb_mtx_t **mtxOpp, \fBrsb_real_t\fP *sfp, \fBrsb_int_t\fP *tnp, \fBrsb_int_t\fP maxr, \fBrsb_time_t\fP maxt, \fBrsb_trans_t\fP transA, const void *alphap, const struct rsb_mtx_t *mtxAp, \fBrsb_coo_idx_t\fP nrhs, \fBrsb_flags_t\fP order, const void *Bp, \fBrsb_nnz_idx_t\fP ldB, const void *betap, void *Cp, \fBrsb_nnz_idx_t\fP ldC)"
+An auto-tuner: optimizes either the matrix instance, the thread count or both for the \fBrsb_spmm\fP operation\&.
+.PP
+The tuner works by evaluating different matrix instances and thread count variants\&. The instance yielding the fastest operation time will be retained and returned to the user in \fC*mtxOpp\fP\&. If \fCnrhs==1\fP and \fCorder==\fBRSB_FLAG_WANT_COLUMN_MAJOR_ORDER\fP\fP, unitary stride vectors are assumed\&. In case of error, the original input matrix shall be unaffected\&. It is possible to specify the leading dimensions of \fCBp\fP,Cp implicitly, with \fCldB=0\fP and \fCldC=0\fP: in this cas [...]
+.PP
+\fBParameters:\fP
+.RS 4
+\fImtxOpp\fP Optimal matrix structure pointer will be assigned to \fC*mtxOpp\fP (it may occur that *mtxOpp==mtxAp on output)\&. If \fCmtxOpp\fP is \fCNULL\fP then no data structure optimization will be attempted; rather, only the optimal threads search will occur (\fCtnp\fP must not be \fCNULL\fP then)\&. 
+.br
+\fIsfp\fP Achieved speedup factor will be written to \fC*sfp\fP (unless \fCsfp==NULL\fP)\&. 
+.br
+\fItnp\fP If \fCtnp==NULL\fP on input, the current thread count will be utilized\&. Otherwise, if \fC*tnp>0\fP, then *tnp will be used as a first suggestion in the optimal thread count search\&. If \fCtnp!=NULL\fP, on output \fC*tnp\fP will be set to contain the optimal number of threads\&. Then, the user is expected to set this number of threads using e\&.g\&.: \fC\fBRSB_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_EXECUTING_THREADS,tnp,errval)\fP\fP\&. Please note that this will affect the whole l [...]
+.br
+\fImaxr\fP Maximum number of optimizer rounds\&. If \fC0\fP, it will be decided automatically; if negative, it will be treated as 1\&. Max is \fBRSB_CONST_MAX_TUNING_ROUNDS\fP\&. 
+.br
+\fImaxt\fP Maximum time (in seconds) per optimization round (does not take into account conversion time)\&. If \fCmaxt<0\&.0\fP is provided, \fC-ceil\fP(maxt) will be interpreted as the number of iterations to check for each operation time sample\&. If \fCmaxt==0\&.0\fP is provided, a default choice will be made instead\&. 
+.br
+\fItransA\fP Transposition parameter for $A$ (see \fBmatrix_transposition_flags_section\fP)\&. 
+.br
+\fIalphap\fP Optional pointer (if \fCNULL\fP, will default to 1) to a numerical value (of the same type as matrix)\&. 
+.br
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fInrhs\fP The number of right hand side vectors (cannot be \fC<1\fP)\&. 
+.br
+\fIorder\fP A flag among \fBRSB_FLAG_WANT_COLUMN_MAJOR_ORDER\fP and \fBRSB_FLAG_WANT_ROW_MAJOR_ORDER\fP\&. For contiguous vector arrays, you probably want \fBRSB_FLAG_WANT_COLUMN_MAJOR_ORDER\fP\&. 
+.br
+\fIBp\fP The input vector array\&. If \fCNULL\fP, a temporary, internally allocated copy will be used\&. 
+.br
+\fIldB\fP Leading dimension of \fCBp\fP array\&. 
+.br
+\fIbetap\fP Optional pointer (if \fCNULL\fP, will default to 1) to a numerical value\&. 
+.br
+\fICp\fP The output vector array\&. If \fCNULL\fP, a temporary, internally allocated copy will be used\&. 
+.br
+\fIldC\fP Leading dimension of \fCCp\fP array\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&.
+.RE
+.PP
+Examples: 
+.PP
+.nf
+// obtain best thread count for mtxAp:
+errval = rsb_tune_spmm(NULL  ,&sf,&tn ,maxr,maxt,transA,&alpha,mtxAp,nrhs,order,Bp,ldB,&beta,Cp,ldC);
+
+// obtain best thread count for mtxAp; Bp and Cp will be allocated by the tuner:
+errval = rsb_tune_spmm(NULL  ,&sf,&tn ,maxr,maxt,transA,&alpha,mtxAp,nrhs,order,NULL,0,&beta,NULL,0);
+
+// obtain best clone of mtxAp (for current thread count):
+assert(mtxOp == NULL && mtxAp != NULL);
+errval = rsb_tune_spmm(&mtxOp,&sf,NULL,maxr,maxt,transA,&alpha,mtxAp,nrhs,order,Bp,ldB,&beta,Cp,ldC);
+
+// obtain best clone of mtxAp and best thread count:
+assert(mtxOp == NULL && mtxAp != NULL);
+errval = rsb_tune_spmm(&mtxOp,&sf,&tn ,maxr,maxt,transA,&alpha,mtxAp,nrhs,order,Bp,ldB,&beta,Cp,ldC);
+
+// replace mtxAp with best clone (if any):
+errval = rsb_tune_spmm(&mtxAp,&sf,NULL,maxr,maxt,transA,&alpha,NULL ,nrhs,order,Bp,ldB,&beta,Cp,ldC);
+
+// replace mtxAp with best clone (if any) and obtain best thread count:
+errval = rsb_tune_spmm(&mtxAp,&sf,&tn ,maxr,maxt,transA,&alpha,NULL ,nrhs,order,Bp,ldB,&beta,Cp,ldC);
+
+// illegal call:
+assert(mtxOp != NULL && mtxAp != NULL);
+errval = rsb_tune_spmm(&mtxOp,&sf,&tn ,maxr,maxt,transA,&alpha,mtxAp,nrhs,order,Bp,ldB,&beta,Cp,ldC);
+
+.fi
+.PP
+.PP
+\fBWarning:\fP
+.RS 4
+This function is still experimental\&. In case of error, although the matrix shall be unaffected, the library status may be affected (e\&.g\&.: execution thread count, default matrix subdivision)\&. 
+.RE
+.PP
+\fBTodo\fP
+.RS 4
+Autotuning functionality shall improve considerably in the future\&. Support for lightweight, threads-only optimization is needed\&. Strided vectors may be supported as well\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_spmv\fP, \fBrsb_spmm\fP, \fBrsb_tune_spmm\fP
+.RE
+.PP
+
+.SS "\fBrsb_err_t\fP rsb_tune_spsm (struct rsb_mtx_t **mtxOpp, \fBrsb_real_t\fP *sfp, \fBrsb_int_t\fP *tnp, \fBrsb_int_t\fP maxr, \fBrsb_time_t\fP maxt, \fBrsb_trans_t\fP transA, const void *alphap, const struct rsb_mtx_t *mtxAp, \fBrsb_coo_idx_t\fP nrhs, \fBrsb_flags_t\fP order, const void *Bp, \fBrsb_nnz_idx_t\fP ldB, const void *betap, void *Cp, \fBrsb_nnz_idx_t\fP ldC)"
+An auto-tuner: optimizes either the matrix instance, the thread count or both for the \fBrsb_spsm\fP operation\&.
+.PP
+The tuner works by evaluating different matrix instances and thread count variants\&. The instance yielding the fastest operation time will be retained and returned to the user in \fC*mtxOpp\fP\&. If \fCnrhs==1\fP and \fCorder==\fBRSB_FLAG_WANT_COLUMN_MAJOR_ORDER\fP\fP, unitary stride vectors are assumed\&. In case of error, the original input matrix shall be unaffected\&. It is possible to specify the leading dimensions of \fCBp\fP,Cp implicitly, with \fCldB=0\fP and \fCldC=0\fP: in this cas [...]
+.PP
+\fBParameters:\fP
+.RS 4
+\fImtxOpp\fP Optimal matrix structure pointer will be assigned to \fC*mtxOpp\fP (it may occur that *mtxOpp==mtxAp on output)\&. If \fCmtxOpp\fP is \fCNULL\fP then no data structure optimization will be attempted; rather, only the optimal threads search will occur (\fCtnp\fP must not be \fCNULL\fP then)\&. 
+.br
+\fIsfp\fP Achieved speedup factor will be written to \fC*sfp\fP (unless \fCsfp==NULL\fP)\&. 
+.br
+\fItnp\fP If \fCtnp==NULL\fP on input, the current thread count will be utilized\&. Otherwise, if \fC*tnp>0\fP, then *tnp will be used as a first suggestion in the optimal thread count search\&. If \fCtnp!=NULL\fP, on output \fC*tnp\fP will be set to contain the optimal number of threads\&. Then, the user is expected to set this number of threads using e\&.g\&.: \fC\fBRSB_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_EXECUTING_THREADS,tnp,errval)\fP\fP\&. Please note that this will affect the whole l [...]
+.br
+\fImaxr\fP Maximum number of optimizer rounds\&. If \fC0\fP, it will be decided automatically; if negative, it will be treated as 1\&. Max is \fBRSB_CONST_MAX_TUNING_ROUNDS\fP\&. 
+.br
+\fImaxt\fP Maximum time (in seconds) per optimization round (does not take into account conversion time)\&. If \fCmaxt<0\&.0\fP is provided, \fC-ceil\fP(maxt) will be interpreted as the number of iterations to check for each operation time sample\&. If \fCmaxt==0\&.0\fP is provided, a default choice will be made instead\&. 
+.br
+\fItransA\fP Transposition parameter for $A$ (see \fBmatrix_transposition_flags_section\fP)\&. 
+.br
+\fIalphap\fP Optional pointer (if \fCNULL\fP, will default to 1) to a numerical value (of the same type as matrix)\&. 
+.br
+\fImtxAp\fP Valid \fCrsb_mtx_t\fP pointer to matrix $A$ representation\&. 
+.br
+\fInrhs\fP The number of right hand side vectors (cannot be \fC<1\fP)\&. 
+.br
+\fIorder\fP A flag among \fBRSB_FLAG_WANT_COLUMN_MAJOR_ORDER\fP and \fBRSB_FLAG_WANT_ROW_MAJOR_ORDER\fP\&. For contiguous vector arrays, you probably want \fBRSB_FLAG_WANT_COLUMN_MAJOR_ORDER\fP\&. 
+.br
+\fIBp\fP The input vector array\&. If \fCNULL\fP, a temporary, internally allocated copy will be used\&. 
+.br
+\fIldB\fP Leading dimension of \fCBp\fP array\&. 
+.br
+\fIbetap\fP Optional pointer (if \fCNULL\fP, will default to 1) to a numerical value\&. 
+.br
+\fICp\fP The output vector array\&. If \fCNULL\fP, a temporary, internally allocated copy will be used\&. 
+.br
+\fIldC\fP Leading dimension of \fCCp\fP array\&. 
+.RE
+.PP
+\fBReturns:\fP
+.RS 4
+\fBRSB_ERR_NO_ERROR\fP on correct operation, an error code otherwise\&. You can use \fBrsb_strerror_r()\fP or \fBrsb_perror()\fP to get more information about the error\&.
+.RE
+.PP
+If \fC--enable-zero-division-checks-on-solve\fP was specified at configure time, attempts to solve a triangular matrix with zeroes on a diagonal will fail\&. 
+.PP
+\fBWarning:\fP
+.RS 4
+This function is still experimental\&. In case of error, although the matrix shall be unaffected, the library status may be affected (e\&.g\&.: execution thread count, default matrix subdivision)\&. 
+.RE
+.PP
+\fBTodo\fP
+.RS 4
+Autotuning functionality shall improve considerably in the future\&. Support for lightweight, threads-only optimization is needed\&. Strided vectors may be supported as well\&. 
+.RE
+.PP
+\fBSee Also:\fP
+.RS 4
+\fBrsb_spsm\fP, \fBrsb_spsv\fP, \fBrsb_tune_spsm\fP 
+.PP
+\fBrsb_tune_spmm\fP
+.RE
+.PP
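+By analogy with the \fBrsb_tune_spmm\fP examples above, a minimal sketch (same argument conventions assumed, with a triangular \fCmtxAp\fP):
+.PP
+.nf
+// obtain best thread count for triangular solve with mtxAp:
+errval = rsb_tune_spsm(NULL, &sf, &tn, maxr, maxt, transA, &alpha, mtxAp, nrhs, order, Bp, ldB, &beta, Cp, ldC);
+.fi
+.PP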
+
+.SH "Author"
+.PP 
+librsb was written by Michele Martone; this documentation has been generated by Doxygen.
+.SH "SEE ALSO"
+.B rsb-examples
+.B rsb.h
+.B rsb-spblas.h
diff --git a/doc/man/rsbench.3 b/doc/man/rsbench.3
new file mode 100644
index 0000000..d912da8
--- /dev/null
+++ b/doc/man/rsbench.3
@@ -0,0 +1,76 @@
+.\" DO NOT MODIFY THIS FILE!  It was generated by help2man 1.40.10.
+.TH RSBENCH "1" "September 2016" "rsbench version: 1.2.0" "User Commands"
+.SH NAME
+rsbench \- manual page for rsbench version: 1.2.0
+.SH SYNOPSIS
+.B rsbench
+[\fIOPTIONS\fR]
+.br
+.B rsbench
+[ \fI-o OPCODE\fR] [ \fI-O {subprogram-code}\fR] [ \fI{subprogram-specific-arguments} \fR]
+.SH DESCRIPTION
+rsbench is a Swiss Army knife for testing the library's functionality and performance.
+.PP
+        
+.IP
+Choose {subprogram\-code} among:
+r for the reference benchmark (will produce a machine specific file)
+c for the complete benchmark
+e for the matrix experimentation code
+d for a single matrix dumpout
+b for the (current, going to be obsoleted) benchmark
+t for some matrix construction tests
+o obsolete, will soon be removed
+.IP
+{subprogram\-specific\-arguments} will be available from the subprograms.
+e.g.: rsbench      \fB\-O\fR b \fB\-h\fR   will show the current benchmark subprogram's options
+e.g.: rsbench \fB\-o\fR a \fB\-O\fR b \fB\-h\fR   will show the spmv     benchmark subprogram's options
+e.g.: rsbench \fB\-o\fR n \fB\-O\fR b \fB\-h\fR   will show the negation benchmark subprogram's options
+.IP
+The default {subprogram\-code} is 'b'
+.IP
+With OPCODE among 'atinS'
+.SS "\&../rsbench  where OPTIONS are taken from :"
+.HP
+\fB\-h\fR              \fB\-\-help\fR
+.HP
+\fB\-o\fR              \fB\-\-matrix\-operation\fR <arg>
+.HP
+\fB\-O\fR              \fB\-\-subprogram\-operation\fR <arg>
+.HP
+\fB\-I\fR              \fB\-\-information\fR
+.HP
+\fB\-C\fR              \fB\-\-configuration\fR
+.HP
+\fB\-H\fR              \fB\-\-hardware\-counters\fR
+.HP
+\fB\-e\fR              \fB\-\-experiments\fR
+.HP
+\fB\-v\fR              \fB\-\-version\fR
+.HP
+\fB\-B\fR              \fB\-\-blas\-testing\fR
+.HP
+\fB\-Q\fR              \fB\-\-quick\-blas\-testing\fR <arg>
+.HP
+\fB\-E\fR              \fB\-\-error\-testing\fR <arg>
+.HP
+\fB\-F\fR              \fB\-\-fp\-bench\fR
+.TP
+\fB\-t\fR              \fB\-\-transpose\-test\fR
+\fB\-\-limits\-testing\fR
+.HP
+\fB\-G\fR              \fB\-\-guess\-blocking\fR
+.TP
+\fB\-g\fR              \fB\-\-generate\-matrix\fR
+\fB\-\-plot\-matrix\fR
+\fB\-\-matrix\-ls\fR
+\fB\-\-read\-performance\-record\fR <arg>
+\fB\-\-help\-read\-performance\-record\fR
+.PP
+Arguments to \fB\-\-want\-autotune\fR take the format "Ss[Xx[Tt[V[V]]]]", where S is the autotuning time in seconds, X is the number of tries, T is the number of starting threads, and V can be either q for quiet autotuning or v for verbose autotuning (can be specified twice). Valid examples: 3.0s2x4tv, 3.0s2x0tq, 3.0s, 2.0s10x. See the documentation of rsb_tune_spmm for a full explanation of these parameters' role in auto\-tuning.
+.SH AUTHOR
+Written by michelemartone_AT_users_DOT_sourceforge_DOT_net.
+.SH "REPORTING BUGS"
+Report bugs to michelemartone_AT_users_DOT_sourceforge_DOT_net.
+.SH COPYRIGHT
+Copyright \(co 2008\-2016 Michele Martone.
diff --git a/examples/Makefile.am b/examples/Makefile.am
new file mode 100644
index 0000000..0b4aaa2
--- /dev/null
+++ b/examples/Makefile.am
@@ -0,0 +1,83 @@
+
+subdir=examples
+
+#include $(top_srcdir)/Makefile
+EXTRA_DIST=$(srcdir)/pd.mtx $(srcdir)/vf.mtx $(srcdir)/make.sh $(srcdir)/benchex.sh
+EXPSOURCES_RSB=$(srcdir)/hello.c $(srcdir)/transpose.c $(srcdir)/power.c $(srcdir)/autotune.c
+EXPSOURCES_NSB=$(srcdir)/hello-spblas.c $(srcdir)/io-spblas.c
+EXPSOURCES_FEX=$(srcdir)/fortran.F90 $(srcdir)/fortran_rsb_fi.F90
+EXPSOURCES=$(EXPSOURCES_RSB) $(EXPSOURCES_NSB) $(EXPSOURCES_FEX)
+EXSTUFF= $(EXTRA_DIST) $(EXPSOURCES) 
+LIBRSB_LIB=$(abs_top_builddir)/librsb.la 
+
+if HAVE_C_EXAMPLES
+hello_DEPENDENCIES=$(LIBRSB_LIB)
+autotune_DEPENDENCIES=$(LIBRSB_LIB)
+transpose_DEPENDENCIES=$(LIBRSB_LIB)
+power_DEPENDENCIES=$(LIBRSB_LIB)
+hello_LDADD=$(default_ldadd)
+autotune_LDADD=$(default_ldadd)
+transpose_LDADD=$(default_ldadd)
+power_LDADD=$(default_ldadd)
+EXTRAPROGRAMSC=hello$(EXEEXT) transpose$(EXEEXT) power$(EXEEXT) autotune$(EXEEXT)
+if HAVE_SPARSE_BLAS_INTERFACE
+io_spblas_DEPENDENCIES=$(LIBRSB_LIB)
+io_spblas_LDADD=$(default_ldadd)
+hello_spblas_LDADD=$(default_ldadd)
+hello_spblas_DEPENDENCIES=$(LIBRSB_LIB)
+EXTRAPROGRAMSSPBLAS=hello-spblas$(EXEEXT) io-spblas$(EXEEXT)
+else
+EXTRAPROGRAMSSPBLAS=
+endif
+else
+EXTRAPROGRAMSSPBLAS=
+EXTRAPROGRAMSC=
+endif
+
+# NOTE: see main Makefile's default_ldadd 
+default_ldadd=-L$(top_builddir) $(LIBRSB_LIB)
+AM_CFLAGS= -I$(top_builddir) -I$(top_srcdir)
+AM_FCFLAGS=-I$(top_builddir) -I$(top_srcdir)
+tests:	$(noinst_PROGRAMS) all
+	if test $(abs_top_builddir) != $(abs_top_srcdir) ; then cp $(abs_top_srcdir)/pd.mtx $(abs_top_srcdir)/vf.mtx .. ; fi
+	for ii in $(noinst_PROGRAMS) ; do echo ./$$ii ; if  ./$$ii ; then true ; else exit -1 ;fi ; done
+
+if HAVE_FORTRAN_EXAMPLES
+if HAVE_SPARSE_BLAS_INTERFACE
+fortran_DEPENDENCIES=$(LIBRSB_LIB)
+fortran_LDADD=$(default_ldadd)
+fortran_LINK=$(FCLINK)
+fortran_SOURCES=fortran.F90
+fortran_rsb_fi_DEPENDENCIES=$(LIBRSB_LIB)
+fortran_rsb_fi_LDADD=$(default_ldadd)
+fortran_rsb_fi_LINK=$(FCLINK)
+fortran_rsb_fi_SOURCES=fortran_rsb_fi.F90
+EXTRAPROGRAMSFORTRAN=fortran fortran_rsb_fi
+else
+EXTRAPROGRAMSFORTRAN=
+endif
+else
+EXTRAPROGRAMSFORTRAN=
+endif
+
+noinst_PROGRAMS=$(EXTRAPROGRAMSC) $(EXTRAPROGRAMSSPBLAS) $(EXTRAPROGRAMSFORTRAN)
+
+all: $(EXTRA_DIST)
+
+pd.mtx: $(top_srcdir)/pd.mtx
+	cp $< $@ 
+
+vf.mtx: $(top_srcdir)/vf.mtx
+	cp $< $@ 
+
+install-data-local:
+	$(mkdir_p) "$(DESTDIR)$(docdir)"
+	$(mkdir_p) "$(DESTDIR)$(docdir)/examples/"
+	$(INSTALL_DATA) $(EXSTUFF) "$(DESTDIR)$(docdir)/examples/"
+	chmod +x "$(DESTDIR)$(docdir)/examples/make.sh"
+
+uninstall-local:
+	for f in $(EXSTUFF) ; do if test -f "$(DESTDIR)$(docdir)/examples/"$$f ; then  rm "$(DESTDIR)$(docdir)/examples/"$$f ; fi ; done
+	if test -d "$(DESTDIR)$(docdir)/examples" ; then rmdir "$(DESTDIR)$(docdir)/examples" || true ; fi
+	if test -d "$(DESTDIR)$(docdir)" ; then rmdir "$(DESTDIR)$(docdir)" || true ; fi
+
diff --git a/examples/Makefile.in b/examples/Makefile.in
new file mode 100644
index 0000000..2e9c6fc
--- /dev/null
+++ b/examples/Makefile.in
@@ -0,0 +1,681 @@
+# Makefile.in generated by automake 1.11.6 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software
+# Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+VPATH = @srcdir@
+am__make_dryrun = \
+  { \
+    am__dry=no; \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        echo 'am--echo: ; @echo "AM"  OK' | $(MAKE) -f - 2>/dev/null \
+          | grep '^AM OK$$' >/dev/null || am__dry=yes;; \
+      *) \
+        for am__flg in $$MAKEFLAGS; do \
+          case $$am__flg in \
+            *=*|--*) ;; \
+            *n*) am__dry=yes; break;; \
+          esac; \
+        done;; \
+    esac; \
+    test $$am__dry = yes; \
+  }
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+noinst_PROGRAMS = $(am__EXEEXT_1) $(am__EXEEXT_2) $(am__EXEEXT_3)
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/rsb-config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+@HAVE_C_EXAMPLES_TRUE@am__EXEEXT_1 = hello$(EXEEXT) transpose$(EXEEXT) \
+@HAVE_C_EXAMPLES_TRUE@	power$(EXEEXT) autotune$(EXEEXT)
+@HAVE_C_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@am__EXEEXT_2 = hello-spblas$(EXEEXT) \
+@HAVE_C_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@	io-spblas$(EXEEXT)
+@HAVE_FORTRAN_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@am__EXEEXT_3 = fortran$(EXEEXT) \
+@HAVE_FORTRAN_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@	fortran_rsb_fi$(EXEEXT)
+PROGRAMS = $(noinst_PROGRAMS)
+autotune_SOURCES = autotune.c
+autotune_OBJECTS = autotune.$(OBJEXT)
+am__DEPENDENCIES_1 = $(LIBRSB_LIB)
+am__fortran_SOURCES_DIST = fortran.F90
+@HAVE_FORTRAN_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@am_fortran_OBJECTS = fortran.$(OBJEXT)
+fortran_OBJECTS = $(am_fortran_OBJECTS)
+am__fortran_rsb_fi_SOURCES_DIST = fortran_rsb_fi.F90
+@HAVE_FORTRAN_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@am_fortran_rsb_fi_OBJECTS = fortran_rsb_fi.$(OBJEXT)
+fortran_rsb_fi_OBJECTS = $(am_fortran_rsb_fi_OBJECTS)
+hello_SOURCES = hello.c
+hello_OBJECTS = hello.$(OBJEXT)
+hello_spblas_SOURCES = hello-spblas.c
+hello_spblas_OBJECTS = hello-spblas.$(OBJEXT)
+io_spblas_SOURCES = io-spblas.c
+io_spblas_OBJECTS = io-spblas.$(OBJEXT)
+power_SOURCES = power.c
+power_OBJECTS = power.$(OBJEXT)
+transpose_SOURCES = transpose.c
+transpose_OBJECTS = transpose.$(OBJEXT)
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
+depcomp = $(SHELL) $(top_srcdir)/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+PPFCCOMPILE = $(FC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_FCFLAGS) $(FCFLAGS)
+LTPPFCCOMPILE = $(LIBTOOL) --tag=FC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=compile $(FC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_FCFLAGS) $(FCFLAGS)
+FCLD = $(FC)
+FCLINK = $(LIBTOOL) --tag=FC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(FCLD) $(AM_FCFLAGS) $(FCFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+SOURCES = autotune.c $(fortran_SOURCES) $(fortran_rsb_fi_SOURCES) \
+	hello.c hello-spblas.c io-spblas.c power.c transpose.c
+DIST_SOURCES = autotune.c $(am__fortran_SOURCES_DIST) \
+	$(am__fortran_rsb_fi_SOURCES_DIST) hello.c hello-spblas.c \
+	io-spblas.c power.c transpose.c
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+ARFLAGS = @ARFLAGS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DOXYGEN = @DOXYGEN@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FC = @FC@
+FCFLAGS = @FCFLAGS@
+FGREP = @FGREP@
+GREP = @GREP@
+HELP2MAN = @HELP2MAN@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBRSB_ABI_VERSION = @LIBRSB_ABI_VERSION@
+LIBRSB_LIBRSB_VER = @LIBRSB_LIBRSB_VER@
+LIBRSB_MAIN_RELEASE = @LIBRSB_MAIN_RELEASE@
+LIBRSB_VERSION = @LIBRSB_VERSION@
+LIBRSB_VER_DATE = @LIBRSB_VER_DATE@
+LIBRSB_VER_MAJOR = @LIBRSB_VER_MAJOR@
+LIBRSB_VER_MINOR = @LIBRSB_VER_MINOR@
+LIBRSB_VER_PATCH = @LIBRSB_VER_PATCH@
+LIBRSB_VER_PRERS = @LIBRSB_VER_PRERS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+M4 = @M4@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NOUNROLLCFLAGS = @NOUNROLLCFLAGS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OCTAVE = @OCTAVE@
+OCTAVE_FLAGS = @OCTAVE_FLAGS@
+OPENMP_CFLAGS = @OPENMP_CFLAGS@
+OPENMP_FCFLAGS = @OPENMP_FCFLAGS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+RANLIB = @RANLIB@
+RSB_CONST_MAX_SUPPORTED_THREADS = @RSB_CONST_MAX_SUPPORTED_THREADS@
+RSB_DETECTED_MEM_HIERARCHY_INFO = @RSB_DETECTED_MEM_HIERARCHY_INFO@
+RSB_RSBENCH_CFLAGS = @RSB_RSBENCH_CFLAGS@
+RSB_RSBENCH_LIBS = @RSB_RSBENCH_LIBS@
+RSB_USER_SET_MEM_HIERARCHY_INFO = @RSB_USER_SET_MEM_HIERARCHY_INFO@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+SVN_REVISION = @SVN_REVISION@
+VERSION = @VERSION@
+WANT_COLUMN_UNLOOP_FACTORS = @WANT_COLUMN_UNLOOP_FACTORS@
+WANT_HALFWORD_INDICES = @WANT_HALFWORD_INDICES@
+WANT_LOOPING_KERNELS = @WANT_LOOPING_KERNELS@
+WANT_MATRIX_ALL_META_OPS = @WANT_MATRIX_ALL_META_OPS@
+WANT_MATRIX_ALL_OPS = @WANT_MATRIX_ALL_OPS@
+WANT_MATRIX_ALL_TYPES = @WANT_MATRIX_ALL_TYPES@
+WANT_MATRIX_BCOO_STORAGE = @WANT_MATRIX_BCOO_STORAGE@
+WANT_MATRIX_BCSS_STORAGE = @WANT_MATRIX_BCSS_STORAGE@
+WANT_MATRIX_LINKED_STORAGE = @WANT_MATRIX_LINKED_STORAGE@
+WANT_MATRIX_OPS = @WANT_MATRIX_OPS@
+WANT_MATRIX_STORAGE = @WANT_MATRIX_STORAGE@
+WANT_MATRIX_VB_STORAGE = @WANT_MATRIX_VB_STORAGE@
+WANT_ROW_UNLOOP_FACTORS = @WANT_ROW_UNLOOP_FACTORS@
+WANT_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR = @WANT_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR@
+WANT_SPSM_DIAG_CHECK = @WANT_SPSM_DIAG_CHECK@
+WANT_TYPES = @WANT_TYPES@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_ct_FC = @ac_ct_FC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+enable_openmp = @enable_openmp@
+enable_restrict = @enable_restrict@
+exec_prefix = @exec_prefix@
+have_grep = @have_grep@
+have_sed = @have_sed@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+subdir = examples
+
+#include $(top_srcdir)/Makefile
+EXTRA_DIST = $(srcdir)/pd.mtx $(srcdir)/vf.mtx $(srcdir)/make.sh $(srcdir)/benchex.sh
+EXPSOURCES_RSB = $(srcdir)/hello.c $(srcdir)/transpose.c $(srcdir)/power.c $(srcdir)/autotune.c
+EXPSOURCES_NSB = $(srcdir)/hello-spblas.c $(srcdir)/io-spblas.c
+EXPSOURCES_FEX = $(srcdir)/fortran.F90 $(srcdir)/fortran_rsb_fi.F90
+EXPSOURCES = $(EXPSOURCES_RSB) $(EXPSOURCES_NSB) $(EXPSOURCES_FEX)
+EXSTUFF = $(EXTRA_DIST) $(EXPSOURCES) 
+LIBRSB_LIB = $(abs_top_builddir)/librsb.la 
+@HAVE_C_EXAMPLES_TRUE@hello_DEPENDENCIES = $(LIBRSB_LIB)
+@HAVE_C_EXAMPLES_TRUE@autotune_DEPENDENCIES = $(LIBRSB_LIB)
+@HAVE_C_EXAMPLES_TRUE@transpose_DEPENDENCIES = $(LIBRSB_LIB)
+@HAVE_C_EXAMPLES_TRUE@power_DEPENDENCIES = $(LIBRSB_LIB)
+@HAVE_C_EXAMPLES_TRUE@hello_LDADD = $(default_ldadd)
+@HAVE_C_EXAMPLES_TRUE@autotune_LDADD = $(default_ldadd)
+@HAVE_C_EXAMPLES_TRUE@transpose_LDADD = $(default_ldadd)
+@HAVE_C_EXAMPLES_TRUE@power_LDADD = $(default_ldadd)
+@HAVE_C_EXAMPLES_FALSE@EXTRAPROGRAMSC = 
+@HAVE_C_EXAMPLES_TRUE@EXTRAPROGRAMSC = hello$(EXEEXT) transpose$(EXEEXT) power$(EXEEXT) autotune$(EXEEXT)
+@HAVE_C_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@io_spblas_DEPENDENCIES = $(LIBRSB_LIB)
+@HAVE_C_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@io_spblas_LDADD = $(default_ldadd)
+@HAVE_C_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@hello_spblas_LDADD = $(default_ldadd)
+@HAVE_C_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@hello_spblas_DEPENDENCIES = $(LIBRSB_LIB)
+@HAVE_C_EXAMPLES_FALSE@EXTRAPROGRAMSSPBLAS = 
+@HAVE_C_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_FALSE@EXTRAPROGRAMSSPBLAS = 
+@HAVE_C_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@EXTRAPROGRAMSSPBLAS = hello-spblas$(EXEEXT) io-spblas$(EXEEXT)
+
+# NOTE: see main Makefile's default_ldadd 
+default_ldadd = -L$(top_builddir) $(LIBRSB_LIB)
+AM_CFLAGS = -I$(top_builddir) -I$(top_srcdir)
+AM_FCFLAGS = -I$(top_builddir) -I$(top_srcdir)
+@HAVE_FORTRAN_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@fortran_DEPENDENCIES = $(LIBRSB_LIB)
+@HAVE_FORTRAN_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@fortran_LDADD = $(default_ldadd)
+@HAVE_FORTRAN_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@fortran_LINK = $(FCLINK)
+@HAVE_FORTRAN_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@fortran_SOURCES = fortran.F90
+@HAVE_FORTRAN_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@fortran_rsb_fi_DEPENDENCIES = $(LIBRSB_LIB)
+@HAVE_FORTRAN_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@fortran_rsb_fi_LDADD = $(default_ldadd)
+@HAVE_FORTRAN_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@fortran_rsb_fi_LINK = $(FCLINK)
+@HAVE_FORTRAN_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@fortran_rsb_fi_SOURCES = fortran_rsb_fi.F90
+@HAVE_FORTRAN_EXAMPLES_FALSE@EXTRAPROGRAMSFORTRAN = 
+@HAVE_FORTRAN_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_FALSE@EXTRAPROGRAMSFORTRAN = 
+@HAVE_FORTRAN_EXAMPLES_TRUE@@HAVE_SPARSE_BLAS_INTERFACE_TRUE@EXTRAPROGRAMSFORTRAN = fortran fortran_rsb_fi
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .F90 .c .lo .o .obj
+$(srcdir)/Makefile.in:  $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu examples/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --gnu examples/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure:  $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4):  $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+clean-noinstPROGRAMS:
+	@list='$(noinst_PROGRAMS)'; test -n "$$list" || exit 0; \
+	echo " rm -f" $$list; \
+	rm -f $$list || exit $$?; \
+	test -n "$(EXEEXT)" || exit 0; \
+	list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
+	echo " rm -f" $$list; \
+	rm -f $$list
+autotune$(EXEEXT): $(autotune_OBJECTS) $(autotune_DEPENDENCIES) $(EXTRA_autotune_DEPENDENCIES) 
+	@rm -f autotune$(EXEEXT)
+	$(LINK) $(autotune_OBJECTS) $(autotune_LDADD) $(LIBS)
+fortran$(EXEEXT): $(fortran_OBJECTS) $(fortran_DEPENDENCIES) $(EXTRA_fortran_DEPENDENCIES) 
+	@rm -f fortran$(EXEEXT)
+	$(fortran_LINK) $(fortran_OBJECTS) $(fortran_LDADD) $(LIBS)
+fortran_rsb_fi$(EXEEXT): $(fortran_rsb_fi_OBJECTS) $(fortran_rsb_fi_DEPENDENCIES) $(EXTRA_fortran_rsb_fi_DEPENDENCIES) 
+	@rm -f fortran_rsb_fi$(EXEEXT)
+	$(fortran_rsb_fi_LINK) $(fortran_rsb_fi_OBJECTS) $(fortran_rsb_fi_LDADD) $(LIBS)
+hello$(EXEEXT): $(hello_OBJECTS) $(hello_DEPENDENCIES) $(EXTRA_hello_DEPENDENCIES) 
+	@rm -f hello$(EXEEXT)
+	$(LINK) $(hello_OBJECTS) $(hello_LDADD) $(LIBS)
+hello-spblas$(EXEEXT): $(hello_spblas_OBJECTS) $(hello_spblas_DEPENDENCIES) $(EXTRA_hello_spblas_DEPENDENCIES) 
+	@rm -f hello-spblas$(EXEEXT)
+	$(LINK) $(hello_spblas_OBJECTS) $(hello_spblas_LDADD) $(LIBS)
+io-spblas$(EXEEXT): $(io_spblas_OBJECTS) $(io_spblas_DEPENDENCIES) $(EXTRA_io_spblas_DEPENDENCIES) 
+	@rm -f io-spblas$(EXEEXT)
+	$(LINK) $(io_spblas_OBJECTS) $(io_spblas_LDADD) $(LIBS)
+power$(EXEEXT): $(power_OBJECTS) $(power_DEPENDENCIES) $(EXTRA_power_DEPENDENCIES) 
+	@rm -f power$(EXEEXT)
+	$(LINK) $(power_OBJECTS) $(power_LDADD) $(LIBS)
+transpose$(EXEEXT): $(transpose_OBJECTS) $(transpose_DEPENDENCIES) $(EXTRA_transpose_DEPENDENCIES) 
+	@rm -f transpose$(EXEEXT)
+	$(LINK) $(transpose_OBJECTS) $(transpose_LDADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/autotune.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hello-spblas.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hello.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/io-spblas.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/power.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/transpose.Po@am__quote@
+
+.F90.o:
+	$(PPFCCOMPILE) -c -o $@ $<
+
+.F90.obj:
+	$(PPFCCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.F90.lo:
+	$(LTPPFCCOMPILE) -c -o $@ $<
+
+.c.o:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+	list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	mkid -fID $$unique
+tags: TAGS
+
+TAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	set x; \
+	here=`pwd`; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: CTAGS
+CTAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(PROGRAMS)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-noinstPROGRAMS \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am: install-data-local
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-local
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-noinstPROGRAMS ctags distclean \
+	distclean-compile distclean-generic distclean-libtool \
+	distclean-tags distdir dvi dvi-am html html-am info info-am \
+	install install-am install-data install-data-am \
+	install-data-local install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-ps install-ps-am install-strip installcheck \
+	installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-compile \
+	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+	tags uninstall uninstall-am uninstall-local
+
+tests:	$(noinst_PROGRAMS) all
+	if test $(abs_top_builddir) != $(abs_top_srcdir) ; then cp $(abs_top_srcdir)/pd.mtx $(abs_top_srcdir)/vf.mtx .. ; fi
+	for ii in $(noinst_PROGRAMS) ; do echo ./$$ii ; if  ./$$ii ; then true ; else exit -1 ;fi ; done
+
+all: $(EXTRA_DIST)
+
+pd.mtx: $(top_srcdir)/pd.mtx
+	cp $< $@ 
+
+vf.mtx: $(top_srcdir)/vf.mtx
+	cp $< $@ 
+
+install-data-local:
+	$(mkdir_p) "$(DESTDIR)$(docdir)"
+	$(mkdir_p) "$(DESTDIR)$(docdir)/examples/"
+	$(INSTALL_DATA) $(EXSTUFF) "$(DESTDIR)$(docdir)/examples/"
+	chmod +x "$(DESTDIR)$(docdir)/examples/make.sh"
+
+uninstall-local:
+	for f in $(EXSTUFF) ; do if test -f "$(DESTDIR)$(docdir)/examples/"$$f ; then  rm "$(DESTDIR)$(docdir)/examples/"$$f ; fi ; done
+	if test -d "$(DESTDIR)$(docdir)/examples" ; then rmdir "$(DESTDIR)$(docdir)/examples" || true ; fi
+	if test -d "$(DESTDIR)$(docdir)" ; then rmdir "$(DESTDIR)$(docdir)" || true ; fi
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/examples/autotune.c b/examples/autotune.c
new file mode 100644
index 0000000..630cfe0
--- /dev/null
+++ b/examples/autotune.c
@@ -0,0 +1,392 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*!
+ \ingroup rsb_doc_examples
+ @file
+ @author Michele Martone
+ @brief This is a first "RSB autotuning" example program.
+
+ \include autotune.c
+*/
+#include <rsb.h>	/* librsb header to include */
+#include <stdio.h>	/* printf() */
+#include <ctype.h>	/* isdigit() */
+#include <stdlib.h>	/* atoi() */
+/* #include "rsb_internals.h" */
+
+int tune_from_file(char * const filename, rsb_int_t wvat)
+{
+	struct rsb_mtx_t *mtxMp = NULL;
+	/* spmv specific variables */
+	const RSB_DEFAULT_TYPE alpha = 1;
+	const RSB_DEFAULT_TYPE beta = 1;
+       	rsb_flags_t order = RSB_FLAG_WANT_COLUMN_MAJOR_ORDER;
+      	const rsb_coo_idx_t nrhs = 2;  /* number of right hand sides */
+       	rsb_trans_t transA = RSB_TRANSPOSITION_N; /* transposition */
+       	rsb_nnz_idx_t ldB = 0;
+       	rsb_nnz_idx_t ldC = 0;
+	/* misc variables */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_time_t dt;
+	char ib[200];
+	const char*is = "RSB_MIF_MATRIX_INFO__TO__CHAR_P";
+	/* misc variables */
+	/* input autotuning variables */
+       	rsb_int_t oitmax = 1 /*15*/;	/* auto-tune iterations */
+       	rsb_time_t tmax = 0.1;	/* time per autotune operation */
+	/* output autotuning variables */
+	rsb_flags_t flagsA = RSB_FLAG_NOFLAGS;
+	int ione = 1;
+	rsb_type_t typecodea [] = RSB_MATRIX_SPBLAS_TYPE_CODES_ARRAY;
+	int typecodei;
+
+	errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS);
+
+	if( (errval) != RSB_ERR_NO_ERROR )
+		goto err;
+
+	errval = rsb_lib_set_opt(RSB_IO_WANT_VERBOSE_TUNING, &wvat );
+	
+	/*
+	errval = rsb_lib_set_opt(RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE, &ione);
+	*/
+
+	if( (errval) != RSB_ERR_NO_ERROR )
+		goto err;
+
+	printf("Loading matrix from file \"%s\".\n",filename);
+
+	mtxMp = rsb_file_mtx_load(filename, flagsA, typecodea[0], &errval);
+
+	if( (errval) != RSB_ERR_NO_ERROR )
+		goto err;
+
+	for( typecodei = 0 ; typecodei < RSB_IMPLEMENTED_TYPES; ++typecodei )
+	{
+		rsb_type_t typecode = typecodea[typecodei];
+		struct rsb_mtx_t *mtxAp = NULL;
+		struct rsb_mtx_t *mtxOp = NULL;
+		rsb_real_t sf = 0.0;
+       		rsb_int_t tn = 0;
+
+		sf = 0.0;
+       		tn = 0;
+
+		printf("Considering %c clone.\n",typecode);
+		
+		errval = rsb_mtx_clone(&mtxAp, typecode, transA, NULL, mtxMp,
+			       	flagsA);
+
+		if( (errval) != RSB_ERR_NO_ERROR )
+			goto err;
+
+		printf("Base matrix:\n");
+		rsb_mtx_get_info_str(mtxAp,is,ib,sizeof(ib));
+		printf("%s\n\n",ib);
+
+		dt = -rsb_time();
+		errval = rsb_tune_spmm(NULL, &sf, &tn, oitmax, tmax, transA,
+		     &alpha, mtxAp, nrhs, order, NULL, ldB, &beta, NULL, ldC);
+
+		dt += rsb_time();
+		if(tn == 0)
+		printf("After %lfs, autotuning routine did not find a better"
+			" threads count configuration.\n",dt);
+		else
+		printf("After %lfs, thread autotuning declared speedup of %lg x,"
+			" when using threads count of %d.\n",dt,sf,tn);
+		printf("\n");
+
+
+		dt = -rsb_time();
+
+		mtxOp = mtxAp;
+		errval = rsb_tune_spmm(&mtxAp, &sf, &tn, oitmax, tmax, transA,
+		       	&alpha, NULL, nrhs, order, NULL, ldB, &beta, NULL, ldC);
+		if( (errval) != RSB_ERR_NO_ERROR )
+			goto err;
+
+		dt += rsb_time();
+		if( mtxOp == mtxAp )
+		{
+			printf("After %lfs, global autotuning found old matrix optimal,"
+			" with declared speedup %lg x when using %d threads\n",dt,sf,tn);
+		}
+		else
+		{
+			printf("After %lfs, global autotuning declared speedup of %lg x,"
+			" when using threads count of %d and a new matrix:\n",dt,sf,tn);
+			rsb_mtx_get_info_str(mtxAp,is,ib,sizeof(ib));
+			printf("%s\n",ib);
+		}
+		printf("\n");
+
+		/* user is expected to:
+		errval = rsb_lib_set_opt(RSB_IO_WANT_EXECUTING_THREADS,&tn);
+		and use mtxAp in SpMV.
+ 		*/
+		rsb_mtx_free(mtxAp);
+		mtxAp = NULL;
+	}
+	rsb_mtx_free(mtxMp);
+	mtxMp = NULL;
+
+	goto ret;
+ret:
+	return 0;
+err:
+	rsb_perror(NULL,errval);
+	printf("Program terminating with error.\n");
+	return -1;
+}
+
+int main(const int argc, char * const argv[])
+{
+	/*!
+	 Autotuning example.
+	 */
+	/* matrix variables */
+	struct rsb_mtx_t *mtxAp = NULL;	/* matrix structure pointer */
+	const int bs = RSB_DEFAULT_BLOCKING;
+	rsb_coo_idx_t nrA = 500; /* number of rows */
+	rsb_coo_idx_t ncA = 500; /* number of cols */
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_DEFAULT;
+	rsb_coo_idx_t rd = 1; /* every rd rows one is non empty */
+	rsb_coo_idx_t cd = 4; /* every cd cols one is non empty */
+	rsb_nnz_idx_t nnzA = (nrA/rd)*(ncA/cd); /* nonzeroes */
+	rsb_coo_idx_t*IA = NULL;
+	rsb_coo_idx_t*JA = NULL;
+	RSB_DEFAULT_TYPE*VA = NULL;
+	/* spmv specific variables */
+	const RSB_DEFAULT_TYPE alpha = 1;
+	const RSB_DEFAULT_TYPE beta = 1;
+	RSB_DEFAULT_TYPE*Cp = NULL;
+	RSB_DEFAULT_TYPE*Bp = NULL;
+       	rsb_flags_t order = RSB_FLAG_WANT_COLUMN_MAJOR_ORDER;
+      	const rsb_coo_idx_t nrhs = 2;  /* number of right hand sides */
+       	rsb_trans_t transA = RSB_TRANSPOSITION_N; /* transposition */
+       	rsb_nnz_idx_t ldB = nrA;
+       	rsb_nnz_idx_t ldC = ncA;
+	/* misc variables */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	size_t so = sizeof(RSB_DEFAULT_TYPE);
+	size_t si = sizeof(rsb_coo_idx_t);
+	rsb_time_t dt,odt;
+       	rsb_int_t t,tt = 100;	/* will repeat spmv tt times */
+	char ib[200];
+	const char*is = "RSB_MIF_MATRIX_INFO__TO__CHAR_P";
+	/* misc counters */
+       	rsb_coo_idx_t ci; 
+	rsb_coo_idx_t ri;
+	rsb_coo_idx_t ni;
+	rsb_int_t nrhsi;
+	/* misc variables */
+	rsb_time_t etime = 0.0;
+	/* input autotuning variables */
+       	rsb_int_t oitmax = 15;	/* auto-tune iterations */
+       	rsb_time_t tmax = 0.1;	/* time per autotune operation */
+	/* input/output autotuning variables */
+       	rsb_int_t tn = 0;	/* threads number */
+	/* output autotuning variables */
+	rsb_real_t sf = 0.0;	/* speedup factor obtained from auto tuning */
+	rsb_int_t wvat = 1;     /* want verbose autotuning; see documentation
+				   of RSB_IO_WANT_VERBOSE_TUNING */
+
+	if(argc > 1 && !isdigit(argv[1][0]) )
+		return tune_from_file(argv[1],wvat);
+
+	if(argc > 1)
+	{
+		nrA = ncA = atoi(argv[1]);
+		if ( nrA < RSB_MIN_MATRIX_DIM || (nrA > (RSB_MAX_MATRIX_DIM) ))
+			goto err;
+
+		nnzA = (nrA/rd)*(ncA/cd);
+       		ldB = nrA;
+       		ldC = ncA;
+	}
+
+	printf("Creating %d x %d matrix with %d nonzeroes.\n",nrA,ncA,nnzA);
+
+	IA = calloc(nnzA, si);
+	JA = calloc(nnzA, si);
+	VA = calloc(nnzA, so);
+	Bp = calloc(nrhs*ncA ,so);
+	Cp = calloc(nrhs*nrA ,so);
+
+	if( ! ( VA && IA && JA && Bp && Cp ) )
+		goto err;
+
+	for(nrhsi=0;nrhsi<nrhs;++nrhsi)
+		for(ci=0;ci<ncA/cd;++ci)
+			Bp[nrhsi*ldB+ci] = 1.0;
+
+	for(nrhsi=0;nrhsi<nrhs;++nrhsi)
+		for(ri=0;ri<nrA/rd;++ri)
+			Cp[nrhsi*ldC+ri] = 1.0;
+
+	ni = 0;
+
+	for(ci=0;ci<ncA/cd;++ci)
+		for(ri=0;ri<nrA/rd;++ri)
+		{
+			VA[ni] = nrA * ri + ci;
+			IA[ni] = ri;
+			JA[ni] = ci;
+			ni++;
+		}
+
+	if((errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS))
+			!= RSB_ERR_NO_ERROR) goto err;
+
+	errval = rsb_lib_set_opt(RSB_IO_WANT_VERBOSE_TUNING, &wvat );
+
+	mtxAp = rsb_mtx_alloc_from_coo_const(
+		VA,IA,JA,nnzA,typecode,nrA,ncA,bs,bs,
+		RSB_FLAG_NOFLAGS,&errval);
+
+	/* VA, IA, JA are not necessary anymore */
+	free(VA);
+	free(IA);
+	free(JA);
+	VA = NULL;
+       	IA = NULL;
+       	JA = NULL;
+
+	if((!mtxAp) || (errval != RSB_ERR_NO_ERROR))
+		goto err;
+
+	printf("Allocated matrix of %zu nonzeroes:\n",(size_t)nnzA);
+	rsb_mtx_get_info_str(mtxAp,is,ib,sizeof(ib));
+	printf("%s\n\n",ib);
+
+	dt = - rsb_time();
+	for(t=0;t<tt;++t)
+		/* 
+		   If nrhs == 1, the following is equivalent to
+		   rsb_spmv(transA,&alpha,mtxAp,Bp,1,&beta,Cp,1);
+		*/
+		rsb_spmm(transA,&alpha,mtxAp,nrhs,order,Bp,ldB,&beta,Cp,ldC);
+	dt += rsb_time();
+	odt = dt;
+	printf("Before auto-tuning, %d multiplications took %lfs.\n",tt,dt);
+
+	printf("Threads autotuning (may take more than %lfs)...\n",
+			oitmax*tmax);
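+	/* a NULL first argument requests threads-count tuning only, leaving mtxAp unchanged */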
+	dt = -rsb_time();
+	errval = rsb_tune_spmm(NULL, &sf, &tn, oitmax, tmax, transA,
+		       	&alpha, mtxAp, nrhs, order, Bp, ldB, &beta, Cp, ldC);
+	dt += rsb_time();
+	if(errval != RSB_ERR_NO_ERROR)
+		goto err;
+
+	if(tn == 0)
+		printf("After %lfs, autotuning routine did not find a better"
+			" threads count configuration.\n",dt);
+	else
+		printf("After %lfs, autotuning routine declared speedup of %lg x,"
+			" when using threads count of %d.\n",dt,sf,tn);
+
+	errval = rsb_lib_set_opt(RSB_IO_WANT_EXECUTING_THREADS,&tn);
+	if(errval != RSB_ERR_NO_ERROR)
+		goto err;
+
+	rsb_mtx_get_info_str(mtxAp,is,ib,sizeof(ib));
+	printf("%s\n",ib);
+
+	dt = -rsb_time();
+	for(t=0;t<tt;++t)
+		/*rsb_spmv(transA,&alpha,mtxAp,Bp,1,&beta,Cp,1);*/
+		rsb_spmm(transA,&alpha,mtxAp,nrhs,order,Bp,ldB,&beta,Cp,ldC);
+	dt += rsb_time();
+	printf("After threads auto-tuning, %d multiplications took %lfs"
+			"  --  effective speedup of %lg x\n",tt,dt,odt/dt);
+	odt = dt;
+
+
+	tn = 0; /* this will restore default threads count */
+	errval = rsb_lib_set_opt(RSB_IO_WANT_EXECUTING_THREADS,&tn);
+	if(errval != RSB_ERR_NO_ERROR)
+		goto err;
+	errval = rsb_lib_get_opt(RSB_IO_WANT_EXECUTING_THREADS,&tn);
+	if(errval != RSB_ERR_NO_ERROR)
+		goto err;
+
+	printf("Matrix autotuning (may take more than %lfs; using %d"
+			" threads)...\n", oitmax*tmax, tn);
+
+	/* A negative tn would also request threads autotuning: */
+	/* tn = -tn; */
+
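+	/* passing &mtxAp (and NULL as the matrix argument below) allows the
+	 * tuner to replace mtxAp with a better-performing clone, if found */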
+	dt = -rsb_time();
+	errval = rsb_tune_spmm(&mtxAp, &sf, &tn, oitmax, tmax, transA,
+		       	&alpha,  NULL, nrhs, order, Bp, ldB, &beta, Cp, ldC);
+	dt += rsb_time();
+
+	if(errval != RSB_ERR_NO_ERROR)
+		goto err;
+
+	if(tn == 0)
+		printf("After %lfs, autotuning routine did not find a better"
+			" threads count configuration.\n",dt);
+	else
+		printf("After %lfs, autotuning routine declared speedup of %lg x,"
+			" when using threads count of %d.\n",dt,sf,tn);
+
+	rsb_mtx_get_info_str(mtxAp,is,ib,sizeof(ib));
+	printf("%s\n",ib);
+
+	dt = -rsb_time();
+	for(t=0;t<tt;++t)
+		/*rsb_spmv(transA,&alpha,mtxAp,Bp,1,&beta,Cp,1);*/
+		rsb_spmm(transA,&alpha,mtxAp,nrhs,order,Bp,ldB,&beta,Cp,ldC);
+	dt += rsb_time();
+	printf("After matrix auto-tuning, %d multiplications took %lfs"
+			"  --  further speedup of %lg x\n",tt,dt,odt/dt);
+
+	rsb_mtx_free(mtxAp);
+	free(Cp);
+	free(Bp);
+
+
+	errval = rsb_lib_get_opt(RSB_IO_WANT_LIBRSB_ETIME,&etime);
+	if(errval == RSB_ERR_UNSUPPORTED_FEATURE)
+	{
+		printf("librsb timer-based profiling is not supported in "
+		"this build. If you wish to have it, re-configure librsb "
+		"with its support. The error printed above can therefore "
+		"be safely ignored.\n");
+		errval = RSB_ERR_NO_ERROR;
+	}
+	else
+	if(etime) /* This will only work if enabled at configure time. */
+		printf("Elapsed program time is %5.2lfs\n",etime);
+
+	if((errval = rsb_lib_exit(RSB_NULL_EXIT_OPTIONS))
+			!=RSB_ERR_NO_ERROR)
+		goto err;
+	return 0;
+err:
+	rsb_perror(NULL,errval);
+	printf("Program terminating with error.\n");
+	return -1;
+}
diff --git a/examples/benchex.sh b/examples/benchex.sh
new file mode 100644
index 0000000..a1fe4a5
--- /dev/null
+++ b/examples/benchex.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+# systematic comparative benchmark, mostly for dense matrices
+# (compares against Intel MKL, if librsb was linked with it);
+# produces a number of plots systematically
+bench/dense.sh
+
+# the benchmark command; assumes A.mtx is a file in Matrix Market format
+./rsbench -oa -Ob -f A.mtx -qH -R -n1 -t100 --verbose -TD --compare-competitors 
+
+# rsbench is a very flexible tool; see its help:
+./rsbench -oa -Ob --help
diff --git a/examples/fortran.F90 b/examples/fortran.F90
new file mode 100644
index 0000000..b4af0dd
--- /dev/null
+++ b/examples/fortran.F90
@@ -0,0 +1,135 @@
+! 
+! Copyright (C) 2008-2016 Michele Martone
+! 
+! This file is part of librsb.
+! 
+! librsb is free software; you can redistribute it and/or modify it
+! under the terms of the GNU Lesser General Public License as published
+! by the Free Software Foundation; either version 3 of the License, or
+! (at your option) any later version.
+! 
+! librsb is distributed in the hope that it will be useful, but WITHOUT
+! ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+! FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+! License for more details.
+! 
+! You should have received a copy of the GNU Lesser General Public
+! License along with librsb; see the file COPYING.
+! If not, see <http://www.gnu.org/licenses/>.
+! 
+
+      SUBROUTINE blas_sparse_mod_example(res)
+      USE blas_sparse
+      USE rsb ! For the second part of the example
+      IMPLICIT NONE
+      INTEGER :: res, istat = 0, i
+      TYPE(C_PTR),TARGET :: mtxAp = C_NULL_PTR ! matrix pointer
+      INTEGER :: A
+      INTEGER,PARAMETER :: transn = blas_no_trans
+      INTEGER,PARAMETER :: incx = 1
+      INTEGER,PARAMETER :: incy = 1
+      REAL(KIND=8),PARAMETER :: alpha = 3
+! Symmetric (declared via lower triangle) matrix based example, e.g.:
+! 1 0
+! 1 1
+      ! declaration of VA,IA,JA 
+      !INTEGER,PARAMETER :: nr = 100
+      INTEGER,PARAMETER :: nr = 20
+      INTEGER,PARAMETER :: nc = nr
+      INTEGER,PARAMETER :: nnz = (nr*(nr+1))/2 ! half the square
+      INTEGER :: nt = 0
+      INTEGER :: ic, ir
+      INTEGER,PARAMETER :: IA(nnz) = (/ (((ir), ic=1,ir), ir=1,nr ) /) ! (/1, 2, 2/)
+      INTEGER,PARAMETER :: JA(nnz) = (/ (((ic), ic=1,ir), ir=1,nr ) /) ! (/1, 1, 2/)
+      REAL(KIND=8),PARAMETER :: VA(nnz) = (/ ((1, ic=1,ir), ir=1,nr ) /) ! (/1, 1, 1/)
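+      ! The implied DO loops above enumerate the full lower triangle,
+      ! row by row, matching nnz = nr*(nr+1)/2.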
+      REAL(KIND=8) :: x(nc) = (/((1), ir=1,nc)/) ! reference x ! (/1, 1/)
+      REAL(KIND=8),PARAMETER :: cy(nr) = (/((alpha+alpha*nr), ir=1,nr)/) ! reference cy after ! (/9, 9/)
+      REAL(KIND=8) :: y(nr) = (/((alpha), ir=1,nr)/) ! y will be overwritten ! (/3, 3/)
+      ! First example part: pure blas_sparse code.
+      res = 0
+      CALL duscr_begin(nr,nc,A,res)
+      IF (res.NE.0) GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF (istat.NE.0) GOTO 9997
+      CALL ussp(A,blas_rsb_spmv_autotuning_on,istat) ! (experimental) turns auto-tuning + thread setting on
+      IF (istat.NE.0) PRINT *,"autotuning returned nonzero:", istat &
+       &," ...did you enable autotuning?"
+      !
+      ! First style example 
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF (istat.NE.0) GOTO 9997
+      CALL uscr_end(A,istat)
+      IF (istat.NE.0) GOTO 9997
+      ! CALL ussp(A,blas_rsb_duplicates_sum,istat)
+      ! CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat) ! uncomment this to add the coefficients to the pattern once more
+      CALL usgp(A,blas_rsb_spmv_autotuning_on,nt)  ! (experimental)
+      IF (nt.NE.0) PRINT*,"autotuner chose ",nt," threads"
+      CALL ussp(A,blas_rsb_spmv_autotuning_off,istat) ! (experimental) turns auto-tuning + thread setting off
+      IF (istat.NE.0) GOTO 9997
+
+      CALL usmv(transn,alpha,A,x,incx,y,incy,istat)
+      IF (istat.NE.0) GOTO 9997
+      !
+      DO i = 1, nr
+            IF (y(i).NE.cy(i)) PRINT *, "first check results are not ok"
+            IF (y(i).NE.cy(i)) GOTO 9997
+      END DO
+      !
+      y(:) = alpha ! reset
+      !
+      ! Second style example 
+      CALL ussp(A,blas_rsb_autotune_next_operation,istat) ! (experimental) turns auto-tuning + thread setting on
+      IF (istat.NE.0) GOTO 9997
+      CALL usmv(transn,alpha,A,x,incx,y,incy,istat)
+      CALL usmm(blas_colmajor,transn,1, alpha,A,x,nr,y,nc,istat) ! Equivalent to the above (as long as incx=incy=1).
+      CALL usmm(blas_colmajor,transn,1,-alpha,A,x,nr,y,nc,istat) ! Subtract the last usmm call contribution.
+      IF (istat.NE.0) GOTO 9997
+      !
+      DO i = 1, nr
+            IF (y(i).NE.cy(i)) PRINT *,"second check results are not ok"
+            IF (y(i).NE.cy(i)) GOTO 9997
+      END DO
+      !
+      PRINT *, "check results are ok"
+      
+      ! Second part of the example: access to the rsb.h interface via
+      ! the ISO C Binding interface.
+      mtxAp = rsb_BLAS_get_mtx(A) ! get pointer to rsb structure (as in the rsb.h API)
+      IF(nr.LT.5) istat = rsb_file_mtx_save(mtxAp,C_NULL_PTR) ! write to stdout (only if matrix small enough)
+
+      GOTO 9998
+9997      res = -1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF (istat.NE.0) res = -1
+9999      CONTINUE
+      end SUBROUTINE blas_sparse_mod_example
+
+      PROGRAM main
+      USE rsb, ONLY: rsb_lib_init, rsb_lib_exit, C_PTR, C_NULL_PTR,&
+       & RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE,RSB_IO_WANT_VERBOSE_TUNING,&
+       & rsb_lib_set_opt
+      USE iso_c_binding
+      IMPLICIT NONE
+      INTEGER :: res = 0, passed = 0, failed = 0
+      !TYPE(C_PTR),PARAMETER :: EO = RSB_NULL_EXIT_OPTIONS
+      !TYPE(C_PTR),PARAMETER :: IO = RSB_NULL_INIT_OPTIONS
+      ! Note: using C_NULL_PTR instead of the previous lines because of http://gcc.gnu.org/bugzilla/show_bug.cgi?id=59411
+      TYPE(C_PTR),PARAMETER :: EO = C_NULL_PTR
+      TYPE(C_PTR),PARAMETER :: IO = C_NULL_PTR
+      INTEGER,TARGET::IONE=1
+      res = rsb_lib_init(IO)
+      res = rsb_lib_set_opt(RSB_IO_WANT_VERBOSE_TUNING,C_LOC(IONE))
+      
+      CALL blas_sparse_mod_example(res)
+      IF (res.LT.0) failed = failed + 1
+      IF (res.EQ.0) passed = passed + 1
+
+      res = rsb_lib_exit(EO)
+      
+      PRINT *, "FAILED:", failed
+      PRINT *, "PASSED:", passed
+      IF (failed .GT. 0) THEN
+       STOP 1
+      END IF
+      END PROGRAM
diff --git a/examples/fortran_rsb_fi.F90 b/examples/fortran_rsb_fi.F90
new file mode 100644
index 0000000..69e3693
--- /dev/null
+++ b/examples/fortran_rsb_fi.F90
@@ -0,0 +1,195 @@
+! 
+! Copyright (C) 2008-2016 Michele Martone
+! 
+! This file is part of librsb.
+! 
+! librsb is free software; you can redistribute it and/or modify it
+! under the terms of the GNU Lesser General Public License as published
+! by the Free Software Foundation; either version 3 of the License, or
+! (at your option) any later version.
+! 
+! librsb is distributed in the hope that it will be useful, but WITHOUT
+! ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+! FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+! License for more details.
+! 
+! You should have received a copy of the GNU Lesser General Public
+! License along with librsb; see the file COPYING.
+! If not, see <http://www.gnu.org/licenses/>.
+! 
+      SUBROUTINE rsb_mod_example1(res)
+      USE rsb
+      USE ISO_C_BINDING
+      IMPLICIT NONE
+      INTEGER ::res
+      INTEGER,TARGET :: istat = 0, i
+      INTEGER :: transt = RSB_TRANSPOSITION_N ! Please note that this interface is unfinished
+      INTEGER :: incx = 1, incy = 1
+      REAL(KIND=8),TARGET :: alpha = 3, beta = 1
+! 1 1
+! 1 1
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz = 4
+      INTEGER :: nr = 2
+      INTEGER :: nc = 2
+      INTEGER :: nrhs = 1
+      INTEGER :: order = RSB_FLAG_WANT_COLUMN_MAJOR_ORDER ! rhs layout
+      INTEGER :: flags = RSB_FLAG_NOFLAGS 
+      INTEGER,TARGET :: IA(4) = (/0, 1, 1,0/)
+      INTEGER,TARGET :: JA(4) = (/0, 0, 1,1/)
+      REAL(KIND=8),TARGET :: VA(4) = (/1,1,1,1/)
+      REAL(KIND=8),TARGET :: x(2) = (/1, 1/)! reference x 
+      REAL(KIND=8),TARGET :: cy(2) = (/9, 9/)! reference cy after 
+      REAL(KIND=8),TARGET :: y(2) = (/3, 3/)! y will be overwritten
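+      ! Note: IA and JA use 0-based (C-style) indices here, as the
+      ! rsb.h interface expects by default.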
+      TYPE(C_PTR),TARGET :: mtxAp = C_NULL_PTR ! matrix pointer
+      REAL(KIND=8) :: tmax = 2.0 ! tuning max time
+      INTEGER :: titmax = 2 ! tuning max iterations
+      INTEGER,TARGET :: ont = 0     ! optimal number of threads
+
+      res = 0
+      mtxAp = rsb_mtx_alloc_from_coo_const(C_LOC(VA),C_LOC(IA),C_LOC(JA)&
+       &,nnz,&
+       & RSB_NUMERICAL_TYPE_DOUBLE,nr,nc,1,1,flags,C_LOC(istat))
+
+      IF (istat.NE.RSB_ERR_NO_ERROR) GOTO 9997
+
+      istat = rsb_file_mtx_save(mtxAp,C_NULL_PTR)
+
+      ! Structure autotuning:
+      istat = rsb_tune_spmm(C_LOC(mtxAp),C_NULL_PTR,C_NULL_PTR,titmax,&
+       & tmax,&
+       & transt,C_LOC(alpha),C_NULL_PTR,nrhs,order,C_LOC(x),nr,&
+       & C_LOC(beta),C_LOC(y),nc)
+
+      IF (istat.NE.RSB_ERR_NO_ERROR) GOTO 9997
+
+      ! Thread count autotuning:
+      istat = rsb_tune_spmm(C_NULL_PTR,C_NULL_PTR,C_LOC(ont),titmax,&
+       & tmax,&
+       & transt,C_LOC(alpha),mtxAp,nrhs,order,C_LOC(x),nr,C_LOC(beta),&
+       & C_LOC(y),nc)
+      PRINT *, "Optimal number of threads:", ont
+
+      y(:) = (/3, 3/)! reference y 
+      IF (istat.NE.RSB_ERR_NO_ERROR) GOTO 9997
+      
+      istat = rsb_file_mtx_save(mtxAp,C_NULL_PTR)
+      IF (istat.NE.RSB_ERR_NO_ERROR) GOTO 9997
+
+      istat = rsb_spmv(transt,C_LOC(alpha),mtxAp,C_LOC(x),incx,&
+       & C_LOC(beta),C_LOC(y),incy)
+      IF (istat.NE.RSB_ERR_NO_ERROR) GOTO 9997
+      DO i = 1, 2
+            IF (y(i).NE.cy(i)) PRINT *, "type=d dims=2x2 sym=g diag=g &
+      &blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+            IF (y(i).NE.cy(i)) GOTO 9997
+      END DO
+      PRINT*,"type=d dims=2x2 sym=g diag=g blocks=1x1 usmv alpha= 3&
+       & beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      res = -1
+9998      CONTINUE
+      mtxAp = rsb_mtx_free(mtxAp)
+      IF (istat.NE.RSB_ERR_NO_ERROR) res = -1 
+! 9999      CONTINUE
+      istat = rsb_perror(C_NULL_PTR,istat)
+      end SUBROUTINE rsb_mod_example1
+
+      SUBROUTINE rsb_mod_example2(res)
+      USE rsb
+      USE ISO_C_BINDING
+      IMPLICIT NONE
+      INTEGER,TARGET :: errval
+      INTEGER :: res
+      INTEGER :: transt = RSB_TRANSPOSITION_N  ! no transposition
+      INTEGER :: incX = 1, incB = 1        ! X, B vectors increment
+      REAL(KIND=8),TARGET :: alpha = 3,beta = 1
+      INTEGER :: nnzA = 4, nrA = 3, ncA = 3     ! nonzeroes, rows, columns of matrix A
+      INTEGER,TARGET :: IA(4) = (/1, 2, 3, 3/)  ! row    indices
+      INTEGER,TARGET :: JA(4) = (/1, 2, 1, 3/)  ! column indices
+      INTEGER(C_SIGNED_CHAR) :: typecode = RSB_NUMERICAL_TYPE_DOUBLE
+      INTEGER :: flags =RSB_FLAG_DEFAULT_MATRIX_FLAGS+RSB_FLAG_SYMMETRIC
+      REAL(KIND=8),TARGET :: VA(4) = (/11.0, 22.0, 13.0, 33.0/) ! coefficients
+      REAL(KIND=8),TARGET :: X(3) = (/   0,    0,    0/)
+      REAL(KIND=8),TARGET :: B(3) = (/-1.0, -2.0, -2.0/)
+      TYPE(C_PTR),TARGET  :: mtxAp = C_NULL_PTR
+      TYPE(C_PTR)  :: mtxApp = C_NULL_PTR
+      REAL(KIND=8),TARGET :: ETIME = 0.0
+      !TYPE(C_PTR),PARAMETER :: EO = RSB_NULL_EXIT_OPTIONS
+      !TYPE(C_PTR),PARAMETER :: IO = RSB_NULL_INIT_OPTIONS
+      ! Note: using C_NULL_PTR instead of the previous lines because of http://gcc.gnu.org/bugzilla/show_bug.cgi?id=59411
+      TYPE(C_PTR),PARAMETER :: EO = C_NULL_PTR
+      TYPE(C_PTR),PARAMETER :: IO = C_NULL_PTR
+
+      errval = rsb_lib_init(IO)                ! librsb initialization
+      IF (errval.NE.RSB_ERR_NO_ERROR) &
+       & STOP "error calling rsb_lib_init"
+#if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 5)
+#define RSB_SKIP_BECAUSE_OLD_COMPILER 1
+#endif
+#ifndef RSB_SKIP_BECAUSE_OLD_COMPILER
+      mtxAp = rsb_mtx_alloc_from_coo_begin(nnzA,typecode,nrA,ncA,flags,&
+       & C_LOC(errval)) ! begin matrix creation
+      errval = rsb_mtx_set_vals(mtxAp,&
+       & C_LOC(VA),C_LOC(IA),C_LOC(JA),nnzA,flags) ! insert some nonzeroes
+      mtxApp = C_LOC(mtxAp) ! Old compilers like e.g.: Gfortran 4.4.7 will NOT compile this.
+      IF (errval.NE.RSB_ERR_NO_ERROR) &
+       & STOP "error calling rsb_mtx_set_vals"
+      errval = rsb_mtx_alloc_from_coo_end(mtxApp)                   ! end matrix creation
+      IF (errval.NE.RSB_ERR_NO_ERROR) &
+       & STOP "error calling rsb_mtx_alloc_from_coo_end"
+      errval = rsb_spmv(transt,C_LOC(alpha),mtxAp,C_LOC(X),&
+       & incX,C_LOC(beta),C_LOC(B),incB) ! B := B + (3) * A * X
+      IF (errval.NE.RSB_ERR_NO_ERROR)&
+       & STOP "error calling rsb_spmv"
+      mtxAp = rsb_mtx_free(mtxAp)                                 ! destroy matrix
+
+      ! The following is optional and depends on configure options, so it is allowed to fail
+      errval = rsb_lib_get_opt(RSB_IO_WANT_LIBRSB_ETIME,C_LOC(ETIME))
+      IF (errval.EQ.RSB_ERR_NO_ERROR)&
+       & PRINT*,"Time spent in librsb is:",ETIME
+      ! IF (errval.NE.0)STOP "error calling rsb_lib_get_opt" 
+      errval = RSB_ERR_NO_ERROR
+
+      IF (errval.NE.RSB_ERR_NO_ERROR) &
+       & STOP "error calling rsb_mtx_free"
+#else
+      PRINT*,"You have an old Fortran compiler not supporting C_LOC."
+      PRINT*,"Skipping a part of the test"
+#endif
+      errval=rsb_lib_exit(EO)                 ! librsb finalization
+      IF (errval.NE.RSB_ERR_NO_ERROR)&
+       & STOP "error calling rsb_lib_exit"
+      PRINT *, "rsb module fortran test is ok"
+      res = errval
+      end SUBROUTINE rsb_mod_example2
+
+      PROGRAM main
+      USE rsb
+      IMPLICIT NONE
+      INTEGER :: res = RSB_ERR_NO_ERROR, passed = 0, failed = 0
+      !TYPE(C_PTR),PARAMETER :: EO = RSB_NULL_EXIT_OPTIONS
+      !TYPE(C_PTR),PARAMETER :: IO = RSB_NULL_INIT_OPTIONS
+      ! Note: using C_NULL_PTR instead of the previous lines because of http://gcc.gnu.org/bugzilla/show_bug.cgi?id=59411
+      TYPE(C_PTR),PARAMETER :: EO = C_NULL_PTR
+      TYPE(C_PTR),PARAMETER :: IO = C_NULL_PTR
+
+      res = rsb_lib_init(IO)
+      
+      CALL rsb_mod_example1(res)
+      IF (res.LT.0) failed = failed + 1
+      IF (res.EQ.0) passed = passed + 1
+
+      res = rsb_lib_exit(EO)
+
+      CALL rsb_mod_example2(res)
+      IF (res.LT.0) failed = failed + 1
+      IF (res.EQ.0) passed = passed + 1
+      
+      PRINT *, "FAILED:", failed
+      PRINT *, "PASSED:", passed
+      IF (failed.GT.0) THEN
+       STOP 1
+      END IF
+      END PROGRAM
+
diff --git a/examples/hello-spblas.c b/examples/hello-spblas.c
new file mode 100644
index 0000000..3a190ef
--- /dev/null
+++ b/examples/hello-spblas.c
@@ -0,0 +1,170 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*!
+ \ingroup rsb_doc_examples
+ @file
+ @author Michele Martone
+ @brief This is a first "hello RSB" example program using 
+        a Sparse BLAS interface.
+
+ \include hello-spblas.c
+*/
+#include <rsb.h>	/* for rsb_lib_init */
+#include <blas_sparse.h>	/* Sparse BLAS on the top of librsb */
+#include <stdio.h>	/* printf */
+
+int main(const int argc, char * const argv[])
+{
+	/*!
+	 * A Hello/Sparse BLAS program.
+	 *
+	 * This program shows how to use the blas_sparse.h
+	 * interface correctly to:
+	 *
+	 * - initialize the library using #rsb_lib_init()
+	 * - allocate (build) a single sparse matrix in the RSB
+	 *   format using #BLAS_duscr_begin()/#BLAS_duscr_insert_entries()
+	 *   /#BLAS_duscr_end()
+	 * - extract one matrix element with #BLAS_dusget_element()
+	 * - multiply the matrix times a vector using #BLAS_dusmv()
+	 * - deallocate the matrix using #BLAS_usds() 
+	 * - finalize the library using
+	 *   #rsb_lib_exit(#RSB_NULL_EXIT_OPTIONS) 
+	*/
+#ifndef RSB_NUMERICAL_TYPE_DOUBLE   
+	printf("'double' type configured out."
+	" Please reconfigure the library with it and recompile.\n");
+	return 0;
+#else /* RSB_NUMERICAL_TYPE_DOUBLE */
+	blas_sparse_matrix A = blas_invalid_handle; /* handle for A */
+	const int nnz = 4;	/* number of nonzeroes of matrix A */
+	const int  nr = 3;	/* number of A's rows */
+	const int  nc = 3;	/* number of A's columns */
+	/* A's nonzero elements row indices (coordinates): */
+	int   IA[] = { 0, 1, 2, 2 };
+	/* A's nonzero elements column indices (coordinates): */
+	int   JA[] = { 0, 1, 0, 2 };
+	/* A's nonzero values (matrix coefficients): */
+	double VA[] = { 11.0, 22.0, 13.0, 33.0  };
+       	/* the X vector's array: */
+	double X[] = { 0.0, 0.0, 0.0 };
+       	/* the B vector's array: */
+	double B[] = { -1.0, -2.0, -2.0 };
+       	/* the (known) result array: */
+	double AB[] = { 11.0+26.0, 44.0, 66.0+13.0 };
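+	/* expected X after X += (-1)*A*B, with A completed by symmetry
+	   (the stored (2,0)=13 entry mirrored to (0,2)) */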
+	/* rsb error variable: */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int i;
+
+	printf("Hello, RSB!\n");
+	/* initialize the library */
+	if((errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS)) 
+			!= RSB_ERR_NO_ERROR)
+	{
+		goto err;
+	}
+	printf("Correctly initialized the library.\n");
+
+	/* initialize a matrix descriptor */
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == blas_invalid_handle )
+	{
+		goto err;
+	}
+	
+	/* specify properties (e.g.: symmetry)*/
+	if( BLAS_ussp(A,blas_lower_symmetric) != 0 )
+	{
+		goto err;
+	}
+
+	/* get properties (e.g.: symmetry) */
+	if( BLAS_usgp(A,blas_lower_symmetric) != 1 )
+	{
+		printf("Symmetry property not set?!\n");
+		goto err;
+	}
+
+	/* insert the nonzeroes (here, all at once) */
+	if( BLAS_duscr_insert_entries(A, nnz, VA, IA, JA)
+			== blas_invalid_handle)
+	{
+		goto err;
+	}
+
+	/* finalize (allocate) the matrix build  */
+	if( BLAS_duscr_end(A) == blas_invalid_handle )
+	{
+		goto err;
+	}
+	printf("Correctly allocated a matrix.\n");
+
+	VA[0] = 0.0;
+	if( BLAS_dusget_element(A, IA[0], JA[0], &VA[0]) )
+	{
+		goto err;
+	}
+
+	/* a check */
+	if( VA[0] != 11.0 )
+	{
+		goto err;
+	}
+
+	/* compute X = X + (-1) * A * B   */
+	if(BLAS_dusmv(blas_no_trans,-1,A,B,1,X,1))
+	{
+		goto err;
+	}
+
+	for( i = 0 ; i < nc; ++i )
+		if( X[i] != AB[i] )
+		{
+			printf("Computed SPMV result seems wrong. Terminating.\n");
+			goto err;
+		}
+	printf("Correctly performed a SPMV.\n");
+
+	/* deallocate matrix A */
+	if( BLAS_usds(A) )
+	{
+		goto err;
+	}
+	printf("Correctly freed the matrix.\n");
+
+	/* finalize the library */
+	if((errval = rsb_lib_exit(RSB_NULL_EXIT_OPTIONS))
+			!= RSB_ERR_NO_ERROR)
+	{
+		goto err;
+	}
+	printf("Correctly finalized the library.\n");
+	printf("Program terminating with no error.\n");
+
+	return 0;
+err:
+	rsb_perror(NULL,errval);
+	printf("Program terminating with error.\n");
+	return -1;
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+}
+
diff --git a/examples/hello.c b/examples/hello.c
new file mode 100644
index 0000000..892e5d7
--- /dev/null
+++ b/examples/hello.c
@@ -0,0 +1,159 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*!
+ \ingroup rsb_doc_examples
+ @file
+ @author Michele Martone
+ @brief This is a first "hello RSB" example program.
+
+ \include hello.c
+*/
+#include <rsb.h>	/* librsb header to include */
+#include <stdio.h>	/* printf() */
+
+int main(const int argc, char * const argv[])
+{
+	/*!
+	  A Hello-RSB program.
+	 
+	  This program shows how to use the rsb.h interface correctly to:
+	 
+	  - initialize the library using #rsb_lib_init()
+	  - set library options using #rsb_lib_set_opt()
+	  - revert such changes 
+	  - allocate (build) a single sparse matrix in the RSB format
+	    using #rsb_mtx_alloc_from_coo_const()
+	  - print out information obtained via #rsb_mtx_get_info_str()
+	  - multiply the matrix times a vector using #rsb_spmv()
+	  - deallocate the matrix using #rsb_mtx_free() 
+	  - finalize the library using #rsb_lib_exit(RSB_NULL_EXIT_OPTIONS) 
+	 
+	  In this example, we use #RSB_DEFAULT_TYPE as matrix type.
+	  This type depends on what was configured at library build time.
+	 * */
+	struct rsb_mtx_t *mtxAp = NULL;	/* matrix structure pointer */
+	const int bs = RSB_DEFAULT_BLOCKING;
+	const int brA = bs, bcA = bs;
+	const RSB_DEFAULT_TYPE one = 1;
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_DEFAULT;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	const rsb_nnz_idx_t nnzA = 4;		/* matrix nonzeroes count */
+	const rsb_coo_idx_t nrA = 3;		/* matrix rows count */
+	const rsb_coo_idx_t ncA = 3;		/* matrix columns count */
+	/* nonzero row indices coordinates: */
+	rsb_coo_idx_t IA[] = {0,1,2,2};
+	/* nonzero column indices coordinates: */
+	rsb_coo_idx_t JA[] = {0,1,2,2};
+	RSB_DEFAULT_TYPE VA[] = {11,22,32,1};/* values of nonzeroes */
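+	/* the (2,2) position appears twice in IA/JA: with
+	   RSB_FLAG_DUPLICATES_SUM below, 32+1 sum to 33 */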
+	RSB_DEFAULT_TYPE X[] = { 0, 0, 0 };	/* X vector's array */
+	const RSB_DEFAULT_TYPE B[] = { -1, -2, -5 }; /* B vector's array */
+	char ib[200];
+
+	printf("Hello, RSB!\n");
+	printf("Initializing the library...\n");
+	if((errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS)) != 
+			RSB_ERR_NO_ERROR)
+	{
+		printf("Error initializing the library!\n");
+		goto err;
+	}
+	printf("Correctly initialized the library.\n");
+
+	printf("Attempting to set the"
+	       " RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE library option.\n");
+	{
+		rsb_int_t evi=1; 
+		/* Setting a single optional library parameter. */
+		errval = rsb_lib_set_opt(
+			RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE, &evi);
+		if(errval != RSB_ERR_NO_ERROR)
+		{
+			char errbuf[256];
+			rsb_strerror_r(errval,&errbuf[0],sizeof(errbuf));
+			printf("Failed setting the"
+			" RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE"
+			" library option (reason string:\n%s).\n",errbuf);
+			if(errval&RSB_ERRS_UNSUPPORTED_FEATURES)
+			{
+			  printf("This error may be safely ignored.\n");
+			}
+			else
+			{
+			  printf("Some unexpected error occurred!\n");
+			  goto err;
+			}
+		}
+		else
+		{
+			printf("Setting back the "
+				"RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE"
+				" library option.\n");
+			evi = 0;
+			errval = rsb_lib_set_opt(RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE,
+					&evi);
+			errval = RSB_ERR_NO_ERROR;
+		}
+	}
+
+	mtxAp = rsb_mtx_alloc_from_coo_const(
+		VA,IA,JA,nnzA,typecode,nrA,ncA,brA,bcA,
+		RSB_FLAG_NOFLAGS    /* default format will be chosen */
+		|RSB_FLAG_DUPLICATES_SUM/* duplicates will be summed */
+			,&errval);
+	if((!mtxAp) || (errval != RSB_ERR_NO_ERROR))
+	{
+		printf("Error while allocating the matrix!\n");
+		goto err;
+	}
+	printf("Correctly allocated a matrix.\n");
+	printf("Summary information of the matrix:\n");
+	/* print out the matrix summary information  */
+	rsb_mtx_get_info_str(mtxAp,"RSB_MIF_MATRIX_INFO__TO__CHAR_P",
+			ib,sizeof(ib));
+	printf("%s",ib);
+	printf("\n");
+
+	if((errval = 
+		rsb_spmv(RSB_TRANSPOSITION_N,&one,mtxAp,B,1,&one,X,1))
+			!= RSB_ERR_NO_ERROR )
+	{
+		printf("Error performing a multiplication!\n");
+		goto err;
+	}
+	printf("Correctly performed a SPMV.\n");
+	rsb_mtx_free(mtxAp);
+	printf("Correctly freed the matrix.\n");
+	if((errval = rsb_lib_exit(RSB_NULL_EXIT_OPTIONS))
+			!= RSB_ERR_NO_ERROR)
+	{
+		printf("Error finalizing the library!\n");
+		goto err;
+	}
+	printf("Correctly finalized the library.\n");
+	printf("Program terminating with no error.\n");
+	return 0;
+err:
+	rsb_perror(NULL,errval);
+	printf("Program terminating with error.\n");
+	return -1;
+}
+
diff --git a/examples/io-spblas.c b/examples/io-spblas.c
new file mode 100644
index 0000000..d3391a4
--- /dev/null
+++ b/examples/io-spblas.c
@@ -0,0 +1,111 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*!
+ \ingroup rsb_doc_examples
+ @file
+ @author Michele Martone
+ @brief This is an example program using a Sparse BLAS interface
+        and reading from file using the RSB library.
+
+ \include io-spblas.c
+*/
+#include <rsb.h>	/* for rsb_lib_init */
+#include <blas_sparse.h>
+#include <stdio.h>
+	
+int main(const int argc, char * const argv[])
+{
+#ifndef RSB_NUMERICAL_TYPE_DOUBLE   
+	printf("Skipping a test because the 'double' type was opted out.\n");
+	return 0;
+#else /* RSB_NUMERICAL_TYPE_DOUBLE */
+	blas_sparse_matrix A = blas_invalid_handle;
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_DOUBLE;
+	rsb_char_t * filename = argc > 1 ? argv[1] : "../pd.mtx";
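+	/* with no arguments, load the pd.mtx example matrix from the parent directory */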
+
+	printf("Hello, RSB!\n");
+	if((rsb_perror(NULL,
+		rsb_lib_init(RSB_NULL_INIT_OPTIONS)))!=RSB_ERR_NO_ERROR)
+	{
+		printf("Error while initializing the library.\n");
+		goto err;
+	}
+
+	printf("Correctly initialized the library.\n");
+
+	A = rsb_load_spblas_matrix_file_as_matrix_market(filename,
+		       	typecode );
+	if( A == blas_invalid_handle )
+	{
+		printf("Error while loading matrix %s from file.\n",
+				filename);
+		goto err;
+	}
+
+	printf("Correctly loaded and allocated a matrix"
+			" from file %s.\n",filename);
+
+	if( BLAS_usgp(A,blas_symmetric) == 1 )
+		printf("Matrix is symmetric\n");
+
+	if( BLAS_usgp(A,blas_hermitian) == 1 )
+		printf("Matrix is hermitian\n");
+
+	printf("Now SPMV with NULL vectors will be attempted,"
+			" resulting in an error (so don't worry).\n");
+
+	if(BLAS_dusmv(blas_no_trans,-1,A,NULL,1,NULL,1))
+	{
+		printf("Correctly detected an error condition.\n");
+		goto okerr;
+	}
+
+	printf("No error detected?\nIf you see this line printed out,"
+		" please report it as a bug, because the above NULL pointers"
+		" should have been detected.\n");
+	return -1;
+
+okerr:
+	printf("Program correctly recovered from intentional"
+			" error condition.\n");
+	if(BLAS_usds(A))
+	{
+		printf("Error while freeing the matrix!\n");
+		goto err;
+	}
+
+	printf("Correctly freed the matrix.\n");
+err:
+	if(rsb_perror(NULL,
+		rsb_lib_exit(RSB_NULL_EXIT_OPTIONS))!=RSB_ERR_NO_ERROR)
+	{
+		printf("Failed finalizing the library.\n");
+		goto ferr;
+	}
+
+	printf("Correctly finalized the library.\n");
+	return 0;
+ferr:
+	return -1;
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+}
+
diff --git a/examples/make.sh b/examples/make.sh
new file mode 100755
index 0000000..d613d86
--- /dev/null
+++ b/examples/make.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+# Script to build the librsb example programs.
+
+LIBRSB_CONFIG=${LIBRSB_CONFIG:-librsb-config}
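+# Set LIBRSB_CONFIG to the full path of librsb-config if it is not in the PATH.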
+
+for s in *.c
+do
+	p=${s/.c/}
+	rm -f $p 
+	CFLAGS=`${LIBRSB_CONFIG} --I_opts`
+	LDFLAGS=`${LIBRSB_CONFIG} --static --ldflags --extra_libs`
+	CC=`${LIBRSB_CONFIG} --cc`
+	cmd="$CC $CFLAGS $s $LDFLAGS -o $p"
+	echo $cmd
+	$cmd
+done
+
+# replace false with true if you have built the Fortran modules and installed them in the include directory.
+if false ; then
+for s in *.F90
+do
+	p=${s/.F90/}
+	rm -f $p 
+	CFLAGS=`${LIBRSB_CONFIG} --I_opts`
+	LDFLAGS=`${LIBRSB_CONFIG} --static --ldflags --extra_libs`
+	FC=`${LIBRSB_CONFIG} --fc`
+	cmd="$FC $CFLAGS $s $LDFLAGS -o $p"
+	echo $cmd
+	$cmd
+done
+fi
+
diff --git a/examples/pd.mtx b/examples/pd.mtx
new file mode 100644
index 0000000..380a400
--- /dev/null
+++ b/examples/pd.mtx
@@ -0,0 +1,48 @@
+%%MatrixMarket matrix coordinate real general
+% a positive definite matrix, as in
+% http://www.ncsa.uiuc.edu/UserInfo/Resources/Hardware/IBMp690/IBM/usr/lpp/essl.html.en_US/html/essl43.html
+% *                        *
+% | 99  12  13  14  15  16 |
+% | 12  99  12  13  14  15 |
+% | 13  12  99  12  13  14 |
+% | 14  13  12  99  12  13 |
+% | 15  14  13  12  99  12 |
+% | 16  15  14  13  12  99 |
+% *                        *
+6 6 36
+1 1 99
+1 2 12
+1 3 13
+1 4 14
+1 5 15
+1 6 16
+2 1 12
+2 2 99
+2 3 12
+2 4 13
+2 5 14
+2 6 15
+3 1 13
+3 2 12
+3 3 99
+3 4 12
+3 5 13
+3 6 14
+4 1 14
+4 2 13
+4 3 12
+4 4 99
+4 5 12
+4 6 13
+5 1 15
+5 2 14
+5 3 13
+5 4 12
+5 5 99
+5 6 12
+6 1 16
+6 2 15
+6 3 14
+6 4 13
+6 5 12
+6 6 99
diff --git a/examples/power.c b/examples/power.c
new file mode 100644
index 0000000..354ff40
--- /dev/null
+++ b/examples/power.c
@@ -0,0 +1,139 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*!
+ @file
+ @author Michele Martone
+ @brief A toy program implementing the power method
+        for computing matrix eigenvalues.
+ \ingroup rsb_doc_examples
+
+ \include power.c
+*/
+
+#include <stdio.h>	// printf
+#include <math.h>	// sqrt
+#include <stdlib.h>	// calloc
+#include <rsb.h>
+
+int main(const int argc, char * const argv[])
+{
+	int WANT_VERBOSE = 0;
+	struct rsb_mtx_t *mtxAp = NULL;
+	const int bs = RSB_DEFAULT_BLOCKING;
+	int i;
+	const int br = bs, bc = bs; /* bs x bs blocked */
+	rsb_err_t errval = 0;
+	rsb_nnz_idx_t nnzA = 4;
+	rsb_coo_idx_t  nrA = 3;
+	rsb_coo_idx_t  ncA = 3;
+	rsb_int_t it = 0, maxit = 100;
+	const rsb_coo_idx_t    IA[] = { 0, 1, 2, 0 };
+	const rsb_coo_idx_t    JA[] = { 0, 1, 2, 2 };
+	const RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE VA[] = { 11, 22, 33, 13 };
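+	/* A is upper triangular, so its eigenvalues are its diagonal
+	 * entries; the iteration should converge to the dominant one, 33 */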
+	const RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE ZERO = 0;
+
+	RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE norm = 0.0, /* nu */
+	oldnorm = 1.0, /* oldnorm */
+	*b1 = NULL, *b2 = NULL,
+	*bnow = NULL, *bnext = NULL;/* b1 and b2 aliases */
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_FIRST_BLAS;
+	size_t ds = 0;
+       	/* tolerance */
+	const RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE tol = 1e-14;
+
+	/* library initialization */
+	if(rsb_lib_init(RSB_NULL_INIT_OPTIONS)!=RSB_ERR_NO_ERROR)
+		return -1;
+
+	/* allocation */
+	mtxAp = rsb_mtx_alloc_from_coo_const(VA,IA,JA,nnzA,
+			typecode,nrA,ncA,br,bc,RSB_FLAG_NOFLAGS,NULL);
+	if(!mtxAp)
+		return -1;
+
+	ds = (nrA)*sizeof(RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE);
+	b1 = calloc(1,ds);
+	b2 = calloc(1,ds);
+
+	if(! (b1 && b2))
+	{
+		errval = RSB_ERR_ENOMEM;
+		goto err;
+	}
+
+	for( i = 0; i < nrA; ++i )
+		b1[i] = 1;
+
+	bnow = b1, bnext = b2;/* b,b' */
+
+	while( fabs(norm-oldnorm) > tol && it<maxit )
+	{
+		++ it;
+		oldnorm = norm;
+		/* b'<-Ab */
+		if(( rsb_spmv(RSB_TRANSPOSITION_N,NULL,mtxAp,bnow,
+			1,&ZERO,bnext,1)) != RSB_ERR_NO_ERROR )
+			goto err;
+		/* nu<-||Ab||^2 */
+		norm = 0;
+		for(i=0;i<nrA;++i) 
+			norm += bnext[i]*bnext[i];
+		/* nu<-||Ab|| */
+		norm = sqrt(norm);
+		norm = 1.0/norm;
+		/* b'<- Ab / ||Ab|| */
+		for(i=0;i<nrA;++i)
+		       	bnext[i] *= norm;
+		norm = 1.0/norm;
+		printf("it:%d norm:%lg norm diff:%lg\n",it,norm,norm-oldnorm);
+
+		{void *tmp=bnow;bnow=bnext;bnext=tmp;/* pointers swap */}
+		if(WANT_VERBOSE)
+		{
+			printf("norm:%lg\n",norm);
+			if(isinf(norm))
+			/* isinf is a C99 feature (need correct
+			 * compilation flags) */
+				goto err;
+
+			for(i=0;i<2;++i)
+				printf("x[%d]=%lg\n",i,((double*)bnext)[i]);
+		}
+	}
+	/* norm now approximates the largest-magnitude eigenvalue; bnow holds the corresponding eigenvector approximation */
+
+	rsb_mtx_free(mtxAp);
+	free(b1);
+	free(b2);
+	if(rsb_lib_exit(RSB_NULL_EXIT_OPTIONS)!=RSB_ERR_NO_ERROR)
+		goto err;
+	if( it == maxit )
+	{
+		printf("ERROR: hit iterations limit without convergence!\n");
+		errval = RSB_ERR_GENERIC_ERROR;
+		goto err;
+	}
+	return 0;
+err:
+	rsb_perror(NULL,errval);
+	return -1;
+}
+
diff --git a/examples/transpose.c b/examples/transpose.c
new file mode 100644
index 0000000..9efabef
--- /dev/null
+++ b/examples/transpose.c
@@ -0,0 +1,152 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*!
+ @file
+ @author Michele Martone
+ @brief A toy program showing instantiation, transposition and other
+ operations on a single matrix.
+ \ingroup rsb_doc_examples
+
+ \include transpose.c
+*/
+#include <rsb.h>
+#include <stdio.h>	/* printf */
+
+int main(const int argc, char * const argv[])
+{
+	struct rsb_mtx_t *mtxAp = NULL;
+	rsb_blk_idx_t brA = RSB_DEFAULT_BLOCKING, bcA=RSB_DEFAULT_BLOCKING;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t nnzA = 4;
+	rsb_coo_idx_t  nrA = 3;
+	rsb_coo_idx_t  ncA = 3;
+	rsb_coo_idx_t    IA[] = { 0, 1, 2, 0 };
+	rsb_coo_idx_t    JA[] = { 0, 1, 2, 2 };
+	RSB_DEFAULT_TYPE VA[] = { 11, 22, 33, 13 };
+	RSB_DEFAULT_TYPE XV[] = { 0,0,0,0,0,0 };
+	rsb_coo_idx_t  vl = 0;
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_DEFAULT;
+
+	/* library initialization */
+	if(rsb_lib_init(RSB_NULL_INIT_OPTIONS)!=RSB_ERR_NO_ERROR)
+	{
+		return -1;
+	}
+
+	/* allocation */
+	mtxAp = rsb_mtx_alloc_from_coo_const(
+			VA,IA,JA,nnzA,typecode,nrA,ncA,
+			brA,bcA,RSB_FLAG_NOFLAGS,NULL);
+	if(!mtxAp)
+	{
+		return -1;
+	}
+
+	/* printout */
+	if(RSB_ERR_NO_ERROR!=(errval = rsb_file_mtx_save(mtxAp,NULL)))
+	{
+		if(errval != RSB_ERR_UNSUPPORTED_FEATURE)
+			goto err;
+	}
+	
+	/* matrix transposition */
+	if( RSB_ERR_NO_ERROR != (errval =
+		rsb_mtx_clone(&mtxAp,RSB_NUMERICAL_TYPE_SAME_TYPE,
+		RSB_TRANSPOSITION_T,NULL,mtxAp,RSB_FLAG_IDENTICAL_FLAGS)))
+	{
+		goto err;
+	}
+
+	/* printout */
+	if(RSB_ERR_NO_ERROR!=(errval = rsb_file_mtx_save(mtxAp,NULL)))
+	{
+		if(errval != RSB_ERR_UNSUPPORTED_FEATURE)
+			goto err;
+	}
+
+	rsb_mtx_free(mtxAp);
+
+	/* doing the same after load from file */
+	mtxAp = rsb_file_mtx_load("../pd.mtx",
+		RSB_FLAG_NOFLAGS,typecode,NULL);
+	if(!mtxAp)
+	{
+		return -1;
+	}
+
+	/* printout */
+	if(RSB_ERR_NO_ERROR!=(errval = rsb_file_mtx_save(mtxAp,NULL)))
+	{
+		if(errval != RSB_ERR_UNSUPPORTED_FEATURE)
+			goto err;
+	}
+
+	/* one can also query the dimensions in advance */
+	if(RSB_ERR_NO_ERROR!=(errval =
+		rsb_file_mtx_get_dims("../pd.mtx",&nrA,&ncA,&nnzA,NULL)))
+	{
+		if(errval != RSB_ERR_UNSUPPORTED_FEATURE)
+			goto err;
+	}
+
+	/* A matrix can be rendered to Postscript. */
+	{
+		if(RSB_ERR_NO_ERROR!=(errval =
+		rsb_mtx_rndr("pd.eps",mtxAp,512,512,RSB_MARF_EPS_B)))
+			goto err;
+	}
+
+	rsb_mtx_free(mtxAp);
+
+	/* vectors can also be loaded */
+	if(RSB_ERR_NO_ERROR!=(errval = 
+		rsb_file_vec_load("../vf.mtx",typecode,NULL,&vl )))
+		goto err;
+	/* we expect vf.mtx to be 6 rows long */
+	if( vl != 6 )
+	{
+		goto err;
+	}
+
+	if(RSB_ERR_NO_ERROR!=(errval = 
+		rsb_file_vec_load("../vf.mtx",typecode,XV, NULL )))
+		goto err;
+
+	/* matrices can be rendered from file to a pixelmap as well */
+	{
+		unsigned char pixmap[3*2*2];
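+		/* 3 bytes (RGB) per pixel, for a 2 x 2 pixels rendering */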
+
+		if(RSB_ERR_NO_ERROR!=(errval =
+		rsb_file_mtx_rndr(pixmap,"../pd.mtx",2,2,2,RSB_MARF_RGB)))
+			goto err;
+	}
+
+	if(RSB_ERR_NO_ERROR != rsb_lib_exit(RSB_NULL_EXIT_OPTIONS))
+	{
+		goto err;
+	}
+	return 0;
+err:
+	rsb_perror(NULL,errval);
+	return -1;
+}
+
diff --git a/examples/vf.mtx b/examples/vf.mtx
new file mode 100644
index 0000000..61799bf
--- /dev/null
+++ b/examples/vf.mtx
@@ -0,0 +1,8 @@
+%%MatrixMarket matrix array complex general
+6           1
+11.000000000000000E+000 12.000000000000000E+000 
+21.000000000000000E+000 22.000000000000000E+000 
+31.000000000000000E+000 32.000000000000000E+000 
+41.000000000000000E+000 42.000000000000000E+000 
+51.000000000000000E+000 52.000000000000000E+000 
+61.000000000000000E+000 62.000000000000000E+000 
diff --git a/install-sh b/install-sh
new file mode 100755
index 0000000..a9244eb
--- /dev/null
+++ b/install-sh
@@ -0,0 +1,527 @@
+#!/bin/sh
+# install - install a program, script, or datafile
+
+scriptversion=2011-01-19.21; # UTC
+
+# This originates from X11R5 (mit/util/scripts/install.sh), which was
+# later released in X11R6 (xc/config/util/install.sh) with the
+# following copyright and license.
+#
+# Copyright (C) 1994 X Consortium
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC-
+# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+# Except as contained in this notice, the name of the X Consortium shall not
+# be used in advertising or otherwise to promote the sale, use or other deal-
+# ings in this Software without prior written authorization from the X Consor-
+# tium.
+#
+#
+# FSF changes to this file are in the public domain.
+#
+# Calling this script install-sh is preferred over install.sh, to prevent
+# `make' implicit rules from creating a file called install from it
+# when there is no Makefile.
+#
+# This script is compatible with the BSD install script, but was written
+# from scratch.
+
+nl='
+'
+IFS=" ""	$nl"
+
+# set DOITPROG to echo to test this script
+
+# Don't use :- since 4.3BSD and earlier shells don't like it.
+doit=${DOITPROG-}
+if test -z "$doit"; then
+  doit_exec=exec
+else
+  doit_exec=$doit
+fi
+
+# Put in absolute file names if you don't have them in your path;
+# or use environment vars.
+
+chgrpprog=${CHGRPPROG-chgrp}
+chmodprog=${CHMODPROG-chmod}
+chownprog=${CHOWNPROG-chown}
+cmpprog=${CMPPROG-cmp}
+cpprog=${CPPROG-cp}
+mkdirprog=${MKDIRPROG-mkdir}
+mvprog=${MVPROG-mv}
+rmprog=${RMPROG-rm}
+stripprog=${STRIPPROG-strip}
+
+posix_glob='?'
+initialize_posix_glob='
+  test "$posix_glob" != "?" || {
+    if (set -f) 2>/dev/null; then
+      posix_glob=
+    else
+      posix_glob=:
+    fi
+  }
+'
+
+posix_mkdir=
+
+# Desired mode of installed file.
+mode=0755
+
+chgrpcmd=
+chmodcmd=$chmodprog
+chowncmd=
+mvcmd=$mvprog
+rmcmd="$rmprog -f"
+stripcmd=
+
+src=
+dst=
+dir_arg=
+dst_arg=
+
+copy_on_change=false
+no_target_directory=
+
+usage="\
+Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE
+   or: $0 [OPTION]... SRCFILES... DIRECTORY
+   or: $0 [OPTION]... -t DIRECTORY SRCFILES...
+   or: $0 [OPTION]... -d DIRECTORIES...
+
+In the 1st form, copy SRCFILE to DSTFILE.
+In the 2nd and 3rd, copy all SRCFILES to DIRECTORY.
+In the 4th, create DIRECTORIES.
+
+Options:
+     --help     display this help and exit.
+     --version  display version info and exit.
+
+  -c            (ignored)
+  -C            install only if different (preserve the last data modification time)
+  -d            create directories instead of installing files.
+  -g GROUP      $chgrpprog installed files to GROUP.
+  -m MODE       $chmodprog installed files to MODE.
+  -o USER       $chownprog installed files to USER.
+  -s            $stripprog installed files.
+  -t DIRECTORY  install into DIRECTORY.
+  -T            report an error if DSTFILE is a directory.
+
+Environment variables override the default commands:
+  CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG
+  RMPROG STRIPPROG
+"
+
+while test $# -ne 0; do
+  case $1 in
+    -c) ;;
+
+    -C) copy_on_change=true;;
+
+    -d) dir_arg=true;;
+
+    -g) chgrpcmd="$chgrpprog $2"
+	shift;;
+
+    --help) echo "$usage"; exit $?;;
+
+    -m) mode=$2
+	case $mode in
+	  *' '* | *'	'* | *'
+'*	  | *'*'* | *'?'* | *'['*)
+	    echo "$0: invalid mode: $mode" >&2
+	    exit 1;;
+	esac
+	shift;;
+
+    -o) chowncmd="$chownprog $2"
+	shift;;
+
+    -s) stripcmd=$stripprog;;
+
+    -t) dst_arg=$2
+	# Protect names problematic for `test' and other utilities.
+	case $dst_arg in
+	  -* | [=\(\)!]) dst_arg=./$dst_arg;;
+	esac
+	shift;;
+
+    -T) no_target_directory=true;;
+
+    --version) echo "$0 $scriptversion"; exit $?;;
+
+    --)	shift
+	break;;
+
+    -*)	echo "$0: invalid option: $1" >&2
+	exit 1;;
+
+    *)  break;;
+  esac
+  shift
+done
+
+if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then
+  # When -d is used, all remaining arguments are directories to create.
+  # When -t is used, the destination is already specified.
+  # Otherwise, the last argument is the destination.  Remove it from $@.
+  for arg
+  do
+    if test -n "$dst_arg"; then
+      # $@ is not empty: it contains at least $arg.
+      set fnord "$@" "$dst_arg"
+      shift # fnord
+    fi
+    shift # arg
+    dst_arg=$arg
+    # Protect names problematic for `test' and other utilities.
+    case $dst_arg in
+      -* | [=\(\)!]) dst_arg=./$dst_arg;;
+    esac
+  done
+fi
+
+if test $# -eq 0; then
+  if test -z "$dir_arg"; then
+    echo "$0: no input file specified." >&2
+    exit 1
+  fi
+  # It's OK to call `install-sh -d' without argument.
+  # This can happen when creating conditional directories.
+  exit 0
+fi
+
+if test -z "$dir_arg"; then
+  do_exit='(exit $ret); exit $ret'
+  trap "ret=129; $do_exit" 1
+  trap "ret=130; $do_exit" 2
+  trap "ret=141; $do_exit" 13
+  trap "ret=143; $do_exit" 15
+
+  # Set umask so as not to create temps with too-generous modes.
+  # However, 'strip' requires both read and write access to temps.
+  case $mode in
+    # Optimize common cases.
+    *644) cp_umask=133;;
+    *755) cp_umask=22;;
+
+    *[0-7])
+      if test -z "$stripcmd"; then
+	u_plus_rw=
+      else
+	u_plus_rw='% 200'
+      fi
+      cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;;
+    *)
+      if test -z "$stripcmd"; then
+	u_plus_rw=
+      else
+	u_plus_rw=,u+rw
+      fi
+      cp_umask=$mode$u_plus_rw;;
+  esac
+fi
+
+for src
+do
+  # Protect names problematic for `test' and other utilities.
+  case $src in
+    -* | [=\(\)!]) src=./$src;;
+  esac
+
+  if test -n "$dir_arg"; then
+    dst=$src
+    dstdir=$dst
+    test -d "$dstdir"
+    dstdir_status=$?
+  else
+
+    # Waiting for this to be detected by the "$cpprog $src $dsttmp" command
+    # might cause directories to be created, which would be especially bad
+    # if $src (and thus $dsttmp) contains '*'.
+    if test ! -f "$src" && test ! -d "$src"; then
+      echo "$0: $src does not exist." >&2
+      exit 1
+    fi
+
+    if test -z "$dst_arg"; then
+      echo "$0: no destination specified." >&2
+      exit 1
+    fi
+    dst=$dst_arg
+
+    # If destination is a directory, append the input filename; won't work
+    # if double slashes aren't ignored.
+    if test -d "$dst"; then
+      if test -n "$no_target_directory"; then
+	echo "$0: $dst_arg: Is a directory" >&2
+	exit 1
+      fi
+      dstdir=$dst
+      dst=$dstdir/`basename "$src"`
+      dstdir_status=0
+    else
+      # Prefer dirname, but fall back on a substitute if dirname fails.
+      dstdir=`
+	(dirname "$dst") 2>/dev/null ||
+	expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+	     X"$dst" : 'X\(//\)[^/]' \| \
+	     X"$dst" : 'X\(//\)$' \| \
+	     X"$dst" : 'X\(/\)' \| . 2>/dev/null ||
+	echo X"$dst" |
+	    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+		   s//\1/
+		   q
+		 }
+		 /^X\(\/\/\)[^/].*/{
+		   s//\1/
+		   q
+		 }
+		 /^X\(\/\/\)$/{
+		   s//\1/
+		   q
+		 }
+		 /^X\(\/\).*/{
+		   s//\1/
+		   q
+		 }
+		 s/.*/./; q'
+      `
+
+      test -d "$dstdir"
+      dstdir_status=$?
+    fi
+  fi
+
+  obsolete_mkdir_used=false
+
+  if test $dstdir_status != 0; then
+    case $posix_mkdir in
+      '')
+	# Create intermediate dirs using mode 755 as modified by the umask.
+	# This is like FreeBSD 'install' as of 1997-10-28.
+	umask=`umask`
+	case $stripcmd.$umask in
+	  # Optimize common cases.
+	  *[2367][2367]) mkdir_umask=$umask;;
+	  .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;;
+
+	  *[0-7])
+	    mkdir_umask=`expr $umask + 22 \
+	      - $umask % 100 % 40 + $umask % 20 \
+	      - $umask % 10 % 4 + $umask % 2
+	    `;;
+	  *) mkdir_umask=$umask,go-w;;
+	esac
+
+	# With -d, create the new directory with the user-specified mode.
+	# Otherwise, rely on $mkdir_umask.
+	if test -n "$dir_arg"; then
+	  mkdir_mode=-m$mode
+	else
+	  mkdir_mode=
+	fi
+
+	posix_mkdir=false
+	case $umask in
+	  *[123567][0-7][0-7])
+	    # POSIX mkdir -p sets u+wx bits regardless of umask, which
+	    # is incompatible with FreeBSD 'install' when (umask & 300) != 0.
+	    ;;
+	  *)
+	    tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$
+	    trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0
+
+	    if (umask $mkdir_umask &&
+		exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1
+	    then
+	      if test -z "$dir_arg" || {
+		   # Check for POSIX incompatibilities with -m.
+		   # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or
+		   # other-writeable bit of parent directory when it shouldn't.
+		   # FreeBSD 6.1 mkdir -m -p sets mode of existing directory.
+		   ls_ld_tmpdir=`ls -ld "$tmpdir"`
+		   case $ls_ld_tmpdir in
+		     d????-?r-*) different_mode=700;;
+		     d????-?--*) different_mode=755;;
+		     *) false;;
+		   esac &&
+		   $mkdirprog -m$different_mode -p -- "$tmpdir" && {
+		     ls_ld_tmpdir_1=`ls -ld "$tmpdir"`
+		     test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1"
+		   }
+		 }
+	      then posix_mkdir=:
+	      fi
+	      rmdir "$tmpdir/d" "$tmpdir"
+	    else
+	      # Remove any dirs left behind by ancient mkdir implementations.
+	      rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null
+	    fi
+	    trap '' 0;;
+	esac;;
+    esac
+
+    if
+      $posix_mkdir && (
+	umask $mkdir_umask &&
+	$doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir"
+      )
+    then :
+    else
+
+      # The umask is ridiculous, or mkdir does not conform to POSIX,
+      # or it failed possibly due to a race condition.  Create the
+      # directory the slow way, step by step, checking for races as we go.
+
+      case $dstdir in
+	/*) prefix='/';;
+	[-=\(\)!]*) prefix='./';;
+	*)  prefix='';;
+      esac
+
+      eval "$initialize_posix_glob"
+
+      oIFS=$IFS
+      IFS=/
+      $posix_glob set -f
+      set fnord $dstdir
+      shift
+      $posix_glob set +f
+      IFS=$oIFS
+
+      prefixes=
+
+      for d
+      do
+	test X"$d" = X && continue
+
+	prefix=$prefix$d
+	if test -d "$prefix"; then
+	  prefixes=
+	else
+	  if $posix_mkdir; then
+	    (umask=$mkdir_umask &&
+	     $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break
+	    # Don't fail if two instances are running concurrently.
+	    test -d "$prefix" || exit 1
+	  else
+	    case $prefix in
+	      *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;;
+	      *) qprefix=$prefix;;
+	    esac
+	    prefixes="$prefixes '$qprefix'"
+	  fi
+	fi
+	prefix=$prefix/
+      done
+
+      if test -n "$prefixes"; then
+	# Don't fail if two instances are running concurrently.
+	(umask $mkdir_umask &&
+	 eval "\$doit_exec \$mkdirprog $prefixes") ||
+	  test -d "$dstdir" || exit 1
+	obsolete_mkdir_used=true
+      fi
+    fi
+  fi
+
+  if test -n "$dir_arg"; then
+    { test -z "$chowncmd" || $doit $chowncmd "$dst"; } &&
+    { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } &&
+    { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false ||
+      test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1
+  else
+
+    # Make a couple of temp file names in the proper directory.
+    dsttmp=$dstdir/_inst.$$_
+    rmtmp=$dstdir/_rm.$$_
+
+    # Trap to clean up those temp files at exit.
+    trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0
+
+    # Copy the file name to the temp name.
+    (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") &&
+
+    # and set any options; do chmod last to preserve setuid bits.
+    #
+    # If any of these fail, we abort the whole thing.  If we want to
+    # ignore errors from any of these, just make sure not to ignore
+    # errors from the above "$doit $cpprog $src $dsttmp" command.
+    #
+    { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } &&
+    { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } &&
+    { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } &&
+    { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } &&
+
+    # If -C, don't bother to copy if it wouldn't change the file.
+    if $copy_on_change &&
+       old=`LC_ALL=C ls -dlL "$dst"	2>/dev/null` &&
+       new=`LC_ALL=C ls -dlL "$dsttmp"	2>/dev/null` &&
+
+       eval "$initialize_posix_glob" &&
+       $posix_glob set -f &&
+       set X $old && old=:$2:$4:$5:$6 &&
+       set X $new && new=:$2:$4:$5:$6 &&
+       $posix_glob set +f &&
+
+       test "$old" = "$new" &&
+       $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1
+    then
+      rm -f "$dsttmp"
+    else
+      # Rename the file to the real destination.
+      $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null ||
+
+      # The rename failed, perhaps because mv can't rename something else
+      # to itself, or perhaps because mv is so ancient that it does not
+      # support -f.
+      {
+	# Now remove or move aside any old file at destination location.
+	# We try this two ways since rm can't unlink itself on some
+	# systems and the destination file might be busy for other
+	# reasons.  In this case, the final cleanup might fail but the new
+	# file should still install successfully.
+	{
+	  test ! -f "$dst" ||
+	  $doit $rmcmd -f "$dst" 2>/dev/null ||
+	  { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null &&
+	    { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; }
+	  } ||
+	  { echo "$0: cannot unlink or rename $dst" >&2
+	    (exit 1); exit 1
+	  }
+	} &&
+
+	# Now rename the file to the real destination.
+	$doit $mvcmd "$dsttmp" "$dst"
+      }
+    fi || exit 1
+
+    trap '' 0
+  fi
+done
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
+# End:
diff --git a/librsb-config.in b/librsb-config.in
new file mode 100755
index 0000000..ff5b5ce
--- /dev/null
+++ b/librsb-config.in
@@ -0,0 +1,154 @@
+#! /bin/sh
+
+# librsb-config
+# provides configuration info for librsb.
+
+# Copyright (C) 2010-2016 Michele Martone
+# Modeled after libpng-config, Copyright (C) 2002, 2004, 2006, 2007 Glenn Randers-Pehrson
+
+version="@LIBRSB_MAIN_RELEASE@ "
+prefix="@prefix@"
+exec_prefix="@exec_prefix@"
+libdir="@libdir@"
+cc="@CC@"
+fc="@FC@"
+cxx="@CXX@"
+#includedir="@includedir@/librsb at LIBRSB_MAJOR@@LIBRSB_MINOR@"
+#includedir="@includedir@/@LIBRSB_MAJOR@@LIBRSB_MINOR@"
+includedir="@includedir@/"
+#libs="-lrsb at LIBRSB_MAJOR@@LIBRSB_MINOR@"
+libs="-lrsb "
+#all_libs="-lrsb at LIBRSB_MAJOR@@LIBRSB_MINOR@ @LIBS@"
+#all_libs="-lrsb @LIBS@"
+extra_libs="@LIBS@ @OPENMP_FCFLAGS@ "
+all_libs="-lrsb "
+I_opts="-I${includedir} "
+L_opts="-L${libdir} "
+R_opts="-Wl,-rpath -Wl,${libdir} "
+cppflags=""
+ccopts=""
+ldopts=""
+
+usage()
+{
+    cat <<EOF
+Usage: $0 [OPTION] ...
+
+Known values for OPTION are:
+
+  --prefix        print librsb prefix
+  --libdir        print path to directory containing library
+  --libs          print library linking information
+  --extra_libs    print extra linking information (e.g., dependency libs)
+  --ccopts        print compiler options
+  --cc            print C compiler
+  --fc            print Fortran compiler
+  --cxx           print C++ compiler
+  --cppflags      print preprocessor flags
+  --cflags        print preprocessor flags, I_opts, and compiler options
+  --I_opts        print "-I" include options
+  --L_opts        print linker "-L" flags for dynamic linking
+  --R_opts        print dynamic linker "-R" or "-rpath" flags
+  --ldopts        print linker options
+  --ldflags       print linker flags (ldopts, L_opts, R_opts, and libs)
+  --static        revise subsequent outputs for static linking
+  --help          print this help and exit
+  --version       print version information
+EOF
+
+    exit $1
+}
+
+if test $# -eq 0; then
+    usage 1
+fi
+
+while test $# -gt 0; do
+    case "$1" in
+
+    --prefix)
+        echo -n ${prefix}
+        ;;
+
+    --version)
+        echo -n ${version}
+        exit 0
+        ;;
+
+    --help)
+        usage 0
+        ;;
+
+    --ccopts)
+        echo -n ${ccopts}
+        ;;
+
+    --cc)
+        echo -n ${cc}
+        ;;
+
+    --fc)
+        echo -n ${fc}
+        ;;
+
+    --cxx)
+        echo -n ${cxx}
+        ;;
+
+    --cppflags)
+        echo -n ${cppflags}
+        ;;
+
+    --cflags)
+        echo -n ${I_opts} ${cppflags} ${ccopts}
+        ;;
+
+    --libdir)
+        echo -n ${libdir}
+        ;;
+
+    --libs)
+        echo -n ${libs}
+        ;;
+
+    --extra_libs)
+        echo -n ${extra_libs}
+        ;;
+
+    --I_opts)
+        echo -n ${I_opts}
+        ;;
+
+    --L_opts)
+        echo -n ${L_opts}
+        ;;
+
+    --R_opts)
+        echo -n ${R_opts}
+        ;;
+
+    --ldopts)
+        echo -n ${ldopts}
+        ;;
+
+    --ldflags)
+        echo -n ${ldopts} ${L_opts} ${R_opts} ${libs}
+        ;;
+
+    --static)
+        R_opts=""
+        all_libs="${libdir}/librsb.a"
+        libs=${all_libs}
+        ;;
+
+    *)
+        usage 1
+        exit 1
+        ;;
+    esac
+    if test $# -gt 1; then echo -n \  ; fi
+    shift
+done
+echo
+
+exit 0
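
Once installed, a typical compile-and-link cycle against librsb with this script looks like the following (illustrative; rsb_test.c is a placeholder name, and --cflags already folds in the -I options):

    cc -c `librsb-config --cflags` rsb_test.c
    cc -o rsb_test rsb_test.o `librsb-config --ldflags --extra_libs`
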
diff --git a/librsb.pc.in b/librsb.pc.in
new file mode 100644
index 0000000..3bfad0e
--- /dev/null
+++ b/librsb.pc.in
@@ -0,0 +1,15 @@
+# Copyright (C) 2013-2016 Michele Martone
+# librsb.pc file created by librsb
+
+prefix="@prefix@"
+exec_prefix="@exec_prefix@"
+libdir="@libdir@"
+includedir="@includedir@/"
+version="@LIBRSB_MAIN_RELEASE@"
+
+Name: librsb
+Description: The Recursive Sparse Blocks sparse matrix library
+Version: ${version}
+Libs: -L${libdir} -lrsb
+Libs.private: @LIBS@ @OPENMP_FCFLAGS@
+Cflags: -I${includedir} 
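
The pkg-config route is equivalent (illustrative; assumes the installed librsb.pc is found, e.g. via PKG_CONFIG_PATH):

    cc -c `pkg-config --cflags librsb` rsb_test.c
    cc -o rsb_test rsb_test.o `pkg-config --libs librsb`
    pkg-config --static --libs librsb    # also emits Libs.private for static linking
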
diff --git a/libspblas_macros.m4 b/libspblas_macros.m4
new file mode 100644
index 0000000..4ca43c1
--- /dev/null
+++ b/libspblas_macros.m4
@@ -0,0 +1,951 @@
+dnl
+dnl
+dnl	Sparse BLAS interface code generating macros.
+dnl	Preliminary code.
+dnl
+define(`RSB_M4_SPBLAS_DOC_CONFIGUREDOUT_MSG',`dnl
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \warning \rsb_warn_configuredout_msg
+	*/
+')dnl
+dnl
+define(`RSB_M4_SPBLAS_DOC_UNIMPLEMENTED_MSG',`dnl
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \warning \rsb_warn_unimplemented_msg
+	*/
+')dnl
+dnl
+dnl
+define(`RSB_M4_SPBLAS_DOC_UNIMPLEMENTED_SBL1_MSG',`dnl
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  RSB_M4_SPBLAS_HELP_INFO(mop)
+	  \warning \rsb_spblasl1_msg
+	*/
+ifelse(lang,`lang_c',`dnl
+	RSB_SPB_INTERFACE_PREAMBLE
+')dnl
+dnl
+')dnl
+dnl
+define(`RSB_M4_SPBLAS_DOC_COMMENT',`dnl
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           RSB_M4_SPBLAS_HELP_INFO(mop)
+         */
+ifelse(lang,`lang_c',`dnl
+	RSB_SPB_INTERFACE_PREAMBLE
+')dnl
+dnl
+')dnl
+dnl
+include(`rsb_misc.m4')dnl
+include(`do_unroll.m4')dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_SPBLAS_MATRIX_ALL_LANGUAGES',(`lang_c',`f90'))dnl
+define(`RSB_M4_SPBLAS_MATRIX_ALL_TYPES',(`float',`double',`float complex',`double complex'))dnl
+define(`RSB_M4_SPBLAS_SYMMETRY_UL_CHARCODE',(`u'))dnl	FIXME
+dnl
+dnl
+define(`RSB_SPBLAS_FUNCTION_IDENTIFIER',`dnl
+pushdef(`type',$2)dnl
+pushdef(`mop',$1)dnl
+pushdef(`lang',$3)dnl
+ifelse(lang,`f90',`dnl
+`blas_'dnl
+',`dnl
+`BLAS_'dnl
+')dnl
+dnl
+RSB_M4_SPBLAS_TYPE_CHARCODE(type)`'dnl
+dnl
+dnl	"US" stands for "Unstructured Sparse"
+dnl
+`us'dnl
+mop`'dnl
+dnl
+dnl	FIXME: the trailing underscore should be removed.
+dnl
+ifelse(lang,`f90',RSB_M4_FORTRAN_SYMBOL_ADD_TO_C,`')`'dnl
+dnl
+popdef(`lang')dnl
+popdef(`mop')dnl
+popdef(`type')dnl
+')dnl
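
The identifiers this macro builds follow the usual Sparse BLAS naming scheme: a type charcode (s/d/c/z, assuming the standard mapping from float/double/float complex/double complex), then "us" for "Unstructured Sparse", then the operation name; the Fortran binding is lowercased and, per the FIXME above, currently gets a trailing underscore. A sketch of the resulting names for the mv operation (printed by hand, not generated by these macros):

    for t in s d c z; do
      echo "C: BLAS_${t}usmv    Fortran binding: blas_${t}usmv_"
    done
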
+dnl
+dnl
+dnl
+define(`RSB_SPBLAS_OVER_TYPE_ARGVAR_REFERENCE',`dnl
+pushdef(`type',$1)dnl
+ifelse(RSB_M4_MEMBER(type,`double complex',`float complex'),`1',`',`dnl
+ifelse(RSB_M4_MEMBER(type,`double complex',`float complex'),`1',`',`&')`'')dnl
+popdef(`type')dnl
+')dnl
+dnl
+dnl define(`RSB_SPBLAS_OVER_TYPE_POINTER_DEREFERENCE',`dnl
+dnl pushdef(`type',$1)dnl
+dnl ifelse(RSB_M4_MEMBER(type,`double complex',`float complex'),`1',`*',`dnl
+dnl ifelse(RSB_M4_MEMBER(type,`double complex *',`float complex *'),`1',`*',` ')`'')dnl
+dnl popdef(`type')dnl
+dnl ')dnl
+dnl
+define(`RSB_M4_SPBLAS_FUNCTION',`dnl
+pushdef(`type',$1)dnl
+pushdef(`mop',$2)dnl
+pushdef(`tri',$3)dnl
+pushdef(`want_what',$4)dnl
+pushdef(`over',$5)dnl
+pushdef(`lang',$6)dnl
+dnl
+RSB_M4_SPBLAS_MATRIX_ALL_L1_FUNCTION(type,mop,tri,want_what,over,lang)`'dnl
+RSB_M4_SPBLAS_MATRIX_ALL_L2_FUNCTION(type,mop,tri,want_what,over,lang)`'dnl
+RSB_M4_SPBLAS_MATRIX_CREATION_FUNCS(type,mop,tri,want_what,over,lang)`'dnl
+RSB_M4_SPBLAS_EXTRA_FUNCTION(type,mop,tri,want_what,over,lang)`'dnl
+dnl
+popdef(`lang')dnl
+popdef(`over')dnl
+popdef(`want_what')dnl
+popdef(`tri')dnl
+popdef(`mop')dnl
+popdef(`type')dnl
+dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+define(`RSB_SPBLAS_TO_RSB_FIX_ARGS',`dnl
+pushdef(`type',$1)dnl
+pushdef(`mop',$2)dnl
+pushdef(`tri',$3)dnl
+pushdef(`want_what',$4)dnl
+pushdef(`over',$5)dnl
+pushdef(`lang',$6)dnl
+dnl
+ifelse(`'RSB_M4_MEMBER(mop,`cr_insert_entry')`'RSB_M4_IS_COMPLEX_TYPE(type)`',`10',`A,RSB_SPBLAS_OVER_TYPE_ARGVAR_REFERENCE(type)val,i,j',`dnl
+ifelse(`'RSB_M4_MEMBER(mop,`axpy')`'RSB_M4_MEMBER(lang,`lang_c')`',`11',`/* FIXME: this is an exception; a formal substitution technique should be used instead */nnz,&alpha,x,indx,y,incy,index_base ',`dnl
+RSB_M4_ARGS_TO_ACTUAL_ARGS((RSB_M4_SPBLAS_FUNCTION(type,mop,tri,`ARGS',over,lang)))`'dnl
+')`'dnl
+')`'dnl
+dnl
+dnl
+ifelse(RSB_M4_MEMBER(mop,RSB_M4_SPBLAS_MATRIX_BEGIN_MOPS),`1',`,RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type)')`'dnl
+dnl
+popdef(`lang')dnl
+popdef(`want_what')dnl
+popdef(`over')dnl
+popdef(`tri')dnl
+popdef(`mop')dnl
+popdef(`type')dnl
+')dnl
+dnl
+define(`RSB_SPBLAS_OVER_TYPE',`dnl
+pushdef(`type',$1)dnl
+pushdef(`over',$2)dnl
+ifelse(over,`1',`dnl
+ifelse(RSB_M4_MEMBER(type,`double complex',`float complex'),`1',`const void *',`dnl
+ifelse(RSB_M4_MEMBER(type,`double complex *',`float complex *'),`1',`void *',type )`'')dnl
+',`dnl
+type `'dnl
+')dnl
+dnl ifelse(RSB_M4_MEMBER(type,`double complex',`float complex'),`1',`const 'type` *',`dnl
+dnl ifelse(RSB_M4_MEMBER(type,`double complex *',`float complex *'),`1',`'type`',type )`'')dnl
+popdef(`over')dnl
+popdef(`type')dnl
+')dnl
+dnl
+define(`RSB_M4_SPBLAS_TYPE_CHARCODE',`dnl
+pushdef(`type',$1)`'dnl
+dnl
+tolowercase(`RSB_M4_TYPE_CHARCODE(type)')`'dnl
+dnl
+popdef(`type')`'dnl
+dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_SPBLAS_MOP_CODE_TRANSLATE',`dnl
+pushdef(`mop',$1)dnl
+dnl
+dnl	FIXME
+dnl
+ifelse(mop,`mv',`spmv_uxux')`'dnl
+ifelse(mop,`mm',`spmm_xx')`'dnl
+ifelse(mop,`sv',`spsv_uxua')`'dnl
+ifelse(mop,`sm',`spsm_xxl')`'dnl
+popdef(`mop')dnl
+dnl
+')dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_SPBLAS_HELP_INFO',`dnl
+pushdef(`mop',$1)dnl
+dnl
+dnl	FIXME: for some reason we are not using these!
+dnl
+ifelse(lang,`f90',`dnl
+')dnl
+ifelse(RSB_M4_MEMBER(mop,`dot'),`1',`\rsb_spblasl1_dot_msg')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`axpy'),`1',`\rsb_spblasl1_axpy_msg')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`ga'),`1',`\rsb_spblasl1_ga_msg')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`gz'),`1',`\rsb_spblasl1_gz_msg')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`sc'),`1',`\rsb_spblasl1_sc_msg')`'dnl
+dnl
+dnl
+ifelse(mop,`mv',`\rsb_spblasl2_mv_msg')`'dnl
+ifelse(mop,`mm',`\rsb_spblasl2_mm_msg')`'dnl
+ifelse(mop,`sv',`\rsb_spblasl2_sv_msg')`'dnl
+ifelse(mop,`sm',`\rsb_spblasl2_sm_msg')`'dnl
+ifelse(mop,`cr_begin',`\rsb_spblasl2_cr_begin_msg')`'dnl
+ifelse(mop,`cr_block_begin',`\rsb_spblasl2_cr_block_msg')`'dnl
+ifelse(mop,`cr_variable_block_begin',`\rsb_spblasl2_cr_vbr_msg')`'dnl
+ifelse(mop,`cr_insert_entry',`\rsb_spblasl2_cr_insert_entry_msg')`'dnl
+ifelse(mop,`cr_insert_entries',`\rsb_spblasl2_cr_insert_entries_msg')`'dnl
+ifelse(mop,`cr_insert_col',`\rsb_spblasl2_cr_insert_col_msg')`'dnl
+ifelse(mop,`cr_insert_row',`\rsb_spblasl2_cr_insert_row_msg')`'dnl
+ifelse(mop,`cr_insert_block',`\rsb_spblasl2_cr_insert_block_msg')`'dnl
+ifelse(mop,`cr_insert_clique',`\rsb_spblasl2_cr_insert_clique_msg')`'dnl
+ifelse(mop,`cr_end',`\rsb_spblasl2_cr_end_msg')`'dnl
+ifelse(mop,`ds',`\rsb_spblasl2_ds_msg')`'dnl
+dnl
+ifelse(mop,`sp',`\rsb_spblasl2_sp_msg')`'dnl
+ifelse(mop,`gp',`\rsb_spblasl2_gp_msg')`'dnl
+dnl
+dnl	Extra operations:
+dnl
+ifelse(mop,`get_diag',`\rsb_spblasl2e_usget_diag_msg')`'dnl
+ifelse(mop,`get_element',`\rsb_spblasl2e_usget_element_norm_msg')`'dnl
+ifelse(mop,`get_matrix_nnz',`\rsb_spblasl2e_usget_matrix_nnz_msg')`'dnl
+ifelse(mop,`get_infinity_norm',`\rsb_spblasl2e_usget_infinity_norm_msg')`'dnl
+ifelse(mop,`get_rows_nnz',`\rsb_spblasl2e_usget_rows_nnz_msg')`'dnl
+ifelse(mop,`get_rows_sparse',`\rsb_spblasl2e_usget_rows_sparse_msg')`'dnl
+ifelse(mop,`rows_scale',`\rsb_spblasl2e_usrows_scale_msg')`'dnl
+ifelse(mop,`set_elements',`\rsb_spblasl2e_usset_elements_norm_msg')`'dnl
+ifelse(mop,`set_element',`\rsb_spblasl2e_usset_element_norm_msg')`'dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+ifelse(RSB_M4_MEMBER(mop,RSB_M4_SPBLAS_MATRIX_BEGIN_MOPS),`1',`dnl
+ifelse(lang,`f90',`\rsb_spblasl2_Ap_msg \rsb_spblas_istat_msg \rsb_spblas_set_mtx_msg',`\rsb_spblas_return_mtx_msg')`'dnl
+',`dnl
+ifelse(lang,`f90',`\rsb_spblas_istat_msg',`\rsb_spblas_return_msg')`'dnl
+')dnl
+popdef(`mop')dnl
+dnl
+')dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_LIST_MEMBER',`dnl
+pushdef(`E',$1)dnl
+pushdef(`L',$2)dnl
+dnl
+pushdef(`M',`F')dnl
+dnl
+foreach(`X',L,`dnl
+ifelse(X,E,`ifelse(M,`T',`',`pushdef(`M',`T')')')dnl
+')dnl
+ifelse(M,`T',`1'popdef(`M'),`0')dnl
+popdef(`M')dnl
+dnl
+popdef(`E')dnl
+popdef(`L')dnl
+dnl
+')dnl
+dnl
+define(`RSB_M4_DIFFERENCE',`dnl
+pushdef(`L1',$1)dnl
+pushdef(`L2',$2)dnl
+pushdef(`fel',`T')dnl
+dnl
+dnl
+foreach(`E1',L1,`dnl
+dnl E1 L2 : RSB_M4_LIST_MEMBER(E1,L2)
+ifelse(RSB_M4_LIST_MEMBER(E1,L2),`1',`',`ifelse(fel,`T',`pushdef(`fel',`F')E1',`,E1')')`'dnl
+')dnl
+ifelse(fel,`T',popdef(`fel'),popdef(`fel')popdef(`fel'))dnl
+popdef(`L1')dnl
+popdef(`L2')dnl
+')dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_INTERSECTION',`dnl
+pushdef(`L1',$1)dnl
+pushdef(`L2',$2)dnl
+pushdef(`fel',`T')dnl
+dnl
+foreach(`E1',L1,`dnl
+foreach(`E2',L2,`dnl
+ifelse(E1,E2,`dnl
+ifelse(fel,`T',`E1`'pushdef(`fel',`F')',``,'E1`'')dnl
+')dnl
+')')dnl
+dnl
+ifelse(fel,`T',popdef(`fel'),popdef(`fel')popdef(`fel'))dnl
+popdef(`L1')dnl
+popdef(`L2')dnl
+')dnl
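
Together, RSB_M4_LIST_MEMBER, RSB_M4_DIFFERENCE and RSB_M4_INTERSECTION give membership, set difference and intersection over quoted m4 lists. A quick expansion check from the shell (a sketch only: it assumes GNU m4, that this file and its includes sit in the current directory, and it tolerates any stray blank lines the includes may emit):

    printf '%s\n' \
      "include(\`libspblas_macros.m4')dnl" \
      "RSB_M4_LIST_MEMBER(\`b',(\`a',\`b',\`c'))" \
      "RSB_M4_INTERSECTION((\`a',\`b',\`c'),(\`b',\`c',\`d'))" \
      | m4 -I.    # expected expansions: 1, then b,c
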
+dnl
+define(`RSB_M4_SBLAS_MATRIX_SUPPORTED_TYPES',`dnl
+dnl
+RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES`'dnl
+dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_PSBLAS_MATRIX_SUPPORTED_TYPES',`dnl
+dnl
+(RSB_M4_INTERSECTION(RSB_M4_SPBLAS_MATRIX_ALL_TYPES,(WANT_TYPES)))dnl
+dnl
+')dnl
+dnl
+define(`RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES_LIST',`dnl
+dnl
+RSB_M4_INTERSECTION(RSB_M4_SPBLAS_MATRIX_ALL_TYPES,(WANT_TYPES))dnl
+dnl
+')dnl
+dnl
+define(`RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES_LIST_LENGTH',`dnl
+dnl	FIXME: this is broken.
+dnl
+ifelse(RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES_LIST,`',0,RSB_M4_LIST_LENGTH(RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES_LIST))dnl
+dnl
+')dnl
+dnl
+ifelse(RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES_LIST,`',`dnl
+define(`RSB_M4_DEFAULT_POSSIBLY_BLAS_TYPE',RSB_M4_INVALID_TYPE)dnl
+define(`RSB_M4_DEFAULT_POSSIBLY_BLAS_TYPE_OR_DEFAULT',RSB_M4_DEFAULT_TYPE)dnl
+',`
+define(`RSB_M4_DEFAULT_POSSIBLY_BLAS_TYPE',`RSB_M4_FIRST(RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES_LIST,RSB_M4_INVALID_TYPE)')dnl
+define(`RSB_M4_DEFAULT_POSSIBLY_BLAS_TYPE_OR_DEFAULT',`RSB_M4_FIRST(RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES_LIST,RSB_M4_INVALID_TYPE)')dnl
+')dnl
+dnl
+define(`RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES',`dnl
+dnl
+(RSB_M4_INTERSECTION(RSB_M4_SPBLAS_MATRIX_ALL_TYPES,(WANT_TYPES)))dnl
+dnl
+')dnl
+dnl
+define(`RSB_M4_SPBLAS_MATRIX_UNSUPPORTED_TYPES',`dnl
+dnl
+(RSB_M4_DIFFERENCE(RSB_M4_SPBLAS_MATRIX_ALL_TYPES,(WANT_TYPES)))dnl
+')dnl
+dnl
+define(`RSB_M4_SPBLAS_MATRIX_BEGIN_MOPS',``cr_begin',`cr_block_begin',`cr_variable_block_begin'')dnl
+define(`RSB_M4_SPBLAS_MATRIX_END_MOPS',``cr_end'')dnl
+define(`RSB_M4_SPBLAS_MATRIX_INSERTION_MOPS',``cr_insert_entry',`cr_insert_entries',`cr_insert_col',`cr_insert_row',`cr_insert_clique',`cr_insert_block'')dnl
+define(`RSB_M4_SPBLAS_MATRIX_CREATION_MOPS_LIST',`RSB_M4_SPBLAS_MATRIX_BEGIN_MOPS,RSB_M4_SPBLAS_MATRIX_END_MOPS,RSB_M4_SPBLAS_MATRIX_INSERTION_MOPS')dnl
+define(`RSB_M4_SPBLAS_MATRIX_CREATION_MOPS',`(RSB_M4_SPBLAS_MATRIX_CREATION_MOPS_LIST)')dnl
+dnl
+dnl
+dnl	FIXME : Level 1 Sparse BLAS is not implemented
+define(`RSB_M4_SPBLAS_MATRIX_ALL_L1_MOPS_LIST',``dot',`axpy',`ga',`gz',`sc'')dnl
+define(`RSB_M4_SPBLAS_MATRIX_ALL_L1_MOPS',(RSB_M4_SPBLAS_MATRIX_ALL_L1_MOPS_LIST))dnl
+define(`RSB_M4_SPBLAS_MATRIX_ALL_L2_MOPS_LIST',``mv',`sv'')dnl
+define(`RSB_M4_SPBLAS_MATRIX_ALL_L2_MOPS',(RSB_M4_SPBLAS_MATRIX_ALL_L2_MOPS_LIST))dnl
+define(`RSB_M4_SPBLAS_MATRIX_ALL_L3_MOPS_LIST',``mm',`sm'')dnl
+define(`RSB_M4_SPBLAS_MATRIX_ALL_L3_MOPS',(RSB_M4_SPBLAS_MATRIX_ALL_L3_MOPS_LIST))dnl
+define(`RSB_M4_SPBLAS_MATRIX_ALL_L23_MOPS_LIST',``mv',`sv',`mm',`sm'')dnl
+dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_IMPLEMENTED_CODE_FOR_BLAS_CALL',`dnl
+pushdef(`type',$1)dnl
+pushdef(`mop',$2)dnl
+pushdef(`tri',$3)dnl
+dnl
+dnl
+RSB_M4_LIST_MEMBER(type,RSB_M4_MATRIX_TYPES)dnl
+RSB_M4_OR(`dnl
+RSB_M4_LIST_MEMBER(RSB_M4_SPBLAS_MOP_CODE_TRANSLATE(mop),RSB_M4_MATRIX_OPS)dnl
+dnl
+dnl	FIXME : this is a fake, always-positive shortcut to get "11"!
+,1')dnl
+dnl
+dnl
+popdef(`tri')dnl
+popdef(`mop')dnl
+popdef(`type')dnl
+dnl
+')dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_SPBLAS_MATRIX_ALL_L1_FUNCTION',`dnl
+pushdef(`type',$1)dnl
+pushdef(`mop',$2)dnl
+pushdef(`tri',$3)dnl
+pushdef(`want_what',$4)dnl
+pushdef(`over',$5)dnl
+pushdef(`lang',$6)dnl
+pushdef(`args',`$1,$2,$3')dnl
+dnl
+dnl
+ifelse(RSB_M4_MEMBER(mop,RSB_M4_SPBLAS_MATRIX_ALL_L1_MOPS_LIST),`1',`dnl
+dnl
+ifelse(want_what,`function_declaration',`dnl
+RSB_M4_SPBLAS_MATRIX_ALL_L1_FUNCTION(type,mop,tri,`TYPE',1,lang)` 'dnl
+RSB_M4_SPBLAS_MATRIX_ALL_L1_FUNCTION(type,mop,tri,`ID',1,lang)dnl
+(RSB_M4_SPBLAS_MATRIX_ALL_L1_FUNCTION(type,mop,tri,`ARGS',1,lang));
+')dnl
+dnl
+ifelse(want_what,`function_definition',`dnl
+RSB_M4_SPBLAS_MATRIX_ALL_L1_FUNCTION(type,mop,tri,`TYPE',1,lang)` 'dnl
+RSB_M4_SPBLAS_MATRIX_ALL_L1_FUNCTION(type,mop,tri,`ID',1,lang)dnl
+(RSB_M4_SPBLAS_MATRIX_ALL_L1_FUNCTION(type,mop,tri,`ARGS',1,lang))
+RSB_M4_SPBLAS_MATRIX_ALL_L1_FUNCTION(type,mop,tri,`BODY',1,lang)dnl
+')dnl
+dnl
+ifelse(want_what,`BODY',`dnl
+{
+dnl
+ifelse(RSB_M4_IMPLEMENTED_CODE_FOR_BLAS_CALL(type,mop,tri),`11',`dnl
+dnl
+
+ifelse(RSB_M4_MEMBER(mop,RSB_M4_SPBLAS_MATRIX_ALL_L1_MOPS_LIST),`1',`dnl
+ifelse($0(type,mop,tri,`TYPE',1,lang),`void',`dnl
+RSB_M4_SPBLAS_DOC_UNIMPLEMENTED_SBL1_MSG
+	int istatv = $0(type,mop,tri,`ID',1,`lang_c')`'(RSB_M4_FORTRAN_ADDRS_TO_C_VALUES(($0(type,mop,tri,`ARGS',1,`lang_c'))) );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+	return;
+',`dnl C:
+RSB_M4_SPBLAS_DOC_UNIMPLEMENTED_SBL1_MSG
+#if RSB_WANT_SPARSE_BLAS_LEVEL_1
+	RSB_SPB_INTERFACE_RETURN(`rsb__BLAS_'X`us'`'mop`'(RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type),RSB_SPBLAS_TO_RSB_FIX_ARGS(type,mop,tri,`ID',over,lang)))
+#else  /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+	RSB_SPB_INTERFACE_RETURN(RSB_BLAS_ERROR);
+#endif /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+')dnl
+')`'dnl
+dnl
+',`dnl
+dnl RSB_M4_SPBLAS_DOC_UNIMPLEMENTED_MSG
+dnl	return RSB_BLAS_ERROR;
+ifelse($0(type,mop,tri,`TYPE',1,lang),`void',`dnl
+RSB_M4_SPBLAS_DOC_UNIMPLEMENTED_SBL1_MSG
+RSB_M4_SPBLAS_DOC_CONFIGUREDOUT_MSG
+	RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_ERROR);
+	return;
+',`dnl
+RSB_M4_SPBLAS_DOC_UNIMPLEMENTED_SBL1_MSG
+RSB_M4_SPBLAS_DOC_CONFIGUREDOUT_MSG
+	RSB_SPB_INTERFACE_RETURN(RSB_BLAS_ERROR);
+')dnl
+')dnl
+dnl
+}
+')dnl
+dnl
+ifelse(want_what,`TYPE',`dnl
+ifelse(lang,`f90',`dnl
+void`'dnl
+',`dnl
+int`'dnl
+')dnl
+')dnl
+dnl
+ifelse(lang,`f90',`dnl
+ifelse(want_what,`ARGS',`dnl
+RSB_M4_C_VALUES_TO_FORTRAN_ADDRS(($0(type,mop,tri,`ARGS',1,`lang_c')`'dnl
+ifelse(RSB_M4_MEMBER(mop,RSB_M4_SPBLAS_MATRIX_BEGIN_MOPS),`1',`,blas_sparse_matrix A')`'dnl
+`,int istat'`'dnl
+))`'dnl
+')dnl
+')dnl
+dnl
+ifelse(want_what,`ARGS',`dnl
+ifelse(lang,`lang_c',`dnl
+ifelse(RSB_M4_MEMBER(mop,`dot'),`1',`enum blas_conj_type conj, int nnz, const RSB_SPBLAS_OVER_TYPE(type `*',over)x,
+		const int *indx, const RSB_SPBLAS_OVER_TYPE(type `*',over)y, int incy, RSB_SPBLAS_OVER_TYPE(type `*',over)r,
+		enum blas_base_type index_base')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`axpy'),`1',`int nnz, RSB_SPBLAS_OVER_TYPE(type,over) alpha, const RSB_SPBLAS_OVER_TYPE(type `*',over)x, const int *indx,
+                 RSB_SPBLAS_OVER_TYPE(type `*',over)y, int incy, enum blas_base_type index_base')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`ga'),`1',`int nnz, const RSB_SPBLAS_OVER_TYPE(type `*',over)y, int incy, RSB_SPBLAS_OVER_TYPE(type `*',over)x, const int *indx,
+              enum blas_base_type index_base')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`gz'),`1',`int nnz, RSB_SPBLAS_OVER_TYPE(type `*',over)y, int incy, RSB_SPBLAS_OVER_TYPE(type `*',over)x, const int *indx,
+              enum blas_base_type index_base')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`sc'),`1',`int nnz, const RSB_SPBLAS_OVER_TYPE(type `*',over)x, RSB_SPBLAS_OVER_TYPE(type `*',over)y, int incy, const int *indx,
+              enum blas_base_type index_base')`'dnl
+')dnl
+')dnl
+dnl
+dnl
+ifelse(want_what,`ID',`dnl
+RSB_SPBLAS_FUNCTION_IDENTIFIER(mop,type,lang)`'dnl
+')dnl
+dnl
+')dnl
+dnl
+popdef(`args')dnl
+popdef(`lang')dnl
+popdef(`want_what')dnl
+popdef(`over')dnl
+popdef(`tri')dnl
+popdef(`mop')dnl
+popdef(`type')dnl
+')dnl
+dnl
+define(`RSB_M4_C_ARG_TYPE',`dnl
+patsubst(patsubst($1,`\(.*\)\( \|\*\)\([a-zA-Z_0-9]+\)$',`\1\2'),`\([^ ]*\) *$',`\1')`'dnl
+')dnl
+dnl
+define(`RSB_M4_C_ARG_ID',`dnl
+patsubst(patsubst($1,`\(.*\)\( \|\*\)\([a-zA-Z_0-9]+\)$',`\3'),`\([^ ]*\) *$',`\1')`'dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_FORTRAN_ADDR_TO_C_VAL',`dnl
+dnl
+ifelse(patsubst(RSB_M4_C_ARG_TYPE($1),`[a-zA-Z_0-9 ]',`'),`*',`',`*')RSB_M4_C_ARG_ID($1)`'dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_C_VALUE_REFS_TO_ADDR',`dnl
+dnl
+RSB_M4_C_ARG_TYPE($1)`'dnl
+ifelse(patsubst(RSB_M4_C_ARG_TYPE($1),`[a-zA-Z_0-9 ]',`'),`*',`',`*')RSB_M4_C_ARG_ID($1)`'dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_C_VALUES_TO_FORTRAN_ADDRS',`dnl
+dnl
+dnl	WARNING : this is THIN ICE :)
+pushdef(`firstarg',`0')dnl
+foreach(`arg',`$1',`ifelse(firstarg,`0',`pushdef(`firstarg',1)',`,')`'RSB_M4_C_VALUE_REFS_TO_ADDR(arg)')`'dnl
+ifelse(firstarg,`1',`popdef(`firstarg')')dnl
+popdef(`firstarg')dnl
+')dnl
+dnl
+define(`RSB_M4_FORTRAN_ADDRS_TO_C_VALUES',`dnl
+dnl
+dnl	WARNING : this is THIN ICE :)
+pushdef(`firstarg',`0')dnl
+foreach(`arg',`$1',`ifelse(firstarg,`0',`pushdef(`firstarg',1)',`,')`'RSB_M4_FORTRAN_ADDR_TO_C_VAL(arg)')`'dnl
+ifelse(firstarg,`1',`popdef(`firstarg')')dnl
+popdef(`firstarg')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_SPBLAS_MATRIX_ALL_L2_FUNCTION',`dnl
+dnl
+pushdef(`type',$1)dnl
+pushdef(`mop',$2)dnl
+pushdef(`tri',$3)dnl
+pushdef(`want_what',$4)dnl
+pushdef(`over',$5)dnl
+pushdef(`lang',$6)dnl
+pushdef(`args',`$1,$2,$3')dnl
+dnl
+dnl	FIXME
+dnl
+ifelse(RSB_M4_MEMBER(mop,RSB_M4_SPBLAS_MATRIX_ALL_L23_MOPS_LIST),`1',`dnl
+dnl
+dnl
+ifelse(want_what,`function_declaration',`dnl
+RSB_M4_SPBLAS_MATRIX_ALL_L2_FUNCTION(type,mop,tri,`TYPE',1,lang)` 'dnl
+RSB_M4_SPBLAS_MATRIX_ALL_L2_FUNCTION(type,mop,tri,`ID',1,lang)dnl
+(RSB_M4_SPBLAS_MATRIX_ALL_L2_FUNCTION(type,mop,tri,`ARGS',1,lang));
+')dnl
+dnl
+ifelse(want_what,`function_definition',`dnl
+RSB_M4_SPBLAS_MATRIX_ALL_L2_FUNCTION(type,mop,tri,`TYPE',1,lang)` 'dnl
+RSB_M4_SPBLAS_MATRIX_ALL_L2_FUNCTION(type,mop,tri,`ID',1,lang)dnl
+(RSB_M4_SPBLAS_MATRIX_ALL_L2_FUNCTION(type,mop,tri,`ARGS',1,lang))
+RSB_M4_SPBLAS_MATRIX_ALL_L2_FUNCTION(type,mop,tri,`BODY',1,lang)dnl
+')dnl
+dnl
+ifelse(want_what,`BODY',`dnl
+{
+dnl
+ifelse(lang,`lang_c',`dnl
+dnl
+ifelse(RSB_M4_IMPLEMENTED_CODE_FOR_BLAS_CALL(type,mop,tri),`11',`dnl
+dnl
+dnl
+ifelse(RSB_M4_MEMBER(mop,`mv'),`1',`dnl
+RSB_M4_SPBLAS_DOC_COMMENT
+{
+	const type beta = RSB_M4_ONE(type);
+dnl	const struct rsb_mtx_t *mtxAp = rsb__BLAS_inner_matrix_retrieve(A);
+dnl	RSB_SPB_INTERFACE_RETURN(RSB_ERROR_TO_BLAS_ERROR(rsb_spmv(rsb_blas_trans_to_rsb_trans(transA),RSB_SPBLAS_OVER_TYPE_ARGVAR_REFERENCE(type)alpha,mtxAp,x,incx,&beta,y,incy)))
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusmv(transA,RSB_SPBLAS_OVER_TYPE_ARGVAR_REFERENCE(type)alpha,A,x,incx,&beta,y,incy))
+}
+	')`'dnl
+dnl
+dnl	FIXME : the & operator should not be used when the type is complex!
+dnl
+ifelse(RSB_M4_MEMBER(mop,`sv'),`1',`dnl
+RSB_M4_SPBLAS_DOC_COMMENT
+dnl
+dnl	FIXME : the & operator should not be used when the type is complex!
+dnl
+{
+	const struct rsb_mtx_t *mtxAp = rsb__BLAS_inner_matrix_retrieve(T);
+	RSB_SPB_INTERFACE_RETURN(RSB_ERROR_TO_BLAS_ERROR(rsb__do_spsv(rsb_blas_trans_to_rsb_trans(transT),RSB_SPBLAS_OVER_TYPE_ARGVAR_REFERENCE(type)alpha,mtxAp,x,incx,x,incx)))
+}
+	')`'dnl
+dnl
+ifelse(RSB_M4_MEMBER(mop,`mm'),`1',`dnl
+RSB_M4_SPBLAS_DOC_COMMENT
+{
+	const type beta = RSB_M4_ONE(type);
+dnl	RSB_SPB_INTERFACE_RETURN(RSB_ERROR_TO_BLAS_ERROR(rsb__do_spmm(rsb_blas_trans_to_rsb_trans(transA),RSB_SPBLAS_OVER_TYPE_ARGVAR_REFERENCE(type)alpha,rsb__BLAS_inner_matrix_retrieve(A),nrhs,rsb_blas_order_to_rsb_order(order),b,ldb,&beta,c,ldc,RSB_OP_FLAG_DEFAULT)))
+dnl	RSB_SPB_INTERFACE_RETURN(RSB_ERROR_TO_BLAS_ERROR(rsb__do_spmm(rsb__BLAS_inner_matrix_retrieve(A),b,c,ldb,ldc,nrhs,rsb_blas_trans_to_rsb_trans(transA),RSB_SPBLAS_OVER_TYPE_ARGVAR_REFERENCE(type)alpha,&beta,rsb_blas_order_to_rsb_order(order),RSB_OP_FLAG_DEFAULT)))
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusmm(transA,RSB_SPBLAS_OVER_TYPE_ARGVAR_REFERENCE(type)alpha,A,b,ldb,&beta,c,ldc,nrhs,order))
+}
+	')`'dnl
+dnl
+ifelse(RSB_M4_MEMBER(mop,`sm'),`1',`dnl
+RSB_M4_SPBLAS_DOC_COMMENT
+{
+	const type beta = RSB_M4_ZERO(type);
+	RSB_SPB_INTERFACE_RETURN(RSB_ERROR_TO_BLAS_ERROR(rsb__do_spsm(rsb_blas_trans_to_rsb_trans(transT),RSB_SPBLAS_OVER_TYPE_ARGVAR_REFERENCE(type)alpha,rsb__BLAS_inner_matrix_retrieve(T),nrhs,rsb_blas_order_to_rsb_order(order),&beta,b,ldb,b,ldb)))
+}
+	')`'dnl
+dnl
+',`dnl
+dnl	/* FIXME : missing implementation */
+dnl	RSB_M4_SPBLAS_DOC_UNIMPLEMENTED_MSG
+	/*!
+	 RSB_M4_SPBLAS_HELP_INFO(mop)
+	*/
+RSB_M4_SPBLAS_DOC_CONFIGUREDOUT_MSG
+	return RSB_BLAS_ERROR;
+')dnl
+dnl
+dnl
+',`dnl
+dnl
+RSB_M4_SPBLAS_DOC_COMMENT
+	int istatv = $0(type,mop,tri,`ID',1,`lang_c')`'(RSB_M4_FORTRAN_ADDRS_TO_C_VALUES((RSB_M4_SPBLAS_MATRIX_ALL_L2_FUNCTION(type,mop,tri,`ARGS',1,`lang_c'))));
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+dnl
+')dnl
+dnl
+dnl
+}
+')dnl
+dnl
+ifelse(want_what,`TYPE',`dnl
+ifelse(lang,`f90',`dnl
+void`'dnl
+',`dnl
+int`'dnl
+')dnl
+')dnl
+dnl
+ifelse(lang,`f90',`dnl
+ifelse(want_what,`ARGS',`dnl
+dnl $0(type,mop,tri,`ARGS',1,`lang_c')`'dnl
+RSB_M4_C_VALUES_TO_FORTRAN_ADDRS(($0(type,mop,tri,`ARGS',1,`lang_c')`'dnl
+ifelse(`RSB_M4_MEMBER(mop,RSB_M4_SPBLAS_MATRIX_BEGIN_MOPS)',`1',`,blas_sparse_matrix A')`'dnl
+`,int istat'`'dnl
+))`'dnl
+')dnl
+')dnl
+dnl
+ifelse(want_what,`ARGS',`dnl
+dnl
+ifelse(lang,`lang_c',`dnl
+dnl
+ifelse(RSB_M4_MEMBER(mop,`mv'),`1',`enum blas_trans_type transA, RSB_SPBLAS_OVER_TYPE(type,over)alpha,
+    blas_sparse_matrix A, const RSB_SPBLAS_OVER_TYPE(type `*',over)x, int incx, RSB_SPBLAS_OVER_TYPE(type `*',over)y, int incy')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`sv'),`1',`enum blas_trans_type transT, RSB_SPBLAS_OVER_TYPE(type,over)alpha,
+    blas_sparse_matrix T, RSB_SPBLAS_OVER_TYPE(type `*',over)x, int incx')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`mm'),`1',`enum blas_order_type order, enum blas_trans_type transA,
+   int nrhs, RSB_SPBLAS_OVER_TYPE(type,over)alpha, blas_sparse_matrix A, const RSB_SPBLAS_OVER_TYPE(type `*',over)b, int ldb,
+       RSB_SPBLAS_OVER_TYPE(type `*',over) c, int ldc')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`sm'),`1',`enum blas_order_type order, enum blas_trans_type transT,
+               int nrhs, RSB_SPBLAS_OVER_TYPE(type,over)alpha, blas_sparse_matrix T, RSB_SPBLAS_OVER_TYPE(type `*',over)b, int ldb')`'dnl
+')dnl
+')dnl
+dnl
+ifelse(want_what,`ID',`dnl
+RSB_SPBLAS_FUNCTION_IDENTIFIER(mop,type,lang)`'dnl
+')dnl
+dnl
+dnl
+')dnl
+dnl
+popdef(`args')dnl
+popdef(`want_what')dnl
+popdef(`over')dnl
+popdef(`tri')dnl
+popdef(`mop')dnl
+popdef(`type')dnl
+popdef(`lang')dnl
+dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_SPBLAS_MATRIX_CREATION_FUNCS',`dnl
+pushdef(`type',$1)dnl
+pushdef(`mop',$2)dnl
+pushdef(`tri',$3)dnl
+pushdef(`want_what',$4)dnl
+pushdef(`over',$5)dnl
+pushdef(`lang',$6)dnl
+pushdef(`args',`$1,$2,$3')dnl
+dnl
+ifelse(RSB_M4_MEMBER(mop,RSB_M4_SPBLAS_MATRIX_CREATION_MOPS_LIST,`ds',`sp',`gp',`cr_end'),`1',`dnl
+dnl
+dnl
+ifelse(want_what,`function_declaration',`dnl
+$0(type,mop,tri,`TYPE',1,lang)` 'dnl
+$0(type,mop,tri,`ID',1,lang)dnl
+($0(type,mop,tri,`ARGS',1,lang));
+')dnl
+dnl
+ifelse(want_what,`function_definition',`dnl
+$0(type,mop,tri,`TYPE',1,lang)` 'dnl
+$0(type,mop,tri,`ID',1,lang)dnl
+( $0(type,mop,tri,`ARGS',1,lang) )
+$0(type,mop,tri,`BODY',1,lang)dnl
+')dnl
+dnl
+ifelse(want_what,`BODY',`dnl
+{
+dnl
+dnl
+ifelse(RSB_M4_OR(RSB_M4_LIST_MEMBER(type,RSB_M4_MATRIX_TYPES),RSB_M4_AND(RSB_M4_SAME(type,`'),RSB_M4_LIST_MEMBER(mop,(`cr_end',`ds',`sp',`gp')))),`1',`dnl
+dnl
+dnl
+RSB_M4_SPBLAS_DOC_COMMENT
+dnl
+ifelse(lang,`f90',`dnl
+	int istatv = $0(type,mop,tri,`ID',1,`lang_c')`'dnl
+(RSB_M4_FORTRAN_ADDRS_TO_C_VALUES(($0(type,mop,tri,`ARGS',1,`lang_c'))) );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+dnl
+ifelse(RSB_M4_MEMBER(mop,RSB_M4_SPBLAS_MATRIX_BEGIN_MOPS),`1',`dnl
+	RSB_SET_IF_NOT_NULL(A,istatv);
+	if(*A && (*A != RSB_BLAS_INVALID_VAL))
+	{
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_NO_ERROR);
+		rsb__BLAS_ussp(*A,blas_one_base);
+	}
+	else
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_ERROR);
+')`'dnl
+dnl
+',`dnl
+ifelse(RSB_M4_MEMBER(mop,RSB_M4_SPBLAS_MATRIX_BEGIN_MOPS),`1',`dnl
+	RSB_SPB_INTERFACE_RETURN_HDL(`rsb__BLAS_'X`us'`'mop`'(RSB_SPBLAS_TO_RSB_FIX_ARGS(type,mop,tri,`ID',over,lang)))
+',`dnl
+	RSB_SPB_INTERFACE_RETURN(`rsb__BLAS_'X`us'`'mop`'(RSB_SPBLAS_TO_RSB_FIX_ARGS(type,mop,tri,`ID',over,lang)))
+')dnl
+')dnl
+dnl
+',`dnl
+dnl
+dnl	RSB_M4_SPBLAS_DOC_UNIMPLEMENTED_MSG
+	RSB_M4_SPBLAS_DOC_CONFIGUREDOUT_MSG
+	/*!
+          RSB_M4_SPBLAS_HELP_INFO(mop)
+	 */
+dnl	/* FIXME : missing implementation */
+dnl	return RSB_BLAS_ERROR;
+ifelse(lang,`f90',`dnl
+	RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_INVALID_VAL);
+',`dnl
+	return RSB_BLAS_INVALID_VAL;
+')dnl
+dnl
+')dnl
+}
+')dnl
+dnl
+ifelse(want_what,`TYPE',`dnl
+ifelse(lang,`f90',`dnl
+void`'dnl
+',`dnl
+ifelse(RSB_M4_MEMBER(mop,`cr_begin',`cr_block_begin',`cr_variable_block_begin'),`1',`blas_sparse_matrix',`int')`'dnl
+')dnl
+')dnl
+dnl
+ifelse(lang,`f90',`dnl
+ifelse(want_what,`ARGS',`dnl
+RSB_M4_C_VALUES_TO_FORTRAN_ADDRS(($0(type,mop,tri,`ARGS',1,`lang_c')`'dnl
+ifelse(RSB_M4_MEMBER(mop,RSB_M4_SPBLAS_MATRIX_BEGIN_MOPS),`1',`,blas_sparse_matrix A')`'dnl
+`,int istat'`'dnl
+))`'dnl
+')dnl
+')dnl
+dnl
+ifelse(want_what,`ARGS',`dnl
+ifelse(lang,`lang_c',`dnl
+dnl
+dnl
+ifelse(RSB_M4_MEMBER(mop,`cr_begin'),`1',`rsb_blas_int_t m, rsb_blas_int_t n')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`cr_block_begin'),`1',`rsb_blas_int_t Mb, rsb_blas_int_t Nb, rsb_blas_int_t k, rsb_blas_int_t l')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`cr_variable_block_begin'),`1',`rsb_blas_int_t Mb, rsb_blas_int_t Nb,
+		const rsb_blas_int_t *K, const rsb_blas_int_t *L')`'dnl
+dnl
+ifelse(RSB_M4_MEMBER(mop,`cr_insert_entry'),`1',`blas_sparse_matrix A, RSB_SPBLAS_OVER_TYPE(type,over) val, rsb_blas_int_t i, rsb_blas_int_t j')`'dnl
+dnl	FIXME : complex cr_insert_entry originally has non-const pointers!?
+ifelse(RSB_M4_MEMBER(mop,`cr_insert_entries'),`1',`blas_sparse_matrix A, rsb_blas_int_t nnz, const RSB_SPBLAS_OVER_TYPE(type `*',over)val,
+                            const rsb_blas_int_t *indx, const rsb_blas_int_t *jndx')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`cr_insert_col'),`1',`blas_sparse_matrix A, rsb_blas_int_t j, rsb_blas_int_t nnz,
+                           const RSB_SPBLAS_OVER_TYPE(type `*',over)val, const rsb_blas_int_t *indx')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`cr_insert_row'),`1',`blas_sparse_matrix A, rsb_blas_int_t i, rsb_blas_int_t nnz,
+                           const RSB_SPBLAS_OVER_TYPE(type `*',over)val, const rsb_blas_int_t *indx')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`cr_insert_clique'),`1',`blas_sparse_matrix A, const rsb_blas_int_t k, const rsb_blas_int_t l,
+                       const RSB_SPBLAS_OVER_TYPE(type `*',over)val, const rsb_blas_int_t row_stride,
+                       const rsb_blas_int_t col_stride, const rsb_blas_int_t *indx,
+                       const rsb_blas_int_t *jndx')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`cr_insert_block'),`1',`blas_sparse_matrix A, const RSB_SPBLAS_OVER_TYPE(type `*',over)val,
+                        rsb_blas_int_t row_stride, rsb_blas_int_t col_stride, rsb_blas_int_t i, rsb_blas_int_t j')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`cr_end'),`1',`blas_sparse_matrix A')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`ds'),`1',`blas_sparse_matrix A')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`sp'),`1',`blas_sparse_matrix A, rsb_blas_int_t pname')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`gp'),`1',`blas_sparse_matrix A, rsb_blas_int_t pname')`'dnl
+')dnl
+dnl
+')dnl
+dnl
+ifelse(want_what,`ID',`dnl
+RSB_SPBLAS_FUNCTION_IDENTIFIER(mop,type,lang)`'dnl
+')dnl
+dnl
+')dnl
+dnl
+popdef(`args')dnl
+popdef(`over')dnl
+popdef(`want_what')dnl
+popdef(`tri')dnl
+popdef(`mop')dnl
+popdef(`type')dnl
+popdef(`lang')dnl
+')dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_SPBLAS_EXTRA_FUNCTION',`dnl
+pushdef(`type',$1)dnl
+pushdef(`mop',$2)dnl
+pushdef(`tri',$3)dnl
+pushdef(`want_what',$4)dnl
+pushdef(`over',$5)dnl
+pushdef(`lang',$6)dnl
+pushdef(`args',`$1,$2,$3')dnl
+dnl
+ifelse(RSB_M4_LIST_MEMBER(mop,RSB_M4_SBLAS_EXTRA_INTERFACE_OPS),`1',`dnl
+dnl
+dnl
+ifelse(want_what,`function_declaration',`dnl
+$0(type,mop,tri,`TYPE',1,lang)` 'dnl
+$0(type,mop,tri,`ID',1,lang)dnl
+($0(type,mop,tri,`ARGS',1,lang));
+')dnl
+dnl
+ifelse(want_what,`function_definition',`dnl
+$0(type,mop,tri,`TYPE',1,lang)` 'dnl
+$0(type,mop,tri,`ID',1,lang)dnl
+( $0(type,mop,tri,`ARGS',1,lang) )
+$0(type,mop,tri,`BODY',1,lang)dnl
+')dnl
+dnl
+ifelse(want_what,`BODY',`dnl
+{
+dnl
+dnl
+ifelse(RSB_M4_LIST_MEMBER(mop,RSB_M4_SBLAS_EXTRA_INTERFACE_OPS),`1',`dnl
+dnl
+dnl
+RSB_M4_SPBLAS_DOC_COMMENT
+dnl
+ifelse(lang,`f90',`dnl
+dnl	RSB_M4_SPBLAS_DOC_CONFIGUREDOUT_MSG
+	int istatv = $0(type,mop,tri,`ID',1,`lang_c')`'dnl
+(RSB_M4_FORTRAN_ADDRS_TO_C_VALUES(($0(type,mop,tri,`ARGS',1,`lang_c'))) );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+dnl
+dnl
+',`dnl
+dnl	RSB_M4_SPBLAS_DOC_CONFIGUREDOUT_MSG
+	RSB_SPB_INTERFACE_RETURN(`rsb__BLAS_'X`us'`'mop`'(RSB_SPBLAS_TO_RSB_FIX_ARGS(type,mop,tri,`ID',over,lang)))
+')dnl
+dnl
+',`dnl
+dnl
+dnl	/* FIXME : missing implementation */
+RSB_M4_SPBLAS_DOC_UNIMPLEMENTED_MSG
+	return RSB_BLAS_ERROR;
+dnl
+')dnl
+}
+')dnl
+dnl
+ifelse(want_what,`TYPE',`dnl
+ifelse(lang,`f90',`dnl
+void`'dnl
+',`dnl
+int`'dnl
+')dnl
+')dnl
+dnl
+ifelse(lang,`f90',`dnl
+ifelse(want_what,`ARGS',`dnl
+RSB_M4_C_VALUES_TO_FORTRAN_ADDRS(($0(type,mop,tri,`ARGS',1,`lang_c')`'dnl
+ifelse(RSB_M4_MEMBER(mop,RSB_M4_SPBLAS_MATRIX_BEGIN_MOPS),`1',`,blas_sparse_matrix A')`'dnl
+`,int istat'`'dnl
+))`'dnl
+')dnl
+')dnl
+dnl
+ifelse(want_what,`ARGS',`dnl
+ifelse(lang,`lang_c',`dnl
+dnl
+dnl
+`blas_sparse_matrix A'dnl
+ifelse(RSB_M4_MEMBER(mop,`rows_scale'),`1',`,const RSB_SPBLAS_OVER_TYPE(type *,over) d, enum blas_trans_type trans')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`get_diag'),`1',`,RSB_SPBLAS_OVER_TYPE(type *,over) d')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`get_rows_sparse'),`1',`, RSB_SPBLAS_OVER_TYPE(type *,over) VA, rsb_blas_int_t * IA, rsb_blas_int_t * JA, rsb_blas_int_t * nnz, rsb_blas_int_t fr, rsb_blas_int_t lr')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`get_rows_nnz'),`1',`, rsb_blas_int_t fr, rsb_blas_int_t lr, rsb_blas_int_t * nnzp')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`get_matrix_nnz'),`1',`,rsb_blas_int_t * nnz')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`get_infinity_norm'),`1',`,RSB_SPBLAS_OVER_TYPE(type *, over)in, enum blas_trans_type trans')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`set_elements'),`1',`,const rsb_blas_int_t * ia, const rsb_blas_int_t *ja, const RSB_SPBLAS_OVER_TYPE(type *,over) va, rsb_blas_int_t nnz')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`get_element'),`1',`,rsb_blas_int_t i, rsb_blas_int_t j, RSB_SPBLAS_OVER_TYPE(type *,over) v')`'dnl
+ifelse(RSB_M4_MEMBER(mop,`set_element'),`1',`,rsb_blas_int_t i, rsb_blas_int_t j, RSB_SPBLAS_OVER_TYPE(type *,over) v')`'dnl
+dnl
+')dnl
+dnl
+')dnl
+dnl
+ifelse(want_what,`ID',`dnl
+RSB_SPBLAS_FUNCTION_IDENTIFIER(mop,type,lang)`'dnl
+')dnl
+dnl
+')dnl
+dnl
+popdef(`args')dnl
+popdef(`over')dnl
+popdef(`want_what')dnl
+popdef(`tri')dnl
+popdef(`mop')dnl
+popdef(`type')dnl
+popdef(`lang')dnl
+')dnl
+dnl
diff --git a/ltmain.sh b/ltmain.sh
new file mode 100644
index 0000000..33f642a
--- /dev/null
+++ b/ltmain.sh
@@ -0,0 +1,9661 @@
+
+# libtool (GNU libtool) 2.4.2
+# Written by Gordon Matzigkeit <gord at gnu.ai.mit.edu>, 1996
+
+# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006,
+# 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
+# This is free software; see the source for copying conditions.  There is NO
+# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+# GNU Libtool is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# As a special exception to the GNU General Public License,
+# if you distribute this file as part of a program or library that
+# is built using GNU Libtool, you may include this file under the
+# same distribution terms that you use for the rest of that program.
+#
+# GNU Libtool is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GNU Libtool; see the file COPYING.  If not, a copy
+# can be downloaded from http://www.gnu.org/licenses/gpl.html,
+# or obtained by writing to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+# Usage: $progname [OPTION]... [MODE-ARG]...
+#
+# Provide generalized library-building support services.
+#
+#       --config             show all configuration variables
+#       --debug              enable verbose shell tracing
+#   -n, --dry-run            display commands without modifying any files
+#       --features           display basic configuration information and exit
+#       --mode=MODE          use operation mode MODE
+#       --preserve-dup-deps  don't remove duplicate dependency libraries
+#       --quiet, --silent    don't print informational messages
+#       --no-quiet, --no-silent
+#                            print informational messages (default)
+#       --no-warn            don't display warning messages
+#       --tag=TAG            use configuration variables from tag TAG
+#   -v, --verbose            print more informational messages than default
+#       --no-verbose         don't print the extra informational messages
+#       --version            print version information
+#   -h, --help, --help-all   print short, long, or detailed help message
+#
+# MODE must be one of the following:
+#
+#         clean              remove files from the build directory
+#         compile            compile a source file into a libtool object
+#         execute            automatically set library path, then run a program
+#         finish             complete the installation of libtool libraries
+#         install            install libraries or executables
+#         link               create a library or an executable
+#         uninstall          remove libraries from an installed directory
+#
+# MODE-ARGS vary depending on the MODE.  When passed as first option,
+# `--mode=MODE' may be abbreviated as `MODE' or a unique abbreviation of that.
+# Try `$progname --help --mode=MODE' for a more detailed description of MODE.
+#
+# When reporting a bug, please describe a test case to reproduce it and
+# include the following information:
+#
+#         host-triplet:	$host
+#         shell:		$SHELL
+#         compiler:		$LTCC
+#         compiler flags:		$LTCFLAGS
+#         linker:		$LD (gnu? $with_gnu_ld)
+#         $progname:	(GNU libtool) 2.4.2 Debian-2.4.2-1.1
+#         automake:	$automake_version
+#         autoconf:	$autoconf_version
+#
+# Report bugs to <bug-libtool at gnu.org>.
+# GNU libtool home page: <http://www.gnu.org/software/libtool/>.
+# General help using GNU software: <http://www.gnu.org/gethelp/>.
+
+PROGRAM=libtool
+PACKAGE=libtool
+VERSION="2.4.2 Debian-2.4.2-1.1"
+TIMESTAMP=""
+package_revision=1.3337
+
+# Be Bourne compatible
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
+  emulate sh
+  NULLCMD=:
+  # Zsh 3.x and 4.x perform word splitting on ${1+"$@"}, which
+  # is contrary to our usage.  Disable this feature.
+  alias -g '${1+"$@"}'='"$@"'
+  setopt NO_GLOB_SUBST
+else
+  case `(set -o) 2>/dev/null` in *posix*) set -o posix;; esac
+fi
+BIN_SH=xpg4; export BIN_SH # for Tru64
+DUALCASE=1; export DUALCASE # for MKS sh
+
+# A function that is used when there is no print builtin or printf.
+func_fallback_echo ()
+{
+  eval 'cat <<_LTECHO_EOF
+$1
+_LTECHO_EOF'
+}
+
+# NLS nuisances: We save the old values to restore during execute mode.
+lt_user_locale=
+lt_safe_locale=
+for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES
+do
+  eval "if test \"\${$lt_var+set}\" = set; then
+          save_$lt_var=\$$lt_var
+          $lt_var=C
+	  export $lt_var
+	  lt_user_locale=\"$lt_var=\\\$save_\$lt_var; \$lt_user_locale\"
+	  lt_safe_locale=\"$lt_var=C; \$lt_safe_locale\"
+	fi"
+done
+LC_ALL=C
+LANGUAGE=C
+export LANGUAGE LC_ALL
+
+$lt_unset CDPATH
+
+
+# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh
+# is ksh but when the shell is invoked as "sh" and the current value of
+# the _XPG environment variable is not equal to 1 (one), the special
+# positional parameter $0, within a function call, is the name of the
+# function.
+progpath="$0"
+
+
+
+: ${CP="cp -f"}
+test "${ECHO+set}" = set || ECHO=${as_echo-'printf %s\n'}
+: ${MAKE="make"}
+: ${MKDIR="mkdir"}
+: ${MV="mv -f"}
+: ${RM="rm -f"}
+: ${SHELL="${CONFIG_SHELL-/bin/sh}"}
+: ${Xsed="$SED -e 1s/^X//"}
+
+# Global variables:
+EXIT_SUCCESS=0
+EXIT_FAILURE=1
+EXIT_MISMATCH=63  # $? = 63 is used to indicate version mismatch to the 'missing' script.
+EXIT_SKIP=77	  # $? = 77 is used to indicate a skipped test to automake.
+
+exit_status=$EXIT_SUCCESS
+
+# Make sure IFS has a sensible default
+lt_nl='
+'
+IFS=" 	$lt_nl"
+
+dirname="s,/[^/]*$,,"
+basename="s,^.*/,,"
+
+# func_dirname file append nondir_replacement
+# Compute the dirname of FILE.  If nonempty, add APPEND to the result,
+# otherwise set result to NONDIR_REPLACEMENT.
+func_dirname ()
+{
+    func_dirname_result=`$ECHO "${1}" | $SED "$dirname"`
+    if test "X$func_dirname_result" = "X${1}"; then
+      func_dirname_result="${3}"
+    else
+      func_dirname_result="$func_dirname_result${2}"
+    fi
+} # func_dirname may be replaced by extended shell implementation
+
+
+# func_basename file
+func_basename ()
+{
+    func_basename_result=`$ECHO "${1}" | $SED "$basename"`
+} # func_basename may be replaced by extended shell implementation
+
+
+# func_dirname_and_basename file append nondir_replacement
+# perform func_basename and func_dirname in a single function
+# call:
+#   dirname:  Compute the dirname of FILE.  If nonempty,
+#             add APPEND to the result, otherwise set result
+#             to NONDIR_REPLACEMENT.
+#             value returned in "$func_dirname_result"
+#   basename: Compute filename of FILE.
+#             value returned in "$func_basename_result"
+# Implementation must be kept synchronized with func_dirname
+# and func_basename. For efficiency, we do not delegate to
+# those functions but instead duplicate the functionality here.
+func_dirname_and_basename ()
+{
+    # Extract subdirectory from the argument.
+    func_dirname_result=`$ECHO "${1}" | $SED -e "$dirname"`
+    if test "X$func_dirname_result" = "X${1}"; then
+      func_dirname_result="${3}"
+    else
+      func_dirname_result="$func_dirname_result${2}"
+    fi
+    func_basename_result=`$ECHO "${1}" | $SED -e "$basename"`
+} # func_dirname_and_basename may be replaced by extended shell implementation
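
For instance, with a hypothetical installed library path (run inside the script, where $ECHO and $SED are already set):

    func_dirname_and_basename "/usr/lib/librsb.so" "" "."
    # -> func_dirname_result=/usr/lib  func_basename_result=librsb.so
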
+
+
+# func_stripname prefix suffix name
+# strip PREFIX and SUFFIX off of NAME.
+# PREFIX and SUFFIX must not contain globbing or regex special
+# characters, hashes, or percent signs, but SUFFIX may contain a leading
+# dot (in which case that matches only a dot).
+func_stripname ()
+{
+    case ${2} in
+      .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;;
+      *)  func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;;
+    esac
+} # func_stripname may be replaced by extended shell implementation
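
For instance (hypothetical file name):

    func_stripname 'lib' '.so' 'librsb.so'
    # -> func_stripname_result=rsb  (the leading dot in '.so' matches only a dot)
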
+
+
+# These SED scripts presuppose an absolute path with a trailing slash.
+pathcar='s,^/\([^/]*\).*$,\1,'
+pathcdr='s,^/[^/]*,,'
+removedotparts=':dotsl
+		s@/\./@/@g
+		t dotsl
+		s,/\.$,/,'
+collapseslashes='s@/\{1,\}@/@g'
+finalslash='s,/*$,/,'
+
+# func_normal_abspath PATH
+# Remove doubled-up and trailing slashes, "." path components,
+# and cancel out any ".." path components in PATH after making
+# it an absolute path.
+#             value returned in "$func_normal_abspath_result"
+func_normal_abspath ()
+{
+  # Start from root dir and reassemble the path.
+  func_normal_abspath_result=
+  func_normal_abspath_tpath=$1
+  func_normal_abspath_altnamespace=
+  case $func_normal_abspath_tpath in
+    "")
+      # Empty path, that just means $cwd.
+      func_stripname '' '/' "`pwd`"
+      func_normal_abspath_result=$func_stripname_result
+      return
+    ;;
+    # The next three entries are used to spot a run of precisely
+    # two leading slashes without using negated character classes;
+    # we take advantage of case's first-match behaviour.
+    ///*)
+      # Unusual form of absolute path, do nothing.
+    ;;
+    //*)
+      # Not necessarily an ordinary path; POSIX reserves leading '//'
+      # and for example Cygwin uses it to access remote file shares
+      # over CIFS/SMB, so we conserve a leading double slash if found.
+      func_normal_abspath_altnamespace=/
+    ;;
+    /*)
+      # Absolute path, do nothing.
+    ;;
+    *)
+      # Relative path, prepend $cwd.
+      func_normal_abspath_tpath=`pwd`/$func_normal_abspath_tpath
+    ;;
+  esac
+  # Cancel out all the simple stuff to save iterations.  We also want
+  # the path to end with a slash for ease of parsing, so make sure
+  # there is one (and only one) here.
+  func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \
+        -e "$removedotparts" -e "$collapseslashes" -e "$finalslash"`
+  while :; do
+    # Processed it all yet?
+    if test "$func_normal_abspath_tpath" = / ; then
+      # If we ascended to the root using ".." the result may be empty now.
+      if test -z "$func_normal_abspath_result" ; then
+        func_normal_abspath_result=/
+      fi
+      break
+    fi
+    func_normal_abspath_tcomponent=`$ECHO "$func_normal_abspath_tpath" | $SED \
+        -e "$pathcar"`
+    func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \
+        -e "$pathcdr"`
+    # Figure out what to do with it
+    case $func_normal_abspath_tcomponent in
+      "")
+        # Trailing empty path component, ignore it.
+      ;;
+      ..)
+        # Parent dir; strip last assembled component from result.
+        func_dirname "$func_normal_abspath_result"
+        func_normal_abspath_result=$func_dirname_result
+      ;;
+      *)
+        # Actual path component, append it.
+        func_normal_abspath_result=$func_normal_abspath_result/$func_normal_abspath_tcomponent
+      ;;
+    esac
+  done
+  # Restore leading double-slash if one was found on entry.
+  func_normal_abspath_result=$func_normal_abspath_altnamespace$func_normal_abspath_result
+}
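
For example:

    func_normal_abspath "/a/./b//c/../d"
    # -> func_normal_abspath_result=/a/b/d  ("." parts, doubled slashes and ".." resolved)
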
+
+# func_relative_path SRCDIR DSTDIR
+# generates a relative path from SRCDIR to DSTDIR, with a trailing
+# slash if non-empty, suitable for immediately appending a filename
+# without needing to append a separator.
+#             value returned in "$func_relative_path_result"
+func_relative_path ()
+{
+  func_relative_path_result=
+  func_normal_abspath "$1"
+  func_relative_path_tlibdir=$func_normal_abspath_result
+  func_normal_abspath "$2"
+  func_relative_path_tbindir=$func_normal_abspath_result
+
+  # Ascend the tree starting from libdir
+  while :; do
+    # check if we have found a prefix of bindir
+    case $func_relative_path_tbindir in
+      $func_relative_path_tlibdir)
+        # found an exact match
+        func_relative_path_tcancelled=
+        break
+        ;;
+      $func_relative_path_tlibdir*)
+        # found a matching prefix
+        func_stripname "$func_relative_path_tlibdir" '' "$func_relative_path_tbindir"
+        func_relative_path_tcancelled=$func_stripname_result
+        if test -z "$func_relative_path_result"; then
+          func_relative_path_result=.
+        fi
+        break
+        ;;
+      *)
+        func_dirname $func_relative_path_tlibdir
+        func_relative_path_tlibdir=${func_dirname_result}
+        if test "x$func_relative_path_tlibdir" = x ; then
+          # Have to descend all the way to the root!
+          func_relative_path_result=../$func_relative_path_result
+          func_relative_path_tcancelled=$func_relative_path_tbindir
+          break
+        fi
+        func_relative_path_result=../$func_relative_path_result
+        ;;
+    esac
+  done
+
+  # Now calculate path; take care to avoid doubling-up slashes.
+  func_stripname '' '/' "$func_relative_path_result"
+  func_relative_path_result=$func_stripname_result
+  func_stripname '/' '/' "$func_relative_path_tcancelled"
+  if test "x$func_stripname_result" != x ; then
+    func_relative_path_result=${func_relative_path_result}/${func_stripname_result}
+  fi
+
+  # Normalisation. If bindir is libdir, return empty string,
+  # else relative path ending with a slash; either way, target
+  # file name can be directly appended.
+  if test ! -z "$func_relative_path_result"; then
+    func_stripname './' '' "$func_relative_path_result/"
+    func_relative_path_result=$func_stripname_result
+  fi
+}
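
For example:

    func_relative_path "/usr/lib" "/usr/bin"
    # -> func_relative_path_result=../bin/  (trailing slash, ready for appending a filename)
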
+
+# The name of this program:
+func_dirname_and_basename "$progpath"
+progname=$func_basename_result
+
+# Make sure we have an absolute path for reexecution:
+case $progpath in
+  [\\/]*|[A-Za-z]:\\*) ;;
+  *[\\/]*)
+     progdir=$func_dirname_result
+     progdir=`cd "$progdir" && pwd`
+     progpath="$progdir/$progname"
+     ;;
+  *)
+     save_IFS="$IFS"
+     IFS=${PATH_SEPARATOR-:}
+     for progdir in $PATH; do
+       IFS="$save_IFS"
+       test -x "$progdir/$progname" && break
+     done
+     IFS="$save_IFS"
+     test -n "$progdir" || progdir=`pwd`
+     progpath="$progdir/$progname"
+     ;;
+esac
+
+# Sed substitution that helps us do robust quoting.  It backslashifies
+# metacharacters that are still active within double-quoted strings.
+Xsed="${SED}"' -e 1s/^X//'
+sed_quote_subst='s/\([`"$\\]\)/\\\1/g'
+
+# Same as above, but do not quote variable references.
+double_quote_subst='s/\(["`\\]\)/\\\1/g'
+
+# Sed substitution that turns a string into a regex matching for the
+# string literally.
+sed_make_literal_regex='s,[].[^$\\*\/],\\&,g'
+
+# Sed substitution that converts a w32 file name or path
+# which contains forward slashes, into one that contains
+# (escaped) backslashes.  A very naive implementation.
+lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g'
+
+# Re-`\' parameter expansions in output of double_quote_subst that were
+# `\'-ed in input to the same.  If an odd number of `\' preceded a '$'
+# in input to double_quote_subst, that '$' was protected from expansion.
+# Since each input `\' is now two `\'s, look for any number of runs of
+# four `\'s followed by two `\'s and then a '$'.  Backslash-escape that '$'.
+bs='\\'
+bs2='\\\\'
+bs4='\\\\\\\\'
+dollar='\$'
+sed_double_backslash="\
+  s/$bs4/&\\
+/g
+  s/^$bs2$dollar/$bs&/
+  s/\\([^$bs]\\)$bs2$dollar/\\1$bs2$bs$dollar/g
+  s/\n//g"
+
+# Standard options:
+opt_dry_run=false
+opt_help=false
+opt_quiet=false
+opt_verbose=false
+opt_warning=:
+
+# func_echo arg...
+# Echo program name prefixed message, along with the current mode
+# name if it has been set.
+func_echo ()
+{
+    $ECHO "$progname: ${opt_mode+$opt_mode: }$*"
+}
+
+# func_verbose arg...
+# Echo program name prefixed message in verbose mode only.
+func_verbose ()
+{
+    $opt_verbose && func_echo ${1+"$@"}
+
+    # A bug in bash halts the script if the last line of a function
+    # fails when set -e is in force, so we need another command to
+    # work around that:
+    :
+}
+
+# func_echo_all arg...
+# Invoke $ECHO with all args, space-separated.
+func_echo_all ()
+{
+    $ECHO "$*"
+}
+
+# func_error arg...
+# Echo program name prefixed message to standard error.
+func_error ()
+{
+    $ECHO "$progname: ${opt_mode+$opt_mode: }"${1+"$@"} 1>&2
+}
+
+# func_warning arg...
+# Echo program name prefixed warning message to standard error.
+func_warning ()
+{
+    $opt_warning && $ECHO "$progname: ${opt_mode+$opt_mode: }warning: "${1+"$@"} 1>&2
+
+    # bash bug again:
+    :
+}
+
+# func_fatal_error arg...
+# Echo program name prefixed message to standard error, and exit.
+func_fatal_error ()
+{
+    func_error ${1+"$@"}
+    exit $EXIT_FAILURE
+}
+
+# func_fatal_help arg...
+# Echo program name prefixed message to standard error, followed by
+# a help hint, and exit.
+func_fatal_help ()
+{
+    func_error ${1+"$@"}
+    func_fatal_error "$help"
+}
+help="Try \`$progname --help' for more information."  ## default
+
+
+# func_grep expression filename
+# Check whether EXPRESSION matches any line of FILENAME, without output.
+func_grep ()
+{
+    $GREP "$1" "$2" >/dev/null 2>&1
+}
+
+
+# func_mkdir_p directory-path
+# Make sure the entire path to DIRECTORY-PATH is available.
+func_mkdir_p ()
+{
+    my_directory_path="$1"
+    my_dir_list=
+
+    if test -n "$my_directory_path" && test "$opt_dry_run" != ":"; then
+
+      # Protect directory names starting with `-'
+      case $my_directory_path in
+        -*) my_directory_path="./$my_directory_path" ;;
+      esac
+
+      # While some portion of DIR does not yet exist...
+      while test ! -d "$my_directory_path"; do
+        # ...make a list in topmost first order.  Use a colon delimited
+	# list in case some portion of the path contains whitespace.
+        my_dir_list="$my_directory_path:$my_dir_list"
+
+        # If the last portion added has no slash in it, the list is done
+        case $my_directory_path in */*) ;; *) break ;; esac
+
+        # ...otherwise throw away the child directory and loop
+        my_directory_path=`$ECHO "$my_directory_path" | $SED -e "$dirname"`
+      done
+      my_dir_list=`$ECHO "$my_dir_list" | $SED 's,:*$,,'`
+
+      save_mkdir_p_IFS="$IFS"; IFS=':'
+      for my_dir in $my_dir_list; do
+	IFS="$save_mkdir_p_IFS"
+        # mkdir can fail with a `File exists' error if two processes
+        # try to create one of the directories concurrently.  Don't
+        # stop in that case!
+        $MKDIR "$my_dir" 2>/dev/null || :
+      done
+      IFS="$save_mkdir_p_IFS"
+
+      # Bail out if we (or some other process) failed to create a directory.
+      test -d "$my_directory_path" || \
+        func_fatal_error "Failed to create \`$1'"
+    fi
+}
+
+
+# func_mktempdir [string]
+# Make a temporary directory that won't clash with other running
+# libtool processes, and avoids race conditions if possible.  If
+# given, STRING is the basename for that directory.
+func_mktempdir ()
+{
+    my_template="${TMPDIR-/tmp}/${1-$progname}"
+
+    if test "$opt_dry_run" = ":"; then
+      # Return a directory name, but don't create it in dry-run mode
+      my_tmpdir="${my_template}-$$"
+    else
+
+      # If mktemp works, use that first and foremost
+      my_tmpdir=`mktemp -d "${my_template}-XXXXXXXX" 2>/dev/null`
+
+      if test ! -d "$my_tmpdir"; then
+        # Failing that, at least try and use $RANDOM to avoid a race
+        my_tmpdir="${my_template}-${RANDOM-0}$$"
+
+        save_mktempdir_umask=`umask`
+        umask 0077
+        $MKDIR "$my_tmpdir"
+        umask $save_mktempdir_umask
+      fi
+
+      # If we're not in dry-run mode, bomb out on failure
+      test -d "$my_tmpdir" || \
+        func_fatal_error "cannot create temporary directory \`$my_tmpdir'"
+    fi
+
+    $ECHO "$my_tmpdir"
+}
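
Within the script a caller would use it along these lines (illustrative; relies on $MKDIR, $ECHO and opt_dry_run being initialized as above):

    my_tmpdir=`func_mktempdir rsb`    # e.g. /tmp/rsb-Xa1B2c3D
    trap 'rm -rf "$my_tmpdir"' 0      # cleanup is the caller's job
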
+
+
+# func_quote_for_eval arg
+# Aesthetically quote ARG to be evaled later.
+# This function returns two values: FUNC_QUOTE_FOR_EVAL_RESULT
+# is double-quoted, suitable for a subsequent eval, whereas
+# FUNC_QUOTE_FOR_EVAL_UNQUOTED_RESULT has merely all characters
+# which are still active within double quotes backslashified.
+func_quote_for_eval ()
+{
+    case $1 in
+      *[\\\`\"\$]*)
+	func_quote_for_eval_unquoted_result=`$ECHO "$1" | $SED "$sed_quote_subst"` ;;
+      *)
+        func_quote_for_eval_unquoted_result="$1" ;;
+    esac
+
+    case $func_quote_for_eval_unquoted_result in
+      # Double-quote args containing shell metacharacters to delay
+      # word splitting, command substitution, and variable
+      # expansion for a subsequent eval.
+      # Many Bourne shells cannot handle close brackets correctly
+      # in scan sets, so we specify it separately.
+      *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \	]*|*]*|"")
+        func_quote_for_eval_result="\"$func_quote_for_eval_unquoted_result\""
+        ;;
+      *)
+        func_quote_for_eval_result="$func_quote_for_eval_unquoted_result"
+    esac
+}
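
For example, quoting an argument so it survives a later eval intact:

    func_quote_for_eval 'a name with "quotes" & spaces'
    eval "printf '%s\n' $func_quote_for_eval_result"   # prints the argument verbatim
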
+
+
+# func_quote_for_expand arg
+# Aesthetically quote ARG to be evaled later; same as above,
+# but do not quote variable references.
+func_quote_for_expand ()
+{
+    case $1 in
+      *[\\\`\"]*)
+	my_arg=`$ECHO "$1" | $SED \
+	    -e "$double_quote_subst" -e "$sed_double_backslash"` ;;
+      *)
+        my_arg="$1" ;;
+    esac
+
+    case $my_arg in
+      # Double-quote args containing shell metacharacters to delay
+      # word splitting and command substitution for a subsequent eval.
+      # Many Bourne shells cannot handle close brackets correctly
+      # in scan sets, so we specify it separately.
+      *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \	]*|*]*|"")
+        my_arg="\"$my_arg\""
+        ;;
+    esac
+
+    func_quote_for_expand_result="$my_arg"
+}
+
+
+# func_show_eval cmd [fail_exp]
+# Unless opt_silent is true, output CMD.  Then, if opt_dry_run is
+# not true, evaluate CMD.  If the evaluation of CMD fails, and FAIL_EXP
+# is given, then evaluate it.
+func_show_eval ()
+{
+    my_cmd="$1"
+    my_fail_exp="${2-:}"
+
+    ${opt_silent-false} || {
+      func_quote_for_expand "$my_cmd"
+      eval "func_echo $func_quote_for_expand_result"
+    }
+
+    if ${opt_dry_run-false}; then :; else
+      eval "$my_cmd"
+      my_status=$?
+      if test "$my_status" -eq 0; then :; else
+	eval "(exit $my_status); $my_fail_exp"
+      fi
+    fi
+}
+
+
+# func_show_eval_locale cmd [fail_exp]
+# Unless opt_silent is true, output CMD.  Then, unless opt_dry_run is
+# true, evaluate CMD.  If the evaluation of CMD fails and FAIL_EXP is
+# given, evaluate FAIL_EXP.  Use the saved locale for evaluation.
+func_show_eval_locale ()
+{
+    my_cmd="$1"
+    my_fail_exp="${2-:}"
+
+    ${opt_silent-false} || {
+      func_quote_for_expand "$my_cmd"
+      eval "func_echo $func_quote_for_expand_result"
+    }
+
+    if ${opt_dry_run-false}; then :; else
+      eval "$lt_user_locale
+	    $my_cmd"
+      my_status=$?
+      eval "$lt_safe_locale"
+      if test "$my_status" -eq 0; then :; else
+	eval "(exit $my_status); $my_fail_exp"
+      fi
+    fi
+}
+
+# func_tr_sh
+# Turn $1 into a string suitable for a shell variable name.
+# Result is stored in $func_tr_sh_result.  All characters
+# not in the set a-zA-Z0-9_ are replaced with '_'. Further,
+# if $1 begins with a digit, a '_' is prepended as well.
+func_tr_sh ()
+{
+  case $1 in
+  [0-9]* | *[!a-zA-Z0-9_]*)
+    func_tr_sh_result=`$ECHO "$1" | $SED 's/^\([0-9]\)/_\1/; s/[^a-zA-Z0-9_]/_/g'`
+    ;;
+  * )
+    func_tr_sh_result=$1
+    ;;
+  esac
+}
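+
+# Usage sketch (illustrative only):
+#   func_tr_sh "3ab-cd.ef"
+#   # func_tr_sh_result => _3ab_cd_ef
+#   # (leading digit gets a `_' prefix, non-word characters become `_')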
+
+
+# func_version
+# Echo version message to standard output and exit.
+func_version ()
+{
+    $opt_debug
+
+    $SED -n '/(C)/!b go
+	:more
+	/\./!{
+	  N
+	  s/\n# / /
+	  b more
+	}
+	:go
+	/^# '$PROGRAM' (GNU /,/# warranty; / {
+        s/^# //
+	s/^# *$//
+        s/\((C)\)[ 0-9,-]*\( [1-9][0-9]*\)/\1\2/
+        p
+     }' < "$progpath"
+     exit $?
+}
+
+# func_usage
+# Echo short help message to standard output and exit.
+func_usage ()
+{
+    $opt_debug
+
+    $SED -n '/^# Usage:/,/^#  *.*--help/ {
+        s/^# //
+	s/^# *$//
+	s/\$progname/'$progname'/
+	p
+    }' < "$progpath"
+    echo
+    $ECHO "run \`$progname --help | more' for full usage"
+    exit $?
+}
+
+# func_help [NOEXIT]
+# Echo long help message to standard output and exit,
+# unless 'noexit' is passed as an argument.
+func_help ()
+{
+    $opt_debug
+
+    $SED -n '/^# Usage:/,/# Report bugs to/ {
+	:print
+        s/^# //
+	s/^# *$//
+	s*\$progname*'$progname'*
+	s*\$host*'"$host"'*
+	s*\$SHELL*'"$SHELL"'*
+	s*\$LTCC*'"$LTCC"'*
+	s*\$LTCFLAGS*'"$LTCFLAGS"'*
+	s*\$LD*'"$LD"'*
+	s/\$with_gnu_ld/'"$with_gnu_ld"'/
+	s/\$automake_version/'"`(${AUTOMAKE-automake} --version) 2>/dev/null |$SED 1q`"'/
+	s/\$autoconf_version/'"`(${AUTOCONF-autoconf} --version) 2>/dev/null |$SED 1q`"'/
+	p
+	d
+     }
+     /^# .* home page:/b print
+     /^# General help using/b print
+     ' < "$progpath"
+    ret=$?
+    if test -z "$1"; then
+      exit $ret
+    fi
+}
+
+# func_missing_arg argname
+# Echo program name prefixed message to standard error and set global
+# exit_cmd.
+func_missing_arg ()
+{
+    $opt_debug
+
+    func_error "missing argument for $1."
+    exit_cmd=exit
+}
+
+
+# func_split_short_opt shortopt
+# Set func_split_short_opt_name and func_split_short_opt_arg shell
+# variables by splitting SHORTOPT after the 2nd character.
+func_split_short_opt ()
+{
+    my_sed_short_opt='1s/^\(..\).*$/\1/;q'
+    my_sed_short_rest='1s/^..\(.*\)$/\1/;q'
+
+    func_split_short_opt_name=`$ECHO "$1" | $SED "$my_sed_short_opt"`
+    func_split_short_opt_arg=`$ECHO "$1" | $SED "$my_sed_short_rest"`
+} # func_split_short_opt may be replaced by extended shell implementation
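+
+# Usage sketch (illustrative only):
+#   func_split_short_opt "-ofoo.lo"
+#   # func_split_short_opt_name => -o
+#   # func_split_short_opt_arg  => foo.lo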
+
+
+# func_split_long_opt longopt
+# Set func_split_long_opt_name and func_split_long_opt_arg shell
+# variables after splitting LONGOPT at the `=' sign.
+func_split_long_opt ()
+{
+    my_sed_long_opt='1s/^\(--[^=]*\)=.*/\1/;q'
+    my_sed_long_arg='1s/^--[^=]*=//'
+
+    func_split_long_opt_name=`$ECHO "$1" | $SED "$my_sed_long_opt"`
+    func_split_long_opt_arg=`$ECHO "$1" | $SED "$my_sed_long_arg"`
+} # func_split_long_opt may be replaced by extended shell implementation
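+
+# Usage sketch (illustrative only):
+#   func_split_long_opt "--mode=link"
+#   # func_split_long_opt_name => --mode
+#   # func_split_long_opt_arg  => link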
+
+exit_cmd=:
+
+
+
+
+
+magic="%%%MAGIC variable%%%"
+magic_exe="%%%MAGIC EXE variable%%%"
+
+# Global variables.
+nonopt=
+preserve_args=
+lo2o="s/\\.lo\$/.${objext}/"
+o2lo="s/\\.${objext}\$/.lo/"
+extracted_archives=
+extracted_serial=0
+
+# If this variable is set in any of the actions, the command in it
+# will be execed at the end.  This prevents here-documents from being
+# left over by shells.
+exec_cmd=
+
+# func_append var value
+# Append VALUE to the end of shell variable VAR.
+func_append ()
+{
+    eval "${1}=\$${1}\${2}"
+} # func_append may be replaced by extended shell implementation
+
+# func_append_quoted var value
+# Quote VALUE and append to the end of shell variable VAR, separated
+# by a space.
+func_append_quoted ()
+{
+    func_quote_for_eval "${2}"
+    eval "${1}=\$${1}\\ \$func_quote_for_eval_result"
+} # func_append_quoted may be replaced by extended shell implementation
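+
+# Usage sketch (illustrative only; cmd_line is a hypothetical variable):
+#   cmd_line=
+#   func_append_quoted cmd_line "a b"
+#   func_append_quoted cmd_line "c"
+#   # cmd_line => ` "a b" c' (each value quoted, space-separated)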
+
+
+# func_arith arithmetic-term...
+func_arith ()
+{
+    func_arith_result=`expr "${@}"`
+} # func_arith may be replaced by extended shell implementation
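+
+# Usage sketch (illustrative only):
+#   func_arith 1 + 2
+#   # func_arith_result => 3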
+
+
+# func_len string
+# STRING may not start with a hyphen.
+func_len ()
+{
+    func_len_result=`expr "${1}" : ".*" 2>/dev/null || echo $max_cmd_len`
+} # func_len may be replaced by extended shell implementation
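+
+# Usage sketch (illustrative only):
+#   func_len "abcd"
+#   # func_len_result => 4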
+
+
+# func_lo2o object
+func_lo2o ()
+{
+    func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"`
+} # func_lo2o may be replaced by extended shell implementation
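+
+# Usage sketch (illustrative only; assumes objext=o):
+#   func_lo2o "foo.lo"
+#   # func_lo2o_result => foo.o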
+
+
+# func_xform libobj-or-source
+func_xform ()
+{
+    func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'`
+} # func_xform may be replaced by extended shell implementation
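+
+# Usage sketch (illustrative only):
+#   func_xform "src/bar.c"
+#   # func_xform_result => src/bar.lo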
+
+
+# func_fatal_configuration arg...
+# Echo program name prefixed message to standard error, followed by
+# a configuration failure hint, and exit.
+func_fatal_configuration ()
+{
+    func_error ${1+"$@"}
+    func_error "See the $PACKAGE documentation for more information."
+    func_fatal_error "Fatal configuration error."
+}
+
+
+# func_config
+# Display the configuration for all the tags in this script.
+func_config ()
+{
+    re_begincf='^# ### BEGIN LIBTOOL'
+    re_endcf='^# ### END LIBTOOL'
+
+    # Default configuration.
+    $SED "1,/$re_begincf CONFIG/d;/$re_endcf CONFIG/,\$d" < "$progpath"
+
+    # Now print the configurations for the tags.
+    for tagname in $taglist; do
+      $SED -n "/$re_begincf TAG CONFIG: $tagname\$/,/$re_endcf TAG CONFIG: $tagname\$/p" < "$progpath"
+    done
+
+    exit $?
+}
+
+# func_features
+# Display the features supported by this script.
+func_features ()
+{
+    echo "host: $host"
+    if test "$build_libtool_libs" = yes; then
+      echo "enable shared libraries"
+    else
+      echo "disable shared libraries"
+    fi
+    if test "$build_old_libs" = yes; then
+      echo "enable static libraries"
+    else
+      echo "disable static libraries"
+    fi
+
+    exit $?
+}
+
+# func_enable_tag tagname
+# Verify that TAGNAME is valid, and either flag an error and exit, or
+# enable the TAGNAME tag.  We also add TAGNAME to the global $taglist
+# variable here.
+func_enable_tag ()
+{
+  # Global variable:
+  tagname="$1"
+
+  re_begincf="^# ### BEGIN LIBTOOL TAG CONFIG: $tagname\$"
+  re_endcf="^# ### END LIBTOOL TAG CONFIG: $tagname\$"
+  sed_extractcf="/$re_begincf/,/$re_endcf/p"
+
+  # Validate tagname.
+  case $tagname in
+    *[!-_A-Za-z0-9,/]*)
+      func_fatal_error "invalid tag name: $tagname"
+      ;;
+  esac
+
+  # Don't test for the "default" C tag, as we know it's
+  # there but not specially marked.
+  case $tagname in
+    CC) ;;
+    *)
+      if $GREP "$re_begincf" "$progpath" >/dev/null 2>&1; then
+	taglist="$taglist $tagname"
+
+	# Evaluate the configuration.  Be careful to quote the path
+	# and the sed script, to avoid splitting on whitespace, but
+	# also don't use non-portable quotes within backquotes within
+	# quotes; we have to do it in 2 steps:
+	extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"`
+	eval "$extractedcf"
+      else
+	func_error "ignoring unknown tag $tagname"
+      fi
+      ;;
+  esac
+}
+
+# func_check_version_match
+# Ensure that we are using m4 macros, and libtool script from the same
+# release of libtool.
+func_check_version_match ()
+{
+  if test "$package_revision" != "$macro_revision"; then
+    if test "$VERSION" != "$macro_version"; then
+      if test -z "$macro_version"; then
+        cat >&2 <<_LT_EOF
+$progname: Version mismatch error.  This is $PACKAGE $VERSION, but the
+$progname: definition of this LT_INIT comes from an older release.
+$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION
+$progname: and run autoconf again.
+_LT_EOF
+      else
+        cat >&2 <<_LT_EOF
+$progname: Version mismatch error.  This is $PACKAGE $VERSION, but the
+$progname: definition of this LT_INIT comes from $PACKAGE $macro_version.
+$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION
+$progname: and run autoconf again.
+_LT_EOF
+      fi
+    else
+      cat >&2 <<_LT_EOF
+$progname: Version mismatch error.  This is $PACKAGE $VERSION, revision $package_revision,
+$progname: but the definition of this LT_INIT comes from revision $macro_revision.
+$progname: You should recreate aclocal.m4 with macros from revision $package_revision
+$progname: of $PACKAGE $VERSION and run autoconf again.
+_LT_EOF
+    fi
+
+    exit $EXIT_MISMATCH
+  fi
+}
+
+
+# Shorthand for --mode=foo, only valid as the first argument
+case $1 in
+clean|clea|cle|cl)
+  shift; set dummy --mode clean ${1+"$@"}; shift
+  ;;
+compile|compil|compi|comp|com|co|c)
+  shift; set dummy --mode compile ${1+"$@"}; shift
+  ;;
+execute|execut|execu|exec|exe|ex|e)
+  shift; set dummy --mode execute ${1+"$@"}; shift
+  ;;
+finish|finis|fini|fin|fi|f)
+  shift; set dummy --mode finish ${1+"$@"}; shift
+  ;;
+install|instal|insta|inst|ins|in|i)
+  shift; set dummy --mode install ${1+"$@"}; shift
+  ;;
+link|lin|li|l)
+  shift; set dummy --mode link ${1+"$@"}; shift
+  ;;
+uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u)
+  shift; set dummy --mode uninstall ${1+"$@"}; shift
+  ;;
+esac
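+
+# Usage sketch (illustrative only; `./libtool' stands for the generated
+# script): the shorthand above makes these invocations equivalent:
+#   ./libtool compile gcc -c foo.c
+#   ./libtool --mode compile gcc -c foo.c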
+
+
+
+# Option defaults:
+opt_debug=:
+opt_dry_run=false
+opt_config=false
+opt_preserve_dup_deps=false
+opt_features=false
+opt_finish=false
+opt_help=false
+opt_help_all=false
+opt_silent=false
+opt_warning=:
+opt_verbose=false
+
+
+# Parse options once, thoroughly.  This comes as soon as possible in the
+# script to make things like `--version' happen as quickly as we can.
+{
+  # this just eases exit handling
+  while test $# -gt 0; do
+    opt="$1"
+    shift
+    case $opt in
+      --debug|-x)	opt_debug='set -x'
+			func_echo "enabling shell trace mode"
+			$opt_debug
+			;;
+      --dry-run|--dryrun|-n)
+			opt_dry_run=:
+			;;
+      --config)
+			opt_config=:
+func_config
+			;;
+      --dlopen|-dlopen)
+			optarg="$1"
+			opt_dlopen="${opt_dlopen+$opt_dlopen
+}$optarg"
+			shift
+			;;
+      --preserve-dup-deps)
+			opt_preserve_dup_deps=:
+			;;
+      --features)
+			opt_features=:
+func_features
+			;;
+      --finish)
+			opt_finish=:
+set dummy --mode finish ${1+"$@"}; shift
+			;;
+      --help)
+			opt_help=:
+			;;
+      --help-all)
+			opt_help_all=:
+opt_help=': help-all'
+			;;
+      --mode)
+			test $# = 0 && func_missing_arg $opt && break
+			optarg="$1"
+			opt_mode="$optarg"
+case $optarg in
+  # Valid mode arguments:
+  clean|compile|execute|finish|install|link|relink|uninstall) ;;
+
+  # Catch anything else as an error
+  *) func_error "invalid argument for $opt"
+     exit_cmd=exit
+     break
+     ;;
+esac
+			shift
+			;;
+      --no-silent|--no-quiet)
+			opt_silent=false
+func_append preserve_args " $opt"
+			;;
+      --no-warning|--no-warn)
+			opt_warning=false
+func_append preserve_args " $opt"
+			;;
+      --no-verbose)
+			opt_verbose=false
+func_append preserve_args " $opt"
+			;;
+      --silent|--quiet)
+			opt_silent=:
+func_append preserve_args " $opt"
+        opt_verbose=false
+			;;
+      --verbose|-v)
+			opt_verbose=:
+func_append preserve_args " $opt"
+opt_silent=false
+			;;
+      --tag)
+			test $# = 0 && func_missing_arg $opt && break
+			optarg="$1"
+			opt_tag="$optarg"
+func_append preserve_args " $opt $optarg"
+func_enable_tag "$optarg"
+			shift
+			;;
+
+      -\?|-h)		func_usage				;;
+      --help)		func_help				;;
+      --version)	func_version				;;
+
+      # Separate optargs to long options:
+      --*=*)
+			func_split_long_opt "$opt"
+			set dummy "$func_split_long_opt_name" "$func_split_long_opt_arg" ${1+"$@"}
+			shift
+			;;
+
+      # Separate non-argument short options:
+      -\?*|-h*|-n*|-v*)
+			func_split_short_opt "$opt"
+			set dummy "$func_split_short_opt_name" "-$func_split_short_opt_arg" ${1+"$@"}
+			shift
+			;;
+
+      --)		break					;;
+      -*)		func_fatal_help "unrecognized option \`$opt'" ;;
+      *)		set dummy "$opt" ${1+"$@"};	shift; break  ;;
+    esac
+  done
+
+  # Validate options:
+
+  # save first non-option argument
+  if test "$#" -gt 0; then
+    nonopt="$opt"
+    shift
+  fi
+
+  # preserve --debug
+  test "$opt_debug" = : || func_append preserve_args " --debug"
+
+  case $host in
+    *cygwin* | *mingw* | *pw32* | *cegcc*)
+      # don't eliminate duplications in $postdeps and $predeps
+      opt_duplicate_compiler_generated_deps=:
+      ;;
+    *)
+      opt_duplicate_compiler_generated_deps=$opt_preserve_dup_deps
+      ;;
+  esac
+
+  $opt_help || {
+    # Sanity checks first:
+    func_check_version_match
+
+    if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then
+      func_fatal_configuration "not configured to build any kind of library"
+    fi
+
+    # Darwin sucks
+    eval std_shrext=\"$shrext_cmds\"
+
+    # Only execute mode is allowed to have -dlopen flags.
+    if test -n "$opt_dlopen" && test "$opt_mode" != execute; then
+      func_error "unrecognized option \`-dlopen'"
+      $ECHO "$help" 1>&2
+      exit $EXIT_FAILURE
+    fi
+
+    # Change the help message to a mode-specific one.
+    generic_help="$help"
+    help="Try \`$progname --help --mode=$opt_mode' for more information."
+  }
+
+
+  # Bail if the options were screwed
+  $exit_cmd $EXIT_FAILURE
+}
+
+
+
+
+## ----------- ##
+##    Main.    ##
+## ----------- ##
+
+# func_lalib_p file
+# True iff FILE is a libtool `.la' library or `.lo' object file.
+# This function is only a basic sanity check; it will hardly flush out
+# determined imposters.
+func_lalib_p ()
+{
+    test -f "$1" &&
+      $SED -e 4q "$1" 2>/dev/null \
+        | $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1
+}
+
+# func_lalib_unsafe_p file
+# True iff FILE is a libtool `.la' library or `.lo' object file.
+# This function implements the same check as func_lalib_p without
+# resorting to external programs.  To this end, it redirects stdin and
+# closes it afterwards, without saving the original file descriptor.
+# As a safety measure, use it only where a negative result would be
+# fatal anyway.  Works if `file' does not exist.
+func_lalib_unsafe_p ()
+{
+    lalib_p=no
+    if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then
+	for lalib_p_l in 1 2 3 4
+	do
+	    read lalib_p_line
+	    case "$lalib_p_line" in
+		\#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;;
+	    esac
+	done
+	exec 0<&5 5<&-
+    fi
+    test "$lalib_p" = yes
+}
+
+# func_ltwrapper_script_p file
+# True iff FILE is a libtool wrapper script
+# This function is only a basic sanity check; it will hardly flush out
+# determined imposters.
+func_ltwrapper_script_p ()
+{
+    func_lalib_p "$1"
+}
+
+# func_ltwrapper_executable_p file
+# True iff FILE is a libtool wrapper executable
+# This function is only a basic sanity check; it will hardly flush out
+# determined imposters.
+func_ltwrapper_executable_p ()
+{
+    func_ltwrapper_exec_suffix=
+    case $1 in
+    *.exe) ;;
+    *) func_ltwrapper_exec_suffix=.exe ;;
+    esac
+    $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1
+}
+
+# func_ltwrapper_scriptname file
+# Assumes FILE is an ltwrapper_executable; uses $file to determine
+# the appropriate filename for a temporary ltwrapper_script.
+func_ltwrapper_scriptname ()
+{
+    func_dirname_and_basename "$1" "" "."
+    func_stripname '' '.exe' "$func_basename_result"
+    func_ltwrapper_scriptname_result="$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper"
+}
+
+# func_ltwrapper_p file
+# True iff FILE is a libtool wrapper script or wrapper executable
+# This function is only a basic sanity check; it will hardly flush out
+# determined imposters.
+func_ltwrapper_p ()
+{
+    func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1"
+}
+
+
+# func_execute_cmds commands fail_cmd
+# Execute tilde-delimited COMMANDS.
+# If FAIL_CMD is given, eval that upon failure.
+# FAIL_CMD may read-access the current command in variable CMD!
+func_execute_cmds ()
+{
+    $opt_debug
+    save_ifs=$IFS; IFS='~'
+    for cmd in $1; do
+      IFS=$save_ifs
+      eval cmd=\"$cmd\"
+      func_show_eval "$cmd" "${2-:}"
+    done
+    IFS=$save_ifs
+}
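+
+# Usage sketch (illustrative only):
+#   func_execute_cmds 'echo one~echo two' 'exit $EXIT_FAILURE'
+#   # runs `echo one' and then `echo two'; if either command fails,
+#   # the fail expression is evaluated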
+
+
+# func_source file
+# Source FILE, adding directory component if necessary.
+# Note that it is not necessary on cygwin/mingw to append a dot to
+# FILE even if both FILE and FILE.exe exist: automatic-append-.exe
+# behavior happens only for exec(3), not for open(2)!  Also, sourcing
+# `FILE.' does not work on cygwin managed mounts.
+func_source ()
+{
+    $opt_debug
+    case $1 in
+    */* | *\\*)	. "$1" ;;
+    *)		. "./$1" ;;
+    esac
+}
+
+
+# func_resolve_sysroot PATH
+# Replace a leading = in PATH with a sysroot.  Store the result into
+# func_resolve_sysroot_result
+func_resolve_sysroot ()
+{
+  func_resolve_sysroot_result=$1
+  case $func_resolve_sysroot_result in
+  =*)
+    func_stripname '=' '' "$func_resolve_sysroot_result"
+    func_resolve_sysroot_result=$lt_sysroot$func_stripname_result
+    ;;
+  esac
+}
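+
+# Usage sketch (illustrative only; assumes lt_sysroot=/opt/sysroot):
+#   func_resolve_sysroot "=/usr/lib/libfoo.la"
+#   # func_resolve_sysroot_result => /opt/sysroot/usr/lib/libfoo.la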
+
+# func_replace_sysroot PATH
+# If PATH begins with the sysroot, replace it with = and
+# store the result into func_replace_sysroot_result.
+func_replace_sysroot ()
+{
+  case "$lt_sysroot:$1" in
+  ?*:"$lt_sysroot"*)
+    func_stripname "$lt_sysroot" '' "$1"
+    func_replace_sysroot_result="=$func_stripname_result"
+    ;;
+  *)
+    # Including no sysroot.
+    func_replace_sysroot_result=$1
+    ;;
+  esac
+}
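+
+# Usage sketch (illustrative only; assumes lt_sysroot=/opt/sysroot):
+#   func_replace_sysroot "/opt/sysroot/usr/lib"
+#   # func_replace_sysroot_result => =/usr/lib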
+
+# func_infer_tag arg
+# Infer tagged configuration to use if any are available and
+# if one wasn't chosen via the "--tag" command line option.
+# Only attempt this if the compiler in the base compile
+# command doesn't match the default compiler.
+# arg is usually of the form 'gcc ...'
+func_infer_tag ()
+{
+    $opt_debug
+    if test -n "$available_tags" && test -z "$tagname"; then
+      CC_quoted=
+      for arg in $CC; do
+	func_append_quoted CC_quoted "$arg"
+      done
+      CC_expanded=`func_echo_all $CC`
+      CC_quoted_expanded=`func_echo_all $CC_quoted`
+      case $@ in
+      # Blanks in the command may have been stripped by the calling shell,
+      # but not from the CC environment variable when configure was run.
+      " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \
+      " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) ;;
+      # Blanks at the start of $base_compile will cause this to fail
+      # if we don't check for them as well.
+      *)
+	for z in $available_tags; do
+	  if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then
+	    # Evaluate the configuration.
+	    eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`"
+	    CC_quoted=
+	    for arg in $CC; do
+	      # Double-quote args containing other shell metacharacters.
+	      func_append_quoted CC_quoted "$arg"
+	    done
+	    CC_expanded=`func_echo_all $CC`
+	    CC_quoted_expanded=`func_echo_all $CC_quoted`
+	    case "$@ " in
+	    " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \
+	    " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*)
+	      # The compiler in the base compile command matches
+	      # the one in the tagged configuration.
+	      # Assume this is the tagged configuration we want.
+	      tagname=$z
+	      break
+	      ;;
+	    esac
+	  fi
+	done
+	# If $tagname still isn't set, then no tagged configuration
+	# was found and let the user know that the "--tag" command
+	# line option must be used.
+	if test -z "$tagname"; then
+	  func_echo "unable to infer tagged configuration"
+	  func_fatal_error "specify a tag with \`--tag'"
+#	else
+#	  func_verbose "using $tagname tagged configuration"
+	fi
+	;;
+      esac
+    fi
+}
+
+
+
+# func_write_libtool_object output_name pic_name nonpic_name
+# Create a libtool object file (analogous to a ".la" file),
+# but don't create it if we're doing a dry run.
+func_write_libtool_object ()
+{
+    write_libobj=${1}
+    if test "$build_libtool_libs" = yes; then
+      write_lobj=\'${2}\'
+    else
+      write_lobj=none
+    fi
+
+    if test "$build_old_libs" = yes; then
+      write_oldobj=\'${3}\'
+    else
+      write_oldobj=none
+    fi
+
+    $opt_dry_run || {
+      cat >${write_libobj}T <<EOF
+# $write_libobj - a libtool object file
+# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# Name of the PIC object.
+pic_object=$write_lobj
+
+# Name of the non-PIC object
+non_pic_object=$write_oldobj
+
+EOF
+      $MV "${write_libobj}T" "${write_libobj}"
+    }
+}
+
+
+##################################################
+# FILE NAME AND PATH CONVERSION HELPER FUNCTIONS #
+##################################################
+
+# func_convert_core_file_wine_to_w32 ARG
+# Helper function used by file name conversion functions when $build is *nix,
+# and $host is mingw, cygwin, or some other w32 environment. Relies on a
+# correctly configured wine environment being available, with the winepath program
+# in $build's $PATH.
+#
+# ARG is the $build file name to be converted to w32 format.
+# Result is available in $func_convert_core_file_wine_to_w32_result, and will
+# be empty on error (or when ARG is empty)
+func_convert_core_file_wine_to_w32 ()
+{
+  $opt_debug
+  func_convert_core_file_wine_to_w32_result="$1"
+  if test -n "$1"; then
+    # Unfortunately, winepath does not exit with a non-zero error code, so we
+    # are forced to check the contents of stdout. On the other hand, if the
+    # command is not found, the shell will set an exit code of 127 and print
+    # *an error message* to stdout. So we must check for both error code of
+    # zero AND non-empty stdout, which explains the odd construction:
+    func_convert_core_file_wine_to_w32_tmp=`winepath -w "$1" 2>/dev/null`
+    if test "$?" -eq 0 && test -n "${func_convert_core_file_wine_to_w32_tmp}"; then
+      func_convert_core_file_wine_to_w32_result=`$ECHO "$func_convert_core_file_wine_to_w32_tmp" |
+        $SED -e "$lt_sed_naive_backslashify"`
+    else
+      func_convert_core_file_wine_to_w32_result=
+    fi
+  fi
+}
+# end: func_convert_core_file_wine_to_w32
+
+
+# func_convert_core_path_wine_to_w32 ARG
+# Helper function used by path conversion functions when $build is *nix, and
+# $host is mingw, cygwin, or some other w32 environment. Relies on a correctly
+# configured wine environment being available, with the winepath program in $build's
+# $PATH. Assumes ARG has no leading or trailing path separator characters.
+#
+# ARG is path to be converted from $build format to win32.
+# Result is available in $func_convert_core_path_wine_to_w32_result.
+# Unconvertible file (directory) names in ARG are skipped; if no directory names
+# are convertible, then the result may be empty.
+func_convert_core_path_wine_to_w32 ()
+{
+  $opt_debug
+  # unfortunately, winepath doesn't convert paths, only file names
+  func_convert_core_path_wine_to_w32_result=""
+  if test -n "$1"; then
+    oldIFS=$IFS
+    IFS=:
+    for func_convert_core_path_wine_to_w32_f in $1; do
+      IFS=$oldIFS
+      func_convert_core_file_wine_to_w32 "$func_convert_core_path_wine_to_w32_f"
+      if test -n "$func_convert_core_file_wine_to_w32_result" ; then
+        if test -z "$func_convert_core_path_wine_to_w32_result"; then
+          func_convert_core_path_wine_to_w32_result="$func_convert_core_file_wine_to_w32_result"
+        else
+          func_append func_convert_core_path_wine_to_w32_result ";$func_convert_core_file_wine_to_w32_result"
+        fi
+      fi
+    done
+    IFS=$oldIFS
+  fi
+}
+# end: func_convert_core_path_wine_to_w32
+
+
+# func_cygpath ARGS...
+# Wrapper around calling the cygpath program via LT_CYGPATH. This is used when
+# (1) $build is *nix and Cygwin is hosted via a wine environment; or (2)
+# $build is MSYS and $host is Cygwin, or (3) $build is Cygwin. In case (1) or
+# (2), returns the Cygwin file name or path in func_cygpath_result (input
+# file name or path is assumed to be in w32 format, as previously converted
+# from $build's *nix or MSYS format). In case (3), returns the w32 file name
+# or path in func_cygpath_result (input file name or path is assumed to be in
+# Cygwin format). Returns an empty string on error.
+#
+# ARGS are passed to cygpath, with the last one being the file name or path to
+# be converted.
+#
+# Specify the absolute *nix (or w32) name to cygpath in the LT_CYGPATH
+# environment variable; do not put it in $PATH.
+func_cygpath ()
+{
+  $opt_debug
+  if test -n "$LT_CYGPATH" && test -f "$LT_CYGPATH"; then
+    func_cygpath_result=`$LT_CYGPATH "$@" 2>/dev/null`
+    if test "$?" -ne 0; then
+      # on failure, ensure result is empty
+      func_cygpath_result=
+    fi
+  else
+    func_cygpath_result=
+    func_error "LT_CYGPATH is empty or specifies non-existent file: \`$LT_CYGPATH'"
+  fi
+}
+#end: func_cygpath
+
+
+# func_convert_core_msys_to_w32 ARG
+# Convert file name or path ARG from MSYS format to w32 format.  Return
+# result in func_convert_core_msys_to_w32_result.
+func_convert_core_msys_to_w32 ()
+{
+  $opt_debug
+  # awkward: cmd appends spaces to result
+  func_convert_core_msys_to_w32_result=`( cmd //c echo "$1" ) 2>/dev/null |
+    $SED -e 's/[ ]*$//' -e "$lt_sed_naive_backslashify"`
+}
+#end: func_convert_core_msys_to_w32
+
+
+# func_convert_file_check ARG1 ARG2
+# Verify that ARG1 (a file name in $build format) was converted to $host
+# format in ARG2. Otherwise, emit an error message, but continue (resetting
+# func_to_host_file_result to ARG1).
+func_convert_file_check ()
+{
+  $opt_debug
+  if test -z "$2" && test -n "$1" ; then
+    func_error "Could not determine host file name corresponding to"
+    func_error "  \`$1'"
+    func_error "Continuing, but uninstalled executables may not work."
+    # Fallback:
+    func_to_host_file_result="$1"
+  fi
+}
+# end func_convert_file_check
+
+
+# func_convert_path_check FROM_PATHSEP TO_PATHSEP FROM_PATH TO_PATH
+# Verify that FROM_PATH (a path in $build format) was converted to $host
+# format in TO_PATH. Otherwise, emit an error message, but continue, resetting
+# func_to_host_file_result to a simplistic fallback value (see below).
+func_convert_path_check ()
+{
+  $opt_debug
+  if test -z "$4" && test -n "$3"; then
+    func_error "Could not determine the host path corresponding to"
+    func_error "  \`$3'"
+    func_error "Continuing, but uninstalled executables may not work."
+    # Fallback.  This is a deliberately simplistic "conversion" and
+    # should not be "improved".  See libtool.info.
+    if test "x$1" != "x$2"; then
+      lt_replace_pathsep_chars="s|$1|$2|g"
+      func_to_host_path_result=`echo "$3" |
+        $SED -e "$lt_replace_pathsep_chars"`
+    else
+      func_to_host_path_result="$3"
+    fi
+  fi
+}
+# end func_convert_path_check
+
+
+# func_convert_path_front_back_pathsep FRONTPAT BACKPAT REPL ORIG
+# Modifies func_to_host_path_result by prepending REPL if ORIG matches FRONTPAT
+# and appending REPL if ORIG matches BACKPAT.
+func_convert_path_front_back_pathsep ()
+{
+  $opt_debug
+  case $4 in
+  $1 ) func_to_host_path_result="$3$func_to_host_path_result"
+    ;;
+  esac
+  case $4 in
+  $2 ) func_append func_to_host_path_result "$3"
+    ;;
+  esac
+}
+# end func_convert_path_front_back_pathsep
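+
+# Usage sketch (illustrative only): preserve a leading `:' of the original
+# path as a leading `;' on the converted result:
+#   func_to_host_path_result='C:\foo'
+#   func_convert_path_front_back_pathsep ":*" "*:" ";" ":/foo"
+#   # func_to_host_path_result => ;C:\foo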
+
+
+##################################################
+# $build to $host FILE NAME CONVERSION FUNCTIONS #
+##################################################
+# invoked via `$to_host_file_cmd ARG'
+#
+# In each case, ARG is the path to be converted from $build to $host format.
+# Result will be available in $func_to_host_file_result.
+
+
+# func_to_host_file ARG
+# Converts the file name ARG from $build format to $host format. Return result
+# in func_to_host_file_result.
+func_to_host_file ()
+{
+  $opt_debug
+  $to_host_file_cmd "$1"
+}
+# end func_to_host_file
+
+
+# func_to_tool_file ARG LAZY
+# Converts the file name ARG from $build format to toolchain format. Return
+# result in func_to_tool_file_result.  If the conversion in use is listed
+# in (the comma separated) LAZY, no conversion takes place.
+func_to_tool_file ()
+{
+  $opt_debug
+  case ,$2, in
+    *,"$to_tool_file_cmd",*)
+      func_to_tool_file_result=$1
+      ;;
+    *)
+      $to_tool_file_cmd "$1"
+      func_to_tool_file_result=$func_to_host_file_result
+      ;;
+  esac
+}
+# end func_to_tool_file
+
+
+# func_convert_file_noop ARG
+# Copy ARG to func_to_host_file_result.
+func_convert_file_noop ()
+{
+  func_to_host_file_result="$1"
+}
+# end func_convert_file_noop
+
+
+# func_convert_file_msys_to_w32 ARG
+# Convert file name ARG from (mingw) MSYS to (mingw) w32 format; automatic
+# conversion to w32 is not available inside the cwrapper.  Returns result in
+# func_to_host_file_result.
+func_convert_file_msys_to_w32 ()
+{
+  $opt_debug
+  func_to_host_file_result="$1"
+  if test -n "$1"; then
+    func_convert_core_msys_to_w32 "$1"
+    func_to_host_file_result="$func_convert_core_msys_to_w32_result"
+  fi
+  func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_msys_to_w32
+
+
+# func_convert_file_cygwin_to_w32 ARG
+# Convert file name ARG from Cygwin to w32 format.  Returns result in
+# func_to_host_file_result.
+func_convert_file_cygwin_to_w32 ()
+{
+  $opt_debug
+  func_to_host_file_result="$1"
+  if test -n "$1"; then
+    # because $build is cygwin, we call "the" cygpath in $PATH; no need to use
+    # LT_CYGPATH in this case.
+    func_to_host_file_result=`cygpath -m "$1"`
+  fi
+  func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_cygwin_to_w32
+
+
+# func_convert_file_nix_to_w32 ARG
+# Convert file name ARG from *nix to w32 format.  Requires a wine environment
+# and a working winepath. Returns result in func_to_host_file_result.
+func_convert_file_nix_to_w32 ()
+{
+  $opt_debug
+  func_to_host_file_result="$1"
+  if test -n "$1"; then
+    func_convert_core_file_wine_to_w32 "$1"
+    func_to_host_file_result="$func_convert_core_file_wine_to_w32_result"
+  fi
+  func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_nix_to_w32
+
+
+# func_convert_file_msys_to_cygwin ARG
+# Convert file name ARG from MSYS to Cygwin format.  Requires LT_CYGPATH set.
+# Returns result in func_to_host_file_result.
+func_convert_file_msys_to_cygwin ()
+{
+  $opt_debug
+  func_to_host_file_result="$1"
+  if test -n "$1"; then
+    func_convert_core_msys_to_w32 "$1"
+    func_cygpath -u "$func_convert_core_msys_to_w32_result"
+    func_to_host_file_result="$func_cygpath_result"
+  fi
+  func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_msys_to_cygwin
+
+
+# func_convert_file_nix_to_cygwin ARG
+# Convert file name ARG from *nix to Cygwin format.  Requires Cygwin installed
+# in a wine environment, working winepath, and LT_CYGPATH set.  Returns result
+# in func_to_host_file_result.
+func_convert_file_nix_to_cygwin ()
+{
+  $opt_debug
+  func_to_host_file_result="$1"
+  if test -n "$1"; then
+    # convert from *nix to w32, then use cygpath to convert from w32 to cygwin.
+    func_convert_core_file_wine_to_w32 "$1"
+    func_cygpath -u "$func_convert_core_file_wine_to_w32_result"
+    func_to_host_file_result="$func_cygpath_result"
+  fi
+  func_convert_file_check "$1" "$func_to_host_file_result"
+}
+# end func_convert_file_nix_to_cygwin
+
+
+#############################################
+# $build to $host PATH CONVERSION FUNCTIONS #
+#############################################
+# invoked via `$to_host_path_cmd ARG'
+#
+# In each case, ARG is the path to be converted from $build to $host format.
+# The result will be available in $func_to_host_path_result.
+#
+# Path separators are also converted from $build format to $host format.  If
+# ARG begins or ends with a path separator character, it is preserved (but
+# converted to $host format) on output.
+#
+# All path conversion functions are named using the following convention:
+#   file name conversion function    : func_convert_file_X_to_Y ()
+#   path conversion function         : func_convert_path_X_to_Y ()
+# where, for any given $build/$host combination the 'X_to_Y' value is the
+# same.  If conversion functions are added for new $build/$host combinations,
+# the two new functions must follow this pattern, or func_init_to_host_path_cmd
+# will break.
+
+
+# func_init_to_host_path_cmd
+# Ensures that function "pointer" variable $to_host_path_cmd is set to the
+# appropriate value, based on the value of $to_host_file_cmd.
+to_host_path_cmd=
+func_init_to_host_path_cmd ()
+{
+  $opt_debug
+  if test -z "$to_host_path_cmd"; then
+    func_stripname 'func_convert_file_' '' "$to_host_file_cmd"
+    to_host_path_cmd="func_convert_path_${func_stripname_result}"
+  fi
+}
+
+
+# func_to_host_path ARG
+# Converts the path ARG from $build format to $host format. Return result
+# in func_to_host_path_result.
+func_to_host_path ()
+{
+  $opt_debug
+  func_init_to_host_path_cmd
+  $to_host_path_cmd "$1"
+}
+# end func_to_host_path
+
+
+# func_convert_path_noop ARG
+# Copy ARG to func_to_host_path_result.
+func_convert_path_noop ()
+{
+  func_to_host_path_result="$1"
+}
+# end func_convert_path_noop
+
+
+# func_convert_path_msys_to_w32 ARG
+# Convert path ARG from (mingw) MSYS to (mingw) w32 format; automatic
+# conversion to w32 is not available inside the cwrapper.  Returns result in
+# func_to_host_path_result.
+func_convert_path_msys_to_w32 ()
+{
+  $opt_debug
+  func_to_host_path_result="$1"
+  if test -n "$1"; then
+    # Remove leading and trailing path separator characters from ARG.  MSYS
+    # behavior is inconsistent here; cygpath turns them into '.;' and ';.';
+    # and winepath ignores them completely.
+    func_stripname : : "$1"
+    func_to_host_path_tmp1=$func_stripname_result
+    func_convert_core_msys_to_w32 "$func_to_host_path_tmp1"
+    func_to_host_path_result="$func_convert_core_msys_to_w32_result"
+    func_convert_path_check : ";" \
+      "$func_to_host_path_tmp1" "$func_to_host_path_result"
+    func_convert_path_front_back_pathsep ":*" "*:" ";" "$1"
+  fi
+}
+# end func_convert_path_msys_to_w32
+
+
+# func_convert_path_cygwin_to_w32 ARG
+# Convert path ARG from Cygwin to w32 format.  Returns result in
+# func_to_host_path_result.
+func_convert_path_cygwin_to_w32 ()
+{
+  $opt_debug
+  func_to_host_path_result="$1"
+  if test -n "$1"; then
+    # See func_convert_path_msys_to_w32:
+    func_stripname : : "$1"
+    func_to_host_path_tmp1=$func_stripname_result
+    func_to_host_path_result=`cygpath -m -p "$func_to_host_path_tmp1"`
+    func_convert_path_check : ";" \
+      "$func_to_host_path_tmp1" "$func_to_host_path_result"
+    func_convert_path_front_back_pathsep ":*" "*:" ";" "$1"
+  fi
+}
+# end func_convert_path_cygwin_to_w32
+
+
+# func_convert_path_nix_to_w32 ARG
+# Convert path ARG from *nix to w32 format.  Requires a wine environment and
+# a working winepath.  Returns result in func_to_host_path_result.
+func_convert_path_nix_to_w32 ()
+{
+  $opt_debug
+  func_to_host_path_result="$1"
+  if test -n "$1"; then
+    # See func_convert_path_msys_to_w32:
+    func_stripname : : "$1"
+    func_to_host_path_tmp1=$func_stripname_result
+    func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1"
+    func_to_host_path_result="$func_convert_core_path_wine_to_w32_result"
+    func_convert_path_check : ";" \
+      "$func_to_host_path_tmp1" "$func_to_host_path_result"
+    func_convert_path_front_back_pathsep ":*" "*:" ";" "$1"
+  fi
+}
+# end func_convert_path_nix_to_w32
+
+
+# func_convert_path_msys_to_cygwin ARG
+# Convert path ARG from MSYS to Cygwin format.  Requires LT_CYGPATH set.
+# Returns result in func_to_host_path_result.
+func_convert_path_msys_to_cygwin ()
+{
+  $opt_debug
+  func_to_host_path_result="$1"
+  if test -n "$1"; then
+    # See func_convert_path_msys_to_w32:
+    func_stripname : : "$1"
+    func_to_host_path_tmp1=$func_stripname_result
+    func_convert_core_msys_to_w32 "$func_to_host_path_tmp1"
+    func_cygpath -u -p "$func_convert_core_msys_to_w32_result"
+    func_to_host_path_result="$func_cygpath_result"
+    func_convert_path_check : : \
+      "$func_to_host_path_tmp1" "$func_to_host_path_result"
+    func_convert_path_front_back_pathsep ":*" "*:" : "$1"
+  fi
+}
+# end func_convert_path_msys_to_cygwin
+
+
+# func_convert_path_nix_to_cygwin ARG
+# Convert path ARG from *nix to Cygwin format.  Requires Cygwin installed in
+# a wine environment, working winepath, and LT_CYGPATH set.  Returns result
+# in func_to_host_path_result.
+func_convert_path_nix_to_cygwin ()
+{
+  $opt_debug
+  func_to_host_path_result="$1"
+  if test -n "$1"; then
+    # Remove leading and trailing path separator characters from
+    # ARG. msys behavior is inconsistent here, cygpath turns them
+    # into '.;' and ';.', and winepath ignores them completely.
+    func_stripname : : "$1"
+    func_to_host_path_tmp1=$func_stripname_result
+    func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1"
+    func_cygpath -u -p "$func_convert_core_path_wine_to_w32_result"
+    func_to_host_path_result="$func_cygpath_result"
+    func_convert_path_check : : \
+      "$func_to_host_path_tmp1" "$func_to_host_path_result"
+    func_convert_path_front_back_pathsep ":*" "*:" : "$1"
+  fi
+}
+# end func_convert_path_nix_to_cygwin
+
+
+# func_mode_compile arg...
+func_mode_compile ()
+{
+    $opt_debug
+    # Get the compilation command and the source file.
+    base_compile=
+    srcfile="$nonopt"  #  always keep a non-empty value in "srcfile"
+    suppress_opt=yes
+    suppress_output=
+    arg_mode=normal
+    libobj=
+    later=
+    pie_flag=
+
+    for arg
+    do
+      case $arg_mode in
+      arg  )
+	# do not "continue".  Instead, add this to base_compile
+	lastarg="$arg"
+	arg_mode=normal
+	;;
+
+      target )
+	libobj="$arg"
+	arg_mode=normal
+	continue
+	;;
+
+      normal )
+	# Accept any command-line options.
+	case $arg in
+	-o)
+	  test -n "$libobj" && \
+	    func_fatal_error "you cannot specify \`-o' more than once"
+	  arg_mode=target
+	  continue
+	  ;;
+
+	-pie | -fpie | -fPIE)
+          func_append pie_flag " $arg"
+	  continue
+	  ;;
+
+	-shared | -static | -prefer-pic | -prefer-non-pic)
+	  func_append later " $arg"
+	  continue
+	  ;;
+
+	-no-suppress)
+	  suppress_opt=no
+	  continue
+	  ;;
+
+	-Xcompiler)
+	  arg_mode=arg  #  the next one goes into the "base_compile" arg list
+	  continue      #  The current "srcfile" will either be retained or
+	  ;;            #  replaced later.  I would guess that would be a bug.
+
+	-Wc,*)
+	  func_stripname '-Wc,' '' "$arg"
+	  args=$func_stripname_result
+	  lastarg=
+	  save_ifs="$IFS"; IFS=','
+	  for arg in $args; do
+	    IFS="$save_ifs"
+	    func_append_quoted lastarg "$arg"
+	  done
+	  IFS="$save_ifs"
+	  func_stripname ' ' '' "$lastarg"
+	  lastarg=$func_stripname_result
+
+	  # Add the arguments to base_compile.
+	  func_append base_compile " $lastarg"
+	  continue
+	  ;;
+
+	*)
+	  # Accept the current argument as the source file.
+	  # The previous "srcfile" becomes the current argument.
+	  #
+	  lastarg="$srcfile"
+	  srcfile="$arg"
+	  ;;
+	esac  #  case $arg
+	;;
+      esac    #  case $arg_mode
+
+      # Aesthetically quote the previous argument.
+      func_append_quoted base_compile "$lastarg"
+    done # for arg
+
+    case $arg_mode in
+    arg)
+      func_fatal_error "you must specify an argument for -Xcompiler"
+      ;;
+    target)
+      func_fatal_error "you must specify a target with \`-o'"
+      ;;
+    *)
+      # Get the name of the library object.
+      test -z "$libobj" && {
+	func_basename "$srcfile"
+	libobj="$func_basename_result"
+      }
+      ;;
+    esac
+
+    # Recognize several different file suffixes.
+    # If the user specifies -o file.o, it is replaced with file.lo
+    case $libobj in
+    *.[cCFSifmso] | \
+    *.ada | *.adb | *.ads | *.asm | \
+    *.c++ | *.cc | *.ii | *.class | *.cpp | *.cxx | \
+    *.[fF][09]? | *.for | *.java | *.go | *.obj | *.sx | *.cu | *.cup)
+      func_xform "$libobj"
+      libobj=$func_xform_result
+      ;;
+    esac
+
+    case $libobj in
+    *.lo) func_lo2o "$libobj"; obj=$func_lo2o_result ;;
+    *)
+      func_fatal_error "cannot determine name of library object from \`$libobj'"
+      ;;
+    esac
+
+    func_infer_tag $base_compile
+
+    for arg in $later; do
+      case $arg in
+      -shared)
+	test "$build_libtool_libs" != yes && \
+	  func_fatal_configuration "can not build a shared library"
+	build_old_libs=no
+	continue
+	;;
+
+      -static)
+	build_libtool_libs=no
+	build_old_libs=yes
+	continue
+	;;
+
+      -prefer-pic)
+	pic_mode=yes
+	continue
+	;;
+
+      -prefer-non-pic)
+	pic_mode=no
+	continue
+	;;
+      esac
+    done
+
+    func_quote_for_eval "$libobj"
+    test "X$libobj" != "X$func_quote_for_eval_result" \
+      && $ECHO "X$libobj" | $GREP '[]~#^*{};<>?"'"'"'	 &()|`$[]' \
+      && func_warning "libobj name \`$libobj' may not contain shell special characters."
+    func_dirname_and_basename "$obj" "/" ""
+    objname="$func_basename_result"
+    xdir="$func_dirname_result"
+    lobj=${xdir}$objdir/$objname
+
+    test -z "$base_compile" && \
+      func_fatal_help "you must specify a compilation command"
+
+    # Delete any leftover library objects.
+    if test "$build_old_libs" = yes; then
+      removelist="$obj $lobj $libobj ${libobj}T"
+    else
+      removelist="$lobj $libobj ${libobj}T"
+    fi
+
+    # On Cygwin there's no "real" PIC flag so we must build both object types
+    case $host_os in
+    cygwin* | mingw* | pw32* | os2* | cegcc*)
+      pic_mode=default
+      ;;
+    esac
+    if test "$pic_mode" = no && test "$deplibs_check_method" != pass_all; then
+      # non-PIC code in shared libraries is not supported
+      pic_mode=default
+    fi
+
+    # Calculate the filename of the output object if compiler does
+    # not support -o with -c
+    if test "$compiler_c_o" = no; then
+      output_obj=`$ECHO "$srcfile" | $SED 's%^.*/%%; s%\.[^.]*$%%'`.${objext}
+      lockfile="$output_obj.lock"
+    else
+      output_obj=
+      need_locks=no
+      lockfile=
+    fi
+
+    # Lock this critical section if it is needed
+    # We use this script file to make the link, it avoids creating a new file
+    if test "$need_locks" = yes; then
+      until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do
+	func_echo "Waiting for $lockfile to be removed"
+	sleep 2
+      done
+    elif test "$need_locks" = warn; then
+      if test -f "$lockfile"; then
+	$ECHO "\
+*** ERROR, $lockfile exists and contains:
+`cat $lockfile 2>/dev/null`
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support \`-c' and \`-o' together.  If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) on this platform, or get a better
+compiler."
+
+	$opt_dry_run || $RM $removelist
+	exit $EXIT_FAILURE
+      fi
+      func_append removelist " $output_obj"
+      $ECHO "$srcfile" > "$lockfile"
+    fi
+
+    $opt_dry_run || $RM $removelist
+    func_append removelist " $lockfile"
+    trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15
+
+    func_to_tool_file "$srcfile" func_convert_file_msys_to_w32
+    srcfile=$func_to_tool_file_result
+    func_quote_for_eval "$srcfile"
+    qsrcfile=$func_quote_for_eval_result
+
+    # Only build a PIC object if we are building libtool libraries.
+    if test "$build_libtool_libs" = yes; then
+      # Without this assignment, base_compile gets emptied.
+      fbsd_hideous_sh_bug=$base_compile
+
+      if test "$pic_mode" != no; then
+	command="$base_compile $qsrcfile $pic_flag"
+      else
+	# Don't build PIC code
+	command="$base_compile $qsrcfile"
+      fi
+
+      func_mkdir_p "$xdir$objdir"
+
+      if test -z "$output_obj"; then
+	# Place PIC objects in $objdir
+	func_append command " -o $lobj"
+      fi
+
+      func_show_eval_locale "$command"	\
+          'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE'
+
+      if test "$need_locks" = warn &&
+	 test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then
+	$ECHO "\
+*** ERROR, $lockfile contains:
+`cat $lockfile 2>/dev/null`
+
+but it should contain:
+$srcfile
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support \`-c' and \`-o' together.  If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) on this platform, or get a better
+compiler."
+
+	$opt_dry_run || $RM $removelist
+	exit $EXIT_FAILURE
+      fi
+
+      # Just move the object if needed, then go on to compile the next one
+      if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then
+	func_show_eval '$MV "$output_obj" "$lobj"' \
+	  'error=$?; $opt_dry_run || $RM $removelist; exit $error'
+      fi
+
+      # Allow error messages only from the first compilation.
+      if test "$suppress_opt" = yes; then
+	suppress_output=' >/dev/null 2>&1'
+      fi
+    fi
+
+    # Only build a position-dependent object if we build old libraries.
+    if test "$build_old_libs" = yes; then
+      if test "$pic_mode" != yes; then
+	# Don't build PIC code
+	command="$base_compile $qsrcfile$pie_flag"
+      else
+	command="$base_compile $qsrcfile $pic_flag"
+      fi
+      if test "$compiler_c_o" = yes; then
+	func_append command " -o $obj"
+      fi
+
+      # Suppress compiler output if we already did a PIC compilation.
+      func_append command "$suppress_output"
+      func_show_eval_locale "$command" \
+        '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE'
+
+      if test "$need_locks" = warn &&
+	 test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then
+	$ECHO "\
+*** ERROR, $lockfile contains:
+`cat $lockfile 2>/dev/null`
+
+but it should contain:
+$srcfile
+
+This indicates that another process is trying to use the same
+temporary object file, and libtool could not work around it because
+your compiler does not support \`-c' and \`-o' together.  If you
+repeat this compilation, it may succeed, by chance, but you had better
+avoid parallel builds (make -j) on this platform, or get a better
+compiler."
+
+	$opt_dry_run || $RM $removelist
+	exit $EXIT_FAILURE
+      fi
+
+      # Just move the object if needed
+      if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then
+	func_show_eval '$MV "$output_obj" "$obj"' \
+	  'error=$?; $opt_dry_run || $RM $removelist; exit $error'
+      fi
+    fi
+
+    $opt_dry_run || {
+      func_write_libtool_object "$libobj" "$objdir/$objname" "$objname"
+
+      # Unlock the critical section if it was locked
+      if test "$need_locks" != no; then
+	removelist=$lockfile
+        $RM "$lockfile"
+      fi
+    }
+
+    exit $EXIT_SUCCESS
+}
+
+$opt_help || {
+  test "$opt_mode" = compile && func_mode_compile ${1+"$@"}
+}
+
+func_mode_help ()
+{
+    # We need to display help for each of the modes.
+    case $opt_mode in
+      "")
+        # Generic help is extracted from the usage comments
+        # at the start of this file.
+        func_help
+        ;;
+
+      clean)
+        $ECHO \
+"Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE...
+
+Remove files from the build directory.
+
+RM is the name of the program to use to delete files associated with each FILE
+(typically \`/bin/rm').  RM-OPTIONS are options (such as \`-f') to be passed
+to RM.
+
+If FILE is a libtool library, object or program, all the files associated
+with it are deleted. Otherwise, only FILE itself is deleted using RM."
+        ;;
+
+      compile)
+      $ECHO \
+"Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE
+
+Compile a source file into a libtool library object.
+
+This mode accepts the following additional options:
+
+  -o OUTPUT-FILE    set the output file name to OUTPUT-FILE
+  -no-suppress      do not suppress compiler output for multiple passes
+  -prefer-pic       try to build PIC objects only
+  -prefer-non-pic   try to build non-PIC objects only
+  -shared           do not build a \`.o' file suitable for static linking
+  -static           only build a \`.o' file suitable for static linking
+  -Wc,FLAG          pass FLAG directly to the compiler
+
+COMPILE-COMMAND is a command to be used in creating a \`standard' object file
+from the given SOURCEFILE.
+
+The output file name is determined by removing the directory component from
+SOURCEFILE, then substituting the C source code suffix \`.c' with the
+library object suffix, \`.lo'."
+        ;;
+
+      execute)
+        $ECHO \
+"Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]...
+
+Automatically set library path, then run a program.
+
+This mode accepts the following additional options:
+
+  -dlopen FILE      add the directory containing FILE to the library path
+
+This mode sets the library path environment variable according to \`-dlopen'
+flags.
+
+If any of the ARGS are libtool executable wrappers, then they are translated
+into their corresponding uninstalled binaries, and any of their required library
+directories are added to the library path.
+
+Then, COMMAND is executed, with ARGS as arguments."
+        ;;
+
+      finish)
+        $ECHO \
+"Usage: $progname [OPTION]... --mode=finish [LIBDIR]...
+
+Complete the installation of libtool libraries.
+
+Each LIBDIR is a directory that contains libtool libraries.
+
+The commands that this mode executes may require superuser privileges.  Use
+the \`--dry-run' option if you just want to see what would be executed."
+        ;;
+
+      install)
+        $ECHO \
+"Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND...
+
+Install executables or libraries.
+
+INSTALL-COMMAND is the installation command.  The first component should be
+either the \`install' or \`cp' program.
+
+The following components of INSTALL-COMMAND are treated specially:
+
+  -inst-prefix-dir PREFIX-DIR  Use PREFIX-DIR as a staging area for installation
+
+The rest of the components are interpreted as arguments to that command (only
+BSD-compatible install options are recognized)."
+        ;;
+
+      link)
+        $ECHO \
+"Usage: $progname [OPTION]... --mode=link LINK-COMMAND...
+
+Link object files or libraries together to form another library, or to
+create an executable program.
+
+LINK-COMMAND is a command using the C compiler that you would use to create
+a program from several object files.
+
+The following components of LINK-COMMAND are treated specially:
+
+  -all-static       do not do any dynamic linking at all
+  -avoid-version    do not add a version suffix if possible
+  -bindir BINDIR    specify path to binaries directory (for systems where
+                    libraries must be found in the PATH setting at runtime)
+  -dlopen FILE      \`-dlpreopen' FILE if it cannot be dlopened at runtime
+  -dlpreopen FILE   link in FILE and add its symbols to lt_preloaded_symbols
+  -export-dynamic   allow symbols from OUTPUT-FILE to be resolved with dlsym(3)
+  -export-symbols SYMFILE
+                    try to export only the symbols listed in SYMFILE
+  -export-symbols-regex REGEX
+                    try to export only the symbols matching REGEX
+  -LLIBDIR          search LIBDIR for required installed libraries
+  -lNAME            OUTPUT-FILE requires the installed library libNAME
+  -module           build a library that can be dlopened
+  -no-fast-install  disable the fast-install mode
+  -no-install       link a non-installable executable
+  -no-undefined     declare that a library does not refer to external symbols
+  -o OUTPUT-FILE    create OUTPUT-FILE from the specified objects
+  -objectlist FILE  use a list of object files found in FILE to specify objects
+  -precious-files-regex REGEX
+                    don't remove output files matching REGEX
+  -release RELEASE  specify package release information
+  -rpath LIBDIR     the created library will eventually be installed in LIBDIR
+  -R[ ]LIBDIR       add LIBDIR to the runtime path of programs and libraries
+  -shared           only do dynamic linking of libtool libraries
+  -shrext SUFFIX    override the standard shared library file extension
+  -static           do not do any dynamic linking of uninstalled libtool libraries
+  -static-libtool-libs
+                    do not do any dynamic linking of libtool libraries
+  -version-info CURRENT[:REVISION[:AGE]]
+                    specify library version info [each variable defaults to 0]
+  -weak LIBNAME     declare that the target provides the LIBNAME interface
+  -Wc,FLAG
+  -Xcompiler FLAG   pass linker-specific FLAG directly to the compiler
+  -Wl,FLAG
+  -Xlinker FLAG     pass linker-specific FLAG directly to the linker
+  -XCClinker FLAG   pass link-specific FLAG to the compiler driver (CC)
+
+All other options (arguments beginning with \`-') are ignored.
+
+Every other argument is treated as a filename.  Files ending in \`.la' are
+treated as uninstalled libtool libraries; other files are standard or library
+object files.
+
+If the OUTPUT-FILE ends in \`.la', then a libtool library is created,
+only library objects (\`.lo' files) may be specified, and \`-rpath' is
+required, except when creating a convenience library.
+
+If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created
+using \`ar' and \`ranlib', or on Windows using \`lib'.
+
+If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file
+is created, otherwise an executable program is created."
+        ;;
+
+      uninstall)
+        $ECHO \
+"Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE...
+
+Remove libraries from an installation directory.
+
+RM is the name of the program to use to delete files associated with each FILE
+(typically \`/bin/rm').  RM-OPTIONS are options (such as \`-f') to be passed
+to RM.
+
+If FILE is a libtool library, all the files associated with it are deleted.
+Otherwise, only FILE itself is deleted using RM."
+        ;;
+
+      *)
+        func_fatal_help "invalid operation mode \`$opt_mode'"
+        ;;
+    esac
+
+    echo
+    $ECHO "Try \`$progname --help' for more information about other modes."
+}
+
+# Now that we've collected a possible --mode arg, show help if necessary
+if $opt_help; then
+  if test "$opt_help" = :; then
+    func_mode_help
+  else
+    {
+      func_help noexit
+      for opt_mode in compile link execute install finish uninstall clean; do
+	func_mode_help
+      done
+    } | sed -n '1p; 2,$s/^Usage:/  or: /p'
+    {
+      func_help noexit
+      for opt_mode in compile link execute install finish uninstall clean; do
+	echo
+	func_mode_help
+      done
+    } |
+    sed '1d
+      /^When reporting/,/^Report/{
+	H
+	d
+      }
+      $x
+      /information about other modes/d
+      /more detailed .*MODE/d
+      s/^Usage:.*--mode=\([^ ]*\) .*/Description of \1 mode:/'
+  fi
+  exit $?
+fi
+
+
+# func_mode_execute arg...
+func_mode_execute ()
+{
+    $opt_debug
+    # The first argument is the command name.
+    cmd="$nonopt"
+    test -z "$cmd" && \
+      func_fatal_help "you must specify a COMMAND"
+
+    # Handle -dlopen flags immediately.
+    for file in $opt_dlopen; do
+      test -f "$file" \
+	|| func_fatal_help "\`$file' is not a file"
+
+      dir=
+      case $file in
+      *.la)
+	func_resolve_sysroot "$file"
+	file=$func_resolve_sysroot_result
+
+	# Check to see that this really is a libtool archive.
+	func_lalib_unsafe_p "$file" \
+	  || func_fatal_help "\`$lib' is not a valid libtool archive"
+
+	# Read the libtool library.
+	dlname=
+	library_names=
+	func_source "$file"
+
+	# Skip this library if it cannot be dlopened.
+	if test -z "$dlname"; then
+	  # Warn if it was a shared library.
+	  test -n "$library_names" && \
+	    func_warning "\`$file' was not linked with \`-export-dynamic'"
+	  continue
+	fi
+
+	func_dirname "$file" "" "."
+	dir="$func_dirname_result"
+
+	if test -f "$dir/$objdir/$dlname"; then
+	  func_append dir "/$objdir"
+	else
+	  if test ! -f "$dir/$dlname"; then
+	    func_fatal_error "cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'"
+	  fi
+	fi
+	;;
+
+      *.lo)
+	# Just add the directory containing the .lo file.
+	func_dirname "$file" "" "."
+	dir="$func_dirname_result"
+	;;
+
+      *)
+	func_warning "\`-dlopen' is ignored for non-libtool libraries and objects"
+	continue
+	;;
+      esac
+
+      # Get the absolute pathname.
+      absdir=`cd "$dir" && pwd`
+      test -n "$absdir" && dir="$absdir"
+
+      # Now add the directory to shlibpath_var.
+      if eval "test -z \"\$$shlibpath_var\""; then
+	eval "$shlibpath_var=\"\$dir\""
+      else
+	eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\""
+      fi
+    done
+
+    # This variable tells wrapper scripts just to set shlibpath_var
+    # rather than running their programs.
+    libtool_execute_magic="$magic"
+
+    # Check if any of the arguments is a wrapper script.
+    args=
+    for file
+    do
+      case $file in
+      -* | *.la | *.lo ) ;;
+      *)
+	# Do a test to see if this is really a libtool program.
+	if func_ltwrapper_script_p "$file"; then
+	  func_source "$file"
+	  # Transform arg to wrapped name.
+	  file="$progdir/$program"
+	elif func_ltwrapper_executable_p "$file"; then
+	  func_ltwrapper_scriptname "$file"
+	  func_source "$func_ltwrapper_scriptname_result"
+	  # Transform arg to wrapped name.
+	  file="$progdir/$program"
+	fi
+	;;
+      esac
+      # Quote arguments (to preserve shell metacharacters).
+      func_append_quoted args "$file"
+    done
+
+    if test "X$opt_dry_run" = Xfalse; then
+      if test -n "$shlibpath_var"; then
+	# Export the shlibpath_var.
+	eval "export $shlibpath_var"
+      fi
+
+      # Restore saved environment variables
+      for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES
+      do
+	eval "if test \"\${save_$lt_var+set}\" = set; then
+                $lt_var=\$save_$lt_var; export $lt_var
+	      else
+		$lt_unset $lt_var
+	      fi"
+      done
+
+      # Now prepare to actually exec the command.
+      exec_cmd="\$cmd$args"
+    else
+      # Display what would be done.
+      if test -n "$shlibpath_var"; then
+	eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\""
+	echo "export $shlibpath_var"
+      fi
+      $ECHO "$cmd$args"
+      exit $EXIT_SUCCESS
+    fi
+}
+
+test "$opt_mode" = execute && func_mode_execute ${1+"$@"}
+
+
+# func_mode_finish arg...
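+# Typically (illustrative) run after installation as
+#   libtool --mode=finish /usr/local/lib
+# so that the platform's $finish_cmds (e.g. an ldconfig invocation on
+# GNU/Linux) register the new library directory.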
+func_mode_finish ()
+{
+    $opt_debug
+    libs=
+    libdirs=
+    admincmds=
+
+    for opt in "$nonopt" ${1+"$@"}
+    do
+      if test -d "$opt"; then
+	func_append libdirs " $opt"
+
+      elif test -f "$opt"; then
+	if func_lalib_unsafe_p "$opt"; then
+	  func_append libs " $opt"
+	else
+	  func_warning "\`$opt' is not a valid libtool archive"
+	fi
+
+      else
+	func_fatal_error "invalid argument \`$opt'"
+      fi
+    done
+
+    if test -n "$libs"; then
+      if test -n "$lt_sysroot"; then
+        sysroot_regex=`$ECHO "$lt_sysroot" | $SED "$sed_make_literal_regex"`
+        sysroot_cmd="s/\([ ']\)$sysroot_regex/\1/g;"
+      else
+        sysroot_cmd=
+      fi
+
+      # Remove sysroot references
+      if $opt_dry_run; then
+        for lib in $libs; do
+          echo "removing references to $lt_sysroot and \`=' prefixes from $lib"
+        done
+      else
+        tmpdir=`func_mktempdir`
+        for lib in $libs; do
+	  sed -e "${sysroot_cmd} s/\([ ']-[LR]\)=/\1/g; s/\([ ']\)=/\1/g" $lib \
+	    > $tmpdir/tmp-la
+	  mv -f $tmpdir/tmp-la $lib
+	done
+        ${RM}r "$tmpdir"
+      fi
+    fi
+
+    if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then
+      for libdir in $libdirs; do
+	if test -n "$finish_cmds"; then
+	  # Do each command in the finish commands.
+	  func_execute_cmds "$finish_cmds" 'admincmds="$admincmds
+'"$cmd"'"'
+	fi
+	if test -n "$finish_eval"; then
+	  # Do the single finish_eval.
+	  eval cmds=\"$finish_eval\"
+	  $opt_dry_run || eval "$cmds" || func_append admincmds "
+       $cmds"
+	fi
+      done
+    fi
+
+    # Exit here if they wanted silent mode.
+    $opt_silent && exit $EXIT_SUCCESS
+
+    if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then
+      echo "----------------------------------------------------------------------"
+      echo "Libraries have been installed in:"
+      for libdir in $libdirs; do
+	$ECHO "   $libdir"
+      done
+      echo
+      echo "If you ever happen to want to link against installed libraries"
+      echo "in a given directory, LIBDIR, you must either use libtool, and"
+      echo "specify the full pathname of the library, or use the \`-LLIBDIR'"
+      echo "flag during linking and do at least one of the following:"
+      if test -n "$shlibpath_var"; then
+	echo "   - add LIBDIR to the \`$shlibpath_var' environment variable"
+	echo "     during execution"
+      fi
+      if test -n "$runpath_var"; then
+	echo "   - add LIBDIR to the \`$runpath_var' environment variable"
+	echo "     during linking"
+      fi
+      if test -n "$hardcode_libdir_flag_spec"; then
+	libdir=LIBDIR
+	eval flag=\"$hardcode_libdir_flag_spec\"
+
+	$ECHO "   - use the \`$flag' linker flag"
+      fi
+      if test -n "$admincmds"; then
+	$ECHO "   - have your system administrator run these commands:$admincmds"
+      fi
+      if test -f /etc/ld.so.conf; then
+	echo "   - have your system administrator add LIBDIR to \`/etc/ld.so.conf'"
+      fi
+      echo
+
+      echo "See any operating system documentation about shared libraries for"
+      case $host in
+	solaris2.[6789]|solaris2.1[0-9])
+	  echo "more information, such as the ld(1), crle(1) and ld.so(8) manual"
+	  echo "pages."
+	  ;;
+	*)
+	  echo "more information, such as the ld(1) and ld.so(8) manual pages."
+	  ;;
+      esac
+      echo "----------------------------------------------------------------------"
+    fi
+    exit $EXIT_SUCCESS
+}
+
+test "$opt_mode" = finish && func_mode_finish ${1+"$@"}
+
+
+# func_mode_install arg...
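+# Illustrative install-mode example (paths assumed):
+#   libtool --mode=install /usr/bin/install -c libfoo.la /usr/local/lib/libfoo.la
+# installs the shared library and its symlinks, the .la file, and (if
+# built) the static archive, relinking first when required.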
+func_mode_install ()
+{
+    $opt_debug
+    # There may be an optional sh(1) argument at the beginning of
+    # install_prog (especially on Windows NT).
+    if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh ||
+       # Allow the use of GNU shtool's install command.
+       case $nonopt in *shtool*) :;; *) false;; esac; then
+      # Aesthetically quote it.
+      func_quote_for_eval "$nonopt"
+      install_prog="$func_quote_for_eval_result "
+      arg=$1
+      shift
+    else
+      install_prog=
+      arg=$nonopt
+    fi
+
+    # The real first argument should be the name of the installation program.
+    # Aesthetically quote it.
+    func_quote_for_eval "$arg"
+    func_append install_prog "$func_quote_for_eval_result"
+    install_shared_prog=$install_prog
+    case " $install_prog " in
+      *[\\\ /]cp\ *) install_cp=: ;;
+      *) install_cp=false ;;
+    esac
+
+    # We need to accept at least all the BSD install flags.
+    dest=
+    files=
+    opts=
+    prev=
+    install_type=
+    isdir=no
+    stripme=
+    no_mode=:
+    for arg
+    do
+      arg2=
+      if test -n "$dest"; then
+	func_append files " $dest"
+	dest=$arg
+	continue
+      fi
+
+      case $arg in
+      -d) isdir=yes ;;
+      -f)
+	if $install_cp; then :; else
+	  prev=$arg
+	fi
+	;;
+      -g | -m | -o)
+	prev=$arg
+	;;
+      -s)
+	stripme=" -s"
+	continue
+	;;
+      -*)
+	;;
+      *)
+	# If the previous option needed an argument, then skip it.
+	if test -n "$prev"; then
+	  if test "x$prev" = x-m && test -n "$install_override_mode"; then
+	    arg2=$install_override_mode
+	    no_mode=false
+	  fi
+	  prev=
+	else
+	  dest=$arg
+	  continue
+	fi
+	;;
+      esac
+
+      # Aesthetically quote the argument.
+      func_quote_for_eval "$arg"
+      func_append install_prog " $func_quote_for_eval_result"
+      if test -n "$arg2"; then
+	func_quote_for_eval "$arg2"
+      fi
+      func_append install_shared_prog " $func_quote_for_eval_result"
+    done
+
+    test -z "$install_prog" && \
+      func_fatal_help "you must specify an install program"
+
+    test -n "$prev" && \
+      func_fatal_help "the \`$prev' option requires an argument"
+
+    if test -n "$install_override_mode" && $no_mode; then
+      if $install_cp; then :; else
+	func_quote_for_eval "$install_override_mode"
+	func_append install_shared_prog " -m $func_quote_for_eval_result"
+      fi
+    fi
+
+    if test -z "$files"; then
+      if test -z "$dest"; then
+	func_fatal_help "no file or destination specified"
+      else
+	func_fatal_help "you must specify a destination"
+      fi
+    fi
+
+    # Strip any trailing slash from the destination.
+    func_stripname '' '/' "$dest"
+    dest=$func_stripname_result
+
+    # Check to see that the destination is a directory.
+    test -d "$dest" && isdir=yes
+    if test "$isdir" = yes; then
+      destdir="$dest"
+      destname=
+    else
+      func_dirname_and_basename "$dest" "" "."
+      destdir="$func_dirname_result"
+      destname="$func_basename_result"
+
+      # Not a directory, so check to see that there is only one file specified.
+      set dummy $files; shift
+      test "$#" -gt 1 && \
+	func_fatal_help "\`$dest' is not a directory"
+    fi
+    case $destdir in
+    [\\/]* | [A-Za-z]:[\\/]*) ;;
+    *)
+      for file in $files; do
+	case $file in
+	*.lo) ;;
+	*)
+	  func_fatal_help "\`$destdir' must be an absolute directory name"
+	  ;;
+	esac
+      done
+      ;;
+    esac
+
+    # This variable tells wrapper scripts just to set variables rather
+    # than running their programs.
+    libtool_install_magic="$magic"
+
+    staticlibs=
+    future_libdirs=
+    current_libdirs=
+    for file in $files; do
+
+      # Do each installation.
+      case $file in
+      *.$libext)
+	# Do the static libraries later.
+	func_append staticlibs " $file"
+	;;
+
+      *.la)
+	func_resolve_sysroot "$file"
+	file=$func_resolve_sysroot_result
+
+	# Check to see that this really is a libtool archive.
+	func_lalib_unsafe_p "$file" \
+	  || func_fatal_help "\`$file' is not a valid libtool archive"
+
+	library_names=
+	old_library=
+	relink_command=
+	func_source "$file"
+
+	# Add the libdir to current_libdirs if it is the destination.
+	if test "X$destdir" = "X$libdir"; then
+	  case "$current_libdirs " in
+	  *" $libdir "*) ;;
+	  *) func_append current_libdirs " $libdir" ;;
+	  esac
+	else
+	  # Note the libdir as a future libdir.
+	  case "$future_libdirs " in
+	  *" $libdir "*) ;;
+	  *) func_append future_libdirs " $libdir" ;;
+	  esac
+	fi
+
+	func_dirname "$file" "/" ""
+	dir="$func_dirname_result"
+	func_append dir "$objdir"
+
+	if test -n "$relink_command"; then
+	  # Determine the prefix the user has applied to our future dir.
+	  inst_prefix_dir=`$ECHO "$destdir" | $SED -e "s%$libdir\$%%"`
+
+	  # Don't allow the user to place us outside of our expected
+	  # location b/c this prevents finding dependent libraries that
+	  # are installed to the same prefix.
+	  # At present, this check doesn't affect windows .dll's that
+	  # are installed into $libdir/../bin (currently, that works fine)
+	  # but it's something to keep an eye on.
+	  test "$inst_prefix_dir" = "$destdir" && \
+	    func_fatal_error "error: cannot install \`$file' to a directory not ending in $libdir"
+
+	  if test -n "$inst_prefix_dir"; then
+	    # Stick the inst_prefix_dir data into the link command.
+	    relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"`
+	  else
+	    relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%%"`
+	  fi
+
+	  func_warning "relinking \`$file'"
+	  func_show_eval "$relink_command" \
+	    'func_fatal_error "error: relink \`$file'\'' with the above command before installing it"'
+	fi
+
+	# See the names of the shared library.
+	set dummy $library_names; shift
+	if test -n "$1"; then
+	  realname="$1"
+	  shift
+
+	  srcname="$realname"
+	  test -n "$relink_command" && srcname="$realname"T
+
+	  # Install the shared library and build the symlinks.
+	  func_show_eval "$install_shared_prog $dir/$srcname $destdir/$realname" \
+	      'exit $?'
+	  tstripme="$stripme"
+	  case $host_os in
+	  cygwin* | mingw* | pw32* | cegcc*)
+	    case $realname in
+	    *.dll.a)
+	      tstripme=""
+	      ;;
+	    esac
+	    ;;
+	  esac
+	  if test -n "$tstripme" && test -n "$striplib"; then
+	    func_show_eval "$striplib $destdir/$realname" 'exit $?'
+	  fi
+
+	  if test "$#" -gt 0; then
+	    # Delete the old symlinks, and create new ones.
+	    # Try `ln -sf' first, because the `ln' binary might depend on
+	    # the symlink we replace!  Solaris /bin/ln does not understand -f,
+	    # so we also need to try rm && ln -s.
+	    for linkname
+	    do
+	      test "$linkname" != "$realname" \
+		&& func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })"
+	    done
+	  fi
+
+	  # Do each command in the postinstall commands.
+	  lib="$destdir/$realname"
+	  func_execute_cmds "$postinstall_cmds" 'exit $?'
+	fi
+
+	# Install the pseudo-library for information purposes.
+	func_basename "$file"
+	name="$func_basename_result"
+	instname="$dir/$name"i
+	func_show_eval "$install_prog $instname $destdir/$name" 'exit $?'
+
+	# Maybe install the static library, too.
+	test -n "$old_library" && func_append staticlibs " $dir/$old_library"
+	;;
+
+      *.lo)
+	# Install (i.e. copy) a libtool object.
+
+	# Figure out destination file name, if it wasn't already specified.
+	if test -n "$destname"; then
+	  destfile="$destdir/$destname"
+	else
+	  func_basename "$file"
+	  destfile="$func_basename_result"
+	  destfile="$destdir/$destfile"
+	fi
+
+	# Deduce the name of the destination old-style object file.
+	case $destfile in
+	*.lo)
+	  func_lo2o "$destfile"
+	  staticdest=$func_lo2o_result
+	  ;;
+	*.$objext)
+	  staticdest="$destfile"
+	  destfile=
+	  ;;
+	*)
+	  func_fatal_help "cannot copy a libtool object to \`$destfile'"
+	  ;;
+	esac
+
+	# Install the libtool object if requested.
+	test -n "$destfile" && \
+	  func_show_eval "$install_prog $file $destfile" 'exit $?'
+
+	# Install the old object if enabled.
+	if test "$build_old_libs" = yes; then
+	  # Deduce the name of the old-style object file.
+	  func_lo2o "$file"
+	  staticobj=$func_lo2o_result
+	  func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?'
+	fi
+	exit $EXIT_SUCCESS
+	;;
+
+      *)
+	# Figure out destination file name, if it wasn't already specified.
+	if test -n "$destname"; then
+	  destfile="$destdir/$destname"
+	else
+	  func_basename "$file"
+	  destfile="$func_basename_result"
+	  destfile="$destdir/$destfile"
+	fi
+
+	# If the file is missing and there is a .exe on the end, strip it,
+	# because it is most likely a libtool script that we actually want
+	# to install.
+	stripped_ext=""
+	case $file in
+	  *.exe)
+	    if test ! -f "$file"; then
+	      func_stripname '' '.exe' "$file"
+	      file=$func_stripname_result
+	      stripped_ext=".exe"
+	    fi
+	    ;;
+	esac
+
+	# Do a test to see if this is really a libtool program.
+	case $host in
+	*cygwin* | *mingw*)
+	    if func_ltwrapper_executable_p "$file"; then
+	      func_ltwrapper_scriptname "$file"
+	      wrapper=$func_ltwrapper_scriptname_result
+	    else
+	      func_stripname '' '.exe' "$file"
+	      wrapper=$func_stripname_result
+	    fi
+	    ;;
+	*)
+	    wrapper=$file
+	    ;;
+	esac
+	if func_ltwrapper_script_p "$wrapper"; then
+	  notinst_deplibs=
+	  relink_command=
+
+	  func_source "$wrapper"
+
+	  # Check the variables that should have been set.
+	  test -z "$generated_by_libtool_version" && \
+	    func_fatal_error "invalid libtool wrapper script \`$wrapper'"
+
+	  finalize=yes
+	  for lib in $notinst_deplibs; do
+	    # Check to see that each library is installed.
+	    libdir=
+	    if test -f "$lib"; then
+	      func_source "$lib"
+	    fi
+	    libfile="$libdir/"`$ECHO "$lib" | $SED 's%^.*/%%g'` ### testsuite: skip nested quoting test
+	    if test -n "$libdir" && test ! -f "$libfile"; then
+	      func_warning "\`$lib' has not been installed in \`$libdir'"
+	      finalize=no
+	    fi
+	  done
+
+	  relink_command=
+	  func_source "$wrapper"
+
+	  outputname=
+	  if test "$fast_install" = no && test -n "$relink_command"; then
+	    $opt_dry_run || {
+	      if test "$finalize" = yes; then
+	        tmpdir=`func_mktempdir`
+		func_basename "$file$stripped_ext"
+		file="$func_basename_result"
+	        outputname="$tmpdir/$file"
+	        # Replace the output file specification.
+	        relink_command=`$ECHO "$relink_command" | $SED 's%@OUTPUT@%'"$outputname"'%g'`
+
+	        $opt_silent || {
+	          func_quote_for_expand "$relink_command"
+		  eval "func_echo $func_quote_for_expand_result"
+	        }
+	        if eval "$relink_command"; then :
+	          else
+		  func_error "error: relink \`$file' with the above command before installing it"
+		  $opt_dry_run || ${RM}r "$tmpdir"
+		  continue
+	        fi
+	        file="$outputname"
+	      else
+	        func_warning "cannot relink \`$file'"
+	      fi
+	    }
+	  else
+	    # Install the binary that we compiled earlier.
+	    file=`$ECHO "$file$stripped_ext" | $SED "s%\([^/]*\)$%$objdir/\1%"`
+	  fi
+	fi
+
+	# remove .exe since cygwin /usr/bin/install will append another
+	# one anyway
+	case $install_prog,$host in
+	*/usr/bin/install*,*cygwin*)
+	  case $file:$destfile in
+	  *.exe:*.exe)
+	    # this is ok
+	    ;;
+	  *.exe:*)
+	    destfile=$destfile.exe
+	    ;;
+	  *:*.exe)
+	    func_stripname '' '.exe' "$destfile"
+	    destfile=$func_stripname_result
+	    ;;
+	  esac
+	  ;;
+	esac
+	func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?'
+	$opt_dry_run || if test -n "$outputname"; then
+	  ${RM}r "$tmpdir"
+	fi
+	;;
+      esac
+    done
+
+    for file in $staticlibs; do
+      func_basename "$file"
+      name="$func_basename_result"
+
+      # Set up the ranlib parameters.
+      oldlib="$destdir/$name"
+      func_to_tool_file "$oldlib" func_convert_file_msys_to_w32
+      tool_oldlib=$func_to_tool_file_result
+
+      func_show_eval "$install_prog \$file \$oldlib" 'exit $?'
+
+      if test -n "$stripme" && test -n "$old_striplib"; then
+	func_show_eval "$old_striplib $tool_oldlib" 'exit $?'
+      fi
+
+      # Do each command in the postinstall commands.
+      func_execute_cmds "$old_postinstall_cmds" 'exit $?'
+    done
+
+    test -n "$future_libdirs" && \
+      func_warning "remember to run \`$progname --finish$future_libdirs'"
+
+    if test -n "$current_libdirs"; then
+      # Maybe just do a dry run.
+      $opt_dry_run && current_libdirs=" -n$current_libdirs"
+      exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs'
+    else
+      exit $EXIT_SUCCESS
+    fi
+}
+
+test "$opt_mode" = install && func_mode_install ${1+"$@"}
+
+
+# func_generate_dlsyms outputname originator pic_p
+# Extract symbols from dlprefiles and create ${outputname}S.o with
+# a dlpreopen symbol table.
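+# For originator "myprog" (an illustrative name), this generates
+# ${outputname}S.c defining lt_myprog_LTX_preloaded_symbols[], the
+# lookup table that dlpreopened code consults instead of dlopen(3).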
+func_generate_dlsyms ()
+{
+    $opt_debug
+    my_outputname="$1"
+    my_originator="$2"
+    my_pic_p="${3-no}"
+    my_prefix=`$ECHO "$my_originator" | sed 's%[^a-zA-Z0-9]%_%g'`
+    my_dlsyms=
+
+    if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
+      if test -n "$NM" && test -n "$global_symbol_pipe"; then
+	my_dlsyms="${my_outputname}S.c"
+      else
+	func_error "not configured to extract global symbols from dlpreopened files"
+      fi
+    fi
+
+    if test -n "$my_dlsyms"; then
+      case $my_dlsyms in
+      "") ;;
+      *.c)
+	# Discover the nlist of each of the dlfiles.
+	nlist="$output_objdir/${my_outputname}.nm"
+
+	func_show_eval "$RM $nlist ${nlist}S ${nlist}T"
+
+	# Parse the name list into a source file.
+	func_verbose "creating $output_objdir/$my_dlsyms"
+
+	$opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\
+/* $my_dlsyms - symbol resolution table for \`$my_outputname' dlsym emulation. */
+/* Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION */
+
+#ifdef __cplusplus
+extern \"C\" {
+#endif
+
+#if defined(__GNUC__) && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4))
+#pragma GCC diagnostic ignored \"-Wstrict-prototypes\"
+#endif
+
+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests.  */
+#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE)
+/* DATA imports from DLLs on WIN32 can't be const, because runtime
+   relocations are performed -- see ld's documentation on pseudo-relocs.  */
+# define LT_DLSYM_CONST
+#elif defined(__osf__)
+/* This system does not cope well with relocations in const data.  */
+# define LT_DLSYM_CONST
+#else
+# define LT_DLSYM_CONST const
+#endif
+
+/* External symbol declarations for the compiler. */\
+"
+
+	if test "$dlself" = yes; then
+	  func_verbose "generating symbol list for \`$output'"
+
+	  $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist"
+
+	  # Add our own program objects to the symbol list.
+	  progfiles=`$ECHO "$objs$old_deplibs" | $SP2NL | $SED "$lo2o" | $NL2SP`
+	  for progfile in $progfiles; do
+	    func_to_tool_file "$progfile" func_convert_file_msys_to_w32
+	    func_verbose "extracting global C symbols from \`$func_to_tool_file_result'"
+	    $opt_dry_run || eval "$NM $func_to_tool_file_result | $global_symbol_pipe >> '$nlist'"
+	  done
+
+	  if test -n "$exclude_expsyms"; then
+	    $opt_dry_run || {
+	      eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T'
+	      eval '$MV "$nlist"T "$nlist"'
+	    }
+	  fi
+
+	  if test -n "$export_symbols_regex"; then
+	    $opt_dry_run || {
+	      eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T'
+	      eval '$MV "$nlist"T "$nlist"'
+	    }
+	  fi
+
+	  # Prepare the list of exported symbols
+	  if test -z "$export_symbols"; then
+	    export_symbols="$output_objdir/$outputname.exp"
+	    $opt_dry_run || {
+	      $RM $export_symbols
+	      eval "${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"'
+	      case $host in
+	      *cygwin* | *mingw* | *cegcc* )
+                eval "echo EXPORTS "'> "$output_objdir/$outputname.def"'
+                eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"'
+	        ;;
+	      esac
+	    }
+	  else
+	    $opt_dry_run || {
+	      eval "${SED} -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"'
+	      eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T'
+	      eval '$MV "$nlist"T "$nlist"'
+	      case $host in
+	        *cygwin* | *mingw* | *cegcc* )
+	          eval "echo EXPORTS "'> "$output_objdir/$outputname.def"'
+	          eval 'cat "$nlist" >> "$output_objdir/$outputname.def"'
+	          ;;
+	      esac
+	    }
+	  fi
+	fi
+
+	for dlprefile in $dlprefiles; do
+	  func_verbose "extracting global C symbols from \`$dlprefile'"
+	  func_basename "$dlprefile"
+	  name="$func_basename_result"
+          case $host in
+	    *cygwin* | *mingw* | *cegcc* )
+	      # if an import library, we need to obtain dlname
+	      if func_win32_import_lib_p "$dlprefile"; then
+	        func_tr_sh "$dlprefile"
+	        eval "curr_lafile=\$libfile_$func_tr_sh_result"
+	        dlprefile_dlbasename=""
+	        if test -n "$curr_lafile" && func_lalib_p "$curr_lafile"; then
+	          # Use subshell, to avoid clobbering current variable values
+	          dlprefile_dlname=`source "$curr_lafile" && echo "$dlname"`
+	          if test -n "$dlprefile_dlname" ; then
+	            func_basename "$dlprefile_dlname"
+	            dlprefile_dlbasename="$func_basename_result"
+	          else
+	            # no lafile. user explicitly requested -dlpreopen <import library>.
+	            $sharedlib_from_linklib_cmd "$dlprefile"
+	            dlprefile_dlbasename=$sharedlib_from_linklib_result
+	          fi
+	        fi
+	        $opt_dry_run || {
+	          if test -n "$dlprefile_dlbasename" ; then
+	            eval '$ECHO ": $dlprefile_dlbasename" >> "$nlist"'
+	          else
+	            func_warning "Could not compute DLL name from $name"
+	            eval '$ECHO ": $name " >> "$nlist"'
+	          fi
+	          func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32
+	          eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe |
+	            $SED -e '/I __imp/d' -e 's/I __nm_/D /;s/_nm__//' >> '$nlist'"
+	        }
+	      else # not an import lib
+	        $opt_dry_run || {
+	          eval '$ECHO ": $name " >> "$nlist"'
+	          func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32
+	          eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'"
+	        }
+	      fi
+	    ;;
+	    *)
+	      $opt_dry_run || {
+	        eval '$ECHO ": $name " >> "$nlist"'
+	        func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32
+	        eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'"
+	      }
+	    ;;
+          esac
+	done
+
+	$opt_dry_run || {
+	  # Make sure we have at least an empty file.
+	  test -f "$nlist" || : > "$nlist"
+
+	  if test -n "$exclude_expsyms"; then
+	    $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T
+	    $MV "$nlist"T "$nlist"
+	  fi
+
+	  # Try sorting and uniquifying the output.
+	  if $GREP -v "^: " < "$nlist" |
+	      if sort -k 3 </dev/null >/dev/null 2>&1; then
+		sort -k 3
+	      else
+		sort +2
+	      fi |
+	      uniq > "$nlist"S; then
+	    :
+	  else
+	    $GREP -v "^: " < "$nlist" > "$nlist"S
+	  fi
+
+	  if test -f "$nlist"S; then
+	    eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"'
+	  else
+	    echo '/* NONE */' >> "$output_objdir/$my_dlsyms"
+	  fi
+
+	  echo >> "$output_objdir/$my_dlsyms" "\
+
+/* The mapping between symbol names and symbols.  */
+typedef struct {
+  const char *name;
+  void *address;
+} lt_dlsymlist;
+extern LT_DLSYM_CONST lt_dlsymlist
+lt_${my_prefix}_LTX_preloaded_symbols[];
+LT_DLSYM_CONST lt_dlsymlist
+lt_${my_prefix}_LTX_preloaded_symbols[] =
+{\
+  { \"$my_originator\", (void *) 0 },"
+
+	  case $need_lib_prefix in
+	  no)
+	    eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms"
+	    ;;
+	  *)
+	    eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms"
+	    ;;
+	  esac
+	  echo >> "$output_objdir/$my_dlsyms" "\
+  {0, (void *) 0}
+};
+
+/* This works around a problem in the FreeBSD linker */
+#ifdef FREEBSD_WORKAROUND
+static const void *lt_preloaded_setup() {
+  return lt_${my_prefix}_LTX_preloaded_symbols;
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif\
+"
+	} # !$opt_dry_run
+
+	pic_flag_for_symtable=
+	case "$compile_command " in
+	*" -static "*) ;;
+	*)
+	  case $host in
+	  # compiling the symbol table file with pic_flag works around
+	  # a FreeBSD bug that causes programs to crash when -lm is
+	  # linked before any other PIC object.  But we must not use
+	  # pic_flag when linking with -static.  The problem exists in
+	  # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1.
+	  *-*-freebsd2.*|*-*-freebsd3.0*|*-*-freebsdelf3.0*)
+	    pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;;
+	  *-*-hpux*)
+	    pic_flag_for_symtable=" $pic_flag"  ;;
+	  *)
+	    if test "X$my_pic_p" != Xno; then
+	      pic_flag_for_symtable=" $pic_flag"
+	    fi
+	    ;;
+	  esac
+	  ;;
+	esac
+	symtab_cflags=
+	for arg in $LTCFLAGS; do
+	  case $arg in
+	  -pie | -fpie | -fPIE) ;;
+	  *) func_append symtab_cflags " $arg" ;;
+	  esac
+	done
+
+	# Now compile the dynamic symbol file.
+	func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?'
+
+	# Clean up the generated files.
+	func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T"'
+
+	# Transform the symbol file into the correct name.
+	symfileobj="$output_objdir/${my_outputname}S.$objext"
+	case $host in
+	*cygwin* | *mingw* | *cegcc* )
+	  if test -f "$output_objdir/$my_outputname.def"; then
+	    compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"`
+	    finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"`
+	  else
+	    compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"`
+	    finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"`
+	  fi
+	  ;;
+	*)
+	  compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"`
+	  finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"`
+	  ;;
+	esac
+	;;
+      *)
+	func_fatal_error "unknown suffix for \`$my_dlsyms'"
+	;;
+      esac
+    else
+      # We keep going just in case the user didn't refer to
+      # lt_preloaded_symbols.  The linker will fail if global_symbol_pipe
+      # really was required.
+
+      # Nullify the symbol file.
+      compile_command=`$ECHO "$compile_command" | $SED "s% @SYMFILE@%%"`
+      finalize_command=`$ECHO "$finalize_command" | $SED "s% @SYMFILE@%%"`
+    fi
+}
+
+# func_win32_libid arg
+# return the library type of file 'arg'
+#
+# Need a lot of goo to handle *both* DLLs and import libs
+# Has to be a shell function in order to 'eat' the argument
+# that is supplied when $file_magic_command is called.
+# Despite the name, it also deals with 64-bit binaries.
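+# Illustrative (assumed) usage:
+#   func_win32_libid /usr/lib/libfoo.dll.a   # might print "x86 archive import"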
+func_win32_libid ()
+{
+  $opt_debug
+  win32_libid_type="unknown"
+  win32_fileres=`file -L $1 2>/dev/null`
+  case $win32_fileres in
+  *ar\ archive\ import\ library*) # definitely import
+    win32_libid_type="x86 archive import"
+    ;;
+  *ar\ archive*) # could be an import, or static
+    # Keep the egrep pattern in sync with the one in _LT_CHECK_MAGIC_METHOD.
+    if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null |
+       $EGREP 'file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then
+      func_to_tool_file "$1" func_convert_file_msys_to_w32
+      win32_nmres=`eval $NM -f posix -A \"$func_to_tool_file_result\" |
+	$SED -n -e '
+	    1,100{
+		/ I /{
+		    s,.*,import,
+		    p
+		    q
+		}
+	    }'`
+      case $win32_nmres in
+      import*)  win32_libid_type="x86 archive import";;
+      *)        win32_libid_type="x86 archive static";;
+      esac
+    fi
+    ;;
+  *DLL*)
+    win32_libid_type="x86 DLL"
+    ;;
+  *executable*) # but shell scripts are "executable" too...
+    case $win32_fileres in
+    *MS\ Windows\ PE\ Intel*)
+      win32_libid_type="x86 DLL"
+      ;;
+    esac
+    ;;
+  esac
+  $ECHO "$win32_libid_type"
+}
+
+# func_cygming_dll_for_implib ARG
+#
+# Platform-specific function to extract the
+# name of the DLL associated with the specified
+# import library ARG.
+# Invoked by eval'ing the libtool variable
+#    $sharedlib_from_linklib_cmd
+# Result is available in the variable
+#    $sharedlib_from_linklib_result
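+# For instance (illustrative names), an import library libfoo.dll.a
+# typically resolves to a DLL name such as "libfoo-1.dll".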
+func_cygming_dll_for_implib ()
+{
+  $opt_debug
+  sharedlib_from_linklib_result=`$DLLTOOL --identify-strict --identify "$1"`
+}
+
+# func_cygming_dll_for_implib_fallback_core SECTION_NAME LIBNAMEs
+#
+# This is the core of a fallback implementation of a
+# platform-specific function to extract the name of the
+# DLL associated with the specified import library LIBNAME.
+#
+# SECTION_NAME is either .idata$6 or .idata$7, depending
+# on the platform and compiler that created the implib.
+#
+# Echoes the name of the DLL associated with the
+# specified import library.
+func_cygming_dll_for_implib_fallback_core ()
+{
+  $opt_debug
+  match_literal=`$ECHO "$1" | $SED "$sed_make_literal_regex"`
+  $OBJDUMP -s --section "$1" "$2" 2>/dev/null |
+    $SED '/^Contents of section '"$match_literal"':/{
+      # Place marker at beginning of archive member dllname section
+      s/.*/====MARK====/
+      p
+      d
+    }
+    # These lines can sometimes be longer than 43 characters, but
+    # are always uninteresting
+    /:[	 ]*file format pe[i]\{,1\}-/d
+    /^In archive [^:]*:/d
+    # Ensure marker is printed
+    /^====MARK====/p
+    # Remove all lines with less than 43 characters
+    /^.\{43\}/!d
+    # From remaining lines, remove first 43 characters
+    s/^.\{43\}//' |
+    $SED -n '
+      # Join marker and all lines until next marker into a single line
+      /^====MARK====/ b para
+      H
+      $ b para
+      b
+      :para
+      x
+      s/\n//g
+      # Remove the marker
+      s/^====MARK====//
+      # Remove trailing dots and whitespace
+      s/[\. \t]*$//
+      # Print
+      /./p' |
+    # we now have a list, one entry per line, of the stringified
+    # contents of the appropriate section of all members of the
+    # archive which possess that section. Heuristic: eliminate
+    # all those which have a first or second character that is
+    # a '.' (that is, objdump's representation of an unprintable
+    # character.) This should work for all archives with less than
+    # 0x302f exports -- but will fail for DLLs whose name actually
+    # begins with a literal '.' or a single character followed by
+    # a '.'.
+    #
+    # Of those that remain, print the first one.
+    $SED -e '/^\./d;/^.\./d;q'
+}
+
+# func_cygming_gnu_implib_p ARG
+# This predicate returns with zero status (TRUE) if
+# ARG is a GNU/binutils-style import library. Returns
+# with nonzero status (FALSE) otherwise.
+func_cygming_gnu_implib_p ()
+{
+  $opt_debug
+  func_to_tool_file "$1" func_convert_file_msys_to_w32
+  func_cygming_gnu_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $EGREP ' (_head_[A-Za-z0-9_]+_[ad]l*|[A-Za-z0-9_]+_[ad]l*_iname)$'`
+  test -n "$func_cygming_gnu_implib_tmp"
+}
+
+# func_cygming_ms_implib_p ARG
+# This predicate returns with zero status (TRUE) if
+# ARG is an MS-style import library. Returns
+# with nonzero status (FALSE) otherwise.
+func_cygming_ms_implib_p ()
+{
+  $opt_debug
+  func_to_tool_file "$1" func_convert_file_msys_to_w32
+  func_cygming_ms_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $GREP '_NULL_IMPORT_DESCRIPTOR'`
+  test -n "$func_cygming_ms_implib_tmp"
+}
+
+# func_cygming_dll_for_implib_fallback ARG
+# Platform-specific function to extract the
+# name of the DLL associated with the specified
+# import library ARG.
+#
+# This fallback implementation is for use when $DLLTOOL
+# does not support the --identify-strict option.
+# Invoked by eval'ing the libtool variable
+#    $sharedlib_from_linklib_cmd
+# Result is available in the variable
+#    $sharedlib_from_linklib_result
+func_cygming_dll_for_implib_fallback ()
+{
+  $opt_debug
+  if func_cygming_gnu_implib_p "$1" ; then
+    # binutils import library
+    sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$7' "$1"`
+  elif func_cygming_ms_implib_p "$1" ; then
+    # ms-generated import library
+    sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$6' "$1"`
+  else
+    # unknown
+    sharedlib_from_linklib_result=""
+  fi
+}
+
+
+# func_extract_an_archive dir oldlib
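+# Roughly equivalent to (cd "$dir" && $AR x oldlib), with optional
+# locking and a check that no two archive members share a name.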
+func_extract_an_archive ()
+{
+    $opt_debug
+    f_ex_an_ar_dir="$1"; shift
+    f_ex_an_ar_oldlib="$1"
+    if test "$lock_old_archive_extraction" = yes; then
+      lockfile=$f_ex_an_ar_oldlib.lock
+      until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do
+	func_echo "Waiting for $lockfile to be removed"
+	sleep 2
+      done
+    fi
+    func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" \
+		   'stat=$?; rm -f "$lockfile"; exit $stat'
+    if test "$lock_old_archive_extraction" = yes; then
+      $opt_dry_run || rm -f "$lockfile"
+    fi
+    if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then
+     :
+    else
+      func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib"
+    fi
+}
+
+
+# func_extract_archives gentop oldlib ...
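+# On Darwin, fat (multi-architecture) archives are first split per
+# architecture with 'lipo -thin', extracted, then the extracted members
+# are re-merged with 'lipo -create'; elsewhere each archive is unpacked
+# directly via func_extract_an_archive.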
+func_extract_archives ()
+{
+    $opt_debug
+    my_gentop="$1"; shift
+    my_oldlibs=${1+"$@"}
+    my_oldobjs=""
+    my_xlib=""
+    my_xabs=""
+    my_xdir=""
+
+    for my_xlib in $my_oldlibs; do
+      # Extract the objects.
+      case $my_xlib in
+	[\\/]* | [A-Za-z]:[\\/]*) my_xabs="$my_xlib" ;;
+	*) my_xabs=`pwd`"/$my_xlib" ;;
+      esac
+      func_basename "$my_xlib"
+      my_xlib="$func_basename_result"
+      my_xlib_u=$my_xlib
+      while :; do
+        case " $extracted_archives " in
+	*" $my_xlib_u "*)
+	  func_arith $extracted_serial + 1
+	  extracted_serial=$func_arith_result
+	  my_xlib_u=lt$extracted_serial-$my_xlib ;;
+	*) break ;;
+	esac
+      done
+      extracted_archives="$extracted_archives $my_xlib_u"
+      my_xdir="$my_gentop/$my_xlib_u"
+
+      func_mkdir_p "$my_xdir"
+
+      case $host in
+      *-darwin*)
+	func_verbose "Extracting $my_xabs"
+	# Do not bother doing anything if just a dry run
+	$opt_dry_run || {
+	  darwin_orig_dir=`pwd`
+	  cd $my_xdir || exit $?
+	  darwin_archive=$my_xabs
+	  darwin_curdir=`pwd`
+	  darwin_base_archive=`basename "$darwin_archive"`
+	  darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true`
+	  if test -n "$darwin_arches"; then
+	    darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'`
+	    darwin_arch=
+	    func_verbose "$darwin_base_archive has multiple architectures $darwin_arches"
+	    for darwin_arch in  $darwin_arches ; do
+	      func_mkdir_p "unfat-$$/${darwin_base_archive}-${darwin_arch}"
+	      $LIPO -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" "${darwin_archive}"
+	      cd "unfat-$$/${darwin_base_archive}-${darwin_arch}"
+	      func_extract_an_archive "`pwd`" "${darwin_base_archive}"
+	      cd "$darwin_curdir"
+	      $RM "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}"
+	    done # $darwin_arches
+            ## Okay now we've a bunch of thin objects, gotta fatten them up :)
+	    darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$basename" | sort -u`
+	    darwin_file=
+	    darwin_files=
+	    for darwin_file in $darwin_filelist; do
+	      darwin_files=`find unfat-$$ -name $darwin_file -print | sort | $NL2SP`
+	      $LIPO -create -output "$darwin_file" $darwin_files
+	    done # $darwin_filelist
+	    $RM -rf unfat-$$
+	    cd "$darwin_orig_dir"
+	  else
+	    cd $darwin_orig_dir
+	    func_extract_an_archive "$my_xdir" "$my_xabs"
+	  fi # $darwin_arches
+	} # !$opt_dry_run
+	;;
+      *)
+        func_extract_an_archive "$my_xdir" "$my_xabs"
+	;;
+      esac
+      my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | sort | $NL2SP`
+    done
+
+    func_extract_archives_result="$my_oldobjs"
+}
+
+
+# func_emit_wrapper [arg=no]
+#
+# Emit a libtool wrapper script on stdout.
+# Don't directly open a file because we may want to
+# incorporate the script contents within a cygwin/mingw
+# wrapper executable.  Must ONLY be called from within
+# func_mode_link because it depends on a number of variables
+# set therein.
+#
+# ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR
+# variable will take.  If 'yes', then the emitted script
+# will assume that the directory in which it is stored is
+# the $objdir directory.  This is a cygwin/mingw-specific
+# behavior.
+func_emit_wrapper ()
+{
+	func_emit_wrapper_arg1=${1-no}
+
+	$ECHO "\
+#! $SHELL
+
+# $output - temporary wrapper script for $objdir/$outputname
+# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION
+#
+# The $output program cannot be directly executed until all the libtool
+# libraries that it depends on are installed.
+#
+# This wrapper script should never be moved out of the build directory.
+# If it is, it will not operate correctly.
+
+# Sed substitution that helps us do robust quoting.  It backslashifies
+# metacharacters that are still active within double-quoted strings.
+sed_quote_subst='$sed_quote_subst'
+
+# Be Bourne compatible
+if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then
+  emulate sh
+  NULLCMD=:
+  # Zsh 3.x and 4.x perform word splitting on \${1+\"\$@\"}, which
+  # is contrary to our usage.  Disable this feature.
+  alias -g '\${1+\"\$@\"}'='\"\$@\"'
+  setopt NO_GLOB_SUBST
+else
+  case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac
+fi
+BIN_SH=xpg4; export BIN_SH # for Tru64
+DUALCASE=1; export DUALCASE # for MKS sh
+
+# The HP-UX ksh and POSIX shell print the target directory to stdout
+# if CDPATH is set.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+relink_command=\"$relink_command\"
+
+# This environment variable determines our operation mode.
+if test \"\$libtool_install_magic\" = \"$magic\"; then
+  # install mode needs the following variables:
+  generated_by_libtool_version='$macro_version'
+  notinst_deplibs='$notinst_deplibs'
+else
+  # When we are sourced in execute mode, \$file and \$ECHO are already set.
+  if test \"\$libtool_execute_magic\" != \"$magic\"; then
+    file=\"\$0\""
+
+    qECHO=`$ECHO "$ECHO" | $SED "$sed_quote_subst"`
+    $ECHO "\
+
+# A function that is used when there is no print builtin or printf.
+func_fallback_echo ()
+{
+  eval 'cat <<_LTECHO_EOF
+\$1
+_LTECHO_EOF'
+}
+    ECHO=\"$qECHO\"
+  fi
+
+# Very basic option parsing. These options are (a) specific to
+# the libtool wrapper, (b) are identical between the wrapper
+# /script/ and the wrapper /executable/ which is used only on
+# windows platforms, and (c) all begin with the string "--lt-"
+# (application programs are unlikely to have options which match
+# this pattern).
+#
+# There are only two supported options: --lt-debug and
+# --lt-dump-script. There is, deliberately, no --lt-help.
+#
+# The first argument to this parsing function should be the
+# script's $0 value, followed by "$@".
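+# For example, invoking the wrapped program as
+#   ./myprog --lt-dump-script
+# prints this wrapper script itself and exits, while --lt-debug makes
+# the wrapper print diagnostics to stderr before running the program.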
+lt_option_debug=
+func_parse_lt_options ()
+{
+  lt_script_arg0=\$0
+  shift
+  for lt_opt
+  do
+    case \"\$lt_opt\" in
+    --lt-debug) lt_option_debug=1 ;;
+    --lt-dump-script)
+        lt_dump_D=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%/[^/]*$%%'\`
+        test \"X\$lt_dump_D\" = \"X\$lt_script_arg0\" && lt_dump_D=.
+        lt_dump_F=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%^.*/%%'\`
+        cat \"\$lt_dump_D/\$lt_dump_F\"
+        exit 0
+      ;;
+    --lt-*)
+        \$ECHO \"Unrecognized --lt- option: '\$lt_opt'\" 1>&2
+        exit 1
+      ;;
+    esac
+  done
+
+  # Print the debug banner immediately:
+  if test -n \"\$lt_option_debug\"; then
+    echo \"${outputname}:${output}:\${LINENO}: libtool wrapper (GNU $PACKAGE$TIMESTAMP) $VERSION\" 1>&2
+  fi
+}
+
+# Used when --lt-debug. Prints its arguments to stdout
+# (redirection is the responsibility of the caller)
+func_lt_dump_args ()
+{
+  lt_dump_args_N=1;
+  for lt_arg
+  do
+    \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[\$lt_dump_args_N]: \$lt_arg\"
+    lt_dump_args_N=\`expr \$lt_dump_args_N + 1\`
+  done
+}
+
+# Core function for launching the target application
+func_exec_program_core ()
+{
+"
+  case $host in
+  # Backslashes separate directories on plain windows
+  *-*-mingw | *-*-os2* | *-cegcc*)
+    $ECHO "\
+      if test -n \"\$lt_option_debug\"; then
+        \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir\\\\\$program\" 1>&2
+        func_lt_dump_args \${1+\"\$@\"} 1>&2
+      fi
+      exec \"\$progdir\\\\\$program\" \${1+\"\$@\"}
+"
+    ;;
+
+  *)
+    $ECHO "\
+      if test -n \"\$lt_option_debug\"; then
+        \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir/\$program\" 1>&2
+        func_lt_dump_args \${1+\"\$@\"} 1>&2
+      fi
+      exec \"\$progdir/\$program\" \${1+\"\$@\"}
+"
+    ;;
+  esac
+  $ECHO "\
+      \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2
+      exit 1
+}
+
+# A function to encapsulate launching the target application
+# Strips options in the --lt-* namespace from \$@ and
+# launches target application with the remaining arguments.
+func_exec_program ()
+{
+  case \" \$* \" in
+  *\\ --lt-*)
+    for lt_wr_arg
+    do
+      case \$lt_wr_arg in
+      --lt-*) ;;
+      *) set x \"\$@\" \"\$lt_wr_arg\"; shift;;
+      esac
+      shift
+    done ;;
+  esac
+  func_exec_program_core \${1+\"\$@\"}
+}
+
+  # Parse options
+  func_parse_lt_options \"\$0\" \${1+\"\$@\"}
+
+  # Find the directory that this script lives in.
+  thisdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*$%%'\`
+  test \"x\$thisdir\" = \"x\$file\" && thisdir=.
+
+  # Follow symbolic links until we get to the real thisdir.
+  file=\`ls -ld \"\$file\" | $SED -n 's/.*-> //p'\`
+  while test -n \"\$file\"; do
+    destdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*\$%%'\`
+
+    # If there was a directory component, then change thisdir.
+    if test \"x\$destdir\" != \"x\$file\"; then
+      case \"\$destdir\" in
+      [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;;
+      *) thisdir=\"\$thisdir/\$destdir\" ;;
+      esac
+    fi
+
+    file=\`\$ECHO \"\$file\" | $SED 's%^.*/%%'\`
+    file=\`ls -ld \"\$thisdir/\$file\" | $SED -n 's/.*-> //p'\`
+  done
+
+  # Usually 'no', except on cygwin/mingw when embedded into
+  # the cwrapper.
+  WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_arg1
+  if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then
+    # special case for '.'
+    if test \"\$thisdir\" = \".\"; then
+      thisdir=\`pwd\`
+    fi
+    # remove .libs from thisdir
+    case \"\$thisdir\" in
+    *[\\\\/]$objdir ) thisdir=\`\$ECHO \"\$thisdir\" | $SED 's%[\\\\/][^\\\\/]*$%%'\` ;;
+    $objdir )   thisdir=. ;;
+    esac
+  fi
+
+  # Try to get the absolute directory name.
+  absdir=\`cd \"\$thisdir\" && pwd\`
+  test -n \"\$absdir\" && thisdir=\"\$absdir\"
+"
+
+	if test "$fast_install" = yes; then
+	  $ECHO "\
+  program=lt-'$outputname'$exeext
+  progdir=\"\$thisdir/$objdir\"
+
+  if test ! -f \"\$progdir/\$program\" ||
+     { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\
+       test \"X\$file\" != \"X\$progdir/\$program\"; }; then
+
+    file=\"\$\$-\$program\"
+
+    if test ! -d \"\$progdir\"; then
+      $MKDIR \"\$progdir\"
+    else
+      $RM \"\$progdir/\$file\"
+    fi"
+
+	  $ECHO "\
+
+    # relink executable if necessary
+    if test -n \"\$relink_command\"; then
+      if relink_command_output=\`eval \$relink_command 2>&1\`; then :
+      else
+	$ECHO \"\$relink_command_output\" >&2
+	$RM \"\$progdir/\$file\"
+	exit 1
+      fi
+    fi
+
+    $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null ||
+    { $RM \"\$progdir/\$program\";
+      $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; }
+    $RM \"\$progdir/\$file\"
+  fi"
+	else
+	  $ECHO "\
+  program='$outputname'
+  progdir=\"\$thisdir/$objdir\"
+"
+	fi
+
+	$ECHO "\
+
+  if test -f \"\$progdir/\$program\"; then"
+
+	# Fix the DLL searchpath if we need to.  Do this before prepending
+	# to shlibpath, because on Windows, both are PATH and uninstalled
+	# libraries must come first.
+	if test -n "$dllsearchpath"; then
+	  $ECHO "\
+    # Add the dll search path components to the executable PATH
+    PATH=$dllsearchpath:\$PATH
+"
+	fi
+
+	# Export our shlibpath_var if we have one.
+	if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then
+	  $ECHO "\
+    # Add our own library path to $shlibpath_var
+    $shlibpath_var=\"$temp_rpath\$$shlibpath_var\"
+
+    # Some systems cannot cope with colon-terminated $shlibpath_var
+    # The second colon is a workaround for a bug in BeOS R4 sed
+    $shlibpath_var=\`\$ECHO \"\$$shlibpath_var\" | $SED 's/::*\$//'\`
+
+    export $shlibpath_var
+"
+	fi
+
+	$ECHO "\
+    if test \"\$libtool_execute_magic\" != \"$magic\"; then
+      # Run the actual program with our arguments.
+      func_exec_program \${1+\"\$@\"}
+    fi
+  else
+    # The program doesn't exist.
+    \$ECHO \"\$0: error: \\\`\$progdir/\$program' does not exist\" 1>&2
+    \$ECHO \"This script is just a wrapper for \$program.\" 1>&2
+    \$ECHO \"See the $PACKAGE documentation for more information.\" 1>&2
+    exit 1
+  fi
+fi\
+"
+}
+
+
+# func_emit_cwrapperexe_src
+# emit the source code for a wrapper executable on stdout
+# Must ONLY be called from within func_mode_link because
+# it depends on a number of variables set therein.
+func_emit_cwrapperexe_src ()
+{
+	cat <<EOF
+
+/* $cwrappersource - temporary wrapper executable for $objdir/$outputname
+   Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION
+
+   The $output program cannot be directly executed until all the libtool
+   libraries that it depends on are installed.
+
+   This wrapper executable should never be moved out of the build directory.
+   If it is, it will not operate correctly.
+*/
+EOF
+	    cat <<"EOF"
+#ifdef _MSC_VER
+# define _CRT_SECURE_NO_DEPRECATE 1
+#endif
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef _MSC_VER
+# include <direct.h>
+# include <process.h>
+# include <io.h>
+#else
+# include <unistd.h>
+# include <stdint.h>
+# ifdef __CYGWIN__
+#  include <io.h>
+# endif
+#endif
+#include <malloc.h>
+#include <stdarg.h>
+#include <assert.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+
+/* declarations of non-ANSI functions */
+#if defined(__MINGW32__)
+# ifdef __STRICT_ANSI__
+int _putenv (const char *);
+# endif
+#elif defined(__CYGWIN__)
+# ifdef __STRICT_ANSI__
+char *realpath (const char *, char *);
+int putenv (char *);
+int setenv (const char *, const char *, int);
+# endif
+/* #elif defined (other platforms) ... */
+#endif
+
+/* portability defines, excluding path handling macros */
+#if defined(_MSC_VER)
+# define setmode _setmode
+# define stat    _stat
+# define chmod   _chmod
+# define getcwd  _getcwd
+# define putenv  _putenv
+# define S_IXUSR _S_IEXEC
+# ifndef _INTPTR_T_DEFINED
+#  define _INTPTR_T_DEFINED
+#  define intptr_t int
+# endif
+#elif defined(__MINGW32__)
+# define setmode _setmode
+# define stat    _stat
+# define chmod   _chmod
+# define getcwd  _getcwd
+# define putenv  _putenv
+#elif defined(__CYGWIN__)
+# define HAVE_SETENV
+# define FOPEN_WB "wb"
+/* #elif defined (other platforms) ... */
+#endif
+
+#if defined(PATH_MAX)
+# define LT_PATHMAX PATH_MAX
+#elif defined(MAXPATHLEN)
+# define LT_PATHMAX MAXPATHLEN
+#else
+# define LT_PATHMAX 1024
+#endif
+
+#ifndef S_IXOTH
+# define S_IXOTH 0
+#endif
+#ifndef S_IXGRP
+# define S_IXGRP 0
+#endif
+
+/* path handling portability macros */
+#ifndef DIR_SEPARATOR
+# define DIR_SEPARATOR '/'
+# define PATH_SEPARATOR ':'
+#endif
+
+#if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \
+  defined (__OS2__)
+# define HAVE_DOS_BASED_FILE_SYSTEM
+# define FOPEN_WB "wb"
+# ifndef DIR_SEPARATOR_2
+#  define DIR_SEPARATOR_2 '\\'
+# endif
+# ifndef PATH_SEPARATOR_2
+#  define PATH_SEPARATOR_2 ';'
+# endif
+#endif
+
+#ifndef DIR_SEPARATOR_2
+# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR)
+#else /* DIR_SEPARATOR_2 */
+# define IS_DIR_SEPARATOR(ch) \
+	(((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2))
+#endif /* DIR_SEPARATOR_2 */
+
+#ifndef PATH_SEPARATOR_2
+# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR)
+#else /* PATH_SEPARATOR_2 */
+# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2)
+#endif /* PATH_SEPARATOR_2 */
+
+#ifndef FOPEN_WB
+# define FOPEN_WB "w"
+#endif
+#ifndef _O_BINARY
+# define _O_BINARY 0
+#endif
+
+#define XMALLOC(type, num)      ((type *) xmalloc ((num) * sizeof(type)))
+#define XFREE(stale) do { \
+  if (stale) { free ((void *) stale); stale = 0; } \
+} while (0)
+
+#if defined(LT_DEBUGWRAPPER)
+static int lt_debug = 1;
+#else
+static int lt_debug = 0;
+#endif
+
+const char *program_name = "libtool-wrapper"; /* in case xstrdup fails */
+
+void *xmalloc (size_t num);
+char *xstrdup (const char *string);
+const char *base_name (const char *name);
+char *find_executable (const char *wrapper);
+char *chase_symlinks (const char *pathspec);
+int make_executable (const char *path);
+int check_executable (const char *path);
+char *strendzap (char *str, const char *pat);
+void lt_debugprintf (const char *file, int line, const char *fmt, ...);
+void lt_fatal (const char *file, int line, const char *message, ...);
+static const char *nonnull (const char *s);
+static const char *nonempty (const char *s);
+void lt_setenv (const char *name, const char *value);
+char *lt_extend_str (const char *orig_value, const char *add, int to_end);
+void lt_update_exe_path (const char *name, const char *value);
+void lt_update_lib_path (const char *name, const char *value);
+char **prepare_spawn (char **argv);
+void lt_dump_script (FILE *f);
+EOF
+
+	    cat <<EOF
+volatile const char * MAGIC_EXE = "$magic_exe";
+const char * LIB_PATH_VARNAME = "$shlibpath_var";
+EOF
+
+	    if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then
+              func_to_host_path "$temp_rpath"
+	      cat <<EOF
+const char * LIB_PATH_VALUE   = "$func_to_host_path_result";
+EOF
+	    else
+	      cat <<"EOF"
+const char * LIB_PATH_VALUE   = "";
+EOF
+	    fi
+
+	    if test -n "$dllsearchpath"; then
+              func_to_host_path "$dllsearchpath:"
+	      cat <<EOF
+const char * EXE_PATH_VARNAME = "PATH";
+const char * EXE_PATH_VALUE   = "$func_to_host_path_result";
+EOF
+	    else
+	      cat <<"EOF"
+const char * EXE_PATH_VARNAME = "";
+const char * EXE_PATH_VALUE   = "";
+EOF
+	    fi
+
+	    if test "$fast_install" = yes; then
+	      cat <<EOF
+const char * TARGET_PROGRAM_NAME = "lt-$outputname"; /* hopefully, no .exe */
+EOF
+	    else
+	      cat <<EOF
+const char * TARGET_PROGRAM_NAME = "$outputname"; /* hopefully, no .exe */
+EOF
+	    fi
+
+
+	    cat <<"EOF"
+
+#define LTWRAPPER_OPTION_PREFIX         "--lt-"
+
+static const char *ltwrapper_option_prefix = LTWRAPPER_OPTION_PREFIX;
+static const char *dumpscript_opt       = LTWRAPPER_OPTION_PREFIX "dump-script";
+static const char *debug_opt            = LTWRAPPER_OPTION_PREFIX "debug";
+
+int
+main (int argc, char *argv[])
+{
+  char **newargz;
+  int  newargc;
+  char *tmp_pathspec;
+  char *actual_cwrapper_path;
+  char *actual_cwrapper_name;
+  char *target_name;
+  char *lt_argv_zero;
+  intptr_t rval = 127;
+
+  int i;
+
+  program_name = (char *) xstrdup (base_name (argv[0]));
+  newargz = XMALLOC (char *, argc + 1);
+
+  /* Very simple arg parsing; we don't want to rely on getopt.
+   * Also, copy all non-cwrapper options to newargz, except
+   * argv[0], which is handled differently.
+   */
+  newargc=0;
+  for (i = 1; i < argc; i++)
+    {
+      if (strcmp (argv[i], dumpscript_opt) == 0)
+	{
+EOF
+	    case "$host" in
+	      *mingw* | *cygwin* )
+		# make stdout use "unix" line endings
+		echo "          setmode(1,_O_BINARY);"
+		;;
+	      esac
+
+	    cat <<"EOF"
+	  lt_dump_script (stdout);
+	  return 0;
+	}
+      if (strcmp (argv[i], debug_opt) == 0)
+	{
+          lt_debug = 1;
+          continue;
+	}
+      if (strcmp (argv[i], ltwrapper_option_prefix) == 0)
+        {
+          /* however, if there is an option in the LTWRAPPER_OPTION_PREFIX
+             namespace, but it is not one of the ones we know about and
+             have already dealt with, above (including dump-script), then
+             report an error. Otherwise, targets might begin to believe
+             they are allowed to use options in the LTWRAPPER_OPTION_PREFIX
+             namespace. The first time any user complains about this, we'll
+             need to make LTWRAPPER_OPTION_PREFIX a configure-time option
+             or a configure.ac-settable value.
+           */
+          lt_fatal (__FILE__, __LINE__,
+		    "unrecognized %s option: '%s'",
+                    ltwrapper_option_prefix, argv[i]);
+        }
+      /* otherwise ... */
+      newargz[++newargc] = xstrdup (argv[i]);
+    }
+  newargz[++newargc] = NULL;
+
+EOF
+	    cat <<EOF
+  /* The GNU banner must be the first non-error debug message */
+  lt_debugprintf (__FILE__, __LINE__, "libtool wrapper (GNU $PACKAGE$TIMESTAMP) $VERSION\n");
+EOF
+	    cat <<"EOF"
+  lt_debugprintf (__FILE__, __LINE__, "(main) argv[0]: %s\n", argv[0]);
+  lt_debugprintf (__FILE__, __LINE__, "(main) program_name: %s\n", program_name);
+
+  tmp_pathspec = find_executable (argv[0]);
+  if (tmp_pathspec == NULL)
+    lt_fatal (__FILE__, __LINE__, "couldn't find %s", argv[0]);
+  lt_debugprintf (__FILE__, __LINE__,
+                  "(main) found exe (before symlink chase) at: %s\n",
+		  tmp_pathspec);
+
+  actual_cwrapper_path = chase_symlinks (tmp_pathspec);
+  lt_debugprintf (__FILE__, __LINE__,
+                  "(main) found exe (after symlink chase) at: %s\n",
+		  actual_cwrapper_path);
+  XFREE (tmp_pathspec);
+
+  actual_cwrapper_name = xstrdup (base_name (actual_cwrapper_path));
+  strendzap (actual_cwrapper_path, actual_cwrapper_name);
+
+  /* wrapper name transforms */
+  strendzap (actual_cwrapper_name, ".exe");
+  tmp_pathspec = lt_extend_str (actual_cwrapper_name, ".exe", 1);
+  XFREE (actual_cwrapper_name);
+  actual_cwrapper_name = tmp_pathspec;
+  tmp_pathspec = NULL;
+
+  /* target_name transforms -- use actual target program name; might have lt- prefix */
+  target_name = xstrdup (base_name (TARGET_PROGRAM_NAME));
+  strendzap (target_name, ".exe");
+  tmp_pathspec = lt_extend_str (target_name, ".exe", 1);
+  XFREE (target_name);
+  target_name = tmp_pathspec;
+  tmp_pathspec = NULL;
+
+  lt_debugprintf (__FILE__, __LINE__,
+		  "(main) libtool target name: %s\n",
+		  target_name);
+EOF
+
+	    cat <<EOF
+  newargz[0] =
+    XMALLOC (char, (strlen (actual_cwrapper_path) +
+		    strlen ("$objdir") + 1 + strlen (actual_cwrapper_name) + 1));
+  strcpy (newargz[0], actual_cwrapper_path);
+  strcat (newargz[0], "$objdir");
+  strcat (newargz[0], "/");
+EOF
+
+	    cat <<"EOF"
+  /* stop here, and copy so we don't have to do this twice */
+  tmp_pathspec = xstrdup (newargz[0]);
+
+  /* do NOT want the lt- prefix here, so use actual_cwrapper_name */
+  strcat (newargz[0], actual_cwrapper_name);
+
+  /* DO want the lt- prefix here if it exists, so use target_name */
+  lt_argv_zero = lt_extend_str (tmp_pathspec, target_name, 1);
+  XFREE (tmp_pathspec);
+  tmp_pathspec = NULL;
+EOF
+
+	    case $host_os in
+	      mingw*)
+	    cat <<"EOF"
+  {
+    char* p;
+    while ((p = strchr (newargz[0], '\\')) != NULL)
+      {
+	*p = '/';
+      }
+    while ((p = strchr (lt_argv_zero, '\\')) != NULL)
+      {
+	*p = '/';
+      }
+  }
+EOF
+	    ;;
+	    esac
+
+	    cat <<"EOF"
+  XFREE (target_name);
+  XFREE (actual_cwrapper_path);
+  XFREE (actual_cwrapper_name);
+
+  lt_setenv ("BIN_SH", "xpg4"); /* for Tru64 */
+  lt_setenv ("DUALCASE", "1");  /* for MKS sh */
+  /* Update the DLL searchpath.  EXE_PATH_VALUE ($dllsearchpath) must be
+     prepended first, so that it ends up *after* LIB_PATH_VALUE ($temp_rpath)
+     in the final PATH: on Windows both *_VARNAMEs are PATH, and the
+     uninstalled libraries must come first. */
+  lt_update_exe_path (EXE_PATH_VARNAME, EXE_PATH_VALUE);
+  lt_update_lib_path (LIB_PATH_VARNAME, LIB_PATH_VALUE);
+
+  lt_debugprintf (__FILE__, __LINE__, "(main) lt_argv_zero: %s\n",
+		  nonnull (lt_argv_zero));
+  for (i = 0; i < newargc; i++)
+    {
+      lt_debugprintf (__FILE__, __LINE__, "(main) newargz[%d]: %s\n",
+		      i, nonnull (newargz[i]));
+    }
+
+EOF
+
+	    case $host_os in
+	      mingw*)
+		cat <<"EOF"
+  /* execv doesn't behave on mingw as it does on unix; use _spawnv instead */
+  newargz = prepare_spawn (newargz);
+  rval = _spawnv (_P_WAIT, lt_argv_zero, (const char * const *) newargz);
+  if (rval == -1)
+    {
+      /* failed to start process */
+      lt_debugprintf (__FILE__, __LINE__,
+		      "(main) failed to launch target \"%s\": %s\n",
+		      lt_argv_zero, nonnull (strerror (errno)));
+      return 127;
+    }
+  return rval;
+EOF
+		;;
+	      *)
+		cat <<"EOF"
+  execv (lt_argv_zero, newargz);
+  return rval; /* =127, but avoids unused variable warning */
+EOF
+		;;
+	    esac
+
+	    cat <<"EOF"
+}
+
+void *
+xmalloc (size_t num)
+{
+  void *p = (void *) malloc (num);
+  if (!p)
+    lt_fatal (__FILE__, __LINE__, "memory exhausted");
+
+  return p;
+}
+
+char *
+xstrdup (const char *string)
+{
+  return string ? strcpy ((char *) xmalloc (strlen (string) + 1),
+			  string) : NULL;
+}
+
+const char *
+base_name (const char *name)
+{
+  const char *base;
+
+#if defined (HAVE_DOS_BASED_FILE_SYSTEM)
+  /* Skip over the disk name in MSDOS pathnames. */
+  if (isalpha ((unsigned char) name[0]) && name[1] == ':')
+    name += 2;
+#endif
+
+  for (base = name; *name; name++)
+    if (IS_DIR_SEPARATOR (*name))
+      base = name + 1;
+  return base;
+}
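+
+/* Illustrative example (editorial addition): base_name ("/a/b/c") returns
+   "c"; on HAVE_DOS_BASED_FILE_SYSTEM hosts base_name ("C:\\tmp\\x.exe")
+   returns "x.exe", assuming IS_DIR_SEPARATOR there accepts '\\' as well.  */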
+
+int
+check_executable (const char *path)
+{
+  struct stat st;
+
+  lt_debugprintf (__FILE__, __LINE__, "(check_executable): %s\n",
+                  nonempty (path));
+  if ((!path) || (!*path))
+    return 0;
+
+  if ((stat (path, &st) >= 0)
+      && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH)))
+    return 1;
+  else
+    return 0;
+}
+
+int
+make_executable (const char *path)
+{
+  int rval = 0;
+  struct stat st;
+
+  lt_debugprintf (__FILE__, __LINE__, "(make_executable): %s\n",
+                  nonempty (path));
+  if ((!path) || (!*path))
+    return 0;
+
+  if (stat (path, &st) >= 0)
+    {
+      rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR);
+    }
+  return rval;
+}
+
+/* Searches for the full path of the wrapper.  Returns
+   newly allocated full path name if found, NULL otherwise.
+   Does not chase symlinks, even on platforms that support them.
+*/
+char *
+find_executable (const char *wrapper)
+{
+  int has_slash = 0;
+  const char *p;
+  const char *p_next;
+  /* static buffer for getcwd */
+  char tmp[LT_PATHMAX + 1];
+  int tmp_len;
+  char *concat_name;
+
+  lt_debugprintf (__FILE__, __LINE__, "(find_executable): %s\n",
+                  nonempty (wrapper));
+
+  if ((wrapper == NULL) || (*wrapper == '\0'))
+    return NULL;
+
+  /* Absolute path? */
+#if defined (HAVE_DOS_BASED_FILE_SYSTEM)
+  if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':')
+    {
+      concat_name = xstrdup (wrapper);
+      if (check_executable (concat_name))
+	return concat_name;
+      XFREE (concat_name);
+    }
+  else
+    {
+#endif
+      if (IS_DIR_SEPARATOR (wrapper[0]))
+	{
+	  concat_name = xstrdup (wrapper);
+	  if (check_executable (concat_name))
+	    return concat_name;
+	  XFREE (concat_name);
+	}
+#if defined (HAVE_DOS_BASED_FILE_SYSTEM)
+    }
+#endif
+
+  for (p = wrapper; *p; p++)
+    if (*p == '/')
+      {
+	has_slash = 1;
+	break;
+      }
+  if (!has_slash)
+    {
+      /* no slashes; search PATH */
+      const char *path = getenv ("PATH");
+      if (path != NULL)
+	{
+	  for (p = path; *p; p = p_next)
+	    {
+	      const char *q;
+	      size_t p_len;
+	      for (q = p; *q; q++)
+		if (IS_PATH_SEPARATOR (*q))
+		  break;
+	      p_len = q - p;
+	      p_next = (*q == '\0' ? q : q + 1);
+	      if (p_len == 0)
+		{
+		  /* empty path: current directory */
+		  if (getcwd (tmp, LT_PATHMAX) == NULL)
+		    lt_fatal (__FILE__, __LINE__, "getcwd failed: %s",
+                              nonnull (strerror (errno)));
+		  tmp_len = strlen (tmp);
+		  concat_name =
+		    XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1);
+		  memcpy (concat_name, tmp, tmp_len);
+		  concat_name[tmp_len] = '/';
+		  strcpy (concat_name + tmp_len + 1, wrapper);
+		}
+	      else
+		{
+		  concat_name =
+		    XMALLOC (char, p_len + 1 + strlen (wrapper) + 1);
+		  memcpy (concat_name, p, p_len);
+		  concat_name[p_len] = '/';
+		  strcpy (concat_name + p_len + 1, wrapper);
+		}
+	      if (check_executable (concat_name))
+		return concat_name;
+	      XFREE (concat_name);
+	    }
+	}
+      /* not found in PATH; assume curdir */
+    }
+  /* Relative path | not found in path: prepend cwd */
+  if (getcwd (tmp, LT_PATHMAX) == NULL)
+    lt_fatal (__FILE__, __LINE__, "getcwd failed: %s",
+              nonnull (strerror (errno)));
+  tmp_len = strlen (tmp);
+  concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1);
+  memcpy (concat_name, tmp, tmp_len);
+  concat_name[tmp_len] = '/';
+  strcpy (concat_name + tmp_len + 1, wrapper);
+
+  if (check_executable (concat_name))
+    return concat_name;
+  XFREE (concat_name);
+  return NULL;
+}
+
+char *
+chase_symlinks (const char *pathspec)
+{
+#ifndef S_ISLNK
+  return xstrdup (pathspec);
+#else
+  char buf[LT_PATHMAX];
+  struct stat s;
+  char *tmp_pathspec = xstrdup (pathspec);
+  char *p;
+  int has_symlinks = 0;
+  while (strlen (tmp_pathspec) && !has_symlinks)
+    {
+      lt_debugprintf (__FILE__, __LINE__,
+		      "checking path component for symlinks: %s\n",
+		      tmp_pathspec);
+      if (lstat (tmp_pathspec, &s) == 0)
+	{
+	  if (S_ISLNK (s.st_mode) != 0)
+	    {
+	      has_symlinks = 1;
+	      break;
+	    }
+
+	  /* search backwards for last DIR_SEPARATOR */
+	  p = tmp_pathspec + strlen (tmp_pathspec) - 1;
+	  while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p)))
+	    p--;
+	  if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p)))
+	    {
+	      /* no more DIR_SEPARATORS left */
+	      break;
+	    }
+	  *p = '\0';
+	}
+      else
+	{
+	  lt_fatal (__FILE__, __LINE__,
+		    "error accessing file \"%s\": %s",
+		    tmp_pathspec, nonnull (strerror (errno)));
+	}
+    }
+  XFREE (tmp_pathspec);
+
+  if (!has_symlinks)
+    {
+      return xstrdup (pathspec);
+    }
+
+  tmp_pathspec = realpath (pathspec, buf);
+  if (tmp_pathspec == NULL)
+    {
+      lt_fatal (__FILE__, __LINE__,
+		"could not follow symlinks for %s", pathspec);
+    }
+  return xstrdup (tmp_pathspec);
+#endif
+}
+
+char *
+strendzap (char *str, const char *pat)
+{
+  size_t len, patlen;
+
+  assert (str != NULL);
+  assert (pat != NULL);
+
+  len = strlen (str);
+  patlen = strlen (pat);
+
+  if (patlen <= len)
+    {
+      str += len - patlen;
+      if (strcmp (str, pat) == 0)
+	*str = '\0';
+    }
+  return str;
+}
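+
+/* Illustrative example (editorial addition): given char buf[] = "app.exe",
+   strendzap (buf, ".exe") truncates buf to "app" and returns a pointer to
+   the new terminating NUL; a non-matching pattern leaves buf untouched.  */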
+
+void
+lt_debugprintf (const char *file, int line, const char *fmt, ...)
+{
+  va_list args;
+  if (lt_debug)
+    {
+      (void) fprintf (stderr, "%s:%s:%d: ", program_name, file, line);
+      va_start (args, fmt);
+      (void) vfprintf (stderr, fmt, args);
+      va_end (args);
+    }
+}
+
+static void
+lt_error_core (int exit_status, const char *file,
+	       int line, const char *mode,
+	       const char *message, va_list ap)
+{
+  fprintf (stderr, "%s:%s:%d: %s: ", program_name, file, line, mode);
+  vfprintf (stderr, message, ap);
+  fprintf (stderr, ".\n");
+
+  if (exit_status >= 0)
+    exit (exit_status);
+}
+
+void
+lt_fatal (const char *file, int line, const char *message, ...)
+{
+  va_list ap;
+  va_start (ap, message);
+  lt_error_core (EXIT_FAILURE, file, line, "FATAL", message, ap);
+  va_end (ap);
+}
+
+static const char *
+nonnull (const char *s)
+{
+  return s ? s : "(null)";
+}
+
+static const char *
+nonempty (const char *s)
+{
+  return (s && !*s) ? "(empty)" : nonnull (s);
+}
+
+void
+lt_setenv (const char *name, const char *value)
+{
+  lt_debugprintf (__FILE__, __LINE__,
+		  "(lt_setenv) setting '%s' to '%s'\n",
+                  nonnull (name), nonnull (value));
+  {
+#ifdef HAVE_SETENV
+    /* always make a copy, for consistency with !HAVE_SETENV */
+    char *str = xstrdup (value);
+    setenv (name, str, 1);
+#else
+    int len = strlen (name) + 1 + strlen (value) + 1;
+    char *str = XMALLOC (char, len);
+    sprintf (str, "%s=%s", name, value);
+    if (putenv (str) != EXIT_SUCCESS)
+      {
+        XFREE (str);
+      }
+#endif
+  }
+}
+
+char *
+lt_extend_str (const char *orig_value, const char *add, int to_end)
+{
+  char *new_value;
+  if (orig_value && *orig_value)
+    {
+      int orig_value_len = strlen (orig_value);
+      int add_len = strlen (add);
+      new_value = XMALLOC (char, add_len + orig_value_len + 1);
+      if (to_end)
+        {
+          strcpy (new_value, orig_value);
+          strcpy (new_value + orig_value_len, add);
+        }
+      else
+        {
+          strcpy (new_value, add);
+          strcpy (new_value + add_len, orig_value);
+        }
+    }
+  else
+    {
+      new_value = xstrdup (add);
+    }
+  return new_value;
+}
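+
+/* Illustrative example (editorial addition): with to_end == 0,
+   lt_extend_str ("/usr/lib", "/opt/lib:", 0) returns the freshly allocated
+   string "/opt/lib:/usr/lib"; a NULL or empty orig_value yields a plain
+   copy of add.  */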
+
+void
+lt_update_exe_path (const char *name, const char *value)
+{
+  lt_debugprintf (__FILE__, __LINE__,
+		  "(lt_update_exe_path) modifying '%s' by prepending '%s'\n",
+                  nonnull (name), nonnull (value));
+
+  if (name && *name && value && *value)
+    {
+      char *new_value = lt_extend_str (getenv (name), value, 0);
+      /* some systems can't cope with a ':'-terminated path #' */
+      int len;
+      while (((len = strlen (new_value)) > 0) && IS_PATH_SEPARATOR (new_value[len-1]))
+        {
+          new_value[len-1] = '\0';
+        }
+      lt_setenv (name, new_value);
+      XFREE (new_value);
+    }
+}
+
+void
+lt_update_lib_path (const char *name, const char *value)
+{
+  lt_debugprintf (__FILE__, __LINE__,
+		  "(lt_update_lib_path) modifying '%s' by prepending '%s'\n",
+                  nonnull (name), nonnull (value));
+
+  if (name && *name && value && *value)
+    {
+      char *new_value = lt_extend_str (getenv (name), value, 0);
+      lt_setenv (name, new_value);
+      XFREE (new_value);
+    }
+}
+
+EOF
+	    case $host_os in
+	      mingw*)
+		cat <<"EOF"
+
+/* Prepares an argument vector before calling spawn().
+   Note that spawn() does not by itself call the command interpreter
+     (getenv ("COMSPEC") != NULL ? getenv ("COMSPEC") :
+      ({ OSVERSIONINFO v; v.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
+         GetVersionEx(&v);
+         v.dwPlatformId == VER_PLATFORM_WIN32_NT;
+      }) ? "cmd.exe" : "command.com").
+   Instead it simply concatenates the arguments, separated by ' ', and calls
+   CreateProcess().  We must quote the arguments since Win32 CreateProcess()
+   interprets characters like ' ', '\t', '\\', '"' (but not '<' and '>') in a
+   special way:
+   - Space and tab are interpreted as delimiters. They are not treated as
+     delimiters if they are surrounded by double quotes: "...".
+   - Unescaped double quotes are removed from the input. Their only effect is
+     that within double quotes, space and tab are treated like normal
+     characters.
+   - Backslashes not followed by double quotes are not special.
+   - But 2*n+1 backslashes followed by a double quote become
+     n backslashes followed by a double quote (n >= 0):
+       \" -> "
+       \\\" -> \"
+       \\\\\" -> \\"
+ */
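+/* Worked example (editorial addition, not upstream text):
+     say "hi"   contains a space and quotes, so it is emitted as
+     "say \"hi\""
+   and a backslash ending up before the closing quote is doubled:
+     my dir\    becomes   "my dir\\"
+ */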
+#define SHELL_SPECIAL_CHARS "\"\\ \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037"
+#define SHELL_SPACE_CHARS " \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037"
+char **
+prepare_spawn (char **argv)
+{
+  size_t argc;
+  char **new_argv;
+  size_t i;
+
+  /* Count number of arguments.  */
+  for (argc = 0; argv[argc] != NULL; argc++)
+    ;
+
+  /* Allocate new argument vector.  */
+  new_argv = XMALLOC (char *, argc + 1);
+
+  /* Put quoted arguments into the new argument vector.  */
+  for (i = 0; i < argc; i++)
+    {
+      const char *string = argv[i];
+
+      if (string[0] == '\0')
+	new_argv[i] = xstrdup ("\"\"");
+      else if (strpbrk (string, SHELL_SPECIAL_CHARS) != NULL)
+	{
+	  int quote_around = (strpbrk (string, SHELL_SPACE_CHARS) != NULL);
+	  size_t length;
+	  unsigned int backslashes;
+	  const char *s;
+	  char *quoted_string;
+	  char *p;
+
+	  length = 0;
+	  backslashes = 0;
+	  if (quote_around)
+	    length++;
+	  for (s = string; *s != '\0'; s++)
+	    {
+	      char c = *s;
+	      if (c == '"')
+		length += backslashes + 1;
+	      length++;
+	      if (c == '\\')
+		backslashes++;
+	      else
+		backslashes = 0;
+	    }
+	  if (quote_around)
+	    length += backslashes + 1;
+
+	  quoted_string = XMALLOC (char, length + 1);
+
+	  p = quoted_string;
+	  backslashes = 0;
+	  if (quote_around)
+	    *p++ = '"';
+	  for (s = string; *s != '\0'; s++)
+	    {
+	      char c = *s;
+	      if (c == '"')
+		{
+		  unsigned int j;
+		  for (j = backslashes + 1; j > 0; j--)
+		    *p++ = '\\';
+		}
+	      *p++ = c;
+	      if (c == '\\')
+		backslashes++;
+	      else
+		backslashes = 0;
+	    }
+	  if (quote_around)
+	    {
+	      unsigned int j;
+	      for (j = backslashes; j > 0; j--)
+		*p++ = '\\';
+	      *p++ = '"';
+	    }
+	  *p = '\0';
+
+	  new_argv[i] = quoted_string;
+	}
+      else
+	new_argv[i] = (char *) string;
+    }
+  new_argv[argc] = NULL;
+
+  return new_argv;
+}
+EOF
+		;;
+	    esac
+
+            cat <<"EOF"
+void lt_dump_script (FILE* f)
+{
+EOF
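+	    # The sed program below folds each emitted line at 79 columns,
+	    # escapes '\' and '"', appends a literal \n, and prints every
+	    # piece as an fputs() call inside the generated lt_dump_script().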
+	    func_emit_wrapper yes |
+	      $SED -n -e '
+s/^\(.\{79\}\)\(..*\)/\1\
+\2/
+h
+s/\([\\"]\)/\\\1/g
+s/$/\\n/
+s/\([^\n]*\).*/  fputs ("\1", f);/p
+g
+D'
+            cat <<"EOF"
+}
+EOF
+}
+# end: func_emit_cwrapperexe_src
+
+# func_win32_import_lib_p ARG
+# True if ARG is an import lib, as indicated by $file_magic_cmd
+func_win32_import_lib_p ()
+{
+    $opt_debug
+    case `eval $file_magic_cmd \"\$1\" 2>/dev/null | $SED -e 10q` in
+    *import*) : ;;
+    *) false ;;
+    esac
+}
+
+# func_mode_link arg...
+func_mode_link ()
+{
+    $opt_debug
+    case $host in
+    *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*)
+      # It is impossible to link a dll without this setting, and
+      # we shouldn't force the makefile maintainer to figure out
+      # which system we are compiling for in order to pass an extra
+      # flag for every libtool invocation.
+      # allow_undefined=no
+
+      # FIXME: Unfortunately, there are problems with the above when trying
+      # to make a dll which has undefined symbols, in which case not
+      # even a static library is built.  For now, we need to specify
+      # -no-undefined on the libtool link line when we can be certain
+      # that all symbols are satisfied, otherwise we get a static library.
+      allow_undefined=yes
+      ;;
+    *)
+      allow_undefined=yes
+      ;;
+    esac
+    libtool_args=$nonopt
+    base_compile="$nonopt $@"
+    compile_command=$nonopt
+    finalize_command=$nonopt
+
+    compile_rpath=
+    finalize_rpath=
+    compile_shlibpath=
+    finalize_shlibpath=
+    convenience=
+    old_convenience=
+    deplibs=
+    old_deplibs=
+    compiler_flags=
+    linker_flags=
+    dllsearchpath=
+    lib_search_path=`pwd`
+    inst_prefix_dir=
+    new_inherited_linker_flags=
+
+    avoid_version=no
+    bindir=
+    dlfiles=
+    dlprefiles=
+    dlself=no
+    export_dynamic=no
+    export_symbols=
+    export_symbols_regex=
+    generated=
+    libobjs=
+    ltlibs=
+    module=no
+    no_install=no
+    objs=
+    non_pic_objects=
+    precious_files_regex=
+    prefer_static_libs=no
+    preload=no
+    prev=
+    prevarg=
+    release=
+    rpath=
+    xrpath=
+    perm_rpath=
+    temp_rpath=
+    thread_safe=no
+    vinfo=
+    vinfo_number=no
+    weak_libs=
+    single_module="${wl}-single_module"
+    func_infer_tag $base_compile
+
+    # We need to know -static, to get the right output filenames.
+    for arg
+    do
+      case $arg in
+      -shared)
+	test "$build_libtool_libs" != yes && \
+	  func_fatal_configuration "cannot build a shared library"
+	build_old_libs=no
+	break
+	;;
+      -all-static | -static | -static-libtool-libs)
+	case $arg in
+	-all-static)
+	  if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then
+	    func_warning "complete static linking is impossible in this configuration"
+	  fi
+	  if test -n "$link_static_flag"; then
+	    dlopen_self=$dlopen_self_static
+	  fi
+	  prefer_static_libs=yes
+	  ;;
+	-static)
+	  if test -z "$pic_flag" && test -n "$link_static_flag"; then
+	    dlopen_self=$dlopen_self_static
+	  fi
+	  prefer_static_libs=built
+	  ;;
+	-static-libtool-libs)
+	  if test -z "$pic_flag" && test -n "$link_static_flag"; then
+	    dlopen_self=$dlopen_self_static
+	  fi
+	  prefer_static_libs=yes
+	  ;;
+	esac
+	build_libtool_libs=no
+	build_old_libs=yes
+	break
+	;;
+      esac
+    done
+
+    # See if our shared archives depend on static archives.
+    test -n "$old_archive_from_new_cmds" && build_old_libs=yes
+
+    # Go through the arguments, transforming them on the way.
+    while test "$#" -gt 0; do
+      arg="$1"
+      shift
+      func_quote_for_eval "$arg"
+      qarg=$func_quote_for_eval_unquoted_result
+      func_append libtool_args " $func_quote_for_eval_result"
+
+      # If the previous option needs an argument, assign it.
+      if test -n "$prev"; then
+	case $prev in
+	output)
+	  func_append compile_command " @OUTPUT@"
+	  func_append finalize_command " @OUTPUT@"
+	  ;;
+	esac
+
+	case $prev in
+	bindir)
+	  bindir="$arg"
+	  prev=
+	  continue
+	  ;;
+	dlfiles|dlprefiles)
+	  if test "$preload" = no; then
+	    # Add the symbol object into the linking commands.
+	    func_append compile_command " @SYMFILE@"
+	    func_append finalize_command " @SYMFILE@"
+	    preload=yes
+	  fi
+	  case $arg in
+	  *.la | *.lo) ;;  # We handle these cases below.
+	  force)
+	    if test "$dlself" = no; then
+	      dlself=needless
+	      export_dynamic=yes
+	    fi
+	    prev=
+	    continue
+	    ;;
+	  self)
+	    if test "$prev" = dlprefiles; then
+	      dlself=yes
+	    elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then
+	      dlself=yes
+	    else
+	      dlself=needless
+	      export_dynamic=yes
+	    fi
+	    prev=
+	    continue
+	    ;;
+	  *)
+	    if test "$prev" = dlfiles; then
+	      func_append dlfiles " $arg"
+	    else
+	      func_append dlprefiles " $arg"
+	    fi
+	    prev=
+	    continue
+	    ;;
+	  esac
+	  ;;
+	expsyms)
+	  export_symbols="$arg"
+	  test -f "$arg" \
+	    || func_fatal_error "symbol file \`$arg' does not exist"
+	  prev=
+	  continue
+	  ;;
+	expsyms_regex)
+	  export_symbols_regex="$arg"
+	  prev=
+	  continue
+	  ;;
+	framework)
+	  case $host in
+	    *-*-darwin*)
+	      case "$deplibs " in
+		*" $qarg.ltframework "*) ;;
+		*) func_append deplibs " $qarg.ltframework" # this is fixed later
+		   ;;
+	      esac
+	      ;;
+	  esac
+	  prev=
+	  continue
+	  ;;
+	inst_prefix)
+	  inst_prefix_dir="$arg"
+	  prev=
+	  continue
+	  ;;
+	objectlist)
+	  if test -f "$arg"; then
+	    save_arg=$arg
+	    moreargs=
+	    for fil in `cat "$save_arg"`
+	    do
+#	      func_append moreargs " $fil"
+	      arg=$fil
+	      # A libtool-controlled object.
+
+	      # Check to see that this really is a libtool object.
+	      if func_lalib_unsafe_p "$arg"; then
+		pic_object=
+		non_pic_object=
+
+		# Read the .lo file
+		func_source "$arg"
+
+		if test -z "$pic_object" ||
+		   test -z "$non_pic_object" ||
+		   test "$pic_object" = none &&
+		   test "$non_pic_object" = none; then
+		  func_fatal_error "cannot find name of object for \`$arg'"
+		fi
+
+		# Extract subdirectory from the argument.
+		func_dirname "$arg" "/" ""
+		xdir="$func_dirname_result"
+
+		if test "$pic_object" != none; then
+		  # Prepend the subdirectory the object is found in.
+		  pic_object="$xdir$pic_object"
+
+		  if test "$prev" = dlfiles; then
+		    if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then
+		      func_append dlfiles " $pic_object"
+		      prev=
+		      continue
+		    else
+		      # If libtool objects are unsupported, then we need to preload.
+		      prev=dlprefiles
+		    fi
+		  fi
+
+		  # CHECK ME:  I think I busted this.  -Ossama
+		  if test "$prev" = dlprefiles; then
+		    # Preload the old-style object.
+		    func_append dlprefiles " $pic_object"
+		    prev=
+		  fi
+
+		  # A PIC object.
+		  func_append libobjs " $pic_object"
+		  arg="$pic_object"
+		fi
+
+		# Non-PIC object.
+		if test "$non_pic_object" != none; then
+		  # Prepend the subdirectory the object is found in.
+		  non_pic_object="$xdir$non_pic_object"
+
+		  # A standard non-PIC object
+		  func_append non_pic_objects " $non_pic_object"
+		  if test -z "$pic_object" || test "$pic_object" = none ; then
+		    arg="$non_pic_object"
+		  fi
+		else
+		  # If the PIC object exists, use it instead.
+		  # $xdir was prepended to $pic_object above.
+		  non_pic_object="$pic_object"
+		  func_append non_pic_objects " $non_pic_object"
+		fi
+	      else
+		# Only an error if not doing a dry-run.
+		if $opt_dry_run; then
+		  # Extract subdirectory from the argument.
+		  func_dirname "$arg" "/" ""
+		  xdir="$func_dirname_result"
+
+		  func_lo2o "$arg"
+		  pic_object=$xdir$objdir/$func_lo2o_result
+		  non_pic_object=$xdir$func_lo2o_result
+		  func_append libobjs " $pic_object"
+		  func_append non_pic_objects " $non_pic_object"
+	        else
+		  func_fatal_error "\`$arg' is not a valid libtool object"
+		fi
+	      fi
+	    done
+	  else
+	    func_fatal_error "link input file \`$arg' does not exist"
+	  fi
+	  arg=$save_arg
+	  prev=
+	  continue
+	  ;;
+	precious_regex)
+	  precious_files_regex="$arg"
+	  prev=
+	  continue
+	  ;;
+	release)
+	  release="-$arg"
+	  prev=
+	  continue
+	  ;;
+	rpath | xrpath)
+	  # We need an absolute path.
+	  case $arg in
+	  [\\/]* | [A-Za-z]:[\\/]*) ;;
+	  *)
+	    func_fatal_error "only absolute run-paths are allowed"
+	    ;;
+	  esac
+	  if test "$prev" = rpath; then
+	    case "$rpath " in
+	    *" $arg "*) ;;
+	    *) func_append rpath " $arg" ;;
+	    esac
+	  else
+	    case "$xrpath " in
+	    *" $arg "*) ;;
+	    *) func_append xrpath " $arg" ;;
+	    esac
+	  fi
+	  prev=
+	  continue
+	  ;;
+	shrext)
+	  shrext_cmds="$arg"
+	  prev=
+	  continue
+	  ;;
+	weak)
+	  func_append weak_libs " $arg"
+	  prev=
+	  continue
+	  ;;
+	xcclinker)
+	  func_append linker_flags " $qarg"
+	  func_append compiler_flags " $qarg"
+	  prev=
+	  func_append compile_command " $qarg"
+	  func_append finalize_command " $qarg"
+	  continue
+	  ;;
+	xcompiler)
+	  func_append compiler_flags " $qarg"
+	  prev=
+	  func_append compile_command " $qarg"
+	  func_append finalize_command " $qarg"
+	  continue
+	  ;;
+	xlinker)
+	  func_append linker_flags " $qarg"
+	  func_append compiler_flags " $wl$qarg"
+	  prev=
+	  func_append compile_command " $wl$qarg"
+	  func_append finalize_command " $wl$qarg"
+	  continue
+	  ;;
+	*)
+	  eval "$prev=\"\$arg\""
+	  prev=
+	  continue
+	  ;;
+	esac
+      fi # test -n "$prev"
+
+      prevarg="$arg"
+
+      case $arg in
+      -all-static)
+	if test -n "$link_static_flag"; then
+	  # See comment for -static flag below, for more details.
+	  func_append compile_command " $link_static_flag"
+	  func_append finalize_command " $link_static_flag"
+	fi
+	continue
+	;;
+
+      -allow-undefined)
+	# FIXME: remove this flag sometime in the future.
+	func_fatal_error "\`-allow-undefined' must not be used because it is the default"
+	;;
+
+      -avoid-version)
+	avoid_version=yes
+	continue
+	;;
+
+      -bindir)
+	prev=bindir
+	continue
+	;;
+
+      -dlopen)
+	prev=dlfiles
+	continue
+	;;
+
+      -dlpreopen)
+	prev=dlprefiles
+	continue
+	;;
+
+      -export-dynamic)
+	export_dynamic=yes
+	continue
+	;;
+
+      -export-symbols | -export-symbols-regex)
+	if test -n "$export_symbols" || test -n "$export_symbols_regex"; then
+	  func_fatal_error "more than one -export-symbols argument is not allowed"
+	fi
+	if test "X$arg" = "X-export-symbols"; then
+	  prev=expsyms
+	else
+	  prev=expsyms_regex
+	fi
+	continue
+	;;
+
+      -framework)
+	prev=framework
+	continue
+	;;
+
+      -inst-prefix-dir)
+	prev=inst_prefix
+	continue
+	;;
+
+      # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:*
+      # so, if we see these flags, be careful not to treat them like -L
+      -L[A-Z][A-Z]*:*)
+	case $with_gcc/$host in
+	no/*-*-irix* | /*-*-irix*)
+	  func_append compile_command " $arg"
+	  func_append finalize_command " $arg"
+	  ;;
+	esac
+	continue
+	;;
+
+      -L*)
+	func_stripname "-L" '' "$arg"
+	if test -z "$func_stripname_result"; then
+	  if test "$#" -gt 0; then
+	    func_fatal_error "no space allowed between \`-L' and \`$1'"
+	  else
+	    func_fatal_error "need path for \`-L' option"
+	  fi
+	fi
+	func_resolve_sysroot "$func_stripname_result"
+	dir=$func_resolve_sysroot_result
+	# We need an absolute path.
+	case $dir in
+	[\\/]* | [A-Za-z]:[\\/]*) ;;
+	*)
+	  absdir=`cd "$dir" && pwd`
+	  test -z "$absdir" && \
+	    func_fatal_error "cannot determine absolute directory name of \`$dir'"
+	  dir="$absdir"
+	  ;;
+	esac
+	case "$deplibs " in
+	*" -L$dir "* | *" $arg "*)
+	  # Will only happen for absolute or sysroot arguments
+	  ;;
+	*)
+	  # Preserve sysroot, but never include relative directories
+	  case $dir in
+	    [\\/]* | [A-Za-z]:[\\/]* | =*) func_append deplibs " $arg" ;;
+	    *) func_append deplibs " -L$dir" ;;
+	  esac
+	  func_append lib_search_path " $dir"
+	  ;;
+	esac
+	case $host in
+	*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*)
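+	  # On these hosts DLLs are typically installed in a sibling bin
+	  # directory; derive it from the -L path (e.g. /foo/lib -> /foo/bin,
+	  # via the sed substitution below) and add both to $dllsearchpath.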
+	  testbindir=`$ECHO "$dir" | $SED 's*/lib$*/bin*'`
+	  case :$dllsearchpath: in
+	  *":$dir:"*) ;;
+	  ::) dllsearchpath=$dir;;
+	  *) func_append dllsearchpath ":$dir";;
+	  esac
+	  case :$dllsearchpath: in
+	  *":$testbindir:"*) ;;
+	  ::) dllsearchpath=$testbindir;;
+	  *) func_append dllsearchpath ":$testbindir";;
+	  esac
+	  ;;
+	esac
+	continue
+	;;
+
+      -l*)
+	if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then
+	  case $host in
+	  *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc* | *-*-haiku*)
+	    # These systems don't actually have a C or math library (as such)
+	    continue
+	    ;;
+	  *-*-os2*)
+	    # These systems don't actually have a C library (as such)
+	    test "X$arg" = "X-lc" && continue
+	    ;;
+	  *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*)
+	    # Do not include libc due to us having libc/libc_r.
+	    test "X$arg" = "X-lc" && continue
+	    ;;
+	  *-*-rhapsody* | *-*-darwin1.[012])
+	    # Rhapsody C and math libraries are in the System framework
+	    func_append deplibs " System.ltframework"
+	    continue
+	    ;;
+	  *-*-sco3.2v5* | *-*-sco5v6*)
+	    # Causes problems with __ctype
+	    test "X$arg" = "X-lc" && continue
+	    ;;
+	  *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*)
+	    # Compiler inserts libc in the correct place for threads to work
+	    test "X$arg" = "X-lc" && continue
+	    ;;
+	  esac
+	elif test "X$arg" = "X-lc_r"; then
+	 case $host in
+	 *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*)
+	   # Do not include libc_r directly, use -pthread flag.
+	   continue
+	   ;;
+	 esac
+	fi
+	func_append deplibs " $arg"
+	continue
+	;;
+
+      -module)
+	module=yes
+	continue
+	;;
+
+      # Tru64 UNIX uses -model [arg] to determine the layout of C++
+      # classes, name mangling, and exception handling.
+      # Darwin uses the -arch flag to determine output architecture.
+      -model|-arch|-isysroot|--sysroot)
+	func_append compiler_flags " $arg"
+	func_append compile_command " $arg"
+	func_append finalize_command " $arg"
+	prev=xcompiler
+	continue
+	;;
+
+      -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \
+      |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*)
+	func_append compiler_flags " $arg"
+	func_append compile_command " $arg"
+	func_append finalize_command " $arg"
+	case "$new_inherited_linker_flags " in
+	    *" $arg "*) ;;
+	    * ) func_append new_inherited_linker_flags " $arg" ;;
+	esac
+	continue
+	;;
+
+      -multi_module)
+	single_module="${wl}-multi_module"
+	continue
+	;;
+
+      -no-fast-install)
+	fast_install=no
+	continue
+	;;
+
+      -no-install)
+	case $host in
+	*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*)
+	  # The PATH hackery in wrapper scripts is required on Windows
+	  # and Darwin in order for the loader to find any dlls it needs.
+	  func_warning "\`-no-install' is ignored for $host"
+	  func_warning "assuming \`-no-fast-install' instead"
+	  fast_install=no
+	  ;;
+	*) no_install=yes ;;
+	esac
+	continue
+	;;
+
+      -no-undefined)
+	allow_undefined=no
+	continue
+	;;
+
+      -objectlist)
+	prev=objectlist
+	continue
+	;;
+
+      -o) prev=output ;;
+
+      -precious-files-regex)
+	prev=precious_regex
+	continue
+	;;
+
+      -release)
+	prev=release
+	continue
+	;;
+
+      -rpath)
+	prev=rpath
+	continue
+	;;
+
+      -R)
+	prev=xrpath
+	continue
+	;;
+
+      -R*)
+	func_stripname '-R' '' "$arg"
+	dir=$func_stripname_result
+	# We need an absolute path.
+	case $dir in
+	[\\/]* | [A-Za-z]:[\\/]*) ;;
+	=*)
+	  func_stripname '=' '' "$dir"
+	  dir=$lt_sysroot$func_stripname_result
+	  ;;
+	*)
+	  func_fatal_error "only absolute run-paths are allowed"
+	  ;;
+	esac
+	case "$xrpath " in
+	*" $dir "*) ;;
+	*) func_append xrpath " $dir" ;;
+	esac
+	continue
+	;;
+
+      -shared)
+	# The effects of -shared are defined in a previous loop.
+	continue
+	;;
+
+      -shrext)
+	prev=shrext
+	continue
+	;;
+
+      -static | -static-libtool-libs)
+	# The effects of -static are defined in a previous loop.
+	# We used to do the same as -all-static on platforms that
+	# didn't have a PIC flag, but the assumption that the effects
+	# would be equivalent was wrong.  It would break on at least
+	# Digital Unix and AIX.
+	continue
+	;;
+
+      -thread-safe)
+	thread_safe=yes
+	continue
+	;;
+
+      -version-info)
+	prev=vinfo
+	continue
+	;;
+
+      -version-number)
+	prev=vinfo
+	vinfo_number=yes
+	continue
+	;;
+
+      -weak)
+        prev=weak
+	continue
+	;;
+
+      -Wc,*)
+	func_stripname '-Wc,' '' "$arg"
+	args=$func_stripname_result
+	arg=
+	save_ifs="$IFS"; IFS=','
+	for flag in $args; do
+	  IFS="$save_ifs"
+          func_quote_for_eval "$flag"
+	  func_append arg " $func_quote_for_eval_result"
+	  func_append compiler_flags " $func_quote_for_eval_result"
+	done
+	IFS="$save_ifs"
+	func_stripname ' ' '' "$arg"
+	arg=$func_stripname_result
+	;;
+
+      -Wl,*)
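+	# e.g. -Wl,-rpath,/opt/lib is split at the commas; each piece is
+	# passed to the linker unchanged and to the compiler driver with
+	# the $wl prefix.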
+	func_stripname '-Wl,' '' "$arg"
+	args=$func_stripname_result
+	arg=
+	save_ifs="$IFS"; IFS=','
+	for flag in $args; do
+	  IFS="$save_ifs"
+          func_quote_for_eval "$flag"
+	  func_append arg " $wl$func_quote_for_eval_result"
+	  func_append compiler_flags " $wl$func_quote_for_eval_result"
+	  func_append linker_flags " $func_quote_for_eval_result"
+	done
+	IFS="$save_ifs"
+	func_stripname ' ' '' "$arg"
+	arg=$func_stripname_result
+	;;
+
+      -Xcompiler)
+	prev=xcompiler
+	continue
+	;;
+
+      -Xlinker)
+	prev=xlinker
+	continue
+	;;
+
+      -XCClinker)
+	prev=xcclinker
+	continue
+	;;
+
+      # -msg_* for osf cc
+      -msg_*)
+	func_quote_for_eval "$arg"
+	arg="$func_quote_for_eval_result"
+	;;
+
+      # Flags to be passed through unchanged, with rationale:
+      # -64, -mips[0-9]      enable 64-bit mode for the SGI compiler
+      # -r[0-9][0-9]*        specify processor for the SGI compiler
+      # -xarch=*, -xtarget=* enable 64-bit mode for the Sun compiler
+      # +DA*, +DD*           enable 64-bit mode for the HP compiler
+      # -q*                  compiler args for the IBM compiler
+      # -m*, -t[45]*, -txscale* architecture-specific flags for GCC
+      # -F/path              path to uninstalled frameworks, gcc on darwin
+      # -p, -pg, --coverage, -fprofile-*  profiling flags for GCC
+      # @file                GCC response files
+      # -tp=*                Portland pgcc target processor selection
+      # --sysroot=*          for sysroot support
+      # -O*, -flto*, -fwhopr*, -fuse-linker-plugin GCC link-time optimization
+      -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \
+      -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \
+      -O*|-flto*|-fwhopr*|-fuse-linker-plugin)
+        func_quote_for_eval "$arg"
+	arg="$func_quote_for_eval_result"
+        func_append compile_command " $arg"
+        func_append finalize_command " $arg"
+        func_append compiler_flags " $arg"
+        continue
+        ;;
+
+      # Some other compiler flag.
+      -* | +*)
+        func_quote_for_eval "$arg"
+	arg="$func_quote_for_eval_result"
+	;;
+
+      *.$objext)
+	# A standard object.
+	func_append objs " $arg"
+	;;
+
+      *.lo)
+	# A libtool-controlled object.
+
+	# Check to see that this really is a libtool object.
+	if func_lalib_unsafe_p "$arg"; then
+	  pic_object=
+	  non_pic_object=
+
+	  # Read the .lo file
+	  func_source "$arg"
+
+	  if test -z "$pic_object" ||
+	     test -z "$non_pic_object" ||
+	     test "$pic_object" = none &&
+	     test "$non_pic_object" = none; then
+	    func_fatal_error "cannot find name of object for \`$arg'"
+	  fi
+
+	  # Extract subdirectory from the argument.
+	  func_dirname "$arg" "/" ""
+	  xdir="$func_dirname_result"
+
+	  if test "$pic_object" != none; then
+	    # Prepend the subdirectory the object is found in.
+	    pic_object="$xdir$pic_object"
+
+	    if test "$prev" = dlfiles; then
+	      if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then
+		func_append dlfiles " $pic_object"
+		prev=
+		continue
+	      else
+		# If libtool objects are unsupported, then we need to preload.
+		prev=dlprefiles
+	      fi
+	    fi
+
+	    # CHECK ME:  I think I busted this.  -Ossama
+	    if test "$prev" = dlprefiles; then
+	      # Preload the old-style object.
+	      func_append dlprefiles " $pic_object"
+	      prev=
+	    fi
+
+	    # A PIC object.
+	    func_append libobjs " $pic_object"
+	    arg="$pic_object"
+	  fi
+
+	  # Non-PIC object.
+	  if test "$non_pic_object" != none; then
+	    # Prepend the subdirectory the object is found in.
+	    non_pic_object="$xdir$non_pic_object"
+
+	    # A standard non-PIC object
+	    func_append non_pic_objects " $non_pic_object"
+	    if test -z "$pic_object" || test "$pic_object" = none ; then
+	      arg="$non_pic_object"
+	    fi
+	  else
+	    # If the PIC object exists, use it instead.
+	    # $xdir was prepended to $pic_object above.
+	    non_pic_object="$pic_object"
+	    func_append non_pic_objects " $non_pic_object"
+	  fi
+	else
+	  # Only an error if not doing a dry-run.
+	  if $opt_dry_run; then
+	    # Extract subdirectory from the argument.
+	    func_dirname "$arg" "/" ""
+	    xdir="$func_dirname_result"
+
+	    func_lo2o "$arg"
+	    pic_object=$xdir$objdir/$func_lo2o_result
+	    non_pic_object=$xdir$func_lo2o_result
+	    func_append libobjs " $pic_object"
+	    func_append non_pic_objects " $non_pic_object"
+	  else
+	    func_fatal_error "\`$arg' is not a valid libtool object"
+	  fi
+	fi
+	;;
+
+      *.$libext)
+	# An archive.
+	func_append deplibs " $arg"
+	func_append old_deplibs " $arg"
+	continue
+	;;
+
+      *.la)
+	# A libtool-controlled library.
+
+	func_resolve_sysroot "$arg"
+	if test "$prev" = dlfiles; then
+	  # This library was specified with -dlopen.
+	  func_append dlfiles " $func_resolve_sysroot_result"
+	  prev=
+	elif test "$prev" = dlprefiles; then
+	  # The library was specified with -dlpreopen.
+	  func_append dlprefiles " $func_resolve_sysroot_result"
+	  prev=
+	else
+	  func_append deplibs " $func_resolve_sysroot_result"
+	fi
+	continue
+	;;
+
+      # Some other compiler argument.
+      *)
+	# Unknown arguments in both finalize_command and compile_command need
+	# to be aesthetically quoted because they are evaled later.
+	func_quote_for_eval "$arg"
+	arg="$func_quote_for_eval_result"
+	;;
+      esac # arg
+
+      # Now actually substitute the argument into the commands.
+      if test -n "$arg"; then
+	func_append compile_command " $arg"
+	func_append finalize_command " $arg"
+      fi
+    done # argument parsing loop
+
+    test -n "$prev" && \
+      func_fatal_help "the \`$prevarg' option requires an argument"
+
+    if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then
+      eval arg=\"$export_dynamic_flag_spec\"
+      func_append compile_command " $arg"
+      func_append finalize_command " $arg"
+    fi
+
+    oldlibs=
+    # calculate the name of the file, without its directory
+    func_basename "$output"
+    outputname="$func_basename_result"
+    libobjs_save="$libobjs"
+
+    if test -n "$shlibpath_var"; then
+      # get the directories listed in $shlibpath_var
+      eval shlib_search_path=\`\$ECHO \"\${$shlibpath_var}\" \| \$SED \'s/:/ /g\'\`
+    else
+      shlib_search_path=
+    fi
+    eval sys_lib_search_path=\"$sys_lib_search_path_spec\"
+    eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\"
+
+    func_dirname "$output" "/" ""
+    output_objdir="$func_dirname_result$objdir"
+    func_to_tool_file "$output_objdir/"
+    tool_output_objdir=$func_to_tool_file_result
+    # Create the object directory.
+    func_mkdir_p "$output_objdir"
+
+    # Determine the type of output
+    case $output in
+    "")
+      func_fatal_help "you must specify an output file"
+      ;;
+    *.$libext) linkmode=oldlib ;;
+    *.lo | *.$objext) linkmode=obj ;;
+    *.la) linkmode=lib ;;
+    *) linkmode=prog ;; # Anything else should be a program.
+    esac
+
+    specialdeplibs=
+
+    libs=
+    # Find all interdependent deplibs by searching for libraries
+    # that are linked more than once (e.g. -la -lb -la)
+    for deplib in $deplibs; do
+      if $opt_preserve_dup_deps ; then
+	case "$libs " in
+	*" $deplib "*) func_append specialdeplibs " $deplib" ;;
+	esac
+      fi
+      func_append libs " $deplib"
+    done
+
+    if test "$linkmode" = lib; then
+      libs="$predeps $libs $compiler_lib_search_path $postdeps"
+
+      # Compute libraries that are listed more than once in $predeps
+      # $postdeps and mark them as special (i.e., whose duplicates are
+      # not to be eliminated).
+      pre_post_deps=
+      if $opt_duplicate_compiler_generated_deps; then
+	for pre_post_dep in $predeps $postdeps; do
+	  case "$pre_post_deps " in
+	  *" $pre_post_dep "*) func_append specialdeplibs " $pre_post_dep" ;;
+	  esac
+	  func_append pre_post_deps " $pre_post_dep"
+	done
+      fi
+      pre_post_deps=
+    fi
+
+    deplibs=
+    newdependency_libs=
+    newlib_search_path=
+    need_relink=no # whether we're linking any uninstalled libtool libraries
+    notinst_deplibs= # not-installed libtool libraries
+    notinst_path= # paths that contain not-installed libtool libraries
+
+    case $linkmode in
+    lib)
+	passes="conv dlpreopen link"
+	for file in $dlfiles $dlprefiles; do
+	  case $file in
+	  *.la) ;;
+	  *)
+	    func_fatal_help "libraries can \`-dlopen' only libtool libraries: $file"
+	    ;;
+	  esac
+	done
+	;;
+    prog)
+	compile_deplibs=
+	finalize_deplibs=
+	alldeplibs=no
+	newdlfiles=
+	newdlprefiles=
+	passes="conv scan dlopen dlpreopen link"
+	;;
+    *)  passes="conv"
+	;;
+    esac
+
+    for pass in $passes; do
+      # The preopen pass in lib mode reverses $deplibs; put it back here
+      # so that, for instance, -L comes before the libs that need it...
+      if test "$linkmode,$pass" = "lib,link"; then
+	## FIXME: Find the place where the list is rebuilt in the wrong
+	##        order, and fix it there properly
+        tmp_deplibs=
+	for deplib in $deplibs; do
+	  tmp_deplibs="$deplib $tmp_deplibs"
+	done
+	deplibs="$tmp_deplibs"
+      fi
+
+      if test "$linkmode,$pass" = "lib,link" ||
+	 test "$linkmode,$pass" = "prog,scan"; then
+	libs="$deplibs"
+	deplibs=
+      fi
+      if test "$linkmode" = prog; then
+	case $pass in
+	dlopen) libs="$dlfiles" ;;
+	dlpreopen) libs="$dlprefiles" ;;
+	link)
+	  libs="$deplibs %DEPLIBS%"
+	  test "X$link_all_deplibs" != Xno && libs="$libs $dependency_libs"
+	  ;;
+	esac
+      fi
+      if test "$linkmode,$pass" = "lib,dlpreopen"; then
+	# Collect and forward deplibs of preopened libtool libs
+	for lib in $dlprefiles; do
+	  # Ignore non-libtool-libs
+	  dependency_libs=
+	  func_resolve_sysroot "$lib"
+	  case $lib in
+	  *.la)	func_source "$func_resolve_sysroot_result" ;;
+	  esac
+
+	  # Collect preopened libtool deplibs, except any this library
+	  # has declared as weak libs
+	  for deplib in $dependency_libs; do
+	    func_basename "$deplib"
+            deplib_base=$func_basename_result
+	    case " $weak_libs " in
+	    *" $deplib_base "*) ;;
+	    *) func_append deplibs " $deplib" ;;
+	    esac
+	  done
+	done
+	libs="$dlprefiles"
+      fi
+      if test "$pass" = dlopen; then
+	# Collect dlpreopened libraries
+	save_deplibs="$deplibs"
+	deplibs=
+      fi
+
+      for deplib in $libs; do
+	lib=
+	found=no
+	case $deplib in
+	-mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \
+        |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*)
+	  if test "$linkmode,$pass" = "prog,link"; then
+	    compile_deplibs="$deplib $compile_deplibs"
+	    finalize_deplibs="$deplib $finalize_deplibs"
+	  else
+	    func_append compiler_flags " $deplib"
+	    if test "$linkmode" = lib ; then
+		case "$new_inherited_linker_flags " in
+		    *" $deplib "*) ;;
+		    * ) func_append new_inherited_linker_flags " $deplib" ;;
+		esac
+	    fi
+	  fi
+	  continue
+	  ;;
+	-l*)
+	  if test "$linkmode" != lib && test "$linkmode" != prog; then
+	    func_warning "\`-l' is ignored for archives/objects"
+	    continue
+	  fi
+	  func_stripname '-l' '' "$deplib"
+	  name=$func_stripname_result
+	  if test "$linkmode" = lib; then
+	    searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path"
+	  else
+	    searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path"
+	  fi
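+	  # Try each directory with each extension; the first existing file
+	  # wins (break 2), and only a .la hit marks the deplib as a libtool
+	  # library (found=yes).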
+	  for searchdir in $searchdirs; do
+	    for search_ext in .la $std_shrext .so .a; do
+	      # Search the libtool library
+	      lib="$searchdir/lib${name}${search_ext}"
+	      if test -f "$lib"; then
+		if test "$search_ext" = ".la"; then
+		  found=yes
+		else
+		  found=no
+		fi
+		break 2
+	      fi
+	    done
+	  done
+	  if test "$found" != yes; then
+	    # deplib doesn't seem to be a libtool library
+	    if test "$linkmode,$pass" = "prog,link"; then
+	      compile_deplibs="$deplib $compile_deplibs"
+	      finalize_deplibs="$deplib $finalize_deplibs"
+	    else
+	      deplibs="$deplib $deplibs"
+	      test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs"
+	    fi
+	    continue
+	  else # deplib is a libtool library
+	    # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib,
+	    # we need to do some special things here, and not later.
+	    if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
+	      case " $predeps $postdeps " in
+	      *" $deplib "*)
+		if func_lalib_p "$lib"; then
+		  library_names=
+		  old_library=
+		  func_source "$lib"
+		  for l in $old_library $library_names; do
+		    ll="$l"
+		  done
+		  if test "X$ll" = "X$old_library" ; then # only static version available
+		    found=no
+		    func_dirname "$lib" "" "."
+		    ladir="$func_dirname_result"
+		    lib=$ladir/$old_library
+		    if test "$linkmode,$pass" = "prog,link"; then
+		      compile_deplibs="$deplib $compile_deplibs"
+		      finalize_deplibs="$deplib $finalize_deplibs"
+		    else
+		      deplibs="$deplib $deplibs"
+		      test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs"
+		    fi
+		    continue
+		  fi
+		fi
+		;;
+	      *) ;;
+	      esac
+	    fi
+	  fi
+	  ;; # -l
+	*.ltframework)
+	  if test "$linkmode,$pass" = "prog,link"; then
+	    compile_deplibs="$deplib $compile_deplibs"
+	    finalize_deplibs="$deplib $finalize_deplibs"
+	  else
+	    deplibs="$deplib $deplibs"
+	    if test "$linkmode" = lib ; then
+		case "$new_inherited_linker_flags " in
+		    *" $deplib "*) ;;
+		    * ) func_append new_inherited_linker_flags " $deplib" ;;
+		esac
+	    fi
+	  fi
+	  continue
+	  ;;
+	-L*)
+	  case $linkmode in
+	  lib)
+	    deplibs="$deplib $deplibs"
+	    test "$pass" = conv && continue
+	    newdependency_libs="$deplib $newdependency_libs"
+	    func_stripname '-L' '' "$deplib"
+	    func_resolve_sysroot "$func_stripname_result"
+	    func_append newlib_search_path " $func_resolve_sysroot_result"
+	    ;;
+	  prog)
+	    if test "$pass" = conv; then
+	      deplibs="$deplib $deplibs"
+	      continue
+	    fi
+	    if test "$pass" = scan; then
+	      deplibs="$deplib $deplibs"
+	    else
+	      compile_deplibs="$deplib $compile_deplibs"
+	      finalize_deplibs="$deplib $finalize_deplibs"
+	    fi
+	    func_stripname '-L' '' "$deplib"
+	    func_resolve_sysroot "$func_stripname_result"
+	    func_append newlib_search_path " $func_resolve_sysroot_result"
+	    ;;
+	  *)
+	    func_warning "\`-L' is ignored for archives/objects"
+	    ;;
+	  esac # linkmode
+	  continue
+	  ;; # -L
+	-R*)
+	  if test "$pass" = link; then
+	    func_stripname '-R' '' "$deplib"
+	    func_resolve_sysroot "$func_stripname_result"
+	    dir=$func_resolve_sysroot_result
+	    # Make sure the xrpath contains only unique directories.
+	    case "$xrpath " in
+	    *" $dir "*) ;;
+	    *) func_append xrpath " $dir" ;;
+	    esac
+	  fi
+	  deplibs="$deplib $deplibs"
+	  continue
+	  ;;
+	*.la)
+	  func_resolve_sysroot "$deplib"
+	  lib=$func_resolve_sysroot_result
+	  ;;
+	*.$libext)
+	  if test "$pass" = conv; then
+	    deplibs="$deplib $deplibs"
+	    continue
+	  fi
+	  case $linkmode in
+	  lib)
+	    # Linking convenience modules into shared libraries is allowed,
+	    # but linking other static libraries is non-portable.
+	    case " $dlpreconveniencelibs " in
+	    *" $deplib "*) ;;
+	    *)
+	      valid_a_lib=no
+	      case $deplibs_check_method in
+		match_pattern*)
+		  set dummy $deplibs_check_method; shift
+		  match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"`
+		  if eval "\$ECHO \"$deplib\"" 2>/dev/null | $SED 10q \
+		    | $EGREP "$match_pattern_regex" > /dev/null; then
+		    valid_a_lib=yes
+		  fi
+		;;
+		pass_all)
+		  valid_a_lib=yes
+		;;
+	      esac
+	      if test "$valid_a_lib" != yes; then
+		echo
+		$ECHO "*** Warning: Trying to link with static lib archive $deplib."
+		echo "*** I have the capability to make that library automatically link in when"
+		echo "*** you link to this library.  But I can only do this if you have a"
+		echo "*** shared version of the library, which you do not appear to have"
+		echo "*** because the file extension .$libext of this argument makes me believe"
+		echo "*** that it is just a static archive that I should not use here."
+	      else
+		echo
+		$ECHO "*** Warning: Linking the shared library $output against the"
+		$ECHO "*** static library $deplib is not portable!"
+		deplibs="$deplib $deplibs"
+	      fi
+	      ;;
+	    esac
+	    continue
+	    ;;
+	  prog)
+	    if test "$pass" != link; then
+	      deplibs="$deplib $deplibs"
+	    else
+	      compile_deplibs="$deplib $compile_deplibs"
+	      finalize_deplibs="$deplib $finalize_deplibs"
+	    fi
+	    continue
+	    ;;
+	  esac # linkmode
+	  ;; # *.$libext
+	*.lo | *.$objext)
+	  if test "$pass" = conv; then
+	    deplibs="$deplib $deplibs"
+	  elif test "$linkmode" = prog; then
+	    if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then
+	      # If there is no dlopen support or we're linking statically,
+	      # we need to preload.
+	      func_append newdlprefiles " $deplib"
+	      compile_deplibs="$deplib $compile_deplibs"
+	      finalize_deplibs="$deplib $finalize_deplibs"
+	    else
+	      func_append newdlfiles " $deplib"
+	    fi
+	  fi
+	  continue
+	  ;;
+	%DEPLIBS%)
+	  alldeplibs=yes
+	  continue
+	  ;;
+	esac # case $deplib
+
+	if test "$found" = yes || test -f "$lib"; then :
+	else
+	  func_fatal_error "cannot find the library \`$lib' or unhandled argument \`$deplib'"
+	fi
+
+	# Check to see that this really is a libtool archive.
+	func_lalib_unsafe_p "$lib" \
+	  || func_fatal_error "\`$lib' is not a valid libtool archive"
+
+	func_dirname "$lib" "" "."
+	ladir="$func_dirname_result"
+
+	dlname=
+	dlopen=
+	dlpreopen=
+	libdir=
+	library_names=
+	old_library=
+	inherited_linker_flags=
+	# If the library was installed with an old release of libtool,
+	# it will not redefine the variables \`installed' or \`shouldnotlink'.
+	installed=yes
+	shouldnotlink=no
+	avoidtemprpath=
+
+
+	# Read the .la file
+	func_source "$lib"
+
+	# Convert "-framework foo" to "foo.ltframework"
+	if test -n "$inherited_linker_flags"; then
+	  tmp_inherited_linker_flags=`$ECHO "$inherited_linker_flags" | $SED 's/-framework \([^ $]*\)/\1.ltframework/g'`
+	  for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do
+	    case " $new_inherited_linker_flags " in
+	      *" $tmp_inherited_linker_flag "*) ;;
+	      *) func_append new_inherited_linker_flags " $tmp_inherited_linker_flag";;
+	    esac
+	  done
+	fi
+	dependency_libs=`$ECHO " $dependency_libs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+	if test "$linkmode,$pass" = "lib,link" ||
+	   test "$linkmode,$pass" = "prog,scan" ||
+	   { test "$linkmode" != prog && test "$linkmode" != lib; }; then
+	  test -n "$dlopen" && func_append dlfiles " $dlopen"
+	  test -n "$dlpreopen" && func_append dlprefiles " $dlpreopen"
+	fi
+
+	if test "$pass" = conv; then
+	  # Only check for convenience libraries
+	  deplibs="$lib $deplibs"
+	  if test -z "$libdir"; then
+	    if test -z "$old_library"; then
+	      func_fatal_error "cannot find name of link library for \`$lib'"
+	    fi
+	    # It is a libtool convenience library, so add in its objects.
+	    func_append convenience " $ladir/$objdir/$old_library"
+	    func_append old_convenience " $ladir/$objdir/$old_library"
+	    tmp_libs=
+	    for deplib in $dependency_libs; do
+	      deplibs="$deplib $deplibs"
+	      if $opt_preserve_dup_deps ; then
+		case "$tmp_libs " in
+		*" $deplib "*) func_append specialdeplibs " $deplib" ;;
+		esac
+	      fi
+	      func_append tmp_libs " $deplib"
+	    done
+	  elif test "$linkmode" != prog && test "$linkmode" != lib; then
+	    func_fatal_error "\`$lib' is not a convenience library"
+	  fi
+	  continue
+	fi # $pass = conv
+
+
+	# Get the name of the library we link against.
+	linklib=
+	if test -n "$old_library" &&
+	   { test "$prefer_static_libs" = yes ||
+	     test "$prefer_static_libs,$installed" = "built,no"; }; then
+	  linklib=$old_library
+	else
+	  for l in $old_library $library_names; do
+	    linklib="$l"
+	  done
+	fi
+	if test -z "$linklib"; then
+	  func_fatal_error "cannot find name of link library for \`$lib'"
+	fi
+
+	# This library was specified with -dlopen.
+	if test "$pass" = dlopen; then
+	  if test -z "$libdir"; then
+	    func_fatal_error "cannot -dlopen a convenience library: \`$lib'"
+	  fi
+	  if test -z "$dlname" ||
+	     test "$dlopen_support" != yes ||
+	     test "$build_libtool_libs" = no; then
+	    # If there is no dlname, no dlopen support or we're linking
+	    # statically, we need to preload.  We also need to preload any
+	    # dependent libraries so libltdl's deplib preloader doesn't
+	    # bomb out in the load deplibs phase.
+	    func_append dlprefiles " $lib $dependency_libs"
+	  else
+	    func_append newdlfiles " $lib"
+	  fi
+	  continue
+	fi # $pass = dlopen
+
+	# We need an absolute path.
+	case $ladir in
+	[\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;;
+	*)
+	  abs_ladir=`cd "$ladir" && pwd`
+	  if test -z "$abs_ladir"; then
+	    func_warning "cannot determine absolute directory name of \`$ladir'"
+	    func_warning "passing it literally to the linker, although it might fail"
+	    abs_ladir="$ladir"
+	  fi
+	  ;;
+	esac
+	func_basename "$lib"
+	laname="$func_basename_result"
+
+	# Find the relevant object directory and library name.
+	if test "X$installed" = Xyes; then
+	  if test ! -f "$lt_sysroot$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then
+	    func_warning "library \`$lib' was moved."
+	    dir="$ladir"
+	    absdir="$abs_ladir"
+	    libdir="$abs_ladir"
+	  else
+	    dir="$lt_sysroot$libdir"
+	    absdir="$lt_sysroot$libdir"
+	  fi
+	  test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes
+	else
+	  if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then
+	    dir="$ladir"
+	    absdir="$abs_ladir"
+	    # Remove this search path later
+	    func_append notinst_path " $abs_ladir"
+	  else
+	    dir="$ladir/$objdir"
+	    absdir="$abs_ladir/$objdir"
+	    # Remove this search path later
+	    func_append notinst_path " $abs_ladir"
+	  fi
+	fi # $installed = yes
+	func_stripname 'lib' '.la' "$laname"
+	name=$func_stripname_result
+
+	# This library was specified with -dlpreopen.
+	if test "$pass" = dlpreopen; then
+	  if test -z "$libdir" && test "$linkmode" = prog; then
+	    func_fatal_error "only libraries may -dlpreopen a convenience library: \`$lib'"
+	  fi
+	  case "$host" in
+	    # special handling for platforms with PE-DLLs.
+	    *cygwin* | *mingw* | *cegcc* )
+	      # Linker will automatically link against shared library if both
+	      # static and shared are present.  Therefore, ensure we extract
+	      # symbols from the import library if a shared library is present
+	      # (otherwise, the dlopen module name will be incorrect).  We do
+	      # this by putting the import library name into $newdlprefiles.
+	      # We recover the dlopen module name by 'saving' the la file
+	      # name in a special purpose variable, and (later) extracting the
+	      # dlname from the la file.
+	      if test -n "$dlname"; then
+	        func_tr_sh "$dir/$linklib"
+	        eval "libfile_$func_tr_sh_result=\$abs_ladir/\$laname"
+	        func_append newdlprefiles " $dir/$linklib"
+	      else
+	        func_append newdlprefiles " $dir/$old_library"
+	        # Keep a list of preopened convenience libraries to check
+	        # that they are being used correctly in the link pass.
+	        test -z "$libdir" && \
+	          func_append dlpreconveniencelibs " $dir/$old_library"
+	      fi
+	    ;;
+	    * )
+	      # Prefer using a static library (so that no silly _DYNAMIC symbols
+	      # are required to link).
+	      if test -n "$old_library"; then
+	        func_append newdlprefiles " $dir/$old_library"
+	        # Keep a list of preopened convenience libraries to check
+	        # that they are being used correctly in the link pass.
+	        test -z "$libdir" && \
+	          func_append dlpreconveniencelibs " $dir/$old_library"
+	      # Otherwise, use the dlname, so that lt_dlopen finds it.
+	      elif test -n "$dlname"; then
+	        func_append newdlprefiles " $dir/$dlname"
+	      else
+	        func_append newdlprefiles " $dir/$linklib"
+	      fi
+	    ;;
+	  esac
+	fi # $pass = dlpreopen
+
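+	# A .la file without a libdir entry describes an uninstalled
+	# convenience archive: link its static archive in directly (or keep
+	# the .la itself for the program scan pass) and skip the shared
+	# library handling below.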
+	if test -z "$libdir"; then
+	  # Link the convenience library
+	  if test "$linkmode" = lib; then
+	    deplibs="$dir/$old_library $deplibs"
+	  elif test "$linkmode,$pass" = "prog,link"; then
+	    compile_deplibs="$dir/$old_library $compile_deplibs"
+	    finalize_deplibs="$dir/$old_library $finalize_deplibs"
+	  else
+	    deplibs="$lib $deplibs" # used for prog,scan pass
+	  fi
+	  continue
+	fi
+
+
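+	# During a program's non-link passes we only gather information:
+	# remember where to search for this library and queue up its
+	# dependency_libs; the actual -l/-L selection happens at link time.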
+	if test "$linkmode" = prog && test "$pass" != link; then
+	  func_append newlib_search_path " $ladir"
+	  deplibs="$lib $deplibs"
+
+	  linkalldeplibs=no
+	  if test "$link_all_deplibs" != no || test -z "$library_names" ||
+	     test "$build_libtool_libs" = no; then
+	    linkalldeplibs=yes
+	  fi
+
+	  tmp_libs=
+	  for deplib in $dependency_libs; do
+	    case $deplib in
+	    -L*) func_stripname '-L' '' "$deplib"
+	         func_resolve_sysroot "$func_stripname_result"
+	         func_append newlib_search_path " $func_resolve_sysroot_result"
+		 ;;
+	    esac
+	    # Need to link against all dependency_libs?
+	    if test "$linkalldeplibs" = yes; then
+	      deplibs="$deplib $deplibs"
+	    else
+	      # Need to hardcode shared library paths
+	      # or/and link against static libraries
+	      newdependency_libs="$deplib $newdependency_libs"
+	    fi
+	    if $opt_preserve_dup_deps ; then
+	      case "$tmp_libs " in
+	      *" $deplib "*) func_append specialdeplibs " $deplib" ;;
+	      esac
+	    fi
+	    func_append tmp_libs " $deplib"
+	  done # for deplib
+	  continue
+	fi # $linkmode = prog...
+
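+	# At program link time, record the directories of shared deplibs so
+	# they can be hardcoded into the binary's run path (directories on
+	# the system's default run-time search path are skipped).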
+	if test "$linkmode,$pass" = "prog,link"; then
+	  if test -n "$library_names" &&
+	     { { test "$prefer_static_libs" = no ||
+	         test "$prefer_static_libs,$installed" = "built,yes"; } ||
+	       test -z "$old_library"; }; then
+	    # We need to hardcode the library path
+	    if test -n "$shlibpath_var" && test -z "$avoidtemprpath" ; then
+	      # Make sure the rpath contains only unique directories.
+	      case "$temp_rpath:" in
+	      *"$absdir:"*) ;;
+	      *) func_append temp_rpath "$absdir:" ;;
+	      esac
+	    fi
+
+	    # Hardcode the library path.
+	    # Skip directories that are in the system default run-time
+	    # search path.
+	    case " $sys_lib_dlsearch_path " in
+	    *" $absdir "*) ;;
+	    *)
+	      case "$compile_rpath " in
+	      *" $absdir "*) ;;
+	      *) func_append compile_rpath " $absdir" ;;
+	      esac
+	      ;;
+	    esac
+	    case " $sys_lib_dlsearch_path " in
+	    *" $libdir "*) ;;
+	    *)
+	      case "$finalize_rpath " in
+	      *" $libdir "*) ;;
+	      *) func_append finalize_rpath " $libdir" ;;
+	      esac
+	      ;;
+	    esac
+	  fi # $linkmode,$pass = prog,link...
+
+	  if test "$alldeplibs" = yes &&
+	     { test "$deplibs_check_method" = pass_all ||
+	       { test "$build_libtool_libs" = yes &&
+		 test -n "$library_names"; }; }; then
+	    # We only need to search for static libraries
+	    continue
+	  fi
+	fi
+
+	link_static=no # Whether the deplib will be linked statically
+	use_static_libs=$prefer_static_libs
+	if test "$use_static_libs" = built && test "$installed" = yes; then
+	  use_static_libs=no
+	fi
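+	# A non-empty $library_names means a shared build of this deplib
+	# exists; decide below whether to link it shared or to fall back to
+	# the static archive recorded in $old_library.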
+	if test -n "$library_names" &&
+	   { test "$use_static_libs" = no || test -z "$old_library"; }; then
+	  case $host in
+	  *cygwin* | *mingw* | *cegcc*)
+	      # No point in relinking DLLs because paths are not encoded
+	      func_append notinst_deplibs " $lib"
+	      need_relink=no
+	    ;;
+	  *)
+	    if test "$installed" = no; then
+	      func_append notinst_deplibs " $lib"
+	      need_relink=yes
+	    fi
+	    ;;
+	  esac
+	  # This is a shared library
+
+	  # Warn about portability: we can't link against -module libraries
+	  # on some systems (Darwin).  Don't warn about dlopened modules, though!
+	  dlopenmodule=""
+	  for dlpremoduletest in $dlprefiles; do
+	    if test "X$dlpremoduletest" = "X$lib"; then
+	      dlopenmodule="$dlpremoduletest"
+	      break
+	    fi
+	  done
+	  if test -z "$dlopenmodule" && test "$shouldnotlink" = yes && test "$pass" = link; then
+	    echo
+	    if test "$linkmode" = prog; then
+	      $ECHO "*** Warning: Linking the executable $output against the loadable module"
+	    else
+	      $ECHO "*** Warning: Linking the shared library $output against the loadable module"
+	    fi
+	    $ECHO "*** $linklib is not portable!"
+	  fi
+	  if test "$linkmode" = lib &&
+	     test "$hardcode_into_libs" = yes; then
+	    # Hardcode the library path.
+	    # Skip directories that are in the system default run-time
+	    # search path.
+	    case " $sys_lib_dlsearch_path " in
+	    *" $absdir "*) ;;
+	    *)
+	      case "$compile_rpath " in
+	      *" $absdir "*) ;;
+	      *) func_append compile_rpath " $absdir" ;;
+	      esac
+	      ;;
+	    esac
+	    case " $sys_lib_dlsearch_path " in
+	    *" $libdir "*) ;;
+	    *)
+	      case "$finalize_rpath " in
+	      *" $libdir "*) ;;
+	      *) func_append finalize_rpath " $libdir" ;;
+	      esac
+	      ;;
+	    esac
+	  fi
+
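+	  # If configure supplied commands for rebuilding an archive from
+	  # exported symbols, regenerate an import library for the shared
+	  # library's soname and link against that instead.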
+	  if test -n "$old_archive_from_expsyms_cmds"; then
+	    # figure out the soname
+	    set dummy $library_names
+	    shift
+	    realname="$1"
+	    shift
+	    libname=`eval "\\$ECHO \"$libname_spec\""`
+	    # Use the dlname if we have it; it already names the runtime file.
+	    if test -n "$dlname"; then
+	      soname="$dlname"
+	    elif test -n "$soname_spec"; then
+	      # On Windows hosts the version must be folded into a -MAJOR suffix.
+	      case $host in
+	      *cygwin* | mingw* | *cegcc*)
+	        func_arith $current - $age
+		major=$func_arith_result
+		versuffix="-$major"
+		;;
+	      esac
+	      eval soname=\"$soname_spec\"
+	    else
+	      soname="$realname"
+	    fi
+
+	    # Make a new name for the extract_expsyms_cmds to use
+	    soroot="$soname"
+	    func_basename "$soroot"
+	    soname="$func_basename_result"
+	    func_stripname 'lib' '.dll' "$soname"
+	    newlib=libimp-$func_stripname_result.a
+
+	    # If the library has no export list, then create one now
+	    if test -f "$output_objdir/$soname-def"; then :
+	    else
+	      func_verbose "extracting exported symbol list from \`$soname'"
+	      func_execute_cmds "$extract_expsyms_cmds" 'exit $?'
+	    fi
+
+	    # Create $newlib
+	    if test -f "$output_objdir/$newlib"; then :; else
+	      func_verbose "generating import library for \`$soname'"
+	      func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?'
+	    fi
+	    # make sure the library variables are pointing to the new library
+	    dir=$output_objdir
+	    linklib=$newlib
+	  fi # test -n "$old_archive_from_expsyms_cmds"
+
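+	  # Work out how this deplib is referenced at compile (pre-install)
+	  # time: depending on the hardcode_* capabilities we either name
+	  # the library file directly, emit -L/-l pairs, or extend the
+	  # shared library path variable.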
+	  if test "$linkmode" = prog || test "$opt_mode" != relink; then
+	    add_shlibpath=
+	    add_dir=
+	    add=
+	    lib_linked=yes
+	    case $hardcode_action in
+	    immediate | unsupported)
+	      if test "$hardcode_direct" = no; then
+		add="$dir/$linklib"
+		case $host in
+		  *-*-sco3.2v5.0.[024]*) add_dir="-L$dir" ;;
+		  *-*-sysv4*uw2*) add_dir="-L$dir" ;;
+		  *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \
+		    *-*-unixware7*) add_dir="-L$dir" ;;
+		  *-*-darwin* )
+		    # If the lib is a (non-dlopened) module then we cannot
+		    # link against it; someone is ignoring the earlier warnings.
+		    if /usr/bin/file -L $add 2> /dev/null |
+			 $GREP ": [^:]* bundle" >/dev/null ; then
+		      if test "X$dlopenmodule" != "X$lib"; then
+			$ECHO "*** Warning: lib $linklib is a module, not a shared library"
+			if test -z "$old_library" ; then
+			  echo
+			  echo "*** And there doesn't seem to be a static archive available"
+			  echo "*** The link will probably fail, sorry"
+			else
+			  add="$dir/$old_library"
+			fi
+		      elif test -n "$old_library"; then
+			add="$dir/$old_library"
+		      fi
+		    fi
+		esac
+	      elif test "$hardcode_minus_L" = no; then
+		case $host in
+		*-*-sunos*) add_shlibpath="$dir" ;;
+		esac
+		add_dir="-L$dir"
+		add="-l$name"
+	      elif test "$hardcode_shlibpath_var" = no; then
+		add_shlibpath="$dir"
+		add="-l$name"
+	      else
+		lib_linked=no
+	      fi
+	      ;;
+	    relink)
+	      if test "$hardcode_direct" = yes &&
+	         test "$hardcode_direct_absolute" = no; then
+		add="$dir/$linklib"
+	      elif test "$hardcode_minus_L" = yes; then
+		add_dir="-L$absdir"
+		# Try looking first in the location we're being installed to.
+		if test -n "$inst_prefix_dir"; then
+		  case $libdir in
+		    [\\/]*)
+		      func_append add_dir " -L$inst_prefix_dir$libdir"
+		      ;;
+		  esac
+		fi
+		add="-l$name"
+	      elif test "$hardcode_shlibpath_var" = yes; then
+		add_shlibpath="$dir"
+		add="-l$name"
+	      else
+		lib_linked=no
+	      fi
+	      ;;
+	    *) lib_linked=no ;;
+	    esac
+
+	    if test "$lib_linked" != yes; then
+	      func_fatal_configuration "unsupported hardcode properties"
+	    fi
+
+	    if test -n "$add_shlibpath"; then
+	      case :$compile_shlibpath: in
+	      *":$add_shlibpath:"*) ;;
+	      *) func_append compile_shlibpath "$add_shlibpath:" ;;
+	      esac
+	    fi
+	    if test "$linkmode" = prog; then
+	      test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs"
+	      test -n "$add" && compile_deplibs="$add $compile_deplibs"
+	    else
+	      test -n "$add_dir" && deplibs="$add_dir $deplibs"
+	      test -n "$add" && deplibs="$add $deplibs"
+	      if test "$hardcode_direct" != yes &&
+		 test "$hardcode_minus_L" != yes &&
+		 test "$hardcode_shlibpath_var" = yes; then
+		case :$finalize_shlibpath: in
+		*":$libdir:"*) ;;
+		*) func_append finalize_shlibpath "$libdir:" ;;
+		esac
+	      fi
+	    fi
+	  fi
+
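+	  # Now work out the install-time (finalize) reference, which must
+	  # point at the final $libdir rather than at the build tree.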
+	  if test "$linkmode" = prog || test "$opt_mode" = relink; then
+	    add_shlibpath=
+	    add_dir=
+	    add=
+	    # Finalize command for both is simple: just hardcode it.
+	    if test "$hardcode_direct" = yes &&
+	       test "$hardcode_direct_absolute" = no; then
+	      add="$libdir/$linklib"
+	    elif test "$hardcode_minus_L" = yes; then
+	      add_dir="-L$libdir"
+	      add="-l$name"
+	    elif test "$hardcode_shlibpath_var" = yes; then
+	      case :$finalize_shlibpath: in
+	      *":$libdir:"*) ;;
+	      *) func_append finalize_shlibpath "$libdir:" ;;
+	      esac
+	      add="-l$name"
+	    elif test "$hardcode_automatic" = yes; then
+	      if test -n "$inst_prefix_dir" &&
+		 test -f "$inst_prefix_dir$libdir/$linklib" ; then
+		add="$inst_prefix_dir$libdir/$linklib"
+	      else
+		add="$libdir/$linklib"
+	      fi
+	    else
+	      # We cannot seem to hardcode it, guess we'll fake it.
+	      add_dir="-L$libdir"
+	      # Try looking first in the location we're being installed to.
+	      if test -n "$inst_prefix_dir"; then
+		case $libdir in
+		  [\\/]*)
+		    func_append add_dir " -L$inst_prefix_dir$libdir"
+		    ;;
+		esac
+	      fi
+	      add="-l$name"
+	    fi
+
+	    if test "$linkmode" = prog; then
+	      test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs"
+	      test -n "$add" && finalize_deplibs="$add $finalize_deplibs"
+	    else
+	      test -n "$add_dir" && deplibs="$add_dir $deplibs"
+	      test -n "$add" && deplibs="$add $deplibs"
+	    fi
+	  fi
+	elif test "$linkmode" = prog; then
+	  # Here we assume that one of hardcode_direct or hardcode_minus_L
+	  # is not unsupported.  This is valid on all known static and
+	  # shared platforms.
+	  if test "$hardcode_direct" != unsupported; then
+	    test -n "$old_library" && linklib="$old_library"
+	    compile_deplibs="$dir/$linklib $compile_deplibs"
+	    finalize_deplibs="$dir/$linklib $finalize_deplibs"
+	  else
+	    compile_deplibs="-l$name -L$dir $compile_deplibs"
+	    finalize_deplibs="-l$name -L$dir $finalize_deplibs"
+	  fi
+	elif test "$build_libtool_libs" = yes; then
+	  # Not a shared library
+	  if test "$deplibs_check_method" != pass_all; then
+	    # We're trying to link a shared library against a static one
+	    # but the system doesn't support it.
+
+	    # Just print a warning and add the library to dependency_libs so
+	    # that the program can be linked against the static library.
+	    echo
+	    $ECHO "*** Warning: This system cannot link to static lib archive $lib."
+	    echo "*** I have the capability to make that library automatically link in when"
+	    echo "*** you link to this library.  But I can only do this if you have a"
+	    echo "*** shared version of the library, which you do not appear to have."
+	    if test "$module" = yes; then
+	      echo "*** But since you are trying to build a module library, libtool will still create"
+	      echo "*** a static module, which should work as long as the dlopening application"
+	      echo "*** is linked with the -dlopen flag to resolve symbols at runtime."
+	      if test -z "$global_symbol_pipe"; then
+		echo
+		echo "*** However, this would only work if libtool was able to extract symbol"
+		echo "*** lists from a program, using \`nm' or equivalent, but libtool could"
+		echo "*** not find such a program.  So, this module is probably useless."
+		echo "*** \`nm' from GNU binutils and a full rebuild may help."
+	      fi
+	      if test "$build_old_libs" = no; then
+		build_libtool_libs=module
+		build_old_libs=yes
+	      else
+		build_libtool_libs=no
+	      fi
+	    fi
+	  else
+	    deplibs="$dir/$old_library $deplibs"
+	    link_static=yes
+	  fi
+	fi # link shared/static library?
+
+	if test "$linkmode" = lib; then
+	  if test -n "$dependency_libs" &&
+	     { test "$hardcode_into_libs" != yes ||
+	       test "$build_old_libs" = yes ||
+	       test "$link_static" = yes; }; then
+	    # Extract -R from dependency_libs
+	    temp_deplibs=
+	    for libdir in $dependency_libs; do
+	      case $libdir in
+	      -R*) func_stripname '-R' '' "$libdir"
+	           temp_xrpath=$func_stripname_result
+		   case " $xrpath " in
+		   *" $temp_xrpath "*) ;;
+		   *) func_append xrpath " $temp_xrpath";;
+		   esac;;
+	      *) func_append temp_deplibs " $libdir";;
+	      esac
+	    done
+	    dependency_libs="$temp_deplibs"
+	  fi
+
+	  func_append newlib_search_path " $absdir"
+	  # Link against this library
+	  test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs"
+	  # ... and its dependency_libs
+	  tmp_libs=
+	  for deplib in $dependency_libs; do
+	    newdependency_libs="$deplib $newdependency_libs"
+	    case $deplib in
+              -L*) func_stripname '-L' '' "$deplib"
+                   func_resolve_sysroot "$func_stripname_result";;
+              *) func_resolve_sysroot "$deplib" ;;
+            esac
+	    if $opt_preserve_dup_deps ; then
+	      case "$tmp_libs " in
+	      *" $func_resolve_sysroot_result "*)
+                func_append specialdeplibs " $func_resolve_sysroot_result" ;;
+	      esac
+	    fi
+	    func_append tmp_libs " $func_resolve_sysroot_result"
+	  done
+
+	  if test "$link_all_deplibs" != no; then
+	    # Add the search paths of all dependency libraries
+	    for deplib in $dependency_libs; do
+	      path=
+	      case $deplib in
+	      -L*) path="$deplib" ;;
+	      *.la)
+	        func_resolve_sysroot "$deplib"
+	        deplib=$func_resolve_sysroot_result
+	        func_dirname "$deplib" "" "."
+		dir=$func_dirname_result
+		# We need an absolute path.
+		case $dir in
+		[\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;;
+		*)
+		  absdir=`cd "$dir" && pwd`
+		  if test -z "$absdir"; then
+		    func_warning "cannot determine absolute directory name of \`$dir'"
+		    absdir="$dir"
+		  fi
+		  ;;
+		esac
+		if $GREP "^installed=no" $deplib > /dev/null; then
+		case $host in
+		*-*-darwin*)
+		  depdepl=
+		  eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib`
+		  if test -n "$deplibrary_names" ; then
+		    for tmp in $deplibrary_names ; do
+		      depdepl=$tmp
+		    done
+		    if test -f "$absdir/$objdir/$depdepl" ; then
+		      depdepl="$absdir/$objdir/$depdepl"
+		      darwin_install_name=`${OTOOL} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'`
+                      if test -z "$darwin_install_name"; then
+                          darwin_install_name=`${OTOOL64} -L $depdepl  | awk '{if (NR == 2) {print $1;exit}}'`
+                      fi
+		      func_append compiler_flags " ${wl}-dylib_file ${wl}${darwin_install_name}:${depdepl}"
+		      func_append linker_flags " -dylib_file ${darwin_install_name}:${depdepl}"
+		      path=
+		    fi
+		  fi
+		  ;;
+		*)
+		  path="-L$absdir/$objdir"
+		  ;;
+		esac
+		else
+		  eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib`
+		  test -z "$libdir" && \
+		    func_fatal_error "\`$deplib' is not a valid libtool archive"
+		  test "$absdir" != "$libdir" && \
+		    func_warning "\`$deplib' seems to be moved"
+
+		  path="-L$absdir"
+		fi
+		;;
+	      esac
+	      case " $deplibs " in
+	      *" $path "*) ;;
+	      *) deplibs="$path $deplibs" ;;
+	      esac
+	    done
+	  fi # link_all_deplibs != no
+	fi # linkmode = lib
+      done # for deplib in $libs
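+      # All deplibs for this pass have been scanned; fold the inherited
+      # linker flags back in and normalize the collected lists.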
+      if test "$pass" = link; then
+	if test "$linkmode" = "prog"; then
+	  compile_deplibs="$new_inherited_linker_flags $compile_deplibs"
+	  finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs"
+	else
+	  compiler_flags="$compiler_flags "`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+	fi
+      fi
+      dependency_libs="$newdependency_libs"
+      if test "$pass" = dlpreopen; then
+	# Link the dlpreopened libraries before other libraries
+	for deplib in $save_deplibs; do
+	  deplibs="$deplib $deplibs"
+	done
+      fi
+      if test "$pass" != dlopen; then
+	if test "$pass" != conv; then
+	  # Make sure lib_search_path contains only unique directories.
+	  lib_search_path=
+	  for dir in $newlib_search_path; do
+	    case "$lib_search_path " in
+	    *" $dir "*) ;;
+	    *) func_append lib_search_path " $dir" ;;
+	    esac
+	  done
+	  newlib_search_path=
+	fi
+
+	if test "$linkmode,$pass" != "prog,link"; then
+	  vars="deplibs"
+	else
+	  vars="compile_deplibs finalize_deplibs"
+	fi
+	for var in $vars dependency_libs; do
+	  # Add libraries to $var in reverse order
+	  eval tmp_libs=\"\$$var\"
+	  new_libs=
+	  for deplib in $tmp_libs; do
+	    # FIXME: Pedantically, this is the right thing to do, so
+	    #        that some nasty dependency loop isn't accidentally
+	    #        broken:
+	    #new_libs="$deplib $new_libs"
+	    # Pragmatically, this seems to cause very few problems in
+	    # practice:
+	    case $deplib in
+	    -L*) new_libs="$deplib $new_libs" ;;
+	    -R*) ;;
+	    *)
+	      # And here is the reason: when a library appears more
+	      # than once as an explicit dependence of a library, or
+	      # is implicitly linked in more than once by the
+	      # compiler, it is considered special, and multiple
+	      # occurrences thereof are not removed.  Compare this
+	      # with having the same library being listed as a
+	      # dependency of multiple other libraries: in this case,
+	      # we know (pedantically, we assume) the library does not
+	      # need to be listed more than once, so we keep only the
+	      # last copy.  This is not always right, but it is rare
+	      # enough that we require users that really mean to play
+	      # such unportable linking tricks to link the library
+	      # using -Wl,-lname, so that libtool does not consider it
+	      # for duplicate removal.
+	      case " $specialdeplibs " in
+	      *" $deplib "*) new_libs="$deplib $new_libs" ;;
+	      *)
+		case " $new_libs " in
+		*" $deplib "*) ;;
+		*) new_libs="$deplib $new_libs" ;;
+		esac
+		;;
+	      esac
+	      ;;
+	    esac
+	  done
+	  tmp_libs=
+	  for deplib in $new_libs; do
+	    case $deplib in
+	    -L*)
+	      case " $tmp_libs " in
+	      *" $deplib "*) ;;
+	      *) func_append tmp_libs " $deplib" ;;
+	      esac
+	      ;;
+	    *) func_append tmp_libs " $deplib" ;;
+	    esac
+	  done
+	  eval $var=\"$tmp_libs\"
+	done # for var
+      fi
+      # Last step: remove runtime libs from dependency_libs
+      # (they stay in deplibs)
+      tmp_libs=
+      for i in $dependency_libs ; do
+	case " $predeps $postdeps $compiler_lib_search_path " in
+	*" $i "*)
+	  i=""
+	  ;;
+	esac
+	if test -n "$i" ; then
+	  func_append tmp_libs " $i"
+	fi
+      done
+      dependency_libs=$tmp_libs
+    done # for pass
+    if test "$linkmode" = prog; then
+      dlfiles="$newdlfiles"
+    fi
+    if test "$linkmode" = prog || test "$linkmode" = lib; then
+      dlprefiles="$newdlprefiles"
+    fi
+
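+    # Scanning is done; everything below generates the actual output,
+    # dispatched on the kind of output being created.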
+    case $linkmode in
+    oldlib)
+      if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
+	func_warning "\`-dlopen' is ignored for archives"
+      fi
+
+      case " $deplibs" in
+      *\ -l* | *\ -L*)
+	func_warning "\`-l' and \`-L' are ignored for archives" ;;
+      esac
+
+      test -n "$rpath" && \
+	func_warning "\`-rpath' is ignored for archives"
+
+      test -n "$xrpath" && \
+	func_warning "\`-R' is ignored for archives"
+
+      test -n "$vinfo" && \
+	func_warning "\`-version-info/-version-number' is ignored for archives"
+
+      test -n "$release" && \
+	func_warning "\`-release' is ignored for archives"
+
+      test -n "$export_symbols$export_symbols_regex" && \
+	func_warning "\`-export-symbols' is ignored for archives"
+
+      # Now set the variables for building old libraries.
+      build_libtool_libs=no
+      oldlibs="$output"
+      func_append objs "$old_deplibs"
+      ;;
+
+    lib)
+      # Make sure we only generate libraries of the form `libNAME.la'.
+      case $outputname in
+      lib*)
+	func_stripname 'lib' '.la' "$outputname"
+	name=$func_stripname_result
+	eval shared_ext=\"$shrext_cmds\"
+	eval libname=\"$libname_spec\"
+	;;
+      *)
+	test "$module" = no && \
+	  func_fatal_help "libtool library \`$output' must begin with \`lib'"
+
+	if test "$need_lib_prefix" != no; then
+	  # Add the "lib" prefix for modules if required
+	  func_stripname '' '.la' "$outputname"
+	  name=$func_stripname_result
+	  eval shared_ext=\"$shrext_cmds\"
+	  eval libname=\"$libname_spec\"
+	else
+	  func_stripname '' '.la' "$outputname"
+	  libname=$func_stripname_result
+	fi
+	;;
+      esac
+
+      if test -n "$objs"; then
+	if test "$deplibs_check_method" != pass_all; then
+	  func_fatal_error "cannot build libtool library \`$output' from non-libtool objects on this host:$objs"
+	else
+	  echo
+	  $ECHO "*** Warning: Linking the shared library $output against the non-libtool"
+	  $ECHO "*** objects $objs is not portable!"
+	  func_append libobjs " $objs"
+	fi
+      fi
+
+      test "$dlself" != no && \
+	func_warning "\`-dlopen self' is ignored for libtool libraries"
+
+      set dummy $rpath
+      shift
+      test "$#" -gt 1 && \
+	func_warning "ignoring multiple \`-rpath's for a libtool library"
+
+      install_libdir="$1"
+
+      oldlibs=
+      if test -z "$rpath"; then
+	if test "$build_libtool_libs" = yes; then
+	  # Building a libtool convenience library.
+	  # Some compilers have problems with a `.al' extension, so
+	  # convenience libraries should have the same extension that an
+	  # archive normally would.
+	  oldlibs="$output_objdir/$libname.$libext $oldlibs"
+	  build_libtool_libs=convenience
+	  build_old_libs=yes
+	fi
+
+	test -n "$vinfo" && \
+	  func_warning "\`-version-info/-version-number' is ignored for convenience libraries"
+
+	test -n "$release" && \
+	  func_warning "\`-release' is ignored for convenience libraries"
+      else
+
+	# Parse the version information argument.
+	save_ifs="$IFS"; IFS=':'
+	set dummy $vinfo 0 0 0
+	shift
+	IFS="$save_ifs"
+
+	test -n "$7" && \
+	  func_fatal_help "too many parameters to \`-version-info'"
+
+	# convert absolute version numbers to libtool ages
+	# this retains compatibility with .la files and attempts
+	# to make the code below a bit more comprehensible
+
+	case $vinfo_number in
+	yes)
+	  number_major="$1"
+	  number_minor="$2"
+	  number_revision="$3"
+	  #
+	  # There are really only two kinds of scheme: those that use the
+	  # current interface number as the major version, and those that
+	  # subtract age from it and use age as a minor version.  But then
+	  # there is IRIX, which has an extra 1 added just for fun.
+	  #
+	  case $version_type in
+	  # correct linux to gnu/linux during the next big refactor
+	  darwin|linux|osf|windows|none)
+	    func_arith $number_major + $number_minor
+	    current=$func_arith_result
+	    age="$number_minor"
+	    revision="$number_revision"
+	    ;;
+	  freebsd-aout|freebsd-elf|qnx|sunos)
+	    current="$number_major"
+	    revision="$number_minor"
+	    age="0"
+	    ;;
+	  irix|nonstopux)
+	    func_arith $number_major + $number_minor
+	    current=$func_arith_result
+	    age="$number_minor"
+	    revision="$number_minor"
+	    lt_irix_increment=no
+	    ;;
+	  *)
+	    func_fatal_configuration "$modename: unknown library version type \`$version_type'"
+	    ;;
+	  esac
+	  ;;
+	no)
+	  current="$1"
+	  revision="$2"
+	  age="$3"
+	  ;;
+	esac
+
+	# Check that each of the things are valid numbers.
+	case $current in
+	0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
+	*)
+	  func_error "CURRENT \`$current' must be a nonnegative integer"
+	  func_fatal_error "\`$vinfo' is not valid version information"
+	  ;;
+	esac
+
+	case $revision in
+	0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
+	*)
+	  func_error "REVISION \`$revision' must be a nonnegative integer"
+	  func_fatal_error "\`$vinfo' is not valid version information"
+	  ;;
+	esac
+
+	case $age in
+	0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;;
+	*)
+	  func_error "AGE \`$age' must be a nonnegative integer"
+	  func_fatal_error "\`$vinfo' is not valid version information"
+	  ;;
+	esac
+
+	if test "$age" -gt "$current"; then
+	  func_error "AGE \`$age' is greater than the current interface number \`$current'"
+	  func_fatal_error "\`$vinfo' is not valid version information"
+	fi
+
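+	# Example: `-version-info 3:2:1' gives current=3, revision=2, age=1.
+	# Under the linux scheme below this yields major=.2 (current - age)
+	# and versuffix=.2.1.2, i.e. a real name like libfoo.so.2.1.2 whose
+	# soname is typically libfoo.so.2.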
+	# Calculate the version variables.
+	major=
+	versuffix=
+	verstring=
+	case $version_type in
+	none) ;;
+
+	darwin)
+	  # Like Linux, but with the current version available in
+	  # verstring for coding it into the library header
+	  func_arith $current - $age
+	  major=.$func_arith_result
+	  versuffix="$major.$age.$revision"
+	  # Darwin ld doesn't like 0 for these options...
+	  func_arith $current + 1
+	  minor_current=$func_arith_result
+	  xlcverstring="${wl}-compatibility_version ${wl}$minor_current ${wl}-current_version ${wl}$minor_current.$revision"
+	  verstring="-compatibility_version $minor_current -current_version $minor_current.$revision"
+	  ;;
+
+	freebsd-aout)
+	  major=".$current"
+	  versuffix=".$current.$revision"
+	  ;;
+
+	freebsd-elf)
+	  major=".$current"
+	  versuffix=".$current"
+	  ;;
+
+	irix | nonstopux)
+	  if test "X$lt_irix_increment" = "Xno"; then
+	    func_arith $current - $age
+	  else
+	    func_arith $current - $age + 1
+	  fi
+	  major=$func_arith_result
+
+	  case $version_type in
+	    nonstopux) verstring_prefix=nonstopux ;;
+	    *)         verstring_prefix=sgi ;;
+	  esac
+	  verstring="$verstring_prefix$major.$revision"
+
+	  # Add in all the interfaces that we are compatible with.
+	  loop=$revision
+	  while test "$loop" -ne 0; do
+	    func_arith $revision - $loop
+	    iface=$func_arith_result
+	    func_arith $loop - 1
+	    loop=$func_arith_result
+	    verstring="$verstring_prefix$major.$iface:$verstring"
+	  done
+
+	  # Before this point, $major must not contain `.'.
+	  major=.$major
+	  versuffix="$major.$revision"
+	  ;;
+
+	linux) # correct to gnu/linux during the next big refactor
+	  func_arith $current - $age
+	  major=.$func_arith_result
+	  versuffix="$major.$age.$revision"
+	  ;;
+
+	osf)
+	  func_arith $current - $age
+	  major=.$func_arith_result
+	  versuffix=".$current.$age.$revision"
+	  verstring="$current.$age.$revision"
+
+	  # Add in all the interfaces that we are compatible with.
+	  loop=$age
+	  while test "$loop" -ne 0; do
+	    func_arith $current - $loop
+	    iface=$func_arith_result
+	    func_arith $loop - 1
+	    loop=$func_arith_result
+	    verstring="$verstring:${iface}.0"
+	  done
+
+	  # Make executables depend on our current version.
+	  func_append verstring ":${current}.0"
+	  ;;
+
+	qnx)
+	  major=".$current"
+	  versuffix=".$current"
+	  ;;
+
+	sunos)
+	  major=".$current"
+	  versuffix=".$current.$revision"
+	  ;;
+
+	windows)
+	  # Use '-' rather than '.', since we only want one
+	  # extension on DOS 8.3 filesystems.
+	  func_arith $current - $age
+	  major=$func_arith_result
+	  versuffix="-$major"
+	  ;;
+
+	*)
+	  func_fatal_configuration "unknown library version type \`$version_type'"
+	  ;;
+	esac
+
+	# Clear the version info if we defaulted, and they specified a release.
+	if test -z "$vinfo" && test -n "$release"; then
+	  major=
+	  case $version_type in
+	  darwin)
+	    # we can't check for "0.0" in archive_cmds due to quoting
+	    # problems, so we reset it completely
+	    verstring=
+	    ;;
+	  *)
+	    verstring="0.0"
+	    ;;
+	  esac
+	  if test "$need_version" = no; then
+	    versuffix=
+	  else
+	    versuffix=".0.0"
+	  fi
+	fi
+
+	# Remove version info from name if versioning should be avoided
+	if test "$avoid_version" = yes && test "$need_version" = no; then
+	  major=
+	  versuffix=
+	  verstring=""
+	fi
+
+	# Check to see if the archive will have undefined symbols.
+	if test "$allow_undefined" = yes; then
+	  if test "$allow_undefined_flag" = unsupported; then
+	    func_warning "undefined symbols not allowed in $host shared libraries"
+	    build_libtool_libs=no
+	    build_old_libs=yes
+	  fi
+	else
+	  # Don't allow undefined symbols.
+	  allow_undefined_flag="$no_undefined_flag"
+	fi
+
+      fi
+
+      func_generate_dlsyms "$libname" "$libname" "yes"
+      func_append libobjs " $symfileobj"
+      test "X$libobjs" = "X " && libobjs=
+
+      if test "$opt_mode" != relink; then
+	# Remove our outputs, but don't remove object files since they
+	# may have been created when compiling PIC objects.
+	removelist=
+	tempremovelist=`$ECHO "$output_objdir/*"`
+	for p in $tempremovelist; do
+	  case $p in
+	    *.$objext | *.gcno)
+	       ;;
+	    $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*)
+	       if test "X$precious_files_regex" != "X"; then
+		 if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1
+		 then
+		   continue
+		 fi
+	       fi
+	       func_append removelist " $p"
+	       ;;
+	    *) ;;
+	  esac
+	done
+	test -n "$removelist" && \
+	  func_show_eval "${RM}r \$removelist"
+      fi
+
+      # Now set the variables for building old libraries.
+      if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then
+	func_append oldlibs " $output_objdir/$libname.$libext"
+
+	# Transform .lo files to .o files.
+	oldobjs="$objs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; $lo2o" | $NL2SP`
+      fi
+
+      # Eliminate all temporary directories.
+      #for path in $notinst_path; do
+      #	lib_search_path=`$ECHO "$lib_search_path " | $SED "s% $path % %g"`
+      #	deplibs=`$ECHO "$deplibs " | $SED "s% -L$path % %g"`
+      #	dependency_libs=`$ECHO "$dependency_libs " | $SED "s% -L$path % %g"`
+      #done
+
+      if test -n "$xrpath"; then
+	# If the user specified any rpath flags, then add them.
+	temp_xrpath=
+	for libdir in $xrpath; do
+	  func_replace_sysroot "$libdir"
+	  func_append temp_xrpath " -R$func_replace_sysroot_result"
+	  case "$finalize_rpath " in
+	  *" $libdir "*) ;;
+	  *) func_append finalize_rpath " $libdir" ;;
+	  esac
+	done
+	if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then
+	  dependency_libs="$temp_xrpath $dependency_libs"
+	fi
+      fi
+
+      # Make sure dlfiles contains only unique files that won't be dlpreopened
+      old_dlfiles="$dlfiles"
+      dlfiles=
+      for lib in $old_dlfiles; do
+	case " $dlprefiles $dlfiles " in
+	*" $lib "*) ;;
+	*) func_append dlfiles " $lib" ;;
+	esac
+      done
+
+      # Make sure dlprefiles contains only unique files
+      old_dlprefiles="$dlprefiles"
+      dlprefiles=
+      for lib in $old_dlprefiles; do
+	case "$dlprefiles " in
+	*" $lib "*) ;;
+	*) func_append dlprefiles " $lib" ;;
+	esac
+      done
+
+      if test "$build_libtool_libs" = yes; then
+	if test -n "$rpath"; then
+	  case $host in
+	  *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc* | *-*-haiku*)
+	    # these systems don't actually have a c library (as such)!
+	    ;;
+	  *-*-rhapsody* | *-*-darwin1.[012])
+	    # Rhapsody C library is in the System framework
+	    func_append deplibs " System.ltframework"
+	    ;;
+	  *-*-netbsd*)
+	    # Don't link with libc until the a.out ld.so is fixed.
+	    ;;
+	  *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*)
+	    # Do not include libc due to us having libc/libc_r.
+	    ;;
+	  *-*-sco3.2v5* | *-*-sco5v6*)
+	    # Causes problems with __ctype
+	    ;;
+	  *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*)
+	    # Compiler inserts libc in the correct place for threads to work
+	    ;;
+	  *)
+	    # Add libc to deplibs on all other systems if necessary.
+	    if test "$build_libtool_need_lc" = "yes"; then
+	      func_append deplibs " -lc"
+	    fi
+	    ;;
+	  esac
+	fi
+
+	# Transform deplibs into only deplibs that can be linked in shared.
+	name_save=$name
+	libname_save=$libname
+	release_save=$release
+	versuffix_save=$versuffix
+	major_save=$major
+	# It is unclear whether the release is treated correctly here: the
+	# release should show up in the -l name (i.e. -lgmp5), so we don't
+	# want to add it in twice.
+	release=""
+	versuffix=""
+	major=""
+	newdeplibs=
+	droppeddeps=no
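+	# $deplibs_check_method records how deplibs can be vetted on this
+	# host: pass_all trusts everything, test_compile probes with a
+	# trial link and ldd, file_magic and match_pattern inspect the
+	# candidate files, and none/unknown drops all inter-library
+	# dependencies.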
+	case $deplibs_check_method in
+	pass_all)
+	  # Don't check for shared/static.  Everything works.
+	  # This might be a little naive.  We might want to check
+	  # whether the library exists or not, but on osf3 & osf4 the
+	  # safe course is simply to implement what was already the
+	  # behavior.
+	  newdeplibs=$deplibs
+	  ;;
+	test_compile)
+	  # This code stresses the "libraries are programs" paradigm to its
+	  # limits. Maybe even breaks it.  We compile a program, linking it
+	  # against the deplibs as a proxy for the library.  Then we can check
+	  # whether they linked in statically or dynamically with ldd.
+	  $opt_dry_run || $RM conftest.c
+	  cat > conftest.c <<EOF
+	  int main() { return 0; }
+EOF
+	  $opt_dry_run || $RM conftest
+	  if $LTCC $LTCFLAGS -o conftest conftest.c $deplibs; then
+	    ldd_output=`ldd conftest`
+	    for i in $deplibs; do
+	      case $i in
+	      -l*)
+		func_stripname -l '' "$i"
+		name=$func_stripname_result
+		if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
+		  case " $predeps $postdeps " in
+		  *" $i "*)
+		    func_append newdeplibs " $i"
+		    i=""
+		    ;;
+		  esac
+		fi
+		if test -n "$i" ; then
+		  libname=`eval "\\$ECHO \"$libname_spec\""`
+		  deplib_matches=`eval "\\$ECHO \"$library_names_spec\""`
+		  set dummy $deplib_matches; shift
+		  deplib_match=$1
+		  if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
+		    func_append newdeplibs " $i"
+		  else
+		    droppeddeps=yes
+		    echo
+		    $ECHO "*** Warning: dynamic linker does not accept needed library $i."
+		    echo "*** I have the capability to make that library automatically link in when"
+		    echo "*** you link to this library.  But I can only do this if you have a"
+		    echo "*** shared version of the library, which I believe you do not have"
+		    echo "*** because a test_compile revealed that the linker did not use it for"
+		    echo "*** the dynamic dependency list that programs get resolved with at runtime."
+		  fi
+		fi
+		;;
+	      *)
+		func_append newdeplibs " $i"
+		;;
+	      esac
+	    done
+	  else
+	    # Error occurred in the first compile.  Let's try to salvage
+	    # the situation: Compile a separate program for each library.
+	    for i in $deplibs; do
+	      case $i in
+	      -l*)
+		func_stripname -l '' "$i"
+		name=$func_stripname_result
+		$opt_dry_run || $RM conftest
+		if $LTCC $LTCFLAGS -o conftest conftest.c $i; then
+		  ldd_output=`ldd conftest`
+		  if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
+		    case " $predeps $postdeps " in
+		    *" $i "*)
+		      func_append newdeplibs " $i"
+		      i=""
+		      ;;
+		    esac
+		  fi
+		  if test -n "$i" ; then
+		    libname=`eval "\\$ECHO \"$libname_spec\""`
+		    deplib_matches=`eval "\\$ECHO \"$library_names_spec\""`
+		    set dummy $deplib_matches; shift
+		    deplib_match=$1
+		    if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then
+		      func_append newdeplibs " $i"
+		    else
+		      droppeddeps=yes
+		      echo
+		      $ECHO "*** Warning: dynamic linker does not accept needed library $i."
+		      echo "*** I have the capability to make that library automatically link in when"
+		      echo "*** you link to this library.  But I can only do this if you have a"
+		      echo "*** shared version of the library, which you do not appear to have"
+		      echo "*** because a test_compile revealed that the linker did not use this one"
+		      echo "*** as a dynamic dependency that programs can get resolved with at runtime."
+		    fi
+		  fi
+		else
+		  droppeddeps=yes
+		  echo
+		  $ECHO "*** Warning!  Library $i is needed by this library but I was not able to"
+		  echo "*** make it link in!  You will probably need to install it or some"
+		  echo "*** library that it depends on before this library will be fully"
+		  echo "*** functional.  Installing it before continuing would be even better."
+		fi
+		;;
+	      *)
+		func_append newdeplibs " $i"
+		;;
+	      esac
+	    done
+	  fi
+	  ;;
+	file_magic*)
+	  set dummy $deplibs_check_method; shift
+	  file_magic_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"`
+	  for a_deplib in $deplibs; do
+	    case $a_deplib in
+	    -l*)
+	      func_stripname -l '' "$a_deplib"
+	      name=$func_stripname_result
+	      if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
+		case " $predeps $postdeps " in
+		*" $a_deplib "*)
+		  func_append newdeplibs " $a_deplib"
+		  a_deplib=""
+		  ;;
+		esac
+	      fi
+	      if test -n "$a_deplib" ; then
+		libname=`eval "\\$ECHO \"$libname_spec\""`
+		if test -n "$file_magic_glob"; then
+		  libnameglob=`func_echo_all "$libname" | $SED -e $file_magic_glob`
+		else
+		  libnameglob=$libname
+		fi
+		test "$want_nocaseglob" = yes && nocaseglob=`shopt -p nocaseglob`
+		for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
+		  if test "$want_nocaseglob" = yes; then
+		    shopt -s nocaseglob
+		    potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null`
+		    $nocaseglob
+		  else
+		    potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null`
+		  fi
+		  for potent_lib in $potential_libs; do
+		      # Follow soft links.
+		      if ls -lLd "$potent_lib" 2>/dev/null |
+			 $GREP " -> " >/dev/null; then
+			continue
+		      fi
+		      # The statement above tries to avoid entering an
+		      # endless loop below, in case of cyclic links.
+		      # We might still enter an endless loop, since a link
+		      # loop can be closed while we follow links, but that
+		      # is rare enough to ignore.
+		      potlib="$potent_lib"
+		      while test -h "$potlib" 2>/dev/null; do
+			potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'`
+			case $potliblink in
+			[\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";;
+			*) potlib=`$ECHO "$potlib" | $SED 's,[^/]*$,,'`"$potliblink";;
+			esac
+		      done
+		      if eval $file_magic_cmd \"\$potlib\" 2>/dev/null |
+			 $SED -e 10q |
+			 $EGREP "$file_magic_regex" > /dev/null; then
+			func_append newdeplibs " $a_deplib"
+			a_deplib=""
+			break 2
+		      fi
+		  done
+		done
+	      fi
+	      if test -n "$a_deplib" ; then
+		droppeddeps=yes
+		echo
+		$ECHO "*** Warning: linker path does not have real file for library $a_deplib."
+		echo "*** I have the capability to make that library automatically link in when"
+		echo "*** you link to this library.  But I can only do this if you have a"
+		echo "*** shared version of the library, which you do not appear to have"
+		echo "*** because I checked the linker path looking for a file starting"
+		if test -z "$potlib" ; then
+		  $ECHO "*** with $libname but no candidates were found. (...for file magic test)"
+		else
+		  $ECHO "*** with $libname and none of the candidates passed a file format test"
+		  $ECHO "*** using a file magic. Last file checked: $potlib"
+		fi
+	      fi
+	      ;;
+	    *)
+	      # Add a -L argument.
+	      func_append newdeplibs " $a_deplib"
+	      ;;
+	    esac
+	  done # Gone through all deplibs.
+	  ;;
+	match_pattern*)
+	  set dummy $deplibs_check_method; shift
+	  match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"`
+	  for a_deplib in $deplibs; do
+	    case $a_deplib in
+	    -l*)
+	      func_stripname -l '' "$a_deplib"
+	      name=$func_stripname_result
+	      if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
+		case " $predeps $postdeps " in
+		*" $a_deplib "*)
+		  func_append newdeplibs " $a_deplib"
+		  a_deplib=""
+		  ;;
+		esac
+	      fi
+	      if test -n "$a_deplib" ; then
+		libname=`eval "\\$ECHO \"$libname_spec\""`
+		for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do
+		  potential_libs=`ls $i/$libname[.-]* 2>/dev/null`
+		  for potent_lib in $potential_libs; do
+		    potlib="$potent_lib" # see symlink-check above in file_magic test
+		    if eval "\$ECHO \"$potent_lib\"" 2>/dev/null | $SED 10q | \
+		       $EGREP "$match_pattern_regex" > /dev/null; then
+		      func_append newdeplibs " $a_deplib"
+		      a_deplib=""
+		      break 2
+		    fi
+		  done
+		done
+	      fi
+	      if test -n "$a_deplib" ; then
+		droppeddeps=yes
+		echo
+		$ECHO "*** Warning: linker path does not have real file for library $a_deplib."
+		echo "*** I have the capability to make that library automatically link in when"
+		echo "*** you link to this library.  But I can only do this if you have a"
+		echo "*** shared version of the library, which you do not appear to have"
+		echo "*** because I checked the linker path looking for a file starting"
+		if test -z "$potlib" ; then
+		  $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)"
+		else
+		  $ECHO "*** with $libname and none of the candidates passed a file format test"
+		  $ECHO "*** using a regex pattern. Last file checked: $potlib"
+		fi
+	      fi
+	      ;;
+	    *)
+	      # Add a -L argument.
+	      func_append newdeplibs " $a_deplib"
+	      ;;
+	    esac
+	  done # Gone through all deplibs.
+	  ;;
+	none | unknown | *)
+	  newdeplibs=""
+	  tmp_deplibs=`$ECHO " $deplibs" | $SED 's/ -lc$//; s/ -[LR][^ ]*//g'`
+	  if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then
+	    for i in $predeps $postdeps ; do
+	      # can't use Xsed below, because $i might contain '/'
+	      tmp_deplibs=`$ECHO " $tmp_deplibs" | $SED "s,$i,,"`
+	    done
+	  fi
+	  case $tmp_deplibs in
+	  *[!\	\ ]*)
+	    echo
+	    if test "X$deplibs_check_method" = "Xnone"; then
+	      echo "*** Warning: inter-library dependencies are not supported on this platform."
+	    else
+	      echo "*** Warning: inter-library dependencies are not known to be supported."
+	    fi
+	    echo "*** All declared inter-library dependencies are being dropped."
+	    droppeddeps=yes
+	    ;;
+	  esac
+	  ;;
+	esac
+	versuffix=$versuffix_save
+	major=$major_save
+	release=$release_save
+	libname=$libname_save
+	name=$name_save
+
+	case $host in
+	*-*-rhapsody* | *-*-darwin1.[012])
+	  # On Rhapsody replace the C library with the System framework
+	  newdeplibs=`$ECHO " $newdeplibs" | $SED 's/ -lc / System.ltframework /'`
+	  ;;
+	esac
+
+	if test "$droppeddeps" = yes; then
+	  if test "$module" = yes; then
+	    echo
+	    echo "*** Warning: libtool could not satisfy all declared inter-library"
+	    $ECHO "*** dependencies of module $libname.  Therefore, libtool will create"
+	    echo "*** a static module, which should work as long as the dlopening"
+	    echo "*** application is linked with the -dlopen flag."
+	    if test -z "$global_symbol_pipe"; then
+	      echo
+	      echo "*** However, this would only work if libtool was able to extract symbol"
+	      echo "*** lists from a program, using \`nm' or equivalent, but libtool could"
+	      echo "*** not find such a program.  So, this module is probably useless."
+	      echo "*** \`nm' from GNU binutils and a full rebuild may help."
+	    fi
+	    if test "$build_old_libs" = no; then
+	      oldlibs="$output_objdir/$libname.$libext"
+	      build_libtool_libs=module
+	      build_old_libs=yes
+	    else
+	      build_libtool_libs=no
+	    fi
+	  else
+	    echo "*** The inter-library dependencies that have been dropped here will be"
+	    echo "*** automatically added whenever a program is linked with this library"
+	    echo "*** or is declared to -dlopen it."
+
+	    if test "$allow_undefined" = no; then
+	      echo
+	      echo "*** Since this library must not contain undefined symbols,"
+	      echo "*** because either the platform does not support them or"
+	      echo "*** it was explicitly requested with -no-undefined,"
+	      echo "*** libtool will only create a static version of it."
+	      if test "$build_old_libs" = no; then
+		oldlibs="$output_objdir/$libname.$libext"
+		build_libtool_libs=module
+		build_old_libs=yes
+	      else
+		build_libtool_libs=no
+	      fi
+	    fi
+	  fi
+	fi
+	# Done checking deplibs!
+	deplibs=$newdeplibs
+      fi
+      # Time to change all our "foo.ltframework" stuff back to "-framework foo"
+      case $host in
+	*-*-darwin*)
+	  newdeplibs=`$ECHO " $newdeplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+	  new_inherited_linker_flags=`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+	  deplibs=`$ECHO " $deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+	  ;;
+      esac
+
+      # move library search paths that coincide with paths to not yet
+      # installed libraries to the beginning of the library search list
+      new_libs=
+      for path in $notinst_path; do
+	case " $new_libs " in
+	*" -L$path/$objdir "*) ;;
+	*)
+	  case " $deplibs " in
+	  *" -L$path/$objdir "*)
+	    func_append new_libs " -L$path/$objdir" ;;
+	  esac
+	  ;;
+	esac
+      done
+      for deplib in $deplibs; do
+	case $deplib in
+	-L*)
+	  case " $new_libs " in
+	  *" $deplib "*) ;;
+	  *) func_append new_libs " $deplib" ;;
+	  esac
+	  ;;
+	*) func_append new_libs " $deplib" ;;
+	esac
+      done
+      deplibs="$new_libs"
+
+      # All the library-specific variables (install_libdir is set above).
+      library_names=
+      old_library=
+      dlname=
+
+      # Test again, we may have decided not to build it any more
+      if test "$build_libtool_libs" = yes; then
+	# Remove ${wl} instances when linking with ld.
+	# FIXME: should test the right _cmds variable.
+	case $archive_cmds in
+	  *\$LD\ *) wl= ;;
+        esac
+	if test "$hardcode_into_libs" = yes; then
+	  # Hardcode the library paths
+	  hardcode_libdirs=
+	  dep_rpath=
+	  rpath="$finalize_rpath"
+	  test "$opt_mode" != relink && rpath="$compile_rpath$rpath"
+	  for libdir in $rpath; do
+	    if test -n "$hardcode_libdir_flag_spec"; then
+	      if test -n "$hardcode_libdir_separator"; then
+		func_replace_sysroot "$libdir"
+		libdir=$func_replace_sysroot_result
+		if test -z "$hardcode_libdirs"; then
+		  hardcode_libdirs="$libdir"
+		else
+		  # Just accumulate the unique libdirs.
+		  case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
+		  *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+		    ;;
+		  *)
+		    func_append hardcode_libdirs "$hardcode_libdir_separator$libdir"
+		    ;;
+		  esac
+		fi
+	      else
+		eval flag=\"$hardcode_libdir_flag_spec\"
+		func_append dep_rpath " $flag"
+	      fi
+	    elif test -n "$runpath_var"; then
+	      case "$perm_rpath " in
+	      *" $libdir "*) ;;
+	      *) func_append perm_rpath " $libdir" ;;
+	      esac
+	    fi
+	  done
+	  # Substitute the hardcoded libdirs into the rpath.
+	  if test -n "$hardcode_libdir_separator" &&
+	     test -n "$hardcode_libdirs"; then
+	    libdir="$hardcode_libdirs"
+	    eval "dep_rpath=\"$hardcode_libdir_flag_spec\""
+	  fi
+	  if test -n "$runpath_var" && test -n "$perm_rpath"; then
+	    # We should set the runpath_var.
+	    rpath=
+	    for dir in $perm_rpath; do
+	      func_append rpath "$dir:"
+	    done
+	    eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var"
+	  fi
+	  test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs"
+	fi
+
+	shlibpath="$finalize_shlibpath"
+	test "$opt_mode" != relink && shlibpath="$compile_shlibpath$shlibpath"
+	if test -n "$shlibpath"; then
+	  eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var"
+	fi
+
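+	# For example, on GNU/Linux $library_names_spec typically expands to
+	# something like `libfoo.so.2.1.2 libfoo.so.2 libfoo.so', so the
+	# first (fully versioned) name becomes $realname.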
+	# Get the real and link names of the library.
+	eval shared_ext=\"$shrext_cmds\"
+	eval library_names=\"$library_names_spec\"
+	set dummy $library_names
+	shift
+	realname="$1"
+	shift
+
+	if test -n "$soname_spec"; then
+	  eval soname=\"$soname_spec\"
+	else
+	  soname="$realname"
+	fi
+	if test -z "$dlname"; then
+	  dlname=$soname
+	fi
+
+	lib="$output_objdir/$realname"
+	linknames=
+	for link
+	do
+	  func_append linknames " $link"
+	done
+
+	# Use standard objects if they are pic
+	test -z "$pic_flag" && libobjs=`$ECHO "$libobjs" | $SP2NL | $SED "$lo2o" | $NL2SP`
+	test "X$libobjs" = "X " && libobjs=
+
+	delfiles=
+	if test -n "$export_symbols" && test -n "$include_expsyms"; then
+	  $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp"
+	  export_symbols="$output_objdir/$libname.uexp"
+	  func_append delfiles " $export_symbols"
+	fi
+
+	orig_export_symbols=
+	case $host_os in
+	cygwin* | mingw* | cegcc*)
+	  if test -n "$export_symbols" && test -z "$export_symbols_regex"; then
+	    # exporting using user supplied symfile
+	    if test "x`$SED 1q $export_symbols`" != xEXPORTS; then
+	      # and it's NOT already a .def file. Must figure out
+	      # which of the given symbols are data symbols and tag
+	      # them as such. So, trigger use of export_symbols_cmds.
+	      # export_symbols gets reassigned inside the "prepare
+	      # the list of exported symbols" if statement, so the
+	      # include_expsyms logic still works.
+	      orig_export_symbols="$export_symbols"
+	      export_symbols=
+	      always_export_symbols=yes
+	    fi
+	  fi
+	  ;;
+	esac
+
+	# Prepare the list of exported symbols
+	if test -z "$export_symbols"; then
+	  if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then
+	    func_verbose "generating symbol list for \`$libname.la'"
+	    export_symbols="$output_objdir/$libname.exp"
+	    $opt_dry_run || $RM $export_symbols
+	    cmds=$export_symbols_cmds
+	    save_ifs="$IFS"; IFS='~'
+	    for cmd1 in $cmds; do
+	      IFS="$save_ifs"
+	      # Take the normal branch if the nm_file_list_spec branch
+	      # doesn't work or if tool conversion is not needed.
+	      case $nm_file_list_spec~$to_tool_file_cmd in
+		*~func_convert_file_noop | *~func_convert_file_msys_to_w32 | ~*)
+		  try_normal_branch=yes
+		  eval cmd=\"$cmd1\"
+		  func_len " $cmd"
+		  len=$func_len_result
+		  ;;
+		*)
+		  try_normal_branch=no
+		  ;;
+	      esac
+	      if test "$try_normal_branch" = yes \
+		 && { test "$len" -lt "$max_cmd_len" \
+		      || test "$max_cmd_len" -le -1; }
+	      then
+		func_show_eval "$cmd" 'exit $?'
+		skipped_export=false
+	      elif test -n "$nm_file_list_spec"; then
+		func_basename "$output"
+		output_la=$func_basename_result
+		save_libobjs=$libobjs
+		save_output=$output
+		output=${output_objdir}/${output_la}.nm
+		func_to_tool_file "$output"
+		libobjs=$nm_file_list_spec$func_to_tool_file_result
+		func_append delfiles " $output"
+		func_verbose "creating $NM input file list: $output"
+		for obj in $save_libobjs; do
+		  func_to_tool_file "$obj"
+		  $ECHO "$func_to_tool_file_result"
+		done > "$output"
+		eval cmd=\"$cmd1\"
+		func_show_eval "$cmd" 'exit $?'
+		output=$save_output
+		libobjs=$save_libobjs
+		skipped_export=false
+	      else
+		# The command line is too long to execute in one step.
+		func_verbose "using reloadable object file for export list..."
+		skipped_export=:
+		# Break out early, otherwise skipped_export may be
+		# set to false by a later but shorter cmd.
+		break
+	      fi
+	    done
+	    IFS="$save_ifs"
+	    if test -n "$export_symbols_regex" && test "X$skipped_export" != "X:"; then
+	      func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"'
+	      func_show_eval '$MV "${export_symbols}T" "$export_symbols"'
+	    fi
+	  fi
+	fi
+
+	if test -n "$export_symbols" && test -n "$include_expsyms"; then
+	  tmp_export_symbols="$export_symbols"
+	  test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols"
+	  $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"'
+	fi
+
+	if test "X$skipped_export" != "X:" && test -n "$orig_export_symbols"; then
+	  # The given exports_symbols file has to be filtered, so filter it.
+	  func_verbose "filter symbol list for \`$libname.la' to tag DATA exports"
+	  # FIXME: $output_objdir/$libname.filter potentially contains lots of
+	  # 's' commands which not all seds can handle. GNU sed should be fine
+	  # though. Also, the filter scales superlinearly with the number of
+	  # global variables. join(1) would be nice here, but unfortunately
+	  # isn't a blessed tool.
+	  $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter
+	  func_append delfiles " $export_symbols $output_objdir/$libname.filter"
+	  export_symbols=$output_objdir/$libname.def
+	  $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols
+	fi
+
+	tmp_deplibs=
+	for test_deplib in $deplibs; do
+	  case " $convenience " in
+	  *" $test_deplib "*) ;;
+	  *)
+	    func_append tmp_deplibs " $test_deplib"
+	    ;;
+	  esac
+	done
+	deplibs="$tmp_deplibs"
+
+	if test -n "$convenience"; then
+	  if test -n "$whole_archive_flag_spec" &&
+	    test "$compiler_needs_object" = yes &&
+	    test -z "$libobjs"; then
+	    # extract the archives, so we have objects to list.
+	    # TODO: could optimize this to just extract one archive.
+	    whole_archive_flag_spec=
+	  fi
+	  if test -n "$whole_archive_flag_spec"; then
+	    save_libobjs=$libobjs
+	    eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
+	    test "X$libobjs" = "X " && libobjs=
+	  else
+	    gentop="$output_objdir/${outputname}x"
+	    func_append generated " $gentop"
+
+	    func_extract_archives $gentop $convenience
+	    func_append libobjs " $func_extract_archives_result"
+	    test "X$libobjs" = "X " && libobjs=
+	  fi
+	fi
+
+	if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then
+	  eval flag=\"$thread_safe_flag_spec\"
+	  func_append linker_flags " $flag"
+	fi
+
+	# Make a backup of the uninstalled library when relinking
+	if test "$opt_mode" = relink; then
+	  $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $?
+	fi
+
+	# Do each of the archive commands.
+	if test "$module" = yes && test -n "$module_cmds" ; then
+	  if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then
+	    eval test_cmds=\"$module_expsym_cmds\"
+	    cmds=$module_expsym_cmds
+	  else
+	    eval test_cmds=\"$module_cmds\"
+	    cmds=$module_cmds
+	  fi
+	else
+	  if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
+	    eval test_cmds=\"$archive_expsym_cmds\"
+	    cmds=$archive_expsym_cmds
+	  else
+	    eval test_cmds=\"$archive_cmds\"
+	    cmds=$archive_cmds
+	  fi
+	fi
+
+	if test "X$skipped_export" != "X:" &&
+	   func_len " $test_cmds" &&
+	   len=$func_len_result &&
+	   test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then
+	  :
+	else
+	  # The command line is too long to link in one step, link piecewise
+	  # or, if using GNU ld and skipped_export is not :, use a linker
+	  # script.
+
+	  # Save the value of $output and $libobjs because we want to
+	  # use them later.  If we have whole_archive_flag_spec, we
+	  # want to use save_libobjs as it was before
+	  # whole_archive_flag_spec was expanded, because we can't
+	  # assume the linker understands whole_archive_flag_spec.
+	  # This may have to be revisited, in case too many
+	  # convenience libraries get linked in and end up exceeding
+	  # the spec.
+	  if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then
+	    save_libobjs=$libobjs
+	  fi
+	  save_output=$output
+	  func_basename "$output"
+	  output_la=$func_basename_result
+
+	  # Clear the reloadable object creation command queue and
+	  # initialize k to one.
+	  test_cmds=
+	  concat_cmds=
+	  objlist=
+	  last_robj=
+	  k=1
+
+	  if test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "$with_gnu_ld" = yes; then
+	    output=${output_objdir}/${output_la}.lnkscript
+	    func_verbose "creating GNU ld script: $output"
+	    echo 'INPUT (' > $output
+	    for obj in $save_libobjs
+	    do
+	      func_to_tool_file "$obj"
+	      $ECHO "$func_to_tool_file_result" >> $output
+	    done
+	    echo ')' >> $output
+	    func_append delfiles " $output"
+	    func_to_tool_file "$output"
+	    output=$func_to_tool_file_result
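+	    # Editor's note (illustration only): the generated script simply
+	    # lists every object for GNU ld to read, e.g.:
+	    #   INPUT (
+	    #   foo.o
+	    #   bar.o
+	    #   )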
+	  elif test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "X$file_list_spec" != X; then
+	    output=${output_objdir}/${output_la}.lnk
+	    func_verbose "creating linker input file list: $output"
+	    : > $output
+	    set x $save_libobjs
+	    shift
+	    firstobj=
+	    if test "$compiler_needs_object" = yes; then
+	      firstobj="$1 "
+	      shift
+	    fi
+	    for obj
+	    do
+	      func_to_tool_file "$obj"
+	      $ECHO "$func_to_tool_file_result" >> $output
+	    done
+	    func_append delfiles " $output"
+	    func_to_tool_file "$output"
+	    output=$firstobj\"$file_list_spec$func_to_tool_file_result\"
+	  else
+	    if test -n "$save_libobjs"; then
+	      func_verbose "creating reloadable object files..."
+	      output=$output_objdir/$output_la-${k}.$objext
+	      eval test_cmds=\"$reload_cmds\"
+	      func_len " $test_cmds"
+	      len0=$func_len_result
+	      len=$len0
+
+	      # Loop over the list of objects to be linked.
+	      for obj in $save_libobjs
+	      do
+		func_len " $obj"
+		func_arith $len + $func_len_result
+		len=$func_arith_result
+		if test "X$objlist" = X ||
+		   test "$len" -lt "$max_cmd_len"; then
+		  func_append objlist " $obj"
+		else
+		  # The command $test_cmds is almost too long, add a
+		  # command to the queue.
+		  if test "$k" -eq 1 ; then
+		    # The first file doesn't have a previous command to add.
+		    reload_objs=$objlist
+		    eval concat_cmds=\"$reload_cmds\"
+		  else
+		    # All subsequent reloadable object files will link in
+		    # the last one created.
+		    reload_objs="$objlist $last_robj"
+		    eval concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\"
+		  fi
+		  last_robj=$output_objdir/$output_la-${k}.$objext
+		  func_arith $k + 1
+		  k=$func_arith_result
+		  output=$output_objdir/$output_la-${k}.$objext
+		  objlist=" $obj"
+		  func_len " $last_robj"
+		  func_arith $len0 + $func_len_result
+		  len=$func_arith_result
+		fi
+	      done
+	      # Handle the remaining objects by creating one last
+	      # reloadable object file.  All subsequent reloadable object
+	      # files will link in the last one created.
+	      test -z "$concat_cmds" || concat_cmds=$concat_cmds~
+	      reload_objs="$objlist $last_robj"
+	      eval concat_cmds=\"\${concat_cmds}$reload_cmds\"
+	      if test -n "$last_robj"; then
+	        eval concat_cmds=\"\${concat_cmds}~\$RM $last_robj\"
+	      fi
+	      func_append delfiles " $output"
+
+	    else
+	      output=
+	    fi
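+	    # Editor's note (rough illustration, assuming reload_cmds of the
+	    # usual "$LD -r -o $output$reload_objs" shape): the queue built
+	    # above looks like
+	    #   ld -r -o la-1.o a.o b.o~ld -r -o la-2.o c.o la-1.o~rm la-1.o
+	    # so each reloadable piece links in its predecessor, which is
+	    # then deleted.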
+
+	    if ${skipped_export-false}; then
+	      func_verbose "generating symbol list for \`$libname.la'"
+	      export_symbols="$output_objdir/$libname.exp"
+	      $opt_dry_run || $RM $export_symbols
+	      libobjs=$output
+	      # Append the command to create the export file.
+	      test -z "$concat_cmds" || concat_cmds=$concat_cmds~
+	      eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\"
+	      if test -n "$last_robj"; then
+		eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\"
+	      fi
+	    fi
+
+	    test -n "$save_libobjs" &&
+	      func_verbose "creating a temporary reloadable object file: $output"
+
+	    # Loop through the commands generated above and execute them.
+	    save_ifs="$IFS"; IFS='~'
+	    for cmd in $concat_cmds; do
+	      IFS="$save_ifs"
+	      $opt_silent || {
+		  func_quote_for_expand "$cmd"
+		  eval "func_echo $func_quote_for_expand_result"
+	      }
+	      $opt_dry_run || eval "$cmd" || {
+		lt_exit=$?
+
+		# Restore the uninstalled library and exit
+		if test "$opt_mode" = relink; then
+		  ( cd "$output_objdir" && \
+		    $RM "${realname}T" && \
+		    $MV "${realname}U" "$realname" )
+		fi
+
+		exit $lt_exit
+	      }
+	    done
+	    IFS="$save_ifs"
+
+	    if test -n "$export_symbols_regex" && ${skipped_export-false}; then
+	      func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"'
+	      func_show_eval '$MV "${export_symbols}T" "$export_symbols"'
+	    fi
+	  fi
+
+	  if ${skipped_export-false}; then
+	    if test -n "$export_symbols" && test -n "$include_expsyms"; then
+	      tmp_export_symbols="$export_symbols"
+	      test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols"
+	      $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"'
+	    fi
+
+	    if test -n "$orig_export_symbols"; then
+	      # The given export_symbols file has to be filtered, so filter it.
+	      func_verbose "filter symbol list for \`$libname.la' to tag DATA exports"
+	      # FIXME: $output_objdir/$libname.filter potentially contains lots of
+	      # 's' commands which not all seds can handle. GNU sed should be fine
+	      # though. Also, the filter scales superlinearly with the number of
+	      # global variables. join(1) would be nice here, but unfortunately
+	      # isn't a blessed tool.
+	      $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter
+	      func_append delfiles " $export_symbols $output_objdir/$libname.filter"
+	      export_symbols=$output_objdir/$libname.def
+	      $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols
+	    fi
+	  fi
+
+	  libobjs=$output
+	  # Restore the value of output.
+	  output=$save_output
+
+	  if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then
+	    eval libobjs=\"\$libobjs $whole_archive_flag_spec\"
+	    test "X$libobjs" = "X " && libobjs=
+	  fi
+	  # Expand the library linking commands again to reset the
+	  # value of $libobjs for piecewise linking.
+
+	  # Do each of the archive commands.
+	  if test "$module" = yes && test -n "$module_cmds" ; then
+	    if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then
+	      cmds=$module_expsym_cmds
+	    else
+	      cmds=$module_cmds
+	    fi
+	  else
+	    if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then
+	      cmds=$archive_expsym_cmds
+	    else
+	      cmds=$archive_cmds
+	    fi
+	  fi
+	fi
+
+	if test -n "$delfiles"; then
+	  # Append the command to remove temporary files to $cmds.
+	  eval cmds=\"\$cmds~\$RM $delfiles\"
+	fi
+
+	# Add any objects from preloaded convenience libraries
+	if test -n "$dlprefiles"; then
+	  gentop="$output_objdir/${outputname}x"
+	  func_append generated " $gentop"
+
+	  func_extract_archives $gentop $dlprefiles
+	  func_append libobjs " $func_extract_archives_result"
+	  test "X$libobjs" = "X " && libobjs=
+	fi
+
+	save_ifs="$IFS"; IFS='~'
+	for cmd in $cmds; do
+	  IFS="$save_ifs"
+	  eval cmd=\"$cmd\"
+	  $opt_silent || {
+	    func_quote_for_expand "$cmd"
+	    eval "func_echo $func_quote_for_expand_result"
+	  }
+	  $opt_dry_run || eval "$cmd" || {
+	    lt_exit=$?
+
+	    # Restore the uninstalled library and exit
+	    if test "$opt_mode" = relink; then
+	      ( cd "$output_objdir" && \
+	        $RM "${realname}T" && \
+		$MV "${realname}U" "$realname" )
+	    fi
+
+	    exit $lt_exit
+	  }
+	done
+	IFS="$save_ifs"
+
+	# Restore the uninstalled library and exit
+	if test "$opt_mode" = relink; then
+	  $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $?
+
+	  if test -n "$convenience"; then
+	    if test -z "$whole_archive_flag_spec"; then
+	      func_show_eval '${RM}r "$gentop"'
+	    fi
+	  fi
+
+	  exit $EXIT_SUCCESS
+	fi
+
+	# Create links to the real library.
+	for linkname in $linknames; do
+	  if test "$realname" != "$linkname"; then
+	    func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?'
+	  fi
+	done
+
+	# If -module or -export-dynamic was specified, set the dlname.
+	if test "$module" = yes || test "$export_dynamic" = yes; then
+	  # On all known operating systems, these are identical.
+	  dlname="$soname"
+	fi
+      fi
+      ;;
+
+    obj)
+      if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then
+	func_warning "\`-dlopen' is ignored for objects"
+      fi
+
+      case " $deplibs" in
+      *\ -l* | *\ -L*)
+	func_warning "\`-l' and \`-L' are ignored for objects" ;;
+      esac
+
+      test -n "$rpath" && \
+	func_warning "\`-rpath' is ignored for objects"
+
+      test -n "$xrpath" && \
+	func_warning "\`-R' is ignored for objects"
+
+      test -n "$vinfo" && \
+	func_warning "\`-version-info' is ignored for objects"
+
+      test -n "$release" && \
+	func_warning "\`-release' is ignored for objects"
+
+      case $output in
+      *.lo)
+	test -n "$objs$old_deplibs" && \
+	  func_fatal_error "cannot build library object \`$output' from non-libtool objects"
+
+	libobj=$output
+	func_lo2o "$libobj"
+	obj=$func_lo2o_result
+	;;
+      *)
+	libobj=
+	obj="$output"
+	;;
+      esac
+
+      # Delete the old objects.
+      $opt_dry_run || $RM $obj $libobj
+
+      # Objects from convenience libraries.  This assumes
+      # single-version convenience libraries.  Whenever we create
+      # different ones for PIC/non-PIC, we'll have to duplicate
+      # the extraction.
+      reload_conv_objs=
+      gentop=
+      # reload_cmds runs $LD directly, so let us get rid of
+      # -Wl from whole_archive_flag_spec and hope we can get by with
+      # turning commas into spaces.
+      wl=
+
+      if test -n "$convenience"; then
+	if test -n "$whole_archive_flag_spec"; then
+	  eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\"
+	  reload_conv_objs=$reload_objs\ `$ECHO "$tmp_whole_archive_flags" | $SED 's|,| |g'`
+	else
+	  gentop="$output_objdir/${obj}x"
+	  func_append generated " $gentop"
+
+	  func_extract_archives $gentop $convenience
+	  reload_conv_objs="$reload_objs $func_extract_archives_result"
+	fi
+      fi
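+      # Editor's note (illustration): with wl cleared, a flag spec that
+      # would expand to "-Wl,--whole-archive conv.a -Wl,--no-whole-archive"
+      # for the compiler driver loses its "-Wl," prefixes, and the
+      # $SED 's|,| |g' above turns any leftover commas into spaces, since
+      # reload_cmds hands these flags to $LD directly.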
+
+      # If we're not building shared, we need to use non_pic_objs
+      test "$build_libtool_libs" != yes && libobjs="$non_pic_objects"
+
+      # Create the old-style object.
+      reload_objs="$objs$old_deplibs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; /\.lib$/d; $lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test
+
+      output="$obj"
+      func_execute_cmds "$reload_cmds" 'exit $?'
+
+      # Exit if we aren't doing a library object file.
+      if test -z "$libobj"; then
+	if test -n "$gentop"; then
+	  func_show_eval '${RM}r "$gentop"'
+	fi
+
+	exit $EXIT_SUCCESS
+      fi
+
+      if test "$build_libtool_libs" != yes; then
+	if test -n "$gentop"; then
+	  func_show_eval '${RM}r "$gentop"'
+	fi
+
+	# Create an invalid libtool object if no PIC, so that we don't
+	# accidentally link it into a program.
+	# $show "echo timestamp > $libobj"
+	# $opt_dry_run || eval "echo timestamp > $libobj" || exit $?
+	exit $EXIT_SUCCESS
+      fi
+
+      if test -n "$pic_flag" || test "$pic_mode" != default; then
+	# Only do commands if we really have different PIC objects.
+	reload_objs="$libobjs $reload_conv_objs"
+	output="$libobj"
+	func_execute_cmds "$reload_cmds" 'exit $?'
+      fi
+
+      if test -n "$gentop"; then
+	func_show_eval '${RM}r "$gentop"'
+      fi
+
+      exit $EXIT_SUCCESS
+      ;;
+
+    prog)
+      case $host in
+	*cygwin*) func_stripname '' '.exe' "$output"
+	          output=$func_stripname_result.exe;;
+      esac
+      test -n "$vinfo" && \
+	func_warning "\`-version-info' is ignored for programs"
+
+      test -n "$release" && \
+	func_warning "\`-release' is ignored for programs"
+
+      test "$preload" = yes \
+        && test "$dlopen_support" = unknown \
+	&& test "$dlopen_self" = unknown \
+	&& test "$dlopen_self_static" = unknown && \
+	  func_warning "\`LT_INIT([dlopen])' not used. Assuming no dlopen support."
+
+      case $host in
+      *-*-rhapsody* | *-*-darwin1.[012])
+	# On Rhapsody, replace the C library with the System framework
+	compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's/ -lc / System.ltframework /'`
+	finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's/ -lc / System.ltframework /'`
+	;;
+      esac
+
+      case $host in
+      *-*-darwin*)
+	# Don't allow lazy linking: it breaks C++ global constructors,
+	# but it is supposedly fixed on 10.4 or later (yay!).
+	if test "$tagname" = CXX ; then
+	  case ${MACOSX_DEPLOYMENT_TARGET-10.0} in
+	    10.[0123])
+	      func_append compile_command " ${wl}-bind_at_load"
+	      func_append finalize_command " ${wl}-bind_at_load"
+	    ;;
+	  esac
+	fi
+	# Time to change all our "foo.ltframework" stuff back to "-framework foo"
+	compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+	finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'`
+	;;
+      esac
+
+
+      # move library search paths that coincide with paths to not yet
+      # installed libraries to the beginning of the library search list
+      new_libs=
+      for path in $notinst_path; do
+	case " $new_libs " in
+	*" -L$path/$objdir "*) ;;
+	*)
+	  case " $compile_deplibs " in
+	  *" -L$path/$objdir "*)
+	    func_append new_libs " -L$path/$objdir" ;;
+	  esac
+	  ;;
+	esac
+      done
+      for deplib in $compile_deplibs; do
+	case $deplib in
+	-L*)
+	  case " $new_libs " in
+	  *" $deplib "*) ;;
+	  *) func_append new_libs " $deplib" ;;
+	  esac
+	  ;;
+	*) func_append new_libs " $deplib" ;;
+	esac
+      done
+      compile_deplibs="$new_libs"
+
+
+      func_append compile_command " $compile_deplibs"
+      func_append finalize_command " $finalize_deplibs"
+
+      if test -n "$rpath$xrpath"; then
+	# If the user specified any rpath flags, then add them.
+	for libdir in $rpath $xrpath; do
+	  # This is the magic to use -rpath.
+	  case "$finalize_rpath " in
+	  *" $libdir "*) ;;
+	  *) func_append finalize_rpath " $libdir" ;;
+	  esac
+	done
+      fi
+
+      # Now hardcode the library paths
+      rpath=
+      hardcode_libdirs=
+      for libdir in $compile_rpath $finalize_rpath; do
+	if test -n "$hardcode_libdir_flag_spec"; then
+	  if test -n "$hardcode_libdir_separator"; then
+	    if test -z "$hardcode_libdirs"; then
+	      hardcode_libdirs="$libdir"
+	    else
+	      # Just accumulate the unique libdirs.
+	      case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
+	      *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+		;;
+	      *)
+		func_append hardcode_libdirs "$hardcode_libdir_separator$libdir"
+		;;
+	      esac
+	    fi
+	  else
+	    eval flag=\"$hardcode_libdir_flag_spec\"
+	    func_append rpath " $flag"
+	  fi
+	elif test -n "$runpath_var"; then
+	  case "$perm_rpath " in
+	  *" $libdir "*) ;;
+	  *) func_append perm_rpath " $libdir" ;;
+	  esac
+	fi
+	case $host in
+	*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*)
+	  testbindir=`${ECHO} "$libdir" | ${SED} -e 's*/lib$*/bin*'`
+	  case :$dllsearchpath: in
+	  *":$libdir:"*) ;;
+	  ::) dllsearchpath=$libdir;;
+	  *) func_append dllsearchpath ":$libdir";;
+	  esac
+	  case :$dllsearchpath: in
+	  *":$testbindir:"*) ;;
+	  ::) dllsearchpath=$testbindir;;
+	  *) func_append dllsearchpath ":$testbindir";;
+	  esac
+	  ;;
+	esac
+      done
+      # Substitute the hardcoded libdirs into the rpath.
+      if test -n "$hardcode_libdir_separator" &&
+	 test -n "$hardcode_libdirs"; then
+	libdir="$hardcode_libdirs"
+	eval rpath=\" $hardcode_libdir_flag_spec\"
+      fi
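+      # Editor's note (illustration, assuming a GNU ld style configuration
+      # with hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' and a ":"
+      # separator): /usr/local/lib and /opt/lib accumulate into
+      # hardcode_libdirs="/usr/local/lib:/opt/lib" and expand here into a
+      # single "-Wl,-rpath -Wl,/usr/local/lib:/opt/lib" flag.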
+      compile_rpath="$rpath"
+
+      rpath=
+      hardcode_libdirs=
+      for libdir in $finalize_rpath; do
+	if test -n "$hardcode_libdir_flag_spec"; then
+	  if test -n "$hardcode_libdir_separator"; then
+	    if test -z "$hardcode_libdirs"; then
+	      hardcode_libdirs="$libdir"
+	    else
+	      # Just accumulate the unique libdirs.
+	      case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in
+	      *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*)
+		;;
+	      *)
+		func_append hardcode_libdirs "$hardcode_libdir_separator$libdir"
+		;;
+	      esac
+	    fi
+	  else
+	    eval flag=\"$hardcode_libdir_flag_spec\"
+	    func_append rpath " $flag"
+	  fi
+	elif test -n "$runpath_var"; then
+	  case "$finalize_perm_rpath " in
+	  *" $libdir "*) ;;
+	  *) func_append finalize_perm_rpath " $libdir" ;;
+	  esac
+	fi
+      done
+      # Substitute the hardcoded libdirs into the rpath.
+      if test -n "$hardcode_libdir_separator" &&
+	 test -n "$hardcode_libdirs"; then
+	libdir="$hardcode_libdirs"
+	eval rpath=\" $hardcode_libdir_flag_spec\"
+      fi
+      finalize_rpath="$rpath"
+
+      if test -n "$libobjs" && test "$build_old_libs" = yes; then
+	# Transform all the library objects into standard objects.
+	compile_command=`$ECHO "$compile_command" | $SP2NL | $SED "$lo2o" | $NL2SP`
+	finalize_command=`$ECHO "$finalize_command" | $SP2NL | $SED "$lo2o" | $NL2SP`
+      fi
+
+      func_generate_dlsyms "$outputname" "@PROGRAM@" "no"
+
+      # template prelinking step
+      if test -n "$prelink_cmds"; then
+	func_execute_cmds "$prelink_cmds" 'exit $?'
+      fi
+
+      wrappers_required=yes
+      case $host in
+      *cegcc* | *mingw32ce*)
+        # Disable wrappers for cegcc and mingw32ce hosts; we are cross-compiling anyway.
+        wrappers_required=no
+        ;;
+      *cygwin* | *mingw* )
+        if test "$build_libtool_libs" != yes; then
+          wrappers_required=no
+        fi
+        ;;
+      *)
+        if test "$need_relink" = no || test "$build_libtool_libs" != yes; then
+          wrappers_required=no
+        fi
+        ;;
+      esac
+      if test "$wrappers_required" = no; then
+	# Replace the output file specification.
+	compile_command=`$ECHO "$compile_command" | $SED 's%@OUTPUT@%'"$output"'%g'`
+	link_command="$compile_command$compile_rpath"
+
+	# We have no uninstalled library dependencies, so finalize right now.
+	exit_status=0
+	func_show_eval "$link_command" 'exit_status=$?'
+
+	if test -n "$postlink_cmds"; then
+	  func_to_tool_file "$output"
+	  postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'`
+	  func_execute_cmds "$postlink_cmds" 'exit $?'
+	fi
+
+	# Delete the generated files.
+	if test -f "$output_objdir/${outputname}S.${objext}"; then
+	  func_show_eval '$RM "$output_objdir/${outputname}S.${objext}"'
+	fi
+
+	exit $exit_status
+      fi
+
+      if test -n "$compile_shlibpath$finalize_shlibpath"; then
+	compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command"
+      fi
+      if test -n "$finalize_shlibpath"; then
+	finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command"
+      fi
+
+      compile_var=
+      finalize_var=
+      if test -n "$runpath_var"; then
+	if test -n "$perm_rpath"; then
+	  # We should set the runpath_var.
+	  rpath=
+	  for dir in $perm_rpath; do
+	    func_append rpath "$dir:"
+	  done
+	  compile_var="$runpath_var=\"$rpath\$$runpath_var\" "
+	fi
+	if test -n "$finalize_perm_rpath"; then
+	  # We should set the runpath_var.
+	  rpath=
+	  for dir in $finalize_perm_rpath; do
+	    func_append rpath "$dir:"
+	  done
+	  finalize_var="$runpath_var=\"$rpath\$$runpath_var\" "
+	fi
+      fi
+
+      if test "$no_install" = yes; then
+	# We don't need to create a wrapper script.
+	link_command="$compile_var$compile_command$compile_rpath"
+	# Replace the output file specification.
+	link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output"'%g'`
+	# Delete the old output file.
+	$opt_dry_run || $RM $output
+	# Link the executable and exit
+	func_show_eval "$link_command" 'exit $?'
+
+	if test -n "$postlink_cmds"; then
+	  func_to_tool_file "$output"
+	  postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'`
+	  func_execute_cmds "$postlink_cmds" 'exit $?'
+	fi
+
+	exit $EXIT_SUCCESS
+      fi
+
+      if test "$hardcode_action" = relink; then
+	# Fast installation is not supported
+	link_command="$compile_var$compile_command$compile_rpath"
+	relink_command="$finalize_var$finalize_command$finalize_rpath"
+
+	func_warning "this platform does not like uninstalled shared libraries"
+	func_warning "\`$output' will be relinked during installation"
+      else
+	if test "$fast_install" != no; then
+	  link_command="$finalize_var$compile_command$finalize_rpath"
+	  if test "$fast_install" = yes; then
+	    relink_command=`$ECHO "$compile_var$compile_command$compile_rpath" | $SED 's%@OUTPUT@%\$progdir/\$file%g'`
+	  else
+	    # fast_install is set to needless
+	    relink_command=
+	  fi
+	else
+	  link_command="$compile_var$compile_command$compile_rpath"
+	  relink_command="$finalize_var$finalize_command$finalize_rpath"
+	fi
+      fi
+
+      # Replace the output file specification.
+      link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'`
+
+      # Delete the old output files.
+      $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname
+
+      func_show_eval "$link_command" 'exit $?'
+
+      if test -n "$postlink_cmds"; then
+	func_to_tool_file "$output_objdir/$outputname"
+	postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'`
+	func_execute_cmds "$postlink_cmds" 'exit $?'
+      fi
+
+      # Now create the wrapper script.
+      func_verbose "creating $output"
+
+      # Quote the relink command for shipping.
+      if test -n "$relink_command"; then
+	# Preserve any variables that may affect compiler behavior
+	for var in $variables_saved_for_relink; do
+	  if eval test -z \"\${$var+set}\"; then
+	    relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command"
+	  elif eval var_value=\$$var; test -z "$var_value"; then
+	    relink_command="$var=; export $var; $relink_command"
+	  else
+	    func_quote_for_eval "$var_value"
+	    relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command"
+	  fi
+	done
+	relink_command="(cd `pwd`; $relink_command)"
+	relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"`
+      fi
+
+      # Only actually do things if not in dry run mode.
+      $opt_dry_run || {
+	# win32 will think the script is a binary if it has
+	# a .exe suffix, so we strip it off here.
+	case $output in
+	  *.exe) func_stripname '' '.exe' "$output"
+	         output=$func_stripname_result ;;
+	esac
+	# test for cygwin because mv fails w/o .exe extensions
+	case $host in
+	  *cygwin*)
+	    exeext=.exe
+	    func_stripname '' '.exe' "$outputname"
+	    outputname=$func_stripname_result ;;
+	  *) exeext= ;;
+	esac
+	case $host in
+	  *cygwin* | *mingw* )
+	    func_dirname_and_basename "$output" "" "."
+	    output_name=$func_basename_result
+	    output_path=$func_dirname_result
+	    cwrappersource="$output_path/$objdir/lt-$output_name.c"
+	    cwrapper="$output_path/$output_name.exe"
+	    $RM $cwrappersource $cwrapper
+	    trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15
+
+	    func_emit_cwrapperexe_src > $cwrappersource
+
+	    # The wrapper executable is built using the $host compiler,
+	    # because it contains $host paths and files. If cross-
+	    # compiling, it, like the target executable, must be
+	    # executed on the $host or under an emulation environment.
+	    $opt_dry_run || {
+	      $LTCC $LTCFLAGS -o $cwrapper $cwrappersource
+	      $STRIP $cwrapper
+	    }
+
+	    # Now, create the wrapper script for func_source use:
+	    func_ltwrapper_scriptname $cwrapper
+	    $RM $func_ltwrapper_scriptname_result
+	    trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15
+	    $opt_dry_run || {
+	      # note: this script will not be executed, so do not chmod.
+	      if test "x$build" = "x$host" ; then
+		$cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result
+	      else
+		func_emit_wrapper no > $func_ltwrapper_scriptname_result
+	      fi
+	    }
+	  ;;
+	  * )
+	    $RM $output
+	    trap "$RM $output; exit $EXIT_FAILURE" 1 2 15
+
+	    func_emit_wrapper no > $output
+	    chmod +x $output
+	  ;;
+	esac
+      }
+      exit $EXIT_SUCCESS
+      ;;
+    esac
+
+    # See if we need to build an old-fashioned archive.
+    for oldlib in $oldlibs; do
+
+      if test "$build_libtool_libs" = convenience; then
+	oldobjs="$libobjs_save $symfileobj"
+	addlibs="$convenience"
+	build_libtool_libs=no
+      else
+	if test "$build_libtool_libs" = module; then
+	  oldobjs="$libobjs_save"
+	  build_libtool_libs=no
+	else
+	  oldobjs="$old_deplibs $non_pic_objects"
+	  if test "$preload" = yes && test -f "$symfileobj"; then
+	    func_append oldobjs " $symfileobj"
+	  fi
+	fi
+	addlibs="$old_convenience"
+      fi
+
+      if test -n "$addlibs"; then
+	gentop="$output_objdir/${outputname}x"
+	func_append generated " $gentop"
+
+	func_extract_archives $gentop $addlibs
+	func_append oldobjs " $func_extract_archives_result"
+      fi
+
+      # Do each command in the archive commands.
+      if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then
+	cmds=$old_archive_from_new_cmds
+      else
+
+	# Add any objects from preloaded convenience libraries
+	if test -n "$dlprefiles"; then
+	  gentop="$output_objdir/${outputname}x"
+	  func_append generated " $gentop"
+
+	  func_extract_archives $gentop $dlprefiles
+	  func_append oldobjs " $func_extract_archives_result"
+	fi
+
+	# POSIX demands no paths to be encoded in archives.  We have
+	# to avoid creating archives with duplicate basenames if we
+	# might have to extract them afterwards, e.g., when creating a
+	# static archive out of a convenience library, or when linking
+	# the entirety of a libtool archive into another (currently
+	# not supported by libtool).
+	if (for obj in $oldobjs
+	    do
+	      func_basename "$obj"
+	      $ECHO "$func_basename_result"
+	    done | sort | sort -uc >/dev/null 2>&1); then
+	  :
+	else
+	  echo "copying selected object files to avoid basename conflicts..."
+	  gentop="$output_objdir/${outputname}x"
+	  func_append generated " $gentop"
+	  func_mkdir_p "$gentop"
+	  save_oldobjs=$oldobjs
+	  oldobjs=
+	  counter=1
+	  for obj in $save_oldobjs
+	  do
+	    func_basename "$obj"
+	    objbase="$func_basename_result"
+	    case " $oldobjs " in
+	    " ") oldobjs=$obj ;;
+	    *[\ /]"$objbase "*)
+	      while :; do
+		# Make sure we don't pick an alternate name that also
+		# overlaps.
+		newobj=lt$counter-$objbase
+		func_arith $counter + 1
+		counter=$func_arith_result
+		case " $oldobjs " in
+		*[\ /]"$newobj "*) ;;
+		*) if test ! -f "$gentop/$newobj"; then break; fi ;;
+		esac
+	      done
+	      func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj"
+	      func_append oldobjs " $gentop/$newobj"
+	      ;;
+	    *) func_append oldobjs " $obj" ;;
+	    esac
+	  done
+	fi
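+	# Editor's note (illustration): had $oldobjs contained "a/util.o
+	# b/util.o", the "sort -uc" check above would fail and the second
+	# file would be linked or copied into $gentop as "lt1-util.o", so
+	# the archive never ends up with two members named "util.o".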
+	func_to_tool_file "$oldlib" func_convert_file_msys_to_w32
+	tool_oldlib=$func_to_tool_file_result
+	eval cmds=\"$old_archive_cmds\"
+
+	func_len " $cmds"
+	len=$func_len_result
+	if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then
+	  cmds=$old_archive_cmds
+	elif test -n "$archiver_list_spec"; then
+	  func_verbose "using command file archive linking..."
+	  for obj in $oldobjs
+	  do
+	    func_to_tool_file "$obj"
+	    $ECHO "$func_to_tool_file_result"
+	  done > $output_objdir/$libname.libcmd
+	  func_to_tool_file "$output_objdir/$libname.libcmd"
+	  oldobjs=" $archiver_list_spec$func_to_tool_file_result"
+	  cmds=$old_archive_cmds
+	else
+	  # the command line is too long to link in one step, link in parts
+	  func_verbose "using piecewise archive linking..."
+	  save_RANLIB=$RANLIB
+	  RANLIB=:
+	  objlist=
+	  concat_cmds=
+	  save_oldobjs=$oldobjs
+	  oldobjs=
+	  # Is there a better way of finding the last object in the list?
+	  for obj in $save_oldobjs
+	  do
+	    last_oldobj=$obj
+	  done
+	  eval test_cmds=\"$old_archive_cmds\"
+	  func_len " $test_cmds"
+	  len0=$func_len_result
+	  len=$len0
+	  for obj in $save_oldobjs
+	  do
+	    func_len " $obj"
+	    func_arith $len + $func_len_result
+	    len=$func_arith_result
+	    func_append objlist " $obj"
+	    if test "$len" -lt "$max_cmd_len"; then
+	      :
+	    else
+	      # the above command should be used before it gets too long
+	      oldobjs=$objlist
+	      if test "$obj" = "$last_oldobj" ; then
+		RANLIB=$save_RANLIB
+	      fi
+	      test -z "$concat_cmds" || concat_cmds=$concat_cmds~
+	      eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\"
+	      objlist=
+	      len=$len0
+	    fi
+	  done
+	  RANLIB=$save_RANLIB
+	  oldobjs=$objlist
+	  if test "X$oldobjs" = "X" ; then
+	    eval cmds=\"\$concat_cmds\"
+	  else
+	    eval cmds=\"\$concat_cmds~\$old_archive_cmds\"
+	  fi
+	fi
+      fi
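+      # Editor's note (illustration): in the piecewise case, $cmds ends up
+      # as a "~"-separated queue such as
+      #   $AR cru libfoo.a a.o b.o~$AR cru libfoo.a c.o~$RANLIB libfoo.a
+      # with RANLIB disabled (set to ":") for every piece but the last;
+      # func_execute_cmds below runs one command at a time.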
+      func_execute_cmds "$cmds" 'exit $?'
+    done
+
+    test -n "$generated" && \
+      func_show_eval "${RM}r$generated"
+
+    # Now create the libtool archive.
+    case $output in
+    *.la)
+      old_library=
+      test "$build_old_libs" = yes && old_library="$libname.$libext"
+      func_verbose "creating $output"
+
+      # Preserve any variables that may affect compiler behavior
+      for var in $variables_saved_for_relink; do
+	if eval test -z \"\${$var+set}\"; then
+	  relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command"
+	elif eval var_value=\$$var; test -z "$var_value"; then
+	  relink_command="$var=; export $var; $relink_command"
+	else
+	  func_quote_for_eval "$var_value"
+	  relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command"
+	fi
+      done
+      # Quote the link command for shipping.
+      relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)"
+      relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"`
+      if test "$hardcode_automatic" = yes ; then
+	relink_command=
+      fi
+
+      # Only create the output if not a dry run.
+      $opt_dry_run || {
+	for installed in no yes; do
+	  if test "$installed" = yes; then
+	    if test -z "$install_libdir"; then
+	      break
+	    fi
+	    output="$output_objdir/$outputname"i
+	    # Replace all uninstalled libtool libraries with the installed ones
+	    newdependency_libs=
+	    for deplib in $dependency_libs; do
+	      case $deplib in
+	      *.la)
+		func_basename "$deplib"
+		name="$func_basename_result"
+		func_resolve_sysroot "$deplib"
+		eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result`
+		test -z "$libdir" && \
+		  func_fatal_error "\`$deplib' is not a valid libtool archive"
+		func_append newdependency_libs " ${lt_sysroot:+=}$libdir/$name"
+		;;
+	      -L*)
+		func_stripname -L '' "$deplib"
+		func_replace_sysroot "$func_stripname_result"
+		func_append newdependency_libs " -L$func_replace_sysroot_result"
+		;;
+	      -R*)
+		func_stripname -R '' "$deplib"
+		func_replace_sysroot "$func_stripname_result"
+		func_append newdependency_libs " -R$func_replace_sysroot_result"
+		;;
+	      *) func_append newdependency_libs " $deplib" ;;
+	      esac
+	    done
+	    dependency_libs="$newdependency_libs"
+	    newdlfiles=
+
+	    for lib in $dlfiles; do
+	      case $lib in
+	      *.la)
+	        func_basename "$lib"
+		name="$func_basename_result"
+		eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
+		test -z "$libdir" && \
+		  func_fatal_error "\`$lib' is not a valid libtool archive"
+		func_append newdlfiles " ${lt_sysroot:+=}$libdir/$name"
+		;;
+	      *) func_append newdlfiles " $lib" ;;
+	      esac
+	    done
+	    dlfiles="$newdlfiles"
+	    newdlprefiles=
+	    for lib in $dlprefiles; do
+	      case $lib in
+	      *.la)
+		# Only pass preopened files to the pseudo-archive (for
+		# eventual linking with the app. that links it) if we
+		# didn't already link the preopened objects directly into
+		# the library:
+		func_basename "$lib"
+		name="$func_basename_result"
+		eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib`
+		test -z "$libdir" && \
+		  func_fatal_error "\`$lib' is not a valid libtool archive"
+		func_append newdlprefiles " ${lt_sysroot:+=}$libdir/$name"
+		;;
+	      esac
+	    done
+	    dlprefiles="$newdlprefiles"
+	  else
+	    newdlfiles=
+	    for lib in $dlfiles; do
+	      case $lib in
+		[\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;;
+		*) abs=`pwd`"/$lib" ;;
+	      esac
+	      func_append newdlfiles " $abs"
+	    done
+	    dlfiles="$newdlfiles"
+	    newdlprefiles=
+	    for lib in $dlprefiles; do
+	      case $lib in
+		[\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;;
+		*) abs=`pwd`"/$lib" ;;
+	      esac
+	      func_append newdlprefiles " $abs"
+	    done
+	    dlprefiles="$newdlprefiles"
+	  fi
+	  $RM $output
+	  # place dlname in correct position for cygwin
+	  # In fact, it would be nice if we could use this code for all target
+	  # systems that can't hard-code library paths into their executables
+	  # and that have no shared library path variable independent of PATH,
+	  # but it turns out we can't easily determine that from inspecting
+	  # libtool variables, so we have to hard-code the OSs to which it
+	  # applies here; at the moment, that means platforms that use the PE
+	  # object format with DLL files.  See the long comment at the top of
+	  # tests/bindir.at for full details.
+	  tdlname=$dlname
+	  case $host,$output,$installed,$module,$dlname in
+	    *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll)
+	      # If a -bindir argument was supplied, place the dll there.
+	      if test "x$bindir" != x ;
+	      then
+		func_relative_path "$install_libdir" "$bindir"
+		tdlname=$func_relative_path_result$dlname
+	      else
+		# Otherwise fall back on heuristic.
+		tdlname=../bin/$dlname
+	      fi
+	      ;;
+	  esac
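+	  # Editor's note (illustration): with install_libdir=/usr/lib and
+	  # -bindir /usr/bin, func_relative_path makes tdlname something
+	  # like "../bin/cygfoo-1.dll"; without -bindir the same "../bin/"
+	  # prefix is assumed as a heuristic.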
+	  $ECHO > $output "\
+# $outputname - a libtool library file
+# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION
+#
+# Please DO NOT delete this file!
+# It is necessary for linking the library.
+
+# The name that we can dlopen(3).
+dlname='$tdlname'
+
+# Names of this library.
+library_names='$library_names'
+
+# The name of the static archive.
+old_library='$old_library'
+
+# Linker flags that can not go in dependency_libs.
+inherited_linker_flags='$new_inherited_linker_flags'
+
+# Libraries that this one depends upon.
+dependency_libs='$dependency_libs'
+
+# Names of additional weak libraries provided by this library
+weak_library_names='$weak_libs'
+
+# Version information for $libname.
+current=$current
+age=$age
+revision=$revision
+
+# Is this an already installed library?
+installed=$installed
+
+# Should we warn about portability when linking against -modules?
+shouldnotlink=$module
+
+# Files to dlopen/dlpreopen
+dlopen='$dlfiles'
+dlpreopen='$dlprefiles'
+
+# Directory that this library needs to be installed in:
+libdir='$install_libdir'"
+	  if test "$installed" = no && test "$need_relink" = yes; then
+	    $ECHO >> $output "\
+relink_command=\"$relink_command\""
+	  fi
+	done
+      }
+
+      # Do a symbolic link so that the libtool archive can be found in
+      # LD_LIBRARY_PATH before the program is installed.
+      func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?'
+      ;;
+    esac
+    exit $EXIT_SUCCESS
+}
+
+{ test "$opt_mode" = link || test "$opt_mode" = relink; } &&
+    func_mode_link ${1+"$@"}
+
+
+# func_mode_uninstall arg...
+func_mode_uninstall ()
+{
+    $opt_debug
+    RM="$nonopt"
+    files=
+    rmforce=
+    exit_status=0
+
+    # This variable tells wrapper scripts just to set variables rather
+    # than running their programs.
+    libtool_install_magic="$magic"
+
+    for arg
+    do
+      case $arg in
+      -f) func_append RM " $arg"; rmforce=yes ;;
+      -*) func_append RM " $arg" ;;
+      *) func_append files " $arg" ;;
+      esac
+    done
+
+    test -z "$RM" && \
+      func_fatal_help "you must specify an RM program"
+
+    rmdirs=
+
+    for file in $files; do
+      func_dirname "$file" "" "."
+      dir="$func_dirname_result"
+      if test "X$dir" = X.; then
+	odir="$objdir"
+      else
+	odir="$dir/$objdir"
+      fi
+      func_basename "$file"
+      name="$func_basename_result"
+      test "$opt_mode" = uninstall && odir="$dir"
+
+      # Remember odir for removal later, being careful to avoid duplicates
+      if test "$opt_mode" = clean; then
+	case " $rmdirs " in
+	  *" $odir "*) ;;
+	  *) func_append rmdirs " $odir" ;;
+	esac
+      fi
+
+      # Don't error if the file doesn't exist and rm -f was used.
+      if { test -L "$file"; } >/dev/null 2>&1 ||
+	 { test -h "$file"; } >/dev/null 2>&1 ||
+	 test -f "$file"; then
+	:
+      elif test -d "$file"; then
+	exit_status=1
+	continue
+      elif test "$rmforce" = yes; then
+	continue
+      fi
+
+      rmfiles="$file"
+
+      case $name in
+      *.la)
+	# Possibly a libtool archive, so verify it.
+	if func_lalib_p "$file"; then
+	  func_source $dir/$name
+
+	  # Delete the libtool libraries and symlinks.
+	  for n in $library_names; do
+	    func_append rmfiles " $odir/$n"
+	  done
+	  test -n "$old_library" && func_append rmfiles " $odir/$old_library"
+
+	  case "$opt_mode" in
+	  clean)
+	    case " $library_names " in
+	    *" $dlname "*) ;;
+	    *) test -n "$dlname" && func_append rmfiles " $odir/$dlname" ;;
+	    esac
+	    test -n "$libdir" && func_append rmfiles " $odir/$name $odir/${name}i"
+	    ;;
+	  uninstall)
+	    if test -n "$library_names"; then
+	      # Do each command in the postuninstall commands.
+	      func_execute_cmds "$postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1'
+	    fi
+
+	    if test -n "$old_library"; then
+	      # Do each command in the old_postuninstall commands.
+	      func_execute_cmds "$old_postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1'
+	    fi
+	    # FIXME: should reinstall the best remaining shared library.
+	    ;;
+	  esac
+	fi
+	;;
+
+      *.lo)
+	# Possibly a libtool object, so verify it.
+	if func_lalib_p "$file"; then
+
+	  # Read the .lo file
+	  func_source $dir/$name
+
+	  # Add PIC object to the list of files to remove.
+	  if test -n "$pic_object" &&
+	     test "$pic_object" != none; then
+	    func_append rmfiles " $dir/$pic_object"
+	  fi
+
+	  # Add non-PIC object to the list of files to remove.
+	  if test -n "$non_pic_object" &&
+	     test "$non_pic_object" != none; then
+	    func_append rmfiles " $dir/$non_pic_object"
+	  fi
+	fi
+	;;
+
+      *)
+	if test "$opt_mode" = clean ; then
+	  noexename=$name
+	  case $file in
+	  *.exe)
+	    func_stripname '' '.exe' "$file"
+	    file=$func_stripname_result
+	    func_stripname '' '.exe' "$name"
+	    noexename=$func_stripname_result
+	    # $file with .exe has already been added to rmfiles,
+	    # add $file without .exe
+	    func_append rmfiles " $file"
+	    ;;
+	  esac
+	  # Do a test to see if this is a libtool program.
+	  if func_ltwrapper_p "$file"; then
+	    if func_ltwrapper_executable_p "$file"; then
+	      func_ltwrapper_scriptname "$file"
+	      relink_command=
+	      func_source $func_ltwrapper_scriptname_result
+	      func_append rmfiles " $func_ltwrapper_scriptname_result"
+	    else
+	      relink_command=
+	      func_source $dir/$noexename
+	    fi
+
+	    # note $name still contains .exe if it was in $file originally
+	    # as does the version of $file that was added into $rmfiles
+	    func_append rmfiles " $odir/$name $odir/${name}S.${objext}"
+	    if test "$fast_install" = yes && test -n "$relink_command"; then
+	      func_append rmfiles " $odir/lt-$name"
+	    fi
+	    if test "X$noexename" != "X$name" ; then
+	      func_append rmfiles " $odir/lt-${noexename}.c"
+	    fi
+	  fi
+	fi
+	;;
+      esac
+      func_show_eval "$RM $rmfiles" 'exit_status=1'
+    done
+
+    # Try to remove the ${objdir}s in the directories where we deleted files
+    for dir in $rmdirs; do
+      if test -d "$dir"; then
+	func_show_eval "rmdir $dir >/dev/null 2>&1"
+      fi
+    done
+
+    exit $exit_status
+}
+
+{ test "$opt_mode" = uninstall || test "$opt_mode" = clean; } &&
+    func_mode_uninstall ${1+"$@"}
+
+test -z "$opt_mode" && {
+  help="$generic_help"
+  func_fatal_help "you must specify a MODE"
+}
+
+test -z "$exec_cmd" && \
+  func_fatal_help "invalid operation mode \`$opt_mode'"
+
+if test -n "$exec_cmd"; then
+  eval exec "$exec_cmd"
+  exit $EXIT_FAILURE
+fi
+
+exit $exit_status
+
+
+# The TAGs below are defined such that we never get into a situation
+# in which we disable both kinds of libraries.  Given conflicting
+# choices, we go for a static library, which is the most portable,
+# since we can't tell whether shared libraries were disabled because
+# the user asked for that or because the platform doesn't support
+# them.  This is particularly important on AIX, because we don't
+# support having both static and shared libraries enabled at the same
+# time on that platform, so we default to a static-only configuration.
+# If a disable-shared tag is given, we'll fall back to a static-only
+# configuration.  But we'll never go from static-only to shared-only.
+
+# ### BEGIN LIBTOOL TAG CONFIG: disable-shared
+build_libtool_libs=no
+build_old_libs=yes
+# ### END LIBTOOL TAG CONFIG: disable-shared
+
+# ### BEGIN LIBTOOL TAG CONFIG: disable-static
+build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac`
+# ### END LIBTOOL TAG CONFIG: disable-static
+
+# Local Variables:
+# mode:shell-script
+# sh-indentation:2
+# End:
+# vi:sw=2
+
diff --git a/m4/Makefile.am b/m4/Makefile.am
new file mode 100644
index 0000000..d9dc4be
--- /dev/null
+++ b/m4/Makefile.am
@@ -0,0 +1,2 @@
+subdir=m4
+EXTRA_DIST=
diff --git a/m4/Makefile.in b/m4/Makefile.in
new file mode 100644
index 0000000..fa21b29
--- /dev/null
+++ b/m4/Makefile.in
@@ -0,0 +1,432 @@
+# Makefile.in generated by automake 1.11.6 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software
+# Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+VPATH = @srcdir@
+am__make_dryrun = \
+  { \
+    am__dry=no; \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        echo 'am--echo: ; @echo "AM"  OK' | $(MAKE) -f - 2>/dev/null \
+          | grep '^AM OK$$' >/dev/null || am__dry=yes;; \
+      *) \
+        for am__flg in $$MAKEFLAGS; do \
+          case $$am__flg in \
+            *=*|--*) ;; \
+            *n*) am__dry=yes; break;; \
+          esac; \
+        done;; \
+    esac; \
+    test $$am__dry = yes; \
+  }
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/rsb-config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+ARFLAGS = @ARFLAGS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DOXYGEN = @DOXYGEN@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FC = @FC@
+FCFLAGS = @FCFLAGS@
+FGREP = @FGREP@
+GREP = @GREP@
+HELP2MAN = @HELP2MAN@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBRSB_ABI_VERSION = @LIBRSB_ABI_VERSION@
+LIBRSB_LIBRSB_VER = @LIBRSB_LIBRSB_VER@
+LIBRSB_MAIN_RELEASE = @LIBRSB_MAIN_RELEASE@
+LIBRSB_VERSION = @LIBRSB_VERSION@
+LIBRSB_VER_DATE = @LIBRSB_VER_DATE@
+LIBRSB_VER_MAJOR = @LIBRSB_VER_MAJOR@
+LIBRSB_VER_MINOR = @LIBRSB_VER_MINOR@
+LIBRSB_VER_PATCH = @LIBRSB_VER_PATCH@
+LIBRSB_VER_PRERS = @LIBRSB_VER_PRERS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+M4 = @M4@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NOUNROLLCFLAGS = @NOUNROLLCFLAGS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OCTAVE = @OCTAVE@
+OCTAVE_FLAGS = @OCTAVE_FLAGS@
+OPENMP_CFLAGS = @OPENMP_CFLAGS@
+OPENMP_FCFLAGS = @OPENMP_FCFLAGS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+RANLIB = @RANLIB@
+RSB_CONST_MAX_SUPPORTED_THREADS = @RSB_CONST_MAX_SUPPORTED_THREADS@
+RSB_DETECTED_MEM_HIERARCHY_INFO = @RSB_DETECTED_MEM_HIERARCHY_INFO@
+RSB_RSBENCH_CFLAGS = @RSB_RSBENCH_CFLAGS@
+RSB_RSBENCH_LIBS = @RSB_RSBENCH_LIBS@
+RSB_USER_SET_MEM_HIERARCHY_INFO = @RSB_USER_SET_MEM_HIERARCHY_INFO@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+SVN_REVISION = @SVN_REVISION@
+VERSION = @VERSION@
+WANT_COLUMN_UNLOOP_FACTORS = @WANT_COLUMN_UNLOOP_FACTORS@
+WANT_HALFWORD_INDICES = @WANT_HALFWORD_INDICES@
+WANT_LOOPING_KERNELS = @WANT_LOOPING_KERNELS@
+WANT_MATRIX_ALL_META_OPS = @WANT_MATRIX_ALL_META_OPS@
+WANT_MATRIX_ALL_OPS = @WANT_MATRIX_ALL_OPS@
+WANT_MATRIX_ALL_TYPES = @WANT_MATRIX_ALL_TYPES@
+WANT_MATRIX_BCOO_STORAGE = @WANT_MATRIX_BCOO_STORAGE@
+WANT_MATRIX_BCSS_STORAGE = @WANT_MATRIX_BCSS_STORAGE@
+WANT_MATRIX_LINKED_STORAGE = @WANT_MATRIX_LINKED_STORAGE@
+WANT_MATRIX_OPS = @WANT_MATRIX_OPS@
+WANT_MATRIX_STORAGE = @WANT_MATRIX_STORAGE@
+WANT_MATRIX_VB_STORAGE = @WANT_MATRIX_VB_STORAGE@
+WANT_ROW_UNLOOP_FACTORS = @WANT_ROW_UNLOOP_FACTORS@
+WANT_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR = @WANT_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR@
+WANT_SPSM_DIAG_CHECK = @WANT_SPSM_DIAG_CHECK@
+WANT_TYPES = @WANT_TYPES@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_ct_FC = @ac_ct_FC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+enable_openmp = @enable_openmp@
+enable_restrict = @enable_restrict@
+exec_prefix = @exec_prefix@
+have_grep = @have_grep@
+have_sed = @have_sed@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+subdir = m4
+EXTRA_DIST = 
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in:  $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu m4/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --gnu m4/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure:  $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4):  $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+tags: TAGS
+TAGS:
+
+ctags: CTAGS
+CTAGS:
+
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use;"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic clean-libtool \
+	distclean distclean-generic distclean-libtool distdir dvi \
+	dvi-am html html-am info info-am install install-am \
+	install-data install-data-am install-dvi install-dvi-am \
+	install-exec install-exec-am install-html install-html-am \
+	install-info install-info-am install-man install-pdf \
+	install-pdf-am install-ps install-ps-am install-strip \
+	installcheck installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-generic \
+	mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/mergesort_macros.m4 b/mergesort_macros.m4
new file mode 100644
index 0000000..ebfd512
--- /dev/null
+++ b/mergesort_macros.m4
@@ -0,0 +1,594 @@
+dnl
+dnl
+dnl	RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_DISPATCHER_NAME(TYPE,BLOCKORIENTED)
+dnl	-----------------------------------------------------------------------------
+dnl	Expands to the mergesort on coordinates function dispatcher function name.
+dnl
+define(`RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_DISPATCHER_NAME',`dnl
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`blockoriented',$2)dnl
+dnl
+rsb__do_mergesort`_'blockoriented`'dnl
+dnl
+popdef(`blockoriented')dnl
+popdef(`mtype')dnl
+dnl
+')dnl
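+dnl
+dnl	Editor's note (illustration): a call such as
+dnl	RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_DISPATCHER_NAME(double,CSR)
+dnl	expands to the plain identifier rsb__do_mergesort_CSR; the type
+dnl	argument is pushed and popped but unused in the emitted name.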
+dnl
+dnl
+dnl	RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_DISPATCHER_ARGS(TYPE,BLOCKORIENTED)
+dnl	-------------------------------------------------------------------------
+dnl	Expands to the mergesort on coordinates function dispatcher function arguments.
+dnl
+define(`RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_DISPATCHER_ARGS',`dnl
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`blockoriented',$2)dnl
+dnl
+dnl
+(
+	rsb_coo_idx_t *iarray,
+	rsb_coo_idx_t *jarray,
+ifelse(blockoriented,`VBR',`	rsb_coo_idx_t *biarray,
+',`')dnl
+ifelse(blockoriented,`VBR',`	rsb_coo_idx_t *bjarray,
+',`')dnl
+	void *array,
+	rsb_nnz_idx_t length, 
+ifelse(blockoriented,`BCSR',`	rsb_coo_idx_t mb,
+',`')dnl
+ifelse(blockoriented,`BCSR',`	rsb_coo_idx_t kb,
+',`')dnl
+	rsb_coo_idx_t *iresult,
+	rsb_coo_idx_t *jresult,
+ifelse(blockoriented,`VBR',`	rsb_coo_idx_t *biresult,
+',`')dnl
+ifelse(blockoriented,`VBR',`	rsb_coo_idx_t *bjresult,
+',`')dnl
+	void *result,
+	rsb_type_t type)dnl
+dnl
+dnl
+popdef(`blockoriented')dnl
+popdef(`mtype')dnl
+dnl
+')dnl
+dnl
+dnl
+dnl	RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_DISPATCHER_PROTOTYPE(TYPE,BLOCKORIENTED)
+dnl	------------------------------------------------------------------------------
+dnl	Expands to the mergesort on coordinates function dispatcher function prototype.
+dnl
+define(`RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_DISPATCHER_PROTOTYPE',`dnl
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`blockoriented',$2)dnl
+dnl
+rsb_err_t RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_DISPATCHER_NAME(mtype,blockoriented)dnl
+RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_DISPATCHER_ARGS(mtype,blockoriented)dnl
+dnl
+popdef(`blockoriented')dnl
+popdef(`mtype')dnl
+dnl
+')dnl
+dnl
+dnl
+dnl	RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_DISPATCHER(TYPE,BLOCKORIENTED)
+dnl	--------------------------------------------------------------------
+dnl	Expands to the mergesort on coordinates function dispatcher.
+dnl
+define(`RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_DISPATCHER',`dnl
+dnl
+pushdef(`types',$1)dnl
+pushdef(`blockoriented',$2)dnl
+dnl
+RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_DISPATCHER_PROTOTYPE(,blockoriented)
+dnl
+{
+	/*!
+	 * \ingroup gr_util
+	 *	This function sorts the nonzero elements of a sparse ifelse(blockoriented,`VBR',`blocked ')`'matrix.
+	 *      It reads the row index, column index, and values arrays,
+	 *	and sorts them into separate output arrays.
+	 *
+	 * 	\param length  the length of the input arrays
+	 * 	\param iarray  the input  row    indices array
+	 * 	\param jarray  the input  column indices array
+	 * 	\param array   the input  mtype array
+	 * 	\param iresult the output row    indices array
+	 * 	\param jresult the output column indices array
+	 * 	\param result  the output values array
+ifelse(blockoriented,`CSR',`dnl
+	 *	FIXME : UNDOCUMENTED
+')dnl
+ifelse(blockoriented,`VBR',`dnl
+	 * 	\param biarray  the input  block row    indices array
+	 * 	\param bjarray  the input  block column indices array
+	 * 	\param biresult the output block row    indices array
+	 * 	\param bjresult the output block column indices array
+')dnl
+ifelse(blockoriented,`BCSR',`
+	 *	FIXME : UNDOCUMENTED
+',`')dnl
+	 *	It sorts the three arrays (iarray, jarray, array) following this
+	 *	criterion:
+	 *
+	 * 	(ia1,ja1)<=(ia2,ja2) iff (ia1<ia2) or ( (ia1==ia2) and (ja1<ja2) )
+	 * 	i.e.: C (row major) ordering
+	 */
+
+
+dnl
+pushdef(`blockoriented',$2)dnl
+pushdef(`args',`iarray, jarray,
+ifelse(blockoriented,`VBR',`	biarray,bjarray,',`')dnl
+ifelse(blockoriented,`BCSR',`	mb,kb,',`')dnl
+array, length,
+iresult, jresult,
+ifelse(blockoriented,`VBR',`	biresult,bjresult,',`')dnl
+result')dnl
+dnl
+foreach(`mtype',RSB_M4_TYPES,`dnl
+dnl
+	if(`type' == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype))
+	return RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_NAME(mtype,blockoriented)(args);dnl
+
+	else
+dnl
+')dnl
+dnl
+popdef(`blockoriented')dnl
+popdef(`args')dnl
+dnl
+	return RSB_ERR_UNSUPPORTED_TYPE;
+}
+dnl
+popdef(`types')dnl
+dnl
+')dnl
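
(For orientation: when m4 expands the dispatcher above, the foreach over
RSB_M4_TYPES yields one if/else branch per configured numerical type. A
minimal C sketch of that shape follows; the type codes and function names
are illustrative stand-ins, not librsb's own symbols.)

    /* Sketch of the expanded dispatcher: pick the per-type sorter at
     * runtime from a type code; unknown codes are rejected. */
    typedef int  ex_err_t;
    typedef char ex_type_t;
    #define EX_ERR_NO_ERROR          0
    #define EX_ERR_UNSUPPORTED_TYPE -1

    static ex_err_t ex_mergesort_double(void *arrays) { (void)arrays; return EX_ERR_NO_ERROR; }
    static ex_err_t ex_mergesort_float (void *arrays) { (void)arrays; return EX_ERR_NO_ERROR; }

    static ex_err_t ex_mergesort_dispatch(void *arrays, ex_type_t type)
    {
        if (type == 'D')
            return ex_mergesort_double(arrays);
        else if (type == 'S')
            return ex_mergesort_float(arrays);
        return EX_ERR_UNSUPPORTED_TYPE;
    }

    int main(void) { return ex_mergesort_dispatch(0, 'D'); }
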
+dnl
+dnl
+dnl	RSB_M4_MERGESORT_ON_COORDINATES_MERGE_FUNCTION_NAME(TYPE,BLOCKORIENTED)
+dnl	--------------------------------------------------------------------
+dnl	Expands to the mergesort on coordinates merge function name.
+dnl
+define(`RSB_M4_MERGESORT_ON_COORDINATES_MERGE_FUNCTION_NAME',`dnl
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`blockoriented',$2)dnl
+dnl
+rsb_do_merge_`'RSB_M4_CHOPSPACES(mtype)`_'blockoriented`'dnl
+dnl
+popdef(`blockoriented')dnl
+popdef(`mtype')dnl
+dnl
+')dnl
+dnl
+dnl
+dnl	RSB_M4_MERGESORT_ON_COORDINATES_MERGE_FUNCTION_ARGS(TYPE,BLOCKORIENTED)
+dnl	--------------------------------------------------------------------
+dnl	Expands to the mergesort on coordinates merge function arguments.
+dnl
+define(`RSB_M4_MERGESORT_ON_COORDINATES_MERGE_FUNCTION_ARGS',`dnl
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`blockoriented',$2)dnl
+dnl
+(
+		const rsb_coo_idx_t* RSB_M4_RESTRICT ileft, const rsb_coo_idx_t* RSB_M4_RESTRICT iright,  rsb_coo_idx_t*RSB_M4_RESTRICT iresult,
+		const rsb_coo_idx_t* RSB_M4_RESTRICT jleft, const rsb_coo_idx_t* RSB_M4_RESTRICT jright,  rsb_coo_idx_t*RSB_M4_RESTRICT jresult,
+ifelse(blockoriented,`VBR',`const rsb_coo_idx_t * RSB_M4_RESTRICT bileft, const rsb_coo_idx_t * RSB_M4_RESTRICT biright, rsb_coo_idx_t * RSB_M4_RESTRICT biresult,',`')dnl
+ifelse(blockoriented,`VBR',`const rsb_coo_idx_t * RSB_M4_RESTRICT bjleft, const rsb_coo_idx_t * RSB_M4_RESTRICT bjright, rsb_coo_idx_t * RSB_M4_RESTRICT bjresult,',`')dnl
+ifelse(blockoriented,`BCSR',`const rsb_coo_idx_t mb, const rsb_coo_idx_t kb,',`')dnl
+		const mtype* left, const mtype* RSB_M4_RESTRICT right,  mtype* RSB_M4_RESTRICT result,
+dnl		rsb_coo_idx_t left_index, rsb_coo_idx_t right_index, rsb_coo_idx_t result_index, 
+dnl		rsb_coo_idx_t left_mod, rsb_coo_idx_t right_mod, rsb_coo_idx_t result_mod )dnl
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length )dnl
+dnl
+popdef(`blockoriented')dnl
+popdef(`mtype')dnl
+dnl
+')dnl
+dnl
+dnl
+dnl	RSB_M4_MERGESORT_ON_COORDINATES_MERGE_FUNCTION_PROTOTYPE(TYPE,BLOCKORIENTED)
+dnl	-------------------------------------------------------------------------
+dnl	Expands to the mergesort on coordinates merge function prototype.
+dnl
+define(`RSB_M4_MERGESORT_ON_COORDINATES_MERGE_FUNCTION_PROTOTYPE',`
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`blockoriented',$2)dnl
+dnl
+void RSB_M4_MERGESORT_ON_COORDINATES_MERGE_FUNCTION_NAME(mtype,blockoriented)dnl
+RSB_M4_MERGESORT_ON_COORDINATES_MERGE_FUNCTION_ARGS(mtype,blockoriented)
+dnl
+popdef(`blockoriented')dnl
+popdef(`mtype')
+dnl
+')dnl
+dnl
+dnl
+dnl	RSB_M4_MERGESORT_ON_COORDINATES_MERGE_FUNCTION(TYPE,BLOCKORIENTED)
+dnl	---------------------------------------------------------------
+dnl	Expands to the mergesort on coordinates merge function.
+dnl
+define(`RSB_M4_MERGESORT_ON_COORDINATES_MERGE_FUNCTION',`
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`blockoriented',$2)dnl
+dnl
+RSB_M4_MERGESORT_ON_COORDINATES_MERGE_FUNCTION_PROTOTYPE(mtype,blockoriented)
+{
+	/*!
+	 * \ingroup gr_util
+	 * The merge function for our blockoriented matrix coefficient sorting.
+	 *
+	 * NOTE : This function is the mergesort bottleneck.
+	 */
+dnl	rsb_nnz_idx_t left_length=left_mod;
+dnl	rsb_nnz_idx_t right_length = right_mod;
+	register int left_index=0, right_index=0, result_index=0;
+	
+	/*
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	          |<-  length ----->|
+dnl	<--------------- mod ------------------------->
+	          ^- index
+	 */
+
+dnl #define LEFT_ADVANCE		left_index =( left_index+1)% left_mod; left_length-- ;
+dnl #define RIGHT_ADVANCE		right_index=(right_index+1)%right_mod; right_length--;
+dnl #define RESULT_ADVANCE		result_index =( result_index+1)% result_mod;
+
+#define LEFT_ADVANCE		left_index =( left_index+1); left_length-- ;
+#define RIGHT_ADVANCE		right_index=(right_index+1); right_length--;
+#define RESULT_ADVANCE		result_index =( result_index+1);
+
+ifelse(blockoriented,`VBR',`dnl
+#define RESULT_APPEND(IEL,JEL,BIEL,BJEL,EL)	\
+',`dnl
+#define RESULT_APPEND(IEL,JEL,EL)	\
+')dnl
+	iresult[result_index]=(IEL);  \
+	jresult[result_index]=(JEL);  \
+	result[result_index]=( EL);  \
+ifelse(blockoriented,`VBR',`dnl
+	biresult[result_index]=(BIEL);  \
+',`')dnl
+ifelse(blockoriented,`VBR',`dnl
+	bjresult[result_index]=(BJEL);  \
+',`')dnl
+	RESULT_ADVANCE;
+
+#define LRESULT_APPEND	\
+	iresult[result_index]=ileft[left_index];\
+	jresult[result_index]=jleft[left_index];\
+ifelse(blockoriented,`VBR',`dnl
+	biresult[result_index]=bileft[left_index];  \
+',`')dnl
+ifelse(blockoriented,`VBR',`dnl
+	bjresult[result_index]=bjleft[left_index];  \
+',`')dnl
+	result[ result_index]= left[left_index];\
+	RESULT_ADVANCE; \
+	LEFT_ADVANCE;
+
+#define RRESULT_APPEND	\
+	iresult[result_index]=iright[right_index];\
+	jresult[result_index]=jright[right_index];\
+ifelse(blockoriented,`VBR',`dnl
+	biresult[result_index]=biright[right_index];  \
+',`')dnl
+ifelse(blockoriented,`VBR',`dnl
+	bjresult[result_index]=bjright[right_index];  \
+',`')dnl
+	 result[result_index]= right[right_index];\
+	RESULT_ADVANCE; \
+	RIGHT_ADVANCE; 
+
+	while( left_length > 0 && right_length > 0)
+	if(
+	ifelse(blockoriented,`VBR',`
+		bileft[left_index] < biright[right_index] ||
+		(	bileft[left_index] == biright[right_index]	&&
+			bjleft[left_index] <= bjright[right_index]	)
+		)
+	')dnl
+	ifelse(blockoriented,`CSR',`
+		ileft[left_index] < iright[right_index] ||
+		(	ileft[left_index] == iright[right_index]	&&
+			jleft[left_index] <= jright[right_index]	)
+		)
+	')dnl
+	ifelse(blockoriented,`BCSR',`
+		ileft[left_index]/mb < iright[right_index]/mb ||
+		(	ileft[left_index]/mb == iright[right_index]/mb	&&
+			jleft[left_index]/kb <= jright[right_index]/kb	)
+		)
+	')dnl
+	ifelse(blockoriented,`PACK',`
+		ileft[left_index].spmv_uauz < iright[right_index].v )
+	')dnl
+	{
+		LRESULT_APPEND
+	}
+	else
+	{
+		RRESULT_APPEND
+	}
+
+	while( left_length  > 0 )
+	{
+		LRESULT_APPEND
+	}
+	while( right_length  > 0 )
+	{
+		RRESULT_APPEND
+	}
+#undef LEFT_ADVANCE
+#undef RIGHT_ADVANCE
+#undef RESULT_ADVANCE
+#undef RESULT_APPEND
+#undef LRESULT_APPEND
+#undef RRESULT_APPEND
+
+}
+dnl
+popdef(`blockoriented')dnl
+popdef(`mtype')dnl
+dnl
+')
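
(A hedged editorial sketch of the merge this macro expands to in the CSR
case, with int indices and double values standing in for rsb_coo_idx_t and
the per-type value arrays; the VBR and BCSR branches differ only in the
comparison, e.g. BCSR compares block coordinates i/mb and j/kb.)

    #include <stdio.h>

    /* Merge two sorted COO runs (left, right) into result, ordering pairs
     * row-major: (i1,j1) <= (i2,j2) iff i1 < i2, or i1 == i2 and j1 <= j2. */
    static void merge_coo(const int *il, const int *jl, const double *vl, int nl,
                          const int *ir, const int *jr, const double *vr, int nr,
                          int *io, int *jo, double *vo)
    {
        int l = 0, r = 0, o = 0;

        while (l < nl && r < nr)
            if (il[l] < ir[r] || (il[l] == ir[r] && jl[l] <= jr[r]))
            { io[o] = il[l]; jo[o] = jl[l]; vo[o] = vl[l]; ++o; ++l; }
            else
            { io[o] = ir[r]; jo[o] = jr[r]; vo[o] = vr[r]; ++o; ++r; }
        while (l < nl) { io[o] = il[l]; jo[o] = jl[l]; vo[o] = vl[l]; ++o; ++l; }
        while (r < nr) { io[o] = ir[r]; jo[o] = jr[r]; vo[o] = vr[r]; ++o; ++r; }
    }

    int main(void)
    {
        const int    il[] = {0, 1},      jl[] = {1, 0};
        const double vl[] = {1.0, 2.0};
        const int    ir[] = {0, 1},      jr[] = {2, 1};
        const double vr[] = {3.0, 4.0};
        int io[4], jo[4];
        double vo[4];

        merge_coo(il, jl, vl, 2, ir, jr, vr, 2, io, jo, vo);
        for (int k = 0; k < 4; ++k)
            printf("(%d,%d)=%g\n", io[k], jo[k], vo[k]); /* row-major order */
        return 0;
    }
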
+dnl
+dnl
+dnl	RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_NAME(TYPE,BLOCKORIENTED)
+dnl	--------------------------------------------------------------
+dnl	Expands to the mergesort on coordinates function name.
+dnl
+define(`RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_NAME',`dnl
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`blockoriented',$2)dnl
+dnl
+dnl rsb_do_mergesort_`'mtype`'dnl
+rsb_do_mergesort_`'RSB_M4_CHOPSPACES(mtype)`_'blockoriented`'dnl
+dnl
+popdef(`blockoriented')dnl
+popdef(`mtype')dnl
+')dnl
+dnl
+dnl
+dnl	RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_ARGS(TYPE,BLOCKORIENTED)
+dnl	--------------------------------------------------------------
+dnl	Expands to the mergesort on coordinates function arguments.
+dnl
+define(`RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_ARGS',`dnl
+pushdef(`mtype',$1)dnl
+pushdef(`blockoriented',$2)dnl
+dnl
+(
+	rsb_coo_idx_t *RSB_M4_RESTRICT iarray,
+	rsb_coo_idx_t *RSB_M4_RESTRICT jarray,
+ifelse(blockoriented,`VBR',`	rsb_coo_idx_t *RSB_M4_RESTRICT biarray,
+',`')dnl
+ifelse(blockoriented,`VBR',`	rsb_coo_idx_t *RSB_M4_RESTRICT bjarray,
+',`')dnl
+ifelse(blockoriented,`BCSR',`	rsb_coo_idx_t mb, rsb_coo_idx_t kb,
+',`')dnl
+	mtype *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *RSB_M4_RESTRICT iresult,
+	rsb_coo_idx_t *RSB_M4_RESTRICT jresult,
+ifelse(blockoriented,`VBR',`	rsb_coo_idx_t *RSB_M4_RESTRICT biresult,
+',`')dnl
+ifelse(blockoriented,`VBR',`	rsb_coo_idx_t *RSB_M4_RESTRICT bjresult,
+',`')dnl
+	mtype *RSB_M4_RESTRICT result)
+dnl
+popdef(`blockoriented')dnl
+popdef(`mtype')dnl
+')dnl
+dnl
+dnl
+dnl	RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_PROTOTYPE(TYPE,BLOCKORIENTED)
+dnl	-------------------------------------------------------------------
+dnl	Expands to the mergesort on coordinates function prototype.
+dnl
+define(`RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_PROTOTYPE',`dnl
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`blockoriented',$2)dnl
+dnl
+rsb_err_t RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_NAME(`mtype',`blockoriented')dnl
+RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_ARGS(`mtype',`blockoriented')dnl
+dnl
+popdef(`blockoriented')dnl
+popdef(`mtype')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION(TYPE,BLOCKORIENTED)
+dnl	---------------------------------------------------------
+dnl	Expands to the block-oriented mergesort on coordinates function
+dnl	for the requested partitioning (CSR, BCSR, or VBR).
+dnl	The blocking is specified as a function argument.
+dnl
+define(`RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION',`dnl
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`blockoriented',$2)dnl
+dnl
+RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_PROTOTYPE(mtype,blockoriented)
+dnl
+{
+	/*!
+	 *	\ingroup gr_util
+	 *	This function sorts the nonzero elements of a sparse blocked
+	 *      mtype matrix.
+	 *      It reads the row index, column index, and values arrays,
+	 *	and sorts them into separate output arrays.
+	 *
+	 *	NOTE : This function could be optimized.
+	 *
+	 * 	\param iarray  the input  row    indices array
+	 * 	\param jarray  the input  column indices array
+	 * 	\param array   the input  mtype array
+	 * 	\param iresult the output row    indices array
+	 * 	\param jresult the output column indices array
+	 * 	\param result  the output values array
+ifelse(blockoriented,`VBR',`dnl
+	 * 	\param biarray  the input  block row    indices array
+	 * 	\param bjarray  the input  block column indices array
+	 * 	\param biresult the output block row    indices array
+	 * 	\param bjresult the output block column indices array
+')dnl
+	 *	It sorts the three arrays (iarray, jarray, array) following this
+	 *	criterion:
+	 *
+	 * 	(ia1,ja1)<=(ia2,ja2) iff (ia1<ia2) or ( (ia1==ia2) and (ja1<ja2) )
+	 * 	i.e.: C (row major) ordering
+	 */
+
+	rsb_nnz_idx_t middle;
+	rsb_coo_idx_t so=sizeof(rsb_coo_idx_t);
+
+	rsb_coo_idx_t * ileft  ;
+	rsb_coo_idx_t * iright ;
+ifelse(blockoriented,`VBR',`
+	rsb_coo_idx_t * bileft  ;
+	rsb_coo_idx_t * biright ;
+',`')dnl
+	rsb_coo_idx_t * jleft  ;
+	rsb_coo_idx_t * jright ;
+ifelse(blockoriented,`VBR',`
+	rsb_coo_idx_t * bjleft  ;
+	rsb_coo_idx_t * bjright ;
+',`')dnl
+ifelse(RSB_M4_WANT_OMP,`1',`dnl
+	size_t tn=0;
+dnl	size_t nt;
+')dnl
+	mtype * left  ;
+	mtype * right ;
+	
+#define LIMIT 1
+	if(length==LIMIT)
+	{
+		*iresult = *iarray;
+		*jresult = *jarray;
+ifelse(blockoriented,`VBR',`
+		*biresult = *biarray;
+		*bjresult = *bjarray;
+',`')dnl
+		*(mtype*)result = *(mtype*)array;
+	}
+	if(length<=LIMIT) return RSB_ERR_NO_ERROR;
+#undef LIMIT
+	middle = length/2;
+
+ifelse(blockoriented,`VBR',`
+	bileft  = biarray;
+	bjleft  = bjarray;
+	biright = biarray+middle;
+	bjright = bjarray+middle;
+',`')dnl
+	left  = array;
+	right  = array+middle;
+	ileft  = iarray;
+	jleft  = jarray;
+	iright = iarray+middle;
+	jright = jarray+middle;
+
+ifelse(`0',`1',`dnl 20121016 
+ifelse(RSB_M4_WANT_OMP,`1',`dnl
+`#'dnl
+dnl       pragma omp parallel num_threads(rsb_global_session_handle.rsb_g_threads)
+       pragma omp parallel
+	/*	FIXME : warning : experimental */
+	{
+	tn = omp_get_thread_num();
+	nt = omp_get_num_threads();
+	if(tn==0)
+')dnl
+',`dnl
+/* 20121016 commented out omp usage because it broke serial compilation */
+	{
+')dnl
+	RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_NAME(mtype,blockoriented)
+	( ileft, jleft,
+ifelse(blockoriented,`VBR',`dnl
+		bileft, bjleft,
+',`')dnl
+ifelse(blockoriented,`BCSR',` mb, kb, ',`')dnl
+		left,   middle,
+	        iresult  ,       jresult,
+ifelse(blockoriented,`VBR',`dnl
+		biresult, bjresult,
+',`')dnl
+		result         );
+
+ifelse(RSB_M4_WANT_OMP,`1',`dnl
+	if(tn==1)
+')dnl
+	RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_NAME(mtype,blockoriented)
+	(iright, jright,
+ifelse(blockoriented,`VBR',`dnl
+		biright, bjright,
+',`')dnl
+ifelse(blockoriented,`BCSR',` mb, kb, ',`')dnl
+		right, length-middle,  iresult+middle  ,jresult+middle,
+ifelse(blockoriented,`VBR',`dnl
+		biresult+middle, bjresult+middle,
+',`')dnl
+	((mtype*)result)+middle  );
+ifelse(RSB_M4_WANT_OMP,`1',`dnl
+	}
+',`dnl
+	}
+')dnl
+
+	RSB_MEMCPY(ileft ,iresult       ,so*middle);
+	RSB_MEMCPY(jleft ,jresult       ,so*middle);
+ifelse(blockoriented,`VBR',`
+	RSB_MEMCPY(bileft ,biresult       ,so*middle);
+	RSB_MEMCPY(bjleft ,bjresult       ,so*middle);
+',`')dnl
+	RSB_MEMCPY(  left, result       ,sizeof(mtype)*middle);
+	RSB_MEMCPY(iright,iresult+middle,so*(length-middle));
+	RSB_MEMCPY(jright,jresult+middle,so*(length-middle));
+ifelse(blockoriented,`VBR',`
+	RSB_MEMCPY(biright ,biresult+middle       ,so*(length-middle));
+	RSB_MEMCPY(bjright ,bjresult+middle       ,so*(length-middle));
+',`')dnl
+	RSB_MEMCPY( right, ((mtype*)result)+middle ,sizeof(mtype)*(length-middle));
+
+	RSB_M4_MERGESORT_ON_COORDINATES_MERGE_FUNCTION_NAME(mtype,blockoriented)dnl
+		(
+			ileft,iright,iresult,
+			jleft,jright,jresult,
+ifelse(blockoriented,`BCSR',`	mb,kb,')
+ifelse(blockoriented,`VBR',`dnl
+			bileft, biright, biresult,
+',`')dnl
+ifelse(blockoriented,`VBR',`dnl
+			bjleft, bjright, bjresult,
+',`')dnl
+			left, right, result,
+dnl			0,0,0,
+dnl			middle,length-middle,length
+			middle,length-middle
+			);
+	return RSB_ERR_NO_ERROR;	/* ! */
+}
+dnl
+popdef(`blockoriented')dnl
+popdef(`mtype')dnl
+dnl
+')
+dnl
+dnl
+dnl
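
(Likewise, a compact C sketch of the recursive driver just defined: split
in half, recurse into the result buffers, copy the sorted halves back over
the inputs, then merge. It reuses merge_coo() from the sketch above and is
editorial, not part of the upstream file.)

    #include <string.h>

    /* merge_coo() as in the previous sketch. */
    void merge_coo(const int *il, const int *jl, const double *vl, int nl,
                   const int *ir, const int *jr, const double *vr, int nr,
                   int *io, int *jo, double *vo);

    /* Out-of-place mergesort of a COO triple (ia, ja, va) into (ir, jr, vr). */
    void mergesort_coo(int *ia, int *ja, double *va, int n,
                       int *ir, int *jr, double *vr)
    {
        int m = n / 2;

        if (n <= 1)
        {
            if (n == 1) { *ir = *ia; *jr = *ja; *vr = *va; }
            return;
        }
        /* Sort each half into the result buffers... */
        mergesort_coo(ia,     ja,     va,     m,     ir,     jr,     vr);
        mergesort_coo(ia + m, ja + m, va + m, n - m, ir + m, jr + m, vr + m);
        /* ...copy the sorted halves back over the inputs... */
        memcpy(ia, ir, sizeof(*ia) * n);
        memcpy(ja, jr, sizeof(*ja) * n);
        memcpy(va, vr, sizeof(*va) * n);
        /* ...and merge them into the result. */
        merge_coo(ia, ja, va, m, ia + m, ja + m, va + m, n - m, ir, jr, vr);
    }
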
diff --git a/missing b/missing
new file mode 100755
index 0000000..86a8fc3
--- /dev/null
+++ b/missing
@@ -0,0 +1,331 @@
+#! /bin/sh
+# Common stub for a few missing GNU programs while installing.
+
+scriptversion=2012-01-06.13; # UTC
+
+# Copyright (C) 1996, 1997, 1999, 2000, 2002, 2003, 2004, 2005, 2006,
+# 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
+# Originally by François Pinard <pinard@iro.umontreal.ca>, 1996.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+if test $# -eq 0; then
+  echo 1>&2 "Try \`$0 --help' for more information"
+  exit 1
+fi
+
+run=:
+sed_output='s/.* --output[ =]\([^ ]*\).*/\1/p'
+sed_minuso='s/.* -o \([^ ]*\).*/\1/p'
+
+# In the cases where this matters, `missing' is being run in the
+# srcdir already.
+if test -f configure.ac; then
+  configure_ac=configure.ac
+else
+  configure_ac=configure.in
+fi
+
+msg="missing on your system"
+
+case $1 in
+--run)
+  # Try to run requested program, and just exit if it succeeds.
+  run=
+  shift
+  "$@" && exit 0
+  # Exit code 63 means version mismatch.  This often happens
+  # when the user tries to use an ancient version of a tool on
+  # a file that requires a minimum version.  In this case we
+  # should proceed as if the program had been absent, or as if
+  # --run hadn't been passed.
+  if test $? = 63; then
+    run=:
+    msg="probably too old"
+  fi
+  ;;
+
+  -h|--h|--he|--hel|--help)
+    echo "\
+$0 [OPTION]... PROGRAM [ARGUMENT]...
+
+Handle \`PROGRAM [ARGUMENT]...' for when PROGRAM is missing, or return an
+error status if there is no known handling for PROGRAM.
+
+Options:
+  -h, --help      display this help and exit
+  -v, --version   output version information and exit
+  --run           try to run the given command, and emulate it if it fails
+
+Supported PROGRAM values:
+  aclocal      touch file \`aclocal.m4'
+  autoconf     touch file \`configure'
+  autoheader   touch file \`config.h.in'
+  autom4te     touch the output file, or create a stub one
+  automake     touch all \`Makefile.in' files
+  bison        create \`y.tab.[ch]', if possible, from existing .[ch]
+  flex         create \`lex.yy.c', if possible, from existing .c
+  help2man     touch the output file
+  lex          create \`lex.yy.c', if possible, from existing .c
+  makeinfo     touch the output file
+  yacc         create \`y.tab.[ch]', if possible, from existing .[ch]
+
+Version suffixes to PROGRAM as well as the prefixes \`gnu-', \`gnu', and
+\`g' are ignored when checking the name.
+
+Send bug reports to <bug-automake@gnu.org>."
+    exit $?
+    ;;
+
+  -v|--v|--ve|--ver|--vers|--versi|--versio|--version)
+    echo "missing $scriptversion (GNU Automake)"
+    exit $?
+    ;;
+
+  -*)
+    echo 1>&2 "$0: Unknown \`$1' option"
+    echo 1>&2 "Try \`$0 --help' for more information"
+    exit 1
+    ;;
+
+esac
+
+# normalize program name to check for.
+program=`echo "$1" | sed '
+  s/^gnu-//; t
+  s/^gnu//; t
+  s/^g//; t'`
+
+# Now exit if we have it, but it failed.  Also exit now if we
+# don't have it and --version was passed (most likely to detect
+# the program).  This is about non-GNU programs, so use $1 not
+# $program.
+case $1 in
+  lex*|yacc*)
+    # Not GNU programs, they don't have --version.
+    ;;
+
+  *)
+    if test -z "$run" && ($1 --version) > /dev/null 2>&1; then
+       # We have it, but it failed.
+       exit 1
+    elif test "x$2" = "x--version" || test "x$2" = "x--help"; then
+       # Could not run --version or --help.  This is probably someone
+       # running `$TOOL --version' or `$TOOL --help' to check whether
+       # $TOOL exists and not knowing $TOOL uses missing.
+       exit 1
+    fi
+    ;;
+esac
+
+# If it does not exist, or fails to run (possibly an outdated version),
+# try to emulate it.
+case $program in
+  aclocal*)
+    echo 1>&2 "\
+WARNING: \`$1' is $msg.  You should only need it if
+         you modified \`acinclude.m4' or \`${configure_ac}'.  You might want
+         to install the \`Automake' and \`Perl' packages.  Grab them from
+         any GNU archive site."
+    touch aclocal.m4
+    ;;
+
+  autoconf*)
+    echo 1>&2 "\
+WARNING: \`$1' is $msg.  You should only need it if
+         you modified \`${configure_ac}'.  You might want to install the
+         \`Autoconf' and \`GNU m4' packages.  Grab them from any GNU
+         archive site."
+    touch configure
+    ;;
+
+  autoheader*)
+    echo 1>&2 "\
+WARNING: \`$1' is $msg.  You should only need it if
+         you modified \`acconfig.h' or \`${configure_ac}'.  You might want
+         to install the \`Autoconf' and \`GNU m4' packages.  Grab them
+         from any GNU archive site."
+    files=`sed -n 's/^[ ]*A[CM]_CONFIG_HEADER(\([^)]*\)).*/\1/p' ${configure_ac}`
+    test -z "$files" && files="config.h"
+    touch_files=
+    for f in $files; do
+      case $f in
+      *:*) touch_files="$touch_files "`echo "$f" |
+				       sed -e 's/^[^:]*://' -e 's/:.*//'`;;
+      *) touch_files="$touch_files $f.in";;
+      esac
+    done
+    touch $touch_files
+    ;;
+
+  automake*)
+    echo 1>&2 "\
+WARNING: \`$1' is $msg.  You should only need it if
+         you modified \`Makefile.am', \`acinclude.m4' or \`${configure_ac}'.
+         You might want to install the \`Automake' and \`Perl' packages.
+         Grab them from any GNU archive site."
+    find . -type f -name Makefile.am -print |
+	   sed 's/\.am$/.in/' |
+	   while read f; do touch "$f"; done
+    ;;
+
+  autom4te*)
+    echo 1>&2 "\
+WARNING: \`$1' is needed, but is $msg.
+         You might have modified some files without having the
+         proper tools for further handling them.
+         You can get \`$1' as part of \`Autoconf' from any GNU
+         archive site."
+
+    file=`echo "$*" | sed -n "$sed_output"`
+    test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
+    if test -f "$file"; then
+	touch $file
+    else
+	test -z "$file" || exec >$file
+	echo "#! /bin/sh"
+	echo "# Created by GNU Automake missing as a replacement of"
+	echo "#  $ $@"
+	echo "exit 0"
+	chmod +x $file
+	exit 1
+    fi
+    ;;
+
+  bison*|yacc*)
+    echo 1>&2 "\
+WARNING: \`$1' is $msg.  You should only need it if
+         you modified a \`.y' file.  You may need the \`Bison' package
+         in order for those modifications to take effect.  You can get
+         \`Bison' from any GNU archive site."
+    rm -f y.tab.c y.tab.h
+    if test $# -ne 1; then
+        eval LASTARG=\${$#}
+	case $LASTARG in
+	*.y)
+	    SRCFILE=`echo "$LASTARG" | sed 's/y$/c/'`
+	    if test -f "$SRCFILE"; then
+	         cp "$SRCFILE" y.tab.c
+	    fi
+	    SRCFILE=`echo "$LASTARG" | sed 's/y$/h/'`
+	    if test -f "$SRCFILE"; then
+	         cp "$SRCFILE" y.tab.h
+	    fi
+	  ;;
+	esac
+    fi
+    if test ! -f y.tab.h; then
+	echo >y.tab.h
+    fi
+    if test ! -f y.tab.c; then
+	echo 'main() { return 0; }' >y.tab.c
+    fi
+    ;;
+
+  lex*|flex*)
+    echo 1>&2 "\
+WARNING: \`$1' is $msg.  You should only need it if
+         you modified a \`.l' file.  You may need the \`Flex' package
+         in order for those modifications to take effect.  You can get
+         \`Flex' from any GNU archive site."
+    rm -f lex.yy.c
+    if test $# -ne 1; then
+        eval LASTARG=\${$#}
+	case $LASTARG in
+	*.l)
+	    SRCFILE=`echo "$LASTARG" | sed 's/l$/c/'`
+	    if test -f "$SRCFILE"; then
+	         cp "$SRCFILE" lex.yy.c
+	    fi
+	  ;;
+	esac
+    fi
+    if test ! -f lex.yy.c; then
+	echo 'main() { return 0; }' >lex.yy.c
+    fi
+    ;;
+
+  help2man*)
+    echo 1>&2 "\
+WARNING: \`$1' is $msg.  You should only need it if
+	 you modified a dependency of a manual page.  You may need the
+	 \`Help2man' package in order for those modifications to take
+	 effect.  You can get \`Help2man' from any GNU archive site."
+
+    file=`echo "$*" | sed -n "$sed_output"`
+    test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
+    if test -f "$file"; then
+	touch $file
+    else
+	test -z "$file" || exec >$file
+	echo ".ab help2man is required to generate this page"
+	exit $?
+    fi
+    ;;
+
+  makeinfo*)
+    echo 1>&2 "\
+WARNING: \`$1' is $msg.  You should only need it if
+         you modified a \`.texi' or \`.texinfo' file, or any other file
+         indirectly affecting the aspect of the manual.  The spurious
+         call might also be the consequence of using a buggy \`make' (AIX,
+         DU, IRIX).  You might want to install the \`Texinfo' package or
+         the \`GNU make' package.  Grab either from any GNU archive site."
+    # The file to touch is that specified with -o ...
+    file=`echo "$*" | sed -n "$sed_output"`
+    test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
+    if test -z "$file"; then
+      # ... or it is the one specified with @setfilename ...
+      infile=`echo "$*" | sed 's/.* \([^ ]*\) *$/\1/'`
+      file=`sed -n '
+	/^@setfilename/{
+	  s/.* \([^ ]*\) *$/\1/
+	  p
+	  q
+	}' $infile`
+      # ... or it is derived from the source name (dir/f.texi becomes f.info)
+      test -z "$file" && file=`echo "$infile" | sed 's,.*/,,;s,.[^.]*$,,'`.info
+    fi
+    # If the file does not exist, the user really needs makeinfo;
+    # let's fail without touching anything.
+    test -f $file || exit 1
+    touch $file
+    ;;
+
+  *)
+    echo 1>&2 "\
+WARNING: \`$1' is needed, and is $msg.
+         You might have modified some files without having the
+         proper tools for further handling them.  Check the \`README' file,
+         it often tells you about the needed prerequisites for installing
+         this package.  You may also peek at any GNU archive site, in case
+         some other package would contain this missing \`$1' program."
+    exit 1
+    ;;
+esac
+
+exit 0
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-time-zone: "UTC"
+# time-stamp-end: "; # UTC"
+# End:
diff --git a/ot-infty_norm.c b/ot-infty_norm.c
new file mode 100644
index 0000000..d67b186
--- /dev/null
+++ b/ot-infty_norm.c
@@ -0,0 +1 @@
+static int f(){return 0;}
diff --git a/ot-rowssums.c b/ot-rowssums.c
new file mode 100644
index 0000000..d67b186
--- /dev/null
+++ b/ot-rowssums.c
@@ -0,0 +1 @@
+static int f(){return 0;}
diff --git a/ot-scale.c b/ot-scale.c
new file mode 100644
index 0000000..d67b186
--- /dev/null
+++ b/ot-scale.c
@@ -0,0 +1 @@
+static int f(){return 0;}
diff --git a/ot-spmv.c b/ot-spmv.c
new file mode 100644
index 0000000..d67b186
--- /dev/null
+++ b/ot-spmv.c
@@ -0,0 +1 @@
+static int f(){return 0;}
diff --git a/ot-spmv_sasa.c b/ot-spmv_sasa.c
new file mode 100644
index 0000000..d67b186
--- /dev/null
+++ b/ot-spmv_sasa.c
@@ -0,0 +1 @@
+static int f(){return 0;}
diff --git a/ot-spmv_sxsa.c b/ot-spmv_sxsa.c
new file mode 100644
index 0000000..d67b186
--- /dev/null
+++ b/ot-spmv_sxsa.c
@@ -0,0 +1 @@
+static int f(){return 0;}
diff --git a/ot-spmv_uaua.c b/ot-spmv_uaua.c
new file mode 100644
index 0000000..d67b186
--- /dev/null
+++ b/ot-spmv_uaua.c
@@ -0,0 +1 @@
+static int f(){return 0;}
diff --git a/ot-spmv_uauz.c b/ot-spmv_uauz.c
new file mode 100644
index 0000000..d67b186
--- /dev/null
+++ b/ot-spmv_uauz.c
@@ -0,0 +1 @@
+static int f(){return 0;}
diff --git a/ot-spmv_unua.c b/ot-spmv_unua.c
new file mode 100644
index 0000000..d67b186
--- /dev/null
+++ b/ot-spmv_unua.c
@@ -0,0 +1 @@
+static int f(){return 0;}
diff --git a/ot-spmv_uxua.c b/ot-spmv_uxua.c
new file mode 100644
index 0000000..d67b186
--- /dev/null
+++ b/ot-spmv_uxua.c
@@ -0,0 +1 @@
+static int f(){return 0;}
diff --git a/ot-spsv.c b/ot-spsv.c
new file mode 100644
index 0000000..d67b186
--- /dev/null
+++ b/ot-spsv.c
@@ -0,0 +1 @@
+static int f(){return 0;}
diff --git a/ot-spsv_sxsx.c b/ot-spsv_sxsx.c
new file mode 100644
index 0000000..d67b186
--- /dev/null
+++ b/ot-spsv_sxsx.c
@@ -0,0 +1 @@
+static int f(){return 0;}
diff --git a/ot-spsv_uxua.c b/ot-spsv_uxua.c
new file mode 100644
index 0000000..d67b186
--- /dev/null
+++ b/ot-spsv_uxua.c
@@ -0,0 +1 @@
+static int f(){return 0;}
diff --git a/ot.c b/ot.c
new file mode 100644
index 0000000..e456838
--- /dev/null
+++ b/ot.c
@@ -0,0 +1,2 @@
+#include <stdio.h>
+int main(){printf("sorry, you did not install octave, so the octave-based tester is disabled\n");return 0;}
diff --git a/ot.m b/ot.m
new file mode 100644
index 0000000..825a0eb
--- /dev/null
+++ b/ot.m
@@ -0,0 +1,735 @@
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+#
+## octave-based tester: generates a program with hard-coded matrices, right-hand sides, and result vectors.
+# Assumes that octave/matlab computations are correct.
+
+# Requires octave-3.6 or newer.
+#
+# NOTE : we have little control over the nonzero density!
+# NOTE : this tester is largely superseded by ./rsbench -Q
+# NOTE : integer overflows may occur with very large matrices or ill-chosen matrix-building parameters (not so unlikely).
+# TODO :
+# * a failure/success counter, and more verbose errors
+# * someday should split in pieces (separate files) the octave based tester and lots of code.
+# * s/printf/RSB_ERROR/g
+# * should run some test on non square matrices
+
+source("./sbtg.m")
+
+function dump_transposition(a,br,bc)
+	extra=br+bc; # padding far more than necessary: will do no harm
+	m=size(a,1);
+	k=size(a,2);
+	ts="int";
+	printf("/* type is %s */\n",ts);
+	printf("\n");
+	s=sparse(a);
+	nz=nnz(a);
+	m=size(a,1);
+	k=size(a,2);
+	I=zeros(nz,1);
+	JI=zeros(nz,1);
+	SVA=zeros(nz,1);
+	l=1;
+	for i=1:m;
+	for j=1:k;
+		if(s(i,j)!=0)
+			I(l)=i-1;
+			JI(l)=j-1;
+			SVA(l)=s(i,j);
+			++l;
+		endif
+	endfor
+	endfor
+	its="rsb_coo_idx_t";
+	printf("\n%s NIA[]=",its);
+	printf("%s",dump_c_vec(I));
+	printf("\n%s NJA[]=",its);
+	printf("%s",dump_c_vec(JI));
+	printf("\n%s NSVA[]=",ts);
+	printf("%s",dump_c_vec(SVA*0));
+	printf("\n%s SVA[]=",ts);
+	printf("%s",dump_c_vec(SVA));
+	printf("\n");
+end 
+
+function dump_scale(a,br,bc,transi)
+	extra=br+bc; # padding far more than necessary: will do no harm
+	m=size(a,1);
+	k=size(a,2);
+	SV=zeros(m+extra,1);
+	SV(1:m)=linspace(1,m,m);
+	ts="int";
+	printf("/* type is %s */\n",ts);
+	printf("\n");
+	s=sparse(a);
+	nz=nnz(a);
+	m=size(a,1);
+	k=size(a,2);
+	I=zeros(nz,1);
+	JI=zeros(nz,1);
+	SVA=zeros(nz,1);
+	l=1;
+	for i=1:m;
+	for j=1:k;
+		if(s(i,j)!=0)
+			I(l)=i-1;
+			JI(l)=j-1;
+			if transi>1
+				SVA(l)=s(i,j)*SV(j);
+			else
+				SVA(l)=s(i,j)*SV(i);
+			endif
+			++l;
+		endif
+	endfor
+	endfor
+	its="rsb_coo_idx_t";
+	printf("\n%s NIA[]=",its);
+	printf("%s",dump_c_vec(I));
+	printf("\n%s NJA[]=",its);
+	printf("%s",dump_c_vec(JI));
+	printf("\n%s SV[]=",ts);
+	printf("%s",dump_c_vec(SV));
+# the following will not work with integers :)
+#	printf("\n%s ISV[]=",ts);
+#	dump_c_vec(1.0/SV)
+	printf("\n%s NSVA[]=",ts);
+	printf("%s",dump_c_vec(SVA*0));
+	printf("\n%s SVA[]=",ts);
+	printf("%s",dump_c_vec(SVA));
+	printf("\n");
+end 
+
+function dump_negation(a,br,bc)
+	extra=br+bc; # padding far more than necessary: will do no harm
+	m=size(a,1);
+	k=size(a,2);
+	dx=zeros(k+extra,1);
+	d =zeros(k+extra,1);
+	ts="int";
+	printf("/* type is %s */\n",ts);
+	printf("\n");
+	s=sparse(a);
+	nz=nnz(a);
+	m=size(a,1);
+	k=size(a,2);
+	I=zeros(nz,1);
+	JI=zeros(nz,1);
+	V=zeros(nz,1);
+	l=1;
+	for i=1:m;
+	for j=1:k;
+		if(s(i,j)!=0)
+			I(l)=i-1;
+			JI(l)=j-1;
+			V(l)=s(i,j);
+			++l;
+		endif
+	endfor
+	endfor
+	V=-V;
+	its="rsb_coo_idx_t";
+	printf("\n%s NIA[]=",its);
+	printf("%s",dump_c_vec(I));
+	printf("\n%s NJA[]=",its);
+	printf("%s",dump_c_vec(JI));
+	printf("\n/*const*/ %s NVA[]=",ts);
+	printf("%s",dump_c_vec(V));
+	printf("\n%s NNVA[]=",ts);
+	printf("%s",dump_c_vec(V*0));
+	printf("\n");
+end 
+
+function dump_infty_norm(a,br,bc,transi)
+	extra=br+bc; # padding far more than necessary: will do no harm
+	m=size(a,1);
+	k=size(a,2);
+	dx=zeros(k+extra,1);
+	d =zeros(k+extra,1);
+	ts="int";
+	printf("/* type is %s */\n",ts);
+	printf("\n");
+	if transi==2
+		printf("const %s in= %d;\n",ts,norm(a',Inf));
+	else
+		printf("const %s in= %d;\n",ts,norm(a,Inf));
+	endif
+	printf("\n");
+	printf("%s inx=0;",ts);
+end 
+
+function dump_getdiag(a,br,bc)
+	extra=br+bc; # padding far more than necessary: will do no harm
+	m=size(a,1);
+	k=size(a,2);
+	dx=zeros(k+extra,1);
+	d =zeros(k+extra,1);
+	ts="int";
+	printf("/* type is %s */\n",ts);
+	printf("\n");
+	printf("/* the result diagonal vector */\n",ts);
+	printf("const %s d[]=",ts);
+	printf("%s",dump_c_vec(diag(a)))
+	printf("\n");
+	printf("/* the vector which will store the result */\n",ts);
+	printf("%s dx[]=",ts);
+	printf("%s",dump_c_vec(dx))
+end 
+
+function dump_getrow(a,br,bc)
+	extra=br+bc; # padding far more than necessary: will do no harm
+	m=size(a,1);
+	k=size(a,2);
+	r1x=zeros(k+extra,1);
+	r1 =zeros(k+extra,1);
+	ts="int";
+	printf("/* type is %s */\n",ts);
+	printf("\n");
+	printf("/* the result vector */\n",ts);
+	for i=1:m
+	printf("const %s r%d[]=",ts,i);
+		r1 (1:k)=a(i,1:k);
+	printf("%s",dump_c_vec(r1))
+	printf("\n");
+	endfor
+	printf("/* the vector which will store the result */\n",ts);
+	printf("%s r1x[]=",ts);
+	printf("%s",dump_c_vec(r1x))
+end 
+
+function dump_spmv(alpha,a,br,bc,transi)
+	dump_spmm(alpha,a,br,bc,1,transi);
+end 
+
+function dump_spmm(alpha,a,br,bc,nrhs,transi)
+	extra=br+bc; # padding far more than necessary: will do no harm
+	m=size(a,1);
+	k=size(a,2);
+	b=zeros(k+extra,nrhs);
+	b(1:k,:)=ones(k,nrhs);
+	for nrhsi=1:nrhs
+		b(1:k,nrhsi)*=nrhsi;
+	endfor
+	x=zeros(m+extra,nrhs);
+	r=zeros(m+extra,nrhs);
+	if transi>1
+		x(1:m,:)=alpha*a'*b(1:k,:);
+	else
+		x(1:m,:)=alpha*a *b(1:k,:);
+	endif
+	ts="int";
+	if m<20 
+		printf("/* \n");
+		a
+		printf("*/ \n");
+	endif
+	printf("/* type is %s */\n",ts);
+	printf("/* matrix in coo */\n",ts);
+	printf("\n");
+	printf("/* the vector which will store the result */\n",ts);
+	printf("%s X[]=",ts);
+	printf("%s",dump_c_vec(r));
+	printf("\n");
+	printf("/* the result vector */\n",ts);
+	printf("const %s R[]=",ts);
+	printf("%s",dump_c_vec(x));
+	printf("\n");
+	printf("/* the right hand side vector */\n",ts);
+	printf("const %s B[]=",ts);
+	printf("%s",dump_c_vec(b));
+end 
+
+global aops="";
+global oops="";
+global ops="";
+global op="";
+global main=0;
+
+if nargin == 0
+	# default unrolls
+	rua=cua=linspace(1,4,4);
+elseif nargin == 1
+	# same unrolls
+	rua=eval(["[",cell2mat(argv()(1)),"]"]);
+	cua=eval(["[",cell2mat(argv()(1)),"]"]);
+endif
+
+if nargin >=2
+	# different unrolls
+	rua=eval(["[",cell2mat(argv()(1)),"]"]);
+	cua=eval(["[",cell2mat(argv()(2)),"]"]);
+endif
+
+
+if nargin >= 5
+	op=argv()(5){1};
+	if strmatch(op,"main","exact")
+		main=1;
+		op=0;
+	endif
+endif
+
+if nargin >= 4
+#	ops=eval(["[",cell2mat(argv()(3)),"]"]);
+	ops=argv()(4){1};
+	oops=ops;
+	#if want_op("other")
+#	ops=strcat(ops,",");
+	ops=char(strsplit(ops,","));
+endif
+
+if nargin >= 3
+	aops=argv()(3){1};
+	aops=char(strsplit(aops,","));
+endif
+global extra_ops=("transposition,getdiag,getrow,");
+
+
+function a=want_op(o)
+	global oops;
+	global ops;
+	global op;
+	global main;
+	#a=strfind(ops,o)
+	#a=0;
+	#a=(strmatch(op,o))
+	a=0;
+	a=findstr(strcat(oops,","),o);
+	if a
+		a=a(1);
+	else
+		a=0;
+	endif
+#	oops,op,o,a
+#	for i=1:size(ops,1)
+#		if findstr(ops(i,:),op)
+#			a+=1;
+#		endif
+#	endfor
+end
+
+
+printf("/**\n at file\n at brief integer type coverage testing program\n\n*/\n");
+printf("#include \"rsb.h\"\n");
+printf("#include \"rsb_internals.h\"\n");
+printf("#include <string.h>\n");
+printf("/*\n");
+ops
+rua
+cua
+op
+main
+printf("*/\n");
+
+if main
+	printf("%s",rsb_octave_doc_c_header);
+	printf("%s\n","RSB_INTERNALS_COMMON_HEAD_DECLS");
+	printf("int main%s()\n{\n",op);
+	printf("rsb_err_t errval=RSB_ERR_NO_ERROR;int fi,Ri,Ti,Ci;int octave_failed_tests=0;\n");
+	for oi=1:size(ops,1)
+		printf("if((octave_failed_tests+=main_%s()))\n\tgoto err;\n",ops(oi,:));
+	endfor
+	if main
+		oops=strcat("extra_ops",",",oops);
+	endif
+#	printf("int main()\n{return 0;}\n");
+#	printf("return 0;\n");
+#	printf("err: return -1;\n");
+#	printf(";}\n");
+#	quit;
+else
+	printf("int main_%s()\n{\n",op);
+	printf("rsb_err_t errval=RSB_ERR_NO_ERROR;int fi,Ri,Ti,Ci;int octave_failed_tests=0;\n");
+	#if !strfind(ops,op)
+#	if !strcmp(ops,strcat(op," "))
+#		printf("return 0;}\n");
+#	endif
+#	ops, op
+	if want_op(op)==0
+		printf("return 0;\n");
+#		printf("}\n"); quit; #
+	endif
+endif
+
+
+# We should split the test program into pieces: compiling this sequence of
+# subprograms takes longer than compiling the individual pieces separately.
+	if want_here_op("spsv")
+		# FIXME : should build arrays with all supported format flags, for each operation.
+printf("rsb_flags_t Cflagsa[]={  0 }; \n");
+	else
+printf("rsb_flags_t Cflagsa[]={ /*RSB_FLAG_WANT_COLUMN_MAJOR_ORDER ,*/ 0 }; \n");
+	endif
+printf("rsb_flags_t Rflagsa[]={ RSB_FLAG_QUAD_PARTITIONING, 0 }; \n");
+#printf("rsb_flags_t Tflagsa[]=RSB_ROWS_TRANSPOSITIONS_ARRAY; \n");
+printf("rsb_flags_t flagsa[]={ \n");
+
+#printf("#ifdef RSB_FLAG_DEFAULT_STORAGE_FLAGS\n RSB_FLAG_DEFAULT_STORAGE_FLAGS, \n#endif /* RSB_FLAG_DEFAULT_STORAGE_FLAGS */\n");
+#printf("RSB_FLAG_DEFAULT_STORAGE_FLAGS\n");
+printf("RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS \n");
+printf("#ifdef RSB_MATRIX_STORAGE_BCSC \n RSB_FLAG_WANT_BCSS_STORAGE, \n#endif /*RSB_MATRIX_STORAGE_BCSC*/\n");
+printf("#ifdef RSB_MATRIX_STORAGE_LR   \n RSB_FLAG_WANT_LINKED_STORAGE, \n#endif /* RSB_MATRIX_STORAGE_LR */   \n");
+printf("#ifdef RSB_MATRIX_STORAGE_VBR  \n  0, \n#endif /* RSB_MATRIX_STORAGE_VBR */   \n");
+printf("};\n");
+flagsa=[
+	# FIXME: and what about halfword switches ?
+	"RSB_FLAG_EXPERIMENTAL_SWITCH_TO_COO";
+	"RSB_FLAG_WANT_BCSS_STORAGE";
+	"RSB_FLAG_WANT_LINKED_STORAGE ";
+	"0"; ];
+printf("if(rsb_lib_init(RSB_NULL_INIT_OPTIONS))\n\tgoto err;\n");
+#flagsa=[ "RSB_FLAG_WANT_BCSS_STORAGE"; "RSB_FLAG_WANT_BCSS_STORAGE|RSB_FLAG_WANT_COLUMN_MAJOR_ORDER "; ];
+
+###############################################################################
+printf("{\n");
+#for fi=1:size(flagsa,1)
+#printf("for(Ti=0;Ti<sizeof(Tflagsa)/sizeof(rsb_trans_t);++Ti)\n");
+printf("for(Ri=0;Ri<sizeof(Rflagsa)/sizeof(rsb_flags_t);++Ri)\n");
+printf("for(Ci=0;Ci<sizeof(Cflagsa)/sizeof(rsb_flags_t);++Ci)\n");
+printf("for(fi=0;fi<sizeof(flagsa)/sizeof(rsb_flags_t);++fi)\n");
+#printf("if(Tflagsa[Ti]!=RSB_INVALID_TRANS)\n");
+	if want_here_op("spsv")
+#printf("if(Tflagsa[Ti]!=RSB_INVALID_TRANS)\n");
+	endif
+printf("{\n");
+#flags=flagsa(fi,:);
+#printf("rsb_flags_t flags=%s; /*:) */\n",flags);
+printf("rsb_flags_t flags=flagsa[fi] | Rflagsa[Ri] | Cflagsa[Ci];\n");
+#printf("rsb_flags_t trans=Tflagsa[Ti];\n");
+printf("rsb_flags_t trans=RSB_NUMERICAL_TYPE_INVALID_TYPE ;\n");
+#printf("flags|=RSB_FLAG_SHOULD_DEBUG;\n"); # for heavy debugging only
+#printf("printf(\"%%d\\n\",flags);\n"); # print flags
+
+
+l=1;u=20;
+l=1;u=8;
+l=1;u=10;
+#l=1;u=4;
+#l=5;u=8;
+#l=3;u=3;
+#l=1;u=1;
+#for transi=1:3 
+	if want_here_op("spsv")
+		printf("flags |= RSB_FLAG_LOWER_TRIANGULAR;\n");
+	endif
+for transi=1:2 # this is integer testing, dude ..
+	if want_here_op("spsv") && transi==2 ;  continue ; endif
+	if transi==1
+		printf("trans=RSB_TRANSPOSITION_N;\n");trans="RSB_TRANSPOSITION_N";
+	elseif transi==2
+		printf("trans=RSB_TRANSPOSITION_T;\n");trans="RSB_TRANSPOSITION_T";
+	elseif transi==3
+		printf("trans=RSB_TRANSPOSITION_C;\n");
+	endif
+for n=l:u
+# TODO : should adapt to available blockings 
+for bri=1:length(rua)
+for bci=1:length(cua)
+	br=rua(bri);
+	bc=cua(bci);
+	incx=1;incy=1;
+	bis="";
+	if br * bc != 1 ;  bis=sprintf(" blocked %d x %d",br,bc) ; endif # blocking info string
+	fprintf(stderr,"creating test matrix %d/%d%s\n",n,u,bis);
+	#fprintf(stderr,"creating test matrix %d/%d blocked %d x %d\n",n,u,br,bc);
+	printf("{\n");
+
+	its="rsb_coo_idx_t";
+	ts="int";
+	mti=1;
+	mdi=1;
+
+	printf("struct rsb_mtx_t *mtxAp=NULL;\n");
+	printf("rsb_err_t errval=RSB_ERR_NO_ERROR;\n");
+	printf("rsb_flags_t typecode=RSB_NUMERICAL_TYPE_INT;\n");
+	printf("int Mb=%d,Kb=%d; /* plain unblocked */\n",br,bc);
+	printf("char buf[RSB_CONST_MATRIX_IMPLEMENTATION_CODE_STRING_MAX_LENGTH];/* Flawfinder: ignore */\n");
+	
+	a=gen_test_matrix(op,n);
+#	if transi>=2 ; a=a'; endif
+#	if transi==3 ; a=conj(a); endif
+	ts="int";
+	printf("%s\n",dump_c_coo(a,ts,"c"))
+	printf("\nif((mtxAp=rsb_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,nr,nc,Mb,Kb,flags,&errval))==NULL)\ngoto err;");
+	printf("\nprintf(\"testing matrix of type %%s\\t%%s\\n\",rsb__sprint_matrix_implementation_code2(mtxAp,buf,flags),rsb__sprint_matrix_implementation_code(mtxAp,\"*\",flags,buf));\n");
+
+	if want_here_op("v")
+	printf("/* begin spmv_uaua test */\n");
+	dump_spmv(1,a,br,bc,transi)
+#	printf("\nif((mtxAp=rsb_allocate_bcsr_sparse_matrix(VA,IA,JA,nnz,typecode,nr,nc,Mb,Kb))==NULL)\ngoto err;");
+	printf("\nif((errval=rsb_spmv(mtxAp,B,X))                    !=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\n#error internal error: wrong code generator parameters!");
+	printf("\nif(memcmp(X,R,sizeof(%s)*nr))",ts);
+	printf("\n{\n");
+	printf("\nif((errval=rsb__debug_print_vectors_diff(X,R,nr,typecode,1,1,0))!=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\nRSB_OCTAVE_ERROR(\"spmv_uaua test matrix %d/%d blocked %d x %d is not ok\\n\");\n",n,u,br,bc);
+	printf("\n}else printf(\"spmv_uaua test matrix %d/%d blocked %d x %d is ok\\n\");\n",n,u,br,bc);
+	printf("/* end spmv_uaua test */\n");
+	endif
+
+#
+	# FIXME : this test misses the += part of this operation!
+	extra=br+bc; # padding far more than necessary: will do no harm
+	if want_here_op("vadd")
+	printf("/* begin spmvadd test */\n");
+	dump_spmv(1,a,br,bc,transi) # yes
+#	printf("\nif((mtxAp=rsb_allocate_bcsr_sparse_matrix(VA,IA,JA,nnz,typecode,nr,nc,Mb,Kb))==NULL)\ngoto err;");
+#	printf("\nif((errval=rsb_fill_with_zeros(X,mtxAp->typecode,nr+%d,1))                    !=RSB_ERR_NO_ERROR)\ngoto err;",extra);
+	printf("\nif((errval=rsb_spmv_add(mtxAp,B,X))                    !=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\nif(memcmp(X,R,sizeof(%s)*nr))",ts);
+	printf("\n{\n");
+	printf("\nif((errval=rsb__debug_print_vectors_diff(X,R,nr,typecode,1,1,0))!=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\nRSB_OCTAVE_ERROR(\"spmvadd test matrix %d/%d blocked %d x %d is not ok\\n\");\n",n,u,br,bc);
+	printf("\n}else printf(\"spmvadd test matrix %d/%d blocked %d x %d is ok\\n\");\n",n,u,br,bc);
+	printf("/* end spmvadd test */\n");
+	endif
+#
+	# FIXME : this test misses the -= part of this operation!
+	if want_here_op("vsub")
+	printf("/* begin spmvsub est */\n");
+	dump_spmv(-1,a,br,bc,transi) # yes
+	printf("\nif((errval=rsb_spmv_sub(mtxAp,B,X))                    !=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\nif(memcmp(X,R,sizeof(%s)*nr))",ts);
+	printf("\n{\n");
+	printf("\nif((errval=rsb__debug_print_vectors_diff(X,R,nr,typecode,1,1,0))!=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\nRSB_OCTAVE_ERROR(\"spmvsub test matrix %d/%d blocked %d x %d is not ok\\n\");\n",n,u,br,bc);
+	printf("\n}else printf(\"spmvsub test matrix %d/%d blocked %d x %d is ok\\n\");\n",n,u,br,bc);
+	printf("/* end spmvsub test */\n");
+	endif
+#
+	if want_here_op("m")
+	# not implemented yet
+	#printf("return 0;\n");
+
+	printf("/* begin spmm test */\n");
+	dump_spmm(1,a,br,bc,2,transi)
+	printf("\nif((errval=rsb_m(mtxAp,B,X,k+%d+%d,nr+%d+%d,2))                    !=RSB_ERR_NO_ERROR)\ngoto err;",br,bc,br,bc);
+	printf("\nif(memcmp(X,R,sizeof(%s)*nr*2))",ts);
+	printf("\n{\n");
+	printf("\nif((errval=rsb__debug_print_vectors_diff(X,R,2*nr,typecode,1,1,0))!=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\nRSB_OCTAVE_ERROR(\"spmm test matrix %d/%d blocked %d x %d is not ok\\n\");\n",n,u,br,bc);
+	printf("\n}else printf(\"spmm test matrix %d/%d blocked %d x %d is ok\\n\");\n",n,u,br,bc);
+	printf("/* end spmm test */\n");
+
+	endif
+#
+	if want_here_op("getrow")
+	printf("/* FIXME: skipping getrow test because of obsolete/superseded rsb__get_row_dense() ! */\n");
+	printf("if(0)\n");
+	printf("{\n");
+	printf("/* begin getrow test */\n");
+	dump_getrow(a,br,bc)
+	for i=1:n
+		printf("\nif((errval=rsb__get_row_dense(mtxAp,r1x,%d-1))                    !=RSB_ERR_NO_ERROR)\ngoto err;",i);
+		printf("\nif(memcmp(r1x,r%d,sizeof(%s)*nc))",i,ts);
+		printf("\n{\n");
+		printf("\nif((errval=rsb__debug_print_vectors_diff(r1x,r%d,nr,typecode,1,1,0))!=RSB_ERR_NO_ERROR)\ngoto err;",i);
+		printf("\nRSB_OCTAVE_ERROR(\"getrow %d test matrix %d/%d blocked %d x %d is not ok\\n\");\n",i,n,u,br,bc);
+		printf("\n}else printf(\"getrow %d test matrix %d/%d blocked %d x %d is ok\\n\");\n",i,n,u,br,bc);
+	endfor
+	printf("/* end getrow test */\n");
+	printf("}\n");
+	endif
+#
+	if want_here_op("getdiag")
+	printf("/* begin getdiag test */\n");
+	printf("{\n");
+	dump_getdiag(a,br,bc)
+	printf("\nif((errval = rsb__dodo_getdiag /*rsb_do_getdiag <- crashes */(mtxAp,dx))                    !=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\nif(memcmp(dx,d,sizeof(%s)*nr))",ts);
+	printf("\n{\n");
+	printf("\nif((errval=rsb__debug_print_vectors_diff(dx,d,nr,typecode,1,1,0))!=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\nRSB_OCTAVE_ERROR(\"diag test matrix %d/%d blocked %d x %d is not ok\\n\");\n",n,u,br,bc);
+	printf("\n}else printf(\"diag test matrix %d/%d blocked %d x %d is ok\\n\");\n",n,u,br,bc);
+	printf("/* end getdiag test */\n");
+	printf("}\n");
+	endif
+#
+	if want_here_op("spsv")
+	printf("/* begin spsv test */\n");
+	printf("{\n");
+#	v=[-4,-1,0,1,4]; # THIS IS THE FULL, RIGHTEOUS TEST
+#	v=[-4,0,4];	# YES, BUT THIS IS JUST FASTER :)
+	v=[-4,4];
+#	v=[1];
+#	v=[-1]; # only 1 is possible in this (integer) case
+	av=v;
+	bv=v;
+	for ai=1:length(av)
+	printf("{\n");
+		alpha=av(ai);
+#		beta=bv(bi);
+		beta=0;
+		printf("%s",dump_spsv(a,mdi,br,bc,alpha,1,transi,ts,incx,incy));
+#		printf("\nint alpha=%d,beta=%d;",alpha,beta);
+		printf("\nint alpha=%d;",alpha);
+		printf("\nif((errval=rsb_spsv(trans,&alpha,mtxAp,x,1,y,1))  !=RSB_ERR_NO_ERROR)\ngoto err;");
+		printf(check_spsv(a,mti,mdi,br,bc,alpha,beta,transi,incx,incy));
+#		printf("\nif(memcmp(y,cy,sizeof(%s)*nr))",ts);
+#		printf("\n{\n");
+#		printf("\nif((errval=rsb__debug_print_vectors_diff(y,cy,nr,typecode,1,1,0))!=RSB_ERR_NO_ERROR)\ngoto err;");
+#		printf("\nRSB_OCTAVE_ERROR(\"spsv test matrix %d/%d blocked %d x %d is not ok\\n\");\n",n,u,br,bc);
+#		printf("\n}else printf(\"spsv test matrix %d/%d blocked %d x %d is ok\\n\");\n",n,u,br,bc);
+	printf("} \n");
+	endfor
+	printf("/* end spsv test */\n");
+	printf("} \n");
+	endif
+#
+	if want_here_op("csmm") ||  want_here_op("spmv")
+	printf("/* begin spmv test */\n");
+	printf("{\n");
+#	v=[-4,-1,0,1,4]; # THIS IS THE FULL, RIGHTEOUS TEST
+	v=[-4,0,4];	# YES, BUT THIS IS JUST FASTER :)
+#	v=[-4,4];
+	av=v;
+	bv=v;
+	for ai=1:length(av)
+	for bi=1:length(bv)
+	printf("{\n");
+		alpha=av(ai);
+		beta =bv(bi);
+		printf(dump_csmm(a,mti,mdi,br,bc,alpha,beta,transi,ts,incx,incy));
+		printf("\nint alpha=%d,beta=%d;",alpha,beta);
+#		printf("\nif((errval=rsb_csmm(mtxAp,x,y,&alpha,&beta))  !=RSB_ERR_NO_ERROR\ngoto err;");
+		printf("\nif((errval=rsb_spmv(trans,&alpha,mtxAp,x,1,&beta,y,1))  !=RSB_ERR_NO_ERROR)\ngoto err;");
+		printf(check_csmm(a,mti,mdi,br,bc,alpha,beta,transi,incx,incy));
+	printf("} \n");
+	endfor
+	endfor
+	printf("/* end spmv test */\n");
+	printf("} \n");
+	endif
+#
+	if want_here_op("infty_norm")
+	printf("/* begin infty_norm test */\n");
+	printf("/*");
+	trans
+	a
+	printf("*/");
+	dump_infty_norm(a,br,bc,transi)
+	printf("\nif((errval = rsb__do_infinity_norm(mtxAp,&inx,RSB_BOOL_FALSE,trans))!=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\nif(in!=inx)");
+	printf("\n{\n");
+	printf("\nprintf(\"infty_norm : should be %%d, not %%d !\\n\",in,inx);\n");
+	printf("\nRSB_OCTAVE_ERROR(\"infty_norm test matrix %d/%d blocked %d x %d is not ok\\n\");\n",n,u,br,bc);
+	printf("\n}else printf(\"infty_norm test matrix %d/%d blocked %d x %d is ok\\n\");\n",n,u,br,bc);
+	printf("/* end infty_norm test */\n");
+	endif
+#
+	if want_here_op("negation")
+#
+	printf("/* begin negation test */\n");
+	printf("{");
+	printf("/*");
+	-a
+	printf("*/");
+	dump_negation(a,br,bc)
+	printf("\nif((errval=rsb_coo_sort(   NVA , NIA, NJA, nnz, nr, nc, typecode, mtxAp->flags ))!=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\nif((errval=rsb_negation(mtxAp))                    !=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\nif((errval = rsb_mtx_get_coo(mtxAp, NNVA , NIA, NJA, RSB_FLAG_C_INDICES_INTERFACE ))!=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\nif((errval=rsb_coo_sort(  NNVA , NIA, NJA, nnz, nr, nc, typecode, mtxAp->flags ))!=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\nif(memcmp(NNVA,NVA,sizeof(%s)*nnz))",ts);
+	printf("\n{\n");
+	printf("\nif((errval=rsb__debug_print_vectors_diff(NVA,NNVA,nnz,typecode,1,1,0))!=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\nRSB_OCTAVE_ERROR(\"negation test matrix %d/%d blocked %d x %d is not ok\\n\");\n",n,u,br,bc);
+	printf("\n}else printf(\"negation test matrix %d/%d blocked %d x %d is ok\\n\");\n",n,u,br,bc);
+	printf("}");
+	printf("\nif((errval=rsb_negation(mtxAp))                    !=RSB_ERR_NO_ERROR)\ngoto err;/*negating back :) */");
+	printf("/* end negation test */\n");
+#
+	endif
+#
+	if want_here_op("transposition")
+	printf("if(0)/* FIXME: rsb_sym_transpose is still not mature enough */");
+	printf("{");
+	printf("/*");
+	a
+	printf("*/");
+	printf("/* begin transposition test */\n");
+	dump_transposition(a,br,bc)
+	printf("\nif((errval=rsb_coo_sort(   SVA , NIA, NJA, nnz, nr, nc, typecode, mtxAp->flags ))!=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\n errval=rsb_sym_transpose(mtxAp);");
+	printf("\n if(errval!=RSB_ERR_UNIMPLEMENTED_YET ){");
+	printf("\n if(errval!=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\nif((errval = rsb_mtx_get_coo(mtxAp, NSVA , NJA, NIA, RSB_FLAG_C_INDICES_INTERFACE ))!=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\nif((errval=rsb_coo_sort(   NSVA , NJA, NIA, nnz, nr, nc, typecode, mtxAp->flags ))!=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\nif(memcmp(NSVA,SVA,sizeof(%s)*nnz))",ts);
+	printf("\n{\n");
+	printf("\nif((errval=rsb__debug_print_vectors_diff(SVA,NSVA,nnz,typecode,1,1,0))!=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\nRSB_OCTAVE_ERROR(\"transpose test matrix %d/%d blocked %d x %d is not ok\\n\");\n",n,u,br,bc);
+	printf("\n}else printf(\"transpose test matrix %d/%d blocked %d x %d is ok\\n\");\n",n,u,br,bc);
+	printf("\n errval=rsb_sym_transpose(mtxAp);/* transposing back */");
+	printf("\n if(errval!=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\n }");
+	printf("\n else");
+	printf("\n { printf(\"skipping transposition test: unsupported\\n\");}");
+	printf("/* end transpose test */\n");
+	printf("}");
+	endif
+#
+	if want_here_op("scale")
+	printf("{");
+	printf("/*");
+	a
+	printf("*/");
+	printf("/* begin scale test */\n");
+	printf("/* (since scaling does modify the matrix, it should be the last op here.. ) */\n");
+	dump_scale(a,br,bc,transi)
+	printf("\nif((errval=rsb_coo_sort(   SVA , NIA, NJA, nnz, nr, nc, typecode, mtxAp->flags ))!=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\nif((errval=rsb__do_scal(mtxAp,SV,trans))                 !=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\nif((errval = rsb_mtx_get_coo(mtxAp, NSVA , NIA, NJA, RSB_FLAG_C_INDICES_INTERFACE ))!=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\nif((errval=rsb_coo_sort(   NSVA , NIA, NJA, nnz, nr, nc, typecode, mtxAp->flags ))!=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\nif(memcmp(NSVA,SVA,sizeof(%s)*nnz))",ts);
+	printf("\n{\n");
+	printf("\nif((errval=rsb__debug_print_vectors_diff(SVA,NSVA,nnz,typecode,1,1,0))!=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("\nRSB_OCTAVE_ERROR(\"scale test matrix %d/%d blocked %d x %d is not ok\\n\");\n",n,u,br,bc);
+	printf("\n}else printf(\"scale test matrix %d/%d blocked %d x %d is ok\\n\");\n",n,u,br,bc);
+#	printf("\nif((errval=rsb_scal(mtxAp,ISV))                 !=RSB_ERR_NO_ERROR)\ngoto err;");
+	printf("/* end scale test */\n");
+	printf("}");
+	endif
+	printf("\n rsb_mtx_free(mtxAp);");
+#	printf("goto end;\n");
+#
+	printf("\n}\n");
+	# done
+end
+end
+end
+end
+printf("\n}/*fi*/\n");
+printf("}\n");
+#end	# fi
+#printf("\n}/*Ri*/\n");
+
+printf("goto end; /* support compiler happyness worldwide */\n");
+printf("end:;\n");
+printf("if( errval!=RSB_ERR_NO_ERROR)\ngoto err;\n");
+printf("if((errval=rsb_lib_exit(RSB_NULL_EXIT_OPTIONS))!=RSB_ERR_NO_ERROR)\ngoto err;\n");
+if main
+	printf("\nif(octave_failed_tests)RSB_INFO(\"ERROR: %%d failed tests (please file a bug report with this output).\\n\",octave_failed_tests);else RSB_INFO(\"all tests passed.\\n\");\n");
+	printf("return octave_failed_tests?-1:0;\nerr:rsb_perror(NULL,errval);return -1;\n}\n");
+else
+	printf("return octave_failed_tests;\n  err:rsb_perror(NULL,errval);return -1;\n \n");
+	printf("                              ferr:rsb_perror(NULL,errval);return -1;\n}\n");
+endif
+
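(Every test this script emits follows one pattern: hard-code a COO matrix
and an Octave-computed reference vector, run the operation, and memcmp the
result. A hedged sketch of that pattern against the public rsb.h API of
this release follows; the generated code itself uses internal headers and
the int type, and its error paths are trimmed here.)

    #include <stdio.h>
    #include <string.h>
    #include <rsb.h>

    int main(void)
    {
        /* 2x2 matrix {{1,0},{0,2}} in COO form, as a generated test would hard-code it. */
        const rsb_coo_idx_t IA[] = {0, 1}, JA[] = {0, 1};
        const double VA[] = {1.0, 2.0};
        const double B[]  = {1.0, 1.0};      /* right hand side */
        const double R[]  = {1.0, 2.0};      /* Octave-computed reference of A*B */
        double X[2] = {0.0, 0.0};
        const double alpha = 1.0, beta = 0.0;
        struct rsb_mtx_t *mtxAp = NULL;
        rsb_err_t errval = RSB_ERR_NO_ERROR;

        if (rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
            return -1;
        mtxAp = rsb_mtx_alloc_from_coo_const(VA, IA, JA, 2,
            RSB_NUMERICAL_TYPE_DOUBLE, 2, 2, 1, 1, RSB_FLAG_NOFLAGS, &errval);
        if (!mtxAp)
            return -1;
        errval = rsb_spmv(RSB_TRANSPOSITION_N, &alpha, mtxAp, B, 1, &beta, X, 1);
        if (errval != RSB_ERR_NO_ERROR || memcmp(X, R, sizeof(X)))
            printf("spmv test is not ok\n");
        else
            printf("spmv test is ok\n");
        rsb_mtx_free(mtxAp);
        return rsb_lib_exit(RSB_NULL_EXIT_OPTIONS) == RSB_ERR_NO_ERROR ? 0 : -1;
    }
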
diff --git a/pd.mtx b/pd.mtx
new file mode 100644
index 0000000..380a400
--- /dev/null
+++ b/pd.mtx
@@ -0,0 +1,48 @@
+%%MatrixMarket matrix coordinate real general
+% a positive definite matrix, as in
+% http://www.ncsa.uiuc.edu/UserInfo/Resources/Hardware/IBMp690/IBM/usr/lpp/essl.html.en_US/html/essl43.html
+% *                        *
+% | 99  12  13  14  15  16 |
+% | 12  99  12  13  14  15 |
+% | 13  12  99  12  13  14 |
+% | 14  13  12  99  12  13 |
+% | 15  14  13  12  99  12 |
+% | 16  15  14  13  12  99 |
+% *                        *
+6 6 36
+1 1 99
+1 2 12
+1 3 13
+1 4 14
+1 5 15
+1 6 16
+2 1 12
+2 2 99
+2 3 12
+2 4 13
+2 5 14
+2 6 15
+3 1 13
+3 2 12
+3 3 99
+3 4 12
+3 5 13
+3 6 14
+4 1 14
+4 2 13
+4 3 12
+4 4 99
+4 5 12
+4 6 13
+5 1 15
+5 2 14
+5 3 13
+5 4 12
+5 5 99
+5 6 12
+6 1 16
+6 2 15
+6 3 14
+6 4 13
+6 5 12
+6 6 99
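
(A hedged aside: a Matrix Market file like this one can be loaded directly
through librsb's reader, rsb_file_mtx_load() from the public rsb.h of this
release; error handling is trimmed.)

    #include <stdio.h>
    #include <rsb.h>

    int main(void)
    {
        rsb_err_t errval = RSB_ERR_NO_ERROR;
        struct rsb_mtx_t *mtxAp = NULL;

        if (rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
            return -1;
        /* Parse the Matrix Market header and the 36 coordinate entries. */
        mtxAp = rsb_file_mtx_load("pd.mtx", RSB_FLAG_NOFLAGS,
                                  RSB_NUMERICAL_TYPE_DOUBLE, &errval);
        if (!mtxAp)
        {
            printf("failed loading pd.mtx\n");
            return -1;
        }
        rsb_mtx_free(mtxAp);
        return rsb_lib_exit(RSB_NULL_EXIT_OPTIONS) == RSB_ERR_NO_ERROR ? 0 : -1;
    }
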
diff --git a/psb_mvsv_tester.f90 b/psb_mvsv_tester.f90
new file mode 100644
index 0000000..d0493aa
--- /dev/null
+++ b/psb_mvsv_tester.f90
@@ -0,0 +1,7323 @@
+! /*                                                                                                                            
+! 
+! Copyright (C) 2008-2014 Michele Martone
+! 
+! This file is part of librsb.
+! 
+! librsb is free software; you can redistribute it and/or modify it
+! under the terms of the GNU Lesser General Public License as published
+! by the Free Software Foundation; either version 3 of the License, or
+! (at your option) any later version.
+! 
+! librsb is distributed in the hope that it will be useful, but WITHOUT
+! ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+! FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+! License for more details.
+! 
+! You should have received a copy of the GNU Lesser General Public
+! License along with librsb; see the file COPYING.
+! If not, see <http://www.gnu.org/licenses/>.
+! 
+! */
+! 
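+! Note on the generated testers below: each subroutine assembles a small
+! 2x2 sparse matrix from COO arrays via PSBLAS (psb_cdall, psb_spall,
+! psb_spins, psb_cdasb, psb_spasb), then calls psb_spmm to compute
+!   y := alpha*op(A)*x + beta*y,  with op(A) one of A, A**T, A**H,
+! and checks y elementwise against the hard-coded reference cy.
+! The subroutine name apparently encodes the test parameters, e.g.
+! ts_sg_de_usmv_2_n_ap3_bp1_ix1_iy1 reads: type=s, sym=g, diag=e, usmv,
+! dim=2, trans=n, alpha=+3 (ap3), beta=+1 (bp1), incx=1, incy=1;
+! the 9996..9999 labels form the usual cleanup/error-handling chain.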
+      SUBROUTINE ts_sg_de_usmv_2_n_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+      REAL*4 :: beta=1
+! A =
+! 1 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/6, 3/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
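+      ! Reference result, derived by hand (beta=1 keeps the old y):
+      ! cy = alpha*A*x + beta*y = 3*(/1, 0/) + 1*(/3, 3/) = (/6, 3/)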
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+      REAL*4 :: beta=1
+! A =
+! 1 1
+! 0 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/6, 12/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
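+      ! Reference result for the transposed operator:
+      ! cy = alpha*transpose(A)*x + beta*y = 3*(/1, 3/) + 1*(/3, 3/) = (/6, 12/)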
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+      REAL*4 :: beta=1
+! A =
+! 1 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 1/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
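+      ! For REAL*4 data, trans='c' (conjugate transpose) reduces to the plain
+      ! transpose: cy = 3*transpose(A)*x + y = 3*(/1, 1/) + (/3, 3/) = (/6, 6/)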
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_n_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+      REAL*4 :: beta=0
+! A =
+! 1 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/3, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
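+      ! With beta=0 the initial y is discarded entirely:
+      ! cy = alpha*A*x = 3*(/1, 0/) = (/3, 0/)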
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_n_ap3_bnr0_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+      REAL*4 :: beta=0
+! A =
+! 1 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 1/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/6, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_t_ap3_bnr0_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+      REAL*4 :: beta=0
+! A =
+! 1 1
+! 0 3
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 3/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/3, 12/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_c_ap3_bnr0_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_n_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+      REAL*4 :: beta=1
+! A =
+! 1 4
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: VA(3)=&
+          &(/1, 4, 2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/8, 5/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+      REAL*4 :: beta=1
+! A =
+! 1 0
+! 2 4
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 4/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/6, 7/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+      REAL*4 :: beta=1
+! A =
+! 1 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/4, 3/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_n_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+      REAL*4 :: beta=0
+! A =
+! 1 3
+! 0 5
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 3, 5/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/4, 5/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_n_ap1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+      REAL*4 :: beta=0
+! A =
+! 1 2
+! 1 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      REAL*4 :: VA(4)=&
+          &(/1, 2, 1, 2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/2, 4/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_t_ap1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+      REAL*4 :: beta=0
+! A =
+! 1 2
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/1, 3/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_c_ap1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_n_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+      REAL*4 :: beta=1
+! A =
+! 1 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 1/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
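+      ! Negative alpha flips the sign of the product term:
+      ! cy = -1*A*x + 1*y = -(/1, 1/) + (/3, 3/) = (/2, 2/)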
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+      REAL*4 :: beta=1
+! A =
+! 1 2
+! 3 3
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      REAL*4 :: VA(4)=&
+          &(/1, 2, 3, 3/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/-1, -2/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+      REAL*4 :: beta=1
+! A =
+! 1 0
+! 0 3
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 3/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/2, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_n_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+      REAL*4 :: beta=0
+! A =
+! 1 1
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/-2, -1/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_n_anr1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+      REAL*4 :: beta=0
+! A =
+! 1 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/-1, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_t_anr1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+      REAL*4 :: beta=0
+! A =
+! 1 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 1/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/-2, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_c_anr1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_n_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+      REAL*4 :: beta=1
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/0, -6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+      REAL*4 :: beta=1
+! A =
+! 1 0
+! 1 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/-3, -3/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+      REAL*4 :: beta=1
+! A =
+! 1 1
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/0, -3/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_n_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+      REAL*4 :: beta=0
+! A =
+! 1 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/-3, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_n_anr3_bnr0_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+      REAL*4 :: beta=0
+! A =
+! 1 3
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 1/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 3/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/-3, -9/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
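+      ! expected: y = alpha*transpose(A)*x + beta*y = -3*(1,3) + 0*(3,3) = (-3,-9)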
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_t_anr3_bnr0_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_sspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+      REAL*4 :: beta=0
+! A =
+! 1 3
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 1/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 3/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/-3, -9/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
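+      ! expected: y = alpha*conjg(transpose(A))*x + beta*y; on real data the
+      ! conjugate transpose is just the transpose: -3*(1,3) + 0*(3,3) = (-3,-9)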
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE ts_sg_de_usmv_2_c_anr3_bnr0_ix1_iy1 
+! 
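+! Naming note: the generated subroutine names mirror the labels PRINTed
+! above: t<s|d> = value type (s: REAL*4, d: REAL*8), sg = sym=g,
+! de = diag=e, usmv_2 = the 2x2 usmv case, n/t/c = trans,
+! ap<k>/anr<k> = alpha +k/-k, bp<k>/bnr0 = beta +k/0,
+! ix1_iy1 = incx=1, incy=1. The REAL*8 (td_*) variants of the same
+! cases follow.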
+      SUBROUTINE td_sg_de_usmv_2_n_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+      REAL*8 :: beta=1
+! A =
+! 1 1
+! 0 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
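+      ! expected: y = alpha*A*x + beta*y = 3*(2,2) + 1*(3,3) = (9,9)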
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE td_sg_de_usmv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+      REAL*8 :: beta=1
+! A =
+! 1 2
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 1/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/6, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
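+      ! expected: y = alpha*transpose(A)*x + beta*y = 3*(1,2) + 1*(3,3) = (6,9)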
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE td_sg_de_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+      REAL*8 :: beta=1
+! A =
+! 1 1
+! 0 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/6, 12/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
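+      ! expected (trans='c' acts as transpose on real data):
+      ! y = 3*(1,3) + 1*(3,3) = (6,12)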
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE td_sg_de_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_n_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+      REAL*8 :: beta=0
+! A =
+! 1 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/3, 0/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
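+      ! expected: y = alpha*A*x + beta*y = 3*(1,0) + 0*(3,3) = (3,0)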
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE td_sg_de_usmv_2_n_ap3_bnr0_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+      REAL*8 :: beta=0
+! A =
+! 1 3
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 1/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 3/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/3, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
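+      ! expected: y = alpha*transpose(A)*x + beta*y = 3*(1,3) + 0*(3,3) = (3,9)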
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE td_sg_de_usmv_2_t_ap3_bnr0_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+      REAL*8 :: beta=0
+! A =
+! 1 1
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      REAL*8 :: VA(4)=&
+          &(/1, 1, 3, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/12, 6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
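+      ! expected (trans='c' acts as transpose on real data):
+      ! y = 3*(4,2) + 0*(3,3) = (12,6)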
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE td_sg_de_usmv_2_c_ap3_bnr0_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_n_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+      REAL*8 :: beta=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/4, 4/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
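+      ! expected: y = alpha*A*x + beta*y = 1*(1,1) + 1*(3,3) = (4,4)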
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE td_sg_de_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+      REAL*8 :: beta=1
+! A =
+! 1 2
+! 0 5
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 5/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/4, 10/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
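+      ! expected: y = alpha*transpose(A)*x + beta*y = 1*(1,7) + 1*(3,3) = (4,10)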
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE td_sg_de_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+      REAL*8 :: beta=1
+! A =
+! 1 1
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 3/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/7, 4/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
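+      ! expected (trans='c' acts as transpose on real data):
+      ! y = 1*(4,1) + 1*(3,3) = (7,4)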
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE td_sg_de_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_n_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+      REAL*8 :: beta=0
+! A =
+! 1 2
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 1/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/3, 0/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
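+      ! expected: y = alpha*A*x + beta*y = 1*(3,0) + 0*(3,3) = (3,0)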
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE td_sg_de_usmv_2_n_ap1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+      REAL*8 :: beta=0
+! A =
+! 1 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/1, 0/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
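+      ! expected: y = alpha*transpose(A)*x + beta*y = 1*(1,0) + 0*(3,3) = (1,0)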
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE td_sg_de_usmv_2_t_ap1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+      REAL*8 :: beta=0
+! A =
+! 1 3
+! 1 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      REAL*8 :: VA(4)=&
+          &(/1, 3, 1, 2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/2, 5/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
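+      ! expected (trans='c' acts as transpose on real data):
+      ! y = 1*(2,5) + 0*(3,3) = (2,5)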
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE td_sg_de_usmv_2_c_ap1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_n_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+      REAL*8 :: beta=1
+! A =
+! 1 2
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 3/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
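+      ! expected: y = alpha*A*x + beta*y = -1*(3,3) + 1*(3,3) = (0,0)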
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE td_sg_de_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+      REAL*8 :: beta=1
+! A =
+! 1 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 1/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
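+      ! expected: y = alpha*transpose(A)*x + beta*y = -1*(1,1) + 1*(3,3) = (2,2)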
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE td_sg_de_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+      REAL*8 :: beta=1
+! A =
+! 1 0
+! 0 3
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 3/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/2, 0/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
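+      ! expected (trans='c' acts as transpose on real data):
+      ! y = -1*(1,3) + 1*(3,3) = (2,0)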
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE td_sg_de_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_n_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+      REAL*8 :: beta=0
+! A =
+! 1 4
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: VA(3)=&
+          &(/1, 4, 2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/-5, -2/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
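+      ! expected: y = alpha*A*x + beta*y = -1*(5,2) + 0*(3,3) = (-5,-2)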
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE td_sg_de_usmv_2_n_anr1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+      REAL*8 :: beta=0
+! A =
+! 1 3
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: VA(3)=&
+          &(/1, 3, 2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/-3, -3/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
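+      ! expected: y = alpha*transpose(A)*x + beta*y = -1*(3,3) + 0*(3,3) = (-3,-3)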
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE td_sg_de_usmv_2_t_anr1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+      REAL*8 :: beta=0
+! A =
+! 1 2
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/-2, -2/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
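+      ! expected (trans='c' acts as transpose on real data):
+      ! y = -1*(2,2) + 0*(3,3) = (-2,-2)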
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE td_sg_de_usmv_2_c_anr1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_n_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+      REAL*8 :: beta=1
+! A =
+! 1 2
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 1/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/-6, 3/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
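+      ! expected: y = alpha*A*x + beta*y = -3*(3,0) + 1*(3,3) = (-6,3)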
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE td_sg_de_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+      REAL*8 :: beta=1
+! A =
+! 1 0
+! 0 3
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 3/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/0, -6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
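+      ! expected: y = alpha*transpose(A)*x + beta*y = -3*(1,3) + 1*(3,3) = (0,-6)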
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE td_sg_de_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+      REAL*8 :: beta=1
+! A =
+! 1 1
+! 0 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/0, -6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
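+      ! expected (trans='c' acts as transpose on real data):
+      ! y = -3*(1,3) + 1*(3,3) = (0,-6)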
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE td_sg_de_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_n_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+      REAL*8 :: beta=0
+! A =
+! 1 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 1/)
+      REAL*8 :: VA(2)=(/1, 3/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/-3, -9/)! reference cy after the operation
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
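+      ! Editorial worked check: beta=0 discards the initial y, so
+      ! y = -3*(A*x) = -3*(1,3) = (-3,-9) = cy.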
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE td_sg_de_usmv_2_n_anr3_bnr0_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+      REAL*8 :: beta=0
+! A =
+! 1 3
+! 0 3
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 3, 3/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/-3, -18/)! reference cy after the operation
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
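+      ! Editorial worked check: A**T*x = (1,6), and with beta=0
+      ! y = -3*(1,6) = (-3,-18) = cy.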
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE td_sg_de_usmv_2_t_anr3_bnr0_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_dspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+      REAL*8 :: beta=0
+! A =
+! 1 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 1/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/-3, -3/)! reference cy after the operation
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
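+      ! Editorial worked check: transA='c' on REAL data is a plain
+      ! transpose; A**T*x = (1,1), so y = -3*(1,1) = (-3,-3) = cy.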
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE td_sg_de_usmv_2_c_anr3_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_n_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+      COMPLEX*8 :: beta=1
+! A =
+! 1+2i 0+0i
+! 1+0i 0+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,2.e0), (1.e0,0.e0), (0,2)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(6.e0,6.e0), (6,6)/)! reference cy after the operation
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
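+      ! Editorial worked check: A*x = (1+2i, 1+2i), so
+      ! y = (3,3) + 3*(1+2i, 1+2i) = (6+6i, 6+6i) = cy.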
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+      COMPLEX*8 :: beta=1
+! A =
+! 1+2i 1+1i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,2.e0), (1.e0,1.e0), (0,1)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(6.e0,9.e0), (6,3)/)! reference cy after the operation
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
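+      ! Editorial worked check: transA='t' transposes without conjugating;
+      ! A**T*x = (1+3i, 1+i), so y = (3,3) + 3*(1+3i, 1+i)
+      ! = (6+9i, 6+3i) = cy.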
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+      COMPLEX*8 :: beta=1
+! A =
+! 1+2i 0+3i
+! 0+3i 0+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,3.e0), (0.e0,3.e0), (0,2)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(6.e0,-15.e0), (3,-15)/)! reference cy after the operation
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
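+      ! Editorial worked check: transA='c' applies the conjugate transpose;
+      ! conjg(A)**T*x = (1-5i, -5i), so y = (3,3) + 3*(1-5i, -5i)
+      ! = (6-15i, 3-15i) = cy.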
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_n_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+      COMPLEX*8 :: beta=0
+! A =
+! 1+2i 0+3i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,2.e0), (0.e0,3.e0), (0,3)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(3.e0,15.e0), (0,9)/)! reference cy after the operation
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
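+      ! Editorial worked check: A*x = (1+5i, 3i); with beta=0,
+      ! y = 3*(1+5i, 3i) = (3+15i, 9i) = cy.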
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_n_ap3_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+      COMPLEX*8 :: beta=0
+! A =
+! 1+2i 3+0i
+! 0+0i 3+6i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,2.e0), (3.e0,0.e0), (3,6)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(3.e0,6.e0), (18,18)/)! reference cy after the operation
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
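+      ! Editorial worked check: A**T*x = (1+2i, 6+6i); with beta=0,
+      ! y = 3*(1+2i, 6+6i) = (3+6i, 18+18i) = cy.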
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_t_ap3_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+      COMPLEX*8 :: beta=0
+! A =
+! 1+2i 0+1i
+! 0+1i 1+12i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,1.e0), (0.e0,1.e0), (1,12)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(3.e0,-9.e0), (3,-39)/)! reference cy after the operation
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
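+      ! Editorial worked check: conjg(A)**T*x = (1-3i, 1-13i); with beta=0,
+      ! y = 3*(1-3i, 1-13i) = (3-9i, 3-39i) = cy.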
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_c_ap3_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_n_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+      COMPLEX*8 :: beta=1
+! A =
+! 1+2i 0+0i
+! 2+0i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,2.e0), (2.e0,0.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(4.e0,2.e0), (6,0)/)! reference cy after the operation
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
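+      ! Editorial worked check: A*x = (1+2i, 3), so
+      ! y = (3,3) + (1+2i, 3) = (4+2i, 6) = cy.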
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+      COMPLEX*8 :: beta=1
+! A =
+! 1+2i 4+0i
+! 0+0i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,2.e0), (4.e0,0.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(4.e0,2.e0), (8,0)/)! reference cy after the operation
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+      COMPLEX*8 :: beta=1
+! A =
+! 1+2i 4+0i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 1/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/(1.e0,2.e0), (4,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(4.e0,-2.e0), (7,0)/)! reference cy after the operation
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
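+      ! Editorial worked check: conjg(A)**T*x = (1-2i, 4), so
+      ! y = (3,3) + (1-2i, 4) = (4-2i, 7) = cy.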
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_n_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+      COMPLEX*8 :: beta=0
+! A =
+! 1+2i 1+1i
+! 0+1i 2+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (1.e0,1.e0), (0.e0,1.e0), (2,2)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(2.e0,3.e0), (2,3)/)! reference cy after the operation
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_n_ap1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+      COMPLEX*8 :: beta=0
+! A =
+! 1+2i 4+1i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (4.e0,1.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(1.e0,3.e0), (5,1)/)! reference cy after the operation
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_t_ap1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+      COMPLEX*8 :: beta=0
+! A =
+! 1+2i 2+4i
+! 1+4i 2+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (2.e0,4.e0), (1.e0,4.e0), (2,2)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(2.e0,-6.e0), (4,-6)/)! reference cy after the operation
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_c_ap1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_n_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+      COMPLEX*8 :: beta=1
+! A =
+! 1+2i 6+2i
+! 0+2i 0+4i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (6.e0,2.e0), (0.e0,2.e0), (0,4)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-4.e0,-4.e0), (3,-6)/)! reference cy after the operation
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
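+      ! Editorial worked check: A*x = (7+4i, 6i), so
+      ! y = (3,3) - (7+4i, 6i) = (-4-4i, 3-6i) = cy.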
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+      COMPLEX*8 :: beta=1
+! A =
+! 1+2i 0+3i
+! 0+3i 0+6i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,3.e0), (0.e0,3.e0), (0,6)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(2.e0,-5.e0), (3,-9)/)! reference cy after the operation
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+      COMPLEX*8 :: beta=1
+! A =
+! 1+2i 2+0i
+! 0+0i 0+6i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,2.e0), (2.e0,0.e0), (0,6)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(2.e0,2.e0), (1,6)/)! reference cy after the operation
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_n_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+      COMPLEX*8 :: beta=0
+! A =
+! 1+2i 0+1i
+! 2+1i 0+8i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,1.e0), (2.e0,1.e0), (0,8)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-1.e0,-3.e0), (-2,-9)/)! reference cy after the operation
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_n_anr1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+      COMPLEX*8 :: beta=0
+! A =
+! 1+2i 1+5i
+! 6+5i 2+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (1.e0,5.e0), (6.e0,5.e0), (2,2)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-7.e0,-7.e0), (-3,-7)/)! reference cy after the operation
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_t_anr1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+      COMPLEX*8 :: beta=0
+! A =
+! 1+2i 2+3i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,2.e0), (2.e0,3.e0), (0,3)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-1.e0,5.e0), (-2,3)/)! reference cy after the operation
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_c_anr1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_n_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+      COMPLEX*8 :: beta=1
+! A =
+! 1+2i 0+0i
+! 2+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 1/)
+      COMPLEX*8 :: VA(2)=(/(1.e0,2.e0), (2,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(0.e0,-6.e0), (-3,0)/)! reference cy after the usmv call
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
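+      ! Hand check (editor's note, not generator output): op(A)=A,
+      ! A*x = (1+2i, 2), so y = -3*(A*x) + 1*(3,3) = (0-6i, -3), matching cy.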
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+      COMPLEX*8 :: beta=1
+! A =
+! 1+2i 0+0i
+! 5+0i 3+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,2.e0), (5.e0,0.e0), (3,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-15.e0,-6.e0), (-6,0)/)! reference cy after the usmv call
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
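+      ! Hand check (editor's note, not generator output): op(A)=transpose(A),
+      ! op(A)*x = (6+2i, 3), so y = -3*op(A)*x + 1*(3,3) = (-15-6i, -6), matching cy.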
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+      COMPLEX*8 :: beta=1
+! A =
+! 1+2i 3+3i
+! 2+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (3.e0,3.e0), (2.e0,3.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-6.e0,15.e0), (-9,9)/)! reference cy after the usmv call
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
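+      ! Hand check (editor's note, not generator output): op(A)=conjg(transpose(A)),
+      ! op(A)*x = (3-5i, 4-3i), so y = -3*op(A)*x + 1*(3,3) = (-6+15i, -9+9i), matching cy.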
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_n_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+      COMPLEX*8 :: beta=0
+! A =
+! 1+2i 0+0i
+! 0+0i 0+4i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/(1.e0,2.e0), (0,4)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-3.e0,-6.e0), (0,-12)/)! reference cy after the usmv call
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
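+      ! Hand check (editor's note, not generator output): op(A)=A,
+      ! A*x = (1+2i, 0+4i), so y = -3*(A*x) + 0*y = (-3-6i, 0-12i), matching cy.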
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_n_anr3_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+      COMPLEX*8 :: beta=0
+! A =
+! 1+2i 2+3i
+! 2+3i 3+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (2.e0,3.e0), (2.e0,3.e0), (3,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-9.e0,-15.e0), (-15,-9)/)! reference cy after the usmv call
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
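+      ! Hand check (editor's note, not generator output): op(A)=transpose(A); A is symmetric,
+      ! so op(A)*x = (3+5i, 5+3i) and y = -3*op(A)*x + 0*y = (-9-15i, -15-9i), matching cy.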
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_t_anr3_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_cspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+      COMPLEX*8 :: beta=0
+! A =
+! 1+2i 1+0i
+! 1+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,2.e0), (1.e0,0.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-6.e0,6.e0), (-3,0)/)! reference cy after the usmv call
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
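+      ! Hand check (editor's note, not generator output): op(A)=conjg(transpose(A)),
+      ! op(A)*x = (2-2i, 1), so y = -3*op(A)*x + 0*y = (-6+6i, -3), matching cy.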
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE tc_sg_de_usmv_2_c_anr3_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_n_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+      COMPLEX*16 :: beta=1
+! A =
+! 1+2i 1+1i
+! 3+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (1.e0,1.e0), (3,1)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(9.e0,9.e0), (12,3)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
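+      ! Hand check (editor's note, not generator output): op(A)=A,
+      ! A*x = (2+3i, 3+1i), so y = 3*(A*x) + 1*(3,3) = (9+9i, 12+3i), matching cy.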
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+      COMPLEX*16 :: beta=1
+! A =
+! 1+2i 0+3i
+! 2+3i 3+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,3.e0), (2.e0,3.e0), (3,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(12.e0,15.e0), (12,15)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
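+      ! Hand check (editor's note, not generator output): op(A)=transpose(A),
+      ! op(A)*x = (3+5i, 3+5i), so y = 3*op(A)*x + 1*(3,3) = (12+15i, 12+15i), matching cy.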
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+      COMPLEX*16 :: beta=1
+! A =
+! 1+2i 3+2i
+! 0+2i 0+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (3.e0,2.e0), (0.e0,2.e0), (0,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(6.e0,-12.e0), (12,-12)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
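+      ! Hand check (editor's note, not generator output): op(A)=conjg(transpose(A)),
+      ! op(A)*x = (1-4i, 3-4i), so y = 3*op(A)*x + 1*(3,3) = (6-12i, 12-12i), matching cy.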
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_n_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+      COMPLEX*16 :: beta=0
+! A =
+! 1+2i 0+0i
+! 1+0i 3+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (1.e0,0.e0), (3,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(3.e0,6.e0), (12,6)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
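+      ! Hand check (editor's note, not generator output): op(A)=A,
+      ! A*x = (1+2i, 4+2i), so y = 3*(A*x) + 0*y = (3+6i, 12+6i), matching cy.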
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_n_ap3_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+      COMPLEX*16 :: beta=0
+! A =
+! 1+2i 0+1i
+! 0+1i 2+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,1.e0), (0.e0,1.e0), (2,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(3.e0,9.e0), (6,3)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
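+      ! Hand check (editor's note, not generator output): op(A)=transpose(A); A is symmetric,
+      ! so op(A)*x = (1+3i, 2+1i) and y = 3*op(A)*x + 0*y = (3+9i, 6+3i), matching cy.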
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_t_ap3_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+      COMPLEX*16 :: beta=0
+! A =
+! 1+2i 2+2i
+! 0+2i 2+6i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (2.e0,2.e0), (0.e0,2.e0), (2,6)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(3.e0,-12.e0), (12,-24)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
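+      ! Hand check (editor's note, not generator output): op(A)=conjg(transpose(A)),
+      ! op(A)*x = (1-4i, 4-8i), so y = 3*op(A)*x + 0*y = (3-12i, 12-24i), matching cy.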
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 0 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_c_ap3_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_n_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+      COMPLEX*16 :: beta=1
+! A =
+! 1+2i 3+2i
+! 0+2i 0+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (3.e0,2.e0), (0.e0,2.e0), (0,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(7.e0,4.e0), (3,4)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
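+      ! Hand check (editor's note, not generator output): op(A)=A,
+      ! A*x = (4+4i, 0+4i), so y = 1*(A*x) + 1*(3,3) = (7+4i, 3+4i), matching cy.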
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+      COMPLEX*16 :: beta=1
+! A =
+! 1+2i 1+2i
+! 2+2i 0+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (1.e0,2.e0), (2.e0,2.e0), (0,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(6.e0,4.e0), (4,4)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
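+      ! Hand check (editor's note, not generator output): op(A)=transpose(A),
+      ! op(A)*x = (3+4i, 1+4i), so y = 1*op(A)*x + 1*(3,3) = (6+4i, 4+4i), matching cy.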
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+      COMPLEX*16 :: beta=1
+! A =
+! 1+2i 0+1i
+! 0+1i 0+4i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,1.e0), (0.e0,1.e0), (0,4)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(4.e0,-3.e0), (3,-5)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
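+      ! Hand check (editor's note, not generator output): op(A)=conjg(transpose(A)),
+      ! op(A)*x = (1-3i, 0-5i), so y = 1*op(A)*x + 1*(3,3) = (4-3i, 3-5i), matching cy.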
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_n_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+      COMPLEX*16 :: beta=0
+! A =
+! 1+2i 2+5i
+! 0+5i 0+4i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (2.e0,5.e0), (0.e0,5.e0), (0,4)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(3.e0,7.e0), (0,9)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
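+      ! Hand check (editor's note, not generator output): op(A)=A and alpha=1, beta=0,
+      ! so y reduces to A*x = (3+7i, 0+9i), matching cy.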
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_n_ap1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+      COMPLEX*16 :: beta=0
+! A =
+! 1+2i 0+0i
+! 0+0i 0+8i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/(1.e0,2.e0), (0,8)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(1.e0,2.e0), (0,8)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
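+      ! Hand check (editor's note, not generator output): A is diagonal, so
+      ! transpose(A)*x = (1+2i, 0+8i) and y = 1*op(A)*x + 0*y equals cy exactly.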
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_t_ap1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+      COMPLEX*16 :: beta=0
+! A =
+! 1+2i 0+2i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (0.e0,2.e0), (0,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(1.e0,-4.e0), (0,-2)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
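+      ! Hand check (editor's note, not generator output): op(A)=conjg(transpose(A)),
+      ! op(A)*x = (1-4i, 0-2i), so y = 1*op(A)*x + 0*y = (1-4i, 0-2i), matching cy.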
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 0 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_c_ap1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_n_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+      COMPLEX*16 :: beta=1
+! A =
+! 1+2i 0+2i
+! 1+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (0.e0,2.e0), (1,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(2.e0,-4.e0), (2,-2)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
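+      ! Hand check (editor's note, not generator output): op(A)=A,
+      ! A*x = (1+4i, 1+2i), so y = -1*(A*x) + 1*(3,3) = (2-4i, 2-2i), matching cy.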
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+      COMPLEX*16 :: beta=1
+! A =
+! 1+2i 0+1i
+! 3+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (0.e0,1.e0), (3,1)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(-1.e0,-3.e0), (3,-1)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
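+      ! Hand check (editor's note, not generator output): op(A)=transpose(A),
+      ! op(A)*x = (4+3i, 0+1i), so y = -1*op(A)*x + 1*(3,3) = (-1-3i, 3-1i), matching cy.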
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+      COMPLEX*16 :: beta=1
+! A =
+! 1+2i 1+3i
+! 2+3i 2+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (1.e0,3.e0), (2.e0,3.e0), (2,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(0.e0,5.e0), (0,3)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
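+      ! Hand check (editor's note, not generator output): op(A)=conjg(transpose(A)),
+      ! op(A)*x = (3-5i, 3-3i), so y = -1*op(A)*x + 1*(3,3) = (0+5i, 0+3i), matching cy.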
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_n_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+      COMPLEX*16 :: beta=0
+! A =
+! 1+2i 3+1i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (3.e0,1.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(-4.e0,-3.e0), (-1,-1)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
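+      ! Hand check (editor's note, not generator output): op(A)=A,
+      ! A*x = (4+3i, 1+1i), so y = -1*(A*x) + 0*y = (-4-3i, -1-1i), matching cy.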
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_n_anr1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+      COMPLEX*16 :: beta=0
+! A =
+! 1+2i 0+5i
+! 2+5i 2+2i
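+! Worked check: trans='t' applies A**T, so with x=(1,1) each entry of y is
+! a column sum of A: ((1+2i)+(2+5i), (0+5i)+(2+2i)) = (3+7i, 2+7i); hence
+! y = alpha*(A**T*x) = (-3-7i, -2-7i), i.e. the reference cy below.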
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,5.e0), (2.e0,5.e0), (2,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(-3.e0,-7.e0), (-2,-7)/)! reference cy after the operation
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_t_anr1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+      COMPLEX*16 :: beta=0
+! A =
+! 1+2i 3+0i
+! 0+0i 0+8i
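+! Worked check: trans='c' applies the conjugate transpose A**H, so with
+! x=(1,1): A**H*x = (conjg(1+2i), conjg(3+0i)+conjg(0+8i)) = (1-2i, 3-8i);
+! hence y = alpha*(A**H*x) = (-1+2i, -3+8i), i.e. the reference cy below.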
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (3.e0,0.e0), (0,8)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(-1.e0,2.e0), (-3,8)/)! reference cy after the operation
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 0 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_c_anr1_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_n_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+      COMPLEX*16 :: beta=1
+! A =
+! 1+2i 0+0i
+! 0+0i 1+0i
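+! Worked check: A is diagonal here and beta=1 keeps the initial y=(3,3):
+! y = alpha*(A*x) + beta*y = -3*(1+2i, 1) + (3, 3) = (0-6i, 0+0i),
+! i.e. the reference cy below.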
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/(1.e0,2.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(0.e0,-6.e0), (0,0)/)! reference cy after the operation
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+      COMPLEX*16 :: beta=1
+! A =
+! 1+2i 0+0i
+! 0+0i 0+0i
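+! Worked check: only A(1,1)=1+2i is nonzero, so A**T*x = (1+2i, 0);
+! y = -3*(1+2i, 0) + (3, 3) = (0-6i, 3+0i), i.e. the reference cy below.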
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(1,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(0.e0,-6.e0), (3,0)/)! reference cy after the operation
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+      COMPLEX*16 :: beta=1
+! A =
+! 1+2i 0+4i
+! 0+4i 0+0i
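+! Worked check: A**H*x = (conjg(1+2i)+conjg(0+4i), conjg(0+4i)) = (1-6i, 0-4i);
+! y = -3*(1-6i, 0-4i) + (3, 3) = (0+18i, 3+12i), i.e. the reference cy below.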
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (0.e0,4.e0), (0,4)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(0.e0,18.e0), (3,12)/)! reference cy after the operation
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_n_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='n'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+      COMPLEX*16 :: beta=0
+! A =
+! 1+2i 0+0i
+! 4+0i 0+0i
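+! Worked check: A*x = ((1+2i)+0, 4+0) = (1+2i, 4); beta=0 discards the
+! initial y, so y = -3*(1+2i, 4) = (-3-6i, -12+0i), i.e. the reference cy below.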
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 1/)
+      COMPLEX*16 :: VA(2)=(/(1.e0,2.e0), (4,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(-3.e0,-6.e0), (-12,0)/)! reference cy after the operation
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=n is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=n is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_n_anr3_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='t'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+      COMPLEX*16 :: beta=0
+! A =
+! 1+2i 0+2i
+! 2+2i 0+0i
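+! Worked check: A**T*x = ((1+2i)+(2+2i), 0+2i) = (3+4i, 0+2i);
+! y = -3*(3+4i, 0+2i) = (-9-12i, 0-6i), i.e. the reference cy below.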
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (0.e0,2.e0), (2,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(-9.e0,-12.e0), (0,-6)/)! reference cy after the operation
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=t is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=t is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_t_anr3_bnr0_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      USE psb_base_mod
+      IMPLICIT NONE
+      CHARACTER(LEN=*) :: afmt
+      TYPE(psb_zspmat_type) :: a
+      TYPE(psb_desc_type)   :: desc_a
+      INTEGER            :: ictxt, iam=-1, np=-1
+      INTEGER            :: info=-1
+      
+      INTEGER::errval,istat=0,i
+      CHARACTER::transA='c'
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+      COMPLEX*16 :: beta=0
+! A =
+! 1+2i 1+2i
+! 0+2i 0+0i
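+! Worked check: A**H*x = (conjg(1+2i)+conjg(0+2i), conjg(1+2i)) = (1-4i, 1-2i);
+! y = -3*(1-4i, 1-2i) = (-3+12i, -3+6i), i.e. the reference cy below.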
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (1.e0,2.e0), (0,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(-3.e0,12.e0), (-3,6)/)! reference cy after the operation
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            info=-1
+            GOTO 9999
+      ENDIF
+      CALL psb_barrier(ictxt)
+      CALL psb_cdall(ictxt,desc_a,info,nl=nr)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spall(a,desc_a,info,nnz=nnz)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_barrier(ictxt)
+      CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_cdasb(desc_a,info)
+      IF (info .NE. 0)GOTO 9996
+      CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)
+      IF(info.NE.0)PRINT *,"matrix assembly failed"
+      IF(info.NE.0)GOTO 9996
+      
+      CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)
+      IF(info.NE.0)PRINT *,"psb_spmm failed"
+      IF(info.NE.0)GOTO 9996
+      DO i=1,2
+            IF(y(i).NE.cy(i))PRINT*,"results mismatch:",y,"instead of",cy
+            IF(y(i).NE.cy(i))info=-1
+            IF(y(i).NE.cy(i))GOTO 9996
+      ENDDO
+9996      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_spfree(a,desc_a,info)
+      IF (info .NE. 0)GOTO 9997
+9997      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+      CALL psb_cdfree(desc_a,info)
+      IF (info .NE. 0)GOTO 9998
+9998      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+9999      CONTINUE
+      IF(info .NE. 0)errval=errval+1
+            IF(errval.NE.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=c is not ok"
+            IF(errval.EQ.0)PRINT*,"type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 0 incx=1 incy=1 trans=c is ok"
+      END SUBROUTINE tz_sg_de_usmv_2_c_anr3_bnr0_ix1_iy1 
+!
+      end module psb_mvsv_tester
+!
diff --git a/psb_mvsv_tester.m b/psb_mvsv_tester.m
new file mode 100644
index 0000000..4eb5ab9
--- /dev/null
+++ b/psb_mvsv_tester.m
@@ -0,0 +1,40 @@
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+#
+# Parallel Sparse Blas Tester Code.
+#
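+# Usage sketch (an assumption, not documented here): run under Octave and
+# redirect stdout to obtain the generated Fortran module, e.g.
+#   octave -q psb_mvsv_tester.m > psb_mvsv_tester.F90
+# sbtg.m is assumed to provide rsb_octave_license, findent and all_test.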
+
+source("./sbtg.m")
+
+res=[rsb_octave_license("f")];
+printf("%s",res);
+
+
+res=[rsb_octave_license("f")];
+#res=[res,"!\n"];
+res=[res,"! Parallel Sparse BLAS fortran interface testing code\n"];
+res=[res,"!\n"];
+res=[res,findent,"module psb_mvsv_tester\n"];
+res=[res,findent,"contains\n"];
+res=[res,"!\n"];
+res=[res,all_test("p","decl")];
+res=[res,"!\n"];
+res=[res,findent,"end module psb_mvsv_tester\n"];
+res=[res,"!\n"];
+printf("%s",res);
diff --git a/psbtf.F90 b/psbtf.F90
new file mode 100644
index 0000000..23807b5
--- /dev/null
+++ b/psbtf.F90
@@ -0,0 +1,532 @@
+! /*                                                                                                                            
+! 
+! Copyright (C) 2008-2014 Michele Martone
+! 
+! This file is part of librsb.
+! 
+! librsb is free software; you can redistribute it and/or modify it
+! under the terms of the GNU Lesser General Public License as published
+! by the Free Software Foundation; either version 3 of the License, or
+! (at your option) any later version.
+! 
+! librsb is distributed in the hope that it will be useful, but WITHOUT
+! ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+! FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+! License for more details.
+! 
+! You should have received a copy of the GNU Lesser General Public
+! License along with librsb; see the file COPYING.
+! If not, see <http://www.gnu.org/licenses/>.
+! 
+! */
+!
+! Parallel Sparse BLAS fortran interface testing code
+!
+!
+!> @cond INNERDOC
+      PROGRAM main
+
+      USE psb_base_mod
+      USE psb_mvsv_tester
+      IMPLICIT NONE
+      INTEGER :: res,errval=0,passed=0,failed=0,fi=0
+      INTEGER            :: ictxt, iam=-1, np=-1
+      CHARACTER(LEN=psb_fidasize_) :: afmt
+      CALL psb_init(ictxt)
+      CALL psb_info(ictxt,iam,np)
+      IF(iam<0)THEN
+            GOTO 9999
+      ENDIF
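+! Every tester subroutine below runs once per storage format:
+! CSR (fi=1) and COO (fi=2).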
+      DO fi=1,2
+      IF(fi.EQ.1)afmt=psb_csr_afmt_
+      IF(fi.EQ.2)afmt=psb_coo_afmt_
+      CALL       ts_sg_de_usmv_2_n_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       ts_sg_de_usmv_2_t_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       ts_sg_de_usmv_2_c_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       ts_sg_de_usmv_2_n_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       ts_sg_de_usmv_2_t_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       ts_sg_de_usmv_2_c_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       ts_sg_de_usmv_2_n_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       ts_sg_de_usmv_2_t_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       ts_sg_de_usmv_2_c_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       ts_sg_de_usmv_2_n_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       ts_sg_de_usmv_2_t_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       ts_sg_de_usmv_2_c_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       ts_sg_de_usmv_2_n_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       ts_sg_de_usmv_2_t_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       ts_sg_de_usmv_2_c_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       ts_sg_de_usmv_2_n_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       ts_sg_de_usmv_2_t_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       ts_sg_de_usmv_2_c_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       ts_sg_de_usmv_2_n_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       ts_sg_de_usmv_2_t_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       ts_sg_de_usmv_2_c_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       ts_sg_de_usmv_2_n_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       ts_sg_de_usmv_2_t_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       ts_sg_de_usmv_2_c_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_n_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_t_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_c_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_n_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_t_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_c_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_n_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_t_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_c_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_n_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_t_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_c_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_n_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_t_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_c_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_n_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_t_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_c_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_n_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_t_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_c_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_n_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_t_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       td_sg_de_usmv_2_c_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_n_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_t_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_c_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_n_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_t_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_c_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_n_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_t_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_c_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_n_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_t_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_c_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_n_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_t_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_c_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_n_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_t_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_c_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_n_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_t_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_c_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_n_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_t_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tc_sg_de_usmv_2_c_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_n_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_t_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_c_ap3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_n_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_t_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_c_ap3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_n_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_t_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_c_ap1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_n_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_t_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_c_ap1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_n_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_t_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_c_anr1_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_n_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_t_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_c_anr1_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_n_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_t_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_c_anr3_bp1_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_n_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_t_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      CALL       tz_sg_de_usmv_2_c_anr3_bnr0_ix1_iy1(errval,afmt,ictxt)
+      IF(errval.NE.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      errval=0
+      
+      ENDDO
+9999      CONTINUE
+      PRINT *,"PASSED:",passed
+      PRINT *,"FAILED:",failed
+      CALL psb_exit(ictxt)
+      END PROGRAM
+!> @endcond
+
+
+
diff --git a/psbtf.m b/psbtf.m
new file mode 100644
index 0000000..07547bd
--- /dev/null
+++ b/psbtf.m
@@ -0,0 +1,60 @@
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+#
+# Parallel Sparse Blas Tester Code.
+#
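+# Usage sketch (an assumption, as for psb_mvsv_tester.m):
+#   octave -q psbtf.m > psbtf.F90
+# emits the test driver program assembled below.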
+
+source("./sbtg.m")
+
+res=rsb_octave_license("f");
+res=[res,"!\n"];
+res=[res,"! Parallel Sparse BLAS fortran interface testing code\n"];
+res=[res,"!\n"];
+res=[res,"!\n"];
+res=[res,"!> @cond INNERDOC\n"];
+res=[res,findent,"PROGRAM main\n\n"];
+#res=[res,findent,"PROGRAM main\n\n",findent,"INTEGER :: res,passed=0,failed=0;\n"];
+res=[res,findent,"USE psb_base_mod\n"];
+res=[res,findent,"USE psb_mvsv_tester\n"];
+res=[res,findent,"IMPLICIT NONE\n"];
+res=[res,findent,"INTEGER :: res,passed=0,failed=0,fi=0\n"];
+res=[res,findent,"INTEGER            :: ictxt, iam=-1, np=-1\n"];
+res=[res,findent,"CHARACTER(LEN=psb_fidasize_) :: afmt\n"];
+#res=sprintf("%s%s",res,findent,"INTEGER            :: ictxt, iam, np\n");
+res=sprintf("%s%s",res,findent,"CALL psb_init(ictxt)\n");
+res=sprintf("%s%s",res,findent,"CALL psb_info(ictxt,iam,np)\n");
+res=[res,findent,"IF(iam<0)THEN\n"];
+res=[res,findent,findent,"GOTO 9999\n"];
+res=[res,findent,"ENDIF\n"];
+res=[res,findent,"DO fi=1,2\n"];
+res=[res,findent,"IF(fi.EQ.1)afmt=psb_csr_afmt_\n"];
+res=[res,findent,"IF(fi.EQ.2)afmt=psb_coo_afmt_\n"];
+printf("%s",res);
+all_test("p","CALL");
+res=["" ,findent,"ENDDO\n"];
+res=[res,"9999",findent,"CONTINUE\n"];
+res=[res,findent,"PRINT *,\"PASSED:\",passed\n"];
+res=[res,findent,"PRINT *,\"FAILED:\",failed\n"];
+res=sprintf("%s%s",res,findent,"CALL psb_exit(ictxt)\n");
+res=[res,findent,"END PROGRAM\n"];
+res=[res,"!> @endcond\n"];
+res=sprintf("%s%s",res,"\n");
+res=sprintf("%s%s",res,"\n\n");
+res=sprintf("%s%s",res,"");
+printf("%s",res);
diff --git a/rsb-config.h.hin b/rsb-config.h.hin
new file mode 100644
index 0000000..7a9858b
--- /dev/null
+++ b/rsb-config.h.hin
@@ -0,0 +1,3 @@
+/* This header file is not intended to be included by librsb programs: it is only for inspection. */
+#ifndef RSB_CONFIG_H_INCLUDED
+#define RSB_CONFIG_H_INCLUDED
diff --git a/rsb-config.h.in b/rsb-config.h.in
new file mode 100644
index 0000000..8e7287a
--- /dev/null
+++ b/rsb-config.h.in
@@ -0,0 +1,424 @@
+/* rsb-config.h.in.  Generated from configure.ac by autoheader.  */
+
+/* Define if building universal (internal helper macro) */
+#undef AC_APPLE_UNIVERSAL_BUILD
+
+/* C compiler. */
+#undef CC
+
+/* Compilation flags. */
+#undef CFLAGS
+
+/* */
+#undef COPYRIGHT_STRING
+
+/* Define to 1 if you have the <assert.h> header file. */
+#undef HAVE_ASSERT_H
+
+/* Define to 1 if you have the <complex.h> header file. */
+#undef HAVE_COMPLEX_H
+
+/* Define to 1 if you have the <ctype.h> header file. */
+#undef HAVE_CTYPE_H
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#undef HAVE_DLFCN_H
+
+/* Define to 1 if you have the <dmalloc.h> header file. */
+#undef HAVE_DMALLOC_H
+
+/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */
+#undef HAVE_DOPRNT
+
+/* Define to 1 if you have the `dup' function. */
+#undef HAVE_DUP
+
+/* fileno(): C FILE to posix file descriptor. */
+#undef HAVE_FILENO
+
+/* Define to 1 if you have the `fread' function. */
+#undef HAVE_FREAD
+
+/* Define to 1 if you have the `fwrite' function. */
+#undef HAVE_FWRITE
+
+/* Get an environment variable. */
+#undef HAVE_GETENV
+
+/* If present, will give us host name. */
+#undef HAVE_GETHOSTNAME
+
+/* Define to 1 if you have the <getopt.h> header file. */
+#undef HAVE_GETOPT_H
+
+/* getopt_long is GNU candy */
+#undef HAVE_GETOPT_LONG
+
+/* gettimeofday */
+#undef HAVE_GETTIMEOFDAY
+
+/* Define to 1 if you have the <gsl/gsl_sort.h> header file. */
+#undef HAVE_GSL_GSL_SORT_H
+
+/* Define to 1 if you have the <hwloc.h> header file. */
+#undef HAVE_HWLOC_H
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#undef HAVE_INTTYPES_H
+
+/* Define to 1 if you have the `isatty' function. */
+#undef HAVE_ISATTY
+
+/* Define to 1 if you have the <libgen.h> header file. */
+#undef HAVE_LIBGEN_H
+
+/* Define to 1 if you have the <limits.h> header file. */
+#undef HAVE_LIMITS_H
+
+/* Define to 1 if you have the <malloc.h> header file. */
+#undef HAVE_MALLOC_H
+
+/* Define to 1 if you have the <math.h> header file. */
+#undef HAVE_MATH_H
+
+/* This function is obsolete. */
+#undef HAVE_MEMALIGN
+
+/* Define to 1 if you have the `memcmp' function. */
+#undef HAVE_MEMCMP
+
+/* Define to 1 if you have the <memory.h> header file. */
+#undef HAVE_MEMORY_H
+
+/* Define to 1 if you have the `memset' function. */
+#undef HAVE_MEMSET
+
+/* If present, the mlockall function makes all allocations memory resident. */
+#undef HAVE_MLOCKALL
+
+/* Define to 1 if you have the <omp.h> header file. */
+#undef HAVE_OMP_H
+
+/* Define to 1 if you have the <oski/oski.h> header file. */
+#undef HAVE_OSKI_OSKI_H
+
+/* Define to 1 if you have the <papi.h> header file. */
+#undef HAVE_PAPI_H
+
+/* The POSIX aligned memory allocator.(The function posix_memalign() is
+   available since glibc 2.1.91) */
+#undef HAVE_POSIX_MEMALIGN
+
+/* Define to 1 if you have the <pthread.h> header file. */
+#undef HAVE_PTHREAD_H
+
+/* Define to 1 if you have the `rand' function. */
+#undef HAVE_RAND
+
+/* Define to 1 if you have the <regex.h> header file. */
+#undef HAVE_REGEX_H
+
+/* Define to 1 if you have the <rpc/xdr.h> header file. */
+#undef HAVE_RPC_XDR_H
+
+/* Define to 1 if you have the `sched_getaffinity' function. */
+#undef HAVE_SCHED_GETAFFINITY
+
+/* Define to 1 if you have the <sched.h> header file. */
+#undef HAVE_SCHED_H
+
+/* setenv */
+#undef HAVE_SETENV
+
+/* Define to 1 if you have the <signal.h> header file. */
+#undef HAVE_SIGNAL_H
+
+/* Define to 1 if you have the <stdarg.h> header file. */
+#undef HAVE_STDARG_H
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#undef HAVE_STDINT_H
+
+/* Define to 1 if you have the <stdio.h> header file. */
+#undef HAVE_STDIO_H
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#undef HAVE_STDLIB_H
+
+/* Define to 1 if you have the `strcpy' function. */
+#undef HAVE_STRCPY
+
+/* Define to 1 if you have the <strings.h> header file. */
+#undef HAVE_STRINGS_H
+
+/* Define to 1 if you have the <string.h> header file. */
+#undef HAVE_STRING_H
+
+/* Define to 1 if you have the `strncmp' function. */
+#undef HAVE_STRNCMP
+
+/* If present, the sysconf function gives lots of system info. */
+#undef HAVE_SYSCONF
+
+/* Define to 1 if you have the <sys/mman.h> header file. */
+#undef HAVE_SYS_MMAN_H
+
+/* Define to 1 if you have the <sys/resource.h> header file. */
+#undef HAVE_SYS_RESOURCE_H
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#undef HAVE_SYS_STAT_H
+
+/* Define to 1 if you have the <sys/systemcfg.h> header file. */
+#undef HAVE_SYS_SYSTEMCFG_H
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#undef HAVE_SYS_TYPES_H
+
+/* Define to 1 if you have the <sys/utsname.h> header file. */
+#undef HAVE_SYS_UTSNAME_H
+
+/* times */
+#undef HAVE_TIMES
+
+/* Define to 1 if you have the <times.h> header file. */
+#undef HAVE_TIMES_H
+
+/* Define to 1 if you have the <time.h> header file. */
+#undef HAVE_TIME_H
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#undef HAVE_UNISTD_H
+
+/* Define to 1 if you have the `vprintf' function. */
+#undef HAVE_VPRINTF
+
+/* Define to 1 if you have the <zlib.h> header file. */
+#undef HAVE_ZLIB_H
+
+/* Define to the sub-directory in which libtool stores uninstalled libraries.
+   */
+#undef LT_OBJDIR
+
+/* Define to 1 if your C compiler doesn't accept -c and -o together. */
+#undef NO_MINUS_C_MINUS_O
+
+/* OSKI path to installed lua modules. User set OSKI_LUA_PATH environment
+   variable at runtime will override this one, however. */
+#undef OSKI_LUA_PATH
+
+/* Name of package */
+#undef PACKAGE
+
+/* Define to the address where bug reports for this package should be sent. */
+#undef PACKAGE_BUGREPORT
+
+/* Define to the full name of this package. */
+#undef PACKAGE_NAME
+
+/* Define to the full name and version of this package. */
+#undef PACKAGE_STRING
+
+/* Define to the one symbol short name of this package. */
+#undef PACKAGE_TARNAME
+
+/* Define to the home page for this package. */
+#undef PACKAGE_URL
+
+/* Define to the version of this package. */
+#undef PACKAGE_VERSION
+
+/* Extra (undocumented) developer oriented control switches. */
+#undef RSB_ALLOW_INTERNAL_GETENVS
+
+/* If set, the library will use smaller indices in blocks. */
+#undef RSB_BLOCK_SMALL_INDICES
+
+/* Maximal number of supported threads (default 64). */
+#undef RSB_CONST_MAX_SUPPORTED_THREADS
+
+/* If not null, the library will rely on this for memory hierarchy info,
+   unless RSB_USER_SET_MEM_HIERARCHY_INFO is set. */
+#undef RSB_DETECTED_MEM_HIERARCHY_INFO
+
+/* If defined, will not account for internally used memory. */
+#undef RSB_DISABLE_ALLOCATOR_WRAPPER
+
+/* Performance Application Programming Interface. */
+#undef RSB_HAVE_PAPI
+
+/* Inner error verbosity (internal debug level). */
+#undef RSB_INT_ERR_VERBOSITY
+
+/* Error verbosity (often known as debug level). */
+#undef RSB_OUT_ERR_VERBOSITY
+
+/* If set, sort operations will happen in place. */
+#undef RSB_SORT_IN_PLACE
+
+/* If not null, the library will rely on this for memory hierarchy info. */
+#undef RSB_USER_SET_MEM_HIERARCHY_INFO
+
+/* If undefined, NDEBUG will be defined. */
+#undef RSB_USE_ASSERT
+
+/* experimental. */
+#undef RSB_WANT_ACTION_SIGNAL
+
+/* If 1, will allow the user to set hard limits to the memory allocated by
+   librsb. Trespass attempts will fail. */
+#undef RSB_WANT_ALLOCATOR_LIMITS
+
+/* */
+#undef RSB_WANT_DMALLOC
+
+/* On some architectures (notably modern Intel), floating point computations
+   on non-double-aligned data lose some clock cycles. */
+#undef RSB_WANT_DOUBLE_ALIGNED
+
+/* Supported input/output functionality. */
+#undef RSB_WANT_IO_LEVEL
+
+/* If set, RSB_WANT_KERNELS_DEBUG will enable comparative consistency checking
+   of the multiplying kernels against a naive, trusted implementation. */
+#undef RSB_WANT_KERNELS_DEBUG
+
+/* Enabling collection of time statistics in librsb operations (this
+   introduces an overhead). */
+#undef RSB_WANT_LIBRSB_STATS
+
+/* Looping kernels. */
+#undef RSB_WANT_LOOPING_KERNELS
+
+/* MKL support wanted in the benchmarking program. */
+#undef RSB_WANT_MKL
+
+/* Support for reading matrices in parallel (Experimental, untested). */
+#undef RSB_WANT_OMPIO_SUPPORT
+
+/* Recursive kernels parallelized with OpenMP. */
+#undef RSB_WANT_OMP_RECURSIVE_KERNELS
+
+/* OSKI comparative benchmarking. */
+#undef RSB_WANT_OSKI_BENCHMARKING
+
+/* Performance Counters. */
+#undef RSB_WANT_PERFORMANCE_COUNTERS
+
+/* Enabling experimental RSB_NUM_THREADS environment variable. */
+#undef RSB_WANT_RSB_NUM_THREADS
+
+/* If set, a reference, unoptimized Sparse BLAS Level 1 interface will be
+   functional. */
+#undef RSB_WANT_SPARSE_BLAS_LEVEL_1
+
+/* If set, the library will be much more verbose. Should be enabled for
+   debugging purposes only. */
+#undef RSB_WANT_VERBOSE_MESSAGES
+
+/* experimental. */
+#undef RSB_WANT_XDR_SUPPORT
+
+/* Support for reading gzipped matrices. */
+#undef RSB_WANT_ZLIB_SUPPORT
+
+/* HWLOC API support. */
+#undef RSB_WITH_HWLOC
+
+/* LIKWID marker API support. */
+#undef RSB_WITH_LIKWID
+
+/* Sparse BLAS interface compilation. */
+#undef RSB_WITH_SPARSE_BLAS_INTERFACE
+
+/* The size of `char', as computed by sizeof. */
+#undef SIZEOF_CHAR
+
+/* The size of `complex', as computed by sizeof. */
+#undef SIZEOF_COMPLEX
+
+/* The size of `double', as computed by sizeof. */
+#undef SIZEOF_DOUBLE
+
+/* The size of `double complex', as computed by sizeof. */
+#undef SIZEOF_DOUBLE_COMPLEX
+
+/* The size of `float', as computed by sizeof. */
+#undef SIZEOF_FLOAT
+
+/* The size of `float complex', as computed by sizeof. */
+#undef SIZEOF_FLOAT_COMPLEX
+
+/* The size of `int', as computed by sizeof. */
+#undef SIZEOF_INT
+
+/* The size of `long', as computed by sizeof. */
+#undef SIZEOF_LONG
+
+/* The size of `long double', as computed by sizeof. */
+#undef SIZEOF_LONG_DOUBLE
+
+/* The size of `long int', as computed by sizeof. */
+#undef SIZEOF_LONG_INT
+
+/* The size of `long long int', as computed by sizeof. */
+#undef SIZEOF_LONG_LONG_INT
+
+/* The size of `short int', as computed by sizeof. */
+#undef SIZEOF_SHORT_INT
+
+/* The size of `size_t', as computed by sizeof. */
+#undef SIZEOF_SIZE_T
+
+/* The size of `void *', as computed by sizeof. */
+#undef SIZEOF_VOID_P
+
+/* Define to 1 if you have the ANSI C header files. */
+#undef STDC_HEADERS
+
+/* SVN REVISION */
+#undef SVN_REVISION
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+#undef TIME_WITH_SYS_TIME
+
+/* Version number of package */
+#undef VERSION
+
+/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
+   significant byte first (like Motorola and SPARC, unlike Intel). */
+#if defined AC_APPLE_UNIVERSAL_BUILD
+# if defined __BIG_ENDIAN__
+#  define WORDS_BIGENDIAN 1
+# endif
+#else
+# ifndef WORDS_BIGENDIAN
+#  undef WORDS_BIGENDIAN
+# endif
+#endif
+
+/* Define to empty if `const' does not conform to ANSI C. */
+#undef const
+
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+   calls it, or to nothing if 'inline' is not supported under any name.  */
+#ifndef __cplusplus
+#undef inline
+#endif
+
+/* Define to the equivalent of the C99 'restrict' keyword, or to
+   nothing if this is not supported.  Do not define if restrict is
+   supported directly.  */
+#undef restrict
+/* Work around a bug in Sun C++: it does not support _Restrict or
+   __restrict__, even though the corresponding Sun C compiler ends up with
+   "#define restrict _Restrict" or "#define restrict __restrict__" in the
+   previous line.  Perhaps some future version of Sun C++ will work with
+   restrict; if so, hopefully it defines __RESTRICT like Sun C does.  */
+#if defined __SUNPRO_CC && !defined __RESTRICT
+# define _Restrict
+# define __restrict__
+#endif
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+#undef size_t
diff --git a/rsb-incoming.grep b/rsb-incoming.grep
new file mode 100644
index 0000000..f42a5db
--- /dev/null
+++ b/rsb-incoming.grep
@@ -0,0 +1,16 @@
+typedef.*byte_t
+rsb_do_get_csc
+rsb_do_switch_fullword_array_to_compressed
+rsb__weed_out_duplicates
+rsb__allocate_coo_matrix_t
+rsb__fill_with_ones
+rsb__calloc
+rsb__lib_get_info_str
+rsb__debug_print_vector
+rsb__util_set_area_to_converted_integer
+rsb__set_num_threads
+rsb__is_correctly_built_csr_matrix
+rsb__util_is_sorted_coo_as_row_major
+rsb__nnz_split_coo_bsearch
+rsb__do_print_matrix_t
+rsb__cblas_Xscal
diff --git a/rsb-incoming.sed b/rsb-incoming.sed
new file mode 100644
index 0000000..8d2c7f0
--- /dev/null
+++ b/rsb-incoming.sed
@@ -0,0 +1 @@
+s/rsb_dump_flags_t/rsb_flags_t/g;s/RSB_RESTRICT//g;s/FILE/void/g;s/rsb_int\>/rsb_int_t/g;
diff --git a/rsb.F90 b/rsb.F90
new file mode 100644
index 0000000..db5484d
--- /dev/null
+++ b/rsb.F90
@@ -0,0 +1,1202 @@
+!> @file.
+!! @brief Header file automatically generated from <rsb.h>, offering ISO-C-BINDING interfaces to <rsb.h>'s functions.
+!! Defines \c MODULE \c rsb.
+!! For examples of usage, see Fortran examples in \ref rsb_doc_examples.
+!! The official documentation is that of <rsb.h>.
+!! Make sure you are using a modern Fortran compiler.
+      
+!DEC$IF .NOT. DEFINED (RSB_FORTRAN_HEADER)
+!DEC$DEFINE RSB_FORTRAN_HEADER
+      
+      MODULE rsb
+         USE ISO_C_BINDING, ONLY: C_INT,C_PTR,C_NULL_PTR,C_SIGNED_CHAR
+      
+      
+!> ISO C BINDING interface to ::rsb_strerror_r.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_strerror_r&
+        &(errval,buf,buflen)&
+        &BIND(c,NAME = 'rsb_strerror_r')
+       USE ISO_C_BINDING
+       INTEGER(C_INT), VALUE  :: errval
+       TYPE(C_PTR), VALUE  :: buf ! CHARACTER(C_CHAR)
+       INTEGER(C_SIZE_T), VALUE  :: buflen
+       END FUNCTION rsb_strerror_r
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_perror.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_perror&
+        &(stream,errval)&
+        &BIND(c,NAME = 'rsb_perror')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: stream ! A numerical type
+       INTEGER(C_INT), VALUE  :: errval
+       END FUNCTION rsb_perror
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_lib_init.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_lib_init&
+        &(iop)&
+        &BIND(c,NAME = 'rsb_lib_init')
+       USE ISO_C_BINDING
+       TYPE(C_PTR),VALUE :: iop ! C_NULL_PTR is a safe value. Please consult the rsb.h documentation for other options.
+       END FUNCTION rsb_lib_init
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_lib_reinit.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_lib_reinit&
+        &(iop)&
+        &BIND(c,NAME = 'rsb_lib_reinit')
+       USE ISO_C_BINDING
+       TYPE(C_PTR),VALUE :: iop ! C_NULL_PTR is a safe value. Please consult the rsb.h documentation for other options.
+       END FUNCTION rsb_lib_reinit
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_lib_set_opt_str.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_lib_set_opt_str&
+        &(opnp,opvp)&
+        &BIND(c,NAME = 'rsb_lib_set_opt_str')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: opnp ! CHARACTER(C_CHAR)
+       TYPE(C_PTR), VALUE  :: opvp ! CHARACTER(C_CHAR)
+       END FUNCTION rsb_lib_set_opt_str
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_lib_set_opt.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_lib_set_opt&
+        &(iof,iop)&
+        &BIND(c,NAME = 'rsb_lib_set_opt')
+       USE ISO_C_BINDING
+       INTEGER(C_INT), VALUE  :: iof
+       TYPE(C_PTR),VALUE :: iop ! C_NULL_PTR is a safe value. Please consult the rsb.h documentation for other options.
+       END FUNCTION rsb_lib_set_opt
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_lib_get_opt.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_lib_get_opt&
+        &(iof,iop)&
+        &BIND(c,NAME = 'rsb_lib_get_opt')
+       USE ISO_C_BINDING
+       INTEGER(C_INT), VALUE  :: iof
+       TYPE(C_PTR),VALUE :: iop ! C_NULL_PTR is a safe value. Please consult the rsb.h documentation for other options.
+       END FUNCTION rsb_lib_get_opt
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_lib_exit.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_lib_exit&
+        &(iop)&
+        &BIND(c,NAME = 'rsb_lib_exit')
+       USE ISO_C_BINDING
+       TYPE(C_PTR),VALUE :: iop ! C_NULL_PTR is a safe value. Please consult the rsb.h documentation for other options.
+       END FUNCTION rsb_lib_exit
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_alloc_from_coo_begin.
+      INTERFACE
+       TYPE(C_PTR) FUNCTION &
+        &rsb_mtx_alloc_from_coo_begin&
+        &(nnzA,typecode,nrA,ncA,flagsA,errvalp)&
+        &BIND(c,NAME = 'rsb_mtx_alloc_from_coo_begin')
+       USE ISO_C_BINDING
+       INTEGER(C_INT), VALUE  :: nnzA
+       INTEGER(C_SIGNED_CHAR), VALUE  :: typecode
+       INTEGER(C_INT), VALUE  :: nrA
+       INTEGER(C_INT), VALUE  :: ncA
+       INTEGER(C_INT), VALUE  :: flagsA !> ISO C BINDING interface to ::rsb_flags_t
+       TYPE(C_PTR),VALUE :: errvalp ! INTEGER(C_INT)
+       END FUNCTION rsb_mtx_alloc_from_coo_begin
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_alloc_from_coo_end.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_mtx_alloc_from_coo_end&
+        &(mtxApp)&
+        &BIND(c,NAME = 'rsb_mtx_alloc_from_coo_end')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: mtxApp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       END FUNCTION rsb_mtx_alloc_from_coo_end
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_alloc_from_csr_const.
+      INTERFACE
+       TYPE(C_PTR) FUNCTION &
+        &rsb_mtx_alloc_from_csr_const&
+        &(VA,RP,JA,nnzA,typecode,nrA,ncA,brA,bcA,flagsA,errvalp&
+      &)&
+        &BIND(c,NAME = 'rsb_mtx_alloc_from_csr_const')
+       USE ISO_C_BINDING
+       TYPE(C_PTR),VALUE :: VA ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR), VALUE  :: RP ! INTEGER(C_INT)
+       TYPE(C_PTR),VALUE :: JA ! INTEGER(C_INT)
+       INTEGER(C_INT), VALUE  :: nnzA
+       INTEGER(C_SIGNED_CHAR), VALUE  :: typecode
+       INTEGER(C_INT), VALUE  :: nrA
+       INTEGER(C_INT), VALUE  :: ncA
+       INTEGER(C_INT), VALUE  :: brA
+       INTEGER(C_INT), VALUE  :: bcA
+       INTEGER(C_INT), VALUE  :: flagsA !> ISO C BINDING interface to ::rsb_flags_t
+       TYPE(C_PTR),VALUE :: errvalp ! INTEGER(C_INT)
+       END FUNCTION rsb_mtx_alloc_from_csr_const
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_alloc_from_csc_const.
+      INTERFACE
+       TYPE(C_PTR) FUNCTION &
+        &rsb_mtx_alloc_from_csc_const&
+        &(VA,IA,CP,nnzA,typecode,nrA,ncA,brA,bcA,flagsA,errvalp&
+      &)&
+        &BIND(c,NAME = 'rsb_mtx_alloc_from_csc_const')
+       USE ISO_C_BINDING
+       TYPE(C_PTR),VALUE :: VA ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR),VALUE :: IA ! INTEGER(C_INT)
+       TYPE(C_PTR), VALUE  :: CP ! INTEGER(C_INT)
+       INTEGER(C_INT), VALUE  :: nnzA
+       INTEGER(C_SIGNED_CHAR), VALUE  :: typecode
+       INTEGER(C_INT), VALUE  :: nrA
+       INTEGER(C_INT), VALUE  :: ncA
+       INTEGER(C_INT), VALUE  :: brA
+       INTEGER(C_INT), VALUE  :: bcA
+       INTEGER(C_INT), VALUE  :: flagsA !> ISO C BINDING interface to ::rsb_flags_t
+       TYPE(C_PTR),VALUE :: errvalp ! INTEGER(C_INT)
+       END FUNCTION rsb_mtx_alloc_from_csc_const
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_alloc_from_csr_inplace.
+      INTERFACE
+       TYPE(C_PTR) FUNCTION &
+        &rsb_mtx_alloc_from_csr_inplace&
+        &(VA,RP,JA,nnzA,typecode,nrA,ncA,brA,bcA,flagsA,errvalp&
+      &)&
+        &BIND(c,NAME = 'rsb_mtx_alloc_from_csr_inplace')
+       USE ISO_C_BINDING
+       TYPE(C_PTR),VALUE :: VA ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR), VALUE  :: RP ! INTEGER(C_INT)
+       TYPE(C_PTR),VALUE :: JA ! INTEGER(C_INT)
+       INTEGER(C_INT), VALUE  :: nnzA
+       INTEGER(C_SIGNED_CHAR), VALUE  :: typecode
+       INTEGER(C_INT), VALUE  :: nrA
+       INTEGER(C_INT), VALUE  :: ncA
+       INTEGER(C_INT), VALUE  :: brA
+       INTEGER(C_INT), VALUE  :: bcA
+       INTEGER(C_INT), VALUE  :: flagsA !> ISO C BINDING interface to ::rsb_flags_t
+       TYPE(C_PTR),VALUE :: errvalp ! INTEGER(C_INT)
+       END FUNCTION rsb_mtx_alloc_from_csr_inplace
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_alloc_from_coo_const.
+      INTERFACE
+       TYPE(C_PTR) FUNCTION &
+        &rsb_mtx_alloc_from_coo_const&
+        &(VA,IA,JA,nnzA,typecode,nrA,ncA,brA,bcA,flagsA,errvalp&
+      &)&
+        &BIND(c,NAME = 'rsb_mtx_alloc_from_coo_const')
+       USE ISO_C_BINDING
+       TYPE(C_PTR),VALUE :: VA ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR),VALUE :: IA ! INTEGER(C_INT)
+       TYPE(C_PTR),VALUE :: JA ! INTEGER(C_INT)
+       INTEGER(C_INT), VALUE  :: nnzA
+       INTEGER(C_SIGNED_CHAR), VALUE  :: typecode
+       INTEGER(C_INT), VALUE  :: nrA
+       INTEGER(C_INT), VALUE  :: ncA
+       INTEGER(C_INT), VALUE  :: brA
+       INTEGER(C_INT), VALUE  :: bcA
+       INTEGER(C_INT), VALUE  :: flagsA !> ISO C BINDING interface to ::rsb_flags_t
+       TYPE(C_PTR),VALUE :: errvalp ! INTEGER(C_INT)
+       END FUNCTION rsb_mtx_alloc_from_coo_const
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_alloc_from_coo_inplace.
+      INTERFACE
+       TYPE(C_PTR) FUNCTION &
+        &rsb_mtx_alloc_from_coo_inplace&
+        &(VA,IA,JA,nnzA,typecode,nrA,ncA,brA,bcA,flagsA,errvalp&
+      &)&
+        &BIND(c,NAME = 'rsb_mtx_alloc_from_coo_inplace')
+       USE ISO_C_BINDING
+       TYPE(C_PTR),VALUE :: VA ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR),VALUE :: IA ! INTEGER(C_INT)
+       TYPE(C_PTR),VALUE :: JA ! INTEGER(C_INT)
+       INTEGER(C_INT), VALUE  :: nnzA
+       INTEGER(C_SIGNED_CHAR), VALUE  :: typecode
+       INTEGER(C_INT), VALUE  :: nrA
+       INTEGER(C_INT), VALUE  :: ncA
+       INTEGER(C_INT), VALUE  :: brA
+       INTEGER(C_INT), VALUE  :: bcA
+       INTEGER(C_INT), VALUE  :: flagsA !> ISO C BINDING interface to ::rsb_flags_t
+       TYPE(C_PTR),VALUE :: errvalp ! INTEGER(C_INT)
+       END FUNCTION rsb_mtx_alloc_from_coo_inplace
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_clone.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_mtx_clone&
+        &(mtxBpp,typecode,transA,alphap,mtxAp,flags)&
+        &BIND(c,NAME = 'rsb_mtx_clone')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: mtxBpp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       INTEGER(C_SIGNED_CHAR), VALUE  :: typecode
+       INTEGER(C_INT), VALUE  :: transA
+       TYPE(C_PTR),VALUE :: alphap ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       INTEGER(C_INT), VALUE  :: flags !> ISO C BINDING interface to ::rsb_flags_t
+       END FUNCTION rsb_mtx_clone
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_free.
+      INTERFACE
+       TYPE(C_PTR) FUNCTION &
+        &rsb_mtx_free&
+        &(mtxAp)&
+        &BIND(c,NAME = 'rsb_mtx_free')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       END FUNCTION rsb_mtx_free
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_get_nrm.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_mtx_get_nrm&
+        &(mtxAp,Np,flags)&
+        &BIND(c,NAME = 'rsb_mtx_get_nrm')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       TYPE(C_PTR),VALUE :: Np ! A single variable of the same numerical type of the matrix.
+       INTEGER(C_INT), VALUE  :: flags !> ISO C BINDING interface to ::rsb_extff_t
+       END FUNCTION rsb_mtx_get_nrm
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_get_vec.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_mtx_get_vec&
+        &(mtxAp,Dp,flags)&
+        &BIND(c,NAME = 'rsb_mtx_get_vec')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       TYPE(C_PTR),VALUE :: Dp ! A single variable of the same numerical type of the matrix.
+       INTEGER(C_INT), VALUE  :: flags !> ISO C BINDING interface to ::rsb_extff_t
+       END FUNCTION rsb_mtx_get_vec
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_rndr.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_mtx_rndr&
+        &(filename,mtxAp,pmWidth,pmHeight,rflags)&
+        &BIND(c,NAME = 'rsb_mtx_rndr')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: filename ! CHARACTER(C_CHAR)
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       INTEGER(C_INT), VALUE  :: pmWidth
+       INTEGER(C_INT), VALUE  :: pmHeight
+       INTEGER(C_INT), VALUE  :: rflags !> ISO C BINDING interface to ::rsb_marf_t
+       END FUNCTION rsb_mtx_rndr
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_file_mtx_rndr.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_file_mtx_rndr&
+        &(pmp,filename,pmlWidth,pmWidth,pmHeight,rflags)&
+        &BIND(c,NAME = 'rsb_file_mtx_rndr')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: pmp ! A numerical type
+       TYPE(C_PTR), VALUE  :: filename ! CHARACTER(C_CHAR)
+       INTEGER(C_INT), VALUE  :: pmlWidth
+       INTEGER(C_INT), VALUE  :: pmWidth
+       INTEGER(C_INT), VALUE  :: pmHeight
+       INTEGER(C_INT), VALUE  :: rflags !> ISO C BINDING interface to ::rsb_marf_t
+       END FUNCTION rsb_file_mtx_rndr
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_spmv.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_spmv&
+        &(transA,alphap,mtxAp,Xp,incX,betap,Yp,incY)&
+        &BIND(c,NAME = 'rsb_spmv')
+       USE ISO_C_BINDING
+       INTEGER(C_INT), VALUE  :: transA
+       TYPE(C_PTR),VALUE :: alphap ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       TYPE(C_PTR),VALUE :: Xp ! A single variable of the same numerical type of the matrix.
+       INTEGER(C_INT), VALUE  :: incX
+       TYPE(C_PTR),VALUE :: betap ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR),VALUE :: Yp ! A single variable of the same numerical type of the matrix.
+       INTEGER(C_INT), VALUE  :: incY
+       END FUNCTION rsb_spmv
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_spmm.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_spmm&
+        &(transA,alphap,mtxAp,nrhs,order,Bp,ldB,betap,Cp,ldC)&
+        &BIND(c,NAME = 'rsb_spmm')
+       USE ISO_C_BINDING
+       INTEGER(C_INT), VALUE  :: transA
+       TYPE(C_PTR),VALUE :: alphap ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       INTEGER(C_INT), VALUE  :: nrhs
+       INTEGER(C_INT), VALUE  :: order !> ISO C BINDING interface to ::rsb_flags_t
+       TYPE(C_PTR), VALUE  :: Bp ! A numerical type
+       INTEGER(C_INT), VALUE  :: ldB
+       TYPE(C_PTR),VALUE :: betap ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR), VALUE  :: Cp ! A numerical type
+       INTEGER(C_INT), VALUE  :: ldC
+       END FUNCTION rsb_spmm
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_spsv.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_spsv&
+        &(transT,alphap,mtxTp,Xp,incX,Yp,incY)&
+        &BIND(c,NAME = 'rsb_spsv')
+       USE ISO_C_BINDING
+       INTEGER(C_INT), VALUE  :: transT
+       TYPE(C_PTR),VALUE :: alphap ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR), VALUE  :: mtxTp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       TYPE(C_PTR),VALUE :: Xp ! A single variable of the same numerical type of the matrix.
+       INTEGER(C_INT), VALUE  :: incX
+       TYPE(C_PTR),VALUE :: Yp ! A single variable of the same numerical type of the matrix.
+       INTEGER(C_INT), VALUE  :: incY
+       END FUNCTION rsb_spsv
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_spsm.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_spsm&
+        &(transT,alphap,mtxTp,nrhs,order,betap,Bp,ldB,Cp,ldC)&
+        &BIND(c,NAME = 'rsb_spsm')
+       USE ISO_C_BINDING
+       INTEGER(C_INT), VALUE  :: transT
+       TYPE(C_PTR),VALUE :: alphap ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR), VALUE  :: mtxTp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       INTEGER(C_INT), VALUE  :: nrhs
+       INTEGER(C_INT), VALUE  :: order !> ISO C BINDING interface to ::rsb_flags_t
+       TYPE(C_PTR),VALUE :: betap ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR), VALUE  :: Bp ! A numerical type
+       INTEGER(C_INT), VALUE  :: ldB
+       TYPE(C_PTR), VALUE  :: Cp ! A numerical type
+       INTEGER(C_INT), VALUE  :: ldC
+       END FUNCTION rsb_spsm
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_add_to_dense.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_mtx_add_to_dense&
+        &(alphap,mtxAp,ldB,nrB,ncB,rowmajorB,Bp)&
+        &BIND(c,NAME = 'rsb_mtx_add_to_dense')
+       USE ISO_C_BINDING
+       TYPE(C_PTR),VALUE :: alphap ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       INTEGER(C_INT), VALUE  :: ldB
+       INTEGER(C_INT), VALUE  :: nrB
+       INTEGER(C_INT), VALUE  :: ncB
+       INTEGER(C_INT), VALUE  :: rowmajorB !> ISO C BINDING interface to ::rsb_bool_t
+       TYPE(C_PTR), VALUE  :: Bp ! A numerical type
+       END FUNCTION rsb_mtx_add_to_dense
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_sppsp.
+      INTERFACE
+       TYPE(C_PTR) FUNCTION &
+        &rsb_sppsp&
+        &(typecode,transA,alphap,mtxAp,transB,betap,mtxBp,errvalp&
+      &)&
+        &BIND(c,NAME = 'rsb_sppsp')
+       USE ISO_C_BINDING
+       INTEGER(C_SIGNED_CHAR), VALUE  :: typecode
+       INTEGER(C_INT), VALUE  :: transA
+       TYPE(C_PTR),VALUE :: alphap ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       INTEGER(C_INT), VALUE  :: transB
+       TYPE(C_PTR),VALUE :: betap ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR), VALUE  :: mtxBp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       TYPE(C_PTR),VALUE :: errvalp ! INTEGER(C_INT)
+       END FUNCTION rsb_sppsp
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_spmsp.
+      INTERFACE
+       TYPE(C_PTR) FUNCTION &
+        &rsb_spmsp&
+        &(typecode,transA,alphap,mtxAp,transB,betap,mtxBp,errvalp&
+      &)&
+        &BIND(c,NAME = 'rsb_spmsp')
+       USE ISO_C_BINDING
+       INTEGER(C_SIGNED_CHAR), VALUE  :: typecode
+       INTEGER(C_INT), VALUE  :: transA
+       TYPE(C_PTR),VALUE :: alphap ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       INTEGER(C_INT), VALUE  :: transB
+       TYPE(C_PTR),VALUE :: betap ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR), VALUE  :: mtxBp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       TYPE(C_PTR),VALUE :: errvalp ! INTEGER(C_INT)
+       END FUNCTION rsb_spmsp
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_spmsp_to_dense.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_spmsp_to_dense&
+        &(typecode,transA,alphap,mtxAp,transB,betap,mtxBp,ldC&
+      &,nrC,ncC,rowmajorC,Cp)&
+        &BIND(c,NAME = 'rsb_spmsp_to_dense')
+       USE ISO_C_BINDING
+       INTEGER(C_SIGNED_CHAR), VALUE  :: typecode
+       INTEGER(C_INT), VALUE  :: transA
+       TYPE(C_PTR),VALUE :: alphap ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       INTEGER(C_INT), VALUE  :: transB
+       TYPE(C_PTR),VALUE :: betap ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR), VALUE  :: mtxBp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       INTEGER(C_INT), VALUE  :: ldC
+       INTEGER(C_INT), VALUE  :: nrC
+       INTEGER(C_INT), VALUE  :: ncC
+       INTEGER(C_INT), VALUE  :: rowmajorC !> ISO C BINDING interface to ::rsb_bool_t
+       TYPE(C_PTR), VALUE  :: Cp ! A numerical type
+       END FUNCTION rsb_spmsp_to_dense
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_switch_to_coo.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_mtx_switch_to_coo&
+        &(mtxAp,VAp,IAp,JAp,flags)&
+        &BIND(c,NAME = 'rsb_mtx_switch_to_coo')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       TYPE(C_PTR),VALUE :: VAp ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR),VALUE :: IAp ! INTEGER(C_INT)
+       TYPE(C_PTR),VALUE :: JAp ! INTEGER(C_INT)
+       INTEGER(C_INT), VALUE  :: flags !> ISO C BINDING interface to ::rsb_flags_t
+       END FUNCTION rsb_mtx_switch_to_coo
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_switch_to_csr.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_mtx_switch_to_csr&
+        &(mtxAp,VAp,IAp,JAp,flags)&
+        &BIND(c,NAME = 'rsb_mtx_switch_to_csr')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       TYPE(C_PTR),VALUE :: VAp ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR),VALUE :: IAp ! INTEGER(C_INT)
+       TYPE(C_PTR),VALUE :: JAp ! INTEGER(C_INT)
+       INTEGER(C_INT), VALUE  :: flags !> ISO C BINDING interface to ::rsb_flags_t
+       END FUNCTION rsb_mtx_switch_to_csr
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_get_coo.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_mtx_get_coo&
+        &(mtxAp,VA,IA,JA,flags)&
+        &BIND(c,NAME = 'rsb_mtx_get_coo')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       TYPE(C_PTR),VALUE :: VA ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR),VALUE :: IA ! INTEGER(C_INT)
+       TYPE(C_PTR),VALUE :: JA ! INTEGER(C_INT)
+       INTEGER(C_INT), VALUE  :: flags !> ISO C BINDING interface to ::rsb_flags_t
+       END FUNCTION rsb_mtx_get_coo
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_get_csr.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_mtx_get_csr&
+        &(typecode,mtxAp,VA,RP,JA,flags)&
+        &BIND(c,NAME = 'rsb_mtx_get_csr')
+       USE ISO_C_BINDING
+       INTEGER(C_SIGNED_CHAR), VALUE  :: typecode
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       TYPE(C_PTR),VALUE :: VA ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR), VALUE  :: RP ! INTEGER(C_INT)
+       TYPE(C_PTR),VALUE :: JA ! INTEGER(C_INT)
+       INTEGER(C_INT), VALUE  :: flags !> ISO C BINDING interface to ::rsb_flags_t
+       END FUNCTION rsb_mtx_get_csr
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_get_rows_sparse.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_mtx_get_rows_sparse&
+        &(transA,alphap,mtxAp,VA,IA,JA,frA,lrA,rnzp,flags)&
+        &BIND(c,NAME = 'rsb_mtx_get_rows_sparse')
+       USE ISO_C_BINDING
+       INTEGER(C_INT), VALUE  :: transA
+       TYPE(C_PTR),VALUE :: alphap ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       TYPE(C_PTR),VALUE :: VA ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR),VALUE :: IA ! INTEGER(C_INT)
+       TYPE(C_PTR),VALUE :: JA ! INTEGER(C_INT)
+       INTEGER(C_INT), VALUE  :: frA
+       INTEGER(C_INT), VALUE  :: lrA
+       TYPE(C_PTR), VALUE  :: rnzp ! INTEGER(C_INT)
+       INTEGER(C_INT), VALUE  :: flags !> ISO C BINDING interface to ::rsb_flags_t
+       END FUNCTION rsb_mtx_get_rows_sparse
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_get_coo_block.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_mtx_get_coo_block&
+        &(mtxAp,VA,IA,JA,frA,lrA,fcA,lcA,IREN,JREN,rnzp,flags&
+      &)&
+        &BIND(c,NAME = 'rsb_mtx_get_coo_block')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       TYPE(C_PTR),VALUE :: VA ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR),VALUE :: IA ! INTEGER(C_INT)
+       TYPE(C_PTR),VALUE :: JA ! INTEGER(C_INT)
+       INTEGER(C_INT), VALUE  :: frA
+       INTEGER(C_INT), VALUE  :: lrA
+       INTEGER(C_INT), VALUE  :: fcA
+       INTEGER(C_INT), VALUE  :: lcA
+       TYPE(C_PTR), VALUE  :: IREN ! INTEGER(C_INT)
+       TYPE(C_PTR), VALUE  :: JREN ! INTEGER(C_INT)
+       TYPE(C_PTR), VALUE  :: rnzp ! INTEGER(C_INT)
+       INTEGER(C_INT), VALUE  :: flags !> ISO C BINDING interface to ::rsb_flags_t
+       END FUNCTION rsb_mtx_get_coo_block
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_get_info.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_mtx_get_info&
+        &(mtxAp,miflags,minfop)&
+        &BIND(c,NAME = 'rsb_mtx_get_info')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       INTEGER(C_INT), VALUE  :: miflags
+       TYPE(C_PTR), VALUE  :: minfop ! A numerical type
+       END FUNCTION rsb_mtx_get_info
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_get_info_str.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_mtx_get_info_str&
+        &(mtxAp,mis,minfop,buflen)&
+        &BIND(c,NAME = 'rsb_mtx_get_info_str')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       TYPE(C_PTR), VALUE  :: mis ! CHARACTER(C_CHAR)
+       TYPE(C_PTR), VALUE  :: minfop ! A numerical type
+       INTEGER(C_SIZE_T), VALUE  :: buflen
+       END FUNCTION rsb_mtx_get_info_str
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_upd_vals.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_mtx_upd_vals&
+        &(mtxAp,elop_flags,omegap)&
+        &BIND(c,NAME = 'rsb_mtx_upd_vals')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       INTEGER(C_INT), VALUE  :: elop_flags
+       TYPE(C_PTR), VALUE  :: omegap ! A numerical type
+       END FUNCTION rsb_mtx_upd_vals
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_get_prec.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_mtx_get_prec&
+        &(opdp,mtxAp,prec_flags,ipdp)&
+        &BIND(c,NAME = 'rsb_mtx_get_prec')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: opdp ! A numerical type
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       INTEGER(C_INT), VALUE  :: prec_flags
+       TYPE(C_PTR), VALUE  :: ipdp ! A numerical type
+       END FUNCTION rsb_mtx_get_prec
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_set_vals.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_mtx_set_vals&
+        &(mtxAp,VA,IA,JA,nnz,flags)&
+        &BIND(c,NAME = 'rsb_mtx_set_vals')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       TYPE(C_PTR),VALUE :: VA ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR),VALUE :: IA ! INTEGER(C_INT)
+       TYPE(C_PTR),VALUE :: JA ! INTEGER(C_INT)
+       INTEGER(C_INT), VALUE  :: nnz
+       INTEGER(C_INT), VALUE  :: flags !> ISO C BINDING interface to ::rsb_flags_t
+       END FUNCTION rsb_mtx_set_vals
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_mtx_get_vals.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_mtx_get_vals&
+        &(mtxAp,VA,IA,JA,nnz,flags)&
+        &BIND(c,NAME = 'rsb_mtx_get_vals')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       TYPE(C_PTR),VALUE :: VA ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR),VALUE :: IA ! INTEGER(C_INT)
+       TYPE(C_PTR),VALUE :: JA ! INTEGER(C_INT)
+       INTEGER(C_INT), VALUE  :: nnz
+       INTEGER(C_INT), VALUE  :: flags !> ISO C BINDING interface to ::rsb_flags_t
+       END FUNCTION rsb_mtx_get_vals
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_tune_spmm.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_tune_spmm&
+        &(mtxOpp,sfp,tnp,maxr,maxt,transA,alphap,mtxAp,nrhs,order&
+      &,Bp,ldB,betap,Cp,ldC)&
+        &BIND(c,NAME = 'rsb_tune_spmm')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: mtxOpp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       TYPE(C_PTR), VALUE  :: sfp ! REAL*8
+       TYPE(C_PTR), VALUE  :: tnp ! INTEGER(C_INT)
+       INTEGER(C_INT), VALUE  :: maxr
+       REAL(C_DOUBLE), VALUE  :: maxt
+       INTEGER(C_INT), VALUE  :: transA
+       TYPE(C_PTR),VALUE :: alphap ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       INTEGER(C_INT), VALUE  :: nrhs
+       INTEGER(C_INT), VALUE  :: order !> ISO C BINDING interface to ::rsb_flags_t
+       TYPE(C_PTR), VALUE  :: Bp ! A numerical type
+       INTEGER(C_INT), VALUE  :: ldB
+       TYPE(C_PTR),VALUE :: betap ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR), VALUE  :: Cp ! A numerical type
+       INTEGER(C_INT), VALUE  :: ldC
+       END FUNCTION rsb_tune_spmm
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_tune_spsm.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_tune_spsm&
+        &(mtxOpp,sfp,tnp,maxr,maxt,transA,alphap,mtxAp,nrhs,order&
+      &,Bp,ldB,betap,Cp,ldC)&
+        &BIND(c,NAME = 'rsb_tune_spsm')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: mtxOpp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       TYPE(C_PTR), VALUE  :: sfp ! REAL*8
+       TYPE(C_PTR), VALUE  :: tnp ! INTEGER(C_INT)
+       INTEGER(C_INT), VALUE  :: maxr
+       REAL(C_DOUBLE), VALUE  :: maxt
+       INTEGER(C_INT), VALUE  :: transA
+       TYPE(C_PTR),VALUE :: alphap ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       INTEGER(C_INT), VALUE  :: nrhs
+       INTEGER(C_INT), VALUE  :: order !> ISO C BINDING interface to ::rsb_flags_t
+       TYPE(C_PTR), VALUE  :: Bp ! A numerical type
+       INTEGER(C_INT), VALUE  :: ldB
+       TYPE(C_PTR),VALUE :: betap ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR), VALUE  :: Cp ! A numerical type
+       INTEGER(C_INT), VALUE  :: ldC
+       END FUNCTION rsb_tune_spsm
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_psblas_trans_to_rsb_trans.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_psblas_trans_to_rsb_trans&
+        &(psbtrans)&
+        &BIND(c,NAME = 'rsb_psblas_trans_to_rsb_trans')
+       USE ISO_C_BINDING
+       CHARACTER(C_CHAR), VALUE  :: psbtrans
+       END FUNCTION rsb_psblas_trans_to_rsb_trans
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_file_mtx_save.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_file_mtx_save&
+        &(mtxAp,filename)&
+        &BIND(c,NAME = 'rsb_file_mtx_save')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: mtxAp ! A matrix pointer variable: (TYPE(C_PTR),TARGET)
+       TYPE(C_PTR), VALUE  :: filename ! CHARACTER(C_CHAR)
+       END FUNCTION rsb_file_mtx_save
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_file_mtx_load.
+      INTERFACE
+       TYPE(C_PTR) FUNCTION &
+        &rsb_file_mtx_load&
+        &(filename,flagsA,typecode,errvalp)&
+        &BIND(c,NAME = 'rsb_file_mtx_load')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: filename ! CHARACTER(C_CHAR)
+       INTEGER(C_INT), VALUE  :: flagsA !> ISO C BINDING interface to ::rsb_flags_t
+       INTEGER(C_SIGNED_CHAR), VALUE  :: typecode
+       TYPE(C_PTR),VALUE :: errvalp ! INTEGER(C_INT)
+       END FUNCTION rsb_file_mtx_load
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_file_vec_load.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_file_vec_load&
+        &(filename,typecode,Yp,yvlp)&
+        &BIND(c,NAME = 'rsb_file_vec_load')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: filename ! CHARACTER(C_CHAR)
+       INTEGER(C_SIGNED_CHAR), VALUE  :: typecode
+       TYPE(C_PTR),VALUE :: Yp ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR), VALUE  :: yvlp ! INTEGER(C_INT)
+       END FUNCTION rsb_file_vec_load
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_file_vec_save.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_file_vec_save&
+        &(filename,typecode,Yp,yvl)&
+        &BIND(c,NAME = 'rsb_file_vec_save')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: filename ! CHARACTER(C_CHAR)
+       INTEGER(C_SIGNED_CHAR), VALUE  :: typecode
+       TYPE(C_PTR),VALUE :: Yp ! A single variable of the same numerical type of the matrix.
+       INTEGER(C_INT), VALUE  :: yvl
+       END FUNCTION rsb_file_vec_save
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_file_mtx_get_dims.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_file_mtx_get_dims&
+        &(filename,nrp,ncp,nzp,flagsp)&
+        &BIND(c,NAME = 'rsb_file_mtx_get_dims')
+       USE ISO_C_BINDING
+       TYPE(C_PTR), VALUE  :: filename ! CHARACTER(C_CHAR)
+       TYPE(C_PTR), VALUE  :: nrp ! INTEGER(C_INT)
+       TYPE(C_PTR), VALUE  :: ncp ! INTEGER(C_INT)
+       TYPE(C_PTR), VALUE  :: nzp ! INTEGER(C_INT)
+       TYPE(C_PTR), VALUE  :: flagsp ! INTEGER(C_INT)
+       END FUNCTION rsb_file_mtx_get_dims
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_coo_sort.
+      INTERFACE
+       INTEGER(C_INT) FUNCTION &
+        &rsb_coo_sort&
+        &(VA,IA,JA,nnzA,nrA,ncA,typecode,flagsA)&
+        &BIND(c,NAME = 'rsb_coo_sort')
+       USE ISO_C_BINDING
+       TYPE(C_PTR),VALUE :: VA ! A single variable of the same numerical type of the matrix.
+       TYPE(C_PTR),VALUE :: IA ! INTEGER(C_INT)
+       TYPE(C_PTR),VALUE :: JA ! INTEGER(C_INT)
+       INTEGER(C_INT), VALUE  :: nnzA
+       INTEGER(C_INT), VALUE  :: nrA
+       INTEGER(C_INT), VALUE  :: ncA
+       INTEGER(C_SIGNED_CHAR), VALUE  :: typecode
+       INTEGER(C_INT), VALUE  :: flagsA !> ISO C BINDING interface to ::rsb_flags_t
+       END FUNCTION rsb_coo_sort
+      END INTERFACE
+      
+!> ISO C BINDING interface to ::rsb_time.
+      INTERFACE
+       REAL(C_DOUBLE) FUNCTION &
+        &rsb_time&
+        &()&
+        &BIND(c,NAME = 'rsb_time')
+       USE ISO_C_BINDING
+       END FUNCTION rsb_time
+      END INTERFACE
+      
+!DEC$ENDIF
+      
+! Error values 
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ERR_NO_ERROR&
+            & = -INT(Z"0000",C_INT) !< See #RSB_ERR_NO_ERROR.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ERR_GENERIC_ERROR&
+            & = -INT(Z"0001",C_INT) !< See #RSB_ERR_GENERIC_ERROR.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ERR_UNSUPPORTED_OPERATION&
+            & = -INT(Z"0002",C_INT) !< See #RSB_ERR_UNSUPPORTED_OPERATION.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ERR_UNSUPPORTED_TYPE&
+            & = -INT(Z"0004",C_INT) !< See #RSB_ERR_UNSUPPORTED_TYPE.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ERR_UNSUPPORTED_FORMAT&
+            & = -INT(Z"0008",C_INT) !< See #RSB_ERR_UNSUPPORTED_FORMAT.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ERR_INTERNAL_ERROR&
+            & = -INT(Z"0010",C_INT) !< See #RSB_ERR_INTERNAL_ERROR.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ERR_BADARGS&
+            & = -INT(Z"0020",C_INT) !< See #RSB_ERR_BADARGS.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ERR_ENOMEM&
+            & = -INT(Z"0040",C_INT) !< See #RSB_ERR_ENOMEM.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ERR_UNIMPLEMENTED_YET&
+            & = -INT(Z"0100",C_INT) !< See #RSB_ERR_UNIMPLEMENTED_YET.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ERR_LIMITS&
+            & = -INT(Z"0200",C_INT) !< See #RSB_ERR_LIMITS.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ERR_UNSUPPORTED_FEATURE&
+            & = -INT(Z"0400",C_INT) !< See #RSB_ERR_UNSUPPORTED_FEATURE.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ERR_NO_USER_CONFIGURATION&
+            & = -INT(Z"0800",C_INT) !< See #RSB_ERR_NO_USER_CONFIGURATION.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ERR_CORRUPT_INPUT_DATA&
+            & = -INT(Z"01000",C_INT) !< See #RSB_ERR_CORRUPT_INPUT_DATA.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ERR_FAILED_MEMHIER_DETECTION&
+            & = -INT(Z"02000",C_INT) !< See #RSB_ERR_FAILED_MEMHIER_DETECTION.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ERR_COULD_NOT_HONOUR_EXTERNALLY_ALLOCATION_FLAGS&
+            & = -INT(Z"04000",C_INT) !< See #RSB_ERR_COULD_NOT_HONOUR_EXTERNALLY_ALLOCATION_FLAGS.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ERR_NO_STREAM_OUTPUT_CONFIGURED_OUT&
+            & = -INT(Z"08000",C_INT) !< See #RSB_ERR_NO_STREAM_OUTPUT_CONFIGURED_OUT.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ERR_INVALID_NUMERICAL_DATA&
+            & = -INT(Z"010000",C_INT) !< See #RSB_ERR_INVALID_NUMERICAL_DATA.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ERR_MEMORY_LEAK&
+            & = -INT(Z"020000",C_INT) !< See #RSB_ERR_MEMORY_LEAK.
+! Matrix flag values
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_NOFLAGS&
+            & = INT(Z"0000000",C_INT) !< See #RSB_FLAG_NOFLAGS.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_FORTRAN_INDICES_INTERFACE&
+            & = INT(Z"0000001",C_INT) !< See #RSB_FLAG_FORTRAN_INDICES_INTERFACE.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_C_INDICES_INTERFACE&
+            & = INT(Z"0000000",C_INT) !< See #RSB_FLAG_C_INDICES_INTERFACE.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_USE_HALFWORD_INDICES&
+            & = INT(Z"0000002",C_INT) !< See #RSB_FLAG_USE_HALFWORD_INDICES.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_WANT_ROW_MAJOR_ORDER&
+            & = INT(Z"0000000",C_INT) !< See #RSB_FLAG_WANT_ROW_MAJOR_ORDER.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_WANT_COLUMN_MAJOR_ORDER&
+            & = INT(Z"04000000",C_INT) !< See #RSB_FLAG_WANT_COLUMN_MAJOR_ORDER.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_SORTED_INPUT&
+            & = INT(Z"0000004",C_INT) !< See #RSB_FLAG_SORTED_INPUT.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_TRIANGULAR&
+            & = INT(Z"0000008",C_INT) !< See #RSB_FLAG_TRIANGULAR.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_LOWER&
+            & = INT(Z"0000010",C_INT) !< See #RSB_FLAG_LOWER.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_UPPER&
+            & = INT(Z"0000020",C_INT) !< See #RSB_FLAG_UPPER.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_UNIT_DIAG_IMPLICIT&
+            & = INT(Z"0000040",C_INT) !< See #RSB_FLAG_UNIT_DIAG_IMPLICIT.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_WANT_COO_STORAGE&
+            & = INT(Z"0000100",C_INT) !< See #RSB_FLAG_WANT_COO_STORAGE.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_DUPLICATES_KEEP_LAST&
+            & = INT(Z"0000000",C_INT) !< See #RSB_FLAG_DUPLICATES_KEEP_LAST.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_DUPLICATES_DEFAULT_HANDLE&
+            & = INT(Z"0000000",C_INT) !< See #RSB_FLAG_DUPLICATES_DEFAULT_HANDLE.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_DUPLICATES_SUM&
+            & = INT(Z"0000200",C_INT) !< See #RSB_FLAG_DUPLICATES_SUM.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_DISCARD_ZEROS&
+            & = INT(Z"0000400",C_INT) !< See #RSB_FLAG_DISCARD_ZEROS.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_QUAD_PARTITIONING&
+            & = INT(Z"0002000",C_INT) !< See #RSB_FLAG_QUAD_PARTITIONING.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_WANT_BCSS_STORAGE&
+            & = INT(Z"0004000",C_INT) !< See #RSB_FLAG_WANT_BCSS_STORAGE.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_ASSEMBLED_IN_COO_ARRAYS&
+            & = INT(Z"0040000",C_INT) !< See #RSB_FLAG_ASSEMBLED_IN_COO_ARRAYS.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT&
+            & = INT(Z"0080000",C_INT) !< See #RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_SYMMETRIC&
+            & = INT(Z"0400000",C_INT) !< See #RSB_FLAG_SYMMETRIC.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_HERMITIAN&
+            & = INT(Z"0800000",C_INT) !< See #RSB_FLAG_HERMITIAN.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_RECURSIVE_MORE_LEAVES_THAN_THREADS&
+            & = INT(Z"01000000",C_INT) !< See #RSB_FLAG_RECURSIVE_MORE_LEAVES_THAN_THREADS.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_RECURSIVE_SUBDIVIDE_MORE_ON_DIAG&
+            & = INT(Z"08000000",C_INT) !< See #RSB_FLAG_RECURSIVE_SUBDIVIDE_MORE_ON_DIAG.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS&
+            & = INT(Z"040000000",C_INT) !< See #RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_USE_CSR_RESERVED&
+            & = INT(Z"0200000",C_INT) !< See #RSB_FLAG_USE_CSR_RESERVED.
+! Composite flags 
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_DEFAULT_STORAGE_FLAGS&
+            & = (RSB_FLAG_WANT_BCSS_STORAGE+&
+            &RSB_FLAG_WANT_COO_STORAGE) !< See #RSB_FLAG_DEFAULT_STORAGE_FLAGS.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_DEFAULT_COO_MATRIX_FLAGS&
+            & = RSB_FLAG_WANT_COO_STORAGE  !< See #RSB_FLAG_DEFAULT_COO_MATRIX_FLAGS.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS&
+            & = RSB_FLAG_WANT_BCSS_STORAGE !< See #RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS&
+            & = (RSB_FLAG_QUAD_PARTITIONING+&
+            &RSB_FLAG_USE_HALFWORD_INDICES+&
+            &RSB_FLAG_WANT_COO_STORAGE+&
+            &RSB_FLAG_WANT_BCSS_STORAGE) !< See #RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_DEFAULT_MATRIX_FLAGS&
+            & = RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS !< See #RSB_FLAG_DEFAULT_MATRIX_FLAGS.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_IDENTICAL_FLAGS&
+            & = RSB_FLAG_NOFLAGS !< See #RSB_FLAG_IDENTICAL_FLAGS.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_LOWER_HERMITIAN&
+            & = (RSB_FLAG_HERMITIAN + RSB_FLAG_LOWER) !< See #RSB_FLAG_LOWER_HERMITIAN.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_UPPER_HERMITIAN&
+            & = (RSB_FLAG_HERMITIAN + RSB_FLAG_UPPER) !< See #RSB_FLAG_UPPER_HERMITIAN.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_LOWER_TRIANGULAR&
+            & = (RSB_FLAG_TRIANGULAR + RSB_FLAG_LOWER) !< See #RSB_FLAG_LOWER_TRIANGULAR.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_UPPER_TRIANGULAR&
+            & = (RSB_FLAG_TRIANGULAR + RSB_FLAG_UPPER) !< See #RSB_FLAG_UPPER_TRIANGULAR.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_LOWER_SYMMETRIC&
+            & = (RSB_FLAG_SYMMETRIC + RSB_FLAG_LOWER) !< See #RSB_FLAG_LOWER_SYMMETRIC.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_DIAGONAL&
+            & = (RSB_FLAG_UPPER + RSB_FLAG_LOWER) !< See #RSB_FLAG_DIAGONAL.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_UPPER_SYMMETRIC&
+            & = (RSB_FLAG_SYMMETRIC + RSB_FLAG_UPPER) !< See #RSB_FLAG_UPPER_SYMMETRIC.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_USE_HALFWORD_INDICES_CSR&
+            & = (RSB_FLAG_USE_HALFWORD_INDICES+&
+            &RSB_FLAG_USE_CSR_RESERVED) !< See #RSB_FLAG_USE_HALFWORD_INDICES_CSR.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_USE_HALFWORD_INDICES_COO&
+            & = (RSB_FLAG_USE_HALFWORD_INDICES+&
+            &RSB_FLAG_WANT_COO_STORAGE) !< See #RSB_FLAG_USE_HALFWORD_INDICES_COO.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_FLAG_MUTUALLY_EXCLUSIVE_SWITCHES&
+            & = (RSB_FLAG_USE_HALFWORD_INDICES_COO+&
+            &RSB_FLAG_USE_HALFWORD_INDICES_CSR) !< See #RSB_FLAG_MUTUALLY_EXCLUSIVE_SWITCHES.
+! Transposition constants 
+      INTEGER(C_INT),PARAMETER::RSB_TRANSPOSITION_N=INT(Z"04E",C_INT) 
+      INTEGER(C_INT),PARAMETER::RSB_TRANSPOSITION_T=INT(Z"054",C_INT) 
+      INTEGER(C_INT),PARAMETER::RSB_TRANSPOSITION_C=INT(Z"043",C_INT) 
+! Numerical type constants
+      INTEGER(C_SIGNED_CHAR),PARAMETER&
+            &::RSB_NUMERICAL_TYPE_SAME_TYPE=1 
+      INTEGER(C_SIGNED_CHAR),PARAMETER&
+            &::RSB_NUMERICAL_TYPE_INT=ICHAR('I') 
+      INTEGER(C_SIGNED_CHAR),PARAMETER&
+            &::RSB_NUMERICAL_TYPE_DOUBLE=ICHAR('D') 
+      INTEGER(C_SIGNED_CHAR),PARAMETER&
+            &::RSB_NUMERICAL_TYPE_FLOAT=ICHAR('S') 
+      INTEGER(C_SIGNED_CHAR),PARAMETER&
+            &::RSB_NUMERICAL_TYPE_FLOAT_COMPLEX=ICHAR('C') 
+      INTEGER(C_SIGNED_CHAR),PARAMETER&
+            &::RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX=ICHAR('Z') 
+! Other enumeration constants
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_IO_WANT_VERBOSE_INIT&
+            &=INT(Z"0000001",C_INT)  !< See #RSB_IO_WANT_VERBOSE_INIT.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_IO_WANT_VERBOSE_EXIT&
+            &=INT(Z"0000002",C_INT)  !< See #RSB_IO_WANT_VERBOSE_EXIT.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_IO_WANT_OUTPUT_STREAM&
+            &=INT(Z"0000003",C_INT)  !< See #RSB_IO_WANT_OUTPUT_STREAM.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_IO_WANT_SORT_METHOD&
+            &=INT(Z"0000004",C_INT)  !< See #RSB_IO_WANT_SORT_METHOD.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_IO_WANT_CACHE_BLOCKING_METHOD&
+            &=INT(Z"0000005",C_INT)  !< See #RSB_IO_WANT_CACHE_BLOCKING_METHOD.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_IO_WANT_SUBDIVISION_MULTIPLIER&
+            &=INT(Z"0000006",C_INT)  !< See #RSB_IO_WANT_SUBDIVISION_MULTIPLIER.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_IO_WANT_VERBOSE_ERRORS&
+            &=INT(Z"0000007",C_INT)  !< See #RSB_IO_WANT_VERBOSE_ERRORS.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_IO_WANT_BOUNDED_BOX_COMPUTATION&
+            &=INT(Z"0000008",C_INT)  !< See #RSB_IO_WANT_BOUNDED_BOX_COMPUTATION.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_IO_WANT_EXECUTING_THREADS&
+            &=INT(Z"0000009",C_INT)  !< See #RSB_IO_WANT_EXECUTING_THREADS.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE&
+            &=INT(Z"0000010",C_INT)  !< See #RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_IO_WANT_MEMORY_HIERARCHY_INFO_STRING&
+            &=INT(Z"0000011",C_INT)  !< See #RSB_IO_WANT_MEMORY_HIERARCHY_INFO_STRING.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_IO_WANT_IS_INITIALIZED_MARKER&
+            &=INT(Z"0000012",C_INT)  !< See #RSB_IO_WANT_IS_INITIALIZED_MARKER.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_IO_WANT_MEM_ALLOC_CNT&
+            &=INT(Z"0000013",C_INT)  !< See #RSB_IO_WANT_MEM_ALLOC_CNT.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_IO_WANT_MEM_ALLOC_TOT&
+            &=INT(Z"0000014",C_INT)  !< See #RSB_IO_WANT_MEM_ALLOC_TOT.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_IO_WANT_LEAF_LEVEL_MULTIVEC&
+            &=INT(Z"0000015",C_INT)  !< See #RSB_IO_WANT_LEAF_LEVEL_MULTIVEC.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_IO_WANT_MAX_MEMORY_ALLOCATIONS&
+            &=INT(Z"0000016",C_INT)  !< See #RSB_IO_WANT_MAX_MEMORY_ALLOCATIONS.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_IO_WANT_MAX_MEMORY_ALLOCATED&
+            &=INT(Z"0000017",C_INT)  !< See #RSB_IO_WANT_MAX_MEMORY_ALLOCATED.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_IO_WANT_LIBRSB_ETIME&
+            &=INT(Z"0000018",C_INT)  !< See #RSB_IO_WANT_LIBRSB_ETIME.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_IO_WANT_VERBOSE_TUNING&
+            &=INT(Z"0000019",C_INT)  !< See #RSB_IO_WANT_VERBOSE_TUNING.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_EXTF_NORM_ONE&
+            &=INT(Z"000001001",C_INT)  !< See #RSB_EXTF_NORM_ONE.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_EXTF_NORM_TWO&
+            &=INT(Z"000001002",C_INT)  !< See #RSB_EXTF_NORM_TWO.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_EXTF_NORM_INF&
+            &=INT(Z"000001003",C_INT)  !< See #RSB_EXTF_NORM_INF.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_EXTF_SUMS_ROW&
+            &=INT(Z"000001004",C_INT)  !< See #RSB_EXTF_SUMS_ROW.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_EXTF_SUMS_COL&
+            &=INT(Z"000001005",C_INT)  !< See #RSB_EXTF_SUMS_COL.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_EXTF_ASUMS_ROW&
+            &=INT(Z"000001006",C_INT)  !< See #RSB_EXTF_ASUMS_ROW.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_EXTF_ASUMS_COL&
+            &=INT(Z"000001007",C_INT)  !< See #RSB_EXTF_ASUMS_COL.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_EXTF_DIAG&
+            &=INT(Z"000000004",C_INT)  !< See #RSB_EXTF_DIAG.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_MARF_RGB&
+            &=INT(Z"000000001",C_INT)  !< See #RSB_MARF_RGB.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_MARF_EPS_S&
+            &=INT(Z"000000010",C_INT)  !< See #RSB_MARF_EPS_S.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_MARF_EPS_B&
+            &=INT(Z"000000020",C_INT)  !< See #RSB_MARF_EPS_B.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_MARF_EPS&
+            &=INT(Z"000000030",C_INT)  !< See #RSB_MARF_EPS.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_MARF_EPS_L&
+            &=INT(Z"000000070",C_INT)  !< See #RSB_MARF_EPS_L.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_MIF_INDEX_STORAGE_IN_BYTES__TO__SIZE_T&
+            &=INT(Z"000000001",C_INT)  !< See #RSB_MIF_INDEX_STORAGE_IN_BYTES__TO__SIZE_T.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_MIF_INDEX_STORAGE_IN_BYTES_PER_NNZ__TO__RSB_REAL_T&
+            &=INT(Z"000000002",C_INT)  !< See #RSB_MIF_INDEX_STORAGE_IN_BYTES_PER_NNZ__TO__RSB_REAL_T.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_MIF_MATRIX_ROWS__TO__RSB_COO_INDEX_T&
+            &=INT(Z"000000004",C_INT)  !< See #RSB_MIF_MATRIX_ROWS__TO__RSB_COO_INDEX_T.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_MIF_MATRIX_COLS__TO__RSB_COO_INDEX_T&
+            &=INT(Z"000000008",C_INT)  !< See #RSB_MIF_MATRIX_COLS__TO__RSB_COO_INDEX_T.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T&
+            &=INT(Z"000000010",C_INT)  !< See #RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_MIF_TOTAL_SIZE__TO__SIZE_T&
+            &=INT(Z"000000020",C_INT)  !< See #RSB_MIF_TOTAL_SIZE__TO__SIZE_T.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_MIF_MATRIX_FLAGS__TO__RSB_FLAGS_T&
+            &=INT(Z"000000040",C_INT)  !< See #RSB_MIF_MATRIX_FLAGS__TO__RSB_FLAGS_T.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_MIF_MATRIX_TYPECODE__TO__RSB_TYPE_T&
+            &=INT(Z"000000080",C_INT)  !< See #RSB_MIF_MATRIX_TYPECODE__TO__RSB_TYPE_T.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_MIF_MATRIX_INFO__TO__CHAR_P&
+            &=INT(Z"000000100",C_INT)  !< See #RSB_MIF_MATRIX_INFO__TO__CHAR_P.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_MIF_LEAVES_COUNT__TO__RSB_BLK_INDEX_T&
+            &=INT(Z"000000200",C_INT)  !< See #RSB_MIF_LEAVES_COUNT__TO__RSB_BLK_INDEX_T.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ELOPF_MUL&
+            &=INT(Z"000000001",C_INT)  !< See #RSB_ELOPF_MUL.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ELOPF_DIV&
+            &=INT(Z"000000002",C_INT)  !< See #RSB_ELOPF_DIV.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ELOPF_POW&
+            &=INT(Z"000000004",C_INT)  !< See #RSB_ELOPF_POW.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ELOPF_NEG&
+            &=INT(Z"000000008",C_INT)  !< See #RSB_ELOPF_NEG.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ELOPF_SCALE_ROWS&
+            &=INT(Z"000000010",C_INT)  !< See #RSB_ELOPF_SCALE_ROWS.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ELOPF_SCALE_COLS&
+            &=INT(Z"000000020",C_INT)  !< See #RSB_ELOPF_SCALE_COLS.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ELOPF_SCALE_ROWS_REAL&
+            &=INT(Z"000000040",C_INT)  !< See #RSB_ELOPF_SCALE_ROWS_REAL.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_ELOPF_SCALE_COLS_REAL&
+            &=INT(Z"000000080",C_INT)  !< See #RSB_ELOPF_SCALE_COLS_REAL.
+      INTEGER(C_INT),PARAMETER&
+            &::RSB_PRECF_ILU0&
+            &=INT(Z"000000001",C_INT)  !< See #RSB_PRECF_ILU0.
+      TYPE(C_PTR),PARAMETER&
+            &::RSB_NULL_INIT_OPTIONS&
+            &=C_NULL_PTR  !< See #RSB_NULL_INIT_OPTIONS.
+      TYPE(C_PTR),PARAMETER&
+            &::RSB_NULL_EXIT_OPTIONS&
+            &=C_NULL_PTR  !< See #RSB_NULL_EXIT_OPTIONS.
+      END MODULE rsb
diff --git a/rsb.h b/rsb.h
new file mode 100644
index 0000000..82f40f4
--- /dev/null
+++ b/rsb.h
@@ -0,0 +1,906 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*! 
+ *  \file
+ *  \brief  This file declares the user interface functions and data structures for the \librsb library.
+ *  \author Michele Martone
+ * */
+/*!
+ \mainpage
+ 
+ A sparse matrix library implementing the `Recursive Sparse Blocks' (\b RSB) matrix storage.
+
+ 
+ This is the documentation for the application programming interface (API)
+ of the \e `\librsb' library.
+ \n
+ In order to use \librsb, there is no need for the user to know the RSB
+ layout and algorithms: this documentation should be sufficient.
+ \n
+ This library is dual-interfaced: it supports
+ a native (`RSB') interface (with identifiers prefixed by `rsb_' or `RSB_')
+ and a (mostly complete) Sparse BLAS interface, implemented as a wrapper around the native one.
+ \n
+ Many computationally intensive operations are implemented with thread
+ parallelism, by using OpenMP.
+ \n
+ Thread parallelism can be turned off at configure time, if desired, or limited
+ at execution time.
+ \n
+ Many of the computational kernels' source code files (mostly internals) were
+ automatically generated.
+ \n
+ This user documentation concerns the end-user API only; it covers neither the
+ internals nor the code generator.
+ \n
+ 
+ You should consult the remaining documentation (e.g. the README file, code 
+ comments) to find information about how to modify the generator or the 
+ library internals.
+
+ This library is research software and, as such, still \b experimental.  
+ For a first approach, we suggest going through the \ref rsb_doc_examples documentation
+ section, or the \ref examples_section "quick start examples" section on this page.
+ 
+ \n
+ Information about the supported matrix types and matrix operations 
+ resides in the \link rsb_types.h rsb_types.h \endlink file. 
+
+ A C/C++ user can use the native API of RSB by including the \link rsb.h rsb.h \endlink header.
+ The same interface is available in Fortran via the ISO C Binding interface, specified in \link rsb.F90 rsb.F90\endlink.
+ \n
+ 
+ The C header file for the \ref rsb_doc_sparse_blas  is \link blas_sparse.h blas_sparse.h\endlink.
+
+ \author Michele Martone < michelemartone AT users DOT sourceforge DOT net >
+ 
+ Contents of the README file :
+ \verbinclude README
+
+ \anchor examples_section 
+
+ For a quick startup, consider the following two programs.
+
+ The first, using the internal RSB interface:
+ \include examples/hello.c
+
+ And the second, using the Sparse BLAS interface:
+ \include examples/hello-spblas.c
+
+ For more, see the \ref rsb_doc_examples  section.
+
+ */
+
+/*!
+ \defgroup rsb_doc_rsb The librsb library interface (rsb.h, rsb.F90)
+ \brief
+ The reference documentation of the \librsb library comes in both HTML and Unix man pages formats.
+ The following sections/man pages are available: \ref rsb_doc_rsb ; \ref rsb_doc_sparse_blas ; \ref rsb_doc_examples.
+
+
+ In general, users of this library are interested in high performance sparse matrix computations on cache based shared memory parallel computers.
+ For this, \librsb offers a native C interface (here documented) and a Fortran one (in \ref rsb.F90, equivalent to the C declaration headers from \ref rsb.h), in addition to the Sparse BLAS one (both C and Fortran, documented).
+
+ Configuration, build, and installation instructions are contained in the \c README file distributed in the sources archive.
+
+ <b> Typical program structure </b>
+
+ \li initialize \librsb with #rsb_lib_init()
+ \li (in any order)
+      allocate matrices (e.g.: with #rsb_mtx_alloc_from_coo_inplace() or others);
+      do any computation with them (e.g.: #rsb_spmv(), #rsb_spsv() );
+      convert matrices (e.g.: with #rsb_mtx_switch_to_coo() );
+      free matrices (#rsb_mtx_free() )
+ \li finalize \librsb with #rsb_lib_exit() (see the sketch below)
+
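+ A minimal sketch of this structure (not a complete program: it assumes COO arrays
+ \c VA,IA,JA describing an \c nrA x \c ncA matrix with \c nnzA nonzeroes, dense arrays
+ \c Xp,Yp, the \c double type enabled, and the \c RSB_TRANSPOSITION_N /
+ \c RSB_NUMERICAL_TYPE_DOUBLE symbols from \link rsb_types.h rsb_types.h \endlink;
+ error checking omitted):
+ \code
+ rsb_err_t errval = RSB_ERR_NO_ERROR;
+ const double alpha = 1.0, beta = 0.0;
+ struct rsb_mtx_t *mtxAp = NULL;
+ rsb_lib_init(RSB_NULL_INIT_OPTIONS);                   // initialize the library
+ mtxAp = rsb_mtx_alloc_from_coo_const(VA, IA, JA, nnzA, // assemble from COO arrays
+     RSB_NUMERICAL_TYPE_DOUBLE, nrA, ncA, 1, 1, RSB_FLAG_NOFLAGS, &errval);
+ rsb_spmv(RSB_TRANSPOSITION_N, &alpha, mtxAp, Xp, 1, &beta, Yp, 1); // Y := A X
+ rsb_mtx_free(mtxAp);                                   // free the matrix...
+ mtxAp = NULL;                                          // ...and clear the handle
+ rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);                   // finalize the library
+ \endcode
+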
+
+ <b> Important usage notes </b>
+
+ <b> General program structure </b>
+ Before calling any \librsb function, a program is required to initialize \librsb's internal status.
+ This is done by calling #rsb_lib_init() .
+ Afterwards, any \librsb function can be safely used.
+ When \librsb functions are not intended to be called anymore, a program may call #rsb_lib_exit() to free any resource.
+ Should \librsb be needed again afterwards, #rsb_lib_init() must be called anew.
+
+ <b> Manipulating matrices and vectors </b>
+ In order to use \librsb, the user is not required to explicitly use any of \librsb's data structures: their manipulation is to be performed by \librsb functions.
+ Therefore, knowledge of \librsb's matrix type (\c rsb_mtx_t) is not necessary at all: this structure is intended to be used as an opaque container.
+
+ On the contrary, arrays for numerical vectors (or more generally, dense matrices) are expected to be managed by the user: \librsb does not furnish any specific vector type.
+ Computational functions treat dense vectors/matrices as simple arrays of a specified type; see the \ref rsb_doc_examples .
+
+ <b> Computational functions </b>
+ This library can be configured at build time to support a custom subset of numerical types.
+ To keep the programming interface compact, the computational functions are not replicated for each numerical type.
+ Instead, the type is expected to be specified by the user via a type flag. 
+ For instance, matrix assembly functions (e.g.: #rsb_mtx_alloc_from_coo_const() ) accept type information and keep it stored in the matrix structure.
+ Therefore, computational functions (e.g.: #rsb_spmv() ) can fetch this information from their \c rsb_mtx_t operand, and treat the other parameters (e.g.: \a alphap, \a Xp, ...) accordingly.
+ Mixed type operations are currently not supported.
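+
+ For instance (a sketch; \c RSB_NUMERICAL_TYPE_DOUBLE is assumed from
+ \link rsb_types.h rsb_types.h \endlink, with \c VA,IA,JA,nnzA,nrA,ncA,errval as in the examples):
+ \code
+ // the numerical type is chosen once, at assembly time:
+ mtxAp = rsb_mtx_alloc_from_coo_const(VA, IA, JA, nnzA,
+     RSB_NUMERICAL_TYPE_DOUBLE, nrA, ncA, 1, 1, RSB_FLAG_NOFLAGS, &errval);
+ // from here on, computational functions such as rsb_spmv() read the type
+ // code from mtxAp and expect alphap, Xp, betap, Yp to point to doubles.
+ \endcode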
+
+
+ <b> Memory management </b>
+
+ Matrix structures (\c rsb_mtx_t) allocated by \librsb shall be freed only via #rsb_mtx_free() .
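+ \n
+ E.g. (a sketch):
+ \code
+ rsb_mtx_free(mtxAp); // free a matrix allocated by librsb...
+ mtxAp = NULL;        // ...and clear the handle to avoid a dangling pointer
+ \endcode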
+
+ <b> Benchmarking </b>
+
+ If you want to benchmark this library, there are different possibilities:
+ \include ./examples/benchex.sh
+
+ <b> Tuning and Customization </b>
+ 
+ There are different \c ./configure  options you may look at for tuning or customizing the library.
+
+
+
+ \defgroup rsb_doc_examples	Example programs and code 
+ \brief	Examples of usage of \librsb.
+
+ 	The following fully working example programs illustrate correct ways of using the library.
+	The script displayed here should be sufficient to build them.
+ \include examples/make.sh
+
+ \include examples/hello.c
+ \include examples/hello-spblas.c
+ \include examples/autotune.c
+ \include examples/io-spblas.c
+ \include examples/transpose.c
+ \include examples/power.c
+ \include examples/fortran.F90
+ \include examples/fortran_rsb_fi.F90
+*/
+
+/*!
+ \defgroup rsb_doc_sparse_blas The Sparse BLAS interface to librsb (blas_sparse.h, rsb_blas_sparse.F90) 
+ \brief
+ 	A Sparse BLAS interface (see http://www.netlib.org/blas/blast-forum/) to \librsb.  Level 1 (vector-vector operations) is supported in a basic way.  Level 2 (sparse matrix-dense vector operations) is supported fully.  Level 3 (sparse matrix-dense matrix operations) is supported as a wrapper around Level 2.
+
+We also implement a number of useful extra functions as custom extensions, giving access to other \librsb functionality.
+
+The usage pattern of this interface matches that of the Sparse BLAS standard, except for the necessary initialization/finalization of \librsb.
+The Sparse BLAS interface is also available for Fortran: see \ref rsb_blas_sparse.F90.
+
+
+The user should be aware of the following:
+\li Because this Sparse BLAS implementation is built around \librsb, initialization with #rsb_lib_init() and finalization with #rsb_lib_exit() are necessary. Inclusion of the \c rsb.h header is also necessary.
+\li \librsb lets users opt in or out of support for individual BLAS numerical types at configure/build time. Hence, while all the interface functions are always included in the Sparse BLAS header file, they may return an error code if the corresponding type was not enabled. Be sure of having configured the library correctly at configure time (and see the \ref blas_sparse.h header file for the types configured in the current build).
+\li According to the standard, the C functions for complex types accept scalar values by reference rather than by value; the equivalent functions for other types do not, which may cause confusion. Be careful.
+\li Error checking is weak; so for instance, passing a function the handle of a matrix of mismatching type will not be detected as an error, although it's incorrect.
+\li According to the standard, VBR and BCSR styled constructors are supported, although these are interfaces for \librsb's own matrix representation.
+\li Here we document both the Fortran and the C functions. However, the Fortran functions are declared and documented with the C notation.  We may provide better documentation in a subsequent release.
+\li Each identifier documented here suffixed by \c _  (e.g.: #blas_susdot_()) can be used from Fortran with the name stripped of that suffix (so in this case, \c blas_susdot).
+We will provide a proper fix to this inconvenience in a subsequent release.
+\li Each Fortran program using \librsb's Sparse BLAS Implementation shall \c use  modules \c blas_sparse  and \c rsb.
+\li Fortran programs also have to call #rsb_lib_init() and #rsb_lib_exit(), e.g.:
+\verbatim
+       	USE blas_sparse             ! module implementing the Sparse BLAS on the top of librsb
+       	USE rsb                     ! rsb module
+	...
+	INTEGER :: istat            ! integer variable
+	...
+       	istat = rsb_lib_init(RSB_NULL_INIT_OPTIONS) ! please note that this is not part of Sparse BLAS but it is needed by librsb
+	if(istat.NE.0)STOP          ! a value different than zero signals an error
+	...
+	! code calling Sparse BLAS routines
+	...
+       	istat = rsb_lib_exit(RSB_NULL_EXIT_OPTIONS) ! please note that this is not part of Sparse BLAS but it is needed by librsb
+	if(istat.NE.0)STOP          ! a value different than zero signals an error
+	...
+\endverbatim
+	\li For Fortran, more procedures exist, although they are not documented here. According to the Sparse BLAS (http://www.netlib.org/blas/blast-forum/), for almost every subroutine whose identifier is prefixed with \c blas_X (with \c X being one of S,D,C,Z), a corresponding generic modern Fortran version exists.
+	Please note that not all of these procedures' identifier prefixes include the type character. 
+
+	E.g.:
+\code
+      ! the following code ('d' stands for 'double precision'):
+      CALL blas_duscr_begin(nr,nc,A,istat)
+      CALL blas_ussp(A,blas_lower_symmetric,istat)
+      CALL blas_duscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      CALL blas_duscr_end(A,istat)
+      CALL blas_dusmv(transT,alpha,A,X,incX,B,incB,istat) 
+      CALL blas_dusds(A,istat)
+      ! is equivalent to:
+      CALL duscr_begin(nr,nc,A,istat) ! here, 'd' must be retained for avoiding ambiguity
+      CALL ussp(A,blas_lower_symmetric,istat)
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      CALL uscr_end(A,istat)
+      CALL usmv(transT,alpha,A,X,incX,B,incB,istat) 
+      CALL usds(A,istat)
+\endcode
+*/
+
+/*
+ * External interface to our implementation.
+ *
+ * This is the only header file which should be included for using this library.
+ *
+ * It defines its API (Application Programming Interface).
+ * */
+#ifndef RSB_RSB_H_INCLUDED
+#define RSB_RSB_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdlib.h>	/* size_t */
+#if 0
+#include <stdint.h>	/* uint16_t,..  */
+#endif
+
+/*!
+ \name Type definitions
+ \anchor definitions_section
+
+ These are definitions of \librsb base types.
+ */
+/*!
+ * The block arrays index type. 
+ *
+ * Could be an unsigned type.
+ * Should not overflow when indexing matrix blocks by block coordinates.
+ * */
+typedef signed int rsb_blk_idx_t;
+
+/*!
+ * The coordinate arrays index type.
+ *
+ * Should not overflow when indexing matrix elements by coordinates.
+ * Legal values when specifying a matrix size should be within #RSB_MIN_MATRIX_DIM and #RSB_MAX_MATRIX_DIM 
+ * */
+typedef signed int rsb_coo_idx_t;
+
+/*!
+ * The nnz counter index type.
+ *
+ * Should not overflow when indexing matrix elements.
+ * On most common archs sizeof(long)>=sizeof(int).
+ * Legal values when specifying a matrix size should be within #RSB_MIN_MATRIX_NNZ and #RSB_MAX_MATRIX_NNZ 
+ * */
+typedef signed int rsb_nnz_idx_t;	
+
+/* We would like the following typedefs to be long, but
+   they should be compatible with many int functions */
+
+/*!
+ A type for specifying matrix assembly or coordinate conversions option flags.
+ Should be >= 4 bytes.
+ See \ref flags_section for possible values.
+ */
+typedef signed int rsb_flags_t;
+
+/*!
+ A type for specifying numerical type codes (See \ref matrix_type_symbols_section for a list of valid values).
+ */
+typedef char rsb_type_t;
+
+/*!
+ A type specific for error flags.
+ Should be >= 4 bytes.
+
+ A textual description of an error value may be obtained via #rsb_strerror_r() or #rsb_perror().
+ */
+typedef signed int rsb_err_t; /* note that an unsigned would break the RSB_IS_COO_VALUE_MORE_THAN_HALF_BITS_LONG macros! */
+
+/*!
+ An integer type declaration for interface functions.
+ Should always be 'int'.
+ */
+typedef signed    int rsb_int_t;		/*!< A signed integer type */
+
+/*! A boolean type. */
+typedef rsb_flags_t rsb_bool_t;
+
+/*!
+ * The type for specifying transposition (See \ref matrix_transposition_flags_section)
+ */
+typedef rsb_flags_t rsb_trans_t;
+
+/*!  A floating point numerical type.  */
+typedef double rsb_real_t;
+
+/*!
+ A type for character strings.
+ */
+typedef char rsb_char_t;
+
+/*!  A floating point numerical type for time measurements with #rsb_time().  */
+typedef rsb_real_t rsb_time_t;
+
+/*!
+ \name Other constants
+
+ Other constants for some typedefs.
+ */
+#define RSB_BOOL_TRUE	1	/*!< A "true"  value for #rsb_bool_t. */
+#define RSB_BOOL_FALSE	0 /*!< A "false" value for #rsb_bool_t. */
+#define RSB_DO_FLAG_ADD(V,F)	(V) |=  (F)	/*!< The flag variable \c V gets the logical OR value with flag \c F. */
+#define RSB_DO_FLAG_DEL(V,F)	(V) &= ~(F)	/*!< The flag variable \c V gets the logical NAND value with flag \c F. */
+#define RSB_DO_FLAG_FILTEROUT(V,F)	((V) & ~(F))	/*!< The flag variable \c V after logical NAND value with flag \c F. */
+#define RSB_DO_FLAG_FILTERONLY(V,F)	((V) & (F))	/*!< The flag variable \c V after logical AND value with flag \c F. */
+#define RSB_DO_FLAG_HAS(V,F)	((((V)&(F))==(F))?RSB_BOOL_TRUE:RSB_BOOL_FALSE)	 /*!< Presence check for flag \c F. */
+#define RSB_DO_FLAG_HAS_INTERSECTION(V,F)	(((V)&(F))?RSB_BOOL_TRUE:RSB_BOOL_FALSE)	/*!< Presence check for flag \c F.*/
+
+#define RSB_DEFAULT_ROW_BLOCKING 1	/*!< Reserved for future use. */
+#define RSB_DEFAULT_COL_BLOCKING 1	/*!< Reserved for future use. */
+#define RSB_DEFAULT_BLOCKING 1 /*!< A safe value for column blocking (reserved for future use). */
+
+/*  Macros to get the limit values of the index types.  */
+#define RSB_IS_SIGNED(T)   (((T)0) > (((T)-1)))
+#define RSB_MAX_UNSIGNED(T) ((T)-1)
+#define RSB_CHAR_BIT 8	/* bits per byte; if not 8, librsb compilation should fail */
+#define RSB_HALF_MAX_SIGNED(T) ((T)1 << (sizeof(T)*RSB_CHAR_BIT-2))
+#define RSB_MAX_SIGNED(T) (RSB_HALF_MAX_SIGNED(T) - 1 + RSB_HALF_MAX_SIGNED(T))
+#define RSB_MAX_VALUE_FOR_TYPE(T) (RSB_IS_SIGNED(T)?RSB_MAX_SIGNED(T):RSB_MAX_UNSIGNED(T))
+
+#define RSB_MIN_MATRIX_DIM 0 /*!< Minimum allowed matrix dimension. */
+#define RSB_MIN_MATRIX_NNZ 0 /*!< Minimum allowed matrix nonzeroes count. */
+#define RSB_NNZ_BLK_MAX 255 /* Dense block maximal allowed size (still unused, for now internal) */
+#define RSB_MAX_MATRIX_DIM (RSB_MAX_VALUE_FOR_TYPE(rsb_coo_idx_t)-RSB_NNZ_BLK_MAX-255) /*!< Maximum allowed matrix dimension. */
+#define RSB_MAX_MATRIX_NNZ (RSB_MAX_VALUE_FOR_TYPE(rsb_nnz_idx_t)-RSB_NNZ_BLK_MAX) /*!< Maximum allowed matrix nonzeroes count. */
+#define RSB_MARKER_COO_VALUE (RSB_MAX_MATRIX_DIM+1)		/* */
+#define RSB_MARKER_NNZ_VALUE (RSB_MAX_MATRIX_NNZ+1)		/* */
+#define RSB_INVALID_COO_IDX_VAL ((RSB_MARKER_COO_VALUE)+1)	/*!< A value which is illegal for any #rsb_coo_idx_t variable. */
+#define RSB_INVALID_NNZ_IDX_VAL ((RSB_MARKER_NNZ_VALUE)+1)	/*!< A value which is illegal for any #rsb_nnz_idx_t variable. */
+
+/*! \anchor rsb_mtx_t  struct rsb_mtx_t declaration is in a separate, internal include file */
+
+/*!
+ \ingroup rsb_doc_rsb
+ \name Matrix assembly flags
+ \anchor flags_section
+
+ These are flags which can be combined to specify the assembly of sparse matrices and to control various matrix-related operations.
+ \n
+ If unsure which flags to pass to a function, #RSB_FLAG_NOFLAGS is a good default in most cases.
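+ \n
+ E.g. (a sketch, using the macros defined above and the flag values defined below):
+ \code
+ rsb_flags_t flagsA = RSB_FLAG_NOFLAGS;
+ RSB_DO_FLAG_ADD(flagsA, RSB_FLAG_LOWER_TRIANGULAR); // assemble as lower triangular
+ RSB_DO_FLAG_ADD(flagsA, RSB_FLAG_SORTED_INPUT);     // declare input nonzeroes as sorted
+ \endcode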
+ */
+/*!@{*/
+
+/*! Default storage flags. */
+#define RSB_FLAG_DEFAULT_STORAGE_FLAGS		 	(RSB_FLAG_WANT_BCSS_STORAGE|RSB_FLAG_WANT_COO_STORAGE)
+
+/*! A flag combination specifying a pure COO matrix.  */
+#define RSB_FLAG_DEFAULT_COO_MATRIX_FLAGS		 	RSB_FLAG_WANT_COO_STORAGE 
+
+/*! A flag combination specifying a pure CSR matrix.  */
+#define RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS		 	RSB_FLAG_WANT_BCSS_STORAGE
+
+/*! A flag combination specifying a pure RSB matrix.  */
+#define RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS (RSB_FLAG_QUAD_PARTITIONING|RSB_FLAG_USE_HALFWORD_INDICES|RSB_FLAG_WANT_COO_STORAGE|RSB_FLAG_WANT_BCSS_STORAGE)
+
+/*! A flag combination specifying a matrix in a default, supported format.  */
+#define RSB_FLAG_DEFAULT_MATRIX_FLAGS			RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS
+
+/*! The null (empty) flag. */
+#define RSB_FLAG_NOFLAGS		 		0x000000
+
+/*! The identical flag (used in cloning function #rsb_mtx_clone). */
+#define RSB_FLAG_IDENTICAL_FLAGS RSB_FLAG_NOFLAGS
+
+/*! If set, the input/output coordinate indices will be assumed to be 1 based. */
+#define RSB_FLAG_FORTRAN_INDICES_INTERFACE		0x000001
+
+/*! If set, the input/output coordinate indices will be assumed to be 0 based (default). */
+#define RSB_FLAG_C_INDICES_INTERFACE		0x000000
+
+/*! If set, the matrix will internally use a half word (16 bit) type for indices. */
+#define RSB_FLAG_USE_HALFWORD_INDICES	0x000002
+
+/*! Used to specify multi-vector (dense matrix) operations. */
+#define RSB_FLAG_WANT_ROW_MAJOR_ORDER 			0x000000
+
+/*! Used to specify multi-vector (dense matrix) operations. */
+#define RSB_FLAG_WANT_COLUMN_MAJOR_ORDER 		0x4000000
+
+/*! If set, the code will assume the input nonzeroes as sorted.	*/
+#define RSB_FLAG_SORTED_INPUT				0x000004
+
+/*! If set, the matrix is considered as triangular. \see #RSB_FLAG_LOWER,#RSB_FLAG_UPPER. */ 
+#define RSB_FLAG_TRIANGULAR 				0x000008
+
+/*! If set, the matrix will be stored as lower (triangular or symmetric). \see #RSB_FLAG_TRIANGULAR,#RSB_FLAG_SYMMETRIC,#RSB_FLAG_UPPER. */
+#define RSB_FLAG_LOWER		 			0x000010
+
+/*! If set, the matrix will be stored as upper (triangular or symmetric). \see #RSB_FLAG_LOWER*/
+#define RSB_FLAG_UPPER		 			0x000020
+
+/*! If set, the (whole super-)matrix will not store the diagonal, which will be assumed to be unit (all ones). */
+#define RSB_FLAG_UNIT_DIAG_IMPLICIT			0x000040
+
+/* ghost flag ( moved in a non-public header, and reserved): 0x000080	*/
+/* ghost flag ( moved in a non-public header, and reserved): 0x80000000	*/
+
+/*! If set, the matrix will use COO storage, where necessary. */
+#define RSB_FLAG_WANT_COO_STORAGE		0x000100
+
+/*! Keep the last nonzero duplicate, at matrix assembly time. */ 
+#define RSB_FLAG_DUPLICATES_KEEP_LAST				0x000000
+
+/*! The default nonzeroes duplicates handling.  */ 
+#define RSB_FLAG_DUPLICATES_DEFAULT_HANDLE			0x000000
+
+/*! Compute and keep the sum of nonzero duplicates, at matrix assembly time.  */ 
+#define RSB_FLAG_DUPLICATES_SUM				0x000200
+
+/*! If set, explicit zeros will not be inserted. \warning This flag is active by default.	*/
+#define RSB_FLAG_DISCARD_ZEROS				0x000400
+
+/* ghost flag ( moved in a non-public header, and reserved): 0x000800 */
+/* ghost flag ( moved in a non-public header, and reserved): 0x001000 */
+
+/*! If set, matrix will be organized as a quad tree of submatrices. */
+#define RSB_FLAG_QUAD_PARTITIONING 			0x002000
+
+/*! If set, the block partitioning will be fixed (BCSS: BCSR or BCSC, but no VBR).	*/
+#define RSB_FLAG_WANT_BCSS_STORAGE 			0x004000
+
+/* ghost flag ( moved in a non-public header, and reserved): 0x008000 */
+/* ghost flag ( moved in a non-public header, and reserved): 0x010000 */
+/* ghost flag ( moved in a non-public header, and reserved): 0x020000 */
+
+/*! If set, matrices will be fitted in the three input COO arrays, after conversion. */ 
+#define RSB_FLAG_ASSEMBLED_IN_COO_ARRAYS		0x040000
+
+/*! \internal \todo: should remove this. */ 
+#define RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT	0x080000
+
+/* ghost flag ( moved in a non-public header, and reserved): 0x100000*/
+/* ghost flag (temporarily reserved): 0x200000*/
+
+/*! If set, the input matrix will be treated as symmetric (stored as a lower triangular one by default). \see #RSB_FLAG_LOWER,#RSB_FLAG_UPPER. */
+#define RSB_FLAG_SYMMETRIC 			0x400000
+
+/*! If set, the input matrix will be treated as hermitian (stored as a lower triangular one). \see #RSB_FLAG_LOWER,#RSB_FLAG_UPPER. */
+#define RSB_FLAG_HERMITIAN 			0x800000
+
+/*! If set, recursion on small matrices will last at least the number of active threads. */
+#define RSB_FLAG_RECURSIVE_MORE_LEAVES_THAN_THREADS	0x1000000
+
+/* ghost flag ( moved in a non-public header, and reserved): 0x2000000	*/
+
+/*! Combined flags for a lower hermitian matrix. */
+#define RSB_FLAG_LOWER_HERMITIAN			(RSB_FLAG_HERMITIAN | RSB_FLAG_LOWER)
+
+/*! Combined flags for an upper hermitian matrix. */
+#define RSB_FLAG_UPPER_HERMITIAN			(RSB_FLAG_HERMITIAN | RSB_FLAG_UPPER)
+
+/*! Combined flags for a lower triangular matrix. */
+#define RSB_FLAG_LOWER_TRIANGULAR 			(RSB_FLAG_TRIANGULAR | RSB_FLAG_LOWER)
+
+/*! Combined flags for an upper triangular matrix. */
+#define RSB_FLAG_UPPER_TRIANGULAR 			(RSB_FLAG_TRIANGULAR | RSB_FLAG_UPPER)
+
+/*! Combined flags for a symmetric, lower-stored matrix. */
+
+#define RSB_FLAG_LOWER_SYMMETRIC 			(RSB_FLAG_SYMMETRIC | RSB_FLAG_LOWER)
+
+/*! Combined flags for a diagonal matrix. */
+#define RSB_FLAG_DIAGONAL 				(RSB_FLAG_UPPER | RSB_FLAG_LOWER)
+
+/*! Combined flags for a symmetric, upper-stored matrix. */
+#define RSB_FLAG_UPPER_SYMMETRIC 			(RSB_FLAG_SYMMETRIC | RSB_FLAG_UPPER)
+
+/*! If set, the matrix will be subdivided at a finer grain on diagonal blocks. */
+#define RSB_FLAG_RECURSIVE_SUBDIVIDE_MORE_ON_DIAG 	0x8000000
+
+/*! If set, the input COO arrays to the assembly functions will not be freed at matrix destruction time.
+  \warning Please do NOT use this flag, for the default memory allocation handling is still not specified. Instead, use the in place allocation functions: #rsb_mtx_alloc_from_csr_inplace() and #rsb_mtx_alloc_from_coo_inplace().
+ */
+#define RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS 		0x40000000
+
+/* Reserved, undocumented flags. Not for use. */
+#define RSB_FLAG_USE_CSR_RESERVED	0x200000
+
+/*! \internal Combined flags for half word CSR. */
+#define RSB_FLAG_USE_HALFWORD_INDICES_CSR	(RSB_FLAG_USE_HALFWORD_INDICES|RSB_FLAG_USE_CSR_RESERVED)
+
+/*! Combined flags for half word COO. */
+#define RSB_FLAG_USE_HALFWORD_INDICES_COO	(RSB_FLAG_USE_HALFWORD_INDICES|RSB_FLAG_WANT_COO_STORAGE)
+
+/*! A combination of flags which is forbidden (so don't use it). */
+#define RSB_FLAG_MUTUALLY_EXCLUSIVE_SWITCHES	(RSB_FLAG_USE_HALFWORD_INDICES_COO|RSB_FLAG_USE_HALFWORD_INDICES_CSR)
+/*!@}*/
+
+
+/*! A macro for the error code value. */
+#define RSB_ERR_CAST(E) (-(E))
+
+/* Error handling functions. */
+rsb_err_t rsb_strerror_r(rsb_err_t errval, rsb_char_t * buf, size_t buflen);
+rsb_err_t rsb_perror(void *stream, rsb_err_t errval);
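+/* A usage sketch (hypothetical context; any librsb call returning a #rsb_err_t fits here):
+   rsb_err_t errval = rsb_spmv(transA, alphap, mtxAp, Xp, incX, betap, Yp, incY);
+   if (errval != RSB_ERR_NO_ERROR)
+   {
+     rsb_char_t sbuf[256];
+     rsb_strerror_r(errval, sbuf, sizeof(sbuf)); // fill sbuf with an error message
+     rsb_perror(NULL, errval);                   // or print to the default stream
+   }
+*/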
+
+/*! No error occurred (success). The return value that means function operation success, in most cases.   */
+#define RSB_ERR_NO_ERROR		RSB_ERR_CAST(0x000)
+
+/*! An unspecified, generic error occurred. */
+#define RSB_ERR_GENERIC_ERROR		RSB_ERR_CAST(0x001)
+
+/*! The user requested an operation which is not supported (e.g.: was opted out at build time). */
+#define RSB_ERR_UNSUPPORTED_OPERATION	RSB_ERR_CAST(0x002)
+
+/*! The user requested to use a type which is not supported (e.g.: was opted out at build time). */
+#define RSB_ERR_UNSUPPORTED_TYPE	RSB_ERR_CAST(0x004)
+
+/*! The user requested to use a matrix storage format which is not supported (e.g.: was opted out at build time). */
+#define RSB_ERR_UNSUPPORTED_FORMAT	RSB_ERR_CAST(0x008)
+
+/*! An error occurred which is not apparently caused by a user's fault (internal error). */
+#define RSB_ERR_INTERNAL_ERROR		RSB_ERR_CAST(0x010)
+
+/*! The user supplied some corrupt data as argument. */
+#define RSB_ERR_BADARGS			RSB_ERR_CAST(0x020)
+
+/*! There is not enough dynamical memory to perform the requested operation. */
+#define RSB_ERR_ENOMEM			RSB_ERR_CAST(0x040)
+
+/*! The requested operation was not implemented yet in this code revision (but probably will be, someday). */
+#define RSB_ERR_UNIMPLEMENTED_YET	RSB_ERR_CAST(0x100)
+
+/*! The requested operation could not be executed, or an index overflow would happen. */
+#define RSB_ERR_LIMITS			RSB_ERR_CAST(0x200)
+
+/*! A Fortran specific error occurred. */
+#define RSB_ERR_FORTRAN_ERROR		RSB_ERR_GENERIC_ERROR
+
+/*! The requested feature (e.g.:blocking) is not available because it was opted out or not configured at build time. */
+#define RSB_ERR_UNSUPPORTED_FEATURE	RSB_ERR_CAST(0x400)
+
+/*! A file containing user set configuration was not present. */
+#define RSB_ERR_NO_USER_CONFIGURATION	RSB_ERR_CAST(0x800)
+
+/*! User supplied data (e.g.: from file) was corrupt. */
+#define RSB_ERR_CORRUPT_INPUT_DATA	RSB_ERR_CAST(0x1000)
+
+/*! Memory hierarchy info failed to be detected. You can bypass this by setting a meaningful \c RSB_USER_SET_MEM_HIERARCHY_INFO environment variable. */
+#define RSB_ERR_FAILED_MEMHIER_DETECTION	RSB_ERR_CAST(0x2000)
+
+/*! User gave flags for an in place assembly in a copy-based function. */
+#define RSB_ERR_COULD_NOT_HONOUR_EXTERNALLY_ALLOCATION_FLAGS	RSB_ERR_CAST(0x4000)
+
+/*! User requested writing to a file stream, while this feature is configured out. */
+#define RSB_ERR_NO_STREAM_OUTPUT_CONFIGURED_OUT	RSB_ERR_CAST(0x8000)
+
+/*! User gave some input with invalid numerical data. */
+#define RSB_ERR_INVALID_NUMERICAL_DATA	RSB_ERR_CAST(0x10000)
+
+/*! Probable memory leak (user did not deallocate librsb structures before calling rsb_lib_exit()). */
+#define RSB_ERR_MEMORY_LEAK	RSB_ERR_CAST(0x20000)
+
+/*! Collation of "unsupported" type errors. */
+#define RSB_ERRS_UNSUPPORTED_FEATURES	(RSB_ERR_UNSUPPORTED_FEATURE|RSB_ERR_NO_STREAM_OUTPUT_CONFIGURED_OUT)
+
+/*! Program success error code (int). */
+#ifdef EXIT_SUCCESS
+#define RSB_PROGRAM_SUCCESS	(EXIT_SUCCESS)
+#else
+#define RSB_PROGRAM_SUCCESS		(0)
+#endif
+
+/*! Program error code (int). */
+#ifdef EXIT_FAILURE
+#define RSB_PROGRAM_ERROR		(EXIT_FAILURE)
+#else
+#define RSB_PROGRAM_ERROR		(-1)
+#endif
+
+/*! Program error code (int). */
+#define RSB_ERR_TO_PROGRAM_ERROR(E)	((E)==(RSB_ERR_NO_ERROR)?RSB_PROGRAM_SUCCESS:RSB_PROGRAM_ERROR)
+
+
+/*! \ingroup rsb_doc_misc rsb_doc_rsb
+\brief library option values for \see_lib_init_funcs. */
+enum rsb_opt_t
+{
+/*! #RSB_IO_WANT_VERBOSE_INIT prompts for a verbose initialization of the library: messages will be written
+ * to the file descriptor (\c FILE*) pointed to by the value pointer when calling \ref rsb_lib_init.
+ */
+  RSB_IO_WANT_VERBOSE_INIT =0x000001	/* (FILE*) */
+,
+/*! #RSB_IO_WANT_VERBOSE_EXIT prompts for a verbose finalization of the library: messages will be written
+ * to the file descriptor (\c FILE*) pointed to by the value pointer when calling \ref rsb_lib_exit.
+ */
+  RSB_IO_WANT_VERBOSE_EXIT =0x000002	/* (FILE*) */
+,
+/*! Specifies the default output stream. Output (debug) info will be written
+ * to the file descriptor (\c FILE*) pointed to by the value pointer.
+ */
+  RSB_IO_WANT_OUTPUT_STREAM =0x000003	/* (FILE*) */
+,
+/*! Specifies the default sorting method. Specified as a pointed integer (#rsb_int_t) number, in {[0],1}. (internal)
+ */
+  RSB_IO_WANT_SORT_METHOD =0x000004	/* (rsb_int_t) */
+,
+/*! Specifies the default cache blocking method. Specified as a pointed integer (#rsb_int_t) number, in {-1,[0],1}. (internal)
+ */
+  RSB_IO_WANT_CACHE_BLOCKING_METHOD =0x000005	/* (rsb_int_t) */
+,
+/*! Specifies a multiplier for finer (if >1.0) or coarser (if <1.0) subdivisions. Specified as a pointed (#rsb_real_t) number, in {..,[1.0],..}. (internal)
+ */
+  RSB_IO_WANT_SUBDIVISION_MULTIPLIER =0x000006	/* (rsb_real_t) */
+,
+/*! Prompts for verbose error reporting: messages will be written
+ * to the file descriptor (\c FILE*) pointed to by the value pointer. Only meaningful if an
+ * interface error verbosity greater than 0 was set at configure time. 
+ */
+  RSB_IO_WANT_VERBOSE_ERRORS =0x000007	/* (FILE*) */
+,
+/*! Prompts for bounded box computation, for smoother submatrix locking; pointed #rsb_int_t in {0,[1]}. (internal).
+ */
+  RSB_IO_WANT_BOUNDED_BOX_COMPUTATION =0x000008	/* (rsb_int_t) */
+,
+/*! Specifies the number of desired executing threads; pointed #rsb_int_t in {[0],1,..}.
+ */
+  RSB_IO_WANT_EXECUTING_THREADS =0x000009	/* (rsb_int_t) */
+,
+/*! Specifies the level of interface verbosity; if setting, pointed #rsb_int_t values should be in {[0],1,..}. Support may be enabled or disabled at build time via the \c --enable-internals-error-verbosity configure option. If disabled, only getting is supported and yields -1, but setting is not supported and the #RSB_ERR_NO_STREAM_OUTPUT_CONFIGURED_OUT error will be returned.
+ */
+  RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE =0x000010	/* (rsb_int_t) */
+,
+/*! Specifies a custom memory hierarchy info string; pointed \c const #rsb_char_t*; (may point to a NULL string pointer).
+ */
+  RSB_IO_WANT_MEMORY_HIERARCHY_INFO_STRING =0x000011	/* (const rsb_char_t*) */
+,
+/*! Used for getting whether the library has been initialized (#RSB_BOOL_TRUE) or not (#RSB_BOOL_FALSE) ; pointed \c const #rsb_bool_t*; (this is NOT for general users).
+ */
+  RSB_IO_WANT_IS_INITIALIZED_MARKER =0x000012	/* (const rsb_bool_t*) */
+,
+/*! Used for getting the count of memory allocations performed by librsb employing librsb's memory allocation wrapper (if disabled, will return zero); pointed \c const \c size_t*; (this is for debugging purposes).
+ */
+  RSB_IO_WANT_MEM_ALLOC_CNT =0x000013	/* (const size_t*) */
+,
+/*! Used for getting the total amount of memory allocated by librsb employing librsb's memory allocation wrapper (if disabled, will return zero); pointed \c const \c size_t*; (this is for debugging purposes).
+ */
+  RSB_IO_WANT_MEM_ALLOC_TOT =0x000014	/* (const size_t*) */
+,
+/*! Specifies whether the default multi-vector ops shall act at a leaf level (default value of 0 is yes). Specified as a pointed integer (#rsb_int_t) number, in {-1,[0]}. (internal)
+ */
+  RSB_IO_WANT_LEAF_LEVEL_MULTIVEC =0x000015	/* (rsb_int_t) */
+,
+/*! Specifies an upper limit to the count of allocated memory areas (default value of 0 means no limit). Specified as a pointed \c size_t. \rsb_configure_memwrap
+ */
+  RSB_IO_WANT_MAX_MEMORY_ALLOCATIONS =0x000016	/* (size_t) */
+,
+/*! Specifies an upper limit to the amount of allocated memory (default value of 0 means no limit). Specified as a pointed \c size_t. \rsb_configure_memwrap
+ */
+  RSB_IO_WANT_MAX_MEMORY_ALLOCATED =0x000017	/* (size_t) */
+,
+/*! Represents time spent in librsb. Specified as a pointed #rsb_time_t. Only works if statistics collection (\c --enable-librsb-stats) was specified at configure time.
+ */
+  RSB_IO_WANT_LIBRSB_ETIME =0x000018	/* (rsb_time_t) */
+,
+/*! Auto tuning verbosity level for rsb_tune_spmm/rsb_tune_spsm. If 0, no verbosity; if 1, verbose; if 2, verbose with trace files being dumped.
+ */
+  RSB_IO_WANT_VERBOSE_TUNING =0x000019	/* (rsb_int_t) */
+};
+
+/*! A handy macro for invoking #rsb_lib_reinit() with a single get/set specifier.
+ * An appropriate I/O flag is supplied as first parameter; a valid pointer (according to the flag) should be passed as second parameter; either #RSB_IO_SPECIFIER_SET or #RSB_IO_SPECIFIER_GET is passed as third parameter; a #rsb_err_t variable as fourth one, in order to detect any error.
+ * \deprecated	This macro has been deprecated and will be removed in a future version: use #rsb_lib_set_opt or #rsb_lib_get_opt instead.
+ * */
+#define RSB_REINIT_SINGLE_VALUE(IOF,IOP,IOS,ERRVAL) { enum rsb_opt_t keys[]={IOF}; void*values[]={(IOP)}; struct rsb_initopts io; io.action=(IOS); io.keys=keys; io.values=values; io.n_pairs=1; ERRVAL=rsb_lib_reinit(&io); }
+
+/*! Like #RSB_REINIT_SINGLE_VALUE, but considering \c IOP \c const.
+ * \deprecated	This macro has been deprecated and will be removed in a future version: use #rsb_lib_set_opt instead.
+ * */
+#define RSB_REINIT_SINGLE_VALUE_C_IOP(IOF,IOP,IOS,ERRVAL) { enum rsb_opt_t keys[]={IOF}; const void*values[]={(IOP)}; struct rsb_initopts io; io.action=(IOS); io.keys=keys; (io.values)=(void**)values; io.n_pairs=1; ERRVAL=rsb_lib_reinit(&io); }
+
+/*! A handy macro for invoking #RSB_REINIT_SINGLE_VALUE with a single set specifier.
+ * An appropriate I/O flag is supplied as first parameter; a valid pointer (according to the flag) should be passed as second parameter; a #rsb_err_t variable as third one, in order to detect any error.
+ * \deprecated	This macro has been deprecated and will be removed in a future version: use #rsb_lib_set_opt instead.
+ * */
+#define RSB_REINIT_SINGLE_VALUE_SET(IOF,IOP,ERRVAL) RSB_REINIT_SINGLE_VALUE(IOF,IOP,RSB_IO_SPECIFIER_SET,ERRVAL)
+
+/*! A handy macro for invoking #RSB_REINIT_SINGLE_VALUE with a single get specifier.
+ * An appropriate I/O flag is supplied as first parameter; a valid pointer (according to the flag) should be passed as second parameter; a #rsb_err_t variable as third one, in order to detect any error.
+ * \deprecated	This macro has been deprecated and will be removed in a future version: use #rsb_lib_get_opt instead.
+ * */
+#define RSB_REINIT_SINGLE_VALUE_GET(IOF,IOP,ERRVAL) RSB_REINIT_SINGLE_VALUE(IOF,IOP,RSB_IO_SPECIFIER_GET,ERRVAL)
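+/* A usage sketch of the setter macro above (assuming the library is already initialized):
+   rsb_err_t errval = RSB_ERR_NO_ERROR;
+   rsb_int_t wet = 2; // desired count of executing threads
+   RSB_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_EXECUTING_THREADS, &wet, errval);
+*/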
+
+
+/*!
+ * @brief A structure specifying library (initialization) options, to be used with the \ref rsb_lib_reinit() function.
+ * \n
+ *
+ * The structure specifies, for \c i=0,..,n_pairs-1 , a list of (key,value)
+ * pairs, stored respectively as (\c keys[i],values[i]).
+ * \n
+ * Each flag specifies the type and possible range of values it accepts. 
+ * \n
+ * The structure may be used to set or query various library parameters. 
+ *
+ * Example:
+ * \code
+ 	const int max_io=10; // the number of different options we want to set
+	struct rsb_initopts io={NULL,NULL,0,RSB_IO_SPECIFIER_SET},
+ 	*iop=&io; // pointer to the options structure
+	void * io_values[max_io]; // an array of pointers to max_io different option values (we shall set)
+	enum rsb_opt_t io_keys[max_io]; // an array of max_io flag values specifying the type of values we are handing over to the library
+	rsb_int_t bbv=1; // the value we want to set the RSB_IO_WANT_BOUNDED_BOX_COMPUTATION option to
+	io.keys=io_keys; // io.keys will now point to io_keys as its keys array
+	io.values=io_values; // io.values will now point to io_values as its values array
+	io.n_pairs=0; // we have 0 pairs specified so far
+	io.keys[io.n_pairs]=RSB_IO_WANT_BOUNDED_BOX_COMPUTATION; // the first (at index 0) option we want to specify is RSB_IO_WANT_BOUNDED_BOX_COMPUTATION
+	io.values[io.n_pairs]=&bbv; // its value: a pointer to the (rsb_int_t) variable bbv
+	io.n_pairs++; // io.n_pairs is set to 1: we have one option set, so even if we have (max_io-io.n_pairs) left, only the first will be read
+	... // we are free to specify other option (type, value) pairs
+ * \endcode
+ * */
+struct rsb_initopts
+{
+	/*! An array of value types key flags. */
+	enum rsb_opt_t * keys;
+	/*! An array of value pointers, as specified by each flag value. */
+	void ** values;
+	/*! The length of the \c keys and \c values arrays. */
+	rsb_int_t n_pairs;
+	/*! The action we are requesting (either one of #RSB_IO_SPECIFIER_GET or #RSB_IO_SPECIFIER_SET)*/
+	rsb_int_t action;
+};
+
+#define RSB_IO_SPECIFIER_GET	1 /*!< Specifies to #RSB_REINIT_SINGLE_VALUE that a given #rsb_initopts is going to be get by the user. */
+#define RSB_IO_SPECIFIER_SET	0 /*!< Specifies to #RSB_REINIT_SINGLE_VALUE that a given #rsb_initopts is going to be set by the user. */
+#define RSB_NULL_INIT_OPTIONS NULL /*!<  A valid value for specifying default (null) options to #rsb_lib_init().  */
+#define RSB_NULL_EXIT_OPTIONS NULL /*!<  A valid value for specifying default (null) options to #rsb_lib_exit().  */
+
+rsb_err_t rsb_lib_init(struct rsb_initopts * iop);
+rsb_err_t rsb_lib_reinit(struct rsb_initopts * iop);
+rsb_err_t rsb_lib_set_opt_str(const rsb_char_t* opnp, const rsb_char_t* opvp);
+rsb_err_t rsb_lib_set_opt(enum rsb_opt_t iof, const void*iop);
+rsb_err_t rsb_lib_get_opt(enum rsb_opt_t iof, void*iop);
+rsb_err_t rsb_lib_exit(struct rsb_initopts * iop);
+
+struct rsb_mtx_t * rsb_mtx_alloc_from_coo_begin(rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_flags_t flagsA, rsb_err_t * errvalp);
+rsb_err_t rsb_mtx_alloc_from_coo_end(struct rsb_mtx_t ** mtxApp);
+struct rsb_mtx_t * rsb_mtx_alloc_from_csr_const(const void *VA, const rsb_coo_idx_t * RP, const rsb_coo_idx_t * JA, rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_blk_idx_t brA, rsb_blk_idx_t bcA, rsb_flags_t flagsA, rsb_err_t * errvalp);
+struct rsb_mtx_t * rsb_mtx_alloc_from_csc_const(const void *VA, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * CP, rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_blk_idx_t brA, rsb_blk_idx_t bcA, rsb_flags_t flagsA, rsb_err_t * errvalp);
+struct rsb_mtx_t * rsb_mtx_alloc_from_csr_inplace(void * VA, rsb_nnz_idx_t * RP, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_blk_idx_t brA, rsb_blk_idx_t bcA, rsb_flags_t flagsA, rsb_err_t * errvalp);
+struct rsb_mtx_t * rsb_mtx_alloc_from_coo_const(const void *VA, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_blk_idx_t brA, rsb_blk_idx_t bcA, rsb_flags_t flagsA, rsb_err_t * errvalp);
+struct rsb_mtx_t * rsb_mtx_alloc_from_coo_inplace(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_blk_idx_t brA, rsb_blk_idx_t bcA, rsb_flags_t flagsA, rsb_err_t * errvalp );
+rsb_err_t rsb_mtx_clone(struct rsb_mtx_t ** mtxBpp, rsb_type_t typecode, rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp, rsb_flags_t flags);
+struct rsb_mtx_t * rsb_mtx_free(struct rsb_mtx_t * mtxAp);
+
+/*! \ingroup rsb_doc_misc rsb_doc_rsb
+ \brief Extraction filter flags, to be used with #rsb_mtx_get_nrm()/#rsb_mtx_get_vec(). */
+enum rsb_extff_t
+{
+  RSB_EXTF_NORM_ONE	=0x00001001			/*!< #rsb_mtx_get_nrm() flag value for computing the one-norm. */
+, RSB_EXTF_NORM_TWO	=0x00001002			/*!< #rsb_mtx_get_nrm() flag value for computing the two-norm (Frobenius norm). */
+, RSB_EXTF_NORM_INF	=0x00001003			/*!< #rsb_mtx_get_nrm() flag value for computing the infinity-norm. */
+, RSB_EXTF_SUMS_ROW	=0x00001004			/*!< #rsb_mtx_get_vec() flag value for computing the sum along each row. */
+, RSB_EXTF_SUMS_COL	=0x00001005			/*!< #rsb_mtx_get_vec() flag value for computing the sum along each column. */
+, RSB_EXTF_ASUMS_ROW	=0x00001006			/*!< #rsb_mtx_get_vec() flag value for computing the absolute values sum, along each row. */
+, RSB_EXTF_ASUMS_COL	=0x00001007			/*!< #rsb_mtx_get_vec() flag value for computing the absolute values sum, along each column. */
+, RSB_EXTF_DIAG		=0x00000004			/*!< #rsb_mtx_get_vec() flag value for extracting the diagonal submatrix.*/
+};
+
+typedef rsb_flags_t rsb_marf_t;					/*!< Matrix rendering flags (see \ref marf_section for possible values). */
+/*!@{*/
+/*!
+ \ingroup rsb_doc_rsb
+ \name Matrix rendering flags
+ \anchor marf_section
+
+ These are flags which could be combined to specify rendering options to #rsb_mtx_rndr and #rsb_file_mtx_rndr.
+ */
+#define RSB_MARF_RGB	0x00000001			/*!< #rsb_marf_t Flag value for requesting an RGB rendering of a matrix. */
+#define RSB_MARF_EPS_S	0x00000010			/*!< #rsb_marf_t Flag value for requesting an Encapsulated Postscript rendering of a matrix (spy plot). */
+#define RSB_MARF_EPS_B	0x00000020			/*!< #rsb_marf_t Flag value for requesting an Encapsulated Postscript rendering of a matrix (blocks plot). */
+#define RSB_MARF_EPS	0x00000030			/*!< #rsb_marf_t Flag value for requesting an Encapsulated Postscript rendering of a matrix (spy plot + blocks). */
+#define RSB_MARF_EPS_L	0x00000070			/*!< #rsb_marf_t Flag value for requesting an Encapsulated Postscript rendering of a matrix (spy plot + blocks + labels). */
+/*!@}*/
+
+rsb_err_t rsb_mtx_get_nrm(const struct rsb_mtx_t * mtxAp , void * Np, enum rsb_extff_t flags);
+#define rsb_mtx_get_norm rsb_mtx_get_nrm /*!< \deprecated #rsb_mtx_get_norm has been deprecated: use #rsb_mtx_get_nrm . */
+rsb_err_t rsb_mtx_get_vec(const struct rsb_mtx_t * mtxAp , void * Dp, enum rsb_extff_t flags);
+rsb_err_t rsb_mtx_rndr(const rsb_char_t * filename, const struct rsb_mtx_t*mtxAp, rsb_coo_idx_t pmWidth, rsb_coo_idx_t pmHeight, rsb_marf_t rflags);
+rsb_err_t rsb_file_mtx_rndr(void * pmp, const rsb_char_t * filename, rsb_coo_idx_t pmlWidth, rsb_coo_idx_t pmWidth, rsb_coo_idx_t pmHeight, rsb_marf_t rflags);
+#define rsb_file_mtx_render rsb_file_mtx_rndr /*!< \deprecated #rsb_file_mtx_render has been deprecated: use #rsb_file_mtx_rndr. */
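+/* A sketch (hypothetical file name): render mtxAp to a 512x512 EPS spy plot:
+   rsb_mtx_rndr("A.eps", mtxAp, 512, 512, RSB_MARF_EPS);
+*/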
+
+rsb_err_t rsb_spmv(rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp, const void * Xp, rsb_coo_idx_t incX, const void * betap, void * Yp, rsb_coo_idx_t incY);
+rsb_err_t rsb_spmm(rsb_trans_t transA, const void * alphap, const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t nrhs, rsb_flags_t order, const void * Bp, rsb_nnz_idx_t ldB, const void * betap, void * Cp, rsb_nnz_idx_t ldC);
+rsb_err_t rsb_spsv(rsb_trans_t transT, const void * alphap, const struct rsb_mtx_t * mtxTp, const void * Xp, rsb_coo_idx_t incX, void * Yp, rsb_coo_idx_t incY);
+rsb_err_t rsb_spsm(rsb_trans_t transT, const void * alphap, const struct rsb_mtx_t * mtxTp, rsb_coo_idx_t nrhs, rsb_flags_t order, const void * betap, const void * Bp, rsb_nnz_idx_t ldB, void * Cp, rsb_nnz_idx_t ldC);
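+/* A sketch of a multi-vector multiply (assumes a double-typed mtxAp, row-major
+   nrhs-column dense arrays Bp, Cp with leading dimensions ldB, ldC, and
+   RSB_TRANSPOSITION_N from rsb_types.h):
+   const double alpha = 1.0, beta = 1.0;
+   rsb_spmm(RSB_TRANSPOSITION_N, &alpha, mtxAp, nrhs, RSB_FLAG_WANT_ROW_MAJOR_ORDER,
+            Bp, ldB, &beta, Cp, ldC); // C := alpha A B + beta C
+*/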
+
+rsb_err_t rsb_mtx_add_to_dense(const void *alphap, const struct rsb_mtx_t * mtxAp, rsb_nnz_idx_t ldB, rsb_nnz_idx_t nrB, rsb_nnz_idx_t ncB, rsb_bool_t rowmajorB, void * Bp);
+
+struct rsb_mtx_t * rsb_sppsp(rsb_type_t typecode, rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp, rsb_trans_t transB, const void *betap, const struct rsb_mtx_t * mtxBp, rsb_err_t * errvalp);
+struct rsb_mtx_t * rsb_spmsp(rsb_type_t typecode, rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp, rsb_trans_t transB, const void *betap, const struct rsb_mtx_t * mtxBp, rsb_err_t * errvalp);
+rsb_err_t rsb_spmsp_to_dense(rsb_type_t typecode, rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp, rsb_trans_t transB, const void *betap, const struct rsb_mtx_t * mtxBp , rsb_nnz_idx_t ldC, rsb_nnz_idx_t nrC, rsb_nnz_idx_t ncC, rsb_bool_t rowmajorC, void *Cp);
+
+rsb_err_t rsb_mtx_switch_to_coo(struct rsb_mtx_t * mtxAp, void ** VAp, rsb_coo_idx_t ** IAp, rsb_coo_idx_t ** JAp, rsb_flags_t flags);
+rsb_err_t rsb_mtx_switch_to_csr(struct rsb_mtx_t * mtxAp, void ** VAp, rsb_coo_idx_t ** IAp, rsb_coo_idx_t ** JAp, rsb_flags_t flags);
+rsb_err_t rsb_mtx_get_coo(const struct rsb_mtx_t * mtxAp, void * VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_flags_t flags );
+rsb_err_t rsb_mtx_get_csr(rsb_type_t typecode, const struct rsb_mtx_t * mtxAp, void * VA, rsb_nnz_idx_t * RP, rsb_coo_idx_t * JA, rsb_flags_t flags );
+rsb_err_t rsb_mtx_get_rows_sparse(rsb_trans_t transA, const void * alphap, const struct rsb_mtx_t * mtxAp, void* VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t frA, rsb_coo_idx_t lrA, rsb_nnz_idx_t *rnzp, rsb_flags_t flags );
+rsb_err_t rsb_mtx_get_coo_block(const struct rsb_mtx_t * mtxAp, void* VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t frA, rsb_coo_idx_t lrA, rsb_coo_idx_t fcA, rsb_coo_idx_t lcA, rsb_coo_idx_t * IREN, rsb_coo_idx_t * JREN, rsb_nnz_idx_t *rnzp, rsb_flags_t flags );
+
+/*! \ingroup rsb_doc_misc rsb_doc_rsb
+\brief Flags for getting matrix information via #rsb_mtx_get_info()/#rsb_mtx_get_info_str().
+*/
+enum rsb_mif_t
+{
+  RSB_MIF_INDEX_STORAGE_IN_BYTES__TO__SIZE_T		=0x00000001	/*!< Index storage occupation, in bytes. (size_t) */
+, RSB_MIF_INDEX_STORAGE_IN_BYTES_PER_NNZ__TO__RSB_REAL_T	=0x00000002	/*!< Index storage occupation per nnz, in bytes. (#rsb_real_t) */
+, RSB_MIF_MATRIX_ROWS__TO__RSB_COO_INDEX_T		=0x00000004	/*!< Rows count (#rsb_coo_idx_t) */
+, RSB_MIF_MATRIX_COLS__TO__RSB_COO_INDEX_T		=0x00000008	/*!< Columns count (#rsb_coo_idx_t) */
+, RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T		=0x00000010	/*!< Nonzeroes count (#rsb_nnz_idx_t) */
+, RSB_MIF_TOTAL_SIZE__TO__SIZE_T			=0x00000020	/*!< Total size, in bytes (size_t) */
+, RSB_MIF_MATRIX_FLAGS__TO__RSB_FLAGS_T			=0x00000040	/*!< Matrix flags (#rsb_flags_t) */
+, RSB_MIF_MATRIX_TYPECODE__TO__RSB_TYPE_T		=0x00000080	/*!< Matrix type code (#rsb_type_t) */
+, RSB_MIF_MATRIX_INFO__TO__CHAR_P			=0x00000100	/*!< Matrix info string, only for #rsb_mtx_get_info_str() (#rsb_char_t*) */
+, RSB_MIF_LEAVES_COUNT__TO__RSB_BLK_INDEX_T		=0x00000200	/*!< Leaf submatrices count (#rsb_blk_idx_t) */
+};
+rsb_err_t rsb_mtx_get_info(const struct rsb_mtx_t *mtxAp, enum rsb_mif_t miflags, void* minfop);
+rsb_err_t rsb_mtx_get_info_str(const struct rsb_mtx_t *mtxAp, const rsb_char_t *mis, void* minfop, size_t buflen);
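+/* A sketch: query the nonzeroes count of mtxAp via the flags above:
+   rsb_nnz_idx_t nnzA = 0;
+   rsb_mtx_get_info(mtxAp, RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T, &nnzA);
+*/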
+
+/*! \ingroup rsb_doc_misc rsb_doc_rsb
+\brief Flags for specifying a particular elemental/row-wise operation with #rsb_mtx_upd_vals(). */
+enum rsb_elopf_t
+{
+  RSB_ELOPF_MUL		=0x00000001		/*!< Elemental multiplication of the matrix by a specified scalar (usable with #rsb_mtx_upd_vals(), binary operation). */
+, RSB_ELOPF_DIV		=0x00000002		/*!< Elemental division by a specified scalar (usable with #rsb_mtx_upd_vals(), binary operation). */
+, RSB_ELOPF_POW		=0x00000004		/*!< Elemental power to a specified scalar (usable with #rsb_mtx_upd_vals(), binary operation). */
+, RSB_ELOPF_NEG		=0x00000008		/*!< Elemental negation (usable with #rsb_mtx_upd_vals(), unary operation). */
+, RSB_ELOPF_SCALE_ROWS	=0x00000010		/*!< Row    wise scaling by a specified scaling vector (usable with #rsb_mtx_upd_vals(), binary operation). */
+, RSB_ELOPF_SCALE_COLS	=0x00000020		/*!< Column wise scaling by a specified scaling vector (usable with #rsb_mtx_upd_vals(), binary operation). */
+, RSB_ELOPF_SCALE_ROWS_REAL	=0x00000040	/*!< Row    wise scaling by a specified scaling vector. If matrix is of a complex type, the argument is expected to be of the corresponding real type (assumed that that type has been enabled). (usable with #rsb_mtx_upd_vals(), binary operation). */
+, RSB_ELOPF_SCALE_COLS_REAL	=0x00000080	/*!< Column wise scaling by a specified scaling vector. If matrix is of a complex type, the argument is expected to be of the corresponding real type (assumed that that type has been enabled). (usable with #rsb_mtx_upd_vals(), binary operation). */
+};
+
+rsb_err_t rsb_mtx_upd_vals(struct rsb_mtx_t * mtxAp, enum rsb_elopf_t elop_flags, const void * omegap);
+#define rsb_mtx_upd_values rsb_mtx_upd_vals /*!< \deprecated #rsb_mtx_upd_values has been deprecated: use #rsb_mtx_upd_vals. */
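+/* A sketch (assumes a double-typed mtxAp): scale every stored value by 2.0:
+   const double omega = 2.0;
+   rsb_mtx_upd_vals(mtxAp, RSB_ELOPF_MUL, &omega);
+*/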
+typedef rsb_flags_t rsb_precf_t;	/*!< Basic preconditioner flags to be used with #rsb_mtx_get_prec(). */
+#define RSB_PRECF_ILU0		0x00000001		/*!< ILU-0 preconditioner request to #rsb_mtx_get_prec(). */
+rsb_err_t rsb_mtx_get_prec(void *opdp, const struct rsb_mtx_t * mtxAp, rsb_precf_t prec_flags, const void *ipdp);
+#define rsb_mtx_get_preconditioner rsb_mtx_get_prec 	/*!< \deprecated #rsb_mtx_get_preconditioner has been deprecated: use #rsb_mtx_get_prec. */
+rsb_err_t rsb_mtx_set_vals(struct rsb_mtx_t * mtxAp, const void * VA, const rsb_coo_idx_t *IA, const rsb_coo_idx_t *JA, rsb_nnz_idx_t nnz, rsb_flags_t flags);
+#define rsb_mtx_set_values rsb_mtx_set_vals /*!< \deprecated #rsb_mtx_set_values has been deprecated: use #rsb_mtx_set_vals. */
+rsb_err_t rsb_mtx_get_vals(const struct rsb_mtx_t * mtxAp, void * VA, const rsb_coo_idx_t *IA, const rsb_coo_idx_t *JA, rsb_nnz_idx_t nnz, rsb_flags_t flags);
+#define rsb_mtx_get_values rsb_mtx_get_vals /*!< \deprecated #rsb_mtx_get_values has been deprecated: use #rsb_mtx_get_vals. */
+rsb_err_t rsb_tune_spmm(struct rsb_mtx_t ** mtxOpp, rsb_real_t *sfp, rsb_int_t *tnp, rsb_int_t maxr, rsb_time_t maxt, rsb_trans_t transA, const void * alphap, const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t nrhs, rsb_flags_t order, const void * Bp, rsb_nnz_idx_t ldB, const void * betap, void * Cp, rsb_nnz_idx_t ldC);
+rsb_err_t rsb_tune_spsm(struct rsb_mtx_t ** mtxOpp, rsb_real_t *sfp, rsb_int_t *tnp, rsb_int_t maxr, rsb_time_t maxt, rsb_trans_t transA, const void * alphap, const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t nrhs, rsb_flags_t order, const void * Bp, rsb_nnz_idx_t ldB, const void * betap, void * Cp, rsb_nnz_idx_t ldC);
+rsb_trans_t rsb_psblas_trans_to_rsb_trans(const char psbtrans);
+rsb_err_t rsb_file_mtx_save(const struct rsb_mtx_t * mtxAp, const rsb_char_t * filename);
+struct rsb_mtx_t * rsb_file_mtx_load(const rsb_char_t * filename, rsb_flags_t flagsA, rsb_type_t typecode, rsb_err_t *errvalp);
+rsb_err_t rsb_file_vec_load(const rsb_char_t * filename, rsb_type_t typecode, void * Yp, rsb_coo_idx_t *yvlp);
+rsb_err_t rsb_file_vec_save(const rsb_char_t * filename, rsb_type_t typecode, const void * Yp, rsb_coo_idx_t yvl);
+rsb_err_t rsb_file_mtx_get_dims(const rsb_char_t * filename, rsb_coo_idx_t* nrp, rsb_coo_idx_t *ncp, rsb_coo_idx_t *nzp, rsb_flags_t*flagsp);
+#define rsb_file_mtx_get_dimensions rsb_file_mtx_get_dims /*!< \deprecated #rsb_file_mtx_get_dimensions has been deprecated: use #rsb_file_mtx_get_dims. */
+rsb_err_t rsb_coo_sort(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnzA, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA,  rsb_type_t typecode, rsb_flags_t flagsA );
+rsb_time_t rsb_time(void);
+
+/*! \internal
+ NOTE: user programs should never include explicitly rsb_types.h.
+ */
+#include "rsb_types.h"
+
+/*! \ingroup rsb_doc_misc rsb_doc_rsb
+ Use the #RSB_SIZEOF macro to get the size (in bytes) of a type supported by the library (e.g.: when allocating numerical vectors).
+ */
+#define RSB_SIZEOF(TYPE) RSB_NUMERICAL_TYPE_SIZE(TYPE)
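+/* E.g. (a sketch; RSB_NUMERICAL_TYPE_DOUBLE is assumed from rsb_types.h):
+   double *Xp = malloc(ncA * RSB_SIZEOF(RSB_NUMERICAL_TYPE_DOUBLE));
+*/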
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+#endif	/* RSB_RSB_H_INCLUDED */
diff --git a/rsb_asm.c b/rsb_asm.c
new file mode 100644
index 0000000..f00fc84
--- /dev/null
+++ b/rsb_asm.c
@@ -0,0 +1,200 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/**
+ * @file
+ * @brief Sparse matrices assembling code.
+ * @author Michele Martone
+ * */
+
+#include "rsb_common.h"
+#define RSB_WANT_PRINT_WARNING_ON_DISCARDED_NNZ 0
+extern struct rsb_session_handle_t rsb_global_session_handle;
+
+struct rsb_mtx_t * rsb__mtx_alloc_inner(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t roff, rsb_coo_idx_t coff, rsb_type_t typecode, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_blk_idx_t Mb, rsb_blk_idx_t Kb, rsb_flags_t flags, rsb_err_t * errvalp)
+{
+	/*!
+
+	   Allocates a recursively partitioned blocked sparse matrix,
+	   in a data structure specified by flags, and
+	   thus not necessarily with exact BCSR/BCSC leaves.
+
+	   \return a valid matrix pointer or NULL
+	*/
+
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+
+	RSB_DEBUG_ASSERT(roff>=-1 && coff>=-1); /* for Fortran */
+
+	if((m==0 || k==0) && nnz>0)
+	{
+		/* as a special case, we detect the m and k boundaries, if nnz>0 and m or k are zero */
+		/* TODO: shall use rsb_util_coo_alloc_copy_and_stats instead */
+		if(m==0 && IA) {m = rsb__util_find_max_index_val(IA,nnz)+roff+1;}
+		if(k==0 && JA) {k = rsb__util_find_max_index_val(JA,nnz)+coff+1;}
+		//printf("rc %d %d %d \n",m,k,nnz);
+	}
+
+	
+	if( RSB_DO_FLAG_HAS(flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE))
+	{
+		RSB_PERR_GOTO(err,"!\n");
+	}
+
+	if(roff && IA) rsb__util_coo_array_add(IA,nnz,roff);
+	if(coff && JA) rsb__util_coo_array_add(JA,nnz,coff);
+
+	RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORT_INPUT);
+	RSB_DO_FLAG_ADD(flags,RSB_FLAG_OWN_PARTITIONING_ARRAYS);	/* this is in order to free p_r and p_c with the matrix itself, and ignore original flag on this topic */
+	if(
+			(m<RSB_MIN_MATRIX_DIM||k<RSB_MIN_MATRIX_DIM||nnz<RSB_MIN_MATRIX_NNZ) ||
+			(m>RSB_MAX_MATRIX_DIM||k>RSB_MAX_MATRIX_DIM||nnz>RSB_MAX_MATRIX_NNZ) 
+	)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+#if RSB_WANT_RSB_AS_ONLY_ALLOWED_FORMAT
+	if(RSB_DO_TOOFEWNNZFORRCSR(nnz,RSB_MIN(m,k)))
+		RSB_DO_FLAG_ADD(flags,RSB_FLAG_WANT_COO_STORAGE);
+#endif /* RSB_WANT_RSB_AS_ONLY_ALLOWED_FORMAT */
+	if( RSB_HAVE_GOOD_PARMS_FOR_IN_PLACE_RCSR(m,k,nnz,flags)
+#if RSB_ALLOW_EMPTY_MATRICES
+	|| (nnz==0)
+#endif /* RSB_ALLOW_EMPTY_MATRICES */
+			)
+	{
+		if(!RSB_DO_FLAG_HAS(flags,RSB_FLAG_NON_ROOT_MATRIX))
+			return	rsb__allocate_recursive_sparse_matrix_from_row_major_coo(VA,IA,JA,m,k,nnz,typecode,NULL,flags,errvalp);
+	}
+#if RSB_WANT_RSB_AS_ONLY_ALLOWED_FORMAT
+	errval = RSB_ERR_INTERNAL_ERROR;
+	RSB_PERR_GOTO(err,"trying to call obsolete code!\n");
+#endif /* RSB_WANT_RSB_AS_ONLY_ALLOWED_FORMAT */
+
+	if(mtxAp)
+		return mtxAp;
+	else
+		goto err;
+err:
+	RSB_CONDITIONAL_ERRPSET(errvalp,errval);
+	return NULL;
+}
+
+rsb_err_t rsb__do_cleanup_nnz(void * VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t roff, rsb_coo_idx_t coff, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t *onnzp, rsb_type_t typecode, rsb_flags_t flags)
+{
+	/* 
+	 * TODO: this check should be done at leaf level only. are we sure we are leaf here ?
+	 * ... no. but why at leaf level only ?
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(nnz==0) /* diagonal implicit, for example */
+		goto ok;
+
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_UNIT_DIAG_IMPLICIT))
+	{
+		rsb_nnz_idx_t discarded = 0, gap = 0;
+		errval = rsb_weed_out_diagonal(VA,IA,JA,nnz,typecode,&gap,&discarded);
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_PERR_GOTO(err,"failed compacting non diagonal elements !\n");
+		}
+		RSB_DEBUG_ASSERT(discarded>=0);
+		RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(nnz));
+		RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(nnz-discarded));
+		if(RSB_WANT_PRINT_WARNING_ON_DISCARDED_NNZ && discarded>0)
+			;//RSB_INFO("#RSB_FLAG_UNIT_DIAG_IMPLICIT (EXPERIMENTAL) : discarded %zd diagonal elements\n",(rsb_printf_int_t)discarded);
+		nnz -= discarded;
+	}
+
+	if( RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER_TRIANGULAR) && roff==coff && roff==0)
+	{
+		rsb_nnz_idx_t discarded = 0, gap = 0;
+		errval = rsb_weed_out_non_lowtri(VA,IA,JA,nnz,typecode,&gap,&discarded);
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_PERR_GOTO(err,"failed compacting non lower triangular elements !\n");
+		}
+		RSB_DEBUG_ASSERT(discarded>=0);
+		RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(nnz));
+		RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(nnz-discarded));
+		if(RSB_WANT_PRINT_WARNING_ON_DISCARDED_NNZ && discarded>0)
+			RSB_INFO("#RSB_FLAG_LOWER_TRIANGULAR (EXPERIMENTAL) : discarded %zd non lower triangular\n",(rsb_printf_int_t)discarded);
+		nnz -= discarded;
+	}
+
+	if( RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER_TRIANGULAR) && roff==coff && roff==0)
+	{
+		rsb_nnz_idx_t discarded = 0, gap = 0;
+		errval = rsb_weed_out_non_upptri(VA,IA,JA,nnz,typecode,&gap,&discarded);
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_PERR_GOTO(err,"failed compacting non upper triangular elements !\n");
+		}
+		RSB_DEBUG_ASSERT(discarded>=0);
+		RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(nnz));
+		RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(nnz-discarded));
+		if(RSB_WANT_PRINT_WARNING_ON_DISCARDED_NNZ && discarded>0)
+			RSB_INFO("#RSB_FLAG_UPPER_TRIANGULAR (EXPERIMENTAL) : discarded %zd non upper triangular\n",(rsb_printf_int_t)discarded);
+		nnz -= discarded;
+	}
+
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_DISCARD_ZEROS))
+	{
+		rsb_nnz_idx_t discarded = 0, gap = 0;
+		errval = rsb_util_compact_nonzeros(VA,IA,JA,nnz,typecode,&gap,&discarded,RSB_FLAG_NOFLAGS);
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_PERR_GOTO(err,"failed compacting nonzeros!\n");
+		}
+		RSB_DEBUG_ASSERT(discarded>=0);
+		RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(nnz));
+		RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(nnz-discarded));
+		if(RSB_WANT_PRINT_WARNING_ON_DISCARDED_NNZ && discarded>0)
+			RSB_INFO("#RSB_FLAG_DISCARD_ZEROS (EXPERIMENTAL) : discarded %zd zeros\n",(rsb_printf_int_t)discarded);
+		nnz -= discarded;
+	}
+
+	if(1)
+	{
+		rsb_nnz_idx_t discarded = 0, gap = 0;
+		errval = rsb_do_util_compact_out_of_range(VA,IA,JA,nnz,roff,coff,m,k,typecode,&gap,&discarded);
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_PERR_GOTO(err,"failed compacting out of range!\n");
+		}
+		RSB_DEBUG_ASSERT(discarded>=0);
+		RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(nnz));
+		RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(nnz-discarded));
+		if(RSB_WANT_PRINT_WARNING_ON_DISCARDED_NNZ && discarded>0)
+			RSB_INFO("#: discarded %zd nonzeroes with out of range coordinates\n",(rsb_printf_int_t)discarded);
+		nnz -= discarded;
+	}
+ok:
+	*onnzp = nnz;
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
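+
+/*
+ * Illustrative call sketch for the function above (caller-side names are
+ * hypothetical; semantics as implemented here: entries are weeded out in
+ * place and the surviving count is returned through *onnzp):
+ *
+ *   rsb_nnz_idx_t onnz = 0;
+ *   errval = rsb__do_cleanup_nnz(VA, IA, JA, nnz, roff, coff, m, k,
+ *                                &onnz, typecode, flags);
+ *   if(!RSB_SOME_ERROR(errval))
+ *     nnz = onnz; // only the first onnz entries of VA/IA/JA remain meaningful
+ */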
+
+/* @endcond */
diff --git a/rsb_asm.h b/rsb_asm.h
new file mode 100644
index 0000000..3df6acb
--- /dev/null
+++ b/rsb_asm.h
@@ -0,0 +1,35 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief Sparse matrices assembling code.
+ * */
+
+#ifndef RSB_ASM_H_INCLUDED
+#define RSB_ASM_H_INCLUDED
+#include "rsb_common.h"
+struct rsb_mtx_t * rsb__mtx_alloc_inner(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t roff, rsb_coo_idx_t coff, rsb_type_t typecode, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_blk_idx_t Mb, rsb_blk_idx_t Kb, rsb_flags_t flags, rsb_err_t * errvalp);
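+/* Example invocation sketch (parameter roles inferred from the prototype
+ * above; the Mb/Kb blocking arguments and array ownership semantics are not
+ * documented here):
+ *   rsb_err_t errval = RSB_ERR_NO_ERROR;
+ *   struct rsb_mtx_t * mtxAp = rsb__mtx_alloc_inner(VA, IA, JA, nnz, 0, 0,
+ *       typecode, m, k, Mb, Kb, flags, &errval);
+ */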
+#endif /* RSB_ASM_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_bench.c b/rsb_bench.c
new file mode 100644
index 0000000..207f4d7
--- /dev/null
+++ b/rsb_bench.c
@@ -0,0 +1,2861 @@
+/* @cond INNERDOC */
+/*!
+ @file
+ @brief
+
+ Performance info gathering code. (OBSOLETE)
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+
+/*!
+ @file
+ @brief
+ Performance kernels dispatching code, for each type, submatrix size, and operation,
+ for the block compressed sparse stripes format.
+ Kernels are unrolled, with no loops, for user-specified blockings only.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#include "rsb_internals.h"
+#ifdef RSB_HAVE_CBLAS_H
+#include <cblas.h>
+#endif /* RSB_HAVE_CBLAS_H */
+#ifdef RSB_HAVE_CLAPACK_H
+#include <clapack.h>
+#endif /* RSB_HAVE_CLAPACK_H */
+#include <math.h>
+rsb_err_t rsb_fit_hyp(double x[], double y[], size_t nb_loop, double * a, double * b, double *c, double c_s)
+{
+#if !(RSB_HAVE_CLAPACK && RSB_HAVE_CBLAS)
+	return RSB_ERR_UNSUPPORTED_OPERATION;
+#else
+	/**
+	 * \ingroup gr_bench
+         * Note:
+	 *
+	 * This function will compute a performance predictor based on the
+         * nonzeros-per-row ratio, by fitting the two input vectors x (nonzeros
+         * per row) and y (megaflops), both with n = RSB_FITTING_SAMPLES points,
+         * to the following formula:
+         *
+         *           megaflops(nnz_per_row) = a + b / ( c + nnz_per_row )
+         *
+         * The c_s and nb_loop arguments will be documented some day.
+         *
+	 * This model is discussed in the following article:
+
+ @article{ButtEijkLang:spmvp,
+  title = {Performance Optimization and Modeling of Blocked Sparse Kernels},
+  author = {Buttari, Alfredo and Eijkhout, Victor and Langou, Julien and Filippone, Salvatore},
+  pages = {467--484},
+  year = 2007,
+  journal = {IJHPCA},
+  volume = 21,
+  url = {\url{http://www.tacc.utexas.edu/~eijkhout/Articles/2007-buttari-spmvp.pdf}}
+}
+         *
+         */
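+
+	/*
+	 * Worked example of the model (illustrative numbers, not measured data):
+	 * with a = 200, b = -300, c = 1 (megaflops), a matrix with 2 nonzeros
+	 * per row is predicted at 200 - 300/(1+2) = 100 megaflops, and the
+	 * prediction approaches the asymptote a = 200 megaflops as nnz_per_row
+	 * grows.
+	 */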
+
+	rsb_int nparms=3;
+	rsb_int n = RSB_FITTING_SAMPLES;
+	/* Fortran arrays */
+#define RSB_FORTRAN_ARRAY(AI,ROWS,COLS) AI[(ROWS)*(COLS)]
+
+	rsb_int nj = 3;
+	rsb_int i,j;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	double RSB_FORTRAN_ARRAY(G ,n,3);
+	double RSB_FORTRAN_ARRAY(G1,n,3);
+	double RSB_FORTRAN_ARRAY(GG,3,3);
+	double RSB_FORTRAN_ARRAY(z ,n,1);
+	double RSB_FORTRAN_ARRAY(z0,n,1);
+	double RSB_FORTRAN_ARRAY(dy,n,1);
+	double RSB_FORTRAN_ARRAY(ddy,3,1);
+	double RSB_FORTRAN_ARRAY(xj ,nj,1);
+	double RSB_FORTRAN_ARRAY(yj ,nj,1);
+	double RSB_FORTRAN_ARRAY(zj ,nj,1);
+
+	double xcpy[n];
+	double a_t,b_t,sum1,sum2,sum3,sum4,error,tmp_a,tmp_b,tmp_c, min_err,max,min,avg,intl;
+  	int /*i,*/info,ipivot[3],/*nj,j,*/k,cnt;
+	rsb_memcpy(xcpy,x,sizeof(xcpy));	/* copy exactly sizeof(xcpy) bytes; note that sizeof(x)==sizeof(double*) here, not n*sizeof(double) */
+
+
+	RSB_INFO("starting analysis...\n");
+	RSB_STDOUT("\n");
+	RSB_STDOUT("performance data:\n");
+	for(i=0;i<n;++i)
+	{
+		RSB_STDOUT("%lg %lg\n",xcpy[i],y[i]);
+	}
+
+	sum1=0;
+	sum2=0;
+	sum3=0;
+	sum4=0;
+
+
+  	*a=y[n-1];
+	
+	rsb_memcpy(xj,x,sizeof(xj));	/* copy exactly sizeof(xj) bytes, no more */
+	rsb_memcpy(yj,y,sizeof(yj));	/* copy exactly sizeof(yj) bytes, no more */
+
+	for(i=0;i<nj;++i)
+  	{
+		zj[i]=yj[i]-*a;
+  		zj[i]=1/zj[i];
+	}
+
+	for(i=0;i<nj;++i)
+	{
+		sum1=sum1 + xj[i]*zj[i];
+		sum2=sum2 + xj[i];
+		sum3=sum3 + zj[i];
+		sum4=sum4 + xj[i]*xj[i];
+	}
+
+	a_t= (sum3*sum4-sum2*sum1)/(nj*sum4-sum2*sum2);
+	b_t=(nj*sum1 - sum2*sum3) / (nj*sum4 - sum2*sum2);
+
+  	*b=1/b_t;
+	*c=a_t* *b;
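+
+	/*
+	 * Rationale for the initial guess above: if y = a + b/(x+c), then
+	 * z = 1/(y-a) = (x+c)/b = (1/b)*x + (c/b) is linear in x, so the
+	 * least-squares slope b_t and intercept a_t of z versus x give
+	 * b = 1/b_t and c = a_t*b.
+	 */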
+
+	for(i=0;i<n;++i)
+		z0[i]= *a +*b/(x[i]+*c);
+
+	error = 0;
+	for(j=0;j<n;++j)
+		error = error + (fabs( z0[j] - y[j] ) / y[j] );
+
+	error = error / n * 100;
+
+	min_err=error;
+
+	tmp_a=*a;
+	tmp_b=*b;
+	tmp_c=*c;
+
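+	/*
+	 * Gauss-Newton refinement: each iteration linearizes the residual
+	 * r_j = z0_j - y_j around (a,b,c) using the Jacobian columns
+	 * (dz0/da, dz0/db, dz0/dc) = (1, 1/(x+c), -b/(x+c)^2), then solves the
+	 * normal equations (G^T G) ddy = G^T r via LU factorization and
+	 * subtracts the update ddy from the current parameters.
+	 */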
+	for(i=0;i<nb_loop;++i)
+	{
+		for(j=0;j<n;++j)
+			dy[j] = z0[j]-y[j];
+
+		for(j=0;j<n;++j)
+		{
+			G[j+0*n]=1;
+			G[j+1*n]=1/(x[j]+tmp_c);
+			G[j+2*n]=-tmp_b/( (x[j]+tmp_c)*(x[j]+tmp_c) );
+
+			G1[j+0*n]= G[j+0*n];
+			G1[j+1*n]= G[j+1*n];
+			G1[j+2*n]= G[j+2*n];
+		}
+
+#if (RSB_HAVE_CLAPACK && RSB_HAVE_CBLAS)
+		cblas_dgemm(CblasColMajor,CblasTrans,CblasNoTrans,3,3,n,1.0,G,n,G1,n,0.0,GG,3);
+		errval =  clapack_dgetrf(CblasColMajor,3,3,GG,3,ipivot);
+		if(RSB_SOME_ERROR(errval)) goto err;
+		cblas_dgemv(CblasColMajor,CblasTrans,n,3,1.0,G,n,dy,1,0.0,ddy,1);
+		errval =  clapack_dgetrs(CblasColMajor,CblasNoTrans,3,1,GG,3,ipivot,ddy,3);
+		if(RSB_SOME_ERROR(errval)) goto err;
+#else /* (RSB_HAVE_CLAPACK && RSB_HAVE_CBLAS) */
+#endif /* (RSB_HAVE_CLAPACK && RSB_HAVE_CBLAS) */
+	
+		tmp_a = tmp_a-ddy[1-1];
+		tmp_b = tmp_b-ddy[2-1];
+		tmp_c = tmp_c-ddy[3-1];
+
+		for(j=0;j<n;++j)
+			z0[j]= tmp_a +tmp_b/(x[j]+tmp_c);
+
+		error = 0;
+		for(j=0;j<n;++j)
+	       		error = error + (fabs( z0[j] - y[j] ) / y[j] );
+
+		error = error / n * 100;
+		if(error < min_err)
+		{
+		        *a=tmp_a;
+		        *b=tmp_b;
+		        *c=tmp_c;
+		}
+	}
+
+	if((*c< 0) && (*c  < c_s))
+	{
+		*c=10000;
+		*b=10000;
+		avg=0;
+		max=y[0];
+		min=y[0];
+		for(i=0;i<n;++i)
+		{
+		        if (y[i] > max) max=y[i];
+		        if (y[i] < min) min=y[i];
+		        avg=avg+y[i];
+		}
+		avg=avg/(double)(n);
+		*a=avg;
+		intl=max-min;
+		avg=0;
+		cnt=0;
+		for(/*i=0*/;i<n;++i)
+		//for(i=0;i<n;++i)
+		{
+        		if (fabs(y[i]-avg) < (0.3*intl))
+			{
+				avg = avg + y[i];
+				cnt=cnt+1;
+			}
+		}
+     		if(cnt > 0) *a=avg/(double)cnt;
+	}
+	else
+  	if (*b >= 0)
+	{
+		*c=10000;
+		*b=10000;
+		avg=0;
+		max=y[0];
+		min=y[0];
+		for(i=0;i<n;++i)
+		{
+			if (y[i] > max) max=y[i];
+			if (y[i] < min) min=y[i];
+			avg=avg+y[i];
+		}
+		avg=avg/(double)n;
+		intl=max-min;
+		avg=0;
+		cnt=0;
+		//for(i=0;i<n;++i)
+		for(/*i=0*/;i<n;++i)
+		{
+		        if (fabs(y[i]-avg) < (0.3*intl))
+			{
+				avg = avg + y[i];
+				cnt=cnt+1;
+			}
+		}
+		if(cnt > 0) *a=avg/ (double) cnt;
+	}
+
+
+	RSB_STDOUT("\n");
+	RSB_STDOUT("alpha:%lg beta:%lg gamma:%lg\n",*a,*b,*c);
+
+	RSB_STDOUT("\nfitting:\n");
+	for(i=0;i<n;++i)
+	{
+		RSB_STDOUT("%lg %lg\n", xcpy[i], *a+*b/(xcpy[i]+*c));
+	}
+
+	return RSB_ERR_NO_ERROR;
+	err:
+	RSB_DO_ERR_RETURN(errval)
+#endif /* RSB_HAVE_CLAPACK && RSB_HAVE_CBLAS */
+}
+
+rsb_err_t rsb__do_referencebenchmark(void)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * A complete benchmark program.
+	 * Will benchmark all supported matrix operations over all supported types
+	 * over all supported matrix partitionings.
+	 *
+	 * Moreover, it will perform analysis of the performance data and dump the results.
+         *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 * FIXME : UNFINISHED: should process and dump this info in a header file.
+	 */
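+	/*
+	 * Structure sketch (as realized below): for each register blocking
+	 * (rua[ri] x cua[ci]) and for each bandwidth sample, a blocked banded
+	 * test matrix is generated and every matrix operation is timed; the
+	 * per-sample figures land in grpi.gpi[type].pipmo[op].pipfs[sample].
+	 */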
+	struct rsb_global_reference_performance_info_t grpi;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_blk_idx_t ri,ci;	/* row index, column index */
+	rsb_coo_idx_t order=20000;
+	rsb_coo_idx_t rows=order,cols=order;	/* FIXME : TEMPORARY */
+	rsb_blk_idx_t rua[] = RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[] = RSB_COLUMNS_UNROLL_ARRAY;
+	double tot_secs=0.0,pred_secs=1.0;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	size_t kernels_n = RSB_ROWS_UNROLL_ARRAY_LENGTH*RSB_COLUMNS_UNROLL_ARRAY_LENGTH*RSB_IMPLEMENTED_MOPS*RSB_IMPLEMENTED_TYPES;
+	rsb_int ti=0;	/* type index */
+	int fbw,bwi;
+	RSB_BZERO_P(&grpi);
+
+	/* if((errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS))){goto err;} we skip this to enable calling this from within our library (FIXME) */
+
+	if(RSB_FITTING_SAMPLES<2)
+	{	
+		fbw=(RSB_FIRST_FITTING_SAMPLE_BW_MAX + RSB_FIRST_FITTING_SAMPLE_BW_MIN)/2;
+		bwi=fbw;
+	}
+	else
+	{
+		fbw = RSB_FIRST_FITTING_SAMPLE_BW_MIN;
+		bwi=(RSB_FIRST_FITTING_SAMPLE_BW_MAX - RSB_FIRST_FITTING_SAMPLE_BW_MIN)/(RSB_FITTING_SAMPLES-1);
+	}
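+	/* With the settings above, the bandwidth samples bw = fbw, fbw+bwi, ...
+	 * are (approximately) evenly spaced between RSB_FIRST_FITTING_SAMPLE_BW_MIN
+	 * and RSB_FIRST_FITTING_SAMPLE_BW_MAX, yielding up to RSB_FITTING_SAMPLES
+	 * points for the fitter (or a single midpoint when one sample is requested). */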
+	
+	tot_secs = -rsb_time();
+	pred_secs *= RSB_ROWS_UNROLL_ARRAY_LENGTH * RSB_COLUMNS_UNROLL_ARRAY_LENGTH * RSB_FITTING_SAMPLES * RSB_IMPLEMENTED_META_MOPS *  RSB_IMPLEMENTED_TYPES * RSB_BENCHMARK_MIN_SECONDS;
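+	/* i.e. one measurement per (blocking, bandwidth sample, operation, type)
+	 * combination, each lasting at least RSB_BENCHMARK_MIN_SECONDS seconds. */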
+	RSB_STDERR("#reference benchmarking of %zd kernels (no transposed, no symmetric, and so on) should take at least %lg seconds..\n",kernels_n,pred_secs);
+
+	/* double type benchmarking */
+/*	RSB_INFO("#mtype type benchmarking\n");*/
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_blk_idx_t br = rua[ri];
+			rsb_blk_idx_t bc = cua[ci];
+			rsb_coo_idx_t bw,mbw=(cols/bc);
+			rsb_int si=0;	/* sample index */
+			mbw=(cols-bc)/bc;	/* tune here to fill our matrix further */
+			/* FIXME : there is the danger of empty samples! */
+			for(bw=fbw;bw<=mbw && si< RSB_FITTING_SAMPLES ;bw+=bwi)	/* this parameter should be tunable, too */
+			{
+				//RSB_INFO("bw = %d\n",bw);
+				rsb_int moi=0;	/* matrix operation index */
+				double time,*timep=&time;
+				struct rsb_mtx_t * mtxAp =
+					rsb__generate_blocked_banded(br,bc,rows,cols,bw,timep,RSB_NUMERICAL_TYPE_DOUBLE ,RSB_BOOL_TRUE );	/* FIXME : generating triangular factors always ! */
+				if(!mtxAp)
+				{
+					RSB_STDERR(RSB_ERRM_IE);
+					{errval = RSB_ERR_GENERIC_ERROR; goto err;}
+				}
+
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_uaua operation benchmarking\n");*/
+					/* spmv_uaua operation benchmarking */
+					double *out=NULL,*rhs=NULL;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_spmv_uaua;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_spmv_uaua;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double library implementation for operation spmv_uaua */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_double_spmv_uaua(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_spmv_uaua;}
+					++moi;
+
+					erri_double_spmv_uaua:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_uauz operation benchmarking\n");*/
+					/* spmv_uauz operation benchmarking */
+					double *out=NULL,*rhs=NULL;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_spmv_uauz;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_spmv_uauz;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double library implementation for operation spmv_uauz */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_double_spmv_uauz(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_spmv_uauz;}
+					++moi;
+
+					erri_double_spmv_uauz:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_uxua operation benchmarking\n");*/
+					/* spmv_uxua operation benchmarking */
+					double *out=NULL,*rhs=NULL;
+			double alpha=1.0;/* FIXME */
+			double * alphap = &alpha;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_spmv_uxua;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_spmv_uxua;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double library implementation for operation spmv_uxua */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_double_spmv_uxua(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,alphap,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_spmv_uxua;}
+					++moi;
+
+					erri_double_spmv_uxua:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_unua operation benchmarking\n");*/
+					/* spmv_unua operation benchmarking */
+					double *out=NULL,*rhs=NULL;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_spmv_unua;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_spmv_unua;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double library implementation for operation spmv_unua */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_double_spmv_unua(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_spmv_unua;}
+					++moi;
+
+					erri_double_spmv_unua:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_sasa operation benchmarking\n");*/
+					/* spmv_sasa operation benchmarking */
+					double *out=NULL,*rhs=NULL;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_spmv_sasa;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_spmv_sasa;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double library implementation for operation spmv_sasa */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_double_spmv_sasa(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,incx,incy,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_spmv_sasa;}
+					++moi;
+
+					erri_double_spmv_sasa:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spsv_uxua operation benchmarking\n");*/
+					/* spsv_uxua operation benchmarking */
+					double *out=NULL,*rhs=NULL;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_spsv_uxua;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_spsv_uxua;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double library implementation for operation spsv_uxua */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_double_spsv_uxua(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_spsv_uxua;}
+					++moi;
+
+					erri_double_spsv_uxua:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_sxsa operation benchmarking\n");*/
+					/* spmv_sxsa operation benchmarking */
+					double *out=NULL,*rhs=NULL;
+			double alpha=1.0;/* FIXME */
+			double * alphap = &alpha;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_spmv_sxsa;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_spmv_sxsa;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double library implementation for operation spmv_sxsa */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_double_spmv_sxsa(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,alphap,incx,incy,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_spmv_sxsa;}
+					++moi;
+
+					erri_double_spmv_sxsa:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spsv_sxsx operation benchmarking\n");*/
+					/* spsv_sxsx operation benchmarking */
+					double *out=NULL,*rhs=NULL;
+			double alpha=1.0;/* FIXME */
+			double * alphap = &alpha;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_spsv_sxsx;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_spsv_sxsx;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double library implementation for operation spsv_sxsx */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_double_spsv_sxsx(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,alphap,incx,incy,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_spsv_sxsx;}
+					++moi;
+
+					erri_double_spsv_sxsx:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("infty_norm operation benchmarking\n");*/
+					/* infty_norm operation benchmarking */
+					double * row_sums;
+
+					
+					row_sums = rsb__malloc(mtxAp->el_size*(rows+br));
+					if(!row_sums) {errval = RSB_ERR_ENOMEM;goto erri_double_infty_norm;}
+					if(rsb__fill_with_ones(row_sums,mtxAp->typecode,cols,1))     {errval = RSB_ERR_ENOMEM;goto erri_double_infty_norm;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double library implementation for operation infty_norm */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_double_infty_norm(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,transA,row_sums);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_infty_norm;}
+					++moi;
+
+					erri_double_infty_norm:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(row_sums);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("rowssums operation benchmarking\n");*/
+					/* rowssums operation benchmarking */
+					double * row_sums;
+
+					
+					row_sums = rsb__malloc(mtxAp->el_size*(rows+br));
+					if(!row_sums) {errval = RSB_ERR_ENOMEM;goto erri_double_rowssums;}
+					if(rsb__fill_with_ones(row_sums,mtxAp->typecode,cols,1))     {errval = RSB_ERR_ENOMEM;goto erri_double_rowssums;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double library implementation for operation rowssums */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_double_rowssums(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,transA,row_sums);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_rowssums;}
+					++moi;
+
+					erri_double_rowssums:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(row_sums);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("scale operation benchmarking\n");*/
+					/* scale operation benchmarking */
+
+					
+					double * scale_factors = rsb__malloc(mtxAp->el_size*(rows+br));
+					if(!scale_factors) {errval = RSB_ERR_ENOMEM;goto erri_double_scale;}
+					if(rsb__fill_with_ones(scale_factors,mtxAp->typecode,rows,1))     {errval = RSB_ERR_ENOMEM;goto erri_double_scale;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double library implementation for operation scale */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_double_scale(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,transA,scale_factors);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_scale;}
+					++moi;
+
+					erri_double_scale:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(scale_factors);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("mat_stats operation benchmarking\n");*/
+					/* mat_stats operation benchmarking */
+
+					
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double library implementation for operation mat_stats */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 0;/* meta-op : we already measured matrix creation time  */
+grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]=time;
+grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]=((double)rsb__do_get_matrix_nnz(mtxAp))/1000000;
+/* FIXME : this is experimental and unfinished code */
+
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_mat_stats;}
+					++moi;
+
+					erri_double_mat_stats:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+				}
+				RSB_MTX_FREE(mtxAp);
+				++si;
+			}	
+		}
+	}
+	{
+		rsb_int moi;
+		rsb_char_t * mops[] = RSB_MATRIX_OPS_ARRAY;
+		rsb_char_t * types[] = RSB_MATRIX_TYPES_ARRAY;
+		rsb_char_t s[128];
+		rsb__print_mop_reference_performance_info_header();
+		for(moi=0;moi<RSB_IMPLEMENTED_META_MOPS;++moi)
+		{	
+/*			rsb_int si;*/
+			/* informational printout */
+			sprintf(s,"%s\t%s\t",types[ti], mops[moi]);
+			rsb__print_mop_reference_performance_info(&(grpi.gpi[ti].pipmo[moi]),s);
+/*			for(si=0;si<RSB_FITTING_SAMPLES;++si)*/
+/*				rsb__dump_performance_info(&(grpi.gpi[ti].pipmo[moi].pipfs[si]), NULL);*/
+		}
+	}
+	++ti;
+	/* float type benchmarking */
+/*	RSB_INFO("#mtype type benchmarking\n");*/
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_blk_idx_t br = rua[ri];
+			rsb_blk_idx_t bc = cua[ci];
+			rsb_coo_idx_t bw,mbw=(cols/bc);
+			rsb_int si=0;	/* sample index */
+			mbw=(cols-bc)/bc;	/* tune here to fill our matrix further */
+			/* FIXME : there is the danger of empty samples! */
+			for(bw=fbw;bw<=mbw && si< RSB_FITTING_SAMPLES ;bw+=bwi)	/* this parameter should be tunable, too */
+			{
+				//RSB_INFO("bw = %d\n",bw);
+				rsb_int moi=0;	/* matrix operation index */
+				double time,*timep=&time;
+				struct rsb_mtx_t * mtxAp =
+					rsb__generate_blocked_banded(br,bc,rows,cols,bw,timep,RSB_NUMERICAL_TYPE_FLOAT ,RSB_BOOL_TRUE );	/* FIXME : generating triangular factors always ! */
+				if(!mtxAp)
+				{
+					RSB_STDERR(RSB_ERRM_IE);
+					{errval = RSB_ERR_GENERIC_ERROR; goto err;}
+				}
+
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_uaua operation benchmarking\n");*/
+					/* spmv_uaua operation benchmarking */
+					float *out=NULL,*rhs=NULL;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_spmv_uaua;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_spmv_uaua;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float library implementation for operation spmv_uaua */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_float_spmv_uaua(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_spmv_uaua;}
+					++moi;
+
+					erri_float_spmv_uaua:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_uauz operation benchmarking\n");*/
+					/* spmv_uauz operation benchmarking */
+					float *out=NULL,*rhs=NULL;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_spmv_uauz;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_spmv_uauz;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float library implementation for operation spmv_uauz */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_float_spmv_uauz(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_spmv_uauz;}
+					++moi;
+
+					erri_float_spmv_uauz:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_uxua operation benchmarking\n");*/
+					/* spmv_uxua operation benchmarking */
+					float *out=NULL,*rhs=NULL;
+			double alpha=1.0;/* FIXME */
+			double * alphap = &alpha;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_spmv_uxua;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_spmv_uxua;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float library implementation for operation spmv_uxua */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_float_spmv_uxua(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,alphap,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_spmv_uxua;}
+					++moi;
+
+					erri_float_spmv_uxua:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_unua operation benchmarking\n");*/
+					/* spmv_unua operation benchmarking */
+					float *out=NULL,*rhs=NULL;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_spmv_unua;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_spmv_unua;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float library implementation for operation spmv_unua */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_float_spmv_unua(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_spmv_unua;}
+					++moi;
+
+					erri_float_spmv_unua:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_sasa operation benchmarking\n");*/
+					/* spmv_sasa operation benchmarking */
+					float *out=NULL,*rhs=NULL;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_spmv_sasa;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_spmv_sasa;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float library implementation for operation spmv_sasa */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_float_spmv_sasa(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,incx,incy,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_spmv_sasa;}
+					++moi;
+
+					erri_float_spmv_sasa:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spsv_uxua operation benchmarking\n");*/
+					/* spsv_uxua operation benchmarking */
+					float *out=NULL,*rhs=NULL;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_spsv_uxua;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_spsv_uxua;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float library implementation for operation spsv_uxua */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_float_spsv_uxua(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_spsv_uxua;}
+					++moi;
+
+					erri_float_spsv_uxua:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_sxsa operation benchmarking\n");*/
+					/* spmv_sxsa operation benchmarking */
+					float *out=NULL,*rhs=NULL;
+			double alpha=1.0;/* FIXME */
+			double * alphap = &alpha;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_spmv_sxsa;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_spmv_sxsa;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float library implementation for operation spmv_sxsa */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_float_spmv_sxsa(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,alphap,incx,incy,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_spmv_sxsa;}
+					++moi;
+
+					erri_float_spmv_sxsa:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spsv_sxsx operation benchmarking\n");*/
+					/* spsv_sxsx operation benchmarking */
+					float *out=NULL,*rhs=NULL;
+					double alpha=1.0;	/* FIXME */
+					double * alphap = &alpha;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_spsv_sxsx;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_spsv_sxsx;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float library implementation for operation spsv_sxsx */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_float_spsv_sxsx(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,alphap,incx,incy,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_spsv_sxsx;}
+					++moi;
+
+					erri_float_spsv_sxsx:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("infty_norm operation benchmarking\n");*/
+					/* infty_norm operation benchmarking */
+					float * row_sums;
+
+					
+					row_sums = rsb__malloc(mtxAp->el_size*(rows+br));
+					if(!row_sums) {errval = RSB_ERR_ENOMEM;goto erri_float_infty_norm;}
+					if(rsb__fill_with_ones(row_sums,mtxAp->typecode,cols,1))     {errval = RSB_ERR_ENOMEM;goto erri_float_infty_norm;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float library implementation for operation infty_norm */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_float_infty_norm(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,transA,row_sums);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_infty_norm;}
+					++moi;
+
+					erri_float_infty_norm:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(row_sums);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("rowssums operation benchmarking\n");*/
+					/* rowssums operation benchmarking */
+					float * row_sums;
+
+					
+					row_sums = rsb__malloc(mtxAp->el_size*(rows+br));
+					if(!row_sums) {errval = RSB_ERR_ENOMEM;goto erri_float_rowssums;}
+					if(rsb__fill_with_ones(row_sums,mtxAp->typecode,cols,1))     {errval = RSB_ERR_ENOMEM;goto erri_float_rowssums;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float library implementation for operation rowssums */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_float_rowssums(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,transA,row_sums);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_rowssums;}
+					++moi;
+
+					erri_float_rowssums:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(row_sums);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("scale operation benchmarking\n");*/
+					/* scale operation benchmarking */
+
+					
+					float * scale_factors = rsb__malloc(mtxAp->el_size*(rows+br));
+					if(!scale_factors) {errval = RSB_ERR_ENOMEM;goto erri_float_scale;}
+					if(rsb__fill_with_ones(scale_factors,mtxAp->typecode,rows,1))     {errval = RSB_ERR_ENOMEM;goto erri_float_scale;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float library implementation for operation scale */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_float_scale(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,transA,scale_factors);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_scale;}
+					++moi;
+
+					erri_float_scale:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(scale_factors);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("mat_stats operation benchmarking\n");*/
+					/* mat_stats operation benchmarking */
+
+					
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float library implementation for operation mat_stats */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 0;	/* meta-op : we already measured matrix creation time */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = time;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = ((double)rsb__do_get_matrix_nnz(mtxAp))/1000000;
+					/* FIXME : this is experimental and unfinished code */
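+					/* note: for this meta-op, "seconds" holds the previously measured construction time and "m_flops" the nonzero count in millions, so the derived rate reads as millions of nonzeroes assembled per second */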
+
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_mat_stats;}
+					++moi;
+
+					erri_float_mat_stats:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+				}
+				RSB_MTX_FREE(mtxAp);
+				++si;
+			}	
+		}
+	}
+	{
+		rsb_int moi;
+		rsb_char_t * mops[] = RSB_MATRIX_OPS_ARRAY;
+		rsb_char_t * types[] = RSB_MATRIX_TYPES_ARRAY;
+		rsb_char_t s[128];
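+		/* note: s is assumed large enough for any "<type>\t<mop>\t" pair printed below */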
+		rsb__print_mop_reference_performance_info_header();
+		for(moi=0;moi<RSB_IMPLEMENTED_META_MOPS;++moi)
+		{	
+/*			rsb_int si;*/
+			/* informational printout */
+			sprintf(s,"%s\t%s\t",types[ti], mops[moi]);
+			rsb__print_mop_reference_performance_info(&(grpi.gpi[ti].pipmo[moi]),s);
+/*			for(si=0;si<RSB_FITTING_SAMPLES;++si)*/
+/*				rsb__dump_performance_info(&(grpi.gpi[ti].pipmo[moi].pipfs[si]), NULL);*/
+		}
+	}
+	++ti;
+	/* float complex type benchmarking */
+/*	RSB_INFO("#mtype type benchmarking\n");*/
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_blk_idx_t br = rua[ri];
+			rsb_blk_idx_t bc = cua[ci];
+			rsb_coo_idx_t bw,mbw=(cols/bc);
+			rsb_int si=0;	/* sample index */
+			mbw=(cols-bc)/bc;	/* tune here to fill further our matrix */
+			/* FIXME : there is the danger of empty samples! */
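+			/* sweep the semi-bandwidth bw from fbw in steps of bwi, collecting at most RSB_FITTING_SAMPLES samples per (br,bc) blocking */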
+			for(bw=fbw;bw<=mbw && si< RSB_FITTING_SAMPLES ;bw+=bwi)	/* this parameter should be tunable, too */
+			{
+				//RSB_INFO("bw = %d\n",bw);
+				rsb_int moi=0;	/* matrix operation index */
+				double time,*timep=&time;
+				struct rsb_mtx_t * mtxAp =
+					rsb__generate_blocked_banded(br,bc,rows,cols,bw,timep,RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,RSB_BOOL_TRUE );	/* FIXME : generating triangular factors always ! */
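+				/* triangular factors are generated unconditionally (see FIXME above), presumably so the spsv_* solve kernels can be timed on the same matrices as the spmv_* ones */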
+				if(!mtxAp)
+				{
+					RSB_STDERR(RSB_ERRM_IE);
+					{errval = RSB_ERR_GENERIC_ERROR; goto err;}
+				}
+
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_uaua operation benchmarking\n");*/
+					/* spmv_uaua operation benchmarking */
+					float complex *out=NULL,*rhs=NULL;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_complex_spmv_uaua;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_complex_spmv_uaua;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float complex library implementation for operation spmv_uaua */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_float_complex_spmv_uaua(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_complex_spmv_uaua;}
+					++moi;
+
+					erri_float_complex_spmv_uaua:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_uauz operation benchmarking\n");*/
+					/* spmv_uauz operation benchmarking */
+					float complex *out=NULL,*rhs=NULL;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_complex_spmv_uauz;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_complex_spmv_uauz;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float complex library implementation for operation spmv_uauz */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_float_complex_spmv_uauz(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_complex_spmv_uauz;}
+					++moi;
+
+					erri_float_complex_spmv_uauz:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_uxua operation benchmarking\n");*/
+					/* spmv_uxua operation benchmarking */
+					float complex *out=NULL,*rhs=NULL;
+					double alpha=1.0;	/* FIXME */
+					double * alphap = &alpha;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_complex_spmv_uxua;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_complex_spmv_uxua;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float complex library implementation for operation spmv_uxua */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_float_complex_spmv_uxua(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,alphap,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_complex_spmv_uxua;}
+					++moi;
+
+					erri_float_complex_spmv_uxua:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_unua operation benchmarking\n");*/
+					/* spmv_unua operation benchmarking */
+					float complex *out=NULL,*rhs=NULL;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_complex_spmv_unua;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_complex_spmv_unua;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float complex library implementation for operation spmv_unua */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_float_complex_spmv_unua(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_complex_spmv_unua;}
+					++moi;
+
+					erri_float_complex_spmv_unua:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_sasa operation benchmarking\n");*/
+					/* spmv_sasa operation benchmarking */
+					float complex *out=NULL,*rhs=NULL;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_complex_spmv_sasa;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_complex_spmv_sasa;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float complex library implementation for operation spmv_sasa */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_float_complex_spmv_sasa(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,incx,incy,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_complex_spmv_sasa;}
+					++moi;
+
+					erri_float_complex_spmv_sasa:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spsv_uxua operation benchmarking\n");*/
+					/* spsv_uxua operation benchmarking */
+					float complex *out=NULL,*rhs=NULL;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_complex_spsv_uxua;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_complex_spsv_uxua;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float complex library implementation for operation spsv_uxua */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_float_complex_spsv_uxua(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_complex_spsv_uxua;}
+					++moi;
+
+					erri_float_complex_spsv_uxua:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_sxsa operation benchmarking\n");*/
+					/* spmv_sxsa operation benchmarking */
+					float complex *out=NULL,*rhs=NULL;
+					double alpha=1.0;	/* FIXME */
+					double * alphap = &alpha;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_complex_spmv_sxsa;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_complex_spmv_sxsa;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float complex library implementation for operation spmv_sxsa */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_float_complex_spmv_sxsa(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,alphap,incx,incy,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_complex_spmv_sxsa;}
+					++moi;
+
+					erri_float_complex_spmv_sxsa:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spsv_sxsx operation benchmarking\n");*/
+					/* spsv_sxsx operation benchmarking */
+					float complex *out=NULL,*rhs=NULL;
+					double alpha=1.0;	/* FIXME */
+					double * alphap = &alpha;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_complex_spsv_sxsx;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_float_complex_spsv_sxsx;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float complex library implementation for operation spsv_sxsx */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_float_complex_spsv_sxsx(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,alphap,incx,incy,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_complex_spsv_sxsx;}
+					++moi;
+
+					erri_float_complex_spsv_sxsx:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("infty_norm operation benchmarking\n");*/
+					/* infty_norm operation benchmarking */
+					float complex * row_sums;
+
+					
+					row_sums = rsb__malloc(mtxAp->el_size*(rows+br));
+					if(!row_sums) {errval = RSB_ERR_ENOMEM;goto erri_float_complex_infty_norm;}
+					if(rsb__fill_with_ones(row_sums,mtxAp->typecode,cols,1))     {errval = RSB_ERR_ENOMEM;goto erri_float_complex_infty_norm;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float complex library implementation for operation infty_norm */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_float_complex_infty_norm(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,transA,row_sums);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_complex_infty_norm;}
+					++moi;
+
+					erri_float_complex_infty_norm:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(row_sums);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("rowssums operation benchmarking\n");*/
+					/* rowssums operation benchmarking */
+					float complex * row_sums;
+
+					
+					row_sums = rsb__malloc(mtxAp->el_size*(rows+br));
+					if(!row_sums) {errval = RSB_ERR_ENOMEM;goto erri_float_complex_rowssums;}
+					if(rsb__fill_with_ones(row_sums,mtxAp->typecode,cols,1))     {errval = RSB_ERR_ENOMEM;goto erri_float_complex_rowssums;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float complex library implementation for operation rowssums */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_float_complex_rowssums(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,transA,row_sums);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_complex_rowssums;}
+					++moi;
+
+					erri_float_complex_rowssums:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(row_sums);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("scale operation benchmarking\n");*/
+					/* scale operation benchmarking */
+
+					
+					float complex * scale_factors = rsb__malloc(mtxAp->el_size*(rows+br));
+					if(!scale_factors) {errval = RSB_ERR_ENOMEM;goto erri_float_complex_scale;}
+					if(rsb__fill_with_ones(scale_factors,mtxAp->typecode,rows,1))     {errval = RSB_ERR_ENOMEM;goto erri_float_complex_scale;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float complex library implementation for operation scale */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_float_complex_scale(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,transA,scale_factors);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_complex_scale;}
+					++moi;
+
+					erri_float_complex_scale:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(scale_factors);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("mat_stats operation benchmarking\n");*/
+					/* mat_stats operation benchmarking */
+
+					
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our float complex library implementation for operation mat_stats */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 0;	/* meta-op : we already measured matrix creation time */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = time;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = ((double)rsb__do_get_matrix_nnz(mtxAp))/1000000;
+					/* FIXME : this is experimental and unfinished code */
+
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_float_complex_mat_stats;}
+					++moi;
+
+					erri_float_complex_mat_stats:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+				}
+				RSB_MTX_FREE(mtxAp);
+				++si;
+			}	
+		}
+	}
+	{
+		rsb_int moi;
+		rsb_char_t * mops[] = RSB_MATRIX_OPS_ARRAY;
+		rsb_char_t * types[] = RSB_MATRIX_TYPES_ARRAY;
+		rsb_char_t s[128];
+		rsb__print_mop_reference_performance_info_header();
+		for(moi=0;moi<RSB_IMPLEMENTED_META_MOPS;++moi)
+		{	
+/*			rsb_int si;*/
+			/* informational printout */
+			sprintf(s,"%s\t%s\t",types[ti], mops[moi]);
+			rsb__print_mop_reference_performance_info(&(grpi.gpi[ti].pipmo[moi]),s);
+/*			for(si=0;si<RSB_FITTING_SAMPLES;++si)*/
+/*				rsb__dump_performance_info(&(grpi.gpi[ti].pipmo[moi].pipfs[si]), NULL);*/
+		}
+	}
+	++ti;
+	/* double complex type benchmarking */
+/*	RSB_INFO("#mtype type benchmarking\n");*/
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_blk_idx_t br = rua[ri];
+			rsb_blk_idx_t bc = cua[ci];
+			rsb_coo_idx_t bw,mbw=(cols/bc);
+			rsb_int si=0;	/* sample index */
+			mbw=(cols-bc)/bc;	/* tune here to fill further our matrix */
+			/* FIXME : there is the danger of empty samples! */
+			for(bw=fbw;bw<=mbw && si< RSB_FITTING_SAMPLES ;bw+=bwi)	/* this parameter should be tunable, too */
+			{
+				//RSB_INFO("bw = %d\n",bw);
+				rsb_int moi=0;	/* matrix operation index */
+				double time,*timep=&time;
+				struct rsb_mtx_t * mtxAp =
+					rsb__generate_blocked_banded(br,bc,rows,cols,bw,timep,RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,RSB_BOOL_TRUE );	/* FIXME : generating triangular factors always ! */
+				if(!mtxAp)
+				{
+					RSB_STDERR(RSB_ERRM_IE);
+					{errval = RSB_ERR_GENERIC_ERROR; goto err;}
+				}
+
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_uaua operation benchmarking\n");*/
+					/* spmv_uaua operation benchmarking */
+					double complex *out=NULL,*rhs=NULL;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_complex_spmv_uaua;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_complex_spmv_uaua;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double complex library implementation for operation spmv_uaua */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_double_complex_spmv_uaua(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_complex_spmv_uaua;}
+					++moi;
+
+					erri_double_complex_spmv_uaua:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_uauz operation benchmarking\n");*/
+					/* spmv_uauz operation benchmarking */
+					double complex *out=NULL,*rhs=NULL;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_complex_spmv_uauz;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_complex_spmv_uauz;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double complex library implementation for operation spmv_uauz */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_double_complex_spmv_uauz(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_complex_spmv_uauz;}
+					++moi;
+
+					erri_double_complex_spmv_uauz:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_uxua operation benchmarking\n");*/
+					/* spmv_uxua operation benchmarking */
+					double complex *out=NULL,*rhs=NULL;
+					double alpha=1.0;	/* FIXME */
+					double * alphap = &alpha;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_complex_spmv_uxua;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_complex_spmv_uxua;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double complex library implementation for operation spmv_uxua */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_double_complex_spmv_uxua(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,alphap,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_complex_spmv_uxua;}
+					++moi;
+
+					erri_double_complex_spmv_uxua:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_unua operation benchmarking\n");*/
+					/* spmv_unua operation benchmarking */
+					double complex *out=NULL,*rhs=NULL;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_complex_spmv_unua;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_complex_spmv_unua;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double complex library implementation for operation spmv_unua */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_double_complex_spmv_unua(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_complex_spmv_unua;}
+					++moi;
+
+					erri_double_complex_spmv_unua:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_sasa operation benchmarking\n");*/
+					/* spmv_sasa operation benchmarking */
+					double complex *out=NULL,*rhs=NULL;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_complex_spmv_sasa;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_complex_spmv_sasa;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double complex library implementation for operation spmv_sasa */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_double_complex_spmv_sasa(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,incx,incy,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_complex_spmv_sasa;}
+					++moi;
+
+					erri_double_complex_spmv_sasa:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spsv_uxua operation benchmarking\n");*/
+					/* spsv_uxua operation benchmarking */
+					double complex *out=NULL,*rhs=NULL;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_complex_spsv_uxua;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_complex_spsv_uxua;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double complex library implementation for operation spsv_uxua */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_double_complex_spsv_uxua(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_complex_spsv_uxua;}
+					++moi;
+
+					erri_double_complex_spsv_uxua:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spmv_sxsa operation benchmarking\n");*/
+					/* spmv_sxsa operation benchmarking */
+					double complex *out=NULL,*rhs=NULL;
+					double alpha=1.0;	/* FIXME */
+					double * alphap = &alpha;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_complex_spmv_sxsa;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_complex_spmv_sxsa;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double complex library implementation for operation spmv_sxsa */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_double_complex_spmv_sxsa(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,alphap,incx,incy,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_complex_spmv_sxsa;}
+					++moi;
+
+					erri_double_complex_spmv_sxsa:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("spsv_sxsx operation benchmarking\n");*/
+					/* spsv_sxsx operation benchmarking */
+					double complex *out=NULL,*rhs=NULL;
+			double alpha=1.0;/* FIXME */
+			double * alphap = &alpha;
+
+					
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+					rsb_coo_idx_t incx=1,incy=1;
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_complex_spsv_sxsx;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_double_complex_spsv_sxsx;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double complex library implementation for operation spsv_sxsx */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_double_complex_spsv_sxsx(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,rhs,out,alphap,incx,incy,transA);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_complex_spsv_sxsx;}
+					++moi;
+
+					erri_double_complex_spsv_sxsx:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("infty_norm operation benchmarking\n");*/
+					/* infty_norm operation benchmarking */
+					double complex * row_sums;
+
+					
+					row_sums = rsb__malloc(mtxAp->el_size*(rows+br));
+					if(!row_sums) {errval = RSB_ERR_ENOMEM;goto erri_double_complex_infty_norm;}
+					if(rsb__fill_with_ones(row_sums,mtxAp->typecode,cols,1))     {errval = RSB_ERR_ENOMEM;goto erri_double_complex_infty_norm;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double complex library implementation for operation infty_norm */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_double_complex_infty_norm(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,transA,row_sums);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_complex_infty_norm;}
+					++moi;
+
+					erri_double_complex_infty_norm:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(row_sums);
+				}
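+				/*
+				 * Editorial note: the e_mflops figure computed above is the measured
+				 * Mflops rate divided by the fill-in, i.e. the ratio of stored to
+				 * structural nonzeroes; padding introduced by blocking inflates the
+				 * raw flop count without doing useful work. Hypothetical numbers:
+				 */
+#if 0
+				{
+					const double m_flops_rate = 600.0;	/* raw measured Mflops/s */
+					const double fillin = 1.5;	/* 1.5 stored elements per structural nonzero */
+					const double e_mflops = m_flops_rate / fillin;	/* 400 effective Mflops/s */
+				}
+#endif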
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("rowssums operation benchmarking\n");*/
+					/* rowssums operation benchmarking */
+					double complex * row_sums;
+
+					
+					row_sums = rsb__malloc(mtxAp->el_size*(rows+br));
+					if(!row_sums) {errval = RSB_ERR_ENOMEM;goto erri_double_complex_rowssums;}
+					if(rsb__fill_with_ones(row_sums,mtxAp->typecode,cols,1))     {errval = RSB_ERR_ENOMEM;goto erri_double_complex_rowssums;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double complex library implementation for operation rowssums */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_double_complex_rowssums(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,transA,row_sums);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_complex_rowssums;}
+					++moi;
+
+					erri_double_complex_rowssums:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(row_sums);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("scale operation benchmarking\n");*/
+					/* scale operation benchmarking */
+
+					
+					double complex * scale_factors = rsb__malloc(mtxAp->el_size*(rows+br));
+					if(!scale_factors) {errval = RSB_ERR_ENOMEM;goto erri_double_complex_scale;}
+					if(rsb__fill_with_ones(scale_factors,mtxAp->typecode,rows,1))     {errval = RSB_ERR_ENOMEM;goto erri_double_complex_scale;}
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double complex library implementation for operation scale */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 
+rsb__do_benchmark_double_complex_scale(&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),mtxAp,transA,scale_factors);
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_complex_scale;}
+					++moi;
+
+					erri_double_complex_scale:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+					RSB_CONDITIONAL_FREE(scale_factors);
+				}
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("mat_stats operation benchmarking\n");*/
+					/* mat_stats operation benchmarking */
+
+					
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our double complex library implementation for operation mat_stats */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = 0;/* meta-op : we already measured matrix creation time  */
+grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]=time;
+grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]=((double)rsb__do_get_matrix_nnz(mtxAp))/1000000;
+/* FIXME : this is experimental and unfinished code */
+
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_double_complex_mat_stats;}
+					++moi;
+
+					erri_double_complex_mat_stats:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+				}
+				RSB_MTX_FREE(mtxAp);
+				++si;
+			}	
+		}
+	}
+	{
+		rsb_int moi;
+		rsb_char_t * mops[] = RSB_MATRIX_OPS_ARRAY;
+		rsb_char_t * types[] = RSB_MATRIX_TYPES_ARRAY;
+		rsb_char_t s[128];
+		rsb__print_mop_reference_performance_info_header();
+		for(moi=0;moi<RSB_IMPLEMENTED_META_MOPS;++moi)
+		{	
+/*			rsb_int si;*/
+			/* informational printout */
+			sprintf(s,"%s\t%s\t",types[ti], mops[moi]);
+			rsb__print_mop_reference_performance_info(&(grpi.gpi[ti].pipmo[moi]),s);
+/*			for(si=0;si<RSB_FITTING_SAMPLES;++si)*/
+/*				rsb__dump_performance_info(&(grpi.gpi[ti].pipmo[moi].pipfs[si]), NULL);*/
+		}
+	}
+	++ti;
+	tot_secs += rsb_time();
+	RSB_STDERR("#reference benchmarking took %lg seconds (predicted %lg :)....\n",tot_secs,pred_secs);
+
+	grpi.initialized=1;	/* FIXME : only partially */
+	//rsb__dump_global_reference_performance_info(&grpi);
+#if RSB_WANT_PERFORMANCE_FILE
+	rsb__save_global_reference_performance_info(&grpi);
+#endif /* RSB_WANT_PERFORMANCE_FILE */
+	return RSB_ERR_NO_ERROR;	/* FIXME : temporary */
+
+	ti=0;	/* type index */
+	for(ti=0;ti<RSB_IMPLEMENTED_TYPES	;++ti)
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_blk_idx_t bc = cua[ci];
+			rsb_int moi=0;	/* matrix operation index */
+			for(moi=0;moi<RSB_IMPLEMENTED_META_MOPS ;++moi)
+			{
+				rsb_int si=0;	/* sample index */
+
+				double y[RSB_FITTING_SAMPLES];
+				double * x = grpi.gpi[ti].pipmo[moi].blocks_per_row;
+
+				for(si=0;si< RSB_FITTING_SAMPLES ;++si)
+				{
+					/* we tune our mtype library implementation for operation mop */
+						y[si] = 
+							grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]/
+							grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci];
+				}
+
+				/*
+				 * FIXME : make this fitting analysis offline with respect to our benchmark!
+				 */
+				errval = rsb_fit_hyp(
+						x, y, 3, 
+						&(grpi.gpi[ti].pipmo[moi].alpha[ri][ci]),
+						&(grpi.gpi[ti].pipmo[moi].beta [ri][ci]),
+						&(grpi.gpi[ti].pipmo[moi].gamma[ri][ci]), (double)bc
+						/* FIXME : is this right ?*/
+					);
+				if(RSB_SOME_ERROR(errval))goto err;
+			}
+		}
+	}
+
+	if( rsb_lib_exit(RSB_NULL_EXIT_OPTIONS) )
+		return RSB_ERR_INTERNAL_ERROR;
+
+	return RSB_ERR_NO_ERROR;
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+/* @endcond */
diff --git a/rsb_bench.h b/rsb_bench.h
new file mode 100644
index 0000000..60998f7
--- /dev/null
+++ b/rsb_bench.h
@@ -0,0 +1,96 @@
+/* @cond INNERDOC */
+/*!
+ @file
+ @brief
+
+ Performance info gathering code. (OBSOLETE)
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+
+#ifndef RSB_BENCH_H_INCLUDED
+#define RSB_BENCH_H_INCLUDED
+
+/*!
+ @file
+ @brief
+ Performance kernels dispatching code, for each type, submatrix size, operation.
+ But for block compressed sparse stripes format.
+ Kernels unrolled, with no loops, for only user-specified blockings.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#include "rsb_internals.h"
+#ifdef RSB_HAVE_CBLAS_H
+#include <cblas.h>
+#endif /* RSB_HAVE_CBLAS_H */
+#ifdef RSB_HAVE_CLAPACK_H
+#include <clapack.h>
+#endif /* RSB_HAVE_CLAPACK_H */
+#include <math.h>
+rsb_err_t rsb_fit_hyp(double x[], double y[], size_t nb_loop, double * a, double * b, double *c, double c_s);
+
+rsb_err_t rsb__do_referencebenchmark(void);
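+
+/*
+ * Editorial usage sketch (illustrative; these are internal symbols, not a
+ * public API). rsb__do_referencebenchmark() intentionally skips the internal
+ * rsb_lib_init() call, so a caller is expected to have initialized the
+ * library first:
+ *
+ *	if(RSB_SOME_ERROR(rsb_lib_init(RSB_NULL_INIT_OPTIONS))) return -1;
+ *	if(RSB_SOME_ERROR(rsb__do_referencebenchmark())) return -1;
+ *	rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
+ */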
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+#endif /* RSB_BENCH_H_INCLUDED */
+
+/* @endcond */
diff --git a/rsb_bench.m4 b/rsb_bench.m4
new file mode 100644
index 0000000..2127a81
--- /dev/null
+++ b/rsb_bench.m4
@@ -0,0 +1,629 @@
+dnl
+dnl
+dnl	@author: Michele Martone
+dnl
+/* @cond INNERDOC */
+/*!
+ @file
+ @brief
+
+ Performance info gathering code. (OBSOLETE)
+ */
+dnl
+include(`rsb_misc.m4')dnl
+dnl
+RSB_M4_HEADER_MESSAGE()dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`
+#ifndef RSB_BENCH_H_INCLUDED
+#define RSB_BENCH_H_INCLUDED
+')
+dnl
+include(`do_unroll.m4')dnl
+include(`rsb_krnl_vb_macros.m4')dnl
+include(`rsb_krnl_macros.m4')dnl
+dnl
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+dnl
+#include "rsb_internals.h"
+dnl
+#ifdef RSB_HAVE_CBLAS_H
+#include <cblas.h>
+#endif /* RSB_HAVE_CBLAS_H */
+#ifdef RSB_HAVE_CLAPACK_H
+#include <clapack.h>
+#endif /* RSB_HAVE_CLAPACK_H */
+#include <math.h>
+dnl
+dnl
+dnl
+dnl	RSB_M4_HYPERBOLIC_FITTING_FUNCTION_ARGS()
+dnl	---------------------------------
+dnl
+define(`RSB_M4_HYPERBOLIC_FITTING_FUNCTION_ARGS',`dnl
+dnl
+`(double x[], double y[], size_t nb_loop, double * a, double * b, double *c, double c_s)'dnl
+dnl
+')dnl
+dnl
+dnl
+dnl	RSB_M4_HYPERBOLIC_FITTING_FUNCTION_IDENTIFIER()
+dnl	---------------------------------
+dnl
+define(`RSB_M4_HYPERBOLIC_FITTING_FUNCTION_IDENTIFIER',`dnl
+dnl
+`rsb_fit_hyp'dnl
+dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_HYPERBOLIC_FITTING_FUNCTION()
+dnl	---------------------------------
+dnl
+define(`RSB_M4_HYPERBOLIC_FITTING_FUNCTION',`dnl
+dnl
+rsb_err_t RSB_M4_HYPERBOLIC_FITTING_FUNCTION_IDENTIFIER()`'dnl
+RSB_M4_HYPERBOLIC_FITTING_FUNCTION_ARGS()`'dnl
+ifdef(`ONLY_WANT_HEADERS',`;
+',`
+{
+#if !(RSB_HAVE_CLAPACK && RSB_HAVE_CBLAS)
+	return RSB_ERR_UNSUPPORTED_OPERATION;
+#else
+	/**
+	 * \ingroup gr_bench
+         * Note : 
+	 * 
+	 * This function computes a performance predictor based on the
+         * nonzeroes-per-row ratio, by fitting the two input vectors x (nonzeroes
+         * per row) and y (megaflops), both with n = RSB_FITTING_SAMPLES points,
+         * to the following formula :
+         *
+         *           `megaflops(nnz_per_row) = a + b / ( c + nnz_per_row )'
+         *
+         * The c_s and nb_loop arguments will be documented some day.
+         *
+	 * This model is discussed in the following article :
+
+@article{ButtEijkLang:spmvp,
+  title = {Performance Optimization and Modeling of Blocked Sparse Kernels},
+  author = {Buttari, Alfredo and Eijkhout, Victor and Langou, Julien and Filippone, Salvatore},
+  pages = {467--484},
+  year = 2007,
+  journal = {IJHPCA},
+  volume = 21,
+  url = {\url{{http://www.tacc.utexas.edu/~eijkhout/Articles/2007-buttari-spmvp.pdf}}}
+}
+         *
+         */
+
+	rsb_int nparms=3;
+	rsb_int n = RSB_FITTING_SAMPLES;
+	/* Fortran arrays */
+#define RSB_FORTRAN_ARRAY(AI,ROWS,COLS) AI[(ROWS)*(COLS)]
+
+	rsb_int nj = 3;
+	rsb_int i,j;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	double RSB_FORTRAN_ARRAY(G ,n,3);
+	double RSB_FORTRAN_ARRAY(G1,n,3);
+	double RSB_FORTRAN_ARRAY(GG,3,3);
+	double RSB_FORTRAN_ARRAY(z ,n,1);
+	double RSB_FORTRAN_ARRAY(z0,n,1);
+	double RSB_FORTRAN_ARRAY(dy,n,1);
+	double RSB_FORTRAN_ARRAY(ddy,3,1);
+	double RSB_FORTRAN_ARRAY(xj ,nj,1);
+	double RSB_FORTRAN_ARRAY(yj ,nj,1);
+	double RSB_FORTRAN_ARRAY(zj ,nj,1);
+
+	double xcpy[n];
+	double a_t,b_t,sum1,sum2,sum3,sum4,error,tmp_a,tmp_b,tmp_c, min_err,max,min,avg,intl;
+  	int /*i,*/info,ipivot[3],/*nj,j,*/k,cnt;
+	rsb_memcpy(xcpy,x,sizeof(xcpy));	/* not a bit more .. and please note that sizeof(x)=sizeof(double*) != sizeof(x[n])*/
+
+
+	RSB_INFO("starting analysis...\n");
+	RSB_STDOUT("\n");
+	RSB_STDOUT("performance data:\n");
+	for(i=0;i<n;++i)
+	{
+		RSB_STDOUT("%lg %lg\n",xcpy[i],y[i]);
+	}
+
+	sum1=0;
+	sum2=0;
+	sum3=0;
+	sum4=0;
+
+
+  	*a=y[n-1];
+	
+	rsb_memcpy(xj,x,sizeof(xj));	/* not a bit more */
+	rsb_memcpy(yj,y,sizeof(yj));	/* not a bit more */
+
+	for(i=0;i<nj;++i)
+  	{
+		zj[i]=yj[i]-*a;
+  		zj[i]=1/zj[i];
+	}
+
+	for(i=0;i<nj;++i)
+	{
+		sum1=sum1 + xj[i]*zj[i];
+		sum2=sum2 + xj[i];
+		sum3=sum3 + zj[i];
+		sum4=sum4 + xj[i]*xj[i];
+	}
+
+	a_t= (sum3*sum4-sum2*sum1)/(nj*sum4-sum2*sum2);
+	b_t=(nj*sum1 - sum2*sum3) / (nj*sum4 - sum2*sum2);
+
+  	*b=1/b_t;
+	*c=a_t* *b;
+
+	for(i=0;i<n;++i)
+		z0[i]= *a +*b/(x[i]+*c);
+
+	error = 0;
+	for(j=0;j<n;++j)
+		error = error + (fabs( z0[j] - y[j] ) / y[j] );
+
+	error = error / n * 100;
+
+	min_err=error;
+
+	tmp_a=*a;
+	tmp_b=*b;
+	tmp_c=*c;
+
+	for(i=0;i<nb_loop;++i)
+	{
+		for(j=0;j<n;++j)
+			dy[j] = z0[j]-y[j];
+
+		for(j=0;j<n;++j)
+		{
+			G[j+0*n]=1;
+			G[j+1*n]=1/(x[j]+tmp_c);
+			G[j+2*n]=-tmp_b/( (x[j]+tmp_c)*(x[j]+tmp_c) );
+
+			G1[j+0*n]= G[j+0*n];
+			G1[j+1*n]= G[j+1*n];
+			G1[j+2*n]= G[j+2*n];
+		}
+
+#if (RSB_HAVE_CLAPACK && RSB_HAVE_CBLAS)
+		cblas_dgemm(CblasColMajor,CblasTrans,CblasNoTrans,3,3,n,1.0,G,n,G1,n,0.0,GG,3);
+		errval =  clapack_dgetrf(CblasColMajor,3,3,GG,3,ipivot);
+		if(RSB_SOME_ERROR(errval)) goto err;
+		cblas_dgemv(CblasColMajor,CblasTrans,n,3,1.0,G,n,dy,1,0.0,ddy,1);
+		errval =  clapack_dgetrs(CblasColMajor,CblasNoTrans,3,1,GG,3,ipivot,ddy,3);
+		if(RSB_SOME_ERROR(errval)) goto err;
+#else /* (RSB_HAVE_CLAPACK && RSB_HAVE_CBLAS) */
+#endif /* (RSB_HAVE_CLAPACK && RSB_HAVE_CBLAS) */
+	
+		tmp_a = tmp_a-ddy[1-1];
+		tmp_b = tmp_b-ddy[2-1];
+		tmp_c = tmp_c-ddy[3-1];
+
+		for(j=0;j<n;++j)
+			z0[j]= tmp_a +tmp_b/(x[j]+tmp_c);
+
+		error = 0;
+		for(j=0;j<n;++j)
+	       		error = error + (fabs( z0[j] - y[j] ) / y[j] );
+
+		error = error / n * 100;
+		if(error < min_err)
+		{
+		        *a=tmp_a;
+		        *b=tmp_b;
+		        *c=tmp_c;
+		}
+	}
+
+	if((*c< 0) && (*c  < c_s))
+	{
+		*c=10000;
+		*b=10000;
+		avg=0;
+		max=y[0];
+		min=y[0];
+		for(i=0;i<n;++i)
+		{
+		        if (y[i] > max) max=y[i];
+		        if (y[i] < min) min=y[i];
+		        avg=avg+y[i];
+		}
+		avg=avg/(double)(n);
+		*a=avg;
+		intl=max-min;
+		avg=0;
+		cnt=0;
+		for(i=0;i<n;++i)
+		{
+        		if (fabs(y[i]-avg) < (0.3*intl))
+			{
+				avg = avg + y[i];
+				cnt=cnt+1;
+			}
+		}
+     		if(cnt > 0) *a=avg/(double)cnt;
+	}
+	else
+  	if (*b >= 0)
+	{
+		*c=10000;
+		*b=10000;
+		avg=0;
+		max=y[0];
+		min=y[0];
+		for(i=0;i<n;++i)
+		{
+			if (y[i] > max) max=y[i];
+			if (y[i] < min) min=y[i];
+			avg=avg+y[i];
+		}
+		avg=avg/(double)n;
+		intl=max-min;
+		avg=0;
+		cnt=0;
+		for(i=0;i<n;++i)
+		{
+		        if (fabs(y[i]-avg) < (0.3*intl))
+			{
+				avg = avg + y[i];
+				cnt=cnt+1;
+			}
+		}
+		if(cnt > 0) *a=avg/ (double) cnt;
+	}
+
+
+	RSB_STDOUT("\n");
+	RSB_STDOUT("alpha:%lg beta:%lg gamma:%lg\n",*a,*b,*c);
+
+	RSB_STDOUT("\nfitting:\n");
+	for(i=0;i<n;++i)
+	{
+		RSB_STDOUT("%lg %lg\n", xcpy[i], *a+*b/(xcpy[i]+*c));
+	}
+
+	return RSB_ERR_NO_ERROR;
+	err:
+	RSB_DO_ERR_RETURN(errval)
+#endif /* RSB_HAVE_CLAPACK && RSB_HAVE_CBLAS */
+}
+')dnl
+')dnl
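+dnl
+/*
+ * Editorial sketch (illustrative, not part of the generated code): the
+ * closed-form seed used above to start the hyperbolic fit. If
+ * y = a + b / (x + c), then with a pinned to the observed asymptote,
+ * z = 1 / (y - a) is linear in x: z = (c/b) + (1/b) * x, so an ordinary
+ * least-squares line through (x, z) recovers b and c. Assumes n >= 3
+ * distinct abscissae and y[i] != y[n-1] for i < n-1.
+ */
+#if 0
+static void hyp_fit_seed(const double *x, const double *y, int n,
+		double *a, double *b, double *c)
+{
+	const int m = n - 1;	/* last point pins the asymptote, the rest fit the line */
+	double sx = 0, sz = 0, sxz = 0, sxx = 0;
+	int i;
+
+	*a = y[n-1];	/* asymptotic megaflops, as in the fit code above */
+	for(i = 0; i < m; ++i)
+	{
+		const double z = 1.0 / (y[i] - *a);	/* linearize the hyperbola */
+		sx += x[i]; sz += z; sxz += x[i]*z; sxx += x[i]*x[i];
+	}
+	{
+		const double b_t = (m*sxz - sx*sz) / (m*sxx - sx*sx);	/* slope: 1/b */
+		const double a_t = (sz*sxx - sx*sxz) / (m*sxx - sx*sx);	/* intercept: c/b */
+		*b = 1.0 / b_t;
+		*c = a_t * *b;
+	}
+}
+#endif
+dnl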
+dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_REFERENCEBENCHMARK_FUNCTION_ARGS()
+dnl	------------------------------------------------------------------
+dnl
+define(`RSB_M4_REFERENCEBENCHMARK_FUNCTION_ARGS',`dnl
+dnl
+`(void)'dnl
+dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_REFERENCEBENCHMARK_FUNCTION_IDENTIFIER()
+dnl	--------------------------------------------
+dnl
+define(`RSB_M4_REFERENCEBENCHMARK_FUNCTION_IDENTIFIER',`dnl
+dnl
+`rsb__do_referencebenchmark'dnl
+dnl
+dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_REFERENCEBENCHMARK_FUNCTION_NAME()
+dnl	--------------------------------------
+dnl
+define(`RSB_M4_REFERENCEBENCHMARK_FUNCTION_NAME',`dnl
+dnl
+rsb_err_t RSB_M4_REFERENCEBENCHMARK_FUNCTION_IDENTIFIER`'dnl
+dnl
+dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_REFERENCEBENCHMARK_FUNCTION()
+dnl	---------------------------------
+dnl
+define(`RSB_M4_REFERENCEBENCHMARK_FUNCTION',`dnl
+dnl
+RSB_M4_REFERENCEBENCHMARK_FUNCTION_NAME`'dnl
+RSB_M4_REFERENCEBENCHMARK_FUNCTION_ARGS`'dnl
+ifdef(`ONLY_WANT_HEADERS',`;
+',`
+{
+	/*!
+	 * \ingroup gr_bench
+	 * A complete benchmark program.
+	 * Will benchmark all supported matrix operations over all supported types
+	 * over all supported matrix partitionings.
+	 *
+	 * Moreover, it WILL perform analysis of performance data and dump the results.
+         *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 * FIXME : UNFINISHED: should process and dump this info in a header file.
+	 */
+	struct rsb_global_reference_performance_info_t grpi;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_blk_idx_t ri,ci;	/* row index, columns index */
+	rsb_coo_idx_t order=20000;
+	rsb_coo_idx_t rows=order,cols=order;	/* FIXME : TEMPORARY */
+	rsb_blk_idx_t rua[] = RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[] = RSB_COLUMNS_UNROLL_ARRAY;
+	double tot_secs=0.0,pred_secs=1.0;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	size_t kernels_n = RSB_ROWS_UNROLL_ARRAY_LENGTH*RSB_COLUMNS_UNROLL_ARRAY_LENGTH*RSB_IMPLEMENTED_MOPS*RSB_IMPLEMENTED_TYPES;
+	rsb_int ti=0;	/* type index */
+	int fbw,bwi;
+	RSB_BZERO_P(&grpi);
+
+	/* if((errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS))){goto err;} we skip this to enable calling this from within our library (FIXME) */
+
+	if(RSB_FITTING_SAMPLES<2)
+	{	
+		fbw=(RSB_FIRST_FITTING_SAMPLE_BW_MAX + RSB_FIRST_FITTING_SAMPLE_BW_MIN)/2;
+		bwi=fbw;
+	}
+	else
+	{
+		fbw = RSB_FIRST_FITTING_SAMPLE_BW_MIN;
+		bwi=(RSB_FIRST_FITTING_SAMPLE_BW_MAX - RSB_FIRST_FITTING_SAMPLE_BW_MIN)/(RSB_FITTING_SAMPLES-1);
+	}
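+	/*
+	 * Editorial worked example (hypothetical limits): with
+	 * RSB_FIRST_FITTING_SAMPLE_BW_MIN = 1, RSB_FIRST_FITTING_SAMPLE_BW_MAX = 9
+	 * and RSB_FITTING_SAMPLES = 3, the branch above yields fbw = 1 and
+	 * bwi = (9 - 1)/(3 - 1) = 4, i.e. bandwidth samples at bw = 1, 5, 9;
+	 * with fewer than two samples, a single midpoint bandwidth is used.
+	 */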
+	
+	tot_secs = -rsb_time();
+	pred_secs *= RSB_ROWS_UNROLL_ARRAY_LENGTH * RSB_COLUMNS_UNROLL_ARRAY_LENGTH * RSB_FITTING_SAMPLES * RSB_IMPLEMENTED_META_MOPS *  RSB_IMPLEMENTED_TYPES * RSB_BENCHMARK_MIN_SECONDS;
+	RSB_STDERR("#reference benchmarking of %zd kernels (no transposed, no symmetric, and so on) should take at least %lg seconds..\n",kernels_n,pred_secs);
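+	/*
+	 * Editorial worked example (hypothetical sizes): with 4 row unrolls, 4
+	 * column unrolls, 3 fitting samples, 10 meta-ops, 4 types and a minimum
+	 * of 0.1 s per measurement, pred_secs = 4*4*3*10*4*0.1 = 192 seconds;
+	 * kernels_n counts only distinct kernels, so this is a lower bound.
+	 */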
+
+foreach(`mtype',RSB_M4_MATRIX_TYPES,`dnl
+	/* mtype type benchmarking */
+/*	RSB_INFO("#mtype type benchmarking\n");*/
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_blk_idx_t br = rua[ri];
+			rsb_blk_idx_t bc = cua[ci];
+			rsb_coo_idx_t bw,mbw=(cols/bc);
+			rsb_int si=0;	/* sample index */
+			mbw=(cols-bc)/bc;	/* tune here to fill our matrix further */
+			/* FIXME : there is the danger of empty samples! */
+			for(bw=fbw;bw<=mbw && si< RSB_FITTING_SAMPLES ;bw+=bwi)	/* this parameter should be tunable, too */
+			{
+				//RSB_INFO("bw = %d\n",bw);
+				rsb_int moi=0;	/* matrix operation index */
+				double time,*timep=&time;
+				struct rsb_mtx_t * mtxAp =
+					rsb__generate_blocked_banded(br,bc,rows,cols,bw,timep,RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype),RSB_BOOL_TRUE );	/* FIXME : generating triangular factors always ! */
+				if(!mtxAp)
+				{
+					RSB_STDERR(RSB_ERRM_IE);
+					{errval = RSB_ERR_GENERIC_ERROR; goto err;}
+				}
+dnl				struct rsb_options_t * o = mtxAp->options;
+
+foreach(`mop',RSB_M4_MATRIX_META_OPS,`dnl
+				{
+/*					RSB_INFO("#mtype type, ");*/
+/*					RSB_INFO("mop operation benchmarking\n");*/
+					/* mop operation benchmarking */
+ifelse(RSB_M4_IS_SPXX_TWO_VECTORS_OPERATING_KERNEL_MOP(mop),1,`dnl
+					mtype *out=NULL,*rhs=NULL;
+')dnl
+ifelse(RSB_M4_IS_SPXX_OP_SCALING_KERNEL_MOP(mop),1,`dnl
+			double alpha=1.0;/* FIXME */
+			double * alphap = &alpha;
+')dnl
+ifelse(RSB_M4_IS_SPXX_SCALING_KERNEL_MOP(mop),1,`dnl
+			double beta =1.0;/* FIXME */
+			double * betap  = &beta ;
+')dnl
+ifelse(RSB_M4_IS_ACC_WRITING_KERNEL_MOP(mop),`1',`dnl
+					mtype * row_sums;
+')dnl
+
+					
+ifelse(RSB_M4_IS_ACC_WRITING_KERNEL_MOP(mop),`1',`dnl
+					row_sums = rsb__malloc(mtxAp->el_size*(rows+br));
+					if(!row_sums) {errval = RSB_ERR_ENOMEM;goto erri_`'RSB_M4_CHOPSPACES(mtype)`'`_'`'mop;}
+					if(rsb__fill_with_ones(row_sums,mtxAp->typecode,cols,1))     {errval = RSB_ERR_ENOMEM;goto erri_`'RSB_M4_CHOPSPACES(mtype)`'`_'`'mop;}
+')dnl
+ifelse(mop,`scale',`dnl
+					mtype * scale_factors = rsb__malloc(mtxAp->el_size*(rows+br));
+					if(!scale_factors) {errval = RSB_ERR_ENOMEM;goto erri_`'RSB_M4_CHOPSPACES(mtype)`'`_'`'mop;}
+					if(rsb__fill_with_ones(scale_factors,mtxAp->typecode,rows,1))     {errval = RSB_ERR_ENOMEM;goto erri_`'RSB_M4_CHOPSPACES(mtype)`'`_'`'mop;}
+')dnl
+ifelse(RSB_M4_IS_ACC_WRITING_KERNEL_MOP(mop),`1',`dnl
+')dnl
+ifelse(RSB_M4_IS_SPXX_TWO_VECTORS_OPERATING_KERNEL_MOP(mop),1,`dnl
+					rsb_coo_idx_t nrhs=4;
+					rsb_coo_idx_t bstride = cols+bc;
+					rsb_coo_idx_t cstride = rows+br;
+ifelse(RSB_M4_IS_STRIDED_KERNEL_MOP(mop),1,`dnl
+					rsb_coo_idx_t incx=1,incy=1;
+',`dnl
+					rsb_coo_idx_t incx=1,incy=1;
+')dnl
+					incx=1,incy=1;	/* this is just a pacifier for "unused variable"-like warnings */
+					rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+					out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+					if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_`'RSB_M4_CHOPSPACES(mtype)`'`_'`'mop;}
+					if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,1)){errval = RSB_ERR_ENOMEM;goto erri_`'RSB_M4_CHOPSPACES(mtype)`'`_'`'mop;}
+')dnl
+ifelse(mop,`negation',`dnl
+					int please_fix_RSB_M4_ARGS_TO_ACTUAL_ARGS=-1;/* here to fix negation */
+')dnl
+
+					grpi.gpi[ti].pipmo[moi].blocks_per_row[si]=bw*bc; /* FIXME : TEMPORARY !!  */
+
+					/* we benchmark our mtype library implementation for operation mop */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+					grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+					errval = dnl
+ifelse(RSB_M4_MATRIX_OP_IS_META_OP(mop),`1',dnl
+`0;/* meta-op : we already measured matrix creation time  */
+grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]=time;
+grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]=((double)rsb__do_get_matrix_nnz(mtxAp))/1000000;
+/* FIXME : this is experimental and unfinished code */
+',`
+RSB_M4_DIRECT_KERNEL_DISPATCH_BENCHMARK_FUNCTION_IDENTIFIER(mop,mtype)(dnl 
+&(grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci]),dnl
+&(grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]),dnl
+RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION_ACTUAL_ARGS(mop,mtype));')
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci]  = rsb__do_get_matrix_fillin(mtxAp); 
+					grpi.gpi[ti].pipmo[moi].pipfs[si].rows = rows;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].cols = cols;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].nnz  = rsb__do_get_matrix_nnz(mtxAp) ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].flags= mtxAp->flags ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].storage= mtxAp->matrix_storage ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].typecode= mtxAp->typecode ;
+					grpi.gpi[ti].pipmo[moi].pipfs[si].element_count= mtxAp->element_count;
+
+					grpi.gpi[ti].pipmo[moi].pipfs[si].e_mflops[ri][ci] = 
+						grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci] /
+						grpi.gpi[ti].pipmo[moi].pipfs[si].fillin[ri][ci];
+
+					if(RSB_SOME_ERROR(errval)){goto erri_`'RSB_M4_CHOPSPACES(mtype)`'`_'`'mop;}
+					++moi;
+
+					erri_`'RSB_M4_CHOPSPACES(mtype)`'`_'`'mop:
+					if(RSB_SOME_ERROR(errval))goto err;
+
+					RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+ifelse(RSB_M4_IS_ACC_WRITING_KERNEL_MOP(mop),`1',`dnl
+					RSB_CONDITIONAL_FREE(row_sums);
+')dnl
+ifelse(RSB_M4_IS_SPXX_TWO_VECTORS_OPERATING_KERNEL_MOP(mop),1,`dnl
+					RSB_CONDITIONAL_FREE(out);
+					RSB_CONDITIONAL_FREE(rhs);
+')dnl
+ifelse(mop,`scale',`dnl
+					RSB_CONDITIONAL_FREE(scale_factors);
+')dnl
+				}
+')dnl
+				RSB_MTX_FREE(mtxAp);
+				++si;
+			}	
+		}
+	}
+	{
+		rsb_int moi;
+		rsb_char_t * mops[] = RSB_M4_MATRIX_META_OPS_ARRAY;
+		rsb_char_t * types[] = RSB_M4_MATRIX_TYPES_ARRAY;
+		rsb_char_t s[RSB_M4_BUFLEN];
+		rsb__print_mop_reference_performance_info_header();
+		for(moi=0;moi<RSB_IMPLEMENTED_META_MOPS;++moi)
+		{	
+/*			rsb_int si;*/
+			/* informational printout */
+			sprintf(s,"%s\t%s\t",types[ti], mops[moi]);
+			rsb__print_mop_reference_performance_info(&(grpi.gpi[ti].pipmo[moi]),s);
+/*			for(si=0;si<RSB_FITTING_SAMPLES;++si)*/
+/*				rsb__dump_performance_info(&(grpi.gpi[ti].pipmo[moi].pipfs[si]), NULL);*/
+		}
+	}
+	++ti;
+')dnl
+	tot_secs += rsb_time();
+	RSB_STDERR("#reference benchmarking took %lg seconds (predicted %lg :)....\n",tot_secs,pred_secs);
+
+	grpi.initialized=1;	/* FIXME : only partially */
+	//rsb__dump_global_reference_performance_info(&grpi);
+#if RSB_WANT_PERFORMANCE_FILE
+	rsb__save_global_reference_performance_info(&grpi);
+#endif /* RSB_WANT_PERFORMANCE_FILE */
+	return RSB_ERR_NO_ERROR;	/* FIXME : temporary */
+
+	ti=0;	/* type index */
+	for(ti=0;ti<RSB_IMPLEMENTED_TYPES	;++ti)
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_blk_idx_t bc = cua[ci];
+			rsb_int moi=0;	/* matrix operation index */
+			for(moi=0;moi<RSB_IMPLEMENTED_META_MOPS ;++moi)
+			{
+				rsb_int si=0;	/* sample index */
+
+				double y[RSB_FITTING_SAMPLES];
+				double * x = grpi.gpi[ti].pipmo[moi].blocks_per_row;
+
+				for(si=0;si< RSB_FITTING_SAMPLES ;++si)
+				{
+					/* we tune our mtype library implementation for operation mop */
+						y[si] = 
+							grpi.gpi[ti].pipmo[moi].pipfs[si].m_flops[ri][ci]/
+							grpi.gpi[ti].pipmo[moi].pipfs[si].seconds[ri][ci];
+				}
+
+				/*
+				 * FIXME : make this fitting analysis offline with respect to our benchmark!
+				 */
+				errval = RSB_M4_HYPERBOLIC_FITTING_FUNCTION_IDENTIFIER()(
+						x, y, 3, 
+						&(grpi.gpi[ti].pipmo[moi].alpha[ri][ci]),
+						&(grpi.gpi[ti].pipmo[moi].beta [ri][ci]),
+						&(grpi.gpi[ti].pipmo[moi].gamma[ri][ci]), (double)bc
+						/* FIXME : is this right ?*/
+					);
+				if(RSB_SOME_ERROR(errval))goto err;
+			}
+		}
+	}
+
+	if( rsb_lib_exit(RSB_NULL_EXIT_OPTIONS) )
+		return RSB_ERR_INTERNAL_ERROR;
+
+	return RSB_ERR_NO_ERROR;
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+RSB_M4_HYPERBOLIC_FITTING_FUNCTION()
+RSB_M4_REFERENCEBENCHMARK_FUNCTION()
+dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`
+#endif /* RSB_BENCH_H_INCLUDED */
+')
+/* @endcond */
+dnl
diff --git a/rsb_bio.c b/rsb_bio.c
new file mode 100644
index 0000000..a7e4e31
--- /dev/null
+++ b/rsb_bio.c
@@ -0,0 +1,551 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains matrix binary I/O functions.
+ * */
+/*
+ * RSB_HAVE_RPC_XDR_H -> RSB_WANT_XDR_SUPPORT
+ * */
+
+#include "rsb_common.h"
+#if RSB_WANT_XDR_SUPPORT
+//#ifndef RSB_HAVE_RPC_XDR_H
+#include <rpc/xdr.h>
+
+/* on-disk header field widths (signature | version string | padding):
+   |  5  | >0 && <RSB_VSL |             50>RSB_VSL                           |  */
+#define RSB_BINARY_SPARSE_MATRIX_FILE_SIGNATURE "%RSB-"
+#ifndef RSB_PACKAGE_VERSION
+#define RSB_PACKAGE_VERSION "?"
+#endif
+#define RSB_BINARY_SPARSE_MATRIX_FILE_HEADER RSB_BINARY_SPARSE_MATRIX_FILE_SIGNATURE""RSB_PACKAGE_VERSION"                                                  "
+#define RSB_BINARY_SPARSE_MATRIX_FILE_SIGNATURE_LEN   5
+#define RSB_BINARY_SPARSE_MATRIX_FILE_HEADER_LEN     32	/* the first RSB_BINARY_SPARSE_MATRIX_FILE_HEADER_LEN bytes of RSB_BINARY_SPARSE_MATRIX_FILE_HEADER are written at the beginning of the file */
+
+#define RSB_XDR_ERROR {errval = RSB_ERR_INTERNAL_ERROR;RSB_PERR_GOTO(err,RSB_ERRM_ES);}
+#define RSB_XDR_LOAD_ERROR RSB_XDR_ERROR 
+#define RSB_XDR_SAVE_ERROR RSB_XDR_ERROR 
+#define RSB_XDR_SAVE_TRY(EXP) if(EXP!=1)RSB_XDR_ERROR 
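+
+/*
+ * Editorial sketch (illustrative only): the dump begins with a fixed 32-byte
+ * header whose first 5 bytes are the "%RSB-" signature; loading validates the
+ * signature alone, so the version part of the header is informational. A
+ * minimal check over an already-opened stream could look like:
+ */
+#if 0
+static int rsb_example_check_signature(FILE *fd)
+{
+	char h[RSB_BINARY_SPARSE_MATRIX_FILE_HEADER_LEN];
+	if(fread(h,sizeof(h),1,fd)!=1)
+		return -1;	/* short read */
+	return RSB_MEMCMP(h,RSB_BINARY_SPARSE_MATRIX_FILE_SIGNATURE,RSB_BINARY_SPARSE_MATRIX_FILE_SIGNATURE_LEN) ? -1 : 0;
+}
+#endif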
+
+rsb_err_t rsb__do_bindump_init(void)
+{
+	/*!
+	 * \ingroup gr_bio
+	 *
+	 * \return RSB_ERR_NO_ERROR if the binary dumping of matrices is supported, or RSB_ERR_UNSUPPORTED_FEATURE.
+	 */
+#if RSB_WANT_XDR_SUPPORT
+	rsb_err_t errval = RSB_ERR_UNSUPPORTED_FEATURE;
+#else
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+#endif /* RSB_WANT_XDR_SUPPORT */
+	RSB_DO_ERR_RETURN(errval)
+}
+
+
+
+#if RSB_WANT_XDR_SUPPORT
+static rsb_err_t rsb_do_rw_matrix_dimensions_xdr(struct rsb_mtx_t * mtxAp, XDR *xdrsp)
+{
+	/*!
+	 * \ingroup gr_bio
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	uint64_t el_size_ = mtxAp->el_size;
+	uint64_t element_count_ = mtxAp->element_count;
+
+	RSB_XDR_SAVE_TRY(xdr_int32_t(xdrsp,&(mtxAp->nnz)));
+	RSB_XDR_SAVE_TRY(xdr_uint64_t(xdrsp,&(el_size_)));
+	RSB_XDR_SAVE_TRY(xdr_uint64_t(xdrsp,&(element_count_)));
+	RSB_XDR_SAVE_TRY(xdr_int32_t(xdrsp,&(mtxAp->block_count)));
+	RSB_XDR_SAVE_TRY(xdr_int32_t(xdrsp,&(mtxAp->all_leaf_matrices_n)));
+	RSB_XDR_SAVE_TRY(xdr_int32_t(xdrsp,&(mtxAp->nr)));
+	RSB_XDR_SAVE_TRY(xdr_int32_t(xdrsp,&(mtxAp->nc)));
+	RSB_XDR_SAVE_TRY(xdr_char(xdrsp,&(mtxAp->typecode)));
+	RSB_XDR_SAVE_TRY(xdr_int32_t(xdrsp,&(mtxAp->flags)));
+	RSB_XDR_SAVE_TRY(xdr_int32_t(xdrsp,&(mtxAp->roff)));
+	RSB_XDR_SAVE_TRY(xdr_int32_t(xdrsp,&(mtxAp->coff)));
+	RSB_XDR_SAVE_TRY(xdr_int32_t(xdrsp,&(mtxAp->bm)));
+	RSB_XDR_SAVE_TRY(xdr_int32_t(xdrsp,&(mtxAp->bk)));
+	RSB_XDR_SAVE_TRY(xdr_int32_t(xdrsp,&(mtxAp->broff)));
+	RSB_XDR_SAVE_TRY(xdr_int32_t(xdrsp,&(mtxAp->bcoff)));
+	RSB_XDR_SAVE_TRY(xdr_int32_t(xdrsp,&(mtxAp->roff)));
+	RSB_XDR_SAVE_TRY(xdr_int32_t(xdrsp,&(mtxAp->coff)));
+	RSB_XDR_SAVE_TRY(xdr_int32_t(xdrsp,&(mtxAp->nzoff)));
+	mtxAp->element_count = element_count_;
+	mtxAp->el_size = el_size_;
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+static rsb_err_t rsb_do_rw_matrix_times_xdr(struct rsb_mtx_t * mtxAp, XDR *xdrsp)
+{
+	/*!
+	 * \ingroup gr_bio
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_XDR_SAVE_TRY(xdr_double(xdrsp,&(mtxAp->sat)));
+	RSB_XDR_SAVE_TRY(xdr_double(xdrsp,&(mtxAp->eit)));
+	RSB_XDR_SAVE_TRY(xdr_double(xdrsp,&(mtxAp->est)));
+	RSB_XDR_SAVE_TRY(xdr_double(xdrsp,&(mtxAp->pet)));
+	RSB_XDR_SAVE_TRY(xdr_double(xdrsp,&(mtxAp->cpt)));
+	RSB_XDR_SAVE_TRY(xdr_double(xdrsp,&(mtxAp->rpt)));
+	RSB_XDR_SAVE_TRY(xdr_double(xdrsp,&(mtxAp->tat)));
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+#endif
+
+#if RSB_WANT_XDR_SUPPORT
+static rsb_err_t rsb_do_rw_matrix_struct_xdr(struct rsb_mtx_t * mtxAp, struct rsb_mtx_t ** smp, XDR *xdrsp, const rsb_char_t rw)
+{
+	/*!
+	 * \ingroup gr_bio
+	 * FIXME: this code will break much of the library's configurability;
+	 * to fix this, one should use an intermediate struct before saving.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	errval = rsb_do_rw_matrix_dimensions_xdr(mtxAp,xdrsp);
+	if(RSB_SOME_ERROR(errval))
+	{RSB_PERR_GOTO(err,RSB_ERRM_ES);}
+	errval = rsb_do_rw_matrix_times_xdr(mtxAp,xdrsp);
+	if(RSB_SOME_ERROR(errval))
+	{RSB_PERR_GOTO(err,RSB_ERRM_ES);}
+	if(rw=='w')
+	{
+		uint32_t submatrices = 0;
+		submatrices = (mtxAp->sm[0]!=NULL)*1+ (mtxAp->sm[1]!=NULL)*2+ (mtxAp->sm[2]!=NULL)*4+ (mtxAp->sm[3]!=NULL)*8;
+		RSB_XDR_SAVE_TRY(xdr_uint32_t(xdrsp,&(submatrices)));
+	}
+	else
+	if(rw=='r')
+	{
+		uint32_t submatrices = 0;
+		int i;
+		struct rsb_mtx_t * sm = *smp;
+		RSB_XDR_SAVE_TRY(xdr_uint32_t(xdrsp,&(submatrices)));
+		for(i=0;i<RSB_FOUR;++i)
+		if(submatrices&(1<<i))
+		{
+			mtxAp->sm[i] = sm++;
+		}
+		else
+			mtxAp->sm[i] = NULL;
+		*smp = sm;
+		RSB_DO_FLAG_ADD(mtxAp->flags,RSB_FLAG_ASSEMBLED_IN_COO_ARRAYS);
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
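+
+/*
+ * Editorial note: the four possible quadtree children are serialized above as
+ * a presence bitmask (bit i set iff sm[i] != NULL); e.g. a node keeping only
+ * children 0 and 3 stores 1|8 = 9. On load, the mask is expanded back into
+ * pointers carved sequentially from the arena holding all submatrices:
+ */
+#if 0
+static void rsb_example_unpack_children(struct rsb_mtx_t * mtxAp, struct rsb_mtx_t * arena, uint32_t mask)
+{
+	int i;
+	for(i=0;i<RSB_FOUR;++i)
+		mtxAp->sm[i] = (mask & (1u<<i)) ? arena++ : NULL;
+}
+#endif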
+
+static rsb_err_t rsb_do_rw_matrix_xdr_ia(struct rsb_mtx_t * mtxAp, struct rsb_mtx_t ** smp, rsb_nnz_idx_t *rnnzp, XDR *xdrsp, const rsb_char_t rw)
+{
+	/*!
+	 * \ingroup gr_bio
+	 * FIXME: this code will break much of the library's configurability;
+	 * to fix this, one should use an intermediate struct before saving.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(rw!='r' &&  rw!= 'w') {	errval = RSB_ERR_INTERNAL_ERROR; RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+	if(rw=='r')
+	{
+	}
+	else
+	{
+		/* FIXME: write me */
+	}
+	errval = rsb_do_rw_matrix_struct_xdr(mtxAp,smp,xdrsp,rw);
+	errval = rsb__set_init_flags_and_stuff(mtxAp,NULL,NULL,mtxAp->nr,mtxAp->nc,mtxAp->nnz,mtxAp->block_count,mtxAp->element_count,mtxAp->typecode,mtxAp->flags);
+	if(RSB_SOME_ERROR(errval))
+	{RSB_PERR_GOTO(err,RSB_ERRM_ES);}
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t i = 0,j = 0;
+		struct rsb_mtx_t * submatrix = NULL;
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix)
+			RSB_DO_ERROR_CUMULATE(errval,rsb_do_rw_matrix_xdr_ia(submatrix,smp,rnnzp,xdrsp,rw));
+	}
+	else
+	{
+		rsb_nnz_idx_t n = 0;
+		rsb_nnz_idx_t * bpntr = NULL;
+		if(smp && rw=='r')
+			bpntr = (rsb_nnz_idx_t*)(*smp),
+			mtxAp->bpntr = bpntr;
+
+		if(mtxAp->bpntr)
+		for(n=0;n<mtxAp->Mdim+1;++n)
+		RSB_XDR_SAVE_TRY(xdr_int32_t(xdrsp,mtxAp->bpntr+n));
+
+		*rnnzp += mtxAp->nnz;
+		if(smp && rw=='r')
+			*smp = (struct rsb_mtx_t*)(bpntr+mtxAp->Mdim+1);
+	/*  
+		Now it remains:
+		void * VA;
+		rsb_nnz_idx_t  *indptr;
+		rsb_coo_idx_t	*bindx;
+		rsb_coo_idx_t	*rpntr;
+		rsb_coo_idx_t *cpntr;
+		rsb_coo_idx_t *mpntr,*Mpntr;
+		rsb_nnz_idx_t *bpntr;
+		struct rsb_options_t *options;
+	
+		struct rsb_mtx_t * sm[RSB_FOUR];
+		struct rsb_expected_info_t einfo;
+		struct rsb_translated_matrix_t * all_leaf_matrices;
+	 */
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+static rsb_err_t rsb_do_rw_matrix_xdr_ja(struct rsb_mtx_t * mtxAp, rsb_coo_idx_t * JA, rsb_nnz_idx_t *rnnzp, XDR *xdrsp, const rsb_char_t rw)
+{
+	/*!
+	 * \ingroup gr_bio
+	 * FIXME: this code will break much of the library's configurability;
+	 * to fix this, one should use an intermediate struct before saving.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(rw!='r' &&  rw!= 'w') {	errval = RSB_ERR_INTERNAL_ERROR; RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+	if(RSB_SOME_ERROR(errval))
+	{RSB_PERR_GOTO(err,RSB_ERRM_ES);}
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t i,j;
+		struct rsb_mtx_t * submatrix;
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix)
+			RSB_DO_ERROR_CUMULATE(errval,rsb_do_rw_matrix_xdr_ja(submatrix,JA,rnnzp,xdrsp,rw));
+	}
+	else
+	{
+		rsb_nnz_idx_t n;
+		if(rw=='r')
+			mtxAp->bindx = JA+*rnnzp;
+		/* should dump the rnnz VA and JA elements */
+		for(n=0;n<mtxAp->nnz;++n)
+			RSB_XDR_SAVE_TRY(xdr_int32_t(xdrsp,mtxAp->bindx+n));
+		*rnnzp += mtxAp->nnz;
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+static rsb_err_t rsb_do_rw_matrix_xdr_va(struct rsb_mtx_t * mtxAp, rsb_char_t * VA, rsb_nnz_idx_t *rnnzp, XDR *xdrsp, const rsb_char_t rw)
+{
+	/*!
+	 * \ingroup gr_bio
+	 * FIXME: this code will break much of the library's configurability;
+	 * to fix this, one should use an intermediate struct before saving.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(rw!='r' &&  rw!= 'w') {	errval = RSB_ERR_INTERNAL_ERROR; RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+	if(rw=='r')
+		mtxAp->VA = VA+mtxAp->el_size**rnnzp;
+	if(RSB_SOME_ERROR(errval))
+	{RSB_PERR_GOTO(err,RSB_ERRM_ES);}
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t i,j;
+		struct rsb_mtx_t * submatrix;
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix)
+			RSB_DO_ERROR_CUMULATE(errval,rsb_do_rw_matrix_xdr_va(submatrix,VA,rnnzp,xdrsp,rw));
+	}
+	else
+	{
+		rsb_nnz_idx_t n;
+		if(0)
+			;
+	#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+		else
+		if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE)
+		{
+			for(n=0;n<mtxAp->nnz;++n)
+				RSB_XDR_SAVE_TRY(xdr_double(xdrsp,((double*)(mtxAp->VA))+n));
+		}
+	#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+	#ifdef RSB_NUMERICAL_TYPE_FLOAT
+		else
+		if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT)
+		{
+			for(n=0;n<mtxAp->nnz;++n)
+				RSB_XDR_SAVE_TRY(xdr_float(xdrsp,((float*)(mtxAp->VA))+n));
+		}
+	#endif /* RSB_NUMERICAL_TYPE_FLOAT */
+	#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX
+		else
+		if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX)
+		{
+			for(n=0;n<2*mtxAp->nnz;++n)
+				RSB_XDR_SAVE_TRY(xdr_double(xdrsp,((double*)(mtxAp->VA))+n));
+		}
+	#endif /* RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX */
+	#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX
+		else
+		if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX)
+		{
+			for(n=0;n<2*mtxAp->nnz;++n)
+				RSB_XDR_SAVE_TRY(xdr_float(xdrsp,((float*)(mtxAp->VA))+n));
+		}
+	#endif /* RSB_NUMERICAL_TYPE_FLOAT_COMPLEX */
+		else
+		{
+			/* TODO: if you have a new type, complete here */
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}
+		*rnnzp += mtxAp->nnz;
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
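+
+/*
+ * Editorial note: XDR has no complex primitive, so the two complex types are
+ * streamed above as 2*nnz interleaved real scalars (re, im, re, im, ...) via
+ * xdr_double()/xdr_float(); a reader must agree on both the typecode and this
+ * interleaving. Illustrative round-trip for a single value (assumes
+ * <complex.h> and an XDR stream xdrs):
+ */
+#if 0
+	double complex v = 1.0 + 2.0*I, w = 0;	/* value to encode, value to decode */
+	xdr_double(&xdrs,((double*)&v)+0); xdr_double(&xdrs,((double*)&v)+1);
+	xdr_double(&xdrs,((double*)&w)+0); xdr_double(&xdrs,((double*)&w)+1);
+#endif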
+#endif /* RSB_WANT_XDR_SUPPORT */
+
+static rsb_err_t rsb_do_compute_total_bytes_for_binary_dump_recursive(const struct rsb_mtx_t * mtxAp, uint64_t * ia_size, uint64_t * ja_size, uint64_t * va_size)
+{
+	/*!
+	 * \ingroup gr_bio
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t i,j;
+		struct rsb_mtx_t * submatrix;
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix)
+			RSB_DO_ERROR_CUMULATE(errval,rsb_do_compute_total_bytes_for_binary_dump_recursive(submatrix,ia_size,ja_size,va_size));
+	}
+	else
+		if(!rsb__is_csr_matrix(mtxAp))
+		{
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}
+	*ia_size += sizeof(struct rsb_mtx_t)+sizeof(rsb_nnz_idx_t)*(mtxAp->Mdim+1);
+	*ja_size += 0;
+	*va_size += 0;
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+static rsb_err_t rsb_do_compute_total_bytes_for_binary_dump(const struct rsb_mtx_t * mtxAp, uint64_t * ia_size, uint64_t * ja_size, uint64_t * va_size)
+{
+	/*!
+	 * \ingroup gr_bio
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	*ia_size = 0;
+	*ja_size = 0;
+	*va_size = 0;
+	*ia_size += sizeof(struct rsb_translated_matrix_t)*(rsb__terminal_recursive_matrix_count(mtxAp));
+	*ja_size += sizeof(rsb_coo_idx_t)*(mtxAp->nnz);// FIXME: nnz or nnz+1 (locally in each mtxAp, of course) ?
+	*va_size += mtxAp->el_size*mtxAp->nnz;
+	errval = rsb_do_compute_total_bytes_for_binary_dump_recursive(mtxAp,ia_size,ja_size,va_size);
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_load_matrix_file_as_binary(struct rsb_mtx_t ** mtxApp, const rsb_char_t * filename)
+{
+	/*!
+	 * \ingroup gr_bio
+	 */
+#if RSB_WANT_XDR_SUPPORT
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	uint64_t ia_size = 0,ja_size = 0,va_size = 0;
+	FILE *fd = NULL;
+	void *IA = NULL, *JA = NULL, *VA = NULL;
+	XDR xdrs;
+	rsb_time_t lt;
+	struct rsb_mtx_t * mtxAp = NULL, *smp = NULL;
+	rsb_nnz_idx_t rnnz = 0;
+	char signature[RSB_BINARY_SPARSE_MATRIX_FILE_HEADER_LEN];
+	u_int slen = RSB_BINARY_SPARSE_MATRIX_FILE_HEADER_LEN;
+
+	RSB_IO_NOTICE("binary loading file %s..\n",filename);
+	lt = - rsb_time();
+
+	fd = fopen(filename,"r");
+	xdrstdio_create(&xdrs,fd,XDR_DECODE); 
+	RSB_XDR_SAVE_TRY(fread(signature,slen,1,fd));
+//	if(RSB_MEMCMP(signature,RSB_BINARY_SPARSE_MATRIX_FILE_HEADER,RSB_BINARY_SPARSE_MATRIX_FILE_HEADER_LEN))
+	if(RSB_MEMCMP(signature,RSB_BINARY_SPARSE_MATRIX_FILE_SIGNATURE,RSB_BINARY_SPARSE_MATRIX_FILE_SIGNATURE_LEN))
+	{
+		RSB_IO_ERROR("wrong file signature (not beginning with %s): skipping..\n",RSB_BINARY_SPARSE_MATRIX_FILE_SIGNATURE);
+		goto ierr;
+	
+	}
+	RSB_XDR_SAVE_TRY(xdr_uint64_t(&xdrs,&(ia_size)));
+	RSB_XDR_SAVE_TRY(xdr_uint64_t(&xdrs,&(ja_size)));
+	RSB_XDR_SAVE_TRY(xdr_uint64_t(&xdrs,&(va_size)));
+	/* FIXME: should validate input, here */
+	IA = rsb__calloc(ia_size);
+	JA = rsb__calloc(ja_size);
+	VA = rsb__calloc(va_size);
+	if(!IA || !JA || !VA)
+		goto ierr;	/* FIXME: err should close streams */
+	mtxAp = IA; // FIXME
+	smp = mtxAp+1;
+
+	errval = rsb_do_rw_matrix_xdr_ia(mtxAp,&smp,&rnnz,&xdrs,'r');
+	if(RSB_SOME_ERROR(errval))
+		goto ierr;
+	if(rnnz!=mtxAp->nnz)
+	{
+		RSB_IO_ERROR("error : read %d instead of %d nnz!\n",rnnz,mtxAp->nnz);
+		errval = RSB_ERR_GENERIC_ERROR; goto ierr;
+	}
+
+
+	rnnz = 0;
+	errval = rsb_do_rw_matrix_xdr_ja(mtxAp,JA,&rnnz,&xdrs,'r');
+	if(rnnz!=mtxAp->nnz)
+	{
+		RSB_IO_ERROR("error : read %d instead of %d nnz!\n",rnnz,mtxAp->nnz);
+		errval = RSB_ERR_GENERIC_ERROR; goto ierr;
+	}
+	rnnz = 0;
+	errval = rsb_do_rw_matrix_xdr_va(mtxAp,VA,&rnnz,&xdrs,'r');
+	if(rnnz!=mtxAp->nnz)
+	{
+		RSB_IO_ERROR("error : read %d instead of %d nnz!\n",rnnz,mtxAp->nnz);
+		errval = RSB_ERR_GENERIC_ERROR; goto ierr;
+	}
+
+	mtxAp->all_leaf_matrices = (struct rsb_translated_matrix_t*)smp;
+	errval = rsb__get_array_of_leaf_matrices(mtxAp,&mtxAp->all_leaf_matrices,NULL);
+	if(RSB_SOME_ERROR(errval))
+	{RSB_PERR_GOTO(err,RSB_ERRM_ES);}
+//	rsb__do_get_first_submatrix(mtxAp)->bindx = JA;
+//	rsb__do_get_first_submatrix(mtxAp)->VA = VA;		// FIXME: temporarily here
+
+	// place a check here
+	if(!rsb__mtx_chk(mtxAp))
+	{
+		errval = RSB_ERR_CORRUPT_INPUT_DATA;
+		RSB_PERR_GOTO(err,RSB_ERRM_EM);
+	}
+ierr:
+	xdr_destroy(&xdrs);
+	if(fclose(fd)!=0)
+	{
+		// NOTE: we ignore this error
+	}
+	/* FIXME: the matrix should be validated, now, before returning */
+	if(!mtxAp)
+	{RSB_PERR_GOTO(err,RSB_ERRM_ES);}
+	*mtxApp = mtxAp;
+	RSB_DO_FLAG_ADD(mtxAp->flags,RSB_FLAG_FIX_FOR_BINARY_LOADED_MATRIX);
+
+	lt += rsb_time();
+	RSB_IO_NOTICE("#ia_size %d..\n",(int)ia_size);
+	RSB_IO_NOTICE("#ja_size %d..\n",(int)ja_size);
+	RSB_IO_NOTICE("#va_size %d..\n",(int)va_size);
+	RSB_IO_NOTICE("#binary loading file %s succeeded and took %lf s (%.0f nnz/s).\n",filename,lt,(1.0/(lt/mtxAp->nnz)));
+	/* FIXME : this is debug info */
+//	RSB_DO_ERROR_CUMULATE(errval,rsb__do_print_matrix_stats(mtxAp,RSB_CONST_DUMP_TIMES|RSB_CONST_DUMP_DIMENSIONS|RSB_CONST_DUMP_RECURSION,NULL));
+	goto ret;
+err:
+	/* FIXME : missing error handling */
+	RSB_MTX_FREE(mtxAp);
+#else /* RSB_WANT_XDR_SUPPORT */
+	rsb_err_t errval = RSB_ERR_UNSUPPORTED_FEATURE;
+#endif /* RSB_WANT_XDR_SUPPORT */
+ret:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_save_matrix_file_as_binary(const struct rsb_mtx_t * mtxAp, FILE * fd)
+{
+	/*!
+	 * \ingroup gr_bio
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+#if RSB_WANT_XDR_SUPPORT
+	uint64_t ia_size = 0,ja_size = 0,va_size = 0;
+	XDR xdrs;
+	rsb_nnz_idx_t rnnz = 0;
+	const char * signature = RSB_BINARY_SPARSE_MATRIX_FILE_HEADER;
+	u_int slen = RSB_BINARY_SPARSE_MATRIX_FILE_HEADER_LEN;
+
+	if(!mtxAp || !fd)
+	{
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+
+	// file signature dump
+	if(!rsb__mtx_chk(mtxAp))
+	{
+		errval = RSB_ERR_CORRUPT_INPUT_DATA;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	errval = rsb_do_compute_total_bytes_for_binary_dump(mtxAp,&ia_size,&ja_size,&va_size);
+	xdrstdio_create(&xdrs,fd,XDR_ENCODE); 
+	RSB_XDR_SAVE_TRY(fwrite(signature,slen,1,fd));
+	RSB_XDR_SAVE_TRY(xdr_uint64_t(&xdrs,&(ia_size)));
+	RSB_XDR_SAVE_TRY(xdr_uint64_t(&xdrs,&(ja_size)));
+	RSB_XDR_SAVE_TRY(xdr_uint64_t(&xdrs,&(va_size)));
+	rnnz = 0;
+	errval = rsb_do_rw_matrix_xdr_ia((struct rsb_mtx_t*)mtxAp,NULL,&rnnz,&xdrs,'w');
+	if(rnnz!=mtxAp->nnz)
+	{
+		RSB_IO_ERROR("error : wrote %d instead of %d nnz!\n",rnnz,mtxAp->nnz);
+		errval = RSB_ERR_GENERIC_ERROR; goto ierr;
+	}
+	rnnz = 0;
+	errval = rsb_do_rw_matrix_xdr_ja((struct rsb_mtx_t*)mtxAp,NULL,&rnnz,&xdrs,'w');
+	if(rnnz!=mtxAp->nnz)
+	{
+		RSB_IO_ERROR("error : wrote %d instead of %d nnz!\n",rnnz,mtxAp->nnz);
+		errval = RSB_ERR_GENERIC_ERROR; goto ierr;
+	}
+	rnnz = 0;
+	errval = rsb_do_rw_matrix_xdr_va((struct rsb_mtx_t*)mtxAp,NULL,&rnnz,&xdrs,'w');
+	if(rnnz!=mtxAp->nnz)
+	{
+		RSB_IO_ERROR("error : wrote %d instead of %d nnz!\n",rnnz,mtxAp->nnz);
+		errval = RSB_ERR_GENERIC_ERROR; goto ierr;
+	}
+ierr:
+	xdr_destroy(&xdrs);
+#else /* RSB_WANT_XDR_SUPPORT */
+	rsb_err_t errval = RSB_ERR_UNSUPPORTED_FEATURE;
+#endif /* RSB_WANT_XDR_SUPPORT */
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+#else /* RSB_WANT_XDR_SUPPORT */
+rsb_err_t rsb__do_bindump_init(void){return RSB_ERR_UNSUPPORTED_FEATURE;}
+rsb_err_t rsb__do_load_matrix_file_as_binary(struct rsb_mtx_t ** mtxApp, const rsb_char_t * filename){return RSB_ERR_UNSUPPORTED_FEATURE;}
+rsb_err_t rsb__do_save_matrix_file_as_binary(const struct rsb_mtx_t * mtxAp, FILE * fd){return RSB_ERR_UNSUPPORTED_FEATURE;}
+#endif /* RSB_WANT_XDR_SUPPORT */
+/* @endcond */
diff --git a/rsb_bio.h b/rsb_bio.h
new file mode 100644
index 0000000..9849c86
--- /dev/null
+++ b/rsb_bio.h
@@ -0,0 +1,37 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains matrix binary I/O functions.
+ * */
+
+#ifndef RSB_BIO_H_INCLUDED
+#define RSB_BIO_H_INCLUDED
+#include "rsb_common.h"
+rsb_err_t rsb__do_bindump_init(void);
+rsb_err_t rsb__do_load_matrix_file_as_binary(struct rsb_mtx_t ** mtxApp, const rsb_char_t * filename);
+rsb_err_t rsb__do_save_matrix_file_as_binary(const struct rsb_mtx_t * mtxAp, FILE * fd);
+#endif /* RSB_BIO_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_blas_sparse.F90 b/rsb_blas_sparse.F90
new file mode 100644
index 0000000..39eb53b
--- /dev/null
+++ b/rsb_blas_sparse.F90
@@ -0,0 +1,1736 @@
+! 
+! Copyright (C) 2008-2015 Michele Martone
+! 
+! This file is part of librsb.
+! 
+! librsb is free software; you can redistribute it and/or modify it
+! under the terms of the GNU Lesser General Public License as published
+! by the Free Software Foundation; either version 3 of the License, or
+! (at your option) any later version.
+! 
+! librsb is distributed in the hope that it will be useful, but WITHOUT
+! ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+! FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+! License for more details.
+! 
+! You should have received a copy of the GNU Lesser General Public
+! License along with librsb; see the file COPYING.
+! If not, see <http://www.gnu.org/licenses/>.
+! 
+
+!
+!> @file
+!! @brief This file implements the Fortran Sparse BLAS interface to \librsb.
+!!
+!!
+
+#define RSB_HAVE_RSB_KERNELS 1
+
+      MODULE blas_sparse
+        !> A Sparse BLAS interface for RSB
+        IMPLICIT NONE
+PUBLIC
+
+        
+        !> inserts a single entry
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        INTERFACE uscr_insert_entry
+        MODULE PROCEDURE suscr_insert_entry &
+        &, duscr_insert_entry &
+        &, cuscr_insert_entry &
+        &, zuscr_insert_entry &
+        & ;
+        END INTERFACE
+        
+        !> inserts multiple entries
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        INTERFACE uscr_insert_entries
+        MODULE PROCEDURE suscr_insert_entries &
+        &, duscr_insert_entries &
+        &, cuscr_insert_entries &
+        &, zuscr_insert_entries &
+        & ;
+        END INTERFACE
+        
+        !> inserts a sparse column
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        INTERFACE uscr_insert_col
+        MODULE PROCEDURE suscr_insert_col &
+        &, duscr_insert_col &
+        &, cuscr_insert_col &
+        &, zuscr_insert_col &
+        & ;
+        END INTERFACE
+        
+        !> inserts a sparse row
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        INTERFACE uscr_insert_row
+        MODULE PROCEDURE suscr_insert_row &
+        &, duscr_insert_row &
+        &, cuscr_insert_row &
+        &, zuscr_insert_row &
+        & ;
+        END INTERFACE
+        
+        !> inserts a clique
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        INTERFACE uscr_insert_clique
+        MODULE PROCEDURE suscr_insert_clique &
+        &, duscr_insert_clique &
+        &, cuscr_insert_clique &
+        &, zuscr_insert_clique &
+        & ;
+        END INTERFACE
+        
+        !> inserts a dense block
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        INTERFACE uscr_insert_block
+        MODULE PROCEDURE suscr_insert_block &
+        &, duscr_insert_block &
+        &, cuscr_insert_block &
+        &, zuscr_insert_block &
+        & ;
+        END INTERFACE
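+        ! A minimal creation sketch for the insertion interfaces above
+        ! (kept in comments so the module text stays valid; nr, nc, nnz,
+        ! VA, IA, JA are hypothetical user variables, not part of this module):
+        !   CALL duscr_begin(nr,nc,A,istat)                  ! new double-precision handle A
+        !   CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)   ! stage nnz COO triplets
+        !   CALL uscr_end(A,istat)                           ! finish assembly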
+        
+        !> multiplication: y <- alpha*A*x + y
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        INTERFACE usmv
+        MODULE PROCEDURE susmv &
+        &, dusmv &
+        &, cusmv &
+        &, zusmv &
+        & ;
+        END INTERFACE
+        
+        !> triangular solve: x <- alpha*T^-1*x
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        INTERFACE ussv
+        MODULE PROCEDURE sussv &
+        &, dussv &
+        &, cussv &
+        &, zussv &
+        & ;
+        END INTERFACE
+        
+        !> multiplication, multiple right-hand sides: c <- alpha*A*b + c
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        INTERFACE usmm
+        MODULE PROCEDURE susmm &
+        &, dusmm &
+        &, cusmm &
+        &, zusmm &
+        & ;
+        END INTERFACE
+        
+        !> triangular solve, multiple right-hand sides: b <- alpha*T^-1*b
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        INTERFACE ussm
+        MODULE PROCEDURE sussm &
+        &, dussm &
+        &, cussm &
+        &, zussm &
+        & ;
+        END INTERFACE
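+        ! Corresponding computational sketch (in comments; x, y, alpha are
+        ! hypothetical and typed to match the handle A):
+        !   CALL usmv(blas_no_trans,alpha,A,x,1,y,1,istat)   ! y <- alpha*A*x + y
+        !   CALL ussv(blas_no_trans,alpha,A,x,1,istat)       ! x <- alpha*A^-1*x, A triangular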
+        
+        INTEGER, PARAMETER :: blas_sparse_const_success=0
+        INTEGER, PARAMETER :: blas_sparse_const_failure=-1 ! value returned by this interface on failure
+        INTEGER, PARAMETER :: blas_sparse_const_not_available=-9999 ! value returned by this interface when deactivated
+! This file has been auto-generated from blas_enum.h.
+        INTEGER,PARAMETER :: blas_rowmajor=101
+        INTEGER,PARAMETER :: blas_colmajor=102
+        INTEGER,PARAMETER :: blas_no_trans=111
+        INTEGER,PARAMETER :: blas_trans=112
+        INTEGER,PARAMETER :: blas_conj_trans=113
+        INTEGER,PARAMETER :: blas_upper=121
+        INTEGER,PARAMETER :: blas_lower=122
+        INTEGER,PARAMETER :: blas_non_unit_diag=131
+        INTEGER,PARAMETER :: blas_unit_diag=132
+        INTEGER,PARAMETER :: blas_left_side=141
+        INTEGER,PARAMETER :: blas_right_side=142
+        INTEGER,PARAMETER :: blas_base=151
+        INTEGER,PARAMETER :: blas_t=152
+        INTEGER,PARAMETER :: blas_rnd=153
+        INTEGER,PARAMETER :: blas_ieee=154
+        INTEGER,PARAMETER :: blas_emin=155
+        INTEGER,PARAMETER :: blas_emax=156
+        INTEGER,PARAMETER :: blas_eps=157
+        INTEGER,PARAMETER :: blas_prec=158
+        INTEGER,PARAMETER :: blas_underflow=159
+        INTEGER,PARAMETER :: blas_overflow=160
+        INTEGER,PARAMETER :: blas_sfmin=161
+        INTEGER,PARAMETER :: blas_one_norm=171
+        INTEGER,PARAMETER :: blas_real_one_norm=172
+        INTEGER,PARAMETER :: blas_two_norm=173
+        INTEGER,PARAMETER :: blas_frobenius_norm=174
+        INTEGER,PARAMETER :: blas_inf_norm=175
+        INTEGER,PARAMETER :: blas_real_inf_norm=176
+        INTEGER,PARAMETER :: blas_max_norm=177
+        INTEGER,PARAMETER :: blas_real_max_norm=178
+        INTEGER,PARAMETER :: blas_increasing_order=181
+        INTEGER,PARAMETER :: blas_decreasing_order=182
+        INTEGER,PARAMETER :: blas_conj=191
+        INTEGER,PARAMETER :: blas_no_conj=192
+        INTEGER,PARAMETER :: blas_jrot_inner=201
+        INTEGER,PARAMETER :: blas_jrot_outer=202
+        INTEGER,PARAMETER :: blas_jrot_sorted=203
+        INTEGER,PARAMETER :: blas_prec_single=211
+        INTEGER,PARAMETER :: blas_prec_double=212
+        INTEGER,PARAMETER :: blas_prec_indigenous=213
+        INTEGER,PARAMETER :: blas_prec_extra=214
+        INTEGER,PARAMETER :: blas_zero_base=221
+        INTEGER,PARAMETER :: blas_one_base=222
+        INTEGER,PARAMETER :: blas_general=231
+        INTEGER,PARAMETER :: blas_symmetric=232
+        INTEGER,PARAMETER :: blas_hermitian=233
+        INTEGER,PARAMETER :: blas_triangular=234
+        INTEGER,PARAMETER :: blas_lower_triangular=235
+        INTEGER,PARAMETER :: blas_upper_triangular=236
+        INTEGER,PARAMETER :: blas_lower_symmetric=237
+        INTEGER,PARAMETER :: blas_upper_symmetric=238
+        INTEGER,PARAMETER :: blas_lower_hermitian=239
+        INTEGER,PARAMETER :: blas_upper_hermitian=240
+        INTEGER,PARAMETER :: blas_complex=241
+        INTEGER,PARAMETER :: blas_real=242
+        INTEGER,PARAMETER :: blas_double_precision=243
+        INTEGER,PARAMETER :: blas_single_precision=244
+        INTEGER,PARAMETER :: blas_num_rows=251
+        INTEGER,PARAMETER :: blas_num_cols=252
+        INTEGER,PARAMETER :: blas_num_nonzeros=253
+        INTEGER,PARAMETER :: blas_invalid_handle=261
+        INTEGER,PARAMETER :: blas_new_handle=262
+        INTEGER,PARAMETER :: blas_open_handle=263
+        INTEGER,PARAMETER :: blas_valid_handle=264
+        INTEGER,PARAMETER :: blas_regular=271
+        INTEGER,PARAMETER :: blas_irregular=272
+        INTEGER,PARAMETER :: blas_block=273
+        INTEGER,PARAMETER :: blas_unassembled=274
+        INTEGER,PARAMETER :: blas_rsb_spmv_autotuning_on=6660
+        INTEGER,PARAMETER :: blas_rsb_spmv_autotuning_off=6661
+        INTEGER,PARAMETER :: blas_rsb_spmv_n_autotuning_on=6662
+        INTEGER,PARAMETER :: blas_rsb_spmv_n_autotuning_off=6663
+        INTEGER,PARAMETER :: blas_rsb_spmv_t_autotuning_on=6664
+        INTEGER,PARAMETER :: blas_rsb_spmv_t_autotuning_off=6665
+        INTEGER,PARAMETER :: blas_rsb_autotune_next_operation=6666
+        INTEGER,PARAMETER :: blas_rsb_rep_rsb=9995
+        INTEGER,PARAMETER :: blas_rsb_rep_csr=9996
+        INTEGER,PARAMETER :: blas_rsb_rep_coo=9997
+        INTEGER,PARAMETER :: blas_rsb_duplicates_ovw=9998
+        INTEGER,PARAMETER :: blas_rsb_duplicates_sum=9999
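+        ! These constants mirror blas_enum.h; the blas_rsb_* values are
+        ! librsb-specific extensions. A property-setting sketch (in comments):
+        !   CALL ussp(A,blas_rsb_duplicates_sum,istat)           ! sum duplicate entries on assembly
+        !   CALL ussp(A,blas_rsb_autotune_next_operation,istat)  ! autotune on the next operation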
+
+        INTERFACE
+          TYPE(C_PTR) FUNCTION &
+          &rsb_blas_get_mtx&
+          &(A)&
+          &BIND(c,NAME = "rsb_blas_get_mtx")
+          USE ISO_C_BINDING
+          INTEGER(C_INT), VALUE  :: A
+          END FUNCTION rsb_blas_get_mtx
+        END INTERFACE
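+        ! Sketch of use (in comments): given a valid Sparse BLAS handle A,
+        !   TYPE(C_PTR) :: mtxp
+        !   mtxp = rsb_blas_get_mtx(A)   ! pointer to the underlying struct rsb_mtx_t
+        ! which lets librsb's native rsb_* C API operate on the same matrix.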
+
+CONTAINS
+
+         !> \rsb_spblasl2_ds_msg\rsb_spblas_return_msg
+         !> \rsb_spblas_f_istat_msg
+         !! 
+         
+         SUBROUTINE usds(A,istat)
+           IMPLICIT NONE
+           INTEGER,INTENT(IN)::A
+           INTEGER::istat
+
+           istat=blas_sparse_const_success
+#if defined(RSB_HAVE_RSB_KERNELS)
+           CALL blas_usds(A,istat)
+           IF(istat.NE.blas_sparse_const_success)&
+            &istat=blas_sparse_const_failure
+#else /* RSB_HAVE_RSB_KERNELS */
+           istat=blas_sparse_const_not_available
+#endif /* RSB_HAVE_RSB_KERNELS */
+         END SUBROUTINE
+         
+
+         !> \rsb_spblasl2_cr_end_msg\rsb_spblas_return_msg
+         !> \rsb_spblas_f_istat_msg
+         !! 
+         
+         SUBROUTINE uscr_end(A,istat)
+
+           IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+           INTEGER,INTENT(IN)::A
+
+           istat=blas_sparse_const_success
+#if defined(RSB_HAVE_RSB_KERNELS)
+           CALL blas_uscr_end(A,istat)
+
+           IF(istat.NE.blas_sparse_const_success)&
+            &istat=blas_sparse_const_failure
+#else /* RSB_HAVE_RSB_KERNELS */
+           istat=blas_sparse_const_not_available
+#endif /* RSB_HAVE_RSB_KERNELS */
+         END SUBROUTINE
+         
+
+         !> \rsb_spblasl2_gp_msg\rsb_spblas_return_msg
+         !> \rsb_spblas_f_istat_msg
+         !! 
+         
+         SUBROUTINE usgp(A,pname,istat)
+
+           IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+           INTEGER,INTENT(IN)::A
+           INTEGER,INTENT(IN)::pname
+
+           istat=blas_sparse_const_success
+#if defined(RSB_HAVE_RSB_KERNELS)
+           CALL blas_usgp(A,pname,istat)
+
+           ! istat carries the queried property value here, not an error code
+           !IF(istat.NE.blas_sparse_const_success)istat=blas_sparse_const_failure
+#else /* RSB_HAVE_RSB_KERNELS */
+           istat=blas_sparse_const_not_available
+#endif /* RSB_HAVE_RSB_KERNELS */
+         END SUBROUTINE
+         
+
+         !> \rsb_spblasl2_sp_msg\rsb_spblas_return_msg
+         !> \rsb_spblas_f_istat_msg
+         !! 
+         
+         SUBROUTINE ussp(A,pname,istat)
+
+           IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+           INTEGER,INTENT(IN)::A
+           INTEGER,INTENT(IN)::pname
+
+           istat=blas_sparse_const_success
+#if defined(RSB_HAVE_RSB_KERNELS)
+           CALL blas_ussp(A,pname,istat)
+
+           IF(istat.NE.blas_sparse_const_success)&
+            &istat=blas_sparse_const_failure
+#else /* RSB_HAVE_RSB_KERNELS */
+           istat=blas_sparse_const_not_available
+#endif /* RSB_HAVE_RSB_KERNELS */
+
+         END SUBROUTINE
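+         ! Note the asymmetry between ussp and usgp: ussp sets property pname
+         ! and istat is an error code, while usgp returns the queried value
+         ! itself in istat. Sketch (in comments):
+         !   CALL ussp(A,blas_lower_triangular,istat)  ! declare the structure
+         !   CALL usgp(A,blas_num_nonzeros,istat)      ! istat receives the nonzero count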
+         
+
+        
+        
+        !> \rsb_spblasl2_cr_begin_msg\rsb_spblas_return_mtx_msg
+        !> \rsb_spblas_f_istat_msg\rsb_spblasl2_A_msg_ftn
+
+        !! 
+        
+        SUBROUTINE suscr_begin&
+         &(m,n,A,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: m 
+          INTEGER :: n 
+          INTEGER,INTENT(OUT) :: A
+
+
+          istat = blas_sparse_const_success
+          CALL blas_suscr_begin&
+           &(m,n,A,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_begin_msg\rsb_spblas_return_mtx_msg
+        !> \rsb_spblas_f_istat_msg\rsb_spblasl2_A_msg_ftn
+
+        !! 
+        
+        SUBROUTINE duscr_begin&
+         &(m,n,A,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: m 
+          INTEGER :: n 
+          INTEGER,INTENT(OUT) :: A
+
+
+          istat = blas_sparse_const_success
+          CALL blas_duscr_begin&
+           &(m,n,A,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_begin_msg\rsb_spblas_return_mtx_msg
+        !> \rsb_spblas_f_istat_msg\rsb_spblasl2_A_msg_ftn
+
+        !! 
+        
+        SUBROUTINE cuscr_begin&
+         &(m,n,A,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: m 
+          INTEGER :: n 
+          INTEGER,INTENT(OUT) :: A
+
+
+          istat = blas_sparse_const_success
+          CALL blas_cuscr_begin&
+           &(m,n,A,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_begin_msg\rsb_spblas_return_mtx_msg
+        !> \rsb_spblas_f_istat_msg\rsb_spblasl2_A_msg_ftn
+
+        !! 
+        
+        SUBROUTINE zuscr_begin&
+         &(m,n,A,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: m 
+          INTEGER :: n 
+          INTEGER,INTENT(OUT) :: A
+
+
+          istat = blas_sparse_const_success
+          CALL blas_zuscr_begin&
+           &(m,n,A,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        
+        
+        !> \rsb_spblasl2_cr_block_msg\rsb_spblas_return_mtx_msg
+        !> \rsb_spblas_f_istat_msg\rsb_spblasl2_A_msg_ftn
+
+        !! 
+        
+        SUBROUTINE suscr_block_begin&
+         &(Mb,Nb,k,l,A,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: Mb 
+          INTEGER :: Nb 
+          INTEGER :: k 
+          INTEGER :: l 
+          INTEGER,INTENT(OUT) :: A
+
+
+          istat = blas_sparse_const_success
+          CALL blas_suscr_block_begin&
+           &(Mb,Nb,k,l,A,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_block_msg\rsb_spblas_return_mtx_msg
+        !> \rsb_spblas_f_istat_msg\rsb_spblasl2_A_msg_ftn
+
+        !! 
+        
+        SUBROUTINE duscr_block_begin&
+         &(Mb,Nb,k,l,A,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: Mb 
+          INTEGER :: Nb 
+          INTEGER :: k 
+          INTEGER :: l 
+          INTEGER,INTENT(OUT) :: A
+
+
+          istat = blas_sparse_const_success
+          CALL blas_duscr_block_begin&
+           &(Mb,Nb,k,l,A,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_block_msg\rsb_spblas_return_mtx_msg
+        !> \rsb_spblas_f_istat_msg\rsb_spblasl2_A_msg_ftn
+
+        !! 
+        
+        SUBROUTINE cuscr_block_begin&
+         &(Mb,Nb,k,l,A,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: Mb 
+          INTEGER :: Nb 
+          INTEGER :: k 
+          INTEGER :: l 
+          INTEGER,INTENT(OUT) :: A
+
+
+          istat = blas_sparse_const_success
+          CALL blas_cuscr_block_begin&
+           &(Mb,Nb,k,l,A,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_block_msg\rsb_spblas_return_mtx_msg
+        !> \rsb_spblas_f_istat_msg\rsb_spblasl2_A_msg_ftn
+
+        !! 
+        
+        SUBROUTINE zuscr_block_begin&
+         &(Mb,Nb,k,l,A,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: Mb 
+          INTEGER :: Nb 
+          INTEGER :: k 
+          INTEGER :: l 
+          INTEGER,INTENT(OUT) :: A
+
+
+          istat = blas_sparse_const_success
+          CALL blas_zuscr_block_begin&
+           &(Mb,Nb,k,l,A,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
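+        ! In the *uscr_block_begin family the matrix is declared as Mb x Nb
+        ! blocks of fixed size k x l each (the Sparse BLAS reading of these
+        ! arguments); sketch, in comments:
+        !   CALL duscr_block_begin(Mb,Nb,k,l,A,istat)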
+        
+        
+        
+        
+        !> \rsb_spblasl2_cr_vbr_msg\rsb_spblas_return_mtx_msg
+        !> \rsb_spblas_f_istat_msg\rsb_spblasl2_A_msg_ftn
+
+        !! 
+        
+        SUBROUTINE suscr_variable_block_begin&
+         &(Mb,Nb,K,L,A,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: Mb 
+          INTEGER :: Nb 
+          INTEGER :: K (:)
+          INTEGER :: L (:)
+          INTEGER,INTENT(OUT) :: A
+
+
+          istat = blas_sparse_const_success
+          CALL blas_suscr_variable_block_begin&
+           &(Mb,Nb,K,L,A,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_vbr_msg\rsb_spblas_return_mtx_msg
+        !> \rsb_spblas_f_istat_msg\rsb_spblasl2_A_msg_ftn
+
+        !! 
+        
+        SUBROUTINE duscr_variable_block_begin&
+         &(Mb,Nb,K,L,A,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: Mb 
+          INTEGER :: Nb 
+          INTEGER :: K (:)
+          INTEGER :: L (:)
+          INTEGER,INTENT(OUT) :: A
+
+
+          istat = blas_sparse_const_success
+          CALL blas_duscr_variable_block_begin&
+           &(Mb,Nb,K,L,A,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_vbr_msg\rsb_spblas_return_mtx_msg
+        !> \rsb_spblas_f_istat_msg\rsb_spblasl2_A_msg_ftn
+
+        !! 
+        
+        SUBROUTINE cuscr_variable_block_begin&
+         &(Mb,Nb,K,L,A,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: Mb 
+          INTEGER :: Nb 
+          INTEGER :: K (:)
+          INTEGER :: L (:)
+          INTEGER,INTENT(OUT) :: A
+
+
+          istat = blas_sparse_const_success
+          CALL blas_cuscr_variable_block_begin&
+           &(Mb,Nb,K,L,A,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_vbr_msg\rsb_spblas_return_mtx_msg
+        !> \rsb_spblas_f_istat_msg\rsb_spblasl2_A_msg_ftn
+
+        !! 
+        
+        SUBROUTINE zuscr_variable_block_begin&
+         &(Mb,Nb,K,L,A,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: Mb 
+          INTEGER :: Nb 
+          INTEGER :: K (:)
+          INTEGER :: L (:)
+          INTEGER,INTENT(OUT) :: A
+
+
+          istat = blas_sparse_const_success
+          CALL blas_zuscr_variable_block_begin&
+           &(Mb,Nb,K,L,A,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
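+        ! The *uscr_variable_block_begin family generalizes this: K(:) and
+        ! L(:) are assumed here to hold the per-block-row and per-block-column
+        ! sizes, per the Sparse BLAS standard; sketch, in comments:
+        !   CALL duscr_variable_block_begin(Mb,Nb,K,L,A,istat)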
+        
+        
+        
+        
+        !> \rsb_spblasl2_cr_end_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE suscr_end&
+         &(A,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_suscr_end&
+           &(A,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_end_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE duscr_end&
+         &(A,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_duscr_end&
+           &(A,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_end_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE cuscr_end&
+         &(A,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_cuscr_end&
+           &(A,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_end_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE zuscr_end&
+         &(A,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_zuscr_end&
+           &(A,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        
+        
+        !> \rsb_spblasl2_cr_insert_entry_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE suscr_insert_entry&
+         &(A,val,i,j,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          REAL(KIND(1.e0)) :: val 
+          INTEGER :: i 
+          INTEGER :: j 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_suscr_insert_entry&
+           &(A,val,i,j,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_insert_entry_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE duscr_insert_entry&
+         &(A,val,i,j,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          REAL(KIND(1.d0)) :: val 
+          INTEGER :: i 
+          INTEGER :: j 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_duscr_insert_entry&
+           &(A,val,i,j,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_insert_entry_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE cuscr_insert_entry&
+         &(A,val,i,j,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          COMPLEX(KIND(1.e0)) :: val 
+          INTEGER :: i 
+          INTEGER :: j 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_cuscr_insert_entry&
+           &(A,val,i,j,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_insert_entry_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE zuscr_insert_entry&
+         &(A,val,i,j,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          COMPLEX(KIND(1.d0)) :: val 
+          INTEGER :: i 
+          INTEGER :: j 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_zuscr_insert_entry&
+           &(A,val,i,j,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        
+        
+        !> \rsb_spblasl2_cr_insert_entries_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE suscr_insert_entries&
+         &(A,nnz,val,indx,jndx,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          INTEGER :: nnz 
+          REAL(KIND(1.e0)) :: val (:)
+          INTEGER :: indx (:)
+          INTEGER :: jndx (:)
+
+
+          istat = blas_sparse_const_success
+          CALL blas_suscr_insert_entries&
+           &(A,nnz,val,indx,jndx,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_insert_entries_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE duscr_insert_entries&
+         &(A,nnz,val,indx,jndx,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          INTEGER :: nnz 
+          REAL(KIND(1.d0)) :: val (:)
+          INTEGER :: indx (:)
+          INTEGER :: jndx (:)
+
+
+          istat = blas_sparse_const_success
+          CALL blas_duscr_insert_entries&
+           &(A,nnz,val,indx,jndx,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_insert_entries_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE cuscr_insert_entries&
+         &(A,nnz,val,indx,jndx,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          INTEGER :: nnz 
+          COMPLEX(KIND(1.e0)) :: val (:)
+          INTEGER :: indx (:)
+          INTEGER :: jndx (:)
+
+
+          istat = blas_sparse_const_success
+          CALL blas_cuscr_insert_entries&
+           &(A,nnz,val,indx,jndx,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_insert_entries_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE zuscr_insert_entries&
+         &(A,nnz,val,indx,jndx,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          INTEGER :: nnz 
+          COMPLEX(KIND(1.d0)) :: val (:)
+          INTEGER :: indx (:)
+          INTEGER :: jndx (:)
+
+
+          istat = blas_sparse_const_success
+          CALL blas_zuscr_insert_entries&
+           &(A,nnz,val,indx,jndx,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        
+        
+        !> \rsb_spblasl2_cr_insert_col_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE suscr_insert_col&
+         &(A,j,nnz,val,indx,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          INTEGER :: j 
+          INTEGER :: nnz 
+          REAL(KIND(1.e0)) :: val (:)
+          INTEGER :: indx (:)
+
+
+          istat = blas_sparse_const_success
+          CALL blas_suscr_insert_col&
+           &(A,j,nnz,val,indx,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_insert_col_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE duscr_insert_col&
+         &(A,j,nnz,val,indx,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          INTEGER :: j 
+          INTEGER :: nnz 
+          REAL(KIND(1.d0)) :: val (:)
+          INTEGER :: indx (:)
+
+
+          istat = blas_sparse_const_success
+          CALL blas_duscr_insert_col&
+           &(A,j,nnz,val,indx,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_insert_col_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE cuscr_insert_col&
+         &(A,j,nnz,val,indx,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          INTEGER :: j 
+          INTEGER :: nnz 
+          COMPLEX(KIND(1.e0)) :: val (:)
+          INTEGER :: indx (:)
+
+
+          istat = blas_sparse_const_success
+          CALL blas_cuscr_insert_col&
+           &(A,j,nnz,val,indx,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_insert_col_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE zuscr_insert_col&
+         &(A,j,nnz,val,indx,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          INTEGER :: j 
+          INTEGER :: nnz 
+          COMPLEX(KIND(1.d0)) :: val (:)
+          INTEGER :: indx (:)
+
+
+          istat = blas_sparse_const_success
+          CALL blas_zuscr_insert_col&
+           &(A,j,nnz,val,indx,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        
+        
+        !> \rsb_spblasl2_cr_insert_row_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE suscr_insert_row&
+         &(A,i,nnz,val,indx,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          INTEGER :: i 
+          INTEGER :: nnz 
+          REAL(KIND(1.e0)) :: val (:)
+          INTEGER :: indx (:)
+
+
+          istat = blas_sparse_const_success
+          CALL blas_suscr_insert_row&
+           &(A,i,nnz,val,indx,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_insert_row_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE duscr_insert_row&
+         &(A,i,nnz,val,indx,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          INTEGER :: i 
+          INTEGER :: nnz 
+          REAL(KIND(1.d0)) :: val (:)
+          INTEGER :: indx (:)
+
+
+          istat = blas_sparse_const_success
+          CALL blas_duscr_insert_row&
+           &(A,i,nnz,val,indx,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_insert_row_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE cuscr_insert_row&
+         &(A,i,nnz,val,indx,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          INTEGER :: i 
+          INTEGER :: nnz 
+          COMPLEX(KIND(1.e0)) :: val (:)
+          INTEGER :: indx (:)
+
+
+          istat = blas_sparse_const_success
+          CALL blas_cuscr_insert_row&
+           &(A,i,nnz,val,indx,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_insert_row_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE zuscr_insert_row&
+         &(A,i,nnz,val,indx,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          INTEGER :: i 
+          INTEGER :: nnz 
+          COMPLEX(KIND(1.d0)) :: val (:)
+          INTEGER :: indx (:)
+
+
+          istat = blas_sparse_const_success
+          CALL blas_zuscr_insert_row&
+           &(A,i,nnz,val,indx,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        
+        
+        !> \rsb_spblasl2_cr_insert_clique_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE suscr_insert_clique&
+         &(A,k,l,val,row_stride,col_stride,indx,jndx,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          INTEGER :: k 
+          INTEGER :: l 
+          REAL(KIND(1.e0)) :: val (:)
+          INTEGER :: row_stride 
+          INTEGER :: col_stride 
+          INTEGER :: indx (:)
+          INTEGER :: jndx (:)
+
+
+          istat = blas_sparse_const_success
+          CALL blas_suscr_insert_clique&
+           &(A,k,l,val,row_stride,col_stride,indx,jndx,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_insert_clique_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE duscr_insert_clique&
+         &(A,k,l,val,row_stride,col_stride,indx,jndx,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          INTEGER :: k 
+          INTEGER :: l 
+          REAL(KIND(1.d0)) :: val (:)
+          INTEGER :: row_stride 
+          INTEGER :: col_stride 
+          INTEGER :: indx (:)
+          INTEGER :: jndx (:)
+
+
+          istat = blas_sparse_const_success
+          CALL blas_duscr_insert_clique&
+           &(A,k,l,val,row_stride,col_stride,indx,jndx,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_insert_clique_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE cuscr_insert_clique&
+         &(A,k,l,val,row_stride,col_stride,indx,jndx,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          INTEGER :: k 
+          INTEGER :: l 
+          COMPLEX(KIND(1.e0)) :: val (:)
+          INTEGER :: row_stride 
+          INTEGER :: col_stride 
+          INTEGER :: indx (:)
+          INTEGER :: jndx (:)
+
+
+          istat = blas_sparse_const_success
+          CALL blas_cuscr_insert_clique&
+           &(A,k,l,val,row_stride,col_stride,indx,jndx,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_insert_clique_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE zuscr_insert_clique&
+         &(A,k,l,val,row_stride,col_stride,indx,jndx,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          INTEGER :: k 
+          INTEGER :: l 
+          COMPLEX(KIND(1.d0)) :: val (:)
+          INTEGER :: row_stride 
+          INTEGER :: col_stride 
+          INTEGER :: indx (:)
+          INTEGER :: jndx (:)
+
+
+          istat = blas_sparse_const_success
+          CALL blas_zuscr_insert_clique&
+           &(A,k,l,val,row_stride,col_stride,indx,jndx,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
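+        ! In the *uscr_insert_clique/_block routines val is rank-1, and element
+        ! (p,q) of the k x l clique is assumed to be read at
+        ! val(1+(p-1)*row_stride+(q-1)*col_stride); sketch, in comments:
+        !   CALL duscr_insert_clique(A,k,l,val,1,k,indx,jndx,istat)  ! column-major clique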
+        
+        
+        
+        
+        !> \rsb_spblasl2_cr_insert_block_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE suscr_insert_block&
+         &(A,val,row_stride,col_stride,i,j,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          REAL(KIND(1.e0)) :: val (:)
+          INTEGER :: row_stride 
+          INTEGER :: col_stride 
+          INTEGER :: i 
+          INTEGER :: j 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_suscr_insert_block&
+           &(A,val,row_stride,col_stride,i,j,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_insert_block_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE duscr_insert_block&
+         &(A,val,row_stride,col_stride,i,j,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          REAL(KIND(1.d0)) :: val (:)
+          INTEGER :: row_stride 
+          INTEGER :: col_stride 
+          INTEGER :: i 
+          INTEGER :: j 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_duscr_insert_block&
+           &(A,val,row_stride,col_stride,i,j,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_insert_block_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE cuscr_insert_block&
+         &(A,val,row_stride,col_stride,i,j,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          COMPLEX(KIND(1.e0)) :: val (:)
+          INTEGER :: row_stride 
+          INTEGER :: col_stride 
+          INTEGER :: i 
+          INTEGER :: j 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_cuscr_insert_block&
+           &(A,val,row_stride,col_stride,i,j,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_cr_insert_block_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE zuscr_insert_block&
+         &(A,val,row_stride,col_stride,i,j,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: A 
+          COMPLEX(KIND(1.d0)) :: val (:)
+          INTEGER :: row_stride 
+          INTEGER :: col_stride 
+          INTEGER :: i 
+          INTEGER :: j 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_zuscr_insert_block&
+           &(A,val,row_stride,col_stride,i,j,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        
+        
+        !> \rsb_spblasl2_mv_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE susmv&
+         &(transA,alpha,A,x,incx,y,incy,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: transA 
+          REAL(KIND(1.e0)) :: alpha 
+          INTEGER :: A 
+          REAL(KIND(1.e0)) :: x (:)
+          INTEGER :: incx 
+          REAL(KIND(1.e0)) :: y (:)
+          INTEGER :: incy 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_susmv&
+           &(transA,alpha,A,x,incx,y,incy,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_mv_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE dusmv&
+         &(transA,alpha,A,x,incx,y,incy,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: transA 
+          REAL(KIND(1.d0)) :: alpha 
+          INTEGER :: A 
+          REAL(KIND(1.d0)) :: x (:)
+          INTEGER :: incx 
+          REAL(KIND(1.d0)) :: y (:)
+          INTEGER :: incy 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_dusmv&
+           &(transA,alpha,A,x,incx,y,incy,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_mv_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE cusmv&
+         &(transA,alpha,A,x,incx,y,incy,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: transA 
+          COMPLEX(KIND(1.e0)) :: alpha 
+          INTEGER :: A 
+          COMPLEX(KIND(1.e0)) :: x (:)
+          INTEGER :: incx 
+          COMPLEX(KIND(1.e0)) :: y (:)
+          INTEGER :: incy 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_cusmv&
+           &(transA,alpha,A,x,incx,y,incy,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_mv_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE zusmv&
+         &(transA,alpha,A,x,incx,y,incy,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: transA 
+          COMPLEX(KIND(1.d0)) :: alpha 
+          INTEGER :: A 
+          COMPLEX(KIND(1.d0)) :: x (:)
+          INTEGER :: incx 
+          COMPLEX(KIND(1.d0)) :: y (:)
+          INTEGER :: incy 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_zusmv&
+           &(transA,alpha,A,x,incx,y,incy,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        
+        
+        !> \rsb_spblasl2_sv_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE sussv&
+         &(transT,alpha,T,x,incx,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: transT 
+          REAL(KIND(1.e0)) :: alpha 
+          INTEGER :: T 
+          REAL(KIND(1.e0)) :: x (:)
+          INTEGER :: incx 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_sussv&
+           &(transT,alpha,T,x,incx,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_sv_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE dussv&
+         &(transT,alpha,T,x,incx,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: transT 
+          REAL(KIND(1.d0)) :: alpha 
+          INTEGER :: T 
+          REAL(KIND(1.d0)) :: x (:)
+          INTEGER :: incx 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_dussv&
+           &(transT,alpha,T,x,incx,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_sv_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE cussv&
+         &(transT,alpha,T,x,incx,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: transT 
+          COMPLEX(KIND(1.e0)) :: alpha 
+          INTEGER :: T 
+          COMPLEX(KIND(1.e0)) :: x (:)
+          INTEGER :: incx 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_cussv&
+           &(transT,alpha,T,x,incx,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_sv_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE zussv&
+         &(transT,alpha,T,x,incx,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: transT 
+          COMPLEX(KIND(1.d0)) :: alpha 
+          INTEGER :: T 
+          COMPLEX(KIND(1.d0)) :: x (:)
+          INTEGER :: incx 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_zussv&
+           &(transT,alpha,T,x,incx,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        
+        
+        !> \rsb_spblasl2_mm_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE susmm&
+         &(order,transA,nrhs,alpha,A,b,ldb,c,ldc,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: order 
+          INTEGER :: transA 
+          INTEGER :: nrhs 
+          REAL(KIND(1.e0)) :: alpha 
+          INTEGER :: A 
+          REAL(KIND(1.e0)) :: b (:)
+          INTEGER :: ldb 
+          REAL(KIND(1.e0)) :: c (:)
+          INTEGER :: ldc 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_susmm&
+           &(order,transA,nrhs,alpha,A,b,ldb,c,ldc,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_mm_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE dusmm&
+         &(order,transA,nrhs,alpha,A,b,ldb,c,ldc,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: order 
+          INTEGER :: transA 
+          INTEGER :: nrhs 
+          REAL(KIND(1.d0)) :: alpha 
+          INTEGER :: A 
+          REAL(KIND(1.d0)) :: b (:)
+          INTEGER :: ldb 
+          REAL(KIND(1.d0)) :: c (:)
+          INTEGER :: ldc 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_dusmm&
+           &(order,transA,nrhs,alpha,A,b,ldb,c,ldc,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_mm_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE cusmm&
+         &(order,transA,nrhs,alpha,A,b,ldb,c,ldc,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: order 
+          INTEGER :: transA 
+          INTEGER :: nrhs 
+          COMPLEX(KIND(1.e0)) :: alpha 
+          INTEGER :: A 
+          COMPLEX(KIND(1.e0)) :: b (:)
+          INTEGER :: ldb 
+          COMPLEX(KIND(1.e0)) :: c (:)
+          INTEGER :: ldc 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_cusmm&
+           &(order,transA,nrhs,alpha,A,b,ldb,c,ldc,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_mm_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE zusmm&
+         &(order,transA,nrhs,alpha,A,b,ldb,c,ldc,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: order 
+          INTEGER :: transA 
+          INTEGER :: nrhs 
+          COMPLEX(KIND(1.d0)) :: alpha 
+          INTEGER :: A 
+          COMPLEX(KIND(1.d0)) :: b (:)
+          INTEGER :: ldb 
+          COMPLEX(KIND(1.d0)) :: c (:)
+          INTEGER :: ldc 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_zusmm&
+           &(order,transA,nrhs,alpha,A,b,ldb,c,ldc,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        
+        
+        !> \rsb_spblasl2_sm_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE sussm&
+         &(order,transT,nrhs,alpha,T,b,ldb,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: order 
+          INTEGER :: transT 
+          INTEGER :: nrhs 
+          REAL(KIND(1.e0)) :: alpha 
+          INTEGER :: T 
+          REAL(KIND(1.e0)) :: b (:)
+          INTEGER :: ldb 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_sussm&
+           &(order,transT,nrhs,alpha,T,b,ldb,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_sm_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE dussm&
+         &(order,transT,nrhs,alpha,T,b,ldb,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: order 
+          INTEGER :: transT 
+          INTEGER :: nrhs 
+          REAL(KIND(1.d0)) :: alpha 
+          INTEGER :: T 
+          REAL(KIND(1.d0)) :: b (:)
+          INTEGER :: ldb 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_dussm&
+           &(order,transT,nrhs,alpha,T,b,ldb,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_sm_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE cussm&
+         &(order,transT,nrhs,alpha,T,b,ldb,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: order 
+          INTEGER :: transT 
+          INTEGER :: nrhs 
+          COMPLEX(KIND(1.e0)) :: alpha 
+          INTEGER :: T 
+          COMPLEX(KIND(1.e0)) :: b (:)
+          INTEGER :: ldb 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_cussm&
+           &(order,transT,nrhs,alpha,T,b,ldb,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        !> \rsb_spblasl2_sm_msg\rsb_spblas_return_msg
+        !> \rsb_spblas_f_istat_msg
+        !! 
+        
+        SUBROUTINE zussm&
+         &(order,transT,nrhs,alpha,T,b,ldb,istat)
+          IMPLICIT NONE
+          INTEGER, INTENT(OUT) ::istat
+          INTEGER :: order 
+          INTEGER :: transT 
+          INTEGER :: nrhs 
+          COMPLEX(KIND(1.d0)) :: alpha 
+          INTEGER :: T 
+          COMPLEX(KIND(1.d0)) :: b (:)
+          INTEGER :: ldb 
+
+
+          istat = blas_sparse_const_success
+          CALL blas_zussm&
+           &(order,transT,nrhs,alpha,T,b,ldb,istat)
+
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+        END SUBROUTINE
+        
+        
+        
+      END MODULE blas_sparse
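+! End-to-end sketch (hypothetical driver, not part of this file; assumes
+! linking against librsb):
+!   PROGRAM demo
+!     USE blas_sparse
+!     INTEGER :: A, istat
+!     REAL(KIND(1.d0)) :: x(2) = 1.d0, y(2) = 0.d0
+!     CALL duscr_begin(2,2,A,istat)                  ! 2 x 2 double-precision matrix
+!     CALL uscr_insert_entry(A,1.d0,1,1,istat)       ! A(1,1) = 1
+!     CALL uscr_insert_entry(A,2.d0,2,2,istat)       ! A(2,2) = 2
+!     CALL uscr_end(A,istat)
+!     CALL usmv(blas_no_trans,1.d0,A,x,1,y,1,istat)  ! y <- A*x = (1,2)
+!     CALL usds(A,istat)                             ! release A
+!   END PROGRAM demo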
diff --git a/rsb_blas_sparse.m4 b/rsb_blas_sparse.m4
new file mode 100644
index 0000000..8a81984
--- /dev/null
+++ b/rsb_blas_sparse.m4
@@ -0,0 +1,257 @@
+! 
+! Copyright (C) 2008-2015 Michele Martone
+! 
+! This file is part of librsb.
+! 
+! librsb is free software; you can redistribute it and/or modify it
+! under the terms of the GNU Lesser General Public License as published
+! by the Free Software Foundation; either version 3 of the License, or
+! (at your option) any later version.
+! 
+! librsb is distributed in the hope that it will be useful, but WITHOUT
+! ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+! FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+! License for more details.
+! 
+! You should have received a copy of the GNU Lesser General Public
+! License along with librsb; see the file COPYING.
+! If not, see <http://www.gnu.org/licenses/>.
+! 
+dnl
+include(`rsb_fortran_macros.m4')dnl
+dnl
+define(`RSB_M4_BLAS_SPARSE_INTERFACE_BEGIN',`ifelse(RSB_M4_WANT_BLAS_SPARSE_INTERFACE,`1',`INTERFACE',`')')dnl
+define(`RSB_M4_BLAS_SPARSE_INTERFACE_END',`ifelse(RSB_M4_WANT_BLAS_SPARSE_INTERFACE,`1',`END INTERFACE',`')')dnl
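+dnl Note: when RSB_M4_WANT_BLAS_SPARSE_INTERFACE is `1' these two macros
+dnl expand to INTERFACE / END INTERFACE, so the template emits declarations
+dnl only; otherwise they expand to nothing and the bodies below become
+dnl module procedures.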
+dnl
+dnl        @author: Michele Martone
+dnl
+dnl        This macro generates a Sparse BLAS fortran module for librsb.
+dnl !! @cond INNERDOC 
+dnl ! author: Michele Martone
+!
+!> @file
+!! @brief This file implements the Fortran Sparse BLAS interface to \librsb.
+!!
+ifelse(`0',`1',`
+!! Supported types are: foreach(`mtype',RSB_M4_SBLAS_MATRIX_SUPPORTED_TYPES,` RSB_M4_C2F_TYPE(mtype)').
+!! Supported operations are: foreach(`pmop',RSB_M4_SBLAS_INTERFACE_OPS,` RSB_M4_SBLAS_INTERFACE_IDENTIFIER(pmop)').
+')dnl
+!!
+ifelse(RSB_M4_WANT_BLAS_SPARSE_INTERFACE,`1',`',`
+#define RSB_HAVE_RSB_KERNELS 1
+dnl foreach(`type',RSB_M4_SBLAS_MATRIX_SUPPORTED_TYPES,`dnl
+dnl dnl
+dnl `#define' RSB_M4_HAVE_TYPE_PREPROCESSOR_SYMBOL(type) 1 /*!< Type type is supported.*/
+dnl dnl
+dnl ')dnl
+')dnl
+
+ifelse(RSB_M4_WANT_BLAS_SPARSE_INTERFACE,`1',`',`dnl
+      MODULE blas_sparse
+        !> A Sparse BLAS interface for RSB
+        IMPLICIT NONE
+PUBLIC
+')dnl
+
+dnl ifelse(RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES_LIST_LENGTH,0,`',`dnl
+        foreach(`pmop',RSB_M4_SBLAS_GENERIC_OPS,`
+        !> RSB_M4_SBLAS_SUBROUTINE_HELP_COMMENT(pmop,`*')
+        !> RSB_M4_SBLAS_SUBROUTINE_EXTRA_FORTRAN_HELP_COMMENT(pmop)
+dnl        !> RSB_M4_SPBLAS_HELP_INFO(pmop)
+        !! 
+dnl         MODULE PROCEDURE RSB_M4_INTERFACE_LIST(RSB_M4_COMMA_LIST((RSB_M4_CHOPTRAILINGSPACES(foreach(`mtype',RSB_M4_SBLAS_MATRIX_SUPPORTED_TYPES,`RSB_M4_SBLAS_SUBROUTINE_IDENTIFIER(pmop,mtype) ')))))dnl
+        INTERFACE RSB_M4_SBLAS_INTERFACE_IDENTIFIER(pmop)
+        ifelse(RSB_M4_WANT_BLAS_SPARSE_INTERFACE,`1',`PROCEDURE',`MODULE PROCEDURE') RSB_M4_INTERFACE_LIST(RSB_M4_COMMA_LIST((RSB_M4_CHOPTRAILINGSPACES(foreach(`mtype',RSB_M4_SPBLAS_MATRIX_ALL_TYPES,`RSB_M4_SBLAS_SUBROUTINE_IDENTIFIER(pmop,mtype) ')))))dnl
+        END INTERFACE
+        ')
+dnl ')dnl
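+dnl For instance, for pmop=`mv' the loop above emits the generic INTERFACE
+dnl usmv listing susmv, dusmv, cusmv and zusmv, exactly as seen in the
+dnl generated rsb_blas_sparse.F90.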
+dnl
+ifelse(RSB_M4_want_old_fortran_float_types,`1',`dnl
+dnl        ....
+')dnl
+dnl
+        INTEGER, PARAMETER :: blas_sparse_const_success=0
+        INTEGER, PARAMETER :: blas_sparse_const_failure=-1 ! value returned by this interface on failure
+        INTEGER, PARAMETER :: blas_sparse_const_not_available=-9999 ! value returned by this interface when deactivated
+ifelse(`0',`1',`
+        INTEGER, PARAMETER :: blas_lower_hermitian=239 ! # FIXME
+        INTEGER, PARAMETER :: blas_lower_symmetric=237 ! # FIXME
+        !
+        INTEGER, PARAMETER :: blas_unit_diag=132 ! # FIXME
+        !
+        INTEGER, PARAMETER :: blas_lower_triangular=235 ! # FIXME
+        INTEGER, PARAMETER :: blas_upper_triangular=236 ! # FIXME
+        INTEGER, PARAMETER :: blas_no_trans=111 ! # FIXME
+        INTEGER, PARAMETER :: blas_trans=112 ! # FIXME
+        INTEGER, PARAMETER :: blas_conj_trans=113 ! # FIXME
+        INTEGER, PARAMETER :: blas_rsb_autotuning_on = 666
+        INTEGER, PARAMETER :: blas_rsb_autotuning_off = 999
+',`dnl
+include(`blas_sparse/blas_enum.F90')dnl
+')dnl
+dnl
+ifelse(RSB_M4_WANT_BLAS_SPARSE_INTERFACE,`1',`',`
+        INTERFACE
+          TYPE(C_PTR) FUNCTION &
+          &rsb_blas_get_mtx&
+          &(A)&
+          &BIND(c,NAME = "rsb_blas_get_mtx")
+          USE ISO_C_BINDING
+          INTEGER(C_INT), VALUE  :: A
+          END FUNCTION rsb_blas_get_mtx
+        END INTERFACE
+')dnl
+dnl
+
+ifelse(RSB_M4_WANT_BLAS_SPARSE_INTERFACE,`1',`',`CONTAINS')
+
+dnl         SUBROUTINE RSB_M4_SBLAS_INTERFACE_RADIX`_'init(istat)
+dnl           IMPLICIT NONE
+dnl           INTEGER::istat
+dnl           istat=blas_sparse_const_success
+dnl #ifdef RSB_HAVE_RSB_KERNELS
+dnl           CALL RSB_M4_SBLAS2VBR_SUBROUTINE_RADIX`'init(istat)
+dnl           IF(istat.NE.blas_sparse_const_success)istat=blas_sparse_const_failure
+dnl #else
+dnl           istat=blas_sparse_const_not_available
+dnl #endif
+dnl         END SUBROUTINE
+dnl 
+         !> RSB_M4_SPBLAS_HELP_INFO(`ds')
+         !> RSB_M4_SBLAS_SUBROUTINE_EXTRA_FORTRAN_HELP_COMMENT()
+         !! 
+         RSB_M4_BLAS_SPARSE_INTERFACE_BEGIN
+         SUBROUTINE RSB_M4_SBLAS_INTERFACE_RADIX`'ds(A,istat)
+           IMPLICIT NONE
+           INTEGER,INTENT(IN)::A
+           INTEGER::istat
+ifelse(RSB_M4_WANT_BLAS_SPARSE_INTERFACE,`1',`',`
+           istat=blas_sparse_const_success
+`#if defined(RSB_HAVE_RSB_KERNELS)'
+           CALL RSB_M4_SBLAS2VBR_SUBROUTINE_IDENTIFIER(`ds',`',`f90')`'(A,istat)
+           IF(istat.NE.blas_sparse_const_success)&
+            &istat=blas_sparse_const_failure
+#else /* RSB_HAVE_RSB_KERNELS */
+           istat=blas_sparse_const_not_available
+#endif /* RSB_HAVE_RSB_KERNELS */
+')dnl
+         END SUBROUTINE
+         RSB_M4_BLAS_SPARSE_INTERFACE_END
+
+dnl           !> RSB_M4_SBLAS_SUBROUTINE_HELP_COMMENT(`cr_end',`*')
+         !> RSB_M4_SPBLAS_HELP_INFO(`cr_end')
+         !> RSB_M4_SBLAS_SUBROUTINE_EXTRA_FORTRAN_HELP_COMMENT()
+         !! 
+         RSB_M4_BLAS_SPARSE_INTERFACE_BEGIN
+         SUBROUTINE RSB_M4_SBLAS_SUBROUTINE_IDENTIFIER(`cr_end',`')`'RSB_M4_SBLAS_SUBROUTINE_ARGS(`cr_end',`',`f90')
+           IMPLICIT NONE
+RSB_M4_SBLAS_SUBROUTINE_INFO_DECLARATION(istat)dnl
+           INTEGER,INTENT(IN)::A
+ifelse(RSB_M4_WANT_BLAS_SPARSE_INTERFACE,`1',`',`
+           istat=blas_sparse_const_success
+`#if defined(RSB_HAVE_RSB_KERNELS)'
+           CALL RSB_M4_SBLAS2VBR_SUBROUTINE_IDENTIFIER(`cr_end',`',`f90')`'dnl
+RSB_M4_ARGS_TO_ACTUAL_ARGS_FOR_SB_INTERFACE(`(RSB_M4_SBLAS_SUBROUTINE_ARGS(`cr_end',`',`f90'))')
+           IF(istat.NE.blas_sparse_const_success)&
+            &istat=blas_sparse_const_failure
+#else /* RSB_HAVE_RSB_KERNELS */
+           istat=blas_sparse_const_not_available
+#endif /* RSB_HAVE_RSB_KERNELS */
+')dnl
+         END SUBROUTINE
+         RSB_M4_BLAS_SPARSE_INTERFACE_END
+
+dnl           !> RSB_M4_SBLAS_SUBROUTINE_HELP_COMMENT(`gp',`*')
+         !> RSB_M4_SPBLAS_HELP_INFO(`gp')
+         !> RSB_M4_SBLAS_SUBROUTINE_EXTRA_FORTRAN_HELP_COMMENT()
+         !! 
+         RSB_M4_BLAS_SPARSE_INTERFACE_BEGIN
+         SUBROUTINE RSB_M4_SBLAS_SUBROUTINE_IDENTIFIER(`gp',`')`'RSB_M4_SBLAS_SUBROUTINE_ARGS(`gp',`',`f90')
+           IMPLICIT NONE
+RSB_M4_SBLAS_SUBROUTINE_INFO_DECLARATION(istat)dnl
+           INTEGER,INTENT(IN)::A
+           INTEGER,INTENT(IN)::pname
+ifelse(RSB_M4_WANT_BLAS_SPARSE_INTERFACE,`1',`',`
+           istat=blas_sparse_const_success
+`#if defined(RSB_HAVE_RSB_KERNELS)'
+           CALL RSB_M4_SBLAS2VBR_SUBROUTINE_IDENTIFIER(`gp',`',`f90')`'dnl
+RSB_M4_ARGS_TO_ACTUAL_ARGS_FOR_SB_INTERFACE(`(RSB_M4_SBLAS_SUBROUTINE_ARGS(`gp',`',`f90'))')
+           !istat does not have the meaning of an error value here
+           !IF(istat.NE.blas_sparse_const_success)istat=blas_sparse_const_failure
+#else /* RSB_HAVE_RSB_KERNELS */
+           istat=blas_sparse_const_not_available
+#endif /* RSB_HAVE_RSB_KERNELS */
+')dnl
+         END SUBROUTINE
+         RSB_M4_BLAS_SPARSE_INTERFACE_END
+
+         !> RSB_M4_SPBLAS_HELP_INFO(`sp')
+dnl           !> RSB_M4_SBLAS_SUBROUTINE_HELP_COMMENT(`sp',`*')
+         !> RSB_M4_SBLAS_SUBROUTINE_EXTRA_FORTRAN_HELP_COMMENT()
+         !! 
+         RSB_M4_BLAS_SPARSE_INTERFACE_BEGIN
+         SUBROUTINE RSB_M4_SBLAS_SUBROUTINE_IDENTIFIER(`sp',`')`'RSB_M4_SBLAS_SUBROUTINE_ARGS(`sp',`',`f90')
+           IMPLICIT NONE
+RSB_M4_SBLAS_SUBROUTINE_INFO_DECLARATION(istat)dnl
+           INTEGER,INTENT(IN)::A
+           INTEGER,INTENT(IN)::pname
+ifelse(RSB_M4_WANT_BLAS_SPARSE_INTERFACE,`1',`',`
+           istat=blas_sparse_const_success
+`#if defined(RSB_HAVE_RSB_KERNELS)'
+           CALL RSB_M4_SBLAS2VBR_SUBROUTINE_IDENTIFIER(`sp',`',`f90')`'dnl
+RSB_M4_ARGS_TO_ACTUAL_ARGS_FOR_SB_INTERFACE(`(RSB_M4_SBLAS_SUBROUTINE_ARGS(`sp',`',`f90'))')
+           IF(istat.NE.blas_sparse_const_success)&
+            &istat=blas_sparse_const_failure
+#else /* RSB_HAVE_RSB_KERNELS */
+           istat=blas_sparse_const_not_available
+#endif /* RSB_HAVE_RSB_KERNELS */
+')
+         END SUBROUTINE
+         RSB_M4_BLAS_SPARSE_INTERFACE_END
+
+        foreach(`pmop',RSB_M4_SBLAS_INTERFACE_OPS,`
+dnl        RSB_M4_SBLAS_MATRIX_SUPPORTED_TYPES
+        foreach(`mtype',RSB_M4_SPBLAS_MATRIX_ALL_TYPES,`
+dnl          !> RSB_M4_SBLAS_SUBROUTINE_HELP_COMMENT(pmop,mtype)
+        !> RSB_M4_SPBLAS_HELP_INFO(pmop)
+        !> RSB_M4_SBLAS_SUBROUTINE_EXTRA_FORTRAN_HELP_COMMENT(pmop)
+        !! 
+        RSB_M4_BLAS_SPARSE_INTERFACE_BEGIN
+        SUBROUTINE RSB_M4_SBLAS_SUBROUTINE_IDENTIFIER(pmop,mtype)`&
+         &'RSB_M4_SBLAS_SUBROUTINE_ARGS(pmop,mtype,`f90')dnl
+          IMPLICIT NONE
+RSB_M4_SBLAS_SUBROUTINE_INFO_DECLARATION(istat)dnl
+RSB_M4_SBLAS_SUBROUTINE_ARGS_DECLARATION(pmop,mtype)dnl
+ifelse(RSB_M4_WANT_BLAS_SPARSE_INTERFACE,`1',`',`
+ifelse(`0',`1',`
+dnl `#if ( defined(RSB_HAVE_RSB_KERNELS)' && RSB_M4_HAVE_TYPE(mtype))
+`#if ( defined(RSB_HAVE_RSB_KERNELS)' && defined(RSB_M4_HAVE_TYPE_PREPROCESSOR_SYMBOL(mtype)))
+          istat = blas_sparse_const_success
+          CALL RSB_M4_SBLAS2VBR_SUBROUTINE_IDENTIFIER(pmop,mtype,`f90')`'dnl
+RSB_M4_ARGS_TO_ACTUAL_ARGS_FOR_SB_INTERFACE(`(RSB_M4_SBLAS_SUBROUTINE_ARGS(pmop,mtype,`f90'))')
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat=blas_sparse_const_failure
+#else  /* defined(RSB_HAVE_RSB_KERNELS) && RSB_M4_HAVE_TYPE('mtype`) */
+          istat=blas_sparse_const_not_available
+#endif /* defined(RSB_HAVE_RSB_KERNELS) && RSB_M4_HAVE_TYPE('mtype`) */
+          istat=blas_sparse_const_success
+',`
+          istat = blas_sparse_const_success
+          CALL RSB_M4_SBLAS2VBR_SUBROUTINE_IDENTIFIER(pmop,mtype,`f90')`&
+           &'dnl
+RSB_M4_ARGS_TO_ACTUAL_ARGS_FOR_SB_INTERFACE(`(RSB_M4_SBLAS_SUBROUTINE_ARGS(pmop,mtype,`f90'))')
+          IF(istat.NE.blas_sparse_const_success)&
+           &istat = blas_sparse_const_failure
+')dnl
+')dnl
+        END SUBROUTINE
+        RSB_M4_BLAS_SPARSE_INTERFACE_END
+        ')
+        ')
+dnl
+ifelse(RSB_M4_WANT_BLAS_SPARSE_INTERFACE,`1',`',`dnl
+      END MODULE blas_sparse
+')dnl
+dnl
+dnl !! @endcond
+dnl
diff --git a/rsb_blas_stuff.c b/rsb_blas_stuff.c
new file mode 100644
index 0000000..1b02bbd
--- /dev/null
+++ b/rsb_blas_stuff.c
@@ -0,0 +1,90 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/**
+ * @file
+ * @author Michele Martone
+ * @brief
+ *
+ * BLAS-like auxiliary routines.
+ * */
+#include "rsb_blas_stuff.h"
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+void rsb__BLAS_Xaxpy_parallel(rsb_int_t n, const void *alphap, void * a, rsb_int_t inca, const void * b, rsb_int_t incb, rsb_type_t typecode)
+{
+	/**
+		\ingroup gr_internals
+		alphap can be NULL
+	 	a <- a + alpha * b
+	 */
+	const rsb_nnz_idx_t wet = rsb_get_num_threads(); /* want executing threads */
+
+	if(RSB_UNLIKELY(n<wet*RSB_MIN_THREAD_XAXPY_NNZ))/* fewer than RSB_MIN_THREAD_XAXPY_NNZ nnz per thread: take the serial path */
+	{
+		rsb__cblas_Xaxpy(typecode,n,alphap,b,incb,a,inca);
+	}
+	else
+	{
+		rsb_nnz_idx_t wi, cnz = (wet+n-1)/wet;	/* chunk size */
+		size_t es = RSB_SIZEOF(typecode);
+
+		#pragma omp parallel for schedule(static,1) RSB_NTC 
+		for(wi=0;wi<wet;++wi)
+		{
+			rsb_nnz_idx_t coff = wi*cnz;
+			rsb_nnz_idx_t cnnz = (wi<wet-1)?cnz:n-((wet-1)*cnz);
+			rsb__cblas_Xaxpy(typecode,cnnz,alphap,((rsb_byte_t*)b)+es*coff*incb,incb,((rsb_byte_t*)a)+es*coff*inca,inca);
+		}
+	}
+}
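+
+/* Editor's note: a minimal, self-contained sketch (not librsb code; all names
+ * hypothetical) of the chunking scheme used above: n elements are split into
+ * wet chunks of size cnz = ceil(n/wet), the last thread taking the remainder. */
+#if 0
+#include <stdio.h>
+int main(void)
+{
+	const int n = 10, wet = 4;	/* elements, worker threads */
+	const int cnz = (wet+n-1)/wet;	/* chunk size, rounded up */
+	int wi;
+
+	for(wi=0;wi<wet;++wi)
+	{
+		const int coff = wi*cnz;				/* chunk offset */
+		const int cnnz = (wi<wet-1)?cnz:n-((wet-1)*cnz);	/* chunk length */
+		printf("thread %d handles [%d,%d)\n",wi,coff,coff+cnnz);
+	}
+	return 0;
+}
+#endif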
+
+void rsb__cblas_Xscal_parallel(rsb_type_t typecode, size_t n, const void * alphap, void * a, size_t stride)
+{
+	/**
+		\ingroup gr_internals
+		alphap can be NULL
+	 	a <- alpha * a
+	 */
+	const rsb_nnz_idx_t wet = rsb_get_num_threads(); /* want executing threads */
+
+	if(RSB_UNLIKELY(n<wet*RSB_MIN_THREAD_XAXPY_NNZ))/* fewer than RSB_MIN_THREAD_XAXPY_NNZ nnz per thread: take the serial path */
+	{
+		rsb__cblas_Xscal(typecode,n,alphap,a,stride);
+	}
+	else
+	{
+		rsb_nnz_idx_t wi,cnz = (wet+n-1)/wet;	/* chunk size */
+		size_t es = RSB_SIZEOF(typecode);
+
+		#pragma omp parallel for schedule(static,1) RSB_NTC 
+		for(wi=0;wi<wet;++wi)
+		{
+			rsb_nnz_idx_t coff = wi*cnz;
+			rsb_nnz_idx_t cnnz = (wi<wet-1)?cnz:n-((wet-1)*cnz);
+			rsb__cblas_Xscal(typecode,cnnz,alphap,((rsb_byte_t*)a)+es*coff*stride,stride);
+		}
+	}
+}
+
+/* @endcond */
diff --git a/rsb_blas_stuff.h b/rsb_blas_stuff.h
new file mode 100644
index 0000000..86730e0
--- /dev/null
+++ b/rsb_blas_stuff.h
@@ -0,0 +1,39 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/**
+ * @file
+ * @author Michele Martone
+ * @brief
+ *
+ * BLAS-like auxiliary routines.
+ * */
+#ifndef RSB_BLAS_STUFF_H_INCLUDED
+#define RSB_BLAS_STUFF_H_INCLUDED
+
+#include "rsb_internals.h"
+
+void rsb__BLAS_Xaxpy_parallel(rsb_int_t n, const void *alphap, void * a, rsb_int_t inca, const void * b, rsb_int_t incb, rsb_type_t typecode);
+void rsb__cblas_Xscal_parallel(rsb_type_t typecode, size_t n, const void * alphap, void * a, size_t stride);
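+
+/* Editor's note: an illustrative usage sketch (hypothetical values; the
+ * RSB_NUMERICAL_TYPE_DOUBLE typecode is assumed from rsb_types.h):
+ *
+ *	double a[100], b[100];	// filled elsewhere
+ *	const double alpha = 2.0;
+ *	// a <- a + alpha * b, computed in parallel over contiguous chunks:
+ *	rsb__BLAS_Xaxpy_parallel(100, &alpha, a, 1, b, 1, RSB_NUMERICAL_TYPE_DOUBLE);
+ */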
+
+#endif /* RSB_BLAS_STUFF_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_clone.c b/rsb_clone.c
new file mode 100644
index 0000000..a04fb7b
--- /dev/null
+++ b/rsb_clone.c
@@ -0,0 +1,1009 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/**
+ * @file
+ * @author Michele Martone
+ * @brief
+ *
+ * Matrix cloning routines.
+ * \internal
+ *
+ * */
+#include "rsb_common.h"
+
+//#define RSB_MTX_REASSIGN(OLD_MTXP,NEW_MTXP) {if(rsb_do_assign(NEW_MTXP,OLD_MTXP)) {RSB_PERR_GOTO(err,RSB_ERRM_ES);}
+#define RSB_MTX_REASSIGN(OLD_MTXP,NEW_MTXP) { RSB_MTX_FREE(OLD_MTXP); (OLD_MTXP)=(NEW_MTXP); }
+
+struct rsb_session_handle_t rsb_global_session_handle;
+
+void * rsb__clone_area_with_extra(const void *src, size_t csize, size_t bsize, size_t esize)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * (m)allocates an area of bsize+csize+esize bytes and copies csize bytes from src into it, at offset bsize
+	 * */
+	rsb_byte_t * dst = NULL;
+
+	if(!src /* || esize < 0 */)
+		goto ret;
+	dst = rsb__malloc(csize+bsize+esize);
+	if(!dst)
+		goto ret;
+	rsb_memcpy(dst+bsize,src,csize);
+ret:
+	return dst;
+}
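+
+/* Editor's note: an illustrative (hypothetical, not librsb test code) use of
+ * the layout produced above: bsize bytes of headroom, csize bytes copied from
+ * src, and esize bytes of tailroom, all in a single allocation. */
+#if 0
+#include <assert.h>
+#include <string.h>
+static void rsb_clone_area_with_extra_demo(void)
+{
+	const char src[4] = { 1, 2, 3, 4 };
+	/* 2 bytes of headroom + 4 copied bytes + 3 bytes of tailroom = 9 bytes */
+	char * dst = rsb__clone_area_with_extra(src,sizeof(src),2,3);
+
+	assert(dst != NULL && memcmp(dst+2,src,sizeof(src)) == 0);
+	RSB_CONDITIONAL_FREE(dst);
+}
+#endif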
+
+void * rsb__clone_area(const void *src, size_t size)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \param src the source data pointer
+	 * \param size the amount of data to clone
+	 * \return a pointer to the cloned area, or NULL in case of error
+	 *
+	 * allocates an area of size bytes and copies there data from src
+	 * */
+	void * dst = NULL;
+
+	if(!src || size < 1)
+		goto ret;
+	dst = rsb__clone_area_with_extra(src,size,0,0);
+ret:
+	return dst;
+}
+
+rsb_err_t rsb_util_coo_alloc(void **RSB_RESTRICT VAp, rsb_coo_idx_t ** RSB_RESTRICT IAp, rsb_coo_idx_t ** RSB_RESTRICT JAp, rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_bool_t do_calloc)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	void *VA_ = NULL,*IA_ = NULL,*JA_ = NULL;
+
+	if( RSB_MATRIX_UNSUPPORTED_TYPE(typecode) )
+	{
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		goto err;
+	}
+
+	if(do_calloc != RSB_BOOL_TRUE)
+	{
+		VA_ = rsb__malloc_vector((nnz),typecode),
+		IA_ = rsb__malloc(sizeof(rsb_coo_idx_t)*(nnz)),
+		JA_ = rsb__malloc(sizeof(rsb_coo_idx_t)*(nnz));
+	}
+	else
+	{
+		VA_ = rsb__calloc_vector((nnz),typecode),
+		IA_ = rsb__calloc(sizeof(rsb_coo_idx_t)*(nnz)),
+		JA_ = rsb__calloc(sizeof(rsb_coo_idx_t)*(nnz));
+	}
+
+	if(!VA_ || !IA_ || !JA_)
+	{
+		errval = RSB_ERR_ENOMEM;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	*VAp = VA_;
+	*IAp = IA_;
+	*JAp = JA_;
+	goto done;
+err:
+	RSB_CONDITIONAL_FREE(IA_);
+	RSB_CONDITIONAL_FREE(JA_);
+	RSB_CONDITIONAL_FREE(VA_);
+done:
+	return errval;
+}
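+
+/* Editor's note: an illustrative call (hypothetical; RSB_NUMERICAL_TYPE_DOUBLE
+ * is assumed from rsb_types.h) allocating a COO triple with malloc semantics. */
+#if 0
+static void coo_alloc_demo(void)
+{
+	void *VA = NULL;
+	rsb_coo_idx_t *IA = NULL, *JA = NULL;
+	rsb_err_t errval = rsb_util_coo_alloc(&VA,&IA,&JA,100,RSB_NUMERICAL_TYPE_DOUBLE,RSB_BOOL_FALSE);
+
+	if(!RSB_SOME_ERROR(errval))
+	{
+		/* ... fill and use the triple ... */
+		RSB_CONDITIONAL_FREE(VA);
+		RSB_CONDITIONAL_FREE(IA);
+		RSB_CONDITIONAL_FREE(JA);
+	}
+}
+#endif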
+
+rsb_err_t rsb_util_coo_alloc_copy_and_stats(void **RSB_RESTRICT VAp, rsb_coo_idx_t ** RSB_RESTRICT IAp, rsb_coo_idx_t ** RSB_RESTRICT JAp, const void *RSB_RESTRICT VA, const rsb_coo_idx_t * RSB_RESTRICT IA, const rsb_coo_idx_t * RSB_RESTRICT JA, rsb_coo_idx_t*RSB_RESTRICT mp, rsb_coo_idx_t*RSB_RESTRICT kp, rsb_nnz_idx_t nnz, rsb_nnz_idx_t ennz, rsb_type_t typecode, const rsb_coo_idx_t offi, const rsb_coo_idx_t offo, rsb_flags_t iflags, rsb_flags_t*RSB_RESTRICT flagsp)
+{
+	/*!
+	 * Copies the contents of a COO array triple to a freshly allocated COO array triple.
+	 * Size is assumed to be nnz+ennz.
+	 * Last ennz elements are not zeroed.
+	 *
+	 * Flags are determined: RSB_FLAG_UPPER_TRIANGULAR, RSB_FLAG_LOWER_TRIANGULAR.
+	 *
+	 * TODO: May implement input sanitization or zeroes detection.
+	 * TODO: Check for nnz+ennz overflow.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	void *VA_ = NULL;
+	rsb_coo_idx_t *IA_ = NULL,*JA_ = NULL;
+
+	errval = rsb_util_coo_alloc((void**)(&VA_),&IA_,&JA_,nnz+ennz,typecode,RSB_BOOL_FALSE);
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+
+	if(!VA && !IA && !JA)
+	       	goto nocopy; /* it's ok: alloc only semantics */
+	/* TODO: the following shall be made parallel */
+	if(mp || kp)
+		errval = rsb_util_coo_copy_and_stats(VA,IA,JA,VA_,IA_,JA_,mp,kp,nnz,typecode,offi,offo,iflags,flagsp);
+	else
+	{
+		errval = rsb_util_coo_copy(VA,IA,JA,VA_,IA_,JA_,nnz,typecode,offi,offo);
+		/* ... flags may not always be desired! */
+	/*	if(flagsp)
+			(*flagsp)|=rsb__util_coo_determine_uplo_flags(IA_,JA_,nnz);*/
+	}
+nocopy:
+	*VAp = VA_;
+	*IAp = IA_;
+	*JAp = JA_;
+	goto done;
+err:
+	RSB_CONDITIONAL_FREE(IA_);
+	RSB_CONDITIONAL_FREE(JA_);
+	RSB_CONDITIONAL_FREE(VA_);
+done:
+	return errval;
+}
+
+void * rsb__clone_area_parallel(const void *src, size_t size, size_t n)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \param src the source data pointer
+	 * \param size the amount of data to clone
+	 * \return a pointer to the cloned area, or NULL in case of error
+	 *
+	 * allocates an area of size bytes and copies there data from src
+	 * */
+	void * dst = NULL;
+
+	if(!src || size < 1)
+		goto ret;
+	dst = rsb__malloc(size*n);
+	if(!dst)
+		goto ret;
+	RSB_A_MEMCPY_parallel(dst,src,0,0,n,size);
+ret:
+	return dst;
+}
+
+
+#if RSB_WANT_BITMAP
+static void * rsb__clone_options_t(const struct rsb_options_t *o, rsb_blk_idx_t M_b, rsb_blk_idx_t K_b)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \param o a valid option structure pointer
+	 * \return the input pointer
+	 *
+	 * clones a rsb_options_t structure, deeply
+	 *
+	 * p.s.: the input rsb_options_t could be NULL; in that case it won't be cloned, as there is no need to.
+	 * */
+	struct rsb_options_t *no = NULL;
+
+	if(!o)
+	{RSB_PERR_GOTO(err,RSB_ERRM_ES);}
+
+	/* we allocate a new options structure */
+	if(! (no = rsb__clone_area(o,sizeof(*no))))
+	{RSB_PERR_GOTO(err,RSB_ERRM_ES);}
+
+	if( o->bitmap)
+	{
+		no->bitmap = rsb__clone_area(o->bitmap,RSB_BYTES_PER_BITMAP( M_b,K_b));
+		if(!no->bitmap)
+		{RSB_PERR_GOTO(err,RSB_ERRM_ES);}
+	}
+	return no;
+
+	err:
+	rsb__destroy_options_t(no);
+	return NULL;
+}
+#endif /* RSB_WANT_BITMAP */
+
+#define RSB_ARE_MTX_COO_CONFORMANT(MTXAP,MTXBP) \
+	( ( (MTXAP)->nnz == (MTXBP)->nnz ) && ( (MTXAP)->nr == (MTXBP)->nr ) && ( (MTXAP)->nc == (MTXBP)->nc ) && ( (MTXAP)->typecode == (MTXBP)->typecode ) )
+
+rsb_err_t rsb__mtx_shift_leaf_ptrs(struct rsb_mtx_t *RSB_RESTRICT  mtxCp, const struct rsb_mtx_t *RSB_RESTRICT  mtxAp, long smc)
+{
+	/* 
+	 * Adjusts pointer displacements in the matrix tree.
+	 * Please note that no pointed-to memory is accessed.
+	 */
+
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_submatrix_idx_t n,si;
+
+	for(n=0;n<smc;++n)
+		if(mtxCp[n].nnz) /* If valid. FIXME: IF NOT (E.G. MERGED), SHALL BE COMPLETELY ZEROED. */
+		for(si=0;si<RSB_FOUR;++si)
+			if(mtxAp[n].sm[si])
+			{
+				RSB_PTR_SHIFT(mtxCp[n].sm[si],mtxAp,mtxCp,(struct rsb_mtx_t*));
+				//mtxCp[n].sm[si] = mtxCp+(mtxAp[n].sm[si]-mtxAp);
+			/*	RSB_STDOUT("%03d/%03d: %p\n",n,si,mtxCp[n].sm[si]); */
+			}
+	return errval;
+}
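+
+/* Editor's note: the rebasing above amounts to the classic
+ * "new_base + (old_ptr - old_base)" pointer relocation (cf. the commented-out
+ * line in the loop), as in this standalone sketch (hypothetical names, not
+ * librsb code). */
+#if 0
+#include <assert.h>
+static void ptr_rebase_demo(void)
+{
+	struct node { struct node * next; } a[2], b[2];
+
+	a[0].next = &a[1];		/* intra-array link in the old tree */
+	b[0].next = b+(a[0].next-a);	/* rebase: keep the offset, swap the base */
+	assert(b[0].next == &b[1]);
+}
+#endif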
+
+rsb_err_t rsb__mtx_transplant_from_clone(struct rsb_mtx_t ** mtxDpp, struct rsb_mtx_t * mtxSp)
+{
+	/* 
+	 Moves the inner content of mtxSp to mtxDp.
+	 Shall free mtxSp at the end and not change the outer allocation status of mtxSp.
+	 Shall work even if either of the two matrices is in-place.
+	 Can only work if matrices match in size (nonzeroes, rows, columns, ...).
+
+	 Untested.
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxDp = NULL;
+	struct rsb_mtx_t *fsmS = NULL;
+	struct rsb_mtx_t *fsmD = NULL;
+	void * VS = NULL,* VD = NULL;
+	rsb_coo_idx_t * IS = NULL, *JS = NULL;
+	rsb_coo_idx_t * ID = NULL, *JD = NULL;
+	rsb_long_t smc = 0;
+
+	if( ( !mtxDpp) || ( !*mtxDpp) || (!mtxSp) )
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err, RSB_ERRM_E_MTXAP);
+	}
+
+	mtxDp = *mtxDpp;
+
+	if( ! ( RSB_ARE_MTX_COO_CONFORMANT( mtxSp, mtxDp ) ) )
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err, RSB_ERRM_ES);
+	}
+
+#if 0
+	RSB_STDOUT("		==== CLONING: ==== \n");
+	RSB_STDOUT("will transplant: \n");
+	RSB_STDOUT(RSB_PRINTF_MTX_SUMMARY_ARGS(mtxSp)),
+	RSB_STDOUT("to: \n");
+	RSB_STDOUT(RSB_PRINTF_MTX_SUMMARY_ARGS(mtxDp)),
+	RSB_STDOUT("\n");
+	RSB_STDOUT("S ip: : %x \n",(RSB_DO_FLAG_HAS(mtxSp->flags,RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS)));
+	RSB_STDOUT("D ip: : %x \n",(RSB_DO_FLAG_HAS(mtxDp->flags,RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS)));
+#endif
+
+	fsmS = rsb__do_get_first_submatrix(mtxSp);
+	fsmD = rsb__do_get_first_submatrix(mtxDp);
+	VS = fsmS->VA, VD = fsmD->VA;
+	IS = fsmS->bpntr, JS = fsmS->bindx;
+	ID = fsmD->bpntr, JD = fsmD->bindx;
+
+	errval = rsb_util_coo_copy(VS, IS, JS, VD, ID, JD, mtxSp->nnz, mtxSp->typecode, 0, 0);
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+
+	// correct mtxSp pointers recursively with the three offsets
+
+	/* get rid of the source COO arrays */
+	if(RSB_DO_FLAG_HAS(mtxSp->flags, RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS))
+	{	
+		fsmS->VA = NULL;
+		fsmS->bindx = NULL;
+		fsmS->bpntr = NULL;
+	}
+	else
+	{
+		RSB_CONDITIONAL_FREE(fsmS->VA);
+		RSB_CONDITIONAL_FREE(fsmS->bindx);
+		RSB_CONDITIONAL_FREE(fsmS->bpntr);
+	}
+	smc = 1 + rsb__submatrices_max_ptr_diff(mtxSp);
+
+	if(RSB_DO_FLAG_HAS(mtxDp->flags, RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS))
+		RSB_DO_FLAG_ADD(mtxSp->flags, RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS);
+	else
+		; /* mtxSp will retain its original flags in this regard */
+
+	/* set new matrix arrays by translating the submatrix pointers */
+	rsb__do_set_in_place_submatrices_offsets(mtxSp, smc, VD, ID, JD, mtxDp->el_size);
+
+	/* mtxDp->all_leaf_matrices shall be ok... */
+
+	/* free now unnecessary original destination matrix pointer */
+	fsmD->VA = NULL;
+	fsmD->bindx = NULL;
+	fsmD->bpntr = NULL;
+	RSB_MTX_FREE(mtxDp);
+
+	/* overwrite the output pointer */
+	mtxDp = mtxSp;
+	*mtxDpp = mtxDp;
+	mtxSp = NULL;
+
+#if 0
+	RSB_STDOUT("obtained: \n");
+	RSB_STDOUT(RSB_PRINTF_MTX_SUMMARY_ARGS(mtxDp)),
+	RSB_STDOUT("\n");
+#endif
+err:
+	return errval;
+}
+
+#if 0
+static rsb_err_t rsb_do_assign(struct rsb_mtx_t * mtxBp, const struct rsb_mtx_t * mtxAp)
+{
+	rsb_submatrix_idx_t i,j;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * submatrix=NULL;
+
+	if(!mtxBp || !mtxAp)
+		goto err;
+
+	rsb__destroy_inner(mtxBp);
+
+	rsb_memcpy(mtxBp,mtxAp,sizeof(*mtxAp));
+	rsb__init_blank_pointers(mtxBp);
+
+	if(rsb__clone_inner(mtxAp,mtxBp)==NULL)
+		goto err;
+
+	RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+	if(submatrix)
+	{
+		if((mtxBp->sm[i*2+j]=rsb__clone_simple(submatrix))==NULL)
+			goto err;
+	}
+
+#if RSB_EXPERIMENTAL_SHOULD_TRAVERSE_RECURSIVE_MATRICES_AS_BLOCKS
+	if(mtxAp->all_leaf_matrices)
+	{
+		mtxBp->all_leaf_matrices=NULL;
+		errval = rsb__get_array_of_leaf_matrices(mtxBp,&mtxBp->all_leaf_matrices,&mtxBp->all_leaf_matrices_n);
+		if(RSB_SOME_ERROR(errval))
+			goto errr;
+	}
+#endif /* RSB_EXPERIMENTAL_SHOULD_TRAVERSE_RECURSIVE_MATRICES_AS_BLOCKS */
+	goto errr;
+err:
+	errval = RSB_ERR_GENERIC_ERROR;
+errr:
+	return errval;
+}
+#endif
+
+size_t rsb__submatrices_max_ptr_diff(const struct rsb_mtx_t * mtxAp)
+{
+	/*!
+	 * 	\ingroup gr_internals
+	 * 	Note: this only makes sense if submatrices are allocated in one block.
+	 */
+	size_t md = 0;
+	rsb_submatrix_idx_t i,j;
+	const struct rsb_mtx_t * submatrix;
+
+	if(!mtxAp)
+	{
+		return 0;
+	}
+
+	RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+	{
+		if(submatrix)
+		{
+			size_t sd = rsb__submatrices_max_ptr_diff(submatrix);
+			md = RSB_MAX(md,sd+(submatrix-mtxAp));
+		}
+	}
+	return md;
+}
+
+static void * rsb__clone_area_guided(void * RSB_RESTRICT dst, const void *RSB_RESTRICT src, size_t size, size_t nmemb, const struct rsb_mtx_t *RSB_RESTRICT mtxAp, const rsb_thread_t * RSB_RESTRICT cta, const rsb_thread_t nct, rsb_err_t * errvalp)
+{
+	/*
+		Initializes, possibly allocating and/or copying in parallel, using the specified chunk sizes and thread array.
+		If dst is supplied, will use it; otherwise will allocate one.
+		If src is supplied, will use it; otherwise will only zero the arrays.
+		If mtxAp == NULL, then cta == NULL && nct == 0 must hold as well.
+		Returns either dst or the address of the newly allocated area.
+	*/
+	rsb_err_t errval = RSB_ERR_BADARGS;
+
+	if(size*nmemb == 0)
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	if(mtxAp == NULL && (cta != NULL || nct != 0) )
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	if(dst == NULL && ( dst = rsb__malloc(size*nmemb) ) == NULL )
+	{
+		errval = RSB_ERR_ENOMEM;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+	if(mtxAp == NULL)
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+	{
+		/* master thread */
+		RSB_A_MEMCPY(dst,src,0,0,nmemb,size);
+		errval = RSB_ERR_NO_ERROR;
+		goto done;
+	}
+
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+	if(cta == NULL)
+	{
+		RSB_DEBUG_ASSERT((mtxAp)->all_leaf_matrices_n);
+#pragma omp parallel shared(mtxAp) 
+{
+	rsb_submatrix_idx_t smi; /* submatrix index */
+	rsb_thread_t omt = omp_get_max_threads(), otn = omp_get_thread_num();
+	/* auto: each submatrix a round robin thread */
+	for(smi=0;smi<mtxAp->all_leaf_matrices_n;++smi) /* FIXME: make this an OpenMP-friendly macro */
+	if( ( smi % omt ) == otn )
+	{
+		const struct rsb_mtx_t * submatrix = mtxAp->all_leaf_matrices[smi].mtxlp;
+		size_t off = submatrix->nzoff;
+		rsb_nnz_idx_t nnz = submatrix->nnz;
+
+		if(src)
+			RSB_A_MEMCPY(dst,src,off,off,nnz,size);
+		else
+			RSB_A_BZERO(dst,off,nnz,size);
+	}
+}
+#pragma omp barrier
+		errval = RSB_ERR_NO_ERROR;
+		goto done;
+	}
+
+#pragma omp parallel shared(mtxAp) RSB_NTC 
+{
+	/* guided: each submatrix a specified thread */
+	rsb_thread_t otn = omp_get_thread_num();
+	rsb_submatrix_idx_t cti; /* thread index */
+
+	for(cti=0;cti<nct;++cti)
+	if( otn == cta[cti] )
+	{
+		const struct rsb_mtx_t * submatrix = mtxAp->all_leaf_matrices[cti].mtxlp;
+		size_t off = submatrix->nzoff;
+		rsb_nnz_idx_t nnz = submatrix->nnz;
+
+		if(src)
+			RSB_A_MEMCPY(dst,src,off,off,nnz,size);
+		else
+			RSB_A_BZERO(dst,off,nnz,size);
+	}
+}
+#pragma omp barrier
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+	errval = RSB_ERR_NO_ERROR;
+	goto done;
+err:
+	dst = NULL;
+done:
+	/* FIXME: errval unused so far */
+	RSB_CONDITIONAL_ERRPSET(errvalp,errval);
+	return dst;
+}
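+
+/* Editor's note: the "auto" path above maps leaf submatrix smi to thread
+ * smi % omt, round robin; a minimal standalone sketch of that mapping follows
+ * (hypothetical names, not librsb code; it mirrors the library's use of
+ * omp_get_max_threads() inside the parallel region). */
+#if 0
+#include <omp.h>
+#include <stdio.h>
+static void round_robin_demo(const int nleaves)
+{
+	#pragma omp parallel
+	{
+		const int omt = omp_get_max_threads(), otn = omp_get_thread_num();
+		int smi;
+
+		for(smi=0;smi<nleaves;++smi)
+			if( ( smi % omt ) == otn )	/* leaf smi is served by thread smi % omt */
+				printf("leaf %d -> thread %d\n",smi,otn);
+	}
+}
+#endif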
+
+struct rsb_mtx_t *rsb__clone_simple_extra(const struct rsb_mtx_t *mtxAp, rsb_submatrix_idx_t esmc)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * Clones a whole matrix, retaining the same submatrices structure.
+	 * TODO: need better error handling.
+	 * FIXME : Unfinished: only the RSB_FLAG_ASSEMBLED_IN_COO_ARRAYS case is handled.
+	 * TODO: rename from rsb__clone_simple_extra to rsb__mtx_clone_simple_extra.
+	 */
+	struct rsb_mtx_t *mtxCp = NULL;
+	rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+#if RSB_ALLOW_INTERNAL_GETENVS
+	rsb_time_t ct = RSB_TIME_ZERO;
+	rsb_time_t mact = RSB_TIME_ZERO;
+#endif /* RSB_ALLOW_INTERNAL_GETENVS */
+
+	if(!mtxAp)
+	{
+		RSB_PERR_GOTO(nerr,RSB_ERRM_ES);
+	}
+
+	flags = mtxAp->flags;
+#if RSB_ALLOW_INTERNAL_GETENVS
+	ct = -rsb_time();
+#endif /* RSB_ALLOW_INTERNAL_GETENVS */
+	RSB_DO_FLAG_DEL(flags,RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS);
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_ASSEMBLED_IN_COO_ARRAYS))
+	{
+		if(rsb__is_root_matrix(mtxAp))
+		{
+			/* this is a trick: fitting the whole recursive matrix in three arrays */
+			/* void *IA = NULL,*JA = NULL,*VA = NULL; */
+			const rsb_nnz_idx_t nnz = mtxAp->nnz;
+			/* rsb_bool_t is_bio = rsb__do_is_matrix_binary_loaded(mtxAp);*/ /* binary I/O matrix (20120930 FIXME why is this unused ?) */
+			/* rsb_long_t smc = rsb__submatrices(mtxAp); */
+			rsb_long_t smc = 1 + rsb__submatrices_max_ptr_diff(mtxAp);
+			const struct rsb_mtx_t *fsm = rsb__do_get_first_submatrix(mtxAp);
+			rsb_err_t errval = RSB_ERR_NO_ERROR;
+			void * VA = NULL;
+			rsb_coo_idx_t * bpntr = NULL, * bindx = NULL;
+
+			RSB_DEBUG_ASSERT(1+rsb__submatrices_max_ptr_diff(mtxAp)>=rsb__submatrices(mtxAp));
+			
+			/* RSB_STDOUT("MAX PTR DIFF: %d  SUBM COUNT:%d\n",1+rsb__submatrices_max_ptr_diff(mtxAp), rsb__submatrices(mtxAp)); */
+
+			/* mtxCp = rsb__clone_area(mtxAp,sizeof(struct rsb_mtx_t)*(smc+esmc)); */
+			mtxCp = rsb__clone_area_with_extra(mtxAp,sizeof(struct rsb_mtx_t)*(smc),0,sizeof(struct rsb_mtx_t)*(esmc));
+
+			if(!mtxCp)
+			{
+				RSB_PERR_GOTO(nerr,RSB_ERRM_PAL);
+			}
+			
+			errval = rsb__mtx_shift_leaf_ptrs(mtxCp, mtxAp, smc);
+
+			mtxCp->all_leaf_matrices = NULL;
+#if 0
+			errval = rsb__get_array_of_leaf_matrices(mtxCp,&(mtxCp->all_leaf_matrices),&(mtxCp->all_leaf_matrices_n));
+#else
+			mtxCp->all_leaf_matrices = rsb__clone_area_with_extra(mtxAp->all_leaf_matrices,sizeof(mtxAp->all_leaf_matrices[0])*(mtxCp->all_leaf_matrices_n),0,0);
+			if( mtxCp->all_leaf_matrices == NULL )
+			{
+				errval = RSB_ERR_ENOMEM;
+			}
+			else
+			{
+				rsb_submatrix_idx_t si;
+				for(si=0;si<mtxCp->all_leaf_matrices_n;++si)
+					RSB_PTR_SHIFT(mtxCp->all_leaf_matrices[si].mtxlp,mtxAp,mtxCp,(struct rsb_mtx_t*));
+			}
+#endif
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_PERR_GOTO(nerr,RSB_ERRM_NAOL);
+			}
+			/* FIXME: and what if nnz==0 and VA!=NULL ? */
+			mtxCp->bindx = NULL; mtxCp->bpntr = NULL; mtxCp->VA = NULL;
+			if(nnz)
+			{
+#if RSB_ALLOW_INTERNAL_GETENVS
+				mact = -rsb_time();
+#endif /* RSB_ALLOW_INTERNAL_GETENVS */
+				RSB_ASSERT( fsm->el_size );
+				RSB_ASSERT( fsm->bindx );
+				RSB_ASSERT( fsm->bpntr );
+				RSB_ASSERT( fsm->VA );
+
+#if RSB_WANT_SM_TO_THREAD_MOD_MAPPING
+			if( 1 /*rsb__util_atoi(getenv("RSB_CLONE_SERIAL")) */ )
+			{
+				/* rsb_thread_t nct = (rsb__util_atoi(getenv("RSB_CLONE_SERIAL"))) - 1; */
+				rsb_thread_t nct = 0;
+				bindx = rsb__clone_area_guided(NULL,fsm->bindx,sizeof(rsb_coo_idx_t),nnz,mtxAp,NULL,nct,&errval);
+				bpntr = rsb__clone_area_guided(NULL,fsm->bpntr,sizeof(rsb_coo_idx_t),nnz,mtxAp,NULL,nct,&errval);
+				VA    = rsb__clone_area_guided(NULL,fsm->VA   ,mtxAp->el_size,       nnz,mtxAp,NULL,nct,&errval);
+			}
+			else
+#endif
+			{
+				bindx = rsb__clone_area_parallel(fsm->bindx,sizeof(rsb_coo_idx_t),nnz);
+				bpntr = rsb__clone_area_parallel(fsm->bpntr,sizeof(rsb_coo_idx_t),nnz);
+				VA = rsb__clone_area_parallel(fsm->VA,mtxAp->el_size,nnz);
+			}
+
+#if RSB_ALLOW_INTERNAL_GETENVS
+				mact += rsb_time();
+#endif /* RSB_ALLOW_INTERNAL_GETENVS */
+				if(!bindx || !bpntr || !VA || !mtxAp->el_size)
+				{
+					RSB_ASSERT( mtxCp->el_size );
+					RSB_ASSERT( bindx );
+					RSB_ASSERT( bpntr );
+					RSB_ASSERT( VA );
+					RSB_PERR_GOTO(ierr,RSB_ERRM_NNTC);
+				}
+			}
+#if !RSB_ALLOW_EMPTY_MATRICES
+			else
+			if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_UNIT_DIAG_IMPLICIT))
+			{
+				// ok
+			}
+			else
+			{
+				RSB_PERR_GOTO(ierr,RSB_ERRM_NDIANN);
+			}
+#endif /* RSB_ALLOW_EMPTY_MATRICES */
+			rsb__do_set_in_place_submatrices_offsets(mtxCp,smc,VA,bpntr,bindx,mtxCp->el_size);
+
+			if(!smc)
+				mtxCp->bindx = bindx,
+				mtxCp->bpntr = bpntr,
+				mtxCp->VA = VA;
+
+			/* note: the cloned matrix won't have the is_bio property */
+			goto ret;
+ierr:
+			RSB_ERROR(RSB_ERRM_ES);
+			if(mtxCp)
+			{
+				RSB_CONDITIONAL_FREE(mtxCp->bpntr);
+				RSB_CONDITIONAL_FREE(mtxCp->bindx);
+				RSB_CONDITIONAL_FREE(mtxCp->VA);
+				RSB_CONDITIONAL_FREE(mtxCp->all_leaf_matrices);
+			}
+			RSB_CONDITIONAL_FREE(mtxCp);
+			goto ret;
+		}
+		else
+		{
+			RSB_PERR_GOTO(ret,"no cloning possible for a non root matrix!\n");
+			/* no cloning for a non root */
+		}
+	}
+	else
+	{
+		/* we allocate a new matrix structure */
+		mtxCp = rsb__clone_area(mtxAp,sizeof(*mtxCp));
+	
+		if(!mtxCp)
+			{RSB_PERR_GOTO(err,RSB_ERRM_ES);}
+	
+		rsb__init_blank_pointers(mtxCp);
+
+		RSB_MTX_REASSIGN(mtxCp,(struct rsb_mtx_t*)mtxAp);
+		goto ret;
+	}
+err:
+	RSB_MTX_FREE(mtxCp);
+nerr:
+	RSB_CONDITIONAL_FREE(mtxCp);
+ret:
+#if RSB_ALLOW_INTERNAL_GETENVS
+	ct += rsb_time();
+	if( rsb__util_atoi(getenv("RSB_MTX_CLONE_STATS") ) != 0)
+	if(mtxCp)
+	{
+		size_t szv = rsb__get_sizeof(mtxCp);
+		RSB_STDOUT("Cloned a %zd nnz, %zd bytes matrix in %0.2lgs (%0.3lg MiB/s x 2 = r+w); of which %0.2lgs for the main arrays.\n",
+				(size_t)(mtxCp->nnz),szv,ct,(((rsb_time_t)szv)/ct)/RSB_MEGABYTE,mact);
+	}
+#endif /* RSB_ALLOW_INTERNAL_GETENVS */
+ 	if(mtxCp)
+		RSB_DO_FLAG_DEL(mtxCp->flags,RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS);
+	return mtxCp;
+}
+
+struct rsb_mtx_t *rsb__clone_simple(const struct rsb_mtx_t *mtxAp)
+{
+	/* TODO: rename from rsb__clone_simple to rsb__mtx_clone_simple */
+	return rsb__clone_simple_extra(mtxAp, 0);
+}
+
+rsb_err_t rsb__clone_coo(const struct rsb_mtx_t * mtxAp, rsb_trans_t transA, const void *alphap, rsb_type_t typecode, struct rsb_coo_matrix_t*dcoop, rsb_flags_t flags/*, rsb_extff_t eflags*/)
+{
+	/* 
+	   TODO: may integrate Fortran index handling here, and so on
+	   TODO: missing checks for index overflow
+	   TODO: shall handle the 'sorted' flags appropriately
+	*/
+	rsb_flags_t cflags = RSB_FLAG_NOFLAGS;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t dels = 0;
+	rsb_coo_idx_t ioff,joff;
+	struct rsb_coo_matrix_t dcoo,scoo;
+	rsb_bool_t expsymm = RSB_BOOL_FALSE, expherm = RSB_BOOL_FALSE;
+
+	RSB_BZERO_P(&scoo);
+	RSB_BZERO_P(&dcoo);
+	ioff = joff = ( flags & RSB_FLAG_FORTRAN_INDICES_INTERFACE )?1:0;
+	scoo.nr = dcoo.nr = mtxAp->nr;
+	scoo.nc = dcoo.nc = mtxAp->nc;
+	dcoo.nnz = scoo.nnz = mtxAp->nnz;
+	expsymm = (RSB_DO_FLAG_HAVE_XOR(flags,mtxAp->flags,RSB_FLAG_SYMMETRIC) && !RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_DIAGONAL));
+	expherm = (RSB_DO_FLAG_HAVE_XOR(flags,mtxAp->flags,RSB_FLAG_HERMITIAN) && !RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_DIAGONAL));
+	if(expsymm || expherm)
+		dcoo.nnz *= 2;/* of course, this is overkill in the case of a diagonal matrix */
+	if(RSB_DO_FLAG_HAVE_XOR(flags,mtxAp->flags,RSB_FLAG_UNIT_DIAG_IMPLICIT))
+		dels = RSB_MIN(dcoo.nr,dcoo.nc);
+	dcoo.nnz += dels;
+	//if(dels)
+	//	RSB_STDOUT("on diag: %d\n",dels);
+
+	scoo.typecode = mtxAp->typecode;
+	dcoo.typecode = typecode;
+	if(dcoo.nnz>0)
+	{
+		if(rsb__allocate_coo_matrix_t(&dcoo)!=&dcoo)
+		{
+			errval = RSB_ERR_INTERNAL_ERROR;
+			RSB_PERR_GOTO(ierr,RSB_ERRM_PAL);
+	       	}
+		if(rsb__allocate_coo_matrix_t(&scoo)!=&scoo)
+		{
+			errval = RSB_ERR_INTERNAL_ERROR;
+			RSB_PERR_GOTO(ierr,RSB_ERRM_PAL);
+	       	}
+		errval = rsb__do_get_coo_noalloc(mtxAp,scoo.VA,dcoo.IA,dcoo.JA,NULL,/*mtxAp->*/flags);
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_PERR_GOTO(ierr,RSB_ERRM_NL);
+		}
+		errval = rsb__do_copy_converted_scaled(scoo.VA,dcoo.VA,alphap,mtxAp->typecode,typecode,mtxAp->nnz,transA);
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_PERR_GOTO(ierr,RSB_ERRM_NL);
+	       	}
+		if(expsymm || expherm)
+			RSB_COO_MEMCPY(dcoo.VA,dcoo.IA,dcoo.JA,dcoo.VA,dcoo.JA,dcoo.IA,scoo.nnz,0,scoo.nnz,RSB_SIZEOF(typecode));
+		if(expherm)
+			rsb__util_do_conjugate(((rsb_byte_t*)(dcoo.VA))+(RSB_SIZEOF(typecode)*scoo.nnz),typecode,scoo.nnz);
+		if(RSB_DO_FLAG_HAVE_XOR(flags,mtxAp->flags,RSB_FLAG_UNIT_DIAG_IMPLICIT))
+			rsb__do_fill_with_diag(dcoo.VA,dcoo.IA,dcoo.JA,ioff,joff,dcoo.nnz-dels,typecode,dels);
+		rsb__destroy_coo_matrix_t(&scoo);
+		RSB_BZERO_P(&scoo);
+	}
+
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER_TRIANGULAR))
+		RSB_DO_FLAG_ADD(cflags,RSB_FLAG_UPPER_TRIANGULAR);
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER_TRIANGULAR))
+		RSB_DO_FLAG_ADD(cflags,RSB_FLAG_LOWER_TRIANGULAR);
+	if(RSB_DO_FLAG_HAVE_XOR(flags,mtxAp->flags,RSB_FLAG_UNIT_DIAG_IMPLICIT))
+		RSB_DO_FLAG_ADD(cflags,RSB_FLAG_UNIT_DIAG_IMPLICIT);
+	if((cflags != RSB_FLAG_NOFLAGS) || expsymm || expherm)
+	{
+		rsb_util_sort_row_major_inner(dcoo.VA,dcoo.IA,dcoo.JA,dcoo.nnz,dcoo.nr,dcoo.nc,typecode,flags);
+		RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORTED_INPUT);
+		dcoo.nnz = rsb_weed_out_duplicates(dcoo.IA,dcoo.JA,dcoo.VA,dcoo.nnz,typecode,flags);
+		errval = rsb__do_cleanup_nnz(dcoo.VA,dcoo.IA,dcoo.JA,dcoo.nnz,0,0,dcoo.nr,dcoo.nc,&dcoo.nnz,dcoo.typecode,cflags); /* FIXME: are we using roff,coff well here ? */
+	}
+	if(RSB_SOME_ERROR(errval))
+	{
+		errval = RSB_ERR_INTERNAL_ERROR;
+		RSB_PERR_GOTO(ierr,RSB_ERRM_CP);
+       	}
+
+	if(RSB_DOES_TRANSPOSE(transA))
+		rsb__transpose_coo_matrix_t(&dcoo);
+	*dcoop = dcoo;
+ierr:
+	return errval;
+}
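+
+/* Editor's note: the nnz accounting above, as a worked example with
+ * hypothetical figures: cloning a 1000 x 1000 symmetric matrix holding 5000
+ * stored nonzeroes into unsymmetric storage, with an implicit unit diagonal
+ * made explicit, allocates 2*5000 + 1000 = 11000 entries; duplicates possibly
+ * introduced by the symmetric expansion are weeded out afterwards via
+ * rsb_weed_out_duplicates(). */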
+
+rsb_err_t rsb__clone(struct rsb_mtx_t ** mtxBpp, rsb_type_t typecode, rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp, rsb_flags_t flags)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * clones a rsb_mtx_t structure, deeply
+	 * TODO: rename rsb__clone -> rsb_cln ?
+	 * This routine may/shall be optimized in plenty of ways in the future.
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxCp = NULL;
+
+	if( (!mtxAp) || (!mtxBpp) )
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,"user did not supply a valid matrix pointer\n");
+	}
+
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS))
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,"user supplied wrong flags (RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS is illegal here)\n");
+	}
+
+	if( flags == RSB_FLAG_IDENTICAL_FLAGS )
+		flags = mtxAp->flags;
+	if(typecode == RSB_NUMERICAL_TYPE_SAME_TYPE)
+		typecode = mtxAp->typecode;
+	RSB_DO_FLAG_DEL(flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE);/* unnecessary here */
+	RSB_DO_FLAG_DEL(flags,RSB_FLAG_NON_ROOT_MATRIX);
+
+	/* what about RSB_FLAG_DISCARD_ZEROS ? */
+	/* what about many other structural flags ? */
+
+	if(((!alphap) || RSB_IS_ELEMENT_ONE(alphap,mtxAp->typecode)) &&
+			(flags == mtxAp->flags ) &&  /* FIXME: this condition is unnecessarily strict and causes inefficiencies */
+		/* RSB_DOES_NOT_TRANSPOSE(transA) && */ (typecode==mtxAp->typecode) )
+	{
+		if( (*mtxBpp) != mtxAp)
+			mtxCp = rsb__clone_simple(mtxAp);
+		else
+			mtxCp = *mtxBpp;
+		if( transA == RSB_TRANSPOSITION_C )
+			errval = rsb__do_transpose(&mtxCp,RSB_BOOL_TRUE);
+		else
+		if( transA == RSB_TRANSPOSITION_T )
+			errval = rsb__do_transpose(&mtxCp,RSB_BOOL_FALSE);
+		if( (*mtxBpp) == mtxAp)
+		{
+			*mtxBpp = mtxCp;
+			goto ok;
+		}
+	}
+	else
+	{
+		struct rsb_coo_matrix_t dcoo;
+		RSB_BZERO_P(&dcoo);
+#if 0
+		struct rsb_coo_matrix_t scoo;
+		RSB_BZERO_P(&scoo);
+		scoo.nr = dcoo.nr = mtxAp->nr;
+		scoo.nc = dcoo.nc = mtxAp->nc;
+		dcoo.nnz = scoo.nnz = mtxAp->nnz;
+		scoo.typecode = mtxAp->typecode;
+		dcoo.typecode = typecode;
+		if(mtxAp->nnz>0)
+		{
+		if(rsb__allocate_coo_matrix_t(&dcoo)!=&dcoo)
+		{
+		       	errval = RSB_ERR_INTERNAL_ERROR;
+			RSB_PERR_GOTO(ierr,RSB_ERRM_PAL);
+		}
+		if(rsb__allocate_coo_matrix_t(&scoo)!=&scoo)
+		{
+			errval = RSB_ERR_INTERNAL_ERROR;
+			RSB_PERR_GOTO(ierr,RSB_ERRM_PAL);
+		}
+		errval = rsb__do_get_coo_noalloc(mtxAp,scoo.VA,dcoo.IA,dcoo.JA,NULL,mtxAp->flags);
+		rsb__do_copy_converted_scaled(scoo.VA,dcoo.VA,alphap,mtxAp->typecode,typecode,mtxAp->nnz,transA);
+		rsb__destroy_coo_matrix_t(&scoo);
+		RSB_BZERO_P(&scoo);
+		}
+		if(RSB_DOES_TRANSPOSE(transA))
+			rsb__transpose_coo_matrix_t(&dcoo);
+#else
+		errval = rsb__clone_coo(mtxAp,transA,alphap,typecode,&dcoo,flags);
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_PERR_GOTO(ierr,RSB_ERRM_NL);
+	       	}
+#endif
+		mtxCp = rsb__do_mtx_alloc_from_coo_inplace(dcoo.VA,dcoo.IA,dcoo.JA,dcoo.nnz,dcoo.typecode,dcoo.nr,dcoo.nc,RSB_DEFAULT_ROW_BLOCKING,RSB_DEFAULT_COL_BLOCKING,flags,NULL);
+		if(mtxCp)
+			RSB_DO_FLAG_DEL(mtxCp->flags,RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS);
+ierr:
+		if(!mtxCp)
+			rsb__destroy_coo_matrix_t(&dcoo);
+	}
+
+	if( (*mtxBpp) == NULL )
+	{
+		*mtxBpp = mtxCp;
+	}
+	else
+	{
+		RSB_MTX_REASSIGN(*mtxBpp,mtxCp);
+	}
+ok:
+err:
+	return errval;
+}
+
+#if 0
+void * rsb__clone_inner(const struct rsb_mtx_t *mtxAp, struct rsb_mtx_t *mtxCp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * clones a rsb_mtx_t structure, deeply
+	 *
+	 * \param matrix valid matrix pointer (to an empty mtxAp)
+	 * \param mtxCp valid matrix pointer
+	 * \return a pointer to the cloned structure (mtxCp) in case of success, NULL otherwise
+	 *
+	 * \note matrix flags are largely ignored in this function.
+	 **/
+
+	if(!mtxAp || !mtxCp)
+	{RSB_PERR_GOTO(err,RSB_ERRM_ES);}
+
+#if RSB_WANT_BITMAP
+	/* we allocate a new options structure */
+	mtxCp->options = rsb__clone_options_t(mtxAp->options,mtxAp->M_b,mtxAp->K_b);
+
+	if(! mtxCp->options && mtxAp->options )
+	{RSB_PERR_GOTO(err_opt,RSB_ERRM_ES);}
+#endif /* RSB_WANT_BITMAP */
+	if( mtxAp->rpntr && (mtxAp->flags & RSB_FLAG_OWN_PARTITIONING_ARRAYS))
+	{
+		mtxCp->rpntr = rsb__clone_area(mtxAp->rpntr,sizeof(rsb_coo_idx_t)*(mtxAp->M_b+1));
+		if(!mtxCp->rpntr)
+		{RSB_PERR_GOTO(err_rpntr,RSB_ERRM_ES);}
+	}
+	else
+		mtxCp->rpntr = mtxAp->rpntr;
+
+	if( mtxAp->cpntr && (mtxAp->flags & RSB_FLAG_OWN_PARTITIONING_ARRAYS))
+	{
+		mtxCp->cpntr = rsb__clone_area(mtxAp->cpntr,sizeof(rsb_coo_idx_t)*(mtxAp->K_b+1));
+		if(!mtxCp->cpntr)
+		{RSB_PERR_GOTO(err_cpntr,RSB_ERRM_ES);}
+	}
+	else
+		mtxCp->cpntr = mtxAp->cpntr;
+
+	if( mtxAp->bindx)
+	{
+		mtxCp->bindx = rsb__clone_area(mtxAp->bindx,sizeof(rsb_nnz_idx_t)*(mtxAp->block_count+1));
+		if(!mtxCp->bindx)
+			{RSB_PERR_GOTO(err_bindx,RSB_ERRM_ES);}
+	}
+
+	if( mtxAp->indptr)
+	{
+		mtxCp->indptr = rsb__clone_area(mtxAp->indptr,sizeof(rsb_nnz_idx_t)*(mtxAp->block_count+1));
+		if(!mtxCp->indptr)
+			{RSB_PERR_GOTO(err_indptr,RSB_ERRM_ES);}
+	}
+
+	if( mtxAp->bpntr)
+	{
+		mtxCp->bpntr = rsb__clone_area(mtxAp->bpntr,sizeof(rsb_nnz_idx_t)*(mtxAp->Mdim+1));
+		if(!mtxCp->bpntr)
+			{RSB_PERR_GOTO(err_bpntr,RSB_ERRM_ES);}
+	}
+
+#if RSB_WANT_BITMAP
+	if( mtxAp->VA)
+	{
+		mtxCp->VA = rsb__clone_area(mtxAp->VA,(RSB_TOTAL_BLOCK_BYTES(mtxAp,mtxAp->options)));
+		if(!mtxCp->VA)
+			{RSB_PERR_GOTO(err_va,RSB_ERRM_ES);}
+	}
+#endif
+	goto ret;
+
+#if RSB_WANT_BITMAP
+err_va:
+	if( mtxAp->VA)
+		RSB_CONDITIONAL_FREE(mtxCp->VA);
+#endif
+err_bpntr:
+	if( mtxAp->bpntr )
+		RSB_CONDITIONAL_FREE(mtxCp->bpntr);
+err_indptr:
+	if( mtxAp->indptr )
+		RSB_CONDITIONAL_FREE(mtxCp->indptr);
+err_bindx:
+	if( mtxAp->bindx )
+		RSB_CONDITIONAL_FREE(mtxCp->bindx);
+err_cpntr:
+	if( mtxAp->cpntr && (mtxAp->flags & RSB_FLAG_OWN_PARTITIONING_ARRAYS))
+		RSB_CONDITIONAL_FREE(mtxCp->cpntr);
+err_rpntr:
+	if( mtxAp->rpntr && (mtxAp->flags & RSB_FLAG_OWN_PARTITIONING_ARRAYS))
+		RSB_CONDITIONAL_FREE(mtxCp->rpntr);
+#if RSB_WANT_BITMAP
+err_opt:
+	RSB_CONDITIONAL_FREE(mtxCp->options);
+#endif /* RSB_WANT_BITMAP */
+err:
+	mtxCp = NULL;
+ret:
+	return mtxCp;
+}
+#endif
+
+/* @endcond */
diff --git a/rsb_clone.h b/rsb_clone.h
new file mode 100644
index 0000000..edef52f
--- /dev/null
+++ b/rsb_clone.h
@@ -0,0 +1,54 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*
+ * @author Michele Martone
+ */
+#ifndef RSB_CLONE_H_INCLUDED
+#define RSB_CLONE_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "rsb_common.h"
+
+void * rsb__clone_area(const void *src, size_t size);
+rsb_err_t rsb_util_coo_alloc(void **RSB_RESTRICT VAp, rsb_coo_idx_t ** RSB_RESTRICT IAp, rsb_coo_idx_t ** RSB_RESTRICT JAp, rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_bool_t do_calloc);
+rsb_err_t rsb_util_coo_alloc_copy_and_stats(void **RSB_RESTRICT VAp, rsb_coo_idx_t ** RSB_RESTRICT IAp, rsb_coo_idx_t ** RSB_RESTRICT JAp, const void *RSB_RESTRICT VA, const rsb_coo_idx_t * RSB_RESTRICT IA, const rsb_coo_idx_t * RSB_RESTRICT JA, rsb_coo_idx_t*RSB_RESTRICT mp, rsb_coo_idx_t*RSB_RESTRICT kp, rsb_nnz_idx_t nnz, rsb_nnz_idx_t ennz, rsb_type_t typecode, const rsb_coo_idx_t offi, const rsb_coo_idx_t offo, rsb_flags_t iflags, rsb_flags_t*RSB_RESTRICT flagsp);
+void * rsb__clone_area_with_extra(const void *src, size_t csize, size_t bsize, size_t esize);
+/* void * rsb__clone_area_parallel(const void *src, size_t size); */
+struct rsb_mtx_t *rsb__clone_simple(const struct rsb_mtx_t *mtxAp);
+struct rsb_mtx_t *rsb__clone_simple_extra(const struct rsb_mtx_t *mtxAp, rsb_submatrix_idx_t esmc);
+rsb_err_t rsb__clone(struct rsb_mtx_t ** mtxBpp, rsb_type_t typecode, rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp, rsb_flags_t flags);
+void * rsb__clone_inner(const struct rsb_mtx_t *mtxAp, struct rsb_mtx_t *new_matrix);
+rsb_err_t rsb__clone_coo(const struct rsb_mtx_t * mtxAp, rsb_trans_t transA, const void *alphap, rsb_type_t typecode, struct rsb_coo_matrix_t*dcoop, rsb_flags_t flags/*, rsb_extff_t eflags*/);
+rsb_err_t rsb__mtx_transplant_from_clone(struct rsb_mtx_t ** mtxDpp, struct rsb_mtx_t * mtxSp);
+size_t rsb__submatrices_max_ptr_diff(const struct rsb_mtx_t * mtxAp);
+rsb_err_t rsb__mtx_shift_leaf_ptrs(struct rsb_mtx_t *RSB_RESTRICT  mtxCp, const struct rsb_mtx_t *RSB_RESTRICT  mtxAp, long smc);
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+#endif /* RSB_CLONE_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_common.h b/rsb_common.h
new file mode 100644
index 0000000..888ad0f
--- /dev/null
+++ b/rsb_common.h
@@ -0,0 +1,1417 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/**
+ * @author Michele Martone
+ * @file
+ * @brief Low level routines and tools for our sparse matrix formats implementations.
+ */
+#ifndef RSB_COMMON_H_INCLUDED
+#define RSB_COMMON_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#ifdef __cplusplus
+#define restrict	/* for now, the restrict keyword is not allowed in C++ */
+#endif  /* __cplusplus */
+/**
+ *
+ * VBR internals the user should never use.
+ * This file's contents are not meant to be used as an API (Application Programming Interface).
+ * 
+ * Manipulate this file at your own risk.
+ *
+ */
+#include <stdlib.h>	/* bsearch, calloc, malloc */
+#include <stdio.h>	/* printf */
+
+#ifdef HAVE_CONFIG_H	/* hopefully, the only non-RSB_ prefixed symbol of ours */
+#define RSB_HAVE_CONFIG_H HAVE_CONFIG_H
+#endif /* HAVE_CONFIG_H */
+
+#ifdef RSB_HAVE_CONFIG_H		/* autotools makefiles define this */
+#include "rsb-config.h"		/* this should be the first include */
+#endif /* RSB_HAVE_CONFIG_H */
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+#include <omp.h>
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+#ifdef RSB_HAVE_UNISTD_H
+#include <unistd.h>	/* getopt, gethostname (some systems don't have them here!) */
+#endif /* RSB_HAVE_UNISTD_H */
+#ifdef RSB_HAVE_GETOPT_H 
+/* RSB_HAVE_GETOPT_LONG */
+#include <getopt.h>	/* getopt_long is not always available (e.g.: on AIX, or any non GNU system) */
+typedef struct option rsb_option;
+#else /* RSB_HAVE_GETOPT_H  */
+/*#undef required_argument*/
+/*#undef no_argument*/
+/*#undef optional_argument*/
+#define required_argument	0 /* ONLY A STUB */
+#define no_argument 		1 /* ONLY A STUB */
+#define optional_argument	2 /* ONLY A STUB */
+       extern char *optarg;
+       extern int optind, opterr, optopt;
+
+           struct rsb_option_struct {
+               const char *name;
+               int         has_arg;
+               int        *flag;
+               int         val;
+           };
+/*!
+ * \ingroup gr_internals
+ * \brief An internal, helper structure.
+ */
+typedef struct rsb_option_struct rsb_option;
+#endif /* RSB_HAVE_GETOPT_H */
+#include <ctype.h>	/*isdigit*/
+
+/*
+ * Comment / uncomment the following to your likes
+ */
+#define RSB_WITH_MM
+/*#undef RSB_WITH_MM*/
+
+#ifdef RSB_WITH_MM
+#include "rsb_mmio.h"
+#endif /* RSB_WITH_MM */
+
+
+/* the NDEBUG and DEBUG symbols will affect lot of checking code */
+/* these flags are NOT supported : they should be debugged :) */
+/*#define NDEBUG 1*/
+/*#undef  NDEBUG*/
+
+/*
+#ifdef NDEBUG
+#define DEBUG 1
+#endif
+*/
+
+/*!
+ \internal
+ For all the situations where 'int' would be used.
+ */
+typedef int rsb_int;
+
+/* some ill situations could give rise to the need for this */
+#if 0
+#ifndef NULL
+#define NULL ((VOID *)(0))
+#endif
+#endif
+#define RSB_NUL '\0'
+
+/*  #define RSB_INLINE inline	*/ /* experimental */
+#define RSB_INLINE 	/* experimental */
+
+/* if defined, fewer partitioning array allocations will occur for plain CSR and CSC matrices (EXPERIMENTAL) */
+#define RSB_WANT_EXPERIMENTAL_IN_PLACE_RECURSIVE	 0	/** EXPERIMENTAL */
+#define RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS 1	/** EXPERIMENTAL: should avoid extra (non BCSR-related) arrays*/
+#define RSB_WANT_EXPERIMENTAL_FILLIN_ESTIMATOR 2		/* */
+
+#define RSB_WANT_INDEX_BASED_SORT 1	/**< if defined, will enable a faster index+permutation based sorting (EXPERIMENTAL) */
+#define RSB_WANT_Z_SORT 1		/**< if 1, enable Z sorting at all */
+#define RSB_FORTRAN_VERBOSE_CALLS 0		/**< */
+#define RSB_EXPERIMENTAL_WANT_PURE_BCSS 0	/**< EXPERIMENTAL : for BCSR, will prevent from allocating VBR arrays */
+#define RSB_EXPERIMENTAL_USE_PURE_BCSS  1	/**< EXPERIMENTAL : for BCSR, will prevent from using VBR arrays   */
+#define RSB_EXPERIMENTAL_USE_PURE_BCSS_FOR_CONSTRUCTOR  1	/**< EXPERIMENTAL : for BCSR, will prevent from using VBR arrays   */
+
+#define RSB_WANT_OMP_RECURSIVE_SPSV						1 	/**< EXPERIMENTAL  */
+#define RSB_WANT_OMP_RECURSIVE_SPMV						1 	/**< EXPERIMENTAL  */
+#define RSB_EXPERIMENTAL_FORCE_ROW_SUBDIVISIONS_UNTIL_CORES_NUMBER	   	0 	/**< EXPERIMENTAL  */
+#define RSB_EXPERIMENTAL_ONE_SINGLE_LOCK_FOR_ALL_SUBMATRICES		   	(1&&RSB_WANT_OMP_RECURSIVE_KERNELS) 	/**< EXPERIMENTAL: incompatible with the prev.  */
+
+#define RSB_WANT_RSB_AS_ONLY_ALLOWED_FORMAT 1
+#define RSB_ALLOW_EMPTY_MATRICES 1
+
+#define RSB_EXPERIMENTAL_SHOULD_TRAVERSE_RECURSIVE_MATRICES_AS_BLOCKS   	(1&&RSB_WANT_RSB_AS_ONLY_ALLOWED_FORMAT ) 	/**< EXPERIMENTAL  */
+
+#define RSB_EXPERIMENTAL_SHOULD_TRAVERSE_RECURSIVE_MATRICES_AS_BLOCKS2   	(1&&RSB_WANT_OMP_RECURSIVE_KERNELS) 	/**< EXPERIMENTAL  */
+/* mutually exclusive options for : RSB_EXPERIMENTAL_SHOULD_TRAVERSE_RECURSIVE_MATRICES_AS_BLOCKS2   	 */
+#define RSB_EXPERIMENTAL_SHOULD_TRAVERSE_WITHOUT_BLOCKING		   	1 	/**< EXPERIMENTAL  */
+#define RSB_EXPERIMENTAL_ALTERNATING_SUBMATRIX_HEURISTIC			0	/**< EXPERIMENTAL  */
+#define RSB_EXPERIMENTAL_WORK_BALANCING_HEURISTIC				1	/**< EXPERIMENTAL  */
+
+#define RSB_EXPERIMENTAL_SHOULD_TRAVERSE_RECURSIVE_MATRICES_AS_LINKED_LIST	0 	/**< EXPERIMENTAL  */
+#define RSB_EXPERIMENTAL_SHOULD_TRAVERSE_RECURSIVE_MATRICES_AS_TREE		(0) 	/**< The standard mechanism, 2-partitioned.  */
+#define RSB_SHOULD_FAIL_INIT_IF_MEMHIER_DETECTION_FAILS		1 	/**<  */
+
+#define RSB_EXPERIMENTAL_NO_SUBDIVIDE_ON_MIN_NNZ_PER_ROW_OR_COLUMN		1 	/**< Block matrix subdivision under a threshold */
+#define RSB_EXPERIMENTAL_ROWS_SUBDIVIDE_TO_CORES_NUM				1 	/**< Subdivide to obtain no less matrices than cores */
+#define RSB_CONST_MIN_NNZ_PER_ROW_OR_COLUMN_PER_SUBMATRIX				3	/**< Lower threshold for nnz/m or nnz/k, for subdivision, should be determined heuristically  */
+
+#define RSB_EXPERIMENTAL_WANT_BEST_TIMES   1 	/**< EXPERIMENTAL  */
+#define RSB_EXPERIMENTAL_UNLIMITED_RECURSION  		0 	/**< EXPERIMENTAL  */
+#define RSB_EXPERIMENTAL_MORTON_ORDERED_RECURSION  	0 	/**< EXPERIMENTAL, UNFINISHED, just for demo purposes  */
+
+#define RSB_EXPERIMENTAL_QUAD_DIVISION_POLICY_NAIVE 		0	/**< EXPERIMENTAL */
+#define RSB_EXPERIMENTAL_QUAD_DIVISION_POLICY_UNSYMMETRIC 	1	/**< EXPERIMENTAL, UNIMPLEMENTED */
+#define RSB_EXPERIMENTAL_QUAD_DIVISION_POLICY 			RSB_EXPERIMENTAL_QUAD_DIVISION_POLICY_NAIVE
+
+#define RSB_CONST_IMPOSSIBLY_BIG_TIME   		1000000000 	/**< in seconds, used when computing 'minimum' running times. any measured time interval should be less than RSB_CONST_IMPOSSIBLY_BIG_TIME.  */
+#define RSB_MIN_ABOVE_INF(X,Y,MIN) RSB_MAX(RSB_MIN(X,Y),MIN)
+#define RSB_CONST_TIME_FOR_MICRO_BENCHMARK   		0.1 	/**<  */
+#define RSB_CONST_MIN_TIMES_FOR_MICRO_BENCHMARK   	10 	/**<  */
+
+#define RSB_EXPERIMENTAL_EXPAND_SYMMETRIC_MATRICES_BY_DEFAULT	0	/**< EXPERIMENTAL */
+#define RSB_WANT_BOUNDED_BOXES 			1
+#define RSB_WANT_BOUNDED_BOXES_SPMV 			(RSB_WANT_BOUNDED_BOXES && 1)
+#define RSB_WANT_BOUNDED_BOXES_SPSV 			(RSB_WANT_BOUNDED_BOXES && 1)
+#define RSB_WANT_EARLY_PARALLEL_REGION_JUMPOUT_SPMV	1
+#define RSB_WANT_SM_TO_THREAD_MOD_MAPPING		1
+#define RSB_WANT_EARLY_PARALLEL_REGION_JUMPOUT_SPSV	1
+
+#define RSB_WANT_BITMAP 0
+	/** if RSB_WANT_BITMAP, should add to rsb_mtx_t:
+	 * auxiliary data structures */
+	/*struct rsb_options_t *options;*/	/* FIXME: deprecated -- will be deleted soon */
+
+#define RSB_WANT_DEBUG_VERBOSE_INTERFACE_NOTICE	(/*RSB_OUT_ERR_VERBOSITY>0 &&*/ RSB_INT_ERR_VERBOSITY>0) 
+#define RSB_WANT_PERFORMANCE_FILE	0
+#if defined(RSB_HAVE_SIGNAL_H) /* && defined(RSB_HAVE_BITS_SIGACTION_H) */
+#define RSB_WANT_ACTION 1
+#else
+#define RSB_WANT_ACTION 0
+#endif
+/* #define RSB_WANT_ACTION RSB_ALLOW_INTERNAL_GETENVS */ /* 1 */
+#if RSB_WANT_ACTION
+#define RSB_SHALL_QUIT ( rsb__quit_rsbench != 0 )
+#define RSB_INTERNALS_RSBENCH_HEAD_DECLS extern int rsb__quit_rsbench;
+void rsb__sigh(int signal);
+#define RSB_SIGHR rsb__sigr();
+#else /* RSB_WANT_ACTION */
+#define RSB_SHALL_QUIT ( 0 != 0 )
+#define RSB_INTERNALS_RSBENCH_HEAD_DECLS
+#define RSB_SIGHR
+#endif /* RSB_WANT_ACTION */
+
+#include "rsb_struct.h"		/* */
+
+/* @cond INNERDOC */
+/*!
+ * \internal
+ * \ingroup gr_internals
+ * \brief An internal, helper structure (OBSOLETE).
+ *
+ * A rsb_options_t structure could keep track of helper information
+ * like :
+ *
+ * - desired nonzero pattern
+ * - pointers to optimal or desired functions for certain operations
+ * - ..
+ * - .. data which is perfectly optional
+ * */
+struct rsb_options_t{
+	/** An auxiliary bitmap */
+	rsb_bitmap_data_t *bitmap;
+	double a;
+};
+/* @endcond */
+
+
+#if  RSB_FORTRAN_VERBOSE_CALLS
+#define  RSB_FORTRAN_VERBOSE_CALL(M) RSB_ERROR(M)
+#else /* RSB_FORTRAN_VERBOSE_CALLS */
+#define  RSB_FORTRAN_VERBOSE_CALL(M)
+#endif /* RSB_FORTRAN_VERBOSE_CALLS */
+
+
+/*                                DEBUG FLAGS                                */
+/*
+	Enable any combination of the following flags to activate debug code.
+	Do this only when debugging/developing, because it will slow down the code a lot.
+*/
+
+#define RSB_WANT_DEBUG_PARANOID_ASSERTS 0	/**< if 1, will activate a number of assert() calls which won't change the code flow but will check for anomalous error conditions */
+#if RSB_WANT_DEBUG_PARANOID_ASSERTS
+#define RSB_DEBUG_ASSERT(e) assert(e)
+#else /* RSB_WANT_DEBUG_PARANOID_ASSERTS */
+#define RSB_DEBUG_ASSERT(e) 
+#endif /* RSB_WANT_DEBUG_PARANOID_ASSERTS */
+
+#define RSB_ASSERT(e) assert(e)		/* NOTE: in the future, could be replaced with some {if(..)goto err;} or exit()-like statement  */
+
+/* commented out 20120915, since it was not used anyway most of the time
+#ifdef DEBUG
+#define RSB_DEBUG_BITMAP 1
+#define RSB_DEBUG_BLOCK_STUFF 1
+#define RSB_DEBUG_SORT_STUFF 1
+#endif
+*/
+
+/*#define RSB_QUIET 1*/
+
+/* FIXME : TODO : join these macros as a single debug flag */
+#define RSB_MEM_DEBUG 		0	/* if 1, will trigger printouts on allocations and deallocations */
+#define RSB_QUIET_MEM_ERRORS	0	/* if 1, will not even print out fatal error conditions */
+
+/*                            END DEBUG FLAGS                                */
+
+
+/** Macros to check the validity of basic index values ( FIXME : unfinished, should be much stricter )  */
+#define RSB_INVALID_COO_INDEX(I)	((I)>RSB_MAX_MATRIX_DIM || (I)<0)	/* should fail only if signed */
+#define RSB_INVALID_NNZ_INDEX(I)	((I)>RSB_MAX_MATRIX_NNZ)
+#define RSB_INVALID_BLK_INDEX(I)	(RSB_INVALID_COO_INDEX(I))
+#define RSB_INVALID_NNZ_COUNT(I)	((I)<1L || ( RSB_NNZ_ADD_OVERFLOW((I),RSB_INDEX_OF_SAFE_EXTRA) ))
+#define RSB_INVALID_NNZ_COUNT_FOR_FLAGS(I,F) ((!RSB_DO_FLAG_HAS((F),RSB_FLAG_UNIT_DIAG_IMPLICIT)) && RSB_INVALID_NNZ_COUNT(I))
+
+#define RSB_DO_FLAG_HAVE_AND(V1,V2,F) RSB_BOOL_AND(RSB_DO_FLAG_HAS(V1,F),RSB_DO_FLAG_HAS(V2,F))
+#define RSB_DO_FLAG_HAVE_NAND(V1,V2,F) RSB_BOOL_NAND(RSB_DO_FLAG_HAS(V1,F),RSB_DO_FLAG_HAS(V2,F))
+#define RSB_DO_FLAG_HAVE_XOR(V1,V2,F) RSB_BOOL_XOR(RSB_DO_FLAG_HAS(V1,F),RSB_DO_FLAG_HAS(V2,F))
+#define RSB_DO_FLAG_HAVE_OR(V1,V2,F) RSB_BOOL_OR(RSB_DO_FLAG_HAS(V1,F),RSB_DO_FLAG_HAS(V2,F))
+#define RSB_DO_FLAG_HAVE_NOR(V1,V2,F) RSB_BOOL_NOR(RSB_DO_FLAG_HAS(V1,F),RSB_DO_FLAG_HAS(V2,F))
+#define RSB_DO_FLAG_SUBST(FLAGSVAR,FLAGS_OLD,FLAGS_NEW) RSB_DO_FLAG_DEL(FLAGSVAR,(FLAGS_OLD)), RSB_DO_FLAG_ADD(FLAGSVAR,(FLAGS_NEW))
+
+#define RSB_INVALID_COO_COUNT(I)	((I)<1L || ( RSB_COO_ADD_OVERFLOW((I),RSB_INDEX_OF_SAFE_EXTRA) ))
+#define RSB_IS_VALID_NNZ_COUNT(I)	(!RSB_INVALID_NNZ_COUNT(I))
+#define RSB_IS_VALID_COO_INDEX(I)	(!RSB_INVALID_COO_INDEX(I))
+#define RSB_IS_VALID_COO_DIM(I)		(!RSB_INVALID_COO_COUNT(I))
+#define RSB_IS_VALID_BLK_INDEX(I)	(!RSB_INVALID_BLK_INDEX(I))
+#define RSB_IS_VALID_NNZ_INDEX(I)	(!RSB_INVALID_NNZ_INDEX(I))
+#define RSB_IS_VALID_INCX_VALUE(I)	(!RSB_INVALID_NNZ_COUNT(I))
+#define RSB_ARE_VALID_MATRIX_INIT_PARS(R,C,NNZ,TYPE)	\
+	RSB_IS_VALID_NNZ_COUNT(NNZ)&&			\
+	RSB_IS_VALID_COO_INDEX(R)&&			\
+	RSB_IS_VALID_COO_INDEX(C)&&			\
+	(!RSB_MATRIX_UNSUPPORTED_TYPE(TYPE))
+/*#define RSB_IS_VALID_NNZ_SUM(NZ1,NZ2)	RSB_IS_VALID_NNZ_COUNT(((size_t)(NZ1))+((size_t)(NZ2)))*/
+#define RSB_IS_VALID_NNZ_SUM(NZ1,NZ2)	(((size_t)(RSB_MAX_MATRIX_NNZ))>=(((size_t)(NZ1))+((size_t)(NZ2))))
+#define RSB_IS_INVALID_TYPE_SIZE(TS) ((TS)<1)
+
+#define RSB_IS_VALID_TRANS(T)  ((T)>=RSB_MIN(RSB_MIN(RSB_TRANSPOSITION_T,RSB_TRANSPOSITION_C),(RSB_TRANSPOSITION_N)) && (T)<=RSB_MAX(RSB_MAX(RSB_TRANSPOSITION_T,RSB_TRANSPOSITION_C),(RSB_TRANSPOSITION_N))) /* */
+#define RSB_IS_VALID_THREAD_COUNT(C)	((C)> 0 && (C)<=RSB_CONST_MAX_SUPPORTED_CORES)	/* */
+#define RSB_IS_VALID_THREAD_SPEC(C)	((C)>=0 && (C)<=RSB_CONST_MAX_SUPPORTED_CORES)	/* */
+
+/** An initializer value for index variables. */
+#define RSB_INI ((rsb_coo_idx_t)(-1))
+
+
+#include <stdlib.h>		/* basic types and functions definitions */
+
+/*
+ * Bitmap stuff macros and functions.
+ * Uses column major order by default.
+ * By defining RSB_BITMAP_ROW_MAJOR_ORDER, row major order will be adopted instead.
+ *
+ * p.s.: please DO NOT use the following fixed macros outside the nearby macros.
+ * */
+/*#define RSB_BITMAP_ROW_MAJOR_ORDER 1*/
+#define RSB_BITS_PER_INT  	(sizeof(rsb_bitmap_data_t)*RSB_CHAR_BIT)
+#define RSB_BITS_PER_ROW(cols)  ((cols)+(RSB_BITS_PER_INT-1))
+#define RSB_BYTES_PER_ROW(cols) ((cols+(RSB_CHAR_BIT-1))/RSB_CHAR_BIT)
+#define RSB_INTS_PER_ROW(cols)   ((cols+((RSB_BITS_PER_INT)-1))/(RSB_BITS_PER_INT))
+#define RSB_INT_IN_ROW(cols)   ((cols)/(RSB_BITS_PER_INT))
+
+/* note that this is (logically) machine independent code */
+#define RSB_SET_BIT(p,b)  (*(rsb_bitmap_data_t*)(p))=(*(rsb_bitmap_data_t*)(p)|(1<<(b)))
+#define RSB_UNSET_BIT(p,b)  (*(rsb_bitmap_data_t*)(p))=(*(rsb_bitmap_data_t*)(p)&~(1<<(b)))
+#define RSB_GET_BIT(p,b)  ((*(rsb_bitmap_data_t*)(p))&(1<<(b)))
+#define RSB_BITMAP_POINTER(p,rw,r,c) (((rsb_bitmap_data_t*)(p))+(RSB_INTS_PER_ROW(rw)*(r)+RSB_INT_IN_ROW(c)))
+
+#ifdef RSB_BITMAP_ROW_MAJOR_ORDER
+/* Note that only a swap in the following three macros is needed to switch the storage format of our bitmap */
+#define RSB_BITMAP_GET(p,rows,cols,r,c) RSB_GET_BIT((RSB_BITMAP_POINTER((p),(rows),(c),(r))),((r)%(RSB_BITS_PER_INT)))
+#define RSB_BITMAP_SET(p,rows,cols,r,c) RSB_SET_BIT((RSB_BITMAP_POINTER((p),(rows),(c),(r))),((r)%(RSB_BITS_PER_INT)))
+#define RSB_BITMAP_UNSET(p,rows,cols,r,c) RSB_UNSET_BIT((RSB_BITMAP_POINTER((p),(rows),(c),(r))),((r)%(RSB_BITS_PER_INT)))
+#define RSB_BYTES_PER_BITMAP_(ld,d) (sizeof(rsb_bitmap_data_t)*(RSB_INTS_PER_ROW(ld)) * (d))
+#define RSB_BITMAP_CLEAR(p,rows,cols)	RSB_BZERO((p),(RSB_BYTES_PER_BITMAP_(cols,rows)))
+#else /* RSB_BITMAP_ROW_MAJOR_ORDER */
+#define RSB_BITMAP_GET(p,rows,cols,r,c) RSB_GET_BIT((RSB_BITMAP_POINTER((p),(cols),(r),(c))),((c)%(RSB_BITS_PER_INT)))
+#define RSB_BITMAP_SET(p,rows,cols,r,c) RSB_SET_BIT((RSB_BITMAP_POINTER((p),(cols),(r),(c))),((c)%(RSB_BITS_PER_INT)))
+#define RSB_BITMAP_UNSET(p,rows,cols,r,c) RSB_UNSET_BIT((RSB_BITMAP_POINTER((p),(cols),(r),(c))),((c)%(RSB_BITS_PER_INT)))
+#define RSB_BYTES_PER_BITMAP_(d,ld) (sizeof(rsb_bitmap_data_t)*(RSB_INTS_PER_ROW(ld)) * (d))
+#define RSB_BITMAP_CLEAR(p,rows,cols)	RSB_BZERO((p),(RSB_BYTES_PER_BITMAP_(rows,cols)))
+#endif /* RSB_BITMAP_ROW_MAJOR_ORDER */
+#define RSB_WORDS_PER_BITMAP(ld,d) ((RSB_BYTES_PER_BITMAP_(ld,d)+(sizeof(rsb_bitmap_data_t)-1))/sizeof(rsb_bitmap_data_t))
+
+#define RSB_BYTES_PER_BITMAP(rows,cols) RSB_BYTES_PER_BITMAP_(rows,cols)
+#define RSB_BLOCK_UNSET_BIT_FOR_NNZ(IA,JA,k,M) {rsb_coo_idx_t i=(RSB_GET_BLOCK_ROW_FOR_NZ(IA+k,(M))); rsb_coo_idx_t j=(RSB_GET_BLOCK_COL_FOR_NZ(JA+k,(M))); RSB_BITMAP_UNSET((M)->options->bitmap,(M)->M_b,(M)->K_b,i,j);}
+#define RSB_BLOCK_SET_BIT_FOR_NNZ(IA,JA,k,M) {rsb_coo_idx_t i=(RSB_GET_BLOCK_ROW_FOR_NZ(IA+k,M)); rsb_coo_idx_t j=(RSB_GET_BLOCK_COL_FOR_NZ(JA+k,M)); RSB_BITMAP_SET((M)->options->bitmap,(M)->M_b,(M)->K_b,i,j);}
+#define RSB_BLOCK_GET_BIT_FOR_NNZ(IA,JA,k,M) {rsb_coo_idx_t i=(RSB_GET_BLOCK_ROW_FOR_NZ(IA+k,M)); rsb_coo_idx_t j=(RSB_GET_BLOCK_COL_FOR_NZ(JA+k,M)); RSB_BITMAP_GET((M)->options->bitmap,(M)->M_b,(M)->K_b,i,j);}
+
+/* 
+ * Macros for one dimensional bitmaps -- easier to use.
+ * */
+#ifdef RSB_BITMAP_ROW_MAJOR_ORDER
+#define RSB_BITVECTOR_GET(p,bits,bit)     RSB_BITMAP_GET(p,bits,1,bit,0) 
+#define RSB_BITVECTOR_SET(p,bits,bit)     RSB_BITMAP_SET(p,bits,1,bit,0) 
+#define RSB_BITVECTOR_UNSET(p,bits,bit)   RSB_BITMAP_UNSET(p,bits,1,bit,0)
+#define RSB_BITVECTOR_CLEAR(p,bits)       RSB_BITMAP_CLEAR(p,bits,1)
+#define RSB_BYTES_PER_BITVECTOR(bits)     RSB_BYTES_PER_BITMAP(bits,1)
+#define RSB_WORDS_PER_BITVECTOR(bits)     RSB_WORDS_PER_BITMAP(bits,1)
+#else /* RSB_BITMAP_ROW_MAJOR_ORDER */
+#define RSB_BITVECTOR_GET(p,bits,bit)     RSB_BITMAP_GET(p,1,bits,0,bit) 
+#define RSB_BITVECTOR_SET(p,bits,bit)     RSB_BITMAP_SET(p,1,bits,0,bit) 
+#define RSB_BITVECTOR_UNSET(p,bits,bit)   RSB_BITMAP_UNSET(p,1,bits,0,bit)
+#define RSB_BITVECTOR_CLEAR(p,bits)       RSB_BITMAP_CLEAR(p,1,bits)
+#define RSB_BYTES_PER_BITVECTOR(bits)     RSB_BYTES_PER_BITMAP(1,bits)
+#define RSB_WORDS_PER_BITVECTOR(bits)     RSB_WORDS_PER_BITMAP(1,bits)
+#endif /* RSB_BITMAP_ROW_MAJOR_ORDER */
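+/*
+ * Illustrative usage sketch (not part of the build; assumes a calloc-like
+ * allocator): a bit vector of `bits' bits may be handled as follows:
+ *
+ *   rsb_bitmap_data_t *bv = calloc(RSB_WORDS_PER_BITVECTOR(bits),sizeof(rsb_bitmap_data_t));
+ *   RSB_BITVECTOR_SET(bv,bits,3);
+ *   if(RSB_BITVECTOR_GET(bv,bits,3))
+ *     RSB_BITVECTOR_UNSET(bv,bits,3);
+ *   free(bv);
+ */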
+
+	/* Given:
+	 *  - the address of a nonzero element's column index,
+	 *  - a rsb_options_t pointer,
+	 * the following macros return the index of the block column containing that element.
+	 * Note that they work only if o->M_b, o->K_b and the partitioning arrays are properly initialized, and the latter sorted.
+	 *
+	 * p.s.: the use of this could be avoided with a modest extra memory allocation...
+	 * p.s.: in the following, we blindly trust that bsearch won't fail
+	 * */
+#define RSB_GET_BLOCK_COL_FOR_NZ_(columnidxp,cpntr,K_b) (((rsb_coo_idx_t*)bsearch((columnidxp),(cpntr),(K_b),sizeof(rsb_coo_idx_t),(rsb__nnz_coord_compar))-((cpntr))))
+#define RSB_GET_BLOCK_COL_FOR_NZ(columnidxp,M)		(RSB_GET_BLOCK_COL_FOR_NZ_((columnidxp),(M)->cpntr,(M)->K_b))
+
+#define RSB_GET_BLOCK_ROW_FOR_NZ_(rowidxp,rpntr,M_b) (((rsb_coo_idx_t*)bsearch((rowidxp)   ,(rpntr),(M_b),sizeof(rsb_coo_idx_t),(rsb__nnz_coord_compar))-((rpntr))))
+#define RSB_GET_BLOCK_ROW_FOR_NZ(rowidxp   ,M)		(RSB_GET_BLOCK_ROW_FOR_NZ_((rowidxp),(M)->rpntr,(M)->M_b))
+
+#define RSB_GET_BLOCK_MAJ_FOR_NZ_(majidxp,Mpntr,Md_b) (((rsb_coo_idx_t*)bsearch((majidxp)   ,(Mpntr),(Md_b),sizeof(rsb_coo_idx_t),(rsb__nnz_coord_compar))-((Mpntr))))
+#define RSB_GET_BLOCK_MAJ_FOR_NZ(majidxp   ,M)		(RSB_GET_BLOCK_MAJ_FOR_NZ_((majidxp),(M)->Mpntr,(M)->Mdim))
+
+#define RSB_GET_BLOCK_MIN_FOR_NZ_(minidxp,mpntr,md_b) (((rsb_coo_idx_t*)bsearch((minidxp)   ,(mpntr),(md_b),sizeof(rsb_coo_idx_t),(rsb__nnz_coord_compar))-((mpntr))))
+#define RSB_GET_BLOCK_MIN_FOR_NZ(minidxp   ,M)		(RSB_GET_BLOCK_MIN_FOR_NZ_((minidxp),(M)->mpntr,(M)->mdim))
+
+#define GET_BLOCK_FIRST_COLUMN(column,M)	((M)->cpntr[(column)])
+#define GET_BLOCK_FIRST_ROW(row,M)		((M)->rpntr[ (row)  ])
+
+#define GET_BLOCK_WIDTH(column,M)		(((M)->cpntr[(column)+1])-((M)->cpntr[(column)]))
+#define GET_BLOCK_HEIGHT(row,M)			(((M)->rpntr[ (row)  +1])-((M)->rpntr[ (row)  ]))
+
+#define GET_BLOCK_SIZE(row,column,M)	((GET_BLOCK_WIDTH((column),(M)))*(GET_BLOCK_HEIGHT((row),(M))))
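+/*
+ * Worked example (hypothetical values): with (M)->rpntr = {0,2,5} and
+ * (M)->cpntr = {0,3,4}, block (1,0) starts at row 2, column 0;
+ * GET_BLOCK_HEIGHT(1,M) is 3, GET_BLOCK_WIDTH(0,M) is 3, and so
+ * GET_BLOCK_SIZE(1,0,M) is 9 elements.
+ */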
+
+/*
+ * Blanks a whole matrix block.
+ * */
+#define RSB_BLANK_BLOCK(BP,M,BLOCKROW,BLOCKCOLUMN)				\
+	RSB_BZERO( ((rsb_byte_t*)(BP)),						\
+		( (M)->el_size * GET_BLOCK_SIZE((BLOCKROW),(BLOCKCOLUMN),(M)) ) );
+
+#define RSB_INTRA_BLOCK_ROW(row,blockrow,M) ((row) - (M)->rpntr[(blockrow)])
+#define RSB_INTRA_BLOCK_COLUMN(column,blockcolumn,M) ((column) - (M)->cpntr[(blockcolumn)])
+#define RSB_GET_INTRA_BLOCK_OFFSET_ROW_MAJOR(row,column,blockrow,blockcolumn,M) ((( (row) - (M)->rpntr[(blockrow)]) * (GET_BLOCK_WIDTH((blockcolumn),(M))) + ( (column) - (M)->cpntr[(blockcolumn)] )) * (M)->el_size)
+#define RSB_GET_INTRA_BLOCK_OFFSET_COLUMN_MAJOR(row,column,blockrow,blockcolumn,M) ((( (column) - (M)->cpntr[(blockcolumn)]) * (GET_BLOCK_HEIGHT((blockrow),(M))) + ( (row) - (M)->rpntr[(blockrow)] )) * (M)->el_size)
+
+#define RSB_GET_INTRA_BLOCK_ROW_STRIDE(blockrow,blockcolumn,M) (GET_BLOCK_WIDTH((blockcolumn),(M)))
+#define RSB_GET_INTRA_BLOCK_OFFSET(row,column,blockrow,blockcolumn,M) \
+	(RSB_GET_INTRA_BLOCK_OFFSET_ROW_MAJOR(row,column,blockrow,blockcolumn,M)) 
+#define RSB_GET_INTRA_BLOCK_OFFSET_TRANSPOSED(row,column,blockrow,blockcolumn,M) \
+	(RSB_GET_INTRA_BLOCK_OFFSET_COLUMN_MAJOR(row,column,blockrow,blockcolumn,M)) 
+
+/*!
+ * Macros for diagonal-related comparisons.
+ *
+ * \code
+ *  (ROW,COL)      (ROW,COL+COLS)
+ *     +--------------+
+ *     |              |
+ *     |              |
+ *     |              |
+ *    ...            ...
+ *     |              |
+ *     |              |
+ *     +--------------+
+ *  (ROW+ROWS,COL)      (ROW+ROWS,COL+COLS)
+ *
+ *
+ * 	under diagonal  	        over diagonal
+ * 	intersects first at row COL     intersects first at row ROW
+ * 	intersects last at row ROW+ROWS     intersects last at row COL+COLS
+ *
+ *	+---------------+       +-\-------------+
+ *     \|               |       |  \            |
+ *      \               |       |   \           |
+ *      |\              |       |    \          |
+ *     ...             ...     ...             ...
+ *      |               |       |              \|
+ *      |               |       |               \
+ *      +-----\---------+	+---------------+
+ * \endcode
+ * */
+
+#define RSB_POINT_QUASI_UNDER_DIAGONAL(ROW,COL) 	((ROW)>=(COL))
+#define RSB_POINT_QUASI_OVER_DIAGONAL(ROW,COL)	 	((ROW)<=(COL))
+#define RSB_POINT_UNDER_DIAGONAL(ROW,COL) 		((ROW)> (COL))
+#define RSB_POINT_OVER_DIAGONAL(ROW,COL) 		((ROW)< (COL))
+
+#define RSB_POINT_UNDER_SUPRA_DIAGONAL(ROW,COL,OFFSET) 	  (RSB_POINT_UNDER_DIAGONAL(((ROW)+(OFFSET)),(COL)))
+#define RSB_POINT_UNDER_SUB_DIAGONAL(ROW,COL,OFFSET) 	  (RSB_POINT_UNDER_DIAGONAL(((ROW)),((COL)+(OFFSET))))
+#define RSB_POINT_OVER_SUPRA_DIAGONAL(ROW,COL,OFFSET) 	  (RSB_POINT_OVER_DIAGONAL(((ROW)+(OFFSET)),(COL)))
+#define RSB_POINT_OVER_SUB_DIAGONAL(ROW,COL,OFFSET) 	  (RSB_POINT_OVER_DIAGONAL((ROW),((COL)+(OFFSET))))
+
+/* assumes COLS>=1, ROWS>=1 */
+#define RSB_BLOCK_CROSSED_BY_DIAGONAL(ROW,COL,ROWS,COLS)	\
+	(							\
+	RSB_POINT_QUASI_UNDER_DIAGONAL((ROW)+(ROWS-1),(COL)) && 	\
+	RSB_POINT_QUASI_OVER_DIAGONAL((ROW),((COL)+(COLS-1))) )
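+/*
+ * Worked example: a block with ROW=2, COL=0, ROWS=2, COLS=3 spans rows 2..3
+ * and columns 0..2, so the diagonal element (2,2) falls inside it; accordingly,
+ * RSB_BLOCK_CROSSED_BY_DIAGONAL(2,0,2,3) expands to
+ * RSB_POINT_QUASI_UNDER_DIAGONAL(3,0) && RSB_POINT_QUASI_OVER_DIAGONAL(2,2),
+ * which is true.
+ */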
+#define RSB_BLOCK_CROSSED_BY_SUPRA_DIAGONAL(ROW,COL,ROWS,COLS,OFFSET)	\
+	RSB_BLOCK_CROSSED_BY_DIAGONAL((ROW)+(OFFSET),(COL),(ROWS),(COLS))
+#define RSB_BLOCK_CROSSED_BY_SUB_DIAGONAL(ROW,COL,ROWS,COLS,OFFSET)	\
+	RSB_BLOCK_CROSSED_BY_SUPRA_DIAGONAL(COL,ROW,COLS,ROWS,OFFSET)
+#define RSB_BLOCK_CROSSED_BY_SUPRA_OR_SUB_DIAGONAL(ROW,COL,ROWS,COLS,LOFFSET,UOFFSET)	\
+	(										\
+	(LOFFSET)>(UOFFSET)?								\
+	(RSB_BLOCK_CROSSED_BY_SUB_DIAGONAL(ROW,COL,ROWS,COLS,LOFFSET)):			\
+	(RSB_BLOCK_CROSSED_BY_SUPRA_DIAGONAL(ROW,COL,ROWS,COLS,UOFFSET))	)		
+
+/* 
+ * The offset in the block to the first element which is on the diagonal 
+ * The stride will be ROWS+1 or COLS+1, depending on the internal storage.
+ * We here assume C storage.
+ * */
+#define RSB_BLOCK_DIAGONAL_OFFSET(ROW,COL,ROWS,COLS)	\
+	((RSB_POINT_UNDER_DIAGONAL((ROW),(COL)))  ? ((ROW)-(COL)) : (((COL)-(ROW))*(COLS)) )
+/* if the block is internally stored in Fortran, then : */
+#define RSB_BLOCK_DIAGONAL_OFFSET_FORTRAN_STORED(ROW,COL,ROWS,COLS)	\
+	((RSB_POINT_OVER_DIAGONAL((ROW),(COL)))  ? (((COL)-(ROW))*(ROWS)) : ((ROW)-(COL))  )
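+/*
+ * Worked example (C storage): a block at ROW=1, COL=3 with COLS=4 lies above
+ * the diagonal; its first diagonal element is (3,3), i.e. local row 2, local
+ * column 0, and RSB_BLOCK_DIAGONAL_OFFSET(1,3,3,4) evaluates to (3-1)*4 = 8,
+ * that element's row-major offset within the block.
+ */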
+
+/* The following is ordering-neutral.
+
+   +------------------->                 	
+   | \   +-----+ ^^
+   |   \ |     | ||
+   |     \     | |v RSB_BLOCK_DIAGONAL_OFFSET_FIRST_ROW
+   |     +-\---+ v  RSB_BLOCK_DIAGONAL_OFFSET_LAST_ROW
+   |<--->    \      RSB_BLOCK_DIAGONAL_OFFSET
+   |           \
+   v  
+
+   +------------------->                 	
+   |    \        ^^
+   |      \      ||
+   |     +--\--+ v| RSB_BLOCK_SUPRA_DIAGONAL_OFFSET_FIRST_ROW
+   |     |    \|  v RSB_BLOCK_SUPRA_DIAGONAL_OFFSET_LAST_ROW
+   |     |     |\
+   |     +-----+  \
+   |
+   v  
+
+   +------------------->                 	
+   |             ^^
+   |             ||
+   | \   +-----+ ||
+   |   \ |     | ||
+   |     \     | |v RSB_BLOCK_SUB_DIAGONAL_OFFSET_FIRST_ROW
+   |     +-\---+ v  RSB_BLOCK_SUB_DIAGONAL_OFFSET_LAST_ROW
+   |         \
+   |           \
+   v  
+ */
+#define RSB_BLOCK_DIAGONAL_OFFSET_FIRST_ROW(ROW,COL,ROWS,COLS)	\
+	((RSB_POINT_UNDER_DIAGONAL((ROW),(COL)))  ? (ROW) :  (COL) )
+
+#define RSB_BLOCK_DIAGONAL_OFFSET_LAST_ROW(ROW,COL,ROWS,COLS)	\
+	((RSB_POINT_UNDER_DIAGONAL((((ROW)+(ROWS))-1),(((COL)+(COLS))-1)))  ? (((COL)+(COLS))-1):(((ROW)+(ROWS))-1)  )
+
+#define RSB_BLOCK_SUPRA_DIAGONAL_OFFSET_FIRST_ROW(ROW,COL,ROWS,COLS,OFFSET)	\
+	(RSB_BLOCK_DIAGONAL_OFFSET_FIRST_ROW(((ROW)+(OFFSET)),(COL),ROWS,COLS))
+
+/*#define RSB_BLOCK_SUPRA_DIAGONAL_OFFSET_LAST_ROW(ROW,COL,ROWS,COLS,OFFSET)	*/
+/*	RSB_BLOCK_DIAGONAL_OFFSET_LAST_ROW((ROW)+(OFFSET),COL,ROWS,COLS)*/
+
+#define RSB_BLOCK_SUB_DIAGONAL_OFFSET_FIRST_ROW(ROW,COL,ROWS,COLS,OFFSET)	\
+	(RSB_BLOCK_DIAGONAL_OFFSET_FIRST_ROW(ROW,(COL)+(OFFSET),ROWS,COLS))
+
+#define RSB_BLOCK_SUB_OR_SUPRA_DIAGONAL_OFFSET_FIRST_ROW(ROW,COL,ROWS,COLS,LOFFSET,UOFFSET)	\
+	((RSB_POINT_UNDER_DIAGONAL((ROW)+(UOFFSET),(COL)+(LOFFSET)))  ? (ROW) :  (COL)+(LOFFSET)-(UOFFSET) )
+
+#define RSB_BLOCK_SUB_OR_SUPRA_DIAGONAL_OFFSET_FIRST_COL(ROW,COL,ROWS,COLS,LOFFSET,UOFFSET)	\
+	RSB_BLOCK_SUB_OR_SUPRA_DIAGONAL_OFFSET_FIRST_ROW(COL,ROW,COLS,ROWS,UOFFSET,LOFFSET)
+
+#define RSB_BLOCK_SUB_DIAGONAL_OFFSET_LAST_ROW(ROW,COL,ROWS,COLS,OFFSET)	\
+	RSB_BLOCK_DIAGONAL_OFFSET_LAST_ROW((ROW),(COL)+(OFFSET),ROWS,COLS)
+
+#define RSB_BLOCK_SUB_OR_SUPRA_DIAGONAL_OFFSET_LAST_ROW(ROW,COL,ROWS,COLS,LOFFSET,UOFFSET)	\
+	((RSB_POINT_UNDER_DIAGONAL((ROW)+((ROWS)-1)+(UOFFSET),(COL)+((COLS)-1)+(LOFFSET)))  ? (COL)+((COLS)-1)+(LOFFSET)-(UOFFSET) : (ROW)+(ROWS)-1)
+
+#define RSB_BLOCK_SUB_OR_SUPRA_DIAGONAL_OFFSET_LAST_COL(ROW,COL,ROWS,COLS,LOFFSET,UOFFSET)	\
+	RSB_BLOCK_SUB_OR_SUPRA_DIAGONAL_OFFSET_LAST_ROW(COL,ROW,COLS,ROWS,UOFFSET,LOFFSET)
+
+
+/* pure VBR, with no trailing structs : */
+
+/* row major order (default) : */
+
+#define	RSB_GET_NEXT_BLOCK_POINTER(BP,M,ROWVAR,COLVAR,BLOCKROWSVAR,BLOCKCOLSVAR,BLOCKROWVAR,BLOCKCOLUMNVAR)	\
+	/*										\
+	 * *input*									\
+	 * M		should be a valid rsb_mtx_t structure pointer		\
+	 * *output*									\
+	 * ROWVAR	will be set to the base row    of this block			\
+	 * COLVAR	will be set to the base column of this block			\
+	 * BLOCKROWSVAR	will be set to the rows   count of this block			\
+	 * BLOCKCOLSVAR	will be set to the column count of this block			\
+	 * BP		 will be set to the current block pointer			\
+	 * */										\
+	++_k;										\
+	if(_k>=(M)->bpntr[*_pi+1])							\
+	{										\
+		++*_pi;	/* new blocks row */						\
+		while( (M)->bpntr[*_pi] == (M)->bpntr[*_pi+1] )		/* skipping empty rows */		\
+			++*_pi;											\
+	}													\
+	*_pj=(M)->bindx[_k]; 						/* the current block column index  */	\
+	_lastk=_k;												\
+	(BLOCKROWVAR)=_i;											\
+	(BLOCKCOLUMNVAR)=_j;											\
+	(ROWVAR)=(M)->rpntr[_i];					/* _i is the current block row index */	\
+	(COLVAR)=(M)->cpntr[_j]; 					/* the current block column index  */	\
+	/*(BLOCKROWSVAR)=(M)->rpntr[_i+1]-(M)->rpntr[_i];*/ 		/* the current block rows    count */	\
+	/*(BLOCKCOLSVAR)=(M)->cpntr[_j+1]-(M)->cpntr[_j];*/			/* the current block columns count */	\
+	/*(BP)=(rsb_byte_t*)((M)->VA ) + (M)->el_size * (M)->indptr[_k] ; */						\
+	(BLOCKROWSVAR)=GET_BLOCK_HEIGHT(_i,(M));	/* the current block rows    count */			\
+	(BLOCKCOLSVAR)=GET_BLOCK_WIDTH( _j,(M)); 	/* the current block columns count */			\
+	(BP)=(rsb_byte_t*)(RSB_BLOCK_ADDRESS((M),_k));										\
+	;
+
+/* row major order : */
+#define RSB_GET_FIRST_BLOCK_POINTER(BP,M,ROWVAR,COLVAR,BLOCKROWSVAR,BLOCKCOLSVAR,BLOCKROWVAR,BLOCKCOLUMNVAR)	\
+	rsb_blk_idx_t _i=0,_j=0;										\
+	rsb_blk_idx_t *_pi=NULL,*_pj=NULL;									\
+	rsb_nnz_idx_t _k=0,_lastk=0;										\
+	if((M)->flags&RSB_FLAG_WANT_COLUMN_MAJOR_ORDER){_pi=&_j;_pj=&_i;}else{_pi=&_i;_pj=&_j;} 		\
+	while( (M)->bpntr[*_pi] == (M)->bpntr[*_pi+1] )								\
+		++*_pi;												\
+	_k=(M)->bpntr[*_pi]; 		/* _k is the first block index for the current row of blocks */		\
+	*_pj=(M)->bindx[_k]; 						/* the current block column index  */	\
+	(BLOCKROWVAR)=_i;											\
+	(BLOCKCOLUMNVAR)=_j;											\
+	(ROWVAR)=(M)->rpntr[_i];					/* _i is the current block row index */	\
+	(COLVAR)=(M)->cpntr[_j]; 					/* the current block column index  */	\
+	(BLOCKROWSVAR)=GET_BLOCK_HEIGHT(_i,(M));	/* the current block rows    count */			\
+	(BLOCKCOLSVAR)=GET_BLOCK_WIDTH( _j,(M)); 	/* the current block columns count */			\
+	(BP)=(rsb_byte_t*)(RSB_BLOCK_ADDRESS((M),_k));										
+#define RSB_GOT_LAST_BLOCK_POINTER(M)	( _lastk >= (M)->block_count )
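+/*
+ * Illustrative iteration sketch (not part of the build; assumes a fully
+ * assembled VBR matrix pointer mtxAp with block_count > 0; `bp' receives a
+ * pointer to each block in turn):
+ *
+ *   rsb_coo_idx_t r,c,br,bc; rsb_blk_idx_t bri,bci; void *bp;
+ *   RSB_GET_FIRST_BLOCK_POINTER(bp,mtxAp,r,c,br,bc,bri,bci);
+ *   while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+ *   {
+ *     // ... use the br x bc block at (r,c), i.e. block (bri,bci), via bp ...
+ *     RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,r,c,br,bc,bri,bci);
+ *   }
+ */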
+
+
+#define RSB_POINT_IN_BOX(R0,C0,RH,CW,R,C)	( ((R)>=(R0)) && (R)<((R0)+(RH)) && ((C)>=(C0)) && (C)<((C0)+(CW)) )
+#define RSB_MATRIX_CONTAINS(M,R,C) 		( RSB_POINT_IN_BOX((M)->roff,(M)->coff,(M)->nr,(M)->nc,R,C) )
+#define RSB_SUBMATRIX_CONTAINS_ROW(M,R) 		( RSB_POINT_IN_BOX((M)->roff,0,(M)->nr,1,R,0) )
+#define RSB_SUBMATRIX_INTERSECTS_COLS(M,C0,C1) 							\
+	   ( ( (M)->coff <= (C1) ) && ( (M)->coff+(M)->nc > (C0) ) )
+#define RSB_SUBMATRIX_INTERSECTS_ROWS(M,R0,R1) 							\
+	   ( ( (M)->roff <= (R1) ) && ( (M)->roff+(M)->nr > (R0) ) )
+
+#define RSB_SUBMATRIX_INTERSECTS_BOX(M,R0,R1,C0,C1) 			\
+	(RSB_SUBMATRIX_INTERSECTS_ROWS(M,R0,R1)&&RSB_SUBMATRIX_INTERSECTS_COLS(M,C0,C1))
+
+#define RSB_FIND_SUBMATRIX_CONTAINING(M,R,C)	( \
+	((M)->sm[0]&&RSB_MATRIX_CONTAINS((M)->sm[0],R,C)?(M)->sm[0]: \
+	((M)->sm[1]&&RSB_MATRIX_CONTAINS((M)->sm[1],R,C)?(M)->sm[1]: \
+	((M)->sm[2]&&RSB_MATRIX_CONTAINS((M)->sm[2],R,C)?(M)->sm[2]: \
+	((M)->sm[3]&&RSB_MATRIX_CONTAINS((M)->sm[3],R,C)?(M)->sm[3]:NULL )))))
+
+#define RSB_SUBMATRIX_INDEX(M,I,J) (M->sm[(I)*2+(J)])
+/*
+ * This should be fixed: we would prefer to use intrinsics here. TODO
+ * */
+#define RSB_FABS(x) ((x)<(0)?(-(x)):(x))
+
+/*!
+ * Misc macros.
+ */
+#define RSB_ASSIGN_IF_ZERO(VAR,VAL) if( (VAR) == 0) (VAR) = (VAL);
+#define RSB_SWAP(TYPE,X,Y) {TYPE __tmp=(X);(X)=(Y);(Y)=(__tmp);}
+
+#define RSB_SUBMATRIX_FOREACH_(matrix,submatrix,smi,smj,smk) 					\
+	/*int smk;*/										\
+	for(smk=0;smk<4;++smk)									\
+	if( (smi=smk/2) >=0 && (smj=smk%2) >= 0 && (submatrix=(matrix)->sm[smi*2+(smj)] ) )	\
+ 	/* NOTE : handle with care (the 'submatrix' pointer could be NULL) */		\
+
+#define RSB_SUBMATRIX_FOREACH_REVERSE(matrix,submatrix,smi,smj) 				\
+	/*int smi,smj;*/								\
+	for(smi=1;smi+1>0;--smi)/* first smi, then smj, or will break spmv_uxux, ... */		\
+	for(smj=1,submatrix=matrix->sm[smi*2+smj];					\
+		smj+1>0;									\
+		--smj,submatrix=(smi<2 && smj<2)?matrix->sm[smi*2+(smj)]:NULL)		\
+ 	/* NOTE : handle with care (the 'submatrix' pointer could be NULL) */		\
+
+#define RSB_SUBMATRIX_FOREACH(MTXAP,submatrix,smi,smj) 				\
+	/*int smi,smj;*/								\
+	for(smi=0;smi<2;++smi)/* first smi, then smj, or will break spmv_uxux, ... */		\
+	for(smj=0,submatrix=MTXAP->sm[smi*2+smj];					\
+		smj<2;									\
+		++smj,submatrix=(smi<2 && smj<2)?MTXAP->sm[smi*2+(smj)]:NULL)		\
+ 	/* NOTE : handle with care (the 'submatrix' pointer could be NULL) */		\
+
+/* The following is incorrect, as it accesses one further pointer. */
+/*
+#define RSB_SUBMATRIX_FOREACH_LEAF(MTXAP,submatrix,smi) 				\
+	for(	(smi)=0,submatrix=(MTXAP)->all_leaf_matrices[(smi)].mtxlp;		\
+		(smi)<(MTXAP)->all_leaf_matrices_n;					\
+			++(smi),submatrix=(MTXAP)->all_leaf_matrices[smi].mtxlp)	\
+*/
+
+/* The following is correct, even if less elegant: note the assignment within the loop condition. */
+#define RSB_SUBMATRIX_FOREACH_LEAF(MTXAP,submatrix,smi) 				\
+	for(	(smi)=0;		\
+		((smi)<(MTXAP)->all_leaf_matrices_n) && ( submatrix=(MTXAP)->all_leaf_matrices[(smi)].mtxlp );	\
+			++(smi))
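+/*
+ * Illustrative sketch (not part of the build): e.g. summing the nonzero
+ * counts over all leaves of an assembled matrix mtxAp:
+ *
+ *   struct rsb_mtx_t *submatrix = NULL;
+ *   rsb_submatrix_idx_t smi = 0;
+ *   rsb_nnz_idx_t lnnz = 0;
+ *   RSB_SUBMATRIX_FOREACH_LEAF(mtxAp,submatrix,smi)
+ *     lnnz += submatrix->nnz;
+ */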
+
+#define RSB_SUBMATRIX_FOREACH_LEAF_PERMUTED(MTXAP,submatrix,smi,PV)				\
+	for(	(smi)=0;		\
+		((smi)<(MTXAP)->all_leaf_matrices_n) && ( submatrix=(MTXAP)->all_leaf_matrices[PV[(smi)]].mtxlp );	\
+			++(smi))
+
+#define RSB_SUBMATRIX_IS_ON_DIAG(matrix) 	((matrix)->roff==(matrix)->coff)
+#define RSB_SUBMATRIX_IS_LOWDIAG(matrix) 	((matrix)->roff>(matrix)->coff)
+#define RSB_SUBMATRIX_IS_UPPDIAG(matrix) 	((matrix)->roff<(matrix)->coff)
+
+#define RSB_SUBMATRIX_FOREACH_DIAG_LEAF(matrix,submatrix,smi) 				\
+	RSB_SUBMATRIX_FOREACH_LEAF(matrix,submatrix,smi) 				\
+		if(RSB_SUBMATRIX_IS_ON_DIAG(submatrix))
+
+#define RSB_SUBMATRIX_FOREACH_LOWDIAG_LEAF(matrix,submatrix,smi)			\
+	RSB_SUBMATRIX_FOREACH_LEAF(matrix,submatrix,smi) 				\
+		if(RSB_SUBMATRIX_IS_LOWDIAG(submatrix))
+
+#define RSB_SUBMATRIX_FOREACH_UPPDIAG_LEAF(matrix,submatrix,smi) 				\
+	RSB_SUBMATRIX_FOREACH_LEAF(matrix,submatrix,smi) 				\
+		if(RSB_SUBMATRIX_IS_UPPDIAG(submatrix))
+
+#define RSB_SUBMATRIX_COLS_INTERSECTION_FIRST(matrix,C)					\
+	   RSB_MAX(((matrix)->coff),(C))
+
+#define RSB_SUBMATRIX_COLS_INTERSECTION_LAST(matrix,C)					\
+	   RSB_MIN(((matrix)->coff+(matrix->nc-1)),(C))	/* FIXME: requires matrix->nc > 0 */
+
+#define RSB_SUBMATRIX_ROWS_INTERSECTION_FIRST(matrix,R)					\
+	   RSB_MAX(((matrix)->roff),(R))
+
+#define RSB_SUBMATRIX_ROWS_INTERSECTION_LAST(matrix,R)					\
+	   RSB_MIN(((matrix)->roff+(matrix->nr-1)),(R))	/* FIXME: requires matrix->nr > 0 */
+
+#define RSB_BCSS_MATRIX_FOREACH_BLOCK(matrix,blockpointer,bri,bci,blockindex,baserow,basecolumn,BR,BC)	\
+	RSB_DEBUG_ASSERT((matrix)->VA);									\
+	RSB_DEBUG_ASSERT((matrix)->el_size>0);								\
+	RSB_DEBUG_ASSERT((matrix)->br>0 && (matrix)->bc>0);							\
+	blockpointer=(matrix)->VA;									\
+	for(	bri=0,											\
+		baserow=(bri)*(BR);									\
+		bri<(matrix)->Mdim;									\
+		++bri,											\
+		baserow=(bri)*(BR)									\
+		)											\
+	for(	blockindex=(matrix)->bpntr[bri],								\
+		bci=(matrix)->bindx[blockindex],								\
+		basecolumn=(bci)*(BC);								\
+		blockindex<(matrix)->bpntr[(bri)+1];							\
+		++blockindex,										\
+		blockpointer=((rsb_byte_t*)blockpointer)+(matrix)->el_size*(BR)*(BC),		\
+		bci=(matrix)->bindx[blockindex],								\
+		baserow=(bri)*(BR),									\
+		basecolumn=(bci)*(BC)								\
+		)
+
+#define RSB_BCSR_MATRIX_FOREACH_BLOCK(matrix,blockpointer,bri,bci,blockindex,baserow,basecolumn)	\
+	RSB_BCSS_MATRIX_FOREACH_BLOCK(matrix,blockpointer,bri,bci,blockindex,baserow,basecolumn,matrix->br,matrix->bc)
+
+#define RSB_BCSC_MATRIX_FOREACH_BLOCK(matrix,blockpointer,bri,bci,blockindex,baserow,basecolumn)	\
+	RSB_BCSS_MATRIX_FOREACH_BLOCK(matrix,blockpointer,bci,bri,blockindex,basecolumn,baserow,matrix->bc,matrix->br)
+
+#define RSB_CONST_ENOUGH_BYTES_FOR_ANY_TYPE 32 /** should adapt this in case of need */
+#define RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE (RSB_CONST_ENOUGH_BYTES_FOR_ANY_TYPE/sizeof(rsb_aligned_t))	/** should adapt this in case of need */
+
+
+#define RSB_INTERNAL_FLAG_CSR_SORTING_MASK (RSB_FLAG_QUAD_PARTITIONING | RSB_FLAG_OBSOLETE_BLOCK_ASYMMETRIC_Z_SORTING)
+
+#define RSB_DO_FLAGS_EXTRACT_STORAGE(F)	 ( \
+		/*((F) & RSB_FLAG_WANT_LINKED_STORAGE) */ 0 | \
+		((F) & RSB_FLAG_WANT_COO_STORAGE) | \
+		((F) & RSB_FLAG_WANT_FIXED_BLOCKING_VBR) | \
+		((F) & RSB_FLAG_WANT_BCSS_STORAGE) | \
+		RSB_FLAG_NOFLAGS )
+
+#if 1
+#define rsb_do_spmv(TRANSA,ALPHAP,MTXAP,XP,INCX,BETAP,YP,INCY)	\
+       	rsb_do_spmv_general(TRANSA,ALPHAP,MTXAP,XP,INCX,BETAP,YP,INCY,(RSB_OP_FLAG_DEFAULT) RSB_DEFAULT_OUTER_NRHS_SPMV_ARGS)
+#else
+rsb_err_t rsb_do_spmv(rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp, const void * Xp, rsb_coo_idx_t incX, const void * betap, void * Yp, rsb_coo_idx_t incY)
+{
+	rsb_err_t errval = rsb_do_spmv_general(transA,alphap,mtxAp,Xp,incX,betap,Yp,incY,RSB_OP_FLAG_DEFAULT RSB_DEFAULT_OUTER_NRHS_SPMV_ARGS);
+	return errval;
+}
+#endif
+
+/* We may use custom memcpy functions. */
+#define RSB_MEMCPY(DST,SRC,BYTES) rsb_memcpy((DST),(SRC),(BYTES))
+
+#define RSB_A_BZERO(ID,DOFF,NNZ,ES) \
+	RSB_BZERO(((rsb_byte_t*)(ID))+(ES)*(DOFF),(ES)*(NNZ)) \
+
+#define RSB_A_MEMCPY(ID,IS,DOFF,SOFF,NNZ,ES) \
+	RSB_MEMCPY(((rsb_char_t*)(ID))+(ES)*(DOFF),((const rsb_char_t*)(IS))+(ES)*(SOFF),(ES)*(NNZ)) \
+
+#define RSB_A_MEMMOVE(ID,IS,DOFF,SOFF,NNZ,ES) \
+	RSB_MEMMOVE(((rsb_char_t*)(ID))+(ES)*(DOFF),((const rsb_char_t*)(IS))+(ES)*(SOFF),(ES)*(NNZ)) \
+
+#define RSB_COA_MEMCPY(ID,IS,DOFF,SOFF,NNZ) \
+	RSB_MEMCPY(((rsb_coo_idx_t*)(ID))+(DOFF),((const rsb_coo_idx_t*)(IS))+(SOFF),sizeof(rsb_coo_idx_t)*(NNZ)) \
+
+#define RSB_COA_MEMCPY2H(ID,IS,DOFF,SOFF,NNZ,ADD) 					\
+{											\
+	rsb_nnz_idx_t RSB_DUMMY_ID=0;							\
+	for(RSB_DUMMY_ID=0;RSB_DUMMY_ID<(NNZ);++RSB_DUMMY_ID)				\
+		((rsb_half_idx_t*)(ID))[(DOFF)+(RSB_DUMMY_ID)]=			\
+		((const rsb_coo_idx_t*)IS)[(SOFF)+(RSB_DUMMY_ID)]+(ADD);		\
+}
+
+#define RSB_COA_MEMMOVE(ID,IS,DOFF,SOFF,NNZ) \
+	RSB_MEMMOVE(((rsb_coo_idx_t*)(ID))+(DOFF),((const rsb_coo_idx_t*)(IS))+(SOFF),sizeof(rsb_coo_idx_t)*(NNZ)) \
+
+#define RSB_COO_MEMMOVE(VD,ID,JD,VS,IS,JS,DOFF,SOFF,NNZ,ES) \
+	RSB_MEMMOVE(((rsb_char_t*)(VD))+(ES)*(DOFF),((const rsb_char_t*)(VS))+(ES)*(SOFF),(ES)*(NNZ)), \
+	RSB_MEMMOVE(((rsb_coo_idx_t*)(ID))+(DOFF),((const rsb_coo_idx_t*)(IS))+(SOFF),sizeof(rsb_coo_idx_t)*(NNZ)), \
+	RSB_MEMMOVE(((rsb_coo_idx_t*)(JD))+(DOFF),((const rsb_coo_idx_t*)(JS))+(SOFF),sizeof(rsb_coo_idx_t)*(NNZ))
+
+#define RSB_COO_MEMCPY(VD,ID,JD,VS,IS,JS,DOFF,SOFF,NNZ,ES) \
+	RSB_MEMCPY(((rsb_char_t*)(VD))+(ES)*(DOFF),((const rsb_char_t*)(VS))+(ES)*(SOFF),(ES)*(NNZ)), \
+	RSB_MEMCPY(((rsb_coo_idx_t*)(ID))+(DOFF),((const rsb_coo_idx_t*)(IS))+(SOFF),sizeof(rsb_coo_idx_t)*(NNZ)), \
+	RSB_MEMCPY(((rsb_coo_idx_t*)(JD))+(DOFF),((const rsb_coo_idx_t*)(JS))+(SOFF),sizeof(rsb_coo_idx_t)*(NNZ))
+
+#define RSB_CSR_MEMCPY(VD,ID,JD,VS,IS,JS,NNZ,NR,ES) \
+	RSB_MEMCPY(((rsb_char_t   *)(VD)),((const rsb_char_t   *)(VS)),(ES)*(NNZ)), \
+	RSB_MEMCPY(((rsb_nnz_idx_t*)(ID)),((const rsb_nnz_idx_t*)(IS)),sizeof(rsb_nnz_idx_t)*(NR)), \
+	RSB_MEMCPY(((rsb_coo_idx_t*)(JD)),((const rsb_coo_idx_t*)(JS)),sizeof(rsb_coo_idx_t)*(NNZ))
+
+#define RSB_CSR2COO_MEMCPY(VD,ID,JD,VS,I,JS,DOFF,SOFF,NNZ,ES) \
+	RSB_MEMCPY(((rsb_char_t*)(VD))+(ES)*(DOFF),((const rsb_char_t*)(VS))+(ES)*(SOFF),(ES)*(NNZ)), \
+	rsb__util_coo_array_set(((rsb_coo_idx_t*)(ID))+(DOFF),(NNZ),(I)), \
+	RSB_MEMCPY(((rsb_coo_idx_t*)(JD))+(DOFF),((const rsb_coo_idx_t*)(JS))+(SOFF),sizeof(rsb_coo_idx_t)*(NNZ))
+
+#define RSB_COO_MEMCPY_parallel(VD,ID,JD,VS,IS,JS,DOFF,SOFF,NNZ,ES) \
+	RSB_A_MEMCPY_parallel(VD,VS,DOFF,SOFF,NNZ,ES), \
+	RSB_COA_MEMCPY_parallel(ID,IS,DOFF,SOFF,NNZ), \
+	RSB_COA_MEMCPY_parallel(JD,JS,DOFF,SOFF,NNZ)
+
+#define RSB_FCOO_ASUM(S,X,LI,UI) {rsb_coo_idx_t i; for(i=(LI);i<(UI);++i)(S)+=(X)[i];}
+#define RSB_XCOO_ISET(X,  LI,UI) {rsb_coo_idx_t i; for(i=(LI);i<(UI);++i)(X)[i]=i-(LI);}
+#define RSB_FCOO_ISET RSB_XCOO_ISET
+#define RSB_XCOO_VSET(X,V,LI,UI) {rsb_coo_idx_t i; for(i=(LI);RSB_LIKELY((i)<(UI));++i)(X)[(i)] =(V);}
+#define RSB_XCOO_VADD(X,V,LI,UI) {rsb_coo_idx_t i; for(i=(LI);RSB_LIKELY((i)<(UI));++i)(X)[(i)]+=(V);}
+#define RSB_XCOO_IREN	/* TODO: still to be written */
+
+#define RSB_NNZ_OF(MTXAP) ((MTXAP)?((MTXAP)->nnz):0)
+#define RSB_TYPED_OFF_PTR(TYPECODE,VA,OFF) (((rsb_byte_t*)(VA))+(((size_t)(RSB_SIZEOF(TYPECODE))*(OFF))))
+#define RSB_COO_LT(I1,J1,I2,J2) ( (I1) < (I2) || ( (I1) == (I2) && ( (J1) < (J2) ) ) )
+#define RSB_COO_GT(I1,J1,I2,J2) RSB_COO_LT(I2,J2,I1,J1)
+#define RSB_COO_GE(I1,J1,I2,J2) ( !RSB_COO_LT(I1,J1,I2,J2) )
+#define RSB_COO_EQ(I1,J1,I2,J2) ( (I1) == (I2) && ( (J1) == (J2) ) )
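+/* E.g.: RSB_COO_LT(0,5,1,0) and RSB_COO_LT(1,0,1,2) both hold, giving the
+ * row-major lexicographic order on coordinates used by the sorting code. */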
+
+/*!
+ * \ingroup gr_internals
+ * \brief An internal, helper structure.
+ */
+struct rsb_memory_level_t
+{
+	size_t size;				/*  */
+	size_t level;				/*  */
+	size_t associativity;			/*  */
+	size_t linesize;			/*  */
+};
+
+#define RSB_MEGABYTE (1024*1024)
+#define RSB_MEGABYTE_SYM "MiB"
+
+#define RSB_DEFAULT_STREAM stdout
+#define RSB_DIR_SEPARATOR	'/'	/*  */
+#define RSB_MAX_STRERRLEN  	128	/*  */
+#define RSB_MAX_LINE_LENGTH  	1025	/*  */
+#define RSB_MAX_COMPILE_COMMAND_LENGTH 	1025	/*  */
+#define RSB_MAX_VERSION_STRING_LENGTH  	4096	/*  */
+#define RSB_MAX_FILENAME_LENGTH  RSB_MAX_LINE_LENGTH	/* the maximal supported file name length (in buffers) */
+
+#define RSB_MAX_SUPPORTED_CACHE_LEVELS 32L	/* the maximal supported height of memory hierarchy */
+#define RSB_MIN_THREAD_MEMCPY_NNZ 1024		/* minimal count of nonzeros to move for a thread during parallel memcpy */
+#define RSB_MIN_THREAD_BZERO_BYTES 8192		/* minimal count of bytes to zero for a thread during parallel bzero */
+#define RSB_MIN_THREAD_XAXPY_NNZ 256 /* 1024 */		/* minimal count of elements for a parallel vector-vector operation */
+#define RSB_MIN_THREAD_SORT_NNZ 256		/* minimal count of nonzeros to sort for a thread during parallel sort */
+
+#define RSB_POWER_OF_2(N) (1<<(N))
+#define RSB_FRAC(Q,D) (((Q)+((D)-1))/(D))
+#define RSB_MIDDLE(X) RSB_FRAC(X,2)
+#define RSB_IS_INTEGER_ODD(X)   ( (X)&0x01)
+#define RSB_IS_INTEGER_EVEN(X)	(!RSB_IS_INTEGER_ODD(X))
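+/* E.g.: RSB_FRAC(10,4) is 3 (the quotient, rounded up), and RSB_MIDDLE(7) is 4. */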
+
+#define RSB_HAVE_STREAMS RSB_HAVE_STDIO_H
+
+#if defined(RSB_WANT_LIBRSB_STATS) && (RSB_WANT_LIBRSB_STATS>0)
+#define RSB_WANT_LIBRSB_TIMER 1
+#else /* RSB_WANT_LIBRSB_STATS */
+#define RSB_WANT_LIBRSB_TIMER 0
+#endif /* RSB_WANT_LIBRSB_STATS */
+
+/*!
+ * \ingroup gr_internals
+ * \brief An internal, helper structure.
+ */
+struct rsb_session_handle_t
+{
+	#ifndef RSB_DISABLE_ALLOCATOR_WRAPPER
+	/*!
+	 * A global memory counter, used for debugging purposes.
+	 * */
+	size_t allocated_memory;			/* total of allocated memory, in bytes */
+	size_t allocations_count;		/* total number of current allocations */
+	#endif /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+	size_t min_leaf_matrix_bytes;		/*  */
+	size_t avg_leaf_matrix_bytes;		/*  */
+	size_t rsb_g_threads;			/* detected threads */
+#if RSB_WANT_PERFORMANCE_FILE
+	/*rsb_byte_t * performance_binary_dump_file;*/	/* TODO: obsolete feature */
+	rsb_char_t * performance_binary_dump_file;	/* TODO: obsolete feature */
+#endif /* RSB_WANT_PERFORMANCE_FILE */
+	/* beginning of user settable variables declarations */
+	size_t rsb_want_threads;		/* RSB_IO_WANT_EXECUTING_THREADS ; active threads (may be <> rsb_g_threads) */
+	rsb_int_t asm_sort_method;		/* RSB_IO_WANT_SORT_METHOD */
+	rsb_real_t subdivision_multiplier;	/* RSB_IO_WANT_SUBDIVISION_MULTIPLIER */
+	rsb_int_t want_bounded_box;		/* RSB_IO_WANT_BOUNDED_BOX_COMPUTATION */
+	rsb_int_t cache_blocking_method;	/* RSB_IO_WANT_CACHE_BLOCKING_METHOD */
+	rsb_int_t want_outer_spmm;		/* RSB_IO_WANT_LEAF_LEVEL_MULTIVEC */
+#if RSB_HAVE_STREAMS
+	FILE * out_stream;			/* RSB_IO_WANT_OUTPUT_STREAM */
+	FILE * error_stream;			/* RSB_IO_WANT_VERBOSE_ERRORS */
+	FILE * init_stream;			/* RSB_IO_WANT_VERBOSE_INIT */
+	FILE * exit_stream;			/* RSB_IO_WANT_VERBOSE_EXIT */
+#endif /* RSB_HAVE_STREAMS */
+	const rsb_char_t * mhis;		/* RSB_IO_WANT_MEMORY_HIERARCHY_INFO_STRING ; set via rsb_lib_reinit */
+#if RSB_WANT_DEBUG_VERBOSE_INTERFACE_NOTICE
+	rsb_int_t rsb_g_verbose_interface;	/* RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE */
+#endif /* RSB_WANT_DEBUG_VERBOSE_INTERFACE_NOTICE */
+	/* end of user settable variables declarations */
+	long memory_hierarchy_levels;		/*  */
+	struct rsb_memory_level_t caches[RSB_MAX_SUPPORTED_CACHE_LEVELS];	/* 0,..,memory_hierarchy_levels-1*/
+	rsb_bool_t rsb_g_initialized;		/*  */
+#if RSB_WANT_ALLOCATOR_LIMITS
+       	size_t memory_count_max;		/*  */
+	size_t allocations_count_max;		/*  */
+#endif /* RSB_WANT_ALLOCATOR_LIMITS */
+#if RSB_WANT_LIBRSB_TIMER
+	rsb_time_t etime;
+#endif /* RSB_WANT_LIBRSB_TIMER */
+	rsb_int_t verbose_tuning;		/*  */
+};
+
+#define RSB_INTERNALS_COMMON_HEAD_DECLS extern struct rsb_session_handle_t rsb_global_session_handle;
+#define RSB_DO_ERROR_CUMULATE(ERRVAL,ERRFLAG) RSB_DO_FLAG_ADD((ERRVAL),(ERRFLAG))
+
+#define RSB_IF_NOT_NULL_CAST_TO(P,TYPE,FALLBACK) ((P)?*(TYPE*)(P):(FALLBACK))
+#define RSB_IF_NOT_NULL_SET_TO_CASTED(V,P,TYPE) {if((P)!=NULL){(V)=*(TYPE*)(P);}}
+#define RSB_IF_NOT_NULL_GET_TO_CASTED(V,P,TYPE) {if((P)!=NULL){*(TYPE*)(P)=(V);}}
+#define RSB_IF_NOT_NULL_GET_SET_TO_CASTED(V,P,TYPE,F,ERRVAL)	{	\
+	switch(F){							\
+		case(RSB_IO_SPECIFIER_GET):				\
+		RSB_IF_NOT_NULL_GET_TO_CASTED((V),(P),TYPE);break;		\
+		case(RSB_IO_SPECIFIER_SET):				\
+		RSB_IF_NOT_NULL_SET_TO_CASTED((V),(P),TYPE);break;	\
+		default: RSB_DO_ERROR_CUMULATE(ERRVAL,RSB_ERR_BADARGS); }}
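+/*
+ * Illustrative sketch (not part of the build; `iop', `iof' and `errval' are
+ * hypothetical names for an option value pointer, a get/set specifier, and an
+ * error accumulator): mirroring a library variable through a single macro:
+ *
+ *   rsb_int_t v = rsb_global_session_handle.asm_sort_method;
+ *   RSB_IF_NOT_NULL_GET_SET_TO_CASTED(v,iop,rsb_int_t,iof,errval);
+ *   rsb_global_session_handle.asm_sort_method = v;
+ */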
+#define rsb__sprintf sprintf
+
+#define RSB_BLAS_ERROR		-1	/* */
+#define RSB_BLAS_NO_ERROR 	0	/* */
+#define RSB_BLAS_ERROR_UNSUPPORTED   RSB_BLAS_ERROR			/* TODO: spread usage of this throughout the code */
+#define RSB_BLAS_ERROR_UNIMPLEMENTED RSB_BLAS_ERROR			/* TODO: spread usage of this throughout the code */
+#define RSB_BLAS_ERROR_WRONG_USGP_ARG RSB_BLAS_ERROR			/* TODO: spread usage of this throughout the code */
+
+#define RSB_SET_IF_NOT_NULL(P,V) if((P)!=NULL)*(P)=V
+typedef int rsb_blas_int_t;
+typedef double rsb_aligned_t;	/* see RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE and RSB_CONST_ENOUGH_BYTES_FOR_ANY_TYPE */
+
+
+#define RSB_MASK_OUT_SOME_ERRORS(ERRVAL) {if((ERRVAL)==RSB_ERR_UNSUPPORTED_FEATURE)(ERRVAL)=RSB_ERR_NO_ERROR;}/* NOTE: this macro is only used to prevent the test suite from complaining about failing printouts when output is disabled! */
+
+/*!
+ Macros to get the limit values of the index types; configuration-dependent.
+*/
+#define RSB_COO_HALF_BITS_SIZE	((sizeof(rsb_coo_idx_t)*RSB_CHAR_BIT)/2)
+
+#define RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS {int ___foo=1;++___foo;}/* will avoid things like error: label at end of compound statement */
+
+#if RSB_WANT_ZLIB_SUPPORT
+#define RSB_FOPEN(X,Y) gzopen((X),(Y))
+#define RSB_FCLOSE(X) gzclose(X)
+#else /* RSB_WANT_ZLIB_SUPPORT */
+#define RSB_FOPEN(X,Y) fopen((X),(Y))
+#define RSB_FCLOSE(X) fclose(X)
+#endif /* RSB_WANT_ZLIB_SUPPORT */
+
+#define RSB_EMPTY_FILE_FILLER static int foo(void){return 0;}
+
+#define RSB_DECLARE_COO_ARRAYS_FROM_MATRIX(IA,JA,MATRIX,TYPE) 	\
+		TYPE *IA=(TYPE*)(MATRIX)->bpntr;			\
+		TYPE *JA=(TYPE*)(MATRIX)->bindx;
+
+#define RSB_DECLARE_COO_IARRAY_FROM_MATRIX(IA,MATRIX,TYPE) 	\
+		TYPE *IA=(TYPE*)(MATRIX)->bpntr;
+
+#define RSB_DECLARE_COO_JARRAY_FROM_MATRIX(JA,MATRIX,TYPE) 	\
+		TYPE *JA=(TYPE*)(MATRIX)->bindx;
+
+#define RSB_DECLARE_CSR_ARRAYS_FROM_MATRIX(PA,JA,MATRIX,PTYPE,ITYPE) 	\
+		PTYPE *PA=(PTYPE*)(MATRIX)->bpntr;			\
+		ITYPE *JA=(ITYPE*)(MATRIX)->bindx;
+
+#define RSB_DECLARE_CONST_HALFCSR_ARRAYS_FROM_MATRIX(PA,JA,MATRIX) 	\
+	RSB_DECLARE_CSR_ARRAYS_FROM_MATRIX(PA,JA,MATRIX,const rsb_nnz_idx_t,const rsb_half_idx_t)
+
+#define RSB_DECLARE_HALFCSR_ARRAYS_FROM_MATRIX(PA,JA,MATRIX) 	\
+	RSB_DECLARE_CSR_ARRAYS_FROM_MATRIX(PA,JA,MATRIX,rsb_nnz_idx_t,rsb_half_idx_t)
+
+#define RSB_DECLARE_CONST_FULLCSR_ARRAYS_FROM_MATRIX(PA,JA,MATRIX) 	\
+	RSB_DECLARE_CSR_ARRAYS_FROM_MATRIX(PA,JA,MATRIX,const rsb_nnz_idx_t,const rsb_coo_idx_t)
+
+#define RSB_DECLARE_FULLCSR_ARRAYS_FROM_MATRIX(PA,JA,MATRIX) 	\
+	RSB_DECLARE_CSR_ARRAYS_FROM_MATRIX(PA,JA,MATRIX,rsb_nnz_idx_t,rsb_coo_idx_t)
+
+#define RSB_DECLARE_CONST_HALFCOO_ARRAYS_FROM_MATRIX(IA,JA,MATRIX) 	\
+	RSB_DECLARE_COO_ARRAYS_FROM_MATRIX(IA,JA,MATRIX,const rsb_half_idx_t)
+
+#define RSB_DECLARE_CONST_HALFCOO_IARRAY_FROM_MATRIX(IA,MATRIX) 	\
+	RSB_DECLARE_COO_IARRAY_FROM_MATRIX(IA,MATRIX,const rsb_half_idx_t)
+
+#define RSB_DECLARE_CONST_HALFCOO_JARRAY_FROM_MATRIX(JA,MATRIX) 	\
+	RSB_DECLARE_COO_JARRAY_FROM_MATRIX(JA,MATRIX,const rsb_half_idx_t)
+
+#define RSB_DECLARE_CONST_FULLCOO_ARRAYS_FROM_MATRIX(IA,JA,MATRIX) 	\
+	RSB_DECLARE_COO_ARRAYS_FROM_MATRIX(IA,JA,MATRIX,const rsb_coo_idx_t)
+
+#define RSB_DECLARE_CONST_FULLCOO_IARRAY_FROM_MATRIX(IA,MATRIX) 	\
+	RSB_DECLARE_COO_IARRAY_FROM_MATRIX(IA,MATRIX,const rsb_coo_idx_t)
+
+#define RSB_DECLARE_CONST_FULLCOO_JARRAY_FROM_MATRIX(JA,MATRIX) 	\
+	RSB_DECLARE_COO_JARRAY_FROM_MATRIX(JA,MATRIX,const rsb_coo_idx_t)
+
+#define RSB_DECLARE_FULLCOO_ARRAYS_FROM_MATRIX(IA,JA,MATRIX) 	\
+	RSB_DECLARE_COO_ARRAYS_FROM_MATRIX(IA,JA,MATRIX,rsb_coo_idx_t)
+
+/*!
+ * If the restrict keyword is supported, we use it in our declarations.
+ * */
+/* #ifdef restrict */
+#ifdef RSB_restrict
+#define RSB_RESTRICT restrict
+#else /* RSB_restrict */
+#define RSB_RESTRICT
+#endif /* RSB_restrict */
+
+#define RSB_VA_OFFSET_POINTER(VA,ES,OFF) 		((rsb_byte_t*)(VA)+(size_t)(ES)*(OFF))
+#define RSB_VA_OFFSET_POINTER_CONST(VA,ES,OFF) 		((const rsb_byte_t*)(VA)+(size_t)(ES)*(OFF))
+#define RSB_VA_MEMCMP(LVA,LOFF,RVA,ROFF,ES) 		\
+	RSB_MEMCMP(RSB_VA_OFFSET_POINTER((LVA),(ES),(LOFF)),RSB_VA_OFFSET_POINTER((RVA),(ES),(ROFF)),(ES))		
+
+/*!
+ * \brief Auxiliary structure for a coo-stored matrix (usually for temporary operations).
+ * */
+struct rsb_coo_matrix_t{
+	rsb_coo_idx_t * IA, * JA;/** row and column indices */
+	rsb_coo_idx_t nr,nc;	/** matrix rows, columns */
+	rsb_nnz_idx_t nnz;	/** matrix (declared) nonzeros */
+	void * VA;		/** values of data elements */
+	rsb_type_t typecode;	/** as specified in the RSB_NUMERICAL_TYPE_* preprocessor symbols in rsb_types.h 	*/
+};
+
+#define RSB_INIT_COO_FROM_MTX(COOP,MTXAP)	{ \
+		(COOP)->nr=(MTXAP)->nr;	\
+		(COOP)->nc=(MTXAP)->nc;	\
+		(COOP)->nnz=(MTXAP)->nnz;	\
+		(COOP)->typecode=(MTXAP)->typecode; }
+
+#define RSB_INIT_CXX_FROM_MTX(COOP,MTXAP)	{ \
+		(COOP)->nr=(MTXAP)->nr;	\
+		(COOP)->nc=(MTXAP)->nc;	\
+		(COOP)->nnz=RSB_MAX((MTXAP)->nnz,1+RSB_MAX((MTXAP)->nr,(MTXAP)->nc)); \
+		(COOP)->typecode=(MTXAP)->typecode; }
+
+#define RSB_BIND_COO_TO_MTX(COOP,MTXAP)	{ \
+		(COOP)->VA=(MTXAP)->VA;	\
+		(COOP)->IA=(MTXAP)->bpntr;	\
+		(COOP)->JA=(MTXAP)->bindx;	}
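+/*
+ * Illustrative sketch (not part of the build): building a COO view of an
+ * assembled matrix without copying (on fully built matrices the coordinate
+ * arrays live in bpntr/bindx, as in the macros above):
+ *
+ *   struct rsb_coo_matrix_t coo;
+ *   RSB_INIT_COO_FROM_MTX(&coo,mtxAp);
+ *   RSB_BIND_COO_TO_MTX(&coo,mtxAp);
+ */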
+
+#define RSB_FLAG_SOME_SYMMETRY				(RSB_FLAG_HERMITIAN|RSB_FLAG_SYMMETRIC)
+#define RSB_FLAG_ALL_STRUCTURAL_FLAGS	(RSB_FLAG_SOME_SYMMETRY|RSB_FLAG_DIAGONAL|RSB_FLAG_TRIANGULAR|RSB_FLAG_UNIT_DIAG_IMPLICIT)
+#define RSB_FLAG_ALL_DUPLICATE_FLAGS	(RSB_FLAG_DUPLICATES_KEEP_LAST|RSB_FLAG_DUPLICATES_SUM)
+#define RSB_DUMMY_ID		rsb_dummy_id
+#define RSB_DUMMY_MTX		(NULL)
+#define RSB_DEFAULT_TEST_MATRIX_FILENAME "pd.mtx"	/**< this file should always be included in the library distribution (FIXME: should enforce this) */
+
+#define RSB_VECTORS_DIFF_DISPLAY_N 10
+#define RSB_VECTORS_DIFF_DISPLAY_N_SMALL 3
+#define RSB_DEFAULT_UNDEFINED_COO_VALUE 0
+
+#define RSB_PSORT_CHUNK 10000			/* FIXME: hardcoded constants are bad (and the PGI compiler won't accept them) */
+#define RSB_MINIMUM_VECOP_OMP_CHUNK 1000			/* FIXME: hardcoded constants are bad (and the PGI compiler won't accept them) */
+
+#define RSB_BOOL_IS_POINTER_NON_NULL(P) ((P)?RSB_BOOL_TRUE:RSB_BOOL_FALSE)
+
+#define RSB_CONDITIONAL_ERRPSET(ERRVALP,ERRVAL) {if(ERRVALP)(*(ERRVALP)=(ERRVAL));}
+#define RSB_MTX_FREE(MTXAP) if(MTXAP){rsb__do_mtx_free(MTXAP);(MTXAP)=NULL;}  /* frees the matrix and nullifies the associated pointer. */
+
+/* #define RSB_FLAGS_RSB_AGNOSTIC RSB_FLAG_FORTRAN_INDICES_INTERFACE */
+/* #define RSB_FLAGS_RSB_NON_AGNOSTIC (RSB_FLAG_USE_HALFWORD_INDICES|RSB_FLAG_WANT_COO_STORAGE|RSB_FLAG_WANT_CSR_STORAGE)  --- see RSB_DO_FLAGS_EXTRACT_STORAGE(flags) */
+#define RSB_FLAGS_RSB_AGNOSTIC (RSB_FLAG_FORTRAN_INDICES_INTERFACE|RSB_FLAG_UNIT_DIAG_IMPLICIT|RSB_FLAG_UPPER|RSB_FLAG_LOWER|RSB_FLAG_SORTED_INPUT|RSB_FLAG_TRIANGULAR|RSB_FLAG_SYMMETRIC|RSB_FLAG_HERMITIAN)
+
+#define RSB_INDEX_FIT_IN_HALFWORD(I) ((I)<=RSB_MAX_VALUE_FOR_TYPE(rsb_half_idx_t))
+#define RSB_INDICES_FIT_IN_HALFWORD(I,J) ( RSB_INDEX_FIT_IN_HALFWORD(I) && RSB_INDEX_FIT_IN_HALFWORD(J) )
+
+#define RSB_IF_NOFLAGS_SET_DEFAULT_MATRIX_FLAGS(V)  						\
+	if(RSB_DO_FLAG_FILTEROUT((V),RSB_FLAGS_RSB_AGNOSTIC)==RSB_FLAG_NOFLAGS)	\
+ 		RSB_DO_FLAG_ADD((V),RSB_FLAG_DEFAULT_MATRIX_FLAGS);
+
+#define RSB_DIVIDE_IN_CHUNKS(N,NTHREADS) RSB_MAX(((N)+(NTHREADS)-1)/(NTHREADS),1)
+#define RSB_EXIT exit
+#define RSB_DO_ERR_RETURN(ERRVAL) {return (ERRVAL);}
+#define RSB_DO_MTX_RETURN(MATRIX,ERRVAL) {return (MATRIX);}
+#define RSB_FLAG_UPPTRI (RSB_FLAG_UPPER|RSB_FLAG_LOWER)
+#define RSB_DO_FLAG_FLIP_UPLO(V)	{\
+if(RSB_DO_FLAG_HAS((V),RSB_FLAG_UPPER)) \
+	RSB_DO_FLAG_DEL((V),RSB_FLAG_UPPER),RSB_DO_FLAG_ADD((V),RSB_FLAG_LOWER); \
+else \
+if(RSB_DO_FLAG_HAS((V),RSB_FLAG_LOWER)) \
+	RSB_DO_FLAG_ADD((V),RSB_FLAG_UPPER),RSB_DO_FLAG_DEL((V),RSB_FLAG_LOWER); \
+}
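+/* E.g.: starting from flags == RSB_FLAG_LOWER|RSB_FLAG_TRIANGULAR,
+ * RSB_DO_FLAG_FLIP_UPLO(flags) yields RSB_FLAG_UPPER|RSB_FLAG_TRIANGULAR,
+ * as needed when operating on a transposed triangle. */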
+#define RSB_PERR_GOTO(LABEL,...) {RSB_ERROR(__VA_ARGS__);goto LABEL;}
+#define RSB_SERR_GOTO(LABEL)     {goto LABEL;}
+
+#define RSB_SYMMETRY_STRING(FLAGS) (RSB_DO_FLAG_HAS(FLAGS,RSB_FLAG_HERMITIAN)?"hermitian":(RSB_DO_FLAG_HAS(FLAGS,RSB_FLAG_SYMMETRIC)?"symmetric":"general"))
+
+typedef float rsb_float_t;
+#define RSB_FLOAT_ONE 1.0f 
+
+#define RSB_RECURSION_MIN_DIM (2)
+#define RSB_RECURSION_MIN_NNZ (4)
+
+/*!
+ An integer type for thread indices.
+ */
+typedef int rsb_thread_t;
+
+/*! \internal  */
+typedef rsb_flags_t rsb_order_t;
+
+
+
+/** \internal \todo:OBSOLETE, REMOVE */ 
+#define RSB_FLAG_OBSOLETE_BLOCK_ASYMMETRIC_Z_SORTING	 	0x008000
+
+/** \internal \todo:obsolete: FIXME  */ 
+/*#define RSB_FLAG_BLOCK_ASYMMETRIC_Z_SORTED	 	0x010000*/
+/** \internal \todo:temporary fix: FIXME  */ 
+#define RSB_FLAG_FIX_FOR_BINARY_LOADED_MATRIX		 	0x010000
+
+/** \internal \todo:EXPERIMENTAL */ 
+#define RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR	 	0x020000
+
+/** if set, the matrix will be partitioned with a block size chosen automatically */ 
+#define RSB_FLAG_AUTO_BLOCKING				0x80000000	/* FIXME: obsolete, unsupported */
+
+
+/** if set, will decide between column or row major for each (leaf) matrix (NEW: EXPERIMENTAL) */
+/*#define RSB_FLAG_WANT_AUTO_MAJOR_ORDER 			0x200000	*/	/* Unsupported */
+
+
+#if 0
+/** if set, the matrix ..  */ 
+#define RSB_FLAG_WANT_RECURSIVELY_NON_UNIFORM_AUTO_BLOCKING 0x000200	/* experimental, but works well */
+#endif /* 0 */
+
+/** If set, the matrix will take possession of the partitioning arrays p_r and p_c on input; if unset, a copy will be made	*/ 
+#define RSB_FLAG_OWN_PARTITIONING_ARRAYS		0x000080	/*  */
+
+/** if set, the blocks will be linked in some way */
+/* FIXME: delete this flag */
+/*#define RSB_FLAG_WANT_LINKED_STORAGE 			0x000400*/
+
+/** if set, operating routines will check input more aggressively (may break operation)  */
+#define RSB_FLAG_SHOULD_DEBUG 				0x000800
+
+/** if set, the block partitioning will be fixed, but stored as VBR or LBR (Unsupported)	*/
+#define RSB_FLAG_WANT_FIXED_BLOCKING_VBR	 	0x001000
+
+/** if set, will mark a leaf matrix */
+#define RSB_FLAG_NON_ROOT_MATRIX	 	0x100000
+
+/** if set, the matrix will be prevented from being subdivided too much (OUTLAWED) */
+/*#define RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES 		0x4000000*/
+
+/**
+ * if set, the blocks will cycle column after column.
+ * if RSB_FLAG_WANT_BCSS_STORAGE is also set, the matrix storage format will be BCSC.
+ * otherwise it will be VBC.
+ * */
+/* see rsb.h*/
+/*#define RSB_FLAG_WANT_COLUMN_MAJOR_ORDER 		0x4000000*/
+
+
+/** if set, the code will sort the input 			*/
+#define RSB_FLAG_SORT_INPUT			0x2000000	/* FIXME: delete this flag */
+
+
+/** a parameter to determine if a matrix is really 'small' or not (FIXME) */
+#define RSB_EXPERIMENTAL_MIN_LEAF_ELEMENTS 		1024
+
+/*#define RSB_FLAG_RECURSIVE_SHRINK_BOUNDING_BOX		0x40000000*/
+
+#if 0
+/* only flags left :  */
+#define RSB_FLAG_ALLOW_PARALLEL_OPERATION		0x40000000		/* NEW : UNUSED */
+#endif /* 0 */
+
+#if 0
+#define RSB_FLAG_DEFAULT		 		(RSB_FLAG_DISCARD_ZEROS  /*| RSB_FLAG_WANT_BCSS_STORAGE*/ /* | RSB_FLAG_SORT_INPUT*/)
+#endif /* 0 */
+
+
+/**
+ * \brief An internal structure: beware, you should not use it.
+ * \internal
+ *
+ * This structure keeps information about matrix partitioning.
+ * It is used primarily during matrix building, when the matrix arrays are
+ * not yet all allocated.
+ * */
+struct rsb_mtx_partitioning_info_t
+{
+	rsb_blk_idx_t M_b, K_b;		/**< just as in rsb_mtx_t */
+	rsb_blk_idx_t br, bc;			/**< block row and column size (only if BCSR) (NEW) */
+	rsb_coo_idx_t *rpntr,*cpntr;		/**< just as in rsb_mtx_t */
+	
+	rsb_coo_idx_t nr,nc;			/**< just as in rsb_mtx_t */
+	rsb_submatrix_idx_t should_subdivide_levels;		/**< for recursive partitioning (EXPERIMENTAL) */
+};
+
+
+typedef signed   long rsb_long_t;		/* FIXME: internal, (still) unused */
+
+#define	RSB_OP_FLAG_DIAGONAL_OVERRIDE_EXPLICIT__VAL 0x10
+#define RSB_OP_FLAG_WANT_SERIAL__VAL 0x2
+#define RSB_OP_FLAG_DIAGONAL_OVERRIDE_EXPLICIT_SERIAL_VAL (RSB_OP_FLAG_DIAGONAL_OVERRIDE_EXPLICIT__VAL+RSB_OP_FLAG_WANT_SERIAL__VAL) 
+/*!
+ * \ingroup gr_internals
+ * \brief An internal, helper enumeration.
+ * \internal
+ */
+enum rsb_op_flags_t { 	RSB_OP_FLAG_DEFAULT=0x1, /* normal operation */
+       			RSB_OP_FLAG_WANT_SERIAL=RSB_OP_FLAG_WANT_SERIAL__VAL,
+		       	RSB_OP_FLAG_MAY_PARALLEL=0x4,
+			RSB_OP_FLAG_INFINITE_PARALLELISM_EMULATE=0x5, /* will process only diagonal blocks */
+			RSB_OP_FLAG_FAKE_LOCK=0x6, /* will perform operations with no locking (thus giving incorrect results) just to determine lock overhead */
+       			RSB_OP_FLAG_WANT_PARALLEL_SORT=0x7,
+       			RSB_OP_FLAG_WANT_SERIAL_SORT=0x8,
+       			RSB_OP_FLAG_DIAGONAL_OVERRIDE_EXPLICIT=RSB_OP_FLAG_DIAGONAL_OVERRIDE_EXPLICIT__VAL,
+       			RSB_OP_FLAG_DIAGONAL_OVERRIDE_EXPLICIT_SERIAL=RSB_OP_FLAG_DIAGONAL_OVERRIDE_EXPLICIT_SERIAL_VAL,
+       			RSB_OP_FLAG_WANT_TRACE_PLOT=0x10
+			};
+
+#define RSB_BLOCK_ROWMAJOR_ADDRESS(P,LDP,NR,NC,R,C,ES) \
+	((rsb_char_t*)P)+((size_t)(ES))*((LDP)*(R)+(C))
+#define RSB_BLOCK_COLMAJOR_ADDRESS(P,LDP,NR,NC,R,C,ES) \
+	((rsb_char_t*)P)+((size_t)(ES))*((LDP)*(C)+(R))
+
+#define RSB_BLOCK_X_MAJOR_REFERENCE(A,LDP,R,C,ONEIFISCOLMAJOR) \
+	A[(ONEIFISCOLMAJOR)?((LDP)*(C)+(R)):((LDP)*(R)+(C))]
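+/* E.g.: with LDP=4, R=2, C=3, RSB_BLOCK_X_MAJOR_REFERENCE(A,4,2,3,1) is
+ * A[4*3+2], i.e. A[14] (column major), while with 0 as the last argument it
+ * is A[4*2+3], i.e. A[11] (row major). */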
+
+#define RSB_SOME_ERROR(ERRVAL) ((ERRVAL)!=RSB_ERR_NO_ERROR)
+
+#define RSB_USE_OMP_SET_NUM_THREADS 0
+
+#if RSB_USE_OMP_SET_NUM_THREADS
+#define rsb_set_num_threads(RNT) omp_set_num_threads(RNT)
+#define rsb_get_num_threads()    omp_get_num_threads()
+#else
+#define rsb_set_num_threads(RNT) rsb__set_num_threads(RNT)
+#define rsb_get_num_threads()    rsb__set_num_threads(RSB_THREADS_GET)
+#endif
+#define RSB_DO_THREADS_PUSH(RNT)	{if((RNT)>0)rsb_set_num_threads(RNT); /* push */}
+#define RSB_DO_THREADS_POP(RNT,ORNT)	{if((RNT)>0)rsb_set_num_threads(ORNT); /* pop */}
+
+#if defined(RSB_WANT_RSB_NUM_THREADS) && (RSB_WANT_RSB_NUM_THREADS>0) 
+#define RSB_NUM_THREADS_DECL	const char * rnt_str = getenv("RSB_NUM_THREADS"); rsb_int_t ornt = rsb_get_num_threads(), rnt = (rnt_str? rsb__util_atoi(rnt_str) :0);
+#define RSB_NUM_THREADS_PUSH	{RSB_DO_THREADS_PUSH(rnt); /* push */}
+#define RSB_NUM_THREADS_POP	{RSB_DO_THREADS_POP(rnt,ornt); /* pop */}
+#else /* defined(RSB_WANT_RSB_NUM_THREADS) && (RSB_WANT_RSB_NUM_THREADS>0) */
+#define RSB_NUM_THREADS_DECL
+#define RSB_NUM_THREADS_PUSH
+#define RSB_NUM_THREADS_POP
+#endif /* defined(RSB_WANT_RSB_NUM_THREADS) && (RSB_WANT_RSB_NUM_THREADS>0) */
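+/*
+ * Illustrative sketch (not part of the build): honouring the RSB_NUM_THREADS
+ * environment variable around a parallel section:
+ *
+ *   RSB_NUM_THREADS_DECL
+ *   RSB_NUM_THREADS_PUSH
+ *   // ... threaded work here uses the requested thread count, if set ...
+ *   RSB_NUM_THREADS_POP
+ */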
+
+#if defined(RSB_BLAS_WANT_EXPERIMENTAL_TUNING)
+#define RSB_SPB_THREADS_PUSH	{RSB_DO_THREADS_PUSH(rnt); /* push */}
+#define RSB_SPB_THREADS_POP	{RSB_DO_THREADS_POP(rnt,ornt); /* pop */}
+#else /* defined(RSB_BLAS_WANT_EXPERIMENTAL_TUNING) */
+#define RSB_SPB_THREADS_PUSH
+#define RSB_SPB_THREADS_POP
+#endif /* defined(RSB_BLAS_WANT_EXPERIMENTAL_TUNING) */
+
+#define RSB_SPB_THREADS_DEFAULT 0
+#define RSB_SPB_THREADS_AUTO -1
+#define RSB_SPB_THR_STR_AUTO -2
+#define RSB_SPB_THR_STR_AUTO_NEXTOP -3 /* TODO: need to diversify in thr.-only vs str.+thr. tuning */
+
+/* #define RSB_PRINT_THREAD_STATS RSB_STDOUT("rsb_want_threads / rsb_g_threads / omp_get_max_threads / omp_get_num_threads / omp_get_thread_limit: %d / %d / %d / %d / %d\n",rsb_global_session_handle.rsb_want_threads,rsb_global_session_handle.rsb_g_threads,omp_get_max_threads(),omp_get_num_threads(),omp_get_thread_limit()); */
+#define RSB_PRINT_THREAD_STATS RSB_STDOUT("rsb_want_threads / rsb_g_threads / omp_get_max_threads / omp_get_num_threads: %d / %d / %d / %d\n",(int)rsb_global_session_handle.rsb_want_threads,(int)rsb_global_session_handle.rsb_g_threads,(int)omp_get_max_threads(),(int)omp_get_num_threads());
+
+#define RSB_ERRMSG_NOSTREAMS "streams usage configured out."
+#define RSB_ERRMSG_BADFORMAT "submatrix format unrecognized."
+#define RSB_ERRMSG_NOTMTXMKT "not a Matrix Market format matrix"
+#define RSB_ERRMSG_FILEOPENP "problems opening"
+#define RSB_ERRMSG_PROIFAMM "problems reading or interpreting file as Matrix Market"
+#define RSB_ERRMSG_FILEOPENPGZ "problems opening gzipped"
+#define RSB_ERRMSG_TMXMKTBANNER "Could not process Matrix Market banner"
+#define RSB_ERRMSG_BADCOO "bad input coo elements"
+#define RSB_INFOMSG_SAK "is a swiss army knife for testing the library functionality and performance"
+
+#define RSB_WANT_COO_BEGIN 1 
+
+#if RSB_WANT_COO_BEGIN 
+#define RSB_MTX_HBDF(MTXAP) ((MTXAP)->RSB_MTX_BMF==RSB_MTX_BMV)
+#define RSB_MTX_HBDFH(MTXAP) ((MTXAP)->RSB_MTX_BDF)
+#define RSB_MTX_BDF nnz
+#define RSB_MTX_BMF nr
+#define RSB_MTX_BMV -1
+#endif /* RSB_WANT_COO_BEGIN */
+
+#define RSB_STDOUT_MATRIX_ESSENTIALS(M,MN,TN) RSB_STDOUT("%s\t%c\t%c\t%d\t%d\t%d\t%d",(const rsb_char_t*)rsb__basename(MN),rsb__do_get_symmetry_char(M),RSB_TRANSPOSITION_AS_CHAR(transA),TN,(M)->nr,(M)->nc,(M)->nnz)
+#define RSB_FPRINTF_MATRIX_ESSENTIALS(FD,M,MN,TN) RSB_FPRINTF(FD,"%s\t%c\t%c\t%d\t%d\t%d\t%d",(const rsb_char_t*)rsb__basename(MN),rsb__do_get_symmetry_char(M),RSB_TRANSPOSITION_AS_CHAR(transA),TN,(M)->nr,(M)->nc,(M)->nnz)
+#define RSB_FPINV(FPV) (1.0/(FPV))
+#define RSB_MILLION_I 1000000
+#define RSB_MILLION_F 1000000.0
+#define RSB_CLEARTERM_STRING "\x1B\x4D"
+/*#define RSB_MAX_SHORTIDX_MATRIX_DIM (RSB_MAX_VALUE_FOR_TYPE(rsb_half_idx_t)-RSB_NNZ_BLK_MAX)*/
+#define RSB_MAX_SHORTIDX_MATRIX_DIM (RSB_MAX_VALUE_FOR_TYPE(rsb_half_idx_t))
+#define RSB_BENCH_PROG_OPTS \
+	    {"nthreads",	required_argument, NULL, 0x6E},/* n */  
+#define RSB_MAX_ALLOCATABLE_MEMORY_CHUNK \
+((size_t)((sizeof(void*)==sizeof(unsigned int))? RSB_MAX_VALUE_FOR_TYPE(unsigned int):RSB_MAX_VALUE_FOR_TYPE(size_t)))
+#define RSB_DOES_TRANSPOSE(TRANSA) ((TRANSA)!=RSB_TRANSPOSITION_N)
+#define RSB_DOES_NOT_TRANSPOSE(TRANSA) (!RSB_DOES_TRANSPOSE(TRANSA))
+#define RSB_DOES_CONJUGATE(TRANSA) ((TRANSA)==RSB_TRANSPOSITION_C)
+#define RSB_DOES_NOT_CONJUGATE(TRANSA) (!RSB_DOES_CONJUGATE(TRANSA))
+#define RSB_MTX_TRANSPOSED_ROWS(MTX,TRANSA) (RSB_DOES_TRANSPOSE((TRANSA))?(MTX)->nc:(MTX)->nr)
+#define RSB_MTX_TRANSPOSED_COLS(MTX,TRANSA) (RSB_DOES_TRANSPOSE((TRANSA))?(MTX)->nr:(MTX)->nc)
+#define RSB_MTX_DIAG_SIZE(MTX) RSB_MIN( (MTX)->nc,(MTX)->nr )
+#define RSB_MTX_DIAG_SIZE_BLK(MTX)  (RSB_MTX_DIAG_SIZE(MTX) + RSB_NNZ_BLK_MAX)
+
+#define RSB_ALLOW_ZERO_DIM (RSB_MIN_MATRIX_DIM == 0)
+#define RSB_ANY_MTX_DIM_ZERO(MTXAP) ((MTXAP) && (((MTXAP)->nr==0)||(MTXAP)->nc==0))
+
+#if defined(RSB_WANT_OMP_RECURSIVE_KERNELS) && (RSB_WANT_OMP_RECURSIVE_KERNELS>0)
+#define RSB_NT rsb_global_session_handle.rsb_g_threads
+#define RSB_NTC num_threads(RSB_NT)
+#else
+#define RSB_NT
+#define RSB_NTC
+#endif
+#define RSB_STORE_IDXSA 1
+
+#define RSB_ASSIGN_IF_SP(DSTV,SRCP) 	\
+	if ( (SRCP) != NULL )		\
+		(DSTV) = *(SRCP);		/* FIXME: move this declaration elsewhere */
+
+#define RSB_ASSIGN_IF_DP(DSTP,SRCV) 	\
+	if ( (DSTP) != NULL )		\
+		*(DSTP) = (SRCV);		/* FIXME: move this declaration elsewhere */
+
+#define RSB_ASSIGN_IF(DSTP,SRCV) RSB_ASSIGN_IF_DP(DSTP,SRCV)
+
+#ifdef RSB_HAVE_ASSERT_H 
+#ifdef RSB_USE_ASSERT
+/* ok, no extra action needed */
+#else /* RSB_USE_ASSERT */
+/* according to POSIX.1-2001, C89, C99, NDEBUG will cause assert to generate no code.  */
+#define NDEBUG 1
+#endif /* RSB_USE_ASSERT */
+#include <assert.h>	/* the assert() macro */
+#endif /* RSB_HAVE_ASSERT_H */
+
+#include "rsb.h"		/* public API specification */
+#include "rsb_init.h"		/* initialization functions */
+#include "rsb_rec.h"		/* recursion handling functions */
+#include "rsb_permute.h"	/* permutation functions */
+#include "rsb_srt.h"		/* sorting functions */
+#include "rsb_mergesort.h"	/* sorting functions */
+#include "rsb_merge.h"		/* merging functions */
+#include "rsb_srtp.h"		/* parallel sorting functions */
+#include "rsb_prec.h"		/* toy preconditioning */
+#include "rsb_msort_up.h"	/* sorting functions, adapted from PSBLAS */
+#include "rsb_unroll.h"		/* computational kernels */
+#include "rsb_is.h"			/* coordinate handling functions */
+#include "rsb_src.h"		/* search functions */
+#include "rsb_clone.h"		/* clone functions */
+#include "rsb_err.h"		/* error handling functions */
+#include "rsb_internals.h"		/* */
+#include "rsb_do.h"		/* */
+#include "rsb_mio.h"			/* I/O functions */
+#include "rsb_get.h"		/* matrix getter functions */
+#include "rsb_set.h"		/* matrix setter functions */
+#include "rsb_dump.h"		/* matrix dumping functions */
+#include "rsb_coo.h"		/* coordinate handling functions */
+#include "rsb_csr.h"		/* csr handling functions */
+#include "rsb_blas_stuff.h"		/* BLAS like stuff */
+#include "rsb_op.h"			/* */
+#include "rsb_bio.h"		/* Binary Matrix I/O */
+#include "rsb_asm.h"		/* Matrix assembly functions */
+#include "rsb_coo_check.h"	/* */
+#include "rsb_coo_symm.h"		/* */
+#include "rsb_idx.h"		/* index manipulation */
+/* #include "rsb_ftn.h"*/		/* fortran interface functions (obsolete) */
+#include "rsb_libspblas_handle.h"	/*  */
+#include "rsb_render.h"		/* matrix as pixmap rendering functions */
+#include "rsb_eps.h"		/* matrix as (encapsulated) postscript rendering functions */
+#include "rsb_gen.h"		/* matrix generating functions */
+#include "rsb_sys.h"		/* system related functions */
+#include "rsb_mbw.h"		/* memory benchmark related functions */
+#include "rsb_limiter.h"	/*  */
+#include "rsb_fpb.h"		/* floating point benchmark related functions */
+#include "rsb_garbage.h"		/* misc helpers routines */
+#include "rsb_pcnt.h"		/* performance counters code */
+#include "rsb_perf.h"		/* performance info gathering code */
+#include "rsb_pr.h"		/* performance reporting */
+#include "rsb_util.h"		/* sorting and computational stuff */
+#include "rsb_spmv.h"		/* sparse matrix-vector multiplication */
+#include "rsb_swt.h"		/* switching format functions */
+#include "rsb_lock.h"		/* */
+#include "rsb_partition.h"	/* custom partitioning stuff (OBSOLETE) */
+#include "rsb_krnl.h"		/* kernels rsb_krnlers */
+#include "rsb_krnl_vb.h"	/* vb specific functions */
+/* #include "libspblas_tests.h" */	/*  */
+#include "rsb_test_accuracy.h"	/* accuracy testing functions */
+#include "rsb_krnl_bcss.h"	/* bcss specific functions */
+#include "rsb_krnl_bcoo_spmv_u.h"	/* bcoo specific functions */
+#include "rsb_bench.h"		/* performance info gathering code (OBSOLETE) */
+#include "rsb_spgemm.h"		/* sparse matrices multiplication */
+#include "rsb_spgemm_csr.h"	/* sparse matrices multiplication */
+#include "rsb_spsum_misc.h"	/* sum of Sparse Matrices */
+#include "rsb_spsum.h"		/* Sum of Sparse Matrices */
+#include "rsb_spsv.h"		/* */
+#include "rsb_lbl.h"		/* OBSOLETE */
+/* #include "rsb_experiments.h" */	/* experiments (obsolete) */
+#include "rsb_coo2rec.h"		/* */
+#include "rsb_rec2coo.h"		/* */
+#include "rsb_rec2csr.h"		/* */
+#include "rsb_csr2coo.h"		/* */
+#include "rsb_cpmv.h"		/* */
+#include "rsb_tune.h"		/* */
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+#endif /* RSB_COMMON_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_config.m4 b/rsb_config.m4
new file mode 100644
index 0000000..ea7ef53
--- /dev/null
+++ b/rsb_config.m4
@@ -0,0 +1,56 @@
+dnl	Code generator configuration
+dnl	Michele Martone
+dnl
+dnl
+dnl	---------------------------------------------------------------------------
+dnl	Whether we want OpenMP thread level parallelism (EXPERIMENTAL)
+dnl
+dnl define(`RSB_M4_WANT_OMP',`ifelse(`@libmmvbr_cv_openmp@',`yes',`1',`')')dnl
+define(`RSB_M4_WANT_OMP',`ifelse(`yes',`yes',`1',`')')dnl
+define(`RSB_M4_WANT_OMP_IN_RECURSION',`ifelse(`yes',`yes',`1',`')')dnl
+define(`RSB_M4_WANT_OMP_IN_KERNELS',`ifelse(`yes',`yes',`',`')')dnl
+dnl define(`RSB_M4_WANT_OMP_IN_KERNELS',`ifelse(`yes',`yes',`1',`')')dnl
+define(`RSB_M4_MAX_OMP_THREADS',`4')dnl	FIXME : TEMPORARY 
+dnl
+dnl	---------------------------------------------------------------------------
+dnl	The following triggers the generation of code with some m4 debug info in it.
+dnl
+define(`RSB_M4_DEBUG',`1')dnl
+dnl
+dnl	---------------------------------------------------------------------------
+dnl	If 1, enables register blocking, in kernels where this is supported (experimental).
+dnl
+define(`RSB_M4_WANT_BLOCKING',`1')dnl
+dnl
+dnl	---------------------------------------------------------------------------
+dnl	The number of registers, in case of register blocking (EXPERIMENTAL).
+dnl
+define(`RSB_M4_REGISTERS',`8')dnl
+dnl
+dnl	---------------------------------------------------------------------------
+define(`RSB_M4_FITTING_SAMPLES',/*12 8*/4)dnl
+dnl
+dnl	---------------------------------------------------------------------------
+define(`RSB_M4_BENCHMARK_MIN_SECONDS',/*0.5*/1.0)dnl
+dnl
+define(`RSB_M4_BENCHMARK_MIN_RUNS',/*5*/10)dnl
+dnl
+dnl	---------------------------------------------------------------------------
+define(`RSB_M4_BUFLEN',128)dnl
+dnl
+dnl	---------------------------------------------------------------------------
+define(`RSB_M4_USE_RESTRICT',`ifelse(`yes',`yes',`1',`')')dnl
+dnl
+dnl	---------------------------------------------------------------------------
+dnl	Version strings.
+dnl
+define(`RSB_M4_WANT_LIBRSB_VER_DATE',`September 01, 2016')dnl
+define(`RSB_M4_WANT_LIBRSB_VER_MAJOR',`1')dnl
+define(`RSB_M4_WANT_LIBRSB_VER_MINOR',`2')dnl
+define(`RSB_M4_WANT_LIBRSB_LIBRSB_VER',`10200')dnl
+define(`RSB_M4_WANT_LIBRSB_VER_PATCH',`0')dnl
+define(`RSB_M4_WANT_LIBRSB_VER_PRERS',`-rc5')dnl
+dnl
+dnl	---------------------------------------------------------------------------
+define(`RSB_M4_WANT_20110206_BOUNDED_BOX_PATCH',1)dnl
+dnl	---------------------------------------------------------------------------
diff --git a/rsb_config.m4.in b/rsb_config.m4.in
new file mode 100644
index 0000000..1b8b444
--- /dev/null
+++ b/rsb_config.m4.in
@@ -0,0 +1,56 @@
+dnl	Code generator configuration
+dnl	Michele Martone
+dnl
+dnl
+dnl	---------------------------------------------------------------------------
+dnl	Whether we want OpenMP thread level parallelism (EXPERIMENTAL)
+dnl
+dnl define(`RSB_M4_WANT_OMP',`ifelse(`@libmmvbr_cv_openmp@',`yes',`1',`')')dnl
+define(`RSB_M4_WANT_OMP',`ifelse(`@enable_openmp@',`yes',`1',`')')dnl
+define(`RSB_M4_WANT_OMP_IN_RECURSION',`ifelse(`@enable_openmp@',`yes',`1',`')')dnl
+define(`RSB_M4_WANT_OMP_IN_KERNELS',`ifelse(`@enable_openmp@',`yes',`',`')')dnl
+dnl define(`RSB_M4_WANT_OMP_IN_KERNELS',`ifelse(`@enable_openmp@',`yes',`1',`')')dnl
+define(`RSB_M4_MAX_OMP_THREADS',`4')dnl	FIXME : TEMPORARY 
+dnl
+dnl	---------------------------------------------------------------------------
+dnl	The following triggers the generation of code with some m4 debug info in it.
+dnl
+define(`RSB_M4_DEBUG',`1')dnl
+dnl
+dnl	---------------------------------------------------------------------------
+dnl	If 1, enables register blocking, in kernels where this is supported (experimental).
+dnl
+define(`RSB_M4_WANT_BLOCKING',`1')dnl
+dnl
+dnl	---------------------------------------------------------------------------
+dnl	The number of registers, in case of register blocking (EXPERIMENTAL).
+dnl
+define(`RSB_M4_REGISTERS',`8')dnl
+dnl
+dnl	---------------------------------------------------------------------------
+define(`RSB_M4_FITTING_SAMPLES',/*12 8*/4)dnl
+dnl
+dnl	---------------------------------------------------------------------------
+define(`RSB_M4_BENCHMARK_MIN_SECONDS',/*0.5*/1.0)dnl
+dnl
+define(`RSB_M4_BENCHMARK_MIN_RUNS',/*5*/10)dnl
+dnl
+dnl	---------------------------------------------------------------------------
+define(`RSB_M4_BUFLEN',128)dnl
+dnl
+dnl	---------------------------------------------------------------------------
+define(`RSB_M4_USE_RESTRICT',`ifelse(`@enable_restrict@',`yes',`1',`')')dnl
+dnl
+dnl	---------------------------------------------------------------------------
+dnl	Version strings.
+dnl
+define(`RSB_M4_WANT_LIBRSB_VER_DATE',`@LIBRSB_VER_DATE@')dnl
+define(`RSB_M4_WANT_LIBRSB_VER_MAJOR',`@LIBRSB_VER_MAJOR@')dnl
+define(`RSB_M4_WANT_LIBRSB_VER_MINOR',`@LIBRSB_VER_MINOR@')dnl
+define(`RSB_M4_WANT_LIBRSB_LIBRSB_VER',`@LIBRSB_LIBRSB_VER@')dnl
+define(`RSB_M4_WANT_LIBRSB_VER_PATCH',`@LIBRSB_VER_PATCH@')dnl
+define(`RSB_M4_WANT_LIBRSB_VER_PRERS',`@LIBRSB_VER_PRERS@')dnl
+dnl
+dnl	---------------------------------------------------------------------------
+define(`RSB_M4_WANT_20110206_BOUNDED_BOX_PATCH',1)dnl
+dnl	---------------------------------------------------------------------------
diff --git a/rsb_coo.c b/rsb_coo.c
new file mode 100644
index 0000000..caf1739
--- /dev/null
+++ b/rsb_coo.c
@@ -0,0 +1,737 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains functions for COO handling.
+ * */
+#include "rsb_internals.h"
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+#define RSB_MEMMOVE_BASED_DUPS_HANDLING 0
+
+static rsb_nnz_idx_t rsb_weed_out_duplicates_unsorted(rsb_coo_idx_t *RSB_RESTRICT IA, rsb_coo_idx_t *RSB_RESTRICT JA, void *RSB_RESTRICT VA, rsb_nnz_idx_t nnz, rsb_type_t typecode)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * Weeds out duplicate coordinate elements.
+	 * returns the true nnz after weeding out duplicates
+	 *
+	 * \note : basic, unoptimized implementation.
+	 * \note : there is no test routine for this function.
+	 * \note : if needed, could enhance this routine by restructuring and using rsb_util_compact_marked_coo_array
+	 */
+	rsb_nnz_idx_t i,k,dups = 0;
+	size_t el_size = 0;
+
+	if(!IA || !JA || RSB_INVALID_NNZ_INDEX(nnz) )
+		return 0;
+
+	el_size = RSB_NUMERICAL_TYPE_SIZE(typecode);
+	RSB_DEBUG_ASSERT(el_size);
+
+	for(k=0  ;k<nnz;++k)
+	for(i=k+1;i<nnz;++i)
+	{
+		if( IA[k]==IA[i] && JA[k]==JA[i] )
+		{
+			/* this is a debug routine, hence the naive implementation */
+			RSB_MEMMOVE(IA+i,IA+i+1,sizeof(rsb_coo_idx_t)*(nnz-i-1));
+			RSB_MEMMOVE(JA+i,JA+i+1,sizeof(rsb_coo_idx_t)*(nnz-i-1));
+			/* note that a zero-length move is legal for the next operation */
+			RSB_MEMMOVE(
+				((rsb_byte_t*)(VA))+i*el_size,
+				((rsb_byte_t*)(VA))+(i+1)*el_size,el_size*(nnz-i-1));
+			++dups;
+			--nnz;
+		}
+	}
+	return nnz;
+}
+
+static rsb_nnz_idx_t rsb_weed_out_duplicates_from_sorted(rsb_coo_idx_t *RSB_RESTRICT IA, rsb_coo_idx_t *RSB_RESTRICT JA, void *RSB_RESTRICT VA, rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_flags_t flags)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * Weeds out duplicate coordinate elements.
+	 *
+	 * \return the true nnz after weeding out duplicates
+	 * \note : assumes input is sorted.
+	 * \note : if input is not really sorted, will remove only contiguous duplicates.
+	 *
+	 * TODO : could suffer from overflow, because in principle rsb_coo_idx_t != rsb_nnz_idx_t .
+	 * Only works for total orders (thus, no blocked orderings!).
+	 * TODO : make this parallel.
+	 */
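+	/*
+	 * Illustrative sketch of the in-place run encoding used below (not part
+	 * of the library API): with sorted input IA={0,0,0,1,2,2}, JA={0,0,0,1,3,3}
+	 * and the first-element policy, the head of each duplicate run survives,
+	 * IA[k+1] records the run length, JA at the previous run's record chains
+	 * to the next run, and the last run is terminated with
+	 * RSB_MARKER_COO_VALUE; rsb_util_compact_marked_coo_array then squeezes
+	 * the arrays, yielding nnz=3 with IA={0,1,2}, JA={0,1,3}.
+	 */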
+	size_t el_size = 0;
+#if (!RSB_MEMMOVE_BASED_DUPS_HANDLING)
+	const rsb_coo_idx_t marker = RSB_MARKER_COO_VALUE; 	
+	rsb_coo_idx_t fd = marker,ld = marker;    /* first duplicate sequence, last duplicate sequence */
+	rsb_nnz_idx_t k = 0, dups = 0, moved = 0, moves = 0;
+#endif /* RSB_MEMMOVE_BASED_DUPS_HANDLING */
+	if(!IA || !JA || RSB_INVALID_NNZ_INDEX(nnz) || nnz < 2)
+	{
+		goto ret;
+	}
+
+	el_size = RSB_NUMERICAL_TYPE_SIZE(typecode);
+
+#if (!RSB_MEMMOVE_BASED_DUPS_HANDLING)
+
+	for(k=0  ;k<nnz-1;  )
+	if(IA[k]==IA[k+1] && JA[k]==JA[k+1] )
+	{
+		/* we found a duplicate pair */
+		rsb_coo_idx_t ldc; /* local duplicates count */
+		rsb_byte_t*lp = ((rsb_byte_t*)(VA))+k*el_size,*rp;
+		ldc = 1;
+
+		while( k+ldc<nnz-1 && IA[k+ldc] == IA[k+ldc+1] && JA[k+ldc] == JA[k+ldc+1] )
+		{
+			/* we look for more dups */
+			++ldc;
+		}
+		rp = lp+ldc*el_size;
+	//	RSB_INFO("dup: %d: %d %d (%d x)\n",k,IA[k],JA[k],ldc);
+#ifdef RSB_FLAG_DUPLICATES_SUM
+		//	RSB_ERROR("%d..%d..%d\n",k,ldc,nnz);
+		if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_DUPLICATES_SUM))
+		{
+			// FIXME: missing error handling
+			/*errval|=*/rsb__util_vector_sum_strided(lp,lp,typecode,ldc+1,1);
+		}
+		else
+#endif /* RSB_FLAG_DUPLICATES_SUM */
+#ifdef RSB_FLAG_DUPLICATES_KEEP_LAST
+		if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_DUPLICATES_KEEP_LAST))
+		{
+			RSB_MEMCPY(lp,rp,el_size);
+		}
+		else
+#endif /* RSB_FLAG_DUPLICATES_KEEP_LAST */
+		{
+			/* the first element is the one remaining */
+		}
+
+		if(fd==marker)
+		{
+			/* if the first duplicate sequence starts here (at k), we record the index just past its head */
+			fd = k+1;
+		}
+		else
+		{
+			/* if this is not the first one, we chain this sequence's start index into JA[ld] of the previous one */
+			JA[ld] = k+1;
+		}
+		/* we write the current sequence's length in IA[k+1] */
+		IA[k+1] = ldc;
+
+		/* we advance */
+		ld = k+1;
+		k += ldc+1;
+		dups += ldc;
+	}
+	else
+		++k;
+
+	//RSB_ERROR("! %d dups\n",dups);
+	/* no dups ? nothing to do. */
+	if(!dups)
+		goto ret;
+
+	/* we mark the last duplicate sequence as such */
+	JA[ld] = marker;
+
+	/* ok, we are ready for compacting the sequence */
+	rsb_util_compact_marked_coo_array(IA,JA,VA,nnz,el_size,fd,&moved,&moves);
+	//RSB_INFO("%d nnz - %d dups\n",nnz,dups);
+	nnz -= dups;
+	goto ret;
+#else /* RSB_MEMMOVE_BASED_DUPS_HANDLING */
+	el_size = RSB_NUMERICAL_TYPE_SIZE(typecode);
+	/* very, very slow */
+	for(k=0;RSB_LIKELY(k<nnz-1);++k)
+	if(RSB_UNLIKELY(IA[k]==IA[k+1] && JA[k]==JA[k+1] ))
+	{
+		/* this is a debug routine, hence the naive implementation */
+		RSB_MEMMOVE(IA+k,IA+k+1,sizeof(rsb_coo_idx_t)*(nnz-k-1));
+		RSB_MEMMOVE(JA+k,JA+k+1,sizeof(rsb_coo_idx_t)*(nnz-k-1));
+		/* note that a zero-length move is legal for the next operation */
+		RSB_MEMMOVE(
+			((rsb_byte_t*)(VA))+ k   *el_size,
+			((rsb_byte_t*)(VA))+(k+1)*el_size,el_size*(nnz-k-1));
+		++dups;
+		--nnz;
+		RSB_ERROR("dup: %d: %d %d\n",k,IA[k],JA[k]);
+	}
+#endif /* RSB_MEMMOVE_BASED_DUPS_HANDLING */
+ret:
+	return nnz;
+}
+
+rsb_nnz_idx_t rsb_weed_out_duplicates(rsb_coo_idx_t *RSB_RESTRICT IA, rsb_coo_idx_t *RSB_RESTRICT JA, void *RSB_RESTRICT VA, rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_flags_t flags)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * Weeds out duplicate coordinate elements.
+	 *
+	 * \return the true nnz after weeding out duplicates
+	 */
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_SORTED_INPUT))
+		return rsb_weed_out_duplicates_from_sorted(IA,JA,VA,nnz,typecode,flags);
+	else
+		return rsb_weed_out_duplicates_unsorted(IA,JA,VA,nnz,typecode);
+}
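+
+#if 0
+/* Illustrative usage sketch (not compiled in): it assumes the double typecode
+ * (RSB_NUMERICAL_TYPE_DOUBLE) is enabled in this build. Duplicates of sorted
+ * COO input are weeded out in place; the surviving count is returned. */
+static void rsb_weed_out_duplicates_demo(void)
+{
+	rsb_coo_idx_t IA[] = {0,0,1};
+	rsb_coo_idx_t JA[] = {0,0,1};
+	double        VA[] = {1.0,2.0,3.0};
+	rsb_nnz_idx_t nnz = 3;
+
+	/* the first-element policy keeps VA[0]; with RSB_FLAG_DUPLICATES_SUM
+	 * the two (0,0) entries would be summed instead; nnz becomes 2 */
+	nnz = rsb_weed_out_duplicates(IA,JA,VA,nnz,RSB_NUMERICAL_TYPE_DOUBLE,RSB_FLAG_SORTED_INPUT);
+}
+#endif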
+
+rsb_nnz_idx_t rsb_check_for_zeros(const void * VA, rsb_nnz_idx_t nnz, rsb_type_t typecode)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * Checks for zero elements.
+	 *
+	 * Note : basic, unoptimized implementation.
+	 */
+	rsb_nnz_idx_t k,zeros = 0;
+	size_t el_size = RSB_NUMERICAL_TYPE_SIZE(typecode);
+
+	if(!VA || RSB_INVALID_NNZ_INDEX(nnz) || RSB_MATRIX_UNSUPPORTED_TYPE(typecode))
+	{
+		return RSB_ERR_BADARGS;
+	}
+
+	for(k=0;RSB_LIKELY(k<nnz);++k)
+		if( RSB_IS_ELEMENT_ZERO(((rsb_byte_t*)VA) + k * el_size , typecode ))
+		{
+			++zeros;
+			/* could be improved, of course */
+		}
+	return zeros;
+}
+
+rsb_nnz_idx_t rsb_check_for_nonzeros(const void * VA, rsb_nnz_idx_t nnz, rsb_type_t typecode)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * Checks for non-zero elements.
+	 *
+	 * Note : basic, unoptimized implementation.
+	 */
+	return nnz-rsb_check_for_zeros(VA,nnz,typecode);
+}
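+
+/* e.g. for a double VA={0.0,1.0,0.0} and nnz=3, rsb_check_for_zeros gives 2
+ * and rsb_check_for_nonzeros gives 1 -- useful e.g. before compacting. */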
+
+rsb_err_t rsb_util_compact_marked_coo_array( rsb_coo_idx_t *RSB_RESTRICT IA, rsb_coo_idx_t *RSB_RESTRICT JA, void *RSB_RESTRICT VA, rsb_nnz_idx_t nnz, size_t el_size, rsb_coo_idx_t fd, rsb_nnz_idx_t * movedp, rsb_nnz_idx_t * movesp)
+{
+	/*!
+		\ingroup gr_internals
+		\return the number of moved elements
+		The same technique could be used for in-place BCSR element displacement.
+	*/
+	rsb_nnz_idx_t k = 0,moved = 0,moves = 0;
+	const rsb_coo_idx_t marker = RSB_MARKER_COO_VALUE; 	
+	rsb_coo_idx_t nld = 0,ld = 0;
+	rsb_byte_t* vp = VA;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t ldc,lnd;
+
+	RSB_DEBUG_ASSERT(IA);
+	RSB_DEBUG_ASSERT(JA );
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(fd));
+	
+	if(!IA || !JA || RSB_INVALID_NNZ_INDEX(nnz) )
+		return RSB_ERR_BADARGS;
+
+#if defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1400) && defined(__INTEL_COMPILER_BUILD_DATE) && (__INTEL_COMPILER_BUILD_DATE >= 20140120)
+/* #if defined(__INTEL_COMPILER_UPDATE) && (__INTEL_COMPILER_UPDATE == 2) */
+	/* seems to be a bug occurring with e.g. ./rsbench -Q 1 when compiling with -O3 and 'icc (ICC) 14.0.2 20140120'; turning on the asserts or printing fd/JA[fd] masks the bug */
+	if( fd >=0 && JA[fd]!=marker)
+#endif
+	for( ld=fd,k=fd ; RSB_LIKELY(JA[ld]!=marker); ld=nld )
+	{
+		rsb_coo_idx_t ldc,lnd;
+		ldc = IA[ld];
+		nld = JA[ld];
+		lnd = nld-(ld+ldc);	/* local marked count, local non marked */
+
+		RSB_DEBUG_ASSERT(fd >=0);
+		RSB_DEBUG_ASSERT(ld >=0);
+		RSB_DEBUG_ASSERT(ld <nnz);
+		RSB_DEBUG_ASSERT(ldc>0);
+		RSB_DEBUG_ASSERT(lnd>=0);
+//		RSB_INFO("k : %d  ld : %d  lnd : %d  ldc : %d  JA[ld] : %d   nld : %d\n", k,ld,lnd,ldc,JA[ld],nld);
+/*		RSB_INFO("(%zd .. %zd) <- (%zd .. %zd)\n", 
+			(rsb_printf_int_t)(k),
+			(rsb_printf_int_t)(k+(lnd-1)),
+			(rsb_printf_int_t)(ldc+ld),
+			(rsb_printf_int_t)(ldc+ld+(lnd-1))
+			);*/
+
+		RSB_MEMMOVE(IA+k,IA+ld+ldc,(lnd) * sizeof(rsb_coo_idx_t));
+		RSB_MEMMOVE(JA+k,JA+ld+ldc,(lnd) * sizeof(rsb_coo_idx_t));
+		RSB_MEMMOVE(vp+(el_size*k),vp+el_size*(ld+ldc), (lnd) * el_size);
+		k += (lnd);
+		moved += (lnd);
+		moves++;
+	}
+	
+	/* JA[ld]==marker (last marked sequence ) */
+	ldc = IA[ld],lnd = (nnz-(ld+ldc));	/* local marked count, local non marked */
+//	RSB_INFO("k : %d  ld : %d  lnd : %d  ldc : %d  JA[ld] : %d   nld : %d\n", k,ld,lnd,ldc,JA[ld],nld);
+
+	if(lnd)
+	{
+		RSB_MEMMOVE(IA+k,IA+ld+ldc,lnd * sizeof(rsb_coo_idx_t));
+		RSB_MEMMOVE(JA+k,JA+ld+ldc,lnd * sizeof(rsb_coo_idx_t));
+		RSB_MEMMOVE(vp+(el_size*k),vp+el_size*(ld+ldc), lnd * el_size);
+			moved += (lnd);
+		moves++;
+/*		RSB_INFO("(%zd .. %zd) <- (%zd .. %zd)\n", 
+			(rsb_printf_int_t)(k),
+			(rsb_printf_int_t)(k+(lnd-1)),
+			(rsb_printf_int_t)(ldc+ld),
+			(rsb_printf_int_t)(ldc+ld+(lnd-1))
+			);*/
+	}
+	if(movesp)
+		*movesp = moves;
+	if(movedp)
+		*movedp = moved;
+
+	if(0)
+	RSB_STDERR("performed %zd moves, moved %zd elements out of %zd\n",(rsb_printf_int_t)moves,(rsb_printf_int_t)moved,(rsb_printf_int_t)nnz);
+
+	RSB_DEBUG_ASSERT(moved>=0 );
+	RSB_DEBUG_ASSERT(moved<=nnz);
+	RSB_DEBUG_ASSERT(moves>=0 );
+	RSB_DEBUG_ASSERT(moves<=nnz);
+	RSB_DEBUG_ASSERT(moves<=moved);
+	RSB_DO_ERR_RETURN(errval)
+}
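+
+/*
+ * Worked example (a sketch, not library documentation): with nnz=6 and the
+ * elements at positions 1..2 and 5 marked for deletion (fd=1, IA[1]=2,
+ * JA[1]=5, IA[5]=1, JA[5]=RSB_MARKER_COO_VALUE), the survivors at positions
+ * 3..4 are moved down to 1..2, so the first 3 slots remain valid and
+ * *movedp==2, *movesp==1 on return.
+ */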
+
+static rsb_err_t rsb_do_util_compact_nonzeros(void *RSB_RESTRICT VA, rsb_coo_idx_t *RSB_RESTRICT IA, rsb_coo_idx_t *RSB_RESTRICT JA, rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_nnz_idx_t *RSB_RESTRICT gapp, rsb_nnz_idx_t * RSB_RESTRICT discardedp, rsb_flags_t flags  )
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * Will compact the non zero input coefficients of type typecode.
+	 *
+	 * \param VA	a pointer to a valid coefficients array
+	 * \param IA	a pointer to a valid rows coefficients array
+	 * \param JA	a pointer to a valid columns coefficients array
+	 * \param nnz	the coefficients count
+	 * \param typecode	the coefficients typecode
+	 * \param gapp	a pointer where the cut off elements number will be stored
+	 * \return the number of discarded elements (0 or more) or an error code otherwise
+	 *
+	 * Note: this documentation is obsolete.
+	 * Note : this code is slow, both algorithmically and otherwise (it is debug code).
+	 * TODO: shall use flags for verbosity
+	 * */
+	size_t el_size = RSB_NUMERICAL_TYPE_SIZE(typecode);	/* missing unsupported typecode check */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	const rsb_coo_idx_t marker = RSB_MARKER_COO_VALUE; 	
+	rsb_coo_idx_t fz = marker,lz = marker;    /* first zero sequence, last zero sequence */
+	rsb_nnz_idx_t k = 0,zeros = 0,holes = 0,dzeros = 0;
+	const int verbose = 0 * (flags != RSB_FLAG_NOFLAGS); /* FIXME */
+
+	if(!VA || !IA || !JA || !gapp || RSB_INVALID_NNZ_INDEX(nnz) )
+	{
+		errval = RSB_ERR_BADARGS;
+		{ RSB_PERR_GOTO(err,RSB_ERRM_ES) }
+	}
+
+	for(k=0  ;RSB_LIKELY(k<nnz);  )
+	if( RSB_UNLIKELY(RSB_IS_ELEMENT_ZERO(((rsb_byte_t*)VA)+el_size*k,typecode)) )
+	{
+		/* we found a zero */
+		rsb_coo_idx_t lzc = 0; /* local zeros count */
+		int iod = (IA[k+lzc]==JA[k+lzc])?1:0;
+		lzc = 1;
+		dzeros += iod;
+
+		if(verbose)
+			RSB_STDOUT("zero: %d:  r: %d  c: %d (%d x, diag=%c)\n",k+lzc,IA[k+lzc],JA[k+lzc],lzc,iod?'y':'n');
+		while( k+lzc<nnz && RSB_IS_ELEMENT_ZERO(((rsb_byte_t*)VA)+el_size*(k+lzc),typecode) )
+		{
+			iod = (IA[k+lzc]==JA[k+lzc])?1:0;
+			/* we look for more zeros */
+			if(verbose)
+				RSB_STDOUT("zero: %d:  r: %d  c: %d (%d x, diag=%c)\n",k+lzc,IA[k+lzc],JA[k+lzc],lzc,iod?'y':'n');
+			++lzc;
+		}
+		holes += (k+1+lzc!=nnz);	/* a run ending at the bottom is not counted as a hole */
+		
+		if( RSB_UNLIKELY( fz == marker ) )
+		{
+			/* if this (at k) is the first zero sequence, we keep its index */
+			fz = k;
+		}
+		else
+		{
+			/* if this is not the first one, we record this sequence's start index in JA[lz] */
+			JA[lz] = k;
+		}
+		/* we write the current sequence's length in IA[k] */
+		IA[k] = lzc;
+
+		/* we advance */
+		lz = k;
+		k += lzc;
+		zeros += lzc;
+	}
+	else
+		++k;
+
+	/* no zeros ? nothing to do. */
+	if(zeros<=0)
+	{
+		/* nothing to do */
+	}
+	else
+	{
+		rsb_nnz_idx_t moved = 0,moves = 0;
+		/* we mark the last zero sequence as such */
+		JA[lz] = marker;
+
+		/* ok, we are ready for compacting the sequence */
+		errval = rsb_util_compact_marked_coo_array(IA,JA,VA,nnz,el_size,fz,&moved,&moves);
+/*		if(moves!=holes) // will trigger false positive error in cases like (0,0)  <- (1,1)
+		{
+			RSB_ERROR("%zd != %zd\n",(rsb_printf_int_t)moves,(rsb_printf_int_t)holes);
+                	return RSB_ERR_INTERNAL_ERROR;
+		}*/
+		if(RSB_SOME_ERROR(errval))
+		{ RSB_PERR_GOTO(err,RSB_ERRM_ES) }
+	}
+
+	if(discardedp)
+		*discardedp = zeros;
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb_do_util_compact_out_of_range(void *RSB_RESTRICT VA, rsb_coo_idx_t *RSB_RESTRICT IA, rsb_coo_idx_t *RSB_RESTRICT JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t roff, rsb_coo_idx_t  coff, rsb_coo_idx_t Mdim, rsb_coo_idx_t mdim, rsb_type_t typecode, rsb_nnz_idx_t *RSB_RESTRICT gapp, rsb_nnz_idx_t * RSB_RESTRICT discardedp )
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * Will compact the input coefficients of type typecode, discarding the out-of-range ones.
+	 *
+	 * \param VA	a pointer to a valid coefficients array
+	 * \param IA	a pointer to a valid rows coefficients array
+	 * \param JA	a pointer to a valid columns coefficients array
+	 * \param nnz	the coefficients count
+	 * \param typecode	the coefficients typecode
+	 * \param gapp	a pointer where the cut off elements number will be stored
+	 * \return the number of discarded elements (0 or more) or an error code otherwise
+	 *
+	 * Note : this documentation is out of date.
+	 * Note : this code is slow, both algorithmically and otherwise (it is debug code).
+	 * */
+	size_t el_size = RSB_NUMERICAL_TYPE_SIZE(typecode);	/* missing unsupported typecode check */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	const rsb_coo_idx_t marker = RSB_MARKER_COO_VALUE; 	
+	rsb_coo_idx_t fz = marker,lz = marker;    /* first zero sequence, last zero sequence */
+	rsb_nnz_idx_t k = 0,zeros = 0,holes = 0;
+
+
+	if(!VA || !IA || !JA || !gapp || RSB_INVALID_NNZ_INDEX(nnz) )
+	{
+		errval = RSB_ERR_BADARGS;
+	       	RSB_PERR_GOTO(err,"Bad argument(s): VA=%p IA=%p JA=%p gapp=%p nnz=%d\n",VA,IA,JA,gapp,(int)nnz);
+	}
+
+	for(k=0  ;RSB_LIKELY(k<nnz);  )
+	if(RSB_UNLIKELY( IA[k]<roff || IA[k]>=roff+Mdim || JA[k]<coff || JA[k]>=coff+mdim ))
+	{
+		/* we found an out of range element */
+		rsb_coo_idx_t lzc; /* local out of range count */
+		lzc = 1;
+
+		while( k+lzc<nnz && ( IA[k+lzc]<roff || IA[k+lzc]>=roff+Mdim || JA[k+lzc]<coff || JA[k+lzc]>=coff+mdim ) )
+		{
+			/* we look for more */
+			++lzc;
+		}
+		holes += (k+1+lzc!=nnz);	/* a run ending at the bottom is not counted as a hole */
+
+	//	RSB_INFO("zero: %d: %d %d (%d x)\n",k,IA[k],JA[k],ldc);
+		
+		if(RSB_UNLIKELY(fz==marker))
+		{
+			/* if this (at k) is the first out-of-range sequence, we keep its index */
+			fz = k;
+		}
+		else
+		{
+			/* if this is not the first one, we record this sequence's start index in JA[lz] */
+			JA[lz] = k;
+		}
+		/* we write the current sequence's length in IA[k] */
+		IA[k] = lzc;
+
+		/* we advance */
+		lz = k;
+		k += lzc;
+		zeros += lzc;
+	}
+	else
+		++k;
+
+	/* no zeros ? nothing to do. */
+	if(zeros<=0)
+	{
+		/* nothing to do */
+	}
+	else
+	{
+		rsb_nnz_idx_t moved = 0,moves = 0;
+		/* we mark the last zero sequence as such */
+		JA[lz] = marker;
+
+		/* ok, we are ready for compacting the sequence */
+		errval = rsb_util_compact_marked_coo_array(IA,JA,VA,nnz,el_size,fz,&moved,&moves);
+/*		if(moves!=holes) // will trigger false positive error in cases like (0,0)  <- (1,1)
+		{
+			RSB_ERROR("%zd != %zd\n",(rsb_printf_int_t)moves,(rsb_printf_int_t)holes);
+                	return RSB_ERR_INTERNAL_ERROR;
+		}*/
+		if(RSB_SOME_ERROR(errval))
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	if(discardedp)
+		*discardedp = zeros;
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb_util_compact_nonzeros(void *RSB_RESTRICT VA, rsb_coo_idx_t *RSB_RESTRICT IA, rsb_coo_idx_t *RSB_RESTRICT JA, rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_nnz_idx_t *RSB_RESTRICT gapp, rsb_nnz_idx_t * RSB_RESTRICT discardedp, rsb_flags_t flags )
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * Will compact the input coefficients of type typecode without
+	 * changing their relative order, discarding coefficients which are zero.
+	 *
+	 * \param VA	a pointer to a valid coefficients array
+	 * \param IA	a pointer to a valid rows coefficients array
+	 * \param JA	a pointer to a valid columns coefficients array
+	 * \param nnz	the coefficients count
+	 * \param typecode	the coefficients typecode
+	 * \return the number of discarded elements (0 or more) or an error code otherwise
+	 * 
+	 * Note : this documentation is out of date.
+	 * */
+	if(RSB_MATRIX_UNSUPPORTED_TYPE(typecode))
+	{
+		return RSB_ERR_UNSUPPORTED_TYPE;
+	}
+	return rsb_do_util_compact_nonzeros(VA, IA,  JA, nnz, typecode, gapp, discardedp, flags);
+}
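+
+#if 0
+/* Illustrative sketch (not compiled in), assuming the double typecode is
+ * enabled: the explicit zero at position 1 is discarded in place, the first
+ * two entries remain valid and discarded==1. Note that gapp must be non-NULL,
+ * as it is checked for. */
+static void rsb_util_compact_nonzeros_demo(void)
+{
+	rsb_coo_idx_t IA[] = {0,1,2};
+	rsb_coo_idx_t JA[] = {0,1,2};
+	double        VA[] = {1.0,0.0,3.0};
+	rsb_nnz_idx_t gap = 0, discarded = 0;
+
+	rsb_util_compact_nonzeros(VA,IA,JA,3,RSB_NUMERICAL_TYPE_DOUBLE,&gap,&discarded,RSB_FLAG_NOFLAGS);
+}
+#endif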
+
+rsb_err_t rsb_weed_out_non_upptri(void *RSB_RESTRICT VA, rsb_coo_idx_t *RSB_RESTRICT IA, rsb_coo_idx_t *RSB_RESTRICT JA, rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_nnz_idx_t *RSB_RESTRICT gapp, rsb_nnz_idx_t * RSB_RESTRICT discardedp )
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * */
+	return rsb_weed_out_non_lowtri(VA,JA,IA,nnz,typecode,gapp,discardedp);
+}
+
+rsb_err_t rsb_weed_out_non_lowtri(void *RSB_RESTRICT VA, rsb_coo_idx_t *RSB_RESTRICT IA, rsb_coo_idx_t *RSB_RESTRICT JA, rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_nnz_idx_t *RSB_RESTRICT gapp, rsb_nnz_idx_t * RSB_RESTRICT discardedp )
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * */
+	size_t el_size = RSB_NUMERICAL_TYPE_SIZE(typecode);	/* missing unsupported typecode check */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	const rsb_coo_idx_t marker = RSB_MARKER_COO_VALUE; 	
+	rsb_coo_idx_t fz = marker,lz = marker;    /* first zero sequence, last zero sequence */
+	rsb_nnz_idx_t k = 0,zeros = 0,holes = 0;
+
+
+	if(!VA || !IA || !JA /*|| !gapp*/ || RSB_INVALID_NNZ_INDEX(nnz) )
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	for(k=0  ;RSB_LIKELY(k<nnz);  )
+	if(RSB_UNLIKELY(IA[k]<JA[k]))
+	{
+		/* we found a strictly upper triangular element */
+		rsb_coo_idx_t lzc; /* local count of elements to weed out */
+		lzc = 1;
+
+		while(RSB_UNLIKELY( k+lzc<nnz && IA[k+lzc]<JA[k+lzc] ))
+		{
+			/* we look for more strictly upper triangular elements */
+			++lzc;
+		}
+		holes += (k+1+lzc!=nnz);	/* a run ending at the bottom is not counted as a hole */
+
+		//RSB_INFO("up tri: %d: %d %d (%d x)\n",k,IA[k],JA[k],lzc);
+		
+		if(RSB_UNLIKELY(fz==marker))
+		{
+			/* if this (at k) is the first upper triangular sequence, we keep its index */
+			fz = k;
+		}
+		else
+		{
+			/* if this is not the first one, we record this sequence's start index in JA[lz] */
+			JA[lz] = k;
+		}
+		/* we write the current sequence's length in IA[k] */
+		IA[k] = lzc;
+
+		/* we advance */
+		lz = k;
+		k += lzc;
+		zeros += lzc;
+	}
+	else
+		++k;
+
+	/* no zeros ? nothing to do. */
+	if(zeros<=0)
+	{
+		/* nothing to do */
+	}
+	else
+	{
+		rsb_nnz_idx_t moved = 0,moves = 0;
+		/* we mark the last zero sequence as such */
+		JA[lz] = marker;
+
+		/* ok, we are ready for compacting the sequence */
+		errval = rsb_util_compact_marked_coo_array(IA,JA,VA,nnz,el_size,fz,&moved,&moves);
+/*		if(moves!=holes) // will trigger false positive error in cases like (0,0)  <- (1,1)
+		{
+			RSB_ERROR("%zd != %zd\n",(rsb_printf_int_t)moves,(rsb_printf_int_t)holes);
+                	return RSB_ERR_INTERNAL_ERROR;
+		}*/
+		if(RSB_SOME_ERROR(errval))
+		{ RSB_PERR_GOTO(err,RSB_ERRM_ES) }
+	}
+
+	if(discardedp)
+		*discardedp = zeros;
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb_weed_out_diagonal(void *RSB_RESTRICT VA, rsb_coo_idx_t *RSB_RESTRICT IA, rsb_coo_idx_t *RSB_RESTRICT JA, rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_nnz_idx_t *RSB_RESTRICT gapp, rsb_nnz_idx_t * RSB_RESTRICT discardedp )
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * */
+	size_t el_size = RSB_NUMERICAL_TYPE_SIZE(typecode);	/* missing unsupported typecode check */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	const rsb_coo_idx_t marker = RSB_MARKER_COO_VALUE; 	
+	rsb_coo_idx_t fz = marker,lz = marker;    /* first zero sequence, last zero sequence */
+	rsb_nnz_idx_t k = 0,zeros = 0,holes = 0;
+
+
+	if(!VA || !IA || !JA || !gapp || (nnz!=0 && RSB_INVALID_NNZ_INDEX(nnz)) )
+	{
+		errval = RSB_ERR_BADARGS;
+		{ RSB_PERR_GOTO(err,RSB_ERRM_ES) }
+	}
+	if(nnz==0)
+	{ goto err; } /* nothing to do */
+
+	for(k=0  ;RSB_LIKELY(k<nnz);  )
+	if(RSB_UNLIKELY(IA[k]==JA[k]))
+	{
+		/* we found a diagonal element */
+		rsb_coo_idx_t lzc; /* local count of elements to weed out */
+		lzc = 1;
+
+		while( k+lzc<nnz && IA[k+lzc]==JA[k+lzc] )
+		{
+			/* we look for more diagonal elements */
+			++lzc;
+		}
+		holes += (k+1+lzc!=nnz);	/* a run ending at the bottom is not counted as a hole */
+
+		//RSB_INFO("up tri: %d: %d %d (%d x)\n",k,IA[k],JA[k],lzc);
+		
+		if(RSB_UNLIKELY(fz==marker))
+		{
+			/* if this (at k) is the first diagonal sequence, we keep its index */
+			fz = k;
+		}
+		else
+		{
+			/* if this is not the first one, we record this sequence's start index in JA[lz] */
+			JA[lz] = k;
+		}
+		/* we write the current sequence's length in IA[k] */
+		IA[k] = lzc;
+
+		/* we advance */
+		lz = k;
+		k += lzc;
+		zeros += lzc;
+	}
+	else
+		++k;
+
+	/* no zeros ? nothing to do. */
+	if(zeros<=0)
+	{
+		/* nothing to do */
+	}
+	else
+	{
+		rsb_nnz_idx_t moved = 0,moves = 0;
+		/* we mark the last zero sequence as such */
+		JA[lz] = marker;
+
+		/* ok, we are ready for compacting the sequence */
+		errval = rsb_util_compact_marked_coo_array(IA,JA,VA,nnz,el_size,fz,&moved,&moves);
+/*		if(moves!=holes) // will trigger false positive error in cases like (0,0)  <- (1,1)
+		{
+			RSB_ERROR("%zd != %zd\n",(rsb_printf_int_t)moves,(rsb_printf_int_t)holes);
+                	return RSB_ERR_INTERNAL_ERROR;
+		}*/
+		if(RSB_SOME_ERROR(errval))
+		{ RSB_PERR_GOTO(err,RSB_ERRM_ES) }
+	}
+
+	if(discardedp)
+		*discardedp = zeros;
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+/* @endcond */
diff --git a/rsb_coo.h b/rsb_coo.h
new file mode 100644
index 0000000..fb06992
--- /dev/null
+++ b/rsb_coo.h
@@ -0,0 +1,46 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains functions for COO handling.
+ * */
+
+#ifndef RSB_COO_H_INCLUDED
+#define RSB_COO_H_INCLUDED
+
+#include "rsb_internals.h"
+
+rsb_nnz_idx_t rsb_weed_out_duplicates(rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t * RSB_RESTRICT JA, void *VA, rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_flags_t flags);
+rsb_nnz_idx_t rsb_check_for_zeros(const void * RSB_RESTRICT VA, rsb_nnz_idx_t nnz, rsb_type_t typecode);
+rsb_nnz_idx_t rsb_check_for_nonzeros(const void * RSB_RESTRICT VA, rsb_nnz_idx_t nnz, rsb_type_t typecode);
+rsb_err_t rsb_util_compact_nonzeros(void *RSB_RESTRICT VA, rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t * RSB_RESTRICT JA, rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_nnz_idx_t *RSB_RESTRICT gapp, rsb_nnz_idx_t * RSB_RESTRICT discardedp, rsb_flags_t flags);
+rsb_err_t rsb_util_compact_marked_coo_array( rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t * RSB_RESTRICT JA, void *RSB_RESTRICT VA, rsb_nnz_idx_t nnz, size_t el_size, rsb_coo_idx_t fd, rsb_nnz_idx_t * movedp, rsb_nnz_idx_t * movesp);
+rsb_err_t rsb_weed_out_non_lowtri(void *RSB_RESTRICT VA, rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t * RSB_RESTRICT JA, rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_nnz_idx_t *RSB_RESTRICT gapp, rsb_nnz_idx_t * RSB_RESTRICT discardedp );
+rsb_err_t rsb_weed_out_non_upptri(void *RSB_RESTRICT VA, rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t * RSB_RESTRICT JA, rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_nnz_idx_t *RSB_RESTRICT gapp, rsb_nnz_idx_t * RSB_RESTRICT discardedp );
+rsb_err_t rsb_weed_out_diagonal(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_nnz_idx_t *RSB_RESTRICT gapp, rsb_nnz_idx_t * RSB_RESTRICT discardedp );
+rsb_err_t rsb_do_util_compact_out_of_range(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t roff, rsb_coo_idx_t  coff, rsb_coo_idx_t Mdim, rsb_coo_idx_t mdim, rsb_type_t typecode, rsb_nnz_idx_t *RSB_RESTRICT gapp, rsb_nnz_idx_t * RSB_RESTRICT discardedp );
+
+#endif /* RSB_COO_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_coo2rec.c b/rsb_coo2rec.c
new file mode 100644
index 0000000..17b283b
--- /dev/null
+++ b/rsb_coo2rec.c
@@ -0,0 +1,2990 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @brief  Recursive Sparse matrices assembling code.
+ * @author Michele Martone
+ * */
+/*
+ * TODO: improve this code, because there are a number of unclean practices which could break a build.
+ * */
+#include "rsb_common.h"
+
+#ifndef RSB_C2R_ASSERT
+//#define RSB_C2R_ASSERT(e) assert(e)		// uncomment this to use   asserts
+#define RSB_C2R_ASSERT(e)			// uncomment this to avoid asserts
+#else /* RSB_C2R_ASSERT */
+#undef RSB_C2R_ASSERT
+#define RSB_C2R_ASSERT(e) 
+#endif /* RSB_C2R_ASSERT */
+
+#define RSB_DO_ENOUGHNNZFORINDEXBASEDBUILD(M) (!RSB_DO_TOOFEWNNZFORRCSR((M)->nnz,(M)->nr))
+#define RSB_C2R_IF_VERBOSE 0	/* activates output which is useful for debugging */
+#define RSB_C2R_PARANOIA 0
+
+#define RSB_MEMCPY_SMALL_GENERAL(ID,IS,DOFF,SOFF,NNZ,TYPE) \
+	{ \
+		TYPE*dp = ((TYPE*)(ID))+(DOFF),*ld = dp+(NNZ); \
+		const register TYPE*sp = ((TYPE*)(IS))+(SOFF); \
+		for(;dp<ld;++sp,++dp)*dp = *sp; \
+       	}
+
+#define RSB_C2R_WANT_MAYBE_FASTER 0
+
+#if RSB_C2R_WANT_MAYBE_FASTER 
+#define RSB_COA_MEMCPY_SMALL(ID,IS,DOFF,SOFF,NNZ) RSB_MEMCPY_SMALL_GENERAL(ID,IS,DOFF,SOFF,NNZ,rsb_coo_idx_t)
+#define RSB_COA_MEMCPY_ROWSZ(ID,IS,DOFF,SOFF,NNZ) RSB_COA_MEMCPY_parallel(ID,IS,DOFF,SOFF,NNZ)
+//#define RSB_A_MEMCPY_SMALL(ID,IS,DOFF,SOFF,NNZ,ES) RSB_MEMCPY_SMALL_GENERAL(ID,IS,DOFF,SOFF,NNZ,double)	/* FIXME */
+#define RSB_A_MEMCPY_SMALL(ID,IS,DOFF,SOFF,NNZ,ES) RSB_A_MEMCPY(ID,IS,DOFF,SOFF,NNZ,ES) 
+#else /* RSB_C2R_WANT_MAYBE_FASTER */
+#define RSB_COA_MEMCPY_SMALL(ID,IS,DOFF,SOFF,NNZ) RSB_COA_MEMCPY(ID,IS,DOFF,SOFF,NNZ) 
+#define RSB_A_MEMCPY_SMALL(ID,IS,DOFF,SOFF,NNZ,ES) RSB_A_MEMCPY(ID,IS,DOFF,SOFF,NNZ,ES) 
+#endif /* RSB_C2R_WANT_MAYBE_FASTER */
+
+#define RSB_TIC(T) (T) = -rsb_time()
+#define RSB_TOC(T) (T) += rsb_time()
+#define RSB_TOC_TIC(T,U) {rsb_time_t t = rsb_time();(T) += t;(U) = -t;}
+#define RSB_TIC_TOC(U,T) {rsb_time_t t = rsb_time();(T) += t;(U) = -t;}
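+/* e.g.:  rsb_time_t t; RSB_TIC(t); work(); RSB_TOC(t);  -- t then holds the
+ * elapsed seconds ('work' standing for any code section to be timed) */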
+
+#define RSB_WANT_BINSEARCH_MIN_NZPR 8	/* FIXME */
+#define RSB_WANT_VERBOSE_TIMINGS 0
+#define RSB_WANT_VERBOSE_SUBDIVISION 0	/* */
+#define RSB_WANT_VERBOSE_SUBDIVISION2 0	/* */
+#define RSB_WANT_MORE_PARALLELISM 1	/* FIXME: EXPERIMENTAL, BY DEFAULT TURNED OFF (0) */
+#define RSB_WANT_FIRST_VERSION 0	/* FIXME: EXPERIMENTAL, BY DEFAULT TURNED ON (1) */
+#define RSB_WANT_LITTLE_IMPROVED 1	/* FIXME: EXPERIMENTAL, BY DEFAULT TURNED OFF (0) */
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+#define RSB_WANT_PARALLEL_SUBDIVISION 1	/* FIXME: EXPERIMENTAL, BY DEFAULT TURNED OFF (0) */
+#else /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+#define RSB_WANT_PARALLEL_SUBDIVISION 0	/* FIXME: EXPERIMENTAL, BY DEFAULT TURNED OFF (0) */
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+#define RSB_WANT_QUADRANT_QUICK_DETECT 0/* FIXME: EXPERIMENTAL, BY DEFAULT TURNED OFF (0) */
+#define RSB_WANT_SUBDIVISION_FIXES_20101120 1 /* FIXME: EXPERIMENTAL, BY DEFAULT TURNED OFF (0) */
+#define RSB_WANT_SUBDIVISION_FIXES_20101213 0 /* FIXME: EXPERIMENTAL ) */
+#define RSB_WANT_FIX_BUG_DISCOVERED_20121210 1	/* this bug prevents from HCSR usage */
+#if RSB_WANT_VERBOSE_SUBDIVISION2
+//#define RSB_MTXASM_INFO RSB_INFO	/* NEW */
+#define RSB_MTXASM_INFO printf	/* NEW */
+#else /* RSB_MTXASM_INFO */
+#define RSB_MTXASM_INFO 	/* NEW */
+#endif /* RSB_MTXASM_INFO */
+
+#define RSB_SUBDIVISION_SKEW_MAX  (RSB_FLOAT_ONE/2.0)
+#define RSB_MAX_QUADRANTS_UNBALANCE (4-1)
+#define RSB_SUBDIVISION_BUG_EXTRA (4)		/* incorrect behaviour is encountered if this is set to 0, as it should be (experienced on a 12-core machine); the proper bugfix remains unknown to me. */
+
+#define RSB_WANT_FASTER_EXPERIMENTAL_CONSTRUCTOR 0 /* FIXME: this is experimental code and shall be finished ! */
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+#if RSB_WANT_FASTER_EXPERIMENTAL_CONSTRUCTOR
+#define RSB_POW2(P) (1<<(P))
+//#define RSB_MUL2(P) ((P) *= 2)
+#define RSB_MUL2(P) ((P)<<=1)
+//#define RSB_HALF(V) (((V)+1)/2)
+#define RSB_HALF(V) (((V)+1)>>1)
+#define RSB_POW4(P) (RSB_POW2(P)*RSB_POW2(P))
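+/* e.g. RSB_POW2(3)==8, RSB_HALF(7)==4, RSB_HALF(8)==4 (RSB_HALF gives the
+ * ceiling of the half) */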
+static inline rsb_nnz_idx_t rsb_coo_index_bit_interleave(rsb_coo_idx_t o, rsb_coo_idx_t e)
+{
+	/* FIXME: this is DUPLICATE code !!! */
+	rsb_nnz_idx_t i = 0, O = o, E = e;
+	RSB_DEBUG_ASSERT(O>=0);
+	RSB_DEBUG_ASSERT(E>=0);
+	if (sizeof(rsb_nnz_idx_t)==1)
+	{
+		E = (E | (E << 2)) & 0x33;
+		E = (E | (E << 1)) & 0x55;
+		O = (O | (O << 2)) & 0x33;
+		O = (O | (O << 1)) & 0x55;
+	}
+	else
+	if (sizeof(rsb_nnz_idx_t)==2)
+	{
+		E = (E | (E << 4)) & 0x0F0F;
+		E = (E | (E << 2)) & 0x3333;
+		E = (E | (E << 1)) & 0x5555;
+		O = (O | (O << 4)) & 0x0F0F;
+		O = (O | (O << 2)) & 0x3333;
+		O = (O | (O << 1)) & 0x5555;
+	}
+	else
+	if (sizeof(rsb_nnz_idx_t)==4)
+	{
+		E = (E | (E << 8)) & 0x00FF00FF;
+		E = (E | (E << 4)) & 0x0F0F0F0F;
+		E = (E | (E << 2)) & 0x33333333;
+		E = (E | (E << 1)) & 0x55555555;
+		O = (O | (O << 8)) & 0x00FF00FF;
+		O = (O | (O << 4)) & 0x0F0F0F0F;
+		O = (O | (O << 2)) & 0x33333333;
+		O = (O | (O << 1)) & 0x55555555;
+	}
+	else
+	if (sizeof(rsb_nnz_idx_t)==8)
+	{
+		E = (E | (E <<16)) & 0x0000FFFF0000FFFF;
+		E = (E | (E << 8)) & 0x00FF00FF00FF00FF;
+		E = (E | (E << 4)) & 0x0F0F0F0F0F0F0F0F;
+		E = (E | (E << 2)) & 0x3333333333333333;
+		E = (E | (E << 1)) & 0x5555555555555555;
+		O = (O | (O <<16)) & 0x0000FFFF0000FFFF;
+		O = (O | (O << 8)) & 0x00FF00FF00FF00FF;
+		O = (O | (O << 4)) & 0x0F0F0F0F0F0F0F0F;
+		O = (O | (O << 2)) & 0x3333333333333333;
+		O = (O | (O << 1)) & 0x5555555555555555;
+	}
+	else
+	{
+		RSB_ERROR(RSB_ERRM_FYRYNS);
+		/* FIXME : fatal! */
+	}
+
+	i = (E | (O << 1));
+	RSB_DEBUG_ASSERT((i & ~-1)>=0);
+	return i;
+}
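+
+/* e.g. interleaving o=5 (binary 101) with e=3 (binary 011) yields 39 (binary
+ * 100111): odd result bits come from o, even ones from e, i.e. the Morton
+ * (Z-order) index of the coordinate pair (o,e). */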
+
+rsb_err_t rsb_assign_subm__(rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t * RSB_RESTRICT JA, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnz)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	const rsb_coo_idx_t nlev = 4;
+	/* FIXME: irm, jrm fit together in a single byte! (1byte/nnz!) */
+	rsb_coo_idx_t nlevi,nlevj;
+	rsb_coo_idx_t mink = RSB_POW2(nlev),maxk = mink;
+	rsb_nnz_idx_t scnt[16*16], nzi;
+	rsb_coo_idx_t idiv[   16];
+	rsb_coo_idx_t jdiv[   16];
+	rsb_nnz_idx_t nzoff = 0;
+	//rsb_coo_idx_t * zIA, * zJA;
+	rsb_byte_t * zIA = NULL, * zJA = NULL;
+	rsb_coo_idx_t * oIA = NULL, * oJA = NULL;
+
+	RSB_BZERO_P(&scnt);
+	oJA = rsb__malloc(nnz*sizeof(rsb_coo_idx_t));
+	oIA = rsb__malloc(nnz*sizeof(rsb_coo_idx_t));
+	zJA = rsb__malloc(nnz*sizeof(rsb_coo_idx_t));
+	zIA = rsb__malloc(nnz*sizeof(rsb_coo_idx_t));
+	if(!oJA || !oIA) goto err;
+	if(!zJA || !zIA) goto err;
+	for(nzi=0;RSB_LIKELY(nzi<nnz);++nzi)
+	{
+		const rsb_coo_idx_t i = IA[nzi],j = JA[nzi];
+		rsb_coo_idx_t irm = 0,jrm = 0;
+		rsb_coo_idx_t sm = m,sk = k;
+		rsb_coo_idx_t om = 0,ok = 0;
+		rsb_int sidx;
+		for(nlevi=0;nlevi<nlev;++nlevi)
+		{
+			rsb_coo_idx_t hm = RSB_HALF(sm),hk = RSB_HALF(sk);
+			RSB_MUL2(irm);RSB_MUL2(jrm);
+			if(i>=hm+om){irm += 1;sm-=hm;om += hm;}else{sm = hm;}
+			if(j>=hk+ok){jrm += 1;sk-=hk;ok += hk;}else{sk = hk;}
+		}
+		zIA[nzi] = irm;
+		zJA[nzi] = jrm;
+		//sidx = 16*irm+jrm;
+		sidx = rsb_coo_index_bit_interleave(irm,jrm);
+		//scnt[sidx]++;
+		//printf("hm:%d sm:%d\n",hm,sm); printf("hk:%d sk:%d\n",hk,sk);
+		//printf("%d %d -> %d %d (%d)    at %d %d  sized %d %d\n",i,j,irm,jrm,sidx,om,ok,sm,sk);
+	}
+	if(0)
+	for(nlevi=0;nlevi<nlev;++nlevi)
+	for(nlevj=0;nlevj<nlev;++nlevj)
+	{
+		printf("%d %d : %d\n",nlevi,nlevj,scnt[16*nlevi+nlevj]);
+	}
+	for(nlevi=1;nlevi<nlev*nlev;++nlevi)
+		scnt[nlev*nlev-nlevi] = scnt[nlev*nlev-nlevi-1];
+	scnt[0] = 0;
+	for(nlevi=1;nlevi<nlev*nlev;++nlevi)
+		scnt[nlevi] += scnt[nlevi-1]; /* FIXME: shall use rsb__do_prefix_sum_coo_idx_t */
+	for(nzi=0;RSB_LIKELY(nzi<nnz);++nzi)
+	{
+		rsb_int sidx;
+		rsb_coo_idx_t irm = 0,jrm = 0;
+		irm = zIA[nzi];
+	       	jrm = zJA[nzi];
+		sidx = rsb_coo_index_bit_interleave(irm,jrm);
+		//sidx = 0;
+		oIA[ scnt[sidx]  ] = IA[nzi];
+		oJA[ scnt[sidx]++] = JA[nzi];
+	}
+	rsb_memcpy(IA,oIA,sizeof(rsb_coo_idx_t)*nnz);
+	rsb_memcpy(JA,oJA,sizeof(rsb_coo_idx_t)*nnz);
+	//for(nzi=0;RSB_LIKELY(nzi<nnz);++nzi)
+	//printf("please ignore this value: %d\n",scnt[0]);
+err:
+	RSB_CONDITIONAL_FREE(oJA);
+	RSB_CONDITIONAL_FREE(oIA);
+	RSB_CONDITIONAL_FREE(zJA);
+	RSB_CONDITIONAL_FREE(zIA);
+	return errval;
+}
+
+static void rsb_allocate_new__(void *RSB_RESTRICT VA, rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t * RSB_RESTRICT JA, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnz, rsb_type_t typecode, const struct rsb_mtx_partitioning_info_t * pinfop, rsb_flags_t flags, rsb_err_t *errvalp)
+{
+	rsb_time_t dt;
+	long cs = rsb__get_first_level_c_size();
+	//long cs = rsb__get_lastlevel_c_size();
+	rsb_nnz_idx_t bnz = RSB_MIN(cs/(4*sizeof(rsb_coo_idx_t)),nnz),fnz;
+	//rsb_nnz_idx_t bnz = nnz,fnz;
+	if(!getenv("RSB_CB"))bnz = nnz;
+	RSB_TIC(dt);
+	for(fnz=0;fnz<nnz;fnz+=bnz)
+	{
+		rsb_assign_subm__(IA+fnz,JA+fnz,m,k,RSB_MIN(bnz,nnz-fnz));
+	}
+	RSB_TOC(dt);
+	printf("%d cache blocks\n",(nnz+bnz-1)/bnz);
+	printf("EXPERIMENTAL: processed indices at %lf Mnnz/s in %lf s\n",(RSB_FPINV(dt)*nnz)/RSB_MILLION_F,dt);
+	exit(0);
+}
+#endif /* RSB_WANT_FASTER_EXPERIMENTAL_CONSTRUCTOR */
+
+void rsb__do_set_in_place_submatrices_offsets(struct rsb_mtx_t *RSB_RESTRICT submatrices, rsb_submatrix_idx_t cmc, rsb_char_t *RSB_RESTRICT  VA, rsb_coo_idx_t *RSB_RESTRICT  IA, rsb_coo_idx_t *RSB_RESTRICT JA, size_t el_size)
+{
+	/**
+		\ingroup gr_internals
+		\note: if nnz==0 and diagonal implicit, this could be dangerous
+	 */
+	rsb_submatrix_idx_t smi;
+	for(smi=0;smi<cmc;++smi)
+	{
+		struct rsb_mtx_t * submatrix = submatrices+smi;
+		if(!RSB_DO_FLAG_HAS(submatrix->flags,RSB_FLAG_QUAD_PARTITIONING))
+		{
+			submatrix->bpntr = IA+submatrix->nzoff;
+			submatrix->bindx = JA+submatrix->nzoff;
+			submatrix->VA = ((rsb_char_t*)VA)+el_size*submatrix->nzoff;
+		}
+	}
+}
+
+rsb_err_t rsb__do_switch_recursive_matrix_to_fullword_storage(struct rsb_mtx_t * mtxAp)
+{
+	/**
+		\ingroup gr_internals
+		TODO: move somewhere else
+		FIXME: may be UNFINISHED
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(!mtxAp)
+	{
+		RSB_ERROR(RSB_ERRM_E_MTXAP);
+		return RSB_ERR_BADARGS;
+	}
+
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t i,j;
+		struct rsb_mtx_t * submatrix;
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+			if(submatrix)
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_switch_recursive_matrix_to_fullword_storage(submatrix));
+	}
+	else
+	{
+//		if(rsb__do_is_candidate_for_halfword_coo(mtxAp))
+//			RSB_DO_ERROR_CUMULATE(errval,rsb__do_switch_to_fullword_coo(mtxAp));
+//		else
+//		if(rsb__do_is_candidate_for_halfword_csr(mtxAp))
+//			RSB_DO_ERROR_CUMULATE(errval,rsb__do_switch_to_fullword_csr(mtxAp));
+//		else
+//		if(!rsb__is_root_matrix(mtxAp) || rsb__is_terminal_recursive_matrix(mtxAp)) /* root recursive or root nonrec. */
+//			RSB_DO_FLAG_DEL(mtxAp->flags,RSB_FLAG_MUTUALLY_EXCLUSIVE_SWITCHES);
+//		else
+//		;/* for root matrices, we keep the flags, because some of the leaves MAY have it */
+		if( mtxAp->matrix_storage == RSB_MATRIX_STORAGE_BCOR )
+		{
+		       	if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_USE_HALFWORD_INDICES))
+			{
+				rsb__do_switch_array_to_fullword_coo((rsb_half_idx_t*)(mtxAp->bpntr),mtxAp->nnz,0);
+				rsb__do_switch_array_to_fullword_coo((rsb_half_idx_t*)(mtxAp->bindx),mtxAp->nnz,0);
+			       	RSB_DO_FLAG_DEL(mtxAp->flags,RSB_FLAG_USE_HALFWORD_INDICES);
+			}
+		}
+		else
+		if( mtxAp->matrix_storage == RSB_MATRIX_STORAGE_BCSR )
+		{
+		       	if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_USE_HALFWORD_INDICES))
+			{
+				rsb__do_switch_array_to_fullword_coo((rsb_half_idx_t*)(mtxAp->bindx),mtxAp->nnz,0);
+			       	RSB_DO_FLAG_DEL(mtxAp->flags,RSB_FLAG_USE_HALFWORD_INDICES);
+			}
+		}
+		else
+			errval = RSB_ERR_BADARGS;
+	}
+	
+	RSB_DO_ERR_RETURN(errval)
+}
+
+static rsb_err_t rsb_do_switch_fresh_terminal_matrix_to_halfword_storages(struct rsb_mtx_t * mtxAp)
+{
+	/**
+		\ingroup gr_unfinished
+		TODO: move somewhere else
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(!mtxAp)
+	{
+		RSB_ERROR(RSB_ERRM_E_MTXAP);
+		return RSB_ERR_BADARGS;
+	}
+
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		RSB_ERROR(RSB_ERRM_ES);
+		return RSB_ERR_BADARGS;
+	}
+	else
+	{
+		if(RSB_C2R_IF_VERBOSE && 0)
+			RSB_INFO_MATRIX_SUMMARY(mtxAp),
+			RSB_INFO(" -switch.."),
+			RSB_INFO("HCOO?(%d)..",rsb__do_is_candidate_for_halfword_coo(mtxAp)),
+			RSB_INFO("HCSR?(%d)",rsb__do_is_candidate_for_halfword_csr(mtxAp)),
+			RSB_INFO("\n");
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_WANT_COO_STORAGE))
+		{
+			if(rsb__do_is_candidate_for_halfword_coo(mtxAp))
+			{	
+				if(RSB_C2R_IF_VERBOSE)
+					RSB_INFO("to halfword COO:"),RSB_INFO_MATRIX_SUMMARY(mtxAp),RSB_INFO("\n");
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_switch_to_halfword_coo(mtxAp));
+			}
+			else
+				RSB_DO_FLAG_DEL(mtxAp->flags,RSB_FLAG_USE_HALFWORD_INDICES);
+		}
+		else
+//		if(rsb_do_is_candidate_for_fullword_coo(mtxAp))
+//		{
+//			RSB_DO_ERROR_CUMULATE(errval,rsb__do_switch_recursive_in_place_matrix_to_in_place_rcoo(mtxAp,RSB_BOOL_FALSE));
+//			RSB_DO_ERROR_CUMULATE(errval,rsb__do_switch_to_fullword_coo(mtxAp));// FIXME: wrong naming :(
+//		}
+//		else
+#if RSB_WANT_FIX_BUG_DISCOVERED_20121210
+		if(!RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_WANT_COO_STORAGE))
+#else /* RSB_WANT_FIX_BUG_DISCOVERED_20121210 */
+		if( RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_WANT_COO_STORAGE))
+#endif /* RSB_WANT_FIX_BUG_DISCOVERED_20121210 */
+		{
+			if(RSB_SOME_ERROR(rsb__do_is_candidate_for_halfword_csr(mtxAp)))
+			{
+				if(RSB_C2R_IF_VERBOSE)
+					RSB_INFO("to halfword CSR:"),RSB_INFO_MATRIX_SUMMARY(mtxAp),RSB_INFO("\n");
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_switch_to_halfword_csr(mtxAp));
+			}
+			else
+				RSB_DO_FLAG_DEL(mtxAp->flags,RSB_FLAG_USE_HALFWORD_INDICES);
+		}
+		else
+		//if(!rsb__is_root_matrix(mtxAp) || rsb__is_terminal_recursive_matrix(mtxAp)) /* root recursive or root nonrec. */
+		//	RSB_DO_FLAG_DEL(mtxAp->flags,RSB_FLAG_MUTUALLY_EXCLUSIVE_SWITCHES);
+		//else
+			RSB_DO_FLAG_DEL(mtxAp->flags,RSB_FLAG_USE_HALFWORD_INDICES);
+		;/* for root matrices, we keep the flags, because some of the leaves MAY have it */
+	}
+	
+	RSB_DO_ERR_RETURN(errval)
+}
+
+#if !RSB_WANT_MORE_PARALLELISM 
+static rsb_err_t rsb_do_switch_fresh_recursive_matrix_to_halfword_storages(struct rsb_mtx_t * mtxAp)
+{
+	/**
+		\ingroup gr_unfinished
+TODO: move somewhere else
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(!mtxAp)
+	{
+		RSB_ERROR(RSB_ERRM_E_MTXAP);
+		return RSB_ERR_BADARGS;
+	}
+
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t i,j;
+		const struct rsb_mtx_t * submatrix;
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+			if(submatrix)
+				RSB_DO_ERROR_CUMULATE(errval,rsb_do_switch_fresh_recursive_matrix_to_halfword_storages(submatrix));
+	}
+	else
+		errval = rsb_do_switch_fresh_terminal_matrix_to_halfword_storages(mtxAp);
+	
+	RSB_DO_ERR_RETURN(errval)
+}
+#endif /* RSB_WANT_MORE_PARALLELISM */
+
+rsb_err_t rsb__check_bounds(struct rsb_mtx_t * mtxAp)
+{
+	/* FIXME: need checks on Mdim, mdim, ... */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(
+			mtxAp->broff<0 || mtxAp->bcoff<0 ||
+			/* mtxAp->broff<mtxAp->roff+broff || mtxAp->bcoff<mtxAp->coff+bcoff ||  */
+			mtxAp->bm>mtxAp->nr || 
+			mtxAp->bk>mtxAp->nc 
+	  )
+	{
+		RSB_ERROR(RSB_PRINTF_MATRIX_BOUNDS_SUMMARY_ARGS(mtxAp)); RSB_ERROR("\n");
+
+		RSB_ASSERT(! ( mtxAp->broff<0) );
+		RSB_ASSERT(! ( mtxAp->bcoff<0) );
+	/*	RSB_ASSERT(! ( mtxAp->broff<mtxAp->roff+broff) );
+		RSB_ASSERT(! ( mtxAp->bcoff<mtxAp->coff+bcoff) ); */
+		RSB_ASSERT(! ( mtxAp->bm>mtxAp->nr) );
+		RSB_ASSERT(! ( mtxAp->bk>mtxAp->nc) );
+
+		errval = RSB_ERR_INTERNAL_ERROR;
+		RSB_ERROR(RSB_ERRM_BCE);
+		RSB_ERROR(RSB_ERRM_BM),RSB_ERROR_MATRIX_SUMMARY(mtxAp),RSB_ERROR(RSB_ERRM_NL);
+		return errval;
+	}
+	return errval;
+}
+
+rsb_err_t rsb__compute_bounded_box(struct rsb_mtx_t * mtxAp)
+{
+	/*
+	 * Set have to be: nr, nc.
+	 * Will compute: ...
+	 *
+	 * TODO: make sure it does not depend on RSB_FLAG_QUAD_PARTITIONING.
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t broff = RSB_INVALID_COO_IDX_VAL,bcoff = RSB_INVALID_COO_IDX_VAL,bm = RSB_INVALID_COO_IDX_VAL,bk = RSB_INVALID_COO_IDX_VAL;
+
+	if(rsb__is_coo_matrix(mtxAp))
+	{
+		//rsb_nnz_idx_t nnz0 = 0,nnz1 = mtxAp->nnz;
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_USE_HALFWORD_INDICES))
+		{
+			RSB_DECLARE_CONST_HALFCOO_ARRAYS_FROM_MATRIX(IA,JA,mtxAp)
+			rsb_half_idx_t li, ui;
+			rsb_half_idx_t lj, uj;
+			// FIXME: could optimize, since IA is sorted
+			rsb__util_find_extremal_half_index_val(IA,mtxAp->nnz,0,mtxAp->nr,&li,&ui);
+			rsb__util_find_extremal_half_index_val(JA,mtxAp->nnz,0,mtxAp->nc,&lj,&uj);
+			bk = 1;bk += uj; bm = 1;bm += ui; broff = li; bcoff = lj;
+		}
+		else
+		{
+			RSB_DECLARE_CONST_FULLCOO_ARRAYS_FROM_MATRIX(IA,JA,mtxAp)
+			rsb_coo_idx_t li, ui;
+			rsb_coo_idx_t lj, uj;
+			// FIXME: could optimize, since IA is sorted
+			rsb__util_find_extremal_full_index_val(IA,mtxAp->nnz,0,mtxAp->nr,&li,&ui);
+			rsb__util_find_extremal_full_index_val(JA,mtxAp->nnz,0,mtxAp->nc,&lj,&uj);
+			bk = 1;bk += uj; bm = 1;bm += ui; broff = li; bcoff = lj;
+		}
+	}
+	else
+	if(rsb__is_csr_matrix(mtxAp))
+	{
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,(RSB_FLAG_USE_HALFWORD_INDICES)))
+		{
+			RSB_DECLARE_CONST_HALFCSR_ARRAYS_FROM_MATRIX(PA,JA,mtxAp)
+			rsb_half_idx_t lj, uj;
+			rsb_coo_idx_t li, ui;
+			rsb__util_find_extremal_half_index_val(JA,mtxAp->nnz,0,mtxAp->nr,&lj,&uj);
+			ui = rsb__nnz_split_nnz_bsearch(PA,mtxAp->nnz,mtxAp->nr+1);
+			li = rsb__nnz_split_nnz_bsearch(PA,1,mtxAp->nr+1)-1;
+			bk = 1;bk += uj; bm = ui; broff = li; bcoff = lj;
+		}
+		else
+		{
+			RSB_DECLARE_CONST_FULLCSR_ARRAYS_FROM_MATRIX(PA,JA,mtxAp)
+			rsb_coo_idx_t lj, uj;
+			rsb_coo_idx_t li, ui;
+			rsb__util_find_extremal_full_index_val(JA,mtxAp->nnz,0,mtxAp->nr,&lj,&uj);
+			ui = rsb__nnz_split_nnz_bsearch(PA,mtxAp->nnz,mtxAp->nr+1);
+			li = rsb__nnz_split_nnz_bsearch(PA,1,mtxAp->nr+1)-1;
+			bk = 1;bk += uj; bm = ui; broff = li; bcoff = lj;
+		}
+	}
+	else
+		RSB_ERROR(RSB_ERRMSG_BADFORMAT);
+
+	mtxAp->broff = mtxAp->roff+broff;
+	mtxAp->bcoff = mtxAp->coff+bcoff;
+	mtxAp->bm = bm;
+	mtxAp->bk = bk;
+
+	errval = rsb__check_bounds(mtxAp);
+#if 0
+	RSB_INFO("bounding box of "),RSB_INFO_MATRIX_SUMMARY(mtxAp),
+		RSB_INFO(": %.2f%% x  %.2f %%\n",(100.0f*(float)(bm-broff))/mtxAp->nr,(100.0f*(float)(bk-bcoff))/mtxAp->nc);
+		RSB_INFO(": %d,%d %d,%d\n",mtxAp->roff+broff,mtxAp->coff+bcoff,bm,bk);
+		RSB_INFO(": %d,%d %d,%d\n",mtxAp->roff+broff,mtxAp->coff+bcoff,bm,bk);
+#endif
+	return errval;
+}
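+
+/* e.g. (a sketch): for a leaf with nr==nc==8 whose nonzeros span rows 2..5
+ * and columns 1..6, the above yields bm==6, bk==7, broff==roff+2 and
+ * bcoff==coff+1, i.e. the empty leading rows/columns are left out of the
+ * bounded box. */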
+
+static rsb_err_t rsb_do_compute_bounded_boxes(struct rsb_mtx_t * mtxAp)
+{
+	/**
+		\ingroup gr_unfinished
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_submatrix_idx_t smi = 0;
+	rsb_bool_t want_really = 0;
+#if RSB_WANT_BOUNDED_BOXES
+	want_really = (rsb_global_session_handle.want_bounded_box!=0);
+#else /* RSB_WANT_BOUNDED_BOXES */
+	mtxAp->broff = mtxAp->roff;
+	mtxAp->bcoff = mtxAp->coff;
+	mtxAp->bm = mtxAp->nr;
+	mtxAp->bk = mtxAp->nc;
+	goto err;
+#endif /* RSB_WANT_BOUNDED_BOXES */
+
+	if(!mtxAp)
+	{
+		RSB_ERROR(RSB_ERRM_E_MTXAP);
+		return RSB_ERR_BADARGS;
+	}
+
+	if(mtxAp->nnz==0)
+		goto err;
+
+	if(want_really)
+	{
+		if(rsb__is_terminal_recursive_matrix(mtxAp)) // fix for serial 20101206
+		{
+			RSB_DO_ERROR_CUMULATE(errval,rsb__compute_bounded_box(mtxAp));
+			goto err;
+		}
+		#pragma omp parallel for schedule(static,1) reduction(|:errval)  shared(mtxAp) RSB_NTC 
+		for(smi=0;smi<mtxAp->all_leaf_matrices_n;++smi)
+		{
+			struct rsb_mtx_t * submatrix = mtxAp->all_leaf_matrices[smi].mtxlp;
+			RSB_DO_ERROR_CUMULATE(errval,rsb__compute_bounded_box(submatrix));
+		}
+		#pragma omp barrier
+	}
+	else
+	{
+		#pragma omp parallel for schedule(static,1) reduction(|:errval)  shared(mtxAp) RSB_NTC 
+		for(smi=0;smi<mtxAp->all_leaf_matrices_n;++smi)
+		{
+			struct rsb_mtx_t * submatrix = mtxAp->all_leaf_matrices[smi].mtxlp;
+			submatrix->bm = submatrix->nr;
+			submatrix->bk = submatrix->nc;
+			submatrix->broff = submatrix->roff;
+			submatrix->bcoff = submatrix->coff;
+		}
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+static rsb_err_t rsb_do_switch_fresh_recursive_matrix_to_halfword_storages_parallel(struct rsb_mtx_t * mtxAp)
+{
+	/**
+		\ingroup gr_unfinished
+TODO: move somewhere else
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_submatrix_idx_t smi = 0;
+	if(!mtxAp)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_E_MTXAP);
+	}
+
+	/* FIXME: 20100809 it seems that 'switching' a 0-nnz matrix overwrites something which should not be overwritten  */
+	if(mtxAp->nnz==0)
+		goto err;
+
+	if(rsb__is_terminal_recursive_matrix(mtxAp)) // fix for serial 20101206
+	{
+		RSB_DO_ERROR_CUMULATE(errval,rsb_do_switch_fresh_terminal_matrix_to_halfword_storages(mtxAp));
+		goto err;
+	}
+	#pragma omp parallel for schedule(static,1) reduction(|:errval)  shared(mtxAp) RSB_NTC 
+	for(smi=0;smi<mtxAp->all_leaf_matrices_n;++smi)
+	{
+		struct rsb_mtx_t * submatrix = mtxAp->all_leaf_matrices[smi].mtxlp;
+		RSB_DO_ERROR_CUMULATE(errval,rsb_do_switch_fresh_terminal_matrix_to_halfword_storages(submatrix));
+	}
+	#pragma omp barrier
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+#if 0
+static rsb_err_t rsb_do_shuffle_left_and_right_rows_inner(rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t m, rsb_coo_idx_t m0, rsb_nnz_idx_t nnz, rsb_nnz_idx_t nnz0, rsb_coo_idx_t * RSB_RESTRICT IL, rsb_coo_idx_t * IM, rsb_coo_idx_t * RSB_RESTRICT WA, size_t sz)
+{
+	/**
+		\ingroup gr_unfinished
+		FIXME: UNFINISHED, EXPERIMENTAL
+	 */
+		rsb_err_t errval = RSB_ERR_NO_ERROR;
+		rsb_coo_idx_t iu = m0,id = m-1;
+		rsb_coo_idx_t wl = nnz,wr = 0,ns = 0,nu = 0,ws = 0,nd = nnz;
+
+		if( sz<1 || !IA || !IM || !IL || !WA || RSB_INVALID_NNZ_INDEX(nnz) || RSB_INVALID_COO_INDEX(m) )
+		{
+			errval = RSB_ERR_BADARGS;
+			RSB_PERR_GOTO(err,RSB_ERRM_E_MTXAP);
+		}
+		if(RSB_UNLIKELY(IL[m]!=nnz))
+		{
+			errval = RSB_ERR_INTERNAL_ERROR;
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}
+		if(iu>=id)
+		{
+			errval = RSB_ERR_INTERNAL_ERROR;
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}
+		nu = IL[iu];
+
+		while(RSB_LIKELY(iu<=id))
+		{
+			/* compute the left subrow length */
+			ns = IM[iu]-IL[iu];
+			/* shift left the left subrow */
+			RSB_A_MEMMOVE(IA,IA,nu,IL[iu],ns,sz);
+			/* update the counter of left subrows elements in IA */
+			nu += ns;
+			/* compute the right subrow length */
+			ws = (IL[iu+1]-IM[iu]);
+			/* buffer the right subrow */
+			RSB_A_MEMCPY(WA,IA,wr,IM[iu],ws,sz);
+			/* update the (complementary) counter of right subrows elements in the buffer */
+			wr += ws;
+
+			if(RSB_UNLIKELY(iu>=id))
+			{
+				/* skip row, as id was already done */
+				++id;
+				goto done;
+			}
+			/* compute the right subrow length */
+			ns = IL[id+1]-IM[id];
+			/* update the (complementary) counter of right subrows elements in IA */
+			nd -= ns;
+			/* shift right the right subrow */
+			RSB_A_MEMMOVE(IA,IA,nd,IM[id],ns,sz);
+			/* compute the left subrow length */
+			ws = IM[id]-IL[id];
+			/* update the counter of right subrows elements in the buffer */
+			wl -= ws;
+			/* buffer the left subrow */
+			RSB_A_MEMCPY(WA,IA,wl,IL[id],ws,sz);
+
+			++iu,--id;
+		}
+		/* IA has definitive elements, from left at  0..nu-1 and from right at (nnz-nd)..nnz-1  */
+		{
+			//rsb_nnz_idx_t
+		}
+		/* WA has definitive elements, from right at  0..wr-1 and from left at  wl..nnz-1  */
+		/* it should be : nnz == nu + nnz-wl+nd-wr */
+		if(nu+((nnz)-wl)!=nd-wr)
+		{
+			errval = RSB_ERR_INTERNAL_ERROR;
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}
+done:
+		/* compute the number of left submatrix elements in the buffer */
+		ns = (nnz)-wl;
+		/* copy the partial left submatrix from the buffer to the array */
+		RSB_A_MEMMOVE(IA,WA,nu,wl,ns,sz);
+		/* update the counter of left subrows elements in IA */
+		nu += ns;
+		/* compute the number of right submatrix elements in the buffer */
+		ns = wr;
+		/* copy the partial right submatrix from the buffer to the array */
+		RSB_A_MEMMOVE(IA,WA,nu,0,ns,sz);
+		/* update the counter to all subrows elements in IA (those already present, too) */
+		nd -= ns;
+
+		/* minimal coherence check */
+err:
+		if(RSB_UNLIKELY(nu!=nd))
+		{
+			RSB_ERROR("nnz=%d != nu+nd = %d; nu=%d, wl=%d, wr=%d, nd=%d\n",nnz,nu+nd,nu,wl,wr,nd);
+//			RSB_ERROR("nnz=%d != nu+nnz-wl+nd = %d; nu=%d, wl=%d, wr=%d, nd=%d\n",nnz,nu+nnz+wl+nd,nu,wl,wr,nd);
+			errval = RSB_ERR_INTERNAL_ERROR;
+		}
+		/* the buffer is empty now, and the arrays are left-right partitioned */
+		RSB_DO_ERR_RETURN(errval)
+}
+#endif
+
+#if 0
+static rsb_err_t rsb_do_shuffle_left_and_right_rows(void * RSB_RESTRICT VA, rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t * RSB_RESTRICT JA, rsb_coo_idx_t m, rsb_coo_idx_t m0, rsb_nnz_idx_t nnz, rsb_nnz_idx_t nnz0, rsb_type_t typecode, rsb_coo_idx_t * RSB_RESTRICT IL, rsb_coo_idx_t * IM, rsb_coo_idx_t * RSB_RESTRICT WA)
+{
+	/**
+		\ingroup gr_unfinished
+		FIXME: UNFINISHED, EXPERIMENTAL
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	const size_t sz = sizeof(rsb_coo_idx_t);
+	if(!IL || !IM || !WA)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_E_MTXAP);
+	}
+	RSB_DO_ERROR_CUMULATE(errval,rsb_do_shuffle_left_and_right_rows_inner(IA,m,m0,nnz,nnz0,IL,IM,WA,sz));
+	RSB_DO_ERROR_CUMULATE(errval,rsb_do_shuffle_left_and_right_rows_inner(JA,m,m0,nnz,nnz0,IL,IM,WA,sz));
+	RSB_DO_ERROR_CUMULATE(errval,rsb_do_shuffle_left_and_right_rows_inner(VA,m,m0,nnz,nnz0,IL,IM,WA,RSB_SIZEOF(typecode)));
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+#endif
+
+static rsb_err_t rsb_do_compute_vertical_split_search_only(
+		const rsb_coo_idx_t * RSB_RESTRICT IA, const rsb_coo_idx_t * RSB_RESTRICT JA,
+	       	rsb_coo_idx_t roff, rsb_coo_idx_t coff, rsb_coo_idx_t m, rsb_coo_idx_t k,
+	       	rsb_coo_idx_t hm, rsb_coo_idx_t hk, rsb_nnz_idx_t nnz,
+	       	const rsb_coo_idx_t * IB, rsb_nnz_idx_t *ulp, rsb_nnz_idx_t *urp, rsb_nnz_idx_t *llp, rsb_nnz_idx_t *lrp)
+{
+	/**
+	\ingroup gr_unfinished
+	
+
+	 */
+	rsb_nnz_idx_t ul = 0,ur = 0,ll = 0,lr = 0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t dnnz = 0/*,wdnnz = 0,rnnz = 0,hrnnz = 0*/;
+	register rsb_coo_idx_t i;
+	//rsb_nnz_idx_t nnz0 = 0;
+	//rsb_coo_idx_t xroff = 0;
+
+
+	if(nnz>m || 1)
+	//if(nnz>m)
+	{
+	for(i = roff;RSB_LIKELY(i<roff+m);++i)
+	{
+		// offset of line i in the global line pointers array
+		rsb_nnz_idx_t nnz0 = IB[i];
+		// nnz1..nnz0 are the boundaries of line i
+		rsb_nnz_idx_t nnz1 = IB[i+1];
+		rsb_nnz_idx_t nnz2 = 0;
+		// check
+		RSB_C2R_ASSERT(nnz0>=IB[i]);
+		RSB_C2R_ASSERT(nnz1<=IB[i+1]);
+		// skip line if empty
+		if(nnz1-nnz0<1)continue;
+		// find first element of line i also in the submatrix
+		nnz0 += rsb__nnz_split_coo_bsearch(JA+nnz0,coff,nnz1-nnz0);
+		// skip line if empty in the submatrix
+		if(nnz1-nnz0<1)continue;
+		// find the length of the subrow i in the submatrix
+		nnz1 = nnz0+rsb__nnz_split_coo_bsearch(JA+nnz0,coff+k,nnz1-nnz0);
+		//check 
+		RSB_C2R_ASSERT(JA[nnz0+0]>=coff);
+		// skip line if empty in the submatrix
+		if(nnz1-nnz0<1)continue;
+		nnz2 = nnz0+rsb__nnz_split_coo_bsearch(JA+nnz0,coff+hk,nnz1-nnz0);
+	       	RSB_C2R_ASSERT(nnz1<=IB[i+1]);
+		RSB_C2R_ASSERT(JA[nnz0+0]>=coff);
+		RSB_C2R_ASSERT(JA[nnz1-1]< coff+k);
+		dnnz += nnz1-nnz0;
+		if(i<roff+hm)
+			ul += nnz2-nnz0,
+			ur += nnz1-nnz2;
+		else
+			ll += nnz2-nnz0,
+			lr += nnz1-nnz2;
+	}
+	}
+	else
+	{
+		// FIXME: UNFINISHED
+		rsb_nnz_idx_t nnz0,nnz1,n;
+		//RSB_INFO("almost empty matrix !\n");
+		for(n=0;n<nnz;++n)
+		{
+			rsb_nnz_idx_t nnz2 = 0;
+			i = IA[n];
+			nnz0 = IB[i];
+			nnz1 = IB[i+1];
+			// ...
+#if 1
+		// skip line if empty
+		if(nnz1-nnz0<1)continue;
+		// find first element of line i also in the submatrix
+		nnz0 += rsb__nnz_split_coo_bsearch(JA+nnz0,coff,nnz1-nnz0);
+		// skip line if empty in the submatrix
+		if(nnz1-nnz0<1)continue;
+		// find the length of the subrow i in the submatrix
+		nnz1 = nnz0+rsb__nnz_split_coo_bsearch(JA+nnz0,coff+k,nnz1-nnz0);
+		//check 
+		RSB_C2R_ASSERT(JA[nnz0+0]>=coff);
+		// skip line if empty in the submatrix
+		if(nnz1-nnz0<1)continue;
+		nnz2 = nnz0+rsb__nnz_split_coo_bsearch(JA+nnz0,coff+hk,nnz1-nnz0);
+	       	RSB_C2R_ASSERT(nnz1<=IB[i+1]);
+		RSB_C2R_ASSERT(JA[nnz0+0]>=coff);
+		RSB_C2R_ASSERT(JA[nnz1-1]< coff+k);
+		dnnz += nnz1-nnz0;
+		if(i<roff+hm)
+			ul += nnz2-nnz0,
+			ur += nnz1-nnz2;
+		else
+			ll += nnz2-nnz0,
+			lr += nnz1-nnz2;
+#else
+			if(nnz1-nnz0<1)continue;
+			if(i<roff+hm)
+			{
+				for(;n<nnz1;++n)
+					if(JA[n]>=coff+hk)
+						++ur;
+					else
+						++ul;
+			}
+			else
+			{
+				for(;n<nnz1;++n)
+					if(JA[n]>=coff+hk)
+						++lr;
+					else
+						++ll;
+			}
+#endif
+		}
+	}
+//done:
+	*llp = ll;
+	*lrp = lr;
+	*ulp = ul;
+	*urp = ur;
+//err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+#if !RSB_WANT_PARALLEL_SUBDIVISION 
+static rsb_err_t rsb_do_compute_vertical_split(const rsb_coo_idx_t * RSB_RESTRICT IA, const rsb_coo_idx_t * RSB_RESTRICT JA, rsb_coo_idx_t roff, rsb_coo_idx_t coff, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_coo_idx_t hm, rsb_coo_idx_t hk, rsb_nnz_idx_t nnz, rsb_coo_idx_t * IL, rsb_coo_idx_t * RSB_RESTRICT IM, rsb_coo_idx_t * IR, rsb_nnz_idx_t *ulp, rsb_nnz_idx_t *urp, rsb_nnz_idx_t *llp, rsb_nnz_idx_t *lrp)
+{
+	/**
+	\ingroup gr_unfinished
+	FIXME: UNFINISHED, EXPERIMENTAL
+
+	Computes two arrays: IM, IL.
+	IM[i] contains the index of the first element with column >= hk on line i;
+	IL[i] contains the index of the first element on line i.
+	Notes:
+		IM[i]==IL[i+1] if line i has no element with column >= hk
+		IM[i]==IL[i]   if the first element of line i already has column >= hk
+		IL[i]==IL[i+1] if line i is empty
+		IL[0]==0
+		IL[m]==nnz
+		IM is valid on the 0..nr-1 range.
+		IL is valid on the 0..nr range.
+
+	TODO: blocking support
+	 */
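+	/*
+	   Illustrative example (hypothetical data) of the invariants above:
+	   for a 4 x 4 matrix with hk=2 and rows {(0,0),(0,3)}, {}, {(2,1)}, {(3,2),(3,3)},
+	   so JA = {0,3,1,2,3}, one gets:
+	     IL = {0,2,2,3,5}  (IL[1]==IL[2]: line 1 is empty; IL[4]==nnz==5)
+	     IM = {1,2,3,3}    (IM[0]==1: JA[1]==3>=hk; IM[2]==IL[3]: line 2 has no column >= hk;
+	                        IM[3]==IL[3]: the first element of line 3 already has column >= hk)
+	*/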
+	rsb_nnz_idx_t ul = 0,ur = 0,ll = 0,lr = 0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t dnnz = 0,wdnnz = 0,rnnz = 0,hrnnz = 0;
+	register rsb_coo_idx_t i;
+	rsb_nnz_idx_t nnz0 = 0;
+
+	hk += coff;
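+	/* from here on, hk is the absolute (global) index of the splitting column */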
+
+	if(IR==NULL && IM==NULL && IL==NULL)
+	{
+		/* FIXME: write me, for cases where we want subdivision on barely-COO matrices */
+	}
+	else
+	if(IR==NULL && IM==NULL)
+	{
+		// root matrix; should compute IL
+		IL[0] = 0;
+//		if(nnz>100*m)// TODO
+		if(0)// TODO: determine which case is faster !
+		{
+			/* fill the row pointers array */
+//			#pragma omp parallel for reduction(+:dnnz) RSB_NTC 
+//			for(i=0;i<m;++i)
+			for(i=0;RSB_LIKELY(i<m);++i)
+			{
+				// delimit the current row
+				rnnz = rsb__nnz_split_coo_bsearch(IA+dnnz,i+1,nnz-dnnz);
+				/* i==m-1 || IA[dnnz+rnnz] > i */
+				dnnz += rnnz;
+				IL[i+1] = dnnz;
+				RSB_C2R_ASSERT(rnnz>=0);
+			}
+		}
+		else
+		{
+			/* for full matrices, this is faster */
+			rsb_nnz_idx_t n = 0;
+#if 1
+			nnz0 = 0;
+#if RSB_WANT_QUADRANT_QUICK_DETECT 
+			/* FIXME: UNFINISHED (and IB appears not to be in scope here, so this block would not compile if enabled) */
+			if(IB[roff]==IB[roff+hm])
+			{
+				RSB_INFO("upper submatrix empty\n");
+				// should do something sharp
+			}
+			else
+			if(IB[roff+hm]==IB[roff+m])
+			{
+				RSB_INFO("lower submatrix empty\n");
+				// should do something sharp
+			}
+#endif /* RSB_WANT_QUADRANT_QUICK_DETECT  */
+			for(i=0;RSB_LIKELY(i<m);++i)
+			{
+				rnnz = 0;
+				for(;RSB_LIKELY(n<nnz && IA[n]==i);++n)
+					++rnnz;
+				IL[i+1] = nnz0+rnnz;
+				nnz0 += rnnz;
+			}
+#else
+			for(i=0;RSB_LIKELY(i<m);++i)
+				IL[i] = 0;
+			for(n=0;RSB_LIKELY(n<nnz);++n)
+				RSB_C2R_ASSERT(IA[n]>=0 && IA[n]<m);
+			for(n=0;RSB_LIKELY(n<nnz);++n)
+				IL[IA[n]+1]++;
+			for(i=0;RSB_LIKELY(i<m);++i)
+				IL[i+1] += IL[i];
+#endif
+		}
+		RSB_C2R_ASSERT(IL[m]==nnz);
+		goto err;
+	}
+	else
+	if(IR==NULL)
+	{
+		RSB_C2R_ASSERT(0);
+		RSB_ASSERT(ulp);
+		RSB_ASSERT(llp);
+		RSB_ASSERT(urp);
+		RSB_ASSERT(lrp);
+		// root matrix; should compute IL
+		IL[0] = 0;
+		/* fill the row pointers array */
+		for(i=0;RSB_LIKELY(i<m);++i)
+		{
+			// delimit the current row
+			rnnz = rsb__nnz_split_coo_bsearch(IA+dnnz,i+1,nnz-dnnz);
+			/* i==m-1 || IA[dnnz+rnnz] > i */
+
+			IL[i+1] = dnnz+rnnz;
+
+			if(RSB_LIKELY(dnnz+rnnz<=nnz))
+			{
+				// the current row is non empty
+				hrnnz = rsb__nnz_split_coo_bsearch(JA+dnnz,hk,rnnz);
+				if(RSB_LIKELY(hrnnz<rnnz))
+					IM[i] = dnnz+hrnnz;
+				else
+					// all the elements are in the left submatrix
+					IM[i] = dnnz+ rnnz;
+			}
+			else
+			{
+				// last row
+				hrnnz = rnnz;
+				IM[i] = nnz;
+			}
+
+			if(RSB_UNLIKELY(IM[i]<IL[i]))
+			{
+				errval = RSB_ERR_INTERNAL_ERROR;
+				RSB_PERR_GOTO(err,RSB_ERRM_ES);
+			}
+
+			// TODO: split in two cycles: 0..hm-1, hm..nr-1
+			if(i<hm)
+				ul += IM[i  ]-IL[i],
+				ur += IL[i+1]-IM[i];
+			else
+				ll += IM[i  ]-IL[i],
+				lr += IL[i+1]-IM[i];
+			//RSB_INFO("%d @ %d~%d (%d/%d)\n",i,dnnz,dnnz+rnnz-1,hrnnz,rnnz);
+			dnnz += rnnz;
+		}
+		IM[m] = IL[m];
+	}
+	else
+	{
+		// compute middle pointers array, using the left and right ones
+		RSB_ASSERT(ulp);
+		RSB_ASSERT(llp);
+		RSB_ASSERT(urp);
+		RSB_ASSERT(lrp);
+		nnz0 = IL[0];
+		/* fill the middle row pointers array */
+		for(i=0;RSB_LIKELY(i<m);++i)
+		{
+			// delimit the current row
+			rsb_nnz_idx_t il = IL[i],ir = IR[i],im;
+			rnnz = ir-il;
+
+			RSB_C2R_ASSERT(ir>=il);
+			if(ir<il)
+			{
+				errval = RSB_ERR_INTERNAL_ERROR;
+				RSB_PERR_GOTO(err,RSB_ERRM_ES);
+			}
+			if(ir==il)
+			{
+				// empty row
+				IM[i] = IR[i];
+				continue;
+			}
+			/* i==m-1 || IA[dnnz+rnnz] > i */
+
+			// the current row is non empty
+			RSB_C2R_ASSERT(JA[il+0]>=coff  );
+			RSB_C2R_ASSERT(JA[ir-1]< coff+k);
+
+			hrnnz = rsb__nnz_split_coo_bsearch(JA+il,hk,rnnz);
+			im = il+hrnnz;
+
+			IM[i] = im;
+
+#if RSB_C2R_PARANOIA
+			if(IM[i]>IR[i])
+			{
+				errval = RSB_ERR_INTERNAL_ERROR;
+				RSB_PERR_GOTO(err,"i=%d, %d > %d!\n",i,IM[i],IR[i]);
+			}
+
+			if(IM[i]<IL[i])
+			{
+				errval = RSB_ERR_INTERNAL_ERROR;
+				RSB_PERR_GOTO(err,"i=%d, %d < %d!\n",i,IM[i],IL[i]);
+			}
+
+#endif /* RSB_C2R_PARANOIA */
+			// TODO: split in two cycles: 0..hm-1, hm..nr-1
+			if(i<hm)
+				ul += im-il,
+				ur += ir-im;
+			else
+				ll += im-il,
+				lr += ir-im;
+			//RSB_INFO("%d @ %d~%d (%d/%d)\n",i,dnnz,dnnz+rnnz-1,hrnnz,rnnz);
+			dnnz += rnnz;
+		}
+		IM[m] = IL[m];
+	}
+
+	if(RSB_C2R_PARANOIA)
+	{
+		rsb_coo_idx_t i;
+		rsb_nnz_idx_t lnz = 0,rnz = 0,tnz = 0;
+		if(IR==NULL)
+			IR = IL+1;
+
+/*		if(IL[m]!=nnz0+nnz)
+		{
+			RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_INTERNAL_ERROR);
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}*/
+
+		for(i=0;RSB_LIKELY(i<m);++i)
+		{
+			lnz += IM[i]-IL[i];
+			rnz += IR[i]-IM[i];
+			tnz += IR[i]-IL[i];
+
+			if(RSB_UNLIKELY(IM[i]<IL[i] || IL[i]>IL[i+1]))
+			{
+				RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_INTERNAL_ERROR);
+				RSB_PERR_GOTO(err,RSB_ERRM_ES);
+			}
+		}
+		if(ul+ll!=lnz || ur+lr != rnz)
+		{
+			RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_INTERNAL_ERROR);
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}
+		if(tnz!=nnz || (rnz+lnz)!=nnz)
+		{
+			RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_INTERNAL_ERROR);
+			RSB_PERR_GOTO(err,"tnz:%d, nnz:%d, rnz:%d, lnz:%d\n",tnz,nnz,rnz,lnz);
+		}
+	}
+	*llp = ll;
+	*lrp = lr;
+	*ulp = ul;
+	*urp = ur;
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+#endif /* RSB_WANT_PARALLEL_SUBDIVISION */
+
+static rsb_err_t rsb_do_compute_vertical_split_parallel(const rsb_coo_idx_t * RSB_RESTRICT IA, const rsb_coo_idx_t * RSB_RESTRICT JA, rsb_coo_idx_t roff, rsb_coo_idx_t coff, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_coo_idx_t hm, rsb_coo_idx_t hk, rsb_nnz_idx_t nnz, rsb_coo_idx_t * IL, rsb_coo_idx_t * RSB_RESTRICT IM, rsb_coo_idx_t * IR, rsb_nnz_idx_t *ulp, rsb_nnz_idx_t *urp, rsb_nnz_idx_t *llp, rsb_nnz_idx_t *lrp)
+{
+
+	/**
+	Binary search for the boundaries of each row.
+	Assign threads to rows intervals.
+	Perform the row pointers vector fill calling rsb_do_compute_vertical_split.
+	*/
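+	/*
+	   Illustrative example of the per-thread row interval arithmetic used below
+	   (hypothetical figures): with m=10 rows and tnn=4 threads, mm=ceil(10/4)=3,
+	   so threads 0..3 get the row ranges [0,3), [3,6), [6,9), [9,10); each thread
+	   then narrows its nonzeroes range nnz0..nnz1 via two binary searches on IA.
+	*/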
+#if 0
+	return rsb_do_compute_vertical_split(IA,JA,roff,coff,m,k,hm,hk,nnz,IL,IM,IR,ulp,urp,llp,lrp);
+#else
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	/* const rsb_thread_t wet = rsb_get_num_threads(); */
+
+	if(m<1)
+		return RSB_ERR_NO_ERROR;/* TODO: limit case */
+	IL[0] = 0;
+	if(m==1)
+		goto after;
+
+	#pragma omp parallel RSB_NTC 
+	{
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+		const rsb_thread_t tn = /*wet*/ omp_get_num_threads(), tnn = RSB_MIN(tn,m);
+		const rsb_thread_t th_id = omp_get_thread_num();
+#else /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+		const rsb_thread_t tn = 1, tnn = RSB_MIN(tn,m);
+		const rsb_thread_t th_id = 0;
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+		const rsb_coo_idx_t mm = ((m+tnn-1)/tnn),m0 = mm*th_id,m1 = RSB_MIN(m0+mm,m);
+		rsb_coo_idx_t i;
+		rsb_nnz_idx_t nnz0 = 0,nnz1 = nnz;
+		rsb_nnz_idx_t n,rnnz,fnnz,lnnz;
+		if(th_id>=m)
+			goto nowork;
+		/* binary search for the boundaries of each row  */
+		nnz0 = rsb__nnz_split_coo_bsearch(IA+nnz0,m0,nnz1-nnz0);
+		nnz1 = nnz0+rsb__nnz_split_coo_bsearch(IA+nnz0,m1,nnz1-nnz0);
+		/* assign threads to rows intervals */
+		if(nnz0>=nnz1)
+		{
+			//for(i=m0;RSB_LIKELY(i<m1);++i)
+			//	IL[i+1] = nnz0;
+			RSB_XCOO_VSET(IL,nnz0,m0+1,m1+1);
+			goto nowork;
+		}
+		//RSB_INFO("thread %d  rows %d..%d  nnz %d..%d\n",th_id,m0,m1,nnz0,nnz1);
+		/* perform the row pointers vector fill calling rsb_do_compute_vertical_split */
+		//RSB_DO_ERROR_CUMULATE(errval,rsb_do_compute_vertical_split(IA+nnz0,JA+nnz0,roff+m0,coff,m1-m0,k,hm,hk,nnz1-nnz0,IL+m0,NULL,NULL,ulp,urp,llp,lrp));
+		fnnz = nnz0;
+		n = nnz0;
+		for(i=m0;RSB_LIKELY(i<m1);++i)
+		{
+			if((nnz1-nnz0)/(m1-m0)<RSB_WANT_BINSEARCH_MIN_NZPR) 
+			{
+				rnnz = 0;
+				for(;RSB_LIKELY(n<nnz1 && IA[n]==i);++n)
+					++rnnz;
+				IL[i+1] = nnz0+rnnz;
+				nnz0 += rnnz;
+			}
+			else
+			{
+				/* TODO : should use a smarter strategy than this one */
+				lnnz = fnnz+rsb__nnz_split_coo_bsearch(IA+fnnz,i+1,nnz1-fnnz);
+				//RSB_INFO("%d : %d\n",i,lnnz);
+				IL[i+1] = lnnz;
+				fnnz = lnnz;
+			}
+		}
+nowork:			
+	RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+	#pragma omp barrier
+	RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+	}
+after:
+	IL[m] = nnz;
+	//int i; RSB_INFO(":::"); for(i=0;RSB_LIKELY(i<m+1);++i) RSB_INFO("%d ",IL[i]); RSB_INFO("\n");
+	//RSB_INFO(":::"); for(i=0;RSB_LIKELY(i<nnz);++i) RSB_INFO("%d ",IA[i]); RSB_INFO("\n");
+//err:
+	RSB_DO_ERR_RETURN(errval)
+#endif
+}
+
+#if 0
+static rsb_err_t rsb_do_fill_partially_rcsr_arrays_for_later(struct rsb_mtx_t * mtxAp, 
+		const rsb_coo_idx_t * IL, const rsb_coo_idx_t * IR,
+		rsb_coo_idx_t * IA, rsb_coo_idx_t * JA,
+		//const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA,
+		rsb_nnz_idx_t nzoff, rsb_coo_idx_t m, rsb_coo_idx_t roff )
+{
+	/**
+		\ingroup gr_unfinished
+	 */
+	mtxAp->nzoff = nzoff;
+	mtxAp->bindx = IA+nzoff;
+	mtxAp->bpntr = NULL;
+#if RSB_WANT_FIRST_VERSION
+	RSB_COA_MEMMOVE(mtxAp->bindx,IL,0,roff,m+1);
+#endif /* RSB_WANT_FIRST_VERSION */
+#if RSB_WANT_FIRST_VERSION
+{
+	rsb_nnz_idx_t i;
+	for(i=mtxAp->roff;RSB_LIKELY(i<mtxAp->roff+mtxAp->nr);++i)
+	{
+		rsb_nnz_idx_t nnz1 = IR[i];
+		rsb_nnz_idx_t nnz0 = IL[i];
+//		RSB_C2R_ASSERT(IL[i-mtxAp->roff]>=IB[i]);
+//		RSB_C2R_ASSERT(IL[i-mtxAp->roff]<=IB[i+1]);
+//		RSB_C2R_ASSERT(IL[i-mtxAp->roff+1]>=IB[i+1]);
+		RSB_C2R_ASSERT(nnz0>=IL[i]);
+		RSB_C2R_ASSERT(nnz1<=IR[i]);
+		if(nnz1==nnz0)continue;
+//	       	RSB_C2R_ASSERT(nnz1<=IB[i+1]);
+		RSB_C2R_ASSERT(JA[nnz0+0]>=mtxAp->coff);
+		RSB_C2R_ASSERT(JA[nnz1-1]< mtxAp->coff+mtxAp->nc);
+	}
+	}
+#endif /* RSB_WANT_FIRST_VERSION */
+
+
+
+	return RSB_ERR_NO_ERROR;
+}
+#endif
+
+#if 0
+static rsb_err_t rsb_do_fill_rcsr_arrays_for_later(struct rsb_mtx_t * mtxAp, 
+		const rsb_coo_idx_t * IL, const rsb_coo_idx_t * IR,
+	       	//const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA,
+	       	rsb_coo_idx_t * IA, rsb_coo_idx_t * JA,
+		rsb_nnz_idx_t nzoff, rsb_coo_idx_t m, rsb_coo_idx_t roff )
+{
+	/**
+		\ingroup gr_unfinished
+	 */
+	if(!IR)
+		IR = IL+1;
+	mtxAp->nzoff = nzoff;
+	mtxAp->bindx = IA+nzoff;
+	mtxAp->bpntr = IA+nzoff+m+1;
+#if RSB_C2R_WANT_MAYBE_FASTER 
+	RSB_COA_MEMCPY_ROWSZ(mtxAp->bindx,IL,0,roff,m+1);
+	RSB_COA_MEMCPY_ROWSZ(mtxAp->bpntr,IR,0,roff,m+1);
+//	RSB_COA_MEMCPY(mtxAp->bindx,IL,0,roff,m+1);
+//	RSB_COA_MEMCPY(mtxAp->bpntr,IR,0,roff,m+1);
+//	RSB_COA_MEMCPY_parallel(mtxAp->bindx,IL,0,roff,m+1);
+//	RSB_COA_MEMCPY_parallel(mtxAp->bpntr,IR,0,roff,m+1);
+#else /* RSB_C2R_WANT_MAYBE_FASTER */
+	/* are we sure  we need MEMMOVE here ? FIXME */
+	RSB_COA_MEMMOVE(mtxAp->bindx,IL,0,roff,m+1);
+	RSB_COA_MEMMOVE(mtxAp->bpntr,IR,0,roff,m+1);
+#endif /* RSB_C2R_WANT_MAYBE_FASTER */
+
+#if RSB_C2R_PARANOIA
+	{
+	rsb_nnz_idx_t i;
+	for(i=0;i<mtxAp->nr;++i)
+	//for(i=mtxAp->roff;i<mtxAp->roff+mtxAp->nr;++i)
+	{
+		rsb_nnz_idx_t nnz1 = IR[i];
+		rsb_nnz_idx_t nnz0 = IL[i];
+//		RSB_C2R_ASSERT(IL[i-mtxAp->roff]>=IB[i]);
+//		RSB_C2R_ASSERT(IL[i-mtxAp->roff]<=IB[i+1]);
+//		RSB_C2R_ASSERT(IL[i-mtxAp->roff+1]>=IB[i+1]);
+		RSB_C2R_ASSERT(nnz0>=IL[i]);
+		RSB_C2R_ASSERT(nnz1<=IR[i]);
+		if(nnz1==nnz0)continue;
+//	       	RSB_C2R_ASSERT(nnz1<=IB[i+1]);
+		RSB_C2R_ASSERT(JA[nnz0+0]>=mtxAp->coff);
+		RSB_C2R_ASSERT(JA[nnz1-1]< mtxAp->coff+mtxAp->nc);
+	}
+	}
+#endif /* RSB_C2R_PARANOIA */
+	return RSB_ERR_NO_ERROR;
+}
+#endif
+
+static rsb_err_t rsb_do_fill_early_leaf_matrix( struct rsb_mtx_t * mtxAp, struct rsb_mtx_t * submatrix,
+		const rsb_coo_idx_t * IL, const rsb_coo_idx_t * IR, 
+		rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, const rsb_coo_idx_t * VA,
+		//const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const rsb_coo_idx_t * VA,
+		rsb_nnz_idx_t snzoff, rsb_nnz_idx_t nnz, rsb_coo_idx_t m, rsb_coo_idx_t k,
+		rsb_coo_idx_t roff, rsb_coo_idx_t coff, rsb_type_t typecode, rsb_flags_t flags )
+{
+	/**
+		\ingroup gr_unfinished
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+//	if(!IR)
+//		IR = IL+1;
+
+#if 0
+	/* 20131206 nowadays IR and IL are always NULL */
+	if(!RSB_DO_TOOFEWNNZFORRCSR(nnz,m) && IR && IL)
+	{
+		// the matrix could be split further: we fill it with info to continue, if necessary
+		RSB_DO_ERROR_CUMULATE(errval,rsb_do_fill_rcsr_arrays_for_later(submatrix,IL,IR,IA,JA,snzoff,m,roff));
+	}
+	else
+	if(!RSB_DO_TOOFEWNNZFORCSR(nnz,m) && IR && IL)
+	{
+		RSB_DO_ERROR_CUMULATE(errval,rsb_do_fill_partially_rcsr_arrays_for_later(submatrix,IL,IR,IA,JA,snzoff,m,roff));
+		//RSB_ERROR("nnz=%d ,m=%d ! what shall we do ?\n",nnz,m);
+		RSB_DO_FLAG_DEL(submatrix->flags,RSB_FLAG_WANT_BCSS_STORAGE);
+	}
+	else
+#endif
+	{
+		if(RSB_C2R_IF_VERBOSE)
+			RSB_INFO("building a very sparse recursive matrix\n");
+
+		/* no hope for CSR : however, full/half word COO will fit  */
+		submatrix->nzoff = snzoff;
+		submatrix->bindx = NULL;
+		submatrix->bpntr = NULL;
+		RSB_DO_FLAG_DEL(submatrix->flags,RSB_FLAG_WANT_BCSS_STORAGE);
+		//RSB_ERROR("nnz=%d ,m=%d ! what shall we do ?\n",nnz,m);
+	}
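+	/* quadrant slot: 0=upper-left, 1=upper-right, 2=lower-left, 3=lower-right,
+	   selected from the submatrix offsets relative to its parent */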
+	mtxAp->sm[roff?(coff?3:2):(coff?1:0)] = submatrix;
+	submatrix->roff = roff+mtxAp->roff;
+	submatrix->coff = coff+mtxAp->coff;
+	RSB_DO_ERROR_CUMULATE(errval,rsb__set_init_flags_and_stuff(submatrix,NULL,NULL,m,k,nnz,nnz,nnz,typecode,flags));
+//err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+int rsb_compar_rcsr_matrix_leftmost_first(const void * ap, const void * bp)
+{
+	/**
+		\ingroup gr_internals
+		A compare function to be used with qsort.
+		TODO:RENAME: rsb_compar_rcsr_matrix_leftmost_first -> ?
+		Orders first by column, then by row.
+	*/
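+	/*
+	   Illustrative example (hypothetical submatrices): leaves with (roff,coff) in
+	   {(4,0),(0,4),(0,0)} sort as {(0,0),(4,0),(0,4)}; submatrices still flagged
+	   with RSB_FLAG_QUAD_PARTITIONING precede all leaves.
+	*/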
+	struct rsb_mtx_t *a = *(struct rsb_mtx_t **)ap;
+	struct rsb_mtx_t *b = *(struct rsb_mtx_t **)bp;
+	int ss = 1;	/* should swap results ? */
+	rsb_bool_t at;
+	rsb_bool_t bt;
+
+	at=!RSB_DO_FLAG_HAS(a->flags,RSB_FLAG_QUAD_PARTITIONING);
+	bt=!RSB_DO_FLAG_HAS(b->flags,RSB_FLAG_QUAD_PARTITIONING);
+
+	if(at && !bt)
+		return 1;
+	if(!at && bt)
+		return -1;
+
+	if(a->coff < b->coff)
+	{
+		RSB_SWAP(struct rsb_mtx_t *,a,b);
+		ss = -1;/* should swap results ! */
+	}
+
+	return (a->coff==b->coff)?(a->roff>b->roff?1:(a->roff<b->roff?-1:0)):ss;
+}
+
+#if 0
+/* 20121001 unfinished code: commented */
+static struct rsb_mtx_t * rsb_do_find_ffmltart(struct rsb_mtx_t ** submatricesp, rsb_submatrix_idx_t smn, struct rsb_mtx_t * submatrix, rsb_coo_idx_t off)
+{
+	/**
+		\ingroup gr_unfinished
+	*/
+	rsb_submatrix_idx_t smi = 0;
+	rsb_coo_idx_t coff = submatrix->coff;
+	rsb_coo_idx_t roff = submatrix->roff;
+	rsb_coo_idx_t m = submatrix->nr;
+	rsb_coo_idx_t k = submatrix->nc;
+	/* leftmost from right */
+	for(smi=0;smi<smn;++smi)
+		if(submatricesp[smi]->coff>=coff+k &&
+			       	submatricesp[smi]->roff<=off+0 && submatricesp[smi]->roff+submatricesp[smi]->nr>off)
+			return submatricesp[smi];
+	/* leftmost from left, the line after */
+	for(smi=0;smi<smn;++smi)
+		if(submatricesp[smi]->coff<coff &&
+			       	submatricesp[smi]->roff<=off+1 && submatricesp[smi]->roff+submatricesp[smi]->nr>off)
+			return submatricesp[smi];
+	return NULL;
+}
+#endif
+
+static rsb_bool_t rsb__should_recursively_partition_matrix(
+	rsb_coo_idx_t mB, rsb_coo_idx_t kB,
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_nnz_idx_t element_count,
+	rsb_nnz_idx_t block_count,
+	rsb_nnz_idx_t nnz,
+	rsb_blk_idx_t Mdim,
+	rsb_blk_idx_t mdim,
+	rsb_coo_idx_t roff,
+	rsb_coo_idx_t coff,
+	rsb_flags_t flags,
+	size_t el_size,
+	rsb_thread_t wet
+)
+{
+#if (RSB_EXPERIMENTAL_QUAD_DIVISION_POLICY == RSB_EXPERIMENTAL_QUAD_DIVISION_POLICY_NAIVE)
+	/*
+		\ingroup gr_internals
+		NEW: UNFINISHED
+		TODO : ERROR HANDLING, DOCS
+		TODO : should partition on high nnz per row count
+			although in this case if the matrix is highly 
+			compact (almost banded) it won't help.
+			Therefore, some sparseness statistics in the matrix constructor
+			would be nice.
+	*/
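+	/*
+	   Numeric sketch of the cache-driven criterion below (hypothetical figures):
+	   with an 8 MiB last-level cache shared by 8 threads, cs is about 1 MiB; a
+	   submatrix whose estimated SpMV working set (smab) is 5 MiB then satisfies
+	   2*smab > 3*3*cs and gets partitioned further.
+	*/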
+	//long cs = rsb__get_lastlevel_c_size();
+	//long cs = rsb__get_lastlevel_c_size_per_thread();
+	long cs=(rsb__get_lastlevel_c_size()/(wet>0?wet:rsb_get_num_threads()));
+	rsb_bool_t sp = RSB_BOOL_FALSE;	/* should partition */
+	rsb_fillin_t efillin=1.0;		/* FIXME */
+	size_t smab=0;			/* spmv memory accessed bytes */
+	//cs/=20000;
+	//cs/=20;
+	//cs/=4;
+	/* FIXME */
+	if(nnz<RSB_RECURSION_MIN_NNZ  || m<RSB_RECURSION_MIN_DIM  || k<RSB_RECURSION_MIN_DIM  || !RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING))
+	{
+		sp = RSB_BOOL_FALSE;
+		goto done;
+	}
+
+	if(kB<1)kB=1;
+	if(mB<1)mB=1;
+
+#if 0
+	if(flags & RSB_FLAG_RECURSIVE_DOUBLE_DETECTED_CACHE) 
+		cs*=2;
+
+	if(flags & RSB_FLAG_RECURSIVE_HALF_DETECTED_CACHE) 
+		cs/=2;
+#endif
+
+	if( (flags & RSB_FLAG_RECURSIVE_SUBDIVIDE_MORE_ON_DIAG) && roff == coff )
+		cs/=2;
+	/* this implies a finer-grained subdivision on the diagonal
+	 * FIXME: we could use a factor other than 2 !
+	 * */
+
+
+	/* subdivide at least until matrix indices can be compressed */
+	if((flags & RSB_FLAG_USE_HALFWORD_INDICES_CSR) && m>1 && k>1 && 
+//			nnz>1
+//			nnz>(cs/4)
+			nnz*el_size>2*cs
+			&& !rsb__do_is_candidate_size_for_halfword_csr(m,k,nnz,flags))
+		return RSB_BOOL_TRUE;
+	if((flags & RSB_FLAG_USE_HALFWORD_INDICES_COO) && m>1 && k>1 && 
+//			nnz>1
+//			nnz>(cs/4)
+			nnz*el_size>2*cs
+			&& !rsb__do_is_candidate_size_for_halfword_coo(m,k,flags))
+		return RSB_BOOL_TRUE;
+
+	if(cs>0)
+	{
+		smab = rsb_spmv_memory_accessed_bytes_(mB,kB,m,k,efillin*nnz,((efillin*nnz)/mB)/kB,m/mB,el_size);
+
+		if( 2*smab > 3*3*cs )	/* FIXME : overflow possible */
+			sp=1;
+		else
+		if( 
+			/* FIXME! */
+			(((Mdim+mdim+m+k)*sizeof(rsb_coo_idx_t))
+			/(nnz*el_size)) > 8*cs
+		)
+			sp = RSB_BOOL_TRUE;
+		else
+			sp = RSB_BOOL_FALSE;
+	}
+	else
+	{	
+		/* no cache info (FIXME: there should be no section like this one) */
+		if(  
+			Mdim<8 || mdim<8 || m < 500 
+			|| k < 500 || nnz < 200*100)
+			sp = RSB_BOOL_FALSE;
+		else
+			sp = RSB_BOOL_TRUE;
+	}
+#ifdef RSB_EXPERIMENTAL_ROWS_SUBDIVIDE_TO_CORES_NUM
+	/* STILL UNIMPLEMENTED */
+#endif /* RSB_EXPERIMENTAL_ROWS_SUBDIVIDE_TO_CORES_NUM */
+#if RSB_EXPERIMENTAL_NO_SUBDIVIDE_ON_MIN_NNZ_PER_ROW_OR_COLUMN
+	if(1)
+	{
+		rsb_nnz_idx_t nnzpr;
+		if( (flags&RSB_FLAG_WANT_COLUMN_MAJOR_ORDER) != 0 )
+			nnzpr=nnz/k;
+		else
+			nnzpr=nnz/m;
+
+		if( nnzpr < RSB_CONST_MIN_NNZ_PER_ROW_OR_COLUMN_PER_SUBMATRIX )
+			sp = RSB_BOOL_FALSE;
+	}
+#endif /* RSB_EXPERIMENTAL_NO_SUBDIVIDE_ON_MIN_NNZ_PER_ROW_OR_COLUMN */
+done:
+	return sp;
+#else /* (RSB_EXPERIMENTAL_QUAD_DIVISION_POLICY == RSB_EXPERIMENTAL_QUAD_DIVISION_POLICY_NAIVE) */
+	#error "should use a RSB_EXPERIMENTAL_QUAD_DIVISION_POLICY_NAIVE partitioning policy!"
+	return RSB_BOOL_FALSE;
+#endif /* (RSB_EXPERIMENTAL_QUAD_DIVISION_POLICY == RSB_EXPERIMENTAL_QUAD_DIVISION_POLICY_NAIVE) */
+}
+
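+/*
+   Descriptive note (semantics inferred from the calls below): gathers the value
+   segments IL[i]..IR[i]-1, for i0 <= i < m0, from VA contiguously into WA starting
+   at element offset submatrix->nzoff+n0, points submatrix->VA at its final offset
+   within VA, and returns n0 plus the number of elements copied.
+*/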
+static rsb_nnz_idx_t rsb_do_copy_submatrix_coa(struct rsb_mtx_t * submatrix, void * VA, void * WA, rsb_coo_idx_t * IL, rsb_coo_idx_t * IR, size_t el_size, rsb_coo_idx_t n0, rsb_coo_idx_t i0, rsb_coo_idx_t m0)
+{
+	rsb_nnz_idx_t i,n;
+	RSB_C2R_ASSERT(submatrix);
+	RSB_C2R_ASSERT(VA);
+	RSB_C2R_ASSERT(WA);
+	RSB_C2R_ASSERT(IL);
+	RSB_C2R_ASSERT(IR);
+	RSB_C2R_ASSERT(el_size>0);
+	RSB_C2R_ASSERT(n0>=0);
+	RSB_C2R_ASSERT(i0<m0);
+	submatrix->VA=((char*)VA)+el_size*submatrix->nzoff;
+	for(n=n0,i=i0;RSB_LIKELY(i<m0);n+=IR[i]-IL[i],++i)
+	{
+		RSB_C2R_ASSERT(n>=0);
+		RSB_A_MEMCPY_SMALL(WA,VA,submatrix->nzoff+n,IL[i],IR[i]-IL[i],el_size);
+	}
+	return n;
+}
+
+
+static rsb_submatrix_idx_t rsb_do_pick_largest_open_matrix(struct rsb_mtx_t ** submatricesp, rsb_submatrix_idx_t smc)
+{
+	/*
+	 *	FIXME: NEW, UNFINISHED
+	 *	need a lock, too
+	 *	TODO: need a priority queue here
+	 * */
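+	/* A linear scan over the smc open submatrices returns the one with the most
+	   nonzeroes (">=" also admits nnz==0, e.g. diagonal-implicit matrices); each
+	   pick costs O(smc), which the TODO above would cut down via a priority queue. */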
+	rsb_submatrix_idx_t smi = 0,msmi = RSB_SUBM_IDX_MARKER;
+	rsb_nnz_idx_t maxnz = 0;
+	if(RSB_WANT_VERBOSE_SUBDIVISION)
+		if(smc==0)
+			RSB_INFO("warning: no largest open matrix among 0 matrices\n");
+	for(smi=0;smi<smc;++smi)
+	{
+		//RSB_INFO("looking %d : %d\n",smi,submatricesp[smi]->nnz);
+		/* FIXME: ">=" here is used to cope with diagonal implicit matrices (which could have nnz==0), too */
+		if(submatricesp[smi])
+		if(submatricesp[smi]->nnz>=maxnz)
+		{
+			maxnz = submatricesp[smi]->nnz;
+			msmi = smi;
+		}
+	}
+	return msmi;
+}
+
+static rsb_err_t rsb_do_coo2rec_subdivide_parallel(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnz, rsb_type_t typecode, const struct rsb_mtx_partitioning_info_t * pinfop, rsb_flags_t flags, rsb_err_t *errvalp, struct rsb_mtx_t ** submatricesp, struct rsb_mtx_t * mtxAp, const rsb_nnz_idx_t * IB, const rsb_nnz_idx_t * IX, rsb_coo_idx_t * IT, rsb_coo_idx_t * WA, rsb_submatrix_idx_t cmc, rsb_submatrix_idx_t omc, rsb_submatrix_idx_t tmc,  [...]
+{
+	/*
+	 	TODO: clean this up.
+		Note that rsb__set_num_threads is outlawed here.
+	 */
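+	/*
+	   Overview (descriptive): subdivision proceeds in two substages driven by the
+	   "again:" loop below: a first pass with tnn=1 thread splits until min(tmc,wet)
+	   submatrices are available; then stmc is raised to tmc, tnn to the full thread
+	   count, and the remaining open submatrices are subdivided in parallel.
+	*/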
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	size_t el_size = RSB_SIZEOF(typecode);
+	const rsb_nnz_idx_t ttlnz = nnz;		/* total nnz */
+	rsb_nnz_idx_t maxnz = nnz;			/* max encountered nnz for a leaf */
+	rsb_submatrix_idx_t stmc = RSB_MIN(tmc,wet);	/* submatrices total count */
+	rsb_submatrix_idx_t lmc = 1;			/* leaf matrix count */
+	rsb_time_t cpt = RSB_TIME_ZERO,dt = RSB_TIME_ZERO;	/* cpt overwrites mtxAp->cpt */
+	rsb_thread_t tn = rsb_get_num_threads();	/* threads number */
+	rsb_thread_t mtn = RSB_MAX(1,(rsb_thread_t)(rsb_global_session_handle.subdivision_multiplier*tn));
+	rsb_thread_t tnn = 1;				/* threads in the current substage (1: serial first pass) */
+	rsb_float_t skew = ((rsb_float_t)(maxnz))/(nnz/wet);	/* if more than one, will limit scaling */
+	long cbs = rsb__get_cache_block_byte_size();
+
+	if(RSB_WANT_VERBOSE_SUBDIVISION)
+		RSB_INFO("serial substage subdivision of "),RSB_INFO_MATRIX_SUMMARY(mtxAp),RSB_INFO("\n");
+again:
+	#pragma omp parallel reduction(|:errval) shared(submatricesp) num_threads(tnn)
+	{
+		rsb_submatrix_idx_t smi = 0;
+		struct rsb_mtx_t * submatrix = NULL;
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+		rsb_thread_t th_id = omp_get_thread_num();
+#else /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+		rsb_thread_t th_id = 0;
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+#if RSB_WANT_VERBOSE_SUBDIVISION2
+		if(th_id==0){
+			RSB_MTXASM_INFO("entering %d threads in subdivision phase\n",tnn);}
+#endif /* RSB_WANT_VERBOSE_SUBDIVISION2 */
+iagain:
+		#pragma omp critical (rsb_coo2rsbsub_crs)
+		{
+			smi = rsb_do_pick_largest_open_matrix(submatricesp+cmc,omc);
+			if(smi != RSB_SUBM_IDX_MARKER)
+			{
+				smi += cmc;
+				submatrix = submatricesp[smi];
+				maxnz = submatrix->nnz;
+				/* RSB_ASSERT(nnz>=wet); */
+				skew = ((rsb_float_t)(maxnz))/((rsb_float_t)(nnz/wet));
+				RSB_ASSERT(!isinf(skew));
+				omc--;
+				if(smi!=cmc)
+				{
+#if 0
+			  		assert(submatricesp[smi]);
+			  		assert(submatricesp[cmc]);
+#endif
+					RSB_SWAP(struct rsb_mtx_t *,submatricesp[smi],submatricesp[cmc]);
+				}
+				++cmc;
+			}
+			else
+			{
+				submatrix = NULL;
+			}
+	 		if(RSB_WANT_VERBOSE_SUBDIVISION)
+			{
+				if(submatrix)
+					RSB_INFO("subdividing "),RSB_INFO_MATRIX_SUMMARY(submatrix),RSB_INFO(" (open:%d,closed:%d) for thread %d\n",omc,cmc,th_id);
+				else
+					RSB_INFO("no available submatrix (open:%d,closed:%d) for thread %d/%d\n",omc,cmc,th_id,tnn);
+			}
+		}
+	if(smi!=RSB_SUBM_IDX_MARKER)
+	{
+#if 0
+	for(smi=0;RSB_LIKELY(omc>0);smi=cmc+omc-1)
+	while(submatrix)
+	if(omc>0 && ((submatrix=submatricesp[smi])!=NULL))
+#endif
+	{
+		rsb_coo_idx_t k = submatrix->nc;
+		rsb_coo_idx_t m = submatrix->nr;
+		rsb_coo_idx_t hk = RSB_MIDDLE(k);
+		rsb_coo_idx_t hm = RSB_MIDDLE(m);
+		rsb_nnz_idx_t ul = 0,ur = 0,ll = 0,lr = 0;
+		rsb_nnz_idx_t nnz = submatrix->nnz;
+		rsb_coo_idx_t roff = submatrix->roff;
+		rsb_coo_idx_t coff = submatrix->coff;
+		rsb_flags_t flags = submatrix->flags;
+		rsb_nnz_idx_t nzoff = submatrix->nzoff;
+		rsb_bool_t sqs = RSB_BOOL_FALSE;		/* should quad subdivide */
+		rsb_submatrix_idx_t smc = 0;	/* submatrices count */
+
+		if(RSB_C2R_IF_VERBOSE)
+			RSB_INFO("cmc:%d omc:%d smi:%d tmc=%d stmc=%d th_id=%d\n",cmc,omc,smi,tmc,stmc,th_id);
+
+		/* too few nonzeros for recursion (TODO: may change in the future) */
+		if(RSB_DO_TOOFEWNNZFORRCSR(nnz,m))
+#if RSB_WANT_SUBDIVISION_FIXES_20101120
+		if(!RSB_DO_FLAG_HAS(flags,RSB_FLAG_WANT_COO_STORAGE))
+#endif /* RSB_WANT_SUBDIVISION_FIXES_20101120 */
+		{
+			if(RSB_C2R_IF_VERBOSE)
+				RSB_INFO("matrix too sparse for RCSR: rejoining\n");
+			sqs = RSB_BOOL_FALSE;
+			goto nosqstest;
+		}
+
+		/* decide if the matrix is worth subdividing further (soft) */
+		sqs = rsb__should_recursively_partition_matrix(0,0,m,k,0,0,nnz,m,k,roff,coff,flags,el_size,mtn);
+#if RSB_WANT_SUBDIVISION_FIXES_20101120
+		if(nnz<RSB_RECURSION_MIN_NNZ  || m<RSB_RECURSION_MIN_DIM  || k<RSB_RECURSION_MIN_DIM  || !RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING))
+		{
+			sqs = RSB_BOOL_FALSE;		/* a hard condition */
+			goto nosqstest;
+		}
+		else
+			if(cmc+omc<tmc)
+				if(skew>RSB_SUBDIVISION_SKEW_MAX)
+					sqs = RSB_BOOL_TRUE;	/* a soft condition */
+#endif /* RSB_WANT_SUBDIVISION_FIXES_20101120 */
+
+		if(!sqs)
+			if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_RECURSIVE_MORE_LEAVES_THAN_THREADS))
+				if(wet>lmc)
+					sqs = RSB_BOOL_TRUE;
+
+		if(sqs)
+		{
+			rsb_bool_t awfcsr = RSB_BOOL_FALSE; /* all of the matrices will fit csr ? */
+#if RSB_WANT_SUBDIVISION_FIXES_20101120
+			rsb_nnz_idx_t mqnnz = RSB_MAX(RSB_MAX(ul,ur),RSB_MAX(lr,ll));
+#endif /* RSB_WANT_SUBDIVISION_FIXES_20101120 */
+
+			/* compute the split vector */
+			dt = - rsb_time();
+			if((errval = rsb_do_compute_vertical_split_search_only(IA,JA,roff,coff,m,k,hm,hk,nnz,IB,&ul,&ur,&ll,&lr))!=RSB_ERR_NO_ERROR) 
+				;/* goto err; */
+			dt += rsb_time();
+			cpt += dt;
+			RSB_C2R_ASSERT(IR);
+			awfcsr = ( (ul>0 && RSB_DO_TOOFEWNNZFORCSR(ul,hm))   || (ur>0 && RSB_DO_TOOFEWNNZFORCSR(ur,hm)) || (lr>0 && RSB_DO_TOOFEWNNZFORCSR(lr,m-hm)) || (ll>0 && RSB_DO_TOOFEWNNZFORCSR(ll,m-hm)))?RSB_BOOL_TRUE:RSB_BOOL_FALSE;
+
+			if(awfcsr) /* FIXME: misleading naming ! */ 
+			{
+				/* if some leaf won't fit in CSR, we don't split anymore */
+				if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_WANT_COO_STORAGE))
+					sqs = RSB_BOOL_TRUE;
+				else
+					sqs = RSB_BOOL_FALSE; 
+				if(RSB_C2R_IF_VERBOSE)
+					RSB_INFO("no space for conversion of some leaf: rejoining ? %d\n",!sqs);
+			}
+
+#if RSB_WANT_SUBDIVISION_FIXES_20101120
+			/* an alternative would be to place this test in the branch above*/
+			if(	mqnnz>RSB_MAX_QUADRANTS_UNBALANCE*(nnz-mqnnz) &&
+				el_size*nnz<cbs &&
+				nnz < (ttlnz/wet) )
+				sqs = RSB_BOOL_FALSE; 
+#endif /* RSB_WANT_SUBDIVISION_FIXES_20101120 */
+
+			/* how many submatrices out of four ? */
+			smc = (ul?1:0)+(ur?1:0)+(ll?1:0)+(lr?1:0);
+			if(cmc+omc+smc>tmc)
+			{	
+				if(RSB_C2R_IF_VERBOSE)
+					RSB_INFO("too many submatrices (%d+%d>%d: rejoining\n",cmc+omc,smc,tmc);
+				sqs = RSB_BOOL_FALSE;
+				goto nosqstest;
+			}
+
+#if !RSB_WANT_SUBDIVISION_FIXES_20101120
+#ifdef RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES
+			if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES))
+				if(wet<lmc-1)
+				{	
+					if(RSB_C2R_IF_VERBOSE)
+						RSB_INFO("RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES: rejoining\n");
+					sqs = RSB_BOOL_FALSE;
+				}
+#endif /* RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES */
+#endif /* RSB_WANT_SUBDIVISION_FIXES_20101120 */
+
+			if(RSB_C2R_IF_VERBOSE)
+			RSB_ERROR("splitting %d/%d -> %d/%d %d/%d %d/%d %d/%d sqs? %d\n",nnz,m,ul,hm,ur,hm,ll,m-hm,lr,m-hm,sqs);
+			if(ul+ur+ll+lr != nnz)
+			{
+				if(RSB_C2R_IF_VERBOSE)
+					RSB_ERROR("%d ?= %d + %d + %d + %d = %d\n",nnz,ul,ur,ll,lr,ul+ur+ll+lr);
+				RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_INTERNAL_ERROR);
+			}
+		}
+nosqstest:
+		if(sqs)
+		{
+			/* should quad-subdivide. let's take care of indices. */
+			rsb_nnz_idx_t snzoff = nzoff;
+			rsb_submatrix_idx_t smci = 0;
+			rsb_submatrix_idx_t smco = 0;
+			struct rsb_mtx_t*isms[4] = {NULL,NULL,NULL,NULL};
+			
+			/*
+			the index arrays are copied/linked into the quadrants.
+			some quadrants may seem ready for recursion, but may not turn out to be so later on:
+			they will be made leaves later, if necessary.
+			...
+			*/
+			RSB_C2R_ASSERT(ur>=0 && ul>=0 && lr>=0 && ll>=0);
+
+			#pragma omp critical (rsb_coo2rsbsub_crs)
+			{
+				if(cmc+omc+smc+RSB_SUBDIVISION_BUG_EXTRA>tmc)
+				{	
+					if(RSB_C2R_IF_VERBOSE)
+						RSB_INFO("too many submatrices (%d+%d>%d): rejoining\n",cmc+omc,smc,tmc);
+					sqs = RSB_BOOL_FALSE;
+				}
+				else
+				{
+					lmc += smc;
+					lmc -= 1;
+					smco = cmc+omc;
+					snzoff = nzoff;
+					if(ul){ isms[0] = submatricesp[smco+smci];submatricesp[smco+smci] = NULL;smci++;}
+					if(ur){ isms[1] = submatricesp[smco+smci];submatricesp[smco+smci] = NULL;smci++;}
+					if(ll){ isms[2] = submatricesp[smco+smci];submatricesp[smco+smci] = NULL;smci++;}
+					if(lr){ isms[3] = submatricesp[smco+smci];submatricesp[smco+smci] = NULL;smci++;}
+					smci = 0;
+					omc += smc;
+				}
+			if(sqs)	
+			{
+				if(ul)
+				RSB_DO_ERROR_CUMULATE(errval,rsb_do_fill_early_leaf_matrix(submatrix,isms[0],NULL,NULL,IA,JA,VA,snzoff,ul,hm,hk,0,0,typecode,flags)), snzoff += ul,++smci;
+				if(ur)
+				RSB_DO_ERROR_CUMULATE(errval,rsb_do_fill_early_leaf_matrix(submatrix,isms[1],NULL,NULL,IA,JA,VA,snzoff,ur,hm,k-hk,0,hk,typecode,flags)), snzoff += ur,++smci;
+				if(ll)
+				RSB_DO_ERROR_CUMULATE(errval,rsb_do_fill_early_leaf_matrix(submatrix,isms[2],NULL,NULL,IA,JA,VA,snzoff,ll,m-hm,hk,hm,0,typecode,flags)), snzoff += ll,++smci;
+				if(lr)
+				RSB_DO_ERROR_CUMULATE(errval,rsb_do_fill_early_leaf_matrix(submatrix,isms[3],NULL,NULL,IA,JA,VA,snzoff,lr,m-hm,k-hk,hm,hk,typecode,flags)), snzoff += lr,++smci;
+				RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+			}
+
+			if(sqs)
+			{
+				smci = 0;
+				if(ul){ submatricesp[smco+smci] = isms[0];smci++;}
+				if(ur){ submatricesp[smco+smci] = isms[1];smci++;}
+				if(ll){ submatricesp[smco+smci] = isms[2];smci++;}
+				if(lr){ submatricesp[smco+smci] = isms[3];smci++;}
+			}
+
+			if(sqs)
+			{
+			if(snzoff-nzoff!=nnz)
+			{
+				/* is this a partition ? */
+				RSB_ERROR("%d - %d != %d ?= %d + %d + %d + %d = %d\n",snzoff,nzoff,nnz,ul,ur,ll,lr,ul+ur+ll+lr);
+				RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_INTERNAL_ERROR);
+			}
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR(RSB_ERRM_ES); /* goto err; */
+			}
+			RSB_DO_FLAG_ADD(submatrix->flags,RSB_FLAG_QUAD_PARTITIONING);
+			RSB_DO_FLAG_DEL(submatrix->flags,RSB_FLAG_NON_ROOT_MATRIX);
+			submatrix->bindx = NULL; submatrix->bpntr = NULL; submatrix->indptr = NULL;
+			}
+			}
+		}
+		if(!sqs)
+		{
+			RSB_DO_FLAG_SUBST(submatrix->flags,RSB_FLAG_QUAD_PARTITIONING,RSB_FLAG_NON_ROOT_MATRIX);
+			/* selecting a format and declaring as leaf */
+			if(!RSB_DO_TOOFEWNNZFORCSR(nnz,m) /*&& IR && IL*/)
+			{
+/*				RSB_INFO("CSR -> COO ?\n"); */
+				if(RSB_DO_FLAG_HAS(submatrix->flags,RSB_FLAG_WANT_BCSS_STORAGE))
+					RSB_DO_FLAG_DEL(submatrix->flags,RSB_FLAG_WANT_COO_STORAGE);
+				if((errval = rsb__do_set_init_storage_flags(submatrix,submatrix->flags))!=RSB_ERR_NO_ERROR)
+					;/* goto err; */
+			}
+			else
+			{
+/*				RSB_INFO("COO !\n"); */
+				rsb_flags_t sflags = flags;
+				RSB_DO_FLAG_SUBST(sflags,RSB_FLAG_WANT_BCSS_STORAGE,RSB_FLAG_WANT_COO_STORAGE);
+				if((errval = rsb__do_set_init_storage_flags(submatrix,sflags))!=RSB_ERR_NO_ERROR)
+					;/* goto err; */
+			}	
+			if(RSB_C2R_IF_VERBOSE)
+				RSB_INFO("freezing %d ",smi+1),
+				RSB_INFO_MATRIX_SUMMARY(submatrix),
+				RSB_INFO("\n");
+		}
+		/* the matrix is declared as 'closed'.
+		   sorting would make smi point to the biggest open matrix, to be picked up next */
+#if 0
+		qsort(submatricesp+cmc,(size_t)(omc),sizeof(struct rsb_mtx_t*),& rsb_compar_rcsr_matrix_regarding_nnz);
+		RSB_SWAP(struct rsb_mtx_t *,submatricesp[smi],submatricesp[cmc-1]);
+#endif
+	}
+		/*smi = cmc+omc-1; */
+	}
+		if(omc>0 && cmc+omc<stmc)
+			goto iagain;
+#if RSB_WANT_VERBOSE_SUBDIVISION2
+		if(th_id==0)
+		{
+			RSB_MTXASM_INFO("thread %d:terminating subdivision",th_id);
+			if(omc==0)
+			{RSB_MTXASM_INFO(", no more open matrices");}
+			RSB_MTXASM_INFO("(closed %d= %d nodes + %d leaves, out of %d available)",cmc,cmc-lmc,lmc,tmc);
+			RSB_MTXASM_INFO(",(maxnz=%d,skew=%g)",maxnz,skew);
+			if(cmc+omc>=stmc)
+			{RSB_MTXASM_INFO(", no room left for submatrices");}
+			RSB_MTXASM_INFO(".\n");
+		}
+#endif /* RSB_WANT_VERBOSE_SUBDIVISION2 */
+	} /* parallel */
+
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+
+	#pragma omp barrier
+	if(stmc!=tmc)
+	{
+		stmc = tmc;
+		if(RSB_WANT_VERBOSE_SUBDIVISION)
+			RSB_INFO("parallel substage subdivision of "),RSB_INFO_MATRIX_SUMMARY(mtxAp),RSB_INFO("\n");
+		tnn = tn;
+		goto again;
+	}
+	else
+	{
+	
+		if(RSB_WANT_VERBOSE_SUBDIVISION)
+			RSB_INFO("parallel substage subdivision of "),RSB_INFO_MATRIX_SUMMARY(mtxAp),RSB_INFO(" not required\n");
+	}
+	{
+ 		if(RSB_WANT_VERBOSE_SUBDIVISION)
+			RSB_INFO("subdivision of "),RSB_INFO_MATRIX_SUMMARY(mtxAp),RSB_INFO("complete \n");
+	}
+	mtxAp->cpt = cpt;
+
+	*cmcp = cmc;
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+#if 0
+static rsb_err_t rsb_do_coo2rec_subdivide(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnz, rsb_type_t typecode, const struct rsb_mtx_partitioning_info_t * pinfop, rsb_flags_t flags, rsb_err_t *errvalp, struct rsb_mtx_t ** submatricesp, struct rsb_mtx_t * mtxAp, const rsb_nnz_idx_t * IB, const rsb_nnz_idx_t * IX, rsb_coo_idx_t * IT, rsb_coo_idx_t * WA, rsb_submatrix_idx_t cmc, rsb_submatrix_idx_t omc, rsb_submatrix_idx_t tmc, rsb_threa [...]
+{
+	rsb_nnz_idx_t tdnnz = 0;
+	rsb_submatrix_idx_t smi = 0;	/* submatrix index */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	size_t el_size = RSB_SIZEOF(typecode);
+	rsb_time_t cpt = RSB_TIME_ZERO,dt = RSB_TIME_ZERO;
+
+	for(smi=0;RSB_LIKELY(omc>0);smi=cmc+omc-1)
+	{
+		struct rsb_mtx_t * submatrix = submatricesp[smi];
+		rsb_coo_idx_t k = submatrix->nc;
+		rsb_coo_idx_t m = submatrix->nr;
+		rsb_coo_idx_t hk = (k+1)/2;
+		rsb_coo_idx_t hm = (m+1)/2;
+		rsb_nnz_idx_t ul = 0,ur = 0,ll = 0,lr = 0;
+		rsb_nnz_idx_t nnz = submatrix->nnz;
+		rsb_coo_idx_t roff = submatrix->roff;
+		rsb_coo_idx_t coff = submatrix->coff;
+		rsb_coo_idx_t*IL = submatrix->bindx;	// IL will be hosted here
+		rsb_coo_idx_t*IM = IT;			// IM will be hosted in a temporary vector
+		rsb_coo_idx_t*IR = submatrix->bpntr;	// IR will be hosted here
+		rsb_flags_t flags = submatrix->flags;
+		rsb_nnz_idx_t nzoff = submatrix->nzoff;
+		rsb_bool_t sqs = RSB_BOOL_FALSE;		// should quad subdivide
+		rsb_submatrix_idx_t smc = 0;	/* submatrices count */
+
+//		RSB_INFO("picked up %d/%d -> %d x %d, %d nnz, @ %d %d \n",smi+1,tmc,m,k,nnz,roff,coff);
+
+		if(!IL && !RSB_DO_FLAG_HAS(flags,RSB_FLAG_WANT_COO_STORAGE) )
+		{
+			/* if there is no line pointer, we make this submatrix leaf */
+			sqs = RSB_BOOL_FALSE;
+			if(RSB_C2R_IF_VERBOSE)
+				RSB_INFO("no split, as no line pointer found\n");
+			goto nosqstest;
+		}
+
+		if(/*!IL || */!IM)
+		{
+			/* if this happens, this is an error */
+			RSB_ERROR("IL:%p, IM:%p\n",IL,IM);
+			errval = RSB_ERR_INTERNAL_ERROR;
+			goto err;
+		}
+	
+		/* too few nonzeros for recursion (TODO: may change in the future) */
+		if(RSB_DO_TOOFEWNNZFORRCSR(nnz,m) 
+				/* 
+				 * Uncommenting the following allows subdivision for very sparse matrices.
+				 * However, this feature is unfinished and buggy (segfault risk).
+				 * */
+			       /*	&& !RSB_DO_FLAG_HAS(flags,RSB_FLAG_WANT_COO_STORAGE) */
+				)
+		{
+			if(RSB_C2R_IF_VERBOSE)
+				RSB_INFO("matrix too sparse for RCSR: rejoining\n");
+
+			sqs = RSB_BOOL_FALSE;
+			goto nosqstest;
+		}
+
+		/* decide if the matrix is worth subdividing further */
+		sqs = rsb__should_recursively_partition_matrix(0,0,m,k,0,0,nnz,m,k,roff,coff,flags,el_size,0);
+
+		/* if we want subdivision  */
+
+		if(!sqs)
+		if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_RECURSIVE_MORE_LEAVES_THAN_THREADS))
+		if(wet>cmc+1+smc)/* /FIXME : this may not terminate! */
+			sqs = RSB_BOOL_TRUE;
+
+		if(sqs)
+		{
+			rsb_bool_t awfcsr = RSB_BOOL_FALSE; /* all of the matrices will fit csr ? */
+
+			// compute the split vector
+			dt = - rsb_time();
+			if((!RSB_DO_TOOFEWNNZFORCSR(nnz,m)) && IR && IL)
+			{if((errval = rsb_do_compute_vertical_split(IA,JA,roff,coff,m,k,hm,hk,nnz,IL,IM,IR,&ul,&ur,&ll,&lr))!=RSB_ERR_NO_ERROR) goto err;}
+			else
+			{
+				if(RSB_C2R_IF_VERBOSE)
+					RSB_INFO("using the sparse splitter\n");
+				if((errval = rsb_do_compute_vertical_split_search_only(IA,JA,roff,coff,m,k,hm,hk,nnz,IB,&ul,&ur,&ll,&lr))!=RSB_ERR_NO_ERROR) goto err;
+			}
+			dt += rsb_time();
+			cpt += dt;
+			RSB_C2R_ASSERT(IR);
+			awfcsr = ( (ul>0 && RSB_DO_TOOFEWNNZFORCSR(ul,hm))   || (ur>0 && RSB_DO_TOOFEWNNZFORCSR(ur,hm)) || (lr>0 && RSB_DO_TOOFEWNNZFORCSR(lr,m-hm)) || (ll>0 && RSB_DO_TOOFEWNNZFORCSR(ll,m-hm)))?RSB_BOOL_TRUE:RSB_BOOL_FALSE;
+
+			// after computing the split vector, we can still resign from subdividing
+			// especially if some submatrix is deemed too small and the overall submatrices count is enough
+			// TODO: if(rsb__should_rejoin_small_leaf(...
+			// ...
+			
+			/* is there room for these additional submatrices ? */
+//			if( (ul>0 && RSB_DO_TOOFEWNNZFORRCSR(ul,hm))   || (ur>0 && RSB_DO_TOOFEWNNZFORRCSR(ur,hm)) || (lr>0 && RSB_DO_TOOFEWNNZFORRCSR(lr,m-hm)) || (ll>0 && RSB_DO_TOOFEWNNZFORRCSR(ll,m-hm)))
+			if(awfcsr)
+			{
+				/* if some leaf won't fit in CSR, we don't split anymore */
+				if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_WANT_COO_STORAGE))
+					sqs = RSB_BOOL_TRUE;
+				else
+					sqs = RSB_BOOL_FALSE; 
+
+				if(RSB_C2R_IF_VERBOSE)
+					RSB_INFO("no space for conversion of some leaf: rejoining ? %d\n",!sqs);
+			}
+
+			/* how many submatrices out of four ? */
+			smc = (ul?1:0)+(ur?1:0)+(ll?1:0)+(lr?1:0);
+			if(cmc+omc+smc>tmc)
+			{	
+				if(RSB_C2R_IF_VERBOSE)
+					RSB_INFO("too many submatrices: rejoining\n");
+				sqs = RSB_BOOL_FALSE;
+			}
+
+#ifdef RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES
+			/* 
+ 			  if we want to avoid micro leaves, we could stop here 
+ 			  FIXME: we need a better criteria (for proper load balancing!)
+ 			*/
+			if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES))
+				if(wet<cmc)
+				{	
+					if(RSB_C2R_IF_VERBOSE)
+						RSB_INFO("RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES: rejoining\n");
+					sqs = RSB_BOOL_FALSE;
+				}
+#endif /* RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES */
+
+			if(RSB_C2R_IF_VERBOSE)
+			RSB_INFO("splitting %d/%d -> %d/%d %d/%d %d/%d %d/%d sqs? %d\n",nnz,m,ul,hm,ur,hm,ll,m-hm,lr,m-hm,sqs);
+		}
+
+nosqstest:
+		omc--;
+		if(smi!=cmc)
+			RSB_SWAP(struct rsb_mtx_t *,submatricesp[smi],submatricesp[cmc]);
+		++cmc;
+		if(sqs)
+		{
+			/* should quad-subdivide. let's take care of indices. */
+			rsb_nnz_idx_t snzoff = nzoff;
+			
+			// the index arrays are copied/linked into the quadrants
+			// some quadrants may seem ready for recursion, but may not turn out to be so later on:
+			// they will be made leaves later, if necessary.
+			// ...
+			RSB_C2R_ASSERT(ur>=0 && ul>=0 && lr>=0 && ll>=0);
+
+#if RSB_C2R_WANT_MAYBE_FASTER 
+			if(IL)
+				RSB_COA_MEMCPY_ROWSZ(IX,IL,0  ,0,m+1),
+				IL = IX;
+			if(IR)
+				RSB_COA_MEMCPY_ROWSZ(IX,IR,m+1,0,m+1),
+				IR = IX+m+1;
+#else /* RSB_C2R_WANT_MAYBE_FASTER */
+			if(IL)
+				RSB_COA_MEMMOVE(IX,IL,0  ,0,m+1),
+				IL = IX;
+			if(IR)
+				RSB_COA_MEMMOVE(IX,IR,m+1,0,m+1),
+				IR = IX+m+1;
+#endif /* RSB_C2R_WANT_MAYBE_FASTER */
+
+			if(ul)
+				RSB_DO_ERROR_CUMULATE(errval,rsb_do_fill_early_leaf_matrix(submatrix,submatricesp[cmc+omc],IL,IM,IA,JA,VA,snzoff,ul,hm,hk,0,0,typecode,flags), ++omc, snzoff += ul);
+			if(ur)
+				RSB_DO_ERROR_CUMULATE(errval,rsb_do_fill_early_leaf_matrix(submatrix,submatricesp[cmc+omc],IM,IR,IA,JA,VA,snzoff,ur,hm,k-hk,0,hk,typecode,flags), ++omc, snzoff += ur);
+			if(ll)
+				RSB_DO_ERROR_CUMULATE(errval,rsb_do_fill_early_leaf_matrix(submatrix,submatricesp[cmc+omc],IL,IM,IA,JA,VA,snzoff,ll,m-hm,hk,hm,0,typecode,flags), ++omc, snzoff += ll);
+			if(lr)
+				RSB_DO_ERROR_CUMULATE(errval,rsb_do_fill_early_leaf_matrix(submatrix,submatricesp[cmc+omc],IM,IR,IA,JA,VA,snzoff,lr,m-hm,k-hk,hm,hk,typecode,flags), ++omc, snzoff += lr);
+
+			if(snzoff-nzoff!=nnz)
+			{
+				/* is this a partition ? */
+				RSB_ERROR("%d - %d != %d ?= %d + %d + %d + %d = %d\n",snzoff,nzoff,nnz,ul,ur,ll,lr,ul+ur+ll+lr);
+				errval = RSB_ERR_INTERNAL_ERROR; goto err;
+			}
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR(RSB_ERRM_ES); goto err;
+			}
+			RSB_DO_FLAG_ADD(submatrix->flags,RSB_FLAG_QUAD_PARTITIONING);
+			RSB_DO_FLAG_DEL(submatrix->flags,RSB_FLAG_NON_ROOT_MATRIX);
+			submatrix->bindx = NULL; submatrix->bpntr = NULL; submatrix->indptr = NULL;
+		}
+		else
+		{
+			RSB_DO_FLAG_SUBST(submatrix->flags,RSB_FLAG_QUAD_PARTITIONING,RSB_FLAG_NON_ROOT_MATRIX);
+			// we should decide a format, and proceed declaring it as leaf
+			if(!RSB_DO_TOOFEWNNZFORRCSR(nnz,m) && IR && IL)
+			{
+				if(RSB_DO_FLAG_HAS(submatrix->flags,RSB_FLAG_WANT_BCSS_STORAGE))
+					RSB_DO_FLAG_DEL(submatrix->flags,RSB_FLAG_WANT_COO_STORAGE);
+#if RSB_WANT_LITTLE_IMPROVED 
+				if(submatrix==mtxAp)/*  root only */
+					/* FIXME: TODO: IR is NOT needed AT ALL!  */
+					rsb_do_fill_rcsr_arrays_for_later(submatrix,IL,IR,IA,JA,nzoff,m,0);
+#else /* RSB_WANT_LITTLE_IMPROVED */
+					rsb_do_fill_rcsr_arrays_for_later(submatrix,IL,IR,IA,JA,nzoff,m,0);
+#endif /* RSB_WANT_LITTLE_IMPROVED */
+				if((errval = rsb__do_set_init_storage_flags(submatrix,submatrix->flags))!=RSB_ERR_NO_ERROR)
+					goto err;
+				submatrix->VA = VA;	// FIXME: we will place pointers to partially swapped VA, here.
+			}
+			else
+			if(!RSB_DO_TOOFEWNNZFORCSR(nnz,m) /*&& IR && IL*/)
+			{
+//				RSB_INFO("CSR -> COO ?\n");
+				if(RSB_DO_FLAG_HAS(submatrix->flags,RSB_FLAG_WANT_BCSS_STORAGE))
+					RSB_DO_FLAG_DEL(submatrix->flags,RSB_FLAG_WANT_COO_STORAGE);
+				if((errval = rsb__do_set_init_storage_flags(submatrix,submatrix->flags))!=RSB_ERR_NO_ERROR)
+					goto err;
+			}
+			else
+			{
+//				RSB_INFO("COO !\n");
+				rsb_flags_t sflags = flags;
+				RSB_DO_FLAG_SUBST(sflags,RSB_FLAG_WANT_BCSS_STORAGE,RSB_FLAG_WANT_COO_STORAGE);
+				if((errval = rsb__do_set_init_storage_flags(submatrix,sflags))!=RSB_ERR_NO_ERROR)
+					goto err;
+			}	
+			if(RSB_C2R_IF_VERBOSE)
+				RSB_INFO("freezing %d ",smi+1),
+				RSB_INFO_MATRIX_SUMMARY(submatrix),
+				RSB_INFO("\n");
+		}
+		// the matrix is declared as 'closed'.
+		// sorting so that smi will point to the biggest open matrix, which will be picked up next
+		qsort(submatricesp+cmc,(size_t)(omc),sizeof(struct rsb_mtx_t*),& rsb_compar_rcsr_matrix_regarding_nnz);
+		// FIXME: a priority queue would do the job, here
+	}
+	mtxAp->cpt = cpt;
+err:
+	*cmcp = cmc;
+	RSB_DO_ERR_RETURN(errval)
+}
+#endif
+
+static rsb_err_t rsb_do_coo2rec_shuffle(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnz, rsb_type_t typecode, const struct rsb_mtx_partitioning_info_t * pinfop, rsb_flags_t flags, rsb_err_t *errvalp, struct rsb_mtx_t ** submatricesp, struct rsb_mtx_t * mtxAp, const rsb_nnz_idx_t * IB, rsb_coo_idx_t * WA, rsb_submatrix_idx_t cmc)
+{
+	rsb_nnz_idx_t tdnnz = 0;
+	rsb_submatrix_idx_t smi = 0;	/* submatrix index */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	size_t el_size = RSB_SIZEOF(typecode);
+#if RSB_WANT_VERBOSE_TIMINGS
+	rsb_time_t pmt = RSB_TIME_ZERO;
+#endif /* RSB_WANT_VERBOSE_TIMINGS */
+
+	if(!VA || cmc==1)
+	{
+		mtxAp->VA = VA;
+		goto no_va_cp;
+	}
+
+	// the following is a highly parallel phase
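+	// What follows (descriptive note): each leaf submatrix gathers its values from
+	// VA into WA at its final nzoff position; three cases below: an index-based
+	// build using the per-row IL/IR extents, and two structurally identical
+	// search-based paths (subrows located through IB plus binary searches on JA).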
+	#pragma omp parallel for schedule(static,1) reduction(|:errval)  shared(tdnnz,submatricesp,IB) RSB_NTC 
+	for(smi=0;smi<cmc;++smi)
+	{
+		struct rsb_mtx_t * submatrix = submatricesp[smi];
+		rsb_coo_idx_t*IL = submatrix->bindx;
+		rsb_coo_idx_t*IR = submatrix->bpntr;
+		rsb_nnz_idx_t dnnz = 0;
+		rsb_coo_idx_t i;
+//		if(RSB_C2R_IF_VERBOSE)
+//		RSB_INFO("%d -> %d\n",smi,omp_get_thread_num());
+		if(!RSB_DO_FLAG_HAS((submatrix->flags),RSB_FLAG_QUAD_PARTITIONING))
+		{
+			if(RSB_DO_ENOUGHNNZFORINDEXBASEDBUILD(submatrix) && !rsb__is_coo_matrix(submatrix) && IL && IR)
+			{
+				RSB_C2R_ASSERT(IL);
+				RSB_C2R_ASSERT(IR);
+				dnnz = rsb_do_copy_submatrix_coa(submatrix,VA,WA,IL,IR,el_size,0,0,submatrix->nr);
+#if 0
+				for(i=submatrix->roff;RSB_LIKELY(i<submatrix->roff+submatrix->nr);++i)
+				{
+					rsb_nnz_idx_t nnz1 = IR[i-submatrix->roff];
+					rsb_nnz_idx_t nnz0 = IL[i-submatrix->roff];
+					RSB_C2R_ASSERT(IL[i-submatrix->roff]>=IB[i]);
+					RSB_C2R_ASSERT(IL[i-submatrix->roff]<=IB[i+1]);
+					RSB_C2R_ASSERT(IL[i-submatrix->roff+1]>=IB[i+1]);
+					RSB_C2R_ASSERT(nnz0>=IL[i-submatrix->roff]);
+					RSB_C2R_ASSERT(nnz1<=IL[i-submatrix->roff+1]);
+				       	RSB_C2R_ASSERT(nnz1<=IB[i+1]);
+					if(IB[i]==IB[i+1])continue;
+					RSB_C2R_ASSERT(nnz1>=nnz0);
+					if(nnz1==nnz0)continue;
+					RSB_C2R_ASSERT(JA[nnz0+0]>=submatrix->coff);
+					RSB_C2R_ASSERT(JA[nnz1-1]< submatrix->coff+submatrix->nc);
+				}
+#endif
+			}
+			else
+			if(!RSB_DO_TOOFEWNNZFORCSR(submatrix->nnz,submatrix->nr))
+			{
+				//rsb_coo_idx_t*IL = submatrix->bindx;
+				for(i=submatrix->roff;RSB_LIKELY(i<submatrix->roff+submatrix->nr);++i)
+				{
+					//rsb_nnz_idx_t fel;
+					// offset of line i in the global line pointers array
+					rsb_nnz_idx_t nnz0 = IB[i];
+					// nnz0..nnz1 are the boundaries of line i
+					rsb_nnz_idx_t nnz1 = IB[i+1];
+					// check
+					RSB_C2R_ASSERT(nnz0>=IB[i]);
+					RSB_C2R_ASSERT(nnz1<=IB[i+1]);
+					// skip line if empty
+					if(nnz1-nnz0<1)continue;
+					// find first element of line i also in the submatrix
+					nnz0 += rsb__nnz_split_coo_bsearch(JA+nnz0,submatrix->coff,nnz1-nnz0);
+					// skip line if empty in the submatrix
+					if(nnz1-nnz0<1)continue;
+					// find the length of the subrow i in the submatrix
+					nnz1 = nnz0+rsb__nnz_split_coo_bsearch(JA+nnz0,submatrix->coff+submatrix->nc,nnz1-nnz0);
+					// check
+					RSB_C2R_ASSERT(JA[nnz0+0]>=submatrix->coff);
+					// skip line if empty in the submatrix
+					if(nnz1-nnz0<1)continue;
+					// nnz0..nnz1 delimit the nonempty subrow i in the submatrix
+//					RSB_INFO("i:%d, %d..%d -> %d\n",i,nnz0,nnz1-1,submatrix->nzoff+dnnz);
+					// checks
+//					RSB_C2R_ASSERT(IL[i-submatrix->roff]>=IB[i]);
+//					RSB_C2R_ASSERT(IL[i-submatrix->roff]<=IB[i+1]);
+//					RSB_C2R_ASSERT(IL[i-submatrix->roff+1]>=IB[i+1]);
+//					RSB_C2R_ASSERT(nnz0>=IL[i-submatrix->roff]);
+//					RSB_C2R_ASSERT(nnz1<=IL[i-submatrix->roff+1]);
+				       	RSB_C2R_ASSERT(nnz1<=IB[i+1]);
+					RSB_C2R_ASSERT(JA[nnz0+0]>=submatrix->coff);
+					RSB_C2R_ASSERT(JA[nnz1-1]< submatrix->coff+submatrix->nc);
+					// perform the copy
+					RSB_A_MEMCPY_SMALL(WA,VA,submatrix->nzoff+dnnz,nnz0,nnz1-nnz0,el_size);
+					//RSB_COA_MEMCPY(WA,JA,submatrix->nzoff+dnnz,nnz0,nnz1-nnz0);
+					// update the actual offset in the destination array
+					dnnz += nnz1-nnz0;
+				}
+			}
+			else
+			{
+				//rsb_coo_idx_t*IL = submatrix->bindx;
+				for(i=submatrix->roff;RSB_LIKELY(i<submatrix->roff+submatrix->nr);++i)
+				{
+					//rsb_nnz_idx_t fel;
+					// offset of line i in the global line pointers array
+					rsb_nnz_idx_t nnz0 = IB[i];
+					// nnz0..nnz1 are the boundaries of line i
+					rsb_nnz_idx_t nnz1 = IB[i+1];
+					// check
+					RSB_C2R_ASSERT(nnz0>=IB[i]);
+					RSB_C2R_ASSERT(nnz1<=IB[i+1]);
+					// skip line if empty
+					if(nnz1-nnz0<1)continue;
+					// find first element of line i also in the submatrix
+					nnz0 += rsb__nnz_split_coo_bsearch(JA+nnz0,submatrix->coff,nnz1-nnz0);
+					// skip line if empty in the submatrix
+					if(nnz1-nnz0<1)continue;
+					// find the length of the subrow i in the submatrix
+					nnz1 = nnz0+rsb__nnz_split_coo_bsearch(JA+nnz0,submatrix->coff+submatrix->nc,nnz1-nnz0);
+					// check
+					RSB_C2R_ASSERT(JA[nnz0+0]>=submatrix->coff);
+					// skip line if empty in the submatrix
+					if(nnz1-nnz0<1)continue;
+					// nnz0..nnz1 delimit the nonempty subrow i in the submatrix
+//					RSB_INFO("i:%d, %d..%d -> %d\n",i,nnz0,nnz1-1,submatrix->nzoff+dnnz);
+					// checks
+//					RSB_C2R_ASSERT(IL[i-submatrix->roff]>=IB[i]);
+//					RSB_C2R_ASSERT(IL[i-submatrix->roff]<=IB[i+1]);
+//					RSB_C2R_ASSERT(IL[i-submatrix->roff+1]>=IB[i+1]);
+//					RSB_C2R_ASSERT(nnz0>=IL[i-submatrix->roff]);
+//					RSB_C2R_ASSERT(nnz1<=IL[i-submatrix->roff+1]);
+				       	RSB_C2R_ASSERT(nnz1<=IB[i+1]);
+					RSB_C2R_ASSERT(JA[nnz0+0]>=submatrix->coff);
+					RSB_C2R_ASSERT(JA[nnz1-1]< submatrix->coff+submatrix->nc);
+					// perform the copy
+					RSB_A_MEMCPY_SMALL(WA,VA,submatrix->nzoff+dnnz,nnz0,nnz1-nnz0,el_size);
+					//RSB_COA_MEMCPY(WA,JA,submatrix->nzoff+dnnz,nnz0,nnz1-nnz0);
+					// update the actual offset in the destination array
+					dnnz += nnz1-nnz0;
+				}
+			}
+			if(dnnz!=submatrix->nnz)
+			{
+				RSB_ERROR("@%d,%d: found %d, should have found %d\n",
+						submatrix->roff, submatrix->coff, dnnz,submatrix->nnz);
+				RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_INTERNAL_ERROR);
+			}
+			#pragma omp critical (rsb_coo2rsb_nzinc_crs)
+			{tdnnz += dnnz;}
+		}
+	}
+//gerr:
+	RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+#if   !defined(__xlC__)
+	/* FIXME: xlc does not allow this, but we have experienced problems without it */
+	#pragma omp barrier
+#endif /* __xlC__ */
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_NL);
+	}
+
+	if(tdnnz!=nnz)
+	{
+		RSB_ERROR("found %d, should have found %d\n", tdnnz,nnz);
+		errval = RSB_ERR_INTERNAL_ERROR;
+	       	goto err;
+	}
+
+#if RSB_WANT_VERBOSE_TIMINGS
+	pmt -= rsb_time();
+#endif /* RSB_WANT_VERBOSE_TIMINGS */
+	RSB_A_MEMCPY_parallel(VA,WA,0,0,nnz,el_size);
+#if RSB_WANT_VERBOSE_TIMINGS
+	pmt += rsb_time();
+#endif /* RSB_WANT_VERBOSE_TIMINGS */
+no_va_cp:
+
+	tdnnz = 0;
+	#pragma omp parallel for schedule(static,1) reduction(|:errval)  shared(tdnnz,submatricesp,IB) RSB_NTC 
+	for(smi=0;smi<cmc;++smi)
+	{
+		struct rsb_mtx_t * submatrix = submatricesp[smi];
+		if(!RSB_DO_FLAG_HAS(submatrix->flags,RSB_FLAG_QUAD_PARTITIONING))
+		{
+			rsb_coo_idx_t oll,nll;
+			rsb_coo_idx_t i;
+			rsb_coo_idx_t*IR = submatrix->bpntr;
+			rsb_coo_idx_t*IL = submatrix->bindx;
+			rsb_nnz_idx_t dnnz = 0;
+
+			//if(!RSB_DO_TOOFEWNNZFORRCSR(submatrix->nnz,submatrix->nr))
+			if(RSB_DO_ENOUGHNNZFORINDEXBASEDBUILD(submatrix) && !rsb__is_coo_matrix(submatrix) && IL && IR)
+			{
+				if(RSB_C2R_IF_VERBOSE)
+				RSB_INFO("CSR:%d/%d:%d..%d\n",smi,cmc,submatrix->nzoff,submatrix->nzoff+submatrix->nnz);
+				submatrix->bpntr = IA+submatrix->nzoff;
+				RSB_C2R_ASSERT(IL); RSB_C2R_ASSERT(IR);
+				RSB_C2R_ASSERT(IR< IA+submatrix->nzoff+submatrix->nnz);
+				RSB_C2R_ASSERT(IL< IA+submatrix->nzoff+submatrix->nnz);
+				RSB_C2R_ASSERT(IR>=IA+submatrix->nzoff);
+				RSB_C2R_ASSERT(IL>=IA+submatrix->nzoff);
+				for(dnnz=0,i=0;RSB_LIKELY(i<submatrix->nr);dnnz += IR[i]-IL[i],++i)
+					RSB_COA_MEMCPY_SMALL(WA,JA,submatrix->nzoff+dnnz,IL[i],IR[i]-IL[i]);
+
+//				RSB_INFO("%d x %d (%d) @ %d, %d (rcsr)\n",submatrix->nr,submatrix->nc,submatrix->nnz,submatrix->roff,submatrix->coff);
+				if(dnnz!=submatrix->nnz)
+				{
+					RSB_ERROR("@%d,%d: found %d, should have found %d\n",
+							submatrix->roff, submatrix->coff, dnnz,submatrix->nnz);
+					RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_INTERNAL_ERROR);
+				}
+				RSB_C2R_ASSERT(IL); RSB_C2R_ASSERT(IR);
+
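+				// Convert the per-row extents (IL[i],IR[i]) into a local CSR row pointer
+				// array (a prefix sum of the row lengths); e.g. (illustrative) row
+				// lengths {2,0,3} yield bpntr = {0,2,2,5}.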
+				oll = IR[0]-IL[0];
+				submatrix->bpntr[0] = 0;
+				for(i=1;RSB_LIKELY(i<submatrix->nr);++i)
+				{
+					nll = IR[i]-IL[i];
+					submatrix->bpntr[i] = submatrix->bpntr[i-1]+oll;
+					oll = nll;
+				}
+				submatrix->bpntr[submatrix->nr] = submatrix->bpntr[submatrix->nr-1]+oll;
+				if(submatrix->bpntr[submatrix->nr]!=submatrix->nnz)
+				{
+					RSB_ERROR("@%d,%d: found %d, should have found %d\n",
+					submatrix->roff,submatrix->coff,submatrix->bpntr[submatrix->nr],submatrix->nnz);
+					RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_INTERNAL_ERROR);
+				}
+			}
+			else
+			if(!RSB_DO_TOOFEWNNZFORCSR(submatrix->nnz,submatrix->nr) && !rsb__is_coo_matrix(submatrix))
+			{
+				if(RSB_C2R_IF_VERBOSE)
+				RSB_INFO("CSR:%d/%d:%d..%d\n",smi,cmc,submatrix->nzoff,submatrix->nzoff+submatrix->nnz);
+				oll = 0;
+				submatrix->bpntr = IA+submatrix->nzoff;
+				submatrix->bpntr[0] = 0;
+//				RSB_INFO("%d x %d (%d) @ %d, %d (rcsr)\n",submatrix->nr,submatrix->nc,submatrix->nnz,submatrix->roff,submatrix->coff);
+				for(i=submatrix->roff;RSB_LIKELY(i<submatrix->roff+submatrix->nr);++i)
+				{
+					//rsb_nnz_idx_t fel;
+					// offset of line i in the global line pointers array
+					rsb_nnz_idx_t nnz0 = IB[i];
+					// nnz0..nnz1 are the boundaries of line i
+					rsb_nnz_idx_t nnz1 = IB[i+1];
+					// check
+					RSB_C2R_ASSERT(nnz0>=IB[i]);
+					RSB_C2R_ASSERT(nnz1<=IB[i+1]);
+					// skip line if empty
+					if(nnz1-nnz0<1)goto is_empty_subrow;
+					// find first element of line i also in the submatrix
+					nnz0 += rsb__nnz_split_coo_bsearch(JA+nnz0,submatrix->coff,nnz1-nnz0);
+					// skip line if empty in the submatrix
+					if(nnz1-nnz0<1)goto is_empty_subrow;
+					// find the length of the subrow i in the submatrix
+					nnz1 = nnz0+rsb__nnz_split_coo_bsearch(JA+nnz0,submatrix->coff+submatrix->nc,nnz1-nnz0);
+					// check
+					RSB_C2R_ASSERT(JA[nnz0+0]>=submatrix->coff);
+					// skip line if empty in the submatrix
+					if(nnz1-nnz0<1)goto is_empty_subrow;
+					// nnz0..nnz1 delimit the nonempty subrow i in the submatrix
+//					RSB_INFO("i:%d, %d..%d -> %d\n",i,nnz0,nnz1-1,submatrix->nzoff+dnnz);
+					// checks
+				       	RSB_C2R_ASSERT(nnz1<=IB[i+1]);
+					RSB_C2R_ASSERT(JA[nnz0+0]>=submatrix->coff);
+					RSB_C2R_ASSERT(JA[nnz1-1]< submatrix->coff+submatrix->nc);
+					// convert row indices
+					nll = nnz1-nnz0;
+					if(i>submatrix->roff)
+						submatrix->bpntr[i-submatrix->roff] = submatrix->bpntr[i-submatrix->roff-1]+oll;
+					oll = nll;
+					// perform the copy
+					RSB_COA_MEMCPY_SMALL(WA,JA,submatrix->nzoff+dnnz,nnz0,nnz1-nnz0);
+					// update the actual offset in the destination array
+					dnnz += nnz1-nnz0;
+					continue;
+is_empty_subrow:
+					// convert row indices
+					nll = 0;
+					if(RSB_LIKELY(i>submatrix->roff))
+						submatrix->bpntr[i-submatrix->roff] = submatrix->bpntr[i-submatrix->roff-1]+oll;
+					oll = nll;
+				}
+				submatrix->bpntr[submatrix->nr] = submatrix->bpntr[submatrix->nr-1]+oll;
+				if(dnnz!=submatrix->nnz || submatrix->bpntr[submatrix->nr]!=submatrix->nnz)
+				{
+					RSB_ERROR("@%d,%d: found %d, and %d; should have found %d\n",
+							submatrix->roff, submatrix->coff,
+							dnnz, submatrix->bpntr[submatrix->nr],submatrix->nnz);
+					RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_INTERNAL_ERROR);
+				}
+			}
+			else
+			{
+				if(RSB_C2R_IF_VERBOSE)
+				RSB_INFO("COO:%d/%d:%d..%d\n",smi,cmc,submatrix->nzoff,submatrix->nzoff+submatrix->nnz);
+				oll = 0;
+				submatrix->bpntr = IA+submatrix->nzoff;
+				submatrix->bpntr[0] = 0;
+				for(i=submatrix->roff;RSB_LIKELY(i<submatrix->roff+submatrix->nr);++i)
+				{
+					//rsb_nnz_idx_t fel;
+					// offset of line i in the global line pointers array
+					rsb_nnz_idx_t nnz0 = IB[i];
+					// nnz0..nnz1 are the boundaries of line i
+					rsb_nnz_idx_t nnz1 = IB[i+1];
+					// check
+					RSB_C2R_ASSERT(nnz0>=IB[i]);
+					RSB_C2R_ASSERT(nnz1<=IB[i+1]);
+					// skip line if empty
+					if(nnz1-nnz0<1)continue;
+					// find first element of line i also in the submatrix
+					nnz0 += rsb__nnz_split_coo_bsearch(JA+nnz0,submatrix->coff,nnz1-nnz0);
+					// skip line if empty in the submatrix
+					if(nnz1-nnz0<1)continue;
+					// find the length of the subrow i in the submatrix
+					nnz1 = nnz0+rsb__nnz_split_coo_bsearch(JA+nnz0,submatrix->coff+submatrix->nc,nnz1-nnz0);
+					//check 
+					RSB_C2R_ASSERT(JA[nnz0+0]>=submatrix->coff);
+					// skip line if empty in the submatrix
+					if(nnz1-nnz0<1)continue;
+					// nnz0 .. nnz1 contain the nonempty subrow i in the submatrix
+//					RSB_INFO("i:%d, %d..%d -> %d\n",i,nnz0,nnz1-1,submatrix->nzoff+dnnz);
+					// checks
+				       	RSB_C2R_ASSERT(nnz1<=IB[i+1]);
+					RSB_C2R_ASSERT(JA[nnz0+0]>=submatrix->coff);
+					RSB_C2R_ASSERT(JA[nnz1-1]< submatrix->coff+submatrix->nc);
+					// convert row indices
+					// perform the copy
+					RSB_COA_MEMCPY_SMALL(WA,JA,submatrix->nzoff+dnnz,nnz0,nnz1-nnz0);
+					rsb__util_coo_array_set(IA+submatrix->nzoff+dnnz,nnz1-nnz0,i-submatrix->roff);
+					// update the actual offset in the destination array
+					dnnz += nnz1-nnz0;
+				}
+				if(dnnz!=submatrix->nnz )
+				{
+					RSB_ERROR("@%d,%d: found %d; should have found %d\n",
+							submatrix->roff, submatrix->coff, dnnz, submatrix->nnz);
+					RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_INTERNAL_ERROR);
+				}
+			}
+			rsb__util_coo_array_sub(WA+submatrix->nzoff,dnnz,submatrix->coff);
+			#pragma omp critical (rsb_coo2rsb_nzinc_crs)
+			{tdnnz += dnnz;}
+		}
+	}
+	#pragma omp barrier
+#if RSB_WANT_VERBOSE_TIMINGS
+	pmt -= rsb_time();
+#endif /* RSB_WANT_VERBOSE_TIMINGS */
+#if 1
+	RSB_COA_MEMCPY_parallel(JA,WA,0,0,nnz);
+#else
+	RSB_COA_MEMCPY(JA,WA,0,0,nnz);
+#endif
+
+#if RSB_WANT_VERBOSE_TIMINGS
+	pmt += rsb_time();
+#endif /* RSB_WANT_VERBOSE_TIMINGS */
+
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_NL);
+	}
+
+	if(tdnnz!=nnz)
+	{
+		RSB_ERROR("found %d, should have found %d\n", tdnnz,nnz);
+		errval = RSB_ERR_INTERNAL_ERROR;
+	       	goto err;
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
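+/*
+ * A minimal stand-alone sketch (hypothetical code, not part of librsb) of the
+ * row pointer construction used in both branches above: each submatrix's
+ * bpntr array is built from per-row lengths, carrying the previous row's
+ * length forward in `oll'.
+ */
+#if 0
+static void sketch_rowptr_from_lengths(const int *len, int nr, int *bpntr)
+{
+	/* assuming nr >= 1; len[r] is the length of row r */
+	int r, oll = 0;
+	bpntr[0] = 0;
+	for(r = 0; r < nr; ++r)
+	{
+		if(r > 0)
+			bpntr[r] = bpntr[r-1] + oll; /* close the previous row */
+		oll = len[r]; /* carried to the next iteration */
+	}
+	bpntr[nr] = bpntr[nr-1] + oll; /* bpntr[nr] equals the total count */
+	/* e.g. len = {2,0,3} yields bpntr = {0,2,2,5} */
+}
+#endif
+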
+struct rsb_mtx_t * rsb__allocate_recursive_sparse_matrix_from_row_major_coo(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnz, rsb_type_t typecode, const struct rsb_mtx_partitioning_info_t * pinfop, rsb_flags_t flags, rsb_err_t *errvalp)
+{
+	/**
+		\ingroup gr_experimental
+
+		Once finished, this should assemble a complete R-CSR/R-CSC matrix in place in the provided COO arrays.
+		Should use no more than ??? temporary memory.
+
+		TODO: get rid of pinfop.
+		TODO: interleave the matrix structs into the data arrays.
+		TODO: missing proper error handling.
+		TODO: guaranteed preallocation needed
+		TODO: may continue the line of rsb_allocate_new__ and plug it here.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * submatrices = NULL;
+	struct rsb_mtx_t ** submatricesp = NULL;
+	struct rsb_mtx_t * mtxAp = NULL;
+	long fcs = rsb__get_first_level_c_size();
+	long lcs = rsb__get_lastlevel_c_size();
+	long lcspt = rsb__get_lastlevel_c_size_per_thread();
+	long cbs = rsb__get_cache_block_byte_size();
+#if RSB_WANT_SUBDIVISION_FIXES_20101213
+	long wcbs = cbs;
+#else /* RSB_WANT_SUBDIVISION_FIXES_20101213 */
+	long wcbs = lcspt;
+#endif /* RSB_WANT_SUBDIVISION_FIXES_20101213 */
+#if RSB_WANT_VERBOSE_TIMINGS
+	rsb_time_t pmt = RSB_TIME_ZERO;
+#endif /* RSB_WANT_VERBOSE_TIMINGS */
+	rsb_coo_idx_t * IB = NULL;
+	rsb_coo_idx_t * IT = NULL;
+	rsb_coo_idx_t * IX = NULL;
+	rsb_coo_idx_t * WA = NULL;
+	rsb_submatrix_idx_t tmc = 0, smi = 0; /* max matrix count, submatrix index */
+	rsb_submatrix_idx_t cmc = 0, omc = 0; /* closed matrices count, open matrices count */
+	rsb_submatrix_idx_t lm = 0;           /* leaf matrices */
+	rsb_time_t dt = RSB_TIME_ZERO,eit = RSB_TIME_ZERO,est = RSB_TIME_ZERO,ect = RSB_TIME_ZERO,tat = RSB_TIME_ZERO,sat = RSB_TIME_ZERO;
+	const rsb_thread_t wet = rsb_get_num_threads(); /* want executing threads */
+	size_t el_size = RSB_SIZEOF(typecode);
+	rsb_coo_idx_t roff = 0;
+	rsb_coo_idx_t coff = 0;
+
+	tat = - rsb_time();
+#if RSB_ALLOW_EMPTY_MATRICES
+	if(!RSB_HAVE_GOOD_PARMS_FOR_EMPTY(m,k,nnz,flags))
+#endif /* RSB_ALLOW_EMPTY_MATRICES */
+	if(!RSB_HAVE_GOOD_PARMS_FOR_IN_PLACE_RCSR(m,k,nnz,flags))
+	{
+		RSB_ERROR(RSB_ERRM_MDNFARTS);
+	       	errval = RSB_ERR_BADARGS;
+	       	goto err;
+	}
+
+	if( fcs > lcs || fcs<1 || cbs<1 ) /* we allow declaration of 1 level of cache only */
+	{
+		/* TODO : find a reasonable solution, and declare it in ./rsbench -I, which should give some diagnostic about this */
+		RSB_ERROR("innermost cache size:%d, outermost cache size:%d, cache block size %d\n",fcs,lcs,cbs);
+	       	errval = RSB_ERR_FAILED_MEMHIER_DETECTION; 
+	       	goto err;
+	}
+
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_WANT_COLUMN_MAJOR_ORDER))
+	{
+		RSB_ERROR(RSB_ERRM_CMOINIY);
+	       	errval = RSB_ERR_UNIMPLEMENTED_YET;
+	       	goto err;
+	}
+
+#if RSB_WANT_DEBUG_VERBOSE_INTERFACE_NOTICE
+	if((rsb_global_session_handle.rsb_g_verbose_interface&2))
+			RSB_STDOUT("building a matrix with %d nnz, %d x %d\n",nnz,m,k);
+#endif /* RSB_WANT_DEBUG_VERBOSE_INTERFACE_NOTICE */
+
+	if(RSB_DO_FLAGS_EXTRACT_STORAGE(flags)==RSB_FLAG_NOFLAGS)
+	{
+		RSB_DO_FLAG_ADD(flags,RSB_FLAG_DEFAULT_STORAGE_FLAGS);
+	}
+
+	/* TODO: plug *here* the possibly forthcoming RSB_WANT_FASTER_EXPERIMENTAL_CONSTRUCTOR stuff */
+
+	tmc = RSB_SUBDIVISION_BUG_EXTRA+2*(((nnz+wcbs)*(RSB_SIZEOF(typecode)+2*sizeof(rsb_coo_idx_t)))/(wcbs)); /* TODO: clean this up */
+	tmc = RSB_MAX(1,(rsb_submatrix_idx_t)(rsb_global_session_handle.subdivision_multiplier*tmc));
+
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_RECURSIVE_MORE_LEAVES_THAN_THREADS))
+	if(wet>tmc)
+	{
+		tmc = wet;
+	}
+
+	submatrices = rsb__calloc(sizeof(struct rsb_mtx_t )*tmc);
+	submatricesp = rsb__calloc(sizeof(struct rsb_mtx_t*)*tmc);
+
+	if(!submatrices || !submatricesp)
+	{
+	       	errval = RSB_ERR_ENOMEM;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	for(smi=0;smi<tmc;++smi)
+		submatricesp[smi] = submatrices + smi;
+
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE)) /* TODO: this is *slow*, speed this up */
+		rsb__util_coo_array_from_fortran_indices(IA,nnz,RSB_BOOL_TRUE),
+		rsb__util_coo_array_from_fortran_indices(JA,nnz,RSB_BOOL_TRUE),
+		RSB_DO_FLAG_DEL(flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE);
+
+	dt = - rsb_time();
+	if((errval = rsb__do_cleanup_nnz(VA,IA,JA,nnz,roff,coff,m,k,&nnz,typecode,flags))!=RSB_ERR_NO_ERROR)
+		goto err;
+
+	ect = dt;
+	dt = - rsb_time();
+	ect -= dt;
+
+	if(!RSB_DO_FLAG_HAS(flags,RSB_FLAG_SORTED_INPUT))
+	if((errval = rsb_util_sort_row_major_inner(VA,IA,JA,nnz,m,k,typecode,flags))!=RSB_ERR_NO_ERROR)
+	{
+		RSB_ERROR(RSB_ERRM_ES);
+		goto err;
+	}
+	RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORTED_INPUT); /* TODO: is this needed ? */
+
+	/* we need duplicates removal, and this can only take place after sorting */
+#if RSB_WANT_VERBOSE_TIMINGS 
+	{rsb_nnz_idx_t dnnz = nnz - rsb_weed_out_duplicates(IA,JA,VA,nnz,typecode,flags);
+	RSB_INFO("duplicate removal: %zd - %zd = %zd\n",nnz,dnnz,nnz-dnnz);
+	nnz -= dnnz;}
+#else /* RSB_WANT_VERBOSE_TIMINGS */
+	nnz = rsb_weed_out_duplicates(IA,JA,VA,nnz,typecode,flags);
+#endif /* RSB_WANT_VERBOSE_TIMINGS */
+	dt += rsb_time();
+
+	est = dt;	/* by 'sorting' time (est) we DO NOT also mean the cleanup time (that is in ect) */
+
+	/* work vectors allocation */
+/*	IL = rsb__malloc(sizeof(rsb_coo_idx_t)*(m+1)); */
+	IT = rsb__malloc(sizeof(rsb_coo_idx_t)*(m+1));
+	IX = rsb__malloc(sizeof(rsb_coo_idx_t)*2*(m+1));
+	IB = rsb__malloc(sizeof(rsb_coo_idx_t)*(m+1));
+	if(/*  !IL ||*/ !IT || !IX || !IB)
+	{
+		RSB_ERROR(RSB_ERRM_ES);
+	       	errval = RSB_ERR_ENOMEM; goto err;
+	}
+
+	/* declaring this first matrix (smi == 0) as 'open' */
+	smi = 0; omc = 1;
+	/* set up the first mtxAp, linking it to the temporary split vector */
+	submatrices[smi].nzoff = 0;
+	submatrices[smi].roff = roff;
+	submatrices[smi].coff = coff;
+	submatrices[smi].bindx = IB;
+	submatrices[smi].bpntr = IB+1;
+	submatrices[smi].indptr = NULL;
+/*	RSB_DO_FLAG_ADD(flags,RSB_FLAG_QUAD_PARTITIONING); */
+	RSB_DO_FLAG_ADD(flags,RSB_FLAG_ASSEMBLED_IN_COO_ARRAYS);
+	if( (errval = rsb__set_init_flags_and_stuff(
+		&submatrices[smi],NULL,NULL,m,k,nnz,nnz,nnz,typecode,flags)
+				)!=RSB_ERR_NO_ERROR)
+	{
+		RSB_ERROR(RSB_ERRM_ES);
+		goto err;
+	}
+	
+	if(nnz==0)
+	{
+		/* a special case: we copy the array addresses because they may be non-NULL and contain duplicate/diagonal/etc. elements which we are obliged to free afterwards. */
+		++cmc; --omc;
+		mtxAp = &submatrices[0];
+		RSB_DO_FLAG_DEL(mtxAp->flags,RSB_FLAG_QUAD_PARTITIONING); /* necessary, too */
+		mtxAp->bpntr = IA;
+		mtxAp->bindx = JA;
+		mtxAp->indptr = NULL;
+		mtxAp->VA = VA;
+		goto arrays_done;
+	}
+	else
+		mtxAp = &submatrices[0];
+
+	sat = - rsb_time();
+	/* computing the first right-left pointer vectors */
+#if RSB_WANT_PARALLEL_SUBDIVISION 
+	if((errval = rsb_do_compute_vertical_split_parallel(IA,JA,roff,coff,m,k,0,0,nnz,IB,NULL,NULL,NULL,NULL,NULL,NULL))!=RSB_ERR_NO_ERROR)
+#else /* RSB_WANT_PARALLEL_SUBDIVISION */
+	if((errval = rsb_do_compute_vertical_split_parallel(IA,JA,roff,coff,m,k,0,0,nnz,IB,NULL,NULL,NULL,NULL,NULL,NULL))!=RSB_ERR_NO_ERROR)
+	/*if((errval = rsb_do_compute_vertical_split(IA,JA,roff,coff,m,k,0,0,nnz,IB,NULL,NULL,NULL,NULL,NULL,NULL))!=RSB_ERR_NO_ERROR) */
+#endif /* RSB_WANT_PARALLEL_SUBDIVISION */
+	{
+		RSB_ERROR(RSB_ERRM_ES);
+		goto err;
+	}
+
+	if(RSB_C2R_IF_VERBOSE)
+		RSB_INFO("beginning (%d x %d) @ %p with flags 0x%x (coo:%d, csr:%d), storage: 0x%x, max %d submatrices\n",
+			submatrices[smi].nr, submatrices[smi].nc, (const void*)&submatrices[smi], submatrices[smi].flags,
+			RSB_DO_FLAG_HAS(submatrices[smi].flags,RSB_FLAG_WANT_COO_STORAGE),
+			RSB_DO_FLAG_HAS(submatrices[smi].flags,RSB_FLAG_WANT_BCSS_STORAGE),
+			submatrices[smi].matrix_storage,tmc
+			);
+
+/*	if(!RSB_WANT_MORE_PARALLELISM || (RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_QUAD_PARTITIONING))) */ /* TODO */
+#if 1 /* the code is not yet ready for this */
+/* #if RSB_WANT_OMP_RECURSIVE_KERNELS */
+	errval = rsb_do_coo2rec_subdivide_parallel(VA,IA,JA,m,k,nnz,typecode,pinfop,flags,errvalp,submatricesp,mtxAp,IB,IX,IT,WA,cmc,omc,tmc,RSB_MAX(1,RSB_MIN(wet,nnz)),&cmc);
+#else
+	errval = rsb_do_coo2rec_subdivide(VA,IA,JA,m,k,nnz,typecode,pinfop,flags,errvalp,submatricesp,mtxAp,IB,IX,IT,WA,cmc,omc,tmc,wet,&cmc);
+#endif
+	RSB_CONDITIONAL_FREE(IX);
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+
+	eit = - rsb_time();
+	sat -= eit;
+
+	/*
+	RSB_CONDITIONAL_FREE(IL);
+	RSB_CONDITIONAL_FREE(IT);
+       	*/
+	/* WA is needed for the shuffle only (it is allocated after the IL,IM deallocation, so that the total memory need stays at max(WA,IL)) */
+	WA = rsb__malloc(RSB_MAX(sizeof(rsb_coo_idx_t),el_size)*nnz);
+	if(!WA)
+	{
+		RSB_ERROR(RSB_ERRM_FAOTAFS);
+	       	errval = RSB_ERR_ENOMEM; goto err;
+	}
+
+#if 0
+	/* after symbolic partitioning is done, we are ready to shuffle all of the arrays using the temporary storage and add an intermediate node */
+	RSB_INFO("assembling leaf %d -> %d x %d, %d\n",smi,submatricesp[smi]->nr,submatricesp[smi]->nc,submatricesp[smi]->nnz);
+	RSB_DO_ERROR_CUMULATE(errval,rsb_do_shuffle_left_and_right_rows(VA,IA,JA,m,0,nnz,0,typecode,IL,IM,WA));
+	RSB_DO_ERROR_CUMULATE(errval,rsb_do_shuffle_left_and_right_rows(VA,IA,JA,(m+1)/2,0,IL[(m+1)/2],0,typecode,IL,IR,WA));
+	RSB_DO_ERROR_CUMULATE(errval,rsb_do_shuffle_left_and_right_rows(VA,IA,JA,m,(m+1)/2,nnz,IL[(m+1)/2],typecode,IL,IR,WA));
+	/* TODO : should use a temporary vector, here. */
+#endif
+
+	for(smi=0;smi<cmc;++smi)
+		if(rsb__is_terminal_recursive_matrix(submatricesp[smi]))
+			++lm;
+
+/*	qsort(submatricesp+(cmc-lm),(size_t)(lm),sizeof(struct rsb_mtx_t*),&rsb_compar_rcsr_matrix_leftmost_first); */
+	qsort(submatricesp,(size_t)(cmc),sizeof(struct rsb_mtx_t*),&rsb_compar_rcsr_matrix_leftmost_first);
+	/* TODO: a priority queue would do the job, here */
+	for(smi=0;smi<cmc-lm;++smi)
+		if(rsb__is_terminal_recursive_matrix(submatricesp[smi]))
+		{
+			errval = RSB_ERR_INTERNAL_ERROR;
+			RSB_PERR_GOTO(err,RSB_ERRM_ANLSMIT);
+		}
+	for(smi=cmc-lm;smi<cmc;++smi)
+		if(!rsb__is_terminal_recursive_matrix(submatricesp[smi]))
+		{
+			errval = RSB_ERR_INTERNAL_ERROR;
+			RSB_PERR_GOTO(err,RSB_ERRM_ALSMINT);
+		}
+
+	errval = rsb_do_coo2rec_shuffle(VA,IA,JA,m,k,nnz,typecode,pinfop,flags,errvalp,submatricesp,mtxAp,IB,WA,cmc);
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_SEOWS);
+	}
+
+	rsb__do_set_in_place_submatrices_offsets(submatrices,cmc,VA,IA,JA,el_size);
+	
+/*	RSB_INFO("VA:%p, IA:%p, JA:%p\n",VA,IA,JA); */
+
+	if(RSB_C2R_IF_VERBOSE)
+		RSB_INFO("IA? :%p / %p\n",(const void*)IA,
+			(const void*)(rsb__do_get_first_submatrix(mtxAp)->bpntr-
+			(rsb__do_get_first_submatrix(mtxAp)->nr+1))
+/*			rsb__do_get_first_submatrix(mtxAp)->roff-
+  			((submatricesp[0])->nr+1) */
+			);
+
+	/* after shuffling, the last vectors conversion happens and we are done. */
+arrays_done:
+	eit += rsb_time();
+
+	/* first matrix is always root (even if a CSR one) */
+	RSB_DO_FLAG_DEL(submatrices[0].flags,RSB_FLAG_NON_ROOT_MATRIX);
+	#if RSB_EXPERIMENTAL_SHOULD_TRAVERSE_RECURSIVE_MATRICES_AS_BLOCKS
+	if(!(submatrices[0].flags & RSB_FLAG_NON_ROOT_MATRIX))
+	{
+		submatrices[0].all_leaf_matrices = NULL;
+		errval = rsb__get_array_of_leaf_matrices(&submatrices[0],&(submatrices[0].all_leaf_matrices),&submatrices[0].all_leaf_matrices_n);
+		if(RSB_SOME_ERROR(errval))
+			goto err;
+	}
+	else
+	{
+		/* this is a non root matrix */
+		submatrices[0].all_leaf_matrices = NULL;
+		submatrices[0].all_leaf_matrices_n = 0;
+	}
+	#endif /* RSB_EXPERIMENTAL_SHOULD_TRAVERSE_RECURSIVE_MATRICES_AS_BLOCKS */
+
+#if RSB_WANT_VERBOSE_TIMINGS
+	rsb_time_t hct = - rsb_time();
+#endif /* RSB_WANT_VERBOSE_TIMINGS */
+	if(
+		/* RSB_DO_FLAG_HAS(flags,RSB_FLAG_USE_HALFWORD_INDICES_COO)
+		       ||	RSB_DO_FLAG_HAS(flags,RSB_FLAG_USE_HALFWORD_INDICES_CSR)
+		       ||	RSB_DO_FLAG_HAS(flags,RSB_FLAG_WANT_COO_STORAGE)*/
+		       	RSB_DO_FLAG_HAS(flags,RSB_FLAG_USE_HALFWORD_INDICES)
+		)
+#if RSB_WANT_MORE_PARALLELISM 
+		RSB_DO_ERROR_CUMULATE(errval,rsb_do_switch_fresh_recursive_matrix_to_halfword_storages_parallel(mtxAp));
+#else /* RSB_WANT_MORE_PARALLELISM */
+		RSB_DO_ERROR_CUMULATE(errval,rsb_do_switch_fresh_recursive_matrix_to_halfword_storages(mtxAp));
+#endif /* RSB_WANT_MORE_PARALLELISM */
+	else
+	{
+		if(RSB_C2R_IF_VERBOSE)
+			RSB_INFO("no  RSB_FLAG_USE_HALFWORD_INDICES flag\n");
+	}
+	RSB_DO_ERROR_CUMULATE(errval,rsb_do_compute_bounded_boxes(mtxAp));
+
+#if RSB_WANT_VERBOSE_TIMINGS
+	hct += rsb_time();
+#endif /* RSB_WANT_VERBOSE_TIMINGS */
+
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	tat += rsb_time();
+
+	mtxAp->sat = sat;
+	mtxAp->ect = ect;
+	mtxAp->eit = eit;
+	mtxAp->est = est;
+	mtxAp->pet = RSB_TIME_ZERO;
+	mtxAp->rpt = RSB_TIME_ZERO;
+	mtxAp->tat = tat;
+
+	/* some statistics */
+#if RSB_WANT_VERBOSE_TIMINGS
+	RSB_INFO("analyzed arrays in %g s\n",sat);
+	RSB_INFO("cleaned-up arrays in %g s\n",ect);
+	RSB_INFO("sorted arrays in %g s\n",est);
+	RSB_INFO("computed partitions in %g s\n",mtxAp->cpt);
+	RSB_INFO("shuffled partitions in %g s\n",eit);
+	RSB_INFO(" (of which %g s for parallel memcpy and %g for halfword conversion)\n",pmt,hct);
+#endif /* RSB_WANT_VERBOSE_TIMINGS */
+	mtxAp->cpt = 0;/* cpt is contained in sat, so should not be counted here! */
+#if RSB_STORE_IDXSA
+	mtxAp->idxsa = rsb__get_index_storage_amount(mtxAp);
+#endif
+
+#if 0
+  	RSB_INFO("got %d matrices (%d leaves)\n",cmc,lm);
+
+  	if( !rsb__mtx_chk(mtxAp) )
+  	{
+  		errval = RSB_ERR_INTERNAL_ERROR;
+  		RSB_PERR_GOTO(derr,RSB_ERRM_NL);
+  	}
+
+	errval = rsb__do_switch_recursive_matrix_to_fullword_storage(mtxAp);
+
+	errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_rcoo(mtxAp);
+ok:
+#endif
+	goto noerr;
+
+	/* TODO: missing proper error handling here ! --- destroy any allocated data for the matrix (e.g.: submatrices, .. ) */
+	if(mtxAp)
+		rsb__do_mtx_free(mtxAp);
+err:
+	mtxAp = NULL;
+noerr:
+	if(RSB_SOME_ERROR(errval))
+		rsb__do_perror(NULL,errval);
+	RSB_CONDITIONAL_FREE(IB);
+	RSB_CONDITIONAL_FREE(IT);
+	RSB_CONDITIONAL_FREE(IX);
+/*	RSB_CONDITIONAL_FREE(IL); */
+	RSB_CONDITIONAL_FREE(WA);
+	RSB_CONDITIONAL_FREE(submatricesp);
+	if(!mtxAp)RSB_CONDITIONAL_FREE(submatrices);
+	RSB_CONDITIONAL_ERRPSET(errvalp,errval);
+#if RSB_WANT_DEBUG_VERBOSE_INTERFACE_NOTICE
+	if(mtxAp && (rsb_global_session_handle.rsb_g_verbose_interface&2))
+			RSB_STDOUT_MATRIX_SUMMARY(mtxAp),RSB_STDOUT("\n");
+#endif /* RSB_WANT_DEBUG_VERBOSE_INTERFACE_NOTICE */
+	return mtxAp;
+}
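+
+/*
+ * A hypothetical direct call of the in-place constructor above (a sketch,
+ * not from the upstream sources), assuming arrays obtained via the library
+ * allocator, since the resulting matrix takes ownership of them.
+ */
+#if 0
+static struct rsb_mtx_t * sketch_build_2x2_diagonal(rsb_err_t *errvalp)
+{
+	const rsb_nnz_idx_t nnz = 2;
+	rsb_coo_idx_t *IA = rsb__malloc(sizeof(rsb_coo_idx_t)*nnz);
+	rsb_coo_idx_t *JA = rsb__malloc(sizeof(rsb_coo_idx_t)*nnz);
+	double *VA = rsb__malloc(sizeof(double)*nnz);
+
+	if(!IA || !JA || !VA)
+		return NULL; /* (a sketch: leaks on partial allocation failure) */
+	IA[0] = 0; JA[0] = 0; VA[0] = 1.0; /* (0,0) <- 1.0 */
+	IA[1] = 1; JA[1] = 1; VA[1] = 2.0; /* (1,1) <- 2.0 */
+	return rsb__allocate_recursive_sparse_matrix_from_row_major_coo(
+		VA, IA, JA, 2, 2, nnz, RSB_NUMERICAL_TYPE_DOUBLE,
+		NULL /* pinfop: to be removed, as per the TODO above */,
+		RSB_FLAG_DEFAULT_MATRIX_FLAGS, errvalp);
+}
+#endif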
+
+rsb_err_t rsb__init_coo_struct_from_rsb(const struct rsb_mtx_t *mtxAp, struct rsb_coo_matrix_t *coop)
+{
+	/* FIXME: unfinished; shall be replaced by direct use of RSB_INIT_COO_FROM_MTX, or vice versa */
+	if(!mtxAp || !coop)	
+	{
+		return RSB_ERR_BADARGS;
+	}
+
+	RSB_INIT_COO_FROM_MTX(coop,mtxAp);
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__project_rsb_to_coo(struct rsb_mtx_t *mtxAp, struct rsb_coo_matrix_t *coop)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(!mtxAp || !coop)	
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	errval = rsb__init_coo_struct_from_rsb(mtxAp,coop);
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+       	}
+
+	RSB_BIND_COO_TO_MTX(coop,mtxAp);
+err:
+	return errval;
+}
+
+/* @endcond */
diff --git a/rsb_coo2rec.h b/rsb_coo2rec.h
new file mode 100644
index 0000000..89fbb9b
--- /dev/null
+++ b/rsb_coo2rec.h
@@ -0,0 +1,78 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @brief  Recursive Sparse matrices assembling code.
+ * @author Michele Martone
+ * */
+#ifndef RSB_COO2RCSR_H_INCLUDED
+#define RSB_COO2RCSR_H_INCLUDED
+#include "rsb_common.h"
+#define RSB_HAVE_GOOD_PARMS_FOR_RCSR(R,C,NNZ,FLAGS) \
+	((!(NNZ==0)) || (RSB_DO_FLAG_HAS(FLAGS,RSB_FLAG_UNIT_DIAG_IMPLICIT))) 
+
+#define RSB_HAVE_GOOD_PARMS_FOR_EMPTY(R,C,NNZ,FLAGS) ( \
+		((NNZ==0) && RSB_DO_FLAG_HAS(FLAGS,RSB_FLAG_WANT_COO_STORAGE)) \
+		)
+#define RSB_HAVE_GOOD_PARMS_FOR_IN_PLACE_RCSR(R,C,NNZ,FLAGS) ( \
+	!RSB_DO_FLAG_HAS(FLAGS,RSB_FLAG_WANT_COLUMN_MAJOR_ORDER) && \
+	((!RSB_DO_TOOFEWNNZFORRCSR(NNZ,R)) || \
+	( \
+		(RSB_DO_FLAG_HAS(FLAGS,RSB_FLAG_WANT_COO_STORAGE)) || \
+		(RSB_DO_FLAGS_EXTRACT_STORAGE(FLAGS)==RSB_FLAG_NOFLAGS) \
+	)) && \
+	RSB_HAVE_GOOD_PARMS_FOR_RCSR(R,C,NNZ,FLAGS) \
+	)
+#define RSB_DO_TOOFEWNNZFORRCSR(NNZ,M) ((NNZ)<(2*(M+1)))	/*  */
+#define RSB_DO_TOOFEWNNZFORCSR(NNZ,M)  ((NNZ)<(1*(M+1)))	/*  */
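+/*
+ * A sketch (hypothetical code, not part of librsb) of how the feasibility
+ * macros above may be used: in-place RCSR needs room to rebuild the index
+ * arrays in place, hence NNZ >= 2*(M+1), while plain CSR needs only
+ * NNZ >= M+1.
+ */
+#if 0
+static int sketch_fits_in_place_rcsr(rsb_nnz_idx_t nnz, rsb_coo_idx_t m)
+{
+	/* e.g. nnz=10, m=4 fits, since 10 >= 2*(4+1) */
+	return !RSB_DO_TOOFEWNNZFORRCSR(nnz, m);
+}
+#endif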
+struct rsb_mtx_t * rsb__allocate_recursive_sparse_matrix_from_row_major_coo(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnz, rsb_type_t typecode, const struct rsb_mtx_partitioning_info_t * pinfop, rsb_flags_t flags, rsb_err_t *errvalp);
+void rsb__do_set_in_place_submatrices_offsets(struct rsb_mtx_t *RSB_RESTRICT submatrices, rsb_submatrix_idx_t cmc, rsb_char_t *RSB_RESTRICT  VA, rsb_coo_idx_t *RSB_RESTRICT  IA, rsb_coo_idx_t *RSB_RESTRICT JA, size_t el_size);
+rsb_err_t rsb__do_switch_recursive_matrix_to_fullword_storage(struct rsb_mtx_t * mtxAp);
+rsb_err_t rsb__project_rsb_to_coo(struct rsb_mtx_t *mtxAp, struct rsb_coo_matrix_t *coop);
+rsb_err_t rsb__init_coo_struct_from_rsb(const struct rsb_mtx_t *mtxAp, struct rsb_coo_matrix_t *coop);
+rsb_err_t rsb__compute_bounded_box(struct rsb_mtx_t * mtxAp);
+#define RSB_STDOUT_MATRIX_SUMMARY_ARGS(M) RSB_PRINTF_MTX_SUMMARY_ARGS(M)
+#define RSB_STDOUT_MATRIX_SUMMARY(M)  RSB_STDOUT(RSB_STDOUT_MATRIX_SUMMARY_ARGS(M))
+
+#define RSB_FPRINTF_MATRIX_SUMMARY(FP,M)  RSB_FPRINTF(FP,RSB_STDOUT_MATRIX_SUMMARY_ARGS(M))
+
+#define RSB_PRINTF_COO_MATRIX_SUMMARY_ARGS(CM)  \
+			"(%d x %d)[%p] @ (? , ?) (%d nnz, %.2lg nnz/r) flags 0x??, typecode: %x:",		\
+				(CM)->nr, (CM)->nc, (const void*)(CM),								\
+			       	(CM)->nnz,									\
+			       	((double)(CM)->nnz)/(CM)->nr,							\
+				(CM)->typecode
+
+#define RSB_STDOUT_COO_MATRIX_SUMMARY_ARGS(M) RSB_PRINTF_COO_MATRIX_SUMMARY_ARGS(M)
+
+#if RSB_ALLOW_STDOUT
+#define RSB_ERROR_MATRIX_SUMMARY(M)  RSB_STDOUT(RSB_STDOUT_MATRIX_SUMMARY_ARGS(M))
+#define RSB_STDOUT_COO_MATRIX_SUMMARY(CM)  RSB_STDOUT(RSB_STDOUT_COO_MATRIX_SUMMARY_ARGS(CM))
+#define RSB_INFO_MATRIX_SUMMARY  RSB_STDOUT_MATRIX_SUMMARY
+#else /* RSB_ALLOW_STDOUT */
+#define RSB_INFO_MATRIX_SUMMARY(M)  RSB_NULL_COMMA_STATEMENT_FOR_ZEN_HAPPINESS  
+#define RSB_ERROR_MATRIX_SUMMARY(M) RSB_NULL_COMMA_STATEMENT_FOR_ZEN_HAPPINESS  
+#endif /* RSB_ALLOW_STDOUT */
+
+#endif /* RSB_COO2RCSR_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_coo_check.c b/rsb_coo_check.c
new file mode 100644
index 0000000..b8fe48a
--- /dev/null
+++ b/rsb_coo_check.c
@@ -0,0 +1,399 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains functions for COO handling and check.
+ * */
+#include "rsb_internals.h"
+
+rsb_err_t rsb__util_is_valid_coo_array(const rsb_coo_idx_t * p, rsb_nnz_idx_t n)
+{
+	/*!
+		\ingroup gr_internals
+		FIXME : document.
+	*/
+	register rsb_nnz_idx_t k;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	RSB_DEBUG_ASSERT(p);
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(n));
+
+	for(k=0;RSB_LIKELY(k<n);++k)
+		if(!RSB_IS_VALID_COO_INDEX(p[k]))
+		{
+			errval = RSB_ERR_GENERIC_ERROR;
+			RSB_PERR_GOTO(err,"%zd : %zd\n",(rsb_printf_int_t)k,(rsb_printf_int_t)p[k]);
+		}
+err:
+		return errval;
+}
+
+rsb_err_t rsb__util_are_valid_coo_arrays(const rsb_coo_idx_t * p, const rsb_coo_idx_t * q, rsb_nnz_idx_t n)
+{
+	/*!
+		\ingroup gr_internals
+		FIXME : document.
+	*/
+	return
+		(rsb__util_is_valid_coo_array(p,n)==RSB_ERR_NO_ERROR && 
+		 rsb__util_is_valid_coo_array(q,n)==RSB_ERR_NO_ERROR ) ?
+		RSB_ERR_NO_ERROR : RSB_ERR_GENERIC_ERROR;
+}
+
+rsb_err_t rsb__util_is_sorted_coo_as_row_major(const void *VA, const rsb_coo_idx_t *IA, const rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_type_t typecode, const struct rsb_mtx_partitioning_info_t * pinfop, rsb_flags_t flags )
+{
+	/*!
+		\ingroup gr_internals
+	*/
+	RSB_DO_FLAG_DEL(flags,RSB_INTERNAL_FLAG_CSR_SORTING_MASK);	/* NEW */
+
+	if(!RSB_DO_FLAG_HAS(flags,RSB_FLAG_WANT_COLUMN_MAJOR_ORDER))
+		return rsb__util_is_sorted_coo(VA,IA,JA,nnz,typecode,pinfop,flags);
+	else
+		return rsb__util_is_sorted_coo(VA,JA,IA,nnz,typecode,pinfop,flags);
+}
+
+rsb_err_t rsb__util_is_sorted_coo(const void *VA, const rsb_coo_idx_t *MIndx, const rsb_coo_idx_t * mIndx, rsb_nnz_idx_t nnz, rsb_type_t typecode, const struct rsb_mtx_partitioning_info_t * pinfop, rsb_flags_t flags )
+{
+	/*!
+	 * \ingroup gr_internals
+	 * A function to check if a nonzeros array is block-sorted.
+	 *
+	 *	When calling this routine, make sure
+	 *	MIndx==IA
+	 *	mIndx==JA
+	 *	when !(flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)
+	 *	and
+	 *	MIndx==JA
+	 *	mIndx==IA
+	 *	when flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER
+	 *	(the convention followed by rsb__util_is_sorted_coo_as_row_major() above).
+	 *
+	 *  FIXME : does it work with recursive ordering ?
+	 * \return 0 if sorted, an error code in the other cases.
+	 */
+	/* Note : are you sure this is the only check for all setups ? */
+	/* Note : this algorithm can be improved in plenty of ways */
+	rsb_coo_idx_t i = 0,j = 0;
+	rsb_nnz_idx_t k = 0;
+//	const rsb_coo_idx_t *IA = NULL,*JA = NULL;
+	const rsb_coo_idx_t *Mbndx = NULL,*mbndx = NULL;
+	rsb_coo_idx_t Mdim = 0,mdim = 0;
+	rsb_bool_t want_recursive_sort = flags & RSB_FLAG_OBSOLETE_BLOCK_ASYMMETRIC_Z_SORTING;
+	rsb_coo_idx_t Mb = 1; rsb_coo_idx_t Kb = 1;
+	
+	if(!VA || !mIndx || !MIndx || nnz < 0 )
+		return RSB_ERR_BADARGS;
+	if( 0==(RSB_NUMERICAL_TYPE_SIZE(typecode)) )
+		return RSB_ERR_UNSUPPORTED_TYPE;
+
+#if !RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS
+	if(!pinfop)
+		return RSB_ERR_BADARGS;
+#endif /* RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS */
+	if(nnz<2)
+		return RSB_ERR_NO_ERROR;
+
+	if(!(flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER))
+	{
+		if(pinfop)
+		{
+			Mbndx = pinfop->rpntr;
+			mbndx = pinfop->cpntr;
+			Mdim = pinfop->M_b;
+			mdim = pinfop->K_b;
+		}
+		else
+		{
+			Mbndx = MIndx;
+			mbndx = mIndx;
+		}
+
+//		JA = mIndx;
+//		IA = MIndx;
+	}
+	else
+	{
+		if(pinfop)
+		{
+			Mbndx = pinfop->cpntr;
+			mbndx = pinfop->rpntr;
+			Mdim = pinfop->K_b;
+			mdim = pinfop->M_b;
+		}
+		else
+		{
+			Mbndx = MIndx;
+			mbndx = mIndx;
+		}
+
+//		IA = mIndx;
+//		JA =MIndx;
+	}
+
+        if(pinfop && ( !pinfop->rpntr || !pinfop->cpntr ) )
+        {
+                //errval = RSB_ERR_INTERNAL_ERROR;
+                goto oops;
+        }
+	
+	if(rsb__have_fixed_blocks_matrix_flags(flags) && mbndx && Mbndx)
+	{
+			/* FIXME */
+			Kb = mbndx[1]-mbndx[0];
+			Mb = Mbndx[1]-Mbndx[0];
+	}
+
+	if( want_recursive_sort && !rsb__have_fixed_blocks_matrix_flags(flags) )
+	{
+		return RSB_ERR_UNIMPLEMENTED_YET;
+	}
+	
+	if( want_recursive_sort )
+	{
+		/* FIXME : does not handle column transposition */
+		int ml = 0, kl = 0;
+
+		rsb_coo_idx_t Idim = (pinfop->nr+(Mb-1))/Mb;
+		rsb_coo_idx_t Jdim = (pinfop->nc+(Kb-1))/Kb;
+
+		if((flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER))
+		{
+			Idim = (pinfop->nc+(Mb-1))/Mb;
+			Jdim = (pinfop->nr+(Kb-1))/Kb;
+		}
+
+		while( (1<<ml) < Idim ) ml++;
+		while( (1<<kl) < Jdim ) kl++;
+
+		for( k=0;k<nnz-1;++k)
+#if 0
+		/* this is not the same! */
+		rsb__asymmetric_z_index( 
+			RSB_GET_BLOCK_ROW_FOR_NZ_(MIndx+k+0,Mbndx,Mdim),
+			RSB_GET_BLOCK_COL_FOR_NZ_(mIndx+k+0,mbndx,mdim), pinfop->nr, pinfop->nc, ml, kl )
+			>
+		rsb__asymmetric_z_index( 
+			RSB_GET_BLOCK_ROW_FOR_NZ_(MIndx+k+1,Mbndx,Mdim),
+			RSB_GET_BLOCK_COL_FOR_NZ_(mIndx+k+1,mbndx,mdim), pinfop->nr, pinfop->nc, ml, kl ))
+#else
+		if(
+			rsb__asymmetric_z_index((MIndx[k+0]/Mb),(mIndx[k+0]/Kb),Idim,Jdim,ml,kl)>
+			rsb__asymmetric_z_index((MIndx[k+1]/Mb),(mIndx[k+1]/Kb),Idim,Jdim,ml,kl))
+#endif
+			goto oops;
+		goto ok;
+	}
+#if 0
+	{
+		rsb_nnz_idx_t i;
+		for(i=0;i<nnz-1;++i)
+		{
+			if(mIndx[i]>mIndx[i+1]||(mIndx[i]==mIndx[i+1]&&MIndx[i]>MIndx[i+1]))
+			{
+				RSB_INFO("nnz %d : (%d,%d)\n",i  ,mIndx[i  ],MIndx[i  ]);
+				RSB_INFO("nnz %d : (%d,%d)\n",i+1,mIndx[i+1],MIndx[i+1]);
+				return RSB_ERR_GENERIC_ERROR;
+			}
+		}
+	}
+	else
+#endif
+	{
+#if 1
+		/* NEW : NEED COMMENTS : FIXME */
+		if(nnz<1)
+			goto ok;
+		k = 0;
+		i = 0;j = 0;
+
+		if(!pinfop)/* 1x1 */
+		for( k=1;k<nnz;++k )
+		{
+/*			RSB_DEBUG_ASSERT( MIndx[k-1] >= 0 );
+			RSB_DEBUG_ASSERT( MIndx[k-1] <= MIndx[k] );
+			RSB_DEBUG_ASSERT(!( mIndx[k-1] > mIndx[k] && MIndx[k-1] >= MIndx[k] ));*/
+
+			if( MIndx[k-1] < 0 )
+			{
+				
+				RSB_STDERR("for k=%zd\n",(rsb_printf_int_t)(k-1));
+				RSB_STDERR("row index (%zd) is smaller than any one of ours\n",(size_t)MIndx[k-1]);
+				goto oops1;
+			}
+
+			if( MIndx[k-1] > MIndx[k] )
+			{
+				RSB_STDERR("for k=%zd\n",(rsb_printf_int_t)(k-1));
+				RSB_STDERR("row index (%zd) is bigger than any one of ours\n",(size_t)MIndx[k-1]);
+				goto oops1;
+			}
+
+			if( mIndx[k-1] > mIndx[k] && MIndx[k-1] >= MIndx[k] )
+			{
+				RSB_STDERR("for k=%zd\n",(rsb_printf_int_t)(k-1));
+				RSB_STDERR("col index (%zd) is bigger than any one of ours\n",(size_t)mIndx[k-1]);
+				goto oops1;
+			}
+		}
+		else
+		for( k=0;k<nnz;++k )
+		{
+			rsb_blk_idx_t li = i;
+
+			if( MIndx[k] < Mbndx[0] )
+			{
+				RSB_STDERR("row index (%zd) is smaller than any one of ours (%zd)\n",(size_t)MIndx[k],(size_t)Mbndx[0]);
+				goto oops;
+			}
+
+			while( i<Mdim && MIndx[k] > Mbndx[i+1] )
+				++i;
+
+			if( MIndx[k] > Mbndx[i+1] )
+			{
+				RSB_STDERR("row index (%zd) is bigger than any one of ours (%zd)\n",(size_t)MIndx[k],(size_t)Mbndx[i+1]);
+				goto oops;
+			}
+
+			/* next block row index is ok */
+
+			if(i>li)
+				j = 0;	/* new block row */
+
+			if( mIndx[k] < mbndx[0] )
+			{
+				RSB_STDERR("col index (%zd) is smaller than any one of ours (%zd)\n",(size_t)mIndx[k],(size_t)mbndx[0]);
+				goto oops;
+			}
+
+			while( j<mdim && mIndx[k] > mbndx[j+1] )
+				++j;
+
+			if( mIndx[k] > mbndx[j+1] )
+			{
+				RSB_STDERR("col index (%zd) is bigger than any one of ours (%zd)\n",(size_t)mIndx[k],(size_t)mbndx[j+1]);
+				goto oops;
+			}
+		}
+#else
+		/* quite slow */
+		if(!(flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER))
+		{
+			JA = mIndx;
+			IA = MIndx;
+			for(i=0;i<pinfop->M_b;++i)
+			for(j=0;j<pinfop->K_b;++j)
+			{
+				if(k>=nnz)
+					goto k_nnz;/* 'premature' exit : empty last block */
+	
+				if(IA[k]<pinfop->rpntr[i] /* || ( IA[k]>=pinfop->rpntr[i] && JA[k]<pinfop->cpntr[j] )*/ )
+				{
+					RSB_ERROR("nnz %d : %d < %d (block row %d)\n",k,IA[k],pinfop->rpntr[i],i);
+					RSB_ERROR("nnz %d : %d <?%d (block col %d)\n",k, JA[k],pinfop->cpntr[i],j);
+					goto oops;/* this block should have been seen before */
+				}
+	
+				/* if any, scan nnz's in this block */
+				while(	k<nnz &&
+					JA[k]>=pinfop->cpntr[j] && JA[k]< pinfop->cpntr[j+1] &&
+					IA[k]>=pinfop->rpntr[i] &&  IA[k]< pinfop->rpntr[i+1] ) ++k;
+				/* to the next block, even if this did not match */
+			}
+		}
+		else
+		{
+			IA = mIndx;
+			JA = MIndx;
+			for(j=0;j<pinfop->K_b;++j)
+			for(i=0;i<pinfop->M_b;++i)
+			{
+				if(k>=nnz)
+					goto k_nnz;/* 'premature' exit : empty last block */
+
+				if(JA[k]<pinfop->cpntr[j] /* || ( IA[k]>=pinfop->rpntr[i] && JA[k]<pinfop->cpntr[j] )*/ )
+					goto oops;/* this block should have been seen before */
+	
+				/* if any, scan nnz's in this block */
+				while(	k<nnz &&
+					IA[k]>=pinfop->rpntr[i] && IA[k]< pinfop->rpntr[i+1] &&
+					JA[k]>=pinfop->cpntr[j] &&  JA[k]< pinfop->cpntr[j+1] ) ++k;
+				/* to the next block, even if this did not match */
+			}
+		}
+#endif
+	}
+	goto k_nnz;
+
+k_nnz:
+	if(k!=nnz)
+	{
+		RSB_STDERR("block sorting does not seem to be ok:\n");
+		RSB_STDERR("empty last block ?\n");
+		RSB_STDERR("element %zd %zd encountered at %zd'th (out of %zd) nnz's block (%zd %zd) (%zd - %zd , %zd - %zd)\n",
+		(size_t)MIndx[k],(size_t)mIndx[k],(size_t)k,(size_t)nnz, (size_t)i,(size_t)j,(size_t)Mbndx[i],(size_t)Mbndx[i+1]-1,(size_t)mbndx[j],(size_t)mbndx[j+1]-1);
+		goto err;
+	}
+	else
+	{
+		if(RSB_WANT_VERBOSE_MESSAGES)
+			RSB_STDERR("block sorting seems ok\n");
+			/* all ok */
+	}
+ok:
+	return RSB_ERR_NO_ERROR;
+oops:
+		RSB_ERROR("block sorting does not seem to be ok:\n");
+		RSB_ERROR("resurgent block ?\n");
+		RSB_ERROR("element %zd %zd encountered at %zd'th (out of %zd) nnz's block (%zd %zd) (%zd - %zd , %zd - %zd)\n",
+		(size_t)MIndx[k],(size_t)mIndx[k],(size_t)k,(size_t)nnz, (size_t)i,(size_t)j,(size_t)Mbndx[i],(size_t)Mbndx[i+1]-1,(size_t)mbndx[j],(size_t)mbndx[j+1]-1);
+	goto err;
+oops1:
+		RSB_ERROR("block sorting does not seem to be ok..\n");
+err:
+	return RSB_ERR_GENERIC_ERROR;
+}
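+
+/*
+ * A sketch (hypothetical code, not part of librsb) of the MIndx/mIndx
+ * convention documented above, on a toy row-major COO triple; this assumes
+ * a build configuration where pinfop may be NULL.
+ */
+#if 0
+static rsb_err_t sketch_check_row_major_sorted(void)
+{
+	/* (0,1),(1,0),(1,2): sorted by row first, column second */
+	rsb_coo_idx_t IA[] = {0,1,1};
+	rsb_coo_idx_t JA[] = {1,0,2};
+	double VA[] = {1.0,2.0,3.0};
+
+	/* row-major order: MIndx==IA (major), mIndx==JA (minor) */
+	return rsb__util_is_sorted_coo(VA, IA, JA, 3,
+			RSB_NUMERICAL_TYPE_DOUBLE, NULL, RSB_FLAG_NOFLAGS);
+}
+#endif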
+
+rsb_err_t rsb__util_is_valid_coo_struct(const struct rsb_coo_matrix_t*coop)
+{
+	/* FIXME: new, unfinished */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if((!coop) || (!RSB_IS_VALID_NNZ_INDEX(coop->nnz)) || (!RSB_IS_VALID_COO_INDEX(coop->nr)) || (!RSB_IS_VALID_COO_INDEX(coop->nc)))
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	if(RSB_MATRIX_UNSUPPORTED_TYPE(coop->typecode))
+	{
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	errval = rsb__util_are_valid_coo_arrays(coop->IA,coop->JA,coop->nnz);
+err:
+	return errval;
+}
+
+/* @endcond */
diff --git a/rsb_coo_check.h b/rsb_coo_check.h
new file mode 100644
index 0000000..7e0ff3c
--- /dev/null
+++ b/rsb_coo_check.h
@@ -0,0 +1,41 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains functions for COO handling.
+ * */
+
+#ifndef RSB_COO_CHECK_H_INCLUDED
+#define RSB_COO_CHECK_H_INCLUDED
+
+#include "rsb_internals.h"
+
+rsb_err_t rsb__util_is_valid_coo_array(const rsb_coo_idx_t * p, rsb_nnz_idx_t n);
+rsb_err_t rsb__util_are_valid_coo_arrays(const rsb_coo_idx_t * p, const rsb_coo_idx_t * q, rsb_nnz_idx_t n);
+rsb_err_t rsb__util_is_sorted_coo_as_row_major(const void *VA, const rsb_coo_idx_t *IA, const rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_type_t typecode, const struct rsb_mtx_partitioning_info_t * pinfop, rsb_flags_t flags );
+rsb_err_t rsb__util_is_sorted_coo(const void *VA, const rsb_coo_idx_t *MIndx, const rsb_coo_idx_t * mIndx, rsb_nnz_idx_t nnz, rsb_type_t typecode, const struct rsb_mtx_partitioning_info_t * pinfop, rsb_flags_t flags );
+rsb_err_t rsb__util_is_valid_coo_struct(const struct rsb_coo_matrix_t*coop);
+#endif /* RSB_COO_CHECK_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_coo_symm.c b/rsb_coo_symm.c
new file mode 100644
index 0000000..35dc2f3
--- /dev/null
+++ b/rsb_coo_symm.c
@@ -0,0 +1,98 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains functions for COO symmetry handling.
+ * */
+
+#include "rsb_internals.h"
+
+rsb_err_t rsb__reallocate_with_symmetry( rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, void **coo, rsb_nnz_idx_t * nnz, rsb_type_t typecode )
+{
+	/*!
+	 * \ingroup gr_internals
+	 * Assuming that for a symmetric matrix we are given
+	 * arrays containing only (i,j) pairs, with no (j,i) pairs at all (except for j=i),
+	 * we reallocate the arrays (if possible) and fill them in with the symmetric
+	 * elements, without introducing duplicates.
+	 *
+	 * note : this is a slow service/debug function, not a high performance one.
+	 * */
+	rsb_coo_idx_t * new_IA, *new_JA;
+	void * ncoo;
+	rsb_nnz_idx_t nnnz;
+	size_t i, odel = 0 /* off-diagonal elements */, el_size = RSB_NUMERICAL_TYPE_SIZE(typecode);
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!IA || !JA || !*IA || !*JA || !nnz || !*nnz || !el_size)
+		return RSB_ERR_BADARGS;
+	
+	nnnz = *nnz;
+	for(i=0;i<nnnz;++i)
+	{
+		if((*IA)[i]!=(*JA)[i])++odel;
+	}
+	nnnz += odel;
+
+	if(!odel)
+	{
+		return RSB_ERR_NO_ERROR;/* a diagonal matrix */
+	}
+
+	errval = rsb_util_coo_alloc(&ncoo,&new_IA,&new_JA,nnnz,typecode,RSB_BOOL_TRUE);
+	if(RSB_SOME_ERROR(errval))
+		return RSB_ERR_ENOMEM;
+		
+	RSB_COO_MEMCPY(ncoo,new_IA,new_JA,*coo,*IA,*JA,0,0,*nnz,el_size);
+
+	odel = 0;
+	for(i=0;i<*nnz;++i)
+	{
+		if((*IA)[i]!=(*JA)[i])
+		{
+			new_IA[*nnz+odel] = new_JA[i];
+			new_JA[*nnz+odel] = new_IA[i];
+/*			new_IA[*nnz+odel] = 1;
+			new_JA[*nnz+odel] = 1;*/
+			rsb_memcpy(((char*)ncoo) + (*nnz+odel) * el_size, ((const char*)ncoo) + i * el_size ,el_size);
+			++odel;
+			//RSB_ERROR("%d %d  ..\n",new_IA[i],new_JA[i]);
+		}
+	}
+
+	rsb__free(*IA);
+	rsb__free(*JA);
+	rsb__free(*coo);
+
+	*IA = new_IA;
+	*JA = new_JA;
+	*coo = ncoo;
+	*nnz += odel;
+
+	return errval;
+}
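+
+/*
+ * A sketch (hypothetical code, not part of librsb) of expanding a symmetric
+ * lower triangle via the routine above; the arrays are assumed to come from
+ * the library allocator, since they get freed and replaced internally.
+ */
+#if 0
+static rsb_err_t sketch_symmetrize(void)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t nnz = 2;
+	/* one diagonal element (0,0) and one off-diagonal element (1,0) */
+	rsb_coo_idx_t *IA = rsb__malloc(sizeof(rsb_coo_idx_t)*nnz);
+	rsb_coo_idx_t *JA = rsb__malloc(sizeof(rsb_coo_idx_t)*nnz);
+	void *VA = rsb__malloc(sizeof(double)*nnz);
+
+	if(!IA || !JA || !VA)
+		return RSB_ERR_ENOMEM; /* (a sketch: leaks on partial failure) */
+	IA[0] = 0; JA[0] = 0; ((double*)VA)[0] = 4.0;
+	IA[1] = 1; JA[1] = 0; ((double*)VA)[1] = 1.0;
+	errval = rsb__reallocate_with_symmetry(&IA, &JA, &VA, &nnz,
+			RSB_NUMERICAL_TYPE_DOUBLE);
+	/* on success nnz==3: the (1,0) entry gained its (0,1) mirror */
+	rsb__free(IA); rsb__free(JA); rsb__free(VA);
+	return errval;
+}
+#endif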
+
+
+/* @endcond */
diff --git a/rsb_coo_symm.h b/rsb_coo_symm.h
new file mode 100644
index 0000000..765e2d3
--- /dev/null
+++ b/rsb_coo_symm.h
@@ -0,0 +1,37 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains functions for COO symmetry handling.
+ * */
+
+#ifndef RSB_COO_SYMM_H_INCLUDED
+#define RSB_COO_SYMM_H_INCLUDED
+
+#include "rsb_internals.h"
+rsb_err_t rsb__reallocate_with_symmetry( rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, void **coo, rsb_nnz_idx_t * nnz, rsb_type_t typecode );
+
+#endif /* RSB_COO_SYMM_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_cpmv.c b/rsb_cpmv.c
new file mode 100644
index 0000000..7bff211
--- /dev/null
+++ b/rsb_cpmv.c
@@ -0,0 +1,100 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+ /**
+ * @file
+ * @brief Copy/Move primitives 
+ * @author Michele Martone
+ * */
+#include "rsb_common.h"
+
+/*!
+ An integer type for thread indices (internal).
+ */
+typedef rsb_thread_t rsb_tc_t;
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+void RSB_BZERO_parallel(void * p, size_t n)
+{
+	/**
+		\ingroup gr_internals
+		TODO: move to somewhere else
+	 */
+	rsb_char_t * cp = p;
+	const rsb_tc_t wet = rsb_get_num_threads(); /* want executing threads */
+
+	if(RSB_UNLIKELY(n<wet*RSB_MIN_THREAD_BZERO_BYTES))
+	{
+		RSB_BZERO(cp,n);
+	}
+	else
+	{
+		rsb_nnz_idx_t wi;
+		size_t cn = (n+wet-1)/wet;	/* chunk size */
+		#pragma omp parallel for schedule(static,1) RSB_NTC
+		for(wi=0;wi<wet;++wi)
+		{
+			size_t coff = wi*cn;
+			size_t cnn = (wi<wet-1)?cn:n-((wet-1)*cn);
+			RSB_BZERO(cp+coff,cnn);
+		}
+	}
+}
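+
+/*
+ * In the parallel branch above, each of the wet threads gets a
+ * ceil(n/wet)-sized chunk, the last one taking whatever remains, so the
+ * chunks cover the n bytes exactly.  A stand-alone sketch of that
+ * arithmetic (hypothetical code, not part of librsb):
+ */
+#if 0
+static size_t sketch_chunks_cover(size_t n, size_t wet)
+{
+	size_t cn = (n+wet-1)/wet;          /* per-thread chunk size */
+	size_t covered = (wet-1)*cn         /* the first wet-1 full chunks */
+		+ (n-((wet-1)*cn));         /* the last, possibly shorter, one */
+	return covered; /* == n; e.g. n=10, wet=3: cn=4, and 2*4+(10-8) == 10 */
+}
+#endif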
+
+void RSB_A_MEMCPY_parallel(void * RSB_RESTRICT ID, const void * RSB_RESTRICT IS, size_t DOFF, size_t SOFF, size_t NNZ, size_t ES)
+{
+	/**
+		\ingroup gr_internals
+		TODO: move to somewhere else
+	 */
+	const rsb_tc_t wet = rsb_get_num_threads(); /* want executing threads */
+
+	RSB_DEBUG_ASSERT(RSB_MIN_THREAD_MEMCPY_NNZ);
+
+	if(RSB_UNLIKELY(NNZ<wet*RSB_MIN_THREAD_MEMCPY_NNZ))/* at least RSB_MIN_THREAD_MEMCPY_NNZ nnz to trigger memcpy */
+	{
+		RSB_A_MEMCPY(ID,IS,DOFF,SOFF,NNZ,ES);
+	}
+	else
+	{
+		rsb_nnz_idx_t wi;
+		size_t cnz = (NNZ+wet-1)/wet;	/* chunk size */
+		#pragma omp parallel for schedule(static,1) RSB_NTC
+		for(wi=0;wi<wet;++wi)
+		{
+			size_t coff = wi*cnz;
+			size_t cnnz = (wi<wet-1)?cnz:NNZ-((wet-1)*cnz);
+			RSB_A_MEMCPY(ID,IS,DOFF+coff,SOFF+coff,cnnz,ES);
+		}
+	}
+}
+
+void RSB_COA_MEMCPY_parallel(void * ID, const void * IS, size_t DOFF, size_t SOFF, size_t NNZ)
+{
+	/**
+		\ingroup gr_internals
+	 */
+	RSB_A_MEMCPY_parallel(ID,IS,DOFF,SOFF,NNZ,sizeof(rsb_coo_idx_t));
+}
+
+/* @endcond */
diff --git a/rsb_cpmv.h b/rsb_cpmv.h
new file mode 100644
index 0000000..c6c4acb
--- /dev/null
+++ b/rsb_cpmv.h
@@ -0,0 +1,40 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+ /**
+ * @file
+ * @brief Copy/Move primitives 
+ * @author Michele Martone
+ * */
+#ifndef RSB_CPMV_H_INCLUDED
+#define RSB_CPMV_H_INCLUDED
+#include "rsb_common.h"
+/*void RSB_A_MEMCPY_parallel(rsb_char_t * ID, const rsb_char_t * IS, size_t DOFF, size_t SOFF, size_t NNZ, size_t ES);*/
+/*void RSB_COA_MEMCPY_parallel(rsb_char_t * ID, const rsb_char_t * IS, size_t DOFF, size_t SOFF, size_t NNZ);*/
+#define RSB_A_MEMCPY_parallel rsb_a_memcpy_parallel
+#define RSB_COA_MEMCPY_parallel rsb_coa_memcpy_parallel
+#define RSB_BZERO_parallel rsb_bzero_parallel
+void RSB_A_MEMCPY_parallel(void * RSB_RESTRICT ID, const void * RSB_RESTRICT IS, size_t DOFF, size_t SOFF, size_t NNZ, size_t ES);
+void RSB_COA_MEMCPY_parallel(void* ID, const void* IS, size_t DOFF, size_t SOFF, size_t NNZ);
+void RSB_BZERO_parallel(void * p, size_t n);
+/* @endcond */
+#endif /* RSB_CPMV_H_INCLUDED */
diff --git a/rsb_csr.c b/rsb_csr.c
new file mode 100644
index 0000000..c7fe606
--- /dev/null
+++ b/rsb_csr.c
@@ -0,0 +1,102 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains functions for CSR handling.
+ * */
+#include "rsb_internals.h"
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+static rsb_err_t rsb_is_correctly_built_csr_matrix(const rsb_nnz_idx_t * PA, const rsb_coo_idx_t * JA, const rsb_coo_idx_t nrA, const rsb_coo_idx_t ncA, const rsb_nnz_idx_t nnz, const rsb_coo_idx_t ib /* index base */)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t ni;
+	rsb_coo_idx_t ri;
+
+	if(!PA ||!JA || RSB_INVALID_COO_INDEX(nrA)|| RSB_INVALID_COO_INDEX(ncA)|| RSB_INVALID_NNZ_INDEX(nnz))
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,"PA:%p JA:%p nrA:%d ncA:%d nnzA:%d\n",PA,JA,nrA,ncA,nnz);
+	}
+
+	if(PA[nrA]-ib!=nnz)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,"PA[nrA]=%d vs nnzA=%d (ib=%d)\n",PA[nrA],nnz,ib);
+	}
+
+	for(ri=0;ri<nrA;++ri)
+	{
+#if 0
+		if(!rsb__util_is_coo_array_sorted_up(JA+IP[nr],IP[nr+1]-IP[nr]))
+		{
+			RSB_PERR_GOTO(err,"bindx seems unsorted!\n");
+		}
+#endif
+		if(PA[ri]>PA[ri+1])
+		{
+			errval = RSB_ERR_BADARGS;
+			RSB_PERR_GOTO(err,"PA[%d]>PA[%d]: %d>%d (row off its bounds)\n",ri,ri+1,PA[ri],PA[ri+1]);
+		}
+		if(PA[ri+1]-PA[ri] > ncA)
+		{
+			errval = RSB_ERR_BADARGS;
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}
+		for(ni=PA[ri]-ib;ni<PA[ri+1]-ib;++ni)
+		{
+			if(ni+1<PA[ri+1]-ib)
+			if(JA[ni]>=JA[ni+1])
+		       	{
+				errval = RSB_ERR_BADARGS;
+				RSB_PERR_GOTO(err,"i=%d JA[%d]>=JA[%d]: %d>=%d (unsorted or duplicate column indices)\n",ri,ni,ni+1,JA[ni],JA[ni+1]);
+			}
+			if(JA[ni]-ib>=ncA)
+		       	{
+				errval = RSB_ERR_BADARGS;
+				RSB_PERR_GOTO(err,"i=%d  JA[%d]>=ncA: %d >= %d (column exceeding matrix)\n",ri,ni,JA[ni],ncA);
+			}
+		}
+	}
+err:
+        RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__csr_chk(const rsb_nnz_idx_t * RSB_RESTRICT IP, const rsb_coo_idx_t * RSB_RESTRICT JA, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_nnz_idx_t nnzA, rsb_coo_idx_t ib)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	errval = rsb_is_correctly_built_csr_matrix(IP, JA, nrA, ncA, nnzA, ib);
+	return errval;
+}
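+
+/*
+ * A sketch (hypothetical code, not part of librsb) of a 0-based CSR pattern
+ * satisfying all the invariants checked above: monotone row pointers,
+ * PA[nrA]-ib == nnz, and strictly increasing, in-bounds columns per row.
+ */
+#if 0
+static rsb_err_t sketch_csr_chk(void)
+{
+	/* 3 x 3 matrix; row 0 holds columns {0,2}, row 1 is empty, row 2 holds {1} */
+	const rsb_nnz_idx_t PA[] = {0,2,2,3};
+	const rsb_coo_idx_t JA[] = {0,2,1};
+
+	return rsb__csr_chk(PA, JA, 3, 3, 3, 0 /* 0-based indices */);
+}
+#endif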
+
+rsb_err_t rsb__csc_chk(const rsb_nnz_idx_t * RSB_RESTRICT IP, const rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_nnz_idx_t nnzA, rsb_coo_idx_t ib)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	errval = rsb__csr_chk(IP,IA,nrA,ncA,nnzA,ib);
+	return errval;
+}
+
+/* @endcond */
diff --git a/rsb_csr.h b/rsb_csr.h
new file mode 100644
index 0000000..f0ae7ea
--- /dev/null
+++ b/rsb_csr.h
@@ -0,0 +1,40 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains functions for CSR handling.
+ * */
+
+#ifndef RSB_CSR_H_INCLUDED
+#define RSB_CSR_H_INCLUDED
+
+#include "rsb_internals.h"
+
+rsb_err_t rsb__csr_chk(const rsb_nnz_idx_t * RSB_RESTRICT IP, const rsb_coo_idx_t * RSB_RESTRICT JA, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_nnz_idx_t nnzA, rsb_coo_idx_t ib);
+rsb_err_t rsb__csc_chk(const rsb_nnz_idx_t * RSB_RESTRICT IP, const rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_nnz_idx_t nnzA, rsb_coo_idx_t ib);
+
+
+#endif /* RSB_CSR_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_csr2coo.c b/rsb_csr2coo.c
new file mode 100644
index 0000000..a0465f8
--- /dev/null
+++ b/rsb_csr2coo.c
@@ -0,0 +1,240 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+ /**
+ * @file
+ * @brief CSR to COO conversion code
+ * @author Michele Martone
+ * */
+#include "rsb_common.h"
+
+void rsb__do_prefix_sum_coo_idx_t(rsb_nnz_idx_t *IA, rsb_nnz_idx_t nnz)
+{
+	/* FIXME: shall optimize */
+	rsb_nnz_idx_t i;
+	for(i=1;RSB_LIKELY(i<nnz);++i)
+		IA[i] += IA[i-1];
+}
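+
+/*
+ * The above is an in-place inclusive prefix sum, e.g. {2,0,3,1} becomes
+ * {2,2,5,6}.  The same loop on plain ints (a sketch, not part of librsb):
+ */
+#if 0
+static void sketch_prefix_sum(int *A, int n)
+{
+	int i;
+	for(i = 1; i < n; ++i)
+		A[i] += A[i-1]; /* A[i] now holds the sum of the first i+1 inputs */
+}
+#endif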
+
+rsb_err_t rsb__do_switch_fullword_array_to_compressed(rsb_nnz_idx_t *IA, rsb_nnz_idx_t nnz, rsb_nnz_idx_t m)
+{
+		/**
+ 			FIXME: no test case
+			see rsb__do_switch_compressed_array_to_fullword_coo
+ 			FIXME: need a no-calloc version
+	 		TODO: rsb__do_switch_fullword_array_to_compressed -> rsb__idx_fia2fpa
+  		*/
+		rsb_err_t errval = RSB_ERR_NO_ERROR;
+		rsb_nnz_idx_t i;
+		rsb_coo_idx_t * IP = NULL;
+		IP = rsb__calloc(sizeof(rsb_coo_idx_t)*(m+1));
+		if(!IP)
+		{
+			errval = RSB_ERR_ENOMEM;
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}
+#if 0
+		for(i=0;RSB_LIKELY(i<nnz);++i)
+			if(IA[i]>=m || IA[i]<0)
+			{
+				errval = RSB_ERR_BADARGS;
+				RSB_PERR_GOTO(err,"0 <= IA[%d]=%d < m=%d  ?\n",i,IA[i],m);
+			}
+#endif
+		for(i=0;RSB_LIKELY(i<nnz);++i)
+			IP[IA[i]+1]++;
+		for(i=0;RSB_LIKELY(i<m);++i)
+			IP[i+1] += IP[i];
+		RSB_COA_MEMCPY(IA,IP,0,0,m+1);
+err:
+		RSB_CONDITIONAL_FREE(IP);
+	RSB_DO_ERR_RETURN(errval)
+}
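+
+/*
+ * The routine above is a histogram followed by a prefix sum (counting sort
+ * style).  A worked example (hypothetical code, not part of librsb),
+ * assuming the array can hold the m+1 output entries (m+1 <= nnz):
+ */
+#if 0
+static rsb_err_t sketch_fullword_to_compressed(void)
+{
+	/* row indices of the 3 nonzeroes of a 2-row matrix */
+	rsb_nnz_idx_t IA[] = {0, 0, 1};
+	/* histogram over rows: {0,2,1}; prefix sum: {0,2,3};
+	   afterwards IA[0..2] == {0,2,3}: row 0 spans 0..2, row 1 spans 2..3 */
+	return rsb__do_switch_fullword_array_to_compressed(IA, 3, 2);
+}
+#endif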
+
+rsb_err_t rsb__do_switch_compressed_array_to_fullword_coo(rsb_nnz_idx_t *RSB_RESTRICT IP, rsb_nnz_idx_t m, rsb_coo_idx_t off, rsb_coo_idx_t *RSB_RESTRICT TA)
+{
+		/**
+ 			FIXME: no test case
+	 		Requires m+1 temporary space.
+			see rsb__do_switch_fullword_array_to_compressed
+	 		TODO: rsb__do_switch_compressed_array_to_fullword_coo -> rsb__idx_fpa2fia
+  		*/
+		rsb_err_t errval = RSB_ERR_NO_ERROR;
+		rsb_nnz_idx_t /*k,*/li,ri;
+		//rsb_nnz_idx_t nnz = IP[m+1];
+		rsb_coo_idx_t i;
+		rsb_coo_idx_t * RSB_RESTRICT IA = TA;
+
+		if(!IA)
+			IA = rsb__malloc(sizeof(rsb_coo_idx_t)*(m+1));
+		if(!IA)
+		{
+			errval = RSB_ERR_ENOMEM;
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}
+		RSB_COA_MEMCPY(IA,IP,0,0,m+1);
+		for(i=0;RSB_LIKELY(i<m);++i)
+		{
+			ri = IA[i+1];
+			li = IA[i];
+			rsb__util_coo_array_set(IP+li,ri-li,i+off);
+		}
+err:
+		if(IA!=TA)
+			RSB_CONDITIONAL_FREE(IA);
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb_do_switch_in_place_csr_to_in_place_coo(struct rsb_mtx_t * mtxAp, rsb_bool_t do_shift)
+{
+	/**
+		\ingroup gr_internals
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t li,ri;
+	rsb_coo_idx_t i;
+	// IA needs expansion
+	rsb_coo_idx_t * IA = NULL;
+
+	if( mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES)
+	{
+		rsb__do_switch_array_to_fullword_coo((rsb_half_idx_t*)(mtxAp->bindx),mtxAp->nnz,0);
+	}
+	else
+	{
+	}
+
+	if(rsb__is_coo_matrix(mtxAp))
+	{
+		// FIXME: TODO (nothing to do)
+		goto err;
+	}
+	IA = rsb__malloc(sizeof(rsb_coo_idx_t)*(mtxAp->Mdim+1));
+	if(!IA)
+	{
+		errval = RSB_ERR_ENOMEM;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	RSB_COA_MEMCPY(IA,mtxAp->bpntr,0,0,mtxAp->Mdim+1);
+	for(i=0;RSB_LIKELY(i<mtxAp->Mdim);++i)
+	{
+		ri = IA[i+1];
+		li = IA[i];
+		rsb__util_coo_array_set(mtxAp->bpntr+li,ri-li,i);
+	}
+	if(do_shift)
+	{
+		// JA needs displacement of mtxAp->coff
+		rsb__util_coo_array_add(mtxAp->bindx,mtxAp->nnz,mtxAp->coff);
+		// IA needs displacement of mtxAp->roff
+		rsb__util_coo_array_add(mtxAp->bpntr,mtxAp->nnz,mtxAp->roff);
+	}
+	// VA is opaque to us: no processing is needed
+	RSB_CONDITIONAL_FREE(IA);
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_nnz_idx_t rsb_do_count_lowtri_in_csr(const struct rsb_coo_matrix_t *csrp)
+{
+	register rsb_coo_idx_t i;
+	register rsb_nnz_idx_t lnz = 0;
+	const rsb_coo_idx_t *IA = csrp->IA;
+	const rsb_coo_idx_t *JA = csrp->JA;
+	for(i=0;i<csrp->nr;++i)
+	{
+		register rsb_nnz_idx_t nnz0 = IA[i+0];
+		register rsb_nnz_idx_t nnz1 = IA[i+1];
+		lnz += rsb__nnz_split_coo_bsearch(JA+nnz0,i+1,nnz1-nnz0);
+	}
+	return lnz;
+}
+
+rsb_nnz_idx_t rsb__do_count_upptri_in_csr(const struct rsb_coo_matrix_t *csrp)
+{
+	register rsb_coo_idx_t i;
+	register rsb_nnz_idx_t unz = 0;
+	const rsb_coo_idx_t *IA = csrp->IA;
+	const rsb_coo_idx_t *JA = csrp->JA;
+	for(i=0;i<csrp->nr;++i)
+	{
+		register rsb_nnz_idx_t nnz0 = IA[i+0];
+		register rsb_nnz_idx_t nnz1 = IA[i+1];
+		unz += nnz1-nnz0-rsb__nnz_split_coo_bsearch(JA+nnz0,i,nnz1-nnz0);
+	}
+	return unz;
+}
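+
+/*
+ * Both counters above use the binary search split so that diagonal elements
+ * are counted by the lower and by the upper triangle alike.  A worked
+ * example on a dense 2 x 2 pattern (hypothetical code, not part of librsb):
+ */
+#if 0
+static void sketch_triangle_counts(void)
+{
+	rsb_coo_idx_t IA[] = {0,2,4};   /* row pointers */
+	rsb_coo_idx_t JA[] = {0,1,0,1}; /* dense 2 x 2 pattern */
+	struct rsb_coo_matrix_t csr;
+
+	/* the counters only read IA, JA and nr */
+	csr.VA = NULL; csr.IA = IA; csr.JA = JA;
+	csr.nr = 2; csr.nc = 2; csr.nnz = 4;
+	/* lower: (0,0),(1,0),(1,1); upper: (0,0),(0,1),(1,1);
+	   so both rsb_do_count_lowtri_in_csr(&csr) and
+	   rsb__do_count_upptri_in_csr(&csr) give 3 (4 nnz + 2 diagonals = 6) */
+}
+#endif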
+
+rsb_nnz_idx_t rsb__do_copy_lowtri_from_csr_to_coo(const struct rsb_coo_matrix_t *csrp, struct rsb_coo_matrix_t *coop)
+{
+	register rsb_coo_idx_t i;
+	register rsb_nnz_idx_t lnz = 0;
+	const rsb_coo_idx_t *IA = csrp->IA;
+	const rsb_coo_idx_t *JA = csrp->JA;
+	const rsb_coo_idx_t *VA = csrp->VA;
+	size_t el_size = RSB_SIZEOF(csrp->typecode);
+	for(i=0;i<csrp->nr;++i)
+	{
+		register rsb_nnz_idx_t nnz0 = IA[i+0];
+		register rsb_nnz_idx_t nnz1 = IA[i+1];
+		nnz1 = nnz0+rsb__nnz_split_coo_bsearch(JA+nnz0,i+1,nnz1-nnz0);
+		RSB_CSR2COO_MEMCPY(coop->VA,coop->IA,coop->JA,VA,i,JA,lnz,nnz0,nnz1-nnz0,el_size);
+		lnz += nnz1-nnz0;
+	}
+	return lnz;
+}
+
+rsb_nnz_idx_t rsb__do_copy_upptri_from_csr_to_coo(const struct rsb_coo_matrix_t *csrp, struct rsb_coo_matrix_t *coop)
+{
+	register rsb_coo_idx_t i;
+	register rsb_nnz_idx_t unz = 0;
+	const rsb_coo_idx_t *IA = csrp->IA;
+	const rsb_coo_idx_t *JA = csrp->JA;
+	const rsb_coo_idx_t *VA = csrp->VA;
+	size_t el_size = RSB_SIZEOF(csrp->typecode);
+	for(i=0;i<csrp->nr;++i)
+	{
+		register rsb_nnz_idx_t nnz0 = IA[i+0];
+		register rsb_nnz_idx_t nnz1 = IA[i+1];
+		nnz0 = nnz0+rsb__nnz_split_coo_bsearch(JA+nnz0,i,nnz1-nnz0);
+		RSB_CSR2COO_MEMCPY(coop->VA,coop->IA,coop->JA,VA,i,JA,unz,nnz0,nnz1-nnz0,el_size);
+		unz += nnz1-nnz0;
+	}
+	return unz;
+}
+
+rsb_nnz_idx_t rsb__do_count_tri_in_csr(const struct rsb_coo_matrix_t *csrp, rsb_nnz_idx_t *lnzp, rsb_nnz_idx_t *unzp)
+{
+	/* FIXME: should optimize */
+	if(lnzp)
+		*lnzp = rsb_do_count_lowtri_in_csr(csrp);
+	if(unzp)
+		*unzp = rsb__do_count_upptri_in_csr(csrp);
+	return (lnzp?*lnzp:0)+(unzp?*unzp:0);
+}
+rsb_nnz_idx_t rsb__do_copy_tri_from_csr_to_coo(const struct rsb_coo_matrix_t *csrp, struct rsb_coo_matrix_t *lcoop, struct rsb_coo_matrix_t *ucoop)
+{
+	/* FIXME: should optimize */
+	return rsb__do_copy_lowtri_from_csr_to_coo(csrp,lcoop)+rsb__do_copy_upptri_from_csr_to_coo(csrp,ucoop);
+}
+
+/* @endcond */
diff --git a/rsb_csr2coo.h b/rsb_csr2coo.h
new file mode 100644
index 0000000..1982f01
--- /dev/null
+++ b/rsb_csr2coo.h
@@ -0,0 +1,42 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+ /**
+ * @file
+ * @brief CSR to COO conversion code
+ * @author Michele Martone
+ * */
+#ifndef RSB_CSR2COO_H_INCLUDED
+#define RSB_CSR2COO_H_INCLUDED
+#include "rsb_common.h"
+
+void rsb__do_prefix_sum_coo_idx_t(rsb_nnz_idx_t *IA, rsb_nnz_idx_t nnz);
+#define rsb_do_prefix_sum_nnz_idx_t rsb__do_prefix_sum_coo_idx_t
+rsb_err_t rsb__do_switch_compressed_array_to_fullword_coo(rsb_nnz_idx_t *RSB_RESTRICT IP, rsb_nnz_idx_t m, rsb_coo_idx_t off, rsb_coo_idx_t *RSB_RESTRICT TA);
+rsb_nnz_idx_t rsb__do_copy_lowtri_from_csr_to_coo(const struct rsb_coo_matrix_t *csrp, struct rsb_coo_matrix_t *coop);
+rsb_nnz_idx_t rsb__do_copy_upptri_from_csr_to_coo(const struct rsb_coo_matrix_t *csrp, struct rsb_coo_matrix_t *coop);
+rsb_nnz_idx_t rsb__do_count_upptri_in_csr(const struct rsb_coo_matrix_t *csrp);
+rsb_nnz_idx_t rsb__do_count_tri_in_csr(const struct rsb_coo_matrix_t *csrp, rsb_nnz_idx_t *lnzp, rsb_nnz_idx_t *unzp);
+rsb_nnz_idx_t rsb__do_copy_tri_from_csr_to_coo(const struct rsb_coo_matrix_t *csrp, struct rsb_coo_matrix_t *lcoop, struct rsb_coo_matrix_t *ucoop);
+rsb_err_t rsb__do_switch_fullword_array_to_compressed(rsb_nnz_idx_t *IA, rsb_nnz_idx_t nnz, rsb_nnz_idx_t m);
+#endif /* RSB_CSR2COO_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_do.c b/rsb_do.c
new file mode 100644
index 0000000..e4361c5
--- /dev/null
+++ b/rsb_do.c
@@ -0,0 +1,1284 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/**
+ * @file
+ * @author Michele Martone
+ * @brief
+ *
+ * Implementation of the interface functions.
+ * \internal
+ *
+ * */
+/* TODO: introduce RSB_MSG_BADARGS_ERROR(ERRVAL,MSG,BAN) */
+#include "rsb_common.h"
+#include "rsb_util.h"
+#include "rsb.h"
+#include "rsb_unroll.h"
+#ifdef RSB_HAVE_SYS_UTSNAME_H 
+#include <sys/utsname.h>	/* uname */
+#endif /* RSB_HAVE_SYS_UTSNAME_H */
+#include "rsb_do.h"
+extern struct rsb_session_handle_t rsb_global_session_handle;
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+rsb_err_t rsb_do_prec_build(struct rsb_mtx_t ** mtxLpp, struct rsb_mtx_t ** mtxUpp, const struct rsb_mtx_t * mtxAp)
+{
+	/* 
+	 * FIXME: UNFINISHED, UNTESTED
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t *L=NULL,*U=NULL;
+	struct rsb_coo_matrix_t csr,lcoo,ucoo;
+	rsb_flags_t flags = RSB_FLAG_DEFAULT_MATRIX_FLAGS;
+
+	csr.VA=NULL; csr.IA=NULL; csr.JA=NULL;
+	RSB_BZERO_P(&lcoo); RSB_BZERO_P(&ucoo); /* so the error path may destroy them safely even before allocation */
+	if(!mtxLpp)
+		goto err;
+	if(!mtxUpp)
+		goto err;
+	if(!mtxAp)
+		goto err;
+	if(mtxAp->nr != mtxAp->nc)
+		goto err;
+	csr.nr=mtxAp->nr;
+	csr.nc=mtxAp->nc;
+	csr.nnz = RSB_MAX(mtxAp->nnz,RSB_MAX(csr.nr,csr.nc)+1);
+	csr.typecode=mtxAp->typecode;
+	ucoo=csr; lcoo=csr;
+	if(rsb__allocate_coo_matrix_t(&csr)!=&csr)
+	{
+		errval = RSB_ERR_ENOMEM;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	csr.nnz=mtxAp->nnz;
+/* a reasonably efficient routine would: */
+	/* build a fullword CSR clone (using a RSB-to-CSR constructor) */
+	/* perform ILU on the CSR struct */
+	/* build RSB clones for the L and U CSR parts (using a modified CSR-to-RSB constructor, possibly building them together, in one shot) */
+	/* FIXME: TODO */
+	/* a reasonable quick hack routine would: */
+	/* build a fullword CSR clone (using a RSB-to-CSR constructor) */
+	if((errval = rsb__do_get_csr(mtxAp->typecode,mtxAp,csr.VA,csr.IA,csr.JA,RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS))!=RSB_ERR_NO_ERROR)
+		goto err;
+	/* perform ILU on the CSR struct */
+	if((errval = rsb__prec_csr_ilu0(&csr))!=RSB_ERR_NO_ERROR)
+		goto err;
+	/* perform CSR to COO conversion */
+	rsb__do_count_tri_in_csr(&csr,&lcoo.nnz,&ucoo.nnz); /* count each triangle's entries, to size the allocations below */
+
+	ucoo.nnz=RSB_MAX(ucoo.nnz,ucoo.nr+1);
+	if(rsb__allocate_coo_matrix_t(&ucoo)!=&ucoo) { errval = RSB_ERR_ENOMEM; goto err; }
+
+	lcoo.nnz=RSB_MAX(lcoo.nnz,lcoo.nr+1);
+
+	if(rsb__allocate_coo_matrix_t(&lcoo)!=&lcoo) { errval = RSB_ERR_ENOMEM; goto err; }
+	rsb__do_count_tri_in_csr(&csr,&lcoo.nnz,&ucoo.nnz); /* recount: the RSB_MAX adjustments above clobbered the actual counts */
+
+	rsb__do_copy_tri_from_csr_to_coo(&csr,&lcoo,&ucoo);
+
+	/* allocating L and U */
+	L = rsb__do_mtx_alloc_from_coo_const(lcoo.VA,lcoo.IA,lcoo.JA,lcoo.nnz,lcoo.typecode,lcoo.nr,lcoo.nc,RSB_DEFAULT_ROW_BLOCKING,RSB_DEFAULT_COL_BLOCKING,flags|RSB_FLAG_LOWER_TRIANGULAR,&errval);
+	if(!L)
+	{
+		rsb__destroy_coo_matrix_t(&lcoo);
+		RSB_BZERO_P(&lcoo);
+		goto err;
+	}
+	U = rsb__do_mtx_alloc_from_coo_const(ucoo.VA,ucoo.IA,ucoo.JA,ucoo.nnz,ucoo.typecode,ucoo.nr,ucoo.nc,RSB_DEFAULT_ROW_BLOCKING,RSB_DEFAULT_COL_BLOCKING,flags|RSB_FLAG_UPPER_TRIANGULAR,&errval);
+	if(!U)
+	{
+		rsb__destroy_coo_matrix_t(&ucoo);
+		RSB_BZERO_P(&ucoo);
+		goto err;
+	}
+	/*RSB_ERROR(RSB_ERRM_WUF);*/
+
+	*mtxLpp=L;
+	*mtxUpp=U;
+       	rsb__destroy_coo_matrix_t(&csr);
+	rsb__destroy_coo_matrix_t(&lcoo); rsb__destroy_coo_matrix_t(&ucoo); 
+
+	return errval;
+err:
+	rsb__destroy_coo_matrix_t(&lcoo); rsb__destroy_coo_matrix_t(&ucoo); 
+	rsb__destroy_coo_matrix_t(&csr);
+	rsb__do_perror(NULL,errval);
+	RSB_MTX_FREE(L);
+	RSB_MTX_FREE(U);
+	if(errval == RSB_ERR_NO_ERROR)
+		errval = RSB_ERR_BADARGS; /* the early-exit argument checks reach here without setting errval */
+	return errval;
+}
+
+rsb_err_t rsb__do_get_preconditioner(void *opd, const struct rsb_mtx_t * mtxAp, rsb_precf_t prec_flags, const void *ipd)/* FIXME: temporary interface */
+{
+	// FIXME: UNFINISHED, UNTESTED
+	rsb_err_t errval = RSB_ERR_GENERIC_ERROR;
+	struct rsb_mtx_t * LU[2]={NULL,NULL};
+
+	if(!opd || !mtxAp)
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	errval = rsb_do_prec_build(&LU[0],&LU[1],mtxAp);
+	rsb_memcpy(opd,LU,sizeof(LU));
+err:
+	return errval;
+}
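+
+/* Usage sketch (assumptions: rsb_mtx_get_prec() is the public wrapper of the
+ * function above, and RSB_PRECF_ILU0 requests the ILU-0 factors built by
+ * rsb_do_prec_build()): on success, opd receives two struct rsb_mtx_t
+ * pointers, L and U. */
+#if 0
+	struct rsb_mtx_t *LU[2] = {NULL,NULL};
+	rsb_mtx_get_prec(LU,mtxAp,RSB_PRECF_ILU0,NULL);
+	/* ... use LU[0] (lower) and LU[1] (upper) in triangular solves ... */
+#endif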
+#if 0
+rsb_err_t rsb_do_prec_apply(const struct rsb_prec_t * precp, const void *r)
+{
+	/*
+	 * given the preconditioner P, computes \f$ r \leftarrow {P}^{-1} r.\f$
+	 * \f$ r' = {P}^{-1} r \f$
+	 * \f$ r' = {L \cdot U}^{-1} r \f$
+	 * \f$ r' = {U}^{-1} {L}^{-1} r \f$
+	 * \f$ r' = ({U}^{-1} ({L}^{-1} r)) \f$
+	 * */
+	//rsb__debug_print_vector(r,precp->L->nr,precp->L->typecode,1);
+	double one=1.0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_DO_ERROR_CUMULATE(errval,rsb__do_spsv(RSB_TRANSPOSITION_N,&one,precp->L,r,1,r,1));
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+	RSB_DO_ERROR_CUMULATE(errval,rsb__do_spsv(RSB_TRANSPOSITION_N,&one,precp->U,r,1,r,1));
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+
+	//rsb__debug_print_vector(r,precp->L->nr,precp->L->typecode,1);
+err:
+	if(RSB_SOME_ERROR(errval))
+		rsb__do_perror(NULL,errval);
+	return errval;
+}
+#endif
+
+rsb_err_t rsb__do_get_rows_sparse(rsb_trans_t transA, const void * alphap, const struct rsb_mtx_t * mtxAp, void* VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t frA, rsb_coo_idx_t lrA, rsb_nnz_idx_t *rnzp, rsb_flags_t flags)
+{
+	/* TODO: having a return scaled rows would be an efficient feature. */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t off = 0;
+
+#if RSB_ALLOW_ZERO_DIM 
+	if(RSB_ANY_MTX_DIM_ZERO(mtxAp))
+	{
+		goto err; /* FIXME: skipping further checks */
+	}
+#endif
+
+	if(VA == NULL && JA == NULL && IA == NULL && rnzp != NULL)
+	{
+		*rnzp = rsb__dodo_get_rows_nnz(mtxAp, frA, lrA,flags,&errval);
+		goto err;
+	}
+
+	if(!mtxAp)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_E_MTXAP);
+	}
+
+	if(!rnzp)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,"user did not supply a results nonzeroes pointer\n");
+	}
+
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE))
+	{
+		off=1;
+		lrA--;
+		frA--;
+	}
+
+	if(frA<0 || lrA>mtxAp->nr)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_EM);
+	}
+
+	if(!VA || frA>lrA)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_EM);
+	}
+
+	if(RSB_DOES_TRANSPOSE(transA))
+		RSB_SWAP(rsb_coo_idx_t*,IA,JA);
+
+	*rnzp=0;
+#if 0
+        errval = rsb__do_get_rows_sparse_rec(mtxAp,VA,frA,lrA,IA,JA,rnzp,off,off);
+	if(flags & RSB_FLAG_SORT_INPUT)
+		errval = rsb_util_sort_row_major_inner(VA,IA,JA,*rnzp,mtxAp->nr+off,mtxAp->nc+off,mtxAp->typecode,flags|RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR);
+#if 0
+	if( flags & RSB_FLAG_FORTRAN_INDICES_INTERFACE )
+		rsb__util_nnz_array_to_fortran_indices(IA,*rnzp),
+		rsb__util_nnz_array_to_fortran_indices(JA,*rnzp);
+#endif
+#else
+#if 1
+	{
+		rsb_coo_idx_t i;
+		for(i=frA;i<=lrA;++i)
+		        RSB_DO_ERROR_CUMULATE(errval,rsb__do_get_rows_sparse_rec(mtxAp,VA,i,i,IA,JA,rnzp,off,off));
+	}
+#else
+	{
+		/* this is too slow for many leaf matrices */
+		rsb_submatrix_idx_t si;
+		rsb_coo_idx_t i;
+		for(i=frA;i<=lrA;++i)
+		for(si=0;si<mtxAp->all_leaf_matrices_n;++si)
+		        RSB_DO_ERROR_CUMULATE(errval,rsb__do_get_rows_sparse_rec(mtxAp->all_leaf_matrices[si].mtxlp,VA,i,i,IA,JA,rnzp,off,off));
+	}
+#endif
+#endif
+	RSB_DEBUG_ASSERT(rsb__dodo_get_rows_nnz(mtxAp,frA,lrA,flags,NULL)==*rnzp);
+	if(RSB_DOES_TRANSPOSE(transA))
+	{
+		RSB_SWAP(rsb_coo_idx_t*,IA,JA);
+		/* swap back before sorting; if not, we would get column-major order. */
+		if(RSB_SOME_ERROR(errval = rsb_util_sort_row_major_inner(VA,IA,JA,*rnzp,mtxAp->nr,mtxAp->nc,mtxAp->typecode,flags)))
+		{
+			RSB_PERR_GOTO(err,RSB_ERRM_EM);
+		}
+	}
+	else
+	if(RSB_DOES_CONJUGATE(transA))
+	{
+		RSB_DO_ERROR_CUMULATE(errval,rsb__util_do_conjugate(VA,mtxAp->typecode,*rnzp));
+	}
+
+	if(alphap)
+	{
+		RSB_DO_ERROR_CUMULATE(errval,rsb__cblas_Xscal(mtxAp->typecode,*rnzp,alphap,VA,1));
+	}
+err:
+	if(RSB_SOME_ERROR(errval))
+		RSB_ERROR(RSB_ERRM_NL);
+	return errval;
+}
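+
+/* Usage sketch (an assumption: the public rsb.h entry point
+ * rsb_mtx_get_rows_sparse() dispatches to the function above): the
+ * VA==IA==JA==NULL special case enables a two-pass extraction -- first query
+ * the nonzero count, then extract. frA, lrA and the output arrays are
+ * placeholders to be provided by the caller. */
+#if 0
+	rsb_nnz_idx_t rnz = 0;
+	/* pass 1: NULL output arrays only set the count */
+	rsb_mtx_get_rows_sparse(RSB_TRANSPOSITION_N,NULL,mtxAp,NULL,NULL,NULL,frA,lrA,&rnz,RSB_FLAG_NOFLAGS);
+	/* ... allocate VA, IA, JA for rnz entries ... */
+	/* pass 2: actual extraction */
+	rsb_mtx_get_rows_sparse(RSB_TRANSPOSITION_N,NULL,mtxAp,VA,IA,JA,frA,lrA,&rnz,RSB_FLAG_NOFLAGS);
+#endif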
+
+rsb_err_t rsb__do_scal(struct rsb_mtx_t * mtxAp, const void * d, rsb_trans_t trans)
+{
+	/* TODO : what should be the semantics of scaling a symmetric matrix ? */
+	/* FIXME : and error handling ? **/
+	rsb_err_t errval = RSB_ERR_UNSUPPORTED_OPERATION;
+
+#ifdef RSB_HAVE_OPTYPE_SCALE
+
+#if RSB_ALLOW_ZERO_DIM 
+	if(RSB_ANY_MTX_DIM_ZERO(mtxAp))
+	{
+		errval = RSB_ERR_NO_ERROR;
+		goto err; /* FIXME: skipping further checks */
+	}
+#endif
+
+	if(!mtxAp)
+	//	return RSB_ERR_NO_ERROR;
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_EM);
+	}
+
+	if( rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t i,j;
+		struct rsb_mtx_t * submatrix=NULL;
+
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix)
+		{
+			rsb_coo_idx_t off;
+			if(RSB_DOES_NOT_TRANSPOSE(trans))
+				off=submatrix->roff-mtxAp->roff;
+			else
+				off=submatrix->coff-mtxAp->coff;
+	
+			rsb__do_scal(submatrix,((rsb_byte_t*)d)+mtxAp->el_size*off,trans);
+		}
+		//return RSB_ERR_NO_ERROR;
+		{
+			errval = RSB_ERR_NO_ERROR;
+			goto err;
+		}
+	}
+	else
+		errval = rsb__do_scale(mtxAp,trans,d);
+#else /* RSB_HAVE_OPTYPE_SCALE */
+#endif /* RSB_HAVE_OPTYPE_SCALE */
+err:
+	return errval;
+}
+
+rsb_err_t rsb__dodo_getdiag( const struct rsb_mtx_t * mtxAp, void * diagonal )
+{
+  	// FIXME : missing documentation and error checks!
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!mtxAp)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_E_MTXAP);
+	}
+	if(!RSB_BLOCK_CROSSED_BY_DIAGONAL(0,0,mtxAp->nr,mtxAp->nc))
+	{
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+
+	if(1)
+	//if( mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES_COO )
+	{
+		// FIXME: THIS IS SLOW, TEMPORARY
+		rsb_coo_idx_t i;
+		long nt = rsb_get_num_threads();
+		const int gdc = RSB_DIVIDE_IN_CHUNKS(mtxAp->nr,nt);
+		#pragma omp parallel for schedule(static,gdc) reduction(|:errval)  RSB_NTC
+		for(i=0;i<mtxAp->nr;++i)
+			RSB_DO_ERROR_CUMULATE(errval,rsb__do_get_coo_element(mtxAp,((rsb_char_t*)diagonal)+mtxAp->el_size*(i),i,i));
+		errval = RSB_ERR_NO_ERROR;
+	}
+#if 0
+	else
+	{
+		RSB_BZERO(diagonal,mtxAp->el_size*RSB_MTX_DIAG_SIZE(mtxAp));
+		errval = rsb_do_getdiag(mtxAp,diagonal);
+	}
+#endif
+err:
+	return errval;
+}
+
+static rsb_err_t rsb_do_elemental_scale(struct rsb_mtx_t * mtxAp, const void * alphap)
+{
+	/*!
+	   \ingroup rsb_doc_matrix_handling
+
+	   Computes \f$ A \leftarrow \alpha A \f$.
+
+	   \param \rsb_mtxt_inp_param_msg
+	   \param \rsb_alpha_inp_param_msg
+	   \return \rsberrcodemsg
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * submatrix=NULL;
+
+	if(!mtxAp)
+       	{
+		errval = RSB_ERR_BADARGS;
+	       	goto err;
+	}
+#if RSB_WANT_PARALLEL_ELEMENTAL_OPS
+	if(1)
+		errval = rsb__do_elemental_scale_parallel(mtxAp,alphap);
+#else /* RSB_WANT_PARALLEL_ELEMENTAL_OPS */
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t smi;
+		//#pragma omp parallel for schedule(static,1) reduction(|:errval)  shared(mtxAp) RSB_NTC
+		RSB_SUBMATRIX_FOREACH_LEAF(mtxAp,submatrix,smi)
+			RSB_DO_ERROR_CUMULATE(errval,rsb__cblas_Xscal(submatrix->typecode,submatrix->nnz,alphap,submatrix->VA,1));
+	}
+#endif /* RSB_WANT_PARALLEL_ELEMENTAL_OPS */
+	else
+		RSB_DO_ERROR_CUMULATE(errval,rsb__cblas_Xscal(mtxAp->typecode,mtxAp->nnz,alphap,mtxAp->VA,1));
+err:
+	return errval;
+}
+
+static rsb_err_t rsb_do_elemental_scale_inv(struct rsb_mtx_t * mtxAp, const void * alphap)
+{
+	/*!
+	   \ingroup rsb_doc_matrix_handling
+
+	   Computes \f$ A \leftarrow \frac{1}{\alpha} A \f$.
+
+	   \param \rsb_mtxt_inp_param_msg
+	   \param \rsb_alpha_inp_param_msg
+	   \return \rsberrcodemsg
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * submatrix=NULL;
+
+	if(!mtxAp)
+       	{
+		errval = RSB_ERR_BADARGS;
+	       	goto err;
+	}
+
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t smi;
+		//#pragma omp parallel for schedule(static,1) reduction(|:errval)  shared(mtxAp) RSB_NTC
+		RSB_SUBMATRIX_FOREACH_LEAF(mtxAp,submatrix,smi)
+			RSB_DO_ERROR_CUMULATE(errval,rsb__vector_scale_inv(submatrix->VA,alphap,submatrix->typecode,submatrix->nnz));
+	}
+	else
+		RSB_DO_ERROR_CUMULATE(errval,rsb__vector_scale_inv(mtxAp->VA,alphap,mtxAp->typecode,mtxAp->nnz));
+err:
+	return errval;
+}
+
+static rsb_err_t rsb_do_elemental_pow(struct rsb_mtx_t * mtxAp, const void * alphap)
+{
+	/*!
+	   \ingroup rsb_doc_matrix_handling
+
+	   Raises each matrix element to the given power.
+
+	   \param \rsb_mtxt_inp_param_msg
+	   \param \rsb_alpha_inp_param_msg
+	   \return \rsberrcodemsg
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * submatrix=NULL;
+
+	if(!mtxAp)
+       	{
+		errval = RSB_ERR_BADARGS;
+	       	goto err;
+	}
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t smi;
+		//#pragma omp parallel for schedule(static,1) reduction(|:errval)  shared(mtxAp) RSB_NTC
+		RSB_SUBMATRIX_FOREACH_LEAF(mtxAp,submatrix,smi)
+			RSB_DO_ERROR_CUMULATE(errval,rsb__util_vector_pow(submatrix->VA,submatrix->typecode,alphap,submatrix->nnz));
+	}
+	else
+		RSB_DO_ERROR_CUMULATE(errval,rsb__util_vector_pow(mtxAp->VA,mtxAp->typecode,alphap,mtxAp->nnz));
+err:
+	return errval;
+}
+
+static rsb_err_t rsb_dodo_negation(struct rsb_mtx_t * mtxAp)
+{
+#ifdef RSB_HAVE_OPTYPE_NEGATION
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!mtxAp)
+		{errval = RSB_ERR_BADARGS;goto err;}
+
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t i,j;
+		struct rsb_mtx_t * submatrix=NULL;
+
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix)
+			RSB_DO_ERROR_CUMULATE(errval,rsb_dodo_negation(submatrix));
+	}
+	else
+		errval = rsb_do_negation(mtxAp,0xf1c57415,RSB_DEFAULT_TRANSPOSITION);
+#else /* RSB_HAVE_OPTYPE_NEGATION */
+	/* FIXME : eliminate negation as mop ! */
+//	return RSB_ERR_UNSUPPORTED_OPERATION;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(!mtxAp)
+		{errval = RSB_ERR_BADARGS;goto err;}
+
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t i,j;
+		struct rsb_mtx_t * submatrix=NULL;
+
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix)
+			RSB_DO_ERROR_CUMULATE(errval,rsb_dodo_negation(submatrix));
+	}
+	else
+		/* FIXME : assuming elements are contiguous ! */
+		errval = rsb__util_do_negate(mtxAp->VA,mtxAp->typecode,mtxAp->element_count);
+#endif /* RSB_HAVE_OPTYPE_NEGATION */
+err:
+	return errval;
+}
+
+rsb_err_t rsb__do_elemental_unop(struct rsb_mtx_t * mtxAp, enum rsb_elopf_t elop_flags)
+{
+	// FIXME: untested
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!mtxAp) {errval = RSB_ERR_BADARGS; goto err;}
+	switch(elop_flags)
+	{
+		case(RSB_ELOPF_NEG):
+		errval = rsb_dodo_negation(mtxAp);
+		break;
+		//#define RSB_ELOPF_SQRT		0x00000010		/*!< Elemental square root (usable with rsb_mtx_elemental_unop). */
+		/*
+		case(RSB_ELOPF_SQRT):
+		errval=....(mtxAp);
+		break;
+		*/
+	/*	case(RSB_ELOPF_TRANS):
+		errval = rsb_transpose(&mtxAp);
+		break;
+		case(RSB_ELOPF_HTRANS):
+		errval = rsb_htranspose(&mtxAp);
+		break;*/
+		default:
+		{errval = RSB_ERR_BADARGS; goto err;}
+	}
+err:
+	return errval;
+}
+
+rsb_err_t rsb__do_elemental_binop(struct rsb_mtx_t * mtxAp, enum rsb_elopf_t elop_flags, const void * opp)
+{
+	// FIXME: untested
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_trans_t transA = RSB_TRANSPOSITION_N; 
+	void * topp=NULL;
+
+	if(!mtxAp) {errval = RSB_ERR_BADARGS; goto err;}
+	if(!opp) {errval = RSB_ERR_BADARGS; goto err;}
+
+	switch(elop_flags)
+	{
+		case(RSB_ELOPF_SCALE_COLS_REAL):
+		case(RSB_ELOPF_SCALE_COLS):
+		transA = RSB_TRANSPOSITION_T; 
+		break;
+	}
+
+	switch(elop_flags)
+	{
+		case(RSB_ELOPF_SCALE_COLS_REAL):
+		case(RSB_ELOPF_SCALE_ROWS_REAL):
+		if( RSB_IS_MATRIX_TYPE_COMPLEX(mtxAp->typecode) )
+		{
+			/* FIXME: this is inefficient */
+			rsb_type_t typecode = RSB_NUMERICAL_TYPE_REAL_TYPE(mtxAp->typecode);
+			if( NULL == (topp = rsb__calloc_vector(mtxAp->nr,mtxAp->typecode)) )
+			{
+				errval = RSB_ERR_ENOMEM;
+				goto err;
+			}
+			errval = rsb__cblas_Xcopy(typecode,mtxAp->nr,opp,1,topp,2);
+		}
+		/* fall through: scale with the temporary vector */
+		case(RSB_ELOPF_SCALE_COLS):
+		case(RSB_ELOPF_SCALE_ROWS):
+		errval = rsb__do_scal(mtxAp,topp?topp:opp,transA);
+		break;
+		case(RSB_ELOPF_MUL):
+		errval = rsb_do_elemental_scale(mtxAp,opp);
+		break;
+		case(RSB_ELOPF_DIV):
+		errval = rsb_do_elemental_scale_inv(mtxAp,opp);
+		break;
+		case(RSB_ELOPF_POW):
+		errval = rsb_do_elemental_pow(mtxAp,opp);
+		break;
+		default:
+		{
+			errval = RSB_ERR_BADARGS;
+		       	goto err;
+		}
+	}
+err:
+	RSB_CONDITIONAL_FREE(topp);
+	return errval;
+}
+
+rsb_nnz_idx_t rsb__dodo_get_rows_nnz(const struct rsb_mtx_t *mtxAp, rsb_blk_idx_t fr, rsb_blk_idx_t lr, rsb_flags_t flags, rsb_err_t * errvalp)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t rnz = 0;
+
+	RSB_DEBUG_ASSERT(fr <= lr);
+
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE))
+		lr--,fr--;
+	errval = rsb_do_get_rows_nnz(mtxAp,fr,lr,&rnz);
+	RSB_CONDITIONAL_ERRPSET(errvalp,errval);
+	if(RSB_SOME_ERROR(errval))
+		rnz=0;
+	return rnz;
+}
+
+#if RSB_WANT_PARALLEL_ELEMENTAL_OPS
+rsb_err_t rsb__do_elemental_scale_parallel(struct rsb_mtx_t * mtxAp, const void * alphap)
+{
+	/**
+		\ingroup gr_internals
+		TODO: move to somewhere else
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	const rsb_thread_t wet = rsb_get_num_threads();
+
+	if(RSB_UNLIKELY(mtxAp->nnz<wet*RSB_MIN_THREAD_MEMCPY_NNZ))
+	{
+		RSB_DO_ERROR_CUMULATE(errval,rsb__cblas_Xscal(mtxAp->typecode,mtxAp->nnz,alphap,((rsb_byte_t*)mtxAp->VA),1));
+	}
+	else
+	{
+		rsb_nnz_idx_t wi;
+		size_t cnz=(mtxAp->nnz+wet-1)/wet;	/* chunk size */
+		#pragma omp parallel for schedule(static,1) reduction(|:errval) RSB_NTC
+		for(wi=0;wi<wet;++wi)
+		{
+			size_t coff=wi*cnz;	/* this chunk's offset into VA */
+			size_t cnnz=(wi<wet-1)?cnz:mtxAp->nnz-((wet-1)*cnz);	/* the last chunk may be shorter */
+			RSB_DO_ERROR_CUMULATE(errval,rsb__cblas_Xscal(mtxAp->typecode,cnnz,alphap,((rsb_byte_t*)mtxAp->VA)+RSB_SIZEOF(mtxAp->typecode)*coff,1));
+		}
+	}
+	return errval;
+}
+#endif /* RSB_WANT_PARALLEL_ELEMENTAL_OPS */
+
+rsb_err_t rsb__do_matrix_add_to_dense(const void *alphap, const struct rsb_mtx_t * mtxAp, rsb_nnz_idx_t ldB, rsb_nnz_idx_t nr, rsb_nnz_idx_t nc, rsb_bool_t rowmajor, void * Bp)
+{
+	//  FIXME: could this be documented in two groups (mops and unfinished) at the same time ?
+	//  TODO: what about supporting transA ?
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * submatrix = NULL;
+	rsb_aligned_t pone[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+
+	if(!alphap)
+	{
+		rsb__util_set_area_to_converted_integer(&pone[0],mtxAp->typecode,+1);
+		alphap = &pone[0];
+	}
+
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t smi;
+		//#pragma omp parallel for schedule(static,1) reduction(|:errval)  shared(mtxAp) RSB_NTC
+		RSB_SUBMATRIX_FOREACH_LEAF(mtxAp,submatrix,smi)
+			RSB_DO_ERROR_CUMULATE(errval,rsb__do_add_submatrix_to_dense(submatrix,alphap,Bp,ldB,nr,nc,rowmajor));
+	}
+	else
+		RSB_DO_ERROR_CUMULATE(errval,rsb__do_add_submatrix_to_dense(mtxAp,alphap,Bp,ldB,nr,nc,rowmajor));
+//err:
+	return errval;
+}
+
+rsb_err_t rsb__do_switch_rsb_mtx_to_csr_sorted(struct rsb_mtx_t * mtxAp, void ** VAP, rsb_coo_idx_t ** IAP, rsb_coo_idx_t ** JAP, rsb_flags_t flags)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_coo_matrix_t coo;
+	const rsb_nnz_idx_t nnz=mtxAp?mtxAp->nnz:0;
+	const rsb_coo_idx_t m=mtxAp?mtxAp->nr:0;
+	//const rsb_coo_idx_t k=mtxAp?mtxAp->nc:0;
+	const rsb_flags_t mflags=mtxAp?mtxAp->flags:RSB_FLAG_NOFLAGS;
+
+	if(!mtxAp)
+       	{
+	       	errval = RSB_ERR_BADARGS;
+	       	goto err;
+       	}
+
+	if(!RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS))
+	{
+	       	errval = RSB_ERR_BADARGS;
+	       	goto err;
+       	}
+
+	if(!IAP || !JAP || !VAP)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_E_VIJP);
+	}
+	errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_coo_sorted(mtxAp,&coo);
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+	errval = rsb__util_compress_to_row_pointers_array(NULL,nnz,m,mflags,flags,coo.IA);
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE))
+		rsb__util_nnz_array_to_fortran_indices(coo.JA,nnz);
+	*JAP=coo.JA;
+	*IAP=coo.IA;
+	*VAP=coo.VA;
+err:
+	return errval;
+}
+
+rsb_err_t rsb__do_get_csr(rsb_type_t typecode, const struct rsb_mtx_t *mtxAp, rsb_byte_t * VA, rsb_nnz_idx_t * RP, rsb_coo_idx_t * JA, rsb_flags_t flags)
+{
+	/* NOTE this writes more than mtxAp->nnz elements! */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!mtxAp)
+	{
+	       	errval = RSB_ERR_BADARGS;
+	       	goto err;
+       	}
+
+#define RSB_WANT_OLD_TO_CSR_SEMANTICS 0
+
+	if((mtxAp->typecode != typecode) || (mtxAp->flags != flags))/* FIXME: condition on cflags is unnecessarily restrictive */
+	{
+		struct rsb_mtx_t * mtxCp = rsb__clone_simple(mtxAp);
+#if RSB_WANT_OLD_TO_CSR_SEMANTICS
+#else /* RSB_WANT_OLD_TO_CSR_SEMANTICS */
+		/* RSB_DO_FLAG_DEL(flags,RSB_FLAG_ALL_STRUCTURAL_FLAGS);*/ /* commented out 20120905: this caused csr expansion even if user explicitly did not required it  */
+#endif /* RSB_WANT_OLD_TO_CSR_SEMANTICS */
+		errval = rsb__dodo_get_csr(mtxCp,&VA,&RP,&JA);
+		RSB_MTX_FREE(mtxCp);
+	}
+	else
+		errval = rsb__dodo_get_csr(mtxAp,&VA,&RP,&JA);
+
+	if(RSB_SOME_ERROR(errval))
+	{
+	       	goto err;
+       	}
+//#if RSB_WANT_OLD_TO_CSR_SEMANTICS
+	/* FIXME: shall move C -> Fortran indices semantics to rsb__dodo_get_csr */
+	if(flags & RSB_FLAG_FORTRAN_INDICES_INTERFACE)
+		rsb__util_nnz_array_to_fortran_indices(RP,mtxAp->nr+1),
+		rsb__util_nnz_array_to_fortran_indices(JA,mtxAp->nnz);
+//#endif
+err:
+	return errval;
+}
+
+rsb_err_t rsb__do_get_matrix_info(const struct rsb_mtx_t *mtxAp, enum rsb_mif_t miflags, void* info, size_t buflen)
+{
+	/*!
+	   \ingroup FIXME 
+	   \warning \rsb_warn_unfinished_msg
+		FIXME: UNFINISHED, UNTESTED
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_real_t rrv=0;
+	size_t szv=0;
+	rsb_coo_idx_t civ=0;
+	rsb_nnz_idx_t niv=0;
+	rsb_flags_t fiv=0;
+	rsb_type_t tiv=0;
+	rsb_blk_idx_t biv=0;
+	char*cis=(char*)info;
+
+	if(!mtxAp || !info)
+	{
+	       	errval = RSB_ERR_BADARGS;
+	       	goto err;
+	}
+	switch(miflags)
+	{
+		case RSB_MIF_INDEX_STORAGE_IN_BYTES__TO__SIZE_T:
+		{
+		       	szv = rsb__get_index_storage_amount(mtxAp);
+		       	if(buflen<=0) *(size_t*)info = szv;
+		        else snprintf(cis,buflen,"%zd",szv);
+	       	}
+		break;
+		case RSB_MIF_INDEX_STORAGE_IN_BYTES_PER_NNZ__TO__RSB_REAL_T:
+		{
+		  size_t isa = rsb__get_index_storage_amount(mtxAp);
+		  rrv=((rsb_real_t)isa)/((rsb_real_t)mtxAp->nnz);
+		  if(buflen<=0) *(rsb_real_t*)info=rrv;
+		  else snprintf(cis,buflen,"%lg",rrv);
+		}
+		break;
+		case RSB_MIF_MATRIX_ROWS__TO__RSB_COO_INDEX_T:
+		{
+		       	civ = (mtxAp->nr);
+	                if(buflen<=0) *(rsb_coo_idx_t*)info = civ;
+		        else snprintf(cis,buflen,"%d",civ);
+	       	}
+		break;
+		case RSB_MIF_MATRIX_COLS__TO__RSB_COO_INDEX_T:
+		{
+		       	civ = (mtxAp->nc);
+	                if(buflen<=0) *(rsb_coo_idx_t*)info = civ;
+		        else snprintf(cis,buflen,"%d",civ);
+	       	}
+		break;
+		case RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T:
+		{
+		       	niv = (mtxAp->nnz);
+	                if(buflen<=0) *(rsb_nnz_idx_t*)info = niv;
+		        else snprintf(cis,buflen,"%d",niv);
+	       	}
+		break;
+		case RSB_MIF_TOTAL_SIZE__TO__SIZE_T:
+		{
+		       	szv = rsb__get_sizeof(mtxAp);
+		       	if(buflen<=0) *(size_t*)info = szv;
+		        else snprintf(cis,buflen,"%zd",szv);
+	       	}
+		break;
+		case RSB_MIF_MATRIX_FLAGS__TO__RSB_FLAGS_T:
+		{
+		       	fiv = (mtxAp->flags);
+		       	if(buflen<=0) *(rsb_flags_t*)info = fiv;
+		        else snprintf(cis,buflen,"%d",fiv);
+	       	}
+		break;
+		case RSB_MIF_MATRIX_TYPECODE__TO__RSB_TYPE_T:
+		{
+		       	tiv = (mtxAp->typecode);
+		       	if(buflen<=0) *(rsb_type_t*)info = tiv;
+		        else snprintf(cis,buflen,"%d",tiv);
+	       	}
+		break;
+		case RSB_MIF_MATRIX_INFO__TO__CHAR_P:
+		{
+		       	if(buflen<=0) { errval = RSB_ERR_BADARGS; goto err; } /* only the string form is meaningful here */
+		        else snprintf(cis,buflen,RSB_PRINTF_MTX_SUMMARY_ARGS(mtxAp));
+
+	       	}
+		break;
+		case RSB_MIF_LEAVES_COUNT__TO__RSB_BLK_INDEX_T:				
+		{
+		       	biv = (mtxAp->all_leaf_matrices_n);
+		       	if(buflen<=0) *(rsb_blk_idx_t*)info = biv;
+		        else snprintf(cis,buflen,"%d",biv);
+	       	}
+		break;
+		default:
+		errval = RSB_ERR_GENERIC_ERROR;
+	}
+err:
+	return errval;
+}
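+
+/* Usage sketch (an assumption: the public rsb_mtx_get_info() resolves to the
+ * function above with buflen==0, i.e. the binary rather than the string
+ * output path), querying the nonzeroes count: */
+#if 0
+	rsb_nnz_idx_t nnzA = 0;
+	rsb_mtx_get_info(mtxAp,RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T,&nnzA);
+#endif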
+
+rsb_err_t rsb__do_check_leak(void)
+{
+	/*!
+	   \ingroup rsb_doc_library
+	  
+	   Called after \ref rsb_lib_exit(), will report on the standard output stream
+	   (see #RSB_IO_WANT_OUTPUT_STREAM) whether some previously allocated
+	   memory area was not freed by librsb.
+	   \n
+	   Will report leak information only if built with the #RSB_DISABLE_ALLOCATOR_WRAPPER symbol undefined.
+	   \n
+	   Will return #RSB_ERR_NO_ERROR on no leak; an error otherwise.
+	   \n
+	  
+	   \warning \rsb_warn_soon_to_be_deprecated_msg 
+	   \return \rsberrcodemsg
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(rsb__get_g_rsb_memory_count())
+	{
+		RSB_INFO("WARNING: allocated memory  : %zu : POSSIBLE MEMORY LEAK\n",rsb__get_g_rsb_memory_count());
+		RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_INTERNAL_ERROR);
+	}
+
+	if(rsb__get_g_rsb_allocations_count())
+	{
+		RSB_INFO("WARNING: allocations count : %zu : POSSIBLE MEMORY LEAK\n",rsb__get_g_rsb_allocations_count());
+		RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_INTERNAL_ERROR);
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_matrix_norm(const struct rsb_mtx_t * mtxAp , void * np, enum rsb_extff_t flags)
+{
+	rsb_err_t errval = RSB_ERR_BADARGS;
+
+	switch(flags)
+	{
+		case RSB_EXTF_NORM_ONE:
+		errval = rsb__do_infinity_norm(mtxAp,np,RSB_BOOL_FALSE,RSB_TRANSPOSITION_T);
+		break;
+		case RSB_EXTF_NORM_TWO:/* FIXME: UNTESTED ! */
+		errval = rsb__cblas_Xnrm2(mtxAp->typecode,mtxAp->nnz,rsb__do_get_first_submatrix(mtxAp)->VA,1,np);
+		break;
+		case RSB_EXTF_NORM_INF:
+		errval = rsb__do_infinity_norm(mtxAp,np,RSB_BOOL_FALSE,RSB_TRANSPOSITION_N);
+		break;
+		default:
+		break;
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_matrix_compute(const struct rsb_mtx_t * mtxAp , void * dp, enum rsb_extff_t flags)
+{
+	rsb_err_t errval = RSB_ERR_BADARGS;
+
+#if RSB_ALLOW_ZERO_DIM 
+	if(RSB_ANY_MTX_DIM_ZERO(mtxAp))
+	{
+		errval = RSB_ERR_NO_ERROR;
+		goto ret; /* FIXME: skipping further error checks */
+	}
+#endif
+
+	if( mtxAp == NULL )
+		goto ret;
+
+	switch(flags)
+	{
+		case RSB_EXTF_SUMS_ROW:
+		errval = rsb__do_rowssums(mtxAp,RSB_TRANSPOSITION_N,dp);
+		break;
+		case RSB_EXTF_SUMS_COL:
+		errval = rsb__do_rowssums(mtxAp,RSB_TRANSPOSITION_T,dp);
+		break;
+		case RSB_EXTF_ASUMS_ROW:
+		errval = rsb__do_absolute_rows_sums( mtxAp , dp);
+		break;
+		case RSB_EXTF_ASUMS_COL:
+		errval = rsb__do_absolute_columns_sums(mtxAp,dp);
+		break;
+		case RSB_EXTF_DIAG:
+		errval = rsb__dodo_getdiag(mtxAp,dp);
+		break;
+		default:
+		break;
+	}
+ret:
+	return errval;
+}
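+
+/* Usage sketch (an assumption: the public rsb_mtx_get_vec() resolves here;
+ * RSB_EXTF_DIAG selects the rsb__dodo_getdiag() path): extracting the main
+ * diagonal into a caller-provided buffer dp holding min(nr,nc) elements of
+ * the matrix numerical type. */
+#if 0
+	rsb_mtx_get_vec(mtxAp,dp,RSB_EXTF_DIAG);
+#endif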
+
+rsb_err_t rsb__do_load_vector_file_as_matrix_market(const rsb_char_t * filename, rsb_type_t typecode, void * yp, rsb_coo_idx_t *yvlp)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!filename || ((!yvlp) && (!yp)))
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_EM);
+	}
+	if(yvlp)
+	{
+		/* FIXME: temporarily ignoring second dimension! */
+		rsb_coo_idx_t yvk=0, yvm=0;
+		rsb_bool_t is_vector = RSB_BOOL_FALSE;
+
+		if(RSB_SOME_ERROR(errval = rsb__util_mm_info_matrix_f(filename,&yvm,&yvk,NULL,NULL,NULL,NULL,NULL,NULL,NULL,&is_vector)) )
+		{
+			RSB_PERR_GOTO(err,RSB_ERRM_EM);
+		}
+		*yvlp=yvm;
+	}
+	if(yp)
+	{
+		/* printf("stub: reading in %s...\n",filename); */
+		rsb_nnz_idx_t vnz=0;
+
+		errval = rsb__util_mm_load_vector_f(filename,&yp,&vnz,typecode);
+	}
+err:
+	return errval;
+}
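+
+/* Usage sketch (an assumption: rsb_file_vec_load() is the public name of the
+ * loader above): query the vector length first with a NULL data pointer,
+ * then load into an allocated buffer Yp. */
+#if 0
+	rsb_coo_idx_t yvl = 0;
+	rsb_file_vec_load("y.mtx",typecode,NULL,&yvl);	/* pass 1: get the length */
+	/* ... allocate yvl elements at Yp ... */
+	rsb_file_vec_load("y.mtx",typecode,Yp,NULL);	/* pass 2: read the data */
+#endif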
+
+struct rsb_mtx_t * rsb__dodo_load_matrix_file_as_matrix_market(const rsb_char_t * filename, rsb_flags_t flags, rsb_type_t typecode, rsb_err_t *errvalp)
+{
+	// FIXME
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+       	errval = rsb__do_load_matrix_file_as_matrix_market(&mtxAp,filename,flags,typecode);
+	RSB_CONDITIONAL_ERRPSET(errvalp,errval);
+	return mtxAp;
+}
+
+rsb_bool_t rsb__do_was_initialized(void)
+{
+	/*!
+	   \ingroup rsb_doc_library
+	  
+	   Call this function to know whether the library had already been initialized or not.
+	   \n
+	   This function is mainly intended to be used in between \ref rsb_lib_exit() and \ref rsb_lib_init() calls,
+	   or generally after one or more calls to \ref rsb_lib_init() have already been done.
+	   \n
+	   It is not meant to be called before the 'first' initialization ever, unless 
+	   the user is sure this library was built on a system which supports default
+	   initialization to zero of static variables (which indeed is supported by most standards;
+	   e.g.: ANSI C: http://flash-gordon.me.uk/ansi.c.txt ).  
+	  
+	   \return #RSB_BOOL_TRUE if it was initialized, #RSB_BOOL_FALSE otherwise.
+	 */
+	/* TODO: redocument elsewhere! redundant function! */
+	return (rsb_global_session_handle.rsb_g_initialized == RSB_BOOL_TRUE) ? RSB_BOOL_TRUE : RSB_BOOL_FALSE;
+}
+
+static rsb_err_t rsb__do_switch_rsb_mtx_to_coo_unsorted(struct rsb_mtx_t * mtxAp, void ** VAP, rsb_coo_idx_t ** IAP, rsb_coo_idx_t ** JAP, rsb_flags_t flags)
+{
+	struct rsb_coo_matrix_t coo;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	RSB_ASSERT( RSB_DO_FLAG_HAS(mtxAp->flags, RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS) );
+
+	if(!IAP || !JAP || !VAP)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_EM);
+	}
+
+	errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_coo_unsorted(mtxAp,&coo);
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE))
+		rsb__util_nnz_array_to_fortran_indices(coo.IA,coo.nnz),
+		rsb__util_nnz_array_to_fortran_indices(coo.JA,coo.nnz);
+	*JAP = coo.JA;
+	*IAP = coo.IA;
+	*VAP = coo.VA;
+err:
+	return errval;
+}
+
+static rsb_err_t rsb__do_switch_rsb_mtx_to_coo_sorted(struct rsb_mtx_t * mtxAp, void ** VAP, rsb_coo_idx_t ** IAP, rsb_coo_idx_t ** JAP, rsb_flags_t flags)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_coo_matrix_t coo;
+	const rsb_nnz_idx_t nnz = mtxAp ? mtxAp->nnz : 0;
+
+	RSB_ASSERT( RSB_DO_FLAG_HAS(mtxAp->flags, RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS) );
+
+	if(!IAP || !JAP || !VAP)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err, RSB_ERRM_EM);
+	}
+
+	RSB_BZERO_P(&coo);
+	errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_coo_sorted(mtxAp, &coo);
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err, RSB_ERRM_EM);
+	}
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE))
+		rsb__util_nnz_array_to_fortran_indices(coo.IA, nnz),
+		rsb__util_nnz_array_to_fortran_indices(coo.JA, nnz);
+	*JAP = coo.JA;
+	*IAP = coo.IA;
+	*VAP = coo.VA;
+err:
+	return errval;
+}
+
+rsb_err_t rsb__do_switch_rsb_mtx_to_coo(struct rsb_mtx_t * mtxAp, void ** VAP, rsb_coo_idx_t ** IAP, rsb_coo_idx_t ** JAP, rsb_flags_t flags)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!mtxAp)
+       	{
+	       	errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err, RSB_ERRM_E_MTXAP"\n");
+       	}
+
+	/* Purpose of the following is avoidance of internally allocated memory leakage. */
+	/* TODO: As an improvement, one may relax this constraint when the allocation wrapper is off. */
+	if(!RSB_DO_FLAG_HAS(mtxAp->flags, RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS))
+       	{
+	       	errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err, RSB_ERRM_IMNIP);
+       	}
+
+	if(RSB_DO_FLAG_HAS(flags, RSB_FLAG_SORTED_INPUT))
+		errval = rsb__do_switch_rsb_mtx_to_coo_sorted(mtxAp, VAP, IAP, JAP, flags);
+	else
+		errval = rsb__do_switch_rsb_mtx_to_coo_unsorted(mtxAp, VAP, IAP, JAP, flags);
+err:
+	return errval;
+}
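+
+/* Usage sketch (an assumption: the public rsb_mtx_switch_to_coo() maps to the
+ * function above): it works only on matrices assembled in place (that is,
+ * with RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS), and hands back to the caller
+ * the very arrays supplied at assembly time. */
+#if 0
+	void *VA = NULL; rsb_coo_idx_t *IA = NULL, *JA = NULL;
+	rsb_mtx_switch_to_coo(mtxAp,&VA,&IA,&JA,RSB_FLAG_SORTED_INPUT);
+	/* mtxAp shall not be used afterwards; VA, IA, JA belong to the caller */
+#endif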
+
+#if RSB_WANT_COO_BEGIN 
+struct rsb_mtx_t * rsb__do_mtx_alloc_from_coo_begin(rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_flags_t flags, rsb_err_t * errvalp)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	blas_sparse_matrix bmtxA = RSB_BLAS_INVALID_VAL;
+
+	rsb__init_struct(mtxAp = rsb__calloc(sizeof(struct rsb_mtx_t)));
+	if(!mtxAp)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_E_MTXAP"\n");
+	}
+	mtxAp->RSB_MTX_BMF = RSB_MTX_BMV;
+	bmtxA = mtxAp->RSB_MTX_BDF = rsb__BLAS_Xuscr_begin(nrA,ncA,typecode);
+	if( mtxAp->RSB_MTX_BDF == RSB_BLAS_INVALID_VAL )
+	{
+		errval = RSB_ERR_GENERIC_ERROR;
+		RSB_CONDITIONAL_FREE(mtxAp);
+		RSB_PERR_GOTO(err,RSB_ERRM_IPEWIEM);
+	}
+	/* FIXME : the following need an improvement  */
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE)) rsb__BLAS_ussp( bmtxA, blas_one_base);
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_UNIT_DIAG_IMPLICIT)) rsb__BLAS_ussp( bmtxA, blas_unit_diag );
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER_TRIANGULAR)) rsb__BLAS_ussp( bmtxA, blas_lower_triangular);
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER_TRIANGULAR)) rsb__BLAS_ussp( bmtxA, blas_upper_triangular);
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER_SYMMETRIC)) rsb__BLAS_ussp( bmtxA, blas_lower_symmetric);
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER_SYMMETRIC)) rsb__BLAS_ussp( bmtxA, blas_upper_symmetric);
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER_HERMITIAN)) rsb__BLAS_ussp( bmtxA, blas_lower_hermitian);
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER_HERMITIAN)) rsb__BLAS_ussp( bmtxA, blas_upper_hermitian);
+err:
+	RSB_CONDITIONAL_ERRPSET(errvalp,errval);
+	return mtxAp;
+}
+
+rsb_err_t rsb__do_mtx_alloc_from_coo_end(struct rsb_mtx_t ** mtxApp)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	blas_sparse_matrix bmtxA = RSB_BLAS_INVALID_VAL;
+	struct rsb_mtx_t * mtxBp = NULL;
+	struct rsb_mtx_t * mtxAp = NULL;
+
+	if(!mtxApp || !*mtxApp)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_E_MTXAPP);
+	}
+
+	mtxAp = *mtxApp ;
+
+	if( !RSB_MTX_HBDF( mtxAp ) )
+	{
+		/* errval = RSB_ERR_NO_ERROR; */
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_DNSAMIWAFCB);
+	}
+
+	bmtxA = RSB_MTX_HBDFH(mtxAp);
+	/* FIXME: missing serious check on mtxAp->flags ! */
+	if( rsb__BLAS_Xuscr_end_flagged(bmtxA,NULL) == RSB_BLAS_INVALID_VAL )
+	{
+	       	errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_PFTM);
+		/* FIXME: insufficient cleanup */
+	}
+	mtxBp = rsb__BLAS_inner_matrix_retrieve(bmtxA);
+	*mtxApp = mtxBp;
+	rsb__free(mtxAp);
+	rsb__BLAS_handle_free(bmtxA); /* ignoring return value ... */
+err:
+	return errval;
+}
+#endif /* RSB_WANT_COO_BEGIN */
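+
+/* Usage sketch (an assumption: the rsb_mtx_alloc_from_coo_begin()/_end() pair
+ * declared in rsb.h maps to the two functions above, with rsb_mtx_set_vals()
+ * performing the insertions in between; nnzA, typecode, nrA, ncA and the
+ * VA, IA, JA arrays are caller-provided): */
+#if 0
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t *mtxAp = rsb_mtx_alloc_from_coo_begin(nnzA,typecode,nrA,ncA,RSB_FLAG_NOFLAGS,&errval);
+	rsb_mtx_set_vals(mtxAp,VA,IA,JA,nnzA,RSB_FLAG_DUPLICATES_SUM);
+	rsb_mtx_alloc_from_coo_end(&mtxAp);	/* assembles the final RSB structure */
+#endif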
+
+rsb_err_t rsb__do_upd_vals(struct rsb_mtx_t * mtxAp, enum rsb_elopf_t elop_flags, const void * omegap)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	switch(elop_flags)
+	{
+		case(RSB_ELOPF_MUL):
+		case(RSB_ELOPF_DIV):
+		case(RSB_ELOPF_POW):
+		case(RSB_ELOPF_SCALE_ROWS):
+		case(RSB_ELOPF_SCALE_COLS):
+		case(RSB_ELOPF_SCALE_ROWS_REAL):
+		case(RSB_ELOPF_SCALE_COLS_REAL):
+		RSB_DO_ERROR_CUMULATE(errval,rsb__do_elemental_binop(mtxAp,elop_flags,omegap));
+		break;
+		case(RSB_ELOPF_NEG):
+		RSB_DO_ERROR_CUMULATE(errval,rsb__do_elemental_unop(mtxAp,elop_flags));
+		break;
+		default: {errval = RSB_ERR_BADARGS; goto err;}
+	}
+err:
+	return errval;
+}
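+
+/* Usage sketch (an assumption: rsb_mtx_upd_vals() is the public name
+ * dispatching here), scaling every stored entry of a double matrix by 2: */
+#if 0
+	const double omega = 2.0;
+	rsb_mtx_upd_vals(mtxAp,RSB_ELOPF_MUL,&omega);
+#endif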
+
+rsb_err_t rsb__do_mtx_get_info(const struct rsb_mtx_t *mtxAp, enum rsb_mif_t miflags, void* minfop)
+{
+	rsb_err_t errval = RSB_ERR_UNIMPLEMENTED_YET;
+	errval = rsb__do_get_matrix_info(mtxAp,miflags,minfop,0);
+	return errval;
+}
+
+rsb_err_t rsb__do_file_mtx_save(const struct rsb_mtx_t * mtxAp, const rsb_char_t * filename)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	errval = rsb__do_print_matrix_stats(mtxAp,RSB_CONST_DUMP_MATRIX_MARKET,filename);
+	return errval;
+}
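+
+/* Usage sketch (an assumption: rsb_file_mtx_save() is the public wrapper): a
+ * NULL filename makes rsb__do_print_matrix_stats() write to the default
+ * stream, so this prints the matrix in Matrix Market form on standard
+ * output: */
+#if 0
+	rsb_file_mtx_save(mtxAp,NULL);
+#endif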
+
+rsb_err_t rsb__do_vec_save(const rsb_char_t * filename, rsb_type_t typecode, const void * Yp, rsb_coo_idx_t yvl)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	FILE*stream = NULL;
+	const int incX = 1;
+
+	if(filename == NULL)
+		stream = stdout;
+	else
+		stream = fopen(filename,"w");
+
+	if(stream == NULL)
+		return RSB_ERR_GENERIC_ERROR;
+
+	errval = rsb__debug_print_vector_extra(Yp,yvl,typecode,incX,0x1,stream);
+
+	if(filename != NULL)
+		fclose(stream);
+
+	return errval;
+}
+
+struct rsb_mtx_t * rsb__do_mtx_alloc_from_csr_inplace (void *VA, rsb_coo_idx_t * RP, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_blk_idx_t brA, rsb_blk_idx_t bcA, rsb_flags_t flagsA, rsb_err_t * errvalp )
+{
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+#if RSB_ALLOW_EMPTY_MATRICES
+	if( nnzA > 0 )
+#endif /* RSB_ALLOW_EMPTY_MATRICES */
+		errval = rsb__util_uncompress_row_pointers_array(RP,nrA,flagsA,flagsA,RP);
+
+	if(RSB_SOME_ERROR(errval))
+		RSB_CONDITIONAL_ERRPSET(errvalp,errval);
+	if( RSB_DO_FLAG_HAS(flagsA,RSB_FLAG_FORTRAN_INDICES_INTERFACE))
+		rsb__util_coo_array_sub(RP,nnzA,1),
+		rsb__util_coo_array_sub(JA,nnzA,1),
+		RSB_DO_FLAG_DEL(flagsA,RSB_FLAG_FORTRAN_INDICES_INTERFACE);
+	RSB_DO_FLAG_ADD(flagsA,RSB_FLAG_SORTED_INPUT);
+	if(errval == RSB_ERR_NO_ERROR)
+		mtxAp = rsb__do_mtx_alloc_from_coo_inplace(VA,RP,JA,nnzA,typecode,nrA,ncA,brA,bcA,flagsA,&errval);
+	return mtxAp;
+}
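+
+/* Usage sketch (an assumption: rsb_mtx_alloc_from_csr_inplace() is the
+ * public name): a 2x2 identity matrix in CSR form. Note that RP gets
+ * overwritten, since the row pointer array is expanded to row indices before
+ * the COO-based assembly. */
+#if 0
+	double VA[] = {1.0,1.0};
+	rsb_coo_idx_t RP[] = {0,1,2}, JA[] = {0,1};
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t *mtxAp = rsb_mtx_alloc_from_csr_inplace(VA,RP,JA,2,
+		RSB_NUMERICAL_TYPE_DOUBLE,2,2,RSB_DEFAULT_ROW_BLOCKING,
+		RSB_DEFAULT_COL_BLOCKING,RSB_FLAG_NOFLAGS,&errval);
+#endif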
+
+rsb_err_t rsb__do_file_mtx_rndr(void * pmp, const char * filename, rsb_coo_idx_t pmlWidth, rsb_coo_idx_t pmWidth, rsb_coo_idx_t pmHeight, rsb_marf_t rflags)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if( pmlWidth != pmWidth ) /* only pmlWidth == pmWidth is supported */
+	{
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+
+	switch(rflags)
+	{
+		case(RSB_MARF_RGB):
+		RSB_DO_ERROR_CUMULATE(errval,rsb__do_get_pixmap_RGB_from_matrix(filename,pmp,pmWidth,pmHeight));
+		break;
+		case(RSB_MARF_EPS):
+		// rsb_dump_postscript_from_mtx_t(fd,mtxAp,1,1,pmWidth,pmHeight,0);
+		// RSB_DO_ERROR_CUMULATE(errval,rsb__dump_postscript_from_matrix(filename,1,1,pmWidth,pmHeight,0));
+		RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_UNIMPLEMENTED_YET);
+		//RSB_DO_ERROR_CUMULATE(errval,rsb__dump_postscript_recursion_from_matrix(filename,1,1,pmWidth,pmHeight,RSB_FLAG_NOFLAGS,1,1,0,RSB_NUMERICAL_TYPE_DEFAULT));
+		break;
+		default: {errval = RSB_ERR_UNIMPLEMENTED_YET; goto err;}
+	}
+err:
+	return errval;
+}
+
+
+/* @endcond */
diff --git a/rsb_do.h b/rsb_do.h
new file mode 100644
index 0000000..2c8517a
--- /dev/null
+++ b/rsb_do.h
@@ -0,0 +1,124 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*
+ * @author Michele Martone
+ */
+#ifndef RSB_RSB_DO_H_INCLUDED
+#define RSB_RSB_DO_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "rsb_common.h"
+
+/**
+ * @file
+ * @brief
+ * Implementation of the interface functions.
+ *
+ */
+
+rsb_err_t rsb__do_get_rows_sparse(rsb_trans_t transA, const void * alphap, const struct rsb_mtx_t * mtxAp, void* VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t frA, rsb_coo_idx_t lrA, rsb_nnz_idx_t *rnzp, rsb_flags_t flags);
+rsb_err_t rsb__do_scal(struct rsb_mtx_t * mtxAp, const void * d, rsb_trans_t trans);
+rsb_err_t rsb__dodo_getdiag( const struct rsb_mtx_t * mtxAp, void * diagonal );
+rsb_err_t rsb__do_elemental_binop(struct rsb_mtx_t * mtxAp, enum rsb_elopf_t elop_flags, const void * alphap);
+rsb_err_t rsb__do_elemental_unop(struct rsb_mtx_t * mtxAp, enum rsb_elopf_t elop_flags);
+rsb_nnz_idx_t rsb__dodo_get_rows_nnz(const struct rsb_mtx_t *mtxAp, rsb_blk_idx_t fr, rsb_blk_idx_t lr, rsb_flags_t flags, rsb_err_t * errvalp);
+#define RSB_WANT_PARALLEL_ELEMENTAL_OPS 0 /* FIXME: temporary ! */
+#if RSB_WANT_PARALLEL_ELEMENTAL_OPS
+rsb_err_t rsb__do_elemental_scale_parallel(struct rsb_mtx_t * mtxAp, const void * alphap);
+#endif /* RSB_WANT_PARALLEL_ELEMENTAL_OPS */
+rsb_err_t rsb__do_matrix_add_to_dense(const void *alphap, const struct rsb_mtx_t * mtxAp, rsb_nnz_idx_t ldb, rsb_nnz_idx_t nr, rsb_nnz_idx_t nc, rsb_bool_t rowmajor, void * Bp);
+rsb_err_t rsb__do_switch_rsb_mtx_to_csr_sorted(struct rsb_mtx_t * mtxAp, void ** VAP, rsb_coo_idx_t ** IAP, rsb_coo_idx_t ** JAP, rsb_flags_t flags);
+rsb_err_t rsb__do_get_preconditioner(void *opd, const struct rsb_mtx_t * mtxAp, rsb_precf_t prec_flags, const void *ipd);/* FIXME: temporary interface */
+rsb_err_t rsb__do_get_csr(rsb_type_t typecode, const struct rsb_mtx_t *mtxAp, rsb_byte_t * VA, rsb_nnz_idx_t * RP, rsb_coo_idx_t * JA, rsb_flags_t flags);
+rsb_err_t rsb__do_get_matrix_info(const struct rsb_mtx_t *mtxAp, enum rsb_mif_t miflags, void* info, size_t buflen);
+rsb_err_t rsb__do_check_leak(void);
+rsb_err_t rsb__do_matrix_norm(const struct rsb_mtx_t * mtxAp , void * np, enum rsb_extff_t flags);
+rsb_err_t rsb__do_load_vector_file_as_matrix_market(const rsb_char_t * filename, rsb_type_t typecode, void * yp, rsb_coo_idx_t *yvlp);
+struct rsb_mtx_t * rsb__dodo_load_matrix_file_as_matrix_market(const rsb_char_t * filename, rsb_flags_t flags, rsb_type_t typecode, rsb_err_t *errvalp);
+rsb_bool_t rsb__do_was_initialized(void);
+rsb_err_t rsb__do_matrix_compute(const struct rsb_mtx_t * mtxAp , void * dp, enum rsb_extff_t flags);
+rsb_err_t rsb__do_switch_rsb_mtx_to_coo(struct rsb_mtx_t * mtxAp, void ** VAP, rsb_coo_idx_t ** IAP, rsb_coo_idx_t ** JAP, rsb_flags_t flags);
+struct rsb_mtx_t * rsb__do_mtx_alloc_from_coo_begin(rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_flags_t flags, rsb_err_t * errvalp);
+rsb_err_t rsb__do_mtx_alloc_from_coo_end(struct rsb_mtx_t ** mtxAp);
+
+/* TODO: this is a "secret" function, not declared in rsb.h ; shall make it official some day */
+rsb_err_t rsb__lib_get_info_str(int what, rsb_char_t* sbuf, size_t buflen);
+rsb_err_t rsb__do_upd_vals(struct rsb_mtx_t * mtxAp, enum rsb_elopf_t elop_flags, const void * omegap);
+rsb_err_t rsb__do_mtx_get_info(const struct rsb_mtx_t *mtxAp, enum rsb_mif_t miflags, void* minfop);
+rsb_err_t rsb__do_file_mtx_save(const struct rsb_mtx_t * mtxAp, const rsb_char_t * filename);
+struct rsb_mtx_t * rsb__do_mtx_alloc_from_csr_inplace (void *VA, rsb_coo_idx_t * RP, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_blk_idx_t brA, rsb_blk_idx_t bcA, rsb_flags_t flagsA, rsb_err_t * errvalp );
+rsb_err_t rsb__do_file_mtx_rndr(void * pmp, const char * filename, rsb_coo_idx_t pmlWidth, rsb_coo_idx_t pmWidth, rsb_coo_idx_t pmHeight, rsb_marf_t rflags);
+rsb_err_t rsb__do_vec_save(const rsb_char_t * filename, rsb_type_t typecode, const void * Yp, rsb_coo_idx_t yvl);
+
+#define RSB_ERR_DEFAULT_INTERFACE_ERROR RSB_ERR_GENERIC_ERROR
+#if RSB_WANT_DEBUG_VERBOSE_INTERFACE_NOTICE
+/* please note that the code is likely to fail self-consistency tests, if writing to stderr */
+#define RSB_DEBUG_VERBOSE_INTERFACE_NOTICE	{ if(rsb_global_session_handle.rsb_g_verbose_interface)RSB_STDERR("In file %20s (in %s) at line %10d:\n",__FILE__,__func__,__LINE__);}
+#else
+#define RSB_DEBUG_VERBOSE_INTERFACE_NOTICE	{}
+#endif /* RSB_WANT_DEBUG_VERBOSE_INTERFACE_NOTICE */
+
+#if RSB_WANT_LIBRSB_TIMER
+#define RSB_INTERFACE_TIMER_DCLS rsb_time_t etime, tetime = rsb_global_session_handle.etime;
+#define RSB_INTERFACE_TIMER_CMDS { etime = -rsb_do_time(); }
+#define RSB_INTERFACE_TIMER_ENDC { etime += rsb_do_time(); rsb_global_session_handle.etime = etime + tetime; }
+#else
+#define RSB_INTERFACE_TIMER_DCLS
+#define RSB_INTERFACE_TIMER_CMDS
+#define RSB_INTERFACE_TIMER_ENDC
+#endif
+
+#define RSB_INTERFACE_PREAMBLE_DCLS RSB_INTERFACE_TIMER_DCLS
+#define RSB_INTERFACE_PREAMBLE_CMDS RSB_INTERFACE_TIMER_CMDS RSB_DEBUG_VERBOSE_INTERFACE_NOTICE
+#define RSB_INTERFACE_PREAMBLE RSB_INTERFACE_PREAMBLE_DCLS RSB_INTERFACE_PREAMBLE_CMDS
+#define RSB_INTERFACE_ENDCMD RSB_INTERFACE_TIMER_ENDC
+
+#if RSB_OUT_ERR_VERBOSITY
+# if RSB_OUT_ERR_VERBOSITY==1
+# define RSB_DO_ERR_MANIFEST_INTERFACE(ERRVAL) {if(rsb_global_session_handle.error_stream!=NULL)if(RSB_SOME_ERROR(ERRVAL))rsb__do_perror(NULL,ERRVAL);/* don't put rsb_perror here or infinite recursion will arise :-) */}
+# endif /* RSB_OUT_ERR_VERBOSITY */
+# if RSB_OUT_ERR_VERBOSITY==2
+# define RSB_DO_ERR_MANIFEST_INTERFACE(ERRVAL) {if(RSB_SOME_ERROR(ERRVAL))rsb__do_perror(NULL,ERRVAL);}
+# endif /* RSB_OUT_ERR_VERBOSITY */
+# if RSB_OUT_ERR_VERBOSITY>=3 && RSB_OUT_ERR_VERBOSITY<=98
+/* it would be better to put the following error in the configure script */
+# error Error verbosity (set at configure time with --enable-interface-error-verbosity) shall be either 0,1,2,99 !
+# endif /* RSB_OUT_ERR_VERBOSITY */
+# if RSB_OUT_ERR_VERBOSITY==99
+# define RSB_DO_ERR_MANIFEST_INTERFACE(ERRVAL) {if(RSB_SOME_ERROR(ERRVAL)){rsb__do_perror(NULL,ERRVAL);RSB_STDOUT("Terminating program now.\n");RSB_EXIT(RSB_ERR_TO_PROGRAM_ERROR(ERRVAL));}}
+# endif /* RSB_OUT_ERR_VERBOSITY */
+#else /* RSB_OUT_ERR_VERBOSITY */
+# define RSB_DO_ERR_MANIFEST_INTERFACE(ERRVAL) 
+#endif /* RSB_OUT_ERR_VERBOSITY */
+#define RSB_DO_ERR_RETURN_INTERFACE(ERRVAL) {RSB_DO_ERR_MANIFEST_INTERFACE(ERRVAL) return (ERRVAL);}
+#define RSB_DO_MTX_RETURN_INTERFACE(MATRIX,ERRVAL) {RSB_DO_ERR_MANIFEST_INTERFACE(ERRVAL) return (MATRIX);}
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+#endif /* RSB_RSB_DO_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_dump.c b/rsb_dump.c
new file mode 100644
index 0000000..7d19ded
--- /dev/null
+++ b/rsb_dump.c
@@ -0,0 +1,366 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/**
+ * @file
+ * @brief Matrix info dumping code
+ * @author Michele Martone
+ *
+ * TODO: move other similar functions here.
+ * */
+
+#include "rsb_common.h"
+
+#define RSB_CONST_DUMP_DEFAULT_INNER	(RSB_CONST_DUMP_RECURSION | RSB_CONST_DUMP_INTERNALS | RSB_CONST_DUMP_TIMES | RSB_CONST_DUMP_DIMENSIONS)
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+
+static rsb_err_t rsb_do_dump_internals_brief(const struct rsb_mtx_t *mtxAp)
+{
+	/**
+		\ingroup gr_internals
+	 */
+#if RSB_ALLOW_STDOUT
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	//rsb_submatrix_idx_t i,j;
+	//struct rsb_mtx_t * submatrix=NULL;
+	rsb_submatrix_idx_t smi=0;
+
+	if(!mtxAp)
+	{
+		return RSB_ERR_BADARGS;
+	}
+
+	for(smi=0;smi<mtxAp->all_leaf_matrices_n;++smi)
+	{
+		RSB_STDOUT_MATRIX_SUMMARY((mtxAp->all_leaf_matrices[smi]).mtxlp);RSB_INFO("\n");
+	}
+	RSB_DO_ERR_RETURN(errval)
+#else
+	RSB_DO_ERR_RETURN(RSB_ERR_UNSUPPORTED_FEATURE)
+#endif
+}
+
+static rsb_err_t rsb_do_print_matrix_t(const struct rsb_mtx_t *mtxAp, FILE * stream, rsb_dump_flags_t flags)
+{
+	/**
+	 * \ingroup gr_internals
+	 * This is a slow debug function to print out a Matrix Market matrix out of the argument mtxAp.
+	 * */
+#if RSB_ALLOW_STDOUT
+	struct rsb_coo_matrix_t coo;
+	rsb_flags_t aflags = RSB_FLAG_NOFLAGS;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_bool_t want_calloc = RSB_BOOL_FALSE;
+	if(!mtxAp || (!stream && stream != RSB_DEFAULT_STREAM))
+	{
+		return RSB_ERR_BADARGS;
+	}
+
+	RSB_INIT_COO_FROM_MTX(&coo,mtxAp);
+
+	if(flags&RSB_CONST_DUMP_CSR)
+		aflags=RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS, want_calloc = RSB_BOOL_TRUE;
+	if(rsb__xallocate_coo_matrix_t(&coo,want_calloc,aflags)!=&coo)
+       	{
+	       	RSB_ERROR(RSB_ERRM_ES); 
+		goto err; 
+	}
+	errval = rsb__do_get_coo(mtxAp,(rsb_byte_t**)(&coo.VA),&coo.IA,&coo.JA,RSB_FLAG_NOFLAGS);
+	if(RSB_SOME_ERROR(errval)){RSB_ERROR(RSB_ERRM_ES);goto merr;}
+	if(flags&RSB_CONST_DUMP_MATRIX_MARKET || flags&RSB_CONST_DUMP_OCTAVE_STYLE)
+		errval = rsb__test_print_coo_mm(mtxAp->typecode,mtxAp->flags,coo.IA,coo.JA,coo.VA,coo.nr,coo.nc,coo.nnz,RSB_BOOL_TRUE,stream);
+	if(flags&RSB_CONST_DUMP_CSR)
+	{
+		errval = rsb__do_switch_fullword_array_to_compressed(coo.IA,coo.nnz,coo.nr);
+		if(RSB_SOME_ERROR(errval)){RSB_ERROR(RSB_ERRM_ES);goto merr;}
+		errval = rsb__test_print_csr(mtxAp->typecode,mtxAp->flags,coo.IA,coo.JA,coo.VA,coo.nr,coo.nc,coo.nnz,RSB_BOOL_TRUE,stream);
+		if(RSB_SOME_ERROR(errval)){RSB_ERROR(RSB_ERRM_ES);goto merr;}
+	}
+merr:
+	rsb__destroy_coo_matrix_t(&coo);
+err:
+	return errval;
+#else
+	return RSB_ERR_UNSUPPORTED_FEATURE;
+#endif
+}
+
+static rsb_err_t rsb_do_dump_graphviz_dot_graph_do_file_inner(const struct rsb_mtx_t *mtxAp, FILE * fd)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	const struct rsb_mtx_t * submatrix = NULL; 
+	rsb_submatrix_idx_t i,j;
+
+	if(!mtxAp)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_E_MTXAP);
+	}
+	RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+	if(submatrix)
+	{
+
+//#define RSB_FPRINTF_MATRIX_NODE_SUMMARY(FD,M) RSB_FPRINTF_MATRIX_SUMMARY(FD,M)
+//#define RSB_FPRINTF_MATRIX_NODE_SUMMARY(FD,M) RSB_FPRINTF(FD,"%dx%d@%d,%d:%d", (M)->nr, (M)->nc, (M)->roff, (M)->coff, (M)->nnz)
+#define RSB_FPRINTF_MATRIX_NODE_SUMMARY(FD,M) RSB_FPRINTF(FD,"%dx%d\\n@%d,%d\\n:%d(%s)", (M)->nr, (M)->nc, (M)->roff, (M)->coff, (M)->nnz,(rsb__is_recursive_matrix((M)->flags))?("*"):( \
+((RSB_DO_FLAG_HAS((M)->flags,RSB_FLAG_WANT_COO_STORAGE))? \
+(RSB_DO_FLAG_HAS((M)->flags,RSB_FLAG_USE_HALFWORD_INDICES)?"HCSR":"CSR"): \
+(RSB_DO_FLAG_HAS((M)->flags,RSB_FLAG_USE_HALFWORD_INDICES)?"HCOO":"COO")) \
+			))
+
+		RSB_FPRINTF(fd,"\"");
+		RSB_FPRINTF_MATRIX_NODE_SUMMARY(fd,mtxAp);
+		RSB_FPRINTF(fd,"\" -> \"");
+		RSB_FPRINTF_MATRIX_NODE_SUMMARY(fd,submatrix);
+		RSB_FPRINTF(fd,"\"\n");
+		errval = rsb_do_dump_graphviz_dot_graph_do_file_inner(submatrix,fd);
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+static rsb_err_t rsb_do_dump_graphviz_dot_graph_do_file(const struct rsb_mtx_t *mtxAp, FILE * fd)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!mtxAp)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_E_MTXAP);
+	}
+
+	RSB_FPRINTF(fd,"/* example usage: dot -Tps filename.dot > filename.ps */\n");
+	RSB_FPRINTF(fd,"digraph matrix {\n" 
+				"quadtree=TRUE;\n"
+			       	"ratio=1.4;\n");
+	errval = rsb_do_dump_graphviz_dot_graph_do_file_inner(mtxAp,fd);
+	RSB_FPRINTF(fd,"}\n");
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+static rsb_err_t rsb_do_dump_internals(const struct rsb_mtx_t *mtxAp)
+{
+	/**
+		\ingroup gr_internals
+	 */
+#if RSB_ALLOW_STDOUT
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_submatrix_idx_t i,j;
+	struct rsb_mtx_t * submatrix = NULL;
+
+	if(!mtxAp)
+	{
+		return RSB_ERR_BADARGS;
+	}
+
+	if(rsb__is_root_matrix(mtxAp))
+		RSB_STDOUT(	"#R %zd x %zd, %zd nnz (%zd bytes),"
+				"%zd index space for bytes, %zd bytes for %zd structs (%zd of which are on the diagonal) "
+				"(%3.2lg%% of nnz are on the diagonal) "
+				"\n"
+			,(size_t)mtxAp->nr
+			,(size_t)mtxAp->nc
+			,(size_t)mtxAp->nnz
+			,((size_t)mtxAp->nnz)*RSB_SIZEOF(mtxAp->typecode)
+			,(size_t)rsb__get_index_storage_amount(mtxAp)
+			,((size_t)rsb__terminal_recursive_matrix_count(mtxAp))*sizeof(struct rsb_mtx_t)
+			,((size_t)rsb__terminal_recursive_matrix_count(mtxAp))
+			,(size_t)rsb__get_diagonal_submatrices_count(mtxAp)
+			,((((double)rsb__get_diagonal_elements_count(mtxAp))*100)/(mtxAp->nnz))
+			);
+
+	if(rsb__is_terminal_recursive_matrix(mtxAp))
+	{
+		RSB_STDOUT("#T at %zd %zd, %zd x %zd, %zd nnz (%3.2lg%%)\n"
+				,(size_t)mtxAp->roff
+				,(size_t)mtxAp->coff
+				,(size_t)mtxAp->nr
+				,(size_t)mtxAp->nc
+				,(size_t)mtxAp->nnz
+				,((((double)mtxAp->nnz)*100)/(mtxAp->nr))/(mtxAp->nc)
+				);
+	}
+	else
+	{
+		RSB_STDOUT("#N at %zd %zd, %zd x %zd, %zd nnz (%3.2lg%%)\n"
+				,(size_t)mtxAp->roff
+				,(size_t)mtxAp->coff
+				,(size_t)mtxAp->nr
+				,(size_t)mtxAp->nc
+				,(size_t)mtxAp->nnz
+				,((((double)mtxAp->nnz)*100)/(mtxAp->nr))/(mtxAp->nc)
+				);
+
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix)
+			RSB_DO_ERROR_CUMULATE(errval,rsb_do_dump_internals(submatrix));
+	}
+	RSB_DO_ERR_RETURN(errval)
+#else
+	RSB_DO_ERR_RETURN(RSB_ERR_UNSUPPORTED_FEATURE)
+#endif
+}
+
+rsb_err_t rsb__do_print_matrix_stats(const struct rsb_mtx_t *mtxAp, rsb_dump_flags_t flags, const rsb_char_t*filename)
+{
+	/**
+		\ingroup gr_internals
+		FIXME: document me. Currently stdout is the default for a NULL filename; this shall be handled differently in the future.
+		FIXME: filename is really supported only for RSB_CONST_DUMP_MATRIX_MARKET
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	FILE *stream = NULL;
+
+	if(!mtxAp)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_E_MTXAP);
+	}
+
+	if(filename == NULL)
+		stream = RSB_DEFAULT_STREAM;
+	else
+	{
+		stream = rsb__util_fopen(filename,"w");
+		if(!stream)
+		{
+			errval = RSB_ERR_GENERIC_ERROR;
+			RSB_PERR_GOTO(err,"problems opening %s!\n",filename);
+		}
+		/* FIXME: and what about errno ? */
+	}
+
+	if(flags == RSB_CONST_DUMP_DEFAULT)
+		flags = RSB_CONST_DUMP_DEFAULT_INNER;
+
+	if(flags&RSB_CONST_DUMP_RECURSION_BRIEF)
+		RSB_DO_ERROR_CUMULATE(errval,rsb_do_dump_internals_brief(mtxAp));
+
+	if(flags&RSB_CONST_DUMP_RECURSION)
+		RSB_DO_ERROR_CUMULATE(errval,rsb_do_dump_internals(mtxAp));
+
+	if(flags&RSB_CONST_DUMP_DIMENSIONS)
+	{
+#if RSB_ALLOW_STDOUT
+		RSB_STDOUT(
+				"m : %d\n"
+				"k : %d\n"
+				"submatrices : %d\n"
+				,(mtxAp->nr)
+				,(mtxAp->nc)
+				,(mtxAp->all_leaf_matrices_n)
+				);
+#else
+		errval = RSB_ERR_UNSUPPORTED_FEATURE; goto err;
+#endif
+	}
+
+	if(flags&RSB_CONST_DUMP_TIMES)
+	{
+#if RSB_ALLOW_STDOUT
+		RSB_STDOUT(
+				"assembly : %10.2lf s\n"
+				"perf.est.:%10.2lf s\n"
+				"str.anal.:%10.2lf s\n"
+				"el.ins.  :%10.2lf s\n"
+				"el.sort. :%10.2lf s\n"
+				"el.part. :%10.2lf s\n"
+				,
+				(mtxAp->tat),
+				(mtxAp->pet),
+				(mtxAp->sat),
+				(mtxAp->eit),
+				(mtxAp->est),
+				(mtxAp->cpt));
+#else
+		errval = RSB_ERR_UNSUPPORTED_FEATURE; goto err;
+#endif
+	}
+	
+	if(flags&RSB_CONST_DUMP_INTERNALS)
+	{
+#if RSB_ALLOW_STDOUT
+		/* TODO: complete this hex dumping code */
+		size_t nbytes = sizeof(struct rsb_mtx_t),nwords=(nbytes+sizeof(long)-1)/sizeof(long),n; /* round byte count up to whole words */
+		size_t words_per_row = 4;
+		RSB_STDOUT("%p:\n",(const void*)mtxAp);
+		for(n=0;n<nwords;++n)
+		{
+			RSB_STDOUT("%lx ",((const long*)mtxAp)[n]);
+			if((n%words_per_row)==words_per_row-1)
+				RSB_STDOUT("\n"); /* line break after each group of words */
+		}
+		if( ( n%words_per_row ) != 0 )
+			RSB_STDOUT("\n"); /* terminate an incomplete last row */
+#else
+		errval = RSB_ERR_UNSUPPORTED_FEATURE; goto err;
+#endif
+	}
+
+	if(flags&RSB_CONST_DUMP_BLOCKS)
+		;/* TODO */
+	if(flags&RSB_CONST_DUMP_MATRIX_MARKET || flags&RSB_CONST_DUMP_OCTAVE_STYLE
+			|| flags&RSB_CONST_DUMP_CSR )
+		RSB_DO_ERROR_CUMULATE(errval,rsb_do_print_matrix_t(mtxAp,stream,flags));
+#if 0
+	else if(flags&RSB_CONST_DUMP_COO)
+		/* FIXME: TODO: should print in Octave/Matlab style, here. */
+		RSB_DO_ERROR_CUMULATE(errval,rsb_do_print_matrix_t_inner(mtxAp,0,0));
+#endif
+
+	if(flags&RSB_CONST_DUMP_RSB)
+	{
+		rsb_time_t st = - rsb_time();
+		RSB_DO_ERROR_CUMULATE(errval,rsb__do_save_matrix_file_as_binary(mtxAp,stream));
+		if(RSB_SOME_ERROR(errval)){RSB_ERROR(RSB_ERRM_ES);goto err;}
+		st += rsb_time();
+		RSB_IO_NOTICE("#binary saving file %s succeeded and took %lf s (%.0f nnz/s).\n",filename,st,(1.0/(st/mtxAp->nnz)));
+	}
+
+	if(flags&RSB_CONST_DUMP_DOT)
+		RSB_DO_ERROR_CUMULATE(errval,rsb_do_dump_graphviz_dot_graph_do_file(mtxAp,stream));
+err:
+	if(stream && stream != RSB_DEFAULT_STREAM )
+		fclose(stream);
+	/* FIXME: what about errno here ? */
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_dump_bitmap(const rsb_bitmap_data_t * bmap, size_t w, size_t h)
+{
+#if RSB_ALLOW_STDOUT
+	size_t wi,hi;
+	if(!bmap)
+		return RSB_ERR_BADARGS;
+	for(wi=0;wi<w;++wi)
+	{
+		for(hi=0;hi<h;++hi)
+			RSB_STDOUT("%c",RSB_BITMAP_GET(bmap,w,h,wi,hi)?'1':'0');
+		RSB_STDOUT("\n"); /* one bitmap row per output line */
+	}
+	return RSB_ERR_NO_ERROR;
+#else
+	return RSB_ERR_UNSUPPORTED_FEATURE;
+#endif
+}
+
+/* @endcond */
diff --git a/rsb_dump.h b/rsb_dump.h
new file mode 100644
index 0000000..ab6dee9
--- /dev/null
+++ b/rsb_dump.h
@@ -0,0 +1,57 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/**
+ * @file
+ * @brief Matrix info dumping code
+ * @author Michele Martone
+ * */
+
+#ifndef RSB_DUMP_H_INCLUDED
+#define RSB_DUMP_H_INCLUDED
+
+/*#include "rsb_common.h"*/
+#include "rsb.h"
+
+#define RSB_DEFAULT_DUMPFILENAME "/dev/stdout"
+#define RSB_DEFAULT_FD stdout
+typedef rsb_flags_t rsb_dump_flags_t;
+#define RSB_CONST_DUMP_RECURSION	0x00000001
+#define RSB_CONST_DUMP_INTERNALS	0x00000002
+#define RSB_CONST_DUMP_TIMES		0x00000004
+#define RSB_CONST_DUMP_BLOCKS		0x00000008
+#define RSB_CONST_DUMP_MATRIX_MARKET	0x00000010
+#define RSB_CONST_DUMP_DIMENSIONS	0x00000020
+#define RSB_CONST_DUMP_COO		0x00000040
+#define RSB_CONST_DUMP_RECURSION_BRIEF	0x00000080
+#define RSB_CONST_DUMP_OCTAVE_STYLE	0x00000100	/* FIXME: unfinished */
+#define RSB_CONST_DUMP_CSR		0x00000200
+#define RSB_CONST_DUMP_RSB		0x00000400
+#define RSB_CONST_DUMP_DOT		0x00000800
+#define RSB_CONST_DUMP_DEFAULT		0x00000000
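+/* The RSB_CONST_DUMP_* values are bit masks and may be OR-ed together: e.g.
+   rsb__do_print_matrix_stats(mtxAp,RSB_CONST_DUMP_RECURSION|RSB_CONST_DUMP_TIMES,NULL)
+   would dump both the recursion structure and the assembly times to the default stream. */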
+rsb_err_t rsb__do_print_matrix_stats(const struct rsb_mtx_t *mtxAp, rsb_dump_flags_t flags, const rsb_char_t*filename);
+rsb_err_t rsb__do_dump_bitmap(const rsb_bitmap_data_t * bmap, size_t w, size_t h);
+#endif /* RSB_DUMP_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_eps.c b/rsb_eps.c
new file mode 100644
index 0000000..6883735
--- /dev/null
+++ b/rsb_eps.c
@@ -0,0 +1,1133 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains PostScript rendering functions.
+ * */
+
+#include "rsb_internals.h"
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+typedef float rsb_rf_t; /* real float */
+#define RSB_RR_LEVELS 16
+
+#define RSB_PRINTF_MATRIX_TIME_ARGS(MTXAP)  \
+					"ect: %5.2le""  "	\
+					"est: %5.2le""  "	\
+					"sat: %5.2le""  "	\
+					"eit: %5.2le""  "	\
+					"cpt: %5.2le"	\
+					, \
+					(MTXAP)->ect, (MTXAP)->est, (MTXAP)->sat, (MTXAP)->eit, (MTXAP)->cpt
+
+#define RSB_EPS_TRSL(DX,DY,SX,SY,XS,YS,MTXAP) DX = XS*(/*(MTXAP)->nc-*/(SX)); DY = YS*((MTXAP)->nr-(SY)); /* translate for EPS */
+#define RSB_MTX_EFF_R(MTXAP) (MTXAP->bm-((MTXAP)->broff-(MTXAP)->roff)) /* effective rows count */
+#define RSB_MTX_EFF_C(MTXAP) (MTXAP->bk-((MTXAP)->bcoff-(MTXAP)->coff)) /* effective columns count */
+#define RSB_MTX_LAR(MTXAP) ((MTXAP)->roff+(MTXAP)->bm) /* last absolute row */
+#define RSB_MTX_LAC(MTXAP) ((MTXAP)->coff+(MTXAP)->bk) /* last absolute column */
+#define RSB_MTX_LER(MTXAP) ((MTXAP)->broff-(MTXAP)->roff) /* local empty rows */
+#define RSB_MTX_LEC(MTXAP) ((MTXAP)->bcoff-(MTXAP)->coff) /* local empty columns */
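+/* E.g. (hypothetical values) a leaf with roff=100, broff=110, bm=50 has
+   RSB_MTX_LER==10 leading empty rows, RSB_MTX_EFF_R==40 effective rows and
+   RSB_MTX_LAR==150 as its last absolute row. */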
+#define RSB_EPS_NEWPATH "N" /* newpath */
+#define RSB_EPS_MOVETO "M" /* moveto */
+#define RSB_EPS_LINETO "L" /* lineto */
+#define RSB_EPS_RLINETO "R" /* rlineto */
+#define RSB_EPS_SCALEFONT "SCF" /* scalefont */
+#define RSB_EPS_SETFONT "SF" /* setfont */
+#define RSB_EPS_SETRGB "SRGB" /* setrgbcolor */
+#define RSB_EPS_SETLINEWIDTH "SLW" /* setlinewidth */
+#define RSB_EPS_CLOSEPATH "C" /* closepath */
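+/* The one/two-letter names above are shorthands for PostScript operators; the
+   matching "/R {rlineto} bind def"-style procedure definitions are emitted in
+   the generated document's prologue, which keeps the (potentially large) dumps compact. */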
+
+static rsb_err_t render_ps_box(FILE*fd, int r0, int c0, int dr, int dc, rsb_coo_idx_t orows, rsb_rf_t xs, rsb_rf_t ys, rsb_rf_t r, rsb_rf_t g, rsb_rf_t b)
+{
+	/**
+	   \ingroup gr_internals
+	   Prints out a box in the postscript language.
+
+	   \param r0 the box base row
+	   \param c0 the box base column
+	   \param dr the box height
+	   \param dc the box width
+	   \param orows the box offset row
+	   \param xs the scale on x (columns)
+	   \param ys the scale on y (rows)
+	   \param r red value
+	   \param g green value
+	   \param b blue value
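+
+	   The emitted PostScript is a single stroked rectangular path, e.g.
+	   (illustrative values): newpath 10 90 M 5 0 R 0 -5 R -5 0 R 0 0 R C 1 0 0 SRGB 1 SLW stroke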
+	 */
+#if RSB_ALLOW_STDOUT
+		RSB_FPRINTF(fd,
+			"newpath\n"
+			"%g %g "RSB_EPS_MOVETO"\n"
+			"%g %d "RSB_EPS_RLINETO"\n"
+			"%d %g "RSB_EPS_RLINETO"\n"
+			"%g %d "RSB_EPS_RLINETO"\n"
+			"%d %d "RSB_EPS_RLINETO"\n"
+			RSB_EPS_CLOSEPATH"\n"
+			"%g %g %g "RSB_EPS_SETRGB"\n"
+			"1 "RSB_EPS_SETLINEWIDTH"\n"
+			"stroke\n\n"
+			,
+			c0*xs,(orows-r0)*ys,
+			 (dc)*xs,  0,
+			0,  -(dr)*ys,
+			-(dc)*xs, 0,
+/*			c0*xs,(orows-r0)*ys,
+			 (submatrix->nc)*xs,  0,
+			0,  -(submatrix->nr)*ys,
+			-(submatrix->nc)*xs, 0,*/
+			0, 0,
+			r, g, b
+			);
+	return RSB_ERR_NO_ERROR;
+#else /* RSB_ALLOW_STDOUT */
+	return RSB_ERR_UNSUPPORTED_FEATURE;
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+static rsb_err_t rsb_dump_postscript_z_curve(FILE*fd, const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t roff, rsb_coo_idx_t coff, rsb_rf_t xs, rsb_rf_t ys, rsb_coo_idx_t orows, int level,int * p, int * pv)
+{
+	/**
+	   \ingroup gr_internals
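+	   Draws a polyline connecting the centres of the leaf submatrices in the
+	   order they are laid out (Z-like recursive order): a moveto for the first
+	   leaf, a lineto for each subsequent one.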
+	 */
+#if RSB_ALLOW_STDOUT
+	rsb_submatrix_idx_t i,j;
+	struct rsb_mtx_t * submatrix=NULL;
+	const int levels = RSB_RR_LEVELS;
+	const int want_eb=0;/* effective boundaries (FIXME: option currently unfinished) */
+
+	if(!mtxAp)
+	{
+		goto err;
+	}
+
+	if(level>=levels-1)
+		level=levels;
+
+#if 1
+	if(pv)
+	{
+		rsb_submatrix_idx_t smi=0;
+		RSB_SUBMATRIX_FOREACH_LEAF_PERMUTED(mtxAp,submatrix,smi,pv)
+		//RSB_SUBMATRIX_FOREACH_LEAF(mtxAp,submatrix,smi)
+		{
+			rsb_dump_postscript_z_curve(fd,submatrix,submatrix->roff,submatrix->coff,xs,ys,orows,level+1,p,NULL);
+
+			if(smi<mtxAp->all_leaf_matrices_n-1)
+			{
+				rsb_rf_t fcoff=(rsb_rf_t)submatrix->coff;
+				rsb_rf_t froff=(rsb_rf_t)submatrix->roff;
+				rsb_rf_t fnc=(rsb_rf_t)submatrix->nc;
+				rsb_rf_t fnr=(rsb_rf_t)submatrix->nr;
+				rsb_rf_t shade= .8 - (.8*smi)/(mtxAp->all_leaf_matrices_n);
+
+				if(want_eb)
+				{
+					fcoff=(rsb_rf_t)submatrix->bcoff;
+					froff=(rsb_rf_t)submatrix->broff;
+					fnc=(rsb_rf_t)submatrix->bk; /* bk: effective columns */
+					fnr=(rsb_rf_t)submatrix->bm; /* bm: effective rows */
+				}
+
+				RSB_FPRINTF(fd,"%g %g %g "RSB_EPS_SETRGB" 1 "RSB_EPS_SETLINEWIDTH" stroke "RSB_EPS_NEWPATH" %g %g "RSB_EPS_MOVETO"\n",
+				shade,shade,1.0, (fcoff*xs+fnc*(xs/2)), ((-froff+orows))*ys-(fnr)*(ys/2));
+			}
+
+		}
+		goto ret;
+	}
+#endif
+
+	if(rsb__is_terminal_recursive_matrix(mtxAp))
+	{
+		rsb_coo_idx_t scoff=coff;
+		rsb_coo_idx_t sroff=roff;
+		rsb_coo_idx_t snc=mtxAp->nc;
+		rsb_coo_idx_t snr=mtxAp->nr;
+		const int want_sc = 1;/* want submatrix comments */
+
+		if(want_eb)
+		{
+			sroff=roff+RSB_MTX_LER(mtxAp);
+			scoff=coff+RSB_MTX_LEC(mtxAp);
+			snr=mtxAp->bm;
+			snc=mtxAp->bk;
+		}
+
+		if(want_sc)
+		RSB_FPRINTF(fd,"%% matrix at %d %d, level %d, xs %g, ys %g, orows %d\n",sroff,scoff,level,xs,ys,orows);
+		if(*p>0)
+			RSB_FPRINTF(fd, "%g %g "RSB_EPS_LINETO"\n" , scoff*xs+snc*(xs/2), ((rsb_rf_t)(orows-sroff))*ys-((rsb_rf_t)snr)*(ys/2));
+		else
+			RSB_FPRINTF(fd, "%g %g "RSB_EPS_MOVETO"\n" , scoff*xs+snc*(xs/2), ((rsb_rf_t)(orows-sroff))*ys-((rsb_rf_t)snr)*(ys/2));
+		++*p;
+	}
+	else
+	{
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix)
+		{
+//			rsb_coo_idx_t scoff=submatrix->coff;
+//			rsb_coo_idx_t sroff=submatrix->roff;
+			rsb_coo_idx_t snc=submatrix->nc;
+			rsb_coo_idx_t snr=submatrix->nr;
+
+			if(0)
+			if(want_eb)
+			{
+		//		scoff=submatrix->bcoff;
+		//		sroff=submatrix->broff;
+		//		snr=submatrix->bm;
+		//		snc=submatrix->bk;
+			}
+
+			rsb_dump_postscript_z_curve(fd,submatrix, roff+(i?(mtxAp->nr-snr):0), coff+(j?mtxAp->nc-snc:0),xs,ys,orows, level+1,p,NULL);
+		}
+	}
+ret:
+	return RSB_ERR_NO_ERROR;
+err:
+	return RSB_ERR_GENERIC_ERROR;
+#else /* RSB_ALLOW_STDOUT */
+	return RSB_ERR_UNSUPPORTED_FEATURE;
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+static rsb_err_t rsb__dump_postscript_ussv_order_curve(const struct rsb_mtx_t * mtxAp, rsb_rf_t xs, rsb_rf_t ys, int * p)
+{
+	/**
+	   \ingroup gr_internals
+	   NEW, EXPERIMENTAL
+	 */
+#if RSB_ALLOW_STDOUT
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_translated_matrix_t * all_leaf_matrices=NULL;
+	rsb_submatrix_idx_t all_leaf_matrices_n=0,n;
+	FILE*fd = RSB_DEFAULT_FD;
+
+	if(!mtxAp)
+	{
+		goto err;
+	}
+
+	errval = rsb__do_get_submatrices_for_ussv(mtxAp,&all_leaf_matrices,&all_leaf_matrices_n,RSB_TRANSPOSITION_N);
+	if(!all_leaf_matrices && !RSB_SOME_ERROR(errval))
+		errval = RSB_ERR_ENOMEM; /* NULL result without an error code: assume allocation failure */
+
+	if(RSB_SOME_ERROR(errval))
+	{ RSB_PERR_GOTO(err,RSB_ERRM_ES) }
+
+	RSB_FPRINTF(fd,"%%%%there are %d leaves, %d for ussv\n",mtxAp->all_leaf_matrices_n,all_leaf_matrices_n);
+#if 1
+	for(n=0;n<all_leaf_matrices_n;++n)
+	{
+		rsb_coo_idx_t rows=all_leaf_matrices[n].nr;
+		rsb_coo_idx_t cols=all_leaf_matrices[n].nc;
+		rsb_coo_idx_t roff=all_leaf_matrices[n].roff;
+		rsb_coo_idx_t coff=all_leaf_matrices[n].coff;
+		rsb_coo_idx_t my=mtxAp->nr-((roff+rows/2));
+		rsb_coo_idx_t mx=(coff+cols/2);
+		rsb_rf_t mys=my;
+		rsb_rf_t mxs=mx;
+		mys*=ys/mtxAp->nc;
+		mxs*=xs/mtxAp->nr;
+//		my/=mtxAp->cols;
+		RSB_FPRINTF(fd,"%% matrix %d at %d %d, %d x %d \n",n,roff,coff,rows,cols);
+		if(*p>0)
+			RSB_FPRINTF(fd, "%g %g "RSB_EPS_LINETO"\n" , mxs, mys);
+		else
+			RSB_FPRINTF(fd, "%g %g "RSB_EPS_MOVETO"\n" , mxs, mys);
+		++*p;
+	}
+#endif
+err:
+	RSB_CONDITIONAL_FREE(all_leaf_matrices);
+	RSB_DO_ERR_RETURN(errval)
+#else /* RSB_ALLOW_STDOUT */
+	RSB_DO_ERR_RETURN(RSB_ERR_UNSUPPORTED_FEATURE)
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+int rsb_dump_postscript(const int argc, char * const argv[])
+{
+	/**
+	   \ingroup gr_internals
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_option options[] = {
+	    {"matrix-filename",	required_argument, NULL, 0x66},/* f */  
+	    {"dump-recursion",	no_argument, NULL, 'd'},/* NEW */
+	    {"width",required_argument	, NULL, 0x5757},/* W */
+	    {"height",required_argument	, NULL, 0x4848}, /* H */
+	    {"auto-size",no_argument	, NULL, 'a'},
+	    {"block-dump",no_argument	, NULL, 'B'},
+	    {"nonzeros-dump",no_argument, NULL, 'N'},
+	    {"block-rowsize",	required_argument, NULL, 0x72 },/* r */
+	    {"block-columnsize",	required_argument, NULL, 0x63},/* c */  
+	    {"z-dump",	no_argument, NULL, 'z'},
+	    {"ussv-dump",	no_argument, NULL, 'S'},
+	    RSB_BENCH_PROG_OPTS
+	    {0,0,0,0}
+	};
+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_DOUBLE ;
+#else /* RSB_NUMERICAL_TYPE_DOUBLE */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_FLOAT ;
+#else /* RSB_NUMERICAL_TYPE_FLOAT */
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_DEFAULT;
+#endif /* RSB_NUMERICAL_TYPE_FLOAT */
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+	rsb_blk_idx_t br=1;
+	rsb_blk_idx_t bc=1;
+
+	const char * filename=NULL;
+	int c,w = RSB_DEFAULT_MATRIX_RENDERING_COLS,h = RSB_DEFAULT_MATRIX_RENDERING_ROWS;
+	int opt_index = 0;
+	int dump_recursion=0;
+	int g_auto_size=0;
+	rsb_bool_t want_blocks=0,want_nonzeros=0,z_dump=0;
+
+	rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+
+	if((errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS))!=RSB_ERR_NO_ERROR)
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+    	for (;;)
+	{
+		c = rsb_getopt_long(argc,argv,RSB_SAMPLE_PROGRAM_OPTIONS_GET_FLAGS"ar:c:df:BNzSn:"/*"W:H:"*/,options,&opt_index);
+		if (c == -1)
+			break;
+		RSB_DO_FLAG_ADD(flags,rsb__sample_program_options_get_flags(c,optarg));	/* FIXME : NEW */
+		switch (c)
+		{
+			case 'r':
+			br = rsb__util_atoi(optarg);
+			if(br<1) { errval = RSB_ERR_BADARGS; RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+			break;
+			case 'c':
+			bc = rsb__util_atoi(optarg);
+			if(bc<1) { errval = RSB_ERR_BADARGS; RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+			break;
+			case 'f':
+			filename = optarg;
+			break;
+			case 'N':
+			want_nonzeros=1;
+			break;
+			case 'B':
+			want_blocks=1;
+			break;
+			case 'a':
+			g_auto_size=1;
+			break;
+			case 'd':
+			dump_recursion=1;
+			break;
+			case 'S':
+			z_dump=2;
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER_TRIANGULAR);
+			break;
+			case 'z':
+			z_dump=1;
+			break;
+ 			case 0x4848:
+			h = rsb__util_atoi(optarg);
+			if(h<1) { errval = RSB_ERR_BADARGS; RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+			break;
+			case 0x5757:
+			w = rsb__util_atoi(optarg);
+			if(w<1) { errval = RSB_ERR_BADARGS; RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+			break;
+			case 'T':
+			typecode = toupper(*optarg);
+			break;
+			case 'n':
+#if 1
+			rsb__set_num_threads(rsb__util_atoi(optarg));
+#else
+			{
+				rsb_thread_t ca_[1]={1};
+				rsb_thread_t * ca=ca_;
+				rsb_thread_t cn=1,ci=0;
+				ca=NULL;cn=0;
+				if(RSB_SOME_ERROR(errval = rsb__util_get_bx_array(optarg,&cn,&ca)))
+				{
+				       	RSB_PERR_GOTO(err,RSB_ERRM_ES); 
+				}
+			}
+#endif
+			break;
+			default:
+			;
+	    	}
+	}
+	
+	if(!filename)
+	{
+		const char*usagestring=" -aRzd -f pd.mtx";
+		//errval = RSB_ERR_BADARGS;
+		RSB_INFO("Did not specify a matrix file.\n");
+		RSB_INFO("Usage example: %s %s\n",argv[0],usagestring);
+		//RSB_ERROR(RSB_ERRM_ES);
+		goto err;
+	}
+
+	RSB_DO_FLAG_DEL(flags,RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR); /* FIXME : breaks  -r1 -c1 -Fbr -aRzD */
+	if(g_auto_size)
+	{
+		/* rescale smartly to reflect the desired area and keep proportions (but lose both dimensions) */
+		size_t cols,rows;
+		rsb_rf_t area=1,p;
+		rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+
+		if(RSB_SOME_ERROR(rsb__do_util_get_matrix_dimensions(filename, &cols, &rows, NULL, &flags)))
+			goto err;
+
+		area*=w;
+		area*=h;
+		p=((rsb_rf_t)cols)/((rsb_rf_t)rows);
+		h=sqrt(area/p);
+		w=h*p;
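+		/* e.g. w==h==512 with a 2:1 cols:rows matrix gives h~=362, w~=724: same area, matching aspect ratio */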
+	}
+
+	if(!dump_recursion)
+		want_nonzeros = 1;
+
+	errval = rsb__dump_postscript_recursion_from_matrix(filename,br,bc,w,h,flags,want_blocks,z_dump,want_nonzeros,dump_recursion,typecode);
+err:
+	RSB_MASK_OUT_SOME_ERRORS(errval)
+	rsb__do_perror(NULL,errval);
+	return RSB_ERR_TO_PROGRAM_ERROR(errval);
+}
+
+static rsb_err_t rsb__dump_block_rectangles(FILE*fd, const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t roff, rsb_coo_idx_t coff, rsb_rf_t xs, rsb_rf_t ys, rsb_coo_idx_t orows, int level)
+{
+	/**
+	 */
+#if RSB_ALLOW_STDOUT
+	rsb_submatrix_idx_t i,j;
+	struct rsb_mtx_t * submatrix = NULL;
+	const int levels = RSB_RR_LEVELS;
+	const int want_eb = 0;/* want effective boundaries (working) */
+	const int want_sc = 1;/* want submatrix comments */
+	rsb_rf_t shade;
+
+	if(!mtxAp)
+	{
+		goto err;
+	}
+
+	if(level>=levels-1)
+		level=levels;
+	shade = 1.0*(level)/levels;
+
+	if(want_sc)RSB_FPRINTF(fd,"%% matrix at %d %d, level %d\n",roff,coff,level);
+	if(rsb__is_terminal_recursive_matrix(mtxAp))
+	{
+		rsb_coo_idx_t eroff=mtxAp->broff,ecoff=mtxAp->bcoff;
+		rsb_coo_idx_t er=RSB_MTX_EFF_R(mtxAp),ec=RSB_MTX_EFF_C(mtxAp);
+		if(want_eb==0)
+			eroff=mtxAp->roff,ecoff=mtxAp->coff,er=mtxAp->nr,ec=mtxAp->nc;
+		if(want_sc)RSB_FPRINTF(fd,"%% terminal matrix at %d %d\n",roff,coff);
+		render_ps_box(fd,eroff, ecoff, er, ec, orows, xs, ys, shade, shade, shade);
+	}
+
+	RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+	if(submatrix)
+	{
+		rsb__dump_block_rectangles(fd,submatrix, roff+(i?(mtxAp->nr-submatrix->nr):0), coff+(j?mtxAp->nc-submatrix->nc:0),xs,ys,orows, level+1);
+	}
+
+	return RSB_ERR_NO_ERROR;
+err:
+	return RSB_ERR_GENERIC_ERROR;
+#else /* RSB_ALLOW_STDOUT */
+	return RSB_ERR_UNSUPPORTED_FEATURE;
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+rsb_err_t rsb__dump_postscript_recursion_from_mtx_t(FILE*fd, const char * filename, const struct rsb_mtx_t*mtxAp, rsb_blk_idx_t br, rsb_blk_idx_t bc, int width, int height, rsb_marf_t rflags, rsb_bool_t want_blocks, rsb_bool_t z_dump, rsb_bool_t want_nonzeros, int *pv )
+{
+	/*
+	 * ( rflags == RSB_FLAG_NOFLAGS ) is allowed and implies defaults.
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t nrA = mtxAp->nr, ncA = mtxAp->nc;
+	const int want_structure_comments_dump = 1;
+	rsb_rf_t xs, ys;
+
+	if(fd && filename)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES); 
+	}
+
+	if(filename)
+	{
+		fd = rsb__util_fopen(filename,"w");
+		if( fd == NULL )
+		{
+			errval=RSB_ERR_GENERIC_ERROR;
+			goto err;
+		}
+	}
+
+	ys = ((rsb_rf_t)height)/nrA, xs = ((rsb_rf_t)width)/ncA;
+	errval = rsb__do_print_postscript_header(fd, width, height, xs, ys );
+
+	if(want_structure_comments_dump)
+	{
+		RSB_POSTSCRIPT_DUMP_COMMENT(fd,RSB_PRINTF_MTX_SUMMARY_ARGS(mtxAp));
+		RSB_POSTSCRIPT_DUMP_COMMENT(fd,RSB_PRINTF_MATRIX_TIME_ARGS(mtxAp));
+		RSB_FPRINTF(fd,"%%%% ");
+		rsb__fprint_matrix_implementation_code(mtxAp, "", mtxAp->flags, fd);
+		RSB_FPRINTF(fd,"\n");
+	}
+
+	if( rflags == RSB_MARF_EPS_L )
+	{
+			struct rsb_mtx_t * submatrix = NULL;
+			rsb_submatrix_idx_t smi;
+			double mnnz = 0, annz = 0;
+			rsb_coo_idx_t hoo = -RSB_MIN(RSB_MIN(mtxAp->nr,mtxAp->nc)/1000,10); /* how much out; this shall draw some coloured lines slightly inward, into the box; on smaller matrices the offset is limited */
+
+			RSB_SUBMATRIX_FOREACH_LEAF(mtxAp,submatrix,smi) 
+				mnnz = RSB_MAX(mnnz,submatrix->nnz),
+				annz += submatrix->nnz;
+			annz /= mtxAp->all_leaf_matrices_n;
+
+			RSB_FPRINTF(fd,"%% colored boxes dump\n");
+			RSB_SUBMATRIX_FOREACH_LEAF(mtxAp,submatrix,smi) 
+			{
+				rsb_rf_t fx,fy;
+				double rv,gv,bv, iv;
+
+				RSB_EPS_TRSL(fx,fy,submatrix->bcoff,submatrix->broff,xs,ys,mtxAp);
+				RSB_FPRINTF(fd,RSB_EPS_NEWPATH" %g %g "RSB_EPS_MOVETO" ",fx,fy);
+				RSB_FPRINTF(fd,"%g %g "RSB_EPS_RLINETO" ", xs*RSB_MTX_EFF_C(submatrix),0.0);
+				RSB_FPRINTF(fd,"%g %g "RSB_EPS_RLINETO" ",0.0,-ys*RSB_MTX_EFF_R(submatrix));
+				RSB_FPRINTF(fd,"%g %g "RSB_EPS_RLINETO" ",-xs*RSB_MTX_EFF_C(submatrix),0.0);
+
+				if(submatrix->nnz > annz)
+					iv = 0.3 * ( ( - annz + submatrix->nnz ) / submatrix->nnz ), 
+					rv = gv = bv = 0.7,
+					rv+=iv;
+				else
+					iv = 0.3 * ( ( + annz - submatrix->nnz ) / annz ),
+					rv = gv = bv = 0.7,
+				       	gv+=iv;
+				RSB_FPRINTF(fd,RSB_EPS_CLOSEPATH" %g %g %g "RSB_EPS_SETRGB" 1 "RSB_EPS_SETLINEWIDTH" fill ",rv,gv,bv);
+				RSB_FPRINTF(fd,"%% submatrix %d square\n",smi);
+			}
+
+			RSB_FPRINTF(fd,"%% lhs dump\n");
+			RSB_SUBMATRIX_FOREACH_LEAF(mtxAp,submatrix,smi) 
+			{
+				rsb_rf_t fx,fy,tx,ty;
+/*
+				RSB_FPRINTF(fd,RSB_EPS_NEWPATH" ");
+				RSB_EPS_TRSL(fx,fy,submatrix->coff ,submatrix->roff,xs,ys,mtxAp);
+				RSB_EPS_TRSL(tx,ty,0               ,submatrix->roff,xs,ys,mtxAp);
+				RSB_FPRINTF(fd,"%g %g "RSB_EPS_MOVETO" ",fx,fy);
+				RSB_FPRINTF(fd,"%g %g lineto ",tx,ty);
+				px=tx,py=ty;
+				RSB_EPS_TRSL(fx,fy,submatrix->coff ,submatrix->roff+submatrix->nr,xs,ys,mtxAp);
+				RSB_EPS_TRSL(tx,ty,0               ,submatrix->roff+submatrix->nr,xs,ys,mtxAp);
+				RSB_FPRINTF(fd,"%g %g "RSB_EPS_MOVETO" ",fx,fy);
+				RSB_FPRINTF(fd,"%g %g lineto ",tx,ty);
+
+				RSB_FPRINTF(fd,"closepath "); RSB_FPRINTF(fd,"1 0 1 "RSB_EPS_SETRGB" "); RSB_FPRINTF(fd,"1 setlinewidth "); RSB_FPRINTF(fd,"1 stroke ");
+				RSB_FPRINTF(fd,"%% submatrix %d to-lhs\n",smi);
+
+				RSB_FPRINTF(fd,RSB_EPS_NEWPATH" ");
+				RSB_FPRINTF(fd,"%g %g "RSB_EPS_MOVETO" ",px,py);
+				RSB_FPRINTF(fd,"%g %g lineto ",tx,ty);
+				RSB_FPRINTF(fd,"closepath "); RSB_FPRINTF(fd,"1 0 1 "RSB_EPS_SETRGB" "); RSB_FPRINTF(fd,"5 setlinewidth "); RSB_FPRINTF(fd,"1 stroke ");
+				RSB_FPRINTF(fd,"%% submatrix %d lhs\n",smi);
+				*/
+
+				RSB_EPS_TRSL(fx,fy,-hoo+submatrix->bcoff,-hoo+submatrix->broff      ,xs,ys,mtxAp);
+				RSB_EPS_TRSL(tx,ty,-hoo+submatrix->bcoff,-hoo+RSB_MTX_LAR(submatrix),xs,ys,mtxAp);
+				RSB_FPRINTF(fd,RSB_EPS_NEWPATH" %g %g "RSB_EPS_MOVETO" %g %g "RSB_EPS_LINETO" ",fx,fy,tx,ty);
+				RSB_FPRINTF(fd,RSB_EPS_CLOSEPATH" 1 0 1 "RSB_EPS_SETRGB" 1 "RSB_EPS_SETLINEWIDTH" 1 stroke ");
+				RSB_FPRINTF(fd,"%% submatrix %d lhs\n",smi);
+			}
+
+			RSB_FPRINTF(fd,"%% rhs dump\n");
+			RSB_SUBMATRIX_FOREACH_LEAF(mtxAp,submatrix,smi) 
+			{
+				rsb_rf_t fx,fy,tx,ty;
+				//rsb_rf_t ih = (RSB_DO_FLAG_HAS(submatrix->flags,RSB_FLAG_USE_HALFWORD_INDICES)) ? 1.0 : 0.0;
+/*
+				RSB_FPRINTF(fd,"%% submatrix %d\n",smi);
+				RSB_FPRINTF(fd,RSB_EPS_NEWPATH" ");
+				
+				RSB_EPS_TRSL(fx,fy,submatrix->coff ,submatrix->roff,xs,ys,mtxAp);
+				RSB_EPS_TRSL(tx,ty,mtxAp->nr       ,submatrix->coff,xs,ys,mtxAp);
+				RSB_FPRINTF(fd,"%g %g moveto ",fx,fy);
+				RSB_FPRINTF(fd,"%g %g lineto ",tx,ty);
+				px=tx,py=ty;
+				RSB_EPS_TRSL(fx,fy,submatrix->coff+submatrix->nc,submatrix->roff,              xs,ys,mtxAp);
+				RSB_EPS_TRSL(tx,ty,mtxAp->nr                    ,submatrix->coff+submatrix->nc,xs,ys,mtxAp);
+				RSB_FPRINTF(fd,"%g %g moveto ",fx,fy);
+				RSB_FPRINTF(fd,"%g %g lineto ",tx,ty);
+				RSB_FPRINTF(fd,"closepath "); RSB_FPRINTF(fd,"0.5 1 0.5 setrgbcolor "); RSB_FPRINTF(fd,"1 setlinewidth "); RSB_FPRINTF(fd,"1 stroke ");
+				RSB_FPRINTF(fd,"%% submatrix %d to-rhs\n",smi);
+
+				RSB_FPRINTF(fd,RSB_EPS_NEWPATH" ");
+				RSB_FPRINTF(fd,"%g %g moveto ",px,py);
+				RSB_FPRINTF(fd,"%g %g lineto ",tx,ty);
+				RSB_FPRINTF(fd,"closepath "); RSB_FPRINTF(fd,"0.5 1 0.5 setrgbcolor "); RSB_FPRINTF(fd,"5 setlinewidth "); RSB_FPRINTF(fd,"1 stroke ");
+				RSB_FPRINTF(fd,"%% submatrix %d rhs\n",smi);
+			*/
+				RSB_EPS_TRSL(fx,fy,-hoo+submatrix->bcoff,             -hoo+submatrix->broff,xs,ys,mtxAp);
+				RSB_EPS_TRSL(tx,ty,-hoo+submatrix->coff+submatrix->bk,-hoo+submatrix->broff,xs,ys,mtxAp);
+				RSB_FPRINTF(fd,RSB_EPS_NEWPATH" %g %g "RSB_EPS_MOVETO" %g %g "RSB_EPS_LINETO" ",fx,fy,tx,ty);
+				RSB_FPRINTF(fd,RSB_EPS_CLOSEPATH" .5 1 .5 "RSB_EPS_SETRGB" 1 "RSB_EPS_SETLINEWIDTH" 1 stroke ");
+				RSB_FPRINTF(fd,"%% submatrix %d rhs\n",smi);
+			}
+
+			RSB_FPRINTF(fd,"%% node content labels\n");
+			RSB_SUBMATRIX_FOREACH_LEAF(mtxAp,submatrix,smi) 
+			{
+				rsb_rf_t ox,oy;
+				char fstr[RSB_MAX_STRERRLEN];
+				double fs;
+
+				RSB_EPS_TRSL(ox,oy,submatrix->coff,submatrix->roff+submatrix->nr/2,xs,ys,mtxAp);
+				sprintf(fstr," %d/%d %s%s %0.1e",1+smi,mtxAp->all_leaf_matrices_n,(RSB_DO_FLAG_HAS(submatrix->flags,RSB_FLAG_USE_HALFWORD_INDICES))?"H":"",(submatrix->matrix_storage == RSB_MATRIX_STORAGE_BCOR)?"COO":"CSR",(double)(submatrix->nnz));
+				fs = ( xs * submatrix->nc ) /  ( strlen(fstr) ) * 1.3 ;
+				RSB_FPRINTF(fd,"/Courier-Bold findfont %g "RSB_EPS_SCALEFONT" "RSB_EPS_SETFONT" %g %g "RSB_EPS_MOVETO" (%s) 0 0 0 "RSB_EPS_SETRGB" show\n",fs,ox,oy,fstr);
+			}
+	}
+
+	RSB_POSTSCRIPT_DUMP_COMMENT(fd,"sparse blocks dump");
+	errval = rsb__dump_block_rectangles(fd,mtxAp,0,0,xs,ys,mtxAp->nr,0);
+
+	if(z_dump)
+	{
+		int p = 0;
+		RSB_POSTSCRIPT_DUMP_COMMENT(fd,"z dump\nnewpath");
+
+		if(z_dump==1)
+			errval = rsb_dump_postscript_z_curve(fd,mtxAp, 0,0,xs,ys,mtxAp->nr,0,&p,pv);
+		else
+			errval = rsb__dump_postscript_ussv_order_curve(mtxAp,(rsb_rf_t)height,(rsb_rf_t)width,&p);
+		RSB_FPRINTF(fd,
+			"%d %d  %d setrgbcolor\n"
+			"1 setlinewidth\n"
+			"stroke\n\n"
+			,
+			0,0,1
+			);
+	}
+	if(want_blocks)
+		;/* dead code removed from here */
+	if(filename)
+	{
+		fclose(fd);
+	}
+err:
+	return errval;
+}
+
+static rsb_err_t rsb_dump_postscript_from_coo(FILE*fd, rsb_coo_idx_t *IA, rsb_coo_idx_t *JA, void *VA, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnz, rsb_blk_idx_t br, rsb_blk_idx_t bc, int width, int height, rsb_bool_t all_nnz, rsb_type_t typecode)
+{
+	/**
+	 \ingroup gr_internals
+	 Need better error handling.
+	 This function is experimentally used to render the sparse matrix.
+	 */
+#if RSB_ALLOW_STDOUT
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t n=0;
+	rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+	rsb_rf_t ys,xs;
+	rsb_rf_t csh,csw,dx=0.0,dy=0.0,rd=1.0;
+
+	RSB_DO_FLAG_ADD(flags,RSB_FLAG_WANT_BCSS_STORAGE) ;
+	RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORTED_INPUT) ;
+
+if(1)
+{
+	       	rsb_coo_idx_t /* ri=0,ci=0,*/nr=m,nc=k;
+	       	rsb_nnz_idx_t nzi=0;
+	       	rsb_nnz_idx_t onnz=nnz;
+		// RSB_STDERR("%s","FIXME: this is functioning code to render PostScript spy plots; it just needs to be called the right way ..\n");
+		rsb_aligned_t max[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+		rsb_aligned_t min[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+		const int want_points_plot = 0; /* was 1 */
+
+		rsb__fill_with_ones(VA,typecode,nnz,1);
+#if 1
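+		/* Halve the pixmap dimensions until at most 512 x 512 with <= 100000 nonzeroes; each resize merges coinciding entries, so the arrays can be shrunk accordingly. */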
+		while( (nnz>100000 || nr>512 || nc>512 ) && (nr>2 && nc>2))
+		{
+			/*
+				May be better to write a function *resize_to_nnz*.
+				This code is quite poor but does the job.
+			 */
+		       	rsb_coo_idx_t nnr=nr/2, nnc=nc/2;
+			rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+			// RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORTED_INPUT) ;
+			// RSB_STDERR("will rescale %d %d (%d nz) to %d %d...\n",nr,nc,nnz,nnr,nnc);
+			errval = rsb__mtx_as_pixmap_resize(VA,IA,JA,nnz,&nnz,nr,nc,nnr,nnc,typecode,flags);
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORTED_INPUT) ;
+			// nnz = rsb_weed_out_duplicates(IA,JA,VA,nnz,typecode,RSB_FLAG_DUPLICATES_SUM/*|RSB_FLAG_SORTED_INPUT*/);
+			if(RSB_SOME_ERROR(errval)) { RSB_PERR_GOTO(err,RSB_ERRM_ES) }
+			IA = rsb__realloc(IA,sizeof(rsb_coo_idx_t)*(nnz));
+			JA = rsb__realloc(JA,sizeof(rsb_coo_idx_t)*(nnz));
+			VA = rsb__realloc_vector(VA,nnz,typecode);
+			nc=nnc; nr=nnr;
+		}
+#endif
+#if 1
+		// if( (nnz>100000 || nr>height || nc>width ) && (nr>2 && nc>2))
+		if(1)
+		{
+		       	rsb_coo_idx_t nnr=height, nnc=width;
+			rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORTED_INPUT) ;
+			// RSB_STDERR("will rescale further %d %d (%d nz) to %d %d...\n",nr,nc,nnz,nnr,nnc);
+			if(nnr>nr)
+				rd=((rsb_rf_t)nnr)/((rsb_rf_t)nr);
+			errval = rsb__mtx_as_pixmap_resize(VA,IA,JA,nnz,&nnz,nr,nc,nnr,nnc,typecode,flags);
+			if(RSB_SOME_ERROR(errval)) { RSB_PERR_GOTO(err,RSB_ERRM_ES) }
+			IA = rsb__realloc(IA,sizeof(rsb_coo_idx_t)*(nnz));
+			JA = rsb__realloc(JA,sizeof(rsb_coo_idx_t)*(nnz));
+			VA = rsb__realloc_vector(VA,nnz,typecode);
+			nc=nnc; nr=nnr;
+		}
+#endif
+		errval = rsb_util_sort_row_major_inner(VA,IA,JA,nnz,nr,nc, typecode,RSB_FLAG_NOFLAGS);
+		if(RSB_SOME_ERROR(errval)) 
+		{
+		       	RSB_ERROR(RSB_ERRM_ES); /* RSB_PERR_GOTO(err,RSB_ERRM_ES)*/ /*not a critical error; however shall emit a warning : */
+		}
+		nnz = rsb_weed_out_duplicates(IA,JA,VA,nnz,typecode,RSB_FLAG_DUPLICATES_SUM|RSB_FLAG_SORTED_INPUT);
+
+		RSB_FPRINTF(fd,""
+			"%%!PS-Adobe-3.0 EPSF-3.0\n"
+			"%%%%Creator: "RSB_PACKAGE_STRING"\n"
+			"%%%%Title: matrix spy plot (originally %d x %d / %d, here %d x %d/%d)\n"
+			"%%%%CreationDate: \n"
+			"%%%%DocumentData: Clean7Bit\n"
+			"%%%%Origin: 0 0\n"
+			"%%%%BoundingBox: 0 0 %d %d\n"
+			"%%%%LanguageLevel: 2\n"
+			"%%%%Pages: 1\n"
+			"%%%%Page: 1 1\n"
+			/* "0.5 0.5 0.5 setrgbcolor\n" */
+			,m,k,onnz,nr,nc,nnz,nc,nr);
+		RSB_FPRINTF(fd,"save /$LIBRSB_DICT 3 dict def $LIBRSB_DICT begin /M {moveto} bind def /Z {gsave currentpoint lineto %g setlinewidth 1 setlinecap stroke grestore} bind def /D {M Z} bind def /K {0.5 0.5 setrgbcolor} bind def\n",rd);
+		RSB_FPRINTF(fd,"/R {rlineto} bind def\n");
+		RSB_FPRINTF(fd,"/N {newpath} bind def\n");
+		RSB_FPRINTF(fd,"/L {lineto} bind def\n");
+		RSB_FPRINTF(fd,"/C {closepath} bind def\n");
+		RSB_FPRINTF(fd,"/SLW {setlinewidth} bind def\n");
+		RSB_FPRINTF(fd,"/SRGB {setrgbcolor} bind def\n");
+		RSB_FPRINTF(fd,"/SCF {scalefont} bind def\n");
+		RSB_FPRINTF(fd,"/SF {setfont} bind def\n");
+		rsb__util_find_max(&max[0],VA,typecode,nnz,1);
+		rsb__util_find_min(&min[0],VA,typecode,nnz,1);
+		// RSB_STDERR("%lf %lf\n", *(double*)(&min[0]), *(double*)(&max[0]));
+		rsb__util_do_negate(&min[0],typecode,1);
+		rsb__util_vector_add(&max[0],&min[0],typecode,1);
+		// RSB_STDERR("%lf %lf\n", *(double*)(&min[0]), *(double*)(&max[0]));
+		rsb__vector_scale_inv(VA,&max[0],typecode,nnz);
+
+		if(want_points_plot)
+		{
+		RSB_FPRINTF(fd,"%% dots plot\n");
+		for(nzi=0;nzi<nnz;++nzi)
+		{
+			// RSB_FPRINTF(fd,"%d %d D\n",IA[nzi],JA[nzi]);
+			// RSB_FPRINTF(fd,"%d %d D ",nr-1-IA[nzi],JA[nzi]);
+			rsb_rf_t cv=0.0;
+			RSB_NUMERICAL_TYPE_CAST_TO_ANY_P(rsb_rf_t,cv,typecode,VA,nzi);
+			// cv=1.0f-cv;
+			cv=0.5+cv/2.0; /* stronger */
+			//cv=0.0+cv/2;
+			// cv=0.5;
+			// gray ... red
+			// RSB_FPRINTF(fd,"%0.2f %0.2f %0.2f setrgbcolor\n",cv,0.5,0.5);
+			RSB_FPRINTF(fd,"%.2f K ",cv);
+			//RSB_FPRINTF(fd,"%d %d D ",nr-1-IA[nzi],JA[nzi]);
+			RSB_FPRINTF(fd,"%d %d D ",JA[nzi],nr-1-IA[nzi]);
+			if(nzi%32==0)
+				RSB_FPRINTF(fd,"\n");
+		}
+		}
+		// RSB_FPRINTF(fd,"gsave grestore showpage\n");
+		RSB_FPRINTF(fd,"stroke\n");
+		goto err;
+	}
+
+	if(!all_nnz)
+	{
+		/* rsb__mtx_as_pixmap_resize is optional */
+		if( RSB_SOME_ERROR(errval = rsb__mtx_as_pixmap_resize(VA, IA, JA, nnz, &nnz, m, k, height, width, typecode, flags)))
+			goto err;
+	}
+	/*	if(m<=height)
+			ys=1.0;
+		else
+			ys=((rsb_rf_t)height)/m;
+		if(k<=width)
+			xs=1.0;
+		else*/
+		ys=((rsb_rf_t)height)/m;
+		xs=((rsb_rf_t)width)/k;
+		csw=xs; /* cell width follows the x (column) scale */
+		csh=ys; /* cell height follows the y (row) scale */
+	
+//	{
+//		ys=((rsb_rf_t)height)/m;
+//		xs=((rsb_rf_t)width)/k;
+//	}
+	if(width>k)
+		dx=.1*csw, csw*=.8;
+	else
+		xs=csw=1.0;
+	if(height>m)
+		dy=.1*csh, csh*=.8;
+	else
+		ys=csh=1.0;
+/*
+	if(height>m)
+		yps=ys*.8;
+	else
+		yps=ys;
+	if(width>m)
+		xps=xs*.8;
+	else
+		xps=xs;
+
+	if(!all_nnz)
+	{
+		m=height;
+		k=width;
+	}*/
+
+	rsb__do_print_postscript_header(fd, width, height, csw, csh);
+
+	RSB_FPRINTF(fd,"%%%% nnz dump\n");
+	RSB_FPRINTF(fd,"%%%% scales : %g %g\n",xs,ys);
+
+
+	if(xs>1.0) xs=1.0;
+	if(ys>1.0) ys=1.0;
+
+	for(n=0;n<nnz;++n)
+	{
+		RSB_FPRINTF(fd,"%%%% at : %d %d\n",(int)IA[n],(int)JA[n]);
+		RSB_FPRINTF(fd,
+			"%g %g translate\n"
+			".85 .85 .85 csquare\n"
+			"-%g -%g translate\n"
+			, dx+((rsb_rf_t) (JA[n]))*xs, -dy+((rsb_rf_t)height)-((rsb_rf_t) (IA[n]))*ys
+			, dx+((rsb_rf_t) (JA[n]))*xs, -dy+((rsb_rf_t)height)-((rsb_rf_t) (IA[n]))*ys);
+	}
+
+	//RSB_FPRINTF(fd, "%%%%EOF\n");
+
+err:
+	RSB_DO_ERR_RETURN(errval)
+#else /* RSB_ALLOW_STDOUT */
+	RSB_DO_ERR_RETURN(RSB_ERR_UNSUPPORTED_FEATURE);
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+rsb_err_t rsb__dump_postscript_recursion_from_matrix(const char * filename, rsb_blk_idx_t br, rsb_blk_idx_t bc, int width, int height, rsb_flags_t flags, rsb_bool_t want_blocks, rsb_bool_t z_dump , rsb_bool_t want_nonzeros, rsb_bool_t want_recursion, rsb_type_t typecode)
+{
+	/**
+	 \ingroup gr_internals
+	 */
+#if RSB_ALLOW_STDOUT
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t *IA=NULL, *JA=NULL;
+	void *VA=NULL;
+	rsb_coo_idx_t m=0,k=0;
+       	rsb_nnz_idx_t nnz=0;
+	struct rsb_mtx_t * mtxAp=NULL;
+	FILE*fd = RSB_DEFAULT_FD;
+
+	if(!filename )
+	{
+		RSB_ERROR(RSB_ERRM_ES); 
+		return RSB_ERR_BADARGS;
+	}
+
+	errval = rsb__util_mm_load_matrix_f(filename,&IA,&JA,&VA,&m,&k,&nnz,typecode,flags,NULL,NULL);
+	if(RSB_SOME_ERROR(errval)) { RSB_PERR_GOTO(err,RSB_ERRM_ES) }
+
+	mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,m,k,br,bc,flags,&errval);
+	if(!mtxAp || RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES); 
+	}
+
+	if( want_nonzeros )
+	{
+		// RSB_POSTSCRIPT_DUMP_COMMENT(fd,"nonzeros structure dump");
+		errval = rsb_dump_postscript_from_coo(fd, IA, JA, VA, m, k, nnz, br, bc, width, height, want_nonzeros, typecode);
+		want_nonzeros = 0;
+		if(RSB_SOME_ERROR(errval)) { RSB_PERR_GOTO(err,RSB_ERRM_ES) }
+	}
+
+	RSB_CONDITIONAL_FREE(IA);
+       	RSB_CONDITIONAL_FREE(JA);
+       	RSB_CONDITIONAL_FREE(VA);
+
+	if(want_recursion)
+	{
+		errval = rsb__dump_postscript_recursion_from_mtx_t(fd, NULL, mtxAp, br, bc, width, height, RSB_FLAG_NOFLAGS/* FIXME */, want_blocks, z_dump , 0 /*want_nonzeros*/, NULL );
+		if(RSB_SOME_ERROR(errval)) { RSB_PERR_GOTO(err,RSB_ERRM_ES) }
+	}
+
+	RSB_MTX_FREE(mtxAp);/* we don't need it anymore here */
+
+	if( want_nonzeros )
+	{
+		RSB_POSTSCRIPT_DUMP_COMMENT(fd,"nonzeros structure dump");
+		errval = rsb__dump_postscript_from_matrix(filename, br, bc, width, height, 1);
+		if(RSB_SOME_ERROR(errval)) { RSB_PERR_GOTO(err,RSB_ERRM_ES) }
+	}
+
+	errval = RSB_ERR_NO_ERROR;
+	goto ret;
+err:
+	errval = RSB_ERR_GENERIC_ERROR;
+ret:
+	RSB_CONDITIONAL_FREE(IA); RSB_CONDITIONAL_FREE(JA); RSB_CONDITIONAL_FREE(VA);
+	RSB_MTX_FREE(mtxAp);
+	return errval;
+#else  /* RSB_ALLOW_STDOUT */
+	return RSB_ERR_UNSUPPORTED_FEATURE;
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+static rsb_err_t rsb_dump_postscript_from_mtx_t(FILE*fd, const struct rsb_mtx_t*mtxAp, rsb_blk_idx_t br, rsb_blk_idx_t bc, int width, int height, rsb_bool_t all_nnz)
+{
+	struct rsb_coo_matrix_t coo;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	RSB_INIT_COO_FROM_MTX(&coo,mtxAp);
+	if(rsb__allocate_coo_matrix_t(&coo)!=&coo)
+       	{
+	       	RSB_PERR_GOTO(err,RSB_ERRM_ES); 
+	}
+	errval = rsb__do_get_coo(mtxAp,(rsb_byte_t**)(&coo.VA),&coo.IA,&coo.JA,RSB_FLAG_NOFLAGS);
+	if(!RSB_SOME_ERROR(errval))
+		errval = rsb_dump_postscript_from_coo(fd, coo.IA, coo.JA, coo.VA, coo.nr, coo.nc, coo.nnz, br, bc, width, height, all_nnz, mtxAp->typecode);
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(merr,RSB_ERRM_ES);
+	}
+merr:
+	rsb__destroy_coo_matrix_t(&coo);
+err:
+	return errval;
+}
+
+rsb_err_t rsb__dump_postscript_from_matrix(const char * filename, rsb_blk_idx_t br, rsb_blk_idx_t bc, int width, int height, rsb_bool_t all_nnz)
+{
+	/**
+	 \ingroup gr_internals
+	 This function is experimentally used to render the sparse matrix.
+	 Needs better error handling.
+	 */
+#if RSB_ALLOW_STDOUT
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t *IA=NULL, *JA=NULL;
+	void *VA=NULL;
+	rsb_coo_idx_t m=0,k=0;
+	rsb_nnz_idx_t nnz=0;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_DOUBLE ;
+#else /* RSB_NUMERICAL_TYPE_DOUBLE */
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_DEFAULT;
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+	rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+	rsb_time_t t=0;
+
+	RSB_DO_FLAG_ADD(flags,RSB_FLAG_WANT_BCSS_STORAGE) ;
+	RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORTED_INPUT) ;
+
+	if(!filename )
+		return RSB_ERR_BADARGS;
+
+	t = - rsb_time();
+	if( RSB_SOME_ERROR(errval = rsb__util_mm_load_matrix_f(filename, &IA, &JA,&VA , &m, &k, &nnz , typecode, flags, NULL, NULL)) )
+		goto err;
+	t += rsb_time();
+
+#if 1
+	errval = rsb_dump_postscript_from_coo(/*fd*/RSB_DEFAULT_FD, IA, JA, VA, m, k, nnz, br, bc, width, height, all_nnz, typecode);
+#else
+#if 0
+	{
+		RSB_STDERR("%s","FIXME: this is functioning code to render PostScript raster spy plots; it just needs the right place to be employed ..\n");
+		FILE*fd = RSB_DEFAULT_FD;
+	       	rsb_coo_idx_t ri=0,ci=0,nr=m,nc=k;
+	       	const rsb_coo_idx_t nnr=16/*0*2*/;
+		const rsb_coo_idx_t nnc=nc/(nr/nnr);
+	       	rsb_nnz_idx_t nzi=0;
+		errval = rsb__mtx_as_pixmap_resize(VA,IA,JA,nnz,&nnz,nr,nc,nnr,nnc,typecode,RSB_FLAG_NOFLAGS);
+		if(RSB_SOME_ERROR(errval)) { RSB_PERR_GOTO(err,RSB_ERRM_ES) }
+		nc=nnc;
+		nr=nnr;
+		errval = rsb_util_sort_row_major_inner(VA,IA,JA,nnz,nr,nc, typecode,RSB_FLAG_NOFLAGS);
+		if(RSB_SOME_ERROR(errval)) { RSB_PERR_GOTO(err,RSB_ERRM_ES) }
+		errval = rsb__util_compress_to_row_pointers_array(NULL,nnz,nr,RSB_FLAG_NOFLAGS,RSB_FLAG_NOFLAGS,IA);
+		if(RSB_SOME_ERROR(errval)) { RSB_PERR_GOTO(err,RSB_ERRM_ES) }
+		// errval = rsb__do_switch_rsb_mtx_to_csr_sorted(mtxAp, &VA, &IA, &JA, RSB_FLAG_NOFLAGS);
+		if(RSB_SOME_ERROR(errval)) { RSB_PERR_GOTO(err,RSB_ERRM_ES) }
+		// RSB_POSTSCRIPT_DUMP_COMMENT(fd,"raster dump\n");
+		RSB_FPRINTF(fd,""
+			"%%!PS-Adobe-3.0 EPSF-3.0\n"
+			"%%%%Creator: "RSB_PACKAGE_STRING"\n"
+			"%%%%Title: matrix spy plot\n"
+			"%%%%CreationDate: \n"
+			"%%%%DocumentData: Clean7Bit\n"
+			"%%%%Origin: 0 0\n"
+			"%%%%BoundingBox: 0 0 %d %d\n"
+			"%%%%LanguageLevel: 2\n"
+			"%%%%Pages: 1\n"
+			"%%%%Page: 1 1\n"
+		,nc,nr);
+		RSB_FPRINTF(fd,"gsave\n""0 %d translate\n""%d %d scale\n""%d %d 8 [%d 0 0 -%d 0 0]\n"" {<",nr,nc,nr,nc,nr,nc,nr);
+		for(ri=0;ri<nr;++ri)
+		{
+	       		rsb_coo_idx_t fc=0,lc=0;
+	       		rsb_coo_idx_t crp=IA[ri],nrp=IA[ri+1];
+			if(nrp==crp)
+				lc=nc-1;
+			else
+				lc=JA[crp]-1;
+			for(ci=fc;ci<=lc;++ci)
+				RSB_FPRINTF(fd,"FF");
+			for(nzi=crp;nzi<nrp;++nzi)
+			{
+				RSB_FPRINTF(fd,"00");
+				fc=JA[nzi]+1;
+				lc=fc-1;
+				if(JA[nzi]==nc-1)
+					lc=nc-1;
+				else
+				{
+					if(nzi+1 < nrp)
+						lc=JA[nzi+1]-1;
+					else
+						lc=nc-1;
+				}
+				for(ci=fc;ci<=lc;++ci)
+					RSB_FPRINTF(fd,"FF");
+			}
+			RSB_FPRINTF(fd,"\n");
+		}
+		RSB_FPRINTF(fd,">}\n""image\n""grestore\n""showpage\n");
+	}
+#else
+		goto err;
+#endif
+#endif
+err:
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_CONDITIONAL_FREE(JA);
+	RSB_CONDITIONAL_FREE(VA);
+	RSB_DO_ERR_RETURN(errval)
+#else /* RSB_ALLOW_STDOUT */
+	RSB_DO_ERR_RETURN(RSB_ERR_UNSUPPORTED_FEATURE);
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+rsb_err_t rsb__do_mtx_render(const char * filename, const struct rsb_mtx_t*mtxAp, rsb_coo_idx_t pmWidth, rsb_coo_idx_t pmHeight, rsb_marf_t rflags)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
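+	/* Among the EPS variants below, RSB_MARF_EPS_S selects the nonzeroes (spy)
+	   plot only, RSB_MARF_EPS_B the recursive blocks plot only; RSB_MARF_EPS and
+	   RSB_MARF_EPS_L produce both. */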
+	switch(rflags)
+	{
+		case(RSB_MARF_RGB):
+		RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_UNIMPLEMENTED_YET);
+		break;
+		case(RSB_MARF_EPS_L):
+		case(RSB_MARF_EPS_B):
+		case(RSB_MARF_EPS_S):
+		case(RSB_MARF_EPS):
+		{
+			FILE * fd = NULL;
+			rsb_time_t dt = rsb_time();
+			/* filename = filename ? filename : RSB_DEFAULT_DUMPFILENAME; */
+			if( ! filename )
+			{
+		       		fd = RSB_DEFAULT_FD;
+			}
+			else
+				fd = rsb__util_fopen(filename,"w");
+			if( fd == NULL ) /* opening the dump file may fail */
+			{ errval = RSB_ERR_GENERIC_ERROR; goto err; }
+
+			if( rflags == RSB_MARF_EPS || rflags == RSB_MARF_EPS_S || rflags == RSB_MARF_EPS_L )
+				RSB_DO_ERROR_CUMULATE(errval,rsb_dump_postscript_from_mtx_t(fd,mtxAp,1,1,pmWidth,pmHeight,1));
+			if( rflags == RSB_MARF_EPS || rflags == RSB_MARF_EPS_B || rflags == RSB_MARF_EPS_L )
+				RSB_DO_ERROR_CUMULATE(errval,rsb__dump_postscript_recursion_from_mtx_t(fd,NULL,mtxAp,1,1,pmWidth,pmHeight,rflags,0,1,0,NULL));
+			if( fd )
+			{
+				dt = rsb_time() - dt;
+				RSB_FPRINTF(fd,"%% rendering time ~ %lg s\n",dt);
+			}
+			
+			if( filename )
+				fclose(fd);
+		}
+		break;
+		default: {errval = RSB_ERR_UNIMPLEMENTED_YET; goto err;}
+	}
+err:
+	return errval;
+}
+
+/* @endcond */
diff --git a/rsb_eps.h b/rsb_eps.h
new file mode 100644
index 0000000..a83fa06
--- /dev/null
+++ b/rsb_eps.h
@@ -0,0 +1,38 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This header file declares PostScript rendering functions.
+ * */
+#ifndef RSB_EPS_H_INCLUDED
+#define RSB_EPS_H_INCLUDED
+#include "rsb_internals.h"
+rsb_err_t rsb__dump_postscript_from_matrix(const char * filename, rsb_blk_idx_t br, rsb_blk_idx_t bc, int width, int height, rsb_bool_t all_nnz);
+rsb_err_t rsb__dump_postscript_recursion_from_matrix(const char * filename, rsb_blk_idx_t br, rsb_blk_idx_t bc, int width, int height, rsb_flags_t flags, rsb_bool_t want_blocks, rsb_bool_t z_dump , rsb_bool_t want_nonzeros, rsb_bool_t want_recursion, rsb_type_t typecode );
+rsb_err_t rsb__dump_postscript_recursion_from_mtx_t(FILE*fd, const char * filename, const struct rsb_mtx_t*mtxAp, rsb_blk_idx_t br, rsb_blk_idx_t bc, int width, int height, rsb_marf_t rflags, rsb_bool_t want_blocks, rsb_bool_t z_dump, rsb_bool_t want_nonzeros, int *pv );
+#define RSB_POSTSCRIPT_DUMP_COMMENT(FD,C) {RSB_FPRINTF(FD,"%%%% ");RSB_FPRINTF(FD,C);RSB_FPRINTF(FD,"\n");}
+rsb_err_t rsb__do_mtx_render(const char * filename, const struct rsb_mtx_t*mtxAp, rsb_coo_idx_t pmWidth, rsb_coo_idx_t pmHeight, rsb_marf_t rflags);
+#endif /* RSB_EPS_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_err.c b/rsb_err.c
new file mode 100644
index 0000000..9ad3b61
--- /dev/null
+++ b/rsb_err.c
@@ -0,0 +1,143 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/**
+ * @file
+ * @author Michele Martone
+ * @brief
+ * */
+#include "rsb_common.h"
+#include "rsb_util.h"
+#include "rsb.h"
+
+#define rsb__strcpy strcpy
+#define rsb__sprintf sprintf
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+rsb_err_t rsb__do_strerror_r(rsb_err_t errval, rsb_char_t * buf, size_t buflen)
+{
+	/* TODO: what if buflen is not enough ? shall report this somehow. */
+	rsb_char_t*sbuf = buf;
+	const rsb_char_t *s = "No error occurred (success). The return value that means function operation success, in most cases.\n";
+
+	if((errval == RSB_ERR_NO_ERROR))
+		goto err;
+
+	if( buf == NULL)
+	{
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+
+	switch(errval)
+	{
+		case RSB_ERR_GENERIC_ERROR:
+		s = "An unspecified error occurred.";
+		break;
+		case RSB_ERR_UNSUPPORTED_OPERATION:
+		s = "The user requested an operation which is not supported (e.g.: was opted out at build time).";
+		break;
+		case RSB_ERR_UNSUPPORTED_TYPE:
+		s = "The user requested to use a type which is not supported (e.g.: was opted out at build time).";
+		break;
+		case RSB_ERR_UNSUPPORTED_FORMAT:
+		s = "The user requested to use a matrix storage format which is not supported (e.g.: was opted out at build time).";
+		break;
+		case RSB_ERR_INTERNAL_ERROR:
+		s = "An error occurred which is not apparently caused by a user's fault (internal error).";
+		break;
+		case RSB_ERR_BADARGS:
+		s = "The user supplied some corrupt data as argument.";
+		break;
+		case RSB_ERR_ENOMEM:
+		s = "There is not enough dynamical memory to perform the requested operation.";
+		break;
+		case RSB_ERR_UNIMPLEMENTED_YET:
+		s = "The requested operation was not implemented yet in this code revision.";
+		break;
+		case RSB_ERR_LIMITS:
+		s = "The requested operation could not be executed, or index overflow will happen.";
+		break;
+		case RSB_ERR_NO_USER_CONFIGURATION:
+		s = "A file containing user set configuration was not present.";
+		break;
+		case RSB_ERR_CORRUPT_INPUT_DATA:
+		s = "User supplied data (e.g.: from file) was corrupt.";
+		break;
+		case RSB_ERR_FAILED_MEMHIER_DETECTION:
+		s = "Memory hierarchy info failed to be detected. You can bypass this by setting a meaningful RSB_USER_SET_MEM_HIERARCHY_INFO environment variable.";
+		break;
+		case RSB_ERR_COULD_NOT_HONOUR_EXTERNALLY_ALLOCATION_FLAGS:
+		s = "User gave flags for an inplace constructor in a copy-based routine.";
+		break;
+		case RSB_ERR_UNSUPPORTED_FEATURE:
+		s = "The requested feature (e.g.:blocking) is not available because it was opted out or not configured at built time.";
+		break;
+		case RSB_ERR_NO_STREAM_OUTPUT_CONFIGURED_OUT:
+		s = "Output to stream feature has been disabled at configure time.";
+		break;
+		case RSB_ERR_INVALID_NUMERICAL_DATA:
+		s = "User gave some input with invalid numerical data.";
+		break;
+		case RSB_ERR_MEMORY_LEAK:
+		s = "Probable memory leak (user did not deallocate librsb structures before calling rsb_lib_exit()).";
+		break;
+		/*
+		case RSB_ERR_FORTRAN_ERROR:
+		s = "A Fortran specific error occurred.";
+		break;
+		*/
+		default:
+		{
+			rsb__sprintf(sbuf,"Unknown error code (%x).",errval);
+			s = sbuf;
+			errval = RSB_ERR_BADARGS;
+			goto err;
+		}
+	}
+	errval = RSB_ERR_NO_ERROR;
+	rsb__sprintf(sbuf,"%s",s);
+err:
+	return errval;
+}
+
+rsb_err_t rsb__do_perror(FILE *stream, rsb_err_t errval)
+{
+	rsb_char_t sbuf[RSB_MAX_STRERRLEN];
+	/*!
+	 * \ingroup gr_internals
+	 */
+	if((errval == RSB_ERR_NO_ERROR))
+		goto err;
+
+	rsb__do_strerror_r(errval,sbuf,sizeof(sbuf)/sizeof(sbuf[0]));
+	 
+	if(stream)
+		fprintf(stream,"ERROR 0x%x : %s\n",(unsigned int)errval,sbuf);
+	else
+		RSB_STDERR("ERROR 0x%x : %s\n",(unsigned int)errval,sbuf);
+err:
+	RSB_DO_ERR_RETURN(RSB_ERR_NO_ERROR)
+}
+
+/* @endcond */
diff --git a/rsb_err.h b/rsb_err.h
new file mode 100644
index 0000000..a8ad773
--- /dev/null
+++ b/rsb_err.h
@@ -0,0 +1,108 @@
+/*
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*
+ * @author Michele Martone
+ */
+#ifndef RSB_ERR_H_INCLUDED
+#define RSB_ERR_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "rsb_common.h"
+
+#define RSB_ERRM_E_MTXAP	"Supplied NULL matrix structure pointer!"
+#define RSB_ERRM_E_MTXAPP	"Supplied NULL matrix structure pointer pointer!"
+#define RSB_ERRM_E_VIJ	"Supplied NULL VA, IA, JA arrays!"
+#define RSB_ERRM_E_VIJP	"Supplied NULL VAP, IAP, JAP arrays!"
+#define RSB_ERRM_CNHEAF	"Cannot reuse arrays while passing them as const."
+#define RSB_ERRM_EM	"!\n"
+#define RSB_ERRM_BCE	"Internal bounds computing error!\n"
+#define RSB_ERRM_ZSM	"WARNING : zero-sized malloc!\n"
+#define RSB_ERRM_BFEANS	"Bad flags: RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS are not supported here!\n"
+#define RSB_ERRM_SEOWS	"some error occurred while shuffling\n"
+#define RSB_ERRM_ALSMINT	"a leaf submatrix is non terminal!\n"
+#define RSB_ERRM_ANLSMIT	"a non leaf submatrix is terminal!\n"
+#define RSB_ERRM_FAOTAFS	"failed allocation of temporary array for swap"
+#define RSB_ERRM_ES	""
+#define RSB_ERRM_NPSFF	"NULL pointer supplied for filename."
+#define RSB_ERRM_CBAEM	"Cannot build an empty matrix.\n"
+#define RSB_ERRM_CMOINIY	"Column Major order is not implemented yet.\n"
+#define RSB_ERRM_MDNFARTS	"matrix does not fit as RCSR (too sparse)!\n"
+#define RSB_ERRM_FYRYNS	"fatal : your rsb_coo_idx_t type is not supported."
+#define RSB_ERRM_WTC		"error : specified type code is not valid\n"
+#define RSB_ERRM_COVMUINS	"control of virtual memory usage is not supported\n"
+#define RSB_ERRM_FCOVMU		"failed control of virtual memory usage\n"
+#define RSB_ERRM_PFTM		"Problems finalizing the matrix!\n"
+#define RSB_ERRM_WUF		"WARNING: unfinished code!\n"
+#define RSB_ERRM_PAL		"probable allocation problem\n"
+#define RSB_ERRM_NAOL		"no array of leaves ?\n"
+#define RSB_ERRM_NNTC		"no nonzeros to clone ?\n"
+#define RSB_ERRM_NDIANN		"no diagonal implicit and no nonzeros ?\n"
+#define RSB_ERRM_CP		"cleanup problem\n"
+#define RSB_ERRM_BNCS		"blocking not correctly specified!\n"
+#define RSB_ERRM_CIIAUF		"Clique insertion is an unfinished functionality!\n"
+#define RSB_ERRM_FMMTDT		"failed matrix multiplication to dense test\n"
+#define RSB_ERRM_FMATD		"failed matrix add to dense\n"
+#define RSB_ERRM_FMM		"failed matrix multiplication\n"
+#define RSB_ERRM_FMC		"failed matrix cloning\n"
+#define RSB_ERRM_CMINBC		"cloned matrix is not built correctly\n"
+#define RSB_ERRM_FCMS		"Failed computing matrix sum.\n"
+#define RSB_ERRM_FMATDBC	"failed matrix add to dense basic checksum\n"
+#define RSB_ERRM_FYCITINS	"fatal : your rsb_coo_idx_t type is not supported."
+#define RSB_ERRM_WOPSTASA	"WARNING : overflow possible. Switching to another sort algorithm.\n"
+#define RSB_ERRM_SLIINS		"error : seems like input is not sorted\n"
+#define RSB_ERRM_EWEFCFD	"error while estimating fillin (corrupt fillin data?)\n"
+#define RSB_ERRM_EWEFCTD	"error while estimating fillin (corrupt timing data?)\n"
+#define RSB_ERRM_ERROR		"error\n"
+#define RSB_ERRM_ESIIB		"error : sorting input is bad\n"
+#define RSB_ERRM_SLSIB		"error : seems like sorting is bugged\n"
+#define RSB_ERRM_SIL		"error initializing library!\n"
+#define RSB_ERRM_EDNC		"error during nonzeros compacting\n"
+#define RSB_ERRM_ZCFRWAEC	"zero compacting function returned with an error code\n"
+#define RSB_ERRM_EQRPF		"error reading performance file.\n"
+#define RSB_ERRM_ELMPF		"error loading memory performance file.\n"
+#define RSB_ERRM_AE		"allocation error\n"
+#define RSB_ERRM_IE		"internal error ?\n"
+#define RSB_ERRM_NL		"\n"
+#define RSB_ERRM_MBE		"matrix build error!\n"
+#define RSB_ERRM_BM		"bad mtxAp:"
+#define RSB_ERRM_TS		"a problem occurred in triangular solve!\n"
+#define RSB_ERRM_MV		"a problem occurred in sparse matrix-vector product!\n"
+#define RSB_ERRM_NOLP		"timer-based profiling has not been enabled at configure time!\n"
+#define RSB_ERRM_IMNIP		"input matrix is not an in place one!\n"
+#define RSB_ERRM_DNSAMIWAFCB		"Did not supply a matrix initiated with rsb_mtx_alloc_from_coo_begin!\n"
+#define RSB_ERRM_IPEWIEM	"Internal problem encountered when initiating an empty matrix\n"
+#define RSB_ERRM_NO_XDR	"No XDR configured in: binary matrix I/O disabled.\n"
+
+rsb_err_t rsb__do_perror(FILE *stream, rsb_err_t errval);
+/*const rsb_char_t * rsb__do_strerror_r(rsb_err_t errval, rsb_char_t * buf, size_t buflen);*/
+rsb_err_t rsb__do_strerror_r(rsb_err_t errval, rsb_char_t * buf, size_t buflen);
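+/* Typical usage (sketch): rsb_char_t buf[RSB_MAX_STRERRLEN]; rsb__do_strerror_r(errval,buf,sizeof(buf)/sizeof(buf[0])); */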
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+#endif /* RSB_ERR_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_failure_tests.c b/rsb_failure_tests.c
new file mode 100644
index 0000000..920b3b9
--- /dev/null
+++ b/rsb_failure_tests.c
@@ -0,0 +1,253 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/**
+ * @file
+ * @author Michele Martone
+ * @brief
+ *
+ * failure testing code.
+ * \internal
+ *
+ * */
+#include "rsb_common.h"
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+#define RSB_WANT_FAILURE_TESTER 1
+#define RSB_FAILURE_NOTICE(STMT) STMT;RSB_INFO("Injecting failure:\n%s\n",#STMT)
+#define RSB_VARIATION_NOTICE(STMT) STMT;RSB_INFO("Injecting variation:\n%s\n",#STMT)
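+/* The two macros above execute the given statement first and then log its source text, making each injected failure/variation visible in the test output. */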
+#define RSB_FT_FFL_PRINTF printf("In %s located in %20s:%d :\n",__func__,__FILE__,__LINE__)
+#if 0
+static rsb_err_t rsb_do_meminfo(void)
+{
+	/*!
+	   \ingroup rsb_doc_library
+	  
+	   Write to the info stream (see #RSB_IO_WANT_OUTPUT_STREAM) some memory allocation info.
+
+	   \warning \rsb_warn_soon_to_be_deprecated_msg 
+	   \return \rsberrcodemsg
+	 */
+#ifndef RSB_DISABLE_ALLOCATOR_WRAPPER
+	RSB_INFO("allocated %zd bytes of memory in %zu chunks\n",rsb__get_g_rsb_memory_count(),rsb__get_g_rsb_allocations_count());
+#endif
+	return RSB_ERR_NO_ERROR;
+}
+#define RSB_ALLOC_INFO() RSB_FT_FFL_PRINTF ;rsb_do_meminfo()
+#else
+#define RSB_ALLOC_INFO() RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+#endif
+#define RSB_MTX_FREE_PARANOID(MTXAP) if(MTXAP) { RSB_ALLOC_INFO(); RSB_MTX_FREE(MTXAP); }
+#define RSB_FREE_PARANOID(PTR) if(PTR) { RSB_ALLOC_INFO(); RSB_CONDITIONAL_FREE(PTR); }
+
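+/* Returns RSB_BOOL_TRUE with probability 1/out_of (assuming rsb__rand_coo_index(n) is uniform on [0,n)); always RSB_BOOL_FALSE for out_of==0. */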
+static rsb_bool_t rsb_random_event(rsb_int out_of) 
+{
+	rsb_bool_t res = RSB_BOOL_FALSE;
+
+	RSB_ALLOC_INFO();
+	if(!out_of)
+		goto rf;
+	if( rsb__rand_coo_index(out_of) + 1 == out_of)
+		res = RSB_BOOL_TRUE;
+rf:
+	RSB_ALLOC_INFO();
+	return res;
+}
+
+rsb_err_t rsb_blas_failure_tester(const rsb_char_t*tds)
+{
+	/**
+	 * \ingroup gr_internals
+	 * FIXME: still unfinished. This tester shall perform randomized failure-prone operations.
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+#if RSB_WANT_FAILURE_TESTER
+	struct rsb_limiter ls;
+	struct rsb_mtx_t * mtxAp=NULL, * mtxBp=NULL, * mtxCp=NULL;
+	rsb_coo_idx_t*IA = NULL;
+	rsb_coo_idx_t*JA = NULL;
+	rsb_type_t typecodea[] = RSB_MATRIX_TYPE_CODES_ARRAY;
+	const int typecodei = 0;
+	const int af = 1;/* allow failures */
+	rsb_type_t typecode = typecodea[typecodei];
+	void*VA = NULL;
+	rsb_coo_idx_t nr = 16, nc = nr , nm = RSB_MAX(nr,nc);
+	const rsb_coo_idx_t bs = 1, maxdim = 16, mindim = 4;
+	const rsb_coo_idx_t lbw = mindim, ubw = mindim;
+	rsb_nnz_idx_t nnz = 0;
+	//const int fipi=0; /* if -1: probability 0; else, probability 1/fipi */ 
+	const rsb_flags_t flags = RSB_FLAG_DEFAULT_MATRIX_FLAGS;
+	void * xp = NULL, * yp = NULL;
+	rsb_aligned_t alpha[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE], beta[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	const void *alphap = &alpha,* betap = &beta;
+	const int /*sp=20,*/ hp = 10, lp = 100, mp = 1000, ip = 10000;
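+	/* inverse event probabilities: a branch guarded by rsb_random_event(lp*af) fires with probability 1/(lp*af) */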
+	const int maxasym = 5;
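+	/* hp, lp, mp, ip above are rarity denominators: a branch guarded by
+	 * rsb_random_event(lp*af) is taken with probability 1/(lp*af). */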
+
+	RSB_INFO("BASIC FAILURE BASED TEST: BEGIN\n");
+	RSB_ALLOC_INFO();
+	for(errval = rsb__limiter_init_from_str(&ls,tds);rsb__limiter_continue(&ls);rsb__limiter_step(&ls))
+	{
+		RSB_ALLOC_INFO();
+		typecode = typecodea[typecodei];
+	/* FIXME: please write here */
+	if(!mtxAp)
+	{
+		/* random size */
+		nr = mindim+rsb__rand_coo_index(maxdim)+lbw+ubw;
+		if(rsb_random_event(lp*af))
+			{RSB_VARIATION_NOTICE(nc*=1+rsb__rand_coo_index(maxasym));}
+		else
+			nc = nr;/* square matrix */
+		nm = RSB_MAX(nr,nc);
+		RSB_INFO("Create a %d x %d matrix...\n",nr,nc);
+
+		if(RSB_SOME_ERROR(errval = rsb__generate_blocked_banded_coo(nr,1,lbw,ubw,&IA,&JA,&VA,&nnz,typecode)))
+		{
+		       	RSB_PERR_GOTO(dca,RSB_ERRM_ES); 
+	       	}
+		/* try to instantiate one, with a probability of failure injection */
+		/* if(...) ... */
+		/* change typecode, possibly to an invalid one */
+		/* zero a parameter */
+		/* dealloc an array and nullify its pointer */
+		/* zeros in numbers */
+		/* too big indices */
+		/* negative indices */
+		/* inject a NaN */
+		//if(rsb_random_event(lp*af)) {nc=nr = RSB_MAX_MATRIX_DIM+1;}
+		//if(rsb_random_event(lp*af)) {nnz=0;}
+		if(rsb_random_event(lp*af)) {RSB_FAILURE_NOTICE(typecode = RSB_NUMERICAL_TYPE_INVALID_TYPE);}
+		/**/
+		/**/
+		if(!IA||!JA||!VA)
+		{
+			RSB_PERR_GOTO(dca,RSB_ERRM_ES);
+		}
+		RSB_ALLOC_INFO();
+		mtxAp = rsb__do_mtx_alloc_from_coo_inplace(VA,IA,JA,nnz,typecode,nr,nc,bs,bs,flags,&errval);
+		RSB_ALLOC_INFO();
+
+		if(!mtxAp || RSB_SOME_ERROR(errval))
+		{
+			/**/
+			rsb__do_perror(NULL,errval);
+			RSB_PERR_GOTO(dca,RSB_ERRM_ES);
+		}
+		else
+		{
+			xp = rsb__calloc_vector(nm,typecode);
+			yp = rsb__calloc_vector(nm,typecode);
+			if(!xp || !yp)
+			{
+				RSB_FREE_PARANOID(xp);
+				RSB_FREE_PARANOID(yp);
+				RSB_PERR_GOTO(dca,RSB_ERRM_ES);
+			}
+		}
+		RSB_ALLOC_INFO();
+		goto ndca;
+dca:
+		RSB_INFO("At:\n");
+		rsb__limiter_info(&ls);
+		RSB_INFO("Freeing matrix due to error\n");
+		RSB_FREE_PARANOID(xp); RSB_FREE_PARANOID(yp);
+		RSB_FREE_PARANOID(IA); RSB_FREE_PARANOID(JA); RSB_FREE_PARANOID(VA);
+ndca:
+		RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+	}
+	else
+	{
+		rsb_trans_t transA = RSB_TRANSPOSITION_N, transB = transA;
+		rsb_coo_idx_t incx=1,incy=1;
+		RSB_INFO("Use the matrix...\n");
+		if(rsb_random_event(ip*af)) {RSB_FAILURE_NOTICE(RSB_MTX_FREE_PARANOID(mtxAp));}
+		if(rsb_random_event(lp*af)) {RSB_FAILURE_NOTICE(RSB_FREE_PARANOID(xp));}
+		if(rsb_random_event(lp*af)) {RSB_FAILURE_NOTICE(RSB_FREE_PARANOID(yp));}
+		if(rsb_random_event(ip*af)) {RSB_FAILURE_NOTICE(transA = RSB_INVALID_TRANS);}
+		if(rsb_random_event(mp*af)) {RSB_FAILURE_NOTICE(incx=-1);}
+		if(rsb_random_event(mp*af)) {RSB_FAILURE_NOTICE(incy=-1);}
+		if(RSB_SOME_ERROR(errval = rsb_do_spmv(transA,alphap,mtxAp,xp,incx,betap,yp,incy)))
+		{
+			rsb__do_perror(NULL,errval);
+			RSB_PERR_GOTO(mdca,RSB_ERRM_ES);
+		}
+		if(rsb_random_event(2*af))
+		{
+			RSB_MTX_FREE_PARANOID(mtxBp);
+			RSB_ALLOC_INFO();
+			/* if( ( mtxBp = rsb__clone_simple(mtxAp) ) != NULL ) */
+			if( ( errval = rsb__clone(&mtxBp, mtxAp->typecode,RSB_TRANSPOSITION_N,NULL,mtxAp,flags) ) != RSB_ERR_NO_ERROR )
+			{
+				RSB_ALLOC_INFO();
+				rsb__do_perror(NULL,errval);
+				RSB_PERR_GOTO(mdca,RSB_ERRM_ES);
+			}
+		}
+		if(rsb_random_event(2*af))
+		{
+			RSB_MTX_FREE_PARANOID(mtxCp);
+			mtxCp = rsb__do_matrix_mul(typecode,transA,alphap,mtxAp,transB,betap,mtxBp,&errval);
+			if(RSB_SOME_ERROR(errval))
+			{
+				rsb__do_perror(NULL,errval);
+				RSB_PERR_GOTO(mdca,RSB_ERRM_ES);
+			}
+		}
+		if(rsb_random_event(hp*af))
+		{
+		       	RSB_FAILURE_NOTICE(goto mdca);
+		}
+		goto nmdca;
+mdca:
+		rsb__limiter_info(&ls);
+		RSB_MTX_FREE_PARANOID(mtxAp);
+		RSB_MTX_FREE_PARANOID(mtxBp);
+		RSB_FREE_PARANOID(xp);
+		RSB_FREE_PARANOID(yp);
+		RSB_FREE_PARANOID(IA);
+		RSB_FREE_PARANOID(JA);
+		RSB_FREE_PARANOID(VA);
+nmdca:
+		RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+	}
+	}
+	RSB_MTX_FREE_PARANOID(mtxAp);
+	RSB_MTX_FREE_PARANOID(mtxBp);
+	RSB_MTX_FREE_PARANOID(mtxCp);
+	RSB_FREE_PARANOID(xp);
+	RSB_FREE_PARANOID(yp);
+	RSB_FREE_PARANOID(IA);
+	RSB_FREE_PARANOID(JA);
+	RSB_FREE_PARANOID(VA);
+	rsb__limiter_info(&ls);
+	RSB_ALLOC_INFO();
+	rsb__do_check_leak();
+	RSB_INFO("BASIC FAILURE BASED TEST: END\n");
+#endif /* RSB_WANT_FAILURE_TESTER */
+	return errval;
+}
+
+/* @endcond */
diff --git a/rsb_failure_tests.h b/rsb_failure_tests.h
new file mode 100644
index 0000000..6c0a88f
--- /dev/null
+++ b/rsb_failure_tests.h
@@ -0,0 +1,42 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*
+ * @author Michele Martone
+ */
+#ifndef RSB_FAILURE_TESTS_H_INCLUDED
+#define RSB_FAILURE_TESTS_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "rsb_common.h"
+
+rsb_err_t rsb_blas_failure_tester(const rsb_char_t*tds);
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+#endif /* RSB_FAILURE_TESTS_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_fortran_macros.m4 b/rsb_fortran_macros.m4
new file mode 100644
index 0000000..a6c0218
--- /dev/null
+++ b/rsb_fortran_macros.m4
@@ -0,0 +1,668 @@
+dnl
+dnl
+include(`rsb_misc.m4')dnl
+include(`do_unroll.m4')dnl
+include(`libspblas_macros.m4')dnl
+dnl
+dnl
+define(`RSB_M4_ARGS_TO_ACTUAL_ARGS_FOR_RSB_INTERFACE',`dnl
+dnl RSB_M4_ARGS_TO_ACTUAL_ARGS_FOR_PSB_INTERFACE($@)`'dnl
+RSB_M4_ARGS_TO_ACTUAL_ARGS($@)`'dnl
+')dnl
+dnl
+define(`RSB_M4_ARGS_TO_ACTUAL_ARGS_FOR_SB_INTERFACE',`dnl
+dnl RSB_M4_ARGS_TO_ACTUAL_ARGS_FOR_PSB_INTERFACE($@)`'dnl
+RSB_M4_ARGS_TO_ACTUAL_ARGS($@)`'dnl
+')dnl
+dnl
+define(`RSB_M4_ARGS_TO_ACTUAL_ARGS_FOR_PSB_INTERFACE',`dnl
+dnl
+dnl	FIXME
+dnl
+patsubst(dnl
+`patsubst(dnl
+`patsubst(dnl
+`patsubst(dnl
+`patsubst(dnl
+`patsubst(dnl
+`patsubst(dnl
+`RSB_M4_ARGS_TO_ACTUAL_ARGS($@)',dnl
+`\<a\>',`a%infoa(psb_const_infoa_rsb_)'dnl
+)',dnl
+`\<append\>',`appendi'dnl
+)',dnl
+`\<has_iren\>',`has_ireni'dnl
+)',dnl
+`\<has_gtl\>',`has_gtli'dnl
+)',dnl
+`\<do_rebuild\>',`do_rebuildi'dnl
+)',dnl
+`\<real_in\>',`in'dnl
+)',dnl
+`\<extra\>',`extra,typecode'dnl
+)dnl
+dnl
+')dnl
+dnl
+define(`RSB_M4_PSBLAS_IARRAY_TYPE',`dnl
+INTEGER :: dnl
+pushdef(`firstarg',`0')dnl
+foreach(`arg',`($@)',`ifelse(firstarg,`0',`pushdef(`firstarg',1)arg`(:)'',`,arg`(:)'')`'')`'
+dnl	DO NOT REMOVE THE FOLLOWING LINE
+ifelse(firstarg,`1',`popdef(`firstarg')')dnl
+popdef(`firstarg')dnl
+')dnl
+dnl
+define(`RSB_M4_C2F_NORM_TYPE',`dnl
+pushdef(`type',$1)`'dnl
+dnl
+ifelse(RSB_M4_want_old_fortran_float_types,`1',`dnl
+ifelse(type,`float',`REAL*4')`'dnl
+ifelse(type,`double',`REAL*8')`'dnl
+ifelse(type,`float complex',`REAL*4')`'dnl
+ifelse(type,`double complex',`REAL*8')`'dnl
+',`dnl
+dnl ifelse(type,`float',`REAL(rsb_spk_)')`'dnl
+dnl ifelse(type,`double',`REAL(rsb_dpk_)')`'dnl
+dnl ifelse(type,`float complex',`REAL(rsb_spk_)')`'dnl
+dnl ifelse(type,`double complex',`REAL(rsb_dpk_)')`'dnl
+ifelse(type,`float',`REAL(KIND(1.e0))')`'dnl
+ifelse(type,`double',`REAL(KIND(1.d0))')`'dnl
+ifelse(type,`float complex',`REAL(KIND(1.e0))')`'dnl
+ifelse(type,`double complex',`REAL(KIND(1.d0))')`'dnl
+')`'dnl
+dnl
+ifelse(type,`int',`INTEGER')`'dnl
+dnl FIXME : what about other, non-canonical types?
+dnl
+popdef(`type')`'dnl
+dnl
+')dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_C2F_TYPE',`dnl
+pushdef(`type',$1)`'dnl
+dnl
+ifelse(RSB_M4_want_old_fortran_float_types,`1',`dnl
+ifelse(type,`float',`REAL*4')`'dnl
+ifelse(type,`double',`REAL*8')`'dnl
+ifelse(type,`float complex',`COMPLEX*8')`'dnl
+ifelse(type,`double complex',`COMPLEX*16')`'dnl
+',`dnl
+dnl ifelse(type,`float',`REAL(rsb_spk_)')`'dnl
+dnl ifelse(type,`double',`REAL(rsb_dpk_)')`'dnl
+dnl ifelse(type,`float complex',`COMPLEX(rsb_spk_)')`'dnl
+dnl ifelse(type,`double complex',`COMPLEX(rsb_dpk_)')`'dnl
+ifelse(type,`float',`REAL(KIND(1.e0))')`'dnl
+ifelse(type,`double',`REAL(KIND(1.d0))')`'dnl
+ifelse(type,`float complex',`COMPLEX(KIND(1.e0))')`'dnl
+ifelse(type,`double complex',`COMPLEX(KIND(1.d0))')`'dnl
+')`'dnl
+dnl
+ifelse(type,`int',`INTEGER')`'dnl
+dnl FIXME : what about other, non-canonical types?
+dnl
+popdef(`type')`'dnl
+dnl
+')dnl
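+dnl	e.g.: RSB_M4_C2F_TYPE(`double complex') expands to COMPLEX*16 if
+dnl	RSB_M4_want_old_fortran_float_types is 1, to COMPLEX(KIND(1.d0)) otherwise.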
+dnl
+dnl
+define(`RSB_M4_PSBLAS_INTERFACE_RADIX',`psb_rsb')dnl
+define(`RSB_M4_RSBLAS_INTERFACE_RADIX',`rsb')dnl
+define(`RSB_M4_SBLAS_INTERFACE_RADIX',`us')dnl unstructured sparse
+dnl
+define(`RSB_M4_FORTRAN_INTERFACE_RADIX',`rsb__')dnl
+dnl
+dnl define(`RSB_M4_SBLAS_INTERFACE_OPS',`RSB_M4_PSBLAS_INTERFACE_OPS')dnl
+dnl
+define(`RSB_M4_SBLAS_INTERFACE_OPS',`(dnl
+RSB_M4_COMMA_LIST(RSB_M4_SPBLAS_MATRIX_CREATION_MOPS)`'dnl
+,RSB_M4_COMMA_LIST(RSB_M4_SPBLAS_MATRIX_ALL_L2_MOPS)`'dnl
+,RSB_M4_COMMA_LIST(RSB_M4_SPBLAS_MATRIX_ALL_L3_MOPS)`'dnl
+)')dnl
+dnl
+define(`RSB_M4_SBLAS_GENERIC_OPS',`(dnl
+RSB_M4_COMMA_LIST((RSB_M4_SPBLAS_MATRIX_INSERTION_MOPS))`'dnl
+,RSB_M4_COMMA_LIST(RSB_M4_SPBLAS_MATRIX_ALL_L2_MOPS)`'dnl
+,RSB_M4_COMMA_LIST(RSB_M4_SPBLAS_MATRIX_ALL_L3_MOPS)`'dnl
+)')dnl
+dnl dnl
+define(`RSB_M4_RSBLAS_INTERFACE_OPS',`RSB_M4_PSBLAS_INTERFACE_OPS')dnl
+dnl
+define(`RSB_M4_PSBLAS_INTERFACE_OPS',`(scale,getdiag,get_rows_nnz,get_rows_sparse,destroy_sparse_matrix,allocate_sparse_matrix,get_matrix_nnz,infinity_norm,usmm,ussm,usmv,set_elements,set_element,get_element)')dnl
+dnl
+dnl
+dnl	FIXME: new stuff
+dnl
+define(`RSB_M4_SBLAS_EXTRA_INTERFACE_OPS_LIST',``rows_scale',`get_diag',`get_rows_nnz',`get_rows_sparse',`get_matrix_nnz',`get_infinity_norm',`set_elements',`set_element',`get_element'')dnl
+dnl define(`RSB_M4_SBLAS_EXTRA_INTERFACE_OPS_LIST',`')dnl
+define(`RSB_M4_SBLAS_EXTRA_INTERFACE_OPS',`(RSB_M4_SBLAS_EXTRA_INTERFACE_OPS_LIST)')dnl
+dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_RSBLAS_INTERFACE_IDENTIFIER',`dnl
+pushdef(`pmop',$1)dnl
+dnl
+RSB_M4_RSBLAS_INTERFACE_RADIX`_'pmop`'dnl
+dnl
+popdef(`pmop')dnl
+')dnl
+dnl
+define(`RSB_M4_PSBLAS_INTERFACE_IDENTIFIER',`dnl
+pushdef(`pmop',$1)dnl
+dnl
+RSB_M4_PSBLAS_INTERFACE_RADIX`_'pmop`'dnl
+dnl
+popdef(`pmop')dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_SBLAS_INTERFACE_IDENTIFIER',`dnl
+pushdef(`pmop',$1)dnl
+dnl
+RSB_M4_SBLAS_INTERFACE_RADIX`'pmop`'dnl
+dnl
+popdef(`pmop')dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_RSB_TYPE_CHARCODE',`dnl
+RSB_M4_PSB_TYPE_CHARCODE($@)`'dnl
+')dnl
+dnl
+define(`RSB_M4_SB_TYPE_CHARCODE',`dnl
+RSB_M4_PSB_TYPE_CHARCODE($@)`'dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_PSB_TYPE_CHARCODE',`dnl
+pushdef(`type',$1)`'dnl
+tolowercase(RSB_M4_TYPE_CHARCODE(type))`'dnl
+popdef(`type')`'dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_PSB_MTYPE_CHARCODE',`dnl
+pushdef(`type',$1)`'dnl
+`psb_'RSB_M4_PSB_TYPE_CHARCODE(type)`'`spmat_type'dnl
+popdef(`type')`'dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_RSB_MTYPE_CHARCODE',`dnl
+dnl
+RSB_M4_SB_MTYPE_CHARCODE($@)`'dnl
+dnl
+')dnl
+dnl
+define(`RSB_M4_SB_MTYPE_CHARCODE',`dnl
+dnl
+RSB_M4_PSB_MTYPE_CHARCODE($@)`'dnl
+dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_RSBLAS2VBR_SUBROUTINE_RADIX',`dnl
+dnl
+dnl
+RSB_M4_FORTRAN_INTERFACE_RADIX`do_fortran_rsb_'`'dnl
+dnl RSB_M4_FORTRAN_INTERFACE_RADIX`do_fortran_'RSB_M4_PSB_TYPE_CHARCODE(mtype)`_'pmop`'dnl
+dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_SBLAS2VBR_SUBROUTINE_RADIX',`dnl
+pushdef(`mop',$1)`'dnl
+pushdef(`mtype',$2)`'dnl
+pushdef(`lang',$3)`'dnl
+dnl
+dnl RSB_M4_FORTRAN_INTERFACE_RADIX`do_fortran_rsb_blas_'`'dnl
+dnl RSB_M4_FORTRAN_INTERFACE_RADIX`do_fortran_'RSB_M4_PSB_TYPE_CHARCODE(mtype)`_'pmop`'dnl
+ifelse(lang,`f90',`dnl
+dnl `rsb_'`'f90_blas_`'RSB_M4_PSB_TYPE_CHARCODE(mtype)`us'`'dnl
+`'blas_`'RSB_M4_PSB_TYPE_CHARCODE(mtype)`us'`'dnl
+',`dnl
+BLAS_`'RSB_M4_PSB_TYPE_CHARCODE(mtype)`us'`'dnl
+')`'dnl
+dnl
+popdef(`mtype')`'dnl
+popdef(`lang')`'dnl
+popdef(`mop')`'dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_PSBLAS2VBR_SUBROUTINE_RADIX',`dnl
+dnl
+dnl
+RSB_M4_FORTRAN_INTERFACE_RADIX`do_fortran_'`'dnl
+dnl RSB_M4_FORTRAN_INTERFACE_RADIX`do_fortran_'RSB_M4_PSB_TYPE_CHARCODE(mtype)`_'pmop`'dnl
+dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_RSBLAS2VBR_SUBROUTINE_IDENTIFIER',`dnl
+dnl
+RSB_M4_SBLAS2VBR_SUBROUTINE_RADIX`'pmop`'dnl
+dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_SBLAS2VBR_SUBROUTINE_IDENTIFIER',`dnl
+pushdef(`mop',$1)`'dnl
+pushdef(`mtype',$2)`'dnl
+pushdef(`lang',$3)`'dnl
+dnl
+dnl
+dnl
+RSB_M4_SBLAS2VBR_SUBROUTINE_RADIX(mop,mtype,lang)`'mop`'dnl
+ifelse(lang,`f90',RSB_M4_FORTRAN_SYMBOL_ADD_TO_F,`')`'dnl
+dnl
+popdef(`lang')`'dnl
+popdef(`mtype')`'dnl
+popdef(`mop')`'dnl
+dnl
+')dnl
+dnl
+define(`RSB_M4_PSBLAS2VBR_SUBROUTINE_IDENTIFIER',`dnl
+pushdef(`pmop',$1)dnl
+pushdef(`mtype',$2)dnl
+dnl
+dnl
+RSB_M4_PSBLAS2VBR_SUBROUTINE_RADIX`'pmop`'dnl
+dnl RSB_M4_FORTRAN_INTERFACE_RADIX`do_fortran_'RSB_M4_PSB_TYPE_CHARCODE(mtype)`_'pmop`'dnl
+dnl
+popdef(`mtype')dnl
+popdef(`pmop')dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_SBLAS_SUBROUTINE_IDENTIFIER',`dnl
+pushdef(`pmop',$1)dnl
+pushdef(`mtype',$2)dnl
+dnl
+RSB_M4_SB_TYPE_CHARCODE(mtype)`'dnl
+RSB_M4_SBLAS_INTERFACE_RADIX`'dnl
+pmop`'dnl
+dnl
+popdef(`mtype')dnl
+popdef(`pmop')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_RSBLAS_SUBROUTINE_IDENTIFIER',`dnl
+pushdef(`pmop',$1)dnl
+pushdef(`mtype',$2)dnl
+dnl
+RSB_M4_RSBLAS_INTERFACE_RADIX`'RSB_M4_RSB_TYPE_CHARCODE(mtype)`'pmop`'dnl
+dnl
+popdef(`mtype')dnl
+popdef(`pmop')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_PSBLAS_SUBROUTINE_IDENTIFIER',`dnl
+pushdef(`pmop',$1)dnl
+pushdef(`mtype',$2)dnl
+dnl
+RSB_M4_PSBLAS_INTERFACE_RADIX`_'RSB_M4_PSB_TYPE_CHARCODE(mtype)`_'pmop`'dnl
+dnl RSB_M4_PSBLAS_INTERFACE_RADIX`_'RSB_M4_C2F_TYPE(mtype)`_'pmop`'dnl
+dnl
+popdef(`mtype')dnl
+popdef(`pmop')dnl
+')dnl
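+dnl	e.g.: RSB_M4_PSBLAS_SUBROUTINE_IDENTIFIER(`scale',`double') expands to
+dnl	psb_rsb_d_scale (assuming the type charcode of `double' is `d').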
+dnl
+dnl
+dnl
+define(`RSB_M4_RSBLAS_SUBROUTINE_INFO_DECLARATION',`dnl
+RSB_M4_PSBLAS_SUBROUTINE_INFO_DECLARATION($@)`'dnl
+dnl
+')dnl
+dnl
+define(`RSB_M4_SBLAS_SUBROUTINE_INFO_DECLARATION',`dnl
+pushdef(`id',$1)`'dnl
+dnl RSB_M4_PSBLAS_SUBROUTINE_INFO_DECLARATION($@)`'dnl
+          INTEGER, INTENT(OUT) ::id
+dnl
+popdef(`id')`'dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_PSBLAS_SUBROUTINE_INFO_DECLARATION',`dnl
+dnl
+           INTEGER, INTENT(OUT) ::info
+dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_RSBLAS_SUBROUTINE_ARGS_DECLARATION',`dnl
+RSB_M4_SBLAS_SUBROUTINE_ARGS_DECLARATION()`'dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+dnl
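+dnl	Expands to `(:)' if the C argument declaration contains a `*'
+dnl	(i.e. it is a pointer, hence a Fortran array), to nothing otherwise.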
+define(`RSB_M4_C_POINTER_TO_FORTRAN_ARRAY',`dnl
+pushdef(`type',$1)`'dnl
+dnl patsubst(`patsubst(`patsubst(`$1',`const',`')',` *[a-zA-Z_0-9]+ *[a-zA-Z_0-9]+ *[a-zA-Z_0-9]+ *',`')',` *\* *',`(:)')`'dnl
+ifelse(patsubst(patsubst($1,`[a-zA-Z_0-9]*',`'),` *',`'),`',`',`(:)')`'dnl
+dnl patsubst(`patsubst(`patsubst(`$1',`const',`')',` *[a-zA-Z_0-9]+ *[a-zA-Z_0-9]+ *[a-zA-Z_0-9]+ *',`')',` *\* *',`(:)')`'dnl
+popdef(`type')`'dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_C_TYPE_TO_FORTRAN_TYPE',`dnl
+pushdef(`type',$1)`'dnl
+ifelse(type,`blas_sparse_matrix',`INTEGER',`dnl
+ifelse(type,`rsb_blas_int_t',`INTEGER',`dnl
+ifelse(type,`rsb_blas_int_t*',`INTEGER',`dnl
+ifelse(type,`enum blas_trans_type',`INTEGER',`dnl
+ifelse(type,`enum`'blas_trans_type',`INTEGER',`dnl
+ifelse(type,`enum blas_trans_type ',`INTEGER',`dnl
+ifelse(type,`enum blas_order_type',`INTEGER',`dnl
+ifelse(type,`enum`'blas_order_type',`INTEGER',`dnl FIXME
+ifelse(type,`enum blas_order_type*',`INTEGER',`dnl
+RSB_M4_C2F_NORM_TYPE(patsubst(type,` *\* *',`'))`'dnl
+')`'dnl
+')`'dnl
+')`'dnl
+')`'dnl
+')`'dnl
+')`'dnl
+')`'dnl
+')`'dnl
+')`'dnl
+popdef(`type')`'dnl
+')dnl
+dnl
+define(`RSB_M4_SPBLAS_FIXTYPE',`dnl
+pushdef(`type',$1)`'dnl
+patsubst(`patsubst(`$2',`\n',`')',`void',type)`'dnl
+popdef(`type')`'dnl
+')dnl
+dnl
+dnl
+dnl
+dnl define(`RSB_M4_C_ARG_TO_FORTRAN_ARG',`a')dnl
+dnl define(`RSB_M4_C_ARG_TO_FORTRAN_ARG',`$@ ')dnl
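+dnl	Translates a single C argument declaration into a Fortran one,
+dnl	e.g. (roughly) `const double *x' into `REAL(KIND(1.d0)) :: x (:)'.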
+define(`RSB_M4_C_ARG_TO_FORTRAN_ARG',`patsubst($1,`\(.*\) *\(\<[a-zA-Z_0-9]+$\)',`dnl
+RSB_M4_C_TYPE_TO_FORTRAN_TYPE(`patsubst(`patsubst(\1,` * ',`')',`const ',`')') `::' \2 pushdef(`kiki',\1)`'RSB_M4_C_POINTER_TO_FORTRAN_ARRAY(kiki)`'popdef(`kiki')')')dnl
+dnl
+dnl
+define(`RSB_M4_C_ARGS_TO_FORTRAN_ARGS',`dnl
+dnl
+dnl	WARNING : this is THIN ICE :)
+pushdef(`firstarg',`0')dnl
+foreach(`arg',`$1',`ifelse(firstarg,`0',`pushdef(`firstarg',1)',`,')`'RSB_M4_C_ARG_TO_FORTRAN_ARG(arg)')`'dnl
+ifelse(firstarg,`1',`popdef(`firstarg')')dnl
+popdef(`firstarg')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_SBLAS_SUBROUTINE_ARGS_DECLARATION',`dnl
+pushdef(`mop',$1)dnl
+pushdef(`mtype',$2)dnl
+dnl        INTEGER :: a
+RSB_M4_SBLAS_SUBROUTINE_DECL(mop,mtype)`'dnl
+dnl	RSB_M4_PSBLAS_SUBROUTINE_ARGS_DECLARATION($@)`'dnl
+dnl	dnl
+popdef(`mtype')dnl
+popdef(`mop')dnl
+')dnl
+dnl
+define(`RSB_M4_PSBLAS_SUBROUTINE_ARGS_DECLARATION',`dnl
+pushdef(`pmop',$1)dnl
+pushdef(`mtype',$2)dnl
+dnl
+          type(RSB_M4_PSB_MTYPE_CHARCODE(mtype)) :: a ! psblas matrix type (mtype)
+dnl
+dnl
+ifelse(pmop,`scale',`dnl
+          RSB_M4_C2F_TYPE(mtype) :: d(:)
+')dnl
+ifelse(pmop,`getdiag',`dnl
+          RSB_M4_C2F_TYPE(mtype) :: d(:)
+')dnl
+ifelse(pmop,`get_element',`dnl
+          INTEGER :: i,j
+          RSB_M4_C2F_TYPE(mtype) :: v
+')dnl
+ifelse(pmop,`set_elements',`dnl
+          INTEGER :: ia(:),ja(:)
+          INTEGER :: imin,imax,jmin,jmax,nnz
+          INTEGER :: has_gtli,gtl(:)
+dnl ,do_rebuildi
+          logical :: has_gtl
+dnl ,do_rebuild=.FALSE.
+          RSB_M4_C2F_TYPE(mtype) :: val(:)
+')dnl
+ifelse(pmop,`set_element',`dnl
+          INTEGER :: i,j
+          RSB_M4_C2F_TYPE(mtype) :: v
+')dnl
+ifelse(pmop,`get_rows_nnz',`dnl
+          INTEGER :: fr,lr
+          INTEGER :: nnz
+')`'dnl
+ifelse(pmop,`get_rows_sparse',`dnl
+          RSB_M4_C2F_TYPE(mtype) :: val(:)
+          INTEGER :: fr,lr
+          logical :: append
+          logical :: has_iren
+          INTEGER :: appendi=0
+dnl          INTEGER :: nzin ! FIXME, still unused !
+          INTEGER :: nzin,has_ireni=0,iren(:) ! FIXME, still unused !
+          RSB_M4_PSBLAS_IARRAY_TYPE(ia,ja)dnl
+          INTEGER :: nnz
+')`'dnl
+ifelse(pmop,`destroy_sparse_matrix',`dnl
+')`'dnl
+ifelse(pmop,`allocate_sparse_matrix',`dnl
+          RSB_M4_C2F_TYPE(mtype) :: val(:)
+          RSB_M4_PSBLAS_IARRAY_TYPE(ia,ja)dnl
+          INTEGER :: nnz,m,k,br,bc
+dnl          character(len=16),parameter :: extra="a"
+          character(len=*) :: extra
+          INTEGER,parameter :: typecode = RSB_M4_TYPE_CHARCODE_ASCII_VALUE(mtype)
+dnl          !extra=""
+')`'dnl
+ifelse(pmop,`get_matrix_nnz',`dnl
+          INTEGER :: ires
+')`'dnl
+ifelse(pmop,`infinity_norm',`dnl
+          RSB_M4_C2F_NORM_TYPE(mtype) :: real_in
+          RSB_M4_C2F_TYPE(mtype) :: in
+          character :: trans ! 
+dnl          INTEGER :: itrans ! FIXME: this value is ignored !
+')`'dnl
+ifelse(pmop,`ussm',`dnl
+          RSB_M4_C2F_TYPE(mtype) :: b(:,:),c(:,:)
+dnl          INTEGER :: lb,lc,nrhs ! FIXME : new, still unused! (FIXME: nrhs is PSBLAS s iwsz)
+          INTEGER :: lb,lc,nrhs ! FIXME : new, still unused! (FIXME: nrhs is iwsz)
+          RSB_M4_C2F_TYPE(mtype) :: alpha
+          character :: trans ! 
+dnl          INTEGER :: itrans ! FIXME: this value is ignored !
+')`'dnl
+ifelse(pmop,`usmv',`dnl
+          RSB_M4_C2F_TYPE(mtype) :: b(:),c(:)
+dnl          INTEGER :: lb,lc,nrhs ! FIXME : new, still unused! (FIXME: nrhs is PSBLAS s iwsz)
+          RSB_M4_C2F_TYPE(mtype) :: alpha,beta
+          character :: trans ! 
+dnl          INTEGER :: itrans ! FIXME: this value is ignored !
+')`'dnl
+dnl
+ifelse(pmop,`usmm',`dnl
+          RSB_M4_C2F_TYPE(mtype) :: b(:,:),c(:,:)
+          INTEGER :: lb,lc,nrhs ! FIXME : new, still unused! (FIXME: nrhs is iwsz)
+dnl          INTEGER :: lb,lc,nrhs ! FIXME : new, still unused! (FIXME: nrhs is PSBLAS s iwsz)
+          RSB_M4_C2F_TYPE(mtype) :: alpha,beta
+          character :: trans ! 
+dnl          INTEGER :: itrans ! FIXME: this value is ignored !
+')`'dnl
+dnl
+popdef(`mtype')dnl
+popdef(`pmop')dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_RSBLAS_SUBROUTINE_HELP_COMMENT',`dnl
+RSB_M4_PSBLAS_SUBROUTINE_HELP_COMMENT($@)`'dnl
+')dnl
+define(`RSB_M4_SBLAS_SUBROUTINE_HELP_COMMENT',`dnl
+RSB_M4_PSBLAS_SUBROUTINE_HELP_COMMENT($@)`'dnl
+')dnl
+dnl
+define(`RSB_M4_SBLAS_SUBROUTINE_EXTRA_FORTRAN_HELP_COMMENT',`dnl
+pushdef(`pmop',$1)dnl
+\rsb_spblas_f_istat_msg`'dnl
+ifelse(RSB_M4_MEMBER(pmop,`cr_begin',`cr_block_begin',`cr_variable_block_begin'),`1',`dnl
+\rsb_spblasl2_A_msg_ftn
+')dnl
+popdef(`pmop')dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_PSBLAS_SUBROUTINE_HELP_COMMENT',`dnl
+pushdef(`pmop',$1)dnl
+pushdef(`mtype',$2)dnl
+dnl
+ifelse(pmop,`getdiag',`gets the diagonal of A into array D')`'dnl
+ifelse(pmop,`set_element',`sets the matrix at element i,j (if in the nonzero pattern)')`'dnl
+ifelse(pmop,`set_elements',`sets the matrix at elements ia,ja (if in the nonzero pattern)')`'dnl
+ifelse(pmop,`get_element',`gets the matrix at element i,j (if in the nonzero pattern)')`'dnl
+ifelse(pmop,`scale',`scales each row of A by multiplying it to a value of D')`'dnl
+ifelse(pmop,`get_rows_sparse',`writes in ia,ja,va the row index, column index, and value of nonzeros from row fr to lr')`'dnl
+ifelse(pmop,`get_rows_nnz',`gets the number of nonzeros in the specified rows interval')`'dnl
+ifelse(pmop,`allocate_sparse_matrix',`allocates a sparse matrix A')`'dnl
+ifelse(pmop,`destroy_sparse_matrix',`frees all allocated resources to the descriptor of matrix A')`'dnl
+ifelse(pmop,`get_matrix_nnz',`gets the nonzeros count of matrix A')`'dnl
+ifelse(pmop,`infinity_norm',`gets the infinity norm (the maximal sum of rows elements) of matrix A')`'dnl
+ifelse(pmop,`sm',`triangular solve: b <- alpha A^-1 b')`'dnl
+ifelse(pmop,`sv',`triangular solve: b <- alpha A^-1 b')`'dnl
+ifelse(pmop,`mm',`multiplication  : c <- beta c + alpha A b')`'dnl
+ifelse(pmop,`mv',`multiplication  : c <- beta c + alpha A b')`'dnl
+dnl
+ifelse(pmop,`cr',`matrix creation')`'dnl
+ifelse(pmop,`cr_insert_row',`inserts a sparse row')`'dnl
+ifelse(pmop,`cr_insert_col',`inserts a sparse column')`'dnl
+ifelse(pmop,`cr_insert_block',`inserts a dense block')`'dnl
+ifelse(pmop,`cr_insert_clique',`inserts a clique')`'dnl
+ifelse(pmop,`cr_insert_entry',`inserts a single entry')`'dnl
+ifelse(pmop,`cr_insert_entries',`inserts multiple entries')`'dnl
+dnl
+popdef(`mtype')dnl
+popdef(`pmop')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_RSBLAS_SUBROUTINE_ARGS',`dnl
+RSB_M4_PSBLAS_SUBROUTINE_ARGS($@)`'dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_SBLAS_SUBROUTINE_DECL',`dnl
+pushdef(`mop',$1)dnl
+pushdef(`mtype',$2)dnl
+dnl
+dnl
+foreach(`d',`(dnl
+RSB_M4_C_ARGS_TO_FORTRAN_ARGS((`RSB_M4_SPBLAS_MATRIX_CREATION_FUNCS(mtype,mop,`',`ARGS',0,`lang_c')'))dnl
+RSB_M4_C_ARGS_TO_FORTRAN_ARGS((`RSB_M4_SPBLAS_MATRIX_ALL_L2_FUNCTION(mtype,mop,`?tri?',`ARGS',0,`lang_c')'))dnl
+RSB_M4_C_ARGS_TO_FORTRAN_ARGS((`RSB_M4_SPBLAS_MATRIX_ALL_L1_FUNCTION(mtype,mop,`?tri?',`ARGS',0,`lang_c')'))dnl
+)',`dnl
+          patsubst(patsubst(d,` *:: \(trans\|order\)',`INTEGER :: \1'),`^ *::',RSB_M4_C2F_TYPE(mtype) ::)
+')`'dnl
+dnl
+ifelse(RSB_M4_MEMBER(mop,RSB_M4_SPBLAS_MATRIX_BEGIN_MOPS),`1',`dnl
+          INTEGER,INTENT(OUT) :: A
+')`'dnl
+dnl
+dnl RSB_M4_C_ARGS_TO_FORTRAN_ARGS(RSB_M4_SPBLAS_MATRIX_ALL_L2_FUNCTION(mtype,mop,`?tri?',`ARGS'))`'dnl
+dnl RSB_M4_C_ARGS_TO_FORTRAN_ARGS(RSB_M4_SPBLAS_MATRIX_ALL_L1_FUNCTION(mtype,mop,`?tri?',`ARGS'))`'dnl
+dnl
+dnl
+popdef(`mtype')dnl
+popdef(`mop')dnl
+')dnl
+dnl
+define(`RSB_M4_SBLAS_SUBROUTINE_ARGS',`dnl
+pushdef(`mop',$1)`'dnl
+pushdef(`mtype',$2)`'dnl
+pushdef(`lang',$3)`'dnl
+dnl RSB_M4_PSBLAS_SUBROUTINE_ARGS($@)`'dnl
+dnl
+dnl
+(RSB_M4_ARGS_TO_ACTUAL_ARGS((dnl
+RSB_M4_SPBLAS_FUNCTION(mtype,mop,`',`ARGS',0,lang)`'dnl
+dnl RSB_M4_ARGS_TO_ACTUAL_ARGS((RSB_M4_SPBLAS_MATRIX_CREATION_FUNCS(mtype,mop,`',`ARGS',0,lang)))`'dnl
+dnl RSB_M4_ARGS_TO_ACTUAL_ARGS((RSB_M4_SPBLAS_MATRIX_ALL_L2_FUNCTION(mtype,mop,`?tri?',`ARGS',0,lang)))`'dnl
+dnl RSB_M4_ARGS_TO_ACTUAL_ARGS((RSB_M4_SPBLAS_MATRIX_ALL_L1_FUNCTION(mtype,mop,`?tri?',`ARGS',0,lang)))`'dnl
+dnl ifelse(RSB_M4_MEMBER(mop,RSB_M4_SPBLAS_MATRIX_BEGIN_MOPS),`1',`,A')`'dnl
+)))`'
+dnl
+popdef(`lang')dnl
+popdef(`mtype')dnl
+popdef(`mop')dnl
+')dnl
+dnl
+define(`RSB_M4_PSBLAS_SUBROUTINE_ARGS',`dnl
+pushdef(`pmop',$1)dnl
+pushdef(`mtype',$2)dnl
+dnl
+(a`'dnl
+ifelse(pmop,`getdiag',`,d')`'dnl
+ifelse(pmop,`set_elements',`,val,ia,ja,nnz,imin,imax,jmin,jmax,has_gtl,gtl')`'dnl
+dnl ifelse(pmop,`set_elements',`,val,ia,ja,nnz,imin,imax,jmin,jmax,has_gtl,gtl,do_rebuild')`'dnl
+ifelse(pmop,`set_element',`,v,i,j')`'dnl
+ifelse(pmop,`get_element',`,v,i,j')`'dnl
+ifelse(pmop,`scale',`,d')`'dnl
+dnl ifelse(pmop,`get_rows_sparse',`,val,fr,lr,ia,ja,nnz,nzin,append')`'dnl
+ifelse(pmop,`get_rows_sparse',`,val,fr,lr,ia,ja,nnz,nzin,append,has_iren,iren')`'dnl
+ifelse(pmop,`get_rows_nnz',`,fr,lr,nnz')`'dnl
+ifelse(pmop,`allocate_sparse_matrix',`,val,ia,ja,nnz,m,k,br,bc,extra')`'dnl
+ifelse(pmop,`destroy_sparse_matrix',`')`'dnl
+ifelse(pmop,`get_matrix_nnz',`,ires')`'dnl
+ifelse(pmop,`infinity_norm',`,real_in,trans')`'dnl
+ifelse(pmop,`ussm',`,b,c,lb,lc,nrhs,alpha,trans')`'dnl
+ifelse(pmop,`usmm',`,b,c,lb,lc,nrhs,alpha,beta,trans')`'dnl
+ifelse(pmop,`usmv',`,b,c,alpha,beta,trans')`'dnl
+,info)`'dnl
+dnl
+popdef(`mtype')dnl
+popdef(`pmop')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_INTERFACE_LIST',`dnl
+pushdef(`firstarg',`0')dnl
+foreach(`arg',`($@)',`ifelse(firstarg,`0',`pushdef(`firstarg',1)arg &',`
+        &, arg &')`'')`'
+dnl	DO NOT REMOVE THE FOLLOWING LINE
+        & ;
+ifelse(firstarg,`1',`popdef(`firstarg')')dnl
+popdef(`firstarg')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
diff --git a/rsb_fpb.c b/rsb_fpb.c
new file mode 100644
index 0000000..0e1b286
--- /dev/null
+++ b/rsb_fpb.c
@@ -0,0 +1,129 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/**
+ * @file
+ * @author Michele Martone
+ * @brief Floating point microbenchmarks.
+ */
+
+#include "rsb_common.h"
+#include "rsb.h"
+#include <limits.h>
+
+#define RSB_DECLARE_FPB_F(FN) rsb_err_t FN(rsb__times_t times, size_t bs, rsb_bool_t aloud)	/* times to perform, buffer size */
+#define RSB_M 1000000.0
+typedef rsb_err_t (*rsb_fpb_fp_t)(rsb__times_t,size_t,rsb_bool_t);	/* floating point benchmark function pointer type */
+
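+/* Defines a static benchmark function FNAME: for each supported numerical
+ * type it initializes an N-element buffer (N = bs/sizeof(type)), applies
+ * FPBEXPR to it `times' times, and (if aloud) reports millions of
+ * operations per second, computed as N*T/(dt*1e6). */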
+#define RSB_DEFINE_FPB_F(FNAME,FPBEXPR,FPBNAME) \
+static RSB_DECLARE_FPB_F(FNAME) \
+{ \
+	/* \ingroup gr_internals */ \
+	rsb_nnz_idx_t N; rsb__times_t t,T=times; rsb_time_t dt; \
+	rsb_type_t mtca[]=RSB_MATRIX_TYPE_CODES_ARRAY; rsb_char_t*mtcn[]=RSB_MATRIX_TYPES_ARRAY; \
+	rsb_int ti=0; void *p=NULL; \
+	p = rsb__calloc(bs); \
+	if(!p) return RSB_ERR_ENOMEM; \
+	for(ti=0;ti<RSB_IMPLEMENTED_TYPES;++ti) \
+	{ \
+		rsb_char_t *tn=mtcn[ti]; rsb_type_t typecode=mtca[ti]; \
+		rsb_aligned_t alpha[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE]; \
+ \
+		/* RSB_INFO("bs:%zd, T:%zd\n",bs,T); */  \
+ \
+		N=bs/RSB_SIZEOF(typecode); \
+		rsb__util_set_area_to_converted_integer(&alpha,typecode,1); \
+		rsb__util_set_array_to_converted_integer(p,typecode,N,1,1); \
+		dt = - rsb_time(); \
+		for(t=0;t<T;++t) \
+			FPBEXPR; \
+		dt += rsb_time(); \
+		if(aloud) \
+			RSB_INFO("#op\ttype\tbs\tpasses\telements\tMOPS\n"), \
+			RSB_INFO("%s\t%s\t%zd\t%zd\t%zd\t%f\n",FPBNAME,tn,bs,(size_t)times,(size_t)N,(((1.0/dt)*N)*T)/RSB_M); \
+	} \
+	RSB_CONDITIONAL_FREE(p); \
+	return RSB_ERR_NO_ERROR; \
+}
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+/* This is horrible and sad, sad, I know. */
+RSB_DEFINE_FPB_F(rsb_fpb_add,rsb__util_vector_add(p,&alpha,typecode,N),"ADD")
+RSB_DEFINE_FPB_F(rsb_fpb_sum,rsb__util_vector_sum(&alpha,p,typecode,N),"SUM")
+RSB_DEFINE_FPB_F(rsb_fpb_mul,rsb__cblas_Xscal(typecode,N,&alpha,p,1),"MUL")
+RSB_DEFINE_FPB_F(rsb_fpb_neg,rsb__util_do_negate(p,typecode,N),"NEG")
+RSB_DEFINE_FPB_F(rsb_fpb_inc,rsb__vector_increase_by_one(p,typecode,N),"INC")
+RSB_DEFINE_FPB_F(rsb_fpb_sqr,rsb__util_vector_sqrt(p,typecode,N),"SQR")
+RSB_DEFINE_FPB_F(rsb_fpb_div,rsb__util_vector_div(p,&alpha,typecode,N),"DIV")
+
+rsb_err_t rsb__fp_benchmark(void)
+{
+	/**
+	 * Will benchmark the floating point units.
+	 * You should call rsb_lib_init(RSB_NULL_INIT_OPTIONS) before.
+	 *
+	 * TODO: pow, log
+	 * benchmark for ops in the L1
+	 * strided ops
+	 *
+	 */
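+	/* Sketch of the intended call sequence:
+	 *   rsb_lib_init(RSB_NULL_INIT_OPTIONS);
+	 *   errval = rsb__fp_benchmark();
+	 *   rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
+	 */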
+	rsb_fpb_fp_t fpba[]={rsb_fpb_add,rsb_fpb_sum,rsb_fpb_mul,rsb_fpb_neg,rsb_fpb_inc,rsb_fpb_sqr,rsb_fpb_div};
+	rsb_int i;
+//	size_t bs = rsb__get_lastlevel_c_size();
+//	size_t bs = rsb__get_first_level_c_size();
+	rsb_int_t cln = rsb__get_cache_levels_num(),cli;
+	rsb__times_t times,mtimes = RSB_MEMSCAN_MIN_TIMES,Mtimes = RSB_MEMSCAN_MAX_TIMES;
+	rsb_time_t mt = RSB_FPBENCH_MULTITYPE_TIME;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	for(cli=1;cli<=cln;++cli)
+	for(i=0;i<sizeof(fpba)/sizeof(rsb_fpb_fp_t);++i)
+	{
+		size_t bs = rsb__get_lnc_size(cli);
+		if(!bs)
+		{
+			errval = RSB_ERR_INTERNAL_ERROR;
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}
+		RSB_INFO("#probing for an iteration count (up to a total of %f s) .. \n",mt);
+		for(times=mtimes;times<Mtimes;times*=2)
+		{
+			rsb_bool_t aloud = RSB_BOOL_FALSE;
+			rsb_time_t dt;
+
+			dt = - rsb_time();
+			fpba[i](times,bs,aloud);
+			dt += rsb_time();
+			if(dt>mt)
+			{
+				aloud = RSB_BOOL_TRUE;
+				fpba[i](times,bs,aloud);
+				break;	/* break the inner loop, go for another benchmark */
+			}
+		}
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+/* @endcond */
diff --git a/rsb_fpb.h b/rsb_fpb.h
new file mode 100644
index 0000000..b76d223
--- /dev/null
+++ b/rsb_fpb.h
@@ -0,0 +1,46 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/**
+ * @file
+ * @author Michele Martone
+ * @brief Floating point microbenchmarks.
+ */
+
+#ifndef RSB_FPB_H_INCLUDED
+#define RSB_FPB_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#include "rsb.h"		/* public API specification */
+
+#define RSB_FPBENCH_TIME  2.0	/* min time for performing a floating point performance test on a type array  */
+#define RSB_FPBENCH_MULTITYPE_TIME  ((RSB_FPBENCH_TIME)*(RSB_IMPLEMENTED_TYPES))	/* min time for performing a floating point performance test  */
+rsb_err_t rsb__fp_benchmark(void);
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+#endif /* RSB_FPB_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_garbage.c b/rsb_garbage.c
new file mode 100644
index 0000000..a0422d5
--- /dev/null
+++ b/rsb_garbage.c
@@ -0,0 +1,955 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains functions for benchmarking, integration testing, and miscellaneous purposes.
+ * 
+ * FIXME : this file contains both important and obsolete code.
+ * */
+
+#include <ctype.h>	/*isdigit*/
+#include <string.h>	/*memcmp, strchr*/
+#include <math.h>	/*fabs*/
+#include "rsb_garbage.h"
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
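+/* Compares two COO triplets element-wise: returns RSB_BOOL_TRUE if equal,
+ * RSB_BOOL_FALSE if different, RSB_BOOL_MAYBE on bad arguments. */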
+static rsb_bool_t rsb_are_same_coo(
+	void * VA,
+	void * new_VA,
+	rsb_coo_idx_t * IA, 
+	rsb_coo_idx_t * new_IA, 
+	rsb_coo_idx_t * JA, 
+	rsb_coo_idx_t * new_JA, 
+	rsb_nnz_idx_t nnz,
+	size_t el_size,
+	rsb_err_t * errvalp
+	)
+{
+	/**
+	 * \ingroup gr_internals
+
+		FIXME
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!VA || !new_VA || !IA || !new_IA || !JA || !new_JA || RSB_INVALID_NNZ_INDEX(nnz))
+	{
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+
+	if(RSB_MEMCMP(IA,new_IA,sizeof(rsb_coo_idx_t)*(nnz)))
+		goto diff;
+
+	if(RSB_MEMCMP(JA,new_JA,sizeof(rsb_coo_idx_t)*(nnz)))
+		goto diff;
+
+	if(RSB_MEMCMP(VA,new_VA,el_size*(nnz)))
+		goto diff;
+
+	return RSB_BOOL_TRUE;
+diff:
+	return RSB_BOOL_FALSE;
+err:
+	RSB_CONDITIONAL_ERRPSET(errvalp,errval);
+	return RSB_BOOL_MAYBE;
+}
+
+static rsb_bool_t rsb_util_is_matrix_equal_to_coo(
+	struct rsb_mtx_t * mtxCp,
+	void * VA,
+	void * new_VA,
+	rsb_coo_idx_t * IA, 
+	rsb_coo_idx_t * new_IA, 
+	rsb_coo_idx_t * JA, 
+	rsb_coo_idx_t * new_JA, 
+	rsb_nnz_idx_t nnz,
+	rsb_coo_idx_t m,
+	rsb_coo_idx_t k,
+	size_t el_size,
+	rsb_type_t typecode,
+	rsb_flags_t flags,
+	rsb_err_t * errvalp)
+{
+	/**
+	 * \ingroup gr_internals
+	 *
+	 * FIXME
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_bool_t same = RSB_BOOL_FALSE;
+
+	RSB_INFO("getting back matrix in coo format ... \n");
+	if( (errval = rsb__do_get_coo_noalloc(mtxCp,new_VA,new_IA,new_JA,NULL,RSB_FLAG_C_INDICES_INTERFACE)) != RSB_ERR_NO_ERROR )
+	{
+		RSB_INFO("rsb__do_get_coo_noalloc returned with an error code\n");
+		goto err;
+	}
+
+	RSB_INFO("sorting back reconstructed matrix in 1x1 blocks  ... \n");
+	if((errval = rsb_util_sort_row_major_inner(VA,IA,JA,nnz,m,k,typecode,flags))!=RSB_ERR_NO_ERROR)
+	{
+		errval = RSB_ERR_INTERNAL_ERROR;
+		goto err;
+	}
+
+	if((errval = rsb_util_sort_row_major_inner(new_VA,new_IA,new_JA,nnz,m,k,typecode,flags))!=RSB_ERR_NO_ERROR)
+	{
+		errval = RSB_ERR_INTERNAL_ERROR;
+		goto err;
+	}
+
+	same = rsb_are_same_coo( VA, new_VA, IA, new_IA, JA, new_JA, nnz, el_size, &errval);
+	if(same == RSB_BOOL_MAYBE)
+	{
+		RSB_STDERR("error while comparing coo\n");
+//		errval = RSB_ERR_INTERNAL_ERROR;
+		goto err;
+	}
+	if(same == RSB_BOOL_FALSE)
+	{
+		RSB_STDERR("mismatch\n");
+		goto diff;
+	}
+
+	return RSB_BOOL_TRUE;
+diff:
+	return RSB_BOOL_FALSE;
+err:
+	RSB_CONDITIONAL_ERRPSET(errvalp,errval);
+	return RSB_BOOL_MAYBE;
+}
+
+int rsb__test_bitmap_driver(rsb_coo_idx_t r, rsb_coo_idx_t c)
+{
+	/**
+	 * Since we could modify our implementation of bitmap data structures over time, 
+	 * there will always be a testing need, so we place here this routine.
+	 *
+	 * It will test for the library bitmap functionalities.
+	 *
+	 * \param r	should specify the bitmap rows count
+	 * \param c	should specify the bitmap columns count
+	 * 
+	 * \return 0 on correct operation, -1 otherwise.
+	 *
+	 * FIXME : seems broken (./rsbench -Ot -b)
+	 * */
+	rsb_bitmap_data_t *bitmap    = NULL;
+	rsb_coo_idx_t * IA=NULL,*JA = NULL;
+	rsb_coo_idx_t i,k,nnz;
+	rsb_nnz_idx_t counted_nnz = 0;
+	rsb_nnz_idx_t duplicates  = 0;
+
+	if(r<1 || c<1){RSB_ERROR("rsb__test_bitmap_driver(r=%d, c=%d)\n",r,c);goto err;}
+	nnz=(r*c)/2;
+	RSB_INFO("testing a %ld x %ld bitmap...\n",(long)r,(long)c);
+	
+	/* We generate a r x c bitmap and two integer index arrays */
+	bitmap = rsb__allocate_bitmap(r,c);
+	IA = rsb__malloc(sizeof(rsb_coo_idx_t)*nnz);
+	JA = rsb__malloc(sizeof(rsb_coo_idx_t)*nnz);
+	if(!bitmap || !IA || !JA)goto err;
+	/* We populate the arrays with random coordinates (please note that there could be duplicates) */
+	RSB_INFO("generating coefficients...\n");
+	for(k=0;k<nnz;++k) {IA[k]=rsb__rand_coo_index(r);} 
+	for(k=0;k<nnz;++k) {JA[k] =rsb__rand_coo_index(c);}
+	/* with a very stupid algorithm for avoiding duplicates :) */
+	RSB_INFO("fixing duplicates (WARNING : could take forever!)..  \n");
+	duplicates = 0;
+	do
+	{
+		/* WARNING : THERE IS NO PROOF THIS COULD TERMINATE, AS IT IS DEPENDENT ON THE PSEUDORANDOM NUMBER GENERATOR */
+		duplicates=0;
+		for(k=0;k<nnz;++k) for(i=0;i<k;++i)if(IA[i]==IA[k]) if(JA[i]==JA[k])
+		{
+			//RSB_STDOUT("k:%d i:%d ; ",k,i); RSB_STDOUT("IA[k]:%d JA[k]:%d - ",IA[k],JA[k]); RSB_STDOUT("IA[i]:%d JA[i]:%d\n",IA[i],JA[i]);
+			IA[k] = rsb__rand_coo_index(r);
+			JA[k]  = rsb__rand_coo_index(c);
+			++duplicates;
+		}
+	}
+	while(duplicates);
+	
+	/* We try to set bits in the bitmap according to the coordinate arrays */
+	RSB_INFO("setting bits randomly...\n");
+	for(k=0;k<nnz;++k) RSB_BITMAP_SET(bitmap,r,c,IA[k],JA[k]);
+
+	/* We try to read bits in the bitmap according to the coordinate arrays and cross-validate */
+	RSB_INFO("checking count directly...  ");
+
+	counted_nnz=0;
+	for(k=0;k<nnz;++k) if(RSB_BITMAP_GET(bitmap,r,c,IA[k],JA[k]))++counted_nnz;
+
+#ifdef RSB_DEBUG_BITMAP
+	if(counted_nnz!=nnz) {RSB_ERROR("inserted nonzeros : %d\ncounted nonzeros : %d\n",nnz,counted_nnz);goto err;}
+#endif
+
+	RSB_INFO(" ...ok\n");
+	RSB_INFO("checking count indirectly...");
+	counted_nnz=0;
+	counted_nnz = rsb__bitmap_bit_count(bitmap,r,c);
+	if(counted_nnz!=nnz-duplicates) {RSB_ERROR("inserted nonzeros : %d - %d = %d\ncounted nonzeros  : %d\n",nnz,duplicates,nnz-duplicates,counted_nnz);goto err;}
+	RSB_INFO(" ...ok\n");
+
+	/* We validate the bit count */
+	/* ok, tests passed */
+	rsb__free(bitmap);
+	rsb__free(IA);
+	rsb__free(JA);
+	return 0;
+err:
+	RSB_CONDITIONAL_FREE(bitmap);
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_CONDITIONAL_FREE(JA);
+	return -1;
+}
+
+rsb_err_t rsb__test_gen_matrix(rsb_type_t typecode, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, void ** VA, rsb_coo_idx_t rows, rsb_coo_idx_t cols, rsb_nnz_idx_t nnz, int allow_duplicates)
+{
+	/**
+	 * Generates a randomly populated matrix.
+	 * FIXME : move to some macro
+	 * FIXME : UNTESTED
+	 */
+
+	if( !IA || !JA || !VA)goto err;
+	*IA=NULL;
+	*JA=NULL;
+	*VA=NULL;
+
+	if( nnz<1 )goto err;
+	if( rows < 1 || cols < 1 )goto err;
+
+	if(RSB_SOME_ERROR( rsb_util_coo_alloc( VA, IA, JA,nnz,typecode,RSB_BOOL_TRUE)))
+		goto err;
+
+	if(rsb__test_fill_matrix_nnz(typecode, nnz, *VA ))goto err;
+	if(rsb__test_fill_matrix_coords(*IA, *JA, rows, cols, nnz, allow_duplicates))goto err;
+
+	return 0;
+err:
+	RSB_CONDITIONAL_FREE(*IA);
+	RSB_CONDITIONAL_FREE(*JA);
+	RSB_CONDITIONAL_FREE(*VA);
+	return RSB_ERR_GENERIC_ERROR;
+}
+
+int rsb__test_fill_matrix_nnz(rsb_type_t typecode, rsb_nnz_idx_t nnz, void *VA )
+{
+	/**
+	 * Fills a given array with random values.
+	 *
+	 * FIXME : move to some macro
+	 * FIXME : UNTESTED
+	 * */
+	rsb_nnz_idx_t k;
+	
+	if(!VA || nnz<1 )goto err;
+
+#ifdef RSB_NUMERICAL_TYPE_INT
+	if(typecode == RSB_NUMERICAL_TYPE_INT ) for(k=0;k<nnz;++k) {((int *)(VA))[k]=(int )rand();} 
+	else
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT
+	if(typecode == RSB_NUMERICAL_TYPE_FLOAT ) for(k=0;k<nnz;++k) {((float *)(VA))[k]=(float )rand();} 
+	else
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+	if(typecode == RSB_NUMERICAL_TYPE_DOUBLE) for(k=0;k<  nnz;++k) {((double*)(VA))[k]=(double)rand();} 
+	else
+#endif
+	return -1;
+
+	return 0;
+err:
+	return -1;
+}
+
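+/* Returns a pseudorandom index in [0, max_plus_one) (uniform up to the bias of rand()%). */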
+rsb_coo_idx_t rsb__rand_coo_index(rsb_coo_idx_t max_plus_one)
+{
+	/**
+	 * \ingroup gr_internals
+	 */
+	rsb_coo_idx_t i = (rsb_coo_idx_t )rand()%max_plus_one;
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(i));
+	return i;
+}
+
+rsb_blk_idx_t rsb__rand_blk_index(rsb_blk_idx_t max_plus_one)
+{
+	/**
+	 * \ingroup gr_internals
+	 */
+	rsb_coo_idx_t i = (rsb_blk_idx_t )rand()%max_plus_one;
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_BLK_INDEX(i));
+	return i;
+}
+
+rsb_err_t rsb__test_fill_matrix_coords(rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t rows, rsb_coo_idx_t cols, rsb_nnz_idx_t nnz, rsb_bool_t allow_duplicates)
+{
+	/**
+	 * Fills the given row and column index arrays with random
+	 * coordinates, optionally avoiding duplicates.
+	 *
+	 * FIXME : document this
+	 * */
+	rsb_nnz_idx_t i,k;
+	rsb_nnz_idx_t duplicates = 0;
+	
+	if( !IA || !JA)goto err;
+	if( rows < 1 || cols < 1 || nnz < 1 )goto err;
+
+	/* We populate the arrays with random coordinates (please note that there could be duplicates) */
+	for(k=0;k<nnz;++k) {IA[k]=rsb__rand_coo_index(rows);} 
+	for(k=0;k<nnz;++k) {JA[k] =rsb__rand_coo_index(cols);}
+	/* with a very stupid algorithm for avoiding duplicates (COULD TAKE FOREVER!) */
+	duplicates = 0;
+	if(!allow_duplicates)
+	do
+	{
+		/* WARNING : THERE IS NO PROOF THIS COULD TERMINATE, AS IT IS DEPENDENT ON THE PSEUDORANDOM NUMBER GENERATOR */
+		duplicates=0;
+		for(k=0;k<nnz;++k) for(i=0;i<k;++i)if(IA[i]==IA[k]) if(JA[i]==JA[k])
+		{
+			IA[k] = rsb__rand_coo_index(rows);
+			JA[k]  = rsb__rand_coo_index(cols);
+			++duplicates;
+		}
+	}
+	while(duplicates);
+
+	return 0;
+err:
+	return RSB_ERR_GENERIC_ERROR;
+}
+
+int rsb_test_dump_main(const int argc,rsb_char_t *const argv[])
+{
+	/**
+	 * \ingroup gr_internals
+	 * This example main program reads in a Matrix Market file in fixed
+	 * block format and dumps it out.
+	 *
+	 * TODO :
+	 *         * this a strictly debugging function.
+	 *         * it needs better documentation.
+	 *         * only double precision numbers for now.
+	 *         * it should go to some macro file
+	 * 
+	 * FIXME : this file functionality is already in test_matops.m4.
+	 **/
+	rsb_option options[] = {
+	    {"block-rowsize",	required_argument, NULL, 'r'},  
+	    {"block-columns",	required_argument, NULL, 'c'},  
+	    {"type",		required_argument, NULL, 'T'},  
+	    {"matrix-filename",	required_argument, NULL, 'f'},  
+	    {"in-place-permutation",	no_argument, NULL, 'P'},  
+	    {0,0,0,0}
+	};
+
+	rsb_nnz_idx_t nnz=0;/*was -1 */
+	int c;
+	int opt_index = 0;
+
+	rsb_coo_idx_t * IA=NULL,*JA=NULL;
+	void *VA=NULL;
+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_DOUBLE;	/* double precision floating point */
+#else
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_DEFAULT;
+#endif
+
+	rsb_blk_idx_t br=1;
+	rsb_blk_idx_t bc=1;
+
+	rsb_coo_idx_t m=0, k=0;/* was -1 */
+	const rsb_char_t * filename=NULL;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	//rsb_flags_t flags = RSB_FLAG_DEFAULT|RSB_FLAG_DEFAULT_STORAGE_FLAGS/* this second flags is NEW */;
+	rsb_flags_t flags = RSB_FLAG_DEFAULT_STORAGE_FLAGS;
+
+	if(typecode==-1)
+	{
+		
+		RSB_STDERR("error : please recompile with double precision floating point numbers supported! \n");
+		return -1;
+	}
+
+    	for (;;)
+	{
+		c = rsb_getopt_long(argc, argv, RSB_SAMPLE_PROGRAM_OPTIONS_GET_FLAGS"T:f:r:c:", options, &opt_index);/* Flawfinder: ignore */
+		if (c == -1)break;
+		RSB_DO_FLAG_ADD(flags,rsb__sample_program_options_get_flags(c,optarg));
+		switch (c)
+		{
+			case 'r':
+			br = (rsb_blk_idx_t)(abs(rsb__util_atoi(optarg))%RSB_NNZ_BLK_MAX) ;
+			break;
+			case 'c':
+			bc = (rsb_blk_idx_t)(abs(rsb__util_atoi(optarg))%RSB_NNZ_BLK_MAX) ;
+			break;
+			case 'T':
+			typecode = *optarg;
+			break;
+			case 'f':
+			filename = optarg;
+			break;
+			case 'h':
+			RSB_STDERR("usage : %s [OPTIONS]\n where OPTIONS are taken from [ -f filename ] [ -r br ] [ -c bc ] [ -T TYPECODE ]:\n", argv[0]);
+	    	}
+	}
+
+	if(RSB_SOME_ERROR(errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS)))
+		goto err;
+
+	if(filename)
+	{
+		//rsb_blk_idx_t M_b=0, K_b=0;
+		//rsb_blk_idx_t i;
+		struct rsb_mtx_t * mtxAp = NULL;
+//		rsb_coo_idx_t *p_r=NULL,*p_c=NULL;
+
+		RSB_WARN("imposing RSB_FLAG_SORT_INPUT!\n");
+
+		if((rsb__util_mm_load_matrix_f(filename,&IA,&JA,&VA,&m,&k,&nnz,typecode,flags,NULL,NULL))!=0)
+		{
+			errval = RSB_ERR_GENERIC_ERROR;
+			RSB_STDERR(RSB_ERRMSG_NOTMTXMKT" : %s ..\n",filename);
+			goto err;
+		}
+
+//		p_r = rsb__util_get_partitioning_array( br, m , &M_b, flags);
+//		p_c = rsb__util_get_partitioning_array( bc, k , &K_b, flags);
+
+		/* note the last block size : it is the same, regardless of congruences */
+//		if(! p_r) goto err;
+//		if(! p_c) goto err;
+
+		/* 
+		 * plain blocked
+		 * */
+		mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,m,k,RSB_DEFAULT_ROW_BLOCKING,RSB_DEFAULT_COL_BLOCKING,flags,&errval);
+
+		if(!mtxAp)
+		{
+			errval = RSB_ERR_GENERIC_ERROR;
+//			RSB_CONDITIONAL_FREE(p_r);
+//			RSB_CONDITIONAL_FREE(p_c);
+			goto err;
+		}
+
+		errval = rsb__do_file_mtx_save( mtxAp, NULL );
+		RSB_MASK_OUT_SOME_ERRORS(errval);
+
+		if( RSB_SOME_ERROR(errval) )
+		{
+			errval = RSB_ERR_GENERIC_ERROR;
+			RSB_STDERR("[!] some problem occurred!\n");
+			goto err;
+		}
+
+//		RSB_CONDITIONAL_FREE(p_r);
+//		RSB_CONDITIONAL_FREE(p_c);
+		RSB_MTX_FREE(mtxAp);
+	}
+	else
+	{
+		RSB_INFO("Please specify a matrix filename (with -f)\n");
+	}
+
+err:
+	RSB_MASK_OUT_SOME_ERRORS(errval);
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_CONDITIONAL_FREE(JA);
+	RSB_CONDITIONAL_FREE(VA);
+	if(RSB_SOME_ERROR(rsb_lib_exit(RSB_NULL_EXIT_OPTIONS)))return -1;
+	return RSB_ERR_TO_PROGRAM_ERROR(errval);
+}
+
+int rsb__test_gen_and_print_matrix(rsb_type_t typecode, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, void ** VA, rsb_coo_idx_t rows, rsb_coo_idx_t cols, rsb_nnz_idx_t nnz)
+{
+	/**
+	 * \ingroup gr_internals
+	 * Allocates a whole matrix and returns it back to the caller.
+	 * */
+	int allow_duplicates = 0;
+	if( !IA || !JA || !VA)goto err;
+	*IA=NULL;
+	*JA=NULL;
+	*VA=NULL;
+	if(nnz<1 )goto err;
+	if( rows < 1 || cols < 1 )goto err;
+	
+	if(rsb__test_gen_matrix(typecode, IA, JA, VA, rows, cols, nnz, allow_duplicates ))goto err;
+	if(rsb__test_print_coo_mm(typecode,RSB_FLAG_NOFLAGS,*IA,*JA,*VA,rows,cols,nnz,RSB_BOOL_TRUE,RSB_DEFAULT_STREAM ))goto err;
+
+	/* Note : we do not free these arrays, but give them back to the caller */
+	return 0;
+err:
+	RSB_CONDITIONAL_FREE(*IA);
+	RSB_CONDITIONAL_FREE(*JA);
+	RSB_CONDITIONAL_FREE(*VA);
+	return -1;
+}
+
+rsb_flags_t rsb__sample_program_options_get_flags(int c, const rsb_char_t * optarg)
+{
+	/**
+		\ingroup gr_internals
+		
+		This function should be used in demo programs to let the user set
+		via command line program switches the sparse matrix format flags.
+
+	 	c = rsb_getopt_long(argc, argv, ...
+
+		\see RSB_SAMPLE_PROGRAM_OPTIONS_GET_FLAGS
+	*/
+	rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+	rsb_bool_t format_chosen = RSB_BOOL_FALSE;
+
+	switch (c)
+	{
+			case 0x41: /* A */
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_AUTO_BLOCKING);
+			break;
+#if 0
+			case 0x45: /* E */
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_RECURSIVE_DOUBLE_DETECTED_CACHE);/* FIXME : EXPERIMENTAL */;
+			break;
+			case 0x43: /* C */
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_RECURSIVE_HALF_DETECTED_CACHE);/* FIXME : EXPERIMENTAL */;
+			break;
+#endif
+			case 0x44: /* D */
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_RECURSIVE_SUBDIVIDE_MORE_ON_DIAG);/* FIXME : EXPERIMENTAL */;
+			break;
+			case 0x48: /* H */
+			//RSB_DO_FLAG_ADD(flags,RSB_FLAG_USE_HALFWORD_INDICES_COO);/* FIXME : EXPERIMENTAL */;
+			//RSB_DO_FLAG_ADD(flags,RSB_FLAG_USE_HALFWORD_INDICES_CSR);/* FIXME : EXPERIMENTAL */;
+			RSB_ERROR("-H switch is DEPRECATED\n");
+			break;
+#ifdef RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES
+			case 0x4C: /* L */
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES);/* FIXME : EXPERIMENTAL */;
+			break;
+#endif
+			case 0x52: /* R */
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_QUAD_PARTITIONING /* FIXME : EXPERIMENTAL */ /*  | RSB_FLAG_RECURSIVE_SHRINK_BOUNDING_BOX*/);
+			break;
+			case 0x69: /* i */
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR);
+			break;
+//			case 0x64: /* d */
+//			RSB_DO_FLAG_ADD(flags,RSB_FLAG_SHOULD_DEBUG);/* new */
+//			break;
+			case 0x73: /* s */
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORT_INPUT);
+			break;
+			case 'P': /* P */
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT);
+			break;
+//			case 'V': /* V */
+//			RSB_STDERR("RSB_FLAG_SHOULD_BE_VERBOSE flag is not supported anymore\n");
+//			//RSB_DO_FLAG_ADD(flags,RSB_FLAG_SHOULD_BE_VERBOSE);
+//			break;
+			case 'q': /* q */
+			{
+				const rsb_char_t * op=optarg;
+
+				while(op && *op)
+				{
+					switch(toupper(*op))
+					{
+//						case 'T':
+//						RSB_DO_FLAG_ADD(flags,RSB_FLAG_TRIANGULAR);
+//						break;
+						case 'U':
+						RSB_DO_FLAG_ADD(flags,RSB_FLAG_UPPER);
+						break;
+//						case 'L':
+//						RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER);
+//						break;
+#if 0
+						case 'C':
+						RSB_DO_FLAG_ADD(flags,RSB_FLAG_RECURSIVE_HALF_DETECTED_CACHE);
+						break;
+						case 'E':
+						RSB_DO_FLAG_ADD(flags,RSB_FLAG_RECURSIVE_DOUBLE_DETECTED_CACHE);
+						break;
+#endif
+//						case 'E':
+//						RSB_DO_FLAG_ADD(flags,RSB_FLAG_RECURSIVE_SUBDIVIDE_MORE_ON_DIAG);
+//						break;
+						case 'H':
+						RSB_DO_FLAG_ADD(flags,RSB_FLAG_USE_HALFWORD_INDICES);
+						break;
+//						case 'H':
+//						RSB_DO_FLAG_ADD(flags,RSB_FLAG_USE_HALFWORD_INDICES_CSR);
+//						break;
+						case 'O':
+						RSB_DO_FLAG_ADD(flags,RSB_FLAG_USE_HALFWORD_INDICES_COO);
+						break;
+						case 'R':
+						RSB_DO_FLAG_ADD(flags,RSB_FLAG_QUAD_PARTITIONING);
+						break;
+						case 'T':
+						RSB_DO_FLAG_ADD(flags,RSB_FLAG_RECURSIVE_MORE_LEAVES_THAN_THREADS);
+						break;
+#ifdef RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES
+						case 'L':
+						RSB_DO_FLAG_ADD(flags,RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES);
+						break;
+#endif
+					}
+					++op;
+				}
+			}
+			break;
+			case 'F': /* F */
+			/* FIXME : UNFINISHED, UNDOCUMENTED */
+			format_chosen = RSB_BOOL_TRUE;
+			{int _sf=0;
+			if(strchr(optarg,0x6F))/* o */
+				{RSB_DO_FLAG_ADD(flags,RSB_FLAG_WANT_COO_STORAGE);_sf++;}
+#ifdef RSB_FLAG_WANT_LINKED_STORAGE
+			if(strchr(optarg,0x6C))/* l */ /* FIXME */
+				{RSB_DO_FLAG_ADD(flags,RSB_FLAG_WANT_LINKED_STORAGE);_sf++;}
+#endif
+			if(strchr(optarg,0x62))/* b */
+				{RSB_DO_FLAG_ADD(flags,RSB_FLAG_WANT_BCSS_STORAGE);_sf++;}
+			if(strchr(optarg,0x63))/* c */
+				{RSB_DO_FLAG_ADD(flags,RSB_FLAG_WANT_COLUMN_MAJOR_ORDER);_sf++;}
+#ifdef RSB_FLAG_WANT_AUTO_MAJOR_ORDER
+			if(strchr(optarg,'a'))/* a */
+				{RSB_DO_FLAG_ADD(flags,RSB_FLAG_WANT_AUTO_MAJOR_ORDER);_sf++;}
+#endif
+			if(strchr(optarg,0x76))/* v */
+				{RSB_DO_FLAG_DEL(flags,RSB_FLAG_WANT_BCSS_STORAGE);_sf++;}
+			if( !_sf ){ RSB_STDERR("specified an unknown matrix format (should be [b|v|l|o][c])\n");goto err;}
+			}
+			break;
+    	}
+//	if(format_chosen == RSB_BOOL_FALSE)
+//	{
+//		RSB_DO_FLAG_ADD(flags,RSB_FLAG_DEFAULT_STORAGE);
+//	}
+err:
+	return flags;
+}
+
+rsb_err_t rsb__oski_estimate_bcsr_fill_from_coo(/*  const*/ rsb_coo_idx_t * IA, /*const*/ rsb_coo_idx_t * JA, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_fillin_t * efillinmap )
+{
+	/*
+		FIXME : we waste resources here.
+	*/
+
+	rsb_nnz_idx_t * ptr=NULL, * indx=NULL;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	void * VA=NULL;
+
+	VA = rsb__malloc_vector(nnz,typecode);	/* ! FIXME ! */
+
+	if(!VA)
+	{
+		errval = RSB_ERR_ENOMEM;
+		goto err;
+	}
+	
+	errval = rsb_util_sort_row_major_inner(VA,IA,JA,nnz,m,k,typecode,RSB_FLAG_NOFLAGS);	/* ! */
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+	if(RSB_SOME_ERROR(errval = rsb__allocate_csr_arrays_from_coo_sorted(NULL,IA,JA,nnz,m,k,RSB_NUMERICAL_TYPE_INVALID_TYPE,NULL,&indx,&ptr)))
+		goto err;
+
+	rsb__oski_estimate_bcsr_fillin_from_csr(ptr, indx, m, k, nnz, efillinmap );
+err:
+	RSB_CONDITIONAL_FREE(VA);
+	RSB_CONDITIONAL_FREE(ptr);
+	RSB_CONDITIONAL_FREE(indx);
+	
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__oski_estimate_bcsr_fillin_from_csr(const rsb_nnz_idx_t * pntr, const rsb_coo_idx_t * indx, const rsb_coo_idx_t m, const rsb_coo_idx_t k, const rsb_nnz_idx_t nnz, rsb_fillin_t * efillinmap)
+{
+	/*
+		\ingroup gr_internals
+		FIXME : UNFINISHED
+		A basic implementation of OSKI's fillin blocking estimation algorithm.
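+		The r x c fill-in is the ratio of entries stored after
+		blocking (i.e. blocks * r * c) to the original nonzeroes;
+		here it is estimated by scanning only a sample of the rows.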
+	*/
+
+	const rsb_int rua[] = RSB_ROWS_UNROLL_ARRAY;
+	const rsb_int cua[] = RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_int ci=0,ri=0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+#if (RSB_WANT_EXPERIMENTAL_FILLIN_ESTIMATOR==1)
+	rsb_nnz_idx_t * block_count=NULL;
+
+	block_count = rsb__malloc(sizeof(rsb_nnz_idx_t)*k*RSB_COLUMNS_UNROLL_ARRAY_LENGTH);
+	if(!block_count)
+	{
+		errval = RSB_ERR_ENOMEM;
+		goto err;
+	}
+#endif
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+			efillinmap[ri*RSB_COLUMNS_UNROLL_ARRAY_LENGTH+ci]=RSB_REAL_ZERO;
+
+//	RSB_STDOUT("#experimental fillin estimator\n");
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		rsb_coo_idx_t i=0,mi=0,Mi=0,j=0,bi=0;
+//		const rsb_int fraction=m<10000?1000:m/(100*rua[ri]);
+		const rsb_int fraction=1000/rua[ri];	/* the higher the constant, the lighter the computation */
+		rsb_nnz_idx_t num_blocks[RSB_COLUMNS_UNROLL_ARRAY_LENGTH];
+		rsb_coo_idx_t last_block_index[RSB_COLUMNS_UNROLL_ARRAY_LENGTH];
+		rsb_nnz_idx_t nnz_visited = 0;
+//		rsb_nnz_idx_t dr=((m/rua[ri])/fraction)*rua[ri];/* FIXME */
+		rsb_nnz_idx_t dr=fraction*rua[ri];/* FIXME */
+		Mi = RSB_MIN((m)/fraction,m-1);	/* FIXME */
+
+//		dr=m;
+
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+			num_blocks[ci]=0;
+	
+		//for(i=mi;i+rua[ri]-1<=Mi && i+rua[ri]-1<m;i+=dr)	/* FIXME */
+		for(i=mi;i+rua[ri]-1<m;i+=dr)	/* FIXME */
+		{
+			rsb_nnz_idx_t ja[RSB_MAXIMAL_CONFIGURED_BLOCK_SIZE];
+
+//			RSB_STDOUT("#%zd / %zd\n",(rsb_printf_int_t)i,(rsb_printf_int_t)m);
+
+			for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+				last_block_index[ci]=RSB_MARKER_NNZ_VALUE;
+
+			/* for each nonzero */
+			for(bi=0;bi<rua[ri];++bi)
+				ja[bi]=pntr[i+bi];
+
+#if (RSB_WANT_EXPERIMENTAL_FILLIN_ESTIMATOR==1)
+			/* UNFINISHED */
+			for(bi=0;bi<rua[ri];++bi)
+			for(j=pntr[i+bi];j!=pntr[i+bi+1];++j)
+			for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+			{
+				if(!block_count[k*ci+j/cua[ci]])
+				{
+					block_count[k*ci+j/cua[ci]]++;
+					nnz_visited++;
+				}
+			}
+			RSB_BITMAP_CLEAR(bitmap,RSB_MAXIMAL_CONFIGURED_BLOCK_SIZE,k);
+#else
+			for(j=0;j<k;++j)
+			for(bi=0;bi<rua[ri];++bi)
+			if(pntr[i+bi]!=pntr[i+bi+1])
+			{
+/*				RSB_STDOUT("#%zd , %zd ? %zd , %zd \n",
+					(rsb_printf_int_t)(bi),(rsb_printf_int_t)j,
+					(rsb_printf_int_t)indx[ja[bi]],(rsb_printf_int_t)ja[bi]
+					);*/
+				if(indx[ja[bi]]==j)
+				{
+//					RSB_STDOUT("#%zd , %zd\n",(rsb_printf_int_t)(i+bi),(rsb_printf_int_t)j);
+					nnz_visited++;
+
+			//		RSB_STDOUT("#%zd -> ",(rsb_printf_int_t)ja[bi]);
+			//		if(ja[bi]+1<pntr[i+bi+1])
+					ja[bi]++;
+
+//					RSB_STDOUT("%zd\n",(rsb_printf_int_t)ja[bi]);
+
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+					{
+						if(j/cua[ci] != last_block_index[ci])
+						{
+							last_block_index[ci]= j/cua[ci] ;
+							num_blocks[ci]++;
+						}
+					}
+				}
+			}
+#endif
+
+//			RSB_STDOUT("at the end of %zd, %zd nnz\n",(rsb_printf_int_t)i,(rsb_printf_int_t)nnz_visited);
+		}
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
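+			/* fill-in estimate: (blocks * r*c entries per block) / nonzeroes actually visited */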
+			efillinmap[ri*RSB_COLUMNS_UNROLL_ARRAY_LENGTH+ci]=(((rsb_fillin_t)num_blocks[ci])*rua[ri]*cua[ci])/((rsb_fillin_t)nnz_visited);
+//			RSB_STDOUT("#nnz_visited : %d, num_blocks : %d \n",nnz_visited,num_blocks[ci]);
+//			RSB_STDOUT("#%d %d %f\n",rua[ri],cua[ci],efillinmap[ri][ci]);
+		}
+	}
+
+	goto err;
+//	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+//		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+//			RSB_STDOUT("#%d %d %f\n",rua[ri],cua[ci],efillinmap[ri*RSB_COLUMNS_UNROLL_ARRAY_LENGTH+ci]);
+err:
+#if (RSB_WANT_EXPERIMENTAL_FILLIN_ESTIMATOR==1)
+	RSB_CONDITIONAL_FREE(block_count);
+#endif
+	RSB_DO_ERR_RETURN(errval)
+}
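+/*
+ * Illustrative numeric sketch of the estimate above (hypothetical values):
+ * if sampling visited nnz_visited = 100 nonzeros which fell into
+ * num_blocks = 30 distinct 2x2 blocks, the estimated fill-in would be
+ * (30*2*2)/100 = 1.2, i.e. roughly 20% of the stored values would be
+ * explicit zeros under that blocking.
+ */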
+
+rsb_err_t rsb__do_column_expand(rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t * kp, rsb_int factor)
+{
+	rsb_nnz_idx_t i;
+	rsb_coo_idx_t k;
+
+	/*!
+	 * Scales the column indices JA by |factor|; a negative factor mirrors
+	 * the columns first. Updates the column count *kp accordingly.
+	*/
+
+	k=*kp;
+
+	if(factor>0)
+	{
+		for(i=0;i<nnz;++i)
+		{
+			JA[i]*=factor;
+		}
+		*kp=*kp*(factor);
+	}
+	else
+	{
+		factor=-factor;
+#if 1
+		/* mirror */
+		for(i=0;i<nnz;++i)
+		{
+			JA[i]=(k-(JA[i]+1))*factor;
+		}
+		*kp=*kp*factor;
+#else
+		/* this has the potential of introducing duplicates, so do not use it */
+		for(i=0;i<nnz;++i)
+		{
+			JA[i]=JA[i]/factor;
+		}
+		*kp=*kp/factor;
+#endif
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
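+/*
+ * A minimal usage sketch for rsb__do_column_expand (illustrative values):
+ *
+ *   rsb_coo_idx_t JA[] = {0,2,3}, k = 4;
+ *   rsb__do_column_expand(JA,3,&k,2);  // scales: JA = {0,4,6}, k = 8
+ *
+ * A negative factor (e.g. -2) mirrors the columns first:
+ * each JA[i] becomes (k-(JA[i]+1))*2, and k is likewise doubled.
+ */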
+
+rsb_err_t rsb__do_print_some_vector_stats(const void * p, rsb_type_t typecode, rsb_nnz_idx_t m, rsb_nnz_idx_t inc)
+{
+#if RSB_ALLOW_STDOUT
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_aligned_t errnorm[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	/* this is debug information, very cheap to include */
+	rsb__util_find_min(errnorm,p,typecode,m,inc);
+	RSB_STDOUT("#min:");
+	RSB_DO_ERROR_CUMULATE(errval,rsb__debug_print_value(errnorm,typecode));
+	RSB_STDOUT("\n");
+
+	rsb__util_find_max(errnorm,p,typecode,m,inc);
+	RSB_STDOUT("#max:");
+	RSB_DO_ERROR_CUMULATE(errval,rsb__debug_print_value(errnorm,typecode));
+	RSB_STDOUT("\n");
+
+	rsb__util_vector_sum_strided(errnorm,p,typecode,m,inc);
+	RSB_STDOUT("#sum:");
+	RSB_DO_ERROR_CUMULATE(errval,rsb__debug_print_value(errnorm,typecode));
+	RSB_STDOUT("\n");
+
+	RSB_DO_ERROR_CUMULATE(errval,rsb__vector_norm_strided(errnorm,p,typecode,m,inc));
+	RSB_STDOUT("#norm:");
+	RSB_DO_ERROR_CUMULATE(errval,rsb__debug_print_value(errnorm,typecode));
+	RSB_STDOUT("\n");
+
+	RSB_DO_ERR_RETURN(errval)
+#else
+	RSB_DO_ERR_RETURN(RSB_ERR_UNSUPPORTED_FEATURE)
+#endif
+}
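+/*
+ * A minimal usage sketch (hypothetical buffer) printing min/max/sum/norm:
+ *
+ *   double v[] = {1.0,-2.0,3.0};
+ *   rsb__do_print_some_vector_stats(v,RSB_NUMERICAL_TYPE_DOUBLE,3,1);
+ */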
+
+
+
+#if 0
+rsb_err_t _rsb_BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sU_de_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup gr_kernels
+	 * computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on correct operation, an error code (see \ref errors_section) otherwise.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+
+	/*	Outer loop. Occurs on the major dimension.	*/
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+
+
+#if 1
+		register double c_0=0;				
+		double *c=out+(1*(i*1));
+		for(k=bpntr[i]+0,j=bindx[k];k<bpntr[i+1]-5  ;k+=6,a += rows*columns,j=bindx[k])	/* k is the index of the block */
+		{
+			const double *b = rhs+(1*(j*1));
+			c_0+=a[(0*1)+0]*b[0];
+			c_0+=a[(0*1)+1]**(rhs+(1*(bindx[k+1]*1)));
+			c_0+=a[(0*1)+2]**(rhs+(1*(bindx[k+2]*1)));
+			c_0+=a[(0*1)+3]**(rhs+(1*(bindx[k+3]*1)));
+			c_0+=a[(0*1)+4]**(rhs+(1*(bindx[k+4]*1)));
+			c_0+=a[(0*1)+5]**(rhs+(1*(bindx[k+5]*1)));
+		}
+
+		for(;k<bpntr[i+1]  ;++k,a += rows*columns,j=bindx[k])	/* k is the index of the block */
+		{
+			const double *b = rhs+(1*(j*1));
+			c_0+=a[(0*1)+0]*b[0];
+		}
+		c[0]+=c_0;
+#else
+		for(k=bpntr[i]+0,j=bindx[k];k<bpntr[i+1]  ;k+=1,a += rows*columns,j=bindx[k])	/* k is the index of the block */
+		{
+			const double *b = rhs+(1*(j*1));
+			double *c=out+(1*(i*1));
+		register double c_0=0;				
+		c_0+=a[(0*1)+0]*b[0];
+			c[0]+=c_0;
+		}
+#endif
+
+	}
+	return 0;
+}
+#endif
+/* @endcond */
diff --git a/rsb_garbage.h b/rsb_garbage.h
new file mode 100644
index 0000000..35c1b39
--- /dev/null
+++ b/rsb_garbage.h
@@ -0,0 +1,56 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains functions for benchmarking, integration testing, and miscellaneous utilities.
+ * */
+
+#ifndef RSB_GARBAGE_H_INCLUDED
+#define RSB_GARBAGE_H_INCLUDED
+
+#include "rsb_internals.h"	/* rsb_coo_matrix_t */
+#include "rsb_common.h"
+
+int rsb__test_bitmap_driver(rsb_coo_idx_t r, rsb_coo_idx_t c);
+rsb_err_t rsb__test_gen_matrix(rsb_type_t typecode, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, void ** VA, rsb_coo_idx_t rows, rsb_coo_idx_t cols, rsb_nnz_idx_t nnz, int allow_duplicates);
+int rsb__test_fill_matrix_nnz(rsb_type_t typecode, rsb_nnz_idx_t nnz, void *VA );
+rsb_err_t rsb__test_fill_matrix_coords(rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t rows, rsb_coo_idx_t cols, rsb_nnz_idx_t nnz, rsb_bool_t allow_duplicates);
+int rsb_test_dump_main(const int argc,rsb_char_t *const argv[]);
+int rsb__test_gen_and_print_matrix(rsb_type_t typecode, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, void ** VA, rsb_coo_idx_t rows, rsb_coo_idx_t cols, rsb_nnz_idx_t nnz);
+int rsb_test_main_block_partitioned_matrix_stats(int argc,rsb_char_t *argv[]);
+rsb_coo_idx_t rsb__rand_coo_index(rsb_coo_idx_t max_plus_one);
+rsb_blk_idx_t rsb__rand_blk_index(rsb_blk_idx_t max_plus_one);
+rsb_flags_t rsb__sample_program_options_get_flags(int c, const rsb_char_t * optarg);
+int rsb_dump_postscript(const int argc, rsb_char_t * const argv[]);
+rsb_err_t rsb__oski_estimate_bcsr_fillin_from_csr(const rsb_nnz_idx_t * pntr, const rsb_coo_idx_t * indx, const rsb_coo_idx_t m, const rsb_coo_idx_t k, const rsb_nnz_idx_t nnz, rsb_fillin_t * efillinmap);
+rsb_err_t rsb__oski_estimate_bcsr_fill_from_coo(/*  const*/ rsb_coo_idx_t * IA, /*const*/ rsb_coo_idx_t * JA, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_fillin_t * efillinmap );
+rsb_err_t rsb__do_column_expand(rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t * kp, rsb_int factor);
+rsb_err_t rsb__do_print_some_vector_stats(const void * p, rsb_type_t typecode, rsb_nnz_idx_t m, rsb_nnz_idx_t inc);
+
+#define RSB_SAMPLE_PROGRAM_OPTIONS_GET_FLAGS "q:QLECHDVARisF:PT:"
+#define RSB_FLAG_DEFAULT_STORAGE RSB_FLAG_WANT_BCSS_STORAGE
+
+#endif /* RSB_GARBAGE_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_gen.c b/rsb_gen.c
new file mode 100644
index 0000000..b4acc32
--- /dev/null
+++ b/rsb_gen.c
@@ -0,0 +1,592 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains matrix generating functions.
+ * */
+
+#include "rsb_internals.h"
+
+rsb_err_t rsb__generate_blocked_banded_coo(rsb_nnz_idx_t dim, rsb_nnz_idx_t spacing, rsb_nnz_idx_t lbw, rsb_nnz_idx_t ubw, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, void ** VA, rsb_nnz_idx_t *nnzp, rsb_type_t typecode)
+{
+	/* 
+	 * duuu
+	 * lduuu
+	 * llduuu
+	 *  llduu
+	 *   lldu
+	 *    lld
+	 *
+	 * assuming lbw<dim and ubw<dim,
+	 *
+	 * there are dim 'd' type nonzeros
+	 * there are ubw*dim - (ubw*(ubw+1))/2 'u' type nonzeros
+	 * there are lbw*dim - (lbw*(lbw+1))/2 'l' type nonzeros
+	 *
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t nnz = RSB_NNZ_OF_BANDED(dim,lbw,ubw);
+	rsb_coo_idx_t ri=0,ci=0,nzi=0;
+	//rsb_time_t dt;
+	/* overflow is possible here */
+	if(RSB_INVALID_NNZ_COUNT(spacing)){errval = RSB_ERR_BADARGS;goto err;}
+	if(RSB_INVALID_NNZ_COUNT(dim*spacing)){errval = RSB_ERR_BADARGS;goto err;}
+	if(RSB_INVALID_NNZ_COUNT(dim)){errval = RSB_ERR_BADARGS;goto err;}
+	if(lbw>0)if(RSB_INVALID_NNZ_COUNT(lbw)){errval = RSB_ERR_BADARGS;goto err;}
+	if(ubw>0)if(RSB_INVALID_NNZ_COUNT(ubw)){errval = RSB_ERR_BADARGS;goto err;}
+	if((ubw>=dim)||(lbw>=dim)){errval = RSB_ERR_BADARGS;goto err;}
+	if(!VA || !JA || !IA || !nnzp) {errval = RSB_ERR_BADARGS;goto err;}
+	if(RSB_SOME_ERROR(errval = rsb_util_coo_alloc( VA, IA, JA,nnz,typecode,RSB_BOOL_FALSE))){goto err;}
+	//dt = - rsb_time();
+	for(ri=0;ri<lbw;++ri)
+	for(ci=0;ci<RSB_MIN(ri+1+ubw,dim);++ci)
+	{
+		(*IA)[nzi]=ri;
+		(*JA)[nzi]=ci;
+		++nzi;
+	}
+	
+	for(ri=lbw;ri<(dim-ubw);++ri)
+	for(ci = ri-lbw;ci<1+ri+ubw;++ci)
+	{
+		(*IA)[nzi]=ri;
+		(*JA)[nzi]=ci;
+		++nzi;
+	}
+		
+	for(ri = RSB_MAX(lbw,dim-ubw);ri<dim;++ri)
+	for(ci = ri-lbw;ci<dim;++ci)
+	{
+		(*IA)[nzi]=ri;
+		(*JA)[nzi]=ci;
+		++nzi;
+	}
+	
+	//dt += rsb_time();
+	//printf("TIME: %lg, %d\n",dt,omp_get_num_threads());
+	*nnzp=nnz;
+	if((errval = rsb__fill_with_ones(*VA,typecode,nnz,1))!=RSB_ERR_NO_ERROR)goto err;
+	if(spacing>1)
+		rsb__util_coo_arrays_mul(*IA,*JA,spacing,spacing,nnz);
+
+	goto ok;
+err:
+	RSB_CONDITIONAL_FREE(*VA);
+	RSB_CONDITIONAL_FREE(*IA);
+	RSB_CONDITIONAL_FREE(*JA);
+ok:
+	RSB_DO_ERR_RETURN(errval)
+}
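+/*
+ * Worked example for the band layout documented above: with dim=6, lbw=2,
+ * ubw=1 (and spacing 1) one gets
+ *   RSB_NNZ_OF_BANDED(6,2,1) = 6*(1+2+1) - RSB_HSQUARE(2) - RSB_HSQUARE(1)
+ *                            = 24 - 3 - 1 = 20
+ * nonzeros, matching the per-row counts 2+3+4+4+4+3 of the six rows.
+ */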
+
+rsb_err_t rsb__generate_dense_full(rsb_nnz_idx_t dim_r, rsb_nnz_idx_t dim_c, rsb_nnz_idx_t spacing, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, void ** VA, rsb_nnz_idx_t *nnzp, rsb_type_t typecode)
+{
+	/* 
+	 * FIXME : unfinished, untested
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t nnz = dim_r*dim_c,/*n=0,*/lda=dim_c;
+	rsb_coo_idx_t ri=0,ci=0;
+	//rsb_time_t dt;
+	/* FIXME : overflow possible  */
+	if(!VA || !JA || !IA || !nnzp) {errval = RSB_ERR_BADARGS;goto err;}
+	if(RSB_SOME_ERROR(errval = rsb_util_coo_alloc( VA, IA, JA,nnz,typecode,RSB_BOOL_FALSE))){goto err;}
+	//dt = - rsb_time();
+	for(ri=0;ri<dim_r;++ri){
+	for(ci=0;ci<dim_c;++ci)
+	{
+		(*IA)[lda*ri+ci]=ri;
+		(*JA)[lda*ri+ci]=ci;
+	}}
+	/* n=dim*dim; */
+	//dt += rsb_time();
+	//printf("TIME: %lg, %d\n",dt,omp_get_num_threads());
+	*nnzp=nnz;
+	if((errval = rsb__fill_with_ones(*VA,typecode,nnz,1))!=RSB_ERR_NO_ERROR)goto err;
+	if(spacing>1)
+		rsb__util_coo_arrays_mul(*IA,*JA,spacing,spacing,nnz);
+
+	goto ok;
+err:
+	RSB_CONDITIONAL_FREE(*VA);
+	RSB_CONDITIONAL_FREE(*IA);
+	RSB_CONDITIONAL_FREE(*JA);
+ok:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__generate_dense_lower_triangular_coo(rsb_nnz_idx_t dim, rsb_nnz_idx_t spacing, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, void ** VA, rsb_nnz_idx_t *nnzp, rsb_type_t typecode)
+{
+	/* 
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t nnz = (dim%2)?(((dim+1)/2)*dim):((dim/2)*(dim+1)),n=0;
+	rsb_coo_idx_t ri = 0, ci = 0;
+
+	/* overflow is possible here! */
+	if(!VA || !JA || !IA || !nnzp)
+       	{
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+	if(nnz == 0)
+		goto skalloc; /* tolerated corner case */
+
+	if(RSB_SOME_ERROR(errval = rsb_util_coo_alloc( VA, IA, JA,nnz,typecode,RSB_BOOL_FALSE)))
+	{
+		goto err;
+	}
+skalloc:	
+	for(ri=0;ri < dim;++ri)
+	for(ci=0;ci <= ri;++ci)
+	{
+		(*IA)[n]=ri;
+		(*JA)[n]=ci;
+		++n;
+	}
+	*nnzp=nnz;
+	if((errval = rsb__fill_with_ones(*VA,typecode,nnz,1))!=RSB_ERR_NO_ERROR)
+		goto err;
+	if(spacing>1)
+		rsb__util_coo_arrays_mul(*IA,*JA,spacing,spacing,nnz);
+
+	goto ok;
+err:
+	RSB_CONDITIONAL_FREE(*VA);
+	RSB_CONDITIONAL_FREE(*IA);
+	RSB_CONDITIONAL_FREE(*JA);
+ok:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+struct rsb_mtx_t * rsb__generate_dense_lower_triangular(const rsb_coo_idx_t dim, double * timep, rsb_type_t typecode)
+{
+	/*
+	 *  FIXME : unfinished 
+	 *  */
+
+	void * VA=NULL;
+	rsb_coo_idx_t * IA=NULL;
+	rsb_coo_idx_t * JA=NULL;
+	rsb_nnz_idx_t nnz = RSB_MARKER_NNZ_VALUE;
+	struct rsb_mtx_t * mtxAp=NULL;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	//rsb_flags_t flags = RSB_FLAG_OWN_PARTITIONING_ARRAYS | RSB_FLAG_WANT_BCSS_STORAGE;
+	rsb_flags_t flags = RSB_FLAG_DEFAULT_MATRIX_FLAGS;
+	rsb_time_t time;
+	const rsb_blk_idx_t br=1,bc=1;	/* FIXME */
+	rsb_blk_idx_t m=dim,k=dim;
+
+	errval = rsb__generate_dense_lower_triangular_coo(dim,1,&IA,&JA,&VA,&nnz,typecode);
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+
+	time = - rsb_time();
+	mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,m,k,br,bc,flags,&errval);
+	time += rsb_time();
+	if(!mtxAp)
+	{
+		RSB_ERROR(RSB_ERRM_ES);
+		rsb__do_perror(NULL,errval);
+		goto err;
+	}
+	return mtxAp;
+err:
+	RSB_CONDITIONAL_FREE(VA);
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_CONDITIONAL_FREE(JA);
+	return NULL;
+}
+
+struct rsb_mtx_t * rsb__generate_banded(const rsb_blk_idx_t br, const rsb_blk_idx_t bc, const rsb_coo_idx_t rows, const rsb_coo_idx_t cols, rsb_coo_idx_t bw, double * timep, rsb_type_t typecode)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * A function which generates and returns a sparse banded matrix.
+         *
+         * Note that it is not performance optimized, and this will
+         * result in longer benchmarking times.
+         *
+	 * \return a matrix pointer, NULL in case of failure.
+	 */
+
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	void * VA=NULL;
+	rsb_coo_idx_t * IA=NULL;
+	rsb_coo_idx_t * JA=NULL;
+	struct rsb_mtx_t * mtxAp=NULL;
+	rsb_coo_idx_t lbw=0;
+	rsb_coo_idx_t rbw=0;
+	rsb_coo_idx_t ri=0,ci=0;
+
+	rsb_blk_idx_t rob;
+	rsb_blk_idx_t cob;
+	rsb_nnz_idx_t blockcount=0;
+	size_t blockcount_=0;
+	rsb_nnz_idx_t nnz=0;
+	size_t  r=0,c=0;
+	rsb_flags_t flags = RSB_FLAG_DEFAULT_STORAGE_FLAGS;
+	rsb_time_t time;
+
+	if( (bw*bc) > cols )
+	{
+		RSB_ERROR("too much bandwidth..\n");
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+	if(rows != cols)
+	{
+		RSB_ERROR("matrix is not square..\n");
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+	lbw=bw>0?(bw-1)/2:0;
+	rbw=bw-lbw;
+	ri=0;ci=0;/* NOTE : danger : it was signed and -1 before */
+
+	/*!
+	 * Will create a blocked banded matrix with bandwidth expressed
+	 * in number of blocks, as in the experiments from [ijhcp_07] 
+	 * (Buttari, Eijkhout, Langou, Filippone 2007) article from 
+	 * International Journal of High Performance Computing Applications 2007; 21; 467
+	 * 
+	 * \code
+	 * +*+-------+
+	 * +*+*+     |  Example for bandwidth of 1 block
+	 * | +*+     |
+	 * |    ...  |
+	 * |       +*+
+	 * +-------+*+
+	 *
+	 * +*+*+-----+
+	 * +*+*+*+   |  Example for bandwidth of 2 blocks
+	 * | +*+*+   |
+	 * |    ...  |
+	 * |       +*+
+	 * +-------+*+
+	 *
+	 * +*+*+-----+
+	 * +*+*+*+   |  Example for bandwidth of 3 blocks
+	 * +*+*+*+   |
+	 * |    ...  |
+	 * |     +*+*+
+	 * +-----+*+*+
+	 * \endcode
+	 **/
+		
+	rob = rows/br;	/* rows of blocks */
+	cob=cols/bc;	/* cols of blocks */
+
+	for(ri=0;ri<rob;++ri)
+	for(ci = ri-lbw;ci <= ri+rbw;++ci)
+		if(ci>=0 && ci<cob )
+			++blockcount_;	/* yes, we waste resources, but we are in a hurry. FIXME */
+	blockcount=blockcount_;
+		
+	if(blockcount<=0 || (((size_t)blockcount_)!=blockcount))
+	{errval = RSB_ERR_INTERNAL_ERROR;goto err;}/* overflow */
+
+	nnz = blockcount * ( br * bc );
+
+	if(nnz<=0 || nnz<blockcount){errval = RSB_ERR_INTERNAL_ERROR;goto err;}/* overflow */
+
+	if(RSB_SOME_ERROR(errval = rsb_util_coo_alloc(&VA,&IA,&JA,nnz,typecode,RSB_BOOL_FALSE))){goto err;}
+
+	for(ri=0;ri<rob;++ri)
+	for(ci = ri-lbw;ci <= ri+rbw;++ci)
+	if(ci>=0 && ci<cob )
+	for( r=0; r< br;++ r)
+	for( c=0; c< bc;++ c)
+	{
+		*IA = ri*br+r;
+		*JA=ci*bc+c;
+		++IA;++JA;
+	}
+	if((errval = rsb__fill_with_ones(VA,typecode,nnz,1))!=RSB_ERR_NO_ERROR)
+		goto err;
+
+	IA-=nnz;
+	JA-=nnz;
+		
+//	p_r = rsb__util_get_partitioning_array(br,rows,&M_b,flags);
+//	p_c = rsb__util_get_partitioning_array(bc,cols,&K_b,flags);
+
+//	if(! p_r || !p_c) {errval = RSB_ERR_ENOMEM;goto err;}
+
+	time = - rsb_time();
+	mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,RSB_DEFAULT_ROW_BLOCKING,RSB_DEFAULT_COL_BLOCKING,flags,&errval);
+	time += rsb_time();
+	if(timep)*timep=time;
+
+	if(!mtxAp || (RSB_SOME_ERROR(errval))) {errval = RSB_ERR_ENOMEM;goto err;}
+
+	RSB_CONDITIONAL_FREE(VA);
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_CONDITIONAL_FREE(JA);
+	return mtxAp;
+err:
+	RSB_CONDITIONAL_FREE(VA);
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_CONDITIONAL_FREE(JA);
+//	RSB_CONDITIONAL_FREE(p_r);
+//	RSB_CONDITIONAL_FREE(p_c);
+	RSB_MTX_FREE(mtxAp);
+	return NULL;
+}
+
+void rsb__do_fill_with_diag(void *VA, rsb_coo_idx_t *IA, rsb_coo_idx_t *JA, rsb_coo_idx_t ioff, rsb_coo_idx_t joff, rsb_nnz_idx_t nzoff, rsb_type_t typecode, rsb_nnz_idx_t nnz)
+{
+	rsb_nnz_idx_t nzi;
+	void *dVA=((char*)VA)+((size_t)RSB_SIZEOF(typecode))*nzoff;
+	if(VA)
+		rsb__fill_with_ones(dVA,typecode,nnz,1);
+	for(nzi=0;RSB_LIKELY(nzi<nnz);++nzi)
+	{
+		IA[nzoff+nzi]=nzi+ioff;
+		JA[nzoff+nzi]=nzi+joff;
+	}
+}
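+/*
+ * Sketch of the routine above (illustrative values): with ioff=3, joff=0,
+ * nzoff=0 and nnz=2 it writes the coordinates (3,0) and (4,1) into IA/JA,
+ * and (when VA is non-NULL) ones into the corresponding values.
+ */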
+
+#if 0
+struct rsb_mtx_t * rsb_generate_diagonal(const rsb_coo_idx_t rows, double * timep, rsb_type_t typecode)
+{
+	/**
+	 * \ingroup gr_internals
+	 *
+	 * FIXME : untested, undocumented
+ 	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	void * VA = NULL;
+	rsb_nnz_idx_t nnz   = rows, cols = rows;
+	rsb_coo_idx_t * IA   = NULL;
+	rsb_coo_idx_t * JA   = NULL;
+	rsb_coo_idx_t *p_r=NULL,*p_c=NULL;
+	struct rsb_mtx_t * mtxAp=NULL;
+	//rsb_flags_t flags = RSB_FLAG_OWN_PARTITIONING_ARRAYS | RSB_FLAG_WANT_FIXED_BLOCKING_VBR;
+	rsb_time_t time;
+	rsb_blk_idx_t M_b=0,K_b=0;
+	rsb_flags_t flags = RSB_FLAG_OWN_PARTITIONING_ARRAYS;
+	RSB_DO_FLAG_ADD(flags,RSB_FLAG_WANT_BCSS_STORAGE);
+//	RSB_DO_FLAG_ADD(flags,RSB_FLAG_WANT_COLUMN_MAJOR_ORDER);	/* experimental */
+
+	if(RSB_SOME_ERROR(errval = rsb_util_coo_alloc(&VA,&IA,&JA,nnz,typecode,RSB_BOOL_FALSE))){goto err;}
+	p_r = rsb__util_get_partitioning_array( 1, rows , &M_b, flags);
+	p_c = rsb__util_get_partitioning_array( 1, cols , &K_b, flags);
+	if(! p_r || !p_c) {errval = RSB_ERR_ENOMEM;goto err;}
+
+	rsb__do_fill_with_diag(NULL,IA,JA,0,0,0,typecode,nnz);
+	if((errval = rsb__fill_with_ones(VA,typecode,nnz,1))!=RSB_ERR_NO_ERROR)goto err;
+
+	time = - rsb_time();
+	mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,M_b,K_b,flags,&errval);
+	time += rsb_time();
+
+	if(timep)*timep=time;
+
+	if(!mtxAp || (RSB_SOME_ERROR(errval))) {errval = RSB_ERR_ENOMEM;goto err;}
+
+	RSB_CONDITIONAL_FREE(VA);
+	RSB_CONDITIONAL_FREE( IA );
+	RSB_CONDITIONAL_FREE( JA );
+	return mtxAp;
+err:
+	RSB_CONDITIONAL_FREE(VA);
+	RSB_CONDITIONAL_FREE( IA );
+	RSB_CONDITIONAL_FREE( JA );
+	RSB_CONDITIONAL_FREE(p_r);
+	RSB_CONDITIONAL_FREE(p_c);
+	RSB_MTX_FREE(mtxAp);
+	return NULL;
+}
+#endif
+
+struct rsb_mtx_t * rsb__generate_blocked_banded(const rsb_blk_idx_t br, const rsb_blk_idx_t bc, const rsb_coo_idx_t rows, const rsb_coo_idx_t cols, const rsb_coo_idx_t bw, double * timep, rsb_type_t typecode,rsb_bool_t want_lowtri)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * A function which generates and returns a sparse banded matrix.
+         *
+         * Note that it is not performance optimized, and this will
+         * result in longer benchmarking times.
+         *
+	 * \return a matrix pointer, NULL in case of failure.
+	 */
+
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	void * VA = NULL;
+	rsb_coo_idx_t * IA   = NULL;
+	rsb_coo_idx_t * JA   = NULL;
+	rsb_coo_idx_t *p_r=NULL,*p_c=NULL;
+	struct rsb_mtx_t * mtxAp=NULL;
+	rsb_coo_idx_t lbw=(bw-1)/2;
+	rsb_coo_idx_t rbw= bw-lbw;
+	rsb_coo_idx_t ri=0,ci=0;/* NOTE : danger : it was signed and -1 before */
+	size_t rob;
+	size_t cob;
+	rsb_nnz_idx_t nnz = 0;
+	size_t blockcount = 0;
+	rsb_blk_idx_t M_b=0,K_b=0;
+	size_t  r =0, c =0;
+	rsb_flags_t flags = RSB_FLAG_OWN_PARTITIONING_ARRAYS | RSB_FLAG_WANT_BCSS_STORAGE;
+	rsb_time_t time;
+
+	if( bw*bc >= cols ){RSB_ERROR("too much bandwidth..\n");errval = RSB_ERR_BADARGS;goto err;}
+	if(rows != cols){RSB_ERROR("matrix is not square..\n");errval = RSB_ERR_BADARGS;goto err;}
+	/*!
+	 * Will create a blocked banded matrix with bandwidth expressed
+	 * in number of blocks, as in the experiments from [ijhcp_07] 
+	 * (Buttari, Eijkhout, Langou, Filippone 2007) article from 
+	 * International Journal of High Performance Computing Applications 2007; 21; 467
+	 * 
+	 * \code
+	 * +*+-------+
+	 * +*+*+     |  Example for bandwidth of 1 block
+	 * | +*+     |
+	 * |    ...  |
+	 * |       +*+
+	 * +-------+*+
+	 *
+	 * +*+*+-----+
+	 * +*+*+*+   |  Example for bandwidth of 2 blocks
+	 * | +*+*+   |
+	 * |    ...  |
+	 * |       +*+
+	 * +-------+*+
+	 *
+	 * +*+*+-----+
+	 * +*+*+*+   |  Example for bandwidth of 3 blocks
+	 * +*+*+*+   |
+	 * |    ...  |
+	 * |     +*+*+
+	 * +-----+*+*+
+	 * \endcode
+	 **/
+
+	rob = rows/br;	/* rows of blocks */
+	cob=cols/bc;	/* cols of blocks */
+
+	for(ri=0;ri<rob;++ri)
+	for(ci = ri-lbw;ci <= ri+rbw;++ci)
+	if(ci>=0 && ci<cob )
+		++blockcount;	/* yes, we waste resources, but we are in a hurry. FIXME */
+
+	if(blockcount<=0){errval = RSB_ERR_INTERNAL_ERROR;goto err;}/* overflow */
+
+	nnz=blockcount*(br*bc);
+
+	if(nnz<=0){errval = RSB_ERR_INTERNAL_ERROR;goto err;}/* overflow */
+
+	if(RSB_SOME_ERROR(errval = rsb_util_coo_alloc(&VA,&IA,&JA,nnz,typecode,RSB_BOOL_FALSE))){goto err;}
+	if(want_lowtri)
+	{
+		nnz=0;
+		/* FIXME: a dirty hack : will result in half the nonzeros :/ */
+		for(ri=0;ri<rob;++ri)
+		for(ci = ri-lbw;ci <= ri+rbw;++ci)
+		if(ci>=0 && ci<cob )
+		for( r=0; r< br;++ r)
+		for( c=0; c< bc;++ c)
+		{
+			*IA   = ri*br+r;
+			*JA   = ci*bc+c;
+			if(*IA>=*JA)
+				++IA,++JA,++nnz;
+		}
+	}
+	else
+	{
+		for(ri=0;ri<rob;++ri)
+		for(ci = ri-lbw;ci <= ri+rbw;++ci)
+		if(ci>=0 && ci<cob )
+		for( r=0; r< br;++ r)
+		for( c=0; c< bc;++ c)
+		{
+			*IA   = ri*br+r;
+			*JA   = ci*bc+c;
+			++IA;++JA;
+		}
+	}
+	
+	IA -= nnz;
+	JA -= nnz;
+	if(rsb__fill_with_ones(VA,typecode,nnz,1)) { errval = RSB_ERR_INTERNAL_ERROR; goto err; }
+	
+	p_r = rsb__util_get_partitioning_array(br,rows,&M_b,flags);
+	p_c = rsb__util_get_partitioning_array(bc,cols,&K_b,flags);
+
+	if(! p_r || !p_c) {errval = RSB_ERR_ENOMEM;goto err;}
+
+	time = - rsb_time();
+	mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,M_b,K_b,flags,&errval);
+	time += rsb_time();
+	if(timep)*timep=time;
+
+	if(!mtxAp) {goto err;}
+
+	RSB_CONDITIONAL_FREE(VA);
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_CONDITIONAL_FREE(JA);
+	return mtxAp;
+err:
+	RSB_CONDITIONAL_FREE(VA);
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_CONDITIONAL_FREE(JA);
+	RSB_CONDITIONAL_FREE(p_r);
+	RSB_CONDITIONAL_FREE(p_c);
+	RSB_MTX_FREE(mtxAp);
+	return NULL;
+}
+
+rsb_err_t rsb__generate_blocked_banded_mtx(rsb_nnz_idx_t dim, rsb_nnz_idx_t spacing, rsb_nnz_idx_t lbw, rsb_nnz_idx_t ubw, struct rsb_mtx_t ** mtxApp, rsb_type_t typecode)
+{
+	rsb_err_t errval = RSB_ERR_BADARGS;
+	void * VA = NULL;
+	rsb_nnz_idx_t nnz = dim;
+	rsb_coo_idx_t rows = dim, cols = dim;
+	rsb_coo_idx_t * IA = NULL;
+	rsb_coo_idx_t * JA = NULL;
+	if(!mtxApp)
+		goto ret; 
+	errval = rsb__generate_blocked_banded_coo(dim, spacing, lbw, ubw, &IA, &JA, &VA, &nnz, typecode);
+	if(RSB_SOME_ERROR(errval))
+	{
+		goto ret;
+	}
+	*mtxApp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,RSB_DEFAULT_ROW_BLOCKING,RSB_DEFAULT_COL_BLOCKING,RSB_FLAG_DEFAULT_MATRIX_FLAGS,&errval);
+ret:
+	return errval;
+}
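+/*
+ * A minimal usage sketch (illustrative parameters): a 100x100 banded matrix
+ * with lower and upper bandwidth 1 and unit spacing:
+ *
+ *   struct rsb_mtx_t * mtxAp = NULL;
+ *   rsb_err_t errval = rsb__generate_blocked_banded_mtx(100,1,1,1,&mtxAp,
+ *       RSB_NUMERICAL_TYPE_DOUBLE);
+ */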
+
+/* @endcond */
diff --git a/rsb_gen.h b/rsb_gen.h
new file mode 100644
index 0000000..e59e9ab
--- /dev/null
+++ b/rsb_gen.h
@@ -0,0 +1,47 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains matrix generating functions.
+ * */
+
+#ifndef RSB_GEN_H_INCLUDED
+#define RSB_GEN_H_INCLUDED
+
+#include "rsb_internals.h"	/* rsb_coo_matrix_t */
+/* #define RSB_HSQUARE(X) (((X)*(X+1))/2) */
+#define RSB_HSQUARE(X) ((X)<2?(((X)*(X+1))/2):( ((X)*(X-1))/2 + X ) )
+#define RSB_NNZ_OF_BANDED(DIM,L,U) (((DIM)*(1+(L)+(U)))-(RSB_HSQUARE(L))-(RSB_HSQUARE(U)))
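+/* e.g.: RSB_NNZ_OF_BANDED(6,2,1) = 6*4 - RSB_HSQUARE(2) - RSB_HSQUARE(1) = 24-3-1 = 20 */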
+/* struct rsb_mtx_t * rsb_generate_diagonal(const rsb_coo_idx_t rows, double * timep, rsb_type_t typecode); */
+struct rsb_mtx_t * rsb__generate_banded(const rsb_blk_idx_t br, const rsb_blk_idx_t bc, const rsb_coo_idx_t rows, const rsb_coo_idx_t cols, rsb_coo_idx_t bw, double * timep, rsb_type_t typecode);
+struct rsb_mtx_t * rsb__generate_dense_lower_triangular(const rsb_coo_idx_t dim, double * timep, rsb_type_t typecode);
+rsb_err_t rsb__generate_dense_lower_triangular_coo(rsb_nnz_idx_t dim, rsb_nnz_idx_t spacing, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, void ** VA, rsb_nnz_idx_t *nnzp, rsb_type_t typecode);
+rsb_err_t rsb__generate_dense_full(rsb_nnz_idx_t dim_r, rsb_nnz_idx_t dim_c, rsb_nnz_idx_t spacing, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, void ** VA, rsb_nnz_idx_t *nnzp, rsb_type_t typecode);
+rsb_err_t rsb__generate_blocked_banded_coo(rsb_nnz_idx_t dim, rsb_nnz_idx_t spacing, rsb_nnz_idx_t lbw, rsb_nnz_idx_t ubw, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, void ** VA, rsb_nnz_idx_t *nnzp, rsb_type_t typecode);
+struct rsb_mtx_t * rsb__generate_blocked_banded(const rsb_blk_idx_t br, const rsb_blk_idx_t bc, const rsb_coo_idx_t rows, const rsb_coo_idx_t cols, const rsb_coo_idx_t bw, double * timep, rsb_type_t typecode,rsb_bool_t want_lowtri);
+void rsb__do_fill_with_diag(void *VA, rsb_coo_idx_t *IA, rsb_coo_idx_t *JA, rsb_coo_idx_t ioff, rsb_coo_idx_t joff, rsb_nnz_idx_t nzoff, rsb_type_t typecode, rsb_nnz_idx_t nnz);
+rsb_err_t rsb__generate_blocked_banded_mtx(rsb_nnz_idx_t dim, rsb_nnz_idx_t spacing, rsb_nnz_idx_t lbw, rsb_nnz_idx_t ubw, struct rsb_mtx_t ** mtxApp, rsb_type_t typecode);
+#endif /* RSB_GEN_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_genmm.c b/rsb_genmm.c
new file mode 100644
index 0000000..d4fca2a
--- /dev/null
+++ b/rsb_genmm.c
@@ -0,0 +1,220 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*
+ * unsorted random Matrix Market generator
+ * */
+/**
+ @file
+ @author Michele Martone
+ @brief
+ A toy program generating sparse matrices.
+ */
+
+//#include <stdlib.h>	/* bsearch, calloc, malloc */
+//#include <stdio.h>	/* printf */
+//#include "rsb.h"
+#include "rsb_common.h"
+#include "rsb_internals.h"
+
+//#define RSB_CONDITIONAL_FREE(p) {if((p))free((p));(p)=NULL;}
+
+int g_allow_duplicates;
+
+static rsb_err_t gen_mm(rsb_coo_idx_t r, rsb_coo_idx_t c, rsb_nnz_idx_t nnz)
+{
+	/*
+	 * Generates a random r x c matrix with nnz nonzeros and prints it in
+	 * Matrix Market format on standard output.
+	 * */
+	rsb_coo_idx_t * IA=NULL,*JA=NULL;
+	float *VA;
+	rsb_nnz_idx_t i,k;
+	rsb_nnz_idx_t duplicates = 0;
+	
+	/* We generate two integer index arrays and a float values array for an r x c matrix */
+	IA = rsb__calloc(sizeof(rsb_coo_idx_t)*nnz);
+	JA = rsb__calloc(sizeof(rsb_coo_idx_t)*nnz);
+	VA = rsb__calloc(sizeof(float)*nnz);
+	if(!VA || !IA || !JA)goto err;
+	/* We populate the arrays with random coordinates (please note that there could be duplicates) */
+	for(k=0;k<nnz;++k) {IA[k]=rand()%r;} 
+	for(k=0;k<nnz;++k) {JA[k]=rand()%c;}
+	for(k=0;k<nnz;++k) {VA[k]=(float)rand();} 
+	/* with a very stupid algorithm for avoiding duplicates (COULD TAKE FOREVER!) */
+	duplicates = 0;
+	if(!g_allow_duplicates)
+	do
+	{
+		/* WARNING : THERE IS NO PROOF THIS WILL TERMINATE, AS IT DEPENDS ON THE PSEUDORANDOM NUMBER GENERATOR */
+		duplicates=0;
+		for(k=0;k<nnz;++k) for(i=0;i<k;++i)if(IA[i]==IA[k]) if(JA[i]==JA[k])
+		{
+			IA[k]=rand()%r;
+			JA[k]=rand()%c;
+			++duplicates;
+		}
+	}
+	while(duplicates);
+
+	printf("%s","%%MatrixMarket matrix coordinate real general\n");
+	printf("%d %d %d\n",r,c,nnz);
+	for(k=0;k<nnz;++k)
+	{
+		printf("%6d %6d %20g\n",IA[k]+1,JA[k]+1,VA[k]);
+	}
+
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_CONDITIONAL_FREE(VA);
+	RSB_CONDITIONAL_FREE(JA);
+	return RSB_ERR_NO_ERROR;
+err:
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_CONDITIONAL_FREE(VA);
+	RSB_CONDITIONAL_FREE(JA);
+	return RSB_ERR_GENERIC_ERROR;
+}
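+/*
+ * The routine above emits plain Matrix Market output; e.g. with r=2, c=2,
+ * nnz=2 it may print (values being random):
+ *
+ *   %%MatrixMarket matrix coordinate real general
+ *   2 2 2
+ *        1      2          1.25587e+09
+ *        2      1          3.59108e+08
+ */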
+
+rsb_option options[] = {
+    {"nnz",     required_argument, NULL, 'n'},  
+    {"cols",     required_argument, NULL, 'c'},  
+    {"rows",     required_argument, NULL, 'r'},  
+    {"banded",     required_argument, NULL, 'b'},  
+    {"allow-duplicates",     no_argument, NULL, 'd'},  
+    {"generate-matrix",     no_argument, NULL, 'g'},  
+    {0,0,0,0}
+};
+
+//size_t strlen(char *s){char *ss=s;while(*s)++s;return s-ss;}
+
+int rsb_genmm_main(int argc,char *argv[])
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int c;
+	int              opt_index = 0;
+	rsb_coo_idx_t rows=0,cols=0;
+	rsb_nnz_idx_t nnz=0;
+	double want_percentage=0.0;
+	rsb_bool_t g_want_banded = 0;
+	rsb_bool_t g_diagonal = 0;
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_DEFAULT_INTEGER;
+
+	g_allow_duplicates = 0;
+	if(rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
+	{
+		RSB_ERROR("initialization error!\n");
+		goto err;
+	}
+    	for (;;)
+	{
+		c = rsb_getopt_long(argc, argv, "gDb:dr:c:n:", options, &opt_index);
+		if (c == -1)break;
+
+		switch (c)
+		{
+			case 'b':
+			g_want_banded = rsb__util_atoi(optarg);
+			g_want_banded++;/* offset by one, so that a requested bandwidth of zero still reads as nonzero */
+			break;
+			case 'D':
+			g_diagonal = 1;
+			break;
+			case 'd':
+			g_allow_duplicates = 1;
+			break;
+			case 'c':
+			cols = rsb__util_atoi(optarg);
+			break;
+			case 'n':
+			nnz = rsb__util_atoi(optarg);
+			if(*optarg && optarg[strlen(optarg)-1]=='%')want_percentage=nnz;
+			break;
+			case 'r':
+			rows = rsb__util_atoi(optarg);
+			break;
+			case 'g':
+			break;
+		}
+	}
+
+	if( g_want_banded != 0 )
+	{
+		/* we want a banded matrix */
+		struct rsb_mtx_t * mtxAp;
+		mtxAp = rsb__generate_banded(1, 1 , rows, cols, /*rows/4*/ g_want_banded-1 , NULL, typecode);
+		/* errval = rsb__generate_blocked_banded_mtx(rows,1,g_want_banded-1,g_want_banded-1,&mtxAp,typecode); */
+		if(!mtxAp) goto err;
+		rsb__do_file_mtx_save(mtxAp,NULL); /* FIXME: errval = rsb_pr..*/
+		return RSB_PROGRAM_SUCCESS;
+	}
+
+	if( want_percentage )
+	{
+		want_percentage *= 0.01;
+		want_percentage *= rows;
+		want_percentage *= cols;
+		nnz = want_percentage ;
+		if(!nnz)++nnz;
+	}
+
+	if( g_diagonal )
+	{
+		/* we want a diagonal matrix */
+		struct rsb_mtx_t * mtxAp = NULL;
+		/* mtxAp = rsb_generate_diagonal( rows, NULL, typecode); */
+		errval = rsb__generate_blocked_banded_mtx(rows,1,0,0,&mtxAp,typecode);
+		if(!mtxAp) goto err;
+		rsb__do_file_mtx_save(mtxAp,NULL);
+		return RSB_PROGRAM_SUCCESS;
+	}
+
+	if( nnz < 1 )
+	{
+		fprintf(stderr,
+			"usage: %s -g -r rows -c cols \n"
+			"\t [ -n nonzeros [%%] ] "
+			"| [ -b bandwidth ] (-b for a banded matrix of the given bandwidth)\n"
+			"\t[-d ] (-d means that duplicates are allowed)\n",
+			argv[0]
+			);
+		return RSB_PROGRAM_ERROR;
+	}
+	if( nnz > rows * cols )
+	{
+		fprintf(stderr,"can't generate more nonzeros than rows x columns!\n");
+		return RSB_PROGRAM_ERROR;
+	}
+	return RSB_ERR_TO_PROGRAM_ERROR(gen_mm(rows,cols,nnz));
+err:	
+	fprintf(stderr,"some error occurred during matrix generation\n");
+	/* no deallocation, though */
+	return RSB_PROGRAM_ERROR;
+}
+
+/*
+int main(int argc,char *argv[])
+{
+	return rsb_genmm_main(argc,argv);
+}
+*/
+
+/* @endcond */
diff --git a/rsb_get.c b/rsb_get.c
new file mode 100644
index 0000000..e3fe2cd
--- /dev/null
+++ b/rsb_get.c
@@ -0,0 +1,1878 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains matrix getter functions.
+ * */
+
+#include "rsb_internals.h"
+
+#define RSB_VA_MEMCPY(VD,VS,DOFF,SOFF,NNZ,ES) \
+	RSB_MEMCPY(((rsb_char_t*)(VD))+(ES)*(DOFF),((rsb_char_t*)(VS))+(ES)*(SOFF),(ES)*(NNZ))
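+/* e.g.: RSB_VA_MEMCPY(VD,VS,0,5,3,sizeof(double)) copies the three elements
+   VS[5..7] into VD[0..2], with offsets and count scaled by the element size. */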
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+rsb_err_t rsb__do_get_coo(const struct rsb_mtx_t *mtxAp, rsb_byte_t ** VA, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, rsb_flags_t flags )
+{
+	/*! 
+	 * \ingroup gr_internals
+	 *  Returns the matrix converted to a coordinate storage format.
+	 *
+	 * \param VA  (optional) the values array pointer, sized at least for mtxAp->nnz elements of matrix type
+	 * \param IA  (optional) an integer array pointer for row    coordinates
+	 * \param JA  (optional) an integer array pointer for column coordinates
+	 * \return RSB_ERR_NO_ERROR on correct operation, an error code (see \ref errors_section) otherwise.
+	 *
+	 * WARNING : If any of the given arrays points to NULL, it will be allocated accordingly.
+	 *
+	 * The entire matrix will be returned in COO format, in the specified VA,IA,JA arrays.
+	 * No more than mtxAp->nnz elements will be written to the VA, IA and JA arrays.
+	 * TODO: according to flags, may sort according to rows or columns!
+	 * Note: the filled arrays could turn out smaller;
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	size_t el_size;
+	rsb_nnz_idx_t nnz = 0;/* FIXME */
+
+#if RSB_ALLOW_ZERO_DIM
+	if(RSB_ANY_MTX_DIM_ZERO(mtxAp))
+		goto err; /* FIXME: skipping further error checks */
+#endif
+
+	if( !IA || !JA || !VA || !mtxAp)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	el_size = mtxAp->el_size;
+
+	/* Note : we do not allocate all arrays at once (but we do free them all at once)! */
+	if(!*VA ) *VA= rsb__malloc(el_size     * mtxAp->nnz);
+	if(!*IA ) *IA= rsb__malloc(sizeof(rsb_coo_idx_t) * mtxAp->nnz);
+	if(!*JA ) *JA= rsb__malloc(sizeof(rsb_coo_idx_t) * mtxAp->nnz);
+
+	if(!*VA || !*IA || !*JA)
+	{
+		errval = RSB_ERR_ENOMEM;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	if( (errval = rsb__do_get_coo_noalloc(mtxAp,*VA,*IA,*JA,&nnz,flags)) == RSB_ERR_NO_ERROR)
+		return RSB_ERR_NO_ERROR;
+	else
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+err:
+	/* Note : we free all arrays at once ! */
+	RSB_CONDITIONAL_FREE(*IA);
+	RSB_CONDITIONAL_FREE(*JA);
+	RSB_CONDITIONAL_FREE(*VA);
+	RSB_DO_ERR_RETURN(errval)
+}
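+/*
+ * A minimal usage sketch (mtxAp assumed valid): passing pointers to NULL
+ * arrays makes the routine allocate the output arrays itself:
+ *
+ *   rsb_byte_t * VA = NULL;
+ *   rsb_coo_idx_t * IA = NULL, * JA = NULL;
+ *   rsb_err_t errval = rsb__do_get_coo(mtxAp,&VA,&IA,&JA,RSB_FLAG_NOFLAGS);
+ */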
+
+rsb_err_t rsb__do_get_row_dense(const struct rsb_mtx_t *mtxAp , void * row , rsb_blk_idx_t rowindex)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * Will write the entire row rowindex of matrix \c mtxAp into the row vector.
+	 *
+	 * \param rowindex the specified row
+	 * \param row an already allocated vector of the same type as \c mtxAp.
+	 * \param mtxAp a valid pointer to a rsb_mtx_t structure
+	 * \return RSB_ERR_NO_ERROR on correct operation, an error code (see \ref errors_section) otherwise.
+	 *
+	 * \note This function is slow.
+	 * */
+	register rsb_byte_t	*bp = NULL;
+	register rsb_nnz_idx_t baserow,basecolumn;
+	register rsb_blk_idx_t rows,columns;
+	register rsb_blk_idx_t blockrow,blockcolumn;
+	size_t el_size = mtxAp->el_size;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!row || rowindex <0 || rowindex >= mtxAp->nr)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+	
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t i,j;
+		const struct rsb_mtx_t * submatrix;
+
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		{
+			rsb_coo_idx_t moff;
+			rsb_coo_idx_t koff;
+			if(submatrix)
+			{
+				moff = submatrix->roff-mtxAp->roff;
+				koff = submatrix->coff-mtxAp->coff;
+			}
+		if(submatrix && rowindex >= moff && rowindex < submatrix->nr+submatrix->roff )
+		{
+			errval = rsb__do_get_row_dense(submatrix, ((rsb_byte_t*)row)+el_size*(j*koff) , rowindex-(i*moff));
+			if(RSB_SOME_ERROR(errval))
+				RSB_PERR_GOTO(err,RSB_ERRM_ES)
+		}}
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(rowindex));	
+#if RSB_EXPERIMENTAL_USE_PURE_BCSS
+	if(rsb__is_bcsr_matrix(mtxAp))
+	{
+		/* plain BCSR case */
+		rsb_blk_idx_t br, bc;
+		rsb_nnz_idx_t frb,lrb,bi;
+
+		rsb__get_blocking_size(mtxAp, &br, &bc);
+	
+		frb = mtxAp->bpntr[ rowindex/br   ];
+		lrb = mtxAp->bpntr[(rowindex/br)+1];
+		for(bi=frb;RSB_LIKELY(bi<lrb);++bi)
+		{
+			/* FIXME : numerical overflow possible, in br*bc and alike */
+			rsb_memcpy(
+				((rsb_byte_t*)row)+el_size * mtxAp->bindx[bi]*bc,
+				((rsb_byte_t*)mtxAp->VA)+(bi*br*bc + bc*(rowindex - (rowindex/br)*br))*el_size,
+				el_size*bc
+			);
+		}
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+	else
+	if(rsb__is_bcsc_matrix(mtxAp))
+	{
+		rsb_blk_idx_t br, bc;
+		rsb_nnz_idx_t bri, bci, bi;
+
+		if((errval = rsb__get_blocking_size(mtxAp, &br, &bc))!=RSB_ERR_NO_ERROR)
+			RSB_PERR_GOTO(err,RSB_ERRM_ES)
+
+		bri= rowindex/br ;	/* the block row of interest */
+
+
+		RSB_DEBUG_ASSERT(br>0 && bc>0);
+		RSB_DEBUG_ASSERT(el_size);
+
+		for(bci=0;RSB_LIKELY(bci<mtxAp->Mdim);++bci)
+		{
+			rsb_byte_t * dst = ((rsb_byte_t*)row) + el_size * bc * bci;
+
+			if((bi = rsb__seek_nnz_idx_t(mtxAp->bindx+mtxAp->bpntr[bci+0],bri,mtxAp->bpntr[bci+1]-mtxAp->bpntr[bci+0]))!=RSB_MARKER_NNZ_VALUE)
+			{
+				rsb_nnz_idx_t boff = rowindex-br*bri;
+				bi += mtxAp->bpntr[bci+0];
+				RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(bci));
+				RSB_DEBUG_ASSERT(bc );
+				RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(bc*bci));
+				rsb_memcpy(dst,((rsb_byte_t*)mtxAp->VA)+(bi*br*bc+bc*boff)*el_size,el_size*bc);
+			}
+		}
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+#endif /* RSB_EXPERIMENTAL_USE_PURE_BCSS */
+
+	{
+	RSB_GET_FIRST_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		rsb_blk_idx_t c;
+		if(rowindex >= baserow && rowindex <baserow+rows)
+		for(c=0;c<columns;++c)
+		{
+			rsb_byte_t*src = (rsb_byte_t*)(bp+RSB_GET_INTRA_BLOCK_OFFSET(rowindex,basecolumn+c,blockrow,blockcolumn,mtxAp));
+			rsb_byte_t*dst = ((rsb_byte_t*)row) + el_size * (basecolumn+c);
+			RSB_NUMERICAL_TYPE_SET_ELEMENT(dst,src,mtxAp->typecode);	/* FIXME : SLOW */
+		}
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	errval = RSB_ERR_NO_ERROR;
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
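+/*
+ * A minimal usage sketch (mtxAp assumed valid, of type double): extract row 0
+ * into a caller-allocated dense buffer of mtxAp->nc elements:
+ *
+ *   double * row = calloc(mtxAp->nc,sizeof(double));
+ *   rsb_err_t errval = rsb__do_get_row_dense(mtxAp,row,0);
+ */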
+
+rsb_err_t rsb_do_get_rows_nnz(const struct rsb_mtx_t *mtxAp, rsb_blk_idx_t fr, rsb_blk_idx_t lr, rsb_nnz_idx_t  *rnz)
+{
+        /*!
+	 * \ingroup gr_internals
+         *
+	 * FIXME : NEW, TO DOCUMENT
+         * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(fr));
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(lr));
+	RSB_DEBUG_ASSERT(mtxAp);
+	RSB_DEBUG_ASSERT(lr< mtxAp->roff+mtxAp->nr);
+	RSB_DEBUG_ASSERT(fr>=mtxAp->roff);
+
+	// 20101023 the following breaks `make tests`, for some uninvestigated reason
+	//if(fr==0 && lr==mtxAp->nr-1)
+	//	return mtxAp->nnz;
+
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t i,j;
+		const struct rsb_mtx_t * submatrix;
+
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix && RSB_SUBMATRIX_INTERSECTS_ROWS(submatrix,fr,lr))
+		{
+			const rsb_coo_idx_t fri = RSB_SUBMATRIX_ROWS_INTERSECTION_FIRST(submatrix,fr);
+			const rsb_coo_idx_t lri = RSB_SUBMATRIX_ROWS_INTERSECTION_LAST(submatrix,lr);
+			errval = rsb_do_get_rows_nnz(submatrix,fri,lri,rnz);
+			if(RSB_SOME_ERROR(errval))
+				RSB_PERR_GOTO(err,RSB_ERRM_ES)
+		}
+	}
+	else
+	{
+		/* 
+		 * leaf matrix processing
+		 * */
+		const rsb_coo_idx_t fri = RSB_SUBMATRIX_ROWS_INTERSECTION_FIRST(mtxAp,fr)-mtxAp->roff;
+		const rsb_coo_idx_t lri = RSB_SUBMATRIX_ROWS_INTERSECTION_LAST(mtxAp,lr) -mtxAp->roff;
+		if(rsb__is_coo_matrix(mtxAp))
+		{	
+			rsb_nnz_idx_t nnz1, nnz0, nnz = mtxAp->nnz;
+			if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_USE_HALFWORD_INDICES))
+			{
+				RSB_DECLARE_CONST_HALFCOO_IARRAY_FROM_MATRIX(IA,mtxAp)
+				// we search the beginning of line fri
+				nnz0 = rsb__nnz_split_hcoo_bsearch(IA,fri,nnz);
+				// we search the end of line lri
+				nnz1 = nnz0+rsb__nnz_split_hcoo_bsearch(IA+nnz0,lri+1,nnz-nnz0);
+				*rnz += nnz1-nnz0;
+			}
+			else
+			{
+				RSB_DECLARE_CONST_FULLCOO_IARRAY_FROM_MATRIX(IA,mtxAp)
+				// we search the beginning of line fri
+				nnz0 = rsb__nnz_split_coo_bsearch(IA,fri,nnz);
+				// we search the end of line lri
+				nnz1 = nnz0+rsb__nnz_split_coo_bsearch(IA+nnz0,lri+1,nnz-nnz0);
+				*rnz += nnz1-nnz0;
+			}
+		}
+		else
+		if(rsb__is_csr_matrix(mtxAp))
+		{
+			*rnz += mtxAp->bpntr[lri+1]-mtxAp->bpntr[fri];
+		}
+		else
+			errval = RSB_ERR_UNIMPLEMENTED_YET;
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb_do_get_columns_sparse(const struct rsb_mtx_t *mtxAp , void * RSB_RESTRICT VA , rsb_blk_idx_t fc, rsb_blk_idx_t lc, rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t * RSB_RESTRICT JA, rsb_nnz_idx_t * RSB_RESTRICT CP, rsb_coo_idx_t ioff, rsb_coo_idx_t joff)
+{
+        /*!
+	 * \ingroup gr_internals
+	 *
+	 * JA can be NULL.
+         * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(fc));
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(lc));
+	RSB_DEBUG_ASSERT(mtxAp);
+	RSB_DEBUG_ASSERT(lc< mtxAp->roff+mtxAp->nr);
+	RSB_DEBUG_ASSERT(fc>=mtxAp->roff);
+
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t i,j;
+		const struct rsb_mtx_t * submatrix;
+
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix && RSB_SUBMATRIX_INTERSECTS_ROWS(submatrix,fc,lc))
+		{
+			const rsb_coo_idx_t fci = RSB_SUBMATRIX_COLS_INTERSECTION_FIRST(submatrix,fc);
+			const rsb_coo_idx_t lci = RSB_SUBMATRIX_COLS_INTERSECTION_LAST(submatrix,lc);
+			errval = rsb_do_get_columns_sparse(submatrix,VA,fci,lci,IA,JA,CP,ioff,joff);
+			if(RSB_SOME_ERROR(errval))
+				RSB_PERR_GOTO(err,RSB_ERRM_ES)
+		}
+	}
+	else
+	{
+		/* 
+		 * leaf matrix processing
+		 *
+		 * FIXME: this code could (should?) be improved, regarding its performance
+		 * */
+		const rsb_coo_idx_t roff = mtxAp->roff;
+		const rsb_coo_idx_t coff = mtxAp->coff;
+		//const rsb_coo_idx_t fci = RSB_SUBMATRIX_COLS_INTERSECTION_FIRST(mtxAp,fc);
+		//const rsb_coo_idx_t lci = RSB_SUBMATRIX_COLS_INTERSECTION_LAST(mtxAp,lc);
+		rsb_nnz_idx_t nnz = mtxAp->nnz/*,dnnz = 0*/;
+		register rsb_coo_idx_t i;
+		register rsb_nnz_idx_t n;
+		//const void * MVA = mtxAp->VA;
+		const rsb_coo_idx_t * bindx = mtxAp->bindx;
+
+		if(!rsb__is_csr_matrix(mtxAp))
+			return RSB_ERR_UNIMPLEMENTED_YET;
+
+		if(rsb__is_coo_matrix(mtxAp))
+		{
+#if 0
+			if( mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES_COO)
+			{
+				for(n=0;n<mtxAp->nnz;++n)
+				{
+					rsb_coo_idx_t ij = bindx[n],j = RSB_COO_HALFWORDS_VALUES_UNPACK_UJ(ij);
+					rsb_nnz_idx_t idx = CP[coff+j];
+					i = RSB_COO_HALFWORDS_VALUES_UNPACK_LI(ij);
+					RSB_VA_MEMCPY(VA,mtxAp->VA,idx,n,1,mtxAp->el_size);
+						if(IA)IA[idx] = i+roff+ioff;
+					if(JA)JA[idx] = j+coff+joff;
+					CP[coff+j]++;
+				}
+			}
+			else
+#endif
+			if( mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES)
+			{
+				RSB_DECLARE_CONST_HALFCOO_ARRAYS_FROM_MATRIX(mIA,mJA,mtxAp)
+				for(n=0;RSB_LIKELY(n<nnz);++n)
+				{
+					rsb_half_idx_t i = mIA[n],j = mJA[n];
+					rsb_nnz_idx_t idx = CP[coff+j];
+					RSB_VA_MEMCPY(VA,mtxAp->VA,idx,n,1,mtxAp->el_size);
+					if(IA)IA[idx] = roff+ioff+i;
+					if(JA)JA[idx] = coff+joff+j;
+					CP[coff+j]++;
+				}
+			}
+			else
+			{
+				RSB_DECLARE_CONST_FULLCOO_ARRAYS_FROM_MATRIX(mIA,mJA,mtxAp)
+				for(n=0;RSB_LIKELY(n<nnz);++n)
+				{
+					rsb_coo_idx_t i = mIA[n],j = mJA[n];
+					rsb_nnz_idx_t idx = CP[coff+j];
+					RSB_VA_MEMCPY(VA,mtxAp->VA,idx,n,1,mtxAp->el_size);
+					if(IA)IA[idx] = roff+ioff+i;
+					if(JA)JA[idx] = coff+joff+j;
+					CP[coff+j]++;
+				}
+			}
+		}
+		else
+		{
+		if(mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES_CSR)
+		{
+			const rsb_half_idx_t *hbindx = (const rsb_half_idx_t *)bindx;
+			for(i=0;RSB_LIKELY(i<mtxAp->nr);++i)
+			for(n=mtxAp->bpntr[i];RSB_LIKELY(n<mtxAp->bpntr[i+1]);++n)
+			{
+				rsb_coo_idx_t j = hbindx[n];// FIXME: is this mixed types assignment correct ?
+				rsb_nnz_idx_t idx = CP[coff+j];
+				RSB_VA_MEMCPY(VA,mtxAp->VA,idx,n,1,mtxAp->el_size);
+				if(IA)IA[idx] = i+roff+ioff;
+				if(JA)JA[idx] = j+coff+joff;
+				CP[coff+j]++;
+			}
+		}
+		else
+		{
+			for(i=0;RSB_LIKELY(i<mtxAp->nr);++i)
+			for(n=mtxAp->bpntr[i];RSB_LIKELY(n<mtxAp->bpntr[i+1]);++n)
+			{
+				rsb_coo_idx_t j = bindx[n];
+				rsb_nnz_idx_t idx = CP[coff+j];
+//				RSB_INFO("%d %d %d %d %d %d\n",roff,coff,i,j,n,idx);
+				RSB_VA_MEMCPY(VA,mtxAp->VA,idx,n,1,mtxAp->el_size);
+				if(IA)IA[idx] = i+roff+ioff;
+				if(JA)JA[idx] = j+coff+joff;
+				CP[coff+j]++;
+			}
+//			if(roff>0)exit(-1);
+		}}
+	}
+err:
+        RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_get_rows_sparse_rec(const struct rsb_mtx_t *mtxAp , void * RSB_RESTRICT VA , rsb_blk_idx_t fr, rsb_blk_idx_t lr, rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t * RSB_RESTRICT JA, rsb_nnz_idx_t * RSB_RESTRICT rnz, rsb_coo_idx_t ioff, rsb_coo_idx_t joff)
+{
+        /*!
+	 * \ingroup gr_internals
+	 *
+	 * IA can be NULL.
+	 * FIXME: shall rewrite this to be faster.
+         * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(fr));
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(lr));
+	RSB_DEBUG_ASSERT(mtxAp);
+	RSB_DEBUG_ASSERT(lr< mtxAp->roff+mtxAp->nr);
+	RSB_DEBUG_ASSERT(fr>=mtxAp->roff);
+
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t i,j;
+		const struct rsb_mtx_t * submatrix;
+
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix && RSB_SUBMATRIX_INTERSECTS_ROWS(submatrix,fr,lr))
+		{
+			const rsb_coo_idx_t fri = RSB_SUBMATRIX_ROWS_INTERSECTION_FIRST(submatrix,fr);
+			const rsb_coo_idx_t lri = RSB_SUBMATRIX_ROWS_INTERSECTION_LAST(submatrix,lr);
+			errval = rsb__do_get_rows_sparse_rec(submatrix,VA,fri,lri,IA,JA,rnz,ioff,joff);
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_PERR_GOTO(err,RSB_ERRM_ES)
+			}
+		}
+	}
+	else
+	{
+		/* 
+		 * leaf matrix processing
+		 *
+		 * FIXME: this code could (should?) be improved, regarding its performance
+		 * */
+		const rsb_coo_idx_t roff = mtxAp->roff;
+		const rsb_coo_idx_t coff = mtxAp->coff;
+		const rsb_coo_idx_t fri = RSB_SUBMATRIX_ROWS_INTERSECTION_FIRST(mtxAp,fr)-roff;
+		const rsb_coo_idx_t lri = RSB_SUBMATRIX_ROWS_INTERSECTION_LAST (mtxAp,lr)-roff;
+		rsb_nnz_idx_t nnz,dnnz = 0;
+		const rsb_nnz_idx_t zoff = *rnz;
+		rsb_nnz_idx_t doff = 0;
+		register rsb_coo_idx_t i;
+		const void * MVA = mtxAp->VA;
+//		if(!rsb__is_csr_matrix(mtxAp))
+		//	return RSB_ERR_UNIMPLEMENTED_YET;
+		//RSB_INFO("!!\n");
+
+#define RSB_CSR2COO_MEMCPY_(VD,ID,JD,VS,I,JS,DOFF,SOFF,NNZ,ES,J0) \
+	{	\
+		if(ID)rsb__util_coo_array_set(((rsb_coo_idx_t*)(ID))+(DOFF),(NNZ),(I)); \
+		RSB_IA_MEMCPY(JD,JS,DOFF,SOFF,NNZ,J0);	\
+	}
+
+		if(rsb__is_coo_matrix(mtxAp))
+		{	
+			if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_USE_HALFWORD_INDICES))
+			{
+				rsb_nnz_idx_t nnz1, nnz0, nnz = mtxAp->nnz;
+				RSB_DECLARE_CONST_HALFCOO_ARRAYS_FROM_MATRIX(mIA,mJA,mtxAp)
+				// we search the beginning of line fri
+				nnz0 = rsb__nnz_split_hcoo_bsearch(mIA,fri,nnz);
+				// we search the end of line lri
+				nnz1 = nnz0+rsb__nnz_split_hcoo_bsearch(mIA+nnz0,lri+1,nnz-nnz0);
+				nnz = nnz1-nnz0;
+				if(nnz>0)
+				{
+					RSB_DEBUG_ASSERT( nnz <= mtxAp->nc );
+					/* FIXME: need specialized little functions or macros, here */
+					if(IA)
+						for(i=nnz0;i<nnz1;++i)
+							IA[zoff+i-nnz0] = mIA[i],
+							IA[zoff+i-nnz0] += mtxAp->roff+ioff;
+					for(i=nnz0;i<nnz1;++i)
+						JA[zoff+i-nnz0] = mJA[i],
+						JA[zoff+i-nnz0] += mtxAp->coff+joff;
+
+					RSB_VA_MEMCPY(VA,MVA,zoff,nnz0,nnz,mtxAp->el_size);
+					doff = nnz0;
+					dnnz = nnz;
+				}
+				if(nnz<0)
+				{
+					errval = RSB_ERR_INTERNAL_ERROR;
+					RSB_PERR_GOTO(err,RSB_ERRM_ES)
+				}
+			}
+			else
+			{
+				rsb_nnz_idx_t nnz1,nnz0,nnz = mtxAp->nnz;
+				RSB_DECLARE_CONST_FULLCOO_ARRAYS_FROM_MATRIX(mIA,mJA,mtxAp)
+				// we search the beginning of line fri
+				nnz0 = rsb__nnz_split_coo_bsearch(mIA,fri,nnz);
+				// we search the end of line lri
+				nnz1 = nnz0+rsb__nnz_split_coo_bsearch(mIA+nnz0,lri+1,nnz-nnz0);
+				nnz = nnz1-nnz0;
+				if(nnz>0)
+				{
+					RSB_DEBUG_ASSERT( nnz <= mtxAp->nc );
+					RSB_COA_MEMCPY(JA,mJA,zoff,nnz0,nnz);
+					rsb__util_coo_array_add(JA+zoff,nnz,mtxAp->coff+joff);
+					if(IA)
+						RSB_COA_MEMCPY(IA,mIA,zoff,nnz0,nnz),
+						rsb__util_coo_array_add(IA+zoff,nnz,mtxAp->roff+ioff);
+					RSB_VA_MEMCPY(VA,MVA,zoff,nnz0,nnz,mtxAp->el_size);
+					doff = nnz0;
+					dnnz = nnz;
+				}
+				if(nnz<0)
+				{
+					errval = RSB_ERR_INTERNAL_ERROR;
+					RSB_PERR_GOTO(err,RSB_ERRM_ES)
+				}
+//				RSB_INFO("COO OUT (%d..%d) (%d nnz) (@ %d)\n",fri,lri,nnz,doff);
+			}
+		}
+		else /* csr ! FIXME: why was RSB_FLAG_USE_HALFWORD_INDICES_CSR being used ?? */
+		{
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_USE_HALFWORD_INDICES))
+		// if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_USE_HALFWORD_INDICES_CSR))
+		{
+			for(i=fri;RSB_LIKELY(i<=lri);++i)
+			{
+				nnz = mtxAp->bpntr[i+1]-mtxAp->bpntr[i];
+				if(IA)rsb__util_coo_array_set(IA+(zoff+dnnz),nnz,i+roff+ioff);
+				dnnz += nnz;
+			}
+			RSB_MEMCPY(JA+zoff,((rsb_half_idx_t*)mtxAp->bindx)+mtxAp->bpntr[fri],
+					sizeof(rsb_half_idx_t)*dnnz);
+			rsb__do_switch_array_to_fullword_coo((rsb_half_idx_t *)(JA+zoff),dnnz,mtxAp->coff+joff);
+			//rsb__util_coo_array_add(JA+zoff,dnnz,mtxAp->coff+joff);
+			doff = mtxAp->bpntr[fri];
+			RSB_VA_MEMCPY(VA,MVA,zoff,doff,dnnz,mtxAp->el_size);
+		}
+#if 0
+		else
+		if(mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES_COO)
+		{
+			/* FIXME: here, need fast seek to fri and lri */
+			rsb_nnz_idx_t n,fi = mtxAp->bpntr[fri],li = mtxAp->bpntr[lri+1];//FIXME : relying on bpntr is EVIL !
+		//	for(n=0;n<=mtxAp->nnz;++n)
+			for(n=fi;n<li;++n)
+			{
+				rsb_coo_idx_t ij = mtxAp->bindx[n];
+				rsb_coo_idx_t j = RSB_COO_HALFWORDS_VALUES_UNPACK_UJ(ij);
+				rsb_coo_idx_t i = RSB_COO_HALFWORDS_VALUES_UNPACK_LI(ij);
+//				if(i<fri || i>lri) continue;/* FIXME: slow ! */
+				if(IA)IA[zoff+dnnz] = i+roff+ioff;
+				if(JA)JA[zoff+dnnz] = j+coff+joff;
+				dnnz += 1;
+			}
+			doff = mtxAp->bpntr[fri];
+		}
+#endif
+		else
+		{
+			for(i=fri;RSB_LIKELY(i<=lri);++i)
+			{
+				nnz = mtxAp->bpntr[i+1]-mtxAp->bpntr[i];
+				RSB_CSR2COO_MEMCPY_(VA,IA,JA,MVA,i+roff+ioff,mtxAp->bindx,zoff+dnnz,
+					mtxAp->bpntr[i],nnz,mtxAp->el_size,coff+joff);
+				dnnz += nnz;
+			}
+			doff = mtxAp->bpntr[fri];
+			RSB_VA_MEMCPY(VA,MVA,zoff,doff,dnnz,mtxAp->el_size);
+		}
+		}
+		*rnz += dnnz;
+
+#if 0
+			if(IA && JA && (mtxAp->nnz==3))for(i=0;i<dnnz;++i)
+			{
+				RSB_STDOUT("at %d %d\n",1+IA[i],1+JA[i]);
+			}
+#endif
+	}
+err:
+        RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_get_rows_dense(const struct rsb_mtx_t *mtxAp , void * row , rsb_blk_idx_t fr, rsb_blk_idx_t lr, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t  *rnz, rsb_flags_t flags )
+{
+        /*!
+	 * \ingroup gr_internals
+         *
+	 * FIXME : rename this function
+	 * FIXME : THIS IS NOT ROWS_DENSE ! IT DOES SOMETHING ELSE !
+	 *
+         * \note This function is slow.
+         * */
+        rsb_coo_idx_t i,j;
+	rsb_nnz_idx_t l;
+	rsb_nnz_idx_t nnz = 0,gap = 0,discarded = 0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_time_t gt,ct;
+
+	if(!mtxAp || !rnz)
+	{errval = RSB_ERR_BADARGS;RSB_PERR_GOTO(err,RSB_ERRM_ES)}
+					
+
+	if(fr<0 || lr>mtxAp->nr)
+	{errval = RSB_ERR_BADARGS;RSB_PERR_GOTO(err,RSB_ERRM_ES)}
+
+	if(!row || fr>lr)
+	{errval = RSB_ERR_BADARGS;RSB_PERR_GOTO(err,RSB_ERRM_ES)}
+
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(fr));
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(lr));
+
+	/* input fortran indices */
+	if( flags & RSB_FLAG_FORTRAN_INDICES_INTERFACE )
+	{
+		lr--;
+		fr--;
+	}
+	
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(fr));
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(lr));
+
+	/* FIXME : slow */
+        for(i=fr;RSB_LIKELY(i<=lr);++i)
+        for(j=0;RSB_LIKELY(j<mtxAp->nc);++j)
+	{
+		l = mtxAp->nc*(i-fr)+j;
+		IA[l] = i;
+		JA[l] = j;
+	}
+
+//	for(i=0;i<mtxAp->nc;++i)
+//		printf("%d %d %lg\n",IA[i],JA[i],((double*)row)[i]);
+
+	gt = - rsb_time();
+	RSB_BZERO(row,mtxAp->el_size*mtxAp->nc*(lr-fr+1));
+        for(i=fr;RSB_LIKELY(i<=lr);++i)
+        {
+		errval = rsb__do_get_row_dense(mtxAp,((rsb_byte_t*)row)+(mtxAp->el_size*(i-fr))*mtxAp->nc , i);
+		if(RSB_SOME_ERROR(errval))
+			RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+	gt += rsb_time();
+
+//	printf("!\n");
+//	for(i=0;i<mtxAp->nc*(lr-fr+1);++i)
+//		printf("%d %d %lg\n",IA[i],JA[i],((double*)row)[i]);
+	
+	/* output fortran indices */
+	if( flags & RSB_FLAG_FORTRAN_INDICES_INTERFACE )
+        for(i=fr;RSB_LIKELY(i<=lr);++i)
+        for(j=0;RSB_LIKELY(j<mtxAp->nc);++j)
+	{
+		l = mtxAp->nc*(i-fr)+j;
+		IA[l]++;
+		JA[l]++;
+	}
+
+	nnz = ((lr+1)-fr)*mtxAp->nc;
+	/* FIXME : (SLOW!) (do we really need this ?) 
+	 * FIXME : THIS IS NOT ANYMORE ROWS_DENSE !
+	 * */
+	ct = - rsb_time();
+	rsb_util_compact_nonzeros(row,IA,JA,nnz,mtxAp->typecode,&gap,&discarded,RSB_FLAG_NOFLAGS);
+	ct += rsb_time();
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(discarded));
+	RSB_BZERO(((rsb_byte_t*)row)+(mtxAp->el_size*(nnz-discarded)),mtxAp->el_size*discarded);//NEW
+	//nnz -= gap;
+	nnz -= discarded;
+
+//	printf("\ncompa %lg   getrow %lg\n",ct,gt);
+/*	if(mtxAp->nr==1 && mtxAp->nc==1)
+	{
+		printf("\nMATRIX %d:%d %lg\n",fr,lr,*((double*)mtxAp->VA));
+		printf("\nGETROWDENSEFIRST: %d %d %lg\n",IA[0],JA[0],((double*)row)[0]);	
+	}*/
+
+	if(discarded)
+	{
+		IA[nnz] = 0;	/* FUNDAMENTAL! FIXME ! */
+		JA[nnz] = 0;
+	}
+
+	*rnz = nnz;	// we notify the caller
+
+//	for(i=0;i<nnz;++i)
+//		printf("%d %d %lg\n",IA[i],JA[i],((double*)row)[i]);
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(nnz));
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_get_coo_noalloc(const struct rsb_mtx_t *mtxAp, rsb_byte_t * VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t * nnzp, rsb_flags_t flags)
+{
+	/*! 
+	 * \ingroup gr_internals
+	 *
+	 *  Returns the matrix converted to a coordinate storage format.
+	 *
+	 * \param VA  the values array pointer, sized at least for mtxAp->nnz elements of matrix type
+	 * \param IA  an integer array pointer for row    coordinates
+	 * \param JA  an integer array pointer for column coordinates
+	 * \return RSB_ERR_NO_ERROR on correct operation, an error code (see \ref errors_section) otherwise.
+	 *
+	 * The entire matrix will be returned in COO format, in the specified VA,IA,JA arrays
+	 * No more than mtxAp->nnz elements will be written to in the VA, IA and JA arrays
+	 * 
+	 * FIXME : should add an offset argument, for recursive matrices.
+	 * */
+	register rsb_nnz_idx_t baserow,basecolumn;
+	register rsb_blk_idx_t rows,columns;
+	register rsb_blk_idx_t blockrow,blockcolumn;
+	register rsb_byte_t *bp;
+	size_t el_size = 0;
+	rsb_nnz_idx_t nz = 0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
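+	/*
+	 * Illustrative sketch (hypothetical values): for the 2x2 matrix
+	 * [ 1 0 ; 2 3 ] with nnz=3, a successful call fills the arrays
+	 * row by row as
+	 *   VA = {1,2,3}, IA = {0,1,1}, JA = {0,0,1}
+	 * (one-based if RSB_FLAG_FORTRAN_INDICES_INTERFACE is set in flags).
+	 */
+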
+	if( !IA || !JA || !VA )
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	if( !mtxAp )
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	el_size = mtxAp->el_size;
+
+	if(( mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES_COO) ||
+	   ( mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES_CSR)
+	 || 1 // FIXME !
+	  )
+	{
+		/* FIXME! THIS IS TEMPORARY AND BREAKS THE ROUTINE IF THE NON-RECURSIVE ROOT LACKS THESE FLAGS */
+		rsb_nnz_idx_t dnnz = 0;
+		RSB_DO_ERROR_CUMULATE(errval,rsb__do_get_rows_sparse(RSB_TRANSPOSITION_N,NULL,mtxAp,VA,IA,JA,0,mtxAp->nr-1,&dnnz,RSB_FLAG_NOFLAGS));
+		if(nnzp)
+			*nnzp = dnnz;
+		if( flags & RSB_FLAG_FORTRAN_INDICES_INTERFACE )
+			rsb__util_coo_array_to_fortran_indices_parallel(IA,dnnz),
+			rsb__util_coo_array_to_fortran_indices_parallel(JA,dnnz);
+		goto ret;
+	}
+	// FIXME: THE FOLLOWING IS OLD AND BROKEN
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		struct rsb_mtx_t * submatrix;
+		rsb_submatrix_idx_t i,j;
+		rsb_nnz_idx_t nzoff = 0;
+
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix)
+		{
+			rsb_nnz_idx_t snnz;
+			rsb_coo_idx_t moff = submatrix->roff-mtxAp->roff;
+			rsb_coo_idx_t koff = submatrix->coff-mtxAp->coff;
+
+			snnz = submatrix->nnz;
+
+			RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_COUNT(snnz));
+			RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(nzoff));
+
+			errval = rsb__do_get_coo_noalloc(submatrix,VA+el_size*nzoff,IA+nzoff,JA+nzoff,nnzp,flags);
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_PERR_GOTO(erri,RSB_ERRM_ES)
+			}
+			rsb__util_coo_arrays_add(IA+nzoff,JA +nzoff, moff*i, koff*j, snnz);
+
+			nzoff += snnz;
+		}
+erri:
+		goto ret;
+	}
+
+	RSB_DEBUG_ASSERT(mtxAp->bpntr);
+	RSB_DEBUG_ASSERT(mtxAp->indptr);
+	RSB_DEBUG_ASSERT(mtxAp->bindx);
+
+	RSB_BZERO(VA,el_size     * mtxAp->nnz);
+	RSB_BZERO(IA,sizeof(rsb_coo_idx_t) * mtxAp->nnz);
+	RSB_BZERO(JA,sizeof(rsb_coo_idx_t) * mtxAp->nnz);
+
+
+#if RSB_EXPERIMENTAL_USE_PURE_BCSS
+	if(rsb__is_bcss_matrix(mtxAp))
+	{
+		rsb_nnz_idx_t bi, nz = 0, bri, bci;
+
+		if(mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)
+		{
+			RSB_BCSC_MATRIX_FOREACH_BLOCK(mtxAp,bp,bri,bci,bi,baserow,basecolumn)
+				rsb__do_extract_nnz_from_block(bp, ((rsb_byte_t*)(VA))+el_size*nz, IA+nz, JA+nz, mtxAp->br, mtxAp->bc, baserow, basecolumn, mtxAp->typecode, el_size, &nz);
+		}
+		else
+		{
+			RSB_BCSR_MATRIX_FOREACH_BLOCK(mtxAp,bp,bri,bci,bi,baserow,basecolumn)
+				rsb__do_extract_nnz_from_block(bp, ((rsb_byte_t*)(VA))+el_size*nz, IA+nz, JA+nz, mtxAp->br, mtxAp->bc, baserow, basecolumn, mtxAp->typecode, el_size, &nz);
+		}
+		return RSB_ERR_NO_ERROR;
+	}
+#endif /* RSB_EXPERIMENTAL_USE_PURE_BCSS */
+	RSB_DEBUG_ASSERT(mtxAp->rpntr);
+	RSB_DEBUG_ASSERT(mtxAp->cpntr);
+
+	{
+	RSB_GET_FIRST_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		/*
+			FIXME : should better use rsb__do_extract_nnz_from_block !
+		*/
+		rsb_coo_idx_t r,c;
+		for(r=0;r<rows;++r)
+		for(c=0;c<columns;++c)
+		{
+			rsb_byte_t*src = (rsb_byte_t*)((double*)(bp+RSB_GET_INTRA_BLOCK_OFFSET(baserow+r,basecolumn+c,blockrow,blockcolumn,mtxAp)));
+			RSB_DEBUG_ASSERT(src>=(rsb_byte_t*)bp);
+			RSB_DEBUG_ASSERT(src>=(rsb_byte_t*)mtxAp->VA);
+//			RSB_DEBUG_ASSERT(src<(rsb_byte_t*)mtxAp->VA+el_size*mtxAp->element_count);// intrablock struct breaks this because of extra space
+			/*
+			 * A NEW MACRO IS NEEDED WHICH DETECTS, FOR EACH TYPE, WHETHER A MEMORY AREA IS A NONZERO
+			 * */
+			if(  RSB_IS_ELEMENT_NONZERO(src,mtxAp->typecode) )
+			{
+				rsb_byte_t * dst = (VA) + el_size * (nz);
+				RSB_NUMERICAL_TYPE_SET_ELEMENT(dst,src,mtxAp->typecode) /* FIXME : this is SLOW */
+				(IA)[nz] = baserow   +r;
+				(JA)[nz] = basecolumn+c;
+				++nz;
+				if(nz>mtxAp->nnz)goto fatal_err;/* PLACE HERE ERROR CHECKING CODE */
+			}
+		}
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	/* FIXME : this should happen only in verbose mode */
+	if(nz<mtxAp->nnz)
+		RSB_INFO("warning : counted fewer nonzeros (%zd) than expected (%zd) (may be zeros..)!\n",(size_t)nz,(size_t)mtxAp->nnz);
+
+	if(nz>mtxAp->nnz)
+	{
+		goto fatal_err;
+	}
+	if(nnzp)
+		*nnzp = nz;
+	else
+	{
+		/* FIXME : WRITE ME */	
+	}
+
+	return RSB_ERR_NO_ERROR;
+err:
+	return RSB_ERR_BADARGS;
+fatal_err:
+	RSB_ERROR("fatal: counted more nonzeros (%zd) than expected (%zd)!\n",(size_t)nz,(size_t)mtxAp->nnz);
+	errval = RSB_ERR_BADARGS;
+ret:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+static rsb_err_t rsb__do_infinity_norm_inner( const struct rsb_mtx_t *mtxAp , rsb_byte_t * row_sums, rsb_bool_t do_testing, rsb_trans_t transA)
+{
+	/*
+		FIXME : document
+	*/
+	size_t el_size = mtxAp->el_size;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if( mtxAp && rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t i,j;
+		const struct rsb_mtx_t * submatrix;
+
+		if(RSB_SOME_ERROR(errval))
+		{RSB_PERR_GOTO(err,RSB_ERRM_ES)}
+
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix)
+		{
+			/*
+			rsb_coo_idx_t off = 0;
+			if(RSB_DOES_NOT_TRANSPOSE(transA))
+				off = submatrix->roff-mtxAp->roff;
+			else
+				off = submatrix->coff-mtxAp->coff;
+			*/
+			errval = rsb__do_infinity_norm_inner(submatrix,row_sums/*+el_size*(off)*/,do_testing,transA);
+			if(RSB_SOME_ERROR(errval))
+			{RSB_PERR_GOTO(err,RSB_ERRM_ES)}
+		}
+	}
+	else
+	{
+#ifndef RSB_HAVE_OPTYPE_INFTY_NORM
+	return RSB_ERR_UNSUPPORTED_OPERATION;
+#else /* RSB_HAVE_OPTYPE_INFTY_NORM */
+		if(do_testing!=0 || (mtxAp->flags & RSB_FLAG_SHOULD_DEBUG))
+		{
+			// uhm.. FIXME : do we really need this ?
+			if(RSB_SOME_ERROR(errval = rsb__infty_norm_testing(mtxAp,transA,row_sums)))
+			{RSB_PERR_GOTO(err,RSB_ERRM_ES)}
+		}
+		else
+		{
+			if(RSB_SOME_ERROR(errval = rsb__do_infty_norm(mtxAp,transA,row_sums)))
+			{RSB_PERR_GOTO(err,RSB_ERRM_ES)}
+		}
+#endif /* RSB_HAVE_OPTYPE_INFTY_NORM */
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_absolute_rows_sums( const struct rsb_mtx_t * mtxAp, void * d)
+{
+	/* FIXME: UNFINISHED */
+	return rsb__do_infinity_norm_inner(mtxAp,d,RSB_BOOL_FALSE,RSB_TRANSPOSITION_N);
+}
+
+rsb_err_t rsb__do_absolute_columns_sums( const struct rsb_mtx_t * mtxAp, void * d)
+{
+	/* FIXME: UNFINISHED */
+	return rsb__do_infinity_norm_inner(mtxAp,d,RSB_BOOL_FALSE,RSB_TRANSPOSITION_T);
+}
+
+rsb_err_t rsb__do_infinity_norm(const struct rsb_mtx_t *mtxAp, void * infinity_norm, const rsb_bool_t do_testing, rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * Returns the maximum of the sums of the absolute values of the elements in each row, over all rows;
+	 * this is also known as the maximum absolute row sum norm: ||A|| = max_i sum_j |a_ij|.
+	 *
+	 * \param mtxAp is a pointer to a valid matrix.
+	 * \param infinity_norm is a pointer to the location where the norm will be written.
+	 *
+	 * \return RSB_ERR_NO_ERROR on correct operation, an error code (see \ref errors_section) otherwise.
+	 * TODO : should check for consistency of the matrix struct arrays/indices.
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+#ifndef RSB_HAVE_OPTYPE_INFTY_NORM
+	return RSB_ERR_UNSUPPORTED_OPERATION;
+#else /* RSB_HAVE_OPTYPE_INFTY_NORM */
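+	/*
+	 * Worked example (illustrative only): for A = [ 1 -2 ; 3 4 ],
+	 * the absolute row sums are |1|+|-2| = 3 and |3|+|4| = 7,
+	 * so the infinity norm is max(3,7) = 7 (untransposed case).
+	 */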
+	//rsb_blk_idx_t max_block_rows = 0;
+	void * row_sums = NULL,*mrp = NULL;
+	rsb_blk_idx_t maximal_row = 0;
+	rsb_coo_idx_t tm = 0;
+	//rsb_blk_idx_t i;
+
+	if(!mtxAp)
+		{errval = RSB_ERR_BADARGS;RSB_PERR_GOTO(err,RSB_ERRM_ES)}
+	if(!infinity_norm)
+		{errval = RSB_ERR_BADARGS;RSB_PERR_GOTO(err,RSB_ERRM_ES)}
+       	tm = RSB_MTX_TRANSPOSED_ROWS(mtxAp,transA);
+//	if(RSB_DOES_TRANSPOSE(transA))
+//		return RSB_ERR_UNIMPLEMENTED_YET;
+
+#if 0
+	if( matrix && rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb__do_perror(NULL,RSB_ERR_UNIMPLEMENTED_YET);
+		errval = RSB_ERR_UNIMPLEMENTED_YET;
+		goto ret;
+	}
+#endif
+	/*
+	 * first, we allocate a row sums array, with extra room for the tallest block
+	 * */
+	row_sums = rsb__calloc(mtxAp->el_size*(tm+RSB_MAXIMAL_CONFIGURED_BLOCK_SIZE));
+	if(!row_sums)
+		{errval = RSB_ERR_ENOMEM;RSB_PERR_GOTO(err,RSB_ERRM_ES)}
+
+	errval = rsb__do_infinity_norm_inner(mtxAp,row_sums,do_testing,transA);
+	if(RSB_SOME_ERROR(errval))
+		{RSB_PERR_GOTO(err,RSB_ERRM_ES)}
+
+	/*
+	 * we determine the maximal row sum
+	 * FIXME : slow!
+	 * TODO : should use BLAS own icamax or similar..
+	 * */
+	RSB_VECTOR_FIND_MAXIMAL_ELEMENT(maximal_row,row_sums,tm,mtxAp->typecode);
+
+	if(maximal_row<0)
+	{
+		/* fixme : should set error code */
+		RSB_PERR_GOTO(erri,RSB_ERRM_ES)
+	}
+
+	/* 
+	 * that row sum is our infinity norm now
+	 * */
+	mrp = (((rsb_byte_t*)row_sums)+(mtxAp->el_size*maximal_row));
+	if(rsb__get_diagonal_type_flag(mtxAp)==RSB_DIAGONAL_I)
+		rsb__util_increase_by_one(mrp,0,mtxAp->typecode);
+
+	RSB_NUMERICAL_TYPE_SET_ELEMENT_REAL(infinity_norm,mrp,mtxAp->typecode)
+
+	RSB_CONDITIONAL_FREE(row_sums);
+
+	goto ret;
+erri:
+	RSB_CONDITIONAL_FREE(row_sums);
+	RSB_ERROR(RSB_ERRM_ES);
+	return RSB_ERR_INTERNAL_ERROR;
+//errg:
+//	return RSB_ERR_GENERIC_ERROR;
+err:
+	RSB_CONDITIONAL_FREE(row_sums);
+ret:
+	RSB_DO_ERR_RETURN(errval)
+#endif /* RSB_HAVE_OPTYPE_INFTY_NORM */
+}
+
+double rsb__do_get_matrix_fillin(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * This function returns the fillin of a given matrix.
+	 * The fillin is defined as the ratio of the allocated elements
+	 * count to the original matrix nonzeros count.
+	 *
+         * This function is expected to be quite fast (so it won't be a number
+         * crunching routine, no matter how fancy our data structures are).
+	 *
+	 * \return 0.0 on error, otherwise the fillin
+	 * */
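+	/*
+	 * Example (illustrative): a blocked format storing 3 nonzeros in one
+	 * allocated 2x2 block has element_count = 4 and nnz = 3, hence a
+	 * fillin of 4/3; a fillin of 1.0 means no storage overhead.
+	 */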
+	if(!mtxAp)
+		return 0.0;
+	return ((double)mtxAp->element_count)/((double)mtxAp->nnz) ;/* numbers / nonzeros */
+}
+
+rsb_nnz_idx_t rsb__do_get_matrix_nnz(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * This function returns the number of nonzeros of a given sparse matrix.
+	 *
+	 * \return 0 on error, otherwise the nonzeros count
+	 * */
+	if(!mtxAp)
+		return 0;
+	else
+		return (mtxAp->nnz) ;/* nonzeros */
+}
+
+rsb_long_t rsb__submatrices(const struct rsb_mtx_t * mtxAp)
+{
+	/*!
+	 * 	\ingroup gr_internals
+	 * 	Counts submatrices: either leaves or nodes.
+	 */
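+	/* E.g. (illustrative): a root with four leaf quadrants counts as 4 leaves + 1 root = 5. */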
+	rsb_long_t sm = 0;
+	rsb_submatrix_idx_t i,j;
+	const struct rsb_mtx_t * submatrix;
+
+	if(!mtxAp)
+	{
+		return -1;
+	}
+
+	RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+	{
+		if(submatrix)
+		{
+			sm += rsb__submatrices(submatrix);
+		}
+	}
+	return sm+1;
+}
+
+rsb_err_t rsb__get_blocking_size(const struct rsb_mtx_t * mtxAp, rsb_blk_idx_t *brp, rsb_blk_idx_t *bcp)
+{
+	/*!
+	 *
+	 * \ingroup gr_internals
+	 *
+	 * \return RSB_ERR_NO_ERROR on correct operation, an error code (see \ref errors_section) otherwise.
+	 * \attention : before it used rsb_coo_idx_t !
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!mtxAp)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+	
+	if( RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_NON_ROOT_MATRIX) && !(rsb__have_fixed_blocks_matrix_flags(mtxAp->flags) /*|| rsb__is_bcss_matrix(mtxAp)*/ ))
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+	
+#if (RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS)
+	if( (mtxAp->rpntr==NULL) && ( mtxAp->cpntr==NULL) )
+	{
+
+#if RSB_EXPERIMENTAL_USE_PURE_BCSS
+		RSB_ASSERT(mtxAp->br>0);
+		RSB_ASSERT(mtxAp->bc>0);
+		*brp= mtxAp->br;
+		*bcp= mtxAp->bc;
+#else /* RSB_EXPERIMENTAL_USE_PURE_BCSS */
+		*brp = 1;
+		*bcp = 1;
+#endif /* RSB_EXPERIMENTAL_USE_PURE_BCSS */
+	}
+	else
+#endif /* RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS */
+	{
+		*brp = mtxAp->rpntr[1]-mtxAp->rpntr[0];	/* we assume that block_count >= 1 */
+		*bcp = mtxAp->cpntr[1]-mtxAp->cpntr[0];	/* we assume that block_count >= 1 */
+	}
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(*brp));
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(*bcp));
+err:
+	return errval;
+}
+
+rsb_err_t rsb__get_blocking_size_as_row_major(const struct rsb_mtx_t * mtxAp, rsb_blk_idx_t *bMp, rsb_blk_idx_t *bmp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * \return RSB_ERR_NO_ERROR on correct operation, an error code (see \ref errors_section) otherwise.
+	 * */
+	rsb_err_t errval = RSB_ERR_BADARGS;
+
+	if(!mtxAp)
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	
+	if( mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER )
+		errval = rsb__get_blocking_size(mtxAp, bmp, bMp);
+	else
+		errval = rsb__get_blocking_size(mtxAp, bMp, bmp);
+err:
+	return errval;
+}
+
+size_t rsb__do_get_max_blocksize(const struct rsb_mtx_t * mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *	FIXME : this is for VBR and recursive matrices.
+	 *      it is UNFINISHED
+	 */
+	rsb_blk_idx_t maxMd = 0,maxmd = 0;
+	rsb_blk_idx_t i;
+	size_t sz;
+
+//	if(rsb__is_recursive_matrix(mtxAp->flags))
+//		RSB_WARN("rsb__do_get_max_blocksize unfinished for recursive formats!\n");
+
+	RSB_DEBUG_ASSERT(mtxAp);
+
+	if(rsb__have_fixed_blocks_matrix_flags(mtxAp->flags))
+		rsb__get_blocking_size_as_row_major(mtxAp, &maxMd, &maxmd);
+	else
+	{
+		RSB_DEBUG_ASSERT(mtxAp->Mdim);
+		RSB_DEBUG_ASSERT(mtxAp->mdim);
+		RSB_DEBUG_ASSERT(mtxAp->Mpntr);
+		RSB_DEBUG_ASSERT(mtxAp->mpntr);
+
+		for(i=1;RSB_LIKELY(i<=mtxAp->Mdim);++i)
+			if(mtxAp->Mpntr[i]-mtxAp->Mpntr[i-1]>maxMd)
+				maxMd = mtxAp->Mpntr[i]-mtxAp->Mpntr[i-1];
+		for(i=1;RSB_LIKELY(i<=mtxAp->mdim);++i)
+			if(mtxAp->mpntr[i]-mtxAp->mpntr[i-1]>maxmd)
+				maxmd = mtxAp->mpntr[i]-mtxAp->mpntr[i-1];
+	}
+	sz = mtxAp->el_size;
+	sz *= maxmd;
+	sz *= maxMd;
+	return sz;
+}
+
+rsb_err_t rsb__get_physical_blocking_size(const struct rsb_mtx_t * mtxAp, rsb_blk_idx_t *brp, rsb_blk_idx_t *bcp)
+{
+	/**
+		FIXME
+	*/
+	if( mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER )
+		return rsb__get_blocking_size(mtxAp, bcp, brp);
+	else
+		return rsb__get_blocking_size(mtxAp, brp, bcp);
+}
+
+void rsb__do_extract_nnz_from_block(void * blockpointer, void * VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t br, rsb_coo_idx_t bc, rsb_coo_idx_t baserow, rsb_coo_idx_t basecolumn, rsb_type_t typecode, size_t el_size, rsb_nnz_idx_t *nnzp )
+{
+	/**
+		FIXME
+	*/
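+	/*
+	 * Sketch (illustrative values): for a row-major 2x2 block {0,7,0,9}
+	 * based at (baserow,basecolumn) = (2,4), the scan below appends
+	 * VA={7,9}, IA={2,3}, JA={5,5} and adds 2 to *nnzp.
+	 */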
+	rsb_coo_idx_t r,c;
+	rsb_nnz_idx_t nz = 0;
+
+	for(r=0;r<br;++r)
+	for(c=0;c<bc;++c)
+	{
+		rsb_byte_t*src=((rsb_byte_t*)blockpointer)+(el_size*(r*bc+c));
+		/*
+		 * A NEW MACRO IS NEEDED WHICH DETECTS, FOR EACH TYPE, WHETHER A MEMORY AREA IS A NONZERO
+		 * */
+		if( RSB_IS_ELEMENT_NONZERO(src,typecode) )
+		{
+			rsb_byte_t * dst = ((rsb_byte_t*)(VA)) + el_size * (nz);
+			RSB_NUMERICAL_TYPE_SET_ELEMENT(dst,src,typecode) /* FIXME : this is SLOW */
+			(IA)[nz] = baserow   +r;
+			(JA)[nz] = basecolumn+c;
+			++nz;
+		//	if(nz>mtxAp->nnz)goto fatal_err;/* PLACE HERE ERROR CHECKING CODE */
+		}
+	}
+	*nnzp += nz;
+}
+
+rsb_submatrix_idx_t rsb__get_recursive_matrix_depth(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 */
+	rsb_submatrix_idx_t i,j;
+	struct rsb_mtx_t * submatrix = NULL;
+	rsb_submatrix_idx_t msmd = 0,smd = 0;
+
+	if(!mtxAp)
+		return 0;
+
+	if(!rsb__is_recursive_matrix(mtxAp->flags))
+		goto norec;
+
+	RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+	if(submatrix)
+	{
+		smd = rsb__get_recursive_matrix_depth(submatrix);
+		msmd = RSB_MAX(smd,msmd);
+	}
+
+	return msmd+1;
+norec:
+	return 0;
+}
+
+rsb_flags_t rsb__get_hermitian_flag(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 */
+	if(!mtxAp)
+		return RSB_FLAG_NOFLAGS;
+	return (mtxAp->flags & RSB_FLAG_HERMITIAN);
+}
+
+rsb_flags_t rsb__get_symmetry_flag(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 */
+	if(!mtxAp)
+		return RSB_FLAG_NOFLAGS;
+	return (mtxAp->flags & RSB_FLAG_SYMMETRIC);
+}
+
+rsb_flags_t rsb__get_diagonal_type_flag(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 */
+	if(!mtxAp)
+		return RSB_FLAG_NOFLAGS;
+
+#ifdef RSB_DIAGONAL_I
+	return (mtxAp->flags & RSB_FLAG_UNIT_DIAG_IMPLICIT) ? RSB_DIAGONAL_I : RSB_DIAGONAL_E ;
+#else /* RSB_DIAGONAL_I */
+	return RSB_DIAGONAL_E ;
+#endif /* RSB_DIAGONAL_I */
+}
+
+rsb_flags_t rsb__get_symmetry_type_flag(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 */
+	if(!mtxAp)
+		return RSB_FLAG_NOFLAGS;
+
+	return (rsb__get_hermitian_flag(mtxAp) | rsb__get_symmetry_flag(mtxAp));
+}
+
+RSB_INLINE rsb_coo_idx_t rsb_do_get_rows_of(const struct rsb_mtx_t *mtxAp, rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * FIXME : if using this for right hand side blanking, consider excess elements!
+	 */
+	RSB_DEBUG_ASSERT(mtxAp);
+	if(RSB_DOES_NOT_TRANSPOSE(transA))
+		return mtxAp->nr;
+	return mtxAp->nc;
+}
+
+RSB_INLINE rsb_coo_idx_t rsb_do_get_columns_of(const struct rsb_mtx_t *mtxAp, rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * FIXME : if using this for right hand side blanking, consider excess elements!
+	 */
+	RSB_DEBUG_ASSERT(mtxAp);
+	if(RSB_DOES_NOT_TRANSPOSE(transA))
+		return mtxAp->nc;
+	return mtxAp->nr;
+}
+
+static rsb_err_t rsb__do_get_elements_for_all_columns_inner(const struct rsb_mtx_t *mtxAp, rsb_nnz_idx_t * CP)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * FIXME : NEW, UNFINISHED STUB, UNTESTED
+	 * TODO : to parallelize this
+	 * TODO : to declare this function in some header
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_DEBUG_ASSERT(mtxAp);
+	RSB_DEBUG_ASSERT(CP);
+
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t i,j;
+		const struct rsb_mtx_t * submatrix;
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix)
+			RSB_DO_ERROR_CUMULATE(errval,rsb__do_get_elements_for_all_columns_inner(submatrix,CP));
+	}
+	else
+	{
+		if(!rsb__is_css_matrix(mtxAp))
+		{
+			errval = RSB_ERR_BADARGS;
+			RSB_PERR_GOTO(err,RSB_ERRM_ES)
+		}
+		if(rsb__is_coo_matrix(mtxAp))
+		{
+			rsb_nnz_idx_t n;
+			//const rsb_coo_idx_t roff = mtxAp->roff;
+			const rsb_coo_idx_t coff = mtxAp->coff;
+			if(mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES)
+			{
+				RSB_DECLARE_CONST_HALFCOO_JARRAY_FROM_MATRIX(mJA,mtxAp)
+				for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				{
+					rsb_half_idx_t /*i = mIA[n],*/j = mJA[n];
+					(CP)[coff+j]++;
+				}
+			}
+			else
+			{
+				RSB_DECLARE_CONST_FULLCOO_JARRAY_FROM_MATRIX(mJA,mtxAp)
+				for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				{
+					rsb_coo_idx_t /*i = mIA[n],*/j = mJA[n];
+					(CP)[coff+j]++;
+				}
+			}
+		}
+		else
+		if(rsb__is_bcsc_matrix(mtxAp))
+		{
+			rsb__util_nnz_array_add_array(CP+mtxAp->coff,mtxAp->bpntr,mtxAp->nc);
+		}
+		else
+		if(rsb__is_bcsr_matrix(mtxAp))
+		{
+			rsb_nnz_idx_t n;
+			rsb_coo_idx_t *bindx = mtxAp->bindx;
+			//const rsb_coo_idx_t roff = mtxAp->roff;
+			const rsb_coo_idx_t coff = mtxAp->coff;
+
+#if 0
+			if( mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES_COO)
+			{
+				for(n=0;n<mtxAp->nnz;++n)
+				{
+					rsb_coo_idx_t ij = bindx[n],j = RSB_COO_HALFWORDS_VALUES_UNPACK_UJ(ij);
+					(CP+1)[coff+j]++;
+				}
+			}
+#endif
+			if(mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES_CSR)
+			{
+				const rsb_half_idx_t *hbindx = (const rsb_half_idx_t *)bindx;
+				for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				{
+					rsb_coo_idx_t j = hbindx[n];
+					(CP)[coff+j]++;
+				}
+			}
+			else
+			{
+				for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				{
+					rsb_coo_idx_t j = bindx[n];
+					(CP)[coff+j]++;
+				}
+			}
+		}
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+static rsb_err_t rsb__do_get_elements_for_each_column(const struct rsb_mtx_t *mtxAp, rsb_nnz_idx_t * CP)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * FIXME : NEW, UNFINISHED STUB
+	 * TODO : to parallelize this
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	//rsb_nnz_idx_t n;
+	if(!mtxAp)
+       	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+	RSB_DEBUG_ASSERT(mtxAp);
+	RSB_DEBUG_ASSERT(CP);
+	rsb__util_nnz_array_set(CP,mtxAp->nc,0);
+	RSB_DO_ERROR_CUMULATE(errval,rsb__do_get_elements_for_all_columns_inner(mtxAp,CP));
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+static rsb_err_t rsb__do_get_elements_for_all_columns(const struct rsb_mtx_t *mtxAp, rsb_nnz_idx_t * CP)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * FIXME : NEW, UNFINISHED STUB
+	 * TODO : to parallelize this
+	 */
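+	/*
+	 * Sketch (illustrative values): with nc=3 and per-column counts
+	 * {2,0,3} gathered into CP+1 by the call below, the final prefix
+	 * sum turns CP = {0,2,0,3} into the column pointer array {0,2,2,5}.
+	 */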
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t n;
+	RSB_DEBUG_ASSERT(mtxAp);
+	RSB_DEBUG_ASSERT(CP);
+	rsb__util_nnz_array_set(CP,mtxAp->nc+1,0);
+	RSB_DO_ERROR_CUMULATE(errval,rsb__do_get_elements_for_each_column(mtxAp,CP+1));
+	for(n=0;n<mtxAp->nc;++n)CP[n+1] += CP[n];
+//err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+static rsb_err_t rsb__do_get_elements_for_all_rows_inner(const struct rsb_mtx_t *mtxAp, rsb_nnz_idx_t * RP)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * FIXME : NEW, UNFINISHED STUB
+	 * TODO : to parallelize this
+	 * TODO : to declare this function in some header
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_DEBUG_ASSERT(mtxAp);
+	RSB_DEBUG_ASSERT(RP);
+
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t i,j;
+		const struct rsb_mtx_t * submatrix;
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix)
+			RSB_DO_ERROR_CUMULATE(errval,rsb__do_get_elements_for_all_rows_inner(submatrix,RP));
+	}
+	else
+	{
+		rsb_nnz_idx_t n;
+		rsb_nnz_idx_t i;
+		if(rsb__is_coo_matrix(mtxAp))
+		{
+			const rsb_coo_idx_t roff = mtxAp->roff;
+			if( mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES)
+			{
+				RSB_DECLARE_CONST_HALFCOO_IARRAY_FROM_MATRIX(mIA,mtxAp)
+				for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				{
+					rsb_half_idx_t i = mIA[n];//,j = mJA[n];
+					(RP)[roff+i]++;
+				}
+			}
+			else
+			{
+				RSB_DECLARE_CONST_FULLCOO_IARRAY_FROM_MATRIX(mIA,mtxAp)
+				for(n = 0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				{
+					rsb_coo_idx_t i = mIA[n];//,j = mJA[n];
+					(RP)[roff+i]++;
+				}
+			}
+		}
+		else
+		{
+//			if( mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES)
+//				rsb__util_nnz_array_add_array(RP+mtxAp->roff,mtxAp->bpntr+1,mtxAp->nr-1);
+//			else
+//				rsb__util_nnz_array_add_array(RP+mtxAp->roff,mtxAp->bpntr+1,mtxAp->nr-1);
+			for(i=0;i<mtxAp->nr;++i)
+			{
+				(RP)[mtxAp->roff+i] += mtxAp->bpntr[i+1]-mtxAp->bpntr[i];
+			}
+		}
+	}
+//err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+static rsb_err_t rsb__do_get_elements_for_all_rows(const struct rsb_mtx_t *mtxAp, rsb_nnz_idx_t * RP)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * FIXME : NEW, UNFINISHED STUB
+	 * TODO : to parallelize this
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	//rsb_nnz_idx_t nzi;
+	RSB_DEBUG_ASSERT(mtxAp);
+	RSB_DEBUG_ASSERT(RP);
+
+	rsb__util_nnz_array_set(RP,mtxAp->nr+1,0);
+	RSB_DO_ERROR_CUMULATE(errval,rsb__do_get_elements_for_all_rows_inner(mtxAp,RP+1));
+	//for(nzi=0;RSB_LIKELY(nzi<mtxAp->nr);++nzi) RP[nzi+1] += RP[nzi];
+	rsb_do_prefix_sum_nnz_idx_t(RP,mtxAp->nr+1);
+//err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__dodo_get_csr(const struct rsb_mtx_t *mtxAp, rsb_byte_t ** VA, rsb_nnz_idx_t ** RP, rsb_coo_idx_t ** JA)
+{
+	/*!
+	 * \ingroup gr_internals
+	 */
+	rsb_nnz_idx_t rnz = 0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(RSB_UNLIKELY(!RP || !*RP || !mtxAp || !VA || !*VA || !JA || !*JA))
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+//	RSB_DO_ERROR_CUMULATE(errval,rsb__do_get_rows_sparse_rec(mtxAp,*VA,0,mtxAp->nr-1,NULL,*JA,&rnz,0,0));
+	RSB_DO_ERROR_CUMULATE(errval,rsb__do_get_rows_sparse(RSB_TRANSPOSITION_N,NULL,mtxAp,*VA,NULL,*JA,0,mtxAp->nr-1,&rnz,RSB_FLAG_NOFLAGS));
+
+	RSB_DO_ERROR_CUMULATE(errval,rsb__do_get_elements_for_all_rows(mtxAp,*RP));
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_get_csc(const struct rsb_mtx_t *mtxAp, rsb_byte_t ** VA, rsb_nnz_idx_t ** CPp,rsb_coo_idx_t ** IA)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * FIXME : NEW, UNFINISHED STUB
+	 * TODO : to parallelize this
+	 * NOTE : sort the matrices in a SPSV-like order; compute the column pointers vector; extract.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_translated_matrix_t * all_leaf_matrices = NULL;	/** NEW, EXPERIMENTAL */
+	rsb_submatrix_idx_t all_leaf_matrices_n = 0;
+	rsb_submatrix_idx_t * deps = NULL;	/** NEW, EXPERIMENTAL */
+	rsb_nnz_idx_t n;
+	//return 0;
+	rsb_nnz_idx_t * CP = *CPp;
+
+	if(!mtxAp||!VA||!CPp||!IA)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	errval = rsb__do_get_submatrices_block_for_get_csr(mtxAp,&all_leaf_matrices,&all_leaf_matrices_n/*,RSB_TRANSPOSITION_N*/);/* ! */
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+//	deps = rsb__malloc(sizeof(rsb_submatrix_idx_t)*all_leaf_matrices_n);
+//	if(RSB_SOME_ERROR(errval) || !all_leaf_matrices || !deps)
+//	{errval = RSB_ERR_ENOMEM;RSB_PERR_GOTO(err,RSB_ERRM_ES);}
+//
+	if(0)
+	for(n=0;n<all_leaf_matrices_n;++n)
+	{
+		all_leaf_matrices[n].roff = mtxAp->nc-(all_leaf_matrices[n].coff+all_leaf_matrices[n].mtxlp->nc*1);
+		all_leaf_matrices[n].coff = mtxAp->nr-(all_leaf_matrices[n].roff+all_leaf_matrices[n].mtxlp->nr*1);
+		all_leaf_matrices[n].nr = all_leaf_matrices[n].mtxlp->nc;
+		all_leaf_matrices[n].nc = all_leaf_matrices[n].mtxlp->nr;
+	}
+
+	errval = rsb__do_get_elements_for_all_columns(mtxAp,CP);/*  */
+	//errval = rsb__do_get_elements_for_each_column(mtxAp,CP);
+	/* ... */
+
+	for(n=0;RSB_LIKELY(n<all_leaf_matrices_n);++n)
+	{
+		/* extract elements ... */
+		struct rsb_mtx_t *submatrix = all_leaf_matrices[n].mtxlp;
+		rsb_do_get_columns_sparse(submatrix,*VA,0,mtxAp->nc-1,*IA,NULL,CP,0,0);
+//		rsb_do_get_columns_sparse(submatrix,*VA,0,mtxAp->nc-1,*IA,NULL,CP,submatrix->roff,submatrix->coff);
+	}
+
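+	/*
+	 * Presumably each CP[j] has been advanced to the end of column j by
+	 * the extraction above; the shift below turns those end offsets back
+	 * into column start pointers, with CP[0] = 0.
+	 */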
+	for(n=mtxAp->nc;RSB_LIKELY(n>0);--n) CP[n] = CP[n-1];
+	CP[0] = 0;
+
+	// FIXME : should check for correctness, now
+	if(RSB_UNLIKELY(CP[mtxAp->nc]!=mtxAp->nnz))
+	{
+		errval = RSB_ERR_INTERNAL_ERROR;
+		RSB_PERR_GOTO(err,"%d != %d\n",CP[mtxAp->nc],mtxAp->nnz)
+	}
+
+err:
+	RSB_CONDITIONAL_FREE(deps);
+	RSB_CONDITIONAL_FREE(all_leaf_matrices);
+	return errval;
+}
+
+rsb_err_t rsb__do_get_block_sparse_pattern(const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t fr, rsb_coo_idx_t lr, rsb_coo_idx_t fc, rsb_coo_idx_t lc, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t * IREN, rsb_coo_idx_t * JREN, rsb_nnz_idx_t *rnz, rsb_flags_t flags )
+{
+	/*! 
+	 * \ingroup gr_internals
+	 * */
+	return rsb__do_get_block_sparse(mtxAp,NULL,IA,JA,fr,lr,fc,lc,IREN,JREN,rnz,flags);
+}
+
+static rsb_err_t rsb__do_get_block_sparse_leaf(const struct rsb_mtx_t * mtxAp, void* OVA, rsb_coo_idx_t fr, rsb_coo_idx_t lr, rsb_coo_idx_t fc, rsb_coo_idx_t lc, rsb_coo_idx_t * OIA, rsb_coo_idx_t * OJA, rsb_coo_idx_t * IREN, rsb_coo_idx_t * JREN, rsb_nnz_idx_t *rnz, rsb_coo_idx_t ioff, rsb_coo_idx_t joff, rsb_flags_t flags)
+{
+	/*! 
+	 * \ingroup gr_internals
+	 *
+	 * FIXME: IREN/JREN untested
+	 * */
+	rsb_nnz_idx_t nnz = 0;
+	rsb_coo_idx_t i = 0,roff = mtxAp->roff,coff = mtxAp->coff;
+	RSB_DEFAULT_TYPE *VA = mtxAp->VA;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	const rsb_nnz_idx_t zoff = *rnz;// FIXME: rnz is MANDATORY 
+
+	fr -= mtxAp->roff; lr -= mtxAp->roff; fc -= mtxAp->coff; lc -= mtxAp->coff; 
+
+	if(!mtxAp->bindx || !mtxAp->bpntr)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	if(rsb__is_coo_matrix(mtxAp))
+	{
+		rsb_nnz_idx_t nnz0 = 0,nnz1 = mtxAp->nnz;
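+		/*
+		 * The two binary searches just below narrow [nnz0,nnz1) to the
+		 * requested row range [fr,lr]; the per-row searches inside the
+		 * loop then narrow each row to the column range [fc,lc] before
+		 * the elements are copied out.
+		 */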
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_USE_HALFWORD_INDICES))
+		{
+			RSB_DECLARE_CONST_HALFCOO_ARRAYS_FROM_MATRIX(IA,JA,mtxAp)
+			nnz0 += rsb__nnz_split_hcoo_bsearch(IA+nnz0,fr,nnz1-nnz0);
+			nnz1 = nnz0+rsb__nnz_split_hcoo_bsearch(IA+nnz0,lr+1,nnz1-nnz0);
+			if(nnz1-nnz0)
+			{
+				rsb_nnz_idx_t fnz = 0, lnz = 0, rnz = 0;
+				for(i=fr;RSB_LIKELY(i<=lr);++i)
+				{
+					fnz = lnz;
+					lnz = nnz1;
+					rnz = lnz-fnz;
+					fnz = fnz+rsb__nnz_split_hcoo_bsearch(IA+fnz,i,rnz);
+					rnz = lnz-fnz;
+					lnz = fnz+rsb__nnz_split_hcoo_bsearch(IA+fnz,i+1,rnz);
+					rnz = lnz-fnz;
+					fnz = fnz+rsb__nnz_split_hcoo_bsearch(JA+fnz,fc,rnz);
+					rnz = lnz-fnz;
+					lnz = fnz+rsb__nnz_split_hcoo_bsearch(JA+fnz,lc+1,rnz);
+
+					if(OVA)
+						RSB_VA_MEMCPY(OVA,VA,zoff+nnz,fnz,rnz,mtxAp->el_size);
+					if(OJA)
+						RSB_IA_MEMCPY_H(OJA,JA,zoff+nnz,fnz,rnz,coff+joff);
+					if(OIA)
+						RSB_IA_MEMCPY_H(OIA,IA,zoff+nnz,fnz,rnz,roff+ioff);
+					nnz += lnz-fnz;
+				}
+			}
+		}
+		else
+		{
+			RSB_DECLARE_CONST_FULLCOO_ARRAYS_FROM_MATRIX(IA,JA,mtxAp)
+			nnz0 += rsb__nnz_split_coo_bsearch(IA+nnz0,fr,nnz1-nnz0);
+			nnz1 = nnz0+rsb__nnz_split_coo_bsearch(IA+nnz0,lr+1,nnz1-nnz0);
+			if(nnz1-nnz0)
+			{
+				rsb_nnz_idx_t fnz = 0, lnz = 0, rnz = 0;
+				for(i=fr;RSB_LIKELY(i<=lr);++i)
+				{
+					fnz = lnz;
+					lnz = nnz1;
+					rnz = lnz-fnz;
+					fnz = fnz+rsb__nnz_split_coo_bsearch(IA+fnz,i,rnz);
+					rnz = lnz-fnz;
+					lnz = fnz+rsb__nnz_split_coo_bsearch(IA+fnz,i+1,rnz);
+					rnz = lnz-fnz;
+					fnz = fnz+rsb__nnz_split_coo_bsearch(JA+fnz,fc,rnz);
+					rnz = lnz-fnz;
+					lnz = fnz+rsb__nnz_split_coo_bsearch(JA+fnz,lc+1,rnz);
+
+					if(OVA)
+						RSB_VA_MEMCPY(OVA,VA,zoff+nnz,fnz,rnz,mtxAp->el_size);
+
+					if(OJA)
+						RSB_IA_MEMCPY(OJA,JA,zoff+nnz,fnz,rnz,coff+joff);
+					if(OIA)
+						RSB_IA_MEMCPY(OIA,IA,zoff+nnz,fnz,rnz,roff+ioff);
+					nnz += lnz-fnz;
+				}
+			}
+		}
+	}
+	else
+	if(rsb__is_csr_matrix(mtxAp))
+	{
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,(RSB_FLAG_USE_HALFWORD_INDICES)))
+		{
+			RSB_DECLARE_CONST_HALFCSR_ARRAYS_FROM_MATRIX(PA,JA,mtxAp)
+			for(i=fr;RSB_LIKELY(i<=lr);++i)
+			{
+				rsb_nnz_idx_t lnz = PA[i+1],fnz = PA[i],rnz = lnz-fnz;
+				if(rnz)
+				{
+					lnz = fnz+rsb__nnz_split_hcoo_bsearch(JA+fnz,lc+1,rnz);
+					fnz = fnz+rsb__nnz_split_hcoo_bsearch(JA+fnz,fc,rnz);
+					rnz = lnz-fnz;
+					if(OVA)
+						RSB_VA_MEMCPY(OVA,VA,zoff+nnz,fnz,rnz,mtxAp->el_size);
+					if(OJA)
+						RSB_IA_MEMCPY_H(OJA,JA,zoff+nnz,fnz,rnz,coff+joff);
+					if(OIA)
+						rsb__util_coo_array_set(OIA+(zoff+nnz),rnz,i+roff+ioff);
+					nnz += rnz;
+				}
+			}
+		}
+		else
+		{
+			RSB_DECLARE_CONST_FULLCSR_ARRAYS_FROM_MATRIX(PA,JA,mtxAp)
+			for(i=fr;RSB_LIKELY(i<=lr);++i)
+			{
+				rsb_nnz_idx_t lnz = PA[i+1],fnz = PA[i],rnz = lnz-fnz;
+				if(rnz)
+				{
+					lnz = fnz+rsb__nnz_split_coo_bsearch(JA+fnz,lc+1,rnz);
+					fnz = fnz+rsb__nnz_split_coo_bsearch(JA+fnz,fc,rnz);
+					rnz = lnz-fnz;
+					if(OVA)
+						RSB_VA_MEMCPY(OVA,VA,zoff+nnz,fnz,rnz,mtxAp->el_size);
+					if(OJA)
+						RSB_IA_MEMCPY(OJA,JA,zoff+nnz,fnz,rnz,coff+joff);
+					if(OIA)
+						rsb__util_coo_array_set(OIA+(zoff+nnz),rnz,i+roff+ioff);
+					nnz += rnz;
+				}
+			}
+		}
+	}
+	else
+	{
+		errval = RSB_ERR_INTERNAL_ERROR;
+		RSB_PERR_GOTO(err,RSB_ERRM_EM);
+	}
+
+
+	if(IREN)
+		rsb__util_coo_array_renumber(OIA+zoff,IREN,nnz,flags,flags,flags);
+	if(JREN)
+		rsb__util_coo_array_renumber(OJA+zoff,JREN,nnz,flags,flags,flags);
+err:
+	if(rnz)
+		*rnz += nnz;
+
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_get_block_sparse(const struct rsb_mtx_t * mtxAp, void* VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t fr, rsb_coo_idx_t lr, rsb_coo_idx_t fc, rsb_coo_idx_t lc, rsb_coo_idx_t * IREN, rsb_coo_idx_t * JREN, rsb_nnz_idx_t *rnz, rsb_flags_t flags )
+{
+	/*! 
+	 * \ingroup gr_internals
+	 *
+	 * FIXME: IREN/JREN untested
+	 * */
+	rsb_nnz_idx_t nnz = 0;
+	rsb_coo_idx_t ioff = 0,joff = 0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	const struct rsb_mtx_t * submatrix = NULL;
+
+#if RSB_ALLOW_ZERO_DIM
+	if(RSB_ANY_MTX_DIM_ZERO(mtxAp))
+		goto ret; /* FIXME: skipping further error checks */
+#endif
+
+	if(mtxAp == NULL)
+	{
+		errval = RSB_ERR_BADARGS;
+		goto ret;
+	}
+
+	if( flags & RSB_FLAG_FORTRAN_INDICES_INTERFACE )
+		fr--,lr--,fc--,lc--,ioff++,joff++;
+
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t smi;
+		//#pragma omp parallel reduction(|:errval,+,nnz) shared(mtxAp)  RSB_NTC
+		RSB_SUBMATRIX_FOREACH_LEAF(mtxAp,submatrix,smi)
+		if(RSB_SUBMATRIX_INTERSECTS_BOX(submatrix,fr,lr,fc,lc))
+		{
+      			const rsb_coo_idx_t fri = RSB_SUBMATRIX_ROWS_INTERSECTION_FIRST(submatrix,fr);
+			const rsb_coo_idx_t lri = RSB_SUBMATRIX_ROWS_INTERSECTION_LAST(submatrix,lr);
+      			const rsb_coo_idx_t fci = RSB_SUBMATRIX_COLS_INTERSECTION_FIRST(submatrix,fc);
+			const rsb_coo_idx_t lci = RSB_SUBMATRIX_COLS_INTERSECTION_LAST(submatrix,lc);
+			errval = rsb__do_get_block_sparse_leaf(submatrix,VA,fri,lri,fci,lci,IA,JA,IREN,JREN,&nnz,ioff,joff,flags);
+		}
+	}
+	else
+		errval = rsb__do_get_block_sparse_leaf(mtxAp,VA,fr,lr,fc,lc,IA,JA,IREN,JREN,&nnz,ioff,joff,flags);
+ret:
+	if(rnz)
+		*rnz = nnz;
+
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_nnz_idx_t rsb__do_get_block_nnz(const struct rsb_mtx_t *mtxAp, rsb_blk_idx_t fr, rsb_blk_idx_t lr, rsb_blk_idx_t fc, rsb_blk_idx_t lc, rsb_flags_t flags, rsb_err_t * errvalp)
+{
+	/*! 
+	 * \ingroup gr_internals
+	 * */
+	rsb_nnz_idx_t nnz = 0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	errval = rsb__do_get_block_sparse(mtxAp,NULL,NULL,NULL,fr,lr,fc,lc,NULL,NULL,&nnz,flags);
+	RSB_CONDITIONAL_ERRPSET(errvalp,errval);
+	return nnz;
+}
+
+/* @endcond */
diff --git a/rsb_get.h b/rsb_get.h
new file mode 100644
index 0000000..a9249d1
--- /dev/null
+++ b/rsb_get.h
@@ -0,0 +1,67 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This header file contains matrix getter functions' prototypes.
+ * */
+#ifndef RSB_GET_H_INCLUDED
+#define RSB_GET_H_INCLUDED
+
+#include "rsb_internals.h"
+
+rsb_err_t rsb__dodo_get_csr(const struct rsb_mtx_t *mtxAp, rsb_byte_t ** VA, rsb_nnz_idx_t ** RP, rsb_coo_idx_t ** JA);
+rsb_err_t rsb__do_get_csc(const struct rsb_mtx_t *mtxAp, rsb_byte_t ** VA, rsb_nnz_idx_t ** RP, rsb_coo_idx_t ** IA);
+rsb_err_t rsb__do_get_coo(const struct rsb_mtx_t *mtxAp, rsb_byte_t ** VA, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, rsb_flags_t flags);
+rsb_err_t rsb__do_get_coo_noalloc(const struct rsb_mtx_t *mtxAp, rsb_byte_t * VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t * nnzp, rsb_flags_t flags );
+rsb_err_t rsb__do_get_row_dense(const struct rsb_mtx_t *mtxAp, void * row, rsb_blk_idx_t rowindex);
+rsb_err_t rsb__do_get_rows_dense(const struct rsb_mtx_t *mtxAp, void * row, rsb_blk_idx_t fr, rsb_blk_idx_t lr, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t  *rnz, rsb_flags_t flags);
+rsb_err_t rsb__do_get_rows_sparse_rec(const struct rsb_mtx_t *mtxAp , void * RSB_RESTRICT VA , rsb_blk_idx_t fr, rsb_blk_idx_t lr, rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t * RSB_RESTRICT JA, rsb_nnz_idx_t * RSB_RESTRICT rnz, rsb_coo_idx_t ioff, rsb_coo_idx_t joff);
+rsb_err_t rsb_do_get_columns_sparse(const struct rsb_mtx_t *mtxAp , void * RSB_RESTRICT VA , rsb_blk_idx_t fr, rsb_blk_idx_t lr, rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t * RSB_RESTRICT JA, rsb_nnz_idx_t * RSB_RESTRICT CP, rsb_coo_idx_t ioff, rsb_coo_idx_t joff);
+rsb_err_t rsb_do_get_rows_nnz(const struct rsb_mtx_t *mtxAp, rsb_blk_idx_t fr, rsb_blk_idx_t lr, rsb_nnz_idx_t  *rnz);
+rsb_coo_idx_t rsb__mtx_strict_diagonal_size(const struct rsb_mtx_t *mtxAp);
+rsb_err_t rsb__do_infinity_norm(const struct rsb_mtx_t *mtxAp , void * infinity_norm, const rsb_bool_t do_testing, rsb_trans_t transA);
+double rsb__do_get_matrix_fillin(const struct rsb_mtx_t *mtxAp);
+rsb_nnz_idx_t rsb__do_get_matrix_nnz(const struct rsb_mtx_t *mtxAp);
+rsb_long_t rsb__submatrices(const struct rsb_mtx_t * mtxAp);
+rsb_err_t rsb__get_blocking_size(const struct rsb_mtx_t * mtxAp, rsb_blk_idx_t *brp, rsb_blk_idx_t *bcp);
+rsb_err_t rsb__get_physical_blocking_size(const struct rsb_mtx_t * mtxAp, rsb_blk_idx_t *brp, rsb_blk_idx_t *bcp);
+rsb_err_t rsb__get_blocking_size_as_row_major(const struct rsb_mtx_t * mtxAp, rsb_blk_idx_t *bMp, rsb_blk_idx_t *bmp);
+size_t rsb__do_get_max_blocksize(const struct rsb_mtx_t * mtxAp);
+void rsb__do_extract_nnz_from_block(void * blockpointer, void * VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t br, rsb_coo_idx_t bc, rsb_coo_idx_t baserow, rsb_coo_idx_t basecolumn, rsb_type_t typecode, size_t el_size, rsb_nnz_idx_t *nnzp );
+rsb_submatrix_idx_t rsb__get_recursive_matrix_depth(const struct rsb_mtx_t *mtxAp);
+rsb_flags_t rsb__get_symmetry_flag(const struct rsb_mtx_t *mtxAp);
+rsb_flags_t rsb__get_symmetry_type_flag(const struct rsb_mtx_t *mtxAp);
+rsb_flags_t rsb__get_hermitian_flag(const struct rsb_mtx_t *mtxAp);
+RSB_INLINE rsb_coo_idx_t rsb_do_get_rows_of(const struct rsb_mtx_t *mtxAp, rsb_trans_t transA);
+RSB_INLINE rsb_coo_idx_t rsb_do_get_columns_of(const struct rsb_mtx_t *mtxAp, rsb_trans_t transA);
+rsb_flags_t rsb__get_diagonal_type_flag(const struct rsb_mtx_t *mtxAp);
+rsb_err_t rsb__do_absolute_rows_sums( const struct rsb_mtx_t * mtxAp , void * d);
+rsb_err_t rsb__do_absolute_columns_sums( const struct rsb_mtx_t * mtxAp , void * d);
+rsb_err_t rsb__do_get_block_sparse_pattern(const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t fr, rsb_coo_idx_t lr, rsb_coo_idx_t fc, rsb_coo_idx_t lc, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t * IREN, rsb_coo_idx_t * JREN, rsb_nnz_idx_t *rnz, rsb_flags_t flags );
+rsb_err_t rsb__do_get_block_sparse(const struct rsb_mtx_t * mtxAp, void* VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t fr, rsb_coo_idx_t lr, rsb_coo_idx_t fc, rsb_coo_idx_t lc, rsb_coo_idx_t * IREN, rsb_coo_idx_t * JREN, rsb_nnz_idx_t *rnz, rsb_flags_t flags );
+rsb_nnz_idx_t rsb__do_get_block_nnz(const struct rsb_mtx_t *mtxAp, rsb_blk_idx_t fr, rsb_blk_idx_t lr, rsb_blk_idx_t fc, rsb_blk_idx_t lc, rsb_flags_t flags, rsb_err_t * errvalp);
+
+#endif /* RSB_GET_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_idx.c b/rsb_idx.c
new file mode 100644
index 0000000..886c305
--- /dev/null
+++ b/rsb_idx.c
@@ -0,0 +1,904 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains functions for COO handling and checking.
+ * */
+
+#include "rsb_internals.h"
+
+#define RSB_LIKELY_OMP(EXP) EXP
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+void rsb__util_nnz_array_set_sequence(rsb_nnz_idx_t * p, rsb_nnz_idx_t n, rsb_nnz_idx_t o, rsb_nnz_idx_t i)
+{
+	/*!
+		\ingroup gr_internals
+		TODO: rsb__util_nnz_array_set_sequence -> rsb__nnz_idx_set_sequence
+	*/
+	register rsb_nnz_idx_t k;
+
+	RSB_DEBUG_ASSERT(p || n==0);
+	RSB_DEBUG_ASSERT(o>=0);
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(n));
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(o));
+
+	for(k=0;RSB_LIKELY(k<n);++k)
+	{
+		p[k] = o+k*i;
+	}
+}
+
+void rsb__util_coo_array_set_sequence(rsb_coo_idx_t * p, rsb_nnz_idx_t n, rsb_coo_idx_t o, rsb_coo_idx_t i)
+{
+	/*!
+		\ingroup gr_internals
+		TODO: rsb__util_coo_array_set_sequence -> rsb__coo_idx_set_sequence
+	*/
+	register rsb_nnz_idx_t k;
+
+	RSB_DEBUG_ASSERT(p || n==0);
+	RSB_DEBUG_ASSERT(o>=0);
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(n));
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(o));
+
+	for(k=0;RSB_LIKELY(k<n);++k)
+	{
+		p[k] = o+k*i;
+	}
+}
+
+void rsb__util_nnz_array_set(rsb_nnz_idx_t * p, rsb_nnz_idx_t n, rsb_nnz_idx_t a)
+{
+	/*!
+		\ingroup gr_internals
+		Sets each entry of the given nonzero indices array to the value a.
+	*/
+	register rsb_nnz_idx_t k;
+
+	RSB_DEBUG_ASSERT(p || n==0);
+	RSB_DEBUG_ASSERT(a>=0);
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(n));
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(a));
+
+	for(k=0;RSB_LIKELY(k<n);++k)
+	{
+		p[k] = a;
+	}
+}
+
+void rsb__util_coo_array_set(rsb_coo_idx_t * p, rsb_nnz_idx_t n, rsb_coo_idx_t a)
+{
+	/*!
+		\ingroup gr_internals
+		Sets each entry of the given coordinate indices array to the value a.
+	*/
+	register rsb_nnz_idx_t k;
+
+	RSB_DEBUG_ASSERT(p || n==0);
+	RSB_DEBUG_ASSERT(a>=0);
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(n));
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(a) || a==RSB_MARKER_COO_VALUE);
+
+	for(k=0;RSB_LIKELY(k<n);++k)
+	{
+		p[k] = a;
+	}
+}
+
+void rsb__util_hcoo_array_copy_trans_add(rsb_coo_idx_t * d, const rsb_half_idx_t * s, rsb_nnz_idx_t n, rsb_coo_idx_t a)
+{
+	/*!
+		\ingroup gr_internals
+		Copies the halfword index array s to d, adding the offset a to each entry.
+	*/
+	register rsb_nnz_idx_t k;
+
+	RSB_DEBUG_ASSERT(s);
+	RSB_DEBUG_ASSERT(d);
+	RSB_DEBUG_ASSERT(a>=0);
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(n));
+
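+	/* The copy loop below is manually unrolled by four; the scalar tail
+	 * loop after it handles the remaining n%4 entries. */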
+	for(k=0;RSB_LIKELY(k+3<n);k+=4)
+	{
+		/* RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(d[k])); */
+		RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(a+s[k]));
+		d[k+0] = a+s[k+0];
+		d[k+1] = a+s[k+1];
+		d[k+2] = a+s[k+2];
+		d[k+3] = a+s[k+3];
+		RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(d[k]));
+	}
+	for(;(k<n);k+=1)
+		d[k] = s[k]+a;
+}
+
+void rsb__util_coo_array_copy_trans_add(rsb_coo_idx_t * d, const rsb_coo_idx_t * s, rsb_nnz_idx_t n, rsb_coo_idx_t a)
+{
+	/*!
+		\ingroup gr_internals
+		Copies the coordinate indices array s to d, adding the offset a to each entry.
+	*/
+	register rsb_nnz_idx_t k;
+
+	RSB_DEBUG_ASSERT(s);
+	RSB_DEBUG_ASSERT(d);
+	RSB_DEBUG_ASSERT(a>=0);
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(n));
+
+	for(k=0;RSB_LIKELY(k+3<n);k+=4)
+	{
+		/* RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(d[k])); */
+		RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(a+s[k]));
+		d[k+0] = a+s[k+0];
+		d[k+1] = a+s[k+1];
+		d[k+2] = a+s[k+2];
+		d[k+3] = a+s[k+3];
+		RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(d[k]));
+	}
+	for(;(k<n);k+=1)
+		d[k] = s[k]+a;
+}
+
+void rsb__util_coo_array_mul(rsb_coo_idx_t * p, rsb_nnz_idx_t n, rsb_coo_idx_t a)
+{
+	/*!
+		\ingroup gr_internals
+	*/
+	register rsb_nnz_idx_t k;
+
+	RSB_DEBUG_ASSERT(p || n==0);
+	RSB_DEBUG_ASSERT(a>=0);
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(n));
+
+	if(a)
+		for(k=0;RSB_LIKELY(k<n);++k)
+		{
+			RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(p[k]));
+			p[k] *= a;
+			RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(p[k]));
+		}
+}
+
+void rsb__util_coo_arrays_mul(rsb_coo_idx_t * p, rsb_coo_idx_t * q, rsb_coo_idx_t a, rsb_coo_idx_t b, rsb_nnz_idx_t n)
+{
+	/*!
+		TODO : document this.
+	*/
+	rsb__util_coo_array_mul(p,n,a);
+	rsb__util_coo_array_mul(q,n,b);
+}
+
+void rsb__util_coo_array_add(rsb_coo_idx_t * p, rsb_nnz_idx_t n, rsb_coo_idx_t a)
+{
+	/*!
+		\ingroup gr_internals
+		Adds a to each entry of the given matrix coordinate indices array.
+	*/
+	register rsb_nnz_idx_t k;
+
+	RSB_DEBUG_ASSERT(p || n==0);
+	/* RSB_DEBUG_ASSERT(a>=0); */
+	RSB_DEBUG_ASSERT(a>=-1); /* FIXME: -1 is sometimes necessary for Fortran cases ... */
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(n));
+
+	if(a)
+		for(k=0;RSB_LIKELY(k<n);++k)
+		{
+			RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(p[k]));
+			p[k] += a;
+			RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(p[k]));
+		}
+}
+
+void rsb__util_hcoo_array_add(rsb_half_idx_t * p, rsb_nnz_idx_t n, rsb_coo_idx_t a)
+{
+	/*!
+		\ingroup gr_internals
+		Adds a to each entry of the given halfword matrix coordinate indices array.
+	*/
+	register rsb_nnz_idx_t k;
+
+	RSB_DEBUG_ASSERT(p || n==0);
+	RSB_DEBUG_ASSERT(a>=0);
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(n));
+
+	if(a)
+		for(k=0;RSB_LIKELY(k<n);++k)
+		{
+			RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(p[k]));
+			p[k] += a;
+			RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(p[k]));
+		}
+}
+
+void rsb__util_coo_arrays_add(rsb_coo_idx_t * p, rsb_coo_idx_t * q, rsb_coo_idx_t a, rsb_coo_idx_t b, rsb_nnz_idx_t n)
+{
+	/*!
+		TODO : document
+	*/
+	rsb__util_coo_array_add(p,n,a);
+	rsb__util_coo_array_add(q,n,b);
+}
+
+#if 0
+void rsb_util_coo_arrays_sub(rsb_coo_idx_t * p, rsb_coo_idx_t * q, rsb_coo_idx_t a, rsb_coo_idx_t b, rsb_nnz_idx_t n)
+{
+	/*!
+		TODO : document
+	*/
+	rsb__util_coo_array_sub(p,n,a);
+	rsb__util_coo_array_sub(q,n,b);
+}
+#endif
+
+void rsb_util_nnz_array_add(rsb_nnz_idx_t * p, rsb_nnz_idx_t n, rsb_nnz_idx_t a)
+{
+	/*!
+		\ingroup gr_internals
+		Adds a to each entry of the given nonzero indices array.
+	*/
+	register rsb_nnz_idx_t k;
+
+	RSB_DEBUG_ASSERT(p || n==0);
+	RSB_DEBUG_ASSERT(a>=0);
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(n));
+
+	if(a)
+		for(k=0;RSB_LIKELY(k<n);++k)
+		{
+			p[k] += a;
+			RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(p[k]));
+		}
+}
+
+void rsb_util_nnz_array_sub(rsb_nnz_idx_t * p, rsb_nnz_idx_t n, rsb_nnz_idx_t s)
+{
+	/*!
+		\ingroup gr_internals
+		Subtracts s from each entry of the given nonzero indices array.
+	*/
+	register rsb_nnz_idx_t k;
+
+	RSB_DEBUG_ASSERT(p || n==0);
+	RSB_DEBUG_ASSERT(s>=0);
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(n));
+
+	if(s)
+		for(k=0;RSB_LIKELY(k<n);++k)
+		{
+			p[k] -= s;
+			RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(p[k]));
+		}
+}
+
+void rsb__util_coo_array_sub(rsb_coo_idx_t * p, rsb_nnz_idx_t n, rsb_coo_idx_t s)
+{
+	/*!
+		\ingroup gr_internals
+		Subtracts s from each entry of the given coordinate indices array.
+	*/
+	register rsb_nnz_idx_t k;
+
+	RSB_DEBUG_ASSERT(p || n==0);
+	RSB_DEBUG_ASSERT(s>=0);
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(n));
+
+	if(s)
+		for(k=0;RSB_LIKELY(k<n);++k)
+		{
+			p[k] -= s;
+			RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(p[k]));
+		}
+}
+
+void rsb__util_nnz_array_to_fortran_indices(rsb_nnz_idx_t * p, rsb_nnz_idx_t n)
+{
+	/*!
+		\ingroup gr_internals
+		Adds 1 to the given matrix coordinate indices array.
+	*/
+	rsb_util_nnz_array_add(p, n, 1);
+}
+
+void rsb__util_coo_array_to_fortran_indices(rsb_coo_idx_t * p, rsb_nnz_idx_t n)
+{
+	/*!
+		\ingroup gr_internals
+		Adds 1 to the given matrix coordinate indices array.
+	*/
+	rsb__util_coo_array_add(p, n, 1);
+}
+
+void rsb__util_coo_array_to_fortran_indices_parallel(rsb_coo_idx_t * p, rsb_nnz_idx_t n)
+{
+	/*!
+		\ingroup gr_internals
+		Adds 1 to the given matrix coordinate indices array.
+	*/
+	register rsb_nnz_idx_t k;
+	const rsb_nnz_idx_t mcs = RSB_MINIMUM_VECOP_OMP_CHUNK; 
+
+	RSB_DEBUG_ASSERT(p || n==0);
+	RSB_DEBUG_ASSERT(p>=0);
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(n));
+
+	if(p)
+	{
+		#pragma omp parallel for schedule(guided,mcs) shared(p)   RSB_NTC
+		for(k=0;RSB_LIKELY_OMP(k<n);++k)
+		{
+			RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(p[k]));
+			++p[k];
+			RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(p[k]));
+		}
+	}
+}
+
+void rsb__util_nnz_array_from_fortran_indices(rsb_nnz_idx_t * p, rsb_nnz_idx_t n)
+{
+	/*!
+		\ingroup gr_internals
+		Subtracts 1 from the given matrix coordinate indices array.
+	*/
+	rsb_util_nnz_array_sub(p, n, 1);
+}
+
+void rsb__util_coo_array_from_fortran_indices(rsb_coo_idx_t * p, rsb_nnz_idx_t n, rsb_bool_t want_parallel)
+{
+	/*!
+		\ingroup gr_internals
+		Subtracts 1 from the given matrix coordinate indices array.
+		TODO: parallelize
+	*/
+	register rsb_nnz_idx_t k;
+	const rsb_nnz_idx_t mcs = RSB_MINIMUM_VECOP_OMP_CHUNK; 
+
+	RSB_DEBUG_ASSERT(p || n==0);
+	RSB_DEBUG_ASSERT(p>=0);
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(n));
+
+	if(!p)
+		return;
+
+	if(want_parallel)
+	{
+		#pragma omp parallel for schedule(guided,mcs) shared(p)   RSB_NTC
+		for(k=0;RSB_LIKELY_OMP(k<n);++k)
+		{
+			RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(p[k]));
+			--p[k];
+			RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(p[k]));
+		}
+	}
+	else
+		rsb__util_coo_array_sub(p, n, 1);
+}
+
+rsb_flags_t rsb__util_coo_determine_uplo_flags(const rsb_coo_idx_t * RSB_RESTRICT IA, const rsb_coo_idx_t * RSB_RESTRICT JA, rsb_nnz_idx_t nnz)
+{
+	/*!
+		\ingroup gr_internals
+	*/
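+	/*
+	 * The scan below starts by assuming both the UPPER and the LOWER
+	 * flag; the first strictly upper element deletes LOWER, the first
+	 * strictly lower element deletes UPPER. E.g. (illustrative):
+	 * IA={0,1}, JA={1,0} retains neither; IA={0,1}, JA={0,0} retains
+	 * LOWER only.
+	 */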
+	rsb_nnz_idx_t n;
+	const rsb_flags_t tflags = RSB_FLAG_UPPER_TRIANGULAR|RSB_FLAG_LOWER_TRIANGULAR;
+	rsb_flags_t flags = tflags;
+	
+	for(n=0;n<nnz ;++n)
+	if(IA[n]<JA[n])
+	{
+		RSB_DO_FLAG_DEL(flags,RSB_FLAG_LOWER);
+		for(;RSB_LIKELY(n<nnz) ;++n)
+		if(IA[n]>JA[n])
+		{
+			RSB_DO_FLAG_DEL(flags,RSB_FLAG_UPPER);
+			goto done;
+		}
+	}
+	else
+	if(IA[n]>JA[n])
+	{
+		RSB_DO_FLAG_DEL(flags,RSB_FLAG_UPPER);
+		for(;RSB_LIKELY(n<nnz) ;++n)
+		if(IA[n]<JA[n])
+		{
+			RSB_DO_FLAG_DEL(flags,RSB_FLAG_LOWER);
+			goto done;
+		}
+	}
+done:
+	if((flags&tflags)==RSB_FLAG_TRIANGULAR )
+		RSB_DO_FLAG_DEL(flags,RSB_FLAG_TRIANGULAR);
+	return flags;
+}
+
+rsb_bool_t rsb__util_coo_check_if_triangle_non_empty(const rsb_coo_idx_t * RSB_RESTRICT IA, const rsb_coo_idx_t * RSB_RESTRICT JA, rsb_nnz_idx_t nnz, rsb_flags_t flags)
+{
+	/*!
+		\ingroup gr_internals
+	*/
+	rsb_nnz_idx_t n;
+	
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER))
+		for(n=0;RSB_LIKELY(n<nnz);++n)
+			if(IA[n]<JA[n])
+				return RSB_BOOL_TRUE;
+
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER))
+		for(n=0;RSB_LIKELY(n<nnz);++n)
+			if(IA[n]>JA[n])
+				return RSB_BOOL_TRUE;
+
+	return RSB_BOOL_FALSE;
+}
+
+void rsb__util_coo_upper_to_lower_symmetric(rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz)
+{
+	/*!
+		\ingroup gr_internals
+		A transpose function.
+	*/
+	rsb_nnz_idx_t n;
+	
+	for(n=0;n<nnz;++n)
+		if(IA[n]<JA[n])
+			RSB_SWAP(rsb_coo_idx_t,IA[n],JA[n]);
+}
+
+void rsb__util_coo_lower_to_upper_symmetric(rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz)
+{
+	/*!
+		\ingroup gr_internals
+	*/
+	rsb__util_coo_upper_to_lower_symmetric(JA, IA, nnz);
+}
+
+rsb_err_t rsb__util_coo_check_if_has_diagonal_elements(const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t m, rsb_bool_t *has_diagonal_elements)
+{
+	/*!
+		\ingroup gr_internals
+	*/
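+	/*
+	 * Strategy sketch: each diagonal element (IA[n]==JA[n]) marks one
+	 * bit in an m-bit bitmap; afterwards, any unset bit reveals a
+	 * missing diagonal element. Duplicates are tolerated, which is why
+	 * a bitmap is used instead of a plain counter.
+	 */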
+	rsb_bitmap_data_t * bmap = NULL;
+	rsb_nnz_idx_t n,cnnz = 0;
+
+	if(RSB_INVALID_NNZ_INDEX(nnz) || RSB_INVALID_COO_INDEX(m) || !IA || !JA || !has_diagonal_elements)
+		return RSB_ERR_BADARGS;
+
+	if(m>nnz)
+		goto missing_diagonal_element; /* trivially */
+	/* we allow for duplicates, though (instead a count would be enough) */
+
+	bmap = rsb__allocate_bitmap(1,m);
+
+	if(!bmap)
+		return RSB_ERR_ENOMEM;
+
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		if(RSB_UNLIKELY(IA[n]==JA[n]))
+		{
+			if( IA[n]<0 || IA[n]>=m || JA[n]<0 || JA[n]>=m )
+			{
+				RSB_ERROR(RSB_ERRMSG_BADCOO"\n");
+				goto badinput;
+			}
+			RSB_BITMAP_SET(bmap,1,nnz,0,IA[n]);
+			++cnnz;
+		}
+	}
+#if 1
+	if(cnnz<m)
+	{
+		RSB_STDERR("missing %zd diagonal elements\n",(size_t)(m-cnnz));
+	}
+#endif
+	for(n=0;RSB_LIKELY(n<m);++n)
+		if(!RSB_BITMAP_GET(bmap,1,nnz,0,n))
+		{
+#if 1
+			RSB_STDERR("missing element %zd\n",(size_t)n);
+#endif
+			goto missing_diagonal_element;
+		}
+
+ok:
+	RSB_CONDITIONAL_FREE(bmap);
+	*has_diagonal_elements = RSB_BOOL_TRUE;
+	return RSB_ERR_NO_ERROR;
+
+missing_diagonal_element:
+	RSB_CONDITIONAL_FREE(bmap);
+	*has_diagonal_elements = RSB_BOOL_FALSE;
+	return RSB_ERR_NO_ERROR;
+
+badinput:
+	RSB_CONDITIONAL_FREE(bmap);
+	return RSB_ERR_BADARGS;
+}
+
+rsb_bool_t rsb__util_is_halfword_coo_array_sorted_up(const rsb_half_idx_t* p, const rsb_nnz_idx_t n)
+{
+	/*!
+		\ingroup gr_internals
+	*/
+	rsb_nnz_idx_t i;
+	if(n<2)
+		return RSB_BOOL_TRUE;
+	for(i=1;RSB_LIKELY(i<n);++i)
+		if(RSB_UNLIKELY(p[i-1]>=p[i]))
+			return RSB_BOOL_FALSE;
+	return RSB_BOOL_TRUE;
+}
+
+rsb_bool_t rsb__util_is_halfword_coo_array_sorted_up_partial_order(const rsb_half_idx_t * p, const rsb_nnz_idx_t n)
+{
+	/*!
+		\ingroup gr_internals
+	*/
+	rsb_nnz_idx_t i;
+	if(n<2)
+		return RSB_BOOL_TRUE;
+	for(i=1;RSB_LIKELY(i<n);++i)
+		if(RSB_UNLIKELY(p[i-1]>p[i]))
+		{
+			/* RSB_STDOUT("n=%d, p[%d-1]>p[%d] -- %d > %d\n",n,i,i,p[i-1],p[i]); */
+			return RSB_BOOL_FALSE;
+		}
+	return RSB_BOOL_TRUE;
+}
+
+rsb_bool_t rsb__util_is_nnz_array_sorted_up_partial_order(const rsb_nnz_idx_t * p, const rsb_nnz_idx_t n)
+{
+	/*!
+		\ingroup gr_internals
+	*/
+	rsb_nnz_idx_t i;
+	if(n<2)
+		goto yes;
+	for(i=1;RSB_LIKELY(i<n);++i)
+		if(RSB_UNLIKELY(p[i-1]>p[i]))
+		{
+			/* RSB_STDOUT("n=%d, p[%d-1]>p[%d] -- %d > %d\n",n,i,i,p[i-1],p[i]); */
+			goto no;
+		}
+yes:
+	return RSB_BOOL_TRUE;
+no:
+	/* RSB_STDOUT("%d=p[%d]>=p[%d]=%d\n",p[i-1],i-1,i,p[i]); */
+	return RSB_BOOL_FALSE;
+}
+
+rsb_bool_t rsb__util_is_coo_array_sorted_up_partial_order(const rsb_nnz_idx_t * p, const rsb_nnz_idx_t n)
+{
+	return rsb__util_is_nnz_array_sorted_up_partial_order(p, n);
+}
+
+rsb_bool_t rsb__util_is_coo_array_sorted_up(const rsb_coo_idx_t * p, const rsb_nnz_idx_t n)
+{
+	/*!
+		\ingroup gr_internals
+	*/
+	rsb_nnz_idx_t i;
+	if(n<2)
+		goto yes;
+	for(i=1;RSB_LIKELY(i<n);++i)
+		if(RSB_UNLIKELY(p[i-1]>=p[i]))
+			goto no;
+yes:
+	return RSB_BOOL_TRUE;
+no:
+	/* TODO: if this becomes a debugging function, one can use a RSB_ERROR printout instead */
+	/* RSB_STDOUT("%d=p[%d]>=p[%d]=%d\n",p[i-1],i-1,i,p[i]); */
+	return RSB_BOOL_FALSE;
+}
+
+rsb_bool_t rsb__util_is_nnz_array_sorted_up(const rsb_nnz_idx_t * p, const rsb_nnz_idx_t n)
+{
+	/*!
+		\ingroup gr_internals
+	*/
+	rsb_nnz_idx_t i;
+	if(n<2)
+		goto yes;
+	for(i=1;RSB_LIKELY(i<n);++i)
+		if(RSB_UNLIKELY(p[i-1]>=p[i]))
+			goto no;
+yes:
+	return RSB_BOOL_TRUE;
+no:
+	return RSB_BOOL_FALSE;
+}
+
+void rsb__util_nnz_array_add_array(rsb_nnz_idx_t * p, const rsb_nnz_idx_t * q, rsb_nnz_idx_t n)
+{
+	/*!
+		\ingroup gr_internals
+		Adds the entries of the array q elementwise to the array p.
+	*/
+	register rsb_nnz_idx_t k;
+
+	RSB_DEBUG_ASSERT(p || n==0);
+	RSB_DEBUG_ASSERT(q || n==0);
+	RSB_DEBUG_ASSERT(n>=0);
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(n));
+
+	for(k=0;RSB_LIKELY(k<n);++k)
+	{
+		p[k] += q[k];
+	}
+}
+
+rsb_coo_idx_t rsb__util_find_max_index(const rsb_nnz_idx_t * p, rsb_nnz_idx_t n)
+{
+	/*!
+		\ingroup gr_internals
+	*/
+	rsb_nnz_idx_t i,l=0;
+	rsb_coo_idx_t m;
+	if(n<1)
+		goto ret;
+	m = p[l];
+	for(i=1;RSB_LIKELY(i<n);++i)
+		if(p[i]>m)
+			m = p[i], l = i;
+ret:
+	return l;
+}
+
+rsb_coo_idx_t rsb__util_find_min_index(const rsb_nnz_idx_t * p, rsb_nnz_idx_t n)
+{
+	/*!
+		\ingroup gr_internals
+	*/
+	rsb_nnz_idx_t i,l=0;
+	rsb_coo_idx_t m;
+	if(n<1)
+		goto ret;
+	m = p[l];
+	for(i=1;RSB_LIKELY(i<n);++i)
+		if(p[i]<m)
+			m = p[i], l = i;
+ret:
+	return l;
+}
+
+rsb_coo_idx_t rsb__util_find_max_index_val(const rsb_nnz_idx_t * p, rsb_nnz_idx_t n)
+{
+	/*!
+		\ingroup gr_internals
+	*/
+	rsb_nnz_idx_t i;
+	rsb_coo_idx_t m;
+	if(n<1)
+		return RSB_MARKER_COO_VALUE;
+	m = p[0];
+	for(i=1;RSB_LIKELY(i<n);++i)
+		if(p[i]>m)
+			m = p[i];
+	return m;
+}
+
+rsb_coo_idx_t rsb__util_find_min_index_val(const rsb_nnz_idx_t * p, rsb_nnz_idx_t n)
+{
+	/*!
+		\ingroup gr_internals
+	*/
+	rsb_nnz_idx_t i;
+	rsb_coo_idx_t m;
+	if(n<1)
+		return RSB_MARKER_COO_VALUE;
+	m = p[0];
+	for(i=1;RSB_LIKELY(i<n);++i)
+		if(p[i]<m)
+			m = p[i];
+	return m;
+}
+
+void rsb__util_coo_array_renumber(rsb_coo_idx_t * a, rsb_coo_idx_t * iren, rsb_nnz_idx_t n, rsb_flags_t aflags, rsb_flags_t pflags, rsb_flags_t oflags)
+{
+	/*!
+		\ingroup gr_internals
+	*/
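+	/*
+	 * Illustrative example (hypothetical values): with iren = {10,20,30},
+	 * n = 2, a = {2,0} and no Fortran flags set, the loop below rewrites
+	 * a as {30,10}; the three flag arguments merely toggle one-based
+	 * indexing of the input array, of the renumbering array, and of the
+	 * output, respectively.
+	 */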
+	rsb_nnz_idx_t i = 0;
+	rsb_coo_idx_t ioff = 0,ooff = 0,oooff = 0;
+
+	if( aflags & RSB_FLAG_FORTRAN_INDICES_INTERFACE )
+		ioff = 1;
+	if( pflags & RSB_FLAG_FORTRAN_INDICES_INTERFACE )
+		ooff = 1;
+	if( oflags & RSB_FLAG_FORTRAN_INDICES_INTERFACE )
+		oooff = 1;
+
+	for(i=0;RSB_LIKELY(i<n);++i)
+		a[i] = iren[a[i]-ioff]-ooff+oooff;
+}
+
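+#if 0
+	/* Hedged usage sketch for the routine below (not compiled; values
+	 * illustrative): compressing COO row indices into CSR-style row
+	 * pointers in place, assuming ta has room for at least m+1 entries. */
+	rsb_coo_idx_t ta[] = {0,0,1,2,2,2};	/* row index of each of nz=6 nonzeros, m=3 rows */
+	rsb_err_t errval = rsb__util_compress_to_row_pointers_array(NULL,6,3,RSB_FLAG_NOFLAGS,RSB_FLAG_NOFLAGS,ta);
+	/* on success, ta now begins with the m+1 row pointers {0,2,3,6} */
+#endif
+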
+rsb_err_t rsb__util_compress_to_row_pointers_array(rsb_coo_idx_t * RSB_RESTRICT pa, rsb_nnz_idx_t nz, rsb_coo_idx_t m, rsb_flags_t iflags, rsb_flags_t oflags, rsb_coo_idx_t * ta)
+{
+	/* 
+	 * Note that this routine invokes OpenMP.
+	 * Requires m+1 temporary space.
+	 * TODO: rsb__util_compress_to_row_pointers_array -> rsb__idx_fia2fpa
+	 * */
+	rsb_nnz_idx_t i;
+	rsb_bool_t sa = RSB_BOOL_TRUE;
+	rsb_nnz_idx_t ifo = ( iflags & RSB_FLAG_FORTRAN_INDICES_INTERFACE )?1:0;
+	rsb_nnz_idx_t ofo = ( oflags & RSB_FLAG_FORTRAN_INDICES_INTERFACE )?1:0;
+
+	if(!ta)
+		return RSB_ERR_BADARGS;
+	if(!pa)
+		pa = rsb__calloc((m+1)*sizeof(rsb_coo_idx_t));
+	else 
+		sa = RSB_BOOL_FALSE,
+		RSB_BZERO(pa,(m+1)*sizeof(rsb_coo_idx_t));
+	if(!pa)
+		goto err;
+	for(i=0;RSB_LIKELY(i<nz);++i)
+		pa[1+ta[i]-ifo]++;
+	for(i=0;RSB_LIKELY(i<m );++i)
+		pa[i+1] += pa[i]; /* TODO: need a prefix sum routine */
+	if(ofo)
+		for(i=0;RSB_LIKELY(i<m+1);++i)
+			pa[i]++;
+
+	RSB_COA_MEMCPY_parallel(ta,pa,0,0,m+1);
+	if(sa)
+		RSB_CONDITIONAL_FREE(pa);
+	return RSB_ERR_NO_ERROR;
+err:
+	return RSB_ERR_ENOMEM; 
+}
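+/* An illustrative sketch (an editor's addition, not upstream code): the core
+ * of the compression above is a histogram followed by a prefix sum. With
+ * plain C ints and 0-based indexing, sorted COO row indices become CSR-style
+ * row pointers like so: */
+#if 0
+#include <stdio.h>
+int rsb_example_coo_to_csr(void) /* hypothetical demo function */
+{
+	const int m = 4, nz = 6;
+	const int ta[6] = { 0, 0, 1, 3, 3, 3 }; /* sorted row indices      */
+	int pa[4+1] = { 0 };                    /* m+1 row pointers        */
+	int i;
+
+	for(i = 0; i < nz; ++i)
+		pa[1 + ta[i]]++;                /* row occupancy histogram */
+	for(i = 0; i < m; ++i)
+		pa[i + 1] += pa[i];             /* prefix sum              */
+	for(i = 0; i <= m; ++i)
+		printf("%d ",pa[i]);            /* prints: 0 2 3 3 6       */
+	printf("\n");
+	return 0;
+}
+#endif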
+
+rsb_err_t rsb__util_uncompress_row_pointers_array(const rsb_coo_idx_t * pa, rsb_nnz_idx_t n, rsb_flags_t iflags, rsb_flags_t oflags, rsb_coo_idx_t * ta)
+{
+	/*
+	 TODO: write a version to exploit no-pointer-aliasing (if available)
+	 */
+	rsb_nnz_idx_t i,nz;
+	rsb_bool_t sa = RSB_BOOL_TRUE; /* same array */
+	rsb_nnz_idx_t ifo = ( iflags & RSB_FLAG_FORTRAN_INDICES_INTERFACE )?1:0;
+	rsb_nnz_idx_t ofo = ( oflags & RSB_FLAG_FORTRAN_INDICES_INTERFACE )?1:0;
+	rsb_coo_idx_t * ota = ta, *tmp = NULL;
+
+	if(!pa || !ta)
+	{
+		RSB_ERROR(RSB_ERRM_ES);
+		return RSB_ERR_BADARGS;
+	}
+	nz = pa[n]-ifo;
+	if(nz==0)
+		goto ret;
+	if(ta==pa)
+	{
+		if(RSB_LIKELY(n+1<nz))
+			pa = tmp = rsb__clone_area(pa,sizeof(rsb_coo_idx_t)*(n+1));
+		else
+			tmp = ta = rsb__clone_area(pa,sizeof(rsb_coo_idx_t)* nz  );
+	}
+	else 
+		sa = RSB_BOOL_FALSE;
+	if((!ta) || (!pa))
+		goto err;
+	/* TODO: this shall be parallel! */
+	for(i=0;RSB_LIKELY(i<n);++i)
+		rsb__util_coo_array_set(ta+(pa[i]-ifo),(pa[i+1]-pa[i]),i+ofo);
+	if(sa && !(n+1<nz))
+		RSB_COA_MEMCPY_parallel(ota,ta,0,0,nz);
+	RSB_CONDITIONAL_FREE(tmp);
+ret:
+	return RSB_ERR_NO_ERROR;
+err:
+	return RSB_ERR_ENOMEM; 
+}
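+/* The inverse direction, sketched under the same assumptions: row pointers
+ * expand back to one row index per nonzero, segment by segment, as done
+ * above via rsb__util_coo_array_set. */
+#if 0
+int rsb_example_csr_to_coo(void) /* hypothetical demo function */
+{
+	const int m = 4;
+	const int pa[4+1] = { 0, 2, 3, 3, 6 }; /* row pointers        */
+	int ta[6];                             /* nz = pa[m] = 6      */
+	int i, k;
+
+	for(i = 0; i < m; ++i)
+		for(k = pa[i]; k < pa[i+1]; ++k)
+			ta[k] = i;             /* yields 0 0 1 3 3 3  */
+	return ta[0];                          /* 0; no I/O needed    */
+}
+#endif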
+
+rsb_err_t rsb__debug_print_index_vector(const rsb_coo_idx_t * v1, size_t n){
+	/*!
+	 * Prints the index vector, one position : value pair per line (both printed 1-based).
+	 **/
+#if RSB_ALLOW_STDOUT
+	size_t i = 0;
+	int ioff = 1,voff = 1;
+	if(!v1)
+		return RSB_ERR_BADARGS;
+
+	for(i=0;i<n ;++i) 
+		RSB_STDOUT("%zd : %d \n",(rsb_printf_int_t)(i+ioff),v1[i]+voff);
+	return RSB_ERR_NO_ERROR;
+#else
+	return RSB_ERR_UNSUPPORTED_FEATURE;
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+rsb_err_t rsb__debug_print_index_vectors_diff(const rsb_coo_idx_t * v1, const rsb_coo_idx_t * v2, size_t n, int onlyfirst){
+	/*!
+	 * Prints the positions where the two index vectors differ (at most the first onlyfirst ones, when onlyfirst>0).
+	 **/
+#if RSB_ALLOW_STDOUT
+	size_t i,differing = 0;
+	if(!v1 || !v2)return RSB_ERR_BADARGS;
+
+	RSB_STDERR("\t indices vectors diff :\n");
+	
+		for(i=0;i<n ;++i) 
+			if(v1[i]!=v2[i]){
+		differing++;
+		if((onlyfirst==0)||(onlyfirst>differing))
+		RSB_STDOUT("%zd : %d %d \n",(rsb_printf_int_t)i,						v1[i],v2[i]		);
+			}
+		if(differing>onlyfirst)RSB_STDOUT("...and %zd more ...\n",(rsb_printf_int_t)(differing-onlyfirst));
+	return RSB_ERR_NO_ERROR;
+#else
+	return RSB_ERR_UNSUPPORTED_FEATURE;
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+rsb_err_t rsb__util_find_extremal_half_index_val(const rsb_half_idx_t * RSB_RESTRICT p, rsb_nnz_idx_t n, rsb_coo_idx_t lb, rsb_coo_idx_t ub, rsb_half_idx_t *lf, rsb_half_idx_t * RSB_RESTRICT uf)
+{
+	/* TODO: remove the useless 'ub' argument */
+	/* TODO: this is a naive implementation; need a better one */
+	rsb_half_idx_t vm = RSB_MAX_VALUE_FOR_TYPE(rsb_half_idx_t),vM = 0;
+	rsb_nnz_idx_t i = 0;
+
+	for(i=0;i<n;++i)
+		vm = RSB_MIN(vm,p[i]), vM = RSB_MAX(vM,p[i]);
+	if(lf)*lf = vm; if(uf)*uf = vM;
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__util_find_extremal_full_index_val(const rsb_coo_idx_t * RSB_RESTRICT p, rsb_nnz_idx_t n, rsb_coo_idx_t lb, rsb_coo_idx_t ub, rsb_coo_idx_t * RSB_RESTRICT lf, rsb_coo_idx_t * RSB_RESTRICT uf)
+{
+	/* TODO: remove the useless 'ub' argument */
+	/* TODO: this is a naive implementation; need a better one */
+	rsb_coo_idx_t vm = RSB_MAX_VALUE_FOR_TYPE(rsb_coo_idx_t),vM = 0;
+	rsb_nnz_idx_t i = 0;
+
+	for(i=0;i<n;++i)
+		vm = RSB_MIN(vm,p[i]), vM = RSB_MAX(vM,p[i]);
+	RSB_ASSIGN_IF_DP(lf,vm);
+	RSB_ASSIGN_IF_DP(uf,vM);
+	return RSB_ERR_NO_ERROR;
+
+}
+
+rsb_bool_t rsb__util_reverse_halfword_coo_array(rsb_half_idx_t* p, rsb_nnz_idx_t n)
+{
+	rsb_nnz_idx_t nzi;
+	--n;
+	for(nzi=0;nzi<(n+1)/2;++nzi)
+		RSB_SWAP(rsb_half_idx_t,p[n-nzi],p[nzi]); /* swap type matches the halfword element type */
+	return RSB_BOOL_TRUE;
+}
+
+rsb_bool_t rsb__util_reverse_fullword_coo_array(rsb_coo_idx_t* p, rsb_nnz_idx_t n)
+{
+	rsb_nnz_idx_t nzi;
+	--n;
+	for(nzi=0;nzi<(n+1)/2;++nzi)
+		RSB_SWAP(rsb_coo_idx_t,p[n-nzi],p[nzi]); /* swap type matches the fullword element type (a halfword temporary would truncate) */
+	return RSB_BOOL_TRUE;
+}
+
+/* @endcond */
diff --git a/rsb_idx.h b/rsb_idx.h
new file mode 100644
index 0000000..9b7238a
--- /dev/null
+++ b/rsb_idx.h
@@ -0,0 +1,84 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains functions for COO handling.
+ * */
+
+#ifndef RSB_IDX_H_INCLUDED
+#define RSB_IDX_H_INCLUDED
+
+#include "rsb_internals.h"
+
+void rsb__util_nnz_array_set_sequence(rsb_nnz_idx_t * p, rsb_nnz_idx_t n, rsb_nnz_idx_t o, rsb_nnz_idx_t i);
+void rsb__util_coo_array_set_sequence(rsb_coo_idx_t * p, rsb_nnz_idx_t n, rsb_coo_idx_t o, rsb_coo_idx_t i);
+void rsb__util_coo_array_set(rsb_coo_idx_t * p, rsb_nnz_idx_t n, rsb_coo_idx_t a);
+void rsb__util_nnz_array_set(rsb_nnz_idx_t * p, rsb_nnz_idx_t n, rsb_nnz_idx_t a);
+void rsb__util_coo_array_mul(rsb_coo_idx_t * p, rsb_nnz_idx_t n, rsb_coo_idx_t a);
+void rsb__util_coo_arrays_mul(rsb_coo_idx_t * p, rsb_coo_idx_t * q, rsb_coo_idx_t a, rsb_coo_idx_t b, rsb_nnz_idx_t n);
+void rsb__util_coo_array_add(rsb_coo_idx_t * p, rsb_nnz_idx_t n, rsb_coo_idx_t a);
+void rsb__util_hcoo_array_add(rsb_half_idx_t * p, rsb_nnz_idx_t n, rsb_coo_idx_t a);
+void rsb__util_coo_arrays_add(rsb_coo_idx_t * p, rsb_coo_idx_t * q, rsb_coo_idx_t a, rsb_coo_idx_t b, rsb_nnz_idx_t n);
+/* void rsb_util_coo_arrays_sub(rsb_coo_idx_t * p, rsb_coo_idx_t * q, rsb_coo_idx_t a, rsb_coo_idx_t b, rsb_nnz_idx_t n); */
+void rsb__util_nnz_array_add_array(rsb_nnz_idx_t * p, const rsb_nnz_idx_t * q, rsb_nnz_idx_t n);
+void rsb__util_coo_array_sub(rsb_coo_idx_t * p, rsb_nnz_idx_t n, rsb_coo_idx_t s);
+void rsb__util_coo_array_to_fortran_indices(rsb_coo_idx_t * p, rsb_nnz_idx_t n);
+void rsb__util_coo_array_to_fortran_indices_parallel(rsb_coo_idx_t * p, rsb_nnz_idx_t n);
+void rsb__util_coo_array_from_fortran_indices(rsb_coo_idx_t * p, rsb_nnz_idx_t n, rsb_bool_t want_parallel);
+void rsb__util_coo_upper_to_lower_symmetric(rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz);
+void rsb__util_coo_lower_to_upper_symmetric(rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz);
+void rsb__util_nnz_array_from_fortran_indices(rsb_coo_idx_t * p, rsb_nnz_idx_t n);
+void rsb__util_nnz_array_to_fortran_indices(rsb_coo_idx_t * p, rsb_nnz_idx_t n);
+rsb_bool_t rsb__util_coo_check_if_triangle_non_empty(const rsb_coo_idx_t * RSB_RESTRICT IA, const rsb_coo_idx_t * RSB_RESTRICT JA, rsb_nnz_idx_t nnz, rsb_flags_t flags);
+rsb_flags_t rsb__util_coo_determine_uplo_flags(const rsb_coo_idx_t * RSB_RESTRICT IA, const rsb_coo_idx_t * RSB_RESTRICT JA, rsb_nnz_idx_t nnz);
+rsb_err_t rsb__util_coo_check_if_has_diagonal_elements(const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t m, rsb_bool_t *has_diagonal_elements);
+void rsb__util_coo_array_copy_trans_add(rsb_coo_idx_t * d, const rsb_coo_idx_t * s, rsb_nnz_idx_t n, rsb_coo_idx_t a);
+void rsb__util_hcoo_array_copy_trans_add(rsb_coo_idx_t * d, const rsb_half_idx_t * s, rsb_nnz_idx_t n, rsb_coo_idx_t a);
+rsb_bool_t rsb__util_reverse_halfword_coo_array(rsb_half_idx_t* p, rsb_nnz_idx_t n);
+rsb_bool_t rsb__util_reverse_fullword_coo_array(rsb_coo_idx_t* p, rsb_nnz_idx_t n);
+rsb_bool_t rsb__util_is_coo_array_sorted_up_partial_order(const rsb_coo_idx_t * p, const rsb_nnz_idx_t n);
+rsb_bool_t rsb__util_is_halfword_coo_array_sorted_up_partial_order(const rsb_half_idx_t * p, const rsb_nnz_idx_t n);
+rsb_bool_t rsb__util_is_coo_array_sorted_up(const rsb_coo_idx_t * p, const rsb_nnz_idx_t n);
+rsb_bool_t rsb__util_is_halfword_coo_array_sorted_up(const rsb_half_idx_t* p, const rsb_nnz_idx_t n);
+rsb_bool_t rsb__util_is_nnz_array_sorted_up(const rsb_nnz_idx_t * p, const rsb_nnz_idx_t n);
+rsb_coo_idx_t rsb__util_find_max_index_val(const rsb_nnz_idx_t * p, rsb_nnz_idx_t n); /* return type matches the definition in rsb_idx.c */
+rsb_coo_idx_t rsb__util_find_min_index_val(const rsb_nnz_idx_t * p, rsb_nnz_idx_t n);
+rsb_err_t rsb__util_find_extremal_half_index_val(const rsb_half_idx_t * RSB_RESTRICT p, rsb_nnz_idx_t n, rsb_coo_idx_t lb, rsb_coo_idx_t ub, rsb_half_idx_t *lf, rsb_half_idx_t * RSB_RESTRICT uf);
+rsb_err_t rsb__util_find_extremal_full_index_val(const rsb_coo_idx_t * RSB_RESTRICT p, rsb_nnz_idx_t n, rsb_coo_idx_t lb, rsb_coo_idx_t ub, rsb_coo_idx_t * RSB_RESTRICT lf, rsb_coo_idx_t * RSB_RESTRICT uf);
+void rsb__util_coo_array_renumber(rsb_coo_idx_t * a, rsb_coo_idx_t * iren, rsb_nnz_idx_t n, rsb_flags_t aflags, rsb_flags_t pflags, rsb_flags_t oflags);
+rsb_err_t rsb__util_uncompress_row_pointers_array(const rsb_coo_idx_t * pa, rsb_nnz_idx_t n, rsb_flags_t iflags, rsb_flags_t oflags, rsb_coo_idx_t * ta);
+rsb_err_t rsb__util_compress_to_row_pointers_array(rsb_coo_idx_t * RSB_RESTRICT pa, rsb_nnz_idx_t nz, rsb_coo_idx_t m, rsb_flags_t iflags, rsb_flags_t oflags, rsb_coo_idx_t * ta);
+rsb_err_t rsb__debug_print_index_vector(const rsb_coo_idx_t * v1, size_t n);
+rsb_err_t rsb__debug_print_index_vectors_diff(const rsb_coo_idx_t * v1, const rsb_coo_idx_t * v2, size_t n, int onlyfirst);
+rsb_bool_t rsb__util_is_nnz_array_sorted_up_partial_order(const rsb_nnz_idx_t * p, const rsb_nnz_idx_t n);
+rsb_coo_idx_t rsb__util_find_max_index(const rsb_nnz_idx_t * p, rsb_nnz_idx_t n);
+rsb_coo_idx_t rsb__util_find_min_index(const rsb_nnz_idx_t * p, rsb_nnz_idx_t n);
+#define RSB_IA_MEMCPY(ID,IS,DOFF,SOFF,NNZ,I0) \
+	rsb__util_coo_array_copy_trans_add(((rsb_coo_idx_t*)(ID))+(DOFF),((rsb_coo_idx_t*)(IS))+(SOFF),NNZ,I0)
+#define RSB_IA_MEMCPY_H(ID,IS,DOFF,SOFF,NNZ,I0) \
+	rsb__util_hcoo_array_copy_trans_add(((rsb_coo_idx_t*)(ID))+(DOFF),((rsb_half_idx_t*)(IS))+(SOFF),NNZ,I0)
+
+#endif /* RSB_IDX_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_init.c b/rsb_init.c
new file mode 100644
index 0000000..5c9c3f7
--- /dev/null
+++ b/rsb_init.c
@@ -0,0 +1,808 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/**
+ * @file
+ * @brief Initialization code.
+ * @author Michele Martone
+ * */
+ 
+#include "rsb_do.h"
+#include "rsb_common.h"
+
+/* Using defined() inside a macro body has undefined behaviour once the macro is expanded in an #if, so the condition is evaluated here once. */
+#if defined(RSB_WANT_PERFORMANCE_COUNTERS) && (RSB_WANT_PERFORMANCE_COUNTERS>1)
+#define RSB_WANT_PERFORMANCE_COUNTERS_IN_RSB_INIT 1
+#else
+#define RSB_WANT_PERFORMANCE_COUNTERS_IN_RSB_INIT 0
+#endif
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+const rsb_char_t * rsb__init_get_mem_hierarchy_info_string(rsb_bool_t verbose)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * Determines the memory hierarchy info string.
+	 * First queries the environment for RSB_USER_SET_MEM_HIERARCHY_INFO.
+	 * If such a variable exists and is non-empty, it returns it.
+	 * If no such variable exists, it returns the corresponding preprocessor symbol, if defined.
+	 * Otherwise returns NULL.
+	 * */
+	rsb_char_t * usmhi = NULL;
+#ifdef RSB_HAVE_GETENV
+	if(verbose) RSB_INFO("Checking environment RSB_USER_SET_MEM_HIERARCHY_INFO variable.\n");
+	if((usmhi = getenv("RSB_USER_SET_MEM_HIERARCHY_INFO"))!=NULL && *usmhi)
+		goto done;
+#endif /* RSB_HAVE_GETENV */
+#ifdef RSB_USER_SET_MEM_HIERARCHY_INFO
+	if(verbose) RSB_INFO("Checking hardcoded RSB_USER_SET_MEM_HIERARCHY_INFO symbol\n");
+	usmhi = RSB_USER_SET_MEM_HIERARCHY_INFO;
+	if( usmhi && *usmhi )
+		goto done;
+#endif /* RSB_USER_SET_MEM_HIERARCHY_INFO */
+#ifdef RSB_DETECTED_MEM_HIERARCHY_INFO
+	if(verbose) RSB_INFO("Checking hardcoded RSB_DETECTED_MEM_HIERARCHY_INFO symbol\n");
+	usmhi = RSB_DETECTED_MEM_HIERARCHY_INFO;
+	if( usmhi && *usmhi )
+		goto done;
+#endif /* RSB_DETECTED_MEM_HIERARCHY_INFO */
+done:
+	if(verbose) RSB_INFO("Available memory hierarchy info string: \"%s\"\n",usmhi);
+	return usmhi;
+}
+
+/* FIXME: move these constants outta here ! */
+#define RSB_CONST_KB	(1024)
+#define RSB_CONST_MB	(RSB_CONST_KB*1024)
+#define RSB_CONST_GB	(RSB_CONST_MB*1024)
+
+static int rsb_do_numerical_sprintf(rsb_char_t *s, long n)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * FIXME: temporarily here!
+	 * FIXME: only for long
+	 * */
+	if(!s)
+		return 0;
+	if((n%RSB_CONST_GB)==0 && n >= RSB_CONST_GB)
+		sprintf(s,"%ldG",n/RSB_CONST_GB);
+	else
+	if((n%RSB_CONST_MB)==0 && n >= RSB_CONST_MB)
+		sprintf(s,"%ldM",n/RSB_CONST_MB);
+	else
+	if((n%RSB_CONST_KB)==0 && n >= RSB_CONST_KB)
+		sprintf(s,"%ldK",n/RSB_CONST_KB);
+	else
+		sprintf(s,"%ld",n);
+	return strlen(s);
+}
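+/* For illustration (an editor's note, not upstream documentation):
+ * rsb_do_numerical_sprintf(s,524288) writes "512K" and (s,2097152) writes
+ * "2M", while 1536, not an exact multiple of 1K, writes "1536". */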
+
+const rsb_char_t * rsb__get_mem_hierarchy_info_string(rsb_char_t *usmhib)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * The usmhib buffer shall be at least RSB_MAX_LINE_LENGTH chars long.
+	 * FIXME: no bounds check is performed.
+	 * */
+	long level = 0;
+	usmhib[0] = '\0';
+#if 0
+#error The memory hierarchy info string should use the info in the struct!
+	const rsb_char_t * usmhi = rsb__init_get_mem_hierarchy_info_string(RSB_BOOL_FALSE);
+	/*  e.g.: #define RSB_USER_SET_MEM_HIERARCHY_INFO "L2:4/64/512K;L1:8/64/32K;" */
+
+	usmhib[0] = '\0';
+	if(usmhi)
+		return usmhi;
+#endif
+	/* FIXME: potential overflows here */
+	for(level = rsb_global_session_handle.memory_hierarchy_levels;level>0;--level)
+	{
+#if 0
+		sprintf(usmhib+strlen(usmhib),"L%ld:%ld/%ld/%ld;",(rsb_long_t)level,
+				(rsb_long_t)rsb_global_session_handle.caches[level].associativity,
+				(rsb_long_t)rsb_global_session_handle.caches[level].linesize,
+				(rsb_long_t)rsb_global_session_handle.caches[level].size
+				);
+#else
+		/* FIXME: TODO : this code was written in a hurry: it should be made less stoopidly inefficient!  */
+		sprintf(usmhib+strlen(usmhib),"L%ld:",(rsb_long_t)level),
+		rsb_do_numerical_sprintf(usmhib+strlen(usmhib),(rsb_long_t)rsb_global_session_handle.caches[level].associativity),
+		sprintf(usmhib+strlen(usmhib),"/"),
+		rsb_do_numerical_sprintf(usmhib+strlen(usmhib),(rsb_long_t)rsb_global_session_handle.caches[level].linesize),
+		sprintf(usmhib+strlen(usmhib),"/"),
+		rsb_do_numerical_sprintf(usmhib+strlen(usmhib),(rsb_long_t)rsb_global_session_handle.caches[level].size);
+		if(level>1)
+			sprintf(usmhib+strlen(usmhib),",");
+#endif
+	}
+	return usmhib;
+}
+
+rsb_err_t rsb__dump_mem_hierarchy_info(void)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * */
+#if RSB_ALLOW_STDOUT
+	const rsb_char_t * usmhi = rsb__init_get_mem_hierarchy_info_string(RSB_BOOL_FALSE);
+	if(usmhi && *usmhi)
+		RSB_STDOUT("%s",usmhi);
+	return RSB_ERR_NO_ERROR;
+#else /* RSB_ALLOW_STDOUT */
+	return RSB_ERR_UNSUPPORTED_FEATURE;
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+rsb_err_t rsb__init_mem_hierarchy_info(void)
+{
+	/*!
+	 * \ingroup gr_internals
+	*/
+	return rsb__set_mem_hierarchy_info(NULL);
+}
+
+rsb_err_t rsb__set_mem_hierarchy_info(const rsb_char_t * usmhi)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * TODO: needs some testing code.
+	 * Calling this code should be possible also after initialization.
+	 * */
+	const rsb_char_t * mhi = usmhi?usmhi:rsb__init_get_mem_hierarchy_info_string(RSB_BOOL_FALSE);
+	const rsb_char_t * s = mhi;
+	struct rsb_memory_level_t caches[RSB_MAX_SUPPORTED_CACHE_LEVELS];	/* */
+	long memory_hierarchy_levels = 0;		/*  */
+	
+	if(!mhi || !*mhi)
+		return RSB_ERR_NO_ERROR;	/* keep defaults */
+
+	if(sizeof(rsb_global_session_handle.caches)!=sizeof(caches))
+	{
+		return RSB_ERR_INTERNAL_ERROR;
+	}
+
+	rsb_memcpy(caches,rsb_global_session_handle.caches,sizeof(caches));
+
+	//RSB_INFO("rsb__init_mem_hierarchy_info:\"%s\"\n",usmhi);
+	       /*  e.g.:"L2:4/64/512K,L1:8/64/32K" */
+		memory_hierarchy_levels = 0;
+		while(*s)
+		{
+			long level = 0;
+			if(*s=='L' && s[1] && isdigit(s[1]))
+			{
+//				RSB_INFO("uhm: %s",mhi);
+				level = rsb__util_atoi(s+1);
+				memory_hierarchy_levels = RSB_MAX(level,memory_hierarchy_levels);
+				caches[level].level = level;
+				++s;
+				while(isdigit(*s))++s;
+				if(*s!=':')goto cerr;
+				++s;
+				if(!isdigit(*s))goto cerr;
+				caches[level].associativity = rsb__util_atoi(s);
+				while(isdigit(*s))++s;
+				if(toupper(*s)=='K')
+					caches[level].associativity *= 1024,++s;
+				if(toupper(*s)=='M')
+					caches[level].associativity *= 1024*1024,++s;
+				if(*s!='/')goto cerr;
+				++s;
+				if(!isdigit(*s))goto cerr;
+				caches[level].linesize = rsb__util_atoi(s);
+				while(isdigit(*s))++s;
+				if(toupper(*s)=='K')
+					caches[level].linesize *= 1024,++s;
+				if(toupper(*s)=='M')
+					caches[level].linesize *= 1024*1024,++s;
+				if(*s!='/')goto cerr;
+				++s;
+				if(!isdigit(*s))goto cerr;
+				caches[level].size = rsb__util_atoi(s);
+				while(isdigit(*s))++s;
+				if(toupper(*s)=='K')
+					caches[level].size *= 1024,++s;
+				if(toupper(*s)=='M')
+					caches[level].size *= 1024*1024,++s;
+				if(toupper(*s)=='G')
+					caches[level].size *= 1024*1024*1024,++s;
+				if(level>1)
+				{
+					if(*s!=',')
+						goto cerr;
+					else
+						++s;
+				}
+			}
+			else break;
+		}
+/*  				RSB_INFO("parsing memory hierarchy string succeeded\n"
+						"%d %d %d\n",
+					rsb_global_session_handle.memory_hierarchy_levels,
+					rsb_global_session_handle.caches[1].size,
+					rsb_global_session_handle.caches[2].size
+						);*/
+		goto cok;
+		/* FIXME: this code is incomplete: it does not check the parsed information for completeness or correctness */
+cerr:
+	RSB_ERROR("error parsing memory hierarchy string (at:%s)\n",s);
+	return RSB_ERR_NO_ERROR; /* FIXME */
+cok:
+	rsb_memcpy(rsb_global_session_handle.caches,caches,sizeof(caches));
+	rsb_global_session_handle.memory_hierarchy_levels = memory_hierarchy_levels;
+	return RSB_ERR_NO_ERROR;
+}
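+/* A minimal usage sketch, assuming the public rsb_lib_init()/rsb_lib_exit()
+ * entry points and POSIX setenv(): the string format parsed above, e.g.
+ * "L2:4/64/512K,L1:8/64/32K" (L<level>:<associativity>/<linesize>/<size>,
+ * comma separated), can be supplied via the environment before init. */
+#if 0
+#include <stdlib.h>
+#include <rsb.h>
+int rsb_example_mhi_init(void) /* hypothetical demo function */
+{
+	setenv("RSB_USER_SET_MEM_HIERARCHY_INFO","L2:4/64/512K,L1:8/64/32K",1);
+	if(rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
+		return 1;
+	return (rsb_lib_exit(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR) ? 1 : 0;
+}
+#endif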
+
+rsb_err_t rsb__init_check_for_constants_correctness(void)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * TODO: needs some testing code.
+	 * */
+	/* basic compatibility checks (there are programs relying on this, and so this test should reveal inconsistencies) */
+#ifdef  RSB_NUMERICAL_TYPE_DOUBLE
+	RSB_ASSERT(RSB_NUMERICAL_TYPE_DOUBLE        =='D');
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+#ifdef  RSB_NUMERICAL_TYPE_FLOAT
+	RSB_ASSERT(RSB_NUMERICAL_TYPE_FLOAT         =='S');
+#endif /* RSB_NUMERICAL_TYPE_FLOAT */
+#ifdef  RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX
+	RSB_ASSERT(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX=='Z');
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX */
+#ifdef  RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	RSB_ASSERT(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX =='C');
+#endif /* RSB_NUMERICAL_TYPE_FLOAT_COMPLEX */
+
+	/* basic sanity checks */
+	RSB_ASSERT(RSB_FITTING_SAMPLES>0);
+	RSB_ASSERT(RSB_FIRST_FITTING_SAMPLE_BW_MAX>0);
+	RSB_ASSERT(RSB_FIRST_FITTING_SAMPLE_BW_MIN>0);
+	RSB_ASSERT(RSB_FIRST_FITTING_SAMPLE_BW_MIN <= RSB_FIRST_FITTING_SAMPLE_BW_MAX);
+	RSB_ASSERT(RSB_NNZ_BLK_MAX>RSB_MAXIMAL_CONFIGURED_BLOCK_SIZE);
+	RSB_ASSERT(RSB_BENCHMARK_MIN_SECONDS>0.0);
+	RSB_ASSERT(RSB_BENCHMARK_MIN_RUNS>1);
+
+	/* TODO : should check for signedness, too */
+	RSB_ASSERT(sizeof(rsb_non_overflowing_t) >= sizeof(rsb_nnz_idx_t));	/* */
+	RSB_ASSERT(sizeof(rsb_nnz_idx_t) >= sizeof(rsb_coo_idx_t));
+	RSB_ASSERT(sizeof(rsb_coo_idx_t) >= sizeof(rsb_blk_idx_t));
+	RSB_ASSERT(sizeof(rsb_flags_t) >= 4 );
+	RSB_ASSERT(sizeof(rsb_err_t  ) >= 4 );
+	RSB_ASSERT(sizeof(rsb_int_t) == sizeof(int) );
+
+	RSB_ASSERT((rsb_half_idx_t)((RSB_MAX_VALUE_FOR_TYPE(rsb_half_idx_t)+1))==0);
+	RSB_ASSERT(RSB_MAX_VALUE_FOR_TYPE(rsb_nnz_idx_t)>0);
+	RSB_ASSERT(RSB_MAX_VALUE_FOR_TYPE(rsb_coo_idx_t)>0);
+	RSB_ASSERT(RSB_MAX_VALUE_FOR_TYPE(rsb_blk_idx_t)>0);
+
+	//RSB_ASSERT(RSB_IS_SIGNED(char));
+	RSB_ASSERT(RSB_IS_SIGNED(signed char));
+	RSB_ASSERT(RSB_IS_SIGNED(short int));
+	RSB_ASSERT(RSB_IS_SIGNED(signed int));
+	RSB_ASSERT(RSB_IS_SIGNED(int));
+	RSB_ASSERT(RSB_IS_SIGNED(long));
+
+	RSB_ASSERT(RSB_IS_UNSIGNED(unsigned int));
+	RSB_ASSERT(RSB_IS_UNSIGNED(unsigned short int));
+	RSB_ASSERT(RSB_IS_UNSIGNED(unsigned char));
+	RSB_ASSERT(RSB_IS_UNSIGNED(unsigned long));
+	RSB_ASSERT(RSB_IS_UNSIGNED(size_t));
+
+	RSB_ASSERT(RSB_ERR_NO_ERROR==0);
+	RSB_ASSERT(RSB_ERR_GENERIC_ERROR==-1);
+
+	RSB_ASSERT(RSB_MAX_VALUE_FOR_TYPE(short int)<RSB_MAX_VALUE_FOR_TYPE(unsigned short int));
+	/* 
+		FIXME
+		We found  RSB_MAX_VALUE_FOR_TYPE(char) == RSB_MAX_VALUE_FOR_TYPE(unsigned char)  (==255) on
+		 IBM XL C/C++ Enterprise Edition V7.0
+		 Version: 07.00.0000.0005
+	 */
+#if   defined(__xlC__)
+	RSB_ASSERT(RSB_MAX_VALUE_FOR_TYPE(signed char)	<RSB_MAX_VALUE_FOR_TYPE(unsigned char));
+#else
+	RSB_ASSERT(RSB_MAX_VALUE_FOR_TYPE(char)	<RSB_MAX_VALUE_FOR_TYPE(unsigned char));
+#endif /* __xlC__ */
+
+	RSB_ASSERT(RSB_MAX_VALUE_FOR_TYPE(signed int)	<RSB_MAX_VALUE_FOR_TYPE(unsigned int));
+	RSB_ASSERT(RSB_MAX_VALUE_FOR_TYPE(int)	<RSB_MAX_VALUE_FOR_TYPE(unsigned int));
+	RSB_ASSERT(RSB_MAX_VALUE_FOR_TYPE(signed long)	<RSB_MAX_VALUE_FOR_TYPE(unsigned long));
+	RSB_ASSERT(RSB_MAX_VALUE_FOR_TYPE(long)	<RSB_MAX_VALUE_FOR_TYPE(unsigned long));
+
+	RSB_ASSERT(RSB_MAX_VALUE_FOR_TYPE(rsb_nnz_idx_t)>=RSB_MAX_VALUE_FOR_TYPE(rsb_coo_idx_t));
+	RSB_ASSERT(RSB_MAX_VALUE_FOR_TYPE(rsb_coo_idx_t)>=RSB_MAX_VALUE_FOR_TYPE(rsb_blk_idx_t));
+	RSB_ASSERT(RSB_MAX_VALUE_FOR_TYPE(rsb_coo_idx_t)>=RSB_MAX_VALUE_FOR_TYPE(rsb_submatrix_idx_t));
+	RSB_ASSERT(RSB_MAX_VALUE_FOR_TYPE(rsb_coo_idx_t)>=RSB_MAX_VALUE_FOR_TYPE(rsb_half_idx_t));
+
+	RSB_ASSERT(RSB_MAX_VALUE_FOR_TYPE(size_t)         >=RSB_MAX_VALUE_FOR_TYPE(rsb_coo_idx_t));
+	RSB_ASSERT(RSB_MAX_VALUE_FOR_TYPE(size_t)         >=RSB_MAX_VALUE_FOR_TYPE(rsb_nnz_idx_t));
+	RSB_ASSERT(RSB_MAX_VALUE_FOR_TYPE(size_t)         >=RSB_MAX_VALUE_FOR_TYPE(rsb_blk_idx_t));
+
+	RSB_ASSERT(RSB_MAX_MATRIX_DIM>255);
+	RSB_ASSERT(RSB_MAX_MATRIX_NNZ>255);
+	RSB_ASSERT(RSB_MAX_MATRIX_NNZ >= RSB_MAX_MATRIX_DIM);
+
+	RSB_ASSERT( RSB_IS_VALID_NNZ_SUM(RSB_MAX_MATRIX_NNZ/2,RSB_MAX_MATRIX_NNZ/2));
+	RSB_ASSERT(!RSB_IS_VALID_NNZ_SUM(RSB_MAX_MATRIX_NNZ,1));
+
+	RSB_ASSERT(RSB_MARKER_COO_VALUE>0);
+	RSB_ASSERT(RSB_MAX_ALLOCATABLE_MEMORY_CHUNK>0);
+
+	RSB_ASSERT(rsb__do_is_candidate_size_for_halfword_csr(30000,30000,30000,RSB_FLAG_USE_HALFWORD_INDICES_CSR));
+	RSB_ASSERT(rsb__do_is_candidate_size_for_halfword_csr(64000,64000,64000,RSB_FLAG_USE_HALFWORD_INDICES_CSR));
+
+	/* if any of the following is not true, the library would lose consistence */
+	RSB_ASSERT(RSB_IS_UNSIGNED(rsb_half_idx_t));
+	RSB_ASSERT(RSB_IS_SIGNED(rsb_coo_idx_t));
+	RSB_ASSERT(RSB_IS_SIGNED(rsb_nnz_idx_t));
+	{
+		rsb_half_idx_t h = RSB_MAX_VALUE_FOR_TYPE(rsb_half_idx_t);
+		rsb_coo_idx_t c = RSB_MAX_VALUE_FOR_TYPE(rsb_coo_idx_t);
+		RSB_ASSERT(c>=h);
+		RSB_ASSERT((c-h)>=0);
+	}
+	
+	RSB_ASSERT(rsb__util_strlen(RSB_PERFORMANCE_BINARY_DUMP_FILE_SIGNATURE)<RSB_PERFORMANCE_BINARY_DUMP_FILE_SIGNATURE_MAX_CHARS);
+	{
+		/* EXPERIMENTAL, FIXME */
+		rsb_int_t ti;
+		rsb_type_t types [] = RSB_MATRIX_TYPE_CODES_ARRAY;
+		for(ti=0;ti<RSB_IMPLEMENTED_TYPES	;++ti)
+			RSB_ASSERT(rsb__do_sizeof(types[ti])<=RSB_CONST_ENOUGH_BYTES_FOR_ANY_TYPE);
+	}
+	return RSB_ERR_NO_ERROR;
+}
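+/* A sketch of how a signedness check like RSB_IS_SIGNED above can be
+ * realized portably (an assumption: librsb's actual definition may differ). */
+#if 0
+#define EXAMPLE_IS_SIGNED(T)   ( ((T)-1) < ((T)0) ) /* -1 wraps to the maximum for unsigned T */
+#define EXAMPLE_IS_UNSIGNED(T) ( !EXAMPLE_IS_SIGNED(T) )
+#endif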
+
+rsb_err_t rsb__init_check_for_system_constants_correctness(void)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * TODO: needs some testing code.
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+#if 1
+	/* Let's see if some invariants hold despite user set options. */
+
+	if(sizeof(rsb_blk_idx_t)>sizeof(rsb_coo_idx_t))
+	{
+		errval = RSB_ERR_INTERNAL_ERROR;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	if(sizeof(rsb_coo_idx_t)>sizeof(rsb_nnz_idx_t))
+	{
+		errval = RSB_ERR_INTERNAL_ERROR;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+#ifdef CHAR_BIT
+	if(sizeof(rsb_flags_t)<(4*CHAR_BIT)/8)
+#else /* CHAR_BIT */
+	if(sizeof(rsb_flags_t)<4)/* remember that the C standard does not mandate 8 bits per byte */
+#endif /* CHAR_BIT */
+	{
+		errval = RSB_ERR_INTERNAL_ERROR;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+#ifdef CHAR_BIT
+	if(sizeof(rsb_err_t)<(4*CHAR_BIT)/8)
+#else /* CHAR_BIT */
+	if(sizeof(rsb_err_t)<4)/* remember that the C standard does not mandate 8 bits per byte */
+#endif /* CHAR_BIT */
+	{
+		errval = RSB_ERR_INTERNAL_ERROR;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+#endif
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_init_inner(void)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	RSB_BZERO_P(&rsb_global_session_handle);
+	rsb_global_session_handle.rsb_g_initialized = RSB_BOOL_FALSE;
+	rsb_global_session_handle.memory_hierarchy_levels = 0;
+#if RSB_WANT_PERFORMANCE_FILE
+	rsb_global_session_handle.performance_binary_dump_file = RSB_PERFORMANCE_BINARY_DUMP_FILE;
+#endif /* RSB_WANT_PERFORMANCE_FILE */
+	rsb_global_session_handle.asm_sort_method = 0;
+	rsb_global_session_handle.cache_blocking_method = 0;
+	rsb_global_session_handle.mhis = NULL;
+	rsb_global_session_handle.subdivision_multiplier = 1.0;
+#if RSB_WANT_BOUNDED_BOXES
+	rsb_global_session_handle.want_bounded_box = 1;
+#endif /* RSB_WANT_BOUNDED_BOXES */
+#if RSB_WANT_DEBUG_VERBOSE_INTERFACE_NOTICE
+	rsb_global_session_handle.rsb_g_verbose_interface = 0;
+#endif /* RSB_WANT_DEBUG_VERBOSE_INTERFACE_NOTICE */
+#if RSB_HAVE_STREAMS
+	rsb_global_session_handle.init_stream = NULL;
+	rsb_global_session_handle.exit_stream = NULL;
+	rsb_global_session_handle.error_stream = stderr;
+	rsb_global_session_handle.out_stream = stdout;
+#endif /* RSB_HAVE_STREAMS */
+#if RSB_WANT_LIBRSB_TIMER
+	rsb_global_session_handle.etime = RSB_TIME_ZERO;
+#endif /* RSB_WANT_LIBRSB_TIMER */
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+#if 0
+	#pragma omp parallel  RSB_NTC
+	{
+		rsb_global_session_handle.rsb_g_threads = omp_get_num_threads();
+		rsb_global_session_handle.rsb_want_threads = rsb_global_session_handle.rsb_g_threads;
+		/* the user may modify rsb_want_threads in a second moment */
+	}
+#else
+		rsb_global_session_handle.rsb_g_threads = omp_get_max_threads();
+		rsb_global_session_handle.rsb_want_threads = rsb_global_session_handle.rsb_g_threads;
+#endif
+	if(rsb_global_session_handle.rsb_g_threads>RSB_CONST_MAX_SUPPORTED_CORES)
+	{
+		errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		RSB_PERR_GOTO(err,"seems like your machine supports %ld threads. this code was compiled to support max %ld\n"
+				,rsb_global_session_handle.rsb_g_threads
+				,RSB_CONST_MAX_SUPPORTED_CORES
+				);
+	}
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+
+	RSB_DO_ERROR_CUMULATE(errval,rsb__init_mem_hierarchy_info());
+	RSB_DO_ERROR_CUMULATE(errval,rsb__init_check_for_constants_correctness());
+	RSB_DO_ERROR_CUMULATE(errval,rsb__init_check_for_system_constants_correctness());
+
+	/* basic sanity checks */
+	errval = rsb__util_m4_sanity_check();
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	if(
+			0 // RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS!=0x40000000) // 20130109 shall fix PSBLAS accordingly
+			//(RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS!=0x02)
+			||(RSB_FLAG_SORTED_INPUT!=0x04)
+			||(RSB_FLAG_FORTRAN_INDICES_INTERFACE!=0x01))
+	{
+		// these values are fixed, as PSBLAS uses them hardcoded (20101124)
+		errval = RSB_ERR_INTERNAL_ERROR;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	if((errval = rsb__sys_init())!=RSB_ERR_NO_ERROR)
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	if((errval = rsb__do_bindump_init())!=RSB_ERR_NO_ERROR)
+	{
+		if(errval != RSB_ERR_UNSUPPORTED_FEATURE)
+		{ RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+		errval = RSB_ERR_NO_ERROR;	/* we ignore such an error, for now */
+	}
+
+	rsb__g_rsb_memory_counter_init();/* .. if any */
+
+#if RSB_WANT_PERFORMANCE_COUNTERS_IN_RSB_INIT
+	if((errval = rsb_perf_counters_init())!=RSB_ERR_NO_ERROR)
+	{	
+		RSB_STDERR("problem initializing performance counters (rsb_perf_counters_init gave %d)\n",(int)errval);
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+#endif /* RSB_WANT_PERFORMANCE_COUNTERS_IN_RSB_INIT */
+	/* checking the global memory counter */
+	if(rsb__get_g_rsb_memory_count()!=0)
+	{
+		errval = RSB_ERR_INTERNAL_ERROR;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	rsb__perf_init();
+	if(RSB_SHOULD_FAIL_INIT_IF_MEMHIER_DETECTION_FAILS)
+	{
+		long cbs = rsb__get_cache_block_byte_size();
+		long lcs = rsb__get_lastlevel_c_size();
+		if( cbs<RSB_MIN_ALLOWED_CACHE_BLOCK_SIZE || cbs>RSB_MAX_ALLOWED_CACHE_BLOCK_SIZE )
+		{
+			errval = RSB_ERR_FAILED_MEMHIER_DETECTION;
+			RSB_PERR_GOTO(herr,"Detected cache block size (%ld) value seems wrong.\n",cbs);
+		}
+		if( lcs<RSB_MIN_ALLOWED_CACHE_BLOCK_SIZE || lcs>RSB_MAX_ALLOWED_CACHE_BLOCK_SIZE )
+		{
+			errval = RSB_ERR_FAILED_MEMHIER_DETECTION;
+			RSB_PERR_GOTO(herr,"Detected last level cache block size (%ld) value seems wrong.\n",lcs);
+		}
+	}
+
+	rsb_global_session_handle.rsb_g_initialized = RSB_BOOL_TRUE;
+	goto err;
+herr:
+	{
+		const char * mhis = rsb__init_get_mem_hierarchy_info_string(RSB_BOOL_TRUE);
+		if(mhis)
+			RSB_ERROR("Please check your memory hierarchy info string, detected as: \"%s\"\n",mhis);
+		else
+			RSB_ERROR("Your memory hierarchy info string has not been detected\n");
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_init(struct rsb_initopts * io)
+{
+	/*!
+	 */
+
+#if 0
+	rsb_int_t oi,on = io?io->n_pairs:0,ko = 0,uo = 0;
+#endif
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+#if 0
+	// pre-options
+	for(oi=0;oi<on;++oi)
+	switch(io->keys[oi])
+	{
+		default: uo++; // we ignore further error processing here
+	}
+#endif
+
+	errval = rsb__do_init_inner();
+
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES); 
+	}
+
+	errval = rsb__do_reinit(io);
+#if 0
+	if(ko!=uo)
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES); 
+		// FIXME: place unknown option error processing here
+	}
+#endif
+err:
+	return errval;
+}
+
+rsb_err_t rsb__do_reinit(struct rsb_initopts * io)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_int_t oi,on = io?io->n_pairs:0,ko = 0,uo = 0;
+
+	if(!io)
+	{
+		goto err;
+	}
+
+	if(on && ( !io->keys || !io->values ) )
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,"init options struct given with allegedly %d pairs, but NULL pointers ?",on);
+	}
+
+#if 0
+	if(io && io->action == RSB_IO_SPECIFIER_GET)
+	for(oi=0;oi<on;++oi)
+	{
+		/* FIXME: shall modify RSB_IF_NOT_NULL_SET_TO_CASTED for performing either input or output */
+		if(io->keys[oi]==RSB_IO_WANT_EXECUTING_THREADS)
+		{
+			//RSB_IF_NOT_NULL_SET_TO_CASTED(*(rsb_int_t*)io->values[oi],&rsb_global_session_handle.rsb_want_threads,rsb_int_t);
+			RSB_IF_NOT_NULL_GET_SET_TO_CASTED(rsb_global_session_handle.rsb_want_threads,(rsb_int_t*)(io->values[oi]),rsb_int_t,io->action,errval);
+		}
+	}
+#endif
+
+	if((io!=NULL) && (((io->action == RSB_IO_SPECIFIER_GET)) || (io->action == RSB_IO_SPECIFIER_SET)))
+	for(oi=0;oi<on;++oi)
+	switch(io->keys[oi])
+	{
+		case RSB_IO_WANT_VERBOSE_INIT:
+#if RSB_HAVE_STREAMS
+		//RSB_IF_NOT_NULL_SET_TO_CASTED(rsb_global_session_handle.init_stream,io->values[oi],FILE*);
+		RSB_IF_NOT_NULL_GET_SET_TO_CASTED(rsb_global_session_handle.init_stream,io->values[oi],FILE*,io->action,errval);
+#else /* RSB_HAVE_STREAMS */
+		RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_NO_STREAM_OUTPUT_CONFIGURED_OUT); RSB_PERR_GOTO(err,RSB_ERRM_ES);
+#endif /* RSB_HAVE_STREAMS */
+		ko++;
+		break;
+		case RSB_IO_WANT_VERBOSE_EXIT:
+#if RSB_HAVE_STREAMS
+		//RSB_IF_NOT_NULL_SET_TO_CASTED(rsb_global_session_handle.exit_stream,io->values[oi],FILE*);
+		RSB_IF_NOT_NULL_GET_SET_TO_CASTED(rsb_global_session_handle.exit_stream,io->values[oi],FILE*,io->action,errval);
+#else /* RSB_HAVE_STREAMS */
+		RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_NO_STREAM_OUTPUT_CONFIGURED_OUT); RSB_PERR_GOTO(err,RSB_ERRM_ES);
+#endif /* RSB_HAVE_STREAMS */
+		ko++;
+		break;
+		case RSB_IO_WANT_OUTPUT_STREAM:
+#if RSB_HAVE_STREAMS
+		//RSB_IF_NOT_NULL_SET_TO_CASTED(rsb_global_session_handle.out_stream,io->values[oi],FILE*);
+		RSB_IF_NOT_NULL_GET_SET_TO_CASTED(rsb_global_session_handle.out_stream,io->values[oi],FILE*,io->action,errval);
+#else /* RSB_HAVE_STREAMS */
+		RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_NO_STREAM_OUTPUT_CONFIGURED_OUT); RSB_PERR_GOTO(err,RSB_ERRM_ES);
+#endif /* RSB_HAVE_STREAMS */
+		ko++;
+		break;
+		case RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE:
+#if RSB_WANT_DEBUG_VERBOSE_INTERFACE_NOTICE
+		//RSB_IF_NOT_NULL_SET_TO_CASTED(rsb_global_session_handle.rsb_g_verbose_interface,io->values[oi],rsb_int_t);
+		RSB_IF_NOT_NULL_GET_SET_TO_CASTED(rsb_global_session_handle.rsb_g_verbose_interface,io->values[oi],rsb_int_t,io->action,errval);
+#else
+		if(io->action == RSB_IO_SPECIFIER_GET)
+		{ rsb_int_t mone = -1; RSB_IF_NOT_NULL_GET_SET_TO_CASTED(mone,io->values[oi],rsb_int_t,io->action,errval); }
+		else
+		{ RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_NO_STREAM_OUTPUT_CONFIGURED_OUT); RSB_PERR_GOTO(err,RSB_ERRM_ES); } /* braces: the goto must fire only in the else branch */
+#endif /* RSB_WANT_DEBUG_VERBOSE_INTERFACE_NOTICE */
+		ko++;
+		break;
+		case RSB_IO_WANT_VERBOSE_ERRORS:
+#if RSB_HAVE_STREAMS
+		//RSB_IF_NOT_NULL_SET_TO_CASTED(rsb_global_session_handle.error_stream,io->values[oi],FILE*);
+		RSB_IF_NOT_NULL_GET_SET_TO_CASTED(rsb_global_session_handle.error_stream,io->values[oi],FILE*,io->action,errval);
+#else /* RSB_HAVE_STREAMS */
+		RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_NO_STREAM_OUTPUT_CONFIGURED_OUT); RSB_PERR_GOTO(err,RSB_ERRM_ES);
+#endif /* RSB_HAVE_STREAMS */
+		ko++;
+		break;
+		case RSB_IO_WANT_SORT_METHOD:
+		//rsb_global_session_handle.asm_sort_method = (io->values[oi])?*(rsb_int_t*)(io->values[oi]):0;
+		//rsb_global_session_handle.asm_sort_method = RSB_IF_NOT_NULL_CAST_TO(io->values[oi],rsb_int_t,0);
+		RSB_IF_NOT_NULL_GET_SET_TO_CASTED(rsb_global_session_handle.asm_sort_method,io->values[oi],rsb_int_t,io->action,errval);
+		ko++;
+		break;
+		case RSB_IO_WANT_BOUNDED_BOX_COMPUTATION:
+#if RSB_WANT_BOUNDED_BOXES
+		//rsb_global_session_handle.want_bounded_box = RSB_IF_NOT_NULL_CAST_TO(io->values[oi],rsb_int_t,1);
+		RSB_IF_NOT_NULL_GET_SET_TO_CASTED(rsb_global_session_handle.want_bounded_box,io->values[oi],rsb_int_t,io->action,errval);
+		ko++;
+#else /* RSB_WANT_BOUNDED_BOXES */
+		RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_UNSUPPORTED_FEATURE); RSB_PERR_GOTO(err,RSB_ERRM_ES);
+#endif /* RSB_WANT_BOUNDED_BOXES */
+		break;
+		case RSB_IO_WANT_SUBDIVISION_MULTIPLIER:
+		//rsb_global_session_handle.subdivision_multiplier = RSB_IF_NOT_NULL_CAST_TO(io->values[oi],rsb_real_t,0);
+		RSB_IF_NOT_NULL_GET_SET_TO_CASTED(rsb_global_session_handle.subdivision_multiplier,io->values[oi],rsb_real_t,io->action,errval);
+		ko++;
+		break;
+		case RSB_IO_WANT_CACHE_BLOCKING_METHOD:
+		//rsb_global_session_handle.cache_blocking_method = RSB_IF_NOT_NULL_CAST_TO(io->values[oi],rsb_int_t,0);
+		RSB_IF_NOT_NULL_GET_SET_TO_CASTED(rsb_global_session_handle.cache_blocking_method,io->values[oi],rsb_int_t,io->action,errval);
+		ko++;
+		break;
+		case RSB_IO_WANT_MEMORY_HIERARCHY_INFO_STRING:
+		RSB_IF_NOT_NULL_GET_SET_TO_CASTED(rsb_global_session_handle.mhis,io->values[oi],const rsb_char_t*,io->action,errval);
+		if((!RSB_SOME_ERROR(errval)) && ((io->action == RSB_IO_SPECIFIER_SET)))
+			RSB_DO_ERROR_CUMULATE(errval,rsb__set_mem_hierarchy_info(rsb_global_session_handle.mhis));
+		ko++;
+		break;
+		case RSB_IO_WANT_IS_INITIALIZED_MARKER:
+		RSB_IF_NOT_NULL_GET_SET_TO_CASTED(rsb_global_session_handle.rsb_g_initialized,io->values[oi],rsb_bool_t,io->action,errval);
+		ko++;
+		break;
+		case RSB_IO_WANT_EXECUTING_THREADS:
+		//RSB_IF_NOT_NULL_SET_TO_CASTED(rsb_global_session_handle.rsb_want_threads,io->values[oi],rsb_int_t);
+		RSB_IF_NOT_NULL_GET_SET_TO_CASTED(rsb_global_session_handle.rsb_want_threads,io->values[oi],rsb_int_t,io->action,errval);
+		ko++;
+		if(rsb__do_was_initialized())
+			/*errval| = */rsb__set_num_threads(rsb_global_session_handle.rsb_want_threads);
+		break;
+		case RSB_IO_WANT_MEM_ALLOC_TOT:
+		if(io->action == RSB_IO_SPECIFIER_GET)
+		{
+			size_t val = 0;
+	#ifndef RSB_DISABLE_ALLOCATOR_WRAPPER
+			val = rsb_global_session_handle.allocated_memory;
+	#else
+			RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_UNSUPPORTED_FEATURE); RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	#endif /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+			RSB_IF_NOT_NULL_GET_SET_TO_CASTED(val,io->values[oi],size_t,io->action,errval);
+		}
+		ko++;
+		break;
+		case RSB_IO_WANT_MEM_ALLOC_CNT:
+		if(io->action == RSB_IO_SPECIFIER_GET)
+		{
+			size_t val = 0;
+	#ifndef RSB_DISABLE_ALLOCATOR_WRAPPER
+			val = rsb_global_session_handle.allocations_count;
+	#else
+			RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_UNSUPPORTED_FEATURE); RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	#endif /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+			RSB_IF_NOT_NULL_GET_SET_TO_CASTED(val,io->values[oi],size_t,io->action,errval);
+		}
+		ko++;
+		break;
+		case RSB_IO_WANT_LEAF_LEVEL_MULTIVEC:
+		RSB_IF_NOT_NULL_GET_SET_TO_CASTED(rsb_global_session_handle.want_outer_spmm,io->values[oi],rsb_int_t,io->action,errval);
+		ko++;
+		break;
+		case RSB_IO_WANT_LIBRSB_ETIME:
+		{
+#if RSB_WANT_LIBRSB_TIMER
+			RSB_IF_NOT_NULL_GET_SET_TO_CASTED(rsb_global_session_handle.etime,io->values[oi],rsb_time_t,io->action,errval);
+#else /* RSB_WANT_LIBRSB_TIMER */
+		RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_UNSUPPORTED_FEATURE); RSB_PERR_GOTO(err,RSB_ERRM_ES);
+#endif /* RSB_WANT_LIBRSB_TIMER */
+			ko++;
+		}
+		break;
+#if RSB_WANT_ALLOCATOR_LIMITS
+		case RSB_IO_WANT_MAX_MEMORY_ALLOCATIONS:
+		RSB_IF_NOT_NULL_GET_SET_TO_CASTED(rsb_global_session_handle.allocations_count_max,io->values[oi],size_t,io->action,errval);
+		ko++;
+		break;
+		case RSB_IO_WANT_MAX_MEMORY_ALLOCATED:
+		RSB_IF_NOT_NULL_GET_SET_TO_CASTED(rsb_global_session_handle.memory_count_max,io->values[oi],size_t,io->action,errval);
+		ko++;
+		break;
+#else /* RSB_WANT_ALLOCATOR_LIMITS */
+		/* case labels keep these keys reachable: without them the statement below was unreachable dead code */
+		case RSB_IO_WANT_MAX_MEMORY_ALLOCATIONS:
+		case RSB_IO_WANT_MAX_MEMORY_ALLOCATED:
+		RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_UNSUPPORTED_FEATURE); RSB_PERR_GOTO(err,RSB_ERRM_NOLP);
+#endif /* RSB_WANT_ALLOCATOR_LIMITS */
+		case RSB_IO_WANT_VERBOSE_TUNING:
+		RSB_IF_NOT_NULL_GET_SET_TO_CASTED(rsb_global_session_handle.verbose_tuning,io->values[oi],rsb_int_t,io->action,errval);
+		ko++;
+		break;
+		default: uo++; // we ignore further error processing here
+	}
+
+err:
+	return errval;
+}
+
+rsb_err_t rsb__do_exit(void)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+#if RSB_WITH_SPARSE_BLAS_INTERFACE
+	errval = rsb__BLAS_handles_free();
+	if(RSB_SOME_ERROR(errval))
+	{ RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+
+#if RSB_WANT_PERFORMANCE_COUNTERS_IN_RSB_INIT
+	errval = rsb_perf_counters_finalize();
+	if(RSB_SOME_ERROR(errval))
+	{ RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+#endif /* RSB_WANT_PERFORMANCE_COUNTERS_IN_RSB_INIT */
+	errval = rsb__perf_exit();
+	if(RSB_SOME_ERROR(errval))
+	{ RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+
+	errval = rsb__do_check_leak();
+	/* fall through on success, and also on a mere leak report, so that the initialized flag below gets reset */
+	if(RSB_SOME_ERROR(errval) && errval != RSB_ERR_MEMORY_LEAK)
+	{ goto err; }
+
+	rsb_global_session_handle.rsb_g_initialized = RSB_BOOL_FALSE;
+err:
+	return errval;
+}
+
+
+/* @endcond */
diff --git a/rsb_init.h b/rsb_init.h
new file mode 100644
index 0000000..438a8d3
--- /dev/null
+++ b/rsb_init.h
@@ -0,0 +1,47 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/**
+ * @file
+ * @brief Initialization code.
+ * @author Michele Martone
+ * */
+#ifndef RSB_INIT_H_INCLUDED
+#define RSB_INIT_H_INCLUDED
+
+#define RSB_DO_REINIT_SINGLE_VALUE(IOF,IOP,IOS,ERRVAL) { enum rsb_opt_t keys[]={IOF}; void*values[]={(IOP)}; struct rsb_initopts io; io.action=(IOS); io.keys=keys; io.values=values; io.n_pairs=1; ERRVAL=rsb__do_reinit(&io); }
+#define RSB_DO_REINIT_SINGLE_VALUE_C_IOP(IOF,IOP,IOS,ERRVAL) { enum rsb_opt_t keys[]={IOF}; const void*values[]={(IOP)}; struct rsb_initopts io; io.action=(IOS); io.keys=keys; (io.values)=(void**)values; io.n_pairs=1; ERRVAL=rsb__do_reinit(&io); }
+#define RSB_DO_REINIT_SINGLE_VALUE_SET(IOF,IOP,ERRVAL) RSB_DO_REINIT_SINGLE_VALUE(IOF,IOP,RSB_IO_SPECIFIER_SET,ERRVAL)
+#define RSB_DO_REINIT_SINGLE_VALUE_GET(IOF,IOP,ERRVAL) RSB_DO_REINIT_SINGLE_VALUE(IOF,IOP,RSB_IO_SPECIFIER_GET,ERRVAL)
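+/* A hypothetical usage sketch of the convenience macros above: setting one
+ * option (here the executing threads) through a single key/value pair. */
+#if 0
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_int_t nt = 4; /* desired executing threads */
+	RSB_DO_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_EXECUTING_THREADS,&nt,errval);
+#endif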
+
+rsb_err_t rsb__init_mem_hierarchy_info(void);
+rsb_err_t rsb__set_mem_hierarchy_info(const rsb_char_t * mhi);
+rsb_err_t rsb__dump_mem_hierarchy_info(void);
+rsb_err_t rsb__init_check_for_constants_correctness(void);
+rsb_err_t rsb__init_check_for_system_constants_correctness(void);
+const rsb_char_t * rsb__init_get_mem_hierarchy_info_string(rsb_bool_t verbose);
+const rsb_char_t * rsb__get_mem_hierarchy_info_string(rsb_char_t *usmhib);
+rsb_err_t rsb__do_init(struct rsb_initopts * io);
+rsb_err_t rsb__do_reinit(struct rsb_initopts * io);
+rsb_err_t rsb__do_exit(void);
+#endif /* RSB_INIT_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_internals.c b/rsb_internals.c
new file mode 100644
index 0000000..972cc1c
--- /dev/null
+++ b/rsb_internals.c
@@ -0,0 +1,3902 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/**
+ * @file
+ * @author Michele Martone
+ * @brief
+ *
+ * Low level routines and tools for our sparse matrix format implementations.
+ * \internal
+ *
+ * */
+#include "rsb_common.h"
+#include "rsb_util.h"
+#include "rsb.h"
+#include "rsb_types.h"
+#include "rsb_unroll.h"
+#ifdef RSB_HAVE_SYS_UTSNAME_H 
+#include <sys/utsname.h>	/* uname */
+#endif /* RSB_HAVE_SYS_UTSNAME_H  */
+
+#define RSB_WANT_NULL_ALLOCATED_ZERO_NNZ_COO_MATRICES_ARRAYS 1 /* 20110419 a bugfix for the nnz == 0 case vs realloc and memory counters */
+#define RSB_TOKLEN 16
+#define RSB_WANT_ZERO_ON_DESTROY 0	/* a useful debug option */
+#define rsb__strcpy strcpy
+
+#define RSB_ILLEGAL_FLAGS	0xFFFFFFFF
+#define RSB_WANT_DOUBLE_MATRIX_FREE_DETECT 0
+
+#if RSB_WANT_DOUBLE_MATRIX_FREE_DETECT
+#define RSB_CONDITIONAL_FREE_MTXAP(MTXAP) if(MTXAP){ if( (MTXAP)->flags == RSB_ILLEGAL_FLAGS ) { RSB_ERROR("Probable attempt to free already freed matrix at %p !\n",(MTXAP)); } (MTXAP)->flags = RSB_ILLEGAL_FLAGS; RSB_CONDITIONAL_FREE(MTXAP); }
+#else
+#define RSB_CONDITIONAL_FREE_MTXAP(MTXAP) RSB_CONDITIONAL_FREE(MTXAP)
+#endif
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+void * rsb__init_options_t(struct rsb_options_t *o)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * initializes a rsb_options_t struct to default 'vanilla' values
+	 * \return the input address
+	 */
+	if(!o)
+		goto err;
+	RSB_BZERO_P(o);
+err:
+	return o;
+}
+
+void * rsb__destroy_options_t(struct rsb_options_t *o)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * frees the given options structure and any of its allocated arrays
+	 *
+	 * \return the input address
+	 */
+	if(!o)
+		goto err;
+	RSB_CONDITIONAL_FREE(o->bitmap);
+	RSB_CONDITIONAL_FREE(o);
+err:
+	return o;
+}
+
+const void * rsb__is_valid_options_t(const struct rsb_options_t *o, rsb_coo_idx_t m, rsb_coo_idx_t k)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * checks if the structure members which are to be used for 
+	 * the creation of a matrix have meaningful values.
+	 *
+	 * \return the input address in case of success, NULL otherwise
+	 * */
+	if(!o)
+		goto err;
+
+	if(RSB_INVALID_COO_INDEX(m) || RSB_INVALID_COO_INDEX(k))
+		goto err;
+
+	/*
+	 * someday:
+	 *
+	 * if(already_initialized) { 
+	 * 	if(mtxAp->el_size<1)		goto err;
+	 *	if(! o->bitmap)			goto err;
+	 *	....
+	 */
+	return o;
+err:
+	return NULL;
+}
+
+void * rsb__reallocate_coo_matrix_t(struct rsb_coo_matrix_t *cmp, rsb_nnz_idx_t nnnz)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \return On success, return the input address; on failure, NULL.
+	 */
+	size_t es = 0;
+	void * check = NULL;
+
+	if(!cmp)
+		goto err;
+
+        es = RSB_NUMERICAL_TYPE_SIZE(cmp->typecode);
+
+	if(es < 1)
+		goto err;
+
+	if( nnnz == 0 && RSB_WANT_NULL_ALLOCATED_ZERO_NNZ_COO_MATRICES_ARRAYS )
+	{
+		cmp->IA = NULL;
+		cmp->JA = NULL;
+		cmp->VA = NULL;
+		goto done;
+	}
+
+	check = rsb__realloc(cmp->IA,sizeof(rsb_coo_idx_t)*(nnnz));
+	if(!check)
+		goto err;
+	cmp->IA = check;
+
+	check = rsb__realloc(cmp->JA,sizeof(rsb_coo_idx_t)*(nnnz));
+	if(!check)
+		goto err;
+	cmp->JA = check;
+
+	check = rsb__realloc(cmp->VA,es*nnnz);
+	if(!check)
+		goto err;
+	cmp->VA = check;
+
+	if(!cmp->IA || !cmp->JA || !cmp->VA)
+		goto cerr;
+done:
+	cmp->nnz = nnnz;
+	return cmp;
+cerr:
+	/* critical error (should not happen) */
+	if(cmp)
+	{
+		RSB_CONDITIONAL_FREE(cmp->IA);
+		RSB_CONDITIONAL_FREE(cmp->JA);
+		RSB_CONDITIONAL_FREE(cmp->VA);
+	}
+err:
+	return NULL;
+}
+
+void * rsb__callocate_coo_matrix_t(struct rsb_coo_matrix_t *cmp)
+{
+	return rsb__xallocate_coo_matrix_t(cmp,RSB_BOOL_TRUE,RSB_FLAG_NOFLAGS);
+}
+
+void * rsb__allocate_coo_matrix_t(struct rsb_coo_matrix_t *cmp)
+{
+	return rsb__xallocate_coo_matrix_t(cmp,RSB_BOOL_FALSE,RSB_FLAG_NOFLAGS);
+}
+
+void * rsb__xallocate_coo_matrix_t(struct rsb_coo_matrix_t *cmp, rsb_bool_t want_calloc, rsb_flags_t flags)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \return the input address on success, NULL on error
+	 */
+	size_t es = 0, nnz = 0, rnz = 0;
+
+	if(!cmp)
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	rnz = nnz = cmp->nnz;
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS))
+		rnz = RSB_MAX(nnz,cmp->nr+1);
+
+        es = RSB_NUMERICAL_TYPE_SIZE(cmp->typecode);
+
+	if(es<1)
+	{
+		RSB_ERROR("typecode seem wrong: 0%x\n",cmp->typecode);
+		RSB_PERR_GOTO(err,RSB_ERRM_WTC);
+	}
+	cmp->IA = NULL;
+	cmp->JA = NULL;
+	cmp->VA = NULL;
+	/* the above will avoid problems in case of error */
+
+	if(want_calloc == RSB_BOOL_TRUE)
+		cmp->IA = rsb__calloc(sizeof(rsb_coo_idx_t)*(rnz)),
+		cmp->JA = rsb__calloc(sizeof(rsb_coo_idx_t)*(nnz)),
+		cmp->VA = rsb__calloc_vector(nnz, cmp->typecode);
+	else
+		cmp->IA = rsb__malloc(sizeof(rsb_coo_idx_t)*(rnz)),
+		cmp->JA = rsb__malloc(sizeof(rsb_coo_idx_t)*(nnz)),
+		cmp->VA = rsb__malloc_vector(nnz, cmp->typecode);
+
+	if(!cmp->IA || !cmp->JA || !cmp->VA)
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	/*
+	 * Note: we do not free cmp itself.
+	 */
+	return cmp;
+err:
+	if(cmp)
+	{
+		RSB_CONDITIONAL_FREE(cmp->IA);
+		RSB_CONDITIONAL_FREE(cmp->JA);
+		RSB_CONDITIONAL_FREE(cmp->VA);
+	}
+	return NULL;
+}
+
+void * rsb__destroy_coo_matrix_t(struct rsb_coo_matrix_t *cmp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * frees the given structure allocated arrays
+	 *
+	 * \return the input address
+	 */
+	if(!cmp)
+		return cmp;
+	RSB_CONDITIONAL_FREE(cmp->IA);
+	RSB_CONDITIONAL_FREE(cmp->JA);
+	RSB_CONDITIONAL_FREE(cmp->VA);
+	/* RSB_BZERO_P(cmp); */
+	/*
+	 * Note: we do not free cmp itself.
+	 */
+	return cmp;
+}
+
+void * rsb__transpose_coo_matrix_t(struct rsb_coo_matrix_t *cmp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * transposes symbolically the given matrix
+	 *
+	 * \return the input address
+	 */
+	if(!cmp)
+		return cmp;
+	RSB_SWAP(rsb_coo_idx_t ,cmp->nr, cmp->nc );
+	RSB_SWAP(rsb_coo_idx_t*,cmp->IA,cmp->JA);
+	return cmp;
+}
+
+void * rsb__init_blank_pointers(struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \return the input address
+	 * */
+	if(!mtxAp)
+		return mtxAp;
+
+	mtxAp->VA = NULL;
+	mtxAp->indptr = NULL;
+	mtxAp->bindx = NULL;
+	mtxAp->rpntr = NULL;
+	mtxAp->cpntr = NULL;
+	mtxAp->bpntr = NULL;
+#if RSB_WANT_BITMAP
+	mtxAp->options = NULL;		/* exactly ... */
+#endif /* RSB_WANT_BITMAP */
+	mtxAp->mpntr = NULL;
+	mtxAp->Mpntr = NULL;
+	mtxAp->all_leaf_matrices = NULL;
+	mtxAp->sm[0] = NULL;
+	mtxAp->sm[1] = NULL;
+	mtxAp->sm[2] = NULL;
+	mtxAp->sm[3] = NULL;
+
+	return mtxAp;
+}
+
+rsb_err_t rsb__fill_struct(struct rsb_mtx_t *mtxAp, void * VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_type_t typecode, rsb_flags_t flags)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * initializes a rsb_mtx_t struct to default 'vanilla' values
+	 * \return the input address
+	 * FIXME: rsb__fill_struct -> rsb__mtx_init
+	 */
+
+	if(!mtxAp)
+		return RSB_ERR_GENERIC_ERROR;
+
+	rsb__init_struct(mtxAp);/* redundant ?*/
+	mtxAp->VA = VA;
+	mtxAp->bindx = JA;
+	mtxAp->bpntr = IA;
+	mtxAp->nr = m;
+	mtxAp->nc = k;
+	mtxAp->flags = flags;
+	mtxAp->typecode = typecode;
+	
+	return RSB_ERR_NO_ERROR;
+}
+
+void * rsb__fill_coo_struct(struct rsb_coo_matrix_t *mtxAp, void * VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnz, rsb_type_t typecode)
+{
+	if(!mtxAp)
+		return NULL;
+	mtxAp->IA = IA;
+	mtxAp->JA = JA;
+	mtxAp->VA = VA;
+	mtxAp->nnz = nnz;
+	mtxAp->nr = m;
+	mtxAp->nc = k;
+	mtxAp->typecode = typecode;
+	return mtxAp;
+}
+
+void * rsb__init_struct(struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * initializes a rsb_mtx_t struct to default 'vanilla' values
+	 * \return the input address
+	 * */
+	if(!mtxAp)
+		return mtxAp;
+
+	rsb__init_blank_pointers(mtxAp);
+
+	mtxAp->flags = RSB_FLAG_NOFLAGS ;
+
+	mtxAp->sat = RSB_TIME_ZERO;
+	mtxAp->eit = RSB_TIME_ZERO;
+	mtxAp->pet = RSB_TIME_ZERO;
+	mtxAp->est = RSB_TIME_ZERO;
+	mtxAp->tat = RSB_TIME_ZERO;
+	mtxAp->cpt = RSB_TIME_ZERO;
+	mtxAp->rpt = RSB_TIME_ZERO;
+
+	return mtxAp;
+}
+
+void * rsb__destroy_inner(struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \param mtxAp a pointer to a valid matrix structure.
+	 * \return NULL.
+	 *
+	 * Deallocates the guts of a sparse matrix.
+	 * (will leave the struct in an inconsistent state)
+	 */
+	rsb_submatrix_idx_t i,j;
+	struct rsb_mtx_t * submatrix = NULL;
+
+	//RSB_STDOUT("destroying matrix %p\n",mtxAp);
+
+	if(!mtxAp)
+		goto ret;
+
+	if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_ASSEMBLED_IN_COO_ARRAYS))
+	{
+		if(rsb__is_root_matrix(mtxAp))
+		{
+			/* FIXME: unfinished, temporary */
+			/* this is a trick: fitting the whole recursive matrix in three arrays */
+			void *IA = NULL,*JA = NULL,*VA = NULL;
+			rsb_bool_t is_bio = rsb__do_is_matrix_binary_loaded(mtxAp); // binary I/O matrix
+			struct rsb_mtx_t *fsm = rsb__do_get_first_submatrix(mtxAp);
+			rsb_flags_t flags = mtxAp->flags;
+
+			if(!is_bio)
+				RSB_CONDITIONAL_FREE(mtxAp->all_leaf_matrices)	// ?!
+			JA = fsm->bindx;
+			VA = fsm->VA;
+			if(!is_bio)
+			{
+				//IA = fsm->bpntr-(fsm->nr+1);
+				IA = fsm->bpntr;
+			}
+			else
+				IA = mtxAp;
+			if(!is_bio)
+				RSB_CONDITIONAL_FREE_MTXAP(mtxAp)// extra allocation
+//			RSB_INFO("VA:%p, IA:%p, JA:%p\n",VA,IA,JA);
+			if(!RSB_DO_FLAG_HAS(flags,RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS))
+			{
+				RSB_CONDITIONAL_FREE(IA);/* these arrays are allowed to be NULL, as it happens during conversions */
+				RSB_CONDITIONAL_FREE(JA);
+				RSB_CONDITIONAL_FREE(VA);
+			}
+		}
+		return NULL;/* no deallocation, in this case */
+	}
+
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+	{
+		if(submatrix)
+		{
+			rsb__do_mtx_free(submatrix);
+		}
+	}
+
+	if(!(mtxAp->flags & RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR))
+	if(!RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS))
+	{
+		RSB_CONDITIONAL_FREE(mtxAp->VA);
+		RSB_CONDITIONAL_FREE(mtxAp->bindx);
+		RSB_CONDITIONAL_FREE(mtxAp->bpntr);
+	}
+
+	RSB_CONDITIONAL_FREE(mtxAp->indptr);
+
+#if RSB_WANT_BITMAP
+	if(mtxAp->options)
+		rsb__destroy_options_t(mtxAp->options);
+#endif /* RSB_WANT_BITMAP */
+
+	if((mtxAp->flags & RSB_FLAG_OWN_PARTITIONING_ARRAYS)!=0)
+	{
+		RSB_CONDITIONAL_FREE(mtxAp->rpntr);
+		RSB_CONDITIONAL_FREE(mtxAp->cpntr);
+	}
+#if RSB_EXPERIMENTAL_SHOULD_TRAVERSE_RECURSIVE_MATRICES_AS_BLOCKS
+	RSB_CONDITIONAL_FREE(mtxAp->all_leaf_matrices);
+#endif /* RSB_EXPERIMENTAL_SHOULD_TRAVERSE_RECURSIVE_MATRICES_AS_BLOCKS */
+	RSB_BZERO_P(mtxAp);/* this enforces correct usage */
+ret:
+	return NULL;
+}
+
+struct rsb_mtx_t * rsb__do_get_first_submatrix(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * */
+	if(!mtxAp)
+		return NULL;
+
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t i,j;
+		struct rsb_mtx_t * submatrix;
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix)
+			return rsb__do_get_first_submatrix(submatrix);
+	}
+	return (struct rsb_mtx_t*)mtxAp;/* FIXME */
+}
+
+void * rsb__do_mtx_free(struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \param mtxAp a pointer to a matrix structure
+	 *
+	 * Will destroy a valid matrix and deallocate all of its allocated data.
+	 * */
+	rsb_flags_t flags;
+
+	if(!mtxAp)
+		goto ret;
+
+	flags = mtxAp->flags;
+
+	rsb__destroy_inner(mtxAp);
+
+	if(!RSB_DO_FLAG_HAS(flags,RSB_FLAG_ASSEMBLED_IN_COO_ARRAYS))
+	{
+		if(mtxAp && RSB_WANT_ZERO_ON_DESTROY)
+		{
+			RSB_BZERO_P(mtxAp);
+		}
+		RSB_CONDITIONAL_FREE_MTXAP(mtxAp);
+	}
+ret:
+	return mtxAp;
+}
+
+
+#if RSB_WANT_BITMAP
+static size_t rsb__get_sizeof_options(const struct rsb_options_t *o, rsb_blk_idx_t M_b, rsb_blk_idx_t K_b)
+{	
+	/*!
+	 * \ingroup gr_internals
+	 * \return memory usage
+	 * \param o a pointer to a valid rsb_options_t structure
+	 *
+	 * \return the amount of memory allocated for this structure, deeply
+	 * */
+	size_t count = 0;	
+
+	if(!o )
+		return 0;
+
+	count += sizeof(*o);
+
+	/* we allocate a new options structure */
+	if(o->bitmap)
+		count += RSB_BYTES_PER_BITMAP(M_b,K_b) ;
+
+	return count;
+}
+#endif /* RSB_WANT_BITMAP */
+
+size_t rsb__get_sizeof(const struct rsb_mtx_t *mtxAp )
+{
+	/*! 
+	 * \ingroup gr_internals
+	 * \param mtxAp a pointer to a valid rsb_mtx_t structure
+	 * \return the amount of memory allocated for this structure, deeply (indices + coefficients).
+	 * */
+	size_t count = 0;	
+	struct rsb_mtx_t * submatrix = NULL;
+	rsb_submatrix_idx_t i,j;
+	rsb_bool_t istrec = RSB_BOOL_FALSE;
+
+	if(!mtxAp )
+		goto err;
+
+	istrec = rsb__is_terminal_recursive_matrix(mtxAp);
+
+	count += sizeof(*mtxAp);
+
+#if RSB_WANT_BITMAP
+	if(mtxAp->options)
+		count += rsb__get_sizeof_options(mtxAp->options,mtxAp->M_b,mtxAp->K_b);
+	else
+		return count;
+#endif /* RSB_WANT_BITMAP */
+	/* we allocate a new options structure */
+	if( mtxAp->rpntr	) count += sizeof(rsb_coo_idx_t)*(mtxAp->M_b+1);
+	if( mtxAp->cpntr	) count += sizeof(rsb_coo_idx_t)*(mtxAp->K_b+1);
+	if(istrec)
+	{
+#if RSB_WANT_RSB_AS_ONLY_ALLOWED_FORMAT
+	if(rsb__is_coo_matrix(mtxAp))
+	{
+		if( mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES)
+			count += sizeof(rsb_half_idx_t)*(mtxAp->nnz)*2;
+		else
+			count += sizeof(rsb_coo_idx_t)*(mtxAp->nnz)*2;
+	}
+	else
+	{
+		if( mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES)
+			count += sizeof(rsb_half_idx_t)*(mtxAp->nnz)+sizeof(rsb_nnz_idx_t)*(mtxAp->nr+1);
+		else
+			count += sizeof(rsb_coo_idx_t)*(mtxAp->nnz)+sizeof(rsb_nnz_idx_t)*(mtxAp->nr+1);
+	}
+	if( mtxAp->VA  	) count += mtxAp->el_size*mtxAp->nnz;
+	/* FIXME: missing the amount of memory allocated as extra submatrices for root, and the redundant structs array */
+#else /* RSB_WANT_RSB_AS_ONLY_ALLOWED_FORMAT */
+	if( mtxAp->bindx	) count += sizeof(rsb_nnz_idx_t)*(mtxAp->block_count+1);
+	if( mtxAp->indptr	) count += sizeof(rsb_nnz_idx_t)*(mtxAp->block_count+1);
+	if( mtxAp->bpntr	) count += sizeof(rsb_nnz_idx_t)*(mtxAp->Mdim+1);
+#if RSB_WANT_BITMAP
+	if( mtxAp->VA  	) count += ( RSB_TOTAL_BLOCK_BYTES(mtxAp,mtxAp->options) );
+#endif /* RSB_WANT_BITMAP */
+#endif /* RSB_WANT_RSB_AS_ONLY_ALLOWED_FORMAT */
+	}
+	RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix)
+			count += rsb__get_sizeof(submatrix);
+
+err:
+	return count;
+}
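+
+/*
+ * Worked example for the accounting above, assuming 4-byte rsb_coo_idx_t /
+ * rsb_nnz_idx_t and 2-byte rsb_half_idx_t: a terminal 1000 x 1000 submatrix
+ * with nnz = 10000 takes
+ *   COO, fullword : 2 * 4 * 10000        = 80000 bytes of indices;
+ *   COO, halfword : 2 * 2 * 10000        = 40000 bytes;
+ *   CSR, fullword : 4 * 10000 + 4 * 1001 = 44004 bytes;
+ *   CSR, halfword : 2 * 10000 + 4 * 1001 = 24004 bytes;
+ * plus el_size * nnz bytes for VA in every case.
+ */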
+
+int rsb__nnz_coord_compar(const void *key, const void *am)
+{	
+	/*!
+	 * \ingroup gr_internals
+	 * A service function.
+	 *
+	 * Note: the sole use of this function is the major bottleneck during matrix
+	 * creation: this routine can eat up to 90% of the time required to create a
+	 * matrix. When thinking about optimizing matrix creation, start here.
+	 * */
+	register const rsb_coo_idx_t *iam = am,*ik = key;
+	/*!
+	 * This function is used as the compar argument to stdlib's bsearch
+	 * on the ordered arrays p_r and p_c. It returns -1, 0, or 1 if the key
+	 * element is, respectively, less than *am, in [*am,am[1]), or greater
+	 * than or equal to am[1].
+	 * */
+	return (*ik < iam[1] )? (*ik<*iam?-1:0):1;
+}
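+
+/*
+ * Illustrative sketch (not library code): how a comparator like
+ * rsb__nnz_coord_compar can be fed to stdlib's bsearch to locate the
+ * partitioning interval containing a coordinate; presumably this is what
+ * the RSB_GET_BLOCK_*_FOR_NZ macros boil down to.  The array rpntr below
+ * is a hypothetical row partitioning array with M_b+1 entries.
+ */
+#if 0
+#include <stdlib.h>
+static rsb_blk_idx_t rsb_example_find_block_row(const rsb_coo_idx_t *rpntr, rsb_blk_idx_t M_b, rsb_coo_idx_t i)
+{
+	/* each array element and its successor delimit a half-open interval
+	 * [rpntr[r],rpntr[r+1]); searching the first M_b entries is safe since
+	 * the comparator may read one element past the probed one */
+	const rsb_coo_idx_t *p = bsearch(&i,rpntr,(size_t)M_b,sizeof(*rpntr),rsb__nnz_coord_compar);
+	return p ? (rsb_blk_idx_t)(p-rpntr) : -1;	/* -1 : i falls outside the partitioning */
+}
+#endif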
+
+rsb_bitmap_data_t * rsb__allocate_bitmap(rsb_blk_idx_t rows, rsb_blk_idx_t cols)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \param rows the amount of rows the bitmap should have
+	 * \param cols the amount of columns the bitmap should have
+	 * \return the bitmap area
+	 *
+	 * Allocates an area of (((cols+sizeof(rsb_bitmap_data_t)-1))/sizeof(rsb_bitmap_data_t) * rows) bytes
+	 * to use as a bitmap, through the  RSB_BITMAP_SET 
+	 * and  RSB_BITMAP_GET macros.
+	 *
+	 * This bitmap takes ( ceil(cols/sizeof(rsb_bitmap_data_t))*sizeof(rsb_bitmap_data_t)*rows ) bytes of memory;
+	 * that is roughly 1 bit per data block, or at worst
+	 * ((cols/(sizeof(rsb_bitmap_data_t)*8))+1/(sizeof(rsb_bitmap_data_t)*8))/cols bits for every data block.
+	 *
+	 * The bitmap is returned set to zero.
+	 * Assumes sizeof(rsb_bitmap_data_t)>1.
+	 * */
+	if( RSB_INVALID_COO_INDEX(rows) || RSB_INVALID_COO_INDEX(cols))
+		return NULL;
+	return rsb__calloc(RSB_BYTES_PER_BITMAP(rows,cols));
+}
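+
+/*
+ * Minimal usage sketch (not library code) for the allocator above, using
+ * the RSB_BITMAP_SET / RSB_BITMAP_GET macros with the argument order seen
+ * elsewhere in this file; error handling is elided.
+ */
+#if 0
+static void rsb_example_bitmap_usage(void)
+{
+	const rsb_blk_idx_t rows = 4, cols = 8;
+	rsb_bitmap_data_t *bitmap = rsb__allocate_bitmap(rows,cols);	/* returned zeroed */
+	if(!bitmap)
+		return;
+	RSB_BITMAP_SET(bitmap,rows,cols,2,5);	/* mark block (2,5) as present */
+	if(RSB_BITMAP_GET(bitmap,rows,cols,2,5))
+		;	/* the bit is now set */
+	RSB_CONDITIONAL_FREE(bitmap);
+}
+#endif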
+
+rsb_bitmap_data_t * rsb__allocate_bitvector(rsb_blk_idx_t nbits)
+{
+	/*
+	 * \ingroup gr_internals
+	 * Allocates an array for \c nbits  bits, set to zero.
+	 * */
+#ifdef RSB_BITMAP_ROW_MAJOR_ORDER
+	return rsb__allocate_bitmap(nbits,1);
+#else /* RSB_BITMAP_ROW_MAJOR_ORDER */
+	return rsb__allocate_bitmap(1,nbits);
+#endif /* RSB_BITMAP_ROW_MAJOR_ORDER */
+}
+
+rsb_blk_idx_t rsb__bitmap_bit_count(const rsb_bitmap_data_t *bitmap, const rsb_blk_idx_t rows, const rsb_blk_idx_t cols)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \param bitmap is the bitmap data pointer
+	 * \param rows is the number of rows the bitmap was allocated for
+	 * \param cols is the number of columns the bitmap was allocated for
+	 * \return an error code in case of error, the set bit count otherwise
+	 *
+	 * This function counts the set bits in a bitmap.
+	 * Note that it does not depend on the internal bitmap storage scheme
+	 * (column major or row major), as long as the bit pool is uniform.
+	 *
+	 * TODO : this is not core functionality, so it could be moved somewhere else.
+	 * */
+	register rsb_blk_idx_t i,bc = 0;
+	register rsb_bitmap_data_t w;
+	if(!bitmap)
+		return RSB_ERR_BADARGS;
+	for(i=0;i<((rsb_blk_idx_t)RSB_BYTES_PER_BITMAP(rows,cols)/sizeof(rsb_bitmap_data_t));++i)
+	{
+		w = bitmap[i];
+		/* TODO : no stdlib functions for counting bits in integers ? */
+		//b += (w&1); while(w) b += ((w /= 2)&1);
+		while(w!=0) {bc += (w&1);w /= 2;}
+	}
+	/* warning : overflow check missing */
+	return bc;
+}
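+
+/*
+ * A possible answer to the TODO above (a sketch, not library code):
+ * Kernighan's trick clears the lowest set bit on each iteration, so it
+ * loops once per set bit rather than once per bit; compiler builtins
+ * (e.g. GCC's __builtin_popcount) would be faster still, but are not
+ * portable C.
+ */
+#if 0
+static rsb_blk_idx_t rsb_example_popcount(rsb_bitmap_data_t w)
+{
+	rsb_blk_idx_t bc = 0;
+	while(w) { w &= (w-1); ++bc; }	/* each step erases the lowest set bit */
+	return bc;
+}
+#endif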
+
+void* rsb__get_block_address( rsb_blk_idx_t blockrow, rsb_blk_idx_t blockcolumn, const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \param blockrow the block row
+	 * \param blockcolumn the block column
+	 * \param mtxAp a valid matrix structure pointer
+	 * \return a pointer to the block itself or NULL if it is not present
+ 	 *
+	 * A service function for getting the (blockrow,blockcolumn) block address inside the matrix.
+	 *
+	 * This function is SLOW, and should be used for debugging purposes only!
+	 * (It uses indirect indexing to locate elements.)
+	 * */
+	rsb_nnz_idx_t l = 0;
+	rsb_nnz_idx_t fnze = 0;
+#if RSB_WANT_BITMAP
+	struct rsb_options_t *o = NULL;
+#endif /* RSB_WANT_BITMAP */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t offset = 0;
+
+	if(!mtxAp)
+	{errval = RSB_ERR_BADARGS;goto err;}
+
+#if RSB_WANT_BITMAP
+	o = mtxAp->options;
+	if(!o)
+	{errval = RSB_ERR_BADARGS;goto err;}
+#endif /* RSB_WANT_BITMAP */
+
+	if(RSB_BLK_ADD_OVERFLOW(blockrow,RSB_INDEX_OF_SAFE_EXTRA))
+	{errval = RSB_ERR_LIMITS;goto err;}
+
+	if(RSB_BLK_ADD_OVERFLOW(blockcolumn,RSB_INDEX_OF_SAFE_EXTRA))
+	{errval = RSB_ERR_LIMITS;goto err;}
+
+	/* i is the working block row */
+	if( mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER )
+	{
+		if(mtxAp->bpntr[blockcolumn]==mtxAp->bpntr[blockcolumn+1])
+			goto err;/* empty block column */
+		fnze = mtxAp->bpntr[blockcolumn];	/* first nonzero entry in bindx */
+		while(mtxAp->bindx[fnze+l]!=blockrow)++l;
+	}
+	else
+	{
+		if(mtxAp->bpntr[blockrow]==mtxAp->bpntr[blockrow+1])
+			goto err;/* empty block row */
+		fnze = mtxAp->bpntr[blockrow];	/* first nonzero entry in bindx */
+		while(mtxAp->bindx[fnze+l]!=blockcolumn)++l;
+	}
+
+	if(RSB_NNZ_ADD_OVERFLOW(fnze,l))
+	{errval = RSB_ERR_LIMITS;goto err;}
+
+	offset = fnze+l;
+	//return ((rsb_byte_t*)(mtxAp->VA)) + mtxAp->indptr[offset] * mtxAp->el_size;
+	return RSB_BLOCK_ADDRESS(mtxAp,offset);
+err:
+	rsb__do_perror(NULL,errval);
+	return NULL;
+}
+
+rsb_err_t rsb__recheck_insertion(const void *VA, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, const struct rsb_mtx_t *mtxAp, const struct rsb_options_t *o)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * This is a very slow debug function.
+	 * It should be supplied with some sparse matrix construction arrays in any order, and 
+	 * a fully constructed matrix structure.
+	 * 
+	 * \note: Does not support block column major matrices, nor sorted ones.
+	 *
+	 * TODO  : should support more matrix formats ( e.g.: block column major )
+	 * FIXME : obsolete, very limited function
+	 * 
+	 * \return RSB_ERR_NO_ERROR on correct operation, an error code (see \ref errors_section) otherwise.
+	 * */
+	register rsb_blk_idx_t i,j;
+	rsb_nnz_idx_t k;
+	rsb_nnz_idx_t missing = 0;
+	const rsb_byte_t *moff = NULL;
+	const rsb_byte_t *src = NULL;
+
+	if(!mtxAp || !o )return RSB_ERR_BADARGS;
+	if(mtxAp->flags & RSB_FLAG_SORT_INPUT) return RSB_ERR_BADARGS;
+	if(mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER) return RSB_ERR_UNIMPLEMENTED_YET;
+	if(! o->bitmap)  return RSB_ERR_UNSUPPORTED_OPERATION;/* when building sorted matrices, we don't create bitmaps .. should we ? */
+
+#if RSB_WANT_BITMAP
+	for(k=0;k<nnz;++k) { rsb_coo_idx_t iI = IA[k],iJ = JA[k];RSB_BLOCK_UNSET_BIT_FOR_NNZ(&iI,&iJ,k,mtxAp); }
+	for(k=0;k<nnz;++k) { RSB_BLOCK_SET_BIT_FOR_NNZ(  IA,JA,k,mtxAp); }
+#endif /* RSB_WANT_BITMAP */
+
+	for(k=0;k<nnz;++k)
+	{
+		i = RSB_GET_BLOCK_ROW_FOR_NZ(IA+k,mtxAp);
+		j = RSB_GET_BLOCK_COL_FOR_NZ(JA+k,mtxAp);
+		if(!(RSB_BITMAP_GET(o->bitmap,mtxAp->M_b,mtxAp->K_b,i,j))) ++missing;
+	}
+
+	if(!missing)
+		RSB_STDERR("checking structure : there are no blocks missing.\n");
+	else
+		RSB_STDERR("checking structure : there are %zd blocks missing\n",(size_t)missing);
+	if(missing) return RSB_ERR_GENERIC_ERROR;
+	for(k=0;k<nnz;++k)
+	{
+		i = RSB_GET_BLOCK_ROW_FOR_NZ(IA+k,mtxAp);
+		j = RSB_GET_BLOCK_COL_FOR_NZ(JA+k,mtxAp);
+		moff = rsb__get_block_address(i,j,mtxAp);
+		if(!moff)
+		{
+			RSB_STDERR("critical block error on block (%d,%d).\n",i,j);
+			return RSB_ERR_GENERIC_ERROR;
+		}
+		else
+		{
+			moff += RSB_GET_INTRA_BLOCK_OFFSET(IA[k],JA[k],i,j,mtxAp);
+			src = VA;
+			src += mtxAp->el_size*k;
+
+			if(RSB_MEMCMP(src,moff,mtxAp->el_size))
+			{
+				/* may give problems when flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER */
+				RSB_ERROR("critical error: %d'th nonzero (%d,%d) at (%d,%d) in block (%d,%d) is wrong!\n",
+				k,
+				IA[k]+1,JA[k]+1,
+				RSB_INTRA_BLOCK_ROW(IA[k],i,mtxAp),RSB_INTRA_BLOCK_COLUMN(JA[k],j,mtxAp),i,j);
+				/* warning : the following instruction is potentially harmful ! */
+#ifdef RSB_DEBUG_BLOCK_STUFF
+				RSB_STDERR("should be : 0x%x\n",*(int*)src );
+				RSB_STDERR("is : 0x%x\n",*((int*)(moff)));
+/*				RSB_STDERR("should be : %g\n",src );
+				RSB_STDERR("is : %g\n",*((float*)(moff)));*/
+#endif /* RSB_DEBUG_BLOCK_STUFF */
+				return RSB_ERR_GENERIC_ERROR;
+			}
+		}
+	}
+	return RSB_ERR_NO_ERROR;	
+}
+
+rsb_err_t rsb__do_is_valid_pinfo_t(const struct rsb_mtx_partitioning_info_t * pinfop)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \param pinfop should specify a partitioning info array
+	 *
+	 * This is a strictly debugging function, whose sole purpose is to verify
+	 * the partitioning array contents of a rsb_mtx_partitioning_info_t structure.
+	 * */
+	rsb_nnz_idx_t k;
+	if(pinfop->nr<1)
+	{
+		RSB_PERR_GOTO(err,"m == %d ?\n",pinfop->nr);
+	}
+	if(pinfop->nc<1)
+	{
+		RSB_PERR_GOTO(err,"k == %d ?\n",pinfop->nc);
+	}
+	if(pinfop->rpntr && pinfop->M_b<1)
+	{
+		RSB_PERR_GOTO(err,"M_b == %d ?\n",pinfop->M_b);
+	}
+	if(pinfop->cpntr && pinfop->K_b<1)
+	{
+		RSB_PERR_GOTO(err,"K_b == %d ?\n",pinfop->K_b);
+	}
+
+	if(pinfop->rpntr && pinfop->cpntr )
+	{
+		/* FIXME */
+		if(pinfop->rpntr[pinfop->M_b]<=pinfop->rpntr[pinfop->M_b-1])
+		{
+			RSB_PERR_GOTO(err,"last (%d) rpntr element is %d <= %d\n",pinfop->M_b,pinfop->rpntr[pinfop->M_b],pinfop->rpntr[pinfop->M_b-1]);
+		}
+		if(pinfop->cpntr[pinfop->K_b]<=pinfop->cpntr[pinfop->K_b-1])
+		{
+			RSB_PERR_GOTO(err,"last (%d) cpntr element is %d <= %d\n",pinfop->K_b,pinfop->cpntr[pinfop->K_b],pinfop->cpntr[pinfop->K_b-1]);
+		}
+
+		for(k=0;k<pinfop->M_b;++k)if(pinfop->rpntr[k]<0)
+		{
+			RSB_PERR_GOTO(err,"bad partitioning : rpntr[%d]=%d\n",k,pinfop->rpntr[k]);
+		}
+		for(k=0;k<pinfop->K_b;++k)if(pinfop->cpntr[k]<0)
+		{
+			RSB_PERR_GOTO(err,"bad partitioning : cpntr[%d]=%d\n",k,pinfop->cpntr[k]);
+		}
+		for(k=0;k<pinfop->M_b;++k)if(pinfop->rpntr[k]>pinfop->nr)
+		{
+			RSB_PERR_GOTO(err,"bad partitioning : rpntr[%d]=%d > m==%d\n",k,pinfop->rpntr[k],pinfop->nr);
+		}
+		for(k=0;k<pinfop->K_b;++k)if(pinfop->cpntr[k]>pinfop->nc)
+		{
+			RSB_PERR_GOTO(err,"bad partitioning : cpntr[%d]=%d > k==%d\n",k,pinfop->cpntr[k],pinfop->nc);
+		}
+	}
+	return RSB_ERR_NO_ERROR;
+err:
+	return RSB_ERR_GENERIC_ERROR;
+}
+
+rsb_err_t rsb__compute_partial_fillin_for_nnz_fraction(const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA,const  rsb_nnz_idx_t nnz, struct rsb_mtx_partitioning_info_t * pinfop, size_t * element_countp, size_t * block_countp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * see rsb__compute_partial_fillin_for_nnz_fractions
+	 * */
+	return rsb__compute_partial_fillin_for_nnz_fractions(IA,JA,&nnz,1,pinfop,element_countp,block_countp);
+}
+
+rsb_err_t rsb__compute_partial_fillin_for_nnz_fractions(const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA,const  rsb_nnz_idx_t * nnz, const rsb_nnz_idx_t nnzn, struct rsb_mtx_partitioning_info_t * pinfop, size_t * element_countp, size_t * block_countp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \param IA is a row indices array sized nnz
+	 * \param JA is a column indices array sized nnz
+	 * \param nnz is the length of IA and JA
+	 * \param element_countp is where the element counts will be written
+	 * \param block_countp   is where the block   counts will be written
+	 *
+	 * Will estimate fillin for the first nnz[ni]<nnz[ni+1]<...<nnz[nnzn-1] elements, with nnz[nnzn-1] being 
+	 * less than or equal to the number of elements in the IA, JA, element_countp, block_countp arrays.
+	 *
+	 * Note: this function performs almost no data validation.
+	 * Note: this is not a service but an experimental function, and it is very slow.
+	 * Note: another way of structuring this function would be to make it accept a 2*nnzn sized array with lower
+	 *       and upper segment indices both specified.
+	 *       This would have been more flexible, but would require some changes to this function's code.
+	 * TODO: this is not core functionality, so this function could be moved elsewhere.
+	 * */
+
+	rsb_bitmap_data_t * bitmap = NULL;
+	size_t  element_count = 0;
+	rsb_nnz_idx_t block_count = 0;
+	rsb_nnz_idx_t k = 0,l = 0;/* were -1 */
+	rsb_blk_idx_t i = 0,j = 0;/* were -1 */
+
+	if( !IA || !JA || !pinfop || !element_countp || !block_countp ) goto err;
+	if( nnzn < 1 ) goto err;
+	if( RSB_INVALID_BLK_INDEX(pinfop->M_b) || RSB_INVALID_BLK_INDEX(pinfop->K_b) )goto err;
+	if( !pinfop->rpntr  || !pinfop->cpntr  )goto err;
+	bitmap = rsb__allocate_bitmap(pinfop->M_b,pinfop->K_b);
+	if(!bitmap)goto err;
+
+	#ifdef  RSB_DEBUG_INPUT
+	if(rsb__do_is_valid_pinfo_t(pinfop))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	#endif /* RSB_DEBUG_INPUT */
+	
+	if(1)
+	{
+	size_t skip = 0,skipu = 0;//new
+	rsb_nnz_idx_t c = 0;
+	rsb_nnz_idx_t sl;
+
+	skip = 0;
+	
+	skipu = (nnz[nnzn-1]/(nnzn*nnzn));
+	skip = nnzn*skipu;
+	for(sl=0;sl<nnzn;++sl)
+		block_countp[sl] = element_countp[sl] = 0;
+
+	#if 1
+	/* An alternative way, much more stable!
+	 * However, it underestimates the nnz count, so it should be mitigated in some manner! */
+	for(l=0;l<nnzn;++l)
+	{
+		for(sl=0;sl<nnzn;++sl)
+		{
+		//RSB_INFO("#i: %d / %d  (%d)\n",sl*skip+l*skipu,sl*skip+(l+1)*skipu,nnzn);
+		for(k=sl*skip+l*skipu;k<sl*skip+(l+1)*skipu;++k)
+		{
+	
+			++c;
+			/* NOTE : the following if/else statements are for situations where m<br or k<bc  */
+			if(pinfop->M_b>1)
+				i = RSB_GET_BLOCK_ROW_FOR_NZ(IA+k,pinfop);
+			else
+			       	i = 0;
+			if(pinfop->K_b>1)
+				j = RSB_GET_BLOCK_COL_FOR_NZ(JA+k,pinfop);
+			else
+			       	j = 0;
+
+			/* if the bit is not already set */
+			if(!(RSB_BITMAP_GET(bitmap,pinfop->M_b,pinfop->K_b,i,j)))
+			{
+				element_count += GET_BLOCK_SIZE(i,j,pinfop);
+				(block_count)++;
+				RSB_BITMAP_SET(bitmap,pinfop->M_b,pinfop->K_b,i,j) ;
+			}
+		}
+		block_countp[l] = block_count;
+		element_countp[l] = element_count;
+		}
+	}
+	l = nnzn-1;
+	//RSB_INFO("#c: %d / %d (%d)..  %d -> %d\n",c,block_countp[l],nnzn,nnzn*skip,nnz[l]);
+	for(k=c;k<nnz[l];++k)
+	//for(k=nnzn*skip;k<nnz[l];++k)
+	{
+//		++c;
+	
+			i = RSB_GET_BLOCK_ROW_FOR_NZ(IA+k,pinfop);
+			j = RSB_GET_BLOCK_COL_FOR_NZ(JA+k,pinfop);
+
+			/* if the bit is not already set */
+			if(!(RSB_BITMAP_GET(bitmap,pinfop->M_b,pinfop->K_b,i,j)))
+			{
+				element_countp[l] += GET_BLOCK_SIZE(i,j,pinfop);
+				block_countp[l]++;
+				RSB_BITMAP_SET(bitmap,pinfop->M_b,pinfop->K_b,i,j) ;
+			}
+	}
+	//RSB_INFO("#c: %d / %d (%d)\n",c,block_countp[l],nnzn);
+	#endif
+	}
+	else
+	for(l=0;l<nnzn;++l)
+	{
+		rsb_nnz_idx_t li;
+		if(l==0)
+			li = 0;
+	       	else
+		       	li = nnz[l-1];/* will the first loop be optimized by the compiler? :) */
+
+		for(k=li;k<nnz[l];++k)
+		{
+	
+			i = RSB_GET_BLOCK_ROW_FOR_NZ(IA+k,pinfop);
+			j = RSB_GET_BLOCK_COL_FOR_NZ(JA+k,pinfop);
+
+			/* if the bit is not already set */
+			if(!(RSB_BITMAP_GET(bitmap,pinfop->M_b,pinfop->K_b,i,j)))
+			{
+				element_count += GET_BLOCK_SIZE(i,j,pinfop);
+				(block_count)++;
+				RSB_BITMAP_SET(bitmap,pinfop->M_b,pinfop->K_b,i,j) ;
+			}
+		}
+		if(block_countp)
+			block_countp[l] = block_count;
+		if(element_countp)
+			element_countp[l] = element_count;
+	}
+
+	RSB_CONDITIONAL_FREE(bitmap);
+	return RSB_ERR_NO_ERROR;
+err:
+	return RSB_ERR_ENOMEM;
+}
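+
+/*
+ * Calling sketch (not library code) for the fraction-based estimator above:
+ * nnz[] must hold increasing prefix sizes and both output arrays must have
+ * nnzn entries; pinfop must carry valid rpntr/cpntr partitioning arrays.
+ * The fraction values below are illustrative only.
+ */
+#if 0
+static void rsb_example_estimate_fillin(const rsb_coo_idx_t *IA, const rsb_coo_idx_t *JA, struct rsb_mtx_partitioning_info_t *pinfop)
+{
+	const rsb_nnz_idx_t nnz[3] = { 1000, 2000, 4000 };	/* increasing nonzero fractions */
+	size_t element_count[3], block_count[3];
+	/* on success, block_count[i] / element_count[i] estimate the block and
+	 * (padded) element counts after inserting the first nnz[i] nonzeroes */
+	rsb__compute_partial_fillin_for_nnz_fractions(IA,JA,nnz,3,pinfop,element_count,block_count);
+}
+#endif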
+
+#if RSB_WANT_BITMAP
+static rsb_err_t rsb_element_block_count_and_bitmap_from_coo_partitioning(const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const rsb_nnz_idx_t nnz, const struct rsb_mtx_partitioning_info_t * pinfop, size_t * element_countp, rsb_nnz_idx_t * block_countp, rsb_bitmap_data_t ** bitmapp, const rsb_flags_t flags, struct rsb_mtx_t * mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \param IA is a row indices array sized nnz
+	 * \param JA is a column indices array sized nnz
+	 * \param nnz is the length of IA and JA
+	 * \param pinfop should specify a partitioning info array
+	 * \param element_countp is where the element counts will be written
+	 * \param block_countp   is where the block   counts will be written
+	 *
+	 * \return RSB_ERR_NO_ERROR on correct operation, an error code (see \ref errors_section) otherwise.
+	 *
+	 * WARNING : IA and JA can respectively point to column and row arrays instead of row and column arrays,
+	 * as long as the pinfop information is swapped accordingly.
+	 * In this way a transposed bitmap will be allocated (note that its size may well be the same).
+	 *
+	 * In case of error, no bitmap will be allocated, but its pointer may be overwritten.
+	 * In case of success, a bitmap structure will be allocated.
+	 * */
+
+	rsb_nnz_idx_t k = 0;
+	rsb_bitmap_data_t * bitmap = NULL;
+	size_t element_count = 0;
+	rsb_nnz_idx_t block_count = 0;
+	rsb_blk_idx_t mI = 0,MI = 0;
+	const rsb_coo_idx_t * mIndx = NULL,* MIndx = NULL;
+
+	if(mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)
+	{
+		mIndx = IA;
+		MIndx = JA;
+	}
+	else
+	{
+		mIndx = JA;
+		MIndx = IA;
+	}
+
+	if( !IA || !JA || !pinfop || !element_countp || !bitmapp || !block_countp ) goto err;
+	if( RSB_INVALID_NNZ_INDEX(nnz) ) goto err;
+	if( RSB_INVALID_BLK_INDEX(pinfop->M_b) || RSB_INVALID_BLK_INDEX(pinfop->K_b) )goto err;
+	if( !pinfop->rpntr  || !pinfop->cpntr  )goto err;
+
+	bitmap = rsb__allocate_bitmap(mtxAp->Mdim,mtxAp->mdim);
+
+	if(!bitmap)goto err;
+
+	if( mtxAp->flags & RSB_FLAG_SHOULD_DEBUG )
+		if(rsb__do_is_valid_pinfo_t(pinfop))
+		{
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}
+	
+	if(RSB_WANT_VERBOSE_MESSAGES)
+		RSB_INFO("counting matrix blocks ..\n");
+
+	for(k=0;RSB_LIKELY(k<nnz);++k)
+	{
+		/* 
+		 * We count the amount of elements for each block, setting bits in
+		 * our bitmap where a block should be placed, and leaving unset bits
+		 * which correspond to zero blocks 
+		 * */
+
+		MI = RSB_GET_BLOCK_MAJ_FOR_NZ(MIndx+k,mtxAp);
+		mI = RSB_GET_BLOCK_MIN_FOR_NZ(mIndx+k,mtxAp);
+
+		RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(MI));
+		RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(mI));
+
+		if(mI>=mtxAp->mdim)
+		{
+			RSB_PERR_GOTO(err," j=%d >= o->K_b=%d\n ",mI,mtxAp->mdim);
+		} 
+		if(mI <0 )
+		{
+			RSB_PERR_GOTO(err," j=%d < 0 \n",mI);
+		}
+		if(MI>=mtxAp->Mdim)
+		{
+			RSB_PERR_GOTO(err," i=%d >= o->M_b=%d\n ",MI,mtxAp->Mdim);
+		}
+		if(MI <0 )
+		{
+			RSB_PERR_GOTO(err," i=%d < 0 \n",MI);
+		}
+		
+		/* if the bit is not already set */
+		if(!(RSB_BITMAP_GET(bitmap,mtxAp->Mdim,mtxAp->mdim,MI,mI)))
+		{
+			if(mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)
+				element_count += GET_BLOCK_SIZE(mI,MI,pinfop);
+			else
+				element_count += GET_BLOCK_SIZE(MI,mI,pinfop);
+
+			(block_count)++;
+			RSB_BITMAP_SET(bitmap,mtxAp->Mdim,mtxAp->mdim,MI,mI) ;
+		}
+	}
+
+	if(block_count > nnz)
+	{
+		RSB_PERR_GOTO(err,"(mtxAp->block_count=%d >= n=%d)!\n",block_count,nnz);
+	}
+	
+	*block_countp = block_count;
+	*element_countp = element_count;
+	*bitmapp = bitmap;
+
+	return RSB_ERR_NO_ERROR;
+err:
+	RSB_CONDITIONAL_FREE(bitmap);
+	return RSB_ERR_GENERIC_ERROR;
+}
+#endif /* RSB_WANT_BITMAP */
+
+rsb_err_t rsb__do_set_init_storage_flags(struct rsb_mtx_t *mtxAp, rsb_flags_t flags)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_flags_t storage_only_flags = RSB_DO_FLAGS_EXTRACT_STORAGE(flags);
+
+#ifdef RSB_FLAG_WANT_LINKED_STORAGE
+	if(RSB_DO_FLAG_HAS(storage_only_flags,RSB_FLAG_WANT_LINKED_STORAGE))
+	{
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_WANT_COLUMN_MAJOR_ORDER))
+#ifdef RSB_MATRIX_STORAGE_LC
+			mtxAp->matrix_storage = RSB_MATRIX_STORAGE_LC;
+#else /* RSB_MATRIX_STORAGE_LC */
+			{errval = RSB_ERR_UNSUPPORTED_FORMAT;goto err;}
+#endif /* RSB_MATRIX_STORAGE_LC */
+		else
+#ifdef RSB_MATRIX_STORAGE_LR
+			mtxAp->matrix_storage = RSB_MATRIX_STORAGE_LR;
+#else /* RSB_MATRIX_STORAGE_LR */
+			{errval = RSB_ERR_UNSUPPORTED_FORMAT;goto err;}
+#endif  /* RSB_MATRIX_STORAGE_LR */
+	}
+	else
+#endif /* RSB_FLAG_WANT_LINKED_STORAGE */
+	{
+		if(RSB_DO_FLAG_HAS(storage_only_flags,RSB_FLAG_WANT_COO_STORAGE))
+		{
+			if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_WANT_COLUMN_MAJOR_ORDER))
+#ifdef RSB_MATRIX_STORAGE_BCOC
+				mtxAp->matrix_storage = RSB_MATRIX_STORAGE_BCOC;
+#else /* RSB_MATRIX_STORAGE_BCOC */
+				{errval = RSB_ERR_UNSUPPORTED_FORMAT;goto err;}
+#endif /* RSB_MATRIX_STORAGE_BCOC */
+			else
+#ifdef RSB_MATRIX_STORAGE_BCOR
+				mtxAp->matrix_storage = RSB_MATRIX_STORAGE_BCOR;
+#else /* RSB_MATRIX_STORAGE_BCOR */
+				{errval = RSB_ERR_UNSUPPORTED_FORMAT;goto err;}
+#endif /* RSB_MATRIX_STORAGE_BCOR */
+		}
+		else //FIXME: this switch could coexist with CSS, and be processed later or be ignored (in the old constructor); note it repeats the RSB_FLAG_WANT_COO_STORAGE test above and is thus unreachable
+		if(RSB_DO_FLAG_HAS(storage_only_flags,RSB_FLAG_WANT_COO_STORAGE))
+		{
+			if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_WANT_COLUMN_MAJOR_ORDER))
+#ifdef RSB_MATRIX_STORAGE_BCOC
+				mtxAp->matrix_storage = RSB_MATRIX_STORAGE_BCOC;
+#else /* RSB_MATRIX_STORAGE_BCOC */
+				{errval = RSB_ERR_UNSUPPORTED_FORMAT;goto err;}
+#endif /* RSB_MATRIX_STORAGE_BCOC */
+			else
+#ifdef RSB_MATRIX_STORAGE_BCOR
+				mtxAp->matrix_storage = RSB_MATRIX_STORAGE_BCOR;
+#else /* RSB_MATRIX_STORAGE_BCOR */
+				{errval = RSB_ERR_UNSUPPORTED_FORMAT;goto err;}
+#endif /* RSB_MATRIX_STORAGE_BCOR */
+		}
+		else
+		if(RSB_DO_FLAG_HAS(storage_only_flags,RSB_FLAG_WANT_BCSS_STORAGE))
+		{
+			if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_WANT_COLUMN_MAJOR_ORDER))
+#ifdef RSB_MATRIX_STORAGE_BCSC
+				mtxAp->matrix_storage = RSB_MATRIX_STORAGE_BCSC;
+#else /* RSB_MATRIX_STORAGE_BCSC */
+				{errval = RSB_ERR_UNSUPPORTED_FORMAT;goto err;}
+#endif /* RSB_MATRIX_STORAGE_BCSC */
+			else
+#ifdef RSB_MATRIX_STORAGE_BCSR
+				mtxAp->matrix_storage = RSB_MATRIX_STORAGE_BCSR;
+#else /* RSB_MATRIX_STORAGE_BCSR */
+				{errval = RSB_ERR_UNSUPPORTED_FORMAT;goto err;}
+#endif /* RSB_MATRIX_STORAGE_BCSR */
+		}
+		else
+		if(RSB_DO_FLAG_HAS(storage_only_flags,RSB_FLAG_WANT_FIXED_BLOCKING_VBR))
+		{
+			if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_WANT_COLUMN_MAJOR_ORDER))
+#ifdef RSB_MATRIX_STORAGE_VBC
+				mtxAp->matrix_storage = RSB_MATRIX_STORAGE_VBC;
+#else /* RSB_MATRIX_STORAGE_VBC */
+				{errval = RSB_ERR_UNSUPPORTED_FORMAT;goto err;}
+#endif /* RSB_MATRIX_STORAGE_VBC */
+			else
+#ifdef RSB_MATRIX_STORAGE_VBR
+				mtxAp->matrix_storage = RSB_MATRIX_STORAGE_VBR;
+#else /* RSB_MATRIX_STORAGE_VBR */
+				{errval = RSB_ERR_UNSUPPORTED_FORMAT;goto err;}
+#endif /* RSB_MATRIX_STORAGE_VBR */
+		}
+		else
+		{
+			/* undetermined format or a merge of formats (happens on recursive matrices during construction) */
+			mtxAp->matrix_storage = storage_only_flags;
+		}
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
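+
+/*
+ * Summary of the mapping implemented above (derived from the code): the
+ * storage bits extracted from flags select matrix_storage, further split
+ * by RSB_FLAG_WANT_COLUMN_MAJOR_ORDER:
+ *
+ *   storage flag                       row-major   column-major
+ *   RSB_FLAG_WANT_LINKED_STORAGE       LR          LC
+ *   RSB_FLAG_WANT_COO_STORAGE          BCOR        BCOC
+ *   RSB_FLAG_WANT_BCSS_STORAGE         BCSR        BCSC
+ *   RSB_FLAG_WANT_FIXED_BLOCKING_VBR   VBR         VBC
+ *
+ * Any other combination leaves matrix_storage set to the raw storage flags.
+ */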
+
+rsb_err_t rsb__set_init_flags_and_stuff( struct rsb_mtx_t *mtxAp, struct rsb_options_t * o, const struct rsb_mtx_partitioning_info_t * pinfop, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnz, rsb_nnz_idx_t block_count, rsb_nnz_idx_t element_count, rsb_type_t typecode, rsb_flags_t flags )
+{
+	/**
+	 * \ingroup gr_internals
+	 *
+	 * This inner service function sets some flags and variables during matrix construction.
+	 *
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!mtxAp /*|| !o */ /* FIXME: o disabled lately */
+#if !RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS 
+	 || !pinfop 
+#endif /* RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS  */
+	)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	mtxAp->flags = flags;
+	mtxAp->typecode = typecode;
+	mtxAp->el_size = RSB_NUMERICAL_TYPE_SIZE(typecode);
+#if RSB_WANT_BITMAP
+	mtxAp->options = NULL;	// we ignore o
+//	mtxAp->options = o;
+#endif /* RSB_WANT_BITMAP */
+	mtxAp->nnz = nnz;
+	mtxAp->element_count = element_count;
+	mtxAp->block_count = block_count;
+
+	if(pinfop)
+	{
+		mtxAp->M_b = pinfop->M_b;
+		mtxAp->K_b = pinfop->K_b;
+		mtxAp->nr = pinfop->nr;
+		mtxAp->nc = pinfop->nc;
+		mtxAp->rpntr = pinfop->rpntr;
+		mtxAp->cpntr = pinfop->cpntr;
+//#if RSB_EXPERIMENTAL_USE_PURE_BCSS_FOR_CONSTRUCTOR
+		mtxAp->br = pinfop->br;
+		mtxAp->bc = pinfop->bc;
+//#endif /* RSB_EXPERIMENTAL_USE_PURE_BCSS_FOR_CONSTRUCTOR */
+	}
+	else
+	{
+		mtxAp->M_b = m;
+		mtxAp->K_b = k;
+		mtxAp->nr = m;
+		mtxAp->nc = k;
+		mtxAp->rpntr = NULL;
+		mtxAp->cpntr = NULL;
+//#if RSB_EXPERIMENTAL_USE_PURE_BCSS_FOR_CONSTRUCTOR
+		mtxAp->br = 1;
+		mtxAp->bc = 1;
+//#endif /* RSB_EXPERIMENTAL_USE_PURE_BCSS_FOR_CONSTRUCTOR */
+		RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(mtxAp->br+1));
+		RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(mtxAp->bc+1));
+	}
+	if(RSB_IS_INVALID_TYPE_SIZE(mtxAp->el_size = rsb__do_sizeof(mtxAp->typecode)))
+	{
+		errval = RSB_ERR_INTERNAL_ERROR;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	if((errval = rsb__do_set_init_storage_flags(mtxAp,flags))!=RSB_ERR_NO_ERROR)
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	if(mtxAp->br==1 && mtxAp->bc==1)
+	{
+		mtxAp->M_b = mtxAp->nr;
+		mtxAp->K_b = mtxAp->nc;
+	}
+
+	/* setting aliases */
+	if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_WANT_COLUMN_MAJOR_ORDER))
+	{
+		mtxAp->Mdim = mtxAp->K_b;
+		mtxAp->mdim = mtxAp->M_b;
+		mtxAp->mpntr = mtxAp->rpntr;
+		mtxAp->Mpntr = mtxAp->cpntr;
+	}
+	else
+	{
+		mtxAp->Mdim = mtxAp->M_b;
+		mtxAp->mdim = mtxAp->K_b;
+		mtxAp->Mpntr = mtxAp->rpntr;
+		mtxAp->mpntr = mtxAp->cpntr;
+	}
+
+//	RSB_DEBUG_ASSERT(mtxAp->Mdim);
+//	RSB_DEBUG_ASSERT(mtxAp->mdim);
+//	RSB_DEBUG_ASSERT(mtxAp->rpntr);
+//	RSB_DEBUG_ASSERT(mtxAp->cpntr);
+	RSB_DEBUG_ASSERT(mtxAp->el_size);
+
+	return RSB_ERR_NO_ERROR;
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
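+
+/*
+ * Aliasing summary for the function above (derived from the code): the
+ * major/minor dimension views depend on RSB_FLAG_WANT_COLUMN_MAJOR_ORDER:
+ *
+ *                  row-major (default)   column-major
+ *   mtxAp->Mdim    M_b                   K_b
+ *   mtxAp->mdim    K_b                   M_b
+ *   mtxAp->Mpntr   rpntr                 cpntr
+ *   mtxAp->mpntr   cpntr                 rpntr
+ *
+ * so traversal code can be written once against the major dimension.
+ */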
+
+#if 0
+rsb_err_t rsb_dump_matrix ( const struct rsb_mtx_t *mtxAp )
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 *  \param mtxAp is a valid matrix structure pointer
+	 *  \param diagonal is an array sized as min(mtxAp->nr,mtxAp->nc) which on exit will contain the diagonal elements.
+	 *  \return -1 in case of error, 0 otherwise
+	 *
+	 * FIXME : UNTESTED AND UNDOCUMENTED AND UNFINISHED
+	 * FIXME : USE rsb_print_matrix and delete this ?
+	 * */
+	register rsb_nnz_idx_t baserow,basecolumn;
+	register rsb_blk_idx_t rows,columns;
+	register rsb_blk_idx_t blockrow,blockcolumn;
+	register rsb_byte_t *bp;
+
+	RSB_INFO("%% [!] TESTING CODE !\n");
+	RSB_INFO("%%rows:%d columns:%d blocks:%d\n",mtxAp->nr,mtxAp->nc,mtxAp->block_count);
+	RSB_INFO("%d %d %d\n", mtxAp->nr,mtxAp->nc,mtxAp->nnz);
+	RSB_GET_FIRST_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		rsb_coo_idx_t r,c;
+		/*
+		 * FIXME
+		 * */
+//		RSB_INFO("%x \n", bp) ;
+//		RSB_INFO("_k : %d %d   ", _k,_lastk) ;
+//		RSB_INFO("%d %d ", baserow,basecolumn) ;
+		RSB_INFO("%d %d\n", rows,columns) ;
+#if 1
+		for(r=0;r<rows;++r)
+		for(c=0;c<columns;++c)
+		{
+				RSB_INFO("%d %d %lg\n", baserow+r,basecolumn+c,((double*)bp)[columns*r+c]) ;
+		}
+#endif
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+#endif
+
+rsb_err_t rsb__do_insert_sorted( struct rsb_mtx_t * mtxAp, const void *VA, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const rsb_nnz_idx_t nnz, const struct rsb_mtx_partitioning_info_t * pinfop)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * Inserts in the matrix structures the specified coo elements, sorted accordingly to the specified BCSR blocking.
+	 */
+
+	rsb_coo_idx_t blockrows = 0;
+	rsb_coo_idx_t blockcolumns = 0;
+	rsb_coo_idx_t baserow = 0;
+	rsb_coo_idx_t basecolumn = 0;
+	rsb_nnz_idx_t *indptr = mtxAp->indptr;
+	const rsb_coo_idx_t *Mpntr = NULL;
+	const rsb_coo_idx_t *mpntr = NULL;
+	const rsb_coo_idx_t *MIndx = NULL;
+	const rsb_coo_idx_t *mIndx = NULL;
+	rsb_blk_idx_t mI = 0,MI = 0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t k = 0;	/* will index a nnz sized array */
+	rsb_nnz_idx_t K = 0;
+	rsb_byte_t*dst = NULL;
+	rsb_byte_t*src = NULL;
+
+	if(mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)
+	{
+		mpntr = pinfop->rpntr;
+		Mpntr = pinfop->cpntr;
+		mIndx = IA;
+		MIndx = JA;
+	}
+	else
+	{
+		Mpntr = pinfop->rpntr;
+		mpntr = pinfop->cpntr;
+		MIndx = IA;
+		mIndx = JA;
+	}
+
+	k = mI = MI = 0;K = 0;
+	blockrows = Mpntr[MI+1] - Mpntr[MI];
+	blockcolumns = mpntr[mI+1] - mpntr[mI];
+	while( MIndx[k] >= Mpntr[MI+1] )++MI;	/* skipping 'preceding' block rows .. */
+	while( mIndx[k] >= mpntr[mI+1] )++mI;	/* skipping 'preceding' block columns .. */
+	baserow = Mpntr[MI];
+	basecolumn = mpntr[mI];
+	mtxAp->bindx [ K ] = mI;			/* a 'new' block */
+	indptr[ K+1 ]=indptr[ K  ] + blockrows * blockcolumns;
+
+#ifdef RSB_FLAG_WANT_LINKED_STORAGE
+	if( rsb__have_linked_storage(mtxAp->flags) )
+	{
+		if(RSB_WANT_VERBOSE_MESSAGES)
+			RSB_INFO("initializing linked lists stuff.\n");
+		if(RSB_UNLIKELY(mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER))
+	RSB_BLOCK_TRAILING_STRUCT_SET(RSB_BLOCK_TRAILING_STRUCT_GET(mtxAp,K),mI,MI,blockcolumns,blockrows,basecolumn,baserow)
+		else
+	RSB_BLOCK_TRAILING_STRUCT_SET(RSB_BLOCK_TRAILING_STRUCT_GET(mtxAp,K),MI,mI,blockrows,blockcolumns,baserow,basecolumn)
+	}
+#endif /* RSB_FLAG_WANT_LINKED_STORAGE */
+
+/*
+	dst = mtxAp->VA;
+	dst += RSB_BLOCK_OFFSET(mtxAp,K);
+	{rsb_blk_idx_t ibo = 0;
+	if(mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)
+		ibo = RSB_GET_INTRA_BLOCK_OFFSET(mIndx[k],MIndx[k],mI,MI,mtxAp) ;
+	else
+		ibo = RSB_GET_INTRA_BLOCK_OFFSET(MIndx[k],mIndx[k],MI,mI,mtxAp) ;
+	dst += ibo;}
+	src = ((rsb_byte_t*)VA) + mtxAp->el_size * k;
+	RSB_NUMERICAL_TYPE_SET_ELEMENT(dst,src,mtxAp->typecode);*/
+
+	while(RSB_LIKELY(k<nnz))
+	{
+#ifdef DEBUG
+		if( MIndx[k] < baserow  )
+		{
+			RSB_ERROR("k=%d : (%d %d) is not ok\n",k, MIndx[k]+1,mIndx[k]+1);
+			RSB_STDERR("(minor dim. index %d < base row %d)\n",MIndx[k] , baserow);
+			errval = RSB_ERR_INTERNAL_ERROR;
+			goto err;/* NOTE : this jump could be evil */
+		}
+#endif /* DEBUG */
+
+		if( mIndx[k] >= basecolumn+blockcolumns  )
+		{
+			/* new block column, for sure */
+			while( mIndx[k] >= mpntr[mI+1] )++mI;
+			blockcolumns = mpntr[mI+1] - mpntr[mI];
+			basecolumn = mpntr[mI];
+
+			if( MIndx[k] >= baserow+blockrows  )
+			{
+				/* new block row  */
+				while( MIndx[k] >= Mpntr[MI+1] )++MI;
+				blockrows = Mpntr[MI+1] - Mpntr[MI];
+				baserow = Mpntr[MI];
+			}
+			else
+			{
+				/* same block row  */
+			}
+			++K;
+			mtxAp->bindx [ K ] = mI;			/* a 'new' block */
+			indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;
+#ifdef RSB_FLAG_WANT_LINKED_STORAGE
+			if( rsb__have_linked_storage(mtxAp->flags) )
+			{
+				if(RSB_UNLIKELY(mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER))
+	RSB_BLOCK_TRAILING_STRUCT_SET(RSB_BLOCK_TRAILING_STRUCT_GET(mtxAp,K),mI,MI,blockcolumns,blockrows,basecolumn,baserow)
+				else
+	RSB_BLOCK_TRAILING_STRUCT_SET(RSB_BLOCK_TRAILING_STRUCT_GET(mtxAp,K),MI,mI,blockrows,blockcolumns,baserow,basecolumn)
+			}
+#endif /* RSB_FLAG_WANT_LINKED_STORAGE */
+		}
+		else
+		if( MIndx[k] >= baserow+blockrows  )
+		{
+			/* new row block, for sure */
+			while( MIndx[k] >= Mpntr[MI+1] )++MI;
+			blockrows = Mpntr[MI+1] - Mpntr[MI];
+			baserow = Mpntr[MI];
+
+			if( mIndx[k] < basecolumn  )
+			{
+				/* new row block, new block column  */
+				mI = 0;
+				while( mIndx[k] >= mpntr[mI+1] )++mI;
+				blockcolumns = mpntr[mI+1] - mpntr[mI];
+				basecolumn = mpntr[mI];
+			}
+			else
+			{
+				/* new row block, same column  */
+			}
+			++K;
+			mtxAp->bindx [ K ] = mI;			/* a 'new' block */
+			indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;
+#ifdef RSB_FLAG_WANT_LINKED_STORAGE
+			if( rsb__have_linked_storage(mtxAp->flags) )
+			{
+				if(RSB_UNLIKELY(mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER))
+	RSB_BLOCK_TRAILING_STRUCT_SET(RSB_BLOCK_TRAILING_STRUCT_GET(mtxAp,K),mI,MI,blockcolumns,blockrows,basecolumn,baserow)
+				else
+	RSB_BLOCK_TRAILING_STRUCT_SET(RSB_BLOCK_TRAILING_STRUCT_GET(mtxAp,K),MI,mI,blockrows,blockcolumns,baserow,basecolumn)
+			}
+#endif /* RSB_FLAG_WANT_LINKED_STORAGE */
+		}
+		else
+		{
+			/* same block row for sure */
+		}
+		dst = mtxAp->VA;
+		dst += RSB_BLOCK_OFFSET(mtxAp,K);
+		{
+		rsb_nnz_idx_t ibo = 0;
+		if(RSB_UNLIKELY(mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER))
+			ibo = RSB_GET_INTRA_BLOCK_OFFSET(mIndx[k],MIndx[k],mI,MI,mtxAp) ;
+		else
+			ibo = RSB_GET_INTRA_BLOCK_OFFSET(MIndx[k],mIndx[k],MI,mI,mtxAp) ;
+		dst += ibo;
+		}
+		//RSB_ERROR("%d %d %d\n",((rsb_byte_t*)dst)-((rsb_byte_t*)mtxAp->VA),MIndx[k],mIndx[k]);
+		src = ((rsb_byte_t*)VA) + mtxAp->el_size * k;
+		RSB_NUMERICAL_TYPE_SET_ELEMENT(dst,src,mtxAp->typecode);
+		++k;
+	}
+
+	if(nnz)++K;
+	mtxAp->bindx[K] = 0;	// the first element past the 'working' bindx should be set to a safe value
+
+	if(mtxAp->flags & RSB_FLAG_SHOULD_DEBUG)
+	if( K != mtxAp->block_count )
+	{
+		RSB_ERROR("K is %zd ! should be %zd (block count)!\n",(size_t)K,(size_t)mtxAp->block_count);
+		RSB_STDERR("nnz : %zd\n",(size_t)nnz);
+		RSB_STDERR("k : %zd\n",(size_t)k);
+		errval = RSB_ERR_INTERNAL_ERROR;
+		goto err;
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
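+
+/*
+ * Sketch (not library code) of how the arrays filled above relate: for each
+ * block K, bindx[K] is its minor-dimension block index and indptr[K] is the
+ * element offset of its values inside VA (see the commented-out pointer
+ * arithmetic above).  A walk over all blocks then looks like:
+ */
+#if 0
+static void rsb_example_walk_blocks(const struct rsb_mtx_t *mtxAp)
+{
+	rsb_nnz_idx_t K;
+	for(K=0;K<mtxAp->block_count;++K)
+	{
+		const rsb_nnz_idx_t mI = mtxAp->bindx[K];	/* minor-dimension block index */
+		const rsb_nnz_idx_t nels = mtxAp->indptr[K+1]-mtxAp->indptr[K];	/* elements in block K */
+		const rsb_byte_t *bp = ((const rsb_byte_t*)mtxAp->VA)+mtxAp->el_size*mtxAp->indptr[K];
+		(void)mI;(void)nels;(void)bp;
+	}
+}
+#endif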
+
+rsb_err_t rsb__do_account_sorted( struct rsb_mtx_t * mtxAp, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const rsb_nnz_idx_t nnz, const struct rsb_mtx_partitioning_info_t * pinfop, rsb_nnz_idx_t * elements_per_block_row, rsb_nnz_idx_t * blocks_per_block_row)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * The routine for accounting BCSR sorted coo elements in a fresh matrix.
+	 * It is not optimized, and it should be used as a debugging fallback when tuning the optimized ones.
+	 *
+	 * FIXME : this code is deprecated in favour of rsb__do_account_sorted_optimized
+	 * FIXME : does not support lots of flags!
+	 */
+	rsb_coo_idx_t blockrows = 0;
+	rsb_coo_idx_t blockcolumns = 0;
+	rsb_coo_idx_t baserow = 0;
+	rsb_coo_idx_t basecolumn = 0;
+	const rsb_coo_idx_t *Mpntr = NULL;
+	const rsb_coo_idx_t *mpntr = NULL;
+	const rsb_coo_idx_t *MIndx = NULL;
+	const rsb_coo_idx_t *mIndx = NULL;
+	rsb_blk_idx_t mI = 0,MI = 0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t k = 0;	/* will index a nnz sized array */
+	rsb_nnz_idx_t K = 0;
+	k = mI = MI = K=0;
+
+	if( ! IA || ! JA 
+#if !RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS 
+	|| !pinfop
+#endif /* RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS */
+	)
+	{
+		errval = RSB_ERR_BADARGS;goto err;
+	}
+
+	if(mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)
+	{
+		mpntr = pinfop->rpntr;
+		Mpntr = pinfop->cpntr;
+		mIndx = IA;
+		MIndx = JA;
+	}
+	else
+	{
+		Mpntr = pinfop->rpntr;
+		mpntr = pinfop->cpntr;
+		MIndx = IA;
+		mIndx = JA;
+	}
+
+	while( MIndx[k] >= Mpntr[MI+1] )++MI;	/* skipping 'preceding' block rows .. */
+	while( mIndx[k] >= mpntr[mI+1] )++mI;	/* skipping 'preceding' block columns .. */
+	blockrows = Mpntr[MI+1] - Mpntr[MI];
+	blockcolumns = mpntr[mI+1] - mpntr[mI];
+	baserow = Mpntr[MI];
+	basecolumn = mpntr[mI];
+	elements_per_block_row[MI*0] += blockrows * blockcolumns;
+	blocks_per_block_row[MI] += 1;
+
+	while(RSB_LIKELY(k<nnz))
+	{
+#ifdef DEBUG
+		if( MIndx[k] < baserow  )
+		{
+			RSB_ERROR("k=%d : (%d %d) is not ok\n",k, MIndx[k]+1,mIndx[k]+1);
+			RSB_STDERR("(minor dim. index %d < base row %d)\n",MIndx[k] , baserow);
+			errval = RSB_ERR_INTERNAL_ERROR;
+			goto err;
+		}
+#endif /* DEBUG */
+
+		if( mIndx[k] >= basecolumn+blockcolumns  )
+		{
+			/* new block column, for sure */
+			while( mIndx[k] >= mpntr[mI+1] )++mI;
+			blockcolumns = mpntr[mI+1] - mpntr[mI];
+			basecolumn = mpntr[mI];
+
+			if( MIndx[k] >= baserow+blockrows  )
+			{
+				/* new block row  */
+				while( MIndx[k] >= Mpntr[MI+1] )++MI;
+				blockrows = Mpntr[MI+1] - Mpntr[MI];
+				baserow = Mpntr[MI];
+			}
+			else
+			{
+				/* same block row  */
+			}
+			elements_per_block_row[MI*0] += blockrows * blockcolumns;
+			blocks_per_block_row[MI] += 1;
+			++K;
+		}
+		else
+		if( MIndx[k] >= baserow+blockrows  )
+		{
+			/* new row block, for sure */
+			while( MIndx[k] >= Mpntr[MI+1] )++MI;
+			blockrows = Mpntr[MI+1] - Mpntr[MI];
+			baserow = Mpntr[MI];
+
+			if( mIndx[k] < basecolumn  )
+			{
+				/* new row block, new block column  */
+				mI = 0;
+				while( mIndx[k] >= mpntr[mI+1] )++mI;
+				blockcolumns = mpntr[mI+1] - mpntr[mI];
+				basecolumn = mpntr[mI];
+			}
+			else
+			{
+				/* new row block, same column  */
+			}
+			/* get rid of this var : elements_per_block_row */
+			elements_per_block_row[MI*0] += blockrows * blockcolumns;
+			blocks_per_block_row[MI] += 1;
+			++K;
+		}
+		else
+		{
+			/* same block row for sure */
+		}
+		++k;
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+struct rsb_mtx_t * rsb__allocate_css_from_coo_sorted( void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, const rsb_nnz_idx_t nnz, const struct rsb_mtx_partitioning_info_t * pinfop, rsb_coo_idx_t m, rsb_coo_idx_t k, struct rsb_options_t * o, rsb_type_t typecode, rsb_flags_t flags, rsb_err_t *errvalp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * The routine for matrix building from sorted coo elements. CSR only.
+	 *
+	 * FIXME : EXPERIMENTAL, UNFINISHED
+	 * FIXME : FIX THIS FUNCTION TO ALLOCATE CSR/CSC EVEN IF NOT IN PLACE
+	 * */
+//	rsb_nnz_idx_t n = 0;
+	rsb_nnz_idx_t * elements_per_block_row = NULL;
+	rsb_time_t t = RSB_TIME_ZERO;
+	struct rsb_mtx_t *mtxAp = NULL;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	/* const rsb_coo_idx_t *MIndx = NULL, *mIndx = NULL; */
+	rsb_blk_idx_t MI = 0;
+
+	if(!errvalp)
+		return NULL;
+
+	if(!( flags & RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR ))
+	{
+		return NULL;
+	}
+	if(flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)
+		return NULL;/* FIXME : only csr now */
+
+	pinfop = NULL;/* FIXME */
+
+	if(!o) {errval = RSB_ERR_BADARGS;goto err;}
+	rsb__init_struct(mtxAp = rsb__calloc(sizeof(*mtxAp)));
+	if(!mtxAp){errval = RSB_ERR_ENOMEM;goto err;}
+	if((errval = rsb__set_init_flags_and_stuff(mtxAp,o,pinfop,m,k,0,0,0,typecode,flags))!=RSB_ERR_NO_ERROR)goto err;
+	/*
+	if(flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)
+	       	mIndx = IA, MIndx = JA;
+       	else
+	       	MIndx = IA, mIndx = JA;
+	*/
+	elements_per_block_row = rsb__calloc(sizeof(rsb_nnz_idx_t)*(1+mtxAp->Mdim));
+
+	if(!elements_per_block_row)
+	{
+		errval = RSB_ERR_ENOMEM;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	t = - rsb_time();
+
+	RSB_DEBUG_ASSERT(rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,nnz,typecode,pinfop,flags)==RSB_ERR_NO_ERROR);
+	RSB_DEBUG_ASSERT(rsb__util_are_valid_coo_arrays(IA,JA,nnz)==RSB_ERR_NO_ERROR);
+
+	errval = rsb__do_account_sorted_optimized(mtxAp,IA,JA,m,k,nnz,NULL,elements_per_block_row,NULL);
+	mtxAp->block_count = nnz;
+
+	t += rsb_time();
+	mtxAp->sat = t;
+
+	mtxAp->indptr = rsb__malloc(sizeof(rsb_nnz_idx_t)*(mtxAp->block_count+1));
+	mtxAp->bindx = JA;	/* ok, done :) FIXME : should type-convert ... */
+
+	if(!mtxAp->bindx ){ errval = RSB_ERR_ENOMEM; goto err;}
+	if(!mtxAp->indptr){ errval = RSB_ERR_ENOMEM; goto err;}
+
+	mtxAp->indptr[0] = 0;/* */
+
+	mtxAp->bpntr = IA;
+	mtxAp->bpntr [0] = 0;
+//	mtxAp->bpntr = NULL;
+	for(MI=0;MI<mtxAp->Mdim;++MI) mtxAp->bpntr[MI+1]= mtxAp->bpntr[MI]+ elements_per_block_row[MI];
+
+#if RSB_WANT_BITMAP
+	mtxAp->options = o ;
+#endif /* RSB_WANT_BITMAP */
+	mtxAp->nnz = nnz;
+	mtxAp->element_count = nnz;
+	mtxAp->VA = VA;
+	t = - rsb_time();
+	errval = rsb__do_insert_sorted_optimized(mtxAp,VA,IA,JA,nnz,NULL);
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+	t += rsb_time();
+	mtxAp->eit = t;
+	RSB_CONDITIONAL_FREE(elements_per_block_row);
+	return mtxAp;
+err:
+	RSB_STDERR("rsb__allocate_from_coo_sorted:\n");
+	RSB_CONDITIONAL_ERRPSET(errvalp,errval);
+	if(mtxAp)
+		rsb__do_mtx_free(mtxAp);	/* destroys all of the internals of matrix */
+	RSB_CONDITIONAL_FREE(elements_per_block_row);
+	return NULL;
+}
+
+struct rsb_mtx_t * rsb__allocate_from_coo_sorted( const void *VA, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const rsb_nnz_idx_t nnz, const struct rsb_mtx_partitioning_info_t * pinfop, rsb_coo_idx_t m, rsb_coo_idx_t k, struct rsb_options_t * o, rsb_type_t typecode, rsb_flags_t flags, rsb_err_t *errvalp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * The routine for matrix building from sorted coo elements.
+	 * This function requires the coefficients to be sorted according to the inter block ordering policy.
+	 *
+	 * \param pinfop is the pointer to a rsb_mtx_partitioning_info_t structure with partitioning information.
+	 *
+	 * \note : should behave well with the flags: RSB_FLAG_WANT_FIXED_BLOCKING_VBR RSB_FLAG_SORTED_INPUT
+	 * \note : this function should be optimized and tested thoroughly.
+	 * */
+
+	struct rsb_mtx_t *mtxAp = NULL;
+	rsb_blk_idx_t MI = 0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	/* rsb_coo_idx_t blockcolumns = 0; */
+
+	const rsb_coo_idx_t *MIndx = NULL;
+
+	rsb_nnz_idx_t * elements_per_block_row = NULL;
+	rsb_nnz_idx_t * blocks_per_block_row = NULL;	/* per major dimension .. */
+	size_t element_count = 0;
+
+	rsb_time_t t = RSB_TIME_ZERO;
+
+#if !RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS 
+	if(!pinfop)
+	{
+		errval = RSB_ERR_BADARGS;goto err;
+	}
+#endif /* RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS */
+	if( flags & RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR )
+	{errval = RSB_ERR_BADARGS;goto err;}
+
+//	if(!o)
+//	{
+//		errval = RSB_ERR_BADARGS;goto err;
+//	}
+
+	blocks_per_block_row = NULL;	/* per major dimension .. */
+	element_count = 0;
+
+	rsb__init_struct(mtxAp = rsb__calloc(sizeof(struct rsb_mtx_t)));
+	if(!mtxAp){errval = RSB_ERR_ENOMEM;goto err;}
+
+	if((errval = rsb__set_init_flags_and_stuff(mtxAp,o,pinfop,m,k,0,0,0,typecode,flags))!=RSB_ERR_NO_ERROR)
+		goto err;
+	if(flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)
+		MIndx = JA;
+	else
+		MIndx = IA;
+
+	/* FIXME : elements_per_block_row can be replaced with a single variable */
+	elements_per_block_row = rsb__calloc(sizeof(rsb_nnz_idx_t)*(1+mtxAp->Mdim));
+	blocks_per_block_row = rsb__calloc(sizeof(rsb_nnz_idx_t)*(1+mtxAp->Mdim));
+
+	if(!blocks_per_block_row  )
+	{
+		errval = RSB_ERR_ENOMEM;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	if(!elements_per_block_row)
+	{
+		errval = RSB_ERR_ENOMEM;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	t = - rsb_time();
+	blocks_per_block_row++;/* we increment the pointer by 1 element (we will use this array as bpntr later)*/
+	errval = rsb__do_account_sorted_optimized(mtxAp,IA,JA,m,k,nnz,pinfop,elements_per_block_row,blocks_per_block_row);
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+
+	if(nnz==0)blocks_per_block_row[0] = 0;/* handling the degenerate nnz == 0 case (e.g.: unit diag) */
+	mtxAp->block_count = 0;
+	element_count = 0;
+
+	for(MI=0;MI<mtxAp->Mdim;++MI)mtxAp->block_count += blocks_per_block_row  [MI];
+	for(MI=0;MI<mtxAp->Mdim;++MI)element_count += elements_per_block_row[MI];
+
+	t += rsb_time();
+	mtxAp->sat = t;
+	if(RSB_WANT_VERBOSE_MESSAGES)
+	RSB_STDERR("matrix creation phase 1 (accounting) : %lf seconds \n", t);
+
+	mtxAp->indptr = rsb__malloc(sizeof(rsb_nnz_idx_t)*(mtxAp->block_count+1));
+	mtxAp->bindx = rsb__malloc(sizeof(rsb_nnz_idx_t)*(mtxAp->block_count+1));
+
+	if(!mtxAp->bindx ){ errval = RSB_ERR_ENOMEM; goto err;}
+	if(!mtxAp->indptr){ errval = RSB_ERR_ENOMEM; goto err;}
+
+	mtxAp->indptr[0] = 0;/* */
+
+	mtxAp->bpntr = (--blocks_per_block_row);	/* :) */
+	for(MI=0;MI<mtxAp->Mdim;++MI)
+		mtxAp->bpntr[MI+1] += mtxAp->bpntr[MI];	/* in this way bpntr[i] has the count of blocks before row i */
+	mtxAp->bpntr [0] = 0;
+	blocks_per_block_row = NULL;		/* it will be freed with the matrix */
+
+	/* second pass : we have allocated the needed arrays and are ready to fill in data structures */
+
+#if RSB_WANT_BITMAP
+	mtxAp->options = o ;
+#endif /* RSB_WANT_BITMAP */
+	mtxAp->nnz = nnz;
+	mtxAp->element_count = element_count;
+	//mtxAp->block_count = block_count;
+
+	if(mtxAp->block_count > nnz)
+	{
+		errval = RSB_ERR_INTERNAL_ERROR;
+		RSB_STDERR("more blocks (%zd) than nonzeros (%zd) ?could be a bug!\n",(size_t)mtxAp->block_count,(size_t)nnz);
+		goto err;
+	}
+
+	mtxAp->bpntr[0] = 0;
+	mtxAp->VA = rsb__malloc( RSB_TOTAL_BLOCK_BYTES(mtxAp,o));
+
+	if(RSB_WANT_VERBOSE_MESSAGES)
+		RSB_INFO("allocating %zd bytes.\n",(size_t)RSB_TOTAL_BLOCK_BYTES(mtxAp,o) );
+		
+	if(!mtxAp->VA)
+	{
+		errval = RSB_ERR_ENOMEM;
+		RSB_STDERR("had problems allocating %zd bytes.\n",(size_t)RSB_TOTAL_BLOCK_BYTES(mtxAp,o));
+		goto err;
+	}
+	//	k = 0;/* nnz index */
+	
+	t = - rsb_time();
+
+	/* the following code could run parallel with some work */
+	errval = rsb__do_insert_sorted_optimized( mtxAp, VA, IA, JA, nnz, pinfop);
+
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+	t += rsb_time();
+	mtxAp->eit = t;
+	if(RSB_WANT_VERBOSE_MESSAGES)
+		RSB_STDERR("matrix creation phase 2 (insertion) : %lf seconds \n", mtxAp->eit);
+#if 0
+	if((flags & RSB_FLAG_SHOULD_DEBUG) && 0)
+	{
+		register rsb_coo_idx_t	baserow,basecolumn,rows,columns;
+		register rsb_blk_idx_t	blockrow,blockcolumn;
+		register rsb_byte_t*bp;
+
+		/* FIXME : will fail if pure bcsr */
+		RSB_GET_FIRST_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+		if(0 /* super paranoia */)
+		while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+		{
+			RSB_INFO("%zd / %zd  ; block (%zd %zd)/(%zd %zd) base : (%zd %zd) size : (%zd %zd)\n",
+			(rsb_printf_int_t)_lastk,(rsb_printf_int_t)mtxAp->block_count,
+			(rsb_printf_int_t)blockrow,(rsb_printf_int_t)blockcolumns,
+			(rsb_printf_int_t)pinfop->M_b,(rsb_printf_int_t)pinfop->K_b,
+			(rsb_printf_int_t)baserow,(rsb_printf_int_t)basecolumn,(rsb_printf_int_t)rows,(rsb_printf_int_t)columns);
+			RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+		}
+	}
+#endif
+
+	RSB_CONDITIONAL_FREE(elements_per_block_row);
+	return mtxAp;
+err:
+	RSB_STDERR("rsb__allocate_from_coo_sorted:\n");
+	RSB_CONDITIONAL_ERRPSET(errvalp,errval);
+	if(mtxAp)
+		rsb__do_mtx_free(mtxAp);	/* destroys all of the internals of matrix */
+	if(blocks_per_block_row != MIndx )
+		RSB_CONDITIONAL_FREE(blocks_per_block_row );
+	RSB_CONDITIONAL_FREE(elements_per_block_row);
+	return NULL;
+}
+
+rsb_err_t rsb__do_get_blocking_from_pinfo(const struct rsb_mtx_partitioning_info_t * pinfop, rsb_flags_t flags, rsb_blk_idx_t *mbp, rsb_blk_idx_t *kbp)
+{
+	if( ( flags & RSB_FLAG_WANT_BCSS_STORAGE ) || ( flags & RSB_FLAG_WANT_FIXED_BLOCKING_VBR ) )
+	{
+		if( pinfop && pinfop->cpntr && pinfop->rpntr )
+		{
+			/* FIXME : experimental */
+			*kbp = pinfop->cpntr[1]-pinfop->cpntr[0];
+			*mbp = pinfop->rpntr[1]-pinfop->rpntr[0];
+		}
+		else
+		if(pinfop)
+		{
+#if RSB_EXPERIMENTAL_USE_PURE_BCSS
+			*mbp = pinfop->br;
+			*kbp = pinfop->bc;
+#else /* RSB_EXPERIMENTAL_USE_PURE_BCSS */
+			*kbp = *mbp = -1;
+#endif /* RSB_EXPERIMENTAL_USE_PURE_BCSS */
+		}
+		else
+		{
+			*kbp = *mbp = 1;
+		}
+		RSB_DEBUG_ASSERT(*kbp>=1);
+		RSB_DEBUG_ASSERT(*mbp>=1);
+		if( *kbp<1 || *mbp <1 )
+		{
+			return RSB_ERR_BADARGS;
+		}
+	}
+	return RSB_ERR_NO_ERROR;
+}
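+
+/*
+ * Worked example for the function above: with rpntr = {0,4,8,12} and
+ * cpntr = {0,2,4}, the derived fixed blocking is *mbp = rpntr[1]-rpntr[0] = 4
+ * rows by *kbp = cpntr[1]-cpntr[0] = 2 columns per block; with no pinfop at
+ * all, a 1 x 1 blocking is assumed.
+ */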
+
+size_t rsb__util_strlen(const rsb_char_t *s)
+{
+	/*!
+	 * \ingroup gr_internals
+	 */
+	return strlen(s);/* Flawfinder: ignore */
+}
+
+#if 0
+static int rsb_util_sprintf(rsb_char_t *str, const rsb_char_t *format, ...)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * FIXME :  BUGGY
+	 * */
+        va_list ap;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	va_start(ap,format);
+	errval = rsb__sprintf(str,format,ap);/* Flawfinder: ignore */
+	va_end(ap);
+	RSB_DO_ERR_RETURN(errval)
+}
+#endif
+
+static rsb_char_t *rsb_util_strcat(rsb_char_t *dest, const rsb_char_t *src)
+{
+	/*!
+	 * A wrapper.
+	 * \ingroup gr_internals
+	 */
+	return strcat(dest,src); /* Flawfinder: ignore */
+}
+
+const rsb_char_t * rsb__sprint_matrix_implementation_code2(const struct rsb_mtx_t *mtxAp, rsb_char_t * buf, rsb_flags_t inflags)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *  FIXME : missing error handling 
+	 * buf must be at least RSB_CONST_MATRIX_IMPLEMENTATION_CODE_STRING_MAX_LENGTH chars long.
+	 */
+	const rsb_char_t sep[] = "\t";
+	rsb_blk_idx_t br,bc;
+	if(!mtxAp) return NULL;
+	buf[0] = '\0';
+
+	rsb__get_blocking_size(mtxAp,&br,&bc);
+
+	/* NOTE : assumes BCSR or takes into account only the first blocks */
+	rsb__sprintf(buf+rsb__util_strlen(buf),"%ld%s%ld%s",(long)mtxAp->nr,sep,(long)mtxAp->nc,sep);
+	rsb__sprintf(buf+rsb__util_strlen(buf),"%ld%s%ld%s",(long)br,sep,(long)bc,sep);
+	rsb__sprintf(buf+rsb__util_strlen(buf),"%zd%s%lg",(size_t)rsb__do_get_matrix_nnz(mtxAp),sep,rsb__do_get_matrix_fillin(mtxAp));
+
+	return buf;
+}
+
+rsb_char_t rsb__do_get_symmetry_char(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 */
+	if(rsb__is_symmetric(mtxAp))
+		return 'S';
+	else
+	if(rsb__is_hermitian(mtxAp))
+		return 'H';
+	else
+		return 'G';
+}
+
+static const rsb_char_t * rsb_do_get_symmetry_string(const struct rsb_mtx_t *mtxAp, rsb_char_t * auxbuf)
+{
+	/*!
+	 * \ingroup gr_internals
+	 */
+	const rsb_char_t * s = "Symmetric";
+	const rsb_char_t * g = "General";
+	const rsb_char_t * h = "Hermitian";
+
+	if(rsb__is_symmetric(mtxAp))
+		rsb__strcpy(auxbuf,s);
+	else
+	if(rsb__is_hermitian(mtxAp))
+		rsb__strcpy(auxbuf,h);
+	else
+		rsb__strcpy(auxbuf,g);
+	return auxbuf;
+}
+
+rsb_err_t rsb__fprint_matrix_implementation_code(const struct rsb_mtx_t *mtxAp, const rsb_char_t * op, rsb_flags_t inflags, FILE*fd)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_char_t buf[RSB_CONST_MATRIX_IMPLEMENTATION_CODE_STRING_MAX_LENGTH];/* Flawfinder: ignore */
+	
+	fprintf( fd, "%s", rsb__sprint_matrix_implementation_code(mtxAp,op,inflags,buf));
+	return errval;
+}
+
+void rsb__cat_compver(rsb_char_t * buf)
+{
+	/* FIXME : fix	rsb_util_sprintf and use it ! */
+#if defined(__INTEL_COMPILER)
+	/* icc 10.10 is ok */
+	rsb__sprintf(buf,"intel-%d",__INTEL_COMPILER);
+#elif   defined(__xlC__)
+	/* ok on sp5 */
+	rsb__sprintf(buf,"xlc-%d",__xlC__);
+#elif   defined(__PGI)
+	/* pgcc-7.0.4 is ok */
+	rsb__sprintf(buf,"pgcc-%d.%d.%d",__PGIC__,__PGIC_MINOR__,__PGIC_PATCHLEVEL__);
+#elif   defined(__GNUC__)
+	rsb__sprintf(buf,"gcc-%d.%d",__GNUC__,__GNUC_MINOR__);
+#elif defined(__SUNPRO_CC)
+	rsb__sprintf(buf,"sun-%d",__SUNPRO_CC);
+#else /* __SUNPRO_CC */
+	rsb_util_strcat(buf,"CC?");
+#endif /* __SUNPRO_CC */
+}
+
+const rsb_char_t * rsb__sprint_matrix_implementation_code(const struct rsb_mtx_t *mtxAp, const rsb_char_t * op, rsb_flags_t inflags, rsb_char_t * buf)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * Gives back a matrix implementation info string.
+	 * NOTE: for more consistency, we should translate any \t (TAB) char into ' '.
+	 * NOTE: it will give some more info, too.
+	 *
+	 * \return a static string pointer on correct operation, NULL otherwise
+	 * */
+	
+	rsb_char_t sep[] = "/";
+	const rsb_char_t * csp;
+	rsb_long_t sm = 0;
+	rsb_long_t tsm = 0;
+	rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+	rsb_blk_idx_t br = 0,bc = 0;
+	rsb_char_t auxbuf[RSB_TOKLEN];
+
+	if(!mtxAp)
+		return NULL;
+
+	rsb__get_blocking_size(mtxAp,&br,&bc);
+
+	flags = mtxAp->flags|inflags;
+
+	sm = rsb__submatrices(mtxAp);
+	tsm = rsb__terminal_recursive_matrix_count(mtxAp);
+
+	buf[0] = '\0';
+
+	/* FIXME : DANGER */
+
+	if(1)
+	{
+		long hcoo = rsb__terminal_recursive_matrix_count_with_storage_and_flags(mtxAp,RSB_MATRIX_STORAGE_BCOR,RSB_FLAG_USE_HALFWORD_INDICES_CSR);
+		long hcsr = rsb__terminal_recursive_matrix_count_with_storage_and_flags(mtxAp,RSB_MATRIX_STORAGE_BCSR,RSB_FLAG_USE_HALFWORD_INDICES_CSR);
+		long fcoo = rsb__terminal_recursive_matrix_count_with_storage_and_no_flags(mtxAp,RSB_MATRIX_STORAGE_BCOR,RSB_FLAG_USE_HALFWORD_INDICES_CSR);
+		long fcsr = rsb__terminal_recursive_matrix_count_with_storage_and_no_flags(mtxAp,RSB_MATRIX_STORAGE_BCSR,RSB_FLAG_USE_HALFWORD_INDICES_CSR);
+		long kinds = 0;
+		if(hcoo)++kinds;
+		if(hcsr)++kinds;
+		if(fcoo)++kinds;
+		if(fcsr)++kinds;
+#if 0
+		if(fcoo==0 && hcoo==0)
+			rsb_util_strcat(buf,"CSR");
+		else
+		if(fcsr==0 && hcsr==0)
+			rsb_util_strcat(buf,"COO");
+		else
+#endif
+			rsb_util_strcat(buf,"RSB");
+	}
+	else
+	{
+	if(rsb__is_recursive_matrix(flags))
+		rsb_util_strcat(buf,"R");
+
+#ifdef RSB_MATRIX_STORAGE_BCOR
+	if(mtxAp->matrix_storage & RSB_MATRIX_STORAGE_BCOR)
+	{
+		if(br==1&&bc==1)
+			rsb_util_strcat(buf,"COR");
+		else
+			rsb_util_strcat(buf,RSB_MATRIX_STORAGE_BCOR_STRING);
+	}
+	else
+#endif /* RSB_MATRIX_STORAGE_BCOR */
+#ifdef RSB_MATRIX_STORAGE_BCOC
+	if(mtxAp->matrix_storage & RSB_MATRIX_STORAGE_BCOC)
+	{
+		if(br==1&&bc==1)
+			rsb_util_strcat(buf,"COC");
+		else
+			rsb_util_strcat(buf,RSB_MATRIX_STORAGE_BCOC_STRING);
+	}
+	else
+#endif /* RSB_MATRIX_STORAGE_BCOC */
+#ifdef RSB_MATRIX_STORAGE_BCSR
+	if(mtxAp->matrix_storage & RSB_MATRIX_STORAGE_BCSR)
+	{
+		if(br==1&&bc==1)
+			rsb_util_strcat(buf,"CSR");
+		else
+			rsb_util_strcat(buf,RSB_MATRIX_STORAGE_BCSR_STRING);
+	}
+	else
+#endif /* RSB_MATRIX_STORAGE_BCSR */
+#ifdef RSB_MATRIX_STORAGE_BCSC
+	if(mtxAp->matrix_storage & RSB_MATRIX_STORAGE_BCSC)
+	{
+		if(br==1&&bc==1)
+			rsb_util_strcat(buf,"CSC");
+		else
+			rsb_util_strcat(buf,RSB_MATRIX_STORAGE_BCSC_STRING);
+	}
+	else
+#endif /* RSB_MATRIX_STORAGE_BCSC */
+#ifdef RSB_MATRIX_STORAGE_VBR 
+	if(mtxAp->matrix_storage & RSB_MATRIX_STORAGE_VBR )
+		rsb_util_strcat(buf,RSB_MATRIX_STORAGE_VBR_STRING);
+	else
+#endif /* RSB_MATRIX_STORAGE_VBR */
+#ifdef RSB_MATRIX_STORAGE_VBC
+	if(mtxAp->matrix_storage & RSB_MATRIX_STORAGE_VBC )
+		rsb_util_strcat(buf,RSB_MATRIX_STORAGE_VBC_STRING);
+	else
+#endif /* RSB_MATRIX_STORAGE_VBC */
+#ifdef RSB_MATRIX_STORAGE_LR
+	if(mtxAp->matrix_storage & RSB_MATRIX_STORAGE_LR )
+		rsb_util_strcat(buf,"LBLR");
+	else
+#endif /* RSB_MATRIX_STORAGE_LR */
+#ifdef RSB_MATRIX_STORAGE_LC
+	if(mtxAp->matrix_storage & RSB_MATRIX_STORAGE_LC )
+		rsb_util_strcat(buf,"LBLC");
+	else
+#endif /* RSB_MATRIX_STORAGE_LC */
+		return NULL;
+	}
+	{
+//	if(sm>=1 && rsb__is_recursive_matrix(mtxAp))/* NEW */ /* FIXME : rsb__is_recursive_matrix() seems plagued by indeterminism! */
+		if(sm>=1 /*&& rsb__is_recursive_matrix(mtxAp)*/)/* NEW */
+			rsb__sprintf(buf+rsb__util_strlen(buf),"(@:%ld/%ld;%3.1lf%%diagnz;%3.1lf%%diagblk)",sm,tsm,
+					(((double)rsb__get_diagonal_elements_count(mtxAp)*100)/(mtxAp->nnz)),
+					(((double)rsb__get_diagonal_submatrices_count(mtxAp)*100)/(tsm))
+					);
+	}
+#if 1
+	/* uhm. this refers to inter block ordering. */
+	if( mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER )
+		rsb_util_strcat(buf,"-C");
+	else
+		rsb_util_strcat(buf,"-R");
+#endif
+	rsb_util_strcat(buf,sep);
+	rsb_util_strcat(buf,"RowMajor");
+	rsb_util_strcat(buf,sep);
+	rsb_util_strcat(buf,rsb_do_get_symmetry_string(mtxAp,auxbuf));
+	rsb_util_strcat(buf,sep);
+	rsb_util_strcat(buf,op?op:"");
+	if( RSB_DO_FLAG_HAS(flags,RSB_FLAG_AUTO_BLOCKING))
+		rsb_util_strcat(buf,"-AutoBlocking");
+	if( RSB_DO_FLAG_HAS(flags,RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR))
+		rsb_util_strcat(buf,"-InPlace");
+#if 0
+	if( RSB_DO_FLAG_HAS(flags,RSB_FLAG_USE_HALFWORD_INDICES_COO))
+		rsb__sprintf(buf+rsb__util_strlen(buf),"-SwitchToHalfwordCoo:(%ld~%ld)"
+		,rsb__terminal_recursive_matrix_count_with_flags(mtxAp,RSB_FLAG_USE_HALFWORD_INDICES_COO)
+		,tsm);
+	if( RSB_DO_FLAG_HAS(flags,RSB_FLAG_USE_HALFWORD_INDICES_CSR))
+		rsb__sprintf(buf+rsb__util_strlen(buf),"-SwitchToHalfwordCsr:(%ld~%ld)"
+		,rsb__terminal_recursive_matrix_count_with_flags_but(mtxAp,RSB_FLAG_USE_HALFWORD_INDICES_CSR,RSB_FLAG_USE_HALFWORD_INDICES_COO)
+		,tsm);
+#else
+	{
+		long hcoo = rsb__terminal_recursive_matrix_count_with_storage_and_flags(mtxAp,RSB_MATRIX_STORAGE_BCOR,RSB_FLAG_USE_HALFWORD_INDICES_COO);
+		long hcsr = rsb__terminal_recursive_matrix_count_with_storage_and_flags(mtxAp,RSB_MATRIX_STORAGE_BCSR,RSB_FLAG_USE_HALFWORD_INDICES_CSR);
+		long fcoo = rsb__terminal_recursive_matrix_count_with_storage_and_no_flags(mtxAp,RSB_MATRIX_STORAGE_BCOR,RSB_FLAG_USE_HALFWORD_INDICES_COO);
+		long fcsr = rsb__terminal_recursive_matrix_count_with_storage_and_no_flags(mtxAp,RSB_MATRIX_STORAGE_BCSR,RSB_FLAG_USE_HALFWORD_INDICES_CSR);
+		rsb__sprintf(buf+rsb__util_strlen(buf),"-HalfwordCsr:(%ld~%ld)",hcsr,tsm);
+		rsb__sprintf(buf+rsb__util_strlen(buf),"-FullwordCsr:(%ld~%ld)",fcsr,tsm);
+		rsb__sprintf(buf+rsb__util_strlen(buf),"-HalfwordCoo:(%ld~%ld)",hcoo,tsm);
+		rsb__sprintf(buf+rsb__util_strlen(buf),"-FullwordCoo:(%ld~%ld)",fcoo,tsm);
+	}
+#endif
+#if 0
+	if( RSB_DO_FLAG_HAS(flags,RSB_FLAG_RECURSIVE_HALF_DETECTED_CACHE))
+		rsb_util_strcat(buf,"-BlockForHalfCache");
+	if( RSB_DO_FLAG_HAS(flags,RSB_FLAG_RECURSIVE_DOUBLE_DETECTED_CACHE))
+		rsb_util_strcat(buf,"-BlockForDoubleCache");
+#endif
+	if( RSB_DO_FLAG_HAS(flags,RSB_FLAG_RECURSIVE_SUBDIVIDE_MORE_ON_DIAG))
+		rsb_util_strcat(buf,"-ExtraDiagonalSubdivisions");
+#ifdef RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES
+	if( RSB_DO_FLAG_HAS(flags,RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES))
+		rsb_util_strcat(buf,"-NoMicroLeafs");
+#endif /* RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES */
+
+	rsb_util_strcat(buf,sep);
+	RSB_NUMERICAL_TYPE_STRING(csp,mtxAp->typecode);
+	rsb_util_strcat(buf,csp);
+
+	if(1)
+	{
+		rsb_thread_t ncores = 0;
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+                #pragma omp parallel RSB_NTC
+                if(omp_get_thread_num()==0)
+                {
+                        ncores = omp_get_num_threads();
+                }
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+		ncores = ncores?ncores:1;
+		rsb_util_strcat(buf,sep);
+		rsb__sprintf(buf+rsb__util_strlen(buf),"cores:%d",ncores);
+	}
+	/* see http://gasnet.cs.berkeley.edu/dist/other/portable_platform.h */
+	rsb_util_strcat(buf,sep);
+	/* NOTE : on some systems, __GNUC__ is defined even under icc! (therefore we switched precedence) */
+
+#if 0
+	/* FIXME : fix	rsb_util_sprintf and use it ! */
+#if defined(__INTEL_COMPILER)
+	/* icc 10.10 is ok */
+	rsb__sprintf(buf+rsb__util_strlen(buf),"intel-%d",__INTEL_COMPILER);
+#elif   defined(__xlC__)
+	/* ok on sp5 */
+	rsb__sprintf(buf+rsb__util_strlen(buf),"xlc-%d",__xlC__);
+#elif   defined(__PGI)
+	/* pgcc-7.0.4 is ok */
+	rsb__sprintf(buf+rsb__util_strlen(buf),"pgcc-%d.%d.%d",__PGIC__,__PGIC_MINOR__,__PGIC_PATCHLEVEL__);
+#elif   defined(__GNUC__)
+	rsb__sprintf(buf+rsb__util_strlen(buf),"gcc-%d.%d",__GNUC__,__GNUC_MINOR__);
+#elif defined(__SUNPRO_CC)
+	rsb__sprintf(buf+rsb__util_strlen(buf),"sun-%d",__SUNPRO_CC);
+#else /* __SUNPRO_CC */
+	rsb_util_strcat(buf,"CC?");
+#endif /* __SUNPRO_CC */
+#else
+	rsb__cat_compver(buf+rsb__util_strlen(buf));
+#endif
+	rsb_util_strcat(buf,sep);
+	/* still missing CXX case */
+#if   defined(CFLAGS)
+	//rsb_util_sprintf(buf+rsb__util_strlen(buf),"%s",CFLAGS);
+	rsb__sprintf(buf+rsb__util_strlen(buf),"%s",CFLAGS);
+#else /* CFLAGS */
+	rsb_util_strcat(buf,"");
+#endif /* CFLAGS */
+	/* NEW */
+	rsb_util_strcat(buf,sep);
+	rsb__sprintf(buf+rsb__util_strlen(buf),"sizeof(nnz_idx_t):%zd,",sizeof(rsb_nnz_idx_t));
+	rsb__sprintf(buf+rsb__util_strlen(buf),"sizeof(coo_idx_t):%zd,",sizeof(rsb_coo_idx_t));
+	rsb__sprintf(buf+rsb__util_strlen(buf),"sizeof(blk_idx_t):%zd",sizeof(rsb_blk_idx_t));
+
+	/* NEW */
+	rsb_util_strcat(buf,sep);
+	rsb__sprintf(buf+rsb__util_strlen(buf),"idx_storage:%zd-idx_storage_in_csr:%zd-idx_storage_in_coo:%zd"
+		,(size_t)rsb__get_index_storage_amount(mtxAp)
+		,((size_t)mtxAp->nnz)*sizeof(rsb_coo_idx_t)+((size_t)mtxAp->Mdim+1)*sizeof(rsb_nnz_idx_t)
+		,((size_t)mtxAp->nnz)*sizeof(rsb_coo_idx_t)*2
+		);
+
+	rsb_util_strcat(buf,sep);
+#ifdef RSB_PACKAGE_VERSION 
+	rsb__sprintf(buf+rsb__util_strlen(buf),"version:%s",RSB_PACKAGE_VERSION);
+#endif /* RSB_PACKAGE_VERSION */
+	rsb_util_strcat(buf,sep);
+	rsb_util_strcat(buf,"memhinfo:[");
+	{rsb_char_t usmhib[RSB_MAX_LINE_LENGTH];
+	rsb_util_strcat(buf,rsb__get_mem_hierarchy_info_string(usmhib));}
+	rsb_util_strcat(buf,"]");
+	rsb_util_strcat(buf,sep);
+#ifdef RSB_HAVE_SYS_UTSNAME_H 
+	{
+		struct utsname un;
+		if(uname(&un)==0)
+			rsb__sprintf(buf+rsb__util_strlen(buf),"%s",un.nodename);
+#if 0
+           struct utsname {
+               char sysname[];
+               char nodename[];
+               char release[];
+               char version[];
+               char machine[];
+           #ifdef _GNU_SOURCE
+               char domainname[];
+           #endif /* _GNU_SOURCE */
+           };
+#endif
+	}
+#else /* RSB_HAVE_SYS_UTSNAME_H */
+rsb_util_strcat(buf,"");
+#endif /* RSB_HAVE_SYS_UTSNAME_H */
+
+	return buf;
+}
+
+rsb_err_t rsb__util_get_bx_array(const rsb_char_t* optarg, int* bxlp, rsb_blk_idx_t **bxvp)
+{
+	/*!
+	   	\ingroup gr_internals
+	  
+		Will extract block row and block column sizes from user data encoded in optarg.
+		\param bxlp will be set to the number of desired block sizes.
+		\param bxvp will be set to an array (of dimension *bxlp, allocated with rsb__malloc()) holding the block sizes.
+
+	        \note : there are subtle dangers in this function
+	        \note : if *bxvp is not NULL, it will be freed
+	        \todo : move to some file named parse.c
+	 */
+	int bxl = 0;
+	rsb_blk_idx_t * bxv = NULL;
+	const rsb_char_t*c = optarg;
+	int mint = 1,maxt = 1;
+
+	if(!bxlp || !bxvp)
+		return RSB_ERR_BADARGS;
+
+	if(*optarg==':')
+	{
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+		maxt = omp_get_max_threads();
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+		while(mint<=maxt) mint *= 2,++bxl;
+		if( mint > maxt && mint/2 != maxt ) bxl++;
+		mint = 1;
+	}
+	else
+	do
+	{
+		int nul = 0;
+		while(*c!=nul && !isdigit(*c))++c;
+		if(isdigit(*c))bxl++;
+		while(*c &&  isdigit(*c))++c;
+	}while(*c);
+
+	bxv = *bxvp;
+	if(bxv)rsb__free(bxv);
+	bxv = rsb__malloc(sizeof(rsb_blk_idx_t)*(size_t)bxl);
+	if(!bxv)goto err;
+	bxl = 0;
+	c = optarg;
+
+	if(*optarg==':')
+	{
+		while( mint <= maxt )
+			bxv[bxl++] = mint, mint *= 2;
+		if( bxv[bxl-1] != maxt )
+			bxv[bxl++] = maxt;
+	}
+	else
+	do
+	{
+		int nul = 0,ci;
+		while(*c!=nul && !isdigit(*c))++c;
+		{
+			ci = rsb__util_atoi(c);/* Flawfinder: ignore */
+			if(ci<1)goto err;
+			if(isdigit(*c))bxv[bxl++] = (rsb_blk_idx_t)ci;
+		}
+		while(*c &&  isdigit(*c))++c;
+	}while(*c);
+	
+	*bxlp = bxl;
+	*bxvp = bxv;
+	
+	return RSB_ERR_NO_ERROR;
+err:
+	RSB_CONDITIONAL_FREE(bxv);
+	rsb__do_perror(NULL,RSB_ERR_GENERIC_ERROR);
+	return RSB_ERR_GENERIC_ERROR;
+}
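+
+/* A hedged usage sketch for rsb__util_get_bx_array() (hypothetical caller,
+ * not part of the library): "1,2,4" parses into {1,2,4}, while a leading
+ * ':' expands to the powers of two up to the maximal OpenMP thread count. */
+#if 0
+{
+	int bxl = 0;
+	rsb_blk_idx_t * bxv = NULL;
+
+	if( rsb__util_get_bx_array("1,2,4",&bxl,&bxv) == RSB_ERR_NO_ERROR )
+	{
+		/* here bxl == 3 and bxv[0..2] == {1,2,4} */
+		RSB_CONDITIONAL_FREE(bxv);
+	}
+}
+#endif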
+
+rsb_nnz_idx_t rsb__util_atonnz(const rsb_char_t * optarg)
+{
+	/*!
+		\ingroup gr_internals
+
+	   	Will parse a single rsb_nnz_idx_t number.
+	 	\param optarg
+
+		\warning : may overflow; negative values are clamped to zero.
+	 */
+	rsb_long_t i = rsb__util_atol(optarg);/*Flawfinder: ignore */
+	if(i<0)
+		i = 0;
+	return (rsb_nnz_idx_t)i;
+}
+
+rsb_long_t rsb__util_atol(const rsb_char_t *nptr)
+{
+	/*!
+	  	\ingroup gr_internals
+	 */
+	return atol(nptr);/* Flawfinder: ignore */
+}
+
+rsb_real_t rsb__util_atof(const rsb_char_t *nptr)
+{
+	/*!
+	  	\ingroup gr_internals
+	 */
+	return atof(nptr);/* Flawfinder: ignore */
+}
+
+int rsb__util_atoi(const rsb_char_t *nptr)
+{
+	/*!
+	  	\ingroup gr_internals
+	 */
+	int n = 0;
+
+	if(nptr)
+		n = atoi(nptr);/* Flawfinder: ignore */
+
+	return n;
+}
+
+static int rsb__util_atoi_kmX(const rsb_char_t *nptr, int base)
+{
+	/*!
+	  	\ingroup gr_internals
+	 */
+	int v = rsb__util_atoi(nptr);
+
+	if(!nptr)
+		goto ret;
+	while(isdigit(*nptr))
+		++nptr;
+	if(*nptr && tolower(*nptr)=='g')
+		v *= base * base * base;
+	if(*nptr && tolower(*nptr)=='m')
+		v *= base * base;
+	if(*nptr && tolower(*nptr)=='k')
+		v *= base;
+ret:
+	return v;
+}
+
+int rsb__util_atoi_km2(const rsb_char_t *nptr)
+{
+	return rsb__util_atoi_kmX(nptr, 1024);
+}
+
+int rsb__util_atoi_km10(const rsb_char_t *nptr)
+{
+	return rsb__util_atoi_kmX(nptr, 1000);
+}
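+
+/* Illustration of the assumed suffix semantics of rsb__util_atoi_kmX():
+ *   rsb__util_atoi_km2("4K")  == 4*1024      == 4096
+ *   rsb__util_atoi_km2("2m")  == 2*1024*1024 == 2097152
+ *   rsb__util_atoi_km10("4k") == 4*1000      == 4000
+ * The k/m/g suffixes are case insensitive; a plain "42" parses as 42. */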
+
+rsb_err_t rsb__copy_css_arrays(const void *iVA, const rsb_coo_idx_t * iINDX, const rsb_coo_idx_t * iXA, const rsb_nnz_idx_t nnz, rsb_coo_idx_t X, rsb_type_t typecode, void *oVA, rsb_coo_idx_t * oINDX, rsb_nnz_idx_t * oXA)
+{
+	if(!iVA || !iINDX || !iXA || RSB_INVALID_COO_INDEX(X) || RSB_INVALID_NNZ_INDEX(nnz) || !oVA || !oINDX || !oXA)
+		return RSB_ERR_BADARGS;
+	RSB_CSR_MEMCPY(oVA,oINDX,oXA,iVA,iINDX,iXA,nnz,X,RSB_SIZEOF(typecode));
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__allocate_csc_arrays_from_coo_sorted(const void *VA, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const rsb_nnz_idx_t nnz, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_type_t typecode, void **VAp, rsb_coo_idx_t ** indxp, rsb_nnz_idx_t ** indptrp)
+{
+	return rsb__allocate_csr_arrays_from_coo_sorted(VA, JA, IA, nnz, k, m, typecode, VAp, indxp, indptrp);
+}
+
+rsb_err_t rsb__allocate_csr_arrays_from_coo_sorted(const void *VA, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const rsb_nnz_idx_t nnz, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_type_t typecode, void **VAp, rsb_coo_idx_t ** indxp, rsb_nnz_idx_t ** indptrp)
+{
+	/*!
+	 	\ingroup gr_internals
+		
+		FIXME : UNFINISHED, UNTESTED, NEW, and SLOW
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t * indx = NULL;	/* column indices, returned via *indxp */
+	rsb_nnz_idx_t * indpntr = NULL, MI = 0;	/* row pointers, returned via *indptrp */
+	void *cVA = NULL;
+
+	if(!indxp || !indptrp || !JA || !IA) /* VA is optional */
+	{
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+
+	indx = rsb__clone_area(JA,sizeof(rsb_coo_idx_t)*(nnz));	/* FIXME ! BAD ! */
+	indpntr = rsb__calloc(sizeof(rsb_nnz_idx_t)*(m+1));
+
+	if(!indx || !indpntr)
+	{
+		errval = RSB_ERR_ENOMEM;
+		goto err;
+	}
+
+	if(VA)
+	{
+		cVA = rsb__clone_area(VA,RSB_SIZEOF(typecode)*(nnz));
+		if(!cVA)
+		{
+			errval = RSB_ERR_ENOMEM;
+			goto err;
+		}
+	}
+
+	errval = rsb__do_account_sorted_optimized_css(IA,JA,m,k,nnz,indpntr+1,NULL);
+
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+
+	for(MI=0;MI<m;++MI)
+		indpntr[MI+1] += indpntr[MI];
+
+	if(VAp)*VAp = cVA;
+	*indxp = indx;
+	*indptrp = indpntr;
+
+	goto ok;
+err:
+	RSB_CONDITIONAL_FREE(cVA);
+	RSB_CONDITIONAL_FREE(indx);
+	RSB_CONDITIONAL_FREE(indpntr);
+ok:
+	RSB_DO_ERR_RETURN(errval)
+}
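+
+/* Worked example for the routine above (a sketch, not library output):
+ * given the row-sorted COO input
+ *   IA = {0,0,1,2}, JA = {1,3,0,2}, nnz = 4, m = 3,
+ * the accounting pass stores per-row counts indpntr[1..3] = {2,1,1},
+ * and the prefix sum loop turns them into the CSR row pointer array
+ *   indpntr[] = {0,2,3,4},
+ * while indx[] keeps the (already sorted) column indices of JA. */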
+
+rsb_err_t rsb__print_configuration_string(const char *pn, rsb_char_t * cs, rsb_bool_t wci)
+{
+	/*!
+	 	\ingroup gr_internals
+	*/
+	/*
+ 		note : an interleaved 
+			RSB_INFO(
+			#ifdef FOO
+			"string"
+			# endif
+			)
+			style of string composition breaks xlc, so we don't use it in the following.
+		FIXME: pn should be reasonably short, as this routine does NOT check for buffer overflow. For this same reason this function shall remain INTERNAL always.
+ 	*/
+	rsb_char_t buf[RSB_MAX_VERSION_STRING_LENGTH];
+	const rsb_char_t * sep = " ";
+	const rsb_char_t * nl = "\n";
+
+	RSB_STRCPY(buf,"");
+	if(!cs)
+	{
+		// TODO: missing error handling
+	       	goto done;
+	}
+#if 0
+	rsb__sprintf(buf,"%s version: %d.%d.%d\n",pn?pn:"librsb",RSB_LIBRSB_VER_MAJOR,RSB_LIBRSB_VER_MINOR,RSB_LIBRSB_VER_RELEASE);
+#else
+	rsb__sprintf(buf,"%s version: %s\n",pn?pn:"librsb",RSB_LIBRSB_VER_STRING);
+#endif
+	if(wci == RSB_BOOL_FALSE)
+	{
+		rsb__sprintf(cs,"%s",buf);
+            	rsb__sprintf(cs+strlen(cs),"%s.\n\n",RSB_COPYRIGHT_STRING);
+            	rsb__sprintf(cs+strlen(cs),"Written by %s.\n",RSB_PACKAGE_BUGREPORT);
+		goto done;
+	}
+	rsb_util_strcat(buf,"format switches:");
+#ifdef RSB_MATRIX_STORAGE_BCSR_STRING 
+	rsb_util_strcat(buf,"br");
+	rsb_util_strcat(buf,sep);
+#endif /* RSB_MATRIX_STORAGE_BCSR_STRING */
+#ifdef RSB_MATRIX_STORAGE_BCSC_STRING 
+	rsb_util_strcat(buf,"bc");
+	rsb_util_strcat(buf,sep);
+#endif /* RSB_MATRIX_STORAGE_BCSC_STRING */
+#ifdef RSB_MATRIX_STORAGE_VBR_STRING 
+	rsb_util_strcat(buf,"vr");
+	rsb_util_strcat(buf,sep);
+#endif /* RSB_MATRIX_STORAGE_VBR_STRING */
+#ifdef RSB_MATRIX_STORAGE_VBC_STRING 
+	rsb_util_strcat(buf,"vc");
+	rsb_util_strcat(buf,sep);
+#endif /* RSB_MATRIX_STORAGE_VBC_STRING */
+#ifdef RSB_MATRIX_STORAGE_LC_STRING 
+	rsb_util_strcat(buf,"lc");
+	rsb_util_strcat(buf,sep);
+#endif /* RSB_MATRIX_STORAGE_LC_STRING */
+#ifdef RSB_MATRIX_STORAGE_LR_STRING 
+	rsb_util_strcat(buf,"lr");
+	rsb_util_strcat(buf,sep);
+#endif /* RSB_MATRIX_STORAGE_LR_STRING */
+	rsb_util_strcat(buf,nl);
+	rsb_util_strcat(buf,"ops:");
+	rsb_util_strcat(buf,RSB_M4_MATRIX_META_OPS_STRING);
+	rsb_util_strcat(buf,nl);
+
+	rsb_util_strcat(buf,"types:");
+	rsb_util_strcat(buf,RSB_M4_MATRIX_TYPES_STRING);
+	rsb_util_strcat(buf,nl);
+	rsb_util_strcat(buf,"type char codes:");
+	rsb_util_strcat(buf,RSB_NUMERICAL_TYPE_PREPROCESSOR_SYMBOLS );
+	rsb_util_strcat(buf,nl);
+	rsb_util_strcat(buf,"transposition codes:");
+	rsb_util_strcat(buf,RSB_TRANSPOSITIONS_PREPROCESSOR_SYMBOLS );
+	rsb_util_strcat(buf,nl);
+
+	rsb_util_strcat(buf,"restrict keyword is: ");
+#ifdef RSB_restrict
+	rsb_util_strcat(buf,"on" );
+#else /* RSB_restrict */
+	rsb_util_strcat(buf,"off" );
+#endif /* RSB_restrict */
+	rsb_util_strcat(buf,nl);
+
+	rsb_util_strcat(buf,"row unrolls:");
+	rsb_util_strcat(buf,RSB_M4_WANT_COLUMN_UNLOOP_FACTORS_STRING);
+	rsb_util_strcat(buf,nl);
+	rsb_util_strcat(buf,"column unrolls:");
+	rsb_util_strcat(buf,RSB_M4_WANT_ROW_UNLOOP_FACTORS_STRING	);
+	rsb_util_strcat(buf,nl);
+	rsb_util_strcat(buf,"reference benchmark sample minimum time (seconds):%lg\n");
+	rsb_util_strcat(buf,"reference benchmark sample minimum runs:%zd\n");
+	rsb_util_strcat(buf,"maximal configured block size:%zd\n");
+#ifdef RSB_WANT_OSKI_BENCHMARKING 
+	rsb_util_strcat(buf,"oski comparative benchmarking enabled\n");
+#endif /* RSB_WANT_OSKI_BENCHMARKING */
+	rsb_util_strcat(buf,"sizeof(rsb_nnz_idx_t):%zd\n");
+	rsb_util_strcat(buf,"sizeof(rsb_coo_idx_t):%zd\n");
+	rsb_util_strcat(buf,"sizeof(rsb_blk_idx_t):%zd\n");
+	rsb_util_strcat(buf,"sizeof(size_t):%zd\n");
+	rsb_util_strcat(buf,"sizeof(struct rsb_mtx_t):%zd\n");
+	rsb_util_strcat(buf,"sizeof(struct rsb_blas_sparse_matrix_t):%zd\n");
+	rsb_util_strcat(buf,"sizeof(struct rsb_coo_matrix_t):%zd\n");
+	rsb_util_strcat(buf,"RSB_MAX_MATRIX_DIM:%zd\n");
+	rsb_util_strcat(buf,"RSB_MAX_MATRIX_NNZ:%zd\n");
+	rsb_util_strcat(buf,"RSB_CONST_MAX_SUPPORTED_CORES:%zd\n");
+	rsb_util_strcat(buf,"RSB_BLAS_MATRICES_MAX:%zd\n");
+	rsb_util_strcat(buf,"RSB_CONST_MIN_NNZ_PER_ROW_FOR_COO_SWITCH:%zd\n");
+
+	rsb_util_strcat(buf,"RSB_USER_SET_MEM_HIERARCHY_INFO:%s\n");
+	rsb_util_strcat(buf,"RSB_MAX_VALUE_FOR_TYPE(rsb_half_idx_t):%zd\n");
+	rsb_util_strcat(buf,"RSB_IOLEVEL:%d\n");
+	//RSB_INFO(
+	rsb__sprintf(cs,
+		buf,
+		RSB_BENCHMARK_MIN_SECONDS,
+		(rsb_printf_int_t)RSB_BENCHMARK_MIN_RUNS,
+		(rsb_printf_int_t)RSB_MAXIMAL_CONFIGURED_BLOCK_SIZE,
+		sizeof(rsb_nnz_idx_t),
+		sizeof(rsb_coo_idx_t),
+		sizeof(rsb_blk_idx_t),
+		sizeof(size_t),
+		sizeof(struct rsb_mtx_t),
+		sizeof(struct rsb_blas_sparse_matrix_t),
+		sizeof(struct rsb_coo_matrix_t),
+		(rsb_printf_int_t)RSB_MAX_MATRIX_DIM,
+		(rsb_printf_int_t)RSB_MAX_MATRIX_NNZ,
+		(rsb_printf_int_t)RSB_CONST_MAX_SUPPORTED_CORES,
+		(rsb_printf_int_t)RSB_BLAS_MATRICES_MAX,
+		(rsb_printf_int_t)RSB_CONST_MIN_NNZ_PER_ROW_FOR_COO_SWITCH
+		,rsb__init_get_mem_hierarchy_info_string(RSB_BOOL_FALSE)?rsb__init_get_mem_hierarchy_info_string(RSB_BOOL_FALSE):""
+		,RSB_MAX_VALUE_FOR_TYPE(rsb_half_idx_t)
+		,RSB_IOLEVEL 
+	);
+done:
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_blk_idx_t rsb__recursive_middle_block_index(rsb_blk_idx_t i)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * \return the split point index.
+	 */
+#if RSB_EXPERIMENTAL_MORTON_ORDERED_RECURSION  
+	rsb_blk_idx_t s = 0;
+	while( (1<<(s+1)) < i)
+		++s;
+	return (1<<s);
+#else /* RSB_EXPERIMENTAL_MORTON_ORDERED_RECURSION */
+#if 1
+	return (i+1)/2;
+#else
+	int p = 0;
+	while(i>>(p+1) && i > (1<<(p+1)))
+		++p;
+//	RSB_INFO("%d %d %d\n",i,p,(1<<p));
+	return (1<<p);
+#endif
+#endif /* RSB_EXPERIMENTAL_MORTON_ORDERED_RECURSION */
+}
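+
+/* Example values for the default ((i+1)/2) branch above:
+ * rsb__recursive_middle_block_index(5) == 3, (4) == 2, (1) == 1.
+ * Under RSB_EXPERIMENTAL_MORTON_ORDERED_RECURSION the split point is
+ * instead the largest power of two strictly below i (e.g. 5 -> 4). */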
+
+rsb_err_t rsb__recursive_middle_index(const struct rsb_mtx_partitioning_info_t * pinfop, rsb_coo_idx_t * M_bp, rsb_coo_idx_t * K_bp )
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * Computes the recursive split point: writes the middle row index to *M_bp and the middle column index to *K_bp.
+	 */
+	rsb_blk_idx_t mBi;
+	rsb_blk_idx_t	kBi;
+	if(!pinfop || !M_bp || !K_bp)
+		return RSB_ERR_BADARGS;
+	mBi = rsb__recursive_middle_block_index(pinfop->M_b);
+	kBi = rsb__recursive_middle_block_index(pinfop->K_b);
+	if(pinfop->rpntr)
+		*M_bp = pinfop->rpntr[rsb__recursive_middle_block_index(mBi)];
+	else
+		*M_bp = rsb__recursive_middle_block_index(pinfop->nr);
+	if(pinfop->cpntr)
+		*K_bp = pinfop->cpntr[rsb__recursive_middle_block_index(kBi)];
+	else
+		*K_bp = rsb__recursive_middle_block_index(pinfop->nc);
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__recursive_split_point_parms_get(
+		const struct rsb_mtx_partitioning_info_t * pinfop,
+		rsb_coo_idx_t * moff, rsb_coo_idx_t * koff)
+{
+	/*!
+		\ingroup gr_internals
+
+		FIXME: this function is OBSOLETE
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!pinfop || !pinfop->rpntr || !pinfop->cpntr)
+	{
+		errval = RSB_ERR_BADARGS;
+	}
+	RSB_DEBUG_ASSERT(moff);
+	RSB_DEBUG_ASSERT(koff);
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(pinfop->M_b));
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(pinfop->K_b));
+	
+	*moff = pinfop->rpntr[ rsb__recursive_middle_block_index(pinfop->M_b) ];
+	*koff = pinfop->cpntr[ rsb__recursive_middle_block_index(pinfop->K_b) ];
+
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(*moff));
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(*koff));
+
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_long_t rsb__terminal_recursive_matrix_count(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \return the count of leaf (terminal) matrices
+	 *
+	 * TODO : change this function type!
+	 */
+	rsb_submatrix_idx_t i,j;
+	struct rsb_mtx_t * submatrix = NULL;
+	rsb_long_t smc = 0;
+
+	if(!mtxAp)
+	{smc = 0;goto done;}
+
+	if(rsb__is_terminal_recursive_matrix(mtxAp))
+	{smc = 1;goto done;}
+
+	RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+	if(submatrix)
+		smc += rsb__terminal_recursive_matrix_count(submatrix);
+done:
+	return smc;
+}
+
+rsb_err_t rsb__do_compute_terminal_nnz_min_max_avg_count(const struct rsb_mtx_t *mtxAp, rsb_nnz_idx_t * minnz, rsb_nnz_idx_t * maxnz, rsb_nnz_idx_t * avgnz)
+{
+//	struct rsb_mtx_t * submatrix = NULL;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(minnz)
+		*minnz = RSB_MAX_MATRIX_NNZ;
+	if(maxnz)
+		*maxnz = 0;
+	if(avgnz)
+		*avgnz = 0;
+	errval = rsb__do_compute_terminal_nnz_min_max_count(mtxAp,minnz,maxnz);
+	if(avgnz && mtxAp && !RSB_SOME_ERROR(errval))
+		*avgnz = mtxAp->nnz/rsb__terminal_recursive_matrix_count(mtxAp);
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_compute_terminal_nnz_min_max_count(const struct rsb_mtx_t *mtxAp, rsb_nnz_idx_t * minnz, rsb_nnz_idx_t * maxnz)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \return RSB_ERR_NO_ERROR on success, an error code otherwise; updates *minnz/*maxnz over the leaf submatrices.
+	 */
+	rsb_submatrix_idx_t i,j;
+	struct rsb_mtx_t * submatrix = NULL;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!mtxAp)
+	{
+		RSB_ERROR(RSB_ERRM_ES);
+		errval = RSB_ERR_BADARGS;
+		RSB_DO_ERR_RETURN(errval)
+	}
+
+	if(rsb__is_terminal_recursive_matrix(mtxAp))
+	{
+		if(minnz)
+//			RSB_STDOUT_MATRIX_SUMMARY(mtxAp), RSB_INFO(" <- MIN (from %d)\n",*minnz),
+			*minnz = RSB_MIN(*minnz,mtxAp->nnz);
+		if(maxnz)
+//			RSB_STDOUT_MATRIX_SUMMARY(mtxAp), RSB_INFO(" <- MAX (from %d)\n",*maxnz),
+			*maxnz = RSB_MAX(*maxnz,mtxAp->nnz);
+	}
+	else
+	{
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix)
+			RSB_DO_ERROR_CUMULATE(errval,rsb__do_compute_terminal_nnz_min_max_count(submatrix,minnz,maxnz));
+	}
+//done:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_long_t rsb__terminal_recursive_matrix_count_with_storage_and_no_flags(const struct rsb_mtx_t *mtxAp, rsb_fmt_t matrix_storage, rsb_flags_t flags)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \return the count of leaf (terminal) matrices
+	 *
+	 * TODO : change this function type!
+	 */
+	rsb_submatrix_idx_t i,j;
+	struct rsb_mtx_t * submatrix = NULL;
+	rsb_long_t smc = 0;
+
+	if(!mtxAp)
+	{smc=0;goto done;}
+
+	if(rsb__is_terminal_recursive_matrix(mtxAp))
+	{
+		if((!RSB_DO_FLAG_HAS(mtxAp->flags,flags)) && (mtxAp->matrix_storage==matrix_storage))
+			smc = 1;
+		goto done;
+	}
+
+	RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+	if(submatrix)
+		smc += rsb__terminal_recursive_matrix_count_with_storage_and_no_flags(submatrix,matrix_storage,flags);
+done:
+	return smc;
+}
+
+rsb_long_t rsb__terminal_recursive_matrix_count_with_storage_and_flags(const struct rsb_mtx_t *mtxAp, rsb_fmt_t matrix_storage, rsb_flags_t flags)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \return the count of leaf (terminal) matrices
+	 *
+	 * TODO : change this function type!
+	 */
+	rsb_submatrix_idx_t i,j;
+	struct rsb_mtx_t * submatrix = NULL;
+	rsb_long_t smc = 0;
+
+	if(!mtxAp)
+	{smc = 0;goto done;}
+
+	if(rsb__is_terminal_recursive_matrix(mtxAp))
+	{
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,flags) && (mtxAp->matrix_storage==matrix_storage))
+			smc = 1;
+		goto done;
+	}
+
+	RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+	if(submatrix)
+		smc += rsb__terminal_recursive_matrix_count_with_storage_and_flags(submatrix,matrix_storage,flags);
+done:
+	return smc;
+}
+
+rsb_long_t rsb__terminal_recursive_matrix_count_with_flags_but(const struct rsb_mtx_t *mtxAp, rsb_flags_t flags, rsb_flags_t nflags)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \return the count of leaf (terminal) matrices
+	 *
+	 * TODO : change this function type!
+	 */
+	rsb_submatrix_idx_t i,j;
+	struct rsb_mtx_t * submatrix = NULL;
+	rsb_long_t smc = 0;
+
+	if(!mtxAp)
+	{smc = 0;goto done;}
+
+	if(rsb__is_terminal_recursive_matrix(mtxAp))
+	{
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,flags) && !RSB_DO_FLAG_HAS(mtxAp->flags,nflags))
+			smc = 1;
+		goto done;
+	}
+
+	RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+	if(submatrix)
+		smc += rsb__terminal_recursive_matrix_count_with_flags_but(submatrix,flags,nflags);
+done:
+	return smc;
+}
+
+rsb_long_t rsb__terminal_recursive_matrix_count_with_flags(const struct rsb_mtx_t *mtxAp, rsb_flags_t flags)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \return the count of leaf (terminal) matrices
+	 *
+	 * TODO : change this function type!
+	 */
+	rsb_submatrix_idx_t i,j;
+	struct rsb_mtx_t * submatrix = NULL;
+	rsb_long_t smc = 0;
+
+	if(!mtxAp)
+	{smc = 0;goto done;}
+
+	if(rsb__is_terminal_recursive_matrix(mtxAp))
+	{
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,flags))
+			smc = 1;
+		goto done;
+	}
+
+	RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+	if(submatrix)
+		smc += rsb__terminal_recursive_matrix_count_with_flags(submatrix,flags);
+done:
+	return smc;
+}
+
+rsb_trans_t rsb__do_transposition_from_char(rsb_char_t tc)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * Maps a transposition character ('n', 't', 'c'; case insensitive) to the corresponding RSB_TRANSPOSITION_ constant, or RSB_INVALID_TRANS for anything else.
+	 */
+	if(tolower(tc)=='t')
+		return RSB_TRANSPOSITION_T;
+	else
+	if(tolower(tc)=='n')
+		return RSB_TRANSPOSITION_N;
+	else
+	if(tolower(tc)=='c')
+		return RSB_TRANSPOSITION_C;
+	else
+		return RSB_INVALID_TRANS;
+}
+
+rsb_trans_t rsb__do_transpose_transposition(rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * Returns the transposition of op(A) itself: N becomes T, while T and C become N; other values pass through unchanged.
+	 */
+	if(RSB_DOES_NOT_TRANSPOSE(transA))
+		return RSB_TRANSPOSITION_T;
+	if(transA == RSB_TRANSPOSITION_T)
+		return RSB_TRANSPOSITION_N;
+	if(transA == RSB_TRANSPOSITION_C)
+		return RSB_TRANSPOSITION_N;
+	return transA;
+}
+
+rsb_err_t rsb_spmm_inner(const struct rsb_mtx_t * mtxAp, const void * mrhs, void *mout, rsb_int_t bstride, rsb_int_t cstride, rsb_int_t nrhs, rsb_trans_t transA)
+{
+#ifdef RSB_HAVE_OPTYPE_SPMM_AZ
+	/*!
+	 * \ingroup gr_internals
+	 * fixme */
+
+	size_t el_size = mtxAp->el_size;
+
+	if( rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t i,j;
+		struct rsb_mtx_t * submatrix = NULL;
+		rsb_coo_idx_t mB = (mtxAp->rpntr[rsb__recursive_middle_block_index(mtxAp->M_b)]);
+		rsb_coo_idx_t kB = (mtxAp->cpntr[rsb__recursive_middle_block_index(mtxAp->K_b)]);
+
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix)
+		{
+			rsb_coo_idx_t moff,koff;
+
+			moff = i*mB;
+			koff = j*kB;
+	
+			rsb_spmm_inner(submatrix,((rsb_byte_t*)mrhs)+koff*el_size,((rsb_byte_t*)mout)+moff*el_size,bstride,cstride,nrhs,transA);
+		}
+		return RSB_ERR_NO_ERROR;
+	}
+	else
+		return rsb__do_spmm_az(mtxAp,mrhs,mout,bstride,cstride,nrhs,transA);/*FIXME*/
+#else /* RSB_HAVE_OPTYPE_SPMM_AZ */
+	return RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_HAVE_OPTYPE_SPMM_AZ */
+}
+
+rsb_flags_t rsb__do_flip_uplo_flags(rsb_flags_t flags)
+{
+	/**
+	 * \ingroup gr_internals
+	 */
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER))
+		RSB_DO_FLAG_SUBST(flags,RSB_FLAG_UPPER,RSB_FLAG_LOWER);
+	else
+		if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER))
+			RSB_DO_FLAG_SUBST(flags,RSB_FLAG_LOWER,RSB_FLAG_UPPER);
+	return flags;
+}
+
+rsb_flags_t rsb__do_detect_and_add_triangular_flags(rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_flags_t flags)
+{
+	/**
+	 * \ingroup gr_internals
+	 */
+	rsb_nnz_idx_t i;
+	if(!IA || !JA || RSB_INVALID_NNZ_INDEX(nnz))
+		return flags;
+	/* FIXME: this code could be optimized a great deal, by introducing a state machine like scan. */
+	for(i=0;i<nnz;++i)
+	{
+		if(IA[i]==JA[i])
+			continue;
+		if(IA[i]>JA[i])
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER);
+		else
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_UPPER);
+	}
+	if(RSB_NAND(RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER),RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER)))
+		RSB_DO_FLAG_ADD(flags,RSB_FLAG_TRIANGULAR);
+		
+	/* it could be the case that both RSB_FLAG_LOWER and RSB_FLAG_UPPER flags get caught */
+	return flags;
+}
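+
+/* Behaviour sketch for the routine above: given
+ *   IA = {0,1,2}, JA = {0,0,1}  (all entries on or below the diagonal),
+ * only RSB_FLAG_LOWER is added, and since UPPER and LOWER were not both
+ * seen, RSB_FLAG_TRIANGULAR is added too.  A matrix with both strictly
+ * upper and strictly lower entries gets UPPER and LOWER, but no
+ * RSB_FLAG_TRIANGULAR. */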
+
+rsb_err_t rsb__do_load_matrix_file_as_matrix_market(struct rsb_mtx_t ** mtxApp, const rsb_char_t * filename, rsb_flags_t flags, rsb_type_t typecode)
+{
+	/*!
+	 * FIXME: and typecode check ?
+	 * FIXME: UNFINISHED
+	 */
+	/** \ingroup gr_unfinished */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	void *VA = NULL;
+	rsb_coo_idx_t *IA = NULL, *JA = NULL;
+
+	if(!mtxApp)
+	{
+		RSB_ERROR(RSB_ERRM_E_MTXAP);
+		errval = RSB_ERR_BADARGS;
+	}
+	else
+	if(!filename)
+	{
+		RSB_ERROR(RSB_ERRM_NPSFF);
+		errval = RSB_ERR_BADARGS;
+	}
+	else
+	{
+		rsb_coo_idx_t m = 0,k = 0;
+		rsb_nnz_idx_t nnz = 0;
+#define RSB_20120309_FIX 1
+#if RSB_20120309_FIX
+		rsb_bool_t is_symmetric = RSB_BOOL_FALSE;
+		rsb_bool_t is_hermitian = RSB_BOOL_FALSE;
+		rsb_bool_t is_lower = RSB_BOOL_FALSE;
+		rsb_bool_t is_upper = RSB_BOOL_FALSE;
+		rsb_bool_t is_vector = RSB_BOOL_FALSE;
+		/* FIXME: shall update test_matops.c accordingly. */
+		/* FIXME: need limits checks! */
+		if(RSB_SOME_ERROR(rsb__util_mm_info_matrix_f(filename,&m,&k,&nnz,NULL,&is_symmetric,&is_hermitian,NULL,&is_lower,&is_upper,&is_vector) ) || is_vector )
+		{
+			RSB_PERR_GOTO(err,RSB_ERRMSG_NOTMTXMKT" : %s ..\n",filename);
+		}
+		else
+		{
+			if( is_symmetric == RSB_BOOL_TRUE ) RSB_DO_FLAG_ADD(flags,RSB_FLAG_SYMMETRIC);
+			if( is_hermitian == RSB_BOOL_TRUE ) RSB_DO_FLAG_ADD(flags,RSB_FLAG_HERMITIAN);
+			if(is_upper) RSB_DO_FLAG_ADD(flags,RSB_FLAG_UPPER);
+			if(is_lower) RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER);
+		}
+
+		if( m==k && m>1 /* && want_only_lowtri*/ )
+			nnz += m;	/* the loading routine shall allocate nnz+m */
+		else
+ 			nnz = 0;	/* the loading routine should determine nnz */
+#endif /* RSB_20120309_FIX */
+		if((errval = rsb__util_mm_load_matrix_f(filename,&IA,&JA,&VA,&m,&k,&nnz,typecode,flags,NULL,NULL))!=RSB_ERR_NO_ERROR)
+		{
+			RSB_ERROR(RSB_ERRM_ES);
+			rsb__do_perror(NULL,errval);
+			goto err;
+		}
+		if((mtxAp = rsb__do_mtx_alloc_from_coo_inplace(VA,IA,JA,nnz,typecode,m,k,RSB_DEFAULT_ROW_BLOCKING,RSB_DEFAULT_COL_BLOCKING,flags,&errval))==NULL)
+		{
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+			// FIXME: incomplete error handling
+		}
+		if(mtxAp)
+			RSB_DO_FLAG_DEL(mtxAp->flags,RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS);
+		*mtxApp = mtxAp;
+		goto ok;
+	}
+err:
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_CONDITIONAL_FREE(JA);
+	RSB_CONDITIONAL_FREE(VA);
+ok:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_bool_t rsb__are_coo_matrices_equal(const struct rsb_coo_matrix_t *cm1, const struct rsb_coo_matrix_t *cm2)
+{
+	/* this is a debug routine and may print internal stuff */
+	const rsb_bool_t no = RSB_BOOL_FALSE;
+	const rsb_bool_t yes = RSB_BOOL_TRUE;
+	rsb_nnz_idx_t nnz;
+#if (RSB_WANT_VERBOSE_MESSAGES || 1)
+#define	RSB_GOTO_DIFFERING	{ RSB_PERR_GOTO(differing,RSB_ERRM_ES);}
+#else /* RSB_WANT_VERBOSE_MESSAGES */
+#define	RSB_GOTO_DIFFERING	{goto differing;}
+#endif /* RSB_WANT_VERBOSE_MESSAGES */
+	if( cm1 ==  cm2)
+		goto equal;
+	if(!cm1 || !cm2)
+		RSB_GOTO_DIFFERING
+	nnz = cm1->nnz;
+	if( RSB_INVALID_NNZ_INDEX(nnz) )
+		RSB_GOTO_DIFFERING
+	if(cm1->nr!= cm2->nr)
+		RSB_GOTO_DIFFERING
+	if(cm1->nc!= cm2->nc)
+		RSB_GOTO_DIFFERING
+	if(cm1->nnz!= cm2->nnz)
+		RSB_GOTO_DIFFERING
+	if(cm1->typecode!= cm2->typecode)
+		RSB_GOTO_DIFFERING
+	//if((!cm1->IA)&&(!cm2->IA)) return no;
+	//if((!cm1->IA)||(!cm2->IA)) return no;
+	//else
+		if((cm1->IA)&&(cm2->IA))
+		if(RSB_MEMCMP(cm1->IA,cm2->IA,sizeof(rsb_coo_idx_t)*nnz))
+			RSB_GOTO_DIFFERING
+	//if((!cm1->JA)&&(!cm2->JA)) return no;
+	//if((!cm1->JA)||(!cm2->JA)) return no;
+	//else
+		if((cm1->JA)&&(cm2->JA))
+		if(RSB_MEMCMP(cm1->JA,cm2->JA,sizeof(rsb_coo_idx_t)*nnz))
+			RSB_GOTO_DIFFERING
+	//if((!cm1->VA)&&(!cm2->VA)) return no;
+	//if((!cm1->VA)||(!cm2->VA)) return no;
+	//else
+		if((cm1->VA)&&(cm2->VA))
+		{
+#if 1
+			if(rsb__do_are_same(cm1->VA,cm2->VA, nnz,cm1->typecode, 1, 1))
+				RSB_GOTO_DIFFERING
+#else
+			if(RSB_MEMCMP(cm1->VA,cm2->VA,RSB_SIZEOF(cm1->typecode)*nnz)) /* This is too strict: for it, -0.0 != 0.0 */
+				RSB_GOTO_DIFFERING
+#endif
+		}
+
+
+equal:
+	return yes;
+differing:
+#if RSB_ALLOW_STDOUT
+#if (RSB_WANT_VERBOSE_MESSAGES || 1)
+	if(cm1)RSB_STDOUT_COO_MATRIX_SUMMARY(cm1);
+	if(cm2)RSB_STDOUT_COO_MATRIX_SUMMARY(cm2);
+#endif /* RSB_WANT_VERBOSE_MESSAGES */
+#endif /* RSB_ALLOW_STDOUT */
+	return no;
+#undef	RSB_GOTO_DIFFERING
+}
+
+static rsb_bool_t rsb__is_coo_matrix_empty(const struct rsb_coo_matrix_t *cm, rsb_flags_t flags)
+{
+	if(!cm)
+		return RSB_BOOL_FALSE;
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_UNIT_DIAG_IMPLICIT))
+		return RSB_BOOL_FALSE;
+	if(cm->nnz==0)
+		return RSB_BOOL_TRUE;
+	return (rsb_check_for_nonzeros(cm->VA,cm->nnz,cm->typecode)==0)?RSB_BOOL_TRUE:RSB_BOOL_FALSE;
+}
+
+rsb_bool_t rsb__are_coo_matrices_both_empty(const struct rsb_coo_matrix_t *cm1, rsb_flags_t flags1, const struct rsb_coo_matrix_t *cm2, rsb_flags_t flags2)
+{
+	rsb_bool_t im1e = RSB_BOOL_FALSE,im2e = RSB_BOOL_FALSE,abme = RSB_BOOL_FALSE;
+	im1e = rsb__is_coo_matrix_empty(cm1,flags1);
+	im2e = rsb__is_coo_matrix_empty(cm2,flags2);
+	abme = RSB_BOOL_OR(im1e,im2e);
+	return abme;
+}
+
+rsb_bool_t rsb__are_coo_matrices_equal_or_both_empty(const struct rsb_coo_matrix_t *cm1, rsb_flags_t flags1, const struct rsb_coo_matrix_t *cm2, rsb_flags_t flags2)
+{
+	rsb_bool_t acme = rsb__are_coo_matrices_equal(cm1,cm2);
+	if(!acme)
+		acme = rsb__are_coo_matrices_both_empty(cm1,flags1,cm2,flags2);
+	return acme;
+}
+
+rsb_err_t rsb__get_row_dense(const struct rsb_mtx_t * mtxAp, void* row, rsb_coo_idx_t i )
+{
+	/*!
+	 * \ingroup gr_mops
+	 * Will write the entire row i of the matrix into the row vector.
+	 *
+	 * \param i the specified row
+	 * \param row an already allocated vector of the same type as the matrix
+	 * \param mtxAp is a valid pointer to a rsb_mtx_t structure
+	 * \return RSB_ERR_NO_ERROR on correct operation, an error code (see \ref errors_section) otherwise.
+	 *
+	 * FIXME: this function is unfinished.
+	 * */
+	if(!mtxAp)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO( row , mtxAp->el_size * mtxAp->nc);
+
+	return rsb__do_get_row_dense(mtxAp, row, i );
+}
+
+rsb_err_t rsb_spmv_unua(const struct rsb_mtx_t * mtxAp, const void * x, void * y, rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_mops
+	 * computes \f$y \leftarrow y - op(A) \cdot x \f$
+	 * \return RSB_ERR_NO_ERROR on correct operation, an error code (see \ref errors_section) otherwise.
+	 * 
+	 * */
+	rsb_aligned_t mone[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb__util_set_area_to_converted_integer(&mone[0],mtxAp->typecode,-1);
+	return rsb_do_spmv_general(transA,&mone[0],mtxAp,x,1,NULL,y,1,RSB_OP_FLAG_DEFAULT RSB_DEFAULT_OUTER_NRHS_SPMV_ARGS);
+}
+
+rsb_err_t rsb_spmv_uaua(const struct rsb_mtx_t * mtxAp, const void * rhs, void * out, rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_mops
+	 * computes \f$y \leftarrow y + op(A) \cdot x \f$
+	 * \return RSB_ERR_NO_ERROR on correct operation, an error code (see \ref errors_section) otherwise.
+	 * 
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	errval = rsb_do_spmv_general(transA,NULL,mtxAp,rhs,1,NULL,out,1,RSB_OP_FLAG_DEFAULT RSB_DEFAULT_OUTER_NRHS_SPMV_ARGS);
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb_spmv_uauz(const struct rsb_mtx_t * mtxAp, const void * rhs, void * out, rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_mops
+	 * computes \f$y \leftarrow op(A) \cdot x \f$
+	 * \return RSB_ERR_NO_ERROR on correct operation, an error code (see \ref errors_section) otherwise.
+	 * */
+
+	/* FIXME : TEMPORARY */
+	rsb_aligned_t zero[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+
+	if(!mtxAp)
+		return RSB_ERR_BADARGS;
+	rsb__util_set_area_to_converted_integer(&zero[0],mtxAp->typecode,0);
+	return rsb_do_spmv_general(transA,NULL,mtxAp,rhs,1,&zero[0],out,1,RSB_OP_FLAG_DEFAULT RSB_DEFAULT_OUTER_NRHS_SPMV_ARGS);
+}
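+
+/* The three wrappers above only differ in the alpha/beta they feed to the
+ * general y <- beta*y + alpha*op(A)*x kernel:
+ *   rsb_spmv_uaua: alpha = +1, beta = 1 (accumulate into y),
+ *   rsb_spmv_unua: alpha = -1, beta = 1 (subtract from y),
+ *   rsb_spmv_uauz: alpha = +1, beta = 0 (overwrite y). */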
+
+#if 0
+static rsb_err_t rsb_spmv_sxsx(const struct rsb_mtx_t * mtxAp, const void * x, void * y, const void *alphap, const void * betap, rsb_trans_t transA, rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+{
+	/*!
+	 * \ingroup gr_mops
+	 * computes \f$y \leftarrow \beta \cdot y + \alpha\cdot A\cdot x\f$
+	 * \return RSB_ERR_NO_ERROR on correct operation, an error code (see \ref errors_section) otherwise.
+	 */
+	if(!alphap || !betap)
+		return RSB_ERR_BADARGS;
+	return rsb_do_spmv_general(transA,alphap,mtxAp,x,incx,betap,y,incy,RSB_OP_FLAG_DEFAULT RSB_DEFAULT_OUTER_NRHS_SPMV_ARGS);
+}
+#endif
+
+
+
+struct rsb_mtx_t * rsb__load_matrix_file_as_binary(const rsb_char_t * filename, rsb_err_t *errvalp)
+{
+	/*!
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t*mtxAp = NULL; 
+	if(!errvalp || !filename)
+	{
+		errval = RSB_ERR_BADARGS;
+	}
+	else
+		errval = rsb__do_load_matrix_file_as_binary(&mtxAp,filename);
+	RSB_CONDITIONAL_ERRPSET(errvalp,errval);
+	return mtxAp;
+}
+
+rsb_err_t rsb__do_spsm(rsb_trans_t transT, const void * alphap, const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t nrhs, rsb_flags_t order, const void * betap, const void * b, rsb_nnz_idx_t ldb, void * c, rsb_nnz_idx_t ldc)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * b is the right-hand side multivector with nrhs columns and leading dimension ldb; c is the solution multivector with leading dimension ldc.
+	 * \return RSB_ERR_NO_ERROR on correct operation, an error code (see \ref errors_section) otherwise.
+	 * */
+	 /* FIXME : and error detection ? **/
+	 /* FIXME : UNTESTED, UNFINISHED **/
+	rsb_coo_idx_t l = 0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+#if RSB_ALLOW_ZERO_DIM
+	if(RSB_ANY_MTX_DIM_ZERO(mtxAp))
+	{
+		goto err; /* FIXME: skipping further checks */
+	}
+#endif
+
+	if(!mtxAp || !b || !ldb || !c || !ldc || !nrhs /*  transA*/ || !alphap || !betap ||
+		( order != RSB_FLAG_WANT_COLUMN_MAJOR_ORDER && order != RSB_FLAG_WANT_ROW_MAJOR_ORDER ) )
+	{
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+
+
+	if( 0
+#if RSB_ENABLE_INNER_NRHS_SPSV
+		|| ( rsb_global_session_handle.want_outer_spmm==0 )
+#endif
+	  ) /* 0 == yes TODO: need want_outer_spsm here */
+	{
+		size_t outnri = ldc, rhsnri = ldb;
+
+		if(order == RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)
+			RSB_DO_ERROR_CUMULATE(errval,rsb__do_spsv_general(transT,alphap,mtxAp,b,   1,c,   1,RSB_OP_FLAG_DEFAULT RSB_INNER_NRHS_SPSV_ARGS_IDS));
+		else
+			RSB_DO_ERROR_CUMULATE(errval,rsb__do_spsv_general(transT,alphap,mtxAp,b,nrhs,c,nrhs,RSB_OP_FLAG_DEFAULT RSB_INNER_NRHS_SPSV_ARGS_IDS));
+	}
+	else
+	{
+		if(order == RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)
+			/*  column major */
+			for(l=0;l<nrhs;++l)
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_spsv(transT,alphap,mtxAp, ((const rsb_byte_t*)b)+(mtxAp->el_size*ldb)*l,1, ((rsb_byte_t*)c)+(mtxAp->el_size*ldc)*l,1));
+		else
+			/*  row major */
+			for(l=0;l<nrhs;++l)
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_spsv(transT,alphap,mtxAp, ((const rsb_byte_t*)b)+(mtxAp->el_size*l),ldb, ((rsb_byte_t*)c)+(mtxAp->el_size*l),ldc));
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
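+
+/* Layout sketch for the fallback loops above: in the column major case,
+ * right-hand side l occupies b[l*ldb .. l*ldb+nr-1], so each rsb__do_spsv()
+ * call gets base pointer b + el_size*ldb*l and unit stride; in the row
+ * major case the base is b + el_size*l and consecutive elements of one
+ * right-hand side are ldb entries apart. */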
+
+rsb_err_t rsb__util_sort_row_major_buffered(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t m, rsb_coo_idx_t k,  rsb_type_t typecode , rsb_flags_t flags, void * WA, size_t wb )
+{
+	/*!
+
+	   Will sort as CSR (or CSC) the given coefficients.
+	   Will ignore any fancy sorting flags.
+	    
+	   \param \rsb_wr_va_ia_ja_desc_msg
+	   \param \rsb_flags_inp_param_msg
+	   \param \rsb_nnz_inp_param_msg
+	   \param \rsb_nrows_inp_param_msg
+	   \param \rsb_ncols_inp_param_msg
+	   \param \rsb_type_param_msg
+	   \return \rsberrcodemsg
+	*/
+	// FIXME : should handle error conditions
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	//struct rsb_mtx_partitioning_info_t pinfop;
+
+	if(RSB_MATRIX_UNSUPPORTED_TYPE(typecode))
+	{
+		errval = RSB_ERR_UNSUPPORTED_TYPE;goto err;
+	}
+#if 0
+	pinfop.rpntr = rsb__util_get_partitioning_array( 1, m , &pinfop.M_b, flags), 
+	pinfop.cpntr = rsb__util_get_partitioning_array( 1, k , &pinfop.K_b, flags),
+	rsb__pinfo_init( &pinfop, pinfop.M_b, pinfop.K_b, pinfop.rpntr, pinfop.cpntr, m,k);/* FIXME : is this ok ? */
+	errval = rsb__do_util_sortcoo(VA,IA,JA,m,k,nnz,typecode,&pinfop,flags,NULL,0);
+	RSB_CONDITIONAL_FREE(pinfop.rpntr);
+	RSB_CONDITIONAL_FREE(pinfop.cpntr);
+#else
+	RSB_DO_FLAG_SUBST(flags,RSB_INTERNAL_FLAG_CSR_SORTING_MASK,RSB_FLAG_WANT_BCSS_STORAGE|RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT);
+	errval = rsb__do_util_sortcoo(VA,IA,JA,m,k,nnz,typecode,NULL,flags,WA,wb);
+#endif
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+const rsb_char_t *rsb__basename(const rsb_char_t *path)
+{
+	rsb_int_t sl;
+
+	if(!path)
+		return path;
+	sl = rsb__util_strlen(path);
+	while(sl>0 && path[sl-1]!=RSB_DIR_SEPARATOR)
+		--sl;
+	return path+sl;
+}
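+
+/* Example (assuming RSB_DIR_SEPARATOR == '/'):
+ *   rsb__basename("/tmp/A.mtx") points at "A.mtx",
+ *   rsb__basename("A.mtx")      is returned unchanged,
+ *   rsb__basename(NULL)         yields NULL.
+ * Unlike POSIX basename(), the input string is never modified. */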
+
+rsb_err_t rsb__do_set_elements(struct rsb_mtx_t * mtxAp, const void * VA, const rsb_coo_idx_t *IA, const rsb_coo_idx_t *JA, rsb_nnz_idx_t nnz, rsb_flags_t flags)
+{
+	rsb_coo_idx_t k;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t ifo = ( flags & RSB_FLAG_FORTRAN_INDICES_INTERFACE )?1:0;
+
+	if(!IA || !VA || !JA || !mtxAp)
+	{
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+
+#if RSB_WANT_COO_BEGIN 
+	if( RSB_MTX_HBDF( mtxAp) )
+	{
+		errval = rsb__BLAS_Xuscr_insert_entries(RSB_MTX_HBDFH(mtxAp),nnz,VA,IA,JA);
+		goto err;
+	}
+#endif /* RSB_WANT_COO_BEGIN */
+	for(k=0;k<nnz;++k)
+		errval |= rsb__do_upd_coo_element(mtxAp,((const rsb_char_t*)VA)+mtxAp->el_size*k,IA[k]-ifo,JA[k]-ifo,flags);
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_spmm(rsb_trans_t transA, const void * alphap, const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t nrhs, rsb_flags_t order, const void * b, rsb_nnz_idx_t ldb, const void * betap, void * c, rsb_nnz_idx_t ldc, enum rsb_op_flags_t op_flags)
+{
+	/*!
+	   \ingroup rsb_doc_matrix_operations
+
+	   FIXME: this function name is WRONG
+
+	   b is the right-hand side multivector with nrhs columns and leading dimension ldb; c is the result multivector with leading dimension ldc
+
+	   \return \rsberrcodemsg
+	 */
+	 /* FIXME : and error detection ?
+	  * e.g.: 
+	    if(order == RSB_FLAG_WANT_COLUMN_MAJOR_ORDER && ldb<mtxAp->nr && transA=...)
+	  * **/
+	 /* TODO: incx,incy  **/
+	rsb_coo_idx_t l = 0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	const rsb_coo_idx_t incx = 1,incy = 1;
+	const rsb_byte_t * bp = b;
+       	rsb_byte_t * cp = c;
+
+#if RSB_ALLOW_ZERO_DIM 
+	if(RSB_ANY_MTX_DIM_ZERO(mtxAp))
+		goto err; /* FIXME: skipping checks on ldB, ldC, op_flags*/
+#endif
+
+	if(!mtxAp || !b || !c || !ldb || !ldc || !nrhs /*  transA*/ || !alphap || !betap ||
+		( order != RSB_FLAG_WANT_COLUMN_MAJOR_ORDER && order != RSB_FLAG_WANT_ROW_MAJOR_ORDER ) )
+	{
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+
+#ifdef RSB_HAVE_OPTYPE_SPMM_AZ
+	/*  
+		return ...
+	 SPMM_AZ is not yet complete
+	 */
+#endif /* RSB_HAVE_OPTYPE_SPMM_AZ */
+	if( rsb_global_session_handle.want_outer_spmm==0 ) /* 0 == yes */
+	{
+		/* inner loop: fast */
+		if(order == RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)
+			RSB_DO_ERROR_CUMULATE(errval,rsb_do_spmv_general(transA,alphap,mtxAp, bp,incx, betap, cp,incy,op_flags,nrhs,ldc, ldb  ));
+		else
+			RSB_DO_ERROR_CUMULATE(errval,rsb_do_spmv_general(transA,alphap,mtxAp, bp,ldb , betap, cp, ldc ,op_flags,nrhs,incx,incy));
+	}
+	else
+	{
+		/* outer loop: slow */
+		if(order == RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)
+			for(l=0;l<nrhs;++l)
+				RSB_DO_ERROR_CUMULATE(errval,rsb_do_spmv_general(transA,alphap,mtxAp,bp+(mtxAp->el_size*ldb)*l,incx, betap, cp+(mtxAp->el_size*ldc)*l,incy,op_flags RSB_DEFAULT_OUTER_NRHS_SPMV_ARGS));
+		else
+			for(l=0;l<nrhs;++l)
+				RSB_DO_ERROR_CUMULATE(errval,rsb_do_spmv_general(transA,alphap,mtxAp,bp+(mtxAp->el_size*l),ldb , betap, cp+(mtxAp->el_size*l),ldc ,op_flags RSB_DEFAULT_OUTER_NRHS_SPMV_ARGS));
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+
+}
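+
+/* Stride sketch for the outer loop above (row major case): column l of the
+ * nrhs right-hand sides starts at byte offset el_size*l in b with its
+ * elements ldb entries apart, and the corresponding result column starts
+ * at el_size*l in c with stride ldc. */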
+
+rsb_err_t rsb__do_spmm_general(const struct rsb_mtx_t * mtxAp, const void * b, void * c, const void *alphap, const void * betap, rsb_coo_idx_t incx, rsb_coo_idx_t incy, rsb_trans_t transA, enum rsb_op_flags_t op_flags, rsb_flags_t order,const rsb_int_t nrhs, const size_t outnri, const size_t rhsnri)
+{
+	/* */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(nrhs != 1)
+		errval = rsb__do_spmm(transA, alphap, mtxAp, nrhs, order, b, rhsnri, betap, c, outnri, op_flags);
+	else
+		errval = rsb_do_spmv_general(transA, alphap, mtxAp, b, incx, betap, c, incy, op_flags RSB_OUTER_NRHS_SPMV_ARGS_IDS);
+	return errval;
+}
+
+rsb_err_t rsb__do_transpose(struct rsb_mtx_t ** mtxApp, rsb_bool_t want_conj)
+{ 
+	// TODO: and what to do if data arrays are externally allocated ?
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t*tmatrix = NULL;
+	struct rsb_mtx_t*mtxAp = NULL;
+	struct rsb_coo_matrix_t coo;
+	struct rsb_mtx_t *fm = NULL;
+
+	if(!mtxApp || !*mtxApp)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err, RSB_ERRM_E_MTXAP);
+	}
+	mtxAp = *mtxApp;
+	RSB_DO_FLAG_FLIP_UPLO(mtxAp->flags);
+	RSB_INIT_CXX_FROM_MTX(&coo,mtxAp);
+	if(rsb__allocate_coo_matrix_t(&coo)!=&coo)
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	errval = rsb__do_get_rows_sparse(RSB_TRANSPOSITION_N,NULL,mtxAp,coo.VA,coo.IA,coo.JA,0,mtxAp->nr-1,&coo.nnz,RSB_FLAG_NOFLAGS);
+	if(RSB_SOME_ERROR(errval))
+	{
+		goto err;
+	}
+	fm = rsb__do_get_first_submatrix(mtxAp);
+	if(!fm)
+		goto err; // FIXME
+	if(want_conj)
+		errval = rsb__util_do_conjugate(coo.VA,coo.typecode,coo.nnz);
+	if(RSB_SOME_ERROR(errval))goto err;
+	RSB_SWAP(rsb_coo_idx_t*,coo.IA,coo.JA);
+	RSB_SWAP(rsb_coo_idx_t,coo.nr,coo.nc);
+	RSB_COO_MEMCPY_parallel(fm->VA,fm->bpntr,fm->bindx,coo.VA,coo.IA,coo.JA,0,0,coo.nnz,fm->el_size);
+ 	RSB_DO_FLAG_DEL(mtxAp->flags,RSB_FLAG_SORTED_INPUT);
+	tmatrix = rsb__mtx_alloc_inner(fm->VA,fm->bpntr,fm->bindx,coo.nnz,0,0,coo.typecode,coo.nr,coo.nc,RSB_DEFAULT_ROW_BLOCKING,RSB_DEFAULT_COL_BLOCKING,mtxAp->flags,&errval);
+ 	RSB_DO_FLAG_ADD(mtxAp->flags,RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS);
+	rsb__destroy_inner(mtxAp);
+	rsb__destroy_coo_matrix_t(&coo);
+	*mtxApp = tmatrix;
+	
+	//RSB_ERROR("FIXME: should implement transpose functionality !");
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_get_elements(const struct rsb_mtx_t * mtxAp, void * VA, const rsb_coo_idx_t *IA, const rsb_coo_idx_t *JA, rsb_nnz_idx_t nnz, rsb_flags_t flags)
+{
+	rsb_coo_idx_t k;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t ifo = ( flags & RSB_FLAG_FORTRAN_INDICES_INTERFACE )?1:0;
+
+	if(!IA || !VA || !JA || !mtxAp)
+	{
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+
+	for(k=0;k<nnz;++k)
+		errval |= rsb__do_get_coo_element(mtxAp,((rsb_char_t*)VA)+mtxAp->el_size*k,IA[k]-ifo,JA[k]-ifo);
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_set_initopt_as_string(const rsb_char_t *opn, const rsb_char_t *arg)
+{
+	/* FIXME: document me */
+	return rsb__stropts_set(opn,arg);
+}
+
+rsb_err_t rsb__do_lib_get_info_str(int what, rsb_char_t* sbuf, size_t buflen)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	size_t rl = buflen;
+	
+	if( sbuf == NULL )
+	{
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+
+	sbuf[0] = RSB_NUL;
+#ifdef RSB_CC    
+	snprintf(sbuf+(buflen-rl),rl,"CC=%s ",    RSB_CC    );
+#else /* RSB_CC */
+	errval |= RSB_ERR_GENERIC_ERROR;
+#endif /* RSB_CC */
+	rl -= strlen(sbuf);
+#ifdef RSB_CFLAGS
+	snprintf(sbuf+(buflen-rl),rl,"CFLAGS=%s",RSB_CFLAGS);
+#else /* RSB_CFLAGS */
+	errval |= RSB_ERR_GENERIC_ERROR;
+#endif /* RSB_CFLAGS */
+err:
+	return errval;
+}
+
+struct rsb_mtx_t * rsb__do_mtx_alloc_from_coo_const(const void *VA, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_blk_idx_t brA, rsb_blk_idx_t bcA, rsb_flags_t flags, rsb_err_t * errvalp)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	void *VA_ = NULL;
+	rsb_coo_idx_t *IA_ = NULL, *JA_ = NULL;
+	struct rsb_mtx_t * mtxAp = NULL;
+
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS))
+	{
+		errval = /*RSB_ERR_BADARGS|*/RSB_ERR_COULD_NOT_HONOUR_EXTERNALLY_ALLOCATION_FLAGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_CNHEAF);
+	}
+
+	RSB_IF_NOFLAGS_SET_DEFAULT_MATRIX_FLAGS(flags);
+
+	if(nnzA>0)
+	{
+		rsb_coo_idx_t offi = 0;
+
+		if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE))
+			offi = 1, RSB_DO_FLAG_DEL(flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE);
+		errval = rsb_util_coo_alloc_copy_and_stats(&VA_,&IA_,&JA_,VA,IA,JA,nrA?NULL:&nrA,ncA?NULL:&ncA,nnzA,0,typecode,offi,0,RSB_FLAG_NOFLAGS,&flags);
+
+		if(!VA_ || !IA_ || !JA_)
+		{
+			errval = RSB_ERR_ENOMEM;
+			RSB_PERR_GOTO(err,RSB_ERRM_E_VIJ);
+		}
+	}
+	else
+	{
+#if !RSB_ALLOW_EMPTY_MATRICES
+		/* FIXUP CASE FOR 0-NNZ MATRICES AND IMPLICIT DIAGONAL */
+		if(RSB_INVALID_NNZ_COUNT_FOR_FLAGS(nnzA,flags))
+		{
+			errval = RSB_ERR_BADARGS;
+			RSB_PERR_GOTO(err,RSB_ERRM_CBAEM);
+		}
+#endif /* RSB_ALLOW_EMPTY_MATRICES */
+	}
+	RSB_IF_NOFLAGS_SET_DEFAULT_MATRIX_FLAGS(flags);
+
+	mtxAp = rsb__mtx_alloc_inner(VA_,IA_,JA_,nnzA,0,0,typecode,nrA,ncA,brA,bcA,flags,&errval);
+	if(mtxAp && errval == RSB_ERR_NO_ERROR)
+		goto ok;
+	/* FIXME: and if !matrix but errval ? */
+err:
+	RSB_CONDITIONAL_FREE(IA_);
+	RSB_CONDITIONAL_FREE(JA_);
+	RSB_CONDITIONAL_FREE(VA_);
+ok:
+	RSB_CONDITIONAL_ERRPSET(errvalp,errval);
+	return mtxAp;
+}
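+
+/* A hedged usage sketch (hypothetical caller; assumes the double typecode
+ * was configured in at build time): */
+#if 0
+{
+	const double VA[] = { 1.0, 2.0 };
+	const rsb_coo_idx_t IA[] = { 0, 1 }, JA[] = { 0, 1 };
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = rsb__do_mtx_alloc_from_coo_const(
+		VA,IA,JA,2,RSB_NUMERICAL_TYPE_DOUBLE,2,2,
+		RSB_DEFAULT_ROW_BLOCKING,RSB_DEFAULT_COL_BLOCKING,
+		RSB_FLAG_NOFLAGS,&errval);
+	/* input arrays are copied, so the caller retains ownership */
+	if(mtxAp)
+		rsb__do_mtx_free(mtxAp);
+}
+#endif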
+
+struct rsb_mtx_t * rsb__do_mtx_alloc_from_coo_inplace(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_blk_idx_t brA, rsb_blk_idx_t bcA, rsb_flags_t flags, rsb_err_t * errvalp)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_coo_idx_t roff = 0,coff = 0;
+
+	RSB_ASSERT(VA || nnzA == 0 );
+	RSB_ASSERT(IA || nnzA == 0 );
+	RSB_ASSERT(JA || nnzA == 0 );
+
+	if(RSB_MATRIX_UNSUPPORTED_TYPE(typecode))
+	{
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		RSB_PERR_GOTO(err,RSB_ERRM_EM);
+	}
+	RSB_IF_NOFLAGS_SET_DEFAULT_MATRIX_FLAGS(flags);
+	RSB_DO_FLAG_ADD(flags,RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS);
+	if( RSB_DO_FLAG_HAS(flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE))
+		roff = -1,coff = -1, RSB_DO_FLAG_DEL(flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE);
+	mtxAp = rsb__mtx_alloc_inner(VA,IA,JA,nnzA,roff,coff,typecode,nrA,ncA,brA,bcA,flags,&errval);
+err:
+	RSB_CONDITIONAL_ERRPSET(errvalp,errval);
+	return mtxAp;
+}
+
+struct rsb_mtx_t * rsb__do_mtx_alloc_from_csr_const(const void *VA, const rsb_coo_idx_t * RP, const rsb_coo_idx_t * JA, rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_blk_idx_t brA, rsb_blk_idx_t bcA, rsb_flags_t flags, rsb_err_t * errvalp)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	void *VA_ = NULL;
+	rsb_coo_idx_t *IA_ = NULL,*JA_ = NULL;
+	struct rsb_mtx_t * mtxAp = NULL;
+	size_t cnnz = RSB_MAX(nnzA,nrA+1), cis = sizeof(rsb_coo_idx_t),nvs = RSB_SIZEOF(typecode);
+	rsb_coo_idx_t roff = 0,coff = 0;
+
+	RSB_IF_NOFLAGS_SET_DEFAULT_MATRIX_FLAGS(flags);
+
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS))
+	{
+		errval = /*RSB_ERR_BADARGS|*/RSB_ERR_COULD_NOT_HONOUR_EXTERNALLY_ALLOCATION_FLAGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_BFEANS);
+	}
+
+	IA_ = rsb__clone_area_with_extra(RP,cis*(nrA+1),0,cis*(cnnz-(nrA+1)));
+	JA_ = rsb__clone_area_with_extra(JA,cis*(nnzA ),0,cis*(cnnz-nnzA));
+	VA_ = rsb__clone_area_with_extra(VA,nvs*(nnzA ),0,nvs*(cnnz-nnzA));
+
+	if(!VA_ || !IA_ || !JA_)
+	{
+		errval = RSB_ERR_ENOMEM;
+		RSB_PERR_GOTO(err,RSB_ERRM_E_VIJ);
+	}
+	errval = rsb__util_uncompress_row_pointers_array((rsb_coo_idx_t*)RP,nrA,flags,RSB_FLAG_C_INDICES_INTERFACE,IA_);
+
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_EM);
+	}
+	if( RSB_DO_FLAG_HAS(flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE))
+		coff = -1, RSB_DO_FLAG_DEL(flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE);
+
+	RSB_DEBUG_ASSERT(roff>=-1 && coff>=-1); /* for Fortran */
+	RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORTED_INPUT);
+	mtxAp = rsb__mtx_alloc_inner(VA_,IA_,JA_,nnzA,roff,coff,typecode,nrA,ncA,brA,bcA,flags,&errval);
+err:
+	RSB_CONDITIONAL_ERRPSET(errvalp,errval);
+	return mtxAp;
+}
+
+struct rsb_mtx_t * rsb__do_mtx_alloc_from_csc_const(const void *VA, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * CP, rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_blk_idx_t brA, rsb_blk_idx_t bcA, rsb_flags_t flags, rsb_err_t * errvalp)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	void *VA_ = NULL;
+	rsb_coo_idx_t *IA_ = NULL,*JA_ = NULL;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_nnz_idx_t maxdim = RSB_MAX(nnzA,RSB_MAX(nrA+1,ncA+1));
+
+	RSB_IF_NOFLAGS_SET_DEFAULT_MATRIX_FLAGS(flags);
+
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS))
+	{
+		errval = /*RSB_ERR_BADARGS|*/RSB_ERR_COULD_NOT_HONOUR_EXTERNALLY_ALLOCATION_FLAGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_EM);
+	}
+
+	if(nnzA>0)
+	{
+		rsb_time_t dt;
+		rsb_coo_idx_t offi = 0;
+
+		if(RSB_SOME_ERROR(errval = rsb_util_coo_alloc(&VA_,&IA_,&JA_,maxdim,typecode,RSB_BOOL_FALSE)))
+		{
+			RSB_PERR_GOTO(err,RSB_ERRM_EM);
+		}
+		if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE))
+			offi = 1, RSB_DO_FLAG_DEL(flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE);
+		//errval=
+		dt = - rsb_time();
+		rsb_util_csc2csr(VA,IA,CP,VA_,IA_,JA_,nrA,ncA,nnzA,typecode,offi,0,&flags);/* FIXME: assembly shall give the user chance to pass offo and offi */
+		dt += rsb_time();
+		//printf("csc 2 csr took %lg s\n",dt);
+	}
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_EM);
+	}
+	RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORTED_INPUT);
+	mtxAp = rsb_mtx_alloc_from_csr_inplace (VA_,IA_,JA_,nnzA,typecode,nrA,ncA,brA,bcA,flags,errvalp);
+	if(mtxAp)
+		RSB_DO_FLAG_DEL(mtxAp->flags,RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS);
+err:
+	RSB_CONDITIONAL_ERRPSET(errvalp,errval);
+	return mtxAp;
+}
+
+
+/* @endcond */
diff --git a/rsb_internals.h b/rsb_internals.h
new file mode 100644
index 0000000..5c4845d
--- /dev/null
+++ b/rsb_internals.h
@@ -0,0 +1,180 @@
+/*
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*
+ * @author Michele Martone
+ */
+#ifndef RSB_INTERNALS_H_INCLUDED
+#define RSB_INTERNALS_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "rsb_common.h"
+
+/**
+ * @file
+ * @brief
+ * Low level routines and tools for our sparse matrix formats implementations.
+ *
+ */
+rsb_bool_t rsb__are_coo_matrices_equal(const struct rsb_coo_matrix_t *cm1, const struct rsb_coo_matrix_t *cm2);
+rsb_bool_t rsb__are_coo_matrices_both_empty(const struct rsb_coo_matrix_t *cm1, rsb_flags_t flags1, const struct rsb_coo_matrix_t *cm2, rsb_flags_t flags2);
+rsb_bool_t rsb__are_coo_matrices_equal_or_both_empty(const struct rsb_coo_matrix_t *cm1, rsb_flags_t flags1, const struct rsb_coo_matrix_t *cm2, rsb_flags_t flags2);
+void * rsb__destroy_coo_matrix_t(struct rsb_coo_matrix_t *cmp);
+void * rsb__allocate_coo_matrix_t(struct rsb_coo_matrix_t *cmp);
+void * rsb__xallocate_coo_matrix_t(struct rsb_coo_matrix_t *cmp, rsb_bool_t want_calloc, rsb_flags_t flags);
+void * rsb__callocate_coo_matrix_t(struct rsb_coo_matrix_t *cmp);
+void * rsb__reallocate_coo_matrix_t(struct rsb_coo_matrix_t *cmp, rsb_nnz_idx_t nnnz);
+
+
+/*
+ * Please note that the sole use of this function is the major bottleneck during matrix creation.
+ * When thinking about optimizing matrix creation, come back here: this routine eats up to 90% 
+ * of the time required for matrix creation.
+ * */
+int rsb__nnz_coord_compar(const void *key, const void *am);
+
+
+/* initialization, destroying */
+void * rsb__init_options_t(struct rsb_options_t *o);
+void * rsb__init_struct(struct rsb_mtx_t *mtxAp);
+rsb_err_t rsb__fill_struct(struct rsb_mtx_t *mtxAp, void * VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_type_t typecode, rsb_flags_t flags);
+void * rsb__fill_coo_struct(struct rsb_coo_matrix_t *mtxAp, void * VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnz, rsb_type_t typecode);
+void * rsb__init_blank_pointers(struct rsb_mtx_t *mtxAp);
+void * rsb__transpose_coo_matrix_t(struct rsb_coo_matrix_t *cmp);
+void * rsb__do_mtx_free(struct rsb_mtx_t *mtxAp);
+size_t rsb__get_sizeof(const struct rsb_mtx_t *mtxAp );
+void * rsb__destroy_inner(struct rsb_mtx_t *mtxAp);
+void * rsb__destroy_options_t(struct rsb_options_t *o);
+rsb_err_t rsb__set_init_flags_and_stuff( struct rsb_mtx_t *mtxAp, struct rsb_options_t * o, const struct rsb_mtx_partitioning_info_t * pinfop, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnz, rsb_nnz_idx_t block_count, rsb_nnz_idx_t element_count, rsb_type_t typecode, rsb_flags_t flags );
+rsb_err_t rsb__do_set_init_storage_flags(struct rsb_mtx_t *mtxAp, rsb_flags_t flags);
+
+/* allocation */
+rsb_bitmap_data_t * rsb__allocate_bitmap(rsb_blk_idx_t rows, rsb_blk_idx_t cols);
+rsb_bitmap_data_t * rsb__allocate_bitvector(rsb_blk_idx_t bits);
+struct rsb_mtx_t * rsb__allocate_from_coo_sorted(const void *VA, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const rsb_nnz_idx_t nnz, const struct rsb_mtx_partitioning_info_t * pinfop, rsb_coo_idx_t m, rsb_coo_idx_t k, struct rsb_options_t * o, rsb_type_t typecode, rsb_flags_t flags, rsb_err_t *errvalp);
+struct rsb_mtx_t * rsb__allocate_css_from_coo_sorted(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, const rsb_nnz_idx_t nnz, const struct rsb_mtx_partitioning_info_t * pinfop, rsb_coo_idx_t m, rsb_coo_idx_t k, struct rsb_options_t * o, rsb_type_t typecode, rsb_flags_t flags, rsb_err_t *errvalp);
+rsb_err_t rsb__allocate_csr_arrays_from_coo_sorted(const void *VA, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const rsb_nnz_idx_t nnz, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_type_t typecode, void **VAp, rsb_coo_idx_t ** indxp, rsb_nnz_idx_t ** indptrp);
+rsb_err_t rsb__allocate_csc_arrays_from_coo_sorted(const void *VA, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const rsb_nnz_idx_t nnz, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_type_t typecode, void **VAp, rsb_coo_idx_t ** indxp, rsb_nnz_idx_t ** indptrp);
+
+/* bit handling */
+rsb_blk_idx_t rsb__bitmap_bit_count(const rsb_bitmap_data_t *bitmap, const rsb_blk_idx_t rows, const rsb_blk_idx_t cols);
+
+/* check */
+rsb_err_t rsb__recheck_insertion(const void *VA, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, const struct rsb_mtx_t *mtxAp, const struct rsb_options_t *o);
+const void * rsb__is_valid_options_t(const struct rsb_options_t *o, rsb_coo_idx_t m, rsb_coo_idx_t k);
+
+/* misc */
+void* rsb__get_block_address( rsb_blk_idx_t blockrow, rsb_blk_idx_t blockcolumn, const struct rsb_mtx_t *mtxAp);
+
+size_t rsb__get_g_rsb_memory_count(void);/* rsb_sys.c */
+
+rsb_err_t rsb__compute_partial_fillin_for_nnz_fractions(const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA,const  rsb_nnz_idx_t * nnz, const rsb_nnz_idx_t nnzn, struct rsb_mtx_partitioning_info_t * pinfop, size_t * element_countp, size_t * block_countp);
+rsb_err_t rsb__compute_partial_fillin_for_nnz_fraction(const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA,const  rsb_nnz_idx_t nnz, struct rsb_mtx_partitioning_info_t * pinfop, size_t * element_countp, size_t * block_countp);
+
+rsb_err_t rsb__fprint_matrix_implementation_code(const struct rsb_mtx_t *mtxAp, const rsb_char_t * op, rsb_flags_t inflags, FILE*fd);
+const rsb_char_t * rsb__sprint_matrix_implementation_code(const struct rsb_mtx_t *mtxAp, const rsb_char_t * op, rsb_flags_t inflags, rsb_char_t * buf);
+const rsb_char_t * rsb__sprint_matrix_implementation_code2(const struct rsb_mtx_t *mtxAp, rsb_char_t * buf, rsb_flags_t inflags);
+rsb_err_t rsb__util_get_bx_array(const rsb_char_t* optarg, int* bxlp, rsb_blk_idx_t **bxvp);
+rsb_nnz_idx_t rsb__util_atonnz(const rsb_char_t * optarg);
+rsb_long_t rsb__util_atol(const rsb_char_t *nptr);
+rsb_real_t rsb__util_atof(const rsb_char_t *nptr);
+int  rsb__util_atoi(const rsb_char_t *nptr);
+const rsb_char_t *rsb__basename(const rsb_char_t *path);
+size_t rsb__util_strlen(const rsb_char_t *s);
+rsb_err_t rsb__do_is_valid_pinfo_t(const struct rsb_mtx_partitioning_info_t * pinfop);
+rsb_err_t rsb__print_configuration_string(const char *pn, rsb_char_t * cs, rsb_bool_t wci);
+rsb_blk_idx_t rsb__recursive_middle_block_index(rsb_blk_idx_t i);
+rsb_err_t rsb__recursive_split_point_parms_get(const struct rsb_mtx_partitioning_info_t * pinfop, rsb_coo_idx_t * moff, rsb_coo_idx_t * koff);
+rsb_err_t rsb__do_get_blocking_from_pinfo(const struct rsb_mtx_partitioning_info_t * pinfop, rsb_flags_t flags, rsb_blk_idx_t *mbp, rsb_blk_idx_t *kbp);
+
+/* fill */
+rsb_err_t rsb__do_insert_sorted( struct rsb_mtx_t * mtxAp, const void *VA, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const rsb_nnz_idx_t nnz, const struct rsb_mtx_partitioning_info_t * pinfop);
+rsb_err_t rsb__do_account_sorted( struct rsb_mtx_t * mtxAp, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const rsb_nnz_idx_t nnz, const struct rsb_mtx_partitioning_info_t * pinfop, rsb_nnz_idx_t * elements_per_block_row, rsb_nnz_idx_t * blocks_per_block_row);
+rsb_long_t rsb__terminal_recursive_matrix_count(const struct rsb_mtx_t *mtxAp);
+rsb_err_t rsb__copy_css_arrays(const void *iVA, const rsb_coo_idx_t * iINDX, const rsb_coo_idx_t * iXA, const rsb_nnz_idx_t nnz, rsb_coo_idx_t X, rsb_type_t typecode, void *oVA, rsb_coo_idx_t * oINDX, rsb_nnz_idx_t * oXA);
+rsb_long_t rsb__terminal_recursive_matrix_count_with_flags(const struct rsb_mtx_t *mtxAp, rsb_flags_t flags);
+rsb_long_t rsb__terminal_recursive_matrix_count_with_flags_but(const struct rsb_mtx_t *mtxAp, rsb_flags_t flags, rsb_flags_t nflags);
+rsb_err_t rsb__recursive_middle_index(const struct rsb_mtx_partitioning_info_t * pinfop, rsb_coo_idx_t * M_bp, rsb_coo_idx_t * K_bp );
+
+rsb_trans_t rsb__do_transpose_transposition(rsb_trans_t transA);
+struct rsb_mtx_t * rsb__do_get_first_submatrix(const struct rsb_mtx_t *mtxAp);
+
+rsb_err_t rsb_spmm_inner(const struct rsb_mtx_t * mtxAp, const void * mrhs, void *mout, rsb_int_t bstride, rsb_int_t cstride, rsb_int_t nrhs, rsb_trans_t transA);
+rsb_long_t rsb__terminal_recursive_matrix_count_with_storage_and_flags(const struct rsb_mtx_t *mtxAp, rsb_fmt_t matrix_storage, rsb_flags_t flags);
+rsb_long_t rsb__terminal_recursive_matrix_count_with_storage_and_no_flags(const struct rsb_mtx_t *mtxAp, rsb_fmt_t matrix_storage, rsb_flags_t flags);
+rsb_err_t rsb__do_compute_terminal_nnz_min_max_avg_count(const struct rsb_mtx_t *mtxAp, rsb_nnz_idx_t * minnz, rsb_nnz_idx_t * maxnz, rsb_nnz_idx_t * avgnz);
+rsb_err_t rsb__do_compute_terminal_nnz_min_max_count(const struct rsb_mtx_t *mtxAp, rsb_nnz_idx_t * minnz, rsb_nnz_idx_t * maxnz);
+rsb_char_t rsb__do_get_symmetry_char(const struct rsb_mtx_t *mtxAp);
+rsb_flags_t rsb__do_flip_uplo_flags(rsb_flags_t flags);
+rsb_flags_t rsb__do_detect_and_add_triangular_flags(rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_flags_t flags);
+rsb_trans_t rsb__do_transposition_from_char(rsb_char_t tc);
+rsb_err_t rsb__do_load_matrix_file_as_matrix_market(struct rsb_mtx_t ** mtxApp, const rsb_char_t * filename, rsb_flags_t flags, rsb_type_t typecode);
+rsb_err_t rsb__get_row_dense(const struct rsb_mtx_t * mtxAp, void* row, rsb_coo_idx_t i );
+rsb_err_t rsb__do_cleanup_nnz(void * VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t roff, rsb_coo_idx_t coff, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t *onnzp, rsb_type_t typecode, rsb_flags_t flags);
+#if 0
+rsb_err_t rsb_spmv_aa  (const struct rsb_mtx_t * mtxAp, const void * x, void * y, rsb_trans_t transA);
+rsb_err_t rsb_spmv_sa  (const struct rsb_mtx_t * mtxAp, const void * x, void * y, const void *alphap, rsb_trans_t transA);
+rsb_err_t rsb_spmv_unua  (const struct rsb_mtx_t * mtxAp, const void * x, void * y, rsb_trans_t transA);
+rsb_err_t rsb_spmv_az  (const struct rsb_mtx_t * mtxAp, const void * x, void * y, rsb_trans_t transA);
+rsb_err_t rsb_spmv_uxux  (const struct rsb_mtx_t * mtxAp, const void * x, void * y, const void *alphap, const void * betap, rsb_trans_t transA);
+#endif
+/*rsb_err_t rsb_cssm(struct rsb_mtx_t * mtxAp, void * x, const void * y, void * alpha, void * beta, rsb_trans_t transT);*/
+#if 0
+rsb_err_t rsb_spmm_az (const struct rsb_mtx_t * mtxAp, const void * mrhs, void *mout, rsb_int_t bstride, rsb_int_t cstride, rsb_int_t nrhs, rsb_trans_t transA);
+
+rsb_err_t rsb_spmm_sxsx(const struct rsb_mtx_t * mtxAp, const void * b, void * c, rsb_nnz_idx_t ldb, rsb_nnz_idx_t ldc, rsb_coo_idx_t nrhs, rsb_trans_t transA, const void * alphap, const void * betap, rsb_flags_t order);
+#endif
+/* rsb_err_t rsb_spmv_sxsx(const struct rsb_mtx_t * mtxAp, const void * x, void * y, const void *alphap, const void * betap, rsb_trans_t transA, rsb_coo_idx_t incx, rsb_coo_idx_t incy); */
+
+/*rsb_err_t rsb__get_row_dense(const struct rsb_mtx_t * mtxAp, void* row, rsb_coo_idx_t i );*/
+/*rsb_err_t rsb_get_rows_dense(const struct rsb_mtx_t * mtxAp, void* row, rsb_coo_idx_t fr, rsb_coo_idx_t lr, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t *rnz, rsb_flags_t flags );*/
+rsb_err_t rsb__do_set_elements(struct rsb_mtx_t * mtxAp, const void * VA, const rsb_coo_idx_t *IA, const rsb_coo_idx_t *JA, rsb_nnz_idx_t nnz, rsb_flags_t flags);
+struct rsb_mtx_t * rsb__load_matrix_file_as_binary(const rsb_char_t * filename, rsb_err_t *errvalp);
+rsb_err_t rsb_spmv_uaua(const struct rsb_mtx_t * mtxAp, const void * rhs, void * out, rsb_trans_t transA);
+rsb_err_t rsb_spmv_uauz(const struct rsb_mtx_t * mtxAp, const void * rhs, void * out, rsb_trans_t transA);
+rsb_err_t rsb__do_spsm(rsb_trans_t transT, const void * alphap, const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t nrhs, rsb_flags_t order, const void * betap, const void * b, rsb_nnz_idx_t ldb, void * c, rsb_nnz_idx_t ldc);
+rsb_err_t rsb__print_matrix_unsorted_coo(const struct rsb_mtx_t *mtxAp);
+rsb_err_t rsb__util_sort_row_major_buffered(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t m, rsb_coo_idx_t k,  rsb_type_t typecode , rsb_flags_t flags, void * WA, size_t wb );
+rsb_err_t rsb__do_spmm(rsb_trans_t transA, const void * alphap, const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t nrhs, rsb_flags_t order, const void * b, rsb_nnz_idx_t ldb, const void * betap, void * c, rsb_nnz_idx_t ldc, enum rsb_op_flags_t op_flags);
+rsb_err_t rsb__do_spmm_general(const struct rsb_mtx_t * mtxAp, const void * b, void * c, const void *alphap, const void * betap, rsb_coo_idx_t incx, rsb_coo_idx_t incy, rsb_trans_t transA, enum rsb_op_flags_t op_flags, rsb_flags_t order,const rsb_int_t nrhs, const size_t outnri, const size_t rhsnri);
+rsb_err_t rsb__do_transpose(struct rsb_mtx_t ** mtxApp, rsb_bool_t want_conj);
+rsb_err_t rsb__do_get_elements(const struct rsb_mtx_t * mtxAp, void * VA, const rsb_coo_idx_t *IA, const rsb_coo_idx_t *JA, rsb_nnz_idx_t nnz, rsb_flags_t flags);
+rsb_err_t rsb__stropts_set(const rsb_char_t *opn, const rsb_char_t *arg);/* FIXME: in stropts.c */
+rsb_err_t rsb__do_set_initopt_as_string(const rsb_char_t *opn, const rsb_char_t *arg);
+rsb_err_t rsb__do_get_matrix_info_from_string(const struct rsb_mtx_t *mtxAp, const rsb_char_t *mis, void* info, size_t buflen);
+rsb_err_t rsb__do_lib_get_info_str(int what, rsb_char_t* sbuf, size_t buflen);
+struct rsb_mtx_t * rsb__do_mtx_alloc_from_coo_inplace(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_blk_idx_t brA, rsb_blk_idx_t bcA, rsb_flags_t flags, rsb_err_t * errvalp);
+struct rsb_mtx_t * rsb__do_mtx_alloc_from_coo_const(const void *VA, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_blk_idx_t brA, rsb_blk_idx_t bcA, rsb_flags_t flags, rsb_err_t * errvalp);
+struct rsb_mtx_t * rsb__do_mtx_alloc_from_csr_const(const void *VA, const rsb_coo_idx_t * RP, const rsb_coo_idx_t * JA, rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_blk_idx_t brA, rsb_blk_idx_t bcA, rsb_flags_t flags, rsb_err_t * errvalp);
+struct rsb_mtx_t * rsb__do_mtx_alloc_from_csc_const(const void *VA, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * CP, rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_blk_idx_t brA, rsb_blk_idx_t bcA, rsb_flags_t flags, rsb_err_t * errvalp);
+int rsb__util_atoi_km10(const rsb_char_t *nptr);
+int rsb__util_atoi_km2(const rsb_char_t *nptr);
+void rsb__cat_compver(rsb_char_t * buf);
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+#endif /* RSB_INTERNALS_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_is.c b/rsb_is.c
new file mode 100644
index 0000000..9e86ef2
--- /dev/null
+++ b/rsb_is.c
@@ -0,0 +1,712 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains matrix info getter functions.
+ * */
+
+#include "rsb_internals.h"
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+rsb_bool_t rsb__is_coo_matrix(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * */
+	rsb_bool_t is;
+	RSB_DEBUG_ASSERT(mtxAp);
+	if(!mtxAp)
+		return RSB_BOOL_FALSE;
+
+	is = (
+#ifdef RSB_MATRIX_STORAGE_BCOR
+	 (mtxAp->matrix_storage == RSB_MATRIX_STORAGE_BCOR)||
+#endif /* RSB_MATRIX_STORAGE_BCOR */
+#ifdef RSB_MATRIX_STORAGE_BCOC
+	 (mtxAp->matrix_storage == RSB_MATRIX_STORAGE_BCOC)||
+#endif /* RSB_MATRIX_STORAGE_BCOC */
+	  0
+	)?RSB_BOOL_TRUE:RSB_BOOL_FALSE;
+	return is;
+}
+
+rsb_bool_t rsb__is_square(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * */
+	RSB_DEBUG_ASSERT(mtxAp);
+	if(!mtxAp)
+		return RSB_BOOL_FALSE;
+
+	return (mtxAp->nr == mtxAp->nc)?RSB_BOOL_TRUE:RSB_BOOL_FALSE;
+}
+
+rsb_bool_t rsb__is_hermitian(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * */
+	if(!mtxAp)
+		return RSB_BOOL_FALSE;
+
+	return (rsb__get_hermitian_flag(mtxAp))?RSB_BOOL_TRUE:RSB_BOOL_FALSE;
+}
+
+rsb_bool_t rsb__is_triangle(rsb_flags_t flags)
+{
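+	/*!
+	 * \ingroup gr_internals
+	 * \return nonzero if the given flags mark a lower or upper triangular matrix.
+	 * */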
+	return (rsb__is_lower_triangle(flags) | rsb__is_upper_triangle(flags));
+}
+
+rsb_bool_t rsb__is_lower_triangle(rsb_flags_t flags)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * */
+	return (RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER|RSB_FLAG_TRIANGULAR));
+}
+
+rsb_bool_t rsb__is_upper_triangle(rsb_flags_t flags)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * */
+	return (RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER|RSB_FLAG_TRIANGULAR));
+}
+
+rsb_bool_t rsb__is_symmetric(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * */
+	if(!mtxAp)
+		return RSB_BOOL_FALSE;
+
+	return (rsb__get_symmetry_flag(mtxAp))?RSB_BOOL_TRUE:RSB_BOOL_FALSE;
+}
+
+rsb_bool_t rsb__is_not_unsymmetric(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * */
+	if(!mtxAp)
+		return RSB_BOOL_FALSE;
+
+	if(rsb__get_hermitian_flag(mtxAp) || rsb__get_symmetry_flag(mtxAp))
+		return RSB_BOOL_TRUE;
+	else
+		return RSB_BOOL_FALSE;
+}
+
+rsb_bool_t rsb__is_csr_matrix(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \return nonzero if the given matrix is in compressed sparse rows (CSR) format
+	 *
+	 * */
+	rsb_blk_idx_t br, bc;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!mtxAp)
+		return 0;
+
+	if((errval = rsb__get_blocking_size(mtxAp, &br, &bc))!=RSB_ERR_NO_ERROR)
+		goto err;
+
+	return ( br==1 && bc==1 && !rsb__have_linked_storage(mtxAp->flags) );
+err:
+	return 0;
+}
+
+rsb_bool_t rsb__is_bcss_matrix(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \return nonzero if the given matrix is in a row or column block major (BCSR/BCSC) format
+	 *
+	 * */
+	rsb_bool_t ret = 0;
+
+	if(!mtxAp)
+		return ret;
+	ret = 
+#ifdef RSB_MATRIX_STORAGE_BCSR
+		mtxAp->matrix_storage & RSB_MATRIX_STORAGE_BCSR ||
+#endif /* RSB_MATRIX_STORAGE_BCSR */
+#ifdef RSB_MATRIX_STORAGE_BCSC
+		mtxAp->matrix_storage & RSB_MATRIX_STORAGE_BCSC ||
+#endif /* RSB_MATRIX_STORAGE_BCSC */
+		 0;
+
+#if RSB_EXPERIMENTAL_USE_PURE_BCSS
+	if(ret)
+	{
+		RSB_ASSERT(mtxAp->br>0);
+		RSB_ASSERT(mtxAp->bc>0);
+	}
+#endif /* RSB_EXPERIMENTAL_USE_PURE_BCSS */
+	return ret;
+}
+
+rsb_bool_t rsb__is_css_matrix(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \return nonzero if the given matrix is CSR or CSC (compressed sparse stripes, i.e. 1x1 blocking).
+	 *
+	 * */
+	//rsb_bool_t ret = RSB_BOOL_FALSE;
+	rsb_blk_idx_t br, bc;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!mtxAp)
+		return RSB_BOOL_FALSE;
+
+	if((errval = rsb__get_blocking_size(mtxAp, &br, &bc))!=RSB_ERR_NO_ERROR)
+		return RSB_BOOL_FALSE;
+
+	return ( br==1 && bc==1 ) ? RSB_BOOL_TRUE : RSB_BOOL_FALSE;
+}
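+
+/* A minimal usage sketch (illustrative only, not part of the library):
+ * telling CSR apart from generic CSS.  rsb__is_css_matrix() only checks
+ * for 1x1 blocking, while rsb__is_csr_matrix() additionally excludes
+ * linked storage; so every CSR matrix is CSS, but not vice versa. */
+#if 0
+static void rsb_example_classify_storage(const struct rsb_mtx_t * mtxAp)
+{
+	if(rsb__is_csr_matrix(mtxAp))
+		RSB_STDOUT("CSR: 1x1 blocking, no linked storage\n");
+	else
+	if(rsb__is_css_matrix(mtxAp))
+		RSB_STDOUT("CSS, but not CSR\n");
+	else
+		RSB_STDOUT("blocked or other storage\n");
+}
+#endif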
+
+rsb_bool_t rsb__is_bcsr_matrix(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \return nonzero if the given matrix is in row block major (BCSR) format.
+	 *
+	 * */
+	rsb_bool_t ret = RSB_BOOL_FALSE;
+
+	if(!mtxAp)
+		return ret;
+	
+#ifdef RSB_MATRIX_STORAGE_BCSR
+	if( ( mtxAp->matrix_storage & RSB_MATRIX_STORAGE_BCSR ) != 0 ) ret = RSB_BOOL_TRUE;
+#endif /* RSB_MATRIX_STORAGE_BCSR */
+
+#if RSB_EXPERIMENTAL_USE_PURE_BCSS
+	if(ret)
+	{
+		RSB_ASSERT(mtxAp->br>0);
+		RSB_ASSERT(mtxAp->bc>0);
+	}
+#endif /* RSB_EXPERIMENTAL_USE_PURE_BCSS */
+	return ret;
+}
+
+rsb_bool_t rsb__is_bcsc_matrix(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \return nonzero if the given matrix is in column block major (BCSC) format.
+	 *
+	 * */
+	rsb_bool_t ret = 0;
+
+	if(!mtxAp)
+		return ret;
+	
+	ret = 
+#ifdef RSB_MATRIX_STORAGE_BCSC
+		mtxAp->matrix_storage & RSB_MATRIX_STORAGE_BCSC ||
+#endif /* RSB_MATRIX_STORAGE_BCSC */
+	0;
+
+#if RSB_EXPERIMENTAL_USE_PURE_BCSS
+	if(ret)
+	{
+		RSB_ASSERT(mtxAp->br>0);
+		RSB_ASSERT(mtxAp->bc>0);
+	}
+#endif /* RSB_EXPERIMENTAL_USE_PURE_BCSS */
+	return ret;
+}
+
+rsb_bool_t rsb__have_fixed_blocks_matrix_flags(rsb_flags_t flags)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \return nonzero if the given flags are for a fixed block partitioning.
+	 * */
+	return RSB_DO_FLAG_HAS_INTERSECTION(flags,( RSB_FLAG_WANT_FIXED_BLOCKING_VBR | RSB_FLAG_WANT_BCSS_STORAGE | RSB_FLAG_WANT_COO_STORAGE ));
+}
+
+rsb_bool_t rsb__have_linked_storage(const rsb_flags_t flags)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \return nonzero if the given flags are for a linked lists storage.
+	 */
+#ifdef RSB_FLAG_WANT_LINKED_STORAGE
+	return RSB_DO_FLAG_HAS(flags,RSB_FLAG_WANT_LINKED_STORAGE);
+#else /* RSB_FLAG_WANT_LINKED_STORAGE */
+	return RSB_BOOL_FALSE;
+#endif /* RSB_FLAG_WANT_LINKED_STORAGE */
+}
+
+rsb_bool_t rsb__is_terminal_recursive_matrix(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \return nonzero if the given matrix is terminal (a leaf, with no submatrices)
+	 * FIXME: is this function really needed?
+	 * FIXME: should this return one also for the terminal case of a non-recursive matrix?
+	 * TODO: rename rsb__is_terminal_recursive_matrix to rsb_is_terminal_matrix or rsb_is_leaf_matrix
+	 */
+	rsb_submatrix_idx_t i,j;
+	struct rsb_mtx_t * submatrix = NULL;
+	int smc = 0;
+
+	if(!mtxAp)
+		goto rz;
+
+	RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+	if(submatrix)
+		++smc;
+
+	return (smc==0);
+rz:
+	return 0; /* TODO: eliminate this case */
+}
+
+rsb_bool_t rsb__is_recursive_matrix(rsb_flags_t flags)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \return nonzero if the given flags are for a recursive storage.
+	 */
+	return (RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING));
+}
+
+rsb_bool_t rsb__is_fixed_block_matrix(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \return nonzero if the given matrix was partitioned with a fixed blocking,
+	 *         thus enabling optimized operations on it.
+	 * */
+	if(!mtxAp)
+		return 0;
+	
+	if( rsb__have_fixed_blocks_matrix_flags(mtxAp->flags) )
+		return 1;
+
+	/* FIXME : is this ok ? */
+	if(
+#ifdef RSB_MATRIX_STORAGE_VBR
+		mtxAp->matrix_storage & RSB_MATRIX_STORAGE_VBR ||
+#endif /* RSB_MATRIX_STORAGE_VBR */
+#ifdef RSB_MATRIX_STORAGE_VBC
+		mtxAp->matrix_storage & RSB_MATRIX_STORAGE_VBC ||
+#endif/* RSB_MATRIX_STORAGE_VBC */
+		0 )
+		return RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_WANT_FIXED_BLOCKING_VBR);
+
+	else
+	return 
+#ifdef RSB_MATRIX_STORAGE_BCSR
+		mtxAp->matrix_storage & RSB_MATRIX_STORAGE_BCSR ||
+#endif /* RSB_MATRIX_STORAGE_BCSR */
+#ifdef RSB_MATRIX_STORAGE_BCSC
+		mtxAp->matrix_storage & RSB_MATRIX_STORAGE_BCSC ||
+#endif /* RSB_MATRIX_STORAGE_BCSC */
+		0;
+}
+
+rsb_bool_t rsb__util_are_flags_suitable_for_optimized_1x1_constructor(rsb_flags_t flags)
+{
+	/*!
+	 	\ingroup gr_internals
+		FIXME: temporary
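+		Returns RSB_BOOL_TRUE when the flags request plain BCSS storage with
+		neither auto blocking nor quad partitioning, i.e. when the optimized
+		1x1 constructor applies.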
+	*/
+	return	(RSB_DO_FLAG_HAS(  flags,RSB_FLAG_WANT_BCSS_STORAGE)  &&
+		 (!RSB_DO_FLAG_HAS(flags,RSB_FLAG_AUTO_BLOCKING))    &&
+		 (!RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING )) );
+}
+
+rsb_bool_t rsb__is_root_matrix(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 	\ingroup gr_internals
+	*/
+	return (!RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_NON_ROOT_MATRIX))?RSB_BOOL_TRUE:RSB_BOOL_FALSE;
+}
+
+rsb_bool_t rsb__mtx_chk(const struct rsb_mtx_t *mtxAp)
+{
+	/*!
+	 	\ingroup gr_internals
+
+		This is mainly used as a debugging tool when re-developing core functionality.
+		FIXME: will die in the presence of the RSB_FLAG_FORTRAN_INDICES_INTERFACE flag
+		TODO: move to rsb__mtx_check.c
+		TODO: invoke rsb__check_bounds.
+	*/
+	if(!mtxAp)
+	{
+	       	RSB_ERROR(RSB_ERRM_ES);
+		RSB_PERR_GOTO(err,RSB_ERRM_E_MTXAP);
+	}
+	
+	if( RSB_INVALID_COO_INDEX(mtxAp->Mdim)  || RSB_INVALID_COO_INDEX(mtxAp->nr) || RSB_INVALID_NNZ_INDEX(mtxAp->nnz) )
+	{
+	       	RSB_ERROR("matrix dimensions out of allowed bounds!");
+		RSB_ERROR("bad matrix:"),RSB_ERROR_MATRIX_SUMMARY(mtxAp),RSB_ERROR(RSB_ERRM_NL);
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+	
+#if RSB_MERCY_FOR_LEGACY_INTERFACE
+	if(
+			rsb_get_matrix_n_rows(mtxAp)!=mtxAp->nr || 
+			rsb_get_matrix_n_columns(mtxAp)!=mtxAp->nc || 
+			rsb_get_matrix_n_rows(NULL)!=RSB_DEFAULT_UNDEFINED_COO_VALUE  || 
+			rsb_get_matrix_n_columns(NULL)!=RSB_DEFAULT_UNDEFINED_COO_VALUE 
+			)
+#else /* RSB_MERCY_FOR_LEGACY_INTERFACE */
+	if(0)
+#endif /* RSB_MERCY_FOR_LEGACY_INTERFACE */
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	/* if(rsb__is_recursive_matrix(mtxAp->flags) && ! RSB_DO_FLAG_HAS_INTERSECTION(mtxAp->flags,RSB_FLAG_NON_ROOT_MATRIX ) ) */
+	if( rsb__is_recursive_matrix(mtxAp->flags) )
+	{
+		rsb_submatrix_idx_t i,j;
+		struct rsb_mtx_t * submatrix;
+
+		/* RSB_ASSERT( mtxAp->VA == NULL && mtxAp->bpntr == NULL && mtxAp->bindx == NULL ); */
+
+		if( rsb__is_root_matrix(mtxAp) ) /* 20140921 */
+		{
+			rsb_submatrix_idx_t smi;
+
+			RSB_SUBMATRIX_FOREACH_LEAF(mtxAp,submatrix,smi)
+			{
+				if(!submatrix)
+				{
+			       		RSB_PERR_GOTO(err,"leaf node %d (of %d) is NULL !?\n",smi,mtxAp->all_leaf_matrices_n);
+				}
+
+				if( rsb__is_root_matrix(submatrix) )
+				{
+			       		RSB_PERR_GOTO(err,"leaf node %d (of %d) has root flag in flags 0x%x=%d !?\n",smi,mtxAp->all_leaf_matrices_n,submatrix->flags,submatrix->flags);
+				}
+
+				if( submatrix->nnz > 0 && submatrix->VA == NULL )
+				{
+			       		RSB_PERR_GOTO(err,"leaf node %d (of %d) has %d nonzeroes and VA=NULL !?\n",smi,mtxAp->all_leaf_matrices_n,submatrix->nnz);
+				}
+
+				if( rsb__is_recursive_matrix(submatrix->flags) )
+				{
+			       		RSB_PERR_GOTO(err,"leaf node %d (of %d) has quad-partitioning flag in flags 0x%x=%d !?\n",smi,mtxAp->all_leaf_matrices_n,submatrix->flags,submatrix->flags);
+				}
+
+				if( submatrix->all_leaf_matrices != NULL )
+				{
+			       		RSB_PERR_GOTO(err,"leaf node %d (of %d) has a non-NULL submatrices pointer !?\n",smi,mtxAp->all_leaf_matrices_n);
+				}
+
+				if( submatrix->all_leaf_matrices_n > 0 )
+				{
+			       		RSB_PERR_GOTO(err,"leaf node %d (of %d) has a submatrices count of %d !?\n",smi,mtxAp->all_leaf_matrices_n,submatrix->all_leaf_matrices_n);
+				}
+			}
+		}
+
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix)
+		{
+			if(
+				(mtxAp->roff>submatrix->roff) || (mtxAp->coff>submatrix->coff) ||
+				(mtxAp->nr<submatrix->nr) || (mtxAp->nc<submatrix->nc) ||
+					0)
+			{
+			       	RSB_PERR_GOTO(err,RSB_ERRM_ES) 
+			}
+
+			if(!rsb__mtx_chk(submatrix))
+			{
+			       	RSB_PERR_GOTO(err,"submatrix at %d %d seems corrupted\n",submatrix->roff,submatrix->coff);
+			}
+		}
+	}
+	else
+	{
+		rsb_nnz_idx_t n;
+
+		if(!RSB_IS_MATRIX_STORAGE_ALLOWED_FOR_LEAF(mtxAp->matrix_storage))
+		{
+		       	RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	       	}
+
+		if(
+					RSB_INVALID_COO_INDEX(mtxAp->roff) || 
+					RSB_INVALID_COO_INDEX(mtxAp->coff) || 
+					RSB_INVALID_COO_INDEX(mtxAp->nr) || 
+					RSB_INVALID_COO_INDEX(mtxAp->nc) || 
+					(mtxAp->roff>mtxAp->broff) || 
+					(mtxAp->coff>mtxAp->bcoff) || 
+					(mtxAp->nr<mtxAp->bm) || 
+					(mtxAp->nc<mtxAp->bk) || 
+					0
+					)
+		{
+			RSB_PERR_GOTO(err,"submatrix bounds ([%d .. %d ... %d .. %d, %d .. %d ... %d .. %d]) are wrong!\n",
+					mtxAp->roff,
+					mtxAp->broff,
+					mtxAp->bm,
+					mtxAp->nr,
+					mtxAp->coff,
+					mtxAp->bcoff,
+					mtxAp->bk,
+					mtxAp->nc
+					);
+		}
+
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,(RSB_FLAG_USE_HALFWORD_INDICES)))
+		{
+			if(rsb__is_coo_matrix(mtxAp))
+			if( (!RSB_INDICES_FIT_IN_HALFWORD(mtxAp->nr,mtxAp->nc)) || 0)
+			{
+				RSB_PERR_GOTO(err,"coo submatrix bounds are wrong, given the halfword indices!\n");
+			}
+
+			if(rsb__is_csr_matrix(mtxAp))
+			if( (!RSB_INDEX_FIT_IN_HALFWORD(mtxAp->nc)) || 0)
+			{
+				RSB_PERR_GOTO(err,"csr submatrix bounds are wrong, given the halfword indices!\n");
+			}
+		}
+
+		if(rsb__is_coo_matrix(mtxAp))
+		{
+			const rsb_coo_idx_t mai = 0; /* minimal allowed index */ /* FIXME: if one-based, this should be 1! */
+
+			if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_USE_HALFWORD_INDICES))
+			{
+				RSB_DECLARE_CONST_HALFCOO_ARRAYS_FROM_MATRIX(IA,JA,mtxAp)
+
+				for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				{
+					if( IA[n] < mai || JA[n] < mai )
+					{
+						RSB_PERR_GOTO(err,"negative halfword COO indices @%d: %d<%d || %d,%d!\n", n,IA[n],mai,JA[n],mai);
+					}
+
+					if( IA[n]>=mtxAp->Mdim || JA[n]>=mtxAp->mdim )
+					{
+						RSB_PERR_GOTO(err,"bad halfword COO indices @%d: %d>=%d || %d>=%d!\n", n,IA[n],mtxAp->Mdim,JA[n],mtxAp->mdim); 
+					}
+				}
+
+				if( rsb__util_is_halfword_coo_array_sorted_up_partial_order(IA,mtxAp->nnz) != RSB_BOOL_TRUE )
+				{
+					RSB_PERR_GOTO(err,"halfword COO input is not sorted! \n");
+				}
+				goto ok;
+			}
+			else
+			{
+				RSB_DECLARE_CONST_FULLCOO_ARRAYS_FROM_MATRIX(IA,JA,mtxAp)
+
+				for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				{
+					if( IA[n] < mai || JA[n] < mai )
+					{
+						RSB_PERR_GOTO(err,"negative fullword COO indices @%d: %d<%d || %d,%d!\n", n,IA[n],mai,JA[n],mai);
+					}
+
+					if( IA[n]>=mtxAp->Mdim || JA[n]>=mtxAp->mdim )
+					{
+						RSB_PERR_GOTO(err,"bad fullword COO indices @%d: %d>=%d || %d>=%d!\n",
+							n,IA[n],mtxAp->Mdim,JA[n],mtxAp->mdim);
+					}
+				}
+
+				if( rsb__util_is_nnz_array_sorted_up_partial_order(IA,mtxAp->nnz) != RSB_BOOL_TRUE )
+				{
+					RSB_PERR_GOTO(err,"fullword COO input is not sorted! \n");
+				}
+
+				goto ok;
+			}
+		}
+	
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,(RSB_FLAG_WANT_COO_STORAGE))
+		    && !RSB_DO_FLAG_HAS(mtxAp->flags,(RSB_FLAG_USE_HALFWORD_INDICES)))
+			;
+		//	goto ok;//{RSB_PERR_GOTO(err,"full word COO is not allowed on a leaf matrix!\n");}
+		//
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_WANT_COO_STORAGE))
+		{
+			if(RSB_DO_FLAG_HAS(mtxAp->flags,(RSB_FLAG_USE_HALFWORD_INDICES_COO)))
+			{
+			}
+			else
+			{
+				// FIXME: I am not sure whether this code is ever executed.
+				RSB_DECLARE_CONST_FULLCOO_ARRAYS_FROM_MATRIX(IA,JA,mtxAp)
+				for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				{
+					if( IA[n]>=mtxAp->Mdim || JA[n]>=mtxAp->mdim )
+					{
+						RSB_PERR_GOTO(err,"bad fullword COO indices @%d: %d>=%d || %d>=%d!\n",
+							n,IA[n],mtxAp->Mdim,JA[n],mtxAp->mdim);
+					}
+				}
+			}
+			goto ok;
+		}
+		else
+				;/* ok */
+#if 0
+//		if(RSB_DO_FLAG_HAS(mtxAp->flags,(RSB_FLAG_USE_HALFWORD_INDICES_CSR))
+//		    && RSB_DO_FLAG_HAS(mtxAp->flags,(RSB_FLAG_USE_HALFWORD_INDICES_COO)))
+//				{RSB_PERR_GOTO(err,"both halfword COO and halfword CSR is not allowed on a leaf matrix!\n");}
+//		else
+//				;/* ok */
+#endif
+
+		if(!rsb__is_csr_matrix(mtxAp))
+		{
+			RSB_PERR_GOTO(err,"not a csr matrix ?\n");
+		}
+		if(mtxAp->element_count != mtxAp->nnz)
+		{
+			RSB_PERR_GOTO(err,RSB_ERRM_EM);
+		}
+		if(mtxAp->element_count != mtxAp->block_count)
+		{
+			RSB_PERR_GOTO(err,RSB_ERRM_EM);
+		}
+		if(!mtxAp->bpntr)
+		{
+			RSB_PERR_GOTO(err,"!bpntr!\n");
+		}
+		if(!mtxAp->bindx)
+		{
+			RSB_PERR_GOTO(err,"!bindx!\n");
+		}
+		if(mtxAp->bpntr[0]!=0)
+		{
+			RSB_PERR_GOTO(err,"bpntr[0]!=0!\n");
+		}
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_WANT_COO_STORAGE))
+		{
+			if(!RSB_DO_FLAG_HAS(mtxAp->flags,(RSB_FLAG_USE_HALFWORD_INDICES)))
+			{
+				if(RSB_SOME_ERROR( rsb__util_is_sorted_coo_as_row_major(mtxAp->VA,mtxAp->bpntr,mtxAp->bindx,mtxAp->nnz,mtxAp->typecode,NULL,mtxAp->flags)) ) 
+				{
+					RSB_PERR_GOTO(err,"COO matrix seems unsorted!\n");
+				}
+			}
+			else
+			{
+				/* FIXME: missing */		
+			}
+		}
+		else
+		{
+			if(mtxAp->bpntr[mtxAp->Mdim]!=mtxAp->nnz)
+			{
+				RSB_PERR_GOTO(err,"%d=bpntr[Mdim]!=nnz=%d\n",(int)mtxAp->bpntr[mtxAp->Mdim],(int)mtxAp->nnz);
+			}
+			if(!rsb__util_is_nnz_array_sorted_up_partial_order(mtxAp->bpntr,mtxAp->Mdim+1))
+			{
+				RSB_PERR_GOTO(err,"bpntr seems unsorted!\n");
+			}
+		}
+
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_USE_CSR_RESERVED))
+			for(n=0;RSB_LIKELY(n<mtxAp->Mdim);++n)
+			if( RSB_UNLIKELY( mtxAp->bpntr[n+1] - mtxAp->bpntr[n] > mtxAp->mdim ) )
+			{
+				RSB_ERROR("invalid CSR pointer:  mtxAp->bpntr[%d+1] - mtxAp->bpntr[%d] > mtxAp->mdim: %d - %d > %d !\n",n,n,mtxAp->bpntr[n+1],mtxAp->bpntr[n],mtxAp->mdim);
+				RSB_PERR_GOTO(err,"!\n");
+			}
+
+
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,(RSB_FLAG_USE_HALFWORD_INDICES_COO)))
+		{
+			/* FIXME: write me */
+		}
+		else
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,(RSB_FLAG_USE_HALFWORD_INDICES_CSR)))
+		{
+			for(n=0;RSB_LIKELY(n<mtxAp->Mdim);++n)
+			if(!rsb__util_is_halfword_coo_array_sorted_up(
+						((rsb_half_idx_t*)mtxAp->bindx)+mtxAp->bpntr[n],mtxAp->bpntr[n+1]-mtxAp->bpntr[n]))
+			{
+				RSB_PERR_GOTO(err,"(halfword) bindx seems unsorted!\n");
+			}
+
+	//		for(n=0;RSB_LIKELY(n<mtxAp->Mdim);++n)
+	//		{
+	//			rsb_nnz_idx_t i;
+	//			for(i=mtxAp->bpntr[n];i<mtxAp->bpntr[n+1];++i)
+	//				RSB_STDOUT("at %d %d\n",1+n,1+((rsb_half_idx_t*)(mtxAp->bindx))[i]);
+	//		}
+		}
+		else
+		{
+			if(RSB_SOME_ERROR(rsb__csr_chk(mtxAp->bpntr,mtxAp->bindx,mtxAp->Mdim,mtxAp->mdim,mtxAp->nnz,0)))
+			{
+				RSB_PERR_GOTO(err,"CSR submatrix seems corrupt!\n");
+			}
+		}
+	}
+ok:
+	return RSB_BOOL_TRUE;
+err:
+#if 1
+	RSB_ERROR("bad submatrix: "),RSB_ERROR_MATRIX_SUMMARY(mtxAp),RSB_ERROR(RSB_ERRM_NL);
+#endif
+	return RSB_BOOL_FALSE;
+}
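+
+/* A minimal debugging sketch (illustrative only, not part of the library):
+ * validating a matrix right after assembly.  rsb__mtx_chk() walks the whole
+ * quad-tree and returns RSB_BOOL_FALSE on the first inconsistency found. */
+#if 0
+static rsb_err_t rsb_example_validate(const struct rsb_mtx_t * mtxAp)
+{
+	if(rsb__mtx_chk(mtxAp) != RSB_BOOL_TRUE)
+	{
+		RSB_ERROR("matrix failed the consistency check!\n");
+		return RSB_ERR_CORRUPT_INPUT_DATA;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+#endif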
+
+rsb_bool_t rsb__do_is_matrix_binary_loaded(const struct rsb_mtx_t * mtxAp)
+{
+	rsb_bool_t is_bio; // binary I/O matrix
+	if(!mtxAp)
+		return RSB_BOOL_FALSE;
+#if 0
+	struct rsb_mtx_t *fsm = rsb__do_get_first_submatrix(mtxAp);
+	is_bio=!((long)mtxAp<((long)fsm->bpntr) || (long)(mtxAp)>=((long)fsm->bpntr+mtxAp->nnz));
+#else
+	is_bio = RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_FIX_FOR_BINARY_LOADED_MATRIX);
+#endif
+	return is_bio;
+}
+
+
+/* @endcond */
diff --git a/rsb_is.h b/rsb_is.h
new file mode 100644
index 0000000..946e0d4
--- /dev/null
+++ b/rsb_is.h
@@ -0,0 +1,59 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This header file declares matrix (mtxAp) info getter functions.
+ * */
+
+#ifndef RSB_IS_H_INCLUDED
+#define RSB_IS_H_INCLUDED
+
+#include "rsb_internals.h"
+
+rsb_bool_t rsb__is_coo_matrix(const struct rsb_mtx_t *mtxAp);
+rsb_bool_t rsb__is_square(const struct rsb_mtx_t *mtxAp);
+rsb_bool_t rsb__is_fixed_block_matrix(const struct rsb_mtx_t *mtxAp);
+rsb_bool_t rsb__is_css_matrix(const struct rsb_mtx_t *mtxAp);
+rsb_bool_t rsb__is_bcsr_matrix(const struct rsb_mtx_t *mtxAp);
+rsb_bool_t rsb__is_bcss_matrix(const struct rsb_mtx_t *mtxAp);
+rsb_bool_t rsb__is_bcsc_matrix(const struct rsb_mtx_t *mtxAp);
+rsb_bool_t rsb__is_recursive_matrix(rsb_flags_t flags);
+rsb_bool_t rsb__is_terminal_recursive_matrix(const struct rsb_mtx_t *mtxAp);
+rsb_bool_t rsb__is_csr_matrix(const struct rsb_mtx_t *mtxAp);
+rsb_bool_t rsb__have_linked_storage(const rsb_flags_t flags);
+rsb_bool_t rsb__have_fixed_blocks_matrix_flags(rsb_flags_t flags);
+rsb_bool_t rsb__util_are_flags_suitable_for_optimized_1x1_constructor(rsb_flags_t flags);
+rsb_bool_t rsb__is_symmetric(const struct rsb_mtx_t *mtxAp);
+rsb_bool_t rsb__is_not_unsymmetric(const struct rsb_mtx_t *mtxAp);
+rsb_bool_t rsb__is_root_matrix(const struct rsb_mtx_t *mtxAp);
+rsb_bool_t rsb__is_hermitian(const struct rsb_mtx_t *mtxAp);
+rsb_bool_t rsb__is_lower_triangle(rsb_flags_t flags);
+rsb_bool_t rsb__is_triangle(rsb_flags_t flags);
+rsb_bool_t rsb__is_upper_triangle(rsb_flags_t flags);
+rsb_bool_t rsb__mtx_chk(const struct rsb_mtx_t *mtxAp);
+rsb_bool_t rsb__do_is_matrix_binary_loaded(const struct rsb_mtx_t * mtxAp);
+
+#endif /* RSB_IS_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_krnl.c b/rsb_krnl.c
new file mode 100644
index 0000000..9a07a2a
--- /dev/null
+++ b/rsb_krnl.c
@@ -0,0 +1,34946 @@
+/* @cond INNERDOC */
+/*! 
+ @file
+ @brief Matrix type dispatching code, for each matrix operation.
+ */
+
+/*
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/*!
+ @file
+ @brief
+ Performance kernel dispatching code, for each type, submatrix size, and operation,
+ for the block compressed sparse stripes format.
+ Kernels are unrolled, with no loops, for user-specified blockings only.
+ */
+
+#include "rsb_common.h"
+#include "rsb_krnl_bcss_spmv_u.h"	/* uhm */
+#include "rsb_krnl_bcss_spsv_u.h"	/* uhm */
+#include "rsb_krnl_bcss_misc_u.h"	/* uhm */
+
+
+
+
+rsb_err_t rsb__do_spmv_uaua(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION macro */
+{
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * A run-time kernel dispatching function.
+	 * 
+	 * Will use the right "spmv_uaua" kernel for each matrix block.
+	 * 
+	 * However, dispatching the right kernel for each block incurs some
+	 * overhead, especially for matrices partitioned into same-size blocks.
+	 *
+	 * In that case, it is better to use a specialized function.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
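+	/*
+	 * Dispatch order below (a reading aid, inferred from the generated
+	 * switches): diagonal -> index width (half/full word) -> transA ->
+	 * symmetry -> matrix storage (BCOR/BCSR) -> numerical type.  Each leaf
+	 * case calls a fully specialized kernel such as
+	 * rsb__BCOR_spmv_uaua_double_C_u_tN_sU_dE_uG().
+	 */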
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_flags_t symmetry,diagonal;
+#ifdef RSB_COORDINATE_TYPE_H
+	rsb_int_t half_storage = rsb__do_is_candidate_size_for_halfword(mtxAp->Mdim,mtxAp->mdim,/*nnz*/0,mtxAp->flags)?RSB_COORDINATE_TYPE_H:RSB_COORDINATE_TYPE_C;
+#else /* RSB_COORDINATE_TYPE_H */
+	rsb_int_t half_storage = RSB_COORDINATE_TYPE_C;
+#endif /* RSB_COORDINATE_TYPE_H */
+
+	if(!mtxAp /*|| !mtxAp->options */)
+		return RSB_ERR_BADARGS;
+
+	symmetry = rsb__get_symmetry_type_flag(mtxAp);
+	diagonal = rsb__get_diagonal_type_flag(mtxAp);
+
+	if(RSB_MATRIX_UNSUPPORTED_TYPE(mtxAp->typecode))
+		return RSB_ERR_BADARGS;
+
+	switch(diagonal)
+	{
+	case(RSB_DIAGONAL_E):
+	switch(half_storage)
+	{
+	case(RSB_COORDINATE_TYPE_C):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%x) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
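+	/* RSB_COORDINATE_TYPE_H: the same transposition/symmetry/storage/type
+	 * dispatch, repeated for half-word index storage; bindx is cast to
+	 * rsb_half_idx_t in the leaf kernel calls. */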
+		case(RSB_COORDINATE_TYPE_H):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%x) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this coordinate index (0x%x) is not supported.\n",(rsb_int)half_storage);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	break;
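+	/* RSB_DIAGONAL_I (implicit diagonal): the _dE dispatch above is mirrored
+	 * here, with the leaf kernels carrying the _dI suffix in place of _dE. */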
+		case(RSB_DIAGONAL_I):
+	switch(half_storage)
+	{
+	case(RSB_COORDINATE_TYPE_C):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
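+	/* Transposed (_tT) and conjugate-transposed (_tC) implicit-diagonal cases
+	 * follow, with the same symmetry/storage/type fan-out as above. */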
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%x) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+		case(RSB_COORDINATE_TYPE_H):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uaua_double_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uaua_float_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_float_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uaua_double_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uaua_double_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uaua_float_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_float_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uaua_double_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%x) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this coordinate index (0x%x) is not supported.\n",(rsb_int)half_storage);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this diagonal type (0x%x) is not supported.\n",(rsb_int)diagonal);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	return errval;
+}
+
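+/*
+ * Reading aid (inferred from the dispatch switches in this file): the
+ * generated kernel names encode their specialization, e.g. in
+ * rsb__BCOR_spmv_uaua_double_H_u_tT_sS_dI_uG the infixes/suffixes select
+ * BCOR vs. BCSR storage, the numerical type, halfword (H) vs. full (C)
+ * coordinate indices, transposition (tN/tT/tC), symmetry (sU/sS/sH) and,
+ * apparently matching RSB_DIAGONAL_I/RSB_DIAGONAL_E, diagonal handling
+ * (dI/dE).
+ */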
+rsb_err_t rsb__do_spmv_uauz(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION macro */
+{
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * A run-time kernel dispatching function.
+	 * 
+	 * Will use the right "spmv_uauz" kernel for each matrix block.
+	 * 
+	 * However, dispatching the right kernel for each block incurs some
+	 * overhead, especially for matrices partitioned into same-size blocks;
+	 * in that case, a specialized function is preferable.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
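+	/*
+	 * Minimal usage sketch (illustrative only: `mtx', `x' and `y' are
+	 * hypothetical names for an already assembled rsb_mtx_t and for two
+	 * numerical arrays of matching type and size):
+	 *
+	 *   rsb_err_t err = rsb__do_spmv_uauz(mtx, x, y, RSB_TRANSPOSITION_N);
+	 *   if(err != RSB_ERR_NO_ERROR)
+	 *     (handle e.g. RSB_ERR_UNSUPPORTED_TYPE or RSB_ERR_UNSUPPORTED_FORMAT)
+	 */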
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_flags_t symmetry,diagonal;
+#ifdef RSB_COORDINATE_TYPE_H
+	/* Use compact halfword (rsb_half_idx_t) coordinate indices when the
+	 * submatrix dimensions are small enough; otherwise use full indices. */
+	rsb_int_t half_storage = rsb__do_is_candidate_size_for_halfword(mtxAp->Mdim,mtxAp->mdim,/*nnz*/0,mtxAp->flags)?RSB_COORDINATE_TYPE_H:RSB_COORDINATE_TYPE_C;
+#else /* RSB_COORDINATE_TYPE_H */
+	rsb_int_t half_storage = RSB_COORDINATE_TYPE_C;
+#endif /* RSB_COORDINATE_TYPE_H */
+
+	if(!mtxAp /*|| !mtxAp->options */)
+		return RSB_ERR_BADARGS;
+
+	symmetry = rsb__get_symmetry_type_flag(mtxAp);
+	diagonal = rsb__get_diagonal_type_flag(mtxAp);
+
+	if(RSB_MATRIX_UNSUPPORTED_TYPE(mtxAp->typecode))
+		return RSB_ERR_BADARGS;
+
+	switch(diagonal)
+	{
+	case(RSB_DIAGONAL_E):
+	switch(half_storage)
+	{
+	case(RSB_COORDINATE_TYPE_C):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
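+	/* RSB_TRANSPOSITION_C: conjugate-transpose dispatch, mirroring the
+	 * transposed (_tT_) branch above with the _tC_ kernel variants. */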
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%xd) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
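+	/* RSB_COORDINATE_TYPE_H: the same transposition/symmetry/storage/type
+	 * tree, but bindx is reinterpreted as rsb_half_idx_t and the
+	 * halfword-index (_H_) kernels are invoked. */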
+		case(RSB_COORDINATE_TYPE_H):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
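+	/* Transposed (_tT_) dispatch under halfword coordinates. */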
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
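+	/* Conjugate-transpose (_tC_) dispatch under halfword coordinates. */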
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%xd) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this coordinate index (0x%xd) is not supported.\n",(rsb_int)half_storage);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	break;
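+	/* RSB_DIAGONAL_I: the full coordinate/transposition/symmetry/storage/
+	 * type tree repeats for the _dI_ kernel variants, which appear to
+	 * assume an implicitly stored (unit) diagonal, in place of the
+	 * explicit-diagonal (_dE_) variants above. */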
+		case(RSB_DIAGONAL_I):
+	switch(half_storage)
+	{
+	case(RSB_COORDINATE_TYPE_C):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
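+	/* Transposed (_tT_) dispatch for the implicit-diagonal case. */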
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%xd) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_COORDINATE_TYPE_H):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uauz_double_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uauz_float_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_float_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uauz_double_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uauz_double_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uauz_float_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_float_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uauz_double_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%xd) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this coordinate index (0x%xd) is not supported.\n",(rsb_int)half_storage);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this diagonal type (0x%xd) is not supported.\n",(rsb_int)diagonal);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	return errval;
+}
+
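A minimal decoding sketch of the generated kernel names, inferred from the
switch structure in the dispatchers above and below (the remaining "u"/"uG"
tokens are further generated variant tags, not decoded here):

    rsb__BCSR_spmv_uauz_double_C_u_tN_sU_dI_uG

    BCSR / BCOR       matrix storage format (RSB_MATRIX_STORAGE_*)
    spmv_uauz / uxua  operation variant ("uxua" kernels also take an alphap
                      scaling argument; "uauz" kernels do not)
    double / float / float_complex / double_complex
                      numerical type (RSB_NUMERICAL_TYPE_*)
    C / H             coordinate index width: rsb_coo_idx_t vs. rsb_half_idx_t
                      (RSB_COORDINATE_TYPE_*)
    tN / tT / tC      transposition (RSB_TRANSPOSITION_*)
    sU / sS / sH      symmetry (RSB_SYMMETRY_*)
    dI / dE           diagonal handling (RSB_DIAGONAL_*)
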
+rsb_err_t rsb__do_spmv_uxua(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,const rsb_trans_t transA)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION macro */
+{
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * A run-time kernel dispatching function.
+	 *
+	 * Selects the appropriate "spmv_uxua" kernel for each matrix block.
+	 *
+	 * Dispatching the right kernel at run time adds some per-block
+	 * overhead, which is most noticeable for matrices partitioned into
+	 * blocks of equal size; in that case a specialized function is
+	 * preferable.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
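+	/* Dispatch order below: diagonal, index width, transposition, symmetry, storage format, numerical type. */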
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_flags_t symmetry,diagonal;
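+	/* Prefer halfword (rsb_half_idx_t) coordinate indices when the dimensions fit, presumably to reduce index storage and memory traffic; otherwise use full rsb_coo_idx_t indices. */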
+#ifdef RSB_COORDINATE_TYPE_H
+	rsb_int_t half_storage = rsb__do_is_candidate_size_for_halfword(mtxAp->Mdim,mtxAp->mdim,/*nnz*/0,mtxAp->flags)?RSB_COORDINATE_TYPE_H:RSB_COORDINATE_TYPE_C;
+#else /* RSB_COORDINATE_TYPE_H */
+	rsb_int_t half_storage = RSB_COORDINATE_TYPE_C;
+#endif /* RSB_COORDINATE_TYPE_H */
+
+	if(!mtxAp /*|| !mtxAp->options */)
+		return RSB_ERR_BADARGS;
+
+	symmetry = rsb__get_symmetry_type_flag(mtxAp);
+	diagonal = rsb__get_diagonal_type_flag(mtxAp);
+
+	if(RSB_MATRIX_UNSUPPORTED_TYPE(mtxAp->typecode))
+		return RSB_ERR_BADARGS;
+
+	switch(diagonal)
+	{
+	case(RSB_DIAGONAL_E):
+	switch(half_storage)
+	{
+	case(RSB_COORDINATE_TYPE_C):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
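+		/* transA == RSB_TRANSPOSITION_C (conjugate transpose): dispatches
+		 * to the _tC_ kernel variants. */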
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%x) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
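+		/* RSB_COORDINATE_TYPE_H: submatrices indexed with halfword indices;
+		 * bindx is cast to rsb_half_idx_t* and the _H_ kernel variants are
+		 * used instead of the _C_ (full rsb_coo_idx_t) ones. */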
+		case(RSB_COORDINATE_TYPE_H):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%x) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this coordinate index (0x%x) is not supported.\n",(rsb_int)half_storage);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	break;
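+		/* RSB_DIAGONAL_I: repeats the whole half_storage/transA/symmetry/
+		 * storage/type dispatch for kernels tagged _dI_ instead of _dE_
+		 * (presumably implicit- vs. explicit-diagonal handling). A key to
+		 * the generated kernel names, as inferred from the switches above:
+		 *   rsb__<FMT>_spmv_uxua_<type>_<idx>_t<T>_s<S>_d<D>_uG
+		 *     FMT : BCOR | BCSR                                      (mtxAp->matrix_storage)
+		 *     type: double | float | float_complex | double_complex  (mtxAp->typecode)
+		 *     idx : C (rsb_coo_idx_t) | H (rsb_half_idx_t)           (half_storage)
+		 *     T   : N | T | C                                        (transA)
+		 *     S   : U | S | H                                        (symmetry)
+		 *     D   : E | I                                            (diagonal)
+		 */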
+		case(RSB_DIAGONAL_I):
+	switch(half_storage)
+	{
+	case(RSB_COORDINATE_TYPE_C):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%xd) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_COORDINATE_TYPE_H):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_uxua_double_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_uxua_float_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_float_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_uxua_double_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_uxua_double_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_uxua_float_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_float_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_uxua_double_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%xd) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this coordinate index (0x%xd) is not supported.\n",(rsb_int)half_storage);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this diagonal type (0x%xd) is not supported.\n",(rsb_int)diagonal);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	return errval;
+}
+
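+/*
+ * Reader's note (editorial, not part of the generated sources): each kernel
+ * name selected above encodes the dispatch decision, one token per switch
+ * level. For instance, in
+ *
+ *   rsb__BCSR_spmv_uxua_double_C_u_tN_sU_dI_uG
+ *
+ * "BCSR"/"BCOR" names the block storage format, "spmv_uxua" the operation,
+ * "double" the numerical type, "C"/"H" full- vs. half-word coordinate
+ * indices (note the rsb_coo_idx_t vs. rsb_half_idx_t casts on bindx),
+ * "tN"/"tT"/"tC" the transposition, "sU"/"sS"/"sH" the symmetry
+ * (unsymmetric/symmetric/hermitian), and "dI"/"dE" the diagonal handling,
+ * each matching an RSB_* case label of the enclosing switches. The
+ * remaining tokens are fixed in this build.
+ */
+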
+rsb_err_t rsb__do_spmv_unua(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION macro */
+{
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * A run-time kernel dispatching function.
+	 * 
+	 * Selects the appropriate "spmv_unua" kernel for each matrix block.
+	 * 
+	 * Note that dispatching the right kernel for each block incurs some
+	 * overhead, especially for matrices partitioned into same-size blocks;
+	 * in that case a specialized (non-dispatching) function is preferable.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
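+	/*
+	 * Editorial sketch (not generated code): the dispatch below walks up
+	 * to six nested switches (diagonal, coordinate index width,
+	 * transposition, symmetry, storage format, numerical type) before
+	 * reaching a single kernel call, so the per-block overhead amounts to
+	 * a few integer comparisons.
+	 */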
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_flags_t symmetry,diagonal;
+#ifdef RSB_COORDINATE_TYPE_H
+	rsb_int_t half_storage = rsb__do_is_candidate_size_for_halfword(mtxAp->Mdim,mtxAp->mdim,/*nnz*/0,mtxAp->flags)?RSB_COORDINATE_TYPE_H:RSB_COORDINATE_TYPE_C;
+#else /* RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_PREPROCESSOR_SYMBOL(`rsb_half_idx_t') */
+	rsb_int_t half_storage=RSB_COORDINATE_TYPE_C;
+#endif /* RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_PREPROCESSOR_SYMBOL(`rsb_half_idx_t') */
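+	/* Editorial note (assumption, not generated code): rsb_half_idx_t is the
+	 * narrower "halfword" index type, so when both matrix dimensions fit,
+	 * choosing RSB_COORDINATE_TYPE_H roughly halves the memory traffic spent
+	 * on stored indices; hence the candidate-size test above. */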
+
+	if(!mtxAp /*|| !mtxAp->options */)
+		return RSB_ERR_BADARGS;
+
+	symmetry = rsb__get_symmetry_type_flag(mtxAp);
+	diagonal = rsb__get_diagonal_type_flag(mtxAp);
+
+	if(RSB_MATRIX_UNSUPPORTED_TYPE(mtxAp->typecode))
+		return RSB_ERR_BADARGS;
+
+	switch(diagonal)
+	{
+	case(RSB_DIAGONAL_E):
+	switch(half_storage)
+	{
+	case(RSB_COORDINATE_TYPE_C):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
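+	/* Transposed dispatch: the same symmetry/storage/type tree repeats,
+	 * resolving to the _tT_ kernel variants. */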
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
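+	/* Conjugate-transpose dispatch (_tC_ kernels). For the two real types this
+	 * presumably coincides numerically with _tT_, but separate kernels are
+	 * still generated and selected here. */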
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%x) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
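+	/* Half-word index branch: bindx is cast to rsb_half_idx_t* instead of
+	 * rsb_coo_idx_t* and the _H_ kernel family is selected; the
+	 * transposition/symmetry/storage/type tree otherwise repeats unchanged. */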
+		case(RSB_COORDINATE_TYPE_H):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%x) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this coordinate index (0x%x) is not supported.\n",(rsb_int)half_storage);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	break;
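+	/* RSB_DIAGONAL_I: the entire dispatch tree above repeats with _dI_ kernels,
+	 * presumably for matrices whose (unit) diagonal is implicit rather than
+	 * stored explicitly. */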
+		case(RSB_DIAGONAL_I):
+	switch(half_storage)
+	{
+	case(RSB_COORDINATE_TYPE_C):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%x) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_COORDINATE_TYPE_H):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_unua_double_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_unua_float_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_float_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_unua_double_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_unua_double_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_unua_float_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_float_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_unua_double_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%x) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this coordinate index (0x%x) is not supported.\n",(rsb_int)half_storage);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this diagonal type (0x%x) is not supported.\n",(rsb_int)diagonal);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_spmv_sasa(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION macro */
+{
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * A run-time kernel dispatching function.
+	 * 
+	 * Selects the appropriate "spmv_sasa" kernel for each matrix block.
+	 * 
+	 * Note that dispatching the right kernel at run time for each block
+	 * incurs some overhead, especially for matrices partitioned into
+	 * blocks of the same size.
+	 * 
+	 * In that case, a specialized (non-dispatching) function is preferable.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
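+	/*
+	 * Dispatch order, outermost switch to innermost: diagonal kind,
+	 * coordinate index width, transposition, symmetry, storage format
+	 * (BCOR/BCSR), and finally numerical type.
+	 *
+	 * A minimal usage sketch (x and y are hypothetical arrays, sized
+	 * and typed to match mtxAp; they are not declared in this file):
+	 *
+	 *   rsb_err_t err = rsb__do_spmv_sasa(mtxAp, x, y, 1, 1, RSB_TRANSPOSITION_N);
+	 *   if(err != RSB_ERR_NO_ERROR)
+	 *   	RSB_ERROR("spmv_sasa dispatch failed.\n");
+	 */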
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_flags_t symmetry,diagonal;
+#ifdef RSB_COORDINATE_TYPE_H
+	rsb_int_t half_storage = rsb__do_is_candidate_size_for_halfword(mtxAp->Mdim,mtxAp->mdim,/*nnz*/0,mtxAp->flags)?RSB_COORDINATE_TYPE_H:RSB_COORDINATE_TYPE_C;
+#else /* RSB_COORDINATE_TYPE_H */
+	rsb_int_t half_storage = RSB_COORDINATE_TYPE_C;
+#endif /* RSB_COORDINATE_TYPE_H */
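+	/* When RSB_COORDINATE_TYPE_H is compiled in, halfword (rsb_half_idx_t)
+	 * indices are selected whenever the submatrix dimensions are small
+	 * enough; this is why the kernel calls below cast mtxAp->bindx to
+	 * either rsb_half_idx_t* or rsb_coo_idx_t*. */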
+
+	if(!mtxAp /*|| !mtxAp->options */)
+		return RSB_ERR_BADARGS;
+
+	symmetry = rsb__get_symmetry_type_flag(mtxAp);
+	diagonal = rsb__get_diagonal_type_flag(mtxAp);
+
+	if(RSB_MATRIX_UNSUPPORTED_TYPE(mtxAp->typecode))
+		return RSB_ERR_BADARGS;
+
+	switch(diagonal)
+	{
+	case(RSB_DIAGONAL_E):
+	switch(half_storage)
+	{
+	case(RSB_COORDINATE_TYPE_C):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%xd) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_COORDINATE_TYPE_H):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
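+	/* Hermitian (_sH_) dispatch: presumably the symmetric kernels with the
+	 * mirrored triangle conjugated for the two complex typecodes. */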
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%x) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this coordinate index (0x%x) is not supported.\n",(rsb_int)half_storage);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	break;
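+	/* RSB_DIAGONAL_I: the same coordinate/transposition/symmetry dispatch as
+	 * the explicit-diagonal (_dE_) tree above, routed to _dI_ kernels --
+	 * apparently for submatrices whose unit diagonal is implicit rather than
+	 * stored. */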
+		case(RSB_DIAGONAL_I):
+	switch(half_storage)
+	{
+	case(RSB_COORDINATE_TYPE_C):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
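+	/* Symmetric (_sS_) variants of the untransposed, implicit-diagonal path;
+	 * each stored off-diagonal block presumably also contributes its
+	 * transpose. */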
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
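+	/* Transposed (tT) dispatch for the full-index, implicit-diagonal case. */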
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
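+	/* Conjugate-transposed (tC) dispatch; for the two real typecodes this
+	 * presumably coincides with the tT kernels above. */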
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%x) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
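+	/* RSB_COORDINATE_TYPE_H: bindx is reinterpreted as rsb_half_idx_t, so the
+	 * _H_ kernels read narrower (presumably 16-bit) column indices, reducing
+	 * index bandwidth on sufficiently small submatrices. */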
+		case(RSB_COORDINATE_TYPE_H):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
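+	/* Transposed (tT) dispatch, halfword-index, implicit-diagonal variant. */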
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
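+	/* Conjugate-transposed (tC) dispatch, halfword-index, implicit-diagonal
+	 * variant. */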
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sasa_double_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sasa_float_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_float_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sasa_double_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sasa_double_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sasa_float_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_float_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sasa_double_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%xd) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this coordinate index (0x%xd) is not supported.\n",(rsb_int)half_storage);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this diagonal type (0x%xd) is not supported.\n",(rsb_int)diagonal);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	return errval;
+}
+
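+/*
+ * Editorial note on the generated kernel names dispatched in this file
+ * (a reading of the switch structure below, not an authoritative spec):
+ * BCOR/BCSR name the block storage format; the C/H infix follows
+ * RSB_COORDINATE_TYPE_C/H (full- vs. halfword indices, cf. the
+ * rsb_coo_idx_t/rsb_half_idx_t casts); tN/tT/tC follow
+ * RSB_TRANSPOSITION_N/T/C; sU/sS/sH follow RSB_SYMMETRY_U/S/H;
+ * dE/dI appear to follow RSB_DIAGONAL_E/I; and uL/uU match the
+ * rsb__is_lower_triangle() branch (lower vs. upper triangle).
+ */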
+rsb_err_t rsb__do_spsv_uxua(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION macro */
+{
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * A run-time kernel dispatching function.
+	 * 
+	 * Will use the right "spsv_uxua" kernel for each matrix block.
+	 * 
+	 * However, dispatching the appropriate kernel for each block incurs
+	 * some overhead, especially for matrices partitioned into same-size
+	 * blocks.
+	 *
+	 * In that case, a specialized function is preferable.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
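+	/*
+	 * Illustrative call (hypothetical caller, for orientation only;
+	 * mtxAp is assumed to hold a triangular matrix, x and y to be
+	 * arrays of its numerical type):
+	 *
+	 *   rsb_err_t err = rsb__do_spsv_uxua(mtxAp, x, y, RSB_TRANSPOSITION_N);
+	 *   if (err != RSB_ERR_NO_ERROR)
+	 *           return err;
+	 */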
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_flags_t symmetry,diagonal;
+#ifdef RSB_COORDINATE_TYPE_H
+	rsb_int_t half_storage = rsb__do_is_candidate_size_for_halfword(mtxAp->Mdim,mtxAp->mdim,/*nnz*/0,mtxAp->flags)?RSB_COORDINATE_TYPE_H:RSB_COORDINATE_TYPE_C;
+#else /* RSB_COORDINATE_TYPE_H */
+	rsb_int_t half_storage = RSB_COORDINATE_TYPE_C;
+#endif /* RSB_COORDINATE_TYPE_H */
+
+	if(!mtxAp /*|| !mtxAp->options */)
+		return RSB_ERR_BADARGS;
+
+	symmetry = rsb__get_symmetry_type_flag(mtxAp);
+	diagonal = rsb__get_diagonal_type_flag(mtxAp);
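+	/* The dispatch below nests switches in a fixed order: diagonal flag,
+	 * coordinate index width, transposition, symmetry, storage format,
+	 * and finally the numerical type. */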
+
+	if(RSB_MATRIX_UNSUPPORTED_TYPE(mtxAp->typecode))
+		return RSB_ERR_BADARGS;
+
+	switch(diagonal)
+	{
+	case(RSB_DIAGONAL_E):
+	switch(half_storage)
+	{
+	case(RSB_COORDINATE_TYPE_C):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%xd) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
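+	/* The RSB_COORDINATE_TYPE_H branch below repeats the same
+	   transposition/symmetry/storage/type dispatch, but for halfword column
+	   indices: note the _H_ kernel suffix and the cast of bindx to
+	   rsb_half_idx_t* instead of rsb_coo_idx_t*. */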
+		case(RSB_COORDINATE_TYPE_H):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
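+	/* RSB_TRANSPOSITION_T: the same dispatch, selecting the _tT_ kernel
+	   variants, presumably the transposed solve. */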
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
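+	/* RSB_TRANSPOSITION_C: the same dispatch, selecting the _tC_ kernel
+	   variants, presumably the conjugate-transpose solve. */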
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%xd) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this coordinate index (0x%xd) is not supported.\n",(rsb_int)half_storage);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	break;
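+	/* The RSB_DIAGONAL_I branch below mirrors the whole dispatch tree once
+	   more with the _dI_ kernel variants, which by the suffix scheme appear to
+	   assume an implicit unit diagonal (versus _dE_, explicit diagonal, above). */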
+		case(RSB_DIAGONAL_I):
+	switch(half_storage)
+	{
+	case(RSB_COORDINATE_TYPE_C):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
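+	/* Symmetric (sS) variants of the same non-transposed dispatch; the
+	 * kernels presumably still solve over the stored triangle. */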
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
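+	/* Hermitian (sH) variants; for the two real types these presumably
+	 * coincide with the symmetric kernels. */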
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
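+	/* Transposed solve (tT): the same symmetry/format/type dispatch tree,
+	 * selecting the transposed-operation kernels. */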
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
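+	/* Conjugate-transposed solve (tC); presumably this coincides with tT
+	 * for the real types and differs only for the complex ones. */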
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_C_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_C_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_C_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_C_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%x) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
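+	/* Halfword-index branch: RSB_COORDINATE_TYPE_H repeats the whole
+	 * transposition/symmetry/format/type dispatch, but for kernels taking
+	 * narrower rsb_half_idx_t column indices (note the (rsb_half_idx_t*)
+	 * cast on bindx below). */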
+		case(RSB_COORDINATE_TYPE_H):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_H_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_H_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	else
+		errval = rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_H_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_H_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	else
+		errval = rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%x) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this coordinate index type (0x%x) is not supported.\n",(rsb_int)half_storage);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this diagonal type (0x%x) is not supported.\n",(rsb_int)diagonal);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	return errval;
+}
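+/*
+ * Editorial note on the generated kernel names (a best-effort decode, not
+ * upstream documentation): a suffix such as "double_complex_H_u_tN_sU_dI_uL"
+ * appears to encode, in order: the numerical type; H/C for halfword
+ * (rsb_half_idx_t) vs. full (rsb_coo_idx_t) coordinates; tN/tT/tC for the
+ * RSB_TRANSPOSITION_{N,T,C} cases; sU/sS/sH for the RSB_SYMMETRY_{U,S,H}
+ * cases; dE/dI for the diagonal handling (dI presumably the implicit-unit
+ * diagonal); and uL/uU/uG for the lower-triangular, upper-triangular, or
+ * general loop variants, matching the rsb__is_lower_triangle() branches
+ * above.
+ */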
+
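+/*
+ * Illustrative usage sketch for the dispatcher below; this is an
+ * editor-added example, not part of the generated sources. It assumes a
+ * hypothetical, already-assembled matrix handle `mtxAp` holding double
+ * entries, plus caller-provided arrays x and y of suitable length:
+ *
+ *   const double alpha = 1.0;
+ *   rsb_err_t err = rsb__do_spmv_sxsa(mtxAp, x, y, &alpha,
+ *                                     1, 1, RSB_TRANSPOSITION_N);
+ *   if(err != RSB_ERR_NO_ERROR)
+ *       { ... handle the error ... }
+ *
+ * The incx/incy arguments appear to follow the BLAS stride convention
+ * (here 1, i.e. contiguous vectors).
+ */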
+rsb_err_t rsb__do_spmv_sxsa(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION macro */
+{
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * A run-time kernel dispatching function.
+	 * 
+	 * Selects the appropriate "spmv_sxsa" kernel for each matrix block.
+	 * 
+	 * Note, however, that dispatching the right kernel for each block
+	 * carries some overhead, especially for matrices partitioned into
+	 * same-sized blocks; in that case a specialized function is
+	 * preferable.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_flags_t symmetry,diagonal;
+#ifdef RSB_COORDINATE_TYPE_H
+	rsb_int_t half_storage = rsb__do_is_candidate_size_for_halfword(mtxAp->Mdim,mtxAp->mdim,/*nnz*/0,mtxAp->flags)?RSB_COORDINATE_TYPE_H:RSB_COORDINATE_TYPE_C;
+#else /* !RSB_COORDINATE_TYPE_H */
+	rsb_int_t half_storage = RSB_COORDINATE_TYPE_C;
+#endif /* RSB_COORDINATE_TYPE_H */
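+	/* Editorial note (assumption): halfword coordinates roughly halve the
+	 * index storage and memory traffic, so they are preferred whenever
+	 * rsb__do_is_candidate_size_for_halfword() deems the dimensions small
+	 * enough to fit in rsb_half_idx_t. */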
+
+	if(!mtxAp /*|| !mtxAp->options */)
+		return RSB_ERR_BADARGS;
+
+	symmetry = rsb__get_symmetry_type_flag(mtxAp);
+	diagonal = rsb__get_diagonal_type_flag(mtxAp);
+
+	if(RSB_MATRIX_UNSUPPORTED_TYPE(mtxAp->typecode))
+		return RSB_ERR_BADARGS;
+
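+	/* The generated dispatch below nests its switches in a fixed order:
+	 * diagonal -> coordinate width -> transposition -> symmetry ->
+	 * storage format -> numerical type, so that each leaf invokes exactly
+	 * one fully specialized kernel. */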
+	switch(diagonal)
+	{
+	case(RSB_DIAGONAL_E):
+	switch(half_storage)
+	{
+	case(RSB_COORDINATE_TYPE_C):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
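+	/* transA == RSB_TRANSPOSITION_T: same symmetry/storage/type fan-out as the
+	 * untransposed case above, selecting the ..._tT_... kernels.
+	 * Apparent decoding of the generated kernel names (inferred from this
+	 * dispatcher, not from upstream documentation):
+	 *   rsb__<BCOR|BCSR>_spmv_sxsa_<type>_<C|H>_u_t<N|T|C>_s<U|S|H>_d<E|I>_uG
+	 * BCOR/BCSR is the block storage format (BCOR kernels additionally receive
+	 * mtxAp->nnz); C/H selects full-word (rsb_coo_idx_t) or half-word
+	 * (rsb_half_idx_t) column indices; tN/tT/tC the transposition; sU/sS/sH the
+	 * symmetry (unsymmetric/symmetric/Hermitian); dI the implicit-diagonal case,
+	 * with dE presumably its explicit-diagonal counterpart. */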
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
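+	/* transA == RSB_TRANSPOSITION_C: conjugate-transpose dispatch, selecting the ..._tC_... kernels. */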
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%xd) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
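+	/* half_storage == RSB_COORDINATE_TYPE_H: same dispatch tree once more, but the
+	 * kernels take half-word column indices: note the (rsb_half_idx_t*) cast of
+	 * mtxAp->bindx in place of (rsb_coo_idx_t*). */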
+		case(RSB_COORDINATE_TYPE_H):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
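+	/* transposed dispatch with half-word indices (..._H_u_tT_... kernels). */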
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
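+	/* conjugate-transpose dispatch with half-word indices (..._H_u_tC_... kernels). */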
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%xd) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this coordinate index (0x%xd) is not supported.\n",(rsb_int)half_storage);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	break;
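+	/* RSB_DIAGONAL_I: implicit (unit) diagonal variants; the dispatch below mirrors
+	 * the explicit-diagonal tree above, selecting ..._dI_... instead of ..._dE_... kernels. */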
+		case(RSB_DIAGONAL_I):
+	switch(half_storage)
+	{
+	case(RSB_COORDINATE_TYPE_C):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%xd) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_COORDINATE_TYPE_H):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_spmv_sxsa_double_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_spmv_sxsa_float_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_spmv_sxsa_double_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_spmv_sxsa_float_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%xd) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this coordinate index (0x%xd) is not supported.\n",(rsb_int)half_storage);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this diagonal type (0x%xd) is not supported.\n",(rsb_int)diagonal);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	return errval;
+}
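+/*
+ * Editorial note (a sketch of the generated naming scheme, not authoritative):
+ * the kernel symbols dispatched above encode their specialization in the
+ * suffix, e.g. rsb__BCOR_spmv_sxsa_double_C_u_tN_sU_dI_uG:
+ *	BCOR / BCSR	block-coordinate or block-CSR storage
+ *	double		numerical type (float, double, float_complex, double_complex)
+ *	C / H		full (rsb_coo_idx_t) or halfword (rsb_half_idx_t) indices
+ *	tN / tT / tC	transposition: none, transpose, conjugate transpose
+ *	sU / sS / sH	symmetry: unsymmetric, symmetric, Hermitian
+ *	dI / dE		diagonal handling, mirroring RSB_DIAGONAL_I / RSB_DIAGONAL_E
+ *	uG / uL / uU	general, lower-triangle, or upper-triangle variant
+ * The nested switches select exactly one such specialization per call; no
+ * kernel name is ever assembled at run time.
+ */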
+
+rsb_err_t rsb__do_spsv_sxsx(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION macro */
+{
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * A run-time kernel dispatching function.
+	 * 
+	 * Will use the right "spsv_sxsx" kernel for each matrix block.
+	 * 
+	 * However, dispatching the right kernel function for each block
+	 * incurs some overhead, especially for matrices partitioned into
+	 * same-sized blocks.
+	 * 
+	 * In that case, a specialized function is preferable.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
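+	/*
+	 * A minimal usage sketch (illustrative only; x, y and alpha are
+	 * hypothetical caller-provided buffers of the matrix numerical type):
+	 *
+	 *   double alpha = 1.0;
+	 *   rsb_err_t err = rsb__do_spsv_sxsx(mtxAp, x, y, &alpha, 1, 1, RSB_TRANSPOSITION_N);
+	 */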
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_flags_t symmetry,diagonal;
+#ifdef RSB_COORDINATE_TYPE_H
+	rsb_int_t half_storage = rsb__do_is_candidate_size_for_halfword(mtxAp->Mdim,mtxAp->mdim,/*nnz*/0,mtxAp->flags)?RSB_COORDINATE_TYPE_H:RSB_COORDINATE_TYPE_C;
+#else /* RSB_COORDINATE_TYPE_H */
+	rsb_int_t half_storage = RSB_COORDINATE_TYPE_C;
+#endif /* RSB_COORDINATE_TYPE_H */
+
+	if(!mtxAp /*|| !mtxAp->options */)
+		return RSB_ERR_BADARGS;
+
+	symmetry = rsb__get_symmetry_type_flag(mtxAp);
+	diagonal = rsb__get_diagonal_type_flag(mtxAp);
+
+	if(RSB_MATRIX_UNSUPPORTED_TYPE(mtxAp->typecode))
+		return RSB_ERR_BADARGS;
+
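+	/*
+	 * The switch cascade below resolves one concrete kernel out of the
+	 * tuple (diagonal, index width, transA, symmetry, storage, typecode);
+	 * e.g. (dE, C, tN, sU, BCOR, double) reaches
+	 * rsb__BCOR_spsv_sxsx_double_C_u_tN_sU_dE_uL or ..._uU, depending on
+	 * whether the stored triangle is lower or upper.
+	 */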
+	switch(diagonal)
+	{
+	case(RSB_DIAGONAL_E):
+	switch(half_storage)
+	{
+	case(RSB_COORDINATE_TYPE_C):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
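+		/* RSB_SYMMETRY_S: symmetric matrix */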
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
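+		/* RSB_SYMMETRY_H: Hermitian matrix */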
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
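+		/* transA == RSB_TRANSPOSITION_T: transposed solve */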
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
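+		/* transA == RSB_TRANSPOSITION_C: conjugate-transposed solve */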
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%xd) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
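+		/* RSB_COORDINATE_TYPE_H: halfword (rsb_half_idx_t) indices */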
+		case(RSB_COORDINATE_TYPE_H):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
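+	/* Transposed solve: same dispatch tree, _tT kernel variants. */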
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
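+	/* Conjugate-transposed solve: _tC kernel variants. */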
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sU_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sU_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sS_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sS_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sH_dE_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sH_dE_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%xd) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this coordinate index (0x%xd) is not supported.\n",(rsb_int)half_storage);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	break;
+		case(RSB_DIAGONAL_I):
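+	/* Implicit (unit) diagonal: _dI kernel variants instead of _dE. */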
+	switch(half_storage)
+	{
+	case(RSB_COORDINATE_TYPE_C):
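+	/* Full-width index branch: bindx used as rsb_coo_idx_t*. */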
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
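+	/* Each symmetry case below repeats the same two-level dispatch over
+	 * storage format and numerical type as the RSB_SYMMETRY_U case above;
+	 * note the BCOR kernels take an extra mtxAp->nnz argument that the
+	 * BCSR kernels omit. */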
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
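+	/* The generated kernel names encode the dispatch path: t{N,T,C} is the
+	 * transposition, s{U,S,H} the symmetry, and the trailing u{L,U} the
+	 * lower/upper-triangle variant selected via rsb__is_lower_triangle()
+	 * above. */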
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
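+	/* Conjugate-transpose case: structurally identical to the N and T cases
+	 * above, selecting the _tC_ kernel variants instead. */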
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_C_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_C_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_C_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%x) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
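+	/* Halfword-coordinate case: the entire transposition/symmetry/storage/
+	 * type dispatch repeats with the _H_ kernel variants, whose bindx
+	 * argument is cast to the narrower rsb_half_idx_t instead of
+	 * rsb_coo_idx_t. */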
+		case(RSB_COORDINATE_TYPE_H):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" is currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" is currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" is currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" is currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" is currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" is currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" is currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" is currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" is currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" is currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" is currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" is currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" is currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" is currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" is currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" is currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sU_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sU_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" is currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" is currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" is currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sS_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sS_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" is currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" is currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_H_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_H_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	else
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" is currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_H_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sH_dI_uL(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	else
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sH_dI_uU(	mtxAp->VA,rhs,out,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,alphap,incx,incy);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" is currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" is currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%x) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this coordinate index (0x%x) is not supported.\n",(rsb_int)half_storage);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this diagonal type (0x%x) is not supported.\n",(rsb_int)diagonal);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	return errval;
+}
+
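+/*
+ * A note on the generated kernel naming (a reading aid, inferred from the
+ * dispatch switches above): each field of a name such as
+ * rsb__BCSR_spsv_sxsx_double_H_u_tN_sU_dI_uL selects one specialization:
+ * BCOR/BCSR for the block storage format, "spsv" for the operation, the
+ * numerical type (double, float, float_complex, double_complex), "H"/"C"
+ * for halfword (rsb_half_idx_t) vs. full-word (rsb_coo_idx_t) indices,
+ * "tN"/"tT"/"tC" for the RSB_TRANSPOSITION_N/T/C cases, "sU"/"sS"/"sH" for
+ * the RSB_SYMMETRY_U/S/H cases, "dI"/"dE" for the RSB_DIAGONAL_I/E cases,
+ * and "uL"/"uU" for the lower/upper triangle chosen via
+ * rsb__is_lower_triangle(). Applications are expected to reach these
+ * kernels only through the public interface; a minimal calling sketch,
+ * assuming the rsb_spsv() prototype declared in this release's rsb.h:
+ *
+ *   const double alpha = 1.0;
+ *   rsb_err_t errval = rsb_spsv(RSB_TRANSPOSITION_N, &alpha, mtxTp, Xp, 1, Yp, 1);
+ */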
+rsb_err_t rsb__do_infty_norm(const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION macro */
+{
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * A run-time kernel dispatching function.
+	 *
+	 * Selects the appropriate "infty_norm" kernel for each matrix block.
+	 *
+	 * Dispatching the right kernel for every block carries some overhead,
+	 * especially for matrices partitioned into same-size blocks; in that
+	 * case a specialized (non-dispatching) function is preferable.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
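+	/* A minimal usage sketch (an illustration only, assuming the public
+	 * rsb_mtx_get_nrm() entry point and RSB_EXTF_NORM_INF flag declared in
+	 * this release's rsb.h), preferable to invoking this dispatcher directly:
+	 *
+	 *   double nrmA = 0.0;
+	 *   rsb_err_t err = rsb_mtx_get_nrm(mtxAp, &nrmA, RSB_EXTF_NORM_INF);
+	 */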
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_flags_t symmetry,diagonal;
+#ifdef RSB_COORDINATE_TYPE_H
+	rsb_int_t half_storage = rsb__do_is_candidate_size_for_halfword(mtxAp->Mdim,mtxAp->mdim,/*nnz*/0,mtxAp->flags)?RSB_COORDINATE_TYPE_H:RSB_COORDINATE_TYPE_C;
+#else /* RSB_COORDINATE_TYPE_H */
+	rsb_int_t half_storage = RSB_COORDINATE_TYPE_C;
+#endif /* RSB_COORDINATE_TYPE_H */
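+	/* When rsb__do_is_candidate_size_for_halfword() accepts the leaf
+	 * dimensions, the "H" kernels are dispatched; these read mtxAp->bindx
+	 * through a (rsb_half_idx_t*) cast instead of (rsb_coo_idx_t*),
+	 * roughly halving the index bandwidth. */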
+
+	if(!mtxAp /*|| !mtxAp->options */)
+		return RSB_ERR_BADARGS;
+
+	symmetry = rsb__get_symmetry_type_flag(mtxAp);
+	diagonal = rsb__get_diagonal_type_flag(mtxAp);
+
+	if(RSB_MATRIX_UNSUPPORTED_TYPE(mtxAp->typecode))
+		return RSB_ERR_BADARGS;
+
+	switch(diagonal)
+	{
+	case(RSB_DIAGONAL_E):
+	switch(half_storage)
+	{
+	case(RSB_COORDINATE_TYPE_C):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_C_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_C_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" is currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_C_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_C_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" is currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" is currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_C_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_C_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" is currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_C_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_C_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" is currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" is currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_C_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_C_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_C_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_C_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
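+		/* Transposed-operator branch (tT): the same symmetry / storage / type
+		 * dispatch as the tN branch above. Each kernel name encodes the
+		 * dispatch dimensions, e.g. rsb__BCOR_infty_norm_double_C_u_tT_sU_dE_uG:
+		 * storage (BCOR/BCSR), numerical type, index width (C/H),
+		 * transposition (tN/tT/tC), symmetry (sU/sS/sH), diagonal (dE/dI). */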
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_C_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_C_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_C_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_C_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_C_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_C_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_C_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_C_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_C_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_C_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_C_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_C_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
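+		/* Conjugate-transpose branch (tC); for the two real types the tC
+		 * kernels should coincide with the tT ones, but the dispatch is
+		 * generated uniformly over all four numerical types. */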
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_C_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_C_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_C_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_C_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_C_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_C_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_C_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_C_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_C_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_C_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_C_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_C_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%x) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
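+		/* RSB_COORDINATE_TYPE_H: identical dispatch for submatrices stored
+		 * with halfword column indices; bindx is cast to rsb_half_idx_t*
+		 * and the _H_ kernel variants are selected. */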
+		case(RSB_COORDINATE_TYPE_H):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_H_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_H_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_H_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_H_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_H_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_H_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_H_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_H_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_H_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_H_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_H_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_H_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_H_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_H_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_H_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_H_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_H_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_H_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_H_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_H_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_H_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_H_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_H_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_H_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_H_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_H_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_H_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_H_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_H_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_H_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_H_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_H_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_H_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_H_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_H_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_H_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%x) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this coordinate index (0x%x) is not supported.\n",(rsb_int)half_storage);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	break;
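+		/* RSB_DIAGONAL_I: the whole dispatch tree is repeated for the
+		 * implicit-diagonal case (presumably a unit diagonal not stored
+		 * explicitly), selecting the dI kernel variants in place of dE. */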
+		case(RSB_DIAGONAL_I):
+	switch(half_storage)
+	{
+	case(RSB_COORDINATE_TYPE_C):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_C_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_C_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_C_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_C_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_C_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_C_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_C_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_C_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_C_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_C_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_C_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_C_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_C_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_C_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_C_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_C_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_C_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_C_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_C_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_C_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_C_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_C_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_C_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_C_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_C_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_C_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_C_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_C_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_C_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_C_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_C_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_C_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_C_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_C_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_C_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_C_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%x) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+		case(RSB_COORDINATE_TYPE_H):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_H_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_H_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_H_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_H_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_H_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_H_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_H_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_H_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_H_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_H_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_H_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_H_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_H_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_H_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_H_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_H_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_H_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_H_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_H_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_H_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_H_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_H_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_H_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_H_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_H_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_H_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_H_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_H_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_H_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_H_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_H_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_H_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_infty_norm_double_H_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_infty_norm_float_H_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_float_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_infty_norm_double_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_infty_norm_double_H_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_infty_norm_float_H_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_float_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_infty_norm_double_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%x) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this coordinate index (0x%x) is not supported.\n",(rsb_int)half_storage);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this diagonal type (0x%x) is not supported.\n",(rsb_int)diagonal);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	return errval;
+}
+
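+/* A hypothetical caller sketch (an editorial assumption, not upstream code):
+ * rsb__do_infty_norm() above is taken to share the argument convention of
+ * rsb__do_rowssums() below, with the kernels accumulating per-row absolute
+ * value sums into row_sums[]; the infinity norm is then their maximum:
+ *
+ *	double row_sums[NR];	// NR: the row count, assumed known
+ *	double norm = 0.0;
+ *	rsb_coo_idx_t i;
+ *	if(rsb__do_infty_norm(mtxAp,RSB_TRANSPOSITION_N,row_sums) == RSB_ERR_NO_ERROR)
+ *		for(i = 0; i < NR; ++i)
+ *			norm = (row_sums[i] > norm) ? row_sums[i] : norm;
+ */
+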
+rsb_err_t rsb__do_rowssums(const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION macro */
+{
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * A run-time kernel dispatching function.
+	 *
+	 * Selects the appropriate "rowssums" kernel for each matrix block.
+	 *
+	 * Dispatching the right kernel at run time adds some overhead per
+	 * block, which is most noticeable for matrices partitioned into
+	 * same-size blocks; in that case a specialized function is preferable.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_flags_t symmetry,diagonal;
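+	/* Note (editorial assumption from the check below): when both matrix
+	 * dimensions fit a halfword index, the kernels addressing bindx via
+	 * the narrower rsb_half_idx_t (RSB_COORDINATE_TYPE_H) are dispatched,
+	 * reducing the memory traffic spent on indices. */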
+#ifdef RSB_COORDINATE_TYPE_H
+	rsb_int_t half_storage = rsb__do_is_candidate_size_for_halfword(mtxAp->Mdim,mtxAp->mdim,/*nnz*/0,mtxAp->flags)?RSB_COORDINATE_TYPE_H:RSB_COORDINATE_TYPE_C;
+#else /* RSB_COORDINATE_TYPE_H */
+	rsb_int_t half_storage = RSB_COORDINATE_TYPE_C;
+#endif /* RSB_COORDINATE_TYPE_H */
+
+	if(!mtxAp /*|| !mtxAp->options */)
+		return RSB_ERR_BADARGS;
+
+	symmetry = rsb__get_symmetry_type_flag(mtxAp);
+	diagonal = rsb__get_diagonal_type_flag(mtxAp);
+
+	if(RSB_MATRIX_UNSUPPORTED_TYPE(mtxAp->typecode))
+		return RSB_ERR_BADARGS;
+
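+	/* Dispatch nesting, outermost to innermost: diagonal type, coordinate
+	 * index width, transposition, symmetry, block storage format (BCOR or
+	 * BCSR), and numerical type; each leaf calls one generated kernel. */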
+	switch(diagonal)
+	{
+	case(RSB_DIAGONAL_E):
+	switch(half_storage)
+	{
+	case(RSB_COORDINATE_TYPE_C):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_C_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_C_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_C_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_C_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_C_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_C_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_C_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_C_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_C_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_C_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_C_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_C_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_C_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_C_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_C_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_C_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_C_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_C_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_C_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_C_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_C_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_C_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_C_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_C_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_C_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_C_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_C_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_C_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_C_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_C_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_C_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_C_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_C_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_C_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_C_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_C_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%xd) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_COORDINATE_TYPE_H):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_H_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_H_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_H_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_H_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_H_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_H_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_H_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_H_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_H_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_H_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_H_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_H_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_H_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_H_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_H_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_H_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_H_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_H_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_H_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_H_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_H_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_H_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_H_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_H_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_H_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_H_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_H_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_H_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_H_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_H_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_H_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_H_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_H_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_H_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_H_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_H_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%x) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this coordinate index (0x%x) is not supported.\n",(rsb_int)half_storage);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	break;
+		case(RSB_DIAGONAL_I):
+	switch(half_storage)
+	{
+	case(RSB_COORDINATE_TYPE_C):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_C_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_C_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_C_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_C_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_C_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_C_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_C_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_C_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_C_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_C_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_C_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_C_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_C_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_C_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_C_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_C_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_C_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_C_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_C_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_C_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_C_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_C_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_C_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_C_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_C_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_C_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_C_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_C_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_C_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_C_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_C_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_C_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_C_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_C_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_C_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_C_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%x) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_COORDINATE_TYPE_H):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_H_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_H_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_H_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_H_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_H_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_H_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_H_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_H_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_H_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_H_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_H_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_H_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_H_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_H_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_H_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_H_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_H_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_H_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_H_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_H_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_H_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_H_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_H_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_H_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_H_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_H_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_H_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_H_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_H_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_H_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_H_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_H_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_rowssums_double_H_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_rowssums_float_H_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_rowssums_float_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_rowssums_double_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_rowssums_double_H_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_rowssums_float_H_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_rowssums_float_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_rowssums_double_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,row_sums,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%xd) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this coordinate index (0x%xd) is not supported.\n",(rsb_int)half_storage);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this diagonal type (0x%xd) is not supported.\n",(rsb_int)diagonal);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	return errval;
+}
+
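+/*
+ * A reading aid for the generated kernel names dispatched above and below,
+ * inferred from the surrounding switch structure rather than stated by
+ * upstream: in e.g. rsb__BCOR_rowssums_double_H_u_tC_sS_dI_uG, "BCOR"/"BCSR"
+ * is the block storage format, "rowssums" the operation, "double" the
+ * numerical type, "H"/"C" the coordinate index width (rsb_half_idx_t vs.
+ * rsb_coo_idx_t), "tN"/"tT"/"tC" the transposition, "sU"/"sS"/"sH" the
+ * symmetry, and "dE"/"dI" the diagonal handling; the remaining tokens
+ * apparently encode further M4 generator parameters (unrolling etc.).
+ */
+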
+rsb_err_t rsb__do_scale(struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,const void * scale_factors)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION macro */
+{
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * A run-time kernel dispatching function.
+	 * 
+	 * Will invoke the appropriate "scale" kernel for each matrix block.
+	 * 
+	 * Note that dispatching the right kernel for each block incurs some
+	 * overhead, especially for matrices partitioned into same-size blocks.
+	 * 
+	 * In that case, a specialized function is preferable.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
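+	/*
+	 * Minimal usage sketch (hypothetical caller code, not part of this
+	 * file; assumes a valid mtxAp and, presumably, one scale factor per
+	 * row of the matrix):
+	 *
+	 *   const double sf[] = { 2.0, 0.5, 1.0 };  // one factor per row
+	 *   rsb_err_t err = rsb__do_scale(mtxAp, RSB_TRANSPOSITION_N, sf);
+	 *   if (err != RSB_ERR_NO_ERROR)
+	 *       ;                                   // handle the error
+	 */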
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_flags_t symmetry,diagonal;
+#ifdef RSB_COORDINATE_TYPE_H
+	rsb_int_t half_storage = rsb__do_is_candidate_size_for_halfword(mtxAp->Mdim,mtxAp->mdim,/*nnz*/0,mtxAp->flags)?RSB_COORDINATE_TYPE_H:RSB_COORDINATE_TYPE_C;
+#else /* RSB_COORDINATE_TYPE_H */
+	rsb_int_t half_storage = RSB_COORDINATE_TYPE_C;
+#endif /* RSB_COORDINATE_TYPE_H */
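+	/* When the submatrix dimensions fit rsb_half_idx_t (a halfword index
+	 * type), the dispatch below casts bindx to rsb_half_idx_t* so the
+	 * kernels read narrower column indices, reducing index memory traffic;
+	 * otherwise full rsb_coo_idx_t indices are used. */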
+
+	if(!mtxAp /*|| !mtxAp->options */)
+		return RSB_ERR_BADARGS;
+
+	symmetry = rsb__get_symmetry_type_flag(mtxAp);
+	diagonal = rsb__get_diagonal_type_flag(mtxAp);
+
+	if(RSB_MATRIX_UNSUPPORTED_TYPE(mtxAp->typecode))
+		return RSB_ERR_BADARGS;
+
+	switch(diagonal)
+	{
+	case(RSB_DIAGONAL_E):
+	switch(half_storage)
+	{
+	case(RSB_COORDINATE_TYPE_C):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_C_u_tN_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_C_u_tN_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_C_u_tN_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_C_u_tN_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_C_u_tN_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_C_u_tN_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_C_u_tN_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_C_u_tN_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_C_u_tN_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_C_u_tN_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_C_u_tN_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_C_u_tN_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_C_u_tN_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_C_u_tN_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_C_u_tN_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_C_u_tT_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_C_u_tT_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_C_u_tT_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_C_u_tT_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_C_u_tT_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_C_u_tT_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_C_u_tT_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_C_u_tT_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_C_u_tT_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_C_u_tT_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_C_u_tT_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_C_u_tT_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_C_u_tT_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_C_u_tT_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_C_u_tT_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_C_u_tC_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_C_u_tC_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_C_u_tC_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_C_u_tC_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_C_u_tC_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_C_u_tC_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_C_u_tC_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_C_u_tC_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_C_u_tC_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_C_u_tC_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_C_u_tC_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_C_u_tC_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_C_u_tC_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_C_u_tC_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_C_u_tC_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%xd) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_COORDINATE_TYPE_H):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_H_u_tN_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_H_u_tN_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_H_u_tN_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_H_u_tN_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_H_u_tN_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_H_u_tN_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_H_u_tN_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_H_u_tN_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_H_u_tN_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_H_u_tN_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_H_u_tN_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_H_u_tN_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_H_u_tN_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_H_u_tN_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_H_u_tN_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_H_u_tT_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_H_u_tT_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_H_u_tT_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_H_u_tT_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_H_u_tT_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_H_u_tT_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_H_u_tT_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_H_u_tT_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_H_u_tT_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_H_u_tT_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_H_u_tT_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_H_u_tT_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_H_u_tT_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_H_u_tT_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_H_u_tT_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_H_u_tC_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_H_u_tC_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_H_u_tC_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_H_u_tC_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_H_u_tC_sU_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_H_u_tC_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_H_u_tC_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_H_u_tC_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_H_u_tC_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_H_u_tC_sS_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_H_u_tC_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_H_u_tC_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_H_u_tC_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_H_u_tC_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_H_u_tC_sH_dE_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%xd) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this coordinate index (0x%xd) is not supported.\n",(rsb_int)half_storage);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	break;
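+	/* The RSB_DIAGONAL_I branch below repeats the half_storage/transA/
+	 * symmetry/storage/typecode dispatch above, but selects the `dI'
+	 * (implicit, presumably unit, diagonal) kernel variants in place of
+	 * the `dE' (explicit diagonal) ones. */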
+		case(RSB_DIAGONAL_I):
+	switch(half_storage)
+	{
+	case(RSB_COORDINATE_TYPE_C):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_C_u_tN_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_C_u_tN_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_C_u_tN_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_C_u_tN_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_C_u_tN_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_C_u_tN_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_C_u_tN_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_C_u_tN_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_C_u_tN_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_C_u_tN_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_C_u_tN_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_C_u_tN_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_C_u_tN_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_C_u_tN_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_C_u_tN_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
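+	/* Transposition dispatch: RSB_TRANSPOSITION_N, _T and _C appear to
+	 * select the `tN', `tT' and `tC' (conjugate transpose) kernel
+	 * variants, respectively. */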
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_C_u_tT_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_C_u_tT_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_C_u_tT_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_C_u_tT_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_C_u_tT_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_C_u_tT_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_C_u_tT_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_C_u_tT_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_C_u_tT_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_C_u_tT_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_C_u_tT_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_C_u_tT_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_C_u_tT_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_C_u_tT_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_C_u_tT_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_C_u_tC_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_C_u_tC_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_C_u_tC_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_C_u_tC_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_C_u_tC_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_C_u_tC_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_C_u_tC_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_C_u_tC_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_C_u_tC_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_C_u_tC_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_C_u_tC_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_C_u_tC_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_C_u_tC_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_C_u_tC_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_C_u_tC_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_coo_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%xd) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
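+	/* The RSB_COORDINATE_TYPE_H branch below mirrors the
+	 * RSB_COORDINATE_TYPE_C one above, except that mtxAp->bindx is cast to
+	 * rsb_half_idx_t and the `H' (halfword index) kernels are called in
+	 * place of the `C' (full rsb_coo_idx_t index) ones. */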
+		case(RSB_COORDINATE_TYPE_H):
+	switch(transA)
+	{
+	case(RSB_TRANSPOSITION_N):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_H_u_tN_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_H_u_tN_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_H_u_tN_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_H_u_tN_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_H_u_tN_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_H_u_tN_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_H_u_tN_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_H_u_tN_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_H_u_tN_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_H_u_tN_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_H_u_tN_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_H_u_tN_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_H_u_tN_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_H_u_tN_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_H_u_tN_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_T):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_H_u_tT_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_H_u_tT_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_H_u_tT_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_H_u_tT_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_H_u_tT_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_H_u_tT_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_H_u_tT_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_H_u_tT_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_H_u_tT_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_H_u_tT_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_H_u_tT_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_H_u_tT_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_H_u_tT_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_H_u_tT_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_H_u_tT_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+		case(RSB_TRANSPOSITION_C):
+	switch(symmetry)
+	{
+	case(RSB_SYMMETRY_U):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_H_u_tC_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_H_u_tC_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_H_u_tC_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_H_u_tC_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_H_u_tC_sU_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_S):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_H_u_tC_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_H_u_tC_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_H_u_tC_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_H_u_tC_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_H_u_tC_sS_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+		case(RSB_SYMMETRY_H):
+	switch(mtxAp->matrix_storage)
+	{
+	case(RSB_MATRIX_STORAGE_BCOR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCOR_scale_double_H_u_tC_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCOR_scale_float_H_u_tC_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCOR_scale_float_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCOR_scale_double_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,mtxAp->nnz,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+		case(RSB_MATRIX_STORAGE_BCSR):
+	switch(mtxAp->typecode)
+	{
+	case(RSB_NUMERICAL_TYPE_DOUBLE ):
+		errval = rsb__BCSR_scale_double_H_u_tC_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT ):
+		errval = rsb__BCSR_scale_float_H_u_tC_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ):
+		errval = rsb__BCSR_scale_float_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+		case(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ):
+		errval = rsb__BCSR_scale_double_complex_H_u_tC_sH_dI_uG(	mtxAp->VA,mtxAp->Mdim,mtxAp->mdim,(rsb_half_idx_t*)mtxAp->bindx,mtxAp->bpntr,mtxAp->indptr,mtxAp->rpntr,mtxAp->cpntr,mtxAp->broff-mtxAp->roff,mtxAp->bm,mtxAp->roff,mtxAp->coff,mtxAp->flags,scale_factors);
+	break;
+			default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+			default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->matrix_storage);
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%x) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%x) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this coordinate index (0x%x) is not supported.\n",(rsb_int)half_storage);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	break;
+			default:
+		{
+			RSB_ERROR("Sorry, this diagonal type (0x%x) is not supported.\n",(rsb_int)diagonal);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	return errval;
+}
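+
+/*
+ * Structural note on the dispatcher above (a sketch of what the M4 macros
+ * emit): it selects one specialized rsb__BC{OR,SR}_scale_<type>_... kernel by
+ * switching, in turn, on diagonal type, index (half-word) storage,
+ * transposition, symmetry, matrix storage format and numerical type; any
+ * unsupported combination falls into a default case that reports the error
+ * in errval.
+ */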
+
+
+#ifdef RSB_WANT_KERNELS_DEBUG
+rsb_err_t rsb__spmv_uaua_testing(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_TESTING_FUNCTION macro */
+	/*!
+	 * \ingroup gr_debug
+	 * This is a trivial reference implementation of the "spmv_uaua" kernel;
+	 * its numerical results serve as evidence of bugs that may have been
+	 * introduced into the performance-oriented computational kernels.
+	 * 
+	 * It is intended for debugging, or for comparison against the
+	 * performance-optimized functions.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	
+	register rsb_coo_idx_t baserow = RSB_INI,basecolumn = RSB_INI,rows = RSB_INI,columns = RSB_INI;
+	register rsb_coo_idx_t blockrow = RSB_INI,blockcolumn = RSB_INI;
+	register rsb_byte_t *bp=0;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!mtxAp /*|| !mtxAp->options*/ )return RSB_ERR_BADARGS;
+	{
+	RSB_GET_FIRST_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
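+	/* iterate over the stored dense blocks: bp points at the current block's
+	 * values, while rpntr[blockrow] and cpntr[blockcolumn] give the global
+	 * offsets of its first row and column into the output and input vectors */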
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const double* a = (const double*)bp;
+		const double* b = ((const double*)rhs)+mtxAp->cpntr[blockcolumn];
+		double* c = ((double*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			double rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]+=rs;
+		}
+#endif /* 0 */
+
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const float* a = (const float*)bp;
+		const float* b = ((const float*)rhs)+mtxAp->cpntr[blockcolumn];
+		float* c = ((float*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			float rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]+=rs;
+		}
+#endif /* 0 */
+
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const float complex* a = (const float complex*)bp;
+		const float complex* b = ((const float complex*)rhs)+mtxAp->cpntr[blockcolumn];
+		float complex* c = ((float complex*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			float complex rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]+=rs;
+		}
+#endif /* 0 */
+
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const double complex* a = (const double complex*)bp;
+		const double complex* b = ((const double complex*)rhs)+mtxAp->cpntr[blockcolumn];
+		double complex* c = ((double complex*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			double complex rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]+=rs;
+		}
+#endif /* 0 */
+
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	{
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		return RSB_ERR_UNSUPPORTED_TYPE	;
+	}
+	}
+	return RSB_ERR_NO_ERROR;	
+}
+#endif /* RSB_WANT_KERNELS_DEBUG */
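+
+/*
+ * A validation sketch (hypothetical driver code, not part of the generated
+ * sources): run the optimized kernel and the reference implementation above
+ * on the same input and compare the outputs element-wise. Both output
+ * vectors are assumed zero-initialized, since the "uaua" variants accumulate.
+ *
+ *	rsb__do_spmv_uaua(mtxAp, x, y_opt, transA);
+ *	rsb__spmv_uaua_testing(mtxAp, x, y_ref, transA);
+ *	for(i = 0; i < mtxAp->nr; ++i)
+ *		if(fabs(y_opt[i] - y_ref[i]) > tol)
+ *			...a kernel bug was exposed...
+ */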
+
+rsb_err_t rsb_do_time_spmv_uaua(double * elapsed_time, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function performs the "spmv_uaua" operation,
+	 * measuring the elapsed time in seconds and writing it into a
+	 * user-supplied variable.
+	 * 
+	 * Note that this dispatch function is matrix type independent.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if( ! elapsed_time ) return RSB_ERR_BADARGS;
+
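+	/* negate-then-add timing idiom: after the call returns, *elapsed_time
+	 * holds (time after) - (time before) */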
+	*elapsed_time = - rsb_time();
+	errval = rsb__do_spmv_uaua	(mtxAp,rhs,out,transA);
+	
+	*elapsed_time += rsb_time(); 
+	return errval;
+}
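+
+/*
+ * A minimal usage sketch for the timing wrappers (hypothetical driver code;
+ * mtxAp and the vectors x, y are assumed to be allocated and filled
+ * elsewhere):
+ *
+ *	double dt;
+ *	rsb_err_t errval = rsb_do_time_spmv_uaua(&dt, mtxAp, x, y, RSB_TRANSPOSITION_N);
+ *	if(errval == RSB_ERR_NO_ERROR)
+ *		printf("spmv_uaua: %g s\n", dt);
+ */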
+
+#ifdef RSB_WANT_KERNELS_DEBUG
+rsb_err_t rsb__spmv_uauz_testing(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_TESTING_FUNCTION macro */
+	/*!
+	 * \ingroup gr_debug
+	 * This is a trivial reference implementation of the "spmv_uauz" kernel;
+	 * its numerical results serve as evidence of bugs that may have been
+	 * introduced into the performance-oriented computational kernels.
+	 * 
+	 * It is intended for debugging, or for comparison against the
+	 * performance-optimized functions.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	
+	register rsb_coo_idx_t baserow = RSB_INI,basecolumn = RSB_INI,rows = RSB_INI,columns = RSB_INI;
+	register rsb_coo_idx_t blockrow = RSB_INI,blockcolumn = RSB_INI;
+	register rsb_byte_t *bp=0;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!mtxAp /*|| !mtxAp->options*/ )return RSB_ERR_BADARGS;
+	{
+	RSB_GET_FIRST_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE )
+	{
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,mtxAp->nr,NULL,out,incy);
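+	/* the Xscal call above presumably zeroes the output first: the "uauz"
+	 * variants overwrite the result rather than accumulate into it */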
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const double* a = (const double*)bp;
+		const double* b = ((const double*)rhs)+mtxAp->cpntr[blockcolumn];
+		double* c = ((double*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			double rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]+=rs;	/* write the row result back, as in the reference loop above */
+		}
+#endif /* 0 */
+
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT )
+	{
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,mtxAp->nr,NULL,out,incy);
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const float* a = (const float*)bp;
+		const float* b = ((const float*)rhs)+mtxAp->cpntr[blockcolumn];
+		float* c = ((float*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			float rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]+=rs;	/* write the row result back, as in the reference loop above */
+		}
+#endif /* 0 */
+
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX )
+	{
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mtxAp->nr,NULL,out,incy);
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const float complex* a = (const float complex*)bp;
+		const float complex* b = ((const float complex*)rhs)+mtxAp->cpntr[blockcolumn];
+		float complex* c = ((float complex*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			float complex rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]+=rs;	/* write the row result back, as in the reference loop above */
+		}
+#endif /* 0 */
+
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX )
+	{
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mtxAp->nr,NULL,out,incy);
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const double complex* a = (const double complex*)bp;
+		const double complex* b = ((const double complex*)rhs)+mtxAp->cpntr[blockcolumn];
+		double complex* c = ((double complex*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			double complex rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]+=rs;	/* write the row result back, as in the reference loop above */
+		}
+#endif /* 0 */
+
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	{
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		return RSB_ERR_UNSUPPORTED_TYPE	;
+	}
+	}
+	return RSB_ERR_NO_ERROR;	
+}
+#endif /* RSB_WANT_KERNELS_DEBUG */
+
+rsb_err_t rsb_do_time_spmv_uauz(double * elapsed_time, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function performs the "spmv_uauz" operation,
+	 * measuring the elapsed time in seconds and writing it into a
+	 * user-supplied variable.
+	 * 
+	 * Note that this dispatch function is matrix type independent.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if( ! elapsed_time ) return RSB_ERR_BADARGS;
+
+	*elapsed_time = - rsb_time();
+	errval = rsb__do_spmv_uauz	(mtxAp,rhs,out,transA);
+	
+	*elapsed_time += rsb_time(); 
+	return errval;
+}
+
+#ifdef RSB_WANT_KERNELS_DEBUG
+rsb_err_t rsb__spmv_uxua_testing(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,const rsb_trans_t transA)
+{
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_TESTING_FUNCTION macro */
+	/*!
+	 * \ingroup gr_debug
+	 * This is a trivial reference implementation of the "spmv_uxua" kernel;
+	 * its numerical results serve as evidence of bugs that may have been
+	 * introduced into the performance-oriented computational kernels.
+	 * 
+	 * It is intended for debugging, or for comparison against the
+	 * performance-optimized functions.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	
+	register rsb_coo_idx_t baserow = RSB_INI,basecolumn = RSB_INI,rows = RSB_INI,columns = RSB_INI;
+	register rsb_coo_idx_t blockrow = RSB_INI,blockcolumn = RSB_INI;
+	register rsb_byte_t *bp=0;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!mtxAp /*|| !mtxAp->options*/ )return RSB_ERR_BADARGS;
+	{
+	RSB_GET_FIRST_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const double* a = (const double*)bp;
+		const double* b = ((const double*)rhs)+mtxAp->cpntr[blockcolumn];
+		double* c = ((double*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			double rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]+=rs;	/* as in the reference loop above; alphap is not applied by this debug code */
+		}
+#endif /* 0 */
+
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const float* a = (const float*)bp;
+		const float* b = ((const float*)rhs)+mtxAp->cpntr[blockcolumn];
+		float* c = ((float*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			float rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]+=rs;	/* as in the reference loop above; alphap is not applied by this debug code */
+		}
+#endif /* 0 */
+
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const float complex* a = (const float complex*)bp;
+		const float complex* b = ((const float complex*)rhs)+mtxAp->cpntr[blockcolumn];
+		float complex* c = ((float complex*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			float complex rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]+=rs;	/* as in the reference loop above; alphap is not applied by this debug code */
+		}
+#endif /* 0 */
+
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const double complex* a = (const double complex*)bp;
+		const double complex* b = ((const double complex*)rhs)+mtxAp->cpntr[blockcolumn];
+		double complex* c = ((double complex*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			double complex rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]+=rs;	/* as in the reference loop above; alphap is not applied by this debug code */
+		}
+#endif /* 0 */
+
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	{
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		return RSB_ERR_UNSUPPORTED_TYPE	;
+	}
+	}
+	return RSB_ERR_NO_ERROR;	
+}
+#endif /* RSB_WANT_KERNELS_DEBUG */
+
+rsb_err_t rsb_do_time_spmv_uxua(double * elapsed_time, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,const rsb_trans_t transA)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function performs the "spmv_uxua" operation,
+	 * measuring the elapsed time in seconds and writing it into a
+	 * user-supplied variable.
+	 * 
+	 * Note that this dispatch function is matrix type independent.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if( ! elapsed_time ) return RSB_ERR_BADARGS;
+
+	*elapsed_time = - rsb_time();
+	errval = rsb__do_spmv_uxua	(mtxAp,rhs,out,alphap,transA);
+	
+	*elapsed_time += rsb_time(); 
+	return errval;
+}
+
+#ifdef RSB_WANT_KERNELS_DEBUG
+rsb_err_t rsb__spmv_unua_testing(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_TESTING_FUNCTION macro */
+	/*!
+	 * \ingroup gr_debug
+	 * This is a trivial reference implementation of the "spmv_unua" kernel;
+	 * its numerical results serve as evidence of bugs that may have been
+	 * introduced into the performance-oriented computational kernels.
+	 * 
+	 * It is intended for debugging, or for comparison against the
+	 * performance-optimized functions.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	
+	register rsb_coo_idx_t baserow = RSB_INI,basecolumn = RSB_INI,rows = RSB_INI,columns = RSB_INI;
+	register rsb_coo_idx_t blockrow = RSB_INI,blockcolumn = RSB_INI;
+	register rsb_byte_t *bp=0;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!mtxAp /*|| !mtxAp->options*/ )return RSB_ERR_BADARGS;
+	{
+	RSB_GET_FIRST_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const double* a = (const double*)bp;
+		const double* b = ((const double*)rhs)+mtxAp->cpntr[blockcolumn];
+		double* c = ((double*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			double rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]-=rs;
+		}
+#endif /* 0 */
+
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const float* a = (const float*)bp;
+		const float* b = ((const float*)rhs)+mtxAp->cpntr[blockcolumn];
+		float* c = ((float*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			float rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]-=rs;
+		}
+#endif /* 0 */
+
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const float complex* a = (const float complex*)bp;
+		const float complex* b = ((const float complex*)rhs)+mtxAp->cpntr[blockcolumn];
+		float complex* c = ((float complex*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			float complex rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]-=rs;
+		}
+#endif /* 0 */
+
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const double complex* a = (const double complex*)bp;
+		const double complex* b = ((const double complex*)rhs)+mtxAp->cpntr[blockcolumn];
+		double complex* c = ((double complex*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			double complex rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]-=rs;
+		}
+#endif /* 0 */
+
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	{
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		return RSB_ERR_UNSUPPORTED_TYPE	;
+	}
+	}
+	return RSB_ERR_NO_ERROR;	
+}
+#endif /* RSB_WANT_KERNELS_DEBUG */
+
+rsb_err_t rsb_do_time_spmv_unua(double * elapsed_time, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function performs the "spmv_unua" operation,
+	 * measuring the elapsed time in seconds and writing it into a
+	 * user-supplied variable.
+	 * 
+	 * Note that this dispatch function is matrix type independent.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if( ! elapsed_time ) return RSB_ERR_BADARGS;
+
+	*elapsed_time = - rsb_time();
+	errval = rsb__do_spmv_unua	(mtxAp,rhs,out,transA);
+	
+	*elapsed_time += rsb_time(); 
+	return errval;
+}
+
+#ifdef RSB_WANT_KERNELS_DEBUG
+rsb_err_t rsb__spmv_sasa_testing(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+{
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_TESTING_FUNCTION macro */
+	/*!
+	 * \ingroup gr_debug
+	 * This is a trivial reference implementation of the "spmv_sasa" kernel;
+	 * its numerical results serve as evidence of bugs that may have been
+	 * introduced into the performance-oriented computational kernels.
+	 * 
+	 * It is intended for debugging, or for comparison against the
+	 * performance-optimized functions.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	
+	register rsb_coo_idx_t baserow = RSB_INI,basecolumn = RSB_INI,rows = RSB_INI,columns = RSB_INI;
+	register rsb_coo_idx_t blockrow = RSB_INI,blockcolumn = RSB_INI;
+	register rsb_byte_t *bp=0;
+
+	if(!mtxAp /*|| !mtxAp->options*/ )return RSB_ERR_BADARGS;
+	{
+	RSB_GET_FIRST_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const double* a = (const double*)bp;
+		const double* b = ((const double*)rhs)+mtxAp->cpntr[blockcolumn];
+		double* c = ((double*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			double rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]+=rs;	/* as in the reference loop above; the incx/incy strides are not applied by this debug code */
+		}
+#endif /* 0 */
+
+		
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const float* a = (const float*)bp;
+		const float* b = ((const float*)rhs)+mtxAp->cpntr[blockcolumn];
+		float* c = ((float*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			float rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]+=rs;	/* as in the reference loop above; the incx/incy strides are not applied by this debug code */
+		}
+#endif /* 0 */
+
+		
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const float complex* a = (const float complex*)bp;
+		const float complex* b = ((const float complex*)rhs)+mtxAp->cpntr[blockcolumn];
+		float complex* c = ((float complex*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			float complex rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]+=rs;	/* as in the reference loop above; the incx/incy strides are not applied by this debug code */
+		}
+#endif /* 0 */
+
+		
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const double complex* a = (const double complex*)bp;
+		const double complex* b = ((const double complex*)rhs)+mtxAp->cpntr[blockcolumn];
+		double complex* c = ((double complex*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			double complex rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]+=rs;	/* as in the reference loop above; the incx/incy strides are not applied by this debug code */
+		}
+#endif /* 0 */
+
+		
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	{
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		return RSB_ERR_UNSUPPORTED_TYPE	;
+	}
+	}
+	return RSB_ERR_NO_ERROR;	
+}
+#endif /* RSB_WANT_KERNELS_DEBUG */
+
+rsb_err_t rsb_do_time_spmv_sasa(double * elapsed_time, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function performs the "spmv_sasa" operation,
+	 * measuring the elapsed time in seconds and writing it into a
+	 * user-supplied variable.
+	 * 
+	 * Note that this dispatch function is matrix type independent.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if( ! elapsed_time ) return RSB_ERR_BADARGS;
+
+	*elapsed_time = - rsb_time();
+	errval = rsb__do_spmv_sasa	(mtxAp,rhs,out,incx,incy,transA);
+	
+	*elapsed_time += rsb_time(); 
+	return errval;
+}
+
+#ifdef RSB_WANT_KERNELS_DEBUG
+rsb_err_t rsb__spsv_uxua_testing(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_TESTING_FUNCTION macro */
+	/*!
+	 * \ingroup gr_debug
+	 * This is a trivial reference implementation of the "spsv_uxua" kernel;
+	 * its numerical results serve as evidence of bugs that may have been
+	 * introduced into the performance-oriented computational kernels.
+	 * 
+	 * It is intended for debugging, or for comparison against the
+	 * performance-optimized functions.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	
+	register rsb_coo_idx_t baserow = RSB_INI,basecolumn = RSB_INI,rows = RSB_INI,columns = RSB_INI;
+	register rsb_coo_idx_t blockrow = RSB_INI,blockcolumn = RSB_INI;
+	register rsb_byte_t *bp=0;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!mtxAp /*|| !mtxAp->options*/ )return RSB_ERR_BADARGS;
+	{
+	RSB_GET_FIRST_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+/*	FIXME : UNFINISHED */
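+	/* note: the reference triangular-solve body is unimplemented, so this
+	 * loop merely walks the blocks and no numerical check is performed */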
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+/*	FIXME : UNFINISHED */
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+/*	FIXME : UNFINISHED */
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+/*	FIXME : UNFINISHED */
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	{
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		return RSB_ERR_UNSUPPORTED_TYPE	;
+	}
+	}
+	return RSB_ERR_NO_ERROR;	
+}
+#endif /* RSB_WANT_KERNELS_DEBUG */
+
+rsb_err_t rsb_do_time_spsv_uxua(double * elapsed_time, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function performs the "spsv_uxua" operation,
+	 * measuring the elapsed time in seconds and writing it into a
+	 * user-supplied variable.
+	 * 
+	 * Note that this dispatch function is matrix type independent.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if( ! elapsed_time ) return RSB_ERR_BADARGS;
+
+	*elapsed_time = - rsb_time();
+	errval = rsb__do_spsv_uxua	(mtxAp,rhs,out,transA);
+	
+	*elapsed_time += rsb_time(); 
+	return errval;
+}
+
+#ifdef RSB_WANT_KERNELS_DEBUG
+rsb_err_t rsb__spmv_sxsa_testing(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+{
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_TESTING_FUNCTION macro */
+	/*!
+	 * \ingroup gr_debug
+	 * This is a trivial reference implementation of the "spmv_sxsa" kernel;
+	 * its numerical results serve as evidence of bugs that may have been
+	 * introduced into the performance-oriented computational kernels.
+	 * 
+	 * It is intended for debugging, or for comparison against the
+	 * performance-optimized functions.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	
+	register rsb_coo_idx_t baserow = RSB_INI,basecolumn = RSB_INI,rows = RSB_INI,columns = RSB_INI;
+	register rsb_coo_idx_t blockrow = RSB_INI,blockcolumn = RSB_INI;
+	register rsb_byte_t *bp=0;
+
+	if(!mtxAp /*|| !mtxAp->options*/ )return RSB_ERR_BADARGS;
+	{
+	RSB_GET_FIRST_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const double* a = (const double*)bp;
+		const double* b = ((const double*)rhs)+mtxAp->cpntr[blockcolumn];
+		double* c = ((double*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			double rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]+=rs;	/* as in the reference loop above; alphap and the incx/incy strides are not applied by this debug code */
+		}
+#endif /* 0 */
+
+		
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const float* a = (const float*)bp;
+		const float* b = ((const float*)rhs)+mtxAp->cpntr[blockcolumn];
+		float* c = ((float*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			float rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]+=rs;	/* as in the reference loop above; alphap and the incx/incy strides are not applied by this debug code */
+		}
+#endif /* 0 */
+
+		
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const float complex* a = (const float complex*)bp;
+		const float complex* b = ((const float complex*)rhs)+mtxAp->cpntr[blockcolumn];
+		float complex* c = ((float complex*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			float complex rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]+=rs;	/* as in the reference loop above; alphap and the incx/incy strides are not applied by this debug code */
+		}
+#endif /* 0 */
+
+		
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const double complex* a = (const double complex*)bp;
+		const double complex* b = ((const double complex*)rhs)+mtxAp->cpntr[blockcolumn];
+		double complex* c = ((double complex*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * This code emulates the kernel's traversal order, so it should
+		 * produce the same numerical roundoff the optimized kernel would.
+		 */
+		for(i=0;i<rows;++i)
+		{
+			double complex rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+			c[i]+=rs;	/* as in the reference loop above; alphap and the incx/incy strides are not applied by this debug code */
+		}
+#endif /* 0 */
+
+		
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	{
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		return RSB_ERR_UNSUPPORTED_TYPE	;
+	}
+	}
+	return RSB_ERR_NO_ERROR;	
+}
+#endif /* RSB_WANT_KERNELS_DEBUG */
+
+rsb_err_t rsb_do_time_spmv_sxsa(double * elapsed_time, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function performs the "spmv_sxsa" operation,
+	 * measuring the elapsed time in seconds and writing it into a
+	 * user-supplied variable.
+	 * 
+	 * Note that this dispatch function is matrix type independent.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if( ! elapsed_time ) return RSB_ERR_BADARGS;
+
+	*elapsed_time = - rsb_time();
+	errval = rsb__do_spmv_sxsa	(mtxAp,rhs,out,alphap,incx,incy,transA);
+	
+	*elapsed_time += rsb_time(); 
+	return errval;
+}
+
+#ifdef RSB_WANT_KERNELS_DEBUG
+rsb_err_t rsb__spsv_sxsx_testing(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+{
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_TESTING_FUNCTION macro */
+	/*!
+	 * \ingroup gr_debug
+	 * This is a trivial reference implementation of the "spsv_sxsx" kernel;
+	 * its numerical results serve as evidence of bugs that may have been
+	 * introduced into the performance-oriented computational kernels.
+	 * 
+	 * It is intended for debugging, or for comparison against the
+	 * performance-optimized functions.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	
+	register rsb_coo_idx_t baserow = RSB_INI,basecolumn = RSB_INI,rows = RSB_INI,columns = RSB_INI;
+	register rsb_coo_idx_t blockrow = RSB_INI,blockcolumn = RSB_INI;
+	register rsb_byte_t *bp=0;
+
+	if(!mtxAp /*|| !mtxAp->options*/ )return RSB_ERR_BADARGS;
+	{
+	RSB_GET_FIRST_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+/*	FIXME : UNFINISHED */
+		
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+/*	FIXME : UNFINISHED */
+		
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+/*	FIXME : UNFINISHED */
+		
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+/*	FIXME : UNFINISHED */
+		
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	{
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		return RSB_ERR_UNSUPPORTED_TYPE	;
+	}
+	}
+	return RSB_ERR_NO_ERROR;	
+}
+#endif /* RSB_WANT_KERNELS_DEBUG */
+
+rsb_err_t rsb_do_time_spsv_sxsx(double * elapsed_time, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function performs the "spsv_sxsx" operation,
+	 * measuring the elapsed time in seconds and writing it into a
+	 * user-supplied variable.
+	 * 
+	 * Note that this dispatch function is matrix type independent.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if( ! elapsed_time ) return RSB_ERR_BADARGS;
+
+	*elapsed_time = - rsb_time();
+	errval = rsb__do_spsv_sxsx	(mtxAp,rhs,out,alphap,incx,incy,transA);
+	
+	*elapsed_time += rsb_time(); 
+	return errval;
+}
+
+#ifdef RSB_WANT_KERNELS_DEBUG
+rsb_err_t rsb__infty_norm_testing(const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums)
+{
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_TESTING_FUNCTION macro */
+	/*!
+	 * \ingroup gr_debug
+	 * This is a trivial reference implementation of the "infty_norm" kernel;
+	 * its numerical results serve as evidence of bugs that may have been
+	 * introduced into the performance-oriented computational kernels.
+	 * 
+	 * It is intended for debugging, or for comparison against the
+	 * performance-optimized functions.
+	 *
+	 * \return \rsb_errval_inp_param_msg
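+	 *
+	 * (This routine fills row_sums with per-row sums of absolute values;
+	 * the infinity norm itself is the maximum of those sums, presumably
+	 * taken by the caller.)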
+	 */
+	
+	register rsb_coo_idx_t baserow = RSB_INI,basecolumn = RSB_INI,rows = RSB_INI,columns = RSB_INI;
+	register rsb_coo_idx_t blockrow = RSB_INI,blockcolumn = RSB_INI;
+	register rsb_byte_t *bp=0;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!mtxAp /*|| !mtxAp->options*/ )return RSB_ERR_BADARGS;
+	{
+	RSB_GET_FIRST_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const double* a = (const double*)bp;
+		double* row_sums_=row_sums;
+		rsb_coo_idx_t i,j;
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				row_sums_[mtxAp->rpntr[blockrow]+i]+=fabs(a[i*columns+j]);
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const float* a = (const float*)bp;
+		float* row_sums_=row_sums;
+		rsb_coo_idx_t i,j;
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				row_sums_[mtxAp->rpntr[blockrow]+i]+=fabsf(a[i*columns+j]);
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const float complex* a = (const float complex*)bp;
+		float complex* row_sums_=row_sums;
+		rsb_coo_idx_t i,j;
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				row_sums_[mtxAp->rpntr[blockrow]+i]+=cabsf(a[i*columns+j]);
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const double complex* a = (const double complex*)bp;
+		double complex* row_sums_=row_sums;
+		rsb_coo_idx_t i,j;
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				row_sums_[mtxAp->rpntr[blockrow]+i]+=cabs(a[i*columns+j]);
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	{
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		return RSB_ERR_UNSUPPORTED_TYPE	;
+	}
+	}
+	return RSB_ERR_NO_ERROR;	
+}
+#endif /* RSB_WANT_KERNELS_DEBUG */
+
+rsb_err_t rsb_do_time_infty_norm(double * elapsed_time, const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function performs the "infty_norm" operation,
+	 * measuring the elapsed time in seconds and writing it into a
+	 * user-supplied variable.
+	 * 
+	 * Note that this dispatch function is matrix type independent.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if( ! elapsed_time ) return RSB_ERR_BADARGS;
+
+	*elapsed_time = - rsb_time();
+	errval = rsb__do_infty_norm	(mtxAp,transA,row_sums);
+	
+	*elapsed_time += rsb_time(); 
+	return errval;
+}
+
+#ifdef RSB_WANT_KERNELS_DEBUG
+rsb_err_t rsb__rowssums_testing(const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums)
+{
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_TESTING_FUNCTION macro */
+	/*!
+	 * \ingroup gr_debug
+	 * This is a trivial reference implementation of the "rowssums" kernel;
+	 * its numerical results serve as evidence of bugs that may have been
+	 * introduced into the performance-oriented computational kernels.
+	 * 
+	 * It is intended for debugging, or for comparison against the
+	 * performance-optimized functions.
+	 *
+	 * \return \rsb_errval_inp_param_msg
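+	 *
+	 * (Unlike the "infty_norm" kernel, the row sums here are accumulated
+	 * without taking absolute values.)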
+	 */
+	
+	register rsb_coo_idx_t baserow = RSB_INI,basecolumn = RSB_INI,rows = RSB_INI,columns = RSB_INI;
+	register rsb_coo_idx_t blockrow = RSB_INI,blockcolumn = RSB_INI;
+	register rsb_byte_t *bp=0;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!mtxAp /*|| !mtxAp->options*/ )return RSB_ERR_BADARGS;
+	{
+	RSB_GET_FIRST_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const double* a = (const double*)bp;
+		double* row_sums_=row_sums;
+		rsb_coo_idx_t i,j;
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				row_sums_[mtxAp->rpntr[blockrow]+i]+=a[i*columns+j];
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const float* a = (const float*)bp;
+		float* row_sums_=row_sums;
+		rsb_coo_idx_t i,j;
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				row_sums_[mtxAp->rpntr[blockrow]+i]+=a[i*columns+j];
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const float complex* a = (const float complex*)bp;
+		float complex* row_sums_=row_sums;
+		rsb_coo_idx_t i,j;
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				row_sums_[mtxAp->rpntr[blockrow]+i]+=a[i*columns+j];
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		const double complex* a = (const double complex*)bp;
+		double complex* row_sums_=row_sums;
+		rsb_coo_idx_t i,j;
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				row_sums_[mtxAp->rpntr[blockrow]+i]+=a[i*columns+j];
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	{
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		return RSB_ERR_UNSUPPORTED_TYPE	;
+	}
+	}
+	return RSB_ERR_NO_ERROR;	
+}
+#endif /* RSB_WANT_KERNELS_DEBUG */
+
+rsb_err_t rsb_do_time_rowssums(double * elapsed_time, const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function performs the "rowssums" operation,
+	 * measuring the elapsed time in seconds and writing it into a
+	 * user-supplied variable.
+	 * 
+	 * Note that this dispatch function is matrix type independent.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if( ! elapsed_time ) return RSB_ERR_BADARGS;
+
+	*elapsed_time = - rsb_time();
+	errval = rsb__do_rowssums	(mtxAp,transA,row_sums);
+	
+	*elapsed_time += rsb_time(); 
+	return errval;
+}
+
+#ifdef RSB_WANT_KERNELS_DEBUG
+rsb_err_t rsb__scale_testing(struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,const void * scale_factors)
+{
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_TESTING_FUNCTION macro */
+	/*!
+	 * \ingroup gr_debug
+	 * This is a trivial reference implementation of the "scale" kernel;
+	 * its numerical results serve as evidence should bugs be introduced
+	 * into the performance-oriented computational kernels.
+	 *
+	 * It should be used for debugging, or for comparison against the
+	 * performance-optimized functions.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	
+	register rsb_coo_idx_t baserow = RSB_INI,basecolumn = RSB_INI,rows = RSB_INI,columns = RSB_INI;
+	register rsb_coo_idx_t blockrow = RSB_INI,blockcolumn = RSB_INI;
+	register rsb_byte_t *bp=0;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!mtxAp /*|| !mtxAp->options*/ )return RSB_ERR_BADARGS;
+	{
+	RSB_GET_FIRST_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		double* a = (double*)bp;
+		rsb_coo_idx_t i,j;
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				a[i*columns+j]*=((const double*)scale_factors)[mtxAp->rpntr[blockrow]+i]; /* global row index, as in the rowssums reference */
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		float* a = (float*)bp;
+		rsb_coo_idx_t i,j;
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				a[i*columns+j]*=((const float*)scale_factors)[mtxAp->rpntr[blockrow]+i];
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		float complex* a = (float complex*)bp;
+		rsb_coo_idx_t i,j;
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				a[i*columns+j]*=((const float complex*)scale_factors)[mtxAp->rpntr[blockrow]+i];
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	if(mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX )
+	{
+
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+		double complex* a = (double complex*)bp;
+		rsb_coo_idx_t i,j;
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				a[i*columns+j]*=((const double complex*)scale_factors)[mtxAp->rpntr[blockrow]+i];
+	
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+	{
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		return RSB_ERR_UNSUPPORTED_TYPE;
+	}
+	}
+	return RSB_ERR_NO_ERROR;	
+}
+#endif /* RSB_WANT_KERNELS_DEBUG */
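+
+/*
+ * Editorial sketch, not part of the generated sources: the "scale" semantics
+ * of the reference kernel above over plain COO triplets, double case only.
+ * The helper name is hypothetical; each nonzero is scaled by the factor of
+ * the row it belongs to, matching the block-based reference.
+ */
+#if 0 /* illustration only */
+static void coo_scale_double(double * VA, const rsb_coo_idx_t * IA,
+		rsb_nnz_idx_t nnz, const double * scale_factors)
+{
+	rsb_nnz_idx_t k;
+
+	for(k=0;k<nnz;++k)
+		VA[k] *= scale_factors[IA[k]];	/* scale each nonzero by its row factor */
+}
+#endif /* illustration only */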
+
+rsb_err_t rsb_do_time_scale(double * elapsed_time, struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,const void * scale_factors)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will perform the "scale" operation,
+	 * measuring the time elapsed in seconds and writing it to a
+	 * user-set variable.
+	 *
+	 * Note that this dispatch function is matrix type independent.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if( ! elapsed_time ) return RSB_ERR_BADARGS;
+
+	*elapsed_time = - rsb_time();
+	errval = rsb__do_scale	(mtxAp,transA,scale_factors);
+	
+	*elapsed_time += rsb_time(); 
+	return errval;
+}
+
+
+rsb_err_t rsb__do_fullrangebenchmark_double_spmv_uaua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "double" type implementation of operation "spmv_uaua" 
+	 * for a single matrix, but for the whole range of different block sizes
+	 * partitionings.
+         * 
+         * Therefore, the VBR features of this library will be NOT used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	double *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_double_spmv_uaua( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
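+
+/*
+ * Editorial sketch, not part of the generated sources: how a caller might
+ * drive the full-range benchmark above and then scan the filled-in
+ * rsb_mop_performance_info_t for the best-performing blocking.  Comparing
+ * the m_flops/seconds ratio is an assumption of this sketch; the
+ * fillin-corrected e_mflops field could serve equally well.
+ */
+#if 0 /* illustration only */
+rsb_err_t example_pick_blocking(void * VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA,
+		rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols)
+{
+	struct rsb_mop_performance_info_t mpi;
+	rsb_blk_idx_t rua[] = RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[] = RSB_COLUMNS_UNROLL_ARRAY;
+	int ri, ci, bri = 0, bci = 0;
+	double rate, best = 0.0;
+	rsb_err_t errval = rsb__do_fullrangebenchmark_double_spmv_uaua(VA, IA, JA,
+			nnz, rows, cols, &mpi, RSB_FLAG_NOFLAGS);
+
+	if(RSB_SOME_ERROR(errval))
+		return errval;
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rate = mpi.seconds[ri][ci] > 0.0 ? mpi.m_flops[ri][ci] / mpi.seconds[ri][ci] : 0.0;
+			if( rate > best )
+				best = rate, bri = ri, bci = ci;
+		}
+	RSB_STDERR("best blocking: %d x %d at %lg MFLOPS\n", (int)rua[bri], (int)cua[bci], best);
+	return errval;
+}
+#endif /* illustration only */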
+
+rsb_err_t rsb__do_benchmark_double_spmv_uaua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spmv_uaua" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for the specified matrix.
+	 *
+	 * It will also accumulate the count of millions of floating point
+	 * operations performed in another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least that many runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will stop after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_spmv_uaua(&elapsed_time,mtxAp,rhs,out,transA);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_uaua(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_uaua",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_uaua,double) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
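+
+/*
+ * Editorial sketch, not part of the generated sources: the in/out convention
+ * of the benchmark loop above, made explicit.  On input the two pointees act
+ * as a time limit and a minimum run count; on output they hold the measured
+ * totals, so the sustained rate is their ratio.
+ */
+#if 0 /* illustration only */
+rsb_err_t example_spmv_uaua_rate(const struct rsb_mtx_t * mtxAp,
+		const void * rhs, void * out, double * mflops_per_s)
+{
+	double seconds = RSB_BENCHMARK_MIN_SECONDS;	/* input: benchmark at least this long */
+	double m_flops = (double)RSB_BENCHMARK_MIN_RUNS;	/* input: ... and at least this many runs */
+	rsb_err_t errval = rsb__do_benchmark_double_spmv_uaua(&seconds, &m_flops,
+			mtxAp, rhs, out, RSB_DEFAULT_TRANSPOSITION);
+
+	if(!RSB_SOME_ERROR(errval) && seconds > 0.0)
+		*mflops_per_s = m_flops / seconds;	/* output: measured totals -> rate */
+	return errval;
+}
+#endif /* illustration only */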
+
+rsb_err_t rsb__do_fullrangebenchmark_float_spmv_uaua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "float" type implementation of operation "spmv_uaua" 
+	 * for a single matrix, but for the whole range of different block sizes
+	 * partitionings.
+         * 
+         * Therefore, the VBR features of this library will be NOT used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	float *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_float_spmv_uaua( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_float_spmv_uaua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spmv_uaua" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for the specified matrix.
+	 *
+	 * It will also accumulate the count of millions of floating point
+	 * operations performed in another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least that many runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will stop after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_spmv_uaua(&elapsed_time,mtxAp,rhs,out,transA);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_uaua(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_uaua",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_uaua,float) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_float_complex_spmv_uaua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "float complex" type implementation of operation "spmv_uaua" 
+	 * for a single matrix, but for the whole range of different block sizes
+	 * partitionings.
+         * 
+         * Therefore, the VBR features of this library will be NOT used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	float complex *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_float_complex_spmv_uaua( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_float_complex_spmv_uaua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spmv_uaua" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for the specified matrix.
+	 *
+	 * It will also accumulate the count of millions of floating point
+	 * operations performed in another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least that many runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will stop after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_spmv_uaua(&elapsed_time,mtxAp,rhs,out,transA);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_uaua(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_uaua",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_uaua,float complex) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_double_complex_spmv_uaua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "double complex" type implementation of operation "spmv_uaua" 
+	 * for a single matrix, but for the whole range of different block sizes
+	 * partitionings.
+         * 
+         * Therefore, the VBR features of this library will be NOT used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	double complex *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_double_complex_spmv_uaua( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_double_complex_spmv_uaua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spmv_uaua" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for the specified matrix.
+	 *
+	 * It will also accumulate the count of millions of floating point
+	 * operations performed in another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least that many runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will stop after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_spmv_uaua(&elapsed_time,mtxAp,rhs,out,transA);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_uaua(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_uaua",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_uaua,double complex) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_double_spmv_uauz(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "double" type implementation of operation "spmv_uauz" 
+	 * for a single matrix, but for the whole range of different block sizes
+	 * partitionings.
+         * 
+         * Therefore, the VBR features of this library will be NOT used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	double *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_double_spmv_uauz( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_double_spmv_uauz(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spmv_uauz" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for the specified matrix.
+	 *
+	 * It will also accumulate the count of millions of floating point
+	 * operations performed in another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least that many runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will stop after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_spmv_uauz(&elapsed_time,mtxAp,rhs,out,transA);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_uauz(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_uauz",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_uauz,double) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_float_spmv_uauz(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "float" type implementation of operation "spmv_uauz" 
+	 * for a single matrix, but for the whole range of different block sizes
+	 * partitionings.
+         * 
+         * Therefore, the VBR features of this library will be NOT used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	float *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_float_spmv_uauz( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_float_spmv_uauz(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spmv_uauz" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for the specified matrix.
+	 *
+	 * It will also accumulate the count of millions of floating point
+	 * operations performed in another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least that many runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will stop after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_spmv_uauz(&elapsed_time,mtxAp,rhs,out,transA);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_uauz(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_uauz",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_uauz,float) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_float_complex_spmv_uauz(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "float complex" type implementation of operation "spmv_uauz" 
+	 * for a single matrix, but for the whole range of different block sizes
+	 * partitionings.
+         * 
+         * Therefore, the VBR features of this library will be NOT used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	float complex *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_float_complex_spmv_uauz( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_float_complex_spmv_uauz(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spmv_uauz" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for the specified matrix.
+	 *
+	 * It will also accumulate the count of millions of floating point
+	 * operations performed in another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least that many runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will stop after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_spmv_uauz(&elapsed_time,mtxAp,rhs,out,transA);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_uauz(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_uauz",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_uauz,float complex) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_double_complex_spmv_uauz(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "double complex" type implementation of operation "spmv_uauz" 
+	 * for a single matrix, but for the whole range of different block sizes
+	 * partitionings.
+         * 
+         * Therefore, the VBR features of this library will be NOT used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	double complex *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_double_complex_spmv_uauz( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_double_complex_spmv_uauz(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spmv_uauz" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for the specified matrix.
+	 *
+	 * It will also accumulate the count of millions of floating point
+	 * operations performed in another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least that many runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will stop after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_spmv_uauz(&elapsed_time,mtxAp,rhs,out,transA);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_uauz(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_uauz",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_uauz,double complex) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
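+
+/*
+ * Illustrative sketch of a hypothetical standalone caller, not part of the
+ * library: the wrapper above uses its two leading arguments both as inputs
+ * (time budget, minimum run count) and as outputs (accumulated seconds and
+ * Mflops), so the mean rate follows as accumulated Mflops over accumulated
+ * seconds.
+ *
+ *	double seconds = RSB_BENCHMARK_MIN_SECONDS;       // in: time budget
+ *	double mflops  = (double)RSB_BENCHMARK_MIN_RUNS;  // in: min runs
+ *	rsb_err_t errval = rsb__do_benchmark_double_complex_spmv_uauz(
+ *			&seconds, &mflops, mtxAp, rhs, out, transA);
+ *	if(!RSB_SOME_ERROR(errval))
+ *		printf("mean rate: %lg Mflops\n", mflops / seconds);
+ */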
+
+rsb_err_t rsb__do_fullrangebenchmark_double_spmv_uxua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "double" type implementation of operation "spmv_uxua" 
+	 * for a single matrix, but for the whole range of different block-size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	double *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			double alpha=1.0;/* FIXME */
+			double * alphap = &alpha;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_double_spmv_uxua( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,alphap,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] = mpi->m_flops[ri][ci] / mpi->fillin[ri][ci]; /* effective Mflops: measured Mflops discounted by fill-in */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
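+
+/*
+ * Note on buffer sizing in the drivers above (presumed rationale, stated
+ * here for clarity): rhs and out are allocated with one extra block of
+ * padding, bstride = cols + bc and cstride = rows + br, presumably so that
+ * register-blocked kernels may safely touch up to one block past the
+ * logical vector edge.
+ */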
+
+rsb_err_t rsb__do_benchmark_double_spmv_uxua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spmv_uxua" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for a specified matrix.
+	 *
+	 * It will also add the number of millions of floating point
+	 * operations performed to another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least (int)m_flops runs
+	 *
+	 * If neither of the two input arguments is set on input, the benchmark
+	 * will cease once RSB_BENCHMARK_MIN_RUNS runs and RSB_BENCHMARK_MIN_SECONDS seconds have both elapsed.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_spmv_uxua(&elapsed_time,mtxAp,rhs,out,alphap,transA);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_uxua(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_uxua",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_uxua,double) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
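+
+/*
+ * Note on the alphap convention (the snippet below is illustrative): the
+ * scaling factor is passed by address as a type-erased const void * and is
+ * expected to be reinterpreted according to mtxAp->typecode by the timed
+ * kernel. The generated drivers in this file currently declare alpha as a
+ * double for every typecode (see their FIXME comments):
+ *
+ *	double alpha = 1.0;
+ *	const void * alphap = &alpha; // reinterpreted per mtxAp->typecode
+ */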
+
+rsb_err_t rsb__do_fullrangebenchmark_float_spmv_uxua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "float" type implementation of operation "spmv_uxua" 
+	 * for a single matrix, but for the whole range of different block-size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	float *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			double alpha=1.0;/* FIXME */
+			double * alphap = &alpha;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_float_spmv_uxua( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,alphap,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] = mpi->m_flops[ri][ci] / mpi->fillin[ri][ci]; /* effective Mflops: measured Mflops discounted by fill-in */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_float_spmv_uxua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spmv_uxua" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for a specified matrix.
+	 *
+	 * It will also add the number of millions of floating point
+	 * operations performed to another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least (int)m_flops runs
+	 *
+	 * If neither of the two input arguments is set on input, the benchmark
+	 * will cease once RSB_BENCHMARK_MIN_RUNS runs and RSB_BENCHMARK_MIN_SECONDS seconds have both elapsed.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_spmv_uxua(&elapsed_time,mtxAp,rhs,out,alphap,transA);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_uxua(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_uxua",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_uxua,float) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_float_complex_spmv_uxua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "float complex" type implementation of operation "spmv_uxua" 
+	 * for a single matrix, but for the whole range of different block-size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	float complex *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			double alpha=1.0;/* FIXME */
+			double * alphap = &alpha;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_float_complex_spmv_uxua( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,alphap,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] = mpi->m_flops[ri][ci] / mpi->fillin[ri][ci]; /* effective Mflops: measured Mflops discounted by fill-in */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_float_complex_spmv_uxua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spmv_uxua" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for a specified matrix.
+	 *
+	 * It will also add the number of millions of floating point
+	 * operations performed to another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least (int)m_flops runs
+	 *
+	 * If neither of the two input arguments is set on input, the benchmark
+	 * will cease once RSB_BENCHMARK_MIN_RUNS runs and RSB_BENCHMARK_MIN_SECONDS seconds have both elapsed.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_spmv_uxua(&elapsed_time,mtxAp,rhs,out,alphap,transA);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_uxua(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_uxua",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_uxua,float complex) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_double_complex_spmv_uxua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "double complex" type implementation of operation "spmv_uxua" 
+	 * for a single matrix, but for the whole range of different block-size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	double complex *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			double alpha=1.0;/* FIXME */
+			double * alphap = &alpha;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_double_complex_spmv_uxua( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,alphap,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] = mpi->m_flops[ri][ci] / mpi->fillin[ri][ci]; /* effective Mflops: measured Mflops discounted by fill-in */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_double_complex_spmv_uxua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spmv_uxua" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for a specified matrix.
+	 *
+	 * It will also add the number of millions of floating point
+	 * operations performed to another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least (int)m_flops runs
+	 *
+	 * If neither of the two input arguments is set on input, the benchmark
+	 * will cease once RSB_BENCHMARK_MIN_RUNS runs and RSB_BENCHMARK_MIN_SECONDS seconds have both elapsed.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_spmv_uxua(&elapsed_time,mtxAp,rhs,out,alphap,transA);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_uxua(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_uxua",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_uxua,double complex) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_double_spmv_unua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "double" type implementation of operation "spmv_unua" 
+	 * for a single matrix, but for the whole range of different block-size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	double *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_double_spmv_unua( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] = mpi->m_flops[ri][ci] / mpi->fillin[ri][ci]; /* effective Mflops: measured Mflops discounted by fill-in */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_double_spmv_unua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spmv_unua" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for a specified matrix.
+	 *
+	 * It will also add the number of millions of floating point
+	 * operations performed to another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least (int)m_flops runs
+	 *
+	 * If neither of the two input arguments is set on input, the benchmark
+	 * will cease once RSB_BENCHMARK_MIN_RUNS runs and RSB_BENCHMARK_MIN_SECONDS seconds have both elapsed.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_spmv_unua(&elapsed_time,mtxAp,rhs,out,transA);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_unua(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_unua",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_unua,double) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_float_spmv_unua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "float" type implementation of operation "spmv_unua" 
+	 * for a single matrix, but for the whole range of different block-size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	float *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_float_spmv_unua( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] = mpi->m_flops[ri][ci] / mpi->fillin[ri][ci]; /* effective Mflops: measured Mflops discounted by fill-in */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_float_spmv_unua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spmv_unua" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for a specified matrix.
+	 *
+	 * It will also add the number of millions of floating point
+	 * operations performed to another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least (int)m_flops runs
+	 *
+	 * If neither of the two input arguments is set on input, the benchmark
+	 * will cease once RSB_BENCHMARK_MIN_RUNS runs and RSB_BENCHMARK_MIN_SECONDS seconds have both elapsed.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_spmv_unua(&elapsed_time,mtxAp,rhs,out,transA);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_unua(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_unua",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_unua,float) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_float_complex_spmv_unua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "float complex" type implementation of operation "spmv_unua" 
+	 * for a single matrix, but for the whole range of different block-size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	float complex *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_float_complex_spmv_unua( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] = mpi->m_flops[ri][ci] / mpi->fillin[ri][ci]; /* effective Mflops: measured Mflops discounted by fill-in */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_float_complex_spmv_unua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spmv_unua" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for a specified matrix.
+	 *
+	 * It will also add the number of millions of floating point
+	 * operations performed to another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least (int)m_flops runs
+	 *
+	 * If neither of the two input arguments is set on input, the benchmark
+	 * will cease once RSB_BENCHMARK_MIN_RUNS runs and RSB_BENCHMARK_MIN_SECONDS seconds have both elapsed.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_spmv_unua(&elapsed_time,mtxAp,rhs,out,transA);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_unua(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_unua",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_unua,float complex) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_double_complex_spmv_unua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "double complex" type implementation of operation "spmv_unua" 
+	 * for a single matrix, but for the whole range of different block-size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	double complex *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_double_complex_spmv_unua( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] = mpi->m_flops[ri][ci] / mpi->fillin[ri][ci]; /* effective Mflops: measured Mflops discounted by fill-in */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_double_complex_spmv_unua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spmv_unua" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for a specified matrix.
+	 *
+	 * It will also add the number of millions of floating point
+	 * operations performed to another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least (int)m_flops runs
+	 *
+	 * If neither of the two input arguments is set on input, the benchmark
+	 * will cease once RSB_BENCHMARK_MIN_RUNS runs and RSB_BENCHMARK_MIN_SECONDS seconds have both elapsed.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		errval = rsb_do_time_spmv_unua(&elapsed_time,mtxAp,rhs,out,transA);	/* FIXME : use an even more general function here (this one is vbr-only!) */
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_unua(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_unua",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_unua,double complex) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
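+/*
+ * Illustrative sketch (not generated code): how a wrapper like the one above
+ * is meant to be driven; mtxAp, rhs and out are assumed to be a valid matrix
+ * and two adequately sized buffers, as prepared by the fullrange functions.
+ *
+ *	double seconds = 2.0;	// request at least two seconds of timing ...
+ *	double mflops = 10.0;	// ... and at least ten runs
+ *	rsb_err_t errval = rsb__do_benchmark_double_complex_spmv_unua(
+ *		&seconds, &mflops, mtxAp, rhs, out, RSB_DEFAULT_TRANSPOSITION);
+ *	// on success, seconds holds the total measured time and mflops the
+ *	// accumulated Mflop count, so the attained rate is mflops/seconds.
+ */
+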
+rsb_err_t rsb__do_fullrangebenchmark_double_spmv_sasa(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "double" type implementation of operation "spmv_sasa"
+	 * for a single matrix, but over the whole range of supported
+	 * block-size partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	double *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* lol */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_double_spmv_sasa( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,incx,incy,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
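+/*
+ * Illustrative sketch (not generated code): driving the fullrange benchmark
+ * above from a COO triplet; VA, IA, JA, nnz, rows and cols are assumed to
+ * describe an already loaded matrix.
+ *
+ *	struct rsb_mop_performance_info_t mpi;
+ *	rsb_err_t errval = rsb__do_fullrangebenchmark_double_spmv_sasa(
+ *		VA, IA, JA, nnz, rows, cols, &mpi, RSB_FLAG_NOFLAGS);
+ *	// on success, mpi.seconds[ri][ci] and mpi.m_flops[ri][ci] hold the
+ *	// measurements for each supported (row, column) block size pair.
+ */
+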
+rsb_err_t rsb__do_benchmark_double_spmv_sasa(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function benchmarks the "spmv_sasa" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-supplied location for the specified matrix.
+	 *
+	 * It also accumulates the count of millions of floating point
+	 * operations performed into another user-supplied location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark at least total_elapsed_time seconds
+	 * \param m_flops if m_flops > 0 on input, will benchmark at least m_flops times
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will cease after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		errval = rsb_do_time_spmv_sasa(&elapsed_time,mtxAp,rhs,out,incx,incy,transA);	/* FIXME : use an even more general function here (this one is vbr-only!) */
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_sasa(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_sasa",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_sasa,double) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
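+/*
+ * Note: the "spmv_sasa" wrappers take explicit incx/incy vector strides,
+ * whereas the "spmv_unua" ones above assume unit strides; the timing and
+ * accounting contract is otherwise identical.
+ */
+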
+rsb_err_t rsb__do_fullrangebenchmark_float_spmv_sasa(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "float" type implementation of operation "spmv_sasa"
+	 * for a single matrix, but over the whole range of supported
+	 * block-size partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	float *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* lol */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_float_spmv_sasa( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,incx,incy,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_float_spmv_sasa(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function benchmarks the "spmv_sasa" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-supplied location for the specified matrix.
+	 *
+	 * It also accumulates the count of millions of floating point
+	 * operations performed into another user-supplied location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark at least total_elapsed_time seconds
+	 * \param m_flops if m_flops > 0 on input, will benchmark at least m_flops times
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will cease after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		errval = rsb_do_time_spmv_sasa(&elapsed_time,mtxAp,rhs,out,incx,incy,transA);	/* FIXME : use an even more general function here (this one is vbr-only!) */
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_sasa(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_sasa",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_sasa,float) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_float_complex_spmv_sasa(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "float complex" type implementation of operation "spmv_sasa"
+	 * for a single matrix, but over the whole range of supported
+	 * block-size partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	float complex *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* lol */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_float_complex_spmv_sasa( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,incx,incy,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_float_complex_spmv_sasa(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function benchmarks the "spmv_sasa" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-supplied location for the specified matrix.
+	 *
+	 * It also accumulates the count of millions of floating point
+	 * operations performed into another user-supplied location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark at least total_elapsed_time seconds
+	 * \param m_flops if m_flops > 0 on input, will benchmark at least m_flops times
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will cease after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		errval = rsb_do_time_spmv_sasa(&elapsed_time,mtxAp,rhs,out,incx,incy,transA);	/* FIXME : use an even more general function here (this one is vbr-only!) */
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_sasa(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_sasa",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_sasa,float complex) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_double_complex_spmv_sasa(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "double complex" type implementation of operation "spmv_sasa"
+	 * for a single matrix, but over the whole range of supported
+	 * block-size partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	double complex *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* lol */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_double_complex_spmv_sasa( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,incx,incy,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_double_complex_spmv_sasa(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function benchmarks the "spmv_sasa" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-supplied location for the specified matrix.
+	 *
+	 * It also accumulates the count of millions of floating point
+	 * operations performed into another user-supplied location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark at least total_elapsed_time seconds
+	 * \param m_flops if m_flops > 0 on input, will benchmark at least m_flops times
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will cease after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		errval = rsb_do_time_spmv_sasa(&elapsed_time,mtxAp,rhs,out,incx,incy,transA);	/* FIXME : use an even more general function here (this one is vbr-only!) */
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_sasa(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_sasa",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_sasa,double complex) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_double_spsv_uxua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "double" type implementation of operation "spsv_uxua"
+	 * for a single matrix, but over the whole range of supported
+	 * block-size partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	double *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* lol */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_double_spsv_uxua( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_double_spsv_uxua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function benchmarks the "spsv_uxua" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-supplied location for the specified matrix.
+	 *
+	 * It also accumulates the count of millions of floating point
+	 * operations performed into another user-supplied location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark at least total_elapsed_time seconds
+	 * \param m_flops if m_flops > 0 on input, will benchmark at least m_flops times
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will cease after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		errval = rsb_do_time_spsv_uxua(&elapsed_time,mtxAp,rhs,out,transA);	/* FIXME : use an even more general function here (this one is vbr-only!) */
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spsv_uxua(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spsv_uxua",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spsv_uxua,double) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
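+/*
+ * Note: the "spsv_uxua" wrappers time the triangular solve kernels
+ * (rsb_do_time_spsv_uxua) under the same minimum-runs/minimum-seconds
+ * contract as the multiply benchmarks above.
+ */
+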
+rsb_err_t rsb__do_fullrangebenchmark_float_spsv_uxua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "float" type implementation of operation "spsv_uxua"
+	 * for a single matrix, but over the whole range of supported
+	 * block-size partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	float *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* lol */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_float_spsv_uxua( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_float_spsv_uxua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function benchmarks the "spsv_uxua" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-supplied location for the specified matrix.
+	 *
+	 * It also accumulates the count of millions of floating point
+	 * operations performed into another user-supplied location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark at least total_elapsed_time seconds
+	 * \param m_flops if m_flops > 0 on input, will benchmark at least m_flops times
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will cease after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		errval = rsb_do_time_spsv_uxua(&elapsed_time,mtxAp,rhs,out,transA);	/* FIXME : use an even more general function here (this one is vbr-only!) */
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spsv_uxua(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spsv_uxua",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spsv_uxua,float) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_float_complex_spsv_uxua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "float complex" type implementation of operation "spsv_uxua"
+	 * for a single matrix, but over the whole range of supported
+	 * block-size partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	float complex *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* lol */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_float_complex_spsv_uxua( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_float_complex_spsv_uxua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function benchmarks the "spsv_uxua" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-supplied location for the specified matrix.
+	 *
+	 * It also accumulates the count of millions of floating point
+	 * operations performed into another user-supplied location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark at least total_elapsed_time seconds
+	 * \param m_flops if m_flops > 0 on input, will benchmark at least m_flops times
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will cease after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		errval = rsb_do_time_spsv_uxua(&elapsed_time,mtxAp,rhs,out,transA);	/* FIXME : use an even more general function here (this one is vbr-only!) */
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spsv_uxua(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spsv_uxua",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spsv_uxua,float complex) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_double_complex_spsv_uxua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "double complex" type implementation of operation "spsv_uxua"
+	 * for a single matrix, but over the whole range of supported
+	 * block-size partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	double complex *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* lol */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_double_complex_spsv_uxua( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_double_complex_spsv_uxua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function benchmarks the "spsv_uxua" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-supplied location for the specified matrix.
+	 *
+	 * It also accumulates the count of millions of floating point
+	 * operations performed into another user-supplied location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark at least total_elapsed_time seconds
+	 * \param m_flops if m_flops > 0 on input, will benchmark at least m_flops times
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will cease after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		errval = rsb_do_time_spsv_uxua(&elapsed_time,mtxAp,rhs,out,transA);	/* FIXME : use an even more general function here (this one is vbr-only!) */
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spsv_uxua(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spsv_uxua",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spsv_uxua,double complex) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
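+
+/*
+ * Illustrative sketch, not part of the generated sources: the calling
+ * convention shared by all of these benchmark wrappers. The two pointer
+ * arguments are in/out: on input they carry the requested minimum seconds
+ * and minimum runs; on output, the total elapsed seconds and the total
+ * estimated millions of floating point operations, so their ratio is the
+ * measured MFLOPS rate. Names below are placeholders; `mtxAp`, `rhs` and
+ * `out` are assumed to be already set up and conforming.
+ */
+#if 0 /* example only */
+static rsb_err_t example_call_benchmark_wrapper(const struct rsb_mtx_t * mtxAp, const void * rhs, void * out)
+{
+	double seconds = 2.0;	/* in: benchmark for at least 2 seconds ...  */
+	double m_flops = 10.0;	/* in: ... and for at least (int)10 runs     */
+	rsb_err_t errval = rsb__do_benchmark_double_complex_spsv_uxua(&seconds, &m_flops, mtxAp, rhs, out, RSB_DEFAULT_TRANSPOSITION);
+
+	if(!RSB_SOME_ERROR(errval))
+		RSB_STDERR("measured %lg MFLOPS\n", m_flops / seconds); /* ratio of the two outputs */
+	return errval;
+}
+#endif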
+
+rsb_err_t rsb__do_fullrangebenchmark_double_spmv_sxsa(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "double" type implementation of operation "spmv_sxsa"
+	 * for a single matrix, over the whole range of supported block size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written to a user-supplied structure.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	double *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			double alpha=1.0;/* FIXME */
+			double * alphap = &alpha;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* lol */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_double_spmv_sxsa( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,alphap,incx,incy,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
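+
+/*
+ * Illustrative sketch, not part of the generated sources: driving the
+ * full-range benchmark above from caller-owned COO arrays and scanning the
+ * resulting grid of per-blocking measurements for the largest
+ * fillin-corrected operation count (the e_mflops field filled in above);
+ * treating that cell as "best" is only a heuristic. All names below are
+ * placeholders; VA/IA/JA are assumed to be already populated.
+ */
+#if 0 /* example only */
+static void example_scan_fullrange_results(double * VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA,
+		rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols)
+{
+	struct rsb_mop_performance_info_t mpi;
+	int ri, ci, bri = 0, bci = 0;
+
+	if(RSB_SOME_ERROR(rsb__do_fullrangebenchmark_double_spmv_sxsa(VA, IA, JA, nnz, rows, cols, &mpi, RSB_FLAG_NOFLAGS)))
+		return;
+	for(ri = 0; ri < RSB_ROWS_UNROLL_ARRAY_LENGTH; ++ri)
+		for(ci = 0; ci < RSB_COLUMNS_UNROLL_ARRAY_LENGTH; ++ci)
+			if(mpi.e_mflops[ri][ci] > mpi.e_mflops[bri][bci])
+				bri = ri, bci = ci;
+	RSB_STDERR("best blocking index: (%d,%d)\n", bri, bci);
+}
+#endif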
+
+rsb_err_t rsb__do_benchmark_double_spmv_sxsa(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spmv_sxsa" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for the specified matrix.
+	 *
+	 * It will also accumulate the count of millions of floating point
+	 * operations performed in another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least (int)m_flops runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will stop after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for at least time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_spmv_sxsa(&elapsed_time,mtxAp,rhs,out,alphap,incx,incy,transA);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_sxsa(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_sxsa",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_sxsa,double) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_float_spmv_sxsa(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "float" type implementation of operation "spmv_sxsa"
+	 * for a single matrix, over the whole range of supported block size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written to a user-supplied structure.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	float *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			float alpha=1.0f;/* FIXME */
+			float * alphap = &alpha;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* lol */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_float_spmv_sxsa( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,alphap,incx,incy,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_float_spmv_sxsa(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spmv_sxsa" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for the specified matrix.
+	 *
+	 * It will also accumulate the count of millions of floating point
+	 * operations performed in another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least (int)m_flops runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will stop after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for at least time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_spmv_sxsa(&elapsed_time,mtxAp,rhs,out,alphap,incx,incy,transA);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_sxsa(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_sxsa",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_sxsa,float) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_float_complex_spmv_sxsa(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "float complex" type implementation of operation "spmv_sxsa"
+	 * for a single matrix, over the whole range of supported block size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written to a user-supplied structure.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	float complex *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			float complex alpha=1.0f;/* FIXME */
+			float complex * alphap = &alpha;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* lol */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_float_complex_spmv_sxsa( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,alphap,incx,incy,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_float_complex_spmv_sxsa(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spmv_sxsa" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for the specified matrix.
+	 *
+	 * It will also accumulate the count of millions of floating point
+	 * operations performed in another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least (int)m_flops runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will stop after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for at least time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_spmv_sxsa(&elapsed_time,mtxAp,rhs,out,alphap,incx,incy,transA);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_sxsa(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_sxsa",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_sxsa,float complex) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_double_complex_spmv_sxsa(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "double complex" type implementation of operation "spmv_sxsa"
+	 * for a single matrix, over the whole range of supported block size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written to a user-supplied structure.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	double complex *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			double complex alpha=1.0;/* FIXME */
+			double complex * alphap = &alpha;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* lol */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_double_complex_spmv_sxsa( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,alphap,incx,incy,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_double_complex_spmv_sxsa(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spmv_sxsa" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for the specified matrix.
+	 *
+	 * It will also accumulate the count of millions of floating point
+	 * operations performed in another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least (int)m_flops runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will stop after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for at least time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_spmv_sxsa(&elapsed_time,mtxAp,rhs,out,alphap,incx,incy,transA);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spmv_sxsa(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spmv_sxsa",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spmv_sxsa,double complex) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_double_spsv_sxsx(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "double" type implementation of operation "spsv_sxsx"
+	 * for a single matrix, over the whole range of supported block size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written to a user-supplied structure.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	double *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			double alpha=1.0;/* FIXME */
+			double * alphap = &alpha;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* lol */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_double_spsv_sxsx( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,alphap,incx,incy,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_double_spsv_sxsx(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spsv_sxsx" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for the specified matrix.
+	 *
+	 * It will also accumulate the count of millions of floating point
+	 * operations performed in another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least (int)m_flops runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will stop after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for at least time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_spsv_sxsx(&elapsed_time,mtxAp,rhs,out,alphap,incx,incy,transA);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spsv_sxsx(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spsv_sxsx",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spsv_sxsx,double) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_float_spsv_sxsx(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "float" type implementation of operation "spsv_sxsx"
+	 * for a single matrix, over the whole range of supported block size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written to a user-supplied structure.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	float *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			float alpha=1.0f;/* FIXME */
+			float * alphap = &alpha;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* lol */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_float_spsv_sxsx( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,alphap,incx,incy,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_float_spsv_sxsx(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spsv_sxsx" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for the specified matrix.
+	 *
+	 * It will also accumulate the count of millions of floating point
+	 * operations performed in another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least (int)m_flops runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will stop after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for at least time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_spsv_sxsx(&elapsed_time,mtxAp,rhs,out,alphap,incx,incy,transA);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spsv_sxsx(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spsv_sxsx",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spsv_sxsx,float) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_float_complex_spsv_sxsx(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "float complex" type implementation of operation "spsv_sxsx"
+	 * for a single matrix, over the whole range of supported block size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written to a user-supplied structure.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	float complex *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			float complex alpha=1.0f;/* FIXME */
+			float complex * alphap = &alpha;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* lol */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_float_complex_spsv_sxsx( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,alphap,incx,incy,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_float_complex_spsv_sxsx(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spsv_sxsx" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for the specified matrix.
+	 *
+	 * It will also accumulate the count of millions of floating point
+	 * operations performed in another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least (int)m_flops runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will stop after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for at least time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_spsv_sxsx(&elapsed_time,mtxAp,rhs,out,alphap,incx,incy,transA);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spsv_sxsx(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spsv_sxsx",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spsv_sxsx,float complex) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_double_complex_spsv_sxsx(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "double complex" type implementation of operation "spsv_sxsx"
+	 * for a single matrix, over the whole range of supported block size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written to a user-supplied structure.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	double complex *out=NULL,*rhs=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+			rsb_coo_idx_t nrhs=1;
+			double complex alpha=1.0;/* FIXME */
+			double complex * alphap = &alpha;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* lol */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_double_complex_spsv_sxsx( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,rhs,out,alphap,incx,incy,transA);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_double_complex_spsv_sxsx(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "spsv_sxsx" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for the specified matrix.
+	 *
+	 * It will also accumulate the count of millions of floating point
+	 * operations performed in another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least (int)m_flops runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will stop after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for at least time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_spsv_sxsx(&elapsed_time,mtxAp,rhs,out,alphap,incx,incy,transA);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_spsv_sxsx(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"spsv_sxsx",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (spsv_sxsx,double complex) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_double_infty_norm(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "double" type implementation of operation "infty_norm"
+	 * for a single matrix, over the whole range of supported block size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written to a user-supplied structure.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	double * row_sums=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
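+				/* With auto-blocking, the library has chosen the blocking
+				 * itself: recover the (ri,ci) indices of that blocking by
+				 * matching the first block's row/column spans against
+				 * rua[]/cua[], so results land in the right mpi slot. */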
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			row_sums = rsb__malloc(mtxAp->el_size*(rows+br));
+			if(!row_sums) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(row_sums,mtxAp->typecode,cols,1))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_double_infty_norm( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,transA,row_sums);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(row_sums);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
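+
+/*
+ * Sketch of how the full-range benchmark functions above and below fill the
+ * result structure (illustrative; inputs as in this file): entry (ri,ci) of
+ * each per-blocking array in mpi refers to the br x bc blocking with
+ * br = rua[ri], bc = cua[ci]:
+ *
+ *	struct rsb_mop_performance_info_t mpi;
+ *	rsb__do_fullrangebenchmark_double_infty_norm(VA,IA,JA,nnz,rows,cols,&mpi,RSB_FLAG_NOFLAGS);
+ *	// mpi.seconds[ri][ci]  : measured seconds for blocking rua[ri] x cua[ci]
+ *	// mpi.m_flops[ri][ci]  : measured MFLOPS for that blocking
+ *	// mpi.e_mflops[ri][ci] : the same, corrected by the measured fillin
+ */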
+
+rsb_err_t rsb__do_benchmark_double_infty_norm(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "infty_norm" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user specified location for the given matrix.
+	 *
+	 * It will also accumulate the count of performed millions of
+	 * floating point operations in another user specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least that many seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least that many runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will cease only after both RSB_BENCHMARK_MIN_RUNS runs and RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+	if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_infty_norm(&elapsed_time,mtxAp,transA,row_sums);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_infty_norm(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"infty_norm",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (infty_norm,double) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+	 * FIXME : this is a candidate location for a conditional performance data printout
+	 */
+
+	return RSB_ERR_NO_ERROR;
+}
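+
+/*
+ * Worked termination example for the wrappers above (illustrative): with
+ * *total_elapsed_time == 0 and *m_flops == 0 on input, both defaults apply
+ * and the measurement loop keeps running while
+ *
+ *	elapsed < RSB_BENCHMARK_MIN_SECONDS || runs < RSB_BENCHMARK_MIN_RUNS
+ *
+ * holds, i.e. it stops only once BOTH minimums have been reached.
+ */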
+
+rsb_err_t rsb__do_fullrangebenchmark_float_infty_norm(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "float" type implementation of the "infty_norm" operation
+	 * for a single matrix, over the whole range of supported block size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written to a user supplied structure.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	float * row_sums=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			row_sums = rsb__malloc(mtxAp->el_size*(rows+br));
+			if(!row_sums) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(row_sums,mtxAp->typecode,cols,1))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_float_infty_norm( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,transA,row_sums);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(row_sums);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_float_infty_norm(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "infty_norm" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user specified location for the given matrix.
+	 *
+	 * It will also accumulate the count of performed millions of
+	 * floating point operations in another user specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least that many seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least that many runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will cease only after both RSB_BENCHMARK_MIN_RUNS runs and RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+	if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_infty_norm(&elapsed_time,mtxAp,transA,row_sums);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_infty_norm(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"infty_norm",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (infty_norm,float) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+	 * FIXME : this is a candidate location for a conditional performance data printout
+	 */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_float_complex_infty_norm(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "float complex" type implementation of the "infty_norm" operation
+	 * for a single matrix, over the whole range of supported block size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written to a user supplied structure.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	float complex * row_sums=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			row_sums = rsb__malloc(mtxAp->el_size*(rows+br));
+			if(!row_sums) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(row_sums,mtxAp->typecode,cols,1))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_float_complex_infty_norm( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,transA,row_sums);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(row_sums);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_float_complex_infty_norm(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "infty_norm" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user specified location for the given matrix.
+	 *
+	 * It will also accumulate the count of performed millions of
+	 * floating point operations in another user specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least that many seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least that many runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will cease only after both RSB_BENCHMARK_MIN_RUNS runs and RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+	if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_infty_norm(&elapsed_time,mtxAp,transA,row_sums);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_infty_norm(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"infty_norm",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (infty_norm,float complex) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+	 * FIXME : this is a candidate location for a conditional performance data printout
+	 */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_double_complex_infty_norm(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "double complex" type implementation of the "infty_norm" operation
+	 * for a single matrix, over the whole range of supported block size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written to a user supplied structure.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	double complex * row_sums=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			row_sums = rsb__malloc(mtxAp->el_size*(rows+br));
+			if(!row_sums) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(row_sums,mtxAp->typecode,cols,1))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_double_complex_infty_norm( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,transA,row_sums);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(row_sums);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_double_complex_infty_norm(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "infty_norm" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user specified location for the given matrix.
+	 *
+	 * It will also accumulate the count of performed millions of
+	 * floating point operations in another user specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least that many seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least that many runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will cease only after both RSB_BENCHMARK_MIN_RUNS runs and RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+	if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_infty_norm(&elapsed_time,mtxAp,transA,row_sums);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_infty_norm(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"infty_norm",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (infty_norm,double complex) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+	 * FIXME : this is a candidate location for a conditional performance data printout
+	 */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_double_rowssums(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "double" type implementation of the "rowssums" operation
+	 * for a single matrix, over the whole range of supported block size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written to a user supplied structure.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	double * row_sums=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			row_sums = rsb__malloc(mtxAp->el_size*(rows+br));
+			if(!row_sums) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(row_sums,mtxAp->typecode,cols,1))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_double_rowssums( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,transA,row_sums);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(row_sums);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_double_rowssums(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "rowssums" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user specified location for the given matrix.
+	 *
+	 * It will also accumulate the count of performed millions of
+	 * floating point operations in another user specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least that many seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least that many runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will cease only after both RSB_BENCHMARK_MIN_RUNS runs and RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+	if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_rowssums(&elapsed_time,mtxAp,transA,row_sums);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_rowssums(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"rowssums",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (rowssums,double) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+	 * FIXME : this is a candidate location for a conditional performance data printout
+	 */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_float_rowssums(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "float" type implementation of the "rowssums" operation
+	 * for a single matrix, over the whole range of supported block size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written to a user supplied structure.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	float * row_sums=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			row_sums = rsb__malloc(mtxAp->el_size*(rows+br));
+			if(!row_sums) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(row_sums,mtxAp->typecode,cols,1))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_float_rowssums( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,transA,row_sums);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(row_sums);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_float_rowssums(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "rowssums" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user specified location for the given matrix.
+	 *
+	 * It will also accumulate the count of performed millions of
+	 * floating point operations in another user specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least that many seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least that many runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will cease only after both RSB_BENCHMARK_MIN_RUNS runs and RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+	if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_rowssums(&elapsed_time,mtxAp,transA,row_sums);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_rowssums(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"rowssums",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (rowssums,float) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+	 * FIXME : this is a candidate location for a conditional performance data printout
+	 */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_float_complex_rowssums(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "float complex" type implementation of the "rowssums" operation
+	 * for a single matrix, over the whole range of supported block size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written to a user supplied structure.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	float complex * row_sums=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			row_sums = rsb__malloc(mtxAp->el_size*(rows+br));
+			if(!row_sums) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(row_sums,mtxAp->typecode,cols,1))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_float_complex_rowssums( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,transA,row_sums);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(row_sums);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_float_complex_rowssums(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "rowssums" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user specified location for the given matrix.
+	 *
+	 * It will also accumulate the count of performed millions of
+	 * floating point operations in another user specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least that many seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least that many runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will cease only after both RSB_BENCHMARK_MIN_RUNS runs and RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+	if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_rowssums(&elapsed_time,mtxAp,transA,row_sums);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_rowssums(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"rowssums",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (rowssums,float complex) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+	 * FIXME : this is a candidate location for a conditional performance data printout
+	 */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_double_complex_rowssums(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "double complex" type implementation of the "rowssums" operation
+	 * for a single matrix, over the whole range of supported block size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written to a user supplied structure.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	double complex * row_sums=NULL;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			row_sums = rsb__malloc(mtxAp->el_size*(rows+br));
+			if(!row_sums) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(row_sums,mtxAp->typecode,cols,1))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_double_complex_rowssums( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,transA,row_sums);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(row_sums);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_double_complex_rowssums(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "rowssums" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user specified location for the given matrix.
+	 *
+	 * It will also accumulate the count of performed millions of
+	 * floating point operations in another user specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least that many seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least that many runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will cease only after both RSB_BENCHMARK_MIN_RUNS runs and RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+	if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_rowssums(&elapsed_time,mtxAp,transA,row_sums);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_rowssums(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"rowssums",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (rowssums,double complex) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+	 * FIXME : this is a candidate location for a conditional performance data printout
+	 */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_double_scale(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "double" type implementation of the "scale" operation
+	 * for a single matrix, over the whole range of supported block size
+	 * partitionings.
+	 *
+	 * Therefore, the VBR features of this library will NOT be used here.
+	 *
+	 * The performance information will be written to a user supplied structure.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			double * scale_factors = NULL;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* matching blocking found */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			scale_factors = rsb__malloc(mtxAp->el_size*(rows+br));
+			if(!scale_factors) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(scale_factors,mtxAp->typecode,rows,1))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_double_scale( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,transA,scale_factors);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(scale_factors);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_double_scale(double * total_elapsed_time, double * m_flops, struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,const void * scale_factors)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "scale" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user specified location for the given matrix.
+	 *
+	 * It will also accumulate the count of performed millions of
+	 * floating point operations in another user specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least that many seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least that many runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will cease only after both RSB_BENCHMARK_MIN_RUNS runs and RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+	if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		elapsed_time = RSB_TIME_ZERO;
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+		errval = rsb_do_time_scale(&elapsed_time,mtxAp,transA,scale_factors);
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_scale(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"scale",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (scale,double) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+	 * FIXME : this is a candidate location for a conditional performance data printout
+	 */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_float_scale(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "float" type implementation of operation "scale" 
+	 * for a single matrix, but for the whole range of different block sizes
+	 * partitionings.
+         * 
+         * Therefore, the VBR features of this library will be NOT used here.
+	 *
+	 * The performance information will be written to a user-supplied structure.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			float * scale_factors = NULL;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* a supported blocking matched */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			scale_factors = rsb__malloc(mtxAp->el_size*(rows+br));
+			if(!scale_factors) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(scale_factors,mtxAp->typecode,rows,1))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_float_scale( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,transA,scale_factors);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(scale_factors);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
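+
+/*
+ * Editorial note on the loop above, hedged: rua[] and cua[] enumerate the
+ * block sizes compiled into this build (for instance, a hypothetical build
+ * could have RSB_ROWS_UNROLL_ARRAY expand to {1,2,4}), so the ri/ci loop
+ * rebuilds and benchmarks the matrix once per (br,bc) blocking, and
+ * scale_factors is over-allocated to rows+br elements so that a trailing
+ * partial block row can be addressed safely. With RSB_FLAG_AUTO_BLOCKING,
+ * only the blocking chosen by the constructor is measured: the inner search
+ * merely recovers its (ri,ci) indices before jumping to ok.
+ */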
+
+rsb_err_t rsb__do_benchmark_float_scale(double * total_elapsed_time, double * m_flops, struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,const void * scale_factors)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "scale" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-specified location for a given matrix.
+	 *
+	 * It will also accumulate the estimated count of millions of floating
+	 * point operations performed in another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least *total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least (int)*m_flops runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will cease after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+	if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		errval = rsb_do_time_scale(&elapsed_time,mtxAp,transA,scale_factors);	/* FIXME : use an even more general function here (the following is vbr-only!) */
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_scale(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"scale",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (scale,float) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_float_complex_scale(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "float complex" type implementation of operation "scale" 
+	 * for a single matrix, but for the whole range of different block sizes
+	 * partitionings.
+         * 
+         * Therefore, the VBR features of this library will be NOT used here.
+	 *
+	 * The performance information will be written to a user-supplied structure.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			float complex * scale_factors = NULL;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* a supported blocking matched */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			scale_factors = rsb__malloc(mtxAp->el_size*(rows+br));
+			if(!scale_factors) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(scale_factors,mtxAp->typecode,rows,1))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_float_complex_scale( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,transA,scale_factors);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(scale_factors);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_float_complex_scale(double * total_elapsed_time, double * m_flops, struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,const void * scale_factors)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "scale" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-specified location for a given matrix.
+	 *
+	 * It will also accumulate the estimated count of millions of floating
+	 * point operations performed in another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least *total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least (int)*m_flops runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will cease after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+	if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		errval = rsb_do_time_scale(&elapsed_time,mtxAp,transA,scale_factors);	/* FIXME : use an even more general function here (the following is vbr-only!) */
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_scale(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"scale",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (scale,float complex) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_fullrangebenchmark_double_complex_scale(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags)
+/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "double complex" type implementation of operation "scale" 
+	 * for a single matrix, but for the whole range of different block sizes
+	 * partitionings.
+         * 
+         * Therefore, the VBR features of this library will be NOT used here.
+	 *
+	 * The performance information will be written to a user-supplied structure.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ;
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			double complex * scale_factors = NULL;
+			br = rua[ri];
+			bc = cua[ci];
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* a supported blocking matched */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+			scale_factors = rsb__malloc(mtxAp->el_size*(rows+br));
+			if(!scale_factors) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(scale_factors,mtxAp->typecode,rows,1))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+			rsb__do_benchmark_double_complex_scale( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), mtxAp,transA,scale_factors);
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+			RSB_CONDITIONAL_FREE(scale_factors);
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+
+rsb_err_t rsb__do_benchmark_double_complex_scale(double * total_elapsed_time, double * m_flops, struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,const void * scale_factors)
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "scale" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-specified location for a given matrix.
+	 *
+	 * It will also accumulate the estimated count of millions of floating
+	 * point operations performed in another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least *total_elapsed_time seconds
+	 * \param m_flops if > 0 on input, will benchmark for at least (int)*m_flops runs
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will cease after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+	if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		errval = rsb_do_time_scale(&elapsed_time,mtxAp,transA,scale_factors);	/* FIXME : use an even more general function here (the following is vbr-only!) */
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += rsb__estimate_mflops_per_op_scale(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"scale",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (scale,double complex) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+/* Dispatch table for type and scale specific benchmarks (FIXME : NEW) */
+ rsb_err_t (* rsb_benchmark_dispatch_table [RSB_IMPLEMENTED_TYPES][RSB_IMPLEMENTED_MOPS]) 
+   (void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags_pointer_table)
+ =  { 
+{
+ rsb__do_fullrangebenchmark_double_spmv_uaua,
+ rsb__do_fullrangebenchmark_double_spmv_uauz,
+ rsb__do_fullrangebenchmark_double_spmv_uxua,
+ rsb__do_fullrangebenchmark_double_spmv_unua,
+ rsb__do_fullrangebenchmark_double_spmv_sasa,
+ rsb__do_fullrangebenchmark_double_spsv_uxua,
+ rsb__do_fullrangebenchmark_double_spmv_sxsa,
+ rsb__do_fullrangebenchmark_double_spsv_sxsx,
+ rsb__do_fullrangebenchmark_double_infty_norm,
+ rsb__do_fullrangebenchmark_double_rowssums,
+ rsb__do_fullrangebenchmark_double_scale 
+}
+,
+{
+ rsb__do_fullrangebenchmark_float_spmv_uaua,
+ rsb__do_fullrangebenchmark_float_spmv_uauz,
+ rsb__do_fullrangebenchmark_float_spmv_uxua,
+ rsb__do_fullrangebenchmark_float_spmv_unua,
+ rsb__do_fullrangebenchmark_float_spmv_sasa,
+ rsb__do_fullrangebenchmark_float_spsv_uxua,
+ rsb__do_fullrangebenchmark_float_spmv_sxsa,
+ rsb__do_fullrangebenchmark_float_spsv_sxsx,
+ rsb__do_fullrangebenchmark_float_infty_norm,
+ rsb__do_fullrangebenchmark_float_rowssums,
+ rsb__do_fullrangebenchmark_float_scale 
+}
+,
+{
+ rsb__do_fullrangebenchmark_float_complex_spmv_uaua,
+ rsb__do_fullrangebenchmark_float_complex_spmv_uauz,
+ rsb__do_fullrangebenchmark_float_complex_spmv_uxua,
+ rsb__do_fullrangebenchmark_float_complex_spmv_unua,
+ rsb__do_fullrangebenchmark_float_complex_spmv_sasa,
+ rsb__do_fullrangebenchmark_float_complex_spsv_uxua,
+ rsb__do_fullrangebenchmark_float_complex_spmv_sxsa,
+ rsb__do_fullrangebenchmark_float_complex_spsv_sxsx,
+ rsb__do_fullrangebenchmark_float_complex_infty_norm,
+ rsb__do_fullrangebenchmark_float_complex_rowssums,
+ rsb__do_fullrangebenchmark_float_complex_scale 
+}
+,
+{
+ rsb__do_fullrangebenchmark_double_complex_spmv_uaua,
+ rsb__do_fullrangebenchmark_double_complex_spmv_uauz,
+ rsb__do_fullrangebenchmark_double_complex_spmv_uxua,
+ rsb__do_fullrangebenchmark_double_complex_spmv_unua,
+ rsb__do_fullrangebenchmark_double_complex_spmv_sasa,
+ rsb__do_fullrangebenchmark_double_complex_spsv_uxua,
+ rsb__do_fullrangebenchmark_double_complex_spmv_sxsa,
+ rsb__do_fullrangebenchmark_double_complex_spsv_sxsx,
+ rsb__do_fullrangebenchmark_double_complex_infty_norm,
+ rsb__do_fullrangebenchmark_double_complex_rowssums,
+ rsb__do_fullrangebenchmark_double_complex_scale 
+}
+ 
+};
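+
+/*
+ * Editorial sketch, not part of the generated sources: the table above is
+ * indexed first by type, in initializer order (double, float, float complex,
+ * double complex), then by matrix operation, in the order spmv_uaua .. scale.
+ * The function and index names below are hypothetical.
+ */
+#if 0
+static rsb_err_t example_dispatch(int ti, int mi, void *VA, rsb_coo_idx_t *IA, rsb_coo_idx_t *JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t *mpi, rsb_flags_t flags)
+{
+	if(ti < 0 || ti >= RSB_IMPLEMENTED_TYPES || mi < 0 || mi >= RSB_IMPLEMENTED_MOPS)
+		return RSB_ERR_BADARGS;
+	/* e.g. ti==1, mi==10 would call rsb__do_fullrangebenchmark_float_scale */
+	return rsb_benchmark_dispatch_table[ti][mi](VA,IA,JA,nnz,rows,cols,mpi,flags);
+}
+#endif /* 0 */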
+
+static rsb_err_t rsb_do_completetypebenchmark_double(const char * filename, struct rsb_mops_performance_info_t * mspi)/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION macro */
+{
+        /*!
+	 * \ingroup gr_bench
+	 * Will benchmark all supported matrix operations over the "double" type,
+	 * over all supported matrix partitionings for a fixed block size.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t * IA=NULL,*JA=NULL;
+	rsb_coo_idx_t rows=0,cols=0;
+	rsb_nnz_idx_t nnz=0;
+	void *VA=NULL;
+
+	struct rsb_mop_performance_info_t * mpi = &(mspi->pipmo[0]);
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE ,flags=0;
+
+	RSB_BZERO(mspi,sizeof(*mspi)); /* zero the whole pointed-to struct, not the pointer */
+
+	if((rsb__util_mm_load_matrix_f(filename,&IA,&JA,&VA,&rows,&cols,&nnz,typecode,flags,NULL,NULL))!=0)
+	{
+		RSB_STDERR(RSB_ERRMSG_NOTMTXMKT" : %s ..\n",filename);
+		goto err;
+	}
+	
+
+	/* we benchmark our double library implementation for operation spmv_uaua */
+	errval = rsb__do_fullrangebenchmark_double_spmv_uaua(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our double library implementation for operation spmv_uauz */
+	errval = rsb__do_fullrangebenchmark_double_spmv_uauz(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our double library implementation for operation spmv_uxua */
+	errval = rsb__do_fullrangebenchmark_double_spmv_uxua(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our double library implementation for operation spmv_unua */
+	errval = rsb__do_fullrangebenchmark_double_spmv_unua(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our double library implementation for operation spmv_sasa */
+	errval = rsb__do_fullrangebenchmark_double_spmv_sasa(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our double library implementation for operation spsv_uxua */
+	errval = rsb__do_fullrangebenchmark_double_spsv_uxua(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our double library implementation for operation spmv_sxsa */
+	errval = rsb__do_fullrangebenchmark_double_spmv_sxsa(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our double library implementation for operation spsv_sxsx */
+	errval = rsb__do_fullrangebenchmark_double_spsv_sxsx(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our double library implementation for operation infty_norm */
+	errval = rsb__do_fullrangebenchmark_double_infty_norm(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our double library implementation for operation rowssums */
+	errval = rsb__do_fullrangebenchmark_double_rowssums(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our double library implementation for operation scale */
+	errval = rsb__do_fullrangebenchmark_double_scale(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+	mpi-=11;
+	
+
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_double_spmv_uaua");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_double_spmv_uauz");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_double_spmv_uxua");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_double_spmv_unua");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_double_spmv_sasa");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_double_spsv_uxua");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_double_spmv_sxsa");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_double_spsv_sxsx");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_double_infty_norm");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_double_rowssums");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_double_scale");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	mpi-=11;
+
+	err:
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_CONDITIONAL_FREE(JA);
+	RSB_CONDITIONAL_FREE(VA);
+	return errval;
+}
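+
+/*
+ * Editorial sketch, not part of the generated sources: the unrolled sequence
+ * above (one rsb__do_fullrangebenchmark_* call and one
+ * rsb__dump_performance_info call per operation, with mpi stepped forward
+ * eleven times and then rewound) is equivalent to a loop over the "double"
+ * row of the dispatch table; the helper name is hypothetical.
+ */
+#if 0
+static rsb_err_t example_double_completetype_loop(void *VA, rsb_coo_idx_t *IA, rsb_coo_idx_t *JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t *mpi, rsb_flags_t flags)
+{
+	const char * labels[] = {
+		"pi_double_spmv_uaua", "pi_double_spmv_uauz", "pi_double_spmv_uxua",
+		"pi_double_spmv_unua", "pi_double_spmv_sasa", "pi_double_spsv_uxua",
+		"pi_double_spmv_sxsa", "pi_double_spsv_sxsx", "pi_double_infty_norm",
+		"pi_double_rowssums", "pi_double_scale" };
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	size_t mi;
+
+	for(mi = 0; mi < sizeof(labels)/sizeof(labels[0]); ++mi)
+	{
+		/* row 0 of the table holds the "double" implementations */
+		errval = rsb_benchmark_dispatch_table[0][mi](VA,IA,JA,nnz,rows,cols,mpi+mi,flags);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		errval = rsb__dump_performance_info(mpi+mi, labels[mi]);
+		if(RSB_SOME_ERROR(errval)) return errval;
+	}
+	return errval;
+}
+#endif /* 0 */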
+
+
+static rsb_err_t rsb_do_completetypebenchmark_float(const char * filename, struct rsb_mops_performance_info_t * mspi)/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION macro */
+{
+        /*!
+	 * \ingroup gr_bench
+	 * Will benchmark all supported matrix operations over the "float" type,
+	 * over all supported matrix partitionings for a fixed block size.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t * IA=NULL,*JA=NULL;
+	rsb_coo_idx_t rows=0,cols=0;
+	rsb_nnz_idx_t nnz=0;
+	void *VA=NULL;
+
+	struct rsb_mop_performance_info_t * mpi = &(mspi->pipmo[0]);
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT ,flags=0;
+
+	RSB_BZERO(mspi,sizeof(*mspi)); /* zero the whole pointed-to struct, not the pointer */
+
+	if((rsb__util_mm_load_matrix_f(filename,&IA,&JA,&VA,&rows,&cols,&nnz,typecode,flags,NULL,NULL))!=0)
+	{
+		RSB_STDERR(RSB_ERRMSG_NOTMTXMKT" : %s ..\n",filename);
+		goto err;
+	}
+	
+
+	/* we benchmark our float library implementation for operation spmv_uaua */
+	errval = rsb__do_fullrangebenchmark_float_spmv_uaua(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our float library implementation for operation spmv_uauz */
+	errval = rsb__do_fullrangebenchmark_float_spmv_uauz(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our float library implementation for operation spmv_uxua */
+	errval = rsb__do_fullrangebenchmark_float_spmv_uxua(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our float library implementation for operation spmv_unua */
+	errval = rsb__do_fullrangebenchmark_float_spmv_unua(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our float library implementation for operation spmv_sasa */
+	errval = rsb__do_fullrangebenchmark_float_spmv_sasa(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our float library implementation for operation spsv_uxua */
+	errval = rsb__do_fullrangebenchmark_float_spsv_uxua(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our float library implementation for operation spmv_sxsa */
+	errval = rsb__do_fullrangebenchmark_float_spmv_sxsa(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our float library implementation for operation spsv_sxsx */
+	errval = rsb__do_fullrangebenchmark_float_spsv_sxsx(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our float library implementation for operation infty_norm */
+	errval = rsb__do_fullrangebenchmark_float_infty_norm(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our float library implementation for operation rowssums */
+	errval = rsb__do_fullrangebenchmark_float_rowssums(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our float library implementation for operation scale */
+	errval = rsb__do_fullrangebenchmark_float_scale(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+	mpi-=11;
+	
+
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_float_spmv_uaua");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_float_spmv_uauz");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_float_spmv_uxua");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_float_spmv_unua");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_float_spmv_sasa");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_float_spsv_uxua");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_float_spmv_sxsa");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_float_spsv_sxsx");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_float_infty_norm");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_float_rowssums");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_float_scale");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	mpi-=11;
+
+	err:
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_CONDITIONAL_FREE(JA);
+	RSB_CONDITIONAL_FREE(VA);
+	return errval;
+}
+
+
+static rsb_err_t rsb_do_completetypebenchmark_float_complex(const char * filename, struct rsb_mops_performance_info_t * mspi)/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION macro */
+{
+        /*!
+	 * \ingroup gr_bench
+	 * Will benchmark all supported matrix operations over the "float complex" type,
+	 * over all supported matrix partitionings for a fixed block size.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t * IA=NULL,*JA=NULL;
+	rsb_coo_idx_t rows=0,cols=0;
+	rsb_nnz_idx_t nnz=0;
+	void *VA=NULL;
+
+	struct rsb_mop_performance_info_t * mpi = &(mspi->pipmo[0]);
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,flags=0;
+
+	RSB_BZERO(mspi,sizeof(*mspi)); /* zero the whole pointed-to struct, not the pointer */
+
+	if((rsb__util_mm_load_matrix_f(filename,&IA,&JA,&VA,&rows,&cols,&nnz,typecode,flags,NULL,NULL))!=0)
+	{
+		RSB_STDERR(RSB_ERRMSG_NOTMTXMKT" : %s ..\n",filename);
+		goto err;
+	}
+	
+
+	/* we benchmark our float complex library implementation for operation spmv_uaua */
+	errval = rsb__do_fullrangebenchmark_float_complex_spmv_uaua(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our float complex library implementation for operation spmv_uauz */
+	errval = rsb__do_fullrangebenchmark_float_complex_spmv_uauz(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our float complex library implementation for operation spmv_uxua */
+	errval = rsb__do_fullrangebenchmark_float_complex_spmv_uxua(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our float complex library implementation for operation spmv_unua */
+	errval = rsb__do_fullrangebenchmark_float_complex_spmv_unua(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our float complex library implementation for operation spmv_sasa */
+	errval = rsb__do_fullrangebenchmark_float_complex_spmv_sasa(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our float complex library implementation for operation spsv_uxua */
+	errval = rsb__do_fullrangebenchmark_float_complex_spsv_uxua(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our float complex library implementation for operation spmv_sxsa */
+	errval = rsb__do_fullrangebenchmark_float_complex_spmv_sxsa(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our float complex library implementation for operation spsv_sxsx */
+	errval = rsb__do_fullrangebenchmark_float_complex_spsv_sxsx(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our float complex library implementation for operation infty_norm */
+	errval = rsb__do_fullrangebenchmark_float_complex_infty_norm(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our float complex library implementation for operation rowssums */
+	errval = rsb__do_fullrangebenchmark_float_complex_rowssums(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our float complex library implementation for operation scale */
+	errval = rsb__do_fullrangebenchmark_float_complex_scale(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+	mpi-=11;
+	
+
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_float_complex_spmv_uaua");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_float_complex_spmv_uauz");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_float_complex_spmv_uxua");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_float_complex_spmv_unua");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_float_complex_spmv_sasa");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_float_complex_spsv_uxua");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_float_complex_spmv_sxsa");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_float_complex_spsv_sxsx");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_float_complex_infty_norm");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_float_complex_rowssums");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_float_complex_scale");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	mpi-=11;
+
+	err:
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_CONDITIONAL_FREE(JA);
+	RSB_CONDITIONAL_FREE(VA);
+	return errval;
+}
+
+
+static rsb_err_t rsb_do_completetypebenchmark_double_complex(const char * filename, struct rsb_mops_performance_info_t * mspi)/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION macro */
+{
+        /*!
+	 * \ingroup gr_bench
+	 * Will benchmark all supported matrix operations over the "double complex" type,
+	 * over all supported matrix partitionings for a fixed block size.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t * IA=NULL,*JA=NULL;
+	rsb_coo_idx_t rows=0,cols=0;
+	rsb_nnz_idx_t nnz=0;
+	void *VA=NULL;
+
+	struct rsb_mop_performance_info_t * mpi = &(mspi->pipmo[0]);
+	rsb_flags_t typecode = RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,flags=0;
+
+	RSB_BZERO(mspi,sizeof(*mspi)); /* zero the whole pointed-to struct, not the pointer */
+
+	if((rsb__util_mm_load_matrix_f(filename,&IA,&JA,&VA,&rows,&cols,&nnz,typecode,flags,NULL,NULL))!=0)
+	{
+		RSB_STDERR(RSB_ERRMSG_NOTMTXMKT" : %s ..\n",filename);
+		goto err;
+	}
+	
+
+	/* we benchmark our double complex library implementation for operation spmv_uaua */
+	errval = rsb__do_fullrangebenchmark_double_complex_spmv_uaua(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our double complex library implementation for operation spmv_uauz */
+	errval = rsb__do_fullrangebenchmark_double_complex_spmv_uauz(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our double complex library implementation for operation spmv_uxua */
+	errval = rsb__do_fullrangebenchmark_double_complex_spmv_uxua(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our double complex library implementation for operation spmv_unua */
+	errval = rsb__do_fullrangebenchmark_double_complex_spmv_unua(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our double complex library implementation for operation spmv_sasa */
+	errval = rsb__do_fullrangebenchmark_double_complex_spmv_sasa(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our double complex library implementation for operation spsv_uxua */
+	errval = rsb__do_fullrangebenchmark_double_complex_spsv_uxua(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our double complex library implementation for operation spmv_sxsa */
+	errval = rsb__do_fullrangebenchmark_double_complex_spmv_sxsa(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our double complex library implementation for operation spsv_sxsx */
+	errval = rsb__do_fullrangebenchmark_double_complex_spsv_sxsx(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our double complex library implementation for operation infty_norm */
+	errval = rsb__do_fullrangebenchmark_double_complex_infty_norm(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our double complex library implementation for operation rowssums */
+	errval = rsb__do_fullrangebenchmark_double_complex_rowssums(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+
+	/* we benchmark our double complex library implementation for operation scale */
+	errval = rsb__do_fullrangebenchmark_double_complex_scale(VA,IA,JA,nnz,rows,cols,mpi,flags);
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+	mpi-=11;
+	
+
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_double_complex_spmv_uaua");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_double_complex_spmv_uauz");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_double_complex_spmv_uxua");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_double_complex_spmv_unua");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_double_complex_spmv_sasa");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_double_complex_spsv_uxua");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_double_complex_spmv_sxsa");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_double_complex_spsv_sxsx");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_double_complex_infty_norm");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_double_complex_rowssums");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"pi_double_complex_scale");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+	mpi-=11;
+
+	err:
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_CONDITIONAL_FREE(JA);
+	RSB_CONDITIONAL_FREE(VA);
+	return errval;
+}
+
+#if 0
+rsb_err_t rsb__do_spmv_uaua_with_macros_vbr(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+/* generated by the RSB_M4_MULTI_BLOCK_KERNEL_TYPE_DISPATCH_FUNCTION macro */
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * Kernel function dispatching will be performed inline, after type dispatching, in a separate function.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+}
+
+#endif /* 0 */
+double rsb__estimate_mflops_per_op_spmv_uaua(const struct rsb_mtx_t * mtxAp)
+/* generated by the RSB_M4_ESTIMATE_MFLOPS_PER_MOP_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_internals
+	 * A function which returns the approximate count, in millions, of the floating
+	 * point operations needed to perform the "spmv_uaua" matrix operation.
+	 * Real types cost two operations per stored element (one multiplication and
+	 * one addition); complex types cost eight, since
+	 * (a+bi)*(c+di) = (ac-bd)+(ad+bc)i
+	 * takes four real multiplications and two real additions, plus two more real
+	 * additions to accumulate the product into the result.
+	 * In the symmetric/hermitian case the count is doubled.
+	 * FIXME: complexity is NOT taken into consideration for non-SPMV/SPSV operations.
+	 */
+
+	const double M_  = 1000000.0;
+	const double Ec = ((double)mtxAp->element_count); double Me = Ec;
+	if(RSB_IS_MATRIX_TYPE_COMPLEX(mtxAp->typecode)) { Me=8*Ec; } else { Me=2*Ec; }
+	if(rsb__is_not_unsymmetric(mtxAp)){ Me*=2; }/* slightly optimistic : should subtract the diagonal elements count */
+	Me /= M_;
+	return Me;
+}
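+
+/*
+ * Worked example for the estimator above (editorial): an unsymmetric real
+ * matrix with element_count = 1e6 gives Me = 2*1e6/1e6 = 2 Mflops per
+ * spmv_uaua, while a symmetric double complex matrix of the same size gives
+ * Me = 2*8*1e6/1e6 = 16 Mflops.
+ */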
+
+#if 0
+rsb_err_t rsb__do_spmv_uauz_with_macros_vbr(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+/* generated by the RSB_M4_MULTI_BLOCK_KERNEL_TYPE_DISPATCH_FUNCTION macro */
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * Kernel function dispatching will be performed inline, after type dispatching, in a separate function.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+}
+
+#endif /* 0 */
+double rsb__estimate_mflops_per_op_spmv_uauz(const struct rsb_mtx_t * mtxAp)
+/* generated by the RSB_M4_ESTIMATE_MFLOPS_PER_MOP_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_internals
+	 * A function which returns the approximate count, in millions, of the floating
+	 * point operations needed to perform the "spmv_uauz" matrix operation.
+	 * Real types cost two operations per stored element (one multiplication and
+	 * one addition); complex types cost eight, since
+	 * (a+bi)*(c+di) = (ac-bd)+(ad+bc)i
+	 * takes four real multiplications and two real additions, plus two more real
+	 * additions to accumulate the product into the result.
+	 * In the symmetric/hermitian case the count is doubled.
+	 * FIXME: complexity is NOT taken into consideration for non-SPMV/SPSV operations.
+	 */
+
+	const double M_  = 1000000.0;
+	const double Ec = ((double)mtxAp->element_count); double Me = Ec;
+	if(RSB_IS_MATRIX_TYPE_COMPLEX(mtxAp->typecode)) { Me=8*Ec; } else { Me=2*Ec; }
+	if(rsb__is_not_unsymmetric(mtxAp)){ Me*=2; }/* slightly optimistic : should subtract the diagonal elements count */
+	Me /= M_;
+	return Me;
+}
+
+#if 0
+rsb_err_t rsb__do_spmv_uxua_with_macros_vbr(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,const rsb_trans_t transA)
+{
+/* generated by the RSB_M4_MULTI_BLOCK_KERNEL_TYPE_DISPATCH_FUNCTION macro */
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * Kernel function dispatching will be performed inline, after type dispatching, in a separate function.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+}
+
+#endif /* 0 */
+double rsb__estimate_mflops_per_op_spmv_uxua(const struct rsb_mtx_t * mtxAp)
+/* generated by the RSB_M4_ESTIMATE_MFLOPS_PER_MOP_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_internals
+	 * A function which returns the approximate count, in millions, of the floating
+	 * point operations needed to perform the "spmv_uxua" matrix operation.
+	 * Real types cost two operations per stored element (one multiplication and
+	 * one addition); complex types cost eight, since
+	 * (a+bi)*(c+di) = (ac-bd)+(ad+bc)i
+	 * takes four real multiplications and two real additions, plus two more real
+	 * additions to accumulate the product into the result.
+	 * In the symmetric/hermitian case the count is doubled.
+	 * FIXME: complexity is NOT taken into consideration for non-SPMV/SPSV operations.
+	 */
+
+	const double M_  = 1000000.0;
+	const double Ec = ((double)mtxAp->element_count); double Me = Ec;
+	if(RSB_IS_MATRIX_TYPE_COMPLEX(mtxAp->typecode)) { Me=8*Ec; } else { Me=2*Ec; }
+	if(rsb__is_not_unsymmetric(mtxAp)){ Me*=2; }/* slightly optimistic : should subtract the diagonal elements count */
+	Me /= M_;
+	return Me;
+}
+
+#if 0
+rsb_err_t rsb__do_spmv_unua_with_macros_vbr(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+/* generated by the RSB_M4_MULTI_BLOCK_KERNEL_TYPE_DISPATCH_FUNCTION macro */
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * Kernel function dispatching will be performed inline, after type dispatching, in a separate function.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+}
+
+#endif /* 0 */
+double rsb__estimate_mflops_per_op_spmv_unua(const struct rsb_mtx_t * mtxAp)
+/* generated by the RSB_M4_ESTIMATE_MFLOPS_PER_MOP_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_internals
+	 * A function which returns the approximate count, in millions, of the floating
+	 * point operations needed to perform the "spmv_unua" matrix operation.
+	 * Real types cost two operations per stored element (one multiplication and
+	 * one addition); complex types cost eight, since
+	 * (a+bi)*(c+di) = (ac-bd)+(ad+bc)i
+	 * takes four real multiplications and two real additions, plus two more real
+	 * additions to accumulate the product into the result.
+	 * In the symmetric/hermitian case the count is doubled.
+	 * FIXME: complexity is NOT taken into consideration for non-SPMV/SPSV operations.
+	 */
+
+	const double M_  = 1000000.0;
+	const double Ec = ((double)mtxAp->element_count); double Me = Ec;
+	if(RSB_IS_MATRIX_TYPE_COMPLEX(mtxAp->typecode)) { Me=8*Ec; } else { Me=2*Ec; }
+	if(rsb__is_not_unsymmetric(mtxAp)){ Me*=2; }/* slightly optimistic : should subtract the diagonal elements count */
+	Me /= M_;
+	return Me;
+}
+
+#if 0
+rsb_err_t rsb__do_spmv_sasa_with_macros_vbr(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+{
+/* generated by the RSB_M4_MULTI_BLOCK_KERNEL_TYPE_DISPATCH_FUNCTION macro */
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * Kernel function dispatching will be performed inline, after type dispatching, in a separate function.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+}
+
+#endif /* 0 */
+double rsb__estimate_mflops_per_op_spmv_sasa(const struct rsb_mtx_t * mtxAp)
+/* generated by the RSB_M4_ESTIMATE_MFLOPS_PER_MOP_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_internals
+	 * A function which returns the approximate count, in millions, of the floating
+	 * point operations needed to perform the "spmv_sasa" matrix operation.
+	 * Real types cost two operations per stored element (one multiplication and
+	 * one addition); complex types cost eight, since
+	 * (a+bi)*(c+di) = (ac-bd)+(ad+bc)i
+	 * takes four real multiplications and two real additions, plus two more real
+	 * additions to accumulate the product into the result.
+	 * In the symmetric/hermitian case the count is doubled.
+	 * FIXME: complexity is NOT taken into consideration for non-SPMV/SPSV operations.
+	 */
+
+	const double M_  = 1000000.0;
+	const double Ec = ((double)mtxAp->element_count); double Me = Ec;
+	if(RSB_IS_MATRIX_TYPE_COMPLEX(mtxAp->typecode)) { Me=8*Ec; } else { Me=2*Ec; }
+	if(rsb__is_not_unsymmetric(mtxAp)){ Me*=2; }/* slightly optimistic : should subtract the diagonal elements count */
+	Me /= M_;
+	return Me;
+}
+
+#if 0
+rsb_err_t rsb__do_spsv_uxua_with_macros_vbr(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA)
+{
+/* generated by the RSB_M4_MULTI_BLOCK_KERNEL_TYPE_DISPATCH_FUNCTION macro */
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * Kernel function dispatching will be performed inline, after type dispatching, in a separate function.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+}
+
+#endif /* 0 */
+double rsb__estimate_mflops_per_op_spsv_uxua(const struct rsb_mtx_t * mtxAp)
+/* generated by the RSB_M4_ESTIMATE_MFLOPS_PER_MOP_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_internals
+	 * A function which returns the approximate count, in millions, of the floating
+	 * point operations needed to perform the "spsv_uxua" matrix operation.
+	 * Real types cost two operations per stored element (one multiplication and
+	 * one addition); complex types cost eight, since
+	 * (a+bi)*(c+di) = (ac-bd)+(ad+bc)i
+	 * takes four real multiplications and two real additions, plus two more real
+	 * additions for accumulation.
+	 * FIXME: complexity is NOT taken into consideration for non-SPMV/SPSV operations.
+	 */
+
+	const double M_  = 1000000.0;
+	const double Ec = ((double)mtxAp->element_count); double Me = Ec;
+	if(RSB_IS_MATRIX_TYPE_COMPLEX(mtxAp->typecode)) { Me=8*Ec; } else { Me=2*Ec; }
+	Me /= M_;
+	return Me;
+}
+
+#if 0
+rsb_err_t rsb__do_spmv_sxsa_with_macros_vbr(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+{
+/* generated by the RSB_M4_MULTI_BLOCK_KERNEL_TYPE_DISPATCH_FUNCTION macro */
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * Kernel function dispatching will be performed inline, after type dispatching, in a separate function.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+}
+
+#endif /* 0 */
+double rsb__estimate_mflops_per_op_spmv_sxsa(const struct rsb_mtx_t * mtxAp)
+/* generated by the RSB_M4_ESTIMATE_MFLOPS_PER_MOP_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_internals
+	 * A function which returns the approximate count, in millions, of the floating
+	 * point operations needed to perform the "spmv_sxsa" matrix operation.
+	 * Real types cost two operations per stored element (one multiplication and
+	 * one addition); complex types cost eight, since
+	 * (a+bi)*(c+di) = (ac-bd)+(ad+bc)i
+	 * takes four real multiplications and two real additions, plus two more real
+	 * additions to accumulate the product into the result.
+	 * In the symmetric/hermitian case the count is doubled.
+	 * FIXME: complexity is NOT taken into consideration for non-SPMV/SPSV operations.
+	 */
+
+	const double M_  = 1000000.0;
+	const double Ec = ((double)mtxAp->element_count); double Me = Ec;
+	if(RSB_IS_MATRIX_TYPE_COMPLEX(mtxAp->typecode)) { Me=8*Ec; } else { Me=2*Ec; }
+	if(rsb__is_not_unsymmetric(mtxAp)){ Me*=2; }/* slightly optimistic : should subtract the diagonal elements count */
+	Me /= M_;
+	return Me;
+}
+
+#if 0
+rsb_err_t rsb__do_spsv_sxsx_with_macros_vbr(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA)
+{
+/* generated by the RSB_M4_MULTI_BLOCK_KERNEL_TYPE_DISPATCH_FUNCTION macro */
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * Kernel function dispatching will be performed inline, after type dispatching, in a separate function.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+}
+
+#endif /* 0 */
+double rsb__estimate_mflops_per_op_spsv_sxsx(const struct rsb_mtx_t * mtxAp)
+/* generated by the RSB_M4_ESTIMATE_MFLOPS_PER_MOP_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_internals
+	 * A function which returns the approximate count, in millions, of the floating
+	 * point operations needed to perform the "spsv_sxsx" matrix operation.
+	 * Real types cost two operations per stored element (one multiplication and
+	 * one addition); complex types cost eight, since
+	 * (a+bi)*(c+di) = (ac-bd)+(ad+bc)i
+	 * takes four real multiplications and two real additions, plus two more real
+	 * additions for accumulation.
+	 * FIXME: complexity is NOT taken into consideration for non-SPMV/SPSV operations.
+	 */
+
+	const double M_  = 1000000.0;
+	const double Ec = ((double)mtxAp->element_count); double Me = Ec;
+	if(RSB_IS_MATRIX_TYPE_COMPLEX(mtxAp->typecode)) { Me=8*Ec; } else { Me=2*Ec; }
+	Me /= M_;
+	return Me;
+}
+
+#if 0
+rsb_err_t rsb__do_infty_norm_with_macros_vbr(const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums)
+{
+/* generated by the RSB_M4_MULTI_BLOCK_KERNEL_TYPE_DISPATCH_FUNCTION macro */
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * Kernel function dispatching will be performed inline, after type dispatching, in a separate function.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+}
+
+#endif /* 0 */
+double rsb__estimate_mflops_per_op_infty_norm(const struct rsb_mtx_t * mtxAp)
+/* generated by the RSB_M4_ESTIMATE_MFLOPS_PER_MOP_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_internals
+	 * A function which returns the approximate count, in millions, of the floating
+	 * point operations needed to perform the "infty_norm" matrix operation:
+	 * one operation per stored element.
+	 * FIXME: type complexity and symmetry are NOT taken into consideration for
+	 * non-SPMV/SPSV operations such as this one.
+	 */
+
+	const double M_  = 1000000.0;
+	const double Ec = ((double)mtxAp->element_count); double Me = Ec;
+	Me /= M_;
+	return Me;
+}
+
+#if 0
+rsb_err_t rsb__do_rowssums_with_macros_vbr(const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums)
+{
+/* generated by the RSB_M4_MULTI_BLOCK_KERNEL_TYPE_DISPATCH_FUNCTION macro */
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * Kernel function dispatching will be performed inline, after type dispatching, in a separate function.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	return RSB_ERR_UNSUPPORTED_TYPE;
+}
+
+#endif /* 0 */
+double rsb__estimate_mflops_per_op_rowssums(const struct rsb_mtx_t * mtxAp)
+/* generated by the RSB_M4_ESTIMATE_MFLOPS_PER_MOP_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_internals
+	 * A function which returns the approximate count of floating point operations
+	 * needed to perform the "rowssums" matrix operation.
+	 * Unlike the SPMV/SPSV estimates above, no adjustment is made here for symmetry
+	 * or for complex types: the estimate is simply one operation per stored element.
+	 * FIXME: complexity is NOT taken into account for non-SPMV/SPSV operations such as this one
+	 */
+
+	const double M_  = 1000000.0;
+	const double Ec = ((double)mtxAp->element_count); double Me = Ec;
+	Me /= M_;
+	return Me;
+}
+
+#if 0
+rsb_err_t rsb__do_scale_with_macros_vbr(struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,const void * scale_factors)
+{
+/* generated by the RSB_M4_MULTI_BLOCK_KERNEL_TYPE_DISPATCH_FUNCTION macro */
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * Kernel function dispatching will be performed inline, after type dispatching, in a separate function.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	return RSB_ERR_UNSUPPORTED_TYPE;
+}
+
+#endif /* 0 */
+double rsb__estimate_mflops_per_op_scale(const struct rsb_mtx_t * mtxAp)
+/* generated by the RSB_M4_ESTIMATE_MFLOPS_PER_MOP_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_internals
+	 * A function which returns the approximate count of floating point operations
+	 * needed to perform the "scale" matrix operation.
+	 * Unlike the SPMV/SPSV estimates above, no adjustment is made here for symmetry
+	 * or for complex types: the estimate is simply one operation per stored element.
+	 * FIXME: complexity is NOT taken into account for non-SPMV/SPSV operations such as this one
+	 */
+
+	const double M_  = 1000000.0;
+	const double Ec = ((double)mtxAp->element_count); double Me = Ec;
+	Me /= M_;
+	return Me;
+}
+
+
+rsb_err_t rsb_do_completebenchmark(const int argc, char *const argv[])/* generated by the RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETEBENCHMARK_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * A complete benchmark program.
+	 * Will benchmark all supported matrix operations over all supported types (double, float, float complex, double complex)
+	 * and over all supported matrix partitionings, for a fixed block size.
+         *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 * FIXME : UNFINISHED: should process and dump this info in a header file.
+	 */
+	struct rsb_global_performance_info_t mspis;
+	struct rsb_mops_performance_info_t * mspi = &(mspis.gpi[0]);
+
+	rsb_option options[] = {
+	    {"matrix-filename",	required_argument, NULL, 0x66},  /* f */
+	    {0,0,0,0}
+	};
+	const char * filename=NULL;
+	int c=0;
+	int opt_index=0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(rsb_lib_init(RSB_NULL_INIT_OPTIONS))goto err;
+
+	for (;;)
+	{
+		c = rsb_getopt_long(argc, argv, "f:" , options, &opt_index);/* Flawfinder: ignore */
+		if (c == -1)break;
+		switch (c)
+		{
+			case 0x66:/* f */
+			filename = optarg;
+			break;
+	    	}
+	}
+
+
+	errval=rsb_do_completetypebenchmark_double(filename,mspi);
+	if(RSB_SOME_ERROR(errval)) return errval;
+	++mspi;
+
+	errval=rsb_do_completetypebenchmark_float(filename,mspi);
+	if(RSB_SOME_ERROR(errval)) return errval;
+	++mspi;
+
+	errval=rsb_do_completetypebenchmark_float_complex(filename,mspi);
+	if(RSB_SOME_ERROR(errval)) return errval;
+	++mspi;
+
+	errval=rsb_do_completetypebenchmark_double_complex(filename,mspi);
+	if(RSB_SOME_ERROR(errval)) return errval;
+	++mspi;
+
+	if( rsb_lib_exit(RSB_NULL_EXIT_OPTIONS) )
+		return RSB_ERR_INTERNAL_ERROR;
+	return RSB_ERR_NO_ERROR;
+	err:
+	return RSB_ERR_INTERNAL_ERROR;
+}
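
A minimal sketch of how this internal driver could be invoked (hypothetical harness, not part of the library): the only long option parsed above is --matrix-filename (short form -f), whose argument is handed to each per-type benchmark; rsb_lib_init()/rsb_lib_exit() are called by the driver itself.

/* Hypothetical harness: e.g. run as  ./benchmark -f A.mtx  */
int main(int argc, char *argv[])
{
	const rsb_err_t errval = rsb_do_completebenchmark(argc, argv);
	return (errval == RSB_ERR_NO_ERROR) ? 0 : -1;
}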
+rsb_err_t rsb__dump_performance_array(const char * an, const double*array)
+/* generated by the RSB_M4_DUMP_PERFORMANCE_INFO_ARRAY_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_bench
+	 * A benchmark info dumping function.
+         *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 * FIXME : UNFINISHED
+	 */
+#if RSB_ALLOW_STDOUT
+	int ri,ci;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	if(!array || !an)
+		return RSB_ERR_BADARGS;
+
+/*	RSB_STDOUT("const double %s [RSB_ROWS_UNROLL_ARRAY_LENGTH][RSB_COLUMNS_UNROLL_ARRAY_LENGTH] = \n",an);*/
+	RSB_STDOUT(".%s = \n",an);
+	RSB_STDOUT("{");
+	RSB_STDOUT("\t/*");
+	for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci) RSB_STDOUT("%d, ",cua[ci]);
+	RSB_STDOUT("columns per block */\n");
+		
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		RSB_STDOUT("\t{");
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			if(ci)RSB_STDOUT(",");
+			RSB_STDOUT(" %lg",array[ri*RSB_COLUMNS_UNROLL_ARRAY_LENGTH+ci]); /* the row stride is the column-unroll array length */
+		}
+		RSB_STDOUT(" }, /* %d rows per block */\n",rua[ri]);
+	}
+	RSB_STDOUT("},\n");
+	return RSB_ERR_NO_ERROR;
+#else /* RSB_ALLOW_STDOUT */
+	return RSB_ERR_UNSUPPORTED_FEATURE;
+#endif /* RSB_ALLOW_STDOUT */
+}
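
For reference, the initializer fragment emitted by the function above looks roughly as follows (the field name and the timing values are invented here; the per-block comments come from RSB_ROWS_UNROLL_ARRAY and RSB_COLUMNS_UNROLL_ARRAY):

.pinfo = 
{	/*1, 2, 4, columns per block */
	{ 512.3, 498.7, 471.2 }, /* 1 rows per block */
	{ 530.1, 505.9, 480.4 }, /* 2 rows per block */
	{ 521.8, 500.2, 465.0 }, /* 4 rows per block */
},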
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+
+
+
+
+/*!
+ @file
+ @brief ...
+ */
+/* @endcond */
diff --git a/rsb_krnl.h b/rsb_krnl.h
new file mode 100644
index 0000000..8bfed25
--- /dev/null
+++ b/rsb_krnl.h
@@ -0,0 +1,461 @@
+/* @cond INNERDOC */
+/*! 
+ @file
+ @brief Matrix type dispatching code, for each matrix operation.
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+
+#ifndef RSB_DISPATCH_H_INCLUDED
+#define RSB_DISPATCH_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/*!
+ @file
+ @brief
+ Performance kernel dispatching code, for each type, submatrix size and operation,
+ for the block compressed sparse stripes format.
+ Kernels are unrolled, with no loops, for user-specified blockings only.
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+#include "rsb_common.h"
+#include "rsb_krnl_bcss_spmv_u.h"	/* uhm */
+#include "rsb_krnl_bcss_spsv_u.h"	/* uhm */
+#include "rsb_krnl_bcss_misc_u.h"	/* uhm */
+
+#define	RSB_BCSR_GET_NEXT_BLOCK_POINTER(BP,mtxAp,ROWVAR,COLVAR,BLOCKROWSPAR,BLOCKCOLSPAR,BLOCKROWVAR,BLOCKCOLUMNVAR)	\
+	/*										\
+	 * *input*									\
+	 * mtxAp		should be a valid rsb_mtx_t structure pointer		\
+	 * BLOCKROWSPAR	should be set to the rows   count of this block			\
+	 * BLOCKCOLSPAR	should be set to the column count of this block			\
+	 * *output*									\
+	 * ROWVAR	will be set to the base row    of this block			\
+	 * COLVAR	will be set to the base column of this block			\
+	 * BP		will be set to the current block pointer			\
+	 * */										\
+	while( (mtxAp)->bpntr[_i] == (mtxAp)->bpntr[_i+1] ) 				/* skipping empty rows */	\
+	{++_i;_k=(mtxAp)->bpntr[_i];} 		/* _k is the first block index for the current row of blocks */	\
+	_j=(mtxAp)->bindx[_k]; 						/* the current block column index  */	\
+	_lastk=_k;	\
+	(BLOCKROWVAR)=_i;	\
+	(BLOCKCOLUMNVAR)=_j;	\
+	(ROWVAR)=(BLOCKROWSPAR)*_i;					/* _i is the current block row index */	\
+	(COLVAR)=(BLOCKCOLSPAR)*_j; 					/* the current block column index  */	\
+	BP+=(mtxAp)->options->el_size*(BLOCKROWSPAR)*(BLOCKCOLSPAR);			\
+	_k++; 		/* for the future macro calls */						\
+	if( _k >= (mtxAp)->bpntr[_i+1] )++_i;								\
+	;
+
+#define RSB_BCSR_GET_FIRST_BLOCK_POINTER(BP,mtxAp,ROWVAR,COLVAR,BLOCKROWSVAR,BLOCKCOLSVAR,BLOCKROWVAR,BLOCKCOLUMNVAR)	\
+	int _i=0,_j=0,_k=0,_lastk=0;									\
+	(BLOCKROWSVAR)=(mtxAp)->rpntr[1]-(mtxAp)->rpntr[0];		/* _i is the current block row index */	\
+	(BLOCKCOLSVAR)=(mtxAp)->cpntr[1]-(mtxAp)->cpntr[0]; 		/* the current block column index  */	\
+	(BP)=(mtxAp)->VA;											\
+	RSB_BCSR_GET_NEXT_BLOCK_POINTER(BP,mtxAp,ROWVAR,COLVAR,BLOCKROWSVAR,BLOCKCOLSVAR,BLOCKROWVAR,BLOCKCOLUMNVAR)
+
+#define RSB_BCSR_GOT_LAST_BLOCK_POINTER(mtxAp)	( _lastk >= (mtxAp)->block_count )
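
A rough sketch of how these three macros appear meant to be chained (illustrative only: the variable names are invented, and the exact termination discipline should be checked against actual call sites in the library):

/* Illustrative only: visiting the blocks of a BCSR matrix mtxAp. */
{
	char * bp = NULL;            /* current block pointer            */
	rsb_coo_idx_t row, col;      /* base row/column of current block */
	rsb_coo_idx_t brows, bcols;  /* rows/columns per block           */
	rsb_coo_idx_t bri, bci;      /* block row/column indices         */

	RSB_BCSR_GET_FIRST_BLOCK_POINTER(bp,mtxAp,row,col,brows,bcols,bri,bci);
	while( !RSB_BCSR_GOT_LAST_BLOCK_POINTER(mtxAp) )
	{
		/* ... process the brows x bcols block at bp, based at (row,col) ... */
		RSB_BCSR_GET_NEXT_BLOCK_POINTER(bp,mtxAp,row,col,brows,bcols,bri,bci);
	}
}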
+
+
+#define RSB_BENCHMARK_MIN_SECONDS	/*0.5*/1.0
+#define RSB_BENCHMARK_MIN_RUNS		/*5*/10 
+
+
+rsb_err_t rsb__do_spmv_uaua(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_spmv_uauz(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_spmv_uxua(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_spmv_unua(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_spmv_sasa(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_spsv_uxua(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_spmv_sxsa(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_spsv_sxsx(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_infty_norm(const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums);
+
+rsb_err_t rsb__do_rowssums(const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums);
+
+rsb_err_t rsb__do_scale(struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,const void * scale_factors);
+
+
+#ifdef RSB_WANT_KERNELS_DEBUG
+rsb_err_t rsb__spmv_uaua_testing(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+#endif /* RSB_WANT_KERNELS_DEBUG */
+
+rsb_err_t rsb_do_time_spmv_uaua(double * elapsed_time, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+#ifdef RSB_WANT_KERNELS_DEBUG
+rsb_err_t rsb__spmv_uauz_testing(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+#endif /* RSB_WANT_KERNELS_DEBUG */
+
+rsb_err_t rsb_do_time_spmv_uauz(double * elapsed_time, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+#ifdef RSB_WANT_KERNELS_DEBUG
+rsb_err_t rsb__spmv_uxua_testing(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,const rsb_trans_t transA);
+#endif /* RSB_WANT_KERNELS_DEBUG */
+
+rsb_err_t rsb_do_time_spmv_uxua(double * elapsed_time, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,const rsb_trans_t transA);
+
+#ifdef RSB_WANT_KERNELS_DEBUG
+rsb_err_t rsb__spmv_unua_testing(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+#endif /* RSB_WANT_KERNELS_DEBUG */
+
+rsb_err_t rsb_do_time_spmv_unua(double * elapsed_time, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+#ifdef RSB_WANT_KERNELS_DEBUG
+rsb_err_t rsb__spmv_sasa_testing(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+#endif /* RSB_WANT_KERNELS_DEBUG */
+
+rsb_err_t rsb_do_time_spmv_sasa(double * elapsed_time, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+
+#ifdef RSB_WANT_KERNELS_DEBUG
+rsb_err_t rsb__spsv_uxua_testing(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+#endif /* RSB_WANT_KERNELS_DEBUG */
+
+rsb_err_t rsb_do_time_spsv_uxua(double * elapsed_time, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+#ifdef RSB_WANT_KERNELS_DEBUG
+rsb_err_t rsb__spmv_sxsa_testing(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+#endif /* RSB_WANT_KERNELS_DEBUG */
+
+rsb_err_t rsb_do_time_spmv_sxsa(double * elapsed_time, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+
+#ifdef RSB_WANT_KERNELS_DEBUG
+rsb_err_t rsb__spsv_sxsx_testing(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+#endif /* RSB_WANT_KERNELS_DEBUG */
+
+rsb_err_t rsb_do_time_spsv_sxsx(double * elapsed_time, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+
+#ifdef RSB_WANT_KERNELS_DEBUG
+rsb_err_t rsb__infty_norm_testing(const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums);
+#endif /* RSB_WANT_KERNELS_DEBUG */
+
+rsb_err_t rsb_do_time_infty_norm(double * elapsed_time, const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums);
+
+#ifdef RSB_WANT_KERNELS_DEBUG
+rsb_err_t rsb__rowssums_testing(const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums);
+#endif /* RSB_WANT_KERNELS_DEBUG */
+
+rsb_err_t rsb_do_time_rowssums(double * elapsed_time, const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums);
+
+#ifdef RSB_WANT_KERNELS_DEBUG
+rsb_err_t rsb__scale_testing(struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,const void * scale_factors);
+#endif /* RSB_WANT_KERNELS_DEBUG */
+
+rsb_err_t rsb_do_time_scale(double * elapsed_time, struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,const void * scale_factors);
+
+
+rsb_err_t rsb__do_fullrangebenchmark_double_spmv_uaua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_double_spmv_uaua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_float_spmv_uaua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_float_spmv_uaua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_float_complex_spmv_uaua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_float_complex_spmv_uaua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_double_complex_spmv_uaua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_double_complex_spmv_uaua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_double_spmv_uauz(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_double_spmv_uauz(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_float_spmv_uauz(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_float_spmv_uauz(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_float_complex_spmv_uauz(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_float_complex_spmv_uauz(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_double_complex_spmv_uauz(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_double_complex_spmv_uauz(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_double_spmv_uxua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_double_spmv_uxua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_float_spmv_uxua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_float_spmv_uxua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_float_complex_spmv_uxua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_float_complex_spmv_uxua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_double_complex_spmv_uxua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_double_complex_spmv_uxua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_double_spmv_unua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_double_spmv_unua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_float_spmv_unua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_float_spmv_unua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_float_complex_spmv_unua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_float_complex_spmv_unua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_double_complex_spmv_unua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_double_complex_spmv_unua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_double_spmv_sasa(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_double_spmv_sasa(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_float_spmv_sasa(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_float_spmv_sasa(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_float_complex_spmv_sasa(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_float_complex_spmv_sasa(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_double_complex_spmv_sasa(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_double_complex_spmv_sasa(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_double_spsv_uxua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_double_spsv_uxua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_float_spsv_uxua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_float_spsv_uxua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_float_complex_spsv_uxua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_float_complex_spsv_uxua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_double_complex_spsv_uxua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_double_complex_spsv_uxua(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_double_spmv_sxsa(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_double_spmv_sxsa(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_float_spmv_sxsa(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_float_spmv_sxsa(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_float_complex_spmv_sxsa(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_float_complex_spmv_sxsa(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_double_complex_spmv_sxsa(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_double_complex_spmv_sxsa(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_double_spsv_sxsx(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_double_spsv_sxsx(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_float_spsv_sxsx(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_float_spsv_sxsx(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_float_complex_spsv_sxsx(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_float_complex_spsv_sxsx(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_double_complex_spsv_sxsx(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_double_complex_spsv_sxsx(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+
+rsb_err_t rsb__do_fullrangebenchmark_double_infty_norm(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_double_infty_norm(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums);
+
+rsb_err_t rsb__do_fullrangebenchmark_float_infty_norm(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_float_infty_norm(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums);
+
+rsb_err_t rsb__do_fullrangebenchmark_float_complex_infty_norm(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_float_complex_infty_norm(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums);
+
+rsb_err_t rsb__do_fullrangebenchmark_double_complex_infty_norm(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_double_complex_infty_norm(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums);
+
+rsb_err_t rsb__do_fullrangebenchmark_double_rowssums(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_double_rowssums(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums);
+
+rsb_err_t rsb__do_fullrangebenchmark_float_rowssums(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_float_rowssums(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums);
+
+rsb_err_t rsb__do_fullrangebenchmark_float_complex_rowssums(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_float_complex_rowssums(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums);
+
+rsb_err_t rsb__do_fullrangebenchmark_double_complex_rowssums(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_double_complex_rowssums(double * total_elapsed_time, double * m_flops, const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums);
+
+rsb_err_t rsb__do_fullrangebenchmark_double_scale(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_double_scale(double * total_elapsed_time, double * m_flops, struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,const void * scale_factors);
+
+rsb_err_t rsb__do_fullrangebenchmark_float_scale(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_float_scale(double * total_elapsed_time, double * m_flops, struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,const void * scale_factors);
+
+rsb_err_t rsb__do_fullrangebenchmark_float_complex_scale(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_float_complex_scale(double * total_elapsed_time, double * m_flops, struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,const void * scale_factors);
+
+rsb_err_t rsb__do_fullrangebenchmark_double_complex_scale(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
+
+rsb_err_t rsb__do_benchmark_double_complex_scale(double * total_elapsed_time, double * m_flops, struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,const void * scale_factors);
+
+
+
+
+
+#if 0
+rsb_err_t rsb__do_spmv_uaua_with_macros_vbr(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+#endif /* 0 */
+double rsb__estimate_mflops_per_op_spmv_uaua(const struct rsb_mtx_t * mtxAp);
+
+#if 0
+rsb_err_t rsb__do_spmv_uauz_with_macros_vbr(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+#endif /* 0 */
+double rsb__estimate_mflops_per_op_spmv_uauz(const struct rsb_mtx_t * mtxAp);
+
+#if 0
+rsb_err_t rsb__do_spmv_uxua_with_macros_vbr(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,const rsb_trans_t transA);
+#endif /* 0 */
+double rsb__estimate_mflops_per_op_spmv_uxua(const struct rsb_mtx_t * mtxAp);
+
+#if 0
+rsb_err_t rsb__do_spmv_unua_with_macros_vbr(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+#endif /* 0 */
+double rsb__estimate_mflops_per_op_spmv_unua(const struct rsb_mtx_t * mtxAp);
+
+#if 0
+rsb_err_t rsb__do_spmv_sasa_with_macros_vbr(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+#endif /* 0 */
+double rsb__estimate_mflops_per_op_spmv_sasa(const struct rsb_mtx_t * mtxAp);
+
+#if 0
+rsb_err_t rsb__do_spsv_uxua_with_macros_vbr(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const rsb_trans_t transA);
+#endif /* 0 */
+double rsb__estimate_mflops_per_op_spsv_uxua(const struct rsb_mtx_t * mtxAp);
+
+#if 0
+rsb_err_t rsb__do_spmv_sxsa_with_macros_vbr(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+#endif /* 0 */
+double rsb__estimate_mflops_per_op_spmv_sxsa(const struct rsb_mtx_t * mtxAp);
+
+#if 0
+rsb_err_t rsb__do_spsv_sxsx_with_macros_vbr(const struct rsb_mtx_t * mtxAp,const void * restrict rhs, void * restrict out,const void * alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy,const rsb_trans_t transA);
+#endif /* 0 */
+double rsb__estimate_mflops_per_op_spsv_sxsx(const struct rsb_mtx_t * mtxAp);
+
+#if 0
+rsb_err_t rsb__do_infty_norm_with_macros_vbr(const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums);
+#endif /* 0 */
+double rsb__estimate_mflops_per_op_infty_norm(const struct rsb_mtx_t * mtxAp);
+
+#if 0
+rsb_err_t rsb__do_rowssums_with_macros_vbr(const struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,void * row_sums);
+#endif /* 0 */
+double rsb__estimate_mflops_per_op_rowssums(const struct rsb_mtx_t * mtxAp);
+
+#if 0
+rsb_err_t rsb__do_scale_with_macros_vbr(struct rsb_mtx_t * mtxAp,const rsb_trans_t transA,const void * scale_factors);
+#endif /* 0 */
+double rsb__estimate_mflops_per_op_scale(const struct rsb_mtx_t * mtxAp);
+
+rsb_err_t rsb__dump_performance_array(const char * an, const double*array);
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+
+#endif	/* RSB_DISPATCH_H_INCLUDED */
+
+
+
+
+/*!
+ @file
+ @brief ...
+ */
+/* @endcond */
diff --git a/rsb_krnl.m4 b/rsb_krnl.m4
new file mode 100644
index 0000000..f0efa6c
--- /dev/null
+++ b/rsb_krnl.m4
@@ -0,0 +1,171 @@
+dnl
+dnl
+dnl	@author: Michele Martone
+dnl
+dnl
+/* @cond INNERDOC */
+/*! 
+ @file
+ @brief Matrix type dispatching code, for each matrix operation.
+ */
+dnl
+include(`rsb_misc.m4')dnl
+dnl
+RSB_M4_HEADER_MESSAGE()dnl
+
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#ifndef RSB_DISPATCH_H_INCLUDED
+#define RSB_DISPATCH_H_INCLUDED
+')dnl
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+dnl
+include(`rsb_krnl_bcss_macros.m4')dnl
+include(`rsb_krnl_linked_lists.m4')dnl
+include(`rsb_krnl_macros.m4')dnl
+dnl
+dnl #include "rsb_internals.h"
+#include "rsb_common.h"
+dnl #include "rsb_krnl_vb.h"	/* uhm */
+dnl #include "rsb_krnl_lb.h"	/* uhm */
+dnl #include "rsb_krnl_bcss.h"	/* uhm */
+dnl #include "rsb_krnl_bcss_u.h"	/* uhm */
+dnl #include "rsb_krnl_bcss_l.h"	/* uhm */
+#include "rsb_krnl_bcss_spmv_u.h"	/* uhm */
+#include "rsb_krnl_bcss_spsv_u.h"	/* uhm */
+#include "rsb_krnl_bcss_misc_u.h"	/* uhm */
+dnl
+dnl
+
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#define	RSB_BCSR_GET_NEXT_BLOCK_POINTER(BP,mtxAp,ROWVAR,COLVAR,BLOCKROWSPAR,BLOCKCOLSPAR,BLOCKROWVAR,BLOCKCOLUMNVAR)	\
+	/*										\
+	 * *input*									\
+	 * mtxAp		should be a valid rsb_mtx_t structure pointer		\
+	 * BLOCKROWSPAR	should be set to the rows   count of this block			\
+	 * BLOCKCOLSPAR	should be set to the column count of this block			\
+	 * *output*									\
+	 * ROWVAR	will be set to the base row    of this block			\
+	 * COLVAR	will be set to the base column of this block			\
+	 * BP		will be set to the current block pointer			\
+	 * */										\
+	while( (mtxAp)->bpntr[_i] == (mtxAp)->bpntr[_i+1] ) 				/* skipping empty rows */	\
+	{++_i;_k=(mtxAp)->bpntr[_i];} 		/* _k is the first block index for the current row of blocks */	\
+	_j=(mtxAp)->bindx[_k]; 						/* the current block column index  */	\
+	_lastk=_k;	\
+	(BLOCKROWVAR)=_i;	\
+	(BLOCKCOLUMNVAR)=_j;	\
+	(ROWVAR)=(BLOCKROWSPAR)*_i;					/* _i is the current block row index */	\
+	(COLVAR)=(BLOCKCOLSPAR)*_j; 					/* the current block column index  */	\
+	BP+=(mtxAp)->options->el_size*(BLOCKROWSPAR)*(BLOCKCOLSPAR);			\
+	_k++; 		/* for the future macro calls */						\
+	if( _k >= (mtxAp)->bpntr[_i+1] )++_i;								\
+	;
+
+#define RSB_BCSR_GET_FIRST_BLOCK_POINTER(BP,mtxAp,ROWVAR,COLVAR,BLOCKROWSVAR,BLOCKCOLSVAR,BLOCKROWVAR,BLOCKCOLUMNVAR)	\
+	int _i=0,_j=0,_k=0,_lastk=0;									\
+	(BLOCKROWSVAR)=(mtxAp)->rpntr[1]-(mtxAp)->rpntr[0];		/* _i is the current block row index */	\
+	(BLOCKCOLSVAR)=(mtxAp)->cpntr[1]-(mtxAp)->cpntr[0]; 		/* the current block column index  */	\
+	(BP)=(mtxAp)->VA;											\
+	RSB_BCSR_GET_NEXT_BLOCK_POINTER(BP,mtxAp,ROWVAR,COLVAR,BLOCKROWSVAR,BLOCKCOLSVAR,BLOCKROWVAR,BLOCKCOLUMNVAR)
+
+#define RSB_BCSR_GOT_LAST_BLOCK_POINTER(mtxAp)	( _lastk >= (mtxAp)->block_count )
+')dnl
+
+ifdef(`ONLY_WANT_HEADERS',`
+`#define RSB_BENCHMARK_MIN_SECONDS	'dnl
+RSB_M4_BENCHMARK_MIN_SECONDS
+`#define RSB_BENCHMARK_MIN_RUNS		'dnl
+RSB_M4_BENCHMARK_MIN_RUNS 
+')dnl
+dnl
+
+
+foreach(`mop',RSB_M4_MATRIX_ALL_OPS,`dnl
+RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION(RSB_M4_MATRIX_TYPES,mop)
+')
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+RSB_M4_DIRECT_KERNEL_DISPATCH_TESTING_FUNCTION(RSB_M4_MATRIX_TYPES,mop)
+RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION(RSB_M4_MATRIX_TYPES,mop)
+')
+dnl
+dnl
+dnl	FIXME : still not for transposed kernels
+dnl
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+foreach(`mtype',RSB_M4_MATRIX_TYPES,`dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION(mop,mtype,`function_declaration')
+',`dnl
+RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION(mop,mtype,`function_definition')
+')dnl
+RSB_M4_DIRECT_KERNEL_DISPATCH_BENCHMARK_FUNCTION(RSB_M4_MATRIX_TYPES,mop)
+')dnl
+')dnl
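
For each (operation, type) pair, the foreach nest above expands RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION into a concrete function; for instance, mop=spmv_uaua with mtype=double yields the pair already declared in rsb_krnl.h:

rsb_err_t rsb__do_fullrangebenchmark_double_spmv_uaua(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA,
	rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols,
	struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
rsb_err_t rsb__do_benchmark_double_spmv_uaua(double * total_elapsed_time, double * m_flops,
	const struct rsb_mtx_t * mtxAp, const void * restrict rhs, void * restrict out,
	const rsb_trans_t transA);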
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`
+/* Dispatch table for type and mop specific benchmarks (FIXME : NEW) */
+ rsb_err_t (* rsb_benchmark_dispatch_table [RSB_IMPLEMENTED_TYPES][RSB_IMPLEMENTED_MOPS]) 
+   (RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION(,void,`function_args')`_pointer_table)'
+` =  {' 
+foreach(`mtype',RSB_M4_MATRIX_TYPES,`dnl
+{
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+ RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION(mop,mtype,`function_identifier')`'dnl
+ifelse(mop,RSB_M4_LAST_LIST_ELEMENT(WANT_MATRIX_OPS),` ',`,')
+')dnl
+}
+ifelse(mtype,RSB_M4_LAST_LIST_ELEMENT(WANT_TYPES),` ',`,')
+')dnl
+dnl 	the following breaks xlc:
+dnl        (void*)NULL	/* FIXME: isn't this an overflowing declaration? */
+};
+')dnl
+dnl
+dnl
+foreach(`mtype',RSB_M4_MATRIX_TYPES,`dnl
+RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION(mtype)
+')dnl
+dnl
+dnl
+dnl
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+#if 0
+ifdef(`ONLY_WANT_HEADERS',`dnl
+RSB_M4_MULTI_BLOCK_KERNEL_TYPE_DISPATCH_FUNCTION(RSB_M4_MATRIX_TYPES,mop,`function_declaration')
+',`dnl
+RSB_M4_MULTI_BLOCK_KERNEL_TYPE_DISPATCH_FUNCTION(RSB_M4_MATRIX_TYPES,mop,`function_definition')
+')dnl
+#endif /* 0 */
+RSB_M4_ESTIMATE_MFLOPS_PER_MOP_FUNCTION(mop)
+')dnl
+dnl
+dnl
+RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETEBENCHMARK_FUNCTION`'dnl
+dnl
+dnl
+RSB_M4_DUMP_PERFORMANCE_INFO_ARRAY_FUNCTION()
+dnl
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+ifdef(`ONLY_WANT_HEADERS',`
+#endif	/* RSB_DISPATCH_H_INCLUDED */
+')
+
+
+
+dnl
+dnl NEW : FIXME
+/*!
+ @file
+ @brief ...
+ */
+dnl
+/* @endcond */
+dnl
diff --git a/rsb_krnl_bcoo_macros.m4 b/rsb_krnl_bcoo_macros.m4
new file mode 100644
index 0000000..ac57223
--- /dev/null
+++ b/rsb_krnl_bcoo_macros.m4
@@ -0,0 +1,753 @@
+dnl
+dnl	@author: Michele Martone
+dnl
+/*!
+ @file
+ @brief
+ Performance kernel dispatching code, for each type, submatrix size and operation,
+ for the block compressed sparse stripes format.
+ Kernels are unrolled, with no loops, for user-specified blockings only.
+ */
+dnl
+include(`rsb_misc.m4')dnl
+RSB_M4_HEADER_MESSAGE()dnl
+RSB_M4_HEADER_EXTRA_DECLARATIONS()dnl
+include(`rsb_krnl_bcss_macros.m4')dnl
+include(`rsb_krnl_vb_macros.m4')dnl FIXME : RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_NAME
+dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_BCOO_SPMV_KERNELS',`dnl
+dnl
+pushdef(`unrollings',$1)dnl
+dnl
+dnl	FIXED BLOCK SIZE KERNELS :
+dnl
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+foreach(`matrix_storage',RSB_M4_BCOO_FORMATS,`dnl
+foreach(`unrolling',unrollings,`dnl
+dnl ifelse(RSB_M4_IS_SPMV_KERNEL_MOP(mop),
+ifelse(1,1,`dnl
+foreach(`diagonal',RSB_M4_MATRIX_DIAGONAL_TYPES,`dnl
+foreach(`rowsu',RSB_M4_ROWS_UNROLL,`dnl
+foreach(`colsu',RSB_M4_COLUMNS_UNROLL,`dnl
+foreach(`symmetry',RSB_M4_MATRIX_SYMMETRY,`dnl
+foreach(`transposition',RSB_M4_MATRIX_TRANSPOSITIONS,`dnl
+foreach(`citype',RSB_M4_MATRIX_COORDINATE_TYPES,`dnl
+foreach(`uplo',RSB_M4_MATRIX_UPLO_TYPES,`dnl
+RSB_M4_BCOO_KERNEL_FUNCTION(`all',type,matrix_storage,transposition,symmetry,rowsu,colsu,unrolling,mop,citype,diagonal,uplo)
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+dnl
+dnl	FIXED BLOCK SIZE DISPATCHERS :
+dnl
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+dnl ifelse(RSB_M4_IS_SPMV_KERNEL_MOP(mop),1,..
+ifelse(1,1,`dnl
+foreach(`matrix_storage',RSB_M4_BCOO_FORMATS,`dnl
+foreach(`unrolling',unrollings,`dnl
+foreach(`symmetry',RSB_M4_MATRIX_SYMMETRY,`dnl
+foreach(`transposition',RSB_M4_MATRIX_TRANSPOSITIONS,`dnl
+foreach(`citype',RSB_M4_MATRIX_COORDINATE_TYPES,`dnl
+foreach(`diagonal',RSB_M4_MATRIX_DIAGONAL_TYPES,`dnl
+foreach(`uplo',RSB_M4_MATRIX_UPLO_TYPES,`dnl
+RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION(`all',type,matrix_storage,transposition,symmetry,unrolling,,,mop,citype,diagonal,uplo)
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+dnl
+dnl
+popdef(`unrollings')dnl
+dnl	
+')dnl
+dnl	
+dnl	
+dnl
+dnl
+define(`RSB_M4_BCOO_KERNEL_FUNCTION',`dnl
+dnl
+dnl
+pushdef(`want_what',$1)dnl
+pushdef(`mtype',$2)dnl
+pushdef(`matrix_storage',$3)dnl	
+pushdef(`transposition',$4)dnl	
+pushdef(`symmetry',$5)dnl	
+pushdef(`b_rows',$6)dnl		block rows
+pushdef(`b_columns',$7)dnl	block columns
+pushdef(`itype',`rsb_coo_idx_t ')dnl integer type (for indices)
+pushdef(`unrolling',$8)dnl	
+pushdef(`mop',$9)dnl	
+pushdef(`citype',$10)dnl	
+pushdef(`diagonal',$11)dnl	
+pushdef(`uplo',$12)dnl	
+dnl
+pushdef(`total_columns',ifelse(RSB_M4_IS_FORMAT_COLUMN_MAJOR(matrix_storage),1,`Mdim',`mdim'))dnl
+pushdef(`total_rows',ifelse(RSB_M4_IS_FORMAT_COLUMN_MAJOR(matrix_storage),1,`mdim',`Mdim'))dnl
+pushdef(`out_dim',ifelse(transposition,RSB_M4_TRANS_T,total_columns,total_rows))dnl
+dnl
+ifelse(RSB_M4_IS_FORMAT_COLUMN_MAJOR(matrix_storage),1,`dnl
+pushdef(`mi',`i')dnl
+pushdef(`Mi',`j')dnl
+')dnl
+ifelse(RSB_M4_IS_FORMAT_ROW_MAJOR(matrix_storage),1,`dnl
+pushdef(`mi',`j')dnl
+pushdef(`Mi',`i')dnl
+')dnl
+dnl
+ifelse(RSB_M4_SAME(transposition,RSB_M4_TRANS_N),1,`dnl
+pushdef(`tmi',mi)dnl
+pushdef(`tMi',Mi)dnl
+')dnl
+ifelse(RSB_M4_NOT(RSB_M4_SAME(transposition,RSB_M4_TRANS_N)),1,`dnl
+pushdef(`tmi',Mi)dnl
+pushdef(`tMi',mi)dnl
+')dnl
+dnl
+ifelse(RSB_M4_IS_SPXX_OP_SCALING_KERNEL_MOP(mop),1,`dnl
+pushdef(`postmult',`(alpha)*')dnl
+',`dnl
+dnl
+ifelse(RSB_M4_IS_SPMX_OP_NEGATING_KERNEL_MOP(mop),1,`dnl
+pushdef(`postmult',`(-1)*')dnl
+',`dnl
+pushdef(`postmult',`')dnl
+')dnl
+dnl
+')dnl
+dnl
+pushdef(`ttransposition',`RSB_M4_TRANSPOSE_TRANSPOSITION(transposition)')dnl
+dnl
+pushdef(`tsymmetry',`RSB_M4_TRANSPOSE_SYMMETRY(symmetry)')dnl
+dnl
+pushdef(`toskipbecauseofsymmetry',`RSB_M4_AND(RSB_M4_IS_SPMX_KERNEL_MOP(mop),RSB_M4_NOT(RSB_M4_IS_COMPLEX_TYPE(mtype)),RSB_M4_IS_NOT_UNSYMMETRIC(symmetry),RSB_M4_NOT(RSB_M4_SAME(transposition,RSB_M4_TRANS_N)))')dnl
+dnl
+dnl
+ifelse(RSB_M4_ARE_KERNEL_GENERATION_PARMS_ALLOWED(want_what,mtype,matrix_storage,transposition,symmetry,unrolling,,,mop,citype,diagonal,uplo),`1',`dnl
+dnl
+ifelse(want_what,`DOC',`dnl
+	/*  TODO */
+')dnl
+ifelse(want_what,`all',`dnl
+dnl
+ifelse(RSB_M4_IS_FORMAT_BCOO(matrix_storage),1,`dnl
+rsb_err_t RSB_M4_BCOO_KERNEL_FUNCTION(`ID',mtype,matrix_storage,transposition,symmetry,b_rows,b_columns,unrolling,mop,citype,diagonal,uplo)dnl
+RSB_M4_BCOO_KERNEL_FUNCTION(`ARGS',mtype,matrix_storage,transposition,symmetry,b_rows,b_columns,unrolling,mop,citype,diagonal,uplo)dnl
+')dnl
+ifdef(`ONLY_WANT_HEADERS',`;
+',`
+RSB_M4_BCOO_KERNEL_FUNCTION(`BODY',mtype,matrix_storage,transposition,symmetry,b_rows,b_columns,unrolling,mop,citype,diagonal,uplo)dnl
+')dnl
+')dnl
+dnl
+ifelse(want_what,`ID',`dnl
+RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_NAME(mtype,matrix_storage,transposition,symmetry,b_rows,b_columns,unrolling,mop,citype,diagonal,uplo)`'dnl
+')dnl
+dnl
+ifelse(want_what,`ARGS',`dnl
+RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION(`ARGS',mtype,matrix_storage,transposition,symmetry,unrolling,,,mop,citype,diagonal,uplo)`'dnl
+')dnl
+dnl
+dnl
+ifelse(want_what,`BODY',`dnl
+dnl
+{
+ifelse(RSB_M4_IS_STRIDED_KERNEL_MOP(mop),`0',`dnl
+pushdef(`incx',`1')dnl
+pushdef(`incy',`1')dnl
+')dnl
+RSB_M4_BXXX_KERNEL_FUNCTION_HELP($@)
+dnl
+ifelse(RSB_M4_AND(RSB_M4_IS_SPMX_KERNEL_MOP(mop),RSB_M4_IS_DIAGONAL_IMPLICIT(diagonal)),1,`dnl
+	RSB_M4_FAKE_DIAG_IMPLICIT_MSG
+')dnl
+dnl
+ifelse(toskipbecauseofsymmetry,1,`dnl
+dnl
+	/* Symmetric `transposed' reverts to symmetric `not transposed' */
+	return RSB_M4_BCOO_KERNEL_FUNCTION(`ID',mtype,matrix_storage,RSB_M4_TRANS_N,symmetry,b_rows,b_columns,unrolling,mop,citype,diagonal,uplo)dnl
+(RSB_M4_ARGS_TO_ACTUAL_ARGS(RSB_M4_BCOO_KERNEL_FUNCTION(`ARGS',mtype,matrix_storage,RSB_M4_TRANS_N,symmetry,b_rows,b_columns,unrolling,mop,citype,diagonal,uplo)));
+dnl
+')dnl
+dnl
+ifelse(toskipbecauseofsymmetry,0,`dnl
+dnl
+dnl	the i,j type has to be the same as the arrays one.
+dnl	if not, mismatch on the copied bytes will occur.
+ifelse(RSB_M4_AND(RSB_M4_NOT(RSB_M4_IS_RC_BIASED_KERNEL_MOP(mop)),RSB_M4_NOT(RSB_M4_AND(RSB_M4_IS_ACC_WRITING_KERNEL_MOP(mop),RSB_M4_NOT(RSB_M4_IS_NOT_UNSYMMETRIC(symmetry))))),`1',`dnl
+ifelse(RSB_M4_IS_STRIDED_KERNEL_MOP(mop),1,`dnl
+	register rsb_coo_idx_t i=0,j=0;
+',`dnl
+	register citype i=0,j=0;
+dnl 20110227 if declaring short indices, we should care about proper conversion
+')dnl
+	const citype *IA=(const citype*)bpntr, *JA=(const citype*)bindx;
+dnl
+',`dnl
+dnl
+ifelse(RSB_M4_SAME(transposition,RSB_M4_TRANS_N),`0',`dnl
+	const citype *JA=(const citype*)bindx;
+	register citype j=0;
+',`dnl
+	const citype *IA=(const citype*)bpntr;
+	register citype i=0;	
+')dnl
+')dnl
+dnl ifelse(mop,`scale',`',`dnl
+dnl ')dnl	20121005 shall change this condition when enabling transpose scale as well
+	register rsb_nnz_idx_t n=0;
+ifelse(RSB_M4_IS_OP_SCALING_KERNEL_MOP(mop),`1',`dnl
+	const mtype alpha=*alphap;`'dnl
+')dnl
+ifelse(RSB_M4_IS_SCALING_KERNEL_MOP(mop),`1',`dnl
+	const mtype beta=*betap;`'dnl
+')dnl
+dnl
+ifelse(RSB_M4_IS_STRIDED_KERNEL_MOP(mop),`0',`dnl
+	dnl const rsb_coo_idx_t incx=1,incy=1;`'
+')dnl
+dnl
+dnl
+ifelse(RSB_M4_AND(RSB_M4_IS_SPMX_KERNEL_MOP(mop)),1,`dnl
+dnl
+
+dnl
+dnl
+dnl
+ifelse(RSB_M4_IS_NOT_UNSYMMETRIC(symmetry),1,`dnl
+	const mtype *trhs = rhs+incx*(roff-coff);`'// symmetry
+	mtype *tout=out+incy*(coff-roff);`'
+
+')dnl
+dnl
+ifelse(RSB_M4_IS_ZEROING_KERNEL_MOP(mop),1,`dnl
+	rsb__cblas_Xscal(RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype),out_dim,NULL,out,incy);
+')dnl
+dnl
+ifelse(RSB_M4_IS_SCALING_KERNEL_MOP(mop),1,`dnl
+ifelse(RSB_M4_IS_STRIDED_KERNEL_MOP(mop),1,`dnl
+	if(beta!=1)rsb__cblas_Xscal(RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype),out_dim,&beta,out,ystride);
+',`dnl
+	rsb__cblas_Xscal(RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype), out_dim,&beta, out, 1);
+')dnl
+')dnl
+dnl
+ifelse(transposition,RSB_M4_TRANS_N,`dnl
+',`dnl
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+')dnl
+dnl
+ifelse(RSB_M4_IS_NOT_UNSYMMETRIC(symmetry),1,`dnl
+	if(roff==coff)
+')dnl
+dnl
+dnl
+ifelse(RSB_M4_IS_UNSYMMETRIC(symmetry),1,`dnl
+dnl
+ifelse(1,1,`dnl
+dnl
+dnl	RSB_M4_SIMPLE_LOOP_UNROLL..
+	RSB_M4_SIMPLE_LOOP_UNROLL_5S(`n',`LI',`0',`nnz',`dnl
+',`dnl
+	i=IA[n+LI]; j=JA[n+LI];
+	out[tMi*incy]+=`'postmult`'RSB_M4_CONJ(VA[n+LI],mtype,transposition,RSB_M4_SYMBOL_UNSYMMETRIC)*rhs[tmi*incx];
+dnl
+',`',`',`RSB_M4_EARLY_EVICT_INSTRUCTION((IA+n,JA+n,VA+n))`'dnl
+',RSB_M4_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR_SMALL)
+dnl
+',`dnl
+dnl
+RSB_M4_SIMPLE_LOOP_UNROLL_5S(`n',`LI',`0',`nnz',`dnl
+dnl
+dnl
+',`dnl
+dnl
+			`const rsb_coo_idx_t' `i_'``''LI`'=IA[n+LI];
+			`const rsb_coo_idx_t' `j_'``''LI`'=JA[n+LI];
+			`const mtype b_'``''LI`'=rhs[tmi``_''LI`'*incx];
+			`const mtype a_'``''LI`'=VA[n+LI];
+dnl
+',`dnl
+			if(tMi``_''0`'== tMi``_''eval(RSB_M4_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR_MEDIUM-1)`')
+			{
+				mtype cacc = RSB_M4_ZERO(mtype);
+forloop(`_LI_',0,decr(RSB_M4_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR_MEDIUM),`dnl
+				cacc+=`'postmult`'RSB_M4_CONJ(`a_'``''_LI_,mtype,transposition,RSB_M4_SYMBOL_UNSYMMETRIC)`*b_'``''_LI_;
+')dnl
+			out[tMi``_''0`'*incy]+=cacc;
+`'dnl
+			}
+			else
+			{
+',`dnl
+				out[tMi``_''LI`'*incy]+=`'postmult`RSB_M4_CONJ(a``_''``''LI`',mtype,transposition,RSB_M4_SYMBOL_UNSYMMETRIC)'`*b_'``''LI;
+',`dnl
+			}
+',RSB_M4_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR_MEDIUM)
+dnl
+')dnl
+dnl
+',`dnl
+dnl
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+dnl		assert(i< Mdim);
+dnl		assert(j< mdim);
+		out[tMi*incy]+=`'postmult`'RSB_M4_CONJ(VA[n],mtype,transposition,RSB_M4_SYMBOL_UNSYMMETRIC)*rhs[tmi*incx];
+dnl
+ifelse(RSB_M4_IS_NOT_UNSYMMETRIC(symmetry),1,`dnl
+		if(RSB_LIKELY(tMi!=tmi))
+			out[tmi*incy]+=`'postmult`'RSB_M4_CONJ(VA[n],mtype,transposition,symmetry)*rhs[tMi*incx];
+')dnl
+dnl
+	}
+dnl
+')dnl
+dnl
+ifelse(RSB_M4_IS_NOT_UNSYMMETRIC(symmetry),1,`dnl
+	if(roff!=coff)
+	RSB_M4_SIMPLE_LOOP_UNROLL(`n',`LI',`0',`nnz',`dnl
+		i=IA[n+LI];
+		j=JA[n+LI];
+dnl		assert(i< Mdim);
+dnl		assert(j< mdim);
+		out[tMi*incy]+=`'postmult`'RSB_M4_CONJ(VA[n+LI],mtype,transposition,RSB_M4_SYMBOL_UNSYMMETRIC)*rhs[tmi*incx];
+		tout[tmi*incy]+=`'postmult`'RSB_M4_CONJ(VA[n+LI],mtype,transposition,symmetry)*trhs[tMi*incx];
+dnl
+	',RSB_M4_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR_SMALL)
+')dnl
+dnl
+	return RSB_ERR_NO_ERROR;
+')dnl
+')dnl
+dnl
+ifelse(RSB_M4_AND(RSB_M4_IS_SPSX_KERNEL_MOP(mop)),1,`dnl
+dnl
+dnl	FIXME: and roff and coff ?
+dnl
+dnl
+pushdef(`is_an_externally_backward_kernel',`RSB_M4_AND(RSB_M4_IS_SPSX_KERNEL_MOP(mop),RSB_M4_XOR(RSB_M4_NOT(RSB_M4_SAME(transposition,RSB_M4_TRANS_N)),RSB_M4_SAME(uplo,`u')))')dnl
+pushdef(`is_vector_updating_spsv',RSB_M4_NOT(RSB_M4_SAME(transposition,RSB_M4_TRANS_N)))dnl
+dnl
+	rsb_coo_idx_t ii;
+ifelse(is_an_externally_backward_kernel,1,`
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+',`dnl
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+')dnl
+	{
+		mtype ax;
+ifelse(is_vector_updating_spsv,1,`dnl
+ifelse(RSB_M4_IS_DIAGONAL_IMPLICIT(diagonal),1,`dnl
+dnl	..
+',`dnl
+dnl		const mtype aa;
+		mtype aa;
+ifelse(RSB_M4_WANT_SPSM_DIAG_CHECK,1,`dnl
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+')dnl
+		aa=VA[n];
+ifelse(RSB_M4_WANT_SPSM_DIAG_CHECK,1,`dnl
+		if(VA[n]==RSB_M4_ZERO(mtype))return RSB_ERR_INVALID_NUMERICAL_DATA;
+')dnl
+ifelse(is_an_externally_backward_kernel,1,`
+		n--;
+',`dnl
+		n++;
+')dnl
+		out[ii*incy]/=aa;
+')dnl
+		ax=out[ii*incy];
+',`dnl
+		ax=0;
+')dnl
+ifelse(is_an_externally_backward_kernel,1,`
+		for(;RSB_LIKELY(n+1>0);--n)
+',`dnl
+		for(;RSB_LIKELY(n<nnz);++n)
+')dnl
+		{
+			i=IA[n];
+			j=JA[n];
+ifelse(is_vector_updating_spsv,1,`dnl
+			if(RSB_UNLIKELY(!(i==ii )))
+',`dnl
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+')dnl
+				break;
+ifelse(is_vector_updating_spsv,1,`dnl
+			out[j*incy]-=RSB_M4_CONJ(VA[n],mtype,transposition,symmetry)*ax;
+',`dnl
+			ax += RSB_M4_CONJ(VA[n],mtype,transposition,symmetry)*out[j*incy];
+')dnl
+		}
+
+ifelse(RSB_M4_IS_DIAGONAL_IMPLICIT(diagonal),1,`dnl
+ifelse(is_vector_updating_spsv,1,`dnl
+		out[ii*incy]=(`'postmult`'out[ii*incy]);
+',`dnl
+		out[ii*incy]=(`'postmult`'out[ii*incy]-ax);
+')dnl
+',`dnl
+dnl
+dnl	FIXME: goto err is illegal for nnz=0 ...
+dnl
+dnl		if(!(i==ii && i==j))
+dnl			goto err;
+ifelse(is_vector_updating_spsv,1,`dnl
+		out[ii*incy]=(`'postmult`'out[ii*incy]);
+',`dnl
+ifelse(RSB_M4_WANT_SPSM_DIAG_CHECK,1,`dnl
+		if(n==nnz || VA[n]==RSB_M4_ZERO(mtype))return RSB_ERR_INVALID_NUMERICAL_DATA;
+')dnl
+		out[ii*incy]=(`'postmult`'out[ii*incy]-ax)/VA[n];
+ifelse(is_an_externally_backward_kernel,1,`dnl
+		--n;
+',`dnl
+		++n;
+')dnl
+')dnl
+')dnl
+	}
+	return RSB_ERR_NO_ERROR;
+dnl err:
+dnl	return RSB_ERR_BADARGS;
+dnl
+popdef(`is_an_externally_backward_kernel')dnl
+popdef(`is_vector_updating_spsv')dnl
+dnl
+')dnl
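+dnl
+dnl	A minimal sketch of what the SPSX branch above generates (forward
+dnl	substitution case: lower triangular, not transposed, explicit
+dnl	diagonal, row-major sorted COO with the diagonal entry last in
+dnl	each row; names are illustrative):
+dnl
+dnl	for(ii=0;ii<Mdim;++ii)
+dnl	{
+dnl		double ax = 0;                      /* row accumulator    */
+dnl		for(;n<nnz && IA[n]==ii && JA[n]!=ii;++n)
+dnl			ax += VA[n]*out[JA[n]];     /* off-diagonal terms */
+dnl		out[ii] = (out[ii]-ax)/VA[n++];     /* divide by diagonal */
+dnl	}
+dnl
+dnl	The backward variants above run n and ii downward; the transposed
+dnl	(vector updating) variants scatter updates into out[] instead of
+dnl	gathering into ax.
+dnl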
+dnl
+dnl ifelse(RSB_M4_NOT(RSB_M4_IS_SPXX_TWO_VECTORS_OPERATING_KERNEL_MOP(mop)),1,`dnl
+dnl 	return RSB_ERR_UNIMPLEMENTED_YET;
+dnl ')dnl
+dnl
+ifelse(mop,`scale',`dnl
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+	dnl
+dnl	FIXME: what about hermitian ?
+dnl
+ifelse(RSB_M4_SAME(transposition,RSB_M4_TRANS_N),1,`dnl
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+',`dnl
+		j=JA[n];
+dnl		i=IA[n];
+dnl		VA[n]*=scale_factors[i];
+		VA[n]*=scale_factors[j];
+')dnl
+dnl
+	}
+	return RSB_ERR_NO_ERROR;
+')dnl
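+dnl
+dnl	For the scale mop the generated loop rescales the stored nonzeroes
+dnl	in place, one factor per row (per column when transposed); a
+dnl	minimal sketch, with illustrative names:
+dnl
+dnl	for(n=0;n<nnz;++n)
+dnl		VA[n] *= scale_factors[IA[n]];  /* JA[n] when transposed */
+dnl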
+dnl
+ifelse(RSB_M4_IS_ACC_WRITING_KERNEL_MOP(mop),`1',`dnl
+	dnl
+	dnl	TODO: do we need vector blank ?
+	dnl
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+dnl
+ifelse(RSB_M4_IS_UNSYMMETRIC(symmetry),1,`dnl
+dnl
+ifelse(RSB_M4_SAME(transposition,RSB_M4_TRANS_N),1,`dnl
+		i=IA[n];
+ifelse(mop,`infty_norm',`dnl
+		row_sums[roff+i]+=RSB_M4_ABS(mtype,VA[n]);
+')dnl
+ifelse(mop,`rowssums',`dnl
+		row_sums[roff+i]+=VA[n];
+')dnl
+',`dnl
+		j=JA[n];
+ifelse(mop,`infty_norm',`dnl
+		row_sums[coff+j]+=RSB_M4_ABS(mtype,VA[n]);
+')dnl
+ifelse(mop,`rowssums',`dnl
+		row_sums[coff+j]+=VA[n];
+')dnl
+')dnl
+')dnl
+dnl
+dnl
+ifelse(RSB_M4_IS_UNSYMMETRIC(symmetry),0,`dnl
+dnl
+		i=IA[n];
+		j=JA[n];
+dnl
+ifelse(mop,`infty_norm',`dnl
+		row_sums[roff+i]+=RSB_M4_ABS(mtype,VA[n]);
+')dnl
+ifelse(mop,`rowssums',`dnl
+		row_sums[roff+i]+=VA[n];
+')dnl
+		if( roff+i != coff+j )
+ifelse(mop,`infty_norm',`dnl
+			row_sums[coff+j]+=RSB_M4_ABS(mtype,VA[n]);
+')dnl
+ifelse(mop,`rowssums',`dnl
+			row_sums[coff+j]+=VA[n];
+')dnl
+')dnl
+dnl
+	}
+	return RSB_ERR_NO_ERROR;
+')dnl
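+dnl
+dnl	The accumulator-writing mops above (infty_norm, rowssums) reduce
+dnl	each nonzero into a per-row accumulator; for non-unsymmetric
+dnl	matrices the mirrored coordinate is accumulated as well, except on
+dnl	the diagonal.  A sketch for infty_norm, with an illustrative
+dnl	symmetric flag (the row infinity norm is then the maximum over
+dnl	row_sums):
+dnl
+dnl	for(n=0;n<nnz;++n)
+dnl	{
+dnl		row_sums[roff+IA[n]] += fabs(VA[n]);
+dnl		if(symmetric && roff+IA[n] != coff+JA[n])
+dnl			row_sums[coff+JA[n]] += fabs(VA[n]);
+dnl	}
+dnl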
+dnl
+ifelse(RSB_M4_IS_STRIDED_KERNEL_MOP(mop),`0',`dnl
+popdef(`incx')dnl
+popdef(`incy')dnl
+')dnl
+dnl
+}
+dnl
+')dnl
+dnl
+')dnl
+dnl
+
+popdef(`toskipbecauseofsymmetry')dnl
+popdef(`ttransposition')dnl
+popdef(`tsymmetry')dnl
+popdef(`postmult')dnl
+popdef(`tmi')dnl
+popdef(`tMi')dnl
+popdef(`mi')dnl
+popdef(`Mi')dnl
+popdef(`total_columns')dnl
+popdef(`total_rows')dnl
+popdef(`out_dim')dnl
+dnl
+popdef(`uplo')dnl
+popdef(`diagonal')dnl
+popdef(`citype')dnl
+popdef(`mop')dnl
+popdef(`unrolling')dnl
+popdef(`itype')dnl
+popdef(`b_columns')dnl
+popdef(`b_rows')dnl
+popdef(`symmetry')dnl
+popdef(`transposition')dnl
+popdef(`matrix_storage')dnl
+popdef(`mtype')dnl
+popdef(`want_what')dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION',`dnl
+dnl
+pushdef(`want_what',$1)dnl
+pushdef(`mtype',$2)dnl
+pushdef(`matrix_storage',$3)dnl
+pushdef(`transposition',$4)dnl
+pushdef(`symmetry',$5)dnl
+pushdef(`unrolling',$6)dnl	
+dnl pushdef(`b_rows',$7)dnl		block rows
+dnl pushdef(`b_columns',$8)dnl	block columns
+pushdef(`itype',`rsb_coo_idx_t ')dnl integer type (for indices)
+pushdef(`mop',`$9')dnl
+pushdef(`citype',`$10')dnl
+pushdef(`diagonal',`$11')dnl
+pushdef(`uplo',$12)dnl
+dnl
+dnl
+dnl
+ifelse(RSB_M4_ARE_KERNEL_GENERATION_PARMS_ALLOWED(want_what,mtype,matrix_storage,transposition,symmetry,unrolling,,,mop,citype,diagonal,uplo),`1',`dnl
+dnl
+ifelse(want_what,`DOC',`dnl
+	/*  TODO */
+')dnl
+dnl
+ifelse(want_what,`all',`dnl
+dnl `/* This code is intended for a block compressed sparse stripe matrix. */'
+ifdef(`ONLY_WANT_HEADERS',`dnl
+RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION(`function_declaration',mtype,matrix_storage,transposition,symmetry,unrolling,,,mop,citype,diagonal,uplo)
+',`dnl
+RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION(`function_definition',mtype,matrix_storage,transposition,symmetry,unrolling,,,mop,citype,diagonal,uplo)
+')dnl
+dnl
+dnl
+dnl
+')dnl
+dnl
+ifelse(want_what,`function_definition',`dnl
+rsb_err_t RSB_M4_KERNEL_SIZE_DISPATCH_FUNCTION_NAME(mtype,matrix_storage,transposition,symmetry,unrolling,mop,citype,diagonal,uplo)dnl
+RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION(`ARGS',mtype,matrix_storage,transposition,symmetry,unrolling,,,mop,citype,diagonal,uplo)
+RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION(`BODY',mtype,matrix_storage,transposition,symmetry,unrolling,,,mop,citype,diagonal,uplo)
+')dnl
+dnl
+ifelse(want_what,`function_declaration',`dnl
+rsb_err_t RSB_M4_KERNEL_SIZE_DISPATCH_FUNCTION_NAME(mtype,matrix_storage,transposition,symmetry,unrolling,mop,citype,diagonal,uplo)dnl
+RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION(`ARGS',mtype,matrix_storage,transposition,symmetry,unrolling,,,mop,citype,diagonal,uplo);dnl
+')dnl
+dnl
+ifelse(want_what,`ARGS',`dnl
+dnl
+dnl
+pushdef(`matrix_structs',`const itype Mdim,const itype mdim,const citype * RSB_M4_RESTRICT bindx,const rsb_nnz_idx_t * RSB_M4_RESTRICT bpntr,const rsb_nnz_idx_t *RSB_M4_RESTRICT indptr,const rsb_coo_idx_t * RSB_M4_RESTRICT rpntr,const rsb_coo_idx_t * RSB_M4_RESTRICT cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz')dnl
+(`'dnl
+ifelse(RSB_M4_IS_SPXX_TWO_VECTORS_OPERATING_KERNEL_MOP(mop),1,`dnl
+dnl
+dnl	no restrict on aliasing ops
+dnl
+ifelse(RSB_M4_IS_ALLOWING_ALIASING_KERNEL_MOP(mop),1,`dnl
+const mtype * RSB_M4_RESTRICT VA, const mtype * rhs, mtype * out, matrix_structs`'dnl
+',`dnl
+const mtype * RSB_M4_RESTRICT VA, const mtype * RSB_M4_RESTRICT rhs, mtype * RSB_M4_RESTRICT out, matrix_structs`'dnl
+')dnl
+')dnl
+ifelse(RSB_M4_IS_OP_SCALING_KERNEL_MOP(mop),`1',`dnl
+,const mtype * RSB_M4_RESTRICT alphap`'dnl
+')dnl
+ifelse(RSB_M4_IS_SCALING_KERNEL_MOP(mop),`1',`dnl
+,const mtype * RSB_M4_RESTRICT betap`'dnl
+')dnl
+ifelse(RSB_M4_IS_STRIDED_KERNEL_MOP(mop),`1',`dnl
+,rsb_coo_idx_t incx, rsb_coo_idx_t incy`'dnl
+')dnl
+ifelse(mop,`spmm_az',`dnl
+dnl
+dnl	FIXME
+dnl
+const itype bstride, const itype cstride, const itype nrhs`'dnl
+')dnl
+ifelse(mop,`scale',`dnl
+mtype * VA, matrix_structs, const mtype *scale_factors`'dnl
+')dnl
+ifelse(RSB_M4_IS_ACC_WRITING_KERNEL_MOP(mop),`1',`dnl
+const mtype * VA, mtype * row_sums, matrix_structs`'dnl
+')dnl
+ifelse(mop,`negation',`dnl
+mtype * VA, matrix_structs`'dnl
+')dnl
+)dnl
+dnl
+')dnl
+dnl
+dnl
+ifelse(want_what,`BODY',`dnl
+dnl
+dnl
+{
+	RSB_M4_DEBUGINFO(``$0'')dnl
+dnl	/*!  \ingroup rsb_doc_kernels
+	/*
+	 * This function dispatches to the specialized kernel function
+	 * performing the desired matrix operation ("mop") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+ifelse(RSB_M4_IS_FORMAT_BCOO(matrix_storage),1,`dnl
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+dnl	 *
+dnl	 * Note: We assume this quantity is the same for each block.
+dnl	 *
+dnl	 * WARNING : EXPERIMENTAL FUNCTION
+dnl	 * for block bigger than ~12x12 it seems that inline matrix multiplication code slows down the whole thing
+')dnl
+	 */
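+dnl
+dnl	A minimal sketch of the allowance above (illustrative variable
+dnl	names): it is the padding that rounds the matrix dimensions up to
+dnl	whole blocks, i.e.
+dnl
+dnl	rows_pad = (blockrows - matrixrows % blockrows) % blockrows;
+dnl	cols_pad = (blockcols - matrixcols % blockcols) % blockcols;
+dnl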
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+dnl #if RSB_EXPERIMENTAL_WANT_PURE_BCOO
+ifelse(RSB_M4_WANT_20110206_BOUNDED_BOX_PATCH,1,`dnl
+dnl 20110206	set the following 
+		columns = rows=1;	/* experimental, for the bounded box patch */
+',`dnl
+dnl 20110206	and commented the following 
+		columns=bc,rows=br;
+')dnl
+dnl #else
+dnl 		columns = rows=1;
+dnl #endif
+
+ifelse(RSB_M4_IS_FORMAT_BCOO(matrix_storage),1,`dnl
+pushdef(`args',`RSB_M4_ARGS_TO_ACTUAL_ARGS(RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION(`ARGS',mtype,matrix_storage,transposition,symmetry,unrolling,,,mop,citype,diagonal,uplo))')dnl
+switch(rows)
+{
+foreach(`rowsu',RSB_M4_ROWS_UNROLL,`dnl
+	case rowsu:
+	{switch(columns)
+	{
+foreach(`colsu',RSB_M4_COLUMNS_UNROLL,`dnl
+		case colsu:/* rowsu colsu matrix_storage */
+		errval = RSB_M4_BCOO_KERNEL_FUNCTION(`ID',mtype,matrix_storage,transposition,symmetry,rowsu,colsu,unrolling,mop,citype,diagonal,uplo)( args );
+		break;
+')dnl
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = RSB_M4_BCOO_KERNEL_FUNCTION(`ID',mtype,matrix_storage,transposition,symmetry,rowsu,RSB_M4_COLUMNS_FALLBACK_UNROLL,`l',mop,citype,diagonal,uplo)( args );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+')dnl
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = RSB_M4_BCOO_KERNEL_FUNCTION(`ID',mtype,matrix_storage,transposition,symmetry,RSB_M4_ROWS_FALLBACK_UNROLL,RSB_M4_COLUMNS_FALLBACK_UNROLL,`l',mop,citype,diagonal,uplo)( args );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+popdef(`args')dnl
+')dnl
+	dnl errval = RSB_ERR_UNSUPPORTED_TYPE;
+	return errval;
+}
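+dnl
+dnl	After expansion, the dispatcher above reduces to a nested switch on
+dnl	the detected block size, falling back to a looping kernel when
+dnl	RSB_WANT_LOOPING_KERNELS is defined and to
+dnl	RSB_ERR_UNSUPPORTED_OPERATION otherwise.  A sketch of one expanded
+dnl	instance:
+dnl
+dnl	switch(rows)
+dnl	{
+dnl	case 1:
+dnl		switch(columns)
+dnl		{
+dnl		case 1:
+dnl			errval = rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sU_dE_uG( args );
+dnl			break;
+dnl		default: errval = RSB_ERR_UNSUPPORTED_OPERATION;
+dnl		}
+dnl		break;
+dnl	default: errval = RSB_ERR_UNSUPPORTED_OPERATION;
+dnl	}
+dnl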
+dnl
+')dnl
+dnl
+')dnl
+dnl
+popdef(`citype')dnl
+popdef(`mop')dnl
+popdef(`matrix_storage')dnl
+dnl popdef(`b_rows')dnl
+dnl popdef(`b_columns')dnl
+popdef(`transposition')dnl
+popdef(`symmetry')dnl
+popdef(`mtype')dnl
+popdef(`itype')dnl
+popdef(`unrolling')dnl
+popdef(`diagonal')dnl
+popdef(`want_what')dnl
+popdef(`uplo')dnl
+')dnl
+dnl
+dnl
+dnl
diff --git a/rsb_krnl_bcoo_spmv_u.c b/rsb_krnl_bcoo_spmv_u.c
new file mode 100644
index 0000000..57e6ab1
--- /dev/null
+++ b/rsb_krnl_bcoo_spmv_u.c
@@ -0,0 +1,186189 @@
+/* @cond INNERDOC */
+/*!
+ @file
+ @brief
+ Performance kernel dispatching code, for each type, submatrix size, and operation,
+ for the block coordinates (BCOO) format.
+ Kernels are unrolled, with no loops, for user-specified blockings only.
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ Note: at present, only row-major matrix access is considered.
+
+ */
+#include "rsb.h"
+#include "rsb_common.h"
+#include "rsb_internals.h"
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
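+
+/*
+ * A minimal reference sketch of the loop the unrolled kernels in this
+ * file derive from (the name and the function itself are illustrative,
+ * not part of the librsb API): the generated variants above unroll it
+ * 4-way and finish the remainder in a cleanup loop.
+ */
+static void rsb_coo_spmv_reference_sketch(const double *VA,
+	const double *rhs, double *out, const rsb_coo_idx_t *IA,
+	const rsb_coo_idx_t *JA, const rsb_nnz_idx_t nnz)
+{
+	rsb_nnz_idx_t n;
+
+	for(n=0;n<nnz;++n)	/* y += A * x, one nonzero at a time */
+		out[IA[n]] += VA[n]*rhs[JA[n]];
+}
+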
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
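+
+/*
+ * In the transposed kernels the pointer rebasing
+ *	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+ * swaps the submatrix offsets, since transposition exchanges the roles
+ * of the row and column indices.  A sketch of the equivalent update in
+ * global coordinates (illustrative, not part of the librsb API, and
+ * assuming out and rhs initially use the row and column offset bases
+ * respectively, as the rebasing suggests):
+ */
+static void rsb_coo_spmv_trans_sketch(const double *VA, const double *x,
+	double *y, const rsb_coo_idx_t *IA, const rsb_coo_idx_t *JA,
+	const rsb_nnz_idx_t nnz, const rsb_coo_idx_t roff,
+	const rsb_coo_idx_t coff)
+{
+	rsb_nnz_idx_t n;
+
+	for(n=0;n<nnz;++n)	/* y += A^T * x for one submatrix */
+		y[coff+JA[n]] += VA[n]*x[roff+IA[n]];
+}
+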
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// S
+	double *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
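+
+/*
+ * The symmetric kernels above handle two cases.  On diagonal submatrices
+ * (roff==coff) every nonzero contributes to both row i and row j, with an
+ * i!=j guard so diagonal entries are not applied twice.  On off-diagonal
+ * submatrices the mirrored block is applied through the rebased vectors
+ * trhs/tout.  A minimal sketch (illustrative, not part of the librsb API):
+ */
+static void rsb_coo_spmv_sym_sketch(const double *VA, const double *rhs,
+	double *out, const rsb_coo_idx_t *IA, const rsb_coo_idx_t *JA,
+	const rsb_nnz_idx_t nnz, const rsb_coo_idx_t roff,
+	const rsb_coo_idx_t coff)
+{
+	const double *trhs = rhs+(roff-coff);	/* mirrored input base  */
+	double *tout = out+(coff-roff);		/* mirrored output base */
+	rsb_nnz_idx_t n;
+
+	for(n=0;n<nnz;++n)
+	{
+		out[IA[n]] += VA[n]*rhs[JA[n]];		/* stored triangle   */
+		if(roff!=coff || IA[n]!=JA[n])
+			tout[JA[n]] += VA[n]*trhs[IA[n]];	/* mirrored triangle */
+	}
+}
+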
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// S
+	double *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// H
+	double *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// H
+	double *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// S
+	double *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// S
+	double *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// H
+	double *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// H
+	double *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for( ;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		tout[j*1]+=VA[n]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
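+
+/* Editorial note: the _H_ kernels differ from their _C_ twins only in
+ * re-reading bindx/bpntr through the narrower rsb_half_idx_t (see the casts
+ * above), halving the index bandwidth of the sparse operands.  A sketch of
+ * the idea, assuming a 16-bit unsigned index type (illustrative only):
+ */
+static double dot_narrow_idx(int nnz, const unsigned short *JA,
+                             const double *VA, const double *x)
+{
+	double acc = 0.0;
+	/* 2-byte indices mean half the index memory traffic of 4-byte ones */
+	for (int n = 0; n < nnz; ++n)
+		acc += VA[n] * x[JA[n]];
+	return acc;
+}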
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
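+
+/* Editorial note: the fall-through above is sound because for a symmetric
+ * (or, over the reals, Hermitian) matrix
+ *
+ *   A = A^T  =>  y + A^T x = y + A x,
+ *
+ * so the transposed kernel can reuse the untransposed code path verbatim.
+ */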
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
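+
+/* Editorial note: for the real type double, conjugation is the identity, so
+ * the _tC_ (conjugate-transpose) wrappers above legitimately collapse onto
+ * the _tN_ kernels as well:
+ *
+ *   A real, A = A^H  =>  A^H = A^T = A.
+ */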
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim,NULL,out,1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+	}
+	for( ;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
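+
+/* Editorial note: the uauz kernels implement y <- A x rather than
+ * y <- y + A x, so the destination is cleared first; given those semantics,
+ * the rsb__cblas_Xscal(...,NULL,out,1) prologue above must zero out.  A
+ * plain-C rendering of that prologue plus the accumulation (sketch,
+ * illustrative names only):
+ */
+static void zero_then_accumulate(int m, int nnz, const int *IA, const int *JA,
+                                 const double *VA, const double *x, double *y)
+{
+	for (int r = 0; r < m; ++r)   /* y <- 0, as the Xscal call does */
+		y[r] = 0.0;
+	for (int n = 0; n < nnz; ++n) /* then y <- y + A x */
+		y[IA[n]] += VA[n] * x[JA[n]];
+}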
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim,NULL,out,1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+	}
+	for( ;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for( ;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
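+
+/* Editorial note: the pointer rebasing above,
+ *
+ *   rhs = (rhs - coff) + roff;  out = (out - roff) + coff;
+ *
+ * swaps the row/column offsets of the submatrix.  Assuming the calling
+ * convention implied by the untransposed kernels (out based at y+roff, rhs
+ * at x+coff), the transposed update out[j] += VA[n]*rhs[i] then lands in
+ * global row coff+j while reading global column roff+i, i.e. exactly
+ * y <- A^T x for a block anchored at (roff,coff).
+ */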
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for( ;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for( ;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for( ;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// S
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for( ;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		tout[j*1]+=VA[n]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
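+
+/* Editorial note: in the roff!=coff branch above the mirrored halves use
+ *
+ *   trhs = rhs + (roff - coff)   and   tout = out + (coff - roff),
+ *
+ * so with out based at y+roff and rhs at x+coff (as in the diagonal case),
+ * tout[j] aliases y[coff+j] and trhs[i] aliases x[roff+i]: each stored
+ * A(roff+i, coff+j) also contributes as A(coff+j, roff+i), which is the
+ * symmetric update for a strictly off-diagonal block (hence no i==j test).
+ */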
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// S
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for( ;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		tout[j*1]+=VA[n]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// H
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for( ;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		tout[j*1]+=VA[n]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// H
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for( ;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		tout[j*1]+=VA[n]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim,NULL,out,1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+	}
+	for( ;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
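+
+/* Editorial note: all of these kernels hand-unroll the nonzero loop four-way
+ * and mop up the remainder in a scalar loop.  A generic rendering of that
+ * pattern (sketch, illustrative names only):
+ */
+static void axpy_unrolled4(int nnz, const int *IA, const int *JA,
+                           const double *VA, const double *x, double *y)
+{
+	int n = 0;
+	for (; n + 3 < nnz; n += 4) {  /* unrolled by four */
+		y[IA[n+0]] += VA[n+0] * x[JA[n+0]];
+		y[IA[n+1]] += VA[n+1] * x[JA[n+1]];
+		y[IA[n+2]] += VA[n+2] * x[JA[n+2]];
+		y[IA[n+3]] += VA[n+3] * x[JA[n+3]];
+	}
+	for (; n < nnz; ++n)           /* remainder */
+		y[IA[n]] += VA[n] * x[JA[n]];
+}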
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim,NULL,out,1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+	}
+	for( ;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for( ;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for( ;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for( ;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for( ;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// S
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for( ;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		tout[j*1]+=VA[n]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// S
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for( ;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		tout[j*1]+=VA[n]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// H
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for( ;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		tout[j*1]+=VA[n]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// H
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for( ;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		tout[j*1]+=VA[n]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	
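+	/* The nonzero loop below is manually unrolled by 4; the second loop
+	 * handles the nnz mod 4 remainder one entry at a time. */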
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
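+
+/*
+ * A minimal usage sketch for the kernel above (illustrative only, not part of
+ * the generated sources). It assumes the default librsb typedefs in which
+ * rsb_coo_idx_t and rsb_nnz_idx_t are compatible signed integers, and that
+ * the arguments unused by this kernel (indptr, rpntr, cpntr, br, bc, flags)
+ * may be passed as NULL/1/0 placeholders. Row indices travel through bpntr
+ * and column indices through bindx, as the casts at the top of the kernel show.
+ *
+ *   const double VA[] = { 1.0, 2.0, 3.0 };        // values of a 3x3 diagonal A
+ *   const rsb_nnz_idx_t IA[] = { 0, 1, 2 };       // COO row indices (via bpntr)
+ *   const rsb_coo_idx_t JA[] = { 0, 1, 2 };       // COO column indices (via bindx)
+ *   const double x[] = { 1.0, 1.0, 1.0 }, alpha = 2.0;
+ *   double y[] = { 0.0, 0.0, 0.0 };
+ *   rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sU_dE_uG(VA, x, y, 3, 3, JA, IA,
+ *       NULL, NULL, NULL, 1, 1, 0, 0, 0, 3, &alpha);  // y += alpha * A * x
+ */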
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
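+
+/*
+ * The _H_ kernels differ from the _C_ ones only in reading the stored indices
+ * as rsb_half_idx_t rather than rsb_coo_idx_t; with the default typedefs this
+ * halves the index bandwidth of the loop, while the floating point arithmetic
+ * is identical.
+ */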
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	
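+	/* Transposed kernel: out is indexed by the column index j and rhs by the
+	 * row index i, so the two pointer rebasings below swap the roles of the
+	 * submatrix offsets roff and coff. */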
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	
+	const double *trhs = rhs+1*(roff-coff);/* S (symmetric): rhs as seen from the mirrored, transposed block */
+	double *tout=out+1*(coff-roff); /* out as seen from the mirrored block */
+
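+	/* For a diagonal block (roff == coff) each nonzero also updates the
+	 * mirrored position (j,i), with the i == j guard keeping diagonal
+	 * entries from being applied twice; for an off-diagonal block the
+	 * mirrored contribution goes through the shifted views trhs/tout. */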
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	
+	const double *trhs = rhs+1*(roff-coff);/* S (symmetric): rhs as seen from the mirrored, transposed block */
+	double *tout=out+1*(coff-roff); /* out as seen from the mirrored block */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sS_dE_uG(VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags, nnz, alphap);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sS_dE_uG(VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags, nnz, alphap);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sS_dE_uG(VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags, nnz, alphap);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sS_dE_uG(VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags, nnz, alphap);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	
+	const double *trhs = rhs+1*(roff-coff);/* H (Hermitian): rhs as seen from the mirrored, transposed block */
+	double *tout=out+1*(coff-roff); /* out as seen from the mirrored block */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	
+	const double *trhs = rhs+1*(roff-coff);/* H (Hermitian): rhs as seen from the mirrored, transposed block */
+	double *tout=out+1*(coff-roff); /* out as seen from the mirrored block */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sH_dE_uG(VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags, nnz, alphap);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sH_dE_uG(VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags, nnz, alphap);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sH_dE_uG(VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags, nnz, alphap);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sH_dE_uG(VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags, nnz, alphap);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	
+	const double *trhs = rhs+1*(roff-coff);/* S (symmetric): rhs as seen from the mirrored, transposed block */
+	double *tout=out+1*(coff-roff); /* out as seen from the mirrored block */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	
+	const double *trhs = rhs+1*(roff-coff);/* S (symmetric): rhs as seen from the mirrored, transposed block */
+	double *tout=out+1*(coff-roff); /* out as seen from the mirrored block */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sS_dI_uG(VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags, nnz, alphap);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sS_dI_uG(VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags, nnz, alphap);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sS_dI_uG(VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags, nnz, alphap);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sS_dI_uG(VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags, nnz, alphap);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	
+	const double *trhs = rhs+1*(roff-coff);/* H (Hermitian): rhs as seen from the mirrored, transposed block */
+	double *tout=out+1*(coff-roff); /* out as seen from the mirrored block */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	
+	const double *trhs = rhs+1*(roff-coff);/* H (Hermitian): rhs as seen from the mirrored, transposed block */
+	double *tout=out+1*(coff-roff); /* out as seen from the mirrored block */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sH_dI_uG(VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags, nnz, alphap);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sH_dI_uG(VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags, nnz, alphap);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sH_dI_uG(VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags, nnz, alphap);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sH_dI_uG(VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags, nnz, alphap);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
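+
+/*
+ * The _unua_ kernels are the alpha = -1 specialization of the _uxua_ family:
+ * the scalar is hardcoded as (-1) in the update instead of being read through
+ * an alphap argument, which is accordingly absent from the signature.
+ */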
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
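+
+/*
+ * About the pointer rebasing at the top of the transposed kernels above,
+ *
+ *	rhs = (rhs - coff*1) + roff*1;  out = (out - roff*1) + coff*1;
+ *
+ * the submatrix sits at offset (roff, coff) in the whole matrix, and under
+ * A^T its stored row index i selects an rhs element while its column index
+ * j selects an out element; the rebasing therefore shifts each vector from
+ * one window to the other.  The literal *1 factors are the unit strides of
+ * these non-strided kernels.
+ */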
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// S
+	double *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
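+
+/*
+ * Reference sketch of the symmetric update performed above (unit stride;
+ * illustrative only).  On a diagonal block (roff == coff) each stored
+ * entry a_ij also acts as a_ji, except for the diagonal itself; on an
+ * off-diagonal block the mirrored contribution lands in the transposed
+ * windows reached through trhs/tout:
+ */
+static inline void rsb_example_sym_coo_spmv_sub(
+	const double *VA, const rsb_coo_idx_t *IA, const rsb_coo_idx_t *JA,
+	const double *rhs, double *out, rsb_nnz_idx_t nnz,
+	rsb_coo_idx_t roff, rsb_coo_idx_t coff)
+{
+	const double *trhs = rhs + (roff - coff); /* rhs window in the transposed role */
+	double *tout = out + (coff - roff);       /* out window in the transposed role */
+	rsb_nnz_idx_t n;
+
+	for (n = 0; n < nnz; ++n)
+	{
+		const rsb_coo_idx_t i = IA[n], j = JA[n];
+
+		out[i] -= VA[n] * rhs[j];
+		if (roff != coff)          /* off-diagonal block: always mirror */
+			tout[j] -= VA[n] * trhs[i];
+		else if (i != j)           /* diagonal block: mirror off-diagonal entries only */
+			out[j] -= VA[n] * rhs[i];
+	}
+}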
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// S
+	double *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The transposed case reduces to the untransposed one, since the matrix equals its (conjugate) transpose. */
+	return rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
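+
+/*
+ * Why the fall-through above is exact: with A = A^T, y <- y - A^T*x and
+ * y <- y - A*x coincide; for this real (double) type the same holds for
+ * the A^H kernels below, since conjugation is the identity on the reals.
+ */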
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The transposed case reduces to the untransposed one, since the matrix equals its (conjugate) transpose. */
+	return rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The transposed case reduces to the untransposed one, since the matrix equals its (conjugate) transpose. */
+	return rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The transposed case reduces to the untransposed one, since the matrix equals its (conjugate) transpose. */
+	return rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// H
+	double *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// H
+	double *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The transposed case reduces to the untransposed one, since the matrix equals its (conjugate) transpose. */
+	return rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The transposed case reduces to the untransposed one, since the matrix equals its (conjugate) transpose. */
+	return rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The transposed case reduces to the untransposed one, since the matrix equals its (conjugate) transpose. */
+	return rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The transposed case reduces to the untransposed one, since the matrix equals its (conjugate) transpose. */
+	return rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; the caller accounts for it. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
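+
+/*
+ * Hypothetical caller-side sketch of the missing unit-diagonal term (per
+ * the NOTE above, implicit-diagonal handling lives at the caller level).
+ * Assuming the implicit diagonal is unitary and left out of VA, the update
+ * y <- y - A*x still needs a y -= x pass over the diagonal window:
+ */
+static inline void rsb_example_unua_unit_diag(
+	const double *rhs, double *out, rsb_coo_idx_t n)
+{
+	rsb_coo_idx_t k;
+
+	for (k = 0; k < n; ++k)
+		out[k] -= rhs[k]; /* contribution of the implicit a_kk == 1 entries */
+}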
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; the caller accounts for it. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; the caller accounts for it. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; the caller accounts for it. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; the caller accounts for it. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; the caller accounts for it. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; the caller accounts for it. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// S
+	double *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; the caller accounts for it. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// S
+	double *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; the caller accounts for it. */
+	/* The transposed case reduces to the untransposed one, since the matrix equals its (conjugate) transpose. */
+	return rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; the caller accounts for it. */
+	/* The transposed case reduces to the untransposed one, since the matrix equals its (conjugate) transpose. */
+	return rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; the caller accounts for it. */
+	/* The transposed case reduces to the untransposed one, since the matrix equals its (conjugate) transpose. */
+	return rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; the caller accounts for it. */
+	/* The transposed case reduces to the untransposed one, since the matrix equals its (conjugate) transpose. */
+	return rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; the caller accounts for it. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// H
+	double *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; the caller accounts for it. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double *trhs = rhs+1*(roff-coff);// H
+	double *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; the caller accounts for it. */
+	/* The transposed case reduces to the untransposed one, since the matrix equals its (conjugate) transpose. */
+	return rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; the caller accounts for it. */
+	/* The transposed case reduces to the untransposed one, since the matrix equals its (conjugate) transpose. */
+	return rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; the caller accounts for it. */
+	/* The transposed case reduces to the untransposed one, since the matrix equals its (conjugate) transpose. */
+	return rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; the caller accounts for it. */
+	/* The transposed case reduces to the untransposed one, since the matrix equals its (conjugate) transpose. */
+	return rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
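+
+/*
+ * The _sasa_ kernels accumulate with +1 and address the vectors through
+ * explicit incx/incy strides, unlike the unit-stride kernels earlier in
+ * this file.  Reference sketch, with a hedged use: with incx == incy ==
+ * nrhs, the same loop runs against one column of a row-major multivector
+ * (an assumed layout, for illustration only):
+ */
+static inline void rsb_example_coo_spmv_add_strided(
+	const double *VA, const rsb_coo_idx_t *IA, const rsb_coo_idx_t *JA,
+	const double *rhs, double *out, rsb_nnz_idx_t nnz,
+	rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+{
+	rsb_nnz_idx_t n;
+
+	for (n = 0; n < nnz; ++n)
+		out[IA[n] * incy] += VA[n] * rhs[JA[n] * incx];
+}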
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double *trhs = rhs+incx*(roff-coff);// S
+	double *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
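+
+/*
+ * Illustrative sketch (not part of librsb): the symmetric update used
+ * above on a diagonal block (roff == coff). Each stored off-diagonal
+ * entry (i,j) stands for both A(i,j) and A(j,i), so it updates y twice;
+ * diagonal entries update y once. Names are hypothetical.
+ */
+#if 0 /* illustrative only */
+static void coo_spmv_sym_sketch(const double *VA, const int *IA,
+	const int *JA, int nnz, const double *x, double *y)
+{
+	int n;
+	for (n = 0; n < nnz; ++n) {
+		const int i = IA[n], j = JA[n];
+		y[i] += VA[n] * x[j];
+		if (i != j)	/* mirror the entry across the diagonal */
+			y[j] += VA[n] * x[i];
+	}
+}
+#endif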
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double *trhs = rhs+incx*(roff-coff);// S
+	double *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_id [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For symmetric A, the transposed operation reduces to the untransposed kernel */
+	return rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For symmetric A, the transposed operation reduces to the untransposed kernel */
+	return rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_id [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For symmetric A, the transposed operation reduces to the untransposed kernel */
+	return rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For symmetric A, the transposed operation reduces to the untransposed kernel */
+	return rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
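+
+/*
+ * The four wrappers above rest on the identity A = A^T, which gives
+ * A^T x = A x; and since these kernels are real (double), A^H = A^T as
+ * well, so the conjugate-transposed (tC) variants delegate likewise.
+ */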
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_id [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double *trhs = rhs+incx*(roff-coff);// H
+	double *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
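+
+/*
+ * For the real double kernels, conjugation is a no-op (A^H = A^T), so
+ * these sH (Hermitian) bodies coincide with the sS (symmetric) ones above.
+ */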
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double *trhs = rhs+incx*(roff-coff);// H
+	double *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_id [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For symmetric A, the transposed operation reduces to the untransposed kernel */
+	return rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For symmetric A, the transposed operation reduces to the untransposed kernel */
+	return rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_id [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For symmetric A, the transposed operation reduces to the untransposed kernel */
+	return rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For symmetric A, the transposed operation reduces to the untransposed kernel */
+	return rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_id [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit unit diagonal is not applied here; the caller handles it. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
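+
+/*
+ * Illustrative sketch (not part of librsb): the dI kernels above skip
+ * the implicit unit diagonal, which the caller is expected to add.
+ * Assuming the implied diagonal entries are 1, that fixup amounts to
+ * y += x over the block's diagonal range (hypothetical helper):
+ */
+#if 0 /* illustrative only */
+static void add_unit_diagonal_sketch(const double *x, double *y,
+	int m, int incx, int incy)
+{
+	int k;
+	for (k = 0; k < m; ++k)
+		y[k * incy] += x[k * incx];
+}
+#endif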
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit unit diagonal is not applied here; the caller handles it. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_id [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit unit diagonal is not applied here; the caller handles it. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit unit diagonal is not applied here; the caller handles it. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_id [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit unit diagonal is not applied here; the caller handles it. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit unit diagonal is not applied here; the caller handles it. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_id [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit unit diagonal is not applied here; the caller handles it. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double *trhs = rhs+incx*(roff-coff);// S
+	double *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit unit diagonal is not applied here; the caller handles it. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double *trhs = rhs+incx*(roff-coff);// S
+	double *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_id [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit unit diagonal is not applied here; the caller handles it. */
+	/* For symmetric A, the transposed operation reduces to the untransposed kernel */
+	return rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit unit diagonal is not applied here; the caller handles it. */
+	/* For symmetric A, the transposed operation reduces to the untransposed kernel */
+	return rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_id [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit unit diagonal is not applied here; the caller handles it. */
+	/* For symmetric A, the transposed operation reduces to the untransposed kernel */
+	return rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit unit diagonal is not applied here; the caller handles it. */
+	/* For symmetric A, the transposed operation reduces to the untransposed kernel */
+	return rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_id [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit unit diagonal is not applied here; the caller handles it. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double *trhs = rhs+incx*(roff-coff);// H
+	double *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit unit diagonal is not applied here; the caller handles it. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double *trhs = rhs+incx*(roff-coff);// H
+	double *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_id [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit unit diagonal is not applied here; the caller handles it. */
+	/* For symmetric A, the transposed operation reduces to the untransposed kernel */
+	return rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit unit diagonal is not applied here; the caller handles it. */
+	/* For symmetric A, the transposed operation reduces to the untransposed kernel */
+	return rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_id [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit unit diagonal is not applied here; the caller handles it. */
+	/* For symmetric A, the transposed operation reduces to the untransposed kernel */
+	return rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit unit diagonal is not applied here; the caller handles it. */
+	/* For symmetric A, the transposed operation reduces to the untransposed kernel */
+	return rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n<0 || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward scan: n<0 means the diagonal was never reached */
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
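+
+/*
+ * Illustrative sketch (not part of librsb): the backward substitution
+ * above, restated over row-sorted COO data with plain int indices.
+ * Scanning from the last entry, each row first yields its strictly
+ * upper part, then must close (going backward) with its diagonal.
+ * Hypothetical names; returns nonzero on a missing or zero diagonal.
+ */
+#if 0 /* illustrative only */
+static int coo_trsv_upper_sketch(const double *VA, const int *IA,
+	const int *JA, int nnz, int m, double *y)
+{
+	int n = nnz - 1, ii;
+	for (ii = m - 1; ii >= 0; --ii) {
+		double ax = 0.0;
+		/* strictly-upper entries of row ii, scanned backward */
+		for (; n >= 0 && IA[n] == ii && JA[n] != ii; --n)
+			ax += VA[n] * y[JA[n]];
+		if (n < 0 || VA[n] == 0.0)
+			return -1;	/* data exhausted or zero diagonal */
+		y[ii] = (y[ii] - ax) / VA[n];	/* VA[n] is the diagonal */
+		--n;
+	}
+	return 0;
+}
+#endif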
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
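+
+/*
+ * Illustrative sketch (not part of librsb): the forward substitution
+ * above, over row-sorted COO data. Each row's strictly-lower entries
+ * are folded into ax, then the row's diagonal entry divides.
+ * Hypothetical names; returns nonzero on a missing or zero diagonal.
+ */
+#if 0 /* illustrative only */
+static int coo_trsv_lower_sketch(const double *VA, const int *IA,
+	const int *JA, int nnz, int m, double *y)
+{
+	int n = 0, ii;
+	for (ii = 0; ii < m; ++ii) {
+		double ax = 0.0;
+		for (; n < nnz && IA[n] == ii && JA[n] != ii; ++n)
+			ax += VA[n] * y[JA[n]];
+		if (n == nnz || VA[n] == 0.0)
+			return -1;	/* missing or zero diagonal */
+		y[ii] = (y[ii] - ax) / VA[n];
+		++n;	/* step past the diagonal */
+	}
+	return 0;
+}
+#endif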
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n<0 || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward scan: n<0 means the diagonal was never reached */
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
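+
+/*
+ * Illustrative sketch (not part of librsb): the transposed solve above
+ * proceeds column-wise. For upper-triangular A each row opens with its
+ * diagonal; once y[ii] is fixed, the rest of row ii (a column of A^T)
+ * is scattered out of the remaining right-hand side. Hypothetical names.
+ */
+#if 0 /* illustrative only */
+static int coo_trsv_upper_trans_sketch(const double *VA, const int *IA,
+	const int *JA, int nnz, int m, double *y)
+{
+	int n = 0, ii;
+	for (ii = 0; ii < m; ++ii) {
+		if (n == nnz || VA[n] == 0.0)
+			return -1;	/* missing or zero diagonal */
+		y[ii] /= VA[n];	/* row ii opens with its diagonal */
+		++n;
+		for (; n < nnz && IA[n] == ii; ++n)
+			y[JA[n]] -= VA[n] * y[ii];	/* scatter row ii */
+	}
+	return 0;
+}
+#endif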
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward scan: n<0 means the entries ran out */
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward scan: n<0 means the entries ran out */
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward scan: n<0 means the entries ran out */
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
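+/*
+ * Illustrative note: the _H_ kernels above differ from their _C_
+ * counterparts only in re-reading the index arrays through the narrower
+ * rsb_half_idx_t type, which reduces index memory traffic on submatrices
+ * whose local dimensions fit the narrow type.  A hypothetical sketch of
+ * the narrowing step, under the assumption of a 16-bit unsigned index
+ * type (all names here are invented):
+ */
+typedef unsigned short example_half_idx_t;	/* assumed 16-bit stand-in */
+
+static int example_narrow_indices(const int *src, example_half_idx_t *dst,
+		int nnz, int dim)
+{
+	int k;
+
+	if (dim > 0xFFFF)
+		return -1;	/* local indices would not fit in 16 bits */
+	for (k = 0; k < nnz; ++k)
+		dst[k] = (example_half_idx_t)src[k];	/* narrowing copy */
+	return 0;
+}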
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n<0 || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
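+/*
+ * Illustrative sketch, not part of librsb: the two kernels above solve
+ * the untransposed system by *gathering*: for each row the strictly
+ * off-diagonal entries are accumulated into ax from already-solved
+ * unknowns, and the diagonal entry, met last, closes the row.  A
+ * standalone analogue of the forward (lower-triangular) case for a COO
+ * matrix sorted by row, diagonal last in each row (names invented for
+ * illustration):
+ */
+static int example_coo_spsv_gather(const double *VA, const int *IA,
+		const int *JA, int dim, int nnz, double *out)
+{
+	int k = 0, ii;
+
+	for (ii = 0; ii < dim; ++ii)
+	{
+		double ax = 0.0;
+
+		/* gather the contributions of the strictly lower entries */
+		for (; k < nnz && IA[k] == ii && JA[k] != ii; ++k)
+			ax += VA[k] * out[JA[k]];
+		if (k == nnz || VA[k] == 0.0)
+			return -1;	/* missing or zero diagonal entry */
+		out[ii] = (out[ii] - ax) / VA[k++];
+	}
+	return 0;
+}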
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n<0 || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
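+/*
+ * Note: for the real `double` type conjugation is the identity, which is
+ * why the _tC_ (conjugate-transpose) kernels above are line-for-line
+ * identical to their _tT_ counterparts; the distinction only becomes
+ * meaningful in the complex-type instantiations of the same template.
+ */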
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n<0 || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n<0 || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
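+/*
+ * Illustrative sketch, not part of librsb: with an implicit unit diagonal
+ * the _dI_ kernels above store only strictly off-diagonal entries, so
+ * there is no pivot to load, test, or divide by, and the gather step
+ * reduces to out[ii] -= ax.  A standalone analogue for a sorted
+ * strictly-lower-triangular COO matrix (names invented for illustration):
+ */
+static void example_coo_spsv_unit_gather(const double *VA, const int *IA,
+		const int *JA, int dim, int nnz, double *out)
+{
+	int k = 0, ii;
+
+	for (ii = 0; ii < dim; ++ii)
+	{
+		double ax = 0.0;
+
+		for (; k < nnz && IA[k] == ii; ++k)
+			ax += VA[k] * out[JA[k]];	/* all stored entries are off-diagonal */
+		out[ii] -= ax;	/* unit diagonal: no division needed */
+	}
+}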
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
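+/*
+ * Illustrative sketch, not part of librsb: the transposed _dI_ kernels
+ * above scatter without ever dividing, since with a unit diagonal
+ * out[ii] is already final by the time row ii is reached.  Standalone
+ * analogue for a sorted strictly-upper-triangular COO matrix (names
+ * invented for illustration):
+ */
+static void example_coo_spsv_unit_trans_scatter(const double *VA,
+		const int *IA, const int *JA, int dim, int nnz, double *out)
+{
+	int k = 0, ii;
+
+	for (ii = 0; ii < dim; ++ii)
+	{
+		const double ax = out[ii];	/* already the solved unknown */
+
+		for (; k < nnz && IA[k] == ii; ++k)
+			out[JA[k]] -= VA[k] * ax;	/* scatter update */
+	}
+}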
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A \neq A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^T)^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel always succeeds).
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^T)^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel always succeeds).
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
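+
+/*
+ * For the transposed (tT) solves above, gather and scatter swap roles: once
+ * out[ii] is final, its contribution is scattered into the still-unsolved
+ * entries out[j] (the loop guard only tests i==ii, since row ii of A is a
+ * column of A^T).  A sketch of the same scheme on plain row-sorted COO
+ * arrays (illustrative names, not library API):
+ *
+ *   // Solve U^T*x = b in place for unit upper triangular U: forward
+ *   // substitution on the lower triangular U^T, one column of U at a time.
+ *   void coo_unit_upper_transposed_solve(int n, int nnz, const int *IA,
+ *                                        const int *JA, const double *VA,
+ *                                        double *x)
+ *   {
+ *       int k = 0;
+ *       for (int ii = 0; ii < n; ++ii)        // x[ii] is final here
+ *           for (; k < nnz && IA[k] == ii; ++k)
+ *               x[JA[k]] -= VA[k] * x[ii];    // JA[k] > ii: not yet final
+ *   }
+ */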
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^T)^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel always succeeds).
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^T)^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel always succeeds).
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^H)^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel always succeeds).
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^H)^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel always succeeds).
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^H)^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel always succeeds).
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^H)^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel always succeeds).
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
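+
+/*
+ * Note that for these real-valued double kernels conjugation is a no-op,
+ * so \f$A^H = \overline{A}^T = A^T\f$ entrywise and each tC (conjugate
+ * transpose) body above is textually identical to its tT counterpart;
+ * the separate variants presumably exist so the generated dispatch covers
+ * every transposition flag uniformly.
+ */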
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel always succeeds).
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel always succeeds).
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel always succeeds).
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel always succeeds).
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^T)^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel always succeeds).
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^T)^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel always succeeds).
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^T)^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel always succeeds).
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^T)^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel always succeeds).
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^H)^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel always succeeds).
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^H)^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel always succeeds).
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^H)^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel always succeeds).
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^H)^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel always succeeds).
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+
+
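+/*
+ * Naming key for the generated kernels in this file, as inferred from the
+ * doc comments above (the uG reading is an assumption):
+ *
+ *   double_C_ / double_H_ : rsb_coo_idx_t vs. rsb_half_idx_t index arrays
+ *   tN / tT / tC          : operate with A, A^T, or A^H
+ *   r1_c1                 : 1 x 1 blocking
+ *   sU / sS / sH          : unsymmetric, symmetric (A = A^T), hermitian (A = A^H)
+ *   dE / dI               : diagonal stored explicitly vs. implicit (unit)
+ *   uU / uL               : upper vs. lower triangle (spsv kernels)
+ *   uG                    : presumably "general", i.e. no triangle (spmv kernels)
+ */
+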
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
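+
+/*
+ * The spmv kernels above and below process four COO entries per iteration
+ * and mop up the remainder in a scalar loop; both loops perform the same
+ * update, so the unrolled form is equivalent to this scalar reference
+ * (illustrative, not library API):
+ *
+ *   // y += alpha * A * x, A in COO form, with vector strides incx/incy.
+ *   for (int k = 0; k < nnz; ++k)
+ *       out[IA[k] * incy] += alpha * VA[k] * rhs[JA[k] * incx];
+ */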
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
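+
+/*
+ * The pointer adjustment ahead of the transposed loops,
+ *
+ *   rhs = (rhs - coff*(incx)) + roff*(incx);  // i.e. rhs += (roff-coff)*incx
+ *   out = (out - roff*(incy)) + coff*(incy);  // i.e. out += (coff-roff)*incy
+ *
+ * rebases the vectors for a submatrix at offsets (roff, coff): the caller
+ * has presumably offset rhs by coff and out by roff for the untransposed
+ * case, and with A^T the row/column roles swap, so the offsets are
+ * exchanged here.
+ */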
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+incx*(roff-coff);// S
+	double *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
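+
+/*
+ * The symmetric (sS) spmv kernels above split on roff==coff: a diagonal
+ * block touches both triangles of itself, so each stored entry is applied
+ * twice (once per triangle, skipping i==j to avoid doubling the diagonal),
+ * while an off-diagonal block contributes to two disjoint vector windows
+ * via the shifted trhs/tout aliases.  The diagonal-block case reduces to
+ * this reference loop (illustrative):
+ *
+ *   for (int k = 0; k < nnz; ++k) {
+ *       out[IA[k]*incy] += alpha * VA[k] * rhs[JA[k]*incx];
+ *       if (IA[k] != JA[k])
+ *           out[JA[k]*incy] += alpha * VA[k] * rhs[IA[k]*incx];
+ *   }
+ */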
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+incx*(roff-coff);// S
+	double *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
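+
+/*
+ * The delegation above (and in the sS/sH wrappers that follow) rests on a
+ * one-line identity: if \f$A = A^T\f$ then \f$A^T x = A x\f$, so the
+ * transposed symmetric product can reuse the untransposed kernel verbatim.
+ */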
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+incx*(roff-coff);// H
+	double *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+incx*(roff-coff);// H
+	double *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
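+
+/*
+ * As the NOTE above says, the dI (implicit unit diagonal) spmv kernels only
+ * accumulate the stored off-diagonal entries; the caller is expected to add
+ * the diagonal term separately, conceptually something like (illustrative):
+ *
+ *   for (int i = 0; i < Mdim; ++i)
+ *       out[i*incy] += alpha * rhs[i*incx];   // implicit 1.0 diagonal
+ */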
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
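+	/* Transposed product: row and column roles swap, so rebase rhs from the
+	 * column offset (coff) to the row offset (roff) and out the other way
+	 * round; the i/j accesses below then need no change. */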
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
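+	/* For the real double type the conjugate transpose coincides with the
+	 * transpose, so this body matches the tT kernel above. */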
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+incx*(roff-coff);// S
+	double *tout=out+incy*(coff-roff);
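+	/* Symmetric update: in a diagonal block (roff==coff) each nonzero A(i,j)
+	 * also updates row j, skipping the diagonal to avoid counting it twice;
+	 * in an off-diagonal block trhs/tout re-point x and y so the mirrored
+	 * A(j,i) contribution lands in the correct index window. */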
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+incx*(roff-coff);// S
+	double *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+incx*(roff-coff);// H
+	double *tout=out+incy*(coff-roff);
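+	/* For the real double type the Hermitian case reduces to the symmetric
+	 * one, hence the same mirrored-update scheme as in the sS kernels. */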
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+incx*(roff-coff);// H
+	double *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
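+	/* Backward substitution on the upper triangle: rows are visited bottom-up;
+	 * scanning the nonzeroes backwards, the strictly upper entries of row ii
+	 * are accumulated into ax and the diagonal entry is expected to follow
+	 * (diagonal explicit, entries sorted by row and column); a missing or
+	 * zero pivot yields RSB_ERR_INVALID_NUMERICAL_DATA. */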
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n<0 || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
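+	/* Forward substitution on the lower triangle: rows top-down; the strictly
+	 * lower entries of row ii are accumulated first, then the row's diagonal
+	 * entry is expected at VA[n]. */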
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n<0 || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
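+	/* Transposed upper solve (a lower solve on A^T), processed by rows of A:
+	 * the diagonal entry is expected first in each row, so the pivot division
+	 * happens up front and the remaining row entries are scattered into the
+	 * not-yet-solved positions out[j], j>ii. */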
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
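+	/* Transposed lower solve, run bottom-up: scanning backwards, the diagonal
+	 * entry of row ii comes first, followed by its strictly lower entries,
+	 * which are scattered into out[j], j<ii. */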
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n<0 || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n<0 || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a missing or zero diagonal entry.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* entries exhausted: diagonal missing */
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a missing or zero diagonal entry.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a missing or zero diagonal entry.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* entries exhausted: diagonal missing */
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a missing or zero diagonal entry.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n+1==0 || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* missing or zero diagonal */
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a missing or zero diagonal entry.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
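+
+/*
+ * The _tN_ (untransposed) kernels above use the dual, gather-style
+ * formulation: the off-diagonal entries of row ii are accumulated against
+ * already-solved unknowns, and the diagonal, stored after them, closes the
+ * row. Minimal sketch of the recurrence, assuming row-sorted COO with the
+ * diagonal stored last in each row (hypothetical helper, plain C types):
+ * \code
+ * // Solve L x = b in place (L lower triangular), gather form.
+ * // Returns nonzero on a missing or zero diagonal.
+ * int spsv_l_sketch(int m, int nnz, const int *IA, const int *JA,
+ *                   const double *VA, double *x)
+ * {
+ *     int n = 0;
+ *     for (int ii = 0; ii < m; ++ii) {
+ *         double ax = 0;
+ *         for (; n < nnz && IA[n] == ii && JA[n] != ii; ++n)
+ *             ax += VA[n] * x[JA[n]];   // gather solved unknowns of row ii
+ *         if (n == nnz || VA[n] == 0.0)
+ *             return 1;                 // diagonal missing or exactly zero
+ *         x[ii] = (x[ii] - ax) / VA[n++];
+ *     }
+ *     return 0;
+ * }
+ * \endcode
+ */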
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a missing or zero diagonal entry.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n+1==0 || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* missing or zero diagonal */
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a missing or zero diagonal entry.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
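+
+/*
+ * Worked instance of the gather recurrence (hand-computed, for
+ * illustration only): with alpha = 1,
+ *     L = [ 2 0 0 ; 1 3 0 ; 0 4 5 ],   b = (2, 7, 23),
+ * the sweep yields x0 = 2/2 = 1, x1 = (7 - 1*1)/3 = 2,
+ * x2 = (23 - 4*2)/5 = 3, i.e. x = (1, 2, 3) with L*x = b.
+ */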
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a missing or zero diagonal entry.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a missing or zero diagonal entry.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* entries exhausted: diagonal missing */
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a missing or zero diagonal entry.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a missing or zero diagonal entry.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* entries exhausted: diagonal missing */
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a missing or zero diagonal entry.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a missing or zero diagonal entry.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* entries exhausted: diagonal missing */
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a missing or zero diagonal entry.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		double aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a missing or zero diagonal entry.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		double aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* entries exhausted: diagonal missing */
+		aa=VA[n];
+		if(VA[n]==((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
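+
+/*
+ * For this real-valued (double) kernel type the conjugate-transpose (_tC_)
+ * variants above have bodies identical to the transpose (_tT_) ones, since
+ * conjugation is the identity on real data; only the generated symbol
+ * names differ, keeping dispatch uniform across transposition flags.
+ */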
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
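+
+/*
+ * The _dI_ (implicit diagonal) variants assume a unit diagonal that is not
+ * stored: every entry of a row is off-diagonal, so the division disappears
+ * and each row closes with x_ii = alpha*b_ii - ax. Minimal sketch
+ * (hypothetical helper, plain C types):
+ * \code
+ * // Solve x = alpha * inv(L) * b, L unit lower triangular, diagonal not
+ * // stored; b is passed in x.
+ * void spsv_unit_sketch(int m, int nnz, const int *IA, const int *JA,
+ *                       const double *VA, double alpha, double *x)
+ * {
+ *     int n = 0;
+ *     for (int ii = 0; ii < m; ++ii) {
+ *         double ax = 0;
+ *         for (; n < nnz && IA[n] == ii; ++n)
+ *             ax += VA[n] * x[JA[n]]; // all stored entries are off-diagonal
+ *         x[ii] = alpha * x[ii] - ax; // unit diagonal: no division
+ *     }
+ * }
+ * \endcode
+ */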
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
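+
+/*
+ * Scatter counterpart of the implicit-diagonal solve: x_ii is final as soon
+ * as its row is reached, is pushed into the later unknowns, and is then
+ * scaled once by alpha. Minimal sketch (hypothetical helper, plain C
+ * types):
+ * \code
+ * // Solve x = alpha * inv(U^T) * b, U unit upper triangular, diagonal not
+ * // stored; b is passed in x.
+ * void spsv_unit_ut_sketch(int m, int nnz, const int *IA, const int *JA,
+ *                          const double *VA, double alpha, double *x)
+ * {
+ *     int n = 0;
+ *     for (int ii = 0; ii < m; ++ii) {
+ *         const double ax = x[ii];    // solved value of x_ii, pre-scaling
+ *         for (; n < nnz && IA[n] == ii; ++n)
+ *             x[JA[n]] -= VA[n] * ax; // scatter into later unknowns
+ *         x[ii] = alpha * ax;
+ *     }
+ * }
+ * \endcode
+ */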
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0);--ii)
+	{
+		double ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii)))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0);--ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii)))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii)))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0);--ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii)))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii)))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0);--ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii)))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii)))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double alpha=*alphap;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0);--ii)
+	{
+		double ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii)))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
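+/*
+ * A minimal sketch, not part of the generated kernels above, of the loop
+ * shape they share: substitution over strictly triangular COO entries sorted
+ * by row, with the unit diagonal left implicit (dI).  The helper name
+ * spsv_coo_demo and its plain int indices are illustrative assumptions, not
+ * librsb API; it assumes a strictly upper triangular operand, as in the uU
+ * gather kernels above.
+ */
+static void spsv_coo_demo(const double *VA, const int *IA, const int *JA,
+	const int nnz, const int m, const double alpha, double *out)
+{
+	int n = nnz-1, ii;
+
+	/* out[] holds the right hand side on entry and the solution of
+	 * (I+U)*x == alpha*rhs on exit; rows are visited from last to first */
+	for(ii = m-1; ii >= 0; --ii)
+	{
+		double ax = 0;
+
+		while(n >= 0 && IA[n] == ii)	/* entries of row ii, all with JA[n] > ii */
+		{
+			ax += VA[n]*out[JA[n]];
+			--n;
+		}
+		out[ii] = alpha*out[ii] - ax;	/* implicit unit diagonal: no division */
+	}
+}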
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tN_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tN_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
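+/*
+ * A minimal sketch of what the infinity-norm kernels above accumulate: the
+ * per-row sums of |A_ij|, written into the caller-supplied row_sums array at
+ * offset roff for the current submatrix.  Taking the maximum entry of
+ * row_sums afterwards gives ||A||_inf.  The helper name infty_norm_coo_demo
+ * and its plain int indices are illustrative assumptions, not librsb API.
+ */
+static void infty_norm_coo_demo(const double *VA, const int *IA,
+	const int nnz, const int roff, double *row_sums)
+{
+	int n;
+
+	for(n = 0; n < nnz; ++n)
+		row_sums[roff+IA[n]] += fabs(VA[n]);	/* per-row sum of |A_ij| */
+}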
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tT_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A^T\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim-1} |A^{T}_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tT_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A^T\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim-1} |A^{T}_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tC_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A^T\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim-1} |A^{T}_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tC_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A^T\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim-1} |A^{T}_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
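+/*
+ * In the transposed kernels above (tT, and tC as well, since conjugation is a
+ * no-op on real double values) the row index of A^T is the column index of A,
+ * so the same accumulation writes to coff+JA[n] instead of roff+IA[n].  A
+ * sketch under the same illustrative assumptions as infty_norm_coo_demo:
+ */
+static void infty_norm_coo_trans_demo(const double *VA, const int *JA,
+	const int nnz, const int coff, double *row_sums)
+{
+	int n;
+
+	for(n = 0; n < nnz; ++n)
+		row_sums[coff+JA[n]] += fabs(VA[n]);	/* row of A^T == column of A */
+}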
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tN_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tN_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tT_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tT_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tC_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tC_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
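+/*
+ * With symmetric storage (sS) only one triangle of A is kept, so the kernels
+ * above credit each stored entry to its row and, via the roff+i != coff+j
+ * guard, to its column as well unless it lies on the diagonal (which must
+ * not be counted twice).  A sketch under the same illustrative assumptions
+ * as infty_norm_coo_demo:
+ */
+static void infty_norm_coo_sym_demo(const double *VA, const int *IA,
+	const int *JA, const int nnz, double *row_sums)
+{
+	int n;
+
+	for(n = 0; n < nnz; ++n)
+	{
+		row_sums[IA[n]] += fabs(VA[n]);
+		if(IA[n] != JA[n])	/* off-diagonal: mirror to the column, too */
+			row_sums[JA[n]] += fabs(VA[n]);
+	}
+}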
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tN_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tN_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tT_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tT_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tC_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tC_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tN_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tN_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tT_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A^T\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim-1} |A^{T}_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tT_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A^T\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim-1} |A^{T}_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tC_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A^T\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim-1} |A^{T}_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tC_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A^T\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim-1} |A^{T}_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tN_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tN_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tT_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tT_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tC_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tC_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tN_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tN_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tT_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tT_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tC_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tC_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tN_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} A_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tN_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} A_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;	
+	register rsb_nnz_idx_t n=0;
+					for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
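+
+/*
+ * Usage sketch for the two row-sums kernels above. As the kernel bodies
+ * show, the row indices travel through the bpntr argument and the column
+ * indices through bindx. A hypothetical call on a toy 2 x 2 block with
+ * entries A(0,0)=1, A(0,1)=2, A(1,1)=3 (the arrays, the cast, and the
+ * assumption that rsb_coo_idx_t and rsb_nnz_idx_t coincide, as in default
+ * builds, are all illustrative assumptions):
+ *
+ * \code
+ * const double VA[] = { 1.0, 2.0, 3.0 };
+ * const rsb_coo_idx_t IA[] = { 0, 0, 1 }, JA[] = { 0, 1, 1 };
+ * double row_sums[2] = { 0.0, 0.0 };
+ * rsb__BCOR_rowssums_double_C__tN_r1_c1_uu_sU_dE_uG(VA, row_sums,
+ * 	2, 2, JA, (const rsb_nnz_idx_t*)IA, NULL, NULL, NULL,
+ * 	1, 1, 0, 0, RSB_FLAG_NOFLAGS, 3);
+ * \endcode
+ *
+ * Afterwards row_sums holds { 3.0, 3.0 }: 1+2 for row 0, 3 for row 1.
+ */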
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tT_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tT_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
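+
+/*
+ * For the transposed (tT) kernels above, the same COO arrays are walked,
+ * but each value is accumulated by its column index: row i of A^T is
+ * column i of A. A minimal sketch of the equivalent scalar loop (names
+ * are illustrative assumptions):
+ *
+ * \code
+ * for (n = 0; n < nnz; ++n)
+ * 	row_sums[coff + JA[n]] += VA[n];
+ * \endcode
+ *
+ * For the real double type the conjugate-transpose (tC) kernels that
+ * follow coincide with the tT ones, since conjugation is the identity.
+ */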
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tC_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tC_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tN_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tN_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
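+
+/*
+ * For the symmetric (sS) kernels above only one triangle is stored, so each
+ * off-diagonal entry contributes to both its row sum and its mirrored
+ * column sum, while the roff+i != coff+j guard keeps diagonal entries from
+ * being counted twice. Worked example (an illustrative assumption): storing
+ * the lower triangle of the symmetric 2 x 2 matrix [4 2; 2 5] as the
+ * triplets (0,0,4), (1,0,2), (1,1,5) yields
+ *
+ * \code
+ * row_sums[0] == 4 + 2 == 6
+ * row_sums[1] == 2 + 5 == 7
+ * \endcode
+ *
+ * which matches the row sums of the full matrix.
+ */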
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tT_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tT_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tC_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tC_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tN_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tN_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tT_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tT_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tC_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tC_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tN_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tN_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
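+
+/*
+ * The diagonal-implicit (dI) kernels above have the same bodies as their
+ * diagonal-explicit counterparts: they sum only the stored off-diagonal
+ * entries. The unit diagonal implied by dI storage is presumably added by
+ * the caller; a minimal sketch of that correction (nr is an assumed global
+ * row count, not a kernel parameter):
+ *
+ * \code
+ * for (r = 0; r < nr; ++r)
+ * 	row_sums[r] += 1.0;
+ * \endcode
+ */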
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tT_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tT_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tC_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tC_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tN_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tN_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tT_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tT_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tC_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tC_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tN_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tN_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tT_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tT_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tC_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tC_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tN_r1_c1_uu_sU_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tN_r1_c1_uu_sU_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
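+
+/*
+ * Usage sketch for the scaling kernels above: each stored value is scaled
+ * in place by the factor of its row (tN variants) or column (tT/tC
+ * variants), equivalent to the scalar loop
+ *
+ * \code
+ * for (n = 0; n < nnz; ++n)
+ * 	VA[n] *= scale_factors[IA[n]];
+ * \endcode
+ *
+ * Toy example (an illustrative assumption): scaling the triplets (0,0,1),
+ * (0,1,2), (1,1,3) by scale_factors = {10, 100} with a tN kernel turns VA
+ * into {10, 20, 300}, i.e. row 0 scaled by 10 and row 1 by 100.
+ */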
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tT_r1_c1_uu_sU_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tT_r1_c1_uu_sU_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tC_r1_c1_uu_sU_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tC_r1_c1_uu_sU_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tN_r1_c1_uu_sS_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tN_r1_c1_uu_sS_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tT_r1_c1_uu_sS_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tT_r1_c1_uu_sS_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tC_r1_c1_uu_sS_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tC_r1_c1_uu_sS_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tN_r1_c1_uu_sH_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tN_r1_c1_uu_sH_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tT_r1_c1_uu_sH_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tT_r1_c1_uu_sH_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tC_r1_c1_uu_sH_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}, where A == A^H.\f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tC_r1_c1_uu_sH_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tN_r1_c1_uu_sU_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tN_r1_c1_uu_sU_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tT_r1_c1_uu_sU_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tT_r1_c1_uu_sU_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tC_r1_c1_uu_sU_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tC_r1_c1_uu_sU_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tN_r1_c1_uu_sS_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tN_r1_c1_uu_sS_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tT_r1_c1_uu_sS_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tT_r1_c1_uu_sS_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tC_r1_c1_uu_sS_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tC_r1_c1_uu_sS_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tN_r1_c1_uu_sH_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tN_r1_c1_uu_sH_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tT_r1_c1_uu_sH_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tT_r1_c1_uu_sH_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tC_r1_c1_uu_sH_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tC_r1_c1_uu_sH_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
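+/* Editorial sketch, not upstream code: driving the unrolled COO SpMV kernel
+ * above.  Column indices travel through bindx and row indices through bpntr
+ * (the body casts bpntr back to rsb_coo_idx_t); the cast below assumes the
+ * default configuration in which rsb_coo_idx_t and rsb_nnz_idx_t have the
+ * same width.  For A = [2 3; 0 4] and x = (1,1)^T it accumulates y += A*x. */
+#if 0
+static void example_spmv(void)
+{
+	const float VA[3] = { 2.0f, 3.0f, 4.0f };
+	const rsb_coo_idx_t IA[3] = { 0, 0, 1 }; /* row index per nonzero    */
+	const rsb_coo_idx_t JA[3] = { 0, 1, 1 }; /* column index per nonzero */
+	const float x[2] = { 1.0f, 1.0f };
+	float y[2] = { 0.0f, 0.0f };
+
+	rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sU_dE_uG(VA, x, y, 2, 2,
+		JA, (const rsb_nnz_idx_t*)IA, NULL, NULL, NULL,
+		1, 1, 0, 0, RSB_FLAG_NOFLAGS, 3);
+	/* y is now { 5.0f, 4.0f } */
+}
+#endif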
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
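+/* Editorial note, inferred from the code above rather than stated upstream:
+ * the caller positions rhs and out for the untransposed case (rhs at column
+ * offset coff, out at row offset roff).  For y += A^T x the index roles
+ * swap, so the re-basing moves rhs from coff to roff and out from roff to
+ * coff, letting the loop keep indexing with the local i and j unchanged. */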
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff); /* rhs re-based for the mirrored (symmetric) update */
+	float *tout = out+1*(coff-roff);       /* out re-based likewise */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
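+/* Editorial note, inferred from the code above: each stored nonzero (i,j) of
+ * a symmetric block contributes twice, VA[n]*x[j] to y[i] and VA[n]*x[i] to
+ * y[j].  On diagonal blocks (roff==coff) the i!=j guard prevents diagonal
+ * entries from being counted twice; on off-diagonal blocks no stored entry
+ * coincides with its mirror, so the guard is dropped and the mirrored update
+ * goes through the re-based trhs/tout pointers instead. */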
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff); /* rhs re-based for the mirrored (symmetric) update */
+	float *tout = out+1*(coff-roff);       /* out re-based likewise */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a (real) symmetric matrix the transposed product equals the untransposed one, so dispatch to the untransposed kernel. */
+	return rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
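+/* Editorial note: the forwarding above rests on the identity
+ * A = A^T  ==>  y + A^T x = y + A x; for the real type float the conjugate
+ * transpose (tC) variants reduce the same way, conjugation being a no-op. */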
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a (real) symmetric matrix the transposed product equals the untransposed one, so dispatch to the untransposed kernel. */
+	return rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a (real) symmetric matrix the (conjugate) transposed product equals the untransposed one, so dispatch to the untransposed kernel. */
+	return rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a (real) symmetric matrix the (conjugate) transposed product equals the untransposed one, so dispatch to the untransposed kernel. */
+	return rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff); /* rhs re-based for the mirrored (Hermitian; real, so symmetric) update */
+	float *tout = out+1*(coff-roff);       /* out re-based likewise */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff); /* rhs re-based for the mirrored (Hermitian; real, so symmetric) update */
+	float *tout = out+1*(coff-roff);       /* out re-based likewise */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a real Hermitian (hence symmetric) matrix the transposed product equals the untransposed one, so dispatch to the untransposed kernel. */
+	return rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a real Hermitian (hence symmetric) matrix the transposed product equals the untransposed one, so dispatch to the untransposed kernel. */
+	return rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a real Hermitian (hence symmetric) matrix the conjugate transposed product equals the untransposed one, so dispatch to the untransposed kernel. */
+	return rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a real Hermitian (hence symmetric) matrix the conjugate transposed product equals the untransposed one, so dispatch to the untransposed kernel. */
+	return rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
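+/* Editorial note: the _dI (diagonal implicit) kernels share their body with
+ * the _dE ones; presumably the implicit diagonal's contribution (y += x for
+ * a unit diagonal, cf. RSB_FLAG_UNIT_DIAG_IMPLICIT in rsb.h) is added by the
+ * caller, as the NOTE above indicates. */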
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff); /* rhs re-based for the mirrored (symmetric) update */
+	float *tout = out+1*(coff-roff);       /* out re-based likewise */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff); /* rhs re-based for the mirrored (symmetric) update */
+	float *tout = out+1*(coff-roff);       /* out re-based likewise */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	/* For a symmetric matrix the transposed product reduces to the untransposed one. */
+	return rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	/* For a symmetric matrix the transposed product reduces to the untransposed one. */
+	return rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	/* For a symmetric matrix the transposed product reduces to the untransposed one. */
+	return rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	/* For a symmetric matrix the transposed product reduces to the untransposed one. */
+	return rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff);// H
+	float *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff);// H
+	float *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	/* For a symmetric matrix the transposed product reduces to the untransposed one. */
+	return rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	/* For a symmetric matrix the transposed product reduces to the untransposed one. */
+	return rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	/* For a symmetric matrix the transposed product reduces to the untransposed one. */
+	return rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	/* For a symmetric matrix the transposed product reduces to the untransposed one. */
+	return rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
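+	/* uauz semantics (y <- A*x): out[0..Mdim-1] is zeroed first; the NULL
+	 * alpha argument to rsb__cblas_Xscal here amounts to scaling by zero. */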
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim,NULL,out,1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim,NULL,out,1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,mdim,NULL,out,1);
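+	/* Transposed access: re-base rhs and out so that the row and column
+	 * offsets exchange roles with respect to the untransposed kernel. */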
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
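+/*
+ * Illustrative sketch only, under the same hypothetical
+ * RSB_WANT_KERNEL_USAGE_EXAMPLE guard: the transposed kernel above computing
+ * y <- A^T*x for A = [[1,2],[0,3]] held as COO triplets.
+ */
+#ifdef RSB_WANT_KERNEL_USAGE_EXAMPLE
+static rsb_err_t rsb_spmv_uauz_trans_example(void)
+{
+	const float VA[3] = { 1.0f, 2.0f, 3.0f };
+	const rsb_coo_idx_t IA[3] = { 0, 0, 1 };	/* row indices (bpntr) */
+	const rsb_coo_idx_t JA[3] = { 0, 1, 1 };	/* column indices (bindx) */
+	const float x[2] = { 1.0f, 1.0f };
+	float y[2] = { -1.0f, -1.0f };			/* overwritten with {1,5} */
+
+	return rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_uu_sU_dE_uG(
+		VA, x, y, 2, 2, JA, IA, NULL, NULL, NULL, 1, 1, 0, 0, 0, 3);
+}
+#endif /* RSB_WANT_KERNEL_USAGE_EXAMPLE */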
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff);// S
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff);// S
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a symmetric matrix the transposed product reduces to the untransposed one. */
+	return rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a symmetric matrix the transposed product reduces to the untransposed one. */
+	return rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a symmetric matrix the transposed product reduces to the untransposed one. */
+	return rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a symmetric matrix the transposed product reduces to the untransposed one. */
+	return rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff);// H
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff);// H
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a symmetric matrix the transposed product reduces to the untransposed one. */
+	return rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a symmetric matrix the transposed product reduces to the untransposed one. */
+	return rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a symmetric matrix the transposed product reduces to the untransposed one. */
+	return rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a symmetric matrix the transposed product reduces to the untransposed one. */
+	return rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim,NULL,out,1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim,NULL,out,1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff);// S
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff);// S
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	/* For a symmetric matrix the transposed product reduces to the untransposed one. */
+	return rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	/* For a symmetric matrix the transposed product reduces to the untransposed one. */
+	return rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	/* For a symmetric matrix the transposed product reduces to the untransposed one. */
+	return rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	/* For a symmetric matrix the transposed product reduces to the untransposed one. */
+	return rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff);// H
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff);// H
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
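+
+/*
+ * End of the `uauz' family (y <- op(A).x: note the rsb__cblas_Xscal call with
+ * a NULL alpha pointer at the top of each computing kernel, apparently used
+ * as scale-by-zero to clear out before accumulating). The `uxua' kernels that
+ * follow compute y <- y + alpha.op(A).x instead: same loop bodies, no
+ * clearing, and each product scaled by *alphap.
+ */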
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=(alpha)*VA[n+1]*rhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=(alpha)*VA[n+2]*rhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=(alpha)*VA[n+3]*rhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
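+
+/*
+ * A minimal calling sketch for the kernel above (for illustration only: these
+ * are internal kernels, normally reached through the dispatch layer; the
+ * arguments this particular kernel never touches -- indptr, rpntr, cpntr --
+ * are passed as NULL here, which is an assumption, not documented API use):
+ *
+ *	const float VA[] = {1.0f, 2.0f, 3.0f};   // nonzeroes of [[1 2][0 3]]
+ *	const rsb_coo_idx_t IA[] = {0, 0, 1};    // row indices (bpntr)
+ *	const rsb_coo_idx_t JA[] = {0, 1, 1};    // column indices (bindx)
+ *	const float x[2] = {1.0f, 1.0f}, alpha = 2.0f;
+ *	float y[2] = {0.0f, 0.0f};
+ *	rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sU_dE_uG(VA, x, y, 2, 2,
+ *		JA, IA, NULL, NULL, NULL, 1, 1, 0, 0, 0, 3, &alpha);
+ *	// now y == {6, 6}: y[0] = 2*(1*1 + 2*1), y[1] = 2*(3*1)
+ */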
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=(alpha)*VA[n+1]*rhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=(alpha)*VA[n+2]*rhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=(alpha)*VA[n+3]*rhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=(alpha)*VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=(alpha)*VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=(alpha)*VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=(alpha)*VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=(alpha)*VA[n+0]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
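+
+/*
+ * The pointer rebasing above (rhs += roff-coff, out += coff-roff) lets the
+ * transposed kernels reuse arrays positioned for the untransposed case:
+ * after the shift, reading rhs[i] (a row index) and accumulating into
+ * out[j] (a column index) lands on the correct global entries, so the loop
+ * body is just the tN body with the roles of i and j exchanged.
+ */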
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=(alpha)*VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=(alpha)*VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=(alpha)*VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=(alpha)*VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=(alpha)*VA[n+0]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=(alpha)*VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=(alpha)*VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=(alpha)*VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=(alpha)*VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=(alpha)*VA[n+0]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=(alpha)*VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=(alpha)*VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=(alpha)*VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=(alpha)*VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=(alpha)*VA[n+0]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	
+	const float *trhs = rhs+1*(roff-coff);// S
+	float *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0]*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=(alpha)*VA[n+1]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1]*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=(alpha)*VA[n+2]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2]*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=(alpha)*VA[n+3]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
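+
+/*
+ * A worked instance of the symmetric diagonal-block loop above (values are
+ * illustrative): with A = [[a b][b c]] stored as the triplets (0,0,a),
+ * (0,1,b), (1,1,c) and roff==coff, entry (0,1) fires both updates,
+ * out[0] += alpha*b*rhs[1] and out[1] += alpha*b*rhs[0], while the diagonal
+ * entries (i==j) are guarded by RSB_LIKELY(i!=j) and update out[i] only once.
+ */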
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	
+	const float *trhs = rhs+1*(roff-coff);// S
+	float *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0]*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=(alpha)*VA[n+1]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1]*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=(alpha)*VA[n+2]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2]*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=(alpha)*VA[n+3]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	
+	const float *trhs = rhs+1*(roff-coff);// H
+	float *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0]*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=(alpha)*VA[n+1]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1]*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=(alpha)*VA[n+2]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2]*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=(alpha)*VA[n+3]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	
+	const float *trhs = rhs+1*(roff-coff);// H
+	float *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0]*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=(alpha)*VA[n+1]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1]*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=(alpha)*VA[n+2]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2]*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=(alpha)*VA[n+3]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
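+
+/*
+ * Note that for the real type float the hermitian (`sH') kernels in this file
+ * are line-for-line identical to the symmetric (`sS') ones (compare the
+ * bodies above): conjugation is a no-op on real data, so only the generated
+ * name and the S/H marker comment differ.
+ */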
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=(alpha)*VA[n+1]*rhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=(alpha)*VA[n+2]*rhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=(alpha)*VA[n+3]*rhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=(alpha)*VA[n+1]*rhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=(alpha)*VA[n+2]*rhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=(alpha)*VA[n+3]*rhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=(alpha)*VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=(alpha)*VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=(alpha)*VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=(alpha)*VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=(alpha)*VA[n+0]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=(alpha)*VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=(alpha)*VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=(alpha)*VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=(alpha)*VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=(alpha)*VA[n+0]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=(alpha)*VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=(alpha)*VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=(alpha)*VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=(alpha)*VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=(alpha)*VA[n+0]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=(alpha)*VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=(alpha)*VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=(alpha)*VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=(alpha)*VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=(alpha)*VA[n+0]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	
+	const float *trhs = rhs+1*(roff-coff);// S
+	float *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0]*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=(alpha)*VA[n+1]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1]*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=(alpha)*VA[n+2]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2]*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=(alpha)*VA[n+3]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	
+	const float *trhs = rhs+1*(roff-coff);// S
+	float *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0]*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=(alpha)*VA[n+1]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1]*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=(alpha)*VA[n+2]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2]*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=(alpha)*VA[n+3]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	
+	const float *trhs = rhs+1*(roff-coff);// H
+	float *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0]*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=(alpha)*VA[n+1]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1]*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=(alpha)*VA[n+2]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2]*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=(alpha)*VA[n+3]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	
+	const float *trhs = rhs+1*(roff-coff);// H
+	float *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0]*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=(alpha)*VA[n+1]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1]*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=(alpha)*VA[n+2]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2]*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=(alpha)*VA[n+3]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=(alpha)*VA[n+0]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+}
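+/* The unrolled loop above covers nnz in groups of four; the loop below
+ * handles the remaining nnz%4 entries. */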
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
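+
+/* A minimal usage sketch (not part of the generated sources) for the
+ * kernel above: it applies y <- y - A*x to a hand-built 2x2 COO matrix.
+ * As the body shows, this unsymmetric variant reads only VA, bindx
+ * (column indices), bpntr (row indices), nnz, rhs and out, so the
+ * unused block/offset parameters may be passed as NULL and zero. */
+static void rsb__BCOR_spmv_unua_float_example(void)
+{
+	const float VA[] = { 1.0f, 2.0f, 3.0f };	/* A = [ 1 2 ; 0 3 ] */
+	const rsb_coo_idx_t IA[] = { 0, 0, 1 };	/* row indices */
+	const rsb_coo_idx_t JA[] = { 0, 1, 1 };	/* column indices */
+	const float x[] = { 1.0f, 1.0f };
+	float y[] = { 0.0f, 0.0f };	/* on return: { -3.0f, -3.0f } */
+
+	rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sU_dE_uG(VA, x, y,
+		2, 2, JA, IA, NULL, NULL, NULL, 1, 1, 0, 0, 0, 3);
+}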
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
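+	/* The transposed kernel gathers along rows (index i) and scatters
+	 * along columns (index j), so the vector base pointers are rebased
+	 * below from the column offset to the row offset and vice versa. */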
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff);// S
+	float *tout=out+1*(coff-roff);
+
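+	/* A symmetric entry (i,j) also acts as (j,i).  On a diagonal block
+	 * (roff==coff) both updates target the same vectors and the i!=j
+	 * guard keeps diagonal entries from being counted twice; on an
+	 * off-diagonal block the mirrored update lands in the transposed
+	 * block, addressed through the rebased trhs/tout pointers above. */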
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+		i=IA[n+1 ]; j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+		i=IA[n+2 ]; j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+		i=IA[n+3 ]; j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+	for(     ;n<nnz;++n){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff);// S
+	float *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+		i=IA[n+1 ]; j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+		i=IA[n+2 ]; j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+		i=IA[n+3 ]; j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+	for(     ;n<nnz;++n){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff);// H
+	float *tout=out+1*(coff-roff);
+
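+	/* For real (float) data the Hermitian case needs no conjugation and
+	 * coincides with the symmetric one; this body differs from the sS
+	 * kernel above only in the generated marker (// H vs // S). */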
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+		i=IA[n+1 ]; j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+		i=IA[n+2 ]; j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+		i=IA[n+3 ]; j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+	for(     ;n<nnz;++n){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff);// H
+	float *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+		i=IA[n+1 ]; j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+		i=IA[n+2 ]; j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+		i=IA[n+3 ]; j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+	for(     ;n<nnz;++n){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff);// S
+	float *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+		i=IA[n+1 ]; j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+		i=IA[n+2 ]; j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+		i=IA[n+3 ]; j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+	for(     ;n<nnz;++n){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff);// S
+	float *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+		i=IA[n+1 ]; j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+		i=IA[n+2 ]; j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+		i=IA[n+3 ]; j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+	for(     ;n<nnz;++n){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff);// H
+	float *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+		i=IA[n+1 ]; j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+		i=IA[n+2 ]; j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+		i=IA[n+3 ]; j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+	for(     ;n<nnz;++n){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float *trhs = rhs+1*(roff-coff);// H
+	float *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+		i=IA[n+1 ]; j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+		i=IA[n+2 ]; j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+		i=IA[n+3 ]; j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+	for(     ;n<nnz;++n){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
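+	/* The sasa variants generalize the unit-stride kernels above to
+	 * strided vectors: output element i lives at out[i*incy] and input
+	 * element j at rhs[j*incx]. */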
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
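+
+/*
+ * For the real type float, conjugation is the identity, so these _tC
+ * (conjugate-transpose) kernels carry the same body as the _tT ones above;
+ * only the complex-typed instances would differ, by conjugating VA[n] in the
+ * update.
+ */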
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float *trhs = rhs+incx*(roff-coff);	/* S: rhs re-based for the mirrored (transposed) off-diagonal update */
+	float *tout=out+incy*(coff-roff);		/* out re-based likewise */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
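+
+/*
+ * The symmetric (sS) kernels distinguish two cases.  On a diagonal block
+ * (roff == coff) each stored entry (i,j) also stands for its mirror (j,i),
+ * so both updates are issued in one pass, skipping the mirror when i == j so
+ * the diagonal is not counted twice.  On an off-diagonal block (roff != coff)
+ * the mirror lands in a different index range, which is what the re-based
+ * trhs/tout pointers address.  A sketch of the diagonal-block case (a
+ * hypothetical helper, unit strides):
+ *
+ *	void coo_spmv_sym_diag(int nnz, const int *IA, const int *JA,
+ *	                       const float *VA, const float *x, float *y)
+ *	{
+ *		for (int n = 0; n < nnz; ++n) {
+ *			y[IA[n]] += VA[n] * x[JA[n]];
+ *			if (IA[n] != JA[n])	// mirror of a strictly off-diagonal entry
+ *				y[JA[n]] += VA[n] * x[IA[n]];
+ *		}
+ *	}
+ */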
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float *trhs = rhs+incx*(roff-coff);	/* S: rhs re-based for the mirrored (transposed) off-diagonal update */
+	float *tout=out+incy*(coff-roff);		/* out re-based likewise */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* A == A^T, so the transposed operation coincides with the non-transposed one. */
+	return rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
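+
+/*
+ * Since A == A^T here, A^T x == A x holds, so no separate transposed loop is
+ * needed: this kernel and its siblings below simply forward all arguments to
+ * their _tN counterparts.
+ */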
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* A == A^T, so the transposed operation coincides with the non-transposed one. */
+	return rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* A == A^T and the type is real, so the conjugate-transposed operation coincides with the non-transposed one. */
+	return rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* A == A^T and the type is real, so the conjugate-transposed operation coincides with the non-transposed one. */
+	return rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float *trhs = rhs+incx*(roff-coff);	/* H: rhs re-based for the mirrored (transposed) off-diagonal update */
+	float *tout=out+incy*(coff-roff);		/* out re-based likewise */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float *trhs = rhs+incx*(roff-coff);	/* H: rhs re-based for the mirrored (transposed) off-diagonal update */
+	float *tout=out+incy*(coff-roff);		/* out re-based likewise */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* A == A^H and the type is real, so the transposed operation coincides with the non-transposed one. */
+	return rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* A == A^H and the type is real, so the transposed operation coincides with the non-transposed one. */
+	return rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* A == A^H, so the conjugate-transposed operation coincides with the non-transposed one. */
+	return rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* A == A^H, so the conjugate-transposed operation coincides with the non-transposed one. */
+	return rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
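+
+/*
+ * For the real type float the sH (Hermitian) kernels above are identical in
+ * body to the sS (symmetric) ones: A == A^H reduces to A == A^T when
+ * conjugation is the identity.  The distinction matters only for the
+ * complex-typed instances of these kernels.
+ */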
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
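+
+/*
+ * As the NOTE above says, the dI (diagonal-implicit) kernels accumulate only
+ * the stored off-diagonal entries; the implicit unit-diagonal contribution is
+ * added by the caller.  A plausible caller-side sketch (hypothetical code,
+ * not the library's actual dispatch) for a diagonal block would be:
+ *
+ *	if (diag_implicit && roff == coff)
+ *		for (rsb_coo_idx_t i = 0; i < Mdim; ++i)
+ *			out[i*incy] += rhs[i*incx];	// y += I*x
+ */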
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float *trhs = rhs+incx*(roff-coff);	/* S: rhs re-based for the mirrored (transposed) off-diagonal update */
+	float *tout=out+incy*(coff-roff);		/* out re-based likewise */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float *trhs = rhs+incx*(roff-coff);	/* S: rhs re-based for the mirrored (transposed) off-diagonal update */
+	float *tout=out+incy*(coff-roff);		/* out re-based likewise */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is accounted for at the caller level. */
+	/* A == A^T, so the transposed operation coincides with the non-transposed one. */
+	return rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is accounted for at the caller level. */
+	/* A == A^T, so the transposed operation coincides with the non-transposed one. */
+	return rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is accounted for at the caller level. */
+	/* A == A^T and the type is real, so the conjugate-transposed operation coincides with the non-transposed one. */
+	return rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is accounted for at the caller level. */
+	/* A == A^T and the type is real, so the conjugate-transposed operation coincides with the non-transposed one. */
+	return rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float *trhs = rhs+incx*(roff-coff);	/* H: rhs re-based for the mirrored (transposed) off-diagonal update */
+	float *tout=out+incy*(coff-roff);		/* out re-based likewise */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float *trhs = rhs+incx*(roff-coff);	/* H: rhs re-based for the mirrored (transposed) off-diagonal update */
+	float *tout=out+incy*(coff-roff);		/* out re-based likewise */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is accounted for at the caller level. */
+	/* A == A^H and the type is real, so the transposed operation coincides with the non-transposed one. */
+	return rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is accounted for at the caller level. */
+	/* A == A^H and the type is real, so the transposed operation coincides with the non-transposed one. */
+	return rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is accounted for at the caller level. */
+	/* A == A^H, so the conjugate-transposed operation coincides with the non-transposed one. */
+	return rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is accounted for at the caller level. */
+	/* A == A^H, so the conjugate-transposed operation coincides with the non-transposed one. */
+	return rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy
+);
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n<0 || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward sweep: guard index underflow (n==nnz is unreachable here) */
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
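+
+/*
+ * This uU solver is a backward substitution over row-sorted COO data: it
+ * walks the arrays from the end, so within each row ii it meets the
+ * strictly-upper entries (i == ii, j != ii) first, folds them into ax, and
+ * stops at the diagonal entry, by which it then divides.  Note that rhs is
+ * unused: the caller has evidently preloaded the right-hand side into out.
+ * A model of the same scheme (a hypothetical helper, unit stride):
+ *
+ *	// solve U*y = b, with U upper-triangular in row-major-sorted COO
+ *	// (diagonal first within each row) and b preloaded into y[]
+ *	for (int row = m - 1, n = nnz - 1; row >= 0; --row) {
+ *		float ax = 0;
+ *		while (n >= 0 && IA[n] == row && JA[n] != row)
+ *			ax += VA[n] * y[JA[n]], --n;	// strictly-upper entries
+ *		if (n < 0 || VA[n] == 0.0f)
+ *			return RSB_ERR_INVALID_NUMERICAL_DATA;
+ *		y[row] = (y[row] - ax) / VA[n];		// divide by the diagonal
+ *		--n;
+ *	}
+ */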
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
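+
+/*
+ * The uL variant is the forward-substitution twin of the kernel above: it
+ * scans the arrays from the front, so the strictly-lower entries of each row
+ * precede its diagonal, and rows are resolved in increasing order.
+ */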
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n<0 || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward sweep: guard index underflow (n==nnz is unreachable here) */
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^T)^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]); /* no-op: presumably the slot where scaled (non-unitary) variants apply alpha */
+	}
+	return RSB_ERR_NO_ERROR;
+}
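+
+/*
+ * Solving against the transposed operator turns the substitution into a
+ * scatter (column-oriented) sweep: row ii of A is column ii of A^T, so the
+ * kernel first divides out[ii] by the diagonal entry (the first stored entry
+ * of the row) and then subtracts VA[n]*out[ii] from out[j] for every
+ * remaining entry of that row.  A model (a hypothetical helper, unit stride):
+ *
+ *	// solve U^T*y = b, with U upper-triangular in row-major-sorted COO
+ *	// (diagonal first within each row) and b preloaded into y[]
+ *	for (int row = 0, n = 0; row < m; ++row) {
+ *		if (n >= nnz || VA[n] == 0.0f)
+ *			return RSB_ERR_INVALID_NUMERICAL_DATA;
+ *		y[row] /= VA[n++];			// diagonal comes first
+ *		while (n < nnz && IA[n] == row)		// scatter the rest of the row
+ *			y[JA[n]] -= VA[n] * y[row], ++n;
+ *	}
+ */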
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^T)^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward sweep: guard index underflow (n>=nnz is unreachable here) */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]); /* no-op: presumably the slot where scaled (non-unitary) variants apply alpha */
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^T)^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA;	/* entries exhausted before this row's diagonal */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
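+
+/*
+ * Illustration (a hypothetical sketch; guard macro and function name are
+ * invented): the dE ("diagonal explicit") kernels of this family validate
+ * the diagonal as they go.  A zero leading diagonal entry, as below, makes
+ * them return RSB_ERR_INVALID_NUMERICAL_DATA instead of dividing by zero.
+ */
+#ifdef RSB_SPSV_KERNEL_SKETCHES
+static rsb_err_t rsb_sketch_spsv_zero_diagonal(void)
+{
+	const float VA[] = { 0.0f, 1.0f, 4.0f };	/* zero (0,0) diagonal */
+	const rsb_coo_idx_t IA[] = { 0, 0, 1 };
+	const rsb_coo_idx_t JA[] = { 0, 1, 1 };
+	float y[] = { 2.0f, 6.0f };
+
+	/* expected: RSB_ERR_INVALID_NUMERICAL_DATA, with y left untouched */
+	return rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dE_uU(
+		VA, y, y, 2, 2, JA, (const rsb_nnz_idx_t*)IA,
+		NULL, NULL, NULL, 1, 1, 0, 0, (rsb_flags_t)0, 3);
+}
+#endif /* RSB_SPSV_KERNEL_SKETCHES */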
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA;	/* entries exhausted before this row's diagonal */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA;	/* entries exhausted before this row's diagonal */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
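+
+/*
+ * Note: for the real type float the conjugate-transpose (tC) kernels above
+ * compute exactly what the transpose (tT) ones do, since
+ * \f$A^H = \bar{A}^T = A^T\f$ when every entry is real; the conjugation
+ * only makes a difference in the complex-typed variants of these kernels.
+ */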
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n+1==0 || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;	/* entries exhausted before the diagonal, or zero diagonal */
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
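+
+/*
+ * Note: unlike the scatter-style transposed kernels, the tN kernels of
+ * this family gather within each row.  The backward (uU) sweep above walks
+ * the arrays from the tail, consuming the off-diagonal entries of a row
+ * before its leading diagonal, and computes
+ * \f$x_i = ( b_i - \sum_{j > i} a_{ij} x_j ) / a_{ii}\f$ for
+ * \f$i = Mdim-1, \ldots, 0\f$; the uL variant below runs the same
+ * recurrence forward, with \f$j < i\f$ and the diagonal stored last.
+ */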
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
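+
+/*
+ * Illustration (a hypothetical sketch; guard macro and function name are
+ * invented): forward substitution with the kernel above on the lower
+ * triangle A = [ 2 0 ; 3 4 ], stored by rows with each diagonal entry
+ * last.  For b = (2, 10) the solve leaves x = (1, 1.75) in place; unused
+ * arguments get placeholders, as the kernel never reads them.
+ */
+#ifdef RSB_SPSV_KERNEL_SKETCHES
+static rsb_err_t rsb_sketch_spsv_forward_lower(void)
+{
+	const float VA[] = { 2.0f, 3.0f, 4.0f };	/* (0,0) (1,0) (1,1) */
+	const rsb_coo_idx_t IA[] = { 0, 1, 1 };
+	const rsb_coo_idx_t JA[] = { 0, 0, 1 };
+	float x[] = { 2.0f, 10.0f };			/* b on entry, x on exit */
+
+	return rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dE_uL(
+		VA, x, x, 2, 2, JA, (const rsb_nnz_idx_t*)IA,
+		NULL, NULL, NULL, 1, 1, 0, 0, (rsb_flags_t)0, 3);
+}
+#endif /* RSB_SPSV_KERNEL_SKETCHES */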
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n+1==0 || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;	/* entries exhausted before the diagonal, or zero diagonal */
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA;	/* entries exhausted before this row's diagonal */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA;	/* entries exhausted before this row's diagonal */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
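+
+/*
+ * Note: a triangular solve only ever touches the stored triangle, so the
+ * symmetric (sS) transposed kernels above are code-identical to the
+ * unsymmetric (sU) ones; the symmetry of the logical matrix does not
+ * alter the sweep, it only matters to the callers that pick a kernel.
+ */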
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA;	/* entries exhausted before this row's diagonal */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA;	/* entries exhausted before this row's diagonal */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n+1==0 || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;	/* entries exhausted before the diagonal, or zero diagonal */
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n+1==0 || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;	/* entries exhausted before the diagonal, or zero diagonal */
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
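+
+/*
+ * Note: for float data the Hermitian (sH) kernels repeat the symmetric
+ * (sS) computation entry for entry, since a real matrix is Hermitian
+ * exactly when it is symmetric (\f$A = A^H \Leftrightarrow A = A^T\f$
+ * for real \f$A\f$); the distinction is only material for the
+ * complex-typed kernels.
+ */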
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA;	/* entries exhausted before this row's diagonal */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA;	/* entries exhausted before this row's diagonal */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA;	/* entries exhausted before this row's diagonal */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA;	/* entries exhausted before this row's diagonal */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
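+
+/*
+ * Illustration (a hypothetical sketch; guard macro and function name are
+ * invented): the dI ("diagonal implicit") kernels assume a unit diagonal,
+ * so no diagonal entries are stored and no division is performed.  For
+ * A = [ 1 0 ; 3 1 ] only the strictly lower entry (1,0) is stored; with
+ * b = (1, 5) the kernel above leaves x = (1, 2) in place.
+ */
+#ifdef RSB_SPSV_KERNEL_SKETCHES
+static rsb_err_t rsb_sketch_spsv_unit_diagonal(void)
+{
+	const float VA[] = { 3.0f };		/* strictly lower part only */
+	const rsb_coo_idx_t IA[] = { 1 };
+	const rsb_coo_idx_t JA[] = { 0 };
+	float x[] = { 1.0f, 5.0f };		/* b on entry, x on exit */
+
+	return rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dI_uL(
+		VA, x, x, 2, 2, JA, (const rsb_nnz_idx_t*)IA,
+		NULL, NULL, NULL, 1, 1, 0, 0, (rsb_flags_t)0, 1);
+}
+#endif /* RSB_SPSV_KERNEL_SKETCHES */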
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
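+
+/*
+ * Illustration (a hypothetical sketch; guard macro and function name are
+ * invented): the transposed unit-diagonal solve scatters without ever
+ * dividing.  For the unit upper triangle A = [ 1 1 ; 0 1 ] only (0,1) is
+ * stored; solving A^T * y = x with x = (2, 6) through the kernel above
+ * leaves y = (2, 4) in place.
+ */
+#ifdef RSB_SPSV_KERNEL_SKETCHES
+static rsb_err_t rsb_sketch_spsv_transposed_unit_diagonal(void)
+{
+	const float VA[] = { 1.0f };		/* strictly upper part only */
+	const rsb_coo_idx_t IA[] = { 0 };
+	const rsb_coo_idx_t JA[] = { 1 };
+	float y[] = { 2.0f, 6.0f };		/* x on entry, y on exit */
+
+	return rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dI_uU(
+		VA, y, y, 2, 2, JA, (const rsb_nnz_idx_t*)IA,
+		NULL, NULL, NULL, 1, 1, 0, 0, (rsb_flags_t)0, 1);
+}
+#endif /* RSB_SPSV_KERNEL_SKETCHES */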
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
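+
+/*
+ * The `_H_` kernels above differ from their `_C_` counterparts only in
+ * reading the index arrays through the narrower `rsb_half_idx_t` type
+ * (nominally half the width of `rsb_coo_idx_t`, as the name suggests),
+ * which reduces index-array traffic on small submatrices; the arithmetic
+ * and the loop structure are identical.
+ */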
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
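+
+/*
+ * The symmetric (sS) solve kernels above gather rather than scatter: for
+ * each row ii the inner loop accumulates the already-solved contributions
+ * into `ax`, then updates out[ii] once.  The `j!=i` part of the generated
+ * break condition is vacuous here, since with the diagonal implicit (dI)
+ * no diagonal entries are stored.  A minimal self-contained sketch of this
+ * gather-style substitution with plain `int` indices; an illustration of
+ * the technique, not part of the librsb API:
+ */
+static void example_coo_spsv_gather(const float *VA, float *out,
+                                    const int *IA, const int *JA,
+                                    int Mdim, int nnz)
+{
+	int n = 0;
+	int ii;
+
+	for (ii = 0; ii < Mdim; ++ii)            /* unknowns first-to-last    */
+	{
+		float ax = 0.0f;
+		for (; n < nnz && IA[n] == ii; ++n)
+			ax += VA[n] * out[JA[n]]; /* gather solved terms       */
+		out[ii] -= ax;                    /* implicit unit diagonal    */
+	}
+}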
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A == A^T. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
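+
+/*
+ * With a real scalar type conjugation is the identity, so these Hermitian
+ * (sH) float kernels are generated with bodies identical to the symmetric
+ * (sS) ones above; only the documented symmetry assumption (A == A^H rather
+ * than A == A^T) differs.
+ */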
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x, where A == A^H. \f$
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x, where A \neq A^T. \f$
+	 * with incx and incy as x and y vector strides
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
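+
+/*
+ * The spmv_sxsa kernels accumulate y += alpha * op(A) * x over COO entries,
+ * with a four-way manually unrolled main loop and a scalar remainder loop.
+ * A minimal self-contained sketch of the same unroll-plus-remainder pattern
+ * with plain `int` indices and unit strides follows; an illustration only,
+ * not part of the librsb API:
+ */
+static void example_coo_spmv_unrolled(const float *VA, const float *x,
+                                      float *y, const int *IA, const int *JA,
+                                      int nnz, float alpha)
+{
+	int n;
+
+	for (n = 0; n + 3 < nnz; n += 4)        /* 4-way unrolled main loop  */
+	{
+		y[IA[n + 0]] += alpha * VA[n + 0] * x[JA[n + 0]];
+		y[IA[n + 1]] += alpha * VA[n + 1] * x[JA[n + 1]];
+		y[IA[n + 2]] += alpha * VA[n + 2] * x[JA[n + 2]];
+		y[IA[n + 3]] += alpha * VA[n + 3] * x[JA[n + 3]];
+	}
+	for (; n < nnz; ++n)                    /* up to three leftovers     */
+		y[IA[n]] += alpha * VA[n] * x[JA[n]];
+}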
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x, where A \neq A^T. \f$
+	 * with incx and incy as x and y vector strides
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x, where A \neq A^T. \f$
+	 * with incx and incy as x and y vector strides
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
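+
+/*
+ * In the transposed (tT/tC) kernels the roles of the row index i and the
+ * column index j are swapped, so the operand pointers are rebased first:
+ * rhs = (rhs - coff*incx) + roff*incx shifts `rhs` by (roff - coff)
+ * elements, after which indexing `rhs` by the local row index i addresses
+ * global position i + roff, and indexing `out` by the local column index j
+ * addresses global position j + coff, as the transposed product requires.
+ */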
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x, where A \neq A^T. \f$
+	 * with incx and incy as x and y vector strides
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x, where A \neq A^T. \f$
+	 * with incx and incy as x and y vector strides
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x, where A \neq A^T. \f$
+	 * with incx and incy as x and y vector strides
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x, where A == A^T. \f$
+	 * with incx and incy as x and y vector strides
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+incx*(roff-coff); /* S: operand rebased for the mirrored (symmetric) update */
+	float *tout = out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
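+
+/*
+ * The symmetric (sS) spmv kernels take two paths.  On a diagonal block
+ * (roff == coff) each stored entry (i,j) contributes to both y[i] and y[j],
+ * with the i != j guard preventing a double diagonal update; on an
+ * off-diagonal block (roff != coff) no stored entry can be diagonal, so the
+ * guard is dropped, the loop is unrolled, and the mirrored update goes
+ * through `trhs`/`tout`, the operands rebased by (roff - coff) to the
+ * transposed positions.  A minimal sketch of the diagonal-block path with
+ * plain `int` indices and unit strides; an illustration only, not part of
+ * the librsb API:
+ */
+static void example_coo_spmv_sym(const float *VA, const float *x, float *y,
+                                 const int *IA, const int *JA,
+                                 int nnz, float alpha)
+{
+	int n;
+
+	for (n = 0; n < nnz; ++n)
+	{
+		const int i = IA[n], j = JA[n];
+		y[i] += alpha * VA[n] * x[j];
+		if (i != j)                      /* mirror the strict part    */
+			y[j] += alpha * VA[n] * x[i];
+	}
+}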
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x, where A == A^T. \f$
+	 * with incx and incy as x and y vector strides
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+incx*(roff-coff); /* S: operand rebased for the mirrored (symmetric) update */
+	float *tout = out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x, where A == A^T. \f$
+	 * with incx and incy as x and y vector strides
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
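+
+/*
+ * For symmetric A one has A^T = A (and, with real `float` data, A^H = A as
+ * well), so the tT and tC wrappers in this group forward to the
+ * untransposed sS (or sH) kernel with the argument list unchanged.
+ */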
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x, where A == A^T. \f$
+	 * with incx and incy as x and y vector strides
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x, where A == A^T. \f$
+	 * with incx and incy as x and y vector strides
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x, where A == A^T. \f$
+	 * with incx and incy as x and y vector strides
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sS_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x, where A == A^H. \f$
+	 * with incx and incy as x and y vector strides
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+incx*(roff-coff); /* H: operand rebased for the mirrored (hermitian) update */
+	float *tout = out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x, where A == A^H. \f$
+	 * with incx and incy as x and y vector strides
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+incx*(roff-coff); /* H: operand rebased for the mirrored (hermitian) update */
+	float *tout = out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x, where A == A^H. \f$
+	 * with incx and incy as x and y vector strides
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x, where A == A^H. \f$
+	 * with incx and incy as x and y vector strides
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the strides of the x and y vectors.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the strides of the x and y vectors.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sH_dE_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the strides of the x and y vectors.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		i=IA[n+1 ]; j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		i=IA[n+2 ]; j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		i=IA[n+3 ]; j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+	}
+	for(     ;n<nnz;++n){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
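The kernel above walks the COO arrays four entries per iteration and finishes the
last nnz % 4 entries in a scalar remainder loop. A generic sketch of that manual
unrolling (hypothetical names; the real kernels additionally apply alpha and the
incx/incy strides):

    /* Plain COO y += A*x with 4-way manual unrolling plus a remainder loop. */
    static void coo_spmv_unrolled_sketch(int nnz, const int *IA, const int *JA,
                                         const float *VA, const float *x, float *y)
    {
    	int n;
    	for (n = 0; n + 3 < nnz; n += 4) {   /* main body: four entries per trip */
    		y[IA[n + 0]] += VA[n + 0] * x[JA[n + 0]];
    		y[IA[n + 1]] += VA[n + 1] * x[JA[n + 1]];
    		y[IA[n + 2]] += VA[n + 2] * x[JA[n + 2]];
    		y[IA[n + 3]] += VA[n + 3] * x[JA[n + 3]];
    	}
    	for (; n < nnz; ++n)                 /* remainder: the last nnz % 4 entries */
    		y[IA[n]] += VA[n] * x[JA[n]];
    }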
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the strides of the x and y vectors.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		i=IA[n+1 ]; j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		i=IA[n+2 ]; j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		i=IA[n+3 ]; j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+	}
+	for(     ;n<nnz;++n){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
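The _H_ kernels differ from their _C_ twins only in reinterpreting bindx/bpntr as
rsb_half_idx_t, i.e. narrow indices for submatrices small enough to be addressed
that way, which roughly halves the index bytes moved per nonzero. A toy
illustration of the idea (assuming, as the name suggests, that rsb_half_idx_t is a
16-bit unsigned type):

    #include <stdint.h>

    /* The same COO walk, but with 16-bit local indices: half the index traffic. */
    static void coo_spmv_half_idx_sketch(int nnz, const uint16_t *IA,
                                         const uint16_t *JA, const float *VA,
                                         const float *x, float *y)
    {
    	for (int n = 0; n < nnz; ++n)
    		y[IA[n]] += VA[n] * x[JA[n]];   /* indices widen to int implicitly */
    }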
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the strides of the x and y vectors.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		i=IA[n+1 ]; j=JA[n+1 ];
+		out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+		i=IA[n+2 ]; j=JA[n+2 ];
+		out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+		i=IA[n+3 ]; j=JA[n+3 ];
+		out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+	}
+	for(     ;n<nnz;++n){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
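The two pointer adjustments at the top of the transposed kernels deserve a note.
Judging from the untransposed kernels, rhs arrives biased by the block's column
offset coff and out by its row offset roff; the transposed kernel gathers by row
index and scatters by column index, so it swaps the two biases. A tiny
self-contained check of that identity (hypothetical offsets and names):

    #include <assert.h>

    /* Demonstrates the pointer-rebasing identity used by the A^T kernels. */
    static void rebase_demo(void)
    {
    	float x[8];
    	const int roff = 3, coff = 1, incx = 1;
    	const float *rhs = x + coff*incx;      /* biased for the untransposed case */
    	rhs = (rhs - coff*incx) + roff*incx;   /* swap the bias, as the kernel does */
    	assert(rhs == x + roff*incx);
    }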
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the strides of the x and y vectors.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		i=IA[n+1 ]; j=JA[n+1 ];
+		out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+		i=IA[n+2 ]; j=JA[n+2 ];
+		out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+		i=IA[n+3 ]; j=JA[n+3 ];
+		out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+	}
+	for(     ;n<nnz;++n){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the strides of the x and y vectors.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		i=IA[n+1 ]; j=JA[n+1 ];
+		out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+		i=IA[n+2 ]; j=JA[n+2 ];
+		out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+		i=IA[n+3 ]; j=JA[n+3 ];
+		out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+	}
+	for(     ;n<nnz;++n){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the strides of the x and y vectors.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		i=IA[n+1 ]; j=JA[n+1 ];
+		out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+		i=IA[n+2 ]; j=JA[n+2 ];
+		out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+		i=IA[n+3 ]; j=JA[n+3 ];
+		out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+	}
+	for(     ;n<nnz;++n){
+		i=IA[n+0 ]; j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the strides of the x and y vectors.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+incx*(roff-coff);	/* S: rhs re-based for the mirrored (transposed) updates */
+	float *tout=out+incy*(coff-roff);	/* out re-based likewise */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+		i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+		i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+		i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+	for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
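The roff == coff branch above is the diagonal block of the symmetric matrix: there
the mirrored update would count diagonal entries twice, hence the i != j guard;
blocks with roff != coff cannot contain diagonal entries, so their loop drops the
test and unrolls instead. A two-entry worked example (hypothetical data):

    /* Diagonal block of the symmetric A = [[2,1],[1,0]], stored as the upper
     * COO entries (0,0)=2 and (0,1)=1. With x = (1,1), A*x = (3,1). */
    static void sym_diag_block_demo(float *y /* y[2], zeroed on entry */)
    {
    	const int   IA[] = {0, 0}, JA[] = {0, 1}, nnz = 2;
    	const float VA[] = {2.0f, 1.0f}, x[] = {1.0f, 1.0f};
    	for (int n = 0; n < nnz; ++n) {
    		int i = IA[n], j = JA[n];
    		y[i] += VA[n] * x[j];        /* y becomes (2,0), then (3,0)   */
    		if (i != j)
    			y[j] += VA[n] * x[i];    /* mirrors only (0,1): y = (3,1) */
    	}
    	/* Without the guard, the diagonal entry (0,0) would count twice. */
    }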
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the strides of the x and y vectors.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+incx*(roff-coff);	/* S: rhs re-based for the mirrored (transposed) updates */
+	float *tout=out+incy*(coff-roff);	/* out re-based likewise */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+		i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+		i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+		i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+	for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the strides of the x and y vectors.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the strides of the x and y vectors.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the strides of the x and y vectors.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the strides of the x and y vectors.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sS_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the strides of the x and y vectors.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+incx*(roff-coff);	/* H: as in the symmetric case (conjugation is a no-op for float) */
+	float *tout=out+incy*(coff-roff);	/* out re-based likewise */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+		i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+		i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+		i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+	for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the strides of the x and y vectors.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+incx*(roff-coff);	/* H: as in the symmetric case (conjugation is a no-op for float) */
+	float *tout=out+incy*(coff-roff);	/* out re-based likewise */
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+		i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+		i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+		i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+	for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the strides of the x and y vectors.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the strides of the x and y vectors.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the strides of the x and y vectors.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the strides of the x and y vectors.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sH_dI_uG
+(VA
+,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy
+);
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n<0 || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* n<0: exhausted in the backward sweep */
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
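RSB_LIKELY and RSB_UNLIKELY in these solvers are branch-prediction hints; their
exact definition is not shown in this hunk, but such macros are typically thin
wrappers over a compiler builtin, along these lines (a sketch with hypothetical
names, not the project's actual definition):

    /* Typical GCC-style branch-hint macros. */
    #if defined(__GNUC__)
    #define MY_LIKELY(e)   __builtin_expect(!!(e), 1)
    #define MY_UNLIKELY(e) __builtin_expect(!!(e), 0)
    #else
    #define MY_LIKELY(e)   (e)
    #define MY_UNLIKELY(e) (e)
    #endif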
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
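The uU/uL pair above performs back- and forward-substitution directly on sorted
COO data: each row's strictly-triangular entries are gathered into an accumulator,
then the row is closed by a division by its diagonal entry, whose presence and
nonzeroness the guard checks. A compact sketch of the forward (uL) direction
(hypothetical layout inferred from the loop: each row lists its strictly-lower
entries first and its diagonal last; x holds b on entry):

    /* Forward substitution: solve L*x = alpha*b on row-sorted COO data. */
    static int coo_fwd_subst_sketch(int m, int nnz, const int *IA, const int *JA,
                                    const float *VA, float alpha, float *x)
    {
    	int n = 0;
    	for (int ii = 0; ii < m; ++ii) {
    		float ax = 0.0f;
    		while (n < nnz && IA[n] == ii && JA[n] != ii) {  /* strictly lower part */
    			ax += VA[n] * x[JA[n]];
    			++n;
    		}
    		if (n == nnz || VA[n] == 0.0f)   /* missing or zero diagonal */
    			return -1;
    		x[ii] = (alpha * x[ii] - ax) / VA[n];
    		++n;
    	}
    	return 0;
    }

The uU variant is the exact mirror: it walks both the rows and the nonzeroes
backwards, which is why its exhaustion condition is n reaching -1 rather than nnz.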
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n<0 || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* n<0: exhausted in the backward sweep */
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* n<0: exhausted in the backward sweep */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
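Unlike the tN solvers, these transposed (tT) solvers work column-wise: the moment
x[ii] is final its contribution is scattered into every later unknown, which is
why the diagonal is divided out first, as the leading entry of each row, and the
subtraction happens afterwards. In sketch form (hypothetical layout inferred from
the loop: each row stores its diagonal entry first; x holds b on entry; the alpha
scaling of the real kernels is omitted):

    /* Solve U^T*x = b column-wise on row-sorted COO data. */
    static int coo_tsolve_sketch(int m, int nnz, const int *IA, const int *JA,
                                 const float *VA, float *x)
    {
    	int n = 0;
    	for (int ii = 0; ii < m; ++ii) {
    		if (n == nnz || VA[n] == 0.0f)
    			return -1;                    /* missing or zero diagonal */
    		x[ii] /= VA[n];                   /* x[ii] is now final       */
    		++n;
    		while (n < nnz && IA[n] == ii) {  /* rest of row ii           */
    			x[JA[n]] -= VA[n] * x[ii];    /* scatter to later columns */
    			++n;
    		}
    	}
    	return 0;
    }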
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* n<0: exhausted in the backward sweep */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
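One detail worth spelling out: the two solver families apply alpha at different
points, the gather (tN) form inside each row's closing division and the scatter
(tT) form only once a row is finished, yet by linearity of the triangular solve
both compute the same vector:

    x = \alpha\, T^{-1} b
    \qquad\Longleftrightarrow\qquad
    \text{solve } T z = b \text{ first, then scale } x = \alpha z .

The scatter form can postpone the scaling because, once row ii is closed, no later
row ever reads out[ii] again.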
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* n<0: exhausted in the backward sweep */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* n<0: exhausted in the backward sweep */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n<0 || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* n<0: exhausted in the backward sweep */
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n<0 || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* n<0: exhausted in the backward sweep */
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* n<0: exhausted in the backward sweep */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
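+
+/*
+ * Note: the `uL' variant above is the same scatter loop run in reverse:
+ * `n' starts at nnz-1 and `ii' at Mdim-1, so for a lower-triangular matrix
+ * (whose transpose is upper triangular) the unknowns are resolved from the
+ * last row back to the first, each one being pushed into the right-hand
+ * sides of the rows that precede it.
+ */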
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a zero or missing diagonal element.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a zero or missing diagonal element.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* ran past the first nonzero: no pivot left */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a zero or missing diagonal element.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a zero or missing diagonal element.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* ran past the first nonzero: no pivot left */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a zero or missing diagonal element.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a zero or missing diagonal element.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* ran past the first nonzero: no pivot left */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
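+
+/*
+ * Note: for the real `float' type conjugation is the identity, so the
+ * bodies of these `tC' (apparently conjugate-transpose) kernels are
+ * generated identical to the `tT' transpose kernels above; only the
+ * symbol names differ.
+ */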
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a zero or missing diagonal element.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==-1 || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* underflow in the reverse scan: no pivot */
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
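+
+/*
+ * Note: the `sH' kernels in this run have bodies identical to the `sS'
+ * ones above; with real `float' data a Hermitian-storage variant
+ * (presumably what the suffix selects) cannot differ from the symmetric
+ * one, so the generator emits the same loops under both names.
+ */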
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a zero or missing diagonal element.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a zero or missing diagonal element.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==-1 || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* underflow in the reverse scan: no pivot */
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a zero or missing diagonal element.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a zero or missing diagonal element.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a zero or missing diagonal element.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* ran past the first nonzero: no pivot left */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a zero or missing diagonal element.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a zero or missing diagonal element.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* ran past the first nonzero: no pivot left */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a zero or missing diagonal element.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a zero or missing diagonal element.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* ran past the first nonzero: no pivot left */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a zero or missing diagonal element.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		float aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA on a zero or missing diagonal element.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		float aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* ran past the first nonzero: no pivot left */
+		aa=VA[n];
+		if(VA[n]==((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
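+
+/*
+ * Note on the `dI' (implicit, i.e. unit, diagonal) kernels: no pivot is
+ * fetched or divided by, so no numerical-data check is needed and the
+ * recurrence degenerates to x_i = alpha*b_i - (sum over the stored
+ * off-diagonals of row i of a(i,j)*x_j).  A minimal sketch of the forward
+ * case, with hypothetical names, assuming no diagonal entries are stored:
+ *
+ * \code
+ * // Unit-diagonal forward substitution, in place; cannot fail.
+ * static void coo_trsv_unit_lower(const float *VA, const int *IA,
+ *                                 const int *JA, int nr, int nnz,
+ *                                 float alpha, float *out)
+ * {
+ * 	int n = 0, ii;
+ * 	for (ii = 0; ii < nr; ++ii) {
+ * 		float ax = 0;
+ * 		for (; n < nnz && IA[n] == ii; ++n)
+ * 			ax += VA[n] * out[JA[n]];  // gather earlier x_j
+ * 		out[ii] = alpha * out[ii] - ax;    // implicit pivot of 1
+ * 	}
+ * }
+ * \endcode
+ */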
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
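+
+/*
+ * Note: in the transposed unit-diagonal case the pivot division disappears
+ * as well: the current value of out[ii] is scattered into the remaining
+ * right-hand sides as-is, and the multiplication by alpha is again deferred
+ * to the end of the row, preserving the alpha-times-the-solve semantics of
+ * the explicit-diagonal kernels.
+ */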
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
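+/*
+ * Editorial aside: the _H_ kernel variants reinterpret the index arrays as
+ * narrower rsb_half_idx_t entries, halving index memory traffic; this is
+ * presumably safe only because indices are local to the submatrix (offset
+ * by roff/coff) and known to fit the narrower type. A hedged sketch of the
+ * access pattern, using hypothetical demo_* typedefs in place of the librsb
+ * ones, and assuming the buffer really was populated with 16-bit indices:
+ */
+#if 0
+#include <stdint.h>
+#include <stddef.h>
+typedef uint16_t demo_half_idx_t;	/* stand-in for rsb_half_idx_t */
+static float demo_dot_half_indexed(const float *VA, const void *bindx,
+                                   const float *x, size_t nnz)
+{
+	/* reinterpret the index buffer as 16-bit entries, as the _H_ kernels do */
+	const demo_half_idx_t *JA = (const demo_half_idx_t*)bindx;
+	float s = 0.0f;
+	size_t n;
+	for(n = 0; n < nnz; ++n)
+		s += VA[n] * x[JA[n]];	/* gather through narrow indices */
+	return s;
+}
+#endif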
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
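+/*
+ * Editorial aside: the non-transposed (tN) kernels above use the dual
+ * "gather" formulation: walking rows from the last one upward, ax collects
+ * the already-known contributions VA[n]*x_j of the current row, and the
+ * implicit unit diagonal then gives x_ii directly. A minimal sketch under
+ * the same assumptions as before (demo_* names are illustrative only):
+ */
+#if 0
+#include <assert.h>
+static void demo_spsv_gather(void)
+{
+	/* Strict upper triangle of A = [1 2 3; 0 1 4; 0 0 1], row-sorted COO;
+	 * solving A x = b by backward substitution. */
+	const float VA[] = { 2.0f, 3.0f, 4.0f };
+	const int IA[] = { 0, 0, 1 }, JA[] = { 1, 2, 2 };
+	float out[] = { 6.0f, 5.0f, 1.0f };	/* b = A * (1,1,1)^T */
+	const float alpha = 1.0f;
+	int n = 2, ii;
+
+	for(ii = 2; ii >= 0; --ii)
+	{
+		float ax = 0.0f;
+		for(; n >= 0 && IA[n] == ii; --n)
+			ax += VA[n] * out[JA[n]];	/* gather known x_j, j > ii */
+		out[ii] = alpha * out[ii] - ax;
+	}
+	assert(out[0] == 1.0f && out[1] == 1.0f && out[2] == 1.0f);
+}
+#endif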
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tN_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;	
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
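+/*
+ * Editorial aside: these kernels only accumulate the per-row absolute sums
+ * into row_sums (shifted by roff for the submatrix position); the final
+ * reduction \|A\|_{\infty} = max_i row_sums_i is left to the caller. A
+ * minimal sketch of both steps (demo-only code, plain int indices):
+ */
+#if 0
+#include <math.h>
+#include <assert.h>
+static void demo_infty_norm(void)
+{
+	/* A = [1 -2; 0 3] in COO form */
+	const float VA[] = { 1.0f, -2.0f, 3.0f };
+	const int IA[] = { 0, 0, 1 };
+	float row_sums[2] = { 0.0f, 0.0f }, norm = 0.0f;
+	int n;
+
+	for(n = 0; n < 3; ++n)
+		row_sums[IA[n]] += fabsf(VA[n]);	/* as in the kernel above */
+	for(n = 0; n < 2; ++n)
+		if(row_sums[n] > norm)
+			norm = row_sums[n];	/* caller-side max reduction */
+	assert(norm == 3.0f);	/* max(|1|+|-2|, |3|) */
+}
+#endif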
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tN_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;	
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tT_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tT_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tC_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tC_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tN_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
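+/*
+ * Editorial aside: with symmetric (sS) storage only one triangle is kept, so
+ * each off-diagonal entry must contribute to the row sums of both its row
+ * and its mirrored row; the roff+i != coff+j guard above keeps diagonal
+ * entries from being counted twice. A minimal sketch (demo-only code):
+ */
+#if 0
+#include <math.h>
+#include <assert.h>
+static void demo_infty_norm_sym(void)
+{
+	/* lower triangle of symmetric A = [1 2; 2 3] in COO form */
+	const float VA[] = { 1.0f, 2.0f, 3.0f };
+	const int IA[] = { 0, 1, 1 }, JA[] = { 0, 0, 1 };
+	float row_sums[2] = { 0.0f, 0.0f };
+	int n;
+
+	for(n = 0; n < 3; ++n)
+	{
+		row_sums[IA[n]] += fabsf(VA[n]);
+		if(IA[n] != JA[n])	/* mirror each off-diagonal entry once */
+			row_sums[JA[n]] += fabsf(VA[n]);
+	}
+	assert(row_sums[0] == 3.0f && row_sums[1] == 5.0f);
+}
+#endif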
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tN_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tT_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tT_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tC_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tC_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tN_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tN_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tT_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tT_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tC_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tC_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tN_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;	
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tN_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;	
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tT_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tT_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tC_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tC_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tN_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tN_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tT_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tT_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tC_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tC_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tN_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tN_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tT_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tT_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tC_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tC_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=fabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=fabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
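+
+/*
+ * Editorial note: for a real scalar type such as 'float', conjugation is a
+ * no-op, so the hermitian ('sH') kernels above are computationally identical
+ * to their symmetric ('sS') counterparts; the distinction becomes meaningful
+ * only in the complex-typed instances of these kernels.
+ */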
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tN_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
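+
+/*
+ * Editorial note: a compiled-out sketch of the unsymmetric row-sums pattern
+ * above, with plain C types standing in for the rsb_* ones. Note that,
+ * unlike the infinity-norm kernels, these kernels accumulate the signed
+ * values, so the result is the vector of row sums rather than of absolute
+ * row sums.
+ */
+#if 0
+static void coo_row_sums(const float *VA, const int *IA, int nnz,
+                         int roff, float *row_sums)
+{
+	int n;
+	/* one pass over the nonzeroes; roff relocates the local row indices
+	 * of this submatrix into the global row_sums array, as above */
+	for(n=0;n<nnz;++n)
+		row_sums[roff + IA[n]] += VA[n];
+}
+#endif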
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tN_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tT_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
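+
+/*
+ * Editorial note: the transposed ('tT'/'tC') unsymmetric variants use the
+ * same one-pass scheme, but accumulate on the column index, yielding the
+ * row sums of A^T (equivalently, the column sums of A). A compiled-out
+ * sketch with plain C types:
+ */
+#if 0
+static void coo_col_sums(const float *VA, const int *JA, int nnz,
+                         int coff, float *row_sums)
+{
+	int n;
+	for(n=0;n<nnz;++n)
+		row_sums[coff + JA[n]] += VA[n]; /* column j of A = row j of A^T */
+}
+#endif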
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tT_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tC_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tC_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tN_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
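+
+/*
+ * Editorial note: in the symmetric kernels the diagonal test compares the
+ * *global* coordinates (roff+i versus coff+j), not the local ones: when the
+ * kernel operates on an off-diagonal submatrix of a larger RSB matrix, a
+ * local pair with i == j need not lie on the global diagonal, and must then
+ * be mirrored like any other off-diagonal entry.
+ */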
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tN_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tT_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tT_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tC_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tC_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tN_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tN_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tT_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tT_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tC_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tC_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tN_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
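+
+/*
+ * Editorial note: the '_dI_' (diagonal implicit) kernels traverse only the
+ * stored nonzeroes, exactly like their '_dE_' (diagonal explicit)
+ * counterparts; the contribution of the implicitly represented diagonal is
+ * presumably accounted for outside these loops (an inference from the naming
+ * convention, not something stated in this file).
+ */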
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tN_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
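+
+/*
+ * Editorial note: the '_H_' kernels reinterpret the index arrays as the
+ * narrower rsb_half_idx_t, halving index storage and memory traffic for
+ * submatrices small enough to be addressed by it; the accumulation logic is
+ * otherwise identical to the '_C_' (rsb_coo_idx_t) kernels. A compiled-out
+ * sketch of the reinterpretation, with unsigned short standing in for
+ * rsb_half_idx_t (an assumption, not the library's definition):
+ */
+#if 0
+static void half_idx_row_sums(const float *VA, const void *bpntr, int nnz,
+                              int roff, float *row_sums)
+{
+	const unsigned short *IA = (const unsigned short*)bpntr;
+	int n;
+	for(n=0;n<nnz;++n)
+		row_sums[roff + IA[n]] += VA[n];
+}
+#endif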
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tT_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tT_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tC_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tC_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tN_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tN_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tT_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tT_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tC_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tC_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tN_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tN_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tT_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tT_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tC_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tC_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tN_r1_c1_uu_sU_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, \; P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
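+
+/*
+ * Editorial note: a compiled-out sketch of the scaling pattern above, with
+ * plain C types standing in for the rsb_* ones. Each stored value is
+ * multiplied by the factor of its row ('tN' variants) or of its column
+ * ('tT'/'tC' variants), applying the diagonal matrix P to the operated side
+ * of A in place.
+ */
+#if 0
+static void coo_row_scale(float *VA, const int *IA, int nnz,
+                          const float *scale_factors)
+{
+	int n;
+	for(n=0;n<nnz;++n)
+		VA[n] *= scale_factors[IA[n]]; /* entry in row i picks factor s_i */
+}
+#endif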
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tN_r1_c1_uu_sU_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, \; P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tT_r1_c1_uu_sU_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, \; P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tT_r1_c1_uu_sU_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, \; P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tC_r1_c1_uu_sU_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, \; P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tC_r1_c1_uu_sU_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, \; P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tN_r1_c1_uu_sS_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, \; P_{ii}=s_{i} \f$, where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tN_r1_c1_uu_sS_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, \; P_{ii}=s_{i} \f$, where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tT_r1_c1_uu_sS_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, \; P_{ii}=s_{i} \f$, where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tT_r1_c1_uu_sS_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, \; P_{ii}=s_{i} \f$, where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tC_r1_c1_uu_sS_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, \; P_{ii}=s_{i} \f$, where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tC_r1_c1_uu_sS_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, \; P_{ii}=s_{i} \f$, where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tN_r1_c1_uu_sH_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, \; P_{ii}=s_{i} \f$, where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tN_r1_c1_uu_sH_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii} = s_i\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;	
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tT_r1_c1_uu_sH_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii} = s_i\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tT_r1_c1_uu_sH_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii} = s_i\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tC_r1_c1_uu_sH_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii} = s_i\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tC_r1_c1_uu_sH_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii} = s_i\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tN_r1_c1_uu_sU_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii} = s_i\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;	
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tN_r1_c1_uu_sU_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii} = s_i\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;	
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tT_r1_c1_uu_sU_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii} = s_i\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tT_r1_c1_uu_sU_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii} = s_i\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tC_r1_c1_uu_sU_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii} = s_i\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tC_r1_c1_uu_sU_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii} = s_i\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tN_r1_c1_uu_sS_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii} = s_i\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;	
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tN_r1_c1_uu_sS_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii} = s_i\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;	
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tT_r1_c1_uu_sS_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii} = s_i\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tT_r1_c1_uu_sS_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii} = s_i\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tC_r1_c1_uu_sS_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii} = s_i\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tC_r1_c1_uu_sS_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii} = s_i\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tN_r1_c1_uu_sH_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii} = s_i\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;	
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tN_r1_c1_uu_sH_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii} = s_i\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;	
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tT_r1_c1_uu_sH_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii} = s_i\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tT_r1_c1_uu_sH_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii} = s_i\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tC_r1_c1_uu_sH_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii} = s_i\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tC_r1_c1_uu_sH_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii} = s_i\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[i*1]+=VA[n+0]*rhs[j*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[i*1]+=VA[n+1]*rhs[j*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[i*1]+=VA[n+2]*rhs[j*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[i*1]+=VA[n+3]*rhs[j*1];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[i*1]+=VA[n]*rhs[j*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
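+
+/*
+ * The body above is a 4-way manually unrolled COO SpMV with a scalar
+ * remainder loop. A minimal self-contained sketch of the same pattern
+ * (a hypothetical helper, not part of the generated kernel set):
+ *
+ *	static void coo_spmv_unrolled(const float complex *VA,
+ *			const int *IA, const int *JA, int nnz,
+ *			const float complex *x, float complex *y)
+ *	{
+ *		int n = 0;
+ *		for(; n+3 < nnz; n += 4){	// four updates per iteration
+ *			y[IA[n+0]] += VA[n+0]*x[JA[n+0]];
+ *			y[IA[n+1]] += VA[n+1]*x[JA[n+1]];
+ *			y[IA[n+2]] += VA[n+2]*x[JA[n+2]];
+ *			y[IA[n+3]] += VA[n+3]*x[JA[n+3]];
+ *		}
+ *		for(; n < nnz; ++n)		// remaining nnz % 4 entries
+ *			y[IA[n]] += VA[n]*x[JA[n]];
+ *	}
+ *
+ * Unrolling amortizes the loop-control overhead over four updates.
+ */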
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[i*1]+=VA[n+0]*rhs[j*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[i*1]+=VA[n+1]*rhs[j*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[i*1]+=VA[n+2]*rhs[j*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[i*1]+=VA[n+3]*rhs[j*1];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[i*1]+=VA[n]*rhs[j*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
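+
+/*
+ * The _H_ kernels differ from the corresponding _C_ kernels only in
+ * reinterpreting bindx/bpntr as arrays of the narrower rsb_half_idx_t:
+ *
+ *	const rsb_half_idx_t *JA = (const rsb_half_idx_t*)bindx;
+ *	j = JA[n];	// same logic, half the index bytes per nonzero
+ *
+ * The rationale is an assumption here: a narrower index type halves the
+ * index-array traffic of a memory-bound kernel, and is applicable only
+ * when every index of the submatrix fits the half-width type.
+ */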
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[j*1]+=VA[n+0]*rhs[i*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*1]+=VA[n+1]*rhs[i*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*1]+=VA[n+2]*rhs[i*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*1]+=VA[n+3]*rhs[i*1];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[j*1]+=VA[n]*rhs[i*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
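+
+/*
+ * The transposed kernels rebase the vector pointers before the loop:
+ *
+ *	rhs = (rhs - coff*(1)) + roff*(1);
+ *	out = (out - roff*(1)) + coff*(1);
+ *
+ * Assuming the caller passes rhs/out already shifted by this
+ * submatrix's column and row offsets, transposition swaps the roles of
+ * roff and coff: e.g. with roff=8 and coff=2 the net effect is
+ * rhs += 6 and out -= 6. The *(1) factors are the unit block stride of
+ * these r1/c1 kernels.
+ */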
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[j*1]+=VA[n+0]*rhs[i*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*1]+=VA[n+1]*rhs[i*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*1]+=VA[n+2]*rhs[i*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*1]+=VA[n+3]*rhs[i*1];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[j*1]+=VA[n]*rhs[i*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[j*1]+=conjf(VA[n+0])*rhs[i*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*1]+=conjf(VA[n+1])*rhs[i*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*1]+=conjf(VA[n+2])*rhs[i*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*1]+=conjf(VA[n+3])*rhs[i*1];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[j*1]+=conjf(VA[n])*rhs[i*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[j*1]+=conjf(VA[n+0])*rhs[i*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*1]+=conjf(VA[n+1])*rhs[i*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*1]+=conjf(VA[n+2])*rhs[i*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*1]+=conjf(VA[n+3])*rhs[i*1];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[j*1]+=conjf(VA[n])*rhs[i*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[i*1]+=VA[n+0]*rhs[j*1];
+			tout[j*1]+=VA[n+0]*trhs[i*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[i*1]+=VA[n+1]*rhs[j*1];
+			tout[j*1]+=VA[n+1]*trhs[i*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[i*1]+=VA[n+2]*rhs[j*1];
+			tout[j*1]+=VA[n+2]*trhs[i*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[i*1]+=VA[n+3]*rhs[j*1];
+			tout[j*1]+=VA[n+3]*trhs[i*1];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[i*1]+=VA[n]*rhs[j*1];
+			tout[j*1]+=VA[n]*trhs[i*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
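+
+/*
+ * Shape of the symmetric (sS) kernels: only one triangle is stored, so
+ * each stored off-diagonal a_ij must update both y_i (+= a_ij*x_j) and
+ * y_j (+= a_ij*x_i). On a diagonal block (roff==coff) the i!=j guard
+ * keeps a diagonal entry from being applied twice; on an off-diagonal
+ * block the mirrored update goes through the rebased views trhs/tout.
+ * A toy check with hypothetical values: for the single stored entry
+ * a_10=2 and x = {1,3}, the two updates give y_1 += 2*1 and y_0 += 2*3,
+ * exactly A*x for the full symmetric A = [[0,2],[2,0]].
+ */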
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[i*1]+=VA[n+0]*rhs[j*1];
+			tout[j*1]+=VA[n+0]*trhs[i*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[i*1]+=VA[n+1]*rhs[j*1];
+			tout[j*1]+=VA[n+1]*trhs[i*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[i*1]+=VA[n+2]*rhs[j*1];
+			tout[j*1]+=VA[n+2]*trhs[i*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[i*1]+=VA[n+3]*rhs[j*1];
+			tout[j*1]+=VA[n+3]*trhs[i*1];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[i*1]+=VA[n]*rhs[j*1];
+			tout[j*1]+=VA[n]*trhs[i*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[j*1]+=VA[n+0]*rhs[i*1];
+			tout[i*1]+=VA[n+0]*trhs[j*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*1]+=VA[n+1]*rhs[i*1];
+			tout[i*1]+=VA[n+1]*trhs[j*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*1]+=VA[n+2]*rhs[i*1];
+			tout[i*1]+=VA[n+2]*trhs[j*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*1]+=VA[n+3]*rhs[i*1];
+			tout[i*1]+=VA[n+3]*trhs[j*1];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[j*1]+=VA[n]*rhs[i*1];
+			tout[i*1]+=VA[n]*trhs[j*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[j*1]+=VA[n+0]*rhs[i*1];
+			tout[i*1]+=VA[n+0]*trhs[j*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*1]+=VA[n+1]*rhs[i*1];
+			tout[i*1]+=VA[n+1]*trhs[j*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*1]+=VA[n+2]*rhs[i*1];
+			tout[i*1]+=VA[n+2]*trhs[j*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*1]+=VA[n+3]*rhs[i*1];
+			tout[i*1]+=VA[n+3]*trhs[j*1];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[j*1]+=VA[n]*rhs[i*1];
+			tout[i*1]+=VA[n]*trhs[j*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[j*1]+=conjf(VA[n+0])*rhs[i*1];
+			tout[i*1]+=conjf(VA[n+0])*trhs[j*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*1]+=conjf(VA[n+1])*rhs[i*1];
+			tout[i*1]+=conjf(VA[n+1])*trhs[j*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*1]+=conjf(VA[n+2])*rhs[i*1];
+			tout[i*1]+=conjf(VA[n+2])*trhs[j*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*1]+=conjf(VA[n+3])*rhs[i*1];
+			tout[i*1]+=conjf(VA[n+3])*trhs[j*1];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[j*1]+=conjf(VA[n])*rhs[i*1];
+			tout[i*1]+=conjf(VA[n])*trhs[j*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[j*1]+=conjf(VA[n+0])*rhs[i*1];
+			tout[i*1]+=conjf(VA[n+0])*trhs[j*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*1]+=conjf(VA[n+1])*rhs[i*1];
+			tout[i*1]+=conjf(VA[n+1])*trhs[j*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*1]+=conjf(VA[n+2])*rhs[i*1];
+			tout[i*1]+=conjf(VA[n+2])*trhs[j*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*1]+=conjf(VA[n+3])*rhs[i*1];
+			tout[i*1]+=conjf(VA[n+3])*trhs[j*1];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[j*1]+=conjf(VA[n])*rhs[i*1];
+			tout[i*1]+=conjf(VA[n])*trhs[j*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=conjf(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[i*1]+=VA[n+0]*rhs[j*1];
+			tout[j*1]+=conjf(VA[n+0])*trhs[i*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[i*1]+=VA[n+1]*rhs[j*1];
+			tout[j*1]+=conjf(VA[n+1])*trhs[i*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[i*1]+=VA[n+2]*rhs[j*1];
+			tout[j*1]+=conjf(VA[n+2])*trhs[i*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[i*1]+=VA[n+3]*rhs[j*1];
+			tout[j*1]+=conjf(VA[n+3])*trhs[i*1];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[i*1]+=VA[n]*rhs[j*1];
+			tout[j*1]+=conjf(VA[n])*trhs[i*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
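+
+/*
+ * In the Hermitian (sH) kernels the mirrored update conjugates the
+ * stored value via conjf() from <complex.h>:
+ *
+ *	out[i] += VA[n]*rhs[j];		// direct contribution a_ij*x_j
+ *	out[j] += conjf(VA[n])*rhs[i];	// mirrored contribution conj(a_ij)*x_i
+ *
+ * consistent with A == A^H, whose (j,i) entry is conj(a_ij): e.g. a
+ * hypothetical stored a_10 = 1+2i acts as 1-2i in position (0,1).
+ */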
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=conjf(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[i*1]+=VA[n+0]*rhs[j*1];
+			tout[j*1]+=conjf(VA[n+0])*trhs[i*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[i*1]+=VA[n+1]*rhs[j*1];
+			tout[j*1]+=conjf(VA[n+1])*trhs[i*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[i*1]+=VA[n+2]*rhs[j*1];
+			tout[j*1]+=conjf(VA[n+2])*trhs[i*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[i*1]+=VA[n+3]*rhs[j*1];
+			tout[j*1]+=conjf(VA[n+3])*trhs[i*1];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[i*1]+=VA[n]*rhs[j*1];
+			tout[j*1]+=conjf(VA[n])*trhs[i*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[j*1]+=VA[n+0]*rhs[i*1];
+			tout[i*1]+=conjf(VA[n+0])*trhs[j*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*1]+=VA[n+1]*rhs[i*1];
+			tout[i*1]+=conjf(VA[n+1])*trhs[j*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*1]+=VA[n+2]*rhs[i*1];
+			tout[i*1]+=conjf(VA[n+2])*trhs[j*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*1]+=VA[n+3]*rhs[i*1];
+			tout[i*1]+=conjf(VA[n+3])*trhs[j*1];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[j*1]+=VA[n]*rhs[i*1];
+			tout[i*1]+=conjf(VA[n])*trhs[j*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[j*1]+=VA[n+0]*rhs[i*1];
+			tout[i*1]+=conjf(VA[n+0])*trhs[j*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*1]+=VA[n+1]*rhs[i*1];
+			tout[i*1]+=conjf(VA[n+1])*trhs[j*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*1]+=VA[n+2]*rhs[i*1];
+			tout[i*1]+=conjf(VA[n+2])*trhs[j*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*1]+=VA[n+3]*rhs[i*1];
+			tout[i*1]+=conjf(VA[n+3])*trhs[j*1];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[j*1]+=VA[n]*rhs[i*1];
+			tout[i*1]+=conjf(VA[n])*trhs[j*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[j*1]+=conjf(VA[n+0])*rhs[i*1];
+			tout[i*1]+=VA[n+0]*trhs[j*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*1]+=conjf(VA[n+1])*rhs[i*1];
+			tout[i*1]+=VA[n+1]*trhs[j*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*1]+=conjf(VA[n+2])*rhs[i*1];
+			tout[i*1]+=VA[n+2]*trhs[j*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*1]+=conjf(VA[n+3])*rhs[i*1];
+			tout[i*1]+=VA[n+3]*trhs[j*1];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[j*1]+=conjf(VA[n])*rhs[i*1];
+			tout[i*1]+=VA[n]*trhs[j*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[j*1]+=conjf(VA[n+0])*rhs[i*1];
+			tout[i*1]+=VA[n+0]*trhs[j*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*1]+=conjf(VA[n+1])*rhs[i*1];
+			tout[i*1]+=VA[n+1]*trhs[j*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*1]+=conjf(VA[n+2])*rhs[i*1];
+			tout[i*1]+=VA[n+2]*trhs[j*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*1]+=conjf(VA[n+3])*rhs[i*1];
+			tout[i*1]+=VA[n+3]*trhs[j*1];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[j*1]+=conjf(VA[n])*rhs[i*1];
+			tout[i*1]+=VA[n]*trhs[j*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[i*1]+=VA[n+0]*rhs[j*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[i*1]+=VA[n+1]*rhs[j*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[i*1]+=VA[n+2]*rhs[j*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[i*1]+=VA[n+3]*rhs[j*1];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[i*1]+=VA[n]*rhs[j*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
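The _H_ kernel above differs from its _C_ twin only in the index width: a leaf whose local dimensions fit a narrower integer keeps its bindx/bpntr arrays in rsb_half_idx_t, roughly halving index memory traffic, and the kernel simply reads them back through a cast. A sketch of that idea, assuming a 16-bit index type (uint16_t stands in for rsb_half_idx_t; the width is an assumption, not a definition taken from this file):

/* COO SpMV over narrow leaf-local indices, read via cast pointers. */
#include <complex.h>
#include <stdint.h>

static void coo_spmv_half(const void *bpntr, const void *bindx,
                          const float complex *VA, int nnz,
                          const float complex *x, float complex *y)
{
	const uint16_t *IA = (const uint16_t *)bpntr; /* narrow row indices */
	const uint16_t *JA = (const uint16_t *)bindx; /* narrow col indices */

	for (int n = 0; n < nnz; ++n)
		y[IA[n]] += VA[n] * x[JA[n]];
}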
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
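The transposed (tT) kernels reuse the same loop with the index roles swapped: i now gathers from the input vector and j scatters into the output. Since the caller apparently hands in pointers offset for the non-transposed case (rhs by the column offset coff, out by the row offset roff; this convention is an inference from the rebasing line above), the kernel rebases each pointer once instead of adding offsets per nonzero. A standalone sketch over full-length vectors:

/* y += A^T * x for a leaf submatrix with local COO indices whose rows
 * start at global offset roff and whose columns start at coff.
 * Hypothetical helper, not librsb's API. */
#include <complex.h>

static void coo_spmv_t_leaf(const int *IA, const int *JA,
                            const float complex *VA, int nnz,
                            int roff, int coff,
                            const float complex *x, float complex *y)
{
	/* rebase once so leaf-local indices address the right ranges */
	const float complex *rhs = x + roff; /* rhs[i] == x[roff+i] */
	float complex *out = y + coff;       /* out[j] == y[coff+j] */

	for (int n = 0; n < nnz; ++n)
		out[JA[n]] += VA[n] * rhs[IA[n]];
}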
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=conjf(VA[n+0])*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=conjf(VA[n+1])*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=conjf(VA[n+2])*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=conjf(VA[n+3])*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=conjf(VA[n+0])*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=conjf(VA[n+1])*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=conjf(VA[n+2])*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=conjf(VA[n+3])*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		tout[j*1]+=VA[n]*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
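The symmetric (sS) kernel above distinguishes two cases. On the diagonal (roff == coff) a single loop mirrors each strictly off-diagonal entry, guarded by i != j so diagonal entries are not applied twice. Off the diagonal the direct and mirrored updates touch disjoint vector ranges, so a second pair of rebased pointers, trhs/tout, serves the mirror with no guard at all. A sketch of the off-diagonal case, again assuming the caller convention inferred earlier (a hypothetical helper over full-length vectors):

/* Off-diagonal symmetric leaf: each stored A(i,j) contributes both
 * directly and as its transpose mirror; roff != coff guarantees the
 * two target ranges are disjoint. */
#include <complex.h>

static void coo_spmv_sym_leaf(const int *IA, const int *JA,
                              const float complex *VA, int nnz,
                              int roff, int coff,
                              const float complex *x, float complex *y)
{
	const float complex *rhs  = x + coff; /* direct:   j indexes x */
	const float complex *trhs = x + roff; /* mirrored: i indexes x */
	float complex *out  = y + roff;       /* direct:   i indexes y */
	float complex *tout = y + coff;       /* mirrored: j indexes y */

	for (int n = 0; n < nnz; ++n) {
		int i = IA[n], j = JA[n];
		out[i]  += VA[n] * rhs[j];  /* y[roff+i] += a * x[coff+j] */
		tout[j] += VA[n] * trhs[i]; /* y[coff+j] += a * x[roff+i] */
	}
}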
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		tout[j*1]+=VA[n]*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		tout[i*1]+=VA[n+0]*trhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		tout[i*1]+=VA[n+1]*trhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		tout[i*1]+=VA[n+2]*trhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+		tout[i*1]+=VA[n+3]*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		tout[i*1]+=VA[n]*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		tout[i*1]+=VA[n+0]*trhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		tout[i*1]+=VA[n+1]*trhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		tout[i*1]+=VA[n+2]*trhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+		tout[i*1]+=VA[n+3]*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		tout[i*1]+=VA[n]*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=conjf(VA[n+0])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+0])*trhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=conjf(VA[n+1])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+1])*trhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=conjf(VA[n+2])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+2])*trhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=conjf(VA[n+3])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+3])*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n])*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=conjf(VA[n+0])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+0])*trhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=conjf(VA[n+1])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+1])*trhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=conjf(VA[n+2])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+2])*trhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=conjf(VA[n+3])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+3])*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n])*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=conjf(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+0])*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+1])*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+2])*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+3])*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n])*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=conjf(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+0])*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+1])*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+2])*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+3])*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n])*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+0])*trhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+1])*trhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+2])*trhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+3])*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n])*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+0])*trhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+1])*trhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+2])*trhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+3])*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n])*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=conjf(VA[n+0])*rhs[i*1];
+		tout[i*1]+=VA[n+0]*trhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=conjf(VA[n+1])*rhs[i*1];
+		tout[i*1]+=VA[n+1]*trhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=conjf(VA[n+2])*rhs[i*1];
+		tout[i*1]+=VA[n+2]*trhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=conjf(VA[n+3])*rhs[i*1];
+		tout[i*1]+=VA[n+3]*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+		tout[i*1]+=VA[n]*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is applied at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=conjf(VA[n+0])*rhs[i*1];
+		tout[i*1]+=VA[n+0]*trhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=conjf(VA[n+1])*rhs[i*1];
+		tout[i*1]+=VA[n+1]*trhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=conjf(VA[n+2])*rhs[i*1];
+		tout[i*1]+=VA[n+2]*trhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=conjf(VA[n+3])*rhs[i*1];
+		tout[i*1]+=VA[n+3]*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+		tout[i*1]+=VA[n]*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
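The uauz variants differ from the uaua family in exactly one step: the destination is cleared first, so the same accumulating loop yields y = A*x instead of y = y + A*x. Here the clearing is delegated to rsb__cblas_Xscal with a NULL scaling factor, which this generated source appears to use as a scale-by-zero. A standalone sketch of the equivalent zero-then-accumulate pattern (a hypothetical helper, not librsb's routine):

/* y = A*x in COO form: clear y over the leaf's row extent, then run
 * the usual accumulation. */
#include <complex.h>
#include <string.h>

static void coo_spmv_zero(const int *IA, const int *JA,
                          const float complex *VA, int nnz,
                          int nrows, const float complex *x,
                          float complex *y)
{
	memset(y, 0, (size_t)nrows * sizeof *y); /* y <- 0       */
	for (int n = 0; n < nnz; ++n)            /* y <- y + A*x */
		y[IA[n]] += VA[n] * x[JA[n]];
}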
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=conjf(VA[n+0])*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=conjf(VA[n+1])*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=conjf(VA[n+2])*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=conjf(VA[n+3])*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x, where A \neq A^T. \f$
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=conjf(VA[n+0])*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=conjf(VA[n+1])*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=conjf(VA[n+2])*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=conjf(VA[n+3])*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		tout[j*1]+=VA[n]*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		tout[j*1]+=VA[n]*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		tout[i*1]+=VA[n+0]*trhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		tout[i*1]+=VA[n+1]*trhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		tout[i*1]+=VA[n+2]*trhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+		tout[i*1]+=VA[n+3]*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		tout[i*1]+=VA[n]*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		tout[i*1]+=VA[n+0]*trhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		tout[i*1]+=VA[n+1]*trhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		tout[i*1]+=VA[n+2]*trhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+		tout[i*1]+=VA[n+3]*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		tout[i*1]+=VA[n]*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=conjf(VA[n+0])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+0])*trhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=conjf(VA[n+1])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+1])*trhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=conjf(VA[n+2])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+2])*trhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=conjf(VA[n+3])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+3])*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n])*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=conjf(VA[n+0])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+0])*trhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=conjf(VA[n+1])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+1])*trhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=conjf(VA[n+2])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+2])*trhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=conjf(VA[n+3])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+3])*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n])*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=conjf(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+0])*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+1])*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+2])*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+3])*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n])*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=conjf(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+0])*trhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+1])*trhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+2])*trhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+3])*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n])*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
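+
+/*
+ * The trhs/tout pointers in the kernels above implement the mirrored update of
+ * the hermitian part when the block lies off the main diagonal (roff != coff):
+ * rebasing rhs by (roff-coff) and out by (coff-roff) makes trhs[i] and tout[j]
+ * address the positions that correspond to the transposed block.  Per nonzero,
+ * the pair of updates amounts to (a sketch, not generator output):
+ *
+ *   out[i]  += VA[n] * rhs[j];           direct term, a_ij
+ *   tout[j] += conjf(VA[n]) * trhs[i];   mirrored term, conj(a_ij) = a_ji
+ */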
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
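+
+/*
+ * The transposed kernels additionally rebase their vector pointers,
+ * rhs = (rhs-coff)+roff and out = (out-roff)+coff, since transposition swaps
+ * the roles of the block's row and column offsets: the input vector is read
+ * along the block's rows and the output written along its columns.
+ * Consistently, in the tT kernels above the zeroing pass runs over mdim
+ * rather than Mdim.
+ */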
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=conjf(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=conjf(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=conjf(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
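+
+/*
+ * For the hermitian (sH) kernels A^H = A, so this tC variant computes the same
+ * mapping as its tN counterpart; the generated bodies differ only in where
+ * conjf() is applied, so that the stored triangle and its mirror each receive
+ * the correctly conjugated coefficient.
+ */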
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=conjf(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=conjf(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=conjf(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
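+
+/*
+ * The loop pairs above are a generated four-way unroll: the first loop handles
+ * groups of four nonzeroes, the second picks up the 0..3 that remain.  The
+ * pattern, sketched over an arbitrary per-nonzero body:
+ *
+ *   for(n=0;n+3<nnz;n+=4){ BODY(n); BODY(n+1); BODY(n+2); BODY(n+3); }
+ *   for(     ;n<nnz;++n ){ BODY(n); }
+ *
+ * where BODY(n) is, for this kernel, out[IA[n]] += VA[n]*rhs[JA[n]].
+ */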
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
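+
+/*
+ * As the NOTE in these _dI kernels says, the implicit (unit) diagonal is not
+ * applied here: the bodies are identical to their _dE counterparts, and the
+ * caller is expected to add the diagonal contribution (conceptually,
+ * out[k] += rhs[k] for each row k of the block) on top of this result.
+ */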
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
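+
+/*
+ * A note on the zeroing step: the _uauz kernels start with
+ * rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX, n, NULL, out, 1).
+ * Judging from its use here, a NULL scaling factor makes this internal scal
+ * clear the first n entries of out (in effect: for(k=0;k<n;++k) out[k] = 0),
+ * which turns the accumulation loops below into y := op(A)*x rather than
+ * y := y + op(A)*x.
+ */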
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=conjf(VA[n+0 ])*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=conjf(VA[n+1 ])*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=conjf(VA[n+2 ])*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=conjf(VA[n+3 ])*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=conjf(VA[n+0 ])*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=conjf(VA[n+0 ])*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=conjf(VA[n+1 ])*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=conjf(VA[n+2 ])*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=conjf(VA[n+3 ])*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=conjf(VA[n+0 ])*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
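+
+/*
+ * Note that in the symmetric (sS) float complex kernels the mirrored update
+ * reuses VA[n] unconjugated, since A = A^T only asserts a_ji = a_ij; compare
+ * the hermitian (sH) kernels, whose mirror term takes conjf(VA[n]).
+ */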
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=conjf(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=conjf(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=conjf(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=conjf(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=conjf(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=conjf(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=conjf(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+0 ])*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+1 ])*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+2 ])*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+3 ])*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+0 ])*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=conjf(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+0 ])*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+1 ])*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+2 ])*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+3 ])*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=conjf(VA[n+0 ])*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=conjf(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=conjf(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=conjf(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=conjf(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=conjf(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=conjf(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=conjf(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float complex * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
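+
+/*
+ * Unlike the _uauz kernels above, the _uxua variants do not clear out[] first:
+ * they accumulate y := y + alpha*A*x, with alpha read through alphap.  A
+ * hypothetical call on a 1 x 1 toy matrix (the toy variable names below are
+ * illustrative, not part of the library; I is from <complex.h>):
+ *
+ *   const float complex VA[1] = { 1.0f + 2.0f*I };
+ *   const rsb_coo_idx_t IA[1] = { 0 }, JA[1] = { 0 };
+ *   const float complex x[1] = { 1.0f }, alpha = 2.0f;
+ *   float complex y[1] = { 0.0f };
+ *   rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sU_dE_uG(
+ *     VA, x, y, 1, 1, JA, IA, NULL, NULL, NULL, 1, 1, 0, 0, 0, 1, &alpha);
+ *
+ * after which y[0] would hold alpha*VA[0]*x[0].
+ */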
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float complex * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float complex * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float complex * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float complex * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*conjf(VA[n+1 ])*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*conjf(VA[n+2 ])*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*conjf(VA[n+3 ])*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float complex * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*conjf(VA[n+1 ])*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*conjf(VA[n+2 ])*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*conjf(VA[n+3 ])*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
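+
+/*
+ * In the conjugate-transpose _uxua bodies the per-entry factor is written
+ * (alpha)*conjf(VA[n]): only the matrix entry is conjugated, never alpha,
+ * matching the usual BLAS convention y := y + alpha*op(A)*x, where alpha
+ * scales the already-transposed operator.
+ */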
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float complex * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float complex * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
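+
+/*
+ * Editorial note: the `_H_' kernels are identical to the `_C_' ones
+ * except that bindx/bpntr hold narrower rsb_half_idx_t indices,
+ * roughly halving index bandwidth for submatrices whose local indices
+ * fit the smaller type.  A caller-side narrowing step could look like
+ * this sketch (the helper name is hypothetical):
+ * \code
+ * static void example_narrow_indices(const rsb_coo_idx_t *IA,
+ * 		rsb_half_idx_t *HIA, rsb_nnz_idx_t nnz)
+ * {
+ * 	rsb_nnz_idx_t n;
+ * 	for (n = 0; n < nnz; ++n)
+ * 		HIA[n] = (rsb_half_idx_t)IA[n]; // valid only if IA[n] fits
+ * }
+ * \endcode
+ */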
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
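+
+/*
+ * Editorial note: in the transposed (`tT') kernels the roles of rows
+ * and columns swap, so both vectors are rebased once before the loops:
+ * \code
+ * rhs = (rhs - coff*1) + roff*1; // rhs is now indexed by row
+ * out = (out - roff*1) + coff*1; // out is now indexed by column
+ * \endcode
+ * after which the loop bodies can keep the out[j] += ... * rhs[i] form.
+ */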
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*conjf(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*conjf(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*conjf(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
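+
+/*
+ * Editorial note: since \f$A = A^T\f$ in the `sS' kernels, the
+ * conjugate-transpose (`tC') variant satisfies
+ * \f$A^H x = \overline{A}\,x\f$, so it reuses the symmetric index
+ * mirroring and merely wraps every stored value in conjf().
+ */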
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*conjf(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*conjf(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*conjf(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*conjf(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conjf(VA[n+0 ])*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conjf(VA[n+1 ])*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conjf(VA[n+2 ])*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conjf(VA[n+3 ])*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conjf(VA[n+0 ])*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
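+
+/*
+ * Editorial note: in the Hermitian (`sH') kernels only the mirrored
+ * contribution is conjugated, because \f$A_{ji} = \overline{A_{ij}}\f$
+ * when \f$A = A^H\f$:
+ * \code
+ * out[i] += alpha * VA[n] * rhs[j];        // stored entry, as-is
+ * out[j] += alpha * conjf(VA[n]) * rhs[i]; // mirrored entry, conjugated
+ * \endcode
+ */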
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*conjf(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conjf(VA[n+0 ])*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conjf(VA[n+1 ])*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conjf(VA[n+2 ])*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conjf(VA[n+3 ])*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conjf(VA[n+0 ])*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*conjf(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*conjf(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*conjf(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*conjf(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*conjf(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*conjf(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
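+
+/*
+ * Editorial note: the unsymmetric (`sU') kernels are plain COO SpMV
+ * loops, manually unrolled four nonzeroes per iteration with a scalar
+ * cleanup loop for the nnz mod 4 remainder; correctness does not
+ * depend on nnz being a multiple of four.  Schematically:
+ * \code
+ * for (n = 0; n + 3 < nnz; n += 4) { // unrolled body: four updates
+ * 	out[IA[n+0]] += alpha * VA[n+0] * rhs[JA[n+0]];
+ * 	out[IA[n+1]] += alpha * VA[n+1] * rhs[JA[n+1]];
+ * 	out[IA[n+2]] += alpha * VA[n+2] * rhs[JA[n+2]];
+ * 	out[IA[n+3]] += alpha * VA[n+3] * rhs[JA[n+3]];
+ * }
+ * for (; n < nnz; ++n)               // remainder loop
+ * 	out[IA[n]] += alpha * VA[n] * rhs[JA[n]];
+ * \endcode
+ */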
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*conjf(VA[n+1 ])*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*conjf(VA[n+2 ])*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*conjf(VA[n+3 ])*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*conjf(VA[n+1 ])*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*conjf(VA[n+2 ])*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*conjf(VA[n+3 ])*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*conjf(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*conjf(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*conjf(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*conjf(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*conjf(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*conjf(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*conjf(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conjf(VA[n+0 ])*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conjf(VA[n+1 ])*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conjf(VA[n+2 ])*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conjf(VA[n+3 ])*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conjf(VA[n+0 ])*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*conjf(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conjf(VA[n+0 ])*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conjf(VA[n+1 ])*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conjf(VA[n+2 ])*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conjf(VA[n+3 ])*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conjf(VA[n+0 ])*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conjf(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*conjf(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*conjf(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*conjf(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*conjf(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*conjf(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*conjf(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
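+
+/*
+ * Illustrative sketch (not generated code): a minimal driver for the
+ * Hermitian uxua kernel defined just above, on a single 2 x 2 diagonal
+ * block (roff == coff == 0).  Only the strictly lower entry a21 is
+ * stored; the kernel mirrors it as conj(a21) itself, and the implicit
+ * unit diagonal is left to the caller, per the kernels' own note.
+ * Assumes this file's usual headers (complex.h and the rsb type
+ * definitions) are in scope, and the alphap tail parameter as
+ * reconstructed in the signatures above.
+ */
+static rsb_err_t rsb__BCOR_spmv_uxua_float_complex_herm_demo(void)
+{
+	const float complex VA[1] = { 1.0f + 1.0f*I };	/* a21 */
+	const rsb_half_idx_t IA[1] = { 1 }, JA[1] = { 0 };
+	const float complex x[2] = { 1.0f, 1.0f*I };
+	float complex y[2] = { 0.0f, 0.0f };
+	const float complex alpha = 1.0f;
+
+	/* bpntr carries the row indices, reinterpreted inside the kernel;
+	 * afterwards y == { 1+i, 1+i }. */
+	return rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sH_dI_uG(
+		VA, x, y, 2, 2, JA, (const rsb_nnz_idx_t*)IA,
+		NULL, NULL, NULL, 1, 1, 0, 0, 0, 1, &alpha);
+}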
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
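+
+/*
+ * Illustrative sketch (not generated code): the unua kernels fix the
+ * scaling at -1, i.e. they compute y <- y - A * x and take no alphap
+ * argument.  A minimal driver for the untransposed, unsymmetric kernel
+ * above on a 2 x 2 block, same header assumptions as the previous
+ * sketch.
+ */
+static rsb_err_t rsb__BCOR_spmv_unua_float_complex_demo(void)
+{
+	const float complex VA[3] = { 1.0f, 2.0f*I, 3.0f };	/* a11, a12, a22 */
+	const rsb_coo_idx_t IA[3] = { 0, 0, 1 }, JA[3] = { 0, 1, 1 };
+	const float complex x[2] = { 1.0f, 1.0f };
+	float complex y[2] = { 0.0f, 0.0f };
+
+	/* Afterwards y == { -(1 + 2i), -3 }. */
+	return rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_uu_sU_dE_uG(
+		VA, x, y, 2, 2, JA, (const rsb_nnz_idx_t*)IA,
+		NULL, NULL, NULL, 1, 1, 0, 0, 0, 3);
+}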
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
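+
+/*
+ * Illustrative sketch (not generated code): in the transposed (tT)
+ * kernels above the roles of the indices swap -- out is addressed by
+ * the column index and rhs by the row index -- and the rebase
+ * `rhs = (rhs - coff) + roff; out = (out - roff) + coff;` swaps which
+ * block offset each vector is measured against.  Stripped of the
+ * 4-way unrolling, the loop is equivalent to this reference form:
+ */
+static void rsb__BCOR_spmv_unua_tT_reference_demo(
+	const float complex *VA, const rsb_coo_idx_t *IA,
+	const rsb_coo_idx_t *JA, const float complex *rhs,
+	float complex *out, rsb_nnz_idx_t nnz)
+{
+	rsb_nnz_idx_t n;
+
+	for (n = 0; n < nnz; ++n)
+		out[JA[n]] -= VA[n] * rhs[IA[n]];	/* y <- y - A^T * x */
+}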
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*conjf(VA[n+1 ])*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*conjf(VA[n+2 ])*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*conjf(VA[n+3 ])*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*conjf(VA[n+1 ])*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*conjf(VA[n+2 ])*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*conjf(VA[n+3 ])*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
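+
+/*
+ * Illustrative sketch (not generated code): in the symmetric (sS)
+ * kernels above, a block with roff != coff lies off the main diagonal,
+ * so each stored entry (i, j) also stands for its mirror (j, i); trhs
+ * and tout are rhs and out shifted by (roff - coff) and (coff - roff),
+ * i.e. the same vectors seen from the mirrored block's frame.  In
+ * global coordinates (not the kernel's exact calling convention) the
+ * update amounts to:
+ */
+static void rsb__BCOR_spmv_unua_sym_reference_demo(
+	const float complex *VA, const rsb_coo_idx_t *IA,
+	const rsb_coo_idx_t *JA, const float complex *x,
+	float complex *y, rsb_nnz_idx_t nnz,
+	rsb_coo_idx_t roff, rsb_coo_idx_t coff)
+{
+	rsb_nnz_idx_t n;
+
+	for (n = 0; n < nnz; ++n)
+	{
+		y[roff + IA[n]] -= VA[n] * x[coff + JA[n]];	/* stored half */
+		if (roff != coff || IA[n] != JA[n])	/* don't double the diagonal */
+			y[coff + JA[n]] -= VA[n] * x[roff + IA[n]];	/* mirror */
+	}
+}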
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*conjf(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*conjf(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*conjf(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*conjf(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*conjf(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*conjf(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*conjf(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conjf(VA[n+0 ])*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conjf(VA[n+1 ])*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conjf(VA[n+2 ])*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conjf(VA[n+3 ])*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conjf(VA[n+0 ])*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
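+
+/*
+ * Illustrative sketch (not generated code): the Hermitian (sH) kernels
+ * above differ from the symmetric ones in one point -- the mirrored
+ * update applies conjf() to the stored value, since the implicit
+ * (j, i) entry of a Hermitian matrix is conj(a_ij).  In global
+ * coordinates, as in the symmetric sketch:
+ */
+static void rsb__BCOR_spmv_unua_herm_reference_demo(
+	const float complex *VA, const rsb_coo_idx_t *IA,
+	const rsb_coo_idx_t *JA, const float complex *x,
+	float complex *y, rsb_nnz_idx_t nnz,
+	rsb_coo_idx_t roff, rsb_coo_idx_t coff)
+{
+	rsb_nnz_idx_t n;
+
+	for (n = 0; n < nnz; ++n)
+	{
+		y[roff + IA[n]] -= VA[n] * x[coff + JA[n]];	/* stored half */
+		if (roff != coff || IA[n] != JA[n])	/* don't double the diagonal */
+			y[coff + JA[n]] -= conjf(VA[n]) * x[roff + IA[n]];	/* conjugated mirror */
+	}
+}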
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*conjf(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conjf(VA[n+0 ])*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conjf(VA[n+1 ])*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conjf(VA[n+2 ])*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conjf(VA[n+3 ])*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conjf(VA[n+0 ])*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*conjf(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*conjf(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*conjf(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*conjf(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*conjf(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*conjf(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
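+
+/*
+ * Illustrative sketch (not generated code): the dI ("diagonal
+ * implicit") kernels above note that the implicit unit diagonal is
+ * handled at the caller level.  For the unua case that caller-side
+ * step would reduce to subtracting x itself over the diagonal block;
+ * nr, the number of rows in that block, is a hypothetical name used
+ * only for illustration.
+ */
+static void rsb__BCOR_spmv_unua_implicit_diag_demo(
+	const float complex *x, float complex *y, rsb_coo_idx_t nr)
+{
+	rsb_coo_idx_t i;
+
+	for (i = 0; i < nr; ++i)
+		y[i] -= x[i];	/* implicit ones on the diagonal */
+}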
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*conjf(VA[n+1 ])*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*conjf(VA[n+2 ])*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*conjf(VA[n+3 ])*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*conjf(VA[n+1 ])*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*conjf(VA[n+2 ])*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*conjf(VA[n+3 ])*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*conjf(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*conjf(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*conjf(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// S
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*conjf(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*conjf(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*conjf(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*conjf(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conjf(VA[n+0 ])*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conjf(VA[n+1 ])*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conjf(VA[n+2 ])*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conjf(VA[n+3 ])*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conjf(VA[n+0 ])*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
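+
+/*
+ * Minimal standalone sketch (not part of librsb; compile separately) of the
+ * update scheme the unua sH kernel above implements: each stored entry of one
+ * triangle of a hermitian matrix contributes twice, once directly and once
+ * conjugated through the mirrored indices.
+ *
+ *   #include <complex.h>
+ *   #include <stdio.h>
+ *   int main(void)
+ *   {
+ *       // one stored off-diagonal entry (1,0) = 1+1i of a hermitian matrix
+ *       int IA[] = {1}, JA[] = {0};
+ *       float complex VA[] = {1.0f + 1.0f*I};
+ *       float complex x[] = {1.0f, 1.0f}, y[] = {0.0f, 0.0f};
+ *       for (int n = 0; n < 1; ++n) {
+ *           int i = IA[n], j = JA[n];
+ *           y[i] -= VA[n] * x[j];               // direct entry
+ *           if (i != j)
+ *               y[j] -= conjf(VA[n]) * x[i];    // conjugated mirror
+ *       }
+ *       // prints y = (-1+1i, -1-1i), i.e. y <- y - A x
+ *       printf("%g%+gi %g%+gi\n", crealf(y[0]), cimagf(y[0]),
+ *              crealf(y[1]), cimagf(y[1]));
+ *       return 0;
+ *   }
+ */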
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not handled here; it is accounted for at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*conjf(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conjf(VA[n+0 ])*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conjf(VA[n+1 ])*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conjf(VA[n+2 ])*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conjf(VA[n+3 ])*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conjf(VA[n+0 ])*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not handled here; it is accounted for at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*conjf(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conjf(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*conjf(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*conjf(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*conjf(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not handled here; it is accounted for at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const float complex *trhs = rhs+1*(roff-coff);// H
+	float complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*conjf(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*conjf(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*conjf(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*conjf(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conjf(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
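+
+/*
+ * On the roff/coff branches of the symmetric and hermitian kernels above:
+ * when the stored submatrix lies on the diagonal of the global matrix
+ * (roff == coff), a single guarded loop emits both the direct and the
+ * mirrored update, skipping the mirror for diagonal entries (i == j).
+ * When the submatrix is off-diagonal (roff != coff), no stored entry can be
+ * a global diagonal entry, so the guard is dropped and the loop is unrolled
+ * four-fold.  The trhs and tout pointers, shifted by (roff - coff) and
+ * (coff - roff) respectively, in effect address the vector ranges belonging
+ * to the transposed position of the block, so the mirrored update lands in
+ * the right place.
+ */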
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
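+
+/*
+ * The sasa kernels take explicit strides: logical element k of the input is
+ * rhs[k*incx] and of the output out[k*incy], so the same kernel can operate
+ * on strided data (e.g. a row or column of a dense array) as well as on
+ * contiguous vectors.  The body above follows the file's usual 4-way
+ * unrolling pattern; a minimal sketch of that pattern in plain C (names are
+ * illustrative, not librsb API):
+ *
+ *   // y[IA[n]*incy] += VA[n] * x[JA[n]*incx] over all nnz entries
+ *   rsb_nnz_idx_t n;
+ *   for (n = 0; n + 3 < nnz; n += 4) {       // main loop, unrolled by 4
+ *       y[IA[n+0]*incy] += VA[n+0] * x[JA[n+0]*incx];
+ *       y[IA[n+1]*incy] += VA[n+1] * x[JA[n+1]*incx];
+ *       y[IA[n+2]*incy] += VA[n+2] * x[JA[n+2]*incx];
+ *       y[IA[n+3]*incy] += VA[n+3] * x[JA[n+3]*incx];
+ *   }
+ *   for (; n < nnz; ++n)                     // remainder: nnz % 4 entries
+ *       y[IA[n]*incy] += VA[n] * x[JA[n]*incx];
+ */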
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
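+
+/*
+ * The pointer rebase at the top of the transposed (tT and tC) kernels,
+ *
+ *   rhs = (rhs - coff*(incx)) + roff*(incx);
+ *   out = (out - roff*(incy)) + coff*(incy);
+ *
+ * simplifies to rhs += (roff - coff)*incx and out += (coff - roff)*incy:
+ * transposition swaps the index ranges through which the input and output
+ * vectors are addressed, so the row and column offsets trade places.
+ */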
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=conjf(VA[n+0 ])*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=conjf(VA[n+1 ])*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=conjf(VA[n+2 ])*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=conjf(VA[n+3 ])*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=conjf(VA[n+0 ])*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=conjf(VA[n+0 ])*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=conjf(VA[n+1 ])*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=conjf(VA[n+2 ])*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=conjf(VA[n+3 ])*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=conjf(VA[n+0 ])*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
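+
+/*
+ * The tC kernels above realize y <- y + A^H x one coefficient at a time by
+ * conjugating each stored value with conjf() while swapping the roles of the
+ * row and column indices.  Worked example of one such contribution: for a
+ * stored value v = 1+2i and an x-entry 3+i,
+ *
+ *   conjf(v)*(3+i) = (1-2i)*(3+i) = 3 + i - 6i - 2i^2 = 5 - 5i.
+ */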
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// S
+	float complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// S
+	float complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// S
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// S
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// S
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=conjf(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=conjf(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=conjf(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=conjf(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=conjf(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// S
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=conjf(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=conjf(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=conjf(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=conjf(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=conjf(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
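+
+/*
+ * Contrast between the symmetric (sS) kernels above and the hermitian (sH)
+ * kernels below, for a stored off-diagonal entry (i, j, VA[n]) of a diagonal
+ * block:
+ *
+ *   sS (A == A^T):  out[i] += VA[n]*rhs[j];  out[j] += VA[n]       *rhs[i];
+ *   sH (A == A^H):  out[i] += VA[n]*rhs[j];  out[j] += conjf(VA[n])*rhs[i];
+ *
+ * i.e. only the mirrored half of the update is conjugated in the hermitian
+ * case.
+ */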
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// H
+	float complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=conjf(VA[n])*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=conjf(VA[n+0 ])*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=conjf(VA[n+1 ])*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=conjf(VA[n+2 ])*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=conjf(VA[n+3 ])*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=conjf(VA[n+0 ])*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// H
+	float complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=conjf(VA[n])*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=conjf(VA[n+0 ])*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=conjf(VA[n+1 ])*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=conjf(VA[n+2 ])*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=conjf(VA[n+3 ])*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=conjf(VA[n+0 ])*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// H
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=conjf(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// H
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=conjf(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// H
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=conjf(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=conjf(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=conjf(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=conjf(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// H
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=conjf(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=conjf(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=conjf(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=conjf(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
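+
+/*
+ * As expected for A == A^H, the tC (conjugate-transpose) hermitian kernels
+ * above apply the same pair of per-entry updates as their tN counterparts
+ * (out[i] += VA[n]*rhs[j] and out[j] += conjf(VA[n])*rhs[i], in swapped
+ * statement order); only the offset-rebasing convention of the transposed
+ * kernels differs.
+ */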
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=conjf(VA[n+0 ])*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=conjf(VA[n+1 ])*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=conjf(VA[n+2 ])*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=conjf(VA[n+3 ])*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=conjf(VA[n+0 ])*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=conjf(VA[n+0 ])*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=conjf(VA[n+1 ])*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=conjf(VA[n+2 ])*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=conjf(VA[n+3 ])*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=conjf(VA[n+0 ])*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
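+
+/*
+ * The dI kernels rely on the caller to account for the implicit diagonal
+ * (see the NOTE in each body).  A hypothetical caller-side completion for
+ * y <- y + A x, assuming for illustration a unit implicit diagonal (sketch,
+ * not librsb API):
+ *
+ *   for (rsb_coo_idx_t k = 0; k < Mdim; ++k)
+ *       out[k*incy] += rhs[k*incx];   // add the implied unit diagonal, row k
+ */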
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// S
+	float complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// S
+	float complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*incy]+=VA[n+0]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0]*trhs[i*incx];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*incy]+=VA[n+1]*rhs[j*incx];
+		tout[j*incy]+=VA[n+1]*trhs[i*incx];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*incy]+=VA[n+2]*rhs[j*incx];
+		tout[j*incy]+=VA[n+2]*trhs[i*incx];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*incy]+=VA[n+3]*rhs[j*incx];
+		tout[j*incy]+=VA[n+3]*trhs[i*incx];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		tout[j*incy]+=VA[n]*trhs[i*incx];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// S
+	float complex *tout=out+incy*(coff-roff);
+
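+	/* Note: trhs/tout are computed from rhs/out before the re-basing below; in
+	 * this transposed symmetric kernel they end up coinciding with the re-based
+	 * pointers, so both contributions of a nonzero use the same displacements. */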
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*incy]+=VA[n+0]*rhs[i*incx];
+		tout[i*incy]+=VA[n+0]*trhs[j*incx];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*incy]+=VA[n+1]*rhs[i*incx];
+		tout[i*incy]+=VA[n+1]*trhs[j*incx];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*incy]+=VA[n+2]*rhs[i*incx];
+		tout[i*incy]+=VA[n+2]*trhs[j*incx];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*incy]+=VA[n+3]*rhs[i*incx];
+		tout[i*incy]+=VA[n+3]*trhs[j*incx];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*incy]+=VA[n]*rhs[i*incx];
+		tout[i*incy]+=VA[n]*trhs[j*incx];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// S
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*incy]+=VA[n+0]*rhs[i*incx];
+		tout[i*incy]+=VA[n+0]*trhs[j*incx];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*incy]+=VA[n+1]*rhs[i*incx];
+		tout[i*incy]+=VA[n+1]*trhs[j*incx];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*incy]+=VA[n+2]*rhs[i*incx];
+		tout[i*incy]+=VA[n+2]*trhs[j*incx];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*incy]+=VA[n+3]*rhs[i*incx];
+		tout[i*incy]+=VA[n+3]*trhs[j*incx];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*incy]+=VA[n]*rhs[i*incx];
+		tout[i*incy]+=VA[n]*trhs[j*incx];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// S
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=conjf(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=conjf(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*incy]+=conjf(VA[n+0])*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+0])*trhs[j*incx];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*incy]+=conjf(VA[n+1])*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+1])*trhs[j*incx];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*incy]+=conjf(VA[n+2])*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+2])*trhs[j*incx];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*incy]+=conjf(VA[n+3])*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+3])*trhs[j*incx];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*incy]+=conjf(VA[n])*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n])*trhs[j*incx];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// S
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=conjf(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=conjf(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*incy]+=conjf(VA[n+0])*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+0])*trhs[j*incx];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*incy]+=conjf(VA[n+1])*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+1])*trhs[j*incx];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*incy]+=conjf(VA[n+2])*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+2])*trhs[j*incx];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*incy]+=conjf(VA[n+3])*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+3])*trhs[j*incx];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*incy]+=conjf(VA[n])*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n])*trhs[j*incx];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// H
+	float complex *tout=out+incy*(coff-roff);
+
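+	/* Hermitian kernel: the mirrored contribution of each nonzero is applied
+	 * conjugated, via conjf(), since A == A^H. */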
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=conjf(VA[n])*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*incy]+=VA[n+0]*rhs[j*incx];
+		tout[j*incy]+=conjf(VA[n+0])*trhs[i*incx];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*incy]+=VA[n+1]*rhs[j*incx];
+		tout[j*incy]+=conjf(VA[n+1])*trhs[i*incx];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*incy]+=VA[n+2]*rhs[j*incx];
+		tout[j*incy]+=conjf(VA[n+2])*trhs[i*incx];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*incy]+=VA[n+3]*rhs[j*incx];
+		tout[j*incy]+=conjf(VA[n+3])*trhs[i*incx];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		tout[j*incy]+=conjf(VA[n])*trhs[i*incx];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// H
+	float complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=conjf(VA[n])*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*incy]+=VA[n+0]*rhs[j*incx];
+		tout[j*incy]+=conjf(VA[n+0])*trhs[i*incx];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*incy]+=VA[n+1]*rhs[j*incx];
+		tout[j*incy]+=conjf(VA[n+1])*trhs[i*incx];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*incy]+=VA[n+2]*rhs[j*incx];
+		tout[j*incy]+=conjf(VA[n+2])*trhs[i*incx];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*incy]+=VA[n+3]*rhs[j*incx];
+		tout[j*incy]+=conjf(VA[n+3])*trhs[i*incx];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		tout[j*incy]+=conjf(VA[n])*trhs[i*incx];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// H
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=conjf(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*incy]+=VA[n+0]*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+0])*trhs[j*incx];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*incy]+=VA[n+1]*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+1])*trhs[j*incx];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*incy]+=VA[n+2]*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+2])*trhs[j*incx];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*incy]+=VA[n+3]*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+3])*trhs[j*incx];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*incy]+=VA[n]*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n])*trhs[j*incx];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// H
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=conjf(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*incy]+=VA[n+0]*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+0])*trhs[j*incx];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*incy]+=VA[n+1]*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+1])*trhs[j*incx];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*incy]+=VA[n+2]*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+2])*trhs[j*incx];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*incy]+=VA[n+3]*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n+3])*trhs[j*incx];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*incy]+=VA[n]*rhs[i*incx];
+		tout[i*incy]+=conjf(VA[n])*trhs[j*incx];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// H
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=conjf(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*incy]+=conjf(VA[n+0])*rhs[i*incx];
+		tout[i*incy]+=VA[n+0]*trhs[j*incx];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*incy]+=conjf(VA[n+1])*rhs[i*incx];
+		tout[i*incy]+=VA[n+1]*trhs[j*incx];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*incy]+=conjf(VA[n+2])*rhs[i*incx];
+		tout[i*incy]+=VA[n+2]*trhs[j*incx];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*incy]+=conjf(VA[n+3])*rhs[i*incx];
+		tout[i*incy]+=VA[n+3]*trhs[j*incx];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*incy]+=conjf(VA[n])*rhs[i*incx];
+		tout[i*incy]+=VA[n]*trhs[j*incx];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const float complex *trhs = rhs+incx*(roff-coff);// H
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=conjf(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*incy]+=conjf(VA[n+0])*rhs[i*incx];
+		tout[i*incy]+=VA[n+0]*trhs[j*incx];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*incy]+=conjf(VA[n+1])*rhs[i*incx];
+		tout[i*incy]+=VA[n+1]*trhs[j*incx];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*incy]+=conjf(VA[n+2])*rhs[i*incx];
+		tout[i*incy]+=VA[n+2]*trhs[j*incx];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*incy]+=conjf(VA[n+3])*rhs[i*incx];
+		tout[i*incy]+=VA[n+3]*trhs[j*incx];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*incy]+=conjf(VA[n])*rhs[i*incx];
+		tout[i*incy]+=VA[n]*trhs[j*incx];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
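+/* The kernels below perform in-place sparse triangular solves (spsv): the
+ * right-hand side is expected in "out", which is overwritten with the solution;
+ * the "rhs" argument is not referenced. */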
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
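+	/* Upper-triangular solve by backward substitution: rows are visited from the
+	 * last to the first, the strictly off-diagonal terms of row ii are accumulated
+	 * into ax, and the diagonal entry then divides the residual. */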
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n<0 || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* n<0: no diagonal entry found in the backward sweep */
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
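+	/* Lower-triangular variant: forward substitution, scanning rows and nonzeroes
+	 * in increasing order. */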
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n<0 || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* n<0: no diagonal entry found in the backward sweep */
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^T)^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
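+	/* Transposed solve, column-oriented: out[ii] is divided by the diagonal first,
+	 * then its final value is scattered into out[j] for the remaining entries of
+	 * row ii of A (which form column ii of A^T). */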
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^T)^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward sweep: exhausted before this row's diagonal */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^T)^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^T)^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward sweep: exhausted before this row's diagonal */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^H)^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
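+	/* Conjugate-transposed solve: same scatter scheme as the A^T kernels, with
+	 * conjf() applied to the off-diagonal coefficients. */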
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^H)^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward sweep: exhausted before this row's diagonal */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^H)^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^H)^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward sweep: exhausted before this row's diagonal */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n<0 || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* n<0: no diagonal entry found in the backward sweep */
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n<0 || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* n<0: no diagonal entry found in the backward sweep */
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^T)^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^T)^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward sweep: exhausted before this row's diagonal */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^T)^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^T)^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward sweep: exhausted before this row's diagonal */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^H)^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^H)^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward sweep: exhausted before this row's diagonal */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^H)^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, or RSB_ERR_INVALID_NUMERICAL_DATA if a zero or missing diagonal element is encountered.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n+1<=0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward scan: test underflow (n>=nnz can never hold here) */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, or RSB_ERR_INVALID_NUMERICAL_DATA if a zero or missing diagonal element is encountered.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conjf(VA[n])*out[j*1];
+		}
+
+		if(n+1<=0 || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward scan: test underflow, not n==nnz */
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
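+
+/*
+ * Editorial note, not generated code: the tN kernels in this group are
+ * "gather" substitutions instead: the off-diagonal entries of row ii are
+ * accumulated against already-solved unknowns into ax, and the diagonal
+ * entry, met last in the scan direction, is divided out at the end.  A
+ * minimal sketch of the backward (upper-triangle) case on assumed example
+ * data (demo_spsv_gather is hypothetical, kept out of the build):
+ */
+#if 0
+/* Gather back-substitution on the 2 x 2 upper triangle
+ *   [ 2  1+i ]
+ *   [ 0  4   ]
+ * stored row-major; rows are visited last to first and VA is scanned from
+ * the end, as in the kernel above. */
+static void demo_spsv_gather(float complex out[2])
+{
+	const float complex VA[3] = { 2.0f, 1.0f + 1.0f*I, 4.0f };
+	const int IA[3] = { 0, 0, 1 }, JA[3] = { 0, 1, 1 };
+	int n = 2, ii;
+	for (ii = 1; ii >= 0; --ii)
+	{
+		float complex ax = 0.0f;
+		for (; n >= 0 && IA[n] == ii && JA[n] != ii; --n)
+			ax += VA[n] * out[JA[n]];	/* gather solved unknowns */
+		out[ii] = (out[ii] - ax) / VA[n];	/* then divide by the diagonal */
+		--n;
+	}
+}
+#endif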
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, or RSB_ERR_INVALID_NUMERICAL_DATA if a zero or missing diagonal element is encountered.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conjf(VA[n])*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, or RSB_ERR_INVALID_NUMERICAL_DATA if a zero or missing diagonal element is encountered.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conjf(VA[n])*out[j*1];
+		}
+
+		if(n+1<=0 || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward scan: test underflow, not n==nnz */
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, or RSB_ERR_INVALID_NUMERICAL_DATA if a zero or missing diagonal element is encountered.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conjf(VA[n])*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
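+
+/*
+ * Editorial note, not generated code: the _H_ kernels differ from their _C_
+ * counterparts only in reading bindx/bpntr through rsb_half_idx_t, a
+ * narrower index type (assumed here to be 16 bits wide), which halves the
+ * index traffic for submatrices whose local dimensions fit in it.  Sketch
+ * under that assumption (my_half_idx_t is a hypothetical stand-in):
+ */
+#if 0
+typedef unsigned short my_half_idx_t;	/* assumption: 16-bit index type */
+/* index bytes needed for nnz coordinate pairs: */
+static size_t coo_idx_bytes (size_t nnz) { return nnz * 2 * sizeof(rsb_coo_idx_t); }	/* _C_ kernels */
+static size_t half_idx_bytes(size_t nnz) { return nnz * 2 * sizeof(my_half_idx_t);  }	/* _H_ kernels */
+#endif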
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, or RSB_ERR_INVALID_NUMERICAL_DATA if a zero or missing diagonal element is encountered.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, or RSB_ERR_INVALID_NUMERICAL_DATA if a zero or missing diagonal element is encountered.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n+1<=0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward scan: test underflow (n>=nnz can never hold here) */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, or RSB_ERR_INVALID_NUMERICAL_DATA if a zero or missing diagonal element is encountered.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, or RSB_ERR_INVALID_NUMERICAL_DATA if a zero or missing diagonal element is encountered.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n+1<=0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward scan: test underflow (n>=nnz can never hold here) */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, or RSB_ERR_INVALID_NUMERICAL_DATA if a zero or missing diagonal element is encountered.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, or RSB_ERR_INVALID_NUMERICAL_DATA if a zero or missing diagonal element is encountered.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n+1<=0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward scan: test underflow (n>=nnz can never hold here) */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, or RSB_ERR_INVALID_NUMERICAL_DATA if a zero or missing diagonal element is encountered.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, or RSB_ERR_INVALID_NUMERICAL_DATA if a zero or missing diagonal element is encountered.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n+1<=0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward scan: test underflow (n>=nnz can never hold here) */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
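+
+/*
+ * Editorial note, not generated code: the dI kernels assume an implicitly
+ * stored unit diagonal, so VA holds off-diagonal entries only, no division
+ * is performed, and no error path is needed.  Minimal sketch on assumed
+ * example data (demo_spsv_unit_gather is hypothetical, kept out of the
+ * build):
+ */
+#if 0
+/* Forward gather substitution on the unit lower triangle
+ *   [ 1  0 ]
+ *   [ 2  1 ]
+ * stored without its diagonal: the whole solve reduces to
+ * out[1] -= 2*out[0]. */
+static void demo_spsv_unit_gather(float complex out[2])
+{
+	const float complex VA[1] = { 2.0f };
+	const int IA[1] = { 1 }, JA[1] = { 0 };
+	int n = 0, ii;
+	for (ii = 0; ii < 2; ++ii)
+	{
+		float complex ax = 0.0f;
+		for (; n < 1 && IA[n] == ii; ++n)
+			ax += VA[n] * out[JA[n]];
+		out[ii] -= ax;	/* unit diagonal: no division, no error return */
+	}
+}
+#endif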
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
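+
+/*
+ * Editorial note, not generated code: the sS/sH dI kernels that follow
+ * repeat the same two loop skeletons, varying only the scan direction, the
+ * index type, and whether conjf() is applied.  A hypothetical generator
+ * macro, shown only to make the shared shape of the scatter variants
+ * explicit (DEMO_SPSV_SCATTER_BODY is not part of librsb):
+ */
+#if 0
+#define DEMO_SPSV_SCATTER_BODY(IDX_T, CONJ)			\
+	do {							\
+		const IDX_T *ia = (const IDX_T*)bpntr;		\
+		const IDX_T *ja = (const IDX_T*)bindx;		\
+		rsb_nnz_idx_t k; rsb_coo_idx_t r;		\
+		for (k = 0, r = 0; r < Mdim; ++r) {		\
+			const float complex ax = out[r];	\
+			for (; k < nnz && ia[k] == r; ++k)	\
+				out[ja[k]] -= CONJ(VA[k]) * ax;	\
+		}						\
+	} while (0)
+/* e.g. DEMO_SPSV_SCATTER_BODY(rsb_half_idx_t, conjf) for an _H_ tC body */
+#endif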
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conjf(VA[n])*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conjf(VA[n])*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conjf(VA[n])*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel is implemented and always succeeds).
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conjf(VA[n])*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel is implemented and always succeeds).
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+		/* implicit unit diagonal: out[ii] needs no update */
+	}
+	return RSB_ERR_NO_ERROR;
+}
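+
+/*
+ * Editorial note: in the transposed (tT) solves the gather of the tN case
+ * becomes a scatter: once x[ii] is final, its contribution is pushed into
+ * the still-unsolved entries x[j].  A hedged sketch under the same
+ * assumptions as the earlier one, now for a stored strictly upper triangle
+ * (so that A^T is lower triangular and the sweep runs forward):
+ */
+#if 0
+static void spsv_coo_t_sketch(const float complex *VA, const int *IA,
+                              const int *JA, int nnz, int m, float complex *x)
+{
+	int n = 0, ii;
+	for (ii = 0; ii < m; ++ii)
+	{
+		const float complex ax = x[ii];    /* x[ii] is already final */
+		for (; n < nnz && IA[n] == ii; ++n)
+			x[JA[n]] -= conjf(VA[n]) * ax; /* scatter into pending rows */
+	}
+}
+#endif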
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel is implemented and always succeeds).
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+		/* implicit unit diagonal: out[ii] needs no update */
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel is implemented and always succeeds).
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+		/* implicit unit diagonal: out[ii] needs no update */
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel is implemented and always succeeds).
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conjf(VA[n])*ax;
+		}
+
+		/* implicit unit diagonal: out[ii] needs no update */
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel is implemented and always succeeds).
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		/* implicit unit diagonal: out[ii] needs no update */
+	}
+	return RSB_ERR_NO_ERROR;
+}
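+
+/*
+ * Editorial note: unlike the tN/tT kernels above, these tC (conjugate
+ * transpose) variants multiply by VA[n] directly.  A plausible reading:
+ * with the sH (Hermitian) storage the values already carry one conjugation
+ * relative to the acted-on triangle, and the explicit ^H adds another, so
+ * conjf(conjf(VA[n])) == VA[n] and the two cancel.
+ */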
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel is implemented and always succeeds).
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		/* implicit unit diagonal: out[ii] needs no update */
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel is implemented and always succeeds).
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		/* implicit unit diagonal: out[ii] needs no update */
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this kernel is implemented and always succeeds).
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		/* implicit unit diagonal: out[ii] needs no update */
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
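+
+/*
+ * Editorial note: the 4-way unrolled loop above (plus its nnz%4 cleanup
+ * loop) is just a COO gather/AXPY; unrolled and plain forms compute the
+ * same result.  Equivalent un-unrolled sketch (hypothetical name, plain
+ * int indices):
+ */
+#if 0
+static void spmv_coo_sketch(const float complex *VA, const int *IA,
+                            const int *JA, int nnz, float complex alpha,
+                            const float complex *x, int incx,
+                            float complex *y, int incy)
+{
+	int n;
+	for (n = 0; n < nnz; ++n) /* y += alpha * A * x, one triple at a time */
+		y[IA[n]*incy] += alpha * VA[n] * x[JA[n]*incx];
+}
+#endif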
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
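+
+/*
+ * Editorial note on the re-basing line
+ *     rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+ * The caller hands in rhs/out already shifted for the untransposed block at
+ * offset (roff,coff); for A^T rows and columns swap roles, so the shift is
+ * undone and re-applied with the offsets exchanged.  Worked check: if
+ * out[i*incy] originally addressed global row roff+i, then after re-basing
+ * out[j*incy] addresses global row coff+j, exactly where column j of the
+ * block must accumulate in the transposed product.
+ */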
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*conjf(VA[n+1 ])*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*conjf(VA[n+2 ])*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*conjf(VA[n+3 ])*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*conjf(VA[n+1 ])*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*conjf(VA[n+2 ])*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*conjf(VA[n+3 ])*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff); /* S: rhs re-based for the symmetric mirrored update */
+	float complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
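+
+/*
+ * Editorial note: the symmetric (sS) kernels split into two cases.  When
+ * roff==coff the block lies on the diagonal and each stored (i,j) with
+ * i!=j also stands in for the mirrored (j,i); when roff!=coff the mirrored
+ * update lands in a different block, addressed via trhs/tout.
+ * Diagonal-block sketch (hypothetical name, unit strides):
+ */
+#if 0
+static void spmv_coo_sym_sketch(const float complex *VA, const int *IA,
+                                const int *JA, int nnz, float complex alpha,
+                                const float complex *x, float complex *y)
+{
+	int n;
+	for (n = 0; n < nnz; ++n)
+	{
+		const int i = IA[n], j = JA[n];
+		y[i] += alpha * VA[n] * x[j];       /* stored entry a_ij */
+		if (i != j)                         /* avoid double-counting the diagonal */
+			y[j] += alpha * VA[n] * x[i];   /* implied mirror a_ji = a_ij */
+	}
+}
+#endif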
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff); /* S: rhs re-based for the symmetric mirrored update */
+	float complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff); /* S: rhs re-based for the symmetric mirrored update */
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff); /* S: rhs re-based for the symmetric mirrored update */
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff); /* S: rhs re-based for the symmetric mirrored update */
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*conjf(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*conjf(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff); /* S: rhs re-based for the symmetric mirrored update */
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*conjf(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*conjf(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff); /* H: rhs re-based for the Hermitian mirrored update */
+	float complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*conjf(VA[n])*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conjf(VA[n+1 ])*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conjf(VA[n+2 ])*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conjf(VA[n+3 ])*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
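+
+/*
+ * Editorial note: the Hermitian (sH) kernels differ from the symmetric (sS)
+ * ones only in the mirrored term: since a_ji = conj(a_ij), the scattered
+ * update multiplies by conjf(VA[n]).  Inner-body sketch, mirroring the sS
+ * sketch above:
+ */
+#if 0
+	y[i] += alpha * VA[n] * x[j];            /* stored entry a_ij */
+	if (i != j)
+		y[j] += alpha * conjf(VA[n]) * x[i]; /* implied mirror a_ji = conj(a_ij) */
+#endif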
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff); /* H: rhs re-based for the Hermitian mirrored update */
+	float complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*conjf(VA[n])*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conjf(VA[n+1 ])*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conjf(VA[n+2 ])*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conjf(VA[n+3 ])*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff); /* H: rhs re-based for the Hermitian mirrored update */
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*conjf(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff); /* H: rhs re-based for the Hermitian mirrored update */
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*conjf(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff); /* H: rhs re-based for the Hermitian mirrored update */
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*conjf(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff); /* H: rhs re-based for the Hermitian mirrored update */
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*conjf(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
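+
+/*
+ * Editorial note: per the NOTE above, the dI (implicit-diagonal) spmv
+ * kernels touch only the stored off-diagonal triples; the implicit unit
+ * diagonal is expected to be applied by the caller, e.g. roughly (a hedged
+ * sketch, not the actual librsb caller):
+ */
+#if 0
+	for (k = 0; k < m; ++k)
+		y[k*incy] += alpha * x[k*incx]; /* implicit unit diagonal term */
+	/* ...then run the dI kernel over the stored entries... */
+#endif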
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*conjf(VA[n+1 ])*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*conjf(VA[n+2 ])*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*conjf(VA[n+3 ])*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*conjf(VA[n+1 ])*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*conjf(VA[n+2 ])*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*conjf(VA[n+3 ])*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
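+
+/* Editorial note, not upstream code: the conjugate-transpose (tC)
+ * kernels above differ from the tT ones only in conjugating each
+ * stored value via conjf(). A minimal sketch of one accumulation
+ * step, with illustrative names:
+ */
+#include <complex.h>
+static inline void spmv_conj_transpose_step(float complex *y, int j, int incy,
+	float complex alpha, float complex va,
+	const float complex *x, int i, int incx)
+{
+	/* y_j += alpha * conj(a_ij) * x_i */
+	y[j*incy] += alpha * conjf(va) * x[i*incx];
+}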
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff);// S
+	float complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff);// S
+	float complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
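+
+/* Editorial note, not upstream code: the symmetric (sS) kernels above
+ * store only one triangle; each nonzero a_ij therefore contributes to
+ * both y_i and y_j, and the i != j guard keeps diagonal entries from
+ * being applied twice. When the submatrix lies off the main diagonal
+ * (roff != coff), the row and column index spaces differ, so the
+ * mirrored update goes through the shifted trhs/tout bases instead.
+ * A minimal sketch of the diagonal-block case, with illustrative names
+ * and unit strides:
+ */
+#include <complex.h>
+static void coo_spmv_symmetric(const float complex *VA, const int *IA,
+	const int *JA, int nnz, float complex alpha,
+	const float complex *x, float complex *y)
+{
+	for (int n = 0; n < nnz; ++n) {
+		const int i = IA[n], j = JA[n];
+		y[i] += alpha * VA[n] * x[j];		/* stored triangle */
+		if (i != j)
+			y[j] += alpha * VA[n] * x[i];	/* mirrored triangle */
+	}
+}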
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff);// S
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff);// S
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
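+
+/* Editorial note, not upstream code: for a symmetric matrix A = A^T the
+ * transposed product coincides with the untransposed one, so the tT
+ * kernels above are the tN kernels with the roles of the row and column
+ * indices exchanged. A sketch of the swapped loop body (illustrative
+ * names, unit strides):
+ */
+#include <complex.h>
+static void coo_spmv_symmetric_t(const float complex *VA, const int *IA,
+	const int *JA, int nnz, float complex alpha,
+	const float complex *x, float complex *y)
+{
+	for (int n = 0; n < nnz; ++n) {
+		const int i = IA[n], j = JA[n];
+		y[j] += alpha * VA[n] * x[i];		/* stored triangle, transposed */
+		if (i != j)
+			y[i] += alpha * VA[n] * x[j];	/* mirrored triangle */
+	}
+}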
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff);// S
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*conjf(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*conjf(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff);// S
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*conjf(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*conjf(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff);// H
+	float complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*conjf(VA[n])*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conjf(VA[n+1 ])*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conjf(VA[n+2 ])*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conjf(VA[n+3 ])*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff);// H
+	float complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*conjf(VA[n])*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conjf(VA[n+1 ])*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conjf(VA[n+2 ])*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conjf(VA[n+3 ])*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
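+
+/* Editorial note, not upstream code: in the Hermitian (sH) kernels
+ * above the mirrored contribution uses the conjugated value, since
+ * a_ji = conj(a_ij) when A = A^H. A minimal sketch (illustrative
+ * names, unit strides):
+ */
+#include <complex.h>
+static void coo_spmv_hermitian(const float complex *VA, const int *IA,
+	const int *JA, int nnz, float complex alpha,
+	const float complex *x, float complex *y)
+{
+	for (int n = 0; n < nnz; ++n) {
+		const int i = IA[n], j = JA[n];
+		y[i] += alpha * VA[n] * x[j];			/* stored triangle */
+		if (i != j)
+			y[j] += alpha * conjf(VA[n]) * x[i];	/* conjugated mirror */
+	}
+}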
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff);// H
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*conjf(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff);// H
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*conjf(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conjf(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff);// H
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*conjf(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+incx*(roff-coff);// H
+	float complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*conjf(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conjf(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
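+
+/* Editorial note, not upstream code: for Hermitian A the requested
+ * conjugate-transpose product is just A itself (A^H = A), which is why
+ * the tC kernels above apply conjf() to the stored triangle and leave
+ * the mirrored one plain: the two updates together still enumerate A.
+ * A sketch of the loop body (illustrative names, unit strides):
+ */
+#include <complex.h>
+static void coo_spmv_hermitian_c(const float complex *VA, const int *IA,
+	const int *JA, int nnz, float complex alpha,
+	const float complex *x, float complex *y)
+{
+	for (int n = 0; n < nnz; ++n) {
+		const int i = IA[n], j = JA[n];
+		y[j] += alpha * conjf(VA[n]) * x[i];	/* (A^H)_{ji} = conj(a_ij) */
+		if (i != j)
+			y[i] += alpha * VA[n] * x[j];	/* (A^H)_{ij} = a_ij */
+	}
+}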
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==-1 || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* n==-1: nonzeroes exhausted before reaching the diagonal */
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
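+
+/* Editorial note, not upstream code: the two spsv kernels above solve a
+ * triangular system in place by substitution over row-sorted COO data:
+ * the lower-triangular (uL) variant sweeps the rows forward, while the
+ * upper-triangular (uU) variant runs the same recurrence backward.
+ * Each row's off-diagonal entries are accumulated into ax, then the
+ * diagonal entry divides the updated right-hand side. A minimal
+ * sketch of the forward case, assuming the strictly-lower entries of
+ * each row precede its diagonal (illustrative names, unit strides):
+ */
+#include <complex.h>
+static int coo_trsv_lower(const float complex *VA, const int *IA,
+	const int *JA, int nnz, int m, float complex alpha,
+	float complex *x)	/* on entry b, on exit the solution */
+{
+	int n = 0;
+	for (int ii = 0; ii < m; ++ii) {
+		float complex ax = 0;
+		/* accumulate the strictly-lower entries of row ii */
+		for (; n < nnz && IA[n] == ii && JA[n] != ii; ++n)
+			ax += VA[n] * x[JA[n]];
+		if (n == nnz || VA[n] == 0)
+			return -1;	/* missing or zero diagonal */
+		x[ii] = (alpha * x[ii] - ax) / VA[n];
+		++n;	/* step past the diagonal entry */
+	}
+	return 0;
+}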
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==-1 || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* n==-1: nonzeroes exhausted before reaching the diagonal */
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* nonzeroes exhausted before reaching this row's diagonal */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
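+
+/* Editorial note, not upstream code: the transposed-solve (tT) kernels
+ * above work column-wise instead: once x_ii is known it is scattered
+ * into all remaining equations that reference it ("saxpy" style), so
+ * each row of the stored triangle is consumed exactly once, with the
+ * diagonal entry leading its row. A minimal sketch for an upper
+ * triangular A solved forward (illustrative names, unit strides):
+ */
+#include <complex.h>
+static int coo_trsv_upper_transposed(const float complex *VA, const int *IA,
+	const int *JA, int nnz, int m, float complex alpha,
+	float complex *x)	/* on entry b, on exit the solution */
+{
+	int n = 0;
+	for (int ii = 0; ii < m; ++ii) {
+		if (n >= nnz || VA[n] == 0)
+			return -1;	/* missing or zero diagonal */
+		x[ii] /= VA[n];	/* pivot: the diagonal leads row ii */
+		const float complex ax = x[ii];
+		/* scatter the pivot row into the trailing unknowns */
+		for (++n; n < nnz && IA[n] == ii; ++n)
+			x[JA[n]] -= VA[n] * ax;
+		x[ii] *= alpha;
+	}
+	return 0;
+}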
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* nonzeroes exhausted before reaching this row's diagonal */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* nonzeroes exhausted before reaching this row's diagonal */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* nonzeroes exhausted before reaching this row's diagonal */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
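+
+/* Editorial note, not upstream code: the conjugate-transpose solve (tC)
+ * kernels above reuse the tT sweep verbatim, conjugating each stored
+ * value as it is scattered. A sketch of the inner update (illustrative
+ * names, unit strides):
+ */
+#include <complex.h>
+static inline void trsv_conj_scatter_step(float complex *x, int j,
+	float complex va, float complex ax)
+{
+	/* x_j -= conj(a_ij) * x_ii, the tC counterpart of x_j -= a_ij * x_ii */
+	x[j] -= conjf(va) * ax;
+}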
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==-1 || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* n==-1: nonzeroes exhausted before reaching the diagonal */
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==-1 || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* n==-1: nonzeroes exhausted before reaching the diagonal */
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* nonzeroes exhausted before reaching this row's diagonal */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on correct operation; RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is missing or zero.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
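+/*
+ * Illustrative note: the _H kernels differ from their _C twins only in
+ * reading the index arrays as rsb_half_idx_t (a narrower integer type,
+ * typically an unsigned short) instead of rsb_coo_idx_t, halving index
+ * bandwidth on submatrices small enough for 16-bit local indices.  A
+ * minimal sketch of the idea, with a hypothetical helper name and
+ * uint16_t standing in for rsb_half_idx_t:
+ */
+#include <complex.h>
+#include <stdint.h>
+
+static float complex demo_gather_halfidx(const float complex *VA,
+		const uint16_t *JA, const float complex *x, int nnz)
+{
+	float complex ax = 0;
+	int n;
+
+	/* same arithmetic as above, half the index memory traffic */
+	for (n = 0; n < nnz; ++n)
+		ax += VA[n] * x[JA[n]];
+	return ax;
+}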
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on correct operation; RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is missing or zero.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* reverse sweep: exhaustion is n<0 */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on correct operation; RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is missing or zero.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
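+/*
+ * Illustrative note: the tC kernels above are the tT kernels with every
+ * stored value passed through conjf() from <complex.h>, i.e. they solve
+ * against op(A) = A^H instead of A^T.  Schematically, the only changed
+ * line of the scatter is:
+ *
+ *	out[j*incy] -=       VA[n]  * ax;	(transpose)
+ *	out[j*incy] -= conjf(VA[n]) * ax;	(conjugate transpose)
+ */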
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on correct operation; RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is missing or zero.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* reverse sweep: exhaustion is n<0 */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on correct operation; RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is missing or zero.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on correct operation; RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is missing or zero.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* reverse sweep: exhaustion is n<0 */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on correct operation; RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is missing or zero.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conjf(VA[n])*out[j*incy];
+		}
+
+		if(n==-1 || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* reverse sweep: exhaustion is n==-1 */
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
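+/*
+ * A minimal illustrative sketch (hypothetical helper, not part of the
+ * generated kernels) of the backward sweep used by the non-transposed
+ * upper-triangular (uU) kernels just above: rows are visited last to
+ * first and the COO stream is walked in reverse, so exhaustion shows up
+ * as n reaching -1 rather than nnz.  The hermitian (sH) variants above
+ * additionally pass VA[n] through conjf(); this sketch omits that.
+ */
+#include <complex.h>
+
+static int demo_spsv_upper_bwd(const float complex *VA, const int *IA,
+		const int *JA, int m, int nnz, float complex alpha,
+		float complex *x)
+{
+	int n = nnz - 1, ii;
+
+	for (ii = m - 1; ii >= 0; --ii)
+	{
+		float complex ax = 0;
+
+		/* gather the strictly-upper part of row ii, in reverse */
+		for (; n >= 0 && IA[n] == ii && JA[n] != ii; --n)
+			ax += VA[n] * x[JA[n]];
+
+		if (n == -1 || IA[n] != ii || JA[n] != ii || VA[n] == 0)
+			return -1;	/* exhausted, or zero diagonal */
+
+		x[ii] = (alpha * x[ii] - ax) / VA[n];
+		--n;
+	}
+	return 0;
+}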
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on correct operation; RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is missing or zero.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conjf(VA[n])*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on correct operation; RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is missing or zero.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conjf(VA[n])*out[j*incy];
+		}
+
+		if(n==-1 || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* reverse sweep: exhaustion is n==-1 */
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on correct operation; RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is missing or zero.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conjf(VA[n])*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on correct operation; RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is missing or zero.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on correct operation; RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is missing or zero.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* reverse sweep: exhaustion is n<0 */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on correct operation; RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is missing or zero.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on correct operation; RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is missing or zero.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* reverse sweep: exhaustion is n<0 */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on correct operation; RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is missing or zero.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on correct operation; RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is missing or zero.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* reverse sweep: exhaustion is n<0 */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on correct operation; RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is missing or zero.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on correct operation; RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is missing or zero.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		float complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* reverse sweep: exhaustion is n<0 */
+		aa=VA[n];
+		if(VA[n]==((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
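+/*
+ * A minimal illustrative sketch (hypothetical helper, not part of the
+ * generated kernels) of the implicit-unit-diagonal (dI) kernels above:
+ * the diagonal is assumed to be 1 and is not stored, so there is no
+ * division and no zero-diagonal check; each row costs one gather and
+ * one update.
+ */
+#include <complex.h>
+
+static int demo_spsv_unit_lower_fwd(const float complex *VA, const int *IA,
+		const int *JA, int m, int nnz, float complex alpha,
+		float complex *x)
+{
+	int n = 0, ii;
+
+	for (ii = 0; ii < m; ++ii)
+	{
+		float complex ax = 0;
+
+		for (; n < nnz && IA[n] == ii && JA[n] != ii; ++n)
+			ax += VA[n] * x[JA[n]];
+
+		x[ii] = alpha * x[ii] - ax;	/* unit diagonal: no division */
+	}
+	return 0;
+}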
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
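+
+/*
+ * Note: the only difference between the tT and tC kernels above is the
+ * conjf() around VA[n]: the same stored triangle then solves with A^H
+ * instead of A^T, since conj(a)*x replaces a*x in every update.
+ */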
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conjf(VA[n])*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conjf(VA[n])*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
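+
+/*
+ * The tN kernels use the complementary gather form: instead of scattering a
+ * finished unknown, each step collects the already-solved neighbours into ax
+ * and finishes row ii in one assignment, out[ii] = alpha*out[ii] - ax.  A
+ * standalone toy replay (hypothetical arrays, not the librsb API) with one
+ * stored entry a(1,0) = 2i of a unit-diagonal triangle:
+ */
+#include <complex.h>
+#include <stdio.h>
+
+int main(void)
+{
+	const int IA[] = {1}, JA[] = {0};
+	const float complex VA[] = {2 * I};
+	float complex out[2] = {1, 3};	/* b on entry, x on exit */
+	const float complex alpha = 1;
+	const int Mdim = 2, nnz = 1;
+	int n, ii;
+
+	for(n = 0, ii = 0; ii < Mdim; ++ii)
+	{
+		float complex ax = 0;	/* gathers conj(a(ii,j)) * x_j, j < ii */
+		for(; n < nnz; ++n)
+		{
+			if(!(IA[n] == ii && JA[n] != ii))
+				break;
+			ax += conjf(VA[n]) * out[JA[n]];
+		}
+		out[ii] = alpha * out[ii] - ax;
+	}
+	/* x_1 = 3 - conj(2i)*1 = 3 + 2i */
+	printf("x1 = %g%+gi\n", crealf(out[1]), cimagf(out[1]));
+	return 0;
+}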
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conjf(VA[n])*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conjf(VA[n])*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conjf(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const float complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		float complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
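+
+/*
+ * A caller obtains \|A\|_{\infty} from these kernels by reducing row_sums
+ * to its largest entry; row_sums is typed float complex, presumably for
+ * uniformity across the generated kernels, but accumulates real moduli.
+ * Standalone toy replay (hypothetical arrays, not the librsb API):
+ */
+#include <complex.h>
+#include <stdio.h>
+
+int main(void)
+{
+	const int IA[] = {0, 1};	/* row of each stored entry */
+	const float complex VA[] = {3 + 4 * I, 1};
+	float complex row_sums[2] = {0, 0};
+	const int roff = 0, nnz = 2;	/* block sits at row offset 0 */
+	float max = 0;
+	int n;
+
+	for(n = 0; n < nnz; ++n)
+		row_sums[roff + IA[n]] += cabsf(VA[n]);	/* |3+4i| = 5, |1| = 1 */
+	for(n = 0; n < 2; ++n)
+		if(crealf(row_sums[n]) > max)
+			max = crealf(row_sums[n]);
+	printf("||A||_inf = %g\n", max);	/* 5 */
+	return 0;
+}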
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
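+
+/*
+ * The _H_ kernels differ from their _C_ twins only in reinterpreting bindx
+ * and bpntr as rsb_half_idx_t, presumably a narrower integer type that
+ * halves the index bandwidth for blocks small enough to address with it;
+ * the loop bodies are otherwise identical.
+ */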
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A^T\|_{\infty} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
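+
+/*
+ * In the transposed (tT/tC) variants the modulus is accumulated at coff+j,
+ * i.e. by column: the result is the vector of row sums of A^T, whose maximum
+ * is \|A^T\|_{\infty} = \|A\|_1.  Conjugation is immaterial here, since
+ * |conj(a)| = |a|, which is why the tC bodies are identical to the tT ones.
+ */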
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A^T\|_{\infty} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A^H\|_{\infty} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A^H\|_{\infty} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
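+
+/*
+ * With only one triangle stored, each off-diagonal entry contributes its
+ * modulus to both its row and its column sum; the roff+i != coff+j guard
+ * keeps diagonal entries from being counted twice.  For a toy block with
+ * a(0,0) = 1 and a(1,0) = 3+4i this yields row_sums = (6, 5): row 0 gets
+ * |1| + |3+4i| via the mirrored entry, row 1 gets just |3+4i|.
+ */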
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A^T\|_{\infty} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A^T\|_{\infty} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A^H\|_{\infty} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A^H\|_{\infty} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabsf(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabsf(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
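+/*
+ * A minimal usage sketch for the hermitian infinity-norm kernels above,
+ * assuming direct linkage against them; the arrays are hypothetical, and
+ * the row index array reaches the kernel laundered through the bpntr
+ * parameter, exactly as the cast at the top of each kernel body expects.
+ * \code
+ * const float complex VA[] = { 1.0f - 1.0f*I };  // strictly lower entry A(1,0) of a 2 x 2 hermitian A
+ * const rsb_coo_idx_t IA[] = { 1 }, JA[] = { 0 };
+ * float complex row_sums[2] = { 0, 0 };
+ * rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_uu_sH_dI_uG(
+ *     VA, row_sums, 2, 2, JA, (const rsb_nnz_idx_t*)IA,
+ *     NULL, NULL, NULL, 1, 1, 0, 0, RSB_FLAG_NOFLAGS, 1);
+ * // both row_sums[0] and row_sums[1] now hold |1-I|: the entry is mirrored across the diagonal
+ * \endcode
+ */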
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
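+/*
+ * The transposition letter in the kernel name selects the accumulation
+ * target: a hedged sketch (hypothetical arrays, direct calls shown for
+ * illustration only) of the unsymmetric kernels above on one 2 x 2 block.
+ * \code
+ * const float complex VA[] = { 1, 2, 3 };
+ * const rsb_coo_idx_t IA[] = { 0, 0, 1 }, JA[] = { 0, 1, 1 };
+ * float complex row_sums[2] = { 0, 0 };
+ * // tN: row_sums[roff+IA[n]] += VA[n], i.e. sums over rows -> { 1+2, 3 }
+ * rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_uu_sU_dE_uG(
+ *     VA, row_sums, 2, 2, JA, (const rsb_nnz_idx_t*)IA,
+ *     NULL, NULL, NULL, 1, 1, 0, 0, RSB_FLAG_NOFLAGS, 3);
+ * // tT: row_sums[coff+JA[n]] += VA[n], i.e. sums over columns -> adds { 1, 2+3 }
+ * rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_uu_sU_dE_uG(
+ *     VA, row_sums, 2, 2, JA, (const rsb_nnz_idx_t*)IA,
+ *     NULL, NULL, NULL, 1, 1, 0, 0, RSB_FLAG_NOFLAGS, 3);
+ * \endcode
+ */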
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
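+/*
+ * A worked example of the symmetric accumulation used above, with
+ * hypothetical data: storing only the lower triangle of A = [ 1 2 ; 2 5 ]
+ * as VA = { 1, 2, 5 }, IA = { 0, 1, 1 }, JA = { 0, 0, 1 }, the stored
+ * off-diagonal entry (1,0) = 2 updates both row_sums[1] and row_sums[0],
+ * so the full row sums { 1+2, 2+5 } are recovered from the triangle alone;
+ * the roff+i != coff+j guard keeps diagonal entries from counting twice.
+ */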
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
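+/*
+ * The _H_ kernels above read rsb_half_idx_t indices (assumed here to be a
+ * 16-bit unsigned type), halving index bandwidth for blocks whose local
+ * extent fits 16 bits, while roff/coff restore the global position.
+ * A sketch under those assumptions, with hypothetical data:
+ * \code
+ * const rsb_half_idx_t IA[] = { 0, 1 }, JA[] = { 1, 0 };
+ * const float complex VA[] = { 4, 5 };
+ * float complex row_sums[1000] = { 0 };
+ * // a block placed at global row offset 998: the half indices stay small
+ * rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_uu_sU_dI_uG(
+ *     VA, row_sums, 2, 2, JA, (const rsb_nnz_idx_t*)IA,
+ *     NULL, NULL, NULL, 1, 1, 998, 0, RSB_FLAG_NOFLAGS, 2);
+ * // row_sums[998] += 4, row_sums[999] += 5
+ * \endcode
+ */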
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
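+/*
+ * A minimal sketch of in-place diagonal scaling with the kernels above
+ * (hypothetical arrays; a direct call shown for illustration only): the tN
+ * variant scales entries by their row's factor, tT/tC by their column's.
+ * \code
+ * float complex VA[] = { 1, 2, 3 };
+ * const rsb_coo_idx_t IA[] = { 0, 0, 1 }, JA[] = { 0, 1, 1 };
+ * const float complex s[] = { 2, 10 };
+ * // VA[n] *= s[IA[n]]  ->  { 2, 4, 30 }
+ * rsb__BCOR_scale_float_complex_C__tN_r1_c1_uu_sU_dE_uG(
+ *     VA, 2, 2, JA, (const rsb_nnz_idx_t*)IA,
+ *     NULL, NULL, NULL, 1, 1, 0, 0, RSB_FLAG_NOFLAGS, 3, s);
+ * \endcode
+ */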
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P\f$, with \f$P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	{
+		for(n=0;n+3<nnz;n+=4)
+		{
+			i=IA[n+0]; j=JA[n+0];
+			out[i*1]+=VA[n+0]*rhs[j*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[i*1]+=VA[n+1]*rhs[j*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[i*1]+=VA[n+2]*rhs[j*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[i*1]+=VA[n+3]*rhs[j*1];
+		}
+		for(;n<nnz;++n)
+		{
+			i=IA[n+0]; j=JA[n+0];
+			out[i*1]+=VA[n+0]*rhs[j*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
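+
+/*
+ * The SpMV kernels in this family use a main loop hand-unrolled by a factor
+ * of four, followed by a scalar loop for the remaining nonzeroes.  With
+ * nnz == 10, for instance, the unrolled loop covers n = 0..7 and the
+ * remainder loop covers n = 8 and n = 9.
+ */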
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	{
+		for(n=0;n+3<nnz;n+=4)
+		{
+			i=IA[n+0]; j=JA[n+0];
+			out[i*1]+=VA[n+0]*rhs[j*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[i*1]+=VA[n+1]*rhs[j*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[i*1]+=VA[n+2]*rhs[j*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[i*1]+=VA[n+3]*rhs[j*1];
+		}
+		for(;n<nnz;++n)
+		{
+			i=IA[n+0]; j=JA[n+0];
+			out[i*1]+=VA[n+0]*rhs[j*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+		for(n=0;n+3<nnz;n+=4)
+		{
+			i=IA[n+0]; j=JA[n+0];
+			out[j*1]+=VA[n+0]*rhs[i*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*1]+=VA[n+1]*rhs[i*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*1]+=VA[n+2]*rhs[i*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*1]+=VA[n+3]*rhs[i*1];
+		}
+		for(;n<nnz;++n)
+		{
+			i=IA[n+0]; j=JA[n+0];
+			out[j*1]+=VA[n+0]*rhs[i*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
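+
+/*
+ * In the transposed (tT) kernels the statement
+ *   rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+ * rebases the vector pointers: for y += A^T * x the input vector is addressed
+ * by row indices and the output by column indices, so the submatrix offsets
+ * roff and coff swap roles.  The literal 1 appears to be the unit stride
+ * baked in by the code generator (cf. the rhs[i*1]/out[j*1] accesses).
+ */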
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+		for(n=0;n+3<nnz;n+=4)
+		{
+			i=IA[n+0]; j=JA[n+0];
+			out[j*1]+=VA[n+0]*rhs[i*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*1]+=VA[n+1]*rhs[i*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*1]+=VA[n+2]*rhs[i*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*1]+=VA[n+3]*rhs[i*1];
+		}
+		for(;n<nnz;++n)
+		{
+			i=IA[n+0]; j=JA[n+0];
+			out[j*1]+=VA[n+0]*rhs[i*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+		for(n=0;n+3<nnz;n+=4)
+		{
+			i=IA[n+0]; j=JA[n+0];
+			out[j*1]+=conj(VA[n+0])*rhs[i*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*1]+=conj(VA[n+1])*rhs[i*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*1]+=conj(VA[n+2])*rhs[i*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*1]+=conj(VA[n+3])*rhs[i*1];
+		}
+		for(;n<nnz;++n)
+		{
+			i=IA[n+0]; j=JA[n+0];
+			out[j*1]+=conj(VA[n+0])*rhs[i*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
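+
+/*
+ * The conjugate-transposed (tC) kernels differ from the tT ones only in
+ * applying conj() from <complex.h> to every stored value, thus computing
+ * y += A^H * x rather than y += A^T * x.
+ */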
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+		for(n=0;n+3<nnz;n+=4)
+		{
+			i=IA[n+0]; j=JA[n+0];
+			out[j*1]+=conj(VA[n+0])*rhs[i*1];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*1]+=conj(VA[n+1])*rhs[i*1];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*1]+=conj(VA[n+2])*rhs[i*1];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*1]+=conj(VA[n+3])*rhs[i*1];
+		}
+		for(;n<nnz;++n)
+		{
+			i=IA[n+0]; j=JA[n+0];
+			out[j*1]+=conj(VA[n+0])*rhs[i*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
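+
+/*
+ * For reference, the unsymmetric tN kernels above are equivalent to this
+ * plain COO loop (a sketch only; names are local to this comment):
+ *
+ *   for (n = 0; n < nnz; ++n)
+ *       y[IA[n]] += VA[n] * x[JA[n]];   // y += A * x
+ */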
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex *trhs = rhs+1*(roff-coff); /* shifted views for the symmetric (S) mirror updates */
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4)
+		{
+			i=IA[n+0];
+			j=JA[n+0];
+			out[i*1]+=VA[n+0]*rhs[j*1];
+			tout[j*1]+=VA[n+0]*trhs[i*1];
+			i=IA[n+1];
+			j=JA[n+1];
+			out[i*1]+=VA[n+1]*rhs[j*1];
+			tout[j*1]+=VA[n+1]*trhs[i*1];
+			i=IA[n+2];
+			j=JA[n+2];
+			out[i*1]+=VA[n+2]*rhs[j*1];
+			tout[j*1]+=VA[n+2]*trhs[i*1];
+			i=IA[n+3];
+			j=JA[n+3];
+			out[i*1]+=VA[n+3]*rhs[j*1];
+			tout[j*1]+=VA[n+3]*trhs[i*1];
+		}
+		for(;n<nnz;++n)
+		{
+			i=IA[n+0];
+			j=JA[n+0];
+			out[i*1]+=VA[n+0]*rhs[j*1];
+			tout[j*1]+=VA[n+0]*trhs[i*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
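+
+/*
+ * Structure of the symmetric (sS) kernels: on a diagonal block
+ * (roff == coff) each nonzero (i,j) updates both out[i] and out[j], and the
+ * i != j guard keeps diagonal entries from contributing twice.  On an
+ * off-diagonal block (roff != coff) the mirrored update goes to the shifted
+ * views tout/trhs instead, so no guard is needed and the loop can be unrolled.
+ */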
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex *trhs = rhs+1*(roff-coff); /* shifted views for the symmetric (S) mirror updates */
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4)
+		{
+			i=IA[n+0];
+			j=JA[n+0];
+			out[i*1]+=VA[n+0]*rhs[j*1];
+			tout[j*1]+=VA[n+0]*trhs[i*1];
+			i=IA[n+1];
+			j=JA[n+1];
+			out[i*1]+=VA[n+1]*rhs[j*1];
+			tout[j*1]+=VA[n+1]*trhs[i*1];
+			i=IA[n+2];
+			j=JA[n+2];
+			out[i*1]+=VA[n+2]*rhs[j*1];
+			tout[j*1]+=VA[n+2]*trhs[i*1];
+			i=IA[n+3];
+			j=JA[n+3];
+			out[i*1]+=VA[n+3]*rhs[j*1];
+			tout[j*1]+=VA[n+3]*trhs[i*1];
+		}
+		for(;n<nnz;++n)
+		{
+			i=IA[n+0];
+			j=JA[n+0];
+			out[i*1]+=VA[n+0]*rhs[j*1];
+			tout[j*1]+=VA[n+0]*trhs[i*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex *trhs = rhs+1*(roff-coff); /* shifted views for the symmetric (S) mirror updates */
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4)
+		{
+			i=IA[n+0];
+			j=JA[n+0];
+			out[j*1]+=VA[n+0]*rhs[i*1];
+			tout[i*1]+=VA[n+0]*trhs[j*1];
+			i=IA[n+1];
+			j=JA[n+1];
+			out[j*1]+=VA[n+1]*rhs[i*1];
+			tout[i*1]+=VA[n+1]*trhs[j*1];
+			i=IA[n+2];
+			j=JA[n+2];
+			out[j*1]+=VA[n+2]*rhs[i*1];
+			tout[i*1]+=VA[n+2]*trhs[j*1];
+			i=IA[n+3];
+			j=JA[n+3];
+			out[j*1]+=VA[n+3]*rhs[i*1];
+			tout[i*1]+=VA[n+3]*trhs[j*1];
+		}
+		for(;n<nnz;++n)
+		{
+			i=IA[n+0];
+			j=JA[n+0];
+			out[j*1]+=VA[n+0]*rhs[i*1];
+			tout[i*1]+=VA[n+0]*trhs[j*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex *trhs = rhs+1*(roff-coff); /* shifted views for the symmetric (S) mirror updates */
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4)
+		{
+			i=IA[n+0];
+			j=JA[n+0];
+			out[j*1]+=VA[n+0]*rhs[i*1];
+			tout[i*1]+=VA[n+0]*trhs[j*1];
+			i=IA[n+1];
+			j=JA[n+1];
+			out[j*1]+=VA[n+1]*rhs[i*1];
+			tout[i*1]+=VA[n+1]*trhs[j*1];
+			i=IA[n+2];
+			j=JA[n+2];
+			out[j*1]+=VA[n+2]*rhs[i*1];
+			tout[i*1]+=VA[n+2]*trhs[j*1];
+			i=IA[n+3];
+			j=JA[n+3];
+			out[j*1]+=VA[n+3]*rhs[i*1];
+			tout[i*1]+=VA[n+3]*trhs[j*1];
+		}
+		for(;n<nnz;++n)
+		{
+			i=IA[n+0];
+			j=JA[n+0];
+			out[j*1]+=VA[n+0]*rhs[i*1];
+			tout[i*1]+=VA[n+0]*trhs[j*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex *trhs = rhs+1*(roff-coff); /* shifted views for the symmetric (S) mirror updates */
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4)
+		{
+			i=IA[n+0];
+			j=JA[n+0];
+			out[j*1]+=conj(VA[n+0])*rhs[i*1];
+			tout[i*1]+=conj(VA[n+0])*trhs[j*1];
+			i=IA[n+1];
+			j=JA[n+1];
+			out[j*1]+=conj(VA[n+1])*rhs[i*1];
+			tout[i*1]+=conj(VA[n+1])*trhs[j*1];
+			i=IA[n+2];
+			j=JA[n+2];
+			out[j*1]+=conj(VA[n+2])*rhs[i*1];
+			tout[i*1]+=conj(VA[n+2])*trhs[j*1];
+			i=IA[n+3];
+			j=JA[n+3];
+			out[j*1]+=conj(VA[n+3])*rhs[i*1];
+			tout[i*1]+=conj(VA[n+3])*trhs[j*1];
+		}
+		for(;n<nnz;++n)
+		{
+			i=IA[n+0];
+			j=JA[n+0];
+			out[j*1]+=conj(VA[n+0])*rhs[i*1];
+			tout[i*1]+=conj(VA[n+0])*trhs[j*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex *trhs = rhs+1*(roff-coff); /* shifted views for the symmetric (S) mirror updates */
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4)
+		{
+			i=IA[n+0];
+			j=JA[n+0];
+			out[j*1]+=conj(VA[n+0])*rhs[i*1];
+			tout[i*1]+=conj(VA[n+0])*trhs[j*1];
+			i=IA[n+1];
+			j=JA[n+1];
+			out[j*1]+=conj(VA[n+1])*rhs[i*1];
+			tout[i*1]+=conj(VA[n+1])*trhs[j*1];
+			i=IA[n+2];
+			j=JA[n+2];
+			out[j*1]+=conj(VA[n+2])*rhs[i*1];
+			tout[i*1]+=conj(VA[n+2])*trhs[j*1];
+			i=IA[n+3];
+			j=JA[n+3];
+			out[j*1]+=conj(VA[n+3])*rhs[i*1];
+			tout[i*1]+=conj(VA[n+3])*trhs[j*1];
+		}
+		for(;n<nnz;++n)
+		{
+			i=IA[n+0];
+			j=JA[n+0];
+			out[j*1]+=conj(VA[n+0])*rhs[i*1];
+			tout[i*1]+=conj(VA[n+0])*trhs[j*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex *trhs = rhs+1*(roff-coff); /* shifted views for the hermitian (H) mirror updates */
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=conj(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4)
+		{
+			i=IA[n+0];
+			j=JA[n+0];
+			out[i*1]+=VA[n+0]*rhs[j*1];
+			tout[j*1]+=conj(VA[n+0])*trhs[i*1];
+			i=IA[n+1];
+			j=JA[n+1];
+			out[i*1]+=VA[n+1]*rhs[j*1];
+			tout[j*1]+=conj(VA[n+1])*trhs[i*1];
+			i=IA[n+2];
+			j=JA[n+2];
+			out[i*1]+=VA[n+2]*rhs[j*1];
+			tout[j*1]+=conj(VA[n+2])*trhs[i*1];
+			i=IA[n+3];
+			j=JA[n+3];
+			out[i*1]+=VA[n+3]*rhs[j*1];
+			tout[j*1]+=conj(VA[n+3])*trhs[i*1];
+		}
+		for(;n<nnz;++n)
+		{
+			i=IA[n+0];
+			j=JA[n+0];
+			out[i*1]+=VA[n+0]*rhs[j*1];
+			tout[j*1]+=conj(VA[n+0])*trhs[i*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
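+
+/*
+ * The hermitian (sH) tN kernels apply the symmetry A == A^H: the direct
+ * update uses the stored value VA[n], while the mirrored update uses
+ * conj(VA[n]), since the entry at the transposed position is the conjugate.
+ */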
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex *trhs = rhs+1*(roff-coff); /* shifted views for the hermitian (H) mirror updates */
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=conj(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4)
+		{
+			i=IA[n+0];
+			j=JA[n+0];
+			out[i*1]+=VA[n+0]*rhs[j*1];
+			tout[j*1]+=conj(VA[n+0])*trhs[i*1];
+			i=IA[n+1];
+			j=JA[n+1];
+			out[i*1]+=VA[n+1]*rhs[j*1];
+			tout[j*1]+=conj(VA[n+1])*trhs[i*1];
+			i=IA[n+2];
+			j=JA[n+2];
+			out[i*1]+=VA[n+2]*rhs[j*1];
+			tout[j*1]+=conj(VA[n+2])*trhs[i*1];
+			i=IA[n+3];
+			j=JA[n+3];
+			out[i*1]+=VA[n+3]*rhs[j*1];
+			tout[j*1]+=conj(VA[n+3])*trhs[i*1];
+		}
+		for(;n<nnz;++n)
+		{
+			i=IA[n+0];
+			j=JA[n+0];
+			out[i*1]+=VA[n+0]*rhs[j*1];
+			tout[j*1]+=conj(VA[n+0])*trhs[i*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex *trhs = rhs+1*(roff-coff); /* shifted views for the hermitian (H) mirror updates */
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4)
+		{
+			i=IA[n+0];
+			j=JA[n+0];
+			out[j*1]+=VA[n+0]*rhs[i*1];
+			tout[i*1]+=conj(VA[n+0])*trhs[j*1];
+			i=IA[n+1];
+			j=JA[n+1];
+			out[j*1]+=VA[n+1]*rhs[i*1];
+			tout[i*1]+=conj(VA[n+1])*trhs[j*1];
+			i=IA[n+2];
+			j=JA[n+2];
+			out[j*1]+=VA[n+2]*rhs[i*1];
+			tout[i*1]+=conj(VA[n+2])*trhs[j*1];
+			i=IA[n+3];
+			j=JA[n+3];
+			out[j*1]+=VA[n+3]*rhs[i*1];
+			tout[i*1]+=conj(VA[n+3])*trhs[j*1];
+		}
+		for(;n<nnz;++n)
+		{
+			i=IA[n+0];
+			j=JA[n+0];
+			out[j*1]+=VA[n+0]*rhs[i*1];
+			tout[i*1]+=conj(VA[n+0])*trhs[j*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
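+
+/*
+ * For A == A^H the transposed product y += A^T * x is served from the same
+ * storage: the direct update again uses VA[n] and the mirrored one
+ * conj(VA[n]), with the vector pointers rebased as in the unsymmetric tT case.
+ */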
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex *trhs = rhs+1*(roff-coff); /* shifted views for the hermitian (H) mirror updates */
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4)
+		{
+			i=IA[n+0];
+			j=JA[n+0];
+			out[j*1]+=VA[n+0]*rhs[i*1];
+			tout[i*1]+=conj(VA[n+0])*trhs[j*1];
+			i=IA[n+1];
+			j=JA[n+1];
+			out[j*1]+=VA[n+1]*rhs[i*1];
+			tout[i*1]+=conj(VA[n+1])*trhs[j*1];
+			i=IA[n+2];
+			j=JA[n+2];
+			out[j*1]+=VA[n+2]*rhs[i*1];
+			tout[i*1]+=conj(VA[n+2])*trhs[j*1];
+			i=IA[n+3];
+			j=JA[n+3];
+			out[j*1]+=VA[n+3]*rhs[i*1];
+			tout[i*1]+=conj(VA[n+3])*trhs[j*1];
+		}
+		for(;n<nnz;++n)
+		{
+			i=IA[n+0];
+			j=JA[n+0];
+			out[j*1]+=VA[n+0]*rhs[i*1];
+			tout[i*1]+=conj(VA[n+0])*trhs[j*1];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[j*1]+=conj(VA[n+0])*rhs[i*1];
+		tout[i*1]+=VA[n+0]*trhs[j*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[j*1]+=conj(VA[n+1])*rhs[i*1];
+		tout[i*1]+=VA[n+1]*trhs[j*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[j*1]+=conj(VA[n+2])*rhs[i*1];
+		tout[i*1]+=VA[n+2]*trhs[j*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[j*1]+=conj(VA[n+3])*rhs[i*1];
+		tout[i*1]+=VA[n+3]*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+		tout[i*1]+=VA[n]*trhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[j*1]+=conj(VA[n+0])*rhs[i*1];
+		tout[i*1]+=VA[n+0]*trhs[j*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[j*1]+=conj(VA[n+1])*rhs[i*1];
+		tout[i*1]+=VA[n+1]*trhs[j*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[j*1]+=conj(VA[n+2])*rhs[i*1];
+		tout[i*1]+=VA[n+2]*trhs[j*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[j*1]+=conj(VA[n+3])*rhs[i*1];
+		tout[i*1]+=VA[n+3]*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+		tout[i*1]+=VA[n]*trhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
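+
+/*
+ * "Diagonal implicit" (dI) kernels such as the one above only accumulate
+ * the stored entries; per the NOTE in their bodies, the implicit diagonal
+ * is accounted for at the caller level.  Assuming an implicit *unit*
+ * diagonal (an assumption, not something stated here), the caller-side
+ * fix-up for y <- y + A x would amount to:
+ *
+ *	for (rsb_coo_idx_t k = 0; k < Mdim; ++k)
+ *		out[k] += rhs[k];
+ */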
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=conj(VA[n+0])*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=conj(VA[n+1])*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=conj(VA[n+2])*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=conj(VA[n+3])*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=conj(VA[n+0])*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=conj(VA[n+1])*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=conj(VA[n+2])*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=conj(VA[n+3])*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
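+
+/*
+ * In the tC (conjugate-transpose) kernels above, y <- y + A^H x is obtained
+ * from the same COO stream as the tT case by conjugating each stored value:
+ * A^H(j,i) = conj(A(i,j)), hence out[j] += conj(VA[n]) * rhs[i].
+ */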
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		tout[j*1]+=VA[n]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
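+
+/*
+ * Reading note on the symmetric (sS/sH) kernels' off-diagonal branch
+ * (roff != coff): trhs and tout are the same rhs/out arrays re-based by
+ * +/-(roff - coff), so the mirrored update tout[j] += VA[n]*trhs[i] lands
+ * in the index range of the transposed block at (coff, roff).  This is an
+ * inference from the generated pointer arithmetic, not documented behaviour.
+ */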
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		tout[j*1]+=VA[n]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		tout[i*1]+=VA[n+0]*trhs[j*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		tout[i*1]+=VA[n+1]*trhs[j*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		tout[i*1]+=VA[n+2]*trhs[j*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+		tout[i*1]+=VA[n+3]*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		tout[i*1]+=VA[n]*trhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		tout[i*1]+=VA[n+0]*trhs[j*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		tout[i*1]+=VA[n+1]*trhs[j*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		tout[i*1]+=VA[n+2]*trhs[j*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+		tout[i*1]+=VA[n+3]*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		tout[i*1]+=VA[n]*trhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[j*1]+=conj(VA[n+0])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+0])*trhs[j*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[j*1]+=conj(VA[n+1])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+1])*trhs[j*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[j*1]+=conj(VA[n+2])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+2])*trhs[j*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[j*1]+=conj(VA[n+3])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+3])*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+		tout[i*1]+=conj(VA[n])*trhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[j*1]+=conj(VA[n+0])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+0])*trhs[j*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[j*1]+=conj(VA[n+1])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+1])*trhs[j*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[j*1]+=conj(VA[n+2])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+2])*trhs[j*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[j*1]+=conj(VA[n+3])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+3])*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+		tout[i*1]+=conj(VA[n])*trhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=conj(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+0])*trhs[i*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+1])*trhs[i*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+2])*trhs[i*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+3])*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		tout[j*1]+=conj(VA[n])*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=conj(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+0])*trhs[i*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+1])*trhs[i*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+2])*trhs[i*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+3])*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		tout[j*1]+=conj(VA[n])*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+0])*trhs[j*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+1])*trhs[j*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+2])*trhs[j*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+3])*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		tout[i*1]+=conj(VA[n])*trhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+0])*trhs[j*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+1])*trhs[j*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+2])*trhs[j*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+3])*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		tout[i*1]+=conj(VA[n])*trhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[j*1]+=conj(VA[n+0])*rhs[i*1];
+		tout[i*1]+=VA[n+0]*trhs[j*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[j*1]+=conj(VA[n+1])*rhs[i*1];
+		tout[i*1]+=VA[n+1]*trhs[j*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[j*1]+=conj(VA[n+2])*rhs[i*1];
+		tout[i*1]+=VA[n+2]*trhs[j*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[j*1]+=conj(VA[n+3])*rhs[i*1];
+		tout[i*1]+=VA[n+3]*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+		tout[i*1]+=VA[n]*trhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[j*1]+=conj(VA[n+0])*rhs[i*1];
+		tout[i*1]+=VA[n+0]*trhs[j*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[j*1]+=conj(VA[n+1])*rhs[i*1];
+		tout[i*1]+=VA[n+1]*trhs[j*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[j*1]+=conj(VA[n+2])*rhs[i*1];
+		tout[i*1]+=VA[n+2]*trhs[j*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[j*1]+=conj(VA[n+3])*rhs[i*1];
+		tout[i*1]+=VA[n+3]*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+		tout[i*1]+=VA[n]*trhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
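+
+/*
+ * The uauz variants (this kernel and those below) differ from the uaua
+ * ones only in first zeroing the destination via
+ * rsb__cblas_Xscal(..., NULL, out, 1) -- the NULL scale factor apparently
+ * standing in for zero -- so they compute y <- op(A) x rather than
+ * y <- y + op(A) x; the effect is presumably equivalent to
+ * memset(out, 0, Mdim*sizeof(*out)).
+ */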
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=conj(VA[n+0])*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=conj(VA[n+1])*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=conj(VA[n+2])*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=conj(VA[n+3])*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0]; j=JA[n+0];
+		out[j*1]+=conj(VA[n+0])*rhs[i*1];
+		i=IA[n+1]; j=JA[n+1];
+		out[j*1]+=conj(VA[n+1])*rhs[i*1];
+		i=IA[n+2]; j=JA[n+2];
+		out[j*1]+=conj(VA[n+2])*rhs[i*1];
+		i=IA[n+3]; j=JA[n+3];
+		out[j*1]+=conj(VA[n+3])*rhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n]; j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		tout[j*1]+=VA[n]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[i*1]+=VA[n+0]*rhs[j*1];
+		tout[j*1]+=VA[n+0]*trhs[i*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[i*1]+=VA[n+1]*rhs[j*1];
+		tout[j*1]+=VA[n+1]*trhs[i*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[i*1]+=VA[n+2]*rhs[j*1];
+		tout[j*1]+=VA[n+2]*trhs[i*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[i*1]+=VA[n+3]*rhs[j*1];
+		tout[j*1]+=VA[n+3]*trhs[i*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		tout[j*1]+=VA[n]*trhs[i*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		tout[i*1]+=VA[n+0]*trhs[j*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		tout[i*1]+=VA[n+1]*trhs[j*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		tout[i*1]+=VA[n+2]*trhs[j*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+		tout[i*1]+=VA[n+3]*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		tout[i*1]+=VA[n]*trhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+	for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0];
+		j=JA[n+0];
+		out[j*1]+=VA[n+0]*rhs[i*1];
+		tout[i*1]+=VA[n+0]*trhs[j*1];
+		i=IA[n+1];
+		j=JA[n+1];
+		out[j*1]+=VA[n+1]*rhs[i*1];
+		tout[i*1]+=VA[n+1]*trhs[j*1];
+		i=IA[n+2];
+		j=JA[n+2];
+		out[j*1]+=VA[n+2]*rhs[i*1];
+		tout[i*1]+=VA[n+2]*trhs[j*1];
+		i=IA[n+3];
+		j=JA[n+3];
+		out[j*1]+=VA[n+3]*rhs[i*1];
+		tout[i*1]+=VA[n+3]*trhs[j*1];
+	}
+	for(;n<nnz;++n){
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		tout[i*1]+=VA[n]*trhs[j*1];
+	}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+0 ])*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
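+
+/*
+ * Note on the uauz kernels: they overwrite y rather than accumulate into
+ * it.  The leading rsb__cblas_Xscal(..., NULL, out, 1) call is the only
+ * write to out before the accumulation loops, so it must clear it; by the
+ * apparent convention (an inference, not documented here) a NULL alpha
+ * means scaling by zero, i.e. the beta = 0 case of y = beta*y + op(A)*x.
+ * The effect is equivalent to:
+ */
+#if 0
+	rsb_coo_idx_t k;
+	for (k = 0; k < Mdim; ++k)
+		out[k] = 0;	/* what the NULL-alpha scal amounts to */
+#endif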
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+0 ])*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=conj(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+0 ])*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+1 ])*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+2 ])*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+3 ])*trhs[i*1];
+	}
+for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+0 ])*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
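+
+/*
+ * The sH kernels above mirror each stored entry with a conjugate: entry
+ * (i,j) adds VA[n]*x[j] to y[i] and conj(VA[n])*x[i] to y[j].  A minimal
+ * stand-alone sketch (plain C99, hypothetical names, not librsb's API):
+ */
+#if 0
+#include <complex.h>
+#include <stdio.h>
+int main(void)
+{
+	/* 2x2 Hermitian matrix [[2,1-I],[1+I,3]], lower triangle stored */
+	const int IA[] = {0, 1, 1}, JA[] = {0, 0, 1};
+	const double complex VA[] = {2, 1 + I, 3};
+	const double complex x[] = {1, I};
+	double complex y[2] = {0, 0};
+	int n;
+
+	for (n = 0; n < 3; ++n) {
+		y[IA[n]] += VA[n] * x[JA[n]];
+		if (IA[n] != JA[n])	/* mirrored entry is conjugated */
+			y[JA[n]] += conj(VA[n]) * x[IA[n]];
+	}
+	printf("%g%+gi %g%+gi\n", creal(y[0]), cimag(y[0]),
+		creal(y[1]), cimag(y[1]));	/* prints 3+1i 1+4i */
+	return 0;
+}
+#endif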
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=conj(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+0 ])*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+1 ])*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+2 ])*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+3 ])*trhs[i*1];
+	}
+for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+0 ])*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+0 ])*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+0 ])*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
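+
+/*
+ * The dI ("diagonal implicit") kernels above never touch the diagonal;
+ * as the NOTE in each body says, the caller supplies it.  Assuming a
+ * unit implicit diagonal (a hedged sketch of the caller-side fixup, not
+ * code taken from librsb):
+ */
+#if 0
+	/* after the off-diagonal-only kernel has run: y += 1 * x */
+	rsb_coo_idx_t k;
+	for (k = 0; k < Mdim; ++k)
+		out[k] += rhs[k];
+#endif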
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
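+
+/*
+ * On the rebasing line used by the transposed kernels above:
+ * rhs = (rhs-coff)+roff and out = (out-roff)+coff swap the block's row
+ * and column offsets, since for a transposed multiply the input vector
+ * is indexed by the block's rows and the output vector by its columns.
+ * An equivalent form with explicit temporaries (a sketch, not librsb
+ * code):
+ */
+#if 0
+	const double complex *rhs_rows = rhs + (roff - coff);	/* x, row-indexed */
+	double complex *out_cols = out + (coff - roff);	/* y, column-indexed */
+#endif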
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=conj(VA[n+0 ])*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=conj(VA[n+1 ])*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=conj(VA[n+2 ])*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=conj(VA[n+3 ])*rhs[i*1];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=conj(VA[n+0 ])*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=conj(VA[n+0 ])*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=conj(VA[n+1 ])*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=conj(VA[n+2 ])*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=conj(VA[n+3 ])*rhs[i*1];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=conj(VA[n+0 ])*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=VA[n+0 ]*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=VA[n+0 ]*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+0 ])*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=conj(VA[n+0 ])*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=conj(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+0 ])*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+1 ])*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+2 ])*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+3 ])*trhs[i*1];
+	}
+for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+0 ])*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=conj(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+0 ])*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+1 ])*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+2 ])*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+3 ])*trhs[i*1];
+	}
+for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=conj(VA[n+0 ])*trhs[i*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+0 ])*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=conj(VA[n+0 ])*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim,NULL,out,1);
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=VA[n+0 ]*trhs[j*1];
+	}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
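+
+/*
+ * The uxua kernels accumulate y += alpha*A*x into y as given (no initial
+ * zeroing, unlike uauz).  The traversal is the same manual 4-way unroll
+ * with a scalar remainder loop; in isolation (hypothetical names nnz,
+ * IA, JA, VA, x, y, alpha), the pattern is:
+ */
+#if 0
+	rsb_nnz_idx_t n;
+	for (n = 0; n + 3 < nnz; n += 4) {	/* unrolled by 4 */
+		y[IA[n + 0]] += alpha * VA[n + 0] * x[JA[n + 0]];
+		y[IA[n + 1]] += alpha * VA[n + 1] * x[JA[n + 1]];
+		y[IA[n + 2]] += alpha * VA[n + 2] * x[JA[n + 2]];
+		y[IA[n + 3]] += alpha * VA[n + 3] * x[JA[n + 3]];
+	}
+	for (; n < nnz; ++n)	/* scalar remainder */
+		y[IA[n]] += alpha * VA[n] * x[JA[n]];
+#endif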
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*conj(VA[n+1 ])*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*conj(VA[n+2 ])*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*conj(VA[n+3 ])*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*conj(VA[n+1 ])*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*conj(VA[n+2 ])*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*conj(VA[n+3 ])*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
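+/*
+ * The sS kernels below exploit symmetry: each stored off-diagonal entry (i,j)
+ * also represents (j,i), so a mirrored update is issued as well.  On diagonal
+ * blocks (roff==coff) the mirror is simply skipped when i==j; on off-diagonal
+ * blocks the mirrored update goes through trhs/tout, the rhs/out pointers
+ * rebased between the row-block and column-block offsets.  A minimal,
+ * un-unrolled sketch of the diagonal-block case (kept out of the build):
+ */
+#if 0
+for(n = 0; n < nnz; ++n)
+{
+	out[IA[n]] += alpha * VA[n] * rhs[JA[n]];
+	if(IA[n] != JA[n])	/* mirror the strictly off-diagonal entry */
+		out[JA[n]] += alpha * VA[n] * rhs[IA[n]];
+}
+#endif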
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
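+/*
+ * In the transposed (tT) symmetric kernels that follow, the arithmetic is the
+ * same as in the tN case (A == A^T), but rhs and out are first rebased via
+ * rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1); so that the
+ * block-local row/column indexing matches the transposed orientation of the
+ * operands; the mirrored updates then go through trhs/tout, which are computed
+ * from the pointers before that rebase.
+ */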
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
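+/*
+ * The sH (Hermitian, A == A^H) kernels below differ from the symmetric ones
+ * only in where conj() is applied: the stored entry contributes VA[n] directly,
+ * while its mirrored transpose position contributes conj(VA[n]).
+ */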
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*conj(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conj(VA[n+0 ])*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conj(VA[n+1 ])*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conj(VA[n+2 ])*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conj(VA[n+3 ])*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conj(VA[n+0 ])*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*conj(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conj(VA[n+0 ])*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conj(VA[n+1 ])*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conj(VA[n+2 ])*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conj(VA[n+3 ])*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conj(VA[n+0 ])*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
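+/*
+ * In the conjugate-transposed (tC) Hermitian kernels below the roles invert:
+ * since A^H == A, the stored entry is conjugated in the direct update and the
+ * mirrored update uses the plain value.
+ */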
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
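+/*
+ * The dI (diagonal implicit) kernels from here on run the same loops as their
+ * dE counterparts; as the generated NOTE in each body says, the implicitly
+ * represented diagonal is not applied here but is expected to be handled at
+ * caller level.
+ */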
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*conj(VA[n+1 ])*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*conj(VA[n+2 ])*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*conj(VA[n+3 ])*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(alpha)*conj(VA[n+1 ])*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(alpha)*conj(VA[n+2 ])*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(alpha)*conj(VA[n+3 ])*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
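+/*
+ * Likewise, the sS/sH dI variants below essentially repeat the bodies of
+ * their dE counterparts; only the caller-side diagonal handling differs.
+ */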
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*conj(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conj(VA[n+0 ])*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conj(VA[n+1 ])*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conj(VA[n+2 ])*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conj(VA[n+3 ])*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conj(VA[n+0 ])*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(alpha)*conj(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conj(VA[n+0 ])*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(alpha)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conj(VA[n+1 ])*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(alpha)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conj(VA[n+2 ])*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(alpha)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conj(VA[n+3 ])*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(alpha)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(alpha)*conj(VA[n+0 ])*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
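+/*
+ * Illustrative sketch: the rebasing line `rhs=(rhs-coff*(1))+roff*(1);
+ * out=(out-roff*(1))+coff*(1);` found at the top of the transposed (tT/tC)
+ * kernels.  Those kernels read rhs with row indices and write out with
+ * column indices, so the bases passed in for the untransposed orientation
+ * are each shifted by the difference of the two block offsets.  All names
+ * below are hypothetical.
+ */
+static void rebase_for_transpose_sketch(const double complex **rhs,
+	double complex **out, int roff, int coff)
+{
+	*rhs = (*rhs - coff) + roff; /* rhs is now indexed by row indices i    */
+	*out = (*out - roff) + coff; /* out is now indexed by column indices j */
+}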
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double complex * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is applied at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(alpha)*conj(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
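+/*
+ * Illustrative sketch: the trhs/tout aliases set up by the symmetric and
+ * Hermitian kernels.  When a block lies off the main diagonal
+ * (roff != coff), the mirrored update lands in a different block row and
+ * column than the direct one, so two shifted views of the operands are
+ * precomputed once instead of testing i != j on every entry.  Names are
+ * hypothetical.
+ */
+static void mirror_views_sketch(const double complex *rhs, double complex *out,
+	int roff, int coff,
+	const double complex **trhs, double complex **tout)
+{
+	*trhs = rhs + (roff - coff); /* reads for the mirrored update  */
+	*tout = out + (coff - roff); /* writes for the mirrored update */
+}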
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double complex * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double complex * restrict alphap)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is applied at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(alpha)*conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(alpha)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(alpha)*conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(alpha)*conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(alpha)*conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(alpha)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(alpha)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
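+/*
+ * Illustrative sketch: the loop shape shared by the kernels above -- a
+ * 4-way unrolled main loop over the nonzeroes, then a scalar remainder
+ * loop for the last nnz % 4 entries.  BODY() stands for one per-entry
+ * update (here the unua case, y <- y - A*x); all names are hypothetical.
+ */
+#define BODY(n) do { y[IA[(n)]] -= VA[(n)] * x[JA[(n)]]; } while (0)
+static void unrolled_by_four_sketch(const double complex *VA, const int *IA,
+	const int *JA, int nnz, const double complex *x, double complex *y)
+{
+	int n;
+	for (n = 0; n + 3 < nnz; n += 4) /* main loop: four updates per iteration */
+	{
+		BODY(n + 0); BODY(n + 1); BODY(n + 2); BODY(n + 3);
+	}
+	for (; n < nnz; ++n)             /* remainder: at most three entries left */
+		BODY(n);
+}
+#undef BODY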
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
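+/*
+ * Illustrative note: the _H_ kernels differ from their _C_ twins only in
+ * reading the index arrays as rsb_half_idx_t, a narrower type (assumed
+ * here to be a 16-bit unsigned integer) that halves index memory traffic
+ * on submatrices small enough for it.  A hypothetical sketch of the
+ * narrowing precondition:
+ */
+static int fits_half_index_sketch(int nr, int nc)
+{
+	return nr <= 65536 && nc <= 65536; /* assuming a 16-bit half index */
+}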
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*conj(VA[n+1 ])*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*conj(VA[n+2 ])*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*conj(VA[n+3 ])*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*conj(VA[n+1 ])*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*conj(VA[n+2 ])*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*conj(VA[n+3 ])*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
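+/*
+ * Illustrative sketch: the sS (symmetric) kernels mirror an off-diagonal
+ * entry with the stored value itself, whereas the sH (Hermitian) kernels
+ * mirror it with its conjugate.  The per-entry rule, with a generic
+ * scalar a and hypothetical names:
+ */
+static void symmetric_update_sketch(double complex a, double complex v,
+	int i, int j, const double complex *x, double complex *y)
+{
+	y[i] += a * v * x[j];     /* stored triangle                   */
+	if (i != j)
+		y[j] += a * v * x[i]; /* mirror uses v itself, not conj(v) */
+}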
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*conj(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conj(VA[n+0 ])*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conj(VA[n+1 ])*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conj(VA[n+2 ])*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conj(VA[n+3 ])*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conj(VA[n+0 ])*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*conj(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conj(VA[n+0 ])*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conj(VA[n+1 ])*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conj(VA[n+2 ])*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conj(VA[n+3 ])*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conj(VA[n+0 ])*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
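+/*
+ * Illustrative sketch: as the NOTE in the dI kernels says, the implicit
+ * unit diagonal is not applied here.  Presumably the caller accounts for
+ * it once over the whole operand, along the lines of this hypothetical
+ * helper (shown for the unua case, y <- y - A*x):
+ */
+static void apply_implicit_unit_diagonal_sketch(int m,
+	const double complex *x, double complex *y)
+{
+	for (int i = 0; i < m; ++i)
+		y[i] -= x[i]; /* unit diagonal: A[i][i] == 1 is stored implicitly */
+}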
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is applied at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is applied at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*conj(VA[n+1 ])*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*conj(VA[n+2 ])*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*conj(VA[n+3 ])*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is applied at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*1]+=(-1)*conj(VA[n+1 ])*rhs[i*1];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*1]+=(-1)*conj(VA[n+2 ])*rhs[i*1];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*1]+=(-1)*conj(VA[n+3 ])*rhs[i*1];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is applied at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*VA[n]*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+1 ]*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+2 ]*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+3 ]*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*VA[n+0 ]*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is applied at the caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
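+
+/*
+ * In the conjugated (tC) variants above, symmetry (A = A^T) turns the
+ * conjugate transpose into an elementwise conjugate, A^H = conj(A), so
+ * both the direct and the mirrored update simply apply conj() to the
+ * stored value.  The inner step, under the same assumptions as the sketch
+ * above (illustrative names only):
+ *
+ *   y[j] -= conj(VA[n]) * x[i];
+ *   if (j != i)
+ *       y[i] -= conj(VA[n]) * x[j];
+ */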
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// S
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*conj(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conj(VA[n+0 ])*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conj(VA[n+1 ])*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conj(VA[n+2 ])*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conj(VA[n+3 ])*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conj(VA[n+0 ])*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
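+
+/*
+ * The Hermitian (sH) kernels store one triangle only: an entry a = A(i,j)
+ * implies A(j,i) = conj(a), so the untransposed product applies a on the
+ * (i,j) side and conj(a) on the mirrored (j,i) side.  A minimal sketch,
+ * assuming plain COO storage (herm_spmv_sub is an illustrative name):
+ *
+ *   #include <complex.h>
+ *   #include <stddef.h>
+ *
+ *   // y -= A * x, A Hermitian, one triangle stored as COO triplets.
+ *   static void herm_spmv_sub(size_t nnz, const int *IA, const int *JA,
+ *                             const double complex *VA,
+ *                             const double complex *x, double complex *y)
+ *   {
+ *       for (size_t n = 0; n < nnz; ++n) {
+ *           const int i = IA[n], j = JA[n];
+ *           y[i] -= VA[n] * x[j];
+ *           if (i != j)                       // implied mirror entry
+ *               y[j] -= conj(VA[n]) * x[i];
+ *       }
+ *   }
+ */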
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*1]+=(-1)*VA[n]*rhs[j*1];
+		if(RSB_LIKELY(i!=j))
+			out[j*1]+=(-1)*conj(VA[n])*rhs[i*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conj(VA[n+0 ])*trhs[i*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*1]+=(-1)*VA[n+1 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conj(VA[n+1 ])*trhs[i*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*1]+=(-1)*VA[n+2 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conj(VA[n+2 ])*trhs[i*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*1]+=(-1)*VA[n+3 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conj(VA[n+3 ])*trhs[i*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*1]+=(-1)*VA[n+0 ]*rhs[j*1];
+		tout[j*1]+=(-1)*conj(VA[n+0 ])*trhs[i*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*VA[n]*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*conj(VA[n])*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+0 ])*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*VA[n+1 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+1 ])*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*VA[n+2 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+2 ])*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*VA[n+3 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+3 ])*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*VA[n+0 ]*rhs[i*1];
+		tout[i*1]+=(-1)*conj(VA[n+0 ])*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
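+
+/*
+ * For a Hermitian matrix A^T = conj(A), which is why the tT kernels above
+ * use the stored value on one side and its conjugate on the other; per
+ * stored entry a = A(i,j):
+ *
+ *   y[j] -= VA[n] * x[i];             // from A^T(j,i) = A(i,j) = a
+ *   if (j != i)
+ *       y[i] -= conj(VA[n]) * x[j];   // from A^T(i,j) = conj(a)
+ */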
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	
+	const double complex *trhs = rhs+1*(roff-coff);// H
+	double complex *tout=out+1*(coff-roff);
+
+	rhs=(rhs-coff*(1))+roff*(1); out=(out-roff*(1))+coff*(1);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*1]+=(-1)*conj(VA[n])*rhs[i*1];
+		if(RSB_LIKELY(j!=i))
+			out[i*1]+=(-1)*VA[n]*rhs[j*1];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*1]+=(-1)*conj(VA[n+1 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+1 ]*trhs[j*1];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*1]+=(-1)*conj(VA[n+2 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+2 ]*trhs[j*1];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*1]+=(-1)*conj(VA[n+3 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+3 ]*trhs[j*1];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*1]+=(-1)*conj(VA[n+0 ])*rhs[i*1];
+		tout[i*1]+=(-1)*VA[n+0 ]*trhs[j*1];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
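+
+/*
+ * The sasa kernels use a 4-way manually unrolled main loop followed by a
+ * scalar remainder loop for the last nnz % 4 entries; the guard n+3 < nnz
+ * keeps every unrolled access in bounds.  The pattern, reduced to a
+ * stand-alone sketch (coo_spmv_unrolled is an illustrative name):
+ *
+ *   #include <complex.h>
+ *   #include <stddef.h>
+ *
+ *   static void coo_spmv_unrolled(size_t nnz, const int *IA, const int *JA,
+ *                                 const double complex *VA,
+ *                                 const double complex *x, double complex *y)
+ *   {
+ *       size_t n = 0;
+ *       for (; n + 3 < nnz; n += 4) {     // main body, 4 entries per trip
+ *           y[IA[n+0]] += VA[n+0] * x[JA[n+0]];
+ *           y[IA[n+1]] += VA[n+1] * x[JA[n+1]];
+ *           y[IA[n+2]] += VA[n+2] * x[JA[n+2]];
+ *           y[IA[n+3]] += VA[n+3] * x[JA[n+3]];
+ *       }
+ *       for (; n < nnz; ++n)              // remainder, at most 3 entries
+ *           y[IA[n]] += VA[n] * x[JA[n]];
+ *   }
+ */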
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
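+
+/*
+ * The _H_ variants differ from the _C_ ones only in the index type:
+ * rsb_half_idx_t is a narrower integer than rsb_coo_idx_t, so for blocks
+ * whose local dimensions fit it, the IA/JA arrays consume half the index
+ * memory traffic.  Sketch of the idea with a 16-bit index type (uint16_t
+ * is an assumption here; the actual width of rsb_half_idx_t is fixed at
+ * configure time):
+ *
+ *   #include <complex.h>
+ *   #include <stdint.h>
+ *   #include <stddef.h>
+ *
+ *   static void coo_spmv_half(size_t nnz,
+ *                             const uint16_t *IA, const uint16_t *JA,
+ *                             const double complex *VA,
+ *                             const double complex *x, double complex *y)
+ *   {
+ *       for (size_t n = 0; n < nnz; ++n)  // narrow indices widen on load
+ *           y[IA[n]] += VA[n] * x[JA[n]];
+ *   }
+ */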
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
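+
+/*
+ * The tT kernels reuse the untransposed loop by exchanging the roles of
+ * the index arrays (write through JA, read through IA) and by rebasing
+ * the vector pointers once, up front:
+ *
+ *   rhs = (rhs - coff*incx) + roff*incx;  // i.e. rhs += (roff-coff)*incx
+ *   out = (out - roff*incy) + coff*incy;  // i.e. out += (coff-roff)*incy
+ *
+ * Under transposition the block's row base roff and column base coff
+ * swap, so after the shift out[j*incy] addresses the output element of
+ * global column coff+j and rhs[i*incx] the input element of global row
+ * roff+i.
+ */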
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=conj(VA[n+0 ])*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=conj(VA[n+1 ])*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=conj(VA[n+2 ])*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=conj(VA[n+3 ])*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=conj(VA[n+0 ])*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
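+
+/*
+ * These kernels rely on C99 complex arithmetic: double complex and conj()
+ * come from <complex.h>, and the * operator on complex operands performs
+ * the complex product, so the tC loops need no hand-written conjugation.
+ * A small self-contained check of that building block:
+ *
+ *   #include <complex.h>
+ *   #include <stdio.h>
+ *
+ *   int main(void)
+ *   {
+ *       const double complex a = 1.0 + 2.0*I, x = 3.0 - 1.0*I;
+ *       const double complex y = conj(a) * x;  // (1-2i)(3-i) = 1 - 7i
+ *       printf("%g%+gi\n", creal(y), cimag(y));
+ *       return 0;
+ *   }
+ */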
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=conj(VA[n+0 ])*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=conj(VA[n+1 ])*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=conj(VA[n+2 ])*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=conj(VA[n+3 ])*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=conj(VA[n+0 ])*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// S
+	double complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
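+
+/*
+ * For the symmetric, untransposed case the roff!=coff branch mirrors each
+ * contribution into the transposed block without per-entry index
+ * arithmetic: with out[i*incy] addressing y at global row roff+i and
+ * rhs[j*incx] addressing x at global column coff+j, the shifted aliases
+ *
+ *   trhs = rhs + incx*(roff-coff);  // trhs[i*incx] is x at row roff+i
+ *   tout = out + incy*(coff-roff);  // tout[j*incy] is y at column coff+j
+ *
+ * let the pair of updates for a stored entry a = A(roff+i, coff+j) be
+ *
+ *   out[i*incy]  += a * rhs[j*incx];   // y(roff+i) += a * x(coff+j)
+ *   tout[j*incy] += a * trhs[i*incx];  // y(coff+j) += a * x(roff+i)
+ *
+ * which is exactly the stored entry plus its symmetric mirror.
+ */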
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// S
+	double complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// S
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// S
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// S
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=conj(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=conj(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=conj(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=conj(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=conj(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=conj(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=conj(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=conj(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=conj(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=conj(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// S
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=conj(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=conj(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=conj(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=conj(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=conj(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=conj(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=conj(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=conj(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=conj(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=conj(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// H
+	double complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=conj(VA[n])*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=conj(VA[n+0 ])*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=conj(VA[n+1 ])*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=conj(VA[n+2 ])*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=conj(VA[n+3 ])*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=conj(VA[n+0 ])*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// H
+	double complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=conj(VA[n])*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=conj(VA[n+0 ])*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=conj(VA[n+1 ])*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=conj(VA[n+2 ])*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=conj(VA[n+3 ])*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=conj(VA[n+0 ])*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// H
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=conj(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=conj(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=conj(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=conj(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=conj(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=conj(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// H
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=conj(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=conj(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=conj(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=conj(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=conj(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=conj(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// H
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=conj(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=conj(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=conj(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=conj(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// H
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=conj(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=conj(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=conj(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=conj(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
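+
+/*
+ * In the dI (diagonal implicit) kernels the unit diagonal is not stored
+ * and, per the NOTE above, not applied here either: only the stored
+ * off-diagonal entries are multiplied, and the caller is expected to
+ * account for the implicit diagonal.  A sketch of what that caller-side
+ * step amounts to for y <- y + A*x with an implicitly unit diagonal
+ * (illustrative, not the actual librsb call sequence):
+ *
+ *   for (rsb_coo_idx_t i = 0; i < nrows; ++i)
+ *       y[i*incy] += x[i*incx];   // contribution of the unit diagonal
+ *   // ...then invoke the dI kernel on the stored entries.
+ */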
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[j*incy]+=VA[n+0]*rhs[i*incx];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*incy]+=VA[n+1]*rhs[i*incx];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*incy]+=VA[n+2]*rhs[i*incx];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*incy]+=VA[n+3]*rhs[i*incx];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[j*incy]+=VA[n]*rhs[i*incx];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[j*incy]+=conj(VA[n+0])*rhs[i*incx];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*incy]+=conj(VA[n+1])*rhs[i*incx];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*incy]+=conj(VA[n+2])*rhs[i*incx];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*incy]+=conj(VA[n+3])*rhs[i*incx];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[j*incy]+=conj(VA[n])*rhs[i*incx];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
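+
+/* NOTE: the conjugate-transposed kernel above differs from the
+ * transposed one only in applying conj() to each stored value, i.e. it
+ * accumulates y += A^H * x one triplet at a time. */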
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[j*incy]+=conj(VA[n+0])*rhs[i*incx];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*incy]+=conj(VA[n+1])*rhs[i*incx];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*incy]+=conj(VA[n+2])*rhs[i*incx];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*incy]+=conj(VA[n+3])*rhs[i*incx];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[j*incy]+=conj(VA[n])*rhs[i*incx];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// S
+	double complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[i*incy]+=VA[n+0]*rhs[j*incx];
+			tout[j*incy]+=VA[n+0]*trhs[i*incx];
+			i=IA[n+1]; j=JA[n+1];
+			out[i*incy]+=VA[n+1]*rhs[j*incx];
+			tout[j*incy]+=VA[n+1]*trhs[i*incx];
+			i=IA[n+2]; j=JA[n+2];
+			out[i*incy]+=VA[n+2]*rhs[j*incx];
+			tout[j*incy]+=VA[n+2]*trhs[i*incx];
+			i=IA[n+3]; j=JA[n+3];
+			out[i*incy]+=VA[n+3]*rhs[j*incx];
+			tout[j*incy]+=VA[n+3]*trhs[i*incx];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[i*incy]+=VA[n]*rhs[j*incx];
+			tout[j*incy]+=VA[n]*trhs[i*incx];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
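+
+/* NOTE: in the symmetric kernel above, a diagonal block (roff==coff)
+ * applies each triplet to both (i,j) and (j,i), skipping the mirrored
+ * update when i==j so diagonal entries are not counted twice; an
+ * off-diagonal block takes the roff!=coff branch, where the shifted
+ * views trhs/tout place the mirrored contribution at the transposed
+ * global coordinates. */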
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// S
+	double complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[i*incy]+=VA[n+0]*rhs[j*incx];
+			tout[j*incy]+=VA[n+0]*trhs[i*incx];
+			i=IA[n+1]; j=JA[n+1];
+			out[i*incy]+=VA[n+1]*rhs[j*incx];
+			tout[j*incy]+=VA[n+1]*trhs[i*incx];
+			i=IA[n+2]; j=JA[n+2];
+			out[i*incy]+=VA[n+2]*rhs[j*incx];
+			tout[j*incy]+=VA[n+2]*trhs[i*incx];
+			i=IA[n+3]; j=JA[n+3];
+			out[i*incy]+=VA[n+3]*rhs[j*incx];
+			tout[j*incy]+=VA[n+3]*trhs[i*incx];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[i*incy]+=VA[n]*rhs[j*incx];
+			tout[j*incy]+=VA[n]*trhs[i*incx];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// S
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[j*incy]+=VA[n+0]*rhs[i*incx];
+			tout[i*incy]+=VA[n+0]*trhs[j*incx];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*incy]+=VA[n+1]*rhs[i*incx];
+			tout[i*incy]+=VA[n+1]*trhs[j*incx];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*incy]+=VA[n+2]*rhs[i*incx];
+			tout[i*incy]+=VA[n+2]*trhs[j*incx];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*incy]+=VA[n+3]*rhs[i*incx];
+			tout[i*incy]+=VA[n+3]*trhs[j*incx];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[j*incy]+=VA[n]*rhs[i*incx];
+			tout[i*incy]+=VA[n]*trhs[j*incx];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// S
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[j*incy]+=VA[n+0]*rhs[i*incx];
+			tout[i*incy]+=VA[n+0]*trhs[j*incx];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*incy]+=VA[n+1]*rhs[i*incx];
+			tout[i*incy]+=VA[n+1]*trhs[j*incx];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*incy]+=VA[n+2]*rhs[i*incx];
+			tout[i*incy]+=VA[n+2]*trhs[j*incx];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*incy]+=VA[n+3]*rhs[i*incx];
+			tout[i*incy]+=VA[n+3]*trhs[j*incx];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[j*incy]+=VA[n]*rhs[i*incx];
+			tout[i*incy]+=VA[n]*trhs[j*incx];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// S
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=conj(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=conj(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[j*incy]+=conj(VA[n+0])*rhs[i*incx];
+			tout[i*incy]+=conj(VA[n+0])*trhs[j*incx];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*incy]+=conj(VA[n+1])*rhs[i*incx];
+			tout[i*incy]+=conj(VA[n+1])*trhs[j*incx];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*incy]+=conj(VA[n+2])*rhs[i*incx];
+			tout[i*incy]+=conj(VA[n+2])*trhs[j*incx];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*incy]+=conj(VA[n+3])*rhs[i*incx];
+			tout[i*incy]+=conj(VA[n+3])*trhs[j*incx];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[j*incy]+=conj(VA[n])*rhs[i*incx];
+			tout[i*incy]+=conj(VA[n])*trhs[j*incx];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// S
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=conj(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=conj(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[j*incy]+=conj(VA[n+0])*rhs[i*incx];
+			tout[i*incy]+=conj(VA[n+0])*trhs[j*incx];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*incy]+=conj(VA[n+1])*rhs[i*incx];
+			tout[i*incy]+=conj(VA[n+1])*trhs[j*incx];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*incy]+=conj(VA[n+2])*rhs[i*incx];
+			tout[i*incy]+=conj(VA[n+2])*trhs[j*incx];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*incy]+=conj(VA[n+3])*rhs[i*incx];
+			tout[i*incy]+=conj(VA[n+3])*trhs[j*incx];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[j*incy]+=conj(VA[n])*rhs[i*incx];
+			tout[i*incy]+=conj(VA[n])*trhs[j*incx];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// H
+	double complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=conj(VA[n])*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[i*incy]+=VA[n+0]*rhs[j*incx];
+			tout[j*incy]+=conj(VA[n+0])*trhs[i*incx];
+			i=IA[n+1]; j=JA[n+1];
+			out[i*incy]+=VA[n+1]*rhs[j*incx];
+			tout[j*incy]+=conj(VA[n+1])*trhs[i*incx];
+			i=IA[n+2]; j=JA[n+2];
+			out[i*incy]+=VA[n+2]*rhs[j*incx];
+			tout[j*incy]+=conj(VA[n+2])*trhs[i*incx];
+			i=IA[n+3]; j=JA[n+3];
+			out[i*incy]+=VA[n+3]*rhs[j*incx];
+			tout[j*incy]+=conj(VA[n+3])*trhs[i*incx];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[i*incy]+=VA[n]*rhs[j*incx];
+			tout[j*incy]+=conj(VA[n])*trhs[i*incx];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
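+
+/* NOTE: the Hermitian kernel above mirrors each off-diagonal triplet
+ * with its conjugate: out[i] += VA[n]*rhs[j] and
+ * tout[j] += conj(VA[n])*trhs[i], matching A(j,i) == conj(A(i,j)). */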
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// H
+	double complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=conj(VA[n])*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[i*incy]+=VA[n+0]*rhs[j*incx];
+			tout[j*incy]+=conj(VA[n+0])*trhs[i*incx];
+			i=IA[n+1]; j=JA[n+1];
+			out[i*incy]+=VA[n+1]*rhs[j*incx];
+			tout[j*incy]+=conj(VA[n+1])*trhs[i*incx];
+			i=IA[n+2]; j=JA[n+2];
+			out[i*incy]+=VA[n+2]*rhs[j*incx];
+			tout[j*incy]+=conj(VA[n+2])*trhs[i*incx];
+			i=IA[n+3]; j=JA[n+3];
+			out[i*incy]+=VA[n+3]*rhs[j*incx];
+			tout[j*incy]+=conj(VA[n+3])*trhs[i*incx];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[i*incy]+=VA[n]*rhs[j*incx];
+			tout[j*incy]+=conj(VA[n])*trhs[i*incx];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// H
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=conj(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[j*incy]+=VA[n+0]*rhs[i*incx];
+			tout[i*incy]+=conj(VA[n+0])*trhs[j*incx];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*incy]+=VA[n+1]*rhs[i*incx];
+			tout[i*incy]+=conj(VA[n+1])*trhs[j*incx];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*incy]+=VA[n+2]*rhs[i*incx];
+			tout[i*incy]+=conj(VA[n+2])*trhs[j*incx];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*incy]+=VA[n+3]*rhs[i*incx];
+			tout[i*incy]+=conj(VA[n+3])*trhs[j*incx];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[j*incy]+=VA[n]*rhs[i*incx];
+			tout[i*incy]+=conj(VA[n])*trhs[j*incx];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// H
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=conj(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[j*incy]+=VA[n+0]*rhs[i*incx];
+			tout[i*incy]+=conj(VA[n+0])*trhs[j*incx];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*incy]+=VA[n+1]*rhs[i*incx];
+			tout[i*incy]+=conj(VA[n+1])*trhs[j*incx];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*incy]+=VA[n+2]*rhs[i*incx];
+			tout[i*incy]+=conj(VA[n+2])*trhs[j*incx];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*incy]+=VA[n+3]*rhs[i*incx];
+			tout[i*incy]+=conj(VA[n+3])*trhs[j*incx];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[j*incy]+=VA[n]*rhs[i*incx];
+			tout[i*incy]+=conj(VA[n])*trhs[j*incx];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// H
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=conj(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[j*incy]+=conj(VA[n+0])*rhs[i*incx];
+			tout[i*incy]+=VA[n+0]*trhs[j*incx];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*incy]+=conj(VA[n+1])*rhs[i*incx];
+			tout[i*incy]+=VA[n+1]*trhs[j*incx];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*incy]+=conj(VA[n+2])*rhs[i*incx];
+			tout[i*incy]+=VA[n+2]*trhs[j*incx];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*incy]+=conj(VA[n+3])*rhs[i*incx];
+			tout[i*incy]+=VA[n+3]*trhs[j*incx];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[j*incy]+=conj(VA[n])*rhs[i*incx];
+			tout[i*incy]+=VA[n]*trhs[j*incx];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+
+	const double complex *trhs = rhs+incx*(roff-coff);// H
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=conj(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+		for(n=0;n+3<nnz;n+=4){
+			i=IA[n+0]; j=JA[n+0];
+			out[j*incy]+=conj(VA[n+0])*rhs[i*incx];
+			tout[i*incy]+=VA[n+0]*trhs[j*incx];
+			i=IA[n+1]; j=JA[n+1];
+			out[j*incy]+=conj(VA[n+1])*rhs[i*incx];
+			tout[i*incy]+=VA[n+1]*trhs[j*incx];
+			i=IA[n+2]; j=JA[n+2];
+			out[j*incy]+=conj(VA[n+2])*rhs[i*incx];
+			tout[i*incy]+=VA[n+2]*trhs[j*incx];
+			i=IA[n+3]; j=JA[n+3];
+			out[j*incy]+=conj(VA[n+3])*rhs[i*incx];
+			tout[i*incy]+=VA[n+3]*trhs[j*incx];
+		}
+		for(;n<nnz;++n){
+			i=IA[n]; j=JA[n];
+			out[j*incy]+=conj(VA[n])*rhs[i*incx];
+			tout[i*incy]+=VA[n]*trhs[j*incx];
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
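+
+/* NOTE: for A == A^H the conjugate-transposed product equals the plain
+ * product, so the kernel above conjugates the direct update and leaves
+ * the mirrored one unconjugated, since conj(conj(v)) == v. */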
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n<0 || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
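+
+/* NOTE: the upper-triangular solve above is a backward substitution
+ * over row-major COO triplets: scanning n from nnz-1 down and rows
+ * from Mdim-1 down, it gathers the off-diagonal products of row ii
+ * into ax, then reaches the row's diagonal entry and sets
+ * out[ii] = (out[ii]-ax)/VA[n]. For example, U = [2 1; 0 4] stored as
+ * (0,0,2),(0,1,1),(1,1,4) with out={4,8} on entry yields out[1]=8/4=2,
+ * then out[0]=(4-1*2)/2=1. */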
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n<0 || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
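+
+/* NOTE: the transposed solve above sweeps forward instead: the
+ * diagonal entry leads each row in the triplet order, so out[ii] is
+ * divided by it first, and the remaining entries (ii,j) of that row,
+ * i.e. the entries of column ii of A^T, are scattered as
+ * out[j] -= VA[n]*out[ii]. */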
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
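+
+/* NOTE: the conjugate-transposed solve above follows the same column
+ * sweep as the transposed one, with the scattered updates conjugated:
+ * out[j] -= conj(VA[n])*ax; the diagonal division uses the stored
+ * value as-is. */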
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n<0 || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
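+
+/* NOTE: the sS-tagged solvers here share their bodies with the sU ones
+ * above; only the stored triangle takes part in a triangular solve, so
+ * the symmetry flag changes nothing in these kernels. */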
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n<0 || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n<0)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is zero or missing.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
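+
+/*
+ * The ..._H_ variants differ from the ..._C_ ones only in reading the index
+ * arrays through the narrower rsb_half_idx_t type, reducing index bandwidth
+ * for submatrices whose local coordinates fit the narrower range. A sketch
+ * of the idea (uint16_t is a hypothetical stand-in for rsb_half_idx_t, and
+ * the widths below are assumptions, not the library's definitions):
+ */
+#if 0 /* illustrative only */
+#include <stdint.h>
+#include <stddef.h>
+static size_t index_bytes(size_t nnz, int use_half)
+{
+	/* two index arrays per nonzero: one row index, one column index */
+	return 2 * nnz * (use_half ? sizeof(uint16_t) : sizeof(uint32_t));
+}
+#endif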
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is zero or missing.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n+1<=0 || n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA; /* no diagonal left for row ii (also catch underflow in this backward sweep) */
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is zero or missing.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is zero or missing.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n+1<=0 || n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA; /* no diagonal left for row ii (also catch underflow in this backward sweep) */
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is zero or missing.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is zero or missing.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n+1<=0 || n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA; /* no diagonal left for row ii (also catch underflow in this backward sweep) */
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
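+
+/*
+ * Note on the tC kernels above: for symmetric \f$A\f$ (\f$A = A^T\f$) one has
+ * \f$A^H = \bar{A}\f$, so the conjugate-transpose solve follows the same
+ * traversal as the transpose solve while conjugating the stored off-diagonal
+ * entries.
+ */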
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is zero or missing.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conj(VA[n])*out[j*1];
+		}
+
+		if(n+1<=0 || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* diagonal missing or zero (this sweep is backward, so underflow, not n==nnz, is the exhaustion case) */
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
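+
+/*
+ * A matching sketch (hypothetical, simplified) of the gather-style
+ * substitution used by the tN kernels on Hermitian storage: the off-diagonal
+ * entries of row ii are gathered against already-solved components before
+ * the division by the diagonal.
+ */
+#if 0 /* illustrative only */
+#include <complex.h>
+static int sketch_spsv_gather(const double complex *VA, const int *IA,
+	const int *JA, int m, int nnz, double complex *out /* in: x, out: y */)
+{
+	int n = nnz - 1, ii;
+	for(ii = m - 1; ii >= 0; --ii)
+	{
+		double complex ax = 0;
+		for(; n >= 0 && IA[n] == ii && JA[n] != ii; --n)
+			ax += conj(VA[n]) * out[JA[n]]; /* gather row ii */
+		if(n < 0 || VA[n] == 0.0)
+			return -1; /* missing or zero diagonal */
+		out[ii] = (out[ii] - ax) / VA[n--];
+	}
+	return 0;
+}
+#endif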
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is zero or missing.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conj(VA[n])*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is zero or missing.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conj(VA[n])*out[j*1];
+		}
+
+		if(n+1<=0 || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* diagonal missing or zero (this sweep is backward, so underflow, not n==nnz, is the exhaustion case) */
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is zero or missing.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conj(VA[n])*out[j*1];
+		}
+
+		if(n==nnz || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*1]=(out[ii*1]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is zero or missing.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is zero or missing.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n+1<=0 || n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA; /* no diagonal left for row ii (also catch underflow in this backward sweep) */
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is zero or missing.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is zero or missing.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n+1<=0 || n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA; /* no diagonal left for row ii (also catch underflow in this backward sweep) */
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is zero or missing.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is zero or missing.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n+1<=0 || n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA; /* no diagonal left for row ii (also catch underflow in this backward sweep) */
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is zero or missing.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR on success, RSB_ERR_INVALID_NUMERICAL_DATA if a diagonal element is zero or missing.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n+1<=0 || n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA; /* no diagonal left for row ii (also catch underflow in this backward sweep) */
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*1]/=aa;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
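+
+/*
+ * The dI (implicit diagonal) kernels assume a unit diagonal that is not
+ * stored: no division takes place and no zero-diagonal check is needed.
+ * A sketch (hypothetical, simplified) of the gather form:
+ */
+#if 0 /* illustrative only */
+#include <complex.h>
+static void sketch_spsv_gather_unit(const double complex *VA, const int *IA,
+	const int *JA, int m, int nnz, double complex *out)
+{
+	int n = nnz - 1, ii;
+	for(ii = m - 1; ii >= 0; --ii)
+	{
+		double complex ax = 0;
+		for(; n >= 0 && IA[n] == ii && JA[n] != ii; --n)
+			ax += VA[n] * out[JA[n]]; /* gather row ii */
+		out[ii] -= ax; /* implicit unit diagonal: no division */
+	}
+}
+#endif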
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
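+
+/*
+ * Scatter-style counterpart with implicit unit diagonal (hypothetical,
+ * simplified sketch): out[ii] is already final when row ii is reached,
+ * so it is only propagated forward.
+ */
+#if 0 /* illustrative only */
+#include <complex.h>
+static void sketch_spsv_scatter_unit(const double complex *VA, const int *IA,
+	const int *JA, int m, int nnz, double complex *out)
+{
+	int n = 0, ii;
+	for(ii = 0; ii < m; ++ii)
+	{
+		const double complex ax = out[ii]; /* final: unit diagonal */
+		for(; n < nnz && IA[n] == ii; ++n)
+			out[JA[n]] -= VA[n] * ax; /* scatter along row ii */
+	}
+}
+#endif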
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]); /* no-op: the implicit unit diagonal needs no scaling */
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this function is implemented and always succeeds).
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]); /* no-op: the implicit unit diagonal needs no scaling */
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this function is implemented and always succeeds).
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conj(VA[n])*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
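+
+/* Illustrative sketch (not part of the generated kernel set): unlike the
+ * scatter kernels above, this family "gathers": walking the rows backwards,
+ * it sums ax += conj(VA[n])*out[j] over the already-solved unknowns of row
+ * ii and then applies out[ii] -= ax; the unit diagonal is implicit, so no
+ * final division is needed. A minimal standalone version under the same
+ * assumptions (hypothetical demo_* name, strictly upper triangle in
+ * row-sorted COO):
+ */
+#if 0 /* illustration only, kept out of the build */
+static void demo_spsv_gather(const double complex *VA, const int *IA,
+                             const int *JA, int nnz, int m,
+                             double complex *x)
+{
+	int n = nnz - 1, ii;
+	for (ii = m - 1; ii >= 0; --ii)
+	{
+		double complex ax = 0;
+		for (; n >= 0 && IA[n] == ii; --n)
+			ax += conj(VA[n]) * x[JA[n]];	/* x[j], j > ii, already final */
+		x[ii] -= ax;	/* implicit unit diagonal */
+	}
+}
+#endif
+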
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this function is implemented and always succeeds).
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conj(VA[n])*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this function is implemented and always succeeds).
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conj(VA[n])*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this function is implemented and always succeeds).
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conj(VA[n])*out[j*1];
+		}
+
+		out[ii*1]=(out[ii*1]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this function is implemented and always succeeds).
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]); /* no-op: the implicit unit diagonal needs no scaling */
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this function is implemented and always succeeds).
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]); /* no-op: the implicit unit diagonal needs no scaling */
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this function is implemented and always succeeds).
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]); /* no-op: the implicit unit diagonal needs no scaling */
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this function is implemented and always succeeds).
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=conj(VA[n])*ax;
+		}
+
+		out[ii*1]=(out[ii*1]); /* no-op: the implicit unit diagonal needs no scaling */
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
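+
+/* Note on the kernels below: the _tC variants over sH data carry no conj()
+ * on VA, unlike the _tT/sH ones above. Since A == A^H, the conjugate
+ * transpose of A is A itself, so the stored values apply unconjugated;
+ * in the _tT case A^T equals the elementwise conjugate of A, hence the
+ * conj() there. */
+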
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this function is implemented and always succeeds).
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]); /* no-op: the implicit unit diagonal needs no scaling */
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this function is implemented and always succeeds).
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]); /* no-op: the implicit unit diagonal needs no scaling */
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this function is implemented and always succeeds).
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]); /* no-op: the implicit unit diagonal needs no scaling */
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR (this function is implemented and always succeeds).
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*1];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*1]-=VA[n]*ax;
+		}
+
+		out[ii*1]=(out[ii*1]); /* no-op: the implicit unit diagonal needs no scaling */
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
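+
+/* Illustrative sketch (not part of the generated kernel set): the spmv_sxsa
+ * kernels compute y += alpha*op(A)*x over COO data with a 4-way unrolled
+ * main loop and a cleanup loop for the nnz%4 tail. The same structure with
+ * a hypothetical demo_* name and unit strides:
+ */
+#if 0 /* illustration only, kept out of the build */
+static void demo_spmv_unrolled(const double complex *VA, const int *IA,
+                               const int *JA, int nnz, double complex alpha,
+                               const double complex *x, double complex *y)
+{
+	int n;
+	for (n = 0; n + 3 < nnz; n += 4)	/* unrolled body */
+	{
+		y[IA[n+0]] += alpha * VA[n+0] * x[JA[n+0]];
+		y[IA[n+1]] += alpha * VA[n+1] * x[JA[n+1]];
+		y[IA[n+2]] += alpha * VA[n+2] * x[JA[n+2]];
+		y[IA[n+3]] += alpha * VA[n+3] * x[JA[n+3]];
+	}
+	for (; n < nnz; ++n)	/* remainder: at most 3 entries */
+		y[IA[n]] += alpha * VA[n] * x[JA[n]];
+}
+#endif
+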
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
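+
+/* Note on the pointer rebasing above: local indices are relative to the
+ * (roff,coff) corner of the stored block, and under transposition the row
+ * and column offsets swap roles; shifting the base pointers once is cheaper
+ * than adjusting every index inside the loop. A sketch of the equivalence
+ * (hypothetical demo_* name):
+ */
+#if 0 /* illustration only, kept out of the build */
+static double complex *demo_rebase(double complex *out, rsb_coo_idx_t roff,
+                                   rsb_coo_idx_t coff, rsb_coo_idx_t incy)
+{
+	/* the returned pointer r satisfies r[j*incy] == out[(j-roff+coff)*incy] */
+	return (out - roff * incy) + coff * incy;
+}
+#endif
+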
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*conj(VA[n+1 ])*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*conj(VA[n+2 ])*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*conj(VA[n+3 ])*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*conj(VA[n+1 ])*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*conj(VA[n+2 ])*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*conj(VA[n+3 ])*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* rebased operands for the mirrored symmetric update */
+	double complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
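+
+/* Illustrative sketch (not part of the generated kernel set): with only one
+ * triangle stored (sS), every entry (i,j,v) of a diagonal block updates both
+ * y[i] and y[j]; the i!=j test keeps diagonal entries from being counted
+ * twice. Off-diagonal blocks (roff!=coff) take the unrolled branch instead,
+ * mirroring through the rebased trhs/tout pointers, where no such test is
+ * needed. The diagonal-block case with a hypothetical demo_* name and unit
+ * strides:
+ */
+#if 0 /* illustration only, kept out of the build */
+static void demo_spmv_sym(const double complex *VA, const int *IA,
+                          const int *JA, int nnz, double complex alpha,
+                          const double complex *x, double complex *y)
+{
+	int n;
+	for (n = 0; n < nnz; ++n)
+	{
+		const int i = IA[n], j = JA[n];
+		y[i] += alpha * VA[n] * x[j];
+		if (i != j)	/* mirrored term; skip the diagonal */
+			y[j] += alpha * VA[n] * x[i];
+	}
+}
+#endif
+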
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* rebased operands for the mirrored symmetric update */
+	double complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* rebased operands for the mirrored symmetric update */
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* rebased operands for the mirrored symmetric update */
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* rebased operands for the mirrored symmetric update */
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*conj(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*conj(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*conj(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*conj(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*conj(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* rebased operands for the mirrored symmetric update */
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*conj(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*conj(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*conj(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*conj(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*conj(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
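+
+/* Note on the kernels below: the Hermitian (sH) spmv kernels differ from the
+ * symmetric (sS) ones above in a single point: the mirrored term uses
+ * conj(VA[n]), since for A == A^H the entry at (j,i) is the conjugate of the
+ * stored one at (i,j). */
+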
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* rebased operands for the mirrored Hermitian update */
+	double complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*conj(VA[n])*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conj(VA[n+0 ])*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conj(VA[n+1 ])*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conj(VA[n+2 ])*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conj(VA[n+3 ])*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conj(VA[n+0 ])*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* rebased operands for the mirrored Hermitian update */
+	double complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*conj(VA[n])*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conj(VA[n+0 ])*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conj(VA[n+1 ])*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conj(VA[n+2 ])*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conj(VA[n+3 ])*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conj(VA[n+0 ])*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* rebased operands for the mirrored Hermitian update */
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*conj(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* rebased operands for the mirrored Hermitian update */
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*conj(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* rebased operands for the mirrored Hermitian update */
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*conj(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*conj(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*conj(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*conj(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* rebased operands for the mirrored Hermitian update */
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*conj(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*conj(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*conj(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*conj(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
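+/* [Editor's sketch] The kernel above is a plain COO scatter, manually
+ * unrolled by four with a scalar remainder loop -- the same structure every
+ * kernel in this file uses. Stripped to its essentials (hypothetical names,
+ * int indices, unit strides; not the librsb API): */
+#include <complex.h>
+#include <stdio.h>
+
+static void coo_spmv_unrolled_sketch(const double complex *VA, const int *IA,
+                                     const int *JA, int nnz,
+                                     double complex alpha,
+                                     const double complex *x, double complex *y)
+{
+	int n = 0;
+	for (; n + 3 < nnz; n += 4) {   /* main loop, unrolled by 4 */
+		y[IA[n + 0]] += alpha * VA[n + 0] * x[JA[n + 0]];
+		y[IA[n + 1]] += alpha * VA[n + 1] * x[JA[n + 1]];
+		y[IA[n + 2]] += alpha * VA[n + 2] * x[JA[n + 2]];
+		y[IA[n + 3]] += alpha * VA[n + 3] * x[JA[n + 3]];
+	}
+	for (; n < nnz; ++n)            /* scalar remainder */
+		y[IA[n]] += alpha * VA[n] * x[JA[n]];
+}
+
+int main(void)
+{
+	/* y = A*x for A = [1, 2; 0, 3], x = (1, 1): expect (3, 3). */
+	const double complex VA[] = { 1, 2, 3 };
+	const int IA[] = { 0, 0, 1 }, JA[] = { 0, 1, 1 };
+	double complex x[] = { 1, 1 }, y[] = { 0, 0 };
+	coo_spmv_unrolled_sketch(VA, IA, JA, 3, 1, x, y);
+	printf("%g %g\n", creal(y[0]), creal(y[1]));
+	return 0;
+}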
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$ (where \f$A \neq A^T\f$),
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$ (where \f$A \neq A^T\f$),
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
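+	/* [Editor's note] rhs/out arrive pre-offset for the untransposed case
+	 * (rhs by coff, out by roff); for this transposed kernel those roles
+	 * swap, hence the rebasing below. */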
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$ (where \f$A \neq A^T\f$),
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$ (where \f$A \neq A^T\f$),
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*conj(VA[n+1 ])*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*conj(VA[n+2 ])*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*conj(VA[n+3 ])*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$ (where \f$A \neq A^T\f$),
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	{
+for(n=0;n+3<nnz;n+=4){
+	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+	i=IA[n+1 ]; j=JA[n+1 ];
+	out[j*incy]+=(alpha)*conj(VA[n+1 ])*rhs[i*incx];
+	i=IA[n+2 ]; j=JA[n+2 ];
+	out[j*incy]+=(alpha)*conj(VA[n+2 ])*rhs[i*incx];
+	i=IA[n+3 ]; j=JA[n+3 ];
+	out[j*incy]+=(alpha)*conj(VA[n+3 ])*rhs[i*incx];
+}
+for(     ;n<nnz;++n){	i=IA[n+0 ]; j=JA[n+0 ];
+	out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+}
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$ (where \f$A = A^T\f$),
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* symmetric: shifted alias of rhs used by the mirrored (transposed) updates when roff != coff */
+	double complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
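+/* [Editor's sketch] When a symmetric matrix is split into blocks, an
+ * off-diagonal block (roff != coff) owns entries whose transposed mirrors
+ * land in a different row/column range; the trhs/tout pointers above address
+ * exactly those mirrored segments. Equivalent logic with explicit global
+ * indices (hypothetical names, unit strides; not the librsb API): */
+#include <complex.h>
+#include <stdio.h>
+
+static void coo_spmv_sym_offdiag_sketch(const double complex *VA,
+                                        const int *IA, const int *JA, int nnz,
+                                        double complex alpha, int roff, int coff,
+                                        const double complex *x, double complex *y)
+{
+	/* Strictly off-diagonal block: no i == j guard is needed here. */
+	for (int n = 0; n < nnz; ++n) {
+		int gi = roff + IA[n], gj = coff + JA[n];
+		y[gi] += alpha * VA[n] * x[gj];   /* stored entry */
+		y[gj] += alpha * VA[n] * x[gi];   /* transposed mirror */
+	}
+}
+
+int main(void)
+{
+	/* One stored entry A(0,1) = 5 of a 2x2 symmetric matrix. */
+	const double complex VA[] = { 5 };
+	const int IA[] = { 0 }, JA[] = { 0 };
+	double complex x[] = { 1, 2 }, y[] = { 0, 0 };
+	coo_spmv_sym_offdiag_sketch(VA, IA, JA, 1, 1, 0, 1, x, y);
+	printf("%g %g\n", creal(y[0]), creal(y[1]));  /* prints 10 5 */
+	return 0;
+}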
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$ (where \f$A = A^T\f$),
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* symmetric: shifted alias of rhs used by the mirrored (transposed) updates when roff != coff */
+	double complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+1 ]*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+2 ]*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+3 ]*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*VA[n+0 ]*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$ (where \f$A = A^T\f$),
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* symmetric: shifted alias of rhs used by the mirrored (transposed) updates when roff != coff */
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$ (where \f$A = A^T\f$),
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* symmetric: shifted alias of rhs used by the mirrored (transposed) updates when roff != coff */
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$ (where \f$A = A^T\f$),
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* symmetric: shifted alias of rhs used by the mirrored (transposed) updates when roff != coff */
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*conj(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*conj(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*conj(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*conj(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*conj(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$ (where \f$A = A^T\f$),
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* symmetric: shifted alias of rhs used by the mirrored (transposed) updates when roff != coff */
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*conj(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*conj(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*conj(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*conj(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*conj(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$ (where \f$A = A^H\f$),
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* Hermitian: shifted alias of rhs used by the mirrored (transposed) updates when roff != coff */
+	double complex *tout=out+incy*(coff-roff);
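+	/* [Editor's note] In this Hermitian kernel conj() is applied only to the
+	 * mirrored (transposed) contribution; the stored triangle is used as-is
+	 * in the direct term. */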
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*conj(VA[n])*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conj(VA[n+0 ])*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conj(VA[n+1 ])*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conj(VA[n+2 ])*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conj(VA[n+3 ])*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conj(VA[n+0 ])*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$ (where \f$A = A^H\f$),
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* Hermitian: shifted alias of rhs used by the mirrored (transposed) updates when roff != coff */
+	double complex *tout=out+incy*(coff-roff);
+
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+		if(RSB_LIKELY(i!=j))
+			out[j*incy]+=(alpha)*conj(VA[n])*rhs[i*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conj(VA[n+0 ])*trhs[i*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[i*incy]+=(alpha)*VA[n+1 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conj(VA[n+1 ])*trhs[i*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[i*incy]+=(alpha)*VA[n+2 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conj(VA[n+2 ])*trhs[i*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[i*incy]+=(alpha)*VA[n+3 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conj(VA[n+3 ])*trhs[i*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[i*incy]+=(alpha)*VA[n+0 ]*rhs[j*incx];
+		tout[j*incy]+=(alpha)*conj(VA[n+0 ])*trhs[i*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$ (where \f$A = A^H\f$),
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* Hermitian: shifted alias of rhs used by the mirrored (transposed) updates when roff != coff */
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*conj(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$ (where \f$A = A^H\f$),
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* Hermitian: shifted alias of rhs used by the mirrored (transposed) updates when roff != coff */
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*VA[n]*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*conj(VA[n])*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+0 ])*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*VA[n+1 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+1 ])*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*VA[n+2 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+2 ])*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*VA[n+3 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+3 ])*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*VA[n+0 ]*rhs[i*incx];
+		tout[i*incy]+=(alpha)*conj(VA[n+0 ])*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$ (where \f$A = A^H\f$),
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* Hermitian: shifted alias of rhs used by the mirrored (transposed) updates when roff != coff */
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*conj(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*conj(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*conj(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*conj(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$ (where \f$A = A^H\f$),
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+incx*(roff-coff); /* Hermitian: shifted alias of rhs used by the mirrored (transposed) updates when roff != coff */
+	double complex *tout=out+incy*(coff-roff);
+
+	rhs=(rhs-coff*(incx))+roff*(incx); out=(out-roff*(incy))+coff*(incy);
+	if(roff==coff)
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		out[j*incy]+=(alpha)*conj(VA[n])*rhs[i*incx];
+		if(RSB_LIKELY(j!=i))
+			out[i*incy]+=(alpha)*VA[n]*rhs[j*incx];
+	}
+	if(roff!=coff)
+	{
+for(n=0;n+3<nnz;n+=4){
+		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+			i=IA[n+1 ];
+		j=JA[n+1 ];
+		out[j*incy]+=(alpha)*conj(VA[n+1 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+1 ]*trhs[j*incx];
+			i=IA[n+2 ];
+		j=JA[n+2 ];
+		out[j*incy]+=(alpha)*conj(VA[n+2 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+2 ]*trhs[j*incx];
+			i=IA[n+3 ];
+		j=JA[n+3 ];
+		out[j*incy]+=(alpha)*conj(VA[n+3 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+3 ]*trhs[j*incx];
+	}
+for(     ;n<nnz;++n){ 		i=IA[n+0 ];
+		j=JA[n+0 ];
+		out[j*incy]+=(alpha)*conj(VA[n+0 ])*rhs[i*incx];
+		tout[i*incy]+=(alpha)*VA[n+0 ]*trhs[j*incx];
+	 }
+}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
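+	/* [Editor's note] The backward sweep below assumes entries sorted by row,
+	 * each row stored as [diagonal, then strictly upper entries], so walking
+	 * n downwards meets a row's off-diagonals before its diagonal. */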
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
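+/* [Editor's sketch] The uU kernels above perform back substitution directly
+ * on COO data sorted with the diagonal first within each row. The same
+ * algorithm with explicit bounds checks (hypothetical names, int indices,
+ * unit strides; not the librsb API); the uL kernels that follow are the
+ * forward-sweep mirror of this: */
+#include <complex.h>
+#include <stdio.h>
+
+/* Compute alpha * U^{-1} * b in place in y (y holds b on entry). */
+static int coo_trsv_upper_sketch(const double complex *VA, const int *IA,
+                                 const int *JA, int nnz, double complex alpha,
+                                 double complex *y, int m)
+{
+	int n = nnz - 1;
+	for (int ii = m - 1; ii >= 0; --ii) {
+		double complex ax = 0;
+		while (n >= 0 && IA[n] == ii && JA[n] != ii) {  /* row ii, j > ii */
+			ax += VA[n] * y[JA[n]];
+			--n;
+		}
+		if (n < 0 || IA[n] != ii || VA[n] == 0)
+			return -1;              /* missing or zero diagonal */
+		y[ii] = (alpha * y[ii] - ax) / VA[n];
+		--n;
+	}
+	return 0;
+}
+
+int main(void)
+{
+	/* U = [2, 1; 0, 4], b = (4, 8): solution is (1, 2). */
+	const double complex VA[] = { 2, 1, 4 };
+	const int IA[] = { 0, 0, 1 }, JA[] = { 0, 1, 1 };
+	double complex y[] = { 4, 8 };
+	if (coo_trsv_upper_sketch(VA, IA, JA, 3, 1, y, 2) == 0)
+		printf("%g %g\n", creal(y[0]), creal(y[1]));
+	return 0;
+}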
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
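+/* [Editor's sketch] For the transposed solve, the kernels above sweep the
+ * stored rows of U forward, treating each row as a column of U^T: divide by
+ * the diagonal first, then scatter the result down that column. In outline
+ * (hypothetical names, int indices, unit strides; not the librsb API): */
+#include <complex.h>
+#include <stdio.h>
+
+/* Solve U^T * y = b in place, then scale by alpha (y holds b on entry). */
+static int coo_trsv_upperT_sketch(const double complex *VA, const int *IA,
+                                  const int *JA, int nnz, double complex alpha,
+                                  double complex *y, int m)
+{
+	int n = 0;
+	for (int ii = 0; ii < m; ++ii) {
+		if (n >= nnz || IA[n] != ii || JA[n] != ii || VA[n] == 0)
+			return -1;              /* expect the diagonal first */
+		y[ii] /= VA[n];
+		double complex ax = y[ii];
+		for (++n; n < nnz && IA[n] == ii; ++n)
+			y[JA[n]] -= VA[n] * ax; /* scatter down the U^T column */
+		y[ii] *= alpha;
+	}
+	return 0;
+}
+
+int main(void)
+{
+	/* U = [2, 1; 0, 4], so U^T = [2, 0; 1, 4]; b = (2, 9) gives y = (1, 2). */
+	const double complex VA[] = { 2, 1, 4 };
+	const int IA[] = { 0, 0, 1 }, JA[] = { 0, 1, 1 };
+	double complex y[] = { 2, 9 };
+	if (coo_trsv_upperT_sketch(VA, IA, JA, 3, 1, y, 2) == 0)
+		printf("%g %g\n", creal(y[0]), creal(y[1]));
+	return 0;
+}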
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
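+
+/*
+ * Editor's sketch, not part of the librsb API: the forward substitution
+ * the kernel above performs, restated as a self-contained C99 routine.
+ * It assumes row-major sorted COO input in which every stored row of the
+ * lower triangle ends with its explicit diagonal entry; all names below
+ * are illustrative.
+ */
+#include <complex.h>
+
+static int coo_lower_fsub(const double complex *VA, const int *IA,
+		const int *JA, int nr, int nnz,
+		double complex alpha, double complex *x)
+{
+	int n = 0;
+	for (int ii = 0; ii < nr; ++ii)
+	{
+		double complex ax = 0;
+		/* gather the strictly sub-diagonal terms of row ii */
+		for (; n < nnz && IA[n] == ii && JA[n] != ii; ++n)
+			ax += VA[n] * x[JA[n]];
+		if (n == nnz || VA[n] == 0)
+			return -1; /* missing or zero diagonal */
+		x[ii] = (alpha * x[ii] - ax) / VA[n];
+		++n;
+	}
+	return 0;
+}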
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n+1==0 || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward scan: guard underflow, not n==nnz */
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
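+
+/*
+ * Editor's sketch, not part of the librsb API: the transposed kernels
+ * above use a column-sweep ("scatter") formulation instead of the gather
+ * used by the tN kernels: the diagonal leads each stored row, the solved
+ * component is computed at once, and it is then eliminated from every
+ * later equation. A self-contained restatement, under the same sorted-COO
+ * assumption and with illustrative names:
+ */
+#include <complex.h>
+
+static int coo_upper_tsub(const double complex *VA, const int *IA,
+		const int *JA, int nr, int nnz,
+		double complex alpha, double complex *x)
+{
+	int n = 0;
+	for (int ii = 0; ii < nr; ++ii)
+	{
+		if (n >= nnz || VA[n] == 0)
+			return -1; /* missing or zero diagonal */
+		x[ii] /= VA[n++]; /* diagonal leads stored row ii */
+		const double complex ax = x[ii];
+		for (; n < nnz && IA[n] == ii; ++n)
+			x[JA[n]] -= VA[n] * ax; /* scatter into later rows */
+		x[ii] *= alpha; /* scaling applied last, as in the kernels above */
+	}
+	return 0;
+}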
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward scan exhausted; n>=nnz is unreachable here */
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward scan exhausted; n>=nnz is unreachable here */
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward scan exhausted; n>=nnz is unreachable here */
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward scan exhausted; n>=nnz is unreachable here */
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
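+
+/*
+ * Editor's note: within this symmetric (sS) explicit-diagonal family the
+ * four kernels per operation differ only in index width (C/H) and in
+ * traversal direction; note also that, for the same stored triangle, the
+ * gather (tN) kernels and the scatter (tT/tC) kernels traverse in opposite
+ * directions, since transposition swaps the roles of the upper and lower
+ * triangles. The tC variants differ from tT only by the conj() applied to
+ * the scattered values.
+ */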
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conj(VA[n])*out[j*incy];
+		}
+
+		if(n+1==0 || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward scan: guard underflow, not n==nnz */
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conj(VA[n])*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conj(VA[n])*out[j*incy];
+		}
+
+		if(n+1==0 || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward scan: guard underflow, not n==nnz */
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		--n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conj(VA[n])*out[j*incy];
+		}
+
+		if(n==nnz || VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		out[ii*incy]=((alpha)*out[ii*incy]-ax)/VA[n];
+		++n;
+	}
+	return RSB_ERR_NO_ERROR;
+}
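+
+/*
+ * Editor's note: the hermitian (sH) kernels differ from the symmetric (sS)
+ * ones only in where conj() lands: the no-transpose gathers above apply
+ * conj(VA[n]) to the mirrored entries, the tT scatter kernels below
+ * conjugate the scattered values, and the tC scatter kernels use plain
+ * VA[n], the two conjugations having cancelled.
+ */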
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward scan exhausted; n>=nnz is unreachable here */
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward scan exhausted; n>=nnz is unreachable here */
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward scan exhausted; n>=nnz is unreachable here */
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n>=nnz)return RSB_ERR_INVALID_NUMERICAL_DATA;
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+		n++;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		double complex aa;
+		if(n+1==0)return RSB_ERR_INVALID_NUMERICAL_DATA; /* backward scan exhausted; n>=nnz is unreachable here */
+		aa=VA[n];
+		if(VA[n]==((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		n--;
+		out[ii*incy]/=aa;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
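+
+/*
+ * Editor's note: from here on the kernels assume an implicit unit diagonal
+ * (the dI variants): no diagonal entry is loaded, no division is performed,
+ * and the RSB_ERR_INVALID_NUMERICAL_DATA zero-pivot path disappears.
+ */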
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
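+
+/*
+ * Editor's sketch, not part of the librsb API: the unit-diagonal forward
+ * substitution performed by the dI kernels above, restated under the same
+ * sorted-COO assumption; all names are illustrative.
+ */
+#include <complex.h>
+
+static void coo_unit_lower_fsub(const double complex *VA, const int *IA,
+		const int *JA, int nr, int nnz,
+		double complex alpha, double complex *x)
+{
+	int n = 0;
+	for (int ii = 0; ii < nr; ++ii)
+	{
+		double complex ax = 0;
+		for (; n < nnz && IA[n] == ii && JA[n] != ii; ++n)
+			ax += VA[n] * x[JA[n]];
+		x[ii] = alpha * x[ii] - ax; /* implicit A(ii,ii) == 1 */
+	}
+}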
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
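+
+/*
+ * Editor's note: in the transposed dI kernels the scatter value ax is just
+ * the current out[ii], taken before any update, since the implicit unit
+ * diagonal makes the division a no-op; the alpha scaling is still applied
+ * to out[ii] only after the row has been scattered.
+ */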
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
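+
+/*
+ * Editor's note: every r1_c1 spsv body in this section updates out in
+ * place and never reads the rhs argument; callers evidently preload out
+ * with the right-hand side before invoking these kernels.
+ */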
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += VA[n]*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
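The tT kernels take the opposite, scatter-oriented view so that a row-ordered stream of A can serve as a column-ordered stream of A^T: as soon as out[ii] is final, its contribution VA[n]*out[ii] is subtracted from every still-pending out[j] addressed by row ii. A minimal standalone sketch of the forward scatter loop, under the same toy-data assumptions as the sketch above:

#include <stdio.h>
#include <complex.h>

int main(void)
{
	/* Strict upper part of A = [1 2 3; 0 1 4; 0 0 1], sorted row-major COO. */
	const int IA[] = { 0, 0, 1 };
	const int JA[] = { 1, 2, 2 };
	const double complex VA[] = { 2, 3, 4 };
	const int nnz = 3, Mdim = 3;
	double complex out[] = { 1, 3, 8 };	/* b = A^T*[1,1,1]^T, solved in place */
	const double complex alpha = 1;
	int n = 0;

	for (int ii = 0; ii < Mdim; ++ii)
	{
		const double complex ax = out[ii];	/* out[ii] is final at this point */
		for (; n < nnz && IA[n] == ii; ++n)
			out[JA[n]] -= VA[n] * ax;	/* scatter into pending rows */
		out[ii] = alpha * out[ii];
	}
	for (int ii = 0; ii < Mdim; ++ii)
		printf("x[%d] = %g\n", ii, creal(out[ii]));	/* expect 1 1 1 */
	return 0;
}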
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
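The tC kernels repeat the tT scatter loops with each stored value conjugated, i.e. they apply the inverse of A^H rather than of A^T. A short C99 illustration of that single difference, with made-up operand values:

#include <stdio.h>
#include <complex.h>

int main(void)
{
	const double complex a = 1 + 2*I, x = 3 - 1*I;	/* made-up values */
	const double complex t = a * x;		/* what a tT kernel would use */
	const double complex c = conj(a) * x;	/* what a tC kernel uses instead */
	printf("a*x       = %g%+gi\n", creal(t), cimag(t));
	printf("conj(a)*x = %g%+gi\n", creal(c), cimag(c));
	return 0;
}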
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conj(VA[n])*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conj(VA[n])*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=0;
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conj(VA[n])*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=0;
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii && j!=i)))
+				break;
+			ax += conj(VA[n])*out[j*incy];
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]-ax);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=conj(VA[n])*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+	for(n=0,ii=0;RSB_LIKELY(ii<Mdim);++ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+		for(;RSB_LIKELY(n<nnz);++n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_NO_ERROR.
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	const double complex alpha=*alphap;	rsb_coo_idx_t ii;
+
+	for(n=nnz-1,ii=Mdim-1;RSB_LIKELY(ii+1>0) ;--ii)
+	{
+		double complex ax;
+		ax=out[ii*incy];
+
+		for(;RSB_LIKELY(n+1>0);--n)
+		{
+			i=IA[n];
+			j=JA[n];
+			if(RSB_UNLIKELY(!(i==ii )))
+				break;
+			out[j*incy]-=VA[n]*ax;
+		}
+
+		out[ii*incy]=((alpha)*out[ii*incy]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;	
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
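These infty_norm kernels only accumulate per-row sums of absolute values; reducing row_sums to the actual norm, max_i row_sums_i, is left to the caller. A standalone sketch of the two-step computation on toy data; it uses a real accumulator array, whereas the kernels reuse the complex value type for row_sums:

#include <stdio.h>
#include <complex.h>

int main(void)
{
	/* Toy 3x3 COO matrix: row_sums_i = sum_j |A_ij|, ||A||_inf = max_i row_sums_i. */
	const int IA[] = { 0, 0, 1, 2 };
	const double complex VA[] = { 3 + 4*I, 1, -2, 5*I };
	const int nnz = 4, Mdim = 3;
	double row_sums[3] = { 0 };
	double norm = 0;

	for (int n = 0; n < nnz; ++n)
		row_sums[IA[n]] += cabs(VA[n]);	/* |3+4i| = 5, |1| = 1, ... */
	for (int i = 0; i < Mdim; ++i)
		if (row_sums[i] > norm)
			norm = row_sums[i];
	printf("||A||_inf = %g\n", norm);	/* rows sum to 6, 2, 5: expect 6 */
	return 0;
}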
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;	
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
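The _C_/_H_ kernel pairs differ only in the type through which bindx and bpntr are reread: the _H_ kernels use the narrower rsb_half_idx_t, halving index traffic for submatrices whose coordinates fit it. A tiny sketch of that saving, assuming a 16-bit unsigned typedef (the actual definition lives in librsb's headers):

#include <stdio.h>

typedef unsigned short half_idx;	/* assumed stand-in for rsb_half_idx_t */

int main(void)
{
	const size_t nnz = 1000000;
	printf("full indices: %zu bytes; half indices: %zu bytes per %zu nonzeroes\n",
	       nnz * sizeof(int), nnz * sizeof(half_idx), nnz);
	return 0;
}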
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
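In the symmetric (sS) and Hermitian (sH) variants only one triangle is stored, so each off-diagonal magnitude is credited to both its row and its column, and the roff+i != coff+j guard keeps diagonal entries from being counted twice. Since |conj(a)| = |a|, the sS and sH bodies are identical. A standalone sketch under the same toy-data assumptions as above:

#include <stdio.h>
#include <complex.h>

int main(void)
{
	/* Lower triangle of the symmetric A = [2 1 0; 1 3 4; 0 4 5], in COO. */
	const int IA[] = { 0, 1, 1, 2, 2 };
	const int JA[] = { 0, 0, 1, 1, 2 };
	const double complex VA[] = { 2, 1, 3, 4, 5 };
	const int nnz = 5, Mdim = 3;
	double row_sums[3] = { 0 };

	for (int n = 0; n < nnz; ++n)
	{
		row_sums[IA[n]] += cabs(VA[n]);
		if (IA[n] != JA[n])		/* mirror off-diagonals, count diagonal once */
			row_sums[JA[n]] += cabs(VA[n]);
	}
	for (int i = 0; i < Mdim; ++i)
		printf("row_sums[%d] = %g\n", i, row_sums[i]);	/* expect 3 8 9 */
	return 0;
}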
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;	
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;	
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
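+
+/* Editorial note: in the symmetric (sS) kernels above only one triangle is
+ * stored, so each off-diagonal entry contributes to the row sums of both its
+ * row and its column; the guard (roff+i != coff+j) keeps diagonal entries
+ * from being counted twice. For example, with A = [2 3; 3 5] stored as
+ * {(0,0,2),(1,0,3),(1,1,5)}: row_sums[0] = |2|+|3| = 5, row_sums[1] = |3|+|5| = 8.
+ */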
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} |A_{ij}| \f$, with the unstored triangle accounted for), where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} |A_{ij}| \f$, with the unstored triangle accounted for), where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} |A_{ij}| \f$, with the unstored triangle accounted for), where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} |A_{ij}| \f$, with the unstored triangle accounted for), where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} |A_{ij}| \f$, with the unstored triangle accounted for), where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} |A_{ij}| \f$, with the unstored triangle accounted for), where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
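+
+/* Editorial note: since cabs(conj(z)) == cabs(z), the Hermitian (sH) infinity
+ * norm kernels above are numerically identical to the symmetric (sS) ones:
+ * conjugating the unstored triangle cannot change an absolute row sum.
+ */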
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} |A_{ij}| \f$, with the unstored triangle accounted for), where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} |A_{ij}| \f$, with the unstored triangle accounted for), where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} |A_{ij}| \f$, with the unstored triangle accounted for), where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} |A_{ij}| \f$, with the unstored triangle accounted for), where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=cabs(VA[n]);
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=cabs(VA[n]);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$), where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$), where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
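+
+/* Editorial sketch (hypothetical helper, not part of the generated code):
+ * unlike the infinity norm kernels, the rowssums kernels accumulate the
+ * signed complex values themselves, so contributions may cancel. A minimal
+ * COO analogue of the tN kernels above, assuming a zero row offset:
+ */
+#if 0
+static void rsb__sketch_coo_row_sums(const double complex *VA, const rsb_coo_idx_t *IA, rsb_nnz_idx_t nnz, double complex *row_sums)
+{
+	rsb_nnz_idx_t n;
+
+	for(n = 0; n < nnz; ++n)
+		row_sums[IA[n]] += VA[n];	/* no cabs(): plain (cancellable) sums */
+}
+#endif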
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums of \f$ A^{T} \f$ (or rather, \f$ row\_sums_j \leftarrow row\_sums_j + \sum_{i=0}^{Mdim-1} A_{ij} \f$), where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums of \f$ A^{T} \f$ (or rather, \f$ row\_sums_j \leftarrow row\_sums_j + \sum_{i=0}^{Mdim-1} A_{ij} \f$), where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums of \f$ A^{T} \f$ (or rather, \f$ row\_sums_j \leftarrow row\_sums_j + \sum_{i=0}^{Mdim-1} A_{ij} \f$), where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums of \f$ A^{T} \f$ (or rather, \f$ row\_sums_j \leftarrow row\_sums_j + \sum_{i=0}^{Mdim-1} A_{ij} \f$), where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
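+
+/* Editorial note: for the unsymmetric (sU) rowssums kernels, the conjugate
+ * transpose (tC) variants above are textually identical to the transpose (tT)
+ * ones: values are accumulated unconjugated in both cases.
+ */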
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
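+/* Editorial note: the diagonal-implicit (dI) rowssums variants that follow
+ * are textually identical to the diagonal-explicit (dE) ones above; these
+ * loops do not add the implicit diagonal's contribution, so it is presumably
+ * accounted for by the caller.
+ */
+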
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$), where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$), where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		row_sums[roff+i]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums of \f$ A^{T} \f$ (or rather, \f$ row\_sums_j \leftarrow row\_sums_j + \sum_{i=0}^{Mdim-1} A_{ij} \f$), where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums of \f$ A^{T} \f$ (or rather, \f$ row\_sums_j \leftarrow row\_sums_j + \sum_{i=0}^{Mdim-1} A_{ij} \f$), where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums of \f$ A^{T} \f$ (or rather, \f$ row\_sums_j \leftarrow row\_sums_j + \sum_{i=0}^{Mdim-1} A_{ij} \f$), where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums of \f$ A^{T} \f$ (or rather, \f$ row\_sums_j \leftarrow row\_sums_j + \sum_{i=0}^{Mdim-1} A_{ij} \f$), where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr, *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes the row sums (or rather, \f$ row\_sums_i \leftarrow row\_sums_i + \sum_{j=0}^{mdim-1} A_{ij} \f$, with the unstored triangle accounted for), where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_half_idx_t i=0,j=0;
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr, *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		j=JA[n];
+		row_sums[roff+i]+=VA[n];
+		if( roff+i != coff+j )
+			row_sums[coff+j]+=VA[n];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow P \cdot A \f$, \f$ P_{ii}=s_{i} \f$ (row scaling), where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
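+
+/* Editorial sketch (hypothetical helper, not part of the generated code):
+ * the tN scale kernel above multiplies each stored entry by its row's
+ * factor, i.e. A <- diag(s) * A. A minimal COO analogue:
+ */
+#if 0
+static void rsb__sketch_coo_row_scale(double complex *VA, const rsb_coo_idx_t *IA, rsb_nnz_idx_t nnz, const double complex *s)
+{
+	rsb_nnz_idx_t n;
+
+	for(n = 0; n < nnz; ++n)
+		VA[n] *= s[IA[n]];	/* scale row IA[n] by s[IA[n]] */
+}
+#endif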
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow P \cdot A \f$, \f$ P_{ii}=s_{i} \f$ (row scaling), where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;	
+	register rsb_nnz_idx_t n=0;
+		for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+			i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{T} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *IA=(const rsb_coo_idx_t*)bpntr;
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *IA=(const rsb_half_idx_t*)bpntr;
+	register rsb_half_idx_t i=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		i=IA[n];
+		VA[n]*=scale_factors[i];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_coo_idx_t *JA=(const rsb_coo_idx_t*)bindx;
+	register rsb_coo_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+
+{
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A = A^{H} \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCOR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	const rsb_half_idx_t *JA=(const rsb_half_idx_t*)bindx;
+	register rsb_half_idx_t j=0;
+	register rsb_nnz_idx_t n=0;
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		j=JA[n];
+		VA[n]*=scale_factors[j];
+	}
+	return RSB_ERR_NO_ERROR;
+}
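[Editor's note on the generated names, judging from the doc comments paired with each variant above: the suffix letters encode the kernel's specialization, with C/H selecting rsb_coo_idx_t versus rsb_half_idx_t indices, tN/tT/tC the untransposed, transposed, or conjugate-transposed operation, sU/sS/sH unsymmetric, symmetric, or Hermitian storage, and dE/dI an explicit or implicit diagonal.]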
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should accommodate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_ul_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_ul_sU_dE_uG
+		( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
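[Editor's note: the overflow bound quoted in the dispatcher comments is just the padding needed to round a matrix dimension up to a whole number of blocks. A quick self-contained sketch of that computation, using a hypothetical helper that is not a librsb API:]

#include <stdio.h>

/* Editor's sketch: elements of padding needed to round dim up to a
 * multiple of blk, i.e. mod(blk - mod(dim, blk), blk). */
static int block_padding(int dim, int blk)
{
	return (blk - dim % blk) % blk;
}

int main(void)
{
	/* e.g. 10 rows in blocks of 4 -> 2 rows of padding (10 + 2 = 12) */
	printf("%d\n", block_padding(10, 4)); /* 2 */
	printf("%d\n", block_padding(12, 4)); /* 0 */
	return 0;
}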
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should accommodate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sU_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_ul_sU_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_ul_sU_dI_uG
+		( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should accommodate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_ul_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_ul_sU_dE_uG
+		( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should accommodate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sU_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_ul_sU_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_ul_sU_dI_uG
+		( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should accommodate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_uu_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_ul_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_ul_sU_dE_uG
+		( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should accommodate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_uu_sU_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_ul_sU_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_ul_sU_dI_uG
+		( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should accommodate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_uu_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_ul_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_ul_sU_dE_uG
+		( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should accommodate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_uu_sU_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_ul_sU_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_ul_sU_dI_uG
+		( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should accommodate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_uu_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_ul_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_ul_sU_dE_uG
+		( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
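+/*
+ * A self-contained sketch (hypothetical names, not librsb code) of the
+ * dispatch technique used throughout this file: detect the block
+ * dimensions from the row/column partitioning arrays, fall back to 1x1
+ * when they are absent, and select either the fully unrolled fixed-size
+ * kernel or the generic looped one.
+ */
+static int sketch_kernel_1x1(void)  { return 0; }	/* unrolled kernel */
+static int sketch_kernel_loop(void) { return 0; }	/* looped fallback */
+
+static int sketch_dispatch(const int *rpntr, const int *cpntr)
+{
+	int rows = 1, columns = 1;	/* bounded-box default, as above */
+
+	if (cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	if (rows == 1 && columns == 1)
+		return sketch_kernel_1x1();
+	return sketch_kernel_loop();
+}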
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uaua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
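The _C_ and _H_ dispatcher variants differ only in the index type of bindx: rsb_coo_idx_t versus the narrower rsb_half_idx_t. Assuming rsb_half_idx_t is an unsigned short, as the name suggests (this sketch and use_half_indices are illustrative, not librsb API), the caller-side choice amounts to:

    #include <limits.h>

    /* Prefer the half-index (_H_) kernels when every 0-based coordinate
     * fits in an unsigned short, roughly halving bindx's memory traffic;
     * fall back to the full-index (_C_) kernels otherwise. */
    static int use_half_indices(long nrows, long ncols)
    {
            long m = nrows > ncols ? nrows : ncols;
            return m - 1 <= USHRT_MAX; /* largest index must fit */
    }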
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uaua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
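Stripped of the generated naming, every dispatcher in this file reduces to the same skeleton: the unrolled 1x1 kernel for the only registered block size, with the looped kernel as an optional fallback. A condensed sketch, where kernel_1x1 and kernel_looped stand in for the generated rsb__BCOR_*_uu_* and rsb__BCOR_*_ul_* callees:

    #include <rsb.h> /* rsb_err_t, rsb_coo_idx_t, RSB_ERR_UNSUPPORTED_OPERATION */

    rsb_err_t kernel_1x1(void);    /* placeholder: unrolled fixed-size kernel */
    rsb_err_t kernel_looped(void); /* placeholder: generic looped kernel */

    rsb_err_t dispatch(rsb_coo_idx_t rows, rsb_coo_idx_t columns)
    {
            if (rows == 1 && columns == 1)
                    return kernel_1x1();
    #ifdef RSB_WANT_LOOPING_KERNELS
            return kernel_looped();
    #else
            return RSB_ERR_UNSUPPORTED_OPERATION;
    #endif
    }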
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches to the looped kernel specialized for the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
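+
+/*
+ * Hedged sketch (not librsb API) of what the dispatchers above read:
+ * rpntr/cpntr appear to hold block row/column boundary offsets, so the
+ * difference of the first two entries gives the fixed block dimensions.
+ * The boundary values below are illustrative only.
+ */
+static void rsb_example_block_dims(void)
+{
+	const int rpntr[] = { 0, 4, 8, 12 };	/* block-row boundaries */
+	const int cpntr[] = { 0, 2, 4, 6 };	/* block-column boundaries */
+	const int rows = rpntr[1] - rpntr[0];	/* == 4, as in the code above */
+	const int columns = cpntr[1] - cpntr[0];	/* == 2 */
+
+	(void)rows;
+	(void)columns;
+}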
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
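+
+/*
+ * Note on the dispatch pattern above: the _uu_ kernel is selected only
+ * for the exact 1x1 block case; any other block size falls through to a
+ * _ul_ kernel, which is compiled only when RSB_WANT_LOOPING_KERNELS is
+ * defined, and otherwise yields RSB_ERR_UNSUPPORTED_OPERATION.  Reading
+ * _uu_/_ul_ as "unrolled"/"looping" is an assumption based on this
+ * structure, not on documentation in this file.
+ */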
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
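+
+/*
+ * The dE/dI suffix pair is read here -- purely as an assumption from the
+ * naming -- as "diagonal explicit" versus "diagonal implicit" (a unit
+ * diagonal that is not stored).  Under that assumption an implicit-
+ * diagonal kernel would finish by adding rhs once, roughly as follows
+ * (hypothetical helper):
+ */
+#include <stddef.h>
+
+static void rsb_example_add_implicit_unit_diagonal(const double * rhs,
+		double * out, size_t n)
+{
+	size_t i;
+
+	for (i = 0; i < n; ++i)
+		out[i] += rhs[i];
+}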
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
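+
+/*
+ * The _C_ and _H_ dispatcher families differ only in the index type of
+ * bindx: full-width rsb_coo_idx_t versus narrow rsb_half_idx_t (see the
+ * signatures above).  A caller could pick the half-index family whenever
+ * all column indices fit; the sketch below assumes rsb_half_idx_t is an
+ * unsigned short, which this file does not itself guarantee.
+ */
+#include <limits.h>
+
+static int rsb_example_fits_half_indices(long columns)
+{
+	return columns > 0 &&
+		(unsigned long)(columns - 1) <= (unsigned long)USHRT_MAX;
+}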
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
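+
+/*
+ * The tN/tT/tC infix plausibly selects untransposed, transposed, and
+ * conjugate-transposed operation; for these real "double" kernels tC
+ * coincides with tT, since conj(x) == x for real x.  A scalar reference
+ * for the transposed product over COO triplets (hypothetical helper):
+ */
+#include <stddef.h>
+
+static void rsb_example_spmv_trans(const double * VA, const int * IA,
+		const int * JA, size_t nnz, const double * rhs, double * out)
+{
+	size_t n;
+
+	for (n = 0; n < nnz; ++n)
+		out[JA[n]] += VA[n] * rhs[IA[n]];	/* row/column roles swapped */
+}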
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uxua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
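+
+/*
+ * Unlike the spmv_uauz dispatchers earlier in this file, the spmv_uxua
+ * ones forward an extra alphap argument, suggesting the result is scaled
+ * by *alphap; this reading of the "uauz"/"uxua" mnemonics is inferred
+ * from the parameter lists, not documented here.  A scalar reference
+ * sketch over COO triplets (hypothetical helper):
+ */
+#include <stddef.h>
+
+static void rsb_example_spmv_uxua(const double * VA, const int * IA,
+		const int * JA, size_t nnz, const double * rhs, double * out,
+		const double * alphap)
+{
+	const double alpha = *alphap;
+	size_t n;
+
+	for (n = 0; n < nnz; ++n)
+		out[IA[n]] += alpha * VA[n] * rhs[JA[n]];
+}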
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uxua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uxua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uxua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uxua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uxua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uxua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uxua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uxua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uxua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uxua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uxua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
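+
+/*
+ * The sU/sS/sH infixes plausibly mark unsymmetric, symmetric, and
+ * hermitian kernels (the latter two coincide for real data).  Under that
+ * reading, a symmetric kernel applies each stored off-diagonal entry on
+ * both sides of the diagonal; a hypothetical scalar sketch:
+ */
+#include <stddef.h>
+
+static void rsb_example_spmv_sym(const double * VA, const int * IA,
+		const int * JA, size_t nnz, const double * rhs, double * out)
+{
+	size_t n;
+
+	for (n = 0; n < nnz; ++n)
+	{
+		out[IA[n]] += VA[n] * rhs[JA[n]];
+		if (IA[n] != JA[n])
+			out[JA[n]] += VA[n] * rhs[IA[n]];
+	}
+}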
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel function
+	 * that performs the desired matrix operation ("spmv_uxua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
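The overflow bound quoted in these comments is just the padding that rounds a
dimension up to a whole number of blocks: with matrixrows = 10 and
blockrows = 4, mod(4 - mod(10,4), 4) = mod(2,4) = 2, so two extra rows
complete the last block, while the outer mod makes the bound 0 whenever the
dimension is already a multiple of the block size. A minimal check of that
identity:

#include <assert.h>

/* Padding needed to round n up to a multiple of b:
 * mod(b - mod(n,b), b), as in the generated comments above. */
static int block_padding(int n, int b)
{
	return (b - n % b) % b;
}

int main(void)
{
	assert(block_padding(10, 4) == 2); /* 10 rows -> 12 = 3 blocks of 4 */
	assert(block_padding(12, 4) == 0); /* already a multiple: no padding */
	assert(block_padding(1, 1) == 0);  /* the 1x1 case never overflows */
	return 0;
}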
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
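The _C_ and _H_ function pairs above differ only in the declared type of
bindx: full rsb_coo_idx_t coordinates versus the narrower rsb_half_idx_t,
which roughly halves the index bandwidth whenever block-local coordinates fit
the smaller type. A sketch of that narrowing step, using hypothetical
stand-in types (the actual width of rsb_half_idx_t depends on the build
configuration):

#include <limits.h>
#include <stdio.h>

typedef int            coo_idx_t;  /* stand-in for rsb_coo_idx_t  */
typedef unsigned short half_idx_t; /* stand-in for rsb_half_idx_t */

/* Narrow full column indices to halfword ones, relative to the block's
 * column offset; legal only if every local index fits the narrow type. */
static int narrow_indices(const coo_idx_t *src, half_idx_t *dst,
                          int nnz, coo_idx_t coff)
{
	int i;
	for (i = 0; i < nnz; ++i) {
		coo_idx_t local = src[i] - coff; /* block-local index */
		if (local < 0 || local > USHRT_MAX)
			return -1;               /* would not fit */
		dst[i] = (half_idx_t)local;
	}
	return 0;
}

int main(void)
{
	const coo_idx_t full[3] = { 100000, 100007, 100042 };
	half_idx_t half[3];
	if (narrow_indices(full, half, 3, 100000) == 0)
		printf("narrowed: %u %u %u\n", half[0], half[1], half[2]);
	return 0;
}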
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
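The sS variants in this stretch handle symmetric storage, where each stored
off-diagonal nonzero (i,j) must also contribute as its mirror (j,i); for a
real double matrix the hermitian sH case further down degenerates to the same
computation, which is presumably why both spellings are generated. A minimal
symmetric COO update of this kind, with hypothetical names and with alpha and
the roff/coff offsets left out for clarity:

#include <stdio.h>

/* y += A*x for a symmetric COO matrix storing only one triangle:
 * each off-diagonal entry (i,j,v) also acts as (j,i,v). */
static void spmv_sym(const double *VA, const int *IA, const int *JA,
                     int nnz, const double *x, double *y)
{
	int k;
	for (k = 0; k < nnz; ++k) {
		int i = IA[k], j = JA[k];
		y[i] += VA[k] * x[j];
		if (i != j)
			y[j] += VA[k] * x[i]; /* mirrored contribution */
	}
}

int main(void)
{
	/* Lower triangle of [[2,1],[1,3]] */
	const double VA[3] = { 2.0, 1.0, 3.0 };
	const int    IA[3] = { 0, 1, 1 }, JA[3] = { 0, 0, 1 };
	const double x[2]  = { 1.0, 1.0 };
	double       y[2]  = { 0.0, 0.0 };
	spmv_sym(VA, IA, JA, 3, x, y);
	printf("y = [%g, %g]\n", y[0], y[1]); /* expect [3, 4] */
	return 0;
}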
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
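The tN/tT/tC variants select no transpose, transpose, and conjugate transpose
respectively; for real double data tC coincides with tT. With coordinate
storage the transpose costs nothing structurally: the kernel only exchanges
the roles of the row and column index of each entry, as this sketch with
hypothetical names shows:

#include <stdio.h>

/* COO SpMV, untransposed vs transposed: the transposed variant simply
 * swaps the roles of the row and column index of each entry. */
static void spmv_n(const double *VA, const int *IA, const int *JA,
                   int nnz, const double *x, double *y)
{
	int k;
	for (k = 0; k < nnz; ++k)
		y[IA[k]] += VA[k] * x[JA[k]];
}

static void spmv_t(const double *VA, const int *IA, const int *JA,
                   int nnz, const double *x, double *y)
{
	int k;
	for (k = 0; k < nnz; ++k)
		y[JA[k]] += VA[k] * x[IA[k]]; /* indices swapped */
}

int main(void)
{
	/* A = [[0,5],[0,0]] as the single COO entry (0,1,5) */
	const double VA[1] = { 5.0 };
	const int    IA[1] = { 0 }, JA[1] = { 1 };
	const double x[2]  = { 1.0, 1.0 };
	double yn[2] = { 0.0, 0.0 }, yt[2] = { 0.0, 0.0 };
	spmv_n(VA, IA, JA, 1, x, yn); /* A *x = [5, 0] */
	spmv_t(VA, IA, JA, 1, x, yt); /* A'*x = [0, 5] */
	printf("A*x=[%g,%g]  A'*x=[%g,%g]\n", yn[0], yn[1], yt[0], yt[1]);
	return 0;
}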
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
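The dE/dI suffixes appear to distinguish an explicitly stored diagonal from an
implicit one. Assuming dI means an unstored unit diagonal (that reading is
inferred from the naming, not taken from librsb documentation), the kernel has
to add the diagonal's contribution itself:

#include <stdio.h>

/* SpMV with an implicit unit diagonal: the stored entries carry only the
 * off-diagonal part, so y += x supplies the diagonal's share. */
static void spmv_unit_diag(const double *VA, const int *IA, const int *JA,
                           int nnz, const double *x, double *y, int n)
{
	int i, k;
	for (i = 0; i < n; ++i)
		y[i] += x[i];                 /* implicit diagonal */
	for (k = 0; k < nnz; ++k)
		y[IA[k]] += VA[k] * x[JA[k]]; /* stored off-diagonals */
}

int main(void)
{
	/* Off-diagonal part of [[1,2],[0,1]], unit diagonal left implicit */
	const double VA[1] = { 2.0 };
	const int    IA[1] = { 0 }, JA[1] = { 1 };
	const double x[2]  = { 1.0, 1.0 };
	double       y[2]  = { 0.0, 0.0 };
	spmv_unit_diag(VA, IA, JA, 1, x, y, 2);
	printf("y = [%g, %g]\n", y[0], y[1]); /* expect [3, 1] */
	return 0;
}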
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
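By this point every varying field of the mangled names has appeared at least
once, so their roles can be read off by comparing neighboring definitions.
The decoding below is inferred from those differences rather than from librsb
documentation; fields that never vary in this excerpt (the lone u and the
trailing uG) are omitted because nothing here pins down their meaning.

#include <stdio.h>

/* Inferred meaning of the mangled-name fields (deduced from how the
 * generated variants above differ; not official librsb documentation). */
struct field { const char *code, *meaning; };

static const struct field fields[] = {
	{ "BCOR",     "blocked COO storage"                               },
	{ "double",   "numerical type of VA"                              },
	{ "C / H",    "bindx width: rsb_coo_idx_t vs rsb_half_idx_t"      },
	{ "tN/tT/tC", "transposition: none / transpose / conj. transpose" },
	{ "sU/sS/sH", "symmetry: unsymmetric / symmetric / hermitian"     },
	{ "dE/dI",    "diagonal: explicitly stored / implicit"            },
	{ "uu/ul",    "kernel body: fully unrolled vs looped fallback"    },
};

int main(void)
{
	size_t i;
	for (i = 0; i < sizeof fields / sizeof fields[0]; ++i)
		printf("%-10s %s\n", fields[i].code, fields[i].meaning);
	return 0;
}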
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
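From here on the variants implement "spmv_unua", whose kernel calls drop the
trailing alphap argument: the scaling is fixed by the variant itself rather
than passed at run time (by the naming convention, plausibly alpha = -1, so
that out is decremented by the product). The dispatch skeleton is otherwise
unchanged. A side-by-side sketch of the two calling conventions, with
hypothetical names:

#include <stdio.h>

/* Run-time alpha, as in the "uxua"-style interfaces above. */
static void axpy_ax(const double *x, double *y, int n, const double *alphap)
{
	int i;
	for (i = 0; i < n; ++i)
		y[i] += *alphap * x[i];
}

/* Alpha baked into the variant, as in the "unua"-style interfaces below
 * (assuming it stands for a fixed alpha of -1). */
static void axpy_nx(const double *x, double *y, int n)
{
	int i;
	for (i = 0; i < n; ++i)
		y[i] -= x[i];
}

int main(void)
{
	const double x[2] = { 1.0, 2.0 };
	const double alpha = -1.0;
	double y[2] = { 0.0, 0.0 };
	axpy_ax(x, y, 2, &alpha); /* y = [-1, -2] */
	axpy_nx(x, y, 2);         /* y = [-2, -4] */
	printf("y = [%g, %g]\n", y[0], y[1]);
	return 0;
}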
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allocated with room for a small overflow of up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
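+
+/*
+ * Editorial note, not upstream code: the four tN_sU dispatchers above differ
+ * only in index width (C vs. H) and diagonal handling (dE vs. dI). A hedged
+ * sketch of selecting between the two explicit-diagonal variants; the
+ * wrapper name, the use_half flag and the forwarded variables are
+ * hypothetical, the two dispatchers are the real ones defined above:
+ */
+#if 0 /* illustrative sketch only, never compiled */
+rsb_err_t rsb_example_spmv_unua_tN_sU_dE(int use_half)
+{
+	if (use_half) /* bindx stored as rsb_half_idx_t */
+		return rsb__BCOR_spmv_unua_double_H_u_tN_sU_dE_uG(VA, rhs, out,
+			Mdim, mdim, hbindx, bpntr, indptr, rpntr, cpntr,
+			br, bc, roff, coff, flags, nnz);
+	return rsb__BCOR_spmv_unua_double_C_u_tN_sU_dE_uG(VA, rhs, out,
+		Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
+		br, bc, roff, coff, flags, nnz);
+}
+#endif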
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_C__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_C__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_H__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_H__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_C__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_C__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_H__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_H__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_C__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_C__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_H__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_H__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_C__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_C__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_H__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_H__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
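+
+/*
+ * A minimal caller-side sketch (hypothetical code, not from the library):
+ * each dispatcher returns an `rsb_err_t', and when the library is built
+ * without RSB_WANT_LOOPING_KERNELS any blocking other than 1x1 yields
+ * RSB_ERR_UNSUPPORTED_OPERATION, so the return value must be checked.
+ */
+#if 0
+rsb_err_t err = rsb__BCOR_spmv_unua_double_H_u_tN_sH_dE_uG(
+	VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
+	br, bc, roff, coff, flags, nnz);
+if (err != RSB_ERR_NO_ERROR)
+	return err;	/* e.g. RSB_ERR_UNSUPPORTED_OPERATION */
+#endif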
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_C__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
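+
+/*
+ * A note on the generated names, as far as the surrounding code itself
+ * suggests (not authoritative): the `_C_' and `_H_' variants differ only
+ * in taking full `rsb_coo_idx_t' versus narrower `rsb_half_idx_t' column
+ * indices in `bindx', and the `tN'/`tT'/`tC' tokens appear to select the
+ * untransposed, transposed, and conjugate-transposed operation variants.
+ */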
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_C__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_H__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_H__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_C__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_C__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_H__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_H__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
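+
+/*
+ * Unlike the "unua" dispatchers above, the "sasa" dispatchers also pass
+ * `incx'/`incy' strides through to the kernels.  A minimal sketch of the
+ * strided-vector convention this implies (hypothetical helper; with
+ * incx == incy == 1 the vectors are contiguous):
+ */
+#if 0
+static double get_strided(const double * x, rsb_coo_idx_t i, rsb_coo_idx_t incx)
+{
+	return x[i * incx];	/* the i-th logical element of a strided vector */
+}
+#endif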
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
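+
+/*
+ * The `sS' dispatchers, the first of which appears just above, mirror the
+ * `sU' ones further up line for line; judging by the naming alone (not
+ * authoritative), `sU' and `sS' appear to distinguish unsymmetric and
+ * symmetric kernel variants, the dispatch logic itself being unchanged.
+ */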
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
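
The overflow bound quoted in each comment is simply the padding needed to
round the matrix dimensions up to a whole number of blocks. A small sketch of
the arithmetic (illustrative only; pad() is a hypothetical helper):

	#include <stdio.h>

	/* mod(b - mod(n, b), b): elements needed to round n up to a multiple of b */
	static int pad(int n, int b)
	{
		return (b - n % b) % b;
	}

	int main(void)
	{
		int matrixrows = 10, blockrows = 4;	/* hypothetical sizes */
		/* 10 rows in 4-row blocks leaves a 2-row remainder, so the out
		 * vector must tolerate pad(10, 4) == 2 elements of overflow. */
		printf("row padding: %d\n", pad(matrixrows, blockrows));
		return 0;
	}
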
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
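
Up to this point every variant carries the sS tag; the sH group follows. The
suffix fields appear to encode the variant (an inference from reading this
diff, not a documented interface): _C_ versus _H_ select full rsb_coo_idx_t or
halfword rsb_half_idx_t column indices (visible in the bindx parameter type),
tN/tT/tC the untransposed, transposed and conjugate-transposed operations,
r1_c1 the 1x1 block size, uu/ul the unrolled versus looped kernel body, and
the trailing sS/sH, dE/dI and uG fields plausibly the symmetry handling,
explicit versus implicit diagonal, and general (as opposed to triangular)
storage.
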
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
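
Starting with rsb__BCOR_spsv_uxua_double_C_u_tN_sU_dE_uU above, the generated
functions switch from multiplication to triangular solve: the signatures drop
incx/incy, and the uG tag gives way to uU/uL, which by all appearances select
the upper and lower triangle. The kernels being dispatched amount to sparse
substitution; a toy forward substitution over a CSR-like lower triangle
(made-up layout and data, for intuition only, not librsb's internal format):

	#include <stdio.h>

	int main(void)
	{
		/* L = [2 0 0; 1 3 0; 0 4 5] in CSR form, diagonal last in each row */
		const double va[]  = { 2.0, 1.0, 3.0, 4.0, 5.0 };
		const int    ja[]  = { 0, 0, 1, 1, 2 };
		const int    ptr[] = { 0, 1, 3, 5 };
		double x[] = { 2.0, 4.0, 9.0 };	/* right-hand side, solved in place */

		for (int i = 0; i < 3; i++)
		{
			for (int k = ptr[i]; k < ptr[i + 1] - 1; k++)
				x[i] -= va[k] * x[ja[k]];	/* subtract already-solved entries */
			x[i] /= va[ptr[i + 1] - 1];		/* divide by the diagonal */
		}
		printf("x = %g %g %g\n", x[0], x[1], x[2]);	/* expect 1 1 1 */
		return 0;
	}
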
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vectors to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
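+
+/*
+ * Shape of a call to one of the dispatchers above, for the common case in
+ * which rpntr and cpntr are NULL and the unrolled 1x1 kernel is selected.
+ * All values are placeholders chosen only to show the calling convention
+ * (they do not form a meaningful triangular system), hence compiled out:
+ */
+#if 0 /* illustrative only */
+static void rsb_spsv_call_shape_sketch(void)
+{
+	const double VA[1] = { 2.0 };             /* nonzero values */
+	const rsb_half_idx_t bindx[1] = { 0 };    /* half-word block indices */
+	const rsb_nnz_idx_t bpntr[2] = { 0, 1 };  /* placeholder pointer array */
+	const double rhs[1] = { 4.0 };
+	double out[1] = { 0.0 };
+	rsb_err_t errval = rsb__BCOR_spsv_uxua_double_H_u_tN_sU_dE_uU(
+		VA, rhs, out, 1, 1, bindx, bpntr, NULL /* indptr */,
+		NULL /* rpntr */, NULL /* cpntr */, 1, 1 /* br, bc */,
+		0, 0 /* roff, coff */, 0 /* flags: placeholder */, 1 /* nnz */);
+	(void)errval;
+}
+#endif /* 0 */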
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the current fixed block size
+	 * to perform the requested matrix operation ("spsv_uxua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
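+/*
+ * Editorial sketch, not part of the generated sources: the transposed
+ * (`tT') variants above reuse the stored strictly-lower coefficients for a
+ * backward substitution that scatters instead of gathering.  Assuming
+ * row-sorted coefficients and an implicit unit diagonal (as in the sketch
+ * after the first dispatcher), a plausible loop is:
+ */
+#if 0 /* illustration only, not compiled */
+static void spsv_uxua_1x1_trans_sketch(const double *VA, double *out,
+	const rsb_coo_idx_t *IA, const rsb_coo_idx_t *JA,
+	const rsb_nnz_idx_t nnz)
+{
+	rsb_nnz_idx_t n;
+	/* visiting entries in reverse row order means out[IA[n]] is already
+	 * final when its contribution is scattered to out[JA[n]] */
+	for(n = nnz; n > 0; --n)
+		out[JA[n-1]] -= VA[n-1] * out[IA[n-1]];	/* x_j -= a_ij * x_i */
+}
+#endif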
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
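+/*
+ * Editorial sketch, not part of the generated sources: a hypothetical
+ * internal call of one of the dispatchers above.  Per the dispatch code,
+ * NULL rpntr/cpntr take the experimental bounded-box path and force the
+ * 1x1 (`r1_c1') kernel; otherwise the block size comes from
+ * rpntr[1]-rpntr[0] and cpntr[1]-cpntr[0].  All variables are assumed to
+ * be in scope.
+ */
+#if 0 /* illustration only, not compiled */
+	errval = rsb__BCOR_spsv_uxua_double_H_u_tC_sS_dI_uL(
+		VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr,
+		NULL, NULL,	/* rpntr, cpntr: NULL selects rows = columns = 1 */
+		1, 1,		/* br, bc: nominal 1x1 blocking */
+		roff, coff, flags, nnz);
+#endif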
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
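+
+/*
+ * Note on the block-size probe above: rpntr and cpntr act as block row and
+ * column pointer arrays, so the first differences rpntr[1]-rpntr[0] and
+ * cpntr[1]-cpntr[0] give the block height and width of the (uniform)
+ * blocking. A hypothetical rpntr = {0, 4, 8, 12} would thus give
+ * rows = 4 - 0 = 4; when either array is NULL the code falls back on a
+ * 1x1 blocking, per the "bounded box patch" comment.
+ */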
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
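+
+/*
+ * Note on the name mangling, inferred from the prototypes and the m4
+ * generator rather than stated in this file, so read it as a best-effort
+ * decoding: the "C"/"H" letter after the scalar type selects the column
+ * index width (bindx is rsb_coo_idx_t in the C variants and the narrower
+ * rsb_half_idx_t in the H variants, as the signatures show); tN/tT/tC
+ * appear to select no, plain, or conjugate transposition; uU/uL/uG an
+ * upper, lower, or general triangle; dE/dI an explicit or implicitly unit
+ * diagonal; and the _uu_/_ul_ leaves the fixed-unrolled versus the
+ * RSB_WANT_LOOPING_KERNELS-guarded looped code paths.
+ */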
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
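+
+/*
+ * Note: in every dispatcher above, any blocking other than 1x1 funnels
+ * into the generic "_ul_" (looped) kernel, and that fallback is compiled
+ * in only under RSB_WANT_LOOPING_KERNELS. A hypothetical caller sketch of
+ * handling the compiled-out case:
+ *
+ *   rsb_err_t e = rsb__BCOR_spsv_uxua_double_C_u_tT_sH_dE_uU(
+ *           VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr,
+ *           rpntr, cpntr, br, bc, roff, coff, flags, nnz);
+ *   if(e == RSB_ERR_UNSUPPORTED_OPERATION)
+ *           e = ...;   retry with a 1x1 blocking or a looping-enabled build
+ */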
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
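+
+/*
+ * Note, an assumption drawn from the parameter list (alphap, incx, incy)
+ * rather than from the kernel bodies, which are defined elsewhere: the
+ * spmv_sxsa family dispatched here appears to perform a strided, scaled
+ * multiply-accumulate, roughly
+ *
+ *   for each stored entry A(i,j):
+ *           out[i * incy] += (*alphap) * A(i,j) * rhs[j * incx];
+ *
+ * reducing to out += alpha * A * rhs when incx == incy == 1. The spsv_uxua
+ * dispatchers above take neither a scale factor nor strides.
+ */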
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
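
The _C_ and _H_ dispatcher families above are otherwise identical: as the two
signatures show, they differ only in the declared width of the bindx
column-index array (rsb_coo_idx_t versus rsb_half_idx_t), presumably to
reduce index-array bandwidth. A small sketch of that trade-off, assuming
representative typedefs (the actual definitions are configure-time choices
in librsb):

    #include <stdio.h>

    typedef int rsb_coo_idx_t;             /* assumed; chosen by configure */
    typedef unsigned short rsb_half_idx_t; /* assumed; "halfword" index    */

    int main(void)
    {
    	/* a _H_ kernel moves half the index bytes of its _C_ twin */
    	printf("full index: %zu bytes, half index: %zu bytes\n",
    	       sizeof(rsb_coo_idx_t), sizeof(rsb_half_idx_t));
    	return 0;
    }
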
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
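
The sU/sS/sH infix that starts varying here selects the symmetry handling
(presumably unsymmetric, symmetric and Hermitian; for the double type the
Hermitian case coincides with the symmetric one). A scalar sketch of what a
symmetric 1x1 COO multiply plausibly has to do, under the assumption that
only one triangle is stored (illustrative only, not the librsb leaf kernel):

    #include <stdio.h>

    static void spmv_sym_coo(int nnz, const int *ia, const int *ja,
                             const double *va, const double *x, double *y)
    {
    	for (int k = 0; k < nnz; ++k)
    	{
    		y[ia[k]] += va[k] * x[ja[k]];          /* stored entry   */
    		if (ia[k] != ja[k])
    			y[ja[k]] += va[k] * x[ia[k]];  /* mirrored entry */
    	}
    }

    int main(void)
    {
    	/* lower triangle of [[2,1],[1,3]] times x = [1,1] */
    	const int ia[] = {0, 1, 1}, ja[] = {0, 0, 1};
    	const double va[] = {2, 1, 3}, x[] = {1, 1};
    	double y[2] = {0, 0};
    	spmv_sym_coo(3, ia, ja, va, x, y);
    	printf("y = [%g, %g]\n", y[0], y[1]); /* expect [3, 4] */
    	return 0;
    }
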
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function that
+	 * performs the requested matrix operation ("spmv_sxsa") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
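+
+/*
+ * Editorial note: the generated names encode their variant.  As the
+ * signatures show, "_C_" functions take rsb_coo_idx_t block indices while
+ * "_H_" functions take the narrower rsb_half_idx_t.  The remaining suffixes
+ * appear to encode transposition (tN/tT/tC), symmetry or structure (sU, sS,
+ * sH), diagonal handling (dE/dI) and triangle or generality (uU/uL/uG);
+ * this reading is inferred from the identifiers and the dispatch targets,
+ * not stated explicitly in this file.  The spsv_sxsx functions beginning
+ * here are the triangular-solve counterparts of the spmv_sxsa multipliers
+ * above.
+ */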
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
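+
+/*
+ * Editorial sketch, not upstream code: how a caller might handle the error
+ * value these dispatchers return.  RSB_ERR_UNSUPPORTED_OPERATION is produced
+ * above exactly when a block size other than 1x1 reaches a build compiled
+ * without RSB_WANT_LOOPING_KERNELS.
+ */
+static rsb_err_t rsb__demo_checked_call(rsb_err_t errval)
+{
+	if(errval == RSB_ERR_UNSUPPORTED_OPERATION)
+	{
+		/* only the 1x1 unrolled kernels were compiled in: a caller could
+		 * fall back to another format here instead of propagating */
+	}
+	return errval;	/* propagate unchanged by default */
+}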
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allocated to tolerate a small overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
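+
+/*
+ * Editorial note: the spsv_sxsx dispatchers above all carry the sU suffix;
+ * the functions from here on carry sS instead, which (by the naming reading
+ * sketched earlier) appears to select the symmetric-structure variants of
+ * the same triangular-solve dispatch pattern.
+ */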
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
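From here on, _C_ and _H_ variants of each dispatcher alternate; as the
signatures show, they differ only in the type of bindx: full rsb_coo_idx_t
column indices versus narrower rsb_half_idx_t ones, which roughly halves
index storage whenever every local index fits the narrower type. A tiny
illustration (the typedefs below are stand-ins; the real widths are
configure-time choices, not fixed by this file):

	#include <stdint.h>
	#include <stdio.h>

	typedef int32_t  coo_idx_t;   /* stand-in for rsb_coo_idx_t  */
	typedef uint16_t half_idx_t;  /* stand-in for rsb_half_idx_t */

	int main(void)
	{
		/* same kernels, half the bytes per stored column index */
		printf("full: %zu bytes/index, half: %zu bytes/index\n",
		       sizeof(coo_idx_t), sizeof(half_idx_t));
		return 0;
	}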
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
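The suffix of every dispatcher name in this unit encodes its variant; reading
the names, the pattern appears to be: index width (C/H), transposition
(tN/tT/tC), symmetry (sU/sS/sH), diagonal handling (dE/dI) and a final uU/uL
selector, with r1_c1 and uu/ul in the kernel names marking the block size and
the unrolled-versus-looped body. A sketch that reassembles such a suffix
(kernel_suffix is hypothetical, and the decoding beyond the index width is
inferred from the names rather than documented here):

	#include <stdio.h>

	/* rebuild a dispatcher suffix such as "H_u_tC_sU_dI_uL" */
	static void kernel_suffix(char *buf, size_t len,
	                          char idx, char tr, char sy, char di, char ul)
	{
		snprintf(buf, len, "%c_u_t%c_s%c_d%c_u%c", idx, tr, sy, di, ul);
	}

	int main(void)
	{
		char buf[32];
		kernel_suffix(buf, sizeof buf, 'H', 'T', 'S', 'E', 'U');
		printf("%s\n", buf); /* -> H_u_tT_sS_dE_uU, cf. the functions below */
		return 0;
	}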
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spsv_sxsx").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
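+
+/*
+ * Editorial note on the generated naming scheme, as far as it can be read off
+ * this file (these expansions are inferred, not authoritative): the "C"/"H"
+ * infix selects full (rsb_coo_idx_t) or halfword (rsb_half_idx_t) index
+ * arrays, as the bindx signatures show; tN/tT/tC appear to encode the
+ * non-transposed, transposed and conjugate-transposed variants; sU/sS/sH the
+ * unsymmetric, symmetric and Hermitian cases; dE/dI an explicit versus an
+ * implicit unit diagonal; uU/uL/uG the upper-triangular, lower-triangular
+ * and general cases; r1_c1 the 1x1 block size; and the trailing uu/ul of the
+ * kernel names the unrolled body versus the looping one guarded by
+ * RSB_WANT_LOOPING_KERNELS.
+ */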
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
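+
+/*
+ * Editorial sketch of the operation these spsv dispatchers ultimately
+ * perform: a scaled sparse triangular solve, out = alpha * inv(L) * rhs. For
+ * readability this reference version uses a hypothetical dense, row-major,
+ * n x n lower-triangular matrix L rather than the BCOR arrays; the incx/incy
+ * strides and the alpha scaling mirror the kernel parameters above.
+ */
+static void rsb_editorial_dense_lower_spsv(const double *L,
+	const double *rhs, double *out, const rsb_coo_idx_t n,
+	const double alpha, const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	rsb_coo_idx_t i, j;
+
+	for (i = 0; i < n; ++i)	/* forward substitution, row by row */
+	{
+		double t = alpha * rhs[i * incx];
+
+		for (j = 0; j < i; ++j)
+			t -= L[i * n + j] * out[j * incy];
+		out[i * incy] = t / L[i * n + i];	/* explicit ("dE") diagonal */
+	}
+}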
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
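+
+/*
+ * Editorial companion to the earlier forward-substitution sketch: for the
+ * transposed ("tT") variants, solving trans(L) * out = alpha * rhs with the
+ * same hypothetical dense lower-triangular L amounts to backward
+ * substitution, which is presumably the access pattern the 1x1 kernels
+ * dispatched here realize on the sparse arrays:
+ */
+static void rsb_editorial_dense_lower_spsv_trans(const double *L,
+	const double *rhs, double *out, const rsb_coo_idx_t n,
+	const double alpha, const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	rsb_coo_idx_t i, j;
+
+	for (i = n; i-- > 0; )	/* backward substitution, last row first */
+	{
+		double t = alpha * rhs[i * incx];
+
+		for (j = i + 1; j < n; ++j)
+			t -= L[j * n + i] * out[j * incy];	/* trans(L)[i][j] == L[j][i] */
+		out[i * incy] = t / L[i * n + i];
+	}
+}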
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tN_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_double_C__tN_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_C__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_C__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
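+
+/*
+ * Editorial sketch of what the infty_norm dispatchers compute: the infinity
+ * norm of a matrix is max_i sum_j |a_ij|, so each 1x1 kernel accumulates
+ * |VA[n]| into row_sums at that element's roff-shifted row, with the maximum
+ * presumably taken by the caller. The coordinate array IA below is
+ * hypothetical; the generated kernels read their indices from the BCOR
+ * arrays instead, and fabs() requires <math.h>:
+ */
+static void rsb_editorial_coo_row_abs_sums(const double *VA,
+	const rsb_coo_idx_t *IA, double *row_sums,
+	const rsb_coo_idx_t roff, const rsb_nnz_idx_t nnz)
+{
+	rsb_nnz_idx_t n;
+
+	for (n = 0; n < nnz; ++n)
+		row_sums[roff + IA[n]] += fabs(VA[n]);
+}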
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tN_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_double_C__tN_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_C__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_C__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tN_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_H__tN_r1_c1_uu_sU_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_H__tN_r1_c1_ul_sU_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_H__tN_r1_c1_ul_sU_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tN_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_H__tN_r1_c1_uu_sU_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_H__tN_r1_c1_ul_sU_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_H__tN_r1_c1_ul_sU_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
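
Read against the signatures, the name infixes appear to encode the dispatched variant: _C_ takes rsb_coo_idx_t block column indices while _H_ takes the narrower rsb_half_idx_t; tN/tT/tC plausibly select untransposed, transposed, and conjugate-transposed operation; sU/sS/sH unsymmetric, symmetric, and hermitian storage; dE/dI an explicit versus implicit diagonal; and uu/ul the fixed 1x1 kernel versus the looped fallback compiled only under RSB_WANT_LOOPING_KERNELS. That reading is inferred from the generated names, not stated by the file. A hypothetical caller-side test for the C/H split could look like this sketch (the typedef widths are assumptions; upstream fixes them at configure time):

    #include <stdint.h>
    #include <stddef.h>

    typedef int32_t  rsb_coo_idx_t;  /* assumed width */
    typedef uint16_t rsb_half_idx_t; /* assumed width */

    /* Nonzero when every column index fits rsb_half_idx_t, so the
       smaller-footprint _H_ dispatchers would be usable. */
    static int fits_half_index(const rsb_coo_idx_t *bindx, size_t nnz)
    {
        size_t k;
        for (k = 0; k < nnz; ++k)
            if (bindx[k] < 0 || (uint32_t)bindx[k] > UINT16_MAX)
                return 0;
        return 1;
    }

    int main(void)
    {
        const rsb_coo_idx_t bindx[] = { 0, 7, 65535 };
        return !fits_half_index(bindx, 3); /* exits 0: all fit */
    }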
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tT_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_C__tT_r1_c1_uu_sU_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_C__tT_r1_c1_ul_sU_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_C__tT_r1_c1_ul_sU_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tT_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_C__tT_r1_c1_uu_sU_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_C__tT_r1_c1_ul_sU_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_C__tT_r1_c1_ul_sU_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tT_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_H__tT_r1_c1_uu_sU_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_H__tT_r1_c1_ul_sU_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_H__tT_r1_c1_ul_sU_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tT_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_H__tT_r1_c1_uu_sU_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_H__tT_r1_c1_ul_sU_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_H__tT_r1_c1_ul_sU_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tC_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_C__tC_r1_c1_uu_sU_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_C__tC_r1_c1_ul_sU_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_C__tC_r1_c1_ul_sU_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tC_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_C__tC_r1_c1_uu_sU_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_C__tC_r1_c1_ul_sU_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_C__tC_r1_c1_ul_sU_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tC_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_H__tC_r1_c1_uu_sU_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_H__tC_r1_c1_ul_sU_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_H__tC_r1_c1_ul_sU_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tC_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_H__tC_r1_c1_uu_sU_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_H__tC_r1_c1_ul_sU_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_H__tC_r1_c1_ul_sU_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
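
All twelve sU dispatchers above share one signature and differ only in the kernel they forward to. For the transposed variants the mathematics is fixed independently of the kernels: the row sums of op(A) = A^T are the column sums of A, so a coordinate-level accumulation would plausibly index the sums by the stored column index; and since the type here is a real double, tC can only agree with tT, conjugation being a no-op on reals. An editorial sketch of such an accumulation (names invented):

    #include <math.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Accumulate |a_ij| into sums[j]: row sums of A^T. */
    static void rowsums_of_transpose(const double *VA, const int *JA,
                                     size_t nnz, double *sums)
    {
        size_t k;
        for (k = 0; k < nnz; ++k)
            sums[JA[k]] += fabs(VA[k]);
    }

    int main(void)
    {
        const double VA[] = { 1.0, -2.0, 3.0 };
        const int    JA[] = { 0, 1, 1 };
        double sums[2] = { 0.0, 0.0 };
        rowsums_of_transpose(VA, JA, 3, sums);
        printf("%g %g\n", sums[0], sums[1]); /* prints: 1 5 */
        return 0;
    }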
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tN_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_C__tN_r1_c1_uu_sS_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_C__tN_r1_c1_ul_sS_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_C__tN_r1_c1_ul_sS_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tN_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_C__tN_r1_c1_uu_sS_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_C__tN_r1_c1_ul_sS_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_C__tN_r1_c1_ul_sS_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tN_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_H__tN_r1_c1_uu_sS_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_H__tN_r1_c1_ul_sS_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_H__tN_r1_c1_ul_sS_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tN_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_H__tN_r1_c1_uu_sS_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_H__tN_r1_c1_ul_sS_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_H__tN_r1_c1_ul_sS_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tT_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_C__tT_r1_c1_uu_sS_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_C__tT_r1_c1_ul_sS_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_C__tT_r1_c1_ul_sS_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tT_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_C__tT_r1_c1_uu_sS_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_C__tT_r1_c1_ul_sS_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_C__tT_r1_c1_ul_sS_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tT_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_H__tT_r1_c1_uu_sS_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_H__tT_r1_c1_ul_sS_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_H__tT_r1_c1_ul_sS_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tT_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_H__tT_r1_c1_uu_sS_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_H__tT_r1_c1_ul_sS_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_H__tT_r1_c1_ul_sS_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tC_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_C__tC_r1_c1_uu_sS_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_C__tC_r1_c1_ul_sS_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_C__tC_r1_c1_ul_sS_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tC_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_C__tC_r1_c1_uu_sS_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_C__tC_r1_c1_ul_sS_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_C__tC_r1_c1_ul_sS_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tC_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_H__tC_r1_c1_uu_sS_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_H__tC_r1_c1_ul_sS_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_H__tC_r1_c1_ul_sS_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tC_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_H__tC_r1_c1_uu_sS_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_H__tC_r1_c1_ul_sS_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_H__tC_r1_c1_ul_sS_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
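
The sS group forwards to symmetric kernels. Whatever their internals, one property is forced by symmetric storage of a single triangle: an off-diagonal a_ij must contribute |a_ij| to the sums of both row i and row j. A minimal editorial sketch of that rule, not the actual kernel code:

    #include <math.h>
    #include <stddef.h>
    #include <stdio.h>

    static void rowsums_symmetric(const double *VA, const int *IA,
                                  const int *JA, size_t nnz, double *sums)
    {
        size_t k;
        for (k = 0; k < nnz; ++k) {
            const double a = fabs(VA[k]);
            sums[IA[k]] += a;
            if (IA[k] != JA[k])      /* do not double-count the diagonal */
                sums[JA[k]] += a;
        }
    }

    int main(void)
    {
        const double VA[] = { 4.0, -1.0 };  /* a_00 and a_10 of a 2x2 */
        const int IA[] = { 0, 1 }, JA[] = { 0, 0 };
        double sums[2] = { 0.0, 0.0 };
        rowsums_symmetric(VA, IA, JA, 2, sums);
        printf("%g %g\n", sums[0], sums[1]); /* prints: 5 1 */
        return 0;
    }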
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tN_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_C__tN_r1_c1_uu_sH_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_C__tN_r1_c1_ul_sH_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_C__tN_r1_c1_ul_sH_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
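
Here the sH (hermitian) group begins. For this real double type, hermitian coincides with symmetric, so these variants can differ from their sS counterparts at most in code path, not in results. The dE/dI infix plausibly separates an explicitly stored diagonal from an implicit (unit) one; under that reading, which is an assumption drawn from the naming rather than anything this file states, a dI infinity norm would add the constant |a_ii| = 1 to each row sum on top of the stored entries, roughly:

    #include <stddef.h>

    /* Assumed dI semantics: the unit diagonal is absent from VA, so each
       row sum gains 1.0 after the stored entries have been accumulated. */
    static void add_implicit_unit_diagonal(double *sums, size_t nrows)
    {
        size_t i;
        for (i = 0; i < nrows; ++i)
            sums[i] += 1.0;
    }

    int main(void)
    {
        double sums[2] = { 0.5, 2.0 };
        add_implicit_unit_diagonal(sums, 2); /* -> 1.5, 3.0 */
        return 0;
    }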
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tN_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_C__tN_r1_c1_uu_sH_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_C__tN_r1_c1_ul_sH_dI_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_C__tN_r1_c1_ul_sH_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tN_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_H__tN_r1_c1_uu_sH_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_H__tN_r1_c1_ul_sH_dE_uG
+				( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_H__tN_r1_c1_ul_sH_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tN_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_double_H__tN_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_H__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_H__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tT_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_double_C__tT_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_C__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_C__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
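+
+/*
+ * Editorial note, not part of the upstream sources: the "tT" dispatchers
+ * above serve the transposed operand.  The infinity norm of A^T requires
+ * column-wise accumulation of |a_ij|, so a 1x1 leaf kernel would plausibly
+ * index the accumulator by column index and column offset instead
+ * (hypothetical helper, names illustrative as before):
+ */
+static rsb_err_t rsb_sketch_coo_infty_norm_acc_1x1_trans(const double * VA, double * row_sums, const rsb_coo_idx_t * JA, const rsb_coo_idx_t coff, const rsb_nnz_idx_t nnz)
+{
+	rsb_nnz_idx_t n;
+
+	for(n = 0; n < nnz; ++n)
+		row_sums[coff + JA[n]] += fabs(VA[n]);	/* rows of A^T are columns of A */
+	return RSB_ERR_NO_ERROR;
+}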
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tT_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_double_C__tT_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_C__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_C__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tT_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_double_H__tT_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_H__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_H__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tT_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_double_H__tT_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_H__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_H__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tC_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_double_C__tC_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_C__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_C__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
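+
+/*
+ * Editorial note, not part of the upstream sources: for the real "double"
+ * type instantiated here, conjugate transposition ("tC") coincides with
+ * plain transposition ("tT"); the generator nonetheless emits a separate
+ * dispatcher for each transposition flag.
+ */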
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tC_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_double_C__tC_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_C__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_C__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tC_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_double_H__tC_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_H__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_H__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tC_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_double_H__tC_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_H__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_H__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
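+
+/*
+ * Editorial note, not part of the upstream sources: the suffix of each
+ * dispatcher name encodes the instantiated variant.  From the signatures
+ * in this hunk, "_C_" takes rsb_coo_idx_t column indices while "_H_" takes
+ * rsb_half_idx_t (halfword) ones; "tN"/"tT"/"tC" select the BLAS-style
+ * no-transpose/transpose/conjugate-transpose cases; "sU"/"sS"/"sH"
+ * presumably select unsymmetric/symmetric/hermitian storage, and "dE"/"dI"
+ * an explicit or an implicit (unit) diagonal.  The "ul" leaves are the
+ * looping fallbacks guarded by RSB_WANT_LOOPING_KERNELS; the "uu" ones are
+ * apparently their unrolled fixed-block counterparts.  The functions below
+ * repeat the same dispatch pattern for the "rowssums" operation, whose
+ * untransposed 1x1 leaf plausibly accumulates signed values rather than
+ * absolute ones (hypothetical helper, as before):
+ */
+static rsb_err_t rsb_sketch_coo_rowssums_acc_1x1(const double * VA, double * row_sums, const rsb_coo_idx_t * IA, const rsb_coo_idx_t roff, const rsb_nnz_idx_t nnz)
+{
+	rsb_nnz_idx_t n;
+
+	for(n = 0; n < nnz; ++n)
+		row_sums[roff + IA[n]] += VA[n];	/* signed sums: no fabs() here */
+	return RSB_ERR_NO_ERROR;
+}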
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tN_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_C__tN_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_C__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_C__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tN_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_C__tN_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_C__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_C__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tN_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_H__tN_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_H__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_H__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tN_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_H__tN_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_H__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_H__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tT_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_C__tT_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_C__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_C__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tT_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_C__tT_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_C__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_C__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tT_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_H__tT_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_H__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_H__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tT_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_H__tT_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_H__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_H__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tC_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_C__tC_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_C__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_C__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tC_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_C__tC_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_C__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_C__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tC_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_H__tC_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_H__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_H__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tC_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_H__tC_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_H__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_H__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tN_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_C__tN_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_C__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_C__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
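+
+/*
+ * Editorial note, not part of the upstream sources: the "sS" dispatchers
+ * above presumably serve symmetric matrices, of which only one triangle is
+ * stored; a row-sums leaf kernel would then plausibly mirror each
+ * off-diagonal entry into its transposed position as well (hypothetical
+ * helper):
+ */
+static rsb_err_t rsb_sketch_coo_rowssums_acc_1x1_sym(const double * VA, double * row_sums, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_nnz_idx_t nnz)
+{
+	rsb_nnz_idx_t n;
+
+	for(n = 0; n < nnz; ++n)
+	{
+		row_sums[roff + IA[n]] += VA[n];	/* stored entry */
+		if(roff + IA[n] != coff + JA[n])
+			row_sums[coff + JA[n]] += VA[n];	/* mirrored entry */
+	}
+	return RSB_ERR_NO_ERROR;
+}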
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tN_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_C__tN_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_C__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_C__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tN_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_H__tN_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_H__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_H__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tN_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_H__tN_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_H__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_H__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tT_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_C__tT_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_C__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_C__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tT_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_C__tT_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_C__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_C__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tT_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_H__tT_r1_c1_uu_sS_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_H__tT_r1_c1_ul_sS_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_H__tT_r1_c1_ul_sS_dE_uG
+		( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tT_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_H__tT_r1_c1_uu_sS_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_H__tT_r1_c1_ul_sS_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_H__tT_r1_c1_ul_sS_dI_uG
+		( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
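+/*
+ * Illustrative sketch (not part of the generated sources): the _C_/_H_
+ * dispatcher pairs above differ only in the index type of bindx
+ * (rsb_coo_idx_t vs. rsb_half_idx_t).  A caller holding halfword-compressed
+ * column indices would pick the _H_ variant; use_half and hbindx below are
+ * hypothetical names:
+ *
+ *   errval = use_half
+ *     ? rsb__BCOR_rowssums_double_H_u_tT_sS_dI_uG( VA, row_sums, Mdim, mdim,
+ *           hbindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags, nnz )
+ *     : rsb__BCOR_rowssums_double_C_u_tT_sS_dI_uG( VA, row_sums, Mdim, mdim,
+ *           bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags, nnz );
+ */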
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tC_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_C__tC_r1_c1_uu_sS_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_C__tC_r1_c1_ul_sS_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_C__tC_r1_c1_ul_sS_dE_uG
+		( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tC_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_C__tC_r1_c1_uu_sS_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_C__tC_r1_c1_ul_sS_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_C__tC_r1_c1_ul_sS_dI_uG
+		( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tC_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_H__tC_r1_c1_uu_sS_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_H__tC_r1_c1_ul_sS_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_H__tC_r1_c1_ul_sS_dE_uG
+		( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tC_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_H__tC_r1_c1_uu_sS_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_H__tC_r1_c1_ul_sS_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_H__tC_r1_c1_ul_sS_dI_uG
+		( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
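+/*
+ * Illustrative sketch (not part of the generated sources): passing rpntr
+ * and cpntr as NULL takes the experimental bounded-box path above, fixing
+ * rows = columns = 1 and selecting the unlooped _r1_c1_uu_ kernel.  A
+ * minimal hypothetical invocation for a 1x1-blocked matrix:
+ *
+ *   rsb_err_t err = rsb__BCOR_rowssums_double_H_u_tC_sS_dI_uG(
+ *       VA, row_sums, Mdim, mdim, bindx, bpntr, indptr,
+ *       NULL, NULL,     // rpntr, cpntr: no partitioning arrays
+ *       1, 1,           // br, bc: 1x1 blocks
+ *       roff, coff, flags, nnz );
+ */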
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tN_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_C__tN_r1_c1_uu_sH_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_C__tN_r1_c1_ul_sH_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_C__tN_r1_c1_ul_sH_dE_uG
+		( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tN_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_C__tN_r1_c1_uu_sH_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_C__tN_r1_c1_ul_sH_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_C__tN_r1_c1_ul_sH_dI_uG
+		( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tN_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_H__tN_r1_c1_uu_sH_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_H__tN_r1_c1_ul_sH_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_H__tN_r1_c1_ul_sH_dE_uG
+		( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tN_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_H__tN_r1_c1_uu_sH_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_H__tN_r1_c1_ul_sH_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_H__tN_r1_c1_ul_sH_dI_uG
+		( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
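+/*
+ * Note (illustrative, not part of the generated sources): every non-1x1
+ * case in the dispatchers reduces to the same compile-time fallback, so a
+ * build configured without looping kernels reports such calls as
+ * unsupported rather than failing silently:
+ *
+ *   #ifdef RSB_WANT_LOOPING_KERNELS
+ *       errval = looped_kernel( ... );           // generic blocked path
+ *   #else
+ *       errval = RSB_ERR_UNSUPPORTED_OPERATION;  // explicit refusal
+ *   #endif
+ *
+ * looped_kernel stands in for the _ul_ variants named above.
+ */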
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tT_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_C__tT_r1_c1_uu_sH_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_C__tT_r1_c1_ul_sH_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_C__tT_r1_c1_ul_sH_dE_uG
+		( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tT_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_C__tT_r1_c1_uu_sH_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_C__tT_r1_c1_ul_sH_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_C__tT_r1_c1_ul_sH_dI_uG
+		( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tT_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_H__tT_r1_c1_uu_sH_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_H__tT_r1_c1_ul_sH_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_H__tT_r1_c1_ul_sH_dE_uG
+		( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tT_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_H__tT_r1_c1_uu_sH_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_H__tT_r1_c1_ul_sH_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_H__tT_r1_c1_ul_sH_dI_uG
+		( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tC_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_C__tC_r1_c1_uu_sH_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_C__tC_r1_c1_ul_sH_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_C__tC_r1_c1_ul_sH_dE_uG
+		( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tC_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_C__tC_r1_c1_uu_sH_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_C__tC_r1_c1_ul_sH_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_C__tC_r1_c1_ul_sH_dI_uG
+		( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tC_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_H__tC_r1_c1_uu_sH_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_H__tC_r1_c1_ul_sH_dE_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_H__tC_r1_c1_ul_sH_dE_uG
+		( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tC_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_H__tC_r1_c1_uu_sH_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_H__tC_r1_c1_ul_sH_dI_uG
+			( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_H__tC_r1_c1_ul_sH_dI_uG
+		( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
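+/*
+ * Illustrative sketch (not part of the generated sources): unlike the
+ * rowssums dispatchers above, the scale dispatchers below take VA as
+ * mutable and append a scale_factors argument.  A hypothetical in-place
+ * scaling call on a 1x1-blocked matrix (the per-entry meaning of
+ * scale_factors is an assumption here, not documented in this file):
+ *
+ *   rsb_err_t err = rsb__BCOR_scale_double_C_u_tN_sU_dE_uG(
+ *       VA, Mdim, mdim, bindx, bpntr, indptr,
+ *       NULL, NULL, 1, 1, roff, coff, flags, nnz, scale_factors );
+ */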
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tN_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_C__tN_r1_c1_uu_sU_dE_uG
+			( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_C__tN_r1_c1_ul_sU_dE_uG
+			( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_C__tN_r1_c1_ul_sU_dE_uG
+		( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tN_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_C__tN_r1_c1_uu_sU_dI_uG
+			( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_C__tN_r1_c1_ul_sU_dI_uG
+			( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_C__tN_r1_c1_ul_sU_dI_uG
+		( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tN_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_H__tN_r1_c1_uu_sU_dE_uG
+			( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_H__tN_r1_c1_ul_sU_dE_uG
+			( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_H__tN_r1_c1_ul_sU_dE_uG
+		( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tN_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_H__tN_r1_c1_uu_sU_dI_uG
+			( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_H__tN_r1_c1_ul_sU_dI_uG
+			( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_H__tN_r1_c1_ul_sU_dI_uG
+		( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
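+/*
+ * Illustrative sketch (not part of the generated sources): all of these
+ * dispatchers report through rsb_err_t, so a caller typically propagates
+ * the status instead of ignoring it:
+ *
+ *   rsb_err_t errval = rsb__BCOR_scale_double_H_u_tN_sU_dI_uG( VA, Mdim,
+ *       mdim, bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff,
+ *       flags, nnz, scale_factors );
+ *   if( errval != RSB_ERR_NO_ERROR )
+ *       return errval;  // e.g. RSB_ERR_UNSUPPORTED_OPERATION when built
+ *                       // without RSB_WANT_LOOPING_KERNELS
+ */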
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tT_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_C__tT_r1_c1_uu_sU_dE_uG
+			( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_C__tT_r1_c1_ul_sU_dE_uG
+			( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_C__tT_r1_c1_ul_sU_dE_uG
+		( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tT_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_C__tT_r1_c1_uu_sU_dI_uG
+			( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_C__tT_r1_c1_ul_sU_dI_uG
+			( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_C__tT_r1_c1_ul_sU_dI_uG
+		( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tT_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * rows and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * columns, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_H__tT_r1_c1_uu_sU_dE_uG
+			( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_H__tT_r1_c1_ul_sU_dE_uG
+			( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_H__tT_r1_c1_ul_sU_dE_uG
+		( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tT_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_H__tT_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_H__tT_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_H__tT_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
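+/*
+ * Editorial note: a minimal usage sketch for the dispatcher above, under
+ * the assumption of a 1 x 1 blocked COO layout (rpntr/cpntr passed as NULL,
+ * so the dispatcher falls back to rows = columns = 1). The values are made
+ * up for illustration; the fragment is written as it would appear inside a
+ * function body.
+ */
+#if 0 /* illustrative only, not compiled */
+	double VA[3] = { 1.0, 2.0, 3.0 };                  /* nonzero values */
+	rsb_half_idx_t bindx[3] = { 0, 1, 2 };             /* half-word column indices */
+	const double scale_factors[3] = { 2.0, 2.0, 2.0 }; /* scaling coefficients */
+	rsb_err_t err = rsb__BCOR_scale_double_H_u_tT_sU_dI_uG(
+		VA, 3, 3, bindx, NULL, NULL, NULL, NULL,
+		1, 1, 0, 0, RSB_FLAG_NOFLAGS, 3, scale_factors);
+#endif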
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tC_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_C__tC_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_C__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_C__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tC_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_C__tC_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_C__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_C__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tC_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_H__tC_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_H__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_H__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tC_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_H__tC_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_H__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_H__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tN_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_C__tN_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_C__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_C__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tN_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_C__tN_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_C__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_C__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tN_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_H__tN_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_H__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_H__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tN_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_H__tN_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_H__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_H__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tT_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_C__tT_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_C__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_C__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tT_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_C__tT_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_C__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_C__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tT_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_H__tT_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_H__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_H__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tT_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_H__tT_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_H__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_H__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tC_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_C__tC_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_C__tC_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_C__tC_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tC_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_C__tC_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_C__tC_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_C__tC_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tC_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_H__tC_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_H__tC_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_H__tC_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tC_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_H__tC_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_H__tC_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_H__tC_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tN_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_C__tN_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_C__tN_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_C__tN_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tN_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_C__tN_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_C__tN_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_C__tN_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tN_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_H__tN_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_H__tN_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_H__tN_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tN_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_H__tN_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_H__tN_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_H__tN_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tT_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_C__tT_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_C__tT_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_C__tT_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tT_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_C__tT_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_C__tT_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_C__tT_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tT_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_H__tT_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_H__tT_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_H__tT_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tT_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_H__tT_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_H__tT_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_H__tT_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tC_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be padded to tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_C__tC_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_C__tC_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_C__tC_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tC_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("scale") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_scale_double_C__tC_r1_c1_uu_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_C__tC_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_C__tC_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tC_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("scale") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_scale_double_H__tC_r1_c1_uu_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_H__tC_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_H__tC_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tC_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("scale") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_scale_double_H__tC_r1_c1_uu_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_H__tC_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_H__tC_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
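+
+/*
+ * An informal key to the generated name mnemonics used throughout this file
+ * (inferred from the surrounding code and the kernels each dispatcher
+ * selects; treat it as a reading aid, not authoritative documentation):
+ *   _C / _H  - full-word (rsb_coo_idx_t) vs. half-word (rsb_half_idx_t) bindx;
+ *   tN/tT/tC - untransposed, transposed, conjugate-transposed operation;
+ *   sU/sS/sH - unsymmetric, symmetric, hermitian matrix;
+ *   dE/dI    - explicit vs. implicit diagonal;
+ *   uu / ul  - fully unrolled kernel vs. looping kernel, the latter being
+ *              available only when RSB_WANT_LOOPING_KERNELS is defined.
+ */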
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("spmv_uaua") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
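+
+/*
+ * A hedged usage sketch (the helper name and sizes are invented for
+ * illustration; this is not librsb API): a caller can honour the overflow
+ * bound above by padding the rhs and out vectors to whole block multiples
+ * before invoking one of the blocked spmv dispatchers.
+ */
+#include <stdlib.h>	/* for calloc() */
+static float * rsb_illustrative_alloc_padded(rsb_coo_idx_t matrixdim, rsb_coo_idx_t blockdim)
+{
+	/* Length rounded up to a whole number of blocks, zero-initialized. */
+	const rsb_coo_idx_t pad = (blockdim - (matrixdim % blockdim)) % blockdim;
+	return calloc((size_t)(matrixdim + pad), sizeof(float));
+}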
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("spmv_uaua") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("spmv_uaua") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("spmv_uaua") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("spmv_uaua") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("spmv_uaua") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("spmv_uaua") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("spmv_uaua") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("spmv_uaua") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("spmv_uaua") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("spmv_uaua") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("spmv_uaua") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("spmv_uaua") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("spmv_uaua") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("spmv_uaua") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("spmv_uaua") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("spmv_uaua") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("spmv_uaua") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("spmv_uaua") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("spmv_uaua") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("spmv_uaua") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("spmv_uaua") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel performing the requested matrix operation ("spmv_uaua") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
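+
+/*
+ * Illustrative sketch, not part of the generated sources.  The mangled
+ * names above encode the kernel variant; reading the letters off the
+ * signatures (our inference, not documented in this file): _C_/_H_ picks
+ * rsb_coo_idx_t vs. rsb_half_idx_t block indices, tN/tT/tC the
+ * transposition, sU/sS/sH the symmetry, dE/dI the diagonal handling, and
+ * _uu/_ul a fully unrolled vs. a looped kernel.  The hypothetical chooser
+ * below (not a librsb function) shows how the two index widths share one
+ * calling contract.
+ */
+static rsb_err_t rsb_spmv_uaua_tC_sS_dI_by_width(
+	const void * bindx, const int use_half_indices,
+	const float * VA, const float * rhs, float * out,
+	const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim,
+	const rsb_nnz_idx_t * bpntr, const rsb_nnz_idx_t * indptr,
+	const rsb_coo_idx_t * rpntr, const rsb_coo_idx_t * cpntr,
+	const rsb_coo_idx_t br, const rsb_coo_idx_t bc,
+	const rsb_coo_idx_t roff, const rsb_coo_idx_t coff,
+	const rsb_flags_t flags, const rsb_nnz_idx_t nnz)
+{
+	if(use_half_indices)	/* half-width indices halve the bindx footprint */
+		return rsb__BCOR_spmv_uaua_float_H_u_tC_sS_dI_uG(
+			VA,rhs,out,Mdim,mdim,(const rsb_half_idx_t*)bindx,
+			bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz);
+	return rsb__BCOR_spmv_uaua_float_C_u_tC_sS_dI_uG(
+		VA,rhs,out,Mdim,mdim,(const rsb_coo_idx_t*)bindx,
+		bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz);
+}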
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
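+
+/*
+ * Illustrative sketch, not part of the generated sources.  This closes the
+ * float "spmv_uaua" dispatchers; the "spmv_uauz" family follows.  Judging
+ * from the operation naming alone (an inference: this file does not define
+ * the suffixes), "uaua" accumulates into the output while "uauz" first
+ * zeroes it.  For a single nonzero a at (0,0) and a right-hand side x:
+ */
+static void rsb_uaua_vs_uauz_example(void)
+{
+	const float a = 2.0f, x = 3.0f;
+	float y_uaua = 1.0f, y_uauz = 1.0f;
+
+	y_uaua += a * x;	/* spmv_uaua-style: y += A x  -> 7 */
+	y_uauz  = a * x;	/* spmv_uauz-style: y  = A x  -> 6 */
+	(void)y_uaua; (void)y_uauz;
+}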
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
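+
+/*
+ * Illustrative sketch, not part of the generated sources.  Every
+ * dispatcher above answers RSB_ERR_UNSUPPORTED_OPERATION for block sizes
+ * other than 1x1 when RSB_WANT_LOOPING_KERNELS is compiled out.  The
+ * hypothetical probe below would surface exactly that, assuming the
+ * kernels touch no array data when nnz == 0:
+ */
+static rsb_err_t rsb_probe_looping_kernels(void)
+{
+	const rsb_coo_idx_t rpntr[] = { 0, 2 }, cpntr[] = { 0, 2 };	/* a 2x2 leading block */
+	float out[2] = { 0.0f, 0.0f };
+
+	/* With an empty operand set, the only observable effect is the
+	 * dispatch itself: RSB_ERR_NO_ERROR if a looped kernel exists,
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise. */
+	return rsb__BCOR_spmv_uauz_float_C_u_tN_sU_dE_uG(
+		NULL, NULL, out, 0, 0, NULL, NULL, NULL,
+		rpntr, cpntr, 2, 2, 0, 0, RSB_FLAG_NOFLAGS, 0);
+}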
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
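+/*
+ * Editorial note: the suffix of each dispatcher in this group encodes its
+ * variant. As far as can be read off the signatures and the callees: `C`/`H`
+ * selects rsb_coo_idx_t vs. rsb_half_idx_t for the bindx array; `tN`/`tT`/`tC`
+ * appear to select the non-transposed, transposed and conjugate-transposed
+ * operation; `sU`/`sS`/`sH` and `dE`/`dI` appear to select symmetry and
+ * diagonal-handling variants.
+ */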
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
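+/*
+ * Editorial note: unlike the "spmv_uauz" dispatchers above, the "spmv_uxua"
+ * family forwards one extra argument, alphap, a pointer to the scalar that
+ * scales the product (presumably out += alpha * op(A) * rhs). A hypothetical
+ * call, with all operand names assumed rather than taken from this file:
+ *
+ *   const float alpha = 2.0f;
+ *   errval = rsb__BCOR_spmv_uxua_float_C_u_tN_sU_dE_uG(VA, x, y, Mdim, mdim,
+ *               bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff,
+ *               flags, nnz, &alpha);
+ */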
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
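+
+/* Editorial note: paired dE/dI instantiations such as the one above appear
+ * to distinguish matrices whose diagonal is stored explicitly from those
+ * with an implicit (unit) diagonal, which an spmv kernel would account for
+ * as out[i] += alpha * rhs[i] without consuming any stored entry. This
+ * reading of the suffixes is an assumption, not documented in the diff. */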
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
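+
+/* Editorial note, an assumption rather than documented behavior: the _H_
+ * variants take rsb_half_idx_t (a narrower integer type) for bindx where
+ * the _C_ variants take rsb_coo_idx_t, which would roughly halve index
+ * storage for sufficiently small submatrices; roff and coff would then
+ * rebase the submatrix-local indices, e.g. widened as
+ *     (rsb_coo_idx_t)bindx[k] + coff
+ * before addressing rhs/out. */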
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
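+
+/* Editorial sketch, not librsb code: what the tN/tT/tC suffixes mean for a
+ * plain COO product, assuming the conventional reading (N = no transpose,
+ * T = transpose, C = conjugate transpose; for real 'float' data tC behaves
+ * like tT, conjugation being the identity). IA/JA are hypothetical
+ * row/column index arrays, not the BCOR layout itself. */
+static void coo_spmv_sketch(const float *VA, const int *IA, const int *JA,
+	int nnz, float alpha, const float *rhs, float *out, char trans)
+{
+	for (int k = 0; k < nnz; ++k)
+		if (trans == 'N')
+			out[IA[k]] += alpha * VA[k] * rhs[JA[k]];
+		else	/* 'T' or, for real data, 'C' */
+			out[JA[k]] += alpha * VA[k] * rhs[IA[k]];
+}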
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
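+
+/* Editorial sketch, not librsb code: the update rule a symmetric-storage
+ * (sS) kernel is expected to apply, assuming only one triangle is stored,
+ * so each off-diagonal entry a(i,j) also acts as a(j,i). IA/JA are
+ * hypothetical row/column index arrays. */
+static void coo_spmv_sym_sketch(const float *VA, const int *IA, const int *JA,
+	int nnz, float alpha, const float *rhs, float *out)
+{
+	for (int k = 0; k < nnz; ++k)
+	{
+		out[IA[k]] += alpha * VA[k] * rhs[JA[k]];
+		if (IA[k] != JA[k])	/* mirror the off-diagonal entry */
+			out[JA[k]] += alpha * VA[k] * rhs[IA[k]];
+	}
+}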
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
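+
+/* Editorial note: for these real-valued 'float' kernels the hermitian (sH)
+ * variants should coincide with the symmetric (sS) ones, since conjugation
+ * is the identity on real data; the sH instantiations presumably exist
+ * because the generator emits every (type, symmetry) combination. */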
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
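+
+/*
+ * The "_C_" and "_H_" dispatcher families differ only in the type of the
+ * bindx index array: rsb_coo_idx_t for "_C_" and the narrower
+ * rsb_half_idx_t for "_H_", the latter presumably saving index storage on
+ * submatrices whose coordinates fit the smaller type; the dispatch logic
+ * itself is identical.
+ */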
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
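+
+/*
+ * Dispatch structure common to all the functions in this file: when rpntr
+ * and cpntr are given, the block size is read off their leading entries
+ * (cpntr[1]-cpntr[0] columns, rpntr[1]-rpntr[0] rows); when either is
+ * NULL, a 1x1 blocking is assumed (the "bounded box" case). A nested
+ * switch on (rows, columns) then selects the fixed 1x1 kernel, falling
+ * back to the looped kernel for any other block size.
+ */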
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
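+
+/*
+ * Illustrative invocation of the dispatcher just above (hypothetical
+ * buffers and sizes, hence compiled out): NULL rpntr/cpntr selects the
+ * 1x1 bounded-box branch, and alphap points to the scale factor that the
+ * "uxua" family forwards to its kernels.
+ */
+#if 0
+	const float alpha = 2.0f;	/* hypothetical scale factor */
+	rsb_err_t errval = rsb__BCOR_spmv_uxua_float_C_u_tT_sH_dI_uG(
+		VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr,
+		NULL /* rpntr */, NULL /* cpntr */,
+		1 /* br */, 1 /* bc */, 0 /* roff */, 0 /* coff */,
+		flags, nnz, &alpha);
+#endif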
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
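+
+/*
+ * From here on the file switches from the "spmv_uxua" to the "spmv_unua"
+ * operation. Judging from the signatures, the visible difference is the
+ * trailing alphap parameter: the uxua dispatchers forward a scale factor
+ * to their kernels, while the unua ones take none (presumably an unscaled
+ * variant of the same multiply-update).
+ */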
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
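+
+/*
+ * Note on the RSB_WANT_LOOPING_KERNELS guards: when the looping kernels
+ * are compiled out, any block size other than 1x1 has no kernel to
+ * dispatch to, and these functions report RSB_ERR_UNSUPPORTED_OPERATION.
+ * Callers are therefore expected to check the returned rsb_err_t; a
+ * minimal sketch (hypothetical caller code, compiled out):
+ */
+#if 0
+	rsb_err_t errval = rsb__BCOR_spmv_unua_float_H_u_tN_sU_dI_uG(
+		VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr,
+		rpntr, cpntr, br, bc, roff, coff, flags, nnz);
+	if(errval == RSB_ERR_UNSUPPORTED_OPERATION)
+		; /* fall back, e.g. to a generic kernel */
+#endif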
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_C__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_C__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_H__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_H__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_C__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_C__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_H__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_H__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_unua") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_H__tN_r1_c1_ul_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_H__tN_r1_c1_ul_sS_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
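+
+/*
+ * Editor's note: the suffixes of these generated dispatchers appear to encode
+ * the variant being compiled. The "C"/"H" letter selects full (rsb_coo_idx_t)
+ * versus halfword (rsb_half_idx_t) bindx indices, as the prototypes show;
+ * "tN"/"tT"/"tC" presumably select the untransposed, transposed and
+ * conjugate-transposed operation, and "uu"/"ul" the unrolled versus looped
+ * kernel. The "sS"/"sH"/"sU" and "dE"/"dI" fields are assumed here to select
+ * symmetry and diagonal handling; only the index-type distinction is directly
+ * visible in this file, so the rest should be checked against the generating
+ * M4 macros.
+ */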
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_unua") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_C__tT_r1_c1_uu_sS_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_C__tT_r1_c1_ul_sS_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_C__tT_r1_c1_ul_sS_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
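+
+/*
+ * Editor's note (sketch under stated assumptions, not librsb code): every
+ * dispatcher in this file has the same shape, reduced below to its core.
+ * Only the 1x1 block case is unrolled here; any other detected block size
+ * falls back to a generic looped kernel when RSB_WANT_LOOPING_KERNELS is
+ * defined, and to an "unsupported operation" error otherwise. The kernel
+ * names in the sketch are placeholders.
+ */
+#if 0	/* sketch only */
+static rsb_err_t rsb_example_dispatch(rsb_coo_idx_t rows, rsb_coo_idx_t columns)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(rows == 1 && columns == 1)
+		errval = unrolled_1x1_kernel();		/* the "_uu_" specialization */
+	else
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = generic_looped_kernel();	/* the "_ul_" fallback */
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	return errval;
+}
+#endif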
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_unua") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_C__tT_r1_c1_uu_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_C__tT_r1_c1_ul_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_C__tT_r1_c1_ul_sS_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_unua") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_H__tT_r1_c1_uu_sS_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_H__tT_r1_c1_ul_sS_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_H__tT_r1_c1_ul_sS_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_unua") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_H__tT_r1_c1_uu_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_H__tT_r1_c1_ul_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_H__tT_r1_c1_ul_sS_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_unua") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_C__tC_r1_c1_uu_sS_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_C__tC_r1_c1_ul_sS_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_C__tC_r1_c1_ul_sS_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_unua") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_C__tC_r1_c1_uu_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_C__tC_r1_c1_ul_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_C__tC_r1_c1_ul_sS_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_unua") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_H__tC_r1_c1_uu_sS_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_H__tC_r1_c1_ul_sS_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_H__tC_r1_c1_ul_sS_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_unua") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_H__tC_r1_c1_uu_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_H__tC_r1_c1_ul_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_H__tC_r1_c1_ul_sS_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_unua") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_C__tN_r1_c1_ul_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_C__tN_r1_c1_ul_sH_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_unua") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_C__tN_r1_c1_ul_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_C__tN_r1_c1_ul_sH_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_unua") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_H__tN_r1_c1_ul_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_H__tN_r1_c1_ul_sH_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_unua") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_H__tN_r1_c1_ul_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_H__tN_r1_c1_ul_sH_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_unua") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_C__tT_r1_c1_uu_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_C__tT_r1_c1_ul_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_C__tT_r1_c1_ul_sH_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_unua") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_C__tT_r1_c1_uu_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_C__tT_r1_c1_ul_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_C__tT_r1_c1_ul_sH_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_unua") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_H__tT_r1_c1_uu_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_H__tT_r1_c1_ul_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_H__tT_r1_c1_ul_sH_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_unua") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_H__tT_r1_c1_uu_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_H__tT_r1_c1_ul_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_H__tT_r1_c1_ul_sH_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_unua") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_C__tC_r1_c1_uu_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_C__tC_r1_c1_ul_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_C__tC_r1_c1_ul_sH_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_unua") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_C__tC_r1_c1_uu_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_C__tC_r1_c1_ul_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_C__tC_r1_c1_ul_sH_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_unua") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_H__tC_r1_c1_uu_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_H__tC_r1_c1_ul_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_H__tC_r1_c1_ul_sH_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_unua") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_H__tC_r1_c1_uu_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_H__tC_r1_c1_ul_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_H__tC_r1_c1_ul_sH_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_sasa") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_ul_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_ul_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
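+
+/*
+ * Editor's note (illustrative sketch, hypothetical helper, not part of
+ * librsb): unlike the "unua" dispatchers above, the "spmv_sasa" dispatchers
+ * also forward incx and incy, the strides between consecutive logical
+ * elements of the rhs and out vectors; element i of a strided vector x is
+ * addressed as x[i*inc], as in this minimal example.
+ */
+static void rsb_example_strided_update(float *y, const float *x,
+		rsb_coo_idx_t n, rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+{
+	rsb_coo_idx_t i;
+	/* y[i*incy] += x[i*incx]; incx == incy == 1 is the contiguous case */
+	for(i = 0; i < n; ++i)
+		y[i*incy] += x[i*incx];
+}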
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_sasa") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_ul_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_ul_sU_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_sasa") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_ul_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_ul_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_sasa") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_ul_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_ul_sU_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the requested
+	 * matrix operation ("spmv_sasa") and the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_uu_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_ul_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_ul_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
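+
+/*
+ * Editorial note (not part of the generated sources): the function name
+ * suffixes encode the specialization. What is certain from the signatures in
+ * this file: the _C_ variants take a bindx of type rsb_coo_idx_t, while the
+ * _H_ variants take the narrower rsb_half_idx_t. The remaining letters
+ * appear to follow the usual librsb conventions, offered here as a reading
+ * aid rather than a specification: tN/tT/tC for untransposed, transposed and
+ * conjugate-transposed operation; sU/sS/sH for unsymmetric, symmetric and
+ * hermitian handling; dE/dI presumably for explicit versus implicit (unit)
+ * diagonal; uu for the fully unrolled fixed-size kernel and ul for the
+ * looped fallback guarded by RSB_WANT_LOOPING_KERNELS.
+ */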
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
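+
+/*
+ * Editor's note on the suffixes, inferred from the signatures above and
+ * hedged accordingly: the `_C_` dispatchers take rsb_coo_idx_t column
+ * indices in bindx while the `_H_` ones take rsb_half_idx_t, so a caller
+ * would presumably prefer the half-index variants whenever all column
+ * indices fit the narrower type. A hypothetical selection sketch
+ * (ncols, hbindx, and HALF_IDX_MAX are placeholders, not librsb
+ * identifiers):
+ *
+ *   errval = (ncols <= HALF_IDX_MAX)
+ *       ? rsb__BCOR_spmv_sasa_float_H_u_tT_sH_dE_uG(VA, rhs, out, Mdim, mdim,
+ *             hbindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags,
+ *             nnz, incx, incy)
+ *       : rsb__BCOR_spmv_sasa_float_C_u_tT_sH_dE_uG(VA, rhs, out, Mdim, mdim,
+ *             bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags,
+ *             nnz, incx, incy);
+ */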
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
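+
+/*
+ * Editor's note: a minimal, hypothetical invocation of the dispatcher above.
+ * As its body shows, passing rpntr and cpntr as NULL takes the experimental
+ * bounded-box path (rows=columns=1) and selects the fixed 1x1 kernel:
+ *
+ *   rsb_err_t err = rsb__BCOR_spsv_uxua_float_C_u_tN_sU_dE_uU(
+ *       VA, x, y, Mdim, mdim, bindx, bpntr, indptr,
+ *       NULL, NULL,        // rpntr, cpntr: NULL selects the 1x1 path
+ *       1, 1, 0, 0,        // br, bc, roff, coff
+ *       flags, nnz);
+ *
+ * VA, x, y, and the index arrays are assumed to be set up elsewhere; the
+ * roles sketched here follow the parameter names, not documented semantics.
+ */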
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
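+
+/*
+ * Editor's note (naming inference, unverified): among the spsv dispatchers
+ * the trailing uU/uL codes appear to select the upper versus lower triangle,
+ * and dE/dI an explicitly stored versus implicit diagonal. Under that
+ * assumption, a call site could pick among the four tN variants above:
+ *
+ *   errval = (lower
+ *       ? (implicit_diag ? rsb__BCOR_spsv_uxua_float_C_u_tN_sU_dI_uL
+ *                        : rsb__BCOR_spsv_uxua_float_C_u_tN_sU_dE_uL)
+ *       : (implicit_diag ? rsb__BCOR_spsv_uxua_float_C_u_tN_sU_dI_uU
+ *                        : rsb__BCOR_spsv_uxua_float_C_u_tN_sU_dE_uU))
+ *       (VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
+ *        br, bc, roff, coff, flags, nnz);
+ *
+ * `lower` and `implicit_diag` are hypothetical booleans for this sketch.
+ */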
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
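+
+/*
+ * Editor's note: as the #ifdef blocks above make explicit, a build without
+ * RSB_WANT_LOOPING_KERNELS only provides the fixed 1x1 kernels; any other
+ * block size makes these dispatchers return RSB_ERR_UNSUPPORTED_OPERATION.
+ * A caller can therefore distinguish a configuration limit from a runtime
+ * failure; the handling shown below is a hypothetical sketch:
+ *
+ *   if(errval == RSB_ERR_UNSUPPORTED_OPERATION)
+ *   {
+ *       // rebuild with RSB_WANT_LOOPING_KERNELS, or re-block to 1x1
+ *   }
+ */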
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
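[Editorial note: every generated dispatcher in this file follows the same shape: a nested switch picks the fully unrolled fixed-size kernel (the "_uu_" variant) when the block is 1x1, and otherwise falls back to a generic looping kernel (the "_ul_" variant) that is only compiled in under RSB_WANT_LOOPING_KERNELS. A compilable sketch of that pattern, with hypothetical kernel and error names standing in for the generated ones:]

    #include <stdio.h>

    typedef int err_t;
    #define ERR_OK 0
    #define ERR_UNSUPPORTED (-1)

    static err_t kernel_1x1(void)  { puts("unrolled 1x1 kernel"); return ERR_OK; }
    #ifdef WANT_LOOPING_KERNELS
    static err_t kernel_loop(void) { puts("generic looping kernel"); return ERR_OK; }
    #endif

    static err_t dispatch(int rows, int columns)
    {
        err_t errval = ERR_OK;
        switch(rows)
        {
        case 1:
            switch(columns)
            {
            case 1: errval = kernel_1x1(); break; /* fast path */
            default:
    #ifdef WANT_LOOPING_KERNELS
                errval = kernel_loop();           /* generic fallback */
    #else
                errval = ERR_UNSUPPORTED;         /* fallback compiled out */
    #endif
            }
            break;
        default:
    #ifdef WANT_LOOPING_KERNELS
            errval = kernel_loop();
    #else
            errval = ERR_UNSUPPORTED;
    #endif
        }
        return errval;
    }

    int main(void) { return dispatch(1, 1); }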
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sU_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sU_dE_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sU_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sU_dE_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sU_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sU_dI_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sU_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sU_dI_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
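[Editorial note: comparing the signatures above, the "_float_C_" and "_float_H_" variants differ only in the type of bindx: rsb_coo_idx_t versus rsb_half_idx_t. A plausible reading (an assumption on my part, not upstream documentation) is that the "H" variants use half-width indices to cut the memory footprint of the per-nonzero index arrays. The stand-in types below are illustrative only:]

    #include <stdint.h>
    #include <stdio.h>

    typedef int32_t  coo_idx_t;   /* stand-in for a full-width rsb_coo_idx_t  */
    typedef uint16_t half_idx_t;  /* stand-in for a half-width rsb_half_idx_t */

    int main(void)
    {
        /* storage per nonzero column index: full vs. half width */
        printf("%zu vs %zu bytes per index\n",
               sizeof(coo_idx_t), sizeof(half_idx_t)); /* e.g. "4 vs 2" */
        return 0;
    }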
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sS_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sS_dE_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sS_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sS_dE_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sS_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sS_dI_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sS_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sS_dI_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sS_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sS_dE_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sS_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sS_dE_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sS_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sS_dI_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sS_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sS_dI_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sS_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sS_dE_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sS_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sS_dE_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sS_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sS_dI_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sS_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sS_dI_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sS_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sS_dE_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sS_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sS_dE_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sS_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sS_dI_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sS_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sS_dI_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sS_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sS_dE_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sS_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sS_dE_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sS_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sS_dI_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sS_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sS_dI_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * allow an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sS_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sS_dE_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
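+
+/*
+ * The control flow shared by every dispatcher in this file reduces to the
+ * following sketch (a simplified illustration with hypothetical names,
+ * not generated code):
+ *
+ *   rsb_err_t dispatch(rsb_coo_idx_t rows, rsb_coo_idx_t columns)
+ *   {
+ *       if (rows == 1 && columns == 1)
+ *           return leaf_kernel_1x1();         // the only fixed size built
+ *   #ifdef RSB_WANT_LOOPING_KERNELS
+ *       return leaf_kernel_looped();          // generic block sizes
+ *   #else
+ *       return RSB_ERR_UNSUPPORTED_OPERATION; // no generic fallback
+ *   #endif
+ *   }
+ */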
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
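+
+/*
+ * Aside: the register qualifier on the rows/columns locals above is a
+ * legacy optimization hint; modern C compilers perform their own register
+ * allocation and are free to ignore it.
+ */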
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * should tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
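+
+/*
+ * Worked example of the overflow bound quoted in the comments above (an
+ * editorial addition): with matrixrows = 10 and blockrows = 4,
+ *       mod(4 - mod(10, 4), 4) = mod(4 - 2, 4) = 2,
+ * so a strictly blocked kernel may touch up to 2 elements past the end of
+ * the vector, which must be allocated with that much slack. For the 1x1
+ * blocking these dispatchers actually select, the bound degenerates to
+ * mod(1 - mod(m, 1), 1) = 0: no padding at all.
+ */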
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
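+
+/*
+ * Note on the name mangling (an editorial addition, inferred from the
+ * signatures, so treat the letter meanings as assumptions): the _C_/_H_
+ * infix tracks the bindx index type -- rsb_coo_idx_t for C, rsb_half_idx_t
+ * for H -- while tN/tT/tC plausibly select no transposition, transposition,
+ * or conjugate transposition, sU/sS/sH a symmetry variant, dE/dI the
+ * diagonal handling, and uu/ul the fixed-size versus looped kernel, as the
+ * RSB_WANT_LOOPING_KERNELS branches above suggest.
+ */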
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
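+
+/* The mod() bound from the comments above, computed explicitly; an
+ * editorial sketch kept under #if 0 so it cannot interfere with this
+ * generated translation unit. */
+#if 0
+static rsb_coo_idx_t rsb_example_blocked_pad(rsb_coo_idx_t n, rsb_coo_idx_t b)
+{
+	/* mod(b - mod(n, b), b): how many elements past the end of a length-n
+	 * vector a strictly blocked kernel with block size b may touch. */
+	return (b - (n % b)) % b;
+}
+#endif /* 0 */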
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * desired matrix operation ("spmv_sxsa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel which
+	 * performs the requested matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_ul_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_ul_sH_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel which
+	 * performs the requested matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_ul_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_ul_sH_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel which
+	 * performs the requested matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_ul_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_ul_sH_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel which
+	 * performs the requested matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_ul_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_ul_sH_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel which
+	 * performs the requested matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_uu_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_ul_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_ul_sH_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel which
+	 * performs the requested matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_uu_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_ul_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_ul_sH_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel which
+	 * performs the requested matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_uu_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_ul_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_ul_sH_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel which
+	 * performs the requested matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_uu_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_ul_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_ul_sH_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel which
+	 * performs the requested matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_uu_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_ul_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_ul_sH_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel which
+	 * performs the requested matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_uu_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_ul_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_ul_sH_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel which
+	 * performs the requested matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_uu_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_ul_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_ul_sH_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel which
+	 * performs the requested matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_uu_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_ul_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_ul_sH_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel which
+	 * performs the requested matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sU_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sU_dE_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
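+/*
+ * Editorial note (illustrative; not part of the generated sources): the
+ * dispatchers from here on wrap the triangular-solve kernels
+ * ("spsv_sxsx"); the trailing "uU"/"uL" selects the upper or the lower
+ * triangle, with "dE"/"dI" again choosing an explicit or an implicit
+ * diagonal. A minimal call sketch follows; every name except the
+ * dispatcher itself is an assumption.
+ */
+#if 0
+	const float alpha = 1.0f;	/* scaling factor, passed via alphap */
+	/* dispatch a lower-triangular solve over a 1x1-blocked,
+	 * full-index COO fragment, with unit strides on both vectors */
+	rsb_err_t err = rsb__BCOR_spsv_sxsx_float_C_u_tN_sU_dE_uL(
+		VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
+		br, bc, roff, coff, flags, nnz, &alpha, 1, 1);
+#endif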
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel which
+	 * performs the requested matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sU_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sU_dE_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel which
+	 * performs the requested matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sU_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sU_dI_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel which
+	 * performs the requested matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sU_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sU_dI_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel which
+	 * performs the requested matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sU_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sU_dE_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel which
+	 * performs the requested matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sU_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sU_dI_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel which
+	 * performs the requested matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sU_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sU_dI_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel which
+	 * performs the requested matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sU_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sU_dE_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
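+
+#if 0	/* Expository sketch only: the control-flow shape shared by every
+	   dispatcher in this file. The suffix meanings are an assumption:
+	   "_uu_" = fully unrolled fixed-size kernel, "_ul_" = looping fallback. */
+rsb_err_t rsb__dispatch_shape_sketch(const rsb_coo_idx_t *rpntr, const rsb_coo_idx_t *cpntr)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	/* block dimensions are read off the first row/column pointer interval */
+	const rsb_coo_idx_t rows    = (rpntr && cpntr) ? rpntr[1] - rpntr[0] : 1;
+	const rsb_coo_idx_t columns = (rpntr && cpntr) ? cpntr[1] - cpntr[0] : 1;
+	if(rows == 1 && columns == 1)
+		;	/* call the fixed 1x1 "_uu_" kernel */
+	else
+#ifdef RSB_WANT_LOOPING_KERNELS
+		;	/* call the generic looping "_ul_" kernel */
+#else
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;	/* no fallback compiled in */
+#endif
+	return errval;
+}
+#endif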
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
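+
+#if 0	/* Expository sketch only: the "_C_" dispatchers above take full-width
+	   rsb_coo_idx_t block column indices in bindx, while the "_H_" ones take
+	   halfword rsb_half_idx_t indices. Assuming rsb_half_idx_t is an unsigned
+	   short, a caller could test applicability of the narrow variant so: */
+#include <limits.h>
+static int rsb__half_idx_fits_sketch(rsb_coo_idx_t max_local_index)
+{
+	/* halfword indices halve the bindx footprint, but only apply when the
+	 * submatrix-local column range fits the narrower type */
+	return max_local_index <= (rsb_coo_idx_t)USHRT_MAX;
+}
+#endif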
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_sxsx") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
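+/*
+ * A self-contained toy model of the dispatch pattern repeated throughout
+ * this file; every name below is hypothetical and not part of librsb's API.
+ * Each real dispatcher reads the fixed block extents from the rpntr/cpntr
+ * partitioning arrays and switches to the kernel specialized for that size,
+ * falling back to a generic looped kernel (or an "unsupported operation"
+ * error when the looped kernels are compiled out).
+ */
+#if 0 /* illustrative sketch only */
+#include <stdio.h>
+
+typedef int toy_err_t;
+enum { TOY_OK = 0, TOY_UNSUPPORTED = -1 };
+
+static toy_err_t toy_kernel_1x1(void)    { puts("specialized 1x1 kernel"); return TOY_OK; }
+static toy_err_t toy_kernel_looped(void) { puts("generic looped kernel");  return TOY_OK; }
+
+static toy_err_t toy_dispatch(const int *rpntr, const int *cpntr)
+{
+	/* block extents, computed exactly as at the top of each dispatcher */
+	int rows    = (rpntr && cpntr) ? rpntr[1] - rpntr[0] : 1;
+	int columns = (rpntr && cpntr) ? cpntr[1] - cpntr[0] : 1;
+
+	if (rows == 1 && columns == 1)
+		return toy_kernel_1x1();    /* the "r1_c1_uu" case */
+	return toy_kernel_looped();         /* the "ul" fallback */
+}
+
+int main(void)
+{
+	int rp[] = { 0, 1 }, cp[] = { 0, 1 };
+	return toy_dispatch(rp, cp) == TOY_OK ? 0 : 1;
+}
+#endif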
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
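+/*
+ * Judging from the alphap/incx/incy parameters, spsv is a scaled, strided
+ * sparse triangular solve.  Below, a minimal standalone sketch of the
+ * 1x1-block case it reduces to: forward substitution over a row-sorted COO
+ * lower triangle, with unit strides and no scaling.  All names are
+ * hypothetical; this is not the kernel librsb generates.
+ */
+#if 0 /* illustrative sketch only */
+#include <stdio.h>
+
+/* Solve L x = b in place (x holds b on entry), for L lower triangular,
+ * stored as COO sorted by row. */
+static void toy_spsv_lower(int n, int nnz, const int *ia, const int *ja,
+                           const float *va, float *x)
+{
+	int k = 0;
+	for (int i = 0; i < n; ++i) {
+		float diag = 1.0f;
+		for (; k < nnz && ia[k] == i; ++k) {
+			if (ja[k] == i)
+				diag = va[k];             /* diagonal entry   */
+			else
+				x[i] -= va[k] * x[ja[k]]; /* known x[j], j<i  */
+		}
+		x[i] /= diag;
+	}
+}
+
+int main(void)
+{
+	/* L = [[2,0],[1,4]], b = [2,6]  =>  x = [1, 1.25] */
+	int   ia[] = { 0, 1, 1 }, ja[] = { 0, 0, 1 };
+	float va[] = { 2, 1, 4 }, x[]  = { 2, 6 };
+	toy_spsv_lower(2, 3, ia, ja, va, x);
+	printf("x = [%g, %g]\n", x[0], x[1]);
+	return 0;
+}
+#endif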
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
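+/*
+ * The _H_ dispatchers below differ from the _C_ ones above only in taking
+ * rsb_half_idx_t column indices instead of rsb_coo_idx_t, presumably so
+ * that submatrices whose extents fit the narrower type move half as much
+ * index data.  A trivial sketch of that saving, assuming (hypothetically)
+ * 32-bit full and 16-bit half indices:
+ */
+#if 0 /* illustrative sketch only */
+#include <stdint.h>
+#include <stdio.h>
+
+typedef int32_t  toy_coo_idx_t;   /* stand-in for rsb_coo_idx_t  */
+typedef uint16_t toy_half_idx_t;  /* stand-in for rsb_half_idx_t */
+
+int main(void)
+{
+	size_t nnz = 1000000;   /* one million stored entries */
+	printf("full indices: %zu bytes\n", nnz * sizeof(toy_coo_idx_t));
+	printf("half indices: %zu bytes\n", nnz * sizeof(toy_half_idx_t));
+	return 0;
+}
+#endif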
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
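+/*
+ * The dispatchers below switch from triangular solve to the infinity norm.
+ * Judging from the row_sums parameter, each kernel accumulates per-row sums
+ * of absolute values; the norm itself is then max_i sum_j |a(i,j)|.  A
+ * minimal standalone sketch over COO input, with hypothetical names:
+ */
+#if 0 /* illustrative sketch only */
+#include <math.h>
+#include <stdio.h>
+
+static float toy_infty_norm(int m, int nnz, const int *ia,
+                            const float *va, float *row_sums)
+{
+	float norm = 0.0f;
+	for (int i = 0; i < m; ++i)
+		row_sums[i] = 0.0f;
+	for (int k = 0; k < nnz; ++k)
+		row_sums[ia[k]] += fabsf(va[k]);  /* per-row absolute sums */
+	for (int i = 0; i < m; ++i)
+		if (row_sums[i] > norm)
+			norm = row_sums[i];       /* reduce to the maximum */
+	return norm;
+}
+
+int main(void)
+{
+	/* A = [[1,-2],[0,3]]  =>  row sums {3,3}, norm 3 */
+	int   ia[] = { 0, 0, 1 };
+	float va[] = { 1, -2, 3 }, row_sums[2];
+	printf("norm = %g\n", toy_infty_norm(2, 3, ia, va, row_sums));
+	return 0;
+}
+#endif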
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tN_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_float_C__tN_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_C__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_C__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tN_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_float_C__tN_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_C__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_C__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tN_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_float_H__tN_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_H__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_H__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tN_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_float_H__tN_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_H__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_H__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tT_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_float_C__tT_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_C__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_C__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tT_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_float_C__tT_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_C__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_C__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tT_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_float_H__tT_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_H__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_H__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tT_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_float_H__tT_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_H__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_H__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tC_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_float_C__tC_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_C__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_C__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tC_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_float_C__tC_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_C__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_C__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tC_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_float_H__tC_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_H__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_H__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tC_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_float_H__tC_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_H__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_H__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
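+/*
+ * The sS dispatchers below presumably serve symmetric storage, where only
+ * one triangle is kept; an off-diagonal entry then contributes its absolute
+ * value to the sums of both its row and its column.  Sketch of that
+ * accumulation step (hypothetical names, in the spirit of the toy example
+ * above):
+ */
+#if 0 /* illustrative sketch only */
+#include <math.h>
+
+static void toy_infty_norm_sym_step(int i, int j, float v, float *row_sums)
+{
+	row_sums[i] += fabsf(v);
+	if (i != j)                 /* mirror the off-diagonal entry */
+		row_sums[j] += fabsf(v);
+}
+#endif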
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tN_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_float_C__tN_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_C__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_C__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tN_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_float_C__tN_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_C__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_C__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tN_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_float_H__tN_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_H__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_H__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tN_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel for the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_float_H__tN_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_H__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_float_H__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tT_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("infty_norm") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_C__tT_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_C__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_C__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tT_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("infty_norm") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_C__tT_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_C__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_C__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tT_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("infty_norm") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_H__tT_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_H__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_H__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tT_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("infty_norm") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_H__tT_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_H__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_H__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tC_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("infty_norm") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_C__tC_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_C__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_C__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tC_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("infty_norm") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_C__tC_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_C__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_C__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tC_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("infty_norm") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_H__tC_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_H__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_H__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tC_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("infty_norm") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_H__tC_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_H__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_H__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
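+
+/*
+ * Editor's note (inferred from the signatures and dispatch logic in this
+ * file; not authoritative upstream documentation): the mangled kernel
+ * names appear to encode their variant as follows --
+ *   float         numerical type of VA;
+ *   C / H         full rsb_coo_idx_t vs. halfword rsb_half_idx_t bindx
+ *                 (visible in the parameter lists above);
+ *   tN / tT / tC  presumably no transpose / transpose / conjugate transpose;
+ *   r1_c1         block rows and columns (the 1x1 case dispatched above);
+ *   uu / ul       fully unrolled kernel vs. looped fallback, the latter
+ *                 compiled only under RSB_WANT_LOOPING_KERNELS;
+ *   sS / sH / sU  presumably symmetric / hermitian / unsymmetric;
+ *   dE / dI       presumably explicit vs. implicit diagonal.
+ */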
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tN_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("infty_norm") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_C__tN_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_C__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_C__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tN_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("infty_norm") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_C__tN_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_C__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_C__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tN_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("infty_norm") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_H__tN_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_H__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_H__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tN_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("infty_norm") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_H__tN_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_H__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_H__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tT_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("infty_norm") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_C__tT_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_C__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_C__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tT_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("infty_norm") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_C__tT_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_C__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_C__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tT_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("infty_norm") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_H__tT_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_H__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_H__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tT_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("infty_norm") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_H__tT_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_H__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_H__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tC_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("infty_norm") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_C__tC_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_C__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_C__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tC_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("infty_norm") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_C__tC_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_C__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_C__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tC_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("infty_norm") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_H__tC_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_H__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_H__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tC_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("infty_norm") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_H__tC_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_H__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_H__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tN_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("rowssums") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_C__tN_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_C__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_C__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
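+
+/*
+ * Usage sketch (an editor's illustration under stated assumptions, not
+ * upstream documentation): because the dispatchers in this file derive
+ * the block shape from rpntr/cpntr and fall back to a 1x1 block when
+ * either is NULL, a caller can reach the unrolled r1_c1 kernel directly:
+ *
+ *	rsb_err_t errval = rsb__BCOR_rowssums_float_C_u_tN_sU_dE_uG(
+ *		VA, row_sums, Mdim, mdim, bindx, bpntr, indptr,
+ *		NULL, NULL,	// rpntr, cpntr: NULL selects 1x1 blocks
+ *		1, 1,		// br, bc: assumed 1 for 1x1 blocking
+ *		0, 0,		// roff, coff: no submatrix offset
+ *		flags, nnz);
+ *
+ * The precise semantics of bindx/bpntr/indptr are defined elsewhere in
+ * librsb and are not restated here.
+ */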
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tN_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("rowssums") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_C__tN_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_C__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_C__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tN_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("rowssums") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_H__tN_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_H__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_H__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tN_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("rowssums") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_H__tN_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_H__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_H__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tT_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("rowssums") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_C__tT_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_C__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_C__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tT_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the requested matrix operation ("rowssums") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_C__tT_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_C__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_C__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
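+/*
+ * Worked instance of the overrun bound quoted in the comments above
+ * (values assumed for illustration): with blockrows = 4 and
+ * matrixrows = 10, mod(4 - mod(10,4), 4) = mod(2,4) = 2, i.e. up to two
+ * elements of overrun; with matrixrows = 12 the outer mod yields 0.
+ * The same computation as a sketch:
+ */
+static rsb_coo_idx_t example_block_pad(const rsb_coo_idx_t n, const rsb_coo_idx_t b)
+{
+	return (b - (n % b)) % b; /* allowed overrun, in elements */
+}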
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tT_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_H__tT_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
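+/*
+ * The "H" variants above and below differ from the "C" ones only in taking
+ * halfword bindx indices (rsb_half_idx_t, visible in the signatures); the
+ * likely intent -- an inference, not an upstream statement -- is to halve
+ * index storage and memory traffic on submatrices small enough for short
+ * indices.
+ */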
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tT_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_H__tT_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
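+/*
+ * The "dE"/"dI" pairs are read here as explicitly stored versus implicit
+ * diagonal (an inference from the naming scheme only); for a row-sums
+ * operation an implicit unit diagonal would contribute to each row sum
+ * without any stored coefficient.
+ */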
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tC_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_C__tC_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tC_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_C__tC_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tC_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_H__tC_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tC_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_H__tC_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
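+/*
+ * From here on the dispatchers cover symmetric storage ("sS" by the key
+ * sketched earlier).  With only one triangle stored, a correct row-sums
+ * kernel must mirror each off-diagonal coefficient into the row sum of its
+ * transposed position -- a minimal sketch, again under assumed semantics
+ * and with an illustrative (non-librsb) helper name:
+ */
+static void example_sym_rowssums_coo(const float * VA, const rsb_coo_idx_t * IA,
+	const rsb_coo_idx_t * JA, const rsb_nnz_idx_t nnz, float * row_sums)
+{
+	rsb_nnz_idx_t k;
+	for(k = 0; k < nnz; ++k)
+	{
+		row_sums[IA[k]] += VA[k];
+		if(IA[k] != JA[k]) /* mirror the off-diagonal coefficient */
+			row_sums[JA[k]] += VA[k];
+	}
+}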
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tN_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_C__tN_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tN_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_C__tN_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tN_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_H__tN_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tN_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_H__tN_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tT_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_C__tT_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tT_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_C__tT_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tT_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_H__tT_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tT_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_H__tT_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tC_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_C__tC_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tC_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_C__tC_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tC_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_H__tC_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tC_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_H__tC_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
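+/*
+ * The remaining dispatchers cover hermitian storage ("sH").  For this
+ * real-valued (float) type, conjugation is the identity, so the hermitian
+ * variants can be expected to coincide numerically with the symmetric
+ * ones; the separate symbols presumably exist because the generator emits
+ * the full type-by-symmetry matrix of kernels.
+ */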
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tN_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_C__tN_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tN_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_C__tN_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tN_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_H__tN_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tN_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_H__tN_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tT_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_C__tT_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tT_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_C__tT_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_C__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tT_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("rowssums") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each be allowed to overrun their length by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_H__tT_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_H__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tT_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_H__tT_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_H__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_H__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tC_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_C__tC_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_C__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_C__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
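+/*
+ * For orientation, a plain-C sketch of what a 1x1 COO "rowssums" kernel
+ * computes: each stored value is accumulated into the sum of its row, or of
+ * its column when the matrix is taken as transposed (the _tT_/_tC_ cases).
+ * This is an illustrative stand-in, not the librsb kernel itself.
+ */
+#if 0
+static void example_coo_rowsums(const float *va, const int *ia, const int *ja,
+	int nnz, float *row_sums, int transposed)
+{
+	int n;
+	for(n = 0; n < nnz; ++n)
+		row_sums[transposed ? ja[n] : ia[n]] += va[n];
+}
+#endif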
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tC_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_C__tC_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_C__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_C__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tC_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_H__tC_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_H__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_H__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tC_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_H__tC_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_H__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_H__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
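+/*
+ * Note on usage: these rsb__BCOR_* dispatchers appear to be reached through
+ * librsb's internal operation tables rather than called directly. When
+ * rpntr/cpntr are NULL the bounded-box branch above forces the 1x1
+ * (COO-like) path; which of bpntr/indptr the 1x1 kernels actually consult
+ * is not visible from this file, so a direct call is best treated as an
+ * internal-API experiment rather than supported usage.
+ */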
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tN_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_C__tN_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_C__tN_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_C__tN_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
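+/*
+ * A hypothetical plain-C sketch of what a 1x1 COO "scale" kernel plausibly
+ * does with scale_factors: multiply each stored value by the factor of the
+ * row it belongs to. The names and the exact factor indexing are
+ * assumptions; only the dispatchers in this file are librsb's code.
+ */
+#if 0
+static void example_coo_scale(float *va, const int *ia, int nnz,
+	const float *scale_factors)
+{
+	int n;
+	for(n = 0; n < nnz; ++n)
+		va[n] *= scale_factors[ia[n]];
+}
+#endif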
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tN_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_C__tN_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_C__tN_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_C__tN_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tN_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_H__tN_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_H__tN_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_H__tN_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tN_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_H__tN_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_H__tN_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_H__tN_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
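+/*
+ * On the _tT_ "scale" variants that follow: for the transposed matrix one
+ * would expect the factor to be selected by column index rather than row
+ * index -- in the sketch given earlier, the one-line change
+ * va[n] *= scale_factors[ja[n]];. This is an inference from the operation's
+ * meaning, not a statement about the generated kernels' internals.
+ */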
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tT_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_C__tT_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_C__tT_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_C__tT_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tT_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_C__tT_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_C__tT_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_C__tT_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tT_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_H__tT_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_H__tT_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_H__tT_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tT_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_H__tT_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_H__tT_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_H__tT_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tC_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_C__tC_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_C__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_C__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tC_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_C__tC_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_C__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_C__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
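+/*
+ * The _tC_ (conjugate transpose) variants in this float group can only
+ * coincide numerically with the _tT_ ones, since conjugation is the
+ * identity on real scalars; the generator presumably emits them anyway so
+ * that every type/transposition combination resolves to a symbol.
+ */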
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tC_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_H__tC_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_H__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_H__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tC_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_H__tC_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_H__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_H__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
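+/*
+ * The dispatchers below repeat the pattern for symmetric (_sS_) storage,
+ * where only one triangle is stored. As a hedged illustration of what that
+ * means for a row-sums style kernel: every off-diagonal entry contributes
+ * to two sums, the diagonal to one. Hypothetical sketch:
+ */
+#if 0
+static void example_coo_rowsums_sym(const float *va, const int *ia,
+	const int *ja, int nnz, float *row_sums)
+{
+	int n;
+	for(n = 0; n < nnz; ++n)
+	{
+		row_sums[ia[n]] += va[n];
+		if(ia[n] != ja[n])	/* mirror the unstored triangle */
+			row_sums[ja[n]] += va[n];
+	}
+}
+#endif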
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tN_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_C__tN_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_C__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_C__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tN_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_C__tN_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_C__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_C__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
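+/*
+ * Build-configuration note: every dispatcher in this file guards its
+ * generic branches with RSB_WANT_LOOPING_KERNELS. Without that macro, any
+ * block shape other than 1x1 yields RSB_ERR_UNSUPPORTED_OPERATION. A
+ * hypothetical check mirroring that behaviour:
+ */
+#if 0
+static int example_shape_supported(int rows, int columns)
+{
+#ifdef RSB_WANT_LOOPING_KERNELS
+	(void)rows; (void)columns;
+	return 1;	/* the looping fallback handles any shape */
+#else
+	return rows == 1 && columns == 1;	/* only the unrolled 1x1 kernel exists */
+#endif
+}
+#endif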
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tN_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_H__tN_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_H__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_H__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tN_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_H__tN_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_H__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_H__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tT_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_C__tT_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_C__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_C__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tT_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_C__tT_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_C__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_C__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tT_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_H__tT_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_H__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_H__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tT_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_H__tT_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_H__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_H__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tC_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_C__tC_r1_c1_uu_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_C__tC_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_C__tC_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
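
A sketch of how a caller might consume the dispatcher's return value, relying
only on what the code above shows (errval starts at RSB_ERR_NO_ERROR, and a
build without RSB_WANT_LOOPING_KERNELS answers non-1x1 blockings with
RSB_ERR_UNSUPPORTED_OPERATION); handle_unsupported() and handle_error() are
hypothetical:

    rsb_err_t errval = rsb__BCOR_scale_float_C_u_tC_sS_dE_uG(VA, Mdim, mdim,
            bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff,
            flags, nnz, scale_factors);
    if (errval == RSB_ERR_UNSUPPORTED_OPERATION)
            handle_unsupported();   /* hypothetical: looping kernels not compiled in */
    else if (errval != RSB_ERR_NO_ERROR)
            handle_error(errval);   /* hypothetical */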
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tC_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("scale") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_C__tC_r1_c1_uu_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_C__tC_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_C__tC_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tC_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("scale") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_H__tC_r1_c1_uu_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_H__tC_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_H__tC_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
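
The _C_ and _H_ variants above differ only in the declared type of bindx
(full-width rsb_coo_idx_t versus halfword rsb_half_idx_t). A sketch of the
selection a higher-level dispatcher could perform, assuming the halfword choice
is carried in the matrix flags (RSB_FLAG_USE_HALFWORD_INDICES is librsb's
public flag; the test and the cast are illustrative, and the cast is only
valid if bindx really stores halfword indices):

    if (flags & RSB_FLAG_USE_HALFWORD_INDICES)
            errval = rsb__BCOR_scale_float_H_u_tC_sS_dE_uG(VA, Mdim, mdim,
                    (const rsb_half_idx_t *)bindx, bpntr, indptr, rpntr, cpntr,
                    br, bc, roff, coff, flags, nnz, scale_factors);
    else
            errval = rsb__BCOR_scale_float_C_u_tC_sS_dE_uG(VA, Mdim, mdim,
                    bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff,
                    flags, nnz, scale_factors);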
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tC_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("scale") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_H__tC_r1_c1_uu_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_H__tC_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_H__tC_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tN_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("scale") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_C__tN_r1_c1_uu_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_C__tN_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_C__tN_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tN_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("scale") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_C__tN_r1_c1_uu_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_C__tN_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_C__tN_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tN_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("scale") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_H__tN_r1_c1_uu_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_H__tN_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_H__tN_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tN_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("scale") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_H__tN_r1_c1_uu_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_H__tN_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_H__tN_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tT_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("scale") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_C__tT_r1_c1_uu_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_C__tT_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_C__tT_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tT_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("scale") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_C__tT_r1_c1_uu_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_C__tT_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_C__tT_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tT_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("scale") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_H__tT_r1_c1_uu_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_H__tT_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_H__tT_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tT_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("scale") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_H__tT_r1_c1_uu_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_H__tT_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_H__tT_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
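
The _tN_/_tT_/_tC_ infixes in the kernel names follow the usual BLAS
transposition convention (no transpose, transpose, conjugate transpose). A
sketch of a dispatcher branching on a transposition request, assuming the
RSB_TRANSPOSITION_* constants from librsb's public header; the surrounding
code and the trans variable are illustrative:

    switch (trans)
    {
    case RSB_TRANSPOSITION_N:
            errval = rsb__BCOR_scale_float_H_u_tN_sH_dI_uG(VA, Mdim, mdim, bindx,
                    bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags, nnz,
                    scale_factors);
            break;
    case RSB_TRANSPOSITION_T:
            errval = rsb__BCOR_scale_float_H_u_tT_sH_dI_uG(VA, Mdim, mdim, bindx,
                    bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags, nnz,
                    scale_factors);
            break;
    case RSB_TRANSPOSITION_C:
            errval = rsb__BCOR_scale_float_H_u_tC_sH_dI_uG(VA, Mdim, mdim, bindx,
                    bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags, nnz,
                    scale_factors);
            break;
    default:
            errval = RSB_ERR_BADARGS;
    }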
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tC_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("scale") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_C__tC_r1_c1_uu_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_C__tC_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_C__tC_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tC_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("scale") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_C__tC_r1_c1_uu_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_C__tC_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_C__tC_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tC_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("scale") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_H__tC_r1_c1_uu_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_H__tC_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_H__tC_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tC_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("scale") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_H__tC_r1_c1_uu_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_H__tC_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_H__tC_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uaua") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
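
For the float complex SpMV kernels the same overflow bound governs rhs and out.
A minimal allocation sketch inside a hypothetical caller, assuming m, k, br and
bc name the matrix and block dimensions (error checking omitted):

    #include <complex.h>
    #include <stdlib.h>

    /* round each operand up to a whole number of blocks, per the bound above */
    float complex *rhs = malloc((k + (bc - k % bc) % bc) * sizeof *rhs);
    float complex *out = calloc(m + (br - m % br) % br, sizeof *out);
    /* ... fill rhs, then e.g.:
     * rsb__BCOR_spmv_uaua_float_complex_C_u_tN_sU_dE_uG(VA, rhs, out, Mdim, mdim,
     *         bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags, nnz);
     */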
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uaua") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uaua") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uaua") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uaua") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uaua") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uaua") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uaua") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uaua") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uaua") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uaua") at the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should each
+	 * be allowed a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
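The function above is the first of the rsb_half_idx_t variants: compared with
its "_C_" twin it differs only in the declared type of bindx. The name
suggests (an assumption, not stated in this file) that the half-width index
type exists to shrink index storage for submatrices whose dimensions fit a
narrower integer. A rough back-of-the-envelope comparison, with stand-in
typedefs:

    #include <stdint.h>
    #include <stdio.h>

    typedef int32_t  coo_idx_t;   /* stand-in for rsb_coo_idx_t; the real width is a build-time choice */
    typedef uint16_t half_idx_t;  /* stand-in for rsb_half_idx_t */

    int main(void)
    {
            const size_t nnz = 1000000;   /* a hypothetical million-nonzero submatrix */
            printf("full-width indices: %zu bytes, half-width: %zu bytes\n",
                   nnz * sizeof(coo_idx_t), nnz * sizeof(half_idx_t));
            return 0;
    }
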
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
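The overflow bound quoted in every comment above is just the padding that
rounds a dimension up to the next multiple of the block size. A small worked
example (hand-written, not librsb code) of that mod() expression:

    #include <stdio.h>

    /* mod(b - mod(n, b), b): elements needed to round n up to a multiple
     * of the block dimension b; zero when n already divides evenly. */
    static size_t block_padding(size_t n, size_t b)
    {
            return (b - n % b) % b;
    }

    int main(void)
    {
            /* a hypothetical 10x7 matrix under 4x4 blocking */
            printf("out may overflow by %zu, rhs by %zu\n",
                   block_padding(10, 4), block_padding(7, 4)); /* 2 and 1 */
            return 0;
    }
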
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sH_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
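+
+/*
+ * From here on the file moves from the "spmv_uaua" to the "spmv_uauz"
+ * dispatchers. Going by the trailing letter alone (an inference, not an
+ * upstream statement), the _uaua variants accumulate into out whereas the
+ * _uauz variants start from a zeroed out vector.
+ */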
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
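+
+/*
+ * A minimal sketch (not upstream code) of how a caller could honour the
+ * overflow bound documented above when sizing a vector; the helper name,
+ * its calloc-based allocation and the <stdlib.h> dependency are
+ * illustrative assumptions, kept under #if 0 so the generated file is
+ * unaffected.
+ */
+#if 0
+#include <stdlib.h>
+static float complex * rsb_example_alloc_padded(rsb_coo_idx_t dim, rsb_coo_idx_t blockdim)
+{
+	/* pad = mod(blockdim - mod(dim, blockdim), blockdim) */
+	const rsb_coo_idx_t pad = (blockdim - (dim % blockdim)) % blockdim;
+
+	/* zero initialised, so the padding tail never contributes garbage */
+	return calloc((size_t)(dim + pad), sizeof(float complex));
+}
+#endif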
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sU_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
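+
+/*
+ * Reading these dispatchers: the 1x1 block case routes to the "_uu_" kernel
+ * and every other shape to the "_ul_" kernel, which exists only when
+ * RSB_WANT_LOOPING_KERNELS is defined; otherwise the dispatcher reports
+ * RSB_ERR_UNSUPPORTED_OPERATION. Reading uu as "unrolled" and ul as
+ * "looping" is an inference from the comments above, not an upstream
+ * statement.
+ */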
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sU_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sU_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sU_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sU_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sU_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
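+
+/*
+ * Naming note, inferred from the variants in this file rather than from
+ * upstream documentation: the signatures show that C variants take a
+ * rsb_coo_idx_t bindx while H variants take a rsb_half_idx_t bindx; the
+ * remaining suffixes appear to encode transposition (tN/tT/tC), symmetry
+ * handling (sU/sS/sH) and diagonal handling (dE/dI), with r1_c1 the 1x1
+ * fixed block size dispatched above.
+ */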
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sS_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sS_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sS_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sS_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sS_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sS_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sS_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sS_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sS_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sS_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sS_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sS_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sS_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sS_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sS_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sS_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sS_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sS_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
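
The only difference between the _H_ and _C_ dispatchers above is the type of the bindx column-index array: rsb_half_idx_t versus rsb_coo_idx_t. A hedged sketch of why the halfword variant exists; the typedefs below are illustrative stand-ins, since librsb's actual index types are fixed at configure time:

	#include <stdio.h>

	typedef unsigned short demo_half_idx_t; /* assumed halfword index: usable
	                                           when a submatrix has < 65536
	                                           columns */
	typedef int            demo_coo_idx_t;  /* assumed full-width index */

	int main(void)
	{
		/* One column index is stored per nonzero, so halfword indices
		 * roughly halve that part of the memory footprint and traffic. */
		size_t nnz = 1000000u;
		printf("half: %zu bytes, full: %zu bytes\n",
		       nnz * sizeof(demo_half_idx_t), nnz * sizeof(demo_coo_idx_t));
		return 0;
	}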
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uauz") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sH_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uauz") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sH_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uauz") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sH_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uauz") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sH_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uauz") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sH_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uauz") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sH_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uauz") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sH_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uauz") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sH_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uauz") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sH_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uauz") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sH_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sH_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uauz") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sH_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sH_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uxua") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
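
Note the signature change in the spmv_uxua family: relative to spmv_uauz, every dispatched call now threads an extra alphap scaling factor through to the kernels. Our reading of the mnemonics, as an assumption based on these signatures rather than on upstream documentation: "uauz" computes out = A * rhs with unit alpha into a zeroed output, while "uxua" accumulates out += alpha * A * rhs. A sketch of the corresponding 1x1 COO-style updates; the function and parameter names are ours:

	#include <complex.h>
	#include <stddef.h>

	/* out += alpha * A * rhs, alpha read through alphap as in spmv_uxua */
	static void demo_spmv_uxua(const float complex *VA, const float complex *rhs,
	                           float complex *out, const int *IA, const int *JA,
	                           size_t nnz, const float complex *alphap)
	{
		const float complex alpha = *alphap;
		for (size_t n = 0; n < nnz; ++n)
			out[IA[n]] += alpha * VA[n] * rhs[JA[n]];
	}

	/* out = A * rhs: unit alpha ("ua"), output zeroed first ("z") */
	static void demo_spmv_uauz(const float complex *VA, const float complex *rhs,
	                           float complex *out, const int *IA, const int *JA,
	                           size_t nnz, size_t nrows)
	{
		for (size_t r = 0; r < nrows; ++r)
			out[r] = 0.0f;
		for (size_t n = 0; n < nnz; ++n)
			out[IA[n]] += VA[n] * rhs[JA[n]];
	}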
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uxua") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sU_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uxua") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uxua") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sU_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uxua") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uxua") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sU_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uxua") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uxua") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sU_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uxua") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uxua") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sU_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uxua") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sU_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sU_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uxua") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sU_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sU_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uxua") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sS_dE_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sS_dE_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
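
From this point the symmetry suffix changes from sU to sS while the dispatch shape stays identical. As far as we can tell from the generated names (an inference from the naming scheme, not from upstream documentation), the suffixes encode the kernel variant roughly as follows:

	/* Apparent mnemonic scheme of the generated kernel names (assumed):
	 *   C / H    - full-word vs. halfword bindx column indices
	 *   tN/tT/tC - no transpose / transpose / conjugate transpose
	 *   sU/sS/sH - unsymmetric / symmetric / hermitian storage
	 *   dE/dI    - explicit vs. implicit diagonal
	 *   uu / ul  - unrolled vs. looped body (the latter only available
	 *              when built with RSB_WANT_LOOPING_KERNELS)
	 */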
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_uxua") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each accept a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sS_dI_uG
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sS_dI_uG
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
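+
+/*
+ * Note: the `_C_` and `_H_` dispatcher variants above differ only in the
+ * declared type of bindx: rsb_coo_idx_t for `_C_` versus the narrower
+ * rsb_half_idx_t for `_H_`, presumably halving column-index storage where
+ * the indices fit; the dispatch logic is otherwise identical.
+ */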
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
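+
+/*
+ * Naming key (inferred from the surrounding kernels, not an authoritative
+ * reference): the `tN`/`tT`/`tC` infixes appear to select the untransposed,
+ * transposed and conjugate-transposed operation variants, `sS`/`sH`/`sU`
+ * symmetric, hermitian and unsymmetric storage, and `dE`/`dI` an explicit
+ * versus implicit diagonal; one dispatcher like the above is emitted per
+ * combination.
+ */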
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_unua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
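+
+/*
+ * Note the signature difference in the "spmv_unua" family: unlike the
+ * "spmv_uxua" kernels above, which receive a scaling factor through
+ * alphap, the unua kernels take no alpha argument, so their call sites
+ * pass one parameter fewer. Schematically (argument lists abbreviated):
+ *
+ *   errval = rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+ *            ( VA, rhs, out, ..., flags, nnz, alphap ); // scaled by alpha
+ *   errval = rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+ *            ( VA, rhs, out, ..., flags, nnz );         // no alpha
+ */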
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spmv_unua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow no bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
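+/*
+ * Editorial note (inferred from the signatures and guards below, not from
+ * upstream documentation): the suffixes in these generated names appear to
+ * encode the dispatched variant: `C'/`H' select rsb_coo_idx_t versus
+ * rsb_half_idx_t column indices (compare the bindx parameters), `tN'/`tT'/
+ * `tC' apparently the untransposed, transposed and conjugate-transposed
+ * operation, `sU'/`sS'/`sH' presumably the symmetry (unsymmetric, symmetric,
+ * hermitian), `dE'/`dI' presumably an explicit or implicit diagonal, and
+ * `uu'/`ul' apparently the unrolled versus looped kernels guarded by
+ * RSB_WANT_LOOPING_KERNELS.  The transposed (`tT') variants follow.
+ */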
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
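+/*
+ * Editorial sketch (illustration only, not part of the generated code):
+ * every dispatcher above returns RSB_ERR_UNSUPPORTED_OPERATION for block
+ * sizes other than 1x1 whenever RSB_WANT_LOOPING_KERNELS is undefined, so
+ * a caller should always check the returned rsb_err_t.  Argument names
+ * here are placeholders for buffers prepared as in the real callers.
+ */
+#if 0 /* illustration only */
+	rsb_err_t err = rsb__BCOR_spmv_unua_float_complex_C_u_tN_sU_dI_uG(
+		VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
+		br, bc, roff, coff, flags, nnz);
+	if (err == RSB_ERR_UNSUPPORTED_OPERATION)
+		; /* fall back or report: looping kernels were compiled out */
+	else if (err != RSB_ERR_NO_ERROR)
+		; /* propagate the error to the caller */
+#endif /* illustration only */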
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
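+
+/*
+ * Editor's note (a sketch under the usual BLAS stride convention, which the
+ * generated names do not spell out): unlike the "unua" dispatchers above,
+ * the "sasa" family forwards two extra arguments, incx and incy, i.e. the
+ * strides of the rhs and out vectors.  Logical element i of a strided
+ * vector x then lives at x[i*inc], as in this hypothetical accessor, which
+ * is not a library symbol:
+ */
+static inline float complex rsb__sketch_strided_get(const float complex *x,
+	rsb_nnz_idx_t i, rsb_coo_idx_t inc)
+{
+	/* inc == 1 is the contiguous case; larger strides skip elements */
+	return x[i*inc];
+}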
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
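+/*
+ * Editor's note on the generated-name suffixes (inferred from the variants
+ * in this file, so treat it as an assumption rather than documentation):
+ * _tN, _tT and _tC appear to select the untransposed, transposed and
+ * conjugate-transposed operation; _sU, _sS and _sH the unsymmetric,
+ * symmetric and hermitian variants; _dE and _dI an explicit or implicit
+ * diagonal; and the _C/_H infix a full (rsb_coo_idx_t) or halfword
+ * (rsb_half_idx_t) bindx index type, the latter distinction being visible
+ * in the signatures above.  The "sS" spmv_sasa dispatchers follow.
+ */
+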
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
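+
+/*
+ * Editorial sketch (not from the librsb sources): how the dispatchers in
+ * this hunk derive the block size.  rpntr/cpntr appear to hold block
+ * row/column partitioning offsets, so consecutive entries differ by the
+ * block extent; with a hypothetical cpntr = {0, 2, 4}, the block width is
+ * cpntr[1]-cpntr[0] = 2.  When either pointer is NULL, the code falls
+ * back to 1x1 blocking (the "bounded box patch" case noted above).
+ */
+#if 0 /* illustrative only */
+	const rsb_coo_idx_t cpntr_demo[] = { 0, 2, 4 };
+	const rsb_coo_idx_t block_width = cpntr_demo[1] - cpntr_demo[0]; /* == 2 */
+#endif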
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
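+
+/*
+ * Editorial note on the name mangling used by the dispatchers in this
+ * hunk (the _C_/_H_ expansion is visible in the signatures; the remaining
+ * expansions are inferred from BLAS conventions and not stated in this
+ * excerpt):
+ *   _C_ / _H_  : bindx held as rsb_coo_idx_t vs. rsb_half_idx_t indices
+ *   tN/tT/tC   : presumably no transpose / transpose / conjugate transpose
+ *   sS/sH/sU   : presumably symmetric / hermitian / unsymmetric
+ *   dE/dI      : presumably explicit vs. implicit diagonal
+ *   uu/ul      : fully unrolled kernel vs. looped kernel (the latter only
+ *                when RSB_WANT_LOOPING_KERNELS is defined)
+ * The rsb__BCOR_spsv_uxua_* dispatchers that follow keep the same shape,
+ * but take no incx/incy strides, pass rhs/out without the restrict
+ * qualifier, and trade the uG suffix for uU/uL (presumably upper/lower
+ * triangle).
+ */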
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
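+
+/*
+ * Editorial sketch of a call into one of these dispatchers (the buffer
+ * and dimension variables are hypothetical; only the dispatcher and the
+ * error codes appear in this file).  When a non-1x1 block size is
+ * requested and the library was built without RSB_WANT_LOOPING_KERNELS,
+ * the dispatcher reports RSB_ERR_UNSUPPORTED_OPERATION instead of
+ * computing anything, so the return value must be checked:
+ */
+#if 0 /* illustrative only */
+	rsb_err_t err = rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sU_dE_uU(
+		VA, x, y, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
+		br, bc, roff, coff, flags, nnz);
+	if(err != RSB_ERR_NO_ERROR)
+		return err; /* e.g. RSB_ERR_UNSUPPORTED_OPERATION */
+#endif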
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
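+
+/*
+ * The overflow bound quoted in the dispatcher comments, spelled out in C
+ * (variable names here are illustrative only, not part of this unit):
+ *
+ *   pad_rows = (blockrows - (matrixrows % blockrows)) % blockrows;
+ *   pad_cols = (blockcols - (matrixcols % blockcols)) % blockcols;
+ *
+ * e.g. matrixrows == 10 with blockrows == 4 gives pad_rows == 2, i.e. rhs
+ * and out may be touched up to two elements past the matrix dimension;
+ * for the 1x1 kernels dispatched here both pads are zero.
+ */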
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sU_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sU_dI_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sU_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sU_dI_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
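+
+/*
+ * Error-handling sketch for a hypothetical caller: when
+ * RSB_WANT_LOOPING_KERNELS is not defined and a block size other than 1x1
+ * reaches one of these dispatchers, the call degrades gracefully:
+ *
+ *   rsb_err_t err = rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sU_dI_uL(...);
+ *   if(err == RSB_ERR_UNSUPPORTED_OPERATION)
+ *       fallback_generic_solve();  // hypothetical non-blocked fallback
+ */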
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sU_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sU_dE_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
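+
+/*
+ * Caller-side sketch (hypothetical; argument values illustrative): passing
+ * NULL for both rpntr and cpntr drives the dispatcher down the
+ * rows == columns == 1 path, i.e. straight to the unrolled 1x1 kernel:
+ *
+ *   rsb_err_t err = rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sU_dE_uU(
+ *       VA, x, y, nr, nc, JA, bpntr, indptr,
+ *       NULL, NULL,  // rpntr, cpntr: NULL selects the 1x1 block path
+ *       1, 1, 0, 0, flags, nnz);
+ */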
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sU_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sU_dE_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sU_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sU_dI_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sU_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sU_dI_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sU_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sU_dE_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sU_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sU_dE_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sU_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sU_dI_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sU_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sU_dI_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
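+
+/*
+ * The "tC" variants follow; for this float complex type a
+ * conjugate-transposed solve is presumably distinct from the plain
+ * transposed ("tT") one, which would explain why both families are
+ * emitted (for a real type the two would coincide).
+ */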
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sU_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sU_dE_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sU_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sU_dE_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sU_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sU_dI_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sU_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sU_dI_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sU_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sU_dE_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sU_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sU_dE_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sU_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sU_dI_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sU_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sU_dI_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
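+
+/*
+ * The symmetric ("sS") dispatchers follow; judging by the names alone they
+ * mirror the "sU" family above but route to symmetric-storage variants of
+ * the same 1x1 kernels, the dispatch logic itself being unchanged.
+ */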
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sS_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sS_dE_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sS_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sS_dE_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sS_dI_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sS_dI_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sS_dI_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sS_dI_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sS_dE_uU
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sS_dE_uU
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of, respectively, no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sS_dE_uL
+				( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sS_dE_uL
+			( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
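+/*
+ * Reading aid (inferred from the variants in this file, not an
+ * authoritative legend): the _C_/_H_ infix selects full (rsb_coo_idx_t)
+ * versus half (rsb_half_idx_t) bindx indices, tN/tT/tC the
+ * transposition (none, transposed, conjugate-transposed), sS/sH the
+ * symmetric versus hermitian storage, dE/dI an explicit versus implicit
+ * diagonal, and uU/uL the upper versus lower triangle.
+ */
+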
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
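+
+/*
+ * A condensed sketch (hypothetical names, illustration only) of the
+ * dispatch pattern repeated by every function above: read the block
+ * size from the rpntr/cpntr pointer arrays (defaulting to 1x1 for the
+ * bounded box case), use the fixed 1x1 kernel when it applies, and fall
+ * back to the generic looping kernel only when it was compiled in.
+ */
+static rsb_err_t example_kernel_1x1(void){ return RSB_ERR_NO_ERROR; }	/* placeholder kernel */
+static rsb_err_t example_kernel_looped(void){ return RSB_ERR_NO_ERROR; }	/* placeholder kernel */
+static rsb_err_t rsb__example_dispatch(const rsb_coo_idx_t * rpntr, const rsb_coo_idx_t * cpntr)
+{
+	rsb_coo_idx_t rows = 1, columns = 1;	/* 1x1: the bounded box default */
+
+	if(rpntr && cpntr)
+	{
+		rows    = rpntr[1] - rpntr[0];
+		columns = cpntr[1] - cpntr[0];
+	}
+	if(rows == 1 && columns == 1)
+		return example_kernel_1x1();	/* fixed-size, unrolled kernel */
+#ifdef RSB_WANT_LOOPING_KERNELS
+	return example_kernel_looped();	/* generic kernel looping over the block */
+#else /* RSB_WANT_LOOPING_KERNELS */
+	return RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}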
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be allowed a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spsv_uxua") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
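+/*
+ * A note on the generated names (an editor's reading, not upstream
+ * documentation): in e.g. rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sH_dI_uU,
+ * only the C/H token is directly verifiable from the signatures above --
+ * "C" variants take rsb_coo_idx_t column indices while "H" variants take
+ * the narrower rsb_half_idx_t.  The remaining tokens appear to encode the
+ * transpose setting (tN/tT/tC), a symmetry flag (s*), diagonal handling
+ * (dE/dI) and triangle or unrolling choices (u*), but that reading is an
+ * assumption inferred from the dispatch pattern, not a confirmed key.
+ */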
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spsv_uxua") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spsv_uxua") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spsv_uxua") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spsv_uxua") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spsv_uxua") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spsv_uxua") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spsv_uxua") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
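+/*
+ * Reading the dispatchers above and below: when rpntr/cpntr are given,
+ * the block size is taken from their first two entries; when they are
+ * NULL (the "bounded box" case flagged as experimental) a 1x1 block is
+ * assumed.  Only the 1x1 case has a dedicated kernel here; every other
+ * size goes to the generic looped kernel, or yields
+ * RSB_ERR_UNSUPPORTED_OPERATION when RSB_WANT_LOOPING_KERNELS is unset.
+ */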
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spsv_uxua") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spsv_uxua") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spsv_uxua") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spsv_uxua") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spsv_uxua") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spsv_uxua") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spsv_uxua") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spsv_uxua") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
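+/*
+ * The spsv_uxua dispatchers end here and the spmv_sxsa family follows.
+ * As an editor's sketch of what a 1x1 ("BCOR", i.e. plain coordinate)
+ * lower-triangular solve leaf kernel computes -- under the assumptions
+ * that entries are sorted by row and that each row stores its
+ * strictly-lower entries before an explicit diagonal -- forward
+ * substitution looks as follows.  The function name coo_lower_solve and
+ * its argument layout are hypothetical, not the generated kernels' ABI.
+ */
+#include <complex.h>
+
+static int coo_lower_solve(int n, int nnz,
+		const int *ri, const int *ci, const float complex *va,
+		const float complex *b, float complex *x)
+{
+	int k = 0;
+	for (int i = 0; i < n; ++i)
+	{
+		float complex s = b[i];
+		/* subtract the strictly-lower contributions of row i */
+		while (k < nnz && ri[k] == i && ci[k] < i)
+			s -= va[k] * x[ci[k]], ++k;
+		if (k >= nnz || ri[k] != i || ci[k] != i)
+			return -1; /* missing diagonal entry */
+		x[i] = s / va[k++]; /* under an implicit unit diagonal
+				       (assumed "dI" variants) the division
+				       would be skipped */
+	}
+	return 0;
+}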
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spmv_sxsa") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
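+/*
+ * Editor's sketch of the operation family these spmv_sxsa dispatchers
+ * feed, assuming (from the alphap/incx/incy arguments threaded through
+ * above) that it computes out <- out + alpha * op(A) * rhs on strided
+ * vectors.  For the untransposed 1x1 coordinate case that is one fused
+ * multiply-add per stored entry.  The function name coo_spmv_strided is
+ * hypothetical.
+ */
+#include <complex.h>
+
+static void coo_spmv_strided(int nnz,
+		const int *ri, const int *ci, const float complex *va,
+		const float complex alpha,
+		const float complex *rhs, int incx,
+		float complex *out, int incy)
+{
+	for (int k = 0; k < nnz; ++k) /* out[i] += alpha * A(i,j) * rhs[j] */
+		out[ri[k] * incy] += alpha * va[k] * rhs[ci[k] * incx];
+}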
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spmv_sxsa") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spmv_sxsa") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spmv_sxsa") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spmv_sxsa") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spmv_sxsa") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spmv_sxsa") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spmv_sxsa") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * that performs the requested matrix operation ("spmv_sxsa") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
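+/*
+ * Editor's note: a schematic, hypothetical condensation (guarded out with
+ * #if 0) of the dispatch pattern these generated functions repeat: the
+ * block dimensions are read off consecutive rpntr/cpntr entries, the
+ * unrolled ("uu") kernel is chosen for the one generated block size, and
+ * the generic looped ("ul") kernel is reached only when
+ * RSB_WANT_LOOPING_KERNELS is defined; otherwise the operation is
+ * reported as unsupported.  All _sketch names are invented here.
+ */
+#if 0
+#define ERR_OK_SKETCH          0
+#define ERR_UNSUPPORTED_SKETCH 1
+
+static int spmv_1x1_unrolled_sketch(void) { return ERR_OK_SKETCH; }
+static int spmv_looped_sketch(void)       { return ERR_OK_SKETCH; }
+
+static int dispatch_sketch(const int *rpntr, const int *cpntr)
+{
+	/* block dims from successive partitioning pointers, or 1 x 1 */
+	const int rows    = (rpntr && cpntr) ? rpntr[1] - rpntr[0] : 1;
+	const int columns = (rpntr && cpntr) ? cpntr[1] - cpntr[0] : 1;
+
+	if (rows == 1 && columns == 1)
+		return spmv_1x1_unrolled_sketch(); /* the "uu" fast path */
+#ifdef RSB_WANT_LOOPING_KERNELS
+	return spmv_looped_sketch();               /* the "ul" fallback  */
+#else
+	return ERR_UNSUPPORTED_SKETCH;
+#endif
+}
+#endif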
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
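+/*
+ * Editor's note: the _C_ and _H_ dispatcher variants differ only in the
+ * index type of bindx (full-width rsb_coo_idx_t versus narrower
+ * rsb_half_idx_t), trading index range for roughly half the index
+ * storage on small enough submatrices.  A hypothetical sketch of that
+ * trade-off (the _sketch types and the 16-bit width are assumptions,
+ * guarded out with #if 0):
+ */
+#if 0
+#include <stdint.h>
+
+typedef int32_t  coo_idx_sketch_t;  /* full-width index, as in the "C" variants */
+typedef uint16_t half_idx_sketch_t; /* half-width index, as in the "H" variants */
+
+/* Half-width indices are only usable if every local index fits them. */
+static int can_use_half_idx_sketch(coo_idx_sketch_t max_local_idx)
+{
+	return max_local_idx >= 0 && max_local_idx <= (coo_idx_sketch_t)UINT16_MAX;
+}
+#endif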
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
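+
+/*
+ * Illustrative aside (a minimal sketch, not part of the generated sources):
+ * the `_H_' dispatchers above appear to differ from the `_C_' ones only in
+ * taking `rsb_half_idx_t' (rather than `rsb_coo_idx_t') entries in the
+ * `bindx' index array, halving index storage.  A narrowing step of the
+ * following shape would precede their use; the helper name is hypothetical
+ * and it assumes every index fits in the half-width type.
+ */
+static void rsb_illustrative_narrow_indices(const rsb_coo_idx_t *src, rsb_half_idx_t *dst, rsb_nnz_idx_t n)
+{
+	rsb_nnz_idx_t i;
+	for(i = 0; i < n; ++i)
+		dst[i] = (rsb_half_idx_t)src[i];	/* caller must guarantee src[i] fits */
+}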
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
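+
+/*
+ * Illustrative aside (a minimal sketch, not part of the generated sources):
+ * every dispatcher in this file has the same shape; stripped of the actual
+ * kernel calls it reduces to the hypothetical function below.  For 1x1
+ * blocks the fully unrolled `uu' kernel is taken; any other block size
+ * falls back to the generic looped `ul' kernel, which is compiled in only
+ * when RSB_WANT_LOOPING_KERNELS is defined.
+ */
+static rsb_err_t rsb_illustrative_dispatch_shape(rsb_coo_idx_t rows, rsb_coo_idx_t columns)
+{
+	if(rows == 1 && columns == 1)
+		return RSB_ERR_NO_ERROR;	/* a call to the unrolled 1x1 kernel would go here */
+#ifdef RSB_WANT_LOOPING_KERNELS
+	return RSB_ERR_NO_ERROR;	/* a call to the looped kernel would go here */
+#else /* RSB_WANT_LOOPING_KERNELS */
+	return RSB_ERR_UNSUPPORTED_OPERATION;	/* no looped fallback compiled in */
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}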
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
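+
+/*
+ * Illustrative aside (a minimal sketch, not part of the generated sources):
+ * in these spsv_sxsx solve dispatchers our reading of the suffixes -- an
+ * assumption drawn from the names, not stated by the generator -- is that
+ * `uU'/`uL' select the upper/lower triangle and `dE'/`dI' an explicit
+ * versus implicit (unit) diagonal.  With a unit diagonal the division by
+ * the diagonal entry is skipped, as in this dense lower-triangular forward
+ * substitution (hypothetical helper):
+ */
+static void rsb_illustrative_unit_lower_solve(const float complex *A, float complex *x, int n)
+{
+	int i, j;
+	for(i = 0; i < n; ++i)
+	{
+		float complex s = x[i];	/* x initially holds the right-hand side */
+		for(j = 0; j < i; ++j)
+			s -= A[i*n+j] * x[j];	/* row-major dense storage */
+		x[i] = s;	/* implicit unit diagonal: no division (the `dI' case) */
+	}
+}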
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
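+
+/*
+ * Illustrative aside (a minimal sketch, not part of the generated sources):
+ * the dispatchers that follow repeat the pattern for the transposed (`tT')
+ * and conjugate-transposed (`tC') operation variants; this `tN'/`tT'/`tC'
+ * reading follows the usual BLAS transposition convention and is our
+ * assumption from the names.  A caller-side mapping might look like this
+ * (hypothetical helper):
+ */
+static char rsb_illustrative_trans_suffix(char trans)
+{
+	/* 'N' -> `tN' kernels, 'T' -> `tT', 'C' -> `tC'; anything else
+	 * defaults to the untransposed variant in this sketch. */
+	return (trans == 'T' || trans == 'C') ? trans : 'N';
+}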
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
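+
+/*
+ * [Editorial note: a reading aid, not part of the generated sources.]
+ * The mangled suffixes of these dispatchers appear to encode the kernel
+ * variant: "_C_"/"_H_" select full (rsb_coo_idx_t) versus halfword
+ * (rsb_half_idx_t) bindx indices, as the parameter types confirm; "tN"/"tT"/
+ * "tC" plausibly stand for no-transpose/transpose/conjugate-transpose,
+ * "uU"/"uL" for the upper or lower triangle of this triangular solve, and
+ * "dE"/"dI" for an explicit or implicitly-unit diagonal. The symmetry letter
+ * ("sU" above, "sS"/"sH" below) presumably tracks the symmetry flavour.
+ */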
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * desired matrix operation ("spsv_sxsx") for the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
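RSB_WANT_LOOPING_KERNELS, tested in every dispatcher here, is a compile-time switch: when it is undefined, only the fully unrolled blockings are built, and every other block size resolves to RSB_ERR_UNSUPPORTED_OPERATION. A reduced sketch of that fallback, with a stand-in error code:

#include <stdio.h>

#define RSB_WANT_LOOPING_KERNELS 1 /* comment out to exercise the fallback */

static int looped_kernel(void) { return 0; }

/* a non-unrolled blocking either reaches the looped kernel or, when the
 * looping kernels were not compiled in, reports the operation unsupported */
static int dispatch_other_blocking(void)
{
#ifdef RSB_WANT_LOOPING_KERNELS
	return looped_kernel();
#else
	return -1; /* stand-in for RSB_ERR_UNSUPPORTED_OPERATION */
#endif
}

int main(void)
{
	printf("%d\n", dispatch_other_blocking()); /* 0 here; -1 without the macro */
	return 0;
}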
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
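+
+/*
+ * Editorial note on the generated kernel names (an inference from the
+ * signatures and call sites in this file, not upstream documentation):
+ * the "_C_"/"_H_" infix selects full (rsb_coo_idx_t) versus halfword
+ * (rsb_half_idx_t) column indices in bindx; "tN"/"tT"/"tC" select
+ * untransposed, transposed and conjugate-transposed operation;
+ * "sU"/"sS"/"sH" unsymmetric, symmetric and Hermitian storage;
+ * "dE"/"dI" explicit versus implicit (unit) diagonal; "uU"/"uL"/"uG"
+ * upper, lower and general triangle; and "uu"/"ul" the fully unrolled
+ * 1x1 kernel versus its looped counterpart guarded by
+ * RSB_WANT_LOOPING_KERNELS.
+ */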
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tN_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
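+
+/*
+ * Editorial sketch (an assumption, not generated code): the infty_norm
+ * dispatchers fill row_sums[i] with the absolute-value sum of row i; the
+ * infinity norm itself would then be the maximum over those sums. The
+ * helper below is hypothetical and assumes each accumulated sum is a
+ * non-negative real stored in a float complex slot.
+ */
+#if 0
+static float rsb_infty_norm_from_row_sums_sketch(const float complex *row_sums, rsb_coo_idx_t nrows)
+{
+	float norm = 0.0f;
+	rsb_coo_idx_t i;
+	for(i = 0; i < nrows; ++i)
+		if(crealf(row_sums[i]) > norm)
+			norm = crealf(row_sums[i]);	/* keep the largest row sum */
+	return norm;
+}
+#endif /* 0 */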
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tN_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
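+
+/*
+ * Editorial note: in every dispatcher in this file only the 1x1 block case
+ * has an unconditionally available ("uu") kernel; any other block shape is
+ * served by the looped ("ul") kernel and therefore yields
+ * RSB_ERR_UNSUPPORTED_OPERATION when the library is configured without
+ * RSB_WANT_LOOPING_KERNELS.
+ */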
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tN_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
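+
+/*
+ * Editorial sketch (hypothetical wrapper, not generated code): invoking the
+ * dispatcher above directly on a 1x1-blocked halfword-index COO fragment.
+ * With rpntr/cpntr NULL the dispatcher falls back to rows = columns = 1
+ * (the bounded-box path) and so selects the unrolled "uu" kernel.
+ */
+#if 0
+static rsb_err_t rsb_call_dispatch_sketch(const float complex *VA, float complex *row_sums,
+	rsb_coo_idx_t Mdim, rsb_coo_idx_t mdim,
+	const rsb_half_idx_t *bindx, rsb_nnz_idx_t nnz, rsb_flags_t flags)
+{
+	/* bpntr/indptr/rpntr/cpntr NULL, br = bc = 1, no row/column offsets */
+	return rsb__BCOR_infty_norm_float_complex_H_u_tN_sU_dE_uG(
+		VA, row_sums, Mdim, mdim, bindx, NULL, NULL, NULL, NULL,
+		1, 1, 0, 0, flags, nnz);
+}
+#endif /* 0 */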
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tN_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tT_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tT_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tT_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tT_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tC_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tC_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tC_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tC_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tN_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tN_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tN_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tN_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tT_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tT_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tT_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tT_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tC_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tC_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tC_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tC_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tN_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
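
The generated dispatchers above all follow one pattern: the fully unrolled kernel (the "_uu_" variant) serves the registered 1x1 block case, and the generic looped kernel (the "_ul_" variant) is compiled in as the fallback only when RSB_WANT_LOOPING_KERNELS is defined; otherwise the dispatcher reports RSB_ERR_UNSUPPORTED_OPERATION. A schematic, self-contained restatement follows (not from librsb; the stub names are hypothetical):

#include <stdio.h>

#define RSB_WANT_LOOPING_KERNELS	/* comment out to drop the fallback */

static int demo_kernel_1x1_unrolled(void) { puts("1x1 unrolled"); return 0; }
#ifdef RSB_WANT_LOOPING_KERNELS
static int demo_kernel_looped(void) { puts("generic looped"); return 0; }
#endif

/* Dispatch on the detected block shape, as the generated code does. */
static int demo_dispatch(int rows, int columns)
{
	if (rows == 1 && columns == 1)
		return demo_kernel_1x1_unrolled();
#ifdef RSB_WANT_LOOPING_KERNELS
	return demo_kernel_looped();
#else
	return -1;	/* stands in for RSB_ERR_UNSUPPORTED_OPERATION */
#endif
}

int main(void)
{
	demo_dispatch(1, 1);	/* hits the unrolled kernel */
	demo_dispatch(2, 2);	/* hits the fallback (or fails without it) */
	return 0;
}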
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tN_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tN_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
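
The _C_/_H_ function pairs above differ only in the type of the bindx column-index array: rsb_coo_idx_t versus rsb_half_idx_t. A standalone sketch of the resulting index-storage saving, assuming (as the name suggests, though it is an assumption here) a 16-bit halfword index; the demo types are hypothetical:

#include <stdint.h>
#include <stdio.h>

typedef int32_t  demo_coo_idx_t;	/* full-width index, as in _C_ kernels */
typedef uint16_t demo_half_idx_t;	/* halfword index, as in _H_ kernels  */

int main(void)
{
	const size_t nnz = 1000000;	/* one million stored entries */
	printf("full index storage: %zu bytes\n", nnz * sizeof(demo_coo_idx_t));
	printf("half index storage: %zu bytes\n", nnz * sizeof(demo_half_idx_t));
	/* Halfword indices apply when offsets within a submatrix fit in
	 * 16 bits, roughly halving index memory traffic. */
	return 0;
}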
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tN_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tT_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tT_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tT_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tT_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tC_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tC_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tC_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tC_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tN_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tN_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tN_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tN_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tT_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tT_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tT_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tT_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tC_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tC_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tC_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tC_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tN_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tN_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tN_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
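+/*
+ * Editorial note: the "_H_" variant just above differs from the "_C_" ones
+ * only in taking const rsb_half_idx_t * column indices instead of
+ * const rsb_coo_idx_t *, which presumably halves index storage for
+ * submatrices whose dimensions fit the narrower type; the dispatch logic
+ * itself is byte-for-byte identical.
+ */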
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tN_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
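+/*
+ * Editorial example of the overflow bound quoted in the comments above: with
+ * blockrows = 4 and matrixrows = 10, mod(10,4) = 2, so mod(4-2,4) = 2 trailing
+ * elements of padding must be tolerated; when blockrows divides matrixrows
+ * exactly the bound is mod(4,4) = 0. For the 1x1 blocking dispatched here the
+ * bound is always 0.
+ */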
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tT_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
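+/*
+ * Editorial note: in the "tT"/"tC" variants like the one just above, the row
+ * sums are presumably accumulated over the (conjugate-)transposed operand, so
+ * a stored entry a(i,j) contributes to row_sums[j], with "tC" additionally
+ * conjugating, roughly row_sums[j] += conjf(a). This is an editorial
+ * inference from the kernel names, not stated in the patch.
+ */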
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tT_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tT_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tT_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tC_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tC_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tC_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tC_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
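+/*
+ * Editorial note: the "sH" (hermitian) dispatchers that follow mirror the
+ * "sS" (symmetric) ones above; for complex types the difference is presumably
+ * that the implicit transposed half contributes conjugated values, roughly
+ * row_sums[j] += conjf(a(i,j)) rather than row_sums[j] += a(i,j). Again an
+ * editorial inference from the naming, not stated in the patch.
+ */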
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tN_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tN_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tN_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tN_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tT_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tT_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tT_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tT_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tC_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tC_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tC_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tC_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
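+/*
+ * Editorial note: the "scale" dispatchers below reuse the skeleton of the
+ * "rowssums" ones verbatim, but write VA in place, taking an extra
+ * const float complex *scale_factors argument instead of an output vector.
+ * For the 1x1 COO case the dispatched kernel is plausibly equivalent to the
+ * following sketch, where row_of(n) stands for however the row index of the
+ * n-th stored entry is recovered (a hypothetical helper, named here only for
+ * illustration):
+ *
+ *   for (rsb_nnz_idx_t n = 0; n < nnz; ++n)
+ *           VA[n] *= scale_factors[row_of(n)];
+ */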
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tN_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tN_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tN_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tN_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tT_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tT_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tT_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tT_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tC_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tC_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tC_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tC_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tN_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tN_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tN_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tN_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tT_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tT_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tT_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tT_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tC_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tC_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tC_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tC_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tN_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tN_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tN_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tN_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tT_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_float_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_float_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tT_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "scale" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tT_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "scale" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tT_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "scale" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tC_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "scale" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tC_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "scale" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tC_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "scale" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tC_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "scale" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_float_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
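Before the spmv_uaua dispatchers that follow, a note on the name mangling, since it is what distinguishes these near-identical functions. Reading the suffixes of rsb__BCOR_scale_float_complex_H_u_tN_sH_dI_uG and friends, and hedging where the scheme is only implied by this file: the C/H infix appears to select full rsb_coo_idx_t versus narrower rsb_half_idx_t column indices; tN/tT/tC follow the BLAS transposition characters (none, transpose, conjugate-transpose); sU/sS/sH plausibly mark unsymmetric, symmetric, and Hermitian storage; dE/dI an explicit versus implicit diagonal; and uu/ul the fully unrolled versus looped kernel, matching the RSB_WANT_LOOPING_KERNELS fallbacks above. Assuming rsb_half_idx_t is a halfword type, as its name suggests, a caller-side sketch of why the H family exists (choose_half_indices is hypothetical, not librsb API):

	#include <stdint.h>

	/* Hypothetical helper: half-width indices roughly halve the index
	 * bandwidth of a COO scan, so an "H" kernel is preferable whenever
	 * every column index of the submatrix fits the narrower type. */
	static int choose_half_indices(long max_col_index)
	{
		return max_col_index <= (long)UINT16_MAX; /* assumes a 16-bit rsb_half_idx_t */
	}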
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "spmv_uaua" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
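The unrolled kernels named above are defined elsewhere in this file. As a rough sketch only: an unsymmetric, non-transposed 1 x 1 kernel of the spmv_uaua family (which, going by librsb's operation naming, accumulates out += A * rhs with unit alpha) amounts to a plain offset COO scan. IA and JA below are hypothetical row/column index arrays standing in for the generated kernels' bindx/bpntr layout:

	#include <complex.h>

	/* Sketch of a COO out += A * rhs accumulation with row/column offsets,
	 * loosely mirroring a 1x1 "spmv_uaua" unrolled kernel. */
	static void coo_spmv_uaua_sketch(const double complex *VA,
	                                 const long *IA, const long *JA, long nnz,
	                                 long roff, long coff,
	                                 const double complex *rhs,
	                                 double complex *out)
	{
		for (long n = 0; n < nnz; ++n)
			out[roff + IA[n]] += VA[n] * rhs[coff + JA[n]];
	}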
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "spmv_uaua" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "spmv_uaua" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "spmv_uaua" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "spmv_uaua" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
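The tT and tC dispatchers entered here differ from the tN ones only in which unrolled or looped kernels they name. The underlying difference, sketched below with the same hypothetical IA/JA arrays as before, is that the transposed variants swap the roles of the row and column indices, with tC additionally conjugating each coefficient:

	#include <complex.h>

	/* Rough sketch of the transposed ("tT") and conjugate-transposed ("tC")
	 * accumulation; IA/JA remain hypothetical index arrays, not librsb's
	 * actual kernel layout. */
	static void coo_spmv_uaua_trans_sketch(const double complex *VA,
	                                       const long *IA, const long *JA,
	                                       long nnz, long roff, long coff,
	                                       const double complex *rhs,
	                                       double complex *out,
	                                       int conjugate)
	{
		for (long n = 0; n < nnz; ++n)
		{
			const double complex a = conjugate ? conj(VA[n]) : VA[n];

			/* note the swapped indices w.r.t. the tN sketch above */
			out[coff + JA[n]] += a * rhs[roff + IA[n]];
		}
	}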
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "spmv_uaua" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "spmv_uaua" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "spmv_uaua" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "spmv_uaua" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "spmv_uaua" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "spmv_uaua" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "spmv_uaua" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "spmv_uaua" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
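The dispatchers from rsb__BCOR_spmv_uaua_double_complex_C_u_tN_sS_dE_uG onward carry the sS suffix, which plausibly marks symmetric storage, where only one triangle of the matrix is kept. Hedging again on the exact kernel layout, the extra work relative to the unsymmetric sketch earlier is the mirrored contribution of each off-diagonal entry:

	#include <complex.h>

	/* Rough sketch of the extra work a symmetric ("sS") spmv kernel performs:
	 * each stored off-diagonal coefficient also contributes transposed, so
	 * one triangle of A suffices.  IA/JA are hypothetical index arrays. */
	static void coo_spmv_uaua_symmetric_sketch(const double complex *VA,
	                                           const long *IA, const long *JA,
	                                           long nnz, long roff, long coff,
	                                           const double complex *rhs,
	                                           double complex *out)
	{
		for (long n = 0; n < nnz; ++n)
		{
			const long i = roff + IA[n], j = coff + JA[n];

			out[i] += VA[n] * rhs[j];
			if (i != j)                     /* mirror the off-diagonal entry */
				out[j] += VA[n] * rhs[i];
		}
	}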
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "spmv_uaua" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "spmv_uaua" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "spmv_uaua" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "spmv_uaua" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or, as a
+	 * fallback, looped) performing the "spmv_uaua" operation for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
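+
+/* Editorial note on the generated naming scheme, inferred from the code
+ * itself rather than stated by upstream documentation, so treat it as a
+ * reading aid only: the _C_/_H_ infix appears to select full
+ * (rsb_coo_idx_t) versus half (rsb_half_idx_t) column indices in bindx;
+ * tN/tT/tC no transpose, transpose, or conjugate transpose; sS/sH/sU
+ * symmetric, hermitian, or unsymmetric storage; dE/dI an explicit versus
+ * implicit (unit) diagonal; and r1_c1 the 1x1 block case handled above. */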
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
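+
+/* A hedged editorial sketch of what the tC (conjugate-transpose) variants
+ * above presumably compute per stored entry, namely out[j] += conj(v) *
+ * rhs[i]; this reading is inferred from the naming scheme, not from
+ * upstream documentation. Function and parameter names are illustrative
+ * only; <complex.h> is assumed for conj(). */
+static void rsb_spmv_conj_transpose_sketch(int nnz,
+	const int *ia, const int *ja, const double complex *va,
+	const double complex *rhs, double complex *out)
+{
+	int k;
+	for (k = 0; k < nnz; ++k)
+		out[ja[k]] += conj(va[k]) * rhs[ia[k]];	/* A^H action */
+}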
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
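+
+/* A hedged editorial sketch of hermitian (sH) storage as the kernels above
+ * presumably exploit it: only one triangle is stored, and each strictly
+ * off-diagonal entry (i,j,v) contributes to both rows, with the mirrored
+ * term conjugated. Inferred from the naming scheme, not from upstream
+ * documentation; names below are illustrative, <complex.h> assumed. */
+static void rsb_spmv_hermitian_sketch(int nnz,
+	const int *ia, const int *ja, const double complex *va,
+	const double complex *rhs, double complex *out)
+{
+	int k;
+	for (k = 0; k < nnz; ++k)
+	{
+		out[ia[k]] += va[k] * rhs[ja[k]];
+		if (ia[k] != ja[k])	/* mirrored, conjugated term */
+			out[ja[k]] += conj(va[k]) * rhs[ia[k]];
+	}
+}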
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
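+
+/* Editorial sketch contrasting the two kernel families in this file: the
+ * earlier "spmv_uaua" kernels appear to accumulate (out += A*rhs) while
+ * the "spmv_uauz" kernels appear to first zero the output (out = A*rhs),
+ * both with unit alpha; this is inferred from the names, not from upstream
+ * documentation. Names below are illustrative only. */
+static void rsb_spmv_uauz_sketch(int nnz, int nrows,
+	const int *ia, const int *ja, const double complex *va,
+	const double complex *rhs, double complex *out)
+{
+	int k;
+	for (k = 0; k < nrows; ++k)	/* "z": zero the output first */
+		out[k] = 0;
+	for (k = 0; k < nnz; ++k)	/* then accumulate as in "uaua" */
+		out[ia[k]] += va[k] * rhs[ja[k]];
+}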
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must, respectively, tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
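+/*
+ * Note: the _C_ and _H_ dispatcher families differ only in index width, as
+ * their signatures show: _C_ variants take a full-width rsb_coo_idx_t bindx
+ * array, while _H_ variants take a halfword rsb_half_idx_t bindx array.
+ */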
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
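+/*
+ * As the dispatchers above show, only the 1x1 block case has an unrolled
+ * ("uu") kernel here; any other block shape falls back to the looped ("ul")
+ * kernel, and yields RSB_ERR_UNSUPPORTED_OPERATION when the library is built
+ * without RSB_WANT_LOOPING_KERNELS.
+ */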
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
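+
+/*
+ * Editorial note: in builds configured without RSB_WANT_LOOPING_KERNELS, any
+ * block size other than 1x1 makes these dispatchers return
+ * RSB_ERR_UNSUPPORTED_OPERATION instead of falling back to a looping ("ul")
+ * kernel, so callers should check the returned rsb_err_t.  A hypothetical
+ * caller-side sketch (variable names are illustrative assumptions):
+ */
+#if 0
+	rsb_err_t err = rsb__BCOR_spmv_uauz_double_complex_H_u_tT_sH_dI_uG(
+		VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
+		br, bc, roff, coff, flags, nnz);
+	if(err == RSB_ERR_UNSUPPORTED_OPERATION)
+	{
+		/* a non-1x1 block size reached a build without looping kernels */
+	}
+#endif /* 0 */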
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uauz") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uauz") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uauz") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uauz") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
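+
+/*
+ * Editorial note: unlike the "spmv_uauz" dispatchers above, the "spmv_uxua"
+ * ones take a trailing alphap argument, judging by the naming a pointer to
+ * the kernel's scaling factor.  A hypothetical call sketch (the alpha value
+ * and local variable names are illustrative assumptions):
+ */
+#if 0
+	const double complex alpha = 2.0;
+	rsb_err_t err = rsb__BCOR_spmv_uxua_double_complex_C_u_tN_sU_dE_uG(
+		VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
+		br, bc, roff, coff, flags, nnz, &alpha);
+#endif /* 0 */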
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
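+
+/*
+ * Editorial note on the generated naming scheme, as far as it can be read off
+ * this file (the expansions are assumptions, not upstream documentation):
+ * "double_complex" is the numerical type; "_C"/"_H" selects full- or
+ * half-word column indices (cf. the rsb_coo_idx_t vs. rsb_half_idx_t bindx
+ * parameter in the signatures); "tN"/"tT"/"tC" plausibly select no
+ * transposition, transposition, or conjugate transposition; "r1_c1" is the
+ * 1x1 register block; "uu"/"ul" distinguish the unrolled kernel from the
+ * looping fallback; and the "sU"/"sS"/"sH" infix varies with the stored
+ * structure (presumably unsymmetric, symmetric, hermitian).
+ */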
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should
+	 * be allocated to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
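+/*
+ * A hedged decoding of the suffixes in these generated names, inferred from
+ * this file rather than documented in it: _C_/_H_ select the bindx index
+ * type (rsb_coo_idx_t vs rsb_half_idx_t, visible in the prototypes);
+ * tN/tT/tC follow the usual BLAS transpose codes (none, transpose,
+ * conjugate transpose); sS/sH/sU plausibly denote symmetric, Hermitian and
+ * unsymmetric storage, and dE/dI an explicit vs implicit diagonal; _uu_
+ * names the fixed (unrolled) 1x1 kernel and _ul_ the looping fallback,
+ * which is compiled in only under RSB_WANT_LOOPING_KERNELS.
+ */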
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double complex * alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
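+/*
+ * All dispatchers in this family share one shape: rows and columns are the
+ * leading block dimensions read from rpntr/cpntr (defaulting to 1 when
+ * those are NULL, per the bounded box patch), and since only the r1_c1
+ * kernel is generated here, any block size other than 1x1 falls through to
+ * the generic looping kernel, or yields RSB_ERR_UNSUPPORTED_OPERATION when
+ * RSB_WANT_LOOPING_KERNELS is not defined.
+ */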
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double complex * alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double complex * alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double complex * alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double complex * alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double complex * alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double complex * alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double complex * alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double complex * alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double complex * alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double complex * alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double complex * alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double complex * alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double complex * alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double complex * alphap)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
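+/*
+ * The dispatchers below cover "spmv_unua" rather than "spmv_uxua": their
+ * call lists stop at nnz and pass no alphap, consistent with a fixed
+ * alpha of -1 (out -= A * rhs) in place of the scaled out += alpha * A * rhs
+ * of the uxua variants above. That reading is inferred from the naming and
+ * the argument lists, not stated in this file.
+ */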
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
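+/*
+ * Note on the dispatcher variants: the "_C_" functions take rsb_coo_idx_t
+ * block indices (bindx), while the "_H_" ones, such as the function below,
+ * take the narrower rsb_half_idx_t, as their prototypes show.  The other
+ * name components are read here only as plausible assumptions: tN/tT/tC
+ * would select the untransposed/transposed/conjugate-transposed operation
+ * in the usual BLAS lettering, sU/sS/sH an unsymmetric/symmetric/Hermitian
+ * matrix, and dE/dI an explicit or implicit diagonal.
+ */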
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing
+	 * the requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
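+
+/*
+ * A minimal sketch (not librsb's actual kernel) of what a 1x1 "sasa"
+ * dispatch target presumably computes, taking incx/incy as BLAS-style
+ * strides; the IA/JA index arrays and the function name are assumptions
+ * made for illustration.
+ */
+static inline void rsb_example_spmv_sasa_1x1(const double complex *VA,
+	const rsb_coo_idx_t *IA, const rsb_coo_idx_t *JA, const rsb_nnz_idx_t nnz,
+	const double complex *x, const rsb_coo_idx_t incx,
+	double complex *y, const rsb_coo_idx_t incy)
+{
+	rsb_nnz_idx_t n;
+	for (n = 0; n < nnz; ++n)
+		y[IA[n] * incy] += VA[n] * x[JA[n] * incx];	/* y += A x, strided */
+}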
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
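+
+/*
+ * Sketch of the transposed ("tT") access pattern as the naming suggests:
+ * row and column indices swap roles, so each nonzero scatters into the
+ * column position of y. The "tC" variants presumably conjugate VA[n] as
+ * well. Illustrative only; names are assumptions, not librsb's.
+ */
+static inline void rsb_example_spmv_sasa_1x1_trans(const double complex *VA,
+	const rsb_coo_idx_t *IA, const rsb_coo_idx_t *JA, const rsb_nnz_idx_t nnz,
+	const double complex *x, const rsb_coo_idx_t incx,
+	double complex *y, const rsb_coo_idx_t incy)
+{
+	rsb_nnz_idx_t n;
+	for (n = 0; n < nnz; ++n)
+		y[JA[n] * incy] += VA[n] * x[IA[n] * incx];	/* y += A^T x */
+}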
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
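+
+/*
+ * Sketch of the symmetric ("sS") case under the assumption that only one
+ * triangle is stored: an off-diagonal nonzero a(i,j) then contributes to
+ * both y[i] and y[j]. Strides and the dE/dI diagonal handling are omitted;
+ * illustrative only.
+ */
+static inline void rsb_example_spmv_sym_1x1(const double complex *VA,
+	const rsb_coo_idx_t *IA, const rsb_coo_idx_t *JA, const rsb_nnz_idx_t nnz,
+	const double complex *x, double complex *y)
+{
+	rsb_nnz_idx_t n;
+	for (n = 0; n < nnz; ++n)
+	{
+		y[IA[n]] += VA[n] * x[JA[n]];
+		if (IA[n] != JA[n])
+			y[JA[n]] += VA[n] * x[IA[n]];	/* mirrored contribution */
+	}
+}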
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
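+/*
+ * Editorial note: every dispatcher in this family reduces to the same
+ * shape, sketched below under the assumption (taken from the code itself)
+ * that only the 1x1 block case has a dedicated unrolled kernel here:
+ */
+#if 0 /* exposition only; kernel_uu/kernel_ul stand for the _uu_/_ul_ callees */
+if (rows == 1 && columns == 1)
+	errval = kernel_uu(...);	/* fixed 1x1 unrolled kernel */
+else
+#ifdef RSB_WANT_LOOPING_KERNELS
+	errval = kernel_ul(...);	/* generic looped fallback */
+#else
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;	/* no fallback compiled in */
+#endif
+#endif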
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
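+/*
+ * Editorial note: the spsv_uxua dispatchers below (spsv conventionally
+ * names the sparse triangular solve in Sparse BLAS) follow the same
+ * skeleton as the spmv_sasa ones above, with two visible differences:
+ * the inner kernels are called without the incx/incy stride arguments,
+ * and a trailing _uU/_uL suffix selects between two triangle variants
+ * (plausibly upper versus lower, an assumption from the naming, not
+ * stated by the generator).
+ */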
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
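+/*
+ * A minimal sketch (annotation, not upstream code) of the dispatch shared
+ * by all of these generated functions: the block shape is read from
+ * rpntr/cpntr when present, otherwise assumed 1x1 (the "bounded box"
+ * case); only 1x1 has a fixed unrolled kernel, everything else falls back
+ * to the looped kernel when compiled in.  All names below are hypothetical.
+ */
+#if 0 /* illustration only, not part of this diff */
+typedef int (*example_kernel_t)(void);
+static int example_dispatch(const int *rpntr, const int *cpntr,
+	example_kernel_t uu_1x1, example_kernel_t ul_looped)
+{
+	const int rows    = (rpntr && cpntr) ? rpntr[1] - rpntr[0] : 1;
+	const int columns = (rpntr && cpntr) ? cpntr[1] - cpntr[0] : 1;
+	if (rows == 1 && columns == 1)
+		return uu_1x1();	/* fixed-unroll _uu_ variant */
+#ifdef RSB_WANT_LOOPING_KERNELS
+	return ul_looped();	/* generic looped _ul_ variant */
+#else
+	return -1;	/* cf. RSB_ERR_UNSUPPORTED_OPERATION */
+#endif
+}
+#endif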
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
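+/*
+ * Worth noting amid the repetition: the _H_ dispatchers that follow differ
+ * from their _C_ twins only in taking rsb_half_idx_t block indices, which
+ * halves index storage for blocks whose coordinates fit the narrower type.
+ * A sketch of the intent (the printed sizes depend on configuration):
+ */
+#if 0 /* illustrative only, not compiled */
+	printf("coo idx: %zu bytes, half idx: %zu bytes\n",
+		sizeof(rsb_coo_idx_t), sizeof(rsb_half_idx_t));
+#endif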
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
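+/*
+ * The dispatchers above all share one shape: the block size is read off the
+ * first rpntr/cpntr interval (defaulting to 1x1 when those arrays are NULL),
+ * the 1x1 case goes to the fully unrolled kernel, and anything else reaches
+ * the looped kernel only when RSB_WANT_LOOPING_KERNELS is defined.  A caller
+ * therefore has to be prepared for RSB_ERR_UNSUPPORTED_OPERATION on builds
+ * without looping kernels; a minimal hypothetical check (fallback_spsv is a
+ * made-up name, not a library routine):
+ */
+#if 0 /* illustrative only, not compiled */
+	errval = rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+	if(errval == RSB_ERR_UNSUPPORTED_OPERATION)
+		errval = fallback_spsv(VA,rhs,out,nnz);	/* hypothetical generic path */
+#endif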
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_uxua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_uxua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_uxua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_uxua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_uxua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_uxua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_uxua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spsv_uxua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
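+
+/*
+ * Every dispatcher in this family shares the shape sketched below (a
+ * simplified, illustrative rendition; "kernel_uu" and "kernel_ul" are
+ * hypothetical stand-ins for the generated ..._uu_... and ..._ul_...
+ * kernels): the fully unrolled kernel serves the fixed 1x1 block size,
+ * the generic looped kernel everything else, and the call fails where
+ * the looping kernels are compiled out.
+ */
+#if 0 /* illustrative only */
+static rsb_err_t rsb_dispatch_shape_example(rsb_coo_idx_t rows, rsb_coo_idx_t columns,
+	rsb_err_t (*kernel_uu)(void), rsb_err_t (*kernel_ul)(void))
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if (rows == 1 && columns == 1)
+		errval = kernel_uu();	/* unrolled fixed-block kernel */
+	else
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = kernel_ul();	/* generic looped fallback */
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	return errval;
+}
+#endif /* illustrative only */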
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel function
+	 * performing the desired matrix operation ("spmv_sxsa") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * accept a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
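+
+#if 0
+/* Editorial note, inferred from the name mangling rather than from upstream
+ * documentation: in rsb__BCOR_spmv_sxsa_double_complex_C_u_tT_sS_dE_uG the
+ * `C'/`H' field appears to select full (rsb_coo_idx_t) versus halfword
+ * (rsb_half_idx_t) column indices, `tN'/`tT'/`tC' the untransposed,
+ * transposed or conjugate-transposed operation, `sS'/`sH' symmetric versus
+ * Hermitian storage, and `dE'/`dI' an explicit versus implicit diagonal.
+ * A hypothetical caller-side choice of index width (hbindx, a halfword copy
+ * of bindx, is assumed here, as is rsb_half_idx_t being an unsigned short): */
+	if(mdim <= (rsb_coo_idx_t)USHRT_MAX)
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H_u_tT_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,hbindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+	else
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C_u_tT_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#endif /* 0 */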
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spmv_sxsa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spmv_sxsa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spmv_sxsa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spmv_sxsa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spmv_sxsa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spmv_sxsa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
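+
+#if 0
+/* Editorial note: when RSB_WANT_LOOPING_KERNELS is not defined, only the
+ * fixed 1x1 kernels are compiled in, and the dispatchers above answer every
+ * other block size with RSB_ERR_UNSUPPORTED_OPERATION; a hypothetical
+ * caller-side fragment reacting to exactly that answer: */
+	if(errval == RSB_ERR_UNSUPPORTED_OPERATION)
+		errval = rsb_example_unblocked_fallback(VA, bindx, bpntr, rhs, out, nnz);	/* hypothetical fallback routine */
+#endif /* 0 */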
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spmv_sxsa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spmv_sxsa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
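+
+#if 0
+/* Editorial sketch of the difference between the `sS' (symmetric) and `sH'
+ * (Hermitian) variants, as read from the kernel names: a stored off-diagonal
+ * entry a_ij contributes to row i as usual, while its mirror contribution to
+ * row j uses a_ij itself in the symmetric case but conj(a_ij) in the
+ * Hermitian case. A minimal, hypothetical single-entry update (conj() is
+ * from <complex.h>): */
+static void rsb_example_mirror_update(const double complex a_ij,
+	const double complex alpha, const double complex *rhs,
+	double complex *out, const int i, const int j, const int hermitian)
+{
+	out[i] += alpha*a_ij*rhs[j];
+	if(i != j)	/* diagonal entries are not mirrored */
+		out[j] += alpha*(hermitian ? conj(a_ij) : a_ij)*rhs[i];
+}
+#endif /* 0 */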
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spmv_sxsa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spmv_sxsa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spmv_sxsa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spmv_sxsa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spmv_sxsa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spmv_sxsa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spmv_sxsa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spmv_sxsa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spmv_sxsa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spmv_sxsa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spmv_sxsa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
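+
+#if 0
+/* The spsv_sxsx dispatchers below have the same shape as the spmv_sxsa ones
+ * above, but wrap sparse triangular-solve kernels; the trailing `uU'/`uL'
+ * field plausibly selects the upper or lower triangle. As a dense point of
+ * reference (hypothetical helper, row-major n x n matrix L), a lower
+ * triangular solve with explicit diagonal is plain forward substitution: */
+static void rsb_example_dense_lower_solve(const double complex *L,
+	const double complex *rhs, double complex *x,
+	const int n, const double complex alpha)
+{
+	int i, j;
+	for(i = 0; i < n; ++i)
+	{
+		double complex s = alpha*rhs[i];	/* scaled right-hand side */
+		for(j = 0; j < i; ++j)
+			s -= L[i*n + j]*x[j];	/* subtract the already-solved terms */
+		x[i] = s/L[i*n + i];	/* divide by the explicit diagonal */
+	}
+}
+#endif /* 0 */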
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
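+
+#if 0
+/* Editorial note on the `dE'/`dI' field of the solve dispatchers, again an
+ * inference from the naming: with an explicit diagonal (`dE') each step of
+ * the substitution in the dense sketch shown earlier divides by the stored
+ * diagonal entry, while an implicit unit diagonal (`dI') drops the division: */
+	x[i] = s/L[i*n + i];	/* dE: explicit diagonal */
+	x[i] = s;		/* dI: diagonal implicitly 1 */
+#endif /* 0 */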
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel implementing the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
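+
+/*
+ * The "_C_" dispatchers in this unit take full-width rsb_coo_idx_t block
+ * indices (bindx), while the "_H_" ones take the narrower rsb_half_idx_t.
+ * A minimal sketch of a suitability check, assuming rsb_half_idx_t is an
+ * unsigned type; the name rsb_example_fits_half_indices is hypothetical.
+ */
+static int rsb_example_fits_half_indices(rsb_coo_idx_t Mdim, rsb_coo_idx_t mdim)
+{
+	/* half-width bindx storage is only safe if every index value fits */
+	const rsb_coo_idx_t maxdim = (Mdim > mdim) ? Mdim : mdim;
+	return maxdim <= (rsb_coo_idx_t)(rsb_half_idx_t)-1;
+}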
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
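+
+/*
+ * All dispatchers in this unit share one compile-time pattern: only the fully
+ * unrolled 1x1 kernel is unconditionally built, and any other block shape
+ * either falls back to a generic looped kernel (when RSB_WANT_LOOPING_KERNELS
+ * is defined) or is reported as unsupported. A minimal sketch of that pattern
+ * follows; the example_* kernel names are hypothetical stand-ins.
+ */
+static rsb_err_t example_unrolled_1x1_kernel(void) { return RSB_ERR_NO_ERROR; }
+#ifdef RSB_WANT_LOOPING_KERNELS
+static rsb_err_t example_generic_looped_kernel(void) { return RSB_ERR_NO_ERROR; }
+#endif /* RSB_WANT_LOOPING_KERNELS */
+static rsb_err_t example_dispatch(rsb_coo_idx_t rows, rsb_coo_idx_t columns)
+{
+	if(rows == 1 && columns == 1)
+		return example_unrolled_1x1_kernel();	/* specialized fast path */
+#ifdef RSB_WANT_LOOPING_KERNELS
+	return example_generic_looped_kernel();	/* generic fallback */
+#else /* RSB_WANT_LOOPING_KERNELS */
+	return RSB_ERR_UNSUPPORTED_OPERATION;	/* no fallback compiled in */
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}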
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sU_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sU_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sU_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sU_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sS_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sS_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sS_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sS_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sH_dE_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sH_dE_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sH_dI_uU
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sH_dI_uL
+( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
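Before moving on, it may help to see what the spsv_sxsx kernels dispatched above actually compute: a sparse triangular solve. The following is a minimal sketch only, assuming a unit-diagonal, strictly lower triangular matrix in row-sorted COO form and hypothetical demo_* names; the generated kernels additionally handle alpha scaling, the incx/incy strides, and the transposed/conjugated variants:

#include <complex.h>
#include <stdio.h>

/* Solve L*x = b in place (x starts out holding b) by forward
 * substitution.  Entries are strictly lower triangular (JA[k] < IA[k])
 * and sorted by row, so x[JA[k]] is already final when row IA[k] is
 * processed. */
static void demo_spsv_coo_lower_unit(int nnz, const int *IA, const int *JA,
                                     const double complex *VA,
                                     double complex *x)
{
	for (int k = 0; k < nnz; ++k)
		x[IA[k]] -= VA[k] * x[JA[k]];
}

int main(void)
{
	/* L = [[1,0],[2,1]] with the unit diagonal implicit; b = (1,4) */
	const int IA[] = { 1 }, JA[] = { 0 };
	const double complex VA[] = { 2.0 };
	double complex x[] = { 1.0, 4.0 };
	demo_spsv_coo_lower_unit(1, IA, JA, VA, x);
	printf("x = (%g, %g)\n", creal(x[0]), creal(x[1]));	/* (1, 2) */
	return 0;
}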
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tN_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("infty_norm") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
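The infty_norm kernels dispatched here reduce, conceptually, to a per-row sum of absolute values; the infinity norm of the matrix is then the maximum of those sums. A minimal sketch under the same caveats as before (hypothetical demo_* names; the generated kernels accumulate into a double complex row_sums array and offset each leaf's rows by roff):

#include <complex.h>
#include <math.h>
#include <stdio.h>

/* Accumulate per-row absolute-value sums of a COO block into row_sums. */
static void demo_infty_norm_acc(int nnz, const int *IA,
                                const double complex *VA, int roff,
                                double *row_sums)
{
	for (int k = 0; k < nnz; ++k)
		row_sums[roff + IA[k]] += cabs(VA[k]);
}

int main(void)
{
	/* A = [[3, 4i], [0, -1]]: row sums 7 and 1, so ||A||_inf = 7 */
	const int IA[] = { 0, 0, 1 };
	const double complex VA[] = { 3.0, 4.0 * I, -1.0 };
	double row_sums[2] = { 0.0, 0.0 };
	demo_infty_norm_acc(3, IA, VA, 0, row_sums);
	printf("infty norm = %g\n", fmax(row_sums[0], row_sums[1]));
	return 0;
}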
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tN_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("infty_norm") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tN_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("infty_norm") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tN_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("infty_norm") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tT_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("infty_norm") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tT_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("infty_norm") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tT_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("infty_norm") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tT_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
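+/*
+ * A worked instance of the overflow bound quoted in the comments above:
+ * with matrixrows = 10 and blockrows = 4, mod(10,4) = 2 and
+ * mod(4 - 2, 4) = 2, i.e. two elements of padding round 10 rows up to
+ * 12 = 3 whole blocks; when blockrows divides matrixrows the bound is 0.
+ * The helper below is an editorial sketch of that arithmetic only; the
+ * name rsb_editor_block_padding is hypothetical and not part of librsb.
+ */
+static inline rsb_coo_idx_t rsb_editor_block_padding(rsb_coo_idx_t n, rsb_coo_idx_t b)
+{
+	/* elements needed to round n up to a multiple of b:
+	   mod(b - mod(n,b), b); e.g. n = 10, b = 4 -> 2. */
+	return (b - (n % b)) % b;
+}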
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tC_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tC_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tC_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tC_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tN_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tN_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tN_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tN_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tT_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tT_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tT_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tT_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tC_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tC_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tC_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tC_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
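+/*
+ * Dispatch summary for all of the variants above and below: only the
+ * rows == 1 && columns == 1 case has a dedicated unrolled kernel; every
+ * other block size is routed to the generic looped kernel, and in builds
+ * without RSB_WANT_LOOPING_KERNELS those paths return
+ * RSB_ERR_UNSUPPORTED_OPERATION instead of computing anything.
+ */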
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tN_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tN_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tN_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tN_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tT_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tT_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tT_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tT_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each be allowed to overflow by at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
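+
+/*
+ * The suffixes of these generated names encode the kernel variant:
+ * "double_complex" is the numerical type; "_C_"/"_H_" select full
+ * (rsb_coo_idx_t) or halfword (rsb_half_idx_t) bindx indices, as the paired
+ * signatures show; "tN"/"tT"/"tC" appear to request no, real, or conjugate
+ * transposition; "sU"/"sS"/"sH" presumably mark unsymmetric, symmetric and
+ * hermitian storage; "dE"/"dI" the explicit- or implicit-diagonal variants;
+ * and "uu"/"ul" distinguish the fixed 1x1 kernel from the looped one guarded
+ * by RSB_WANT_LOOPING_KERNELS.
+ */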
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tC_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tC_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tC_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
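+
+/*
+ * Note on the NULL rpntr/cpntr path in these dispatchers: under the
+ * "bounded box patch" the block extent defaults to 1x1, so the whole switch
+ * reduces to the r1_c1 kernel and only genuinely blocked layouts reach the
+ * looping default branches.
+ */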
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tC_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tN_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
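+
+/*
+ * A reference (non-blocked) sketch of what "rowssums" presumably computes
+ * for an untransposed, unsymmetric COO fragment: a plain per-row sum, in
+ * contrast with "infty_norm", which accumulates absolute values.
+ * Hypothetical helper, for illustration only.
+ */
+static void rsb_example_coo_rowssums(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t * IA, rsb_nnz_idx_t nnz)
+{
+	rsb_nnz_idx_t n;
+	for(n = 0; n < nnz; ++n)
+		row_sums[IA[n]] += VA[n];	/* an infty_norm kernel would add cabs(VA[n]) */
+}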
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tN_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tN_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tN_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
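+
+/*
+ * As the #ifdef branches above show, builds without RSB_WANT_LOOPING_KERNELS
+ * compile only the fixed 1x1 kernels: any other block size resolves to
+ * RSB_ERR_UNSUPPORTED_OPERATION, so callers should test errval rather than
+ * assume success.
+ */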
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tT_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tT_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tT_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tT_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
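+
+/*
+ * For the "tT" variants, row sums of the transposed operand are column sums
+ * of the stored one, so a reference sketch accumulates at the column index
+ * instead; a "tC" variant would presumably add conj(VA[n]). Hypothetical
+ * helper, for illustration only.
+ */
+static void rsb_example_coo_rowssums_trans(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz)
+{
+	rsb_nnz_idx_t n;
+	for(n = 0; n < nnz; ++n)
+		row_sums[JA[n]] += VA[n];	/* accumulate into column positions */
+}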
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tC_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tC_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
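+
+/*
+ * A hypothetical wrapper (not part of the generated set) mapping a
+ * BLAS-style transposition character onto the three unsymmetric,
+ * explicit-diagonal rowssums dispatchers defined above; the argument names
+ * follow the signatures in this file.
+ */
+static rsb_err_t rsb_example_rowssums_dispatch(char trans, const double complex * VA, double complex * row_sums, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_coo_idx_t * bindx, const rsb_nnz_idx_t * bpntr, const rsb_nnz_idx_t * indptr, const rsb_coo_idx_t * rpntr, const rsb_coo_idx_t * cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const rsb_nnz_idx_t nnz)
+{
+	switch(trans)
+	{
+	case 'N': return rsb__BCOR_rowssums_double_complex_C_u_tN_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+	case 'T': return rsb__BCOR_rowssums_double_complex_C_u_tT_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+	case 'C': return rsb__BCOR_rowssums_double_complex_C_u_tC_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+	default : return RSB_ERR_UNSUPPORTED_OPERATION;
+	}
+}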
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tC_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tC_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tN_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
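+
+/*
+ * A reference sketch of the symmetric ("sS") rowssums semantics, assuming
+ * off-diagonal entries are stored once and mirrored into both the row and
+ * the (offset) column sum; hypothetical helper, for illustration only.
+ */
+static void rsb_example_coo_rowssums_sym(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, rsb_nnz_idx_t nnz)
+{
+	rsb_nnz_idx_t n;
+	for(n = 0; n < nnz; ++n)
+	{
+		row_sums[roff + IA[n]] += VA[n];
+		if(roff + IA[n] != coff + JA[n])	/* mirror the off-diagonal entry */
+			row_sums[coff + JA[n]] += VA[n];
+	}
+}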
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tN_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tN_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tN_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tT_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tT_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tT_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tT_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tC_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the
+	 * output vector to accept a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCOR */
+			errval = rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tC_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tC_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tC_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tN_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tN_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tN_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tN_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tT_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tT_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tT_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tT_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tC_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tC_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tC_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tC_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
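+/*
+ * The "scale" dispatchers below follow the same naming scheme as the
+ * "rowssums" ones above.  As far as this file shows: the "_C_" variants take
+ * rsb_coo_idx_t index arrays while the "_H_" variants take the narrower
+ * rsb_half_idx_t, and the "_ul_" kernels are the looped ones guarded by
+ * RSB_WANT_LOOPING_KERNELS.  The remaining suffix letters (tN/tT/tC,
+ * sU/sS/sH, dE/dI) plausibly encode transposition, symmetry and diagonal
+ * handling, but that reading is an inference from the names, not something
+ * these dispatchers themselves establish.
+ */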
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tN_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tN_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tN_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tN_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tN_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tN_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tN_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tN_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tT_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tT_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tT_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tT_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tT_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tT_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tT_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tT_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tC_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tC_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tC_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tC_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tC_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each allow a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tC_r1_c1_ul_sU_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
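+/*
+ * Editorial note: the "_H_" variants differ from the "_C_" ones only in
+ * taking rsb_half_idx_t (halfword) index arrays instead of
+ * rsb_coo_idx_t; presumably the narrower indices reduce index-memory
+ * traffic on submatrices small enough to be addressed by halfwords.
+ */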
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tC_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tC_r1_c1_ul_sU_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tN_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tN_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tN_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tN_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tN_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tN_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tN_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tN_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tT_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tT_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tT_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tT_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tT_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tT_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tT_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tT_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tC_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tC_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tC_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tC_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tC_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tC_r1_c1_ul_sS_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tC_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tC_r1_c1_ul_sS_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tN_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tN_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tN_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tN_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tN_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tN_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tN_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tN_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tT_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tT_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tT_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tT_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tT_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tT_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tT_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tT_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tC_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tC_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tC_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_C__tC_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tC_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	register rsb_coo_idx_t columns,rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCOR */
+		errval = rsb__BCOR_scale_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS  */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS  */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCOR_scale_double_complex_H__tC_r1_c1_ul_sH_dE_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tC_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	register rsb_coo_idx_t columns, rows;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCOR */
+			errval = rsb__BCOR_scale_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCOR_scale_double_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCOR_scale_double_complex_H__tC_r1_c1_ul_sH_dI_uG
+( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,nnz,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+/* @endcond */
diff --git a/rsb_krnl_bcoo_spmv_u.h b/rsb_krnl_bcoo_spmv_u.h
new file mode 100644
index 0000000..c685b10
--- /dev/null
+++ b/rsb_krnl_bcoo_spmv_u.h
@@ -0,0 +1,21824 @@
+/* @cond INNERDOC */
+/*!
+ @file
+ @brief
+ Performance kernel dispatching code, for each type, submatrix size and
+ operation, specialized here for the block coordinates (BCOO) format.
+ Kernels are unrolled, without loops, for the user-specified blockings only.
+ */
+
+/*
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script.
+ It is not meant to be used as an API (Application Programming Interface).
+ P.S.: right now, only row-major matrix access is considered.
+ */
+#ifndef RSB_BCOO_SPMV_U_H_INCLUDED
+#define RSB_BCOO_SPMV_U_H_INCLUDED
+#include "rsb.h"
+#include "rsb_common.h"
+#include "rsb_internals.h"
+
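+/* Editorial aside, not produced by the M4 generator: the prototypes below
+ * differ only in their name suffixes and in the width of the `bindx' index
+ * type: the `_C_' variants take rsb_coo_idx_t indices, the `_H_' variants
+ * the narrower rsb_half_idx_t; the tN/tT/tC infixes presumably select the
+ * untransposed, transposed and conjugate-transposed operation, in the usual
+ * BLAS convention.  As a minimal sketch, the whole full-index double
+ * precision spmv_uaua family can be abstracted behind one function pointer
+ * type copied verbatim from the signatures (only the typedef name is
+ * hypothetical): */
+typedef rsb_err_t (*rsb_sketch_spmv_uaua_double_C_fp_t)
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+/* Usage would then be, e.g.:
+ *   rsb_sketch_spmv_uaua_double_C_fp_t kernel = rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sU_dE_uG;
+ * (the uauz/uxua families differ, e.g. uxua adds a trailing alphap argument). */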
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap)
+;
+
+
+
+
+
+
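+/*
+ * Editor's note: the spmv_unua_* declarations that follow repeat the
+ * uxua argument list minus the trailing alphap; the scaling factor is
+ * fixed by the kernel itself (by the naming scheme, presumably the
+ * negated-accumulate form out -= op(A) * rhs).
+ */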
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
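+/*
+ * Editor's note: from here the unua family repeats with the dI mnemonic
+ * in place of dE, presumably the implicit-diagonal counterparts of the
+ * kernels declared above.
+ */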
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
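+/*
+ * Editor's note: the spmv_sasa_* declarations that follow extend the
+ * common argument list with two trailing vector strides (incx, incy);
+ * by the usual BLAS convention these would be the element strides of
+ * rhs and out respectively, letting the kernels operate on vectors
+ * that are not stored contiguously.
+ */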
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
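+/*
+ * Editor's note: the spsv_uxua_* declarations below differ from the
+ * spmv families in two visible ways: each kernel comes as an
+ * upper/lower-triangle pair (trailing uU/uL in place of uG), and the
+ * rhs/out arguments are no longer declared restrict, consistent with
+ * a triangular solve whose input and output vectors may overlap.
+ */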
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_uu_sU_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_uu_sS_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_uu_sH_dE_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_uu_sU_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_uu_sS_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tN_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tT_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H__tC_r1_c1_uu_sH_dI_uG
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dE_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dE_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alphap [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dI_uU
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dI_uL
+(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const double * restrict alpha [...]
+;
+
+
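The mnemonic suffixes in these generated kernel names follow a fixed pattern. Only the C/H index-width field is directly visible in the parameter types (rsb_coo_idx_t vs. rsb_half_idx_t bindx); the rest of the decoding below is a best-effort reading of the name grid, not upstream documentation:

/*
 * rsb__BCOR_<op>_<type>_<I>__t<T>_r1_c1_uu_s<S>_d<D>_u<U>
 *  <op>  : operation -- spsv (sparse triangular solve),
 *          infty_norm, rowssums, ...
 *  <type>: numerical type of VA (double here)
 *  <I>   : C = bindx held as rsb_coo_idx_t, H = as rsb_half_idx_t
 *  t<T>  : N / T / C = untransposed / transposed / conjugate-transposed
 *  r1_c1 : 1x1 register blocking (br = bc = 1)
 *  s<S>  : U / S / H = unsymmetric / symmetric / hermitian
 *  d<D>  : E / I = explicit / implicit (unit) diagonal
 *  u<U>  : U / L = upper / lower triangle, G = general
 */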
+
+
+
+
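These double-underscore spsv kernels are internal entry points; user code would normally reach one of them through librsb's public triangular solve. Below is a minimal sketch of that route, assuming the librsb 1.2 public API as documented in rsb.h (rsb_lib_init, rsb_mtx_alloc_from_coo_const, rsb_spsv, rsb_mtx_free, rsb_lib_exit) together with its usual flag and typecode macros -- verify the exact names against the installed header:

#include <rsb.h>

int main(void)
{
	/* 2x2 lower-triangular matrix [[1,0],[2,1]] in COO form */
	const double VA[] = { 1.0, 2.0, 1.0 };
	const rsb_coo_idx_t IA[] = { 0, 1, 1 }, JA[] = { 0, 0, 1 };
	const double X[] = { 1.0, 4.0 };  /* right-hand side */
	double Y[2];                      /* receives alpha * inv(L) * X */
	const double alpha = 1.0;
	rsb_err_t errval = RSB_ERR_NO_ERROR;
	struct rsb_mtx_t *mtxAp = NULL;

	if (rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
		return 1;
	mtxAp = rsb_mtx_alloc_from_coo_const(VA, IA, JA, 3,
		RSB_NUMERICAL_TYPE_DOUBLE, 2, 2, 1, 1,
		RSB_FLAG_LOWER_TRIANGULAR, &errval);
	if (mtxAp == NULL)
		return 1;
	/* dispatches internally to a kernel such as
	   rsb__BCOR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dE_uL */
	errval = rsb_spsv(RSB_TRANSPOSITION_N, &alpha, mtxAp, X, 1, Y, 1);
	rsb_mtx_free(mtxAp);
	rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
	return (errval == RSB_ERR_NO_ERROR) ? 0 : 1;
}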
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tN_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tN_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tT_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tT_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tC_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tC_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tN_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tN_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tT_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tT_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tC_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tC_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tN_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tN_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tT_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tT_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tC_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tC_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tN_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tN_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tT_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tT_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tC_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tC_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tN_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tN_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tT_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tT_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tC_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tC_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tN_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tN_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tT_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tT_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C__tC_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H__tC_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
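Unlike the spsv kernels, the infty_norm variants above carry no vector arguments at all: each walks VA and accumulates per-row absolute-value sums into row_sums, the quantity whose maximum over the rows gives the matrix infinity norm. One prototype is emitted per index width (C/H), transposition (tN/tT/tC), symmetry (sU/sS/sH) and diagonal handling (dE/dI), always with the general uG triangle code.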
+
+
+
+
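The rowssums family that follows repeats exactly the same signature and variant grid; judging by the name and the shared row_sums output parameter, these kernels accumulate plain (signed) per-row sums rather than absolute values.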
+rsb_err_t rsb__BCOR_rowssums_double_C__tN_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tN_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tT_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tT_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tC_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tC_r1_c1_uu_sU_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tN_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tN_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tT_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tT_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tC_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tC_r1_c1_uu_sS_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tN_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tN_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tT_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tT_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tC_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tC_r1_c1_uu_sH_dE_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tN_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tN_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tT_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tT_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tC_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tC_r1_c1_uu_sU_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tN_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tN_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tT_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tT_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tC_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tC_r1_c1_uu_sS_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tN_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tN_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tT_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tT_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C__tC_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H__tC_r1_c1_uu_sH_dI_uG
+(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
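+/*
+ * The scale_* declarations that follow mirror the rowssums_* set above,
+ * but take a writable VA and a const scale_factors vector instead of an
+ * output row_sums: each kernel evidently rescales the stored values in
+ * place, presumably multiplying every nonzero by the factor of its
+ * (block) row.  The same C/H, tN/tT/tC, sU/sS/sH and dE/dI suffix axes
+ * apply, one declaration per combination.
+ */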
+rsb_err_t rsb__BCOR_scale_double_C__tN_r1_c1_uu_sU_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tN_r1_c1_uu_sU_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tT_r1_c1_uu_sU_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tT_r1_c1_uu_sU_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tC_r1_c1_uu_sU_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tC_r1_c1_uu_sU_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tN_r1_c1_uu_sS_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tN_r1_c1_uu_sS_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tT_r1_c1_uu_sS_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tT_r1_c1_uu_sS_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tC_r1_c1_uu_sS_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tC_r1_c1_uu_sS_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tN_r1_c1_uu_sH_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tN_r1_c1_uu_sH_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tT_r1_c1_uu_sH_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tT_r1_c1_uu_sH_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tC_r1_c1_uu_sH_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tC_r1_c1_uu_sH_dE_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tN_r1_c1_uu_sU_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tN_r1_c1_uu_sU_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tT_r1_c1_uu_sU_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tT_r1_c1_uu_sU_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tC_r1_c1_uu_sU_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tC_r1_c1_uu_sU_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tN_r1_c1_uu_sS_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tN_r1_c1_uu_sS_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tT_r1_c1_uu_sS_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tT_r1_c1_uu_sS_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tC_r1_c1_uu_sS_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tC_r1_c1_uu_sS_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tN_r1_c1_uu_sH_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tN_r1_c1_uu_sH_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tT_r1_c1_uu_sH_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tT_r1_c1_uu_sH_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C__tC_r1_c1_uu_sH_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H__tC_r1_c1_uu_sH_dI_uG
+(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors)
+;
+
+
+
+
+
+
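+/*
+ * Next come the float SpMV kernels.  Their parameter lists add a rhs
+ * input vector and an out output vector to the common index arguments;
+ * the uaua infix presumably denotes the unscaled accumulating variant
+ * (out += A * rhs, i.e. alpha = 1 with the product added to out), with
+ * one declaration per combination of index width, transposition,
+ * symmetry and diagonal handling, exactly as for the double kernels
+ * above.
+ */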
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
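+/*
+ * The spmv_uauz_* block repeats the spmv_uaua_* signatures unchanged;
+ * the trailing z presumably marks the overwriting variant, zeroing out
+ * before the product is stored (out = A * rhs) rather than accumulating
+ * into it.  All suffix axes are otherwise as above.
+ */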
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
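+/*
+ * The spmv_uxua variants below differ from the spmv_uauz family above
+ * only by the trailing "const float * restrict alphap" parameter:
+ * plausibly they compute y += alpha * op(A) * x with alpha read
+ * through alphap, whereas uauz (no alpha argument, the trailing 'z'
+ * suggesting beta = zero) overwrites the output with op(A) * x.
+ * A hypothetical call sketch, with all argument names invented:
+ *
+ *   const float alpha = 2.0f;
+ *   rsb_err_t err = rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sU_dE_uG(
+ *       VA, x, y, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
+ *       1, 1, 0, 0, flags, nnz, &alpha);
+ */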
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap)
+;
+
+
+
+
+
+
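+/*
+ * The spmv_unua variants below again take neither an alpha pointer
+ * nor vector strides; by analogy with the other mnemonics, the 'n'
+ * plausibly stands for a negated unit alpha, i.e. roughly
+ * y -= op(A) * x.  This is inferred from the naming scheme and the
+ * signatures, not from upstream documentation.
+ */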
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
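+/*
+ * The spmv_sasa variants below append two trailing parameters,
+ * rsb_coo_idx_t incx and rsb_coo_idx_t incy, which by the usual BLAS
+ * convention suggest strided access to the input and output vectors:
+ * roughly, out[i * incy] accumulates the product of row i of op(A)
+ * with the rhs elements read at stride incx.  As with the other
+ * mnemonics, this reading is inferred from the signatures.
+ */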
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
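
[Editorial aside, not part of the patch.] The specialization of each generated kernel appears to be encoded in its name: `float_C`/`float_H` select full (`rsb_coo_idx_t`) versus halfword (`rsb_half_idx_t`) column indices, `tN`/`tT`/`tC` no-transpose/transpose/conjugate-transpose, `sU`/`sS`/`sH` unsymmetric/symmetric/Hermitian handling, `dE`/`dI` an explicit versus implicit (unit) diagonal, and `uG`/`uU`/`uL` general/upper/lower storage. As a minimal sketch of what one r1/c1 "sasa" (plain accumulate, unit-stride) spmv kernel computes — with stub typedefs standing in for librsb's real definitions, and illustrative IA/JA index arrays in place of the actual bindx/bpntr block-COO walk:

    /* Editorial sketch, not part of the patch.  Typedefs below are
     * assumptions, not librsb's real definitions. */
    typedef int rsb_err_t;
    typedef int rsb_coo_idx_t;
    typedef int rsb_nnz_idx_t;
    #define RSB_ERR_NO_ERROR 0

    /* tN (no transpose), sU (unsymmetric), dE (explicit diagonal), uG:
     * accumulate y += A*x over nnz coordinate pairs. */
    static rsb_err_t spmv_sasa_float_sketch(
        const float *restrict VA,
        const rsb_coo_idx_t *restrict IA,   /* illustrative row indices    */
        const rsb_coo_idx_t *restrict JA,   /* illustrative column indices */
        const float *restrict rhs, float *restrict out,
        const rsb_nnz_idx_t nnz)
    {
        for (rsb_nnz_idx_t n = 0; n < nnz; ++n)
            out[IA[n]] += VA[n] * rhs[JA[n]];
        return RSB_ERR_NO_ERROR;
    }

The transposed (`tT`) and conjugate (`tC`) variants would instead accumulate into out[JA[n]]; for real `float` data the two coincide, which is presumably why both are generated with identical signatures.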
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
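
[Editorial aside, not part of the patch.] The `spsv_uxua` prototypes above differ from the spmv ones in two ways: they drop the incx/incy stride arguments, and each kernel comes in a `uU`/`uL` pair for upper- versus lower-triangular storage. A triangular solve must visit unknowns in dependency order; a minimal sketch of the lower-triangular (`uL`) case, written over a CSR-style layout for clarity (the declared kernels instead walk a block-COO layout via bindx/bpntr; stub typedefs as in the sketch above, and each row is assumed to store its nonzero diagonal entry last):

    static rsb_err_t spsv_lower_float_sketch(
        const float *restrict VA,
        const rsb_nnz_idx_t *restrict PA,  /* row pointers, length Mdim+1 */
        const rsb_coo_idx_t *restrict JA,  /* column indices              */
        const float *restrict rhs, float *restrict out,
        const rsb_coo_idx_t Mdim)
    {
        for (rsb_coo_idx_t i = 0; i < Mdim; ++i) {
            float s = rhs[i];
            rsb_nnz_idx_t n = PA[i];
            for (; n < PA[i + 1] - 1; ++n)   /* strictly lower entries */
                s -= VA[n] * out[JA[n]];
            out[i] = s / VA[n];   /* "dE": divide by the stored diagonal */
        }
        return RSB_ERR_NO_ERROR;
    }

For the `dI` (implicit unit diagonal) variants the final division would be skipped.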
+
+
+
+
+
+
+
+
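
[Editorial aside, not part of the patch.] The `spmv_sxsa` group that follows extends the `sasa` signature with a pointer to the scalar alpha (`alphap`) and explicit `incx`/`incy` vector strides. Per nonzero the operation is roughly the following, under the same stub typedefs and illustrative IA/JA arrays as in the first sketch:

    static rsb_err_t spmv_sxsa_float_sketch(
        const float *restrict VA,
        const rsb_coo_idx_t *restrict IA, const rsb_coo_idx_t *restrict JA,
        const float *restrict rhs, float *restrict out,
        const rsb_nnz_idx_t nnz, const float *restrict alphap,
        const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
    {
        const float alpha = *alphap;  /* alpha is passed by pointer */
        for (rsb_nnz_idx_t n = 0; n < nnz; ++n)
            out[IA[n] * incy] += alpha * VA[n] * rhs[JA[n] * incx];
        return RSB_ERR_NO_ERROR;
    }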
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_uu_sU_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_uu_sS_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+
+
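+/* For the real 'float' type, the Hermitian (_sH) variants below are
+ * presumably numerically identical to the symmetric (_sS) ones; the
+ * generator seemingly emits them uniformly across all numeric types. */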
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_uu_sH_dE_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+
+
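+/* From here on, the _dI counterparts of the kernels above: the same
+ * operations, but apparently assuming an implicit unit diagonal rather
+ * than explicitly stored diagonal entries (_dE). */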
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_uu_sU_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_uu_sS_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tN_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tT_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H__tC_r1_c1_uu_sH_dI_uG
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
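+/*
+ * The spsv_* declarations that follow are the sparse triangular-solve
+ * counterparts of the spmv_* kernels above. Note the _uU/_uL suffixes
+ * (upper/lower triangle) in place of _uG: a solve needs a definite
+ * triangle, so each transposition (tN/tT/tC) comes in both flavours.
+ */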
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dE_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dE_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rsb [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dI_uU
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dI_uL
+(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const float * restrict alphap,rs [...]
+;
+
+
+
+
+
+
+
+
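+/*
+ * Infinity-norm kernels. Judging by the signatures (no rhs/out/alphap
+ * arguments, a row_sums output instead), these accumulate per-row sums of
+ * absolute values, from which ||A||_inf = max_i sum_j |a_ij| follows.
+ */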
+rsb_err_t rsb__BCOR_infty_norm_float_C__tN_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
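+/*
+ * Hedged sketch of the arithmetic such a kernel plausibly performs,
+ * assuming -- purely for illustration -- a 1x1 COO layout with an explicit
+ * row-index array IA; how that maps onto bindx/bpntr above is not shown by
+ * this header. The function name and IA parameter are hypothetical, not
+ * upstream code. row_sums is assumed zeroed by the caller.
+ */
+static inline void rsb__infty_norm_coo_float_sketch(
+	const float * VA, const rsb_coo_idx_t * IA,
+	const rsb_nnz_idx_t nnz, float * row_sums)
+{
+	rsb_nnz_idx_t k;
+	for (k = 0; k < nnz; ++k)
+	{
+		/* accumulate |a_ij| into its row's sum; the infinity norm is
+		 * then the maximum of row_sums over all rows */
+		const float a = VA[k];
+		row_sums[IA[k]] += (a < 0.0f) ? -a : a;
+	}
+}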
+rsb_err_t rsb__BCOR_infty_norm_float_H__tN_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
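
As a concrete illustration of how the paired `_C_`/`_H_` declarations just above might be selected between, here is a minimal caller sketch. Only the two kernel names and their parameter lists are taken from this header; the wrapper name, the `use_halfword_indices` flag, and the void-pointer convention are hypothetical, and the snippet assumes the librsb internal headers defining `rsb_err_t`, `rsb_coo_idx_t`, `rsb_half_idx_t`, `rsb_nnz_idx_t`, and `rsb_flags_t` have been included.

/* Hypothetical dispatcher: picks the halfword- or full-index variant of
 * the untransposed, unsymmetric, general infinity-norm kernel declared
 * above. `bindx` is taken as void* so one wrapper can serve both. */
static rsb_err_t infty_norm_float_tN_sketch(
	const float *VA, float *row_sums,
	rsb_coo_idx_t Mdim, rsb_coo_idx_t mdim,
	const void *bindx,
	const rsb_nnz_idx_t *bpntr, const rsb_nnz_idx_t *indptr,
	const rsb_coo_idx_t *rpntr, const rsb_coo_idx_t *cpntr,
	rsb_coo_idx_t br, rsb_coo_idx_t bc,
	rsb_coo_idx_t roff, rsb_coo_idx_t coff,
	rsb_flags_t flags, rsb_nnz_idx_t nnz,
	int use_halfword_indices /* hypothetical flag */)
{
	if (use_halfword_indices)
		return rsb__BCOR_infty_norm_float_H__tN_r1_c1_uu_sU_dE_uG(
			VA, row_sums, Mdim, mdim,
			(const rsb_half_idx_t *)bindx, bpntr, indptr,
			rpntr, cpntr, br, bc, roff, coff, flags, nnz);
	else
		return rsb__BCOR_infty_norm_float_C__tN_r1_c1_uu_sU_dE_uG(
			VA, row_sums, Mdim, mdim,
			(const rsb_coo_idx_t *)bindx, bpntr, indptr,
			rpntr, cpntr, br, bc, roff, coff, flags, nnz);
}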
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tT_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tT_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tC_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tC_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tN_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tN_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tT_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tT_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tC_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tC_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tN_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tN_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tT_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tT_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tC_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tC_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tN_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tN_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tT_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tT_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tC_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tC_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tN_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tN_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tT_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tT_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tC_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tC_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tN_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tN_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tT_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tT_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C__tC_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H__tC_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tN_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tN_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tT_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tT_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tC_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tC_r1_c1_uu_sU_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tN_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tN_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tT_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tT_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tC_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tC_r1_c1_uu_sS_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tN_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tN_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tT_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tT_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tC_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tC_r1_c1_uu_sH_dE_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tN_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tN_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tT_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tT_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tC_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tC_r1_c1_uu_sU_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tN_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tN_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tT_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tT_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tC_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tC_r1_c1_uu_sS_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tN_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tN_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tT_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tT_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C__tC_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H__tC_r1_c1_uu_sH_dI_uG
+(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tN_r1_c1_uu_sU_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tN_r1_c1_uu_sU_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tT_r1_c1_uu_sU_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tT_r1_c1_uu_sU_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tC_r1_c1_uu_sU_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tC_r1_c1_uu_sU_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tN_r1_c1_uu_sS_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tN_r1_c1_uu_sS_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tT_r1_c1_uu_sS_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tT_r1_c1_uu_sS_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tC_r1_c1_uu_sS_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tC_r1_c1_uu_sS_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tN_r1_c1_uu_sH_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tN_r1_c1_uu_sH_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tT_r1_c1_uu_sH_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tT_r1_c1_uu_sH_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tC_r1_c1_uu_sH_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tC_r1_c1_uu_sH_dE_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tN_r1_c1_uu_sU_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tN_r1_c1_uu_sU_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tT_r1_c1_uu_sU_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tT_r1_c1_uu_sU_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tC_r1_c1_uu_sU_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tC_r1_c1_uu_sU_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tN_r1_c1_uu_sS_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tN_r1_c1_uu_sS_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tT_r1_c1_uu_sS_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tT_r1_c1_uu_sS_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tC_r1_c1_uu_sS_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tC_r1_c1_uu_sS_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tN_r1_c1_uu_sH_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tN_r1_c1_uu_sH_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tT_r1_c1_uu_sH_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tT_r1_c1_uu_sH_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C__tC_r1_c1_uu_sH_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H__tC_r1_c1_uu_sH_dI_uG
+(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
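+/*
+ * The four-letter infix (uxua, unua, sasa) selects the numerical variant.
+ * A plausible reading, consistent with the parameter lists but not
+ * confirmed here: uxua scales by an arbitrary alpha (its prototypes carry
+ * extra trailing arguments, truncated in this listing), unua hard-codes a
+ * negated update (its parameter lists end at nnz, with nothing extra), and
+ * sasa works on strided vectors (again with extra trailing arguments). The
+ * unua declarations follow.
+ */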
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
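+
+/*
+ * A minimal, self-contained reference model of what a non-transposed,
+ * unsymmetric spmv_unua kernel above plausibly computes: out := out - A *
+ * rhs over nnz coordinate-format entries. This is an illustrative sketch,
+ * not librsb code; the ia/ja index arrays and the fixed alpha = -1
+ * semantics are assumptions read off the unua parameter lists, which end
+ * at nnz with no alpha argument.
+ */
+#include <complex.h>
+#include <stdio.h>
+
+static void coo_spmv_unua(const float complex *VA,
+                          const float complex *rhs, float complex *out,
+                          const int *ia, const int *ja, int nnz)
+{
+	/* out -= A * rhs: one multiply and subtract per stored entry */
+	for (int k = 0; k < nnz; ++k)
+		out[ia[k]] -= VA[k] * rhs[ja[k]];
+}
+
+int main(void)
+{
+	/* 2x2 matrix [[1+i, 0], [2, 3]] in coordinate form */
+	const float complex VA[] = { 1.0f + 1.0f * I, 2.0f, 3.0f };
+	const int ia[] = { 0, 1, 1 }, ja[] = { 0, 0, 1 };
+	const float complex rhs[] = { 1.0f, 1.0f };
+	float complex out[] = { 0.0f, 0.0f };
+
+	coo_spmv_unua(VA, rhs, out, ia, ja, 3);
+	/* expect -1-1i and -5+0i */
+	printf("%g%+gi %g%+gi\n", crealf(out[0]), cimagf(out[0]),
+	       crealf(out[1]), cimagf(out[1]));
+	return 0;
+}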
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_i [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_coo_ [...]
+;
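+
+/*
+ * The spsv declarations below are the triangular-solve kernels. Two
+ * observable differences from the spmv families above: the trailing
+ * specifier is uU or uL instead of uG (presumably upper- versus
+ * lower-triangular operands), and rhs / out are declared without restrict,
+ * which would permit the aliased, in-place solves typical of sparse
+ * triangular substitution.
+ */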
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+
+
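+/*
+ * A note on the generated names above: the suffix appears to encode the
+ * kernel variant. "_C_"/"_H_" selects the bindx index width (rsb_coo_idx_t
+ * vs. rsb_half_idx_t, as the paired prototypes show), "tN"/"tT"/"tC"
+ * presumably the transposition (none / transpose / conjugate transpose),
+ * "sU"/"sS"/"sH" the symmetry (unsymmetric / symmetric / hermitian),
+ * "dE"/"dI" an explicit vs. implicit diagonal, and "uU"/"uL" the upper vs.
+ * lower triangle of the spsv solvers. Since rhs and out are not
+ * restrict-qualified in the spsv prototypes, an in-place solve looks
+ * permissible. A minimal calling sketch, assuming VA/bindx/bpntr hold a
+ * valid 1x1-blocked upper triangle of order n with nnz nonzeros, and
+ * assuming the r1_c1 kernels ignore indptr/rpntr/cpntr (passed as NULL):
+ *
+ *   rsb_err_t err = rsb__BCOR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dI_uU(
+ *       VA, x, x, n, n, bindx, bpntr, NULL, NULL, NULL,
+ *       1, 1, 0, 0, RSB_FLAG_NOFLAGS, nnz);
+ */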
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
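+/*
+ * The spmv_sxsa declarations above come in "_C_"/"_H_" pairs whose
+ * signatures differ only in the bindx index type (rsb_coo_idx_t vs.
+ * rsb_half_idx_t), so index-width dispatch reduces to picking one of the
+ * two symbols. A hedged sketch follows; use_halfword_indices is a
+ * hypothetical caller-side flag, not part of this header, and the trailing
+ * arguments are left elided:
+ *
+ *   err = use_halfword_indices
+ *       ? rsb__BCOR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sU_dE_uG(...)
+ *       : rsb__BCOR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sU_dE_uG(...);
+ */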
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dE_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dE_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
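+/*
+ * Note: the declarations below repeat the preceding sU/sS/sH set with
+ * the dE field replaced by dI, i.e. variants for matrices whose
+ * (presumably unit) diagonal is implicit rather than stored.
+ */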
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const flo [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dI_uU
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dI_uL
+(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const fl [...]
+;
+
+
+
+
+
+
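+/*
+ * End of the spsv (sparse triangular solve) declarations; the kernels
+ * below are reductions. Going by the parameter list, each infty_norm
+ * variant accumulates the absolute-value sums of the stored rows into
+ * row_sums; the matrix infinity norm would then be the maximum of these
+ * sums, presumably taken by the caller over all leaf submatrices. The
+ * uG suffix, replacing uU/uL, suggests general (non-triangular) operands.
+ */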
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
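+/*
+ * A minimal usage sketch (editorial, not part of the upstream header):
+ * the rsb__ kernels here are internal, so user code would reach this
+ * functionality through the public rsb.h API. The calls below
+ * (rsb_lib_init, rsb_mtx_alloc_from_coo_const, rsb_mtx_get_nrm,
+ * rsb_mtx_free, rsb_lib_exit) are assumed from this release's rsb.h,
+ * and the sketch is fenced in #if 0 so the header compiles unchanged.
+ */
+#if 0
+#include <complex.h>
+#include <rsb.h>
+
+int main(void)
+{
+	/* a 2x2 float complex COO matrix with three nonzeroes */
+	const float complex VA[] = { 1.0f + 1.0f*I, 2.0f, 3.0f };
+	const rsb_coo_idx_t IA[] = { 0, 0, 1 }, JA[] = { 0, 1, 1 };
+	struct rsb_mtx_t *mtxAp = NULL;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	/* rsb_mtx_get_nrm writes a scalar of the matrix's numerical type */
+	float complex nrm = 0.0f;
+
+	if (rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
+		return 1;
+	mtxAp = rsb_mtx_alloc_from_coo_const(VA, IA, JA, 3,
+			RSB_NUMERICAL_TYPE_FLOAT_COMPLEX, 2, 2, 1, 1,
+			RSB_FLAG_NOFLAGS, &errval);
+	if (mtxAp)
+	{
+		/* RSB_EXTF_NORM_INF dispatches, by type and leaf format, to
+		 * kernels such as the rsb__BCOR_infty_norm_* ones above */
+		errval = rsb_mtx_get_nrm(mtxAp, &nrm, RSB_EXTF_NORM_INF);
+		rsb_mtx_free(mtxAp);
+	}
+	rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
+	return (errval == RSB_ERR_NO_ERROR) ? 0 : 1;
+}
+#endif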
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
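+/*
+ * The rowssums variants below share their signature with the infty_norm
+ * kernels above, including the row_sums output argument; by analogy they
+ * presumably accumulate the plain (not absolute-value) row sums, again
+ * across the same C/H, tN/tT/tC, sU/sS/sH and dE/dI axes. The public
+ * counterpart appears to be rsb_mtx_get_vec() with RSB_EXTF_SUMS_ROW.
+ */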
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
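+/*
+ * The rowssums_* kernels above accumulate the per-row sums of the stored
+ * block (or of its transpose/conjugate, per the t? suffix) into row_sums;
+ * the scale_* kernels declared below scale the nonzero values in VA row by
+ * row through scale_factors.  As a minimal sketch of the untransposed,
+ * unsymmetric case -- illustrative only, with hypothetical per-nonzero row
+ * and column index arrays IA/JA standing in for whatever bindx/bpntr encode
+ * here:
+ *
+ *   for (rsb_nnz_idx_t n = 0; n < nnz; ++n)
+ *       row_sums[IA[n]] += VA[n];                  // rowssums
+ *   for (rsb_nnz_idx_t n = 0; n < nnz; ++n)
+ *       VA[n] *= scale_factors[IA[n]];             // scale
+ *
+ * (The real kernels additionally fold in roff/coff, which appear to be the
+ * row/column offsets locating this block within the containing matrix.)
+ */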
+rsb_err_t rsb__BCOR_scale_float_complex_C__tN_r1_c1_uu_sU_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tN_r1_c1_uu_sU_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tT_r1_c1_uu_sU_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tT_r1_c1_uu_sU_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tC_r1_c1_uu_sU_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tC_r1_c1_uu_sU_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tN_r1_c1_uu_sS_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tN_r1_c1_uu_sS_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tT_r1_c1_uu_sS_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tT_r1_c1_uu_sS_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tC_r1_c1_uu_sS_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tC_r1_c1_uu_sS_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tN_r1_c1_uu_sH_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tN_r1_c1_uu_sH_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tT_r1_c1_uu_sH_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tT_r1_c1_uu_sH_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tC_r1_c1_uu_sH_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tC_r1_c1_uu_sH_dE_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tN_r1_c1_uu_sU_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tN_r1_c1_uu_sU_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tT_r1_c1_uu_sU_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tT_r1_c1_uu_sU_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tC_r1_c1_uu_sU_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tC_r1_c1_uu_sU_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tN_r1_c1_uu_sS_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tN_r1_c1_uu_sS_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tT_r1_c1_uu_sS_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tT_r1_c1_uu_sS_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tC_r1_c1_uu_sS_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tC_r1_c1_uu_sS_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tN_r1_c1_uu_sH_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tN_r1_c1_uu_sH_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tT_r1_c1_uu_sH_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tT_r1_c1_uu_sH_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C__tC_r1_c1_uu_sH_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H__tC_r1_c1_uu_sH_dI_uG
+(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors)
+;
+
+
+
+
+
+
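+/*
+ * The spmv_* families that follow are the sparse matrix-vector multiply
+ * kernels, here for double complex.  The "uaua" / "uauz" infixes appear to
+ * encode the alpha/accumulation mode: "uaua" updating the output in place
+ * (out += op(A) * rhs with unit alpha) and "uauz" zeroing it first
+ * (out = op(A) * rhs).  A reference loop for the untransposed, unsymmetric
+ * "uaua" case, again with hypothetical IA/JA index arrays:
+ *
+ *   for (rsb_nnz_idx_t n = 0; n < nnz; ++n)
+ *       out[IA[n]] += VA[n] * rhs[JA[n]];
+ */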
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
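+/*
+ * The uauz variants below share the uaua signatures exactly; per the naming
+ * reading above they presumably clear the output vector before
+ * accumulating, so that out = op(A) * rhs.  An illustrative direct call,
+ * matching the declared parameter order (in practice such kernels would be
+ * reached through the public rsb_spmv()-level API rather than invoked like
+ * this):
+ *
+ *   rsb_err_t err = rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sU_dE_uG(
+ *           VA, x, y, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
+ *           1, 1, roff, coff, flags, nnz);
+ */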
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
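+/*
+ * The uxua variants below have the same leading parameters as the uauz
+ * family above plus extra trailing arguments that the mailing-list
+ * software cut off ("[...]"); judging from the "const" remnant, most
+ * likely a pointer to a scaling factor (alpha), so that these compute
+ * something like out += alpha * A * rhs. Hypothetical decoding; the full
+ * signatures are only in the upstream header.
+ */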
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
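+/*
+ * The unua variants carry the same signatures as the uauz family; the
+ * "un" mnemonic presumably denotes a negated unit alpha, i.e. roughly
+ * out -= A * rhs instead of out = A * rhs.
+ */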
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
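+/*
+ * The sasa variants, like uxua, end in trailing parameters lost to line
+ * truncation; the "rsb_co" remnant is consistent with rsb_coo_idx_t
+ * stride arguments (e.g. incx/incy for strided rhs/out access). Again a
+ * hypothetical decoding only.
+ */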
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_co [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,rsb_c [...]
+;
+
+
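+/*
+ * spsv_uxua group: triangular-solve kernels; each index-width and
+ * transposition combination comes in a `uU'/`uL' pair, apparently
+ * selecting the upper vs. lower triangle.
+ */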
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+
+
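Here the spsv_uxua (triangular solve) prototypes end and the spmv_sxsa (matrix-vector multiply) family begins. Since each kernel above comes in a full-index `_C_` and a half-index `_H_` twin differing only in the width of the bindx entries, a minimal sketch of how a caller might select between them follows — a hypothetical helper, assuming the rsb_* typedefs and the prototypes above are in scope; this is not librsb's actual dispatch path:

#include <complex.h>  /* C99 "double complex", as used by the prototypes */

/* Hypothetical dispatcher for one of the solve kernels above: branch on
 * the index width and forward the remaining arguments unchanged. */
rsb_err_t spsv_uxua_zH_tT_dispatch(
        const double complex *VA, const double complex *rhs,
        double complex *out,
        rsb_coo_idx_t Mdim, rsb_coo_idx_t mdim,
        const void *bindx, int use_half_indices,
        const rsb_nnz_idx_t *bpntr, const rsb_nnz_idx_t *indptr,
        const rsb_coo_idx_t *rpntr, const rsb_coo_idx_t *cpntr,
        rsb_coo_idx_t br, rsb_coo_idx_t bc,
        rsb_coo_idx_t roff, rsb_coo_idx_t coff,
        rsb_flags_t flags, rsb_nnz_idx_t nnz)
{
        if (use_half_indices)
                return rsb__BCOR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dI_uU(
                        VA, rhs, out, Mdim, mdim,
                        (const rsb_half_idx_t *)bindx,
                        bpntr, indptr, rpntr, cpntr,
                        br, bc, roff, coff, flags, nnz);
        return rsb__BCOR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dI_uU(
                VA, rhs, out, Mdim, mdim,
                (const rsb_coo_idx_t *)bindx,
                bpntr, indptr, rpntr, cpntr,
                br, bc, roff, coff, flags, nnz);
}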
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
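Below begin the spsv_sxsx prototypes, the triangular-solve counterparts of the spmv multiply kernels above. As a plain-C orientation aid only — stand-in types and a hypothetical helper name, not librsb's implementation — this is the computation a "lower triangle, implicit unit diagonal" (..._dI_uL) solve performs on row-sorted COO data:

#include <complex.h>
#include <stddef.h>

/* Hypothetical reference helper: solves (I + strictly_lower(A)) * out = rhs
 * by forward substitution.  IA/JA/VA hold the strictly lower triangular
 * nonzeroes of A in row-sorted COO order, so JA[k] < IA[k] always holds. */
static void ref_spsv_lower_unit(const double complex *VA,
                                const int *IA, const int *JA,
                                size_t nnz, int n,
                                const double complex *rhs,
                                double complex *out)
{
        size_t k = 0;

        for (int i = 0; i < n; ++i) {
                double complex acc = rhs[i];

                /* subtract contributions of already-solved unknowns (JA[k] < i) */
                for (; k < nnz && IA[k] == i; ++k)
                        acc -= VA[k] * out[JA[k]];

                out[i] = acc; /* unit diagonal is implicit: no division needed */
        }
}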
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dE_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dE_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const  [...]
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dI_uU
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dI_uL
+(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz,const [...]
+;
+
+
+
+
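+/*
+ * Editorial note, not upstream documentation: the generated kernel names
+ * above encode their variant in the suffix. A best-effort decoding, inferred
+ * from the prototypes themselves:
+ *
+ *   _C / _H        full (rsb_coo_idx_t) vs. halfword (rsb_half_idx_t) bindx
+ *   _tN/_tT/_tC    untransposed / transposed / conjugate-transposed operation
+ *   _sU/_sS/_sH    unsymmetric / symmetric / Hermitian storage (assumed)
+ *   _dE/_dI        explicit vs. implicit (unit) diagonal (assumed)
+ *   _uU/_uL        upper vs. lower triangle for the spsv (sparse triangular
+ *                  solve) kernels; _uG marks the general kernels further down
+ *
+ * A minimal, hypothetical residual check after one of the spsv kernels has
+ * filled `out`: `spmv_ref` stands in for any reference matrix-vector product
+ * and is not a librsb symbol; N, VA, rhs, out are illustrative only.
+ *
+ *   #include <assert.h>
+ *   #include <complex.h>
+ *   double complex r[N];
+ *   spmv_ref(VA, out, r);                     // r = T * out (reference product)
+ *   for (rsb_coo_idx_t i = 0; i < N; ++i)     // a solve should reproduce rhs
+ *       assert(cabs(r[i] - rhs[i]) < 1e-12);
+ */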
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
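+/*
+ * Editorial note: judging by their name and the `row_sums` output argument,
+ * the infty_norm kernels above accumulate per-row absolute-value sums of the
+ * stored nonzeros. A minimal sketch of a call plus the final max-reduction,
+ * assuming bindx/bpntr/indptr/rpntr/cpntr already hold the generated index
+ * layout (an assumption; only the prototype is authoritative):
+ *
+ *   #include <complex.h>
+ *   rsb_err_t err = rsb__BCOR_infty_norm_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+ *       (VA, row_sums, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
+ *        1, 1, 0, 0, flags, nnz);
+ *   double norm = 0.0;                        // ||A||_inf = max_i sum_j |a_ij|
+ *   for (rsb_coo_idx_t i = 0; i < Mdim; ++i)
+ *       if (cabs(row_sums[i]) > norm)         // sums should be real-valued
+ *           norm = cabs(row_sums[i]);
+ */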
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz)
+;
+
+
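+/*
+ * Editorial note: the rowssums kernels above share the infty_norm interface
+ * but, as the name suggests, accumulate plain (signed, complex) row sums
+ * rather than absolute values -- an inference from the naming, not verified
+ * against the kernel bodies. A sketch, e.g. to flag rows with nonzero sum:
+ *
+ *   #include <complex.h>
+ *   #include <stdio.h>
+ *   rsb_err_t err = rsb__BCOR_rowssums_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+ *       (VA, row_sums, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
+ *        1, 1, 0, 0, flags, nnz);
+ *   for (rsb_coo_idx_t i = 0; i < Mdim; ++i)
+ *       if (cabs(row_sums[i]) > 1e-12)
+ *           printf("row %d has nonzero sum\n", (int)i);
+ */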
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
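+/*
+ * Editorial note: unlike the read-only kernels above, these scale kernels
+ * take a mutable VA plus a `scale_factors` vector; the natural reading is an
+ * in-place per-row scaling of the stored nonzeros (an inference from the
+ * signature, not upstream documentation). A hypothetical use, normalizing
+ * rows by previously computed infinity-norm row sums:
+ *
+ *   #include <complex.h>
+ *   double complex scale_factors[Mdim];       // one factor per row (VLA sketch)
+ *   for (rsb_coo_idx_t i = 0; i < Mdim; ++i)  // invert nonzero row sums
+ *       scale_factors[i] = row_sums[i] != 0.0 ? 1.0 / row_sums[i] : 1.0;
+ *   rsb_err_t err = rsb__BCOR_scale_double_complex_C__tN_r1_c1_uu_sU_dE_uG
+ *       (VA, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
+ *        1, 1, 0, 0, flags, nnz, scale_factors);
+ */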
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tN_r1_c1_uu_sU_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tT_r1_c1_uu_sU_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tT_r1_c1_uu_sU_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tC_r1_c1_uu_sU_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tC_r1_c1_uu_sU_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tN_r1_c1_uu_sS_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tN_r1_c1_uu_sS_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tT_r1_c1_uu_sS_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tT_r1_c1_uu_sS_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tC_r1_c1_uu_sS_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tC_r1_c1_uu_sS_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tN_r1_c1_uu_sH_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tN_r1_c1_uu_sH_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tT_r1_c1_uu_sH_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tT_r1_c1_uu_sH_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tC_r1_c1_uu_sH_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tC_r1_c1_uu_sH_dE_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tN_r1_c1_uu_sU_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tN_r1_c1_uu_sU_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tT_r1_c1_uu_sU_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tT_r1_c1_uu_sU_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tC_r1_c1_uu_sU_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tC_r1_c1_uu_sU_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tN_r1_c1_uu_sS_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tN_r1_c1_uu_sS_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tT_r1_c1_uu_sS_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tT_r1_c1_uu_sS_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tC_r1_c1_uu_sS_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tC_r1_c1_uu_sS_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tN_r1_c1_uu_sH_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tN_r1_c1_uu_sH_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tT_r1_c1_uu_sH_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tT_r1_c1_uu_sH_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C__tC_r1_c1_uu_sH_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H__tC_r1_c1_uu_sH_dI_uG
+(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors)
+;
+
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
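[Editor's note: the long parameter lists above and below are cut to "[...]" by the mailing-list software, so only the visible prefix of each declaration is authoritative. The mnemonic fields in these generated kernel names appear to encode, in order: the operation variant (uxua presumably y += alpha*A*x with an explicit alpha, unua its y -= A*x counterpart, and sasa, further below, a strided-operand form), the numeric type (double), the column-index width (C for the full rsb_coo_idx_t, H for the narrower rsb_half_idx_t, as visible in the bindx parameter), the transposition (tN/tT/tC), the symmetry (sU/sS/sH), the diagonal storage (dE explicit, dI implicit), and a final triangle field (uG general here; the spsv kernels later use uU/uL). As a reading aid, here is a tiny self-contained decoder for those two-letter fields; it is an illustration of the inferred convention, not librsb code:

#include <stdio.h>
#include <string.h>

/* Illustration only (not part of librsb): decode the two-letter mnemonic
 * fields in a generated kernel name such as
 *   rsb__BCOR_spmv_uxua_double_C_u_tN_sS_dI_uG
 * The interpretations are inferred from the declarations in this header
 * and from the usual BLAS conventions, so treat them as an educated guess. */
static void decode_kernel_name(const char *name)
{
    char buf[128];
    char *tok, *last = NULL;

    strncpy(buf, name, sizeof buf - 1);
    buf[sizeof buf - 1] = '\0';

    printf("%s:\n", name);
    for (tok = strtok(buf, "_"); tok != NULL; tok = strtok(NULL, "_")) {
        if (strlen(tok) == 2) {
            switch (tok[0]) {
            case 't': /* transposition: N = none, T = transposed, C = conjugate-transposed */
                printf("  transpose: %c\n", tok[1]); break;
            case 's': /* symmetry: U = unsymmetric, S = symmetric, H = Hermitian */
                printf("  symmetry : %c\n", tok[1]); break;
            case 'd': /* diagonal: E = stored explicitly, I = implicit (unit) */
                printf("  diagonal : %c\n", tok[1]); break;
            default: break;
            }
        }
        last = tok;
    }
    /* trailing u? field: G = general; U/L = upper/lower triangle (spsv kernels) */
    if (last != NULL && strlen(last) == 2 && last[0] == 'u')
        printf("  triangle : %c\n", last[1]);
}

int main(void)
{
    decode_kernel_name("rsb__BCOR_spmv_uxua_double_C_u_tN_sS_dI_uG");
    decode_kernel_name("rsb__BCOR_spsv_uxua_double_H_u_tT_sU_dE_uL");
    return 0;
}

End of editor's note.]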
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
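[Editor's note: the sasa group that follows appears to be the strided-operand form of the same multiply kernels; any stride arguments would sit in the truncated tails of these declarations, so they are not visible here and are not reconstructed. As a reminder of what a stride means in this context, here is a generic, self-contained coordinate-format SpMV with strided x and y; it is a plain illustration of the concept, not librsb code and not the generated kernels' actual logic:

#include <stdio.h>

/* Generic illustration (not librsb code): accumulate
 * y[IA[k]*incy] += VA[k] * x[JA[k]*incx] over nnz coordinate-format
 * entries; this is what a "strided" SpMV variant does, whatever the
 * exact generated-kernel signature may be. */
static void spmv_coo_strided(const double *VA, const int *IA, const int *JA,
                             int nnz, const double *x, int incx,
                             double *y, int incy)
{
    int k;
    for (k = 0; k < nnz; ++k)
        y[IA[k] * incy] += VA[k] * x[JA[k] * incx];
}

int main(void)
{
    /* 2x2 matrix [[2,0],[1,3]] in coordinate form */
    const double VA[] = { 2.0, 1.0, 3.0 };
    const int IA[] = { 0, 1, 1 }, JA[] = { 0, 0, 1 };
    double x[] = { 1.0, 0.0, 1.0, 0.0 };   /* logical x = (1,1), incx = 2 */
    double y[] = { 0.0, 0.0, 0.0, 0.0 };   /* logical y = (0,0), incy = 2 */

    spmv_coo_strided(VA, IA, JA, 3, x, 2, y, 2);
    printf("y = (%g, %g)\n", y[0], y[2]);  /* expect (2, 4) */
    return 0;
}

End of editor's note.]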
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
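[Editor's note: here the declarations switch from multiplication (spmv) to triangular-solve (spsv) kernels, and two differences stand out: rhs and out lose their restrict qualifiers, presumably because a solve may legitimately run with out aliasing rhs, and the final uG field gives way to uU/uL, which by all appearances selects the upper or lower triangle. Since these prototypes appear in full (no "[...]" truncation), a thin dispatch wrapper can be sketched against them. The typedefs below are local stand-ins for the rsb_* typedefs, used only to keep the fragment self-compiling, and the wrapper and its is_upper parameter are hypothetical; linking would require the generated librsb kernels:

/* Hypothetical sketch (not librsb code): forward to the upper- or
 * lower-triangular solve kernel declared below, chosen at run time. */
typedef int rsb_err_t;      /* local stand-ins for the rsb_* typedefs */
typedef int rsb_coo_idx_t;
typedef int rsb_nnz_idx_t;
typedef int rsb_flags_t;

/* Prototypes as declared in this header (uU = upper, uL = lower). */
rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sU_dE_uU(const double *VA,
        const double *rhs, double *out, rsb_coo_idx_t Mdim, rsb_coo_idx_t mdim,
        const rsb_coo_idx_t *bindx, const rsb_nnz_idx_t *bpntr,
        const rsb_nnz_idx_t *indptr, const rsb_coo_idx_t *rpntr,
        const rsb_coo_idx_t *cpntr, rsb_coo_idx_t br, rsb_coo_idx_t bc,
        rsb_coo_idx_t roff, rsb_coo_idx_t coff, rsb_flags_t flags,
        rsb_nnz_idx_t nnz);

rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sU_dE_uL(const double *VA,
        const double *rhs, double *out, rsb_coo_idx_t Mdim, rsb_coo_idx_t mdim,
        const rsb_coo_idx_t *bindx, const rsb_nnz_idx_t *bpntr,
        const rsb_nnz_idx_t *indptr, const rsb_coo_idx_t *rpntr,
        const rsb_coo_idx_t *cpntr, rsb_coo_idx_t br, rsb_coo_idx_t bc,
        rsb_coo_idx_t roff, rsb_coo_idx_t coff, rsb_flags_t flags,
        rsb_nnz_idx_t nnz);

/* 'is_upper' is an invented parameter for this sketch. */
rsb_err_t spsv_uxua_dE_dispatch(int is_upper, const double *VA,
        const double *rhs, double *out, rsb_coo_idx_t Mdim, rsb_coo_idx_t mdim,
        const rsb_coo_idx_t *bindx, const rsb_nnz_idx_t *bpntr,
        const rsb_nnz_idx_t *indptr, const rsb_coo_idx_t *rpntr,
        const rsb_coo_idx_t *cpntr, rsb_coo_idx_t br, rsb_coo_idx_t bc,
        rsb_coo_idx_t roff, rsb_coo_idx_t coff, rsb_flags_t flags,
        rsb_nnz_idx_t nnz)
{
    /* both kernels share one type, so a conditional picks the designator */
    return (is_upper ? rsb__BCOR_spsv_uxua_double_C_u_tN_sU_dE_uU
                     : rsb__BCOR_spsv_uxua_double_C_u_tN_sU_dE_uL)
           (VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr,
            rpntr, cpntr, br, bc, roff, coff, flags, nnz);
}

End of editor's note.]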
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tN_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tN_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tT_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tT_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_C_u_tC_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_H_u_tC_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+
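(The block above declares the spsv_uxua double-precision triangular-solve kernels. Each name suffix encodes one specialization, and most of the encoding can be read straight off the prototypes: C variants index blocks with full-width rsb_coo_idx_t while H variants take rsb_half_idx_t; tN/tT/tC select the untransposed, transposed, or conjugate-transposed operation; dE/dI an explicitly stored versus implicit diagonal; and uU/uL the upper or lower triangle. The sU/sS/sH letter plausibly encodes the symmetry variant, though that is an inference from the pattern rather than anything these declarations state. A minimal sketch of calling one such kernel directly, assuming the prototypes above and librsb's rsb_* typedefs are in scope; the wrapper name is illustrative, not librsb API:

	/* A minimal sketch, not librsb's own dispatch code: invoke one
	 * generated lower-triangular solve kernel directly.  Assumes
	 * the generated header this diff adds has been included. */
	static rsb_err_t solve_lower_notrans(
		const double *VA, const double *rhs, double *out,
		rsb_coo_idx_t Mdim, rsb_coo_idx_t mdim,
		const rsb_coo_idx_t *bindx, const rsb_nnz_idx_t *bpntr,
		const rsb_nnz_idx_t *indptr, const rsb_coo_idx_t *rpntr,
		const rsb_coo_idx_t *cpntr, rsb_coo_idx_t br, rsb_coo_idx_t bc,
		rsb_coo_idx_t roff, rsb_coo_idx_t coff, rsb_flags_t flags,
		rsb_nnz_idx_t nnz)
	{
		/* C = full-width indices, tN = no transpose, sS = (assumed)
		   symmetric variant, dE = explicit diagonal, uL = lower. */
		return rsb__BCOR_spsv_uxua_double_C_u_tN_sS_dE_uL(
			VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr,
			rpntr, cpntr, br, bc, roff, coff, flags, nnz);
	}

The argument order mirrors the declarations verbatim, so a dispatcher over (transpose, diagonal, triangle) reduces to selecting the mangled name.)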
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
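(The spmv_sxsa block above lists the matching multiply kernels; the spsv_sxsx solve kernels follow. Within each pair, the only structural difference between the C and H prototypes is the element type of bindx, so driving an H kernel only requires narrowing the block indices beforehand. An illustrative helper, assuming every index value fits in rsb_half_idx_t; this is not a librsb API:

	/* Illustrative only, not part of librsb: narrow full-width
	 * block indices to the halfword type the H kernels take.
	 * Assumes all values fit in rsb_half_idx_t. */
	static void narrow_block_indices(const rsb_coo_idx_t *src,
		rsb_half_idx_t *dst, rsb_nnz_idx_t n)
	{
		rsb_nnz_idx_t i;

		for (i = 0; i < n; ++i)
			dst[i] = (rsb_half_idx_t)src[i];
	}

Halving the index width halves the index-stream bandwidth of the kernel, which is presumably why both families are generated.)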
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tN_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tN_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tT_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tT_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_C_u_tC_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_H_u_tC_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+
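+/*
+ * Naming convention of the generated kernels below (a best-effort reading
+ * inferred from the prototypes themselves, not an authoritative spec):
+ *
+ *   rsb__BCOR_<op>_[<variant>_]<type>_<idx>_u_t<T>_s<S>_d<D>_u<U>
+ *
+ *   <op>    operation: spsv, infty_norm, rowssums, scale, ...
+ *   <type>  numerical type of VA (here: double)
+ *   <idx>   C: full rsb_coo_idx_t bindx indices;
+ *           H: halfword rsb_half_idx_t bindx indices
+ *   t<T>    transposition: tN none, tT transposed, tC conjugate-transposed
+ *   s<S>    symmetry: sU unsymmetric, sS symmetric, sH hermitian
+ *   d<D>    diagonal: dE explicit, dI implicit (presumably unit)
+ *   u<U>    uU/uL: upper/lower triangle (spsv); uG: general
+ *
+ * E.g. rsb__BCOR_spsv_sxsx_double_H_u_tN_sS_dI_uU would then denote a
+ * double-precision triangular solve over halfword-indexed coordinates,
+ * untransposed, symmetric storage, implicit diagonal, upper triangle.
+ */
+
+/* Infinity-norm kernels: judging by the row_sums output parameter, these
+   appear to accumulate per-row absolute-value sums, from which the
+   infinity norm max_i sum_j |a_ij| is then taken. */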
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tN_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tN_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tN_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tN_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tT_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tT_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tT_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tT_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tC_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tC_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tC_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tC_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tN_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tN_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tN_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tN_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tT_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tT_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tT_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tT_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tC_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tC_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tC_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tC_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tN_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tN_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tN_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tN_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tT_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tT_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tT_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tT_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tC_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_C_u_tC_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tC_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_H_u_tC_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
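+/* Row-sums kernels: same signature as the infinity-norm kernels above;
+   by the name, these appear to accumulate plain (signed) per-row sums
+   into row_sums. */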
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tN_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tN_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tN_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tN_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tT_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tT_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tT_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tT_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tC_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tC_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tC_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tC_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tN_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tN_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tN_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tN_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tT_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tT_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tT_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tT_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tC_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tC_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tC_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tC_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tN_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tN_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tN_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tN_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tT_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tT_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tT_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tT_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tC_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_C_u_tC_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tC_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_H_u_tC_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
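+/* Scaling kernels: VA is non-const and a scale_factors array is taken,
+   so these appear to scale the stored values in place (presumably one
+   factor per row). */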
+rsb_err_t rsb__BCOR_scale_double_C_u_tN_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tN_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tN_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tN_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tT_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tT_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tT_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tT_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tC_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tC_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tC_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tC_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tN_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tN_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tN_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tN_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tT_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tT_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tT_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tT_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tC_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tC_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tC_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tC_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tN_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tN_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tN_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tN_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tT_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tT_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tT_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tT_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tC_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_C_u_tC_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tC_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_H_u_tC_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double *scale_factors);
+
+
+
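+/*
+ * Naming sketch for the generated kernels in this header (an inference from
+ * the declarations themselves, not an authoritative key): each suffix field
+ * appears to encode one specialization axis:
+ *   - the numerical type ("double", "float", ...);
+ *   - the index width: "_C_" variants take rsb_coo_idx_t block indices in
+ *     bindx, while "_H_" variants take rsb_half_idx_t (halfword) indices;
+ *   - "tN"/"tT"/"tC": presumably non-transposed / transposed /
+ *     conjugate-transposed operation;
+ *   - "sU"/"sS"/"sH": presumably unsymmetric / symmetric / hermitian;
+ *   - "dE"/"dI": presumably explicit / implicit diagonal;
+ *   - "uG": presumably the generic (non-unrolled) loop variant.
+ * The rsb__BCOR_scale_* kernels above scale the values array VA by the given
+ * scale_factors; the rsb__BCOR_spmv_* kernels below compute sparse
+ * matrix-vector products from rhs into out.
+ */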
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
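+/*
+ * The "uauz" kernels below appear to differ from the "uaua" ones above only
+ * in output handling: a plausible reading of the infix is unit alpha with
+ * zeroed output (out overwritten) for "uauz", versus unit alpha with
+ * accumulation (out added to) for "uaua".  This is an inference from the
+ * naming pattern, not a documented contract.
+ */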
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
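+/*
+ * The "uxua" kernels below presumably generalize the "uaua" ones by scaling
+ * with an arbitrary alpha operand (the "x" field), while still accumulating
+ * into out.  As above, this is an inference from the naming scheme only.
+ */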
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tN_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tN_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tT_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tT_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_C_u_tC_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_H_u_tC_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+
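+/*
+ * Editor's note (inferred): the kernels below are sparse matrix-vector
+ * multiply (spmv) rather than triangular solve (spsv). Under the same
+ * naming convention, the "sxsa" infix plausibly denotes strided vector
+ * access with alpha scaling and accumulation, which would account for
+ * the extra trailing parameters these prototypes carry (elided in this
+ * listing as " [...]"). The uG suffix marks the general, non-triangular
+ * case, so no uU/uL pairs appear in this group.
+ */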
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
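+/*
+ * Editor's note (inferred): the spsv_sxsx kernels below return to
+ * triangular solve, in the strided/scaled variant suggested by the
+ * "sxsx" infix; accordingly they again carry the uU/uL (upper/lower
+ * triangle) suffixes instead of uG.
+ */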
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tN_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tN_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tT_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tT_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_C_u_tC_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_H_u_tC_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
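The generated prototypes above and below follow a fixed naming scheme. The _C_/_H_ letter is directly visible in the signatures: C variants take a full rsb_coo_idx_t bindx array, H variants a compact rsb_half_idx_t one. The remaining letter groups appear to encode the operation variant: tN/tT/tC presumably for no, plain, and conjugate transpose; sU/sS/sH for unsymmetric, symmetric, and Hermitian storage; dE/dI for an explicit versus implicit (unit) diagonal; uU/uL for the upper or lower triangle (with uG, seen further down, for general). As a rough reference for what a ..._tN_..._dE_uL solve computes, here is a minimal forward-substitution sketch on a row-sorted COO block; it is illustrative only, not librsb's implementation: plain int/size_t indices stand in for the rsb_* typedefs, and the real kernels additionally handle the row/column offsets, alpha scaling, and flags arguments seen in the prototypes.

#include <stddef.h>

/* Illustrative only: solve L*out = rhs for a lower-triangular matrix with
 * an explicit diagonal, stored as COO sorted by row (VA values, IA row
 * indices, JA column indices, nnz entries, n rows). */
static int coo_spsv_lower(const float *VA, const int *IA, const int *JA,
                          size_t nnz, int n, const float *rhs, float *out)
{
    size_t k = 0;
    int i;
    for (i = 0; i < n; ++i) {
        float sum = rhs[i];
        float dii = 0.0f;
        /* Consume all entries of row i (input assumed sorted by row). */
        for (; k < nnz && IA[k] == i; ++k) {
            if (JA[k] < i)
                sum -= VA[k] * out[JA[k]]; /* strictly lower entries */
            else if (JA[k] == i)
                dii = VA[k];               /* explicit diagonal entry */
        }
        if (dii == 0.0f)
            return -1;                     /* singular: missing diagonal */
        out[i] = sum / dii;
    }
    return 0;
}

The uU (upper-triangle) variants would run the same recurrence backwards as a back substitution, and the tT/tC variants would exchange the roles of row and column indices.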
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tN_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tN_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tN_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tN_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tT_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tT_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tT_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tT_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tC_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tC_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tC_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tC_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tN_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tN_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tN_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tN_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tT_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tT_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tT_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tT_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tC_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tC_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tC_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tC_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tN_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tN_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tN_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tN_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tT_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tT_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tT_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tT_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tC_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_C_u_tC_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tC_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_H_u_tC_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
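The rsb__BCOR_infty_norm_* family above accumulates per-row absolute-value sums into its row_sums argument; the infinity norm of the matrix is then the maximum over those sums, taken by the caller. A minimal illustrative sketch for the untransposed, general (tN/..._uG) case, assuming a plain row-index array IA in place of the bindx/bpntr block indexing used by the generated kernels:

#include <math.h>
#include <stddef.h>

/* Hypothetical stand-in for the generated kernels: add |a_ij| into
 * row_sums[i] for every stored entry; max_i row_sums[i] afterwards
 * yields the infinity norm. */
static void coo_infty_norm_accumulate(const float *VA, const int *IA,
                                      size_t nnz, float *row_sums)
{
    size_t k;
    for (k = 0; k < nnz; ++k)
        row_sums[IA[k]] += fabsf(VA[k]);
}

For the transposed (tT/tC) variants the accumulation index would presumably be the column instead of the row, and the symmetric (sS/sH) variants would credit each off-diagonal entry to both its row and its column.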
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tN_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tN_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tN_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tN_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tT_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tT_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tT_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tT_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tC_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tC_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tC_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tC_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tN_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tN_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tN_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tN_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tT_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tT_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tT_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tT_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tC_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tC_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tC_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tC_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tN_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tN_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tN_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tN_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tT_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tT_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tT_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tT_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tC_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_C_u_tC_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tC_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_H_u_tC_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
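The rsb__BCOR_rowssums_* kernels just listed perform the same per-row accumulation but with signed values, and the rsb__BCOR_scale_* family that follows (note the non-const VA and the trailing scale_factors parameter in its prototypes) rescales the stored values in place, one factor per row. A combined illustrative sketch under the same plain-COO assumption as above:

#include <stddef.h>

/* Signed per-row sums, the job of the rowssums kernels. */
static void coo_rowssums(const float *VA, const int *IA,
                         size_t nnz, float *row_sums)
{
    size_t k;
    for (k = 0; k < nnz; ++k)
        row_sums[IA[k]] += VA[k];
}

/* In-place row scaling, the job of the scale kernels: VA is writable
 * here, matching the non-const VA in the scale prototypes below. */
static void coo_scale_rows(float *VA, const int *IA, size_t nnz,
                           const float *scale_factors)
{
    size_t k;
    for (k = 0; k < nnz; ++k)
        VA[k] *= scale_factors[IA[k]];
}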
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tN_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tN_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tN_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tN_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tT_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tT_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tT_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tT_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tC_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tC_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tC_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tC_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tN_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tN_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tN_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tN_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tT_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tT_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tT_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tT_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tC_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tC_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tC_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tC_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tN_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tN_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tN_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tN_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tT_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tT_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tT_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tT_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tC_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_C_u_tC_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tC_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_H_u_tC_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float *scale_factors);
+
+
+
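+/*
+ * NOTE (editorial assumption): the rsb__BCOR_spmv_uaua_* prototypes add an
+ * rhs input vector and an out output vector, so these are sparse
+ * matrix-vector multiply kernels; the "uaua" infix plausibly means unit
+ * alpha with accumulation into out (out += A * rhs), here for the
+ * float complex type.
+ */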
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
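+/*
+ * NOTE (editorial assumption): "uauz" differs from "uaua" only in its last
+ * letter, which plausibly marks a zeroing variant: out = A * rhs, i.e. the
+ * output vector is overwritten rather than accumulated into.
+ */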
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
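+/*
+ * NOTE (editorial assumption): "uxua" plausibly marks a general (non-unit)
+ * alpha variant, out += alpha * A * rhs; any extra alpha argument would
+ * fall in the part of these prototypes elided by the "[...]" truncation.
+ */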
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
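(These specialized kernels live in librsb's internal double-underscore namespace; applications reach them through the public interface in rsb.h, which dispatches on type, transposition, symmetry and index width. A minimal caller sketch follows, assuming the public librsb 1.2 API — rsb_lib_init, rsb_mtx_alloc_from_coo_const, rsb_spmv — and a made-up 2x2 float complex matrix; it is an illustration of the dispatch path, not part of the upstream file.)

#include <rsb.h>
#include <complex.h>

int main(void)
{
	/* 2x2 example matrix in COO form: values, row and column indices */
	const float complex VA[] = { 1.0f + 1.0f*I, 2.0f, 3.0f };
	const rsb_coo_idx_t IA[] = { 0, 0, 1 }, JA[] = { 0, 1, 1 };
	const float complex X[] = { 1.0f, 1.0f }, alpha = 1.0f, beta = 0.0f;
	float complex Y[2] = { 0.0f, 0.0f };
	struct rsb_mtx_t *mtxAp = NULL;
	rsb_err_t errval = RSB_ERR_NO_ERROR;

	if (rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
		return 1;
	/* assemble an RSB matrix; librsb chooses the block layout and
	 * index width (full vs half word) internally */
	mtxAp = rsb_mtx_alloc_from_coo_const(VA, IA, JA, 3,
		RSB_NUMERICAL_TYPE_FLOAT_COMPLEX, 2, 2, 1, 1,
		RSB_FLAG_NOFLAGS, &errval);
	if (mtxAp == NULL)
		return 1;
	/* Y := alpha * A * X + beta * Y; a kernel like the rsb__BCOR_spmv_*
	 * prototypes above is selected behind this call */
	errval = rsb_spmv(RSB_TRANSPOSITION_N, &alpha, mtxAp, X, 1, &beta, Y, 1);
	rsb_mtx_free(mtxAp);
	rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
	return (errval == RSB_ERR_NO_ERROR) ? 0 : 1;
}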
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tN_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tT_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tT_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_C_u_tC_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_float_complex_H_u_tC_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+
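[Not part of the patch above: the mangled suffixes on these generated prototypes are compact but opaque, so here is a small self-contained sketch that decodes them. What is grounded in the prototypes themselves: "spsv"/"spmv" select triangular solve vs. matrix-vector multiply, and the "_C_"/"_H_" variants take rsb_coo_idx_t vs. rsb_half_idx_t column indices (compare their bindx parameters). The readings of tN/tT/tC, sU/sS/sH, dE/dI and uU/uL/uG below follow common BLAS conventions and are assumptions, not statements about librsb internals; the uxua/sxsa/sxsx scaling field is deliberately left undecoded.]

    /* Hedged decoder sketch for the generated kernel names; assumptions
     * are marked as such in the output. */
    #include <stdio.h>
    #include <string.h>

    static void decode(const char *name)
    {
        printf("%s\n", name);
        printf("  operation: %s\n",
               strstr(name, "_spsv_") ? "sparse triangular solve (spsv)" :
               strstr(name, "_spmv_") ? "sparse matrix-vector multiply (spmv)" : "?");
        printf("  indices:   %s\n",
               strstr(name, "_C_") ? "rsb_coo_idx_t (full width)" :
               strstr(name, "_H_") ? "rsb_half_idx_t (halfword)" : "?");
        printf("  transpose: %s\n",
               strstr(name, "_tN") ? "none" :
               strstr(name, "_tT") ? "transpose" :
               strstr(name, "_tC") ? "conjugate transpose" : "?");
        printf("  symmetry:  %s (assumed)\n",
               strstr(name, "_sU") ? "unsymmetric" :
               strstr(name, "_sS") ? "symmetric" :
               strstr(name, "_sH") ? "Hermitian" : "?");
        printf("  diagonal:  %s (assumed)\n",
               strstr(name, "_dE") ? "explicit" :
               strstr(name, "_dI") ? "implicit unit" : "?");
        printf("  triangle:  %s (assumed)\n",
               strstr(name, "_uU") ? "upper" :
               strstr(name, "_uL") ? "lower" :
               strstr(name, "_uG") ? "general/whole matrix" : "?");
    }

    int main(void)
    {
        /* Two names taken verbatim from the prototypes in this patch. */
        decode("rsb__BCOR_spsv_uxua_float_complex_H_u_tN_sU_dE_uL");
        decode("rsb__BCOR_spmv_sxsa_float_complex_C_u_tC_sH_dI_uG");
        return 0;
    }
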
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tN_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tN_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tT_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tT_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_C_u_tC_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_float_complex_H_u_tC_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tN_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tN_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tN_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tN_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tT_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tT_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tT_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tT_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tC_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tC_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tC_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tC_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tN_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tN_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tN_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tN_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tT_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tT_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tT_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tT_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tC_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tC_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tC_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tC_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tN_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tN_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tN_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tN_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tT_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tT_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tT_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tT_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tC_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_C_u_tC_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tC_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_float_complex_H_u_tC_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
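The suffix grid in these generated kernel names is systematic: "_C_" versus "_H_" selects full (rsb_coo_idx_t) versus halfword (rsb_half_idx_t) column indices, as the differing bindx parameter types above show; tN/tT/tC presumably select the untransposed, transposed, and conjugate-transposed operation, sU/sS/sH unsymmetric, symmetric, and Hermitian storage, dE/dI an explicit versus implicit diagonal, and uU/uL/uG the upper, lower, or general (whole-matrix) case. Below is a minimal dispatch sketch over two of the complete prototypes above, not part of the patch: it assumes <rsb.h> provides the rsb_* typedefs, and pick_infty_norm_kernel() is a hypothetical helper, not librsb API.

/* Sketch only: the rsb__BCOR_* kernels are internal, auto-generated
 * variants that librsb normally selects among itself. */
#include <complex.h>
#include <rsb.h>  /* assumed source of rsb_err_t, rsb_coo_idx_t, ... */

/* A function typedef matching the full-index ("_C_") infty_norm
 * prototypes; top-level restrict/const qualifiers on parameters do not
 * affect type compatibility, so they are omitted here. */
typedef rsb_err_t rsb_infty_norm_kernel_t(
	const float complex *VA, float complex *row_sums,
	const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim,
	const rsb_coo_idx_t *bindx,
	const rsb_nnz_idx_t *bpntr, const rsb_nnz_idx_t *indptr,
	const rsb_coo_idx_t *rpntr, const rsb_coo_idx_t *cpntr,
	const rsb_coo_idx_t br, const rsb_coo_idx_t bc,
	const rsb_coo_idx_t roff, const rsb_coo_idx_t coff,
	const rsb_flags_t flags, const rsb_nnz_idx_t nnz);

/* Re-declaring two of the kernels above through the typedef (legal for
 * declarations, though not for definitions): */
extern rsb_infty_norm_kernel_t
	rsb__BCOR_infty_norm_float_complex_C_u_tN_sU_dE_uG,
	rsb__BCOR_infty_norm_float_complex_C_u_tN_sU_dI_uG;

/* Hypothetical chooser between the explicit-diagonal (dE) and
 * implicit-diagonal (dI) full-index variants: */
static rsb_infty_norm_kernel_t *
pick_infty_norm_kernel(int implicit_diagonal)
{
	return implicit_diagonal
		? rsb__BCOR_infty_norm_float_complex_C_u_tN_sU_dI_uG
		: rsb__BCOR_infty_norm_float_complex_C_u_tN_sU_dE_uG;
}

The same decode applies to the spsv and rowssums groups; only the operation name and, for spsv, the trailing parameters (truncated above) change across the grid.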
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tN_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tN_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tN_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tN_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tT_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tT_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tT_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tT_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tC_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tC_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tC_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tC_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tN_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tN_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tN_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tN_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tT_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tT_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tT_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tT_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tC_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tC_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tC_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tC_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tN_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tN_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tN_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tN_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tT_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tT_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tT_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tT_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tC_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_C_u_tC_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tC_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_float_complex_H_u_tC_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
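
/*
 * Editorial sketch, not part of librsb: every rsb__BCOR_rowssums_* prototype
 * above computes per-row sums of the stored values. For the untransposed,
 * unsymmetric case (suffixes tN and sU, by inference), the arithmetic reduces
 * to the loop below. The IA array here is a hypothetical plain COO row-index
 * vector; the real kernels instead take the blocked-layout arrays (bindx,
 * bpntr, rpntr, cpntr, ...) plus the roff/coff submatrix offsets.
 */
#include <complex.h>
#include <stdio.h>

static void coo_rowssums(const float complex *VA, const int *IA, int nnz,
                         float complex *row_sums)
{
    for (int n = 0; n < nnz; ++n)
        row_sums[IA[n]] += VA[n];     /* each stored value adds into its row */
}

int main(void)
{
    const float complex VA[] = { 1.0f + 1.0f * I, 2.0f, 3.0f * I, 4.0f };
    const int IA[] = { 0, 0, 1, 2 };  /* row of each of the 4 nonzeros */
    float complex rs[3] = { 0 };

    coo_rowssums(VA, IA, 4, rs);
    for (int i = 0; i < 3; ++i)
        printf("row %d: %g%+gi\n", i, crealf(rs[i]), cimagf(rs[i]));
    return 0;
}
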
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tN_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tN_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tN_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tN_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tT_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tT_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tT_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tT_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tC_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tC_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tC_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tC_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tN_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tN_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tN_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tN_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tT_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tT_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tT_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tT_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tC_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tC_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tC_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tC_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tN_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tN_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tN_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tN_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tT_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tT_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tT_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tT_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tC_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_C_u_tC_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tC_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_float_complex_H_u_tC_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const float complex *scale_factors);
+
+
+
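
/*
 * Editorial sketch, not part of librsb: the kernel names above and below are
 * mangled from a fixed set of traits. The _C/_H field is directly visible in
 * the prototypes (bindx is rsb_coo_idx_t vs rsb_half_idx_t); by inference
 * from BLAS conventions, tN/tT/tC select no/plain/conjugate transposition,
 * sU/sS/sH unsymmetric/symmetric/hermitian handling, and dE/dI an explicit
 * or implicit diagonal. This stand-alone decoder just splits a name into
 * those fields.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
    /* A kernel name taken from the prototypes below. */
    char name[] = "rsb__BCOR_spmv_uaua_double_complex_H_u_tT_sS_dI_uG";
    char idx = '?', tr = '?', sy = '?', dg = '?';

    for (char *tok = strtok(name, "_"); tok; tok = strtok(NULL, "_")) {
        size_t l = strlen(tok);
        if (l == 1 && (tok[0] == 'C' || tok[0] == 'H'))
            idx = tok[0];             /* index width: full or half      */
        else if (l == 2 && tok[0] == 't')
            tr = tok[1];              /* transposition: N, T or C       */
        else if (l == 2 && tok[0] == 's')
            sy = tok[1];              /* symmetry: U, S or H (inferred) */
        else if (l == 2 && tok[0] == 'd')
            dg = tok[1];              /* diagonal: E or I (inferred)    */
    }
    printf("index=%c transA=%c symmetry=%c diagonal=%c\n", idx, tr, sy, dg);
    return 0;
}
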
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uaua_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uauz_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_uxua_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_unua_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sasa_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
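[Editorial note, not part of the diff: the identifiers above follow librsb's generated-kernel name mangling. Reading the visible names, the pattern appears to be rsb__BCOR_<op>_<variant>_<type>_<C|H>_u_t<N|T|C>_s<U|S|H>_d<E|I>_u<G|U|L>: <op> is the operation (spmv, spsv); the C/H letter matches the bindx parameter type visible above (rsb_coo_idx_t versus rsb_half_idx_t column indices); t seems to encode transposition (none / transposed / conjugate-transposed), s the symmetry (unsymmetric / symmetric / hermitian), d the diagonal (explicit / implicit), and the final u the touched triangle (general / upper / lower). This decoding is inferred from the names alone; the trailing parameters are truncated by the mailing-list display ("[...]") and are deliberately not reconstructed. As a hedged illustration of what an spmv kernel of this family computes on one coordinate-format block, here is a minimal reference loop — not librsb code; coo_spmv_acc, rindx, nnz, and the index handling via roff/coff are all hypothetical, suggested only by the parameter names above:]

#include <complex.h>
#include <stddef.h>

/* Hedged sketch: accumulate out += A * rhs over one COO block of nnz
 * entries, with values VA and row/column indices rindx/bindx shifted by
 * the block offsets roff/coff, as the roff/coff parameters above suggest.
 * All names here are hypothetical; this is not the generated kernel. */
void coo_spmv_acc(const double complex *VA,
                  const int *rindx, const int *bindx, size_t nnz,
                  int roff, int coff,
                  const double complex *rhs, double complex *out)
{
	for (size_t n = 0; n < nnz; ++n)
		out[rindx[n] + roff] += VA[n] * rhs[bindx[n] + coff];
}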
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tN_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tN_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tT_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tT_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_C_u_tC_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCOR_spsv_uxua_double_complex_H_u_tC_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+
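[Editorial note, not part of the diff: the spsv_uxua block ending above declares sparse triangular-solve kernels. Under the naming reading given earlier, the trailing _uU/_uL pair selects the upper or lower triangle, and dE/dI whether the diagonal is stored explicitly or implied to be unit. A minimal sketch of the lower-triangular, implicit-unit-diagonal case on a CSR-like layout follows (plain forward substitution; csr_spsv_lower_unit and the use of bpntr as row pointers are assumptions, not the generated code):]

#include <complex.h>

/* Hedged sketch: solve L * out = rhs for a lower-triangular CSR matrix
 * with an implicit unit diagonal (one reading of "dI_uL" above).
 * bpntr[i]..bpntr[i+1] is assumed to span row i's strictly-lower
 * entries; all names are hypothetical. */
void csr_spsv_lower_unit(int Mdim,
                         const int *bpntr, const int *bindx,
                         const double complex *VA,
                         const double complex *rhs,
                         double complex *out)
{
	for (int i = 0; i < Mdim; ++i) {
		double complex s = rhs[i];
		for (int n = bpntr[i]; n < bpntr[i + 1]; ++n)
			s -= VA[n] * out[bindx[n]];
		out[i] = s; /* implicit unit diagonal: no division */
	}
}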
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCOR_spmv_sxsa_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
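[Editorial note, not part of the diff: the spmv_sxsa group above, like the others, enumerates tN/tT/tC transposition variants; the differing four-letter variant codes across groups (sasa, uxua, sxsa, and sxsx below) appear to distinguish scaled versus unscaled operand handling, but their exact semantics are not recoverable from this truncated listing. For the conjugate-transposed ("tC") double complex case specifically, the computation plausibly being generated is out += A^H * rhs; a hedged, self-contained sketch on one COO block (hypothetical names, not the generated code):]

#include <complex.h>
#include <stddef.h>

/* Hedged sketch of a conjugate-transposed ("tC") accumulate: each stored
 * entry (i, j, v) of A contributes conj(v) * rhs[i] to out[j], i.e.
 * out += A^H * rhs. rindx and nnz are hypothetical parameter names. */
void coo_spmv_conj_trans_acc(const double complex *VA,
                             const int *rindx, const int *bindx,
                             size_t nnz,
                             const double complex *rhs,
                             double complex *out)
{
	for (size_t n = 0; n < nnz; ++n)
		out[bindx[n]] += conj(VA[n]) * rhs[rindx[n]];
}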
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tN_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tN_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tT_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tT_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_C_u_tC_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCOR_spsv_sxsx_double_complex_H_u_tC_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+
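[Editorial note: the spsv_* family above declares the matching sparse triangular solves. As a sketch of what the uL ("lower") / dE ("explicit diagonal") case computes, the following hypothetical forward substitution assumes a lower-triangular COO matrix with entries grouped by row in ascending row order, and out initially holding the right-hand side; this is an illustrative reduction of the operation, not librsb's generated kernel:

    #include <complex.h>
    #include <stddef.h>

    /* Hypothetical in-place forward substitution solving L * out = b.
     * diag defaults to 1.0 so the same loop also covers the dI
     * (implicit-unit-diagonal) variants. */
    static void coo_spsv_lower_ref(size_t nnz,
                                   const int *IA, const int *JA,
                                   const double complex *VA,
                                   double complex *out)
    {
        size_t k = 0;
        while (k < nnz) {
            int i = IA[k];
            double complex sum = 0.0, diag = 1.0;
            for (; k < nnz && IA[k] == i; ++k) {
                if (JA[k] == i)
                    diag = VA[k];               /* explicit diagonal entry */
                else
                    sum += VA[k] * out[JA[k]];  /* strictly lower part     */
            }
            out[i] = (out[i] - sum) / diag;
        }
    }

The uU (upper-triangular) variants would run the analogous backward substitution.]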
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tN_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tN_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tN_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tN_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tT_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tT_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tT_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tT_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tC_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tC_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tC_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tC_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tN_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tN_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tN_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tN_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tT_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tT_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tT_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tT_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tC_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tC_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tC_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tC_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tN_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tN_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tN_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tN_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tT_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tT_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tT_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tT_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tC_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_C_u_tC_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tC_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_infty_norm_double_complex_H_u_tC_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
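[Editorial note: the infty_norm_* kernels above and the rowssums_* kernels that follow share one signature: both fill a caller-supplied row_sums vector, with no vector operands. Presumably the infinity-norm variants accumulate per-row sums of absolute values (from which ||A||_inf is the maximum entry), while rowssums accumulates the plain complex row sums. A minimal hypothetical reference for both reductions, assuming zero-initialized output arrays and illustrative names:

    #include <complex.h>
    #include <stddef.h>

    /* Hypothetical per-row reductions over a COO matrix: abs_row_sums
     * feeds an infinity-norm computation, row_sums the plain row sums.
     * Both output arrays must be zeroed by the caller beforehand. */
    static void coo_row_reductions_ref(size_t nnz, const int *IA,
                                       const double complex *VA,
                                       double complex *abs_row_sums,
                                       double complex *row_sums)
    {
        for (size_t k = 0; k < nnz; ++k) {
            abs_row_sums[IA[k]] += cabs(VA[k]); /* infinity-norm term */
            row_sums[IA[k]]     += VA[k];       /* rowssums term      */
        }
    }

That row_sums is declared double complex * even for the norm kernels suggests the absolute sums are simply stored in the real parts; that detail is an inference from the prototypes, not documented here.]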
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tN_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tN_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tN_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tN_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tT_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tT_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tT_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tT_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tC_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tC_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tC_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tC_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tN_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tN_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tN_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tN_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tT_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tT_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tT_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tT_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tC_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tC_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tC_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tC_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tN_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tN_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tN_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tN_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tT_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tT_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tT_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tT_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tC_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_C_u_tC_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tC_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_rowssums_double_complex_H_u_tC_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tN_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tN_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tN_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tN_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tT_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tT_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tT_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tT_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tC_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tC_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tC_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tC_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tN_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tN_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tN_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tN_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tT_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tT_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tT_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tT_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tC_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tC_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tC_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tC_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tN_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tN_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tN_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tN_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tT_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tT_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tT_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tT_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tC_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_C_u_tC_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tC_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCOR_scale_double_complex_H_u_tC_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_nnz_idx_t nnz, const double complex *scale_factors);
+
+
+#endif /* RSB_BCOO_SPMV_U_H_INCLUDED */
+/* @endcond */
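
The suffixes in these generated names appear to encode the kernel variant: _C_ and _H_ select full (rsb_coo_idx_t) or halfword (rsb_half_idx_t) block indices, as the differing bindx parameter types show; tN/tT/tC presumably select no, real, or conjugate transposition; sU/sS/sH unsymmetric, symmetric, or Hermitian handling; and dE/dI an explicit or implicit diagonal. A minimal sketch of what the rowssums and scale operations compute on COO data follows (illustrative only, not the library's unrolled kernels, and assuming bpntr carries the row index of each nonzero):

  #include <complex.h>

  /* Illustrative sketch: accumulate per-row sums of a COO submatrix.
   * For each nonzero k with value VA[k] in row bpntr[k], add VA[k] into
   * row_sums[roff + bpntr[k]]; the generated kernels above do the same
   * work with type- and variant-specialized unrolled code. */
  static void coo_rowssums_sketch(const double complex *VA,
                                  double complex *row_sums,
                                  const int *bpntr, /* per-nonzero row index (assumed) */
                                  int roff, int nnz)
  {
  	int k;
  	for (k = 0; k < nnz; ++k)
  		row_sums[roff + bpntr[k]] += VA[k];
  }

  /* Corresponding sketch for the scale kernels: multiply each nonzero
   * by a per-row scale factor. */
  static void coo_scale_sketch(double complex *VA,
                               const int *bpntr,
                               const double complex *scale_factors, int nnz)
  {
  	int k;
  	for (k = 0; k < nnz; ++k)
  		VA[k] *= scale_factors[bpntr[k]];
  }
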
diff --git a/rsb_krnl_bcoo_spmv_u.m4 b/rsb_krnl_bcoo_spmv_u.m4
new file mode 100644
index 0000000..15cb687
--- /dev/null
+++ b/rsb_krnl_bcoo_spmv_u.m4
@@ -0,0 +1,56 @@
+dnl
+dnl	@author: Michele Martone
+dnl
+/* @cond INNERDOC */
+dnl
+/*!
+ @file
+ @brief
+ Performance kernel dispatching code, for each type, submatrix size, and operation,
+ for the block coordinates (BCOO) format.
+ Kernels are fully unrolled, with no loops, and generated only for user-specified blockings.
+ */
+dnl
+include(`rsb_misc.m4')dnl
+RSB_M4_HEADER_MESSAGE()dnl
+include(`rsb_krnl_bcoo_macros.m4')dnl
+include(`rsb_krnl_vb_macros.m4')dnl FIXME : RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_NAME
+dnl
+dnl
+dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl				Function definitions
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#ifndef RSB_BCOO_SPMV_U_H_INCLUDED
+#define RSB_BCOO_SPMV_U_H_INCLUDED
+')dnl
+dnl 
+dnl
+ifelse(dnl
+RSB_M4_MEMBER(`BCOR',WANT_MATRIX_STORAGE)dnl
+RSB_M4_MEMBER(`BCOC',WANT_MATRIX_STORAGE)dnl
+,`00',`dnl
+/**
+ * No BCOO formats compiled in.
+ */
+',`dnl
+dnl
+dnl
+dnl
+dnl
+RSB_M4_INCLUDE_HEADERS
+dnl
+RSB_M4_BCOO_SPMV_KERNELS((`u'))
+dnl
+')dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#endif /* RSB_BCOO_SPMV_U_H_INCLUDED */
+')dnl
+dnl
+/* @endcond */
+dnl
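
The ONLY_WANT_HEADERS conditional above lets a single M4 template generate both the header and the implementation file: when the symbol is defined, the expansion is wrapped in the include guard seen at the top and bottom of rsb_krnl_bcoo_spmv_u.h; otherwise the same RSB_M4_BCOO_SPMV_KERNELS invocation expands to the kernel definitions. Schematically, the two expansions look like:

  /* Header-mode expansion (ONLY_WANT_HEADERS defined): an include guard
   * around bare prototypes, as in the generated rsb_krnl_bcoo_spmv_u.h. */
  #ifndef RSB_BCOO_SPMV_U_H_INCLUDED
  #define RSB_BCOO_SPMV_U_H_INCLUDED
  /* ... generated kernel prototypes, as listed above ... */
  #endif /* RSB_BCOO_SPMV_U_H_INCLUDED */
  /* Source-mode expansion (symbol undefined): the guard is omitted and
   * the same macros emit the kernel bodies instead. */
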
diff --git a/rsb_krnl_bcss.c b/rsb_krnl_bcss.c
new file mode 100644
index 0000000..4578f39
--- /dev/null
+++ b/rsb_krnl_bcss.c
@@ -0,0 +1,38 @@
+/* @cond INNERDOC */
+/*!
+ @file
+ @brief
+
+ Performance kernel dispatching code, for each type, submatrix size, and operation,
+ for the block compressed sparse stripes (BCSS) format.
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script.
+ It is not meant to be used as an API (Application Programming Interface).
+ N.B.: at present, only row-major matrix access is considered.
+
+ */
+#include "rsb_internals.h"
+/* @endcond */
diff --git a/rsb_krnl_bcss.h b/rsb_krnl_bcss.h
new file mode 100644
index 0000000..6666be2
--- /dev/null
+++ b/rsb_krnl_bcss.h
@@ -0,0 +1,5387 @@
+/* @cond INNERDOC */
+/*!
+ @file
+ @brief
+
+ Performance kernel dispatching code, for each type, submatrix size, and operation,
+ for the block compressed sparse stripes (BCSS) format.
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script.
+ It is not meant to be used as an API (Application Programming Interface).
+ N.B.: at present, only row-major matrix access is considered.
+
+ */
+#ifndef RSB_BCSS_H_INCLUDED
+#define RSB_BCSS_H_INCLUDED
+#include "rsb_internals.h"
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uaua_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uaua_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float_complex___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double_complex___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+NULL ) 
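
The rationale stated in the repeated comment is that a chain of conditional expressions can be used where a switch statement cannot: the dispatcher is an expression yielding a function pointer, and when R and C are compile-time constants the (R)==(1) && (C)==(1) tests fold away entirely. A self-contained sketch of the same two-level pattern (names illustrative, not librsb's):

  #include <stdio.h>

  typedef void (*kernel_t)(void);

  static void spmv_double_1x1(void) { puts("double 1x1 kernel"); }
  static void spmv_float_1x1(void)  { puts("float 1x1 kernel"); }

  enum { TYPE_DOUBLE = 'D', TYPE_FLOAT = 'S' };

  /* Per-type dispatcher: only the 1x1 blocking is compiled in, so any
   * other (R,C) yields NULL, mirroring the empty else branches above. */
  #define DISPATCH_DOUBLE(R,C) ((R)==1 && (C)==1 ? spmv_double_1x1 : (kernel_t)NULL)
  #define DISPATCH_FLOAT(R,C)  ((R)==1 && (C)==1 ? spmv_float_1x1  : (kernel_t)NULL)

  /* Type-level dispatcher, as in RSB_type_kernel_dispatcher_BCSR_...:
   * one expression selects among the per-type dispatchers. */
  #define DISPATCH(TYPE,R,C) \
  	((TYPE)==TYPE_DOUBLE ? DISPATCH_DOUBLE(R,C) : \
  	 (TYPE)==TYPE_FLOAT  ? DISPATCH_FLOAT(R,C)  : (kernel_t)NULL)

  int main(void)
  {
  	kernel_t k = DISPATCH(TYPE_DOUBLE, 1, 1);
  	if (k) k(); /* prints "double 1x1 kernel" */
  	return 0;
  }
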
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uaua_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uaua_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float_complex___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double_complex___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uaua_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uaua_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float_complex___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double_complex___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uaua_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uaua_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float_complex___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double_complex___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uaua_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uaua_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float_complex___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double_complex___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uaua_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uaua_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float_complex___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double_complex___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uaua_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uaua_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float_complex___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double_complex___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uaua_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uaua_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float_complex___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double_complex___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uaua_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uaua_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float_complex___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double_complex___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uaua_u_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double___tN_r1_c1_uu_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uaua_u_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float___tN_r1_c1_uu_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_u_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float_complex___tN_r1_c1_uu_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_u_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double_complex___tN_r1_c1_uu_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uaua_u_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double___tT_r1_c1_uu_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uaua_u_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float___tT_r1_c1_uu_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_u_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float_complex___tT_r1_c1_uu_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_u_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double_complex___tT_r1_c1_uu_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uaua_u_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double___tC_r1_c1_uu_sC_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uaua_u_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float___tC_r1_c1_uu_sC_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_u_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float_complex___tC_r1_c1_uu_sC_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_u_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double_complex___tC_r1_c1_uu_sC_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uaua_u_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double___tN_r1_c1_uu_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uaua_u_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float___tN_r1_c1_uu_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_u_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float_complex___tN_r1_c1_uu_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_u_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double_complex___tN_r1_c1_uu_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uaua_u_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double___tT_r1_c1_uu_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uaua_u_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float___tT_r1_c1_uu_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_u_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_float_complex___tT_r1_c1_uu_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_u_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spmv_uaua_double_complex___tT_r1_c1_uu_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+NULL ) 
+/* macro dispatch (constant-folds when R,C are literals); non-1x1 blockings yield NULL */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uaua_u_C(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uaua_double___tC_r1_c1_uu_sC_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uaua_u_C(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uaua_float___tC_r1_c1_uu_sC_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_u_C(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uaua_float_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_u_C(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uaua_double_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* numerical-type dispatcher: evaluates to the selected kernel's address, or NULL if unsupported */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_uaua_u_BCSR(R,C) : \
+NULL ) 
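+/*
+ * Illustrative, self-contained sketch of the dispatch pattern used in this
+ * header (NOT part of the generated code, hence the #if 0 guard): per-type
+ * macros pick an unrolled kernel when the block size matches and NULL
+ * otherwise, and a type-level macro then selects among them.  Every name
+ * prefixed with demo_/DEMO_ below is hypothetical; extract the block to a
+ * standalone .c file to run it.
+ */
+#if 0
+#include <stdio.h>
+#include <stddef.h>
+
+enum { DEMO_TYPE_DOUBLE = 'D', DEMO_TYPE_FLOAT = 'S' };
+
+static void demo_kernel_double_1x1(void){ puts("double 1x1 kernel"); }
+static void demo_kernel_float_1x1 (void){ puts("float 1x1 kernel");  }
+
+/* block-size dispatch: only the 1x1 unrolling exists, as in the macros above */
+#define DEMO_double_dispatch(R,C) ( (R)==1 && (C)==1 ? demo_kernel_double_1x1 : NULL )
+#define DEMO_float_dispatch(R,C)  ( (R)==1 && (C)==1 ? demo_kernel_float_1x1  : NULL )
+
+/* type dispatch, mirroring RSB_type_kernel_dispatcher_*: NULL when unsupported.
+ * The (void*) casts of function addresses follow this header's own idiom. */
+#define DEMO_type_dispatch(T,R,C) \
+( (T)==DEMO_TYPE_DOUBLE ? (void*)DEMO_double_dispatch(R,C) : \
+  (T)==DEMO_TYPE_FLOAT  ? (void*)DEMO_float_dispatch(R,C)  : NULL )
+
+int main(void)
+{
+	void (*k)(void) = (void (*)(void))DEMO_type_dispatch(DEMO_TYPE_DOUBLE, 1, 1);
+	if (k != NULL)
+		k();                              /* prints "double 1x1 kernel" */
+	/* a 2x2 blocking has no generated kernel, so NULL comes back */
+	return DEMO_type_dispatch(DEMO_TYPE_FLOAT, 2, 2) == NULL ? 0 : 1;
+}
+#endif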
+/* macro dispatch (constant-folds when R,C are literals); non-1x1 blockings yield NULL */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uauz_l_N(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_double___tN_r1_c1_ul_sN_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uauz_l_N(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_float___tN_r1_c1_ul_sN_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uauz_l_N(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_float_complex___tN_r1_c1_ul_sN_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uauz_l_N(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_double_complex___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* macro dispatch (constant-folds when R,C are literals); non-1x1 blockings yield NULL */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uauz_l_T(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_double___tT_r1_c1_ul_sT_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uauz_l_T(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_float___tT_r1_c1_ul_sT_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uauz_l_T(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_float_complex___tT_r1_c1_ul_sT_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uauz_l_T(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_double_complex___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* macro dispatch (constant-folds when R,C are literals); non-1x1 blockings yield NULL */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uauz_l_C(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_double___tC_r1_c1_ul_sC_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uauz_l_C(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_float___tC_r1_c1_ul_sC_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uauz_l_C(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_float_complex___tC_r1_c1_ul_sC_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uauz_l_C(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_double_complex___tC_r1_c1_ul_sC_d_u : NULL )
+
+/* numerical-type dispatcher: evaluates to the selected kernel's address, or NULL if unsupported */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_uauz_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_uauz_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_uauz_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_uauz_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_uauz_l_BCSR(R,C) : \
+NULL ) 
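+/*
+ * Contrast for the recurring "a macro is faster than a switch construct"
+ * note: the same selection written as a function with a runtime switch
+ * (hypothetical demo names, #if 0 so it stays out of any build).  With
+ * literal TYPE/R/C arguments the macro chain constant-folds in the compiler
+ * front end, while the switch form relies on the optimizer; modern
+ * compilers typically fold both equally well.
+ */
+#if 0
+#include <stddef.h>
+
+static void demo_k_double(void){ /* stand-in for a generated kernel */ }
+static void demo_k_float (void){ /* stand-in for a generated kernel */ }
+
+static void *demo_switch_dispatch(char type, int br, int bc)
+{
+	if (br != 1 || bc != 1)
+		return NULL;                  /* only 1x1 unrollings are generated */
+	switch (type) {
+	case 'D': return (void*)demo_k_double;
+	case 'S': return (void*)demo_k_float;
+	default:  return NULL;            /* unsupported numerical type */
+	}
+}
+#endif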
+/* macro dispatch (constant-folds when R,C are literals); non-1x1 blockings yield NULL */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uauz_u_N(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_double___tN_r1_c1_uu_sN_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uauz_u_N(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_float___tN_r1_c1_uu_sN_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uauz_u_N(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_float_complex___tN_r1_c1_uu_sN_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uauz_u_N(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_double_complex___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* macro dispatch (constant-folds when R,C are literals); non-1x1 blockings yield NULL */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uauz_u_T(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_double___tT_r1_c1_uu_sT_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uauz_u_T(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_float___tT_r1_c1_uu_sT_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uauz_u_T(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_float_complex___tT_r1_c1_uu_sT_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uauz_u_T(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_double_complex___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* macro dispatch (constant-folds when R,C are literals); non-1x1 blockings yield NULL */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uauz_u_C(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_double___tC_r1_c1_uu_sC_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uauz_u_C(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_float___tC_r1_c1_uu_sC_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uauz_u_C(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_float_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uauz_u_C(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uauz_double_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* numerical-type dispatcher: evaluates to the selected kernel's address, or NULL if unsupported */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_uauz_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_uauz_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_uauz_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_uauz_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_uauz_u_BCSR(R,C) : \
+NULL ) 
+/* macro dispatch (constant-folds when R,C are literals); non-1x1 blockings yield NULL */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uxua_l_N(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_double___tN_r1_c1_ul_sN_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uxua_l_N(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_float___tN_r1_c1_ul_sN_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uxua_l_N(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_float_complex___tN_r1_c1_ul_sN_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uxua_l_N(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_double_complex___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* macro dispatch (constant-folds when R,C are literals); non-1x1 blockings yield NULL */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uxua_l_T(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_double___tT_r1_c1_ul_sT_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uxua_l_T(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_float___tT_r1_c1_ul_sT_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uxua_l_T(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_float_complex___tT_r1_c1_ul_sT_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uxua_l_T(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_double_complex___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* macro dispatch (constant-folds when R,C are literals); non-1x1 blockings yield NULL */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uxua_l_C(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_double___tC_r1_c1_ul_sC_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uxua_l_C(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_float___tC_r1_c1_ul_sC_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uxua_l_C(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_float_complex___tC_r1_c1_ul_sC_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uxua_l_C(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_double_complex___tC_r1_c1_ul_sC_d_u : NULL )
+
+/* numerical-type dispatcher: evaluates to the selected kernel's address, or NULL if unsupported */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_uxua_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_uxua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_uxua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_uxua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_uxua_l_BCSR(R,C) : \
+NULL ) 
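+/*
+ * Usage note (illustration only, hypothetical names, #if 0 guarded): since
+ * every dispatcher in this header may evaluate to NULL, a caller should
+ * test the returned address before jumping through it.
+ */
+#if 0
+#include <stddef.h>
+
+static int demo_call_if_available(void *kp)
+{
+	/* same void*-to-function-pointer cast idiom as the dispatchers above */
+	void (*k)(void) = (void (*)(void))kp;
+	if (k == NULL)
+		return -1;                    /* no kernel: caller must fall back */
+	k();
+	return 0;
+}
+#endif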
+/* macro dispatch (constant-folds when R,C are literals); non-1x1 blockings yield NULL */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uxua_u_N(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_double___tN_r1_c1_uu_sN_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uxua_u_N(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_float___tN_r1_c1_uu_sN_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uxua_u_N(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_float_complex___tN_r1_c1_uu_sN_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uxua_u_N(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_double_complex___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* macro dispatch (constant-folds when R,C are literals); non-1x1 blockings yield NULL */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uxua_u_T(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_double___tT_r1_c1_uu_sT_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uxua_u_T(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_float___tT_r1_c1_uu_sT_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uxua_u_T(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_float_complex___tT_r1_c1_uu_sT_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uxua_u_T(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_double_complex___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* macro dispatch (constant-folds when R,C are literals); non-1x1 blockings yield NULL */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_uxua_u_C(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_double___tC_r1_c1_uu_sC_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_uxua_u_C(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_float___tC_r1_c1_uu_sC_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_uxua_u_C(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_float_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_uxua_u_C(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_uxua_double_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* numerical-type dispatcher: evaluates to the selected kernel's address, or NULL if unsupported */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_uxua_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_uxua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_uxua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_uxua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_uxua_u_BCSR(R,C) : \
+NULL ) 
+/* macro dispatch (constant-folds when R,C are literals); non-1x1 blockings yield NULL */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_unua_l_N(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_unua_double___tN_r1_c1_ul_sN_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_unua_l_N(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_unua_float___tN_r1_c1_ul_sN_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_unua_l_N(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_unua_float_complex___tN_r1_c1_ul_sN_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_unua_l_N(R,C) ( (R)==(1) && (C)==(1) ? \
+ rsb__BCSR_spmv_unua_double_complex___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct; NULL flags an unsupported numerical type */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_unua_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE         ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_unua_l_BCSR(R,C) : \
+   (TYPE)==RSB_NUMERICAL_TYPE_FLOAT          ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_unua_l_BCSR(R,C) : \
+   (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_unua_l_BCSR(R,C) : \
+   (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_unua_l_BCSR(R,C) : \
+   NULL )
+/* a macro is faster than a switch construct; NULL flags an unsupported block size */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_unua_l_T(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_unua_double___tT_r1_c1_ul_sT_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_unua_l_T(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_unua_float___tT_r1_c1_ul_sT_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_unua_l_T(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_unua_float_complex___tT_r1_c1_ul_sT_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_unua_l_T(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_unua_double_complex___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct; NULL flags an unsupported block size */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_unua_l_C(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_unua_double___tC_r1_c1_ul_sC_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_unua_l_C(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_unua_float___tC_r1_c1_ul_sC_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_unua_l_C(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_unua_float_complex___tC_r1_c1_ul_sC_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_unua_l_C(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_unua_double_complex___tC_r1_c1_ul_sC_d_u : NULL )
+
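The type-level wrappers chain one more set of constant conditions over the numerical-type code and erase the kernel's type through a (void*) cast. A self-contained sketch using stand-in typecodes (librsb's RSB_NUMERICAL_TYPE_* constants are defined elsewhere and not reproduced here):

    #include <stdio.h>
    #include <stddef.h>

    enum { TYPE_DOUBLE = 'D', TYPE_FLOAT = 'S' }; /* stand-ins, not librsb's values */

    static void spmv_double_kern(void) { puts("double kernel"); }
    static void spmv_float_kern(void)  { puts("float kernel"); }

    /* Mirrors RSB_type_kernel_dispatcher_*: one branch per numerical type,
     * (void*) erasure, NULL for an unsupported typecode. */
    #define TYPE_DISPATCH(TYPE) \
            ( (TYPE)==TYPE_DOUBLE ? (void*)spmv_double_kern : \
              (TYPE)==TYPE_FLOAT  ? (void*)spmv_float_kern  : \
              NULL )

    int main(void)
    {
            /* Converting void* back to a function pointer is not ISO C, but it
             * matches the (void*) casts these headers use on common platforms. */
            void (*fp)(void) = (void (*)(void))TYPE_DISPATCH(TYPE_DOUBLE);
            if (fp != NULL)
                    fp(); /* prints "double kernel" */
            return 0;
    }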
+/* a macro is faster than a switch construct; NULL flags an unsupported block size */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_unua_u_N(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_unua_double___tN_r1_c1_uu_sN_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_unua_u_N(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_unua_float___tN_r1_c1_uu_sN_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_unua_u_N(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_unua_float_complex___tN_r1_c1_uu_sN_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_unua_u_N(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_unua_double_complex___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct; NULL flags an unsupported numerical type */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_unua_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE         ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_unua_u_BCSR(R,C) : \
+   (TYPE)==RSB_NUMERICAL_TYPE_FLOAT          ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_unua_u_BCSR(R,C) : \
+   (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_unua_u_BCSR(R,C) : \
+   (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_unua_u_BCSR(R,C) : \
+   NULL )
+/* a macro is faster than a switch construct; NULL flags an unsupported block size */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_unua_u_T(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_unua_double___tT_r1_c1_uu_sT_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_unua_u_T(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_unua_float___tT_r1_c1_uu_sT_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_unua_u_T(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_unua_float_complex___tT_r1_c1_uu_sT_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_unua_u_T(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_unua_double_complex___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct; NULL flags an unsupported block size */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_unua_u_C(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_unua_double___tC_r1_c1_uu_sC_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_unua_u_C(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_unua_float___tC_r1_c1_uu_sC_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_unua_u_C(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_unua_float_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_unua_u_C(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_unua_double_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct; NULL flags an unsupported block size */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_sasa_l_N(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_double___tN_r1_c1_ul_sN_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_sasa_l_N(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_float___tN_r1_c1_ul_sN_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_sasa_l_N(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_float_complex___tN_r1_c1_ul_sN_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_sasa_l_N(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_double_complex___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct; NULL flags an unsupported numerical type */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_sasa_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE         ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_sasa_l_BCSR(R,C) : \
+   (TYPE)==RSB_NUMERICAL_TYPE_FLOAT          ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_sasa_l_BCSR(R,C) : \
+   (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_sasa_l_BCSR(R,C) : \
+   (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_sasa_l_BCSR(R,C) : \
+   NULL )
+/* a macro is faster than a switch construct; NULL flags an unsupported block size */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_sasa_l_T(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_double___tT_r1_c1_ul_sT_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_sasa_l_T(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_float___tT_r1_c1_ul_sT_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_sasa_l_T(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_float_complex___tT_r1_c1_ul_sT_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_sasa_l_T(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_double_complex___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct; NULL flags an unsupported block size */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_sasa_l_C(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_double___tC_r1_c1_ul_sC_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_sasa_l_C(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_float___tC_r1_c1_ul_sC_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_sasa_l_C(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_float_complex___tC_r1_c1_ul_sC_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_sasa_l_C(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_double_complex___tC_r1_c1_ul_sC_d_u : NULL )
+
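Each block repeats the claim that a macro is faster than a switch construct. For comparison, a sketch of the switch form the generator avoids: when R, C, and the typecode are literals, the ternary macros reduce to a single function address at compile time, whereas a switch is ordinary runtime control flow unless the optimizer can prove its operand constant.

    #include <stddef.h>

    static void sasa_double_kern(void) { /* stand-in for a double kernel */ }
    static void sasa_float_kern(void)  { /* stand-in for a float kernel  */ }

    /* Runtime equivalent of the ternary dispatchers above (a sketch, not
     * library code; the typecode values are illustrative). */
    static void *switch_dispatch(char typecode)
    {
            switch (typecode) {
            case 'D': return (void*)sasa_double_kern;
            case 'S': return (void*)sasa_float_kern;
            default:  return NULL; /* unsupported numerical type */
            }
    }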
+/* a macro is faster than a switch construct; NULL flags an unsupported block size */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_sasa_u_N(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_double___tN_r1_c1_uu_sN_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_sasa_u_N(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_float___tN_r1_c1_uu_sN_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_sasa_u_N(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_float_complex___tN_r1_c1_uu_sN_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_sasa_u_N(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_double_complex___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct; NULL flags an unsupported numerical type */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_sasa_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE         ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_sasa_u_BCSR(R,C) : \
+   (TYPE)==RSB_NUMERICAL_TYPE_FLOAT          ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_sasa_u_BCSR(R,C) : \
+   (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_sasa_u_BCSR(R,C) : \
+   (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_sasa_u_BCSR(R,C) : \
+   NULL )
+/* a macro is faster than a switch construct; NULL flags an unsupported block size */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_sasa_u_T(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_double___tT_r1_c1_uu_sT_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_sasa_u_T(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_float___tT_r1_c1_uu_sT_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_sasa_u_T(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_float_complex___tT_r1_c1_uu_sT_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_sasa_u_T(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_double_complex___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct; NULL flags an unsupported block size */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_sasa_u_C(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_double___tC_r1_c1_uu_sC_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spmv_sasa_u_C(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_float___tC_r1_c1_uu_sC_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_sasa_u_C(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_float_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_sasa_u_C(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sasa_double_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
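Only the 1x1 block size is instantiated in this build; with more block sizes the generator would chain further constant conditions before the NULL fallback. A sketch of that extended shape, with a hypothetical 2x2 kernel name for illustration:

    #include <stddef.h>

    static void kern_1x1(void) { /* stand-in 1x1 kernel */ }
    static void kern_2x2(void) { /* hypothetical 2x2 instantiation */ }

    /* Extended dispatcher shape: one constant condition per instantiated
     * block size, NULL if none matches. */
    #define DISPATCH_EXT(R,C) \
            ( (R)==(1) && (C)==(1) ? kern_1x1 : \
              (R)==(2) && (C)==(2) ? kern_2x2 : \
              NULL )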
+/* a macro is faster than a switch construct; NULL flags an unsupported block size */
+#define RSB_double_kernel_dispatcher_BCSR_spsv_uxua_l_N(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_double___tN_r1_c1_ul_sN_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spsv_uxua_l_N(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_float___tN_r1_c1_ul_sN_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spsv_uxua_l_N(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_float_complex___tN_r1_c1_ul_sN_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spsv_uxua_l_N(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_double_complex___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct; NULL flags an unsupported numerical type */
+#define RSB_type_kernel_dispatcher_BCSR_spsv_uxua_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE         ? (void*)RSB_double_kernel_dispatcher_BCSR_spsv_uxua_l_BCSR(R,C) : \
+   (TYPE)==RSB_NUMERICAL_TYPE_FLOAT          ? (void*)RSB_float_kernel_dispatcher_BCSR_spsv_uxua_l_BCSR(R,C) : \
+   (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spsv_uxua_l_BCSR(R,C) : \
+   (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spsv_uxua_l_BCSR(R,C) : \
+   NULL )
+/* a macro is faster than a switch construct; NULL flags an unsupported block size */
+#define RSB_double_kernel_dispatcher_BCSR_spsv_uxua_l_T(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_double___tT_r1_c1_ul_sT_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spsv_uxua_l_T(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_float___tT_r1_c1_ul_sT_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spsv_uxua_l_T(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_float_complex___tT_r1_c1_ul_sT_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spsv_uxua_l_T(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_double_complex___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct; NULL flags an unsupported block size */
+#define RSB_double_kernel_dispatcher_BCSR_spsv_uxua_l_C(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_double___tC_r1_c1_ul_sC_d_u : NULL )
+
+#define RSB_float_kernel_dispatcher_BCSR_spsv_uxua_l_C(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_float___tC_r1_c1_ul_sC_d_u : NULL )
+
+#define RSB_float_complex_kernel_dispatcher_BCSR_spsv_uxua_l_C(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_float_complex___tC_r1_c1_ul_sC_d_u : NULL )
+
+#define RSB_double_complex_kernel_dispatcher_BCSR_spsv_uxua_l_C(R,C) \
+	( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_double_complex___tC_r1_c1_ul_sC_d_u : NULL )
+
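Because every dispatcher falls back to NULL, a caller can probe whether a specialized triangular-solve kernel exists for a given block size and otherwise take a generic path. A sketch under the assumption that the rsb__BCSR_spsv_* prototypes declared elsewhere in this header are in scope; the function-pointer shape is illustrative, not the real kernel signature:

    /* Assumed, simplified kernel shape; the real rsb__BCSR_spsv_* signatures
     * are declared elsewhere in librsb and differ from this. */
    typedef void (*rsb_spsv_fp_t)(void);

    static int have_specialized_spsv(int br, int bc)
    {
            /* The macro yields a kernel address for (1,1) and NULL otherwise;
             * the pointer is only tested here, never called through this type. */
            rsb_spsv_fp_t fp = (rsb_spsv_fp_t)
                    RSB_double_kernel_dispatcher_BCSR_spsv_uxua_l_N(br, bc);
            return fp != NULL; /* 0: caller should use the generic BCSR solve */
    }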
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spsv_uxua_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_uxua_double___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spsv_uxua_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_uxua_float___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spsv_uxua_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_uxua_float_complex___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spsv_uxua_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_uxua_double_complex___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spsv_uxua_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spsv_uxua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spsv_uxua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spsv_uxua_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spsv_uxua_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spsv_uxua_l_T(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_double___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spsv_uxua_l_T(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_float___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spsv_uxua_l_T(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_float_complex___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spsv_uxua_l_T(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_double_complex___tT_r1_c1_ul_sT_d_u : NULL )
+
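+/*
+ * Illustrative sketch (not generated code): the ?:-chain macros above are
+ * the compile-time counterpart of a runtime switch such as the one below.
+ * The helper name rsb_example_spsv_uxua_l_N_lookup is hypothetical; only
+ * the macros and the RSB_NUMERICAL_TYPE_* codes come from librsb itself.
+ */
+static inline void * rsb_example_spsv_uxua_l_N_lookup(int type, int r, int c)
+{
+	switch (type)
+	{
+		case RSB_NUMERICAL_TYPE_DOUBLE        : return (void*)RSB_double_kernel_dispatcher_BCSR_spsv_uxua_l_N(r,c);
+		case RSB_NUMERICAL_TYPE_FLOAT         : return (void*)RSB_float_kernel_dispatcher_BCSR_spsv_uxua_l_N(r,c);
+		case RSB_NUMERICAL_TYPE_FLOAT_COMPLEX : return (void*)RSB_float_complex_kernel_dispatcher_BCSR_spsv_uxua_l_N(r,c);
+		case RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX: return (void*)RSB_double_complex_kernel_dispatcher_BCSR_spsv_uxua_l_N(r,c);
+		default                               : return NULL; /* unsupported numerical type */
+	}
+}
+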
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spsv_uxua_u_N(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_double___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spsv_uxua_u_N(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_float___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spsv_uxua_u_N(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_float_complex___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spsv_uxua_u_N(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_double_complex___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spsv_uxua_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spsv_uxua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spsv_uxua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spsv_uxua_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spsv_uxua_u_BCSR(R,C) : \
+NULL )
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spsv_uxua_u_T(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_double___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spsv_uxua_u_T(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_float___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spsv_uxua_u_T(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_float_complex___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spsv_uxua_u_T(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_double_complex___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spsv_uxua_u_C(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_double___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spsv_uxua_u_C(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_float___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spsv_uxua_u_C(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_float_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spsv_uxua_u_C(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_uxua_double_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
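+/*
+ * Illustrative sketch (not generated code): only the r1_c1 (1x1) blocking
+ * is instantiated here, so every other (R,C) pair makes these dispatchers
+ * yield NULL; callers should treat NULL as "no specialized kernel".  The
+ * typedef and helper below are hypothetical placeholders, not librsb API.
+ */
+typedef void (*rsb_example_kernel_fp_t)(void); /* placeholder signature */
+static inline int rsb_example_have_spsv_uxua_u_double(int r, int c)
+{
+	rsb_example_kernel_fp_t fp =
+		(rsb_example_kernel_fp_t)RSB_double_kernel_dispatcher_BCSR_spsv_uxua_u_N(r,c);
+	return fp != NULL; /* nonzero only for r==1 && c==1 */
+}
+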
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_sxsa_l_N(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_double___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spmv_sxsa_l_N(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_float___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_sxsa_l_N(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_float_complex___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_sxsa_l_N(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_double_complex___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_sxsa_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_sxsa_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_sxsa_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_sxsa_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_sxsa_l_BCSR(R,C) : \
+NULL )
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_sxsa_l_T(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_double___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spmv_sxsa_l_T(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_float___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_sxsa_l_T(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_float_complex___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_sxsa_l_T(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_double_complex___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_sxsa_l_C(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_double___tC_r1_c1_ul_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spmv_sxsa_l_C(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_float___tC_r1_c1_ul_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_sxsa_l_C(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_float_complex___tC_r1_c1_ul_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_sxsa_l_C(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_double_complex___tC_r1_c1_ul_sC_d_u : NULL )
+
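+/*
+ * Illustrative sketch (not generated code): run-time selection of the
+ * transposition variant; the 'N'/'T'/'C' codes mirror the _N/_T/_C macro
+ * suffixes above.  The helper name is hypothetical.
+ */
+static inline void * rsb_example_spmv_sxsa_l_double(char trans, int r, int c)
+{
+	return trans == 'N' ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_sxsa_l_N(r,c)
+	     : trans == 'T' ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_sxsa_l_T(r,c)
+	     : trans == 'C' ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_sxsa_l_C(r,c)
+	     : NULL;
+}
+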
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_sxsa_u_N(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_double___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spmv_sxsa_u_N(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_float___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_sxsa_u_N(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_float_complex___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_sxsa_u_N(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_double_complex___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spmv_sxsa_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spmv_sxsa_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spmv_sxsa_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spmv_sxsa_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spmv_sxsa_u_BCSR(R,C) : \
+NULL )
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_sxsa_u_T(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_double___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spmv_sxsa_u_T(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_float___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_sxsa_u_T(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_float_complex___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_sxsa_u_T(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_double_complex___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spmv_sxsa_u_C(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_double___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spmv_sxsa_u_C(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_float___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spmv_sxsa_u_C(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_float_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spmv_sxsa_u_C(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spmv_sxsa_double_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
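+/*
+ * Portability note: the TYPE-level dispatchers funnel kernel addresses
+ * through (void*).  ISO C does not guarantee that function pointers
+ * survive a round trip through an object pointer (POSIX does), so a
+ * stricter variant could dispatch through one common function-pointer
+ * type instead; rsb_example_any_kernel_fp_t below is a hypothetical
+ * stand-in for such a type, shown only as a sketch.
+ */
+typedef void (*rsb_example_any_kernel_fp_t)(void);
+/* e.g. (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE ?
+        (rsb_example_any_kernel_fp_t)RSB_double_kernel_dispatcher_BCSR_spmv_sxsa_u_N(R,C) : ... */
+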
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spsv_sxsx_l_N(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_sxsx_double___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spsv_sxsx_l_N(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_sxsx_float___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_N(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_sxsx_float_complex___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_N(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_sxsx_double_complex___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+NULL )
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spsv_sxsx_l_T(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_sxsx_double___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spsv_sxsx_l_T(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_sxsx_float___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_T(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_sxsx_float_complex___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_T(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_sxsx_double_complex___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spsv_sxsx_l_C(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_sxsx_double___tC_r1_c1_ul_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spsv_sxsx_l_C(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_sxsx_float___tC_r1_c1_ul_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_C(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_sxsx_float_complex___tC_r1_c1_ul_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_C(R,C) ( (R)==(1) && (C)==(1) ? rsb__BCSR_spsv_sxsx_double_complex___tC_r1_c1_ul_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spsv_sxsx_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_double___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spsv_sxsx_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_float___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_float_complex___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_double_complex___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spsv_sxsx_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_double___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spsv_sxsx_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_float___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_float_complex___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_double_complex___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spsv_sxsx_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_double___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spsv_sxsx_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_float___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_float_complex___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_double_complex___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spsv_sxsx_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_double___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spsv_sxsx_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_float___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_float_complex___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_double_complex___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spsv_sxsx_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_double___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spsv_sxsx_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_float___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_float_complex___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_double_complex___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spsv_sxsx_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_double___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spsv_sxsx_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_float___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_float_complex___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spsv_sxsx_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_double_complex___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spsv_sxsx_u_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_double___tN_r1_c1_uu_sN_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spsv_sxsx_u_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_float___tN_r1_c1_uu_sN_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spsv_sxsx_u_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_float_complex___tN_r1_c1_uu_sN_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spsv_sxsx_u_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_double_complex___tN_r1_c1_uu_sN_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_spsv_sxsx_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_spsv_sxsx_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_spsv_sxsx_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_spsv_sxsx_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_spsv_sxsx_u_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spsv_sxsx_u_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_double___tT_r1_c1_uu_sT_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spsv_sxsx_u_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_float___tT_r1_c1_uu_sT_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spsv_sxsx_u_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_float_complex___tT_r1_c1_uu_sT_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spsv_sxsx_u_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_double_complex___tT_r1_c1_uu_sT_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_spsv_sxsx_u_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_double___tC_r1_c1_uu_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_spsv_sxsx_u_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_float___tC_r1_c1_uu_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_spsv_sxsx_u_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_float_complex___tC_r1_c1_uu_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_spsv_sxsx_u_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_spsv_sxsx_double_complex___tC_r1_c1_uu_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_infty_norm_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_double___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_infty_norm_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_float___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_infty_norm_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_float_complex___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_infty_norm_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_double_complex___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_infty_norm_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_infty_norm_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_infty_norm_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_infty_norm_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_infty_norm_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_infty_norm_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_double___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_infty_norm_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_float___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_infty_norm_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_float_complex___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_infty_norm_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_double_complex___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_infty_norm_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_double___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_infty_norm_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_float___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_infty_norm_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_float_complex___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_infty_norm_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_double_complex___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_infty_norm_u_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_double___tN_r1_c1_uu_sN_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_infty_norm_u_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_float___tN_r1_c1_uu_sN_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_infty_norm_u_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_float_complex___tN_r1_c1_uu_sN_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_infty_norm_u_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_double_complex___tN_r1_c1_uu_sN_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_infty_norm_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_infty_norm_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_infty_norm_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_infty_norm_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_infty_norm_u_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_infty_norm_u_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_double___tT_r1_c1_uu_sT_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_infty_norm_u_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_float___tT_r1_c1_uu_sT_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_infty_norm_u_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_float_complex___tT_r1_c1_uu_sT_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_infty_norm_u_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_double_complex___tT_r1_c1_uu_sT_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_infty_norm_u_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_double___tC_r1_c1_uu_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_infty_norm_u_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_float___tC_r1_c1_uu_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_infty_norm_u_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_float_complex___tC_r1_c1_uu_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_infty_norm_u_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_infty_norm_double_complex___tC_r1_c1_uu_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_rowssums_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_double___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_rowssums_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_float___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_rowssums_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_float_complex___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_rowssums_l_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_double_complex___tN_r1_c1_ul_sN_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_rowssums_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_rowssums_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_rowssums_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_rowssums_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_rowssums_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_rowssums_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_double___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_rowssums_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_float___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_rowssums_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_float_complex___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_rowssums_l_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_double_complex___tT_r1_c1_ul_sT_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_rowssums_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_double___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_rowssums_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_float___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_rowssums_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_float_complex___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_rowssums_l_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_double_complex___tC_r1_c1_ul_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_rowssums_u_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_double___tN_r1_c1_uu_sN_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_rowssums_u_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_float___tN_r1_c1_uu_sN_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_rowssums_u_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_float_complex___tN_r1_c1_uu_sN_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_rowssums_u_N(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_double_complex___tN_r1_c1_uu_sN_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_rowssums_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_rowssums_u_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_double___tT_r1_c1_uu_sT_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_rowssums_u_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_float___tT_r1_c1_uu_sT_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_rowssums_u_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_float_complex___tT_r1_c1_uu_sT_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_rowssums_u_T(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_double_complex___tT_r1_c1_uu_sT_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_rowssums_u_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_double___tC_r1_c1_uu_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_rowssums_u_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_float___tC_r1_c1_uu_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_rowssums_u_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_float_complex___tC_r1_c1_uu_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_rowssums_u_C(R,C) ( (R)==(1) && (C)==(1)?  rsb__BCSR_rowssums_double_complex___tC_r1_c1_uu_sC_d_u \
+ :  (\
+ NULL )) 
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_rowssums_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_rowssums_u_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_double___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_rowssums_u_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_float___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_rowssums_u_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_float_complex___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_rowssums_u_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_double_complex___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_rowssums_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_rowssums_u_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_double___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_rowssums_u_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_float___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_rowssums_u_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_float_complex___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_rowssums_u_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_double_complex___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_rowssums_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_rowssums_u_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_double___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_rowssums_u_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_float___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_rowssums_u_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_float_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_rowssums_u_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_double_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_rowssums_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_rowssums_u_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_double___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_rowssums_u_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_float___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_rowssums_u_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_float_complex___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_rowssums_u_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_double_complex___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_rowssums_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_rowssums_u_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_double___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_rowssums_u_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_float___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_rowssums_u_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_float_complex___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_rowssums_u_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_double_complex___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_rowssums_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_rowssums_u_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_double___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_rowssums_u_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_float___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_rowssums_u_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_float_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_rowssums_u_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_double_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_rowssums_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_rowssums_u_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_scale_l_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_scale_l_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_scale_l_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float_complex___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_scale_l_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double_complex___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_scale_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_scale_l_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_scale_l_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_scale_l_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float_complex___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_scale_l_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double_complex___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_scale_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_scale_l_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double___tC_r1_c1_ul_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_scale_l_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float___tC_r1_c1_ul_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_scale_l_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float_complex___tC_r1_c1_ul_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_scale_l_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double_complex___tC_r1_c1_ul_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_scale_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_scale_l_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_scale_l_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_scale_l_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float_complex___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_scale_l_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double_complex___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_scale_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_scale_l_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_scale_l_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_scale_l_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float_complex___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_scale_l_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double_complex___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_scale_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_scale_l_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double___tC_r1_c1_ul_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_scale_l_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float___tC_r1_c1_ul_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_scale_l_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float_complex___tC_r1_c1_ul_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_scale_l_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double_complex___tC_r1_c1_ul_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_scale_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_scale_l_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_scale_l_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_scale_l_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float_complex___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_scale_l_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double_complex___tN_r1_c1_ul_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_scale_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_scale_l_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_scale_l_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_scale_l_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float_complex___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_scale_l_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double_complex___tT_r1_c1_ul_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_scale_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_scale_l_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double___tC_r1_c1_ul_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_scale_l_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float___tC_r1_c1_ul_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_scale_l_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float_complex___tC_r1_c1_ul_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_scale_l_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double_complex___tC_r1_c1_ul_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_scale_l_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_scale_l_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_scale_u_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_scale_u_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_scale_u_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float_complex___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_scale_u_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double_complex___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_scale_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_scale_u_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_scale_u_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_scale_u_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float_complex___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_scale_u_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double_complex___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_scale_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_scale_u_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_scale_u_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_scale_u_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_scale_u_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_scale_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_scale_u_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_scale_u_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_scale_u_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float_complex___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_scale_u_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double_complex___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_scale_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_scale_u_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_scale_u_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_scale_u_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float_complex___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_scale_u_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double_complex___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_scale_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_scale_u_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_scale_u_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_scale_u_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_scale_u_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_scale_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_scale_u_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_scale_u_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_scale_u_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float_complex___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_scale_u_N(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double_complex___tN_r1_c1_uu_sN_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_scale_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_scale_u_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_scale_u_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_scale_u_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float_complex___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_scale_u_T(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double_complex___tT_r1_c1_uu_sT_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_scale_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+NULL ) 
+/* a macro is faster than a switch construct */
+#define RSB_double_kernel_dispatcher_BCSR_scale_u_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_kernel_dispatcher_BCSR_scale_u_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_float_complex_kernel_dispatcher_BCSR_scale_u_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_float_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_double_complex_kernel_dispatcher_BCSR_scale_u_C(R,C) ( (R)==(1) && (C)==(1) ?  rsb__BCSR_scale_double_complex___tC_r1_c1_uu_sC_d_u : NULL )
+
+/* a macro is faster than a switch construct */
+#define RSB_type_kernel_dispatcher_BCSR_scale_u_BCSR(TYPE,R,C) \
+(  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  ? (void*)RSB_double_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  ? (void*)RSB_float_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ? (void*)RSB_float_complex_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+  (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ? (void*)RSB_double_complex_kernel_dispatcher_BCSR_scale_u_BCSR(R,C) : \
+NULL ) 
+#endif /* RSB_BCSS_H_INCLUDED */
+/* @endcond */
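
For orientation: the macros above implement a two-level, branch-free kernel
lookup. An outer RSB_type_kernel_dispatcher_* macro selects on the numerical
type code, and the per-type macros select on the block dimensions, yielding a
kernel function pointer (NULL when no specialized kernel exists). Below is a
minimal usage sketch; the pointer signature is reconstructed from the ARGS
template in rsb_krnl_bcss_macros.m4 later in this patch (with the citype index
width instantiated here as rsb_coo_idx_t), so treat it as illustrative only,
not as a public API.

    #include "rsb_internals.h"  /* rsb_err_t, index types, numerical type codes */

    /* Signature sketch for an accumulator-writing (rowssums) kernel. */
    typedef rsb_err_t (*rsb_rowssums_kernel_t)(
        const double * VA, double * row_sums,
        const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim,
        const rsb_coo_idx_t * bindx, const rsb_nnz_idx_t * bpntr,
        const rsb_nnz_idx_t * indptr, const rsb_coo_idx_t * rpntr,
        const rsb_coo_idx_t * cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc,
        const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags);

    /* Resolve a double precision, 1x1-blocked rowssums kernel; NULL if absent.
     * The cast through void* mirrors the convention of the macros themselves. */
    rsb_rowssums_kernel_t kernel = (rsb_rowssums_kernel_t)
        RSB_type_kernel_dispatcher_BCSR_rowssums_u_BCSR(RSB_NUMERICAL_TYPE_DOUBLE, 1, 1);
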
diff --git a/rsb_krnl_bcss.m4 b/rsb_krnl_bcss.m4
new file mode 100644
index 0000000..f324278
--- /dev/null
+++ b/rsb_krnl_bcss.m4
@@ -0,0 +1,84 @@
+dnl
+dnl	@author: Michele Martone
+dnl
+dnl
+/* @cond INNERDOC */
+dnl
+/*!
+ @file
+ @brief
+
+ Performance kernel dispatching code, for each numerical type, submatrix size, and operation,
+ for the block compressed sparse stripes (BCSS) format.
+ */
+dnl
+include(`rsb_misc.m4')dnl
+RSB_M4_HEADER_MESSAGE()dnl
+include(`rsb_krnl_bcss_macros.m4')dnl
+include(`rsb_krnl_vb_macros.m4')dnl FIXME : RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_NAME
+dnl
+dnl
+dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl				Function definitions
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#ifndef RSB_BCSS_H_INCLUDED
+#define RSB_BCSS_H_INCLUDED
+')dnl
+dnl 
+dnl
+ifelse(dnl
+RSB_M4_MEMBER(`BCSR',WANT_MATRIX_STORAGE)dnl
+RSB_M4_MEMBER(`BCSC',WANT_MATRIX_STORAGE)dnl
+,`00',`dnl
+/**
+ * No BCSS formats compiled in.
+ */
+',`dnl
+dnl
+dnl
+dnl
+dnl
+#include "rsb_internals.h"
+dnl
+ifelse(RSB_M4_WANT_OMP_IN_KERNELS,`1',`dnl
+#include <omp.h>	/* OpenMP parallelism (EXPERIMENTAL) */
+')dnl
+dnl
+dnl
+dnl Now the following macros are split across files.
+dnl RSB_M4_BCSS_KERNELS((`l',`u'))
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+foreach(`matrix_storage',RSB_M4_BCSS_FORMATS,`dnl
+foreach(`unrolling',(`l',`u'),`dnl
+foreach(`symmetry',RSB_M4_MATRIX_SYMMETRY,`dnl
+foreach(`transposition',RSB_M4_MATRIX_TRANSPOSITIONS,`dnl
+foreach(`mtype',RSB_M4_MATRIX_TYPES,`dnl
+DOUBLE_LINEAR_KERNEL_DISPATCHER_SEARCH_MACRO_(mop,mtype,unrolling,matrix_storage,transposition,symmetry,`UNLOOP_R_C_PAIRS`RSB_M4_BCSS_KERNEL_FUNCTION(`ID',mtype,matrix_storage,transposition,RSB_M4_ROWS_FALLBACK_UNROLL,RSB_M4_COLUMNS_FALLBACK_UNROLL,`l',mop)dnl
+'')
+')dnl
+DOUBLE_LINEAR_KERNEL_DISPATCHER_TYPE_SEARCH_MACRO_(mop,unrolling,matrix_storage,transposition)
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+dnl
+dnl
+dnl
+')dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#endif /* RSB_BCSS_H_INCLUDED */
+')dnl
+dnl
+/* @endcond */
+dnl
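
For orientation: under ONLY_WANT_HEADERS, the nested foreach above is what
emits the long runs of dispatcher macros seen in the generated rsb_krnl_bcss.h
earlier in this patch: one per-type macro per (operation, storage, unrolling,
symmetry, transposition, type) tuple, plus one type-selecting macro per group.
Schematically, a single iteration expands to:

    /* one iteration (mop=rowssums, mtype=double, transposition=T),
       as it appears in the generated header */
    #define RSB_double_kernel_dispatcher_BCSR_rowssums_u_T(R,C) \
        ( (R)==(1) && (C)==(1) ?  rsb__BCSR_rowssums_double___tT_r1_c1_uu_sT_d_u : NULL )
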
diff --git a/rsb_krnl_bcss_l.c b/rsb_krnl_bcss_l.c
new file mode 100644
index 0000000..a120a3a
--- /dev/null
+++ b/rsb_krnl_bcss_l.c
@@ -0,0 +1,40 @@
+/* @cond INNERDOC */
+/*!
+ @file
+ @brief
+ Performance kernel dispatching code, for each numerical type, submatrix size, and operation,
+ for the block compressed sparse stripes (BCSS) format.
+ Kernels are unrolled, with explicit loops, for arbitrary blockings.
+ FIXME : OBSOLETE.
+ */
+
+/*
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ Note: at present, only row-major matrix access is considered.
+
+ */
+#include "rsb_internals.h"
+RSB_EMPTY_FILE_FILLER 
+/* @endcond */
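
Since the looping kernels are compiled out, the body of this file reduces to
the RSB_EMPTY_FILE_FILLER token: ISO C requires a translation unit to contain
at least one declaration, so even an "empty" generated .c file must emit
something. The macro's definition is not part of this hunk; a typical shape
(hypothetical, for illustration only) would be:

    /* hypothetical filler; the real definition lives in librsb's internal headers */
    #define RSB_EMPTY_FILE_FILLER typedef int rsb__empty_file_filler_t;
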
diff --git a/rsb_krnl_bcss_l.h b/rsb_krnl_bcss_l.h
new file mode 100644
index 0000000..2b9e197
--- /dev/null
+++ b/rsb_krnl_bcss_l.h
@@ -0,0 +1,42 @@
+/* @cond INNERDOC */
+/*!
+ @file
+ @brief
+ Performance kernel dispatching code, for each numerical type, submatrix size, and operation,
+ for the block compressed sparse stripes (BCSS) format.
+ Kernels are unrolled, with explicit loops, for arbitrary blockings.
+ FIXME : OBSOLETE.
+ */
+
+/*
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ Note: at present, only row-major matrix access is considered.
+
+ */
+#ifndef RSB_BCSS_L_H_INCLUDED
+#define RSB_BCSS_L_H_INCLUDED
+#include "rsb_internals.h"
+#endif /* RSB_BCSS_L_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_krnl_bcss_l.m4 b/rsb_krnl_bcss_l.m4
new file mode 100644
index 0000000..7196572
--- /dev/null
+++ b/rsb_krnl_bcss_l.m4
@@ -0,0 +1,77 @@
+dnl
+dnl	@author: Michele Martone
+dnl
+dnl
+/* @cond INNERDOC */
+dnl
+/*!
+ @file
+ @brief
+ Performance kernel dispatching code, for each numerical type, submatrix size, and operation,
+ for the block compressed sparse stripes (BCSS) format.
+ Kernels are unrolled, with explicit loops, for arbitrary blockings.
+ FIXME : OBSOLETE.
+ */
+dnl
+include(`rsb_misc.m4')dnl
+RSB_M4_HEADER_MESSAGE()dnl
+include(`rsb_krnl_bcss_macros.m4')dnl
+include(`rsb_krnl_vb_macros.m4')dnl FIXME : RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_NAME
+dnl
+dnl
+dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl				Function definitions
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#ifndef RSB_BCSS_L_H_INCLUDED
+#define RSB_BCSS_L_H_INCLUDED
+')dnl
+dnl 
+dnl
+ifelse(dnl
+RSB_M4_MEMBER(`BCSR',WANT_MATRIX_STORAGE)dnl
+RSB_M4_MEMBER(`BCSC',WANT_MATRIX_STORAGE)dnl
+,`00',`dnl
+/**
+ * No BCSS formats compiled in.
+ */
+',`dnl
+dnl
+dnl
+dnl
+dnl
+#include "rsb_internals.h"
+dnl
+ifelse(WANT_LOOPING_KERNELS,`1',`dnl
+dnl
+dnl
+#ifdef RSB_WANT_LOOPING_KERNELS 
+dnl
+RSB_M4_BCSS_KERNELS((`l'))
+dnl
+#endif /* RSB_WANT_LOOPING_KERNELS */
+dnl
+dnl
+',`dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+RSB_EMPTY_FILE_FILLER 
+')dnl
+dnl
+dnl
+')dnl WANT_LOOPING_KERNELS
+dnl
+dnl
+')dnl
+dnl
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#endif /* RSB_BCSS_L_H_INCLUDED */
+')dnl
+dnl
+/* @endcond */
+dnl
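
Note that this template does double duty: expanded with ONLY_WANT_HEADERS
defined it yields rsb_krnl_bcss_l.h, and expanded without it, rsb_krnl_bcss_l.c.
With WANT_LOOPING_KERNELS disabled, as in the two generated files shown above,
the expansions collapse to roughly:

    /* header expansion (ONLY_WANT_HEADERS defined) */
    #ifndef RSB_BCSS_L_H_INCLUDED
    #define RSB_BCSS_L_H_INCLUDED
    #include "rsb_internals.h"
    #endif /* RSB_BCSS_L_H_INCLUDED */

    /* source expansion (ONLY_WANT_HEADERS undefined) */
    #include "rsb_internals.h"
    RSB_EMPTY_FILE_FILLER
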
diff --git a/rsb_krnl_bcss_macros.m4 b/rsb_krnl_bcss_macros.m4
new file mode 100644
index 0000000..f064670
--- /dev/null
+++ b/rsb_krnl_bcss_macros.m4
@@ -0,0 +1,1509 @@
+dnl
+dnl	@author: Michele Martone
+dnl
+dnl
+dnl
+define(`RSB_M4_ARE_KERNEL_GENERATION_PARMS_ALLOWED',`dnl
+dnl
+pushdef(`want_what',$1)dnl
+pushdef(`mtype',$2)dnl
+pushdef(`matrix_storage',$3)dnl
+pushdef(`transposition',$4)dnl
+pushdef(`k_symmetry',$5)dnl
+pushdef(`unrolling',$6)dnl	
+pushdef(`b_rows',$7)dnl		block rows
+pushdef(`b_columns',$8)dnl	block columns
+pushdef(`itype',`rsb_coo_idx_t ')dnl integer type (for indices)
+pushdef(`mop',`$9')dnl
+pushdef(`citype',`$10')dnl
+pushdef(`k_diagonal',`$11')dnl
+pushdef(`uplo',$12)dnl
+dnl
+RSB_M4_AND(dnl
+RSB_M4_IMPLY(RSB_M4_IS_SPSX_KERNEL_MOP(mop),RSB_M4_NOT(RSB_M4_SAME(uplo,`g'))),dnl
+RSB_M4_IMPLY(RSB_M4_NOT(RSB_M4_IS_SPSX_KERNEL_MOP(mop)),RSB_M4_SAME(uplo,`g')),dnl
+1)`'dnl
+dnl
+dnl
+popdef(`citype')dnl
+popdef(`mop')dnl
+popdef(`matrix_storage')dnl
+popdef(`b_rows')dnl
+popdef(`b_columns')dnl
+popdef(`transposition')dnl
+popdef(`k_symmetry')dnl
+popdef(`mtype')dnl
+popdef(`itype')dnl
+popdef(`unrolling')dnl
+popdef(`k_diagonal')dnl
+popdef(`want_what')dnl
+popdef(`uplo')dnl
+')dnl
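+dnl
+dnl	In plain terms, the RSB_M4_AND/RSB_M4_IMPLY pair above states that
+dnl	triangular solve (SPSX) kernels must be generated with a definite
+dnl	uplo (l or u), while every other operation must use the general
+dnl	uplo (g). A C restatement of the predicate (a sketch with
+dnl	illustrative names, not part of the library):
+dnl
+dnl	  static int rsb_parms_allowed(int is_spsx_mop, char uplo)
+dnl	  {
+dnl	          return is_spsx_mop ? (uplo != 'g') : (uplo == 'g');
+dnl	  }
+dnl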
+dnl
+dnl
+dnl
+dnl
+dnl	These functions dispatch on the column size, calling the
+dnl	proper kernels.
+dnl
+dnl	They assume type dispatching has just been performed.
+dnl
+dnl
+dnl	RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION(want_what,mtype,matrix_storage,transposition,k_symmetry,unrolling,,,mop,citype,k_diagonal,uplo)
+dnl	-----------------------------------------------------------------------------------------------------------------------------------
+dnl
+define(`RSB_M4_BCXX_KERNEL_SIZE_DISPATCH_FUNCTION',`dnl
+dnl
+dnl
+ifelse(RSB_M4_IS_FORMAT_BCOO(matrix_storage),`1',`dnl
+dnl
+RSB_M4_BCOO_KERNEL_SIZE_DISPATCH_FUNCTION($@)`'dnl
+dnl
+')dnl
+dnl
+dnl
+ifelse(RSB_M4_IS_FORMAT_BCSS(matrix_storage),`1',`dnl
+dnl
+RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION($@)`'dnl
+dnl
+')dnl
+dnl
+dnl
+dnl
+')dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION',`dnl
+dnl
+pushdef(`want_what',$1)dnl
+pushdef(`mtype',$2)dnl
+pushdef(`matrix_storage',$3)dnl
+pushdef(`transposition',$4)dnl
+pushdef(`k_symmetry',$5)dnl
+pushdef(`unrolling',$6)dnl	
+dnl pushdef(`b_rows',$7)dnl		block rows
+dnl pushdef(`b_columns',$8)dnl	block columns
+pushdef(`itype',`rsb_coo_idx_t ')dnl integer type (for indices)
+pushdef(`mop',`$9')dnl
+pushdef(`citype',`$10')dnl
+pushdef(`k_diagonal',`$11')dnl
+pushdef(`uplo',$12)dnl
+dnl
+ifelse(RSB_M4_ARE_KERNEL_GENERATION_PARMS_ALLOWED(want_what,mtype,matrix_storage,transposition,k_symmetry,unrolling,,,mop,citype,k_diagonal,uplo),`1',`dnl
+dnl
+ifelse(want_what,`DOC',`dnl
+	/*  TODO */
+')dnl
+dnl
+ifelse(want_what,`all',`dnl
+dnl `/* This code is intended for a block compressed sparse stripe matrix. */'
+ifdef(`ONLY_WANT_HEADERS',`dnl
+RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION(`function_declaration',mtype,matrix_storage,transposition,k_symmetry,unrolling,,,mop,citype,k_diagonal,uplo)
+',`dnl
+RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION(`function_definition',mtype,matrix_storage,transposition,k_symmetry,unrolling,,,mop,citype,k_diagonal,uplo)
+')dnl
+dnl
+dnl
+dnl
+')dnl
+dnl
+ifelse(want_what,`function_definition',`dnl
+rsb_err_t RSB_M4_KERNEL_SIZE_DISPATCH_FUNCTION_NAME(mtype,matrix_storage,transposition,k_symmetry,unrolling,mop,citype,k_diagonal,uplo)dnl
+RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION(`ARGS',mtype,matrix_storage,transposition,k_symmetry,unrolling,,,mop,citype,k_diagonal,uplo)
+RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION(`BODY',mtype,matrix_storage,transposition,k_symmetry,unrolling,,,mop,citype,k_diagonal,uplo)
+')dnl
+dnl
+ifelse(want_what,`function_declaration',`dnl
+rsb_err_t RSB_M4_KERNEL_SIZE_DISPATCH_FUNCTION_NAME(mtype,matrix_storage,transposition,k_symmetry,unrolling,mop,citype,k_diagonal,uplo)dnl
+RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION(`ARGS',mtype,matrix_storage,transposition,k_symmetry,unrolling,,,mop,citype,k_diagonal,uplo);dnl
+')dnl
+dnl
+ifelse(want_what,`ARGS',`dnl
+dnl
+dnl
+pushdef(`matrix_structs',`const itype Mdim,const itype mdim,const citype * RSB_M4_RESTRICT bindx,const rsb_nnz_idx_t * RSB_M4_RESTRICT bpntr,const rsb_nnz_idx_t *RSB_M4_RESTRICT indptr,const rsb_coo_idx_t * RSB_M4_RESTRICT rpntr,const rsb_coo_idx_t * RSB_M4_RESTRICT cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags')dnl
+(`'dnl
+ifelse(RSB_M4_IS_SPXX_TWO_VECTORS_OPERATING_KERNEL_MOP(mop),1,`dnl
+dnl
+dnl	no restrict on aliasing ops
+dnl
+ifelse(RSB_M4_IS_ALLOWING_ALIASING_KERNEL_MOP(mop),1,`dnl
+const mtype * RSB_M4_RESTRICT VA, const mtype * rhs, mtype * out, matrix_structs`'dnl
+',`dnl
+const mtype * RSB_M4_RESTRICT VA, const mtype * RSB_M4_RESTRICT rhs, mtype * RSB_M4_RESTRICT out, matrix_structs`'dnl
+')dnl
+')dnl
+ifelse(RSB_M4_IS_OP_SCALING_KERNEL_MOP(mop),`1',`dnl
+,const mtype * RSB_M4_RESTRICT alphap`'dnl
+')dnl
+ifelse(RSB_M4_IS_SCALING_KERNEL_MOP(mop),`1',`dnl
+,const mtype * RSB_M4_RESTRICT betap`'dnl
+')dnl
+ifelse(RSB_M4_IS_STRIDED_KERNEL_MOP(mop),`1',`dnl
+,rsb_coo_idx_t incx, rsb_coo_idx_t incy`'dnl
+')dnl
+ifelse(mop,`spmm_az',`dnl
+dnl
+dnl	FIXME
+dnl
+const itype bstride, const itype cstride, const itype nrhs`'dnl
+')dnl
+ifelse(mop,`scale',`dnl
+mtype * VA, matrix_structs, const mtype *scale_factors`'dnl
+')dnl
+ifelse(RSB_M4_IS_ACC_WRITING_KERNEL_MOP(mop),`1',`dnl
+const mtype * VA, mtype * row_sums, matrix_structs`'dnl
+')dnl
+ifelse(mop,`negation',`dnl
+mtype * VA, matrix_structs`'dnl
+')dnl
+)dnl
+dnl
+')dnl
+dnl
+dnl
+ifelse(want_what,`BODY',`dnl
+dnl
+dnl
+{
+	RSB_M4_DEBUGINFO(``$0'')dnl
+dnl	/*!  \ingroup rsb_doc_kernels
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("mop") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+ifelse(RSB_M4_IS_FORMAT_BCSS(matrix_storage),1,`dnl
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors should be
+	 * allowed to overflow (i.e. be overallocated) by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+dnl	 *
+dnl	 * Note: We assume this quantity is the same for each block.
+dnl	 *
+dnl	 * WARNING : EXPERIMENTAL FUNCTION
+dnl	 * for block bigger than ~12x12 it seems that inline matrix multiplication code slows down the whole thing
+')dnl
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+ifelse(RSB_M4_IS_FORMAT_BCSS(matrix_storage),1,`dnl
+pushdef(`args',`RSB_M4_ARGS_TO_ACTUAL_ARGS(RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION(`ARGS',mtype,matrix_storage,transposition,k_symmetry,unrolling,,,mop,citype,k_diagonal,uplo))')dnl
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+dnl #if RSB_EXPERIMENTAL_WANT_PURE_BCSS
+ifelse(RSB_M4_WANT_20110206_BOUNDED_BOX_PATCH,1,`dnl
+dnl 20110206	set the following 
+		columns = rows=1;	/* experimental, for the bounded box patch */
+',`dnl
+dnl 20110206	and commented the following 
+		columns=bc,rows=br;
+')dnl
+dnl #else
+dnl 		columns = rows=1;
+dnl #endif
+
+switch(rows)
+{
+foreach(`rowsu',RSB_M4_ROWS_UNROLL,`dnl
+	case rowsu:
+	{switch(columns)
+	{
+foreach(`colsu',RSB_M4_COLUMNS_UNROLL,`dnl
+		case colsu:/* rowsu colsu matrix_storage */
+		errval = RSB_M4_BCSS_KERNEL_FUNCTION(`ID',mtype,matrix_storage,transposition,k_symmetry,rowsu,colsu,unrolling,mop,citype,k_diagonal,uplo)( args );
+		break;
+')dnl
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = RSB_M4_BCSS_KERNEL_FUNCTION(`ID',mtype,matrix_storage,transposition,k_symmetry,rowsu,RSB_M4_COLUMNS_FALLBACK_UNROLL,`l',mop,citype,k_diagonal,uplo)( args );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+')dnl
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = RSB_M4_BCSS_KERNEL_FUNCTION(`ID',mtype,matrix_storage,transposition,k_symmetry,RSB_M4_ROWS_FALLBACK_UNROLL,RSB_M4_COLUMNS_FALLBACK_UNROLL,`l',mop,citype,k_diagonal,uplo)( args );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+popdef(`args')dnl
+')dnl
+	dnl errval = RSB_ERR_UNSUPPORTED_TYPE;
+	return errval;
+}
+dnl
+')dnl
+dnl
+')dnl
+dnl
+popdef(`citype')dnl
+popdef(`mop')dnl
+popdef(`matrix_storage')dnl
+dnl popdef(`b_rows')dnl
+dnl popdef(`b_columns')dnl
+popdef(`transposition')dnl
+popdef(`k_symmetry')dnl
+popdef(`mtype')dnl
+popdef(`itype')dnl
+popdef(`unrolling')dnl
+popdef(`k_diagonal')dnl
+popdef(`want_what')dnl
+popdef(`uplo')dnl
+')dnl
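+dnl
+dnl	In plain terms: for a 1x1-only configuration (the default visible in
+dnl	the generated header above), the BODY branch expands to a nested
+dnl	switch of roughly this shape (schematic C sketch, argument list
+dnl	elided, rowssums/double taken as the example kernel):
+dnl
+dnl	  rsb_err_t errval = RSB_ERR_NO_ERROR;
+dnl	  register rsb_coo_idx_t columns, rows;
+dnl	  if (cpntr && rpntr)
+dnl	          { columns = cpntr[1]-cpntr[0]; rows = rpntr[1]-rpntr[0]; }
+dnl	  else
+dnl	          columns = rows = 1; /* bounded box patch */
+dnl	  switch (rows)
+dnl	  {
+dnl	  case 1:
+dnl	          switch (columns)
+dnl	          {
+dnl	          case 1: errval = rsb__BCSR_rowssums_double___tN_r1_c1_uu_sN_d_u( /* args */ ); break;
+dnl	          default: errval = RSB_ERR_UNSUPPORTED_OPERATION; /* no looping kernels */
+dnl	          }
+dnl	          break;
+dnl	  default: errval = RSB_ERR_UNSUPPORTED_OPERATION;
+dnl	  }
+dnl	  return errval;
+dnl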
+dnl
+dnl
+dnl
+dnl
+dnl	These functions will perform their operations on fixed block matrices.
+dnl
+define(`RSB_M4_BXXX_KERNEL_FUNCTION_HAS_IMPLEMENTATION',`dnl
+dnl
+dnl
+pushdef(`want_what',$1)dnl
+pushdef(`mtype',$2)dnl
+pushdef(`matrix_storage',$3)dnl	
+pushdef(`transposition',$4)dnl	
+pushdef(`k_symmetry',$5)dnl	
+pushdef(`b_rows',$6)dnl		block rows
+pushdef(`b_columns',$7)dnl	block columns
+pushdef(`itype',`rsb_coo_idx_t ')dnl integer type (for indices)
+pushdef(`unrolling',$8)dnl	
+pushdef(`mop',$9)dnl	
+pushdef(`citype',$10)dnl	
+pushdef(`k_diagonal',$11)dnl	
+pushdef(`uplo',$12)dnl	
+dnl
+ifelse(dnl
+dnl
+dnl	The following are cases which are NOT implemented.
+dnl	Each line emits a non empty character (`*') to block an implementation.
+dnl
+dnl	CSC SPSV gets blocked:
+dnl ifelse(RSB_M4_AND(RSB_M4_IS_SPSX_KERNEL_MOP(mop),RSB_M4_IS_FORMAT_COLUMN_MAJOR(matrix_storage),RSB_M4_NOT(transposed)),1,`no',`')`'dnl
+dnl	CSR transposed SPSV gets blocked:
+dnl ifelse(RSB_M4_AND(RSB_M4_IS_SPSX_KERNEL_MOP(mop),RSB_M4_IS_FORMAT_ROW_MAJOR(matrix_storage),transposed),1,`no'`')dnl
+dnl	SPSV for non 1x1 blockings gets blocked
+ifelse(RSB_M4_IS_SPSX_KERNEL_MOP(mop),1,ifelse(RSB_M4_AND(RSB_M4_SAME(b_rows,1),RSB_M4_SAME(b_columns,1)),`1',`',`no'))`'dnl
+dnl
+dnl	any symmetric kernel for non 1x1 blockings gets blocked
+dnl	TODO : should modify RSB_M4_EXTRA_SYMMETRIC_DIAGONAL_FIXING_KERNEL to support k_symmetry and blocking
+ifelse(RSB_M4_OR(RSB_M4_IS_NOT_UNSYMMETRIC(k_symmetry),RSB_M4_AND(RSB_M4_SAME(b_rows,1),RSB_M4_SAME(b_columns,1))),1,`',`no')`'dnl
+dnl	any SPSV symmetric gets blocked
+ifelse(RSB_M4_IS_SPSX_KERNEL_MOP(mop),1,ifelse(RSB_M4_IS_NOT_UNSYMMETRIC(k_symmetry),1,`no',`'))`'dnl
+dnl
+,`',`1',`0')dnl
+dnl
+popdef(`uplo')dnl
+popdef(`want_what')dnl
+popdef(`k_diagonal')dnl
+popdef(`citype')dnl
+popdef(`mop')dnl
+popdef(`matrix_storage')dnl
+popdef(`k_symmetry')dnl
+popdef(`transposition')dnl
+popdef(`mtype')dnl
+popdef(`itype')dnl
+popdef(`unrolling')dnl
+')dnl
+dnl
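+dnl
+dnl	In plain terms: the ifelse chain above works by veto, with each
+dnl	blocked case emitting a non-empty token and the final ifelse mapping
+dnl	an empty accumulation to 1 (implemented) and anything else to 0.
+dnl	Restated in C, following the comments above (a sketch with
+dnl	illustrative names):
+dnl
+dnl	  static int rsb_kernel_has_implementation(int is_spsx, int is_symm, int br, int bc)
+dnl	  {
+dnl	          const int is_1x1 = (br == 1 && bc == 1);
+dnl	          if (is_spsx && !is_1x1) return 0; /* SPSV only for 1x1 blockings */
+dnl	          if (is_symm && !is_1x1) return 0; /* symmetric kernels only for 1x1 */
+dnl	          if (is_spsx && is_symm) return 0; /* no symmetric triangular solve */
+dnl	          return 1;
+dnl	  }
+dnl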
+dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_BXXX_KERNEL_FUNCTION_HELP',`dnl
+dnl
+dnl
+pushdef(`want_what',$1)dnl
+pushdef(`mtype',$2)dnl
+pushdef(`matrix_storage',$3)dnl	
+pushdef(`transposition',$4)dnl	
+pushdef(`k_symmetry',$5)dnl	
+pushdef(`b_rows',$6)dnl		block rows
+pushdef(`b_columns',$7)dnl	block columns
+pushdef(`itype',`rsb_coo_idx_t ')dnl integer type (for indices)
+pushdef(`unrolling',$8)dnl	
+pushdef(`mop',$9)dnl	
+pushdef(`citype',$10)dnl	
+pushdef(`k_diagonal',$11)dnl	
+pushdef(`uplo',$12)dnl	
+dnl
+	/**
+	 * \ingroup rsb_doc_kernels
+ifelse(RSB_M4_MEMBER(mop,`spsv_uxua'),1,`dnl
+	 * Computes \f$y \leftarrow RSB_M4_TRANSPOSITION_OP_EFFECT(transposition,`A')^{-1} \cdot x\f$, where RSB_M4_SYMMETRY_EFFECT(k_symmetry,`A').
+')dnl
+ifelse(mop,`spmv_unua',`dnl
+	 * Computes \f$y \leftarrow y - RSB_M4_TRANSPOSITION_OP_EFFECT(transposition,`A') \cdot x\f$, where RSB_M4_SYMMETRY_EFFECT(k_symmetry,`A').
+')dnl
+ifelse(mop,`spmv_uaua',`dnl
+	 * Computes \f$y \leftarrow y + RSB_M4_TRANSPOSITION_OP_EFFECT(transposition,`A') \cdot x\f$, where RSB_M4_SYMMETRY_EFFECT(k_symmetry,`A').
+')dnl
+ifelse(mop,`spmv_sxsa',`dnl
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot RSB_M4_TRANSPOSITION_OP_EFFECT(transposition,`A') \cdot x\f$, where RSB_M4_SYMMETRY_EFFECT(k_symmetry,`A'),
+	 * with incx and incy as x and y vector strides.
+')dnl
+ifelse(mop,`spmv_sxsx',`dnl
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot RSB_M4_TRANSPOSITION_OP_EFFECT(transposition,`A') \cdot x\f$, where RSB_M4_SYMMETRY_EFFECT(k_symmetry,`A'),
+	 * with incx and incy as x and y vector strides.
+')dnl
+ifelse(mop,`spmv_sasa',`dnl
+	 * Computes \f$y \leftarrow y + RSB_M4_TRANSPOSITION_OP_EFFECT(transposition,`A') \cdot x\f$, where RSB_M4_SYMMETRY_EFFECT(k_symmetry,`A').
+')dnl
+ifelse(mop,`spmv_uxua',`dnl
+	 * Computes \f$y \leftarrow y + \alpha \cdot RSB_M4_TRANSPOSITION_OP_EFFECT(transposition,`A') \cdot x\f$, where RSB_M4_SYMMETRY_EFFECT(k_symmetry,`A').
+')dnl
+ifelse(mop,`spmv_uxux',`dnl
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot RSB_M4_TRANSPOSITION_OP_EFFECT(transposition,`A') \cdot x\f$, where RSB_M4_SYMMETRY_EFFECT(k_symmetry,`A').
+')dnl
+ifelse(mop,`spmm_az',`dnl
+	 * Computes \f$y \leftarrow RSB_M4_TRANSPOSITION_OP_EFFECT(transposition,`A') \cdot x\f$, where RSB_M4_SYMMETRY_EFFECT(k_symmetry,`A').
+')dnl
+ifelse(mop,`infty_norm',`dnl
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A_{ij} \f$), where RSB_M4_SYMMETRY_EFFECT(k_symmetry,`A').
+')dnl
+ifelse(mop,`rowssums',`dnl
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{i=0}^{Mdim} A^{T}_{ij} \f$), where RSB_M4_SYMMETRY_EFFECT(k_symmetry,`A').
+')dnl
+ifelse(mop,`spmv_uauz',`dnl
+	 * Computes \f$y \leftarrow RSB_M4_TRANSPOSITION_OP_EFFECT(transposition,`A') \cdot x\f$, where RSB_M4_SYMMETRY_EFFECT(k_symmetry,`A').
+')dnl
+ifelse(mop,`scale',`dnl
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii}=s_{i}\f$, where RSB_M4_SYMMETRY_EFFECT(k_symmetry,`A').
+')dnl
+ifelse(mop,`negation',`dnl
+	 * Computes \f$A \leftarrow - A \f$.
+')dnl
+	 * Matrix A should be blocked b_rows x b_columns, stored in matrix_storage format, RSB_M4_MATRIX_DIAGONAL_DENOMINATION(k_diagonal), of `type' mtype, with citype column indices.
+dnl
+ifelse(RSB_M4_BXXX_KERNEL_FUNCTION_HAS_IMPLEMENTATION($@),`1',`dnl
+	 * \return \rsb_errval_inp_param_msg
+	 */
+',`dnl
+dnl	FIXME: the return error is not always adequate, here.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+dnl	/* or RSB_ERR_UNSUPPORTED_FEATURE ? */
+')dnl
+dnl
+popdef(`uplo')dnl
+popdef(`want_what')dnl
+popdef(`k_diagonal')dnl
+popdef(`citype')dnl
+popdef(`mop')dnl
+popdef(`matrix_storage')dnl
+popdef(`k_symmetry')dnl
+popdef(`transposition')dnl
+popdef(`mtype')dnl
+popdef(`itype')dnl
+popdef(`unrolling')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+dnl	These functions will perform their operations on fixed block matrices.
+dnl
+define(`RSB_M4_BCSS_KERNEL_FUNCTION',`dnl
+dnl
+dnl
+pushdef(`want_what',$1)dnl
+pushdef(`mtype',$2)dnl
+pushdef(`matrix_storage',$3)dnl	
+pushdef(`transposition',$4)dnl	
+pushdef(`k_symmetry',$5)dnl	
+pushdef(`b_rows',$6)dnl		block rows
+pushdef(`b_columns',$7)dnl	block columns
+pushdef(`itype',`rsb_coo_idx_t ')dnl integer type (for indices)
+pushdef(`unrolling',$8)dnl	
+pushdef(`mop',$9)dnl	
+pushdef(`citype',$10)dnl	
+pushdef(`k_diagonal',$11)dnl	
+pushdef(`uplo',$12)dnl	
+dnl
+ifelse(RSB_M4_ARE_KERNEL_GENERATION_PARMS_ALLOWED(want_what,mtype,matrix_storage,transposition,k_symmetry,unrolling,,,mop,citype,k_diagonal,uplo),`1',`dnl
+dnl
+ifelse(want_what,`all',`dnl
+dnl
+ifelse(RSB_M4_IS_FORMAT_BCSS(matrix_storage),1,`dnl
+rsb_err_t RSB_M4_BCSS_KERNEL_FUNCTION(`ID',mtype,matrix_storage,transposition,k_symmetry,b_rows,b_columns,unrolling,mop,citype,k_diagonal,uplo)dnl
+RSB_M4_BCSS_KERNEL_FUNCTION(`ARGS',mtype,matrix_storage,transposition,k_symmetry,b_rows,b_columns,unrolling,mop,citype,k_diagonal,uplo)dnl
+')dnl
+ifdef(`ONLY_WANT_HEADERS',`;
+',`
+RSB_M4_BCSS_KERNEL_FUNCTION(`BODY',mtype,matrix_storage,transposition,k_symmetry,b_rows,b_columns,unrolling,mop,citype,k_diagonal,uplo)dnl
+')dnl
+')dnl
+dnl
+ifelse(want_what,`ID',`dnl
+RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_NAME(mtype,matrix_storage,transposition,k_symmetry,b_rows,b_columns,unrolling,mop,citype,k_diagonal,uplo)`'dnl
+')dnl
+dnl
+ifelse(want_what,`ARGS',`dnl
+RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION(`ARGS',mtype,matrix_storage,transposition,k_symmetry,unrolling,,,mop,citype,k_diagonal,uplo)`'dnl
+')dnl
+dnl
+ifelse(want_what,`BODY',`dnl
+dnl
+{
+dnl
+dnl	The body of a CSR/CSC computational kernel.
+dnl
+dnl	RSB_M4_DEBUGINFO(``$0'')dnl
+dnl
+ifelse(RSB_M4_IS_FORMAT_BCSS(matrix_storage),1,`dnl
+dnl
+pushdef(`total_block_columns',ifelse(RSB_M4_IS_FORMAT_COLUMN_MAJOR(matrix_storage),1,`Mdim',`mdim'))dnl
+pushdef(`total_block_rows',ifelse(RSB_M4_IS_FORMAT_COLUMN_MAJOR(matrix_storage),1,`mdim',`Mdim'))dnl
+pushdef(`total_rows',ifelse(unrolling,`l',rpntr[total_block_rows],total_block_rows*b_rows))dnl
+pushdef(`total_columns',ifelse(unrolling,`l',cpntr[total_block_columns],total_block_columns*b_columns))dnl
+dnl
+ifelse(RSB_M4_IS_FORMAT_COLUMN_MAJOR(matrix_storage),1,`dnl
+pushdef(`mi',`i')dnl
+pushdef(`Mi',`j')dnl
+')dnl
+ifelse(RSB_M4_IS_FORMAT_ROW_MAJOR(matrix_storage),1,`dnl
+pushdef(`mi',`j')dnl
+pushdef(`Mi',`i')dnl
+')dnl
+dnl
+dnl	FIXME : out_dim should depend on the operation!
+dnl
+pushdef(`out_dim',ifelse(transposition,RSB_M4_TRANS_T,total_columns,total_rows))dnl
+dnl
+pushdef(`is_zero_acc_spsv_kernel',`RSB_M4_AND(RSB_M4_IS_SPSX_KERNEL_MOP(mop),RSB_M4_OR(RSB_M4_AND(RSB_M4_IS_FORMAT_ROW_MAJOR(matrix_storage),RSB_M4_SAME(transposition,RSB_M4_TRANS_N)),RSB_M4_AND(RSB_M4_IS_FORMAT_COLUMN_MAJOR(matrix_storage),RSB_M4_NOT(RSB_M4_SAME(transposition,RSB_M4_TRANS_N)))))')dnl
+dnl pushdef(`is_zero_acc_spsv_kernel',`RSB_M4_AND(RSB_M4_IS_SPSX_KERNEL_MOP(mop),RSB_M4_OR(RSB_M4_AND(RSB_M4_IS_FORMAT_ROW_MAJOR(matrix_storage),RSB_M4_SAME(transposition,RSB_M4_TRANS_N))))')dnl
+dnl
+pushdef(`is_diag_d_spsv_kernel',`RSB_M4_AND(RSB_M4_IS_SPSX_KERNEL_MOP(mop),RSB_M4_NOT(RSB_M4_OR(RSB_M4_AND(RSB_M4_IS_FORMAT_ROW_MAJOR(matrix_storage),RSB_M4_SAME(transposition,RSB_M4_TRANS_N)),RSB_M4_AND(RSB_M4_IS_FORMAT_COLUMN_MAJOR(matrix_storage),RSB_M4_NOT(RSB_M4_SAME(transposition,RSB_M4_TRANS_N))))))')dnl
+dnl
+dnl pushdef(`is_an_externally_backward_kernel',`RSB_M4_AND(RSB_M4_IS_SPSX_KERNEL_MOP(mop),RSB_M4_XOR(RSB_M4_IS_FORMAT_ROW_MAJOR(matrix_storage),RSB_M4_SAME(transposition,RSB_M4_TRANS_N)))')dnl
+dnl pushdef(`is_an_externally_backward_kernel',`RSB_M4_AND(RSB_M4_IS_SPSX_KERNEL_MOP(mop),RSB_M4_NOT(RSB_M4_SAME(transposition,RSB_M4_TRANS_N)))')dnl
+pushdef(`is_an_externally_backward_kernel',`RSB_M4_AND(RSB_M4_IS_SPSX_KERNEL_MOP(mop),RSB_M4_XOR(RSB_M4_NOT(RSB_M4_SAME(transposition,RSB_M4_TRANS_N)),RSB_M4_SAME(uplo,`u')))')dnl
+dnl
+pushdef(`is_a_backward_kernel',is_an_externally_backward_kernel)dnl
+dnl pushdef(`is_a_backward_kernel',`RSB_M4_AND(RSB_M4_IS_SPSX_KERNEL_MOP(mop),RSB_M4_IS_FORMAT_COLUMN_MAJOR(matrix_storage),RSB_M4_NOT(RSB_M4_SAME(transposition,RSB_M4_TRANS_N)))')dnl
+dnl
+pushdef(`block_backward',`ifelse(is_a_backward_kernel,1,`a += rows*columns',`a -= rows*columns')')dnl
+pushdef(`block_forward',`ifelse(is_a_backward_kernel,1,`a -= rows*columns',`a += rows*columns')')dnl
+dnl
+dnl
+dnl	FIXME : and so should the stride x/y association
+dnl
+dnl pushdef(`extra_xstride',ifelse(RSB_M4_IS_STRIDED_KERNEL_MOP(mop),1,`incx',`0'))dnl
+dnl pushdef(`extra_ystride',ifelse(RSB_M4_IS_STRIDED_KERNEL_MOP(mop),1,`incy',`0'))dnl
+pushdef(`xstride',ifelse(RSB_M4_IS_STRIDED_KERNEL_MOP(mop),1,`(incx)',`1'))dnl
+pushdef(`ystride',ifelse(RSB_M4_IS_STRIDED_KERNEL_MOP(mop),1,`(incy)',`1'))dnl
+pushdef(`extra_xstride',ifelse(RSB_M4_IS_STRIDED_KERNEL_MOP(mop),1,`xstride',`1'))dnl
+pushdef(`extra_ystride',ifelse(RSB_M4_IS_STRIDED_KERNEL_MOP(mop),1,`ystride',`1'))dnl
+dnl
+dnl	NEW:
+dnl
+pushdef(`transposed',ifelse(transposition,RSB_M4_TRANS_N,0,1))dnl
+dnl pushdef(`transposed',dnl
+dnl ifelse(RSB_M4_IS_FORMAT_ROW_MAJOR(matrix_storage),1,eval(transposed),eval(1-transposed))dnl
+dnl )dnl
+dnl
+dnl
+pushdef(`brin',`(i*extra_ystride)')dnl
+pushdef(`bcin',`(j*extra_xstride)')dnl
+dnl
+ifelse(transposed,`1',`dnl
+dnl
+dnl	block row index, block column index
+dnl
+pushdef(`bci',`(i*extra_xstride)')dnl
+pushdef(`bri',`(j*extra_ystride)')dnl
+pushdef(`bcit',`(j*extra_xstride)')dnl
+pushdef(`brit',`(i*extra_ystride)')dnl
+',`dnl
+pushdef(`bri',`(i*extra_ystride)')dnl
+pushdef(`bci',`(j*extra_xstride)')dnl
+pushdef(`brit',`(j*extra_ystride)')dnl
+pushdef(`bcit',`(i*extra_xstride)')dnl
+')dnl
+dnl
+pushdef(`should_init_out_vector_before_outer_loop',`dnl
+RSB_M4_OR(RSB_M4_IS_SCALING_KERNEL_MOP(mop),dnl
+RSB_M4_AND(RSB_M4_IS_FORMAT_COLUMN_MAJOR(matrix_storage),RSB_M4_NOT(eval(transposed))),dnl
+RSB_M4_AND(RSB_M4_IS_FORMAT_ROW_MAJOR(matrix_storage),eval(transposed)),dnl
+RSB_M4_IS_NOT_UNSYMMETRIC(k_symmetry))dnl
+')dnl
+dnl
+dnl
+dnl
+pushdef(`has_implementation',`dnl
+RSB_M4_BXXX_KERNEL_FUNCTION_HAS_IMPLEMENTATION($@)`'dnl
+')dnl
+dnl
+')
+RSB_M4_BXXX_KERNEL_FUNCTION_HELP($@)
+ifelse(RSB_M4_AND(RSB_M4_IS_SPMX_KERNEL_MOP(mop),RSB_M4_IS_DIAGONAL_IMPLICIT(k_diagonal)),1,`dnl
+	RSB_M4_FAKE_DIAG_IMPLICIT_MSG
+')dnl
+ifelse(has_implementation,`1',`dnl
+',`dnl
+dnl	/* or RSB_ERR_UNSUPPORTED_FEATURE ? */
+	return RSB_ERR_UNIMPLEMENTED_YET;
+')dnl
+dnl
+ifelse(has_implementation,`1',`dnl
+dnl	Comments
+dnl
+ifelse(RSB_M4_AND(RSB_M4_IS_SPMX_KERNEL_MOP(mop),RSB_M4_IS_NOT_UNSYMMETRIC(k_symmetry)),1,`dnl
+	/*
+ifelse(RSB_M4_want_verbose_comments,`1',`dnl
+		WARNING : This function assumes the matrix is symmetric, and therefore
+		writes the output vector in both the 0,Mdim and -roff+coff,-roff+coff+Mdim ranges.
+		So if you are using this function in a parallel environment, you should take care of
+		proper locking of the output vectors.
+')dnl
+ifelse(RSB_M4_AND(RSB_M4_IS_SPMX_SCALING_KERNEL_MOP(mop),RSB_M4_IS_NOT_UNSYMMETRIC(k_symmetry)),1,`dnl
+		The output vector zeroing is affected, too, so if you are using this kernel with
+		recursive storage, you should take care of properly zeroing the whole output vector.
+')dnl
+	*/
+')dnl
+dnl
+dnl
+dnl
+dnl
+ifelse(RSB_M4_OR(RSB_M4_AND(RSB_M4_NOT(RSB_M4_IS_COMPLEX_TYPE(type)),RSB_M4_IS_NOT_UNSYMMETRIC(k_symmetry),RSB_M4_NOT(transposition,RSB_M4_TRANS_N))),1,`dnl
+dnl
+ifelse(RSB_M4_SAME(transposition,RSB_M4_TRANS_C),1,`dnl
+	/* `For non complex types, hermitian defaults to plain transposition.' */
+	return RSB_M4_BCSS_KERNEL_FUNCTION(`ID',type,matrix_storage,RSB_M4_H2T_TRANSPOSITION(transposition),k_symmetry,rowsu,colsu,unrolling,mop,citype,k_diagonal,uplo)dnl
+(RSB_M4_ARGS_TO_ACTUAL_ARGS(RSB_M4_BCSS_KERNEL_FUNCTION(`ARGS',type,matrix_storage,RSB_M4_H2T_TRANSPOSITION(transposition),k_symmetry,rowsu,colsu,unrolling,mop,citype)));
+')dnl
+dnl
+ifelse(RSB_M4_SAME(transposition,RSB_M4_TRANS_T),1,`dnl
+	/* `This kernel performs the same as its transposed', transposition -> RSB_M4_TRANSPOSE_TRANSPOSITION(transposition). */
+	return RSB_M4_BCSS_KERNEL_FUNCTION(`ID',type,matrix_storage,RSB_M4_TRANSPOSE_TRANSPOSITION(transposition),k_symmetry,rowsu,colsu,unrolling,mop,citype,k_diagonal,uplo)dnl
+(RSB_M4_ARGS_TO_ACTUAL_ARGS(RSB_M4_BCSS_KERNEL_FUNCTION(`ARGS',type,matrix_storage,RSB_M4_TRANSPOSE_TRANSPOSITION(transposition),k_symmetry,rowsu,colsu,unrolling,mop,citype)));
+')dnl
+dnl
+',`dnl
+ifelse(RSB_M4_OR(RSB_M4_AND(RSB_M4_IS_COMPLEX_TYPE(type),RSB_M4_SAME(k_symmetry,`hNEVEROCCURINGFIXME'),RSB_M4_SAME(transposition,RSB_M4_TRANS_C)),RSB_M4_AND(RSB_M4_IS_COMPLEX_TYPE(type),RSB_M4_SAME(k_symmetry,`s'),RSB_M4_SAME(transposition,RSB_M4_TRANS_T))),1,`dnl
+dnl
+	/* `This kernel performs the same as its transposed', transposition -> RSB_M4_TRANSPOSE_TRANSPOSITION(transposition). */
+	return RSB_M4_BCSS_KERNEL_FUNCTION(`ID',type,matrix_storage,RSB_M4_TRANSPOSE_TRANSPOSITION(transposition),k_symmetry,rowsu,colsu,unrolling,mop,citype,k_diagonal,uplo)dnl
+(RSB_M4_ARGS_TO_ACTUAL_ARGS(RSB_M4_BCSS_KERNEL_FUNCTION(`ARGS',type,matrix_storage,RSB_M4_TRANSPOSE_TRANSPOSITION(transposition),k_symmetry,rowsu,colsu,unrolling,mop,citype)));
+dnl
+dnl ifelse(RSB_M4_SAME(transposition,RSB_M4_TRANS_C),1,`dnl
+dnl 	/*
+dnl 		The matrix is treated as symmetric hermitian.
+dnl 		FIXME: missing implementation.
+dnl 	*/
+dnl 	return RSB_ERR_UNIMPLEMENTED_YET;
+dnl ')dnl
+dnl
+',`dnl
+dnl
+ifelse(RSB_M4_AND(RSB_M4_NOT(RSB_M4_IS_COMPLEX_TYPE(type)),RSB_M4_IS_NOT_UNSYMMETRIC(k_symmetry),RSB_M4_NOT(RSB_M4_SAME(transposition,RSB_M4_TRANS_N))),1,`dnl
+dnl
+	/* Symmetric `transposed' reverts to symmetric `not transposed' */
+	return RSB_M4_BCSS_KERNEL_FUNCTION(`ID',type,matrix_storage,RSB_M4_TRANS_N,k_symmetry,rowsu,colsu,unrolling,mop,citype,k_diagonal,uplo)dnl
+(RSB_M4_ARGS_TO_ACTUAL_ARGS(RSB_M4_BCSS_KERNEL_FUNCTION(`ARGS',type,matrix_storage,RSB_M4_TRANS_N,k_symmetry,rowsu,colsu,unrolling,mop,citype,k_diagonal,uplo)));
+dnl
+',`dnl
+dnl
+dnl
+ifelse(unrolling,`l',/* FIXME : l-unrolled functions are broken */)dnl
+dnl
+dnl	BEGIN VARIABLES DECLARATIONS
+dnl
+ifelse(RSB_M4_IS_FORMAT_BCSS(matrix_storage),1,`dnl
+ifelse(RSB_M4_IS_SPMV_KERNEL_MOP(mop),`1',`dnl
+ifelse(RSB_M4_IS_NOT_UNSYMMETRIC(k_symmetry),1,`dnl
+	register rsb_coo_idx_t i=0,j=0;
+',`dnl
+	register rsb_coo_idx_t i=0;
+')dnl
+',`dnl
+	register rsb_coo_idx_t i=0,j=0;
+')dnl
+	register rsb_nnz_idx_t k=0;
+dnl
+ifelse(RSB_M4_NOT(RSB_M4_IS_SPMV_KERNEL_MOP(mop)),`1',`dnl
+ifelse(unrolling,`l',`dnl
+	const register rsb_coo_idx_t columns=cpntr[1]-cpntr[0];	/* we assume that block_count >= 1 */
+	const register rsb_coo_idx_t rows   =rpntr[1]-rpntr[0];	/* we assume that block_count >= 1 */
+',`dnl
+	const register rsb_coo_idx_t columns=b_columns,rows=b_rows;
+')dnl
+')dnl
+')dnl
+dnl
+ifelse(RSB_M4_IS_READONLY_KERNEL_MOP(mop),1,`dnl
+ifelse(RSB_M4_IS_SPMV_KERNEL_MOP(mop),`0',`dnl
+	const mtype *a=VA;
+')dnl
+')dnl
+ifelse(RSB_M4_IS_WRITEONLY_KERNEL_MOP(mop),1,`dnl
+	mtype *a=VA;
+')dnl
+dnl
+ifelse(RSB_M4_IS_RC_BIASED_KERNEL_MOP(mop),`0',`dnl
+ifelse(RSB_M4_IS_STRIDED_KERNEL_MOP(mop),`0',`dnl
+ifelse(RSB_M4_IS_SPSV_KERNEL_MOP(mop),`0',`dnl
+ifelse(RSB_M4_IS_SPMV_KERNEL_MOP(mop),`0',`dnl
+	const rsb_coo_idx_t incx=1,incy=1;`'
+')dnl
+')dnl
+')dnl
+')dnl
+dnl
+ifelse(RSB_M4_IS_OP_SCALING_KERNEL_MOP(mop),`1',`dnl
+	const mtype alpha=*alphap;`'dnl
+')dnl
+ifelse(RSB_M4_IS_SCALING_KERNEL_MOP(mop),`1',`dnl
+	const mtype beta=*betap;`'dnl
+')dnl
+dnl
+ifelse(RSB_M4_is_transposed_spmv,1,`dnl
+	const mtype *trhs = rhs+xstride*(roff-coff);`'
+	mtype *tout=out+ystride*(coff-roff);`'
+
+')dnl
+ifelse(RSB_M4_IS_SPXX_OP_SCALING_KERNEL_MOP(mop),1,`dnl
+')dnl
+dnl
+dnl
+dnl	END VARIABLES DECLARATIONS
+dnl
+dnl	BEGIN CONDITIONAL VECTOR SCALING
+dnl
+ifelse(should_init_out_vector_before_outer_loop,1,`dnl
+ifelse(RSB_M4_IS_SCALING_KERNEL_MOP(mop),1,`dnl
+ifelse(RSB_M4_IS_STRIDED_KERNEL_MOP(mop),1,`dnl
+	if(beta!=1)rsb__cblas_Xscal(RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type),out_dim,&beta,out,ystride);
+',`dnl
+	rsb__cblas_Xscal(RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type), out_dim,&beta, out, 1);
+') /* we scale the destination vector */
+')dnl
+ifelse(RSB_M4_IS_ZEROING_KERNEL_MOP(mop),1,`dnl
+	rsb__cblas_Xscal(RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type),out_dim,NULL,out,ystride);
+')dnl
+')dnl
+dnl
+dnl	END CONDITIONAL VECTOR SCALING
+dnl
+dnl	BEGIN COMMON EXTERNAL LOOP BEGINNING
+dnl
+ifelse(RSB_M4_want_verbose_comments,`1',` /*	Outer loop. Occurs on the major dimension.	*/ ')dnl
+dnl
+ifelse(is_an_externally_backward_kernel,1,`
+	for(Mi=Mdim-1; RSB_LIKELY((Mi+1)>0 /*trick for unsigned indices */);--Mi) //RSB_M4_IS_SPSX_KERNEL_MOP(mop),RSB_M4_IS_FORMAT_COLUMN_MAJOR(matrix_storage),RSB_M4_NOT(RSB_M4_SAME(transposition,RSB_M4_TRANS_N))
+	{
+',`dnl
+dnl
+ifelse(RSB_M4_AND(RSB_M4_WANT_20110206_BOUNDED_BOX_PATCH,RSB_M4_NOT(RSB_M4_IS_SCALING_OR_ZEROING_KERNEL_MOP(mop))),1,`dnl
+dnl	really, the above condition should also check for transposition! but as it is, it does no harm.
+	for(Mi=br;RSB_LIKELY(Mi<bc);++Mi)	/* experimental, for the bounded box patch */
+',`dnl
+	for(Mi=0;RSB_LIKELY(Mi<Mdim);++Mi)
+')dnl
+dnl
+	{
+')dnl
+dnl
+ifelse(RSB_M4_want_verbose_comments,`1',`dnl
+		/* logically,  i is the working block row, j is the working block column */
+		/* physically, Mi is the working block row, mi is the working block column */
+')dnl
+dnl
+pushdef(`colsu',ifelse(unrolling,`l',columns,colsu))dnl
+pushdef(`rowsu',ifelse(unrolling,`l',rows,rowsu))dnl
+pushdef(`tcolsu',ifelse(transposition,RSB_M4_TRANS_T,rowsu,colsu))dnl
+pushdef(`trowsu',ifelse(transposition,RSB_M4_TRANS_T,colsu,rowsu))dnl
+dnl
+ifelse(RSB_M4_IS_SPXX_OP_SCALING_KERNEL_MOP(mop),1,`dnl
+pushdef(`postalphamult',`(alpha)*')dnl
+',`dnl
+dnl
+ifelse(RSB_M4_IS_SPMX_OP_NEGATING_KERNEL_MOP(mop),1,`dnl
+pushdef(`postalphamult',`(-1)*')dnl
+',`dnl
+pushdef(`postalphamult',`')dnl
+')dnl
+dnl
+')dnl
+dnl
+ifelse(RSB_M4_IS_SPMV_KERNEL_MOP(mop),`1',`dnl
+ifelse(transposed,`0',`dnl
+ifelse(RSB_M4_IS_NOT_UNSYMMETRIC(k_symmetry),0,`dnl
+		const mtype *a=VA;
+')dnl
+')dnl
+ifelse(RSB_M4_OR(RSB_M4_IS_NOT_UNSYMMETRIC(k_symmetry),RSB_M4_AND(RSB_M4_IS_UNSYMMETRIC(k_symmetry),RSB_M4_NOT(transposed))),1,`dnl
+		register mtype cacc = RSB_M4_ZERO(mtype);
+dnl		mtype *outi=out+(trowsu*i*ystride);
+',`dnl
+')dnl
+')dnl
+dnl
+ifelse(RSB_M4_IS_SPXX_KERNEL_MOP(mop),`1',`dnl
+ifelse(RSB_M4_is_transposed_spmv,1,`dnl
+		const mtype bt=postalphamult`'trhs[(tcolsu*xstride*(Mi))];
+dnl		const mtype *b = rhs+(tcolsu*bci);
+',`dnl
+dnl		const mtype bn = rhs[(tcolsu*xstride*(Mi))];	/*20120915: spurious instruction commented out*/
+')dnl
+')dnl
+		const rsb_nnz_idx_t fk=bpntr[Mi],lk=bpntr[Mi+1];
+dnl
+dnl
+dnl	END COMMON EXTERNAL LOOP BEGINNING
+dnl
+dnl	BEGIN EXTERNAL LOOP VECTOR SCALING
+dnl
+ifelse(RSB_NOT(RSB_M4_IS_ALLOWING_ALIASING_KERNEL_MOP(mop)),1,`
+ifelse(should_init_out_vector_before_outer_loop,0,`dnl
+ifelse(unrolling,`l',`
+ifelse(mop,`spmv_uxux',`dnl
+	rsb__cblas_Xscal(RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type), b_rows,&beta, out+rows*i, 1);/* we scale the destination vector */
+')dnl
+ifelse(RSB_M4_IS_FORMAT_ROW_MAJOR(matrix_storage),1,`dnl
+ifelse(mop,`spmv_uauz',`dnl
+	rsb__cblas_Xscal(RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type),b_rows,NULL,out+rows*i,ystride);
+')dnl
+')dnl
+dnl
+',`dnl
+dnl
+ifelse(RSB_M4_IS_ZEROING_KERNEL_MOP(mop),1,`dnl
+		forloop(`row',0,decr(trowsu),`out[trowsu*bri+row]=0;')
+')dnl
+ifelse(RSB_M4_IS_SCALING_KERNEL_MOP(mop),1,`dnl
+		forloop(`row',0,decr(trowsu),`out[trowsu*bri+row]*=beta;')
+')dnl
+')dnl
+')dnl
+')dnl
+dnl
+dnl
+ifelse(should_init_out_vector_before_outer_loop,0,`dnl
+ifelse(RSB_M4_IS_ZEROING_KERNEL_MOP(mop),1,`dnl
+		forloop(`row',0,decr(trowsu),`out[trowsu*bri+row]=0;')
+')dnl
+ifelse(RSB_M4_IS_SCALING_KERNEL_MOP(mop),1,`dnl
+		forloop(`row',0,decr(trowsu),`out[trowsu*bri+row]*=beta;')
+')dnl
+')dnl
+dnl
+dnl	END EXTERNAL LOOP VECTOR SCALING
+dnl
+ifelse(RSB_M4_want_verbose_comments,`1',` /*		Inner loop. Occurs on the minor dimension.	*/ ')dnl
+dnl
+dnl	BEGIN KERNELS DEFINITION
+dnl
+ifelse(RSB_M4_IS_SPMX_KERNEL_MOP(mop),`1',`dnl
+dnl
+dnl	BEGIN SPMV KERNEL DEF
+dnl		/* SPMV KERNEL BEGINS HERE */
+dnl
+ifelse(RSB_M4_IS_NOT_UNSYMMETRIC(k_symmetry),1----,`dnl
+ifelse(RSB_M4_IS_SPMX_KERNEL_MOP(mop),1,`dnl
+ifelse(RSB_M4_IS_DIAGONAL_IMPLICIT(k_diagonal),1,`',`dnl
+ifelse(RSB_M4_want_verbose_comments,`1',`dnl
+/*
+		Symmetric kernels should process the first block separately, if it contains `diagonal' elements.
+		FIXME : this is NOT the case for blocked code.
+*/
+')dnl
+		k=fk;
+		if(RSB_UNLIKELY(lk==k)) continue;/* nothing to do here */
+		mi=bindx[k];
+		if(mi==Mi && ((lk-k)>1) && roff==coff)	/* a `diagonal' element, and not the only one, on a diagonally positioned matrix */
+		{
+			const mtype *b = rhs+(tcolsu*bci);
+			mtype *c=out+(trowsu*bri);
+dnl			const mtype *b = rhs+(trowsu*bri);
+dnl			mtype *c=out+(tcolsu*bci);
+dnl
+dnl	/* FIXME : THIS IS AN EXAMPLE : SHOULD INTRODUCE DIAGONAL-SUBTRACTION CODELET */
+dnl
+{RSB_M4_EXTRA_SYMMETRIC_DIAGONAL_FIXING_KERNEL(`row',`rows',b_rows,`column',`columns',b_columns,mtype,,mop,unrolling,transposition,RSB_M4_SYMMETRY_SWITCH(k_symmetry))}
+		}
+')dnl
+')dnl
+')dnl
+dnl
+ifelse(RSB_M4_AND(RSB_M4_IS_SPMX_KERNEL_MOP(mop),RSB_M4_SAME(transposed,1)),1,`dnl
+ifelse(RSB_M4_want_verbose_comments,`1',`dnl
+dnl		/* `Since this is a transposed kernel, we apply a correction to the output vector locations.' */
+')dnl
+dnl		rhs=(rhs-coff*(xstride))+roff*(xstride); out=(out-roff*(ystride))+coff*(ystride);
+')dnl
+dnl
+dnl
+ifelse(RSB_M4_IS_UNSYMMETRIC(k_symmetry),1,`dnl
+ifelse(transposed,`0',`dnl
+dnl
+dnl	RSB_M4_EARLY_EVICT_INSTRUCTION((a+k,bindx+k))`'dnl
+dnl
+dnl RSB_M4_SIMPLE_LOOP_UNROLL_2S_J..
+RSB_M4_SIMPLE_LOOP_UNROLL_5S(`k',`LI',`fk',`lk',`dnl
+',`dnl
+dnl
+			`const rsb_coo_idx_t' `j_'``''LI`'=bindx[k+LI];
+			`const mtype b_'``''LI`'=rhs[tcolsu*(`j_'``''LI`')*xstride];
+			`const mtype a_'``''LI`'=a[k+LI];
+dnl
+',`dnl
+',`dnl
+dnl			cacc+=a[k+LI]*b_``''LI;
+dnl			cacc+=a_``''LI*b_``''LI;
+			``cacc+=a_''``''LI``*b_''``''LI;
+',`dnl RSB_M4_EARLY_EVICT_INSTRUCTION((a+k,bindx+k))`'dnl
+',RSB_M4_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR_SMALL)
+dnl
+dnl	RSB_M4_EARLY_EVICT_INSTRUCTION((a+k,bindx+k))`'dnl
+dnl	RSB_M4_EARLY_EVICT_INSTRUCTION((outi+k-12))`'dnl
+dnl
+')dnl
+')dnl
+dnl
+dnl
+ifelse(RSB_M4_IS_UNSYMMETRIC(k_symmetry),1,`dnl
+ifelse(transposed,`1',`dnl
+dnl
+RSB_M4_SIMPLE_LOOP_UNROLL_2S_J(`k',`LI',`fk',`lk',`dnl
+dnl
+			`const rsb_coo_idx_t' `j_'``''LI`'=bindx[k+LI];
+			`const mtype a_'``''LI`'=RSB_M4_CONJ(VA[k+LI],mtype,transposition,k_symmetry);
+			`mtype c_'``''LI`'=a_``''LI*bt;
+dnl
+',`dnl
+			tout[(tcolsu)*(`j_'``''LI`')*ystride]+=`c_'``''LI`';
+',RSB_M4_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR_SMALL)
+dnl
+dnl
+')dnl
+')dnl
+dnl
+ifelse(RSB_M4_IS_NOT_UNSYMMETRIC(k_symmetry),1,`dnl
+ifelse(k_symmetry,RSB_M4_SYMBOL_HERMITIAN,`dnl
+ifelse(RSB_M4_SAME(transposition,RSB_M4_TRANS_C),1,`dnl
+pushdef(`ntransposition',transposition)dnl
+pushdef(`ttransposition',RSB_M4_TRANSPOSE_TRANSPOSITION(transposition))dnl
+')dnl
+ifelse(RSB_M4_SAME(transposition,RSB_M4_TRANS_T),1,`dnl
+pushdef(`ntransposition',transposition)dnl
+pushdef(`ttransposition',RSB_M4_TRANS_C)dnl
+')dnl
+ifelse(RSB_M4_SAME(transposition,RSB_M4_TRANS_N),1,`dnl
+pushdef(`ntransposition',RSB_M4_TRANS_C)dnl
+pushdef(`ttransposition',transposition)dnl
+')dnl
+',`dnl
+ifelse(RSB_M4_SAME(transposition,RSB_M4_TRANS_C),1,`dnl
+pushdef(`ntransposition',transposition)dnl
+pushdef(`ttransposition',transposition)dnl
+',`dnl
+pushdef(`ntransposition',RSB_M4_TRANSPOSE_TRANSPOSITION(transposition))dnl
+pushdef(`ttransposition',RSB_M4_TRANSPOSE_TRANSPOSITION(transposition))dnl
+')dnl
+')dnl
+dnl			// nt: ntransposition ttransposition
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += RSB_M4_CONJ(VA[k]*rhs[tcolsu*j*xstride],mtype,ntransposition,k_symmetry);
+			if(roff!=coff || (j!=i))
+				tout[(tcolsu)*(j)*ystride]+=RSB_M4_CONJ(VA[k]*bt,mtype,ttransposition,k_symmetry);
+			++k;
+dnl RSB_M4_SIMPLE_LOOP_UNROLL_2S..
+RSB_M4_SIMPLE_LOOP_UNROLL_2S_J(`k',`LI',`fk+1',`lk-1',`dnl
+dnl
+			`const rsb_coo_idx_t' `j_'``''LI`'=bindx[k+LI];
+			`const mtype b_'``''LI`'=rhs[tcolsu*(`j_'``''LI`')*xstride];
+			`const mtype a_'``''LI`'=VA[k+LI];
+			`mtype c_'``''LI`'=RSB_M4_CONJ_SYM(mtype,ttransposition,k_symmetry)( `a_'``''LI)*bt;
+dnl			`mtype c_'``''LI`'=RSB_M4_CONJ(( `a_'``''LI *bt ),mtype,transposition,k_symmetry);
+dnl
+',`dnl
+			cacc += RSB_M4_CONJ_SYM(mtype,ntransposition,k_symmetry)(`a_'``''LI)*b_``''LI;
+			tout[(tcolsu)*(`j_'``''LI`')*ystride]+=`c_'``''LI`';
+',RSB_M4_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR_SMALL)
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += RSB_M4_CONJ(VA[k]*rhs[trowsu*j*xstride],mtype,ntransposition,k_symmetry);
+				if(roff!=coff || (j!=i))
+					tout[(tcolsu)*(j)*ystride]+=RSB_M4_CONJ(VA[k]*bt,mtype,ttransposition,k_symmetry);
+				++k;
+			}
+popdef(`ntransposition')dnl
+popdef(`ttransposition')dnl
+dnl
+')dnl
+dnl
+ifelse(RSB_M4_should_merge_value_after_inner_loop,`1',`dnl
+dnl			outi[0]+=postalphamult`cacc';
+			out[(trowsu*i*ystride)]+=postalphamult`cacc';
+')dnl
+dnl
+dnl		}
+dnl
+dnl
+dnl
+dnl	FIXME : this code is only a quick hack for CSR!
+dnl
+dnl
+dnl		/* SPMV KERNEL ENDS HERE */
+popdef(`postalphamult')dnl
+dnl	END SPMV KERNEL DEF
+')dnl
+dnl
+ifelse(RSB_M4_IS_SPSX_KERNEL_MOP(mop),`1',`dnl
+dnl	BEGIN SPSV KERNEL DEF
+dnl	/* SPSV KERNEL BEGINS HERE */
+dnl
+ifelse(RSB_M4_IS_SPSX_KERNEL_MOP(mop),1,`dnl
+dnl		const mtype bb_0=rhs[(trowsu*bri)];
+ifelse(is_diag_d_spsv_kernel,1,`',`dnl
+ifelse(RSB_M4_OR(RSB_M4_IS_SPSX_OP_SCALING_KERNEL_MOP(mop),RSB_M4_IS_SPSX_OP_SETTING_KERNEL_MOP(mop)),1,`dnl
+		const mtype bb_0=rhs[(trowsu*Mi*extra_xstride)];
+')dnl
+')dnl
+		mtype ax_0;
+dnl
+ifelse(is_diag_d_spsv_kernel,1,`dnl
+dnl	
+dnl	FIXME: missing incx, incy support here!
+dnl
+ifelse(RSB_M4_IS_DIAGONAL_IMPLICIT(k_diagonal),1,`dnl
+		const mtype aa=1;
+',`dnl
+		const mtype aa=VA[ifelse(uplo,`u',`fk',`lk-1')];
+ifelse(RSB_M4_WANT_SPSM_DIAG_CHECK(),1,`dnl
+		if(aa == RSB_M4_ZERO(mtype))return RSB_ERR_INVALID_NUMERICAL_DATA;
+')dnl
+')dnl
+dnl
+
+ifelse(RSB_M4_IS_SPSX_OP_SCALING_KERNEL_MOP(mop),1,`dnl
+dnl
+dnl		out[tcolsu*bci]/=RSB_M4_CONJ(VA[bpntr[Mi+1]-1],mtype,transposition,k_symmetry);
+dnl
+',`dnl
+dnl		out[tcolsu*bci]/=RSB_M4_CONJ(VA[bpntr[Mi+1]-1],mtype,transposition,k_symmetry);
+dnl
+')dnl
+dnl		
+		out[tcolsu*bci]/=aa;
+dnl
+')dnl
+dnl
+ifelse(is_zero_acc_spsv_kernel,1,`dnl
+		ax_0=0;
+',`dnl
+		ax_0=out[tcolsu*bci];
+')dnl
+dnl
+dnl
+')dnl
+dnl
+ifelse(RSB_M4_IS_SPSX_KERNEL_MOP(mop),`1',`dnl
+pushdef(`skip_head_row_elements',ifelse(RSB_M4_IS_DIAGONAL_IMPLICIT(k_diagonal),`1',`0',ifelse(uplo,`u',`1',`0')))dnl
+pushdef(`skip_tail_row_elements',ifelse(RSB_M4_IS_DIAGONAL_IMPLICIT(k_diagonal),`1',`0',ifelse(uplo,`u',`0',`1')))dnl
+',`dnl
+pushdef(`skip_head_row_elements',0)dnl
+pushdef(`skip_tail_row_elements',0)dnl
+')dnl
+dnl
+ifelse(is_a_backward_kernel,1,`
+dnl
+dnl	FIXME : backward kernels are only used for SPSV, and they start with one element less
+dnl
+		for(k=lk-1-skip_tail_row_elements`'dnl
+,a=VA+k,mi=bindx[k];k+1>=fk+1+skip_head_row_elements  ;--k,block_forward,mi=bindx[k])
+dnl	/* k is the index of the block */
+',`dnl
+		ifelse(skip_head_row_elements,1,block_forward;)
+		for(k=fk+skip_head_row_elements,mi=bindx[k];k<lk-skip_tail_row_elements  ;++k,block_forward,mi=bindx[k])
+dnl	/* k is the index of the block */
+')dnl
+		{
+ifelse(RSB_M4_SAME(transposition,RSB_M4_TRANS_N),1,`dnl
+			const mtype *b=out + (tcolsu*bci);
+			mtype *c=&ax_0;
+')dnl
+dnl
+dnl	Fixed for Hermitian k_symmetry.
+dnl
+ifelse(is_diag_d_spsv_kernel,1,`dnl
+		out[trowsu*bri]-=RSB_M4_CONJ(*a*ax_0,mtype,transposition,k_symmetry);
+',`dnl
+{RSB_M4_KERNEL_FUNCTION_BODY(`row',`rows',b_rows,`column',`columns',b_columns,mtype,,mop,unrolling,RSB_M4_SYMBOL_UNSYMMETRIC)}
+')dnl
+dnl
+		}
+dnl
+ifelse(is_diag_d_spsv_kernel,1,`dnl
+ifelse(RSB_M4_IS_SPSX_OP_SCALING_KERNEL_MOP(mop),1,`dnl
+		out[tcolsu*bci]*=alpha;
+')dnl
+')dnl
+dnl
+ifelse(is_diag_d_spsv_kernel,1,`',`dnl
+ifelse(RSB_M4_IS_DIAGONAL_IMPLICIT(k_diagonal),1,`',`dnl
+		if(lk-fk>0)
+dnl	/* if this row block was not empty */
+')dnl
+		{
+			/* `the last element (which for a lower triangular solve is on the diagonal')*/
+dnl			block_backward;
+			/* Lx=y ; x_0=y_0/L_1_1  */
+			mtype *c_0=out+(trowsu*bri);
+ifelse(RSB_M4_IS_DIAGONAL_IMPLICIT(k_diagonal),1,`dnl
+			const mtype aa=1;
+',`dnl
+dnl			elements on the diagonal are real, and no conjugation is needed 
+			const mtype aa=VA[ifelse(uplo,`u',`fk',`lk-1')];
+ifelse(RSB_M4_WANT_SPSM_DIAG_CHECK(),1,`dnl
+		if(aa == RSB_M4_ZERO(mtype))return RSB_ERR_INVALID_NUMERICAL_DATA;
+')dnl
+')dnl
+dnl
+dnl
+ifelse(RSB_M4_IS_SPSX_OP_SCALING_KERNEL_MOP(mop),1,`dnl
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+')dnl
+ifelse(RSB_M4_IS_SPSX_OP_SETTING_KERNEL_MOP(mop),1,`dnl
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+')dnl
+dnl
+ifelse(RSB_M4_IS_DIAGONAL_IMPLICIT(k_diagonal),1,`',`dnl
+			block_forward;
+')dnl
+		}
+')dnl
+dnl
+popdef(`skip_head_row_elements')dnl
+popdef(`skip_tail_row_elements')dnl
+dnl
+dnl
+dnl		/* SPSV KERNEL ENDS HERE */
+dnl	END SPSV KERNEL DEF
+')dnl
+dnl
+ifelse(RSB_M4_NOT(RSB_M4_IS_SPXX_KERNEL_MOP(mop)),`1',`dnl
+dnl	BEGIN MISC KERNEL DEF
+dnl
+ 		/* touppercase(mop) KERNEL HERE */
+dnl		for(k=fk,mi=bindx[k];k<lk;++k,block_forward,mi=bindx[k]) 20120915 /*buggy loop */
+		for(k=fk;k<lk;++k,block_forward)
+		{
+		mi=bindx[k];
+		{
+ifelse(mop,`scale',`dnl
+			/*a=VA+indptr[(k)];*/
+			const mtype *d=scale_factors+(trowsu*bri);
+')dnl
+ifelse(mop,`negation',`dnl
+			/*a=VA+indptr[k];*/
+')dnl
+ifelse(RSB_M4_IS_ACC_WRITING_KERNEL_MOP(mop),`1',`dnl
+			/*a=VA+indptr[k];*/
+			mtype *local_row_sums = row_sums+(trowsu*bri);
+')dnl
+dnl {RSB_M4_KERNEL_FUNCTION_BODY(`row',`rows',b_rows,`column',`columns',b_columns,mtype,,mop,unrolling,RSB_M4_SYMBOL_UNSYMMETRIC)}
+{RSB_M4_KERNEL_FUNCTION_BODY(`row',`rows',b_rows,`column',`columns',b_columns,mtype,,mop,unrolling,k_symmetry)}
+		}
+		}
+dnl
+dnl	END MISC KERNEL DEF
+')dnl
+dnl
+dnl	END KERNELS DEFINITION
+dnl
+dnl	BEGIN COMMON EXTERNAL LOOP CLOSING
+	}
+dnl	END COMMON EXTERNAL LOOP CLOSING
+dnl
+dnl ifelse(RSB_M4_IS_STRIDED_KERNEL_MOP(mop),`1',dnl
+dnl	`incx--;incy--;/* we are interested in the increment of 1 */
+dnl ')dnl
+dnl
+dnl
+ifelse(RSB_M4_IS_FORMAT_BCSS(matrix_storage),1,`dnl
+dnl	this check would be good for non-looped functions only!
+dnl	if(columns != b_columns || rows != b_rows)return RSB_ERR_BADARGS; /* not a comprehensive check, of course */
+
+dnl	FIXME : ONLY EXPERIMENTAL OPENMP SUPPORT
+dnl
+dnl
+ifelse(RSB_M4_WANT_OMP_IN_KERNELS,`1',`dnl
+	size_t tn;
+	size_t nt;
+`#'dnl
+       pragma omp parallel num_threads(rsb_global_session_handle.rsb_g_threads) private(mi,Mi,k,tn,nt) 
+	{
+	tn = omp_get_thread_num();
+	nt = omp_get_num_threads();
+	/*RSB_INFO("working on %d / %d threads\n",tn,nt);*/
+	//for(Mi=tn;Mi<Mdim;Mi+=nt)
+	size_t ui=((Mdim/nt)*(tn+1));
+	size_t li=(Mdim/nt)*tn;
+	if(ui>Mdim)ui=Mdim;
+dnl	#pragma omp for schedule(static,1)		/* shared L1 cache */
+#pragma omp for schedule(static,(Mdim+1)/2)		/* separate L1 caches */
+	for(Mi=li;RSB_LIKELY(Mi<ui);++Mi)
+	{
+	//RSB_INFO("row %d working on %d / %d threads\n",mi,tn,nt);
+',`dnl
+dnl
+')dnl
+dnl ifelse(RSB_M4_IS_FORMAT_ROW_MAJOR(matrix_storage),1,`dnl
+dnl 		/* should zero output block here (for efficiency) instead of function top */
+dnl ')dnl
+dnl
+dnl		FIXME: the following is NEW, and useful also for SYMMETRIC
+dnl		/* transpose.. is transposed */
+dnl		/* useless for storage matrix_storage */
+dnl		/*if(bpntr[Mi]==bpntr[Mi+1])continue;*/ /* empty  */
+ifelse(mop,`spmv_uauz',`dnl
+dnl		mtype *c=out+(rowsu*mi); /* declaration of c put here for experimental purposes */
+')dnl
+dnl
+dnl
+dnl	FIXME : blocked TRS kernels are broken in this way
+dnl
+dnl			mi=bindx[k];
+dnl			/* `mop' is mop */
+dnl
+dnl
+dnl
+dnl
+dnl
+popdef(`is_diag_d_spsv_kernel')dnl
+popdef(`tcolsu')dnl
+popdef(`trowsu')dnl
+popdef(`colsu')dnl
+popdef(`rowsu')dnl
+popdef(`transposed')dnl 1/2
+dnl popdef(`transposed')dnl 2/2
+popdef(`should_init_out_vector_before_outer_loop')dnl
+popdef(`total_block_columns')dnl
+popdef(`total_block_rows')dnl
+popdef(`total_rows')dnl
+popdef(`total_columns')dnl
+dnl
+dnl
+ifelse(RSB_M4_WANT_OMP_IN_KERNELS,`1',`dnl
+	}
+')dnl
+popdef(`mi')dnl
+popdef(`Mi')dnl
+popdef(`brit')dnl
+popdef(`bcit')dnl
+popdef(`brin')dnl
+popdef(`bcin')dnl
+popdef(`bri')dnl
+popdef(`bci')dnl
+')dnl
+dnl
+	return RSB_ERR_NO_ERROR;
+dnl
+')dnl
+')dnl
+dnl
+')')dnl
+dnl
+dnl
+popdef(`skip_implementation')dnl
+popdef(`out_dim')dnl
+popdef(`is_a_backward_kernel')dnl
+popdef(`is_an_externally_backward_kernel')dnl
+popdef(`is_zero_acc_spsv_kernel')dnl
+popdef(`block_forward')dnl
+popdef(`block_backward')dnl
+popdef(`extra_xstride')dnl
+popdef(`extra_ystride')dnl
+}
+dnl
+')dnl
+dnl
+')dnl
+dnl
+popdef(`uplo')dnl
+popdef(`want_what')dnl
+popdef(`k_diagonal')dnl
+popdef(`citype')dnl
+popdef(`mop')dnl
+popdef(`matrix_storage')dnl
+popdef(`k_symmetry')dnl
+popdef(`transposition')dnl
+popdef(`mtype')dnl
+popdef(`itype')dnl
+popdef(`unrolling')dnl
+')dnl
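+dnl
+dnl	Editorial note: in the experimental OpenMP branch above, each of the nt
+dnl	threads derives its own row range as li=(Mdim/nt)*tn, ui=(Mdim/nt)*(tn+1).
+dnl	With integer division this leaves the trailing Mdim%nt rows outside every
+dnl	[li,ui) range; a common remedy (a sketch, not librsb code) is ceiling
+dnl	division:
+dnl
+dnl		const size_t chunk = (Mdim + nt - 1) / nt; /* rows per thread, rounded up */
+dnl		size_t li = chunk * tn;
+dnl		size_t ui = li + chunk;
+dnl		if(ui > Mdim) ui = Mdim;                   /* clamp the last thread */
+dnl
+dnl	The branch is marked FIXME/experimental, and its omp for pragma would
+dnl	redistribute the loop iterations anyway.
+dnl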
+dnl
+dnl
+define(`RSB_M4_BCSS_MISC_KERNELS',`dnl
+dnl
+pushdef(`unrollings',$1)dnl
+dnl
+dnl	FIXED BLOCK SIZE KERNELS :
+dnl
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+foreach(`matrix_storage',RSB_M4_BCSS_FORMATS,`dnl
+foreach(`unrolling',unrollings,`dnl
+ifelse(RSB_M4_IS_SPSV_KERNEL_MOP(mop)RSB_M4_IS_SPMV_KERNEL_MOP(mop),00,`dnl
+foreach(`rowsu',RSB_M4_ROWS_UNROLL,`dnl
+foreach(`colsu',RSB_M4_COLUMNS_UNROLL,`dnl
+foreach(`k_symmetry',RSB_M4_MATRIX_SYMMETRY,`dnl
+foreach(`transposition',RSB_M4_MATRIX_TRANSPOSITIONS,`dnl
+foreach(`citype',RSB_M4_MATRIX_COORDINATE_TYPES,`dnl
+foreach(`k_diagonal',RSB_M4_MATRIX_DIAGONAL_TYPES,`dnl
+foreach(`uplo',RSB_M4_MATRIX_UPLO_TYPES,`dnl
+RSB_M4_BCSS_KERNEL_FUNCTION(`all',type,matrix_storage,transposition,k_symmetry,rowsu,colsu,unrolling,mop,citype,k_diagonal,uplo)
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+dnl
+dnl	FIXED BLOCK SIZE DISPATCHERS :
+dnl
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+ifelse(RSB_M4_IS_SPSV_KERNEL_MOP(mop)RSB_M4_IS_SPMV_KERNEL_MOP(mop),00,`dnl
+foreach(`matrix_storage',RSB_M4_BCSS_FORMATS,`dnl
+foreach(`unrolling',unrollings,`dnl
+foreach(`k_symmetry',RSB_M4_MATRIX_SYMMETRY,`dnl
+foreach(`transposition',RSB_M4_MATRIX_TRANSPOSITIONS,`dnl
+foreach(`citype',RSB_M4_MATRIX_COORDINATE_TYPES,`dnl
+foreach(`k_diagonal',RSB_M4_MATRIX_DIAGONAL_TYPES,`dnl
+foreach(`uplo',RSB_M4_MATRIX_UPLO_TYPES,`dnl
+RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION(`all',type,matrix_storage,transposition,k_symmetry,unrolling,,,mop,citype,k_diagonal,uplo)
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+dnl
+dnl
+popdef(`unrollings')dnl
+dnl	
+')dnl
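+dnl
+dnl	Editorial note: judging from the kernels emitted into the generated sources
+dnl	(e.g. rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sU_dE_uG below), the foreach
+dnl	nests above appear to encode their parameters in each function name roughly
+dnl	as: BCSR = matrix_storage, infty_norm = mop, double = type, C/H = citype
+dnl	(rsb_coo_idx_t or rsb_half_idx_t column indices), tN/tT/tC = transposition,
+dnl	r1_c1 = row/column unroll, sU/sS = k_symmetry (unsymmetric/symmetric, with
+dnl	presumably an sH hermitian variant), dE/dI = diagonal explicit/implicit,
+dnl	and uG = uplo (general).
+dnl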
+dnl	
+dnl	
+dnl
+dnl
+define(`RSB_M4_BCSS_SPMV_KERNELS',`dnl
+dnl
+pushdef(`unrollings',$1)dnl
+dnl
+dnl	FIXED BLOCK SIZE KERNELS :
+dnl
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+foreach(`matrix_storage',RSB_M4_BCSS_FORMATS,`dnl
+foreach(`unrolling',unrollings,`dnl
+ifelse(RSB_M4_IS_SPMV_KERNEL_MOP(mop),1,`dnl
+foreach(`k_diagonal',RSB_M4_MATRIX_DIAGONAL_TYPES,`dnl
+foreach(`rowsu',RSB_M4_ROWS_UNROLL,`dnl
+foreach(`colsu',RSB_M4_COLUMNS_UNROLL,`dnl
+foreach(`k_symmetry',RSB_M4_MATRIX_SYMMETRY,`dnl
+foreach(`transposition',RSB_M4_MATRIX_TRANSPOSITIONS,`dnl
+foreach(`citype',RSB_M4_MATRIX_COORDINATE_TYPES,`dnl
+foreach(`uplo',RSB_M4_MATRIX_UPLO_TYPES,`dnl
+RSB_M4_BCSS_KERNEL_FUNCTION(`all',type,matrix_storage,transposition,k_symmetry,rowsu,colsu,unrolling,mop,citype,k_diagonal,uplo)
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+dnl
+dnl	FIXED BLOCK SIZE DISPATCHERS :
+dnl
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+ifelse(RSB_M4_IS_SPMV_KERNEL_MOP(mop),1,`dnl
+foreach(`matrix_storage',RSB_M4_BCSS_FORMATS,`dnl
+foreach(`unrolling',unrollings,`dnl
+foreach(`k_symmetry',RSB_M4_MATRIX_SYMMETRY,`dnl
+foreach(`transposition',RSB_M4_MATRIX_TRANSPOSITIONS,`dnl
+foreach(`citype',RSB_M4_MATRIX_COORDINATE_TYPES,`dnl
+foreach(`k_diagonal',RSB_M4_MATRIX_DIAGONAL_TYPES,`dnl
+foreach(`uplo',RSB_M4_MATRIX_UPLO_TYPES,`dnl
+RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION(`all',type,matrix_storage,transposition,k_symmetry,unrolling,,,mop,citype,k_diagonal,uplo)
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+dnl
+dnl
+popdef(`unrollings')dnl
+dnl	
+')dnl
+dnl	
+dnl	
+dnl
+dnl
+define(`RSB_M4_BCSS_SPSV_KERNELS',`dnl
+dnl
+pushdef(`unrollings',$1)dnl
+dnl
+dnl	FIXED BLOCK SIZE KERNELS :
+dnl
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+ifelse(RSB_M4_IS_SPSV_KERNEL_MOP(mop),1,`dnl
+foreach(`k_diagonal',RSB_M4_MATRIX_DIAGONAL_TYPES,`dnl
+foreach(`matrix_storage',RSB_M4_BCSS_FORMATS,`dnl
+foreach(`unrolling',unrollings,`dnl
+foreach(`rowsu',RSB_M4_ROWS_UNROLL,`dnl
+foreach(`colsu',RSB_M4_COLUMNS_UNROLL,`dnl
+foreach(`k_symmetry',RSB_M4_MATRIX_SYMMETRY,`dnl
+foreach(`transposition',RSB_M4_MATRIX_TRANSPOSITIONS,`dnl
+foreach(`citype',RSB_M4_MATRIX_COORDINATE_TYPES,`dnl
+foreach(`uplo',RSB_M4_MATRIX_UPLO_TYPES,`dnl
+RSB_M4_BCSS_KERNEL_FUNCTION(`all',type,matrix_storage,transposition,k_symmetry,rowsu,colsu,unrolling,mop,citype,k_diagonal,uplo)
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+dnl
+dnl	FIXED BLOCK SIZE DISPATCHERS :
+dnl
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+ifelse(RSB_M4_IS_SPSV_KERNEL_MOP(mop),1,`dnl
+foreach(`matrix_storage',RSB_M4_BCSS_FORMATS,`dnl
+foreach(`unrolling',unrollings,`dnl
+foreach(`k_symmetry',RSB_M4_MATRIX_SYMMETRY,`dnl
+foreach(`transposition',RSB_M4_MATRIX_TRANSPOSITIONS,`dnl
+foreach(`citype',RSB_M4_MATRIX_COORDINATE_TYPES,`dnl
+foreach(`k_diagonal',RSB_M4_MATRIX_DIAGONAL_TYPES,`dnl
+foreach(`uplo',RSB_M4_MATRIX_UPLO_TYPES,`dnl
+RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION(`all',type,matrix_storage,transposition,k_symmetry,unrolling,,,mop,citype,k_diagonal,uplo)
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+dnl
+dnl
+popdef(`unrollings')dnl
+dnl	
+')dnl
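+dnl
+dnl	Editorial sketch, not librsb code: for the lower triangular, non-transposed,
+dnl	explicit-diagonal CSR case, the SPSV kernels generated above amount to a
+dnl	forward substitution of the following shape (illustrative name and
+dnl	signature; it assumes each row stores its diagonal as its last element):
+dnl
+dnl	rsb_err_t spsv_csr_lower_sketch(const double *VA, const double *rhs,
+dnl		double *out, const rsb_coo_idx_t Mdim,
+dnl		const rsb_coo_idx_t *bindx, const rsb_nnz_idx_t *bpntr)
+dnl	{
+dnl		rsb_coo_idx_t Mi;
+dnl		for(Mi = 0; Mi < Mdim; ++Mi)
+dnl		{
+dnl			const rsb_nnz_idx_t fk = bpntr[Mi], lk = bpntr[Mi+1];
+dnl			const double bb_0 = rhs[Mi];
+dnl			const double aa = VA[lk-1];	/* the diagonal element */
+dnl			double ax_0 = 0.0;
+dnl			rsb_nnz_idx_t k;
+dnl			if(aa == 0.0)
+dnl				return RSB_ERR_INVALID_NUMERICAL_DATA;
+dnl			for(k = fk; k < lk-1; ++k)	/* strictly lower part of row Mi */
+dnl				ax_0 += VA[k] * out[bindx[k]];
+dnl			out[Mi] = (bb_0 - ax_0) / aa;	/* ax_0 + aa*x_Mi == bb_0 */
+dnl		}
+dnl		return RSB_ERR_NO_ERROR;
+dnl	}
+dnl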
+dnl	
+dnl	
+dnl
+dnl
+dnl
+define(`RSB_M4_BCSS_KERNELS',`dnl
+dnl
+pushdef(`unrollings',$1)dnl
+dnl
+dnl	FIXED BLOCK SIZE KERNELS :
+dnl
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+foreach(`matrix_storage',RSB_M4_BCSS_FORMATS,`dnl
+foreach(`k_diagonal',RSB_M4_MATRIX_DIAGONAL_TYPES,`dnl
+foreach(`unrolling',unrollings,`dnl
+foreach(`rowsu',RSB_M4_ROWS_UNROLL,`dnl
+foreach(`colsu',RSB_M4_COLUMNS_UNROLL,`dnl
+foreach(`k_symmetry',RSB_M4_MATRIX_SYMMETRY,`dnl
+foreach(`transposition',RSB_M4_MATRIX_TRANSPOSITIONS,`dnl
+foreach(`citype',RSB_M4_MATRIX_COORDINATE_TYPES,`dnl
+foreach(`uplo',RSB_M4_MATRIX_UPLO_TYPES,`dnl
+RSB_M4_BCSS_KERNEL_FUNCTION(`all',type,matrix_storage,transposition,k_symmetry,rowsu,colsu,unrolling,mop,citype,k_diagonal,uplo)
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+dnl
+dnl	FIXED BLOCK SIZE DISPATCHERS :
+dnl
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+foreach(`k_diagonal',RSB_M4_MATRIX_DIAGONAL_TYPES,`dnl
+foreach(`matrix_storage',RSB_M4_BCSS_FORMATS,`dnl
+foreach(`unrolling',unrollings,`dnl
+foreach(`k_symmetry',RSB_M4_MATRIX_SYMMETRY,`dnl
+foreach(`transposition',RSB_M4_MATRIX_TRANSPOSITIONS,`dnl
+foreach(`citype',RSB_M4_MATRIX_COORDINATE_TYPES,`dnl
+foreach(`uplo',RSB_M4_MATRIX_UPLO_TYPES,`dnl
+RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION(`all',type,matrix_storage,transposition,k_symmetry,unrolling,,,mop,citype,k_diagonal,uplo)
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+dnl
+dnl
+popdef(`unrollings')dnl
+dnl	
+')dnl
+dnl	
+dnl	
+dnl
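
A minimal editorial sketch (not part of the sources above) of the plain CSR
SpMV loop that RSB_M4_BCSS_KERNEL_FUNCTION expands to in the unsymmetric,
non-transposed, unit-stride double case; the variable names follow the macro
body, while the function name and exact signature are illustrative only:

	rsb_err_t spmv_csr_sketch(const double *VA, const double *rhs, double *out,
		const rsb_coo_idx_t Mdim, const rsb_coo_idx_t *bindx,
		const rsb_nnz_idx_t *bpntr, const double alpha)
	{
		rsb_coo_idx_t Mi;
		for(Mi = 0; Mi < Mdim; ++Mi)	/* outer loop, on the major dimension */
		{
			const rsb_nnz_idx_t fk = bpntr[Mi], lk = bpntr[Mi+1];
			rsb_nnz_idx_t k;
			double cacc = 0.0;	/* per-row accumulator, as in the macro body */
			for(k = fk; k < lk; ++k)
				cacc += VA[k] * rhs[bindx[k]];	/* a_{Mi,j} * x_j */
			out[Mi] += alpha * cacc;	/* scaled update; alpha is *alphap in the generated code */
		}
		return RSB_ERR_NO_ERROR;
	}
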
diff --git a/rsb_krnl_bcss_misc_u.c b/rsb_krnl_bcss_misc_u.c
new file mode 100644
index 0000000..d871567
--- /dev/null
+++ b/rsb_krnl_bcss_misc_u.c
@@ -0,0 +1,42472 @@
+/* @cond INNERDOC */
+/*!
+ @file
+ @brief
+ Performance kernels dispatching code, for each type, submatrix size, and
+ operation, for the block compressed sparse stripes format.
+ Kernels are unrolled, with no loops, for user-specified blockings only.
+ */
+
+/*
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+#include "rsb_internals.h"
+#include "rsb.h"
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : it would be better to use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += fabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
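+
+/* Editorial sketch, not generated code: the infty_norm kernels above and
+ * below only accumulate per-row absolute sums into row_sums[]; the infinity
+ * norm itself is their maximum. A minimal caller-side reduction
+ * (illustrative name only):
+ *
+ *	static double max_row_sum(const double *row_sums, rsb_coo_idx_t n)
+ *	{
+ *		double norm = 0.0;
+ *		rsb_coo_idx_t i;
+ *		for(i = 0; i < n; ++i)
+ *			if(row_sums[i] > norm)
+ *				norm = row_sums[i];	/* ||A||_inf = max_i sum_j |a_ij| */
+ *		return norm;
+ *	}
+ */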
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : it would be better to use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += fabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : it would be better to use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += fabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : it would be better to use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += fabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tT_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE : it would be better to use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += fabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tT_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE : it would be better to use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += fabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tT_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE : it would be better to use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += fabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tT_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE : it would be better to use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += fabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tC_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE : it would be better to use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += fabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tC_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE : it would be better to use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += fabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tC_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE : it would be better to use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += fabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tC_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE : it would be better to use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += fabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : it would be better to use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += fabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
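+
+/* Editorial note, not generated code: in the sS (symmetric) variants such as
+ * the one above, only one triangle of A is stored, so each stored entry a_ij
+ * contributes to two row sums; that is what the extra branch implements:
+ *
+ *	row_sums[roff + i] += fabs(a_ij);         (its own row)
+ *	if(roff != coff || i != j)                (skip true diagonal entries)
+ *		row_sums[coff + j] += fabs(a_ij); (the mirrored row)
+ */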
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : it would be better to use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += fabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += fabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += fabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tT_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sS_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
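+/*
+ * Why the transposed symmetric case can forward to the untransposed one:
+ * if A = A^T, then for every row i
+ *   sum_j |(A^T)_{ij}| = sum_j |A_{ji}| = sum_j |A_{ij}|,
+ * so the row sums (and hence the infinity norm) of A^T and A coincide.
+ */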
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tT_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sS_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tT_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sS_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tT_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sS_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tC_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sS_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tC_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sS_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tC_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sS_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tC_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sS_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += fabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
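+/*
+ * For the real 'double' type conjugation is the identity, so A = A^H
+ * reduces to A = A^T and the Hermitian (sH) bodies below coincide with
+ * the symmetric (sS) ones; they are emitted separately, presumably
+ * because the generator instantiates one kernel per (type, symmetry)
+ * combination.
+ */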
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += fabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += fabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += fabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tT_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sH_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tT_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sH_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tT_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sH_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tT_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sH_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tC_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sH_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tC_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sH_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tC_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sH_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tC_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sH_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
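+/*
+ * Unlike the infinity-norm kernels, the rowssums kernels accumulate the
+ * raw (signed) values, i.e. row_sums += A * ones. A minimal dense sketch
+ * of the same operation, assuming a row-major array a[] of size m*n --
+ * illustrative only, not part of the library:
+ *
+ *   for (int i = 0; i < m; ++i)
+ *       for (int j = 0; j < n; ++j)
+ *           row_sums[i] += a[i*n + j];
+ */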
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tT_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
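+/*
+ * In the transposed (tT) unsymmetric variants the accumulator is indexed
+ * by the column: local_row_sums points at row_sums + j and the update
+ * lands in row_sums[coff + j], so the kernel produces the row sums of
+ * A^T (the column sums of A) without materializing the transpose.
+ */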
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tT_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tT_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tT_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tC_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tC_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tC_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tC_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
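+/*
+ * The symmetric rowssums variant mirrors each stored entry to both the
+ * i-th and the j-th sum, exactly as in the symmetric infinity-norm
+ * kernels, only without the absolute value; the
+ * (roff != coff || i != j) guard again keeps genuine diagonal entries
+ * from being counted twice.
+ */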
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tT_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sS_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tT_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sS_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tT_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sS_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tT_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sS_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tC_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sS_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tC_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sS_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tC_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sS_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tC_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sS_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
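+
+/* Editor's sketch (not upstream librsb code): the kernel above assumes only
+ * one triangle of the symmetric/hermitian matrix is stored, so each stored
+ * entry updates both row i and (via the transposed position) row j; the test
+ * (roff != coff || i != j) skips the mirror update for diagonal entries of
+ * diagonal blocks, avoiding double counting. Standalone demo on the lower
+ * triangle of A = [1 2; 2 3], hypothetical names, roff == coff == 0. */
+static void rsb_demo_half_stored_rowsums(void)
+{
+	const int bpntr[3] = {0, 1, 3};
+	const int bindx[3] = {0, 0, 1};            /* lower triangle only */
+	const double VA[3] = {1.0, 2.0, 3.0};
+	double row_sums[2] = {0.0, 0.0};
+	int i, k;
+	for(i = 0; i < 2; ++i)
+		for(k = bpntr[i]; k < bpntr[i+1]; ++k)
+		{
+			const int j = bindx[k];
+			row_sums[i] += VA[k];
+			if(i != j)                 /* mirror strictly off-diagonal entries */
+				row_sums[j] += VA[k];
+		}
+	/* row_sums is {3.0, 5.0}, matching the fully stored computation. */
+}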
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
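+
+/* Editor's note: the diagonal-implicit (dI) body above is identical to the
+ * diagonal-explicit (dE) one preceding it; the contribution of the implicit
+ * unit diagonal is evidently accounted for outside these kernels. */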
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
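+
+/* Editor's note: for the real `double` type conjugation is the identity, so
+ * the Hermitian (sH) kernels above coincide with their symmetric (sS)
+ * counterparts; the distinction presumably matters only for the
+ * complex-typed kernels of this family. */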
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tT_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sH_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tT_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sH_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tT_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sH_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tT_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sH_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tC_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sH_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tC_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sH_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tC_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sH_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tC_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sH_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tN_r1_c1_uu_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
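+
+/* Editor's sketch (not upstream librsb code): the untransposed kernel above
+ * walks VA once, in step with bpntr, and multiplies every stored entry of
+ * row i by scale_factors[i]. Standalone demo with hypothetical names. */
+static void rsb_demo_row_scale(void)
+{
+	const int bpntr[3] = {0, 2, 3};            /* row 0 has 2 entries, row 1 has 1 */
+	double VA[3] = {1.0, 2.0, 3.0};
+	const double scale_factors[2] = {10.0, 100.0};
+	int i, k;
+	for(i = 0; i < 2; ++i)
+		for(k = bpntr[i]; k < bpntr[i+1]; ++k)
+			VA[k] *= scale_factors[i];
+	/* VA is now {10.0, 20.0, 300.0}. */
+}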
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tN_r1_c1_uu_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tN_r1_c1_uu_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tN_r1_c1_uu_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tT_r1_c1_uu_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
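+
+/* Editor's sketch (not upstream librsb code): the transposed variant above
+ * picks the factor by the column index j (scale_factors+(1*(j*1))) instead
+ * of the row index i, i.e. it scales the rows of A^T -- equivalently the
+ * columns of A. Hypothetical names. */
+static void rsb_demo_transposed_scale(void)
+{
+	const int bpntr[3] = {0, 2, 3};
+	const int bindx[3] = {0, 1, 1};            /* column indices */
+	double VA[3] = {1.0, 2.0, 3.0};
+	const double scale_factors[2] = {10.0, 100.0};
+	int i, k;
+	for(i = 0; i < 2; ++i)
+		for(k = bpntr[i]; k < bpntr[i+1]; ++k)
+			VA[k] *= scale_factors[bindx[k]];
+	/* VA is now {10.0, 200.0, 300.0}: column 0 scaled by 10, column 1 by 100. */
+}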
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tT_r1_c1_uu_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tT_r1_c1_uu_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tT_r1_c1_uu_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tC_r1_c1_uu_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tC_r1_c1_uu_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tC_r1_c1_uu_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tC_r1_c1_uu_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tN_r1_c1_uu_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tN_r1_c1_uu_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tN_r1_c1_uu_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tN_r1_c1_uu_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tT_r1_c1_uu_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_scale_double_C__tN_r1_c1_uu_sS_dE_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tT_r1_c1_uu_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_scale_double_C__tN_r1_c1_uu_sS_dI_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tT_r1_c1_uu_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_scale_double_H__tN_r1_c1_uu_sS_dE_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tT_r1_c1_uu_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_scale_double_H__tN_r1_c1_uu_sS_dI_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tC_r1_c1_uu_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_scale_double_C__tN_r1_c1_uu_sS_dE_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tC_r1_c1_uu_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_scale_double_C__tN_r1_c1_uu_sS_dI_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tC_r1_c1_uu_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_scale_double_H__tN_r1_c1_uu_sS_dE_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tC_r1_c1_uu_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_scale_double_H__tN_r1_c1_uu_sS_dI_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tN_r1_c1_uu_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tN_r1_c1_uu_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tN_r1_c1_uu_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tN_r1_c1_uu_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tT_r1_c1_uu_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_scale_double_C__tN_r1_c1_uu_sH_dE_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tT_r1_c1_uu_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_scale_double_C__tN_r1_c1_uu_sH_dI_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tT_r1_c1_uu_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_scale_double_H__tN_r1_c1_uu_sH_dE_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tT_r1_c1_uu_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_scale_double_H__tN_r1_c1_uu_sH_dI_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tC_r1_c1_uu_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_scale_double_C__tN_r1_c1_uu_sH_dE_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tC_r1_c1_uu_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_scale_double_C__tN_r1_c1_uu_sH_dI_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tC_r1_c1_uu_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_scale_double_H__tN_r1_c1_uu_sH_dE_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tC_r1_c1_uu_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_scale_double_H__tN_r1_c1_uu_sH_dI_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += fabsf(a[(0*1)+0]);
+	local_row_sums[roff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
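+
+/*
+ * Illustrative sketch (not part of the generated sources; names are
+ * hypothetical): stripped of the blocking and bounded-box machinery,
+ * the unsymmetric tN kernels above reduce to a per-row accumulation
+ * of absolute values over a plain CSR structure:
+ */
+#include <math.h>	/* fabsf() */
+static void csr_abs_row_sums(const float *VA, float *row_sums, int nr,
+                             const int *bpntr)
+{
+	int i, k;
+	for(i = 0; i < nr; ++i)
+		for(k = bpntr[i]; k < bpntr[i+1]; ++k)	/* nonzeroes of row i */
+			row_sums[i] += fabsf(VA[k]);
+}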
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += fabsf(a[(0*1)+0]);
+	local_row_sums[roff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += fabsf(a[(0*1)+0]);
+	local_row_sums[roff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += fabsf(a[(0*1)+0]);
+	local_row_sums[roff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tT_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(j*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += fabsf(a[(0*1)+0]);
+	local_row_sums[coff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
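+
+/*
+ * Illustrative sketch (not part of the generated sources; names are
+ * hypothetical): in the tT/tC variants the accumulation target switches
+ * from the row index i (offset roff) to the column index j (offset coff),
+ * so the same sweep over the stored pattern yields the row sums of A^T,
+ * i.e. the column sums of A:
+ */
+static void csr_abs_col_sums(const float *VA, float *col_sums, int nr,
+                             const int *bindx, const int *bpntr)
+{
+	int i, k;
+	for(i = 0; i < nr; ++i)
+		for(k = bpntr[i]; k < bpntr[i+1]; ++k)
+			col_sums[bindx[k]] += fabsf(VA[k]);	/* assumes <math.h> */
+}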
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tT_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(j*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += fabsf(a[(0*1)+0]);
+	local_row_sums[coff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tT_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(j*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += fabsf(a[(0*1)+0]);
+	local_row_sums[coff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tT_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(j*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += fabsf(a[(0*1)+0]);
+	local_row_sums[coff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tC_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(j*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += fabsf(a[(0*1)+0]);
+	local_row_sums[coff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tC_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(j*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += fabsf(a[(0*1)+0]);
+	local_row_sums[coff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tC_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(j*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += fabsf(a[(0*1)+0]);
+	local_row_sums[coff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tC_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(j*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += fabsf(a[(0*1)+0]);
+	local_row_sums[coff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += fabsf(a[(0*1)+0]);
+	local_row_sums[roff+0]+=sum_0;
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
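+
+/*
+ * Illustrative sketch (not part of the generated sources; names are
+ * hypothetical): with symmetric storage only one triangle is kept, so
+ * each stored entry contributes to row i and, when it is not a diagonal
+ * element, to row j as well; the (roff!=coff || i!=j) guard above performs
+ * exactly this diagonal check across submatrix offsets:
+ */
+static void csr_sym_abs_row_sums(const float *VA, float *row_sums, int nr,
+                                 const int *bindx, const int *bpntr)
+{
+	int i, k;
+	for(i = 0; i < nr; ++i)
+		for(k = bpntr[i]; k < bpntr[i+1]; ++k)
+		{
+			const float av = fabsf(VA[k]);	/* assumes <math.h> */
+			row_sums[i] += av;
+			if(bindx[k] != i)	/* mirror the off-diagonal entry */
+				row_sums[bindx[k]] += av;
+		}
+}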
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += fabsf(a[(0*1)+0]);
+	local_row_sums[roff+0]+=sum_0;
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += fabsf(a[(0*1)+0]);
+	local_row_sums[roff+0]+=sum_0;
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += fabsf(a[(0*1)+0]);
+	local_row_sums[roff+0]+=sum_0;
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tT_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sS_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tT_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sS_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tT_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sS_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tT_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sS_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tC_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sS_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tC_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sS_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tC_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sS_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tC_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sS_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += fabsf(a[(0*1)+0]);
+	local_row_sums[roff+0]+=sum_0;
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += fabsf(a[(0*1)+0]);
+	local_row_sums[roff+0]+=sum_0;
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += fabsf(a[(0*1)+0]);
+	local_row_sums[roff+0]+=sum_0;
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += fabsf(a[(0*1)+0]);
+	local_row_sums[roff+0]+=sum_0;
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tT_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sH_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tT_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sH_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tT_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sH_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tT_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sH_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tC_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sH_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tC_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sH_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tC_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sH_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tC_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sH_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim-1} A^{T}_{ij} \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += a[(0*1)+0];
+	local_row_sums[roff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
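+
+/*
+ * Illustrative sketch (not part of the generated sources; names are
+ * hypothetical): the ROWSSUMS kernels share the INFTY_NORM structure but
+ * accumulate the signed values, with no fabsf(); the caller is then free
+ * to reduce the resulting vector further, e.g. taking a maximum for a norm:
+ */
+static void csr_row_sums(const float *VA, float *row_sums, int nr,
+                         const int *bpntr)
+{
+	int i, k;
+	for(i = 0; i < nr; ++i)
+		for(k = bpntr[i]; k < bpntr[i+1]; ++k)
+			row_sums[i] += VA[k];	/* signed accumulation */
+}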
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim-1} A^{T}_{ij} \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += a[(0*1)+0];
+	local_row_sums[roff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim-1} A^{T}_{ij} \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += a[(0*1)+0];
+	local_row_sums[roff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_j \leftarrow \sum_{i=0}^{Mdim-1} A^{T}_{ij} \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+	/* NOTE: an intrinsic could be used here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+
+	sum_0 += a[(0*1)+0];
+	local_row_sums[roff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tT_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
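+
+/*
+ * A matching sketch for the transposed kernel above (hypothetical driver
+ * code, kept out of the build): the accumulation target is indexed by the
+ * column index j rather than the row index i, so on the same matrix
+ * [[1,2],[0,3]] it yields the column sums of A, i.e. the row sums of A^T.
+ */
+#if 0
+static void rsb__rowssums_tT_sketch(void)
+{
+	const float VA[] = { 1.0f, 2.0f, 3.0f };
+	const rsb_coo_idx_t bindx[] = { 0, 1, 1 };
+	const rsb_nnz_idx_t bpntr[] = { 0, 2, 3 };
+	float row_sums[2] = { 0.0f, 0.0f };
+
+	rsb__BCSR_rowssums_float_C__tT_r1_c1_uu_sU_dE_uG(VA, row_sums,
+		2, 2, bindx, bpntr, NULL, NULL, NULL, 0, 2, 0, 0, 0);
+	/* row_sums now holds { 1.0f, 5.0f }. */
+}
+#endif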
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tT_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tT_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tT_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tC_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
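+
+/*
+ * Note that float is a real type, so conjugation is a no-op and the tC
+ * (conjugate transpose) kernels coincide with their tT counterparts;
+ * the bodies above and below are textually identical to the transposed
+ * ones.
+ */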
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tC_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tC_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tC_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(j*1));
+{
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
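+
+/*
+ * A sketch for the symmetric kernel above (hypothetical driver code, kept
+ * out of the build): only one triangle is stored, and each off-diagonal
+ * nonzero contributes to both row_sums[roff+i] and row_sums[coff+j], as
+ * the (roff!=coff || i!=j) guard shows. Here the symmetric matrix
+ * [[1,2],[2,3]] is represented by its lower triangle.
+ */
+#if 0
+static void rsb__rowssums_sS_sketch(void)
+{
+	const float VA[] = { 1.0f, 2.0f, 3.0f };   /* entries (0,0), (1,0), (1,1) */
+	const rsb_coo_idx_t bindx[] = { 0, 0, 1 };
+	const rsb_nnz_idx_t bpntr[] = { 0, 1, 3 };
+	float row_sums[2] = { 0.0f, 0.0f };
+
+	rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sS_dE_uG(VA, row_sums,
+		2, 2, bindx, bpntr, NULL, NULL, NULL, 0, 2, 0, 0, 0);
+	/* row_sums now holds { 3.0f, 5.0f }: the (1,0) entry counted twice. */
+}
+#endif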
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tT_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sS_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
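+
+/*
+ * For \f$ A = A^T \f$ the row sums and the column sums coincide
+ * (\f$ \sum_j A_{ij} = \sum_j A_{ji} \f$), which is why the transposed
+ * symmetric variants here simply forward to the untransposed kernels
+ * with unchanged arguments.
+ */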
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tT_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sS_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tT_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sS_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tT_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sS_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tC_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sS_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tC_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sS_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tC_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sS_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tC_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sS_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
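+
+/*
+ * For the real type float the constraint \f$ A = A^H \f$ is the same as
+ * \f$ A = A^T \f$, so these sH (Hermitian) kernel bodies are identical to
+ * the sS (symmetric) ones above.
+ */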
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float *local_row_sums = row_sums+(1*(i*1));
+{
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tT_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sH_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tT_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sH_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tT_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sH_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tT_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sH_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tC_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sH_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tC_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sH_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tC_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sH_dE_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tC_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The symmetric transposed case reduces to the symmetric untransposed one. */
+	return rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sH_dI_uG(VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tN_r1_c1_uu_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
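+
+/*
+ * A usage sketch for the untransposed scale kernel above (hypothetical
+ * driver code, kept out of the build): with d taken per row index i, each
+ * stored element of row i is multiplied by scale_factors[i], here turning
+ * [[1,2],[0,3]] into [[2,4],[0,30]] in place.
+ */
+#if 0
+static void rsb__scale_tN_sketch(void)
+{
+	float VA[] = { 1.0f, 2.0f, 3.0f };
+	const rsb_coo_idx_t bindx[] = { 0, 1, 1 };
+	const rsb_nnz_idx_t bpntr[] = { 0, 2, 3 };
+	const float scale_factors[] = { 2.0f, 10.0f };
+
+	rsb__BCSR_scale_float_C__tN_r1_c1_uu_sU_dE_uG(VA,
+		2, 2, bindx, bpntr, NULL, NULL, NULL, 0, 2, 0, 0, 0,
+		scale_factors);
+	/* VA now holds { 2.0f, 4.0f, 30.0f }. */
+}
+#endif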
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tN_r1_c1_uu_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tN_r1_c1_uu_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tN_r1_c1_uu_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tT_r1_c1_uu_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
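+
+/*
+ * The transposed scale kernel above instead takes d per column index j,
+ * multiplying each stored element in column j by scale_factors[j]; on the
+ * sketch matrix used earlier, VA { 1, 2, 3 } would become { 2, 20, 30 }
+ * with scale_factors { 2, 10 }.
+ */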
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tT_r1_c1_uu_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tT_r1_c1_uu_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tT_r1_c1_uu_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tC_r1_c1_uu_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tC_r1_c1_uu_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tC_r1_c1_uu_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tC_r1_c1_uu_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+	a[(0*1)+0]*=d[0];
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tN_r1_c1_uu_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+	a[(0*1)+0]*=d[0];
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tN_r1_c1_uu_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+	a[(0*1)+0]*=d[0];
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tN_r1_c1_uu_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+	a[(0*1)+0]*=d[0];
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tN_r1_c1_uu_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+	a[(0*1)+0]*=d[0];
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tT_r1_c1_uu_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For symmetric A, the transposed case reduces to the non-transposed one. */
+	return rsb__BCSR_scale_float_C__tN_r1_c1_uu_sS_dE_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tT_r1_c1_uu_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For symmetric A, the transposed case reduces to the non-transposed one. */
+	return rsb__BCSR_scale_float_C__tN_r1_c1_uu_sS_dI_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tT_r1_c1_uu_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For symmetric A, the transposed case reduces to the non-transposed one. */
+	return rsb__BCSR_scale_float_H__tN_r1_c1_uu_sS_dE_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tT_r1_c1_uu_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For symmetric A, the transposed case reduces to the non-transposed one. */
+	return rsb__BCSR_scale_float_H__tN_r1_c1_uu_sS_dI_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tC_r1_c1_uu_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For symmetric A of a real type, the conjugate-transposed case reduces to the non-transposed one. */
+	return rsb__BCSR_scale_float_C__tN_r1_c1_uu_sS_dE_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tC_r1_c1_uu_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For symmetric A of a real type, the conjugate-transposed case reduces to the non-transposed one. */
+	return rsb__BCSR_scale_float_C__tN_r1_c1_uu_sS_dI_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tC_r1_c1_uu_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For symmetric A of a real type, the conjugate-transposed case reduces to the non-transposed one. */
+	return rsb__BCSR_scale_float_H__tN_r1_c1_uu_sS_dE_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tC_r1_c1_uu_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For symmetric A of a real type, the conjugate-transposed case reduces to the non-transposed one. */
+	return rsb__BCSR_scale_float_H__tN_r1_c1_uu_sS_dI_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tN_r1_c1_uu_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+	a[(0*1)+0]*=d[0];
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tN_r1_c1_uu_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+	a[(0*1)+0]*=d[0];
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tN_r1_c1_uu_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+	a[(0*1)+0]*=d[0];
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tN_r1_c1_uu_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+	a[(0*1)+0]*=d[0];
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tT_r1_c1_uu_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For Hermitian A of a real type, the transposed case reduces to the non-transposed one. */
+	return rsb__BCSR_scale_float_C__tN_r1_c1_uu_sH_dE_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tT_r1_c1_uu_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For Hermitian A of a real type, the transposed case reduces to the non-transposed one. */
+	return rsb__BCSR_scale_float_C__tN_r1_c1_uu_sH_dI_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tT_r1_c1_uu_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For Hermitian A of a real type, the transposed case reduces to the non-transposed one. */
+	return rsb__BCSR_scale_float_H__tN_r1_c1_uu_sH_dE_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tT_r1_c1_uu_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For Hermitian A of a real type, the transposed case reduces to the non-transposed one. */
+	return rsb__BCSR_scale_float_H__tN_r1_c1_uu_sH_dI_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tC_r1_c1_uu_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For Hermitian A, the conjugate-transposed case reduces to the non-transposed one. */
+	return rsb__BCSR_scale_float_C__tN_r1_c1_uu_sH_dE_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tC_r1_c1_uu_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For Hermitian A, the conjugate-transposed case reduces to the non-transposed one. */
+	return rsb__BCSR_scale_float_C__tN_r1_c1_uu_sH_dI_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tC_r1_c1_uu_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For Hermitian A, the conjugate-transposed case reduces to the non-transposed one. */
+	return rsb__BCSR_scale_float_H__tN_r1_c1_uu_sH_dE_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tC_r1_c1_uu_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For Hermitian A, the conjugate-transposed case reduces to the non-transposed one. */
+	return rsb__BCSR_scale_float_H__tN_r1_c1_uu_sH_dI_uG(VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors);
+}
+
+
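+/* A note on the generated-name suffixes, as far as they can be read off
+ * the documentation comments in this file: _C_/_H_ selects rsb_coo_idx_t
+ * versus rsb_half_idx_t column indices; tN/tT/tC the untransposed,
+ * transposed, and conjugate-transposed variants; r1_c1 the 1 x 1
+ * blocking; sU/sS/sH the unsymmetric, symmetric, and Hermitian cases;
+ * dE/dI explicit versus implicit diagonal. */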
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE: it would be better to use an intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	sum_0 += cabsf(a[(0*1)+0]);
+	local_row_sums[roff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
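+/* Illustration only: a minimal, hedged sketch of the row-sum
+ * accumulation performed by the infinity-norm kernels above, on a plain
+ * CSR matrix with ordinary int indices; the csr_infty_norm name is
+ * hypothetical. The infinity norm is the maximum over rows of the sums
+ * of absolute values, which is what the cabsf() accumulation computes.
+ * Kept under #if 0 so it does not affect this translation unit. */
+#if 0
+#include <complex.h>
+
+static float csr_infty_norm(const float complex *VA,
+                            const int *bpntr, int nrows)
+{
+	float norm = 0.0f;
+	int i, k;
+	for(i = 0; i < nrows; ++i)
+	{
+		float row_sum = 0.0f;
+		for(k = bpntr[i]; k < bpntr[i+1]; ++k)
+			row_sum += cabsf(VA[k]);	/* |A_ij| summed over row i */
+		if(row_sum > norm)
+			norm = row_sum;
+	}
+	return norm;
+}
+#endif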
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE: it would be better to use an intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	sum_0 += cabsf(a[(0*1)+0]);
+	local_row_sums[roff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE: it would be better to use an intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	sum_0 += cabsf(a[(0*1)+0]);
+	local_row_sums[roff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE: it would be better to use an intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	sum_0 += cabsf(a[(0*1)+0]);
+	local_row_sums[roff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE: it would be better to use an intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	sum_0 += cabsf(a[(0*1)+0]);
+	local_row_sums[coff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE: it would be better to use an intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	sum_0 += cabsf(a[(0*1)+0]);
+	local_row_sums[coff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE: it would be better to use an intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	sum_0 += cabsf(a[(0*1)+0]);
+	local_row_sums[coff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE: it would be better to use an intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	sum_0 += cabsf(a[(0*1)+0]);
+	local_row_sums[coff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE: it would be better to use an intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	sum_0 += cabsf(conjf(a[(0*1)+0]));
+	local_row_sums[coff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE: it would be better to use an intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	sum_0 += cabsf(conjf(a[(0*1)+0]));
+	local_row_sums[coff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE: it would be better to use an intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	sum_0 += cabsf(conjf(a[(0*1)+0]));
+	local_row_sums[coff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE: it would be better to use an intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	sum_0 += cabsf(conjf(a[(0*1)+0]));
+	local_row_sums[coff+0]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE: it would be better to use an intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	sum_0 += cabsf(a[(0*1)+0]);
+	local_row_sums[roff+0]+=sum_0;
+	/* mirror the off-diagonal contribution onto the transposed position */
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
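+/* Reading of the symmetric variants above: symmetric storage keeps only
+ * one triangle, so each stored off-diagonal entry A_{ij} must contribute
+ * to the row sums of both row i and row j; the (roff!=coff || i!=j)
+ * guard only avoids counting diagonal entries twice. A minimal, hedged
+ * standalone sketch of the same idea (csr_sym_row_sums is hypothetical),
+ * kept under #if 0 so it does not affect this translation unit: */
+#if 0
+#include <complex.h>
+
+static void csr_sym_row_sums(const float complex *VA, const int *bindx,
+                             const int *bpntr, int nrows, float *row_sums)
+{
+	int i, k;
+	for(i = 0; i < nrows; ++i)
+		for(k = bpntr[i]; k < bpntr[i+1]; ++k)
+		{
+			const float v = cabsf(VA[k]);
+			row_sums[i] += v;			/* stored triangle */
+			if(bindx[k] != i)
+				row_sums[bindx[k]] += v;	/* mirrored entry */
+		}
+}
+#endif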
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE: it would be better to use an intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	sum_0 += cabsf(a[(0*1)+0]);
+	local_row_sums[roff+0]+=sum_0;
+	/* mirror the off-diagonal contribution onto the transposed position */
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE: it would be better to use an intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	sum_0 += cabsf(a[(0*1)+0]);
+	local_row_sums[roff+0]+=sum_0;
+	/* mirror the off-diagonal contribution onto the transposed position */
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE: it would be better to use an intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	sum_0 += cabsf(a[(0*1)+0]);
+	local_row_sums[roff+0]+=sum_0;
+	/* mirror the off-diagonal contribution onto the transposed position */
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE: it would be better to use an intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	sum_0 += cabsf(a[(0*1)+0]);
+	local_row_sums[coff+0]+=sum_0;
+	/* mirror the off-diagonal contribution onto the transposed position */
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += cabsf(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += cabsf(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += cabsf(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += cabsf(conjf(a[(0*1)+0]));
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
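+/*
+ * Editor's note: the conjugate-transpose (tC) kernels in this group
+ * compute cabsf(conjf(z)); since |conj(z)| == |z| for every complex z,
+ * the conjf() is mathematically redundant for the infinity norm (it is
+ * left as the generator emitted it). A two-line check of the identity:
+ */
+#if 0
+#include <assert.h>
+#include <complex.h>
+
+int main(void)
+{
+	const float complex z = 3.0f - 4.0f * I;
+	assert(cabsf(conjf(z)) == cabsf(z)); /* both equal 5 */
+	return 0;
+}
+#endif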
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += cabsf(conjf(a[(0*1)+0]));
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += cabsf(conjf(a[(0*1)+0]));
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += cabsf(conjf(a[(0*1)+0]));
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += cabsf(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
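+/*
+ * Editor's note: the kernels in this file only accumulate per-row sums
+ * into row_sums; the infinity norm itself would be the maximum of those
+ * sums, presumably taken by the caller once every leaf has contributed.
+ * A hypothetical reduction helper under that assumption:
+ */
+#if 0
+#include <complex.h>
+
+/* hypothetical: reduce accumulated absolute row sums to the infinity norm */
+static float infty_norm_from_row_sums(const float complex *row_sums, int n)
+{
+	float nrm = 0.0f;
+	int i;
+	for (i = 0; i < n; ++i)
+		if (crealf(row_sums[i]) > nrm) /* sums of cabsf() values are real */
+			nrm = crealf(row_sums[i]);
+	return nrm;
+}
+#endif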
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += cabsf(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += cabsf(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += cabsf(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += cabsf(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += cabsf(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += cabsf(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += cabsf(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += cabsf(conjf(a[(0*1)+0]));
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += cabsf(conjf(a[(0*1)+0]));
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += cabsf(conjf(a[(0*1)+0]));
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += cabsf(conjf(a[(0*1)+0]));
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
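+/*
+ * Editor's sketch: unlike the infinity-norm kernels above, the rowssums
+ * kernels accumulate the complex entries themselves, with no cabsf(). A
+ * minimal standalone cross-check of that behaviour, again assuming plain
+ * CSR arrays (extract to its own file to compile and run):
+ */
+#if 0
+#include <stdio.h>
+#include <complex.h>
+
+int main(void)
+{
+	/* a 2 x 3 matrix with 4 nonzeroes */
+	const float complex VA[] = { 1.0f + 1.0f*I, 2.0f, 3.0f*I, 4.0f };
+	const int bindx[] = { 0, 2, 1, 2 };
+	const int bpntr[] = { 0, 2, 4 };
+	float complex row_sums[2] = { 0, 0 };
+	int i, k;
+
+	for (i = 0; i < 2; ++i)
+		for (k = bpntr[i]; k < bpntr[i+1]; ++k)
+			row_sums[i] += VA[k]; /* plain sum: no absolute value */
+
+	/* expect (3+1i) and (4+3i) */
+	for (i = 0; i < 2; ++i)
+		printf("%g%+gi\n", crealf(row_sums[i]), cimagf(row_sums[i]));
+	return 0;
+}
+#endif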
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += conjf(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
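+/*
+ * Editor's note: in the tC rowssums kernels above the accumulation uses
+ * conjf(a); since conjugation distributes over addition, the result is
+ * simply the conjugate of the plain row sum. A one-array sanity check:
+ */
+#if 0
+#include <assert.h>
+#include <complex.h>
+
+int main(void)
+{
+	const float complex a[] = { 1.0f + 2.0f*I, 3.0f - 1.0f*I };
+	float complex s_conj = 0, s_plain = 0;
+	int k;
+
+	for (k = 0; k < 2; ++k) {
+		s_conj += conjf(a[k]);
+		s_plain += a[k];
+	}
+	assert(s_conj == conjf(s_plain)); /* conj(sum) == sum(conj) */
+	return 0;
+}
+#endif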
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += conjf(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += conjf(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += conjf(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
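+/*
+ * Editor's sketch: the symmetric (sS) rowssums kernels combine the two
+ * patterns above: plain complex sums plus the mirror update, so the row
+ * sums of the full symmetric matrix are recovered from one stored
+ * triangle. Standalone illustration under the same CSR assumptions:
+ */
+#if 0
+#include <stdio.h>
+#include <complex.h>
+
+int main(void)
+{
+	/* lower triangle of the symmetric matrix [[1, 2i], [2i, 3]] */
+	const float complex VA[] = { 1.0f, 2.0f*I, 3.0f };
+	const int bindx[] = { 0, 0, 1 };
+	const int bpntr[] = { 0, 1, 3 };
+	float complex row_sums[2] = { 0, 0 };
+	int i, k;
+
+	for (i = 0; i < 2; ++i)
+		for (k = bpntr[i]; k < bpntr[i+1]; ++k) {
+			const int j = bindx[k];
+			row_sums[i] += VA[k];
+			if (i != j)
+				row_sums[j] += VA[k]; /* unstored upper-triangle entry */
+		}
+
+	/* expect (1+2i) and (3+2i) */
+	for (i = 0; i < 2; ++i)
+		printf("%g%+gi\n", crealf(row_sums[i]), cimagf(row_sums[i]));
+	return 0;
+}
+#endif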
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
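+/*
+ * Note on the transposed (tT) variants above: the roles of the two updates
+ * swap with respect to the untransposed kernels.  The primary accumulation
+ * goes to row_sums[coff + j] (a row of A^T is a column of A), while the
+ * symmetric mirror term goes to row_sums[roff + i], again skipping stored
+ * diagonal entries via the (roff != coff || i != j) test.
+ */
+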
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += conjf(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
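+/*
+ * Note on the conjugate-transpose (tC) variants above: each stored value is
+ * conjugated on accumulation, since entry (j,i) of A^H equals conj(A_{ij});
+ * conjf() is the C99 float complex conjugation, e.g.
+ * conjf(2.0f + 3.0f*I) == 2.0f - 3.0f*I.
+ */
+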
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += conjf(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += conjf(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += conjf(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
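+/*
+ * Note on the Hermitian (sH) no-transpose variants above: the mirrored
+ * update row_sums[coff + j] += sum_0 adds the stored value unconjugated,
+ * whereas for A == A^H the transposed entry is conj(A_{ij}); this is
+ * plausibly what the FIXME marker in these kernel bodies refers to.
+ */
+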
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += conjf(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += conjf(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += conjf(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			float complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE: it would be better to use an intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register float complex sum_0=0;
+	
+	sum_0 += conjf(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tN_r1_c1_uu_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
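+/*
+ * Editorial usage sketch for the scale kernel above: an illustration, not
+ * part of the generated sources; the guard macro is hypothetical, and NULL/0
+ * are passed for the pointer and flag parameters this kernel body never
+ * reads.  In the untransposed variant each stored entry of row i is
+ * multiplied in place by scale_factors[i].
+ */
+#ifdef RSB_SCALE_USAGE_SKETCH
+int main(void)
+{
+	float complex VA[] = { 1, 2, 3 };          /* rows: {1, 2} and {3}    */
+	const rsb_coo_idx_t bindx[] = { 0, 1, 1 }; /* column index of each nz */
+	const rsb_nnz_idx_t bpntr[] = { 0, 2, 3 }; /* row pointers            */
+	const float complex scale_factors[] = { 2, 10 };
+
+	rsb__BCSR_scale_float_complex_C__tN_r1_c1_uu_sU_dE_uG(
+		VA, 2, 2, bindx, bpntr,
+		NULL, NULL, NULL, /* indptr, rpntr, cpntr: unused by this kernel */
+		0, 2,             /* br, bc: bounded-box row range               */
+		0, 0,             /* roff, coff: submatrix offsets               */
+		0, scale_factors);
+	/* expected: VA == { 2, 4, 30 } (row 0 scaled by 2, row 1 by 10) */
+	return 0;
+}
+#endif /* RSB_SCALE_USAGE_SKETCH */
+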
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tN_r1_c1_uu_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tN_r1_c1_uu_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tN_r1_c1_uu_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tT_r1_c1_uu_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
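+/*
+ * Note on the transposed (tT) scale variants above: the factor is indexed
+ * by the column index j rather than the row index i, so scaling the rows
+ * of op(A) = A^T amounts to scaling the columns of the stored A.
+ */
+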
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tT_r1_c1_uu_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tT_r1_c1_uu_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tT_r1_c1_uu_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tC_r1_c1_uu_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tC_r1_c1_uu_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tC_r1_c1_uu_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tC_r1_c1_uu_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A \neq A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tN_r1_c1_uu_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tN_r1_c1_uu_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tN_r1_c1_uu_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P, P_{ii} = s_{i} \f$, where \f$ A = A^T \f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
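+/*
+ * Note: the _H_ variants differ from the _C_ ones only in the type of the
+ * column index array: rsb_half_idx_t is a narrower integer type than
+ * rsb_coo_idx_t, roughly halving the index storage for submatrices whose
+ * column range fits it. The kernel body is otherwise identical.
+ */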
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tN_r1_c1_uu_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tT_r1_c1_uu_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
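+/*
+ * From the tT (and later tC) scale variants onwards, the factor is selected
+ * by the column index (d = scale_factors + j) instead of the row index, so a
+ * stored nonzero \f$a_{ij}\f$ is updated to \f$a_{ij} s_{j}\f$ rather than
+ * \f$a_{ij} s_{i}\f$. For example, with factors \f$s = (2, 3)\f$ the entry
+ * \f$a_{12}\f$ becomes \f$3 a_{12}\f$ under column scaling, but \f$2 a_{12}\f$
+ * under row scaling.
+ */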
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tT_r1_c1_uu_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tT_r1_c1_uu_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tT_r1_c1_uu_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tC_r1_c1_uu_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tC_r1_c1_uu_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tC_r1_c1_uu_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tC_r1_c1_uu_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tN_r1_c1_uu_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
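+/*
+ * The Hermitian (sH) scale variants that follow share their body with the
+ * symmetric (sS) ones above: a diagonal scaling only rewrites stored values,
+ * so no conjugation is involved; only the choice between row and column
+ * factor changes with the transposition flag.
+ */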
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tN_r1_c1_uu_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tN_r1_c1_uu_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tN_r1_c1_uu_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tT_r1_c1_uu_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tT_r1_c1_uu_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tT_r1_c1_uu_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tT_r1_c1_uu_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tC_r1_c1_uu_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tC_r1_c1_uu_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tC_r1_c1_uu_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tC_r1_c1_uu_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A \cdot P,\; P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const float complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += cabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
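+/*
+ * Usage sketch (illustrative; hypothetical helper, not librsb API): the
+ * infty_norm kernels accumulate per-row sums of absolute values into
+ * row_sums[]; the infinity norm itself is the maximum of those sums, taken
+ * once all submatrices have been visited:
+ *
+ * \code
+ * #include <complex.h>
+ * #include <math.h>
+ *
+ * // Reduce per-row absolute sums to ||A||_inf = max_i sum_j |a_ij|.
+ * static double infty_norm_from_row_sums(const double complex *row_sums,
+ *                                        int nrows)
+ * {
+ * 	double nrm = 0.0;
+ * 	for (int i = 0; i < nrows; ++i)
+ * 		nrm = fmax(nrm, creal(row_sums[i]));
+ * 	return nrm;
+ * }
+ * \endcode
+ *
+ * The accumulators are complex only for uniformity with the value type;
+ * since cabs() returns a real value, their imaginary parts remain zero.
+ */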
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += cabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += cabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += cabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += cabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += cabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += cabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += cabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += cabs(conj(a[(0*1)+0]));
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
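+/*
+ * In the conjugate-transpose (tC) norm variants, the conjugation is a
+ * mathematical no-op for this operation: since |conj(z)| == |z|,
+ * cabs(conj(a[...])) always equals cabs(a[...]); the generator presumably
+ * emits it for uniformity with the other transposition variants.
+ */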
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += cabs(conj(a[(0*1)+0]));
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += cabs(conj(a[(0*1)+0]));
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += cabs(conj(a[(0*1)+0]));
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += cabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
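+/*
+ * In the symmetric (sS) norm variants, each off-diagonal stored entry
+ * contributes to two row sums: to its own row i and, via the
+ * (roff!=coff || i!=j) guard, to row j, since the unstored mirror entry
+ * a_ji = a_ij carries the same absolute value. E.g. for the 2 x 2 symmetric
+ * matrix with a_11 = 1, a_12 = a_21 = 2, a_22 = 3 and only the upper
+ * triangle stored, the stored |a_12| = 2 is added to both row sums, giving
+ * row_sums = (3, 5) and \f$\|A\|_{\infty} = 5\f$.
+ */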
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += cabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += cabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim-1} |A_{ij}| \f$), where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += cabs(a[(0*1)+0]);
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += cabs(a[(0*1)+0]);
+	local_row_sums[coff+0] += sum_0;
+	if(roff != coff || i != j)
+		row_sums[roff+0+(i*1)] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
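+
+/*
+ * Editorial note: in the _tT_ variants (the kernel above and those that
+ * follow) the roles of the row index i and the column index j are exchanged
+ * with respect to the _tN_ case: |a_ij| is accumulated at row_sums[coff+j]
+ * and mirrored to row_sums[roff+i].  Since these kernels assume A = A^T,
+ * both orderings yield the same row sums.
+ */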
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += cabs(a[(0*1)+0]);
+	local_row_sums[coff+0] += sum_0;
+	if(roff != coff || i != j)
+		row_sums[roff+0+(i*1)] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += cabs(a[(0*1)+0]);
+	local_row_sums[coff+0] += sum_0;
+	if(roff != coff || i != j)
+		row_sums[roff+0+(i*1)] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += cabs(a[(0*1)+0]);
+	local_row_sums[coff+0] += sum_0;
+	if(roff != coff || i != j)
+		row_sums[roff+0+(i*1)] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += cabs(conj(a[(0*1)+0]));
+	local_row_sums[coff+0] += sum_0;
+	if(roff != coff || i != j)
+		row_sums[roff+0+(i*1)] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
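+
+/*
+ * Editorial note: cabs(conj(z)) == cabs(z) for every z, so the conj() in the
+ * _tC_ infinity-norm kernels is mathematically redundant; it only documents
+ * that the conjugate transpose A^H is being traversed.
+ */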
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += cabs(conj(a[(0*1)+0]));
+	local_row_sums[coff+0] += sum_0;
+	if(roff != coff || i != j)
+		row_sums[roff+0+(i*1)] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += cabs(conj(a[(0*1)+0]));
+	local_row_sums[coff+0] += sum_0;
+	if(roff != coff || i != j)
+		row_sums[roff+0+(i*1)] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += cabs(conj(a[(0*1)+0]));
+	local_row_sums[coff+0] += sum_0;
+	if(roff != coff || i != j)
+		row_sums[roff+0+(i*1)] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += cabs(a[(0*1)+0]);
+	local_row_sums[roff+0] += sum_0;
+	if(roff != coff || i != j)
+		row_sums[coff+0+(j*1)] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
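+
+/*
+ * Editorial note: only the moduli |a_ij| enter an infinity norm, and
+ * |a_ij| == |conj(a_ij)|, so the Hermitian (_sH_, A = A^H) kernel bodies in
+ * this group coincide with the symmetric (_sS_) ones earlier in this file.
+ */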
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += cabs(a[(0*1)+0]);
+	local_row_sums[roff+0] += sum_0;
+	if(roff != coff || i != j)
+		row_sums[coff+0+(j*1)] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += cabs(a[(0*1)+0]);
+	local_row_sums[roff+0] += sum_0;
+	if(roff != coff || i != j)
+		row_sums[coff+0+(j*1)] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += cabs(a[(0*1)+0]);
+	local_row_sums[roff+0] += sum_0;
+	if(roff != coff || i != j)
+		row_sums[coff+0+(j*1)] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += cabs(a[(0*1)+0]);
+	local_row_sums[coff+0] += sum_0;
+	if(roff != coff || i != j)
+		row_sums[roff+0+(i*1)] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += cabs(a[(0*1)+0]);
+	local_row_sums[coff+0] += sum_0;
+	if(roff != coff || i != j)
+		row_sums[roff+0+(i*1)] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += cabs(a[(0*1)+0]);
+	local_row_sums[coff+0] += sum_0;
+	if(roff != coff || i != j)
+		row_sums[roff+0+(i*1)] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += cabs(a[(0*1)+0]);
+	local_row_sums[coff+0] += sum_0;
+	if(roff != coff || i != j)
+		row_sums[roff+0+(i*1)] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += cabs(conj(a[(0*1)+0]));
+	local_row_sums[coff+0] += sum_0;
+	if(roff != coff || i != j)
+		row_sums[roff+0+(i*1)] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += cabs(conj(a[(0*1)+0]));
+	local_row_sums[coff+0] += sum_0;
+	if(roff != coff || i != j)
+		row_sums[roff+0+(i*1)] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += cabs(conj(a[(0*1)+0]));
+	local_row_sums[coff+0] += sum_0;
+	if(roff != coff || i != j)
+		row_sums[roff+0+(i*1)] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{\infty} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} |A_{ij}| \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* INFTY_NORM KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += cabs(conj(a[(0*1)+0]));
+	local_row_sums[coff+0] += sum_0;
+	if(roff != coff || i != j)
+		row_sums[roff+0+(i*1)] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += a[(0*1)+0];
+	local_row_sums[roff+0] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
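+
+/*
+ * Editorial usage sketch (not part of the generated sources), analogous to
+ * the infinity-norm example earlier but accumulating the complex values
+ * themselves; the 1 x 2 matrix and the br/bc/roff/coff choices are
+ * illustrative assumptions.
+ *
+ *	double complex VA[] = { 1+1*I, 2 };	// a00, a01
+ *	rsb_coo_idx_t bindx[] = { 0, 1 };
+ *	rsb_nnz_idx_t bpntr[] = { 0, 2 };	// one row, two nonzeroes
+ *	double complex row_sums[1] = { 0 };
+ *	rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_uu_sU_dE_uG(
+ *		VA, row_sums, 1, 2, bindx, bpntr, NULL, NULL, NULL,
+ *		0, 1, 0, 0, 0);
+ *	// row_sums[0] == (1+1i) + 2 == 3+1i
+ */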
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += a[(0*1)+0];
+	local_row_sums[roff+0] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += a[(0*1)+0];
+	local_row_sums[roff+0] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += a[(0*1)+0];
+	local_row_sums[roff+0] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += a[(0*1)+0];
+	local_row_sums[coff+0] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
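+
+/*
+ * Editorial note: the _tT_ rowssums kernels accumulate at row_sums[coff+j],
+ * i.e. per column of A; this realizes the row sums of A^T stated in the
+ * Doxygen formula above.
+ */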
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += a[(0*1)+0];
+	local_row_sums[coff+0] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += a[(0*1)+0];
+	local_row_sums[coff+0] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += a[(0*1)+0];
+	local_row_sums[coff+0] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += conj(a[(0*1)+0]);
+	local_row_sums[coff+0] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
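+
+/*
+ * Editorial note: unlike in the infinity-norm kernels, the conj() here is
+ * not redundant: summing conj(a_ij) per column yields the row sums of A^H,
+ * which differ from those of A^T whenever A has nonreal entries.
+ */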
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += conj(a[(0*1)+0]);
+	local_row_sums[coff+0] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += conj(a[(0*1)+0]);
+	local_row_sums[coff+0] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += conj(a[(0*1)+0]);
+	local_row_sums[coff+0] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{
+	/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+	/* NOTE : should better use some intrinsic here. */
+	/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0 = 0;
+
+	sum_0 += a[(0*1)+0];
+	local_row_sums[roff+0] += sum_0;
+	if(roff != coff || i != j)
+		row_sums[coff+0+(j*1)] += sum_0;
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
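+
+/*
+ * Editorial note: the symmetric rowssums kernels (the one above and those
+ * that follow) mirror each off-diagonal value into row j exactly as the
+ * symmetric infinity-norm kernels do, but with the signed complex value
+ * a_ij in place of |a_ij|.
+ */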
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{H}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += conj(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
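+/*
+ * Editor's note: the _tC_ variants below differ from the _tT_ ones only in
+ * accumulating conj(a) instead of a into sum_0, i.e. they collect the row
+ * sums of the conjugated operand.
+ */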
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{H}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += conj(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{H}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += conj(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{H}_{ij} \f$), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += conj(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
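+	/* note: for the Hermitian (sH) case this mirrored term should
+	   arguably be conj(sum_0); the FIXME above flags this kernel as
+	   not yet correct */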
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{mdim} A_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(i*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[roff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[coff+0+(j*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{T}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += a[(0*1)+0];
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{H}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += conj(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{H}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += conj(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{H}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += conj(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ \|A\|_{1} \f$ (or rather, \f$ row\_sums_i \leftarrow \sum_{j=0}^{Mdim} A^{H}_{ij} \f$), where \f$ A = A^H \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const rsb_coo_idx_t incx=1,incy=1;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* ROWSSUMS KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[k];*/
+			double complex *local_row_sums = row_sums+(1*(j*1));
+{/* FIXME : THE FOLLOWING CODE IS NOT CORRECT */
+
+	/* NOTE : should better use some intrinsic here. */
+/* generated by the RSB_M4_INFTY_NORM_FUNCTION_BODY_UNROLLED macro */
+	register double complex sum_0=0;
+	
+	sum_0 += conj(a[(0*1)+0]);
+	
+	
+	local_row_sums[coff+0]+=sum_0;
+	
+	if(roff!=coff || i!=j)
+		row_sums[roff+0+(i*1)]+=sum_0;
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tN_r1_c1_uu_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow P \cdot A \f$, \f$ P_{ii}=s_{i} \f$ (row scaling), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
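+/*
+ * Editor's sketch (hypothetical data, not generated code): driving the
+ * row-scaling kernel above directly.  Row i of the stored matrix is
+ * multiplied by scale_factors[i].  Guarded with #if 0 so it does not
+ * enter the build.
+ */
+#if 0
+#include <complex.h>
+static void rsb_scale_sketch(void)
+{
+	/* 2 x 2 matrix {{1,0},{2,3}} in CSR form */
+	const rsb_nnz_idx_t bpntr[] = { 0, 1, 3 };	/* row pointers */
+	const rsb_coo_idx_t bindx[] = { 0, 0, 1 };	/* column indices */
+	double complex VA[] = { 1, 2, 3 };		/* stored values */
+	const double complex scale_factors[] = { 2, 10 };
+
+	rsb__BCSR_scale_double_complex_C__tN_r1_c1_uu_sU_dE_uG(
+		VA, 2, 2, bindx, bpntr, NULL, NULL, NULL,
+		0, 2, 0, 0, RSB_FLAG_NOFLAGS, scale_factors);
+
+	/* expected: VA == {2, 20, 30} */
+}
+#endif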
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tN_r1_c1_uu_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow P \cdot A \f$, \f$ P_{ii}=s_{i} \f$ (row scaling), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tN_r1_c1_uu_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow P \cdot A \f$, \f$ P_{ii}=s_{i} \f$ (row scaling), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tN_r1_c1_uu_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow P \cdot A \f$, \f$ P_{ii}=s_{i} \f$ (row scaling), where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tT_r1_c1_uu_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P \f$, \f$ P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
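+/*
+ * Editor's note: the _tT_/_tC_ scale variants below index scale_factors by
+ * the column index j, i.e. on the stored A they perform A_{ij} *= s_j
+ * (column scaling), which corresponds to row scaling of the transposed
+ * operand.
+ */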
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tT_r1_c1_uu_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P \f$, \f$ P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tT_r1_c1_uu_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P \f$, \f$ P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tT_r1_c1_uu_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P \f$, \f$ P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tC_r1_c1_uu_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P \f$, \f$ P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tC_r1_c1_uu_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P \f$, \f$ P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tC_r1_c1_uu_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P \f$, \f$ P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tC_r1_c1_uu_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow A \cdot P \f$, \f$ P_{ii}=s_{i} \f$, where \f$ A \neq A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tN_r1_c1_uu_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$ A \leftarrow P \cdot A \f$, \f$ P_{ii}=s_{i} \f$ (row scaling of the stored triangle), where \f$ A = A^T \f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
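+/*
+ * Note on the indexing in these variants: the untransposed (tN) kernels
+ * select the scale factor by the row index i, while the transposed (tT)
+ * and conjugate-transposed (tC) kernels select it by the column index
+ * j=bindx[k], i.e. by the row index of the transposed operand.
+ */
+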
+rsb_err_t rsb__BCSR_scale_double_complex_C__tN_r1_c1_uu_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tN_r1_c1_uu_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tN_r1_c1_uu_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tT_r1_c1_uu_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tT_r1_c1_uu_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tT_r1_c1_uu_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tT_r1_c1_uu_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tC_r1_c1_uu_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tC_r1_c1_uu_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tC_r1_c1_uu_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tC_r1_c1_uu_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tN_r1_c1_uu_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tN_r1_c1_uu_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tN_r1_c1_uu_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tN_r1_c1_uu_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(i*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tT_r1_c1_uu_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tT_r1_c1_uu_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tT_r1_c1_uu_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tT_r1_c1_uu_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tC_r1_c1_uu_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tC_r1_c1_uu_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tC_r1_c1_uu_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tC_r1_c1_uu_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$A \leftarrow A\cdot P, P_{ii}=s_{i}\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+ 		/* SCALE KERNEL HERE */
+		for(k=fk;k<lk;++k,a += rows*columns)
+		{
+		j=bindx[k];
+		{
+			/*a=VA+indptr[(k)];*/
+			const double complex *d=scale_factors+(1*(j*1));
+{/* generated by the RSB_M4_ROW_SCALE_FUNCTION_BODY_UNROLLED macro */
+
+	a[(0*1)+0]*=d[0];
+	
+	
+
+
+}
+		}
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
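+/*
+ * Key to the generated-function name suffixes, as suggested by the
+ * per-function documentation above: _C_/_H_ select rsb_coo_idx_t vs.
+ * rsb_half_idx_t column indices; tN/tT/tC apparently select no
+ * transposition, transposition, and conjugate transposition; sU/sS/sH
+ * mark the unsymmetric (A != A^T), symmetric (A = A^T), and hermitian
+ * (A = A^H) cases; dE/dI mark an explicit or implicit diagonal; and
+ * rN_cM encodes the N x M block size (here always r1_c1).
+ */
+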
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tN_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel which performs
+	 * the requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vectors
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_double_C__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_double_C__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
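+/*
+ * Worked instance of the padding bound above: with matrixrows=10 and
+ * blockrows=4, mod(10,4)=2, so the tolerated overflow is mod(4-2,4)=2
+ * elements, i.e. the padding needed to round 10 rows up to the next
+ * multiple of the block size, 12.
+ */
+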
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tN_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel which performs
+	 * the requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vectors
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_double_C__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_double_C__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
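+/*
+ * Illustrative sketch (the uu/ul leaf kernels dispatched above are defined
+ * elsewhere in this file): for the infinity norm, row_sums accumulates the
+ * per-row sums of absolute values, and the norm itself is their maximum.
+ * A plain-CSR restatement with hypothetical names, not library symbols:
+ */
+#if 0 /* sketch only */
+#include <math.h>
+static void demo_csr_row_abs_sums(const double *VA, const int *bpntr,
+		int nr, double *row_sums)
+{
+	int i, k;
+	for(i=0;i<nr;++i)
+		for(k=bpntr[i];k<bpntr[i+1];++k)
+			row_sums[i] += fabs(VA[k]);	/* infinity norm = max_i row_sums[i] */
+}
+#endif
+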
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tN_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel which performs
+	 * the requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vectors
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_double_H__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_double_H__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tN_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel which performs
+	 * the requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vectors
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_double_H__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_double_H__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tT_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel which performs
+	 * the requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vectors
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_double_C__tT_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_double_C__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_double_C__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tT_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel which performs
+	 * the requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vectors
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_double_C__tT_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_double_C__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_double_C__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tT_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel which performs
+	 * the requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vectors
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_double_H__tT_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_double_H__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_double_H__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tT_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel which performs
+	 * the requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vectors
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_double_H__tT_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_double_H__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_double_H__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tC_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel which performs
+	 * the requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vectors
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_double_C__tC_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_double_C__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_double_C__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tC_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel which performs
+	 * the requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vectors
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_double_C__tC_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_double_C__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_double_C__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tC_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel which performs
+	 * the requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vectors
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_double_H__tC_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_double_H__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_double_H__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tC_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel which performs
+	 * the requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vectors
+	 * should be padded to tolerate an overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_H__tC_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_H__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_H__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tN_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_C__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_C__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tN_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_C__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_C__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tN_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_H__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_H__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tN_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_H__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_H__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tT_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_C__tT_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_C__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_C__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tT_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_C__tT_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_C__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_C__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tT_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_H__tT_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_H__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_H__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tT_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_H__tT_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_H__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_H__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tC_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_C__tC_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_C__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_C__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tC_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_C__tC_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_C__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_C__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tC_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_H__tC_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_H__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_H__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tC_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_H__tC_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_H__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_H__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tN_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_C__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_C__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tN_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_C__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_C__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tN_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_H__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_H__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tN_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_H__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_H__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tT_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_C__tT_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_C__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_C__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tT_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_C__tT_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_C__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_C__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tT_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_H__tT_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_H__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_H__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tT_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_H__tT_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_H__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_H__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tC_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_C__tC_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_C__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_C__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tC_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_C__tC_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_C__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_C__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tC_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_H__tC_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_H__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_H__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tC_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_H__tC_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_H__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_H__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
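+
+/*
+ * [Editor's note -- naming key partly inferred, hedged.]  In the dispatchers
+ * above, the _C_ variants take rsb_coo_idx_t block column indices while the
+ * _H_ variants take rsb_half_idx_t ones, as the bindx parameter shows; the
+ * remaining tags plausibly encode transposition (tN/tT/tC), symmetry
+ * (sU/sS/sH) and diagonal handling (dE/dI), but that reading is an
+ * assumption not confirmed by this file.  The "rowssums" group follows.
+ */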
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tN_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_C__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_C__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
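+
+/*
+ * [Editor's illustrative sketch -- not part of the generated upstream code.]
+ * Unlike "infty_norm", the "rowssums" kernels apparently accumulate the
+ * plain signed sum of each row.  For the unblocked 1x1 CSR case that
+ * reduction amounts to the loop below (array roles assumed from common CSR
+ * conventions; the helper name is hypothetical):
+ */
+static void rsb_csr_rowssums_sketch(double *row_sums, const double *VA,
+	const rsb_nnz_idx_t *rowptr, rsb_coo_idx_t nrows)
+{
+	rsb_coo_idx_t i;
+	rsb_nnz_idx_t k;
+
+	for(i = 0; i < nrows; ++i)	/* one signed accumulator per matrix row */
+		for(k = rowptr[i]; k < rowptr[i+1]; ++k)
+			row_sums[i] += VA[k];
+}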
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tN_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_C__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_C__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
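+
+/*
+ * Minimal usage sketch (editor's illustration, not part of the generated
+ * sources; the NULL rpntr/cpntr/indptr arguments and RSB_FLAG_NOFLAGS are
+ * assumptions about acceptable inputs): row sums of a 3x3 CSR matrix via
+ * the 1x1-block path of rsb__BCSR_rowssums_double_C_u_tN_sU_dE_uG above.
+ */
+#if 0
+static void rowssums_example(void)
+{
+	const double VA[] = { 1.0, 2.0, 3.0, 4.0 };	/* nonzero values */
+	const rsb_coo_idx_t bindx[] = { 0, 2, 1, 2 };	/* column indices */
+	const rsb_nnz_idx_t bpntr[] = { 0, 2, 3, 4 };	/* row pointers, Mdim+1 entries */
+	double row_sums[3] = { 0.0, 0.0, 0.0 };
+
+	/* NULL rpntr/cpntr selects the rows = columns = 1 (CSR) branch. */
+	rsb__BCSR_rowssums_double_C_u_tN_sU_dE_uG(VA, row_sums, 3, 3, bindx,
+		bpntr, NULL, NULL, NULL, 1, 1, 0, 0, RSB_FLAG_NOFLAGS);
+	/* expected result: row_sums == { 3.0, 3.0, 4.0 } */
+}
+#endif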
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tN_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_H__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_H__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
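+
+/*
+ * Editor's note: the _H_ variants, such as the one above, differ from the
+ * _C_ ones only in taking rsb_half_idx_t (narrower) column indices in
+ * bindx, which presumably reduces index memory traffic on matrices whose
+ * column range fits the half-width type.
+ */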
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tN_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_H__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_H__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tT_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_C__tT_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_C__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_C__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
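+
+/*
+ * Editor's note (an assumption based on the tT/tC suffixes): in the
+ * transposed variants "row sums" should refer to the rows of the
+ * transposed operand, i.e. the column sums of the stored matrix, with the
+ * conjugate-transposed (tC) case coinciding for this real (double) type.
+ */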
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tT_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_C__tT_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_C__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_C__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tT_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_H__tT_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_H__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_H__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tT_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_H__tT_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_H__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_H__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tC_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_C__tC_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_C__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_C__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tC_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_C__tC_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_C__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_C__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tC_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_H__tC_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_H__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_H__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tC_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_H__tC_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_H__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_H__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tN_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_C__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_C__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
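+
+/*
+ * Editor's note (an assumption based on the sS suffix): symmetric variants
+ * presumably store a single triangle, so each stored off-diagonal
+ * coefficient a(i,j) contributes to both row_sums[i] and row_sums[j].
+ */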
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tN_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_C__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_C__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tN_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_H__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_H__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tN_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_H__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_H__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tT_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_C__tT_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_C__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_C__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tT_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_C__tT_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_C__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_C__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tT_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_H__tT_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_H__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_H__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tT_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_H__tT_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_H__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_H__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tC_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_C__tC_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_C__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_C__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tC_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_C__tC_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_C__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_C__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tC_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_H__tC_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_H__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_H__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tC_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_H__tC_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_H__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_H__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tN_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_C__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_C__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
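+
+/*
+ * Editor's note: for this real (double) type the hermitian (sH) variants
+ * should behave identically to the symmetric (sS) ones, since conjugation
+ * is a no-op on real values.
+ */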
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tN_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_C__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_C__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tN_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t Mdim,const rsb_coo_idx_t mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t * restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+	{
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_H__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+	}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_H__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tN_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_double_H__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_double_H__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
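
Every dispatcher in this family repeats one shape: derive the block dimensions
from the first entries of rpntr/cpntr (defaulting to 1x1 for the bounded-box
case), call the fully unrolled ("uu") kernel for the one registered block size,
and otherwise fall back to the looped ("ul") kernel, or to an error when
RSB_WANT_LOOPING_KERNELS is disabled. A condensed sketch of that shape, with
hypothetical kernel names and local stand-in values for the error codes (the
real codes live in rsb.h):

    typedef int rsb_err_t;
    #define RSB_ERR_NO_ERROR              0
    #define RSB_ERR_UNSUPPORTED_OPERATION -1

    /* stand-ins for the generated unrolled and looped kernels */
    static rsb_err_t kernel_unrolled_1x1(void) { return RSB_ERR_NO_ERROR; }
    static rsb_err_t kernel_looped(void)       { return RSB_ERR_NO_ERROR; }

    static rsb_err_t dispatch(const int *rpntr, const int *cpntr)
    {
        int rows = 1, columns = 1; /* bounded-box default */

        if (rpntr && cpntr)
        {
            rows    = rpntr[1] - rpntr[0];
            columns = cpntr[1] - cpntr[0];
        }
        if (rows == 1 && columns == 1)
            return kernel_unrolled_1x1(); /* registered block size */
    #ifdef RSB_WANT_LOOPING_KERNELS
        return kernel_looped();           /* generic fallback */
    #else
        return RSB_ERR_UNSUPPORTED_OPERATION;
    #endif
    }

    int main(void)
    {
        const int rp[2] = { 0, 1 }, cp[2] = { 0, 1 };
        return dispatch(rp, cp) == RSB_ERR_NO_ERROR ? 0 : 1;
    }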
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tT_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_double_C__tT_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_double_C__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_double_C__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tT_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_double_C__tT_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_double_C__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_double_C__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tT_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_double_H__tT_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_double_H__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_double_H__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tT_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_double_H__tT_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_double_H__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_double_H__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tC_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_double_C__tC_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_double_C__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_double_C__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tC_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_double_C__tC_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_double_C__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_double_C__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tC_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_double_H__tC_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_double_H__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_double_H__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tC_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_double_H__tC_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_double_H__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_double_H__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
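
Each of the rowssums variants above also comes in a C/H pair whose signatures
differ only in the type of bindx: rsb_coo_idx_t for the C kernels versus the
narrower rsb_half_idx_t for the H kernels, which presumably shrinks the
column-index storage whenever the block column indices fit the narrow type. A
sketch of that trade-off, with the two widths assumed rather than taken from
librsb's headers:

    #include <stdio.h>

    typedef int            coo_idx_t;  /* assumed full-width index */
    typedef unsigned short half_idx_t; /* assumed narrow index */

    int main(void)
    {
        const size_t nnz = 1000000; /* hypothetical nonzero count */

        /* Same arithmetic in both kernel flavors; only the load width
         * of the column-index array changes. */
        printf("C-variant bindx: %zu bytes\n", nnz * sizeof(coo_idx_t));
        printf("H-variant bindx: %zu bytes\n", nnz * sizeof(half_idx_t));
        return 0;
    }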
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tN_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_double_C__tN_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_C__tN_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_C__tN_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
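
The scale dispatchers in this group take one extra parameter, scale_factors,
and drop row_sums. For the 1x1-block case dispatched here, a plausible reading
of the operation is a row-wise in-place scaling of VA, with bpntr acting as the
CSR row pointer; the semantics below are an assumption, not taken from the
generated kernel bodies:

    #include <stdio.h>

    /* Scale row i of a 1x1-block CSR matrix by scale_factors[i]; bpntr
     * plays the role of the CSR row pointer. */
    static void scale_rows_1x1(double *VA, int Mdim, const int *bpntr,
                               const double *scale_factors)
    {
        int i, n;
        for (i = 0; i < Mdim; ++i)
            for (n = bpntr[i]; n < bpntr[i + 1]; ++n)
                VA[n] *= scale_factors[i];
    }

    int main(void)
    {
        /* 2x2 example: rows {1,2} and {3}, scaled by 2 and 10. */
        double VA[3] = { 1.0, 2.0, 3.0 };
        const int bpntr[3] = { 0, 2, 3 };
        const double f[2] = { 2.0, 10.0 };

        scale_rows_1x1(VA, 2, bpntr, f);
        printf("%g %g %g\n", VA[0], VA[1], VA[2]); /* 2 4 30 */
        return 0;
    }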
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tN_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_double_C__tN_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_C__tN_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_C__tN_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tN_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_double_H__tN_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_H__tN_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_H__tN_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tN_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_double_H__tN_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_H__tN_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_H__tN_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tT_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_double_C__tT_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_C__tT_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_C__tT_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tT_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_double_C__tT_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_C__tT_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_C__tT_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tT_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_double_H__tT_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_H__tT_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_H__tT_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tT_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_double_H__tT_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_H__tT_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_H__tT_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tC_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_double_C__tC_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_C__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_C__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tC_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_double_C__tC_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_C__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_C__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tC_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_double_H__tC_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_H__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_H__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tC_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_double_H__tC_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_H__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_H__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tN_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_double_C__tN_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_C__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_C__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tN_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_double_C__tN_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_C__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_C__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tN_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_double_H__tN_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_H__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_H__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tN_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_double_H__tN_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_H__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_H__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tT_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_double_C__tT_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_C__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_C__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tT_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the requested
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_double_C__tT_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_C__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_double_C__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
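+/*
+ * Editorial note: the overrun bound quoted in the comments above, written out
+ * as C. For example, 5 matrix rows blocked by 2 gives (2 - 5 % 2) % 2 = 1
+ * element of allowed overrun, while a block size that divides the dimension
+ * gives 0. Kept under #if 0: illustrative only.
+ */
+#if 0
+static rsb_coo_idx_t allowed_overrun(rsb_coo_idx_t matrixdim, rsb_coo_idx_t blockdim)
+{
+	/* mod(blockdim - mod(matrixdim, blockdim), blockdim) */
+	return (blockdim - matrixdim % blockdim) % blockdim;
+}
+#endif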
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tT_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("scale") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_H__tT_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_H__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_H__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
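+/*
+ * Editorial note on the generated names, inferred from the signatures and
+ * dispatch targets in this file (only the C/H distinction is certain, since
+ * it is visible in the bindx parameter type): "double"/"float" is the numeric
+ * type; "C"/"H" selects rsb_coo_idx_t versus rsb_half_idx_t block indices;
+ * "tN"/"tT"/"tC" appears to encode the transposition (none/transposed/
+ * conjugate-transposed); "sS"/"sH"/"sU" the symmetry (symmetric/hermitian/
+ * unsymmetric); "dE"/"dI" an explicit versus implicit diagonal; and
+ * "uu"/"ul" the unlooped fixed 1 x 1 kernel versus the generic looped one.
+ */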
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tT_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("scale") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_H__tT_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_H__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_H__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tC_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("scale") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_C__tC_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_C__tC_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_C__tC_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tC_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("scale") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_C__tC_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_C__tC_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_C__tC_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tC_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("scale") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_H__tC_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_H__tC_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_H__tC_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tC_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("scale") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_H__tC_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_H__tC_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_H__tC_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tN_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("scale") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_C__tN_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_C__tN_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_C__tN_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
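+/*
+ * Editorial note: in every dispatcher in this file the only fixed-size kernel
+ * emitted is the 1 x 1 ("uu") one; any other detected block size falls back
+ * to the generic looped ("ul") kernel, or fails with
+ * RSB_ERR_UNSUPPORTED_OPERATION when RSB_WANT_LOOPING_KERNELS is not defined.
+ */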
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tN_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("scale") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_C__tN_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_C__tN_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_C__tN_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tN_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("scale") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_H__tN_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_H__tN_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_H__tN_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tN_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("scale") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_H__tN_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_H__tN_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_H__tN_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tT_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("scale") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_C__tT_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_C__tT_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_C__tT_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tT_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("scale") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_C__tT_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_C__tT_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_C__tT_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tT_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("scale") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_H__tT_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_H__tT_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_H__tT_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tT_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("scale") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_H__tT_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_H__tT_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_H__tT_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tC_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("scale") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_C__tC_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_C__tC_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_C__tC_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tC_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("scale") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_C__tC_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_C__tC_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_C__tC_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tC_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("scale") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_H__tC_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_H__tC_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_H__tC_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tC_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("scale") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_H__tC_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_H__tC_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_H__tC_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tN_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("infty_norm") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_C__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_C__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
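+/*
+ * Editorial note: a sketch of how the row_sums output of the dispatcher above
+ * could be reduced to the infinity norm (the maximum absolute row sum); the
+ * array layout assumptions are as in the scale sketch earlier in this file.
+ * Kept under #if 0: illustrative only.
+ */
+#if 0
+static float infty_norm_example(void)
+{
+	/* a 2 x 2 matrix with nonzeroes (0,0)=1 and (1,1)=-3, as 1 x 1 blocks */
+	const float VA[] = { 1.0f, -3.0f };
+	const rsb_coo_idx_t bindx[] = { 0, 1 };
+	const rsb_nnz_idx_t bpntr[] = { 0, 1, 2 };
+	const rsb_nnz_idx_t indptr[] = { 0, 1, 2 };
+	const rsb_coo_idx_t rpntr[] = { 0, 1, 2 };
+	const rsb_coo_idx_t cpntr[] = { 0, 1, 2 };
+	float row_sums[2] = { 0.0f, 0.0f }, norm = 0.0f;
+	rsb_coo_idx_t i;
+
+	if(rsb__BCSR_infty_norm_float_C_u_tN_sU_dE_uG(VA, row_sums, 2, 2, bindx,
+		bpntr, indptr, rpntr, cpntr, 1, 1, 0, 0, RSB_FLAG_NOFLAGS)
+		!= RSB_ERR_NO_ERROR)
+		return -1.0f;
+	for(i = 0; i < 2; ++i)	/* reduce the per-row sums to their maximum */
+		if(row_sums[i] > norm)
+			norm = row_sums[i];
+	return norm;	/* expected 3.0f for this matrix */
+}
+#endif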
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tN_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("infty_norm") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_C__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_C__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tN_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("infty_norm") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_H__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_H__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tN_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("infty_norm") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_H__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_H__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tT_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("infty_norm") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_C__tT_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_C__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_C__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tT_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("infty_norm") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_C__tT_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_C__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_C__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tT_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("infty_norm") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_H__tT_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_H__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_H__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tT_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing
+	 * the requested matrix operation ("infty_norm") for the detected fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each tolerate a small overrun of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_H__tT_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_H__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_H__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
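+
+/*
+ * Editorial note, inferred from the signatures rather than stated in the
+ * sources: the _C_ and _H_ dispatcher variants differ only in the type of
+ * bindx (full rsb_coo_idx_t versus halfword rsb_half_idx_t column indices),
+ * as visible above. The remaining suffixes plausibly encode the variant
+ * being compiled: tN/tT/tC for no/plain/conjugate transposition, sU/sS/sH
+ * for unsymmetric/symmetric/hermitian storage, and dE/dI for an explicit or
+ * implicit diagonal. Only the C/H distinction is directly verifiable from
+ * these declarations; treat the rest as an assumption.
+ */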
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tC_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_C__tC_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_C__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_C__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tC_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_C__tC_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_C__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_C__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tC_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_H__tC_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_H__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_H__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tC_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_H__tC_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_H__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_H__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tN_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_C__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_C__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tN_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_C__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_C__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tN_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_H__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_H__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tN_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_H__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_H__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tT_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_C__tT_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_C__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_C__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tT_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_C__tT_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_C__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_C__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tT_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_H__tT_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_H__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_H__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tT_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_H__tT_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_H__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_H__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tC_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_C__tC_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_C__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_C__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tC_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_C__tC_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_C__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_C__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tC_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_H__tC_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_H__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_H__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tC_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_H__tC_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_H__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_H__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tN_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_C__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_C__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tN_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_C__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_C__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tN_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_H__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_H__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tN_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_H__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_H__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tT_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_C__tT_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_C__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_C__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tT_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_C__tT_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_C__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_C__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tT_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_H__tT_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_H__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_H__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tT_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_H__tT_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_H__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_H__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tC_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_C__tC_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_C__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_C__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tC_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized loop kernel performing the
+	 * desired matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_C__tC_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_C__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_C__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tC_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_H__tC_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_H__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_H__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tC_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("infty_norm") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_float_H__tC_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_float_H__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_float_H__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
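+
+/*
+ * Note on the dispatcher naming (inferred from the signatures above and from
+ * usual BLAS conventions; not stated explicitly in this file): the "_C"/"_H"
+ * infix matches the bindx index type (rsb_coo_idx_t vs. rsb_half_idx_t);
+ * "tN"/"tT"/"tC" presumably select no transposition, transposition, or
+ * conjugate transposition; "sU"/"sS"/"sH" appear to encode the unsymmetric,
+ * symmetric, and hermitian cases; and "dE"/"dI" seem to distinguish an
+ * explicit from an implicit diagonal.
+ */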
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tN_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_C__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_C__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
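+
+/*
+ * A minimal usage sketch for the dispatcher above (an assumption-laden
+ * illustration, not part of the library): it presumes that bpntr/bindx
+ * follow the usual CSR row-pointer/column-index layout, that indptr maps
+ * each block to its offset into VA, and that rpntr/cpntr carry the r1/c1
+ * block partitioning. The function name "example" and the data are
+ * hypothetical; the block is excluded from compilation on purpose.
+ */
+#if 0 /* illustrative sketch only */
+#include <stdio.h>
+static void example(void)
+{
+	/* a 2 x 2 matrix [[1 2][0 3]] stored as 1 x 1 blocks, CSR-like layout */
+	const float VA[] = { 1.0f, 2.0f, 3.0f };
+	const rsb_coo_idx_t bindx[] = { 0, 1, 1 };	/* block column indices */
+	const rsb_nnz_idx_t bpntr[] = { 0, 2, 3 };	/* block row pointers */
+	const rsb_nnz_idx_t indptr[] = { 0, 1, 2, 3 };	/* per-block offsets into VA */
+	const rsb_coo_idx_t rpntr[] = { 0, 1, 2 }, cpntr[] = { 0, 1, 2 };
+	float row_sums[2] = { 0.0f, 0.0f };
+	rsb_err_t errval = rsb__BCSR_rowssums_float_C_u_tN_sU_dE_uG(
+		VA, row_sums, 2, 2, bindx, bpntr, indptr, rpntr, cpntr,
+		1, 1, 0, 0, RSB_FLAG_NOFLAGS);
+	if(errval == RSB_ERR_NO_ERROR)
+		printf("row sums: %g %g\n", row_sums[0], row_sums[1]);	/* 3 and 3, if the assumptions hold */
+}
+#endif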
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tN_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_C__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_C__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tN_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_H__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_H__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tN_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_H__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_H__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tT_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_C__tT_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_C__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_C__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tT_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_C__tT_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_C__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_C__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tT_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_H__tT_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_H__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_H__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tT_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_H__tT_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_H__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_H__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tC_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_C__tC_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_C__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_C__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tC_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_C__tC_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_C__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_C__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tC_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_H__tC_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_H__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_H__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tC_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_H__tC_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_H__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_H__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tN_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_C__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_C__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tN_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_C__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_C__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tN_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_H__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_H__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tN_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_H__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_H__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tT_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_C__tT_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_C__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_C__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tT_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_C__tT_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_C__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_C__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tT_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_H__tT_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_H__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_H__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tT_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_H__tT_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_H__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_H__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tC_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_C__tC_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_C__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_C__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tC_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_C__tC_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_C__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_C__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tC_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_H__tC_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_H__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_H__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tC_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("rowssums") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should each allow a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_H__tC_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_H__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_H__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tN_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_C__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_C__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tN_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_C__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_C__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tN_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_H__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_H__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tN_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_H__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_H__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tT_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_C__tT_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_C__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_C__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tT_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_C__tT_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_C__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_C__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tT_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_H__tT_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_H__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_H__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tT_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_H__tT_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_H__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_H__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tC_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_C__tC_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_C__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_C__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tC_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_C__tC_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_C__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_C__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tC_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_H__tC_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_H__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_H__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tC_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_H__tC_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_H__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_H__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tN_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_C__tN_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tN_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tN_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
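+/*
+ * Illustrative note, not part of the generated sources: every dispatcher in
+ * this file routes the 1x1 blocking to the "_uu_" kernel variant, and any
+ * other blocking to the generic looping "_ul_" variant only when
+ * RSB_WANT_LOOPING_KERNELS is defined; otherwise it returns
+ * RSB_ERR_UNSUPPORTED_OPERATION, so a caller is expected to check the
+ * result, e.g.:
+ *
+ *	rsb_err_t e = rsb__BCSR_scale_float_C_u_tN_sU_dE_uG(VA, Mdim, mdim,
+ *		bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags,
+ *		scale_factors);
+ *	if(e == RSB_ERR_UNSUPPORTED_OPERATION)
+ *		;	// non-1x1 blocking with looping kernels compiled out
+ */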
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tN_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_C__tN_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tN_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tN_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tN_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_H__tN_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tN_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tN_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tN_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_H__tN_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tN_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tN_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tT_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_C__tT_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tT_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tT_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tT_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_C__tT_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tT_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tT_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tT_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_H__tT_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tT_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tT_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tT_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_H__tT_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tT_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tT_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tC_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_C__tC_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tC_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_C__tC_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tC_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_H__tC_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tC_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_H__tC_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tN_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_C__tN_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tN_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_C__tN_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tN_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the output vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_H__tN_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
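+
+/*
+ * Editorial sketch, not part of the generated sources: the "_H_"
+ * dispatchers differ from the "_C_" ones only in the type of bindx,
+ * rsb_half_idx_t instead of rsb_coo_idx_t, presumably a narrower integer
+ * meant to shrink index storage whenever block column indices fit in it.
+ * Hypothetical types illustrating the saving:
+ */
+typedef int rsb_example_coo_idx_t;             /* full-width index type */
+typedef unsigned short rsb_example_half_idx_t; /* half-width index type */
+
+static unsigned long rsb_example_index_bytes(unsigned long nnz, int use_half)
+{
+	return nnz * (use_half ? sizeof(rsb_example_half_idx_t)
+	                       : sizeof(rsb_example_coo_idx_t));
+}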
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tN_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_H__tN_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tT_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_C__tT_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tT_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_C__tT_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tT_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_H__tT_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tT_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_H__tT_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tC_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_C__tC_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tC_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tC_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tC_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_C__tC_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tC_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tC_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tC_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_H__tC_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tC_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tC_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tC_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_H__tC_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tC_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tC_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tN_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_C__tN_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tN_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tN_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tN_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_C__tN_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tN_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tN_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tN_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_H__tN_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tN_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tN_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tN_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_H__tN_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tN_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tN_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tT_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_C__tT_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tT_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tT_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tT_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_C__tT_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tT_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tT_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tT_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_H__tT_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tT_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tT_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tT_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_H__tT_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tT_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tT_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tC_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_C__tC_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tC_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tC_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tC_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_C__tC_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tC_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_C__tC_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tC_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_H__tC_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tC_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tC_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tC_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_H__tC_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tC_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_H__tC_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tN_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
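+
+/*
+ * Editorial sketch, not part of the generated sources: judging by their
+ * signatures, the "infty_norm" kernels dispatched from here accumulate
+ * per-row sums of absolute values into row_sums; the matrix infinity norm
+ * is then the maximum of those sums. A plain reference version for the
+ * 1x1 (i.e. CSR) case, ignoring offsets and symmetry, with hypothetical
+ * names (assumes <complex.h> is in scope for cabsf):
+ */
+static float rsb_example_infty_norm(const float complex *VA,
+		const int *bpntr, int Mdim)
+{
+	float norm = 0.0f;
+	int i, k;
+
+	for(i = 0; i < Mdim; ++i)
+	{
+		/* bpntr[i] .. bpntr[i+1] delimits the nonzeroes of row i */
+		float row_sum = 0.0f;
+		for(k = bpntr[i]; k < bpntr[i+1]; ++k)
+			row_sum += cabsf(VA[k]);
+		if(row_sum > norm)
+			norm = row_sum;
+	}
+	return norm;
+}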
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tN_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tN_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tN_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tT_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of no more than,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
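+
+/*
+ * A hedged usage sketch for the dispatcher just above (the caller-side
+ * names `nr' and `norm' are hypothetical, and the accumulation semantics
+ * are the usual reading of `row_sums' for an "infty_norm" kernel, not
+ * something stated in this file):
+ *
+ *   float complex row_sums[nr];   // nr >= matrixrows plus the padding above
+ *   memset(row_sums, 0, sizeof(row_sums));
+ *   errval = rsb__BCSR_infty_norm_float_complex_C_u_tT_sU_dE_uG(
+ *            VA, row_sums, Mdim, mdim, bindx, bpntr, indptr,
+ *            rpntr, cpntr, br, bc, roff, coff, flags);
+ *   // norm = max of cabsf(row_sums[i]) over the first matrixrows entries
+ */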
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tT_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
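+
+/*
+ * A note on the generated naming scheme, as far as it can be read off the
+ * signatures in this file (the single-letter codes are inferred, not
+ * documented here): `C' variants take full `rsb_coo_idx_t' block indices
+ * while `H' variants take `rsb_half_idx_t' halfword indices; `tN'/`tT'/`tC'
+ * select no, real, or conjugate transposition in the BLAS sense;
+ * `sU'/`sS'/`sH' select unsymmetric, symmetric, or hermitian storage;
+ * `dE'/`dI' select an explicit or implicit diagonal; and the dispatched
+ * `uu'/`ul' kernels are the fully unrolled and the looped fallback
+ * versions, respectively.
+ */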
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tT_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
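+
+/*
+ * Note on the RSB_WANT_LOOPING_KERNELS branches above: when that macro is
+ * left undefined at build time, only the fully unrolled 1x1 case is
+ * compiled in, and any other block size makes the dispatcher return
+ * RSB_ERR_UNSUPPORTED_OPERATION. A caller sketch (the fallback path is
+ * hypothetical) would therefore check for it:
+ *
+ *   if(errval == RSB_ERR_UNSUPPORTED_OPERATION)
+ *       ;  // fall back to a non-blocked code path, if one is available
+ */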
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tT_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
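+
+/*
+ * The dispatchers above probe the fixed block size from the first entries
+ * of the partitioning arrays: with a uniform blocking, cpntr[1]-cpntr[0]
+ * and rpntr[1]-rpntr[0] give the block column and row extents of every
+ * block; when either array is missing, a 1x1 blocking is assumed (the
+ * "bounded box" case flagged as experimental in the code).
+ */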
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tC_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tC_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tC_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tC_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tN_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tN_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tN_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tN_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tT_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tT_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tT_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tT_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tC_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tC_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tC_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tC_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tN_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tN_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tN_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tN_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tT_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tT_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output
+	 * vector must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tT_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
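+/*
+ * Note on the naming: as the signatures above show, the _C_ and _H_
+ * variants differ only in the width of the block index array (bindx is
+ * rsb_coo_idx_t * versus rsb_half_idx_t *); the remaining name fields
+ * (tN/tT/tC, sU/sS/sH, dE/dI) appear to select among the generated
+ * transposition, symmetry and diagonal variants.
+ */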
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tT_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
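+/*
+ * A hedged usage sketch, not library code: every dispatcher returns an
+ * rsb_err_t, and in builds lacking RSB_WANT_LOOPING_KERNELS any block size
+ * other than 1x1 yields RSB_ERR_UNSUPPORTED_OPERATION, so callers should
+ * check the return value before trusting row_sums.
+ */
+#if 0 /* illustrative sketch only */
+	rsb_err_t errval = rsb__BCSR_infty_norm_float_complex_H_u_tT_sH_dI_uG(
+		VA, row_sums, Mdim, mdim, bindx, bpntr, indptr,
+		rpntr, cpntr, br, bc, roff, coff, flags);
+	if(errval != RSB_ERR_NO_ERROR)
+		return errval;	/* e.g. RSB_ERR_UNSUPPORTED_OPERATION */
+#endif /* illustrative sketch only */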
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tC_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tC_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tC_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tC_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tN_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
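+/*
+ * From this point on the file repeats the same dispatch template for the
+ * "rowssums" operation, enumerating the same axes as the "infty_norm"
+ * dispatchers above: full- versus half-word indices (_C_/_H_), the
+ * tN/tT/tC and dE/dI variants, and the sU/sS symmetry codes seen below.
+ */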
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tN_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tN_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tN_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tT_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tT_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tT_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tT_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tC_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tC_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tC_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tC_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tN_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tN_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tN_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tN_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tT_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tT_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tT_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tT_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vectors
+	 * must tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tC_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tC_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
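The overflow bound quoted in each header comment, mod(blockrows-mod(matrixrows,blockrows),blockrows), is simply the padding that rounds a dimension up to the next multiple of the block size; it is zero whenever the block size already divides the dimension. A small sketch of the arithmetic, with a hypothetical pad_to_block helper (not a librsb function):

    #include <stdio.h>

    static int pad_to_block(int dim, int block)
    {
    	/* mod(block - mod(dim, block), block) */
    	return (block - dim % block) % block;
    }

    int main(void)
    {
    	/* e.g. 10 rows in 4-row blocks need 2 rows of slack; 12 need none */
    	printf("pad(10,4)=%d pad(12,4)=%d\n",
    	       pad_to_block(10, 4), pad_to_block(12, 4));
    	return 0;
    }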
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tC_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tC_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tN_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tN_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tN_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tN_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
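For the untransposed ("tN") functions above, the 1 x 1 block case plausibly reduces to a plain CSR traversal: bpntr delimits each row's stored values, and the column indices are not even needed to accumulate a row sum. A hedged, single-threaded sketch of that reduction; csr_rowssums is illustrative and omits the offset, symmetry and flag handling of the generated kernels.

    #include <complex.h>
    #include <stdio.h>

    static void csr_rowssums(const float complex *VA, const int *bpntr,
                             int nrows, float complex *row_sums)
    {
    	for(int i = 0; i < nrows; ++i)
    		for(int n = bpntr[i]; n < bpntr[i + 1]; ++n)
    			row_sums[i] += VA[n];	/* accumulate row i's stored values */
    }

    int main(void)
    {
    	/* 2 x 2 matrix [1+2i, 3; 0, 4i] in CSR form */
    	const float complex VA[] = { 1 + 2 * I, 3, 4 * I };
    	const int bpntr[] = { 0, 2, 3 };
    	float complex row_sums[2] = { 0 };

    	csr_rowssums(VA, bpntr, 2, row_sums);
    	for(int i = 0; i < 2; ++i)
    		printf("row %d: %g%+gi\n", i,
    		       crealf(row_sums[i]), cimagf(row_sums[i]));
    	return 0;
    }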
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tT_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tT_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tT_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tT_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tC_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tC_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tC_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tC_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("rowssums") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
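For the transposed ("tT") and conjugate-transposed ("tC") variants dispatched above, the sums run along the other dimension: summing the rows of the (conjugate) transpose means scattering each stored value into the slot named by its column index, presumably with conjf() applied in the "tC" case. A hedged sketch under those assumptions; csr_rowssums_transposed is illustrative, not the generated kernel.

    #include <complex.h>
    #include <stdio.h>

    static void csr_rowssums_transposed(const float complex *VA, const int *bindx,
                                        const int *bpntr, int nrows, int ncols,
                                        float complex *col_sums)
    {
    	for(int j = 0; j < ncols; ++j)
    		col_sums[j] = 0;
    	for(int i = 0; i < nrows; ++i)
    		for(int n = bpntr[i]; n < bpntr[i + 1]; ++n)
    			col_sums[bindx[n]] += VA[n];	/* conjf(VA[n]) for "tC" */
    }

    int main(void)
    {
    	const float complex VA[] = { 1 + 2 * I, 3, 4 * I };
    	const int bindx[] = { 0, 1, 1 };
    	const int bpntr[] = { 0, 2, 3 };
    	float complex col_sums[2];

    	csr_rowssums_transposed(VA, bindx, bpntr, 2, 2, col_sums);
    	for(int j = 0; j < 2; ++j)
    		printf("col %d: %g%+gi\n", j,
    		       crealf(col_sums[j]), cimagf(col_sums[j]));
    	return 0;
    }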
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tN_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_float_complex_C__tN_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_float_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_float_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
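Unlike the row-sums dispatchers, the "scale" family takes a mutable VA plus a scale_factors vector; for 1 x 1 blocks the operation plausibly amounts to multiplying each row's stored values by that row's factor, in place. A hedged sketch; csr_scale_rows is illustrative and omits the offset and flag handling of the generated kernels.

    #include <complex.h>
    #include <stdio.h>

    static void csr_scale_rows(float complex *VA, const int *bpntr, int nrows,
                               const float complex *scale_factors)
    {
    	for(int i = 0; i < nrows; ++i)
    		for(int n = bpntr[i]; n < bpntr[i + 1]; ++n)
    			VA[n] *= scale_factors[i];	/* row-wise in-place scaling */
    }

    int main(void)
    {
    	float complex VA[] = { 1 + 2 * I, 3, 4 * I };
    	const int bpntr[] = { 0, 2, 3 };
    	const float complex scale_factors[] = { 2, I };

    	csr_scale_rows(VA, bpntr, 2, scale_factors);
    	for(int n = 0; n < 3; ++n)
    		printf("VA[%d] = %g%+gi\n", n, crealf(VA[n]), cimagf(VA[n]));
    	return 0;
    }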
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tN_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_float_complex_C__tN_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_float_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_float_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tN_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_float_complex_H__tN_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_float_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_float_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tN_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_float_complex_H__tN_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_float_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_float_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tT_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_float_complex_C__tT_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_float_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_float_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tT_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_float_complex_C__tT_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_float_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_float_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tT_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_float_complex_H__tT_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_float_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_float_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tT_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_float_complex_H__tT_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_float_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_float_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tC_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_float_complex_C__tC_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_float_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_float_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tC_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("scale") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_C__tC_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tC_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_H__tC_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tC_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_H__tC_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
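+#include <stdio.h>
+
+/*
+ * A minimal, self-contained sketch of the block-size selection performed by
+ * every dispatcher above: the block dimensions come from the first two
+ * entries of rpntr/cpntr, with a 1x1 (CSR-like) fallback when either is
+ * NULL. block_dims and the plain int indices are illustrative stand-ins,
+ * not librsb API.
+ */
+static void block_dims(const int *rpntr, const int *cpntr, int *rows, int *cols)
+{
+	if (cpntr && rpntr)
+	{
+		*cols = cpntr[1] - cpntr[0];
+		*rows = rpntr[1] - rpntr[0];
+	}
+	else
+		*cols = *rows = 1;	/* unblocked: only this case has an unrolled kernel here */
+}
+
+int main(void)
+{
+	int rp[] = { 0, 2 }, cp[] = { 0, 3 }, rows, cols;
+
+	block_dims(rp, cp, &rows, &cols);
+	printf("%d x %d\n", rows, cols);	/* 2 x 3: would take the default: branch */
+	block_dims(NULL, NULL, &rows, &cols);
+	printf("%d x %d\n", rows, cols);	/* 1 x 1: the specialized r1_c1 kernel */
+	return 0;
+}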
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tN_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_C__tN_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tN_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_C__tN_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tN_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_H__tN_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tN_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_H__tN_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tT_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_C__tT_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tT_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_C__tT_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tT_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_H__tT_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tT_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_H__tT_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
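+#include <stdio.h>
+
+/*
+ * The overflow allowance quoted in the comments above, restated as a
+ * self-contained sketch: a vector touched by b-sized blocks spanning n
+ * matrix rows (or columns) should leave room for (b - n % b) % b extra
+ * elements. block_pad is a hypothetical helper for illustration, not
+ * librsb API.
+ */
+static int block_pad(int n, int b)
+{
+	return (b - n % b) % b;	/* == mod(b - mod(n, b), b) */
+}
+
+int main(void)
+{
+	printf("%d\n", block_pad(10, 4));	/* 2: 10 rows, 4-row blocks -> 2 rows of slack */
+	printf("%d\n", block_pad(12, 4));	/* 0: already a multiple of the block size */
+	return 0;
+}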
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tC_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_C__tC_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tC_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_C__tC_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tC_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_H__tC_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tC_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_H__tC_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tN_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_C__tN_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tN_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_C__tN_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tN_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_H__tN_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tN_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_H__tN_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tT_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_C__tT_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tT_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_C__tT_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tT_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_H__tT_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tT_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_H__tT_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tC_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_C__tC_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tC_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_C__tC_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tC_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the requested matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_scale_float_complex_H__tC_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_scale_float_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tC_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_float_complex_H__tC_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_float_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_float_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
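+/*
+ * Editorial note (illustrative; the interpretation is drawn from the
+ * signatures above): the `_C_` dispatchers take full-width `rsb_coo_idx_t`
+ * block column indices while the `_H_` ones take halfword `rsb_half_idx_t`
+ * indices, so by the naming pattern a caller choosing on index width might
+ * look like the sketch below (`use_halfword_indices` and `hbindx` are
+ * hypothetical names):
+ */
+#if 0	/* illustrative only */
+	if(use_halfword_indices)
+		errval = rsb__BCSR_scale_float_complex_H_u_tC_sH_dI_uG( VA,Mdim,mdim,hbindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+	else
+		errval = rsb__BCSR_scale_float_complex_C_u_tC_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#endif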
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tN_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
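+/*
+ * Editorial note (an illustrative sketch, not part of the generated file):
+ * the "infty_norm" dispatchers accumulate per-row absolute sums into
+ * `row_sums`, from which the infinity norm is max_i sum_j |a_ij|.  A minimal
+ * unblocked (1 x 1) reference, assuming `bpntr` holds CSR-style row pointers:
+ */
+#if 0	/* illustrative only */
+#include <complex.h>
+static void example_abs_row_sums(const double complex *VA, double complex *row_sums,
+	rsb_coo_idx_t Mdim, const rsb_nnz_idx_t *bpntr, rsb_coo_idx_t roff)
+{
+	rsb_coo_idx_t i;
+	rsb_nnz_idx_t n;
+	for(i = 0; i < Mdim; ++i)
+		for(n = bpntr[i]; n < bpntr[i+1]; ++n)
+			row_sums[roff + i] += cabs(VA[n]);	/* accumulate |a_ij| */
+}
+#endif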
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tN_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tN_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tN_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tT_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tT_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tT_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tT_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
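+/*
+ * Editorial note (an assumption: the `tN`/`tT`/`tC` infixes are read as the
+ * BLAS transposition mnemonics): for the transposed and conjugate-transposed
+ * variants the row sums of op(A) are the column sums of A, so accumulation
+ * would be indexed by column rather than row; and since |conj(z)| = |z|, the
+ * `tT` and `tC` absolute sums coincide.  Sketch with hypothetical coordinate
+ * arrays `JA`/`nnz`:
+ */
+#if 0	/* illustrative only */
+	for(n = 0; n < nnz; ++n)
+		row_sums[coff + JA[n]] += cabs(VA[n]);	/* rows of op(A) = columns of A */
+#endif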
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tC_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tC_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tC_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tC_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tN_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
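+/*
+ * Editorial note (an assumption: the `sS` infix is read as symmetric storage,
+ * i.e. only one triangle is kept): each stored off-diagonal entry a_ij then
+ * contributes |a_ij| to the absolute sums of both row i and row j.  Sketch
+ * with hypothetical coordinate arrays `IA`/`JA`/`nnz`:
+ */
+#if 0	/* illustrative only */
+	for(n = 0; n < nnz; ++n)
+	{
+		row_sums[roff + IA[n]] += cabs(VA[n]);
+		if(IA[n] != JA[n])	/* mirror the unstored transposed entry */
+			row_sums[coff + JA[n]] += cabs(VA[n]);
+	}
+#endif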
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tN_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tN_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tN_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tT_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tT_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tT_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tT_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tC_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tC_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tC_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tC_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tN_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("infty_norm") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
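+
+/*
+ * Editorial note, an assumption drawn from the signatures in this file: the
+ * `_C_` and `_H_` dispatcher variants differ in the width of the block column
+ * index array, i.e.:
+ *
+ *	const rsb_coo_idx_t  * restrict bindx;	// `_C_` variants: full-width indices
+ *	const rsb_half_idx_t * restrict bindx;	// `_H_` variants: halfword indices
+ *
+ * The tN/tT/tC infix apparently selects the untransposed, transposed, or
+ * conjugate-transposed operation in the usual BLAS convention; sU/sS/sH
+ * presumably mark unsymmetric, symmetric, and hermitian variants; and the
+ * `uu`/`ul` kernel suffixes distinguish the fully unrolled 1 x 1 case from
+ * the generic looped case guarded by RSB_WANT_LOOPING_KERNELS.
+ */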
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tN_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tN_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tN_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tT_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tT_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tT_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tT_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tC_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tC_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tC_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tC_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("infty_norm") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
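+
+/*
+ * Editorial note: every dispatcher above degrades in the same way when the
+ * library is built without RSB_WANT_LOOPING_KERNELS: any block size other
+ * than 1 x 1 yields RSB_ERR_UNSUPPORTED_OPERATION, so callers are expected
+ * to check the returned error code. A caller-side sketch (hypothetical, for
+ * illustration only):
+ *
+ *	errval = rsb__BCSR_infty_norm_double_complex_C_u_tN_sH_dE_uG(VA, row_sums,
+ *		Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags);
+ *	if(errval == RSB_ERR_UNSUPPORTED_OPERATION)
+ *		;	// fall back, e.g. to a non-blocked code path
+ */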
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tN_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
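+
+/*
+ * A minimal usage sketch for the dispatcher above, assuming CSR-style arrays
+ * (row pointers in bpntr, column indices in bindx) and relying on the code
+ * above treating NULL rpntr/cpntr as the 1 x 1 block path; all values are
+ * illustrative only:
+ *
+ *	const double complex VA[3] = { 1, 2, 3 };	// nonzeroes of a 2 x 2 matrix
+ *	double complex row_sums[2] = { 0, 0 };
+ *	const rsb_coo_idx_t bindx[3] = { 0, 1, 1 };	// column index of each nonzero
+ *	const rsb_nnz_idx_t bpntr[3] = { 0, 2, 3 };	// row pointers
+ *	rsb_err_t errval = rsb__BCSR_rowssums_double_complex_C_u_tN_sU_dE_uG(
+ *		VA, row_sums, 2, 2, bindx, bpntr, NULL, NULL, NULL, 1, 1, 0, 0,
+ *		RSB_FLAG_NOFLAGS);	// row_sums would then hold the per-row sums
+ */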
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tN_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tN_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tN_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tT_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tT_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tT_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tT_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tC_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tC_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tC_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_uu_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tC_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_uu_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tN_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tN_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("rowssums") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * should be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tN_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tN_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
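+/*
+ * A worked instance of the overflow bound stated in the comments above
+ * (a standalone sketch, not librsb code; kept out of the build by #if 0):
+ * with matrixrows = 5 and blockrows = 2 the caller must tolerate
+ * mod(2 - mod(5,2), 2) = 1 trailing element of slack in the out vector.
+ */
+#if 0
+#include <stdio.h>
+
+static int pad(int n, int b)
+{
+	return (b - n % b) % b;	/* mod(b - mod(n,b), b) */
+}
+
+int main(void)
+{
+	printf("%d\n", pad(5, 2));	/* 1: one element of slack needed */
+	printf("%d\n", pad(6, 2));	/* 0: evenly blocked, none needed */
+	return 0;
+}
+#endif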
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tT_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
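+/*
+ * Why the tN/tT/tC variants differ (a standalone sketch under simplified
+ * assumptions -- a plain COO triplet walk, not the blocked layout above;
+ * kept out of the build by #if 0): for op(A) = A the sums accumulate per
+ * stored row, for op(A) = A^T per stored column, and for op(A) = A^H the
+ * values are additionally conjugated.
+ */
+#if 0
+#include <complex.h>
+#include <stdio.h>
+
+static void rowsums(char trans, const double complex *VA, const int *IA,
+                    const int *JA, int nnz, double complex *row_sums)
+{
+	for (int n = 0; n < nnz; ++n)
+	{
+		const double complex v = (trans == 'C') ? conj(VA[n]) : VA[n];
+		const int i = (trans == 'N') ? IA[n] : JA[n];	/* T,C: transpose */
+		row_sums[i] += v;
+	}
+}
+
+int main(void)
+{
+	const double complex VA[] = { 1.0 + 2.0 * I, 3.0 };
+	const int IA[] = { 0, 1 }, JA[] = { 1, 1 };
+	double complex s[2] = { 0, 0 };
+	rowsums('C', VA, IA, JA, 2, s);
+	printf("%g%+gi\n", creal(s[1]), cimag(s[1]));	/* prints 4-2i */
+	return 0;
+}
+#endif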
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tT_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tT_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tT_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tC_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tC_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tC_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_uu_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tC_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_uu_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tN_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
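+/*
+ * What the sS/sH storage variants imply (a standalone sketch under
+ * simplified COO assumptions, not librsb code; kept out of the build by
+ * #if 0): with only one triangle stored, each off-diagonal entry a(i,j)
+ * also contributes to row j -- conjugated in the hermitian case.
+ */
+#if 0
+#include <complex.h>
+#include <stdio.h>
+
+static void sym_rowsums(int hermitian, const double complex *VA,
+                        const int *IA, const int *JA, int nnz,
+                        double complex *row_sums)
+{
+	for (int n = 0; n < nnz; ++n)
+	{
+		row_sums[IA[n]] += VA[n];
+		if (IA[n] != JA[n])	/* mirror the unstored triangle */
+			row_sums[JA[n]] += hermitian ? conj(VA[n]) : VA[n];
+	}
+}
+
+int main(void)
+{
+	/* lower triangle of a 2x2 hermitian matrix: a(0,0)=2, a(1,0)=1+i */
+	const double complex VA[] = { 2.0, 1.0 + 1.0 * I };
+	const int IA[] = { 0, 1 }, JA[] = { 0, 0 };
+	double complex s[2] = { 0, 0 };
+	sym_rowsums(1, VA, IA, JA, 2, s);
+	printf("%g%+gi\n", creal(s[0]), cimag(s[0]));	/* prints 3-1i */
+	return 0;
+}
+#endif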
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tN_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tN_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tN_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tT_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tT_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tT_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tT_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tC_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tC_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tC_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_uu_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tC_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("rowssums") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_uu_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,row_sums,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tN_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_C__tN_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
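+/*
+ * What the "scale" kernels compute, in contrast to "rowssums": VA is
+ * modified in place, each stored value multiplied by the factor of its
+ * row (a standalone sketch over a plain CSR-like row-pointer walk, not
+ * the blocked layout above; kept out of the build by #if 0).
+ */
+#if 0
+#include <complex.h>
+#include <stdio.h>
+
+static void scale_rows(double complex *VA, const int *bpntr, int Mdim,
+                       const double complex *scale_factors)
+{
+	for (int i = 0; i < Mdim; ++i)	/* each (block) row */
+		for (int n = bpntr[i]; n < bpntr[i + 1]; ++n)
+			VA[n] *= scale_factors[i];	/* in-place scaling */
+}
+
+int main(void)
+{
+	double complex VA[] = { 1.0 + 1.0 * I, 2.0, 3.0 };
+	const int bpntr[] = { 0, 2, 3 };	/* two rows: nnz 0..1 and 2 */
+	const double complex sf[] = { 2.0, 1.0 * I };
+	scale_rows(VA, bpntr, 2, sf);
+	printf("%g%+gi\n", creal(VA[2]), cimag(VA[2]));	/* prints 0+3i */
+	return 0;
+}
+#endif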
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tN_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_C__tN_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tN_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_H__tN_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tN_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("scale") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_H__tN_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
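+
+/*
+ * Illustrative sketch (hypothetical caller, not from librsb): when the
+ * library is configured without RSB_WANT_LOOPING_KERNELS, the dispatchers
+ * in this file return RSB_ERR_UNSUPPORTED_OPERATION for any block size
+ * other than the fully unrolled 1 x 1 case; a caller might distinguish
+ * that outcome from other failures as sketched here.
+ */
+static int rsb_example_check_scale_errval(rsb_err_t errval)
+{
+	if(errval == RSB_ERR_NO_ERROR)
+		return 0;	/* kernel dispatched and completed */
+	if(errval == RSB_ERR_UNSUPPORTED_OPERATION)
+		return 1;	/* no looped kernel compiled in for this block size */
+	return -1;		/* any other error */
+}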
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tT_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_C__tT_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tT_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_C__tT_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tT_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_H__tT_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tT_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_H__tT_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tC_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_C__tC_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tC_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_C__tC_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tC_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_H__tC_r1_c1_uu_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tC_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_H__tC_r1_c1_uu_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tN_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_C__tN_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tN_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_C__tN_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tN_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_H__tN_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tN_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_H__tN_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tT_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_C__tT_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tT_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_C__tT_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tT_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_H__tT_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tT_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_H__tT_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tC_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_C__tC_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tC_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_C__tC_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tC_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_H__tC_r1_c1_uu_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tC_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_H__tC_r1_c1_uu_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tN_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_C__tN_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tN_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_C__tN_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tN_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_H__tN_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tN_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_H__tN_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tT_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_C__tT_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tT_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("scale") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each be allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_C__tT_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tT_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the desired matrix operation ("scale") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_H__tT_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tT_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the desired matrix operation ("scale") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_H__tT_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tC_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the desired matrix operation ("scale") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_C__tC_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tC_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the desired matrix operation ("scale") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_C__tC_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tC_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the desired matrix operation ("scale") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_H__tC_r1_c1_uu_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tC_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized (unrolled or looped) kernel
+	 * performing the desired matrix operation ("scale") for the current
+	 * fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the output vector
+	 * must each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_scale_double_complex_H__tC_r1_c1_uu_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_scale_double_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_scale_double_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,scale_factors );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+/* @endcond */
diff --git a/rsb_krnl_bcss_misc_u.h b/rsb_krnl_bcss_misc_u.h
new file mode 100644
index 0000000..0d9fbcc
--- /dev/null
+++ b/rsb_krnl_bcss_misc_u.h
@@ -0,0 +1,3499 @@
+/* @cond INNERDOC */
+/*!
+ @file
+ @brief
+ Performance kernel dispatching code, for each type, submatrix size, and
+ operation, here for the block compressed sparse stripes format.
+ Kernels are unrolled, with no loops, for user-specified blockings only.
+ */
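+/*
+ * A reading of the identifier mangling used below, inferred from the
+ * signatures in this changeset (not a normative reference): the numeric type
+ * (e.g. "double"), then "C"/"H" for full (rsb_coo_idx_t) versus halfword
+ * (rsb_half_idx_t) block column indices, "tN"/"tT"/"tC" for the
+ * transposition variant, "r1_c1" for the 1 x 1 block size, "uu"/"ul" for the
+ * unrolled versus looped (RSB_WANT_LOOPING_KERNELS) kernel body, and the
+ * remaining "sU"/"sS"/"sH" and "dE"/"dI" codes presumably selecting symmetry
+ * and diagonal-handling variants.
+ */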
+
+/*
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+#ifndef RSB_BCSS_MISC_U_H_INCLUDED
+#define RSB_BCSS_MISC_U_H_INCLUDED
+#include "rsb_internals.h"
+#include "rsb.h"
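+/*
+ * The prototypes below cover three kernel families: "infty_norm" and
+ * "rowssums", which accumulate per-row results into the row_sums array, and
+ * "scale", which rescales the values array VA in place using scale_factors.
+ * The bindx/bpntr index arguments appear to follow the usual block CSR
+ * conventions (block column indices and block row pointers); this is
+ * inferred from the signatures, not stated by upstream.
+ */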
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tT_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tT_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tT_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tT_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tC_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tC_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tC_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tC_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tT_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tT_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tT_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tT_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tC_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tC_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tC_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tC_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tN_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tN_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tT_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tT_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tT_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tT_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tC_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C__tC_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tC_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H__tC_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tT_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tT_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tT_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tT_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tC_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tC_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tC_r1_c1_uu_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tC_r1_c1_uu_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tT_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tT_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tT_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tT_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tC_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tC_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tC_r1_c1_uu_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tC_r1_c1_uu_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tN_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tN_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tT_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tT_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tT_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tT_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tC_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C__tC_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tC_r1_c1_uu_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H__tC_r1_c1_uu_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tN_r1_c1_uu_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tN_r1_c1_uu_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tN_r1_c1_uu_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tN_r1_c1_uu_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tT_r1_c1_uu_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tT_r1_c1_uu_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tT_r1_c1_uu_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tT_r1_c1_uu_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tC_r1_c1_uu_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tC_r1_c1_uu_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tC_r1_c1_uu_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tC_r1_c1_uu_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tN_r1_c1_uu_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tN_r1_c1_uu_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tN_r1_c1_uu_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tN_r1_c1_uu_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tT_r1_c1_uu_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tT_r1_c1_uu_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tT_r1_c1_uu_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tT_r1_c1_uu_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tC_r1_c1_uu_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tC_r1_c1_uu_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tC_r1_c1_uu_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tC_r1_c1_uu_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tN_r1_c1_uu_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tN_r1_c1_uu_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tN_r1_c1_uu_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tN_r1_c1_uu_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tT_r1_c1_uu_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tT_r1_c1_uu_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tT_r1_c1_uu_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tT_r1_c1_uu_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tC_r1_c1_uu_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C__tC_r1_c1_uu_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tC_r1_c1_uu_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H__tC_r1_c1_uu_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
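+/*
+ * A reading of the generated naming scheme used by the prototypes in this
+ * header, inferred from the signatures themselves rather than stated
+ * anywhere in this diff.  Directly observable: the type token
+ * (double / float / float_complex) fixes the element type of VA, and the
+ * C / H token selects full-width (rsb_coo_idx_t) versus halfword
+ * (rsb_half_idx_t) column indices in bindx.  Plausibly, by analogy with
+ * BLAS conventions: tN/tT/tC = no / plain / conjugate transpose,
+ * r1_c1 = 1x1 register blocking, sU/sS/sH = unsymmetric / symmetric /
+ * Hermitian storage, dE/dI = explicitly stored versus implicit unit
+ * diagonal; the uu and uG fields are left undecoded here.
+ */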
+rsb_err_t rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tT_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tT_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tT_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tT_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tC_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tC_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tC_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tC_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tT_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tT_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tT_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tT_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tC_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tC_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tC_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tC_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tN_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tN_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tT_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tT_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tT_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tT_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tC_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C__tC_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tC_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H__tC_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
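+/*
+ * Judging by the names and signatures, the infty_norm kernels above
+ * accumulate per-row sums of absolute values into row_sums, from which
+ * the matrix infinity norm is max_i row_sums[i].  A self-contained sketch
+ * of that computation for the plain 1x1 CSR, non-transposed, unsymmetric
+ * case (hypothetical helper, not part of librsb; needs <math.h> for
+ * fabsf):
+ *
+ *   static float csr_infty_norm(const float *VA,
+ *                               const rsb_nnz_idx_t *bpntr,
+ *                               rsb_coo_idx_t nr)
+ *   {
+ *       float norm = 0.0f;
+ *       for (rsb_coo_idx_t i = 0; i < nr; ++i) {
+ *           float row_sum = 0.0f;
+ *           // bpntr[i]..bpntr[i+1] delimits the stored entries of row i;
+ *           // the column index bindx[k] is not needed for a row sum.
+ *           for (rsb_nnz_idx_t k = bpntr[i]; k < bpntr[i + 1]; ++k)
+ *               row_sum += fabsf(VA[k]);
+ *           if (row_sum > norm)
+ *               norm = row_sum;
+ *       }
+ *       return norm;
+ *   }
+ *
+ * The rowssums kernels that follow share the same signature; presumably
+ * they accumulate plain (signed) row sums, without the absolute value.
+ */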
+rsb_err_t rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tT_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tT_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tT_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tT_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tC_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tC_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tC_r1_c1_uu_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tC_r1_c1_uu_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tT_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tT_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tT_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tT_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tC_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tC_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tC_r1_c1_uu_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tC_r1_c1_uu_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tN_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tN_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tT_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tT_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tT_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tT_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tC_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C__tC_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tC_r1_c1_uu_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H__tC_r1_c1_uu_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
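+/*
+ * Like the scale_double family earlier, the scale_float kernels below
+ * take a writable VA together with a const scale_factors vector;
+ * plausibly they rescale the stored entries row by row.  A sketch of that
+ * operation for the 1x1 CSR case (hypothetical helper, an assumption
+ * about the semantics rather than the generated code itself):
+ *
+ *   static void csr_scale_rows(float *VA,
+ *                              const rsb_nnz_idx_t *bpntr,
+ *                              rsb_coo_idx_t nr,
+ *                              const float *scale_factors)
+ *   {
+ *       for (rsb_coo_idx_t i = 0; i < nr; ++i)
+ *           for (rsb_nnz_idx_t k = bpntr[i]; k < bpntr[i + 1]; ++k)
+ *               VA[k] *= scale_factors[i];  // every stored entry of row i
+ *   }
+ */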
+rsb_err_t rsb__BCSR_scale_float_C__tN_r1_c1_uu_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tN_r1_c1_uu_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tN_r1_c1_uu_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tN_r1_c1_uu_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tT_r1_c1_uu_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tT_r1_c1_uu_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tT_r1_c1_uu_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tT_r1_c1_uu_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tC_r1_c1_uu_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tC_r1_c1_uu_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tC_r1_c1_uu_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tC_r1_c1_uu_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tN_r1_c1_uu_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tN_r1_c1_uu_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tN_r1_c1_uu_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tN_r1_c1_uu_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tT_r1_c1_uu_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tT_r1_c1_uu_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tT_r1_c1_uu_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tT_r1_c1_uu_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tC_r1_c1_uu_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tC_r1_c1_uu_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tC_r1_c1_uu_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tC_r1_c1_uu_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tN_r1_c1_uu_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tN_r1_c1_uu_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tN_r1_c1_uu_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tN_r1_c1_uu_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tT_r1_c1_uu_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tT_r1_c1_uu_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tT_r1_c1_uu_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tT_r1_c1_uu_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tC_r1_c1_uu_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C__tC_r1_c1_uu_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tC_r1_c1_uu_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H__tC_r1_c1_uu_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
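The mnemonic suffixes on these generated kernels appear to encode the specialization: _C_/_H_ select full-width (rsb_coo_idx_t) versus half-width (rsb_half_idx_t) column indices, as the bindx parameter type shows; tN/tT/tC plausibly select no transpose, transpose, and conjugate transpose; sU/sS/sH unsymmetric, symmetric, and hermitian storage; dE/dI an explicit versus implicit diagonal; and r1_c1 a 1x1 block size. A minimal sketch of what a scale kernel with this signature plausibly computes, assuming bpntr holds the usual Mdim+1 CSR row pointers and scale_factors one factor per row (illustrative only, not taken from the upstream sources):

    /* Scale every stored nonzero of row i by scale_factors[i].
     * The index typedefs are placeholders for the librsb ones. */
    typedef int rsb_coo_idx_t;
    typedef int rsb_nnz_idx_t;

    static void csr_scale_rows(float *VA, rsb_coo_idx_t Mdim,
                               const rsb_nnz_idx_t *bpntr,
                               const float *scale_factors)
    {
        for (rsb_coo_idx_t i = 0; i < Mdim; ++i)
            for (rsb_nnz_idx_t k = bpntr[i]; k < bpntr[i + 1]; ++k)
                VA[k] *= scale_factors[i];
    }

The remaining parameters (rpntr, cpntr, br, bc, roff, coff, flags) presumably carry the block layout and submatrix offsets that the r1_c1 specialization can largely ignore.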
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tN_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tN_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tT_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tT_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C__tC_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H__tC_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
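For the infty_norm family, each kernel presumably accumulates per-row sums of absolute values into row_sums, from which the caller takes the maximum to obtain the infinity norm. A minimal sketch under the same CSR-convention assumption (illustrative, not upstream code):

    #include <complex.h>            /* float complex, cabsf            */
    typedef int rsb_coo_idx_t;      /* placeholder index types again   */
    typedef int rsb_nnz_idx_t;

    /* Accumulate |a_ij| per row; roff shifts into the global row_sums. */
    static void csr_infty_norm_acc(const float complex *VA,
                                   float complex *row_sums,
                                   rsb_coo_idx_t Mdim, rsb_coo_idx_t roff,
                                   const rsb_nnz_idx_t *bpntr)
    {
        for (rsb_coo_idx_t i = 0; i < Mdim; ++i)
            for (rsb_nnz_idx_t k = bpntr[i]; k < bpntr[i + 1]; ++k)
                row_sums[roff + i] += cabsf(VA[k]);
    }

row_sums is complex-typed here only to match the prototypes above; only its real part is meaningful for a norm.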
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_uu_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_uu_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_uu_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_uu_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tN_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tN_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tT_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tT_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C__tC_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_uu_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H__tC_r1_c1_uu_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
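The rowssums kernels have the same shape as infty_norm but plausibly accumulate the signed values without taking the modulus, e.g. to compute A times a vector of ones. A sketch under the same assumptions (illustrative only):

    #include <complex.h>            /* float complex                   */
    typedef int rsb_coo_idx_t;      /* placeholder index types again   */
    typedef int rsb_nnz_idx_t;

    /* Accumulate a_ij per row, modulus-free, offset by roff. */
    static void csr_row_sums(const float complex *VA,
                             float complex *row_sums,
                             rsb_coo_idx_t Mdim, rsb_coo_idx_t roff,
                             const rsb_nnz_idx_t *bpntr)
    {
        for (rsb_coo_idx_t i = 0; i < Mdim; ++i)
            for (rsb_nnz_idx_t k = bpntr[i]; k < bpntr[i + 1]; ++k)
                row_sums[roff + i] += VA[k];
    }

For the transposed (tT/tC) variants the accumulation target would instead be indexed by coff + bindx[k], which is presumably why the column-index array is part of every signature.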
+rsb_err_t rsb__BCSR_scale_float_complex_C__tN_r1_c1_uu_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tN_r1_c1_uu_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tN_r1_c1_uu_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tN_r1_c1_uu_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tT_r1_c1_uu_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tT_r1_c1_uu_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tT_r1_c1_uu_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tT_r1_c1_uu_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tC_r1_c1_uu_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tC_r1_c1_uu_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tC_r1_c1_uu_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tC_r1_c1_uu_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tN_r1_c1_uu_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tN_r1_c1_uu_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tN_r1_c1_uu_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tN_r1_c1_uu_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tT_r1_c1_uu_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tT_r1_c1_uu_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tT_r1_c1_uu_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tT_r1_c1_uu_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tC_r1_c1_uu_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tC_r1_c1_uu_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tC_r1_c1_uu_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tC_r1_c1_uu_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tN_r1_c1_uu_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tN_r1_c1_uu_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tN_r1_c1_uu_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tN_r1_c1_uu_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tT_r1_c1_uu_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tT_r1_c1_uu_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tT_r1_c1_uu_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tT_r1_c1_uu_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tC_r1_c1_uu_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C__tC_r1_c1_uu_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tC_r1_c1_uu_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H__tC_r1_c1_uu_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tN_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tN_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tT_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tT_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C__tC_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H__tC_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
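The rsb__BCSR_infty_norm_* family ending just above writes into a row_sums output array; going by the name and by the standard definition ||A||_inf = max_i sum_j |a_ij|, each kernel presumably accumulates per-row absolute-value sums and leaves the final maximum to the caller. A minimal reduction sketch in plain C, assuming exactly that contract (infinity_norm and its arguments are illustrative names, not librsb API):

    #include <stddef.h>

    /* Reduce per-row absolute sums, as presumably produced by one of the
     * rsb__BCSR_infty_norm_* kernels above, to the matrix infinity norm. */
    double infinity_norm(const double *row_sums, size_t n)
    {
        double nrm = 0.0;
        for (size_t i = 0; i < n; ++i)
            if (row_sums[i] > nrm)
                nrm = row_sums[i];
        return nrm;
    }
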
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_uu_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_uu_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_uu_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_uu_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tN_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tN_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tT_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tT_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C__tC_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_uu_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H__tC_r1_c1_uu_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
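The rsb__BCSR_rowssums_* family ending just above has the same signature as the infty_norm kernels; judging purely by the name, it presumably accumulates plain (signed/complex) row sums rather than sums of absolute values. A reference loop for the unsymmetric, non-transposed 1x1/CSR case, written from scratch for illustration (csr_row_sums is not a librsb symbol, and the bpntr-as-row-pointer reading is an assumption):

    #include <complex.h>

    /* Plain per-row sums of a CSR matrix: row_sums[i] = sum of VA[k]
     * for k in [bpntr[i], bpntr[i+1]).  Column indices are not needed
     * in the non-transposed case. */
    void csr_row_sums(const double complex *VA, const int *bpntr,
                      int Mdim, double complex *row_sums)
    {
        for (int i = 0; i < Mdim; ++i) {
            double complex s = 0;
            for (int k = bpntr[i]; k < bpntr[i + 1]; ++k)
                s += VA[k];
            row_sums[i] = s;
        }
    }
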
+rsb_err_t rsb__BCSR_scale_double_complex_C__tN_r1_c1_uu_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tN_r1_c1_uu_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tN_r1_c1_uu_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tN_r1_c1_uu_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tT_r1_c1_uu_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tT_r1_c1_uu_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tT_r1_c1_uu_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tT_r1_c1_uu_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tC_r1_c1_uu_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tC_r1_c1_uu_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tC_r1_c1_uu_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tC_r1_c1_uu_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tN_r1_c1_uu_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tN_r1_c1_uu_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tN_r1_c1_uu_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tN_r1_c1_uu_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tT_r1_c1_uu_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tT_r1_c1_uu_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tT_r1_c1_uu_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tT_r1_c1_uu_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tC_r1_c1_uu_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tC_r1_c1_uu_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tC_r1_c1_uu_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tC_r1_c1_uu_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tN_r1_c1_uu_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tN_r1_c1_uu_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tN_r1_c1_uu_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tN_r1_c1_uu_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tT_r1_c1_uu_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tT_r1_c1_uu_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tT_r1_c1_uu_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tT_r1_c1_uu_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tC_r1_c1_uu_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C__tC_r1_c1_uu_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tC_r1_c1_uu_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H__tC_r1_c1_uu_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
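The rsb__BCSR_scale_* kernels ending just above take a non-const VA plus a scale_factors array; the natural reading, again an inference from the parameter names rather than anything stated in the hunk, is an in-place per-row scaling of the stored values. A reference loop for the 1x1/CSR case, under that assumption (csr_scale_rows is an illustrative name, not librsb API):

    #include <complex.h>

    /* In-place row scaling of a CSR matrix: every stored entry of row i
     * is multiplied by scale_factors[i]. */
    void csr_scale_rows(double complex *VA, const int *bpntr, int Mdim,
                        const double complex *scale_factors)
    {
        for (int i = 0; i < Mdim; ++i)
            for (int k = bpntr[i]; k < bpntr[i + 1]; ++k)
                VA[k] *= scale_factors[i];
    }
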
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tN_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tN_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tN_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tN_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tT_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tT_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tT_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tT_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tC_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tC_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tC_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tC_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tN_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tN_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tN_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tN_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tT_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tT_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tT_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tT_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tC_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tC_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tC_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tC_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tN_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tN_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tN_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tN_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tT_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tT_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tT_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tT_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tC_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_C_u_tC_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tC_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_H_u_tC_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
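+/*
+ * The `rsb__BCSR_rowssums_*` prototypes below mirror the
+ * `rsb__BCSR_infty_norm_*` family above variant for variant, with identical
+ * parameter lists; presumably only the per-row reduction differs (plain row
+ * sums here, absolute row sums feeding the infinity norm above).
+ */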
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tN_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tN_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tN_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tN_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tT_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tT_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tT_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tT_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tC_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tC_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tC_sU_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tC_sU_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tN_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tN_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tN_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tN_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tT_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tT_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tT_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tT_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tC_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tC_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tC_sS_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tC_sS_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tN_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tN_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tN_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tN_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tT_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tT_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tT_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tT_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tC_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_C_u_tC_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tC_sH_dE_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_H_u_tC_sH_dI_uG(const double * VA, double * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
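+/*
+ * The `rsb__BCSR_scale_*` family below differs from the two reduction
+ * families above: `VA` is mutable rather than const, and the output vector
+ * is replaced by a trailing `const double *scale_factors` argument, so each
+ * row is presumably scaled in place by its factor.  A minimal call sketch,
+ * assuming `nr` holds the number of rows served by this submatrix (the
+ * sketch and its variable names are illustrative, not upstream code):
+ *
+ *   double factors[nr];                        // one factor per row
+ *   for (rsb_coo_idx_t i = 0; i < nr; ++i)
+ *       factors[i] = 2.0;                      // e.g. scale every row by 2
+ *   rsb__BCSR_scale_double_C_u_tN_sU_dE_uG(VA, Mdim, mdim, bindx, bpntr,
+ *           indptr, rpntr, cpntr, br, bc, roff, coff, flags, factors);
+ */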
+rsb_err_t rsb__BCSR_scale_double_C_u_tN_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tN_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tN_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tN_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tT_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tT_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tT_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tT_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tC_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tC_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tC_sU_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tC_sU_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tN_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tN_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tN_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tN_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tT_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tT_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tT_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tT_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tC_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tC_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tC_sS_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tC_sS_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tN_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tN_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tN_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tN_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tT_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tT_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tT_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tT_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tC_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_C_u_tC_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tC_sH_dE_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_H_u_tC_sH_dI_uG(double * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double *scale_factors);
+
+
+
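+/*
+ * From here the same kernel families are instantiated for `float` operands;
+ * apart from the element type of `VA` and `row_sums`, the signatures match
+ * the `double` versions above.
+ */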
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tN_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tN_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tN_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tN_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tT_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tT_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tT_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tT_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tC_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tC_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tC_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tC_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tN_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tN_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tN_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tN_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tT_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tT_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tT_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tT_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tC_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tC_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tC_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tC_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tN_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tN_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tN_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tN_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tT_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tT_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tT_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tT_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tC_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_C_u_tC_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tC_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_H_u_tC_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tN_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tN_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tN_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tN_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tT_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tT_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tT_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tT_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tC_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tC_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tC_sU_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tC_sU_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tN_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tN_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tN_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tN_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tT_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tT_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tT_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tT_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tC_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tC_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tC_sS_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tC_sS_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tN_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tN_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tN_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tN_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tT_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tT_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tT_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tT_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tC_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_C_u_tC_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tC_sH_dE_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_H_u_tC_sH_dI_uG(const float * VA, float * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
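The suffixes in these generated kernel names encode their specialization: `_float_`/`_float_complex_` is the value type; `_C_`/`_H_` is the column-index width (the `C` variants take `const rsb_coo_idx_t *bindx`, the `H` variants `const rsb_half_idx_t *bindx`, as the prototypes show); `tN`/`tT`/`tC` appears to select the transposition (none, transposed, conjugate-transposed), `sU`/`sS`/`sH` the symmetry handling (unsymmetric, symmetric, Hermitian), and `dE`/`dI` an explicit versus implicit diagonal -- the t/s/d readings follow the usual BLAS conventions and are an inference, not something this header states. As a minimal, hedged sketch (not part of the upstream sources), per-row sums of an unblocked (br = bc = 1, i.e. plain CSR) float matrix could be computed through the kernel declared above; the NULL/zero choices for the blocking-related arguments are an assumption for the 1x1 case, and these double-underscore entry points are internal, so linking to them presumes a build that does not hide them:

    /* Hedged sketch: row sums of a 3x3 CSR float matrix via a generated
     * internal librsb kernel.  Prototype copied verbatim from this header;
     * all NULL/zero blocking arguments are assumptions for the 1x1 case. */
    #include <stdio.h>
    #include <rsb.h>  /* rsb_err_t, rsb_coo_idx_t, rsb_nnz_idx_t, rsb_flags_t */

    rsb_err_t rsb__BCSR_rowssums_float_C_u_tN_sU_dE_uG(const float *VA,
            float *row_sums, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim,
            const rsb_coo_idx_t *restrict bindx, const rsb_nnz_idx_t *restrict bpntr,
            const rsb_nnz_idx_t *restrict indptr, const rsb_coo_idx_t *restrict rpntr,
            const rsb_coo_idx_t *restrict cpntr, const rsb_coo_idx_t br,
            const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff,
            const rsb_flags_t flags);

    int main(void)
    {
        /* CSR form of [[1,2,0],[0,3,0],[0,0,4]] */
        const float         VA[]    = { 1.0f, 2.0f, 3.0f, 4.0f };
        const rsb_coo_idx_t bindx[] = { 0, 1, 1, 2 };
        const rsb_nnz_idx_t bpntr[] = { 0, 2, 3, 4 };
        float row_sums[3] = { 0.0f, 0.0f, 0.0f };  /* assuming the kernel accumulates */
        rsb_err_t errval = rsb__BCSR_rowssums_float_C_u_tN_sU_dE_uG(
                VA, row_sums, 3, 3, bindx, bpntr,
                NULL, NULL, NULL,          /* indptr/rpntr/cpntr: assumed unused here */
                1, 1, 0, 0,                /* br, bc, roff, coff */
                RSB_FLAG_NOFLAGS);
        if (errval == RSB_ERR_NO_ERROR)
            printf("row sums: %g %g %g\n", row_sums[0], row_sums[1], row_sums[2]);
        return 0;
    }
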
+rsb_err_t rsb__BCSR_scale_float_C_u_tN_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tN_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tN_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tN_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tT_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tT_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tT_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tT_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tC_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tC_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tC_sU_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tC_sU_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tN_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tN_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tN_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tN_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tT_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tT_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tT_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tT_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tC_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tC_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tC_sS_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tC_sS_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tN_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tN_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tN_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tN_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tT_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tT_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tT_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tT_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tC_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_C_u_tC_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tC_sH_dE_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_H_u_tC_sH_dI_uG(float * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float *scale_factors);
+
+
+
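Relative to the reduction kernels above, the scale family's prototypes differ in two visible ways: VA loses its const qualifier (the stored values are updated in place) and a trailing `const float *scale_factors` argument is appended. Continuing the hedged sketch above (same assumptions; whether scale_factors is indexed per row is inferred, not stated in this header), an in-place scaling fragment could look like:

    /* Drop-in continuation of the previous sketch's main(), reusing bindx
     * and bpntr; per-row indexing of scale_factors is an assumption. */
    float VA_rw[] = { 1.0f, 2.0f, 3.0f, 4.0f };      /* writable values */
    const float scale_factors[] = { 2.0f, 0.5f, 1.0f };
    rsb_err_t errval2 = rsb__BCSR_scale_float_C_u_tN_sU_dE_uG(
            VA_rw, 3, 3, bindx, bpntr, NULL, NULL, NULL,
            1, 1, 0, 0, RSB_FLAG_NOFLAGS, scale_factors);
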
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tN_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tN_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tN_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tN_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tT_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tT_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tT_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tT_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tC_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tC_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tC_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tC_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tN_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tN_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tN_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tN_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tT_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tT_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tT_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tT_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tC_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tC_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tC_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tC_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tN_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tN_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tN_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tN_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tT_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tT_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tT_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tT_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tC_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_C_u_tC_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tC_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_float_complex_H_u_tC_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
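For the `float complex` variants, the declarations show that even norm-like reductions accumulate into a `float complex *row_sums` buffer rather than a real-valued one, so callers need C99 complex support and must take the real part themselves if a real norm is wanted. One last hedged fragment, under the same assumptions as the sketches above, for the kernel declared at the head of this family:

    /* Requires: #include <complex.h> at the top of the earlier sketch.
     * Same 3x3 pattern, values now complex; the complex-typed accumulator
     * follows the declaration, not a semantic guess. */
    const float complex VAc[] = { 1.0f + 1.0f*I, 2.0f, 3.0f, 4.0f };
    float complex row_sums_c[3] = { 0.0f, 0.0f, 0.0f };
    rsb_err_t errval3 = rsb__BCSR_infty_norm_float_complex_C_u_tN_sU_dE_uG(
            VAc, row_sums_c, 3, 3, bindx, bpntr, NULL, NULL, NULL,
            1, 1, 0, 0, RSB_FLAG_NOFLAGS);
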
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tN_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tN_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tN_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tN_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tT_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tT_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tT_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tT_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tC_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tC_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tC_sU_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tC_sU_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tN_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tN_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tN_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tN_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tT_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tT_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tT_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tT_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tC_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tC_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tC_sS_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tC_sS_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tN_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tN_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tN_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tN_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tT_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tT_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tT_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tT_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tC_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_C_u_tC_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tC_sH_dE_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_float_complex_H_u_tC_sH_dI_uG(const float complex * VA, float complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
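+/*
+ * The kernel names above and below follow a fixed mangling scheme,
+ * rsb__BCSR_<op>_<type>_<I>_u_t<T>_s<S>_d<D>_uG: <op> is the operation
+ * (infty_norm, rowssums, scale, ...), <type> the numerical type, <I> is
+ * C for full rsb_coo_idx_t column indices or H for compact
+ * rsb_half_idx_t ones (visible in the bindx parameter type), t{N,T,C}
+ * the transposition (none, transposed, conjugate-transposed), s{U,S,H}
+ * the symmetry (unsymmetric, symmetric, hermitian), and d{E,I} an
+ * explicit or implicit (unit) diagonal. This reading is inferred from
+ * the declarations themselves; the generating macros are authoritative.
+ */
+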
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tN_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tN_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tN_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tN_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tT_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tT_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tT_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tT_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tC_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tC_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tC_sU_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tC_sU_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tN_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tN_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tN_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tN_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tT_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tT_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tT_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tT_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tC_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tC_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tC_sS_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tC_sS_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tN_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tN_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tN_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tN_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tT_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tT_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tT_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tT_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tC_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_C_u_tC_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tC_sH_dE_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_float_complex_H_u_tC_sH_dI_uG(float complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const float complex *scale_factors);
+
+
+
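+/*
+ * A minimal calling sketch for one of the scaling kernels above; an
+ * illustration only, assuming a 1x1-blocked (CSR-like) layout in which
+ * indptr/rpntr/cpntr may be passed as NULL and a zero flags value is
+ * acceptable (assumptions this header does not itself guarantee):
+ *
+ *   #include <complex.h>
+ *   float complex VA[3] = { 1.0f + 1.0f*I, 2.0f, 3.0f }; // nonzeros
+ *   rsb_coo_idx_t bindx[3] = { 0, 1, 1 };   // column index per nonzero
+ *   rsb_nnz_idx_t bpntr[3] = { 0, 2, 3 };   // row pointers (2 rows)
+ *   const float complex sf[2] = { 2.0f, 0.5f }; // per-row scale factors
+ *   rsb_err_t err = rsb__BCSR_scale_float_complex_C_u_tN_sU_dE_uG(
+ *       VA, 2, 2, bindx, bpntr, NULL, NULL, NULL,
+ *       1, 1, 0, 0, 0, sf);
+ */
+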
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tN_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tN_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tN_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tN_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tT_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tT_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tT_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tT_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tC_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tC_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tC_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tC_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tN_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tN_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tN_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tN_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tT_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tT_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tT_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tT_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tC_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tC_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tC_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tC_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tN_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tN_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tN_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tN_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tT_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tT_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tT_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tT_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tC_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_C_u_tC_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tC_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_infty_norm_double_complex_H_u_tC_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
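+/*
+ * The infty_norm kernels above reuse the rowssums signature, down to
+ * the row_sums output parameter: each entry presumably receives the
+ * sum of absolute values of its row, whose maximum over all rows is
+ * the matrix infinity norm. Inferred from the names and signatures,
+ * not stated in this header.
+ */
+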
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tN_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tN_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tN_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tN_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tT_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tT_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tT_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tT_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tC_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tC_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tC_sU_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tC_sU_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tN_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tN_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tN_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tN_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tT_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tT_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tT_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tT_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tC_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tC_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tC_sS_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tC_sS_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tN_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tN_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tN_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tN_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tT_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tT_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tT_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tT_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tC_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_C_u_tC_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tC_sH_dE_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_rowssums_double_complex_H_u_tC_sH_dI_uG(const double complex * VA, double complex * row_sums, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
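+/* Editorial sketch (not from the generated sources): the rowssums kernels
+ * declared above compute per-row sums of the stored values of one CSR
+ * (1 x 1 blocked BCSR) stripe.  A minimal reference loop for the
+ * untransposed, unsymmetric case -- assuming, as in the spmv kernels later
+ * in this patch, that row i spans bpntr[i]..bpntr[i+1] and that br..bc is
+ * the row range -- would be:
+ *
+ *	for (i = br; i < bc; ++i)
+ *		for (k = bpntr[i]; k < bpntr[i+1]; ++k)
+ *			row_sums[i] += VA[k];
+ *
+ * For the symmetric (sS) and Hermitian (sH) variants only one triangle is
+ * stored, so each off-diagonal element presumably also contributes to the
+ * row sum of its transposed position.
+ */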
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tN_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tN_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tN_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tN_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tT_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tT_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tT_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tT_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tC_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tC_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tC_sU_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tC_sU_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tN_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tN_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tN_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tN_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tT_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tT_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tT_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tT_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tC_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tC_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tC_sS_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tC_sS_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tN_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tN_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tN_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tN_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tT_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tT_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tT_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tT_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tC_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_C_u_tC_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tC_sH_dE_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
+
+rsb_err_t rsb__BCSR_scale_double_complex_H_u_tC_sH_dI_uG(double complex * VA, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags, const double complex *scale_factors);
+
+
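+/* Editorial sketch (not from the generated sources): the scale kernels
+ * declared above take a writable VA and a scale_factors vector; for the
+ * untransposed case they plausibly scale each stored row by its factor.
+ * Under the same CSR layout assumptions as the rowssums sketch above:
+ *
+ *	for (i = br; i < bc; ++i)
+ *		for (k = bpntr[i]; k < bpntr[i+1]; ++k)
+ *			VA[k] *= scale_factors[i];
+ */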
+#endif /* RSB_BCSS_MISC_U_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_krnl_bcss_misc_u.m4 b/rsb_krnl_bcss_misc_u.m4
new file mode 100644
index 0000000..36a1a2a
--- /dev/null
+++ b/rsb_krnl_bcss_misc_u.m4
@@ -0,0 +1,57 @@
+dnl
+dnl	@author: Michele Martone
+dnl
+/* @cond INNERDOC */
+dnl
+/*!
+ @file
+ @brief
+ Performance kernel dispatching code, for each type, submatrix size, and
+ operation, specialized for the block compressed sparse stripes (BCSS) format.
+ Kernels are fully unrolled, without inner loops, for user-specified blockings only.
+ */
+dnl
+include(`rsb_misc.m4')dnl
+RSB_M4_HEADER_MESSAGE()dnl
+include(`rsb_krnl_bcss_macros.m4')dnl
+include(`rsb_krnl_vb_macros.m4')dnl FIXME : RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_NAME
+dnl
+dnl
+dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl				Function definitions
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#ifndef RSB_BCSS_MISC_U_H_INCLUDED
+#define RSB_BCSS_MISC_U_H_INCLUDED
+')dnl
+dnl 
+dnl
+ifelse(dnl
+RSB_M4_MEMBER(`BCSR',WANT_MATRIX_STORAGE)dnl
+RSB_M4_MEMBER(`BCSC',WANT_MATRIX_STORAGE)dnl
+,`00',`dnl
+/**
+ * No BCSS formats compiled in.
+ */
+',`dnl
+dnl
+dnl
+dnl
+dnl
+#include "rsb_internals.h"
+#include "rsb.h"
+dnl
+RSB_M4_BCSS_MISC_KERNELS((`u'))
+dnl
+')dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#endif /* RSB_BCSS_MISC_U_H_INCLUDED */
+')dnl
+dnl
+/* @endcond */
+dnl
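+dnl
+dnl	Editorial note (not from the generated sources): the generated kernel
+dnl	names encode their specialization, as the Doxygen comments in the
+dnl	generated code spell out: C/H selects rsb_coo_idx_t vs. rsb_half_idx_t
+dnl	column indices, tN/tT/tC the operation on A, A^T or A^H, sU/sS/sH
+dnl	unsymmetric, symmetric or Hermitian storage, and dE/dI explicit or
+dnl	implicit diagonal.  A hypothetical dispatch on index width alone
+dnl	(uses_half_indices() is an invented predicate) might read:
+dnl
+dnl		if (uses_half_indices(mtxAp))
+dnl			errval = rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sU_dE_uG(...);
+dnl		else
+dnl			errval = rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sU_dE_uG(...);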
diff --git a/rsb_krnl_bcss_spmv_u.c b/rsb_krnl_bcss_spmv_u.c
new file mode 100644
index 0000000..d118b7d
--- /dev/null
+++ b/rsb_krnl_bcss_spmv_u.c
@@ -0,0 +1,100282 @@
+/* @cond INNERDOC */
+/*!
+ @file
+ @brief
+ Performance kernel dispatching code, for each type, submatrix size, and
+ operation, specialized for the block compressed sparse stripes (BCSS) format.
+ Kernels are fully unrolled, without inner loops, for user-specified blockings only.
+ */
+
+/*
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script.
+ It is not meant to be used as an API (Application Programming Interface).
+ Note: at present, only row-major matrix access is implemented.
+
+ */
+#include "rsb.h"
+#include "rsb_common.h"
+#include "rsb_internals.h"
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
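+/* Editorial sketch (not from the generated sources): stripped of the
+ * four-way unrolling, the kernel above is the textbook CSR y += A*x loop
+ * over the row range br..bc:
+ *
+ *	for (i = br; i < bc; ++i) {
+ *		cacc = 0;
+ *		for (k = bpntr[i]; k < bpntr[i+1]; ++k)
+ *			cacc += VA[k] * rhs[bindx[k]];
+ *		out[i] += cacc;
+ *	}
+ *
+ * The generated code instead consumes four nonzeroes per iteration and
+ * finishes the remainder in a scalar tail loop, with identical results.
+ */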
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
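+/* Editorial sketch (not from the generated sources): the transposed kernel
+ * above scatters instead of gathering -- each stored element of row i
+ * contributes to the output entry of its column index:
+ *
+ *	trhs = rhs + (roff - coff);	.. shift between the stripe's row
+ *	tout = out + (coff - roff);	.. and column coordinate frames
+ *	for (i = br; i < bc; ++i)
+ *		for (k = bpntr[i]; k < bpntr[i+1]; ++k)
+ *			tout[bindx[k]] += VA[k] * trhs[i];
+ *
+ * The roff/coff shifts let the same bindx values index both vectors when
+ * the stripe sits off the diagonal of the containing matrix.
+ */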
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
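+/* Editorial sketch (not from the generated sources): with symmetric
+ * storage only one triangle is kept, so each stored element (i,j) feeds
+ * both output entries.  Ignoring the unrolling, the kernel above amounts
+ * to (trhs/tout shifted as in the transposed kernels):
+ *
+ *	for (i = br; i < bc; ++i) {
+ *		cacc = 0;
+ *		bt = trhs[i];
+ *		for (k = bpntr[i]; k < bpntr[i+1]; ++k) {
+ *			j = bindx[k];
+ *			cacc += VA[k] * rhs[j];
+ *			if (roff != coff || j != i)	.. count the diagonal once
+ *				tout[j] += VA[k] * bt;
+ *		}
+ *		out[i] += cacc;
+ *	}
+ *
+ * The generated code peels the first and last nonzero of each row -- the
+ * only positions where a diagonal element can sit in sorted triangular
+ * storage -- so that the unrolled middle loop can omit the j != i test.
+ */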
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a symmetric matrix, the transposed kernel reduces to the untransposed one. */
+	return rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a symmetric matrix, the transposed kernel reduces to the untransposed one. */
+	return rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a real symmetric matrix, the conjugate-transposed kernel reduces to the untransposed one. */
+	return rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a real symmetric matrix, the conjugate-transposed kernel reduces to the untransposed one. */
+	return rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a real (Hermitian = symmetric) matrix, the transposed kernel reduces to the untransposed one. */
+	return rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a real (Hermitian = symmetric) matrix, the transposed kernel reduces to the untransposed one. */
+	return rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a Hermitian matrix, the conjugate-transposed kernel reduces to the untransposed one. */
+	return rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a Hermitian matrix, the conjugate-transposed kernel reduces to the untransposed one. */
+	return rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
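+
+/* NOTE (editorial): the kernel above is a machine-generated, 4-way unrolled
+ * CSR row gather; the 1*(...)*1 factors are unit strides left in by the
+ * generator.  A minimal non-unrolled sketch of the same computation
+ * (illustrative only, not part of the library): */
+#if 0
+	rsb_coo_idx_t i;
+	rsb_nnz_idx_t k;
+	for (i = br; i < bc; ++i)                       /* each row in the bounded box */
+	{
+		double acc = 0.0;
+		for (k = bpntr[i]; k < bpntr[i+1]; ++k) /* nonzeroes of row i          */
+			acc += VA[k] * rhs[bindx[k]];   /* y_i += A_ij * x_j           */
+		out[i] += acc;
+	}
+#endif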
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
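+
+/* NOTE (editorial): for op(A) = A^T the generated kernels scatter rather
+ * than gather: each stored A_ij contributes to output element j, and the
+ * trhs/tout pointers compensate for the submatrix offsets roff and coff.
+ * A minimal equivalent sketch (illustrative only): */
+#if 0
+	rsb_coo_idx_t i;
+	rsb_nnz_idx_t k;
+	for (i = br; i < bc; ++i)
+	{
+		const double xi = trhs[i];              /* x_i, offset-adjusted        */
+		for (k = bpntr[i]; k < bpntr[i+1]; ++k)
+			tout[bindx[k]] += VA[k] * xi;   /* y_j += A_ij * x_i           */
+	}
+#endif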
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
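+
+/* NOTE (editorial): because conjugation is a no-op on the real type double,
+ * the tC kernels above are generated with bodies identical to their tT
+ * counterparts; they differ only in name and doc comment. */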
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
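+
+/* NOTE (editorial): the symmetric kernels visit each stored entry of the
+ * triangle once and update both sides: y_i += A_ij*x_j (accumulated in cacc)
+ * and y_j += A_ij*x_i (scattered through tout), mirroring diagonal entries
+ * of a diagonal block (roff == coff && j == i) only once.  The generated
+ * code peels the first and last nonzero of each row so the unrolled middle
+ * loop can skip the diagonal test, apparently assuming sorted column indices
+ * place a diagonal entry first or last.  A simplified sketch that tests
+ * every entry instead (illustrative only): */
+#if 0
+	rsb_coo_idx_t i;
+	rsb_nnz_idx_t k;
+	for (i = br; i < bc; ++i)
+	{
+		double acc = 0.0;
+		const double xi = trhs[i];
+		for (k = bpntr[i]; k < bpntr[i+1]; ++k)
+		{
+			const rsb_coo_idx_t j = bindx[k];
+			acc += VA[k] * rhs[j];          /* y_i += A_ij * x_j           */
+			if (roff != coff || j != i)     /* don't count diagonal twice  */
+				tout[j] += VA[k] * xi;  /* y_j += A_ij * x_i           */
+		}
+		out[i] += acc;
+	}
+#endif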
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* For the real type double, conjugate transposition coincides with plain transposition: the conjugate-transposed symmetric case reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* For the real type double, conjugate transposition coincides with plain transposition: the conjugate-transposed symmetric case reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* For the real type double, conjugate transposition coincides with plain transposition: the conjugate-transposed symmetric case reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* For the real type double, conjugate transposition coincides with plain transposition: the conjugate-transposed symmetric case reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		out[1*(i*1)+0]=0;
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
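+
+/* NOTE (editorial): the _uauz_ kernels compute y <- op(A).x instead of
+ * y <- y + op(A).x; in the non-transposed variants the only difference from
+ * the corresponding _uaua_ kernel is the out[...] = 0 store that clears each
+ * output element before its row is accumulated. */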
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		out[1*(i*1)+0]=0;
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
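+
+/* NOTE (editorial): in the transposed _uauz_ kernels the output cannot be
+ * cleared row by row, so the whole destination vector is zeroed up front;
+ * judging from its use here, rsb__cblas_Xscal() with a NULL scaling factor
+ * acts as that vector-zeroing helper (an inference, not behaviour documented
+ * in this file). */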
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
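+
+/* NOTE (editorial): the tT _uauz_ variants above zero mdim output elements
+ * while this tC variant zeroes Mdim of them; for non-square blocks these
+ * counts differ, which may merit an upstream check (possibly these kernels
+ * are only ever invoked on square blocks). */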
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For the real type double, conjugate transposition coincides with plain transposition: the conjugate-transposed symmetric case reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For the real type double, conjugate transposition coincides with plain transposition: the conjugate-transposed symmetric case reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The transposed symmetric operation reduces to the untransposed symmetric one */
+	return rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For this real type, the conjugate-transposed symmetric operation reduces to the untransposed symmetric one */
+	return rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For this real type, the conjugate-transposed symmetric operation reduces to the untransposed symmetric one */
+	return rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
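+
+/*
+ * Why the tT and tC wrappers above can forward to the tN kernel: for this
+ * real double type a hermitian-tagged matrix satisfies
+ * \f$ A^H = \bar{A}^T = A^T = A \f$, hence
+ * \f$ A^T \cdot x = A^H \cdot x = A \cdot x \f$
+ * and a single untransposed kernel body serves all three transposition cases.
+ */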
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		out[1*(i*1)+0]=0;
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		out[1*(i*1)+0]=0;
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
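+
+/*
+ * A minimal standalone sketch (plain C99; csr_spmv_unrolled is a
+ * hypothetical helper, not a librsb API) of the 4-way unrolling used by
+ * the untransposed unsymmetric kernels above: the main loop consumes four
+ * nonzeroes per iteration into independent temporaries, and a remainder
+ * loop handles the last 0..3 entries, mirroring the fk/lk structure above.
+ */
+static void csr_spmv_unrolled(int n, const int *ptr, const int *idx,
+                              const double *va, const double *x, double *y)
+{
+	int i, k;
+	for (i = 0; i < n; ++i) {
+		double acc = 0.0;
+		const int lk = ptr[i + 1];
+		for (k = ptr[i]; k + 3 < lk; k += 4)	/* unrolled body */
+			acc += va[k + 0] * x[idx[k + 0]] + va[k + 1] * x[idx[k + 1]]
+			     + va[k + 2] * x[idx[k + 2]] + va[k + 3] * x[idx[k + 3]];
+		for (; k < lk; ++k)			/* remainder */
+			acc += va[k] * x[idx[k]];
+		y[i] = acc;
+	}
+}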
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
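+
+/*
+ * A minimal standalone sketch (plain C99; csr_spmv_transposed is a
+ * hypothetical helper, not a librsb API) of the transposed kernels above:
+ * the output is zeroed up front (the role played above by the
+ * rsb__cblas_Xscal call over the mdim-long result, since beta == 0), and
+ * every stored entry a(i,j) then scatters into y[j] instead of feeding a
+ * per-row dot product.
+ */
+static void csr_spmv_transposed(int nrows, int ncols,
+                                const int *ptr, const int *idx,
+                                const double *va, const double *x, double *y)
+{
+	int i, j, k;
+	for (j = 0; j < ncols; ++j)	/* A^T is ncols x nrows, so y has ncols entries */
+		y[j] = 0.0;
+	for (i = 0; i < nrows; ++i) {
+		const double xi = x[i];	/* hoisted per-row factor, like "bt" above */
+		for (k = ptr[i]; k < ptr[i + 1]; ++k)
+			y[idx[k]] += va[k] * xi;
+	}
+}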
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,mdim*1,NULL,out,1); /* the (conjugate-)transposed product has mdim entries, matching the tT kernel above */
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,mdim*1,NULL,out,1); /* the (conjugate-)transposed product has mdim entries, matching the tT kernel above */
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	/* The transposed symmetric operation reduces to the untransposed symmetric one */
+	return rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	/* The transposed symmetric operation reduces to the untransposed symmetric one */
+	return rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	/* For this real type, the conjugate-transposed symmetric operation reduces to the untransposed symmetric one */
+	return rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	/* For this real type, the conjugate-transposed symmetric operation reduces to the untransposed symmetric one */
+	return rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	/* The transposed symmetric operation reduces to the untransposed symmetric one */
+	return rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	/* The transposed symmetric operation reduces to the untransposed symmetric one */
+	return rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	/* For this real type, the conjugate-transposed symmetric operation reduces to the untransposed symmetric one */
+	return rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	/* For this real type, the conjugate-transposed symmetric operation reduces to the untransposed symmetric one */
+	return rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
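+
+/*
+ * A minimal standalone sketch (plain C99; csr_spmv_axpy is a hypothetical
+ * helper, not a librsb API) of the "uxua" variant above: y is updated
+ * rather than overwritten, and alpha multiplies the accumulated row dot
+ * product once per row instead of once per nonzero, exactly as
+ * out[(1*i*1)]+=(alpha)*cacc does above.
+ */
+static void csr_spmv_axpy(int n, const int *ptr, const int *idx,
+                          const double *va, double alpha,
+                          const double *x, double *y)
+{
+	int i, k;
+	for (i = 0; i < n; ++i) {
+		double acc = 0.0;
+		for (k = ptr[i]; k < ptr[i + 1]; ++k)
+			acc += va[k] * x[idx[k]];
+		y[i] += alpha * acc;	/* one multiply by alpha per row */
+	}
+}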
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
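+
+/*
+ * In the transposed "uxua" kernels above alpha is folded into the per-row
+ * factor bt = alpha * x[i] once, so the inner scatter still costs one
+ * multiply and one add per nonzero. A minimal standalone sketch (plain
+ * C99; csr_spmv_axpy_transposed is a hypothetical helper, not a librsb
+ * API):
+ */
+static void csr_spmv_axpy_transposed(int nrows, const int *ptr,
+                                     const int *idx, const double *va,
+                                     double alpha, const double *x,
+                                     double *y)
+{
+	int i, k;
+	for (i = 0; i < nrows; ++i) {
+		const double bt = alpha * x[i];	/* alpha applied once per row */
+		for (k = ptr[i]; k < ptr[i + 1]; ++k)
+			y[idx[k]] += va[k] * bt;	/* y += alpha * (A^T x) scatter */
+	}
+}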
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The transposed symmetric operation reduces to the untransposed symmetric one */
+	return rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* The transposed symmetric operation reduces to the untransposed symmetric one */
+	return rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For this real type, the conjugate-transposed symmetric operation reduces to the untransposed symmetric one */
+	return rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For this real type, the conjugate-transposed symmetric operation reduces to the untransposed symmetric one */
+	return rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
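+/*
+ * Illustrative sketch (compiled out; helper name is hypothetical): the
+ * symmetric/Hermitian kernels above make a single pass over the stored
+ * triangle.  Each stored coefficient a = A(i,j) contributes twice: to the
+ * dot product of row i (cacc) and, scattered through tout, to the mirrored
+ * row j -- except the diagonal, which must be counted only once.  Stripped
+ * of the 4-way unrolling and the first/last element peeling, the scheme is:
+ */
+#if 0
+static rsb_err_t spmv_sym_reference_sketch(const double *restrict VA,
+	const double *restrict rhs, double *restrict out,
+	const rsb_nnz_idx_t *restrict bpntr, const rsb_coo_idx_t *restrict bindx,
+	rsb_coo_idx_t br, rsb_coo_idx_t bc,
+	rsb_coo_idx_t roff, rsb_coo_idx_t coff, const double *alphap)
+{
+	const double alpha = *alphap;
+	const double *trhs = rhs + (roff - coff);	/* x re-based to row coordinates */
+	double *tout = out + (coff - roff);	/* y re-based to column coordinates */
+	rsb_coo_idx_t i;
+	rsb_nnz_idx_t k;
+
+	for (i = br; i < bc; ++i)	/* rows of the bounded box [br,bc) */
+	{
+		double cacc = 0.0;	/* accumulator for y(i) */
+		const double bt = alpha * trhs[i];	/* alpha*x(i), scattered below */
+		for (k = bpntr[i]; k < bpntr[i + 1]; ++k)
+		{
+			const rsb_coo_idx_t j = bindx[k];
+			cacc += VA[k] * rhs[j];	/* row-i contribution */
+			if (roff != coff || j != i)	/* do not double the diagonal */
+				tout[j] += VA[k] * bt;	/* mirrored, column-j contribution */
+		}
+		out[i] += alpha * cacc;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+#endif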
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
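+/*
+ * The _C_/_H_ kernel pairs in this file differ only in the column-index
+ * type: rsb_coo_idx_t (full width) versus rsb_half_idx_t (halfword), the
+ * latter halving index memory traffic.  The bodies are otherwise identical,
+ * which a type-parameterised generator makes explicit; a compiled-out sketch
+ * (macro and helper names are hypothetical):
+ */
+#if 0
+#define DEF_ROW_DOT(SUFFIX, IDXT) \
+static double row_dot_##SUFFIX(const double *restrict VA, \
+	const IDXT *restrict bindx, const double *restrict rhs, \
+	rsb_nnz_idx_t fk, rsb_nnz_idx_t lk) \
+{ \
+	double acc = 0.0; \
+	rsb_nnz_idx_t k; \
+	for (k = fk; k < lk; ++k) \
+		acc += VA[k] * rhs[bindx[k]]; \
+	return acc; \
+}
+DEF_ROW_DOT(C, rsb_coo_idx_t)	/* full-width column indices */
+DEF_ROW_DOT(H, rsb_half_idx_t)	/* halfword column indices   */
+#endif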
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For symmetric A the transposed product equals the untransposed one, so delegate to the untransposed (tN) kernel. */
+	return rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For symmetric A the transposed product equals the untransposed one, so delegate to the untransposed (tN) kernel. */
+	return rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For real data the conjugate transpose equals the transpose; for symmetric A that is the matrix itself, so delegate to the untransposed (tN) kernel. */
+	return rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For real data the conjugate transpose equals the transpose; for symmetric A that is the matrix itself, so delegate to the untransposed (tN) kernel. */
+	return rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
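+/*
+ * Illustrative sketch (compiled out; names hypothetical): the unsymmetric tN
+ * kernels are plain per-row dot products.  The generated loop is unrolled by
+ * four to expose independent loads and multiply-adds; a remainder loop then
+ * consumes the at-most-three leftover coefficients.  Note that the unrolled
+ * form may associate the floating-point sums differently than a scalar loop:
+ */
+#if 0
+static double row_dot_unrolled_sketch(const double *restrict VA,
+	const rsb_coo_idx_t *restrict bindx, const double *restrict rhs,
+	rsb_nnz_idx_t fk, rsb_nnz_idx_t lk)
+{
+	double cacc = 0.0;
+	rsb_nnz_idx_t k = fk;
+	for (; k + 3 < lk; k += 4)	/* 4-way unrolled body */
+		cacc += VA[k + 0] * rhs[bindx[k + 0]]
+		      + VA[k + 1] * rhs[bindx[k + 1]]
+		      + VA[k + 2] * rhs[bindx[k + 2]]
+		      + VA[k + 3] * rhs[bindx[k + 3]];
+	for (; k < lk; ++k)	/* remainder (up to 3 elements) */
+		cacc += VA[k] * rhs[bindx[k]];
+	return cacc;	/* the caller then does: out[i] += alpha * cacc */
+}
+#endif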
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
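+/*
+ * Illustrative sketch (compiled out; name hypothetical): the tT kernels read
+ * A by rows but compute with A^T, so each stored a = A(i,j) must update y(j)
+ * rather than y(i).  That turns the row dot product into a scatter: x(i) is
+ * scaled once per row (bt) and pushed into every referenced column.  For
+ * real data the tC kernels coincide with these tT bodies.
+ */
+#if 0
+static void spmv_transposed_scatter_sketch(const double *restrict VA,
+	const rsb_coo_idx_t *restrict bindx, const rsb_nnz_idx_t *restrict bpntr,
+	const double *restrict trhs, double *restrict tout,
+	rsb_coo_idx_t br, rsb_coo_idx_t bc, double alpha)
+{
+	rsb_coo_idx_t i;
+	rsb_nnz_idx_t k;
+
+	for (i = br; i < bc; ++i)	/* rows of A == columns of A^T */
+	{
+		const double bt = alpha * trhs[i];	/* alpha*x(i), hoisted */
+		for (k = bpntr[i]; k < bpntr[i + 1]; ++k)
+			tout[bindx[k]] += VA[k] * bt;	/* scatter into y */
+	}
+}
+#endif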
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
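+/*
+ * Note on the dI kernels above: per the NOTE in each body, the implicit unit
+ * diagonal is not applied here.  The assumption (to be confirmed at the
+ * caller level, as the NOTE says) is that the caller completes the product
+ * with one extra axpy-like pass over the diagonal block, as in this
+ * compiled-out, hypothetical sketch:
+ */
+#if 0
+static void add_implicit_unit_diagonal_sketch(double *restrict out,
+	const double *restrict rhs, rsb_coo_idx_t n, double alpha)
+{
+	rsb_coo_idx_t i;
+	for (i = 0; i < n; ++i)
+		out[i] += alpha * rhs[i];	/* unit diagonal: A(i,i) == 1 */
+}
+#endif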
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* For symmetric A the transposed product equals the untransposed one, so delegate to the untransposed (tN) kernel. */
+	return rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* For symmetric A the transposed product equals the untransposed one, so delegate to the untransposed (tN) kernel. */
+	return rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* For real data the conjugate transpose equals the transpose; for symmetric A that is the matrix itself, so delegate to the untransposed (tN) kernel. */
+	return rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* For real data the conjugate transpose equals the transpose; for symmetric A that is the matrix itself, so delegate to the untransposed (tN) kernel. */
+	return rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* For symmetric A the transposed product equals the untransposed one, so delegate to the untransposed (tN) kernel. */
+	return rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* For symmetric A the transposed product equals the untransposed one, so delegate to the untransposed (tN) kernel. */
+	return rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* For real data the conjugate transpose equals the transpose; for symmetric A that is the matrix itself, so delegate to the untransposed (tN) kernel. */
+	return rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* For real data the conjugate transpose equals the transpose; for symmetric A that is the matrix itself, so delegate to the untransposed (tN) kernel. */
+	return rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
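+/*
+ * The unua kernels in this stretch hard-code the scaling to -1 (note the
+ * "out[...] += (-1)*cacc" updates and the missing alphap parameter): they
+ * compute y <- y - op(A) x.  Functionally each matches the corresponding
+ * uxua kernel called with alpha = -1, as in this compiled-out sketch
+ * (wrapper name hypothetical; the uxua kernel it calls is defined elsewhere
+ * in this file):
+ */
+#if 0
+static rsb_err_t spmv_unua_via_uxua_sketch(const double *restrict VA,
+	const double *restrict rhs, double *restrict out,
+	const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim,
+	const rsb_coo_idx_t *restrict bindx, const rsb_nnz_idx_t *restrict bpntr,
+	const rsb_nnz_idx_t *restrict indptr, const rsb_coo_idx_t *restrict rpntr,
+	const rsb_coo_idx_t *restrict cpntr, const rsb_coo_idx_t br,
+	const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff,
+	const rsb_flags_t flags)
+{
+	const double minus_one = -1.0;
+	return rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sU_dE_uG(VA, rhs, out,
+		Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
+		br, bc, roff, coff, flags, &minus_one);
+}
+#endif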
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
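+/*
+ * On trhs/tout in the transposed kernels: a leaf submatrix sits at global
+ * offset (roff, coff), and rhs/out arrive based for the untransposed
+ * product, i.e. rhs[j] addresses x(coff + j) and out[i] addresses
+ * y(roff + i).  The transposed kernels instead read x along rows and write
+ * y along columns, so they re-base the two vectors once:
+ * \f$ trhs = rhs + (roff - coff) \Rightarrow trhs[i] = x(roff + i), \f$
+ * \f$ tout = out + (coff - roff) \Rightarrow tout[j] = y(coff + j). \f$
+ * (Index algebra inferred from the kernels above; offsets are in elements.)
+ */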
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For symmetric A the transposed product equals the untransposed one, so delegate to the untransposed (tN) kernel. */
+	return rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
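+/*
+ * The four wrappers above carry no arithmetic of their own: for a symmetric
+ * matrix, \f$A^T x = A x\f$, and since the scalar type here is the real type
+ * double, \f$A^H\f$ coincides with \f$A^T\f$ as well, so both the tT and the
+ * tC variants delegate to the untransposed (tN) kernel unchanged.
+ */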
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
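+/*
+ * For the real type double no conjugation is involved, so the Hermitian (sH)
+ * kernel bodies above are textually identical to their symmetric (sS)
+ * counterparts; the distinction becomes meaningful only for complex types.
+ */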
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
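+/*
+ * Illustrative sketch, not part of the upstream sources: the two unsymmetric
+ * kernels above are 4-way manual unrollings of the plain CSR row loop below,
+ * where bpntr[i] and bpntr[i+1] bound the nonzeroes of row i and bindx holds
+ * their column indices.
+ */
+#if 0
+static void ref_csr_spmv_unua(const double *VA, const double *rhs,
+	double *out, const rsb_coo_idx_t *bindx, const rsb_nnz_idx_t *bpntr,
+	const rsb_coo_idx_t br, const rsb_coo_idx_t bc)
+{
+	rsb_coo_idx_t i;
+	rsb_nnz_idx_t k;
+
+	for (i = br; i < bc; ++i)
+	{
+		double acc = 0.0;
+		for (k = bpntr[i]; k < bpntr[i + 1]; ++k)
+			acc += VA[k] * rhs[bindx[k]]; /* gather from x */
+		out[i] -= acc;                        /* y <- y - A*x */
+	}
+}
+#endif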
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
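+/*
+ * Illustrative sketch, not part of the upstream sources: in the transposed
+ * kernels above, row i of A acts as column i of A^T, so each stored entry is
+ * scattered into the output instead of being gathered from the input.
+ */
+#if 0
+static void ref_csr_spmv_unua_trans(const double *VA, const double *rhs,
+	double *out, const rsb_coo_idx_t *bindx, const rsb_nnz_idx_t *bpntr,
+	const rsb_coo_idx_t br, const rsb_coo_idx_t bc,
+	const rsb_coo_idx_t roff, const rsb_coo_idx_t coff)
+{
+	const double *trhs = rhs + (roff - coff);
+	double *tout = out + (coff - roff);
+	rsb_coo_idx_t i;
+	rsb_nnz_idx_t k;
+
+	for (i = br; i < bc; ++i)
+		for (k = bpntr[i]; k < bpntr[i + 1]; ++k)
+			tout[bindx[k]] -= VA[k] * trhs[i]; /* scatter into y */
+}
+#endif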
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+1*(roff-coff);
+	double *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*1];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*1];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*1];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*1];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
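+/*
+ * Hypothetical usage sketch, not part of the upstream sources: unlike the
+ * unua kernels above, the sasa kernels take explicit strides incx/incy, so x
+ * and y may be non-contiguous (e.g. interleaved vectors).  Operating on
+ * every second element of hypothetical arrays x and y would look like:
+ *
+ *   err = rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sU_dE_uG(VA, x, y,
+ *           Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
+ *           br, bc, roff, coff, flags, 2, 2);
+ */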
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
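+/*
+ * Note on the loop shape above: the first and the last stored nonzero of
+ * each row are handled outside the unrolled loop.  This peeling confines the
+ * (roff!=coff || j!=i) guard, which keeps a diagonal entry from being
+ * applied twice, to the row ends where a triangle-stored diagonal can sit,
+ * leaving the interior 4-way unrolled loop free of that branch.
+ */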
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
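+/*
+ * Editorial note, not part of the upstream sources: the tT and tC variants
+ * around here can forward to the tN kernel because, for a real symmetric
+ * matrix, A^T == A, and with real entries conjugation is the identity, so
+ * A^H == A^T == A. A dispatch sketch with a hypothetical trans flag:
+ */
+typedef enum { TRANS_N, TRANS_T, TRANS_C } sketch_trans_t;
+static sketch_trans_t effective_trans_real_symmetric(sketch_trans_t t)
+{
+	(void)t;		/* every case collapses to the same multiply */
+	return TRANS_N;
+}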
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric conjugate-transposed reverts to symmetric not transposed (conjugation is the identity on real data) */
+	return rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric conjugate-transposed reverts to symmetric not transposed (conjugation is the identity on real data) */
+	return rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
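+/*
+ * Editorial sketch, not part of the upstream sources: note how the
+ * symmetric/Hermitian kernels peel the first and the last entry of each
+ * row out of the unrolled loop and apply the diagonal guard only there.
+ * That is sound if, inside a diagonal block, a row's diagonal entry can
+ * only be its first or last stored element (column-sorted triangular
+ * storage); interior entries then never need the test. In miniature:
+ */
+#include <stddef.h>
+static void symmetric_row_peeled(const size_t *ja, const double *va,
+		size_t fk, size_t lk, size_t i,
+		const double *x, double *y)
+{
+	size_t k = fk;
+	if (k == lk)
+		return;
+	y[i] += va[k] * x[ja[k]];	/* first entry: may be the diagonal */
+	if (ja[k] != i)
+		y[ja[k]] += va[k] * x[i];
+	++k;
+	for (; k + 1 < lk; ++k) {	/* interior: guard-free, unrollable */
+		y[i] += va[k] * x[ja[k]];
+		y[ja[k]] += va[k] * x[i];
+	}
+	if (k < lk) {			/* last entry: may be the diagonal */
+		y[i] += va[k] * x[ja[k]];
+		if (ja[k] != i)
+			y[ja[k]] += va[k] * x[i];
+	}
+}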
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Hermitian transposed reverts to not transposed (a real Hermitian matrix is symmetric) */
+	return rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Hermitian transposed reverts to not transposed (a real Hermitian matrix is symmetric) */
+	return rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Hermitian conjugate-transposed reverts to not transposed, since A == A^H */
+	return rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Hermitian conjugate-transposed reverts to not transposed, since A == A^H */
+	return rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: diagonal-implicit storage is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
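+/*
+ * Editorial sketch, not part of the upstream sources: the unsymmetric
+ * kernels above unroll the per-row dot product four ways; `k+3<lk` keeps
+ * all four loads in range and the scalar loop that follows picks up the
+ * (at most three) leftover entries from wherever k stopped. RSB_LIKELY
+ * presumably wraps a compiler branch hint. The same idiom on two plain
+ * arrays:
+ */
+static double dot_unrolled4(const double *a, const double *b, int n)
+{
+	double acc = 0.0;
+	int k = 0;
+	for (; k + 3 < n; k += 4)	/* main, 4-way unrolled body */
+		acc += a[k] * b[k] + a[k+1] * b[k+1]
+		     + a[k+2] * b[k+2] + a[k+3] * b[k+3];
+	for (; k < n; ++k)		/* remainder */
+		acc += a[k] * b[k];
+	return acc;
+}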
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: diagonal-implicit storage is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: diagonal-implicit storage is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
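+/*
+ * Editorial note, not part of the upstream sources: the transposed kernels
+ * index x by the local row i and y by the local column j, so they re-base
+ * the vector pointers once per call instead of adding roff/coff inside the
+ * loop. Assuming, as the offsets suggest, that rhs enters pointing at the
+ * submatrix's column origin and out at its row origin, the sketch below
+ * (unit strides, hypothetical names) shows the aliasing this sets up:
+ */
+static void rebase_sketch(const double *rhs, double *out,
+		long roff, long coff,
+		const double **trhs, double **tout)
+{
+	*trhs = rhs + (roff - coff);	/* (*trhs)[i] reads x at global row  roff+i */
+	*tout = out + (coff - roff);	/* (*tout)[j] hits  y at global col. coff+j */
+}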
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: diagonal-implicit storage is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: diagonal-implicit storage is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: diagonal-implicit storage is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: diagonal-implicit storage is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: diagonal-implicit storage is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: diagonal-implicit storage is not handled here; it is handled at the caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: diagonal-implicit storage is not handled here; it is handled at the caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: diagonal-implicit storage is not handled here; it is handled at the caller level. */
+	/* Symmetric conjugate-transposed reverts to symmetric not transposed (conjugation is the identity on real data) */
+	return rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: diagonal-implicit storage is not handled here; it is handled at the caller level. */
+	/* Symmetric conjugate-transposed reverts to symmetric not transposed (conjugation is the identity on real data) */
+	return rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: diagonal-implicit storage is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: diagonal-implicit storage is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: diagonal-implicit storage is not handled here; it is handled at the caller level. */
+	/* Hermitian transposed reverts to not transposed (a real Hermitian matrix is symmetric) */
+	return rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: diagonal-implicit storage is not handled here; it is handled at the caller level. */
+	/* Hermitian transposed reverts to not transposed (a real Hermitian matrix is symmetric) */
+	return rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: diagonal-implicit storage is not handled here; it is handled at the caller level. */
+	/* Hermitian conjugate-transposed reverts to not transposed, since A == A^H */
+	return rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: diagonal-implicit storage is not handled here; it is handled at the caller level. */
+	/* Hermitian conjugate-transposed reverts to not transposed, since A == A^H */
+	return rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
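+/*
+ * Editorial sketch, not part of the upstream sources: the sxsa variants
+ * apply alpha once per row rather than once per nonzero -- the gather
+ * kernel above scales the finished accumulator (out[i] += alpha*cacc),
+ * while the scatter kernels further down fold alpha into the broadcast
+ * value bt = alpha*x[i] before scattering. Reference for the gather
+ * form, with hypothetical names:
+ */
+#include <stddef.h>
+static void csr_spmv_alpha_ref(size_t nrows, const size_t *ia,
+		const size_t *ja, const double *va, double alpha,
+		const double *x, double *y)
+{
+	size_t i, k;
+	for (i = 0; i < nrows; ++i) {
+		double cacc = 0.0;		/* per-row accumulator */
+		for (k = ia[i]; k < ia[i+1]; ++k)
+			cacc += va[k] * x[ja[k]];
+		y[i] += alpha * cacc;		/* alpha applied once per row */
+	}
+}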
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
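+			/* The first and last stored entries of the row are peeled off with an
+			   explicit diagonal guard (j != i on diagonal blocks), so a diagonal
+			   element is never scattered twice; the rest of the row goes through
+			   the 4-way unrolled loop, which omits that test. */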
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
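+
+/*
+ * The symmetric (sS) kernels exploit A == A^T: each stored entry a(i,j)
+ * contributes a(i,j)*x(j) to y(i) (accumulated in cacc) and a(i,j)*x(i) to
+ * y(j) (the scatter on tout), so only one triangle of A needs to be stored.
+ * A rough plain-CSR restatement of the idea (a sketch, assuming unit strides
+ * and a single diagonal block, not the code actually used):
+ */
+#if 0
+for (i = 0; i < n; ++i)
+	for (k = bpntr[i]; k < bpntr[i + 1]; ++k) {
+		const rsb_coo_idx_t j = bindx[k];
+		y[i] += alpha * VA[k] * x[j];
+		if (j != i)	/* do not count a diagonal entry twice */
+			y[j] += alpha * VA[k] * x[i];
+	}
+#endif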
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
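+	/* (For the real double type conjugation is a no-op, so the conjugate-transpose
+	   case reduces to the untransposed symmetric kernel as well.) */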
+	return rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
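+	/* A sketch of what that caller-level handling presumably amounts to (an
+	   assumption, not visible in this kernel): out[i*incy] += alpha*rhs[i*incx]
+	   over the rows of the diagonal block. */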
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double *a=VA;
+		register double cacc = ((double)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
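+
+/* With the implicit diagonal deferred to the caller, the body above is
+   identical to that of the diagonal-explicit kernel
+   rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_uu_sU_dE_uG further up. */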
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double a_1 =VA[k+1 ];
+			double c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double a_2 =VA[k+2 ];
+			double c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double a_3 =VA[k+3 ];
+			double c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double a_0 =VA[k+0 ];
+			double c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double alpha=*alphap;
+	const double *trhs = rhs+(incx)*(roff-coff);
+	double *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double cacc = ((double)(0));
+		const double bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double b_1 =rhs[1*(j_1 )*(incx)];
+			const double a_1 =VA[k+1 ];
+			double c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double b_2 =rhs[1*(j_2 )*(incx)];
+			const double a_2 =VA[k+2 ];
+			double c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double b_3 =rhs[1*(j_3 )*(incx)];
+			const double a_3 =VA[k+3 ];
+			double c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double b_0 =rhs[1*(j_0 )*(incx)];
+			const double a_0 =VA[k+0 ];
+			double c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
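+
+/*
+ * Editor's note: a hedged usage sketch for the kernel above.  It assumes the
+ * usual CSR reading of the generated arguments (bpntr = row pointers,
+ * bindx = column indices, VA = values) and that, per the "bounded box"
+ * comment in the loop, br and bc delimit the row range to process.  The
+ * unused BCSR arguments are passed as NULL/0 purely for illustration, and
+ * the index types are assumed to come from the librsb internal headers.
+ */
+#include <stdio.h>
+static void uaua_example(void)
+{
+	/* The 3x3 matrix [1 0 2; 0 3 0; 4 0 5] in CSR form. */
+	const float VA[] = { 1, 2, 3, 4, 5 };
+	const rsb_coo_idx_t bindx[] = { 0, 2, 1, 0, 2 };
+	const rsb_nnz_idx_t bpntr[] = { 0, 2, 3, 5 };
+	const float x[] = { 1, 1, 1 };
+	float y[] = { 0, 0, 0 };
+
+	rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sU_dE_uG(VA, x, y, 3, 3,
+			bindx, bpntr, NULL, NULL, NULL, 0, 3, 0, 0, 0);
+	printf("y = %g %g %g\n", y[0], y[1], y[2]); /* expect: y = 3 3 9 */
+}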
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
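+
+/*
+ * Editor's note: the _H_ kernels differ from their _C_ twins only in the
+ * column index type: rsb_half_idx_t is a narrower integer (typically an
+ * unsigned short) than rsb_coo_idx_t, roughly halving index storage and
+ * memory traffic for submatrices whose local column span fits it.  A
+ * minimal sketch of the narrowing step such kernels rely on (the helper
+ * name is an assumption of this sketch):
+ */
+static int to_half_indices(const rsb_coo_idx_t *src, rsb_half_idx_t *dst,
+		rsb_nnz_idx_t nnz)
+{
+	rsb_nnz_idx_t k;
+	for (k = 0; k < nnz; ++k)
+	{
+		if (src[k] > (rsb_coo_idx_t)(rsb_half_idx_t)-1)
+			return -1; /* column index exceeds the half-index range */
+		dst[k] = (rsb_half_idx_t)src[k];
+	}
+	return 0;
+}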
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
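+
+/*
+ * Editor's note: in the transposed kernels above, each stored row i of A
+ * contributes VA[k]*x[i] to y[bindx[k]], i.e. a gather at the row position
+ * and a scatter along the columns; trhs and tout merely pre-apply the
+ * global submatrix offsets (roff, coff) so the loop can use local indices.
+ * The same semantics without offsets, as a minimal plain-CSR sketch:
+ */
+static void ref_spmv_transposed(int nr, const float *VA, const int *bindx,
+		const int *bpntr, const float *x, float *y)
+{
+	int i, k;
+	for (i = 0; i < nr; ++i)
+		for (k = bpntr[i]; k < bpntr[i + 1]; ++k)
+			y[bindx[k]] += VA[k] * x[i]; /* y <- y + A^T * x */
+}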
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
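+
+/*
+ * Editor's note: the symmetric kernels peel the first and the last nonzero
+ * of each row out of the unrolled loop so that the j != i test (guarding a
+ * diagonal entry, possible only when roff == coff) is paid only where a
+ * diagonal can sit when triangular rows are sorted; interior nonzeroes
+ * update both the row accumulator cacc and the mirrored tout entry without
+ * a branch.  The semantics, as a minimal sketch over plain CSR holding one
+ * triangle of a symmetric A:
+ */
+static void ref_spmv_symmetric(int nr, const float *VA, const int *bindx,
+		const int *bpntr, const float *x, float *y)
+{
+	int i, k;
+	for (i = 0; i < nr; ++i)
+		for (k = bpntr[i]; k < bpntr[i + 1]; ++k)
+		{
+			const int j = bindx[k];
+			y[i] += VA[k] * x[j];         /* stored entry   */
+			if (j != i)
+				y[j] += VA[k] * x[i]; /* mirrored entry */
+		}
+}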
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
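+
+/*
+ * Editor's note: as the NOTE above says, the _dI_ (implicit diagonal)
+ * kernels do not add the unit diagonal themselves; a caller-level pass is
+ * expected to account for it.  What such a pass amounts to, as a minimal
+ * sketch (the function name is an assumption of this sketch):
+ */
+static void add_implicit_unit_diagonal(int n, const float *x, float *y)
+{
+	int i;
+	for (i = 0; i < n; ++i)
+		y[i] += x[i]; /* y <- y + I*x for the non-stored unit diagonal */
+}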
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reduces to the untransposed symmetric kernel */
+	return rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reduces to the untransposed symmetric kernel */
+	return rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		out[1*(i*1)+0]=0;
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
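+
+/*
+ * Editorial sketch (not part of the generated sources): the kernel above is
+ * a four-way unrolled CSR row-gather product computing y = A x with the
+ * output overwritten; judging by the doc comments, the "uauz" variants zero
+ * y first, while the "uaua" variants accumulate into it.  The unrolling
+ * presumably exposes instruction-level parallelism, with the trailing loop
+ * handling leftover nonzeroes.  A minimal un-unrolled model, assuming plain
+ * int indices in place of rsb_coo_idx_t/rsb_nnz_idx_t:
+ */
+static void csr_spmv_sketch(const float *VA, const int *bindx,
+                            const int *bpntr, int nrows,
+                            const float *x, float *y)
+{
+	int i, k;
+	for (i = 0; i < nrows; ++i) {
+		float acc = 0.0f;	/* per-row accumulator, as 'cacc' above */
+		for (k = bpntr[i]; k < bpntr[i + 1]; ++k)
+			acc += VA[k] * x[bindx[k]];	/* gather a_ij * x_j */
+		y[i] = acc;	/* overwrite: y <- A x */
+	}
+}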
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		out[1*(i*1)+0]=0;
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
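+
+/*
+ * Editorial sketch (not part of the generated sources): the transposed
+ * kernels walk the rows of A but scatter into the output, since row i of A
+ * is column i of A^T; the output is cleared first (which the
+ * rsb__cblas_Xscal(..., NULL, ...) call above appears to do).  The scatter
+ * touches y at irregular offsets, so it typically has worse locality than
+ * the row-gather form.  A minimal model, under the same simplified-index
+ * assumption as the earlier sketch:
+ */
+static void csr_spmv_transposed_sketch(const float *VA, const int *bindx,
+                                       const int *bpntr, int nrows, int ncols,
+                                       const float *x, float *y)
+{
+	int i, k;
+	for (i = 0; i < ncols; ++i)
+		y[i] = 0.0f;	/* y <- 0 before the scatter */
+	for (i = 0; i < nrows; ++i) {
+		const float xi = x[i];	/* 'bt' above: one input element per row */
+		for (k = bpntr[i]; k < bpntr[i + 1]; ++k)
+			y[bindx[k]] += VA[k] * xi;	/* scatter a_ij * x_i into y_j */
+	}
+}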
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
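+
+/*
+ * Editorial sketch (not part of the generated sources): with symmetric
+ * storage only one triangle is kept, so each stored entry a_ij contributes
+ * both to y_i (the 'cacc' gather) and to y_j (the 'bt' scatter), except
+ * that a diagonal entry of a diagonal block must contribute only once.
+ * The kernel above guards only the peeled first and last nonzero of each
+ * row, apparently relying on the diagonal sitting at a row end; the
+ * simplified model below, assuming plain int indices and roff == coff == 0,
+ * guards every entry instead.  Either way, each stored nonzero is visited
+ * once, halving memory traffic relative to storing both triangles.
+ */
+static void csr_spmv_symmetric_sketch(const float *VA, const int *bindx,
+                                      const int *bpntr, int nrows,
+                                      const float *x, float *y)
+{
+	int i, k;
+	for (i = 0; i < nrows; ++i)
+		y[i] = 0.0f;
+	for (i = 0; i < nrows; ++i) {
+		float acc = 0.0f;
+		for (k = bpntr[i]; k < bpntr[i + 1]; ++k) {
+			const int j = bindx[k];
+			acc += VA[k] * x[j];	/* a_ij * x_j into y_i */
+			if (j != i)
+				y[j] += VA[k] * x[i];	/* mirrored a_ji * x_i into y_j */
+		}
+		y[i] += acc;
+	}
+}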
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	/* Symmetric transposed reduces to the untransposed symmetric kernel */
+	return rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	/* Symmetric transposed reduces to the untransposed symmetric kernel */
+	return rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	/* Symmetric transposed reduces to the untransposed symmetric kernel */
+	return rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	/* Symmetric transposed reduces to the untransposed symmetric kernel */
+	return rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	/* Symmetric transposed reduces to the untransposed symmetric kernel */
+	return rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	/* Symmetric transposed reduces to the untransposed symmetric kernel */
+	return rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	/* Symmetric transposed reduces to the untransposed symmetric kernel */
+	return rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	/* Symmetric transposed reduces to the untransposed symmetric kernel */
+	return rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		out[1*(i*1)+0]=0;
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		out[1*(i*1)+0]=0;
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reduces to the untransposed symmetric kernel */
+	return rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reduces to the untransposed symmetric kernel */
+	return rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reduces to the untransposed symmetric kernel */
+	return rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reduces to the untransposed symmetric kernel */
+	return rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
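+/* Editorial note: in these kernel names "uauz" evidently encodes unit alpha
+ * and zero beta (y is overwritten), which is why each uauz body starts by
+ * clearing out via rsb__cblas_Xscal(...,NULL,out,1); the "uxua" kernels below
+ * instead accumulate alpha*A*x into y.  A portable stand-in for the clearing
+ * step (an assumption, not the librsb API; memset-to-zero yields 0.0f on
+ * IEEE-754 targets) would be: */
+#include <string.h>
+static void zero_float_vector(float *y, size_t n)
+{
+	memset(y, 0, n * sizeof *y); /* beta = 0: discard previous contents of y */
+}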
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
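+/* Editorial note: with a real symmetric matrix, A^T = A, and for real scalars
+ * conjugation is the identity, so A^H = A as well; that is why both the tT and
+ * the tC variants simply forward to the tN kernel.  A hypothetical dispatch
+ * helper (names are illustrative, not librsb's) makes the collapse explicit: */
+typedef void (*spmv_kernel_fn)(const float *, const int *, const int *, int,
+                               const float *, float *);
+enum sketch_trans { SKETCH_N, SKETCH_T, SKETCH_C };
+static spmv_kernel_fn pick_symmetric_kernel(enum sketch_trans t,
+                                            spmv_kernel_fn tn_kernel)
+{
+	(void)t; /* N, T and C coincide when A = A^T over the reals */
+	return tn_kernel;
+}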
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
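+/* Editorial sketch: the unsymmetric, untransposed uxua kernel above is a plain
+ * CSR row dot product, manually unrolled by four with a scalar remainder loop,
+ * and the accumulator is scaled by alpha once per row rather than once per
+ * nonzero.  A self-contained rendition of the same technique (assumed CSR
+ * arrays, not the librsb calling convention): */
+static void csr_spmv_axpy_sketch(const float *VA, const int *bindx,
+                                 const int *bpntr, int n, float alpha,
+                                 const float *x, float *y)
+{
+	int i, k;
+	for (i = 0; i < n; ++i) {
+		float acc = 0.0f;
+		const int lk = bpntr[i + 1];
+		k = bpntr[i];
+		for (; k + 3 < lk; k += 4)   /* 4-way unrolled body */
+			acc += VA[k] * x[bindx[k]] + VA[k + 1] * x[bindx[k + 1]]
+			     + VA[k + 2] * x[bindx[k + 2]] + VA[k + 3] * x[bindx[k + 3]];
+		for (; k < lk; ++k)          /* remainder of the row */
+			acc += VA[k] * x[bindx[k]];
+		y[i] += alpha * acc;         /* y <- y + alpha * (A * x), row i */
+	}
+}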
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
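+/* Editorial note: the _C_ and _H_ kernel pairs differ only in the type of
+ * bindx, rsb_coo_idx_t versus rsb_half_idx_t; the latter is presumably a
+ * 16-bit integer, halving the index bandwidth of submatrices with small
+ * column spans.  The generator effectively stamps one body out per index
+ * type, which a macro can mimic (sketch with assumed toy types): */
+#define DEF_SPMV_SKETCH(NAME, IDX)                                        \
+static void NAME(const float *VA, const IDX *bindx, const int *bpntr,     \
+                 int n, const float *x, float *y)                         \
+{                                                                         \
+	int i, k;                                                         \
+	for (i = 0; i < n; ++i)                                           \
+		for (k = bpntr[i]; k < bpntr[i + 1]; ++k)                 \
+			y[i] += VA[k] * x[bindx[k]];                      \
+}
+DEF_SPMV_SKETCH(spmv_full_index_sketch, int)            /* like the _C_ kernels */
+DEF_SPMV_SKETCH(spmv_half_index_sketch, unsigned short) /* like the _H_ kernels */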
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
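+/* Editorial sketch: the transposed (tT) kernel above cannot form a row dot
+ * product, so it walks the rows of A and scatters into y at the column
+ * positions instead, after hoisting bt = alpha*x[i] out of the inner loop so
+ * each nonzero costs one multiply-add; the roff/coff-shifted trhs and tout
+ * pointers place the update in the right submatrix.  The same scheme over
+ * plain CSR arrays (an illustration, not the librsb interface): */
+static void csr_spmv_trans_axpy_sketch(const float *VA, const int *bindx,
+                                       const int *bpntr, int n, float alpha,
+                                       const float *x, float *y)
+{
+	int i, k;
+	for (i = 0; i < n; ++i) {
+		const float bt = alpha * x[i];      /* alpha folded in, once per row */
+		for (k = bpntr[i]; k < bpntr[i + 1]; ++k)
+			y[bindx[k]] += VA[k] * bt;  /* y <- y + alpha * (A^T * x)    */
+	}
+}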
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
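+/* Editorial note: in the symmetric alpha-kernels above, alpha never costs two
+ * multiplies per nonzero: the mirrored updates reuse bt = alpha*trhs[i], while
+ * the row dot product accumulates unscaled in cacc and is multiplied by alpha
+ * once at the end of the row.  The deferred scaling in isolation (sketch,
+ * assumed arguments): */
+static float row_dot_then_scale(const float *VA, const int *bindx,
+                                int fk, int lk, const float *x, float alpha)
+{
+	float acc = 0.0f;
+	int k;
+	for (k = fk; k < lk; ++k)
+		acc += VA[k] * x[bindx[k]]; /* accumulate without alpha ... */
+	return alpha * acc;                 /* ... and scale once per row   */
+}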
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
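+/* Editorial sketch: the dI ("diagonal implicit") kernels above deliberately
+ * skip the diagonal; per the NOTE they carry, the caller is expected to add
+ * it back.  For an implicit *unit* diagonal that plausibly reduces to
+ * y += alpha*x over the diagonal block (an assumption about the caller, not
+ * code taken from librsb): */
+static void add_implicit_unit_diagonal(float *y, const float *x,
+                                       int n, float alpha)
+{
+	int i;
+	for (i = 0; i < n; ++i)
+		y[i] += alpha * x[i]; /* contribution of the skipped unit diagonal */
+}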
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an implicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled in this kernel; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
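+
+/* [editor's note] The kernel above is a 4-way manually unrolled symmetric
+ * SpMV: each stored a_ij contributes a_ij*x_j to y_i (accumulated in 'cacc')
+ * and, mirrored, a_ij*x_i to y_j (scattered through 'tout'), with the guard
+ * roff!=coff || j!=i preventing the diagonal from being counted twice.  Only
+ * the first and last nonzero of a row carry that guard; the unrolled interior
+ * assumes any diagonal entry sits at a row's boundary.  A rolled-up reference
+ * form, with the guard applied uniformly (hypothetical helper, not librsb
+ * API): */
+#if 0 /* editor's illustration only */
+static void spmv_sym_ref(const float *VA, const float *x, float *y,
+	const rsb_nnz_idx_t *bpntr, const rsb_coo_idx_t *bindx,
+	rsb_coo_idx_t br, rsb_coo_idx_t bc,
+	rsb_coo_idx_t roff, rsb_coo_idx_t coff, float alpha)
+{
+	rsb_coo_idx_t i;
+	rsb_nnz_idx_t k;
+	for(i=br;i<bc;++i)
+	{
+		float cacc = 0.0f;                       /* row-side accumulator */
+		const float bt = alpha*x[(roff-coff)+i]; /* scaled x_i, offset-adjusted */
+		for(k=bpntr[i];k<bpntr[i+1];++k)
+		{
+			const rsb_coo_idx_t j = bindx[k];
+			cacc += VA[k]*x[j];                  /* y_i += a_ij*x_j */
+			if(roff!=coff || j!=i)               /* skip diagonal mirror */
+				y[(coff-roff)+j] += VA[k]*bt;    /* y_j += alpha*a_ij*x_i */
+		}
+		y[i] += alpha*cacc;
+	}
+}
+#endif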
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an implicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled in this kernel; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an implicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled in this kernel; it is applied at the caller level. */
+	/* With this symmetry, the (conjugate-)transposed product reduces to the untransposed one. */
+	return rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an implicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled in this kernel; it is applied at the caller level. */
+	/* With this symmetry, the (conjugate-)transposed product reduces to the untransposed one. */
+	return rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an implicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled in this kernel; it is applied at the caller level. */
+	/* With this symmetry, the (conjugate-)transposed product reduces to the untransposed one. */
+	return rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an implicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled in this kernel; it is applied at the caller level. */
+	/* With this symmetry, the (conjugate-)transposed product reduces to the untransposed one. */
+	return rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap);
+}
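+
+/* [editor's note] Key to the generated kernel names in this file, as read
+ * off their documentation comments (the 'uu'/'uG' fields are left
+ * undeciphered here):
+ *   uxua / unua : y <- y + alpha*op(A)*x  /  y <- y - op(A)*x
+ *   _C_ / _H_   : rsb_coo_idx_t / rsb_half_idx_t column indices in bindx
+ *   tN/tT/tC    : op(A) = A / A^T / A^H
+ *   r1_c1       : 1 x 1 blocking
+ *   sU/sS/sH    : unsymmetric / symmetric (A = A^T) / Hermitian (A = A^H)
+ *   dE/dI       : explicit / implicit diagonal
+ */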
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an explicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
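+
+/* [editor's note] 'unua' kernels fix alpha = -1: the row result 'cacc' is a
+ * plain dot product of the stored row with x, applied as y_i -= cacc.  A
+ * rolled-up equivalent of the unrolled loop above (hypothetical helper, not
+ * librsb API): */
+#if 0 /* editor's illustration only */
+static void spmv_unua_ref(const float *VA, const float *x, float *y,
+	const rsb_nnz_idx_t *bpntr, const rsb_coo_idx_t *bindx,
+	rsb_coo_idx_t br, rsb_coo_idx_t bc)
+{
+	rsb_coo_idx_t i;
+	rsb_nnz_idx_t k;
+	for(i=br;i<bc;++i)
+	{
+		float cacc = 0.0f;
+		for(k=bpntr[i];k<bpntr[i+1];++k)
+			cacc += VA[k]*x[bindx[k]]; /* dot(row i of A, x) */
+		y[i] -= cacc;                      /* y_i <- y_i - (A*x)_i */
+	}
+}
+#endif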
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an explicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
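+
+/* [editor's note] All kernel bodies in this file share one loop scheme: a
+ * 4-way manually unrolled main loop followed by a scalar remainder loop that
+ * reuses the same induction variable.  Schematically:
+ *
+ *   for(k=fk;k+3<lk;k+=4)
+ *       { ...process elements k, k+1, k+2, k+3... }
+ *   for(    ;k<lk;++k)
+ *       { ...process the at most 3 leftover elements... }
+ */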
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an explicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
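+
+/* [editor's note] The transposed kernel above computes y <- y - A^T*x as a
+ * column scatter: element a_ij contributes -a_ij*x_i to y_j.  Since row and
+ * column offsets swap roles under transposition, 'trhs'/'tout' shift the
+ * input/output pointers by (roff-coff)/(coff-roff).  Rolled-up reference
+ * form (hypothetical helper, not librsb API): */
+#if 0 /* editor's illustration only */
+static void spmv_unua_trans_ref(const float *VA, const float *x, float *y,
+	const rsb_nnz_idx_t *bpntr, const rsb_coo_idx_t *bindx,
+	rsb_coo_idx_t br, rsb_coo_idx_t bc,
+	rsb_coo_idx_t roff, rsb_coo_idx_t coff)
+{
+	rsb_coo_idx_t i;
+	rsb_nnz_idx_t k;
+	for(i=br;i<bc;++i)
+	{
+		const float bt = -x[(roff-coff)+i];      /* -x_i, offset-adjusted */
+		for(k=bpntr[i];k<bpntr[i+1];++k)
+			y[(coff-roff)+bindx[k]] += VA[k]*bt; /* y_j -= a_ij*x_i */
+	}
+}
+#endif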
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an explicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an explicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an explicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
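+
+/* [editor's note] Each _C_ kernel above has an _H_ twin whose body is
+ * textually identical; only the type of the column-index array 'bindx'
+ * changes from rsb_coo_idx_t to the narrower rsb_half_idx_t, reducing the
+ * index bandwidth of the multiply.  The pair could be stamped out from one
+ * template (hypothetical sketch; this is not how the file is generated): */
+#if 0 /* editor's illustration only */
+#define DEF_SPMV_UNUA(SUFFIX, IDX_T) \
+static void spmv_unua_##SUFFIX(const float *VA, const float *x, float *y, \
+	const rsb_nnz_idx_t *bpntr, const IDX_T *bindx, \
+	rsb_coo_idx_t br, rsb_coo_idx_t bc) \
+{ /* same body for both index widths */ }
+DEF_SPMV_UNUA(C, rsb_coo_idx_t)  /* full-width column indices */
+DEF_SPMV_UNUA(H, rsb_half_idx_t) /* half-width column indices */
+#endif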
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an explicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
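+
+/* [editor's note] A tiny, self-contained usage sketch for the symmetric
+ * kernel just above, with A = [[2,1],[1,3]] and only its lower triangle
+ * stored (CSR arrays below).  With x = (1,1), A*x = (3,4), so y <- y - A*x
+ * leaves y = (-3,-4).  The kernel name and types are from this file; the
+ * scaffolding is the editor's; arrays unused by the kernel are passed as
+ * NULL. */
+#if 0 /* editor's illustration only */
+static void example_spmv_unua_sym(void)
+{
+	const float VA[] = { 2.0f, 1.0f, 3.0f };   /* a00, a10, a11  */
+	const rsb_coo_idx_t bindx[] = { 0, 0, 1 }; /* column indices */
+	const rsb_nnz_idx_t bpntr[] = { 0, 1, 3 }; /* row pointers   */
+	const float x[] = { 1.0f, 1.0f };
+	float y[] = { 0.0f, 0.0f };
+	rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sS_dE_uG(VA, x, y,
+		2, 2, bindx, bpntr, NULL, NULL, NULL,
+		0, 2, 0, 0, 0);                    /* y is now (-3,-4) */
+}
+#endif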
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an explicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an explicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* With this symmetry, the (conjugate-)transposed product reduces to the untransposed one. */
+	return rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an explicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* With this symmetry, the (conjugate-)transposed product reduces to the untransposed one. */
+	return rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an explicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* With this symmetry, the (conjugate-)transposed product reduces to the untransposed one. */
+	return rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an explicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* With this symmetry, the (conjugate-)transposed product reduces to the untransposed one. */
+	return rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an explicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an explicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an explicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* With this symmetry, the (conjugate-)transposed product reduces to the untransposed one. */
+	return rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an explicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* With this symmetry, the (conjugate-)transposed product reduces to the untransposed one. */
+	return rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an explicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* With this symmetry, the (conjugate-)transposed product reduces to the untransposed one. */
+	return rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an explicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* With this symmetry, the (conjugate-)transposed product reduces to the untransposed one. */
+	return rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an implicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled in this kernel; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
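+
+/* [editor's note] The 'dI' kernels, like the one above, take the NOTE in
+ * their headers literally: the implicit (unit) diagonal contributes nothing
+ * here and must be added by the caller.  For the y <- y - A*x case that
+ * extra step would look like this (hypothetical sketch, not librsb API): */
+#if 0 /* editor's illustration only */
+static void add_implicit_unit_diagonal_unua(const float *x, float *y,
+	rsb_coo_idx_t n)
+{
+	rsb_coo_idx_t i;
+	for(i=0;i<n;++i)
+		y[i] -= x[i]; /* each implicit a_ii == 1 contributes -x_i */
+}
+#endif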
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an implicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled in this kernel; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an implicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled in this kernel; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an implicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled in this kernel; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an implicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled in this kernel; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an implicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled in this kernel; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an implicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled in this kernel; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an implicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled in this kernel; it is applied at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format with an implicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled in this kernel; it is applied at the caller level. */
+	/* With this symmetry, the (conjugate-)transposed product reduces to the untransposed one. */
+	return rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not really handled here; see the caller level. */
+	/* Transposed symmetric reduces to untransposed symmetric. */
+	return rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not really handled here; see the caller level. */
+	/* For real symmetric A, the conjugate transpose equals A, so this reduces to the untransposed kernel. */
+	return rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not really handled here; see the caller level. */
+	/* For real symmetric A, the conjugate transpose equals A, so this reduces to the untransposed kernel. */
+	return rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not really handled here; see the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
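+
+/* Editor's aside: the "sH" (Hermitian) bodies in this file coincide with the
+ * "sS" (symmetric) ones, since for the real type float \f$ \bar{a} = a \f$
+ * and hence \f$ A = A^H \Leftrightarrow A = A^T \f$; the generator still
+ * emits both so that every (type, symmetry) combination has its own symbol.
+ */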
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not really handled here; see the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+1*(roff-coff);
+	float *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*1];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*1];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*1];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*1];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not really handled here; see the caller level. */
+	/* For real Hermitian (i.e. symmetric) A, the transposed operation reduces to the untransposed one. */
+	return rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not really handled here; see the caller level. */
+	/* For real Hermitian (i.e. symmetric) A, the transposed operation reduces to the untransposed one. */
+	return rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not really handled here; see the caller level. */
+	/* For Hermitian A, A^H = A, so this reduces to the untransposed kernel. */
+	return rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not really handled here; see the caller level. */
+	/* For Hermitian A, A^H = A, so this reduces to the untransposed kernel. */
+	return rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an explicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
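+
+/* Editor's aside: the generated loop above is unrolled by hand, four products
+ * per iteration with named temporaries (a_0..a_3, b_0..b_3) to expose
+ * instruction-level parallelism, followed by a scalar remainder loop.  A
+ * compact sketch of the same pattern for a unit-stride row dot product
+ * (illustrative only, names invented):
+ */
+static float csr_row_dot_unroll4_sketch(const float *VA, const int *bindx,
+                                        int fk, int lk, const float *x)
+{
+	float cacc = 0.0f;
+	int k = fk;
+	for (; k + 3 < lk; k += 4)      /* main loop: four products per trip */
+		cacc += VA[k] * x[bindx[k]]
+		      + VA[k + 1] * x[bindx[k + 1]]
+		      + VA[k + 2] * x[bindx[k + 2]]
+		      + VA[k + 3] * x[bindx[k + 3]];
+	for (; k < lk; ++k)             /* remainder: at most three leftovers */
+		cacc += VA[k] * x[bindx[k]];
+	return cacc;
+}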
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an explicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an explicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
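+
+/* Editor's aside: the transposed kernel above never forms A^T; it walks A by
+ * rows and scatters, since \f$(A^T x)_j = \sum_i a_{ij} x_i\f$.  A minimal
+ * unit-stride sketch of that scatter (illustrative only):
+ */
+static void csr_spmv_transpose_scatter_sketch(const float *VA,
+                                              const int *bindx,
+                                              const int *bpntr, int nr,
+                                              const float *x, float *y)
+{
+	int i, k;
+	for (i = 0; i < nr; ++i) {
+		const float bt = x[i];               /* loaded once per row   */
+		for (k = bpntr[i]; k < bpntr[i + 1]; ++k)
+			y[bindx[k]] += VA[k] * bt;   /* scatter along columns */
+	}
+}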
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an explicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an explicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an explicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an explicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
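+
+/* Editor's aside: the trhs/tout arithmetic above is what lets one symmetric
+ * kernel also serve off-diagonal submatrices.  Reading the offsets, rhs
+ * enters as x shifted to the block's column window and out as y shifted to
+ * its row window; trhs and tout then recover the opposite windows needed by
+ * the mirrored updates.  A sketch of that bookkeeping (semantics inferred
+ * from the pointer arithmetic, so treat it as an assumption):
+ */
+static void sym_offdiag_windows_sketch(const float *x, float *y,
+                                       int roff, int coff,
+                                       int incx, int incy)
+{
+	const float *rhs  = x + coff * incx;   /* x viewed from the column window */
+	float       *out  = y + roff * incy;   /* y viewed from the row window    */
+	const float *trhs = rhs + (roff - coff) * incx; /* back to x + roff*incx  */
+	float       *tout = out + (coff - roff) * incy; /* back to y + coff*incy  */
+	/* rhs[j]/out[i] serve the direct part; trhs[i]/tout[j] the mirror part. */
+	(void)trhs; (void)tout;
+}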
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an explicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an explicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Transposed symmetric reduces to untransposed symmetric. */
+	return rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an explicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* Transposed symmetric reduces to untransposed symmetric. */
+	return rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an explicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For real symmetric A, the conjugate transpose equals A, so this reduces to the untransposed kernel. */
+	return rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an explicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For real symmetric A, the conjugate transpose equals A, so this reduces to the untransposed kernel. */
+	return rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an explicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an explicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an explicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For real Hermitian (i.e. symmetric) A, the transposed operation reduces to the untransposed one. */
+	return rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an explicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For real Hermitian (i.e. symmetric) A, the transposed operation reduces to the untransposed one. */
+	return rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an explicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For Hermitian A, A^H = A, so this reduces to the untransposed kernel. */
+	return rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an explicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For Hermitian A, A^H = A, so this reduces to the untransposed kernel. */
+	return rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not really handled here; see the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
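+
+/* Editor's aside: per the NOTE above, the "dI" kernels multiply the stored
+ * entries only; the implicit unit diagonal is applied at the caller level.
+ * A hypothetical caller-side fixup for the y <- y + A*x case (sketch, not
+ * librsb code):
+ */
+static void add_implicit_unit_diagonal_sketch(const float *x, float *y,
+                                              int n, int incx, int incy)
+{
+	int i;
+	for (i = 0; i < n; ++i)
+		y[i * incy] += x[i * incx]; /* each implicit a(i,i)=1 adds x[i] */
+}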
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not really handled here; see the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
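+
+/* Editor's aside: the "_H" variants differ from the "_C" ones only in the
+ * type of bindx: rsb_half_idx_t narrows the local column indices to a
+ * halfword, roughly halving index storage and memory traffic on submatrices
+ * small enough to be addressed that way.  The access pattern itself is
+ * unchanged, e.g. (sketch; the actual width of rsb_half_idx_t is assumed):
+ */
+static float csr_row_dot_halfidx_sketch(const float *VA,
+                                        const unsigned short *bindx,
+                                        int fk, int lk, const float *x)
+{
+	float cacc = 0.0f;
+	int k;
+	for (k = fk; k < lk; ++k)
+		cacc += VA[k] * x[bindx[k]]; /* halfword index promoted to int */
+	return cacc;
+}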
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not really handled here; see the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not really handled here; see the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not really handled here; see the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not really handled here; see the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: The implicit diagonal is not really handled here; see the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
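+
+/* Editor's aside: incx and incy give these kernels BLAS-style strided vector
+ * access: operand element j lives at rhs[j*incx] and result element i at
+ * out[i*incy].  A hypothetical caller using stride 2 to touch only the even
+ * slots of interleaved buffers (sketch only):
+ */
+static void csr_spmv_strided_sketch(const float *VA, const int *bindx,
+                                    const int *bpntr, int nr,
+                                    const float *x2, float *y2)
+{
+	const int incx = 2, incy = 2; /* e.g. real parts of interleaved data */
+	int i, k;
+	for (i = 0; i < nr; ++i) {
+		float cacc = 0.0f;
+		for (k = bpntr[i]; k < bpntr[i + 1]; ++k)
+			cacc += VA[k] * x2[bindx[k] * incx];
+		y2[i * incy] += cacc;
+	}
+}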
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; see the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; see the caller level. */
+	/* For a symmetric matrix, A^T = A: the transposed case reverts to the untransposed kernel. */
+	return rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; see the caller level. */
+	/* For a symmetric matrix, A^T = A: the transposed case reverts to the untransposed kernel. */
+	return rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; see the caller level. */
+	/* For a real symmetric matrix, A^H = A: the conjugate-transposed case reverts to the untransposed kernel. */
+	return rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; see the caller level. */
+	/* For a real symmetric matrix, A^H = A: the conjugate-transposed case reverts to the untransposed kernel. */
+	return rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
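+/* The four transpose/conjugate-transpose wrappers above are thin forwards:
+ * for this real symmetric storage both A^T and A^H equal A, so one
+ * untransposed kernel serves all three transposition requests. */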
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; see the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
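+/* For the real type float the Hermitian (sH) kernel bodies are identical to
+ * the symmetric (sS) ones above: conjugation is a no-op on reals, so the
+ * generator emits the same code under both symmetry flags. */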
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; see the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; see the caller level. */
+	/* For a Hermitian matrix of real type, A^T = A: the transposed case reverts to the untransposed kernel. */
+	return rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; see the caller level. */
+	/* For a Hermitian matrix of real type, A^T = A: the transposed case reverts to the untransposed kernel. */
+	return rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; see the caller level. */
+	/* For a Hermitian matrix, A^H = A: the conjugate-transposed case reverts to the untransposed kernel. */
+	return rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; see the caller level. */
+	/* For a Hermitian matrix, A^H = A: the conjugate-transposed case reverts to the untransposed kernel. */
+	return rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
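+		/* Unsymmetric row kernel: accumulate the dot product of row i of
+		 * A with x into cacc, then apply alpha once per row at the final
+		 * store below. */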
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
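+#if 0
+/* Editorial usage sketch (deliberately compiled out): how the unsymmetric
+ * scaled kernel above could be invoked on a tiny 2x2 CSR matrix
+ *     A = [ 1 2 ; 0 3 ]
+ * with unit strides. The argument order follows the dispatch calls further
+ * below; the zero flags value and the NULL arguments for the unused pointer
+ * parameters are assumptions for illustration only. */
+static void rsb_spmv_usage_sketch(void)
+{
+	const float VA[] = { 1.0f, 2.0f, 3.0f };	/* nonzero values, row-major */
+	const rsb_coo_idx_t bindx[] = { 0, 1, 1 };	/* column index of each nonzero */
+	const rsb_nnz_idx_t bpntr[] = { 0, 2, 3 };	/* row k spans bpntr[k]..bpntr[k+1]-1 */
+	const float x[] = { 1.0f, 1.0f };
+	float y[] = { 0.0f, 0.0f };
+	const float alpha = 2.0f;
+
+	/* y += alpha*A*x, i.e. y becomes { 2*(1+2), 2*3 } = { 6, 6 } */
+	rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sU_dE_uG(VA, x, y, 2, 2,
+		bindx, bpntr, NULL, NULL, NULL, 0, 2, 0, 0, 0, &alpha, 1, 1);
+}
+#endif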
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
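+/* The _H_ variants differ from the _C_ ones only in the type of bindx:
+ * rsb_half_idx_t column indices shrink the index storage (and thus memory
+ * traffic) for blocks narrow enough to be addressed that way; the
+ * arithmetic is otherwise identical. */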
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
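+		/* Transposed kernel: alpha is folded into bt = alpha*x[i], so each
+		 * scattered update tout[j] += VA[k]*bt realizes
+		 * y[j] += alpha*A[i][j]*x[i], i.e. y <- y + alpha*A^T*x, one row
+		 * of A (column of A^T) at a time. */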
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
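+		/* In the scaled symmetric kernel alpha enters once per path:
+		 * folded into bt for the scattered (transposed-side) updates, and
+		 * applied to cacc at the row-final store for the direct updates,
+		 * so each contribution is scaled exactly once. */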
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a symmetric matrix, A^T = A: the transposed case reverts to the untransposed kernel. */
+	return rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a symmetric matrix, A^T = A: the transposed case reverts to the untransposed kernel. */
+	return rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a real symmetric matrix, A^H = A: the conjugate-transposed case reverts to the untransposed kernel. */
+	return rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a real symmetric matrix, A^H = A: the conjugate-transposed case reverts to the untransposed kernel. */
+	return rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sS_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a Hermitian matrix of real type, A^T = A: the transposed case reverts to the untransposed kernel. */
+	return rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a Hermitian matrix of real type, A^T = A: the transposed case reverts to the untransposed kernel. */
+	return rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a Hermitian matrix, A^H = A: the conjugate-transposed case reverts to the untransposed kernel. */
+	return rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* For a Hermitian matrix, A^H = A: the conjugate-transposed case reverts to the untransposed kernel. */
+	return rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sH_dE_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; see the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
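+/* The dI (implicit-diagonal) sxsa kernels above and below share their bodies
+ * with the dE variants: per the NOTE, the implicit diagonal is left to the
+ * caller, which presumably adds the corresponding alpha*x[i] term itself. */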
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; see the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float *a=VA;
+		register float cacc = ((float)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; see the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; see the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;
+	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float a_1 =VA[k+1 ];
+			float c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float a_2 =VA[k+2 ];
+			float c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float a_3 =VA[k+3 ];
+			float c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float a_0 =VA[k+0 ];
+			float c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
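The symmetric (sS) kernels above fuse two updates per stored entry: a gather into the accumulator cacc (the y_i contribution of row i) and a scatter through tout (the mirrored y_j contribution of the transposed image), with the first and last stored entries peeled out of the unrolled loop so that the diagonal test roff!=coff || j!=i is only paid where a diagonal entry can occur. A minimal sketch of the idea for one plain CSR row with unit strides, testing the diagonal on every entry for brevity; all names are illustrative, not librsb's:

/* Symmetric SpMV over stored row i (one triangle stored once): each
 * off-diagonal a_ij updates both y[i] and y[j]; a diagonal entry is
 * counted once. */
static void symm_spmv_row(const float *va, const int *col, int fk, int lk,
                          int i, float alpha, const float *x, float *y)
{
	float cacc = 0.0f;		/* accumulates the row-i dot product */
	const float bt = alpha * x[i];	/* x_i, pre-scaled for the scatter */
	int k;

	for (k = fk; k < lk; ++k) {
		const int j = col[k];
		cacc += va[k] * x[j];	/* y_i += alpha*a_ij*x_j (applied below) */
		if (j != i)
			y[j] += va[k] * bt;	/* y_j += alpha*a_ij*x_i */
	}
	y[i] += alpha * cacc;
}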
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sS_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
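The four forwarding stubs above rest on a one-line identity, in the \f$...\f$ notation of these comments: \f$A = A^T \Rightarrow A^T x = A x\f$, and since these kernels are real (float), \f$A^H = \overline{A}^T = A^T\f$ as well, so both the transposed (tT) and conjugate-transposed (tC) symmetric cases reduce to the untransposed (tN) kernel.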
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float alpha=*alphap;	const float *trhs = rhs+(incx)*(roff-coff);
+	float *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float cacc = ((float)(0));
+		const float bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float b_1 =rhs[1*(j_1 )*(incx)];
+			const float a_1 =VA[k+1 ];
+			float c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float b_2 =rhs[1*(j_2 )*(incx)];
+			const float a_2 =VA[k+2 ];
+			float c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float b_3 =rhs[1*(j_3 )*(incx)];
+			const float a_3 =VA[k+3 ];
+			float c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float b_0 =rhs[1*(j_0 )*(incx)];
+			const float a_0 =VA[k+0 ];
+			float c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	/* Symmetric transposed reverts to symmetric not transposed */
+	return rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sH_dI_uG(VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy);
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
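In contrast to the scatter-style transposed kernels, the untransposed (tN) unsymmetric kernels are a pure row gather: each y_i accumulates the dot product of stored row i against x. Stripped of the unrolling, the per-row operation is just the following (illustrative names, C99 complex arithmetic):

#include <complex.h>

/* Dot product of one stored CSR row against x:
 * sum over k in [fk, lk) of va[k] * x[col[k]]. */
static float complex csr_row_dot(const float complex *va, const int *col,
                                 int fk, int lk, const float complex *x)
{
	float complex cacc = 0;
	int k;

	for (k = fk; k < lk; ++k)
		cacc += va[k] * x[col[k]];
	return cacc;
}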
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
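The conjugate-transposed (tC) kernels differ from the tT ones only in applying conjf() to each stored value as it is read, since \f$(A^H)_{ji} = \overline{A_{ij}}\f$: stored row i of A scatters conjugated entries into the output positions named by its column indices. A minimal sketch with illustrative names:

#include <complex.h>

/* One row of y += A^H * x: y[col[k]] += conj(va[k]) * x_i. */
static void herm_scatter_row(const float complex *va, const int *col,
                             int fk, int lk, float complex xi,
                             float complex *y)
{
	int k;

	for (k = fk; k < lk; ++k)
		y[col[k]] += conjf(va[k]) * xi;
}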
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
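+/*
+ * Reference sketch for the transposed-Hermitian case above (hypothetical
+ * helper, assuming roff == coff): with A == A^H one has A^T == conj(A),
+ * so the stored entry is conjugated in the row product, while the mirrored
+ * update uses it unconjugated.
+ */
+static void spmv_uaua_herm_trans_ref(const float complex *VA, const float complex *rhs, float complex *out, const rsb_nnz_idx_t *bpntr, const rsb_coo_idx_t *bindx, const rsb_coo_idx_t nr)
+{
+	rsb_coo_idx_t i;
+	rsb_nnz_idx_t k;
+
+	for(i=0;i<nr;++i)
+		for(k=bpntr[i];k<bpntr[i+1];++k)
+		{
+			const rsb_coo_idx_t j=bindx[k];
+			out[i]+=conjf(VA[k])*rhs[j];	/* row product, conjugated  */
+			if(j!=i)
+				out[j]+=VA[k]*rhs[i];	/* mirror, not conjugated   */
+		}
+}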
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
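+/*
+ * Note on the structure shared by the symmetric/Hermitian kernels above:
+ * the first and the last stored entry of each row are peeled off the 4-way
+ * unrolled core (which covers fk+1 .. lk-2), so that a possible diagonal
+ * entry is routed through the guarded scalar code, whose
+ * (roff!=coff || j!=i) test keeps a diagonal entry from being applied
+ * twice, i.e.
+ *
+ *	k = fk;                    handle bindx[fk]                (guarded)
+ *	for(k=fk+1;k+3<lk-1;k+=4)  unrolled interior, 4 per step
+ *	for(      ;k<lk-1;++k)     interior remainder
+ *	if(k<lk)                   handle bindx[lk-1]              (guarded)
+ */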
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
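+/*
+ * The unrolled loop pair above is a plain CSR row dot product; a rolled
+ * reference form of the same kernel (hypothetical helper; diagonal-implicit
+ * handling is left to the caller, as noted above):
+ */
+static void spmv_uaua_ref(const float complex *VA, const float complex *rhs, float complex *out, const rsb_nnz_idx_t *bpntr, const rsb_coo_idx_t *bindx, const rsb_coo_idx_t nr)
+{
+	rsb_coo_idx_t i;
+	rsb_nnz_idx_t k;
+
+	for(i=0;i<nr;++i)
+	{
+		float complex cacc=((float complex)(0));
+		for(k=bpntr[i];k<bpntr[i+1];++k)
+			cacc+=VA[k]*rhs[bindx[k]];	/* gather by column index */
+		out[i]+=cacc;
+	}
+}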
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
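+/*
+ * Reference sketch of the transposed kernel above (hypothetical helper,
+ * assuming roff == coff): y += A^T * x still walks A by rows, but each
+ * stored a_ij now scatters a_ij * x_i into y_j, so no per-row accumulator
+ * is carried.
+ */
+static void spmv_uaua_trans_ref(const float complex *VA, const float complex *rhs, float complex *out, const rsb_nnz_idx_t *bpntr, const rsb_coo_idx_t *bindx, const rsb_coo_idx_t nr)
+{
+	rsb_coo_idx_t i;
+	rsb_nnz_idx_t k;
+
+	for(i=0;i<nr;++i)
+		for(k=bpntr[i];k<bpntr[i+1];++k)
+			out[bindx[k]]+=VA[k]*rhs[i];	/* scatter by column index */
+}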
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
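+/*
+ * The conjugate-transposed variant above differs from the transposed one
+ * only in conjugating the matrix entry before the scatter (hypothetical
+ * helper, again assuming roff == coff):
+ */
+static void spmv_uaua_conj_trans_ref(const float complex *VA, const float complex *rhs, float complex *out, const rsb_nnz_idx_t *bpntr, const rsb_coo_idx_t *bindx, const rsb_coo_idx_t nr)
+{
+	rsb_coo_idx_t i;
+	rsb_nnz_idx_t k;
+
+	for(i=0;i<nr;++i)
+		for(k=bpntr[i];k<bpntr[i+1];++k)
+			out[bindx[k]]+=conjf(VA[k])*rhs[i];
+}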
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
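+/*
+ * Reference sketch for the symmetric (A == A^T) kernels (hypothetical
+ * helper, assuming roff == coff): the same mirrored-update scheme as in
+ * the Hermitian case, but with no conjugation on the mirrored entry.
+ */
+static void spmv_uaua_sym_ref(const float complex *VA, const float complex *rhs, float complex *out, const rsb_nnz_idx_t *bpntr, const rsb_coo_idx_t *bindx, const rsb_coo_idx_t nr)
+{
+	rsb_coo_idx_t i;
+	rsb_nnz_idx_t k;
+
+	for(i=0;i<nr;++i)
+		for(k=bpntr[i];k<bpntr[i+1];++k)
+		{
+			const rsb_coo_idx_t j=bindx[k];
+			out[i]+=VA[k]*rhs[j];
+			if(j!=i)
+				out[j]+=VA[k]*rhs[i];
+		}
+}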
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
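+	/* Untransposed uauz kernel: each output element is zeroed up front and
+	 * then receives the plain CSR row dot product accumulated in cacc. */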
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		out[1*(i*1)+0]=0;
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		out[1*(i*1)+0]=0;
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
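+	/* Transposed operation: contributions are scattered along the columns
+	 * through tout, so the destination is cleared first (the NULL alpha to
+	 * rsb__cblas_Xscal presumably selects scaling by zero here). */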
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
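+	/* Conjugate-transpose path: same column scatter as the tT kernel
+	 * above, but every entry is conjugated at load time, conjf(VA[k]). */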
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim*1,NULL,out,1); /* mdim, not Mdim: the transposed output spans the columns, matching the tT kernel */
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim*1,NULL,out,1); /* mdim, not Mdim: the transposed output spans the columns, matching the tT kernel */
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
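+	/* Symmetric leaf: each stored entry a_ij is visited once and updates
+	 * both y[i] (accumulated in cacc) and the mirrored y[j] (via tout);
+	 * the roff/coff guard below avoids double-counting the diagonal. */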
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
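+	/* With A = A^T, A^H x amounts to conj(A) x on the stored triangle:
+	 * both the row pass (cacc) and the mirror update (tout) conjugate the
+	 * matrix entry alone, conjf(VA[k]) times the vector element. */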
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
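+	/* Hermitian leaf, untransposed: y[i] takes the stored entry as-is,
+	 * while the mirror update uses its conjugate, since a_ji = conjf(a_ij). */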
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
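+	/* A^T of a Hermitian matrix: the row pass conjugates the stored entry
+	 * (cacc += conjf(a)*x[j]), while the mirror update uses it as-is. */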
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
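+	/* The dI kernels are thus textually identical to their dE counterparts;
+	 * the implicit diagonal contribution is added at the caller level. */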
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		out[1*(i*1)+0]=0;
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		out[1*(i*1)+0]=0;
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim*1,NULL,out,1); /* mdim, not Mdim: the transposed output spans the columns, matching the tT kernel */
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
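+
+/*
+ * Structure of the symmetric (sS) kernels above: only one triangle of A is
+ * stored, so each stored a(i,j) contributes a(i,j)*x(j) to y(i) (the cacc
+ * dot product) and, mirrored, a(i,j)*x(i) to y(j) (the tout scatter). The
+ * first and last nonzero of each row are peeled into scalar code guarded
+ * by (roff!=coff || j!=i), so a diagonal entry of a diagonal block is not
+ * mirrored twice; the unrolled middle of the row can then skip that test.
+ * A minimal whole-matrix sketch of the idea (illustrative names, no
+ * peeling or unrolling):
+ */
+#if 0	/* illustration only, not compiled */
+#include <complex.h>
+static void csr_spmv_sym_sketch(int nr, const int *pntr, const int *indx,
+		const float complex *va, const float complex *x, float complex *y)
+{
+	for(int i = 0; i < nr; ++i)
+		for(int k = pntr[i]; k < pntr[i+1]; ++k)
+		{
+			const int j = indx[k];
+			y[i] += va[k] * x[j];		/* row contribution */
+			if(j != i)
+				y[j] += va[k] * x[i];	/* mirrored triangle */
+		}
+}
+#endif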
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
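+
+/*
+ * The Hermitian (sH) variants differ from the symmetric (sS) ones only in
+ * the mirrored update: since a(j,i) == conjf(a(i,j)), the stored value is
+ * used as-is for y(i) += a(i,j)*x(j) but conjugated for
+ * y(j) += conjf(a(i,j))*x(i), as in the conjf(a_0)*bt terms above.
+ */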
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
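+
+/*
+ * The spmv_uxua kernels accumulate y := y + alpha * op(A) * x rather than
+ * overwriting y, so there is no initial rsb__cblas_Xscal() call and alpha
+ * is applied once per row to the accumulated dot product. A minimal
+ * equivalent of the untransposed kernel above (illustrative names only;
+ * the br..bc bounded-box iteration is reduced to a plain 0..nr sweep):
+ */
+#if 0	/* illustration only, not compiled */
+#include <complex.h>
+static void csr_spmv_axpy_sketch(int nr, const int *pntr, const int *indx,
+		const float complex *va, float complex alpha,
+		const float complex *x, float complex *y)
+{
+	for(int i = 0; i < nr; ++i)
+	{
+		float complex acc = 0.0f;	/* per-row accumulator, as cacc above */
+		for(int k = pntr[i]; k < pntr[i+1]; ++k)
+			acc += va[k] * x[indx[k]];
+		y[i] += alpha * acc;		/* alpha applied once per row */
+	}
+}
+#endif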
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
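+
+/*
+ * In the transposed and conjugate-transposed spmv_uxua kernels alpha is
+ * folded into bt once per row (bt = alpha * x(i)), so the unrolled scatter
+ * loop needs no extra multiplication per nonzero.
+ */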
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
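+
+/*
+ * In the symmetric spmv_uxua kernels alpha is applied to two disjoint
+ * contributions: it is folded into bt for the mirrored tout updates, and
+ * multiplied onto cacc when the row total is added to out, so every
+ * stored entry is scaled exactly once.
+ */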
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
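+
+/*
+ * Illustrative note, not part of librsb: in the conjugate-transpose (tC)
+ * kernels only the stored matrix entry is conjugated, never the vector
+ * operand, since (A^H)(i,j) = conjf(A(j,i)).  Per stored entry this reads:
+ *
+ *	cacc += conjf(VA[k]) * rhs[j];	 (row contribution of A^H)
+ *	tout[j] += conjf(VA[k]) * bt;	 (mirrored contribution, bt = alpha*x[i])
+ *
+ * matching the unrolled bodies, which compute conjf(a_0)*b_0 and
+ * conjf(a_0)*bt.
+ */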
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
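+
+/*
+ * Illustrative sketch, not part of librsb: the unsymmetric (sU) kernels above
+ * are plain CSR-style row dot products, manually unrolled by four with a
+ * scalar remainder loop.  Stripped of the generator's expansion, the per-row
+ * pattern is:
+ */
+#if 0 /* reference sketch only, never compiled */
+	for (k = fk; k + 3 < lk; k += 4)	/* unrolled body: four entries per step */
+		cacc += VA[k]   * rhs[bindx[k]]   + VA[k+1] * rhs[bindx[k+1]]
+		      + VA[k+2] * rhs[bindx[k+2]] + VA[k+3] * rhs[bindx[k+3]];
+	for (; k < lk; ++k)			/* remainder: up to three entries */
+		cacc += VA[k] * rhs[bindx[k]];
+	out[i] += alpha * cacc;			/* alpha applied once per row */
+#endif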
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
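+
+/*
+ * Illustrative note, not part of librsb: the transposed (tT) unsymmetric
+ * kernels walk the same row-major storage but scatter into the output,
+ * since entry A(i,j) contributes to y[j].  alpha*x[i] is hoisted into `bt`
+ * once per row, so each stored entry costs one multiply-add:
+ *
+ *	bt = alpha * trhs[i];
+ *	for (k = fk; k < lk; ++k)
+ *		tout[bindx[k]] += VA[k] * bt;
+ */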
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
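+
+/*
+ * Illustrative note, not part of librsb: the conjugate-transpose (tC) variant
+ * of the scatter above differs only in conjugating the loaded entry,
+ * a_0 = conjf(VA[k]), before the multiply, since (A^H)(j,i) = conjf(A(i,j)).
+ */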
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
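+
+/*
+ * Illustrative note, not part of librsb: as the NOTE above states, the dI
+ * ("diagonal implicit") kernels do not touch the diagonal themselves, and
+ * their loop bodies are identical to the dE counterparts.  Assuming the
+ * implicit diagonal is the unit diagonal (a guess; this file only says it is
+ * handled at caller level), the caller would account for it with, per row:
+ *
+ *	out[i] += alpha * rhs[i];
+ */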
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
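+
+/*
+ * Editor's note: as the NOTE above says, the implicit diagonal is applied at
+ * caller level rather than in this kernel.  A minimal sketch of such a
+ * fixup, assuming the implicit diagonal is the unit diagonal (the helper
+ * name and its signature are hypothetical, not librsb's actual API):
+ */
+#if 0
+static void rsb_editor_apply_implicit_unit_diag(float complex *out,
+	const float complex *rhs, const float complex alpha,
+	const rsb_coo_idx_t br, const rsb_coo_idx_t bc)
+{
+	rsb_coo_idx_t i;
+	for(i=br;i<bc;++i)
+		out[i] += alpha*rhs[i];	/* y_i += alpha * 1 * x_i per implicit diagonal entry */
+}
+#endif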
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
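+
+/*
+ * Editor's note: the _H_ kernels differ from the _C_ kernels only in that
+ * bindx holds rsb_half_idx_t column indices, a narrower integer than
+ * rsb_coo_idx_t, roughly halving index storage and memory traffic for
+ * submatrices whose column span fits the narrow type.  A sketch of the
+ * eligibility test a dispatcher might apply (assuming rsb_half_idx_t is an
+ * unsigned type; the variable names are illustrative):
+ */
+#if 0
+	const rsb_coo_idx_t half_max = (rsb_coo_idx_t)((rsb_half_idx_t)-1);
+	const int can_use_half_kernel = (mdim > 0 && mdim - 1 <= half_max);
+#endif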
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
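+
+/*
+ * Editor's note: for Hermitian storage a_ji == conjf(a_ij), hence
+ * A^T == conj(A).  The tT kernel above is therefore the mirror image of the
+ * tN kernel with the conjugations swapped: the gathered row contribution is
+ * cacc += conjf(a_ij)*x_j, while the scattered column contribution
+ * tout[j] += a_ij*bt stays unconjugated.
+ */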
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
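+
+/*
+ * Editor's note: with Hermitian storage A == A^H, so the two tC (conjugate
+ * transpose) kernels above compute the same result as the corresponding tN
+ * kernels; the generator apparently emits every transpose variant uniformly
+ * instead of aliasing the mathematically equivalent ones.
+ */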
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
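+
+/*
+ * Editor's note: the _unua_ kernels compute y -= A*x, i.e. the _uxua_
+ * variants with the scaling fixed at -1.  The 4-way unrolled body above is
+ * equivalent to this scalar reference loop (a sketch reusing the kernel's
+ * own locals):
+ */
+#if 0
+	for(k=fk;RSB_LIKELY(k<lk);++k)
+		cacc += VA[k]*rhs[bindx[k]];
+	out[i] -= cacc;	/* same as out[(1*i*1)]+=(-1)*cacc */
+#endif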
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
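+
+/*
+ * Editor's note: the tT kernels traverse the rows of A but compute the
+ * transposed product, so instead of gathering into a per-row accumulator
+ * they scatter: every stored a_ij adds a_ij*bt, with bt = -x_i, into
+ * tout[j].  Equivalent scalar form of the unrolled loop above (sketch):
+ */
+#if 0
+	for(k=fk;RSB_LIKELY(k<lk);++k)
+		tout[bindx[k]] += VA[k]*bt;	/* y_j -= a_ij * x_i */
+#endif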
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
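+
+/*
+ * Editor's note: the symmetric (sS) kernels visit only the stored triangle
+ * and emit both halves per nonzero: cacc gathers a_ij*x_j for y_i while
+ * tout[j] receives the mirrored a_ij*bt, with bt = -x_i here.  The guard
+ * (roff!=coff || j!=i) suppresses the mirrored update for diagonal entries
+ * of diagonal blocks, which would otherwise be applied twice.  Worked 2x2
+ * check: storing the lower triangle {a11, a21, a22} of A = [a11 a21; a21 a22],
+ * entry a21 yields both y2 -= a21*x1 (via cacc) and y1 -= a21*x2 (via tout),
+ * while a11 and a22 each contribute exactly once.
+ */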
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
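+
+/*
+ * Editor's note: with symmetric storage A == A^T, so the body of this tT
+ * kernel is identical to that of the tN kernel with the same index type
+ * above; only the dispatch name and the documented formula differ.
+ */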
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
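+/*
+ * For illustration only: a minimal, self-contained toy (hypothetical 3x3
+ * data, plain CSR rather than librsb's block structures) showing the
+ * two-sided Hermitian update the kernels above implement, i.e.
+ * y_i -= a_ij * x_j and, off the diagonal, y_j -= conjf(a_ij) * x_i.
+ */
+#include <complex.h>
+#include <stdio.h>
+
+int main(void)
+{
+	/* Lower triangle of a 3x3 Hermitian matrix in CSR form. */
+	const int n = 3;
+	const int bpntr[4] = {0, 1, 3, 5};        /* row pointers   */
+	const int bindx[5] = {0, 0, 1, 1, 2};     /* column indices */
+	const float complex VA[5] = {2, 1 + I, 3, -2 * I, 1};
+	const float complex x[3] = {1 + I, 2, 1 - I};
+	float complex y[3] = {0, 0, 0};
+
+	for (int i = 0; i < n; ++i)
+		for (int k = bpntr[i]; k < bpntr[i + 1]; ++k) {
+			const int j = bindx[k];
+			y[i] -= VA[k] * x[j];             /* stored entry   */
+			if (j != i)
+				y[j] -= conjf(VA[k]) * x[i];  /* mirrored entry */
+		}
+	for (int i = 0; i < n; ++i)
+		printf("y[%d] = %g%+gi\n", i, crealf(y[i]), cimagf(y[i]));
+	return 0;
+}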
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
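+	/* Unsymmetric, no transpose: a plain CSR row dot product over the
+	 * stored entries, 4-way unrolled with a scalar remainder loop; per
+	 * the NOTE below, the implicit diagonal is handled by the caller. */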
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
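+	/* Transposed access without a transposed copy: each stored a_ij of
+	 * row i scatters a_ij * (-x_i) into y_j, so the row-major sweep
+	 * effectively walks the columns of A^T; trhs/tout pre-offset the
+	 * vectors by roff-coff for off-diagonal blocks. */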
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
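+	/* Same column scatter as the A^T kernel above, except that each
+	 * stored value is passed through conjf() first, yielding A^H. */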
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
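+	/* Symmetric storage: each stored off-diagonal a_ij contributes
+	 * \f$ y_i \leftarrow y_i - a_{ij} x_j \f$ and
+	 * \f$ y_j \leftarrow y_j - a_{ij} x_i \f$, so only one triangle is
+	 * kept; the peeled first/last nonzero carries the j==i guard that
+	 * keeps a diagonal entry from being counted twice. */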
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
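+	/* Conjugate transpose of a symmetric matrix: here A^H = conj(A), so
+	 * both the accumulated and the scattered contribution take conjf()
+	 * of the stored value. */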
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
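+	/* Hermitian storage: the stored triangle is applied as-is to y_i,
+	 * while the mirrored contribution to y_j takes conjf(), since
+	 * a_ji = conjf(a_ij) when A = A^H. */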
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
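+	/* Transpose of a Hermitian matrix: A^T = conj(A), so here it is the
+	 * accumulated side that takes conjf() while the mirrored scatter
+	 * uses the stored value directly. */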
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+1*(roff-coff);
+	float complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*1];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*1];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*1];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*1];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
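+	/* The sasa family accumulates with a plus sign (y <- y + A x) and,
+	 * unlike the unit-stride unua kernels above, honours the incx/incy
+	 * strides visible in the rhs/out indexing below. */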
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
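+/*
+ * A minimal strided-SpMV sketch (hypothetical toy data, plain CSR) of
+ * what the sasa kernels compute: y[i*incy] += sum_j a_ij * x[j*incx].
+ */
+#include <complex.h>
+#include <stdio.h>
+
+int main(void)
+{
+	const int n = 2, incx = 2, incy = 2;      /* every other element used */
+	const int bpntr[3] = {0, 2, 3};
+	const int bindx[3] = {0, 1, 1};
+	const float complex VA[3] = {1 + I, 2, -I};
+	const float complex x[4] = {1, 99, I, 99};   /* stride-2 input  */
+	float complex y[4] = {0, 0, 0, 0};           /* stride-2 output */
+
+	for (int i = 0; i < n; ++i) {
+		float complex cacc = 0;                   /* per-row accumulator */
+		for (int k = bpntr[i]; k < bpntr[i + 1]; ++k)
+			cacc += VA[k] * x[bindx[k] * incx];
+		y[i * incy] += cacc;
+	}
+	for (int i = 0; i < n; ++i)
+		printf("y[%d] = %g%+gi\n", i * incy,
+		       crealf(y[i * incy]), cimagf(y[i * incy]));
+	return 0;
+}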
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
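+	/* Strided column scatter: as in the unua A^T kernel, each stored
+	 * a_ij sends a_ij * x_i into y_j, but rhs and out are now indexed
+	 * through incx and incy, and no sign flip is folded into bt. */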
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
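+/*
+ * Editor's note: the _H_ kernels differ from the _C_ ones only in the index
+ * type of bindx (rsb_half_idx_t instead of rsb_coo_idx_t), a narrower
+ * integer that reduces index-storage bandwidth for blocks whose column span
+ * fits it.  The loaded index is simply widened on use; a one-step sketch
+ * (helper name invented for illustration):
+ */
+static inline void half_idx_scatter_step(const rsb_half_idx_t *bindx,
+                                         rsb_nnz_idx_t k, const float complex *VA,
+                                         float complex bt, float complex *tout,
+                                         rsb_coo_idx_t incy)
+{
+	const rsb_coo_idx_t j = bindx[k];	/* integer promotion widens the index */
+	tout[j * incy] += VA[k] * bt;
+}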
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
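+/*
+ * Editor's note: the tC kernels are the tT kernels with each loaded entry
+ * conjugated, a_k = conjf(VA[k]), giving y <- y + A^H x since
+ * (A^H)(j,i) = conj(a(i,j)).  Sketch of the per-entry step (illustrative
+ * helper, not library API):
+ */
+#include <complex.h>
+static inline void conj_scatter_step(const float complex *VA, int k,
+                                     const int *bindx, float complex bt,
+                                     float complex *y)
+{
+	y[bindx[k]] += conjf(VA[k]) * bt;	/* conjugate the entry, then scatter */
+}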
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
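+/*
+ * Editor's note: trhs = rhs + incx*(roff-coff) and tout = out + incy*(coff-roff)
+ * re-base the vector pointers by the difference between the submatrix row and
+ * column offsets, so the loops can use the same local indices for both the
+ * gather and the transposed scatter; by plain pointer arithmetic,
+ * tout[j*incy] == out[(j+coff-roff)*incy] and
+ * trhs[i*incx] == rhs[(i+roff-coff)*incx].  (How the caller positions rhs and
+ * out relative to the global vectors is not visible in this file.)
+ */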
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
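+/*
+ * Editor's sketch (illustrative, simplified): the sS kernels exploit
+ * A == A^T by applying each stored entry a(i,j) twice, to y[i] (gathered
+ * into cacc) and to y[j] (scattered).  The generated code peels the first
+ * and last nonzero of each row so only those carry the diagonal guard
+ * (roff!=coff || j!=i), which prevents counting a diagonal entry twice.
+ * The same computation without unrolling or peeling, guarding every entry:
+ */
+#include <complex.h>
+static void csr_spmv_sym_sketch(const float complex *VA, const int *bindx,
+                                const int *bpntr, int nrows,
+                                const float complex *x, float complex *y)
+{
+	for (int i = 0; i < nrows; ++i) {
+		float complex cacc = 0;
+		for (int k = bpntr[i]; k < bpntr[i + 1]; ++k) {
+			const int j = bindx[k];
+			cacc += VA[k] * x[j];	/* y[i] += a(i,j)*x[j] */
+			if (j != i)		/* mirrored entry a(j,i) == a(i,j) */
+				y[j] += VA[k] * x[i];
+		}
+		y[i] += cacc;
+	}
+}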
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
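+/*
+ * Editor's sketch (illustrative, simplified): the sH kernels exploit
+ * A == A^H, where the mirrored entry is the conjugate, a(j,i) = conj(a(i,j)).
+ * Hence for y <- y + A x the row accumulation uses the entry as stored while
+ * the mirrored scatter conjugates it.  Without unrolling or end-of-row
+ * peeling (names invented for exposition):
+ */
+#include <complex.h>
+static void csr_spmv_herm_sketch(const float complex *VA, const int *bindx,
+                                 const int *bpntr, int nrows,
+                                 const float complex *x, float complex *y)
+{
+	for (int i = 0; i < nrows; ++i) {
+		float complex cacc = 0;
+		for (int k = bpntr[i]; k < bpntr[i + 1]; ++k) {
+			const int j = bindx[k];
+			cacc += VA[k] * x[j];	/* y[i] += a(i,j)*x[j] */
+			if (j != i)
+				y[j] += conjf(VA[k]) * x[i];	/* y[j] += conj(a(i,j))*x[i] */
+		}
+		y[i] += cacc;
+	}
+}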
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x, where A == A^H. \f$
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
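+/*
+ * Editor's note: per the NOTE comments above, the dI kernels skip the
+ * implicit diagonal entirely; the caller must add its contribution.  A
+ * hedged sketch of such a fix-up, assuming a unit implicit diagonal (the
+ * actual caller-side handling is not shown in this file; the helper name
+ * is invented):
+ */
+#include <complex.h>
+static void add_implicit_unit_diagonal(int n, const float complex *x,
+                                       float complex *y)
+{
+	int i;
+	for (i = 0; i < n; ++i)
+		y[i] += x[i];	/* y <- y + I*x, the part the dI kernel omitted */
+}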
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; see the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; see the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
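+/*
+ * Illustrative sketch, not part of the generated kernels (hypothetical
+ * names, guard never defined): the trhs/tout bias computed above lets the
+ * mirrored updates of an off-diagonal leaf at global offset (roff,coff)
+ * index the opposite coordinate without adding (roff-coff) per nonzero.
+ */
+#if defined(RSB_WANT_BIAS_SKETCH)
+#include <complex.h>
+static void rsb__bias_sketch(const float complex *rhs, float complex *out,
+	int incx, int incy, int roff, int coff,
+	const float complex **trhs, float complex **tout)
+{
+	*trhs = rhs + incx*(roff-coff);	/* trhs[j] aliases rhs[j+roff-coff] */
+	*tout = out + incy*(coff-roff);	/* tout[j] aliases out[j+coff-roff] */
+}
+#endif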
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; see the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*(incx)];	/* conjugate the entry only, as in the unrolled body */
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*(incx)];	/* conjugate the entry only */
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
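+/*
+ * Illustrative sketch, not part of the generated kernels (guard never
+ * defined): why the peeled first/last-nonzero statements above must
+ * conjugate the matrix entry alone; conjf(a*b) conjugates the vector
+ * element as well and yields a different value.
+ */
+#if defined(RSB_WANT_CONJ_SKETCH)
+#include <complex.h>
+#include <assert.h>
+static void rsb__conj_sketch(void)
+{
+	const float complex a = 1.0f + 1.0f*I, b = 1.0f*I;
+	assert(conjf(a)*b  ==  1.0f + 1.0f*I);	/* what the unrolled body computes */
+	assert(conjf(a*b) == -1.0f - 1.0f*I);	/* not the same value              */
+}
+#endif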
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; see the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*(incx)];	/* conjugate the entry only, as in the unrolled body */
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*(incx)];	/* conjugate the entry only */
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; see the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;	/* conjugate the entry only, as in the unrolled body */
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;	/* conjugate the entry only */
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
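+/*
+ * Illustrative sketch, not part of the generated kernels (hypothetical
+ * names, guard never defined): the Hermitian (sH, untransposed) update
+ * modelled on plain CSR; since a_ji == conjf(a_ij) for A == A^H, the
+ * mirrored term is conjugated while the stored one is not.
+ */
+#if defined(RSB_WANT_HERMITIAN_SKETCH)
+#include <complex.h>
+static void rsb__spmv_herm_sketch(const float complex *VA, const int *bindx,
+	const int *bpntr, const float complex *x, float complex *y, int nr)
+{
+	int i, k;
+	for(i=0;i<nr;++i)
+		for(k=bpntr[i];k<bpntr[i+1];++k)
+		{
+			const int j = bindx[k];
+			y[i] += VA[k]*x[j];			/* stored triangle   */
+			if(j!=i)
+				y[j] += conjf(VA[k])*x[i];	/* conjugated mirror */
+		}
+}
+#endif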
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; see the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;	/* conjugate the entry only, as in the unrolled body */
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;	/* conjugate the entry only */
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; see the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*(incx)];	/* conjugate the entry only, as in the unrolled body */
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*(incx)];	/* conjugate the entry only */
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; see the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*(incx)];	/* conjugate the entry only, as in the unrolled body */
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*(incx)];	/* conjugate the entry only */
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; see the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;	/* conjugate the entry only, as in the unrolled body */
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;	/* conjugate the entry only */
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; see the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;	/* conjugate the entry only, as in the unrolled body */
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;	/* conjugate the entry only */
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
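+/*
+ * Illustrative sketch, not part of the generated kernels (hypothetical
+ * names, guard never defined): in the sxsa variants above alpha scales the
+ * per-row accumulator once on write-back rather than every product.
+ */
+#if defined(RSB_WANT_ALPHA_SKETCH)
+#include <complex.h>
+static void rsb__spmv_alpha_sketch(const float complex *VA, const int *bindx,
+	const int *bpntr, const float complex *x, float complex *y,
+	int nr, float complex alpha)
+{
+	int i, k;
+	for(i=0;i<nr;++i)
+	{
+		float complex cacc = 0;
+		for(k=bpntr[i];k<bpntr[i+1];++k)
+			cacc += VA[k]*x[bindx[k]];
+		y[i] += alpha*cacc;	/* one multiply per row, not per nonzero */
+	}
+}
+#endif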
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
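+/*
+ * Illustrative sketch, not part of the generated kernels (hypothetical
+ * names; rsb_half_idx_t is assumed to be a 16-bit unsigned type here): the
+ * _H_ kernels differ from the _C_ ones only in the narrower column index,
+ * which roughly halves index traffic when the column span allows it.
+ */
+#if defined(RSB_WANT_HALF_IDX_SKETCH)
+#include <limits.h>
+static int rsb__fits_half_idx_sketch(long colspan)
+{
+	return colspan >= 0 && colspan <= USHRT_MAX;
+}
+#endif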
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
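+/*
+ * Illustrative sketch, not part of the generated kernels (hypothetical
+ * names, guard never defined): the transposed (tT) variants above scatter
+ * each stored row into y, computing y <- y + alpha * A^T * x without
+ * forming A^T.
+ */
+#if defined(RSB_WANT_TRANSPOSE_SKETCH)
+#include <complex.h>
+static void rsb__spmv_t_sketch(const float complex *VA, const int *bindx,
+	const int *bpntr, const float complex *x, float complex *y,
+	int nr, float complex alpha)
+{
+	int i, k;
+	for(i=0;i<nr;++i)
+	{
+		const float complex bt = alpha*x[i];	/* scaled once per row */
+		for(k=bpntr[i];k<bpntr[i+1];++k)
+			y[bindx[k]] += VA[k]*bt;	/* scatter along row i */
+	}
+}
+#endif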
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;
+	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*(incx)];	/* conjugate the entry only, as in the unrolled body */
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*(incx)];	/* conjugate the entry only */
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
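+/*
+ * Editorial note: each kernel above comes in a _C_ (rsb_coo_idx_t column
+ * indices) and an _H_ (rsb_half_idx_t column indices) flavour.  Within a
+ * small enough submatrix the column index fits a narrower integer, which
+ * halves the index-stream bandwidth of the multiply.  A minimal sketch,
+ * assuming a 16-bit half index (the actual rsb_half_idx_t typedef lives
+ * elsewhere in librsb; names below are hypothetical, compiled out):
+ */
+#if 0
+#include <complex.h>
+typedef unsigned short sketch_half_idx_t;
+static float complex sketch_half_idx_dot(const float complex *VA,
+	const sketch_half_idx_t *bindx, int nnz, const float complex *x)
+{
+	float complex acc = 0;
+	int k;
+	for(k=0;k<nnz;++k)
+		acc += VA[k]*x[bindx[k]];	/* narrow index widened on use */
+	return acc;
+}
+#endif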
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
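+/*
+ * Editorial note: for A == A^H only one triangle is stored, and every
+ * off-diagonal entry a_ij contributes twice: once directly to row i and
+ * once conjugated to row j, exactly as the kernel above does via cacc
+ * and tout.  A minimal COO-style sketch of that update rule (names
+ * hypothetical, unit strides, compiled out):
+ */
+#if 0
+#include <complex.h>
+static void sketch_hermitian_spmv(const float complex *VA, const int *IA,
+	const int *JA, int nnz, const float complex *x, float complex *y)
+{
+	int k;
+	for(k=0;k<nnz;++k)
+	{
+		const int i = IA[k], j = JA[k];
+		y[i] += VA[k]*x[j];			/* stored triangle */
+		if(i != j)
+			y[j] += conjf(VA[k])*x[i];	/* mirrored, conjugated */
+	}
+}
+#endif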
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
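+/*
+ * Editorial note: the unrolled body above is a plain CSR row dot product,
+ * four nonzeroes per trip plus a cleanup loop.  Rolled back up, the whole
+ * kernel reduces to the following sketch (unit strides, hypothetical
+ * names, alpha scaling as in the code above, compiled out):
+ */
+#if 0
+#include <complex.h>
+static void sketch_csr_spmv(const float complex *VA, const int *bindx,
+	const int *bpntr, int nr, float complex alpha,
+	const float complex *x, float complex *y)
+{
+	int i, k;
+	for(i=0;i<nr;++i)
+	{
+		float complex cacc = 0;
+		for(k=bpntr[i];k<bpntr[i+1];++k)
+			cacc += VA[k]*x[bindx[k]];
+		y[i] += alpha*cacc;	/* y <- y + alpha*A*x, row i */
+	}
+}
+#endif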
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex *a=VA;
+		register float complex cacc = ((float complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
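+/*
+ * Editorial note: the transposed kernels never materialise A^T; they
+ * scatter along columns instead.  With bt = alpha*x[i] computed once per
+ * row, every stored a_ij adds a_ij*bt to y[j], so y <- y + alpha*A^T*x
+ * falls out of a row-wise sweep.  Sketch (unit strides, hypothetical
+ * names, compiled out):
+ */
+#if 0
+#include <complex.h>
+static void sketch_csr_spmv_t(const float complex *VA, const int *bindx,
+	const int *bpntr, int nr, float complex alpha,
+	const float complex *x, float complex *y)
+{
+	int i, k;
+	for(i=0;i<nr;++i)
+	{
+		const float complex bt = alpha*x[i];
+		for(k=bpntr[i];k<bpntr[i+1];++k)
+			y[bindx[k]] += VA[k]*bt;	/* column scatter */
+	}
+}
+#endif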
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex a_1 =conjf(VA[k+1 ]);
+			float complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex a_2 =conjf(VA[k+2 ]);
+			float complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex a_3 =conjf(VA[k+3 ]);
+			float complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex a_0 =conjf(VA[k+0 ]);
+			float complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
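+/*
+ * Editorial note: the guard  if(roff!=coff || (j!=i))  in the kernel
+ * above suppresses the mirrored update for diagonal entries of diagonal
+ * blocks: a_ii already reaches out[i] through cacc, so adding a_ii*bt to
+ * tout[i] as well would count it twice.  Only the scalar head and tail
+ * iterations carry the test; the unrolled middle is left unguarded,
+ * presumably because in a sorted triangle the diagonal can only be the
+ * first or last stored entry of its row.
+ */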
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
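A condensed view of the one-pass Hermitian scheme the kernels above implement: each stored entry a_ij of the kept triangle updates y_i through the row accumulator and mirrors conjf(a_ij)*x_i into y_j, with the j != i guard keeping diagonal entries from being counted twice. The sketch below assumes unit strides and alpha = 1 and uses illustrative names (csr_spmv_herm, ptr, col, val); it is not the librsb interface.

	#include <complex.h>

	void csr_spmv_herm(int n, const int *ptr, const int *col,
	                   const float complex *val,
	                   const float complex *x, float complex *y)
	{
		for (int i = 0; i < n; ++i) {
			float complex cacc = 0;
			for (int k = ptr[i]; k < ptr[i+1]; ++k) {
				const int j = col[k];
				cacc += val[k] * x[j];            /* y_i += a_ij * x_j       */
				if (j != i)
					y[j] += conjf(val[k]) * x[i]; /* y_j += conj(a_ij) * x_i */
			}
			y[i] += cacc;
		}
	}

Reading only the stored triangle once per product is what makes the symmetric and Hermitian kernels roughly twice as bandwidth-efficient as an unsymmetric sweep over the full matrix.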
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is handled at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
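For Hermitian A the transposed product is y += A^T x = conj(A) x, which is why the tT kernel above conjugates the gathered term (conjf(a)*b into cacc) and mirrors the unconjugated entry, exactly swapping the roles seen in the tN variant. A line-for-line reduction, assuming unit strides and alpha = 1 (illustrative names, not the librsb interface):

	#include <complex.h>

	void csr_spmv_herm_trans(int n, const int *ptr, const int *col,
	                         const float complex *val,
	                         const float complex *x, float complex *y)
	{
		for (int i = 0; i < n; ++i) {
			float complex cacc = 0;
			for (int k = ptr[i]; k < ptr[i+1]; ++k) {
				const int j = col[k];
				cacc += conjf(val[k]) * x[j]; /* y_i += conj(a_ij) * x_j */
				if (j != i)
					y[j] += val[k] * x[i];    /* y_j += a_ij * x_i       */
			}
			y[i] += cacc;
		}
	}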
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is handled at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conjf(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =( a_3 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conjf(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conjf(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conjf(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =( a_0 )*bt;
+			cacc += conjf(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conjf(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
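The _C_ and _H_ twins above differ only in the type of bindx: rsb_coo_idx_t versus rsb_half_idx_t. Assuming the half-index type is a narrower integer, which is what the naming suggests though its exact width is defined elsewhere in the library, the _H_ kernels roughly halve the index bytes streamed per nonzero. A standalone sketch of the same gather loop over both index widths (stand-in typedefs, not the librsb definitions):

	#include <complex.h>
	#include <stdint.h>
	#include <stddef.h>

	typedef int32_t  coo_idx_t;  /* stand-in for rsb_coo_idx_t  */
	typedef uint16_t half_idx_t; /* stand-in for rsb_half_idx_t */

	float complex dot_coo(const float complex *VA, const coo_idx_t *bindx,
	                      const float complex *x, size_t nnz)
	{
		float complex acc = 0;
		for (size_t k = 0; k < nnz; ++k)
			acc += VA[k] * x[bindx[k]];  /* 4 index bytes per nonzero */
		return acc;
	}

	float complex dot_half(const float complex *VA, const half_idx_t *bindx,
	                       const float complex *x, size_t nnz)
	{
		float complex acc = 0;
		for (size_t k = 0; k < nnz; ++k)
			acc += VA[k] * x[bindx[k]];  /* 2 index bytes per nonzero */
		return acc;
	}

Since SpMV is typically memory-bound, shrinking the index stream is often worth a measurable speedup even though the arithmetic is identical.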
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is handled at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is handled at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const float complex alpha=*alphap;	const float complex *trhs = rhs+(incx)*(roff-coff);
+	float complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register float complex cacc = ((float complex)(0));
+		const float complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const float complex b_1 =rhs[1*(j_1 )*(incx)];
+			const float complex a_1 =VA[k+1 ];
+			float complex c_1 =conjf( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const float complex b_2 =rhs[1*(j_2 )*(incx)];
+			const float complex a_2 =VA[k+2 ];
+			float complex c_2 =conjf( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const float complex b_3 =rhs[1*(j_3 )*(incx)];
+			const float complex a_3 =VA[k+3 ];
+			float complex c_3 =conjf( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const float complex b_0 =rhs[1*(j_0 )*(incx)];
+			const float complex a_0 =VA[k+0 ];
+			float complex c_0 =conjf( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conjf(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
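From here on the kernels switch from the sxsa family to the uaua family. Judging by the bodies, the sxsa variants apply the alpha scaling and the incx/incy strides, while the uaua variants assume unit strides and no scaling; the per-row update is where the difference surfaces. A side-by-side sketch of just that step (illustrative names, not the librsb call interface):

	#include <complex.h>

	/* sxsa-style row update: scaled by alpha, strided output */
	void row_update_sxsa(double complex alpha, double complex cacc,
	                     double complex *out, int i, int incy)
	{
		out[i * incy] += alpha * cacc;
	}

	/* uaua-style row update: unscaled, unit stride */
	void row_update_uaua(double complex cacc, double complex *out, int i)
	{
		out[i] += cacc;
	}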
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
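The kernel above shows, in its plainest unsymmetric form, the unroll-by-4 pattern shared by all of these kernels: a main loop that advances while four entries remain (k+3 < lk), followed by a scalar loop for the zero to three leftovers. In isolation, with illustrative names and unit stride:

	#include <complex.h>

	double complex row_dot4(const double complex *VA, const int *bindx,
	                        const double complex *x, int fk, int lk)
	{
		double complex acc = 0;
		int k;
		for (k = fk; k + 3 < lk; k += 4) {  /* body handles entries k .. k+3 */
			acc += VA[k+0] * x[bindx[k+0]];
			acc += VA[k+1] * x[bindx[k+1]];
			acc += VA[k+2] * x[bindx[k+2]];
			acc += VA[k+3] * x[bindx[k+3]];
		}
		for (; k < lk; ++k)                 /* 0..3 remaining entries        */
			acc += VA[k] * x[bindx[k]];
		return acc;
	}

Unrolling by hand exposes four independent multiply-adds per iteration, which helps compilers that do not unroll irregular gather loops on their own.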
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
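The transposed kernel above never forms A^T: each stored a_ij scatters val*x_i into y_j. The trhs/tout pointers re-base the vectors by (roff-coff) and (coff-roff) so that, under the apparent caller convention that rhs and out arrive already offset by the block's own row and column coordinates, the loop can use local indices directly. A sketch under that assumption (illustrative names):

	#include <complex.h>

	void csr_spmv_trans(int nr, const int *ptr, const int *col,
	                    const double complex *val, int roff, int coff,
	                    const double complex *x, double complex *y)
	{
		const double complex *trhs = x + (roff - coff); /* x in the block's row frame    */
		double complex *tout       = y + (coff - roff); /* y in the block's column frame */
		for (int i = 0; i < nr; ++i) {
			const double complex bt = trhs[i];          /* x value for local row i */
			for (int k = ptr[i]; k < ptr[i+1]; ++k)
				tout[col[k]] += val[k] * bt;            /* y_j += a_ij * x_i       */
		}
	}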
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
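For the conjugate-transposed product the kernel above reuses the same scatter and merely conjugates each entry on load (conj(VA[k])), so A^H costs one conjugation per nonzero on top of the A^T path. Condensed, with illustrative names and unit strides:

	#include <complex.h>

	void csr_spmv_ctrans(int nr, const int *ptr, const int *col,
	                     const double complex *val,
	                     const double complex *x, double complex *y)
	{
		for (int i = 0; i < nr; ++i)
			for (int k = ptr[i]; k < ptr[i+1]; ++k)
				y[col[k]] += conj(val[k]) * x[i]; /* conjugate on load, then scatter */
	}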
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
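The symmetric kernel above peels the first and the last stored entry of each row out of the unrolled middle: only the peeled entries carry the (roff!=coff || j!=i) diagonal guard, so a diagonal entry sitting at either end of a column-sorted row is mirrored exactly once, and the middle loop (fk+1 .. lk-2) runs guard-free. The control flow, stripped of unrolling and block offsets (illustrative names, a sketch rather than librsb code):

	#include <complex.h>

	void sym_row(int i, int fk, int lk, const double complex *VA,
	             const int *bindx, const double complex *x,
	             double complex *y)
	{
		double complex cacc = 0;
		int k = fk, j;
		if (k == lk) return;          /* empty row                         */
		j = bindx[k];                 /* peeled first entry: guarded       */
		cacc += VA[k] * x[j];
		if (j != i)
			y[j] += VA[k] * x[i];
		++k;
		for (; k < lk - 1; ++k) {     /* middle: assumed off-diagonal      */
			j = bindx[k];
			cacc += VA[k] * x[j];
			y[j] += VA[k] * x[i];
		}
		if (k < lk) {                 /* peeled last entry: guarded        */
			j = bindx[k];
			cacc += VA[k] * x[j];
			if (j != i)
				y[j] += VA[k] * x[i];
		}
		y[i] += cacc;
	}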
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^T \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + A^H \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
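+/*
+ * Editorial note: each _C_ kernel has an _H_ twin with identical logic; the
+ * only difference is that bindx[] holds rsb_half_idx_t rather than
+ * rsb_coo_idx_t column indices, roughly halving the index-array bandwidth for
+ * small submatrices.  A hedged sketch of the relationship (the real typedefs
+ * are configuration-dependent and defined elsewhere in librsb):
+ *
+ *	typedef int rsb_coo_idx_t;		// full-width coordinate index
+ *	typedef unsigned short rsb_half_idx_t;	// narrower index for small blocks
+ */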
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
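+/*
+ * Editorial sketch: the tN/sU kernel above is a textbook CSR sparse
+ * matrix-vector product, merely unrolled.  A minimal un-unrolled reference
+ * (hypothetical helper, not part of librsb's API; <complex.h> assumed):
+ */
+static void rsb_sketch_csr_spmv(const double complex *VA, const int *bindx,
+		const int *bpntr, int nrows,
+		const double complex *x, double complex *y)
+{
+	for(int i=0;i<nrows;++i)
+	{
+		double complex acc = 0;
+		for(int k=bpntr[i];k<bpntr[i+1];++k)	/* nonzeros of row i */
+			acc += VA[k]*x[bindx[k]];
+		y[i] += acc;	/* "uaua" semantics: accumulate into y */
+	}
+}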
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
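+/*
+ * Editorial note: the row loop runs for(i=br;i<bc;++i) rather than over
+ * 0..Mdim: per the "bounded box patch" comment, br..bc restricts the kernel
+ * to the row sub-range a submatrix actually touches (marked experimental
+ * upstream).
+ */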
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
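+/*
+ * Editorial sketch: the tT kernels walk the stored rows but update the output
+ * column-wise: every stored nonzero (i, bindx[k]) scatters VA[k]*x[i] into
+ * y[bindx[k]], with bt = x[i] hoisted out of the inner loop.  Reference form
+ * (hypothetical helper; <complex.h> assumed):
+ */
+static void rsb_sketch_csr_spmv_t(const double complex *VA, const int *bindx,
+		const int *bpntr, int nrows,
+		const double complex *x, double complex *y)
+{
+	for(int i=0;i<nrows;++i)
+	{
+		const double complex bt = x[i];	/* reused across all of row i */
+		for(int k=bpntr[i];k<bpntr[i+1];++k)
+			y[bindx[k]] += VA[k]*bt;	/* scatter along the row */
+	}
+}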
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
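+/*
+ * Editorial note: the tC (conjugate-transpose) kernels are the same scatter
+ * as the tT ones above, with conj() applied to each stored value before the
+ * multiply, i.e. y[bindx[k]] += conj(VA[k])*x[i].
+ */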
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
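+/*
+ * Editorial note: the sS/sH kernels store one triangle only, so each stored
+ * nonzero (i,j) must update both y[i] and y[j].  The first and last nonzero
+ * of every row are peeled off the unrolled loop because only they can be a
+ * diagonal element of a diagonal block (roff==coff), which must not be
+ * mirrored twice; interior nonzeros of a sorted row need no such branch.
+ * The per-element update, sketched:
+ *
+ *	cacc += VA[k]*x[j];		// row contribution to y[i]
+ *	if(roff!=coff || (j!=i))
+ *		tout[j] += VA[k]*bt;	// mirrored contribution (conj(VA[k])*bt for sH)
+ */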
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is accounted for at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
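+/*
+ * Editorial summary of the kernel naming scheme, as far as it can be inferred
+ * from the signatures and doc comments in this file:
+ *
+ *	uaua / uauz : y <- y + op(A)*x  /  y <- op(A)*x (output zeroed first)
+ *	_C_ / _H_   : bindx[] holds rsb_coo_idx_t / rsb_half_idx_t column indices
+ *	tN/tT/tC    : op(A) = A / A^T / A^H
+ *	sU/sS/sH    : A unsymmetric / symmetric (A == A^T) / Hermitian (A == A^H)
+ *	dE/dI       : diagonal stored explicitly / implied (handled by the caller)
+ *	r1_c1       : 1 x 1 blocking, i.e. BCSR degenerating to plain CSR
+ */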
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		out[1*(i*1)+0]=0;
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
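+/*
+ * Editorial note: the one difference from the uaua family above is that
+ * "uauz" overwrites the output, so the no-transpose loops clear out[i] at the
+ * top of each row (out[1*(i*1)+0]=0;) instead of accumulating into the
+ * caller's y.
+ */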
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		out[1*(i*1)+0]=0;
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
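+/*
+ * Editorial note: the transposed uauz kernels cannot zero the output row by
+ * row, since each stored row scatters into arbitrary entries of y; the whole
+ * vector is therefore cleared up front via rsb__cblas_Xscal(..., NULL, out, 1).
+ * From its use here, a NULL scale factor evidently acts as zero, i.e. a clear.
+ */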
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
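+
+/*
+ * Reader's note (illustrative): the _tC kernels compute the conjugate
+ * transpose product, whose j-th entry is sum_i conj(a_ij)*x_i; hence each
+ * VA[] entry is passed through conj() before the scatter into tout[].
+ */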
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A == A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
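+
+/*
+ * Reader's note (illustrative): the symmetric (sS) kernels visit each stored
+ * entry a=A(i,j) once and apply it twice: cacc accumulates a*x[j] into y[i],
+ * while tout[j] receives the mirrored a*x[i]. The first and last nonzero of
+ * each row are peeled off the unrolled loop so that a diagonal entry (j==i
+ * on a diagonal block, i.e. roff==coff) is not mirrored a second time.
+ */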
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A == A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A == A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A == A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A == A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A == A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
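+
+/*
+ * Reader's note (illustrative): for A == A^T one has A^H == conj(A), so the
+ * sS/_tC kernels above are the plain symmetric kernels with conj() applied
+ * to every matrix entry, on both the dot-product (cacc) and the mirrored
+ * scatter (tout) side.
+ */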
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A == A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A == A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
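+
+/*
+ * Reader's note (illustrative): in the Hermitian (sH) untransposed kernels a
+ * stored entry a=A(i,j) contributes a*x[j] to y[i] and, since
+ * a_ji == conj(a_ij), conj(a)*x[i] to y[j]; storing one triangle of A
+ * therefore suffices.
+ */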
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A == A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A == A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
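+
+/*
+ * Reader's note (illustrative): for A == A^H one has A^T == conj(A), so the
+ * sH/_tT kernels conjugate the entries on the dot-product side (cacc) and
+ * use them verbatim in the mirrored scatter, i.e. the exact opposite of the
+ * untransposed sH case.
+ */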
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A == A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A == A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
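+
+/*
+ * Reader's note (illustrative): for A == A^H the conjugate-transpose product
+ * coincides with the plain one (A^H == A), which is why the sH/_tC kernels
+ * above match the sH/_tN kernels entry for entry.
+ */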
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here: it is dealt with at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		out[1*(i*1)+0]=0;
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here: it is dealt with at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		out[1*(i*1)+0]=0;
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
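+
+/*
+ * Reader's note (an assumed caller-side sketch, not taken from this file):
+ * with an implicit unit diagonal (dI) these kernels only process the stored
+ * off-diagonal entries, so the caller is expected to complete the product
+ * with something equivalent to:
+ *
+ *	for(i=0;i<Mdim;++i)
+ *		out[i] += rhs[i];	(implicit unit diagonal: y += I*x)
+ */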
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here: it is dealt with at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here: it is dealt with at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here: it is dealt with at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here: it is dealt with at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A == A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here: it is dealt with at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A == A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here: it is dealt with at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A == A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here: it is dealt with at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A == A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here: it is dealt with at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A must be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
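+/*
+ * Illustrative sketch (not generated code): conjugate-transpose SpMV for a
+ * *symmetric* matrix, as in the tC/sS kernels here.  Since A == A^T, we
+ * have A^H == conj(A), so the conjugate lands on the stored entry alone,
+ * never on the vector operands, in both the gathered and the scattered
+ * update -- exactly as in the unrolled loop bodies above.  Hypothetical
+ * plain-CSR names; y[] assumed pre-zeroed.
+ */
+#include <complex.h>
+static void sketch_spmv_sym_conj(const double complex *VA, const int *JA,
+		const int *RP, int n, const double complex *x, double complex *y)
+{
+	int i, k;
+	for (i = 0; i < n; ++i)
+	{
+		double complex acc = 0;
+		const double complex xi = x[i];
+		for (k = RP[i]; k < RP[i+1]; ++k)
+		{
+			const int j = JA[k];
+			acc += conj(VA[k]) * x[j];	/* y[i] += conj(a(i,j)) * x[j] */
+			if (j != i)
+				y[j] += conj(VA[k]) * xi;	/* y[j] += conj(a(j,i)) * x[i] */
+		}
+		y[i] += acc;
+	}
+}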
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A must be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A must be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
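+/*
+ * Illustrative sketch (not generated code): untransposed SpMV for a
+ * *Hermitian* matrix, as in the tN/sH kernels here.  Only one triangle is
+ * stored and a(j,i) == conj(a(i,j)), so the gathered update uses the
+ * stored entry as is while the scattered update conjugates it.
+ * Hypothetical plain-CSR names; y[] assumed pre-zeroed.
+ */
+#include <complex.h>
+static void sketch_spmv_herm(const double complex *VA, const int *JA,
+		const int *RP, int n, const double complex *x, double complex *y)
+{
+	int i, k;
+	for (i = 0; i < n; ++i)
+	{
+		double complex acc = 0;
+		const double complex xi = x[i];
+		for (k = RP[i]; k < RP[i+1]; ++k)
+		{
+			const int j = JA[k];
+			acc += VA[k] * x[j];	/* y[i] += a(i,j) * x[j] */
+			if (j != i)
+				y[j] += conj(VA[k]) * xi;	/* y[j] += conj(a(i,j)) * x[i] */
+		}
+		y[i] += acc;
+	}
+}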
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A must be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A must be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
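+/*
+ * Illustrative sketch (not generated code): transposed SpMV for a
+ * *Hermitian* matrix, as in the tT/sH kernels here.  With A == A^H we have
+ * A^T == conj(A), so the conjugate moves to the gathered update while the
+ * scattered one uses the stored entry as is -- the mirror image of the
+ * tN/sH case.  Hypothetical plain-CSR names; y[] assumed pre-zeroed.
+ */
+#include <complex.h>
+static void sketch_spmv_herm_trans(const double complex *VA, const int *JA,
+		const int *RP, int n, const double complex *x, double complex *y)
+{
+	int i, k;
+	for (i = 0; i < n; ++i)
+	{
+		double complex acc = 0;
+		const double complex xi = x[i];
+		for (k = RP[i]; k < RP[i+1]; ++k)
+		{
+			const int j = JA[k];
+			acc += conj(VA[k]) * x[j];	/* y[i] += conj(a(i,j)) * x[j] */
+			if (j != i)
+				y[j] += VA[k] * xi;	/* y[j] += a(i,j) * x[i] */
+		}
+		y[i] += acc;
+	}
+}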
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A must be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A must be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A must be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	rsb__cblas_Xscal(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,Mdim*1,NULL,out,1);
+	for(i=0;RSB_LIKELY(i<Mdim);++i)
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A must be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
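+/*
+ * Illustrative sketch (not generated code): the 4-way unrolled row dot
+ * product used by the unsymmetric uxua kernels here, in plain CSR.  The
+ * first loop consumes four entries per iteration, the remainder loop
+ * finishes the row, and the accumulated sum is scaled by alpha once, on
+ * the way out.  Hypothetical names.
+ */
+#include <complex.h>
+static void sketch_spmv_unrolled(const double complex *VA, const int *JA,
+		const int *RP, int n, double complex alpha,
+		const double complex *x, double complex *y)
+{
+	int i, k;
+	for (i = 0; i < n; ++i)
+	{
+		double complex acc = 0;
+		const int lk = RP[i+1];
+		for (k = RP[i]; k + 3 < lk; k += 4)	/* unrolled by four */
+			acc += VA[k+0] * x[JA[k+0]] + VA[k+1] * x[JA[k+1]]
+			     + VA[k+2] * x[JA[k+2]] + VA[k+3] * x[JA[k+3]];
+		for (     ; k < lk; ++k)	/* remainder: at most three entries */
+			acc += VA[k] * x[JA[k]];
+		y[i] += alpha * acc;	/* uxua: y += alpha * A * x */
+	}
+}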
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A must be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
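+/*
+ * Illustrative sketch (not generated code): the _H kernel variants differ
+ * from their _C twins solely in the column-index type, rsb_half_idx_t
+ * instead of rsb_coo_idx_t, which halves index bandwidth for submatrices
+ * whose column range fits a halfword.  A hypothetical narrowing step:
+ */
+#include <limits.h>
+static int sketch_narrow_indices(const int *JA, unsigned short *JH, int nnz)
+{
+	int k;
+	for (k = 0; k < nnz; ++k)
+	{
+		if (JA[k] < 0 || JA[k] > USHRT_MAX)
+			return -1;	/* column index out of halfword range */
+		JH[k] = (unsigned short)JA[k];
+	}
+	return 0;	/* safe to dispatch to a _H kernel */
+}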
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A must be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
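+/*
+ * Illustrative sketch (not generated code): the scatter formulation used
+ * by the transposed uxua kernels here.  Rows of A are walked as before,
+ * but each entry now updates y[j]; scaling x[i] by alpha once per row (bt)
+ * saves one multiply per nonzero.  Hypothetical plain-CSR names.
+ */
+#include <complex.h>
+static void sketch_spmv_trans(const double complex *VA, const int *JA,
+		const int *RP, int n, double complex alpha,
+		const double complex *x, double complex *y)
+{
+	int i, k;
+	for (i = 0; i < n; ++i)
+	{
+		const double complex bt = alpha * x[i];	/* hoisted per-row factor */
+		for (k = RP[i]; k < RP[i+1]; ++k)
+			y[JA[k]] += VA[k] * bt;	/* y[j] += alpha * a(i,j) * x[i] */
+	}
+}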
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A must be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A must be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
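+/*
+ * Illustrative sketch (not generated code): the conjugate-transpose
+ * variant of the scatter above.  Only the stored entry is conjugated; the
+ * hoisted factor bt == alpha * x[i] stays unconjugated, exactly as in the
+ * unrolled bodies of the tC kernels.  Hypothetical plain-CSR names.
+ */
+#include <complex.h>
+static void sketch_spmv_conj_trans(const double complex *VA, const int *JA,
+		const int *RP, int n, double complex alpha,
+		const double complex *x, double complex *y)
+{
+	int i, k;
+	for (i = 0; i < n; ++i)
+	{
+		const double complex bt = alpha * x[i];
+		for (k = RP[i]; k < RP[i+1]; ++k)
+			y[JA[k]] += conj(VA[k]) * bt;	/* y[j] += alpha * conj(a(i,j)) * x[i] */
+	}
+}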
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A must be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A must be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
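+/*
+ * Illustrative sketch (not generated code): how alpha is threaded through
+ * the symmetric uxua kernels here.  The scattered side inherits alpha via
+ * the hoisted factor bt == alpha * x[i]; the gathered side accumulates
+ * unscaled and is multiplied by alpha once per row at the end, so every
+ * nonzero is scaled exactly once on each side.  Hypothetical plain-CSR
+ * names.
+ */
+#include <complex.h>
+static void sketch_spmv_sym_alpha(const double complex *VA, const int *JA,
+		const int *RP, int n, double complex alpha,
+		const double complex *x, double complex *y)
+{
+	int i, k;
+	for (i = 0; i < n; ++i)
+	{
+		double complex acc = 0;	/* unscaled gather */
+		const double complex bt = alpha * x[i];	/* pre-scaled scatter */
+		for (k = RP[i]; k < RP[i+1]; ++k)
+		{
+			const int j = JA[k];
+			acc += VA[k] * x[j];
+			if (j != i)
+				y[j] += VA[k] * bt;
+		}
+		y[i] += alpha * acc;
+	}
+}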
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A must be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A must be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A must be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A must be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A must be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A must be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
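+/*
+ * The _H_ kernels above are line-for-line the same algorithm as their
+ * _C_ twins; the only difference is that bindx holds rsb_half_idx_t
+ * column indices instead of rsb_coo_idx_t ones, halving the index
+ * bandwidth for blocks narrow enough to allow it.  Illustrative
+ * definitions only (an assumption; the real typedefs live in librsb's
+ * headers and may differ):
+ *
+ *	typedef signed int     rsb_coo_idx_t;	// full-width index
+ *	typedef unsigned short rsb_half_idx_t;	// for blocks < 2^16 columns
+ */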
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
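+/*
+ * Why the tT kernels above conjugate on the dot-product side only: when
+ * A == A^H, (A^T)_ij = a_ji = conj(a_ij) while (A^T)_ji = a_ij.  A
+ * minimal sketch of one stored entry's two contributions (hypothetical
+ * helper; bt stands for alpha*x_i, as in the kernels):
+ */
+static void herm_transpose_updates_sketch(const double complex a /* a_ij */,
+	const double complex xj, const double complex bt,
+	double complex *cacc, double complex *tout_j)
+{
+	*cacc   += conj(a)*xj;	/* row i:    (A^T)_ij == conj(a_ij) */
+	*tout_j += a*bt;	/* mirror j: (A^T)_ji == a_ij       */
+}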
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
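+/*
+ * A note on the trhs/tout pointers used throughout the symmetric and
+ * Hermitian kernels: assuming rhs and out address the block's column
+ * and row windows of the global vectors (x+coff and y+roff), the
+ * mirrored updates need the opposite windows, and rebasing by
+ * (roff-coff) and (coff-roff) reaches them without extra arguments:
+ *
+ *	trhs[i] == x[roff+i]   so bt      =  alpha*x[roff+i]
+ *	tout[j] == y[coff+j]   so tout[j] += ...  lands in y[coff+j]
+ *
+ * This reading is inferred from the kernels above, not a statement of
+ * librsb's documented calling convention.
+ */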
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here; it is dealt with at the caller's level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
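+/*
+ * The loop shape shared by all of these kernels: a four-way unrolled
+ * main loop followed by a scalar remainder loop that mops up at most
+ * three trailing elements.  Generic sketch of the pattern on a plain
+ * dot product (hypothetical helper):
+ */
+static double complex dot4_sketch(const double complex *a,
+	const double complex *b, const rsb_nnz_idx_t n)
+{
+	double complex acc = 0;
+	rsb_nnz_idx_t k;
+
+	for(k=0;k+3<n;k+=4)	/* main loop: four independent products */
+		acc += a[k+0]*b[k+0] + a[k+1]*b[k+1]
+		     + a[k+2]*b[k+2] + a[k+3]*b[k+3];
+	for(    ;k<n;++k)	/* remainder loop */
+		acc += a[k]*b[k];
+	return acc;
+}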
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here; it is dealt with at the caller's level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
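+/*
+ * The dI kernels skip the implicit diagonal entirely, as the NOTE in
+ * each one says.  Assuming the implicit diagonal is the identity (the
+ * usual convention for unit-triangular storage; an assumption, not a
+ * statement of librsb's semantics), the caller's complementary step
+ * would look like this hypothetical helper:
+ */
+static void add_unit_diagonal_sketch(double complex *out,
+	const double complex *rhs, const rsb_coo_idx_t n,
+	const double complex alpha)
+{
+	rsb_coo_idx_t i;
+
+	for(i=0;i<n;++i)
+		out[i] += alpha*rhs[i];	/* y += alpha*I*x */
+}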
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here; it is dealt with at the caller's level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
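+/*
+ * Transposed SpMV over row-major storage, as in the tT kernels above:
+ * each stored row is a column of A^T, so instead of a gather into one
+ * output element the kernel scatters into y[bindx[k]].  Hoisting
+ * bt = alpha*x_i out of the inner loop saves a multiply per nonzero.
+ * Sketch of one row's worth of work (hypothetical helper):
+ */
+static void spmv_transposed_row_sketch(const double complex *VA,
+	const rsb_coo_idx_t *bindx, const rsb_nnz_idx_t fk,
+	const rsb_nnz_idx_t lk, const double complex bt /* alpha*x[i] */,
+	double complex *y)
+{
+	rsb_nnz_idx_t k;
+
+	for(k=fk;RSB_LIKELY(k<lk);++k)
+		y[bindx[k]] += VA[k]*bt;	/* y_j += a_ij*alpha*x_i */
+}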
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here; it is dealt with at the caller's level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here; it is dealt with at the caller's level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
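+/*
+ * RSB_LIKELY, used in every loop header above, marks the continuation
+ * test as the branch to predict taken.  On GCC-compatible compilers a
+ * definition of this kind is typical (an assumption; librsb's actual
+ * definition lives in its headers and falls back to a no-op where
+ * __builtin_expect is unavailable):
+ *
+ *	#define RSB_LIKELY(expr) __builtin_expect(!!(expr),1)
+ */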
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here; it is dealt with at the caller's level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here; it is dealt with at the caller's level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
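+/*
+ * Why the symmetric kernels peel the first and last stored entry of a
+ * row out of the unrolled loop: with triangular storage a diagonal
+ * entry, when present, sits at one end of the row, so only the peeled
+ * steps need the (roff!=coff || j!=i) guard that keeps the diagonal
+ * from being applied twice.  One peeled step, schematically
+ * (hypothetical helper):
+ */
+static void symmetric_peeled_step_sketch(const double complex a,
+	const rsb_coo_idx_t i, const rsb_coo_idx_t j,
+	const rsb_coo_idx_t roff, const rsb_coo_idx_t coff,
+	const double complex xj, const double complex bt,
+	double complex *cacc, double complex *tout)
+{
+	*cacc += a*xj;			/* row-i contribution, unconditional */
+	if(roff!=coff || (j!=i))	/* mirror only off the true diagonal */
+		tout[j] += a*bt;
+}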
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here; it is dealt with at the caller's level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here; it is dealt with at the caller's level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here; it is dealt with at the caller's level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here; it is dealt with at the caller's level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
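+/*
+ * On the placement of conj() in the conjugating kernels above: it is
+ * applied to the matrix entry alone, conj(VA[k])*..., in both the
+ * unrolled bodies and the peeled edge steps.  Since conj(a*b) equals
+ * conj(a)*conj(b), wrapping the whole product would wrongly conjugate
+ * alpha and x as well.  A tiny illustrative check:
+ */
+static int conj_placement_check(void)
+{
+	const double complex a = 1.0+2.0*I, b = 3.0-1.0*I;
+
+	/* conj(a)*b == 1.0-7.0*I, whereas conj(a*b) == 5.0-5.0*I */
+	return conj(a)*b != conj(a*b);	/* nonzero: the two differ */
+}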
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here; it is dealt with at the caller's level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here; it is dealt with at the caller's level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here; it is dealt with at the caller's level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here; it is dealt with at the caller's level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^T \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here; it is dealt with at the caller's level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not really handled here; it is dealt with at the caller's level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
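+		/* plain CSR row kernel: a 4-way unrolled dot product of the stored
+		 * row with rhs, subtracted from out[i] once per row (the unua
+		 * variants apparently hardwire alpha = -1) */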
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
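+		/* transposed kernel: no row dot product; each stored a_ij scatters
+		 * a_ij*bt into tout[j], since row i of A is column i of A^T */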
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
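+		/* conjugate-transposed kernel: same scatter pattern as the ^T case,
+		 * with each VA entry conjugated once at load */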
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
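+		/* symmetric kernel: each stored a_ij updates both out[i] (via cacc)
+		 * and the mirror tout[j]; the (roff!=coff || j!=i) guard on the
+		 * peeled entries keeps a diagonal entry from being mirrored twice */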
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
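+		/* since A == A^T here, A^H = conj(A): both the row gather into cacc
+		 * and the mirrored scatter into tout use conj(a) */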
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1]; /* conjugate VA[k] only, matching the unrolled loop below */
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
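+		/* hermitian case: the stored entry a_ij feeds out[i] directly, while
+		 * the mirrored update uses conj(a_ij), as a_ji = conj(a_ij) */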
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
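+		/* apart from the implicit diagonal (presumably handled at the caller
+		 * level), this body matches the diagonal-explicit variant above */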
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
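+/*
+ * NOTE: the tC kernels differ from the tT ones above only in conjugating
+ * each loaded entry, so the scattered update becomes, as a sketch,
+ *
+ *   out[bindx[k]] -= conj(VA[k]) * rhs[i];
+ *
+ * which realizes y <- y - A^H * x one stored entry at a time.
+ */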
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
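+/*
+ * NOTE: in the symmetric (sS) kernels each stored entry at (i,j) stands for
+ * both A(i,j) and A(j,i), so it contributes twice per visit: once to the
+ * row accumulator cacc and once, mirrored, to tout.  The first and last
+ * nonzero of each row are handled outside the unrolled loop, guarded by
+ * (roff!=coff || j!=i) so that a diagonal entry is not mirrored onto
+ * itself; the unrolled middle loop mirrors unconditionally.  Per entry,
+ * as a sketch:
+ *
+ *   cacc += VA[k] * rhs[j];      // row i side
+ *   tout[j] += VA[k] * bt;       // mirrored side, bt = -x[i]
+ */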
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
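+/*
+ * NOTE: for A = A^T the transposed product coincides with the untransposed
+ * one, so this tT body matches the tN body above entry for entry.
+ */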
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
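+/*
+ * NOTE: for symmetric A the conjugate transpose reduces to elementwise
+ * conjugation, A^H = conj(A), so both sides of each stored entry are
+ * applied conjugated; as a sketch:
+ *
+ *   cacc += conj(VA[k]) * rhs[j];
+ *   tout[j] += conj(VA[k]) * bt;
+ */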
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
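+/*
+ * NOTE: in the Hermitian (sH) kernels a stored entry at (i,j) stands for
+ * A(i,j) and A(j,i) = conj(A(i,j)), so only the mirrored update is
+ * conjugated; as a sketch:
+ *
+ *   cacc += VA[k] * rhs[j];           // uses A(i,j) directly
+ *   tout[j] += conj(VA[k]) * bt;      // uses A(j,i) = conj(A(i,j))
+ */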
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
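+/*
+ * NOTE: when A = A^H the plain transpose satisfies A^T = conj(A), so
+ * relative to the tN case above the conjugation swaps sides: the row dot
+ * product uses conj(VA[k]) while the mirrored update uses VA[k] as is.
+ */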
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y - {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here, but at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+1*(roff-coff);
+	double complex *tout=out+1*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(-1)*trhs[(1*1*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*1];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*1]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*1];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*1];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*1];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*1]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*1]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*1]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*1];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*1]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*1];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*1]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*1)]+=(-1)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
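+/*
+ * NOTE: the "sasa" kernels are the accumulating counterparts of the "unua"
+ * ones: they compute y <- y + op(A) * x with no sign folding, and honor the
+ * incx/incy strides on both vectors; as a sketch:
+ *
+ *   cacc += a[k] * rhs[bindx[k] * incx];
+ *   ...
+ *   out[i * incy] += cacc;
+ */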
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
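+/*
+ * NOTE: apart from the explicit diagonal (dE), the positive sign and the
+ * strides, this transposed kernel scatters exactly like its unua/dI
+ * counterpart: tout[bindx[k]*incy] += VA[k] * bt, with bt = trhs[i*incx].
+ */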
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
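+/*
+ * Conjugate transpose of a symmetric matrix is just its elementwise
+ * conjugate (A^H = conj(A) when A = A^T), so the tC kernels below
+ * conjugate the stored entry only, never the vector operands.
+ * Per nonzero (sketch, unit strides assumed):
+ *
+ *	cacc += conj(VA[k])*rhs[j];	// y_i += conj(a_ij) * x_j
+ *	tout[j] += conj(VA[k])*bt;	// y_j += conj(a_ij) * x_i
+ */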
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
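+/*
+ * Hermitian (sH) storage keeps a single triangle and reconstructs the
+ * other one as a_ji = conj(a_ij).  The tN kernels below therefore
+ * accumulate the direct term unconjugated and conjugate only the
+ * mirrored scatter.  Per nonzero (sketch, unit strides assumed):
+ *
+ *	cacc += VA[k]*rhs[j];		// y_i += a_ij * x_j
+ *	tout[j] += conj(VA[k])*bt;	// y_j += conj(a_ij) * x_i
+ */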
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
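+/*
+ * The unsymmetric (sU) kernels below have no mirrored term: each row is a
+ * plain CSR dot product, so the whole row can go through the unrolled
+ * loop.  Rolled up, assuming unit strides, each row reduces to:
+ *
+ *	double complex cacc = 0;
+ *	for(k=bpntr[i];k<bpntr[i+1];++k)
+ *		cacc += VA[k]*rhs[bindx[k]];
+ *	out[i] += cacc;
+ */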
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is dealt with at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is dealt with at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
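+/*
+ * The transposed unsymmetric product is computed by scattering: stored row
+ * i is column i of A^T, so instead of accumulating a row sum each nonzero
+ * updates the output directly (sketch, unit strides assumed):
+ *
+ *	const double complex bt = trhs[i];
+ *	for(k=bpntr[i];k<bpntr[i+1];++k)
+ *		tout[bindx[k]] += VA[k]*bt;	// y_j += a_ij * x_i
+ */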
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is dealt with at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is dealt with at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
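+/*
+ * The conjugate-transpose (tC) variant is the same scatter with the stored
+ * entry conjugated up front: tout[bindx[k]] += conj(VA[k])*bt;
+ */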
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is dealt with at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is dealt with at the caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
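+/*
+ * The diagonal-implicit (dI) symmetric and Hermitian kernels below reuse
+ * the diagonal-explicit loop bodies verbatim: as each NOTE states, the
+ * implicit diagonal is applied at the caller level, not inside these
+ * loops.
+ */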
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is dealt with at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is dealt with at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is dealt with at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is dealt with at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is dealt with at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not actually handled here; it is dealt with at the caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
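+/*
+ * For reference: the symmetric tC kernels above compute, per stored entry
+ * a_ij, the direct contribution y_i += conj(a_ij)*x_j and the mirror
+ * contribution y_j += conj(a_ij)*x_i, since A == A^T makes A^H == conj(A).
+ * The first and last stored element of each row are peeled so that a
+ * diagonal entry, assumed to occur only in those positions, skips its
+ * mirror update; the middle elements are 4-way unrolled. A minimal
+ * non-unrolled sketch of the same loop, under the argument conventions
+ * above (kept out of compilation, for illustration only):
+ */
+#if 0
+	for(i=br;i<bc;++i)
+	{
+		double complex cacc = 0;
+		const double complex bt=trhs[(incx)*(i)];
+		for(k=bpntr[i];k<bpntr[i+1];++k)
+		{
+			const rsb_coo_idx_t j=bindx[k];
+			cacc += conj(VA[k])*rhs[(j)*(incx)];	/* y_i += conj(a_ij)*x_j */
+			if(roff!=coff || j!=i)	/* a diagonal entry has no mirror */
+				tout[(j)*(incy)] += conj(VA[k])*bt;	/* y_j += conj(a_ij)*x_i */
+		}
+		out[(i)*(incy)] += cacc;
+	}
+#endif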
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
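+/*
+ * Note the asymmetry in the Hermitian tN kernels above: each stored a_ij
+ * yields y_i += a_ij*x_j for the direct update but y_j += conj(a_ij)*x_i
+ * for the mirror, since A == A^H means a_ji == conj(a_ij). For example,
+ * with the single stored off-diagonal entry a_10 = 2+I: y_1 += (2+I)*x_0
+ * and y_0 += (2-I)*x_1.
+ */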
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^T} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow y + {A^H} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: the implicit diagonal is not handled here; it is handled at the caller level. */
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
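+/*
+ * The unsymmetric tN kernels above are pure gather kernels: each row is
+ * reduced by a sparse dot product, alpha is applied once per row, and
+ * nothing is scattered. A self-contained sketch of the same computation
+ * on a plain int-indexed CSR triple (local names, not librsb API; kept
+ * out of compilation):
+ */
+#if 0
+#include <complex.h>
+static void csr_spmv_axpy(int nr, const int *bpntr, const int *bindx,
+		const double complex *VA, double complex alpha,
+		const double complex *x, double complex *y)
+{
+	int i,k;
+	for(i=0;i<nr;++i)
+	{
+		double complex cacc = 0;
+		for(k=bpntr[i];k<bpntr[i+1];++k)
+			cacc += VA[k]*x[bindx[k]];	/* gather row i */
+		y[i] += alpha*cacc;	/* y_i += alpha*(A*x)_i */
+	}
+}
+#endif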
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
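+/*
+ * The tT kernels above are the dual, scatter form: alpha is folded once
+ * per row into bt = alpha*x_i, and each stored a_ij then adds a_ij*bt to
+ * y_j, so row i of A drives column i of A^T. A non-unrolled sketch under
+ * the argument conventions above (illustration only, not compiled in):
+ */
+#if 0
+	for(i=br;i<bc;++i)
+	{
+		const double complex bt=(alpha)*trhs[(incx)*(i)];
+		for(k=bpntr[i];k<bpntr[i+1];++k)
+			tout[(bindx[k])*(incy)] += VA[k]*bt;	/* scatter along row i */
+	}
+#endif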
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
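+/*
+ * In the tC variants above the only difference from the tT form is that
+ * each matrix entry is conjugated once as it is loaded, a_0 = conj(VA[k]),
+ * before the scatter; bt = alpha*x_i itself is never conjugated, so the
+ * update realizes y += alpha*A^H*x rather than a conjugated product.
+ */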
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
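+/*
+ * In the symmetric sxsa kernels above alpha reaches each contribution
+ * exactly once, by two different routes: the scattered mirror updates use
+ * bt = alpha*x_i directly, while the gathered row sum is scaled only at
+ * the end, out[i] += alpha*cacc. Folding alpha into the gathered terms as
+ * well would scale them twice.
+ */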
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
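Each kernel in this file comes as a `_C_`/`_H_` pair differing only in the declared type of bindx: rsb_coo_idx_t against the narrower rsb_half_idx_t, which roughly halves the index traffic of the inner loop whenever a block's column range fits the half-width type. A sketch of the narrowing step, assuming a 16-bit half index and block-relative column values (the type and function names are stand-ins, not library API):

#include <stdint.h>

typedef int32_t  full_idx_t;  /* stands in for rsb_coo_idx_t  */
typedef uint16_t half_idx_t;  /* stands in for rsb_half_idx_t */

/* Narrow the column indices of one block: values are stored
 * relative to the block's column offset, so only the block
 * width, not the matrix size, must fit in 16 bits. */
static void compress_indices(const full_idx_t *ja, half_idx_t *hja,
	long nnz, full_idx_t coff)
{
	for (long k = 0; k < nnz; ++k)
		hja[k] = (half_idx_t)(ja[k] - coff);  /* assumes ja[k] - coff < 65536 */
}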
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
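For A = A^H the three transposition variants are tied together by A^H = A and A^T = conj(A); that is why the tT bodies above conjugate the dot-part factor (conj(a)*b into cacc, plain a*bt into tout) while the tC bodies do the opposite, both walking the same stored triangle. A self-contained check of the identity being relied on, on a hand-written 2 x 2 Hermitian matrix:

#include <assert.h>
#include <complex.h>

/* Verify A^T == conj(A) for a Hermitian A, i.e. that transposing
 * the stored triangle is the same as conjugating its entries. */
int main(void)
{
	const double complex A[2][2] = {
		{ 3.0,          1.0 + 2.0*I },
		{ 1.0 - 2.0*I,  4.0         },  /* a_10 == conj(a_01) */
	};
	for (int i = 0; i < 2; ++i)
		for (int j = 0; j < 2; ++j)
			assert(A[j][i] == conj(A[i][j]));
	return 0;
}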
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex *a=VA;
+		register double complex cacc = ((double complex)(0));
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =a[k+0 ];
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =a[k+1 ];
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =a[k+2 ];
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =a[k+3 ];
+			cacc+=a_0 *b_0 ;
+			cacc+=a_1 *b_1 ;
+			cacc+=a_2 *b_2 ;
+			cacc+=a_3 *b_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =a[k+0 ];
+			cacc+=a_0 *b_0 ;
+}
+}
+
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
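The two tN kernels just above reduce to a plain CSR row dot product, hand-unrolled by four with a scalar remainder loop so that four independent multiply-accumulate chains are in flight per iteration. The same shape in miniature, assuming CSR arrays and unit strides (names illustrative):

#include <complex.h>

/* Row dot product, unrolled by four with a remainder loop --
 * the shape of the tN kernels above. */
static double complex row_dot_unroll4(const int *ja,
	const double complex *va, int fk, int lk, const double complex *x)
{
	double complex acc = 0;
	int k = fk;
	for (; k + 3 < lk; k += 4)  /* four independent products */
		acc += va[k]   * x[ja[k]]   + va[k+1] * x[ja[k+1]]
		     + va[k+2] * x[ja[k+2]] + va[k+3] * x[ja[k+3]];
	for (; k < lk; ++k)         /* remainder */
		acc += va[k] * x[ja[k]];
	return acc;
}

The caller then closes the row with y[i] += alpha*acc, just as out[(1*i*(incy))]+=(alpha)*cacc does above.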
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
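The tT kernels just above compute y += alpha*A^T*x without materializing A^T: each stored a_ij of row i is scattered into the output at index j with the hoisted factor bt = alpha*x[i], and the trhs/tout pointers are pre-offset by (roff-coff) so that block-local indices address the transposed operand. In miniature, again assuming plain CSR and unit strides:

#include <complex.h>

/* y += alpha * A^T x by scattering along the rows of A --
 * the shape of the tT kernels above. */
static void spmv_transpose_scatter(int n, const int *ia, const int *ja,
	const double complex *va, const double complex *x,
	double complex *y, double complex alpha)
{
	for (int i = 0; i < n; ++i) {
		const double complex bt = alpha * x[i];  /* hoisted row factor  */
		for (int k = ia[i]; k < ia[i + 1]; ++k)
			y[ja[k]] += va[k] * bt;              /* scatter into column */
	}
}

The tC variants that follow are identical except that va[k] is conjugated before the multiply.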
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A \neq A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	register rsb_coo_idx_t i=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+{
+for(k=fk;k+3<lk;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex a_1 =conj(VA[k+1 ]);
+			double complex c_1 =a_1 *bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex a_2 =conj(VA[k+2 ]);
+			double complex c_2 =a_2 *bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex a_3 =conj(VA[k+3 ]);
+			double complex c_3 =a_3 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex a_0 =conj(VA[k+0 ]);
+			double complex c_0 =a_0 *bt;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
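All dI kernels repeat the note that the implicit diagonal is "not really handled here: look at caller level": they stream only the stored off-diagonal entries, so a caller combining them with an implicit unit diagonal has to add that term itself. Under that reading, a hypothetical caller-side step (not a library function):

#include <complex.h>

/* What a caller adds on top of a dI kernel when the matrix has an
 * implicit unit diagonal: y += alpha * I * x. */
static void add_implicit_unit_diagonal(int n, const double complex *x,
	double complex *y, double complex alpha)
{
	for (int i = 0; i < n; ++i)
		y[i] += alpha * x[i];
}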
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^T\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^T} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += conj(VA[k])*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=VA[k]*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =( a_3 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += conj(a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += conj(a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += conj(a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =( a_0 )*bt;
+			cacc += conj(a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += conj(VA[k])*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=VA[k]*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot {A^H} \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
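
In the function above, Hermitian storage lets a single stored triangle serve
both a row and its mirrored column: each stored entry a at (i,j) adds a*x[j]
to the row accumulator cacc, and scatters conj(a)*alpha*x[i] into tout, with
the guard roff!=coff || j!=i preventing diagonal entries from being counted
twice. A minimal sketch of the same update rule on a toy Hermitian matrix in
COO form (illustrative code, not librsb's):

	#include <complex.h>
	#include <stdio.h>

	int main(void)
	{
		/* Lower triangle of a 3x3 Hermitian matrix, COO storage. */
		enum { NNZ = 4, N = 3 };
		const int ri[NNZ] = {0, 1, 2, 2};
		const int ci[NNZ] = {0, 0, 1, 2};
		const double complex va[NNZ] = {2.0, 1.0 + 1.0*I, 3.0 - 2.0*I, 5.0};
		const double complex alpha = 1.0;
		const double complex x[N] = {1.0, 1.0*I, -1.0};
		double complex y[N] = {0.0, 0.0, 0.0};
		int k;

		for (k = 0; k < NNZ; ++k) {
			const int i = ri[k], j = ci[k];
			y[i] += alpha * va[k] * x[j];	/* row part (cacc) */
			if (i != j)			/* diagonal guard  */
				y[j] += alpha * conj(va[k]) * x[i];	/* mirror (tout) */
		}
		for (k = 0; k < N; ++k)
			printf("y[%d] = %g%+gi\n", k, creal(y[k]), cimag(y[k]));
		return 0;
	}
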
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \beta \cdot y + \alpha \cdot A^H \cdot x\f$, where \f$A = A^H\f$,
+	 * with incx and incy as the x and y vector strides.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	/* NOTE: Diagonal implicit is not really handled here: look at caller level. */
+	/*
+	*/
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const double complex alpha=*alphap;
+	const double complex *trhs = rhs+(incx)*(roff-coff);
+	double complex *tout=out+(incy)*(coff-roff);
+
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		register double complex cacc = ((double complex)(0));
+		const double complex bt=(alpha)*trhs[(1*(incx)*(i))];
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+			k=fk;
+			if(k==lk)continue;
+			j=bindx[k];
+			cacc += VA[k]*rhs[1*j*(incx)];
+			if(roff!=coff || (j!=i))
+				tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+			++k;
+{
+for(k=fk+1;k+3<lk-1;k+=4){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			const rsb_coo_idx_t j_1 =bindx[k+1 ];
+			const double complex b_1 =rhs[1*(j_1 )*(incx)];
+			const double complex a_1 =VA[k+1 ];
+			double complex c_1 =conj( a_1 )*bt;
+			const rsb_coo_idx_t j_2 =bindx[k+2 ];
+			const double complex b_2 =rhs[1*(j_2 )*(incx)];
+			const double complex a_2 =VA[k+2 ];
+			double complex c_2 =conj( a_2 )*bt;
+			const rsb_coo_idx_t j_3 =bindx[k+3 ];
+			const double complex b_3 =rhs[1*(j_3 )*(incx)];
+			const double complex a_3 =VA[k+3 ];
+			double complex c_3 =conj( a_3 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+			cacc += (a_1 )*b_1 ;
+			tout[(1)*(j_1 )*(incy)]+=c_1 ;
+			cacc += (a_2 )*b_2 ;
+			tout[(1)*(j_2 )*(incy)]+=c_2 ;
+			cacc += (a_3 )*b_3 ;
+			tout[(1)*(j_3 )*(incy)]+=c_3 ;
+}
+for(     ;k<lk-1;++k){
+			const rsb_coo_idx_t j_0 =bindx[k+0 ];
+			const double complex b_0 =rhs[1*(j_0 )*(incx)];
+			const double complex a_0 =VA[k+0 ];
+			double complex c_0 =conj( a_0 )*bt;
+			cacc += (a_0 )*b_0 ;
+			tout[(1)*(j_0 )*(incy)]+=c_0 ;
+}
+}
+
+			if(k<lk)
+			{
+				j=bindx[k];
+				cacc += VA[k]*rhs[1*j*(incx)];
+				if(roff!=coff || (j!=i))
+					tout[(1)*(j)*(incy)]+=conj(VA[k])*bt;
+				++k;
+			}
+			out[(1*i*(incy))]+=(alpha)*cacc;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
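
The _H_ variant above differs from the preceding _C_ variant only in the type
of bindx: rsb_half_idx_t instead of rsb_coo_idx_t column indices. Assuming
rsb_half_idx_t is a 16-bit unsigned type (used for submatrices narrow enough
that their column indices fit in it), the kernel's index stream shrinks by
half, which is the point of keeping both variants. A trivial sketch of the
saving, with stand-in typedefs rather than librsb's headers:

	#include <stdio.h>

	typedef unsigned short half_idx_t;	/* stand-in for rsb_half_idx_t */
	typedef int coo_idx_t;			/* stand-in for rsb_coo_idx_t  */

	int main(void)
	{
		const long nnz = 1000000L;	/* one million stored entries */
		printf("full index array: %ld bytes\n", nnz * (long)sizeof(coo_idx_t));
		printf("half index array: %ld bytes\n", nnz * (long)sizeof(half_idx_t));
		return 0;
	}
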
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
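
The overflow bound quoted in these dispatcher comments is simply the padding
that rounds the matrix dimensions up to whole blocks; for the 1 x 1 blocks
dispatched here it is always zero. A one-line check of the formula
(hypothetical helper, not part of librsb):

	#include <stdio.h>

	/* mod(b - mod(n,b), b): elements needed to round n up to a multiple of b */
	static int block_pad(int n, int b) { return (b - n % b) % b; }

	int main(void)
	{
		printf("%d\n", block_pad(10, 4));	/* 2: 10 rows, 4-row blocks  */
		printf("%d\n", block_pad(10, 1));	/* 0: 1x1 blocks need no pad */
		return 0;
	}
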
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uaua") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
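+/*
+ * Editorial note on the generated names (an interpretation, not upstream
+ * documentation): the `_C_' and `_H_' variants differ only in the type of
+ * the bindx column-index array, as their prototypes show: full-width
+ * rsb_coo_idx_t (C) versus rsb_half_idx_t (H).  The remaining tokens
+ * appear to encode the transposition (tN/tT/tC), the symmetry flavour
+ * (sU/sS/sH) and the diagonal handling (dE/dI), with `uu'/`ul' naming the
+ * unrolled and looped kernel bodies respectively.
+ */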
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uaua") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
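+/*
+ * Editorial note: as the dispatchers above make explicit, only the 1 x 1
+ * fixed block size has a dedicated kernel in this build; any other
+ * (rows, columns) pair reaches the `default' branches, which either call
+ * the generic looped (`ul') kernel when RSB_WANT_LOOPING_KERNELS is
+ * defined, or return RSB_ERR_UNSUPPORTED_OPERATION when it is not.
+ */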
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uaua") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uaua") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uaua") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uaua") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uaua") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uaua") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uaua") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uaua") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uaua") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uaua") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
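+/*
+ * Editorial note: from here on the same dispatch scaffold is instantiated
+ * for the "spmv_uauz" operation, over the same index-type / transposition /
+ * symmetry / diagonal grid as "spmv_uaua" above.  (Presumably the two
+ * operation suffixes select different output-accumulation behaviour in the
+ * kernel bodies, which are not part of this hunk; that reading is an
+ * editorial assumption.)
+ */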
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uauz") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
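+/*
+ * Editorial sketch of a hypothetical caller honouring the padding bound
+ * documented above (names are illustrative assumptions, not librsb API;
+ * calloc() is from <stdlib.h>):
+ *
+ *     size_t pad(size_t n, size_t b) { return (b - n % b) % b; }
+ *     double *out = calloc(nrows + pad(nrows, br), sizeof(double));
+ *     double *rhs = calloc(ncols + pad(ncols, bc), sizeof(double));
+ *     // ... fill rhs, then invoke the dispatcher matching the matrix's
+ *     // index type, transposition, symmetry and diagonal flags ...
+ */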
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uauz") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uauz") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uauz") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uauz") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uauz") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uauz") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uauz") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uauz") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uauz") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uauz") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uauz") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the desired
+	 * matrix operation ("spmv_uauz") for the current fixed block size,
+	 * falling back to a generic looped kernel (when compiled in) for any
+	 * other block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+			break;
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		break;
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
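+
+/*
+ * In every dispatcher the 1x1 case selects the fully unrolled _uu_ kernel,
+ * while any other block shape falls back to the looped _ul_ kernel, which
+ * is only available when RSB_WANT_LOOPING_KERNELS is defined; otherwise
+ * RSB_ERR_UNSUPPORTED_OPERATION is returned.
+ */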
+
+
+
+
+
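+/*
+ * The *_H_* dispatchers below differ from their *_C_* counterparts only in
+ * the type of bindx: rsb_half_idx_t instead of rsb_coo_idx_t (see the
+ * signatures), presumably halving the storage for block column indices.
+ */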
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uauz") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uauz") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
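+/*
+ * The tN/tT/tC infix apparently follows the BLAS transposition convention:
+ * operate on the matrix itself, its transpose, or its conjugate transpose;
+ * for the real double type the tC variants should coincide with tT.
+ */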
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uauz") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uauz") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uauz") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uauz") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uauz") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uauz") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uauz") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uauz") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
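+/*
+ * The sS/sH/sU infix appears to encode the assumed symmetry: symmetric,
+ * hermitian, or unsymmetric storage; for the real double type the hermitian
+ * (sH) dispatchers should behave like the symmetric (sS) ones above.
+ */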
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uauz") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uauz") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uauz") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uauz") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uauz") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uauz") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uauz") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uauz") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uauz") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uauz") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uauz") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uauz") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
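+/*
+ * The spmv_uxua dispatchers below take one extra argument, alphap, which is
+ * forwarded verbatim to the kernels; judging by the added parameter it
+ * supplies the scalar by which the product is scaled, whereas the spmv_uauz
+ * variants above compute an unscaled product into a zeroed output.
+ * A minimal, purely illustrative call for the 1x1 (plain CSR) case follows:
+ * passing NULL for rpntr and cpntr makes the dispatcher assume unit blocks.
+ */
+#if 0
+	const double alpha = 2.0;	/* hypothetical scaling factor */
+	errval = rsb__BCSR_spmv_uxua_double_C_u_tN_sU_dE_uG(VA, rhs, out,
+			Mdim, mdim, bindx, bpntr, indptr, NULL, NULL,
+			1, 1, 0, 0, flags, &alpha);
+#endif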
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uxua") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uxua") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uxua") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for performing the
+	 * requested matrix operation ("spmv_uxua") with the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
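+
+/*
+ * The `H' flavours above differ from the `C' ones only in taking
+ * rsb_half_idx_t block-column indices; presumably the narrower index type
+ * halves the index-stream memory traffic for submatrices small enough to be
+ * addressed with half-width indices, the usual motivation for such variants.
+ */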
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
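+
+/*
+ * For the real `double' type handled here, the conjugate-transpose (`tC')
+ * dispatchers that follow are numerically identical to the transposed
+ * (`tT') ones, conjugation being the identity on real values; they are
+ * presumably emitted anyway so that every numerical type shares one
+ * dispatch surface.
+ */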
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
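+
+/*
+ * The symmetric (`sS') dispatchers that follow presumably assume, as is
+ * conventional, that only one triangle of the matrix is stored: each stored
+ * off-diagonal a_ij then contributes a_ij*x_j to y_i and a_ij*x_i to y_j,
+ * so one sweep over the stored blocks yields the full product.
+ */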
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
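+
+/*
+ * For real `double' data the hermitian (`sH') variants that follow coincide
+ * mathematically with the symmetric ones, since conjugation is the identity
+ * on the reals; the generator presumably emits them regardless, for
+ * uniformity across numerical types.
+ */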
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the "spmv_uxua" matrix operation at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
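+
+/*
+ * A minimal sketch of what an "r1_c1" (1 x 1 block, i.e. plain CSR) kernel
+ * of the "spmv_uxua" family plausibly computes in the untransposed case:
+ * out += alpha * A * rhs, with bpntr as row pointers and bindx as column
+ * indices. A sketch under stated assumptions, not the generated kernel
+ * body; roff/coff, transpose, symmetry and diagonal variants are omitted,
+ * and the function name is hypothetical.
+ */
+#if 0 /* illustrative sketch only */
+static void rsb_spmv_uxua_csr_sketch(const double *VA, const double *rhs,
+		double *out, rsb_coo_idx_t Mdim, const rsb_coo_idx_t *bindx,
+		const rsb_nnz_idx_t *bpntr, const double *alphap)
+{
+	const double alpha = alphap ? *alphap : 1.0;
+	rsb_coo_idx_t i;
+	rsb_nnz_idx_t k;
+
+	for(i = 0; i < Mdim; ++i)			/* one 1 x 1 block row per row */
+		for(k = bpntr[i]; k < bpntr[i+1]; ++k)	/* nonzeroes of row i */
+			out[i] += alpha * VA[k] * rhs[bindx[k]];
+}
+#endif /* 0 */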
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
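+
+/*
+ * The "spmv_unua" dispatchers below mirror the "spmv_uxua" ones above but
+ * take no alphap argument. Judging from the signatures alone, the name
+ * infixes appear to encode the variant: "C"/"H" select full
+ * (rsb_coo_idx_t) versus half (rsb_half_idx_t) column indices,
+ * "tN"/"tT"/"tC" plausibly a transpose flavor, "sU"/"sS"/"sH" a symmetry
+ * flavor, "dE"/"dI" a diagonal-handling flavor, and "uu"/"ul" the
+ * fixed-size versus looping kernel (the latter available only with
+ * RSB_WANT_LOOPING_KERNELS). This reading is an inference from the code,
+ * not upstream documentation.
+ */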
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
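+
+/*
+ * A minimal sketch of how a symmetric-storage ("sS") variant of the
+ * "spmv_unua" update could behave, assuming "unua" denotes the unscaled
+ * negating update out -= A * rhs and that only one triangle of A is
+ * stored: each off-diagonal entry also contributes, transposed, to its
+ * column. A hedged illustration only; the generated kernels' exact
+ * semantics are not shown in this file, and the function name below is
+ * hypothetical.
+ */
+#if 0 /* illustrative sketch only */
+static void rsb_spmv_unua_sym_csr_sketch(const double *VA, const double *rhs,
+		double *out, rsb_coo_idx_t Mdim, const rsb_coo_idx_t *bindx,
+		const rsb_nnz_idx_t *bpntr)
+{
+	rsb_coo_idx_t i;
+	rsb_nnz_idx_t k;
+
+	for(i = 0; i < Mdim; ++i)
+		for(k = bpntr[i]; k < bpntr[i+1]; ++k)
+		{
+			const rsb_coo_idx_t j = bindx[k];
+			out[i] -= VA[k] * rhs[j];
+			if(j != i)	/* mirror the stored off-diagonal entry */
+				out[j] -= VA[k] * rhs[i];
+		}
+}
+#endif /* 0 */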
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of, respectively, at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
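+
+/* Editorial note: the "_C_"/"_H_" name infix tracks the type of bindx in the
+ * signatures above: full-width rsb_coo_idx_t block-column indices versus the
+ * narrower rsb_half_idx_t ("halfword") ones, which shrink the index array
+ * whenever the column range fits the narrower type.  A caller-side width
+ * check might look as follows (sketch only; assumes rsb_half_idx_t is an
+ * unsigned type, which is not asserted by this file):
+ */
+#if 0 /* illustrative only; kept out of the build */
+static int rsb_example_fits_halfword(rsb_coo_idx_t ncols)
+{
+	/* largest column index vs. largest representable halfword index */
+	return ncols > 0 && ncols - 1 <= (rsb_coo_idx_t)(rsb_half_idx_t)-1;
+}
+#endif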
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_unua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_unua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
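+
+/* Editorial note: the "_tN_"/"_tT_"/"_tC_" infix presumably selects the
+ * transposition variant (none, transpose, conjugate transpose); for the real
+ * "double" type of this file a conjugate-transpose kernel coincides
+ * numerically with the transpose one.  This reading is inferred from the
+ * naming scheme only. */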
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_unua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_unua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_unua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_unua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
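+
+/* Editorial note: likewise, "_sS_"/"_sH_"/"_sU_" presumably select the
+ * symmetry handling (symmetric, hermitian, unsymmetric), and "_dE_"/"_dI_" a
+ * diagonal variant (explicit versus implicit).  Both readings are inferred
+ * from the naming scheme only. */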
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_unua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_unua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_unua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_unua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_unua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_unua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_unua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_unua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_unua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_unua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_unua").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_sasa").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
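+
+/* Editorial note: unlike the spmv_unua dispatchers above, the spmv_sasa ones
+ * carry two extra incx/incy arguments, forwarded unchanged to the leaf
+ * kernels; by analogy with BLAS these are presumably the element strides of
+ * the rhs and out vectors (1 for contiguous storage).  Call shape only
+ * (hypothetical data; the stride reading is inferred, not stated upstream):
+ */
+#if 0 /* illustrative only; kept out of the build */
+static rsb_err_t rsb_example_call_strided(void)
+{
+	double VA[1] = { 2.0 }, x[2] = { 3.0, -1.0 }, y[2] = { 0.0, 0.0 };
+	const rsb_coo_idx_t bindx[1] = { 0 };
+	const rsb_nnz_idx_t bpntr[2] = { 0, 1 }, indptr[2] = { 0, 1 };
+	/* stride 2 would touch every other element of x and y: */
+	return rsb__BCSR_spmv_sasa_double_C_u_tN_sU_dE_uG(VA, x, y, 1, 1, bindx,
+		bpntr, indptr, NULL, NULL, 1, 1, 0, 0, RSB_FLAG_NOFLAGS, 2, 2);
+}
+#endif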
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_sasa").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_sasa").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_sasa").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_sasa").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_sasa").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_sasa").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel function specialized for the
+	 * current fixed block size to perform the desired matrix operation
+	 * ("spmv_sasa").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each be allowed a small overflow past their logical end: at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
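
The overflow bound quoted in each of these header comments is simply the padding that rounds a matrix dimension up to the next multiple of the block dimension. A minimal, self-contained illustration (the helper name pad() is ours, not librsb's):

#include <stdio.h>

/* pad(n, b) == mod(b - mod(n, b), b): the number of elements by which n
 * falls short of the next multiple of the block dimension b. */
static int pad(int n, int b)
{
	return (b - n % b) % b;
}

int main(void)
{
	/* 10 rows with 4-row blocks leave 2 elements of slack to allow for;
	 * a dimension that divides evenly (12 with 4) needs none. */
	printf("%d %d\n", pad(10, 4), pad(12, 4)); /* prints: 2 0 */
	return 0;
}

For the 1 x 1 kernels dispatched in this file both bounds are zero, so no extra slack is actually required here.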
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
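
As the signatures above show, the "_C_" dispatchers take bindx as rsb_coo_idx_t while their "_H_" twins take rsb_half_idx_t; presumably the halfword variant exists to shrink the block index arrays, though that rationale is our reading, not a statement from this file. A self-contained sketch of the storage difference (stand-in types, not librsb's own definitions):

#include <stdio.h>

typedef int            coo_idx_example;   /* stands in for rsb_coo_idx_t  */
typedef unsigned short half_idx_example;  /* stands in for rsb_half_idx_t */

int main(void)
{
	size_t nnz = 1000000;  /* one million stored blocks */
	printf("full-width bindx: %zu bytes\n", nnz * sizeof(coo_idx_example));
	printf("halfword bindx:   %zu bytes\n", nnz * sizeof(half_idx_example));
	return 0;
}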
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs the
+	 * desired matrix operation ("spmv_sasa") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each allow a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
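
Relative to the "spmv_sasa" dispatchers earlier, the "spmv_sxsa" family also threads the alphap scaling factor through to the kernels, next to the incx/incy operand strides. A rough illustration of the assumed semantics for the 1 x 1, untransposed case follows; it is a plain CSR rendering, not librsb's generated kernel:

/* Sketch under assumed semantics: 1x1-block sparse matrix-vector
 * multiply with scaling and strided operands, i.e.
 *   out[i*incy] += alpha * sum_k VA[k] * rhs[bindx[k]*incx]  over row i. */
static void spmv_sxsa_1x1_sketch(const double *VA, const double *rhs,
		double *out, int Mdim, const int *bindx, const int *bpntr,
		double alpha, int incx, int incy)
{
	for (int i = 0; i < Mdim; ++i) {
		double acc = 0.0;
		for (int k = bpntr[i]; k < bpntr[i + 1]; ++k)	/* entries of row i */
			acc += VA[k] * rhs[bindx[k] * incx];
		out[i * incy] += alpha * acc;	/* accumulate, scaled by alpha */
	}
}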
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
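
The "tT" dispatchers that begin here cover the transposed operation, which with row-oriented storage scatters into out along column indices. Again a hedged CSR-style sketch of the assumed semantics, not the generated code:

/* Sketch under assumed semantics: transposed spmv,
 *   out[bindx[k]*incy] += alpha * VA[k] * rhs[i*incx]  for each entry of row i. */
static void spmv_sxsa_trans_1x1_sketch(const double *VA, const double *rhs,
		double *out, int Mdim, const int *bindx, const int *bpntr,
		double alpha, int incx, int incy)
{
	for (int i = 0; i < Mdim; ++i)
		for (int k = bpntr[i]; k < bpntr[i + 1]; ++k)
			out[bindx[k] * incy] += alpha * VA[k] * rhs[i * incx];
}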
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
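
From this point the "sS" variants appear, which presumably handle symmetric matrices with a single stored triangle: each off-diagonal entry then contributes to its own row and to the mirrored one. One more hedged CSR-style sketch of the assumed semantics:

/* Sketch under assumed semantics: symmetric spmv over one stored
 * triangle; entry a(i,j) updates row i and, mirrored, row j. */
static void spmv_sxsa_sym_1x1_sketch(const double *VA, const double *rhs,
		double *out, int Mdim, const int *bindx, const int *bpntr,
		double alpha, int incx, int incy)
{
	for (int i = 0; i < Mdim; ++i)
		for (int k = bpntr[i]; k < bpntr[i + 1]; ++k) {
			const int j = bindx[k];
			out[i * incy] += alpha * VA[k] * rhs[j * incx];
			if (j != i)	/* mirror the off-diagonal contribution */
				out[j * incy] += alpha * VA[k] * rhs[i * incx];
		}
}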
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
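
The recurring comment above bounds the tolerated overflow by mod(blockrows-mod(matrixrows,blockrows),blockrows) rows and mod(blockcols-mod(matrixcols,blockcols),blockcols) columns, i.e. the padding needed to round each dimension up to a whole number of blocks. A small self-checking sketch of that arithmetic (hypothetical helper name, not librsb API):

#include <assert.h>

/* Extra trailing elements a vector must tolerate so that whole blocks of
 * size `block` may overrun a dimension of size `dim`:
 * mod(block - mod(dim, block), block), as in the comments above. */
static int block_padding(int dim, int block)
{
	return (block - dim % block) % block;
}

int main(void)
{
	assert(block_padding(10, 4) == 2); /* 10 rows in 4-row blocks: 2 rows spill */
	assert(block_padding(12, 4) == 0); /* exact fit: no overflow needed */
	assert(block_padding(1, 1) == 0);  /* the 1x1 case dispatched here */
	return 0;
}

For the 1x1 blocking dispatched in these functions the bound is always zero.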
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
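
Here the generated family switches from the double-precision "spmv_sxsa" dispatchers to single-precision "spmv_uaua" ones. As the call sites show, the sxsa kernels take three extra trailing arguments (alphap, incx, incy), presumably a scaling factor and vector strides, which the uaua kernels omit, presumably implying unit alpha and unit strides. A toy sketch of that reading follows; the kernels are mocks whose element-wise arithmetic is a stand-in, not real BCSR spmv:

#include <assert.h>
#include <stddef.h>

/* Mock kernels illustrating only the parameter difference between the
 * "sxsa" and "uaua" families (names and bodies are hypothetical). */
static void mock_sxsa(const float *a, const float *x, float *y, size_t n,
                      const float *alphap, ptrdiff_t incx, ptrdiff_t incy)
{
	for(size_t i = 0; i < n; ++i)
		y[i * incy] += *alphap * a[i] * x[i * incx];
}

static void mock_uaua(const float *a, const float *x, float *y, size_t n)
{
	const float one = 1.0f;
	mock_sxsa(a, x, y, n, &one, 1, 1); /* uaua == sxsa with unit parameters */
}

int main(void)
{
	const float a[2] = {2, 3}, x[2] = {1, 1};
	float y[2] = {0, 0};
	mock_uaua(a, x, y, 2);
	assert(y[0] == 2.0f && y[1] == 3.0f);
	return 0;
}
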
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
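+/*
+ * The dispatchers below select the "spmv_uauz" operation rather than
+ * "spmv_uaua".  Judging from the kernel naming alone (this diff does not
+ * show the kernel bodies), the trailing "z" plausibly means the output
+ * vector is overwritten (y = A*x, i.e. cleared first) where "uaua"
+ * accumulates (y += A*x); treat this reading as an assumption, not as
+ * documentation.
+ */
+#if 0 /* illustrative only: the assumed contrast, sketched in kernel terms */
+#include <stddef.h>
+/* Hypothetical helper: what a "*_uauz" kernel presumably does to out[]
+ * before accumulating block products; a "*_uaua" kernel would skip it. */
+static void rsb_clear_out_example(float *out, size_t n_out)
+{
+	size_t i;
+	for(i = 0; i < n_out; ++i)
+		out[i] = 0.0f;
+}
+#endif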
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
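+
+/*
+ * Editorial note (an inference from the prototypes above, not documented in
+ * this file): the _C_ and _H_ dispatcher variants differ only in the index
+ * type of bindx (rsb_coo_idx_t vs. the narrower rsb_half_idx_t), so a caller
+ * would pick the variant matching the width in which the block column
+ * indices were stored. A hypothetical selection sketch; the helper name and
+ * the use_half_indices/hbindx parameters are illustrative assumptions:
+ */
+#if 0 /* sketch only, never compiled */
+static rsb_err_t rsb_example_spmv_pick_index_width(int use_half_indices,
+	const float *VA, const float *rhs, float *out,
+	rsb_coo_idx_t Mdim, rsb_coo_idx_t mdim,
+	const rsb_coo_idx_t *bindx, const rsb_half_idx_t *hbindx,
+	const rsb_nnz_idx_t *bpntr, const rsb_nnz_idx_t *indptr,
+	const rsb_coo_idx_t *rpntr, const rsb_coo_idx_t *cpntr,
+	rsb_coo_idx_t br, rsb_coo_idx_t bc, rsb_coo_idx_t roff, rsb_coo_idx_t coff, rsb_flags_t flags)
+{
+	if(use_half_indices)	/* indices stored half-width: _H_ variant */
+		return rsb__BCSR_spmv_uauz_float_H_u_tN_sU_dE_uG( VA,rhs,out,Mdim,mdim,hbindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+	return rsb__BCSR_spmv_uauz_float_C_u_tN_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+}
+#endif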
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
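+
+/*
+ * Editorial illustration (hypothetical call, not librsb API documentation):
+ * with unit (1x1) blocking, the dispatchers above read the block dimensions
+ * from the first two entries of rpntr and cpntr, so rpntr[1]-rpntr[0] == 1
+ * selects the case 1 branch. The helper name is an illustrative assumption.
+ */
+#if 0 /* sketch only, never compiled */
+static rsb_err_t rsb_example_unit_blocked_spmv(const float *VA, const float *rhs, float *out,
+	rsb_coo_idx_t Mdim, rsb_coo_idx_t mdim,
+	const rsb_coo_idx_t *bindx, const rsb_nnz_idx_t *bpntr, const rsb_nnz_idx_t *indptr,
+	rsb_flags_t flags)
+{
+	const rsb_coo_idx_t rpntr[] = { 0, 1 };	/* one block row of height 1 */
+	const rsb_coo_idx_t cpntr[] = { 0, 1 };	/* one block column of width 1 */
+	return rsb__BCSR_spmv_uauz_float_C_u_tN_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,1,1,0,0,flags );
+}
+#endif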
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
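+
+/*
+ * Editorial note: the _tN_/_tT_/_tC_ infixes appear to follow the BLAS
+ * transposition convention (none / transpose / conjugate transpose); this
+ * reading is inferred from the generated names, not stated in this file.
+ * A hypothetical selector over the _sU_ variants; the helper name and the
+ * trans parameter are illustrative assumptions:
+ */
+#if 0 /* sketch only, never compiled */
+static rsb_err_t rsb_example_spmv_pick_trans(char trans,
+	const float *VA, const float *rhs, float *out,
+	rsb_coo_idx_t Mdim, rsb_coo_idx_t mdim,
+	const rsb_coo_idx_t *bindx, const rsb_nnz_idx_t *bpntr, const rsb_nnz_idx_t *indptr,
+	const rsb_coo_idx_t *rpntr, const rsb_coo_idx_t *cpntr,
+	rsb_coo_idx_t br, rsb_coo_idx_t bc, rsb_coo_idx_t roff, rsb_coo_idx_t coff, rsb_flags_t flags)
+{
+	switch(trans)	/* caller-supplied: 'N', 'T' or 'C' */
+	{
+	case 'T': return rsb__BCSR_spmv_uauz_float_C_u_tT_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+	case 'C': return rsb__BCSR_spmv_uauz_float_C_u_tC_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+	default:  return rsb__BCSR_spmv_uauz_float_C_u_tN_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+	}
+}
+#endif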
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the kernel specialized for the current
+	 * fixed block size, performing the requested matrix operation
+	 * ("spmv_uauz").
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each be padded to tolerate an overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uauz") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
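
Every dispatcher repeats the same nested-switch idiom: for this build
only the 1x1 block has an unrolled kernel, and any other block shape
falls back to a generic looping kernel when RSB_WANT_LOOPING_KERNELS is
defined, failing otherwise. A condensed, self-contained sketch of the
idiom with illustrative names (not librsb's):

#include <stdio.h>

#define ERR_OK 0
#define ERR_UNSUPPORTED (-1)

static int kernel_1x1_unrolled(void) { return ERR_OK; } /* fast path */
static int kernel_generic_loop(void) { return ERR_OK; } /* fallback  */

static int dispatch(int rows, int columns)
{
	switch (rows)
	{
	case 1:
		switch (columns)
		{
		case 1: /* the only unrolled block size in this build */
			return kernel_1x1_unrolled();
		default:
			break;
		}
		/* fall through to the generic case */
	default:
#ifdef WANT_LOOPING_KERNELS
		return kernel_generic_loop();
#else
		(void)kernel_generic_loop; /* silence unused-function warning */
		return ERR_UNSUPPORTED;
#endif
	}
}

int main(void)
{
	printf("1x1 -> %d, 2x2 -> %d\n", dispatch(1, 1), dispatch(2, 2));
	return 0;
}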
+
+
+
+
+
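From here on the same template is instantiated for the "spmv_uxua"
operation. The only signature difference from the "spmv_uauz" family
above is one trailing argument, a pointer alphap to a scaling factor,
which each dispatcher passes through unchanged to its leaf kernels.
Judging from the operation names alone (an assumption, not something
this listing states), uauz presumably computes the product into a
zeroed output with an implicit unit alpha, while uxua scales it by
*alphap, roughly out = (*alphap) * op(A) * rhs instead of
out = op(A) * rhs.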
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
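
The remainder of the listing instantiates the identical dispatcher once
more for each symmetric ("sS") variant; nothing but the leaf-kernel
names changes. Generating such a family from one template is what the
RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro does in M4; a minimal
X-macro sketch of the same technique in plain C (illustrative only, not
how librsb is actually built):

#include <stdio.h>

/* One expansion per (transposition, symmetry) variant. */
#define VARIANTS(X) \
	X(tN, sU) X(tT, sU) X(tC, sU) \
	X(tN, sS) X(tT, sS) X(tC, sS)

/* Define one dispatcher stub per variant, named after it. */
#define DEFINE_DISPATCHER(T, S) \
	static int spmv_##T##_##S(void) { return 0; }
VARIANTS(DEFINE_DISPATCHER)
#undef DEFINE_DISPATCHER

int main(void)
{
	/* Exercise every generated stub. */
#define CALL(T, S) printf(#T "_" #S " -> %d\n", spmv_##T##_##S());
	VARIANTS(CALL)
#undef CALL
	return 0;
}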
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_unua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
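+
+/*
+ * Editor's illustrative sketch, not generated code: driving the dispatcher
+ * above on a tiny 2x2 CSR matrix A = [1 2; 0 3].  Passing NULL rpntr/cpntr
+ * selects the 1x1 (plain CSR) branch, so bpntr/bindx are the usual row
+ * pointers and column indices.  Passing NULL for indptr and
+ * RSB_FLAG_NOFLAGS for flags are assumptions of this example; note that,
+ * unlike the "spmv_uxua" dispatchers earlier, this "spmv_unua" variant
+ * takes no alphap scaling argument.
+ */
+static rsb_err_t rsb_example_unua_csr_spmv(void)
+{
+	const float VA[3] = { 1.0f, 2.0f, 3.0f };
+	const rsb_coo_idx_t bindx[3] = { 0, 1, 1 };	/* column indices */
+	const rsb_nnz_idx_t bpntr[3] = { 0, 2, 3 };	/* row pointers   */
+	const float rhs[2] = { 1.0f, 1.0f };
+	float out[2] = { 0.0f, 0.0f };
+
+	return rsb__BCSR_spmv_unua_float_C_u_tN_sU_dE_uG(VA, rhs, out, 2, 2,
+			bindx, bpntr, NULL, NULL, NULL, 1, 1, 0, 0, RSB_FLAG_NOFLAGS);
+}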
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_unua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_unua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_unua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_unua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel that performs
+	 * the requested matrix operation ("spmv_unua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors should
+	 * each be allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
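+
+/*
+ * Editorial note, inferred from the signatures rather than stated by
+ * upstream: the _H_ dispatchers take bindx as rsb_half_idx_t where the _C_
+ * ones take rsb_coo_idx_t, i.e. a narrower column-index type.  Assuming the
+ * half type is half the width, the index stream shrinks accordingly; a
+ * small sketch with stand-in types (hypothetical names):
+ *
+ *   #include <stdio.h>
+ *
+ *   typedef int   full_idx_t; // stand-in for rsb_coo_idx_t
+ *   typedef short half_idx_t; // stand-in for rsb_half_idx_t
+ *
+ *   int main(void)
+ *   {
+ *       size_t nnz = 1000000; // one million stored entries
+ *       printf("full: %zu bytes\n", nnz * sizeof(full_idx_t)); // 4000000 on common ABIs
+ *       printf("half: %zu bytes\n", nnz * sizeof(half_idx_t)); // 2000000 on common ABIs
+ *       return 0;
+ *   }
+ *
+ * Halving the index traffic matters because spmv is typically memory bound,
+ * which is presumably why both index widths are generated.
+ */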
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
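+
+/*
+ * Editorial note: the many near-identical functions in this stretch differ
+ * only in their name suffix.  Reading the code, _C_/_H_ select the bindx
+ * index width (see the signatures), r1_c1 names the 1x1 block size, and
+ * uu/ul pick the fully unrolled versus looped kernel (the latter gated by
+ * RSB_WANT_LOOPING_KERNELS), while tN/tT/tC, sU/sS/sH and dE/dI appear to
+ * encode transposition, symmetry and diagonal handling; those last readings
+ * are assumptions, not upstream documentation.  The dispatch idiom itself,
+ * reduced to its essentials (kernel names hypothetical):
+ *
+ *   rsb_err_t dispatch(int rows, int columns)
+ *   {
+ *       if (rows == 1 && columns == 1)
+ *           return kernel_1x1_unrolled();      // fixed-size fast path
+ *   #ifdef RSB_WANT_LOOPING_KERNELS
+ *       return kernel_generic_looped();       // handles any block size
+ *   #else
+ *       return RSB_ERR_UNSUPPORTED_OPERATION; // looped kernels compiled out
+ *   #endif
+ *   }
+ */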
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
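+
+/*
+ * Editorial usage note: every dispatcher above reports failure only through
+ * its rsb_err_t return value, so callers are expected to check it.  A
+ * minimal caller-side sketch (the actual dispatcher call is elided):
+ *
+ *   rsb_err_t errval = RSB_ERR_NO_ERROR;
+ *   // errval = rsb__BCSR_spmv_unua_float_C_u_tN_sH_dE_uG(...);
+ *   if (errval == RSB_ERR_UNSUPPORTED_OPERATION)
+ *   {
+ *       // the library was built without RSB_WANT_LOOPING_KERNELS and a
+ *       // non-1x1 block size was requested: fall back or report upstream
+ *   }
+ *   else if (errval != RSB_ERR_NO_ERROR)
+ *   {
+ *       // propagate any other error to the caller
+ *   }
+ */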
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_float_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_float_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_unua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_unua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_unua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_unua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
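+
+/*
+ * A reading aid for the generated identifiers (inferred from the
+ * instantiations in this file, not from upstream documentation): in a name
+ * such as rsb__BCSR_spmv_unua_float_H_u_tC_sH_dI_uG,
+ *   float    is the numerical type of VA, rhs and out;
+ *   C / H    selects a full (rsb_coo_idx_t) or halfword (rsb_half_idx_t) bindx;
+ *   tN/tT/tC selects the untransposed, transposed or conjugate-transposed operation;
+ *   sU/sS/sH selects unsymmetric, symmetric or hermitian handling;
+ *   dE/dI    selects an explicit or implicit diagonal;
+ * and the dispatched kernels end in _uu_ (unrolled) or _ul_ (looped; the
+ * latter are only compiled under RSB_WANT_LOOPING_KERNELS).
+ */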
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
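+
+/*
+ * Usage sketch (hypothetical wrapper, for illustration only): with a plain
+ * CSR layout the blocking arrays are absent (rpntr == cpntr == NULL), so
+ * the dispatcher above falls back to rows = columns = 1 and invokes the
+ * unrolled 1 x 1 kernel; unit strides select contiguous rhs and out.
+ */
+static rsb_err_t rsb_spmv_csr_sketch(const float * VA, const float * rhs, float * out,
+	rsb_coo_idx_t Mdim, rsb_coo_idx_t mdim, const rsb_coo_idx_t * bindx,
+	const rsb_nnz_idx_t * bpntr, const rsb_nnz_idx_t * indptr, rsb_flags_t flags)
+{
+	/* NULL rpntr/cpntr, 1 x 1 blocks, zero offsets, unit incx/incy */
+	return rsb__BCSR_spmv_sasa_float_C_u_tN_sU_dE_uG(VA, rhs, out, Mdim, mdim,
+			bindx, bpntr, indptr, NULL, NULL, 1, 1, 0, 0, flags, 1, 1);
+}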
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively, 
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_coo_idx_t incx,rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized (unrolled or looped)
+	 * kernel implementing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * should each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements past their nominal length.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
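+/*
+ * Editorial note on the name mangling, inferred from the signatures in this
+ * file rather than from upstream documentation: the `_C_`/`_H_` infix selects
+ * the bindx index width (rsb_coo_idx_t vs. rsb_half_idx_t); `tN`/`tT`/`tC`
+ * plausibly select the transposition (none / transposed / conjugate
+ * transposed), `sU`/`sS`/`sH` the symmetry (unsymmetric / symmetric /
+ * hermitian), and `dE`/`dI` an explicit vs. implicit diagonal.
+ */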
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
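+/*
+ * The "spmv_sxsa" dispatchers below differ from the "spmv_sasa" ones above
+ * in taking an extra scaling pointer (alphap), i.e. they appear to realize
+ * out += alpha * op(A) * rhs on strided vectors.  A minimal sketch of such
+ * an update for the unblocked 1x1 CSR case follows; it is an editorial
+ * illustration with a hypothetical name, not one of the generated kernels.
+ */
+static void rsb_example_spmv_1x1_strided(const float *VA,
+	const rsb_coo_idx_t *bindx, const rsb_nnz_idx_t *bpntr,
+	const rsb_coo_idx_t nr, const float *rhs, float *out,
+	const float alpha, const rsb_coo_idx_t incx, const rsb_coo_idx_t incy)
+{
+	rsb_coo_idx_t i;
+	rsb_nnz_idx_t k;
+
+	for (i = 0; i < nr; ++i)			/* row loop */
+		for (k = bpntr[i]; k < bpntr[i+1]; ++k)	/* nonzeroes of row i */
+			out[i*incy] += alpha * VA[k] * rhs[bindx[k]*incx];
+}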
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
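
The _C_ and _H_ dispatcher pairs in this hunk differ only in the declared
width of the bindx block-index array: rsb_coo_idx_t in the _C_ variants
versus rsb_half_idx_t in the _H_ variants, the narrower type reducing
index-array traffic where its range suffices. A sketch of the trade-off,
with stand-in widths assumed for illustration (the real typedefs are
configuration-dependent):

	#include <stdint.h>
	#include <stdio.h>

	typedef int32_t  coo_idx_t;   /* stand-in for rsb_coo_idx_t (assumed width) */
	typedef uint16_t half_idx_t;  /* stand-in for rsb_half_idx_t (assumed width) */

	int main(void)
	{
		/* per stored block, the half-index variant moves fewer index bytes */
		printf("full: %zu bytes/index, half: %zu bytes/index\n",
		       sizeof(coo_idx_t), sizeof(half_idx_t));
		return 0;
	}
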
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
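
Every dispatcher in this hunk follows the same shape: a nested switch on
the detected block dimensions selects a fully unrolled kernel when a
generated specialization exists (here only the 1x1 case), and otherwise
falls back to a generic looped kernel guarded by RSB_WANT_LOOPING_KERNELS.
A condensed, self-contained sketch of that pattern, using stand-in types
and kernel names rather than the generated ones:

	typedef int err_t;                  /* stand-in for rsb_err_t */
	#define ERR_NO_ERROR 0              /* stand-ins for the RSB_ERR_* codes */
	#define ERR_UNSUPPORTED_OPERATION (-1)

	static err_t kernel_1x1_unrolled(void) { return ERR_NO_ERROR; }
	static err_t kernel_looped(void)       { return ERR_NO_ERROR; }

	static err_t example_dispatch(int rows, int columns)
	{
		err_t errval = ERR_NO_ERROR;

		if(rows == 1 && columns == 1)   /* the only generated block size */
			errval = kernel_1x1_unrolled();
		else
	#ifdef WANT_LOOPING_KERNELS         /* mirrors RSB_WANT_LOOPING_KERNELS */
			errval = kernel_looped();   /* generic fallback, any block size */
	#else
			errval = ERR_UNSUPPORTED_OPERATION;
	#endif
		return errval;
	}
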
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
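+/*
+ * A hedged usage sketch of the dispatcher above; the caller name is
+ * hypothetical, and it assumes the trailing parameter of the truncated
+ * prototype is flags, as forwarded in its body.  rpntr/cpntr describing
+ * 1 x 1 blocks routes to the unrolled "uu" kernel; any other block shape
+ * takes the looped "ul" kernel, or yields RSB_ERR_UNSUPPORTED_OPERATION
+ * when RSB_WANT_LOOPING_KERNELS is disabled.  Note also the interface
+ * split visible in this file: "spmv_sxsa" dispatchers additionally take
+ * alphap/incx/incy, while "spmv_uaua" ones stop at flags.
+ */
+static rsb_err_t rsb_spmv_uaua_whole_matrix_sketch(const float complex * VA,
+	const float complex * rhs, float complex * out,
+	rsb_coo_idx_t Mdim, rsb_coo_idx_t mdim,
+	const rsb_coo_idx_t * bindx, const rsb_nnz_idx_t * bpntr,
+	const rsb_nnz_idx_t * indptr, const rsb_coo_idx_t * rpntr,
+	const rsb_coo_idx_t * cpntr, rsb_flags_t flags)
+{
+	/* br = bc = 1 (1 x 1 blocking) and no row/column offset */
+	return rsb__BCSR_spmv_uaua_float_complex_C_u_tN_sU_dE_uG(VA, rhs, out,
+		Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr, 1, 1, 0, 0, flags);
+}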
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
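+/*
+ * A note on the "_C_"/"_H_" pairs in this file, observable from the
+ * prototypes: "_C_" dispatchers take full-width rsb_coo_idx_t block
+ * indices in bindx, whereas "_H_" dispatchers take the narrower
+ * rsb_half_idx_t indices instead, reducing index storage when the
+ * dimensions permit.
+ */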
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spmv_uaua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should be padded to tolerate an overrun of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uaua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uaua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uaua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uaua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uaua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uaua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uaua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uaua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uaua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uaua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uaua") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
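+
+/*
+ * Editorial note on the variant naming used by these generated dispatchers,
+ * partly inferred from the signatures: "_C"/"_H" select full (rsb_coo_idx_t)
+ * versus halfword (rsb_half_idx_t) column indices in bindx; "tN"/"tT"/"tC"
+ * presumably select untransposed, transposed and conjugate-transposed
+ * operation; "sU"/"sS"/"sH" unsymmetric, symmetric and hermitian storage;
+ * "dE"/"dI" an explicit versus implicit diagonal. "spmv_uaua" appears to
+ * accumulate into out, whereas "spmv_uauz" overwrites it. A hypothetical
+ * caller-side sketch of the index-width choice, assuming rsb_half_idx_t is
+ * a 16-bit type and bindx_h is a halfword copy of bindx:
+ */
+#if 0
+	if(mdim <= 65535)	/* column indices fit in a halfword */
+		errval = rsb__BCSR_spmv_uauz_float_complex_H_u_tN_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx_h,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+	else
+		errval = rsb__BCSR_spmv_uauz_float_complex_C_u_tN_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#endif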
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled or looped)
+	 * that performs the requested matrix operation ("spmv_uauz") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overrun of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uauz" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uauz" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uauz" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uauz" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uauz" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uauz" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uauz" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uauz" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uauz" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uauz" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uauz" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uauz" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uauz" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uauz" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uauz" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uauz" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uauz" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uauz" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uauz" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uauz" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uauz" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uauz" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uxua" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
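+/*
+ * For contrast with the earlier "spmv_uauz" sketch: the "spmv_uxua"
+ * dispatchers in this stretch pass the extra alphap argument through to the
+ * kernels, as the call sites above show. Assuming alphap points to the
+ * scaling factor and that "uxua" accumulates alpha*A*x into out (an
+ * inference from the naming, not an upstream statement), the 1x1
+ * non-transposed case reduces to:
+ */
+#if 0 /* illustrative sketch only */
+static void spmv_uxua_1x1_sketch(const float complex * restrict VA,
+	const float complex * restrict rhs, float complex * restrict out,
+	const rsb_coo_idx_t Mdim, const rsb_coo_idx_t * restrict bindx,
+	const rsb_nnz_idx_t * restrict bpntr, const float complex * alphap)
+{
+	const float complex alpha = *alphap;
+	rsb_coo_idx_t i;
+	rsb_nnz_idx_t k;
+	for(i = 0; i < Mdim; ++i)
+		for(k = bpntr[i]; k < bpntr[i+1]; ++k)
+			out[i] += alpha * VA[k] * rhs[bindx[k]];	/* accumulate, no zeroing */
+}
+#endif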
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uxua" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uxua" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the matrix
+	 * operation "spmv_uxua" at the current fixed block size: the 1x1
+	 * kernel when the detected block dimensions are 1x1, the looping
+	 * fallback when RSB_WANT_LOOPING_KERNELS is defined, and
+	 * RSB_ERR_UNSUPPORTED_OPERATION otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
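+
+/*
+ * Informal reading of the generated naming scheme, inferred from the
+ * signatures and callees in this file rather than stated by the generator:
+ * the _C_ / _H_ infix selects full-width (rsb_coo_idx_t) versus halfword
+ * (rsb_half_idx_t) block column indices for bindx; tN / tT / tC select the
+ * untransposed, transposed and conjugate-transposed variants; sU / sS / sH
+ * appear to mark unsymmetric, symmetric and hermitian storage; dE / dI an
+ * explicit versus implicit diagonal; and the _uu / _ul callee suffixes
+ * distinguish the fixed-size unrolled kernel from the generic looped one
+ * guarded by RSB_WANT_LOOPING_KERNELS.
+ */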
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_coo_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_coo_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_half_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_half_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_coo_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_coo_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_half_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_half_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_coo_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_coo_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_half_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_half_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_coo_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_coo_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_half_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_half_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_coo_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_coo_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_half_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_half_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_coo_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_coo_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_half_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_half_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags, const float complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized (unrolled or looped) kernel performing the
+	 * desired matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * allow for a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
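+
+#if 0 /* Editorial sketch, not generator output; never compiled. */
+/* A minimal, hypothetical call of the dispatcher above for a plain
+ * (1x1-blocked) CSR matrix.  Passing NULL rpntr/cpntr takes the bounded-box
+ * fallback (rows = columns = 1); the trailing flags/alphap parameters are
+ * inferred from the call sites above, and all other names are illustrative
+ * only. */
+static rsb_err_t example_csr_spmv(const float complex * VA,
+	const rsb_coo_idx_t * bindx,	/* column index of each nonzero */
+	const rsb_nnz_idx_t * bpntr,	/* row pointers (nr+1 entries)  */
+	rsb_coo_idx_t nr, rsb_coo_idx_t nc,
+	const float complex * x, float complex * y,
+	rsb_flags_t flags, const float complex * alphap)
+{
+	return rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sH_dE_uG(
+		VA, x, y, nr, nc, bindx, bpntr,
+		NULL /* indptr */, NULL /* rpntr */, NULL /* cpntr */,
+		1 /* br */, 1 /* bc */, 0 /* roff */, 0 /* coff */,
+		flags, alphap);
+}
+#endif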
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
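+
+/* Editorial note, not generator output: the "spmv_uxua" dispatchers above
+ * take a trailing alphap scaling argument, while the "spmv_unua" dispatchers
+ * beginning here omit it; judging from the names and signatures alone, the
+ * "unua" variants appear to be the fixed-coefficient (negated alpha)
+ * counterparts of the "uxua" (explicit alpha) ones -- an assumption, since
+ * only the dispatch signatures are visible at this point. */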
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
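+
+/* Editorial note, not generator output: from this point the "sU" dispatchers
+ * above are mirrored by "sS" ones; by the apparent mnemonic scheme (which
+ * also features "sH"), these letters seem to select the symmetry handling of
+ * the kernels -- unsymmetric, symmetric, hermitian -- though that reading is
+ * an assumption based only on the names visible here. */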
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel that performs the desired
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_unua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
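+
+/*
+ * A reading key for the generated names, inferred from the variants in this
+ * file (hedged; only the C/H distinction is directly visible in the
+ * signatures):
+ *   C / H    - bindx held as full rsb_coo_idx_t vs. halfword rsb_half_idx_t;
+ *   tN/tT/tC - apparently BLAS-style transposition: none, transpose,
+ *              conjugate transpose;
+ *   sS/sH/sU - presumably symmetric, hermitian, unsymmetric;
+ *   dE/dI    - presumably explicit vs. implicit diagonal;
+ *   r1_c1    - the 1 x 1 fixed block size;
+ *   uu / ul  - fixed-size unrolled kernel vs. the looped fallback built only
+ *              under RSB_WANT_LOOPING_KERNELS.
+ */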
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_unua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_unua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_unua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_unua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_unua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_unua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_unua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_unua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_unua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_unua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_unua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_unua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_unua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_unua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_unua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_unua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_unua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
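+
+/*
+ * A hedged usage note: relative to the "unua" dispatchers above, the
+ * "spmv_sasa" family threads two extra parameters, incx and incy, through to
+ * the kernels -- apparently the strides (in elements) between consecutive
+ * entries of rhs and out.  Assuming contiguous vectors, a caller would pass
+ * incx = incy = 1, e.g. (hypothetical call, array setup elided):
+ *
+ *   errval = rsb__BCSR_spmv_sasa_float_complex_C_u_tN_sU_dE_uG(
+ *                   VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr,
+ *                   rpntr, cpntr, br, bc, roff, coff, flags, 1, 1);
+ */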
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel (unrolled, or looped
+	 * as a fallback) performing the requested matrix operation ("spmv_sasa")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small trailing overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
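+/*
+ * A note on the dispatch shape above: the nested switch binds the only
+ * block size instantiated here (1 x 1) to its fully unrolled kernel
+ * ("_uu_"); any other block shape falls back to the generic looping
+ * kernel ("_ul_"), and only if the library was configured with
+ * RSB_WANT_LOOPING_KERNELS; otherwise the dispatcher reports
+ * RSB_ERR_UNSUPPORTED_OPERATION.
+ */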
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
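+/*
+ * A minimal calling sketch for the dispatcher defined just above: a
+ * hypothetical driver, not part of the generated sources, with the
+ * declared type of `flags` assumed to be rsb_flags_t (the signature
+ * tails are truncated in this diff). Passing NULL for rpntr and cpntr
+ * selects the rows = columns = 1 fast path, as the dispatch preamble
+ * shows.
+ */
+static rsb_err_t rsb_spmv_sasa_example_call(const float complex * VA, const float complex * x, float complex * y, rsb_coo_idx_t nr, rsb_coo_idx_t nc, const rsb_coo_idx_t * bindx, const rsb_nnz_idx_t * bpntr, const rsb_nnz_idx_t * indptr, rsb_flags_t flags)
+{
+	/* br = bc = 1: 1 x 1 blocks; roff = coff = 0: no submatrix offset; incx = incy = 1: contiguous vectors */
+	return rsb__BCSR_spmv_sasa_float_complex_C_u_tN_sS_dE_uG( VA,x,y,nr,nc,bindx,bpntr,indptr,NULL,NULL,1,1,0,0,flags,1,1 );
+}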
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized kernel performing the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small trailing overflow of at most,
+	 * respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
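
All of these dispatchers share one shape: a switch on the runtime block dimensions selects the fully unrolled kernel (the "uu" symbol) for the single supported 1x1 case, while any other size either falls back to a generic looped kernel (the "ul" symbol) when the library was configured with RSB_WANT_LOOPING_KERNELS, or yields RSB_ERR_UNSUPPORTED_OPERATION. A stripped-down sketch of that idiom, with placeholder names only:

	typedef int err_t;
	#define ERR_OK          0
	#define ERR_UNSUPPORTED (-1)

	static err_t kernel_1x1_unrolled(void) { return ERR_OK; }
	static err_t kernel_looped(int rows, int columns)
	{ (void)rows; (void)columns; return ERR_OK; }

	err_t dispatch(int rows, int columns)
	{
		err_t errval = ERR_OK;

		if (rows == 1 && columns == 1)
			errval = kernel_1x1_unrolled();        /* unrolled fast path ("uu") */
		else
	#ifdef WANT_LOOPING_KERNELS
			errval = kernel_looped(rows, columns); /* generic fallback ("ul") */
	#else
			errval = ERR_UNSUPPORTED;              /* fallback compiled out */
	#endif
		return errval;
	}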
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sasa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
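
The spmv_sasa dispatchers above hand their kernels (flags, incx, incy), while the spmv_sxsa dispatchers that follow additionally thread through alphap, evidently a pointer to a scaling factor. A rough sketch of what an alpha-scaled, strided accumulation has to do per stored entry (hypothetical CSR-style layout, not librsb's internal one):

	#include <complex.h>

	/* Illustrative strided, alpha-scaled sparse update; all names are
	 * hypothetical. rp is the row-pointer array, ci the column indices. */
	static void spmv_alpha_strided(int nr, const int *rp, const int *ci,
	                               const float complex *va,
	                               const float complex *x, int incx,
	                               float complex *y, int incy,
	                               const float complex *alphap)
	{
		const float complex alpha = *alphap;
		for (int i = 0; i < nr; ++i)
			for (int k = rp[i]; k < rp[i + 1]; ++k)
				y[i * incy] += alpha * va[k] * x[ci[k] * incx];
	}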
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
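
The _C_ and _H_ variants differ only in the declared type of bindx: rsb_coo_idx_t versus rsb_half_idx_t. Assuming the half type is a narrower integer, the paired kernels perform identical arithmetic and differ only in index-array bandwidth, as in this sketch (uint16_t stands in for rsb_half_idx_t, which is an assumption):

	#include <stdint.h>

	/* Same gather over full-width and half-width column indices;
	 * only the bytes fetched per index change. Illustrative only. */
	static double gather_full(const double *x, const int32_t *idx, int n)
	{
		double s = 0.0;
		for (int k = 0; k < n; ++k)
			s += x[idx[k]];
		return s;
	}

	static double gather_half(const double *x, const uint16_t *idx, int n)
	{
		double s = 0.0;
		for (int k = 0; k < n; ++k)
			s += x[idx[k]];	/* same arithmetic, half the index bytes */
		return s;
	}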
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
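
The _tN_, _tT_ and _tC_ infixes appear to select the untransposed, transposed and conjugate-transposed operation; conjugation is meaningful here because this file handles the float complex type. Reading them that way, each stored entry a(i,j) is applied as below (sketch with hypothetical names, not librsb's code):

	#include <complex.h>

	/* Scatter-style application of one stored entry a(i,j) = v under the
	 * three transposition modes suggested by the _tN/_tT/_tC infixes
	 * (interpretation assumed, not confirmed by this file alone). */
	static void scatter_entry(float complex v, int i, int j,
	                          const float complex *x, float complex *y,
	                          char trans)
	{
		switch (trans) {
		case 'N': y[i] += v * x[j]; break;        /* y += A x   */
		case 'T': y[j] += v * x[i]; break;        /* y += A^T x */
		case 'C': y[j] += conjf(v) * x[i]; break; /* y += A^H x */
		}
	}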
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
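
The _sU_, _sS_ and _sH_ infixes plausibly select unsymmetric, symmetric and hermitian handling, in which case a symmetric kernel stores one triangle and mirrors every off-diagonal entry. Under that (unconfirmed) reading, the per-entry update would look like:

	#include <complex.h>

	/* Assumed semantics of a symmetric/hermitian update: the stored
	 * triangle entry a(i,j) also acts as a(j,i), conjugated in the
	 * hermitian case. Hypothetical sketch, not librsb's code. */
	static void sym_entry(float complex v, int i, int j,
	                      const float complex *x, float complex *y,
	                      int hermitian)
	{
		y[i] += v * x[j];
		if (i != j)	/* mirror the off-diagonal contribution */
			y[j] += (hermitian ? conjf(v) : v) * x[i];
	}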
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_sxsa") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_sxsa") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_sxsa") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_sxsa") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_sxsa") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_sxsa") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_sxsa") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_sxsa") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_sxsa") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_sxsa") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_sxsa") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_sxsa") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_sxsa") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_sxsa") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_sxsa") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
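+
+/*
+ * The dispatchers that follow switch from the "spmv_sxsa" to the "spmv_uaua"
+ * operation and from float complex to double complex. Reading the mangled
+ * names (a presumed decoding, inferred from the signatures and the guarded
+ * calls, not an upstream statement): _C_/_H_ selects full rsb_coo_idx_t
+ * versus rsb_half_idx_t column indices in bindx; tN/tT/tC the transposition
+ * variant; sS/sH/sU the symmetry variant; dE/dI the diagonal variant;
+ * r1_c1 the 1 x 1 block size; and uu/ul the fully unrolled versus looped
+ * kernel, the latter only built under RSB_WANT_LOOPING_KERNELS.
+ */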
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_uaua") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
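+
+/*
+ * Note the contrast with the spmv_sxsa dispatchers above: the spmv_uaua
+ * kernels are called without the trailing alphap, incx and incy arguments,
+ * so (presumably) they perform an unscaled, unit-stride accumulation, while
+ * the sxsa variants take a scaling factor and vector strides. The uaua call
+ * shape, exactly as it appears in this file:
+ *
+ *     errval = rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sU_dE_uG(
+ *         VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
+ *         br, bc, roff, coff, flags );
+ */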
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_uaua") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_uaua") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_uaua") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_uaua") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_uaua") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_uaua") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_uaua") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_uaua") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the kernel specialized for the requested
+	 * matrix operation ("spmv_uaua") at the current fixed block size: the
+	 * fully unrolled variant for the supported 1 x 1 blocking, or the
+	 * generic looped variant when RSB_WANT_LOOPING_KERNELS is defined.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1] - cpntr[0];
+		rows    = rpntr[1] - rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
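+/*
+ * Editor's note -- an assumption drawn from the visible signatures, not an
+ * upstream statement: the name suffixes appear to encode the variant grid.
+ * _C_/_H_ select rsb_coo_idx_t vs. rsb_half_idx_t column indices (visible in
+ * the bindx parameter), tN/tT/tC the transposition, sU/sS/sH the symmetry,
+ * dE/dI the diagonal handling, and the uu/ul callees above the unrolled vs.
+ * looped kernel chosen in each switch.  A minimal sketch of selecting the
+ * index width at run time; the wrapper name and the use_half_indices
+ * parameter are hypothetical:
+ */
+static rsb_err_t rsb_illustrative_spmv_by_idx_width(const int use_half_indices, const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const void * bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags)
+{
+	/* The _H_ dispatcher reads bindx as rsb_half_idx_t, the _C_ one as rsb_coo_idx_t. */
+	if(use_half_indices)
+		return rsb__BCSR_spmv_uaua_double_complex_H_u_tN_sS_dE_uG(VA, rhs, out, Mdim, mdim, (const rsb_half_idx_t*)bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags);
+	return rsb__BCSR_spmv_uaua_double_complex_C_u_tN_sS_dE_uG(VA, rhs, out, Mdim, mdim, (const rsb_coo_idx_t*)bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags);
+}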
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
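+/*
+ * Editor's note -- an illustrative condensation, not the project's API: every
+ * dispatcher in this family repeats the same skeleton, which could be written
+ * once as a macro over the unrolled (uu) and looped (ul) kernels.  The macro
+ * name is hypothetical, and the RSB_WANT_LOOPING_KERNELS guard around the
+ * looped calls is elided here for brevity.
+ */
+#define RSB_ILLUSTRATIVE_BCSS_DISPATCH(UU_KERNEL, UL_KERNEL) \
+	switch(rows) \
+	{ \
+	case 1: \
+		switch(columns) \
+		{ \
+		case 1: /* 1 1 BCSR: fixed-size unrolled kernel */ \
+			errval = UU_KERNEL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags ); \
+			break; \
+		default: /* any other block width: generic looped kernel */ \
+			errval = UL_KERNEL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags ); \
+		} \
+		break; \
+	default: /* any other block height: generic looped kernel */ \
+		errval = UL_KERNEL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags ); \
+	}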
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
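+/*
+ * Editor's note -- an illustrative call site, not upstream code: without
+ * RSB_WANT_LOOPING_KERNELS these dispatchers can only serve 1x1 blockings and
+ * return RSB_ERR_UNSUPPORTED_OPERATION for anything else, so a caller would
+ * check the result.  The wrapper name and the fallback branch are
+ * hypothetical:
+ */
+static rsb_err_t rsb_illustrative_spmv_checked(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim, const rsb_coo_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t * restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff, const rsb_flags_t flags)
+{
+	rsb_err_t errval = rsb__BCSR_spmv_uaua_double_complex_C_u_tT_sH_dE_uG(VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff, flags);
+	if(errval == RSB_ERR_UNSUPPORTED_OPERATION)
+	{
+		/* e.g. fall back to a non-blocked code path (hypothetical) */
+	}
+	return errval;
+}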
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the desired matrix operation ("spmv_uaua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
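+/*
+ * The _C_ and _H_ dispatchers above differ only in the bindx index type
+ * (rsb_coo_idx_t vs. rsb_half_idx_t); the half-word form presumably halves
+ * index storage when block column indices fit the narrower type. A sketch of
+ * that choice, under this assumption (use_half and hbindx are hypothetical):
+ */
+#if 0
+	if(use_half)
+		errval = rsb__BCSR_spmv_uaua_double_complex_H_u_tC_sH_dI_uG( VA,rhs,out,Mdim,mdim,hbindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+	else
+		errval = rsb__BCSR_spmv_uaua_double_complex_C_u_tC_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#endif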
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
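+/*
+ * Note on naming: relative to the spmv_uaua dispatchers earlier, the
+ * spmv_uauz family appears to write rather than accumulate, i.e. the output
+ * is zeroed before the multiply. Under that assumption the two are related
+ * roughly as below (nrows is a hypothetical name):
+ */
+#if 0
+	memset(out, 0, nrows * sizeof(double complex));	/* needs <string.h> */
+	/* ... then accumulate as spmv_uaua does: out += A * rhs ... */
+#endif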
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
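+/*
+ * Note on naming: the _dE_/_dI_ suffixes appear to distinguish an explicitly
+ * stored diagonal from an implicit unit diagonal; in the _dI_ case the kernel
+ * would add the identity contribution itself, roughly (i, nrows hypothetical):
+ */
+#if 0
+	for(i = 0; i < nrows; ++i)
+		out[i] += rhs[i];	/* implicit unit diagonal */
+#endif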
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
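+/*
+ * RSB_WANT_LOOPING_KERNELS is a compile-time switch: when it is undefined,
+ * every block size other than the registered 1x1 case resolves to
+ * RSB_ERR_UNSUPPORTED_OPERATION in the dispatchers above. Enabling it is a
+ * build-time decision; a hypothetical invocation (not taken from this
+ * package's configure) might look like:
+ */
+#if 0
+/* CPPFLAGS="-DRSB_WANT_LOOPING_KERNELS" ./configure ... */
+#endif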
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
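+/*
+ * Note on naming: the _tN_/_tT_/_tC_ infixes appear to select no transpose,
+ * transpose, or conjugate transpose of A. For 1x1 blocks the per-entry update
+ * each case implies for y = op(A)*x is roughly (i, j, v hypothetical; conj()
+ * from <complex.h>):
+ */
+#if 0
+	/* tN */ out[i] += v * rhs[j];
+	/* tT */ out[j] += v * rhs[i];
+	/* tC */ out[j] += conj(v) * rhs[i];
+#endif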
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
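+/*
+ * Note on naming: the _sU_/_sS_/_sH_ infixes appear to mark the matrix as
+ * unsymmetric, symmetric, or Hermitian; in the symmetric cases only one
+ * triangle would be stored, each off-diagonal entry also acting as its
+ * mirror, roughly (i, j, v, is_hermitian hypothetical):
+ */
+#if 0
+	out[i] += v * rhs[j];
+	if(i != j)
+		out[j] += (is_hermitian ? conj(v) : v) * rhs[i];	/* mirrored entry */
+#endif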
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized looped kernel for the requested matrix
+	 * operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each be
+	 * allowed to overflow by no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
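+/*
+ * A reading of the generated-name mnemonics, inferred from this file alone
+ * (an assumption, not an upstream statement): the "_C_"/"_H_" infix selects
+ * the bindx index width (rsb_coo_idx_t vs. rsb_half_idx_t, as the prototypes
+ * show); tN/tT/tC select no, real, or conjugate transposition; sU/sS/sH
+ * plausibly mean unsymmetric/symmetric/hermitian storage; dE/dI plausibly
+ * mean explicit vs. implicit diagonal; and the uu/ul suffixes of the callees
+ * match the fixed 1x1 ("unlooped") kernel vs. the RSB_WANT_LOOPING_KERNELS
+ * fallback visible in each dispatcher.
+ */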
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uauz") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
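+/*
+ * Note the signature change: the "uxua" dispatchers and their callees take a
+ * trailing alphap argument that the "uauz" ones above lack; a plausible
+ * reading (an assumption, not confirmed by this file) is that "uxua" scales
+ * the product by *alphap while "uauz" uses a unit alpha and zeroes out
+ * first.  A minimal hypothetical call for the unblocked (1x1, CSR-like)
+ * case, where passing NULL rpntr/cpntr makes the dispatcher take the
+ * rows=columns=1 path:
+ *
+ *	const double complex alpha = 2.0 + 1.0*I;
+ *	errval = rsb__BCSR_spmv_uxua_double_complex_C_u_tN_sU_dE_uG(VA, x, y,
+ *		nr, nc, bindx, bpntr, indptr, NULL, NULL, 1, 1, 0, 0,
+ *		RSB_FLAG_NOFLAGS, &alpha);
+ */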
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double complex * restrict alphap)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * desired matrix operation ("spmv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector should
+	 * each be padded to tolerate an overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
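+
+/*
+ * Editorial sketch (not part of the generated code): the dispatchers above
+ * require the rhs and out vectors to be padded up to a block-size multiple.
+ * Assuming hypothetical variables matrixrows/matrixcols for the matrix
+ * dimensions, the slack described in the comments could be computed as:
+ *
+ *	const rsb_coo_idx_t rpad = (br - matrixrows % br) % br;
+ *	const rsb_coo_idx_t cpad = (bc - matrixcols % bc) % bc;
+ *	double complex *out = calloc(matrixrows + rpad, sizeof(*out));
+ *	double complex *rhs = calloc(matrixcols + cpad, sizeof(*rhs));
+ *
+ * i.e. exactly mod(blockrows-mod(matrixrows,blockrows),blockrows) extra
+ * rows of slack, and the analogous number of extra columns.
+ */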
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
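+
+/*
+ * Editorial sketch: the _C_ dispatchers above take full-width block column
+ * indices (rsb_coo_idx_t *bindx), while the _H_ variants take halfword ones
+ * (rsb_half_idx_t *bindx), as their signatures show.  A caller holding both
+ * forms might select the narrower one when it fits; the selection flag and
+ * the hbindx name below are assumptions for illustration, not taken from
+ * this file:
+ *
+ *	if (use_half_indices)
+ *		errval = rsb__BCSR_spmv_uxua_double_complex_H_u_tN_sS_dE_uG(
+ *			VA, rhs, out, Mdim, mdim, hbindx, bpntr, indptr,
+ *			rpntr, cpntr, br, bc, roff, coff, flags, alphap);
+ *	else
+ *		errval = rsb__BCSR_spmv_uxua_double_complex_C_u_tN_sS_dE_uG(
+ *			VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr,
+ *			rpntr, cpntr, br, bc, roff, coff, flags, alphap);
+ */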
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
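+
+/*
+ * Editorial note: unlike the "spmv_uxua" dispatchers earlier in this file,
+ * whose kernel calls forward an alphap scaling argument, the "spmv_unua"
+ * dispatchers end their argument lists at flags.  Assuming unua denotes the
+ * unscaled negated update (an assumption; only the signatures are visible
+ * here), the two operations would relate roughly as:
+ *
+ *	spmv_uxua:  out += alpha * A * rhs
+ *	spmv_unua:  out -=         A * rhs
+ */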
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_unua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
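+/*
+ * Note: a hypothetical call sketch. The dispatchers read the block size from
+ * cpntr[]/rpntr[] and fall back to a 1 x 1 blocking when both are NULL, so an
+ * unblocked caller can reach the fixed-size kernel directly:
+ */
+#if 0
+	errval = rsb__BCSR_spmv_unua_double_complex_H_u_tN_sU_dI_uG(
+		VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr,
+		NULL, NULL, 1, 1, 0, 0, flags ); /* NULL pntrs: rows = columns = 1 */
+#endif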
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
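+/*
+ * Note: when RSB_WANT_LOOPING_KERNELS is undefined, any blocking other than
+ * 1 x 1 falls through to RSB_ERR_UNSUPPORTED_OPERATION, so callers should
+ * check the returned value; a minimal sketch:
+ */
+#if 0
+	if( errval != RSB_ERR_NO_ERROR )
+		return errval; /* e.g. RSB_ERR_UNSUPPORTED_OPERATION */
+#endif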
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
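+/*
+ * Note: a hypothetical selection sketch. The tN/tT variants of one index and
+ * symmetry type share a signature, so a caller can pick the dispatcher from
+ * the requested transposition; the transA variable is an assumption:
+ */
+#if 0
+	switch(transA)
+	{
+	case 'N': errval = rsb__BCSR_spmv_unua_double_complex_C_u_tN_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags ); break;
+	case 'T': errval = rsb__BCSR_spmv_unua_double_complex_C_u_tT_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags ); break;
+	default : errval = RSB_ERR_UNSUPPORTED_OPERATION;
+	}
+#endif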
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spmv_unua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
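+
+/*
+ * Editorial note, inferred from the signatures alone: the _C_ dispatchers
+ * take full rsb_coo_idx_t block column indices in bindx, while the _H_
+ * ones take rsb_half_idx_t ("half word") indices, roughly halving the
+ * index storage at the cost of a smaller representable index range.
+ */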
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_unua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
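+
+/*
+ * Editorial note, a reading of the identifiers rather than upstream
+ * documentation: the _tN_, _tT_ and _tC_ variants appear to follow the
+ * BLAS transposition convention, i.e. untransposed, transposed and
+ * conjugate-transposed operation, respectively.
+ */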
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_unua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_unua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_unua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_unua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_unua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_unua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_sasa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
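+
+/*
+ * Editorial sketch, not generated code: unlike the "spmv_unua"
+ * dispatchers, the "spmv_sasa" ones forward two extra stride arguments,
+ * incx and incy, to the kernels, so element j of the logical rhs vector
+ * lives at rhs[j * incx].  A hypothetical strided update illustrating
+ * the addressing (with incx == incy == 1 it degenerates to the
+ * contiguous case):
+ */
+static inline void rsb_strided_axpy_sketch(double complex *y, const double complex *x, const double complex alpha, rsb_coo_idx_t n, rsb_coo_idx_t incx, rsb_coo_idx_t incy)
+{
+	rsb_coo_idx_t i;
+	for(i = 0; i < n; ++i)
+		y[i * incy] += alpha * x[i * incx];
+}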
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_sasa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_sasa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_sasa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_sasa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_sasa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_sasa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_sasa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_sasa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_sasa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_sasa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_sasa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_sasa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
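+
+/*
+ * Editorial note, a guess from the naming alone: the _sU_, _sS_ and _sH_
+ * tokens look like a symmetry axis (unsymmetric, symmetric, Hermitian),
+ * and the _dE_/_dI_ pair a further toggle; treat both readings as
+ * assumptions, not as upstream documentation.
+ */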
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_sasa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_sasa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_sasa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_sasa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the requested matrix
+	 * operation ("spmv_sasa") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate an overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
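The _H_ dispatchers above differ from the _C_ ones only in taking bindx as rsb_half_idx_t rather than rsb_coo_idx_t. Assuming the half-index type is a narrower integer than the full coordinate type (an assumption suggested by the name, e.g. a 16-bit unsigned type), the same dispatch then serves matrices whose block column indices fit in the smaller range, shrinking index storage. A toy comparison with stand-in typedefs (the _demo types below are hypothetical, not librsb's):

#include <stdio.h>

typedef int rsb_coo_idx_t_demo;			/* stand-in for a full coordinate index */
typedef unsigned short rsb_half_idx_t_demo;	/* stand-in for a half-width index */

int main(void)
{
	/* Index arrays dominate BCSR storage at 1 x 1 blocking, so halving
	 * the index width roughly halves that share of the footprint. */
	printf("full: %zu bytes, half: %zu bytes per index\n",
	       sizeof(rsb_coo_idx_t_demo), sizeof(rsb_half_idx_t_demo));
	return 0;
}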
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
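By this point all three transpose infixes have appeared in the generated names: tN, tT and tC, which by the usual BLAS convention select op(A) = A, A^T and A^H respectively; the conjugate transpose is meaningful here because these kernels handle double complex values. A toy scalar illustration of the three modes, independent of librsb:

#include <complex.h>
#include <stdio.h>

int main(void)
{
	double complex a = 1.0 + 2.0*I;	/* one stored entry of A */

	/* tN: the entry is used as stored, at its stored position. */
	printf("tN uses a       = %g%+gi\n", creal(a), cimag(a));
	/* tT: same value, contributing at the transposed position. */
	printf("tT uses a       = %g%+gi\n", creal(a), cimag(a));
	/* tC: conjugated value at the transposed position. */
	printf("tC uses conj(a) = %g%+gi\n", creal(conj(a)), cimag(conj(a)));
	return 0;
}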
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
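From here the symmetry infix changes from sS to sH. Reading these as symmetric versus hermitian handling (an assumption based on the naming, consistent with the conjugation a complex hermitian matrix requires), only one triangle of the matrix is stored, and each off-diagonal entry also contributes at the mirrored position, conjugated in the hermitian case. A minimal COO-style sketch of that idea, not librsb's kernel:

#include <complex.h>
#include <stddef.h>

/* y += A*x for a matrix stored as one triangle in COO form; the mirrored
 * contribution is conjugated when `hermitian` is nonzero. */
static void sym_spmv(const double complex *VA, const int *IA, const int *JA,
                     size_t nnz, const double complex *x, double complex *y,
                     int hermitian)
{
	for(size_t k = 0; k < nnz; ++k)
	{
		int i = IA[k], j = JA[k];
		y[i] += VA[k] * x[j];
		if(i != j)	/* mirrored (transposed) contribution */
			y[j] += (hermitian ? conj(VA[k]) : VA[k]) * x[i];
	}
}

int main(void)
{
	/* 2x2 hermitian matrix [[2, 1-1i], [1+1i, 3]], lower triangle stored. */
	const double complex VA[] = { 2, 1.0 + 1.0*I, 3 };
	const int IA[] = { 0, 1, 1 }, JA[] = { 0, 0, 1 };
	const double complex x[] = { 1, 1 };
	double complex y[] = { 0, 0 };

	sym_spmv(VA, IA, JA, 3, x, y, 1);	/* y == { 3-1i, 4+1i } */
	return 0;
}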
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sasa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
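From here the generated family changes from "spmv_sasa" to "spmv_sxsa". Comparing the argument lists, the sxsa dispatchers pass one extra parameter, alphap, between flags and incx, presumably a pointer to a scaling factor applied to the product (the exact meaning of the s/x infixes is internal to the generator, so that reading is an assumption). A strided, alpha-scaled toy kernel for the 1 x 1 blocking, purely illustrative and not librsb's code:

#include <complex.h>
#include <stddef.h>

/* out[i*incy] += alpha * sum_j A(i,j) * rhs[j*incx]; bpntr and bindx play
 * the roles of the CSR-style row pointer and column index arrays above. */
static void spmv_scaled_strided(const double complex *VA,
                                const double complex *rhs,
                                double complex *out, int Mdim,
                                const int *bpntr, const int *bindx,
                                const double complex *alphap,
                                int incx, int incy)
{
	const double complex alpha = alphap ? *alphap : 1.0;

	for(int i = 0; i < Mdim; ++i)
	{
		double complex acc = 0;
		for(int k = bpntr[i]; k < bpntr[i+1]; ++k)
			acc += VA[k] * rhs[bindx[k] * (size_t)incx];
		out[i * (size_t)incy] += alpha * acc;
	}
}

With alphap pointing at 1.0 this reduces to the unscaled accumulation of the sasa family above.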
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spmv_sxsa") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each be allowed to overflow by up to, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns,rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sU_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sU_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sS_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sS_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spmv_sxsa") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as
+	 * a fallback) performing the desired matrix operation ("spmv_sxsa") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each allow for a small overflow no larger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as
+	 * a fallback) performing the desired matrix operation ("spmv_sxsa") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each allow for a small overflow no larger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as
+	 * a fallback) performing the desired matrix operation ("spmv_sxsa") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each allow for a small overflow no larger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as
+	 * a fallback) performing the desired matrix operation ("spmv_sxsa") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each allow for a small overflow no larger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sH_dE_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
+
+
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as
+	 * a fallback) performing the desired matrix operation ("spmv_sxsa") for
+	 * the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * should each allow for a small overflow no larger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_ul_sH_dI_uG( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+
+	return errval;
+}
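+
+/*
+ * Editorial note, an inferred reading rather than upstream documentation: in
+ * the mangled kernel names above, "_C_" variants take full rsb_coo_idx_t
+ * block column indices while "_H_" variants take the narrower rsb_half_idx_t
+ * (compare the bindx parameters); "uu" marks the fully unrolled 1 x 1 kernels
+ * and "ul" the looped fallbacks guarded by RSB_WANT_LOOPING_KERNELS. The
+ * tN/tT/tC, sU/sS/sH and dE/dI tokens appear to encode the transpose,
+ * symmetry and diagonal variants, respectively. A hypothetical helper for
+ * the C-versus-H choice (assuming rsb_half_idx_t is a narrower unsigned type):
+ */
+static int rsb_editorial_fits_half_idx(rsb_coo_idx_t mdim)
+{
+	/* half-index kernels apply when every block column index fits */
+	return mdim <= (rsb_coo_idx_t)(rsb_half_idx_t)-1;
+}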
+
+
+
+
+/* @endcond */
diff --git a/rsb_krnl_bcss_spmv_u.h b/rsb_krnl_bcss_spmv_u.h
new file mode 100644
index 0000000..834232d
--- /dev/null
+++ b/rsb_krnl_bcss_spmv_u.h
@@ -0,0 +1,6957 @@
+/* @cond INNERDOC */
+/*!
+ @file
+ @brief
+ Performance kernel dispatching code, for each type, submatrix size, and
+ operation, for the block compressed sparse stripes format.
+ Kernels are unrolled, with no loops, for user-specified blockings only.
+ */
+
+/*
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script.
+ It is not meant to be used as an API (Application Programming Interface).
+ Note: at present, only row-major matrix access is considered.
+ */
+#ifndef RSB_BCSS_SPMV_U_H_INCLUDED
+#define RSB_BCSS_SPMV_U_H_INCLUDED
+#include "rsb.h"
+#include "rsb_common.h"
+#include "rsb_internals.h"
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
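+/*
+ * Editorial sketch, not part of the generated header: a hypothetical use of
+ * the first prototype above, treating a 2 x 2 CSR matrix {{1,2},{0,3}} as
+ * 1 x 1 BCSR. Parameter roles are inferred from the dispatchers in
+ * rsb_krnl_bcss_spmv_u.c, where NULL rpntr/cpntr selects the 1 x 1 path;
+ * reusing bpntr as indptr is an assumption of this sketch.
+ *
+ *	const double VA[] = { 1.0, 2.0, 3.0 };
+ *	const rsb_coo_idx_t bindx[] = { 0, 1, 1 };
+ *	const rsb_nnz_idx_t bpntr[] = { 0, 2, 3 };
+ *	const double rhs[] = { 1.0, 1.0 };
+ *	double out[] = { 0.0, 0.0 };
+ *
+ *	rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sU_dE_uG(VA, rhs, out,
+ *		2, 2, bindx, bpntr, bpntr, NULL, NULL, 1, 1, 0, 0,
+ *		RSB_FLAG_NOFLAGS);
+ *
+ * If "uaua" accumulates out += A * rhs, as its contrast with the "uauz" and
+ * "uxua" variants below suggests, out ends as { 3.0, 3.0 }.
+ */
+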
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
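A note on the naming scheme of these generated kernels (a gloss inferred from the prototypes themselves, not stated in the diff): the `_C_`/`_H_` infix selects the column-index width for `bindx` -- `rsb_coo_idx_t` versus `rsb_half_idx_t`, as visible in the parameter lists above; `tN`/`tT`/`tC` evidently follow the BLAS transposition codes (none / transpose / conjugate transpose); `r1_c1` names the 1x1 register block; `sU`/`sS`/`sH` and `dE`/`dI` plausibly encode symmetry (unsymmetric / symmetric / Hermitian) and explicit vs. implicit diagonal. The operation stems (`uxua` above, `unua`, `sasa`, `sxsa` below) distinguish scaling and stride variants of the same multiply.

As a minimal reference sketch -- not upstream librsb code, with stand-in typedefs for the rsb.h index types -- note that with the r1_c1 (1x1) blocking used here, bpntr/bindx reduce to ordinary CSR row pointers and column indices, so the unsymmetric, non-transposed multiply-accumulate these kernels specialize is:

    /* Reference CSR y += A*x under the assumptions stated above. */
    typedef int rsb_coo_idx_ti;  /* stand-in for rsb_coo_idx_t */
    typedef int rsb_nnz_idx_ti;  /* stand-in for rsb_nnz_idx_t */

    static void csr_spmv_ref(const double *VA, const double *rhs, double *out,
                             rsb_coo_idx_ti Mdim,
                             const rsb_coo_idx_ti *bindx,
                             const rsb_nnz_idx_ti *bpntr)
    {
        rsb_coo_idx_ti i;
        rsb_nnz_idx_ti n;
        for (i = 0; i < Mdim; ++i)                   /* one block row == one row */
            for (n = bpntr[i]; n < bpntr[i+1]; ++n)
                out[i] += VA[n] * rhs[bindx[n]];     /* 1x1 block: one madd */
    }

The `roff`/`coff` arguments in the real prototypes presumably offset the leaf submatrix within the whole matrix (RSB dispatches such kernels per block); the sketch omits them.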
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_uu_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_uu_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_uu_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_uu_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_uu_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tN_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tT_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H__tC_r1_c1_uu_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx, const rsb_coo_idx_t incy);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_uu_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_uu_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_uu_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_uu_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_uu_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tN_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tT_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H__tC_r1_c1_uu_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
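+/*
+ * The spmv_unua prototypes that follow presumably compute the negated
+ * accumulation y <- y - A*x (the uxua family above being the scaled
+ * y <- y + alpha*A*x). As a minimal sketch, assuming bpntr/bindx follow the
+ * CSR convention their names suggest, the untransposed (tN), unsymmetric
+ * (sU), 1x1-block case would reduce to:
+ *
+ *   for (i = 0; i < Mdim; ++i)
+ *     for (k = bpntr[i]; k < bpntr[i+1]; ++k)
+ *       out[i] -= VA[k] * rhs[bindx[k]];
+ *
+ * The _H_ twins differ only in bindx holding rsb_half_idx_t entries.
+ */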
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
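+/*
+ * The spmv_sasa prototypes below are presumably strided-vector variants of
+ * the plain accumulate kernels; the truncated parameter tails hide any extra
+ * stride arguments, so this reading rests on the naming pattern alone.
+ */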
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
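+/*
+ * spmv_sxsa presumably combines the two preceding variants: a scaled
+ * accumulation (as in uxua) over strided vectors (as in sasa). Again an
+ * assumption from the name; the truncated signatures do not show the tail.
+ */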
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tN_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tT_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H__tC_r1_c1_uu_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+
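+/*
+ * From here the same kernel families are instantiated for double complex.
+ * For the transposed (tT) and conjugate-transposed (tC) variants, a sketch
+ * of the presumable inner loop, under the same CSR assumption as above
+ * (scatter instead of gather, with conj() applied to VA for tC), would be:
+ *
+ *   for (i = 0; i < Mdim; ++i)
+ *     for (k = bpntr[i]; k < bpntr[i+1]; ++k)
+ *       out[bindx[k]] += conj(VA[k]) * rhs[i];   // tC; tT without conj()
+ */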
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
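(The fields of these generated kernel names can largely be read off the prototypes themselves: `double_complex`/`double` names the numerical type; `_C_` variants take `rsb_coo_idx_t` column indices in `bindx`, while `_H_` variants take the narrower `rsb_half_idx_t`; and the suffixes cycle through `tN`/`tT`/`tC`, `sU`/`sS`/`sH`, and `dE`/`dI`, which, by analogy with BLAS conventions, plausibly encode transposition (none, transposed, conjugate-transposed), symmetry (unsymmetric, symmetric, Hermitian), and explicit vs. implicit diagonal. The leading four-letter code, e.g. `uaua`, `unua`, `sasa`, `sxsa`, distinguishes scaling/stride variants of the same operation; a caller sketch using two of the complete `uaua` prototypes follows further below.)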
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tN_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tT_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H__tC_r1_c1_uu_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
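(As a minimal sketch, not part of upstream librsb, of how a caller might select between the two index widths declared just above: the `_C_` and `_H_` kernels differ only in the type of the `bindx` column-index array, so a dispatcher can pick the halfword kernel whenever the column dimension fits. Assumptions: the prototypes above are in scope, `rsb_half_idx_t` is a 16-bit unsigned type as "halfword" suggests, `br = bc = 1` selects the plain 1x1-blocked layout, and the name `spmv_dispatch_sketch` is hypothetical.)

#include <limits.h>

/* Hypothetical dispatcher between the full-index ("_C_") and
 * halfword-index ("_H_") kernels declared above. */
static rsb_err_t spmv_dispatch_sketch(
	const double *VA, const double *rhs, double *out,
	rsb_coo_idx_t Mdim, rsb_coo_idx_t mdim,
	const void *bindx,
	const rsb_nnz_idx_t *bpntr, const rsb_nnz_idx_t *indptr,
	const rsb_coo_idx_t *rpntr, const rsb_coo_idx_t *cpntr,
	rsb_coo_idx_t roff, rsb_coo_idx_t coff, rsb_flags_t flags)
{
	if (mdim <= (rsb_coo_idx_t)USHRT_MAX)
		/* Column indices fit in a halfword: the "_H_" kernel
		 * reads a narrower bindx array. */
		return rsb__BCSR_spmv_uaua_double_H_u_tN_sU_dE_uG(
			VA, rhs, out, Mdim, mdim,
			(const rsb_half_idx_t *)bindx, bpntr, indptr,
			rpntr, cpntr, 1, 1, roff, coff, flags);
	/* Otherwise fall back to full rsb_coo_idx_t indices. */
	return rsb__BCSR_spmv_uaua_double_C_u_tN_sU_dE_uG(
		VA, rhs, out, Mdim, mdim,
		(const rsb_coo_idx_t *)bindx, bpntr, indptr,
		rpntr, cpntr, 1, 1, roff, coff, flags);
}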
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
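+/*
+ * Purely illustrative sketch (not librsb code): what a fixed-block
+ * "uaua, tN, sU, dE" kernel of the kind declared above plausibly computes,
+ * assuming the usual BCSR layout (bpntr = block-row pointers into bindx,
+ * bindx = block-column indices, indptr = per-block offsets into VA,
+ * row-major br x bc blocks). Every name and semantic detail below is an
+ * assumption made for orientation, not a statement about the real kernels.
+ */
+static void bcsr_spmv_uaua_sketch(const double *VA, const double *x,
+	double *y, int Mdim, const int *bindx, const int *bpntr,
+	const int *indptr, int br, int bc)
+{
+	for (int bi = 0; bi < Mdim; ++bi)              /* each block row      */
+		for (int k = bpntr[bi]; k < bpntr[bi + 1]; ++k) {
+			const double *blk = VA + indptr[k];    /* br x bc block values */
+			const double *xb = x + bindx[k] * bc;  /* matching x segment   */
+			double *yb = y + bi * br;              /* matching y segment   */
+			for (int ii = 0; ii < br; ++ii)        /* accumulate: y += A*x */
+				for (int jj = 0; jj < bc; ++jj)
+					yb[ii] += blk[ii * bc + jj] * xb[jj];
+		}
+}
+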
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
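+/*
+ * The uxua prototypes below differ from the uaua/uauz ones above in one
+ * directly visible way: their parameter lists continue past "flags" with
+ * at least one further const-qualified argument, presumably the alpha
+ * scaling operand (an assumption; the long lines are elided at " [...]"
+ * in this listing, so the full trailing signature is not shown here).
+ */
+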
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
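The unua prototype family ends here. For orientation, a minimal plain-C model of the semantics this naming is assumed to encode (out -= A * rhs over a 1x1-blocked, CSR-like layout; an assumption for readability, not librsb's actual kernel body) looks like:

    /* Assumed semantics of the unua kernels: out -= A * rhs, unscaled.
       Plain CSR arrays (VA, JA, IA) stand in for the BCSR arguments
       of the prototypes above; this is a sketch, not upstream code. */
    static void spmv_unua_model(const double *VA, const int *JA,
                                const int *IA, int nrows,
                                const double *rhs, double *out)
    {
        for (int i = 0; i < nrows; ++i)              /* row i of A */
            for (int p = IA[i]; p < IA[i + 1]; ++p)  /* nonzeros of row i */
                out[i] -= VA[p] * rhs[JA[p]];        /* subtract, no scaling */
    }

The sasa family that follows (and the sxsa family after it) takes additional trailing parameters after the flags argument; the mailing-list hook truncated those overlong declarations to " [...]", so only the leading fragments ("rs", "co") of the extra parameters survive in this message. The full declarations are in the upstream header.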
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rs [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,r [...]
+
+
+
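The sxsa family below differs from sasa in that its truncated tail begins with "co" rather than "rs", which is consistent with a leading const-qualified scaling argument ahead of stride arguments; that reading is an inference from the surviving fragments, not a verified signature. As a rough model of what a scaled, strided SpMV variant computes (the names alpha, incx, incy are placeholders, not read from the truncated header):

    /* Assumed shape of the sxsa kernels:
       out[i*incy] += alpha * sum_j A(i,j) * rhs[j*incx].
       alpha/incx/incy are placeholder names for illustration only. */
    static void spmv_sxsa_model(const double *VA, const int *JA,
                                const int *IA, int nrows, double alpha,
                                const double *rhs, int incx,
                                double *out, int incy)
    {
        for (int i = 0; i < nrows; ++i)
            for (int p = IA[i]; p < IA[i + 1]; ++p)
                out[i * incy] += alpha * VA[p] * rhs[JA[p] * incx];
    }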
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tN_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tN_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tT_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tT_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tC_sU_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tC_sU_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tN_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tN_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tT_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tT_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tC_sS_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tC_sS_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tN_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tN_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tT_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tT_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_C_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tC_sH_dE_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_H_u_tC_sH_dI_uG(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+
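From this point the same matrix of variants repeats with float in place of double. The suffix fields combine independently; only the C/H distinction is directly observable here (it selects the type of the bindx parameter), while the remaining decodings below are inferences from the naming pattern, not an upstream-documented scheme. A self-contained sketch that composes one such name:

    #include <stdio.h>

    /* Compose a generated-kernel name from its (inferred) suffix fields. */
    int main(void)
    {
        const char *op   = "uaua";  /* uaua/unua/sasa/sxsa: operation variant */
        const char *type = "float"; /* element type: float, double, ... */
        const char *idx  = "C";     /* C: rsb_coo_idx_t bindx, H: rsb_half_idx_t */
        const char *tr   = "tN";    /* tN/tT/tC: transposition (assumed) */
        const char *sym  = "sU";    /* sU/sS/sH: symmetry (assumed) */
        const char *diag = "dE";    /* dE/dI: diagonal handling (assumed) */

        /* Prints rsb__BCSR_spmv_uaua_float_C_u_tN_sU_dE_uG, matching the
           first declaration of the float family that follows. */
        printf("rsb__BCSR_spmv_%s_%s_%s_u_%s_%s_%s_uG\n",
               op, type, idx, tr, sym, diag);
        return 0;
    }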
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
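+/*
+ * The declarations below begin the "uauz" kernel family.  The suffix
+ * mnemonics in these generated names appear to encode each kernel's
+ * variant (inferred from the signatures themselves, not documented here):
+ *   - C / H   : width of the bindx column-index array -- rsb_coo_idx_t
+ *               vs. rsb_half_idx_t, as visible in each prototype;
+ *   - tN/tT/tC: likely the transpose mode (none / transposed / conjugate
+ *               transposed), following BLAS convention;
+ *   - sU/sS/sH: likely the symmetry of the stored matrix (unsymmetric /
+ *               symmetric / Hermitian);
+ *   - dE/dI   : presumably explicit vs. implicit diagonal handling;
+ *   - uG      : presumably a generic, register-unblocked loop variant.
+ * The leading operation code ("uaua" above, "uauz" below) appears to
+ * select the update rule; "uauz" plausibly zeroes the output before
+ * accumulating, i.e. out = op(A) * rhs rather than out += op(A) * rhs.
+ */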
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
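+/*
+ * A minimal usage sketch for the fully visible prototype above,
+ * rsb__BCSR_spmv_uauz_float_C_u_tN_sU_dE_uG().  These double-underscore
+ * kernels are internal and normally reached through librsb's dispatcher,
+ * so the parameter semantics assumed here -- Mdim as the block-row count,
+ * bpntr as CSR-style row pointers, bindx as column indices, br = bc = 1
+ * for plain CSR, zero roff/coff, and NULL for the unused blocking arrays
+ * -- are inferences from the naming and the types, not a documented
+ * contract:
+ *
+ *   // 2x2 CSR matrix [[1 2],[0 3]] with 1x1 blocks (assumed layout)
+ *   const float VA[] = { 1.0f, 2.0f, 3.0f };
+ *   const rsb_coo_idx_t bindx[] = { 0, 1, 1 };
+ *   const rsb_nnz_idx_t bpntr[] = { 0, 2, 3 };
+ *   const float rhs[] = { 1.0f, 1.0f };
+ *   float out[2];  // uauz presumably overwrites out, so no init needed
+ *
+ *   rsb_err_t err = rsb__BCSR_spmv_uauz_float_C_u_tN_sU_dE_uG(
+ *       VA, rhs, out,
+ *       2, 2,              // Mdim, mdim: block row/column counts (assumed)
+ *       bindx, bpntr,
+ *       NULL, NULL, NULL,  // indptr, rpntr, cpntr: unused at 1x1 (assumed)
+ *       1, 1,              // br, bc: block dimensions
+ *       0, 0,              // roff, coff: submatrix offsets
+ *       RSB_FLAG_NOFLAGS);
+ *   // expected under the above assumptions: out == { 3.0f, 3.0f }
+ */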
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
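+/*
+ * The _H_ variants below take bindx as rsb_half_idx_t rather than
+ * rsb_coo_idx_t; presumably they are selected when all block-column
+ * indices fit in the narrower halfword type, roughly halving the
+ * index-traffic bandwidth of the multiply.
+ */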
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
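+/*
+ * Here the "uxua" family begins.  Unlike the "uaua"/"uauz" prototypes,
+ * these signatures carry at least one further trailing argument
+ * (truncated to " [...]" by the mailer); from the operation code it is
+ * plausibly a scalar alpha, giving an out += alpha * op(A) * rhs
+ * update, but the elided text is not reconstructed here.
+ */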
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
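+/*
+ * The "unua" family matches the "uaua" signatures exactly; by the
+ * naming pattern it plausibly performs the negated update
+ * out -= op(A) * rhs.
+ */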
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+
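[Editor's note: the kernel prototypes in this hunk come in pairs that differ only in the type of bindx, rsb_coo_idx_t in the ..._C_... variants versus rsb_half_idx_t in the ..._H_... variants. A plausible reading is that the halfword variants exist so that matrices whose block column indices fit in a narrower integer can halve index storage and memory traffic. A minimal, self-contained sketch of such a width check follows; the typedefs and sizes are assumptions modeled on the declarations, not librsb's actual dispatch code.

    /* Hypothetical sketch: deciding whether the halfword-index
     * (..._H_...) kernels are usable for a given column count.
     * Type widths below are assumed, mirroring the prototypes. */
    #include <stdio.h>

    typedef int rsb_coo_idx_t;              /* assumed full-width index */
    typedef unsigned short rsb_half_idx_t;  /* assumed halfword index   */

    /* Column indices range over 0..ncols-1, so the halfword kernels
     * apply whenever the largest index fits in rsb_half_idx_t. */
    static int fits_halfword(rsb_coo_idx_t ncols)
    {
        return ncols > 0 &&
               (ncols - 1) <= (rsb_coo_idx_t)((rsb_half_idx_t)-1);
    }

    int main(void)
    {
        const rsb_coo_idx_t dims[] = { 1000, 65536, 70000 };
        for (int i = 0; i < 3; ++i)
            printf("%d columns -> %s-index kernel\n", dims[i],
                   fits_halfword(dims[i]) ? "half" : "full");
        return 0;
    }

Under these assumptions, 1000 and 65536 columns would select the halfword kernels (largest index 65535 still fits) while 70000 would require the full-width ones.]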
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,rsb_c [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tN_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tN_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tT_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tT_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tC_sU_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tC_sU_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tN_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tN_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tT_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tT_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tC_sS_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tC_sS_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tN_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tN_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tT_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tT_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_C_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tC_sH_dE_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_H_u_tC_sH_dI_uG(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+
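[Editor's note: the suffixes on these generated prototypes appear to encode kernel specializations: the numeric type (float above, float complex below), C/H for full versus halfword block column indices as noted earlier, tN/tT/tC presumably for untransposed, transposed, and conjugate-transposed operation, sU/sS/sH plausibly for unsymmetric, symmetric, and hermitian storage, and dE/dI perhaps for explicit versus implicit diagonal. The operation prefixes (uaua, unua, sasa, sxsa, uauz) seem to select scaling and accumulation variants; consistent with that, the sasa and sxsa prototypes carry extra trailing parameters (truncated above by the mail archiver) that the uaua and unua ones lack. The following is a minimal, self-contained illustration of what an "uaua"-style accumulate (out += A * rhs, no scaling) would compute on plain CSR data; it is a sketch under the naming assumptions above, not librsb's generated kernel body.

    /* Hypothetical sketch of an unscaled accumulate SpMV over CSR:
     * rp = row pointers, ci = column indices, va = values. */
    #include <stdio.h>

    static void csr_spmv_accumulate(int nr, const int *rp, const int *ci,
                                    const float *va, const float *rhs,
                                    float *out)
    {
        for (int i = 0; i < nr; ++i)               /* one row at a time */
            for (int k = rp[i]; k < rp[i + 1]; ++k)
                out[i] += va[k] * rhs[ci[k]];      /* accumulate        */
    }

    int main(void)
    {
        /* 2x2 example: [[2, 0], [1, 3]] times [1, 1], accumulated
         * onto an initially zeroed output vector. */
        const int   rp[] = { 0, 1, 3 };
        const int   ci[] = { 0, 0, 1 };
        const float va[] = { 2.0f, 1.0f, 3.0f };
        const float x[]  = { 1.0f, 1.0f };
        float       y[]  = { 0.0f, 0.0f };

        csr_spmv_accumulate(2, rp, ci, va, x, y);
        printf("y = [%g, %g]\n", y[0], y[1]);      /* prints y = [2, 4] */
        return 0;
    }

By contrast, a "uauz"-style variant (see the prototypes further below) would presumably zero the output before accumulating rather than adding onto it.]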
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
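+/* uxua group begins. The extra 'x' plausibly denotes an explicit
+ * alpha scaling of the product (out += alpha * A * rhs); the scaling
+ * parameter is not visible in these truncated prototypes, so this is
+ * an inferred reading only. */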
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
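+/* unua group begins. The 'n' plausibly denotes negation, i.e. the
+ * update out -= A * rhs; again inferred from the naming scheme, not
+ * from documented semantics. */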
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
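+/* sasa group begins. Its meaning is less clear from the name alone;
+ * possibly a variant that scales both the accumulator and the
+ * product. Treat this as an uncertain, hedged interpretation. */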
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tN_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tN_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tT_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tT_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tC_sU_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tC_sU_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tN_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tN_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tT_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tT_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tC_sS_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tC_sS_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tN_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tN_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tT_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tT_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_C_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tC_sH_dE_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_float_complex_H_u_tC_sH_dI_uG(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uaua_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uauz_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
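(The prototypes above and below follow librsb's generated-kernel naming scheme. Reading the suffixes against the parameter lists: the C/H letter selects the column-index width, rsb_coo_idx_t versus rsb_half_idx_t, as visible in the bindx parameter; tN/tT/tC select no transpose, transpose, or conjugate transpose; sU/sS/sH presumably select unsymmetric, symmetric, or Hermitian storage; and dE/dI presumably select an explicit or an implicitly unit diagonal. The four-letter operation code appears to encode vector strides and scaling: "uauz" would then mean unit-stride vectors, alpha = 1, beta = 0, i.e. out = A * rhs. A minimal reference sketch of that semantics over 1x1 blocks, where BCSR degenerates to CSR, using hypothetical plain-C types in place of the rsb_* typedefs:

    #include <complex.h>

    /* Reference semantics of a "uauz" SpMV kernel (out = A * rhs) on a
     * CSR matrix; row pointers and column indices are named after the
     * bpntr/bindx parameters above. An illustration of the presumed
     * semantics only, not the generated kernel's actual body. */
    static void spmv_uauz_ref(const double complex *VA,
                              const double complex *rhs,
                              double complex *out, int Mdim,
                              const int *bpntr, const int *bindx)
    {
        for (int i = 0; i < Mdim; ++i) {
            double complex acc = 0;               /* beta = 0: overwrite out */
            for (int k = bpntr[i]; k < bpntr[i + 1]; ++k)
                acc += VA[k] * rhs[bindx[k]];     /* alpha = 1 */
            out[i] = acc;
        }
    }
)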
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_uxua_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
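(By the same reading, the uxua kernels above take an explicit alpha and accumulate, out += alpha * A * rhs, while the unua family below would be the negating variant, out -= A * rhs. A hedged sketch of the accumulating inner loop, again over hypothetical CSR-style arrays standing in for the truncated prototypes:

    #include <complex.h>

    /* Sketch of the presumed "uxua" semantics; the "unua" variant would
     * be the same loop with "out[i] -= VA[k] * rhs[bindx[k]]" instead.
     * Names and types are illustrative stand-ins, not librsb's API. */
    static void spmv_uxua_ref(const double complex *VA,
                              const double complex *rhs,
                              double complex *out, int Mdim,
                              const int *bpntr, const int *bindx,
                              double complex alpha)
    {
        for (int i = 0; i < Mdim; ++i)
            for (int k = bpntr[i]; k < bpntr[i + 1]; ++k)
                out[i] += alpha * VA[k] * rhs[bindx[k]]; /* out += alpha*A*rhs */
    }
)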
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_unua_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
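(The sasa family below appears to be the strided counterpart: both vectors carry an increment and the product is accumulated, stepping through rhs and out BLAS-style. A minimal sketch under that assumption, with hypothetical incx/incy stride parameters that are not visible in the truncated prototypes:

    #include <complex.h>

    /* Presumed "sasa" semantics: strided vectors, accumulated result.
     * Hypothetical illustration only. */
    static void spmv_sasa_ref(const double complex *VA,
                              const double complex *rhs,
                              double complex *out, int Mdim,
                              const int *bpntr, const int *bindx,
                              int incx, int incy)
    {
        for (int i = 0; i < Mdim; ++i) {
            double complex acc = 0;
            for (int k = bpntr[i]; k < bpntr[i + 1]; ++k)
                acc += VA[k] * rhs[bindx[k] * incx]; /* rhs elements incx apart */
            out[i * incy] += acc;                    /* accumulate into out */
        }
    }
)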
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sasa_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tN_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tN_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tT_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tT_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tC_sU_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tC_sU_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tN_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tN_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tT_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tT_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tC_sS_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tC_sS_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tN_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tN_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tT_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tT_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_C_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tC_sH_dE_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+rsb_err_t rsb__BCSR_spmv_sxsa_double_complex_H_u_tC_sH_dI_uG(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+#endif /* RSB_BCSS_SPMV_U_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_krnl_bcss_spmv_u.m4 b/rsb_krnl_bcss_spmv_u.m4
new file mode 100644
index 0000000..acc8943
--- /dev/null
+++ b/rsb_krnl_bcss_spmv_u.m4
@@ -0,0 +1,59 @@
+dnl
+dnl	@author: Michele Martone
+dnl
+/* @cond INNERDOC */
+dnl
+/*!
+ @file
+ @brief
+ Performance kernel dispatching code, for each type, submatrix size, and operation,
+ specialized for the block compressed sparse stripes (BCSS) format.
+ Kernels are fully unrolled, with no loops, for user-specified blockings only.
+ */
+dnl
+include(`rsb_misc.m4')dnl
+RSB_M4_HEADER_MESSAGE()dnl
+RSB_M4_HEADER_EXTRA_DECLARATIONS()dnl
+include(`rsb_krnl_bcss_macros.m4')dnl
+include(`rsb_krnl_vb_macros.m4')dnl FIXME : RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_NAME
+dnl
+dnl
+dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl				Function definitions
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#ifndef RSB_BCSS_SPMV_U_H_INCLUDED
+#define RSB_BCSS_SPMV_U_H_INCLUDED
+')dnl
+dnl 
+dnl
+ifelse(dnl
+RSB_M4_MEMBER(`BCSR',WANT_MATRIX_STORAGE)dnl
+RSB_M4_MEMBER(`BCSC',WANT_MATRIX_STORAGE)dnl
+,`00',`dnl
+/**
+ * No BCSS formats compiled in.
+ */
+',`dnl
+dnl
+dnl
+dnl
+dnl
+dnl	#include "rsb_internals.h"
+dnl	#include "rsb.h"
+RSB_M4_INCLUDE_HEADERS
+dnl
+RSB_M4_BCSS_SPMV_KERNELS((`u'))
+dnl
+')dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#endif /* RSB_BCSS_SPMV_U_H_INCLUDED */
+')dnl
+dnl
+/* @endcond */
+dnl
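Two remarks may help readers navigating these generated files. First, as far as can be inferred from the kernels themselves, the long function names encode their specialization: _C_/_H_ selects rsb_coo_idx_t versus rsb_half_idx_t column indices, tN/tT/tC the untransposed, transposed, or conjugate-transposed operation, sU/sS/sH unsymmetric, symmetric, or hermitian storage, and dE/dI an explicit or (presumably) implicit diagonal. Second, the ONLY_WANT_HEADERS guard in the template above lets one M4 source produce both the header and the implementation. A minimal sketch of the two expansions, with a shortened, hypothetical signature:

    /* expansion with ONLY_WANT_HEADERS defined: prototypes only */
    #ifndef RSB_BCSS_SPMV_U_H_INCLUDED
    #define RSB_BCSS_SPMV_U_H_INCLUDED
    rsb_err_t rsb__BCSR_spmv_kernel(const double *VA, double *out);
    #endif /* RSB_BCSS_SPMV_U_H_INCLUDED */

    /* expansion without ONLY_WANT_HEADERS: the same signatures, now with
       their unrolled bodies */
    rsb_err_t rsb__BCSR_spmv_kernel(const double *VA, double *out)
    {
        /* ... unrolled kernel body ... */
        return RSB_ERR_NO_ERROR;
    }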
diff --git a/rsb_krnl_bcss_spsv_u.c b/rsb_krnl_bcss_spsv_u.c
new file mode 100644
index 0000000..727522e
--- /dev/null
+++ b/rsb_krnl_bcss_spsv_u.c
@@ -0,0 +1,45000 @@
+/* @cond INNERDOC */
+/*!
+ @file
+ @brief
+ Performance kernel dispatching code, for each type, submatrix size, and operation,
+ specialized for the block compressed sparse stripes (BCSS) format.
+ Kernels are fully unrolled, with no loops, for user-specified blockings only.
+ */
+
+/*
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script.
+ It is not meant to be used as an API (Application Programming Interface).
+ Note: at present, only row-major matrix access is considered.
+
+ */
+#include "rsb_internals.h"
+#include "rsb.h"
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double bb_0=rhs[(1*i*1)];
+		double ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+1  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const double *b=out + (1*(j*1));
+			double *c=&ax_0;
+{	{
+
+		register double c_0 = ((double)(0));
+				
+
+		c_0 += a[(0*1)+0]*b[0];
+			c[0]+= c_0 ;
+	}	
+}
+		}
+		if(lk-fk>0)
+		{
+			/* the last element (which for a lower triangular solve is on the diagonal)*/
+			/* Lx=y ; x_0=y_0/L_1_1  */
+			double *c_0=out+(1*(i*1));
+			const double aa=VA[fk];
+		if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+			a -= rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double bb_0=rhs[(1*i*1)];
+		double ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-1  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const double *b=out + (1*(j*1));
+			double *c=&ax_0;
+{	{
+
+		register double c_0 = ((double)(0));
+				
+
+		c_0 += a[(0*1)+0]*b[0];
+			c[0]+= c_0 ;
+	}	
+}
+		}
+		if(lk-fk>0)
+		{
+			/* the last element (which for a lower triangular solve is on the diagonal)*/
+			/* Lx=y ; x_0=y_0/L_1_1  */
+			double *c_0=out+(1*(i*1));
+			const double aa=VA[lk-1];
+		if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+			a += rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
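+/*
+ A compact restatement of the two uL/uU kernels above, in plain CSR terms and
+ kept here only as documentation: the identifiers csr_lower_solve, ia, ja are
+ illustrative and are not librsb symbols. As in the generated code, every row
+ is assumed to hold at least its diagonal entry, stored last in the row.
+*/
+static int csr_lower_solve(size_t n, const size_t *ia, const size_t *ja,
+                           const double *va, double *x)
+{
+	/* Forward substitution: solve L*x = b in place (x enters holding b). */
+	for (size_t i = 0; i < n; ++i) {
+		double axpy = 0.0;
+		size_t k;
+		for (k = ia[i]; k + 1 < ia[i + 1]; ++k) /* all but the diagonal */
+			axpy += va[k] * x[ja[k]];
+		if (va[k] == 0.0)
+			return -1; /* singular diagonal, as RSB_ERR_INVALID_NUMERICAL_DATA above */
+		x[i] = (x[i] - axpy) / va[k]; /* divide by L_ii last */
+	}
+	return 0;
+}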
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double bb_0=rhs[(1*i*1)];
+		double ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+1  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const double *b=out + (1*(j*1));
+			double *c=&ax_0;
+{	{
+
+		register double c_0 = ((double)(0));
+				
+
+		c_0 += a[(0*1)+0]*b[0];
+			c[0]+= c_0 ;
+	}	
+}
+		}
+		if(lk-fk>0)
+		{
+			/* the last element (which for a lower triangular solve is on the diagonal)*/
+			/* Lx=y ; x_0=y_0/L_1_1  */
+			double *c_0=out+(1*(i*1));
+			const double aa=VA[fk];
+		if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+			a -= rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double bb_0=rhs[(1*i*1)];
+		double ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-1  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const double *b=out + (1*(j*1));
+			double *c=&ax_0;
+{	{
+
+		register double c_0 = ((double)(0));
+				
+
+		c_0 += a[(0*1)+0]*b[0];
+			c[0]+= c_0 ;
+	}	
+}
+		}
+		if(lk-fk>0)
+		{
+			/* the last element (which for a lower triangular solve is on the diagonal)*/
+			/* Lx=y ; x_0=y_0/L_1_1  */
+			double *c_0=out+(1*(i*1));
+			const double aa=VA[lk-1];
+		if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+			a += rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=VA[fk];
+		if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=VA[lk-1];
+		if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
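+/*
+ The tT/tC kernels above use the scatter form of substitution: since the
+ matrix is stored by rows, solving against the transpose finalizes one
+ unknown per step (dividing by the diagonal first) and subtracts its
+ contribution from the still-pending right-hand-side entries. A plain-CSR
+ restatement of the ascending (uU) variant, with illustrative identifiers
+ only (csr_transposed_solve, ia, ja are not librsb symbols); each row is
+ assumed to hold its diagonal as its first entry, as in the loops above.
+*/
+static int csr_transposed_solve(size_t n, const size_t *ia, const size_t *ja,
+                                const double *va, double *x)
+{
+	/* Solve U^T*x = b in place (x enters holding b); U is an upper
+	   triangle stored by rows, so U^T is lower triangular. */
+	for (size_t i = 0; i < n; ++i) {
+		if (va[ia[i]] == 0.0)
+			return -1; /* singular diagonal */
+		x[i] /= va[ia[i]]; /* diagonal is the first entry of row i */
+		for (size_t k = ia[i] + 1; k < ia[i + 1]; ++k)
+			x[ja[k]] -= va[k] * x[i]; /* scatter into pending rows */
+	}
+	return 0;
+}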
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=VA[fk];
+		if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=VA[lk-1];
+		if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=VA[fk];
+		if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=VA[lk-1];
+		if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=VA[fk];
+		if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=VA[lk-1];
+		if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double bb_0=rhs[(1*i*1)];
+		double ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const double *b=out + (1*(j*1));
+			double *c=&ax_0;
+			{
+				register double c_0 = ((double)(0));
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* no stored diagonal here: the unit diagonal is implicit */
+			/* Ux=y ; x_i=(y_i - ax_0)/U_ii, with U_ii=1 */
+			double *c_0=out+(1*(i*1));
+			const double aa=1;
+			*c_0=(bb_0 - ax_0)/aa;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
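+/*
+ * Plain-CSR reference for the kernel above (a minimal sketch with illustrative
+ * names, not part of the library API; 1 x 1 blocks make BCSR coincide with
+ * CSR, and the unit diagonal is implicit):
+ *
+ *   static void csr_upper_unit_solve(rsb_coo_idx_t n, const double *val,
+ *           const rsb_nnz_idx_t *rowptr, const rsb_coo_idx_t *colidx,
+ *           const double *y, double *x)
+ *   {
+ *       rsb_coo_idx_t i; rsb_nnz_idx_t k;
+ *       for(i = n-1; (i+1) > 0; --i)               // backward sweep over the rows
+ *       {
+ *           double sum = 0.0;
+ *           for(k = rowptr[i]; k < rowptr[i+1]; ++k)
+ *               sum += val[k] * x[colidx[k]];      // every colidx[k] > i is solved already
+ *           x[i] = y[i] - sum;                     // unit diagonal: no division needed
+ *       }
+ *   }
+ *
+ * The "(i+1) > 0" test replaces "i >= 0" so the countdown also terminates when
+ * rsb_coo_idx_t is configured as an unsigned type, as the in-kernel comment notes.
+ */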
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double bb_0=rhs[(1*i*1)];
+		double ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const double *b=out + (1*(j*1));
+			double *c=&ax_0;
+			{
+				register double c_0 = ((double)(0));
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* no stored diagonal here: the unit diagonal is implicit */
+			/* Lx=y ; x_i=(y_i - ax_0)/L_ii, with L_ii=1 */
+			double *c_0=out+(1*(i*1));
+			const double aa=1;
+			*c_0=(bb_0 - ax_0)/aa;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
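+/*
+ * The lower-triangle variant above runs the same recurrence forward,
+ *   x_i = y_i - sum_{j<i} L_ij * x_j,    i = br, ..., bc-1,
+ * and (per its own comment) reuses the br/bc block-size arguments as the row
+ * bounds of an experimental "bounded box", so a whole-matrix solve presumably
+ * passes br = 0 and bc = Mdim; this reading is inferred from the loop, not
+ * documented.
+ */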
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double bb_0=rhs[(1*i*1)];
+		double ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const double *b=out + (1*(j*1));
+			double *c=&ax_0;
+			{
+				register double c_0 = ((double)(0));
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* no stored diagonal here: the unit diagonal is implicit */
+			/* Ux=y ; x_i=(y_i - ax_0)/U_ii, with U_ii=1 */
+			double *c_0=out+(1*(i*1));
+			const double aa=1;
+			*c_0=(bb_0 - ax_0)/aa;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double bb_0=rhs[(1*i*1)];
+		double ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const double *b=out + (1*(j*1));
+			double *c=&ax_0;
+			{
+				register double c_0 = ((double)(0));
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* no stored diagonal here: the unit diagonal is implicit */
+			/* Lx=y ; x_i=(y_i - ax_0)/L_ii, with L_ii=1 */
+			double *c_0=out+(1*(i*1));
+			const double aa=1;
+			*c_0=(bb_0 - ax_0)/aa;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
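+/*
+ * The transposed-solve kernels in this group work in place and never read rhs:
+ * "out" must hold the right-hand side on entry.  Row i of A is column i of
+ * op(A) = A^T, so instead of gathering a row sum they scatter updates as soon
+ * as x_i is final (minimal sketch, illustrative names, unit implicit diagonal):
+ *
+ *   for(i = 0; i < n; ++i)                       // forward sweep: A upper => A^T lower
+ *   {
+ *       double xi = x[i];                        // diagonal is 1, nothing to divide by
+ *       for(k = rowptr[i]; k < rowptr[i+1]; ++k)
+ *           x[colidx[k]] -= val[k] * xi;         // eliminate x_i from the later equations
+ *   }
+ */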
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
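+/*
+ * For this real double type conjugation is the identity, so the A^H ("tC")
+ * solve bodies that follow are verbatim copies of the A^T ("tT") ones above:
+ * \f$A^H = \overline{A}^T = A^T\f$ whenever A is real.
+ */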
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double bb_0=rhs[(1*i*(incx))];
+		double ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+1  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const double *b=out + (1*(j*(incx)));
+			double *c=&ax_0;
+			{
+				register double c_0 = ((double)(0));
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the one remaining stored element is the diagonal (first in an upper row) */
+			/* Ux=alpha*y ; x_i=(alpha*y_i - ax_0)/U_ii */
+			double *c_0=out+(1*(i*(incy)));
+			const double aa=VA[fk];
+			if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;
+			a -= rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
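+/*
+ * The sxsx kernels above and below add an explicit stored diagonal and a scale
+ * factor alpha (read through alphap).  The loop bounds imply a layout where an
+ * upper row stores its diagonal first (VA[fk]) and a lower row stores it last
+ * (VA[lk-1]); a zero diagonal aborts with RSB_ERR_INVALID_NUMERICAL_DATA.
+ * Per-row step, in plain CSR terms (minimal sketch, illustrative names, upper
+ * case with unit strides):
+ *
+ *   double aa = val[rowptr[i]];                    // upper row: diagonal is stored first
+ *   if(aa == 0.0)
+ *       return RSB_ERR_INVALID_NUMERICAL_DATA;     // singular triangle
+ *   double sum = 0.0;
+ *   rsb_nnz_idx_t k;
+ *   for(k = rowptr[i]+1; k < rowptr[i+1]; ++k)     // skip the stored diagonal
+ *       sum += val[k] * x[colidx[k]];
+ *   x[i] = (alpha * y[i] - sum) / aa;
+ */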
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double bb_0=rhs[(1*i*(incx))];
+		double ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-1  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const double *b=out + (1*(j*(incx)));
+			double *c=&ax_0;
+			{
+				register double c_0 = ((double)(0));
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the one remaining stored element is the diagonal (last in a lower row) */
+			/* Lx=alpha*y ; x_i=(alpha*y_i - ax_0)/L_ii */
+			double *c_0=out+(1*(i*(incy)));
+			const double aa=VA[lk-1];
+			if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;
+			a += rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double bb_0=rhs[(1*i*(incx))];
+		double ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+1  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const double *b=out + (1*(j*(incx)));
+			double *c=&ax_0;
+			{
+				register double c_0 = ((double)(0));
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the one remaining stored element is the diagonal (first in an upper row) */
+			/* Ux=alpha*y ; x_i=(alpha*y_i - ax_0)/U_ii */
+			double *c_0=out+(1*(i*(incy)));
+			const double aa=VA[fk];
+			if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;
+			a -= rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double bb_0=rhs[(1*i*(incx))];
+		double ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-1  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const double *b=out + (1*(j*(incx)));
+			double *c=&ax_0;
+			{
+				register double c_0 = ((double)(0));
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the one remaining stored element is the diagonal (last in a lower row) */
+			/* Lx=alpha*y ; x_i=(alpha*y_i - ax_0)/L_ii */
+			double *c_0=out+(1*(i*(incy)));
+			const double aa=VA[lk-1];
+			if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;
+			a += rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
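+/*
+ * The truncated sxsx signatures above also carry BLAS-style strides: element i
+ * of a logical vector lives at base[i*inc], which is what the rhs[(1*i*(incx))]
+ * and out[...(incy)] expressions expand to.  Standalone illustration (sketch,
+ * not library code):
+ *
+ *   #include <stdio.h>
+ *   int main(void)
+ *   {
+ *       const double x[6] = { 1, 10, 2, 20, 3, 30 }; // two interleaved vectors
+ *       const int incx = 2, n = 3;
+ *       int i;
+ *       for(i = 0; i < n; ++i)
+ *           printf("%g\n", x[i*incx]);   // prints 1 2 3: every incx-th element
+ *       return 0;
+ *   }
+ */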
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=VA[fk];
+		if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk;++k,a += rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
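+
+/*
+ * The ..tT.. kernels (and, for the real 'double' type, the ..tC.. ones
+ * below) solve the transposed system by a column sweep rather than a
+ * dot-product per row: once x_i is fixed, the update x_j -= U_ij * x_i is
+ * scattered to the not-yet-solved entries.  A minimal sketch under the same
+ * layout assumptions as above, with hypothetical names; x holds b on entry:
+ */
+#if 0	/* illustrative sketch only, kept out of the build */
+#include <stddef.h>
+
+/* Solve U^T*x = alpha*b in place; -1 signals a zero pivot. */
+static int csr_utsolve(size_t n, const size_t *ia, const size_t *ja,
+		const double *va, double *x, double alpha)
+{
+	for (size_t i = 0; i < n; ++i)	/* rows of U, first to last */
+	{
+		if (va[ia[i]] == 0.0)
+			return -1;	/* singular diagonal */
+		x[i] /= va[ia[i]];	/* x_i = x_i / U_ii */
+		for (size_t k = ia[i] + 1; k < ia[i + 1]; ++k)
+			x[ja[k]] -= va[k] * x[i];	/* scatter to later rows */
+		x[i] *= alpha;	/* scale after scattering, as the kernel does */
+	}
+	return 0;
+}
+#endif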
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=VA[lk-1];
+		if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-2,a=VA+k,j=bindx[k];k+1>=fk+1;--k,a -= rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=VA[fk];
+		if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk;++k,a += rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=VA[lk-1];
+		if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-2,a=VA+k,j=bindx[k];k+1>=fk+1;--k,a -= rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=VA[fk];
+		if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk;++k,a += rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=VA[lk-1];
+		if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-2,a=VA+k,j=bindx[k];k+1>=fk+1;--k,a -= rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=VA[fk];
+		if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk;++k,a += rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=VA[lk-1];
+		if(aa == ((double)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-2,a=VA+k,j=bindx[k];k+1>=fk+1;--k,a -= rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
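+
+/*
+ * Note that for the real 'double' type the conjugate-transpose (..tC..)
+ * kernels above coincide with the transpose (..tT..) ones, as conjugation
+ * is a no-op on reals; the _C_/_H_ pairs differ only in the width of the
+ * bindx column indices (rsb_coo_idx_t versus the narrower rsb_half_idx_t,
+ * which reduces index bandwidth during the sweep).
+ */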
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
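+
+/*
+ * The symmetric (sS) and Hermitian (sH) spsv variants, here and in the
+ * analogous groups below, are placeholders: they keep the full kernel
+ * signature but only return RSB_ERR_UNIMPLEMENTED_YET.
+ */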
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double bb_0=rhs[(1*i*(incx))];
+		double ax_0;
+		ax_0=0;
+
+		for(k=lk-1,a=VA+k,j=bindx[k];k+1>=fk+1;--k,a -= rows*columns,j=bindx[k])
+		{
+			const double *b=out + (1*(j*(incx)));
+			double *c=&ax_0;
+			{
+				register double c_0 = ((double)(0));
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* the diagonal element is implicit and unit */
+			/* Ux=y ; x_0=y_0/U_1_1, with U_1_1==1  */
+			double *c_0=out+(1*(i*(incy)));
+			const double aa=1;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + 1*(*c_0)=alpha*bb_0 -> (*c_0)=alpha*bb_0 - ax_0 */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
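+
+/*
+ * In the implicit-diagonal (dI) kernels the pivot is the constant aa=1: no
+ * diagonal entry is loaded, the zero-pivot check disappears, the division
+ * folds away at compile time, and the sweep now covers every stored
+ * (off-diagonal) entry of the row.
+ */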
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double bb_0=rhs[(1*i*(incx))];
+		double ax_0;
+		ax_0=0;
+		
+		for(k=fk,j=bindx[k];k<lk;++k,a += rows*columns,j=bindx[k])
+		{
+			const double *b=out + (1*(j*(incx)));
+			double *c=&ax_0;
+			{
+				register double c_0 = ((double)(0));
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* the diagonal element is implicit and unit */
+			/* Lx=y ; x_0=y_0/L_1_1, with L_1_1==1  */
+			double *c_0=out+(1*(i*(incy)));
+			const double aa=1;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + 1*(*c_0)=alpha*bb_0 -> (*c_0)=alpha*bb_0 - ax_0 */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double bb_0=rhs[(1*i*(incx))];
+		double ax_0;
+		ax_0=0;
+
+		for(k=lk-1,a=VA+k,j=bindx[k];k+1>=fk+1;--k,a -= rows*columns,j=bindx[k])
+		{
+			const double *b=out + (1*(j*(incx)));
+			double *c=&ax_0;
+			{
+				register double c_0 = ((double)(0));
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* the diagonal element is implicit and unit */
+			/* Ux=y ; x_0=y_0/U_1_1, with U_1_1==1  */
+			double *c_0=out+(1*(i*(incy)));
+			const double aa=1;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + 1*(*c_0)=alpha*bb_0 -> (*c_0)=alpha*bb_0 - ax_0 */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double bb_0=rhs[(1*i*(incx))];
+		double ax_0;
+		ax_0=0;
+		
+		for(k=fk,j=bindx[k];k<lk;++k,a += rows*columns,j=bindx[k])
+		{
+			const double *b=out + (1*(j*(incx)));
+			double *c=&ax_0;
+			{
+				register double c_0 = ((double)(0));
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* the diagonal element is implicit and unit */
+			/* Lx=y ; x_0=y_0/L_1_1, with L_1_1==1  */
+			double *c_0=out+(1*(i*(incy)));
+			const double aa=1;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + 1*(*c_0)=alpha*bb_0 -> (*c_0)=alpha*bb_0 - ax_0 */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		
+		for(k=fk,j=bindx[k];k<lk;++k,a += rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1,a=VA+k,j=bindx[k];k+1>=fk+1;--k,a -= rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		
+		for(k=fk,j=bindx[k];k<lk;++k,a += rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1,a=VA+k,j=bindx[k];k+1>=fk+1;--k,a -= rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		
+		for(k=fk,j=bindx[k];k<lk;++k,a += rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1,a=VA+k,j=bindx[k];k+1>=fk+1;--k,a -= rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		
+		for(k=fk,j=bindx[k];k<lk;++k,a += rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double *a=VA;
+	const double alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double ax_0;
+		const double aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1,a=VA+k,j=bindx[k];k+1>=fk+1;--k,a -= rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float bb_0=rhs[(1*i*1)];
+		float ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+1  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const float *b=out + (1*(j*1));
+			float *c=&ax_0;
+			{
+				register float c_0 = ((float)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the first stored element of this row, which for an upper triangular solve is the diagonal */
+			/* Ux=y ; x_i=(y_i - sum)/U_i_i */
+			float *c_0=out+(1*(i*1));
+			const float aa=VA[fk];
+			if(aa == ((float)(0))) return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + aa * (*c_0) = bb_0  ->  (*c_0) = (bb_0 - ax_0)/aa */
+			a -= rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
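+
+/*
+ * A minimal sketch (not part of the librsb API) of the backward
+ * substitution pattern the kernel above instantiates, assuming a plain CSR
+ * upper triangle with the diagonal stored first in each row (as VA[fk] is
+ * used above); the names csr_utsolve/n/ia/ja/va/x/y are illustrative only.
+ */
+#if 0
+static int csr_utsolve(int n, const int *ia, const int *ja,
+                       const float *va, const float *y, float *x)
+{
+	int i, k;
+
+	for (i = n - 1; i >= 0; --i)	/* x[i] depends only on x[i+1..n-1] */
+	{
+		float s = 0;
+
+		for (k = ia[i] + 1; k < ia[i + 1]; ++k)	/* off-diagonal entries of row i */
+			s += va[k] * x[ja[k]];
+		if (va[ia[i]] == 0)	/* mirrors the RSB_ERR_INVALID_NUMERICAL_DATA guard */
+			return -1;
+		x[i] = (y[i] - s) / va[ia[i]];
+	}
+	return 0;
+}
+#endif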
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float bb_0=rhs[(1*i*1)];
+		float ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-1  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const float *b=out + (1*(j*1));
+			float *c=&ax_0;
+			{
+				register float c_0 = ((float)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the last stored element of this row, which for a lower triangular solve is the diagonal */
+			/* Lx=y ; x_i=(y_i - sum)/L_i_i */
+			float *c_0=out+(1*(i*1));
+			const float aa=VA[lk-1];
+			if(aa == ((float)(0))) return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + aa * (*c_0) = bb_0  ->  (*c_0) = (bb_0 - ax_0)/aa */
+			a += rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
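+
+/*
+ * The lower triangular (uL) twin of the sketch above, under the same
+ * illustrative assumptions but with the diagonal stored last in each row
+ * (VA[lk-1] above), so the sweep runs forward.
+ */
+#if 0
+static int csr_ltsolve(int n, const int *ia, const int *ja,
+                       const float *va, const float *y, float *x)
+{
+	int i, k;
+
+	for (i = 0; i < n; ++i)	/* x[i] depends only on x[0..i-1] */
+	{
+		float s = 0;
+
+		for (k = ia[i]; k < ia[i + 1] - 1; ++k)	/* all entries but the trailing diagonal */
+			s += va[k] * x[ja[k]];
+		if (va[ia[i + 1] - 1] == 0)
+			return -1;
+		x[i] = (y[i] - s) / va[ia[i + 1] - 1];
+	}
+	return 0;
+}
+#endif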
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float bb_0=rhs[(1*i*1)];
+		float ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+1  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const float *b=out + (1*(j*1));
+			float *c=&ax_0;
+			{
+				register float c_0 = ((float)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the first stored element of this row, which for an upper triangular solve is the diagonal */
+			/* Ux=y ; x_i=(y_i - sum)/U_i_i */
+			float *c_0=out+(1*(i*1));
+			const float aa=VA[fk];
+			if(aa == ((float)(0))) return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + aa * (*c_0) = bb_0  ->  (*c_0) = (bb_0 - ax_0)/aa */
+			a -= rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float bb_0=rhs[(1*i*1)];
+		float ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-1  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const float *b=out + (1*(j*1));
+			float *c=&ax_0;
+			{
+				register float c_0 = ((float)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the last stored element of this row, which for a lower triangular solve is the diagonal */
+			/* Lx=y ; x_i=(y_i - sum)/L_i_i */
+			float *c_0=out+(1*(i*1));
+			const float aa=VA[lk-1];
+			if(aa == ((float)(0))) return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + aa * (*c_0) = bb_0  ->  (*c_0) = (bb_0 - ax_0)/aa */
+			a += rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=VA[fk];
+		if(aa == ((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=VA[lk-1];
+		if(aa == ((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
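+
+/*
+ * The tT kernels above solve the transposed triangle in place without ever
+ * forming A^T: as soon as x[i] is final, row i is scattered into the still
+ * pending entries of x. A minimal sketch for the upper-triangle case
+ * (solving U^T x = y by a forward sweep), under the same illustrative CSR
+ * layout with the diagonal stored first in each row:
+ */
+#if 0
+static int csr_uttsolve(int n, const int *ia, const int *ja,
+                        const float *va, float *x)	/* x holds y on entry */
+{
+	int i, k;
+
+	for (i = 0; i < n; ++i)
+	{
+		if (va[ia[i]] == 0)
+			return -1;
+		x[i] /= va[ia[i]];	/* row i of U is column i of U^T */
+		for (k = ia[i] + 1; k < ia[i + 1]; ++k)
+			x[ja[k]] -= va[k] * x[i];	/* eliminate x[i] from rows ja[k] */
+	}
+	return 0;
+}
+#endif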
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=VA[fk];
+		if(aa == ((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=VA[lk-1];
+		if(aa == ((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=VA[fk];
+		if(aa == ((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=VA[lk-1];
+		if(aa == ((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=VA[fk];
+		if(aa == ((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=VA[lk-1];
+		if(aa == ((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
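+
+/*
+ * For the real-typed kernels above, conjugate transposition coincides with
+ * plain transposition (A^H = A^T for real A), which is why these tC
+ * variants are identical to the corresponding tT ones; the distinction
+ * only carries weight for the complex-typed kernels.
+ */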
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float bb_0=rhs[(1*i*1)];
+		float ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const float *b=out + (1*(j*1));
+			float *c=&ax_0;
+			{
+				register float c_0 = ((float)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* implicit unit diagonal: no stored element to read, no zero check needed */
+			/* Ux=y ; x_i=y_i - sum, since U_i_i=1 */
+			float *c_0=out+(1*(i*1));
+			const float aa=1;
+			*c_0=(bb_0 - ax_0)/aa;	/* division by the unit diagonal kept so all variants share one code shape */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float bb_0=rhs[(1*i*1)];
+		float ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const float *b=out + (1*(j*1));
+			float *c=&ax_0;
+			{
+				register float c_0 = ((float)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* implicit unit diagonal: no stored element to read, no zero check needed */
+			/* Lx=y ; x_i=y_i - sum, since L_i_i=1 */
+			float *c_0=out+(1*(i*1));
+			const float aa=1;
+			*c_0=(bb_0 - ax_0)/aa;	/* division by the unit diagonal kept so all variants share one code shape */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float bb_0=rhs[(1*i*1)];
+		float ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const float *b=out + (1*(j*1));
+			float *c=&ax_0;
+			{
+				register float c_0 = ((float)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* implicit unit diagonal: no stored element to read, no zero check needed */
+			/* Ux=y ; x_i=y_i - sum, since U_i_i=1 */
+			float *c_0=out+(1*(i*1));
+			const float aa=1;
+			*c_0=(bb_0 - ax_0)/aa;	/* division by the unit diagonal kept so all variants share one code shape */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float bb_0=rhs[(1*i*1)];
+		float ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const float *b=out + (1*(j*1));
+			float *c=&ax_0;
+			{
+				register float c_0 = ((float)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* implicit unit diagonal: no stored element to read, no zero check needed */
+			/* Lx=y ; x_i=y_i - sum, since L_i_i=1 */
+			float *c_0=out+(1*(i*1));
+			const float aa=1;
+			*c_0=(bb_0 - ax_0)/aa;	/* division by the unit diagonal kept so all variants share one code shape */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
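+
+/*
+ * With an implicit unit diagonal (dI) there is no stored diagonal element
+ * to divide by and no singularity to check; the kernels above keep the
+ * division by aa=1 only so that all variants share one code shape. The row
+ * step of the earlier sketches reduces to the following (fragment reusing
+ * the illustrative names from above):
+ */
+#if 0
+		x[i] = y[i] - s;	/* unit diagonal: no division, no zero check */
+#endif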
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float bb_0=rhs[(1*i*(incx))];
+		float ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+1  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const float *b=out + (1*(j*(incx)));
+			float *c=&ax_0;
+			{
+				register float c_0 = ((float)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the last element (which for a lower triangular solve is on the diagonal)*/
+			/* Lx=y ; x_0=y_0/L_1_1  */
+			float *c_0=out+(1*(i*(incy)));
+			const float aa=VA[fk];
+		if(aa == ((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+			a -= rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
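+
+/*
+ * Illustrative sketch, not part of the generated kernels: with an explicit
+ * diagonal the kernel above reads the stored pivot (the first entry of each
+ * row here), rejects a zero pivot, and folds the alpha scaling into the
+ * final division. Minimal unit-stride version with hypothetical argument
+ * names (the real kernels also honor incx/incy), kept out of the build:
+ */
+#if 0
+static int csr_upper_spsv_scaled(const float *VA, const int *bindx,
+                                 const int *bpntr, int n, float alpha,
+                                 const float *b, float *x)
+{
+	int i, k;
+
+	for (i = n - 1; i >= 0; --i) {	/* backward substitution */
+		float ax = 0.0f;
+		const float aa = VA[bpntr[i]];	/* diagonal stored first in row i */
+
+		for (k = bpntr[i] + 1; k < bpntr[i+1]; ++k)
+			ax += VA[k] * x[bindx[k]];
+		if (aa == 0.0f)
+			return -1;	/* cf. RSB_ERR_INVALID_NUMERICAL_DATA */
+		x[i] = (alpha * b[i] - ax) / aa;
+	}
+	return 0;
+}
+#endif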
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float bb_0=rhs[(1*i*(incx))];
+		float ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-1  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const float *b=out + (1*(j*(incx)));
+			float *c=&ax_0;
+			{
+				register float c_0 = ((float)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the last element (which for a lower triangular solve is on the diagonal)*/
+			/* Lx=y ; x_0=y_0/L_1_1  */
+			float *c_0=out+(1*(i*(incy)));
+			const float aa=VA[lk-1];
+		if(aa == ((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+			a += rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float bb_0=rhs[(1*i*(incx))];
+		float ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+1  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const float *b=out + (1*(j*(incx)));
+			float *c=&ax_0;
+			{
+				register float c_0 = ((float)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the last element (which for a lower triangular solve is on the diagonal)*/
+			/* Lx=y ; x_0=y_0/L_1_1  */
+			float *c_0=out+(1*(i*(incy)));
+			const float aa=VA[fk];
+		if(aa == ((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+			a -= rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float bb_0=rhs[(1*i*(incx))];
+		float ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-1  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const float *b=out + (1*(j*(incx)));
+			float *c=&ax_0;
+			{
+				register float c_0 = ((float)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the last element (which for a lower triangular solve is on the diagonal)*/
+			/* Lx=y ; x_0=y_0/L_1_1  */
+			float *c_0=out+(1*(i*(incy)));
+			const float aa=VA[lk-1];
+		if(aa == ((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+			a += rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=VA[fk];
+		if(aa == ((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
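+
+/*
+ * Illustrative sketch, not part of the generated kernels: the transposed
+ * scaled solve above divides by the stored pivot up front, scatters the
+ * update into the pending entries, and applies alpha to x_i only afterwards,
+ * once no later iteration can read x_i again. Minimal unit-stride version
+ * with hypothetical argument names, kept out of the build:
+ */
+#if 0
+static int csr_upper_spsv_trans_scaled(const float *VA, const int *bindx,
+                                       const int *bpntr, int n, float alpha,
+                                       float *x)	/* x: b in, alpha*x out */
+{
+	int i, k;
+
+	for (i = 0; i < n; ++i) {
+		const float aa = VA[bpntr[i]];	/* diagonal stored first in row i */
+
+		if (aa == 0.0f)
+			return -1;	/* cf. RSB_ERR_INVALID_NUMERICAL_DATA */
+		x[i] /= aa;
+		for (k = bpntr[i] + 1; k < bpntr[i+1]; ++k)
+			x[bindx[k]] -= VA[k] * x[i];	/* bindx[k] > i, still pending */
+		x[i] *= alpha;	/* row i is never read again, safe to scale last */
+	}
+	return 0;
+}
+#endif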
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=VA[lk-1];
+		if(aa == ((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=VA[fk];
+		if(aa == ((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=VA[lk-1];
+		if(aa == ((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=VA[fk];
+		if(aa == ((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=VA[lk-1];
+		if(aa == ((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=VA[fk];
+		if(aa == ((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=VA[lk-1];
+		if(aa == ((float)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
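+	/*
+	 * Backward substitution over the rows of a strictly-upper-triangular
+	 * BCSR submatrix with an implicit unit diagonal: for each row i, taken
+	 * from the bottom up, accumulate ax_0 = sum_k VA[k]*out[bindx[k]] over
+	 * the stored (off-diagonal) entries, then set out[i] = alpha*rhs[i] - ax_0.
+	 * The loop guard (i+1)>0 terminates correctly even when rsb_coo_idx_t
+	 * is an unsigned type, as the inline comment notes.
+	 */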
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float bb_0=rhs[(1*i*(incx))];
+		float ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const float *b=out + (1*(j*(incx)));
+			float *c=&ax_0;
+			{
+				register float c_0 = (float)(0);
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* the diagonal element (implicit and unit in this kernel, hence aa=1) */
+			/* Ux=y ; x_i=y_i/U_i_i */
+			float *c_0=out+(1*(i*(incy)));
+			const float aa=1;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + aa * *c_0 = alpha*bb_0  ->  (*c_0) = (alpha*bb_0 - ax_0)/aa */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
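+/*
+ * A worked instance of the kernel above (illustrative only): with the
+ * strictly upper triangular U = [[0,2,0],[0,0,3],[0,0,0]], implicit unit
+ * diagonal, alpha = 1 and rhs = (1,1,1), solving (I+U)x = rhs backward
+ * gives x2 = 1, then x1 = 1 - 3*1 = -2, then x0 = 1 - 2*(-2) = 5.
+ */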
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float bb_0=rhs[(1*i*(incx))];
+		float ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const float *b=out + (1*(j*(incx)));
+			float *c=&ax_0;
+			{
+				register float c_0 = (float)(0);
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* the diagonal element (implicit and unit in this kernel, hence aa=1) */
+			/* Lx=y ; x_i=y_i/L_i_i */
+			float *c_0=out+(1*(i*(incy)));
+			const float aa=1;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + aa * *c_0 = alpha*bb_0  ->  (*c_0) = (alpha*bb_0 - ax_0)/aa */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
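+/*
+ * For reference, a minimal free-standing sketch of the same forward
+ * substitution on a plain CSR lower triangle with implicit unit diagonal;
+ * it is kept out of the build with #if 0, and the names are illustrative
+ * only, not library API:
+ */
+#if 0
+static void csr_unit_lower_solve_sketch(int n, const int *rp, const int *ci,
+                                        const float *va, float alpha,
+                                        const float *b, float *x)
+{
+	/* Row i holds only strictly-lower entries; the unit diagonal is implicit. */
+	for (int i = 0; i < n; ++i)
+	{
+		float s = 0;
+		for (int k = rp[i]; k < rp[i + 1]; ++k)
+			s += va[k] * x[ci[k]];	/* uses already-computed x[j], j < i */
+		x[i] = alpha * b[i] - s;	/* division by the (unit) diagonal is a no-op */
+	}
+}
+#endif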
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float bb_0=rhs[(1*i*(incx))];
+		float ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const float *b=out + (1*(j*(incx)));
+			float *c=&ax_0;
+			{
+				register float c_0 = (float)(0);
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* the diagonal element (implicit and unit in this kernel, hence aa=1) */
+			/* Ux=y ; x_i=y_i/U_i_i */
+			float *c_0=out+(1*(i*(incy)));
+			const float aa=1;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + aa * *c_0 = alpha*bb_0  ->  (*c_0) = (alpha*bb_0 - ax_0)/aa */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float bb_0=rhs[(1*i*(incx))];
+		float ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const float *b=out + (1*(j*(incx)));
+			float *c=&ax_0;
+			{
+				register float c_0 = (float)(0);
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* the diagonal element (implicit and unit in this kernel, hence aa=1) */
+			/* Lx=y ; x_i=y_i/L_i_i */
+			float *c_0=out+(1*(i*(incy)));
+			const float aa=1;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + aa * *c_0 = alpha*bb_0  ->  (*c_0) = (alpha*bb_0 - ax_0)/aa */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
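+/*
+ * The _H_ variants above and below differ from their _C_ counterparts only
+ * in the type of the column index array: bindx is rsb_half_idx_t, a
+ * narrower index type which (as the name suggests) roughly halves index
+ * storage for sufficiently small submatrices; the arithmetic is identical.
+ */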
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
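+	/*
+	 * Transposed solve, performed in place on out[]: once out[i] is final
+	 * (here the implicit diagonal is unit, so the division by aa=1 is a
+	 * formality), its contribution *a*ax_0 is immediately scattered into
+	 * the pending entries out[bindx[k]], a right-looking sweep over the
+	 * rows of A, i.e. the columns of A^T; out[i] is scaled by alpha last.
+	 */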
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
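+/*
+ * Illustrative check for the transposed kernels above: with the strictly
+ * upper U of the earlier example (u01=2, u12=3), implicit unit diagonal,
+ * alpha = 1 and out = (1,1,1) on entry, the forward sweep yields
+ * out = (1,-1,4), which indeed solves (I+U)^T x = (1,1,1).
+ */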
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float *a=VA;
+	const float alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float ax_0;
+		const float aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
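+	/*
+	 * Explicit-diagonal (dE) variant: the diagonal entry of row i is stored
+	 * in VA itself, first in the row for the upper-triangular kernels and
+	 * last for the lower-triangular ones, so the off-diagonal sweep skips
+	 * it and the final division uses the stored value, returning
+	 * RSB_ERR_INVALID_NUMERICAL_DATA on a zero diagonal.
+	 */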
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float complex bb_0=rhs[(1*i*1)];
+		float complex ax_0;
+		ax_0=0;
+
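+		/* descend over the strictly-upper entries only: the diagonal sits
+		   at VA[fk], so the bound k+1>=fk+1+1 (i.e. k>=fk+1) excludes it
+		   while staying safe for unsigned index types */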
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+1  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const float complex *b=out + (1*(j*1));
+			float complex *c=&ax_0;
+			{
+				register float complex c_0 = (float complex)(0);
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the diagonal element (stored first in the row for this upper-triangular kernel) */
+			/* Ux=y ; x_i=y_i/U_i_i */
+			float complex *c_0=out+(1*(i*1));
+			const float complex aa=VA[fk];
+			if(aa == ((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + aa * *c_0 = bb_0  ->  (*c_0) = (bb_0 - ax_0)/aa */
+			a -= rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float complex bb_0=rhs[(1*i*1)];
+		float complex ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-1  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const float complex *b=out + (1*(j*1));
+			float complex *c=&ax_0;
+			{
+				register float complex c_0 = (float complex)(0);
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the diagonal element (stored last in the row for this lower-triangular kernel) */
+			/* Lx=y ; x_i=y_i/L_i_i */
+			float complex *c_0=out+(1*(i*1));
+			const float complex aa=VA[lk-1];
+			if(aa == ((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + aa * *c_0 = bb_0  ->  (*c_0) = (bb_0 - ax_0)/aa */
+			a += rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
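+/*
+ * Quick sanity example for the explicit-diagonal lower solve above
+ * (illustrative only): for L = [[2,0],[3,4]] stored by rows as
+ * VA = {2, 3, 4} and rhs = (2,7), the sweep gives x0 = 2/2 = 1 and
+ * x1 = (7 - 3*1)/4 = 1.
+ */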
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float complex bb_0=rhs[(1*i*1)];
+		float complex ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+1  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const float complex *b=out + (1*(j*1));
+			float complex *c=&ax_0;
+			{
+				register float complex c_0 = (float complex)(0);
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the diagonal element (stored first in the row for this upper-triangular kernel) */
+			/* Ux=y ; x_i=y_i/U_i_i */
+			float complex *c_0=out+(1*(i*1));
+			const float complex aa=VA[fk];
+			if(aa == ((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + aa * *c_0 = bb_0  ->  (*c_0) = (bb_0 - ax_0)/aa */
+			a -= rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow A^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float complex bb_0=rhs[(1*i*1)];
+		float complex ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-1  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const float complex *b=out + (1*(j*1));
+			float complex *c=&ax_0;
+			{
+				register float complex c_0 = (float complex)(0);
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the diagonal element (stored last in the row for this lower-triangular kernel) */
+			/* Lx=y ; x_i=y_i/L_i_i */
+			float complex *c_0=out+(1*(i*1));
+			const float complex aa=VA[lk-1];
+			if(aa == ((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + aa * *c_0 = bb_0  ->  (*c_0) = (bb_0 - ax_0)/aa */
+			a += rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^T)^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
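+	/*
+	 * Transposed, explicit-diagonal sweep: out[i] is finalized first by
+	 * dividing by the stored diagonal VA[fk], then its contribution is
+	 * scattered into out[bindx[k]] for the remaining entries k>fk of the
+	 * row (note the k=fk+1 loop start, which skips the diagonal).
+	 */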
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=VA[fk];
+		if(aa == ((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^T)^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=VA[lk-1];
+		if(aa == ((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^T)^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=VA[fk];
+		if(aa == ((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow (A^T)^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=VA[lk-1];
+		if(aa == ((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
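+/*
+ * A note on the descending loops above: rsb_coo_idx_t may be configured as
+ * an unsigned type, for which "i >= 0" is always true. The condition
+ * "(i + 1) > 0" still terminates: after the i == 0 iteration, --i wraps to
+ * the maximum value and i + 1 wraps to 0. A standalone check, assuming an
+ * unsigned index type:
+ */
+#if 0 /* demonstration only, kept out of the build */
+#include <stdio.h>
+int main(void)
+{
+	const unsigned int n = 3;
+	unsigned int count = 0;
+	for (unsigned int i = n - 1; (i + 1) > 0; --i)	/* visits 2, 1, 0 */
+		++count;
+	printf("%u iterations\n", count);	/* prints: 3 iterations */
+	return 0;
+}
+#endif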
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=VA[fk];
+		if(aa == ((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=conjf(aa);	/* conjugated diagonal for the \f$A^H\f$ solve */
+		ax_0=out[1*(i*1)];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=conjf(*a)*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=VA[lk-1];
+		if(aa == ((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=conjf(aa);	/* conjugated diagonal for the \f$A^H\f$ solve */
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=conjf(*a)*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=VA[fk];
+		if(aa == ((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=conjf(aa);	/* conjugated diagonal for the \f$A^H\f$ solve */
+		ax_0=out[1*(i*1)];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=conjf(*a)*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=VA[lk-1];
+		if(aa == ((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=conjf(aa);	/* conjugated diagonal for the \f$A^H\f$ solve */
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=conjf(*a)*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
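+/*
+ * A note on the \f$A^H\f$ kernels in this unit: the conjugate-transpose
+ * solve updates y_j -= conj(a_ij) * x_i, and the diagonal division uses
+ * conj(a_ii). Wrapping the whole product, as in conjf(a * x), would also
+ * conjugate the solution component, since conjf(a * x) == conjf(a) *
+ * conjf(x). A two-line check of that identity (standalone, illustrative
+ * only):
+ */
+#if 0 /* demonstration only, kept out of the build */
+#include <assert.h>
+#include <complex.h>
+int main(void)
+{
+	const float complex a = 1 + 2*I, x = 3 - 1*I;
+	assert(conjf(a * x) == conjf(a) * conjf(x));	/* conjugation distributes over products  */
+	assert(conjf(a * x) != conjf(a) * x);		/* so these differ whenever x is not real */
+	return 0;
+}
+#endif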
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
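+/*
+ * The sS (symmetric) and sH (hermitian) solve variants in this unit are
+ * stubs returning RSB_ERR_UNIMPLEMENTED_YET; a caller is expected to test
+ * for that value and take another path. A standalone sketch of the dispatch
+ * pattern, with stand-in names (the demo_* identifiers are hypothetical,
+ * not librsb API):
+ */
+#if 0 /* demonstration only, kept out of the build */
+#include <stdio.h>
+typedef int demo_err_t;			/* stand-in for rsb_err_t */
+#define DEMO_ERR_NO_ERROR 0
+#define DEMO_ERR_UNIMPLEMENTED_YET (-1)
+static demo_err_t demo_stub_kernel(void)    { return DEMO_ERR_UNIMPLEMENTED_YET; }
+static demo_err_t demo_general_kernel(void) { return DEMO_ERR_NO_ERROR; }
+int main(void)
+{
+	demo_err_t errval = demo_stub_kernel();		/* specialized path missing    */
+	if (errval == DEMO_ERR_UNIMPLEMENTED_YET)
+		errval = demo_general_kernel();		/* fall back to a general path */
+	printf("errval=%d\n", errval);
+	return 0;
+}
+#endif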
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float complex bb_0=rhs[(1*i*1)];
+		float complex ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const float complex *b=out + (1*(j*1));
+			float complex *c=&ax_0;
+			{
+				register float complex c_0 = ((float complex)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* the last element (which for a lower triangular solve is on the diagonal)*/
+			/* Lx=y ; x_0=y_0/L_1_1  */
+			float complex *c_0=out+(1*(i*1));
+			const float complex aa=1;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float complex bb_0=rhs[(1*i*1)];
+		float complex ax_0;
+		ax_0=0;
+
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const float complex *b=out + (1*(j*1));
+			float complex *c=&ax_0;
+			{
+				register float complex c_0 = ((float complex)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* the last element (which for a lower triangular solve is on the diagonal)*/
+			/* Lx=y ; x_0=y_0/L_1_1  */
+			float complex *c_0=out+(1*(i*1));
+			const float complex aa=1;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
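+/*
+ * Illustrative sketch (not part of the generated kernels): with an implicit
+ * unit diagonal (the dI variants above, aa == 1), the solve needs no
+ * division: x_i = y_i - sum_j a_ij * x_j over the stored strictly-triangular
+ * entries. A minimal standalone forward substitution under that assumption:
+ */
+#if 0 /* demonstration only, kept out of the build */
+#include <complex.h>
+#include <stdio.h>
+
+static void demo_spsv_unit_lower(int n, const float complex *VA,
+                                 const int *bindx, const int *bpntr,
+                                 float complex *out)
+{
+	/* Solves L x = y in place for unit-diagonal L; only the strictly
+	 * lower triangle is stored. */
+	for (int i = 0; i < n; ++i)
+	{
+		float complex ax = 0;
+		for (int k = bpntr[i]; k < bpntr[i + 1]; ++k)
+			ax += VA[k] * out[bindx[k]];
+		out[i] -= ax;	/* implicit a_ii == 1: no division needed */
+	}
+}
+
+int main(void)
+{
+	/* L = [1 0; 3 1], unit diagonal not stored: L x = (2, 7) gives x = (2, 1). */
+	const float complex VA[] = { 3 };
+	const int bindx[] = { 0 };
+	const int bpntr[] = { 0, 0, 1 };
+	float complex y[] = { 2, 7 };
+	demo_spsv_unit_lower(2, VA, bindx, bpntr, y);
+	printf("x = (%g, %g)\n", crealf(y[0]), crealf(y[1]));
+	return 0;
+}
+#endif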
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float complex bb_0=rhs[(1*i*1)];
+		float complex ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const float complex *b=out + (1*(j*1));
+			float complex *c=&ax_0;
+			{
+				register float complex c_0 = ((float complex)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* the last element (which for a lower triangular solve is on the diagonal)*/
+			/* Lx=y ; x_0=y_0/L_1_1  */
+			float complex *c_0=out+(1*(i*1));
+			const float complex aa=1;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float complex bb_0=rhs[(1*i*1)];
+		float complex ax_0;
+		ax_0=0;
+
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const float complex *b=out + (1*(j*1));
+			float complex *c=&ax_0;
+			{
+				register float complex c_0 = ((float complex)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* the last element (which for a lower triangular solve is on the diagonal)*/
+			/* Lx=y ; x_0=y_0/L_1_1  */
+			float complex *c_0=out+(1*(i*1));
+			const float complex aa=1;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=conjf(*a)*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=conjf(*a)*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=conjf(*a)*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=conjf(*a)*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+	 * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float complex bb_0=rhs[(1*i*(incx))];
+		float complex ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+1  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const float complex *b=out + (1*(j*(incx)));
+			float complex *c=&ax_0;
+			{
+				register float complex c_0 = ((float complex)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the first element of the row (which for an upper triangular solve is on the diagonal) */
+			/* Ux=y ; x_0=y_0/U_1_1  */
+			float complex *c_0=out+(1*(i*(incy)));
+			const float complex aa=VA[fk];
+			if(aa == ((float complex)(0))) return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+			a -= rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
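+
+/*
+ * Reading aid, not generated kernel code: the backward substitution
+ * performed by the _tN_..._uU kernel above, restated for a plain CSR
+ * upper triangular matrix whose diagonal entry is stored first in each
+ * row. The helper name is hypothetical; plain int indices, unit strides
+ * and an already included <complex.h> are assumed here in place of the
+ * rsb_* typedefs and the incx/incy parameters.
+ */
+static void spsv_csr_utn_sketch(const float complex *VA, const int *JA,
+		const int *RP, int n, const float complex alpha,
+		const float complex *b, float complex *x)
+{
+	int i, k;
+
+	for (i = n - 1; i >= 0; --i)
+	{
+		float complex s = 0;
+
+		/* accumulate the strictly upper part: s = sum_{j>i} a_ij * x_j */
+		for (k = RP[i] + 1; k < RP[i + 1]; ++k)
+			s += VA[k] * x[JA[k]];
+		/* the diagonal entry sits at RP[i]: x_i = (alpha*b_i - s) / a_ii */
+		x[i] = (alpha * b[i] - s) / VA[RP[i]];
+	}
+}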
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float complex bb_0=rhs[(1*i*(incx))];
+		float complex ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-1  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const float complex *b=out + (1*(j*(incx)));
+			float complex *c=&ax_0;
+			{
+				register float complex c_0 = ((float complex)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the last element (which for a lower triangular solve is on the diagonal)*/
+			/* Lx=y ; x_0=y_0/L_1_1  */
+			float complex *c_0=out+(1*(i*(incy)));
+			const float complex aa=VA[lk-1];
+			if(aa == ((float complex)(0))) return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+			a += rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float complex bb_0=rhs[(1*i*(incx))];
+		float complex ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+1  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const float complex *b=out + (1*(j*(incx)));
+			float complex *c=&ax_0;
+			{
+				register float complex c_0 = ((float complex)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the first element of the row (which for an upper triangular solve is on the diagonal) */
+			/* Ux=y ; x_0=y_0/U_1_1  */
+			float complex *c_0=out+(1*(i*(incy)));
+			const float complex aa=VA[fk];
+			if(aa == ((float complex)(0))) return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+			a -= rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float complex bb_0=rhs[(1*i*(incx))];
+		float complex ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-1  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const float complex *b=out + (1*(j*(incx)));
+			float complex *c=&ax_0;
+			{
+				register float complex c_0 = ((float complex)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the last element (which for a lower triangular solve is on the diagonal)*/
+			/* Lx=y ; x_0=y_0/L_1_1  */
+			float complex *c_0=out+(1*(i*(incy)));
+			const float complex aa=VA[lk-1];
+			if(aa == ((float complex)(0))) return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+			a += rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=VA[fk];
+		if(aa == ((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
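+
+/*
+ * Reading aid, not generated kernel code: the in-place column sweep used
+ * by the _tT_ kernels above to solve A^T x = alpha*b for a CSR upper
+ * triangular matrix (diagonal first in each row, so A^T is lower
+ * triangular). On entry x holds b. Hypothetical helper name; plain int
+ * indices and unit strides are assumed, as in the earlier sketch.
+ */
+static void spsv_csr_utt_sketch(const float complex *VA, const int *JA,
+		const int *RP, int n, const float complex alpha, float complex *x)
+{
+	int i, k;
+
+	for (i = 0; i < n; ++i)
+	{
+		/* x_i already carries the updates from columns 0..i-1 of A^T */
+		x[i] /= VA[RP[i]];
+		/* scatter column i of A^T: x_j -= a_ij * x_i for all j > i */
+		for (k = RP[i] + 1; k < RP[i + 1]; ++k)
+			x[JA[k]] -= VA[k] * x[i];
+		/* the entry is finished; apply the alpha scaling last */
+		x[i] *= alpha;
+	}
+}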
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=VA[lk-1];
+		if(aa == ((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=VA[fk];
+		if(aa == ((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=VA[lk-1];
+		if(aa == ((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=VA[fk];
+		if(aa == ((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=conjf(aa);	/* A^H solve: divide by the conjugated diagonal */
+		ax_0=out[1*(i*(incx))];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=conjf(*a)*ax_0;	/* conjugate the matrix entry only, not the solution */
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=VA[lk-1];
+		if(aa == ((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=conjf(aa);	/* A^H solve: divide by the conjugated diagonal */
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=conjf(*a)*ax_0;	/* conjugate the matrix entry only, not the solution */
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=VA[fk];
+		if(aa == ((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=conjf(aa);	/* A^H solve: divide by the conjugated diagonal */
+		ax_0=out[1*(i*(incx))];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=conjf(*a)*ax_0;	/* conjugate the matrix entry only, not the solution */
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=VA[lk-1];
+		if(aa == ((float complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=conjf(aa);	/* A^H solve: divide by the conjugated diagonal */
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=conjf(*a)*ax_0;	/* conjugate the matrix entry only, not the solution */
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
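+
+/*
+ * Reading aid, not generated kernel code: the conjugate-transpose variant
+ * of the column sweep, as in the _tC_ kernels above: for A^H x = alpha*b
+ * the matrix entries are conjugated, never the partial solution.
+ * Hypothetical helper name; same assumptions as the previous sketches.
+ */
+static void spsv_csr_utc_sketch(const float complex *VA, const int *JA,
+		const int *RP, int n, const float complex alpha, float complex *x)
+{
+	int i, k;
+
+	for (i = 0; i < n; ++i)
+	{
+		/* divide by the conjugated diagonal: z_i = x_i / conj(a_ii) */
+		x[i] /= conjf(VA[RP[i]]);
+		/* scatter: x_j -= conj(a_ij) * z_i for all j > i */
+		for (k = RP[i] + 1; k < RP[i + 1]; ++k)
+			x[JA[k]] -= conjf(VA[k]) * x[i];
+		x[i] *= alpha;
+	}
+}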
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float complex bb_0=rhs[(1*i*(incx))];
+		float complex ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const float complex *b=out + (1*(j*(incx)));
+			float complex *c=&ax_0;
+			{
+				register float complex c_0 = ((float complex)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* implicit unit diagonal: no diagonal element is stored */
+			/* Ux=y ; x_0=y_0 (U_1_1 == 1)  */
+			float complex *c_0=out+(1*(i*(incy)));
+			const float complex aa=1;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
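+
+/*
+ * Reading aid, not generated kernel code: with an implicit unit diagonal
+ * (the _dI_ kernels above) the division disappears and every stored entry
+ * of the row belongs to the strictly upper part. Hypothetical helper name;
+ * same assumptions as the earlier sketches.
+ */
+static void spsv_csr_utn_unitdiag_sketch(const float complex *VA,
+		const int *JA, const int *RP, int n, const float complex alpha,
+		const float complex *b, float complex *x)
+{
+	int i, k;
+
+	for (i = n - 1; i >= 0; --i)
+	{
+		float complex s = 0;
+
+		for (k = RP[i]; k < RP[i + 1]; ++k)
+			s += VA[k] * x[JA[k]];
+		/* a_ii == 1 is implied: x_i = alpha*b_i - s */
+		x[i] = alpha * b[i] - s;
+	}
+}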
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float complex bb_0=rhs[(1*i*(incx))];
+		float complex ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const float complex *b=out + (1*(j*(incx)));
+			float complex *c=&ax_0;
+			{
+				register float complex c_0 = ((float complex)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* implicit unit diagonal: no diagonal element is stored */
+			/* Lx=y ; x_0=y_0 (L_1_1 == 1)  */
+			float complex *c_0=out+(1*(i*(incy)));
+			const float complex aa=1;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float complex bb_0=rhs[(1*i*(incx))];
+		float complex ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const float complex *b=out + (1*(j*(incx)));
+			float complex *c=&ax_0;
+			{
+				register float complex c_0 = ((float complex)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* implicit unit diagonal: no diagonal element is stored */
+			/* Ux=y ; x_0=y_0 (U_1_1 == 1)  */
+			float complex *c_0=out+(1*(i*(incy)));
+			const float complex aa=1;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const float complex bb_0=rhs[(1*i*(incx))];
+		float complex ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const float complex *b=out + (1*(j*(incx)));
+			float complex *c=&ax_0;
+			{
+				register float complex c_0 = ((float complex)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* implicit unit diagonal: no diagonal element is stored */
+			/* Lx=y ; x_0=y_0 (L_1_1 == 1)  */
+			float complex *c_0=out+(1*(i*(incy)));
+			const float complex aa=1;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
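+
+/*
+ * The descending sweeps are written to stay safe when the index types are
+ * configured unsigned: `(i+1)>0` stands in for `i>=0` and `k+1>=fk+1` for
+ * `k>=fk`, so the loop conditions still terminate instead of wrapping
+ * around below zero.
+ */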
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
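+
+/*
+ * The _H_ variants repeat the _C_ kernels verbatim except for the element
+ * type of bindx: rsb_half_idx_t column indices instead of rsb_coo_idx_t,
+ * presumably trading index range for halved index storage.
+ */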
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=conjf(*a*ax_0);
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=conjf(*a*ax_0);
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=conjf(*a*ax_0);
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const float complex *a=VA;
+	const float complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		float complex ax_0;
+		const float complex aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*(incy))]-=conjf(*a*ax_0);
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
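+
+/*
+ * The conjugate-transpose (tC) kernels perform the same in-place sweep as
+ * the tT ones, with the scattered update passed through conjf().
+ */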
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type float complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
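+/*
+ * A reading of the kernel name mangling, inferred from the doxygen
+ * comments in this file: tN/tT/tC selects the untransposed, transposed or
+ * conjugate-transposed operand; sU/sS/sH marks the unsymmetric, symmetric
+ * (A == A^T) and hermitian (A == A^H) cases; dI/dE means diagonal implicit
+ * or explicit; uU/uL upper or lower triangle; the _C_/_H_ infix picks
+ * rsb_coo_idx_t or rsb_half_idx_t column indices. The sS and sH solve
+ * variants are stubs returning RSB_ERR_UNIMPLEMENTED_YET, and, going by
+ * their bodies, the uxua kernels below apply no alpha scaling, unlike the
+ * sxsx ones.
+ */
+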
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double complex bb_0=rhs[(1*i*1)];
+		double complex ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+1  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const double complex *b=out + (1*(j*1));
+			double complex *c=&ax_0;
+			{
+				register double complex c_0 = ((double complex)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the last element (which for a lower triangular solve is on the diagonal)*/
+			/* Lx=y ; x_0=y_0/L_1_1  */
+			double complex *c_0=out+(1*(i*1));
+			const double complex aa=VA[fk];
+		if(aa == ((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+			a -= rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
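+
+/*
+ * With an explicit diagonal (dE) the pivot comes from VA itself: VA[fk]
+ * when the diagonal is stored first in its row, VA[lk-1] when it is stored
+ * last. A zero pivot aborts the solve with RSB_ERR_INVALID_NUMERICAL_DATA
+ * rather than dividing by zero.
+ */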
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double complex bb_0=rhs[(1*i*1)];
+		double complex ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-1  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const double complex *b=out + (1*(j*1));
+			double complex *c=&ax_0;
+			{
+				register double complex c_0 = ((double complex)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the last element (which for a lower triangular solve is on the diagonal)*/
+			/* Lx=y ; x_0=y_0/L_1_1  */
+			double complex *c_0=out+(1*(i*1));
+			const double complex aa=VA[lk-1];
+		if(aa == ((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+			a += rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
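+
+/*
+ * Either loop bound deliberately stops one entry short of the diagonal:
+ * the ascending sweep runs k < lk-1 (diagonal stored last), the descending
+ * one stops at k >= fk+1 (diagonal stored first), so the accumulation
+ * covers only the strictly triangular part before the final division.
+ */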
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double complex bb_0=rhs[(1*i*1)];
+		double complex ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+1  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const double complex *b=out + (1*(j*1));
+			double complex *c=&ax_0;
+			{
+				register double complex c_0 = ((double complex)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the last element (which for a lower triangular solve is on the diagonal)*/
+			/* Lx=y ; x_0=y_0/L_1_1  */
+			double complex *c_0=out+(1*(i*1));
+			const double complex aa=VA[fk];
+		if(aa == ((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+			a -= rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double complex bb_0=rhs[(1*i*1)];
+		double complex ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-1  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const double complex *b=out + (1*(j*1));
+			double complex *c=&ax_0;
+			{
+				register double complex c_0 = ((double complex)(0));
+
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		if(lk-fk>0)
+		{
+			/* the last element (which for a lower triangular solve is on the diagonal)*/
+			/* Lx=y ; x_0=y_0/L_1_1  */
+			double complex *c_0=out+(1*(i*1));
+			const double complex aa=VA[lk-1];
+		if(aa == ((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+			a += rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=VA[fk];
+		if(aa == ((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=VA[lk-1];
+		if(aa == ((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
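+
+/*
+ * In the transposed explicit-diagonal sweeps the pivot division happens
+ * up front (out[i] /= VA[fk], or VA[lk-1]), after which the scatter loop
+ * skips that entry by starting at fk+1 ascending, or at lk-2 descending.
+ */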
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=VA[fk];
+		if(aa == ((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=VA[lk-1];
+		if(aa == ((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=VA[fk];
+		if(aa == ((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=conj(*a*ax_0);
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=VA[lk-1];
+		if(aa == ((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=conj(*a*ax_0);
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=VA[fk];
+		if(aa == ((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=conj(*a*ax_0);
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=VA[lk-1];
+		if(aa == ((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=conj(*a*ax_0);
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
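+
+/*
+ * The double precision conjugate-transpose (tC) kernels mirror the float
+ * complex ones above, with conj() in place of conjf() on the update.
+ */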
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double complex bb_0=rhs[(1*i*1)];
+		double complex ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const double complex *b=out + (1*(j*1));
+			double complex *c=&ax_0;
+		{
+			/* 1x1 block: c_0 accumulates a[0]*b[0], then folds into the running sum ax_0 */
+			register double complex c_0 = ((double complex)(0));
+			c_0 += a[(0*1)+0]*b[0];
+			c[0] += c_0;
+		}
+		}
+		{
+			/* implicit unit diagonal: no stored diagonal element */
+			/* Ux=y ; x_i=(y_i - sum)/U_i_i, with U_i_i==1 */
+			double complex *c_0=out+(1*(i*1));
+			const double complex aa=1;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
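+/*
+ * Editorial note: the `(i+1)>0' test above is the standard countdown idiom
+ * for possibly-unsigned index types.  With an unsigned i, `i>=0' is always
+ * true, and decrementing past zero wraps to the type's maximum; `(i+1)>0'
+ * then fails exactly on that wrapped value.  A minimal sketch (hypothetical
+ * names, any unsigned type):
+ */
+#if 0
+#include <stdio.h>
+static void countdown_sketch(unsigned n)
+{
+	unsigned i;
+	/* visits n-1, n-2, ..., 0, then stops: after --i wraps past zero,
+	 * i+1 overflows back to 0 and the condition (i+1)>0 becomes false */
+	for(i=n-1;(i+1)>0;--i)
+		printf("%u\n",i);
+}
+#endif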
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double complex bb_0=rhs[(1*i*1)];
+		double complex ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const double complex *b=out + (1*(j*1));
+			double complex *c=&ax_0;
+		{
+			/* 1x1 block: c_0 accumulates a[0]*b[0], then folds into the running sum ax_0 */
+			register double complex c_0 = ((double complex)(0));
+			c_0 += a[(0*1)+0]*b[0];
+			c[0] += c_0;
+		}
+		}
+		{
+			/* implicit unit diagonal: no stored diagonal element */
+			/* Lx=y ; x_i=(y_i - sum)/L_i_i, with L_i_i==1 */
+			double complex *c_0=out+(1*(i*1));
+			const double complex aa=1;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double complex bb_0=rhs[(1*i*1)];
+		double complex ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const double complex *b=out + (1*(j*1));
+			double complex *c=&ax_0;
+		{
+			/* 1x1 block: c_0 accumulates a[0]*b[0], then folds into the running sum ax_0 */
+			register double complex c_0 = ((double complex)(0));
+			c_0 += a[(0*1)+0]*b[0];
+			c[0] += c_0;
+		}
+		}
+		{
+			/* implicit unit diagonal: no stored diagonal element */
+			/* Ux=y ; x_i=(y_i - sum)/U_i_i, with U_i_i==1 */
+			double complex *c_0=out+(1*(i*1));
+			const double complex aa=1;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
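+/*
+ * Editorial note: the _H_ kernels are generated from the same template as
+ * the _C_ ones; the only difference is the narrower column-index type for
+ * bindx, which roughly halves the index bandwidth of the inner loop.  Sketch
+ * of the one changing parameter:
+ */
+#if 0
+	const rsb_coo_idx_t  *bindx_full;	/* _C_ kernels: full-width column indices */
+	const rsb_half_idx_t *bindx_half;	/* _H_ kernels: half-width column indices */
+#endif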
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double complex bb_0=rhs[(1*i*1)];
+		double complex ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const double complex *b=out + (1*(j*1));
+			double complex *c=&ax_0;
+		{
+			/* 1x1 block: c_0 accumulates a[0]*b[0], then folds into the running sum ax_0 */
+			register double complex c_0 = ((double complex)(0));
+			c_0 += a[(0*1)+0]*b[0];
+			c[0] += c_0;
+		}
+		}
+		{
+			/* implicit unit diagonal: no stored diagonal element */
+			/* Lx=y ; x_i=(y_i - sum)/L_i_i, with L_i_i==1 */
+			double complex *c_0=out+(1*(i*1));
+			const double complex aa=1;
+			*c_0=(bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
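+/*
+ * Editorial note: the transposed-solve (tT) kernels work in scatter form.
+ * Under the assumption that `out' already holds the right-hand side, once
+ * x_i is final the entries of row i are pushed onto the still-unsolved
+ * components: solving A^T x = b with A upper triangular (entries a_ij,
+ * j > i) proceeds as x_i = out_i / a_ii, then out_j -= a_ij * x_i for every
+ * j in row i.  A minimal sketch for a unit diagonal (hypothetical names):
+ */
+#if 0
+static void csr_upper_transpose_unit_solve_sketch(const double complex *va,
+	const rsb_nnz_idx_t *rp, const rsb_coo_idx_t *ci,
+	const rsb_coo_idx_t n, double complex *out /* rhs in, solution out */)
+{
+	rsb_coo_idx_t i;
+	rsb_nnz_idx_t k;
+
+	for(i=0;i<n;++i)	/* A^T is lower triangular: forward sweep */
+	{
+		const double complex xi = out[i];	/* unit diagonal: no division */
+		for(k=rp[i];k<rp[i+1];++k)	/* strictly upper entries of A */
+			out[ci[k]] -= va[k]*xi;
+	}
+}
+#endif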
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=*a*ax_0;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=conj(*a)*ax_0;	/* conjugate applies to the stored entry only */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
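+/*
+ * Editorial note: the conjugate-transpose (tC) variants differ from the tT
+ * ones only in conjugating each stored entry, since (A^H)_ji = conj(a_ij);
+ * the already-solved component x_i itself must not be conjugated.  In the
+ * scatter-form sketch above this is a single-line change:
+ */
+#if 0
+		out[ci[k]] -= conj(va[k])*xi;	/* conjugate the matrix entry only */
+#endif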
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=conj(*a)*ax_0;	/* conjugate applies to the stored entry only */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=conj(*a)*ax_0;	/* conjugate applies to the stored entry only */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A \neq A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=1;
+
+		out[1*(i*1)]/=aa;
+		ax_0=out[1*(i*1)];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+		out[1*(j*1)]-=conj(*a)*ax_0;	/* conjugate applies to the stored entry only */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^T\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^T}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow {A^H}^{-1} \cdot x\f$, where \f$A = A^H\f$.
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \alpha \cdot {A}^{-1} \cdot x \f$
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double complex bb_0=rhs[(1*i*(incx))];
+		double complex ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+1  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const double complex *b=out + (1*(j*(incy)));	/* out is addressed with incy throughout */
+			double complex *c=&ax_0;
+		{
+			/* 1x1 block: c_0 accumulates a[0]*b[0], then folds into the running sum ax_0 */
+			register double complex c_0 = ((double complex)(0));
+			c_0 += a[(0*1)+0]*b[0];
+			c[0] += c_0;
+		}
+		}
+		if(lk-fk>0)
+		{
+			/* the first element of the row, which for an upper triangular solve is the diagonal */
+			/* Ux=y ; x_i=(y_i - sum)/U_i_i */
+			double complex *c_0=out+(1*(i*(incy)));
+			const double complex aa=VA[fk];
+		if(aa == ((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+			a -= rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
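+/*
+ * Editorial note: relative to the uxua kernels, the sxsx variants read a
+ * scale factor through `alphap' and use an explicitly stored diagonal, which
+ * is checked against zero before dividing.  A minimal sketch of one row
+ * step, assuming (as the surrounding code suggests) that the diagonal is
+ * the last stored entry of a lower-triangular row and that rhs/out are
+ * addressed with the incx/incy strides:
+ */
+#if 0
+	const double complex aa = va[rp[i+1]-1];	/* diagonal closes row i */
+	if(aa == 0)
+		return RSB_ERR_INVALID_NUMERICAL_DATA;	/* singular triangle */
+	out[i*incy] = (alpha*rhs[i*incx] - s)/aa;	/* s: off-diagonal sum */
+#endif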
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \alpha \cdot {A}^{-1} \cdot x \f$
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double complex bb_0=rhs[(1*i*(incx))];
+		double complex ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-1  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const double complex *b=out + (1*(j*(incy)));	/* out is addressed with incy throughout */
+			double complex *c=&ax_0;
+		{
+			/* 1x1 block: c_0 accumulates a[0]*b[0], then folds into the running sum ax_0 */
+			register double complex c_0 = ((double complex)(0));
+			c_0 += a[(0*1)+0]*b[0];
+			c[0] += c_0;
+		}
+		}
+		if(lk-fk>0)
+		{
+			/* the last element (which for a lower triangular solve is on the diagonal)*/
+			/* Lx=y ; x_0=y_0/L_1_1  */
+			double complex *c_0=out+(1*(i*(incy)));
+			const double complex aa=VA[lk-1];
+		if(aa == ((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+			a += rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \alpha \cdot {A}^{-1} \cdot x \f$
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double complex bb_0=rhs[(1*i*(incx))];
+		double complex ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+1  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const double complex *b=out + (1*(j*(incy)));	/* out is addressed with incy throughout */
+			double complex *c=&ax_0;
+		{
+			/* 1x1 block: c_0 accumulates a[0]*b[0], then folds into the running sum ax_0 */
+			register double complex c_0 = ((double complex)(0));
+			c_0 += a[(0*1)+0]*b[0];
+			c[0] += c_0;
+		}
+		}
+		if(lk-fk>0)
+		{
+			/* the first element of the row, which for an upper triangular solve is the diagonal */
+			/* Ux=y ; x_i=(y_i - sum)/U_i_i */
+			double complex *c_0=out+(1*(i*(incy)));
+			const double complex aa=VA[fk];
+		if(aa == ((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+			a -= rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Computes \f$y \leftarrow \alpha \cdot {A}^{-1} \cdot x \f$
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double complex bb_0=rhs[(1*i*(incx))];
+		double complex ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-1  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const double complex *b=out + (1*(j*(incy)));	/* out is addressed with incy throughout */
+			double complex *c=&ax_0;
+		{
+			/* 1x1 block: c_0 accumulates a[0]*b[0], then folds into the running sum ax_0 */
+			register double complex c_0 = ((double complex)(0));
+			c_0 += a[(0*1)+0]*b[0];
+			c[0] += c_0;
+		}
+		}
+		if(lk-fk>0)
+		{
+			/* the last element (which for a lower triangular solve is on the diagonal)*/
+			/* Lx=y ; x_0=y_0/L_1_1  */
+			double complex *c_0=out+(1*(i*(incy)));
+			const double complex aa=VA[lk-1];
+			if(aa == ((double complex)(0))) return RSB_ERR_INVALID_NUMERICAL_DATA;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+			a += rows*columns;
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=VA[fk];
+		if(aa == ((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
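
The tT kernels solve against the transpose of the stored triangle, so instead of gathering a dot product per row they finalize x_i first and then scatter a_ij*x_i into the entries it feeds; applying alpha only when each x_i is finalized still yields the solution of U^T x = alpha*b, by linearity. A sketch of the same scheme over plain CSR (a hypothetical helper; diagonal assumed stored first in each row, and x holding b on entry, since the kernels above work in place on `out`):

	/* Sketch of the scatter-style transposed solve used by the tT kernels:
	 * U^T x = alpha*b, traversing the rows of the stored upper triangle U.
	 * Hypothetical plain-CSR helper, not librsb's API. */
	#include <complex.h>

	static int csr_transposed_upper_solve(const double complex *VA,
	                                      const int *bindx, const int *bpntr,
	                                      int n, double complex alpha,
	                                      double complex *x)
	{
		for (int i = 0; i < n; ++i) {
			const int fk = bpntr[i], lk = bpntr[i + 1];

			if (VA[fk] == 0)
				return -1;
			x[i] /= VA[fk];                   /* finalize x_i (alpha pending) */
			for (int k = fk + 1; k < lk; ++k) /* scatter into rows j > i */
				x[bindx[k]] -= VA[k] * x[i];
			x[i] *= alpha;                    /* alpha distributes by linearity */
		}
		return 0;
	}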
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=VA[lk-1];
+		if(aa == ((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=VA[fk];
+		if(aa == ((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
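
The _H_ variants are identical to the _C_ ones except that bindx holds rsb_half_idx_t column indices rather than rsb_coo_idx_t, shrinking index storage when a submatrix's column range fits the narrower type. A small illustration of the idea (assuming, for the sketch only, a 16-bit unsigned index type; the actual width of rsb_half_idx_t is configuration-dependent):

	/* Illustration only: narrow column indices halve index storage
	 * relative to a 32-bit index while computing the same sparse dot. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const double VA[] = { 4.0, 5.0, 6.0 };
		const uint16_t bindx_h[] = { 0, 2, 7 };   /* 2 bytes per index */
		const int32_t  bindx_w[] = { 0, 2, 7 };   /* 4 bytes per index */
		double x[8] = { 1, 1, 1, 1, 1, 1, 1, 1 }, sum = 0;

		for (int k = 0; k < 3; ++k)               /* same result either way */
			sum += VA[k] * x[bindx_h[k]];
		printf("dot = %g; half indices: %zu bytes, wide: %zu bytes\n",
		       sum, sizeof bindx_h, sizeof bindx_w);
		return 0;
	}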
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=VA[lk-1];
+		if(aa == ((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=VA[fk];
+		if(aa == ((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=conj(*a*ax_0);
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=VA[lk-1];
+		if(aa == ((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=conj(*a*ax_0);
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=VA[fk];
+		if(aa == ((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		a += rows*columns;
+		for(k=fk+1,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=conj(*a*ax_0);
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=VA[lk-1];
+		if(aa == ((double complex)(0)))return RSB_ERR_INVALID_NUMERICAL_DATA;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-1,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=conj(*a*ax_0);
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
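
The tC kernels differ from their tT counterparts only in conjugating the scattered product, out[j] -= conj(*a*ax_0). For double complex, conj distributes over products, so conj(a*x) equals conj(a)*conj(x). A quick self-contained check of that identity:

	/* Check of the identity behind the conjugated update above:
	 * conj(a*x) == conj(a)*conj(x) for double complex. */
	#include <complex.h>
	#include <stdio.h>

	int main(void)
	{
		const double complex a = 1.0 + 2.0 * I, x = -3.0 + 0.5 * I;
		const double complex lhs = conj(a * x);
		const double complex rhs = conj(a) * conj(x);

		printf("conj(a*x)       = %g%+gi\n", creal(lhs), cimag(lhs));
		printf("conj(a)*conj(x) = %g%+gi\n", creal(rhs), cimag(rhs));
		return 0;
	}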
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
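
The symmetric (sS) and hermitian (sH) variants in this run are stubs that return RSB_ERR_UNIMPLEMENTED_YET, so callers are expected to branch on the rsb_err_t result. A hedged sketch of such a caller; the type, macro values, and both helper functions below are stand-ins for the sketch, not librsb's real definitions:

	/* Stand-in definitions for the sketch only. */
	typedef int rsb_err_t;                    /* stand-in for the real typedef */
	#define RSB_ERR_NO_ERROR           0      /* stand-in value */
	#define RSB_ERR_UNIMPLEMENTED_YET (-1)    /* stand-in value */

	static rsb_err_t stub_kernel(void)  { return RSB_ERR_UNIMPLEMENTED_YET; }
	static rsb_err_t general_path(void) { return RSB_ERR_NO_ERROR; }

	int main(void)
	{
		rsb_err_t errval = stub_kernel();  /* e.g. an sS/sH variant above */

		if (errval == RSB_ERR_UNIMPLEMENTED_YET)
			errval = general_path();   /* hypothetical fallback */
		return errval == RSB_ERR_NO_ERROR ? 0 : 1;
	}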
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal explicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double complex bb_0=rhs[(1*i*(incx))];
+		double complex ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const double complex *b=out + (1*(j*(incx)));
+			double complex *c=&ax_0;
+			{
+				register double complex c_0 = ((double complex)(0));
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* the diagonal element (implicit: equal to one in this unit-diagonal kernel) */
+			/* Ux=y ; x_0=y_0/U_1_1, with U_1_1 = 1 */
+			double complex *c_0=out+(1*(i*(incy)));
+			const double complex aa=1;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double complex bb_0=rhs[(1*i*(incx))];
+		double complex ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const double complex *b=out + (1*(j*(incx)));
+			double complex *c=&ax_0;
+			{
+				register double complex c_0 = ((double complex)(0));
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* the diagonal element (implicit: equal to one in this unit-diagonal kernel) */
+			/* Lx=y ; x_0=y_0/L_1_1, with L_1_1 = 1 */
+			double complex *c_0=out+(1*(i*(incy)));
+			const double complex aa=1;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
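
The dI kernels assume a unit diagonal that is never stored: every entry of VA is strictly off-diagonal, the zero-diagonal check disappears, and the per-row division degenerates to a division by the constant 1. A sketch of the same unit-diagonal substitution over plain CSR (a hypothetical helper, not librsb's API):

	/* Sketch of the unit-diagonal (dI) forward substitution above. */
	#include <complex.h>

	static void csr_unit_lower_solve(const double complex *VA, const int *bindx,
	                                 const int *bpntr, int n, double complex alpha,
	                                 const double complex *b, double complex *x)
	{
		for (int i = 0; i < n; ++i) {
			double complex ax = 0;

			for (int k = bpntr[i]; k < bpntr[i + 1]; ++k)
				ax += VA[k] * x[bindx[k]]; /* all entries off-diagonal */
			x[i] = alpha * b[i] - ax;          /* divide by the implicit 1 */
		}
	}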
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,0
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double complex bb_0=rhs[(1*i*(incx))];
+		double complex ax_0;
+		ax_0=0;
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			const double complex *b=out + (1*(j*(incx)));
+			double complex *c=&ax_0;
+			{
+				register double complex c_0 = ((double complex)(0));
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* the diagonal element (implicit: equal to one in this unit-diagonal kernel) */
+			/* Ux=y ; x_0=y_0/U_1_1, with U_1_1 = 1 */
+			double complex *c_0=out+(1*(i*(incy)));
+			const double complex aa=1;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		const double complex bb_0=rhs[(1*i*(incx))];
+		double complex ax_0;
+		ax_0=0;
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+			const double complex *b=out + (1*(j*(incx)));
+			double complex *c=&ax_0;
+			{
+				register double complex c_0 = ((double complex)(0));
+				c_0 += a[(0*1)+0]*b[0];
+				c[0] += c_0;
+			}
+		}
+		{
+			/* the diagonal element (implicit: equal to one in this unit-diagonal kernel) */
+			/* Lx=y ; x_0=y_0/L_1_1, with L_1_1 = 1 */
+			double complex *c_0=out+(1*(i*(incy)));
+			const double complex aa=1;
+			*c_0 =(alpha*bb_0 - ax_0)/aa;	/* ax_0 + *a * *c_0=bb_0 -> (*c_0)=(bb_0 - ax_0 )/(*a) */
+		}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=*a*ax_0;
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=conj(*a*ax_0);
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=conj(*a*ax_0);
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=br;RSB_LIKELY(i<bc);++i)	/* experimental, for the bounded box patch */
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+		
+		for(k=fk+0,j=bindx[k];k<lk-0  ;++k,a += rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=conj(*a*ax_0);
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	register rsb_coo_idx_t i=0,j=0;
+	register rsb_nnz_idx_t k=0;
+	const register rsb_coo_idx_t columns=1,rows=1;
+	const double complex *a=VA;
+	const double complex alpha=*alphap;
+	for(i=Mdim-1; RSB_LIKELY((i+1)>0 /*trick for unsigned indices */);--i) //1,0,1
+	{
+		const rsb_nnz_idx_t fk=bpntr[i],lk=bpntr[i+1];
+		double complex ax_0;
+		const double complex aa=1;
+
+		out[1*(i*(incx))]/=aa;
+		ax_0=out[1*(i*(incx))];
+
+		for(k=lk-1-0,a=VA+k,j=bindx[k];k+1>=fk+1+0  ;--k,a -= rows*columns,j=bindx[k])
+		{
+			out[1*(j*(incy))]-=conj(*a*ax_0);
+		}
+		out[1*(i*(incx))]*=alpha;
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
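
All of these kernels address their vector operands through BLAS-style strides, e.g. out[1*(i*(incy))] and rhs[(1*i*(incx))]: element i of a strided vector lives at x[i*incx], so a kernel can operate in place on a non-contiguous slice of a larger array, such as a column of a row-major matrix. A minimal illustration:

	/* Illustration of the stride addressing used throughout these kernels. */
	#include <complex.h>
	#include <stdio.h>

	int main(void)
	{
		/* the logical vector (1+1i, 2, 3) stored with stride 2 */
		double complex buf[6] = { 1 + 1 * I, 0, 2, 0, 3, 0 };
		const int incx = 2, n = 3;

		for (int i = 0; i < n; ++i)
			printf("x[%d] = %g%+gi\n", i,
			       creal(buf[i * incx]), cimag(buf[i * incx]));
		return 0;
	}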
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+         * Matrix A should be blocked 1 x 1, stored in BCSR format, diagonal implicit, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A is assumed blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A is assumed blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A is assumed blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A is assumed blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A is assumed blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A is assumed blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A is assumed blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A is assumed blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A is assumed blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A is assumed blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A is assumed blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A is assumed blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A is assumed blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type double complex, with rsb_coo_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A is assumed blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+{
+
+	/**
+	 * \ingroup rsb_doc_kernels
+	 * Matrix A is assumed blocked 1 x 1, stored in BCSR format, with an implicit diagonal, of type double complex, with rsb_half_idx_t column indices.
+	 * \return RSB_ERR_UNIMPLEMENTED_YET (this function is not implemented).
+	 */
+
+	return RSB_ERR_UNIMPLEMENTED_YET;
+}
+
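+/*
+ * Editorial note: every kernel above is a stub returning
+ * RSB_ERR_UNIMPLEMENTED_YET, so callers must check the rsb_err_t result
+ * rather than assume the solve happened.  A minimal sketch of such a
+ * check, assuming the error constants come from the public rsb.h header:
+ */
+#if 0 /* illustration only; not compiled into the library */
+#include <stdio.h>
+#include <rsb.h>
+static void report(rsb_err_t errval)
+{
+	if(errval == RSB_ERR_NO_ERROR)
+		puts("kernel computed the solve");
+	else if(errval == RSB_ERR_UNIMPLEMENTED_YET)
+		puts("variant not generated in this build");
+	else if(errval == RSB_ERR_UNSUPPORTED_OPERATION)
+		puts("looped kernels compiled out (RSB_WANT_LOOPING_KERNELS)");
+	else
+		puts("other error");
+}
+#endif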
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
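+/*
+ * Editorial note: the overflow bound quoted in the dispatcher comment is
+ * just the size of the trailing partial block.  A worked example with a
+ * hypothetical helper (not a librsb function):
+ */
+#if 0 /* illustration only; not compiled into the library */
+#include <stdio.h>
+static int block_pad(int matrixdim, int blockdim)
+{
+	/* mod(blockdim - mod(matrixdim, blockdim), blockdim) */
+	return (blockdim - (matrixdim % blockdim)) % blockdim;
+}
+int main(void)
+{
+	printf("%d\n", block_pad(10, 4));	/* 2: a 4-row blocking of 10 rows overflows by 2 */
+	printf("%d\n", block_pad(12, 4));	/* 0: exact fit, no padding needed */
+	return 0;
+}
+#endif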
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
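+/*
+ * Editorial note: the dispatchers read the (uniform) block size from the
+ * first interval of the rpntr/cpntr partitioning arrays, falling back to
+ * 1 x 1 when either pointer is NULL.  A standalone sketch of that probe:
+ */
+#if 0 /* illustration only; not compiled into the library */
+#include <stdio.h>
+int main(void)
+{
+	const int rpntr[] = { 0, 1, 2, 3 };	/* row partition: 1-row blocks */
+	const int cpntr[] = { 0, 1, 2, 3 };	/* column partition: 1-column blocks */
+	const int rows    = rpntr[1] - rpntr[0];
+	const int columns = cpntr[1] - cpntr[0];
+	printf("block size: %d x %d\n", rows, columns);	/* 1 x 1 */
+	return 0;
+}
+#endif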
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
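+/*
+ * Editorial note: the conditional fallback repeated in every dispatcher,
+ * isolated: when RSB_WANT_LOOPING_KERNELS is not defined at build time,
+ * any blocking other than 1 x 1 yields RSB_ERR_UNSUPPORTED_OPERATION
+ * instead of calling a looped (`_ul_`) kernel.  A toy rendition:
+ */
+#if 0 /* illustration only; not compiled into the library */
+#include <stdio.h>
+#define RSB_WANT_LOOPING_KERNELS /* comment out to exercise the fallback */
+int main(void)
+{
+#ifdef RSB_WANT_LOOPING_KERNELS
+	puts("dispatching looped (_ul_) kernel");
+#else
+	puts("RSB_ERR_UNSUPPORTED_OPERATION");
+#endif
+	return 0;
+}
+#endif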
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
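+/*
+ * Editorial note: the `_H_` dispatchers differ from the `_C_` ones only in
+ * taking rsb_half_idx_t column indices, which roughly halves index storage.
+ * A sketch of the saving, with guessed widths (librsb's actual typedefs
+ * may differ):
+ */
+#if 0 /* illustration only; not compiled into the library */
+#include <stdio.h>
+#include <stdint.h>
+typedef uint16_t half_idx_guess_t;	/* stand-in for rsb_half_idx_t */
+typedef int32_t  coo_idx_guess_t;	/* stand-in for rsb_coo_idx_t */
+int main(void)
+{
+	const size_t nnz = 1000000;	/* hypothetical nonzero count */
+	printf("full indices: %zu bytes\n", nnz * sizeof(coo_idx_guess_t));	/* 4000000 */
+	printf("half indices: %zu bytes\n", nnz * sizeof(half_idx_guess_t));	/* 2000000 */
+	return 0;
+}
+#endif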
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
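+
+/*
+ * Note (editorial): unlike the _C_ dispatchers, the _H_ variants such as the
+ * one above take bindx as const rsb_half_idx_t *, i.e. half-width block
+ * column indices; presumably this halves index storage and memory traffic
+ * when the matrix dimensions fit the narrower type, though that rationale is
+ * an assumption here.
+ */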
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel for the requested matrix operation
+	 * ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
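+/*
+ * For orientation, a minimal self-contained sketch of the operation the
+ * 1x1 ("r1_c1") kernels dispatched above ultimately perform: a forward
+ * substitution over a CSR lower triangle with an explicitly stored diagonal
+ * and no scaling (these signatures carry no alphap parameter). Illustrative
+ * only, under those assumptions; JA and PA are hypothetical column-index
+ * and row-pointer arrays, not librsb identifiers:
+ *
+ *   void csr_lower_spsv(const double *VA, const int *JA, const int *PA,
+ *                       int n, const double *rhs, double *out)
+ *   {
+ *       for (int i = 0; i < n; ++i) {
+ *           double s = rhs[i];
+ *           int k = PA[i];
+ *           for (; k < PA[i + 1] - 1; ++k)  // off-diagonal entries first
+ *               s -= VA[k] * out[JA[k]];    // JA[k] < i: already solved
+ *           out[i] = s / VA[k];             // diagonal assumed stored last
+ *       }
+ *   }
+ */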
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
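+/*
+ * The _H_ dispatchers below differ from their _C_ twins only in taking
+ * rsb_half_idx_t block column indices in place of rsb_coo_idx_t, presumably
+ * halving index memory traffic on matrices small enough for the narrower
+ * type.
+ */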
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
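+/*
+ * The groups that follow cover the transposed ("tT") and the conjugate
+ * transposed ("tC") solves. For this real double type the conjugate
+ * transpose coincides with the plain transpose, so the tC dispatchers can
+ * be expected to select kernels numerically identical to their tT
+ * counterparts; both are presumably emitted for uniformity across types.
+ */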
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
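+/*
+ * From this point the "spsv_sxsx" dispatchers carry three extra trailing
+ * parameters -- alphap, incx and incy, visible at the call sites below --
+ * presumably a pointer to a scale factor and the strides of the rhs and
+ * out vectors, in the usual BLAS convention. A hedged sketch of that access
+ * pattern (x, y, alpha and n are placeholders):
+ *
+ *   for (int i = 0; i < n; ++i)
+ *       y[i * incy] = alpha * x[i * incx];
+ */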
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
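+
+/*
+ * Editor's note (hedged usage sketch, not generated code): relative to the
+ * spsv_uxua dispatchers, the spsv_sxsx variants also take a scale factor
+ * pointer (alphap) and the strides of the two vectors (incx, incy).  The
+ * sketch below only illustrates parameter wiring for the 1x1 dispatch path
+ * (cpntr/rpntr left NULL); the sample data, the assumption that "uU" selects
+ * the upper triangle, and the zero flags value are all placeholders.
+ */
+static rsb_err_t rsb_sketch_call_spsv_sxsx(double * out)
+{
+	const double VA[3] = { 2.0, 1.0, 4.0 };		/* 2x2 upper triangle values */
+	const rsb_coo_idx_t bindx[3] = { 0, 1, 1 };	/* column indices */
+	const rsb_nnz_idx_t bpntr[3] = { 0, 2, 3 };	/* row pointers */
+	const double rhs[2] = { 2.0, 6.0 };
+	const double alpha = 1.0;
+	const rsb_flags_t flags = 0;			/* placeholder flags */
+
+	return rsb__BCSR_spsv_sxsx_double_C_u_tN_sU_dE_uU(
+		VA, rhs, out, 2, 2, bindx, bpntr, NULL, NULL, NULL,
+		1, 1, 0, 0, flags, &alpha, 1, 1);
+}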
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
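+
+/*
+ * Editor's note (hedged interpretation): the dE/dI pairs above appear to
+ * select explicit- versus implicit-(unit-)diagonal triangular solves.  For
+ * one row of a lower solve, in sketch form:
+ *
+ *   dE:  out[i] = (rhs[i] - sum_{j<i} L[i][j]*out[j]) / L[i][i]
+ *   dI:  out[i] =  rhs[i] - sum_{j<i} L[i][j]*out[j]      (unit diagonal)
+ */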
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
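+
+/*
+ * Editor's note (illustrative sketch, not generated code): the "_H_"
+ * dispatchers take rsb_half_idx_t column indices -- a narrower integer type
+ * that roughly halves index memory traffic on submatrices whose local
+ * column range fits it.  A hedged sketch of the narrowing a builder would
+ * perform (helper name hypothetical; assumes the range was already checked):
+ */
+static void rsb_sketch_narrow_indices(const rsb_coo_idx_t * src, rsb_half_idx_t * dst, const rsb_nnz_idx_t nnz)
+{
+	rsb_nnz_idx_t n;
+	for(n = 0; n < nnz; ++n)
+		dst[n] = (rsb_half_idx_t)src[n];	/* narrowing cast; in range by assumption */
+}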
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
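+
+/*
+ * Editor's note (hedged): tN/tT/tC select op(A) = A, A^T and A^H
+ * respectively.  Since the kernels in this group are real (double), A^H
+ * equals A^T, so the tC dispatchers that follow are structurally identical
+ * to the tT ones above; the distinction presumably matters only for the
+ * complex-typed kernels generated elsewhere.
+ */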
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
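+
+/*
+ * Editor's note (hedged reading of the generated naming scheme, inferred
+ * from the dispatchers themselves rather than stated by upstream):
+ *   double_C / double_H : full (rsb_coo_idx_t) vs half (rsb_half_idx_t) indices
+ *   tN / tT / tC        : no transpose / transpose / conjugate transpose
+ *   sU / sS / sH        : unsymmetric / symmetric / hermitian
+ *   dE / dI             : explicit vs implicit (unit) diagonal
+ *   uU / uL             : upper vs lower triangle
+ *   _uu_ / _ul_         : fully unrolled kernel vs looped fallback
+ *                         (the latter gated by RSB_WANT_LOOPING_KERNELS)
+ */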
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel (unrolled, or looped as a
+	 * fallback) performing the desired matrix operation ("spsv_sxsx") for the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors must
+	 * each be able to tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
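+
+/*
+ * A hypothetical call sketch (variable names invented for illustration;
+ * all arrays are assumed to come from an already assembled BCSR
+ * structure). With a 1 x 1 blocking, zero offsets and unit strides, the
+ * dispatcher above resolves to the fully unrolled r1_c1 kernel:
+ *
+ *	const double alpha = 1.0;
+ *	rsb_err_t err = rsb__BCSR_spsv_sxsx_double_C_u_tN_sS_dE_uL(
+ *		VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
+ *		1, 1, 0, 0, flags, &alpha, 1, 1);
+ */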
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
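+
+/*
+ * The functions below repeat the identical dispatch skeleton for the
+ * remaining transposition and symmetry codes, differing only in the kernel
+ * names they forward to. If "tT"/"tC" indeed denote transpose and
+ * conjugate transpose (see the naming note above), the two should coincide
+ * numerically for this real double type.
+ */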
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the kernel specialized for the "spsv_sxsx" operation at the
+	 * current fixed block size (fully unrolled, or looped where available).
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must each
+	 * tolerate a small overflow of at most
+	 *	mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *	mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
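+
+/*
+ * Editorial note: in these "sxsx" variants, alphap apparently points to
+ * the scaling factor applied by the kernel, while incx and incy are the
+ * element strides of the rhs and out vectors, by analogy with BLAS
+ * conventions.
+ */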
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
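+
+/*
+ * Editorial note: when RSB_WANT_LOOPING_KERNELS is not defined at build
+ * time, the generic looped fallbacks above are compiled out, and any block
+ * size other than the unrolled 1x1 case yields
+ * RSB_ERR_UNSUPPORTED_OPERATION instead.
+ */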
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
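+
+/*
+ * Editorial note: the _C_ and _H_ dispatchers differ only in the index
+ * type of bindx -- rsb_coo_idx_t for "C" versus the narrower
+ * rsb_half_idx_t for "H", which presumably halves index storage when the
+ * block column indices fit the half-width type.
+ */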
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
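+
+/*
+ * Editorial note: the tN/tT/tC infix selects untransposed, transposed, or
+ * conjugate-transposed operation, following the usual BLAS convention; for
+ * these real "double" kernels the tT and tC variants are numerically
+ * equivalent.
+ */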
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
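+
+/*
+ * Editorial note: the spsv_uxua kernels that follow appear to be the
+ * unit-alpha, unit-stride specialization of spsv_sxsx: the alphap pointer
+ * and the incx/incy strides are gone from both the signatures and the
+ * dispatched calls, and "float" replaces "double" in this stretch of the
+ * file.
+ */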
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
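+
+/*
+ * Hypothetical call sketch (compiled out): driving one dispatcher directly
+ * on a 3x3 lower triangle stored as 1x1 BCSR, i.e. plain CSR with the
+ * diagonal kept last in each row.  All array contents, the NULL block
+ * pointers and the 0 flags value are made-up illustration; in the library
+ * these entry points are reached through higher-level routines.
+ */
+#if 0
+static void rsb_spsv_dispatch_sketch(void)
+{
+	/* L = [ 1 0 0 ; 2 1 0 ; 0 3 1 ] */
+	const float VA[] = { 1.0f, 2.0f, 1.0f, 3.0f, 1.0f };
+	const rsb_coo_idx_t bindx[] = { 0, 0, 1, 1, 2 };	/* column indices */
+	const rsb_nnz_idx_t bpntr[] = { 0, 1, 3, 5 };	/* row pointers   */
+	const float rhs[] = { 1.0f, 2.0f, 3.0f };
+	float out[3];
+	rsb__BCSR_spsv_uxua_float_C_u_tN_sU_dE_uL(VA, rhs, out, 3, 3,
+		bindx, bpntr, NULL, NULL, NULL, 1, 1, 0, 0, 0);
+}
+#endif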
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
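+
+/*
+ * On the _C_/_H_ pairing: the signatures above and below differ only in the
+ * type of bindx (rsb_coo_idx_t vs rsb_half_idx_t), so the _H_ variants
+ * presumably exist to halve column-index storage on submatrices whose
+ * indices fit the narrower type.
+ */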
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
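+
+/*
+ * Note common to all these dispatchers: any block size other than the
+ * unrolled 1x1 case reaches a _ul_ kernel only when RSB_WANT_LOOPING_KERNELS
+ * was defined at build time; otherwise the call returns
+ * RSB_ERR_UNSUPPORTED_OPERATION instead of computing anything.
+ */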
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
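+
+/*
+ * The _dE_/_dI_ pairs differ only in which leaf kernels they name.  A
+ * plausible reading (unconfirmed here) is "diagonal explicit" vs "diagonal
+ * implicit", i.e. whether the triangular factor stores its diagonal or a
+ * unit diagonal is assumed during the solve.
+ */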
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
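+
+/*
+ * From here the same dispatch skeleton repeats for the transposed operation:
+ * by the usual BLAS convention, _tN_ above would be "no transpose", _tT_
+ * below "transpose", and _tC_ further down "conjugate transpose".
+ */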
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
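+
+/*
+ * The _uU_/_uL_ suffixes plausibly select the upper vs lower triangle.  For
+ * a lower triangle at 1x1 blocking, the solve is ordinary forward
+ * substitution; a compiled-out sketch under that assumption (hypothetical
+ * helper, diagonal assumed present and stored last in each row):
+ */
+#if 0
+static void rsb_forward_subst_sketch(const float *VA,
+	const rsb_coo_idx_t *bindx, const rsb_nnz_idx_t *bpntr,
+	const float *rhs, float *out, rsb_coo_idx_t n)
+{
+	rsb_coo_idx_t i;
+	for (i = 0; i < n; ++i)
+	{
+		float s = rhs[i];
+		rsb_nnz_idx_t k = bpntr[i];
+		for (; k < bpntr[i + 1] - 1; ++k)	/* strictly lower entries */
+			s -= VA[k] * out[bindx[k]];
+		out[i] = s / VA[k];	/* divide by the diagonal entry */
+	}
+}
+#endif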
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
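+
+/*
+ * For these float kernels conjugation is the identity, so the _tC_
+ * dispatchers below can only differ from their _tT_ counterparts in the leaf
+ * kernels they name, not in the arithmetic performed.
+ */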
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
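+
+/*
+ * Last group in this stretch: the _sS_ dispatchers.  By analogy with _sU_
+ * above, _sS_ plausibly marks kernels for symmetrically stored matrices; the
+ * dispatch skeleton itself is unchanged.
+ */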
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the requested matrix
+	 * operation ("spsv_uxua") at the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
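+
+/*
+ * Usage sketch (editorial; the identifiers below are illustrative
+ * assumptions, not part of the generated file). A 1x1 blocking makes
+ * rows == columns == 1, so the unlooped r1_c1 kernel above is selected:
+ *
+ *   rsb_coo_idx_t rp[] = { 0, 1 }, cp[] = { 0, 1 };
+ *   rsb_err_t err = rsb__BCSR_spsv_uxua_float_H_u_tN_sS_dI_uU(
+ *           VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr,
+ *           rp, cp, 1, 1, 0, 0, flags);
+ *
+ * Any other blocking falls through to the looped kernel, or yields
+ * RSB_ERR_UNSUPPORTED_OPERATION when RSB_WANT_LOOPING_KERNELS is
+ * undefined.
+ */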
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel performing the "spsv_uxua"
+	 * matrix operation for the current fixed block size: the unlooped
+	 * variant for 1x1 blocks, the looped variant otherwise.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
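+/*
+ * Editorial note, inferred from the surrounding generated code rather than
+ * stated by it: the suffixes appear to encode the kernel variant. "float" is
+ * the element type; "C"/"H" selects rsb_coo_idx_t versus rsb_half_idx_t
+ * column indices (visible in the type of bindx); "tN"/"tT"/"tC" plausibly
+ * mean no-transpose/transpose/conjugate-transpose; "r1_c1" is the 1x1 block
+ * shape; "uu"/"ul" distinguish the fixed-block kernel from the looped
+ * fallback; and the remaining letter pairs (sH/sU, dE/dI, uU/uL) look like
+ * symmetry handling, explicit versus implicit diagonal, and upper versus
+ * lower triangle. Treat this decoding as an assumption.
+ */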
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
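+/*
+ * Editorial note, an observation on the generated structure: every
+ * dispatcher funnels block shapes other than 1x1 to a "_ul" looped kernel,
+ * but only when the build defines RSB_WANT_LOOPING_KERNELS; otherwise those
+ * shapes return RSB_ERR_UNSUPPORTED_OPERATION. Callers should therefore
+ * check the returned rsb_err_t rather than assume every block geometry is
+ * serviced.
+ */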
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
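+/*
+ * Hedged usage sketch, not in the original sources; the local variable names
+ * are assumptions. Passing rpntr==NULL and cpntr==NULL makes the dispatchers
+ * above fall back to 1x1 blocks, so a plain CSR solve through the
+ * halfword-index, implicit-diagonal variant might be driven as below (inert
+ * under #if 0 so the generated file is unchanged):
+ */
+#if 0
+	rsb_err_t err = rsb__BCSR_spsv_uxua_float_H_u_tN_sH_dI_uL(
+		VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr,
+		NULL /* rpntr */, NULL /* cpntr */,
+		1 /* br */, 1 /* bc */, 0 /* roff */, 0 /* coff */, flags);
+#endif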
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
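+/*
+ * Editorial note, inferred from the signatures and call sites below: the
+ * "spsv_sxsx" family that follows extends the "spsv_uxua" dispatchers above
+ * with three trailing parameters (a scale factor pointer alphap and the
+ * vector strides incx/incy), which the bodies forward unchanged to the
+ * kernels; the "uxua" forms are evidently the unit-alpha, unit-stride
+ * specializations. A hedged sketch of a strided-capable call, with assumed
+ * local variable names and kept inert under #if 0:
+ */
+#if 0
+	const float alpha = 1.0f;
+	rsb_err_t err = rsb__BCSR_spsv_sxsx_float_C_u_tN_sU_dE_uU(
+		VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr,
+		NULL /* rpntr */, NULL /* cpntr */,
+		1 /* br */, 1 /* bc */, 0 /* roff */, 0 /* coff */,
+		flags, &alpha, 1 /* incx */, 1 /* incy */);
+#endif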
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function will dispatch the specialized looped kernel function for 
+	 * performing the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, you should allow the rhs and the out
+	 * vector to accept a small overflow not bigger, respectively, than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,const rsb_coo_idx_t incx,const rsb_coo_idx_t incy)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
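+/*
+ * A minimal caller-side sketch (illustrative only; use_half_indices and
+ * hbindx are hypothetical names): the H dispatchers take rsb_half_idx_t
+ * block indices, so they only apply when the column indices have been
+ * packed into half words; otherwise the C dispatcher with full
+ * rsb_coo_idx_t indices is the one to call.
+ */
+#if 0
+	rsb_err_t errval;
+	if(use_half_indices)
+		errval = rsb__BCSR_spsv_sxsx_float_H_u_tT_sU_dE_uU( VA,rhs,out,Mdim,mdim,hbindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+	else
+		errval = rsb__BCSR_spsv_sxsx_float_C_u_tT_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#endif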
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
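+/*
+ * Note on the RSB_WANT_LOOPING_KERNELS branches: in a build without the
+ * looping kernels, any block size other than 1x1 resolves to
+ * RSB_ERR_UNSUPPORTED_OPERATION, so callers should check the returned
+ * code rather than assume the triangular solve ran. A sketch (assuming
+ * the usual argument set is in scope):
+ */
+#if 0
+	rsb_err_t errval = rsb__BCSR_spsv_sxsx_float_C_u_tN_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+	if(errval != RSB_ERR_NO_ERROR)
+		return errval;	/* e.g. RSB_ERR_UNSUPPORTED_OPERATION */
+#endif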
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
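+/*
+ * The columns = rows = 1 fallback in each dispatcher means a NULL
+ * rpntr/cpntr pair is treated as plain (1x1-blocked) CSR; a sketch of
+ * the equivalent selection logic:
+ */
+#if 0
+	const rsb_coo_idx_t rows    = (cpntr && rpntr) ? rpntr[1]-rpntr[0] : 1;
+	const rsb_coo_idx_t columns = (cpntr && rpntr) ? cpntr[1]-cpntr[0] : 1;
+#endif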
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * should each be allowed a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
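+
+/*
+ * A minimal caller-side sketch of the overflow rule above (hypothetical
+ * helper code, not a library API; nr and br stand for matrix rows and
+ * block rows, and both names are illustrative):
+ *
+ *   size_t pad = ((size_t)br - (size_t)(nr % br)) % (size_t)br;
+ *   float *out = calloc((size_t)nr + pad, sizeof(float));
+ *
+ * Here pad equals mod(blockrows-mod(matrixrows,blockrows),blockrows), so a
+ * strictly blocked kernel may safely touch the trailing pad elements.
+ */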
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
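+
+/*
+ * The rsb__BCSR_spsv_*_H_* dispatchers that follow mirror the *_C_* ones
+ * exactly; the only difference visible in their signatures is that bindx is
+ * an array of the narrower rsb_half_idx_t rather than rsb_coo_idx_t,
+ * presumably to halve block-index storage.
+ */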
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
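+
+/*
+ * From here on the dispatched kernels carry the _sH_ rather than the _sS_
+ * name component (combined, below, with _tN_/_tT_/_tC_ transposition
+ * variants); the dispatch logic itself is unchanged, so the suffix
+ * apparently selects a different symmetry-handling kernel family.
+ */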
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches to the specialized looped kernel that performs
+	 * the desired matrix operation ("spsv_sxsx") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors should
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
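
Note the change of operation at this point: the spsv_sxsx dispatchers above
forward alphap, incx and incy to their kernels, while the spsv_uxua family
ends its argument list at flags, suggesting unit strides and unit alpha;
that reading of the suffixes is an assumption here, not something this file
documents. A hedged sketch of why the extra parameters matter, with invented
names rather than librsb's:

	/* Hypothetical strided update: reads x[i*incx], writes y[i*incy],
	 * scaling by *alphap; a "unit" variant would hard-code
	 * incx == incy == 1 and alpha == 1. */
	static void scale_strided(const float *alphap, const float *x, int incx,
	                          float *y, int incy, int n)
	{
		int i;
		for (i = 0; i < n; ++i)
			y[i * incy] = *alphap * x[i * incx];
	}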
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
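
The *_float_complex_* dispatchers use the C99 complex types; "float complex"
and the I constant come from <complex.h>, which these sources are assumed to
include. A self-contained reference sketch:

	#include <complex.h>
	#include <stdio.h>

	int main(void)
	{
		/* C99 float complex, as in the kernel signatures above. */
		float complex z = 1.0f + 2.0f * I;
		printf("z = %g%+gi\n", crealf(z), cimagf(z));
		return 0;
	}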
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel implementing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
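The _C_ and _H_ dispatcher variants above differ only in the index type of
the bindx array (rsb_coo_idx_t versus the narrower rsb_half_idx_t), so the
half-index form is applicable only when every stored index fits the
narrower type. A rough selection predicate (a sketch only; it assumes
rsb_half_idx_t is an unsigned-short-sized type, which this file does not
itself guarantee):

    #include <limits.h>

    /* Half (_H_) indices are safe only if the largest block-column
     * index fits the narrower index type. */
    static int rsb_example_half_indices_fit(long max_index)
    {
        return max_index >= 0 && max_index <= USHRT_MAX;
    }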
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs
+	 * the requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors must
+	 * each tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
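
The dispatchers that follow are textually identical up to the suffix of the mangled name, which encodes the variant being compiled. A reader's key, inferred from the signatures and call sites in this hunk rather than from any official librsb legend:

    /*
     * rsb__BCSR_spsv_uxua_float_complex_<I>_u_<t>_<s>_<d>_<u>
     *   <I> = C | H        bindx passed as rsb_coo_idx_t or rsb_half_idx_t
     *   <t> = tN | tT | tC untransposed, transposed, conjugate-transposed
     *   <s> = sS | sH      symmetric or hermitian variant (an assumption)
     *   <d> = dE | dI      explicit or implicit unit diagonal (an assumption)
     *   <u> = uU | uL      upper or lower triangular solve (an assumption)
     * The leaf kernels additionally carry r1_c1 (the 1x1 blocking) and
     * uu or ul (fully unrolled body vs. looped fallback).
     */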
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
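
Each dispatcher derives the blocking from the first entries of the row and column partitioning arrays, falling back to a 1x1 blocking when either is absent. A distilled sketch of that branch, reusing the variable names above:

    /* When both partitionings are present they delimit the first block;
     * otherwise the experimental bounded-box path assumes 1x1 blocks. */
    const int blocked = (cpntr && rpntr);
    rsb_coo_idx_t rows    = blocked ? rpntr[1] - rpntr[0] : 1;
    rsb_coo_idx_t columns = blocked ? cpntr[1] - cpntr[0] : 1;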
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
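
The H variants differ from the C variants only in the type of bindx: rsb_half_idx_t instead of rsb_coo_idx_t. A plausible motivation, inferred rather than documented in this hunk, is that a narrower index type halves the memory traffic of the block column-index array on submatrices whose dimensions fit it:

    #include <stdint.h>

    /* Assumed widths only; the actual librsb typedefs do not appear
     * in this hunk. */
    typedef int32_t  rsb_coo_idx_t;   /* assumption: full-width index */
    typedef uint16_t rsb_half_idx_t;  /* assumption: half-width index */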
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
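
Every dispatcher above reduces to the same decision: a 1x1 blocking reaches the fully unrolled r1_c1 kernel, while any other blocking requires the looped kernel, which exists only when the library was configured with RSB_WANT_LOOPING_KERNELS. A caller-side sketch against the function just defined, in which the surrounding setup and the handler are hypothetical:

    rsb_err_t err = rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sH_dE_uU(
            VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr,
            rpntr, cpntr, br, bc, roff, coff, flags);
    if (err == RSB_ERR_UNSUPPORTED_OPERATION)
            handle_unsupported_blocking(); /* hypothetical: the build lacks
                                              looping kernels, so only the
                                              1x1 blocking is supported */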
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel that performs the
+	 * desired matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * extra elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
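+
+/*
+ * The overflow bound quoted in each header comment, worked through on
+ * hypothetical sizes: for matrixrows = 10 and blockrows = 4,
+ * mod(blockrows - mod(matrixrows, blockrows), blockrows)
+ *   = mod(4 - mod(10, 4), 4) = mod(4 - 2, 4) = 2,
+ * i.e. two elements of padding; for the 1x1 kernels dispatched here the
+ * bound is mod(1 - mod(n, 1), 1) = 0, so no padding is needed.
+ */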
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
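+
+/*
+ * A minimal caller-side sketch of sizing a vector with that overflow room.
+ * This helper is illustrative only (not part of librsb's API) and assumes
+ * <stdlib.h> and <complex.h> are available; it is kept under "#if 0" so it
+ * does not affect this translation unit.
+ */
+#if 0
+static float complex * alloc_with_block_overflow(size_t n, size_t b)
+{
+	/* pad = mod(b - mod(n,b), b): extra elements a strictly blocked kernel may touch */
+	const size_t pad = b ? (b - (n % b)) % b : 0;
+
+	return calloc(n + pad, sizeof(float complex));	/* n+pad zeroed elements */
+}
+#endif /* 0 */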
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_uxua") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
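+
+/*
+ * On the variants that follow: the "_C_"-infixed dispatchers take full
+ * rsb_coo_idx_t column indices in bindx, while the "_H_" ones take the
+ * narrower rsb_half_idx_t, as their prototypes show; the dispatch logic is
+ * otherwise identical. The "spsv_sxsx" dispatchers below additionally
+ * receive alphap, incx and incy, which (by analogy with BLAS conventions; an
+ * assumption, not confirmed here) carry a scaling factor and the strides of
+ * rhs and out.
+ */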
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the requested
+	 * matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small overflow of no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
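
[Editorial note: every dispatcher in this family shares one shape: detect the block size, take the unrolled 1x1 kernel when it matches, and otherwise fall back to a generic looped kernel if RSB_WANT_LOOPING_KERNELS was compiled in. A distilled, self-contained sketch of that control flow follows; all identifiers are hypothetical stand-ins, not librsb symbols.]

#include <stdio.h>

#define WANT_LOOPING_KERNELS  /* mirrors RSB_WANT_LOOPING_KERNELS */

#define ERR_NO_ERROR              0
#define ERR_UNSUPPORTED_OPERATION 1

static int kernel_1x1(void)    { return ERR_NO_ERROR; } /* unrolled 1x1 case */
#ifdef WANT_LOOPING_KERNELS
static int kernel_looped(void) { return ERR_NO_ERROR; } /* generic fallback  */
#endif

static int dispatch(int rows, int columns)
{
	int errval = ERR_NO_ERROR;

	switch (rows)
	{
	case 1:
		switch (columns)
		{
		case 1: /* 1 1 BCSR: fully unrolled kernel */
			errval = kernel_1x1();
			break;
		default: /* other column counts: loop, or give up */
#ifdef WANT_LOOPING_KERNELS
			errval = kernel_looped();
#else
			errval = ERR_UNSUPPORTED_OPERATION;
#endif
		}
		break;
	default: /* other row counts: loop, or give up */
#ifdef WANT_LOOPING_KERNELS
		errval = kernel_looped();
#else
		errval = ERR_UNSUPPORTED_OPERATION;
#endif
	}
	return errval;
}

int main(void)
{
	printf("1x1 -> %d, 2x2 -> %d\n", dispatch(1, 1), dispatch(2, 2));
	return 0;
}

Generating one such dispatcher per type/transpose/symmetry/diagonal/storage combination presumably trades code size for branch-free inner kernels.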
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
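
[Editorial note: the block size itself is read off the partitioning arrays: each dispatcher takes the first row and column block extents, rpntr[1]-rpntr[0] and cpntr[1]-cpntr[0], as the fixed block dimensions. A tiny standalone sketch with invented boundary values:]

#include <stdio.h>

int main(void)
{
	/* rpntr/cpntr hold the block row/column boundaries; the extent of
	 * the first block is taken as the fixed block size. Values below
	 * are invented for illustration. */
	int rpntr[] = { 0, 2, 4, 6 };  /* block row boundaries    */
	int cpntr[] = { 0, 3, 6, 9 };  /* block column boundaries */

	int rows    = rpntr[1] - rpntr[0];  /* 2 */
	int columns = cpntr[1] - cpntr[0];  /* 3 */

	printf("block size: %d x %d\n", rows, columns);
	return 0;
}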
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
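
[Editorial note: for reference, the operation these kernels implement, "spsv", is a sparse triangular solve. The sketch below is a minimal CSR forward substitution, given only as a plain reference point for the math; it assumes each row stores its diagonal entry last and is not how librsb's generated kernels are written.]

#include <stdio.h>

/* Minimal CSR lower-triangular solve, x := inv(L) * b, assuming the
 * diagonal entry closes each row. Hypothetical reference only. */
static void csr_lsolve(int n, const int *ptr, const int *idx,
                       const double *val, const double *b, double *x)
{
	for (int i = 0; i < n; ++i)
	{
		double s = b[i];
		int j;
		for (j = ptr[i]; j < ptr[i + 1] - 1; ++j)
			s -= val[j] * x[idx[j]];   /* subtract solved terms */
		x[i] = s / val[j];                 /* divide by diagonal    */
	}
}

int main(void)
{
	/* L = [2 0; 1 4], b = (2, 9)^T  =>  x = (1, 2)^T */
	int    ptr[] = { 0, 1, 3 };
	int    idx[] = { 0, 0, 1 };
	double val[] = { 2.0, 1.0, 4.0 };
	double b[]   = { 2.0, 9.0 }, x[2];

	csr_lsolve(2, ptr, idx, val, b, x);
	printf("x = (%g, %g)\n", x[0], x[1]);
	return 0;
}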
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
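
[Editorial note: the _C_ and _H_ variants differ only in the declared type of bindx: full rsb_coo_idx_t indices versus halfword rsb_half_idx_t ones. A sketch of why the narrower index array is worth generating separate kernels for; the typedefs below are assumed stand-ins, not librsb's actual definitions.]

#include <stdint.h>
#include <stdio.h>

/* Assumed stand-ins for librsb's index types: a full coordinate index
 * and a halfword index usable when block column offsets fit in 16 bits. */
typedef int32_t  coo_idx_t;   /* stand-in for rsb_coo_idx_t  */
typedef uint16_t half_idx_t;  /* stand-in for rsb_half_idx_t */

int main(void)
{
	/* Halving the index width halves the bindx footprint and thus the
	 * memory traffic per nonzero, which is why separate _H_ kernels
	 * are generated at all. */
	printf("full: %zu bytes/idx, half: %zu bytes/idx\n",
	       sizeof(coo_idx_t), sizeof(half_idx_t));
	return 0;
}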
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
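
[Editorial note: these signatures also carry incx/incy strides for the rhs and out vectors. That librsb interprets them exactly as BLAS-style increments is an assumption here, but under that assumption the access pattern would look like the following, with invented values:]

#include <stdio.h>

int main(void)
{
	/* With incx = 2, logical element i of the rhs lives at
	 * rhs[i * incx], BLAS-style (assumed semantics, invented data). */
	double rhs[] = { 1.0, -1.0, 2.0, -2.0, 3.0, -3.0 };
	int incx = 2;

	for (int i = 0; i < 3; ++i)
		printf("x[%d] = %g\n", i, rhs[i * incx]);
	return 0;
}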
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that
+	 * performs the requested matrix operation ("spsv_sxsx") at the
+	 * current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector
+	 * must each allow a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
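+/*
+ * Editorial note (hedged): reading the generated names, the suffixes appear
+ * to encode the variant: C/H = full (rsb_coo_idx_t) vs. halfword
+ * (rsb_half_idx_t) bindx indices, tN/tT/tC = untransposed / transposed /
+ * conjugate-transposed operand (BLAS convention), sS/sH = symmetric /
+ * Hermitian flavor, dE/dI = explicit / implicit diagonal, uU/uL = upper /
+ * lower triangle, and uu/ul in the leaf kernels = fully unrolled vs. looped.
+ * A representative call of one dispatcher defined in this file:
+ */
+#if 0 /* illustrative only, compiled out */
+	errval = rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sS_dE_uL(
+		VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
+		br, bc, roff, coff, flags, alphap, incx, incy);
+#endif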
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
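+/*
+ * Editorial note (hedged): the _H_ dispatchers below differ from the _C_ ones
+ * only in taking a "const rsb_half_idx_t * restrict bindx" instead of a
+ * "const rsb_coo_idx_t * restrict bindx"; presumably a narrower index type
+ * chosen to cut index storage and memory bandwidth for submatrices whose
+ * extents fit the smaller range. A sketch of that assumption:
+ */
+#if 0 /* illustrative only, compiled out */
+#include <assert.h>
+/* hedged assumption: rsb_half_idx_t is a narrower integer than rsb_coo_idx_t */
+static void rsb_half_idx_saves_(void)
+{
+	assert(sizeof(rsb_half_idx_t) < sizeof(rsb_coo_idx_t));
+}
+#endif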
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
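+/*
+ * Editorial note: whenever the detected block size is not 1x1 and the library
+ * was configured without RSB_WANT_LOOPING_KERNELS, these dispatchers return
+ * RSB_ERR_UNSUPPORTED_OPERATION instead of computing anything, so callers
+ * must check errval. A minimal sketch of the calling pattern:
+ */
+#if 0 /* illustrative only, compiled out */
+	rsb_err_t errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sS_dI_uL(
+		VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
+		br, bc, roff, coff, flags, alphap, incx, incy);
+	if (errval == RSB_ERR_UNSUPPORTED_OPERATION)
+		; /* non-1x1 blocking requested, but looping kernels compiled out */
+#endif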
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
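+/*
+ * Editorial note: when rpntr and cpntr are NULL the dispatchers above assume
+ * 1x1 blocking (per the "bounded box patch" comment), so the unrolled r1_c1
+ * kernel is selected without dereferencing any block-pointer array. A sketch
+ * of such a CSR-like call, with hypothetical 1x1 block dimensions:
+ */
+#if 0 /* illustrative only, compiled out */
+	errval = rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sH_dI_uL(
+		VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, NULL, NULL,
+		1, 1, roff, coff, flags, alphap, incx, incy);
+#endif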
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel that performs the
+	 * requested matrix operation ("spsv_sxsx") for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must be
+	 * allowed to overflow by no more than
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
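[Editor's note: every dispatcher in this hunk instantiates the same two-level pattern: switch on the detected block row count, then on the block column count, call the exactly-matching "uu" kernel (apparently the fully unrolled variant), and otherwise fall back to a generic "ul" looping kernel when RSB_WANT_LOOPING_KERNELS is compiled in. A minimal standalone sketch of the idiom, with invented names standing in for the generated ones:

	#include <stdio.h>

	typedef int err_t;	/* stand-in for rsb_err_t */
	enum { ERR_OK = 0, ERR_UNSUPPORTED = -1 };

	static err_t kernel_1x1_unrolled(void) { puts("unrolled 1x1 kernel"); return ERR_OK; }
	static err_t kernel_looped(void)       { puts("generic looped kernel"); return ERR_OK; }

	/* dispatch on the detected block size, as the generated code does */
	static err_t dispatch(int rows, int columns)
	{
		err_t errval = ERR_OK;
		switch(rows)
		{
		case 1:
			switch(columns)
			{
			case 1: errval = kernel_1x1_unrolled(); break;
			default:
	#ifdef WANT_LOOPING_KERNELS
				errval = kernel_looped();
	#else
				errval = ERR_UNSUPPORTED;
	#endif
			}
			break;
		default:
	#ifdef WANT_LOOPING_KERNELS
			errval = kernel_looped();
	#else
			errval = ERR_UNSUPPORTED;
	#endif
		}
		return errval;
	}

	int main(void) { return dispatch(1, 1) != ERR_OK; }
]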
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
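[Editor's note: the _C_ and _H_ variants interleaved here differ only in the declared type of bindx, visible in the signatures: rsb_coo_idx_t for _C_, rsb_half_idx_t for _H_. Presumably the halfword type exists to shrink the block-index arrays. A sketch of the saving, with invented stand-in typedefs (the real widths are configure-dependent):

	#include <stdio.h>
	#include <stdint.h>

	typedef int32_t  coo_idx_t;	/* stand-in for a full-width index ("_C_" kernels) */
	typedef uint16_t half_idx_t;	/* stand-in for a halfword index   ("_H_" kernels) */

	int main(void)
	{
		const size_t nnz = 1000000;	/* invented nonzero count */
		printf("full-width bindx: %zu bytes\n", nnz * sizeof(coo_idx_t));
		printf("halfword bindx:   %zu bytes\n", nnz * sizeof(half_idx_t));
		return 0;
	}
]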
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_sxsx") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
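[Editor's note: here the hunk moves from the spsv_sxsx family to spsv_uxua. Judging purely from the argument lists above and below, the sxsx kernels take a scale factor alphap and strides incx/incy, while the uxua kernels drop all three, i.e. they look like the alpha = 1, unit-stride specialization. A schematic contrast of the two call shapes, with invented helper names (not librsb code):

	#include <complex.h>
	#include <stdio.h>

	/* scaled, strided update step (the "sxsx"-like shape) */
	static void step_sxsx(float complex *out, const float complex *rhs,
	                      const float complex *alphap, int incx, int incy)
	{
		out[0 * incy] += (*alphap) * rhs[0 * incx];
	}

	/* unscaled, unit-stride update step (the "uxua"-like shape) */
	static void step_uxua(float complex *out, const float complex *rhs)
	{
		out[0] += rhs[0];	/* alpha == 1, incx == incy == 1 folded away */
	}

	int main(void)
	{
		float complex rhs = 2.0f, out = 0.0f, alpha = 0.5f;
		step_sxsx(&out, &rhs, &alpha, 1, 1);	/* out = 0.5 * 2 = 1 */
		step_uxua(&out, &rhs);			/* out = 1 + 2 = 3 */
		printf("out = %g\n", crealf(out));
		return 0;
	}
]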
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
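[Editor's note: the dE/dI pairs seem to differ in whether the (block) diagonal is stored explicitly or treated as an implicit unit diagonal; that is a reading of the suffixes, not something these hunks state. For a triangular solve the distinction matters because the unit-diagonal case skips the divisions. A scalar back-substitution illustration of the two conventions (my own example, not librsb code):

	#include <stdio.h>

	int main(void)
	{
		/* solve L x = b with L = [2 0; 3 4], b = (2, 10) */
		const double l00 = 2.0, l10 = 3.0, l11 = 4.0, b0 = 2.0, b1 = 10.0;

		/* explicit diagonal ("dE"-like): divide by the stored diagonal */
		double x0 = b0 / l00;
		double x1 = (b1 - l10 * x0) / l11;
		printf("explicit diagonal: x = (%g, %g)\n", x0, x1);	/* (1, 1.75) */

		/* implicit unit diagonal ("dI"-like): diagonal taken as 1 */
		x0 = b0;
		x1 = b1 - l10 * x0;
		printf("implicit unit diagonal: x = (%g, %g)\n", x0, x1);	/* (2, 4) */
		return 0;
	}
]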
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
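[Editor's note: from here to the end of the hunk only the mangled suffix varies. Read against the signatures, the fields appear to encode: tN/tT/tC for non-transposed/transposed/conjugate-transposed operation, sU/sH for unsymmetric/Hermitian, dE/dI for explicit/implicit diagonal, uU/uL for upper/lower triangle, and the C/H before "__t" for full-width versus halfword bindx. That decoding is inferred from this file alone, not from librsb documentation. A throwaway sketch that splits the fixed-layout r1_c1 suffix under those assumptions:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *name = "rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dI_uL";
		const char *tail = strstr(name, "__t");	/* "__tT_r1_c1_uu_sU_dI_uL" */
		if(tail && strlen(tail) >= 22)
			/* fixed offsets hold only for the r1_c1 kernels in this file */
			printf("trans=%c sym=%c diag=%c uplo=%c\n",
			       tail[3], tail[15], tail[18], tail[21]);
		return 0;
	}
]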
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of at most, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_coo_idx_t columns, rows;
+
+	if(cpntr && rpntr)
+	{
+		columns = cpntr[1]-cpntr[0];
+		rows    = rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1:	/* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
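+
+/*
+ * Editor's note: when rpntr and cpntr are NULL (the "bounded box" case) the
+ * dispatcher above fixes rows = columns = 1, so its nested switches reduce
+ * to this equivalent sketch (an editorial condensation, same behaviour):
+ *
+ *   if (rows == 1 && columns == 1)         // 1x1 BCSR, i.e. plain CSR
+ *       errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dI_uL(
+ *           VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
+ *           br, bc, roff, coff, flags);
+ *   else
+ *   #ifdef RSB_WANT_LOOPING_KERNELS        // generic looped fallback
+ *       errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sU_dI_uL(
+ *           VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
+ *           br, bc, roff, coff, flags);
+ *   #else
+ *       errval = RSB_ERR_UNSUPPORTED_OPERATION;
+ *   #endif
+ */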
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
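+
+/*
+ * Editor's note: RSB_WANT_LOOPING_KERNELS is evidently a compile-time
+ * option; when the library is built without it, every blocking other than
+ * 1x1 makes these dispatchers return RSB_ERR_UNSUPPORTED_OPERATION instead
+ * of falling back to a looped kernel. A caller is therefore expected to
+ * check the status, along these lines (a sketch; spsv_dispatch stands for
+ * any dispatcher in this file and handle_unblocked is hypothetical):
+ *
+ *   rsb_err_t err = spsv_dispatch(VA, rhs, out, Mdim, mdim, bindx, bpntr,
+ *                                 indptr, rpntr, cpntr, br, bc, roff, coff,
+ *                                 flags);
+ *   if (err == RSB_ERR_UNSUPPORTED_OPERATION)
+ *       err = handle_unblocked(VA, rhs, out);  // e.g. re-dispatch as 1x1
+ */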
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
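+
+/*
+ * Editor's note: an illustrative invocation of the dispatcher above for the
+ * unblocked case (NULL rpntr/cpntr force the 1x1 path). The matrix is the
+ * lower-triangular
+ *     [ 1  0 ]
+ *     [ 2  1 ]
+ * in CSR form. The exact in/out semantics of the leaf kernel are not
+ * documented in this file, so this only shows how the arguments line up
+ * (a sketch, not a tested call; flags comes from the caller):
+ *
+ *   const double complex VA[]    = { 1, 2, 1 };  // stored values, row-major
+ *   const rsb_coo_idx_t  bindx[] = { 0, 0, 1 };  // column indices
+ *   const rsb_nnz_idx_t  bpntr[] = { 0, 1, 3 };  // row pointers
+ *   double complex rhs[2] = { 1, 4 }, out[2];
+ *   rsb_err_t err = rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sS_dE_uL(
+ *       VA, rhs, out, 2, 2, bindx, bpntr,
+ *       NULL, NULL, NULL,  // indptr, rpntr, cpntr (NULL picks 1x1)
+ *       1, 1, 0, 0,        // br, bc, roff, coff
+ *       flags);
+ */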
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
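+
+/*
+ * Editor's note: the tT and tC families that follow solve against the
+ * transposed and the conjugate-transposed matrix respectively; reading the
+ * "uxua" infix as an unscaled (alpha = 1) solve, the three families
+ * plausibly compute (an inference from BLAS naming, not stated here):
+ *   tN:  out = A^{-1} * rhs
+ *   tT:  out = A^{-T} * rhs
+ *   tC:  out = A^{-H} * rhs
+ * so, e.g., an upper-triangular store used with tT behaves as a lower
+ * triangular solve.
+ */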
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
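+
+/*
+ * Editor's note: the _H_ dispatchers differ from their _C_ twins only in
+ * the element type of bindx (rsb_half_idx_t rather than rsb_coo_idx_t);
+ * presumably this halves index storage, and thus memory traffic, for
+ * submatrices whose column span fits the narrower type. A sketch of the
+ * narrowing a converter would perform (narrow_indices is a hypothetical
+ * helper, not librsb API):
+ *
+ *   static void narrow_indices(const rsb_coo_idx_t *src,
+ *                              rsb_half_idx_t *dst, size_t nnz)
+ *   {
+ *       for (size_t i = 0; i < nnz; ++i)
+ *           dst[i] = (rsb_half_idx_t)src[i];  // caller guarantees the range
+ *   }
+ */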
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags)
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel performing the
+	 * requested matrix operation ("spsv_uxua") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and the out vectors
+	 * must each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
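+/*
+ * Illustrative sketch with hypothetical values, kept out of compilation:
+ * rpntr and cpntr act as block row/column partitioning arrays, so two
+ * consecutive entries delimit one block; the dispatchers above read the
+ * first block's extent to recover the fixed block size, and fall back to
+ * 1x1 blocks when the arrays are absent.
+ */
+#if 0
+	const rsb_coo_idx_t rpntr_ex[] = { 0, 3, 6, 9 };	/* block rows of 3 rows each */
+	const rsb_coo_idx_t cpntr_ex[] = { 0, 2, 4, 6 };	/* block columns of 2 columns each */
+	const rsb_coo_idx_t rows_ex    = rpntr_ex[1] - rpntr_ex[0];	/* == 3 */
+	const rsb_coo_idx_t columns_ex = cpntr_ex[1] - cpntr_ex[0];	/* == 2 */
+#endif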
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing the
+	 * requested matrix operation ("spsv_uxua") at the current fixed block
+	 * size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and out vectors must
+	 * tolerate a small overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_uxua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_uxua")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_sxsx")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
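Compared with the spsv_uxua dispatchers above, the spsv_sxsx variants forward three further arguments to the kernels: alphap, incx and incy, presumably a pointer to the scaling coefficient and BLAS-style strides for the rhs and out vectors. A hedged fragment showing the call shape (stride values and parameter types are assumptions; the prototypes above are truncated):

    /* Illustrative fragment: stride-2 access over interleaved vectors,
     * scaling by *alphap; argument order copied from the call sites above. */
    const double complex alpha = 1.0;
    const rsb_coo_idx_t incx = 2, incy = 2; /* assumed BLAS-like strides */
    errval = rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sU_dE_uU(
    		VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
    		br, bc, roff, coff, flags, &alpha, incx, incy);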
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_sxsx")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_sxsx")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_sxsx")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_sxsx")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_sxsx")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_sxsx")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_sxsx")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
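One failure mode is shared by every dispatcher in this file: when the library is built without RSB_WANT_LOOPING_KERNELS, any blocking other than 1 x 1 returns RSB_ERR_UNSUPPORTED_OPERATION rather than running a mismatched kernel. A sketch of caller-side handling (illustrative fragment, not librsb code):

    rsb_err_t err = rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sU_dI_uL(
    		VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
    		br, bc, roff, coff, flags, alphap, incx, incy);
    if(err == RSB_ERR_UNSUPPORTED_OPERATION)
    {
    	/* no kernel for this blocking in this build: retry with 1 x 1 blocks */
    }
    else if(err != RSB_ERR_NO_ERROR)
    {
    	/* genuine failure: propagate to the caller */
    }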
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_sxsx")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_sxsx")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_sxsx")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_sxsx")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_sxsx")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_sxsx")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_sxsx")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_sxsx")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_sxsx")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_sxsx")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_sxsx")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_sxsx")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_sxsx")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sU_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_sxsx")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sU_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * Dispatches the specialized kernel (fully unrolled, or looping as a
+	 * fallback) implementing the requested matrix operation ("spsv_sxsx")
+	 * for the current fixed block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this code is strictly blocked, the rhs and out vectors must each
+	 * tolerate a small trailing overflow of at most
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements, respectively.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows=1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 x 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sU_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sU_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
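
A reading aid for the generated names, inferred from the signatures and call sites in this file (only the C/H distinction is directly visible, in the type of bindx; the rest is an educated guess): C/H select full rsb_coo_idx_t versus halfword rsb_half_idx_t block column indices; tN/tT/tC the BLAS-style transposition (none, transpose, conjugate transpose); sU/sS/sH the symmetry flavour; dE/dI an explicit versus implicit diagonal; uU/uL the upper versus lower triangle being solved; and uu/ul the unrolled versus looped block kernels, the latter available only under RSB_WANT_LOOPING_KERNELS. As an illustration of what a tN, dE, uL variant computes on 1 x 1 blocks, here is plain CSR forward substitution; this is a sketch, not librsb code, and it assumes unit strides and that each row stores its diagonal entry last:

	#include <complex.h>

	/* Solve L*out = alpha*rhs for a CSR lower-triangular L whose rows
	 * end with the diagonal entry (alpha plays the role of *alphap). */
	static void csr_spsv_lower(const double complex *VA, const int *bindx,
	                           const int *bpntr, int n, double complex alpha,
	                           const double complex *rhs, double complex *out)
	{
		for (int i = 0; i < n; ++i) {
			double complex acc = alpha * rhs[i];
			int k = bpntr[i];
			for (; k < bpntr[i + 1] - 1; ++k)	/* strictly lower entries */
				acc -= VA[k] * out[bindx[k]];
			out[i] = acc / VA[k];			/* divide by the diagonal */
		}
	}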
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
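
Note that these dispatchers degrade gracefully rather than asserting: in a build without RSB_WANT_LOOPING_KERNELS, any block size other than 1 x 1 makes them return RSB_ERR_UNSUPPORTED_OPERATION, so callers are presumably expected to test the result. Schematically (a hypothetical fragment reusing the argument list that appears at the call sites above):

	rsb_err_t errval;

	errval = rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sS_dI_uL(
			VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr,
			br, bc, roff, coff, flags, alphap, incx, incy);
	if (errval == RSB_ERR_UNSUPPORTED_OPERATION)
		;	/* e.g. fall back to a generic, non-blocked kernel */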
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sS_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sS_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sS_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized looped kernel performing
+	 * the desired matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vectors
+	 * must each be allowed a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+switch(rows)
+{
+	case 1:
+	{switch(columns)
+	{
+		case 1:/* 1 1 BCSR */
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+		break;
+	default: 
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}}
+	break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS 
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sS_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+};
+		return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sH_dE_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sH_dE_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sH_dI_uU( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
+
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+{
+	/* generated by the RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION macro */
+	/*
+	 * This function dispatches the specialized kernel function performing
+	 * the requested matrix operation ("spsv_sxsx") for the current fixed
+	 * block size.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 *
+	 * Since this is strictly blocked code, the rhs and the out vector must
+	 * each tolerate a small overflow of no more than, respectively,
+	 *       mod(blockrows-mod(matrixrows,blockrows),blockrows)
+	 * and
+	 *       mod(blockcols-mod(matrixcols,blockcols),blockcols)
+	 * elements.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	register rsb_coo_idx_t columns,rows;
+	if(cpntr && rpntr)
+	{
+		columns=cpntr[1]-cpntr[0];
+		rows   =rpntr[1]-rpntr[0];
+	}
+	else
+		columns = rows = 1;	/* experimental, for the bounded box patch */
+
+	switch(rows)
+	{
+	case 1:
+		switch(columns)
+		{
+		case 1: /* 1 1 BCSR */
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+			break;
+		default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+			errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+			errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+		}
+		break;
+	default:
+#ifdef RSB_WANT_LOOPING_KERNELS
+		errval = rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_ul_sH_dI_uL( VA,rhs,out,Mdim,mdim,bindx,bpntr,indptr,rpntr,cpntr,br,bc,roff,coff,flags,alphap,incx,incy );
+#else /* RSB_WANT_LOOPING_KERNELS */
+		errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_LOOPING_KERNELS */
+	}
+	return errval;
+}
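+
+/*
+ * All dispatchers above instantiate one and the same pattern; stripped of
+ * the type, transposition and storage variants, it reduces to the
+ * following sketch (hypothetical names, kept as a comment since it is an
+ * illustration and not generated code):
+ *
+ *   rsb_err_t dispatch(rsb_coo_idx_t rows, rsb_coo_idx_t columns)
+ *   {
+ *       if(rows == 1 && columns == 1)
+ *           return unrolled_1x1_kernel();     // fixed-block fast path
+ *   #ifdef RSB_WANT_LOOPING_KERNELS
+ *       return generic_looping_kernel();      // variable-block fallback
+ *   #else
+ *       return RSB_ERR_UNSUPPORTED_OPERATION; // fallback not compiled in
+ *   #endif
+ *   }
+ */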
+
+
+
+
+
+/* @endcond */
diff --git a/rsb_krnl_bcss_spsv_u.h b/rsb_krnl_bcss_spsv_u.h
new file mode 100644
index 0000000..879bed3
--- /dev/null
+++ b/rsb_krnl_bcss_spsv_u.h
@@ -0,0 +1,2923 @@
+/* @cond INNERDOC */
+/*!
+ @file
+ @brief
+ Performance kernel dispatching code, for each type, submatrix size, and
+ operation, for the block compressed sparse stripes format.
+ Kernels are unrolled, with no loops, for user-specified blockings only.
+ */
+
+/*
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script.
+ It is not meant to be used as an API (Application Programming Interface).
+ At present, only row-major matrix access is considered.
+ */
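+/*
+ The generated identifiers encode their variant in the name: e.g. in
+ rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dE_uU, "C"/"H" appears to
+ select full (rsb_coo_idx_t) or half (rsb_half_idx_t) column indices
+ (compare the bindx parameter types below), "tN"/"tT"/"tC" the
+ transposition (none, transposed, conjugate-transposed), "r1_c1" the
+ 1x1 block size, "uu"/"ul" an unrolled or looped kernel, "sU"/"sS"/"sH"
+ the symmetry (unsymmetric, symmetric, hermitian), "dE"/"dI" an explicit
+ or implicit diagonal, and "uU"/"uL" the upper or lower triangular case.
+ */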
+#ifndef RSB_BCSS_SPSV_U_H_INCLUDED
+#define RSB_BCSS_SPSV_U_H_INCLUDED
+#include "rsb_internals.h"
+#include "rsb.h"
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tN_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tN_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tT_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tT_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C__tC_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H__tC_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tN_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tN_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tT_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tT_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C__tC_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H__tC_r1_c1_uu_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const double * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tN_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tN_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tT_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tT_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C__tC_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H__tC_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const float * restrict alphap,rsb_coo_idx_t incx, rsb_coo_idx_t incy);
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tN_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tN_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tT_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tT_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C__tC_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flag [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H__tC_r1_c1_uu_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t fla [...]
+
+
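(A reading of the suffixes in these generated names, inferred only from the identifiers and parameter types above rather than from separate documentation: the C/H infix selects full rsb_coo_idx_t versus halved rsb_half_idx_t column indices, visible in the bindx parameter type; tN/tT/tC appear to select the non-transposed, transposed, and conjugate-transposed operation; sU/sS/sH unsymmetric, symmetric, and hermitian storage; dE/dI an explicitly stored versus implicit unit diagonal; and uU/uL the upper or lower triangle. The float kernels end here; the float complex variants of the same grid follow.)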
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tN_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tN_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tT_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tT_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C__tC_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rs [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H__tC_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const r [...]
+
+
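The prototypes above and below follow one generated naming pattern. What is directly visible in the signatures: the value type (float complex here) and the index width, where _C_ kernels take rsb_coo_idx_t column indices in bindx and _H_ kernels take rsb_half_idx_t. The remaining suffixes are presumably transposition (tN/tT/tC for none/transpose/conjugate), 1x1 blocking (r1_c1), symmetry (sU/sS/sH), diagonal handling (dE/dI), and upper/lower triangle (uU/uL); that reading is an inference from the suffix pattern, not something this diff confirms. As rough orientation, the following is a minimal sketch, not librsb source, of the unblocked lower-triangular solve ("spsv", uL) that such kernels specialize, assuming a plain CSR layout in which each row's entries are column-sorted and an explicit diagonal is stored last in its row; unit_diag models the dI variants only if dI indeed means an implied unit diagonal. Every name in the sketch is hypothetical.

#include <complex.h>

/*
 * Illustrative sketch only, NOT librsb source: unblocked (r1_c1) CSR
 * forward substitution.  Row i's entries are assumed to occupy
 * VA[bpntr[i] .. bpntr[i+1]-1] in ascending column order, with an
 * explicit diagonal stored last in its row when unit_diag == 0.
 */
static void spsv_csr_lower_sketch(const float complex *VA,
                                  const int *bindx,   /* column indices */
                                  const int *bpntr,   /* row pointers   */
                                  int n,              /* matrix order   */
                                  const float complex *rhs,
                                  float complex *out,
                                  int unit_diag)      /* nonzero: diag is 1 */
{
    for (int i = 0; i < n; ++i) {
        float complex acc = rhs[i];
        int k = bpntr[i];
        const int end = bpntr[i + 1] - (unit_diag ? 0 : 1);
        for (; k < end; ++k)                    /* strictly-lower entries  */
            acc -= VA[k] * out[bindx[k]];
        out[i] = unit_diag ? acc : acc / VA[k]; /* VA[k] is the diagonal   */
    }
}

An upper-triangular (uU) counterpart would run the row loop backwards; the generated kernels below enumerate each such choice as a separate specialized function rather than branching at run time.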
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tN_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tN_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tT_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tT_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C__tC_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_i [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H__tC_r1_c1_uu_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_ [...]
+
+
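Within each family above, the uxua and sxsx variants differ visibly in their signatures: the sxsx prototypes additionally qualify rhs and out with restrict, and presumably take extra scaling arguments in the parameter tails that are truncated here (an assumption; the tails are not shown). The other visible axis is index width: _C_ kernels read rsb_coo_idx_t column indices while _H_ kernels read rsb_half_idx_t, presumably a narrower type that halves index memory traffic on submatrices whose column span is small enough. Below is a minimal sketch of that narrowing step, under the assumption that rsb_half_idx_t is 16 bits wide; every name is a hypothetical stand-in, not the librsb API.

#include <stdint.h>

typedef int32_t  coo_idx_sketch_t;   /* stand-in for rsb_coo_idx_t  */
typedef uint16_t half_idx_sketch_t;  /* stand-in for rsb_half_idx_t */

/*
 * Illustrative sketch only: copy a full-width column-index array into a
 * half-width one, returning 0 on success or -1 if any index would not
 * fit (in which case only the _C_ kernels would be usable).
 */
static int narrow_bindx_sketch(const coo_idx_sketch_t *bindx,
                               half_idx_sketch_t *hbindx,
                               long nnz)
{
    for (long k = 0; k < nnz; ++k) {
        if (bindx[k] < 0 || bindx[k] > UINT16_MAX)
            return -1;               /* would overflow the half index */
        hbindx[k] = (half_idx_sketch_t)bindx[k];
    }
    return 0;                        /* safe to use an _H_ kernel     */
}

The double complex families that follow repeat the same grid of variants for the wider value type.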
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tN_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tN_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tT_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tT_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C__tC_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,cons [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H__tC_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,con [...]
+
+
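A note on the generated names, since the rest of this diff hunk repeats them: each
suffix appears to encode one kernel variant. The "uxua" family above apparently
takes unscaled operands with unit alpha; the "sxsx" family that follows differs
visibly in that rhs and out are also restrict-qualified, suggesting scaled,
non-aliasing operands. The remaining tokens seem to be: C/H for full
rsb_coo_idx_t versus rsb_half_idx_t column indices, tN/tT/tC for
untransposed/transposed/conjugate-transposed, sU/sS/sH for
unsymmetric/symmetric/Hermitian, dE/dI for explicit/implicit diagonal, and
uU/uL for the upper/lower triangle. A hypothetical token-pasting macro, not
part of librsb, shows how such a symbol is assembled for the unblocked "u"
name shape:

    /* Illustrative only: builds one mangled kernel name from its tokens. */
    #define RSB_SPSV_NAME(op, type, idx, unr, t, s, d, u) \
            rsb__BCSR_spsv_ ## op ## _ ## type ## _ ## idx ## _ ## unr ## \
            _ ## t ## _ ## s ## _ ## d ## _ ## u
    /* RSB_SPSV_NAME(uxua, double, C, u, tN, sU, dE, uL) expands to
       rsb__BCSR_spsv_uxua_double_C_u_tN_sU_dE_uL */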
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tN_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tN_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tT_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tT_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C__tC_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H__tC_r1_c1_uu_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_ [...]
+
+
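
The block above closes the double-complex solve kernels; the same mnemonic suffixes repeat throughout this generated header. Reading them off the prototypes themselves: the "_C_" variants take full-width column indices (const rsb_coo_idx_t *bindx) while the "_H_" variants take halved ones (const rsb_half_idx_t *bindx); tN/tT/tC plausibly select no / plain / conjugate transpose, sU/sS/sH the symmetry assumption, dE/dI an explicit versus implicit unit diagonal, and uU/uL the upper versus lower triangle. Only the index-width distinction is actually visible in the signatures; the rest is inference from the naming pattern. Below is a minimal sketch of the index narrowing the "_H_" variants suggest, using local stand-in types rather than librsb's own:

#include <limits.h>
#include <stdio.h>

typedef int full_idx_t;            /* stand-in for rsb_coo_idx_t (assumed)          */
typedef unsigned short half_idx_t; /* stand-in for rsb_half_idx_t (assumed 16-bit)  */

/* Hypothetical helper, not a librsb function: narrow submatrix-local column
 * indices, returning 0 if any index, after subtracting the column offset
 * coff (cf. the coff parameter in the prototypes), does not fit the
 * half-width type. */
static int narrow_indices(const full_idx_t *bindx, half_idx_t *hbindx,
                          full_idx_t nnz, full_idx_t coff)
{
	full_idx_t k;
	for (k = 0; k < nnz; ++k) {
		full_idx_t j = bindx[k] - coff; /* make the index submatrix-local */
		if (j < 0 || j > USHRT_MAX)
			return 0;               /* does not fit the half-width type */
		hbindx[k] = (half_idx_t)j;
	}
	return 1;
}

int main(void)
{
	/* Indices near 70000 overflow 16 bits globally, but fit once the
	 * submatrix column offset is subtracted out. */
	const full_idx_t bindx[] = { 70000, 70002, 70005 };
	half_idx_t hbindx[3];
	printf("%d\n", narrow_indices(bindx, hbindx, 3, 70000)); /* prints 1 */
	return 0;
}

Narrowed indices halve the memory bandwidth spent on bindx, which is presumably why both variants are generated for every kernel.
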
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sU_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sU_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sU_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sU_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sS_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sS_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sS_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sS_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tN_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tN_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tT_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tT_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_C_u_tC_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sH_dE_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sH_dE_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sH_dI_uU(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_H_u_tC_sH_dI_uL(const double * restrict VA, const double * rhs, double * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
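
Before the scaled (sxsx) double-precision variants resume below, here is a plain-C reference of what one of the unscaled kernels just listed, e.g. rsb__BCSR_spsv_uxua_double_C_u_tN_sU_dE_uL, is assumed to compute for 1x1 blocks: a forward substitution out = inv(L) * rhs over CSR-like VA/bindx/bpntr arrays. This is an illustrative sketch of the presumed semantics, not librsb's implementation; it further assumes each row stores its diagonal entry last.

#include <stdio.h>

typedef int idx_t; /* local stand-in for rsb_coo_idx_t / rsb_nnz_idx_t */

/* Reference lower-triangular solve (non-transposed, explicit diagonal):
 * row i holds entries bpntr[i] .. bpntr[i+1]-1, with column indices in
 * bindx[] and values in VA[]; the diagonal is assumed stored last. */
static void ref_spsv_lower(const double *VA, const double *rhs, double *out,
                           idx_t n, const idx_t *bindx, const idx_t *bpntr)
{
	idx_t i;
	for (i = 0; i < n; ++i) {
		double acc = rhs[i];
		idx_t k;
		for (k = bpntr[i]; k < bpntr[i + 1] - 1; ++k)
			acc -= VA[k] * out[bindx[k]]; /* subtract solved unknowns */
		out[i] = acc / VA[k];                 /* divide by the diagonal */
	}
}

int main(void)
{
	/* 3x3 lower-triangular example: [[2,0,0],[1,2,0],[0,1,2]] */
	const double VA[]   = { 2.0, 1.0, 2.0, 1.0, 2.0 };
	const idx_t bindx[] = { 0, 0, 1, 1, 2 };
	const idx_t bpntr[] = { 0, 1, 3, 5 };
	const double rhs[]  = { 2.0, 4.0, 6.0 };
	double out[3];
	ref_spsv_lower(VA, rhs, out, 3, bindx, bpntr);
	printf("%g %g %g\n", out[0], out[1], out[2]); /* expect 1 1.5 2.25 */
	return 0;
}

The sxsx prototypes that follow carry extra trailing parameters (truncated in this listing by the mail system), consistent with scaled variants taking alpha-style factors on top of the same index arrays.
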
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sU_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sU_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sU_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sU_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sS_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sS_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sS_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sS_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tN_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tN_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tT_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tT_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_C_u_tC_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sH_dE_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sH_dE_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sH_dI_uU(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_H_u_tC_sH_dI_uL(const double * restrict VA, const double * restrict rhs, double * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,c [...]
+
+
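+/*
+ * Editorial note, inferred from the declarations above rather than taken
+ * from the generated header itself: each kernel suffix appears to encode
+ * its specialization.  "_double"/"_float" names the numeric type; "C"/"H"
+ * the column-index width, since the C variants take a rsb_coo_idx_t *bindx
+ * while the H variants take a rsb_half_idx_t *bindx; "tN"/"tT"/"tC"
+ * presumably select no transpose, transpose, or conjugate transpose;
+ * "sU"/"sS"/"sH" an unsymmetric, symmetric, or hermitian matrix;
+ * "dE"/"dI" an explicit or implicit unit diagonal; and "uU"/"uL" whether
+ * the upper or the lower triangle is solved.
+ */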
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sU_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sU_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sU_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sU_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sS_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sS_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sS_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sS_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tN_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tN_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tT_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tT_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_C_u_tC_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sH_dE_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sH_dE_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sH_dI_uU(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_H_u_tC_sH_dI_uL(const float * restrict VA, const float * rhs, float * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags);
+
+
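+/*
+ * Editorial sketch, not part of the upstream header: the spsv_uxua_*
+ * kernels above end their parameter list at `flags`, whereas the
+ * spsv_sxsx_* declarations below are cut short by the mailing-list hook
+ * ("[...]"), so their trailing parameters are recoverable only from the
+ * full source and are deliberately left truncated here.  Assuming arrays
+ * and scalars that already describe a valid lower-triangular blocked CSR
+ * matrix, a call to one of the fully visible prototypes could look like:
+ *
+ *   rsb_err_t err = rsb__BCSR_spsv_uxua_float_C_u_tN_sU_dE_uL(
+ *           VA, rhs, out, Mdim, mdim,
+ *           bindx, bpntr, indptr, rpntr, cpntr,
+ *           br, bc, roff, coff, flags);
+ */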
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sU_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sU_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sU_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sU_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sS_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sS_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sS_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sS_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tN_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tN_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tT_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tT_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_C_u_tC_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const  [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sH_dE_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sH_dE_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sH_dI_uU(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_H_u_tC_sH_dI_uL(const float * restrict VA, const float * restrict rhs, float * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_t flags,const [...]
+
+
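The prototypes below repeat the same pattern for the float complex type,
switching from the sxsx to the uxua operation variant.  The suffix fields of
these generated kernel names look systematic: "_C_" versus "_H_" selects full
rsb_coo_idx_t versus narrow rsb_half_idx_t column indices, as the differing
bindx parameter types in the declarations above confirm; "tN"/"tT"/"tC"
presumably follow the usual BLAS mnemonics for no-transpose, transpose and
conjugate transpose; "sU"/"sS"/"sH", "dE"/"dI" and "uU"/"uL" plausibly read as
unsymmetric/symmetric/hermitian, explicit versus implicit (unit) diagonal, and
upper versus lower triangle.  A minimal standalone sketch of a decoder under
those assumed readings follows; it is an illustration only, not part of the
librsb API, and every mapping it prints is an inference from the names, not
something this diff states:

    /* Hypothetical decoder for the generated kernel name suffixes above.
     * All mappings are assumptions spelled out in the note preceding this
     * sketch; only the _C_/_H_ index-width split is directly visible in
     * the bindx parameter types of the prototypes themselves. */
    #include <stdio.h>
    #include <string.h>

    static void decode_spsv_suffix(const char *name)
    {
        printf("%s:\n", name);
        /* _C_u_ / _H_u_: column index width, visible in bindx */
        printf("  bindx type : %s\n", strstr(name, "_H_u_")
               ? "rsb_half_idx_t" : "rsb_coo_idx_t");
        /* tN/tT/tC: BLAS-style transposition mnemonics (assumed) */
        if (strstr(name, "_tN_")) printf("  transA     : none\n");
        if (strstr(name, "_tT_")) printf("  transA     : transpose\n");
        if (strstr(name, "_tC_")) printf("  transA     : conjugate transpose\n");
        /* sU/sS/sH: assumed unsymmetric/symmetric/hermitian */
        if (strstr(name, "_sU_")) printf("  symmetry   : unsymmetric\n");
        if (strstr(name, "_sS_")) printf("  symmetry   : symmetric\n");
        if (strstr(name, "_sH_")) printf("  symmetry   : hermitian\n");
        /* dE/dI: assumed explicit vs implicit (unit) diagonal */
        if (strstr(name, "_dE_")) printf("  diagonal   : explicit\n");
        if (strstr(name, "_dI_")) printf("  diagonal   : implicit unit\n");
        /* uU/uL: assumed upper vs lower triangle */
        if (strstr(name, "_uU")) printf("  triangle   : upper\n");
        if (strstr(name, "_uL")) printf("  triangle   : lower\n");
    }

    int main(void)
    {
        decode_spsv_suffix("rsb__BCSR_spsv_sxsx_float_H_u_tT_sU_dI_uL");
        decode_spsv_suffix("rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sH_dE_uU");
        return 0;
    }

Under these assumed readings, each family enumerates the full cross product of
index width, transposition, symmetry, diagonal handling and triangle, which is
why the generated header runs to so many near-identical declarations.
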
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sU_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sU_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sU_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sU_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sS_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sS_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sS_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sS_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tN_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tN_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tT_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tT_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_C_u_tC_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sH_dE_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sH_dE_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sH_dI_uU(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_float_complex_H_u_tC_sH_dI_uL(const float complex * restrict VA, const float complex * rhs, float complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_flags [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sU_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sU_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sU_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sU_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sS_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sS_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sS_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sS_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tN_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tN_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tT_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tT_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_C_u_tC_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t cof [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sH_dE_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sH_dE_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sH_dI_uU(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_float_complex_H_u_tC_sH_dI_uL(const float complex * restrict VA, const float complex * restrict rhs, float complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t co [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sU_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sU_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sU_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sU_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sS_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sS_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sS_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sS_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tN_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tN_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tT_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tT_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_C_u_tC_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_fl [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sH_dE_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sH_dE_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sH_dI_uU(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+rsb_err_t rsb__BCSR_spsv_uxua_double_complex_H_u_tC_sH_dI_uL(const double complex * restrict VA, const double complex * rhs, double complex * out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t coff,const rsb_f [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sU_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sU_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sU_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sU_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sS_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sS_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sS_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sS_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tN_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tN_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tT_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tT_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_C_u_tC_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_coo_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_t [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sH_dE_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sH_dE_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sH_dI_uU(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+rsb_err_t rsb__BCSR_spsv_sxsx_double_complex_H_u_tC_sH_dI_uL(const double complex * restrict VA, const double complex * restrict rhs, double complex * restrict out, const rsb_coo_idx_t  Mdim,const rsb_coo_idx_t  mdim,const rsb_half_idx_t * restrict bindx,const rsb_nnz_idx_t * restrict bpntr,const rsb_nnz_idx_t *restrict indptr,const rsb_coo_idx_t * restrict rpntr,const rsb_coo_idx_t * restrict cpntr,const rsb_coo_idx_t br,const rsb_coo_idx_t bc,const rsb_coo_idx_t roff,const rsb_coo_idx_ [...]
+
+
+
+#endif /* RSB_BCSS_SPSV_U_H_INCLUDED */
+/* @endcond */
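The suffixes of the generated prototypes above encode the kernel variant. Judging from the parameter lists, C and H select plain rsb_coo_idx_t versus halfword rsb_half_idx_t column indices (compare the bindx arguments), while the remaining groups plausibly stand for transposition (tN/tT/tC: none, transposed, conjugate-transposed), symmetry (sU/sS/sH), diagonal handling (dE/dI), and the triangle solved (uU/uL: upper or lower); these readings are inferred from the signatures, not documented here. A minimal, self-contained sketch with a hypothetical helper (not part of librsb) that assembles one such name:

#include <stdio.h>

int main(void)
{
	/* Each field below mirrors one suffix group of the prototypes above. */
	const char *idx = "H";   /* C: rsb_coo_idx_t indices, H: rsb_half_idx_t halfwords */
	const char *tra = "tC";  /* tN / tT / tC: plain, transposed, conjugate-transposed */
	const char *sym = "sU";  /* sU / sS / sH: presumably unsymmetric, symmetric, hermitian */
	const char *dia = "dI";  /* dE / dI: presumably explicit vs. implicit diagonal */
	const char *ulo = "uL";  /* uU / uL: upper vs. lower triangle */
	printf("rsb__BCSR_spsv_uxua_double_complex_%s_u_%s_%s_%s_%s\n",
	       idx, tra, sym, dia, ulo);
	return 0;
}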
diff --git a/rsb_krnl_bcss_spsv_u.m4 b/rsb_krnl_bcss_spsv_u.m4
new file mode 100644
index 0000000..401d63d
--- /dev/null
+++ b/rsb_krnl_bcss_spsv_u.m4
@@ -0,0 +1,57 @@
+dnl
+dnl	@author: Michele Martone
+dnl
+/* @cond INNERDOC */
+dnl
+/*!
+ @file
+ @brief
+ Performance kernels dispatching code, for each type, submatrix size, and operation,
+ for the block compressed sparse stripes format.
+ Kernels are unrolled, with no loops, for user-specified blockings only.
+ */
+dnl
+include(`rsb_misc.m4')dnl
+RSB_M4_HEADER_MESSAGE()dnl
+include(`rsb_krnl_bcss_macros.m4')dnl
+include(`rsb_krnl_vb_macros.m4')dnl FIXME : RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_NAME
+dnl
+dnl
+dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl				Function definitions
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#ifndef RSB_BCSS_SPSV_U_H_INCLUDED
+#define RSB_BCSS_SPSV_U_H_INCLUDED
+')dnl
+dnl 
+dnl
+ifelse(dnl
+RSB_M4_MEMBER(`BCSR',WANT_MATRIX_STORAGE)dnl
+RSB_M4_MEMBER(`BCSC',WANT_MATRIX_STORAGE)dnl
+,`00',`dnl
+/**
+ * No BCSS formats compiled in.
+ */
+',`dnl
+dnl
+dnl
+dnl
+dnl
+#include "rsb_internals.h"
+#include "rsb.h"
+dnl
+RSB_M4_BCSS_SPSV_KERNELS((`u'))
+dnl
+')dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#endif /* RSB_BCSS_SPSV_U_H_INCLUDED */
+')dnl
+dnl
+/* @endcond */
+dnl
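
The ifdef(`ONLY_WANT_HEADERS',...) guards above let a single .m4 source
produce both the installed header and the implementation file. A sketch of
the two expansions, with a hypothetical kernel name and stand-in types
(the real generated text is far longer):

typedef int rsb_err_t;        /* stand-in for the real typedef  */
#define RSB_ERR_NO_ERROR 0    /* stand-in for the real constant */

/* expansion with ONLY_WANT_HEADERS defined -> rsb_krnl_bcss_spsv_u.h */
#ifndef RSB_BCSS_SPSV_U_H_INCLUDED
#define RSB_BCSS_SPSV_U_H_INCLUDED
rsb_err_t rsb__BCSR_spsv_example(const double *VA); /* prototype only */
#endif /* RSB_BCSS_SPSV_U_H_INCLUDED */

/* expansion without it -> rsb_krnl_bcss_spsv_u.c */
rsb_err_t rsb__BCSR_spsv_example(const double *VA)
{
	(void)VA; /* ... the unrolled kernel body would go here ... */
	return RSB_ERR_NO_ERROR;
}
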
diff --git a/rsb_krnl_bcss_u.c b/rsb_krnl_bcss_u.c
new file mode 100644
index 0000000..43774c8
--- /dev/null
+++ b/rsb_krnl_bcss_u.c
@@ -0,0 +1,42 @@
+/* @cond INNERDOC */
+/*!
+ @file
+ @brief
+ Performance kernels dispatching code, for each type, submatrix size, and operation,
+ for the block compressed sparse stripes format.
+ Kernels are unrolled, with no loops, for user-specified blockings only.
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+#include "rsb_internals.h"
+#include "rsb_krnl_bcss_spsv_u.h"
+#include "rsb_krnl_bcss_spmv_u.h"
+#include "rsb_krnl_bcss_misc_u.h"
+RSB_EMPTY_FILE_FILLER 
+/* @endcond */
diff --git a/rsb_krnl_bcss_u.h b/rsb_krnl_bcss_u.h
new file mode 100644
index 0000000..5ea519e
--- /dev/null
+++ b/rsb_krnl_bcss_u.h
@@ -0,0 +1,45 @@
+/* @cond INNERDOC */
+/*!
+ @file
+ @brief
+ Performance kernels dispatching code, for each type, submatrix size, and operation,
+ for the block compressed sparse stripes format.
+ Kernels are unrolled, with no loops, for user-specified blockings only.
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+#ifndef RSB_BCSS_U_H_INCLUDED
+#define RSB_BCSS_U_H_INCLUDED
+#include "rsb_internals.h"
+#include "rsb_krnl_bcss_spsv_u.h"
+#include "rsb_krnl_bcss_spmv_u.h"
+#include "rsb_krnl_bcss_misc_u.h"
+RSB_EMPTY_FILE_FILLER 
+#endif /* RSB_BCSS_U_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_krnl_bcss_u.m4 b/rsb_krnl_bcss_u.m4
new file mode 100644
index 0000000..ba01ba3
--- /dev/null
+++ b/rsb_krnl_bcss_u.m4
@@ -0,0 +1,61 @@
+dnl
+dnl	@author: Michele Martone
+dnl
+dnl
+/* @cond INNERDOC */
+dnl
+/*!
+ @file
+ @brief
+ Performance kernels dispatching code, for each type, submatrix size, and operation,
+ for the block compressed sparse stripes format.
+ Kernels are unrolled, with no loops, for user-specified blockings only.
+ */
+dnl
+include(`rsb_misc.m4')dnl
+RSB_M4_HEADER_MESSAGE()dnl
+include(`rsb_krnl_bcss_macros.m4')dnl
+include(`rsb_krnl_vb_macros.m4')dnl FIXME : RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_NAME
+dnl
+dnl
+dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl				Function definitions
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#ifndef RSB_BCSS_U_H_INCLUDED
+#define RSB_BCSS_U_H_INCLUDED
+')dnl
+dnl 
+dnl
+ifelse(dnl
+RSB_M4_MEMBER(`BCSR',WANT_MATRIX_STORAGE)dnl
+RSB_M4_MEMBER(`BCSC',WANT_MATRIX_STORAGE)dnl
+,`00',`dnl
+/**
+ * No BCSS formats compiled in.
+ */
+',`dnl
+dnl
+dnl
+dnl
+dnl
+#include "rsb_internals.h"
+#include "rsb_krnl_bcss_spsv_u.h"
+#include "rsb_krnl_bcss_spmv_u.h"
+#include "rsb_krnl_bcss_misc_u.h"
+dnl
+dnl RSB_M4_BCSS_KERNELS((`u'))
+RSB_EMPTY_FILE_FILLER 
+dnl
+')dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#endif /* RSB_BCSS_U_H_INCLUDED */
+')dnl
+dnl
+/* @endcond */
+dnl
diff --git a/rsb_krnl_lb.c b/rsb_krnl_lb.c
new file mode 100644
index 0000000..a963e55
--- /dev/null
+++ b/rsb_krnl_lb.c
@@ -0,0 +1,100 @@
+/* @cond INNERDOC */
+/*! 
+ @file
+ @brief Matrix type dispatching code, for each matrix operation.
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+/*!
+ @file
+ @brief
+ Performance kernels dispatching code, for each type, submatrix size, and operation,
+ for the block compressed sparse stripes format.
+ Kernels are unrolled, with no loops, for user-specified blockings only.
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+/**
+ * No linked lists formats compiled in.
+ */
+/* @endcond */
diff --git a/rsb_krnl_lb.h b/rsb_krnl_lb.h
new file mode 100644
index 0000000..9be72ff
--- /dev/null
+++ b/rsb_krnl_lb.h
@@ -0,0 +1,103 @@
+/* @cond INNERDOC */
+/*! 
+ @file
+ @brief Matrix type dispatching code, for each matrix operation.
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+/*!
+ @file
+ @brief
+ Performance kernels dispatching code, for each type, submatrix size, and operation,
+ for the block compressed sparse stripes format.
+ Kernels are unrolled, with no loops, for user-specified blockings only.
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+#ifndef RSB_LL_H_INCLUDED
+#define RSB_LL_H_INCLUDED
+/**
+ * No linked lists formats compiled in.
+ */
+#endif /* RSB_LL_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_krnl_lb.m4 b/rsb_krnl_lb.m4
new file mode 100644
index 0000000..b194b43
--- /dev/null
+++ b/rsb_krnl_lb.m4
@@ -0,0 +1,70 @@
+dnl
+dnl
+dnl	@author: Michele Martone
+dnl
+/* @cond INNERDOC */
+dnl
+/*! 
+ @file
+ @brief Matrix type dispatching code, for each matrix operation.
+ */
+dnl
+include(`rsb_misc.m4')dnl
+dnl
+RSB_M4_HEADER_MESSAGE()dnl
+dnl
+include(`rsb_krnl_bcss_macros.m4')dnl
+include(`rsb_krnl_linked_lists.m4')dnl
+include(`rsb_krnl_macros.m4')dnl
+dnl
+dnl
+include(`rsb_misc.m4')dnl
+RSB_M4_HEADER_MESSAGE()dnl
+include(`rsb_krnl_linked_lists.m4')dnl
+dnl
+dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl				Function definitions
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#ifndef RSB_LL_H_INCLUDED
+#define RSB_LL_H_INCLUDED
+')dnl
+dnl 
+dnl
+ifelse(dnl
+RSB_M4_MEMBER(`LR',WANT_MATRIX_STORAGE)dnl
+RSB_M4_MEMBER(`LC',WANT_MATRIX_STORAGE)dnl
+,`00',`dnl
+/**
+ * No linked lists formats compiled in.
+ */
+',`dnl
+dnl
+dnl
+#include "rsb_internals.h"
+dnl
+dnl
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+foreach(`matrix_storage',(`LR',`LC'),`dnl
+foreach(`unrolling',(`l',`u'),`dnl
+dnl RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION(want_what,type,matrix_storage,unrolling,,,mop,citype,diagonal,uplo)
+RSB_M4_LL_KERNEL_SIZE_DISPATCH_FUNCTION(type,matrix_storage,unrolling,mop)
+')dnl
+')dnl
+')dnl
+')dnl
+dnl
+dnl
+')dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#endif /* RSB_LL_H_INCLUDED */
+')dnl
+dnl
+/* @endcond */
+dnl
diff --git a/rsb_krnl_linked_lists.m4 b/rsb_krnl_linked_lists.m4
new file mode 100644
index 0000000..0ee8286
--- /dev/null
+++ b/rsb_krnl_linked_lists.m4
@@ -0,0 +1,467 @@
+dnl
+dnl	@author: Michele Martone
+dnl
+dnl
+dnl	RSB_M4_LL_KERNEL_SIZE_DISPATCH_FUNCTION_ARGS(mtype,matrix_storage,unrolling,mop)
+dnl	--------------------------------------------------------------------------------
+dnl
+dnl	FIXME : THERE ARE NO TRANSPOSED KERNELS
+dnl	FIXME : THERE ARE NO SYMMETRY HANDLING KERNELS
+dnl
+define(`RSB_M4_LL_KERNEL_SIZE_DISPATCH_FUNCTION_ARGS',`dnl
+dnl
+include(`do_unroll.m4')dnl
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`matrix_storage',$2)dnl	
+pushdef(`unrolling',$3)dnl	
+pushdef(`mop',$4)dnl	
+pushdef(`matrix_structs',`const itype Mdim, const itype mdim, const rsb_nnz_idx_t * RSB_M4_RESTRICT bindx, const rsb_nnz_idx_t * RSB_M4_RESTRICT bpntr, const rsb_nnz_idx_t *RSB_M4_RESTRICT indptr, const rsb_coo_idx_t * RSB_M4_RESTRICT rpntr, const rsb_coo_idx_t * RSB_M4_RESTRICT cpntr, const rsb_coo_idx_t dummy_br, const rsb_coo_idx_t dummy_bc')dnl	
+(
+ifelse(mop,`scale',`dnl
+	mtype * VA, 
+	matrix_structs,
+	const mtype *scale_factors
+')dnl
+ifelse(mop,`spmv_uauz',`dnl
+	const mtype * VA, const mtype * rhs, mtype * out,
+	matrix_structs
+')dnl
+ifelse(mop,`spmv_uaua',`dnl
+	const mtype * VA, const mtype * rhs, mtype * out,
+	matrix_structs
+')dnl
+ifelse(mop,`spmv_unua',`dnl
+	const mtype * VA, const mtype * rhs, mtype * out,
+	matrix_structs
+')dnl
+ifelse(mop,`spmv_uxux',`dnl
+	const mtype * VA, const mtype * rhs, mtype * out,
+	matrix_structs,
+	const mtype * alphap, const mtype * betap
+')dnl
+ifelse(mop,`spmm_az',`dnl
+	const mtype * VA, const mtype * mrhs, mtype * mout,
+	matrix_structs,
+	const itype bstride, 
+	const itype cstride,
+	const itype nrhs
+')dnl
+ifelse(mop,`infty_norm',`dnl
+	const mtype * VA, mtype * global_row_sums,/* ! */
+	matrix_structs
+')dnl
+ifelse(mop,`negation',`dnl
+	mtype * VA, 
+	matrix_structs
+')dnl
+)dnl
+popdef(`matrix_structs')dnl
+popdef(`mop')dnl
+popdef(`mtype')dnl
+popdef(`matrix_storage')dnl
+popdef(`unrolling')dnl
+')dnl
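
For instance, for mop=spmv_uauz and mtype=double the argument macro above
expands (modulo whitespace) to the list below; the function name shown is
hypothetical, and the stand-in typedefs replace the real librsb headers:

typedef int rsb_err_t;     /* stand-ins, for illustration only */
typedef int rsb_coo_idx_t;
typedef int rsb_nnz_idx_t;

rsb_err_t rsb__LR_spmv_uauz_double( /* hypothetical generated name */
	const double * VA, const double * rhs, double * out,
	const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim,
	const rsb_nnz_idx_t * bindx, const rsb_nnz_idx_t * bpntr,
	const rsb_nnz_idx_t * indptr, const rsb_coo_idx_t * rpntr,
	const rsb_coo_idx_t * cpntr, const rsb_coo_idx_t dummy_br,
	const rsb_coo_idx_t dummy_bc);
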
+dnl
+dnl
+dnl	RSB_M4_LL_KERNEL_SIZE_DISPATCH_FUNCTION_PROTOTYPE
+dnl	-------------------------------------------------
+dnl
+define(`RSB_M4_LL_KERNEL_SIZE_DISPATCH_FUNCTION_PROTOTYPE',`dnl
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`matrix_storage',$2)dnl
+pushdef(`unrolling',$3)dnl
+pushdef(`mop',$4)dnl
+pushdef(`citype',$5)dnl
+pushdef(`diagonal',RSB_M4_DEFAULT_DIAGONAL_TYPE)dnl
+rsb_err_t RSB_M4_KERNEL_SIZE_DISPATCH_FUNCTION_NAME(mtype,matrix_storage,transposition,symmetry,unrolling,mop,citype,diagonal,uplo)dnl
+RSB_M4_LL_KERNEL_SIZE_DISPATCH_FUNCTION_ARGS(mtype,matrix_storage,unrolling,mop,citype)dnl
+popdef(`diagonal')dnl
+popdef(`citype')dnl
+popdef(`mop')dnl
+popdef(`mtype')dnl
+popdef(`matrix_storage')dnl
+popdef(`unrolling')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_LL_KERNEL_SIZE_DISPATCH_FUNCTION_BODY(mtype,matrix_storage,unrolling,mop)
+dnl	--------------------------------------------------------------------------------
+dnl
+define(`RSB_M4_LL_KERNEL_SIZE_DISPATCH_FUNCTION_BODY',`
+dnl
+dnl
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`matrix_storage',$2)dnl
+pushdef(`unrolling',$3)dnl
+pushdef(`mop',$4)dnl
+pushdef(`citype',`rsb_coo_idx_t')dnl
+{
+RSB_M4_DEBUGINFO(``$0'')dnl
+	/*!
+	 * This function is experimental.
+	 */
+
+ifelse(RSB_M4_IS_FORMAT_LINKED_LIST(matrix_storage),1,`dnl
+dnl	FIXME: the following call uses non declared arguments
+pushdef(`args',`RSB_M4_ARGS_TO_ACTUAL_ARGS(RSB_M4_BCSS_KERNEL_SIZE_DISPATCH_FUNCTION(`ARGS',mtype,matrix_storage,transposition,symmetry,unrolling,,,mop,citype,diagonal,uplo))')dnl
+	/* this is sample code for scanning a whole linked list format matrix */
+	struct rsb_block_tail_t * bt;
+	const char *data = (const char*) VA;
+
+
+ifelse(RSB_M4_IS_FORMAT_COLUMN_MAJOR(matrix_storage),1,`dnl
+pushdef(`out_dim',rpntr[Mdim])dnl
+',`dnl
+pushdef(`out_dim',rpntr[mdim])dnl
+')dnl
+ifelse(mop,`spmv_uxux',`dnl
+	/* this is slow; however, this is column-based scanning. FIXME : optimize spmv_uxux */
+	rsb__cblas_Xscal(RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type), out_dim, betap, out, 1);/* we scale the destination vector */
+')dnl
+ifelse(mop,`spmv_uauz',`dnl
+	rsb__cblas_Xscal(RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type),out_dim,NULL,out,incy);
+')dnl
+
+	bt = (struct rsb_block_tail_t*)data;
+	while(RSB_LIKELY(bt->block_rows))
+	{
+		data += sizeof(struct rsb_block_tail_t);
+		/* do stuff */
+/*		RSB_STDERR("%d %d ",bt->block_columns , bt->block_rows);
+		RSB_STDERR("%d %d ",bt->block_column , bt->block_row);
+		RSB_STDERR("%d %d ",bt->base_column , bt->base_row);
+		RSB_STDERR(" : %d\n",*(int*)data);*/
+
+		//int j = bt->block_column;
+		//int i = bt->block_row;
+		register rsb_coo_idx_t columns = bt->block_columns;
+		register rsb_coo_idx_t rows = bt->block_rows;
+		//int colsu = bt->block_columns;
+		//int rowsu = bt->block_rows;
+		type * a = (type*) data;
+
+ifelse(mop,`scale',`dnl
+			const mtype *d = scale_factors+bt->base_row;
+')dnl
+ifelse(mop,`spmv_uauz',`dnl
+			const mtype *b = rhs+bt->base_column;
+dnl			/*mtype *c = out+(rowsu*i);*/ /* experimentally commented out and put up */
+			mtype *c = out+bt->base_row;
+')dnl
+ifelse(mop,`spmv_uaua',`dnl
+			const mtype *b = rhs+bt->base_column;
+dnl			/*mtype *c = out+(rowsu*i);*/ /* experimentally commented out and put up */
+			mtype *c = out+bt->base_row;
+')dnl
+ifelse(mop,`spmv_unua',`dnl
+			const mtype *b = rhs+bt->base_column;
+dnl			/*mtype *c = out+(rowsu*i);*/ /* experimentally commented out and put up */
+			mtype *c = out+bt->base_row;
+')dnl
+ifelse(mop,`spmv_uxux',`dnl
+			/*const mtype *b = rhs+(colsu*j);*/
+			const mtype *b = rhs+bt->base_column;
+			/*mtype *c = out+(rowsu*i);*/
+			mtype *c = out+bt->base_row;
+')dnl
+ifelse(mop,`spmm_az',`dnl
+			/*const mtype *b = mrhs+(colsu*j);
+			mtype *c = mout+(rowsu*i);*/
+			const mtype *b = mrhs+bt->base_column;
+			mtype *c = mout+bt->base_row;
+')dnl
+ifelse(mop,`infty_norm',`dnl
+			/*mtype *row_sums = global_row_sums+(rowsu*i);*/
+			mtype *row_sums = global_row_sums+bt->base_row;
+')dnl
+ifelse(mop,`negation',`dnl
+')dnl
+			/* `mop' is mop */
+dnl {RSB_M4_KERNEL_FUNCTION_BODY(`row',`rows',b_rows,`column',`columns',b_columns,mtype,,mop,unrolling)}
+
+	DOUBLE_LINEAR_KERNEL_SEARCH_MACRO_IDENTIFIER(mop,mtype,unrolling)(rows,columns)
+ifelse(mop,`scale',`dnl
+			(
+					a,
+					scale_factors+bt->base_row,
+					rows, columns
+					);
+')dnl
+ifelse(mop,`spmv_uauz',`dnl
+			(
+					a,b,c,
+					rows, columns
+					);
+')dnl
+ifelse(mop,`spmv_unua',`dnl
+			(
+					a,b,c,
+					rows, columns
+					);
+')dnl
+ifelse(mop,`spmv_uaua',`dnl
+			(
+					a,b,c,
+					rows, columns
+					);
+')dnl
+ifelse(mop,`spmm_az',`dnl
+			(
+					a,
+					mrhs+bt->base_column,
+					mout+bt->base_row,
+					rows, columns,
+					bstride, cstride, nrhs
+					);
+')dnl
+ifelse(mop,`spmv_uxux',`dnl
+			(
+					a,
+					rhs+bt->base_column,
+					out+bt->base_row,
+					rows, columns,
+					alphap, betap
+					);
+')dnl
+ifelse(mop,`infty_norm',`dnl
+			(
+					a,
+					global_row_sums+bt->base_row,
+					rows, columns
+					);
+')dnl
+ifelse(mop,`negation',`dnl
+			(
+					a,
+					rows, columns
+					);
+')dnl
+
+
+		data += /*(size_t)*/(int)(bt->block_columns * bt->block_rows)*(int)sizeof(type);
+		bt = (struct rsb_block_tail_t*)data;
+	}
+
+dnl
+
+popdef(`args')dnl
+')dnl
+	return RSB_ERR_NO_ERROR;
+popdef(`out_dim')dnl
+}
+popdef(`citype')dnl
+popdef(`mop')dnl
+popdef(`mtype')dnl
+popdef(`matrix_storage')dnl
+popdef(`unrolling')dnl
+')dnl
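
The body macro above walks a byte stream in which every dense block is
preceded by a small rsb_block_tail_t header, and a header with
block_rows == 0 terminates the matrix. A self-contained sketch of that
traversal (the struct layout here is hypothetical, and a real kernel
would compute on each block instead of printing it):

#include <stdio.h>
#include <string.h>

struct block_tail { int block_rows, block_columns, base_row, base_column; };

static void scan(const char *data)
{
	const struct block_tail *bt = (const struct block_tail*)data;
	while (bt->block_rows)                     /* zero header terminates */
	{
		data += sizeof(*bt);
		const double *a = (const double*)data; /* this block's values    */
		printf("%dx%d block at (%d,%d), a[0]=%g\n",
		       bt->block_rows, bt->block_columns,
		       bt->base_row, bt->base_column, a[0]);
		data += (size_t)(bt->block_rows * bt->block_columns) * sizeof(double);
		bt = (const struct block_tail*)data;   /* next header            */
	}
}

int main(void)
{
	/* one 1x2 double block at (0,0), then a terminating header;
	   double-typed storage keeps the payload suitably aligned */
	double storage[(2*sizeof(struct block_tail) + 2*sizeof(double))/sizeof(double)];
	struct block_tail h = {1, 2, 0, 0}, z = {0, 0, 0, 0};
	double v[2] = {3.0, 4.0};
	char *p = (char*)storage;
	memcpy(p, &h, sizeof h); p += sizeof h;
	memcpy(p, v,  sizeof v); p += sizeof v;
	memcpy(p, &z, sizeof z);
	scan((const char*)storage);
	return 0;
}
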
+dnl
+dnl
+dnl
+dnl	These functions dispatch on the column size, calling the
+dnl	proper kernels.
+dnl
+dnl	They assume type dispatching has just been performed.
+dnl
+dnl
+dnl	RSB_M4_LL_KERNEL_SIZE_DISPATCH_FUNCTION(mtype,matrix_storage,unrolling,mop)
+dnl	---------------------------------------------------------------------------
+dnl
+define(`RSB_M4_LL_KERNEL_SIZE_DISPATCH_FUNCTION',`
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`matrix_storage',$2)dnl
+pushdef(`unrolling',$3)dnl	
+dnl pushdef(`b_rows',$3)dnl		block rows
+dnl pushdef(`b_columns',$4)dnl	block columns
+pushdef(`itype',`rsb_coo_idx_t')dnl integer type (for indices)
+pushdef(`mop',`$4')dnl
+`/* This code is intended for a block compressed sparse stripe matrix. */'
+RSB_M4_LL_KERNEL_SIZE_DISPATCH_FUNCTION_PROTOTYPE(mtype,matrix_storage,unrolling,mop)dnl
+ifdef(`ONLY_WANT_HEADERS',`;
+',`dnl
+RSB_M4_LL_KERNEL_SIZE_DISPATCH_FUNCTION_BODY(mtype,matrix_storage,unrolling,mop)
+')dnl
+popdef(`mop')dnl
+popdef(`matrix_storage')dnl
+popdef(`mtype')dnl
+popdef(`itype')dnl
+popdef(`unrolling')dnl
+')dnl
+dnl
+dnl
+dnl	RSB_M4_LL_KERNEL_FUNCTION_NAME(mtype,matrix_storage,b_rows,b_columns,unrolling,mop)
+dnl	-----------------------------------------------------------------------------------
+dnl
+define(`RSB_M4_LL_KERNEL_FUNCTION_NAME',`dnl
+dnl
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`matrix_storage',$2)dnl
+pushdef(`b_rows',$3)dnl		block rows
+pushdef(`b_columns',$4)dnl	block columns
+pushdef(`itype',`int')dnl integer type (for indices)
+pushdef(`unrolling',$5)dnl	
+pushdef(`mop',$6)dnl	
+RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_NAME(mtype,matrix_storage,transposition,symmetry,b_rows,b_columns,unrolling,mop) dnl
+popdef(`mop')dnl
+popdef(`mtype')dnl
+popdef(`matrix_storage')dnl
+popdef(`b_rows')dnl
+popdef(`b_columns')dnl
+popdef(`itype')dnl
+popdef(`unrolling')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_LL_KERNEL_FUNCTION_ARGS(mtype,matrix_storage,b_rows,b_columns,unrolling,mop)
+dnl	-----------------------------------------------------------------------------------
+dnl
+define(`RSB_M4_LL_KERNEL_FUNCTION_ARGS',`dnl
+dnl
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`matrix_storage',$2)dnl
+pushdef(`b_rows',$3)dnl		block rows
+pushdef(`b_columns',$4)dnl	block columns
+pushdef(`itype',`int')dnl integer type (for indices)
+pushdef(`unrolling',$5)dnl	
+pushdef(`mop',$6)dnl	
+pushdef(`matrix_structs',`const itype Mdim, const itype mdim, const rsb_nnz_idx_t * bindx, const rsb_nnz_idx_t * bpntr, const rsb_nnz_idx_t *indptr, const rsb_coo_idx_t * rpntr, const rsb_coo_idx_t * cpntr')dnl	
+(
+ifelse(mop,`scale',`dnl
+	mtype * VA, 
+	matrix_structs,
+	const mtype *scale_factors
+')dnl
+ifelse(mop,`spmv_uauz',`dnl
+	const mtype * VA, const mtype * rhs, mtype * out,
+	matrix_structs
+')dnl
+ifelse(mop,`spmv_uxux',`dnl
+	const mtype * VA, const mtype * rhs, mtype * out,
+	matrix_structs,
+	const mtype * alphap, const mtype * betap
+')dnl
+ifelse(mop,`spmm_az',`dnl
+	const mtype * VA, const mtype * mrhs, mtype * mout,
+	matrix_structs,
+	const itype bstride, 
+	const itype cstride,
+	const itype nrhs 
+')dnl
+ifelse(mop,`infty_norm',`dnl
+	const mtype * VA, mtype * global_row_sums, 
+	matrix_structs
+')dnl
+ifelse(mop,`negation',`dnl
+	mtype * VA, 
+	matrix_structs
+')dnl
+dnl
+dnl
+dnl
+dnl
+)
+popdef(`matrix_structs')dnl	
+popdef(`mop')dnl
+popdef(`mtype')dnl
+popdef(`matrix_storage')dnl
+popdef(`b_rows')dnl
+popdef(`b_columns')dnl
+popdef(`itype')dnl
+popdef(`unrolling')dnl
+')dnl
+dnl
+dnl
+dnl	RSB_M4_LL_KERNEL_FUNCTION_PROTOTYPE(mtype,matrix_storage,b_rows,b_columns,unrolling,mop)
+dnl	----------------------------------------------------------------------------------------
+dnl
+define(`RSB_M4_LL_KERNEL_FUNCTION_PROTOTYPE',`dnl
+dnl
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`matrix_storage',$2)dnl
+pushdef(`b_rows',$3)dnl		block rows
+pushdef(`b_columns',$4)dnl	block columns
+pushdef(`itype',`int')dnl integer type (for indices)
+pushdef(`unrolling',$5)dnl	
+pushdef(`mop',$6)dnl	
+ifelse(RSB_M4_IS_FORMAT_LINKED_LIST(matrix_storage),1,`dnl
+rsb_err_t RSB_M4_LL_KERNEL_FUNCTION_NAME(mtype,matrix_storage,b_rows,b_columns,unrolling,mop)dnl
+RSB_M4_LL_KERNEL_FUNCTION_ARGS(mtype,matrix_storage,b_rows,b_columns,unrolling,mop)dnl
+')dnl
+popdef(`mop')dnl
+popdef(`mtype')dnl
+popdef(`matrix_storage')dnl
+popdef(`b_rows')dnl
+popdef(`b_columns')dnl
+popdef(`itype')dnl
+popdef(`unrolling')dnl
+')dnl
+dnl
+dnl
+dnl	RSB_M4_LL_KERNEL_FUNCTION_BODY(mtype,matrix_storage,b_rows,b_columns,unrolling,mop)
+dnl	-----------------------------------------------------------------------------------
+dnl
+define(`RSB_M4_LL_KERNEL_FUNCTION_BODY',`dnl
+dnl
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`matrix_storage',$2)dnl
+pushdef(`b_rows',$3)dnl		block rows
+pushdef(`b_columns',$4)dnl	block columns
+pushdef(`itype',`int')dnl integer type (for indices)
+pushdef(`unrolling',$5)dnl
+pushdef(`mop',$6)dnl
+{
+RSB_M4_DEBUGINFO(``$0'')dnl
+	/* FIXME : STUB */
+dnl
+	return RSB_ERR_NO_ERROR;
+}
+popdef(`mop')dnl
+popdef(`mtype')dnl
+popdef(`matrix_storage')dnl
+popdef(`b_rows')dnl
+popdef(`b_columns')dnl
+popdef(`itype')dnl
+popdef(`unrolling')dnl
+')dnl
+dnl
+dnl
+dnl	RSB_M4_LL_KERNEL_FUNCTION(mtype,matrix_storage,b_rows,b_columns,unrolling,mop)
+dnl	------------------------------------------------------------------------------
+dnl
+dnl	These functions will perform their operations on fixed block matrices.
+dnl
+define(`RSB_M4_LL_KERNEL_FUNCTION',`dnl
+dnl
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`matrix_storage',$2)dnl	
+pushdef(`b_rows',$3)dnl		block rows
+pushdef(`b_columns',$4)dnl	block columns
+pushdef(`itype',`int')dnl integer type (for indices)
+pushdef(`unrolling',$5)dnl	
+pushdef(`mop',$6)dnl	
+RSB_M4_LL_KERNEL_FUNCTION_PROTOTYPE(mtype,matrix_storage,b_rows,b_columns,unrolling,mop)dnl
+ifdef(`ONLY_WANT_HEADERS',`;
+',`dnl
+RSB_M4_LL_KERNEL_FUNCTION_BODY(mtype,matrix_storage,b_rows,b_columns,unrolling,mop)dnl
+')dnl
+popdef(`mop')dnl
+popdef(`matrix_storage')dnl
+popdef(`mtype')dnl
+popdef(`itype')dnl
+popdef(`unrolling')dnl
+')dnl
+dnl
+dnl	
+dnl	
+dnl
diff --git a/rsb_krnl_macros.m4 b/rsb_krnl_macros.m4
new file mode 100644
index 0000000..1042081
--- /dev/null
+++ b/rsb_krnl_macros.m4
@@ -0,0 +1,1418 @@
+dnl
+dnl
+dnl	@author: Michele Martone
+dnl
+dnl
+include(`rsb_misc.m4')dnl
+include(`do_unroll.m4')dnl
+include(`rsb_krnl_vb_macros.m4')dnl
+include(`rsb_krnl_bcss_macros.m4')dnl
+include(`rsb_krnl_bcoo_macros.m4')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_ARGS(mop)
+dnl	------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_ARGS',`dnl
+pushdef(`mop',$1)dnl
+dnl
+`('RSB_M4_MULTI_BLOCK_KERNEL_TYPE_DISPATCH_FUNCTION(,mop,`function_args')`)'dnl
+dnl (const struct rsb_mtx_t * mtxAp, const struct rsb_options_t *o, const void * rhs, void * out)dnl
+dnl
+dnl
+popdef(`mop')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_IDENTIFIER(mop,transposition)
+dnl	------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_IDENTIFIER',`dnl
+pushdef(`mop',$1)dnl
+pushdef(`transposition',`')dnl
+dnl
+RSB_M4_PREFIX`'do_`'mop`'`'dnl
+dnl
+popdef(`transposition')dnl
+popdef(`mop')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_NAME(mop)
+dnl	------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_NAME',`dnl
+pushdef(`mop',$1)dnl
+pushdef(`transposition',`')dnl
+dnl
+rsb_err_t RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_IDENTIFIER(mop,transposition)dnl
+dnl
+popdef(`mop')dnl
+popdef(`transposition')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_GET_NEXT_BLOCK_POINTER_MACRO(matrix_storage)
+dnl	---------------------------------------------------
+dnl
+define(`RSB_M4_GET_NEXT_BLOCK_POINTER_MACRO',`dnl
+pushdef(`matrix_storage',$1)dnl
+dnl
+ifelse(matrix_storage,`VBR',`RSB_GET_NEXT_BLOCK_POINTER')`'dnl
+ifelse(matrix_storage,`BCSR',`RSB_BCSR_GET_NEXT_BLOCK_POINTER')`'dnl
+dnl	else  ? should give error! fixme :)
+dnl
+popdef(`matrix_storage')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_GET_FIRST_BLOCK_POINTER_MACRO(matrix_storage)
+dnl	----------------------------------------------------
+dnl
+define(`RSB_M4_GET_FIRST_BLOCK_POINTER_MACRO',`dnl
+pushdef(`matrix_storage',$1)dnl
+dnl
+ifelse(matrix_storage,`VBR',`RSB_GET_FIRST_BLOCK_POINTER')`'dnl
+ifelse(matrix_storage,`BCSR',`RSB_BCSR_GET_FIRST_BLOCK_POINTER')`'dnl
+dnl	else  ? should give error! fixme :)
+dnl
+popdef(`matrix_storage')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_GOT_LAST_BLOCK_POINTER_MACRO(matrix_storage)
+dnl	---------------------------------------------------
+dnl
+define(`RSB_M4_GOT_LAST_BLOCK_POINTER_MACRO',`dnl
+pushdef(`matrix_storage',$1)dnl
+dnl
+ifelse(matrix_storage,`VBR',`RSB_GOT_LAST_BLOCK_POINTER')`'dnl
+ifelse(matrix_storage,`BCSR',`RSB_BCSR_GOT_LAST_BLOCK_POINTER')`'dnl
+dnl	else  ? should give error! fixme :)
+dnl
+popdef(`matrix_storage')dnl
+')dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION(types,mop)
+dnl	-------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION',`dnl
+pushdef(`types',$1)dnl
+pushdef(`mop',$2)dnl
+pushdef(`unrolling',`u')dnl
+dnl pushdef(`transposition',`')dnl
+dnl pushdef(`transposition',$3)dnl
+dnl
+dnl
+RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_NAME(mop,`')dnl
+RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_ARGS(mop)dnl
+ifdef(`ONLY_WANT_HEADERS',`;
+',`
+RSB_M4_DEBUGINFO(``$0'')dnl
+ifelse(RSB_M4_IS_IMPLEMENTED_MOP(mop),0,`dnl
+{
+	return RSB_ERR_UNSUPPORTED_OPERATION;
+}
+',`dnl
+{
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * A run-time kernel dispatching function.
+	 * 
+	 * Will use the right "mop" kernel for each matrix block.
+	 * 
+	 * However, there could be some overhead in the process of dispatching
+	 * the right function kernel for each block, especially for matrices
+	 * partitioned in same-size blocks.
+	 * 
+	 * In that case, it is better to use some specialized function.
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+dnl	register rsb_coo_idx_t baserow,basecolumn,rows,columns;
+dnl	register rsb_coo_idx_t blockrow,blockcolumn;
+dnl	register char *bp=NULL;
+	rsb_flags_t symmetry,diagonal;
+`#ifdef' RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_PREPROCESSOR_SYMBOL(`rsb_half_idx_t')
+	rsb_int_t half_storage = rsb__do_is_candidate_size_for_halfword(mtxAp->Mdim,mtxAp->mdim,/*nnz*/0,mtxAp->flags)?`'dnl
+RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_PREPROCESSOR_SYMBOL(`rsb_half_idx_t'):`'dnl
+RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_PREPROCESSOR_SYMBOL(`rsb_coo_idx_t');
+#else /* RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_PREPROCESSOR_SYMBOL(`rsb_half_idx_t') */
+	rsb_int_t half_storage=`'dnl
+RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_PREPROCESSOR_SYMBOL(`rsb_coo_idx_t');
+#endif /* RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_PREPROCESSOR_SYMBOL(`rsb_half_idx_t') */
+
+	if(!mtxAp /*|| !mtxAp->options */)
+		return RSB_ERR_BADARGS;
+
+	symmetry = rsb__get_symmetry_type_flag(mtxAp);
+	diagonal = rsb__get_diagonal_type_flag(mtxAp);
+
+	if(RSB_MATRIX_UNSUPPORTED_TYPE(mtxAp->`typecode'))
+		return RSB_ERR_BADARGS;
+
+dnl ifelse(mop,`spmv_uxux',`dnl
+dnl	if(RSB_IS_ELEMENT_ZERO(betap,mtxAp->`typecode') && RSB_IS_ELEMENT_ONE(alphap,mtxAp->`typecode') )
+dnl		return rsb_spmv(...);
+dnl')dnl
+	switch(diagonal)
+	{
+foreach(`k_diagonal',RSB_M4_MATRIX_DIAGONAL_TYPES,`dnl
+	case(RSB_M4_MATRIX_DIAGONAL_PREPROCESSOR_SYMBOL(k_diagonal)):
+	switch(half_storage)
+	{
+foreach(`citype',RSB_M4_MATRIX_COORDINATE_TYPES,`dnl
+	case(RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_PREPROCESSOR_SYMBOL(citype)):
+	switch(transA)
+	{
+foreach(`transposition',RSB_M4_MATRIX_TRANSPOSITIONS,`dnl
+	case(RSB_M4_MATRIX_TRANSPOSITION_PREPROCESSOR_SYMBOL(transposition)):
+dnl //	switch(mtxAp->`flags' | RSB_M4_MATRIX_SYMMETRY_PREPROCESSOR_SYMBOL(RSB_M4_SYMBOL_SYMMETRIC))
+	switch(symmetry)
+	{
+foreach(`k_symmetry',RSB_M4_MATRIX_SYMMETRY,`dnl
+	case(RSB_M4_MATRIX_SYMMETRY_PREPROCESSOR_SYMBOL(k_symmetry)):
+	switch(mtxAp->`matrix_storage')
+	{
+foreach(`matrix_storage',RSB_M4_MATRIX_STORAGE,`dnl
+	case(RSB_M4_MATRIX_STORAGE_PREPROCESSOR_SYMBOL(matrix_storage)):
+dnl		/* return RSB_M4_MULTI_BLOCK_KERNEL_TYPE_DISPATCH_FUNCTION(matrix_storage,unrolling,mop,identifier)(...); */
+	switch(mtxAp->`typecode')
+	{
+foreach(`mtype',types,`dnl
+	case(RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)):
+dnl
+ifelse(RSB_M4_IS_SPSX_KERNEL_MOP(mop),1,`dnl
+	if(rsb__is_lower_triangle(mtxAp->flags))
+		errval = RSB_M4_KERNEL_SIZE_DISPATCH_FUNCTION_NAME(mtype,matrix_storage,transposition,k_symmetry,unrolling,mop,citype,k_diagonal,`l')`'dnl
+(RSB_M4_ACTUAL_ARGS_APPLY_MEMBERSHIP(RSB_M4_ARGS_TO_ACTUAL_ARGS(RSB_M4_BCXX_KERNEL_SIZE_DISPATCH_FUNCTION(`ARGS',mtype,matrix_storage,transposition,k_symmetry,unrolling,,,mop,citype,k_diagonal,`l'))));
+	else
+		errval = RSB_M4_KERNEL_SIZE_DISPATCH_FUNCTION_NAME(mtype,matrix_storage,transposition,k_symmetry,unrolling,mop,citype,k_diagonal,`u')`'dnl
+(RSB_M4_ACTUAL_ARGS_APPLY_MEMBERSHIP(RSB_M4_ARGS_TO_ACTUAL_ARGS(RSB_M4_BCXX_KERNEL_SIZE_DISPATCH_FUNCTION(`ARGS',mtype,matrix_storage,transposition,k_symmetry,unrolling,,,mop,citype,k_diagonal,`u'))));
+dnl
+',`dnl
+dnl
+dnl		/* FIXME: the following line could cause severe compiler warnings (e.g.: 1506-280 (W) Function argument assignment between types "const unsigned short* restrict" and "int*" is not allowed) */
+dnl
+		errval = RSB_M4_KERNEL_SIZE_DISPATCH_FUNCTION_NAME(mtype,matrix_storage,transposition,k_symmetry,unrolling,mop,citype,k_diagonal,`g')`'dnl
+(RSB_M4_ACTUAL_ARGS_APPLY_MEMBERSHIP(RSB_M4_ARGS_TO_ACTUAL_ARGS(RSB_M4_BCXX_KERNEL_SIZE_DISPATCH_FUNCTION(`ARGS',mtype,matrix_storage,transposition,k_symmetry,unrolling,,,mop,citype,k_diagonal,`g'))));
+')dnl
+dnl
+popdef(`specific_args')dnl
+	break;
+	')dnl
+		default:
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	break;
+	')dnl
+		default:
+		{
+		RSB_ERROR("Sorry, matrix storage \"%c\" currently not supported.\n",mtxAp->`matrix_storage');
+dnl		FIXME : SOMEWHERE SOMEONE FORGETS TO POPDEF(`matrix_storage') ...
+		errval = RSB_ERR_UNSUPPORTED_FORMAT;
+		}
+	}
+	break;
+	')dnl
+		default:
+		{
+			RSB_ERROR("Sorry, this symmetry case (0x%xd) is not supported.\n",(rsb_int)symmetry);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+	')dnl
+		default:
+		{
+			RSB_ERROR("Sorry, this transposition case (0x%xd) is not supported.\n",(rsb_int)transA);
+			errval = RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+	}
+	break;
+	')dnl
+		default:
+		{
+			RSB_ERROR("Sorry, this coordinate index (0x%xd) is not supported.\n",(rsb_int)half_storage);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	break;
+	')dnl
+		default:
+		{
+			RSB_ERROR("Sorry, this diagonal type (0x%xd) is not supported.\n",(rsb_int)diagonal);
+			errval = RSB_ERR_UNSUPPORTED_FEATURE;
+		}
+	}
+	return errval;
+dnl	return RSB_ERR_INTERNAL_ERROR;	
+}
+')dnl
+')dnl
+dnl
+popdef(`unrolling')dnl
+popdef(`types')dnl
+popdef(`mop')dnl
+dnl popdef(`transposition')dnl
+dnl
+')dnl
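
The dispatcher generated by the macro above is a cascade of switches,
narrowing diagonal -> index width -> transposition -> symmetry -> storage
format -> numeric type before calling one fully specialized kernel, so the
cost is a handful of integer comparisons per call rather than per block.
Its shape, reduced to two of the six levels, with toy enums and kernels
(not the librsb definitions):

#include <stdio.h>

typedef int err_t;
enum trans { TRANS_N, TRANS_T };
enum type  { TYPE_D,  TYPE_Z  };

static err_t spmv_N_d(void) { return 0; }
static err_t spmv_N_z(void) { return 0; }
static err_t spmv_T_d(void) { return 0; }
static err_t spmv_T_z(void) { return 0; }

static err_t dispatch(enum trans t, enum type ty)
{
	switch (t) {
	case TRANS_N:
		switch (ty) {
		case TYPE_D: return spmv_N_d();
		case TYPE_Z: return spmv_N_z();
		default:     return -1; /* unsupported type   */
		}
	case TRANS_T:
		switch (ty) {
		case TYPE_D: return spmv_T_d();
		case TYPE_Z: return spmv_T_z();
		default:     return -1;
		}
	default:             return -1; /* unsupported transA */
	}
}

int main(void)
{
	printf("%d\n", dispatch(TRANS_N, TYPE_Z));
	return 0;
}
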
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION_ACTUAL_ARGS(mop)
+dnl	--------------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION_ACTUAL_ARGS',`dnl
+pushdef(`mop',$1)dnl
+dnl
+RSB_M4_ARGS_TO_ACTUAL_ARGS((RSB_M4_MULTI_BLOCK_KERNEL_TYPE_DISPATCH_FUNCTION(,mop,`function_args')))dnl
+dnl
+popdef(`mop')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION_ARGS(mop)
+dnl	-------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION_ARGS',`dnl
+pushdef(`mop',$1)dnl
+dnl
+`('double * elapsed_time, RSB_M4_MULTI_BLOCK_KERNEL_TYPE_DISPATCH_FUNCTION(,mop,`function_args')`)'dnl
+dnl
+popdef(`mop')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION_IDENTIFIER(mop)
+dnl	-------------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION_IDENTIFIER',`dnl
+pushdef(`mop',$1)dnl
+dnl
+rsb_do_time_`'mop`'dnl
+dnl
+dnl
+popdef(`mop')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION_NAME(mop)
+dnl	-------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION_NAME',`dnl
+pushdef(`mop',$1)dnl
+dnl
+rsb_err_t RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION_IDENTIFIER($1)`'dnl
+dnl
+dnl
+popdef(`mop')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION(types,mop)
+dnl	--------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION',`dnl
+pushdef(`types',$1)dnl
+pushdef(`mop',$2)dnl
+pushdef(`transposition',RSB_M4_TRANS_N)dnl FIXME
+dnl
+dnl
+RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION_NAME(mop)dnl
+RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION_ARGS(mop)dnl
+ifdef(`ONLY_WANT_HEADERS',`;
+',`
+RSB_M4_DEBUGINFO(``$0'')dnl
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will perform the "mop" operation,
+	 * measuring the elapsed time in seconds and writing it to a
+	 * user-set variable.
+	 *
+	 * Note that this dispatch function is matrix type independent.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if( ! elapsed_time ) return RSB_ERR_BADARGS;
+
+	*elapsed_time = - rsb_time();
+	errval = RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_IDENTIFIER(mop,transposition)dnl
+	(RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION_ACTUAL_ARGS(mop));
+	
+	*elapsed_time += rsb_time(); dnl 	FIXME!
+
+	return errval;
+}
+')dnl
+dnl
+')dnl
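
The wrapper relies on the negative-start timing idiom: the clock is
subtracted before the operation and added back after it, leaving the
elapsed seconds in the variable. In isolation (gettimeofday() here is
only a stand-in for librsb's rsb_time()):

#include <stdio.h>
#include <sys/time.h>

static double now_seconds(void) /* stand-in for rsb_time() */
{
	struct timeval tv;
	gettimeofday(&tv, NULL);
	return (double)tv.tv_sec + 1e-6 * (double)tv.tv_usec;
}

int main(void)
{
	double elapsed_time = -now_seconds(); /* negative start time ...      */
	/* ... the measured operation would run here ... */
	elapsed_time += now_seconds();        /* ... plus end time = duration */
	printf("elapsed: %lg s\n", elapsed_time);
	return 0;
}
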
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_BENCHMARK_FUNCTION_ACTUAL_ARGS(mop)
+dnl	-----------------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_BENCHMARK_FUNCTION_ACTUAL_ARGS',`dnl
+pushdef(`mop',$1)dnl
+dnl
+RSB_M4_ARGS_TO_ACTUAL_ARGS((RSB_M4_MULTI_BLOCK_KERNEL_TYPE_DISPATCH_FUNCTION(,mop,`function_args')))dnl
+dnl
+popdef(`mop')dnl
+popdef(`transposition')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_BENCHMARK_FUNCTION_ARGS(mop,mtype)
+dnl	----------------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_BENCHMARK_FUNCTION_ARGS',`dnl
+pushdef(`mop',$1)dnl
+pushdef(`mtype',$2)dnl
+dnl
+double * total_elapsed_time, double * m_flops, RSB_M4_MULTI_BLOCK_KERNEL_TYPE_DISPATCH_FUNCTION(,mop,`function_args')`'dnl
+dnl
+popdef(`mtype')dnl
+popdef(`mop')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_BENCHMARK_FUNCTION_IDENTIFIER(mop,mtype)
+dnl	----------------------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_BENCHMARK_FUNCTION_IDENTIFIER',`dnl
+pushdef(`mop',$1)dnl
+pushdef(`mtype',$2)dnl
+dnl
+RSB_M4_PREFIX`'do_benchmark_`'RSB_M4_CHOPSPACES(mtype)`_'mop`'dnl
+dnl
+dnl
+popdef(`mtype')dnl
+popdef(`mop')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_BENCHMARK_FUNCTION_NAME(mop,mtype)
+dnl	----------------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_BENCHMARK_FUNCTION_NAME',`dnl
+pushdef(`mop',$1)dnl
+pushdef(`mtype',$2)dnl
+dnl
+rsb_err_t RSB_M4_DIRECT_KERNEL_DISPATCH_BENCHMARK_FUNCTION_IDENTIFIER(mop,mtype)`'dnl
+dnl
+dnl
+popdef(`mtype')dnl
+popdef(`mop')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_BENCHMARK_FUNCTION(types,mop)
+dnl	-----------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_BENCHMARK_FUNCTION',`dnl
+pushdef(`types',$1)dnl
+pushdef(`mop',$2)dnl
+dnl
+dnl
+RSB_M4_DIRECT_KERNEL_DISPATCH_BENCHMARK_FUNCTION_NAME(mop,mtype)dnl
+(RSB_M4_DIRECT_KERNEL_DISPATCH_BENCHMARK_FUNCTION_ARGS(mop,mtype))dnl
+ifdef(`ONLY_WANT_HEADERS',`;
+',`
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This wrapper function will benchmark the "mop" operation
+	 * a number of times, measuring the elapsed time in seconds
+	 * and writing it to a user-set location for a specified matrix.
+	 *
+	 * It will also add the count of millions of floating point
+	 * operations performed to another user-specified location.
+	 *
+	 * \param total_elapsed_time if > 0 on input, will benchmark for at least *total_elapsed_time seconds
+	 * \param m_flops if *m_flops > 0 on input, will benchmark at least *m_flops times
+	 *
+	 * If neither of the two input arguments is set on input,
+	 * the benchmark will cease after RSB_BENCHMARK_MIN_RUNS runs or RSB_BENCHMARK_MIN_SECONDS seconds.
+	 *
+	 * Assuming time_limit = *total_elapsed_time :
+	 *
+	 * if(time_limit <= 0) will benchmark at least min_runs times
+	 * if(time_limit >  0) will benchmark at least min_runs times and for time_limit seconds
+	 *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 */
+
+	double time_limit;
+	double elapsed_time;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int runs=0,min_runs=0;
+
+        if( ! total_elapsed_time || ! m_flops)
+		return RSB_ERR_BADARGS;
+
+	time_limit = *total_elapsed_time;	/* we read input (FIXME) */
+	min_runs   = (int)*m_flops;			/* we read input (FIXME) */
+
+	*total_elapsed_time = RSB_TIME_ZERO;
+	*m_flops = RSB_TIME_ZERO;
+
+	if(time_limit <= 0 )
+	{
+		time_limit = RSB_BENCHMARK_MIN_SECONDS;
+	}
+
+	if(min_runs   <= 0 )
+	{
+		min_runs = RSB_BENCHMARK_MIN_RUNS ;	/* NOTE : this is a completely arbitrary number (FIXME) */
+	}
+
+	//RSB_INFO("will perform min  %d runs, for %lg seconds\n",min_runs, time_limit);
+
+	// FIXME : seems like this affects performance ...
+	// *total_elapsed_time = - rsb_time();
+	*total_elapsed_time =0;
+
+	while( ( time_limit? ( *total_elapsed_time < time_limit):0 ) || ( min_runs ? ( runs < min_runs ) : 0 ) )
+	{
+		//elapsed_time = RSB_TIME_ZERO;
+		//errval = dnl
+		/* FIXME : use an even more general function here (the following is vbr-only!) */
+RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION_IDENTIFIER(mop)dnl
+`'(&elapsed_time,RSB_M4_DIRECT_KERNEL_DISPATCH_BENCHMARK_FUNCTION_ACTUAL_ARGS(mop));
+dnl		errval = RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_IDENTIFIER(mop)dnl
+dnl (RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION_ACTUAL_ARGS(mop));
+
+		//*total_elapsed_time += rsb_time();
+/*		RSB_INFO("tl : %lg\n",time_limit );*/
+/*		RSB_INFO("ss : %lg\n",*total_elapsed_time );*/
+/*		RSB_INFO("sse : %lg\n",elapsed_time );*/
+
+		*total_elapsed_time  +=  elapsed_time;
+		*m_flops += RSB_M4_ESTIMATE_MFLOPS_PER_MOP_FUNCTION_IDENTIFIER(mop)(mtxAp);
+		if(RSB_SOME_ERROR(errval)) return errval;
+		++runs;
+	}
+	/* FIXME : get rid of this line */
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDERR("%s : ",rsb__sprint_matrix_implementation_code(mtxAp,"mop",RSB_FLAG_NOFLAGS,buf));}
+	RSB_STDERR("performed %d runs, %lg/%lg seconds (mop,mtype) \n",runs, *total_elapsed_time,time_limit);
+
+	/*
+         * FIXME : this is a candidate location for a conditional performance data printout
+         */
+
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+')dnl
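
Note the in/out convention of the benchmark above: *total_elapsed_time
carries a time limit in and the measured total out, while *m_flops carries
a minimum run count in and the accumulated megaflop count out. A toy
reconstruction of that loop contract (not the librsb code; the defaults
and names are invented):

#include <stdio.h>

static void bench(double *seconds, double *m_flops,
                  double (*timed_op)(void), double mflops_per_op)
{
	double time_limit = *seconds;          /* inputs read from outputs */
	int    min_runs   = (int)*m_flops;
	int    runs       = 0;

	if (time_limit <= 0) time_limit = 0.1; /* stand-in default */
	if (min_runs   <= 0) min_runs   = 10;  /* stand-in default */

	*seconds = 0; *m_flops = 0;
	while (*seconds < time_limit || runs < min_runs)
	{
		*seconds += timed_op();            /* elapsed time of one run */
		*m_flops += mflops_per_op;
		++runs;
	}
	printf("performed %d runs in %lg s\n", runs, *seconds);
}

static double fake_op(void) { return 0.01; } /* pretend 10 ms per run */

int main(void)
{
	double s = 0.0, f = 0.0; /* no limits set: the defaults apply */
	bench(&s, &f, fake_op, 2.0);
	return 0;
}
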
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION_ACTUAL_ARGS(mop,mtype)
+dnl	--------------------------------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION_ACTUAL_ARGS',`dnl
+pushdef(`mop',$1)dnl
+pushdef(`mtype',$2)dnl
+dnl
+dnl RSB_M4_ARGS_TO_ACTUAL_ARGS((RSB_M4_DIRECT_KERNEL_DISPATCH_BENCHMARK_FUNCTION_ARGS(mop)))`'dnl
+dnl
+popdef(`mtype')dnl
+popdef(`mop')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION(mop,mtype)
+dnl	--------------------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION',`dnl
+dnl
+pushdef(`mop',$1)dnl
+pushdef(`mtype',$2)dnl
+pushdef(`want_what',$3)dnl
+pushdef(`args',`$1,$2')dnl
+dnl
+ifelse(want_what,`function_identifier',`dnl
+RSB_M4_PREFIX`do_fullrangebenchmark_'RSB_M4_CHOPSPACES(mtype)`_'mop`'dnl
+')dnl
+ifelse(want_what,`function_declaration',`dnl
+rsb_err_t $0(args,`function_identifier')dnl
+($0(args,`function_args'));
+')dnl
+ifelse(want_what,`function_args',`dnl
+void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags`'dnl
+')dnl
+ifelse(want_what,`function_definition',`dnl
+rsb_err_t $0(args,`function_identifier')dnl
+($0(args,`function_args'))
+RSB_M4_DEBUGINFO(``$0'')dnl
+{
+	/*!
+	 * \ingroup gr_bench
+	 * Will benchmark the "mtype" type implementation of operation "mop" 
+	 * for a single matrix, but for the whole range of different block sizes
+	 * partitionings.
+         * 
+         * Therefore, the VBR features of this library will be NOT used here.
+	 *
+	 * The performance information will be written in a user supplied structure.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	rsb_flags_t typecode = RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype);
+	int ri=0,ci=0;
+	rsb_blk_idx_t br=0,bc=0;
+	//rsb_blk_idx_t M_b,K_b;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+ifelse(RSB_M4_IS_SPXX_TWO_VECTORS_OPERATING_KERNEL_MOP(mop),1,`dnl
+	mtype *out=NULL,*rhs=NULL;
+')dnl
+ifelse(RSB_M4_IS_ACC_WRITING_KERNEL_MOP(mop),`1',`dnl
+	mtype * row_sums=NULL;
+')dnl
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+ifelse(RSB_M4_IS_STRIDED_KERNEL_MOP(mop),1,`dnl
+	rsb_coo_idx_t incx=1,incy=1;
+',`dnl
+dnl
+dnl	incx is sometimes needed for scaling a vector, even if the op itself is strided 1
+dnl
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+')dnl
+
+	if(!VA || !IA || !JA || !mpi)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(mpi);
+	mpi->rows = rows;
+	mpi->cols=cols;
+	mpi->nnz=nnz;
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+ifelse(RSB_M4_IS_SPXX_TWO_VECTORS_OPERATING_KERNEL_MOP(mop),1,`dnl
+			rsb_coo_idx_t bstride = 0;
+			rsb_coo_idx_t cstride = 0;
+')dnl
+dnl
+ifelse(RSB_M4_IS_SPXX_KERNEL_MOP(mop),`1',`dnl
+ifelse(RSB_M4_IS_SPXM_KERNEL_MOP(mop),`1',`dnl
+			rsb_coo_idx_t nrhs=4;
+',`dnl
+			rsb_coo_idx_t nrhs=1;
+')dnl
+')dnl
+dnl
+ifelse(RSB_M4_IS_SPXX_OP_SCALING_KERNEL_MOP(mop),1,`dnl
+			double alpha=1.0;/* FIXME */
+			double * alphap = &alpha;
+')dnl
+dnl
+ifelse(RSB_M4_IS_SPXX_SCALING_KERNEL_MOP(mop),1,`dnl
+			double beta=1.0;/* FIXME */
+			double * betap=&beta ;
+')dnl
+dnl
+ifelse(mop,`scale',`dnl
+			mtype * scale_factors = NULL;
+')dnl
+dnl
+			br = rua[ri];
+			bc = cua[ci];
+dnl			mtxAp = rsb_allocate_bcsr_sparse_matrix(VA, IA, JA, nnz, typecode, rows, cols, br,bc, flags,&errval);
+			mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,rows,cols,br,bc,flags,&errval);
+			if(!mtxAp||RSB_SOME_ERROR(errval)) {goto erri;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+			{
+
+				/* no need for further benchmarks (FIXME : a temporary, horrible hack! ) */
+				ri=ci=-1;
+				for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+					for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+						if( rua[ri] == mtxAp->rpntr[1] - mtxAp->rpntr[0] )
+							if( cua[ci] == mtxAp->cpntr[1] - mtxAp->cpntr[0] )
+								goto ok; /* lol */
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+
+			ok:
+				br = rua[ri];
+				bc = cua[ci];
+				/* autoblocking found a blocking among the supported ones.
+				 * we fill in performance info and quit.
+				 */
+
+ifelse(RSB_M4_IS_SPXX_TWO_VECTORS_OPERATING_KERNEL_MOP(mop),1,`dnl
+			bstride=cols+bc;
+			cstride = rows+br;
+			rhs = rsb__malloc(mtxAp->el_size*(bstride)*nrhs);
+			out = rsb__malloc(mtxAp->el_size*(cstride)*nrhs);
+			if(!out || rsb__fill_with_ones(out,mtxAp->typecode,cstride*nrhs,incy)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!rhs || rsb__fill_with_ones(rhs,mtxAp->typecode,bstride*nrhs,incx)){errval = RSB_ERR_ENOMEM;goto erri;}
+			if(!out || !rhs) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(rhs,mtxAp->typecode,(cols)*nrhs,cols))     {errval = RSB_ERR_ENOMEM;goto erri;}
+			/* FIXME : are we sure this is correct ?*/
+			if(rsb__cblas_Xscal(mtxAp->typecode,(rows+br)*nrhs,NULL,out,incy)) {errval = RSB_ERR_ENOMEM;goto erri;}
+')dnl
+ifelse(RSB_M4_IS_ACC_WRITING_KERNEL_MOP(mop),`1',`dnl
+			row_sums = rsb__malloc(mtxAp->el_size*(rows+br));
+			if(!row_sums) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(row_sums,mtxAp->typecode,cols,1))     {errval = RSB_ERR_ENOMEM;goto erri;}
+')dnl
+ifelse(mop,`scale',`dnl
+			scale_factors = rsb__malloc(mtxAp->el_size*(rows+br));
+			if(!scale_factors) {errval = RSB_ERR_ENOMEM;goto erri;}
+			if(rsb__fill_with_ones(scale_factors,mtxAp->typecode,rows,1))     {errval = RSB_ERR_ENOMEM;goto erri;}
+')dnl
+ifelse(RSB_M4_IS_ACC_WRITING_KERNEL_MOP(mop),`1',`dnl
+')dnl
+ifelse(mop,`negation',`dnl
+			int please_fix_RSB_M4_ARGS_TO_ACTUAL_ARGS=-1;
+')dnl
+			
+			mpi->seconds[ri][ci] = RSB_BENCHMARK_MIN_SECONDS; /* min seconds */
+			mpi->m_flops[ri][ci] = (double)RSB_BENCHMARK_MIN_RUNS; /* min runs */
+
+dnl			struct rsb_options_t * o = mtxAp->options;
+			RSB_M4_DIRECT_KERNEL_DISPATCH_BENCHMARK_FUNCTION_IDENTIFIER(mop,mtype)dnl
+( &(mpi->seconds[ri][ci]), &(mpi->m_flops[ri][ci]), RSB_M4_DIRECT_KERNEL_DISPATCH_TIMING_FUNCTION_ACTUAL_ARGS(mop,mtype));
+			mpi->fillin[ri][ci] = rsb__do_get_matrix_fillin(mtxAp);
+			mpi->e_mflops[ri][ci] =	mpi->m_flops[ri][ci] / mpi->fillin[ri][ci] ;/* new */
+			erri:
+ifelse(RSB_M4_IS_ACC_WRITING_KERNEL_MOP(mop),`1',`dnl
+			RSB_CONDITIONAL_FREE(row_sums);
+')dnl
+ifelse(RSB_M4_IS_SPXX_TWO_VECTORS_OPERATING_KERNEL_MOP(mop),1,`dnl
+			RSB_CONDITIONAL_FREE(out);
+			RSB_CONDITIONAL_FREE(rhs);
+')dnl
+ifelse(mop,`scale',`dnl
+			RSB_CONDITIONAL_FREE(scale_factors);
+')dnl
+			RSB_MTX_FREE(mtxAp);
+			if(RSB_SOME_ERROR(errval)){rsb__do_perror(NULL,errval);return errval;}
+
+			if( ( flags & RSB_FLAG_AUTO_BLOCKING ) != 0)
+				return errval;/* no need for further benchmarks (FIXME : a temporary hack! ) */
+		}
+	}
+	return errval;
+}
+')dnl
+dnl
+popdef(`args')dnl
+popdef(`want_what')dnl
+popdef(`mtype')dnl
+popdef(`mop')dnl
+dnl
+')dnl
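
The want_what argument makes the macro above a small query interface: the
same invocation yields the identifier, the argument list, a declaration,
or the whole definition, which keeps prototypes and bodies in sync by
construction. An illustrative expansion for mop=spmv_uaua and mtype=double,
assuming RSB_M4_PREFIX expands to `rsb_' (stand-in typedefs included so
the fragment is self-contained):

typedef int rsb_err_t; /* stand-ins for brevity */
typedef int rsb_coo_idx_t, rsb_nnz_idx_t, rsb_flags_t;
struct rsb_mop_performance_info_t; /* opaque here */

/* want_what=function_declaration: */
rsb_err_t rsb_do_fullrangebenchmark_double_spmv_uaua(
	void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA,
	rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols,
	struct rsb_mop_performance_info_t * mpi, rsb_flags_t flags);
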
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION_ACTUAL_ARGS(mop,mtype)
+dnl	-----------------------------------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION_ACTUAL_ARGS',`dnl
+pushdef(`mop',$1)dnl
+pushdef(`mtype',$2)dnl
+dnl
+dnl	FIXME
+dnl
+popdef(`mtype')dnl
+popdef(`mop')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION_ARGS(mtype)
+dnl	------------------------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION_ARGS',`dnl
+pushdef(`mtype',$1)dnl
+dnl
+(const char * filename, struct rsb_mops_performance_info_t * mspi)dnl
+dnl
+popdef(`mtype')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION_IDENTIFIER(mtype)
+dnl	------------------------------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION_IDENTIFIER',`dnl
+pushdef(`mtype',$1)dnl
+dnl
+`rsb_do_completetypebenchmark_'RSB_M4_CHOPSPACES(mtype)`'dnl
+dnl
+popdef(`mtype')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION_NAME(mtype)
+dnl	------------------------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION_NAME',`dnl
+pushdef(`mtype',$1)dnl
+dnl
+rsb_err_t RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION_IDENTIFIER(mtype)`'dnl
+dnl
+popdef(`mtype')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION(mtype)
+dnl	-------------------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION',`dnl
+dnl
+pushdef(`mtype',$1)dnl
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+',`
+static RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION_NAME(mtype)dnl
+RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION_ARGS(mtype)dnl
+RSB_M4_DEBUGINFO(``$0'')dnl
+{
+        /*!
+	 * \ingroup gr_bench
+	 * Will benchmark all supported matrix operations over the "mtype" type,
+	 * over all supported matrix partitionings, for a fixed block size.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t * IA=NULL,*JA=NULL;
+	rsb_coo_idx_t rows=0,cols=0;
+	rsb_nnz_idx_t nnz=0;
+	void *VA=NULL;
+
+	struct rsb_mop_performance_info_t * mpi = &(mspi->pipmo[0]);
+	rsb_flags_t typecode = RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype),flags=0;
+
+	RSB_BZERO_P(mspi); /* zero the whole pointed-to struct */
+
+	if((rsb__util_mm_load_matrix_f(filename,&IA,&JA,&VA,&rows,&cols,&nnz,typecode,flags,NULL,NULL))!=0)
+	{
+		RSB_STDERR(RSB_ERRMSG_NOTMTXMKT" : %s ..\n",filename);
+		goto err;
+	}
+	
+pushdef(`mopcode',0)dnl
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+pushdef(`mopcode',incr(mopcode))dnl
+
+	/* we benchmark our mtype library implementation for operation mop */
+	errval = dnl
+RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION(mop,mtype,`function_identifier')dnl
+(RSB_M4_ARGS_TO_ACTUAL_ARGS((RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION(mop,mtype,`function_args'))));
+	++mpi;
+	if(RSB_SOME_ERROR(errval))goto err;
+')dnl
+	mpi-=mopcode;
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+popdef(`mopcode')dnl
+')dnl
+popdef(`mopcode')dnl
+dnl
+	
+dnl		performance info dumpout
+
+pushdef(`mopcode',0)dnl
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+pushdef(`mopcode',incr(mopcode))dnl
+	/* FIXME : WE SHOULD DUMP OUT PERFORMANCE INFORMATION HERE ! */
+	errval = rsb__dump_performance_info(mpi,"RSB_M4_DUMP_PERFOMANCE_INFO_RECORD_IDENTIFIER(mtype,mop)");
+	if(RSB_SOME_ERROR(errval))goto err;
+	++mpi;
+')dnl
+	mpi-=mopcode;
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+popdef(`mopcode')dnl
+')dnl
+popdef(`mopcode')dnl
+dnl
+
+	err:
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_CONDITIONAL_FREE(JA);
+	RSB_CONDITIONAL_FREE(VA);
+	return errval;
+}
+popdef(`mtype')dnl
+')dnl
+')dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_DUMP_PERFORMANCE_INFO_ARRAY_FUNCTION_IDENTIFIER',`dnl
+dnl
+RSB_M4_PREFIX`dump_performance_array'dnl
+dnl
+')dnl
+dnl
+define(`RSB_M4_DUMP_PERFORMANCE_INFO_ARRAY_FUNCTION_ARGS',`dnl
+dnl
+`(const char * an, const double*array)'dnl
+dnl
+')dnl
+dnl
+dnl
+dnl	RSB_M4_DUMP_PERFORMANCE_INFO_ARRAY_FUNCTION
+dnl	-------------------------------------------
+dnl	FIXME : this should go in some other file
+dnl
+define(`RSB_M4_DUMP_PERFORMANCE_INFO_ARRAY_FUNCTION',`dnl
+dnl
+rsb_err_t` 'dnl
+RSB_M4_DUMP_PERFORMANCE_INFO_ARRAY_FUNCTION_IDENTIFIER()dnl
+RSB_M4_DUMP_PERFORMANCE_INFO_ARRAY_FUNCTION_ARGS()dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`;
+',`
+RSB_M4_DEBUGINFO(``$0'')dnl
+{
+	/*!
+	 * \ingroup gr_bench
+	 * A benchmark info dumping function.
+         *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 * FIXME : UNFINISHED
+	 */
+#if RSB_ALLOW_STDOUT
+	int ri,ci;
+	rsb_blk_idx_t rua[]=RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[]=RSB_COLUMNS_UNROLL_ARRAY;
+	if(!array || !an)
+		return RSB_ERR_BADARGS;
+
+/*	RSB_STDOUT("const double %s [RSB_ROWS_UNROLL_ARRAY_LENGTH][RSB_COLUMNS_UNROLL_ARRAY_LENGTH] = \n",an);*/
+	RSB_STDOUT(".%s = \n",an);
+	RSB_STDOUT("{");
+	RSB_STDOUT("\t/*");
+	for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci) RSB_STDOUT("%d, ",cua[ci]);
+	RSB_STDOUT("columns per block */\n");
+		
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		RSB_STDOUT("\t{");
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+			if(ci)RSB_STDOUT(",");
+			RSB_STDOUT(" %lg",array[ri*RSB_ROWS_UNROLL_ARRAY_LENGTH+ci]);
+		}
+		RSB_STDOUT(" }, /* %d rows per block */\n",rua[ri]);
+	}
+	RSB_STDOUT("},\n");
+	return RSB_ERR_NO_ERROR;
+#else /* RSB_ALLOW_STDOUT */
+	return RSB_ERR_UNSUPPORTED_FEATURE;
+#endif /* RSB_ALLOW_STDOUT */
+}
+dnl
+')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_DUMP_PERFOMANCE_INFO_RECORD_IDENTIFIER(MTYPE,MOP)
+dnl	--------------------------------------------------------
+dnl
+dnl
+define(`RSB_M4_DUMP_PERFOMANCE_INFO_RECORD_IDENTIFIER',`dnl
+pushdef(`mtype',$1)dnl
+pushdef(`mop',$2)dnl
+dnl
+`pi_'RSB_M4_CHOPSPACES(mtype)`_'mop`'dnl
+dnl
+popdef(`mop')dnl
+popdef(`mtype')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION_ACTUAL_ARGS(mtype)
+dnl	-------------------------------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION_ACTUAL_ARGS',`dnl
+pushdef(`mtype',$1)dnl
+dnl
+RSB_M4_ARGS_TO_ACTUAL_ARGS(RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION_ARGS(mtype))dnl
+dnl
+popdef(`mtype')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETEBENCHMARK_FUNCTION_ARGS()
+dnl	---------------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETEBENCHMARK_FUNCTION_ARGS',`dnl
+dnl
+(const int argc, char *const argv[])dnl
+dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_ESTIMATE_MFLOPS_PER_MOP_FUNCTION_IDENTIFIER(mop)
+dnl	-------------------------------------------------------
+dnl
+define(`RSB_M4_ESTIMATE_MFLOPS_PER_MOP_FUNCTION_IDENTIFIER',`dnl
+pushdef(`mop',$1)dnl
+dnl
+RSB_M4_PREFIX`estimate_mflops_per_op_'mop`'dnl
+dnl
+popdef(`mop')dnl
+')dnl
+dnl
+dnl
+dnl	RSB_M4_ESTIMATE_MFLOPS_PER_MOP_FUNCTION_ARGS(mop)
+dnl	-------------------------------------------------
+dnl
+define(`RSB_M4_ESTIMATE_MFLOPS_PER_MOP_FUNCTION_ARGS',`dnl
+pushdef(`mop',$1)dnl
+dnl
+`(const struct rsb_mtx_t * mtxAp)'dnl
+dnl
+popdef(`mop')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_ESTIMATE_MFLOPS_PER_MOP_FUNCTION(mop)
+dnl	--------------------------------------------
+dnl
+define(`RSB_M4_ESTIMATE_MFLOPS_PER_MOP_FUNCTION',`dnl
+pushdef(`mop',$1)dnl
+dnl
+`double 'dnl
+RSB_M4_ESTIMATE_MFLOPS_PER_MOP_FUNCTION_IDENTIFIER(mop)dnl
+RSB_M4_ESTIMATE_MFLOPS_PER_MOP_FUNCTION_ARGS(mop)dnl
+ifdef(`ONLY_WANT_HEADERS',`;
+',`
+RSB_M4_DEBUGINFO(``$0'')dnl
+{
+	/*!
+	 * \ingroup gr_internals
+	 * A function which returns the approximate count of floating point operations
+	 * needed to perform the "mop" matrix operation.
+	 * In the case of symmetric/hermitian matrices, the operation count is multiplied by two.
+	 * In the case of a complex type, each multiply-accumulate is counted as eight real operations:
+	 * (a+bi)*(c+di) = (ac-bd)+(ad+bc)i
+	 * accounts for four real multiplications and two real additions, and accumulating
+	 * the complex result costs two more additions (hence the 8*Ec vs. 2*Ec factors below).
+	 * FIXME: complexity is NOT taken into consideration for non-SPMV/SPSV operations.
+	 */
+
+	const double M_  = 1000000.0;
+	const double Ec = ((double)mtxAp->element_count); double Me = Ec;
+ifelse(RSB_M4_IS_SPXX_KERNEL_MOP(mop),`1',`dnl
+	if(RSB_IS_MATRIX_TYPE_COMPLEX(mtxAp->typecode)) { Me=8*Ec; } else { Me=2*Ec; }
+')dnl
+ifelse(RSB_M4_IS_SPMV_KERNEL_MOP(mop),`1',`dnl
+	if(rsb__is_not_unsymmetric(mtxAp)){ Me*=2; }/* slightly optimistic : should subtract the diagonal elements count */
+')dnl
+ifelse(RSB_M4_IS_ACC_WRITING_KERNEL_MOP(mop),`1',`dnl
+')dnl
+ifelse(mop,`negation',`dnl
+')dnl
+ifelse(mop,`scale',`dnl
+')dnl
+	Me /= M_;
+	return Me;
+}
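+/*
+ * Illustrative note (not part of the generated code): by the estimate above,
+ * a complex symmetric SPMV on a matrix with Ec stored elements is counted as
+ * 8*Ec (complex multiply-accumulate) * 2 (symmetry) = 16*Ec real operations,
+ * i.e. 16*Ec/1e6 millions of operations; a real unsymmetric SPMV as 2*Ec/1e6.
+ */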
+dnl
+')dnl
+popdef(`mop')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETEBENCHMARK_FUNCTION_IDENTIFIER()
+dnl	---------------------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETEBENCHMARK_FUNCTION_IDENTIFIER',`dnl
+dnl
+`rsb_do_completebenchmark'dnl
+dnl
+dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETEBENCHMARK_FUNCTION_NAME()
+dnl	---------------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETEBENCHMARK_FUNCTION_NAME',`dnl
+dnl
+rsb_err_t`' RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETEBENCHMARK_FUNCTION_IDENTIFIER`'dnl
+dnl
+dnl
+')dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETEBENCHMARK_FUNCTION()
+dnl	----------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETEBENCHMARK_FUNCTION',`dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+',`
+RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETEBENCHMARK_FUNCTION_NAME`'dnl
+RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETEBENCHMARK_FUNCTION_ARGS`'dnl
+RSB_M4_DEBUGINFO(``$0'')dnl
+{
+	/*!
+	 * \ingroup gr_bench
+	 * A complete benchmark program.
+	 * Will benchmark all supported matrix operations over all supported types
+	 * over all supported matrix partitionings for a fixed block size.
+         *
+	 * \return \rsb_errval_inp_param_msg
+         *
+	 * FIXME : UNFINISHED: should process and dump this info in a header file.
+	 */
+	struct rsb_global_performance_info_t mspis;
+	struct rsb_mops_performance_info_t * mspi = &(mspis.gpi[0]);
+
+	rsb_option options[] = {
+	    {"matrix-filename",	required_argument, NULL, 0x66},  /* f */
+	    {0,0,0,0}
+	};
+	const char * filename=NULL;
+	int c=0;
+	int opt_index=0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(rsb_lib_init(RSB_NULL_INIT_OPTIONS))goto err;
+
+	for (;;)
+	{
+		c = rsb_getopt_long(argc, argv, "f:" , options, &opt_index);/* Flawfinder: ignore */
+		if (c == -1)break;
+		switch (c)
+		{
+			case 0x66:/* f */
+			filename = optarg;
+			break;
+	    	}
+	}
+
+foreach(`mtype',types,`dnl
+
+	errval=dnl
+RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION_IDENTIFIER(mtype)dnl
+(RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION_ACTUAL_ARGS(mtype));
+	if(RSB_SOME_ERROR(errval)) return errval;
+	++mspi;
+')dnl
+
+	if( rsb_lib_exit(RSB_NULL_EXIT_OPTIONS) )
+		return RSB_ERR_INTERNAL_ERROR;
+	return RSB_ERR_NO_ERROR;
+	err:
+	return RSB_ERR_INTERNAL_ERROR;
+}
+')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_MULTI_BLOCK_KERNEL_TYPE_DISPATCH_FUNCTION(types,mop,want_what)
+dnl	---------------------------------------------------------------------
+dnl
+dnl	EDIT THIS MACRO TO SPECIFY ARGS TO NEW KERNELS
+dnl
+define(`RSB_M4_MULTI_BLOCK_KERNEL_TYPE_DISPATCH_FUNCTION',`dnl
+pushdef(`types',$1)dnl
+pushdef(`mop',$2)dnl
+pushdef(`want_what',$3)dnl
+pushdef(`args',`$1,$2')dnl
+dnl
+dnl
+ifelse(want_what,`function_identifier',`dnl
+RSB_M4_PREFIX`do_'mop`_with_macros_vbr'dnl
+')dnl
+dnl
+ifelse(want_what,`function_declaration',`dnl
+rsb_err_t $0(args,`function_identifier')dnl
+($0(args,`function_args'));dnl
+')dnl
+dnl
+ifelse(want_what,`function_args',`dnl
+dnl
+dnl
+ifelse(RSB_M4_IS_READONLY_KERNEL_MOP(mop),1,`dnl
+const struct rsb_mtx_t * mtxAp`'dnl
+',`dnl
+struct rsb_mtx_t * mtxAp`'dnl
+')dnl
+ifelse(RSB_M4_IS_SPXX_TWO_VECTORS_OPERATING_KERNEL_MOP(mop),1,`dnl
+,const void * RSB_M4_RESTRICT rhs, void * RSB_M4_RESTRICT out`'dnl
+')dnl
+ifelse(RSB_M4_IS_OP_SCALING_KERNEL_MOP(mop),`1',`dnl
+,const void * alphap`'dnl
+')dnl
+ifelse(RSB_M4_IS_SCALING_KERNEL_MOP(mop),`1',`dnl
+,const void * betap`'dnl
+')dnl
+ifelse(RSB_M4_IS_STRIDED_KERNEL_MOP(mop),`1',`dnl
+,rsb_coo_idx_t incx, rsb_coo_idx_t incy`'dnl
+')dnl
+dnl
+,const rsb_trans_t transA`'dnl
+ifelse(mop,`scale',`dnl
+,const void * scale_factors`'dnl
+')dnl
+ifelse(RSB_M4_IS_ACC_WRITING_KERNEL_MOP(mop),`1',`dnl
+,void * row_sums`'dnl
+')dnl
+ifelse(mop,`negation',`dnl
+,int please_fix_RSB_M4_ARGS_TO_ACTUAL_ARGS`'dnl
+')dnl
+dnl
+dnl
+dnl
+')dnl
+dnl
+ifelse(want_what,`function_definition',`dnl
+rsb_err_t $0(args,`function_identifier')dnl
+($0(args,`function_args'))
+{
+RSB_M4_DEBUGINFO(``$0'')dnl
+	/*!
+	 * \ingroup rsb_doc_kernels
+	 * Kernel function dispatching will be performed inline, after type dispatching, in a separate function.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+dnl
+dnl	removed old junk in revision 625 ...
+dnl
+	return RSB_ERR_UNSUPPORTED_TYPE;
+}
+')dnl body
+dnl
+popdef(`args')dnl
+popdef(`want_what')dnl
+popdef(`mop')dnl
+popdef(`types')dnl
+dnl
+')dnl
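+dnl
+dnl	Illustration (an inference from the predicates above, not a verified
+dnl	expansion): for a read-only, two-vector, non-scaling, non-strided mop
+dnl	such as spmv_uaua, the `function_args' branch would assemble
+dnl	(const struct rsb_mtx_t * mtxAp, const void * rhs, void * out, const rsb_trans_t transA).
+dnl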
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_TESTING_FUNCTION(types,mop)
+dnl	---------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_TESTING_FUNCTION',`dnl
+dnl
+pushdef(`types',$1)dnl
+pushdef(`mop',$2)dnl
+dnl
+dnl	
+#ifdef RSB_WANT_KERNELS_DEBUG
+rsb_err_t RSB_M4_PREFIX`'mop`_testing'dnl
+RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_ARGS(mop)dnl
+ifdef(`ONLY_WANT_HEADERS',`;
+',`
+{
+RSB_M4_DEBUGINFO(``$0'')dnl
+	/*!
+	 * \ingroup gr_debug
+	 * This is a trivial reference implementation of the "mop" kernel; its
+	 * numerical results serve as a reference to expose bugs which may be
+	 * introduced in the performance-oriented computational kernels.
+	 * 
+	 * It should be used for debugging, or for comparison against the
+	 * performance-optimized functions.
+         *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	
+	register rsb_coo_idx_t baserow = RSB_INI,basecolumn = RSB_INI,rows = RSB_INI,columns = RSB_INI;
+	register rsb_coo_idx_t blockrow = RSB_INI,blockcolumn = RSB_INI;
+dnl	register char *bp=0;
+	register rsb_byte_t *bp=0;
+ifelse(RSB_M4_NOT(RSB_M4_IS_STRIDED_KERNEL_MOP(mop)),1,`dnl
+	rsb_coo_idx_t incx=1,incy=1;
+	incx=1,incy=1;	/* just to avoid "unused variable"-like warnings */
+')dnl
+
+	if(!mtxAp /*|| !mtxAp->options*/ )return RSB_ERR_BADARGS;
+	{
+	RSB_GET_FIRST_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+foreach(`mtype',types,`dnl
+	if(mtxAp->`typecode' == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype))
+	{
+
+ifelse(RSB_M4_IS_SCALING_KERNEL_MOP(mop),1,`dnl
+	rsb__cblas_Xscal(RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype), betap, mtxAp->nr, out, 1);/* we scale the destination vector */
+')dnl
+ifelse(RSB_M4_IS_ZEROING_KERNEL_MOP(mop),1,`dnl
+	rsb__cblas_Xscal(RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype),mtxAp->nr,NULL,out,incy);
+')dnl
+ifelse(RSB_M4_IS_SPXM_KERNEL_MOP(mop),`1',`dnl
+dnl	if(mtxAp && mout) rsb__cblas_Xscal(mtxAp->typecode,nrhs*mtxAp->nr,NULL,mout,incy);// NEW
+	if(mtxAp && out) rsb__cblas_Xscal(mtxAp->typecode,nrhs*mtxAp->nr,NULL,out,incy);// NEW
+')dnl
+	
+	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+	{
+ifelse(mop,`scale',`dnl
+		mtype* a = (mtype*)bp;
+		rsb_coo_idx_t i,j;
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				a[i*columns+j]*=((const mtype*)scale_factors)[i];
+')dnl
+ifelse(RSB_M4_IS_SPMV_KERNEL_MOP(mop),1,`dnl
+		const mtype* a = (const mtype*)bp;
+		const mtype* b = ((const mtype*)rhs)+mtxAp->cpntr[blockcolumn];
+		mtype* c = ((mtype*)out)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j;
+		c=c;/* here just to prevent a compiler warning */
+
+#if 0
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				c[i]+=a[i*columns+j]*b[j];
+#else
+		/*
+		 * this code will emulate the same kernel order!
+		 * it should generate the same numerical roundoff errors the current kernel would
+		 * */
+		for(i=0;i<rows;++i)
+		{
+			mtype rs=0;
+			for(j=0;j<columns;++j)
+				rs+=a[i*columns+j]*b[j];
+ifelse(mop,`spmv_uaua',`dnl
+			c[i]+=rs;
+');
+ifelse(mop,`spmv_unua',`dnl
+			c[i]-=rs;
+');
+		}
+#endif /* 0 */
+
+')dnl
+ifelse(RSB_M4_IS_SPXM_KERNEL_MOP(mop),`1',`dnl
+		const mtype* a = (const mtype*)bp;
+dnl		const mtype* b = ((const mtype*)mrhs)+mtxAp->cpntr[blockcolumn];
+		const mtype* b = ((const mtype*)rhs)+mtxAp->cpntr[blockcolumn];
+		mtype* c = ((mtype*)out)+mtxAp->rpntr[blockrow];
+dnl		mtype* c = ((mtype*)mout)+mtxAp->rpntr[blockrow];
+		rsb_coo_idx_t i,j,k;
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				for(k=0;k<nrhs;++k)
+					c[k*cstride+i]+=a[i*columns+j]*b[j+k*bstride];
+
+')dnl
+ifelse(RSB_M4_MEMBER(mop,`spsv_uxua',`spsv_sxsx'),1,`dnl
+/*	FIXME : UNFINISHED */
+')dnl
+ifelse(RSB_M4_IS_STRIDED_KERNEL_MOP(mop),1,`dnl
+	dnl	rsb_coo_idx_t incx=1,incy=1;
+')dnl
+ifelse(RSB_M4_MEMBER(mop,`spmv_sxsx',`spsv_sa',`spmv_uxux'),1,`dnl
+/*	FIXME : UNFINISHED */
+')dnl
+ifelse(RSB_M4_IS_ACC_WRITING_KERNEL_MOP(mop),`1',`dnl
+		const mtype* a = (const mtype*)bp;
+		mtype* row_sums_=row_sums;
+		rsb_coo_idx_t i,j;
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+ifelse(mop,`infty_norm',`dnl
+				row_sums_[mtxAp->rpntr[blockrow]+i]+=RSB_M4_ABS(mtype,a[i*columns+j]);
+')dnl
+ifelse(mop,`rowssums',`dnl
+				row_sums_[mtxAp->rpntr[blockrow]+i]+=a[i*columns+j];
+')dnl
+')dnl
+	
+ifelse(mop,`negation',`dnl
+		mtype* a = (mtype*)bp;
+		rsb_coo_idx_t i,j;
+		for(i=0;i<rows;++i)
+			for(j=0;j<columns;++j)
+				a[i*columns+j]=-a[i*columns+j];
+')dnl
+		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,baserow,basecolumn,rows,columns,blockrow,blockcolumn);
+	}
+	}
+	else
+')dnl
+	{
+		RSB_ERROR("Sorry, data type \"%c\" currently not supported.\n",mtxAp->typecode);
+		return RSB_ERR_UNSUPPORTED_TYPE;
+	}
+	}
+	return RSB_ERR_NO_ERROR;	
+}
+dnl
+')dnl
+dnl
+#endif /* RSB_WANT_KERNELS_DEBUG */
+dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION_ACTUAL_ARGS(mtype)
+dnl	-------------------------------------------------------------------------------
+dnl
+define(`RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION_ACTUAL_ARGS',`dnl
+pushdef(`mtype',$1)dnl
+dnl
+RSB_M4_ARGS_TO_ACTUAL_ARGS(RSB_M4_DIRECT_KERNEL_DISPATCH_COMPLETETYPEBENCHMARK_FUNCTION_ARGS(mtype))dnl
+dnl
+popdef(`mtype')dnl
+')dnl
+dnl
+dnl
diff --git a/rsb_krnl_vb.c b/rsb_krnl_vb.c
new file mode 100644
index 0000000..467c4b1
--- /dev/null
+++ b/rsb_krnl_vb.c
@@ -0,0 +1,40 @@
+/* @cond INNERDOC */
+/**
+ @file
+ @brief
+
+ Performance kernels dispatching code, for each type, submatrix size, operation.
+ For variable block partitioned matrices.
+
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+#include "rsb_internals.h"
+/* Function definitions */
+/* @endcond */
diff --git a/rsb_krnl_vb.h b/rsb_krnl_vb.h
new file mode 100644
index 0000000..1ca0297
--- /dev/null
+++ b/rsb_krnl_vb.h
@@ -0,0 +1,43 @@
+/* @cond INNERDOC */
+/**
+ @file
+ @brief
+
+ Performance kernels dispatching code, for each type, submatrix size, operation.
+ For variable block partitioned matrices.
+
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+#ifndef RSB_VBR_X_H_INCLUDED
+#define RSB_VBR_X_H_INCLUDED 
+#include "rsb_internals.h"
+/* Function definitions */
+#endif /* RSB_VBR_X_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_krnl_vb.m4 b/rsb_krnl_vb.m4
new file mode 100644
index 0000000..a307acb
--- /dev/null
+++ b/rsb_krnl_vb.m4
@@ -0,0 +1,56 @@
+dnl
+dnl
+dnl	@author: Michele Martone
+dnl
+/* @cond INNERDOC */
+dnl
+/**
+ @file
+ @brief
+
+ Performance kernels dispatching code, for each type, submatrix size, operation.
+ For variable block partitioned matrices.
+
+ */
+dnl
+include(`rsb_misc.m4')dnl
+RSB_M4_HEADER_MESSAGE()dnl
+include(`rsb_krnl_vb_macros.m4')dnl
+dnl
+dnl
+dnl	---------------------------------------------------------------------------	dnl
+dnl				Function definitions
+dnl	---------------------------------------------------------------------------	dnl
+dnl
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#ifndef RSB_VBR_X_H_INCLUDED
+#define RSB_VBR_X_H_INCLUDED 
+')dnl
+dnl 
+dnl
+dnl
+#include "rsb_internals.h"
+dnl
+dnl	VARIABLE BLOCK SIZE KERNELS :
+dnl	:
+dnl foreach(`matrix_storage',(`vbr',`block',WANT_MATRIX_VB_STORAGE),`dnl
+dnl
+/* Function definitions */
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+foreach(`matrix_storage',(WANT_MATRIX_VB_STORAGE),`dnl
+foreach(`unrolling',(`l',`u'),`dnl
+RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION(type,matrix_storage,,,unrolling,mop)dnl
+')dnl
+')dnl
+')dnl
+')dnl
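+dnl
+dnl	Note: the nested foreach above emits one dispatcher per (type, mop,
+dnl	storage, unrolling) combination: a prototype only when ONLY_WANT_HEADERS
+dnl	is defined, a full definition otherwise (see
+dnl	RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION in rsb_krnl_vb_macros.m4).
+dnl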
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#endif /* RSB_VBR_X_H_INCLUDED */
+')dnl
+dnl
+/* @endcond */
+dnl
diff --git a/rsb_krnl_vb_macros.m4 b/rsb_krnl_vb_macros.m4
new file mode 100644
index 0000000..ff3ad6c
--- /dev/null
+++ b/rsb_krnl_vb_macros.m4
@@ -0,0 +1,410 @@
+dnl
+dnl
+dnl	@author: Michele Martone
+dnl
+dnl	The generated code will expand from here
+dnl	Take care of compiling this code without loop unrolling optimizations (-fno-unroll-loops, or -ON with N<=2 on gcc)
+dnl
+include(`rsb_misc.m4')dnl
+include(`do_unroll.m4')dnl
+dnl
+dnl
+dnl
+dnl	FIXME : THERE ARE NO TRANSPOSED KERNELS
+dnl
+dnl
+dnl
+dnl	RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_NAME(mtype,matrix_storage,transposition,symmetry,b_rows,b_columns,unrolling,mop)
+dnl	---------------------------------------------------------------------------------------------------------------------------
+dnl
+define(`RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_NAME',`dnl
+dnl
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`matrix_storage',$2)dnl
+pushdef(`transposition',$3)dnl
+pushdef(`symmetry',$4)dnl
+pushdef(`b_rows',$5)dnl		block rows
+pushdef(`b_columns',$6)dnl	block columns
+pushdef(`itype',`rsb_coo_idx_t')dnl integer type (for indices)
+pushdef(`unrolling',$7)dnl	
+pushdef(`mop',$8)dnl	
+pushdef(`citype',$9)dnl	
+dnl
+dnl pushdef(`diagonal',ifelse($10,`',`e',diagonal,uplo))dnl	FIXME: for now this parameter is optional
+pushdef(`diagonal',$10)dnl	FIXME: new
+pushdef(`uplo',$11)dnl
+dnl
+RSB_M4_PREFIX`'matrix_storage`_'mop`_'RSB_M4_TYPE_CODE(mtype)`_'dnl
+RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_CHARCODE(citype)`_'dnl
+ifelse(matrix_storage,`fixed_block',`_r'b_rows`_c'b_columns)`'dnl
+`_t'touppercase(RSB_M4_TRANSPOSITION_CODE(transposition))`'dnl
+ifelse(RSB_M4_IS_FORMAT_BCSS(matrix_storage),1,`_r'b_rows`_c'b_columns)`'dnl
+ifelse(RSB_M4_IS_FORMAT_BCOO(matrix_storage),1,`_r'b_rows`_c'b_columns)`'dnl
+dnl ifelse(matrix_storage,`VBR',`_r'b_rows`_c'b_columns)`'dnl
+dnl ifelse(matrix_storage,`VBC',`_r'b_rows`_c'b_columns)`'dnl
+`_u'unrolling`_s'touppercase(symmetry)`'dnl
+`_d'touppercase(diagonal)`'dnl
+`_u'touppercase(uplo)`'dnl
+popdef(`uplo')dnl
+popdef(`citype')dnl
+popdef(`mop')dnl
+popdef(`mtype')dnl
+popdef(`matrix_storage')dnl
+popdef(`symmetry')dnl
+popdef(`transposition')dnl
+popdef(`b_rows')dnl
+popdef(`b_columns')dnl
+popdef(`itype')dnl
+popdef(`unrolling')dnl
+popdef(`diagonal')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_ARGS(mtype,matrix_storage,b_rows,b_columns,unrolling,mop)
+dnl	-----------------------------------------------------------------------------------------------
+dnl
+define(`RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_ARGS',`dnl
+dnl
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`matrix_storage',$2)dnl
+pushdef(`b_rows',$3)dnl		block rows
+pushdef(`b_columns',$4)dnl	block columns
+pushdef(`itype',`rsb_coo_idx_t')dnl integer type (for indices)
+pushdef(`unrolling',$5)dnl	
+pushdef(`mop',$6)dnl	
+pushdef(`matrix_structs',`const itype Mdim, const itype mdim, const rsb_nnz_idx_t * RSB_M4_RESTRICT bindx, const rsb_nnz_idx_t * RSB_M4_RESTRICT bpntr, const rsb_nnz_idx_t *RSB_M4_RESTRICT indptr, const rsb_coo_idx_t * RSB_M4_RESTRICT rpntr, const rsb_coo_idx_t * RSB_M4_RESTRICT cpntr, const rsb_coo_idx_t dummy_br, const rsb_coo_idx_t dummy_bc')dnl	
+(
+ifelse(mop,`scale',`dnl
+	mtype * VA, 
+	matrix_structs, 
+	const mtype *scale_factors
+')dnl
+ifelse(RSB_M4_MEMBER(mop,`spmv_uauz',`spmv_uaua',`spmv_unua'),1,`dnl
+	const mtype * VA, const mtype * rhs, mtype * out,
+	matrix_structs
+')dnl
+ifelse(mop,`spmm_az',`dnl
+	const mtype * VA, const mtype * rhs, mtype * out,
+	matrix_structs,
+	const itype bstride, const itype cstride, const itype nrhs
+')dnl
+ifelse(mop,`spmv_uxux',`dnl
+	const mtype * VA, const mtype * rhs, mtype * out,
+	matrix_structs,
+	const mtype * alphap, const mtype * betap
+')dnl
+ifelse(mop,`infty_norm',`dnl
+ifelse(matrix_storage,`_fixed_block',`dnl
+	const mtype * VA, mtype * local_row_sums,
+	matrix_structs
+',`dnl
+	const mtype * VA, mtype * global_row_sums, 
+	matrix_structs
+')dnl
+')dnl
+ifelse(mop,`negation',`dnl
+ifelse(matrix_storage,`_fixed_block',`dnl
+	mtype * VA, 
+	matrix_structs
+',`dnl
+	mtype * VA, 
+	matrix_structs
+')dnl
+')dnl
+)dnl
+popdef(`matrix_structs')dnl	
+popdef(`mop')dnl
+popdef(`mtype')dnl
+popdef(`matrix_storage')dnl
+popdef(`b_rows')dnl
+popdef(`b_columns')dnl
+popdef(`itype')dnl
+popdef(`unrolling')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_PROTOTYPE(mtype,matrix_storage,transposition,b_rows,b_columns,unrolling,mop)
+dnl	----------------------------------------------------------------------------------------------------
+dnl
+define(`RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_PROTOTYPE',`dnl
+dnl
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`matrix_storage',$2)dnl
+pushdef(`transposition',$3)dnl
+pushdef(`b_rows',$4)dnl		block rows
+pushdef(`b_columns',$5)dnl	block columns
+pushdef(`itype',`rsb_coo_idx_t')dnl integer type (for indices)
+pushdef(`unrolling',$6)dnl	
+pushdef(`mop',$7)dnl	
+rsb_err_t RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_NAME(mtype,matrix_storage,transposition,symmetry,b_rows,b_columns,unrolling,mop)dnl
+RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_ARGS(mtype,matrix_storage,b_rows,b_columns,unrolling,mop)dnl
+popdef(`mop')dnl
+popdef(`mtype')dnl
+popdef(`matrix_storage')dnl
+popdef(`transposition')dnl
+popdef(`b_rows')dnl
+popdef(`b_columns')dnl
+popdef(`itype')dnl
+popdef(`unrolling')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_BODY(mtype,matrix_storage,b_rows,b_columns,unrolling,mop)
+dnl	-----------------------------------------------------------------------------------------------
+dnl
+define(`RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_BODY',`dnl
+dnl
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`matrix_storage',$2)dnl
+pushdef(`b_rows',$3)dnl		block rows
+pushdef(`b_columns',$4)dnl	block columns
+pushdef(`itype',`rsb_coo_idx_t')dnl integer type (for indices)
+pushdef(`unrolling',$5)dnl
+pushdef(`mop',$6)dnl
+{
+ifelse(RSB_M4_IS_FORMAT_COLUMN_MAJOR(matrix_storage),1,`dnl
+pushdef(`out_dim',rpntr[Mdim])dnl
+pushdef(`mi',`i')dnl
+pushdef(`Mi',`j')dnl
+')dnl
+ifelse(RSB_M4_IS_FORMAT_ROW_MAJOR(matrix_storage),1,`dnl
+pushdef(`out_dim',rpntr[mdim])dnl
+pushdef(`mi',`j')dnl
+pushdef(`Mi',`i')dnl
+')dnl
+dnl
+
+	/**
+	 * \ingroup rsb_doc_kernels
+ifelse(matrix_storage,`VBR',`dnl
+	 * This code is intended for a pure VBR partitioned matrix.
+	 * It does not dispatch the kernel function for each block,
+	 * but employs explicitly inlined kernels.
+')dnl
+	 *
+	 * \return \rsb_errval_inp_param_msg
+	 */
+	register rsb_coo_idx_t i,j;
+	register rsb_nnz_idx_t k;
+	rsb_coo_idx_t columns;
+	rsb_coo_idx_t rows   ;
+
+ifelse(mop,`spmv_uxux',`dnl
+	/* this is slow; however, this is column-based scanning. FIXME : optimize spmv_uxux */
+	rsb__cblas_Xscal(RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type), out_dim, betap, out, 1);/* we scale the destination vector */
+')dnl
+ifelse(mop,`spmv_uauz',`dnl
+	rsb__cblas_Xscal(RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type), out_dim, NULL, out, 1);// FIXME: stride is bad
+')dnl
+
+	for(Mi=0;Mi<Mdim;++Mi)
+	{
+		/* Mi is the working block row */
+		if(bpntr[Mi]==bpntr[Mi+1])continue;/* empty block row (or column) */
+		for(k=bpntr[Mi];k<bpntr[Mi+1];++k)	/* k is the index of the block */
+		{
+			/* mi is the working block column (or row) */
+			mi=bindx[k];
+ifelse(matrix_storage,`VBR',`dnl
+			columns=cpntr[j+1]-cpntr[j];
+			rows   =rpntr[i+1]-rpntr[i];
+')dnl
+ifelse(1,0,`dnl
+ifelse(RSB_M4_MEMBER(matrix_storage,`VBR',`VBC'),1,`dnl
+/* matrix_storage ! mop  */
+			DOUBLE_LINEAR_KERNEL_SEARCH_MACRO_IDENTIFIER(mop,mtype,unrolling)(rows,columns)
+ifelse(mop,`scale',`dnl
+			(
+					VA+indptr[(k)],
+					scale_factors+rpntr[i],
+					rows, columns
+					);
+')dnl
+ifelse(mop,`spmv_uauz',`dnl
+			(
+					VA+indptr[(k)],
+					rhs+cpntr[j],
+					out+rpntr[i],
+					rows, columns
+					);
+')dnl
+ifelse(mop,`spmv_unua',`dnl
+			(
+					VA+indptr[(k)],
+					rhs+cpntr[j],
+					out+rpntr[i],
+					rows, columns
+					);
+')dnl
+ifelse(mop,`spmv_uaua',`dnl
+			(
+					VA+indptr[(k)],
+					rhs+cpntr[j],
+					out+rpntr[i],
+					rows, columns
+					);
+')dnl
+ifelse(mop,`spmm_az',`dnl
+			(
+					VA+indptr[(k)],
+					rhs+cpntr[j],
+					out+rpntr[i],
+					rows, columns,
+					bstride, cstride, nrhs
+					);
+')dnl
+ifelse(mop,`spmv_uxux',`dnl
+			(
+					VA+indptr[(k)],
+					rhs+cpntr[j],
+					out+rpntr[i],
+					rows, columns,
+					alphap, betap
+					);
+')dnl
+ifelse(mop,`infty_norm',`dnl
+			(
+					VA+indptr[(k)],
+					global_row_sums+cpntr[j],
+					rows, columns
+					);
+')dnl
+ifelse(mop,`negation',`dnl
+			(
+					VA+indptr[(k)],
+					rows, columns
+					);
+')dnl
+')dnl
+')dnl
+ifelse(RSB_M4_MEMBER(matrix_storage,`VBR',`VBC'),1,`dnl
+ifelse(mop,`scale',`dnl
+			mtype *a = VA+indptr[(k)];
+			const mtype *d = scale_factors+rpntr[i];
+')dnl
+ifelse(RSB_M4_MEMBER(mop,`spmv_uaua',`spmv_unua',`spmv_uauz',`spmm_az',`spmv_uxux'),1,`dnl
+			const mtype *a = VA+indptr[(k)];
+			const mtype *b = rhs+cpntr[j];
+			mtype *c = out+rpntr[i];
+')dnl
+ifelse(mop,`infty_norm',`dnl
+			const mtype *a = VA+indptr[(k)];
+			mtype *local_row_sums = global_row_sums+rpntr[i];
+')dnl
+ifelse(mop,`negation',`dnl
+			mtype *a = VA+indptr[(k)];
+')dnl
+			columns=cpntr[j+1]-cpntr[j];
+			rows   =rpntr[i+1]-rpntr[i];
+			/* we jump to the right point */
+dnl
+ifelse(1,0,`
+#if 0
+/* it will not happen */
+#ifdef RSB_WANT_BLOCK_TRAILING_STRUCT	/* EXPERIMENTAL */
+ifelse(mop,`scale',`dnl
+			a = (mtype*) (((char*)a) + (RSB_BLOCK_EXTRA_BYTES)*(k+1)) ;
+')dnl
+ifelse(mop,`spmv_uauz',`dnl
+			a = (const mtype*) (((const char*)a) + (RSB_BLOCK_EXTRA_BYTES)*(k+1)) ;
+')dnl
+ifelse(mop,`spmm_az',`dnl
+			a = (const mtype*) (((const char*)a) + (RSB_BLOCK_EXTRA_BYTES)*(k+1)) ;
+')dnl
+ifelse(mop,`spmv_uxux',`dnl
+			a = (const mtype*) (((const char*)a) + (RSB_BLOCK_EXTRA_BYTES)*(k+1)) ;
+')dnl
+ifelse(mop,`infty_norm',`dnl
+			a = (const mtype*) (((const char*)a) + (RSB_BLOCK_EXTRA_BYTES)*(k+1)) ;
+')dnl
+ifelse(mop,`negation',`dnl
+			a = (mtype*) (((char*)a) + (RSB_BLOCK_EXTRA_BYTES)*(k+1)) ;
+')dnl
+dnl
+#endif /* RSB_WANT_BLOCK_TRAILING_STRUCT */
+#endif /* 0 */
+')dnl
+
+switch(rows)
+{
+foreach(`rowsu',RSB_M4_ROWS_UNROLL,`dnl
+	case rowsu:
+	switch(columns)
+	{
+foreach(`colsu',RSB_M4_COLUMNS_UNROLL,`dnl
+		case colsu:
+dnl			goto RSB_M4_KERNEL_FUNCTION_NAME(mtype,rowsu,colsu,`',mop,transposition,RSB_M4_DEFAULT_COORDINATE_INDEX_TYPE);
+{RSB_M4_KERNEL_FUNCTION_BODY(`row',`rows',rowsu,`column',`columns',colsu,mtype,,mop,`')}
+			break;
+')dnl
+	default: goto RSB_M4_KERNEL_FUNCTION_NAME(mtype,RSB_M4_ROWS_FALLBACK_UNROLL,RSB_M4_COLUMNS_FALLBACK_UNROLL,`l',mop,transposition,RSB_M4_DEFAULT_COORDINATE_INDEX_TYPE);
+	}
+	break;
+')dnl
+	default: goto RSB_M4_KERNEL_FUNCTION_NAME(mtype,RSB_M4_ROWS_FALLBACK_UNROLL,RSB_M4_COLUMNS_FALLBACK_UNROLL,`l',mop,transposition,RSB_M4_DEFAULT_COORDINATE_INDEX_TYPE);
+	RSB_M4_KERNEL_FUNCTION_NAME(mtype,RSB_M4_ROWS_FALLBACK_UNROLL,RSB_M4_COLUMNS_FALLBACK_UNROLL,`l',mop,transposition,RSB_M4_DEFAULT_COORDINATE_INDEX_TYPE):
+{RSB_M4_KERNEL_FUNCTION_BODY(`row',`rows',RSB_M4_ROWS_FALLBACK_UNROLL,`column',`columns',RSB_M4_COLUMNS_FALLBACK_UNROLL,mtype,,mop,`l')}
+};
+')dnl end VBR
+		}
+	}
+popdef(`mi')dnl
+popdef(`Mi')dnl
+dnl
+dnl
+dnl
+popdef(`out_dim')dnl
+	return RSB_ERR_NO_ERROR;
+}
+popdef(`mop')dnl
+popdef(`mtype')dnl
+popdef(`matrix_storage')dnl
+popdef(`b_rows')dnl
+popdef(`b_columns')dnl
+popdef(`itype')dnl
+popdef(`unrolling')dnl
+')dnl
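+dnl
+dnl	The switch machinery emitted above performs a two-level dispatch on the
+dnl	block dimensions, falling back to a generic (`l'-unrolled) body for
+dnl	unanticipated sizes. A minimal hand-written sketch of the same pattern
+dnl	(illustrative only, names hypothetical):
+dnl
+dnl		switch(rows){
+dnl		case 2:
+dnl			switch(columns){
+dnl			case 2: /* fully unrolled 2x2 body */ break;
+dnl			default: goto fallback;
+dnl			} break;
+dnl		default:
+dnl		fallback: /* generic rows x columns loop */ ;
+dnl		}
+dnl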
+dnl
+dnl
+dnl
+dnl	RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION(mtype,matrix_storage,transposition,b_rows,b_columns,unrolling,mop)
+dnl	------------------------------------------------------------------------------------------
+dnl
+dnl	These functions will perform their operations on fixed block matrices.
+dnl
+define(`RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION',`dnl
+dnl
+dnl
+pushdef(`mtype',$1)dnl
+pushdef(`matrix_storage',$2)dnl
+pushdef(`transposition',$3)dnl
+pushdef(`b_rows',$4)dnl		block rows
+pushdef(`b_columns',$5)dnl	block columns
+pushdef(`itype',`rsb_coo_idx_t')dnl integer type (for indices)
+pushdef(`unrolling',$6)dnl	
+pushdef(`mop',$7)dnl	
+ifelse(matrix_storage,`fixed_block',dnl
+`/* This code is intended for a purely blocked matrix. */',dnl
+`/* This code is intended for a pure VBR partitioned matrix. */')
+RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_PROTOTYPE(mtype,matrix_storage,transposition,b_rows,b_columns,unrolling,mop)dnl
+ifdef(`ONLY_WANT_HEADERS',`;
+',`
+RSB_M4_KERNEL_DIRECT_DISPATCHER_FUNCTION_BODY(mtype,matrix_storage,b_rows,b_columns,unrolling,mop)
+')dnl
+popdef(`mop')dnl
+popdef(`matrix_storage')dnl
+popdef(`transposition')dnl
+popdef(`mtype')dnl
+popdef(`itype')dnl
+popdef(`unrolling')dnl
+')dnl
+dnl
+dnl
+dnl
diff --git a/rsb_lbl.h b/rsb_lbl.h
new file mode 100644
index 0000000..963f974
--- /dev/null
+++ b/rsb_lbl.h
@@ -0,0 +1,235 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/** @file
+ *  @brief	Macros for linked block formats (and more)
+ *  @author Michele Martone
+ * */
+
+#ifndef RSB_LBL_H_INCLUDED
+#define RSB_LBL_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#ifdef RSB_FLAG_WANT_LINKED_STORAGE
+
+/**
+ * \brief A local matrix block info structure.
+ * */
+struct rsb_block_tail_t
+{
+	/* TODO : USE SHORT INT'S */
+/*	void *next_block_on_row;
+	void *next_block_on_col;*/
+	/* seems like using integers is faster ! */
+#if RSB_BLOCK_SMALL_INDICES
+	/* 
+	 * DANGER  :
+	 * WARNING : MISSING CHECKS IN THE CODE FOR BIGGER BLOCKS !
+	 * DANGER  :
+	 */
+	#define intrablock_int unsigned char
+	#define interblock_int unsigned short int
+	#define index_int      unsigned short int
+#else
+/*	#define intrablock_int short unsigned int*/
+	#define intrablock_int unsigned int	/* FIXME : temporary (for debugging!) */
+	#define interblock_int unsigned int
+	#define index_int      unsigned int
+#endif /* RSB_BLOCK_SMALL_INDICES */
+	intrablock_int block_rows;
+	intrablock_int block_columns;
+	interblock_int block_row;
+	interblock_int block_column;
+	index_int base_column;
+	index_int base_row;
+#if 0
+	void(*block_spmv_f_p[])(const void*,const void*,void*,int,int) /* block multiplication function pointer */ 
+	rsb_flags_t flags;
+#endif
+	/* only for test purposes right now  */
+	/* FIXME : and data alignment ? we should not break it ! */
+#ifdef RSB_WANT_BLOCK_TRAILING_STRUCT_QUICK
+	void(*spmv_fp)(const double*,const double*,double*,int,int); /* sample spmv protorype */
+#if RSB_BLOCK_SMALL_INDICES
+	short int foo;	/* dummy 16 bits for 64 bit alignment purposes : FIXME : temporary */
+#else
+	int foo;	/* dummy 32 bits for 64 bit alignment purposes : FIXME : temporary */
+#endif
+#endif /* RSB_WANT_BLOCK_TRAILING_STRUCT_QUICK */
+};
+
+
+#define RSB_BLOCK_EXTRA_BYTES (sizeof(struct rsb_block_tail_t))
+
+#define RSB_BLOCK_TRAILING_STRUCT_TRANSPOSE(bt) \
+	{\
+	intrablock_int ib_tmp; \
+	index_int ii_tmp; \
+	ib_tmp=bt->block_columns;bt->block_columns=bt->block_rows;bt->block_rows=ib_tmp; \
+	ib_tmp=bt->block_column ;bt->block_column =bt->block_row ;bt->block_row =ib_tmp; \
+	ii_tmp=bt->base_column;  bt->base_column  =bt->base_row  ;bt->base_row  =ii_tmp; \
+	/*printf("br:%d\n",(bt)->block_row);*/ \
+	/*printf("bc:%d\n",(bt)->block_column);*/ \
+	/*printf("%d %d\n",block_rows_,block_columns_);*/ \
+	}
+
+/* trailing struct, full */
+#define RSB_BLOCK_TRAILING_STRUCT_SET_(bt,block_row_,block_column_,block_rows_,block_columns_,base_row_,base_column_) \
+	{\
+	{(bt)->block_row =(block_row_ );} \
+	{(bt)->block_rows=(block_rows_);} \
+	{(bt)->block_column=(block_column_);} \
+	{(bt)->block_columns=(block_columns_);} \
+	{(bt)->base_row =(base_row_ );} \
+	{(bt)->base_column=(base_column_);} \
+	/*printf("br:%d\n",(bt)->block_row);*/ \
+	/*printf("bc:%d\n",(bt)->block_column);*/ \
+	/*printf("%d %d\n",block_rows_,block_columns_);*/ \
+	}
+
+/*	{(bt)->flags=0x0;}*/
+
+/* still unsupported */
+#ifdef RSB_WANT_BLOCK_TRAILING_STRUCT_QUICK
+#define RSB_BLOCK_TRAILING_STRUCT_SET(bt,block_row,block_column,block_rows,block_columns,base_row,base_column) \
+	{RSB_BLOCK_TRAILING_STRUCT_SET_(((struct rsb_block_tail_t *)(bt)),(block_row),(block_column),(block_rows),(block_columns),(base_row),(base_column)) \
+	{((struct rsb_block_tail_t *)(bt))->spmv_fp=(RSB_double_spmv((block_rows),(block_columns)));} }
+#else
+#define RSB_BLOCK_TRAILING_STRUCT_SET(bt,block_row,block_column,block_rows,block_columns,base_row,base_column) \
+	{RSB_BLOCK_TRAILING_STRUCT_SET_(((struct rsb_block_tail_t *)(bt)),(block_row),(block_column),(block_rows),(block_columns),(base_row),(base_column))}
+#endif /* RSB_WANT_BLOCK_TRAILING_STRUCT_QUICK */
+
+#define RSB_BLOCK_TRAILING_STRUCT_NEXT(m,bt) \
+	(struct rsb_block_tail_t*)(((char*)(bt))+((m)->el_size)*((rsb_coo_idx_t)(bt)->block_rows)*((rsb_coo_idx_t)(bt)->block_columns) + RSB_BLOCK_EXTRA_BYTES)
+
+
+
+
+/* * This macro returns the block offset (in terms of bytes) of the k^th block of matrix m */
+#define RSB_BLOCK_OFFSET(m,k) (((m)->indptr[(k)]*(m)->el_size) + (((m)->flags&RSB_FLAG_WANT_LINKED_STORAGE)?(k+1)*(RSB_BLOCK_EXTRA_BYTES):0 ))
+
+/* * This macro returns the count of needed bytes for the blocks array. */
+#define RSB_TOTAL_BLOCK_BYTES(matrix,options) \
+	((RSB_TOTAL_BLOCK_ELEMENT_COUNT(matrix))*(matrix)->el_size+((matrix)->block_count+1)*(((matrix)->flags&RSB_FLAG_WANT_LINKED_STORAGE)?sizeof(struct rsb_block_tail_t):0))
+
+
+
+
+/* the extra structure is placed at the block first byte */
+#define RSB_BLOCK_TRAILING_STRUCT_GET(m,k)  \
+	((struct rsb_block_tail_t*)((char*)(RSB_BLOCK_ADDRESS(m,k))-sizeof(struct rsb_block_tail_t)))
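+/*
+ * Illustrative layout note (inferred from the macros above): with
+ * RSB_FLAG_WANT_LINKED_STORAGE, each block's numerical values in the blocks
+ * array are immediately preceded by their rsb_block_tail_t, i.e.:
+ *
+ *   ... | rsb_block_tail_t | values of block k (block_rows*block_columns) | ...
+ *
+ * so RSB_BLOCK_ADDRESS(m,k) points at the first value of block k, and
+ * RSB_BLOCK_TRAILING_STRUCT_GET(m,k) steps back sizeof(struct rsb_block_tail_t)
+ * bytes to reach its descriptor.
+ */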
+
+/* new */
+/*#define _VBR_BLOCK_TRAILING_STRUCT_GET(m,k)  \
+	(((m)->flags & RSB_FLAG_WANT_LINKED_STORAGE)?(RSB_BLOCK_TRAILING_STRUCT_GET(m,k)):-1) 
+*/
+
+
+
+
+
+
+/*	*	*	*	*	*	*	*	*	*	*	*/
+
+/*
+ * The following three macros should be used for scanning a whole matrix.
+ * They are intended for situations where performance is important and
+ * specialized kernels are called on each matrix sub-block (a usage
+ * sketch follows the #if 0 block below).
+ * */
+#if 0
+#ifdef RSB_WANT_BLOCK_TRAILING_STRUCT
+/* Implementation for the trailing structures enhanced variant. */
+/* WARNING : UNTESTED */
+#define	RSB_GET_NEXT_BLOCK_POINTER(BP,M,ROWVAR,COLVAR,BLOCKROWSVAR,BLOCKCOLSVAR,BLOCKROWVAR,BLOCKCOLUMNVAR)	\
+	/*										\
+	 * *input*									\
+	 * M		should be a valid rsb_mtx_t structure pointer		\
+	 * *output*									\
+	 * ROWVAR	will be set to the base row    of this block			\
+	 * COLVAR	will be set to the base column of this block			\
+	 * BLOCKROWSVAR	will be set to the rows   count of this block			\
+	 * BLOCKCOLSVAR	will be set to the column count of this block			\
+	 * BP		 will be set to the current block pointer			\
+	 * */										\
+	_bt = RSB_BLOCK_TRAILING_STRUCT_NEXT((M),_bt);					\
+	_lastk=_k;									\
+	(BLOCKROWVAR)=_bt->block_row;							\
+	(BLOCKCOLUMNVAR)=_bt->block_column;						\
+	(BLOCKROWSVAR)=_bt->block_rows;							\
+	(BLOCKCOLSVAR)=_bt->block_columns;						\
+	(ROWVAR)=_bt->base_row;								\
+	(COLVAR)=_bt->base_column;							\
+	(BP)=((char*)_bt)+sizeof(struct rsb_block_tail_t);						\
+	_k++;			/* for the future macro calls */			\
+	;
+/* WARNING : UNTESTED */
+#define RSB_GET_FIRST_BLOCK_POINTER(BP,M,ROWVAR,COLVAR,BLOCKROWSVAR,BLOCKCOLSVAR,BLOCKROWVAR,BLOCKCOLUMNVAR)	\
+	int /*_i=0,_j=0,*/_k=0,_lastk=0;						\
+	const struct rsb_block_tail_t *_bt;							\
+	_bt = (struct rsb_block_tail_t*)RSB_BLOCK_TRAILING_STRUCT_GET((M),0) ;					\
+	(BLOCKROWVAR)=_bt->block_row;							\
+	(BLOCKCOLUMNVAR)=_bt->block_column;						\
+	(BLOCKROWSVAR)=_bt->block_rows;							\
+	(BLOCKCOLSVAR)=_bt->block_columns;						\
+	(ROWVAR)=_bt->base_row;								\
+	(COLVAR)=_bt->base_column;							\
+	(BP)=((char*)_bt)+sizeof(struct rsb_block_tail_t);						\
+	/*int _lasti=0;*/											\
+	/*int _lastj=0;*/											\
+	/*RSB_GET_NEXT_BLOCK_POINTER(BP,M,ROWVAR,COLVAR,BLOCKROWSVAR,BLOCKCOLSVAR,BLOCKROWVAR,BLOCKCOLUMNVAR)*/
+
+/* WARNING : UNTESTED */
+#define RSB_GOT_LAST_BLOCK_POINTER(M)	( _k >= (M)->block_count )
+#endif	/* ifdef RSB_WANT_BLOCK_TRAILING_STRUCT */
+#endif	/* 0 */
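+/*
+ * A minimal usage sketch for the scanning macros (mirroring their use in the
+ * generated kernel testing functions; variable names are illustrative):
+ *
+ *	rsb_coo_idx_t br,bc,r,c,bri,bci;
+ *	rsb_byte_t *bp;
+ *	RSB_GET_FIRST_BLOCK_POINTER(bp,mtxAp,br,bc,r,c,bri,bci);
+ *	while(!RSB_GOT_LAST_BLOCK_POINTER(mtxAp))
+ *	{
+ *		... apply the specialized kernel to the r x c block at bp ...
+ *		RSB_GET_NEXT_BLOCK_POINTER(bp,mtxAp,br,bc,r,c,bri,bci);
+ *	}
+ */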
+
+
+#else	/* RSB_FLAG_WANT_LINKED_STORAGE */
+
+/* * This macro returns the block offset (in terms of bytes) of the k^th block of matrix m */
+#define RSB_BLOCK_OFFSET(m,k) (((m)->indptr[(k)]*(m)->el_size))
+
+/* * This macro returns the count of needed bytes for the blocks array. */
+#define RSB_TOTAL_BLOCK_BYTES(matrix,options) \
+	((RSB_TOTAL_BLOCK_ELEMENT_COUNT(matrix))*(matrix)->el_size)
+
+#define RSB_TOTAL_BLOCK_ELEMENT_COUNT(matrix) ((matrix)->element_count)
+
+/*	((RSB_TOTAL_BLOCK_ELEMENT_COUNT(matrix))*(matrix)->el_size+((matrix)->block_count)*(RSB_BLOCK_EXTRA_BYTES))*/
+
+
+#endif	/* RSB_FLAG_WANT_LINKED_STORAGE */
+
+/* * This macro returns the block address of the k^th block of matrix m */
+#define RSB_BLOCK_ADDRESS(m,k) (((char*)((m)->VA))+RSB_BLOCK_OFFSET((m),(k)))
+
+
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+#endif	/* RSB_LBL_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_libspblas.c b/rsb_libspblas.c
new file mode 100644
index 0000000..3a93923
--- /dev/null
+++ b/rsb_libspblas.c
@@ -0,0 +1,3698 @@
+
+
+/*!
+        @file
+        @author Michele Martone
+
+	@brief  This file implements Sparse BLAS for librsb.
+	Supported types  :(float,double,float complex,double complex) .
+	Unsupported types:() .
+	Level 1 ops      :(dot,axpy,ga,gz,sc) .
+	Level 2 ops      :(mv,sv) .
+	Level 3 ops      :(mm,sm) .
+*/
+
+#ifndef BLAS_ENUM_H
+#define BLAS_ENUM_H
+
+  /* Enumerated types */
+
+/*! Used to specify a dense array's elements layout. */
+enum blas_order_type {
+            blas_rowmajor = 101, /*!< Row major. */
+            blas_colmajor = 102  /*!< Column major. */ };
+
+/*! Used to specify a transposition operator to a matrix operand. */
+enum blas_trans_type {
+            blas_no_trans   = 111, /*!< No transposition. */
+            blas_trans      = 112, /*!< Transposition. */
+            blas_conj_trans = 113  /*!< Transposition and conjugation. */ };
+
+/*! Specifies (#BLAS_ussp) or inquires (#BLAS_usgp) upper or lower triangularity of a matrix. */
+enum blas_uplo_type  {
+            blas_upper = 121, /*!< Upper triangular matrix. */
+            blas_lower = 122  /*!< Lower triangular matrix. */ };
+
+/*! Specifies (#BLAS_ussp) or inquires (#BLAS_usgp) whether the diagonal of a matrix is (implicitly) unitary or not. */
+enum blas_diag_type {
+            blas_non_unit_diag = 131,  /*!< Non unit diagonal matrix (the default). */
+            blas_unit_diag     = 132   /*!< Unit diagonal matrix. */ };
+
+/*! Unused/Unsupported. */
+enum blas_side_type {
+            blas_left_side  = 141, /*!< Unsupported. */ 
+            blas_right_side = 142  /*!< Unsupported. */  };
+
+/*! Unused/Unsupported. */
+enum blas_cmach_type {
+            blas_base      = 151, /*!< Unsupported. */ 
+            blas_t         = 152, /*!< Unsupported. */ 
+            blas_rnd       = 153, /*!< Unsupported. */ 
+            blas_ieee      = 154, /*!< Unsupported. */ 
+            blas_emin      = 155, /*!< Unsupported. */ 
+            blas_emax      = 156, /*!< Unsupported. */ 
+            blas_eps       = 157, /*!< Unsupported. */ 
+            blas_prec      = 158, /*!< Unsupported. */ 
+            blas_underflow = 159, /*!< Unsupported. */ 
+            blas_overflow  = 160, /*!< Unsupported. */ 
+            blas_sfmin     = 161  /*!< Unsupported. */ };
+
+/*! Unused/Unsupported. */
+enum blas_norm_type {
+            blas_one_norm       = 171, /*!< Unsupported. */ 
+            blas_real_one_norm  = 172, /*!< Unsupported. */ 
+            blas_two_norm       = 173, /*!< Unsupported. */ 
+            blas_frobenius_norm = 174, /*!< Unsupported. */ 
+            blas_inf_norm       = 175, /*!< Unsupported. */ 
+            blas_real_inf_norm  = 176, /*!< Unsupported. */ 
+            blas_max_norm       = 177, /*!< Unsupported. */ 
+            blas_real_max_norm  = 178  /*!< Unsupported. */ };
+
+/*! Unused/Unsupported. */
+enum blas_sort_type {
+            blas_increasing_order = 181,  /*!< Unsupported. */ 
+            blas_decreasing_order = 182   /*!< Unsupported. */  };
+
+/*! Unused/Unsupported. */
+enum blas_conj_type {
+            blas_conj    = 191, /*!< Unsupported. */
+            blas_no_conj = 192  /*!< Unsupported. */ };
+
+/*! Unused/Unsupported. */
+enum blas_jrot_type {
+            blas_jrot_inner  = 201, /*!< Unsupported. */
+            blas_jrot_outer  = 202, /*!< Unsupported. */
+            blas_jrot_sorted = 203  /*!< Unsupported. */ };
+
+/*! Unused/Unsupported. */
+enum blas_prec_type {
+            blas_prec_single     = 211, /*!< Unsupported. */
+            blas_prec_double     = 212, /*!< Unsupported. */
+            blas_prec_indigenous = 213, /*!< Unsupported. */
+            blas_prec_extra      = 214  /*!< Unsupported. */ };
+
+/*! Index base (valid at matrix build/modify time). */
+enum blas_base_type {
+            blas_zero_base = 221, /*!< Zero based indices (default when matrix created using the C interface). */
+            blas_one_base  = 222  /*!< One based indices (default when matrix created using the Fortran interface). */ };
+
+/*! Symmetry properties. If not specified otherwise, valid for both #BLAS_ussp and #BLAS_usgp.
+ */
+enum blas_symmetry_type {
+            blas_general          = 231, /*!< General unsymmetric matrix (default). For #BLAS_usgp only. */
+            blas_symmetric        = 232, /*!< Symmetric matrix (either #blas_lower_symmetric or #blas_upper_symmetric). For #BLAS_usgp only. */
+            blas_hermitian        = 233, /*!< Hermitian matrix (either #blas_lower_hermitian or #blas_upper_hermitian). For #BLAS_usgp only. */
+            blas_triangular       = 234, /*!< Triangular matrix (either #blas_lower_triangular or #blas_upper_triangular). For #BLAS_usgp only. */
+            blas_lower_triangular = 235, /*!< Lower triangular matrix. */
+            blas_upper_triangular = 236, /*!< Upper triangular matrix. */
+            blas_lower_symmetric  = 237, /*!< Lower symmetric matrix. */
+            blas_upper_symmetric  = 238, /*!< Upper symmetric matrix. */
+            blas_lower_hermitian  = 239, /*!< Lower hermitian matrix. */
+            blas_upper_hermitian  = 240  /*!< Upper hermitian matrix. */ };
+
+/*! Numerical field type; can be used with #BLAS_usgp to inquire about a matrix numerical type (1 will be returned in case of success, 0 in case of failure). */
+enum blas_field_type {
+            blas_complex          = 241, /*!< Will succeed if matrix is of 'C' or 'Z' type. */
+            blas_real             = 242, /*!< Will succeed if matrix is of 'S' or 'D' type. */
+            blas_double_precision = 243, /*!< Will succeed if matrix is of 'D' or 'Z' type. */
+            blas_single_precision = 244  /*!< Will succeed if matrix is of 'S' or 'C' type. */ };
+
+/*! Quantities that can be obtained via #BLAS_usgp. */
+enum blas_size_type {
+            blas_num_rows      = 251, /*!< Get the matrix rows count. */
+            blas_num_cols      = 252, /*!< Get the matrix columns count. */
+            blas_num_nonzeros  = 253  /*!< Get the matrix nonzeros count. */ };
+
+/*! The following are not fully implemented. Usable with #BLAS_usgp. */
+enum blas_handle_type{
+            blas_invalid_handle = 261, /*!< Used to check whether the handle is invalid. */
+            blas_new_handle     = 262, /*!< Will give 1 if the handle is new. */
+            blas_open_handle    = 263, /*!< Will give 1 if the handle is open. */
+            blas_valid_handle   = 264  /*!< Will give 1 if the handle is valid (that is, after #BLAS_suscr_end/#BLAS_duscr_end/#BLAS_cuscr_end/#BLAS_zuscr_end). */ };
+
+/*! The following are usable with #BLAS_usgp only. */
+enum blas_sparsity_optimization_type {
+            blas_regular       = 271, /*!< Will give 0. */
+            blas_irregular     = 272, /*!< Will give 1. */
+            blas_block         = 273, /*!< Will give 0. */
+            blas_unassembled   = 274  /*!< Complementary to #blas_valid_handle. */ };
+
+/*! Properties suitable to be used with #BLAS_ussp/#BLAS_usgp. All of these are not in the Sparse BLAS standard. */
+enum blas_rsb_ext_type {
+            blas_rsb_spmv_autotuning_on   = 6660,	/*!< Turn on executing threads autotuning for #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. As an extension to the standard, the autotuning properties can be turned on/off at any time; if the autotuning feature has not been enabled at build time, using these properties will make the call fail. For more information, see #rsb_tune_spmm. (EXPERIMENTAL) */
+            blas_rsb_spmv_autotuning_off  = 6661,	/*!< Turn off executing threads autotuning for #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_spmv_n_autotuning_on   = 6662,	/*!< Turn on executing threads autotuning for untransposed #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_spmv_n_autotuning_off  = 6663,	/*!< Turn off executing threads autotuning for untransposed #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_spmv_t_autotuning_on   = 6664,	/*!< Turn on executing threads autotuning for transposed #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_spmv_t_autotuning_off  = 6665,	/*!< Turn off executing threads autotuning for transposed #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_autotune_next_operation= 6666,	/*!< Turn on executing threads autotuning for the next operation among #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_rep_rsb         = 9995,	/*!< Request/check for RSB representation. */
+            blas_rsb_rep_csr         = 9996,	/*!< Request/check for CSR representation. */
+            blas_rsb_rep_coo         = 9997,	/*!< Request/check for COO representation. */
+            blas_rsb_duplicates_ovw   = 9998,	/*!< Request/check for duplicate nonzeroes overwriting policy. */
+            blas_rsb_duplicates_sum   = 9999 	/*!< Request/check for duplicate nonzeroes summation policy. */
+};
+
+#endif
+   /* BLAS_ENUM_H */
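+/*
+ * Illustrative use of the extension properties above (a sketch; assumes a
+ * matrix handle A created via the usual BLAS_Xuscr_begin/insert/end sequence,
+ * and the standard property-setting call BLAS_ussp(A,pname)):
+ *
+ *	BLAS_ussp(A, blas_rsb_spmv_autotuning_on); // enable threads autotuning for usmv
+ *	BLAS_ussp(A, blas_rsb_duplicates_sum);     // sum duplicate nonzeroes (build time)
+ */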
+#include "rsb_libspblas_handle.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* @cond INNERDOC  */
+RSB_INTERNALS_COMMON_HEAD_DECLS
+/* @endcond */
+               /* Level 1 Computational Routines */
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_susdot rsb_wp_BLAS_susdot
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_susdot(enum blas_conj_type conj, int nnz, const float * x,
+		const int *indx, const float * y, int incy, float * r,
+		enum blas_base_type index_base)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_dot_msg\rsb_spblas_return_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+	RSB_SPB_INTERFACE_PREAMBLE
+
+#if RSB_WANT_SPARSE_BLAS_LEVEL_1
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusdot(RSB_NUMERICAL_TYPE_FLOAT ,conj,nnz,x,indx,y,incy,r,index_base))
+#else  /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+	RSB_SPB_INTERFACE_RETURN(RSB_BLAS_ERROR);
+#endif /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_susdot_ rsb_wp_blas_susdot_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_susdot_(enum blas_conj_type*conj,int*nnz,const float *x,const int *indx,const float *y,int*incy,float *r,enum blas_base_type*index_base,int*istat)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_dot_msg\rsb_spblas_istat_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+
+	int istatv = BLAS_susdot(*conj,*nnz,x,indx,y,*incy,r,*index_base );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+	return;
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_dusdot rsb_wp_BLAS_dusdot
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_dusdot(enum blas_conj_type conj, int nnz, const double * x,
+		const int *indx, const double * y, int incy, double * r,
+		enum blas_base_type index_base)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_dot_msg\rsb_spblas_return_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+	RSB_SPB_INTERFACE_PREAMBLE
+
+#if RSB_WANT_SPARSE_BLAS_LEVEL_1
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusdot(RSB_NUMERICAL_TYPE_DOUBLE ,conj,nnz,x,indx,y,incy,r,index_base))
+#else  /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+	RSB_SPB_INTERFACE_RETURN(RSB_BLAS_ERROR);
+#endif /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_dusdot_ rsb_wp_blas_dusdot_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_dusdot_(enum blas_conj_type*conj,int*nnz,const double *x,const int *indx,const double *y,int*incy,double *r,enum blas_base_type*index_base,int*istat)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_dot_msg\rsb_spblas_istat_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+
+	int istatv = BLAS_dusdot(*conj,*nnz,x,indx,y,*incy,r,*index_base );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+	return;
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cusdot rsb_wp_BLAS_cusdot
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cusdot(enum blas_conj_type conj, int nnz, const void *x,
+		const int *indx, const void *y, int incy, void *r,
+		enum blas_base_type index_base)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_dot_msg\rsb_spblas_return_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+	RSB_SPB_INTERFACE_PREAMBLE
+
+#if RSB_WANT_SPARSE_BLAS_LEVEL_1
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusdot(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,conj,nnz,x,indx,y,incy,r,index_base))
+#else  /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+	RSB_SPB_INTERFACE_RETURN(RSB_BLAS_ERROR);
+#endif /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cusdot_ rsb_wp_blas_cusdot_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cusdot_(enum blas_conj_type*conj,int*nnz,const void *x,const int *indx,const void *y,int*incy,void *r,enum blas_base_type*index_base,int*istat)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_dot_msg\rsb_spblas_istat_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+
+	int istatv = BLAS_cusdot(*conj,*nnz,x,indx,y,*incy,r,*index_base );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+	return;
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zusdot rsb_wp_BLAS_zusdot
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zusdot(enum blas_conj_type conj, int nnz, const void *x,
+		const int *indx, const void *y, int incy, void *r,
+		enum blas_base_type index_base)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_dot_msg\rsb_spblas_return_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+	RSB_SPB_INTERFACE_PREAMBLE
+
+#if RSB_WANT_SPARSE_BLAS_LEVEL_1
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusdot(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,conj,nnz,x,indx,y,incy,r,index_base))
+#else  /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+	RSB_SPB_INTERFACE_RETURN(RSB_BLAS_ERROR);
+#endif /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zusdot_ rsb_wp_blas_zusdot_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zusdot_(enum blas_conj_type*conj,int*nnz,const void *x,const int *indx,const void *y,int*incy,void *r,enum blas_base_type*index_base,int*istat)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_dot_msg\rsb_spblas_istat_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+
+	int istatv = BLAS_zusdot(*conj,*nnz,x,indx,y,*incy,r,*index_base );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+	return;
+}
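+
+/*
+ * Usage sketch for the sparse dot routines above (illustrative values; r
+ * receives the dot product of the sparse x against the dense y):
+ *
+ *	const double x[] = {1.0, 2.0};            // sparse vector values
+ *	const int indx[] = {0, 3};                // their indices into y
+ *	const double y[] = {4.0, 0.0, 0.0, 5.0};  // dense vector
+ *	double r = 0.0;
+ *	BLAS_dusdot(blas_no_conj, 2, x, indx, y, 1, &r, blas_zero_base);
+ *	// now r == 1.0*4.0 + 2.0*5.0 == 14.0
+ */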
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_susaxpy rsb_wp_BLAS_susaxpy
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_susaxpy(int nnz, float  alpha, const float * x, const int *indx,
+                 float * y, int incy, enum blas_base_type index_base)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_axpy_msg\rsb_spblas_return_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+	RSB_SPB_INTERFACE_PREAMBLE
+
+#if RSB_WANT_SPARSE_BLAS_LEVEL_1
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusaxpy(RSB_NUMERICAL_TYPE_FLOAT ,/* FIXME: this is an exception; shall use a formal substitution technique, rather */nnz,&alpha,x,indx,y,incy,index_base ))
+#else  /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+	RSB_SPB_INTERFACE_RETURN(RSB_BLAS_ERROR);
+#endif /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_susaxpy_ rsb_wp_blas_susaxpy_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_susaxpy_(int*nnz,float*alpha,const float *x,const int *indx,float *y,int*incy,enum blas_base_type*index_base,int*istat)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_axpy_msg\rsb_spblas_istat_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+
+	int istatv = BLAS_susaxpy(*nnz,*alpha,x,indx,y,*incy,*index_base );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+	return;
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_dusaxpy rsb_wp_BLAS_dusaxpy
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_dusaxpy(int nnz, double  alpha, const double * x, const int *indx,
+                 double * y, int incy, enum blas_base_type index_base)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_axpy_msg\rsb_spblas_return_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+	RSB_SPB_INTERFACE_PREAMBLE
+
+#if RSB_WANT_SPARSE_BLAS_LEVEL_1
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusaxpy(RSB_NUMERICAL_TYPE_DOUBLE ,/* FIXME: this is an exception; a formal substitution technique should be used instead */nnz,&alpha,x,indx,y,incy,index_base ))
+#else  /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+	RSB_SPB_INTERFACE_RETURN(RSB_BLAS_ERROR);
+#endif /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_dusaxpy_ rsb_wp_blas_dusaxpy_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_dusaxpy_(int*nnz,double*alpha,const double *x,const int *indx,double *y,int*incy,enum blas_base_type*index_base,int*istat)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_axpy_msg\rsb_spblas_istat_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+
+	int istatv = BLAS_dusaxpy(*nnz,*alpha,x,indx,y,*incy,*index_base );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+	return;
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cusaxpy rsb_wp_BLAS_cusaxpy
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cusaxpy(int nnz, const void * alpha, const void *x, const int *indx,
+                 void *y, int incy, enum blas_base_type index_base)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_axpy_msg\rsb_spblas_return_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+	RSB_SPB_INTERFACE_PREAMBLE
+
+#if RSB_WANT_SPARSE_BLAS_LEVEL_1
+	/* alpha is already a pointer to the complex scalar here, so it is passed through directly */
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusaxpy(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,/* FIXME: this is an exception; a formal substitution technique should be used instead */nnz,alpha,x,indx,y,incy,index_base ))
+#else  /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+	RSB_SPB_INTERFACE_RETURN(RSB_BLAS_ERROR);
+#endif /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cusaxpy_ rsb_wp_blas_cusaxpy_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cusaxpy_(int*nnz,const void *alpha,const void *x,const int *indx,void *y,int*incy,enum blas_base_type*index_base,int*istat)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_axpy_msg\rsb_spblas_istat_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+
+	int istatv = BLAS_cusaxpy(*nnz,alpha,x,indx,y,*incy,*index_base );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+	return;
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zusaxpy rsb_wp_BLAS_zusaxpy
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zusaxpy(int nnz, const void * alpha, const void *x, const int *indx,
+                 void *y, int incy, enum blas_base_type index_base)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_axpy_msg\rsb_spblas_return_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+	RSB_SPB_INTERFACE_PREAMBLE
+
+#if RSB_WANT_SPARSE_BLAS_LEVEL_1
+	/* alpha is already a pointer to the complex scalar here, so it is passed through directly */
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusaxpy(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,/* FIXME: this is an exception; a formal substitution technique should be used instead */nnz,alpha,x,indx,y,incy,index_base ))
+#else  /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+	RSB_SPB_INTERFACE_RETURN(RSB_BLAS_ERROR);
+#endif /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zusaxpy_ rsb_wp_blas_zusaxpy_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zusaxpy_(int*nnz,const void *alpha,const void *x,const int *indx,void *y,int*incy,enum blas_base_type*index_base,int*istat)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_axpy_msg\rsb_spblas_istat_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+
+	int istatv = BLAS_zusaxpy(*nnz,alpha,x,indx,y,*incy,*index_base );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+	return;
+}
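+
+/*
+ Editor's sketch (illustrative, not upstream generated code): BLAS_dusaxpy
+ computes y := alpha*x + y with x sparse and y dense. Assuming one-based
+ indices and the zero-on-success convention:
+
+   const double x[] = { 1.0, 2.0 };
+   const int indx[] = { 1, 3 };
+   double y[] = { 4.0, 5.0, 6.0 };
+   int istat = BLAS_dusaxpy(2, 10.0, x, indx, y, 1, blas_one_base);
+
+ Afterwards y is { 14.0, 5.0, 26.0 }.
+*/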
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_susga rsb_wp_BLAS_susga
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_susga(int nnz, const float * y, int incy, float * x, const int *indx,
+              enum blas_base_type index_base)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_ga_msg\rsb_spblas_return_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+	RSB_SPB_INTERFACE_PREAMBLE
+
+#if RSB_WANT_SPARSE_BLAS_LEVEL_1
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusga(RSB_NUMERICAL_TYPE_FLOAT ,nnz,y,incy,x,indx,index_base))
+#else  /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+	RSB_SPB_INTERFACE_RETURN(RSB_BLAS_ERROR);
+#endif /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_susga_ rsb_wp_blas_susga_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_susga_(int*nnz,const float *y,int*incy,float *x,const int *indx,enum blas_base_type*index_base,int*istat)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_ga_msg\rsb_spblas_istat_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+
+	int istatv = BLAS_susga(*nnz,y,*incy,x,indx,*index_base );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+	return;
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_dusga rsb_wp_BLAS_dusga
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_dusga(int nnz, const double * y, int incy, double * x, const int *indx,
+              enum blas_base_type index_base)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_ga_msg\rsb_spblas_return_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+	RSB_SPB_INTERFACE_PREAMBLE
+
+#if RSB_WANT_SPARSE_BLAS_LEVEL_1
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusga(RSB_NUMERICAL_TYPE_DOUBLE ,nnz,y,incy,x,indx,index_base))
+#else  /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+	RSB_SPB_INTERFACE_RETURN(RSB_BLAS_ERROR);
+#endif /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_dusga_ rsb_wp_blas_dusga_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_dusga_(int*nnz,const double *y,int*incy,double *x,const int *indx,enum blas_base_type*index_base,int*istat)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_ga_msg\rsb_spblas_istat_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+
+	int istatv = BLAS_dusga(*nnz,y,*incy,x,indx,*index_base );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+	return;
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cusga rsb_wp_BLAS_cusga
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cusga(int nnz, const void *y, int incy, void *x, const int *indx,
+              enum blas_base_type index_base)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_ga_msg\rsb_spblas_return_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+	RSB_SPB_INTERFACE_PREAMBLE
+
+#if RSB_WANT_SPARSE_BLAS_LEVEL_1
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusga(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,nnz,y,incy,x,indx,index_base))
+#else  /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+	RSB_SPB_INTERFACE_RETURN(RSB_BLAS_ERROR);
+#endif /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cusga_ rsb_wp_blas_cusga_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cusga_(int*nnz,const void *y,int*incy,void *x,const int *indx,enum blas_base_type*index_base,int*istat)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_ga_msg\rsb_spblas_istat_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+
+	int istatv = BLAS_cusga(*nnz,y,*incy,x,indx,*index_base );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+	return;
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zusga rsb_wp_BLAS_zusga
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zusga(int nnz, const void *y, int incy, void *x, const int *indx,
+              enum blas_base_type index_base)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_ga_msg\rsb_spblas_return_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+	RSB_SPB_INTERFACE_PREAMBLE
+
+#if RSB_WANT_SPARSE_BLAS_LEVEL_1
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusga(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,nnz,y,incy,x,indx,index_base))
+#else  /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+	RSB_SPB_INTERFACE_RETURN(RSB_BLAS_ERROR);
+#endif /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zusga_ rsb_wp_blas_zusga_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zusga_(int*nnz,const void *y,int*incy,void *x,const int *indx,enum blas_base_type*index_base,int*istat)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_ga_msg\rsb_spblas_istat_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+
+	int istatv = BLAS_zusga(*nnz,y,*incy,x,indx,*index_base );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+	return;
+}
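+
+/*
+ Editor's sketch (illustrative): BLAS_dusga gathers the entries of the dense
+ vector y listed in indx into the compressed vector x, leaving y unchanged.
+ Assuming one-based indices:
+
+   const double y[] = { 4.0, 5.0, 6.0 };
+   const int indx[] = { 1, 3 };
+   double x[2];
+   int istat = BLAS_dusga(2, y, 1, x, indx, blas_one_base);
+
+ Afterwards x is { 4.0, 6.0 }.
+*/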
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_susgz rsb_wp_BLAS_susgz
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_susgz(int nnz, float * y, int incy, float * x, const int *indx,
+              enum blas_base_type index_base)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_gz_msg\rsb_spblas_return_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+	RSB_SPB_INTERFACE_PREAMBLE
+
+#if RSB_WANT_SPARSE_BLAS_LEVEL_1
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusgz(RSB_NUMERICAL_TYPE_FLOAT ,nnz,y,incy,x,indx,index_base))
+#else  /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+	RSB_SPB_INTERFACE_RETURN(RSB_BLAS_ERROR);
+#endif /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_susgz_ rsb_wp_blas_susgz_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_susgz_(int*nnz,float *y,int*incy,float *x,const int *indx,enum blas_base_type*index_base,int*istat)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_gz_msg\rsb_spblas_istat_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+
+	int istatv = BLAS_susgz(*nnz,y,*incy,x,indx,*index_base );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+	return;
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_dusgz rsb_wp_BLAS_dusgz
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_dusgz(int nnz, double * y, int incy, double * x, const int *indx,
+              enum blas_base_type index_base)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_gz_msg\rsb_spblas_return_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+	RSB_SPB_INTERFACE_PREAMBLE
+
+#if RSB_WANT_SPARSE_BLAS_LEVEL_1
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusgz(RSB_NUMERICAL_TYPE_DOUBLE ,nnz,y,incy,x,indx,index_base))
+#else  /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+	RSB_SPB_INTERFACE_RETURN(RSB_BLAS_ERROR);
+#endif /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_dusgz_ rsb_wp_blas_dusgz_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_dusgz_(int*nnz,double *y,int*incy,double *x,const int *indx,enum blas_base_type*index_base,int*istat)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_gz_msg\rsb_spblas_istat_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+
+	int istatv = BLAS_dusgz(*nnz,y,*incy,x,indx,*index_base );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+	return;
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cusgz rsb_wp_BLAS_cusgz
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cusgz(int nnz, void *y, int incy, void *x, const int *indx,
+              enum blas_base_type index_base)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_gz_msg\rsb_spblas_return_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+	RSB_SPB_INTERFACE_PREAMBLE
+
+#if RSB_WANT_SPARSE_BLAS_LEVEL_1
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusgz(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,nnz,y,incy,x,indx,index_base))
+#else  /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+	RSB_SPB_INTERFACE_RETURN(RSB_BLAS_ERROR);
+#endif /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cusgz_ rsb_wp_blas_cusgz_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cusgz_(int*nnz,void *y,int*incy,void *x,const int *indx,enum blas_base_type*index_base,int*istat)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_gz_msg\rsb_spblas_istat_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+
+	int istatv = BLAS_cusgz(*nnz,y,*incy,x,indx,*index_base );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+	return;
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zusgz rsb_wp_BLAS_zusgz
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zusgz(int nnz, void *y, int incy, void *x, const int *indx,
+              enum blas_base_type index_base)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_gz_msg\rsb_spblas_return_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+	RSB_SPB_INTERFACE_PREAMBLE
+
+#if RSB_WANT_SPARSE_BLAS_LEVEL_1
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusgz(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,nnz,y,incy,x,indx,index_base))
+#else  /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+	RSB_SPB_INTERFACE_RETURN(RSB_BLAS_ERROR);
+#endif /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zusgz_ rsb_wp_blas_zusgz_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zusgz_(int*nnz,void *y,int*incy,void *x,const int *indx,enum blas_base_type*index_base,int*istat)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_gz_msg\rsb_spblas_istat_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+
+	int istatv = BLAS_zusgz(*nnz,y,*incy,x,indx,*index_base );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+	return;
+}
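+
+/*
+ Editor's sketch (illustrative): BLAS_dusgz is the gather-and-zero variant of
+ BLAS_dusga above; the gathered entries of y are additionally set to zero.
+ Assuming one-based indices:
+
+   double y[] = { 4.0, 5.0, 6.0 };
+   const int indx[] = { 1, 3 };
+   double x[2];
+   int istat = BLAS_dusgz(2, y, 1, x, indx, blas_one_base);
+
+ Afterwards x is { 4.0, 6.0 } and y is { 0.0, 5.0, 0.0 }.
+*/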
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_sussc rsb_wp_BLAS_sussc
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_sussc(int nnz, const float * x, float * y, int incy, const int *indx,
+              enum blas_base_type index_base)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_sc_msg\rsb_spblas_return_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+	RSB_SPB_INTERFACE_PREAMBLE
+
+#if RSB_WANT_SPARSE_BLAS_LEVEL_1
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xussc(RSB_NUMERICAL_TYPE_FLOAT ,nnz,x,y,incy,indx,index_base))
+#else  /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+	RSB_SPB_INTERFACE_RETURN(RSB_BLAS_ERROR);
+#endif /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_sussc_ rsb_wp_blas_sussc_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_sussc_(int*nnz,const float *x,float *y,int*incy,const int *indx,enum blas_base_type*index_base,int*istat)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_sc_msg\rsb_spblas_istat_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+
+	int istatv = BLAS_sussc(*nnz,x,y,*incy,indx,*index_base );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+	return;
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_dussc rsb_wp_BLAS_dussc
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_dussc(int nnz, const double * x, double * y, int incy, const int *indx,
+              enum blas_base_type index_base)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_sc_msg\rsb_spblas_return_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+	RSB_SPB_INTERFACE_PREAMBLE
+
+#if RSB_WANT_SPARSE_BLAS_LEVEL_1
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xussc(RSB_NUMERICAL_TYPE_DOUBLE ,nnz,x,y,incy,indx,index_base))
+#else  /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+	RSB_SPB_INTERFACE_RETURN(RSB_BLAS_ERROR);
+#endif /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_dussc_ rsb_wp_blas_dussc_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_dussc_(int*nnz,const double *x,double *y,int*incy,const int *indx,enum blas_base_type*index_base,int*istat)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_sc_msg\rsb_spblas_istat_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+
+	int istatv = BLAS_dussc(*nnz,x,y,*incy,indx,*index_base );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+	return;
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cussc rsb_wp_BLAS_cussc
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cussc(int nnz, const void *x, void *y, int incy, const int *indx,
+              enum blas_base_type index_base)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_sc_msg\rsb_spblas_return_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+	RSB_SPB_INTERFACE_PREAMBLE
+
+#if RSB_WANT_SPARSE_BLAS_LEVEL_1
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xussc(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,nnz,x,y,incy,indx,index_base))
+#else  /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+	RSB_SPB_INTERFACE_RETURN(RSB_BLAS_ERROR);
+#endif /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cussc_ rsb_wp_blas_cussc_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cussc_(int*nnz,const void *x,void *y,int*incy,const int *indx,enum blas_base_type*index_base,int*istat)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_sc_msg\rsb_spblas_istat_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+
+	int istatv = BLAS_cussc(*nnz,x,y,*incy,indx,*index_base );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+	return;
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zussc rsb_wp_BLAS_zussc
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zussc(int nnz, const void *x, void *y, int incy, const int *indx,
+              enum blas_base_type index_base)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_sc_msg\rsb_spblas_return_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+	RSB_SPB_INTERFACE_PREAMBLE
+
+#if RSB_WANT_SPARSE_BLAS_LEVEL_1
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xussc(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,nnz,x,y,incy,indx,index_base))
+#else  /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+	RSB_SPB_INTERFACE_RETURN(RSB_BLAS_ERROR);
+#endif /* RSB_WANT_SPARSE_BLAS_LEVEL_1 */
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zussc_ rsb_wp_blas_zussc_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zussc_(int*nnz,const void *x,void *y,int*incy,const int *indx,enum blas_base_type*index_base,int*istat)
+{
+
+	/*!
+	  \ingroup rsb_doc_sparse_blas
+	  \rsb_spblasl1_sc_msg\rsb_spblas_istat_msg
+	  \warning \rsb_spblasl1_msg
+	*/
+
+	int istatv = BLAS_zussc(*nnz,x,y,*incy,indx,*index_base );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+	return;
+}
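+
+/*
+ Editor's sketch (illustrative): BLAS_dussc scatters the compressed vector x
+ into the dense vector y at the positions listed in indx, i.e. the inverse of
+ the gather above. Assuming one-based indices:
+
+   const double x[] = { 4.0, 6.0 };
+   const int indx[] = { 1, 3 };
+   double y[] = { 0.0, 5.0, 0.0 };
+   int istat = BLAS_dussc(2, x, y, 1, indx, blas_one_base);
+
+ Afterwards y is { 4.0, 5.0, 6.0 }.
+*/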
+
+               /* Level 2 Computational Routines */
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_susmv rsb_wp_BLAS_susmv
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_susmv(enum blas_trans_type transA, float alpha,
+    blas_sparse_matrix A, const float * x, int incx, float * y, int incy)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_mv_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+{
+	const float beta = ((float)(1.0));
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusmv(transA,&alpha,A,x,incx,&beta,y,incy))
+}
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_susmv_ rsb_wp_blas_susmv_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_susmv_(enum blas_trans_type*transA,float*alpha,blas_sparse_matrix*A,const float *x,int*incx,float *y,int*incy,int*istat)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_mv_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_susmv(*transA,*alpha,*A,x,*incx,y,*incy);
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_dusmv rsb_wp_BLAS_dusmv
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_dusmv(enum blas_trans_type transA, double alpha,
+    blas_sparse_matrix A, const double * x, int incx, double * y, int incy)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_mv_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+{
+	const double beta = ((double)(1.0));
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusmv(transA,&alpha,A,x,incx,&beta,y,incy))
+}
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_dusmv_ rsb_wp_blas_dusmv_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_dusmv_(enum blas_trans_type*transA,double*alpha,blas_sparse_matrix*A,const double *x,int*incx,double *y,int*incy,int*istat)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_mv_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_dusmv(*transA,*alpha,*A,x,*incx,y,*incy);
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cusmv rsb_wp_BLAS_cusmv
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cusmv(enum blas_trans_type transA, const void *alpha,
+    blas_sparse_matrix A, const void *x, int incx, void *y, int incy)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_mv_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+{
+	const float complex beta = ((float complex)(1.0));
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusmv(transA,alpha,A,x,incx,&beta,y,incy))
+}
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cusmv_ rsb_wp_blas_cusmv_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cusmv_(enum blas_trans_type*transA,const void *alpha,blas_sparse_matrix*A,const void *x,int*incx,void *y,int*incy,int*istat)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_mv_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_cusmv(*transA,alpha,*A,x,*incx,y,*incy);
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zusmv rsb_wp_BLAS_zusmv
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zusmv(enum blas_trans_type transA, const void *alpha,
+    blas_sparse_matrix A, const void *x, int incx, void *y, int incy)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_mv_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+{
+	const double complex beta = ((double complex)(1.0));
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusmv(transA,alpha,A,x,incx,&beta,y,incy))
+}
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zusmv_ rsb_wp_blas_zusmv_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zusmv_(enum blas_trans_type*transA,const void *alpha,blas_sparse_matrix*A,const void *x,int*incx,void *y,int*incy,int*istat)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_mv_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_zusmv(*transA,alpha,*A,x,*incx,y,*incy);
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
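+
+/*
+ Editor's sketch (illustrative, not upstream generated code): since the
+ wrappers above pass a fixed beta of one to rsb__BLAS_Xusmv, BLAS_dusmv
+ accumulates: y := alpha*op(A)*x + y. Assuming the standard property routine
+ BLAS_ussp declared in blas_sparse.h, a 2 x 2 one-based example:
+
+   blas_sparse_matrix A = BLAS_duscr_begin(2, 2);
+   BLAS_ussp(A, blas_one_base);
+   BLAS_duscr_insert_entry(A, 2.0, 1, 1);
+   BLAS_duscr_insert_entry(A, 3.0, 2, 2);
+   BLAS_duscr_end(A);
+   double x[] = { 1.0, 1.0 };
+   double y[] = { 0.0, 0.0 };
+   int istat = BLAS_dusmv(blas_no_trans, 1.0, A, x, 1, y, 1);
+
+ Afterwards y is { 2.0, 3.0 }.
+*/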
+
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_sussv rsb_wp_BLAS_sussv
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_sussv(enum blas_trans_type transT, float alpha,
+    blas_sparse_matrix T, float * x, int incx)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_sv_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+{
+	const struct rsb_mtx_t *mtxAp = rsb__BLAS_inner_matrix_retrieve(T);
+	RSB_SPB_INTERFACE_RETURN(RSB_ERROR_TO_BLAS_ERROR(rsb__do_spsv(rsb_blas_trans_to_rsb_trans(transT),&alpha,mtxAp,x,incx,x,incx)))
+}
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_sussv_ rsb_wp_blas_sussv_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_sussv_(enum blas_trans_type*transT,float*alpha,blas_sparse_matrix*T,float *x,int*incx,int*istat)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_sv_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_sussv(*transT,*alpha,*T,x,*incx);
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_dussv rsb_wp_BLAS_dussv
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_dussv(enum blas_trans_type transT, double alpha,
+    blas_sparse_matrix T, double * x, int incx)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_sv_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+{
+	const struct rsb_mtx_t *mtxAp = rsb__BLAS_inner_matrix_retrieve(T);
+	RSB_SPB_INTERFACE_RETURN(RSB_ERROR_TO_BLAS_ERROR(rsb__do_spsv(rsb_blas_trans_to_rsb_trans(transT),&alpha,mtxAp,x,incx,x,incx)))
+}
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_dussv_ rsb_wp_blas_dussv_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_dussv_(enum blas_trans_type*transT,double*alpha,blas_sparse_matrix*T,double *x,int*incx,int*istat)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_sv_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_dussv(*transT,*alpha,*T,x,*incx);
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cussv rsb_wp_BLAS_cussv
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cussv(enum blas_trans_type transT, const void *alpha,
+    blas_sparse_matrix T, void *x, int incx)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_sv_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+{
+	const struct rsb_mtx_t *mtxAp = rsb__BLAS_inner_matrix_retrieve(T);
+	RSB_SPB_INTERFACE_RETURN(RSB_ERROR_TO_BLAS_ERROR(rsb__do_spsv(rsb_blas_trans_to_rsb_trans(transT),alpha,mtxAp,x,incx,x,incx)))
+}
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cussv_ rsb_wp_blas_cussv_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cussv_(enum blas_trans_type*transT,const void *alpha,blas_sparse_matrix*T,void *x,int*incx,int*istat)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_sv_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_cussv(*transT,alpha,*T,x,*incx);
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zussv rsb_wp_BLAS_zussv
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zussv(enum blas_trans_type transT, const void *alpha,
+    blas_sparse_matrix T, void *x, int incx)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_sv_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+{
+	const struct rsb_mtx_t *mtxAp = rsb__BLAS_inner_matrix_retrieve(T);
+	RSB_SPB_INTERFACE_RETURN(RSB_ERROR_TO_BLAS_ERROR(rsb__do_spsv(rsb_blas_trans_to_rsb_trans(transT),alpha,mtxAp,x,incx,x,incx)))
+}
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zussv_ rsb_wp_blas_zussv_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zussv_(enum blas_trans_type*transT,const void *alpha,blas_sparse_matrix*T,void *x,int*incx,int*istat)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_sv_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_zussv(*transT,alpha,*T,x,*incx);
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
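+
+/*
+ Editor's sketch (illustrative): BLAS_dussv solves a triangular system in
+ place; the wrappers above pass x as both input and output to rsb__do_spsv,
+ so x is overwritten with the solution of op(T)*x_new = alpha*x_old. Assuming
+ the standard BLAS_ussp property routine and property values from blas_enum.h:
+
+   blas_sparse_matrix T = BLAS_duscr_begin(2, 2);
+   BLAS_ussp(T, blas_one_base);
+   BLAS_ussp(T, blas_lower_triangular);
+   BLAS_duscr_insert_entry(T, 1.0, 1, 1);
+   BLAS_duscr_insert_entry(T, 1.0, 2, 1);
+   BLAS_duscr_insert_entry(T, 1.0, 2, 2);
+   BLAS_duscr_end(T);
+   double x[] = { 1.0, 3.0 };
+   int istat = BLAS_dussv(blas_no_trans, 1.0, T, x, 1);
+
+ Afterwards x holds the solution { 1.0, 2.0 }.
+*/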
+
+
+               /* Level 3 Computational Routines */
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_susmm rsb_wp_BLAS_susmm
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_susmm(enum blas_order_type order, enum blas_trans_type transA,
+   int nrhs, float alpha, blas_sparse_matrix A, const float * b, int ldb,
+       float *  c, int ldc)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_mm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+{
+	const float beta = ((float)(1.0));
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusmm(transA,&alpha,A,b,ldb,&beta,c,ldc,nrhs,order))
+}
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_susmm_ rsb_wp_blas_susmm_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_susmm_(enum blas_order_type*order,enum blas_trans_type*transA,int*nrhs,float*alpha,blas_sparse_matrix*A,const float *b,int*ldb,float *c,int*ldc,int*istat)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_mm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_susmm(*order,*transA,*nrhs,*alpha,*A,b,*ldb,c,*ldc);
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_dusmm rsb_wp_BLAS_dusmm
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_dusmm(enum blas_order_type order, enum blas_trans_type transA,
+   int nrhs, double alpha, blas_sparse_matrix A, const double * b, int ldb,
+       double *  c, int ldc)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_mm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+{
+	const double beta = ((double)(1.0));
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusmm(transA,&alpha,A,b,ldb,&beta,c,ldc,nrhs,order))
+}
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_dusmm_ rsb_wp_blas_dusmm_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_dusmm_(enum blas_order_type*order,enum blas_trans_type*transA,int*nrhs,double*alpha,blas_sparse_matrix*A,const double *b,int*ldb,double *c,int*ldc,int*istat)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_mm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_dusmm(*order,*transA,*nrhs,*alpha,*A,b,*ldb,c,*ldc);
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cusmm rsb_wp_BLAS_cusmm
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cusmm(enum blas_order_type order, enum blas_trans_type transA,
+   int nrhs, const void *alpha, blas_sparse_matrix A, const void *b, int ldb,
+       void * c, int ldc)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_mm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+{
+	const float complex beta = ((float complex)(1.0));
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusmm(transA,alpha,A,b,ldb,&beta,c,ldc,nrhs,order))
+}
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cusmm_ rsb_wp_blas_cusmm_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cusmm_(enum blas_order_type*order,enum blas_trans_type*transA,int*nrhs,const void *alpha,blas_sparse_matrix*A,const void *b,int*ldb,void *c,int*ldc,int*istat)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_mm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_cusmm(*order,*transA,*nrhs,alpha,*A,b,*ldb,c,*ldc);
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zusmm rsb_wp_BLAS_zusmm
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zusmm(enum blas_order_type order, enum blas_trans_type transA,
+   int nrhs, const void *alpha, blas_sparse_matrix A, const void *b, int ldb,
+       void * c, int ldc)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_mm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+{
+	const double complex beta = ((double complex)(1.0));
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusmm(transA,alpha,A,b,ldb,&beta,c,ldc,nrhs,order))
+}
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zusmm_ rsb_wp_blas_zusmm_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zusmm_(enum blas_order_type*order,enum blas_trans_type*transA,int*nrhs,const void *alpha,blas_sparse_matrix*A,const void *b,int*ldb,void *c,int*ldc,int*istat)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_mm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_zusmm(*order,*transA,*nrhs,alpha,*A,b,*ldb,c,*ldc);
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
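+
+/*
+ Editor's sketch (illustrative): BLAS_dusmm is the multiple right-hand-side
+ analogue of BLAS_dusmv; with beta fixed to one by the wrappers above it
+ computes C := alpha*op(A)*B + C. Here nrhs, ldb, ldc and the dense arrays b
+ and c are caller-supplied placeholders, stored in the chosen order:
+
+   int istat = BLAS_dusmm(blas_colmajor, blas_no_trans, nrhs, 1.0,
+                          A, b, ldb, c, ldc);
+*/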
+
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_sussm rsb_wp_BLAS_sussm
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_sussm(enum blas_order_type order, enum blas_trans_type transT,
+               int nrhs, float alpha, blas_sparse_matrix T, float * b, int ldb)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_sm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+{
+	const float beta = ((float)(0));
+	RSB_SPB_INTERFACE_RETURN(RSB_ERROR_TO_BLAS_ERROR(rsb__do_spsm(rsb_blas_trans_to_rsb_trans(transT),&alpha,rsb__BLAS_inner_matrix_retrieve(T),nrhs,rsb_blas_order_to_rsb_order(order),&beta,b,ldb,b,ldb)))
+}
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_sussm_ rsb_wp_blas_sussm_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_sussm_(enum blas_order_type*order,enum blas_trans_type*transT,int*nrhs,float*alpha,blas_sparse_matrix*T,float *b,int*ldb,int*istat)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_sm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_sussm(*order,*transT,*nrhs,*alpha,*T,b,*ldb);
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_dussm rsb_wp_BLAS_dussm
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_dussm(enum blas_order_type order, enum blas_trans_type transT,
+               int nrhs, double alpha, blas_sparse_matrix T, double * b, int ldb)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_sm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+{
+	const double beta = ((double)(0));
+	RSB_SPB_INTERFACE_RETURN(RSB_ERROR_TO_BLAS_ERROR(rsb__do_spsm(rsb_blas_trans_to_rsb_trans(transT),&alpha,rsb__BLAS_inner_matrix_retrieve(T),nrhs,rsb_blas_order_to_rsb_order(order),&beta,b,ldb,b,ldb)))
+}
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_dussm_ rsb_wp_blas_dussm_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_dussm_(enum blas_order_type*order,enum blas_trans_type*transT,int*nrhs,double*alpha,blas_sparse_matrix*T,double *b,int*ldb,int*istat)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_sm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_dussm(*order,*transT,*nrhs,*alpha,*T,b,*ldb);
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cussm rsb_wp_BLAS_cussm
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cussm(enum blas_order_type order, enum blas_trans_type transT,
+               int nrhs, const void *alpha, blas_sparse_matrix T, void *b, int ldb)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_sm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+{
+	const float complex beta = ((float complex)(0));
+	RSB_SPB_INTERFACE_RETURN(RSB_ERROR_TO_BLAS_ERROR(rsb__do_spsm(rsb_blas_trans_to_rsb_trans(transT),alpha,rsb__BLAS_inner_matrix_retrieve(T),nrhs,rsb_blas_order_to_rsb_order(order),&beta,b,ldb,b,ldb)))
+}
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cussm_ rsb_wp_blas_cussm_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cussm_(enum blas_order_type*order,enum blas_trans_type*transT,int*nrhs,const void *alpha,blas_sparse_matrix*T,void *b,int*ldb,int*istat)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_sm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_cussm(*order,*transT,*nrhs,alpha,*T,b,*ldb);
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zussm rsb_wp_BLAS_zussm
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zussm(enum blas_order_type order, enum blas_trans_type transT,
+               int nrhs, const void *alpha, blas_sparse_matrix T, void *b, int ldb)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_sm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+{
+	const double complex beta = ((double complex)(0));
+	RSB_SPB_INTERFACE_RETURN(RSB_ERROR_TO_BLAS_ERROR(rsb__do_spsm(rsb_blas_trans_to_rsb_trans(transT),alpha,rsb__BLAS_inner_matrix_retrieve(T),nrhs,rsb_blas_order_to_rsb_order(order),&beta,b,ldb,b,ldb)))
+}
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zussm_ rsb_wp_blas_zussm_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zussm_(enum blas_order_type*order,enum blas_trans_type*transT,int*nrhs,const void *alpha,blas_sparse_matrix*T,void *b,int*ldb,int*istat)
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_sm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_zussm(*order,*transT,*nrhs,alpha,*T,b,*ldb);
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
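+
+/*
+ Editor's sketch (illustrative): BLAS_dussm is the multiple right-hand-side
+ triangular solve; the wrappers above pass b as both input and output to
+ rsb__do_spsm, so b is overwritten with the solution of op(T)*X = alpha*B.
+ Again nrhs, ldb and b are caller-supplied placeholders:
+
+   int istat = BLAS_dussm(blas_colmajor, blas_no_trans, nrhs, 1.0,
+                          T, b, ldb);
+*/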
+
+
+               /* Handle Management Routines */
+               /*             +              */
+               /* Creation Routines */
+               /*             +              */
+               /* Insertion Routines */
+               /*             +              */
+               /* Completion of Construction Routines */
+               /*             +              */
+               /* Matrix Property Routines */
+               /*             +              */
+               /* Destruction Routine */
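+
+/*
+ Editor's sketch (illustrative, not upstream generated code): the routines
+ below follow the usual Sparse BLAS construction cycle of begin, optional
+ property setting, entry insertion and end. A minimal double precision
+ example, assuming the standard property routine BLAS_ussp declared in
+ blas_sparse.h and the zero-on-success convention:
+
+   blas_sparse_matrix A = BLAS_duscr_begin(3, 3);
+   BLAS_ussp(A, blas_one_base);
+   const double val[] = { 1.0, 2.0, 3.0 };
+   const int ia[] = { 1, 2, 3 };
+   const int ja[] = { 1, 2, 3 };
+   BLAS_duscr_insert_entries(A, 3, val, ia, ja);
+   int istat = BLAS_duscr_end(A);
+
+ After BLAS_duscr_end the handle is ready for the computational routines
+ above.
+*/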
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_suscr_begin rsb_wp_BLAS_suscr_begin
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+blas_sparse_matrix BLAS_suscr_begin( int m, int n )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_begin_msg\rsb_spblas_return_mtx_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN_HDL(rsb__BLAS_Xuscr_begin(m,n,RSB_NUMERICAL_TYPE_FLOAT ))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_suscr_begin_ rsb_wp_blas_suscr_begin_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_suscr_begin_( int*m,int*n,blas_sparse_matrix*A,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_begin_msg\rsb_spblasl2_Ap_msg \rsb_spblas_istat_msg \rsb_spblas_set_mtx_msg
+         */
+
+	int istatv = BLAS_suscr_begin(*m,*n ); /* istatv receives a matrix handle here, not a status code */
+	RSB_SET_IF_NOT_NULL(A,istatv);
+	if(A && *A && (*A != RSB_BLAS_INVALID_VAL))
+	{
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_NO_ERROR);
+		rsb__BLAS_ussp(*A,blas_one_base);
+	}
+	else
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_ERROR);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_duscr_begin rsb_wp_BLAS_duscr_begin
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+blas_sparse_matrix BLAS_duscr_begin( int m, int n )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_begin_msg\rsb_spblas_return_mtx_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN_HDL(rsb__BLAS_Xuscr_begin(m,n,RSB_NUMERICAL_TYPE_DOUBLE ))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_duscr_begin_ rsb_wp_blas_duscr_begin_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_duscr_begin_( int*m,int*n,blas_sparse_matrix*A,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_begin_msg\rsb_spblasl2_Ap_msg \rsb_spblas_istat_msg \rsb_spblas_set_mtx_msg
+         */
+
+	int istatv = BLAS_duscr_begin(*m,*n );
+	RSB_SET_IF_NOT_NULL(A,istatv);
+	if(A && *A && (*A != RSB_BLAS_INVALID_VAL))
+	{
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_NO_ERROR);
+		rsb__BLAS_ussp(*A,blas_one_base);
+	}
+	else
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_ERROR);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cuscr_begin rsb_wp_BLAS_cuscr_begin
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+blas_sparse_matrix BLAS_cuscr_begin( int m, int n )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_begin_msg\rsb_spblas_return_mtx_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN_HDL(rsb__BLAS_Xuscr_begin(m,n,RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cuscr_begin_ rsb_wp_blas_cuscr_begin_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cuscr_begin_( int*m,int*n,blas_sparse_matrix*A,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_begin_msg\rsb_spblasl2_Ap_msg \rsb_spblas_istat_msg \rsb_spblas_set_mtx_msg
+         */
+
+	int istatv = BLAS_cuscr_begin(*m,*n );
+	RSB_SET_IF_NOT_NULL(A,istatv);
+	if(A && *A && (*A != RSB_BLAS_INVALID_VAL))
+	{
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_NO_ERROR);
+		rsb__BLAS_ussp(*A,blas_one_base);
+	}
+	else
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_ERROR);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zuscr_begin rsb_wp_BLAS_zuscr_begin
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+blas_sparse_matrix BLAS_zuscr_begin( int m, int n )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_begin_msg\rsb_spblas_return_mtx_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN_HDL(rsb__BLAS_Xuscr_begin(m,n,RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zuscr_begin_ rsb_wp_blas_zuscr_begin_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zuscr_begin_( int*m,int*n,blas_sparse_matrix*A,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_begin_msg\rsb_spblasl2_Ap_msg \rsb_spblas_istat_msg \rsb_spblas_set_mtx_msg
+         */
+
+	int istatv = BLAS_zuscr_begin(*m,*n );
+	RSB_SET_IF_NOT_NULL(A,istatv);
+	if(A && *A && (*A != RSB_BLAS_INVALID_VAL))
+	{
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_NO_ERROR);
+		rsb__BLAS_ussp(*A,blas_one_base);
+	}
+	else
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_ERROR);
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_suscr_block_begin rsb_wp_BLAS_suscr_block_begin
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+blas_sparse_matrix BLAS_suscr_block_begin( int Mb, int Nb, int k, int l )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_block_msg\rsb_spblas_return_mtx_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN_HDL(rsb__BLAS_Xuscr_block_begin(Mb,Nb,k,l,RSB_NUMERICAL_TYPE_FLOAT ))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_suscr_block_begin_ rsb_wp_blas_suscr_block_begin_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_suscr_block_begin_( int*Mb,int*Nb,int*k,int*l,blas_sparse_matrix*A,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_block_msg\rsb_spblasl2_Ap_msg \rsb_spblas_istat_msg \rsb_spblas_set_mtx_msg
+         */
+
+	int istatv = BLAS_suscr_block_begin(*Mb,*Nb,*k,*l );
+	RSB_SET_IF_NOT_NULL(A,istatv);
+	if(A && *A && (*A != RSB_BLAS_INVALID_VAL))
+	{
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_NO_ERROR);
+		rsb__BLAS_ussp(*A,blas_one_base);
+	}
+	else
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_ERROR);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_duscr_block_begin rsb_wp_BLAS_duscr_block_begin
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+blas_sparse_matrix BLAS_duscr_block_begin( int Mb, int Nb, int k, int l )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_block_msg\rsb_spblas_return_mtx_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN_HDL(rsb__BLAS_Xuscr_block_begin(Mb,Nb,k,l,RSB_NUMERICAL_TYPE_DOUBLE ))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_duscr_block_begin_ rsb_wp_blas_duscr_block_begin_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_duscr_block_begin_( int*Mb,int*Nb,int*k,int*l,blas_sparse_matrix*A,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_block_msg\rsb_spblasl2_Ap_msg \rsb_spblas_istat_msg \rsb_spblas_set_mtx_msg
+         */
+
+	int istatv = BLAS_duscr_block_begin(*Mb,*Nb,*k,*l );
+	RSB_SET_IF_NOT_NULL(A,istatv);
+	if(A && *A && (*A != RSB_BLAS_INVALID_VAL))
+	{
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_NO_ERROR);
+		rsb__BLAS_ussp(*A,blas_one_base);
+	}
+	else
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_ERROR);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cuscr_block_begin rsb_wp_BLAS_cuscr_block_begin
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+blas_sparse_matrix BLAS_cuscr_block_begin( int Mb, int Nb, int k, int l )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_block_msg\rsb_spblas_return_mtx_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN_HDL(rsb__BLAS_Xuscr_block_begin(Mb,Nb,k,l,RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cuscr_block_begin_ rsb_wp_blas_cuscr_block_begin_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cuscr_block_begin_( int*Mb,int*Nb,int*k,int*l,blas_sparse_matrix*A,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_block_msg\rsb_spblasl2_Ap_msg \rsb_spblas_istat_msg \rsb_spblas_set_mtx_msg
+         */
+
+	int istatv = BLAS_cuscr_block_begin(*Mb,*Nb,*k,*l );
+	RSB_SET_IF_NOT_NULL(A,istatv);
+	if(A && *A && (*A != RSB_BLAS_INVALID_VAL))
+	{
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_NO_ERROR);
+		rsb__BLAS_ussp(*A,blas_one_base);
+	}
+	else
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_ERROR);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zuscr_block_begin rsb_wp_BLAS_zuscr_block_begin
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+blas_sparse_matrix BLAS_zuscr_block_begin( int Mb, int Nb, int k, int l )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_block_msg\rsb_spblas_return_mtx_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN_HDL(rsb__BLAS_Xuscr_block_begin(Mb,Nb,k,l,RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zuscr_block_begin_ rsb_wp_blas_zuscr_block_begin_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zuscr_block_begin_( int*Mb,int*Nb,int*k,int*l,blas_sparse_matrix*A,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_block_msg\rsb_spblasl2_Ap_msg \rsb_spblas_istat_msg \rsb_spblas_set_mtx_msg
+         */
+
+	int istatv = BLAS_zuscr_block_begin(*Mb,*Nb,*k,*l );
+	RSB_SET_IF_NOT_NULL(A,istatv);
+	if(A && *A && (*A != RSB_BLAS_INVALID_VAL))
+	{
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_NO_ERROR);
+		rsb__BLAS_ussp(*A,blas_one_base);
+	}
+	else
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_ERROR);
+}
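+
+/*
+ Editor's sketch (illustrative): per the Sparse BLAS interface, the block
+ variant declares an (Mb*k) x (Nb*l) matrix made of Mb x Nb blocks of k x l
+ entries each, e.g. a 6 x 4 matrix of 2 x 2 blocks:
+
+   blas_sparse_matrix A = BLAS_duscr_block_begin(3, 2, 2, 2);
+
+ Construction then proceeds exactly as after BLAS_duscr_begin.
+*/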
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_suscr_variable_block_begin rsb_wp_BLAS_suscr_variable_block_begin
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+blas_sparse_matrix BLAS_suscr_variable_block_begin( int Mb, int Nb,
+		const int *K, const int *L )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_vbr_msg\rsb_spblas_return_mtx_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN_HDL(rsb__BLAS_Xuscr_variable_block_begin(Mb,Nb,K,L,RSB_NUMERICAL_TYPE_FLOAT ))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_suscr_variable_block_begin_ rsb_wp_blas_suscr_variable_block_begin_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_suscr_variable_block_begin_( int*Mb,int*Nb,const int *K,const int *L,blas_sparse_matrix*A,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_vbr_msg\rsb_spblasl2_Ap_msg \rsb_spblas_istat_msg \rsb_spblas_set_mtx_msg
+         */
+
+	int istatv = BLAS_suscr_variable_block_begin(*Mb,*Nb,K,L );
+	RSB_SET_IF_NOT_NULL(A,istatv);
+	if(A && *A && (*A != RSB_BLAS_INVALID_VAL))
+	{
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_NO_ERROR);
+		rsb__BLAS_ussp(*A,blas_one_base);
+	}
+	else
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_ERROR);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_duscr_variable_block_begin rsb_wp_BLAS_duscr_variable_block_begin
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+blas_sparse_matrix BLAS_duscr_variable_block_begin( int Mb, int Nb,
+		const int *K, const int *L )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_vbr_msg\rsb_spblas_return_mtx_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN_HDL(rsb__BLAS_Xuscr_variable_block_begin(Mb,Nb,K,L,RSB_NUMERICAL_TYPE_DOUBLE ))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_duscr_variable_block_begin_ rsb_wp_blas_duscr_variable_block_begin_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_duscr_variable_block_begin_( int*Mb,int*Nb,const int *K,const int *L,blas_sparse_matrix*A,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_vbr_msg\rsb_spblasl2_Ap_msg \rsb_spblas_istat_msg \rsb_spblas_set_mtx_msg
+         */
+
+	int istatv = BLAS_duscr_variable_block_begin(*Mb,*Nb,K,L );
+	RSB_SET_IF_NOT_NULL(A,istatv);
+	if(A && *A && (*A != RSB_BLAS_INVALID_VAL))
+	{
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_NO_ERROR);
+		rsb__BLAS_ussp(*A,blas_one_base);
+	}
+	else
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_ERROR);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cuscr_variable_block_begin rsb_wp_BLAS_cuscr_variable_block_begin
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+blas_sparse_matrix BLAS_cuscr_variable_block_begin( int Mb, int Nb,
+		const int *K, const int *L )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_vbr_msg\rsb_spblas_return_mtx_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN_HDL(rsb__BLAS_Xuscr_variable_block_begin(Mb,Nb,K,L,RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cuscr_variable_block_begin_ rsb_wp_blas_cuscr_variable_block_begin_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cuscr_variable_block_begin_( int*Mb,int*Nb,const int *K,const int *L,blas_sparse_matrix*A,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_vbr_msg\rsb_spblasl2_Ap_msg \rsb_spblas_istat_msg \rsb_spblas_set_mtx_msg
+         */
+
+	int istatv = BLAS_cuscr_variable_block_begin(*Mb,*Nb,K,L );
+	RSB_SET_IF_NOT_NULL(A,istatv);
+	if(A && *A && (*A != RSB_BLAS_INVALID_VAL))
+	{
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_NO_ERROR);
+		rsb__BLAS_ussp(*A,blas_one_base);
+	}
+	else
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_ERROR);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zuscr_variable_block_begin rsb_wp_BLAS_zuscr_variable_block_begin
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+blas_sparse_matrix BLAS_zuscr_variable_block_begin( int Mb, int Nb,
+		const int *K, const int *L )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_vbr_msg\rsb_spblas_return_mtx_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN_HDL(rsb__BLAS_Xuscr_variable_block_begin(Mb,Nb,K,L,RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zuscr_variable_block_begin_ rsb_wp_blas_zuscr_variable_block_begin_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zuscr_variable_block_begin_( int*Mb,int*Nb,const int *K,const int *L,blas_sparse_matrix*A,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_vbr_msg\rsb_spblasl2_Ap_msg \rsb_spblas_istat_msg \rsb_spblas_set_mtx_msg
+         */
+
+	int istatv = BLAS_zuscr_variable_block_begin(*Mb,*Nb,K,L );
+	RSB_SET_IF_NOT_NULL(A,istatv);
+	if(A && *A && (*A != RSB_BLAS_INVALID_VAL))
+	{
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_NO_ERROR);
+		rsb__BLAS_ussp(*A,blas_one_base);
+	}
+	else
+		RSB_SET_IF_NOT_NULL(istat,RSB_BLAS_ERROR);
+}
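+
+/*
+ Editor's sketch (illustrative): in the variable-block variant, K and L list
+ the individual block row and column dimensions, so the arrays below declare
+ a (2+3) x (1+4) = 5 x 5 matrix partitioned into 2 x 2 blocks of varying
+ size:
+
+   const int K[] = { 2, 3 };
+   const int L[] = { 1, 4 };
+   blas_sparse_matrix A = BLAS_duscr_variable_block_begin(2, 2, K, L);
+*/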
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_suscr_end rsb_wp_BLAS_suscr_end
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_suscr_end( blas_sparse_matrix A )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_end_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_end(A))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_suscr_end_ rsb_wp_blas_suscr_end_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_suscr_end_( blas_sparse_matrix*A,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_end_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_suscr_end(*A );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_duscr_end rsb_wp_BLAS_duscr_end
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_duscr_end( blas_sparse_matrix A )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_end_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_end(A))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_duscr_end_ rsb_wp_blas_duscr_end_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_duscr_end_( blas_sparse_matrix*A,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_end_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_duscr_end(*A );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cuscr_end rsb_wp_BLAS_cuscr_end
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cuscr_end( blas_sparse_matrix A )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_end_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_end(A))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cuscr_end_ rsb_wp_blas_cuscr_end_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cuscr_end_( blas_sparse_matrix*A,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_end_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_cuscr_end(*A );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zuscr_end rsb_wp_BLAS_zuscr_end
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zuscr_end( blas_sparse_matrix A )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_end_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_end(A))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zuscr_end_ rsb_wp_blas_zuscr_end_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zuscr_end_( blas_sparse_matrix*A,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_end_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_zuscr_end(*A );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_suscr_insert_entry rsb_wp_BLAS_suscr_insert_entry
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_suscr_insert_entry( blas_sparse_matrix A, float  val, int i, int j )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_entry_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_entry(A,&val,i,j))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_suscr_insert_entry_ rsb_wp_blas_suscr_insert_entry_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_suscr_insert_entry_( blas_sparse_matrix*A,float*val,int*i,int*j,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_entry_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_suscr_insert_entry(*A,*val,*i,*j );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_duscr_insert_entry rsb_wp_BLAS_duscr_insert_entry
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_duscr_insert_entry( blas_sparse_matrix A, double  val, int i, int j )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_entry_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_entry(A,&val,i,j))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_duscr_insert_entry_ rsb_wp_blas_duscr_insert_entry_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_duscr_insert_entry_( blas_sparse_matrix*A,double*val,int*i,int*j,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_entry_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_duscr_insert_entry(*A,*val,*i,*j );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cuscr_insert_entry rsb_wp_BLAS_cuscr_insert_entry
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cuscr_insert_entry( blas_sparse_matrix A, const void * val, int i, int j )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_entry_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_entry(A,val,i,j))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cuscr_insert_entry_ rsb_wp_blas_cuscr_insert_entry_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cuscr_insert_entry_( blas_sparse_matrix*A,const void *val,int*i,int*j,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_entry_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_cuscr_insert_entry(*A,val,*i,*j );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zuscr_insert_entry rsb_wp_BLAS_zuscr_insert_entry
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zuscr_insert_entry( blas_sparse_matrix A, const void * val, int i, int j )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_entry_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_entry(A,val,i,j))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zuscr_insert_entry_ rsb_wp_blas_zuscr_insert_entry_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zuscr_insert_entry_( blas_sparse_matrix*A,const void *val,int*i,int*j,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_entry_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_zuscr_insert_entry(*A,val,*i,*j );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
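+
+/*
+ * Usage sketch (editor's addition; not part of the generated sources):
+ * point-wise assembly through the typed insert_entry wrappers above. A handle
+ * from BLAS_duscr_begin() (declared with the rest of this interface) is
+ * assumed, and error checking is omitted for brevity:
+ *
+ *   blas_sparse_matrix A = BLAS_duscr_begin(2, 2);
+ *   BLAS_duscr_insert_entry(A, 11.0, 0, 0);
+ *   BLAS_duscr_insert_entry(A, 22.0, 1, 1);
+ *   BLAS_duscr_end(A);
+ */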
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_suscr_insert_entries rsb_wp_BLAS_suscr_insert_entries
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_suscr_insert_entries( blas_sparse_matrix A, int nnz, const float * val,
+                            const int *indx, const int *jndx )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_entries_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_entries(A,nnz,val,indx,jndx))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_suscr_insert_entries_ rsb_wp_blas_suscr_insert_entries_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_suscr_insert_entries_( blas_sparse_matrix*A,int*nnz,const float *val,const int *indx,const int *jndx,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_entries_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_suscr_insert_entries(*A,*nnz,val,indx,jndx );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_duscr_insert_entries rsb_wp_BLAS_duscr_insert_entries
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_duscr_insert_entries( blas_sparse_matrix A, int nnz, const double * val,
+                            const int *indx, const int *jndx )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_entries_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_entries(A,nnz,val,indx,jndx))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_duscr_insert_entries_ rsb_wp_blas_duscr_insert_entries_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_duscr_insert_entries_( blas_sparse_matrix*A,int*nnz,const double *val,const int *indx,const int *jndx,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_entries_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_duscr_insert_entries(*A,*nnz,val,indx,jndx );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cuscr_insert_entries rsb_wp_BLAS_cuscr_insert_entries
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cuscr_insert_entries( blas_sparse_matrix A, int nnz, const void *val,
+                            const int *indx, const int *jndx )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_entries_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_entries(A,nnz,val,indx,jndx))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cuscr_insert_entries_ rsb_wp_blas_cuscr_insert_entries_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cuscr_insert_entries_( blas_sparse_matrix*A,int*nnz,const void *val,const int *indx,const int *jndx,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_entries_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_cuscr_insert_entries(*A,*nnz,val,indx,jndx );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zuscr_insert_entries rsb_wp_BLAS_zuscr_insert_entries
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zuscr_insert_entries( blas_sparse_matrix A, int nnz, const void *val,
+                            const int *indx, const int *jndx )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_entries_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_entries(A,nnz,val,indx,jndx))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zuscr_insert_entries_ rsb_wp_blas_zuscr_insert_entries_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zuscr_insert_entries_( blas_sparse_matrix*A,int*nnz,const void *val,const int *indx,const int *jndx,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_entries_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_zuscr_insert_entries(*A,*nnz,val,indx,jndx );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
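+
+/*
+ * Usage sketch (editor's addition; not part of the generated sources):
+ * batch insertion in coordinate (COO) form via the insert_entries wrappers
+ * above, assuming a handle A still in its assembly phase:
+ *
+ *   const double va[] = { 1.0, 2.0, 3.0 };
+ *   const int    ia[] = { 0, 1, 2 };
+ *   const int    ja[] = { 0, 1, 2 };
+ *   BLAS_duscr_insert_entries(A, 3, va, ia, ja);
+ */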
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_suscr_insert_col rsb_wp_BLAS_suscr_insert_col
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_suscr_insert_col( blas_sparse_matrix A, int j, int nnz,
+                           const float * val, const int *indx )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_col_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_col(A,j,nnz,val,indx))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_suscr_insert_col_ rsb_wp_blas_suscr_insert_col_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_suscr_insert_col_( blas_sparse_matrix*A,int*j,int*nnz,const float *val,const int *indx,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_col_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_suscr_insert_col(*A,*j,*nnz,val,indx );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_duscr_insert_col rsb_wp_BLAS_duscr_insert_col
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_duscr_insert_col( blas_sparse_matrix A, int j, int nnz,
+                           const double * val, const int *indx )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_col_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_col(A,j,nnz,val,indx))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_duscr_insert_col_ rsb_wp_blas_duscr_insert_col_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_duscr_insert_col_( blas_sparse_matrix*A,int*j,int*nnz,const double *val,const int *indx,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_col_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_duscr_insert_col(*A,*j,*nnz,val,indx );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cuscr_insert_col rsb_wp_BLAS_cuscr_insert_col
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cuscr_insert_col( blas_sparse_matrix A, int j, int nnz,
+                           const void *val, const int *indx )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_col_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_col(A,j,nnz,val,indx))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cuscr_insert_col_ rsb_wp_blas_cuscr_insert_col_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cuscr_insert_col_( blas_sparse_matrix*A,int*j,int*nnz,const void *val,const int *indx,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_col_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_cuscr_insert_col(*A,*j,*nnz,val,indx );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zuscr_insert_col rsb_wp_BLAS_zuscr_insert_col
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zuscr_insert_col( blas_sparse_matrix A, int j, int nnz,
+                           const void *val, const int *indx )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_col_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_col(A,j,nnz,val,indx))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zuscr_insert_col_ rsb_wp_blas_zuscr_insert_col_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zuscr_insert_col_( blas_sparse_matrix*A,int*j,int*nnz,const void *val,const int *indx,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_col_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_zuscr_insert_col(*A,*j,*nnz,val,indx );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
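+
+/*
+ * Usage sketch (editor's addition; not part of the generated sources):
+ * inserting a whole sparse column with the insert_col wrappers above; indx
+ * holds the row indices of the nnz column entries:
+ *
+ *   const double cv[] = { 1.0, 4.0 };
+ *   const int    ci[] = { 0, 3 };
+ *   BLAS_duscr_insert_col(A, 2, 2, cv, ci);   (column 2, two nonzeroes)
+ */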
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_suscr_insert_row rsb_wp_BLAS_suscr_insert_row
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_suscr_insert_row( blas_sparse_matrix A, int i, int nnz,
+                           const float * val, const int *indx )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_row_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_row(A,i,nnz,val,indx))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_suscr_insert_row_ rsb_wp_blas_suscr_insert_row_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_suscr_insert_row_( blas_sparse_matrix*A,int*i,int*nnz,const float *val,const int *indx,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_row_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_suscr_insert_row(*A,*i,*nnz,val,indx );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_duscr_insert_row rsb_wp_BLAS_duscr_insert_row
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_duscr_insert_row( blas_sparse_matrix A, int i, int nnz,
+                           const double * val, const int *indx )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_row_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_row(A,i,nnz,val,indx))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_duscr_insert_row_ rsb_wp_blas_duscr_insert_row_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_duscr_insert_row_( blas_sparse_matrix*A,int*i,int*nnz,const double *val,const int *indx,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_row_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_duscr_insert_row(*A,*i,*nnz,val,indx );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cuscr_insert_row rsb_wp_BLAS_cuscr_insert_row
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cuscr_insert_row( blas_sparse_matrix A, int i, int nnz,
+                           const void *val, const int *indx )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_row_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_row(A,i,nnz,val,indx))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cuscr_insert_row_ rsb_wp_blas_cuscr_insert_row_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cuscr_insert_row_( blas_sparse_matrix*A,int*i,int*nnz,const void *val,const int *indx,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_row_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_cuscr_insert_row(*A,*i,*nnz,val,indx );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zuscr_insert_row rsb_wp_BLAS_zuscr_insert_row
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zuscr_insert_row( blas_sparse_matrix A, int i, int nnz,
+                           const void *val, const int *indx )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_row_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_row(A,i,nnz,val,indx))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zuscr_insert_row_ rsb_wp_blas_zuscr_insert_row_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zuscr_insert_row_( blas_sparse_matrix*A,int*i,int*nnz,const void *val,const int *indx,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_row_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_zuscr_insert_row(*A,*i,*nnz,val,indx );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
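+
+/*
+ * Usage sketch (editor's addition; not part of the generated sources):
+ * the row-wise counterpart of insert_col; here indx holds the column indices
+ * of the nnz entries of row i:
+ *
+ *   const double rv[] = { 5.0, 6.0 };
+ *   const int    rj[] = { 1, 2 };
+ *   BLAS_duscr_insert_row(A, 0, 2, rv, rj);   (row 0, two nonzeroes)
+ */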
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_suscr_insert_clique rsb_wp_BLAS_suscr_insert_clique
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_suscr_insert_clique( blas_sparse_matrix A, const int k, const int l,
+                       const float * val, const int row_stride,
+                       const int col_stride, const int *indx,
+                       const int *jndx )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_clique_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_clique(A,k,l,val,row_stride,col_stride,indx,jndx))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_suscr_insert_clique_ rsb_wp_blas_suscr_insert_clique_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_suscr_insert_clique_( blas_sparse_matrix*A,const int*k,const int*l,const float *val,const int*row_stride,const int*col_stride,const int *indx,const int *jndx,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_clique_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_suscr_insert_clique(*A,*k,*l,val,*row_stride,*col_stride,indx,jndx );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_duscr_insert_clique rsb_wp_BLAS_duscr_insert_clique
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_duscr_insert_clique( blas_sparse_matrix A, const int k, const int l,
+                       const double * val, const int row_stride,
+                       const int col_stride, const int *indx,
+                       const int *jndx )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_clique_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_clique(A,k,l,val,row_stride,col_stride,indx,jndx))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_duscr_insert_clique_ rsb_wp_blas_duscr_insert_clique_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_duscr_insert_clique_( blas_sparse_matrix*A,const int*k,const int*l,const double *val,const int*row_stride,const int*col_stride,const int *indx,const int *jndx,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_clique_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_duscr_insert_clique(*A,*k,*l,val,*row_stride,*col_stride,indx,jndx );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cuscr_insert_clique rsb_wp_BLAS_cuscr_insert_clique
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cuscr_insert_clique( blas_sparse_matrix A, const int k, const int l,
+                       const void *val, const int row_stride,
+                       const int col_stride, const int *indx,
+                       const int *jndx )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_clique_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_clique(A,k,l,val,row_stride,col_stride,indx,jndx))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cuscr_insert_clique_ rsb_wp_blas_cuscr_insert_clique_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cuscr_insert_clique_( blas_sparse_matrix*A,const int*k,const int*l,const void *val,const int*row_stride,const int*col_stride,const int *indx,const int *jndx,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_clique_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_cuscr_insert_clique(*A,*k,*l,val,*row_stride,*col_stride,indx,jndx );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zuscr_insert_clique rsb_wp_BLAS_zuscr_insert_clique
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zuscr_insert_clique( blas_sparse_matrix A, const int k, const int l,
+                       const void *val, const int row_stride,
+                       const int col_stride, const int *indx,
+                       const int *jndx )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_clique_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_clique(A,k,l,val,row_stride,col_stride,indx,jndx))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zuscr_insert_clique_ rsb_wp_blas_zuscr_insert_clique_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zuscr_insert_clique_( blas_sparse_matrix*A,const int*k,const int*l,const void *val,const int*row_stride,const int*col_stride,const int *indx,const int *jndx,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_clique_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_zuscr_insert_clique(*A,*k,*l,val,*row_stride,*col_stride,indx,jndx );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
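+
+/*
+ * Usage sketch (editor's addition; not part of the generated sources):
+ * inserting a k-by-l clique, i.e. a small dense submatrix scattered to the
+ * rows in indx and the columns in jndx; row_stride/col_stride describe the
+ * layout of val (row-major below):
+ *
+ *   const double cva[] = { 1.0, 2.0,
+ *                          3.0, 4.0 };             (2x2 clique, row-major)
+ *   const int ci[] = { 0, 2 }, cj[] = { 1, 3 };
+ *   BLAS_duscr_insert_clique(A, 2, 2, cva, 2, 1, ci, cj);
+ */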
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_suscr_insert_block rsb_wp_BLAS_suscr_insert_block
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_suscr_insert_block( blas_sparse_matrix A, const float * val,
+                        int row_stride, int col_stride, int i, int j )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_block_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_block(A,val,row_stride,col_stride,i,j))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_suscr_insert_block_ rsb_wp_blas_suscr_insert_block_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_suscr_insert_block_( blas_sparse_matrix*A,const float *val,int*row_stride,int*col_stride,int*i,int*j,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_block_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_suscr_insert_block(*A,val,*row_stride,*col_stride,*i,*j );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_duscr_insert_block rsb_wp_BLAS_duscr_insert_block
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_duscr_insert_block( blas_sparse_matrix A, const double * val,
+                        int row_stride, int col_stride, int i, int j )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_block_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_block(A,val,row_stride,col_stride,i,j))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_duscr_insert_block_ rsb_wp_blas_duscr_insert_block_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_duscr_insert_block_( blas_sparse_matrix*A,const double *val,int*row_stride,int*col_stride,int*i,int*j,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_block_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_duscr_insert_block(*A,val,*row_stride,*col_stride,*i,*j );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cuscr_insert_block rsb_wp_BLAS_cuscr_insert_block
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cuscr_insert_block( blas_sparse_matrix A, const void *val,
+                        int row_stride, int col_stride, int i, int j )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_block_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_block(A,val,row_stride,col_stride,i,j))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cuscr_insert_block_ rsb_wp_blas_cuscr_insert_block_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cuscr_insert_block_( blas_sparse_matrix*A,const void *val,int*row_stride,int*col_stride,int*i,int*j,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_block_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_cuscr_insert_block(*A,val,*row_stride,*col_stride,*i,*j );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zuscr_insert_block rsb_wp_BLAS_zuscr_insert_block
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zuscr_insert_block( blas_sparse_matrix A, const void *val,
+                        int row_stride, int col_stride, int i, int j )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_block_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_insert_block(A,val,row_stride,col_stride,i,j))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zuscr_insert_block_ rsb_wp_blas_zuscr_insert_block_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zuscr_insert_block_( blas_sparse_matrix*A,const void *val,int*row_stride,int*col_stride,int*i,int*j,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_insert_block_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_zuscr_insert_block(*A,val,*row_stride,*col_stride,*i,*j );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
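+
+/*
+ * Usage sketch (editor's addition; not part of the generated sources):
+ * inserting one dense block at block coordinates (i,j). This presumes a
+ * handle created with the block-oriented constructor of the Sparse BLAS
+ * standard (BLAS_duscr_block_begin), so the block dimensions are fixed:
+ *
+ *   const double bv[] = { 1.0, 2.0,
+ *                         3.0, 4.0 };              (2x2 block, row-major)
+ *   BLAS_duscr_insert_block(A, bv, 2, 1, 0, 0);
+ */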
+
+
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_uscr_end rsb_wp_BLAS_uscr_end
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_uscr_end( blas_sparse_matrix A )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_end_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xuscr_end(A))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_uscr_end_ rsb_wp_blas_uscr_end_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_uscr_end_( blas_sparse_matrix*A,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_cr_end_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_uscr_end(*A );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_usds rsb_wp_BLAS_usds
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_usds( blas_sparse_matrix A )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_ds_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusds(A))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_usds_ rsb_wp_blas_usds_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_usds_( blas_sparse_matrix*A,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2_ds_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_usds(*A );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
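+
+/*
+ * Usage sketch (editor's addition; not part of the generated sources):
+ * the full handle life cycle tying the creation, end and destroy routines
+ * together; nr and nc are placeholder dimensions:
+ *
+ *   blas_sparse_matrix A = BLAS_duscr_begin(nr, nc);
+ *   ... insertion calls as in the sketches above ...
+ *   BLAS_uscr_end(A);   (finalize: only now may computational routines use A)
+ *   ... computational calls ...
+ *   BLAS_usds(A);       (release the handle and all associated storage)
+ */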
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_susrows_scale rsb_wp_BLAS_susrows_scale
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_susrows_scale( blas_sparse_matrix A,const float *  d, enum blas_trans_type trans )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usrows_scale_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusrows_scale(A,d,trans))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_susrows_scale_ rsb_wp_blas_susrows_scale_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_susrows_scale_( blas_sparse_matrix*A,const float *d,enum blas_trans_type*trans,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usrows_scale_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_susrows_scale(*A,d,*trans );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_dusrows_scale rsb_wp_BLAS_dusrows_scale
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_dusrows_scale( blas_sparse_matrix A,const double *  d, enum blas_trans_type trans )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usrows_scale_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusrows_scale(A,d,trans))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_dusrows_scale_ rsb_wp_blas_dusrows_scale_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_dusrows_scale_( blas_sparse_matrix*A,const double *d,enum blas_trans_type*trans,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usrows_scale_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_dusrows_scale(*A,d,*trans );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cusrows_scale rsb_wp_BLAS_cusrows_scale
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cusrows_scale( blas_sparse_matrix A,const void * d, enum blas_trans_type trans )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usrows_scale_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusrows_scale(A,d,trans))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cusrows_scale_ rsb_wp_blas_cusrows_scale_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cusrows_scale_( blas_sparse_matrix*A,const void *d,enum blas_trans_type*trans,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usrows_scale_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_cusrows_scale(*A,d,*trans );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zusrows_scale rsb_wp_BLAS_zusrows_scale
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zusrows_scale( blas_sparse_matrix A,const void * d, enum blas_trans_type trans )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usrows_scale_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusrows_scale(A,d,trans))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zusrows_scale_ rsb_wp_blas_zusrows_scale_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zusrows_scale_( blas_sparse_matrix*A,const void *d,enum blas_trans_type*trans,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usrows_scale_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_zusrows_scale(*A,d,*trans );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
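+
+/*
+ * Usage sketch (editor's addition; not part of the generated sources):
+ * the usrows_scale routines are librsb extensions to the Sparse BLAS; they
+ * scale each row (or, under transposition, each column) of an assembled
+ * matrix by the corresponding entry of d:
+ *
+ *   const double d[] = { 2.0, 0.5, 1.0 };   (one factor per row; 3 rows assumed)
+ *   BLAS_dusrows_scale(A, d, blas_no_trans);
+ */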
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_susget_diag rsb_wp_BLAS_susget_diag
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_susget_diag( blas_sparse_matrix A,float *  d )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_diag_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_diag(A,d))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_susget_diag_ rsb_wp_blas_susget_diag_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_susget_diag_( blas_sparse_matrix*A,float *d,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_diag_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_susget_diag(*A,d );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_dusget_diag rsb_wp_BLAS_dusget_diag
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_dusget_diag( blas_sparse_matrix A,double *  d )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_diag_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_diag(A,d))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_dusget_diag_ rsb_wp_blas_dusget_diag_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_dusget_diag_( blas_sparse_matrix*A,double *d,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_diag_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_dusget_diag(*A,d );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cusget_diag rsb_wp_BLAS_cusget_diag
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cusget_diag( blas_sparse_matrix A,void * d )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_diag_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_diag(A,d))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cusget_diag_ rsb_wp_blas_cusget_diag_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cusget_diag_( blas_sparse_matrix*A,void *d,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_diag_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_cusget_diag(*A,d );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zusget_diag rsb_wp_BLAS_zusget_diag
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zusget_diag( blas_sparse_matrix A,void * d )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_diag_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_diag(A,d))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zusget_diag_ rsb_wp_blas_zusget_diag_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zusget_diag_( blas_sparse_matrix*A,void *d,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_diag_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_zusget_diag(*A,d );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
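+
+/*
+ * Usage sketch (editor's addition; not part of the generated sources):
+ * extracting the main diagonal of an assembled matrix into a caller-provided
+ * array (length 3 assumed here):
+ *
+ *   double d[3];
+ *   BLAS_dusget_diag(A, d);   (d[i] receives A(i,i))
+ */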
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_susget_rows_nnz rsb_wp_BLAS_susget_rows_nnz
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_susget_rows_nnz( blas_sparse_matrix A, int fr, int lr, int * nnzp )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_rows_nnz_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_rows_nnz(A,fr,lr,nnzp))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_susget_rows_nnz_ rsb_wp_blas_susget_rows_nnz_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_susget_rows_nnz_( blas_sparse_matrix*A,int*fr,int*lr,int *nnzp,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_rows_nnz_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_susget_rows_nnz(*A,*fr,*lr,nnzp );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_dusget_rows_nnz rsb_wp_BLAS_dusget_rows_nnz
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_dusget_rows_nnz( blas_sparse_matrix A, int fr, int lr, int * nnzp )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_rows_nnz_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_rows_nnz(A,fr,lr,nnzp))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_dusget_rows_nnz_ rsb_wp_blas_dusget_rows_nnz_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_dusget_rows_nnz_( blas_sparse_matrix*A,int*fr,int*lr,int *nnzp,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_rows_nnz_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_dusget_rows_nnz(*A,*fr,*lr,nnzp );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cusget_rows_nnz rsb_wp_BLAS_cusget_rows_nnz
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cusget_rows_nnz( blas_sparse_matrix A, int fr, int lr, int * nnzp )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_rows_nnz_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_rows_nnz(A,fr,lr,nnzp))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cusget_rows_nnz_ rsb_wp_blas_cusget_rows_nnz_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cusget_rows_nnz_( blas_sparse_matrix*A,int*fr,int*lr,int *nnzp,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_rows_nnz_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_cusget_rows_nnz(*A,*fr,*lr,nnzp );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zusget_rows_nnz rsb_wp_BLAS_zusget_rows_nnz
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zusget_rows_nnz( blas_sparse_matrix A, int fr, int lr, int * nnzp )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_rows_nnz_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_rows_nnz(A,fr,lr,nnzp))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zusget_rows_nnz_ rsb_wp_blas_zusget_rows_nnz_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zusget_rows_nnz_( blas_sparse_matrix*A,int*fr,int*lr,int *nnzp,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_rows_nnz_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_zusget_rows_nnz(*A,*fr,*lr,nnzp );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_susget_rows_sparse rsb_wp_BLAS_susget_rows_sparse
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_susget_rows_sparse( blas_sparse_matrix A, float *  VA, int * IA, int * JA, int * nnz, int fr, int lr )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_rows_sparse_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_rows_sparse(A,VA,IA,JA,nnz,fr,lr))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_susget_rows_sparse_ rsb_wp_blas_susget_rows_sparse_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_susget_rows_sparse_( blas_sparse_matrix*A,float *VA,int *IA,int *JA,int *nnz,int*fr,int*lr,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_rows_sparse_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_susget_rows_sparse(*A,VA,IA,JA,nnz,*fr,*lr );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_dusget_rows_sparse rsb_wp_BLAS_dusget_rows_sparse
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_dusget_rows_sparse( blas_sparse_matrix A, double *  VA, int * IA, int * JA, int * nnz, int fr, int lr )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_rows_sparse_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_rows_sparse(A,VA,IA,JA,nnz,fr,lr))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_dusget_rows_sparse_ rsb_wp_blas_dusget_rows_sparse_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_dusget_rows_sparse_( blas_sparse_matrix*A,double *VA,int *IA,int *JA,int *nnz,int*fr,int*lr,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_rows_sparse_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_dusget_rows_sparse(*A,VA,IA,JA,nnz,*fr,*lr );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cusget_rows_sparse rsb_wp_BLAS_cusget_rows_sparse
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cusget_rows_sparse( blas_sparse_matrix A, void * VA, int * IA, int * JA, int * nnz, int fr, int lr )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_rows_sparse_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_rows_sparse(A,VA,IA,JA,nnz,fr,lr))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cusget_rows_sparse_ rsb_wp_blas_cusget_rows_sparse_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cusget_rows_sparse_( blas_sparse_matrix*A,void *VA,int *IA,int *JA,int *nnz,int*fr,int*lr,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_rows_sparse_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_cusget_rows_sparse(*A,VA,IA,JA,nnz,*fr,*lr );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zusget_rows_sparse rsb_wp_BLAS_zusget_rows_sparse
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zusget_rows_sparse( blas_sparse_matrix A, void * VA, int * IA, int * JA, int * nnz, int fr, int lr )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_rows_sparse_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_rows_sparse(A,VA,IA,JA,nnz,fr,lr))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zusget_rows_sparse_ rsb_wp_blas_zusget_rows_sparse_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zusget_rows_sparse_( blas_sparse_matrix*A,void *VA,int *IA,int *JA,int *nnz,int*fr,int*lr,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_rows_sparse_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_zusget_rows_sparse(*A,VA,IA,JA,nnz,*fr,*lr );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
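+
+/*
+ * Usage sketch (editor's addition; not part of the generated sources):
+ * usget_rows_nnz and usget_rows_sparse are meant to be used together: the
+ * first sizes the output, the second extracts rows fr..lr (placeholder row
+ * bounds) in coordinate form; <stdlib.h> assumed, error checks omitted:
+ *
+ *   int nnz = 0;
+ *   BLAS_dusget_rows_nnz(A, fr, lr, &nnz);
+ *   double *VA = malloc(nnz * sizeof(*VA));
+ *   int *IA = malloc(nnz * sizeof(*IA)), *JA = malloc(nnz * sizeof(*JA));
+ *   BLAS_dusget_rows_sparse(A, VA, IA, JA, &nnz, fr, lr);
+ */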
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_susget_matrix_nnz rsb_wp_BLAS_susget_matrix_nnz
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_susget_matrix_nnz( blas_sparse_matrix A,int * nnz )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_matrix_nnz_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_matrix_nnz(A,nnz))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_susget_matrix_nnz_ rsb_wp_blas_susget_matrix_nnz_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_susget_matrix_nnz_( blas_sparse_matrix*A,int *nnz,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_matrix_nnz_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_susget_matrix_nnz(*A,nnz );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_dusget_matrix_nnz rsb_wp_BLAS_dusget_matrix_nnz
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_dusget_matrix_nnz( blas_sparse_matrix A,int * nnz )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_matrix_nnz_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_matrix_nnz(A,nnz))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_dusget_matrix_nnz_ rsb_wp_blas_dusget_matrix_nnz_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_dusget_matrix_nnz_( blas_sparse_matrix*A,int *nnz,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_matrix_nnz_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_dusget_matrix_nnz(*A,nnz );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cusget_matrix_nnz rsb_wp_BLAS_cusget_matrix_nnz
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cusget_matrix_nnz( blas_sparse_matrix A,int * nnz )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_matrix_nnz_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_matrix_nnz(A,nnz))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cusget_matrix_nnz_ rsb_wp_blas_cusget_matrix_nnz_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cusget_matrix_nnz_( blas_sparse_matrix*A,int *nnz,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_matrix_nnz_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_cusget_matrix_nnz(*A,nnz );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zusget_matrix_nnz rsb_wp_BLAS_zusget_matrix_nnz
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zusget_matrix_nnz( blas_sparse_matrix A,int * nnz )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_matrix_nnz_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_matrix_nnz(A,nnz))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zusget_matrix_nnz_ rsb_wp_blas_zusget_matrix_nnz_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zusget_matrix_nnz_( blas_sparse_matrix*A,int *nnz,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_matrix_nnz_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_zusget_matrix_nnz(*A,nnz );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
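+
+/*
+ * Usage sketch (editor's addition; not part of the generated sources):
+ * querying the total number of stored nonzeroes of an assembled matrix:
+ *
+ *   int nnz = 0;
+ *   BLAS_dusget_matrix_nnz(A, &nnz);
+ */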
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_susget_infinity_norm rsb_wp_BLAS_susget_infinity_norm
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_susget_infinity_norm( blas_sparse_matrix A,float * in, enum blas_trans_type trans )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_infinity_norm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_infinity_norm(A,in,trans))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_susget_infinity_norm_ rsb_wp_blas_susget_infinity_norm_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_susget_infinity_norm_( blas_sparse_matrix*A,float *in,enum blas_trans_type*trans,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_infinity_norm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_susget_infinity_norm(*A,in,*trans );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_dusget_infinity_norm rsb_wp_BLAS_dusget_infinity_norm
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_dusget_infinity_norm( blas_sparse_matrix A,double * in, enum blas_trans_type trans )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_infinity_norm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_infinity_norm(A,in,trans))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_dusget_infinity_norm_ rsb_wp_blas_dusget_infinity_norm_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_dusget_infinity_norm_( blas_sparse_matrix*A,double *in,enum blas_trans_type*trans,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_infinity_norm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_dusget_infinity_norm(*A,in,*trans );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cusget_infinity_norm rsb_wp_BLAS_cusget_infinity_norm
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cusget_infinity_norm( blas_sparse_matrix A,void *in, enum blas_trans_type trans )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_infinity_norm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_infinity_norm(A,in,trans))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cusget_infinity_norm_ rsb_wp_blas_cusget_infinity_norm_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cusget_infinity_norm_( blas_sparse_matrix*A,void *in,enum blas_trans_type*trans,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_infinity_norm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_cusget_infinity_norm(*A,in,*trans );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zusget_infinity_norm rsb_wp_BLAS_zusget_infinity_norm
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zusget_infinity_norm( blas_sparse_matrix A,void *in, enum blas_trans_type trans )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_infinity_norm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_infinity_norm(A,in,trans))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zusget_infinity_norm_ rsb_wp_blas_zusget_infinity_norm_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zusget_infinity_norm_( blas_sparse_matrix*A,void *in,enum blas_trans_type*trans,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_infinity_norm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_zusget_infinity_norm(*A,in,*trans );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
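+
+/*
+ * Usage sketch (editor's addition; not part of the generated sources):
+ * computing the infinity norm (the maximum absolute row sum), optionally of
+ * the transpose:
+ *
+ *   double nrm = 0.0;
+ *   BLAS_dusget_infinity_norm(A, &nrm, blas_no_trans);
+ */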
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_susset_elements rsb_wp_BLAS_susset_elements
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_susset_elements( blas_sparse_matrix A,const int * ia, const int *ja, const float *  va, int nnz )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usset_elements_norm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusset_elements(A,ia,ja,va,nnz))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_susset_elements_ rsb_wp_blas_susset_elements_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_susset_elements_( blas_sparse_matrix*A,const int *ia,const int *ja,const float *va,int*nnz,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usset_elements_norm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_susset_elements(*A,ia,ja,va,*nnz );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_dusset_elements rsb_wp_BLAS_dusset_elements
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_dusset_elements( blas_sparse_matrix A,const int * ia, const int *ja, const double *  va, int nnz )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usset_elements_norm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusset_elements(A,ia,ja,va,nnz))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_dusset_elements_ rsb_wp_blas_dusset_elements_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_dusset_elements_( blas_sparse_matrix*A,const int *ia,const int *ja,const double *va,int*nnz,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usset_elements_norm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_dusset_elements(*A,ia,ja,va,*nnz );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cusset_elements rsb_wp_BLAS_cusset_elements
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cusset_elements( blas_sparse_matrix A,const int * ia, const int *ja, const void * va, int nnz )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usset_elements_norm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusset_elements(A,ia,ja,va,nnz))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cusset_elements_ rsb_wp_blas_cusset_elements_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cusset_elements_( blas_sparse_matrix*A,const int *ia,const int *ja,const void *va,int*nnz,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usset_elements_norm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_cusset_elements(*A,ia,ja,va,*nnz );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zusset_elements rsb_wp_BLAS_zusset_elements
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zusset_elements( blas_sparse_matrix A,const int * ia, const int *ja, const void * va, int nnz )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usset_elements_norm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusset_elements(A,ia,ja,va,nnz))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zusset_elements_ rsb_wp_blas_zusset_elements_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zusset_elements_( blas_sparse_matrix*A,const int *ia,const int *ja,const void *va,int*nnz,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usset_elements_norm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_zusset_elements(*A,ia,ja,va,*nnz );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
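+
+/*
+ * Usage sketch (editor's addition; not part of the generated sources):
+ * updating the values stored at nnz given coordinates of an assembled
+ * matrix in one call:
+ *
+ *   const int    ia[] = { 0, 1 }, ja[] = { 1, 0 };
+ *   const double va[] = { 9.0, 8.0 };
+ *   BLAS_dusset_elements(A, ia, ja, va, 2);
+ */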
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_susset_element rsb_wp_BLAS_susset_element
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_susset_element( blas_sparse_matrix A,int i, int j, float *  v )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usset_element_norm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusset_element(A,i,j,v))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_susset_element_ rsb_wp_blas_susset_element_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_susset_element_( blas_sparse_matrix*A,int*i,int*j,float *v,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usset_element_norm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_susset_element(*A,*i,*j,v );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_dusset_element rsb_wp_BLAS_dusset_element
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_dusset_element( blas_sparse_matrix A,int i, int j, double *  v )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usset_element_norm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusset_element(A,i,j,v))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_dusset_element_ rsb_wp_blas_dusset_element_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_dusset_element_( blas_sparse_matrix*A,int*i,int*j,double *v,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usset_element_norm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_dusset_element(*A,*i,*j,v );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cusset_element rsb_wp_BLAS_cusset_element
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cusset_element( blas_sparse_matrix A,int i, int j, void * v )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usset_element_norm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusset_element(A,i,j,v))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cusset_element_ rsb_wp_blas_cusset_element_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cusset_element_( blas_sparse_matrix*A,int*i,int*j,void *v,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usset_element_norm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_cusset_element(*A,*i,*j,v );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zusset_element rsb_wp_BLAS_zusset_element
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zusset_element( blas_sparse_matrix A,int i, int j, void * v )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usset_element_norm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusset_element(A,i,j,v))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zusset_element_ rsb_wp_blas_zusset_element_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zusset_element_( blas_sparse_matrix*A,int*i,int*j,void *v,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usset_element_norm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_zusset_element(*A,*i,*j,v );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_susget_element rsb_wp_BLAS_susget_element
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_susget_element( blas_sparse_matrix A,int i, int j, float *  v )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_element_norm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_element(A,i,j,v))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_susget_element_ rsb_wp_blas_susget_element_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_susget_element_( blas_sparse_matrix*A,int*i,int*j,float *v,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_element_norm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_susget_element(*A,*i,*j,v );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_dusget_element rsb_wp_BLAS_dusget_element
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_dusget_element( blas_sparse_matrix A,int i, int j, double *  v )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_element_norm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_element(A,i,j,v))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_dusget_element_ rsb_wp_blas_dusget_element_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_dusget_element_( blas_sparse_matrix*A,int*i,int*j,double *v,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_element_norm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_dusget_element(*A,*i,*j,v );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_cusget_element rsb_wp_BLAS_cusget_element
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_cusget_element( blas_sparse_matrix A,int i, int j, void * v )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_element_norm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_element(A,i,j,v))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_cusget_element_ rsb_wp_blas_cusget_element_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_cusget_element_( blas_sparse_matrix*A,int*i,int*j,void *v,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_element_norm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_cusget_element(*A,*i,*j,v );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define BLAS_zusget_element rsb_wp_BLAS_zusget_element
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+int BLAS_zusget_element( blas_sparse_matrix A,int i, int j, void * v )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_element_norm_msg\rsb_spblas_return_msg
+         */
+	RSB_SPB_INTERFACE_PREAMBLE
+
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_Xusget_element(A,i,j,v))
+}
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define blas_zusget_element_ rsb_wp_blas_zusget_element_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_zusget_element_( blas_sparse_matrix*A,int*i,int*j,void *v,int*istat )
+{
+         /*!
+           \ingroup rsb_doc_sparse_blas
+           \rsb_spblasl2e_usget_element_norm_msg\rsb_spblas_istat_msg
+         */
+
+	int istatv = BLAS_zusget_element(*A,*i,*j,v );
+	RSB_SET_IF_NOT_NULL(istat,istatv);
+}
+
+
+
+
+
+int BLAS_usgp( blas_sparse_matrix A, int pname ) /*  FIXME: temporarily here */
+{
+	/**
+	 \ingroup rsb_doc_sparse_blas
+	 \rsb_spblasl2_gp_msg
+	 \rsb_spblas_return_msg
+	 */
+	RSB_SPB_INTERFACE_PREAMBLE
+	RSB_SPB_INTERFACE_RETURN_EXP(rsb__BLAS_usgp(A,pname))
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define  blas_usgp_	rsb_wp_blas_usgp_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_usgp_( blas_sparse_matrix*A, int*pname, int * istat ) /*  FIXME: temporarily here */
+{
+	/** \ingroup rsb_doc_sparse_blas
+	 \rsb_spblasl2_gp_msg
+	 \rsb_spblas_istat_msg
+	 */
+	RSB_SPB_INTERFACE_PREAMBLE
+	*istat=BLAS_usgp(*A,*pname); /*  FIXME: temporarily here */
+	RSB_SPB_INTERFACE_RETURN_VOID()
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+#define  blas_ussp_	rsb_wp_blas_ussp_
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+void blas_ussp_( blas_sparse_matrix*A, int*pname, int * istat ) /*  FIXME: temporarily here */
+{
+	/**
+	 \ingroup rsb_doc_sparse_blas
+	 \rsb_spblasl2_sp_msg
+	 \rsb_spblas_istat_msg
+	 */
+	RSB_SPB_INTERFACE_PREAMBLE
+	*istat=BLAS_ussp(*A,*pname); /*  FIXME: temporarily here */
+	RSB_SPB_INTERFACE_RETURN_VOID()
+}
+
+int BLAS_ussp( blas_sparse_matrix A, int pname ) /*  FIXME: temporarily here */
+{
+	/**
+	 \ingroup rsb_doc_sparse_blas
+	 \rsb_spblasl2_sp_msg
+	 \rsb_spblas_return_msg
+	 */
+	RSB_SPB_INTERFACE_PREAMBLE
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_ussp(A,pname))
+}
+
+
+
+struct rsb_mtx_t * rsb_blas_get_mtx(blas_sparse_matrix A)
+{
+	/*!
+ 	\ingroup rsb_doc_sparse_blas
+	\rsb_BLAS_get_mtx_msg
+
+	\rsb_spblasl2_A_msg
+	\return \rsbmtxpmessage_bg
+
+	\n
+	
+	\rsb_BLAS_get_mtx_example
+	\see_rsb_BLAS_get_mtx_msg
+	\rsb_BLAS_get_mtx_msg_todo
+	\rsb_BLAS_get_mtx_msg_note
+	\rsb_BLAS_get_mtx_msg_warn
+	 */
+	RSB_SPB_INTERFACE_PREAMBLE
+{
+	struct rsb_mtx_t * mtxAp = NULL;
+	mtxAp = rsb__BLAS_inner_matrix_retrieve(A);
+	RSB_SPB_INTERFACE_RETURN_EXP( mtxAp )
+}
+}
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+
+
diff --git a/rsb_libspblas.h b/rsb_libspblas.h
new file mode 100644
index 0000000..e1ed033
--- /dev/null
+++ b/rsb_libspblas.h
@@ -0,0 +1,577 @@
+
+
+/*!
+        @file
+        @author Michele Martone
+
+	@brief  This file specifies the Sparse BLAS interface to librsb.
+	Supported types  :(float,double,float complex,double complex) .
+	Unsupported types:() .
+	Level 1 ops      :(dot,axpy,ga,gz,sc) .
+	Level 2 ops      :(mv,sv) .
+	Level 3 ops      :(mm,sm) .
+*/
+
+#ifndef RSB_LIBSPBLAS_H_INCLUDED
+#define RSB_LIBSPBLAS_H_INCLUDED
+#ifndef RSB_RSB_H_INCLUDED
+#error "You are using Sparse BLAS headers from librsb -- You should include <rsb.h> first!"
+#endif /* RSB_RSB_H_INCLUDED */
+#ifndef BLAS_ENUM_H
+#define BLAS_ENUM_H
+
+  /* Enumerated types */
+
+/*! Used to specify a dense array's elements layout. */
+enum blas_order_type {
+            blas_rowmajor = 101, /*!< Row major. */
+            blas_colmajor = 102  /*!< Column major. */ };
+
+/*! Used to specify a transposition operator to a matrix operand. */
+enum blas_trans_type {
+            blas_no_trans   = 111, /*!< No transposition. */
+            blas_trans      = 112, /*!< Transposition. */
+            blas_conj_trans = 113  /*!< Transposition and conjugation. */ };
+
+/*! Specifies (#BLAS_ussp) or inquires (#BLAS_usgp) upper or lower triangularity of a matrix. */
+enum blas_uplo_type  {
+            blas_upper = 121, /*!< Upper triangular matrix. */
+            blas_lower = 122  /*!< Lower triangular matrix. */ };
+
+/*! Specifies (#BLAS_ussp) or inquires (#BLAS_usgp) whether the diagonal of a matrix is (implicitly) unitary or not. */
+enum blas_diag_type {
+            blas_non_unit_diag = 131,  /*!< Non-unit diagonal matrix (the default). */
+            blas_unit_diag     = 132   /*!< Unit diagonal matrix. */ };
+
+/*! Unused/Unsupported. */
+enum blas_side_type {
+            blas_left_side  = 141, /*!< Unsupported. */ 
+            blas_right_side = 142  /*!< Unsupported. */  };
+
+/*! Unused/Unsupported. */
+enum blas_cmach_type {
+            blas_base      = 151, /*!< Unsupported. */ 
+            blas_t         = 152, /*!< Unsupported. */ 
+            blas_rnd       = 153, /*!< Unsupported. */ 
+            blas_ieee      = 154, /*!< Unsupported. */ 
+            blas_emin      = 155, /*!< Unsupported. */ 
+            blas_emax      = 156, /*!< Unsupported. */ 
+            blas_eps       = 157, /*!< Unsupported. */ 
+            blas_prec      = 158, /*!< Unsupported. */ 
+            blas_underflow = 159, /*!< Unsupported. */ 
+            blas_overflow  = 160, /*!< Unsupported. */ 
+            blas_sfmin     = 161  /*!< Unsupported. */ };
+
+/*! Unused/Unsupported. */
+enum blas_norm_type {
+            blas_one_norm       = 171, /*!< Unsupported. */ 
+            blas_real_one_norm  = 172, /*!< Unsupported. */ 
+            blas_two_norm       = 173, /*!< Unsupported. */ 
+            blas_frobenius_norm = 174, /*!< Unsupported. */ 
+            blas_inf_norm       = 175, /*!< Unsupported. */ 
+            blas_real_inf_norm  = 176, /*!< Unsupported. */ 
+            blas_max_norm       = 177, /*!< Unsupported. */ 
+            blas_real_max_norm  = 178  /*!< Unsupported. */ };
+
+/*! Unused/Unsupported. */
+enum blas_sort_type {
+            blas_increasing_order = 181,  /*!< Unsupported. */ 
+            blas_decreasing_order = 182   /*!< Unsupported. */  };
+
+/*! Unused/Unsupported. */
+enum blas_conj_type {
+            blas_conj    = 191, /*!< Unsupported. */
+            blas_no_conj = 192  /*!< Unsupported. */ };
+
+/*! Unused/Unsupported. */
+enum blas_jrot_type {
+            blas_jrot_inner  = 201, /*!< Unsupported. */
+            blas_jrot_outer  = 202, /*!< Unsupported. */
+            blas_jrot_sorted = 203  /*!< Unsupported. */ };
+
+/*! Unused/Unsupported. */
+enum blas_prec_type {
+            blas_prec_single     = 211, /*!< Unsupported. */
+            blas_prec_double     = 212, /*!< Unsupported. */
+            blas_prec_indigenous = 213, /*!< Unsupported. */
+            blas_prec_extra      = 214  /*!< Unsupported. */ };
+
+/*! Index base (valid at matrix build/modify time). */
+enum blas_base_type {
+            blas_zero_base = 221, /*!< Zero based indices (default when matrix created using the C interface). */
+            blas_one_base  = 222  /*!< One based indices (default when matrix created using the Fortran interface). */ };
+
+/*! Symmetry properties. Unless specified otherwise, valid for both #BLAS_ussp and #BLAS_usgp.
+ */
+enum blas_symmetry_type {
+            blas_general          = 231, /*!< General unsymmetric matrix (default). For #BLAS_usgp only. */
+            blas_symmetric        = 232, /*!< Symmetric matrix (either #blas_lower_symmetric or #blas_upper_symmetric). For #BLAS_usgp only. */
+            blas_hermitian        = 233, /*!< Hermitian matrix (either #blas_lower_hermitian or #blas_upper_hermitian). For #BLAS_usgp only. */
+            blas_triangular       = 234, /*!< Triangular matrix (either #blas_lower_triangular or #blas_upper_triangular). For #BLAS_usgp only. */
+            blas_lower_triangular = 235, /*!< Lower triangular matrix. */
+            blas_upper_triangular = 236, /*!< Upper triangular matrix. */
+            blas_lower_symmetric  = 237, /*!< Lower symmetric matrix. */
+            blas_upper_symmetric  = 238, /*!< Upper symmetric matrix. */
+            blas_lower_hermitian  = 239, /*!< Lower hermitian matrix. */
+            blas_upper_hermitian  = 240  /*!< Upper hermitian matrix. */ };
+
+/*! Numerical field type; can be used with #BLAS_usgp to inquire about a matrix's numerical type (1 will be returned in case of success, 0 in case of failure). */
+enum blas_field_type {
+            blas_complex          = 241, /*!< Will succeed if matrix is of 'C' or 'Z' type. */
+            blas_real             = 242, /*!< Will succeed if matrix is of 'S' or 'D' type. */
+            blas_double_precision = 243, /*!< Will succeed if matrix is of 'D' or 'Z' type. */
+            blas_single_precision = 244  /*!< Will succeed if matrix is of 'S' or 'C' type. */ };
+
+/*! Quantities that can be obtained via #BLAS_usgp. */
+enum blas_size_type {
+            blas_num_rows      = 251, /*!< Get the matrix rows count. */
+            blas_num_cols      = 252, /*!< Get the matrix columns count. */
+            blas_num_nonzeros  = 253  /*!< Get the matrix nonzeros count. */ };
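+
+/* A minimal usage sketch (assuming a previously assembled handle A; error
+ * checking omitted): #BLAS_usgp returns the requested quantity directly.
+ *
+ *   int nr  = BLAS_usgp(A, blas_num_rows);      // rows count
+ *   int nc  = BLAS_usgp(A, blas_num_cols);      // columns count
+ *   int nnz = BLAS_usgp(A, blas_num_nonzeros);  // nonzeros count
+ */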
+
+/*! The following are not fully implemented. Usable with #BLAS_usgp. */
+enum blas_handle_type{
+            blas_invalid_handle = 261, /*!< Used to check whether the handle is invalid. */
+            blas_new_handle     = 262, /*!< Will give 1 if the handle is new. */
+            blas_open_handle    = 263, /*!< Will give 1 if the handle is open. */
+            blas_valid_handle   = 264  /*!< Will give 1 if the handle is valid (that is, after #BLAS_suscr_end/#BLAS_duscr_end/#BLAS_cuscr_end/#BLAS_zuscr_end). */ };
+
+/*! The following are usable with #BLAS_usgp only. */
+enum blas_sparsity_optimization_type {
+            blas_regular       = 271, /*!< Will give 0. */
+            blas_irregular     = 272, /*!< Will give 1. */
+            blas_block         = 273, /*!< Will give 0. */
+            blas_unassembled   = 274  /*!< Complementary to #blas_valid_handle. */ };
+
+/*! Properties suitable to be used with #BLAS_ussp/#BLAS_usgp. None of these are part of the Sparse BLAS standard. */
+enum blas_rsb_ext_type {
+            blas_rsb_spmv_autotuning_on   = 6660,	/*!< Turn on executing threads autotuning for #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. As an extension to the standard, the autotuning properties can be turned on/off at any time; if the autotuning feature has not been enabled at build time, using these properties will make the call fail. For more information, see #rsb_tune_spmm. (EXPERIMENTAL) */
+            blas_rsb_spmv_autotuning_off  = 6661,	/*!< Turn off executing threads autotuning for #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_spmv_n_autotuning_on   = 6662,	/*!< Turn on executing threads autotuning for untransposed #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_spmv_n_autotuning_off  = 6663,	/*!< Turn off executing threads autotuning for untransposed #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_spmv_t_autotuning_on   = 6664,	/*!< Turn on executing threads autotuning for transposed #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_spmv_t_autotuning_off  = 6665,	/*!< Turn off executing threads autotuning for transposed #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_autotune_next_operation= 6666,	/*!< Turn on executing threads autotuning for the next operation among #BLAS_dusmv, #BLAS_zusmv, #BLAS_susmv, #BLAS_cusmv. See #blas_rsb_spmv_autotuning_on. (EXPERIMENTAL) */
+            blas_rsb_rep_rsb         = 9995,	/*!< Request/check for RSB representation. */
+            blas_rsb_rep_csr         = 9996,	/*!< Request/check for CSR representation. */
+            blas_rsb_rep_coo         = 9997,	/*!< Request/check for COO representation. */
+            blas_rsb_duplicates_ovw   = 9998,	/*!< Request/check for duplicate nonzeroes overwriting policy. */
+            blas_rsb_duplicates_sum   = 9999 	/*!< Request/check for duplicate nonzeroes summation policy. */
+};
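+
+/* A minimal usage sketch (assuming a previously assembled handle A and dense
+ * arrays x, y; if autotuning was not enabled at build time, the property
+ * call fails):
+ *
+ *   BLAS_ussp(A, blas_rsb_autotune_next_operation); // arm autotuning
+ *   BLAS_dusmv(blas_no_trans, 1.0, A, x, 1, y, 1);  // this SpMV gets tuned
+ */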
+
+#endif /* BLAS_ENUM_H */
+
+/** the sparse matrix descriptor type */
+typedef int blas_sparse_matrix;
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+               /* Level 1 Computational Routines */
+int BLAS_susdot(enum blas_conj_type conj, int nnz, const float * x,
+		const int *indx, const float * y, int incy, float * r,
+		enum blas_base_type index_base);
+void blas_susdot_(enum blas_conj_type*conj,int*nnz,const float *x,const int *indx,const float *y,int*incy,float *r,enum blas_base_type*index_base,int*istat);
+int BLAS_dusdot(enum blas_conj_type conj, int nnz, const double * x,
+		const int *indx, const double * y, int incy, double * r,
+		enum blas_base_type index_base);
+void blas_dusdot_(enum blas_conj_type*conj,int*nnz,const double *x,const int *indx,const double *y,int*incy,double *r,enum blas_base_type*index_base,int*istat);
+int BLAS_cusdot(enum blas_conj_type conj, int nnz, const void *x,
+		const int *indx, const void *y, int incy, void *r,
+		enum blas_base_type index_base);
+void blas_cusdot_(enum blas_conj_type*conj,int*nnz,const void *x,const int *indx,const void *y,int*incy,void *r,enum blas_base_type*index_base,int*istat);
+int BLAS_zusdot(enum blas_conj_type conj, int nnz, const void *x,
+		const int *indx, const void *y, int incy, void *r,
+		enum blas_base_type index_base);
+void blas_zusdot_(enum blas_conj_type*conj,int*nnz,const void *x,const int *indx,const void *y,int*incy,void *r,enum blas_base_type*index_base,int*istat);
+
+int BLAS_susaxpy(int nnz, float  alpha, const float * x, const int *indx,
+                 float * y, int incy, enum blas_base_type index_base);
+void blas_susaxpy_(int*nnz,float*alpha,const float *x,const int *indx,float *y,int*incy,enum blas_base_type*index_base,int*istat);
+int BLAS_dusaxpy(int nnz, double  alpha, const double * x, const int *indx,
+                 double * y, int incy, enum blas_base_type index_base);
+void blas_dusaxpy_(int*nnz,double*alpha,const double *x,const int *indx,double *y,int*incy,enum blas_base_type*index_base,int*istat);
+int BLAS_cusaxpy(int nnz, const void * alpha, const void *x, const int *indx,
+                 void *y, int incy, enum blas_base_type index_base);
+void blas_cusaxpy_(int*nnz,const void *alpha,const void *x,const int *indx,void *y,int*incy,enum blas_base_type*index_base,int*istat);
+int BLAS_zusaxpy(int nnz, const void * alpha, const void *x, const int *indx,
+                 void *y, int incy, enum blas_base_type index_base);
+void blas_zusaxpy_(int*nnz,const void *alpha,const void *x,const int *indx,void *y,int*incy,enum blas_base_type*index_base,int*istat);
+
+int BLAS_susga(int nnz, const float * y, int incy, float * x, const int *indx,
+              enum blas_base_type index_base);
+void blas_susga_(int*nnz,const float *y,int*incy,float *x,const int *indx,enum blas_base_type*index_base,int*istat);
+int BLAS_dusga(int nnz, const double * y, int incy, double * x, const int *indx,
+              enum blas_base_type index_base);
+void blas_dusga_(int*nnz,const double *y,int*incy,double *x,const int *indx,enum blas_base_type*index_base,int*istat);
+int BLAS_cusga(int nnz, const void *y, int incy, void *x, const int *indx,
+              enum blas_base_type index_base);
+void blas_cusga_(int*nnz,const void *y,int*incy,void *x,const int *indx,enum blas_base_type*index_base,int*istat);
+int BLAS_zusga(int nnz, const void *y, int incy, void *x, const int *indx,
+              enum blas_base_type index_base);
+void blas_zusga_(int*nnz,const void *y,int*incy,void *x,const int *indx,enum blas_base_type*index_base,int*istat);
+
+int BLAS_susgz(int nnz, float * y, int incy, float * x, const int *indx,
+              enum blas_base_type index_base);
+void blas_susgz_(int*nnz,float *y,int*incy,float *x,const int *indx,enum blas_base_type*index_base,int*istat);
+int BLAS_dusgz(int nnz, double * y, int incy, double * x, const int *indx,
+              enum blas_base_type index_base);
+void blas_dusgz_(int*nnz,double *y,int*incy,double *x,const int *indx,enum blas_base_type*index_base,int*istat);
+int BLAS_cusgz(int nnz, void *y, int incy, void *x, const int *indx,
+              enum blas_base_type index_base);
+void blas_cusgz_(int*nnz,void *y,int*incy,void *x,const int *indx,enum blas_base_type*index_base,int*istat);
+int BLAS_zusgz(int nnz, void *y, int incy, void *x, const int *indx,
+              enum blas_base_type index_base);
+void blas_zusgz_(int*nnz,void *y,int*incy,void *x,const int *indx,enum blas_base_type*index_base,int*istat);
+
+int BLAS_sussc(int nnz, const float * x, float * y, int incy, const int *indx,
+              enum blas_base_type index_base);
+void blas_sussc_(int*nnz,const float *x,float *y,int*incy,const int *indx,enum blas_base_type*index_base,int*istat);
+int BLAS_dussc(int nnz, const double * x, double * y, int incy, const int *indx,
+              enum blas_base_type index_base);
+void blas_dussc_(int*nnz,const double *x,double *y,int*incy,const int *indx,enum blas_base_type*index_base,int*istat);
+int BLAS_cussc(int nnz, const void *x, void *y, int incy, const int *indx,
+              enum blas_base_type index_base);
+void blas_cussc_(int*nnz,const void *x,void *y,int*incy,const int *indx,enum blas_base_type*index_base,int*istat);
+int BLAS_zussc(int nnz, const void *x, void *y, int incy, const int *indx,
+              enum blas_base_type index_base);
+void blas_zussc_(int*nnz,const void *x,void *y,int*incy,const int *indx,enum blas_base_type*index_base,int*istat);
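+
+/* A minimal usage sketch of a Level 1 sparse dot product: the sparse x holds
+ * two nonzeros at 0-based positions 0 and 3 of the length-4 dense y.
+ *
+ *   const double x[]    = { 1.0, 2.0 };
+ *   const int    indx[] = { 0, 3 };
+ *   const double y[]    = { 4.0, 0.0, 0.0, 5.0 };
+ *   double r = 0.0;
+ *   BLAS_dusdot(blas_no_conj, 2, x, indx, y, 1, &r, blas_zero_base);
+ *   // r == 1.0*4.0 + 2.0*5.0 == 14.0
+ */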
+
+               /* Level 2 Computational Routines */
+
+int BLAS_susmv(enum blas_trans_type transA, float alpha,
+    blas_sparse_matrix A, const float * x, int incx, float * y, int incy);
+
+void blas_susmv_(enum blas_trans_type*transA,float*alpha,blas_sparse_matrix*A,const float *x,int*incx,float *y,int*incy,int*istat);
+
+int BLAS_dusmv(enum blas_trans_type transA, double alpha,
+    blas_sparse_matrix A, const double * x, int incx, double * y, int incy);
+
+void blas_dusmv_(enum blas_trans_type*transA,double*alpha,blas_sparse_matrix*A,const double *x,int*incx,double *y,int*incy,int*istat);
+
+int BLAS_cusmv(enum blas_trans_type transA, const void *alpha,
+    blas_sparse_matrix A, const void *x, int incx, void *y, int incy);
+
+void blas_cusmv_(enum blas_trans_type*transA,const void *alpha,blas_sparse_matrix*A,const void *x,int*incx,void *y,int*incy,int*istat);
+
+int BLAS_zusmv(enum blas_trans_type transA, const void *alpha,
+    blas_sparse_matrix A, const void *x, int incx, void *y, int incy);
+
+void blas_zusmv_(enum blas_trans_type*transA,const void *alpha,blas_sparse_matrix*A,const void *x,int*incx,void *y,int*incy,int*istat);
+
+
+int BLAS_sussv(enum blas_trans_type transT, float alpha,
+    blas_sparse_matrix T, float * x, int incx);
+
+void blas_sussv_(enum blas_trans_type*transT,float*alpha,blas_sparse_matrix*T,float *x,int*incx,int*istat);
+
+int BLAS_dussv(enum blas_trans_type transT, double alpha,
+    blas_sparse_matrix T, double * x, int incx);
+
+void blas_dussv_(enum blas_trans_type*transT,double*alpha,blas_sparse_matrix*T,double *x,int*incx,int*istat);
+
+int BLAS_cussv(enum blas_trans_type transT, const void *alpha,
+    blas_sparse_matrix T, void *x, int incx);
+
+void blas_cussv_(enum blas_trans_type*transT,const void *alpha,blas_sparse_matrix*T,void *x,int*incx,int*istat);
+
+int BLAS_zussv(enum blas_trans_type transT, const void *alpha,
+    blas_sparse_matrix T, void *x, int incx);
+
+void blas_zussv_(enum blas_trans_type*transT,const void *alpha,blas_sparse_matrix*T,void *x,int*incx,int*istat);
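+
+/* A minimal Level 2 usage sketch (assuming previously assembled handles A
+ * (general) and T (triangular), plus dense x, y of matching length and unit
+ * strides): mv computes y := alpha*op(A)*x + y, sv computes
+ * x := alpha*op(T)^{-1}*x.
+ *
+ *   BLAS_dusmv(blas_no_trans, 2.0, A, x, 1, y, 1); // y += 2*A*x
+ *   BLAS_dussv(blas_trans, 1.0, T, x, 1);          // x := T^T \ x
+ */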
+
+
+               /* Level 3 Computational Routines */
+
+int BLAS_susmm(enum blas_order_type order, enum blas_trans_type transA,
+   int nrhs, float alpha, blas_sparse_matrix A, const float * b, int ldb,
+       float *  c, int ldc);
+
+void blas_susmm_(enum blas_order_type*order,enum blas_trans_type*transA,int*nrhs,float*alpha,blas_sparse_matrix*A,const float *b,int*ldb,float *c,int*ldc,int*istat);
+
+int BLAS_dusmm(enum blas_order_type order, enum blas_trans_type transA,
+   int nrhs, double alpha, blas_sparse_matrix A, const double * b, int ldb,
+       double *  c, int ldc);
+
+void blas_dusmm_(enum blas_order_type*order,enum blas_trans_type*transA,int*nrhs,double*alpha,blas_sparse_matrix*A,const double *b,int*ldb,double *c,int*ldc,int*istat);
+
+int BLAS_cusmm(enum blas_order_type order, enum blas_trans_type transA,
+   int nrhs, const void *alpha, blas_sparse_matrix A, const void *b, int ldb,
+       void * c, int ldc);
+
+void blas_cusmm_(enum blas_order_type*order,enum blas_trans_type*transA,int*nrhs,const void *alpha,blas_sparse_matrix*A,const void *b,int*ldb,void *c,int*ldc,int*istat);
+
+int BLAS_zusmm(enum blas_order_type order, enum blas_trans_type transA,
+   int nrhs, const void *alpha, blas_sparse_matrix A, const void *b, int ldb,
+       void * c, int ldc);
+
+void blas_zusmm_(enum blas_order_type*order,enum blas_trans_type*transA,int*nrhs,const void *alpha,blas_sparse_matrix*A,const void *b,int*ldb,void *c,int*ldc,int*istat);
+
+
+int BLAS_sussm(enum blas_order_type order, enum blas_trans_type transT,
+               int nrhs, float alpha, blas_sparse_matrix T, float * b, int ldb);
+
+void blas_sussm_(enum blas_order_type*order,enum blas_trans_type*transT,int*nrhs,float*alpha,blas_sparse_matrix*T,float *b,int*ldb,int*istat);
+
+int BLAS_dussm(enum blas_order_type order, enum blas_trans_type transT,
+               int nrhs, double alpha, blas_sparse_matrix T, double * b, int ldb);
+
+void blas_dussm_(enum blas_order_type*order,enum blas_trans_type*transT,int*nrhs,double*alpha,blas_sparse_matrix*T,double *b,int*ldb,int*istat);
+
+int BLAS_cussm(enum blas_order_type order, enum blas_trans_type transT,
+               int nrhs, const void *alpha, blas_sparse_matrix T, void *b, int ldb);
+
+void blas_cussm_(enum blas_order_type*order,enum blas_trans_type*transT,int*nrhs,const void *alpha,blas_sparse_matrix*T,void *b,int*ldb,int*istat);
+
+int BLAS_zussm(enum blas_order_type order, enum blas_trans_type transT,
+               int nrhs, const void *alpha, blas_sparse_matrix T, void *b, int ldb);
+
+void blas_zussm_(enum blas_order_type*order,enum blas_trans_type*transT,int*nrhs,const void *alpha,blas_sparse_matrix*T,void *b,int*ldb,int*istat);
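+
+/* A minimal Level 3 usage sketch (assuming an assembled nr x nc handle A and
+ * column-major dense arrays b, c with nrhs columns each): computes
+ * c := alpha*op(A)*b + c; with blas_no_trans, use ldb == nc and ldc == nr.
+ *
+ *   BLAS_dusmm(blas_colmajor, blas_no_trans, nrhs, 1.0, A, b, nc, c, nr);
+ */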
+
+
+               /* Handle Management Routines */
+               /*             +              */
+               /* Creation Routines */
+               /*             +              */
+               /* Insertion Routines */
+               /*             +              */
+               /* Completion of Construction Routines */
+               /*             +              */
+               /* Matrix Property Routines */
+               /*             +              */
+               /* Destruction Routine */
+
+blas_sparse_matrix BLAS_suscr_begin(int m, int n);
+void blas_suscr_begin_(int*m,int*n,blas_sparse_matrix*A,int*istat);
+blas_sparse_matrix BLAS_duscr_begin(int m, int n);
+void blas_duscr_begin_(int*m,int*n,blas_sparse_matrix*A,int*istat);
+blas_sparse_matrix BLAS_cuscr_begin(int m, int n);
+void blas_cuscr_begin_(int*m,int*n,blas_sparse_matrix*A,int*istat);
+blas_sparse_matrix BLAS_zuscr_begin(int m, int n);
+void blas_zuscr_begin_(int*m,int*n,blas_sparse_matrix*A,int*istat);
+
+blas_sparse_matrix BLAS_suscr_block_begin(int Mb, int Nb, int k, int l);
+void blas_suscr_block_begin_(int*Mb,int*Nb,int*k,int*l,blas_sparse_matrix*A,int*istat);
+blas_sparse_matrix BLAS_duscr_block_begin(int Mb, int Nb, int k, int l);
+void blas_duscr_block_begin_(int*Mb,int*Nb,int*k,int*l,blas_sparse_matrix*A,int*istat);
+blas_sparse_matrix BLAS_cuscr_block_begin(int Mb, int Nb, int k, int l);
+void blas_cuscr_block_begin_(int*Mb,int*Nb,int*k,int*l,blas_sparse_matrix*A,int*istat);
+blas_sparse_matrix BLAS_zuscr_block_begin(int Mb, int Nb, int k, int l);
+void blas_zuscr_block_begin_(int*Mb,int*Nb,int*k,int*l,blas_sparse_matrix*A,int*istat);
+
+blas_sparse_matrix BLAS_suscr_variable_block_begin(int Mb, int Nb,
+		const int *K, const int *L);
+void blas_suscr_variable_block_begin_(int*Mb,int*Nb,const int *K,const int *L,blas_sparse_matrix*A,int*istat);
+blas_sparse_matrix BLAS_duscr_variable_block_begin(int Mb, int Nb,
+		const int *K, const int *L);
+void blas_duscr_variable_block_begin_(int*Mb,int*Nb,const int *K,const int *L,blas_sparse_matrix*A,int*istat);
+blas_sparse_matrix BLAS_cuscr_variable_block_begin(int Mb, int Nb,
+		const int *K, const int *L);
+void blas_cuscr_variable_block_begin_(int*Mb,int*Nb,const int *K,const int *L,blas_sparse_matrix*A,int*istat);
+blas_sparse_matrix BLAS_zuscr_variable_block_begin(int Mb, int Nb,
+		const int *K, const int *L);
+void blas_zuscr_variable_block_begin_(int*Mb,int*Nb,const int *K,const int *L,blas_sparse_matrix*A,int*istat);
+
+int BLAS_suscr_end(blas_sparse_matrix A);
+void blas_suscr_end_(blas_sparse_matrix*A,int*istat);
+int BLAS_duscr_end(blas_sparse_matrix A);
+void blas_duscr_end_(blas_sparse_matrix*A,int*istat);
+int BLAS_cuscr_end(blas_sparse_matrix A);
+void blas_cuscr_end_(blas_sparse_matrix*A,int*istat);
+int BLAS_zuscr_end(blas_sparse_matrix A);
+void blas_zuscr_end_(blas_sparse_matrix*A,int*istat);
+
+int BLAS_suscr_insert_entry(blas_sparse_matrix A, float  val, int i, int j);
+void blas_suscr_insert_entry_(blas_sparse_matrix*A,float*val,int*i,int*j,int*istat);
+int BLAS_duscr_insert_entry(blas_sparse_matrix A, double  val, int i, int j);
+void blas_duscr_insert_entry_(blas_sparse_matrix*A,double*val,int*i,int*j,int*istat);
+int BLAS_cuscr_insert_entry(blas_sparse_matrix A, const void * val, int i, int j);
+void blas_cuscr_insert_entry_(blas_sparse_matrix*A,const void *val,int*i,int*j,int*istat);
+int BLAS_zuscr_insert_entry(blas_sparse_matrix A, const void * val, int i, int j);
+void blas_zuscr_insert_entry_(blas_sparse_matrix*A,const void *val,int*i,int*j,int*istat);
+
+int BLAS_suscr_insert_entries(blas_sparse_matrix A, int nnz, const float * val,
+                            const int *indx, const int *jndx);
+void blas_suscr_insert_entries_(blas_sparse_matrix*A,int*nnz,const float *val,const int *indx,const int *jndx,int*istat);
+int BLAS_duscr_insert_entries(blas_sparse_matrix A, int nnz, const double * val,
+                            const int *indx, const int *jndx);
+void blas_duscr_insert_entries_(blas_sparse_matrix*A,int*nnz,const double *val,const int *indx,const int *jndx,int*istat);
+int BLAS_cuscr_insert_entries(blas_sparse_matrix A, int nnz, const void *val,
+                            const int *indx, const int *jndx);
+void blas_cuscr_insert_entries_(blas_sparse_matrix*A,int*nnz,const void *val,const int *indx,const int *jndx,int*istat);
+int BLAS_zuscr_insert_entries(blas_sparse_matrix A, int nnz, const void *val,
+                            const int *indx, const int *jndx);
+void blas_zuscr_insert_entries_(blas_sparse_matrix*A,int*nnz,const void *val,const int *indx,const int *jndx,int*istat);
+
+int BLAS_suscr_insert_col(blas_sparse_matrix A, int j, int nnz,
+                           const float * val, const int *indx);
+void blas_suscr_insert_col_(blas_sparse_matrix*A,int*j,int*nnz,const float *val,const int *indx,int*istat);
+int BLAS_duscr_insert_col(blas_sparse_matrix A, int j, int nnz,
+                           const double * val, const int *indx);
+void blas_duscr_insert_col_(blas_sparse_matrix*A,int*j,int*nnz,const double *val,const int *indx,int*istat);
+int BLAS_cuscr_insert_col(blas_sparse_matrix A, int j, int nnz,
+                           const void *val, const int *indx);
+void blas_cuscr_insert_col_(blas_sparse_matrix*A,int*j,int*nnz,const void *val,const int *indx,int*istat);
+int BLAS_zuscr_insert_col(blas_sparse_matrix A, int j, int nnz,
+                           const void *val, const int *indx);
+void blas_zuscr_insert_col_(blas_sparse_matrix*A,int*j,int*nnz,const void *val,const int *indx,int*istat);
+
+int BLAS_suscr_insert_row(blas_sparse_matrix A, int i, int nnz,
+                           const float * val, const int *indx);
+void blas_suscr_insert_row_(blas_sparse_matrix*A,int*i,int*nnz,const float *val,const int *indx,int*istat);
+int BLAS_duscr_insert_row(blas_sparse_matrix A, int i, int nnz,
+                           const double * val, const int *indx);
+void blas_duscr_insert_row_(blas_sparse_matrix*A,int*i,int*nnz,const double *val,const int *indx,int*istat);
+int BLAS_cuscr_insert_row(blas_sparse_matrix A, int i, int nnz,
+                           const void *val, const int *indx);
+void blas_cuscr_insert_row_(blas_sparse_matrix*A,int*i,int*nnz,const void *val,const int *indx,int*istat);
+int BLAS_zuscr_insert_row(blas_sparse_matrix A, int i, int nnz,
+                           const void *val, const int *indx);
+void blas_zuscr_insert_row_(blas_sparse_matrix*A,int*i,int*nnz,const void *val,const int *indx,int*istat);
+
+int BLAS_suscr_insert_clique(blas_sparse_matrix A, const int k, const int l,
+                       const float * val, const int row_stride,
+                       const int col_stride, const int *indx,
+                       const int *jndx);
+void blas_suscr_insert_clique_(blas_sparse_matrix*A,const int*k,const int*l,const float *val,const int*row_stride,const int*col_stride,const int *indx,const int *jndx,int*istat);
+int BLAS_duscr_insert_clique(blas_sparse_matrix A, const int k, const int l,
+                       const double * val, const int row_stride,
+                       const int col_stride, const int *indx,
+                       const int *jndx);
+void blas_duscr_insert_clique_(blas_sparse_matrix*A,const int*k,const int*l,const double *val,const int*row_stride,const int*col_stride,const int *indx,const int *jndx,int*istat);
+int BLAS_cuscr_insert_clique(blas_sparse_matrix A, const int k, const int l,
+                       const void *val, const int row_stride,
+                       const int col_stride, const int *indx,
+                       const int *jndx);
+void blas_cuscr_insert_clique_(blas_sparse_matrix*A,const int*k,const int*l,const void *val,const int*row_stride,const int*col_stride,const int *indx,const int *jndx,int*istat);
+int BLAS_zuscr_insert_clique(blas_sparse_matrix A, const int k, const int l,
+                       const void *val, const int row_stride,
+                       const int col_stride, const int *indx,
+                       const int *jndx);
+void blas_zuscr_insert_clique_(blas_sparse_matrix*A,const int*k,const int*l,const void *val,const int*row_stride,const int*col_stride,const int *indx,const int *jndx,int*istat);
+
+int BLAS_suscr_insert_block(blas_sparse_matrix A, const float * val,
+                        int row_stride, int col_stride, int i, int j);
+void blas_suscr_insert_block_(blas_sparse_matrix*A,const float *val,int*row_stride,int*col_stride,int*i,int*j,int*istat);
+int BLAS_duscr_insert_block(blas_sparse_matrix A, const double * val,
+                        int row_stride, int col_stride, int i, int j);
+void blas_duscr_insert_block_(blas_sparse_matrix*A,const double *val,int*row_stride,int*col_stride,int*i,int*j,int*istat);
+int BLAS_cuscr_insert_block(blas_sparse_matrix A, const void *val,
+                        int row_stride, int col_stride, int i, int j);
+void blas_cuscr_insert_block_(blas_sparse_matrix*A,const void *val,int*row_stride,int*col_stride,int*i,int*j,int*istat);
+int BLAS_zuscr_insert_block(blas_sparse_matrix A, const void *val,
+                        int row_stride, int col_stride, int i, int j);
+void blas_zuscr_insert_block_(blas_sparse_matrix*A,const void *val,int*row_stride,int*col_stride,int*i,int*j,int*istat);
+
+
+
+int BLAS_uscr_end(blas_sparse_matrix A);
+void blas_uscr_end_(blas_sparse_matrix*A,int*istat);
+int BLAS_usds(blas_sparse_matrix A);
+void blas_usds_(blas_sparse_matrix*A,int*istat);
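+
+/* A minimal life-cycle sketch for a 2 x 2 double matrix (assuming
+ * rsb_lib_init()/rsb_lib_exit() from <rsb.h> wrap the program; error
+ * checking omitted):
+ *
+ *   blas_sparse_matrix A = BLAS_duscr_begin(2, 2);
+ *   BLAS_duscr_insert_entry(A, 1.0, 0, 0);
+ *   BLAS_duscr_insert_entry(A, 2.0, 1, 1);
+ *   BLAS_duscr_end(A);                              // assembly complete
+ *   double x[2] = { 1.0, 1.0 }, y[2] = { 0.0, 0.0 };
+ *   BLAS_dusmv(blas_no_trans, 1.0, A, x, 1, y, 1);  // y = A*x = {1,2}
+ *   BLAS_usds(A);                                   // release the handle
+ */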
+
+int BLAS_susrows_scale(blas_sparse_matrix A,const float *  d, enum blas_trans_type trans);
+void blas_susrows_scale_(blas_sparse_matrix*A,const float *d,enum blas_trans_type*trans,int*istat);
+int BLAS_dusrows_scale(blas_sparse_matrix A,const double *  d, enum blas_trans_type trans);
+void blas_dusrows_scale_(blas_sparse_matrix*A,const double *d,enum blas_trans_type*trans,int*istat);
+int BLAS_cusrows_scale(blas_sparse_matrix A,const void * d, enum blas_trans_type trans);
+void blas_cusrows_scale_(blas_sparse_matrix*A,const void *d,enum blas_trans_type*trans,int*istat);
+int BLAS_zusrows_scale(blas_sparse_matrix A,const void * d, enum blas_trans_type trans);
+void blas_zusrows_scale_(blas_sparse_matrix*A,const void *d,enum blas_trans_type*trans,int*istat);
+
+int BLAS_susget_diag(blas_sparse_matrix A,float *  d);
+void blas_susget_diag_(blas_sparse_matrix*A,float *d,int*istat);
+int BLAS_dusget_diag(blas_sparse_matrix A,double *  d);
+void blas_dusget_diag_(blas_sparse_matrix*A,double *d,int*istat);
+int BLAS_cusget_diag(blas_sparse_matrix A,void * d);
+void blas_cusget_diag_(blas_sparse_matrix*A,void *d,int*istat);
+int BLAS_zusget_diag(blas_sparse_matrix A,void * d);
+void blas_zusget_diag_(blas_sparse_matrix*A,void *d,int*istat);
+
+int BLAS_susget_rows_nnz(blas_sparse_matrix A, int fr, int lr, int * nnzp);
+void blas_susget_rows_nnz_(blas_sparse_matrix*A,int*fr,int*lr,int *nnzp,int*istat);
+int BLAS_dusget_rows_nnz(blas_sparse_matrix A, int fr, int lr, int * nnzp);
+void blas_dusget_rows_nnz_(blas_sparse_matrix*A,int*fr,int*lr,int *nnzp,int*istat);
+int BLAS_cusget_rows_nnz(blas_sparse_matrix A, int fr, int lr, int * nnzp);
+void blas_cusget_rows_nnz_(blas_sparse_matrix*A,int*fr,int*lr,int *nnzp,int*istat);
+int BLAS_zusget_rows_nnz(blas_sparse_matrix A, int fr, int lr, int * nnzp);
+void blas_zusget_rows_nnz_(blas_sparse_matrix*A,int*fr,int*lr,int *nnzp,int*istat);
+
+int BLAS_susget_rows_sparse(blas_sparse_matrix A, float *  VA, int * IA, int * JA, int * nnz, int fr, int lr);
+void blas_susget_rows_sparse_(blas_sparse_matrix*A,float *VA,int *IA,int *JA,int *nnz,int*fr,int*lr,int*istat);
+int BLAS_dusget_rows_sparse(blas_sparse_matrix A, double *  VA, int * IA, int * JA, int * nnz, int fr, int lr);
+void blas_dusget_rows_sparse_(blas_sparse_matrix*A,double *VA,int *IA,int *JA,int *nnz,int*fr,int*lr,int*istat);
+int BLAS_cusget_rows_sparse(blas_sparse_matrix A, void * VA, int * IA, int * JA, int * nnz, int fr, int lr);
+void blas_cusget_rows_sparse_(blas_sparse_matrix*A,void *VA,int *IA,int *JA,int *nnz,int*fr,int*lr,int*istat);
+int BLAS_zusget_rows_sparse(blas_sparse_matrix A, void * VA, int * IA, int * JA, int * nnz, int fr, int lr);
+void blas_zusget_rows_sparse_(blas_sparse_matrix*A,void *VA,int *IA,int *JA,int *nnz,int*fr,int*lr,int*istat);
+
+int BLAS_susget_matrix_nnz(blas_sparse_matrix A,int * nnz);
+void blas_susget_matrix_nnz_(blas_sparse_matrix*A,int *nnz,int*istat);
+int BLAS_dusget_matrix_nnz(blas_sparse_matrix A,int * nnz);
+void blas_dusget_matrix_nnz_(blas_sparse_matrix*A,int *nnz,int*istat);
+int BLAS_cusget_matrix_nnz(blas_sparse_matrix A,int * nnz);
+void blas_cusget_matrix_nnz_(blas_sparse_matrix*A,int *nnz,int*istat);
+int BLAS_zusget_matrix_nnz(blas_sparse_matrix A,int * nnz);
+void blas_zusget_matrix_nnz_(blas_sparse_matrix*A,int *nnz,int*istat);
+
+int BLAS_susget_infinity_norm(blas_sparse_matrix A,float * in, enum blas_trans_type trans);
+void blas_susget_infinity_norm_(blas_sparse_matrix*A,float *in,enum blas_trans_type*trans,int*istat);
+int BLAS_dusget_infinity_norm(blas_sparse_matrix A,double * in, enum blas_trans_type trans);
+void blas_dusget_infinity_norm_(blas_sparse_matrix*A,double *in,enum blas_trans_type*trans,int*istat);
+int BLAS_cusget_infinity_norm(blas_sparse_matrix A,void *in, enum blas_trans_type trans);
+void blas_cusget_infinity_norm_(blas_sparse_matrix*A,void *in,enum blas_trans_type*trans,int*istat);
+int BLAS_zusget_infinity_norm(blas_sparse_matrix A,void *in, enum blas_trans_type trans);
+void blas_zusget_infinity_norm_(blas_sparse_matrix*A,void *in,enum blas_trans_type*trans,int*istat);
+
+int BLAS_susset_elements(blas_sparse_matrix A,const int * ia, const int *ja, const float *  va, int nnz);
+void blas_susset_elements_(blas_sparse_matrix*A,const int *ia,const int *ja,const float *va,int*nnz,int*istat);
+int BLAS_dusset_elements(blas_sparse_matrix A,const int * ia, const int *ja, const double *  va, int nnz);
+void blas_dusset_elements_(blas_sparse_matrix*A,const int *ia,const int *ja,const double *va,int*nnz,int*istat);
+int BLAS_cusset_elements(blas_sparse_matrix A,const int * ia, const int *ja, const void * va, int nnz);
+void blas_cusset_elements_(blas_sparse_matrix*A,const int *ia,const int *ja,const void *va,int*nnz,int*istat);
+int BLAS_zusset_elements(blas_sparse_matrix A,const int * ia, const int *ja, const void * va, int nnz);
+void blas_zusset_elements_(blas_sparse_matrix*A,const int *ia,const int *ja,const void *va,int*nnz,int*istat);
+
+int BLAS_susset_element(blas_sparse_matrix A,int i, int j, float *  v);
+void blas_susset_element_(blas_sparse_matrix*A,int*i,int*j,float *v,int*istat);
+int BLAS_dusset_element(blas_sparse_matrix A,int i, int j, double *  v);
+void blas_dusset_element_(blas_sparse_matrix*A,int*i,int*j,double *v,int*istat);
+int BLAS_cusset_element(blas_sparse_matrix A,int i, int j, void * v);
+void blas_cusset_element_(blas_sparse_matrix*A,int*i,int*j,void *v,int*istat);
+int BLAS_zusset_element(blas_sparse_matrix A,int i, int j, void * v);
+void blas_zusset_element_(blas_sparse_matrix*A,int*i,int*j,void *v,int*istat);
+
+int BLAS_susget_element(blas_sparse_matrix A,int i, int j, float *  v);
+void blas_susget_element_(blas_sparse_matrix*A,int*i,int*j,float *v,int*istat);
+int BLAS_dusget_element(blas_sparse_matrix A,int i, int j, double *  v);
+void blas_dusget_element_(blas_sparse_matrix*A,int*i,int*j,double *v,int*istat);
+int BLAS_cusget_element(blas_sparse_matrix A,int i, int j, void * v);
+void blas_cusget_element_(blas_sparse_matrix*A,int*i,int*j,void *v,int*istat);
+int BLAS_zusget_element(blas_sparse_matrix A,int i, int j, void * v);
+void blas_zusget_element_(blas_sparse_matrix*A,int*i,int*j,void *v,int*istat);
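+
+/* A minimal usage sketch (assuming an assembled double-typed handle A): the
+ * set variants are meant to update coefficients already present in the
+ * pattern, not to introduce new nonzeros.
+ *
+ *   double v = 0.0;
+ *   BLAS_dusget_element(A, 1, 1, &v); // fetch A(1,1), 0-based indices
+ *   v *= 2.0;
+ *   BLAS_dusset_element(A, 1, 1, &v); // write the scaled value back
+ */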
+
+
+
+
+
+
+#define BLAS_ussp rsb_wp__BLAS_ussp
+#define BLAS_usgp rsb_wp__BLAS_usgp
+int BLAS_ussp( blas_sparse_matrix A, int pname );
+int BLAS_usgp( blas_sparse_matrix A, int pname );
+blas_sparse_matrix rsb_load_spblas_matrix_file_as_matrix_market(const rsb_char_t * filename, rsb_type_t typecode ); /* This is a librsb extension. */
+
+
+
+struct rsb_mtx_t * rsb_blas_get_mtx(blas_sparse_matrix A);
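+
+/* A minimal usage sketch: escaping from a Sparse BLAS handle to the native
+ * librsb API. The returned pointer remains owned by the handle, so it shall
+ * be released via #BLAS_usds rather than freed directly.
+ *
+ *   struct rsb_mtx_t * mtxAp = rsb_blas_get_mtx(A);
+ *   // mtxAp may now be passed to the rsb_* functions declared in <rsb.h>
+ */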
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+
+#endif /* RSB_LIBSPBLAS_H_INCLUDED */
+
+
diff --git a/rsb_libspblas.m4 b/rsb_libspblas.m4
new file mode 100644
index 0000000..9bdcf4f
--- /dev/null
+++ b/rsb_libspblas.m4
@@ -0,0 +1,314 @@
+dnl
+dnl
+include(`libspblas_macros.m4')dnl
+include(`rsb_fortran_macros.m4')dnl
+include(`rsb_misc.m4')dnl
+dnl
+/*!
+        @file
+        @author Michele Martone
+
+ifdef(`ONLY_WANT_HEADERS',`dnl
+	@brief  This file specifies the Sparse BLAS interface to librsb.
+',`dnl
+dnl /* @cond INNERDOC  */
+	@brief  This file implements Sparse BLAS for librsb.
+')dnl
+dnl
+dnl	all types        :RSB_M4_SPBLAS_MATRIX_ALL_TYPES
+	Supported types  :RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES .
+	Unsupported types:RSB_M4_SPBLAS_MATRIX_UNSUPPORTED_TYPES .
+	Level 1 ops      :RSB_M4_SPBLAS_MATRIX_ALL_L1_MOPS .
+	Level 2 ops      :RSB_M4_SPBLAS_MATRIX_ALL_L2_MOPS .
+	Level 3 ops      :RSB_M4_SPBLAS_MATRIX_ALL_L3_MOPS .
+*/
+
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#ifndef RSB_LIBSPBLAS_H_INCLUDED
+#define RSB_LIBSPBLAS_H_INCLUDED
+dnl typedef int rsb_blas_int_t;
+dnl
+#ifndef RSB_RSB_H_INCLUDED
+#error "You are using Sparse BLAS headers from librsb -- You should include <rsb.h> first!"
+#endif /* RSB_RSB_H_INCLUDED */
+dnl
+',`dnl
+')dnl
+dnl
+dnl #include "blas_sparse/blas_enum.h"
+include(`blas_sparse/blas_enum.h')dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+
+/** the sparse matrix descriptor type */
+typedef int blas_sparse_matrix;
+',`dnl
+#include "rsb_libspblas_handle.h"
+')dnl
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+/* @cond INNERDOC  */
+RSB_INTERNALS_COMMON_HEAD_DECLS
+/* @endcond */
+')dnl
+dnl #include "blas_sparse/blas_sparse.h"
+dnl #include "blas_sparse/blas_sparse_proto.h"
+dnl
+define(`rsb_blas_int_t',`int')dnl
+define(`RSB_WRAPPER_PREFIX',`rsb_wp_')`'dnl
+dnl
+define(`RSB_SPARSE_BLAS_INTERFACE_REWRAPPER_L1',`dnl
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+`#define' RSB_M4_SPBLAS_MATRIX_ALL_L1_FUNCTION(type,mop,tri,`ID',`0',lang) RSB_WRAPPER_PREFIX`'RSB_M4_SPBLAS_MATRIX_ALL_L1_FUNCTION(type,mop,tri,`ID',`0',lang)
+`#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */'
+')dnl
+dnl
+define(`RSB_SPARSE_BLAS_INTERFACE_REWRAPPER_L2',`dnl
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+`#define' RSB_M4_SPBLAS_MATRIX_ALL_L2_FUNCTION(type,mop,tri,`ID',`0',lang) RSB_WRAPPER_PREFIX`'RSB_M4_SPBLAS_MATRIX_ALL_L2_FUNCTION(type,mop,tri,`ID',`0',lang)
+`#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */'
+')dnl
+dnl
+define(`RSB_SPARSE_BLAS_INTERFACE_REWRAPPER_CF',`dnl
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+`#define' RSB_M4_SPBLAS_MATRIX_CREATION_FUNCS(type,mop,tri,`ID',`0',lang) RSB_WRAPPER_PREFIX`'RSB_M4_SPBLAS_MATRIX_CREATION_FUNCS(type,mop,tri,`ID',`0',lang)
+`#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */'
+')dnl
+dnl
+define(`RSB_SPARSE_BLAS_INTERFACE_REWRAPPER_CFNT',`dnl
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+`#define' RSB_M4_SPBLAS_MATRIX_CREATION_FUNCS(`',mop,tri,`ID',`0',lang) RSB_WRAPPER_PREFIX`'RSB_M4_SPBLAS_MATRIX_CREATION_FUNCS(`',mop,tri,`ID',`0',lang)
+`#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */'
+')dnl
+dnl
+define(`RSB_SPARSE_BLAS_INTERFACE_REWRAPPER_EF',`dnl
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+`#define' RSB_M4_SPBLAS_EXTRA_FUNCTION(type,mop,tri,`ID',`0',lang) RSB_WRAPPER_PREFIX`'RSB_M4_SPBLAS_EXTRA_FUNCTION(type,mop,tri,`ID',`0',lang)
+`#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */'
+')dnl
+dnl
+               /* Level 1 Computational Routines */
+foreach(`mop',RSB_M4_SPBLAS_MATRIX_ALL_L1_MOPS,`dnl
+foreach(`type',RSB_M4_SPBLAS_MATRIX_ALL_TYPES,`dnl
+foreach(`tri',RSB_M4_SPBLAS_SYMMETRY_UL_CHARCODE,`dnl
+foreach(`lang',RSB_M4_SPBLAS_MATRIX_ALL_LANGUAGES,`dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+dnl RSB_SPARSE_BLAS_INTERFACE_REWRAPPER_L1`'dnl
+RSB_M4_SPBLAS_MATRIX_ALL_L1_FUNCTION(type,mop,tri,`function_declaration',`0',lang)dnl
+',`dnl
+RSB_SPARSE_BLAS_INTERFACE_REWRAPPER_L1`'dnl
+RSB_M4_SPBLAS_MATRIX_ALL_L1_FUNCTION(type,mop,tri,`function_definition',`0',lang)dnl
+')dnl
+')dnl
+')dnl
+')dnl
+
+')dnl
+               /* Level 2 Computational Routines */
+
+foreach(`mop',RSB_M4_SPBLAS_MATRIX_ALL_L2_MOPS,`dnl
+foreach(`type',RSB_M4_SPBLAS_MATRIX_ALL_TYPES,`dnl
+foreach(`tri',RSB_M4_SPBLAS_SYMMETRY_UL_CHARCODE,`dnl
+foreach(`lang',RSB_M4_SPBLAS_MATRIX_ALL_LANGUAGES,`dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+dnl RSB_SPARSE_BLAS_INTERFACE_REWRAPPER_L2`'dnl
+RSB_M4_SPBLAS_MATRIX_ALL_L2_FUNCTION(type,mop,tri,`function_declaration',`0',lang)
+',`dnl
+RSB_SPARSE_BLAS_INTERFACE_REWRAPPER_L2`'dnl
+RSB_M4_SPBLAS_MATRIX_ALL_L2_FUNCTION(type,mop,tri,`function_definition',`0',lang)
+')dnl
+')dnl
+')dnl
+')dnl
+
+')dnl
+               /* Level 3 Computational Routines */
+
+foreach(`mop',RSB_M4_SPBLAS_MATRIX_ALL_L3_MOPS,`dnl
+foreach(`type',RSB_M4_SPBLAS_MATRIX_ALL_TYPES,`dnl
+foreach(`tri',RSB_M4_SPBLAS_SYMMETRY_UL_CHARCODE,`dnl
+foreach(`lang',RSB_M4_SPBLAS_MATRIX_ALL_LANGUAGES,`dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+dnl RSB_SPARSE_BLAS_INTERFACE_REWRAPPER_L2`'dnl
+RSB_M4_SPBLAS_MATRIX_ALL_L2_FUNCTION(type,mop,tri,`function_declaration',`0',lang)
+',`dnl
+RSB_SPARSE_BLAS_INTERFACE_REWRAPPER_L2`'dnl
+RSB_M4_SPBLAS_MATRIX_ALL_L2_FUNCTION(type,mop,tri,`function_definition',`0',lang)
+')dnl
+')dnl
+')dnl
+')dnl
+
+')dnl
+               /* Handle Management Routines */
+               /*             +              */
+               /* Creation Routines */
+               /*             +              */
+               /* Insertion Routines */
+               /*             +              */
+               /* Completion of Construction Routines */
+               /*             +              */
+               /* Matrix Property Routines */
+               /*             +              */
+               /* Destruction Routine */
+
+foreach(`mop',RSB_M4_SPBLAS_MATRIX_CREATION_MOPS,`dnl
+foreach(`type',RSB_M4_SPBLAS_MATRIX_ALL_TYPES,`dnl
+foreach(`lang',RSB_M4_SPBLAS_MATRIX_ALL_LANGUAGES,`dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+dnl RSB_SPARSE_BLAS_INTERFACE_REWRAPPER_CF`'dnl
+RSB_M4_SPBLAS_MATRIX_CREATION_FUNCS(type,mop,`',`function_declaration',`0',lang)`'dnl
+',`dnl
+RSB_SPARSE_BLAS_INTERFACE_REWRAPPER_CF`'dnl
+RSB_M4_SPBLAS_MATRIX_CREATION_FUNCS(type,mop,`',`function_definition',`0',lang)`'dnl
+')dnl
+')dnl
+')dnl
+
+')dnl
+
+
+foreach(`mop',(`cr_end',`ds'),`dnl
+foreach(`lang',RSB_M4_SPBLAS_MATRIX_ALL_LANGUAGES,`dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+dnl RSB_SPARSE_BLAS_INTERFACE_REWRAPPER_CFNT`'dnl
+RSB_M4_SPBLAS_MATRIX_CREATION_FUNCS(`',mop,`',`function_declaration',`0',lang)`'dnl
+',`dnl
+RSB_SPARSE_BLAS_INTERFACE_REWRAPPER_CFNT`'dnl
+RSB_M4_SPBLAS_MATRIX_CREATION_FUNCS(`',mop,`',`function_definition',`0',lang)`'dnl
+')dnl
+')dnl
+')dnl
+
+foreach(`mop',RSB_M4_SBLAS_EXTRA_INTERFACE_OPS,`dnl
+foreach(`type',RSB_M4_SPBLAS_MATRIX_ALL_TYPES,`dnl
+foreach(`lang',RSB_M4_SPBLAS_MATRIX_ALL_LANGUAGES,`dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+dnl RSB_SPARSE_BLAS_INTERFACE_REWRAPPER_EF`'dnl
+RSB_M4_SPBLAS_EXTRA_FUNCTION(type,mop,`',`function_declaration',`0',lang)`'dnl
+',`dnl
+RSB_SPARSE_BLAS_INTERFACE_REWRAPPER_EF`'dnl
+RSB_M4_SPBLAS_EXTRA_FUNCTION(type,mop,`',`function_definition',`0',lang)`'dnl
+')dnl
+')dnl
+')dnl
+
+')dnl
+
+
+
+dnl
+dnl
+dnl
+
+ifdef(`ONLY_WANT_HEADERS',`
+`#define' BLAS_ussp RSB_WRAPPER_PREFIX`_'BLAS_ussp
+`#define' BLAS_usgp RSB_WRAPPER_PREFIX`_'BLAS_usgp
+rsb_blas_int_t BLAS_ussp( blas_sparse_matrix A, rsb_blas_int_t pname );
+rsb_blas_int_t BLAS_usgp( blas_sparse_matrix A, rsb_blas_int_t pname );
+blas_sparse_matrix rsb_load_spblas_matrix_file_as_matrix_market(const rsb_char_t * filename, rsb_type_t typecode ); /* This is a librsb extension. */
+',`dnl
+rsb_blas_int_t BLAS_usgp( blas_sparse_matrix A, rsb_blas_int_t pname ) /*  FIXME: temporarily here */
+{
+	/**
+	 \ingroup rsb_doc_sparse_blas
+	 \rsb_spblasl2_gp_msg
+	 \rsb_spblas_return_msg
+	 */
+	RSB_SPB_INTERFACE_PREAMBLE
+	RSB_SPB_INTERFACE_RETURN_EXP(rsb__BLAS_usgp(A,pname))
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+`#define'  blas_usgp`'RSB_M4_FORTRAN_SYMBOL_ADD_TO_C`'	RSB_WRAPPER_PREFIX`'blas_usgp`'RSB_M4_FORTRAN_SYMBOL_ADD_TO_C`'
+`#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */'
+void blas_usgp`'RSB_M4_FORTRAN_SYMBOL_ADD_TO_C`'( blas_sparse_matrix*A, rsb_blas_int_t*pname, rsb_blas_int_t * istat ) /*  FIXME: temporarily here */
+{
+	/** \ingroup rsb_doc_sparse_blas
+	 \rsb_spblasl2_gp_msg
+	 \rsb_spblas_istat_msg
+	 */
+	RSB_SPB_INTERFACE_PREAMBLE
+	*istat=BLAS_usgp(*A,*pname); /*  FIXME: temporarily here */
+	RSB_SPB_INTERFACE_RETURN_VOID()
+}
+
+#if !RSB_WITH_SPARSE_BLAS_INTERFACE
+`#define'  blas_ussp`'RSB_M4_FORTRAN_SYMBOL_ADD_TO_C`'	RSB_WRAPPER_PREFIX`'blas_ussp`'RSB_M4_FORTRAN_SYMBOL_ADD_TO_C`'
+`#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */'
+void blas_ussp`'RSB_M4_FORTRAN_SYMBOL_ADD_TO_C`'( blas_sparse_matrix*A, rsb_blas_int_t*pname, rsb_blas_int_t * istat ) /*  FIXME: temporarily here */
+{
+	/**
+	 \ingroup rsb_doc_sparse_blas
+	 \rsb_spblasl2_sp_msg
+	 \rsb_spblas_istat_msg
+	 */
+	RSB_SPB_INTERFACE_PREAMBLE
+	*istat=BLAS_ussp(*A,*pname); /*  FIXME: temporarily here */
+	RSB_SPB_INTERFACE_RETURN_VOID()
+}
+
+rsb_blas_int_t BLAS_ussp( blas_sparse_matrix A, rsb_blas_int_t pname ) /*  FIXME: temporarily here */
+{
+	/**
+	 \ingroup rsb_doc_sparse_blas
+	 \rsb_spblasl2_sp_msg
+	 \rsb_spblas_return_msg
+	 */
+	RSB_SPB_INTERFACE_PREAMBLE
+	RSB_SPB_INTERFACE_RETURN(rsb__BLAS_ussp(A,pname))
+}
+
+')
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`
+struct rsb_mtx_t * rsb_blas_get_mtx(blas_sparse_matrix A);
+',`dnl
+struct rsb_mtx_t * rsb_blas_get_mtx(blas_sparse_matrix A)
+{
+	/*!
+ 	\ingroup rsb_doc_sparse_blas
+	\rsb_BLAS_get_mtx_msg
+
+	\rsb_spblasl2_A_msg
+	\return \rsbmtxpmessage_bg
+
+	\n
+	
+	\rsb_BLAS_get_mtx_example
+	\see_rsb_BLAS_get_mtx_msg
+	\rsb_BLAS_get_mtx_msg_todo
+	\rsb_BLAS_get_mtx_msg_note
+	\rsb_BLAS_get_mtx_msg_warn
+	 */
+	RSB_SPB_INTERFACE_PREAMBLE
+{
+	struct rsb_mtx_t * mtxAp = NULL;
+	mtxAp = rsb__BLAS_inner_matrix_retrieve(A);
+	RSB_SPB_INTERFACE_RETURN_EXP( mtxAp )
+}
+}
+')dnl
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+ifdef(`ONLY_WANT_HEADERS',`
+#endif /* RSB_LIBSPBLAS_H_INCLUDED */
+')
+
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+dnl /* @endcond */
+')dnl
+dnl
diff --git a/rsb_libspblas_handle.c b/rsb_libspblas_handle.c
new file mode 100644
index 0000000..29bc498
--- /dev/null
+++ b/rsb_libspblas_handle.c
@@ -0,0 +1,1734 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/**
+ * @file
+ * @author Michele Martone
+ * @brief  Sparse BLAS interface internals
+ * */
+/*
+ * TODO: support for blas_field_type, blas_base_type, blas_sort_type, ...
+ * 	 error reporting, input sanitizing, error handling, ...
+ * */
+/*
+*/
+/*   #include "blas_sparse/blas_sparse.h"*/
+//#include "blas_sparse/blas_enum.h"
+#include "rsb.h"
+#include "rsb_libspblas.h"
+#include "rsb_internals.h"
+/* #include "libspblas_handle.h" */
+#include "rsb_psblas.h"
+#include "rsb_do.h"
+/*  #include "blas_sparse/blas_sparse_proto.h"*/
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+#define RSB_WANT_SPARSE_BLAS_EXTENSIONS 1
+
+/* #define RSB_BLAS_INVALID_VAL blas_invalid_handle */
+
+#define RSB_ATPNAME_ANY 0
+#define RSB_SPBLAS_DEF_TUNING_ROUNDS RSB_CONST_MAX_TUNING_ROUNDS
+#if RSB_BLAS_WANT_EXPERIMENTAL_TUNING 
+#define RSB_SPB_AT_OP(MTXAP,RNT,HINT,NRHS,ORDER,ALPHAP,BETAP,LHS,RHS,LDC,LDB,OPTYPE) 	\
+	if((MTXAP) && (HINT) == RSB_SPB_THR_STR_AUTO_NEXTOP /* ... next operation */ ) \
+	{		\
+		/* errval = */ rsb__tune_spxx(&(MTXAP), NULL, &ornt, RSB_SPBLAS_DEF_TUNING_ROUNDS, RSB_CONST_MS_AT_AUTO_STEPS, RSB_CONST_MS_AT_AUTO_STEPS,RSB_AUT0_TUNING_DEFAULT_TIMES,RSB_AUT0_TUNING_DEFAULT_TIMES, 0, trans, ALPHAP, NULL, NRHS, ORDER, NULL, LDB, BETAP, NULL, LDC, OPTYPE, NULL, NULL, NULL, RSB_AUT0_TUNING_SILENT, NULL, NULL, NULL, NULL); \
+		if(RHS == NULL || LHS == NULL) { brv = RSB_BLAS_NO_ERROR; goto err; /* wanted just tuning */ } \
+		(HINT) = RSB_SPB_THREADS_DEFAULT; \
+	}
+#endif /* RSB_BLAS_WANT_EXPERIMENTAL_TUNING */
+
+#define RSB_BLAS_IS_ATPNAME_OFF(PNAME) ( (PNAME) == blas_rsb_spmv_n_autotuning_off || (PNAME) == blas_rsb_spmv_t_autotuning_off || (PNAME) == blas_rsb_spmv_autotuning_off )
+#define RSB_BLAS_IS_ATPNAME_ON(PNAME)  ( (PNAME) == blas_rsb_spmv_n_autotuning_on  || (PNAME) == blas_rsb_spmv_t_autotuning_on  || (PNAME) == blas_rsb_spmv_autotuning_on || (PNAME) == blas_rsb_autotune_next_operation )
+#define RSB_BLAS_IS_ATPNAME_ANY(PNAME)  ( RSB_ATPNAME_ANY == RSB_ATPNAME_ANY )
+#define RSB_BLAS_IS_ATPNAME(PNAME) ( RSB_BLAS_IS_ATPNAME_OFF(PNAME) || RSB_BLAS_IS_ATPNAME_ON(PNAME) )
+#define RSB_BLAS_ALLOW_MTX_UPD 1 /* this allows updates in either blas_rsb_duplicates_sum or blas_rsb_duplicates_ovw style; after usds(), blas_rsb_duplicates_ovw is restored and if desired, blas_rsb_duplicates_sum should be set again. */
+#define RSB_TUNING_NEW_STYLE 1 /* FIXME: not yet active */
+
+static struct 
+{
+	struct rsb_blas_sparse_matrix_t * bsms;
+	size_t n;
+	size_t next_handle;
+} rsb_blas_handles;
+
+               /* Service Routines */
+
+static int rsb_compar_vbr_blas_sparse_matrix_t(const void * ap, const void * bp)
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	blas_sparse_matrix ha = ((struct rsb_blas_sparse_matrix_t*)ap)->handle;
+	blas_sparse_matrix hb = ((struct rsb_blas_sparse_matrix_t*)bp)->handle;
+
+        return
+                 ( ha >  hb ) ? 1 :
+                 (( ha == hb ) ? 0 : -1);
+}
+
+static struct rsb_blas_sparse_matrix_t * rsb__BLAS_matrix_retrieve(blas_sparse_matrix handle)
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	/*
+	 shall retrieve the internal data structure associated with the handle
+	 */
+	struct rsb_blas_sparse_matrix_t key;
+
+	key.handle = handle;
+	return bsearch(&key,rsb_blas_handles.bsms,rsb_blas_handles.n,sizeof(struct rsb_blas_sparse_matrix_t),rsb_compar_vbr_blas_sparse_matrix_t);
+}
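+/* Note: this lookup relies on rsb_blas_handles.bsms being kept sorted by
+ * handle value, so bsearch() runs in O(log n); rsb__BLAS_handle_alloc()
+ * preserves that ordering on insertion. */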
+
+static size_t rsb__BLAS_matrix_retrieve_index(blas_sparse_matrix handle)
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	/*
+	 shall retrieve the index of the internal data structure associated with the handle
+	 */
+	struct rsb_blas_sparse_matrix_t * bsm = NULL;
+
+	bsm = rsb__BLAS_matrix_retrieve(handle);
+	if(!bsm)
+		return RSB_BLAS_HANDLE_INVALID;
+	return bsm-rsb_blas_handles.bsms;
+}
+
+rsb_err_t rsb__BLAS_is_type_supported(rsb_char_t c)
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	rsb_err_t errval = RSB_ERR_UNSUPPORTED_TYPE;
+
+	switch(c)
+	{
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX
+		case('c'): case('C'):
+#endif /* RSB_NUMERICAL_TYPE_FLOAT_COMPLEX */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+		case('d'): case('D'):
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX
+		case('z'): case('Z'):
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT
+		case('s'): case('S'):
+#endif /* RSB_NUMERICAL_TYPE_FLOAT */
+		errval = RSB_ERR_NO_ERROR;
+		break;
+		default:
+		break;
+	};
+	return errval;
+}
+
+struct rsb_mtx_t * rsb__BLAS_inner_matrix_retrieve(blas_sparse_matrix handle)
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	struct rsb_mtx_t * mtxAp = NULL;
+
+	if(rsb_blas_handles.n==1)
+	{
+		/* shortcut: with a single handle, no search is needed */
+	       	mtxAp = rsb_blas_handles.bsms[0].mtxAp;
+	}
+	else
+	{
+		struct rsb_blas_sparse_matrix_t * bsk = NULL;
+	       	bsk = rsb__BLAS_matrix_retrieve(handle);
+		if(bsk)
+			mtxAp = bsk->mtxAp;
+	}
+	return mtxAp;
+}
+
+rsb_err_t rsb__BLAS_handles_free(void)
+{
+	/**
+	 \ingroup gr_internals
+	 This function shall be called as a finalizer.
+	 It can be dangerous if called before initialization or before other sparse BLAS-related operations have completed.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(rsb_blas_handles.bsms!=NULL)
+		rsb__free(rsb_blas_handles.bsms);
+	RSB_DO_ERR_RETURN(errval)
+}
+
+static blas_sparse_matrix rsb__BLAS_handle_alloc(void)
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	/*
+	 shall allocate a new matrix handle
+	 NOTE: this allocates an initial pool of descriptors, which shall be freed at some point.
+	 */
+	struct rsb_blas_sparse_matrix_t * nbsms = NULL;
+	size_t n;
+	blas_sparse_matrix handle = RSB_BLAS_INVALID_VAL;
+
+	RSB_DEBUG_ASSERT(rsb_blas_handles.n <= RSB_BLAS_MATRICES_MAX);
+	RSB_DEBUG_ASSERT(rsb_blas_handles.n>=0);
+
+	if(rsb_blas_handles.n >= RSB_BLAS_MATRICES_MAX)
+		RSB_PERR_GOTO(err,"matrix limit reached")
+
+	if(rsb_blas_handles.bsms==NULL)
+	{
+		nbsms = rsb__calloc(sizeof(struct rsb_blas_sparse_matrix_t)*(rsb_blas_handles.n+1));
+		rsb_blas_handles.next_handle = RSB_BLAS_FIRST_HANDLE; /* handles reset */
+	}
+	else
+		nbsms = rsb__realloc(rsb_blas_handles.bsms,sizeof(struct rsb_blas_sparse_matrix_t)*(rsb_blas_handles.n+1));
+
+	if(!nbsms)
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	else
+		rsb_blas_handles.bsms=nbsms;
+	/* we now have one extra struct in the array; note that it must be blanked before use */
+	handle = rsb_blas_handles.next_handle<RSB_BLAS_FIRST_HANDLE?RSB_BLAS_FIRST_HANDLE:rsb_blas_handles.next_handle;
+	for(n=0;n<rsb_blas_handles.n && rsb_blas_handles.bsms[n].handle<handle;++n)
+		;/* TODO: this linear scan is inefficient; a binary search would do */
+
+	/* inserting at n will keep the vector sorted in ascending order */
+	if(n != rsb_blas_handles.n)	
+	{
+		RSB_MEMMOVE(rsb_blas_handles.bsms+n+1,rsb_blas_handles.bsms+n,sizeof(struct rsb_blas_sparse_matrix_t)*(rsb_blas_handles.n-n));
+	}
+
+	rsb_blas_handles.next_handle++;
+	if(rsb_blas_handles.next_handle>RSB_BLAS_LAST_HANDLE)
+		rsb_blas_handles.next_handle = RSB_BLAS_FIRST_HANDLE;
+
+	RSB_BZERO(rsb_blas_handles.bsms+n,sizeof(rsb_blas_handles.bsms[n]));/* we blank the new handle */
+	rsb_blas_handles.bsms[n].handle=handle;
+	rsb_blas_handles.n++;
+	return handle;
+err:
+	return RSB_BLAS_INVALID_VAL;
+}
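+
+/*
+ Note on the above: handles are dispensed from the circular range
+ [RSB_BLAS_FIRST_HANDLE,RSB_BLAS_LAST_HANDLE] and the descriptor array is kept
+ sorted by handle value; this ordering is what rsb__BLAS_matrix_retrieve's
+ bsearch depends upon.
+*/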
+
+blas_sparse_matrix rsb__BLAS_handle_free(blas_sparse_matrix handle)
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	/*
+	 shall free a previously allocated handle
+	 */
+	size_t n;
+	struct rsb_blas_sparse_matrix_t * nbsms = NULL;
+	rsb_blas_int_t retval = RSB_BLAS_INVALID_VAL;
+
+	if(rsb_blas_handles.n<1)
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	n = rsb__BLAS_matrix_retrieve_index(handle);
+	if(n>RSB_BLAS_MATRICES_MAX)
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	if(n<rsb_blas_handles.n-1)
+		RSB_MEMMOVE(rsb_blas_handles.bsms+n,rsb_blas_handles.bsms+n+1,sizeof(struct rsb_blas_sparse_matrix_t)*(rsb_blas_handles.n-(n+1)));
+
+	rsb_blas_handles.n--;
+
+	nbsms = rsb__realloc(rsb_blas_handles.bsms,sizeof(struct rsb_blas_sparse_matrix_t)*(rsb_blas_handles.n));
+	if(rsb_blas_handles.n==0 || (nbsms!=NULL))
+		rsb_blas_handles.bsms=nbsms;
+
+	retval = handle;
+err:
+	return retval;
+}
+
+               /* Matrix assembly routines */
+
+blas_sparse_matrix rsb__BLAS_new_matrix_begin(rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnzest, rsb_type_t typecode, rsb_coo_idx_t br, rsb_coo_idx_t bc, const rsb_coo_idx_t*rbp, const rsb_coo_idx_t*cbp)
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	/* 
+	 * shall allocate a new matrix descriptor and handle
+	 * */
+	blas_sparse_matrix handle = RSB_BLAS_INVALID_VAL;
+	struct rsb_blas_sparse_matrix_t * bsm = NULL;
+
+	if(rbp && cbp)
+	{
+		m=k=0;
+		RSB_FCOO_ASUM(m,rbp,0,br);
+		RSB_FCOO_ASUM(k,cbp,0,bc);
+		nnzest=1+RSB_MAX(m,k);
+	}
+
+	RSB_DEBUG_ASSERT(nnzest>0);
+
+	if(!RSB_ARE_VALID_MATRIX_INIT_PARS(m,k,nnzest?nnzest:1,typecode))
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+
+	if(br<0 || bc<0)
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+
+
+	if( (rbp && !cbp) || ( cbp && !rbp))
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+
+
+	if( (handle = rsb__BLAS_handle_alloc()) == RSB_BLAS_INVALID_VAL)
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+
+	if( (bsm = rsb__BLAS_matrix_retrieve(handle) ) == NULL )
+		RSB_PERR_GOTO(errh,RSB_ERRM_ES)
+
+	RSB_BZERO_P(bsm); /* should already be blanked, unless handles have wrapped around in the descriptors array */
+
+	if(rbp && cbp)
+	{
+		bsm->rbp = rsb__clone_area_with_extra(rbp,sizeof(rsb_coo_idx_t)*(br),sizeof(rsb_coo_idx_t),0);
+		bsm->cbp = rsb__clone_area_with_extra(cbp,sizeof(rsb_coo_idx_t)*(bc),sizeof(rsb_coo_idx_t),0);
+		if( (!bsm->rbp) || (!bsm->cbp) )
+			RSB_PERR_GOTO(errr,RSB_ERRM_ES)
+		bsm->rbp[0]=bsm->cbp[0]=0;
+		rsb__do_prefix_sum_coo_idx_t(bsm->rbp,br+1);
+		rsb__do_prefix_sum_coo_idx_t(bsm->cbp,bc+1);
+	}
+
+	bsm->symmetry=blas_general;
+	bsm->diag_type=blas_non_unit_diag;
+	bsm->mtxAp=NULL;
+	bsm->coomatrix.nnz=nnzest;
+	bsm->coomatrix.nr=m;
+	bsm->coomatrix.nc=k;
+	bsm->coomatrix.typecode=typecode;
+	bsm->nnzin=0;
+	if(br==0)
+		bsm->k=1;
+	else
+		bsm->k=br;
+	if(bc==0)
+		bsm->l=1;
+	else
+		bsm->l=bc;
+	bsm->handle=handle;
+	bsm->type=blas_new_handle;
+	bsm->dupstra = blas_rsb_duplicates_sum;
+	bsm->fmt_hint = blas_rsb_rep_rsb;
+#if RSB_BLAS_WANT_EXPERIMENTAL_TUNING 
+	bsm->opt_mvn_hint = bsm->opt_mvt_hint = RSB_SPB_THREADS_DEFAULT;
+#endif /* RSB_BLAS_WANT_EXPERIMENTAL_TUNING */
+	switch(typecode)
+	{
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+		case('d'): case('D'):
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX
+		case('z'): case('Z'):
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX */
+		bsm->fprecision = blas_double_precision;break;
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX
+		case('c'): case('C'):
+#endif /* RSB_NUMERICAL_TYPE_FLOAT_COMPLEX */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT
+		case('s'): case('S'):
+#endif /* RSB_NUMERICAL_TYPE_FLOAT */
+		bsm->fprecision = blas_single_precision;break;
+		default:
+		RSB_PERR_GOTO(errr,RSB_ERRM_ES)
+	       	break;
+	};
+	/* allocate temporary resources */
+	if(rsb__allocate_coo_matrix_t(&bsm->coomatrix)==NULL)
+		RSB_PERR_GOTO(errr,RSB_ERRM_ES)
+
+	bsm->type=blas_open_handle;
+
+	return handle;
+errr:
+	RSB_CONDITIONAL_FREE(bsm->rbp);
+	RSB_CONDITIONAL_FREE(bsm->cbp);
+errh:
+	rsb__BLAS_handle_free(handle);
+err:
+	return RSB_BLAS_INVALID_VAL;
+}
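+
+/*
+ Summary of the descriptor life cycle implemented here (no new behaviour):
+   blas_new_handle   -- handle allocated, descriptor blank;
+   blas_open_handle  -- temporary COO store allocated, insertions accepted;
+   blas_valid_handle -- rsb_mtx_t assembled by rsb__BLAS_new_matrix_finish.
+*/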
+
+static blas_sparse_matrix rsb__BLAS_new_matrix_expand_store(struct rsb_blas_sparse_matrix_t * bsm, rsb_nnz_idx_t to_nnz)
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	/* expand the temporary store for inserting data */
+	RSB_DEBUG_ASSERT(bsm);
+
+	if( bsm->type!=blas_open_handle )
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+
+	RSB_DEBUG_ASSERT(bsm->coomatrix.IA);
+	RSB_DEBUG_ASSERT(bsm->coomatrix.JA);
+	RSB_DEBUG_ASSERT(bsm->coomatrix.VA);
+
+	if( bsm->nnzin == 0 && to_nnz == 0 && bsm->coomatrix.nnz != 0 )
+	{
+		/* when more nonzeroes were estimated than were actually inserted */
+		rsb__destroy_coo_matrix_t(&bsm->coomatrix);
+		bsm->coomatrix.nnz = 0;
+	}
+	else
+		if( /*  ( to_nnz != bsm->coomatrix.nnz ) && */rsb__reallocate_coo_matrix_t(&bsm->coomatrix, to_nnz)==NULL && to_nnz>0)
+		{
+			RSB_PERR_GOTO(err,"Failed reallocation from %zd to %zd nonzeroes\n", (size_t) bsm->coomatrix.nnz, (size_t) to_nnz)
+		}
+
+	return bsm->handle;
+err:
+	return RSB_BLAS_INVALID_VAL;
+}
+
+static blas_sparse_matrix rsb__BLAS_new_matrix_expand_store_try(struct rsb_blas_sparse_matrix_t * bsm, rsb_nnz_idx_t to_nnz_min, rsb_nnz_idx_t to_nnz_max)
+{
+	/* opportunistically try the upper limit first; on failure, fall back to the lower one */
+	/* TODO: in the long run, shall replace rsb__BLAS_new_matrix_expand_store */
+	blas_sparse_matrix handle = RSB_BLAS_INVALID_VAL;
+
+	handle = rsb__BLAS_new_matrix_expand_store(bsm, to_nnz_max);
+	if( handle == RSB_BLAS_INVALID_VAL && to_nnz_min < to_nnz_max )
+		handle = rsb__BLAS_new_matrix_expand_store(bsm, to_nnz_min);
+	return handle;
+}
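+
+/*
+ Growth policy sketch (illustration only, mirroring the call sites below):
+ requesting RSB_MAX(nnzin+nnz,2*current) doubles the store, so repeated
+ insertions amortize to O(1) reallocations per element; nnzin+nnz is the
+ fallback when the doubled request cannot be satisfied.
+*/
+#if 0
+	if( bsm->nnzin+nnz > bsm->coomatrix.nnz ) /* temporary store full: grow it */
+		rsb__BLAS_new_matrix_expand_store_try(bsm,(bsm->nnzin+nnz),RSB_MAX(bsm->nnzin+nnz,2*bsm->coomatrix.nnz));
+#endif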
+
+               /* Nonzeroes insertion routines */
+
+static blas_sparse_matrix rsb__BLAS_new_matrix_insert_block(struct rsb_blas_sparse_matrix_t * bsm, const void * val, rsb_blas_int_t row_stride, rsb_blas_int_t col_stride, rsb_blas_int_t i, rsb_blas_int_t j)
+{
+	/**
+	 \ingroup gr_internals
+	 No check is performed on the block size arrays.
+	 */
+	int ii = 0, jj = 0, ob = 0;
+	int rb = 0, cb = 0, roff = 0, coff = 0;
+	size_t nnz = 0,es = 0;
+	rsb_blas_int_t retval = RSB_BLAS_INVALID_VAL;
+
+	RSB_DEBUG_ASSERT(bsm);
+	es = RSB_NUMERICAL_TYPE_SIZE(bsm->coomatrix.typecode);
+
+	ob = bsm->off;
+	i -= ob;
+	j -= ob;
+	if(bsm->rbp)
+	{
+		rb=bsm->rbp[i+1]-bsm->rbp[i];
+		roff=bsm->rbp[i];
+	}
+	else
+	{
+		rb=bsm->k;
+		roff=rb*i;
+	}
+
+	if(bsm->cbp)
+	{
+		cb=bsm->cbp[j+1]-bsm->cbp[j];
+		coff=bsm->cbp[j];
+	}
+	else
+	{
+		cb=bsm->l;
+		coff=cb*j;
+	}
+
+	nnz=rb*cb;
+
+	if(nnz==0)
+	{
+		/* can happen in different cases; e.g. not a blocked matrix, a zero-sized block, ... */
+		RSB_ERROR(RSB_ERRM_BNCS);
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	if(bsm->nnzin+nnz > bsm->coomatrix.nnz)
+		if(rsb__BLAS_new_matrix_expand_store(bsm,RSB_MAX(bsm->nnzin+nnz,2*bsm->coomatrix.nnz))==RSB_BLAS_INVALID_VAL)
+			RSB_PERR_GOTO(err,RSB_ERRM_ES)
+
+	/* please note that structural zeroes are not supported by the Sparse BLAS interface,
+	 * so we don't use 
+	 *  if(!RSB_DO_FLAG_HAS(bsm->flags, RSB_FLAG_DISCARD_ZEROS) ... )
+	 */
+	for (ii=0; ii<rb; ii++)
+	{
+		for (jj=0; jj<cb; jj++)
+		{
+			int nzoff=bsm->nnzin+ii*cb+jj;
+			const char*eval=((const char*)val)+es*(ii*row_stride+jj*col_stride);
+			bsm->coomatrix.IA[nzoff]=roff+ii;
+			bsm->coomatrix.JA[nzoff]=coff+jj;
+			if(!RSB_IS_ELEMENT_ZERO(eval,bsm->coomatrix.typecode))
+ 		 		rsb_memcpy(((char*)bsm->coomatrix.VA)+es*nzoff,eval,es);
+		}
+	}
+	bsm->nnzin += nnz;
+
+	retval = RSB_BLAS_NO_ERROR;
+err:
+	return retval;
+}
+
+static blas_sparse_matrix rsb__BLAS_new_matrix_insert_row(struct rsb_blas_sparse_matrix_t * bsm, rsb_blas_int_t i, rsb_blas_int_t nnz, const void * val, const rsb_blas_int_t *jndx )
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	rsb_blas_int_t k;
+	int ob = 0;
+	rsb_blas_int_t retval = RSB_BLAS_INVALID_VAL;
+
+	RSB_DEBUG_ASSERT(bsm);
+
+	ob = bsm->off;
+
+	if(bsm->nnzin+nnz > bsm->coomatrix.nnz)
+		if(rsb__BLAS_new_matrix_expand_store(bsm,RSB_MAX(bsm->nnzin+nnz,2*bsm->coomatrix.nnz))==RSB_BLAS_INVALID_VAL)
+			RSB_PERR_GOTO(err,RSB_ERRM_ES)
+  	rsb_numerical_memcpy(bsm->coomatrix.typecode,bsm->coomatrix.VA,bsm->nnzin,val,0,nnz);
+	for(k=0;k<nnz;++k)
+		bsm->coomatrix.IA[bsm->nnzin+k] = i-ob,
+		bsm->coomatrix.JA[bsm->nnzin+k] = jndx[k]-ob;
+	bsm->nnzin += nnz;
+
+	retval = RSB_BLAS_NO_ERROR;
+err:
+	return retval;
+}
+
+static blas_sparse_matrix rsb__BLAS_new_matrix_insert_col(struct rsb_blas_sparse_matrix_t * bsm, rsb_blas_int_t j, rsb_blas_int_t nnz, const void * val, const rsb_blas_int_t *indx )
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	/* append coo data */
+	int ob = 0;
+	rsb_blas_int_t k;
+	rsb_blas_int_t retval = RSB_BLAS_INVALID_VAL;
+
+	RSB_DEBUG_ASSERT(bsm);
+
+	if( bsm->nnzin+nnz > bsm->coomatrix.nnz )
+		if( rsb__BLAS_new_matrix_expand_store(bsm,RSB_MAX(bsm->nnzin+nnz,2*bsm->coomatrix.nnz)) == RSB_BLAS_INVALID_VAL )
+			RSB_PERR_GOTO(err,RSB_ERRM_ES)
+
+	ob=bsm->off;
+
+  	rsb_numerical_memcpy(bsm->coomatrix.typecode,bsm->coomatrix.VA,bsm->nnzin,val,0,nnz);
+	for(k=0;k<nnz;++k)
+		bsm->coomatrix.IA[bsm->nnzin+k]=indx[k]-ob,
+		bsm->coomatrix.JA[bsm->nnzin+k]=j-ob;
+	bsm->nnzin += nnz;
+
+	retval = RSB_BLAS_NO_ERROR;
+err:
+	return retval;
+}
+
+static blas_sparse_matrix rsb__BLAS_new_matrix_insert_clique(struct rsb_blas_sparse_matrix_t * bsm, const rsb_blas_int_t k, const rsb_blas_int_t l, const void * val, const rsb_blas_int_t row_stride, const rsb_blas_int_t col_stride, const rsb_blas_int_t *indx, const rsb_blas_int_t *jndx )
+{
+	/**
+	 \ingroup gr_internals
+	 append coo data
+	*/
+	int i = 0, j = 0, ob = 0;
+	size_t nnz = k*l;
+	rsb_blas_int_t retval = RSB_BLAS_INVALID_VAL;
+
+	RSB_DEBUG_ASSERT(bsm);
+
+#if 0
+	RSB_ERROR(RSB_ERRM_CIIAUF);
+	RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	/* FIXME: unfinished! */
+#endif
+
+	if( bsm->nnzin+nnz > bsm->coomatrix.nnz )
+		if( rsb__BLAS_new_matrix_expand_store(bsm,RSB_MAX(bsm->nnzin+nnz,2*bsm->coomatrix.nnz)) == RSB_BLAS_INVALID_VAL )
+			RSB_PERR_GOTO(err,RSB_ERRM_ES)
+
+	ob=bsm->off;
+
+	for (i=0; i<k; i++)
+	{
+		for (j=0; j<l; j++)
+		{
+			bsm->coomatrix.IA[bsm->nnzin+i*l+j] = indx[i]-ob,
+			bsm->coomatrix.JA[bsm->nnzin+i*l+j] = jndx[j]-ob;
+ 		 	rsb_numerical_memcpy(bsm->coomatrix.typecode,bsm->coomatrix.VA,(bsm->nnzin+(i*l+j)),val,(i*row_stride+j*col_stride),1); /* FIXME: this is just one element */
+		}
+	}
+	bsm->nnzin += nnz;
+	retval = RSB_BLAS_NO_ERROR;
+err:
+	return retval;
+}
+
+static rsb_blas_int_t rsb__BLAS_new_matrix_insert_entries( struct rsb_blas_sparse_matrix_t * bsm, const rsb_blas_int_t nnz, const char *val, const rsb_blas_int_t * indx, const rsb_blas_int_t * jndx)
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	int ob = 0;
+	rsb_blas_int_t k;
+	rsb_blas_int_t retval = RSB_BLAS_INVALID_VAL;
+
+	RSB_DEBUG_ASSERT(bsm);
+
+#if RSB_BLAS_ALLOW_MTX_UPD
+	if( bsm->type == blas_valid_handle )
+	{
+		rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+		errval = rsb__do_set_elements(bsm->mtxAp, val, indx, jndx, nnz,  
+			bsm->base == blas_one_base ? RSB_FLAG_FORTRAN_INDICES_INTERFACE : RSB_FLAG_NOFLAGS);
+		if( RSB_SOME_ERROR( errval ))
+			RSB_PERR_GOTO(err,RSB_ERRM_ES)
+		goto ok;
+	}
+#endif /* RSB_BLAS_ALLOW_MTX_UPD */
+
+	if(bsm->nnzin+nnz > bsm->coomatrix.nnz)
+		if( rsb__BLAS_new_matrix_expand_store_try(bsm, (bsm->nnzin+nnz), RSB_MAX(bsm->nnzin+nnz,2*bsm->coomatrix.nnz)) == RSB_BLAS_INVALID_VAL )
+		{
+				RSB_PERR_GOTO(err,RSB_ERRM_ES)
+		}
+
+	ob = bsm->off;
+  	rsb_numerical_memcpy(bsm->coomatrix.typecode,bsm->coomatrix.VA,bsm->nnzin,val,0,nnz);
+	for(k=0;k<nnz;++k)
+		bsm->coomatrix.IA[bsm->nnzin+k]=indx[k]-ob,
+		bsm->coomatrix.JA[bsm->nnzin+k]=jndx[k]-ob;
+
+	bsm->nnzin += nnz;
+ok:
+	retval = RSB_BLAS_NO_ERROR;
+err:
+	return retval;
+}
+
+static rsb_flags_t rsb__BLAS_new_matrix_finish_flags(struct rsb_blas_sparse_matrix_t * bsm)
+{
+	/* sets rsb flags based on user-set properties */
+	rsb_flags_t flags = RSB_FLAG_NOFLAGS | RSB_FLAG_SORT_INPUT | RSB_FLAG_OWN_PARTITIONING_ARRAYS /* | RSB_FLAG_WANT_BCSS_STORAGE */ ;
+
+	switch(bsm->symmetry)
+	{
+		case(blas_lower_symmetric):
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER_SYMMETRIC);
+		break;
+		case(blas_lower_hermitian):
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER_HERMITIAN);
+		break;
+		case(blas_upper_symmetric):
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_UPPER_SYMMETRIC);
+		break;
+		case(blas_upper_hermitian):
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_UPPER_HERMITIAN);
+		break;
+		case(blas_lower_triangular):
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER_TRIANGULAR);
+		break;
+		case(blas_upper_triangular):
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_UPPER_TRIANGULAR);
+		break;
+	}
+
+	if( bsm->dupstra == blas_rsb_duplicates_ovw )
+	{
+		RSB_DO_FLAG_SUBST(flags,RSB_FLAG_ALL_DUPLICATE_FLAGS,RSB_FLAG_DUPLICATES_KEEP_LAST);
+	}
+	else
+	if( bsm->dupstra == blas_rsb_duplicates_sum )
+	{
+		RSB_DO_FLAG_SUBST(flags,RSB_FLAG_ALL_DUPLICATE_FLAGS,RSB_FLAG_DUPLICATES_SUM);
+	}
+
+	if(bsm->diag_type==blas_unit_diag)
+		RSB_DO_FLAG_ADD(flags,RSB_FLAG_UNIT_DIAG_IMPLICIT);
+
+//	if(bsm->diag_type==blas_one_base) // this is complicated: should we keep these flags in both inner and outer interface ?
+//		RSB_DO_FLAG_ADD(flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE);
+	switch(bsm->fmt_hint)
+	{
+#if RSB_WANT_SPARSE_BLAS_EXTENSIONS
+		case(blas_rsb_rep_coo ): RSB_DO_FLAG_ADD(flags,RSB_FLAG_DEFAULT_COO_MATRIX_FLAGS); break;
+		case(blas_rsb_rep_csr ): RSB_DO_FLAG_ADD(flags,RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS); break;
+#endif /* RSB_WANT_SPARSE_BLAS_EXTENSIONS */
+		case(blas_rsb_rep_rsb ):
+		default:
+		RSB_DO_FLAG_ADD(flags,RSB_FLAG_DEFAULT_MATRIX_FLAGS);
+	}
+	//RSB_DO_FLAG_ADD(flags,RSB_FLAG_DEFAULT_COO_MATRIX_FLAGS);
+	//RSB_DO_FLAG_ADD(flags,RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS);
+
+	//RSB_DO_FLAG_ADD(flags,RSB_FLAG_QUAD_PARTITIONING);
+	//RSB_DO_FLAG_ADD(flags,RSB_FLAG_USE_HALFWORD_INDICES);
+	//RSB_DO_FLAG_ADD(flags,RSB_FLAG_WANT_COO_STORAGE);
+	//RSB_DO_FLAG_ADD(flags,RSB_FLAG_WANT_BCSS_STORAGE);
+	//RSB_DO_FLAG_ADD(flags,RSB_FLAG_ASSEMBLED_IN_COO_ARRAYS);
+
+	//RSB_DO_FLAG_ADD(flags,RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES);		// NEW
+	//RSB_DO_FLAG_ADD(flags,RSB_FLAG_RECURSIVE_MORE_LEAVES_THAN_THREADS);	// NEW
+	//RSB_DO_FLAG_ADD(flags,RSB_FLAG_USE_HALFWORD_INDICES_COO);
+	//RSB_DO_FLAG_ADD(flags,RSB_FLAG_RECURSIVE_HALF_DETECTED_CACHE);	// EXPERIMENTAL
+	return flags;
+}
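+
+/*
+ Mapping sketch (illustration only): e.g. a matrix declared via
+ BLAS_ussp(A,blas_lower_symmetric) and BLAS_ussp(A,blas_unit_diag) before any
+ insertion assembles with RSB_FLAG_LOWER_SYMMETRIC and
+ RSB_FLAG_UNIT_DIAG_IMPLICIT added to the default flags computed above.
+*/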
+
+static rsb_blas_int_t rsb__BLAS_autotune( struct rsb_blas_sparse_matrix_t * bsm, rsb_blas_int_t pname )
+{
+#if RSB_BLAS_WANT_EXPERIMENTAL_TUNING 
+	if( bsm == NULL )
+	       goto err;
+
+	if( bsm->type != blas_open_handle && bsm->type != blas_valid_handle )
+	       goto err;
+
+	if(RSB_BLAS_IS_ATPNAME_OFF(pname))
+	{
+		if( pname == blas_rsb_spmv_autotuning_off )
+			bsm->opt_mvn_hint = bsm->opt_mvt_hint = RSB_SPB_THREADS_DEFAULT;
+		if( pname == blas_rsb_spmv_n_autotuning_off )
+			bsm->opt_mvn_hint = RSB_SPB_THREADS_DEFAULT;
+		if( pname == blas_rsb_spmv_t_autotuning_off )
+			bsm->opt_mvt_hint = RSB_SPB_THREADS_DEFAULT;
+		goto done;
+	}
+
+	if(RSB_BLAS_IS_ATPNAME_ON(pname))
+	{
+#if RSB_TUNING_NEW_STYLE
+		if( pname == blas_rsb_spmv_autotuning_on )
+			bsm->opt_mvn_hint = /* bsm->opt_mvt_hint = */ RSB_SPB_THR_STR_AUTO;
+		if( pname == blas_rsb_spmv_n_autotuning_on )
+			bsm->opt_mvn_hint = RSB_SPB_THR_STR_AUTO;
+		if( pname == blas_rsb_spmv_t_autotuning_on )
+			bsm->opt_mvt_hint = RSB_SPB_THR_STR_AUTO;
+		if( pname == blas_rsb_autotune_next_operation )
+			bsm->opt_mvn_hint = bsm->opt_mvt_hint = RSB_SPB_THR_STR_AUTO_NEXTOP ;
+#else /* RSB_TUNING_NEW_STYLE */
+		if( pname == blas_rsb_spmv_autotuning_on )
+			bsm->opt_mvn_hint = /* bsm->opt_mvt_hint = */ RSB_SPB_THREADS_AUTO;
+		if( pname == blas_rsb_spmv_n_autotuning_on )
+			bsm->opt_mvn_hint = RSB_SPB_THREADS_AUTO;
+		if( pname == blas_rsb_spmv_t_autotuning_on )
+			bsm->opt_mvt_hint = RSB_SPB_THREADS_AUTO;
+		if( pname == blas_rsb_autotune_next_operation )
+			bsm->opt_mvn_hint = bsm->opt_mvt_hint = RSB_SPB_THR_STR_AUTO_NEXTOP ;
+#endif /* RSB_TUNING_NEW_STYLE */
+		
+		if( bsm->type == blas_open_handle )
+			goto done;
+		else
+			;/* continue */
+	}
+
+	if( bsm->type == blas_valid_handle )
+	{
+		if( RSB_BLAS_IS_ATPNAME_ANY(pname) )
+		{
+			rsb_err_t errval = RSB_ERR_NO_ERROR;
+			int nont = 0, tont = 0, mnt = rsb__set_num_threads(RSB_THREADS_GET_MAX);
+			struct rsb_mtx_t * mtxOp = bsm->mtxAp;
+
+			if( bsm->opt_mvt_hint == RSB_SPB_THREADS_AUTO )
+			{
+				errval = rsb__do_tune_spmm(NULL,NULL,&tont,2*mnt,10.0/mnt,RSB_TRANSPOSITION_T,NULL,mtxOp,1,RSB_FLAG_WANT_COLUMN_MAJOR_ORDER,NULL,0,NULL,NULL,0);
+				if( RSB_SOME_ERROR(errval))
+					goto err;
+				bsm->opt_mvt_hint = tont;
+			}
+
+			if( bsm->opt_mvn_hint == RSB_SPB_THREADS_AUTO )
+			{
+				errval = rsb__do_tune_spmm(NULL,NULL,&nont,2*mnt,10.0/mnt,RSB_TRANSPOSITION_N,NULL,mtxOp,1,RSB_FLAG_WANT_COLUMN_MAJOR_ORDER,NULL,0,NULL,NULL,0);
+				if( RSB_SOME_ERROR(errval)) 
+					goto err;
+				bsm->opt_mvn_hint = nont;
+			}
+
+#if RSB_TUNING_NEW_STYLE
+			if( bsm->opt_mvt_hint == RSB_SPB_THR_STR_AUTO )
+			{
+				errval = rsb__tune_spxx(&mtxOp, NULL, &tont, RSB_SPBLAS_DEF_TUNING_ROUNDS, RSB_CONST_MS_AT_AUTO_STEPS, RSB_CONST_MS_AT_AUTO_STEPS, RSB_AUT0_TUNING_DEFAULT_TIMES,RSB_AUT0_TUNING_DEFAULT_TIMES, 0, RSB_TRANSPOSITION_T, NULL, NULL, 1, RSB_FLAG_WANT_COLUMN_MAJOR_ORDER, NULL, 0, NULL, NULL, 0, rsb_op_spmv, NULL, NULL, NULL, RSB_AUT0_TUNING_SILENT, NULL, NULL, NULL, NULL);
+				bsm->opt_mvt_hint = tont;
+			}
+
+			if( bsm->opt_mvn_hint == RSB_SPB_THR_STR_AUTO )
+			{
+				errval = rsb__tune_spxx(&mtxOp, NULL, &nont, RSB_SPBLAS_DEF_TUNING_ROUNDS, RSB_CONST_MS_AT_AUTO_STEPS, RSB_CONST_MS_AT_AUTO_STEPS, RSB_AUT0_TUNING_DEFAULT_TIMES,RSB_AUT0_TUNING_DEFAULT_TIMES, 0, RSB_TRANSPOSITION_N, NULL, NULL, 1, RSB_FLAG_WANT_COLUMN_MAJOR_ORDER, NULL, 0, NULL, NULL, 0, rsb_op_spmv, NULL, NULL, NULL, RSB_AUT0_TUNING_SILENT, NULL, NULL, NULL, NULL);
+				bsm->opt_mvn_hint = nont;
+			}
+
+			if( bsm->mtxAp != mtxOp && mtxOp )
+		       	{
+#if !RSB_AT_DESTROYS_MTX
+			       	RSB_CONDITIONAL_FREE(bsm->mtxAp);
+#endif /* RSB_AT_DESTROYS_MTX */
+				bsm->mtxAp = mtxOp;
+		       	}
+
+			if( bsm->opt_mvn_hint == RSB_SPB_THR_STR_AUTO_NEXTOP )
+				; /* will optimize at operation time */
+
+			if( RSB_SOME_ERROR(errval))
+				goto err;
+#endif /* RSB_TUNING_NEW_STYLE */
+			return RSB_BLAS_NO_ERROR;
+		}
+	}
+done:
+	return RSB_BLAS_NO_ERROR;
+err:
+	return RSB_BLAS_ERROR;
+#else
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+	return RSB_BLAS_ERROR_UNSUPPORTED;
+#else /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+	return RSB_BLAS_NO_ERROR; /* bogus success */
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+#endif
+}
+
+static blas_sparse_matrix rsb__BLAS_new_matrix_finish(struct rsb_blas_sparse_matrix_t * bsm, const rsb_flags_t * flagsp)
+{
+	/**
+	 \ingroup gr_internals
+	 shall finish the construction of a new matrix
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_blas_int_t retval = RSB_BLAS_INVALID_VAL;
+	rsb_flags_t flags;
+
+	if(!bsm || bsm->type != blas_open_handle)
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	/* trim the extra storage */
+	if(rsb__BLAS_new_matrix_expand_store(bsm,bsm->nnzin)==RSB_BLAS_INVALID_VAL)
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+
+	flags = flagsp ? *flagsp:rsb__BLAS_new_matrix_finish_flags(bsm);
+	bsm->mtxAp = rsb__mtx_alloc_inner(bsm->coomatrix.VA,bsm->coomatrix.IA,bsm->coomatrix.JA,bsm->coomatrix.nnz,0,0,bsm->coomatrix.typecode,bsm->coomatrix.nr,bsm->coomatrix.nc,bsm->k,bsm->l,flags,&errval);
+	if(!bsm->mtxAp)
+	{
+		rsb__destroy_coo_matrix_t(&bsm->coomatrix);
+		//bsm->coomatrix.IA=bsm->coomatrix.JA=bsm->coomatrix.VA=NULL;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+#if RSB_ALLOW_STDOUT
+	if(0)
+		RSB_STDOUT("sparse blas allocated (%d x %d) @ %p with flags 0x%x (coo:%d, csr:%d), storage: %x\n",
+			bsm->mtxAp->nr, bsm->mtxAp->nc, bsm->mtxAp, bsm->mtxAp->flags,
+			RSB_DO_FLAG_HAS(bsm->mtxAp->flags,RSB_FLAG_WANT_COO_STORAGE),
+			RSB_DO_FLAG_HAS(bsm->mtxAp->flags,RSB_FLAG_WANT_BCSS_STORAGE),
+			bsm->mtxAp->matrix_storage
+			);
+#endif /* RSB_ALLOW_STDOUT */
+
+/*	rsb__do_print_matrix_stats(bsm->mtxAp,RSB_CONST_DUMP_DEFAULT); */
+/*	rsb__do_print_matrix_stats(bsm->mtxAp,RSB_CONST_DUMP_TIMES); */
+/*	rsb__do_print_matrix_stats(bsm->mtxAp,RSB_CONST_DUMP_RECURSION); */
+
+	bsm->type = blas_valid_handle;
+
+#if RSB_BLAS_ALLOW_MTX_UPD
+	RSB_DO_FLAG_SUBST(bsm->mtxAp->flags,RSB_FLAG_ALL_DUPLICATE_FLAGS,RSB_FLAG_DUPLICATES_KEEP_LAST);
+#endif /* RSB_BLAS_ALLOW_MTX_UPD */
+
+#if RSB_BLAS_WANT_EXPERIMENTAL_TUNING 
+	if( bsm->opt_mvn_hint != RSB_SPB_THREADS_DEFAULT || bsm->opt_mvt_hint != RSB_SPB_THREADS_DEFAULT )
+		rsb__BLAS_autotune( bsm, RSB_ATPNAME_ANY ); /* FIXME: for now, no error reporting here */
+#endif
+
+	retval = bsm->handle;
+err:
+	return retval;
+}
+
+static blas_sparse_matrix rsb__BLAS_matrix_destroy(blas_sparse_matrix handle)
+{
+	/**
+	 \ingroup gr_internals
+	 shall destroy a matrix  
+	 */
+	struct rsb_blas_sparse_matrix_t * bsm = NULL;
+	rsb_blas_int_t retval = RSB_BLAS_INVALID_VAL;
+
+	if( (bsm = rsb__BLAS_matrix_retrieve(handle) ) == NULL )
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+
+	RSB_DEBUG_ASSERT(bsm);
+	switch(bsm->type)
+	{
+		case blas_new_handle:
+		/* nothing to do  */
+		break;
+		case blas_valid_handle:
+			rsb__do_mtx_free(bsm->mtxAp);
+		break;
+		case blas_open_handle:
+			rsb__destroy_coo_matrix_t(&bsm->coomatrix);
+		break;
+		default:
+		/* nothing to do  */
+		break;
+	}
+
+	RSB_CONDITIONAL_FREE(bsm->rbp);
+	RSB_CONDITIONAL_FREE(bsm->cbp);
+
+	retval = rsb__BLAS_handle_free(handle);
+err:
+	return retval;
+}
+
+blas_sparse_matrix rsb__BLAS_Xuscr_begin( rsb_blas_int_t m, rsb_blas_int_t n, rsb_type_t typecode)
+{
+	/**
+	 \ingroup gr_internals
+	 \rsb_spblasl2_cr_begin_msg
+	 */
+	blas_sparse_matrix retval = rsb__BLAS_new_matrix_begin(m,n,1+RSB_MAX(m,n), typecode,1,1,NULL,NULL);
+	return retval;
+}
+
+blas_sparse_matrix rsb__BLAS_Xuscr_block_begin( rsb_blas_int_t Mb, rsb_blas_int_t Nb, rsb_blas_int_t k, rsb_blas_int_t l, rsb_type_t typecode)
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	blas_sparse_matrix retval = rsb__BLAS_new_matrix_begin(Mb*k,Nb*l,1+RSB_MAX(Mb*k,Nb*l), typecode,k,l,NULL,NULL);
+	return retval;
+}
+
+blas_sparse_matrix rsb__BLAS_Xuscr_variable_block_begin( rsb_blas_int_t Mb, rsb_blas_int_t Nb, const rsb_blas_int_t *k, const rsb_blas_int_t *l, rsb_type_t typecode)
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	rsb_blas_int_t retval = RSB_BLAS_INVALID_VAL;
+
+	if(!k || !l)
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	retval = rsb__BLAS_new_matrix_begin(0,0,0,typecode,Mb,Nb,k,l);
+err:
+	return retval;
+}
+
+rsb_blas_int_t rsb__BLAS_Xuscr_insert_entry( blas_sparse_matrix A, const void * valp, rsb_blas_int_t i, rsb_blas_int_t j )
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	struct rsb_blas_sparse_matrix_t * bsm = NULL;
+	rsb_blas_int_t retval = RSB_BLAS_INVALID_VAL;
+
+	if( (bsm = rsb__BLAS_matrix_retrieve(A) ) == NULL )
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	if( bsm->type != blas_open_handle)
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+
+	retval = rsb__BLAS_new_matrix_insert_entries(bsm,1,valp,&i,&j);
+err:
+	return retval;
+}
+
+rsb_blas_int_t rsb__BLAS_Xuscr_insert_entries( blas_sparse_matrix A, rsb_blas_int_t nz, const void * val, const rsb_blas_int_t *indx, const rsb_blas_int_t *jndx )
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	struct rsb_blas_sparse_matrix_t * bsm = NULL;
+	rsb_blas_int_t retval = RSB_BLAS_INVALID_VAL;
+
+	if( (bsm = rsb__BLAS_matrix_retrieve(A) ) == NULL )
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+
+	if( ( bsm->type != blas_open_handle )
+#if RSB_BLAS_ALLOW_MTX_UPD
+			&& ( bsm->type != blas_valid_handle )
+#endif /* RSB_BLAS_ALLOW_MTX_UPD */
+		)
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+
+	retval = rsb__BLAS_new_matrix_insert_entries( bsm, nz, val, indx, jndx );
+err:
+	return retval;
+}
+
+rsb_blas_int_t rsb__BLAS_Xuscr_insert_col( blas_sparse_matrix A, rsb_blas_int_t j, rsb_blas_int_t nz, const void * val, const rsb_blas_int_t *indx )
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	struct rsb_blas_sparse_matrix_t * bsm = NULL;
+	rsb_blas_int_t retval = RSB_BLAS_INVALID_VAL;
+
+	if( (bsm = rsb__BLAS_matrix_retrieve(A) ) == NULL )
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	if( bsm->type != blas_open_handle)
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+
+	retval = rsb__BLAS_new_matrix_insert_col(bsm, j, nz, val, indx );
+err:
+	return retval;
+}
+
+rsb_blas_int_t rsb__BLAS_Xuscr_insert_row( blas_sparse_matrix A, rsb_blas_int_t i, rsb_blas_int_t nz, const void * val, const rsb_blas_int_t *jndx )
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	struct rsb_blas_sparse_matrix_t * bsm = NULL;
+	rsb_blas_int_t retval = RSB_BLAS_INVALID_VAL;
+
+	if( (bsm = rsb__BLAS_matrix_retrieve(A) ) == NULL )
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	if( bsm->type != blas_open_handle)
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+
+	retval = rsb__BLAS_new_matrix_insert_row(bsm, i, nz, val, jndx );
+err:
+	return retval;
+}
+
+rsb_blas_int_t rsb__BLAS_Xuscr_insert_clique( blas_sparse_matrix A, const rsb_blas_int_t k, const rsb_blas_int_t l, const void * val, const rsb_blas_int_t row_stride, const rsb_blas_int_t col_stride, const rsb_blas_int_t *indx, const rsb_blas_int_t *jndx )
+{
+	/**
+	 \ingroup gr_internals
+	FIXME: this prototype does not respect the standard !
+	 */
+	struct rsb_blas_sparse_matrix_t * bsm = NULL;
+	rsb_blas_int_t retval = RSB_BLAS_INVALID_VAL;
+
+	if( (bsm = rsb__BLAS_matrix_retrieve(A) ) == NULL )
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	if( bsm->type != blas_open_handle)
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+
+	retval = rsb__BLAS_new_matrix_insert_clique(bsm, k, l, val, row_stride, col_stride, indx, jndx );
+err:
+	return retval;
+}
+
+rsb_blas_int_t rsb__BLAS_Xuscr_insert_block( blas_sparse_matrix A, const void * val, rsb_blas_int_t row_stride, rsb_blas_int_t col_stride, rsb_blas_int_t i, rsb_blas_int_t j )
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	struct rsb_blas_sparse_matrix_t * bsm = NULL;
+	rsb_blas_int_t retval = RSB_BLAS_INVALID_VAL;
+
+	if( (bsm = rsb__BLAS_matrix_retrieve(A) ) == NULL )
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	if( bsm->type != blas_open_handle)
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+
+	retval = rsb__BLAS_new_matrix_insert_block(bsm, val, row_stride, col_stride, i, j);
+err:
+	return retval;
+}
+
+               /* Completion of Construction Routines */
+
+rsb_blas_int_t rsb__BLAS_Xuscr_end_flagged( blas_sparse_matrix A, const rsb_flags_t*flagsp)
+{
+	/**
+	 \ingroup gr_internals
+	 finishes the matrix construction phase
+	 */
+	struct rsb_blas_sparse_matrix_t * bsm = NULL;
+	rsb_blas_int_t errval = RSB_BLAS_ERROR; 
+
+	if( (bsm = rsb__BLAS_matrix_retrieve(A) ) == NULL )
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	if( bsm->type != blas_open_handle )
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+
+	errval = rsb__BLAS_new_matrix_finish(bsm,flagsp) == A ? RSB_BLAS_NO_ERROR : RSB_BLAS_ERROR;
+err:
+	return errval;
+}
+
+rsb_blas_int_t rsb__BLAS_Xuscr_end( blas_sparse_matrix A )
+{
+	rsb_blas_int_t retval = rsb__BLAS_Xuscr_end_flagged(A,NULL);
+	return retval;
+}
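+
+/*
+ Typical outer-API construction sequence (illustration only; the public
+ Sparse BLAS names below wrap the rsb__BLAS_Xuscr_* internals above; the
+ variables nr, nc, nnz, VA, IA, JA are assumed declared by the caller,
+ and error checks are omitted):
+*/
+#if 0
+	blas_sparse_matrix A = BLAS_duscr_begin(nr,nc);	/* -> rsb__BLAS_Xuscr_begin */
+	BLAS_ussp(A,blas_one_base);			/* properties, before any insertion */
+	BLAS_duscr_insert_entries(A,nnz,VA,IA,JA);	/* -> rsb__BLAS_Xuscr_insert_entries */
+	BLAS_duscr_end(A);				/* -> rsb__BLAS_Xuscr_end: assembles the rsb_mtx_t */
+	/* ... BLAS_dusmv / BLAS_dusmm / BLAS_dussv ... */
+	BLAS_usds(A);					/* -> rsb__BLAS_Xusds: destroys the matrix */
+#endif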
+
+               /* Matrix Property Routines */
+
+rsb_blas_int_t rsb__BLAS_usgp( blas_sparse_matrix A, rsb_blas_int_t pname )
+{
+	/**
+	 \ingroup gr_internals
+	 matrix property get
+	 */
+	struct rsb_blas_sparse_matrix_t * bsm = NULL;
+	rsb_blas_int_t retcode = RSB_BLAS_ERROR;
+
+	if( (bsm = rsb__BLAS_matrix_retrieve(A) ) == NULL )
+		goto ret;
+
+	switch (pname)
+	{
+		/*  */
+		case (blas_num_rows) : retcode=bsm->coomatrix.nr; break;
+		case (blas_num_cols) : retcode=bsm->coomatrix.nc; break;
+		case (blas_num_nonzeros) : if (bsm->mtxAp != NULL) rsb__BLAS_Xusget_matrix_nnz(A,&retcode) ; else retcode = bsm->coomatrix.nnz; break;
+		//case (blas_num_nonzeros) : retcode=bsm->coomatrix.nnz; break;
+		/*  */
+		case (blas_complex) : retcode=bsm->field == blas_complex; break;
+		case (blas_real) : retcode=bsm->field == blas_real; break;
+		case (blas_single_precision) : retcode=bsm->fprecision == blas_single_precision ? 1:0; break;
+		case (blas_double_precision) : retcode=bsm->fprecision == blas_double_precision ? 1:0; break;
+		/*  */
+		case (blas_triangular) : retcode=( /*bsm->symmetry == blas_triangular || */bsm->symmetry == blas_lower_triangular || bsm->symmetry == blas_upper_triangular ); break;
+		case (blas_lower_triangular) : retcode=bsm->symmetry == blas_lower_triangular; break;
+		case (blas_upper_triangular) : retcode=bsm->symmetry == blas_upper_triangular; break;
+		/*  */
+		case (blas_general) : retcode=bsm->symmetry == blas_general; break;
+		case (blas_lower_symmetric) : retcode=( bsm->symmetry == blas_lower_symmetric ); break;
+		case (blas_upper_symmetric) : retcode=( bsm->symmetry == blas_upper_symmetric ); break;
+		case (blas_symmetric) : retcode=( /*bsm->symmetry == blas_symmetric || */bsm->symmetry == blas_lower_symmetric || bsm->symmetry == blas_upper_symmetric ); break;
+		/* case (blas_hermitian) : retcode=bsm->symmetry == blas_hermitian; break; */
+		case (blas_hermitian) : retcode=( /*bsm->symmetry == blas_hermitian || */bsm->symmetry == blas_lower_hermitian || bsm->symmetry == blas_upper_hermitian ); break;
+		/*  */
+		case (blas_zero_base) : retcode=bsm->base == blas_zero_base; break;
+		case (blas_one_base) : retcode=bsm->base == blas_one_base; break;
+		/*  */
+		case (blas_rowmajor) : retcode=bsm->order == blas_rowmajor; break;
+		case (blas_colmajor) : retcode=bsm->order == blas_colmajor; break;
+		/*  */
+		case (blas_new_handle) : retcode=bsm->type == blas_new_handle; break;
+		case (blas_open_handle) : retcode=bsm->type == blas_open_handle; break;
+		case (blas_valid_handle) : retcode=bsm->type == blas_valid_handle; break;
+		case (blas_invalid_handle) : retcode=bsm->type != blas_valid_handle; break; /* FIXME */
+		/* the following occur in the NIST 1.02 version */
+		/* case (blas_unassembled) : retcode = (bsm->mtxAp == NULL); */
+		case (blas_unassembled) : retcode=bsm->type != blas_valid_handle; break;
+		case (blas_regular) : retcode = 0; break;
+		case (blas_irregular) : retcode = 1; break;
+		case (blas_block) : retcode = 0;
+
+		break;
+#if RSB_WANT_SPARSE_BLAS_EXTENSIONS
+		case (blas_rsb_duplicates_ovw) :
+		case (blas_rsb_duplicates_sum) :
+		retcode = bsm->dupstra ;
+		break;
+		case (blas_rsb_rep_csr) :
+		case (blas_rsb_rep_coo) :
+		case (blas_rsb_rep_rsb) :
+		retcode = bsm->fmt_hint;
+		break;
+#endif /* RSB_WANT_SPARSE_BLAS_EXTENSIONS */
+#if RSB_BLAS_WANT_EXPERIMENTAL_TUNING 
+		case (blas_rsb_spmv_autotuning_on ):
+		case (blas_rsb_spmv_autotuning_off ):
+		/* NOTE: we return values for the untransposed case here. */
+		case (blas_rsb_spmv_n_autotuning_off ):
+		case (blas_rsb_spmv_n_autotuning_on ):
+		case (blas_rsb_autotune_next_operation):
+			retcode = bsm->opt_mvn_hint > 0 ?  bsm->opt_mvn_hint : 0;
+		break;
+		case (blas_rsb_spmv_t_autotuning_off ):
+		case (blas_rsb_spmv_t_autotuning_on ):
+			retcode = bsm->opt_mvt_hint > 0 ?  bsm->opt_mvt_hint : 0;
+		break;
+#else
+		case (blas_rsb_spmv_autotuning_on ):
+		case (blas_rsb_spmv_autotuning_off ):
+		case (blas_rsb_spmv_n_autotuning_off ):
+		case (blas_rsb_spmv_n_autotuning_on ):
+		case (blas_rsb_spmv_t_autotuning_off ):
+		case (blas_rsb_spmv_t_autotuning_on ):
+		case (blas_rsb_autotune_next_operation):
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+			retcode = RSB_BLAS_ERROR_UNSUPPORTED;
+#else /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+			retcode = RSB_CONST_MIN_SUPPORTED_CORES;
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+		break;
+#endif /* RSB_BLAS_WANT_EXPERIMENTAL_TUNING */
+	}
+ret:
+	return retcode;
+}
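+
+/*
+ Property query sketch (illustration only; A is assumed to be a valid handle):
+*/
+#if 0
+	rsb_blas_int_t nnzA = rsb__BLAS_usgp(A,blas_num_nonzeros);	/* nonzeroes count */
+	if( rsb__BLAS_usgp(A,blas_lower_triangular) == 1 )
+		;/* A was declared lower triangular */
+#endif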
+
+rsb_blas_int_t rsb__BLAS_Xusget_rows_nnz( blas_sparse_matrix A, rsb_blas_int_t fr, rsb_blas_int_t lr, rsb_blas_int_t * nnzp)
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	struct rsb_blas_sparse_matrix_t * bsm = NULL;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if( (bsm = rsb__BLAS_matrix_retrieve(A) ) == NULL )
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	if( bsm->type != blas_valid_handle )
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+
+	if( bsm->base == blas_one_base )
+		--fr,--lr;
+
+	*nnzp = rsb__dodo_get_rows_nnz(bsm->mtxAp,fr,lr,RSB_FLAG_C_INDICES_INTERFACE,&errval);
+	return RSB_ERROR_TO_BLAS_ERROR(errval);
+err:
+	return RSB_BLAS_INVALID_VAL;
+}
+
+rsb_blas_int_t rsb__BLAS_ussp( blas_sparse_matrix A, rsb_blas_int_t pname )
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	/* TODO: table 3.4 in the Sparse BLAS standard from 2001 has values not present in the reference implementation! */
+	struct rsb_blas_sparse_matrix_t * bsm = NULL;
+
+	bsm = rsb__BLAS_matrix_retrieve(A);
+
+	if( RSB_BLAS_IS_ATPNAME(pname) )
+	{
+		rsb_blas_int_t ret = rsb__BLAS_autotune( bsm, pname ); /* This property can be set at any time */
+		return ret;
+	}
+
+#if RSB_BLAS_ALLOW_MTX_UPD
+	if( ( bsm != NULL ) && ( bsm->type == blas_valid_handle ) && ( bsm->mtxAp != NULL ) )
+	{
+		switch (pname)
+		{
+			case (blas_rsb_duplicates_ovw) :
+			RSB_DO_FLAG_SUBST(bsm->mtxAp->flags,RSB_FLAG_ALL_DUPLICATE_FLAGS,RSB_FLAG_DUPLICATES_KEEP_LAST);
+			bsm->dupstra = pname;
+			break;
+			case (blas_rsb_duplicates_sum) :
+			RSB_DO_FLAG_SUBST(bsm->mtxAp->flags,RSB_FLAG_ALL_DUPLICATE_FLAGS,RSB_FLAG_DUPLICATES_SUM);
+			bsm->dupstra = pname;
+			break;
+			default:
+			RSB_PERR_GOTO(err,RSB_ERRM_ES)
+		}
+		goto ok;
+	}
+#endif /* RSB_BLAS_ALLOW_MTX_UPD */
+
+	if( ( bsm == NULL ) || bsm->type != blas_open_handle )
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+
+	if( bsm->nnzin != 0 )
+	{
+		/*
+		 According to [dv_2002]:
+		  "Calls to USSP should be made after a call to the BEGIN routine but before
+		   the first call to an INSERT routine for the same handle."
+		   */
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	switch (pname)
+	{
+		case (blas_general)       : bsm->symmetry = blas_general; break;
+		case (blas_one_base)       : bsm->off=1; bsm->base=blas_one_base; break;
+		case (blas_zero_base)       : bsm->base=blas_zero_base; break;
+
+		case (blas_non_unit_diag) : bsm->diag_type=blas_non_unit_diag; break;
+		case (blas_unit_diag) : bsm->diag_type=blas_unit_diag; break;
+#if 1
+		case (blas_complex) :
+		case (blas_real)	:
+		case (blas_double_precision) :
+		case (blas_single_precision) :
+		/* FIXME: should return an error only on a type different from the one already set */
+		/* FIXME: unless the standard really mandates a conversion? */
+			RSB_PERR_GOTO(err,RSB_ERRM_ES)
+		break;
+#endif
+#if 0
+		/* FIXME: the following do not exist, but may be useful  */
+		case (blas_no_repeated_indices ) :
+			return RSB_BLAS_ERROR_UNIMPLEMENTED;
+		case (blas_repeated_indices ) :
+			return RSB_BLAS_ERROR_UNIMPLEMENTED;
+		break;
+#endif
+		case (blas_triangular) : return RSB_BLAS_ERROR_WRONG_USGP_ARG;		/* TODO */ break;
+		case (blas_lower_triangular) : bsm->symmetry=blas_lower_triangular; break;
+		case (blas_upper_triangular) : bsm->symmetry=blas_upper_triangular; break;
+		case (blas_symmetric)       : return RSB_BLAS_ERROR_WRONG_USGP_ARG;	/* TODO */ break;
+		case (blas_lower_symmetric) : bsm->symmetry=blas_lower_symmetric; break;
+		case (blas_upper_symmetric) : bsm->symmetry=blas_upper_symmetric; break;
+		case (blas_hermitian) : return RSB_BLAS_ERROR_WRONG_USGP_ARG;	/* TODO */ break;
+		case (blas_lower_hermitian) : bsm->symmetry=blas_lower_hermitian; break;
+		case (blas_upper_hermitian) : bsm->symmetry=blas_upper_hermitian; break;
+		case (blas_rowmajor) :
+			bsm->order=blas_rowmajor;
+		break;
+		case (blas_colmajor) :
+			return RSB_BLAS_ERROR_UNIMPLEMENTED;	/* TODO */
+		break;
+		case (blas_regular) : bsm->sparsity_optimization_type=blas_regular; break;
+#if 1
+		/* FIXME: we interpret the following as hints, but in future we may use them */
+		case (blas_block) :
+		case (blas_irregular) :
+		case (blas_unassembled) :
+		break;
+#endif
+#if RSB_WANT_SPARSE_BLAS_EXTENSIONS
+		case (blas_rsb_duplicates_ovw) :
+		case (blas_rsb_duplicates_sum) :
+			bsm->dupstra = pname;
+		break;
+		case (blas_rsb_rep_csr) :
+		case (blas_rsb_rep_coo) :
+		case (blas_rsb_rep_rsb) :
+			bsm->fmt_hint = pname;
+		break;
+#endif /* RSB_WANT_SPARSE_BLAS_EXTENSIONS */
+		/* ... */
+		default:
+		return RSB_BLAS_ERROR;
+	}
+ok:
+	return RSB_BLAS_NO_ERROR;
+err:
+	return RSB_BLAS_ERROR;
+}
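+
+/*
+ Property setting sketch (illustration only): per the [dv_2002] quotation in
+ the routine above, properties must be set after the BEGIN call and before
+ the first INSERT call on the same handle.
+*/
+#if 0
+	blas_sparse_matrix A = rsb__BLAS_Xuscr_begin(nr,nc,typecode);
+	rsb__BLAS_ussp(A,blas_one_base);		/* use Fortran-style (1-based) indices */
+	rsb__BLAS_ussp(A,blas_lower_triangular);	/* declare triangular structure */
+	/* ... insertions may start now ... */
+#endif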
+
+               /* Destruction Routine */
+
+rsb_blas_int_t rsb__BLAS_Xusds( blas_sparse_matrix A )
+{
+	/**
+	 \ingroup gr_internals
+	 Destroys the given matrix.
+	 */
+	rsb_blas_int_t res = ( rsb__BLAS_matrix_destroy(A) == RSB_BLAS_INVALID_VAL ) ? RSB_BLAS_ERROR : RSB_BLAS_NO_ERROR;
+
+	return res;
+}
+
+rsb_trans_t rsb_do_psblas_trans_to_rsb_trans(const char trans)
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	rsb_trans_t rtrans = RSB_INVALID_FLAGS;
+
+	switch(trans)
+	{
+		case(RSB_PSBLAS_TRANS_N):
+		rtrans = RSB_TRANSPOSITION_N;
+		break;
+		case(RSB_PSBLAS_TRANS_T):
+		rtrans = RSB_TRANSPOSITION_T;
+		break;
+		case(RSB_PSBLAS_TRANS_C):
+		rtrans = RSB_TRANSPOSITION_C;
+		break;
+	}
+	return rtrans;
+}
+
+rsb_trans_t rsb_blas_trans_to_rsb_trans(enum blas_trans_type trans)
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	rsb_trans_t rtrans = RSB_INVALID_FLAGS;
+
+	switch(trans)
+	{
+		case(blas_no_trans):
+		rtrans = RSB_TRANSPOSITION_N;
+		break;
+		case(blas_trans):
+		rtrans = RSB_TRANSPOSITION_T;
+		break;
+		case(blas_conj_trans):
+		rtrans = RSB_TRANSPOSITION_C;
+		break;
+	}
+	return rtrans;
+}
+
+rsb_order_t rsb_blas_order_to_rsb_order(enum blas_order_type order)
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	rsb_order_t rorder = RSB_FLAG_WANT_ROW_MAJOR_ORDER;
+
+	switch(order)
+	{
+		case(blas_colmajor):
+		rorder = RSB_FLAG_WANT_COLUMN_MAJOR_ORDER;
+		break;
+	}
+	return rorder;
+}
+
+int rsb__BLAS_Xusrows_scale(blas_sparse_matrix A,const void * d,enum blas_trans_type trans)
+{
+	/**
+	 \ingroup gr_internals
+	 \rsb_spblasl2e_usrows_scale_msg
+	 */
+	struct rsb_mtx_t * mtxAp = rsb__BLAS_inner_matrix_retrieve(A);
+	rsb_err_t errval = rsb__do_scal(mtxAp,d,rsb_blas_trans_to_rsb_trans(trans));
+
+	return RSB_ERROR_TO_BLAS_ERROR(errval);
+}
+
+int rsb__BLAS_Xusget_diag(blas_sparse_matrix A,void * d)
+{
+	/**
+	 \ingroup gr_internals
+	 \rsb_spblasl2e_usget_diag_msg
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = rsb__BLAS_inner_matrix_retrieve(A);
+
+	errval = rsb__do_matrix_compute(mtxAp, d, RSB_EXTF_DIAG);
+	return RSB_ERROR_TO_BLAS_ERROR(errval);
+}
+
+int rsb__BLAS_Xusget_rows_sparse(blas_sparse_matrix A,void *  VA, rsb_blas_int_t * IA, rsb_blas_int_t * JA, rsb_blas_int_t * nnz, rsb_blas_int_t fr, rsb_blas_int_t lr)
+{
+	/**
+	 \ingroup gr_internals
+	 \rsb_spblasl2e_usget_rows_sparse_msg
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_blas_sparse_matrix_t * bsm = NULL;
+
+	bsm = rsb__BLAS_matrix_retrieve(A);
+	if(!bsm || !bsm->mtxAp)
+	{
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+	errval = rsb__do_get_rows_sparse(RSB_TRANSPOSITION_N,NULL,bsm->mtxAp,VA,IA,JA,fr,lr,nnz,
+			RSB_FLAG_SORT_INPUT|(
+			bsm->base == blas_one_base ? RSB_FLAG_FORTRAN_INDICES_INTERFACE : RSB_FLAG_NOFLAGS));
+err:
+	return RSB_ERROR_TO_BLAS_ERROR(errval);
+}
+
+int rsb__BLAS_Xusget_matrix_nnz(blas_sparse_matrix A, rsb_blas_int_t * nnzAp)
+{
+	/**
+	 \ingroup gr_internals
+	 \rsb_spblasl2e_usget_matrix_nnz_msg
+	 */
+	struct rsb_mtx_t * mtxAp = NULL;
+
+	mtxAp = rsb__BLAS_inner_matrix_retrieve(A);
+	if(!mtxAp)
+		return RSB_ERROR_TO_BLAS_ERROR(RSB_ERR_BADARGS);
+	*nnzAp = mtxAp->nnz;
+	return RSB_BLAS_NO_ERROR;
+}
+
+int rsb__BLAS_Xusget_rows_sums(blas_sparse_matrix A, void * rs, enum blas_trans_type trans)
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	struct rsb_mtx_t * mtxAp = rsb__BLAS_inner_matrix_retrieve(A);
+	rsb_err_t errval = rsb__do_rowssums(mtxAp,rsb_blas_trans_to_rsb_trans(trans),rs);
+
+	return RSB_ERROR_TO_BLAS_ERROR(errval);
+}
+
+int rsb__BLAS_Xusget_infinity_norm(blas_sparse_matrix A, void * in, enum blas_trans_type trans)
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	struct rsb_mtx_t * mtxAp = rsb__BLAS_inner_matrix_retrieve(A);
+	rsb_err_t errval = rsb__do_matrix_norm(mtxAp,in,RSB_EXTF_NORM_INF);
+
+	return RSB_ERROR_TO_BLAS_ERROR(errval);
+}
+
+int rsb__BLAS_Xusset_elements(blas_sparse_matrix A, const rsb_blas_int_t * ia, const rsb_blas_int_t *ja, const void *  va, rsb_blas_int_t nnz)
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+
+	mtxAp = rsb__BLAS_inner_matrix_retrieve(A);
+	if(!mtxAp)
+	{
+		errval = RSB_ERROR_TO_BLAS_ERROR(RSB_ERR_BADARGS);
+		goto err;
+	}
+
+	errval = rsb__do_set_coo_elements(mtxAp,va,ia,ja,nnz);
+err:
+	return RSB_ERROR_TO_BLAS_ERROR(errval);
+}
+
+int rsb__BLAS_Xusset_element(blas_sparse_matrix A,rsb_blas_int_t i, rsb_blas_int_t j, const void * v)
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = rsb__BLAS_inner_matrix_retrieve(A);
+
+	errval = rsb__do_set_coo_element(mtxAp,v,i,j);
+	return RSB_ERROR_TO_BLAS_ERROR(errval);
+}
+
+int rsb__BLAS_Xusget_element(blas_sparse_matrix A,rsb_blas_int_t i, rsb_blas_int_t j, void * v)
+{
+	/**
+	 \ingroup gr_internals
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = rsb__BLAS_inner_matrix_retrieve(A);
+
+	errval = rsb__do_get_coo_element(mtxAp,v,i,j);
+	return RSB_ERROR_TO_BLAS_ERROR(errval);
+}
+
+int rsb__BLAS_Xusmm(enum blas_trans_type transA, const void * alphap, blas_sparse_matrix A, const void * b, rsb_blas_int_t ldb, const void * betap, void * c, rsb_blas_int_t ldc, rsb_blas_int_t nrhs, enum blas_order_type order)
+{
+	/**
+	 	Multiplies by multivector, accumulating in a multivector and scaling it.
+	*/
+	//const struct rsb_mtx_t *mtxAp = rsb__BLAS_inner_matrix_retrieve(A);
+	rsb_trans_t trans = rsb_blas_trans_to_rsb_trans(transA);
+	int brv = RSB_BLAS_ERROR;
+	struct rsb_blas_sparse_matrix_t * bsm = rsb__BLAS_matrix_retrieve(A);
+	rsb_order_t rorder = rsb_blas_order_to_rsb_order(order);
+	{
+#if RSB_BLAS_WANT_EXPERIMENTAL_TUNING
+	rsb_int rnt = 0;
+	rsb_thread_t ornt = rsb_get_num_threads();
+	if((bsm->opt_mvn_hint) == RSB_SPB_THR_STR_AUTO_NEXTOP )
+		ornt = -ornt; /* want threads tuning */
+	RSB_SPB_AT_OP(bsm->mtxAp,rnt,bsm->opt_mvn_hint,nrhs,rorder,alphap,betap,c,b,ldc,ldb,rsb_op_spmv)
+	rnt = ( transA == blas_no_trans ) ?  bsm->opt_mvn_hint : bsm->opt_mvt_hint;
+	RSB_SPB_THREADS_PUSH
+#endif /* RSB_BLAS_WANT_EXPERIMENTAL_TUNING */
+	if(!bsm->mtxAp)
+	       	goto err;
+	brv = RSB_ERROR_TO_BLAS_ERROR(rsb__do_spmm_general(bsm->mtxAp,b,c,alphap,betap,1,1,trans,RSB_OP_FLAG_DEFAULT,rorder,nrhs,ldb,ldc));
+#if RSB_BLAS_WANT_EXPERIMENTAL_TUNING
+	RSB_SPB_THREADS_POP
+#endif /* RSB_BLAS_WANT_EXPERIMENTAL_TUNING */
+	}
+err:
+	return brv;
+}
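+
+/*
+ Multi-vector multiply sketch (illustration only; the public BLAS_dusmm wraps
+ this routine; here B is nc x nrhs, C is nr x nrhs, both column-major, and the
+ operation computed is C <- alpha*A*B + C):
+*/
+#if 0
+	BLAS_dusmm(blas_colmajor,blas_no_trans,nrhs,alpha,A,B,ldb,C,ldc);
+#endif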
+
+int rsb__BLAS_Xusmv(enum blas_trans_type transA, const void * alphap, blas_sparse_matrix A, const void * Xp, rsb_blas_int_t incX, const void * betap, void * Yp, rsb_blas_int_t incY)
+{
+	/**
+	 	Multiplies by a vector, accumulating in a vector and scaling it.
+		\f$y \leftarrow \alpha A   x + \beta y  ,\f$
+		\f$y \leftarrow \alpha A^T x + \beta y,\f$
+		\f$y \leftarrow \alpha A^H x + \beta y\f$
+	*/
+	//const struct rsb_mtx_t *mtxAp = rsb__BLAS_inner_matrix_retrieve(A);
+	rsb_trans_t trans = rsb_blas_trans_to_rsb_trans(transA);
+	int brv = RSB_BLAS_ERROR;
+	struct rsb_blas_sparse_matrix_t * bsm = rsb__BLAS_matrix_retrieve(A);
+	{
+#if RSB_BLAS_WANT_EXPERIMENTAL_TUNING
+	rsb_int rnt = 0;
+	rsb_thread_t ornt = rsb_get_num_threads();
+	if((bsm->opt_mvn_hint) == RSB_SPB_THR_STR_AUTO_NEXTOP )
+		ornt = -ornt; /* want threads tuning */
+	RSB_SPB_AT_OP(bsm->mtxAp,rnt,bsm->opt_mvn_hint,1,RSB_FLAG_WANT_COLUMN_MAJOR_ORDER,alphap,betap,Yp,Xp,0,0,rsb_op_spmv)
+	rnt = ( transA == blas_no_trans ) ?  bsm->opt_mvn_hint : bsm->opt_mvt_hint;
+	RSB_SPB_THREADS_PUSH
+#endif /* RSB_BLAS_WANT_EXPERIMENTAL_TUNING */
+	if(!bsm->mtxAp)
+	       	goto err;
+	brv = RSB_ERROR_TO_BLAS_ERROR(rsb_do_spmv_general(trans,alphap,bsm->mtxAp,Xp,incX,betap,Yp,incY,RSB_OP_FLAG_DEFAULT RSB_DEFAULT_OUTER_NRHS_SPMV_ARGS));
+#if RSB_BLAS_WANT_EXPERIMENTAL_TUNING
+	RSB_SPB_THREADS_POP
+#endif /* RSB_BLAS_WANT_EXPERIMENTAL_TUNING */
+	}
+err:
+	return brv;
+}
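+
+/*
+ SPMV sketch (illustration only; the public BLAS_dusmv wraps this routine;
+ with x of length nc and y of length nr it computes y <- alpha*A*x + y):
+*/
+#if 0
+	BLAS_dusmv(blas_no_trans,alpha,A,x,1,y,1);
+#endif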
+
+int rsb__BLAS_Xussv(enum blas_trans_type transT, void * alpha, blas_sparse_matrix T, void * x, rsb_blas_int_t incx)
+{
+	/**
+	 	Solves triangular system by a vector, scaling the result.
+		 \f$x \leftarrow \alpha T^{-1}x,\f$
+		 \f$x \leftarrow \alpha T^{-T}x,\f$
+		 \f$x \leftarrow \alpha T^{-H}x\f$
+	*/
+	const struct rsb_mtx_t *mtxAp = rsb__BLAS_inner_matrix_retrieve(T);
+	rsb_trans_t trans = rsb_blas_trans_to_rsb_trans(transT);
+
+	return RSB_ERROR_TO_BLAS_ERROR(rsb__do_spsv(trans,alpha,mtxAp,x,incx,x,incx));
+}
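+
+/*
+ Triangular solve sketch (illustration only; the public BLAS_dussv wraps this
+ routine; T must have been declared triangular at construction time; the solve
+ is in place: x <- alpha * T^{-1} x):
+*/
+#if 0
+	BLAS_dussv(blas_no_trans,alpha,T,x,1);
+#endif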
+
+/* @endcond */
+
+static rsb_flags_t rsb__flags_from_props(rsb_bool_t is_hermitian, rsb_bool_t is_symmetric, rsb_bool_t is_lower, rsb_bool_t is_upper, rsb_type_t typecode)
+{
+	rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+
+	if(!RSB_IS_MATRIX_TYPE_COMPLEX(typecode) && ( is_hermitian == RSB_BOOL_TRUE ) )
+	{
+			is_hermitian = RSB_BOOL_FALSE;
+			is_symmetric = RSB_BOOL_TRUE;
+	}
+	if(is_hermitian == RSB_BOOL_TRUE && !RSB_EXPERIMENTAL_EXPAND_SYMMETRIC_MATRICES_BY_DEFAULT)
+	{
+		RSB_DO_FLAG_ADD(flags,RSB_FLAG_HERMITIAN);
+	}
+	if(is_symmetric == RSB_BOOL_TRUE && !RSB_EXPERIMENTAL_EXPAND_SYMMETRIC_MATRICES_BY_DEFAULT)
+	{
+		RSB_DO_FLAG_ADD(flags,RSB_FLAG_SYMMETRIC);
+	}
+
+	if( (!RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER)) && (!RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER)) )
+	{
+		if(is_upper)
+ 			RSB_DO_FLAG_ADD(flags,RSB_FLAG_UPPER);
+		if(is_lower)
+ 			RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER);
+	}
+	if( RSB_NAND( RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER), RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER)) )
+ 			RSB_DO_FLAG_ADD(flags,RSB_FLAG_TRIANGULAR);
+	return flags;
+}
+
+static rsb_blas_int_t rsb__mtx_flags_usgp( blas_sparse_matrix A, rsb_flags_t flags )
+{
+	/**
+	 \ingroup gr_internals
+	 matrix property set
+	TODO: missing checks.
+	 */
+	rsb_blas_int_t retcode = RSB_BLAS_ERROR;
+
+	if( RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER_TRIANGULAR))
+		retcode = rsb__BLAS_ussp( A, blas_upper_triangular);
+
+	if( RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER_TRIANGULAR))
+		retcode = rsb__BLAS_ussp( A, blas_lower_triangular);
+
+	if( RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER_HERMITIAN))
+		retcode = rsb__BLAS_ussp( A, blas_upper_hermitian);
+
+	if( RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER_HERMITIAN))
+		retcode = rsb__BLAS_ussp( A, blas_lower_hermitian);
+
+	if( RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER_SYMMETRIC))
+		retcode = rsb__BLAS_ussp( A, blas_upper_symmetric);
+
+	if( RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER_SYMMETRIC))
+		retcode = rsb__BLAS_ussp( A, blas_lower_symmetric);
+
+	/* blas_triangular, blas_general, blas_symmetric, blas_hermitian, blas_zero_base, blas_one_base, blas_rowmajor, blas_colmajor */
+	return retcode;
+}
+
+/* The following shall be documented. */
+blas_sparse_matrix rsb_load_spblas_matrix_file_as_matrix_market(const rsb_char_t * filename, rsb_type_t typecode )
+{
+	/**
+	 	Loads a BLAS Sparse matrix from a Matrix Market file.
+		This is a \librsb extension.
+
+		Sets either blas_upper_triangular, blas_lower_triangular, blas_upper_hermitian, blas_lower_hermitian, blas_upper_symmetric or blas_lower_symmetric property according to the loaded file.
+	 */
+	struct rsb_coo_matrix_t coo;
+	blas_sparse_matrix A = blas_invalid_handle /*RSB_BLAS_INVALID_VAL*/;
+	rsb_bool_t is_symmetric = /*RSB_BOOL_MAYBE*/RSB_BOOL_FALSE;
+	rsb_bool_t is_hermitian = /*RSB_BOOL_MAYBE*/RSB_BOOL_FALSE;
+	rsb_bool_t is_pattern = RSB_BOOL_MAYBE;
+	rsb_bool_t is_lower = RSB_BOOL_MAYBE, is_upper = RSB_BOOL_MAYBE;
+	rsb_bool_t is_vector = RSB_BOOL_FALSE;
+	rsb_flags_t flags = RSB_FLAG_DEFAULT_MATRIX_FLAGS | RSB_FLAG_NOFLAGS;
+
+	RSB_BZERO_P(&coo);
+	coo.typecode = typecode;
+	/* coo.typecode = RSB_NUMERICAL_TYPE_INVALID_TYPE; */
+
+	if( rsb__util_mm_info_matrix_f(filename,&coo.nr,&coo.nc,&coo.nnz,&coo.typecode,&is_symmetric,&is_hermitian,&is_pattern,&is_lower,&is_upper,&is_vector) )
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+
+	A = rsb__BLAS_Xuscr_begin(coo.nr,coo.nc,coo.typecode);
+	if( A == RSB_BLAS_INVALID_VAL )
+		RSB_PERR_GOTO(derr,RSB_ERRM_ES) /* TODO: this particular case should be made illegal. */
+
+	if( A == blas_invalid_handle )
+		RSB_PERR_GOTO(derr,RSB_ERRM_ES) /* TODO: this particular case should be made official. */
+
+	if( RSB_SOME_ERROR(rsb__util_mm_load_matrix_f(filename,&coo.IA,&coo.JA,&coo.VA,&coo.nr,&coo.nc,&coo.nnz,coo.typecode,RSB_FLAG_NOFLAGS,&is_lower,&is_upper)) )
+		RSB_PERR_GOTO(derr,RSB_ERRM_ES)
+
+	flags = rsb__flags_from_props(is_hermitian, is_symmetric, is_lower, is_upper, typecode); /* redundancy: rsb__BLAS_Xuscr_end_flagged does not use these */
+	rsb__mtx_flags_usgp(A, flags);
+	
+	if( rsb__BLAS_Xuscr_insert_entries(A,coo.nnz,coo.VA,coo.IA,coo.JA) == RSB_BLAS_INVALID_VAL )
+		RSB_PERR_GOTO(derr,RSB_ERRM_ES)
+
+	if( rsb__BLAS_Xuscr_end_flagged(A,&flags) /*rsb__BLAS_Xuscr_end(A)*/ == RSB_BLAS_INVALID_VAL )
+		RSB_PERR_GOTO(derr,RSB_ERRM_ES)
+
+	goto ok;
+derr:
+	/* FIXME: missing proper deallocation and program consistency in case of error */
+	/* better error reporting is needed */
+	RSB_ERROR(RSB_ERRM_ES);
+	rsb__destroy_coo_matrix_t(&coo);
+err:
+	return blas_invalid_handle /*RSB_BLAS_INVALID_VAL*/;
+ok:
+	rsb__destroy_coo_matrix_t(&coo);
+	return A; 
+}
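+
+/*
+ Usage sketch of the above extension (illustration only; "A.mtx" is a
+ hypothetical Matrix Market file path):
+*/
+#if 0
+	blas_sparse_matrix A = rsb_load_spblas_matrix_file_as_matrix_market("A.mtx",RSB_NUMERICAL_TYPE_DOUBLE);
+	if( A != blas_invalid_handle )
+		BLAS_usds(A);	/* use the matrix, then destroy it */
+#endif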
+
+
diff --git a/rsb_libspblas_handle.h b/rsb_libspblas_handle.h
new file mode 100644
index 0000000..f708491
--- /dev/null
+++ b/rsb_libspblas_handle.h
@@ -0,0 +1,167 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/**
+ * @file
+ * @author Michele Martone
+ * @brief  Sparse BLAS interface internals
+ * */
+#ifndef LIBSPBLAS_HANDLE_H_INCLUDED
+#define LIBSPBLAS_HANDLE_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "rsb.h"
+#include "blas_sparse/blas_enum.h"
+#include "rsb_libspblas.h"
+#include "rsb_internals.h"
+
+#define RSB_BLAS_FIRST_HANDLE 1024
+#define RSB_BLAS_LAST_HANDLE (RSB_MAX_VALUE_FOR_TYPE(blas_sparse_matrix)-1)
+#define RSB_BLAS_MATRICES_MAX (RSB_BLAS_LAST_HANDLE-RSB_BLAS_FIRST_HANDLE+1)
+#define RSB_BLAS_HANDLE_INVALID (RSB_BLAS_LAST_HANDLE+1)
+#define RSB_BLAS_INT_MAX RSB_MAX_VALUE_FOR_TYPE(rsb_blas_int_t)
+
+#define RSB_BLAS_HANDLE_TO_RSB_ERROR(HANDLE) (HANDLE==(RSB_BLAS_HANDLE_INVALID)?RSB_ERR_GENERIC_ERROR:RSB_ERR_NO_ERROR)
+
+#define RSB_ERROR_TO_BLAS_ERROR(E) (((E)==RSB_ERR_NO_ERROR)?(RSB_BLAS_NO_ERROR):(RSB_BLAS_ERROR))
+#define RSB_BLAS_ERROR_TO_RSB_ERROR(E) (((E)==RSB_BLAS_NO_ERROR)?(RSB_ERR_NO_ERROR):(RSB_ERR_GENERIC_ERROR))
+#define RSB_BLAS_WANT_EXPERIMENTAL_TUNING RSB_WANT_OMP_RECURSIVE_KERNELS	/* FIXME: this is experimental */
+
+#define RSB_BLAS_UPLO_CHAR(UPLO) ((UPLO)==blas_lower_triangular?'L':'U')
+#define RSB_BLAS_DIAG_CHAR(DIAG) ((DIAG)==blas_non_unit_diag?'E':'I')
+#define RSB_BLAS_INVALID_VAL (-1)
+
+#define RSB_SPB_INTERFACE_PREAMBLE RSB_INTERFACE_PREAMBLE
+#define RSB_SPB_INTERFACE_RETURN(EXP) { int istat = EXP; RSB_INTERFACE_ENDCMD RSB_DO_ERR_MANIFEST_INTERFACE(RSB_BLAS_ERROR_TO_RSB_ERROR(istat)) return istat; }
+#define RSB_SPB_INTERFACE_RETURN_HDL(EXP) { int handle = EXP; RSB_INTERFACE_ENDCMD RSB_DO_ERR_MANIFEST_INTERFACE(RSB_BLAS_HANDLE_TO_RSB_ERROR(handle)) return handle; }
+#define RSB_SPB_INTERFACE_RETURN_VOID() { RSB_INTERFACE_ENDCMD return; }
+#define RSB_SPB_INTERFACE_RETURN_EXP(EXP) { RSB_INTERFACE_ENDCMD return (EXP); }
+
+/*typedef rsb_blas_sparse_matrix_handle_t blas_sparse_matrix;*/
+
+/*!
+ * \ingroup rsb_doc_sparse_blas
+ * \brief An internal, helper structure.
+ * \internal
+ */
+struct rsb_blas_sparse_matrix_t
+{
+	struct rsb_mtx_t * mtxAp;
+	struct rsb_coo_matrix_t coomatrix;
+	rsb_nnz_idx_t nnzin;
+	blas_sparse_matrix handle;
+	/* rsb_blas_int_t prop ;*/
+	int k, l, off;
+	int*rbp,*cbp;
+	enum blas_handle_type   type;
+	enum blas_diag_type diag_type;
+	enum blas_symmetry_type symmetry;
+	enum blas_base_type base;
+	enum blas_order_type order;
+	enum blas_field_type fprecision;
+	enum blas_field_type field;
+	enum blas_sparsity_optimization_type sparsity_optimization_type;
+	enum blas_rsb_ext_type dupstra;
+	enum blas_rsb_ext_type fmt_hint;
+#if RSB_BLAS_WANT_EXPERIMENTAL_TUNING 
+	rsb_thread_t opt_mvn_hint, opt_mvt_hint;
+#endif
+/*  	we should also deal with :
+blas_order_type 
+blas_trans_type 
+blas_uplo_type  
+blas_diag_type 
+blas_side_type 
+blas_cmach_type 
+blas_norm_type 
+blas_sort_type 
+blas_conj_type 
+blas_jrot_type 
+blas_prec_type 
+blas_base_type 
+blas_symmetry_type 
+blas_field_type 
+blas_size_type 
+blas_handle_type
+blas_sparsity_optimization_type 
+ */
+};
+
+
+
+rsb_err_t rsb__BLAS_is_type_supported(rsb_char_t c);
+struct rsb_mtx_t * rsb__BLAS_inner_matrix_retrieve(blas_sparse_matrix handle);
+blas_sparse_matrix rsb__BLAS_Xuscr_begin( rsb_blas_int_t m, rsb_blas_int_t n, rsb_type_t typecode);
+blas_sparse_matrix rsb__BLAS_Xuscr_block_begin( rsb_blas_int_t Mb, rsb_blas_int_t Nb, rsb_blas_int_t k, rsb_blas_int_t l, rsb_type_t typecode);
+blas_sparse_matrix rsb__BLAS_Xuscr_variable_block_begin( rsb_blas_int_t Mb, rsb_blas_int_t Nb, const rsb_blas_int_t *k, const rsb_blas_int_t *l, rsb_type_t typecode);
+rsb_blas_int_t rsb__BLAS_Xuscr_insert_entry( blas_sparse_matrix A, const void * valp, rsb_blas_int_t i, rsb_blas_int_t j );
+rsb_blas_int_t rsb__BLAS_Xuscr_insert_entries( blas_sparse_matrix A, rsb_blas_int_t nz, const void * val, const rsb_blas_int_t *indx, const rsb_blas_int_t *jndx );
+rsb_blas_int_t rsb__BLAS_Xuscr_insert_col( blas_sparse_matrix A, rsb_blas_int_t j, rsb_blas_int_t nz, const void * val, const rsb_blas_int_t *indx );
+rsb_blas_int_t rsb__BLAS_Xuscr_insert_row( blas_sparse_matrix A, rsb_blas_int_t i, rsb_blas_int_t nz, const void * val, const rsb_blas_int_t *jndx );
+rsb_blas_int_t rsb__BLAS_Xuscr_insert_clique( blas_sparse_matrix A, const rsb_blas_int_t k, const rsb_blas_int_t l, const void * val, const rsb_blas_int_t row_stride, const rsb_blas_int_t col_stride, const rsb_blas_int_t *indx, const rsb_blas_int_t *jndx );
+rsb_blas_int_t rsb__BLAS_Xuscr_insert_block( blas_sparse_matrix A, const void * val, rsb_blas_int_t row_stride, rsb_blas_int_t col_stride, rsb_blas_int_t i, rsb_blas_int_t j);
+rsb_blas_int_t rsb__BLAS_Xuscr_end( blas_sparse_matrix A);
+rsb_blas_int_t rsb__BLAS_Xuscr_end_flagged( blas_sparse_matrix A, const rsb_flags_t*flagsp);
+
+rsb_blas_int_t rsb__BLAS_usgp( blas_sparse_matrix A, rsb_blas_int_t pname );
+rsb_blas_int_t rsb__BLAS_ussp( blas_sparse_matrix A, rsb_blas_int_t pname );
+rsb_blas_int_t rsb__BLAS_Xusds( blas_sparse_matrix A );
+rsb_trans_t rsb_blas_trans_to_rsb_trans(enum blas_trans_type trans);
+rsb_trans_t rsb_do_psblas_trans_to_rsb_trans(const char trans);
+rsb_order_t rsb_blas_order_to_rsb_order(enum blas_order_type order);
+blas_sparse_matrix rsb__BLAS_new_matrix_begin(rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnzest, rsb_type_t typecode, rsb_coo_idx_t br, rsb_coo_idx_t bc, const rsb_coo_idx_t*rbp, const rsb_coo_idx_t*cbp);
+rsb_err_t rsb__BLAS_handles_free(void);
+
+int rsb__BLAS_Xusrows_scale(blas_sparse_matrix A,const void * d,enum blas_trans_type trans);
+int rsb__BLAS_Xusget_diag(blas_sparse_matrix A,void * d);
+int rsb__BLAS_Xusget_rows_sparse(blas_sparse_matrix A,void *  VA, rsb_blas_int_t * IA, rsb_blas_int_t * JA, rsb_blas_int_t * nnz, rsb_blas_int_t fr, rsb_blas_int_t lr);
+int rsb__BLAS_Xusget_matrix_nnz(blas_sparse_matrix A, rsb_blas_int_t * nnz);
+int rsb__BLAS_Xusget_infinity_norm(blas_sparse_matrix A, void * in, enum blas_trans_type trans);
+int rsb__BLAS_Xusget_rows_sums(blas_sparse_matrix A, void * rs, enum blas_trans_type trans);
+int rsb__BLAS_Xusset_elements(blas_sparse_matrix A,const rsb_blas_int_t * ia, const rsb_blas_int_t *ja, const void *  va, rsb_blas_int_t nnz);
+int rsb__BLAS_Xusset_element(blas_sparse_matrix A,rsb_blas_int_t i, rsb_blas_int_t j, const void * v);
+int rsb__BLAS_Xusget_element(blas_sparse_matrix A,rsb_blas_int_t i, rsb_blas_int_t j, void * v);
+int rsb__BLAS_Xusmv(enum blas_trans_type transA, const void * alphap, blas_sparse_matrix A, const void * Xp, rsb_blas_int_t incX, const void * betap, void * Yp, rsb_blas_int_t incY);
+int rsb__BLAS_Xusmm(enum blas_trans_type transA, const void * alphap, blas_sparse_matrix A, const void * b, rsb_blas_int_t ldb, const void * betap, void * c, rsb_blas_int_t ldc, rsb_blas_int_t nrhs, enum blas_order_type order);
+int rsb__BLAS_Xussv(enum blas_trans_type transT, void * alpha, blas_sparse_matrix T, void * x, rsb_blas_int_t incx);
+void blas_usgp_f_( blas_sparse_matrix*A, rsb_blas_int_t * pname, rsb_blas_int_t * istat );
+void blas_ussp_f_( blas_sparse_matrix*A, rsb_blas_int_t * pname, rsb_blas_int_t * istat );
+/* blas_sparse_matrix rsb_load_spblas_matrix_file_as_matrix_market(const rsb_char_t * filename, rsb_type_t typecode ); now in rsb_libspblas.h */
+rsb_blas_int_t rsb__BLAS_Xusget_rows_nnz( blas_sparse_matrix A, rsb_blas_int_t fr, rsb_blas_int_t lr, rsb_blas_int_t * nnzp);
+blas_sparse_matrix rsb__BLAS_handle_free(blas_sparse_matrix handle);
+
+#define RSB_BLAS_STDOUT_MATRIX_SUMMARY(A)						\
+	{										\
+		struct rsb_mtx_t *RSB_DUMMY_ID=rsb__BLAS_inner_matrix_retrieve(A);	\
+		if(RSB_DUMMY_ID)RSB_STDOUT_MATRIX_SUMMARY(RSB_DUMMY_ID );		\
+		/*else            RSB_STDOUT_MATRIX_SUMMARY(RSB_DUMMY_MTX);*/		\
+	}
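+
+/* Illustrative sketch (not a contract) of the handle lifecycle these internal
+ * entry points implement, as exercised by rsb_libspblas_tests.c:
+ *
+ *   blas_sparse_matrix A = rsb__BLAS_Xuscr_begin(m,n,typecode);
+ *   rsb__BLAS_Xuscr_insert_entries(A,nz,VA,IA,JA);
+ *   rsb__BLAS_Xuscr_end(A);                                  // assemble
+ *   rsb__BLAS_Xusmv(blas_no_trans,&alpha,A,X,1,&beta,Y,1);   // Y <- alpha*A*X + beta*Y
+ *   rsb__BLAS_Xusds(A);                                      // destroy
+ */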
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+#endif /* LIBSPBLAS_HANDLE_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_libspblas_tests.c b/rsb_libspblas_tests.c
new file mode 100644
index 0000000..4d8c92e
--- /dev/null
+++ b/rsb_libspblas_tests.c
@@ -0,0 +1,2258 @@
+/*
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/**
+ * @file
+ * @author Michele Martone
+ * @brief  Sparse BLAS interface testing code
+ * */
+#include "rsb_common.h"
+#include "blas_sparse/blas_enum.h"
+#include "rsb_libspblas.h"
+/* #include "rsb_libspblas_handle.h" */
+#include "rsb_psblas.h" /* (in rsb) header for rsb_do_psblas_trans_to_rsb_trans */
+#include <stdio.h>	/* fileno */
+#include <unistd.h>	/* isatty */
+#include "rsb_libspblas_tests.h"
+#define RSB_WANT_SPGEMM_TESTING_FOR_ONLY_FIRST_DIMI 3
+#define RSB_WANT_VERBOSE_FAILURES 1
+#define RSB_TESTER_ALLOW_TIMEOUT 1
+#define RSB_BLAS_INVALID_MATRIX (-1)
+#define RSB_INVALID_BLAS_INT_IDX_VAL -1
+#define RSB_WANT_AUTOTUNING_TESTING 1
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+RSB_INTERNALS_RSBENCH_HEAD_DECLS
+
+/* TODO: shall use the following throughout the tester routine */
+#define RSB_LSTERR(MSG) {RSB_ERROR(MSG);goto err;} 
+#define RSB_LSTPROBE(EXP,MSG) if( RSB_SOME_ERROR(errval=(EXP))){RSB_ERROR(MSG);goto err;} /* error is not expected here */
+#define RSB_LSTPROBI(EXP,MSG) if(!RSB_SOME_ERROR(errval=(EXP))){errval=RSB_ERR_INTERNAL_ERROR;RSB_ERROR(MSG);goto err;}else{errval = RSB_ERR_NO_ERROR;} /* an error is expected here, then discarded: best used after an internal function which does not itself zealously report errors */
+#define RSB_EMPTY_STRING ""
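+
+/* The probing macros above assume a local rsb_err_t errval and an err: label in
+ * scope. Illustrative usage (the key/value strings are placeholders):
+ *   RSB_LSTPROBE(rsb__do_set_initopt_as_string("KEY","VAL"),"option setting failed");
+ * on error, errval is set and control jumps to err:. */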
+
+#define RSB_BLAS_MT_STR(MT) ((MT)==blas_lower_triangular?"LT":	\
+			((MT)==blas_upper_triangular?"UT":	\
+			((MT)==blas_lower_symmetric?"LS":	\
+			((MT)==blas_upper_symmetric?"US":	\
+			((MT)==blas_lower_hermitian?"LH":	\
+			((MT)==blas_upper_hermitian?"UH":	\
+			((MT)==blas_general?"GE":"??")		\
+			 ))))))
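+/* e.g.: RSB_BLAS_MT_STR(blas_lower_triangular) evaluates to "LT",
+ * RSB_BLAS_MT_STR(blas_general) to "GE", and anything unrecognized to "??". */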
+
+rsb_err_t rsb_blas_tester_options_init(struct rsb_tester_options_t * top)
+{
+	/* This function shall not need any library initialization to work. */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_BZERO_P(top);
+	top->mtt = RSB_TIME_ZERO;
+	top->rrm = RSB_BOOL_FALSE;
+	top->tur = RSB_BOOL_FALSE;
+	top->wqt = RSB_BOOL_FALSE;
+	top->wqc = RSB_BOOL_FALSE;
+	top->wcs = RSB_BOOL_FALSE;
+	return errval;
+}
+
+static blas_sparse_matrix rsb_blas_single_allocation_tester(void)
+{
+	/**
+	 * \ingroup gr_internals
+	 * */
+	blas_sparse_matrix A = RSB_BLAS_INVALID_MATRIX;
+	const rsb_coo_idx_t IA[]={0};
+	const rsb_coo_idx_t JA[]={0};
+	const RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE VA[]={11};
+	const RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE X[]={1};
+	RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE Y[]={0};
+	const RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE alpha = 1.0, beta = 1.0;
+	const rsb_coo_idx_t m=1,n=1;
+	const int nz=1;
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_FIRST_BLAS;
+	if( RSB_NUMERICAL_TYPE_FIRST_BLAS == RSB_NUMERICAL_TYPE_INVALID_TYPE ) 
+	{ RSB_INFO("SKIPPING A TEST (no BLAS types in)\n"); goto err; }
+	if( (A= rsb__BLAS_Xuscr_begin( m, n, typecode )) == RSB_BLAS_INVALID_MATRIX )
+	{RSB_ERROR("error calling BLAS_duscr_begin\n"); goto err;}
+	if( rsb__BLAS_Xuscr_insert_entries( A, nz, VA, IA, JA) == RSB_BLAS_ERROR )
+	{RSB_ERROR("error calling BLAS_duscr_insert_entries\n"); goto err;}
+	if( rsb__BLAS_Xuscr_end(A) == RSB_BLAS_ERROR )
+	{RSB_ERROR("error calling BLAS_duscr_end\n"); goto err;}
+	if( rsb__BLAS_Xusmv( blas_no_trans, &alpha, A, X, 1, &beta, Y, 1) == RSB_BLAS_ERROR)
+	{RSB_ERROR("error calling BLAS_dusmv\n"); goto err;}
+
+	return A;
+err:
+	if(A != RSB_BLAS_INVALID_MATRIX)
+	{
+		if (rsb__BLAS_Xusds(A)!=RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("error destroying the matrix after error\n"); goto err;}
+
+	}
+	return RSB_BLAS_INVALID_MATRIX;
+}
+
+
+static rsb_err_t rsb_blas_allocation_tester(void)
+{
+	/**
+	 * \ingroup gr_internals
+	 *  Descriptor handling machinery tester.
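+	 *  Allocates up to 1024 single-entry matrices back to back, then
+	 *  destroys them in reverse order.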
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	const rsb_submatrix_idx_t mcount = RSB_MIN(1024,RSB_BLAS_MATRICES_MAX);
+	rsb_submatrix_idx_t count=0;
+	blas_sparse_matrix bsms[mcount];
+	blas_sparse_matrix A = RSB_BLAS_INVALID_MATRIX;
+
+	if( RSB_NUMERICAL_TYPE_FIRST_BLAS == RSB_NUMERICAL_TYPE_INVALID_TYPE ) 
+	{ RSB_INFO("SKIPPING A TEST (no BLAS types in)\n"); goto err; }
+
+	for(count=0;count<mcount;++count)
+		bsms[count] = RSB_BLAS_INVALID_MATRIX;
+
+	for(count=0;count<mcount;++count)
+	{
+		A = rsb_blas_single_allocation_tester();
+		if(A == RSB_BLAS_INVALID_MATRIX)
+		{
+			RSB_ERROR(RSB_ERRM_ES);
+			errval = RSB_ERR_INTERNAL_ERROR;
+			goto out;
+		}
+		else
+			bsms[count]=A;
+	}
+out:
+	if(count<mcount)
+		RSB_ERROR("failed allocating %d matrices: only allocated %d!\n",mcount,count);
+
+	for(count=mcount-1;count+1>0;--count)
+	{
+		if((bsms[count] != RSB_BLAS_INVALID_MATRIX) && (rsb__BLAS_Xusds(bsms[count])==RSB_BLAS_ERROR))
+		{
+			RSB_ERROR(RSB_ERRM_ES);
+			RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_INTERNAL_ERROR);
+		}
+		bsms[count] = RSB_BLAS_INVALID_MATRIX;
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb_blas_mini_tester(void)
+{
+	/**
+	 * \ingroup gr_internals
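+	 * Basic Sparse BLAS smoke test: exercises point-entry, block and
+	 * variable-block matrix creation, property queries (usgp), SPMV, and
+	 * the init-option and print interfaces.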
+	 * */
+	blas_sparse_matrix A = RSB_BLAS_INVALID_MATRIX;
+	const rsb_coo_idx_t IA[]={0,1,2,3};
+	const rsb_coo_idx_t JA[]={0,1,2,3};
+	const rsb_coo_idx_t BR[]={2};
+	const rsb_coo_idx_t BC[]={2};
+	const RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE VA[]={0,11,22,33};
+	const RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE X[]={4,3,2,1};
+	RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE Y[]={0,0,0,0};
+	const RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE alpha = 1.0, beta = 1.0;
+	const rsb_coo_idx_t m=4,n=4;
+	const int nz=4;
+	rsb_char_t optstr[RSB_MAX_LINE_LENGTH];
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_FIRST_BLAS;
+	/* const char*tsep="*\n"; */
+	const char*tsep="%s";
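+	/* tsep: per-step progress marker format; "%s" with an empty argument keeps
+	 * the output quiet, while the commented-out "*\n" would print one star per
+	 * completed step. */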
+
+	if( RSB_NUMERICAL_TYPE_FIRST_BLAS == RSB_NUMERICAL_TYPE_INVALID_TYPE ) 
+	{ RSB_INFO("SKIPPING A TEST (no BLAS types in)\n"); goto ret; }
+
+#ifndef RSB_NUMERICAL_TYPE_DOUBLE
+	RSB_INFO("SKIPPING BASIC SPARSE BLAS TEST (UNFINISHED TESTING SUITE)\n");
+	goto ret; /* FIXME: we are being overly tolerant here, because we don't want to break int-type-only cases */
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+
+#if 1
+	RSB_INFO("BASIC SPARSE BLAS TEST: BEGIN\n");
+	if((A= rsb__BLAS_Xuscr_begin( m, n, typecode )) == RSB_BLAS_INVALID_MATRIX )
+	{RSB_ERROR(RSB_ERRM_NL); goto err;}
+	RSB_INFO(tsep,"");
+	if( rsb__BLAS_Xuscr_insert_entries( A, nz, VA, IA, JA) == RSB_BLAS_INVALID_MATRIX )
+	{RSB_ERROR(RSB_ERRM_NL); goto err;}
+	RSB_INFO(tsep,"");
+	if( rsb__BLAS_Xuscr_end(A) == RSB_BLAS_INVALID_MATRIX )
+	{RSB_ERROR(RSB_ERRM_NL); goto err;}
+	if( rsb__BLAS_usgp( A, blas_num_rows ) != m ) {RSB_ERROR(RSB_ERRM_NL); goto err;}
+	if( rsb__BLAS_usgp( A, blas_num_cols ) != n ) {RSB_ERROR(RSB_ERRM_NL); goto err;}
+	if( rsb__BLAS_usgp( A, blas_num_nonzeros ) != nz ) {RSB_ERROR(RSB_ERRM_NL); goto err;}
+	RSB_INFO(tsep,"");
+	if( rsb__BLAS_Xusmv( blas_no_trans, &alpha, A, X, 1, &beta, Y, 1) != RSB_BLAS_NO_ERROR )
+	{RSB_ERROR(RSB_ERRM_NL); goto err;}
+	RSB_INFO(tsep,"");
+#if 0
+	/* missing lower triangular mark */
+	if( BLAS_dussv( blas_no_trans, alpha, A, X, 1) != RSB_BLAS_NO_ERROR )
+		goto err;
+	RSB_INFO("*\n");
+#endif
+
+	
+#if RSB_WANT_ALLOCATOR_LIMITS
+	if(1)
+{
+	/* TODO: in the future, may constrain the whole test within memory limits */
+	size_t sval=0;
+	RSB_DO_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_MAX_MEMORY_ALLOCATIONS,&sval,errval); RSB_LSTPROBE(errval,"");
+	RSB_DO_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_MAX_MEMORY_ALLOCATED,&sval,errval); RSB_LSTPROBE(errval,"");
+}
+#endif /* RSB_WANT_ALLOCATOR_LIMITS */
+	if(1)
+{
+	rsb_int val=0;
+	enum rsb_opt_t key=RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE;
+
+	RSB_INFO("INIT INTERFACE TEST: BEGIN\n");
+	RSB_DO_REINIT_SINGLE_VALUE_GET(key,&val,errval); RSB_LSTPROBE(errval,"");
+	if(val!=-1)
+	{ RSB_DO_REINIT_SINGLE_VALUE_SET(key,&val,errval); }
+       	RSB_LSTPROBE(errval,"");
+	rsb__sprintf(optstr,"got RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE: %d",val);
+	if(val!=-1)
+	{ RSB_LSTPROBE(rsb__do_set_initopt_as_string("RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE",optstr),""); }
+	RSB_INFO("%s\n",optstr);
+	
+	key=RSB_IO_WANT_IS_INITIALIZED_MARKER;
+	RSB_DO_REINIT_SINGLE_VALUE_GET(key,&val,errval); RSB_LSTPROBE(errval,"");
+	RSB_DO_REINIT_SINGLE_VALUE_SET(key,&val,errval); RSB_LSTPROBE(errval,"");
+	rsb__sprintf(optstr,"%d",val);
+	RSB_LSTPROBE(rsb__do_set_initopt_as_string("RSB_IO_WANT_IS_INITIALIZED_MARKER",optstr),"");
+	RSB_INFO("got RSB_IO_WANT_IS_INITIALIZED_MARKER: %s\n",optstr);
+
+	RSB_INFO("INIT INTERFACE TEST: END (SUCCESS)\n");
+}
+	RSB_INFO("PRINT TEST: BEGIN\n");
+	errval = rsb__do_file_mtx_save(rsb__BLAS_inner_matrix_retrieve(A),NULL);
+	/* rsb_mtx_file_render(rsb__BLAS_inner_matrix_retrieve(A)); */
+	errval = rsb__debug_print_vectors_diff(VA,VA+1,nz-1,typecode,1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+	errval = rsb__do_mtx_render(NULL,rsb__BLAS_inner_matrix_retrieve(A), 100, 100, RSB_MARF_EPS | RSB_MARF_EPS_B);
+	/* rsb__do_file_mtx_rndr(void * pmp, const char * filename, rsb_coo_idx_t pmlWidth, rsb_coo_idx_t pmWidth, rsb_coo_idx_t pmHeight, rsb_marf_t rflags) */
+	RSB_INFO("PRINT TEST: END (SUCCESS)\n");
+
+	if(rsb__BLAS_Xusds( A ) == RSB_BLAS_ERROR)
+	{RSB_ERROR(RSB_ERRM_NL); goto err;}
+	RSB_INFO(tsep,"");
+#endif
+#if 1
+	if((A= rsb__BLAS_Xuscr_block_begin( 1, 1, 2, 2, typecode )) == RSB_BLAS_INVALID_MATRIX )
+	{RSB_ERROR(RSB_ERRM_NL); goto err;}
+	RSB_INFO(tsep,"");
+	if( rsb__BLAS_Xuscr_insert_block( A, VA, 1, 1, 0, 0) == RSB_BLAS_INVALID_MATRIX )
+	{RSB_ERROR(RSB_ERRM_NL); goto err;}
+	RSB_INFO(tsep,"");
+	if( rsb__BLAS_Xuscr_end(A) == RSB_BLAS_INVALID_MATRIX )
+	{RSB_ERROR(RSB_ERRM_NL); goto err;}
+
+	if( rsb__BLAS_usgp( A, blas_num_rows ) != 2 ) {RSB_ERROR(RSB_ERRM_NL); goto err;}
+	if( rsb__BLAS_usgp( A, blas_num_cols ) != 2 ) {RSB_ERROR(RSB_ERRM_NL); goto err;}
+	if( rsb__BLAS_usgp( A, blas_num_nonzeros ) != 4 ) {RSB_ERROR(RSB_ERRM_NL); goto err;}
+
+	RSB_INFO(tsep,"");
+	if( rsb__BLAS_Xusmv( blas_no_trans, &alpha, A, X, 1, &beta, Y, 1) != RSB_BLAS_NO_ERROR )
+	{RSB_ERROR(RSB_ERRM_NL); goto err;}
+	RSB_INFO(tsep,"");
+	if(rsb__BLAS_Xusds( A ) == RSB_BLAS_ERROR)
+	{RSB_ERROR(RSB_ERRM_NL); goto err;}
+	RSB_INFO(tsep,"");
+#endif
+#if 1
+	if((A= rsb__BLAS_Xuscr_variable_block_begin( 1, 1, &BR[0], &BC[0], typecode )) == RSB_BLAS_INVALID_MATRIX )
+	{RSB_ERROR(RSB_ERRM_NL); goto err;}
+	RSB_INFO(tsep,"");
+	if( rsb__BLAS_Xuscr_insert_block( A, VA, 1, 1, 0, 0) == RSB_BLAS_INVALID_MATRIX )
+	{RSB_ERROR(RSB_ERRM_NL); goto err;}
+	RSB_INFO(tsep,"");
+	if( rsb__BLAS_Xuscr_end(A) == RSB_BLAS_INVALID_MATRIX )
+	{RSB_ERROR(RSB_ERRM_NL); goto err;}
+	if( rsb__BLAS_usgp( A, blas_num_rows ) != 2 ) {RSB_ERROR(RSB_ERRM_NL); goto err;}
+	if( rsb__BLAS_usgp( A, blas_num_cols ) != 2 ) {RSB_ERROR(RSB_ERRM_NL); goto err;}
+	if( rsb__BLAS_usgp( A, blas_num_nonzeros ) != 4 ) {RSB_ERROR(RSB_ERRM_NL); goto err;}
+	RSB_INFO(tsep,"");
+	if( rsb__BLAS_Xusmv( blas_no_trans, &alpha, A, X, 1, &beta, Y, 1) != RSB_BLAS_NO_ERROR )
+	{RSB_ERROR(RSB_ERRM_NL); goto err;}
+	RSB_INFO(tsep,"");
+	if(rsb__BLAS_Xusds( A ) == RSB_BLAS_ERROR)
+	{RSB_ERROR(RSB_ERRM_NL); goto err;}
+	RSB_INFO(tsep,"");
+#endif
+	RSB_INFO("BASIC SPARSE BLAS TEST: END (SUCCESS)\n");
+
+#if 0
+	RSB_INFO("BIGGER MATRICES SPARSE BLAS TEST: BEGIN\n");
+	if(rsb_blas_bigger_matrices_tester(NULL))
+		goto err;
+	RSB_INFO("BIGGER MATRICES SPARSE BLAS TEST: END\n");
+#endif
+
+	RSB_INFO("STRESS SPARSE BLAS TEST: BEGIN\n");
+	if(RSB_SOME_ERROR(rsb_blas_allocation_tester()))
+	{RSB_ERROR(RSB_ERRM_NL); goto err;}
+	RSB_INFO("STRESS SPARSE BLAS TEST: END (SUCCESS)\n");
+
+	RSB_INFO("SPARSE BLAS TESTS: END (SUCCESS)\n");
+	goto ret;
+err:
+	RSB_INFO("SPARSE BLAS TESTS: FAILURE!\n");
+	errval = RSB_ERR_GENERIC_ERROR;
+ret:
+	return errval;
+}
+
+static rsb_err_t rsb_basic_primitives_tester(void)
+{
+	/**
+	 * \ingroup gr_internals
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	const size_t n=1024; // FIXME
+	rsb_nnz_idx_t i=0;
+	rsb_coo_idx_t *cp = rsb__calloc(sizeof(rsb_coo_idx_t)*n);
+	rsb_half_idx_t*hp=(rsb_half_idx_t*)cp;
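+	/* hp aliases cp's buffer: the halfword<->fullword conversions below operate
+	 * in place on the same memory */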
+	RSB_INFO("BASIC PRIMITIVES TEST: BEGIN\n");
+	if(cp==NULL){ RSB_ERROR(RSB_ERRM_ES); errval = RSB_ERR_ENOMEM; goto err; }
+	// RSB_XCOO_ISET(hp,0,n);
+       	for(i=0;i<n;++i) hp[i]=i;
+	for(i=0;i<n;++i)if(hp[i]!=i){ RSB_ERROR("half word assignment is broken");errval = RSB_ERR_INTERNAL_ERROR; goto err;}
+	rsb__do_switch_array_to_fullword_coo(hp,n,0);
+	for(i=0;i<n;++i)if(cp[i]!=i){ RSB_ERROR("half to full word conversion is broken (has %d instead of %d)",cp[i],i);errval = RSB_ERR_INTERNAL_ERROR; }
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+	rsb__do_switch_array_to_halfword_coo(cp,n,0);
+	for(i=0;i<n;++i)if(hp[i]!=i){ RSB_ERROR("full to half word conversion is broken");errval = RSB_ERR_INTERNAL_ERROR; goto err;}
+	
+	RSB_CONDITIONAL_FREE(cp);
+	
+err:
+	if(RSB_SOME_ERROR(errval))
+	{
+		rsb__do_perror(NULL,errval);
+		RSB_INFO("BASIC PRIMITIVES TEST: END (FAILURE)\n");
+	}
+	else
+	{
+		RSB_INFO("BASIC PRIMITIVES TEST: END (SUCCESS)\n");
+	}
+	return errval;
+}
+
+static rsb_err_t rsb_blas_limit_mul_tester(
+		const rsb_coo_idx_t*aIA, const rsb_coo_idx_t*aJA, const void* aVA,
+		const rsb_coo_idx_t*bIA, const rsb_coo_idx_t*bJA, const void* bVA,
+	       	const rsb_coo_idx_t m, const rsb_coo_idx_t k, const rsb_coo_idx_t n,
+	       	const rsb_nnz_idx_t annz, const rsb_nnz_idx_t bnnz, rsb_type_t typecode)
+{
+	/* FIXME: need a complete checking suite, here.  */
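+	/* Builds an m x k and a k x n sparse factor through the handle interface,
+	 * multiplies them via rsb__do_matrix_mul(), and sanity-checks the product;
+	 * a clean RSB_ERR_LIMITS failure is tolerated. */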
+	blas_sparse_matrix A = RSB_BLAS_INVALID_MATRIX;
+	blas_sparse_matrix B = RSB_BLAS_INVALID_MATRIX;
+	//blas_sparse_matrix C = RSB_BLAS_INVALID_MATRIX;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp=NULL;
+	struct rsb_mtx_t * mtxBp=NULL;
+	struct rsb_mtx_t * mtxCp=NULL;
+	rsb_trans_t trans = RSB_TRANSPOSITION_N;
+	if((A = rsb__BLAS_Xuscr_begin( m, k, typecode )) == RSB_BLAS_INVALID_MATRIX ) goto err;
+	if( rsb__BLAS_Xuscr_insert_entries( A, annz, aVA, aIA, aJA) == RSB_BLAS_INVALID_MATRIX ) goto err;
+	if( rsb__BLAS_Xuscr_end(A) == RSB_BLAS_INVALID_MATRIX ) goto err;
+	if((B= rsb__BLAS_Xuscr_begin( k, n, typecode )) == RSB_BLAS_INVALID_MATRIX ) goto err;
+	if( rsb__BLAS_Xuscr_insert_entries( B, bnnz, bVA, bIA, bJA) == RSB_BLAS_INVALID_MATRIX ) goto err;
+	if( rsb__BLAS_Xuscr_end(B) == RSB_BLAS_INVALID_MATRIX ) goto err;
+#if 1
+	mtxAp = rsb__BLAS_inner_matrix_retrieve(A);
+	mtxBp = rsb__BLAS_inner_matrix_retrieve(B);
+	if(!mtxAp || !mtxBp)
+	{
+		RSB_ERROR(RSB_ERRM_EM);
+		goto err;// it's not ok.
+	}
+
+
+#endif
+	/* TODO: need a complete checking suite, here.  */
+	if((mtxCp = rsb__do_matrix_mul(typecode,RSB_TRANSPOSITION_N,NULL,mtxAp,trans,NULL,mtxBp,&errval))==NULL)
+	{
+		if(errval == RSB_ERR_LIMITS)
+		{
+			RSB_INFO("failed computing a dense %d x %d matrix (for internal memory limits reasons--it's ok)!\n",m,n);
+			errval = RSB_ERR_NO_ERROR;
+		}
+		else
+		{
+			RSB_INFO("failed computing a dense %d x %d matrix (unknown reasons--it's not ok)!\n",m,n);
+		}
+	}
+	else
+	{
+		if(!rsb__mtx_chk(mtxCp))
+		{
+			RSB_ERROR("matrix does not seem to be correctly built\n");
+		       	RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_INTERNAL_ERROR);
+		}
+		RSB_MTX_FREE(mtxCp);
+	}
+
+	if(rsb__BLAS_Xusds( A ) == RSB_BLAS_ERROR) goto err;
+	if(rsb__BLAS_Xusds( B ) == RSB_BLAS_ERROR) goto err;
+	goto ret;
+err:
+	errval = RSB_ERR_INTERNAL_ERROR;
+ret:
+	return errval;
+}
+
+static rsb_err_t rsb_blas_limit_instancing_tester(const rsb_coo_idx_t*IA, const rsb_coo_idx_t*JA, const void* VA, const rsb_coo_idx_t m, const rsb_coo_idx_t k, const rsb_nnz_idx_t nnz, rsb_type_t typecode)
+{
+	/* FIXME: need a complete checking suite, here.  */
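+	/* Tries to instantiate an m x k COO matrix near the index-type limits; a
+	 * clean RSB_ERR_LIMITS failure is accepted, a corrupt matrix is not. */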
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp=NULL;
+#if 0
+	blas_sparse_matrix A = RSB_BLAS_INVALID_MATRIX;
+	if((A=BLAS_duscr_begin( m, k )) == RSB_BLAS_INVALID_MATRIX )
+		goto err;
+//	RSB_INFO("*\n");
+	if( rsb__BLAS_Xuscr_insert_entries( A, nnz, VA, IA, JA) == RSB_BLAS_INVALID_MATRIX )
+		goto err;
+//	RSB_INFO("*\n");
+	if( rsb__BLAS_Xuscr_end(A) == RSB_BLAS_INVALID_MATRIX )
+		goto err;
+
+	mtxAp = rsb__BLAS_inner_matrix_retrieve(A);
+#else
+	mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,m,k,RSB_DEFAULT_ROW_BLOCKING,RSB_DEFAULT_COL_BLOCKING,RSB_FLAG_NOFLAGS,&errval);
+#endif
+	if(!mtxAp)
+	{
+		//RSB_ERROR(RSB_ERRM_EM);
+		//goto err;// it's ok.
+		if(RSB_SOME_ERROR(errval)) { RSB_ERROR("failed allocating a %d x %d matrix !\n",m,k);  }
+	}
+	else
+	{
+		if(!rsb__mtx_chk(mtxAp))
+		{
+			RSB_ERROR("matrix does not seem to be correctly built\n");
+		       	RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_INTERNAL_ERROR);
+		}
+	}
+	/* FIXME: need a complete checking suite, here.  */
+#if 0
+	if(rsb__BLAS_Xusds( A ) == RSB_BLAS_INVALID_MATRIX)
+		goto err;
+#else
+	RSB_MTX_FREE(mtxAp);
+	if(errval == RSB_ERR_LIMITS)
+	{
+		RSB_INFO("failed instancing of (dense?) %d x %d matrix (it's ok)!\n",m,k);
+		errval = RSB_ERR_NO_ERROR;
+	}
+	else
+	{
+		errval = RSB_ERR_INTERNAL_ERROR;// goto err;
+		goto err;
+	}
+#endif
+//	RSB_INFO("*\n");
+	RSB_INFO("instancing %d x %d, %d nnz succeeded\n",m,k,nnz);
+	goto ret;
+err:
+	errval = RSB_ERR_INTERNAL_ERROR;
+ret:
+	return errval;
+}
+
+rsb_err_t rsb_blas_limit_cases_tester(void)
+{
+	/**
+	 * \ingroup gr_internals
+	 *
+	 * FIXME: shall perform some serious (more iterations) test, here.
+	 * FIXME: shall test on limits nonzeroes for various operations (sort, dups, etc)
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+#ifndef RSB_NUMERICAL_TYPE_DOUBLE
+	RSB_INFO("SKIPPING BASIC LIMIT CASES TEST (UNFINISHED TESTING SUITE)\n");
+#else /* RSB_NUMERICAL_TYPE_DOUBLE */
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_DOUBLE;
+	const rsb_nnz_idx_t nnz=4;
+	const rsb_coo_idx_t dima[]={
+		4,RSB_MAX_SHORTIDX_MATRIX_DIM
+		//,RSB_MAX_MATRIX_DIM-1 
+		,2<<18 ,2<<20 
+		//,RSB_MAX_MATRIX_DIM 
+		//RSB_MAX_MATRIX_DIM+1000000+RSB_NNZ_BLK_MAX-3 
+		,2<<22 ,2<<24 
+		//,2<<26 ,2<<28 
+		//,RSB_MAX_MATRIX_DIM-1000000 
+		, RSB_MAX_MATRIX_DIM 
+		};
+	//const rsb_coo_idx_t dima[]={2};
+	rsb_int_t dimi;
+	RSB_INFO("BASIC LIMIT CASES TEST: BEGIN\n");
+	RSB_INFO("BASIC LIMIT CASES TEST: BEGIN\n");
+	RSB_INFO("(please do not worry if some tests fail due to insufficient memory)\n");
+#if 1
+	RSB_INFO("(forcing allocations to be memory resident)\n");
+	rsb__lock_as_memory_resident(RSB_BOOL_TRUE); /* TODO: check return value here! */
+#endif
+
+	if(1)
+	for(dimi=0;dimi<sizeof(dima)/sizeof(dima[0]);++dimi)
+	{
+		const rsb_coo_idx_t dim=dima[dimi];
+		rsb_coo_idx_t IA[nnz];
+		rsb_coo_idx_t JA[nnz];
+		const double VA[]={11,22,33,44};
+		//const double X[dim],Y[dim]; const double alpha = 1.0;
+		//rsb__util_set_array_to_converted_integer(X,typecode,m,1,1);
+		//rsb__util_set_array_to_converted_integer(Y,typecode,k,1,0);
+		rsb_coo_idx_t m,k;
+		RSB_INFO("testing instantiation %d-sized, %d nnz\n",dim,nnz);
+		/* * * * * * * * * * * * * * * * * * * * * * * */
+		/* FIXME: need a `rotation' routine, here      */
+		/* * * * * * * * * * * * * * * * * * * * * * * */
+		m=dim,k=dim;
+		IA[0]=0; IA[1]=0; IA[2]=dim-1; IA[3]=dim-1;
+		JA[0]=0; JA[1]=dim-1; JA[2]=0; JA[3]=dim-1;
+		errval = rsb_blas_limit_instancing_tester(IA, JA, VA, m, k, nnz, typecode);
+		if(RSB_SOME_ERROR(errval)) {   }
+		/* * * * * * * * * * * * * * * * * * * * * * * */
+		m=1,k=dim;
+		IA[0]=0; IA[1]=0; IA[2]=0; IA[3]=0;
+		JA[0]=0; JA[1]=1; JA[2]=dim-2; JA[3]=dim-1;
+		errval = rsb_blas_limit_instancing_tester(IA, JA, VA, m, k, nnz, typecode);
+		if(RSB_SOME_ERROR(errval)) {   }
+		/* * * * * * * * * * * * * * * * * * * * * * * */
+		m=dim,k=1;
+		IA[0]=0; IA[1]=1; IA[2]=dim-2; IA[3]=dim-1;
+		JA[0]=0; JA[1]=0; JA[2]=0; JA[3]=0;
+		errval = rsb_blas_limit_instancing_tester(IA, JA, VA, m, k, nnz, typecode);
+		if(RSB_SOME_ERROR(errval)) {   }
+		/* * * * * * * * * * * * * * * * * * * * * * * */
+		m=dim,k=dim; IA[0]=0; JA[0]=0; 
+		errval = rsb_blas_limit_instancing_tester(IA, JA, VA, m, k, 1, typecode);
+		if(RSB_SOME_ERROR(errval)) {   }
+		/* * * * * * * * * * * * * * * * * * * * * * * */
+		m=dim,k=dim; IA[0]=dim-1; JA[0]=0; 
+		errval = rsb_blas_limit_instancing_tester(IA, JA, VA, m, k, 1, typecode);
+		if(RSB_SOME_ERROR(errval)) {   }
+		/* * * * * * * * * * * * * * * * * * * * * * * */
+		m=dim,k=dim; IA[0]=0; JA[0]=dim-1; 
+		errval = rsb_blas_limit_instancing_tester(IA, JA, VA, m, k, 1, typecode);
+		if(RSB_SOME_ERROR(errval)) {   }
+		/* * * * * * * * * * * * * * * * * * * * * * * */
+		m=dim,k=dim; IA[0]=dim-1; JA[0]=dim-1; 
+		errval = rsb_blas_limit_instancing_tester(IA, JA, VA, m, k, 1, typecode);
+		if(RSB_SOME_ERROR(errval)) {   }
+		/* * * * * * * * * * * * * * * * * * * * * * * */
+	}
+
+	if(1)
+	{
+		const rsb_nnz_idx_t dim = RSB_MAX_SHORTIDX_MATRIX_DIM+1;
+		//const rsb_nnz_idx_t dim=10;
+		rsb_coo_idx_t*aIA=NULL, *aJA=NULL, *bIA=NULL, *bJA=NULL;
+		void* aVA=NULL, * bVA=NULL;
+	       	const rsb_coo_idx_t m=dim, k=dim, n=dim;
+	       	const rsb_nnz_idx_t annz=dim+1, bnnz=dim+1;
+		rsb_type_t typecode = RSB_NUMERICAL_TYPE_DOUBLE;
+		/* size_t el_size = RSB_NUMERICAL_TYPE_SIZE(typecode); */
+		RSB_INFO("testing spmult for %d-sized, %d nnz\n",dim,nnz);
+		if(RSB_SOME_ERROR(errval = rsb_util_coo_alloc(&aVA,&aIA,&aJA,annz,typecode,RSB_BOOL_TRUE))){goto erra;}
+		if(RSB_SOME_ERROR(errval = rsb_util_coo_alloc(&bVA,&bIA,&bJA,bnnz,typecode,RSB_BOOL_TRUE))){goto erra;}
+		rsb__util_coo_array_set(aJA,annz,0);
+		rsb__util_coo_array_set_sequence(aIA,annz,0,1);
+		rsb__util_coo_array_set(bIA,bnnz,0);
+		rsb__util_coo_array_set_sequence(bJA,bnnz,0,1);
+		aIA[annz-1]=dim/2; aJA[bnnz-1]=dim/2;
+		bIA[annz-1]=dim/2; bJA[bnnz-1]=dim/2;
+		if(RSB_SOME_ERROR(rsb__fill_with_ones (aVA,typecode,dim,1))){ errval = RSB_ERR_INTERNAL_ERROR; goto erra; }
+		if(RSB_SOME_ERROR(rsb__fill_with_ones (bVA,typecode,dim,1))){ errval = RSB_ERR_INTERNAL_ERROR; goto erra; }
+		errval = rsb_blas_limit_mul_tester( aIA, aJA, aVA, bIA, bJA, bVA, m, k, n, annz, bnnz, typecode);
+	erra:
+		RSB_CONDITIONAL_FREE(aIA);
+		RSB_CONDITIONAL_FREE(aJA);
+		RSB_CONDITIONAL_FREE(aVA);
+		RSB_CONDITIONAL_FREE(bIA);
+		RSB_CONDITIONAL_FREE(bJA);
+		RSB_CONDITIONAL_FREE(bVA);
+		if(RSB_SOME_ERROR(errval)) {RSB_ERROR("!\n"); goto err;}
+	}
+
+	RSB_INFO("BASIC LIMIT CASES TEST: END\n");
+	goto ret;
+err:
+	RSB_INFO("BASIC LIMIT CASES TEST: END : FAILURE\n");
+ret:
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+	return errval;
+}
+
+	
+#if RSB_WANT_COO_BEGIN 
+static rsb_err_t rsb_mtx_alloc_from_coo_test(void)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	const rsb_nnz_idx_t nnzA=4;		/* matrix nonzeroes count */
+	const rsb_coo_idx_t  nrA=3;		/* matrix rows count */
+	const rsb_coo_idx_t  ncA=3;		/* matrix columns count */
+	rsb_coo_idx_t IA[]={0,1,2,2};
+	rsb_coo_idx_t JA[]={0,1,2,2};
+	RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE VA[]={11,22,32,1};/* values of nonzeroes */
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_FIRST_BLAS;
+	struct rsb_mtx_t * mtxAp = NULL;
+
+	if( RSB_NUMERICAL_TYPE_FIRST_BLAS == RSB_NUMERICAL_TYPE_INVALID_TYPE ) 
+	{ RSB_INFO("SKIPPING A TEST (no BLAS types in)\n"); goto ret; }
+
+	mtxAp = rsb__do_mtx_alloc_from_coo_begin(nnzA,typecode,nrA,ncA,RSB_FLAG_NOFLAGS,&errval);
+	if(RSB_SOME_ERROR(errval))goto err;
+	if( mtxAp == NULL ){ errval = RSB_ERR_INTERNAL_ERROR;goto err; }
+	if(RSB_SOME_ERROR(errval = rsb__do_set_elements(mtxAp,VA,IA,JA,nnzA,RSB_FLAG_NOFLAGS)))goto err;
+	if(RSB_SOME_ERROR(errval = rsb__do_mtx_alloc_from_coo_end(&mtxAp)))goto err;
+	RSB_MTX_FREE(mtxAp);
+	goto ret;
+err:
+	RSB_INFO("!\n");
+ret:
+	return errval;
+}
+#endif /* RSB_WANT_COO_BEGIN  */
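+
+/* Note: the above exercises the incremental COO assembly path (begin / set
+ * elements / end) through internal entry points; presumably the same machinery
+ * backs the public rsb_mtx_alloc_from_coo_begin()/rsb_mtx_alloc_from_coo_end()
+ * calls. */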
+
+#if RSB_ALLOW_INTERNAL_GETENVS
+static void rsb__txt_ar(const char*c, int* ap, int*lp)
+{
+	int nul = 0,ci,l=0;
+
+	if(!c)
+		goto err;
+
+	do
+	{
+		while(*c!=nul && !isdigit(*c))++c;
+		ci = rsb__util_atoi(c);/* Flawfinder: ignore */
+
+		if(isdigit(*c))
+			ap[l++] = ci;
+		while(*c && isdigit(*c))++c;
+	}
+	while(*c);
+
+       	RSB_ASSIGN_IF(lp,l)
+err:
+	return;
+}
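+/* e.g.: rsb__txt_ar("1,22,333",ap,&l) stores {1,22,333} into ap and sets l to 3;
+ * any non-digit characters act as separators. Used below to parse the RSB_BMT_*
+ * environment variables. */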
+#endif /* RSB_ALLOW_INTERNAL_GETENVS */
+
+rsb_err_t rsb_blas_bigger_matrices_tester(struct rsb_tester_options_t * top)
+{
+	/**
+	 * \ingroup gr_internals
+	 * */
+	rsb_err_t errvalf = RSB_ERR_NO_ERROR;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	//  full blas compliance:
+	enum blas_trans_type transTa[]={blas_no_trans,blas_trans,blas_conj_trans};
+	enum blas_symmetry_type stypea[]={
+		blas_lower_triangular
+		,blas_upper_triangular	
+		,blas_lower_symmetric
+		,blas_general
+		/* ,blas_upper_symmetric*/	/* one symmetry is enough for testing purposes ... */
+		,blas_lower_hermitian
+		//,blas_upper_hermitian
+	};
+	rsb_blas_int_t incXa[]={1,2};
+	rsb_blas_int_t incBa[]={1,2};
+	rsb_blas_int_t alphaa[]={-2,-1,1,2};
+#if (RSB_IMPLEMENTED_SOME_BLAS_TYPES>0)
+	rsb_type_t typecodea[]=RSB_MATRIX_SPBLAS_TYPE_CODES_ARRAY;
+#else /* RSB_IMPLEMENTED_SOME_BLAS_TYPES */
+	rsb_type_t typecodea[]={RSB_NUMERICAL_TYPE_INVALID_TYPE};/* bogus definition */
+#endif /* RSB_IMPLEMENTED_SOME_BLAS_TYPES */
+	enum blas_diag_type diaga[]={blas_non_unit_diag,blas_unit_diag};
+
+	// FIXME: should implement a routine to conjugate complex matrices before testing !
+
+	//enum blas_trans_type transTa[]={blas_no_trans};
+	//enum blas_trans_type transTa[]={blas_trans};
+	//enum blas_trans_type transTa[]={blas_conj_trans};
+	//enum blas_trans_type transTa[]={blas_no_trans,blas_trans};
+	//enum blas_symmetry_type stypea[]={blas_lower_triangular};
+	//enum blas_symmetry_type stypea[]={blas_upper_triangular};
+	//rsb_blas_int_t incXa[]={1};
+	//rsb_blas_int_t incBa[]={2};
+	//rsb_blas_int_t alphaa[]={1};
+	//rsb_blas_int_t alphaa[]={-1};
+	//rsb_blas_int_t incXa[]={2};
+	//rsb_blas_int_t alphaa[]={-1,1};
+	//rsb_blas_int_t alphaa[]={-2};
+	rsb_blas_int_t betaa[]={1,0};// FIXME: here the Sparse BLAS interface works only with beta=1
+	//rsb_type_t typecodea[]={RSB_NUMERICAL_TYPE_DOUBLE};
+	//rsb_type_t typecodea[]={RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX};
+	//rsb_type_t typecodea[]={RSB_NUMERICAL_TYPE_FLOAT};
+	//rsb_type_t typecodea[]=RSB_MATRIX_TYPE_CODES_ARRAY;
+
+	//const rsb_blas_int_t dima_lcl=1;
+	const rsb_blas_int_t dima_lcl=2;
+	const rsb_blas_int_t dima_pcl=dima_lcl;
+	const rsb_blas_int_t dima_tcl=dima_lcl;
+	//enum blas_diag_type diaga[]={blas_unit_diag};
+	//enum blas_diag_type diaga[]={blas_non_unit_diag};
+	//enum blas_diag_type diaga[]={blas_unit_diag};
+	const rsb_blas_int_t dimas=dima_pcl+dima_tcl*RSB_MAX_SUPPORTED_CACHE_LEVELS+dima_lcl;
+	rsb_blas_int_t dima[dimas];
+	rsb_blas_int_t dims=0;
+	rsb_blas_int_t diagi=RSB_INVALID_BLAS_INT_IDX_VAL;
+	rsb_blas_int_t transTi=RSB_INVALID_BLAS_INT_IDX_VAL;
+	rsb_blas_int_t alphai=RSB_INVALID_BLAS_INT_IDX_VAL;
+	rsb_blas_int_t betai=RSB_INVALID_BLAS_INT_IDX_VAL;
+	rsb_blas_int_t stypei=RSB_INVALID_BLAS_INT_IDX_VAL;
+	rsb_blas_int_t incXi=RSB_INVALID_BLAS_INT_IDX_VAL;
+	rsb_blas_int_t dimi=RSB_INVALID_BLAS_INT_IDX_VAL;
+	rsb_blas_int_t typecodei=RSB_INVALID_BLAS_INT_IDX_VAL;
+	rsb_blas_int_t incBi=RSB_INVALID_BLAS_INT_IDX_VAL;
+	rsb_blas_int_t cl=RSB_INVALID_BLAS_INT_IDX_VAL,cln = rsb__get_cache_levels_num();
+	rsb_blas_int_t passed=0,failed=0;
+	rsb_blas_int_t instantiated_some_recursive=0;
+#if RSB_TESTER_ALLOW_TIMEOUT
+	rsb_time_t tt = RSB_TIME_ZERO,tt0=rsb_time();
+	struct rsb_tester_options_t to;
+#endif /* RSB_TESTER_ALLOW_TIMEOUT */
+	rsb_blas_int_t isempty=0,isinvertible=1;
+/* FIXME: in the future, may use these indices (isemptym) to fill the matrix with a particular value */
+#if RSB_ALLOW_EMPTY_MATRICES
+#if RSB_ALLOW_ZERO_DIM
+	rsb_blas_int_t isemptym=3;
+#else
+	rsb_blas_int_t isemptym=2;
+#endif
+#else /* RSB_ALLOW_EMPTY_MATRICES */
+	rsb_blas_int_t isemptym=1;
+#endif /* RSB_ALLOW_EMPTY_MATRICES */
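+	/* isemptym bounds the "emptiness" variants iterated below: 1 iterates only
+	 * regular matrices, 2 also tests matrices whose values are zeroed out, and 3
+	 * (with RSB_ALLOW_ZERO_DIM) also tests the zero-dimension case. */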
+	const rsb_char_t*btps=RSB_EMPTY_STRING;
+	rsb_int_t iat=1;
+#if RSB_ALLOW_INTERNAL_GETENVS
+	rsb_blas_int_t maxtc = 0; /* max tests count, current tests count  */
+#endif /* RSB_ALLOW_INTERNAL_GETENVS */
+
+	if( (sizeof(typecodea)==0)
+#if (RSB_IMPLEMENTED_SOME_BLAS_TYPES==0)
+	|| 1		
+#endif /* RSB_IMPLEMENTED_SOME_BLAS_TYPES */
+	)
+	{
+		// FIXME: new
+		RSB_INFO("Did not configure any BLAS-standard type: thus skipping BLAS-based testing.\n");
+		goto ret;
+	}
+#if RSB_TESTER_ALLOW_TIMEOUT
+	if(!top)
+		errval = rsb_blas_tester_options_init(&to);
+	else
+		rsb_memcpy(&to,top,sizeof(to));
+#endif /* RSB_TESTER_ALLOW_TIMEOUT */
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_ERROR("!\n");
+		goto ret;
+	}
+	errval = rsb_basic_primitives_tester();
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_ERROR("!\n");
+		goto ret;
+	}
+
+	#if RSB_WANT_COO_BEGIN 
+	/* FIXME: this mini-test is insufficient! */
+	if(RSB_SOME_ERROR(errval=rsb_mtx_alloc_from_coo_test()))
+	{ RSB_ERROR("!\n"); goto ret; }
+	#endif /* RSB_WANT_COO_BEGIN  */
+
+
+#if RSB_HAVE_ISATTY
+#if RSB_HAVE_STREAMS
+	if( rsb_global_session_handle.out_stream )
+		iat=( isatty(rsb__fileno(rsb_global_session_handle.out_stream)) );
+	else
+		iat=0;
+#endif /* RSB_HAVE_STREAMS */
+#endif /* RSB_HAVE_ISATTY */
+
+	if(to.wcs==RSB_BOOL_TRUE)
+		btps=RSB_CLEARTERM_STRING; 
+	if((to.wqc==RSB_BOOL_TRUE) && (!iat))
+		to.wqt=RSB_BOOL_TRUE;
+	RSB_INFO("ADVANCED SPARSE BLAS TEST: BEGIN\n");
+#if 1
+	for(cl=0;cl<dima_pcl;++cl)
+	{
+		// 1,2,.. 
+		dima[dims++]=1<<cl;
+	}
+#else
+//	if(dims<dimas)dima[dims++]=39;
+//	if(dims<dimas)dima[dims++]=724;
+// 	if(dims<dimas)dima[dims++]=362;
+//	if(dims<dimas)dima[dims++]=1;
+	if(dims<dimas)dima[dims++]=2;
+//	if(dims<dimas)dima[dims++]=3;
+#endif
+#if 1
+	for(cl=1;cl<=cln && dims<dimas;++cl)
+	{
+		// dimensions derived from each cache level's size, so the test matrices fall around that cache's capacity
+		long cs = rsb__get_lnc_size(cl);
+		rsb_blas_int_t i;
+		for(i=1;i<=dima_tcl;++i)
+			dima[dims++]=((1<<i)*2*sqrt(cs))/(4*sizeof(rsb_coo_idx_t));
+	}
+	if((cl=cln)>0)
+	{
+		// more than outermost cache size
+		rsb_blas_int_t i;
+		long cs = rsb__get_lnc_size(cl);
+		for(i=1;i<=dima_lcl && dims<dimas;++i)
+		{
+			long ndim=(((i)*(1<<dima_tcl)*2*sqrt(cs))/(4*sizeof(rsb_coo_idx_t)));
+			if(ndim > dima[dims-1]) // compare with the last stored dimension, to avoid duplicates
+				dima[dims++]=ndim;
+		}
+	}
+#endif
+
+#if RSB_ALLOW_INTERNAL_GETENVS
+	rsb__txt_ar(getenv("RSB_BMT_ALPHA"),  &   alphaa[0], NULL);
+	rsb__txt_ar(getenv("RSB_BMT_INCXA"),  &    incXa[0], NULL);
+	rsb__txt_ar(getenv("RSB_BMT_INCBA"),  &    incBa[0], NULL);
+	rsb__txt_ar(getenv("RSB_BMT_SYMMA"),  &   stypea[0], NULL);
+	rsb__txt_ar(getenv("RSB_BMT_STYPA"),  &typecodea[0], NULL);
+	rsb__txt_ar(getenv("RSB_BMT_DIAGA"),  &    diaga[0], NULL);
+	rsb__txt_ar(getenv("RSB_BMT_TRANSA"), &  transTa[0], NULL);
+	rsb__txt_ar(getenv("RSB_BMT_DIMA"),   &     dima[0],&dims);
+#if RSB_ALLOW_EMPTY_MATRICES
+	if(getenv("RSB_BMT_ISEMPTYM")) isemptym = rsb__util_atoi(getenv("RSB_BMT_ISEMPTYM"));
+#endif /* RSB_ALLOW_EMPTY_MATRICES */
+#endif /* RSB_ALLOW_INTERNAL_GETENVS */
+	
+#if 1
+	if(
+		(rsb_do_psblas_trans_to_rsb_trans(RSB_PSBLAS_TRANS_N) != RSB_TRANSPOSITION_N) ||
+		(rsb_do_psblas_trans_to_rsb_trans(RSB_PSBLAS_TRANS_T) != RSB_TRANSPOSITION_T) ||
+		(rsb_do_psblas_trans_to_rsb_trans(RSB_PSBLAS_TRANS_C) != RSB_TRANSPOSITION_C)
+		)
+	{RSB_ERROR("!\n"); goto err;}
+#endif
+
+	//dims=0;
+	//dima[dims++]=45;
+	//dima[dims++]=362;
+	//dima[dims++]=500;
+	//dima[dims++]=499;
+	//dima[dims++]=724;
+	//dima[dims++]=1448;
+//	typecodei=3;
+	for(dimi=0;dimi<dims;++dimi)
+	for(stypei=0;stypei<sizeof(stypea)/sizeof(enum blas_symmetry_type);++stypei)
+	for(typecodei=0;typecodei<sizeof(typecodea)/sizeof(rsb_type_t);++typecodei)
+	for(incXi=0;incXi<sizeof(incXa)/sizeof(rsb_blas_int_t);++incXi)
+	for(incBi=0;incBi<sizeof(incBa)/sizeof(rsb_blas_int_t);++incBi)
+	for(alphai=0;alphai<sizeof(alphaa)/sizeof(rsb_blas_int_t);++alphai)
+	for(betai=0;betai<sizeof(betaa)/sizeof(rsb_blas_int_t);++betai)
+	for(diagi=0;diagi<sizeof(diaga)/sizeof(enum blas_diag_type);++diagi)
+	for(transTi=0;transTi<sizeof(transTa)/sizeof(enum blas_trans_type);++transTi)
+#if RSB_ALLOW_EMPTY_MATRICES
+	for(isempty=0;isempty<isemptym;++isempty)
+#endif /* RSB_ALLOW_EMPTY_MATRICES */
+	{
+		const rsb_blas_int_t is_really_empty=isempty && (diaga[diagi]!=blas_unit_diag);
+		blas_sparse_matrix T = RSB_BLAS_INVALID_MATRIX;
+		void *B=NULL,*X=NULL,*D=NULL;
+		rsb_blas_int_t dim=dima[dimi];
+	       	rsb_coo_idx_t *IA=NULL,*JA=NULL;
+		void * VA=NULL;
+	       	rsb_blas_int_t nnz = RSB_BLAS_INT_MAX;
+		rsb_type_t typecode=typecodea[typecodei];
+		enum blas_trans_type transT=transTa[transTi];
+		rsb_blas_int_t incX=incXa[incXi],incB=incBa[incBi],incD=1;
+		size_t el_size = RSB_NUMERICAL_TYPE_SIZE(typecode);
+		enum blas_symmetry_type stype=stypea[stypei];
+		rsb_aligned_t alpha_inv[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+		rsb_aligned_t inrm[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+		rsb_aligned_t inrm_[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+		rsb_aligned_t alpha[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+		rsb_aligned_t beta[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+		struct rsb_mtx_t * mtxAp=NULL;
+		struct rsb_mtx_t *cmatrix=NULL;
+		struct rsb_mtx_t *kmatrix=NULL;
+		size_t extra_vels=0;
+		rsb_nnz_idx_t rnnz=0,ndnnz,rnz=0;
+		rsb_submatrix_idx_t submatrices=0;
+		rsb_trans_t trans = rsb_blas_trans_to_rsb_trans(transT);
+		rsb_char_t tc = RSB_TRANSPOSITION_AS_CHAR(trans);
+	       	rsb_blas_int_t mmi,msmd=100;
+		rsb_coo_idx_t coov;
+		rsb_nnz_idx_t nnzv;
+		rsb_aligned_t zero[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+		rsb_aligned_t one[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+		rsb_aligned_t two[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+		rsb_aligned_t three[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+		rsb_int do_tune_test = 0; 
+#if RSB_ALLOW_ZERO_DIM
+		if(isempty>=2 && dim > 1) continue;
+		if(isempty>=2) dim=0;
+#endif
+		rsb__util_set_area_to_converted_integer(one,typecode,1);
+		rsb__util_set_area_to_converted_integer(two,typecode,2);
+		rsb__util_set_area_to_converted_integer(three,typecode,3);
+
+		rsb__util_set_area_to_fraction_of_integer(alpha_inv,alphaa[alphai],typecode);
+
+		// ... need asserts ...
+		rsb__util_set_area_to_converted_integer(alpha,typecode,alphaa[alphai]);
+		rsb__util_set_area_to_converted_integer(beta,typecode,betaa[betai]);
+		rsb__util_set_area_to_converted_integer(zero,typecode,0);
+#if RSB_ALLOW_ZERO_DIM
+		if(isempty>=2) extra_vels=1;
+#endif
+		X = rsb__calloc(el_size*(dim*incX+extra_vels));
+		B = rsb__calloc(el_size*(dim*incB+extra_vels));
+		D = rsb__calloc(el_size*(dim*incD+extra_vels));
+		if(!X || !B || !D)
+		{
+			RSB_ERROR("failed allocating a vector!\n"); goto err;
+		}
+
+		/* generate a triangular matrix */
+		/* FIXME: should make sure that the matrix is recursive, somehow. */
+		errval = rsb__generate_dense_lower_triangular_coo(dim,1,&IA,&JA,&VA,&nnz,typecode);
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_ERROR("!\n"); goto err;
+		}
+#if RSB_ALLOW_EMPTY_MATRICES
+		if(isempty)
+		{
+			RSB_DO_ERROR_CUMULATE(errval,rsb__cblas_Xscal(typecode,nnz,&zero,VA,1));
+			if(RSB_SOME_ERROR(errval))
+				{RSB_ERROR("!\n"); goto err;}
+		}
+#endif /* RSB_ALLOW_EMPTY_MATRICES */
+		isinvertible=(diaga[diagi]==blas_unit_diag||!isempty);
+		isinvertible&=(stype != blas_general);
+	/*	isinvertible&=(stype != blas_lower_symmetric);
+		isinvertible&=(stype != blas_upper_symmetric);
+	       	*/
+		isinvertible&=(stype != blas_upper_hermitian);
+		isinvertible&=(stype != blas_lower_hermitian);
+		ndnnz=nnz-(diaga[diagi]==blas_unit_diag?dim:0);
+		if(ndnnz>nnz)
+		{
+			RSB_ERROR("!\n"); goto err;
+		}
+		if(nnz > 0 && (!VA || !IA || !JA))
+		{
+			RSB_ERROR("!\n");
+			goto err;
+		}
+
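+		/* for the upper-triangular case, reuse the generated lower-triangular
+		 * pattern transposed, by swapping the row and column index arrays */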
+		if(stype==blas_upper_triangular)
+			RSB_SWAP(rsb_coo_idx_t*,IA,JA);
+
+#if 1 /* 20110425 */
+		if(incX==1)
+		if(incB==1)/* FIXME: shall propagate incX to the test routine, someday */
+		if(nnz>0)/* empty matrices are not supported for now */
+		if(!(diaga[diagi]==blas_unit_diag))/* FIXME: the accuracy test needs cleaned up input (i.e.: won't remove the diagonal) */
+
+		if(!RSB_IS_MATRIX_TYPE_COMPLEX(typecode))/* FIXME: shall fix many vector-operating routines, first */
+		{
+			/* FIXME: to be complete, shall implement symmetry/lower/upper/diagonal flags */
+			rsb_flags_t aflags = RSB_FLAG_NOFLAGS;
+			struct rsb_coo_matrix_t coo;
+			if(stype==blas_lower_symmetric) RSB_DO_FLAG_ADD(aflags,RSB_FLAG_SYMMETRIC);
+			if(stype==blas_upper_symmetric) RSB_DO_FLAG_ADD(aflags,RSB_FLAG_SYMMETRIC);
+			if(stype==blas_upper_hermitian) RSB_DO_FLAG_ADD(aflags,RSB_FLAG_UPPER_HERMITIAN);
+			if(stype==blas_lower_hermitian) RSB_DO_FLAG_ADD(aflags,RSB_FLAG_LOWER_HERMITIAN);
+			if(diaga[diagi]==blas_unit_diag)RSB_DO_FLAG_ADD(aflags,RSB_FLAG_UNIT_DIAG_IMPLICIT);
+			rsb__fill_coo_struct(&coo,VA,IA,JA,dim,dim,nnz,typecode);
+			RSB_DO_ERROR_CUMULATE(errval,rsb__do_spmv_accuracy_test(&coo,NULL,0,aflags));
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR("!\n");
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_spmv_accuracy_test(&coo,NULL,0,aflags));
+				goto err;
+			}
+		}
+#endif
+		T = rsb__BLAS_Xuscr_begin(dim,dim,typecode);
+		if( T == RSB_BLAS_INVALID_MATRIX )
+			{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR("error while calling uscr_begin\n"); goto err;}
+		if( BLAS_ussp(T,stype) != RSB_BLAS_NO_ERROR )
+			{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR("error while calling ussp(%d)\n",stype); goto err;}
+		if( BLAS_ussp(T,diaga[diagi]) != RSB_BLAS_NO_ERROR )
+			{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR("error while calling ussp(%d)\n",diaga[diagi]); goto err;}
+		if( rsb__BLAS_Xuscr_insert_entries(T,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+			{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR("error while calling cr_insert_entries\n"); goto err;}
+		if( rsb__BLAS_Xuscr_end(T) != RSB_BLAS_NO_ERROR )
+			{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR("error finalizing matrix!\n"); goto err;}
+		mtxAp = rsb__BLAS_inner_matrix_retrieve(T);
+		if(!mtxAp)
+		{
+			RSB_ERROR(RSB_ERRM_NL);
+		       	errval = RSB_ERR_INTERNAL_ERROR; goto err;
+		}
+
+		if( mtxAp->nnz>0 && rsb__get_index_storage_amount(mtxAp)==0 )
+		{
+			RSB_ERROR(RSB_ERRM_NL);
+		       	errval = RSB_ERR_INTERNAL_ERROR; goto err;
+		}
+
+		if(diaga[diagi]==blas_non_unit_diag) /* FIXME */
+		if(nnz > 0)
+		{
+			if( rsb__BLAS_Xusset_elements(T, IA, JA, VA, mtxAp->nnz) != RSB_BLAS_NO_ERROR )
+			{
+				RSB_ERROR(RSB_ERRM_NL);
+			       	errval = RSB_ERR_INTERNAL_ERROR;
+			       	goto err;
+			}
+		}
+
+		rnz=mtxAp->nnz;
+		if(!rsb__mtx_chk(mtxAp))
+		{
+			RSB_ERROR(RSB_ERRM_NL);
+		       	errval = RSB_ERR_INTERNAL_ERROR; goto err;
+		}
+		
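+		/* Query rows, cols and nnz twice each: via the enum-keyed
+		 * rsb__do_mtx_get_info() and via the string-keyed variant, cross-checking
+		 * both against the known dimension and nonzero count. */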
+		coov=0;
+		rsb__do_mtx_get_info(mtxAp,             RSB_MIF_MATRIX_COLS__TO__RSB_COO_INDEX_T ,&coov);
+		if(coov!=dim){errval = RSB_ERR_INTERNAL_ERROR; RSB_ERROR(RSB_ERRM_NL);goto err;}
+		coov=0;
+		rsb__do_get_matrix_info_from_string(mtxAp,"RSB_MIF_MATRIX_COLS__TO__RSB_COO_INDEX_T",&coov,0);
+		if(coov!=dim){errval = RSB_ERR_INTERNAL_ERROR; RSB_ERROR(RSB_ERRM_NL);goto err;}
+
+		coov=0;
+		rsb__do_mtx_get_info(mtxAp,             RSB_MIF_MATRIX_ROWS__TO__RSB_COO_INDEX_T ,&coov);
+		if(coov!=dim){errval = RSB_ERR_INTERNAL_ERROR; RSB_ERROR(RSB_ERRM_NL);goto err;}
+		coov=0;
+		rsb__do_get_matrix_info_from_string(mtxAp,"RSB_MIF_MATRIX_ROWS__TO__RSB_COO_INDEX_T",&coov,0);
+		if(coov!=dim){errval = RSB_ERR_INTERNAL_ERROR; RSB_ERROR(RSB_ERRM_NL);goto err;}
+
+		nnzv=0;
+		rsb__do_mtx_get_info            (mtxAp, RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T ,&nnzv);
+		if(nnzv!=mtxAp->nnz){errval = RSB_ERR_INTERNAL_ERROR; RSB_ERROR(RSB_ERRM_NL);goto err;}
+		nnzv=0;
+		rsb__do_get_matrix_info_from_string(mtxAp,"RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T",&nnzv,0);
+		if(nnzv!=mtxAp->nnz){errval = RSB_ERR_INTERNAL_ERROR; RSB_ERROR(RSB_ERRM_NL);goto err;}
+		
+		{
+			/* TODO: need a systematic tester */
+			const int errstrlen=128;
+			char errstr[errstrlen];
+			strcpy(errstr,"");
+			rsb__do_strerror_r(RSB_ERR_BADARGS,errstr,errstrlen);  
+			if(strlen(errstr)<1)
+			RSB_LSTERR(RSB_ERRM_ES);
+			
+		}
+
+		/* TODO: extract && test other values as well... */
+
+//		submatrices = rsb__submatrices(mtxAp);
+		submatrices = rsb__terminal_recursive_matrix_count(mtxAp);
+		instantiated_some_recursive+=(rsb__submatrices(mtxAp)>1?1:0);
+		if(rsb__get_sizeof(mtxAp)<(
+		(mtxAp->el_size*mtxAp->nnz)+ (sizeof(rsb_half_idx_t)*mtxAp->nnz)+0))
+		{
+			errval = RSB_ERR_INTERNAL_ERROR;
+			RSB_ERROR("!\n");
+			goto err;
+		}
+
+#if 1
+#if RSB_ALLOW_INTERNAL_GETENVS
+		if(! ( getenv("RSB_BMT_SPMV") && rsb__util_atoi(getenv("RSB_BMT_SPMV")) == 0 ) )
+#endif /* RSB_ALLOW_INTERNAL_GETENVS */
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_SYMMETRIC) || RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_HERMITIAN))
+		{
+			/* FIXME: this gets NOT covered, it seems  */
+		for(mmi=0;mmi< (dim<msmd?3:2) ;++mmi)
+		if(! (mmi==1 && ((incX!= 1) || (incB!=1) )  ))
+		{
+			const int nrhs=1;/* TODO: need more ... */
+			/* TODO : should fill X and B with sentinel values ! */
+			if(RSB_SOME_ERROR(rsb__cblas_Xscal(typecode,dim,&zero,X,incX)) || RSB_SOME_ERROR(rsb__fill_with_ones (B,typecode,dim,incB)))
+			{ errval = RSB_ERR_INTERNAL_ERROR; goto err; }
+
+
+			if(mmi==0)
+			if( rsb__BLAS_Xusmv(transT,alpha,T,B,incB,beta,X,incX) != RSB_BLAS_NO_ERROR )
+			{
+				errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR("Symmetric USMV failed!\n"); goto err;
+			}
+
+			if(mmi==1)
+			if(RSB_SOME_ERROR( rsb__do_spmm(trans,alpha,mtxAp,nrhs,RSB_FLAG_WANT_COLUMN_MAJOR_ORDER,B,dim,beta,X,dim,RSB_OP_FLAG_DEFAULT)) )
+			{
+			       	errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR("Symmetric USMV failed!\n"); goto err;
+		       	}
+
+			if(mmi==2)
+			if(RSB_SOME_ERROR( rsb_do_spmv_general(trans,alpha,mtxAp,B,incB,beta,X,incX,RSB_OP_FLAG_WANT_SERIAL RSB_DEFAULT_OUTER_NRHS_SPMV_ARGS)))
+			{
+				errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR("Symmetric USMV failed!\n"); goto err;
+			}
+
+			/* if(!isinvertible) */
+			if(is_really_empty)
+				rsb__util_set_array_to_converted_integer(B,typecode,dim,incB,0                 );
+			else
+			{
+				if(isempty)
+					rsb__util_set_array_to_converted_integer(B,typecode,dim,incB,    alphaa[alphai]);
+				else
+					rsb__util_set_array_to_converted_integer(B,typecode,dim,incB,dim*alphaa[alphai]);
+			}
+			if( RSB_SOME_ERROR(rsb__do_are_same(B,X,dim,typecode,incB,incX)) )
+			{
+				errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR("Symmetric USMV computed wrong results!\n"); goto err;
+			}
+			goto err; /* ok. skip the remaining tests FIXME */
+		}
+		}
+#endif
+#if 1
+#if RSB_ALLOW_INTERNAL_GETENVS
+		if(! ( getenv("RSB_BMT_SPGEMM") && rsb__util_atoi(getenv("RSB_BMT_SPGEMM")) == 0 ) )
+#endif /* RSB_ALLOW_INTERNAL_GETENVS */
+		// FIXME: this is quite slow: SPGEMM is O(dim^3), and so shall be limited down to a certain threshold
+		if(dimi <= RSB_WANT_SPGEMM_TESTING_FOR_ONLY_FIRST_DIMI)// FIXME: spgemm cost increases cubically with dim..
+		if(incX==1 && incB==1)
+		if(stype==blas_lower_triangular || stype==blas_upper_triangular)
+		if(!RSB_DO_TOOFEWNNZFORCSR(nnz,dim) /*&& typecode == RSB_NUMERICAL_TYPE_DOUBLE*/ )
+		if(diaga[diagi]==blas_non_unit_diag)
+		if( trans == RSB_TRANSPOSITION_N ) /* FIXME: this is just a workaround, 20140324 */
+		{
+			rsb_nnz_idx_t ldd=2*dim;
+			rsb_bool_t rowmajor=(stype==blas_lower_triangular)?RSB_BOOL_TRUE:RSB_BOOL_FALSE;
+			//rsb_nnz_idx_t ldd=1*dim;
+			void *dVA = rsb__calloc(el_size*dim*ldd);
+			rsb_aligned_t sum1[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+			rsb_aligned_t sum2[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+			rsb_aligned_t two[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+
+			if(dVA)
+			{
+				/* rsb_trans_t ttrans = rsb__do_transpose_transposition(trans); */
+				rsb_trans_t ttrans = (trans); /* FIXME: this is just a workaround, 20140324 */
+
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_spgemm_to_dense(typecode,trans,alpha,mtxAp,ttrans,beta,mtxAp,ldd,dim,dim,rowmajor,dVA,NULL,NULL));
+				if(RSB_SOME_ERROR(errval)) { RSB_ERROR("!\n"); goto err; }
+				RSB_DO_ERROR_CUMULATE(errval,rsb__util_vector_sum(sum1,dVA,typecode,ldd*dim));
+				if(RSB_SOME_ERROR(errval)) { RSB_ERROR("!\n"); goto err; }
+				// now: c <- alpha a ^ trans * beta a ^ trans
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_spgemm_to_dense(typecode,trans,alpha,mtxAp,ttrans,beta,mtxAp,ldd,dim,dim,rowmajor,dVA,NULL,NULL));
+				if(RSB_SOME_ERROR(errval)) { RSB_ERROR("!\n"); goto err; }
+				RSB_DO_ERROR_CUMULATE(errval,rsb__util_vector_sum(sum2,dVA,typecode,ldd*dim));
+				if(RSB_SOME_ERROR(errval)) { RSB_ERROR("!\n"); goto err; }
+				// now: c <- 2 ( alpha a ^ trans * beta a ^ trans )
+				if(RSB_SOME_ERROR(errval))
+				{
+					RSB_CONDITIONAL_FREE(dVA);
+					RSB_ERROR(RSB_ERRM_FMMTDT);
+		       			errval = RSB_ERR_INTERNAL_ERROR; goto err;
+				}
+				rsb__util_set_area_to_converted_integer(two,typecode,2);
+				rsb__util_vector_div(sum2,two,typecode,1);
+				// TODO: there is risk of overflow, though..
+				if( RSB_SOME_ERROR(rsb__do_are_same(sum1,sum2,1,typecode,1,1) ))
+				{
+					RSB_CONDITIONAL_FREE(dVA);
+					RSB_ERROR(RSB_ERRM_FMMTDT);
+		       			errval = RSB_ERR_INTERNAL_ERROR; goto err;
+			       	}
+				// since a is lower triangular full with ones,
+				// c is full, with values (given s=2(alpha+beta))
+				//   s   s   s   s ...
+				//   s  2s  2s  2s ...
+				//   s  2s  3s  3s ...
+				//   s  2s  3s  4s ...
+				//   ...
+				// (transposed, in the case trans is)
+				// next: test rsb__do_matrix_add_to_dense -- reset c to zero, accumulate alpha a into it twice, and compare the intermediate sums
+
+				RSB_DO_ERROR_CUMULATE(errval,rsb__cblas_Xscal(typecode,ldd*dim,&zero,dVA,1));
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_matrix_add_to_dense(alpha,mtxAp,ldd,dim,dim,rowmajor,dVA));
+				RSB_DO_ERROR_CUMULATE(errval,rsb__util_vector_sum(sum1,dVA,typecode,ldd*dim));
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_matrix_add_to_dense(alpha,mtxAp,ldd,dim,dim,rowmajor,dVA));
+				RSB_DO_ERROR_CUMULATE(errval,rsb__util_vector_sum(sum2,dVA,typecode,ldd*dim));
+				rsb__util_vector_div(sum2,two,typecode,1);
+				// TODO: there is risk of overflow, though..
+				if( RSB_SOME_ERROR(rsb__do_are_same(sum1,sum2,1,typecode,1,1) ))
+				{
+					RSB_CONDITIONAL_FREE(dVA);
+					RSB_ERROR(RSB_ERRM_FMATDBC);
+		       			errval = RSB_ERR_INTERNAL_ERROR; goto err;
+			       	}
+				RSB_CONDITIONAL_FREE(dVA);
+				if(RSB_SOME_ERROR(errval))
+				{
+					RSB_ERROR(RSB_ERRM_FMATD);
+		       			errval = RSB_ERR_INTERNAL_ERROR; goto err;
+				}
+			}
+			//if((cmatrix = rsb__do_matrix_mul(RSB_TRANSPOSITION_N,NULL,mtxAp,trans,NULL,mtxAp,&errval))!=RSB_ERR_NO_ERROR)
+			if((cmatrix = rsb__do_matrix_mul(typecode,RSB_TRANSPOSITION_N,NULL,mtxAp,trans,NULL,mtxAp,&errval))!=NULL)
+			{
+				// FIXME: ignoring scaling values!
+				RSB_MTX_FREE(cmatrix);
+			}
+			else
+			{
+				RSB_ERROR(RSB_ERRM_FMM);
+		       		errval = RSB_ERR_INTERNAL_ERROR; goto err;
+			}
+		}
+#endif
+#if 1
+#if RSB_ALLOW_INTERNAL_GETENVS
+		if(! ( getenv("RSB_BMT_SUM") && rsb__util_atoi(getenv("RSB_BMT_SUM")) == 0 ) )
+#endif /* RSB_ALLOW_INTERNAL_GETENVS */
+	{
+		if(diaga[diagi]!=blas_unit_diag && nnz>0)
+		{
+			cmatrix = NULL;
+			errval = rsb__clone(&cmatrix,RSB_NUMERICAL_TYPE_SAME_TYPE,RSB_TRANSPOSITION_N,NULL,mtxAp,RSB_FLAG_IDENTICAL_FLAGS);
+			// cmatrix = rsb__clone_simple(mtxAp);
+		if( (cmatrix == NULL) || (!rsb__mtx_chk(cmatrix) ) )
+		{
+			if(cmatrix==NULL)
+			{
+				RSB_ERROR(RSB_ERRM_FMC);
+		       		errval = RSB_ERR_INTERNAL_ERROR; goto err;
+			}
+			else
+			{
+				RSB_ERROR(RSB_ERRM_CMINBC);
+		       		errval = RSB_ERR_INTERNAL_ERROR; goto err;
+			}
+		}
+		else
+		{
+			/* very, very slow sparse matrices sum testing */
+			struct rsb_mtx_t *smatrix=NULL;
+			struct rsb_coo_matrix_t coo,coc;
+			// s = m^trans + 2 * m^trans = 3 m^trans
+			//smatrix = rsb__do_matrix_sum(typecode,mtxAp,one,trans,cmatrix,two,trans,&errval);
+			RSB_BZERO_P(&coo);
+		       	RSB_BZERO_P(&coc);
+
+			smatrix = rsb__do_matrix_sum(typecode,RSB_TRANSPOSITION_N,one,mtxAp,RSB_TRANSPOSITION_N,two,cmatrix,&errval);
+			if(!smatrix ) {RSB_ERROR(RSB_ERRM_FCMS);errval = RSB_ERR_INTERNAL_ERROR;}
+			if( RSB_SOME_ERROR(errval)) { RSB_ERROR(RSB_ERRM_FCMS); goto smerr; }
+#if 0
+			if( smatrix->nnz > 1)
+			{
+				rsb_nnz_idx_t i;
+				for(i=0;i<cmatrix->nnz;++i)RSB_STDOUT("%d \n",((rsb_half_idx_t*)(mtxAp->bindx))[i]);
+				RSB_INFO("cmatrix:\n");
+			       	rsb_print_matrix_t(cmatrix);
+				RSB_INFO(" mtxAp:\n");
+			       	rsb_print_matrix_t( mtxAp);
+				RSB_STDOUT_MATRIX_SUMMARY(mtxAp), RSB_INFO("\n");
+				RSB_INFO("smatrix:\n");
+			       	rsb_print_matrix_t(smatrix);
+				RSB_INFO("\n");
+			}
+#endif
+			if( smatrix->nnz != mtxAp->nnz)
+#if RSB_ALLOW_EMPTY_MATRICES
+			if( !isempty )
+#endif /* RSB_ALLOW_EMPTY_MATRICES */
+			{
+			       	RSB_ERROR("seems like matrix sum failed (same pattern, no cancellation possible): %d + %d to %d nnz)\n",mtxAp->nnz,cmatrix->nnz,smatrix->nnz);
+				 errval = RSB_ERR_INTERNAL_ERROR;
+				 goto smerr; 
+			}
+			//if(RSB_SOME_ERROR(errval = rsb_mtx_elemental_scale(cmatrix,&three)))
+			if(RSB_SOME_ERROR(errval = rsb__do_upd_vals(cmatrix,RSB_ELOPF_MUL,&three)))
+			{ RSB_ERROR(RSB_ERRM_ES); goto smerr; }
+			coo.typecode=smatrix->typecode; coo.nnz=smatrix->nnz;
+			RSB_DO_FLAG_ADD(smatrix->flags,RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS);
+			errval = rsb__do_switch_rsb_mtx_to_coo(smatrix,&coo.VA,&coo.IA,&coo.JA,RSB_FLAG_SORTED_INPUT);
+			if(RSB_SOME_ERROR(errval)){ RSB_ERROR(RSB_ERRM_ES); goto smerr; }
+			smatrix=NULL;
+			coc.typecode=cmatrix->typecode; coc.nnz=cmatrix->nnz;
+			RSB_DO_FLAG_ADD(cmatrix->flags,RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS);
+			errval = rsb__do_switch_rsb_mtx_to_coo(cmatrix,&coc.VA,&coc.IA,&coc.JA,RSB_FLAG_SORTED_INPUT);
+			if(RSB_SOME_ERROR(errval)){ RSB_ERROR(RSB_ERRM_ES); goto smerr; }
+			cmatrix=NULL;
+			if(RSB_SOME_ERROR(errval)) { RSB_ERROR(RSB_ERRM_ES); goto smerr; }
+			//if(trans == RSB_TRANSPOSITION_T)RSB_SWAP(rsb_coo_idx_t*,coo.IA,coo.JA);
+			//if(trans == RSB_TRANSPOSITION_C)errval = rsb__util_do_conjugate(coo.VA,coo.typecode,coo.nnz);
+			//coo.VA=coc.VA=NULL;
+#if RSB_ALLOW_EMPTY_MATRICES
+			if((!isempty) || (!rsb__are_coo_matrices_both_empty(&coo,RSB_FLAG_NOFLAGS,&coc,RSB_FLAG_NOFLAGS)))
+#endif /* RSB_ALLOW_EMPTY_MATRICES */
+			if(!rsb__are_coo_matrices_equal(&coo,&coc))
+			{ errval = RSB_ERR_INTERNAL_ERROR; RSB_ERROR("matrices do not match!\n"); goto smerr; }
+			//
+smerr:
+			rsb__destroy_coo_matrix_t(&coo);
+			rsb__destroy_coo_matrix_t(&coc);
+			RSB_MTX_FREE(cmatrix);
+			RSB_MTX_FREE(smatrix);
+			if(RSB_SOME_ERROR(errval)) { RSB_ERROR("some error occurred while testing matrices sum functionality\n"); goto err; }
+#if RSB_WANT_AUTOTUNING_TESTING
+#if RSB_ALLOW_INTERNAL_GETENVS
+		if( getenv("RSB_BMT_AUTOTUNE") )
+		       	do_tune_test = rsb__util_atoi(getenv("RSB_BMT_AUTOTUNE"));
+#endif /* RSB_ALLOW_INTERNAL_GETENVS */
+		if( do_tune_test > 0 )
+		if( nnz > 0 && nnz < 100 ) /* FIXME: these limits here are only for time reasons */
+		{
+			struct rsb_mtx_t * mtxOp = NULL;
+#if !RSB_AT_DESTROYS_MTX
+			struct rsb_mtx_t * mtxQp = NULL;
+#endif /* RSB_AT_DESTROYS_MTX */
+			rsb_real_t *sfp = NULL;
+		       	rsb_int_t *tnp = NULL;
+		       	rsb_int_t oitmax = 0;
+		       	rsb_time_t tmax=/*RSB_TIME_ZERO*/0.003;
+		       	rsb_trans_t transA = trans;
+		       	const void * alphap = NULL;
+		       	// struct rsb_mtx_t * mtxAp = ;
+		       	rsb_coo_idx_t nrhs = 1;
+		       	rsb_flags_t order = RSB_FLAG_WANT_COLUMN_MAJOR_ORDER;
+		       	const void * Bp = NULL;
+		       	rsb_nnz_idx_t ldB = 0;
+		       	const void * betap = NULL;
+		       	void * Cp = NULL;
+		       	rsb_nnz_idx_t ldC = 0;
+
+		       	// mtxOp = rsb__clone_simple(mtxAp);
+			errval = rsb__clone(&mtxOp,RSB_NUMERICAL_TYPE_SAME_TYPE,RSB_TRANSPOSITION_N,NULL,mtxAp,RSB_FLAG_IDENTICAL_FLAGS);
+
+		       	ldB = rsb_do_get_rows_of(mtxOp,transA);
+		       	ldC = rsb_do_get_columns_of(mtxOp,transA);
+#if !RSB_AT_DESTROYS_MTX
+			mtxQp = mtxOp;
+#endif /* RSB_AT_DESTROYS_MTX */
+			if( RSB_SOME_ERROR( errval = rsb__do_tune_spmm(&mtxOp, sfp, tnp, oitmax, tmax, transA, alphap, NULL, nrhs, order, Bp, ldB, betap, Cp, ldC)))
+			{
+				errval = RSB_ERR_INTERNAL_ERROR; RSB_ERROR("rsb_tune_spmm failed !\n"); goto sterr;
+			}
+#if !RSB_AT_DESTROYS_MTX
+			if(mtxQp != mtxOp) RSB_MTX_FREE(mtxQp);
+#endif /* RSB_AT_DESTROYS_MTX */
+#if 0
+			RSB_MTX_FREE(mtxOp);
+			mtxOp = rsb__clone_simple(mtxAp);
+#else
+			errval = rsb__clone(&mtxOp,RSB_NUMERICAL_TYPE_SAME_TYPE,RSB_TRANSPOSITION_N,NULL,mtxAp,RSB_FLAG_IDENTICAL_FLAGS);
+#endif
+#if !RSB_AT_DESTROYS_MTX
+			mtxQp = mtxOp;
+#endif /* RSB_AT_DESTROYS_MTX */
+#if RSB_ALLOW_EMPTY_MATRICES
+			if(!isempty)
+#endif
+			if(stype==blas_upper_triangular || stype==blas_lower_triangular )
+			if( RSB_SOME_ERROR( errval = rsb__do_tune_spsm(&mtxOp, sfp, tnp, oitmax, tmax, transA, alphap, NULL, nrhs, order, Bp, ldB, betap, Cp, ldC)))
+			{
+				errval = RSB_ERR_INTERNAL_ERROR; RSB_ERROR("rsb_tune_spsm failed !\n"); goto sterr;
+			}
+#if !RSB_AT_DESTROYS_MTX
+			if(mtxQp != mtxOp) RSB_MTX_FREE(mtxQp);
+#endif /* RSB_AT_DESTROYS_MTX */
+			RSB_MTX_FREE(mtxOp);
+sterr:
+			if(RSB_SOME_ERROR(errval)) { RSB_ERROR("...\n"); goto err; }
+		}
+#endif	/* RSB_WANT_AUTOTUNING_TESTING */
+		}
+		}
+	}
+#endif
+#if 1
+		cmatrix = NULL;
+		errval = rsb__clone(&cmatrix,RSB_NUMERICAL_TYPE_SAME_TYPE,RSB_TRANSPOSITION_N,NULL,mtxAp,RSB_FLAG_IDENTICAL_FLAGS);
+		// cmatrix = rsb__clone_simple(mtxAp);
+
+		if( cmatrix == NULL )
+		{
+			RSB_ERROR("failed matrix cloning\n");
+		       	errval = RSB_ERR_INTERNAL_ERROR; goto err;
+		}
+		else
+		{
+			struct rsb_coo_matrix_t coo,csr;
+			rsb_flags_t cflags = RSB_DO_FLAG_FILTEROUT(cmatrix->flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE);
+
+			RSB_BZERO_P(&coo);
+		       	RSB_BZERO_P(&csr);
+
+			RSB_DO_FLAG_ADD(cflags,RSB_FLAG_SORTED_INPUT); // NEW, TO SPEED UP THIS CODE (WEAKENS THE TESTING EFFECTIVENESS, THOUGH)
+			if(!rsb__mtx_chk(cmatrix))
+			{
+				RSB_ERROR("cloned matrix is not built correctly\n");
+		       		errval = RSB_ERR_INTERNAL_ERROR; goto converr;
+			}
+			if(!cmatrix->nnz)
+				goto cmedone;
+			RSB_INIT_CXX_FROM_MTX(&coo,cmatrix);
+			csr=coo;
+			if((rsb__allocate_coo_matrix_t(&coo)!=&coo) || (rsb__allocate_coo_matrix_t(&csr)!=&csr))
+			{
+				RSB_ERROR("allocaton problem\n");
+		       		errval = RSB_ERR_INTERNAL_ERROR; goto converr;
+			}
+			coo.nnz=csr.nnz=mtxAp->nnz;
+getcsrcooagain:
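+			/* Round-trip conversion checks: extract COO and CSR images of the same
+			 * matrix, convert CSR<->COO and back, and verify that all representations
+			 * agree. */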
+			// RSB -> COO
+			errval = rsb__do_get_coo_noalloc(mtxAp,coo.VA,coo.IA,coo.JA,NULL,cflags);
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR("coo extraction problems\n");
+		       		errval = RSB_ERR_INTERNAL_ERROR; goto converr;
+			}
+			// RSB -> CSR
+			errval = rsb__do_get_csr(typecode,mtxAp,csr.VA,csr.IA,csr.JA,cflags);
+			//errval = rsb__do_get_csr(typecode,mtxAp,csr.VA,csr.IA,csr.JA,cflags|RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS);
+			//errval = rsb__do_get_csr(typecode,mtxAp,csr.VA,csr.IA,csr.JA,RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS);
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR("csr extraction problems\n");
+		       		errval = RSB_ERR_INTERNAL_ERROR; goto converr;
+			}
+			// CSR -> COO
+			errval = rsb__util_uncompress_row_pointers_array(csr.IA,csr.nr,cflags,cflags,csr.IA);
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR("csr->coo conversion problems\n");
+		       		errval = RSB_ERR_INTERNAL_ERROR; goto converr;
+			}
+			// let's check whether 'csr' was indeed converted to COO
+			if(!rsb__are_coo_matrices_equal(&coo,&csr))
+			{
+				RSB_ERROR("no match in coo/csr extractors\n");
+		       		errval = RSB_ERR_INTERNAL_ERROR; goto converr;
+			}
+			// COO -> CSR 
+			errval = rsb__util_compress_to_row_pointers_array(NULL,csr.nnz,csr.nr,cflags,cflags,csr.IA);
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR("coo->csr conversion failed!\n");
+		       		errval = RSB_ERR_INTERNAL_ERROR; goto converr;
+			}
+			else
+			/* FIXME: checks are missing for the following ! */
+			if(!RSB_DO_FLAG_HAS(cflags,RSB_FLAG_USE_HALFWORD_INDICES))
+			if(RSB_SOME_ERROR(errval = rsb__csr_chk(csr.IA,csr.JA,csr.nr,csr.nc,csr.nnz,0)))
+			{
+				RSB_ERROR("coo->csr conversion produced corrupt results!\n");
+		       		errval = RSB_ERR_INTERNAL_ERROR; goto converr;
+			}
+			// RSB -> COO
+			// FIXME: 
+			// errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_coo_unsorteda.. 
+			// if ..
+			// rsb__destroy_coo_matrix_t(&icoo);
+			RSB_MTX_FREE(cmatrix);
+			// CSR -> RSB
+
+			if(incX==1 && incB==1) if(alphai==0 && betai==0) /* agnostic to these parameters */
+			{
+			kmatrix = rsb__do_mtx_alloc_from_csr_const(csr.VA,csr.IA,csr.JA,csr.nnz,csr.typecode,csr.nr,csr.nc,RSB_DEFAULT_ROW_BLOCKING,RSB_DEFAULT_COL_BLOCKING,cflags,&errval);
+			if((RSB_SOME_ERROR(errval)) || (!kmatrix) || (!rsb__mtx_chk(kmatrix)))
+			{ RSB_ERROR("csr->rsb construction problems\n"); goto err;}
+			RSB_MTX_FREE(kmatrix);
+			}
+
+			cmatrix = rsb__do_mtx_alloc_from_csr_inplace(csr.VA,csr.IA,csr.JA,csr.nnz,csr.typecode,csr.nr,csr.nc,RSB_DEFAULT_ROW_BLOCKING,RSB_DEFAULT_COL_BLOCKING,cflags,&errval);
+			if((RSB_SOME_ERROR(errval)) || (!cmatrix) || (!rsb__mtx_chk(cmatrix)))
+			{
+				if(RSB_SOME_ERROR(errval))
+					RSB_ERROR("csr->rsb construction problems\n");
+				else
+				if(!cmatrix)
+				{
+		       			errval = RSB_ERR_INTERNAL_ERROR;
+					RSB_ERROR("csr->rsb construction problems: did not succeed\n");
+				}
+				else
+				{
+					RSB_ERROR("csr->rsb construction problems: built a corrupted matrix\n");
+		       			errval = RSB_ERR_INTERNAL_ERROR;
+				}
+				goto err;
+			}
+			RSB_DO_FLAG_ADD(cmatrix->flags,RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS);
+			// RSB -> COO 
+#if 0
+			errval = rsb__do_switch_rsb_mtx_to_coo(cmatrix,&csr.VA,&csr.IA,&csr.JA,cflags|RSB_FLAG_SORTED_INPUT);
+#else
+			// still broken!
+			if(nnz<42) /* coverage testing purpose :P */
+			{
+				errval  = rsb__do_switch_rsb_mtx_to_coo(cmatrix,&csr.VA,&csr.IA,&csr.JA,RSB_DO_FLAG_FILTEROUT(cflags,RSB_FLAG_SORTED_INPUT));
+				errval |= rsb_util_sort_row_major_inner(csr.VA,csr.IA,csr.JA,cmatrix->nnz,dim,dim,typecode,RSB_DO_FLAG_FILTEROUT(cflags,RSB_FLAG_SORTED_INPUT));
+			}
+			else
+				errval = rsb__do_switch_rsb_mtx_to_coo(cmatrix,&csr.VA,&csr.IA,&csr.JA,cflags|RSB_FLAG_SORTED_INPUT);
+#endif
+			if((RSB_SOME_ERROR(errval)) || !rsb__are_coo_matrices_equal(&coo,&csr))
+			{
+				RSB_ERROR("rsb->coo conversion problems\n");
+		       		errval = RSB_ERR_INTERNAL_ERROR; goto converr;
+			}
+			// COO -> RSB 
+			cmatrix = rsb__do_mtx_alloc_from_coo_inplace(csr.VA,csr.IA,csr.JA,csr.nnz,csr.typecode,csr.nr,csr.nc,RSB_DEFAULT_ROW_BLOCKING,RSB_DEFAULT_COL_BLOCKING,cflags,&errval);
+			if((RSB_SOME_ERROR(errval)) || (!cmatrix) || (!rsb__mtx_chk(cmatrix)))
+			{
+				RSB_ERROR("coo->rsb construction problems\n");
+		       		errval = RSB_ERR_INTERNAL_ERROR; goto converr;
+			}
+			// RSB -> CSR 
+			RSB_DO_FLAG_ADD(cmatrix->flags,RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS);
+			errval = rsb__do_switch_rsb_mtx_to_csr_sorted(cmatrix,&csr.VA,&csr.IA,&csr.JA,cflags);
+			cmatrix=NULL;
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR("rsb->csr conversion problems\n");
+		       		errval = RSB_ERR_INTERNAL_ERROR; goto converr;
+			}
+			errval = rsb__util_uncompress_row_pointers_array(csr.IA,csr.nr,cflags,cflags,csr.IA);
+			if((RSB_SOME_ERROR(errval)) || !rsb__are_coo_matrices_equal(&coo,&csr))
+			{
+				RSB_ERROR("csr->coo conversion problems\n");
+//				rsb__debug_print_index_vectors_diff(coo.IA,csr.IA,csr.nnz,RSB_VECTORS_DIFF_DISPLAY_N_SMALL);
+//				rsb__debug_print_index_vectors_diff(coo.JA,csr.JA,csr.nnz,RSB_VECTORS_DIFF_DISPLAY_N_SMALL);
+//				rsb__debug_print_vectors_diff(coo.VA,csr.VA,csr.nnz,typecode,1,1,RSB_VECTORS_DIFF_DISPLAY_N_SMALL);
+		       		errval = RSB_ERR_INTERNAL_ERROR; goto converr;
+			}
+
+			if(!RSB_DO_FLAG_HAS(cflags,RSB_FLAG_FORTRAN_INDICES_INTERFACE))
+			{
+				RSB_DO_FLAG_ADD(cflags,RSB_FLAG_FORTRAN_INDICES_INTERFACE);
+				goto getcsrcooagain;
+			}
+			else
+				RSB_DO_FLAG_DEL(cflags,RSB_FLAG_FORTRAN_INDICES_INTERFACE);
+converr:
+			rsb__destroy_coo_matrix_t(&coo);
+			rsb__destroy_coo_matrix_t(&csr);
+cmedone:
+			RSB_MTX_FREE(cmatrix);
+		}
+#endif
+#if RSB_ALLOW_INTERNAL_GETENVS
+		if(! ( getenv("RSB_BMT_SPSV") && rsb__util_atoi(getenv("RSB_BMT_SPSV")) == 0 ) )
+#endif /* RSB_ALLOW_INTERNAL_GETENVS */
+		if(!RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_SYMMETRIC))
+		{
+		for(mmi=0;mmi< (dim<msmd?3:2) ;++mmi)
+		if(! (mmi==1 && ((incX!= 1) || (incB!=1) )  ))
+		{
+			const rsb_int nrhs=1;
+
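+			/* mmi selects among three equivalent multiply paths (Sparse BLAS
+			 * usmv, rsb__do_spmm with nrhs==1, serial spmv); each is then
+			 * undone by the matching triangular solve with 1/alpha, which
+			 * shall turn X back into B (verified further below). */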
+			/* TODO : should fill X and B with sentinel values ! */
+			if(RSB_SOME_ERROR(rsb__cblas_Xscal(typecode,dim,&zero,X,incX)) || RSB_SOME_ERROR(rsb__fill_with_ones (B,typecode,dim,incB)))
+			{ errval = RSB_ERR_INTERNAL_ERROR; goto err; }
+
+			if(mmi==0)
+			if( rsb__BLAS_Xusmv(transT,alpha,T,B,incB,beta,X,incX) != RSB_BLAS_NO_ERROR )
+			{
+				errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR("error while performing Unsymmetric USMV\n"); goto err;
+			}
+
+			if(mmi==1)
+			if(RSB_SOME_ERROR( rsb__do_spmm(trans,alpha,mtxAp,nrhs,RSB_FLAG_WANT_COLUMN_MAJOR_ORDER,B,dim,beta,X,dim,RSB_OP_FLAG_DEFAULT)) )
+			{
+			       	errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR("Unsymmetric USMV failed!\n"); goto err;
+		       	}
+
+			if(mmi==2)
+			if(RSB_SOME_ERROR( rsb_do_spmv_general(trans,alpha,mtxAp,B,incB,beta,X,incX,RSB_OP_FLAG_WANT_SERIAL RSB_DEFAULT_OUTER_NRHS_SPMV_ARGS)))
+			{
+				errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR("Unsymmetric USMV failed!\n"); goto err;
+			}
+
+		if(isinvertible)
+		{
+			if(mmi==0)
+			if( rsb__BLAS_Xussv(transT,alpha_inv,T,X,incX) != RSB_BLAS_NO_ERROR )
+			{
+				errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR("error while performing USSV\n"); goto err;
+			}
+
+			if(mmi==1)
+			if(RSB_SOME_ERROR( rsb_spsm(trans,alpha_inv,mtxAp,nrhs,RSB_FLAG_WANT_COLUMN_MAJOR_ORDER,alpha_inv,X,dim,X,dim)) )
+			{
+				errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR("error while performing USSV\n"); goto err;
+			}
+
+			if(mmi==2)
+			if( rsb__do_spsv(trans,alpha_inv,mtxAp,X,incX,X,incX) != RSB_BLAS_NO_ERROR )
+			{
+				errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR("error while performing USSV\n"); goto err;
+			}
+		}
+		if(!isinvertible)
+			rsb__cblas_Xscal(typecode,dim,&zero,B,incB);
+		if(stype != blas_general)
+		if(stype != blas_lower_hermitian) /* FIXME: complete this case */
+		if(stype != blas_upper_hermitian) /* FIXME: complete this case */
+		if( RSB_SOME_ERROR(rsb__do_are_same(B,X,dim,typecode,incB,incX)) )
+		{
+			RSB_ERROR("failed post combined USMV-USSV check!\n");
+			rsb__debug_print_vectors_diff(B,X,dim,typecode,incB,incX,RSB_VECTORS_DIFF_DISPLAY_N);
+			errval = RSB_ERR_INTERNAL_ERROR;
+			goto err;
+		}
+		}
+		}
+
+		if(betai > 0 || alphai > 0)
+			goto err; /* only the previous tests were affected by alpha and beta */
+		/*
+		 TODO: complete the following ...
+		 
+		if( rsb__BLAS_Xusget_rows_sums(T,rs,transT) != RSB_BLAS_NO_ERROR )
+		{
+			RSB_ERROR("error getting rows sum!\n");
+			errval = RSB_ERR_INTERNAL_ERROR;
+			goto err;
+		}
+		else
+		{
+		*/
+
+		/* TODO: need parameters scan here: */
+		if( RSB_SOME_ERROR(errval = rsb__do_upd_vals(mtxAp,RSB_ELOPF_NEG,NULL))) { RSB_ERROR("Failed negating.\n"); goto smerr; }
+		if( RSB_SOME_ERROR(errval = rsb__do_upd_vals(mtxAp,RSB_ELOPF_NEG,NULL))) { RSB_ERROR("Failed negating.\n"); goto smerr; }
+		if(RSB_SOME_ERROR(rsb__cblas_Xscal(typecode,1,&zero,inrm,1))){ errval = RSB_ERR_INTERNAL_ERROR; RSB_ERROR("!\n"); goto err; }
+
+		rsb__util_set_area_to_converted_integer(inrm,typecode,0);
+
+		if( rsb__BLAS_Xusget_infinity_norm(T,inrm,transT) != RSB_BLAS_NO_ERROR )
+		{
+			RSB_ERROR("error getting infinity norm!\n");
+			errval = RSB_ERR_INTERNAL_ERROR;
+			goto err;
+		}
+		else
+		{
+			if(is_really_empty)
+			{
+				rsb__util_set_area_to_converted_integer(D,typecode,0);
+			}
+			else
+			{
+				if(isempty)
+					rsb__util_set_area_to_converted_integer(D,typecode,1  );
+				else
+					rsb__util_set_area_to_converted_integer(D,typecode,dim);
+			}
+			if( RSB_SOME_ERROR(rsb__do_are_same(inrm,D,1,typecode,1,1)) )
+			{
+				RSB_ERROR("matrix norm is not what was expected!\n");
+				rsb__debug_print_vectors_diff(inrm,D,1,typecode,1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto err;
+			}
+
+			mtxAp = rsb__BLAS_inner_matrix_retrieve(T);
+			if(incX==1 && incB==1) if(alphai==0 && betai==0) /* agnostic to these parameters */
+			if((!isempty) && !(mtxAp->nnz == 0 && diaga[diagi]==blas_unit_diag))
+			{
+				/* FIXME: Frobenius norm (RSB_EXTF_NORM_TWO) is untested! */
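+				/* Scale the matrix by 1, then by 2, then divide by 2 again,
+				 * each time checking that the infinity norm scales accordingly
+				 * (adjusting for an implicitly represented unit diagonal). */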
+				rsb__util_set_area_to_converted_integer(D,typecode,1);
+				rsb__do_upd_vals(mtxAp,RSB_ELOPF_MUL,D);
+				rsb__util_set_area_to_converted_integer(inrm_,typecode,0);
+				RSB_LSTPROBE(rsb__BLAS_Xusget_infinity_norm(T,inrm_,transT),"");
+				RSB_LSTPROBE(rsb__do_are_same(inrm_,inrm,1,typecode,1,1),"");
+				rsb__util_set_area_to_converted_integer(D,typecode,2);
+
+				rsb__do_upd_vals(mtxAp,RSB_ELOPF_MUL,D);
+				rsb__util_set_area_to_converted_integer(inrm_,typecode,0);
+				RSB_LSTPROBE(rsb__BLAS_Xusget_infinity_norm(T,inrm_,transT),"");
+				if(mtxAp->nnz && diaga[diagi]==blas_unit_diag)
+					rsb__util_increase_by_one(inrm_,0,mtxAp->typecode);
+				rsb__util_vector_div(inrm_,D,typecode,1);
+				RSB_LSTPROBE(rsb__do_are_same(inrm_,inrm,1,typecode,1,1),"");
+
+				rsb__do_upd_vals(mtxAp,RSB_ELOPF_DIV,D);
+				rsb__util_set_area_to_converted_integer(inrm_,typecode,0);
+				RSB_LSTPROBE(rsb__BLAS_Xusget_infinity_norm(T,inrm_,transT),"");
+				RSB_LSTPROBE(rsb__do_are_same(inrm_,inrm,1,typecode,1,1),"");
+				/* TODO: there are many more subcases for rsb__clone! */
+				// if( ((cmatrix = rsb__clone_simple(mtxAp))!=NULL))
+				cmatrix = NULL;
+				errval = rsb__clone(&cmatrix,RSB_NUMERICAL_TYPE_SAME_TYPE,RSB_TRANSPOSITION_N,NULL,mtxAp,RSB_FLAG_IDENTICAL_FLAGS);
+				if( (cmatrix !=NULL))
+			       	{
+				RSB_LSTPROBE(rsb__clone(&cmatrix,RSB_NUMERICAL_TYPE_SAME_TYPE,RSB_TRANSPOSITION_T,NULL,cmatrix,RSB_FLAG_IDENTICAL_FLAGS),"");
+				RSB_LSTPROBE(rsb__clone(&cmatrix,RSB_NUMERICAL_TYPE_SAME_TYPE,RSB_TRANSPOSITION_T,NULL,cmatrix,RSB_FLAG_IDENTICAL_FLAGS),"");
+				if(RSB_IS_MATRIX_TYPE_COMPLEX(typecode))/* FIXME: shall fix many vector-operating routines, first */
+				{
+					/* TODO: more checking */
+					RSB_LSTPROBE(rsb__clone(&cmatrix,RSB_NUMERICAL_TYPE_SAME_TYPE,RSB_TRANSPOSITION_C,NULL,cmatrix,RSB_FLAG_IDENTICAL_FLAGS),"");
+					RSB_LSTPROBE(rsb__clone(&cmatrix,RSB_NUMERICAL_TYPE_SAME_TYPE,RSB_TRANSPOSITION_C,NULL,cmatrix,RSB_FLAG_IDENTICAL_FLAGS),"");
+				}
+					RSB_MTX_FREE(cmatrix);
+				}
+
+				if(dim>0)
+			       	{
+					IA[0]=dim-1; JA[0]=dim-1;
+					RSB_LSTPROBE(rsb__do_get_elements(mtxAp,VA,IA,JA,1,mtxAp->flags),"");
+					if(!(diaga[diagi]==blas_unit_diag))
+					RSB_LSTPROBE(rsb__do_set_elements(mtxAp,VA,IA,JA,1,mtxAp->flags),"");
+				}
+				if(dim>1)
+			       	{
+					const rsb_int mmudim=100;
+					IA[0]=dim-1; JA[0]=0;
+					/* TODO: shall check value! */
+					if(stype==blas_upper_triangular)
+					{
+						RSB_LSTPROBE(rsb__do_get_elements(mtxAp,VA,JA,IA,1,mtxAp->flags),"");
+						if(!(diaga[diagi]==blas_unit_diag))
+						RSB_LSTPROBE(rsb__do_set_elements(mtxAp,VA,JA,IA,1,mtxAp->flags),"");
+						RSB_LSTPROBI(rsb__do_get_elements(mtxAp,VA,IA,JA,1,mtxAp->flags),"");
+						RSB_LSTPROBI(rsb__do_set_elements(mtxAp,VA,IA,JA,1,mtxAp->flags),"");
+					}
+					else
+					{
+						RSB_LSTPROBE(rsb__do_get_elements(mtxAp,VA,IA,JA,1,mtxAp->flags),"");
+						if(!(diaga[diagi]==blas_unit_diag))
+						RSB_LSTPROBE(rsb__do_set_elements(mtxAp,VA,IA,JA,1,mtxAp->flags),"");
+						RSB_LSTPROBI(rsb__do_get_elements(mtxAp,VA,JA,IA,1,mtxAp->flags),"");
+						RSB_LSTPROBI(rsb__do_set_elements(mtxAp,VA,JA,IA,1,mtxAp->flags),"");
+					}
+
+					if(dim>1 && mtxAp->nnz>0 )
+					if(dim < mmudim )
+					{
+						struct rsb_mtx_t*LU[]={NULL,NULL};
+						RSB_LSTPROBE(rsb__do_get_preconditioner(LU,mtxAp,RSB_PRECF_ILU0,NULL),"");
+						RSB_MTX_FREE(LU[0]);
+						RSB_MTX_FREE(LU[1]);
+					}
+#if 0
+					if(dim < mmudim && ( cmatrix = rsb__clone_simple(mtxAp)) !=NULL)
+					{
+					if(stype==blas_upper_triangular)
+					{
+				RSB_LSTPROBE(rsb_mtx_set_values_pattern_changing(&cmatrix,VA,IA,JA,1,mtxAp->flags),"");
+					}
+					else
+					{
+				RSB_LSTPROBE(rsb_mtx_set_values_pattern_changing(&cmatrix,VA,JA,IA,1,mtxAp->flags),"");
+					}
+					RSB_MTX_FREE(cmatrix);
+					}
+#endif
+				}
+
+			}
+		}
+
+		RSB_LSTPROBE(rsb__do_elemental_binop(mtxAp, RSB_ELOPF_POW, &three),""); /* FIXME: shall test systematically all the others as well !*/
+
+#if RSB_ALLOW_INTERNAL_GETENVS
+		if(! ( getenv("RSB_BMT_GET") && rsb__util_atoi(getenv("RSB_BMT_GET")) == 0 ) )
+#endif /* RSB_ALLOW_INTERNAL_GETENVS */
+		if(1)
+		{
+			rsb_coo_idx_t m=dim,k=dim;
+			rsb_coo_idx_t rc=m/3,fr = rc,lr = RSB_MIN(m-1,2*rc),ri,
+					cc=k/3,fc=cc,lc = RSB_MIN(k-1,2*cc),ci;
+			rsb_nnz_idx_t bnnz=0,cnnz=0,off=0;
+			const void*vp=NULL;
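+			/* Cross-check nonzero extraction on the middle-third subblock:
+			 * first count via rsb__do_get_block_nnz(), then recount one
+			 * element at a time via rsb__do_coo_element_inner_address(),
+			 * then extract the block and verify each extracted value. */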
+			bnnz = rsb__do_get_block_nnz(mtxAp,fr,lr,fc,lc,RSB_FLAG_C_INDICES_INTERFACE,&errval);
+			// FIXME: TODO: should also test rsb__do_get_block_sparse()
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR("sparse subblocks nnz count mechanisms seem broken.\n");
+				rsb__do_perror(NULL,errval);
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto err;
+			}
+			for(ri=fr;ri<=lr;++ri)
+				for(ci=fc;ci<=lc;++ci)
+					cnnz+=(rsb__do_coo_element_inner_address(mtxAp,ri,ci)!=NULL);
+			if(bnnz!=cnnz)
+			{
+				RSB_ERROR("sparse subblocks nnz count mechanisms seem broken (%d vs %d counted in (%d,%d)..(%d,%d)).\n",bnnz,cnnz,fr,fc,lr,lc);
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto err;
+			}
+
+			rsb__util_coo_array_set(IA,nnz,RSB_MARKER_COO_VALUE);
+			rsb__util_coo_array_set(JA,nnz,RSB_MARKER_COO_VALUE);
+			errval = rsb__do_get_block_sparse(mtxAp,VA,IA,JA,fr,lr,fc,lc,NULL,NULL,&cnnz,RSB_FLAG_C_INDICES_INTERFACE);
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR("sparse subblocks nnz get mechanisms seem broken.\n");
+				rsb__do_perror(NULL,errval);
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto err;
+			}
+
+			if(bnnz!=cnnz)
+			{
+				RSB_ERROR("sparse subblocks nnz get mechanisms seem broken (%d vs %d counted in (%d,%d)..(%d,%d)).\n",bnnz,cnnz,fr,fc,lr,lc);
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto err;
+			}
+
+			for(off=0;off<bnnz;++off)
+			if((vp = rsb__do_coo_element_inner_address(mtxAp,IA[off],JA[off]))!=NULL)
+			{
+				if(RSB_VA_MEMCMP(vp,0,VA,off,mtxAp->el_size))
+				{
+					RSB_ERROR("value of (%d,%d)@%d extracted from sparse does not seem to be the right one\n",IA[off],JA[off],off);
+					errval = RSB_ERR_INTERNAL_ERROR;
+					goto err;
+				}
+			}
+			else
+			{
+					RSB_ERROR("an element (%d,%d)@%d extracted from sparse does not seem to be present\n",IA[off],JA[off],off);
+					errval = RSB_ERR_INTERNAL_ERROR;
+					goto err;
+			}
+		}
+
+#if RSB_ALLOW_INTERNAL_GETENVS
+		if(! ( getenv("RSB_BMT_SCALE") && rsb__util_atoi(getenv("RSB_BMT_SCALE")) == 0 ) )
+#endif /* RSB_ALLOW_INTERNAL_GETENVS */
+	{
+		if(RSB_SOME_ERROR(rsb__cblas_Xscal(typecode,dim,&zero,D,incD))){ errval = RSB_ERR_INTERNAL_ERROR; RSB_ERROR("!\n"); goto err; }
+	  	if( rsb__BLAS_Xusget_diag(T,D) != RSB_BLAS_NO_ERROR )
+		{
+			RSB_ERROR("!\n");
+			errval = RSB_ERR_INTERNAL_ERROR;
+			goto err;
+		}
+		else
+		{
+			if(is_really_empty)
+			{if(RSB_SOME_ERROR(rsb__cblas_Xscal(typecode,dim,&zero,B,incB))){ errval = RSB_ERR_INTERNAL_ERROR; RSB_ERROR(RSB_ERRM_NL); goto err; }}
+			else
+			{if(RSB_SOME_ERROR(rsb__fill_with_ones(B,typecode,dim,incB))){ errval = RSB_ERR_INTERNAL_ERROR; RSB_ERROR(RSB_ERRM_NL); goto err; }}
+			if(RSB_SOME_ERROR(rsb__do_are_same(D,B,dim,typecode,incD,incB)))
+			{
+				RSB_ERROR("diagonal vector is not what was expected!\n");
+				rsb__debug_print_vectors_diff(D,B,dim,typecode,incD,incB,RSB_VECTORS_DIFF_DISPLAY_N);
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto err;
+			}
+		}
+
+		incB=1; /* FIXME: from now on, no stride */
+		incD=1; /* FIXME: from now on, no stride */
+		if(RSB_SOME_ERROR(rsb__fill_with_increasing_values(B,typecode,dim))){ errval = RSB_ERR_INTERNAL_ERROR; RSB_ERROR(RSB_ERRM_NL); goto err; }
+	  	if(rsb__BLAS_Xusrows_scale(T,B,transT) != RSB_BLAS_NO_ERROR)
+		{
+			RSB_ERROR("!\n");
+			errval = RSB_ERR_INTERNAL_ERROR;
+			goto err;
+		}
+
+		if(RSB_SOME_ERROR(rsb__cblas_Xscal(typecode,dim,&zero,D,incD))){ errval = RSB_ERR_INTERNAL_ERROR; RSB_ERROR(RSB_ERRM_NL); goto err; }
+	  	if( rsb__BLAS_Xusget_diag(T,D) != RSB_BLAS_NO_ERROR )
+		{
+			RSB_ERROR("!\n");
+			errval = RSB_ERR_INTERNAL_ERROR;
+			goto err;
+		}
+		else
+		if(diaga[diagi]==blas_non_unit_diag && !isempty) // the implicit diagonal won't be scaled :)
+		{
+			rsb_nnz_idx_t n;
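+			/* after rows scaling, the diagonal shall match the scaling
+			 * vector; then the usget/usset element round trip is exercised
+			 * on the diagonal entries. */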
+			if( RSB_SOME_ERROR(rsb__do_are_same(D,B,dim,typecode,incD,incB)) )
+			{
+				RSB_ERROR("!\n");
+				rsb__debug_print_vectors_diff(D,B,dim,typecode,incD,incB,RSB_VECTORS_DIFF_DISPLAY_N);
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto err;
+			}
+			if(RSB_SOME_ERROR(rsb__cblas_Xscal(typecode,dim,&zero,B,incB))){ errval = RSB_ERR_INTERNAL_ERROR; RSB_ERROR(RSB_ERRM_NL); goto err; }
+			for(n=0;n<dim;++n) rsb__BLAS_Xusget_element(T,n,n,((rsb_char_t*)B)+el_size*n*incB);
+			if( RSB_SOME_ERROR(rsb__do_are_same(D,B,dim,typecode,incD,incB)) )
+			{
+				RSB_ERROR("!\n");
+				rsb__debug_print_vectors_diff(D,B,dim,typecode,incD,incB,RSB_VECTORS_DIFF_DISPLAY_N);
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto err;
+			}
+			if(RSB_SOME_ERROR(rsb__fill_with_increasing_values(B,typecode,dim))){errval = RSB_ERR_INTERNAL_ERROR; RSB_ERROR(RSB_ERRM_NL); goto err; }
+			for(n=0;n<dim;++n) rsb__BLAS_Xusset_element(T,n,n,((rsb_char_t*)B)+el_size*n*incB);
+			if(RSB_SOME_ERROR(rsb__cblas_Xscal(typecode,dim,&zero,D,incD))){ errval = RSB_ERR_INTERNAL_ERROR; RSB_ERROR(RSB_ERRM_NL); goto err; }
+			for(n=0;n<dim;++n) rsb__BLAS_Xusget_element(T,n,n,((rsb_char_t*)D)+el_size*n*incB);
+			if( RSB_SOME_ERROR(rsb__do_are_same(D,B,dim,typecode,incD,incB)) )
+			{
+				RSB_ERROR("!\n");
+				rsb__debug_print_vectors_diff(D,B,dim,typecode,incD,incB,RSB_VECTORS_DIFF_DISPLAY_N);
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto err;
+			}
+		}
+
+	}
+		//if((rnnz = rsb__dodo_get_rows_nnz(mtxAp,0,dim-1,RSB_FLAG_C_INDICES_INTERFACE,NULL))!=ndnnz)
+		rnnz=10;
+		if(!isempty)/* FIXME */
+		if((rsb__BLAS_Xusget_rows_nnz(T,0,dim-1,&rnnz)!=RSB_BLAS_NO_ERROR) || (rnnz!=ndnnz))
+		{
+			RSB_ERROR("Mismatch between the extracted rows' nonzero count and the non-diagonal nonzero count: %d != %d\n",(int)rnnz,(int)ndnnz);
+		       	errval = RSB_ERR_INTERNAL_ERROR; goto err;
+	       	}
+
+		rnnz=0;
+		if(!isempty)/* FIXME */
+		if(rsb__BLAS_Xusget_matrix_nnz(T,&rnnz)!=RSB_BLAS_NO_ERROR || rnnz!=ndnnz)
+		{
+			RSB_ERROR("Mismatch between the effective matrix nonzero count and the input nonzero count: %d != %d\n",(int)rnnz,(int)ndnnz);
+		       	errval = RSB_ERR_INTERNAL_ERROR; goto err;
+		}
+
+		if(RSB_SOME_ERROR(rsb__cblas_Xscal(typecode,nnz,&zero,VA,1))){ errval = RSB_ERR_INTERNAL_ERROR; RSB_ERROR(RSB_ERRM_NL); goto err; }
+		rsb__util_coo_array_set(IA,nnz,RSB_MARKER_COO_VALUE);
+		rsb__util_coo_array_set(JA,nnz,RSB_MARKER_COO_VALUE);
+		rnnz=0;
+		//if(rsb__do_get_rows_sparse(RSB_TRANSPOSITION_N,NULL,mtxAp,VA,IA,JA,0,dim-1,&rnnz,RSB_FLAG_NOFLAGS|RSB_FLAG_SORT_INPUT))
+		if(rsb__BLAS_Xusget_rows_sparse(T,VA,IA,JA,&rnnz,0,dim-1)!=RSB_BLAS_NO_ERROR)
+		{
+			RSB_ERROR(RSB_ERRM_NL);
+		       	errval = RSB_ERR_INTERNAL_ERROR; goto err;
+		}
+		else
+		if(diaga[diagi]==blas_non_unit_diag)
+		{
+			rsb_nnz_idx_t n;
+			for(n=0;n<nnz;++n)
+			if(IA[n] != JA[n])
+			if(( rsb__do_coo_element_inner_address(mtxAp,IA[n],JA[n] ) == NULL) ||
+			 (RSB_SOME_ERROR(rsb__do_are_same(rsb__do_coo_element_inner_address(mtxAp,IA[n],JA[n]),
+						((rsb_char_t*)VA)+el_size*n,1,typecode,1,1) ) ))
+				{
+					RSB_ERROR("@%d (%d,%d) : 0x%x\n",n,IA[n],JA[n],rsb__do_coo_element_inner_address(mtxAp,IA[n],JA[n] ));
+				       	errval = RSB_ERR_INTERNAL_ERROR; goto err;
+				}
+		}
+		if(!RSB_DO_TOOFEWNNZFORCSR(nnz,dim))/* we don't want IA overwrite */
+		{
+			if(RSB_SOME_ERROR(rsb__do_get_csr(typecode,mtxAp,(void*)(VA),IA,JA,RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS)))
+			{
+				RSB_ERROR(RSB_ERRM_NL);
+			       	errval = RSB_ERR_INTERNAL_ERROR; goto err;
+			}
+			else
+			if(diaga[diagi]==blas_non_unit_diag)
+			{
+				// TODO: is_csr_sorted ?
+				rsb_nnz_idx_t n;
+				rsb_coo_idx_t i;
+
+				for(i=0;i<dim;++i)
+				if(!rsb__util_is_nnz_array_sorted_up(JA+IA[i],IA[i+1]-IA[i]))
+				{
+					RSB_ERROR(RSB_ERRM_NL);
+				       	errval = RSB_ERR_INTERNAL_ERROR; goto err;
+				}
+
+				for(i=0;i<dim;++i)
+				for(n=IA[i];n<IA[i+1];++n)
+				if(JA[n]<0 || JA[n]>=dim)
+				{
+					RSB_ERROR(RSB_ERRM_NL);
+				       	errval = RSB_ERR_INTERNAL_ERROR; goto err;
+				}
+
+				for(i=0;i<dim;++i)
+				for(n=IA[i];n<IA[i+1];++n)
+				if(i != JA[n])
+				if(( rsb__do_coo_element_inner_address(mtxAp,i,JA[n] ) == NULL) ||
+				 (RSB_SOME_ERROR(rsb__do_are_same(rsb__do_coo_element_inner_address(mtxAp,i,JA[n]),
+						((rsb_char_t*)VA)+el_size*n,1,typecode,1,1) ) ))
+				{
+					RSB_ERROR("@%d, %d %d : 0x%x\n",n,i,JA[n],rsb__do_coo_element_inner_address(mtxAp,i,JA[n] ));
+				       	errval = RSB_ERR_INTERNAL_ERROR; goto err;
+				}
+			}
+		}
+		if(!RSB_DO_TOOFEWNNZFORCSR(nnz,dim))/* we don't want JA overwrite */
+		{
+			if(RSB_SOME_ERROR(rsb__do_get_csc(mtxAp,(void*)(&VA),&JA,&IA)))
+			{
+				RSB_ERROR(RSB_ERRM_NL);
+			       	errval = RSB_ERR_INTERNAL_ERROR; goto err;
+			}
+			else
+			if(diaga[diagi]==blas_non_unit_diag)
+			{
+				rsb_nnz_idx_t n;
+				rsb_coo_idx_t j;
+
+				if( RSB_SOME_ERROR(rsb__csc_chk(JA,IA,dim,dim,JA[dim],0) ) )
+				{
+					RSB_ERROR(RSB_ERRM_NL);
+			       		errval = RSB_ERR_INTERNAL_ERROR; goto err;
+				}
+
+				for(j=0;j<dim;++j)
+				for(n=JA[j];n<JA[j+1];++n)
+				if(j != IA[n])
+				if(( rsb__do_coo_element_inner_address(mtxAp,IA[n],j) == NULL) ||
+				 ( RSB_SOME_ERROR(rsb__do_are_same(rsb__do_coo_element_inner_address(mtxAp,IA[n],j),
+						((rsb_char_t*)VA)+el_size*n,1,typecode,1,1))) )
+				{
+					RSB_ERROR("@%d, %d %d : 0x%x\n",n,IA[n],j,rsb__do_coo_element_inner_address(mtxAp,IA[n],j ));
+				       	errval = RSB_ERR_INTERNAL_ERROR; goto err;
+				}
+			}
+
+			if(incX==1 && incB==1) if(alphai==0 && betai==0) /* agnostic to these parameters */
+			{
+			rsb_flags_t cflags = RSB_DO_FLAG_FILTEROUT(mtxAp->flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE);
+			kmatrix = rsb__do_mtx_alloc_from_csc_const(VA,IA,JA,/*nnz*/JA[dim],typecode,dim,dim,RSB_DEFAULT_ROW_BLOCKING,RSB_DEFAULT_COL_BLOCKING,cflags,&errval);
+			if(RSB_SOME_ERROR(errval) || (!kmatrix) || (!rsb__mtx_chk(kmatrix)))
+			{ RSB_ERROR("csc->rsb construction problems\n"); goto err;}
+			RSB_MTX_FREE(kmatrix);
+			}
+
+		}
+err:
+		if(errval == RSB_ERR_NO_ERROR)
+		{
+		}
+		else
+		if(mtxAp && X && B)
+		{
+			if(mtxAp->nnz<20)
+			       	rsb__do_file_mtx_save(mtxAp,NULL),
+				RSB_INFO("\n"),
+				RSB_INFO("actual results vs correct results:\n"),
+				rsb__debug_print_vectors(X,B,dim,incX,incB,typecode);
+			else
+			if( RSB_SOME_ERROR(rsb__do_are_same(B,X,dim,typecode,incB,incX) ))
+			{
+				RSB_INFO("actual results vs correct results:\n"),
+				rsb__debug_print_vectors_diff(X,B,dim,typecode,incX,incB,RSB_VECTORS_DIFF_DISPLAY_N);
+				errval = RSB_ERR_INTERNAL_ERROR;
+			}
+#if RSB_WANT_VERBOSE_FAILURES
+			RSB_INFO("Matrix summary:\n");
+		       	RSB_INFO_MATRIX_SUMMARY(mtxAp);
+			RSB_INFO("\n");
+ 			rsb__do_print_matrix_stats(mtxAp,RSB_CONST_DUMP_RECURSION_BRIEF,NULL);
+			RSB_INFO("\n");
+#endif /* RSB_WANT_VERBOSE_FAILURES */
+		}
+		if( T != RSB_BLAS_INVALID_MATRIX && rsb__BLAS_Xusds(T) != RSB_BLAS_NO_ERROR )
+			errval = RSB_ERR_INTERNAL_ERROR;
+		T = RSB_BLAS_INVALID_MATRIX;
+		RSB_MTX_FREE(cmatrix);
+
+		RSB_CONDITIONAL_FREE(X);
+		RSB_CONDITIONAL_FREE(B);
+		RSB_CONDITIONAL_FREE(D);
+		RSB_CONDITIONAL_FREE(IA);
+		RSB_CONDITIONAL_FREE(JA);
+		RSB_CONDITIONAL_FREE(VA);
+		if(to.wqt!=RSB_BOOL_TRUE)
+		RSB_INFO("%s%7d: type:%c sym:%s incX:%d incB:%d dim:%10d transT:%c alpha:%+2d beta:%+2d diag:%c subms:%5d nz:%d",btps,passed,(char)typecode,RSB_BLAS_MT_STR(stype),incX,incB,dim,tc,alphaa[alphai],betaa[betai],RSB_BLAS_DIAG_CHAR(diaga[diagi]),submatrices,rnz);
+		errvalf|=errval;
+
+		if(errval == RSB_ERR_NO_ERROR)
+		{
+			if(to.wqt!=RSB_BOOL_TRUE)
+			RSB_INFO(" is ok\n");
+			++passed;
+		}
+		else
+		{
+			if(to.wqt!=RSB_BOOL_TRUE)
+			RSB_INFO(" is not ok\n");
+			++failed;
+			RSB_INFO("Terminating testing due to errors.\n");
+			goto done;
+		}
+
+		if(RSB_SHALL_QUIT)
+		{
+			RSB_INFO("Terminating testing earlier due to interactive user request: test took %lf s, max allowed was %lf.\n",tt,to.mtt);
+			goto done;
+		}
+
+#if RSB_TESTER_ALLOW_TIMEOUT
+		if(to.mtt != RSB_TIME_ZERO && (tt = rsb_time()-tt0)>to.mtt)
+		{
+			RSB_INFO("Terminating testing earlier due to user timeout request: test took %lf s, max allowed was %lf.\n",tt,to.mtt);
+			goto done;
+		}
+#endif /* RSB_TESTER_ALLOW_TIMEOUT */
+#if RSB_ALLOW_INTERNAL_GETENVS
+		if( maxtc != 0 && passed + failed >= maxtc )
+		{
+			RSB_INFO("Terminating testing earlier due to the user-requested limit of %d tests.\n",maxtc);
+			goto done;
+		}
+#endif /* RSB_ALLOW_INTERNAL_GETENVS */
+		if(to.tur==RSB_BOOL_TRUE && instantiated_some_recursive==1 && failed==0)
+		{
+			RSB_INFO("ALL TESTS PASSED SO FAR, AND ALSO INSTANTIATED ONE \"RECURSIVE\" MATRIX... THIS IS ENOUGH\n");
+			errval = RSB_ERR_NO_ERROR;
+			goto done;
+		}
+	}
+done:
+	if(to.rrm==RSB_BOOL_TRUE && instantiated_some_recursive==0 && failed==0)
+	{
+		RSB_INFO("STRANGE: TESTS PASSED, BUT DID NOT INSTANTIATE ANY \"RECURSIVE\" MATRIX... RAISING AN ERROR FOR THIS\n");
+		errvalf |= RSB_ERR_INTERNAL_ERROR;
+		rsb__do_perror(NULL,RSB_ERR_INTERNAL_ERROR);
+	}
+	RSB_INFO("	PASSED:%d\n	FAILED:%d\n",passed,failed);
+	RSB_INFO("ADVANCED SPARSE BLAS TEST: END (SUCCESS)\n");
+//	return RSB_ERR_NO_ERROR;
+	errval=errvalf;
+ret:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb_blas_runtime_limits_tester(void)
+{
+	/**
+	 \ingroup gr_internals
+	 
+	 TODO: INCOMPLETE
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t maxcoo = RSB_MAX_MATRIX_DIM;
+	rsb_coo_idx_t * IA=NULL;
+	size_t maxcoo_bytes=0;
+	const size_t minmem=1;
+	size_t free_mem=0;
+	size_t tot_mem=0;
+	rsb_nnz_idx_t i;
+	rsb_nnz_idx_t fel;
+	
+	// FIXME: should fix the code revolving around the following:
+	//	RSB_STDOUT("%u>=%u : %d\n", rsb__nearest_power_of_two(maxcoo), maxcoo , rsb__nearest_power_of_two(maxcoo)>= maxcoo );
+	RSB_INFO("Beginning large binary search test.\n");
+	maxcoo_bytes=((size_t)maxcoo)*sizeof(rsb_coo_idx_t);
+
+	free_mem = rsb__sys_free_system_memory();
+	tot_mem = rsb__sys_total_system_memory();
+
+	RSB_INFO("Detected %zu bytes of memory, including %zu bytes of free memory.\n",tot_mem,free_mem);
+	
+	if(tot_mem<minmem || free_mem<minmem)
+	{
+		RSB_INFO("Too little memory detected: it seems your system is either not well supported or not standards compliant.\n");
+		tot_mem=free_mem=1024*1024*16;
+		maxcoo=tot_mem/sizeof(rsb_coo_idx_t);
+		RSB_INFO("Will fall back to a reasonably small value: %zu bytes as the assumed free memory.\n",free_mem);
+		//goto skip_max_coo_test;
+	}
+	RSB_INFO("On this system, the maximal array of coordinates can have %zu elements and occupy %zu bytes.\n",((size_t)maxcoo),maxcoo_bytes);
+
+	if(tot_mem<maxcoo_bytes || RSB_MUL_OVERFLOW(maxcoo,sizeof(rsb_coo_idx_t),rsb_nnz_idx_t,size_t))
+	{
+		/* FIXME: overflow cases shall be handled better */
+		maxcoo=(3*free_mem/4)/sizeof(rsb_coo_idx_t);
+		maxcoo_bytes=sizeof(rsb_coo_idx_t)*maxcoo;
+		RSB_INFO("Will perform the test using less memory (%zu MB, i.e. %zu bytes) than the maximal coordinate indices array would require.\n",maxcoo_bytes/(1024*1024),maxcoo_bytes);
+	}
+
+	if(tot_mem<maxcoo_bytes)
+	{
+		RSB_INFO("Skipping test: too little memory.\n");
+		goto skip_max_coo_test;
+	}
+		
+	if(free_mem<maxcoo_bytes)
+	{
+		RSB_INFO("Detected %zd bytes of free memory, needed %zd.\nLet's see whether the test succeeds...\n",free_mem,maxcoo_bytes);
+		//RSB_STDOUT("detected %zd bytes of free memory, needed %zd.\n",free_mem,maxcoo_bytes);
+		//RSB_STDOUT("detected %zd bytes of free memory, needed %zd.\n");
+		//RSB_STDOUT("detected %zd bytes of free memory, needed %zd. skipping test.\n",free_mem,maxcoo_bytes);
+		//goto skip_max_coo_test;
+	}
+	IA = rsb__calloc(sizeof(rsb_coo_idx_t)*maxcoo);
+	if(!IA)
+	{
+		RSB_INFO("Failed (c)allocating %zd elements (%zd bytes)\n",(size_t)maxcoo,maxcoo_bytes);
+		if(free_mem>maxcoo_bytes)
+		{
+			errval = RSB_ERR_ENOMEM;
+			goto err;
+		}
+		else
+			goto skip_max_coo_test;
+	}
+	else
+	{
+		RSB_INFO("(c)allocated %zd elements (%zd bytes)\n",(size_t)maxcoo,maxcoo_bytes);
+	}
+
+	for(i=0;i<maxcoo;++i)IA[i]=i;
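+	/* IA is sorted by construction, so the binary search performed by
+	 * rsb__seek_coo_idx_t() below shall find the last element at its own index. */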
+
+	fel = rsb__seek_coo_idx_t(IA,maxcoo-1,maxcoo);
+	if(fel == RSB_MARKER_NNZ_VALUE || IA[fel]!=fel)
+	{
+		RSB_INFO("Failed retrieving array last element!\n");
+		errval = RSB_ERR_INTERNAL_ERROR;
+		goto err;
+	}
+	else
+		RSB_INFO("Succeeded retrieving array last element.\n");
+
+	RSB_INFO("Successfully performed large binary search test.\n");
+	goto done;
+skip_max_coo_test:
+	RSB_INFO("Skipping large binary search test.\n");
+done:
+err:
+	rsb__do_perror(NULL,errval);
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_DO_ERR_RETURN(errval)
+}
+
+/* @endcond */
diff --git a/rsb_libspblas_tests.h b/rsb_libspblas_tests.h
new file mode 100644
index 0000000..8253d5c
--- /dev/null
+++ b/rsb_libspblas_tests.h
@@ -0,0 +1,45 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/**
+ * @file
+ * @author Michele Martone
+ * @brief  Sparse BLAS interface testing code
+ * */
+#ifndef LIBSPBLAS_TESTS_H_INCLUDED
+#define LIBSPBLAS_TESTS_H_INCLUDED
+#include "rsb_common.h"
+struct rsb_tester_options_t{
+	rsb_time_t mtt; /* maximal test time */
+	rsb_bool_t rrm; /* require recursive matrices (error otherwise) */
+	rsb_bool_t tur; /* test until recursive */
+	rsb_bool_t wqt; /* want quiet testing */
+	rsb_bool_t wqc; /* want quiet conditionally (on no tty) */
+	rsb_bool_t wcs; /* want clear screen */
+};
+rsb_err_t rsb_blas_tester_options_init(struct rsb_tester_options_t * top);
+rsb_err_t rsb_blas_mini_tester(void);
+rsb_err_t rsb_blas_bigger_matrices_tester(struct rsb_tester_options_t * top);
+rsb_err_t rsb_blas_limit_cases_tester(void);
+rsb_err_t rsb_blas_runtime_limits_tester(void);
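+/* A minimal usage sketch (hypothetical caller code, assuming the library has
+   already been initialized):
+
+	struct rsb_tester_options_t to;
+	rsb_err_t errval = rsb_blas_tester_options_init(&to);
+	to.mtt = 60.0;          // give up after roughly one minute
+	to.tur = RSB_BOOL_TRUE; // stop early once a recursive matrix got instantiated
+	if(!RSB_SOME_ERROR(errval))
+		errval = rsb_blas_bigger_matrices_tester(&to);
+*/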
+#endif /* LIBSPBLAS_TESTS_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_license_header.inc b/rsb_license_header.inc
new file mode 100644
index 0000000..7ba4a52
--- /dev/null
+++ b/rsb_license_header.inc
@@ -0,0 +1,21 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
diff --git a/rsb_limiter.c b/rsb_limiter.c
new file mode 100644
index 0000000..8942a19
--- /dev/null
+++ b/rsb_limiter.c
@@ -0,0 +1,130 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/**
+ * @file
+ * @author Michele Martone
+ * @brief Timing/limiting mechanisms.
+ */
+
+#include "rsb_common.h"
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+rsb_err_t rsb_limiter_init(struct rsb_limiter* lsp, const rsb_time_t max_time, const rsb__times_t max_times) 
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!lsp) { errval = RSB_ERR_BADARGS;goto err; }
+	if( max_time  <  RSB_TIME_MIN ) { errval = RSB_ERR_BADARGS;goto err; }
+	if( max_time  >  RSB_TIME_MAX ) { errval = RSB_ERR_BADARGS;goto err; }
+	/*if( max_times < RSB_TIMES_MIN ) { errval = RSB_ERR_BADARGS;goto err; }*/
+	if( max_times > RSB_TIMES_MAX ) { errval = RSB_ERR_BADARGS;goto err; }
+	RSB_BZERO_P(lsp);
+	lsp->max_time  = max_time;
+	if( lsp->max_time  > RSB_TIME_ZERO )
+		lsp->t0  = rsb_time();
+	else
+		lsp->t0  = RSB_TIME_ZERO;
+	lsp->t1 = lsp->t0;
+	lsp->max_times = max_times;
+	lsp->times = RSB_TIMES_ZERO;
+err:
+	return errval;
+}
+
+rsb_err_t rsb__limiter_init_from_str(struct rsb_limiter* lsp, const char *tls)
+{
+	/* e.g.: tls = "4000" ; tls = "10s" */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_limiter lst;
+
+	if(!tls || !*tls)
+       		goto err;
+	if(!lsp)
+       		goto err;
+	if(strstr(tls,"s")!=NULL)
+	{
+		lst.max_time  = rsb__util_atof(tls);
+		lst.max_times = RSB_TIMES_ZERO;
+	}
+	else
+	{
+		lst.max_times = rsb__util_atoi(tls);
+		lst.max_time  = RSB_TIME_ZERO;
+	}
+	errval = rsb_limiter_init(lsp,lst.max_time,lst.max_times); 
+	goto ret;
+err:
+	errval = RSB_ERR_BADARGS;
+ret:
+	return errval;
+}
+ 
+rsb_err_t rsb__limiter_step(struct rsb_limiter* lsp)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!lsp) { errval = RSB_ERR_BADARGS;goto err; }
+	lsp->times++;
+	if(lsp->max_time>RSB_TIME_ZERO)
+		lsp->t1=rsb_time();
+	/* FIXME: WRITE ME */
+err:
+	return errval;
+}
+ 
+rsb_bool_t rsb__limiter_done(const struct rsb_limiter* lsp)
+{
+	rsb_bool_t done = RSB_BOOL_TRUE;
+
+	if( !lsp ) { goto err; }
+	if( lsp->max_times > RSB_TIMES_ZERO && lsp->times >= lsp->max_times ) goto err;
+	if( lsp->max_time  > RSB_TIME_ZERO  && (lsp->t1-lsp->t0) >= lsp->max_time ) goto err;
+	if( lsp->max_times == RSB_TIMES_ZERO && lsp->max_time == RSB_TIME_ZERO ) goto err;
+	done = RSB_BOOL_FALSE;
+	/* FIXME: WRITE ME */
+err:
+	return done;
+}
+
+rsb_bool_t rsb__limiter_continue(const struct rsb_limiter* lsp)
+{
+	return RSB_BOOL_NOT(rsb__limiter_done(lsp));
+}
+
+rsb_err_t rsb__limiter_info(const struct rsb_limiter* lsp)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	const rsb_char_t*tis="Timer info: ";
+
+	if(!lsp) { errval = RSB_ERR_BADARGS;goto err; }
+	if( lsp->max_time > RSB_TIME_ZERO )
+		RSB_INFO("%s%lf / %lf seconds, %ld iterations.\n",tis,lsp->t1-lsp->t0,lsp->max_time,(long int)(lsp->times));
+	else
+	if( lsp->max_times > RSB_TIMES_ZERO )
+		RSB_INFO("%s%d / %d iterations.\n",tis,(int)(lsp->times),(int)(lsp->max_times));
+err:
+	return errval;
+}
+
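+/* A minimal usage sketch for the limiter (hypothetical loop; the string "100"
+   limits to 100 iterations, while e.g. "2.5s" would limit to 2.5 seconds):
+
+	struct rsb_limiter ls;
+	rsb_err_t errval = rsb__limiter_init_from_str(&ls,"100");
+	while( !RSB_SOME_ERROR(errval) && rsb__limiter_continue(&ls) )
+	{
+		// ... the work being limited goes here ...
+		errval = rsb__limiter_step(&ls);
+	}
+	rsb__limiter_info(&ls);
+*/
+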
+/* @endcond */
diff --git a/rsb_limiter.h b/rsb_limiter.h
new file mode 100644
index 0000000..6fa2e57
--- /dev/null
+++ b/rsb_limiter.h
@@ -0,0 +1,60 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/**
+ * @file
+ * @author Michele Martone
+ * @brief Timing/limiting mechanisms.
+ */
+
+#ifndef RSB_LIMITER_H_INCLUDED
+#define RSB_LIMITER_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#include "rsb.h"		/* public API specification */
+#define RSB_TIMES_MAX 1000000000
+#define RSB_TIMES_MIN 0
+#define RSB_TIMES_ZERO 0
+#define RSB_TIME_MAX RSB_CONST_IMPOSSIBLY_BIG_TIME
+#define RSB_TIME_MIN RSB_TIME_ZERO 
+struct rsb_limiter
+{
+	/*rsb_bool_t is_time_based;*/
+	rsb_time_t t0,t1;
+	rsb_time_t max_time;
+	rsb__times_t max_times;
+	rsb__times_t times;
+};
+rsb_err_t rsb_limiter_init(struct rsb_limiter* lsp, const rsb_time_t max_time, const rsb__times_t max_times);
+rsb_err_t rsb__limiter_init_from_str(struct rsb_limiter* lsp, const char *tls);
+rsb_err_t rsb__limiter_step(struct rsb_limiter* lsp);
+rsb_bool_t rsb__limiter_done(const struct rsb_limiter* lsp);
+rsb_bool_t rsb__limiter_continue(const struct rsb_limiter* lsp);
+rsb_err_t rsb__limiter_info(const struct rsb_limiter* lsp);
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+#endif /* RSB_LIMITER_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_lock.c b/rsb_lock.c
new file mode 100644
index 0000000..358198d
--- /dev/null
+++ b/rsb_lock.c
@@ -0,0 +1,1279 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains locks for sparse recursive multicore operations.
+ * */
+#include "rsb_lock.h"
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+#define RSB_WANT_DO_LOCK_TEST 0
+
+/*
+ TODO: one shall reduce the external interface, e.g. to a single rsb__lock function.
+*/
+
+rsb_bool_t rsb__do_lock_release(struct rsb_rows_lock_struct_t *lock, rsb_thr_t th_id)
+{
+	/* *
+	 * 	\ingroup gr_internals
+	 * */
+	if(RSB__TRSV_OUT_)RSB_INFO("thread %d releases  %d %d\n",th_id,lock->coresrowf[th_id],lock->coresrowl[th_id]);
+	lock->corescoll[th_id]=RSB_MARKER_COO_VALUE;
+	lock->corescolf[th_id]=RSB_MARKER_COO_VALUE;
+	lock->coresrowl[th_id]=RSB_MARKER_COO_VALUE;
+	lock->coresrowf[th_id]=RSB_MARKER_COO_VALUE;
+	return RSB_BOOL_TRUE;
+}
+
+static RSB_INLINE rsb_bool_t rsb_do_lock_check_if_matrix_done(const struct rsb_rows_lock_struct_t *lock, rsb_submatrix_idx_t subm)
+{
+	/**
+	 * 	\ingroup gr_internals
+	 *  */
+	if(RSB_BITVECTOR_GET(lock->bmap,lock->subms,subm))
+		return RSB_BOOL_TRUE;
+	else
+		return RSB_BOOL_FALSE;
+}
+
+static RSB_INLINE rsb_bool_t rsb_do_lock_check_interval(const struct rsb_rows_lock_struct_t *lock, rsb_thr_t th_id, rsb_coo_idx_t roff, rsb_coo_idx_t m, rsb_coo_idx_t coff, rsb_coo_idx_t k, rsb_trans_t transA)
+{
+	/**
+	 * 	\ingroup gr_internals
+	 *  */
+	rsb_thr_t tn;
+	rsb_bool_t want_both=(lock->want_symlock == RSB_BOOL_TRUE);
+
+	if(want_both)
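+	/* Two held ranges conflict when their [first,last] index intervals
+	 * overlap; with the symmetric lock (want_both), the requested row and
+	 * column ranges are checked against both the row and the column ranges
+	 * held by every other thread. */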
+	{
+		for(tn=0;tn<lock->nt; ++tn)
+		if( tn!=th_id && (
+                           ((lock->coresrowf[tn] >= roff) && (lock->coresrowf[tn] < roff+m))
+			|| ((lock->coresrowf[tn] <= roff) && (lock->coresrowl[tn]+1 > roff))
+			|| ((lock->corescolf[tn] >= coff) && (lock->corescolf[tn] < coff+k))
+			|| ((lock->corescolf[tn] <= coff) && (lock->corescoll[tn]+1 > coff))
+
+                        || ((lock->coresrowf[tn] >= coff) && (lock->coresrowf[tn] < coff+k))
+			|| ((lock->coresrowf[tn] <= coff) && (lock->coresrowl[tn]+1 > coff))
+			|| ((lock->corescolf[tn] >= roff) && (lock->corescolf[tn] < roff+m))
+			|| ((lock->corescolf[tn] <= roff) && (lock->corescoll[tn]+1 > roff))
+			))
+		{
+			if(RSB__TRSV_OUT_)RSB_INFO("%d %d blocks %d %d\n",lock->coresrowf[tn],lock->coresrowl[tn],roff,m);
+			goto l_false;
+		}
+	}
+	else
+	{
+		if((RSB_DOES_NOT_TRANSPOSE(transA)) || want_both)
+		for(tn=0;tn<lock->nt; ++tn)
+		if( tn!=th_id
+			&& (((lock->coresrowf[tn] >= roff) && (lock->coresrowf[tn] < roff+m))
+			|| ((lock->coresrowf[tn] <= roff) && (lock->coresrowl[tn]+1 > roff))))
+		{
+			if(RSB__TRSV_OUT_)RSB_INFO("%d %d blocks %d %d\n",lock->coresrowf[tn],lock->coresrowl[tn],roff,m);
+				goto l_false;
+		}
+	
+		if(RSB_DOES_TRANSPOSE(transA) || want_both)
+		for(tn=0;tn<lock->nt; ++tn)
+		if( tn!=th_id
+			&& (((lock->corescolf[tn] >= coff) && (lock->corescolf[tn] < coff+k))
+			|| ((lock->corescolf[tn] <= coff) && (lock->corescoll[tn]+1 > coff))))
+		{
+			if(RSB__TRSV_OUT_)RSB_INFO("%d %d blocks %d %d\n",lock->coresrowf[tn],lock->coresrowl[tn],coff,k);
+			goto l_false;
+		}
+	}
+	return RSB_BOOL_TRUE;
+l_false:
+	return RSB_BOOL_FALSE;
+}
+
+	/* sets only the interval info for a given thread */
+#define RSB_DO_LOCK_INTERVALS(LOCK,TH_ID,R0,R,C0,C) \
+	(LOCK)->coresrowf[(TH_ID)]=(R0), (LOCK)->coresrowl[(TH_ID)]=(R0)+((R)-1), \
+	(LOCK)->corescolf[(TH_ID)]=(C0), (LOCK)->corescoll[(TH_ID)]=(C0)+((C)-1)
+
+#define RSB_DO_LOCK_INTERVAL(LOCK,TH_ID,R0,R) \
+	(LOCK)->coresrowf[(TH_ID)]=(R0), (LOCK)->coresrowl[(TH_ID)]=(R0)+((R)-1), \
+	(LOCK)->corescolf[(TH_ID)]=(R0), (LOCK)->corescoll[(TH_ID)]=(R0)+((R)-1)	/* FIXME: is there a reason for redundancy ? */
+
+/* FIXME: actually, this is the interval +1  */
+#define RSB_GET_LOCK_INTERVAL_W(LOCK,TH_ID,R0,R1) \
+	(R0)=(LOCK)->coresrowf[(TH_ID)], (R1)=(LOCK)->coresrowl[(TH_ID)]+1
+#define RSB_GET_LOCK_INTERVAL_L(LOCK,TH_ID,R0,R1) \
+	(R0)=(LOCK)->coresrolf[(TH_ID)], (R1)=(LOCK)->coresroll[(TH_ID)]+1
+
+#if 0
+#define RSB_GET_LOCK_INTERVALS(LOCK,TH_ID,R0,R,C0,C) \
+	(R0)=(LOCK)->coresrowf[(TH_ID)], \
+	(C0)=(LOCK)->corescolf[(TH_ID)], \
+	(R)=(LOCK)->coresrowl[(TH_ID)]-(R0)+1, \
+	(C)=(LOCK)->corescoll[(TH_ID)]-(C0)+1
+#endif
+
+rsb_bool_t rsb__do_lock_get(struct rsb_rows_lock_struct_t *lock, rsb_thr_t th_id, rsb_coo_idx_t roff, rsb_coo_idx_t m, rsb_coo_idx_t coff, rsb_coo_idx_t k, rsb_submatrix_idx_t subm, rsb_trans_t transA)
+{
+	/**
+	 * 	\ingroup gr_internals
+	 *  */
+#if 0
+	if(th_id)
+	if(RSB__TRSV_OUT_)RSB_INFO("blocked by %p %d @ %d .. %d\n",lock->bmap,lock->subms,th_id,subm);
+#endif
+	
+	if(RSB_BITVECTOR_GET(lock->bmap,lock->subms,subm))
+		goto l_false;
+
+	if(lock->want_fake_lock == RSB_BOOL_TRUE)
+		goto l_true;	/* debug only : no locked rows check */
+
+	if(!rsb_do_lock_check_interval(lock,th_id,roff,m,coff,k,transA))
+		goto l_false;
+
+	RSB_DO_LOCK_INTERVALS(lock,th_id,roff,m,coff,k);
+
+	if(RSB__TRSV_OUT_)RSB_INFO("thread %d locks  %d %d with matrix %d\n",th_id,lock->coresrowf[th_id],lock->coresrowl[th_id],subm);
+l_true:
+	/* 
+	 * WARNING: this does not mean that the matrix is 'done'.
+	 * It only means that the matrix is now assigned to some core and will be processed soon.
+	 * The guarantee that the matrix is done is given only by the lock: if this matrix
+	 * is marked AND its row (or column) interval is free, then the matrix is done (in SPSV/SPMV).
+	 * */
+	RSB_BITVECTOR_SET(lock->bmap,lock->subms,subm);
+	return RSB_BOOL_TRUE;
+l_false:
+	return RSB_BOOL_FALSE;
+}
+
+rsb_err_t rsb__do_lock_init(struct rsb_rows_lock_struct_t *lock, rsb_int_t num_threads, rsb_submatrix_idx_t subms, const struct rsb_mtx_t * mtxAp, enum rsb_op_flags_t op_flags)
+{
+	/** 
+	 * 	\ingroup gr_internals
+	 * */
+	rsb_int tn;
+
+	if(!mtxAp || !lock)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(lock);
+	lock->nt=num_threads;
+	for(tn=0;tn<RSB_CONST_MAX_SUPPORTED_CORES; ++tn)
+		lock->corescolf[tn]=RSB_MARKER_COO_VALUE, lock->corescoll[tn]=RSB_MARKER_COO_VALUE,
+		lock->coresrowf[tn]=RSB_MARKER_COO_VALUE, lock->coresrowl[tn]=RSB_MARKER_COO_VALUE;
+	lock->dm=0;
+	lock->subms=subms;
+	lock->want_symlock = rsb__is_not_unsymmetric(mtxAp);
+	lock->want_fake_lock=(op_flags == RSB_OP_FLAG_FAKE_LOCK);
+	lock->bmap = rsb__allocate_bitvector(subms);
+	return (lock->bmap!=NULL)?RSB_ERR_NO_ERROR:RSB_ERR_ENOMEM;
+}
+
+rsb_err_t rsb__do_lock_free(struct rsb_rows_lock_struct_t *lock)
+{
+	/** 
+	 * 	\ingroup gr_internals
+	 * */
+	if(!lock)
+		return RSB_ERR_BADARGS;
+	RSB_CONDITIONAL_FREE(lock->bmap);
+	return RSB_ERR_NO_ERROR;
+}
+
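+/* A minimal usage sketch of the rows lock (hypothetical worker loop; th_id is
+   the executing thread and subm/roff/m/coff/k describe a candidate submatrix):
+
+	if( rsb__do_lock_get(&lock,th_id,roff,m,coff,k,subm,transA) == RSB_BOOL_TRUE )
+	{
+		// ... operate on the locked rows/columns interval ...
+		rsb__do_lock_release(&lock,th_id);
+	}
+	// else: retry later; another thread holds an overlapping interval
+*/
+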
+/*  BEGIN EXPERIMENTAL CODE */
+
+#if RSB_WANT_DO_LOCK_TEST
+size_t static rsb_do_log2(size_t n)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * FIXME : document this
+	 */
+	size_t res = 0;
+	while(n /= 2)
+		++res;
+	return res;
+}
+#endif /* RSB_WANT_DO_LOCK_TEST */
+
+#define RSB_MULTINT_BY_TWO(X)   ((X)<<1)	/* FIXME: this is not portable */
+#define RSB_UPPER_BOUNDING_LOG2(X) (rsb_do_log2(rsb__nearest_power_of_two(X)))
+#define RSB_LOUD_BTILS_TESTING 0 /*  */
+#define RSB_LOUD_MVL_TESTING 0   /* multivector lock   */
+#define RSB_LOUD_MVR_TESTING 0   /* multivector reduce */
+#define RSB_INHIBIT_MULTIVECTOR 1   /* multivector reduce */
+#define RSB_INHIBIT_REDUCE 0   /* multivector reduce */
+
+static rsb_err_t rsb_do_btils_init(struct rsb_bti_lock_struct * lock, rsb_coo_idx_t itl, rsb_coo_idx_t nlevels)
+{
+	/**
+	 * 	\ingroup gr_internals
+	 * 	Initializes a lock structure.
+	 * 	The input structure shall be freshly instantiated or freed.
+	 * 	In case of error, it is safe but not required to call rsb_do_btils_free() to free it.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!lock || nlevels<0)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	RSB_BZERO_P(lock);
+	lock->bmap=NULL;
+	lock->nlevels=nlevels;
+	lock->itl=itl;
+	lock->mvleaves = RSB_POWER_OF_2(nlevels);
+	lock->bsz=(2*lock->mvleaves-1);
+	/* FIXME: need a check on nlevels */
+	lock->bmap = rsb__allocate_bitvector(lock->bsz);
+	lock->tmap = rsb__allocate_bitvector(lock->bsz);
+	if(!lock->bmap || !lock->tmap)
+	{
+		RSB_CONDITIONAL_FREE(lock->bmap);
+		RSB_CONDITIONAL_FREE(lock->tmap);
+		errval = RSB_ERR_ENOMEM;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+err:
+	return errval;
+}
+
+static rsb_err_t rsb_do_btils_free(struct rsb_bti_lock_struct * lock)
+{
+	/** 
+	 * 	\ingroup gr_internals
+	 * 	Frees a lock structure.
+	 * 	The input structure shall be initialized with success.
+	 * */
+	if(!lock)
+		return RSB_ERR_BADARGS;
+	RSB_CONDITIONAL_FREE(lock->bmap);
+	RSB_CONDITIONAL_FREE(lock->tmap);
+	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_coo_idx_t rsb_do_rindex_to_lindex(rsb_coo_idx_t r0, rsb_coo_idx_t r1, rsb_coo_idx_t n, rsb_coo_idx_t nlevels)
+{
+	/** 
+	 * 	\ingroup gr_internals
+	 * */
+	rsb_coo_idx_t l0=0,l1=0,doffset=1,offset=0;
+	rsb_coo_idx_t n0=n,n1=n;
+	rsb_int i,delta=0;
+	if(nlevels<1)
+	{
+		return 0;
+	}
+	if(r1==n1)
+		l1=2,r1=0;
+	for(i=0;i<nlevels;++i)
+	{
+		rsb_coo_idx_t m0=RSB_MIDDLE(n0);
+		rsb_coo_idx_t m1=RSB_MIDDLE(n1);
+
+		if(r0>=m0)
+			r0-=m0,++l0,n0-=m0;
+		else
+			n0=m0;
+		if(r1>=m1)
+			r1-=m1,++l1,n1-=m1;
+		else
+			n1=m1;	
+
+		if(i<nlevels-1)
+			l0*=2,l1*=2;
+	}
+#if 0
+  	RSB_INFO("%d!\n",l1-l0);
+#endif
+	delta=l1-l0;
+	l0=l0/(l1-l0);
+	offset = RSB_POWER_OF_2(nlevels)-1;
+	doffset = RSB_POWER_OF_2(nlevels-1);
+	for( ;delta>1;delta/=2,doffset/=2)
+		offset-=doffset;
+		
+	if(RSB_LOUD_BTILS_TESTING)
+		RSB_INFO("@ bit %d + %d\n",l0,offset);
+	return offset+l0;
+}
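+/*
+ * The bit vectors above store the interval tree in implicit (heap) form:
+ * node 0 covers the whole [0,itl) range, the children of node i are nodes
+ * 2i+1 and 2i+2, and the RSB_POWER_OF_2(nlevels) leaves occupy the last
+ * positions of the bsz=2*mvleaves-1 sized map; rsb_do_rindex_to_lindex()
+ * above maps a row interval [r0,r1) onto the index of a covering tree node.
+ */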
+
+static rsb_bool_t rsb_do_btils_lock_update_tmap(struct rsb_bti_lock_struct * lock, rsb_coo_idx_t i)
+{
+	rsb_coo_idx_t iu,il,ii;
+	/* we taint the vector: a lock marks the interval as tainted
+	 * (as opposed to the cases where an untainted vector is unlocked, e.g. after a reduce) */
+	RSB_BITVECTOR_SET(lock->tmap,lock->bsz,i);
+	/* did already any ancestor taint all way up ? */
+	for(iu=(i-1)/2;iu>0;iu=(iu-1)/2)
+	{
+		/* TODO: could speed this up a little by inverting the visit order */
+		if(RSB_LOUD_BTILS_TESTING)
+			RSB_INFO("updating tmap\n");
+		if(RSB_BITVECTOR_GET(lock->tmap,lock->bsz,iu))
+			goto l_done;
+	}
+	/* no ancestor tainted all way up */
+	RSB_ASSERT(iu==0);
+	if(RSB_BITVECTOR_GET(lock->tmap,lock->bsz,iu))
+		goto l_done;
+	if(RSB_LOUD_BTILS_TESTING)
+		RSB_INFO("reducing taint map:\n"),rsb__do_dump_bitmap(lock->tmap,1,lock->bsz),RSB_INFO("\n");
+
+	/* we look for neighbor leaves needing collapse, at any upper level  */
+	while(i>0)
+	{
+		il=2*((i-1)/2  )+1;
+		iu=2*((i-1)/2+1)+1;
+		for(ii=il;ii<iu;++ii)
+			if(!RSB_BITVECTOR_GET(lock->tmap,lock->bsz,ii))
+				goto skip;/* The sibling interval is not tainted: we may stop merging here.
+				    Pay attention: some descendant of ours may still have its bit set even
+				    though merging is done at this level: that bit would then be obsolete,
+				    and it may well remain set.
+				    This causes no harm, so we do not force a bit-clear on lower nodes here.
+			           */
+		/* merge the current subtree */
+		for(ii=il;ii<iu;++ii)
+			RSB_BITVECTOR_UNSET(lock->tmap,lock->bsz,ii);
+		/* collapse to the upper node */
+		i=(i-1)/2;
+		RSB_BITVECTOR_SET(lock->tmap,lock->bsz,i);
+		continue;
+skip:
+		i=(i-1)/2;
+	}
+	/* the taint map is done */
+	
+l_done:
+	if(RSB_LOUD_BTILS_TESTING)
+		RSB_INFO("taint map:\n"),rsb__do_dump_bitmap(lock->tmap,1,lock->bsz),RSB_INFO("\n");
+	return RSB_BOOL_TRUE;
+}
+
+static RSB_INLINE rsb_bool_t rsb_do_btils_lock_probe_inner(struct rsb_bti_lock_struct * lock, rsb_coo_idx_t i)
+{
+	 * 	Checks that tree node i, all of its ancestors and all of its
+	 * 	descendants are unlocked, i.e. that no overlapping interval is held.
+	 * */
+	 * */
+	rsb_coo_idx_t iu,il;
+	rsb_coo_idx_t ili=2,ilii;
+	RSB_ASSERT(lock);
+	RSB_ASSERT(i>=0);
+
+	if(RSB_BITVECTOR_GET(lock->bmap,lock->bsz,i))
+		goto l_false;
+
+	for(iu=(i-1)/2;iu>0;iu=(iu-1)/2)
+	{
+#if 0
+		if(1) RSB_INFO("checking bit .. %d:%d\n",iu,RSB_BOOL_TRUE == RSB_BITVECTOR_GET(lock->bmap,lock->bsz,iu));
+#endif
+		if(RSB_BITVECTOR_GET(lock->bmap,lock->bsz,iu))
+			goto l_false;
+	}
+	if(RSB_BITVECTOR_GET(lock->bmap,lock->bsz,iu))/* iu==0 */
+		goto l_false;
+	for(il=2*i+1;il<lock->bsz;il=2*il+1,ili*=2)
+	{
+		for(ilii=0;ilii<ili;++ilii)
+			if(RSB_BITVECTOR_GET(lock->bmap,lock->bsz,il+ilii))
+				goto l_false;
+	}
+	
+	return RSB_BOOL_TRUE;
+l_false:
+	return RSB_BOOL_FALSE;
+}
+
+static rsb_bool_t rsb_do_btils_lock_probe(struct rsb_bti_lock_struct * lock, rsb_coo_idx_t m0, rsb_coo_idx_t m1, rsb_coo_idx_t *ip)
+{
+	/**
+	 * */
+	rsb_coo_idx_t i;
+	RSB_ASSERT(lock);
+	RSB_ASSERT(ip);
+
+	i = rsb_do_rindex_to_lindex(m0,m1,lock->itl,lock->nlevels);
+	if(!rsb_do_btils_lock_probe_inner(lock,i))
+		goto l_false;
+	*ip=i;
+	return RSB_BOOL_TRUE;
+l_false:
+	return RSB_BOOL_FALSE;
+}
+
+static rsb_bool_t rsb_do_btils_lock_get_sym(struct rsb_bti_lock_struct * lock, rsb_coo_idx_t m0, rsb_coo_idx_t m1, rsb_coo_idx_t k0, rsb_coo_idx_t k1, rsb_trans_t transA, rsb_coo_idx_t *ip, rsb_coo_idx_t *jp)
+{
+	/** 
+	 * 	\ingroup gr_internals
+	 * */
+	rsb_coo_idx_t i,j;
+	if(!rsb_do_btils_lock_probe(lock,m0,m1,&i))
+		goto l_false;
+	j=i;
+	if((m0!=k0) && (m1!=k1))
+		if(!rsb_do_btils_lock_probe(lock,k0,k1,&j))
+			goto l_false;
+
+	RSB_BITVECTOR_SET(lock->bmap,lock->bsz,i);
+	if(i!=j)
+		RSB_BITVECTOR_SET(lock->bmap,lock->bsz,j);
+	if(RSB_LOUD_BTILS_TESTING)
+		rsb__do_dump_bitmap(lock->bmap,1,lock->bsz),RSB_INFO(" (%d)\n",lock->bsz);
+
+	/* we're going to lock up to i */
+	if(RSB_LOUD_BTILS_TESTING)
+		RSB_INFO("(nlev=%d)(%d .. %d) -> %d ok\n",lock->nlevels,m0,m1,i);
+
+	/* TODO: update the taint vector accordingly */
+	rsb_do_btils_lock_update_tmap(lock,i);
+	if(i!=j)
+		rsb_do_btils_lock_update_tmap(lock,j);
+
+	if(RSB_DOES_TRANSPOSE(transA))
+		RSB_SWAP(rsb_coo_idx_t,i,j);
+
+	*ip=i;
+	*jp=j;
+
+	return RSB_BOOL_TRUE;
+l_false:
+	if(RSB_LOUD_BTILS_TESTING)
+		RSB_INFO("(nlev=%d)(%d .. %d) -> (%d %d) busy \n",lock->nlevels,m0,m1,i,j);
+	return RSB_BOOL_FALSE;
+}
+
+static rsb_bool_t rsb_do_btils_lock_get(struct rsb_bti_lock_struct * lock, rsb_coo_idx_t m0, rsb_coo_idx_t m1, rsb_trans_t transA, rsb_coo_idx_t *ip, rsb_coo_idx_t *jp)
+{
+	/** 
+	 * 	\ingroup gr_internals
+	 * */
+	rsb_coo_idx_t i = RSB_MARKER_COO_VALUE,j = RSB_MARKER_COO_VALUE;
+	if(!rsb_do_btils_lock_probe(lock,m0,m1,&i))
+		goto l_false;
+
+	RSB_BITVECTOR_SET(lock->bmap,lock->bsz,i);
+	if(RSB_LOUD_BTILS_TESTING)
+		rsb__do_dump_bitmap(lock->bmap,1,lock->bsz),RSB_INFO(" (%d)\n",lock->bsz);
+
+	/* we're going to lock up to i */
+	if(RSB_LOUD_BTILS_TESTING)
+		RSB_INFO("(nlev=%d)(%d .. %d) -> %d ok\n",lock->nlevels,m0,m1,i);
+
+	/* TODO: update the taint vector accordingly */
+	rsb_do_btils_lock_update_tmap(lock,i);
+	
+	if(RSB_DOES_TRANSPOSE(transA))
+		RSB_SWAP(rsb_coo_idx_t,i,j);
+	
+	*ip=i,*jp=j;
+
+	return RSB_BOOL_TRUE;
+l_false:
+	if(RSB_LOUD_BTILS_TESTING)
+		RSB_INFO("(nlev=%d)(%d .. %d) -> %d busy \n",lock->nlevels,m0,m1,i);
+	return RSB_BOOL_FALSE;
+}
+
+static rsb_err_t rsb_do_get_interval_info_from_btils_lock(struct rsb_bti_lock_struct * lock, rsb_coo_idx_t i, rsb_coo_idx_t *m0p, rsb_coo_idx_t * m1p)
+{
+	/** 
+	 * 	\ingroup gr_internals
+	 * 	FIXME: unfinished
+	 * */
+	rsb_coo_idx_t m0=0,m1=lock->itl,h=lock->itl,iu,l=0,ii=0,pot=1, nl=0;
+
+	for(iu=i;iu>0;iu=(iu-1)/2)
+	{
+		ii = RSB_MULTINT_BY_TWO(ii);
+		if(RSB_IS_INTEGER_EVEN(iu))
+			++ii;
+		++nl;
+	}
+
+	for(l=0;l<nl;++l)
+	{
+		if(ii&pot)
+			m0+=RSB_MIDDLE(h),
+			h=h-RSB_MIDDLE(h);
+		else
+			m1-=h-RSB_MIDDLE(h),
+			h = RSB_MIDDLE(h);
+#if 0
+		RSB_INFO("BIBO: ii=%d m0=%d, h=%d, pot=%d\n",ii,m0,h,pot);
+#endif
+		pot = RSB_MULTINT_BY_TWO(pot);
+	}
+	*m0p=m0;
+	*m1p=m1;
+	return RSB_ERR_NO_ERROR;
+}
+
+void RSB_INLINE rsb_do_btils_lock_release_inner(struct rsb_bti_lock_struct * lock, rsb_coo_idx_t i)
+{
+	/** 
+	 * 	\ingroup gr_internals
+	 * 	FIXME: does this call free one or two intervals ?
+	 * */
+	if(RSB_LOUD_BTILS_TESTING)
+		rsb__do_dump_bitmap(lock->bmap,1,lock->bsz),RSB_INFO(" (%d)\n",lock->bsz);
+	RSB_BITVECTOR_UNSET(lock->bmap,lock->bsz,i);
+	if(RSB_LOUD_BTILS_TESTING)
+	{
+		rsb_coo_idx_t m0,m1;
+		rsb_do_get_interval_info_from_btils_lock(lock,i,&m0,&m1);
+		RSB_INFO("freeing (%d .. %d)\n",m0,m1),
+		rsb__do_dump_bitmap(lock->bmap,1,lock->bsz),RSB_INFO(" (%d)\n",lock->bsz);
+	}
+}
+
+#if RSB_WANT_DO_LOCK_TEST
+static rsb_err_t rsb_do_btils_lock_release(struct rsb_bti_lock_struct * lock, rsb_coo_idx_t m0, rsb_coo_idx_t m1)
+{
+	/** 
+	 * 	\ingroup gr_internals
+	 * 	FIXME: does this call free one or two intervals ?
+	 * 	FIXME: deprecated
+	 * */
+	rsb_coo_idx_t i;
+	if(!lock)
+		return RSB_ERR_BADARGS;
+	i = rsb_do_rindex_to_lindex(m0,m1,lock->itl,lock->nlevels);
+	RSB_ASSERT(i>=0);
+	rsb_do_btils_lock_release_inner(lock,i);
+	return RSB_ERR_NO_ERROR;
+}
+#endif /* RSB_WANT_DO_LOCK_TEST */
+
+#define RSB_MV_OFFSET(LOCK,INDEX,OFFSET) \
+	((((rsb_char_t *)((LOCK)->mv[INDEX]))) +(LOCK)->el_size*(OFFSET))
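+/* e.g.: RSB_MV_OFFSET(lock,2,10) points 10 numerical elements into the third
+ * temporary vector, since the macro scales OFFSET by el_size bytes. */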
+
+static rsb_err_t rsb__do_mv_lock_release_single(struct rsb_mv_lock_t *lock, rsb_thr_t th_id, rsb_char_t *ov)
+{
+	/**
+	 * 	\ingroup gr_internals
+	 * */
+	rsb_coo_idx_t nvi = RSB_MARKER_COO_VALUE;
+
+	/* in the case the locked vector was the master one */
+	if(ov==lock->ov)
+	{
+		if(RSB_LOUD_MVL_TESTING)
+			RSB_INFO("releasing master vector from thread %d\n",th_id);
+		rsb__do_lock_release(&(lock->olock),th_id);
+		goto ok;
+	}
+
+	/* in the case the locked vector was not the master one */
+	for(nvi=0;nvi<lock->nv;++nvi)
+		if( (ov >= RSB_MV_OFFSET(lock,nvi,0)) && (ov < RSB_MV_OFFSET(lock,nvi,lock->itl)))
+		{
+			struct rsb_bti_lock_struct * vlock=&(lock->locks[nvi]);
+			/* we located the vector; now we shall see if that interval was locked */
+			if(RSB_LOUD_MVL_TESTING)
+			{
+				RSB_INFO("releasing vector %d from thread %d\n",nvi,th_id);
+/*  			if(RSB_BITVECTOR_GET(vlock->bmap,vlock->bsz,rsb_do_rindex_to_lindex(roff,roff+m,vlock->itl,vlock->nlevels)))
+				RSB_INFO("freeing interval %d .. %d on vector %d (thread %d)\n",roff,roff+m,nvi,th_id);
+				else
+				{RSB_INFO("guessed pointer !?\n");goto failure;}*/
+			}
+			if(lock->it[th_id]!=RSB_MARKER_COO_VALUE)
+			{
+				if(RSB_LOUD_MVL_TESTING) RSB_INFO("releasing inner\n");
+				rsb_do_btils_lock_release_inner(vlock,lock->it[th_id]);
+			}
+			if(lock->in[th_id]!=RSB_MARKER_COO_VALUE)
+			{
+				if(RSB_LOUD_MVL_TESTING) RSB_INFO("releasing inner\n");
+				rsb_do_btils_lock_release_inner(vlock,lock->in[th_id]);
+			}
+			lock->it[th_id]=RSB_MARKER_COO_VALUE;
+			lock->in[th_id]=RSB_MARKER_COO_VALUE;
+			goto ok;
+		}
+#if 0
+failure:
+	if(RSB_LOUD_MVL_TESTING)
+		RSB_INFO("did not find a vector to release for thread %d\n",th_id);
+	return RSB_ERR_GENERIC_ERROR;
+#endif
+ok:
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_mv_lock_release(struct rsb_mv_lock_t *lock, rsb_thr_t th_id, rsb_char_t *ov)
+{
+	/**
+	 * 	\ingroup gr_internals
+	 * */
+#if RSB_LOUD_MVL_TESTING
+#if 0
+	rsb_coo_idx_t roff = RSB_MARKER_COO_VALUE,m = RSB_MARKER_COO_VALUE,coff = RSB_MARKER_COO_VALUE,k = RSB_MARKER_COO_VALUE;
+#endif
+#endif /* RSB_LOUD_MVL_TESTING */
+	rsb_bool_t is_reduce_only = RSB_BOOL_TRUE;/* FIXME: ?? */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_ASSERT(lock);
+	RSB_ASSERT(ov);
+	RSB_ASSERT(th_id>=0);
+#if 0
+	RSB_GET_LOCK_INTERVALS(&(lock->olock),th_id,roff,m,coff,k);
+#endif
+	errval = rsb__do_mv_lock_release_single(lock,th_id,ov);
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(failure,RSB_ERRM_ES);
+	}
+	if(is_reduce_only)
+		goto reduce_ok;
+	else
+	       	goto ok;
+reduce_ok:
+#if 0
+	RSB_ASSERT(roff>=0); RSB_ASSERT(m>=0); RSB_ASSERT(th_id>=0);
+	if(RSB_LOUD_MVL_TESTING)
+		RSB_INFO("freeing interval %d .. %d on master vector (thread %d) \n",roff,roff+m,th_id);
+	/* it may still be the master vector, or a random pointer :) */
+	rsb__do_lock_release(&(lock->olock),th_id);
+#endif
+	goto ok;
+failure:
+#if 0
+  	RSB_ASSERT(roff>=0); RSB_ASSERT(m>=0); RSB_ASSERT(th_id>=0);
+  	if(RSB_LOUD_MVL_TESTING)
+  		RSB_INFO("not freeing interval %d .. %d\n",roff,roff+m);
+#endif
+ok:
+	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_bool_t rsb_do_is_bitmap_blank(rsb_bitmap_data_t *bmap, rsb_coo_idx_t r, rsb_coo_idx_t c)
+{
+	/**
+	 * 	\ingroup gr_internals
+	 * 	FIXME: new, untested
+	 * */
+	size_t bs = RSB_WORDS_PER_BITMAP(r,c);
+	rsb_coo_idx_t i;
+	for(i=0;i<bs;++i)
+	{
+		if(bmap[i])
+			return RSB_BOOL_FALSE;
+	}
+	return RSB_BOOL_TRUE;
+}
+
+static rsb_bool_t rsb_do_is_bitvector_blank(rsb_bitmap_data_t *bmap, rsb_coo_idx_t c)
+{
+	/**
+	 * 	\ingroup gr_internals
+	 * */
+	return rsb_do_is_bitmap_blank(bmap,1,c);
+}
+
+rsb_bool_t rsb_do_mv_lock_is_used(struct rsb_mv_lock_t *lock)
+{
+	/**
+	 * 	\ingroup gr_internals
+	 * */
+	rsb_coo_idx_t nvi;
+	for(nvi=0;nvi<lock->nv;++nvi)
+	{
+		struct rsb_bti_lock_struct * vlock=&(lock->locks[nvi]);
+		if(!rsb_do_is_bitvector_blank(vlock->bmap,vlock->bsz))
+			return RSB_BOOL_TRUE;
+	}
+	return RSB_BOOL_FALSE;
+}
+
+rsb_bool_t rsb_do_mv_lock_is_tainted(struct rsb_mv_lock_t *lock)
+{
+	/**
+	 * 	\ingroup gr_internals
+	 * */
+	rsb_coo_idx_t nvi;
+	for(nvi=0;nvi<lock->nv;++nvi)
+	{
+		struct rsb_bti_lock_struct * vlock=&(lock->locks[nvi]);
+		if(!rsb_do_is_bitvector_blank(vlock->tmap,vlock->bsz))
+			return RSB_BOOL_TRUE;
+	}
+	return RSB_BOOL_FALSE;
+}
+
+rsb_err_t rsb__do_mv_lock_free(struct rsb_mv_lock_t *lock)
+{
+	/**
+	 * 	\ingroup gr_internals
+	 * */
+	rsb_coo_idx_t nvi;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+#if !RSB_INHIBIT_MULTIVECTOR
+	rsb_bool_t tainted = rsb_do_mv_lock_is_tainted(lock);
+#endif /* RSB_INHIBIT_MULTIVECTOR */
+	if(RSB_LOUD_MVL_TESTING)
+	{
+		if(lock->nv)
+			RSB_INFO("taint maps:\n");
+		for(nvi=0;nvi<lock->nv;++nvi)
+			rsb__do_dump_bitmap(lock->locks[nvi].tmap,1,lock->locks[nvi].bsz),RSB_INFO("\n");
+	}
+
+	/* FIXME: TODO: reduce all of the vectors, here. */
+#if !RSB_INHIBIT_MULTIVECTOR
+#if RSB_INHIBIT_REDUCE
+	/* no reduce. this will produce wrong results, of course */
+#else /* RSB_INHIBIT_REDUCE */
+	if(rsb_do_mv_lock_is_used(lock))
+	{
+		errval = RSB_ERR_INTERNAL_ERROR;
+		RSB_PERR_GOTO(err,"no vector should be in use before reducing!");
+	}
+#if 0
+	/* this approach is likely to be faster for high nnz/row cases */
+	if(RSB_LOUD_MVR_TESTING)
+		RSB_INFO("summing up (%d) vectors to the master (strided %d)\n",lock->nv,lock->incov);
+	if(RSB_LOUD_MVR_TESTING)
+		RSB_INFO("on master vector:\n"),
+			RSB_DO_ERROR_CUMULATE(errval,rsb__do_print_some_vector_stats(lock->ov,lock->typecode,lock->itl));
+	for(nvi=0;nvi<lock->nv;++nvi)
+	{
+		if(RSB_LOUD_MVR_TESTING)
+			RSB_INFO("on vector %d:\n",nvi),
+			RSB_DO_ERROR_CUMULATE(errval,rsb__do_print_some_vector_stats(lock->mv[nvi],lock->typecode,lock->itl));
+		rsb__vectors_left_sum_reduce_and_zero(lock->ov,lock->mv[nvi],lock->typecode,lock->itl,lock->incov,0);
+	}
+	if(RSB_LOUD_MVR_TESTING)
+		RSB_INFO("\n");
+#else
+#if 1
+	#pragma omp parallel shared(tainted) RSB_NTC
+	{
+		rsb_thr_t th_id = omp_get_thread_num();
+		rsb_coo_idx_t oincy=lock->incov,rh,r0;
+		rsb_char_t *ov=NULL;
+		extern struct rsb_session_handle_t rsb_global_session_handle;
+
+		if(th_id>=lock->nv)
+			goto skip;
+		if(th_id >= rsb_global_session_handle.rsb_want_threads)
+			goto skip;
+
+		while(tainted)
+		{
+
+			if(RSB_LOUD_MVR_TESTING)
+			{
+				if(lock->nv)
+					RSB_INFO("taint maps:\n");
+				for(nvi=0;nvi<lock->nv;++nvi)
+					rsb__do_dump_bitmap(lock->locks[nvi].tmap,1,lock->locks[nvi].bsz),RSB_INFO(" (%d)\n",rsb_do_mv_lock_is_tainted(lock));
+				if(lock->nv)
+					RSB_INFO("use maps:\n");
+				for(nvi=0;nvi<lock->nv;++nvi)
+					rsb__do_dump_bitmap(lock->locks[nvi].bmap,1,lock->locks[nvi].bsz),RSB_INFO(" (%d)\n",rsb_do_mv_lock_is_tainted(lock));
+			}
+
+			ov=lock->ov;
+			#pragma omp critical (rsb_lock_crs)
+			{ rsb__do_pick_candidate_interval_for_reduce(lock,th_id,&ov,&r0,&rh); }
+	
+			if(ov && ov!=lock->ov)
+			{
+				if(RSB_LOUD_MVR_TESTING)
+					RSB_INFO("%d .. %d (incov = %d)\n",r0,rh,oincy);
+				rsb__vectors_left_sum_reduce_and_zero(lock->ov,ov,lock->typecode,rh,oincy,r0);/*wrong ?*/
+#if 0
+				rsb__vectors_left_sum_reduce_and_zero(lock->ov,ov,lock->typecode,lock->itl,oincy,0);/*~works*/
+				rsb__vectors_left_sum_reduce_and_zero(lock->ov,ov,lock->typecode,lock->itl,lock->incov,0);/*~works*/
+#endif
+	                     	#pragma omp critical (rsb_lock_crs)
+	                   	{ rsb__do_release_candidate_interval_for_reduce(lock,th_id,ov,r0,rh); }
+			}
+			#pragma omp critical (rsb_lock_crs)
+			{ tainted = rsb_do_mv_lock_is_tainted(lock); }
+		}
+skip:
+		#pragma omp barrier
+		RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS;
+	}
+#else
+	if(RSB_LOUD_MVR_TESTING)
+	{
+		if(lock->nv)
+			RSB_INFO("taint maps:\n");
+		for(nvi=0;nvi<lock->nv;++nvi)
+			rsb__do_dump_bitmap(lock->locks[nvi].tmap,1,lock->locks[nvi].bsz),RSB_INFO("\n");
+	}
+	/* serial approach, for debugging purposes (very slow) ; it should be used to debug the rest */
+	for(nvi=0;nvi<lock->nv;++nvi)
+		rsb__vectors_left_sum_reduce_and_zero(lock->ov,lock->mv[nvi],lock->typecode,lock->itl,lock->incov,0);
+#endif
+#endif
+#endif /* RSB_INHIBIT_REDUCE */
+#endif /* RSB_INHIBIT_MULTIVECTOR */
+	goto nosync;
+nosync:
+	if(!lock)
+	{	
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	for(nvi=0;nvi<lock->nv;++nvi)
+		RSB_CONDITIONAL_FREE(lock->mv[nvi]);
+
+	for(nvi=lock->nv-1;nvi>=0;--nvi)
+		rsb_do_btils_free(&(lock->locks[nvi]));
+
+	RSB_DO_ERROR_CUMULATE(errval,rsb__do_lock_free(&(lock->olock)));
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_mv_lock_init(struct rsb_mv_lock_t *lock, rsb_int_t num_threads, rsb_submatrix_idx_t subms, const struct rsb_mtx_t * mtxAp, enum rsb_op_flags_t op_flags, rsb_trans_t transA, rsb_char_t * ov, rsb_coo_idx_t incov)
+{
+	/**
+	 * 	\ingroup gr_internals
+	 * */
+	rsb_coo_idx_t nvi,nlevels;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_int_t th_id=0;
+	rsb_coo_idx_t tn;
+
+	if(!lock || !mtxAp)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(lock);
+	errval = rsb__do_lock_init(&(lock->olock),num_threads,subms,mtxAp,op_flags);
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err0,RSB_ERRM_ES);
+	}
+	/* FIXME: we need a policy for this */
+#if RSB_INHIBIT_MULTIVECTOR
+	lock->nv=0;	/* FIXME: for debugging purposes */
+#else
+#if 0
+  	lock->nv = RSB_MIN(num_threads-1,((mtxAp->nnz+1)/(4*mtxAp->nr+1)));
+  	lock->nv = RSB_MIN(num_threads-1,1);/*FIXME: temporary */
+  	lock->nv = RSB_MIN(num_threads-1,rsb__submatrices(mtxAp));/*FIXME: temporary */
+  	lock->nv=1;	/* FIXME: for debugging purposes */
+#endif
+	lock->nv = RSB_MIN(num_threads-1,mtxAp->all_leaf_matrices_n-1);/* FIXME: temporary */
+#endif /* RSB_INHIBIT_MULTIVECTOR */
+	if(RSB_LOUD_MVR_TESTING)
+		RSB_INFO("Will use %d temporary vectors for %d threads\n",lock->nv,num_threads);
+
+	RSB_ASSERT(lock->nv<RSB_CONST_MAX_SUPPORTED_CORES);
+	lock->el_size=mtxAp->el_size;
+	lock->typecode=mtxAp->typecode;
+	lock->itl = rsb_do_get_rows_of(mtxAp,transA);
+	lock->ov=ov;
+	lock->incov=incov;
+	lock->transA=transA;
+	nlevels = rsb__get_recursive_matrix_depth(mtxAp);
+	for(tn=0;tn<RSB_CONST_MAX_SUPPORTED_CORES; ++tn)
+		lock->it[tn]=
+		lock->in[tn]=RSB_MARKER_COO_VALUE;
+	for(nvi=0;nvi<lock->nv;++nvi)
+		if((errval = rsb_do_btils_init(&(lock->locks[nvi]),lock->itl,nlevels))!=RSB_ERR_NO_ERROR)
+		{
+			RSB_PERR_GOTO(err1,RSB_ERRM_ES);
+		}
+	/* time to allocate the temporary vectors */
+	for(nvi=0;nvi<lock->nv;++nvi)
+		if((lock->mv[nvi]=rsb__calloc(lock->el_size*lock->itl))==NULL)
+		{
+			RSB_PERR_GOTO(err2,RSB_ERRM_ES);
+		}
+	for(th_id=0;th_id<num_threads;++th_id)
+		lock->last_subm[th_id]=RSB_SUBM_IDX_MARKER;
+	/* the multivector lock is allocated. nice! */
+	
+	return RSB_ERR_NO_ERROR;
+err2:
+	for(nvi=0;nvi<lock->nv;++nvi)
+		RSB_CONDITIONAL_FREE(lock->mv[nvi]);
+err1:
+	for(nvi=0;nvi<lock->nv;++nvi)
+		rsb_do_btils_free(&(lock->locks[nvi]));
+	RSB_DO_ERROR_CUMULATE(errval,rsb__do_lock_free(&(lock->olock)));
+err0:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_bool_t rsb__do_mv_lock_get(struct rsb_mv_lock_t *lock ,rsb_thr_t th_id, rsb_coo_idx_t roff, rsb_coo_idx_t m, rsb_coo_idx_t coff, rsb_coo_idx_t k, rsb_submatrix_idx_t subm, rsb_trans_t transA, rsb_char_t **ov, rsb_coo_idx_t *incov)
+{
+	/**
+	 * 	\ingroup gr_internals
+	 * */
+	rsb_coo_idx_t nvi = RSB_MARKER_COO_VALUE;
+	rsb_bool_t was_looping=(lock->last_subm[th_id]==subm);
+	rsb_coo_idx_t i,j;
+	if(!ov)
+		return RSB_BOOL_FALSE;
+	if(rsb_do_lock_check_if_matrix_done(&(lock->olock),subm))
+	{
+		if(RSB_LOUD_MVL_TESTING)
+			RSB_INFO("matrix %d is already locked (for thread %d)\n",subm,th_id);
+		/* if the thread was looping on this mtxAp, there's no reason to do so anymore (mtxAp locked or done) */
+		if(was_looping)
+			lock->last_subm[th_id]=RSB_SUBM_IDX_MARKER;
+
+		return RSB_BOOL_FALSE;/* nothing to do: matrix done */
+	}
+	/* first, we try to get a lock on the master vector */
+	if(rsb__do_lock_get(&(lock->olock),th_id,roff,m,coff,k,subm,transA))
+	{
+		if(RSB_LOUD_MVL_TESTING)
+			RSB_INFO("locking matrix %d [%d...%d) to thread %d on master vector\n",subm,roff,roff+m,th_id);
+		goto found;
+	}
+	/* if the master vector was not available, we check if this thread was in a loop on this matrix */
+	if(!was_looping)
+	{
+		/* it was not looping on this submatrix */
+		if(lock->last_subm[th_id]==RSB_SUBM_IDX_MARKER)
+		{
+			/* it was not looping at all; 
+			 * now, if the thread comes back here with the value unchanged, the loop will be detected */
+			lock->last_subm[th_id]=subm;
+			if(RSB_LOUD_MVL_TESTING)
+				RSB_INFO("not locking matrix %d to thread %d : waiting for a loop\n",subm,th_id);
+			return RSB_BOOL_ALMOST_TRUE;
+		}
+		else
+			;/*  the thread is looping on another submatrix: let it loop there, then */
+		return RSB_BOOL_FALSE;
+	}
+
+	/* the thread was looping, and then it has the right to use a temporary vector (if any) */
+	if(RSB_DOES_TRANSPOSE(transA))
+	{ RSB_SWAP(rsb_coo_idx_t,k,m);RSB_SWAP(rsb_coo_idx_t,coff,roff); } /* FIXME: a dirty trick */
+	if((lock->olock.want_symlock == RSB_BOOL_TRUE))
+	{
+		for(nvi=0;nvi<lock->nv;++nvi)
+			if(rsb_do_btils_lock_get_sym(&(lock->locks[nvi]),roff,m+roff,coff,k+coff,transA,&i,&j))
+			{
+				lock->it[th_id]=j,lock->in[th_id]=i;
+				goto found;
+			}
+	}
+	else
+	{
+		for(nvi=0;nvi<lock->nv;++nvi)
+			if(rsb_do_btils_lock_get(&(lock->locks[nvi]),roff,m+roff,transA,&i,&j))/* FIXME */
+			{
+				lock->in[th_id]=i,lock->it[th_id]=RSB_MARKER_COO_VALUE;
+				goto found;
+			}
+	}
+	/* TODO:  are we sure that pushing the thread for looping is always the best thing ? */
+	/* TODO: implement here the task of picking up some vector and "reducing" it (not here, but in a "returned" signalling)! */
+	return RSB_BOOL_FALSE;
+found:
+	/* found a temporary vector to perform computation */
+	if(RSB_LOUD_MVL_TESTING)
+		if(nvi != RSB_MARKER_COO_VALUE)
+			RSB_INFO("locking interval %d .. %d on vector %d to thread %d\n",roff,roff+m,nvi,th_id);
+
+	lock->last_subm[th_id]=RSB_SUBM_IDX_MARKER;
+	if(nvi == RSB_MARKER_COO_VALUE)
+		*ov=lock->ov,			/* we'll work on the master vector */
+		*incov=lock->incov;			/* unchanged */
+	else
+		*ov = RSB_MV_OFFSET(lock,nvi,0),	/* we'll work on an auxiliary vector */
+		*incov=1;
+	RSB_BITVECTOR_SET(lock->olock.bmap,lock->olock.subms,subm);
+	return RSB_BOOL_TRUE;
+}
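+
+/*
+ * Return protocol of rsb__do_mv_lock_get, as implemented above:
+ * RSB_BOOL_TRUE means an interval was locked (on the master vector or on a
+ * temporary one, with *ov and *incov set accordingly); RSB_BOOL_FALSE means
+ * nothing could be locked (or the submatrix is done); RSB_BOOL_ALMOST_TRUE
+ * asks the caller to retry, so that a repeated request on the same submatrix
+ * is detected as a loop and may then be served with a temporary vector.
+ */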
+
+rsb_err_t rsb__do_release_candidate_interval_for_reduce(struct rsb_mv_lock_t *lock, rsb_thr_t th_id, rsb_char_t *ov, rsb_coo_idx_t roff, rsb_coo_idx_t m)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	/* retrieve working interval */
+	/* ... */
+	rsb_coo_idx_t m0=0,m1=0;
+	RSB_GET_LOCK_INTERVAL_W(&(lock->olock),th_id,m0,m1);
+	rsb__do_lock_release(&(lock->olock),th_id);
+	rsb__do_mv_lock_release(lock,th_id,ov);
+	if(RSB_LOUD_MVR_TESTING)
+		RSB_INFO("releasing reduce interval %d .. %d from thread %d\n",m0,m1,th_id);
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_pick_candidate_interval_for_reduce(struct rsb_mv_lock_t *lock, rsb_thr_t th_id, rsb_char_t ** ov, rsb_coo_idx_t * roff, rsb_coo_idx_t * m)
+{
+	/*
+		pick an interval which is free on both the master vector AND some vector v_i, and candidate for reducing
+		we begin with the last temporary vector first
+	*/
+	rsb_coo_idx_t nvi;
+	rsb_coo_idx_t i;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!lock)
+		goto err;
+	/* we start looking for a vector to reduce with the last one */
+	for(nvi=lock->nv-1;nvi>=0;--nvi)
+	{
+		struct rsb_bti_lock_struct * vlock=&(lock->locks[nvi]);
+		if(RSB_LOUD_MVR_TESTING)
+			RSB_INFO("looking for tainted subvectors in temporary vector %d\n",nvi),
+			RSB_INFO("taint map: "),rsb__do_dump_bitmap(vlock->tmap,1,vlock->bsz),RSB_INFO("\n"),
+			RSB_INFO("use   map: "),rsb__do_dump_bitmap(vlock->bmap,1,vlock->bsz),RSB_INFO("\n");
+		for(i=0;i<vlock->bsz;++i)
+		{
+			if(RSB_BITVECTOR_GET(vlock->tmap,vlock->bsz,i) && rsb_do_btils_lock_probe_inner(vlock,i))
+			{
+				/* let's see if the master vector has available subvector i (TODO) */
+				/* first step is to obtain the bounds of the lock */
+				rsb_coo_idx_t m0,m1;
+				rsb_bool_t goir = RSB_BOOL_FALSE;
+
+				errval = rsb_do_get_interval_info_from_btils_lock(vlock,i,&m0,&m1);
+				if(RSB_LOUD_MVR_TESTING)
+					RSB_INFO("temporary vector %d is tainted at interval %d\n",nvi,i);
+
+				if(RSB_LOUD_MVR_TESTING)
+					RSB_INFO("let's see if the master vector has available subvector %d at [%d .. %d]... ",nvi,m0,m1);
+				goir = rsb_do_lock_check_interval(&(lock->olock),th_id,m0,m1-m0,m0,m1-m0,lock->transA);
+				if(RSB_LOUD_MVR_TESTING)
+					if(!goir)
+						RSB_INFO("no\n");
+				if(goir)
+				{
+					if(RSB_LOUD_MVR_TESTING)
+						RSB_INFO("yes. will lock now [%d .. %d].\n",m0,m1);
+					/* The interval is free on both subvectors.
+					 *  We lock the interval on both vectors, then.
+					 *  */
+					/* mark the interval as not tainted anymore, but locked, on nvi  */
+					RSB_BITVECTOR_SET(vlock->bmap,vlock->bsz,i);
+					RSB_BITVECTOR_UNSET(vlock->tmap,vlock->bsz,i);
+					/* mark the interval as locked, on the master vector  */
+					RSB_DO_LOCK_INTERVAL(&(lock->olock),th_id,m0,m1-m0);/* FIXME */
+
+					lock->it[th_id]=i;/* FIXME */
+/*					RSB_BITVECTOR_SET(lock->ir,RSB_CONST_MAX_SUPPORTED_CORES,th_id); */
+					
+					/* let's give the vector info */
+					*ov = RSB_MV_OFFSET(lock,nvi,0);
+					*roff=m0;
+					*m=m1-m0;
+					goto done;
+				}
+			}
+#if 0
+			else
+			if(RSB_LOUD_MVR_TESTING)
+					RSB_INFO("interval %d in vector %d is not available\n",i,nvi);
+#endif
+		}
+	}
+	if(RSB_LOUD_MVR_TESTING)
+		RSB_INFO("there are no available taint vectors.\n");
+	/* no tainted subvectors found, or no free ones found. */
+	/* in this case, we could look for a common subvector to both some v_i and v_j, i<j, and reduce it into v_i */
+	goto done;
+err:
+	errval = RSB_ERR_BADARGS;
+done:
+	return errval;
+}
+
+rsb_err_t rsb_do_perform_partial_reduce(struct rsb_mv_lock_t *lock, rsb_thr_t th_id, rsb_trans_t transA, rsb_coo_idx_t incv)
+{
+	/* FIXME: this is only an example routine */
+	rsb_char_t * ov=NULL;
+	rsb_coo_idx_t roff, m;
+	
+	rsb__do_pick_candidate_interval_for_reduce(lock,th_id,&ov,&roff,&m);
+
+	if(!ov)
+		goto done;
+	RSB_ASSERT(lock->ov);
+	if(RSB_LOUD_BTILS_TESTING)
+		RSB_INFO("on thread %d about to reduce %d .. %d\n",th_id,roff,m+roff);
+	rsb__vectors_left_sum_reduce_and_zero(lock->ov,ov,lock->typecode,m,incv,roff);
+	/* perform reduce here */
+	rsb__do_release_candidate_interval_for_reduce(lock,th_id,ov,roff,m);
+	/*
+	(with no symmetry or transposition, here)
+	lock it for both vectors
+	reduce the corresponding subvector (via sum), and zero it on the v_i vector
+	update v_i's taint vector accordingly
+	release the lock
+	*/
+done:
+	return RSB_ERR_NO_ERROR;
+}
+
+#if RSB_WANT_DO_LOCK_TEST
+rsb_err_t rsb__do_lock_test(void)
+{
+	/** 
+	 * 	\ingroup gr_internals
+	 * 	FIXME: NEW, UNFINISHED
+	 **/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_bti_lock_struct lock;
+       	rsb_coo_idx_t itl=8,nlevels = RSB_UPPER_BOUNDING_LOG2(itl);
+	rsb_int in,it,i;
+       	rsb_trans_t transA = RSB_TRANSPOSITION_N;
+
+	RSB_ASSERT(nlevels==3);
+	if((errval = rsb_do_btils_init(&lock,itl,nlevels))!=RSB_ERR_NO_ERROR)
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	RSB_ASSERT( rsb_do_btils_lock_get(&lock,2,4,transA,&in,&it));
+	RSB_ASSERT( rsb_do_btils_lock_get(&lock,4,8,transA,&in,&it));
+	RSB_ASSERT(!rsb_do_btils_lock_get(&lock,0,4,transA,&in,&it));
+	RSB_ASSERT(!rsb_do_btils_lock_get(&lock,0,8,transA,&in,&it));
+	RSB_ASSERT(!rsb_do_btils_lock_get(&lock,2,3,transA,&in,&it));
+	RSB_ASSERT( rsb_do_btils_lock_get(&lock,0,1,transA,&in,&it));
+	RSB_ASSERT( rsb_do_btils_lock_get(&lock,1,2,transA,&in,&it));
+	RSB_ASSERT(!rsb_do_btils_lock_get(&lock,0,2,transA,&in,&it));
+	rsb_do_btils_lock_release(&lock,1,2);
+	RSB_ASSERT(!rsb_do_btils_lock_get(&lock,0,2,transA,&in,&it));
+	rsb_do_btils_lock_release(&lock,0,1);
+	RSB_ASSERT( rsb_do_btils_lock_get(&lock,0,2,transA,&in,&it));
+	RSB_ASSERT(!rsb_do_btils_lock_get(&lock,2,3,transA,&in,&it));
+	RSB_ASSERT(!rsb_do_btils_lock_get(&lock,3,4,transA,&in,&it));
+	RSB_ASSERT(!rsb_do_btils_lock_get(&lock,4,5,transA,&in,&it));
+	RSB_ASSERT(!rsb_do_btils_lock_get(&lock,6,7,transA,&in,&it));
+	RSB_ASSERT(!rsb_do_btils_lock_get(&lock,7,8,transA,&in,&it));
+	rsb_do_btils_lock_release(&lock,4,8);
+	RSB_ASSERT( rsb_do_btils_lock_get(&lock,4,5,transA,&in,&it));
+	RSB_ASSERT( rsb_do_btils_lock_get(&lock,6,7,transA,&in,&it));
+	RSB_ASSERT( rsb_do_btils_lock_get(&lock,7,8,transA,&in,&it));
+	RSB_ASSERT(!rsb_do_btils_lock_get(&lock,7,8,transA,&in,&it));
+	rsb_do_btils_free(&lock);
+
+	itl=10,nlevels = RSB_UPPER_BOUNDING_LOG2(itl);/* 4 */
+	RSB_ASSERT(nlevels==4);
+	if((errval = rsb_do_btils_init(&lock,itl,nlevels))!=RSB_ERR_NO_ERROR)
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	RSB_ASSERT( rsb_do_btils_lock_get(&lock,0,itl,transA,&in,&it));
+	for(i=0;i<itl;++i)
+		RSB_ASSERT(!rsb_do_btils_lock_get(&lock,i,i+1,transA,&in,&it));
+	for(i=0;i<RSB_MIDDLE(itl);++i)
+		RSB_ASSERT(!rsb_do_btils_lock_get(&lock,2*i,2*i+1,transA,&in,&it));
+	rsb_do_btils_lock_release(&lock,0,itl);
+	for(i=0;i<RSB_MIDDLE(itl);++i)
+		RSB_ASSERT( rsb_do_btils_lock_get(&lock,2*i,2*i+1,transA,&in,&it)),
+		RSB_ASSERT(!rsb_do_btils_lock_get(&lock,2*i,2*i+1,transA,&in,&it));
+	for(i=0;i<RSB_MIDDLE(itl);++i)
+		rsb_do_btils_lock_release(&lock,2*i,2*i+1);
+	RSB_ASSERT( rsb_do_btils_lock_get(&lock,0,RSB_MIDDLE(itl),transA,&in,&it)),
+	RSB_ASSERT(!rsb_do_btils_lock_get(&lock,0,RSB_MIDDLE(itl),transA,&in,&it)),
+	RSB_ASSERT(!rsb_do_btils_lock_get(&lock,0,RSB_MIDDLE(itl),transA,&in,&it)),
+	rsb_do_btils_free(&lock);
+
+	/*
+	 * TODO:
+	 * need symmetry and transposition support.
+	 * need routines for reducing the temporary vectors after 'failed' double loops
+	 * */
+	RSB_INFO("binary tree lock test ok\n");
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+       	struct rsb_mv_lock_t lock;
+	rsb_int_t num_threads=4,th_id=0;
+	struct rsb_mtx_t *mtxAp=NULL;
+	struct rsb_mtx_t *submatrix=NULL;
+	enum rsb_op_flags_t op_flags = RSB_OP_FLAG_DEFAULT;
+	rsb_submatrix_idx_t si=0;
+	rsb_char_t * y=NULL,*oy=NULL,*oY=NULL,*oX=NULL;
+       	rsb_submatrix_idx_t subms=0;
+	rsb_coo_idx_t incv=1;
+	mtxAp = rsb__generate_dense_lower_triangular(2000,NULL,RSB_NUMERICAL_TYPE_DEFAULT);
+	if(!mtxAp)
+	{ RSB_PERR_GOTO(erri,RSB_ERRM_ES); }
+       	subms=mtxAp->all_leaf_matrices_n;
+	y = rsb__calloc(mtxAp->el_size*mtxAp->nr*incv);
+	if(!y)
+	{ RSB_PERR_GOTO(erri,RSB_ERRM_ES); }
+	if((errval = rsb__do_mv_lock_init(&lock,num_threads,subms,mtxAp,op_flags,transA,y,incv))!=RSB_ERR_NO_ERROR)
+	{ RSB_PERR_GOTO(erri,RSB_ERRM_ES); }
+	
+	submatrix=mtxAp->all_leaf_matrices[si].mtxlp;
+	RSB_ASSERT(rsb__do_mv_lock_get(&lock,th_id,submatrix->roff,submatrix->nr,submatrix->coff,submatrix->nc,si,transA,&oy,&incv));
+	RSB_ASSERT(!rsb__do_mv_lock_get(&lock,th_id+1,submatrix->roff,submatrix->nr,submatrix->coff,submatrix->nc,si+1,transA,&oY,&incv));
+	RSB_ASSERT( rsb__do_mv_lock_get(&lock,th_id+1,submatrix->roff,submatrix->nr,submatrix->coff,submatrix->nc,si+1,transA,&oY,&incv));
+	RSB_ASSERT(!rsb__do_mv_lock_get(&lock,th_id,submatrix->roff,submatrix->nr,submatrix->coff,submatrix->nc,si,transA,&oy,&incv));
+	RSB_ASSERT(!rsb__do_mv_lock_get(&lock,th_id+2,submatrix->roff,submatrix->nr,submatrix->coff,submatrix->nc,si+2,transA,&oX,&incv));
+	RSB_ASSERT( rsb__do_mv_lock_get(&lock,th_id+2,submatrix->roff,submatrix->nr,submatrix->coff,submatrix->nc,si+2,transA,&oX,&incv));
+	RSB_ASSERT(!rsb__do_mv_lock_get(&lock,th_id+3,submatrix->roff,submatrix->nr,submatrix->coff,submatrix->nc,si+3,transA,&oy,&incv));
+	RSB_ASSERT(!rsb__do_mv_lock_get(&lock,th_id+3,submatrix->roff,submatrix->nr,submatrix->coff,submatrix->nc,si+3,transA,&oy,&incv));
+	RSB_ASSERT(!rsb__do_mv_lock_release(&lock,th_id,oy));
+	RSB_ASSERT(!rsb__do_mv_lock_release(&lock,th_id+2,oY));
+	RSB_ASSERT(!rsb__do_mv_lock_release(&lock,th_id+1,oX));
+	RSB_ASSERT(!rsb__do_mv_lock_release(&lock,th_id,oy));	/* harmless duplicate */
+	RSB_ASSERT(!rsb__do_mv_lock_release(&lock,th_id+1,oX));	/* harmless duplicate */
+
+	rsb_do_perform_partial_reduce(&lock,th_id,transA,incv);
+	rsb_do_perform_partial_reduce(&lock,th_id+1,transA,incv);
+
+	/* 
+		The following idea was inspired by Frigo's 'reducers & hyperobjects' paper.
+		To support it, we could extend rsb_bool_t to handle a trivalent logic:
+		RSB_BOOL_FALSE=0, RSB_BOOL_TRUE=1, RSB_BOOL_ALMOST=2.
+		When encountering RSB_BOOL_ALMOST, the thread would perform the reducing strategy.
+		After a detected loop, the lock (which could effectively be turned into a scheduler)
+		would "propose" that the thread perform a "partial reduce".
+	 */
+	/* TODO: this way of handling things forces 'incx' to be 1, then */
+	RSB_INFO("FIXME: missing handling of after-reduce release.\n");
+	goto oki;
+erri:
+	RSB_INFO("binary tree based multi-lock test problems..\n");
+oki:
+	RSB_MTX_FREE(mtxAp);
+	RSB_CONDITIONAL_FREE(y);
+	rsb__do_mv_lock_free(&lock);
+}
+	goto ok;
+ok:
+	RSB_INFO("binary tree based multi-lock test ok\n");
+	return RSB_ERR_NO_ERROR;
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+#endif /* RSB_WANT_DO_LOCK_TEST */
+
+/*  END EXPERIMENTAL CODE */
+/* @endcond */
diff --git a/rsb_lock.h b/rsb_lock.h
new file mode 100644
index 0000000..563ea50
--- /dev/null
+++ b/rsb_lock.h
@@ -0,0 +1,148 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains locks for sparse recursive multicore operations.
+ * */
+
+#ifndef RSB_LOCK_H_INCLUDED
+#define RSB_LOCK_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "rsb_internals.h"
+
+#define RSB__TRSV_OUT  0
+#define RSB__TRSV_OUT_ 0
+#define RSB__TRSV_OUT__ 0
+
+#define RSB_CONST_MIN_SUPPORTED_CORES 	1
+#define RSB_CONST_MAX_SUPPORTED_CORES 	RSB_CONST_MAX_SUPPORTED_THREADS /* The maximum number of cores (TODO: support any number of cores) */
+#define RSB_CONST_MAX_SUPPORTED_TEMPORARY_VECTORS RSB_CONST_MAX_SUPPORTED_CORES
+
+typedef int rsb_thr_t;
+
+/*!
+ * \ingroup gr_internals
+ * \brief An internal, helper structure.
+ */
+struct rsb_rows_lock_struct_t
+{
+	/* FIXME : EXPERIMENTAL,NEW  */
+	/* FIXME : THE LOCK SHOULD BE SIZED PROPORTIONALLY TO THE MATRIX, INSTEAD!  */
+	rsb_coo_idx_t coresrowf[RSB_CONST_MAX_SUPPORTED_CORES];	/*  first locked row, for each thread */
+	rsb_coo_idx_t coresrowl[RSB_CONST_MAX_SUPPORTED_CORES];	/*  last  locked row, for each thread */
+	rsb_coo_idx_t corescolf[RSB_CONST_MAX_SUPPORTED_CORES];	/*  first locked col, for each thread */
+	rsb_coo_idx_t corescoll[RSB_CONST_MAX_SUPPORTED_CORES];	/*  last  locked col, for each thread */
+	rsb_bitmap_data_t * bmap;	/* done matrices bitmap */
+	rsb_submatrix_idx_t subms;	/* all matrices count */
+	rsb_submatrix_idx_t dm;	/* done matrices count */
+	rsb_submatrix_idx_t dr;	/* last done row */
+	rsb_int_t nt;				/* number of threads */
+	rsb_bool_t want_symlock;	/* symmetrical lock -- will lock both row and column region of output vector */
+	rsb_bool_t want_fake_lock;	/* fake lock -- will allow concurrent writes (debug only) */
+};
+
+/*!
+ * \ingroup gr_internals
+ * \brief An internal, helper structure.
+ */
+struct rsb_bti_lock_struct
+{
+	rsb_coo_idx_t mvleaves;	/* maximal vertical leaves (>=itl) (2**(nlevels)) */
+	rsb_coo_idx_t nlevels;	/* number of subdivisions  */
+	rsb_coo_idx_t bsz;		/* (=2*mvleaves-1)*/
+	rsb_coo_idx_t itl;		/* lock interval total length (e.g.: matrix dimension) */
+	rsb_bitmap_data_t * bmap;	/* done intervals bitmap */
+	rsb_bitmap_data_t * tmap;	/* tainted intervals bitmap */
+};
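+
+/* e.g.: for itl=8 and nlevels=3, mvleaves=8 and bsz=15: bmap and tmap hold
+ * one bit per node of a complete binary tree over the interval (0 .. itl). */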
+
+/*!
+ * \ingroup gr_internals
+ * \brief An internal, helper structure.
+ */
+struct rsb_mv_lock_t
+{
+	/** 
+	 * NEW: EXPERIMENTAL
+	 * */
+	struct rsb_rows_lock_struct_t olock;				/* output vector lock */
+	struct rsb_bti_lock_struct locks[RSB_CONST_MAX_SUPPORTED_TEMPORARY_VECTORS];	/* it has no sense to have more locks than cores */
+	size_t el_size;							/* numerical element size */
+	rsb_type_t typecode;						/* type code */
+	rsb_coo_idx_t nv;						/* number of vectors  */
+	rsb_char_t * mv[RSB_CONST_MAX_SUPPORTED_TEMPORARY_VECTORS];		/* multiple vectors */
+	rsb_char_t * ov;						/* master (output) vector */
+	rsb_coo_idx_t itl;						/* interval total length */
+	rsb_submatrix_idx_t last_subm[RSB_CONST_MAX_SUPPORTED_CORES];	/* last (tried unsuccessfully) matrix, per thread */
+	rsb_coo_idx_t   in[RSB_CONST_MAX_SUPPORTED_CORES];		/* interval index, non transposed */
+	rsb_coo_idx_t   it[RSB_CONST_MAX_SUPPORTED_CORES];		/* interval index, transposed */
+	rsb_coo_idx_t   incov;					/* FIXME: NEW */
+	rsb_trans_t	transA;						/* FIXME: NEW */
+/*	rsb_bitmap_data_t ir[RSB_WORDS_PER_BITVECTOR(RSB_CONST_MAX_SUPPORTED_CORES)];	*/	/* is reducing ? */
+};
+
+#define RSB_WANT_SPMV_WITH_REDUCE 0
+
+#if !RSB_WANT_SPMV_WITH_REDUCE
+#define RSB_BOOL_ALMOST_TRUE 2 /* :) */
+#define rsb_spmv_lock_struct_t rsb_rows_lock_struct_t
+#define rsb_do_spmv_lock_init(LOCK,NT,SUMBS,MATRIX,OPFLAGS,TRANSA,OV,IO) rsb__do_lock_init(LOCK,NT,SUMBS,MATRIX,OPFLAGS)
+#define rsb_do_spmv_lock_free(LOCK) rsb__do_lock_free(LOCK)
+#define rsb_do_spmv_lock_release(LOCK,THID,OV) rsb__do_lock_release(LOCK,THID)
+#define rsb_do_spmv_lock_get(LOCK,THID,ROFF,M,COFF,K,SUBM,TRANSA,OV,OI) rsb__do_lock_get(LOCK,THID,ROFF,M,COFF,K,SUBM,TRANSA)
+#define RSB_DO_SPMV_LOCK_DM(LOCK) ((LOCK).dm)
+#define RSB_DO_SPMV_LOCK_DM_INC(LOCK) ((LOCK).dm)++
+#else
+#define RSB_BOOL_ALMOST_TRUE 2 /* :) */
+#define rsb_spmv_lock_struct_t rsb_mv_lock_t
+#define rsb_do_spmv_lock_init(LOCK,NT,SUMBS,MATRIX,OPFLAGS,TRANSA,OV,IO) rsb__do_mv_lock_init(LOCK,NT,SUMBS,MATRIX,OPFLAGS,TRANSA,OV,IO)
+#define rsb_do_spmv_lock_free(LOCK) rsb__do_mv_lock_free(LOCK)
+#define rsb_do_spmv_lock_release(LOCK,THID,OV) rsb__do_mv_lock_release(LOCK,THID,OV)
+#define rsb_do_spmv_lock_get(LOCK,THID,ROFF,M,COFF,K,SUBM,TRANSA,OV,OI) rsb__do_mv_lock_get(LOCK,THID,ROFF,M,COFF,K,SUBM,TRANSA,OV,OI)
+#define RSB_DO_SPMV_LOCK_DM(LOCK) ((LOCK).olock.dm)
+#define RSB_DO_SPMV_LOCK_DM_INC(LOCK) ((LOCK).olock.dm)++
+#endif /* RSB_WANT_SPMV_WITH_REDUCE */
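+
+/*
+ * A minimal usage sketch of the macros above; illustration only (hence the
+ * #if 0): nt, th_id, nsubm, the per-submatrix roff/m/coff/k values and the
+ * multiply step are placeholders belonging to the caller, not to this header.
+ */
+#if 0
+	struct rsb_spmv_lock_struct_t lock;
+	rsb_do_spmv_lock_init(&lock,nt,nsubm,mtxAp,op_flags,transA,y,incy);
+	while( RSB_DO_SPMV_LOCK_DM(lock) < nsubm )	/* until all submatrices are done */
+		if( rsb_do_spmv_lock_get(&lock,th_id,roff,m,coff,k,subm,transA,&ov,&incov) == RSB_BOOL_TRUE )
+		{
+			/* ... multiply submatrix subm, accumulating into ov ... */
+			rsb_do_spmv_lock_release(&lock,th_id,ov);
+			RSB_DO_SPMV_LOCK_DM_INC(lock);
+		}
+	rsb_do_spmv_lock_free(&lock);
+#endif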
+
+rsb_err_t rsb__do_mv_lock_init(struct rsb_mv_lock_t *lock, rsb_int_t num_threads, rsb_submatrix_idx_t subms, const struct rsb_mtx_t * mtxAp, enum rsb_op_flags_t op_flags, rsb_trans_t transA, rsb_char_t * ov, rsb_coo_idx_t incov);
+rsb_err_t rsb__do_mv_lock_free(struct rsb_mv_lock_t *lock);
+rsb_err_t rsb__do_mv_lock_release(struct rsb_mv_lock_t *lock, rsb_thr_t th_id, rsb_char_t *ov);
+rsb_bool_t rsb__do_mv_lock_get(struct rsb_mv_lock_t *lock ,rsb_thr_t th_id, rsb_coo_idx_t roff, rsb_coo_idx_t m, rsb_coo_idx_t coff, rsb_coo_idx_t k, rsb_submatrix_idx_t subm, rsb_trans_t transA, rsb_char_t **ov, rsb_coo_idx_t *incov);
+rsb_err_t rsb__do_pick_candidate_interval_for_reduce(struct rsb_mv_lock_t *lock, rsb_thr_t th_id, rsb_char_t ** ov, rsb_coo_idx_t * roff, rsb_coo_idx_t * m);
+rsb_err_t rsb__do_release_candidate_interval_for_reduce(struct rsb_mv_lock_t *lock, rsb_thr_t th_id, rsb_char_t *ov, rsb_coo_idx_t roff, rsb_coo_idx_t m);
+
+rsb_bool_t rsb__do_lock_release(struct rsb_rows_lock_struct_t *lock, rsb_thr_t th_id);
+rsb_bool_t rsb__do_lock_get(struct rsb_rows_lock_struct_t *lock,rsb_thr_t th_id, rsb_coo_idx_t roff, rsb_coo_idx_t m, rsb_coo_idx_t coff, rsb_coo_idx_t k, rsb_submatrix_idx_t subm, rsb_trans_t transA);
+rsb_err_t rsb__do_lock_init(struct rsb_rows_lock_struct_t *lock, rsb_int_t num_threads, rsb_submatrix_idx_t subms, const struct rsb_mtx_t * mtxAp, enum rsb_op_flags_t op_flags);
+rsb_err_t rsb__do_lock_free(struct rsb_rows_lock_struct_t *lock);
+#if 0
+rsb_err_t rsb__do_lock_test(void);
+#endif
+
+#endif /* RSB_LOCK_H_INCLUDED */
+
+/* @endcond */
diff --git a/rsb_mbw.c b/rsb_mbw.c
new file mode 100644
index 0000000..5a14b78
--- /dev/null
+++ b/rsb_mbw.c
@@ -0,0 +1,900 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/**
+ * @file
+ * @author Michele Martone
+ * @brief Memory bandwidth related (e.g.: read, write, and read-write) microbenchmarks.
+ */
+/* 
+ * This code is for cache-based machines.
+ * It does not make sense for machines without hardware caches (e.g.: the Cell Broadband Engine SPE cores).
+ *
+ * TODO : 
+ *  * make sure loops get unrolled
+ *  * embed time parametrability
+ *
+ */
+
+#include "rsb_common.h"
+#include "rsb.h"
+//#include <stdlib.h>	/* printf, srand */
+#include <limits.h>	/* CHAR_BIT (FIXME: we use RSB_CHAR_BIT) */
+#include <strings.h>	/* RSB_BZERO */
+/* should be sizeof(w_t) >= sizeof(void*)*/
+//typedef int w_t ;
+typedef size_t w_t ;
+//typedef double w_t ;	/**< a private typedef */
+
+size_t entropy;		/**< a private checksum only variable, necessary to avoid compiler optimization of memory scan operations */
+
+//enum {WRITE = RSB_MB_WRITE,READ = RSB_MB_READ,FLUSH,RW = RSB_MB_RW,ZERO = RSB_MB_ZERO,MEMSET = RSB_MB_MEMSET,BZERO = RSB_MB_BZERO,CHASE = RSB_MB_LINEAR_CHASE};
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+const char * rsb_mbw_s2s(rsb_flags_t btype)
+{
+	/**
+		\ingroup gr_internals
+	 	\return a pointer to a const string descriptive of this particular memory measurement
+	 */
+	switch(btype)
+	{
+		case RSB_MB_READ	:	
+			return "READ";
+			break;
+		case RSB_MB_WRITE	:	
+			return "WRITE";
+			break;
+		case RSB_MB_RW		:
+			return "RW";
+			break;
+		case RSB_MB_BZERO		:
+			return "BZERO";
+			break;
+		case RSB_MB_ZERO		:
+			return "ZERO";
+			break;
+		case RSB_MB_MEMCPY		:
+			return "MEMCPY";
+			break;
+		case RSB_MB_MEMCPY2		:
+			return "MEMCPY2";
+			break;
+		case RSB_MB_MEMSET		:
+			return "MEMSET";
+			break;
+		case RSB_MB_LINEAR_CHASE	:
+			return "LINEAR_CHASE";
+			break;
+		case RSB_MB_MORTON_CHASE	:
+			return "MORTON_CHASE";
+			break;
+		default:
+		/* error */
+		return "";
+	}
+}
+
+
+typedef int cb_t;
+typedef size_t zb_t ;
+
+static void h2c(cb_t *bip, cb_t *bjp, zb_t bz)
+{
+	/**
+	 * \ingroup gr_internals
+	 * morton to coordinate
+	 */
+        int b;
+        *bip=0;
+        *bjp=0;
+//      RSB_STDERR("-> %ld\n",bz);
+	/* this would greatly benefit of bit interleaving (absent on x86) */
+        for(b=0;b<8*sizeof(cb_t);++b)
+        {
+                /* these two lines were missing */
+                *bip|=(bz&(0x1<<(2*b+1)))>>(b+1);
+                *bjp|=(bz&(0x1<<(2*b+0)))>>(b+0);
+        }
+}
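+
+/* e.g.: bz=13 (binary 1101) de-interleaves into *bip=2 (odd bits 3,1 -> 10)
+ * and *bjp=3 (even bits 2,0 -> 11): Morton index 13 maps to coordinates (2,3). */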
+
+#if 0
+/* unused function */
+static void c2h(cb_t bi, cb_t bj, zb_t * bzp)
+{
+	/** 
+	 * \ingroup gr_internals
+	 * coordinate to morton
+	 */
+        int b;
+        *bzp=0;
+//      RSB_STDERR("b : %d %d\t",bi,bj);
+        for(b=0;b<8*sizeof(cb_t);++b)
+        {
+                *bzp|=(bi&(0x1<<b))<<(b+1);
+                *bzp|=(bj&(0x1<<b))<<(b+0);
+        }
+//      RSB_STDERR("z : %9ld\n",*bzp);
+}
+#endif
+
+static int morton_pointer_chasing_pattern_init(w_t *p, size_t dim)
+{
+	/**
+	 * \ingroup gr_internals
+	 *
+	 * NEW: document me  (e.g.: this function wants dim==4^n )
+	 * FIXME : when dim!=4^k for any k, only half of the benchmark is performed. fix this with a tiling approach!
+         */
+	int stride = 1;
+	int words,i;
+//	dim=2*2*16*sizeof(w_t);
+	int ni=0,oi=0,e=0 /* dim>=2^e */;
+	int side=0;
+	{int tmp=dim/(stride*2*sizeof(w_t));while(tmp>0){++e;tmp/=2;}e/=2;e*=2;/* e is even */}
+	if(e<1)return -1;
+	words=(1<<e);
+	side = (1<<(e/2));
+	oi=0;
+	ni = 0;
+/*
+	RSB_STDERR("morton_pointer_chasing_pattern_init\n");
+	RSB_STDERR("%d\n",e);
+	RSB_STDERR("should span %d bytes\n",dim);
+	RSB_STDERR("%d side\n",side);
+	RSB_STDERR("will span %d words \n",words);
+	RSB_STDERR("will span %d bytes \n",words*sizeof(w_t));*/
+	for(i=0;i<words;++i)
+	{
+		//int j=0;
+		int nx=0,ny=0;
+		/* WARNING : we could not have 8 bits per byte */
+		h2c( &nx,  &ny,  i+1);
+		nx=nx%side;
+		ny=ny%side;
+		ni=(nx+ny*side)%words;
+		//RSB_STDERR("%d %d\n",nx,ny);
+		//RSB_STDERR("%d\n",ni);
+		//RSB_STDERR("%d %d %d %d\n",nx,ny,i+1,ni);
+		*(w_t**)&p[oi*stride]=(w_t*)&p[ni*stride];
+		oi=ni;
+	}
+	return 0;
+}
+
+static int pointer_chasing_pattern_init(w_t *p, size_t dim)
+{
+	/**
+	 * \ingroup gr_internals
+         * Initializes a memory area to perform a linear pointer chasing.
+         */
+	int i;
+	int stride=1;
+	int words=dim/(stride * sizeof(w_t));
+
+	for(i=1;i<=words;++i)
+		*(w_t**)&p[(i-1)*stride]=(w_t*)&p[(i%words)*stride];
+
+/*        for (i = stride; i < range; i += stride) {
+                *(char **)&addr[i - stride] = (char*)&addr[i];
+        }
+        *(char **)&addr[i - stride] = (char*)&addr[0];*/
+	return 0;
+}
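+
+/* e.g.: with room for 4 words the links above form the cycle
+ * p[0]->p[1]->p[2]->p[3]->p[0], so in the chase loop every load depends on
+ * the previous one and the accesses cannot overlap. */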
+
+static int scan_cache(w_t *p, size_t dim, int should, size_t times, w_t *q)
+{
+	/**
+	 * \ingroup gr_internals
+	 * Performs a naive memory scan with side effect.
+	 *
+	 *  The memory scan will operate on a memory area of dim bytes.
+	 *  It touches consecutive memory locations with a stride of
+	 *  sizeof(w_t) bytes.
+	 *  Where the loops below are not unrolled by hand, the compiler should
+	 *  unroll them aggressively so that the scan remains memory bound.
+	 *
+	 *  Please note that if dim is less than L1/L2/L3 cache, you will
+	 *  not effectively benchmark your memory subsystem, but only caches.
+	 *
+	 *  It is advised for p to be aligned in some way for better performance.
+	 *
+	 * \return RSB_ERR_NO_ERROR on correct operation, an error code (see \ref errors_section) otherwise.
+	 *
+	 * */
+	w_t n=0;
+	int i,t;
+	int words=dim/sizeof(w_t);
+	
+	if(should == RSB_MB_MEMCPY2 && !q)
+		return -1;
+
+	/* FIXME : t and i could overflow */
+
+	if(should == RSB_MB_FLUSH)/* we ignore times */
+	{
+		#pragma omp parallel for schedule(static,1) RSB_NTC
+		for(i=0;i<words;++i)
+			n+=p[i];
+	}
+	else
+	/*
+	 * Warning : if the compiler is really, really smart, it
+	 * could detect we are zeroing and give an excessively high value here!
+	 * */
+		/* FIXME : xlc -O4 is much smarter than us here (maybe on times!). */
+		if(should == RSB_MB_ZERO)
+			for(t=0;t<times;++t)
+			{
+				//for(i=0;i<words;++i)
+				//	p[i]=0 ;
+				for(i=0;i+7<words;i+=8)
+					p[i+0]=0,p[i+1]=0,
+					p[i+2]=0,p[i+3]=0,
+					p[i+4]=0,p[i+5]=0,
+					p[i+6]=0,p[i+7]=0;
+				for(;i<words;i++)
+					p[i+0]=0;
+			}
+	else
+		if(should == RSB_MB_BZERO)
+			for(t=0;t<times;++t)
+				RSB_BZERO(p,dim);
+	/*
+		WARNING : memcpy operations involve two buffers or two halves ! 
+		so be careful when interpreting these results:
+		memory bandwidth is double than transfer speed!
+	 */
+	else
+		if(should == RSB_MB_MEMCPY)
+			for(t=0;t<times;++t)
+				memcpy(p,((char*)p)+dim/2,dim/2);
+	else
+		if(should == RSB_MB_MEMCPY2)
+			for(t=0;t<times;++t)
+				memcpy(p,q,dim);
+	else
+		/* FIXME : xlc -O4 is much smarter than us here (maybe on times!). */
+		if(should == RSB_MB_WRITE)
+			for(t=0;t<times;++t)
+			{
+				//for(i=0;i<words;++i)
+				//	p[i]=i ;
+				for(i=0;i+7<words;i+=8)
+					p[i+0]=i+0,p[i+1]=i+1,
+					p[i+2]=i+2,p[i+3]=i+3,
+					p[i+4]=i+4,p[i+5]=i+5,
+					p[i+6]=i+6,p[i+7]=i+7;
+				for(;i<words;i++)
+					p[i+0]=i+0;
+			}
+	else
+		/* FIXME : xlc -O4 is much smarter than us here (maybe on times!). */
+		if(should == RSB_MB_READ)
+			for(t=0;t<times;++t)
+			{
+				//for(i=0;i<words;++i)
+					//n+=p[i];	// double loop == loop overhead
+				for(i=0;i+7<words;i+=8)
+					n+=p[i] +p[i+1] +p[i+2] +p[i+3] +p[i+4] +p[i+5] +p[i+6] +p[i+7];
+				for(;i<words;i++)
+					n+=p[i];
+			}
+	else
+		if(should == RSB_MB_MORTON_CHASE || should == RSB_MB_LINEAR_CHASE)
+			for(t=0;t<times;++t)
+				for(i=0;i<words;++i)
+					p=*(w_t**)p;	/* this is pointer chasing, folks */
+	else
+		if(should == RSB_MB_RW)
+			for(t=0;t<times;++t)
+				for(i=0;i<words;++i)
+					p[i]+=i;
+	else
+//		if(should & MEMSET)
+		if(should == RSB_MB_MEMSET)
+			for(t=0;t<times;++t)
+				memset(p,0x0A0B0C0D,dim);
+	else
+		return -1;
+	return n+*p;	/* WARNING ! easily optimizable! should maintain a pool of entropy! FIXME (could it be dangerous (p.chasing!?) ?) */
+}
+
+static int rsb_mbw_area_init_and_cache_flush(size_t sz, w_t *fc, w_t *p, int btype /*, int * entropy*/, size_t times)
+{
+	/**
+	 * \ingroup gr_internals
+	 * Will init the memory area for benchmarking and then
+	 * flush the cache, assuming that its size is sz.
+	 */
+	switch(btype)
+	{
+		case(RSB_MB_LINEAR_CHASE):
+			return pointer_chasing_pattern_init(p, sz);
+		case(RSB_MB_MORTON_CHASE):
+			return morton_pointer_chasing_pattern_init(p, sz);
+		default:
+			return 0;
+	}
+	/* FIXME: unreachable -- every branch of the switch above returns, so the
+	 * flush below is never performed (and fc holds m.hlcs bytes, so scanning
+	 * sz bytes of it could overrun whenever sz > m.hlcs) */
+	scan_cache(fc,sz,RSB_MB_FLUSH,times, NULL);	/* flush cache */
+}
+
+static rsb_time_t mbw_total_time( struct rsb_mbw_m_t *mbw_m  )
+{
+	/**
+	 * \ingroup gr_internals
+	 */
+	rsb_time_t t = RSB_REAL_ZERO;
+	int i;
+	if(!mbw_m)
+		return t;
+	for(i=0;i<RSB_MB_N;++i)
+	{
+		t+=mbw_m->mb[i].t;
+	}
+	return t;
+}
+
+static rsb_err_t mbw_test( struct rsb_mbw_m_t *mbw_m  );
+
+static rsb_err_t probe_approx_mbw( struct rsb_mbw_m_t * mbwm, rsb_time_t s )
+{
+	/**
+	 * \ingroup gr_internals
+	 * we run this quick test and return a rough estimate
+	 * of the number of times the test should be performed
+	 * on the given memory area to last circa s seconds.
+	 * (assumes all memory tests)
+	 * */
+	const rsb_time_t min_time=0.1;
+	size_t times=0;
+	rsb_time_t t=0;/* some compilers (e.g.: pgcc) don't init variables for us :) */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if( !mbwm )
+		{errval = RSB_ERR_BADARGS;goto err;}
+	mbwm->times=1;	/* we set times */
+	if(s<min_time)
+		{errval = RSB_ERR_BADARGS;goto err;}
+
+	while(t<min_time && mbwm->times<=INT_MAX)	/* FIXME : INT_MAX could be undefined */
+	{
+		mbwm->times*=2;	/* we set times */
+		if((errval=mbw_test(mbwm))) /* we perform benchmarking */
+			goto err;
+		t=mbw_total_time( mbwm  );
+		if(t <= RSB_REAL_ZERO)
+			{errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+	}
+	/* times/s == mbwm.times/t */
+	times=(int)(((double)mbwm->times)/t)*s;
+	if(times<=0 /*overflow ?*/ /* || times < 100*/)
+#ifdef INT_MAX 
+	{
+		times=INT_MAX;
+		return 0;
+	}
+#else /* INT_MAX  */
+		{errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+#endif /* INT_MAX  */
+	/* finally, we set our estimate 'times' value for s seconds benchmarking */
+	mbwm->times=times;
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
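+
+/* e.g.: if the doubling above stops at times=8 with t=0.2 s measured, then
+ * for s=1.0 s the final estimate is about (8/0.2)*1.0 = 40 iterations. */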
+
+static rsb_err_t mbw_test( struct rsb_mbw_m_t *mbw_m  )
+{
+	/**
+	 * \ingroup gr_internals
+	 * Will perform a run of each memory benchmark.
+	 * It assumes the existence of hardware managed caches.
+	 *
+	 * TODO : m.sz should be as big as the highest level cache.
+	 */
+	w_t * p=NULL,*fc=NULL,*q=NULL;
+	struct rsb_mbw_m_t m; 
+	int i;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!mbw_m)
+		return RSB_ERR_BADARGS;
+
+	m=*mbw_m;
+
+	/* if m.times is zero we probe for an appropriate value */
+	if(m.times == 0 && (errval=probe_approx_mbw(&m,1.0)))
+	{
+		RSB_STDERR("uhm. timing problems ?!.\n");
+		rsb__do_perror(NULL,errval);
+		goto errl;
+	}
+
+	/* TODO :
+	 * we should be absolutely sure that
+	 * flushing works effectively; that is, that 
+	 * the flush array is big enough.
+	 *
+	 * Therefore it is advised to set m.hlcs to at least the size of the largest cache.
+	 * */
+	p = rsb__aligned_malloc( m.sz , m.sz );
+	q = rsb__aligned_malloc( m.sz , m.sz );/* q is auxiliary */
+	fc= rsb__aligned_malloc( m.hlcs , m.hlcs );
+
+	if(!p || !fc || !q)
+	{
+		RSB_STDERR("problems allocating %zd bytes.\n",m.sz);
+		RSB_CONDITIONAL_FREE(p);	
+		RSB_CONDITIONAL_FREE(q);	
+		RSB_CONDITIONAL_FREE(fc);	
+		errval = RSB_ERR_GENERIC_ERROR;
+		goto errl;
+	}
+
+	for(i=0;i<RSB_MB_N;++i)
+	{
+		m.mb[i].btype=i;	/* we set benchmark type */
+		rsb_mbw_area_init_and_cache_flush(m.sz, fc, p, i/*, int * entropy*/, m.times);
+		m.mb[i].t = - rsb_time();
+		entropy+=scan_cache(p,m.sz,i,m.times,q);	/* we perform measurement */
+		m.mb[i].t += rsb_time();
+	}
+
+	// about commenting the following : DANGER
+	//if(m.entropy)fprintf(stderr,"the following number is printed only for tricking the compiler optimizer, and has no sense: %d\n",entropy); /* this is essential */
+	if(p )rsb__free(p );
+	if(fc)rsb__free(fc);
+	if(q)rsb__free(q);
+	if(mbw_m) *mbw_m=m;
+	return 0;
+errl:
+	if(p )rsb__free(p );
+	if(fc)rsb__free(fc);
+	if(q)rsb__free(q);
+	RSB_DO_ERR_RETURN(errval)
+}
+
+static rsb_err_t mbw_ratio_printf(struct rsb_mbw_m_t *h, struct rsb_mbw_m_t *l)
+{
+	/**
+	 * \ingroup gr_internals
+	 * prints the ratio in performance of two measurements.
+	 * FIXME : new
+	 */
+	double M=/*1000000.0*/1.0;/* simplifies */
+	int i;
+
+	if(!h||!l)
+		return RSB_ERR_BADARGS;
+
+	for(i=0;i<RSB_MB_N;++i)
+		RSB_INFO("#%-32s ratio  %lg \n"  ,
+			rsb_mbw_s2s(i),
+			((((double)h->times)*h->sz)/(h->mb[i].t*M))/
+			((((double)l->times)*l->sz)/(l->mb[i].t*M))
+			);
+	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t mbw_printf(struct rsb_mbw_m_t *m, int level)
+{
+	/**
+	 * \ingroup gr_internals
+		Prints out memory benchmarks results. 
+	*/
+	int i;
+	double M=1000000.0;
+
+	if(!m)
+		return RSB_ERR_BADARGS;
+
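+	/* bandwidth in MBps: (iterations * buffer size in bytes) / (seconds * 1e6) */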
+	RSB_INFO("#%-32s\tsize\tlevel\tbw(MBps)\n","operation");
+	for(i=0;i<RSB_MB_N;++i)
+		RSB_INFO("%-32s\t%zd\t%zd\t%lg\n",rsb_mbw_s2s(m->mb[i].btype),(rsb_printf_int_t)m->sz,(rsb_printf_int_t)level,(((double)m->times)*m->sz)/(m->mb[i].t*M));
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__mem_hier_timings(struct rsb_mbw_cm_t * cm)
+{
+	/**
+	 * \ingroup gr_internals
+	 * Measures memory bandwidth by scanning arrays of increasing size.
+	 * They are sized and aligned to match the cache hierarchy levels, and then beyond.
+	 * 
+	 * TODO : should we measure in-memory (but out of cache) performance, too ?
+	 * TODO : what if user has opted out output functionality ?
+	 */
+	int cln=0,cl;
+	struct rsb_mbw_m_t mbw_m,*mbw_ms=NULL;
+	long cs=0;
+	const long extra_level=2;
+
+	if( !cm )
+		return RSB_ERR_BADARGS;
+
+	cln = rsb__get_cache_levels_num();
+
+	if(cln<1)
+	{
+		RSB_INFO("No information about caches, sorry\n");
+		return -1;
+	}
+	mbw_ms = rsb__calloc((cln+extra_level) * sizeof(*mbw_ms));
+	//RSB_STDERR("%d\n",((cln+extra_level) * sizeof(*mbw_ms)));
+	if(!mbw_ms)
+	{
+		goto err;
+	}
+
+	RSB_INFO("# This test will measure times in scanning arrays sized and aligned to fit in caches.\n");
+	RSB_INFO("# %d cache levels detected\n",cln);
+
+	/* we do measure for each level in the cache hierarchy plus two */
+	for(cl=1;cl<=cln+extra_level;++cl)
+	{
+		/* timing for cache level cl  */
+
+		if(cl<=cln)
+			cs = rsb__get_lnc_size(cl);
+		else
+			cs=2*cs;
+
+		if(cs<1)
+		{
+			RSB_ERROR("#uhm. overflow ?\n");
+			goto err;
+		}
+		mbw_m.so=sizeof(w_t);
+		mbw_m.sz=cs;
+		mbw_m.times=0;/* mbw_test will probe for a default reasonable time */
+		mbw_m.cln=cln;
+		mbw_m.cl=cl;
+		mbw_m.hlcs = rsb__get_lnc_size(cln);
+		if(mbw_m.hlcs<1)
+			goto err;
+
+		if(mbw_test(&mbw_m))
+			goto err;
+
+		memcpy( &(mbw_ms[cl-1]) ,&mbw_m,sizeof(struct rsb_mbw_m_t));
+	}
+
+	cm->mb=mbw_ms;
+	cm->cln=cln;
+	cm->extra_level=extra_level;
+	return 0;
+err:
+	RSB_CONDITIONAL_FREE(mbw_ms);
+	RSB_STDERR("An error occurred during memory benchmarking.\n");
+	return -1;
+}
+
+rsb_err_t rsb__print_mem_hier_timings(struct rsb_mbw_cm_t * cm)
+{
+	/**
+	 * \ingroup gr_internals
+	 * 
+	 */
+	long cl;
+	long print_ratio=1;
+
+	if(!cm)
+		return RSB_ERR_BADARGS;
+
+	for(cl=1;cl<=cm->cln+cm->extra_level;++cl)
+	{
+		if(cl<=cm->cln)
+			RSB_INFO("#Level %ld:\n",cl);
+		else
+			RSB_INFO("#Level %ld (RAM) (sample size 2^%ld times the last cache size):\n",cm->cln+1,cl-cm->extra_level);
+		mbw_printf(&cm->mb[cl-1],cl);
+
+		if(cl>1 && print_ratio)
+			if(mbw_ratio_printf(&cm->mb[cl-1],&cm->mb[cl-2]))
+				;/* TODO : an error code */
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t rsb_tlb_benchmark(void)
+{
+	/**
+		UNFINISHED : THIS CODE DOES NOT PERFORM ANYTHING USEFUL FOR NOW
+
+		The performance, and possibly the unpredictability, of this benchmark should 
+		expose the impact of memory fragmentation on performance.
+
+		FIXME : potential overflows
+	 */
+	size_t sz,psz,pn,wpp,times;
+	const size_t K=1024;
+	w_t * p=NULL;
+	w_t c=0;
+	rsb_int i,j;
+	rsb_time_t t;
+	double mBps;
+
+	RSB_WARN("TLB benchmark code is unfinished!\n");
+	RSB_STDERR("#TLB benchmark.\n");
+	for(sz=K*K/2;sz<K*K*K;sz*=2)
+	{
+		//sz=1024*1024*32;
+		/* FIXME : problems with congruences ! */
+		psz=4096;
+		pn=sz/psz;
+		wpp=psz/sizeof(w_t);
+		times=1000;
+		p = rsb__aligned_malloc( sz , sz );
+		//p = rsb__aligned_malloc( sz , psz );
+		if(!p)
+			goto ok;
+		
+		t = - rsb_time();
+		for(j=0;j<times;++j)
+		for(i=0;i<pn;++i)
+		{
+			c+=p[i*wpp];
+		}
+		t += rsb_time();
+		rsb__free(p);
+
+		mBps=1.0;
+		//mBps*=sz;
+		mBps*=pn*sizeof(w_t);
+		mBps/=t;
+		mBps*=times;
+		mBps/=1024*1024;
+
+		RSB_STDERR("#TLB timing benchmark : scanned %zd entries spaced %zd bytes across %zd bytes in %lg s (%lg MBps)\n",pn,psz,sz,t,mBps);
+	}
+
+ok:
+	return 0;
+//err:
+//	return -1;
+}
+
+static rsb_err_t rsb_indirect_scan_benchmark(long ss, long * spiffero, long times, rsb_time_t *bt)
+{
+	/**
+		TODO: error handling
+	*/
+	/* FIXME: bounds of times: should be adaptive */
+	rsb_time_t dt,rt,lt;
+	rsb_coo_idx_t *IA=NULL;		/* the array to be scanned */
+	rsb_coo_idx_t acc=0;			/* accumulator */
+	rsb_nnz_idx_t *IP=NULL;		/* the array setting the scan order */
+	void *CA=NULL;				/* the array setting the scan order */
+	long els=0,fas=0;
+	long i,ab,it;
+	els=ss/(sizeof(rsb_coo_idx_t)),fas=4*ss;	/* the number of elements   */
+	if(els<1 || fas<1)
+		{ RSB_ERROR(RSB_ERRM_ES); goto err; }
+	ab=sizeof(rsb_nnz_idx_t)*els+sizeof(rsb_coo_idx_t)*els;
+	IP = rsb__malloc(sizeof(rsb_nnz_idx_t)*els);
+	IA = rsb__malloc(sizeof(rsb_coo_idx_t)*els);
+	CA = rsb__malloc(fas);
+	if(!IP){RSB_ERROR(RSB_ERRM_ES);goto erri;}
+	if(!IA){RSB_ERROR(RSB_ERRM_ES);goto erri;}
+	if(!CA){RSB_ERROR(RSB_ERRM_ES);goto erri;}
+	// random fill
+	for(i=0;i<els;++i)
+		IA[i]=rand()%els;
+	// first phase: random scan
+	for(i=0;i<els;++i)
+		IP[i]=rand()%els;
+	scan_cache(CA,fas,RSB_MB_FLUSH,RSB_FLUSH_TIMES,NULL);	/* flush cache */
+	dt = - rsb_time();
+	for(it=0;it<times;++it)
+		for(i=0;i<els;++i)
+			acc+=IA[IP[i]];
+	dt += rsb_time();
+	rt=dt/times;
+	// second phase: linear scan
+	for(i=0;i<els;++i)
+		IP[i]=i;
+	scan_cache(CA,fas,RSB_MB_FLUSH,RSB_FLUSH_TIMES,NULL);	/* flush cache */
+	dt = - rsb_time();
+	for(it=0;it<times;++it)
+		for(i=0;i<els;++i)
+			acc+=IA[IP[i]];
+	dt += rsb_time();
+	lt=dt/times;
+	if(spiffero)
+		RSB_INFO("for %ld elements, %ld bytes, random access time: %lg, linear access time: %lg, ratio %lg\n",els,ab,rt,lt,rt/lt);
+	else
+		;/* tuning mode only */
+	if(spiffero)
+		*spiffero+=acc;
+	else
+	{	RSB_INFO("ignore this: %zd\n",(size_t)acc);}
+	if(bt)
+		*bt=(rt+lt)*times;
+	RSB_CONDITIONAL_FREE(CA);
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_CONDITIONAL_FREE(IP);
+	return RSB_ERR_NO_ERROR;
+erri:
+	RSB_CONDITIONAL_FREE(CA);
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_CONDITIONAL_FREE(IP);
+err:
+	return RSB_ERR_INTERNAL_ERROR;
+}
+
+rsb_err_t rsb__memory_benchmark(void)
+{
+	/**
+	 * Will benchmark the memory hierarchy.
+	 * You should call rsb_lib_init(RSB_NULL_INIT_OPTIONS) before.
+	 */
+	struct rsb_mbw_cm_t cm;
+#if 1
+	/* NEW: mem scan benchmark */
+	{
+		//long lcs = rsb__get_lastlevel_c_size();
+		long /*spiffero=0,*/times = RSB_MEMSCAN_MIN_TIMES,tinc=1,reftimes=0;
+		const long wet = rsb_get_num_threads();
+		long fsm = rsb__sys_free_system_memory();
+		int ci;
+//		size_t wss=4*wet*lcs*4;
+		size_t wss=fsm/3;
+		long i,els=wss/sizeof(rsb_coo_idx_t);
+		rsb_coo_idx_t *IS=NULL,*ID=NULL;
+		rsb_time_t /*mt = RSB_MEMSCAN_TIME,*/bt = RSB_REAL_ZERO,dt = RSB_REAL_ZERO;
+
+		if(wss<1)
+			goto errm;
+		IS = rsb__malloc(wss);
+		ID = rsb__calloc(wss);
+		if(!IS || !ID)
+			goto errm;
+		for(i=0;i<els;++i)
+			IS[i]=rand()%els;
+
+		while(times<(RSB_MEMSCAN_MAX_TIMES/2) && bt<RSB_MEMSCAN_TIME)
+		{
+			int it;
+			times+=tinc;
+			dt = - rsb_time();
+			for(it=0;it<tinc;++it)
+				RSB_A_MEMCPY_parallel(ID,IS,0,0,wss/RSB_CHAR_BIT,RSB_CHAR_BIT);
+			dt += rsb_time();
+			bt+=dt;
+			tinc*=2;
+		}
+		reftimes=times;
+		if(0)
+		{RSB_WARN("first estimate of MEMCPY on %zd bytes: %lg GB/s (%ld times in %lg s)\n",(size_t)wss,
+			((((double)wss)*times)/bt)/1.e9,times,bt);}
+		/* FIXME: SHOULD FLUSH  */
+		for(i=0;i<els;++i)
+			IS[i]=rand()%els;
+		for(ci=1;ci<=wet;++ci)
+		{
+			int it;
+			rsb__set_num_threads(ci);
+			dt = - rsb_time();
+			for(it=0;it<times;++it)
+				RSB_A_MEMCPY_parallel(ID,IS,0,0,wss/RSB_CHAR_BIT,RSB_CHAR_BIT);
+			dt += rsb_time();
+			bt=dt;
+		RSB_WARN("%zu cores MEMCPY on %zd bytes: %lg GB/s (%ld times in %lg s)\n",(size_t)ci,wss,
+			((((double)wss)*times)/bt)/1.e9,times,bt);
+		}
+		rsb__set_num_threads(wet);
+//		RSB_WARN("begin naive MEMCPY parallelism estimation %ld iterations\n",reftimes);
+errm:
+		RSB_CONDITIONAL_FREE(IS);
+		RSB_CONDITIONAL_FREE(ID);
+	}
+#endif
+
+
+#if 1
+	/* NEW: mem scan benchmark */
+	{
+		long fcs = rsb__get_first_level_c_size();
+		long lcs = rsb__get_lastlevel_c_size();
+		long rcs=lcs;
+		long fsm = rsb__sys_free_system_memory();
+		long spiffero=0,times = RSB_MEMSCAN_MIN_TIMES,tinc=1,reftimes=0;
+		rsb_time_t /*mt = RSB_MEMSCAN_TIME,*/bt = RSB_REAL_ZERO,dt = RSB_REAL_ZERO;
+		RSB_WARN("begin experimental indirect array scan benchmark\n");
+		RSB_WARN("autotuning..\n");
+		while(times<(RSB_MEMSCAN_MAX_TIMES/2) && bt<RSB_MEMSCAN_TIME)
+		{
+			times+=tinc;
+			/*errval=*/rsb_indirect_scan_benchmark(rcs,NULL,tinc,&dt);
+			bt+=dt;
+			tinc*=2;
+		}
+		reftimes=times;
+		RSB_WARN("autotuning done. will proceed with presumably %lg s samples\n",bt);
+#define RSB_MEMSCAN_TIMES_FROM_REF(reftimes,refsize,bufsize) \
+	((refsize)<(bufsize)? \
+	RSB_MAX(reftimes/((bufsize)/(refsize)),RSB_MEMSCAN_MIN_TIMES): \
+	RSB_MAX(((refsize)/(bufsize))*reftimes,RSB_MEMSCAN_MIN_TIMES))
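+		/* e.g.: with reftimes tuned on an rcs-sized buffer, a buffer 4 times
+		 * larger gets reftimes/4 scans and one 4 times smaller gets 4*reftimes,
+		 * keeping the bytes swept per sample roughly constant (and never fewer
+		 * than RSB_MEMSCAN_MIN_TIMES scans). */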
+
+		times = RSB_MEMSCAN_TIMES_FROM_REF(reftimes,rcs,fcs);
+		/*errval=*/rsb_indirect_scan_benchmark(fcs,&spiffero,times,&bt);
+		times = RSB_MEMSCAN_TIMES_FROM_REF(reftimes,rcs,(lcs-fcs)/2);
+		/*errval=*/rsb_indirect_scan_benchmark(fcs+(lcs-fcs)/2,&spiffero,times,&bt);
+		times = RSB_MEMSCAN_TIMES_FROM_REF(reftimes,rcs,lcs);
+		/*errval=*/rsb_indirect_scan_benchmark(lcs,&spiffero,times,&bt);
+		times = RSB_MEMSCAN_TIMES_FROM_REF(reftimes,rcs,4*lcs);
+		/*errval=*/rsb_indirect_scan_benchmark(4*lcs,&spiffero,times,&bt);
+		times = RSB_MEMSCAN_TIMES_FROM_REF(reftimes,rcs,16*lcs);
+		/*errval=*/rsb_indirect_scan_benchmark(RSB_MIN(fsm,16*lcs),&spiffero,times/2,&bt);
+		times = RSB_MEMSCAN_TIMES_FROM_REF(reftimes,rcs,32*lcs);
+		/*errval=*/rsb_indirect_scan_benchmark(RSB_MIN(fsm,32*lcs),&spiffero,times/4,&bt);
+		times = RSB_MEMSCAN_TIMES_FROM_REF(reftimes,rcs,64*lcs);
+		/*errval=*/rsb_indirect_scan_benchmark(RSB_MIN(fsm,64*lcs),&spiffero,times/4,&bt);
+		RSB_INFO("#please ignore this: %ld\n",spiffero);
+		RSB_INFO("end experimental indirect array scan benchmark\n");
+	}
+#endif
+
+	/* FIXME : temporarily here ! */
+	rsb_tlb_benchmark();
+
+	if(rsb__mem_hier_timings(&cm))
+		goto err;
+
+	if(rsb__print_mem_hier_timings(&cm))
+		goto err;
+
+	RSB_CONDITIONAL_FREE(cm.mb);
+	return 0;
+err:
+	return -1;
+}
+
+rsb_err_t rsb__flush_cache(size_t sz)
+{
+	/**
+	 Flush caches by repeated memory scans.
+	 */
+	void * fc=NULL;
+	size_t times = RSB_MIN_CACHE_FLUSH_SCAN_TIMES;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(sz==0)
+	{
+		sz = rsb__get_lastlevel_c_size();
+		sz = 2*sz;	/* twice the last level cache size */
+	}
+	fc = rsb__calloc(sz);
+	if(fc==NULL)
+		return RSB_ERR_ENOMEM;
+	errval = scan_cache(fc,sz,RSB_MB_FLUSH,times,NULL);	/* flush cache */
+	RSB_CONDITIONAL_FREE(fc);	
+	RSB_DO_ERR_RETURN(errval)
+}
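+
+/* Usage sketch (hypothetical helper, for illustration only): calling
+ * rsb__flush_cache(0) between timed kernel runs makes each measurement start
+ * from a cold cache; sz=0 requests a scan of twice the last-level cache size. */
+#if 0
+static rsb_time_t rsb_timed_run_sketch(void (*kernel)(void))
+{
+	rsb_time_t dt;
+
+	rsb__flush_cache(0);	/* evict previously touched data first */
+	dt = - rsb_time();
+	kernel();		/* the code under measurement */
+	dt += rsb_time();
+	return dt;
+}
+#endif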
+
+#if 0
+int main(void)
+{
+	return rsb__memory_benchmark();
+}
+#endif
+
+/* @endcond */
diff --git a/rsb_mbw.h b/rsb_mbw.h
new file mode 100644
index 0000000..0ab299c
--- /dev/null
+++ b/rsb_mbw.h
@@ -0,0 +1,128 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/**
+ * @file
+ * @author Michele Martone
+ * @brief Memory bandwidth related (e.g.: read, write, and read-write) microbenchmarks.
+ */
+
+#ifndef RSB_MBW_H_INCLUDED
+#define RSB_MBW_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#include "rsb.h"		/* public API specification */
+
+#if 0
+#define RSB_MB_READ		0x00
+#define RSB_MB_WRITE		0x01
+#define RSB_MB_RW		0x02
+#define RSB_MB_BZERO		0x03
+#define RSB_MB_ZERO		0x04
+
+#define RSB_MB_MEMSET		0x05
+#define RSB_MB_LINEAR_CHASE	0x06
+#define RSB_MB_MORTON_CHASE	0x07
+
+#define RSB_MB_N		0x09
+#define RSB_MB_FLUSH		0xAAAA
+#else
+enum{
+RSB_MB_READ		=0x00,
+RSB_MB_WRITE		=0x01,
+RSB_MB_RW		=0x02,
+RSB_MB_BZERO		=0x03,
+RSB_MB_ZERO		=0x04,
+RSB_MB_MEMSET		=0x05,
+RSB_MB_MEMCPY		=0x06,
+RSB_MB_MEMCPY2		=0x07,
+RSB_MB_LINEAR_CHASE	=0x08,
+RSB_MB_MORTON_CHASE	=0x09,
+
+RSB_MB_N		=0x0A,
+RSB_MB_FLUSH		=0xAAAA
+};
+#endif
+/**<
+ * The available memory tests.
+ */
+
+#define RSB_MIN_CACHE_FLUSH_SCAN_TIMES 2
+
+/*!
+ * \ingroup gr_internals
+ * \brief a memory bandwidth single measurement
+ */
+struct rsb_mbw_sm_t
+{
+	rsb_time_t t;			/**< time, in seconds */
+	rsb_flags_t btype;		/**< measurement type */
+};
+
+/*!
+ * \ingroup gr_internals
+ */
+typedef size_t rsb__times_t;
+
+/*!
+ * \ingroup gr_internals
+ * \brief a memory bandwidth benchmark record for a level
+ */
+struct rsb_mbw_m_t
+{
+	size_t so;			/**< sizeof probed word type */
+	rsb__times_t times,sz;		/**< number of iterations of scanning a sz bytes wide area */
+	struct rsb_mbw_sm_t mb[RSB_MB_N];	/**< measurements */
+	long cln;			/**< number of cache levels */
+	long cl;			/**< cache level for this measurement */
+	long hlcs;			/**< higher level cache size */
+};
+
+/*!
+ * \ingroup gr_internals
+ * \brief  a complete memory bandwidth benchmark record
+ */
+struct rsb_mbw_cm_t
+{
+	struct rsb_mbw_m_t * mb;		/**< a memory bandwidth benchmark record for each level+extra_level */
+	long cln;			/**< number of cache levels */
+	long extra_level;		/**< number of additional measurements */
+};
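+
+/* Sketch (illustrative; assumes mb holds cln+extra_level entries, as the
+ * field comments above suggest) of walking a record filled in by
+ * rsb__mem_hier_timings(): */
+#if 0
+	{
+		struct rsb_mbw_cm_t cm;
+		long l;
+		/* ... after rsb__mem_hier_timings(&cm) succeeds ... */
+		for(l = 0; l < cm.cln + cm.extra_level; ++l)
+			;/* cm.mb[l].mb[] holds the RSB_MB_N timings for one level */
+	}
+#endif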
+
+#define RSB_FLUSH_TIMES 10	/* minimum number of scans for a cache-flushing array scan */
+#define RSB_MEMSCAN_MIN_TIMES 10	/* minimum number of scans of an array during a memory-bandwidth benchmark */
+#define RSB_MEMSCAN_MAX_TIMES RSB_MAX_SIGNED(int) /* maximum number of scans of an array during a memory-bandwidth benchmark */
+#define RSB_MEMSCAN_TIME  1.0	/* maximum allowable time for scanning an array during a memory-bandwidth benchmark */
+
+rsb_err_t rsb__mem_hier_timings(struct rsb_mbw_cm_t * cm);
+rsb_err_t rsb__print_mem_hier_timings(struct rsb_mbw_cm_t * cm);
+rsb_err_t rsb__memory_benchmark(void);
+rsb_err_t rsb__flush_cache(size_t fs);
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+#endif /* RSB_MBW_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_merge.c b/rsb_merge.c
new file mode 100644
index 0000000..68b8332
--- /dev/null
+++ b/rsb_merge.c
@@ -0,0 +1,545 @@
+/* @cond INNERDOC */
+/**
+ * @file
+ * @brief Auxiliary functions.
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+
+
+#include "rsb_common.h"
+#ifndef RSB_PS_ASSERT
+/*#define RSB_PS_ASSERT(e) assert(e)	*/ 	/* uncomment this to use   asserts */
+#define RSB_PS_ASSERT(e)			/* defined empty: asserts disabled */
+#else /* RSB_PS_ASSERT */
+#undef RSB_PS_ASSERT
+#define RSB_PS_ASSERT(e) 
+#endif /* RSB_PS_ASSERT */
+rsb_err_t rsb__do_util_merge_sorted_subarrays_in_place_double(
+		double *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_char_t * W,
+		rsb_nnz_idx_t annz, rsb_nnz_idx_t bnnz,
+	       	size_t wsize, rsb_flags_t flags
+		)
+{
+	/**
+	 * \ingroup gr_util
+	 *       A         B
+	 * +----------+----------+
+	 *  <- annz -> <- bnnz ->
+	 *
+	 *        W
+	 * +------------+
+	 *  <-  wnnz  ->
+	 *
+	 * Merges an array containing two ordered subarrays A and B, 
+	 * sized respectively with annz and bnnz elements, using a
+	 * swap area sized wsize bytes. 
+	 *
+	 * NOTE: this is NOT an optimized code, just a naive one to have this functionality working.
+	 */
+	rsb_int_t wpasses;
+	rsb_nnz_idx_t wnnz,nnz=annz+bnnz;
+	double *VW=NULL;
+       	rsb_coo_idx_t * IW=NULL;
+       	rsb_coo_idx_t * JW=NULL;
+	double *VB=NULL;
+       	rsb_coo_idx_t * IB=NULL;
+       	rsb_coo_idx_t * JB=NULL;
+	size_t el_size=sizeof(double);
+	int step;
+	rsb_nnz_idx_t aoff=0,boff=0,woff=0;
+
+	wnnz=wsize/(el_size+2*sizeof(rsb_coo_idx_t));
+	VW=(double*)W;
+	W+=el_size*wnnz;
+	IW=(rsb_coo_idx_t*)W;
+	W+=sizeof(rsb_coo_idx_t)*wnnz;
+	JW=(rsb_coo_idx_t*)W;
+
+	VB=VA+annz;
+	IB=IA+annz;
+	JB=JA+annz;
+
+	wpasses=(annz+bnnz+wnnz-1)/wnnz;
+
+#define RSB_COO_MOVE(VD,ID,JD,VS,IS,JS,doff,soff) \
+		VD[doff]=VS[soff], \
+		ID[doff]=IS[soff], \
+		JD[doff]=JS[soff],++soff,++doff
+
+#define RSB_CMP_COO_LESS_THAN(IA,JA,IB,JB,aoff,boff) \
+		IA[aoff]<IB[boff] || ( IA[aoff]==IB[boff] && JA[aoff] < JB[boff] )
+
+/*	RSB_STDOUT(" * \n");*/
+/*	RSB_STDOUT("wsize=%d steps:%d wnnz=%d nnz=%d\n",wsize,wpasses,wnnz,nnz);*/
+
+/*	RSB_STDOUT("SSSSSSsentinel:%x %d %d\n",IA+annz+bnnz,IA[annz+bnnz],JA[annz+bnnz]);*/
+	for(step=0;step<wpasses;++step)
+	{
+		rsb_nnz_idx_t cnnz;
+		if(step==wpasses-1)
+			wnnz=nnz-step*wnnz;
+
+		cnnz=wnnz;
+		cnnz = RSB_MIN(cnnz,annz);
+		cnnz = RSB_MIN(cnnz,bnnz);
+/*		RSB_STDOUT("step:%d wnnz=%d annz=%d bnnz=%d cnnz=%d\n",step,wnnz,annz,bnnz,cnnz);*/
+		/* merge wnnz elements from A and B in W */
+		woff=0;
+		aoff=boff=0;
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,annz,RSB_NUMERICAL_TYPE_DOUBLE ,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VB,IB,JB,bnnz,RSB_NUMERICAL_TYPE_DOUBLE ,NULL,flags));
+	/*	RSB_STDOUT("SSSSsentinel:%x %d %d\n",IB+bnnz,IB[bnnz],JB[bnnz]); */
+		while(woff<wnnz && aoff<annz && boff<bnnz)
+		{
+			if(RSB_CMP_COO_LESS_THAN(IA,JA,IB,JB,aoff,boff))
+				RSB_COO_MOVE(VW,IW,JW,VA,IA,JA,woff,aoff);
+			else
+				RSB_COO_MOVE(VW,IW,JW,VB,IB,JB,woff,boff);
+		}
+/*		RSB_STDOUT("aoff=%d boff=%d woff=%d\n",aoff,boff,woff);*/
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VW,IW,JW,woff,RSB_NUMERICAL_TYPE_DOUBLE ,NULL,flags));
+		if(woff<wnnz)
+		{
+			if(aoff==annz)
+				RSB_COO_MEMMOVE(VW,IW,JW,VB,IB,JB,woff,boff,wnnz-woff,el_size),boff+=(wnnz-woff);
+			else
+			if(boff==bnnz)
+				RSB_COO_MEMMOVE(VW,IW,JW,VA,IA,JA,woff,aoff,wnnz-woff,el_size),aoff+=(wnnz-woff);
+		}
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VW,IW,JW,wnnz,RSB_NUMERICAL_TYPE_DOUBLE ,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VB,IB,JB,bnnz,RSB_NUMERICAL_TYPE_DOUBLE ,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,annz,RSB_NUMERICAL_TYPE_DOUBLE ,NULL,flags));
+/*		RSB_STDOUT("aoff:%d boff=%d wnnz=%d annz=%d\n",aoff,boff,wnnz,annz);*/
+		/* memmove A boff places forward */
+		bnnz-=boff;
+		annz-=aoff;
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VB,IB,JB,bnnz,RSB_NUMERICAL_TYPE_DOUBLE ,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,annz,RSB_NUMERICAL_TYPE_DOUBLE ,NULL,flags));
+/*		RSB_STDOUT("SSSSsentinel:%x %d %d\n",IB+boff+bnnz,IB[boff+bnnz],JB[boff+bnnz]);*/
+		RSB_COO_MEMMOVE(VA,IA,JA,VA,IA,JA,wnnz,aoff,annz,el_size);
+/*		RSB_STDOUT("PSSSsentinel:%x %d %d\n",IB+boff+bnnz,IB[boff+bnnz],JB[boff+bnnz]);*/
+		VB+=boff;
+		IB+=boff;
+		JB+=boff;
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VB,IB,JB,bnnz,RSB_NUMERICAL_TYPE_DOUBLE ,NULL,flags));
+		RSB_COO_MEMMOVE(VA,IA,JA,VW,IW,JW,0,0,wnnz,el_size);
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,wnnz,RSB_NUMERICAL_TYPE_DOUBLE ,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,annz,RSB_NUMERICAL_TYPE_DOUBLE ,NULL,flags));
+		VA+=wnnz;
+		IA+=wnnz;
+		JA+=wnnz;
+	}
+	return RSB_ERR_NO_ERROR;
+	#undef RSB_COO_MOVE
+	#undef RSB_CMP_COO_LESS_THAN
+}
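+
+/* Usage sketch (hypothetical, for illustration only): merging a 4-element COO
+ * array whose halves are each row-major sorted, with a workspace sized for
+ * wnnz = 2 entries per pass, following the wnnz formula used above. */
+#if 0
+static void rsb_merge_sketch(void)
+{
+	double VA[4] = { 1.0, 2.0, 3.0, 4.0 };
+	rsb_coo_idx_t IA[4] = { 0, 1, 0, 1 };	/* A holds (0,0),(1,1); B holds (0,1),(1,0) */
+	rsb_coo_idx_t JA[4] = { 0, 1, 1, 0 };
+	rsb_char_t W[2*(sizeof(double)+2*sizeof(rsb_coo_idx_t))];
+
+	rsb__do_util_merge_sorted_subarrays_in_place_double(
+		VA, IA, JA, W, 2, 2, sizeof(W), RSB_FLAG_NOFLAGS);
+	/* on success the triplets read (0,0),(0,1),(1,0),(1,1): fully sorted */
+}
+#endif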
+rsb_err_t rsb__do_util_merge_sorted_subarrays_in_place_float(
+		float *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_char_t * W,
+		rsb_nnz_idx_t annz, rsb_nnz_idx_t bnnz,
+	       	size_t wsize, rsb_flags_t flags
+		)
+{
+	/**
+	 * \ingroup gr_util
+	 *       A         B
+	 * +----------+----------+
+	 *  <- annz -> <- bnnz ->
+	 *
+	 *        W
+	 * +------------+
+	 *  <-  wnnz  ->
+	 *
+	 * Merges an array containing two ordered subarrays A and B, 
+	 * sized respectively with annz and bnnz elements, using a
+	 * swap area sized wsize bytes. 
+	 *
+	 * NOTE: this is NOT an optimized code, just a naive one to have this functionality working.
+	 */
+	rsb_int_t wpasses;
+	rsb_nnz_idx_t wnnz,nnz=annz+bnnz;
+	float *VW=NULL;
+       	rsb_coo_idx_t * IW=NULL;
+       	rsb_coo_idx_t * JW=NULL;
+	float *VB=NULL;
+       	rsb_coo_idx_t * IB=NULL;
+       	rsb_coo_idx_t * JB=NULL;
+	size_t el_size=sizeof(float);
+	int step;
+	rsb_nnz_idx_t aoff=0,boff=0,woff=0;
+
+	wnnz=wsize/(el_size+2*sizeof(rsb_coo_idx_t));
+	VW=(float*)W;
+	W+=el_size*wnnz;
+	IW=(rsb_coo_idx_t*)W;
+	W+=sizeof(rsb_coo_idx_t)*wnnz;
+	JW=(rsb_coo_idx_t*)W;
+
+	VB=VA+annz;
+	IB=IA+annz;
+	JB=JA+annz;
+
+	wpasses=(annz+bnnz+wnnz-1)/wnnz;
+
+#define RSB_COO_MOVE(VD,ID,JD,VS,IS,JS,doff,soff) \
+		VD[doff]=VS[soff], \
+		ID[doff]=IS[soff], \
+		JD[doff]=JS[soff],++soff,++doff
+
+#define RSB_CMP_COO_LESS_THAN(IA,JA,IB,JB,aoff,boff) \
+		IA[aoff]<IB[boff] || ( IA[aoff]==IB[boff] && JA[aoff] < JB[boff] )
+
+/*	RSB_STDOUT(" * \n");*/
+/*	RSB_STDOUT("wsize=%d steps:%d wnnz=%d nnz=%d\n",wsize,wpasses,wnnz,nnz);*/
+
+/*	RSB_STDOUT("SSSSSSsentinel:%x %d %d\n",IA+annz+bnnz,IA[annz+bnnz],JA[annz+bnnz]);*/
+	for(step=0;step<wpasses;++step)
+	{
+		rsb_nnz_idx_t cnnz;
+		if(step==wpasses-1)
+			wnnz=nnz-step*wnnz;
+
+		cnnz=wnnz;
+		cnnz = RSB_MIN(cnnz,annz);
+		cnnz = RSB_MIN(cnnz,bnnz);
+/*		RSB_STDOUT("step:%d wnnz=%d annz=%d bnnz=%d cnnz=%d\n",step,wnnz,annz,bnnz,cnnz);*/
+		/* merge wnnz elements from A and B in W */
+		woff=0;
+		aoff=boff=0;
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,annz,RSB_NUMERICAL_TYPE_FLOAT ,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VB,IB,JB,bnnz,RSB_NUMERICAL_TYPE_FLOAT ,NULL,flags));
+	/*	RSB_STDOUT("SSSSsentinel:%x %d %d\n",IB+bnnz,IB[bnnz],JB[bnnz]); */
+		while(woff<wnnz && aoff<annz && boff<bnnz)
+		{
+			if(RSB_CMP_COO_LESS_THAN(IA,JA,IB,JB,aoff,boff))
+				RSB_COO_MOVE(VW,IW,JW,VA,IA,JA,woff,aoff);
+			else
+				RSB_COO_MOVE(VW,IW,JW,VB,IB,JB,woff,boff);
+		}
+/*		RSB_STDOUT("aoff=%d boff=%d woff=%d\n",aoff,boff,woff);*/
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VW,IW,JW,woff,RSB_NUMERICAL_TYPE_FLOAT ,NULL,flags));
+		if(woff<wnnz)
+		{
+			if(aoff==annz)
+				RSB_COO_MEMMOVE(VW,IW,JW,VB,IB,JB,woff,boff,wnnz-woff,el_size),boff+=(wnnz-woff);
+			else
+			if(boff==bnnz)
+				RSB_COO_MEMMOVE(VW,IW,JW,VA,IA,JA,woff,aoff,wnnz-woff,el_size),aoff+=(wnnz-woff);
+		}
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VW,IW,JW,wnnz,RSB_NUMERICAL_TYPE_FLOAT ,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VB,IB,JB,bnnz,RSB_NUMERICAL_TYPE_FLOAT ,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,annz,RSB_NUMERICAL_TYPE_FLOAT ,NULL,flags));
+/*		RSB_STDOUT("aoff:%d boff=%d wnnz=%d annz=%d\n",aoff,boff,wnnz,annz);*/
+		/* memmove A boff places forward */
+		bnnz-=boff;
+		annz-=aoff;
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VB,IB,JB,bnnz,RSB_NUMERICAL_TYPE_FLOAT ,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,annz,RSB_NUMERICAL_TYPE_FLOAT ,NULL,flags));
+/*		RSB_STDOUT("SSSSsentinel:%x %d %d\n",IB+boff+bnnz,IB[boff+bnnz],JB[boff+bnnz]);*/
+		RSB_COO_MEMMOVE(VA,IA,JA,VA,IA,JA,wnnz,aoff,annz,el_size);
+/*		RSB_STDOUT("PSSSsentinel:%x %d %d\n",IB+boff+bnnz,IB[boff+bnnz],JB[boff+bnnz]);*/
+		VB+=boff;
+		IB+=boff;
+		JB+=boff;
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VB,IB,JB,bnnz,RSB_NUMERICAL_TYPE_FLOAT ,NULL,flags));
+		RSB_COO_MEMMOVE(VA,IA,JA,VW,IW,JW,0,0,wnnz,el_size);
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,wnnz,RSB_NUMERICAL_TYPE_FLOAT ,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,annz,RSB_NUMERICAL_TYPE_FLOAT ,NULL,flags));
+		VA+=wnnz;
+		IA+=wnnz;
+		JA+=wnnz;
+	}
+	return RSB_ERR_NO_ERROR;
+	#undef RSB_COO_MOVE
+	#undef RSB_CMP_COO_LESS_THAN
+}
+rsb_err_t rsb__do_util_merge_sorted_subarrays_in_place_float_complex(
+		float complex *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_char_t * W,
+		rsb_nnz_idx_t annz, rsb_nnz_idx_t bnnz,
+	       	size_t wsize, rsb_flags_t flags
+		)
+{
+	/**
+	 * \ingroup gr_util
+	 *       A         B
+	 * +----------+----------+
+	 *  <- annz -> <- bnnz ->
+	 *
+	 *        W
+	 * +------------+
+	 *  <-  wnnz  ->
+	 *
+	 * Merges an array containing two ordered subarrays A and B, 
+	 * sized respectively with annz and bnnz elements, using a
+	 * swap area sized wsize bytes. 
+	 *
+	 * NOTE: this is NOT an optimized code, just a naive one to have this functionality working.
+	 */
+	rsb_int_t wpasses;
+	rsb_nnz_idx_t wnnz,nnz=annz+bnnz;
+	float complex *VW=NULL;
+       	rsb_coo_idx_t * IW=NULL;
+       	rsb_coo_idx_t * JW=NULL;
+	float complex *VB=NULL;
+       	rsb_coo_idx_t * IB=NULL;
+       	rsb_coo_idx_t * JB=NULL;
+	size_t el_size=sizeof(float complex);
+	int step;
+	rsb_nnz_idx_t aoff=0,boff=0,woff=0;
+
+	wnnz=wsize/(el_size+2*sizeof(rsb_coo_idx_t));
+	VW=(float complex*)W;
+	W+=el_size*wnnz;
+	IW=(rsb_coo_idx_t*)W;
+	W+=sizeof(rsb_coo_idx_t)*wnnz;
+	JW=(rsb_coo_idx_t*)W;
+
+	VB=VA+annz;
+	IB=IA+annz;
+	JB=JA+annz;
+
+	wpasses=(annz+bnnz+wnnz-1)/wnnz;
+
+#define RSB_COO_MOVE(VD,ID,JD,VS,IS,JS,doff,soff) \
+		VD[doff]=VS[soff], \
+		ID[doff]=IS[soff], \
+		JD[doff]=JS[soff],++soff,++doff
+
+#define RSB_CMP_COO_LESS_THAN(IA,JA,IB,JB,aoff,boff) \
+		IA[aoff]<IB[boff] || ( IA[aoff]==IB[boff] && JA[aoff] < JB[boff] )
+
+/*	RSB_STDOUT(" * \n");*/
+/*	RSB_STDOUT("wsize=%d steps:%d wnnz=%d nnz=%d\n",wsize,wpasses,wnnz,nnz);*/
+
+/*	RSB_STDOUT("SSSSSSsentinel:%x %d %d\n",IA+annz+bnnz,IA[annz+bnnz],JA[annz+bnnz]);*/
+	for(step=0;step<wpasses;++step)
+	{
+		rsb_nnz_idx_t cnnz;
+		if(step==wpasses-1)
+			wnnz=nnz-step*wnnz;
+
+		cnnz=wnnz;
+		cnnz = RSB_MIN(cnnz,annz);
+		cnnz = RSB_MIN(cnnz,bnnz);
+/*		RSB_STDOUT("step:%d wnnz=%d annz=%d bnnz=%d cnnz=%d\n",step,wnnz,annz,bnnz,cnnz);*/
+		/* merge wnnz elements from A and B in W */
+		woff=0;
+		aoff=boff=0;
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,annz,RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VB,IB,JB,bnnz,RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,NULL,flags));
+	/*	RSB_STDOUT("SSSSsentinel:%x %d %d\n",IB+bnnz,IB[bnnz],JB[bnnz]); */
+		while(woff<wnnz && aoff<annz && boff<bnnz)
+		{
+			if(RSB_CMP_COO_LESS_THAN(IA,JA,IB,JB,aoff,boff))
+				RSB_COO_MOVE(VW,IW,JW,VA,IA,JA,woff,aoff);
+			else
+				RSB_COO_MOVE(VW,IW,JW,VB,IB,JB,woff,boff);
+		}
+/*		RSB_STDOUT("aoff=%d boff=%d woff=%d\n",aoff,boff,woff);*/
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VW,IW,JW,woff,RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,NULL,flags));
+		if(woff<wnnz)
+		{
+			if(aoff==annz)
+				RSB_COO_MEMMOVE(VW,IW,JW,VB,IB,JB,woff,boff,wnnz-woff,el_size),boff+=(wnnz-woff);
+			else
+			if(boff==bnnz)
+				RSB_COO_MEMMOVE(VW,IW,JW,VA,IA,JA,woff,aoff,wnnz-woff,el_size),aoff+=(wnnz-woff);
+		}
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VW,IW,JW,wnnz,RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VB,IB,JB,bnnz,RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,annz,RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,NULL,flags));
+/*		RSB_STDOUT("aoff:%d boff=%d wnnz=%d annz=%d\n",aoff,boff,wnnz,annz);*/
+		/* memmove A boff places forward */
+		bnnz-=boff;
+		annz-=aoff;
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VB,IB,JB,bnnz,RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,annz,RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,NULL,flags));
+/*		RSB_STDOUT("SSSSsentinel:%x %d %d\n",IB+boff+bnnz,IB[boff+bnnz],JB[boff+bnnz]);*/
+		RSB_COO_MEMMOVE(VA,IA,JA,VA,IA,JA,wnnz,aoff,annz,el_size);
+/*		RSB_STDOUT("PSSSsentinel:%x %d %d\n",IB+boff+bnnz,IB[boff+bnnz],JB[boff+bnnz]);*/
+		VB+=boff;
+		IB+=boff;
+		JB+=boff;
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VB,IB,JB,bnnz,RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,NULL,flags));
+		RSB_COO_MEMMOVE(VA,IA,JA,VW,IW,JW,0,0,wnnz,el_size);
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,wnnz,RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,annz,RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,NULL,flags));
+		VA+=wnnz;
+		IA+=wnnz;
+		JA+=wnnz;
+	}
+	return RSB_ERR_NO_ERROR;
+	#undef RSB_COO_MOVE
+	#undef RSB_CMP_COO_LESS_THAN
+}
+rsb_err_t rsb__do_util_merge_sorted_subarrays_in_place_double_complex(
+		double complex *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_char_t * W,
+		rsb_nnz_idx_t annz, rsb_nnz_idx_t bnnz,
+	       	size_t wsize, rsb_flags_t flags
+		)
+{
+	/**
+	 * \ingroup gr_util
+	 *       A         B
+	 * +----------+----------+
+	 *  <- annz -> <- bnnz ->
+	 *
+	 *        W
+	 * +------------+
+	 *  <-  wnnz  ->
+	 *
+	 * Merges an array containing two ordered subarrays A and B, 
+	 * sized respectively with annz and bnnz elements, using a
+	 * swap area sized wsize bytes. 
+	 *
+	 * NOTE: this is NOT an optimized code, just a naive one to have this functionality working.
+	 */
+	rsb_int_t wpasses;
+	rsb_nnz_idx_t wnnz,nnz=annz+bnnz;
+	double complex *VW=NULL;
+       	rsb_coo_idx_t * IW=NULL;
+       	rsb_coo_idx_t * JW=NULL;
+	double complex *VB=NULL;
+       	rsb_coo_idx_t * IB=NULL;
+       	rsb_coo_idx_t * JB=NULL;
+	size_t el_size=sizeof(double complex);
+	int step;
+	rsb_nnz_idx_t aoff=0,boff=0,woff=0;
+
+	wnnz=wsize/(el_size+2*sizeof(rsb_coo_idx_t));
+	VW=(double complex*)W;
+	W+=el_size*wnnz;
+	IW=(rsb_coo_idx_t*)W;
+	W+=sizeof(rsb_coo_idx_t)*wnnz;
+	JW=(rsb_coo_idx_t*)W;
+
+	VB=VA+annz;
+	IB=IA+annz;
+	JB=JA+annz;
+
+	wpasses=(annz+bnnz+wnnz-1)/wnnz;
+
+#define RSB_COO_MOVE(VD,ID,JD,VS,IS,JS,doff,soff) \
+		VD[doff]=VS[soff], \
+		ID[doff]=IS[soff], \
+		JD[doff]=JS[soff],++soff,++doff
+
+#define RSB_CMP_COO_LESS_THAN(IA,JA,IB,JB,aoff,boff) \
+		IA[aoff]<IB[boff] || ( IA[aoff]==IB[boff] && JA[aoff] < JB[boff] )
+
+/*	RSB_STDOUT(" * \n");*/
+/*	RSB_STDOUT("wsize=%d steps:%d wnnz=%d nnz=%d\n",wsize,wpasses,wnnz,nnz);*/
+
+/*	RSB_STDOUT("SSSSSSsentinel:%x %d %d\n",IA+annz+bnnz,IA[annz+bnnz],JA[annz+bnnz]);*/
+	for(step=0;step<wpasses;++step)
+	{
+		rsb_nnz_idx_t cnnz;
+		if(step==wpasses-1)
+			wnnz=nnz-step*wnnz;
+
+		cnnz=wnnz;
+		cnnz = RSB_MIN(cnnz,annz);
+		cnnz = RSB_MIN(cnnz,bnnz);
+/*		RSB_STDOUT("step:%d wnnz=%d annz=%d bnnz=%d cnnz=%d\n",step,wnnz,annz,bnnz,cnnz);*/
+		/* merge wnnz elements from A and B in W */
+		woff=0;
+		aoff=boff=0;
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,annz,RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VB,IB,JB,bnnz,RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,NULL,flags));
+	/*	RSB_STDOUT("SSSSsentinel:%x %d %d\n",IB+bnnz,IB[bnnz],JB[bnnz]); */
+		while(woff<wnnz && aoff<annz && boff<bnnz)
+		{
+			if(RSB_CMP_COO_LESS_THAN(IA,JA,IB,JB,aoff,boff))
+				RSB_COO_MOVE(VW,IW,JW,VA,IA,JA,woff,aoff);
+			else
+				RSB_COO_MOVE(VW,IW,JW,VB,IB,JB,woff,boff);
+		}
+/*		RSB_STDOUT("aoff=%d boff=%d woff=%d\n",aoff,boff,woff);*/
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VW,IW,JW,woff,RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,NULL,flags));
+		if(woff<wnnz)
+		{
+			if(aoff==annz)
+				RSB_COO_MEMMOVE(VW,IW,JW,VB,IB,JB,woff,boff,wnnz-woff,el_size),boff+=(wnnz-woff);
+			else
+			if(boff==bnnz)
+				RSB_COO_MEMMOVE(VW,IW,JW,VA,IA,JA,woff,aoff,wnnz-woff,el_size),aoff+=(wnnz-woff);
+		}
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VW,IW,JW,wnnz,RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VB,IB,JB,bnnz,RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,annz,RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,NULL,flags));
+/*		RSB_STDOUT("aoff:%d boff=%d wnnz=%d annz=%d\n",aoff,boff,wnnz,annz);*/
+		/* memmove A boff places forward */
+		bnnz-=boff;
+		annz-=aoff;
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VB,IB,JB,bnnz,RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,annz,RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,NULL,flags));
+/*		RSB_STDOUT("SSSSsentinel:%x %d %d\n",IB+boff+bnnz,IB[boff+bnnz],JB[boff+bnnz]);*/
+		RSB_COO_MEMMOVE(VA,IA,JA,VA,IA,JA,wnnz,aoff,annz,el_size);
+/*		RSB_STDOUT("PSSSsentinel:%x %d %d\n",IB+boff+bnnz,IB[boff+bnnz],JB[boff+bnnz]);*/
+		VB+=boff;
+		IB+=boff;
+		JB+=boff;
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VB,IB,JB,bnnz,RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,NULL,flags));
+		RSB_COO_MEMMOVE(VA,IA,JA,VW,IW,JW,0,0,wnnz,el_size);
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,wnnz,RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,annz,RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ,NULL,flags));
+		VA+=wnnz;
+		IA+=wnnz;
+		JA+=wnnz;
+	}
+	return RSB_ERR_NO_ERROR;
+	#undef RSB_COO_MOVE
+	#undef RSB_CMP_COO_LESS_THAN
+}
+rsb_err_t rsb__do_util_merge_sorted_subarrays_in_place(
+		void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_char_t * W,
+		rsb_nnz_idx_t annz, rsb_nnz_idx_t bnnz,
+	       	size_t wsize, rsb_flags_t flags, rsb_type_t typecode
+		)
+{
+	switch(typecode)
+	{
+		case RSB_NUMERICAL_TYPE_DOUBLE :
+			return rsb__do_util_merge_sorted_subarrays_in_place_double(VA,IA,JA,W,annz,bnnz,wsize,flags);
+		break;
+		case RSB_NUMERICAL_TYPE_FLOAT :
+			return rsb__do_util_merge_sorted_subarrays_in_place_float(VA,IA,JA,W,annz,bnnz,wsize,flags);
+		break;
+		case RSB_NUMERICAL_TYPE_FLOAT_COMPLEX :
+			return rsb__do_util_merge_sorted_subarrays_in_place_float_complex(VA,IA,JA,W,annz,bnnz,wsize,flags);
+		break;
+		case RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX :
+			return rsb__do_util_merge_sorted_subarrays_in_place_double_complex(VA,IA,JA,W,annz,bnnz,wsize,flags);
+		break;
+		default :
+			return RSB_ERR_UNSUPPORTED_TYPE;
+	}
+}
+
+/* @endcond */
diff --git a/rsb_merge.h b/rsb_merge.h
new file mode 100644
index 0000000..2840a3a
--- /dev/null
+++ b/rsb_merge.h
@@ -0,0 +1,75 @@
+/* @cond INNERDOC */
+/**
+ * @file
+ * @brief Auxiliary functions.
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+
+
+#ifndef RSB_MERGE_H_INCLUDED
+#define RSB_MERGE_H_INCLUDED
+
+#include "rsb_common.h"
+#ifndef RSB_PS_ASSERT
+/*#define RSB_PS_ASSERT(e) assert(e)	*/ 	/* uncomment this to use   asserts */
+#define RSB_PS_ASSERT(e)			/* defined empty: asserts disabled */
+#else /* RSB_PS_ASSERT */
+#undef RSB_PS_ASSERT
+#define RSB_PS_ASSERT(e) 
+#endif /* RSB_PS_ASSERT */
+rsb_err_t rsb__do_util_merge_sorted_subarrays_in_place_double(
+		double *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_char_t * W,
+		rsb_nnz_idx_t annz, rsb_nnz_idx_t bnnz,
+	       	size_t wsize, rsb_flags_t flags
+		)
+;rsb_err_t rsb__do_util_merge_sorted_subarrays_in_place_float(
+		float *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_char_t * W,
+		rsb_nnz_idx_t annz, rsb_nnz_idx_t bnnz,
+	       	size_t wsize, rsb_flags_t flags
+		)
+;rsb_err_t rsb__do_util_merge_sorted_subarrays_in_place_float_complex(
+		float complex *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_char_t * W,
+		rsb_nnz_idx_t annz, rsb_nnz_idx_t bnnz,
+	       	size_t wsize, rsb_flags_t flags
+		)
+;rsb_err_t rsb__do_util_merge_sorted_subarrays_in_place_double_complex(
+		double complex *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_char_t * W,
+		rsb_nnz_idx_t annz, rsb_nnz_idx_t bnnz,
+	       	size_t wsize, rsb_flags_t flags
+		)
+;rsb_err_t rsb__do_util_merge_sorted_subarrays_in_place(
+		void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_char_t * W,
+		rsb_nnz_idx_t annz, rsb_nnz_idx_t bnnz,
+	       	size_t wsize, rsb_flags_t flags, rsb_type_t typecode
+		)
+;
+#endif /* RSB_MERGE_H_INCLUDED */
+
+/* @endcond */
diff --git a/rsb_merge.m4 b/rsb_merge.m4
new file mode 100644
index 0000000..7b8546c
--- /dev/null
+++ b/rsb_merge.m4
@@ -0,0 +1,190 @@
+dnl
+dnl
+dnl	@author: Michele Martone
+dnl
+include(`rsb_misc.m4')dnl
+include(`do_unroll.m4')dnl
+/* @cond INNERDOC */
+/**
+ * @file
+ * @brief Auxiliary functions.
+ */
+RSB_M4_HEADER_MESSAGE()dnl
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`
+#ifndef RSB_MERGE_H_INCLUDED
+#define RSB_MERGE_H_INCLUDED
+')
+dnl
+dnl
+#include "rsb_common.h"
+dnl
+#ifndef RSB_PS_ASSERT
+/*#define RSB_PS_ASSERT(e) assert(e)	*/ 	/* uncomment this to use   asserts */
+#define RSB_PS_ASSERT(e)			/* defined empty: asserts disabled */
+#else /* RSB_PS_ASSERT */
+#undef RSB_PS_ASSERT
+#define RSB_PS_ASSERT(e) 
+#endif /* RSB_PS_ASSERT */
+dnl
+dnl
+foreach(`mtype',RSB_M4_TYPES,`dnl
+dnl
+dnl
+rsb_err_t rsb__do_util_merge_sorted_subarrays_in_place_`'RSB_M4_TYPE_CODE(mtype)`'(
+		mtype *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_char_t * W,
+		rsb_nnz_idx_t annz, rsb_nnz_idx_t bnnz,
+	       	size_t wsize, rsb_flags_t flags
+		)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+dnl
+pushdef(`typecode',RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype))`'dnl
+dnl
+	/**
+	 * \ingroup gr_util
+	 *       A         B
+	 * +----------+----------+
+	 *  <- annz -> <- bnnz ->
+	 *
+	 *        W
+	 * +------------+
+	 *  <-  wnnz  ->
+	 *
+	 * Merges an array containing two ordered subarrays A and B, 
+	 * sized respectively with annz and bnnz elements, using a
+	 * swap area sized wsize bytes. 
+	 *
+	 * NOTE: this is NOT an optimized code, just a naive one to have this functionality working.
+	 */
+	rsb_int_t wpasses;
+	rsb_nnz_idx_t wnnz,nnz=annz+bnnz;
+	mtype *VW=NULL;
+       	rsb_coo_idx_t * IW=NULL;
+       	rsb_coo_idx_t * JW=NULL;
+	mtype *VB=NULL;
+       	rsb_coo_idx_t * IB=NULL;
+       	rsb_coo_idx_t * JB=NULL;
+	size_t el_size=sizeof(mtype);
+	int step;
+	rsb_nnz_idx_t aoff=0,boff=0,woff=0;
+
+	wnnz=wsize/(el_size+2*sizeof(rsb_coo_idx_t));
+	VW=(mtype*)W;
+	W+=el_size*wnnz;
+	IW=(rsb_coo_idx_t*)W;
+	W+=sizeof(rsb_coo_idx_t)*wnnz;
+	JW=(rsb_coo_idx_t*)W;
+
+	VB=VA+annz;
+	IB=IA+annz;
+	JB=JA+annz;
+
+	wpasses=(annz+bnnz+wnnz-1)/wnnz;
+
+#define RSB_COO_MOVE(VD,ID,JD,VS,IS,JS,doff,soff) \
+		VD[doff]=VS[soff], \
+		ID[doff]=IS[soff], \
+		JD[doff]=JS[soff],++soff,++doff
+
+#define RSB_CMP_COO_LESS_THAN(IA,JA,IB,JB,aoff,boff) \
+		IA[aoff]<IB[boff] || ( IA[aoff]==IB[boff] && JA[aoff] < JB[boff] )
+
+/*	RSB_STDOUT(" * \n");*/
+/*	RSB_STDOUT("wsize=%d steps:%d wnnz=%d nnz=%d\n",wsize,wpasses,wnnz,nnz);*/
+
+/*	RSB_STDOUT("SSSSSSsentinel:%x %d %d\n",IA+annz+bnnz,IA[annz+bnnz],JA[annz+bnnz]);*/
+	for(step=0;step<wpasses;++step)
+	{
+		rsb_nnz_idx_t cnnz;
+		if(step==wpasses-1)
+			wnnz=nnz-step*wnnz;
+
+		cnnz=wnnz;
+		cnnz = RSB_MIN(cnnz,annz);
+		cnnz = RSB_MIN(cnnz,bnnz);
+/*		RSB_STDOUT("step:%d wnnz=%d annz=%d bnnz=%d cnnz=%d\n",step,wnnz,annz,bnnz,cnnz);*/
+		/* merge wnnz elements from A and B in W */
+		woff=0;
+		aoff=boff=0;
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,annz,typecode,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VB,IB,JB,bnnz,typecode,NULL,flags));
+	/*	RSB_STDOUT("SSSSsentinel:%x %d %d\n",IB+bnnz,IB[bnnz],JB[bnnz]); */
+		while(woff<wnnz && aoff<annz && boff<bnnz)
+		{
+			if(RSB_CMP_COO_LESS_THAN(IA,JA,IB,JB,aoff,boff))
+				RSB_COO_MOVE(VW,IW,JW,VA,IA,JA,woff,aoff);
+			else
+				RSB_COO_MOVE(VW,IW,JW,VB,IB,JB,woff,boff);
+		}
+/*		RSB_STDOUT("aoff=%d boff=%d woff=%d\n",aoff,boff,woff);*/
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VW,IW,JW,woff,typecode,NULL,flags));
+		if(woff<wnnz)
+		{
+			if(aoff==annz)
+				RSB_COO_MEMMOVE(VW,IW,JW,VB,IB,JB,woff,boff,wnnz-woff,el_size),boff+=(wnnz-woff);
+			else
+			if(boff==bnnz)
+				RSB_COO_MEMMOVE(VW,IW,JW,VA,IA,JA,woff,aoff,wnnz-woff,el_size),aoff+=(wnnz-woff);
+		}
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VW,IW,JW,wnnz,typecode,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VB,IB,JB,bnnz,typecode,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,annz,typecode,NULL,flags));
+/*		RSB_STDOUT("aoff:%d boff=%d wnnz=%d annz=%d\n",aoff,boff,wnnz,annz);*/
+		/* memmove A boff places forward */
+		bnnz-=boff;
+		annz-=aoff;
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VB,IB,JB,bnnz,typecode,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,annz,typecode,NULL,flags));
+/*		RSB_STDOUT("SSSSsentinel:%x %d %d\n",IB+boff+bnnz,IB[boff+bnnz],JB[boff+bnnz]);*/
+		RSB_COO_MEMMOVE(VA,IA,JA,VA,IA,JA,wnnz,aoff,annz,el_size);
+/*		RSB_STDOUT("PSSSsentinel:%x %d %d\n",IB+boff+bnnz,IB[boff+bnnz],JB[boff+bnnz]);*/
+		VB+=boff;
+		IB+=boff;
+		JB+=boff;
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VB,IB,JB,bnnz,typecode,NULL,flags));
+		RSB_COO_MEMMOVE(VA,IA,JA,VW,IW,JW,0,0,wnnz,el_size);
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,wnnz,typecode,NULL,flags));
+		RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,annz,typecode,NULL,flags));
+		VA+=wnnz;
+		IA+=wnnz;
+		JA+=wnnz;
+	}
+	return RSB_ERR_NO_ERROR;
+	#undef RSB_COO_MOVE
+	#undef RSB_CMP_COO_LESS_THAN
+dnl
+popdef(`typecode')`'dnl
+dnl
+}
+')dnl
+')dnl
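+dnl	Note: the foreach() above instantiates the routine once per entry of
+dnl	RSB_M4_TYPES; e.g. for `double' it emits
+dnl	rsb__do_util_merge_sorted_subarrays_in_place_double().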
+dnl
+rsb_err_t rsb__do_util_merge_sorted_subarrays_in_place(
+		void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_char_t * W,
+		rsb_nnz_idx_t annz, rsb_nnz_idx_t bnnz,
+	       	size_t wsize, rsb_flags_t flags, rsb_type_t typecode
+		)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	switch(typecode)
+	{
+foreach(`mtype',RSB_M4_TYPES,`dnl
+		case RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype):
+			return rsb__do_util_merge_sorted_subarrays_in_place_`'RSB_M4_TYPE_CODE(mtype)`'(VA,IA,JA,W,annz,bnnz,wsize,flags);
+		break;
+')dnl
+		default :
+			return RSB_ERR_UNSUPPORTED_TYPE;
+	}
+}
+')dnl
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`
+#endif /* RSB_MERGE_H_INCLUDED */
+')
+dnl
+/* @endcond */
+dnl
diff --git a/rsb_mergesort.c b/rsb_mergesort.c
new file mode 100644
index 0000000..b077954
--- /dev/null
+++ b/rsb_mergesort.c
@@ -0,0 +1,2520 @@
+
+
+/* @cond INNERDOC */
+/**
+ * @file
+ * @brief
+ * Sorting functions.
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+
+
+
+#include "rsb.h"
+#include "rsb_common.h"
+#include "rsb_internals.h"
+
+
+extern struct rsb_session_handle_t rsb_global_session_handle;
+
+rsb_err_t rsb__do_mergesort_CSR(
+	rsb_coo_idx_t *iarray,
+	rsb_coo_idx_t *jarray,
+	void *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *iresult,
+	rsb_coo_idx_t *jresult,
+	void *result,
+	rsb_type_t type)
+{
+	/*!
+	 * \ingroup gr_util
+	 *	This function will sort the nonzero elements of a sparse matrix.
+	 *      It will read row and column indices arrays, the values array,
+	 *	and will sort them in separate output arrays.
+	 *
+	 * 	\param length  the input  arrays length
+	 * 	\param iarray  the input  row    indices array
+	 * 	\param jarray  the input  column indices array
+	 * 	\param array   the input  mtype array
+	 * 	\param iresult the output row    indices array
+	 * 	\param jresult the output column indices array
+	 * 	\param result  the output values array
+	 *	FIXME : UNDOCUMENTED
+	 *	Will sort the three arrays (iarray, jarray, array) following the
+	 *	criterion:
+	 *
+	 * 	(ia1,ja1)<=(ia2,ja2) iff (ia1<ia2) or ( (ia1==ia2) and (ja1<ja2) )
+	 * 	i.e.: C (row major) ordering
+	 */
+
+
+	if(type == RSB_NUMERICAL_TYPE_DOUBLE )
+	return rsb_do_mergesort_double_CSR(iarray, jarray,
+array, length,
+iresult, jresult,
+result);
+	else
+	if(type == RSB_NUMERICAL_TYPE_FLOAT )
+	return rsb_do_mergesort_float_CSR(iarray, jarray,
+array, length,
+iresult, jresult,
+result);
+	else
+	if(type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX )
+	return rsb_do_mergesort_float_complex_CSR(iarray, jarray,
+array, length,
+iresult, jresult,
+result);
+	else
+	if(type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX )
+	return rsb_do_mergesort_double_complex_CSR(iarray, jarray,
+array, length,
+iresult, jresult,
+result);
+	else
+	return RSB_ERR_UNSUPPORTED_TYPE;
+}
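+
+/* The ordering criterion shared by all the mergesort variants below, written
+ * out as a standalone comparator for clarity (hypothetical helper, not part
+ * of the generated code): row index first, then column index within a row. */
+#if 0
+static int rsb_coo_row_major_lt(rsb_coo_idx_t ia1, rsb_coo_idx_t ja1,
+		rsb_coo_idx_t ia2, rsb_coo_idx_t ja2)
+{
+	return ia1 < ia2 || (ia1 == ia2 && ja1 < ja2);
+}
+#endif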
+
+rsb_err_t rsb__do_mergesort_BCSR(
+	rsb_coo_idx_t *iarray,
+	rsb_coo_idx_t *jarray,
+	void *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t mb,
+	rsb_coo_idx_t kb,
+	rsb_coo_idx_t *iresult,
+	rsb_coo_idx_t *jresult,
+	void *result,
+	rsb_type_t type)
+{
+	/*!
+	 * \ingroup gr_util
+	 *	This function will sort the nonzero elements of a sparse matrix.
+	 *      It will read row and column indices arrays, the values array,
+	 *	and will sort them in separate output arrays.
+	 *
+	 * 	\param length  the input  arrays length
+	 * 	\param iarray  the input  row    indices array
+	 * 	\param jarray  the input  column indices array
+	 * 	\param array   the input  mtype array
+	 * 	\param iresult the output row    indices array
+	 * 	\param jresult the output column indices array
+	 * 	\param result  the output values array
+
+	 *	FIXME : UNDOCUMENTED
+	 *	Will sort the three arrays (iarray, jarray, array) following the
+	 *	criterion:
+	 *
+	 * 	(ia1,ja1)<=(ia2,ja2) iff (ia1<ia2) or ( (ia1==ia2) and (ja1<ja2) )
+	 * 	i.e.: C (row major) ordering
+	 */
+
+
+	if(type == RSB_NUMERICAL_TYPE_DOUBLE )
+	return rsb_do_mergesort_double_BCSR(iarray, jarray,
+	mb,kb,array, length,
+iresult, jresult,
+result);
+	else
+	if(type == RSB_NUMERICAL_TYPE_FLOAT )
+	return rsb_do_mergesort_float_BCSR(iarray, jarray,
+	mb,kb,array, length,
+iresult, jresult,
+result);
+	else
+	if(type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX )
+	return rsb_do_mergesort_float_complex_BCSR(iarray, jarray,
+	mb,kb,array, length,
+iresult, jresult,
+result);
+	else
+	if(type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX )
+	return rsb_do_mergesort_double_complex_BCSR(iarray, jarray,
+	mb,kb,array, length,
+iresult, jresult,
+result);
+	else
+	return RSB_ERR_UNSUPPORTED_TYPE;
+}
+
+rsb_err_t rsb__do_mergesort_VBR(
+	rsb_coo_idx_t *iarray,
+	rsb_coo_idx_t *jarray,
+	rsb_coo_idx_t *biarray,
+	rsb_coo_idx_t *bjarray,
+	void *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *iresult,
+	rsb_coo_idx_t *jresult,
+	rsb_coo_idx_t *biresult,
+	rsb_coo_idx_t *bjresult,
+	void *result,
+	rsb_type_t type)
+{
+	/*!
+	 * \ingroup gr_util
+	 *	This function will sort the nonzero elements of a sparse blocked matrix.
+	 *      It will read row and column indices arrays, the values array,
+	 *	and will sort them in separate output arrays.
+	 *
+	 * 	\param length  the input  arrays length
+	 * 	\param iarray  the input  row    indices array
+	 * 	\param jarray  the input  column indices array
+	 * 	\param array   the input  mtype array
+	 * 	\param iresult the output row    indices array
+	 * 	\param jresult the output column indices array
+	 * 	\param result  the output values array
+	 * 	\param biarray  the input  block row    indices array
+	 * 	\param bjarray  the input  block column indices array
+	 * 	\param biresult the output block row    indices array
+	 * 	\param bjresult the output block column indices array
+	 *	Will sort the three arrays (iarray, jarray, array) following the
+	 *	criterion:
+	 *
+	 * 	(ia1,ja1)<=(ia2,ja2) iff (ia1<ia2) or ( (ia1==ia2) and (ja1<ja2) )
+	 * 	i.e.: C (row major) ordering
+	 */
+
+
+	if(type == RSB_NUMERICAL_TYPE_DOUBLE )
+	return rsb_do_mergesort_double_VBR(iarray, jarray,
+	biarray,bjarray,array, length,
+iresult, jresult,
+	biresult,bjresult,result);
+	else
+	if(type == RSB_NUMERICAL_TYPE_FLOAT )
+	return rsb_do_mergesort_float_VBR(iarray, jarray,
+	biarray,bjarray,array, length,
+iresult, jresult,
+	biresult,bjresult,result);
+	else
+	if(type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX )
+	return rsb_do_mergesort_float_complex_VBR(iarray, jarray,
+	biarray,bjarray,array, length,
+iresult, jresult,
+	biresult,bjresult,result);
+	else
+	if(type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX )
+	return rsb_do_mergesort_double_complex_VBR(iarray, jarray,
+	biarray,bjarray,array, length,
+iresult, jresult,
+	biresult,bjresult,result);
+	else
+	return RSB_ERR_UNSUPPORTED_TYPE;
+}
+
+rsb_err_t rsb_do_mergesort_double_CSR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	double *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	double *restrict result)
+
+{
+	/*!
+	 *	\ingroup gr_util
+	 *	This function will sort the nonzero elements of a sparse blocked
+	 *      double matrix.
+	 *      It will read row and column indices arrays, the values array,
+	 *	and will sort them in separate output arrays.
+	 *
+	 *	NOTE : This function could be optimized.
+	 *
+	 * 	\param iarray  the input  row    indices array
+	 * 	\param jarray  the input  column indices array
+	 * 	\param array   the input  double array
+	 * 	\param iresult the output row    indices array
+	 * 	\param jresult the output column indices array
+	 * 	\param result  the output values array
+	 *	Will sort the three arrays (iarray, jarray, array) following the
+	 *	criterion:
+	 *
+	 * 	(ia1,ja1)<=(ia2,ja2) iff (ia1<ia2) or ( (ia1==ia2) and (ja1<ja2) )
+	 * 	i.e.: C (row major) ordering
+	 */
+
+	rsb_nnz_idx_t middle;
+	rsb_coo_idx_t so=sizeof(rsb_coo_idx_t);
+
+	rsb_coo_idx_t * ileft  ;
+	rsb_coo_idx_t * iright ;
+	rsb_coo_idx_t * jleft  ;
+	rsb_coo_idx_t * jright ;
+	double * left  ;
+	double * right ;
+	
+#define LIMIT 1
+	if(length==LIMIT)
+	{
+		*iresult = *iarray;
+		*jresult = *jarray;
+		*(double*)result = *(double*)array;
+	}
+	if(length<=LIMIT) return RSB_ERR_NO_ERROR;
+#undef LIMIT
+	middle = length/2;
+
+	left  = array;
+	right  = array+middle;
+	ileft  = iarray;
+	jleft  = jarray;
+	iright = iarray+middle;
+	jright = jarray+middle;
+
+/* 20121016: commented out OMP usage because it broke serial compilation */
+	{
+	rsb_do_mergesort_double_CSR
+	( ileft, jleft,
+		left,   middle,
+	        iresult  ,       jresult,
+		result         );
+
+	/* (a tn==1 guard, left over from the removed OpenMP path and never true
+	 * in serial code, used to skip this call: the right half must be sorted too) */
+	rsb_do_mergesort_double_CSR
+	(iright, jright,
+		right, length-middle,  iresult+middle  ,jresult+middle,
+	((double*)result)+middle  );
+	}
+
+	RSB_MEMCPY(ileft ,iresult       ,so*middle);
+	RSB_MEMCPY(jleft ,jresult       ,so*middle);
+	RSB_MEMCPY(  left, result       ,sizeof(double)*middle);
+	RSB_MEMCPY(iright,iresult+middle,so*(length-middle));
+	RSB_MEMCPY(jright,jresult+middle,so*(length-middle));
+	RSB_MEMCPY( right, ((double*)result)+middle ,sizeof(double)*(length-middle));
+
+	rsb_do_merge_double_CSR		(
+			ileft,iright,iresult,
+			jleft,jright,jresult,
+
+			left, right, result,
+			middle,length-middle
+			);
+	return RSB_ERR_NO_ERROR;	/* ! */
+}
+
+
+
+void rsb_do_merge_double_CSR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright,  rsb_coo_idx_t*restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright,  rsb_coo_idx_t*restrict jresult,
+		const double* left, const double* restrict right,  double* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length )
+
+
+{
+	/*!
+	 * \ingroup gr_util
+	 * The merge function for our CSR matrix coefficients sorting.
+	 *
+	 * NOTE: This function is the mergesort bottleneck.
+	 */
+	register int left_index=0, right_index=0, result_index=0;
+	
+	/*
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	          |<-  length ----->|
+	          ^- index
+	 */
+
+
+#define LEFT_ADVANCE		left_index =( left_index+1); left_length-- ;
+#define RIGHT_ADVANCE		right_index=(right_index+1); right_length--;
+#define RESULT_ADVANCE		result_index =( result_index+1);
+
+#define RESULT_APPEND(IEL,JEL,EL)	\
+	iresult[result_index]=(IEL);  \
+	jresult[result_index]=(JEL);  \
+	result[result_index]=( EL);  \
+	RESULT_ADVANCE;
+
+#define LRESULT_APPEND	\
+	iresult[result_index]=ileft[left_index];\
+	jresult[result_index]=jleft[left_index];\
+	result[ result_index]= left[left_index];\
+	RESULT_ADVANCE; \
+	LEFT_ADVANCE;
+
+#define RRESULT_APPEND	\
+	iresult[result_index]=iright[right_index];\
+	jresult[result_index]=jright[right_index];\
+	 result[result_index]= right[right_index];\
+	RESULT_ADVANCE; \
+	RIGHT_ADVANCE; 
+
+	while( left_length > 0 && right_length > 0)
+	if(
+		ileft[left_index] < iright[right_index] ||
+		(	ileft[left_index] == iright[right_index]	&&
+			jleft[left_index] <= jright[right_index]	)
+		)
+	{
+		LRESULT_APPEND
+	}
+	else
+	{
+		RRESULT_APPEND
+	}
+
+	while( left_length  > 0 )
+	{
+		LRESULT_APPEND
+	}
+	while( right_length  > 0 )
+	{
+		RRESULT_APPEND
+	}
+#undef LEFT_ADVANCE
+#undef RIGHT_ADVANCE
+#undef RESULT_ADVANCE
+#undef RESULT_APPEND
+#undef LRESULT_APPEND
+#undef RRESULT_APPEND
+
+}
+
+rsb_err_t rsb_do_mergesort_float_CSR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	float *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	float *restrict result)
+
+{
+	/*!
+	 *	\ingroup gr_util
+	 *	This function will sort the nonzero elements of a sparse blocked
+	 *      float matrix.
+	 *      It will read row and column indices arrays, the values array,
+	 *	and will sort them in separate output arrays.
+	 *
+	 *	NOTE : This function could be optimized.
+	 *
+	 * 	\param iarray  the input  row    indices array
+	 * 	\param jarray  the input  column indices array
+	 * 	\param array   the input  float array
+	 * 	\param iresult the output row    indices array
+	 * 	\param jresult the output column indices array
+	 * 	\param result  the output values array
+	 *	Will sort the three arrays (iarray, jarray, array) following the
+	 *	criterion:
+	 *
+	 * 	(ia1,ja1)<=(ia2,ja2) iff (ia1<ia2) or ( (ia1==ia2) and (ja1<ja2) )
+	 * 	i.e.: C (row major) ordering
+	 */
+
+	rsb_nnz_idx_t middle;
+	rsb_coo_idx_t so=sizeof(rsb_coo_idx_t);
+
+	rsb_coo_idx_t * ileft  ;
+	rsb_coo_idx_t * iright ;
+	rsb_coo_idx_t * jleft  ;
+	rsb_coo_idx_t * jright ;
+	float * left  ;
+	float * right ;
+	
+#define LIMIT 1
+	if(length==LIMIT)
+	{
+		*iresult = *iarray;
+		*jresult = *jarray;
+		*(float*)result = *(float*)array;
+	}
+	if(length<=LIMIT) return RSB_ERR_NO_ERROR;
+#undef LIMIT
+	middle = length/2;
+
+	left  = array;
+	right  = array+middle;
+	ileft  = iarray;
+	jleft  = jarray;
+	iright = iarray+middle;
+	jright = jarray+middle;
+
+/* 20121016: commented out OMP usage because it broke serial compilation */
+	{
+	rsb_do_mergesort_float_CSR
+	( ileft, jleft,
+		left,   middle,
+	        iresult  ,       jresult,
+		result         );
+
+	/* (a tn==1 guard, left over from the removed OpenMP path and never true
+	 * in serial code, used to skip this call: the right half must be sorted too) */
+	rsb_do_mergesort_float_CSR
+	(iright, jright,
+		right, length-middle,  iresult+middle  ,jresult+middle,
+	((float*)result)+middle  );
+	}
+
+	RSB_MEMCPY(ileft ,iresult       ,so*middle);
+	RSB_MEMCPY(jleft ,jresult       ,so*middle);
+	RSB_MEMCPY(  left, result       ,sizeof(float)*middle);
+	RSB_MEMCPY(iright,iresult+middle,so*(length-middle));
+	RSB_MEMCPY(jright,jresult+middle,so*(length-middle));
+	RSB_MEMCPY( right, ((float*)result)+middle ,sizeof(float)*(length-middle));
+
+	rsb_do_merge_float_CSR		(
+			ileft,iright,iresult,
+			jleft,jright,jresult,
+
+			left, right, result,
+			middle,length-middle
+			);
+	return RSB_ERR_NO_ERROR;	/* ! */
+}
+
+
+
+void rsb_do_merge_float_CSR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright,  rsb_coo_idx_t*restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright,  rsb_coo_idx_t*restrict jresult,
+		const float* left, const float* restrict right,  float* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length )
+
+
+{
+	/*!
+	 * \ingroup gr_util
+	 * The merge function for our CSR matrix coefficients sorting.
+	 *
+	 * NOTE: This function is the mergesort bottleneck.
+	 */
+	register int left_index=0, right_index=0, result_index=0;
+	
+	/*
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	          |<-  length ----->|
+	          ^- index
+	 */
+
+
+#define LEFT_ADVANCE		left_index =( left_index+1); left_length-- ;
+#define RIGHT_ADVANCE		right_index=(right_index+1); right_length--;
+#define RESULT_ADVANCE		result_index =( result_index+1);
+
+#define RESULT_APPEND(IEL,JEL,EL)	\
+	iresult[result_index]=(IEL);  \
+	jresult[result_index]=(JEL);  \
+	result[result_index]=( EL);  \
+	RESULT_ADVANCE;
+
+#define LRESULT_APPEND	\
+	iresult[result_index]=ileft[left_index];\
+	jresult[result_index]=jleft[left_index];\
+	result[ result_index]= left[left_index];\
+	RESULT_ADVANCE; \
+	LEFT_ADVANCE;
+
+#define RRESULT_APPEND	\
+	iresult[result_index]=iright[right_index];\
+	jresult[result_index]=jright[right_index];\
+	 result[result_index]= right[right_index];\
+	RESULT_ADVANCE; \
+	RIGHT_ADVANCE; 
+
+	while( left_length > 0 && right_length > 0)
+	if(
+		ileft[left_index] < iright[right_index] ||
+		(	ileft[left_index] == iright[right_index]	&&
+			jleft[left_index] <= jright[right_index]	)
+		)
+	{
+		LRESULT_APPEND
+	}
+	else
+	{
+		RRESULT_APPEND
+	}
+
+	while( left_length  > 0 )
+	{
+		LRESULT_APPEND
+	}
+	while( right_length  > 0 )
+	{
+		RRESULT_APPEND
+	}
+#undef LEFT_ADVANCE
+#undef RIGHT_ADVANCE
+#undef RESULT_ADVANCE
+#undef RESULT_APPEND
+#undef LRESULT_APPEND
+#undef RRESULT_APPEND
+
+}
+
+rsb_err_t rsb_do_mergesort_float_complex_CSR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	float complex *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	float complex *restrict result)
+
+{
+	/*!
+	 *	\ingroup gr_util
+	 *	This function will sort the nonzero elements of a sparse blocked
+	 *      float complex matrix.
+	 *      It will read row and column indices arrays, the values array,
+	 *	and will sort them in separate output arrays.
+	 *
+	 *	NOTE : This function could be optimized.
+	 *
+	 * 	\param iarray  the input  row    indices array
+	 * 	\param jarray  the input  column indices array
+	 * 	\param array   the input  float complex array
+	 * 	\param iresult the output row    indices array
+	 * 	\param jresult the output column indices array
+	 * 	\param result  the output values array
+	 *	Will sort the three arrays (iarray, jarray, array) following the
+	 *	criterion:
+	 *
+	 * 	(ia1,ja1)<=(ia2,ja2) iff (ia1<ia2) or ( (ia1==ia2) and (ja1<ja2) )
+	 * 	i.e.: C (row major) ordering
+	 */
+
+	rsb_nnz_idx_t middle;
+	rsb_coo_idx_t so=sizeof(rsb_coo_idx_t);
+
+	rsb_coo_idx_t * ileft  ;
+	rsb_coo_idx_t * iright ;
+	rsb_coo_idx_t * jleft  ;
+	rsb_coo_idx_t * jright ;
+	float complex * left  ;
+	float complex * right ;
+	
+#define LIMIT 1
+	if(length==LIMIT)
+	{
+		*iresult = *iarray;
+		*jresult = *jarray;
+		*(float complex*)result = *(float complex*)array;
+	}
+	if(length<=LIMIT) return RSB_ERR_NO_ERROR;
+#undef LIMIT
+	middle = length/2;
+
+	left  = array;
+	right  = array+middle;
+	ileft  = iarray;
+	jleft  = jarray;
+	iright = iarray+middle;
+	jright = jarray+middle;
+
+/* 20121016: commented out OMP usage because it broke serial compilation */
+	{
+	rsb_do_mergesort_float_complex_CSR
+	( ileft, jleft,
+		left,   middle,
+	        iresult  ,       jresult,
+		result         );
+
+	/* (a tn==1 guard, left over from the removed OpenMP path and never true
+	 * in serial code, used to skip this call: the right half must be sorted too) */
+	rsb_do_mergesort_float_complex_CSR
+	(iright, jright,
+		right, length-middle,  iresult+middle  ,jresult+middle,
+	((float complex*)result)+middle  );
+	}
+
+	RSB_MEMCPY(ileft ,iresult       ,so*middle);
+	RSB_MEMCPY(jleft ,jresult       ,so*middle);
+	RSB_MEMCPY(  left, result       ,sizeof(float complex)*middle);
+	RSB_MEMCPY(iright,iresult+middle,so*(length-middle));
+	RSB_MEMCPY(jright,jresult+middle,so*(length-middle));
+	RSB_MEMCPY( right, ((float complex*)result)+middle ,sizeof(float complex)*(length-middle));
+
+	rsb_do_merge_float_complex_CSR		(
+			ileft,iright,iresult,
+			jleft,jright,jresult,
+
+			left, right, result,
+			middle,length-middle
+			);
+	return RSB_ERR_NO_ERROR;	/* ! */
+}
+
+
+
+void rsb_do_merge_float_complex_CSR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright,  rsb_coo_idx_t*restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright,  rsb_coo_idx_t*restrict jresult,
+		const float complex* left, const float complex* restrict right,  float complex* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length )
+
+
+{
+	/*!
+	 * \ingroup gr_util
+	 * The merge function for our CSR matrix coefficients sorting.
+	 *
+	 * NOTE: This function is the mergesort bottleneck.
+	 */
+	register int left_index=0, right_index=0, result_index=0;
+	
+	/*
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	          |<-  length ----->|
+	          ^- index
+	 */
+
+
+#define LEFT_ADVANCE		left_index =( left_index+1); left_length-- ;
+#define RIGHT_ADVANCE		right_index=(right_index+1); right_length--;
+#define RESULT_ADVANCE		result_index =( result_index+1);
+
+#define RESULT_APPEND(IEL,JEL,EL)	\
+	iresult[result_index]=(IEL);  \
+	jresult[result_index]=(JEL);  \
+	result[result_index]=( EL);  \
+	RESULT_ADVANCE;
+
+#define LRESULT_APPEND	\
+	iresult[result_index]=ileft[left_index];\
+	jresult[result_index]=jleft[left_index];\
+	result[ result_index]= left[left_index];\
+	RESULT_ADVANCE; \
+	LEFT_ADVANCE;
+
+#define RRESULT_APPEND	\
+	iresult[result_index]=iright[right_index];\
+	jresult[result_index]=jright[right_index];\
+	 result[result_index]= right[right_index];\
+	RESULT_ADVANCE; \
+	RIGHT_ADVANCE; 
+
+	while( left_length > 0 && right_length > 0)
+	if(
+		ileft[left_index] < iright[right_index] ||
+		(	ileft[left_index] == iright[right_index]	&&
+			jleft[left_index] <= jright[right_index]	)
+		)
+	{
+		LRESULT_APPEND
+	}
+	else
+	{
+		RRESULT_APPEND
+	}
+
+	while( left_length  > 0 )
+	{
+		LRESULT_APPEND
+	}
+	while( right_length  > 0 )
+	{
+		RRESULT_APPEND
+	}
+#undef LEFT_ADVANCE
+#undef RIGHT_ADVANCE
+#undef RESULT_ADVANCE
+#undef RESULT_APPEND
+#undef LRESULT_APPEND
+#undef RRESULT_APPEND
+
+}
+
+rsb_err_t rsb_do_mergesort_double_complex_CSR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	double complex *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	double complex *restrict result)
+
+{
+	/*!
+	 *	\ingroup gr_util
+	 *	This function will sort the nonzero elements of a sparse blocked
+	 *      double complex matrix.
+	 *      It will read row and column indices arrays, the values array,
+	 *	and will sort them in separate output arrays.
+	 *
+	 *	NOTE : This function could be optimized.
+	 *
+	 * 	\param iarray  the input  row    indices array
+	 * 	\param jarray  the input  column indices array
+	 * 	\param array   the input  double complex array
+	 * 	\param iresult the output row    indices array
+	 * 	\param jresult the output column indices array
+	 * 	\param result  the output values array
+	 *	Will sort the three arrays (iarray, jarray, array) following the
+	 *	criterion:
+	 *
+	 * 	(ia1,ja1)<=(ia2,ja2) iff (ia1<ia2) or ( (ia1==ia2) and (ja1<ja2) )
+	 * 	i.e.: C (row major) ordering
+	 */
+
+	rsb_nnz_idx_t middle;
+	rsb_coo_idx_t so=sizeof(rsb_coo_idx_t);
+
+	rsb_coo_idx_t * ileft  ;
+	rsb_coo_idx_t * iright ;
+	rsb_coo_idx_t * jleft  ;
+	rsb_coo_idx_t * jright ;
+	double complex * left  ;
+	double complex * right ;
+	
+#define LIMIT 1
+	if(length==LIMIT)
+	{
+		*iresult = *iarray;
+		*jresult = *jarray;
+		*(double complex*)result = *(double complex*)array;
+	}
+	if(length<=LIMIT) return RSB_ERR_NO_ERROR;
+#undef LIMIT
+	middle = length/2;
+
+	left  = array;
+	right  = array+middle;
+	ileft  = iarray;
+	jleft  = jarray;
+	iright = iarray+middle;
+	jright = jarray+middle;
+
+/* 20121016: commented out OMP usage because it broke serial compilation */
+	{
+	rsb_do_mergesort_double_complex_CSR
+	( ileft, jleft,
+		left,   middle,
+	        iresult  ,       jresult,
+		result         );
+
+	/* (a tn==1 guard, left over from the removed OpenMP path and never true
+	 * in serial code, used to skip this call: the right half must be sorted too) */
+	rsb_do_mergesort_double_complex_CSR
+	(iright, jright,
+		right, length-middle,  iresult+middle  ,jresult+middle,
+	((double complex*)result)+middle  );
+	}
+
+	RSB_MEMCPY(ileft ,iresult       ,so*middle);
+	RSB_MEMCPY(jleft ,jresult       ,so*middle);
+	RSB_MEMCPY(  left, result       ,sizeof(double complex)*middle);
+	RSB_MEMCPY(iright,iresult+middle,so*(length-middle));
+	RSB_MEMCPY(jright,jresult+middle,so*(length-middle));
+	RSB_MEMCPY( right, ((double complex*)result)+middle ,sizeof(double complex)*(length-middle));
+
+	rsb_do_merge_double_complex_CSR		(
+			ileft,iright,iresult,
+			jleft,jright,jresult,
+
+			left, right, result,
+			middle,length-middle
+			);
+	return RSB_ERR_NO_ERROR;	/* ! */
+}
+
+
+
+void rsb_do_merge_double_complex_CSR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright,  rsb_coo_idx_t*restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright,  rsb_coo_idx_t*restrict jresult,
+		const double complex* left, const double complex* restrict right,  double complex* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length )
+
+
+{
+	/*!
+	 * \ingroup gr_util
+	 * The merge step of the CSR matrix coefficient sort
+	 * (row-major ordering on the (i,j) element coordinates).
+	 *
+	 * NOTE: This function is the mergesort bottleneck.
+	 */
+	rsb_nnz_idx_t left_index=0, right_index=0, result_index=0;
+	
+	/*
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	          |<-  length ----->|
+	          ^- index
+	 */
+
+
+#define LEFT_ADVANCE		left_index =( left_index+1); left_length-- ;
+#define RIGHT_ADVANCE		right_index=(right_index+1); right_length--;
+#define RESULT_ADVANCE		result_index =( result_index+1);
+
+#define RESULT_APPEND(IEL,JEL,EL)	\
+	iresult[result_index]=(IEL);  \
+	jresult[result_index]=(JEL);  \
+	result[result_index]=( EL);  \
+	RESULT_ADVANCE;
+
+#define LRESULT_APPEND	\
+	iresult[result_index]=ileft[left_index];\
+	jresult[result_index]=jleft[left_index];\
+	result[ result_index]= left[left_index];\
+	RESULT_ADVANCE; \
+	LEFT_ADVANCE;
+
+#define RRESULT_APPEND	\
+	iresult[result_index]=iright[right_index];\
+	jresult[result_index]=jright[right_index];\
+	 result[result_index]= right[right_index];\
+	RESULT_ADVANCE; \
+	RIGHT_ADVANCE; 
+
+	while( left_length > 0 && right_length > 0 )
+	{
+		if( ileft[left_index] < iright[right_index] ||
+		    ( ileft[left_index] == iright[right_index] &&
+		      jleft[left_index] <= jright[right_index] ) )
+		{
+			LRESULT_APPEND
+		}
+		else
+		{
+			RRESULT_APPEND
+		}
+	}
+
+	while( left_length  > 0 )
+	{
+		LRESULT_APPEND
+	}
+	while( right_length  > 0 )
+	{
+		RRESULT_APPEND
+	}
+#undef LEFT_ADVANCE
+#undef RIGHT_ADVANCE
+#undef RESULT_ADVANCE
+#undef RESULT_APPEND
+#undef LRESULT_APPEND
+#undef RRESULT_APPEND
+
+}
+
+rsb_err_t rsb_do_mergesort_double_BCSR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	rsb_coo_idx_t mb, rsb_coo_idx_t kb,
+	double *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	double *restrict result)
+
+{
+	/*!
+	 *	\ingroup gr_util
+	 *	This function will sort the nonzero elements of a sparse blocked
+	 *	double matrix.
+	 *	It will read the row and column index arrays and the values
+	 *	array, and will sort them into separate output arrays.
+	 *
+	 *	NOTE: This function could be optimized.
+	 *
+	 * 	\param iarray  the input  row    indices array
+	 * 	\param jarray  the input  column indices array
+	 * 	\param mb      the block row    dimension
+	 * 	\param kb      the block column dimension
+	 * 	\param array   the input  double values array
+	 * 	\param iresult the output row    indices array
+	 * 	\param jresult the output column indices array
+	 * 	\param result  the output values array
+	 *
+	 *	Sorts the three arrays (iarray, jarray, array) following the
+	 *	criterion:
+	 *
+	 * 	(ia1,ja1)<=(ia2,ja2) iff (ia1<ia2) or ( (ia1==ia2) and (ja1<ja2) )
+	 * 	i.e.: C (row-major) ordering, applied here to the block
+	 * 	coordinates (ia/mb, ja/kb)
+	 */
+
+	rsb_nnz_idx_t middle;
+	const size_t so = sizeof(rsb_coo_idx_t);
+
+	rsb_coo_idx_t * ileft;
+	rsb_coo_idx_t * iright;
+	rsb_coo_idx_t * jleft;
+	rsb_coo_idx_t * jright;
+	double * left;
+	double * right;
+	
+#define LIMIT 1
+	if(length==LIMIT)
+	{
+		*iresult = *iarray;
+		*jresult = *jarray;
+		*(double*)result = *(double*)array;
+	}
+	if(length<=LIMIT) return RSB_ERR_NO_ERROR;
+#undef LIMIT
+	middle = length/2;
+
+	left  = array;
+	right  = array+middle;
+	ileft  = iarray;
+	jleft  = jarray;
+	iright = iarray+middle;
+	jright = jarray+middle;
+
+/* 20121016: OpenMP usage commented out because it broke serial compilation */
+	{
+	rsb_do_mergesort_double_BCSR(
+		ileft, jleft, mb, kb, left, middle,
+		iresult, jresult, result );
+
+	/* the right half is sorted serially as well */
+	rsb_do_mergesort_double_BCSR(
+		iright, jright, mb, kb, right, length-middle,
+		iresult+middle, jresult+middle, result+middle );
+	}
+
+	RSB_MEMCPY(ileft ,iresult       ,so*middle);
+	RSB_MEMCPY(jleft ,jresult       ,so*middle);
+	RSB_MEMCPY(  left, result       ,sizeof(double)*middle);
+	RSB_MEMCPY(iright,iresult+middle,so*(length-middle));
+	RSB_MEMCPY(jright,jresult+middle,so*(length-middle));
+	RSB_MEMCPY( right, ((double*)result)+middle ,sizeof(double)*(length-middle));
+
+	rsb_do_merge_double_BCSR		(
+			ileft,iright,iresult,
+			jleft,jright,jresult,
+	mb,kb,
+			left, right, result,
+			middle,length-middle
+			);
+	return RSB_ERR_NO_ERROR;	/* ! */
+}
+
+
+
+void rsb_do_merge_double_BCSR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright,  rsb_coo_idx_t*restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright,  rsb_coo_idx_t*restrict jresult,
+		const rsb_coo_idx_t mb, const rsb_coo_idx_t kb,
+		const double* left, const double* restrict right,  double* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length )
+
+
+{
+	/*!
+	 * \ingroup gr_util
+	 * The merge step of the BCSR matrix coefficient sort
+	 * (row-major ordering on the block coordinates (i/mb, j/kb)).
+	 *
+	 * NOTE: This function is the mergesort bottleneck.
+	 */
+	rsb_nnz_idx_t left_index=0, right_index=0, result_index=0;
+	
+	/*
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	          |<-  length ----->|
+	          ^- index
+	 */
+
+
+#define LEFT_ADVANCE		left_index =( left_index+1); left_length-- ;
+#define RIGHT_ADVANCE		right_index=(right_index+1); right_length--;
+#define RESULT_ADVANCE		result_index =( result_index+1);
+
+#define RESULT_APPEND(IEL,JEL,EL)	\
+	iresult[result_index]=(IEL);  \
+	jresult[result_index]=(JEL);  \
+	result[result_index]=( EL);  \
+	RESULT_ADVANCE;
+
+#define LRESULT_APPEND	\
+	iresult[result_index]=ileft[left_index];\
+	jresult[result_index]=jleft[left_index];\
+	result[ result_index]= left[left_index];\
+	RESULT_ADVANCE; \
+	LEFT_ADVANCE;
+
+#define RRESULT_APPEND	\
+	iresult[result_index]=iright[right_index];\
+	jresult[result_index]=jright[right_index];\
+	 result[result_index]= right[right_index];\
+	RESULT_ADVANCE; \
+	RIGHT_ADVANCE; 
+
+	while( left_length > 0 && right_length > 0 )
+	{
+		if( ileft[left_index]/mb < iright[right_index]/mb ||
+		    ( ileft[left_index]/mb == iright[right_index]/mb &&
+		      jleft[left_index]/kb <= jright[right_index]/kb ) )
+		{
+			LRESULT_APPEND
+		}
+		else
+		{
+			RRESULT_APPEND
+		}
+	}
+
+	while( left_length  > 0 )
+	{
+		LRESULT_APPEND
+	}
+	while( right_length  > 0 )
+	{
+		RRESULT_APPEND
+	}
+#undef LEFT_ADVANCE
+#undef RIGHT_ADVANCE
+#undef RESULT_ADVANCE
+#undef RESULT_APPEND
+#undef LRESULT_APPEND
+#undef RRESULT_APPEND
+
+}
+
+rsb_err_t rsb_do_mergesort_float_BCSR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	rsb_coo_idx_t mb, rsb_coo_idx_t kb,
+	float *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	float *restrict result)
+
+{
+	/*!
+	 *	\ingroup gr_util
+	 *	This function will sort the nonzero elements of a sparse blocked
+	 *	float matrix.
+	 *	It will read the row and column index arrays and the values
+	 *	array, and will sort them into separate output arrays.
+	 *
+	 *	NOTE: This function could be optimized.
+	 *
+	 * 	\param iarray  the input  row    indices array
+	 * 	\param jarray  the input  column indices array
+	 * 	\param mb      the block row    dimension
+	 * 	\param kb      the block column dimension
+	 * 	\param array   the input  float values array
+	 * 	\param iresult the output row    indices array
+	 * 	\param jresult the output column indices array
+	 * 	\param result  the output values array
+	 *
+	 *	Sorts the three arrays (iarray, jarray, array) following the
+	 *	criterion:
+	 *
+	 * 	(ia1,ja1)<=(ia2,ja2) iff (ia1<ia2) or ( (ia1==ia2) and (ja1<ja2) )
+	 * 	i.e.: C (row-major) ordering, applied here to the block
+	 * 	coordinates (ia/mb, ja/kb)
+	 */
+
+	rsb_nnz_idx_t middle;
+	const size_t so = sizeof(rsb_coo_idx_t);
+
+	rsb_coo_idx_t * ileft;
+	rsb_coo_idx_t * iright;
+	rsb_coo_idx_t * jleft;
+	rsb_coo_idx_t * jright;
+	float * left;
+	float * right;
+	
+#define LIMIT 1
+	if(length==LIMIT)
+	{
+		*iresult = *iarray;
+		*jresult = *jarray;
+		*(float*)result = *(float*)array;
+	}
+	if(length<=LIMIT) return RSB_ERR_NO_ERROR;
+#undef LIMIT
+	middle = length/2;
+
+	left  = array;
+	right  = array+middle;
+	ileft  = iarray;
+	jleft  = jarray;
+	iright = iarray+middle;
+	jright = jarray+middle;
+
+/* 20121016: OpenMP usage commented out because it broke serial compilation */
+	{
+	rsb_do_mergesort_float_BCSR(
+		ileft, jleft, mb, kb, left, middle,
+		iresult, jresult, result );
+
+	/* the right half is sorted serially as well */
+	rsb_do_mergesort_float_BCSR(
+		iright, jright, mb, kb, right, length-middle,
+		iresult+middle, jresult+middle, result+middle );
+	}
+
+	RSB_MEMCPY(ileft ,iresult       ,so*middle);
+	RSB_MEMCPY(jleft ,jresult       ,so*middle);
+	RSB_MEMCPY(  left, result       ,sizeof(float)*middle);
+	RSB_MEMCPY(iright,iresult+middle,so*(length-middle));
+	RSB_MEMCPY(jright,jresult+middle,so*(length-middle));
+	RSB_MEMCPY( right, ((float*)result)+middle ,sizeof(float)*(length-middle));
+
+	rsb_do_merge_float_BCSR		(
+			ileft,iright,iresult,
+			jleft,jright,jresult,
+	mb,kb,
+			left, right, result,
+			middle,length-middle
+			);
+	return RSB_ERR_NO_ERROR;	/* ! */
+}
+
+
+
+void rsb_do_merge_float_BCSR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright,  rsb_coo_idx_t*restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright,  rsb_coo_idx_t*restrict jresult,
+		const rsb_coo_idx_t mb, const rsb_coo_idx_t kb,
+		const float* left, const float* restrict right,  float* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length )
+
+
+{
+	/*!
+	 * \ingroup gr_util
+	 * The merge step of the BCSR matrix coefficient sort
+	 * (row-major ordering on the block coordinates (i/mb, j/kb)).
+	 *
+	 * NOTE: This function is the mergesort bottleneck.
+	 */
+	rsb_nnz_idx_t left_index=0, right_index=0, result_index=0;
+	
+	/*
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	          |<-  length ----->|
+	          ^- index
+	 */
+
+
+#define LEFT_ADVANCE		left_index =( left_index+1); left_length-- ;
+#define RIGHT_ADVANCE		right_index=(right_index+1); right_length--;
+#define RESULT_ADVANCE		result_index =( result_index+1);
+
+#define RESULT_APPEND(IEL,JEL,EL)	\
+	iresult[result_index]=(IEL);  \
+	jresult[result_index]=(JEL);  \
+	result[result_index]=( EL);  \
+	RESULT_ADVANCE;
+
+#define LRESULT_APPEND	\
+	iresult[result_index]=ileft[left_index];\
+	jresult[result_index]=jleft[left_index];\
+	result[ result_index]= left[left_index];\
+	RESULT_ADVANCE; \
+	LEFT_ADVANCE;
+
+#define RRESULT_APPEND	\
+	iresult[result_index]=iright[right_index];\
+	jresult[result_index]=jright[right_index];\
+	 result[result_index]= right[right_index];\
+	RESULT_ADVANCE; \
+	RIGHT_ADVANCE; 
+
+	while( left_length > 0 && right_length > 0 )
+	{
+		if( ileft[left_index]/mb < iright[right_index]/mb ||
+		    ( ileft[left_index]/mb == iright[right_index]/mb &&
+		      jleft[left_index]/kb <= jright[right_index]/kb ) )
+		{
+			LRESULT_APPEND
+		}
+		else
+		{
+			RRESULT_APPEND
+		}
+	}
+
+	while( left_length  > 0 )
+	{
+		LRESULT_APPEND
+	}
+	while( right_length  > 0 )
+	{
+		RRESULT_APPEND
+	}
+#undef LEFT_ADVANCE
+#undef RIGHT_ADVANCE
+#undef RESULT_ADVANCE
+#undef RESULT_APPEND
+#undef LRESULT_APPEND
+#undef RRESULT_APPEND
+
+}
+
+rsb_err_t rsb_do_mergesort_float_complex_BCSR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	rsb_coo_idx_t mb, rsb_coo_idx_t kb,
+	float complex *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	float complex *restrict result)
+
+{
+	/*!
+	 *	\ingroup gr_util
+	 *	This function will sort the nonzero elements of a sparse blocked
+	 *	float complex matrix.
+	 *	It will read the row and column index arrays and the values
+	 *	array, and will sort them into separate output arrays.
+	 *
+	 *	NOTE: This function could be optimized.
+	 *
+	 * 	\param iarray  the input  row    indices array
+	 * 	\param jarray  the input  column indices array
+	 * 	\param mb      the block row    dimension
+	 * 	\param kb      the block column dimension
+	 * 	\param array   the input  float complex values array
+	 * 	\param iresult the output row    indices array
+	 * 	\param jresult the output column indices array
+	 * 	\param result  the output values array
+	 *
+	 *	Sorts the three arrays (iarray, jarray, array) following the
+	 *	criterion:
+	 *
+	 * 	(ia1,ja1)<=(ia2,ja2) iff (ia1<ia2) or ( (ia1==ia2) and (ja1<ja2) )
+	 * 	i.e.: C (row-major) ordering, applied here to the block
+	 * 	coordinates (ia/mb, ja/kb)
+	 */
+
+	rsb_nnz_idx_t middle;
+	const size_t so = sizeof(rsb_coo_idx_t);
+
+	rsb_coo_idx_t * ileft;
+	rsb_coo_idx_t * iright;
+	rsb_coo_idx_t * jleft;
+	rsb_coo_idx_t * jright;
+	float complex * left;
+	float complex * right;
+	
+#define LIMIT 1
+	if(length==LIMIT)
+	{
+		*iresult = *iarray;
+		*jresult = *jarray;
+		*(float complex*)result = *(float complex*)array;
+	}
+	if(length<=LIMIT) return RSB_ERR_NO_ERROR;
+#undef LIMIT
+	middle = length/2;
+
+	left  = array;
+	right  = array+middle;
+	ileft  = iarray;
+	jleft  = jarray;
+	iright = iarray+middle;
+	jright = jarray+middle;
+
+/* 20121016: OpenMP usage commented out because it broke serial compilation */
+	{
+	rsb_do_mergesort_float_complex_BCSR(
+		ileft, jleft, mb, kb, left, middle,
+		iresult, jresult, result );
+
+	/* the right half is sorted serially as well */
+	rsb_do_mergesort_float_complex_BCSR(
+		iright, jright, mb, kb, right, length-middle,
+		iresult+middle, jresult+middle, result+middle );
+	}
+
+	RSB_MEMCPY(ileft ,iresult       ,so*middle);
+	RSB_MEMCPY(jleft ,jresult       ,so*middle);
+	RSB_MEMCPY(  left, result       ,sizeof(float complex)*middle);
+	RSB_MEMCPY(iright,iresult+middle,so*(length-middle));
+	RSB_MEMCPY(jright,jresult+middle,so*(length-middle));
+	RSB_MEMCPY( right, ((float complex*)result)+middle ,sizeof(float complex)*(length-middle));
+
+	rsb_do_merge_float_complex_BCSR		(
+			ileft,iright,iresult,
+			jleft,jright,jresult,
+	mb,kb,
+			left, right, result,
+			middle,length-middle
+			);
+	return RSB_ERR_NO_ERROR;	/* ! */
+}
+
+
+
+void rsb_do_merge_float_complex_BCSR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright,  rsb_coo_idx_t*restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright,  rsb_coo_idx_t*restrict jresult,
+		const rsb_coo_idx_t mb, const rsb_coo_idx_t kb,
+		const float complex* left, const float complex* restrict right,  float complex* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length )
+
+
+{
+	/*!
+	 * \ingroup gr_util
+	 * The merge step of the BCSR matrix coefficient sort
+	 * (row-major ordering on the block coordinates (i/mb, j/kb)).
+	 *
+	 * NOTE: This function is the mergesort bottleneck.
+	 */
+	rsb_nnz_idx_t left_index=0, right_index=0, result_index=0;
+	
+	/*
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	          |<-  length ----->|
+	          ^- index
+	 */
+
+
+#define LEFT_ADVANCE		left_index =( left_index+1); left_length-- ;
+#define RIGHT_ADVANCE		right_index=(right_index+1); right_length--;
+#define RESULT_ADVANCE		result_index =( result_index+1);
+
+#define RESULT_APPEND(IEL,JEL,EL)	\
+	iresult[result_index]=(IEL);  \
+	jresult[result_index]=(JEL);  \
+	result[result_index]=( EL);  \
+	RESULT_ADVANCE;
+
+#define LRESULT_APPEND	\
+	iresult[result_index]=ileft[left_index];\
+	jresult[result_index]=jleft[left_index];\
+	result[ result_index]= left[left_index];\
+	RESULT_ADVANCE; \
+	LEFT_ADVANCE;
+
+#define RRESULT_APPEND	\
+	iresult[result_index]=iright[right_index];\
+	jresult[result_index]=jright[right_index];\
+	 result[result_index]= right[right_index];\
+	RESULT_ADVANCE; \
+	RIGHT_ADVANCE; 
+
+	while( left_length > 0 && right_length > 0 )
+	{
+		if( ileft[left_index]/mb < iright[right_index]/mb ||
+		    ( ileft[left_index]/mb == iright[right_index]/mb &&
+		      jleft[left_index]/kb <= jright[right_index]/kb ) )
+		{
+			LRESULT_APPEND
+		}
+		else
+		{
+			RRESULT_APPEND
+		}
+	}
+
+	while( left_length  > 0 )
+	{
+		LRESULT_APPEND
+	}
+	while( right_length  > 0 )
+	{
+		RRESULT_APPEND
+	}
+#undef LEFT_ADVANCE
+#undef RIGHT_ADVANCE
+#undef RESULT_ADVANCE
+#undef RESULT_APPEND
+#undef LRESULT_APPEND
+#undef RRESULT_APPEND
+
+}
+
+rsb_err_t rsb_do_mergesort_double_complex_BCSR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	rsb_coo_idx_t mb, rsb_coo_idx_t kb,
+	double complex *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	double complex *restrict result)
+
+{
+	/*!
+	 *	\ingroup gr_util
+	 *	This function will sort the nonzero elements of a sparse blocked
+	 *	double complex matrix.
+	 *	It will read the row and column index arrays and the values
+	 *	array, and will sort them into separate output arrays.
+	 *
+	 *	NOTE: This function could be optimized.
+	 *
+	 * 	\param iarray  the input  row    indices array
+	 * 	\param jarray  the input  column indices array
+	 * 	\param mb      the block row    dimension
+	 * 	\param kb      the block column dimension
+	 * 	\param array   the input  double complex values array
+	 * 	\param iresult the output row    indices array
+	 * 	\param jresult the output column indices array
+	 * 	\param result  the output values array
+	 *
+	 *	Sorts the three arrays (iarray, jarray, array) following the
+	 *	criterion:
+	 *
+	 * 	(ia1,ja1)<=(ia2,ja2) iff (ia1<ia2) or ( (ia1==ia2) and (ja1<ja2) )
+	 * 	i.e.: C (row-major) ordering, applied here to the block
+	 * 	coordinates (ia/mb, ja/kb)
+	 */
+
+	rsb_nnz_idx_t middle;
+	const size_t so = sizeof(rsb_coo_idx_t);
+
+	rsb_coo_idx_t * ileft;
+	rsb_coo_idx_t * iright;
+	rsb_coo_idx_t * jleft;
+	rsb_coo_idx_t * jright;
+	double complex * left;
+	double complex * right;
+	
+#define LIMIT 1
+	if(length==LIMIT)
+	{
+		*iresult = *iarray;
+		*jresult = *jarray;
+		*(double complex*)result = *(double complex*)array;
+	}
+	if(length<=LIMIT) return RSB_ERR_NO_ERROR;
+#undef LIMIT
+	middle = length/2;
+
+	left  = array;
+	right  = array+middle;
+	ileft  = iarray;
+	jleft  = jarray;
+	iright = iarray+middle;
+	jright = jarray+middle;
+
+/* 20121016: OpenMP usage commented out because it broke serial compilation */
+	{
+	rsb_do_mergesort_double_complex_BCSR(
+		ileft, jleft, mb, kb, left, middle,
+		iresult, jresult, result );
+
+	/* the right half is sorted serially as well */
+	rsb_do_mergesort_double_complex_BCSR(
+		iright, jright, mb, kb, right, length-middle,
+		iresult+middle, jresult+middle, result+middle );
+	}
+
+	RSB_MEMCPY(ileft ,iresult       ,so*middle);
+	RSB_MEMCPY(jleft ,jresult       ,so*middle);
+	RSB_MEMCPY(  left, result       ,sizeof(double complex)*middle);
+	RSB_MEMCPY(iright,iresult+middle,so*(length-middle));
+	RSB_MEMCPY(jright,jresult+middle,so*(length-middle));
+	RSB_MEMCPY( right, ((double complex*)result)+middle ,sizeof(double complex)*(length-middle));
+
+	rsb_do_merge_double_complex_BCSR		(
+			ileft,iright,iresult,
+			jleft,jright,jresult,
+	mb,kb,
+			left, right, result,
+			middle,length-middle
+			);
+	return RSB_ERR_NO_ERROR;	/* ! */
+}
+
+
+
+void rsb_do_merge_double_complex_BCSR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright,  rsb_coo_idx_t*restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright,  rsb_coo_idx_t*restrict jresult,
+		const rsb_coo_idx_t mb, const rsb_coo_idx_t kb,
+		const double complex* left, const double complex* restrict right,  double complex* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length )
+
+
+{
+	/*!
+	 * \ingroup gr_util
+	 * The merge step of the BCSR matrix coefficient sort
+	 * (row-major ordering on the block coordinates (i/mb, j/kb)).
+	 *
+	 * NOTE: This function is the mergesort bottleneck.
+	 */
+	rsb_nnz_idx_t left_index=0, right_index=0, result_index=0;
+	
+	/*
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	          |<-  length ----->|
+	          ^- index
+	 */
+
+
+#define LEFT_ADVANCE		left_index =( left_index+1); left_length-- ;
+#define RIGHT_ADVANCE		right_index=(right_index+1); right_length--;
+#define RESULT_ADVANCE		result_index =( result_index+1);
+
+#define RESULT_APPEND(IEL,JEL,EL)	\
+	iresult[result_index]=(IEL);  \
+	jresult[result_index]=(JEL);  \
+	result[result_index]=( EL);  \
+	RESULT_ADVANCE;
+
+#define LRESULT_APPEND	\
+	iresult[result_index]=ileft[left_index];\
+	jresult[result_index]=jleft[left_index];\
+	result[ result_index]= left[left_index];\
+	RESULT_ADVANCE; \
+	LEFT_ADVANCE;
+
+#define RRESULT_APPEND	\
+	iresult[result_index]=iright[right_index];\
+	jresult[result_index]=jright[right_index];\
+	 result[result_index]= right[right_index];\
+	RESULT_ADVANCE; \
+	RIGHT_ADVANCE; 
+
+	while( left_length > 0 && right_length > 0 )
+	{
+		if( ileft[left_index]/mb < iright[right_index]/mb ||
+		    ( ileft[left_index]/mb == iright[right_index]/mb &&
+		      jleft[left_index]/kb <= jright[right_index]/kb ) )
+		{
+			LRESULT_APPEND
+		}
+		else
+		{
+			RRESULT_APPEND
+		}
+	}
+
+	while( left_length  > 0 )
+	{
+		LRESULT_APPEND
+	}
+	while( right_length  > 0 )
+	{
+		RRESULT_APPEND
+	}
+#undef LEFT_ADVANCE
+#undef RIGHT_ADVANCE
+#undef RESULT_ADVANCE
+#undef RESULT_APPEND
+#undef LRESULT_APPEND
+#undef RRESULT_APPEND
+
+}
+
+rsb_err_t rsb_do_mergesort_double_VBR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	rsb_coo_idx_t *restrict biarray,
+	rsb_coo_idx_t *restrict bjarray,
+	double *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	rsb_coo_idx_t *restrict biresult,
+	rsb_coo_idx_t *restrict bjresult,
+	double *restrict result)
+
+{
+	/*!
+	 *	\ingroup gr_util
+	 *	This function will sort the nonzero elements of a sparse blocked
+	 *	double matrix.
+	 *	It will read the row and column index arrays and the values
+	 *	array, and will sort them into separate output arrays.
+	 *
+	 *	NOTE: This function could be optimized.
+	 *
+	 * 	\param iarray   the input  row    indices array
+	 * 	\param jarray   the input  column indices array
+	 * 	\param array    the input  double values array
+	 * 	\param iresult  the output row    indices array
+	 * 	\param jresult  the output column indices array
+	 * 	\param result   the output values array
+	 * 	\param biarray  the input  block row    indices array
+	 * 	\param bjarray  the input  block column indices array
+	 * 	\param biresult the output block row    indices array
+	 * 	\param bjresult the output block column indices array
+	 *
+	 *	Sorts the three arrays (iarray, jarray, array) following the
+	 *	criterion:
+	 *
+	 * 	(ia1,ja1)<=(ia2,ja2) iff (ia1<ia2) or ( (ia1==ia2) and (ja1<ja2) )
+	 * 	i.e.: C (row-major) ordering, applied here to the block
+	 * 	index pairs (bia, bja)
+	 */
+
+	rsb_nnz_idx_t middle;
+	const size_t so = sizeof(rsb_coo_idx_t);
+
+	rsb_coo_idx_t * ileft;
+	rsb_coo_idx_t * iright;
+	rsb_coo_idx_t * jleft;
+	rsb_coo_idx_t * jright;
+	rsb_coo_idx_t * bileft;
+	rsb_coo_idx_t * biright;
+	rsb_coo_idx_t * bjleft;
+	rsb_coo_idx_t * bjright;
+	double * left;
+	double * right;
+	
+#define LIMIT 1
+	if(length==LIMIT)
+	{
+		*iresult = *iarray;
+		*jresult = *jarray;
+
+		*biresult = *biarray;
+		*bjresult = *bjarray;
+		*(double*)result = *(double*)array;
+	}
+	if(length<=LIMIT) return RSB_ERR_NO_ERROR;
+#undef LIMIT
+	middle = length/2;
+
+
+	bileft  = biarray;
+	bjleft  = bjarray;
+	biright = biarray+middle;
+	bjright = bjarray+middle;
+	left  = array;
+	right  = array+middle;
+	ileft  = iarray;
+	jleft  = jarray;
+	iright = iarray+middle;
+	jright = jarray+middle;
+
+/* 20121016: OpenMP usage commented out because it broke serial compilation */
+	{
+	rsb_do_mergesort_double_VBR(
+		ileft, jleft, bileft, bjleft, left, middle,
+		iresult, jresult, biresult, bjresult, result );
+
+	/* the right half is sorted serially as well */
+	rsb_do_mergesort_double_VBR(
+		iright, jright, biright, bjright, right, length-middle,
+		iresult+middle, jresult+middle, biresult+middle, bjresult+middle,
+		result+middle );
+	}
+
+	RSB_MEMCPY(ileft ,iresult       ,so*middle);
+	RSB_MEMCPY(jleft ,jresult       ,so*middle);
+
+	RSB_MEMCPY(bileft ,biresult       ,so*middle);
+	RSB_MEMCPY(bjleft ,bjresult       ,so*middle);
+	RSB_MEMCPY(  left, result       ,sizeof(double)*middle);
+	RSB_MEMCPY(iright,iresult+middle,so*(length-middle));
+	RSB_MEMCPY(jright,jresult+middle,so*(length-middle));
+
+	RSB_MEMCPY(biright ,biresult+middle       ,so*(length-middle));
+	RSB_MEMCPY(bjright ,bjresult+middle       ,so*(length-middle));
+	RSB_MEMCPY( right, ((double*)result)+middle ,sizeof(double)*(length-middle));
+
+	rsb_do_merge_double_VBR		(
+			ileft,iright,iresult,
+			jleft,jright,jresult,
+
+			bileft, biright, biresult,
+			bjleft, bjright, bjresult,
+			left, right, result,
+			middle,length-middle
+			);
+	return RSB_ERR_NO_ERROR;	/* ! */
+}
+
+
+
+void rsb_do_merge_double_VBR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright,  rsb_coo_idx_t*restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright,  rsb_coo_idx_t*restrict jresult,
+		const rsb_coo_idx_t * restrict bileft, const rsb_coo_idx_t * restrict biright, rsb_coo_idx_t * restrict biresult,
+		const rsb_coo_idx_t * restrict bjleft, const rsb_coo_idx_t * restrict bjright, rsb_coo_idx_t * restrict bjresult,
+		const double* left, const double* restrict right,  double* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length )
+
+
+{
+	/*!
+	 * \ingroup gr_util
+	 * The merge step of the VBR matrix coefficient sort
+	 * (row-major ordering on the block index pairs (bi, bj)).
+	 *
+	 * NOTE: This function is the mergesort bottleneck.
+	 */
+	rsb_nnz_idx_t left_index=0, right_index=0, result_index=0;
+	
+	/*
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	          |<-  length ----->|
+	          ^- index
+	 */
+
+
+#define LEFT_ADVANCE		left_index =( left_index+1); left_length-- ;
+#define RIGHT_ADVANCE		right_index=(right_index+1); right_length--;
+#define RESULT_ADVANCE		result_index =( result_index+1);
+
+#define RESULT_APPEND(IEL,JEL,BIEL,BJEL,EL)	\
+	iresult[result_index]=(IEL);  \
+	jresult[result_index]=(JEL);  \
+	result[result_index]=( EL);  \
+	biresult[result_index]=(BIEL);  \
+	bjresult[result_index]=(BJEL);  \
+	RESULT_ADVANCE;
+
+#define LRESULT_APPEND	\
+	iresult[result_index]=ileft[left_index];\
+	jresult[result_index]=jleft[left_index];\
+	biresult[result_index]=bileft[left_index];  \
+	bjresult[result_index]=bjleft[left_index];  \
+	result[ result_index]= left[left_index];\
+	RESULT_ADVANCE; \
+	LEFT_ADVANCE;
+
+#define RRESULT_APPEND	\
+	iresult[result_index]=iright[right_index];\
+	jresult[result_index]=jright[right_index];\
+	biresult[result_index]=biright[right_index];  \
+	bjresult[result_index]=bjright[right_index];  \
+	 result[result_index]= right[right_index];\
+	RESULT_ADVANCE; \
+	RIGHT_ADVANCE; 
+
+	while( left_length > 0 && right_length > 0 )
+	{
+		if( bileft[left_index] < biright[right_index] ||
+		    ( bileft[left_index] == biright[right_index] &&
+		      bjleft[left_index] <= bjright[right_index] ) )
+		{
+			LRESULT_APPEND
+		}
+		else
+		{
+			RRESULT_APPEND
+		}
+	}
+
+	while( left_length  > 0 )
+	{
+		LRESULT_APPEND
+	}
+	while( right_length  > 0 )
+	{
+		RRESULT_APPEND
+	}
+#undef LEFT_ADVANCE
+#undef RIGHT_ADVANCE
+#undef RESULT_ADVANCE
+#undef RESULT_APPEND
+#undef LRESULT_APPEND
+#undef RRESULT_APPEND
+
+}
+
+rsb_err_t rsb_do_mergesort_float_VBR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	rsb_coo_idx_t *restrict biarray,
+	rsb_coo_idx_t *restrict bjarray,
+	float *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	rsb_coo_idx_t *restrict biresult,
+	rsb_coo_idx_t *restrict bjresult,
+	float *restrict result)
+
+{
+	/*!
+	 *	\ingroup gr_util
+	 *	This function will sort the nonzero elements of a sparse blocked
+	 *	float matrix.
+	 *	It will read the row and column index arrays and the values
+	 *	array, and will sort them into separate output arrays.
+	 *
+	 *	NOTE: This function could be optimized.
+	 *
+	 * 	\param iarray   the input  row    indices array
+	 * 	\param jarray   the input  column indices array
+	 * 	\param array    the input  float values array
+	 * 	\param iresult  the output row    indices array
+	 * 	\param jresult  the output column indices array
+	 * 	\param result   the output values array
+	 * 	\param biarray  the input  block row    indices array
+	 * 	\param bjarray  the input  block column indices array
+	 * 	\param biresult the output block row    indices array
+	 * 	\param bjresult the output block column indices array
+	 *
+	 *	Sorts the three arrays (iarray, jarray, array) following the
+	 *	criterion:
+	 *
+	 * 	(ia1,ja1)<=(ia2,ja2) iff (ia1<ia2) or ( (ia1==ia2) and (ja1<ja2) )
+	 * 	i.e.: C (row-major) ordering, applied here to the block
+	 * 	index pairs (bia, bja)
+	 */
+
+	rsb_nnz_idx_t middle;
+	const size_t so = sizeof(rsb_coo_idx_t);
+
+	rsb_coo_idx_t * ileft;
+	rsb_coo_idx_t * iright;
+	rsb_coo_idx_t * jleft;
+	rsb_coo_idx_t * jright;
+	rsb_coo_idx_t * bileft;
+	rsb_coo_idx_t * biright;
+	rsb_coo_idx_t * bjleft;
+	rsb_coo_idx_t * bjright;
+	float * left;
+	float * right;
+	
+#define LIMIT 1
+	if(length==LIMIT)
+	{
+		*iresult = *iarray;
+		*jresult = *jarray;
+
+		*biresult = *biarray;
+		*bjresult = *bjarray;
+		*(float*)result = *(float*)array;
+	}
+	if(length<=LIMIT) return RSB_ERR_NO_ERROR;
+#undef LIMIT
+	middle = length/2;
+
+
+	bileft  = biarray;
+	bjleft  = bjarray;
+	biright = biarray+middle;
+	bjright = bjarray+middle;
+	left  = array;
+	right  = array+middle;
+	ileft  = iarray;
+	jleft  = jarray;
+	iright = iarray+middle;
+	jright = jarray+middle;
+
+/* 20121016: OpenMP usage commented out because it broke serial compilation */
+	{
+	rsb_do_mergesort_float_VBR(
+		ileft, jleft, bileft, bjleft, left, middle,
+		iresult, jresult, biresult, bjresult, result );
+
+	/* the right half is sorted serially as well */
+	rsb_do_mergesort_float_VBR(
+		iright, jright, biright, bjright, right, length-middle,
+		iresult+middle, jresult+middle, biresult+middle, bjresult+middle,
+		result+middle );
+	}
+
+	RSB_MEMCPY(ileft ,iresult       ,so*middle);
+	RSB_MEMCPY(jleft ,jresult       ,so*middle);
+
+	RSB_MEMCPY(bileft ,biresult       ,so*middle);
+	RSB_MEMCPY(bjleft ,bjresult       ,so*middle);
+	RSB_MEMCPY(  left, result       ,sizeof(float)*middle);
+	RSB_MEMCPY(iright,iresult+middle,so*(length-middle));
+	RSB_MEMCPY(jright,jresult+middle,so*(length-middle));
+
+	RSB_MEMCPY(biright ,biresult+middle       ,so*(length-middle));
+	RSB_MEMCPY(bjright ,bjresult+middle       ,so*(length-middle));
+	RSB_MEMCPY( right, ((float*)result)+middle ,sizeof(float)*(length-middle));
+
+	rsb_do_merge_float_VBR		(
+			ileft,iright,iresult,
+			jleft,jright,jresult,
+
+			bileft, biright, biresult,
+			bjleft, bjright, bjresult,
+			left, right, result,
+			middle,length-middle
+			);
+	return RSB_ERR_NO_ERROR;	/* ! */
+}
+
+
+
+void rsb_do_merge_float_VBR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright,  rsb_coo_idx_t*restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright,  rsb_coo_idx_t*restrict jresult,
+		const rsb_coo_idx_t * restrict bileft, const rsb_coo_idx_t * restrict biright, rsb_coo_idx_t * restrict biresult,
+		const rsb_coo_idx_t * restrict bjleft, const rsb_coo_idx_t * restrict bjright, rsb_coo_idx_t * restrict bjresult,
+		const float* left, const float* restrict right,  float* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length )
+
+
+{
+	/*!
+	 * \ingroup gr_util
+	 * The merge step of the VBR matrix coefficient sort
+	 * (row-major ordering on the block index pairs (bi, bj)).
+	 *
+	 * NOTE: This function is the mergesort bottleneck.
+	 */
+	rsb_nnz_idx_t left_index=0, right_index=0, result_index=0;
+	
+	/*
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	          |<-  length ----->|
+	          ^- index
+	 */
+
+
+#define LEFT_ADVANCE		left_index =( left_index+1); left_length-- ;
+#define RIGHT_ADVANCE		right_index=(right_index+1); right_length--;
+#define RESULT_ADVANCE		result_index =( result_index+1);
+
+#define RESULT_APPEND(IEL,JEL,BIEL,BJEL,EL)	\
+	iresult[result_index]=(IEL);  \
+	jresult[result_index]=(JEL);  \
+	result[result_index]=( EL);  \
+	biresult[result_index]=(BIEL);  \
+	bjresult[result_index]=(BJEL);  \
+	RESULT_ADVANCE;
+
+#define LRESULT_APPEND	\
+	iresult[result_index]=ileft[left_index];\
+	jresult[result_index]=jleft[left_index];\
+	biresult[result_index]=bileft[left_index];  \
+	bjresult[result_index]=bjleft[left_index];  \
+	result[ result_index]= left[left_index];\
+	RESULT_ADVANCE; \
+	LEFT_ADVANCE;
+
+#define RRESULT_APPEND	\
+	iresult[result_index]=iright[right_index];\
+	jresult[result_index]=jright[right_index];\
+	biresult[result_index]=biright[right_index];  \
+	bjresult[result_index]=bjright[right_index];  \
+	 result[result_index]= right[right_index];\
+	RESULT_ADVANCE; \
+	RIGHT_ADVANCE; 
+
+	while( left_length > 0 && right_length > 0 )
+	{
+		if( bileft[left_index] < biright[right_index] ||
+		    ( bileft[left_index] == biright[right_index] &&
+		      bjleft[left_index] <= bjright[right_index] ) )
+		{
+			LRESULT_APPEND
+		}
+		else
+		{
+			RRESULT_APPEND
+		}
+	}
+
+	while( left_length  > 0 )
+	{
+		LRESULT_APPEND
+	}
+	while( right_length  > 0 )
+	{
+		RRESULT_APPEND
+	}
+#undef LEFT_ADVANCE
+#undef RIGHT_ADVANCE
+#undef RESULT_ADVANCE
+#undef RESULT_APPEND
+#undef LRESULT_APPEND
+#undef RRESULT_APPEND
+
+}
+
+rsb_err_t rsb_do_mergesort_float_complex_VBR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	rsb_coo_idx_t *restrict biarray,
+	rsb_coo_idx_t *restrict bjarray,
+	float complex *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	rsb_coo_idx_t *restrict biresult,
+	rsb_coo_idx_t *restrict bjresult,
+	float complex *restrict result)
+
+{
+	/*!
+	 *	\ingroup gr_util
+	 *	This function will sort the nonzero elements of a sparse blocked
+	 *	float complex matrix.
+	 *	It will read the row and column index arrays and the values
+	 *	array, and will sort them into separate output arrays.
+	 *
+	 *	NOTE: This function could be optimized.
+	 *
+	 * 	\param iarray   the input  row    indices array
+	 * 	\param jarray   the input  column indices array
+	 * 	\param array    the input  float complex values array
+	 * 	\param iresult  the output row    indices array
+	 * 	\param jresult  the output column indices array
+	 * 	\param result   the output values array
+	 * 	\param biarray  the input  block row    indices array
+	 * 	\param bjarray  the input  block column indices array
+	 * 	\param biresult the output block row    indices array
+	 * 	\param bjresult the output block column indices array
+	 *
+	 *	Sorts the three arrays (iarray, jarray, array) following the
+	 *	criterion:
+	 *
+	 * 	(ia1,ja1)<=(ia2,ja2) iff (ia1<ia2) or ( (ia1==ia2) and (ja1<ja2) )
+	 * 	i.e.: C (row-major) ordering, applied here to the block
+	 * 	index pairs (bia, bja)
+	 */
+
+	rsb_nnz_idx_t middle;
+	const size_t so = sizeof(rsb_coo_idx_t);
+
+	rsb_coo_idx_t * ileft;
+	rsb_coo_idx_t * iright;
+	rsb_coo_idx_t * jleft;
+	rsb_coo_idx_t * jright;
+	rsb_coo_idx_t * bileft;
+	rsb_coo_idx_t * biright;
+	rsb_coo_idx_t * bjleft;
+	rsb_coo_idx_t * bjright;
+	float complex * left;
+	float complex * right;
+	
+#define LIMIT 1
+	if(length==LIMIT)
+	{
+		*iresult = *iarray;
+		*jresult = *jarray;
+
+		*biresult = *biarray;
+		*bjresult = *bjarray;
+		*(float complex*)result = *(float complex*)array;
+	}
+	if(length<=LIMIT) return RSB_ERR_NO_ERROR;
+#undef LIMIT
+	middle = length/2;
+
+
+	bileft  = biarray;
+	bjleft  = bjarray;
+	biright = biarray+middle;
+	bjright = bjarray+middle;
+	left  = array;
+	right  = array+middle;
+	ileft  = iarray;
+	jleft  = jarray;
+	iright = iarray+middle;
+	jright = jarray+middle;
+
+/* 20121016: OpenMP usage commented out because it broke serial compilation */
+	{
+	rsb_do_mergesort_float_complex_VBR(
+		ileft, jleft, bileft, bjleft, left, middle,
+		iresult, jresult, biresult, bjresult, result );
+
+	/* the right half is sorted serially as well */
+	rsb_do_mergesort_float_complex_VBR(
+		iright, jright, biright, bjright, right, length-middle,
+		iresult+middle, jresult+middle, biresult+middle, bjresult+middle,
+		result+middle );
+	}
+
+	RSB_MEMCPY(ileft ,iresult       ,so*middle);
+	RSB_MEMCPY(jleft ,jresult       ,so*middle);
+
+	RSB_MEMCPY(bileft ,biresult       ,so*middle);
+	RSB_MEMCPY(bjleft ,bjresult       ,so*middle);
+	RSB_MEMCPY(  left, result       ,sizeof(float complex)*middle);
+	RSB_MEMCPY(iright,iresult+middle,so*(length-middle));
+	RSB_MEMCPY(jright,jresult+middle,so*(length-middle));
+
+	RSB_MEMCPY(biright ,biresult+middle       ,so*(length-middle));
+	RSB_MEMCPY(bjright ,bjresult+middle       ,so*(length-middle));
+	RSB_MEMCPY( right, ((float complex*)result)+middle ,sizeof(float complex)*(length-middle));
+
+	rsb_do_merge_float_complex_VBR		(
+			ileft,iright,iresult,
+			jleft,jright,jresult,
+
+			bileft, biright, biresult,
+			bjleft, bjright, bjresult,
+			left, right, result,
+			middle,length-middle
+			);
+	return RSB_ERR_NO_ERROR;	/* ! */
+}
+
+
+
+void rsb_do_merge_float_complex_VBR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright,  rsb_coo_idx_t*restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright,  rsb_coo_idx_t*restrict jresult,
+		const rsb_coo_idx_t * restrict bileft, const rsb_coo_idx_t * restrict biright, rsb_coo_idx_t * restrict biresult,
+		const rsb_coo_idx_t * restrict bjleft, const rsb_coo_idx_t * restrict bjright, rsb_coo_idx_t * restrict bjresult,
+		const float complex* left, const float complex* restrict right,  float complex* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length )
+
+
+{
+	/*!
+	 * \ingroup gr_util
+	 * The merge step of the VBR matrix coefficient sort
+	 * (row-major ordering on the block index pairs (bi, bj)).
+	 *
+	 * NOTE: This function is the mergesort bottleneck.
+	 */
+	rsb_nnz_idx_t left_index=0, right_index=0, result_index=0;
+	
+	/*
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	          |<-  length ----->|
+	          ^- index
+	 */
+
+
+#define LEFT_ADVANCE		left_index =( left_index+1); left_length-- ;
+#define RIGHT_ADVANCE		right_index=(right_index+1); right_length--;
+#define RESULT_ADVANCE		result_index =( result_index+1);
+
+#define RESULT_APPEND(IEL,JEL,BIEL,BJEL,EL)	\
+	iresult[result_index]=(IEL);  \
+	jresult[result_index]=(JEL);  \
+	result[result_index]=( EL);  \
+	biresult[result_index]=(BIEL);  \
+	bjresult[result_index]=(BJEL);  \
+	RESULT_ADVANCE;
+
+#define LRESULT_APPEND	\
+	iresult[result_index]=ileft[left_index];\
+	jresult[result_index]=jleft[left_index];\
+	biresult[result_index]=bileft[left_index];  \
+	bjresult[result_index]=bjleft[left_index];  \
+	result[ result_index]= left[left_index];\
+	RESULT_ADVANCE; \
+	LEFT_ADVANCE;
+
+#define RRESULT_APPEND	\
+	iresult[result_index]=iright[right_index];\
+	jresult[result_index]=jright[right_index];\
+	biresult[result_index]=biright[right_index];  \
+	bjresult[result_index]=bjright[right_index];  \
+	 result[result_index]= right[right_index];\
+	RESULT_ADVANCE; \
+	RIGHT_ADVANCE; 
+
+	while( left_length > 0 && right_length > 0 )
+	{
+		if( bileft[left_index] < biright[right_index] ||
+		    ( bileft[left_index] == biright[right_index] &&
+		      bjleft[left_index] <= bjright[right_index] ) )
+		{
+			LRESULT_APPEND
+		}
+		else
+		{
+			RRESULT_APPEND
+		}
+	}
+
+	while( left_length  > 0 )
+	{
+		LRESULT_APPEND
+	}
+	while( right_length  > 0 )
+	{
+		RRESULT_APPEND
+	}
+#undef LEFT_ADVANCE
+#undef RIGHT_ADVANCE
+#undef RESULT_ADVANCE
+#undef RESULT_APPEND
+#undef LRESULT_APPEND
+#undef RRESULT_APPEND
+
+}
+
+rsb_err_t rsb_do_mergesort_double_complex_VBR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	rsb_coo_idx_t *restrict biarray,
+	rsb_coo_idx_t *restrict bjarray,
+	double complex *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	rsb_coo_idx_t *restrict biresult,
+	rsb_coo_idx_t *restrict bjresult,
+	double complex *restrict result)
+
+{
+	/*!
+	 *	\ingroup gr_util
+	 *	This function will sort the nonzero elements of a sparse blocked
+	 *	double complex matrix.
+	 *	It will read the row and column index arrays and the values
+	 *	array, and will sort them into separate output arrays.
+	 *
+	 *	NOTE: This function could be optimized.
+	 *
+	 * 	\param iarray   the input  row    indices array
+	 * 	\param jarray   the input  column indices array
+	 * 	\param array    the input  double complex values array
+	 * 	\param iresult  the output row    indices array
+	 * 	\param jresult  the output column indices array
+	 * 	\param result   the output values array
+	 * 	\param biarray  the input  block row    indices array
+	 * 	\param bjarray  the input  block column indices array
+	 * 	\param biresult the output block row    indices array
+	 * 	\param bjresult the output block column indices array
+	 *
+	 *	Sorts the three arrays (iarray, jarray, array) following the
+	 *	criterion:
+	 *
+	 * 	(ia1,ja1)<=(ia2,ja2) iff (ia1<ia2) or ( (ia1==ia2) and (ja1<ja2) )
+	 * 	i.e.: C (row-major) ordering, applied here to the block
+	 * 	index pairs (bia, bja)
+	 */
+
+	rsb_nnz_idx_t middle;
+	const size_t so = sizeof(rsb_coo_idx_t);
+
+	rsb_coo_idx_t * ileft;
+	rsb_coo_idx_t * iright;
+	rsb_coo_idx_t * jleft;
+	rsb_coo_idx_t * jright;
+	rsb_coo_idx_t * bileft;
+	rsb_coo_idx_t * biright;
+	rsb_coo_idx_t * bjleft;
+	rsb_coo_idx_t * bjright;
+	double complex * left;
+	double complex * right;
+	
+#define LIMIT 1
+	if(length==LIMIT)
+	{
+		*iresult = *iarray;
+		*jresult = *jarray;
+
+		*biresult = *biarray;
+		*bjresult = *bjarray;
+		*(double complex*)result = *(double complex*)array;
+	}
+	if(length<=LIMIT) return RSB_ERR_NO_ERROR;
+#undef LIMIT
+	middle = length/2;
+
+
+	bileft  = biarray;
+	bjleft  = bjarray;
+	biright = biarray+middle;
+	bjright = bjarray+middle;
+	left  = array;
+	right  = array+middle;
+	ileft  = iarray;
+	jleft  = jarray;
+	iright = iarray+middle;
+	jright = jarray+middle;
+
+/* 20121016: OpenMP usage commented out because it broke serial compilation */
+	{
+	rsb_do_mergesort_double_complex_VBR(
+		ileft, jleft, bileft, bjleft, left, middle,
+		iresult, jresult, biresult, bjresult, result );
+
+	/* the right half is sorted serially as well */
+	rsb_do_mergesort_double_complex_VBR(
+		iright, jright, biright, bjright, right, length-middle,
+		iresult+middle, jresult+middle, biresult+middle, bjresult+middle,
+		result+middle );
+	}
+
+	RSB_MEMCPY(ileft ,iresult       ,so*middle);
+	RSB_MEMCPY(jleft ,jresult       ,so*middle);
+
+	RSB_MEMCPY(bileft ,biresult       ,so*middle);
+	RSB_MEMCPY(bjleft ,bjresult       ,so*middle);
+	RSB_MEMCPY(  left, result       ,sizeof(double complex)*middle);
+	RSB_MEMCPY(iright,iresult+middle,so*(length-middle));
+	RSB_MEMCPY(jright,jresult+middle,so*(length-middle));
+
+	RSB_MEMCPY(biright ,biresult+middle       ,so*(length-middle));
+	RSB_MEMCPY(bjright ,bjresult+middle       ,so*(length-middle));
+	RSB_MEMCPY( right, ((double complex*)result)+middle ,sizeof(double complex)*(length-middle));
+
+	rsb_do_merge_double_complex_VBR		(
+			ileft,iright,iresult,
+			jleft,jright,jresult,
+
+			bileft, biright, biresult,
+			bjleft, bjright, bjresult,
+			left, right, result,
+			middle,length-middle
+			);
+	return RSB_ERR_NO_ERROR;	/* ! */
+}
+
+
+
+void rsb_do_merge_double_complex_VBR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright,  rsb_coo_idx_t*restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright,  rsb_coo_idx_t*restrict jresult,
+		const rsb_coo_idx_t * restrict bileft, const rsb_coo_idx_t * restrict biright, rsb_coo_idx_t * restrict biresult,
+		const rsb_coo_idx_t * restrict bjleft, const rsb_coo_idx_t * restrict bjright, rsb_coo_idx_t * restrict bjresult,
+		const double complex* left, const double complex* restrict right,  double complex* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length )
+
+
+{
+	/*!
+	 * \ingroup gr_util
+	 * The merge step of the VBR matrix coefficient sort
+	 * (row-major ordering on the block index pairs (bi, bj)).
+	 *
+	 * NOTE: This function is the mergesort bottleneck.
+	 */
+	rsb_nnz_idx_t left_index=0, right_index=0, result_index=0;
+	
+	/*
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	          |<-  length ----->|
+	          ^- index
+	 */
+
+
+#define LEFT_ADVANCE		left_index =( left_index+1); left_length-- ;
+#define RIGHT_ADVANCE		right_index=(right_index+1); right_length--;
+#define RESULT_ADVANCE		result_index =( result_index+1);
+
+#define RESULT_APPEND(IEL,JEL,BIEL,BJEL,EL)	\
+	iresult[result_index]=(IEL);  \
+	jresult[result_index]=(JEL);  \
+	result[result_index]=( EL);  \
+	biresult[result_index]=(BIEL);  \
+	bjresult[result_index]=(BJEL);  \
+	RESULT_ADVANCE;
+
+#define LRESULT_APPEND	\
+	iresult[result_index]=ileft[left_index];\
+	jresult[result_index]=jleft[left_index];\
+	biresult[result_index]=bileft[left_index];  \
+	bjresult[result_index]=bjleft[left_index];  \
+	result[ result_index]= left[left_index];\
+	RESULT_ADVANCE; \
+	LEFT_ADVANCE;
+
+#define RRESULT_APPEND	\
+	iresult[result_index]=iright[right_index];\
+	jresult[result_index]=jright[right_index];\
+	biresult[result_index]=biright[right_index];  \
+	bjresult[result_index]=bjright[right_index];  \
+	 result[result_index]= right[right_index];\
+	RESULT_ADVANCE; \
+	RIGHT_ADVANCE; 
+
+	while( left_length > 0 && right_length > 0 )
+	{
+		if( bileft[left_index] < biright[right_index] ||
+		    ( bileft[left_index] == biright[right_index] &&
+		      bjleft[left_index] <= bjright[right_index] ) )
+		{
+			LRESULT_APPEND
+		}
+		else
+		{
+			RRESULT_APPEND
+		}
+	}
+
+	while( left_length  > 0 )
+	{
+		LRESULT_APPEND
+	}
+	while( right_length  > 0 )
+	{
+		RRESULT_APPEND
+	}
+#undef LEFT_ADVANCE
+#undef RIGHT_ADVANCE
+#undef RESULT_ADVANCE
+#undef RESULT_APPEND
+#undef LRESULT_APPEND
+#undef RRESULT_APPEND
+
+}
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+/* @endcond */
diff --git a/rsb_mergesort.h b/rsb_mergesort.h
new file mode 100644
index 0000000..e4b9667
--- /dev/null
+++ b/rsb_mergesort.h
@@ -0,0 +1,330 @@
+
+
+/* @cond INNERDOC */
+/**
+ * @file
+ * @brief
+ * Sorting functions.
+ */
+
+/*
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ P.S.: right now, only row-major matrix access is considered.
+
+ */
+
+
+#ifndef RSB_MERGESORT_H_INCLUDED
+#define RSB_MERGESORT_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+
+
+
+#include "rsb.h"
+#include "rsb_common.h"
+#include "rsb_internals.h"
+
+
+
+rsb_err_t rsb__do_mergesort_CSR(
+	rsb_coo_idx_t *iarray,
+	rsb_coo_idx_t *jarray,
+	void *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *iresult,
+	rsb_coo_idx_t *jresult,
+	void *result,
+	rsb_type_t type);
+rsb_err_t rsb__do_mergesort_BCSR(
+	rsb_coo_idx_t *iarray,
+	rsb_coo_idx_t *jarray,
+	void *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t mb,
+	rsb_coo_idx_t kb,
+	rsb_coo_idx_t *iresult,
+	rsb_coo_idx_t *jresult,
+	void *result,
+	rsb_type_t type);
+rsb_err_t rsb__do_mergesort_VBR(
+	rsb_coo_idx_t *iarray,
+	rsb_coo_idx_t *jarray,
+	rsb_coo_idx_t *biarray,
+	rsb_coo_idx_t *bjarray,
+	void *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *iresult,
+	rsb_coo_idx_t *jresult,
+	rsb_coo_idx_t *biresult,
+	rsb_coo_idx_t *bjresult,
+	void *result,
+	rsb_type_t type);
+rsb_err_t rsb_do_mergesort_double_CSR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	double *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	double *restrict result)
+;
+
+void rsb_do_merge_double_CSR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright,  rsb_coo_idx_t*restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright,  rsb_coo_idx_t*restrict jresult,
+		const double* left, const double* restrict right,  double* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length )
+
+;
+rsb_err_t rsb_do_mergesort_float_CSR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	float *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	float *restrict result)
+;
+
+void rsb_do_merge_float_CSR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright,  rsb_coo_idx_t*restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright,  rsb_coo_idx_t*restrict jresult,
+		const float* left, const float* restrict right,  float* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length )
+
+;
+rsb_err_t rsb_do_mergesort_float_complex_CSR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	float complex *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	float complex *restrict result)
+;
+
+void rsb_do_merge_float_complex_CSR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright,  rsb_coo_idx_t*restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright,  rsb_coo_idx_t*restrict jresult,
+		const float complex* left, const float complex* restrict right,  float complex* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length )
+
+;
+rsb_err_t rsb_do_mergesort_double_complex_CSR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	double complex *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	double complex *restrict result)
+;
+
+void rsb_do_merge_double_complex_CSR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright,  rsb_coo_idx_t*restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright,  rsb_coo_idx_t*restrict jresult,
+		const double complex* left, const double complex* restrict right,  double complex* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length )
+
+;
+rsb_err_t rsb_do_mergesort_double_BCSR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	rsb_coo_idx_t mb, rsb_coo_idx_t kb,
+	double *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	double *restrict result)
+;
+
+void rsb_do_merge_double_BCSR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright,  rsb_coo_idx_t*restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright,  rsb_coo_idx_t*restrict jresult,
+		const rsb_coo_idx_t mb, const rsb_coo_idx_t kb,
+		const double* left, const double* restrict right,  double* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length )
+
+;
+rsb_err_t rsb_do_mergesort_float_BCSR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	rsb_coo_idx_t mb, rsb_coo_idx_t kb,
+	float *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	float *restrict result)
+;
+
+void rsb_do_merge_float_BCSR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright,  rsb_coo_idx_t*restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright,  rsb_coo_idx_t*restrict jresult,
+		const rsb_coo_idx_t mb, const rsb_coo_idx_t kb,
+		const float* left, const float* restrict right,  float* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length )
+
+;
+rsb_err_t rsb_do_mergesort_float_complex_BCSR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	rsb_coo_idx_t mb, rsb_coo_idx_t kb,
+	float complex *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	float complex *restrict result)
+;
+
+void rsb_do_merge_float_complex_BCSR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright,  rsb_coo_idx_t*restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright,  rsb_coo_idx_t*restrict jresult,
+		const rsb_coo_idx_t mb, const rsb_coo_idx_t kb,
+		const float complex* left, const float complex* restrict right,  float complex* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length )
+
+;
+rsb_err_t rsb_do_mergesort_double_complex_BCSR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	rsb_coo_idx_t mb, rsb_coo_idx_t kb,
+	double complex *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	double complex *restrict result)
+;
+
+void rsb_do_merge_double_complex_BCSR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright,  rsb_coo_idx_t*restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright,  rsb_coo_idx_t*restrict jresult,
+		const rsb_coo_idx_t mb, const rsb_coo_idx_t kb,
+		const double complex* left, const double complex* restrict right,  double complex* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length )
+
+;
+rsb_err_t rsb_do_mergesort_double_VBR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	rsb_coo_idx_t *restrict biarray,
+	rsb_coo_idx_t *restrict bjarray,
+	double *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	rsb_coo_idx_t *restrict biresult,
+	rsb_coo_idx_t *restrict bjresult,
+	double *restrict result)
+;
+
+void rsb_do_merge_double_VBR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright,  rsb_coo_idx_t*restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright,  rsb_coo_idx_t*restrict jresult,
+		const rsb_coo_idx_t * restrict bileft, const rsb_coo_idx_t * restrict biright, rsb_coo_idx_t * restrict biresult,
+		const rsb_coo_idx_t * restrict bjleft, const rsb_coo_idx_t * restrict bjright, rsb_coo_idx_t * restrict bjresult,
+		const double* left, const double* restrict right,  double* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length )
+
+;
+rsb_err_t rsb_do_mergesort_float_VBR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	rsb_coo_idx_t *restrict biarray,
+	rsb_coo_idx_t *restrict bjarray,
+	float *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	rsb_coo_idx_t *restrict biresult,
+	rsb_coo_idx_t *restrict bjresult,
+	float *restrict result)
+;
+
+void rsb_do_merge_float_VBR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright,  rsb_coo_idx_t*restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright,  rsb_coo_idx_t*restrict jresult,
+		const rsb_coo_idx_t * restrict bileft, const rsb_coo_idx_t * restrict biright, rsb_coo_idx_t * restrict biresult,
+		const rsb_coo_idx_t * restrict bjleft, const rsb_coo_idx_t * restrict bjright, rsb_coo_idx_t * restrict bjresult,
+		const float* left, const float* restrict right,  float* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length )
+
+;
+rsb_err_t rsb_do_mergesort_float_complex_VBR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	rsb_coo_idx_t *restrict biarray,
+	rsb_coo_idx_t *restrict bjarray,
+	float complex *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	rsb_coo_idx_t *restrict biresult,
+	rsb_coo_idx_t *restrict bjresult,
+	float complex *restrict result);
+
+void rsb_do_merge_float_complex_VBR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright, rsb_coo_idx_t* restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright, rsb_coo_idx_t* restrict jresult,
+		const rsb_coo_idx_t* restrict bileft, const rsb_coo_idx_t* restrict biright, rsb_coo_idx_t* restrict biresult,
+		const rsb_coo_idx_t* restrict bjleft, const rsb_coo_idx_t* restrict bjright, rsb_coo_idx_t* restrict bjresult,
+		const float complex* left, const float complex* restrict right, float complex* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length);
+rsb_err_t rsb_do_mergesort_double_complex_VBR(
+	rsb_coo_idx_t *restrict iarray,
+	rsb_coo_idx_t *restrict jarray,
+	rsb_coo_idx_t *restrict biarray,
+	rsb_coo_idx_t *restrict bjarray,
+	double complex *array,
+	rsb_nnz_idx_t length, 
+	rsb_coo_idx_t *restrict iresult,
+	rsb_coo_idx_t *restrict jresult,
+	rsb_coo_idx_t *restrict biresult,
+	rsb_coo_idx_t *restrict bjresult,
+	double complex *restrict result);
+
+void rsb_do_merge_double_complex_VBR(
+		const rsb_coo_idx_t* restrict ileft, const rsb_coo_idx_t* restrict iright, rsb_coo_idx_t* restrict iresult,
+		const rsb_coo_idx_t* restrict jleft, const rsb_coo_idx_t* restrict jright, rsb_coo_idx_t* restrict jresult,
+		const rsb_coo_idx_t* restrict bileft, const rsb_coo_idx_t* restrict biright, rsb_coo_idx_t* restrict biresult,
+		const rsb_coo_idx_t* restrict bjleft, const rsb_coo_idx_t* restrict bjright, rsb_coo_idx_t* restrict bjresult,
+		const double complex* left, const double complex* restrict right, double complex* restrict result,
+		rsb_nnz_idx_t left_length,
+		rsb_nnz_idx_t right_length);
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+#endif /* RSB_MERGESORT_H_INCLUDED */
+
+/* @endcond */
diff --git a/rsb_mergesort.m4 b/rsb_mergesort.m4
new file mode 100644
index 0000000..32fc2fc
--- /dev/null
+++ b/rsb_mergesort.m4
@@ -0,0 +1,108 @@
+dnl
+dnl
+include(`rsb_misc.m4')dnl
+include(`do_unroll.m4')dnl
+include(`mergesort_macros.m4')dnl
+dnl
+/* @cond INNERDOC */
+dnl
+/**
+ * @file
+ * @brief
+ * Sorting functions.
+ */
+RSB_M4_HEADER_MESSAGE()dnl
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`
+#ifndef RSB_MERGESORT_H_INCLUDED
+#define RSB_MERGESORT_H_INCLUDED
+')
+dnl
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+ifdef(`ONLY_WANT_HEADERS',`dnl
+',`dnl
+dnl
+dnl /* We may use custom memcpy functions. */
+dnl #define RSB_MEMCPY(DST,SRC,BYTES) rsb_memcpy((DST),(SRC),(BYTES))
+')dnl
+dnl
+
+
+ifelse(`0',`1',`dnl 20121016 
+ifdef(`RSB_M4_WANT_OMP',dnl
+dnl	FIXME : this should be moved elsewhere
+`#define RSB_WANT_OMP        '1
+`#define RSB_MAX_OMP_THREADS 'RSB_M4_MAX_OMP_THREADS
+#ifdef RSB_HAVE_OMP_H
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+#include <omp.h>       /* OpenMP parallelism (EXPERIMENTAL) */
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+#endif /* RSB_HAVE_OMP_H */
+)dnl
+')dnl
+
+dnl
+dnl #include "rsb_internals.h"
+dnl #include "rsb_common.h"
+RSB_M4_INCLUDE_HEADERS
+dnl #include "types.h"
+dnl 
+
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+extern struct rsb_session_handle_t rsb_global_session_handle;
+')dnl
+
+dnl
+define(`blockorientations',`(CSR,BCSR,VBR)')dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+foreach(`blockoriented',blockorientations,`dnl
+RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_DISPATCHER_PROTOTYPE(RSB_M4_TYPES,blockoriented);
+')dnl
+')dnl
+dnl
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+foreach(`blockoriented',blockorientations,`dnl
+RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_DISPATCHER(RSB_M4_TYPES,blockoriented)
+')dnl
+')dnl
+dnl
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+foreach(`blockoriented',blockorientations,`dnl
+foreach(`mtype',RSB_M4_TYPES,`dnl
+RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION(mtype,blockoriented)
+RSB_M4_MERGESORT_ON_COORDINATES_MERGE_FUNCTION(mtype,blockoriented)
+')dnl
+')dnl
+')dnl
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+foreach(`blockoriented',blockorientations,`dnl
+foreach(`mtype',RSB_M4_TYPES,`dnl
+RSB_M4_MERGESORT_ON_COORDINATES_FUNCTION_PROTOTYPE(mtype,blockoriented);
+RSB_M4_MERGESORT_ON_COORDINATES_MERGE_FUNCTION_PROTOTYPE(mtype,blockoriented);
+')dnl
+')dnl
+')dnl
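+dnl
+dnl	(For instance, for mtype `double' and blockoriented `VBR' the loop
+dnl	above emits the prototypes of rsb_do_mergesort_double_VBR and
+dnl	rsb_do_merge_double_VBR, as found in the generated rsb_mergesort.h.)
+dnl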
+dnl
+dnl
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`
+#endif /* RSB_MERGESORT_H_INCLUDED */
+')
+dnl
+/* @endcond */
+dnl
diff --git a/rsb_mio.c b/rsb_mio.c
new file mode 100644
index 0000000..0a26bd3
--- /dev/null
+++ b/rsb_mio.c
@@ -0,0 +1,1275 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains matrix I/O functions.
+ * */
+
+// FIXME: this code is messy and unclean
+
+#include "rsb_internals.h"
+#ifdef RSB_HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif /* RSB_HAVE_SYS_STAT_H */
+#if RSB_WANT_ZLIB_SUPPORT
+#include <zlib.h>
+#endif /* RSB_WANT_ZLIB_SUPPORT */
+#include <stdio.h>
+#define RSB_MMIOH_CL 4 
+#define RSB_20120321_IOBUFFERING 0
+#if RSB_WANT_OMPIO_SUPPORT
+#include "rsb_ompio.h"
+#endif /* RSB_WANT_OMPIO_SUPPORT */
+#define RSB_PATCH_GILLES_20130906 0
+#if RSB_PATCH_GILLES_20130906
+//#define _GNU_SOURCE
+#include <asm/fcntl.h>
+#include <unistd.h>		/* O_DIRECT */
+#include <sys/types.h>          /* See NOTES */
+#include <sys/stat.h>
+#endif
+
+#define rsb_util_sort_column_major(VA,IA,JA,nnz,nr,nc,typecode,flags) rsb_util_sort_row_major_inner(VA,JA,IA,nnz,nc,nr,typecode,flags)
+#if 1
+#define RSB_IO_VERBOSE_MSG(NZI,NNZ)
+#else
+#define RSB_IO_VERBOSE_GRNLRT RSB_MILLION_I
+// #define RSB_IO_VERBOSE_GRNLRT 100000
+#define RSB_IO_VERBOSE_MSG(NZI,NNZ) if((NZI)%RSB_IO_VERBOSE_GRNLRT==0) RSB_STDERR("%s%dM/%dM\n",RSB_CLEARTERM_STRING,(NZI)/RSB_IO_VERBOSE_GRNLRT,(NNZ)/RSB_IO_VERBOSE_GRNLRT )
+#endif
+
+#define RSB_FILE_ALLOW_LOAD_EMPTY_PATTERN 1 /* 20140324 */
+
+#ifdef RSB_WITH_MM
+rsb_err_t rsb_util_mm_load_coo_matrix(const char *filename, struct rsb_coo_matrix_t * cmp)
+{
+	/**
+	 * \ingroup gr_internals
+	 *
+	 * Loads in a matrix in unsorted COO format. (new)
+	 *
+	 * FIXME : UNTESTED
+	 *
+	 * \note used by experiment.c files
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_coo_matrix_t cm;
+	rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+
+	if(!cmp || !filename)
+	{
+		errval = RSB_ERR_BADARGS;
+	       	RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	RSB_BZERO_P(&cm);
+	cm.typecode = cmp->typecode;	// should be specified
+	errval = rsb__util_mm_load_matrix_f(filename, &cm.IA, &cm.JA,&cm.VA , &cm.nr, &cm.nc, &cm.nnz , cm.typecode, flags, NULL, NULL);
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+	rsb_memcpy(cmp,&cm,sizeof(cm));
+err:
+	rsb__do_perror(NULL,errval);
+	RSB_DO_ERR_RETURN(errval)
+}
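+
+/* An illustrative usage sketch for the loader above (not part of the
+ * library; "A.mtx" is the small sample matrix shipped in the source tree):
+ *
+ *	struct rsb_coo_matrix_t cm;
+ *	RSB_BZERO_P(&cm);
+ *	cm.typecode = RSB_NUMERICAL_TYPE_DEFAULT;
+ *	if(!RSB_SOME_ERROR(rsb_util_mm_load_coo_matrix("A.mtx",&cm)))
+ *	{
+ *		... use cm.nr, cm.nc, cm.nnz, cm.IA, cm.JA, cm.VA here ...
+ *		rsb__destroy_coo_matrix_t(&cm);
+ *	}
+ */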
+
+rsb_err_t rsb__util_mm_info_matrix_f(const char *fn,  rsb_coo_idx_t *m, rsb_coo_idx_t *k , rsb_nnz_idx_t *nnz, rsb_type_t *typecode, rsb_bool_t * is_symmetric, rsb_bool_t * is_hermitian, rsb_bool_t * is_pattern, rsb_bool_t * is_lower, rsb_bool_t * is_upper , rsb_bool_t * is_vector)
+{
+	/*! 
+	 *  \ingroup internals
+	 *  FIXME : does not return cleanly in case of errors
+	 * */
+
+  	FILE * fd = NULL;
+	int innz = 0;	/* FIXME */
+	int _m = 0,_k = 0;
+	char matcode[RSB_MMIOH_CL]; // !?
+	rsb_bool_t is_vector_ = RSB_BOOL_FALSE;
+
+	if(nnz)*nnz = RSB_MARKER_NNZ_VALUE ;
+	if(m)*m = RSB_MARKER_COO_VALUE;
+	if(k)*k = RSB_MARKER_COO_VALUE;
+	/* TODO: needs to define some RSB_NUMERICAL_COMPLEX_TYPE_DEFAULT macro and use it here, in case of a complex matrix */
+	if(typecode && RSB_MATRIX_UNSUPPORTED_TYPE(*typecode))*typecode = RSB_NUMERICAL_TYPE_DEFAULT;
+
+	if(!fn)
+	{
+		return RSB_ERR_BADARGS;
+	}
+
+	if ((fd = RSB_FOPEN(fn, "r")) == NULL)
+	{
+		RSB_STDERR("Failed opening file: %s\n",fn);
+		return RSB_ERR_GENERIC_ERROR;
+	}
+
+	if (rsb__mm_read_banner(fd,NULL,&(matcode)) != 0)
+	{
+        	RSB_STDERR("Could not process Matrix Market banner.\n");
+		RSB_FCLOSE(fd);
+		return RSB_ERR_GENERIC_ERROR;
+	}
+
+	/*  This is how one can screen matrix types if an application     */
+	/*  only supports a subset of the Matrix Market data types.       */
+
+	is_vector_ = (rsb_mm_is_sparse(matcode))?RSB_BOOL_FALSE:RSB_BOOL_TRUE;
+	if ( !rsb_mm_is_matrix(matcode) /*|| !rsb_mm_is_sparse(matcode)*/ )
+//	if (!rsb_mm_is_real(matcode) || !rsb_mm_is_matrix(matcode) || !rsb_mm_is_sparse(matcode) )
+	{
+        	RSB_STDERR("%s","Sorry, this application does not support ");
+	        RSB_STDERR("Matrix Market type: [%s]\n", rsb__mm_typecode_to_str(matcode));
+		RSB_FCLOSE(fd);
+        	return RSB_ERR_UNSUPPORTED_TYPE;
+	}
+
+	/* find out size of sparse matrix .... */
+
+		
+	if( ((is_vector_) && (rsb__mm_read_mtx_array_size(fd,NULL,&_m,&_k) !=0)) || ((!is_vector_) && (rsb__mm_read_mtx_crd_size(fd,NULL,&_m,&_k,&innz)) !=0) )
+	{
+		RSB_FCLOSE(fd);
+        	return RSB_ERR_GENERIC_ERROR;
+	}
+	if(m)*m = (rsb_coo_idx_t)_m;
+	if(k)*k = (rsb_coo_idx_t)_k;
+
+	if(is_vector)
+	{
+		*is_vector = is_vector_;
+	}
+
+	if(is_pattern)
+	{
+		if(rsb_mm_is_pattern(matcode))
+			*is_pattern = RSB_BOOL_TRUE;
+		else
+			*is_pattern = RSB_BOOL_FALSE;
+	}
+
+	if(is_symmetric)
+	{
+		if(rsb_mm_is_symmetric(matcode))
+			*is_symmetric = RSB_BOOL_TRUE;
+		else
+			*is_symmetric = RSB_BOOL_FALSE;
+	}
+
+	if(is_hermitian)
+	{
+		if(rsb_mm_is_hermitian(matcode))
+			*is_hermitian = RSB_BOOL_TRUE;
+		else
+			*is_hermitian = RSB_BOOL_FALSE;
+	}
+
+	if(is_lower)
+	{
+		// FIXME: using the default. UNFINISHED
+		if(is_symmetric || is_hermitian)
+			*is_lower |= RSB_BOOL_TRUE;
+		else
+			*is_lower |= RSB_BOOL_FALSE;
+	}
+
+	if(is_upper)
+		*is_upper |= RSB_BOOL_FALSE; // FIXME: why this ?
+		//*is_upper |= RSB_BOOL_TRUE;
+
+	if(m && k)
+	if (((int)*m != _m)||((int)*k != _k))
+	{
+		/* overflow */
+		RSB_IO_ERROR("overflow error while reading matrix dimensions.\n");
+        	return RSB_ERR_INTERNAL_ERROR;
+	}
+
+	if(is_vector_)
+		innz = _m*_k; /* 20120904 */
+	if(nnz)
+	{
+		*nnz = innz;
+#if RSB_FILE_ALLOW_LOAD_EMPTY_PATTERN
+		if(*nnz<0)
+#else
+		if(*nnz<1)
+#endif
+			return RSB_ERR_GENERIC_ERROR;
+	}
+
+	RSB_FCLOSE(fd);
+	return RSB_ERR_NO_ERROR;
+}
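+
+/* A minimal header-query sketch (illustrative; NULL is acceptable for any
+ * of the output pointers one is not interested in):
+ *
+ *	rsb_coo_idx_t m = 0, k = 0;
+ *	rsb_nnz_idx_t nnz = 0;
+ *	rsb_type_t typecode = RSB_NUMERICAL_TYPE_DEFAULT;
+ *	rsb_err_t errval = rsb__util_mm_info_matrix_f("A.mtx", &m, &k, &nnz,
+ *		&typecode, NULL, NULL, NULL, NULL, NULL, NULL);
+ */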
+
+static int rsb_zfscanf(FILE * fd,const char * fs,rsb_coo_idx_t *IV, rsb_coo_idx_t *JV, void * VAR, void * VAI, void * ngzfd)
+{
+	/**
+	 *  \ingroup internals
+	 *  FIXME: error handling is missing
+	 * */
+#if RSB_WANT_ZLIB_SUPPORT
+	if(ngzfd)
+	{
+		if((!IV) && (!JV))
+		{
+			if(VAI)
+				return fscanf(ngzfd,fs,VAR,VAI);
+			else
+				return fscanf(ngzfd,fs,VAR);
+		}
+		else
+		{
+			if(VAI)
+				return fscanf(ngzfd,fs,IV,JV,VAR,VAI);
+			if(VAR)
+				return fscanf(ngzfd,fs,IV,JV,VAR);
+			else
+				return fscanf(ngzfd,fs,IV,JV);
+		}
+	}
+	else
+#endif /* RSB_WANT_ZLIB_SUPPORT */
+		return rsb_fscanf(fd,fs,IV,JV,VAR,VAI);
+}
+
+int rsb_fscanf(FILE * fd,const char * fs,rsb_coo_idx_t *IV, rsb_coo_idx_t *JV, void * VAR, void * VAI)
+{
+	/**
+	 *  \ingroup internals
+	 *  FIXME: error handling is missing
+	 * */
+#if RSB_WANT_ZLIB_SUPPORT
+	char line[MM_MAX_LINE_LENGTH];
+	gzgets(fd,line,MM_MAX_LINE_LENGTH);
+
+	if((!IV) && (!JV))
+	{
+		if(VAI)
+			return sscanf(line,fs,VAR,VAI);
+		if(VAR)
+			return sscanf(line,fs,VAR);
+		else
+			return 0;
+	}
+	else
+	{
+		if(VAI)
+			return sscanf(line,fs,IV,JV,VAR,VAI);
+		if(VAR)
+			return sscanf(line,fs,IV,JV,VAR);
+		else
+			return sscanf(line,fs,IV,JV);
+	}
+#else /* RSB_WANT_ZLIB_SUPPORT */
+	if((!IV) && (!JV))
+	{
+		if(VAI)
+			return fscanf(fd,fs,VAR,VAI);
+		if(VAR)
+			return fscanf(fd,fs,VAR);
+		else
+			return 0;
+	}
+	else
+	{
+		if(VAI)
+			return fscanf(fd,fs,IV,JV,VAR,VAI);
+		if(VAR)
+			return fscanf(fd,fs,IV,JV,VAR);
+		else
+			return fscanf(fd,fs,IV,JV);
+	}
+#endif /* RSB_WANT_ZLIB_SUPPORT */
+}
+
+char * rsb_fgets(char* RSB_RESTRICT buf, int len, FILE * RSB_RESTRICT fd)
+{
+	/**
+	 *  \ingroup internals
+	 * */
+#if RSB_WANT_ZLIB_SUPPORT
+	return gzgets(fd,buf,len);
+#else /* RSB_WANT_ZLIB_SUPPORT */
+	return fgets(buf,len,fd);
+#endif /* RSB_WANT_ZLIB_SUPPORT */
+}
+
+rsb_err_t rsb__util_mm_load_vector_f(const char *fn, void **VA, rsb_nnz_idx_t *nnz, rsb_type_t typecode)
+{
+	/* FIXME: in perpective, need stride, C/Fortran order, etc ... */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	errval = rsb__util_mm_load_matrix_f(fn,NULL,NULL,VA,NULL,NULL,nnz,typecode,RSB_FLAG_NOFLAGS,NULL,NULL);
+	return errval;
+}
+
+rsb_err_t rsb__util_mm_load_matrix_f(const char *fn, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, void **VA, rsb_coo_idx_t *m, rsb_coo_idx_t *k , rsb_nnz_idx_t *nnz, rsb_type_t typecode, rsb_flags_t flags, rsb_bool_t *is_lowerp, rsb_bool_t *is_upperp)
+{
+	/**
+	 *  \ingroup internals
+	 *  This function reads in a single Matrix Market format matrix from a specified filename.
+	 *  The matrix will be loaded in coordinate storage format, and the IA, JA, VA
+	 *  arrays will be allocated here for this purpose.
+	 *
+	 * \param fn	should contain a valid matrix file name
+	 * \param IA	should point to a pointer which will be allocated here to contain the elements' row indices
+	 * \param JA	should point to a pointer which will be allocated here to contain the elements' column indices
+	 * \param VA	should point to a pointer which will be allocated here to contain the elements' numerical values
+	 * \param m	should point to the matrix rows count variable, which will be set in this function
+	 * \param k	should point to the matrix columns count variable, which will be set in this function
+	 * \param nnz	should point to the matrix nonzero count variable, which will be set in this function (shall be initialized to zero in advance!)
+	 * \param typecode	should specify a valid numerical type to convert the read data to; see rsb.h for this.
+	 * \return RSB_ERR_NO_ERROR on correct operation, an error code (see \ref errors_section) otherwise.
+	 *
+	 * Notes: 
+	 *
+	 *	There is no guarantee that the loaded matrix will be free from duplicate values.
+	 *	The specified numerical type should be enabled at library generation time.
+	 *
+	 * FIXME : lots of this function's code should be generated from macros.
+	 * FIXME : error handling is awful
+	 * FIXME : otype stands for 'original' or 'output' ?
+	 * TODO  : some option to specify a pattern-only load
+	 * FIXME : in a future version, should not allocate if pointers not NULL
+	 * TODO: may print (at least for rsbench) out how many discarded duplicates, how many zeroes, etc etc
+	 * TODO: may support different styles for tolerating / detecting e.g. reading a vector file instead of a matrix one.
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	size_t re = 0;/* read elements */
+	FILE *fd = NULL;
+//#if RSB_WANT_ZLIB_SUPPORT
+	FILE *ngzfd = NULL;
+//#endif
+	rsb_nnz_idx_t i = 0;
+	rsb_nnz_idx_t annz = 0;/* allocated nnz */
+	rsb_bool_t is_symmetric = RSB_BOOL_FALSE,is_lower = RSB_BOOL_FALSE,is_upper = RSB_BOOL_FALSE,is_pattern = RSB_BOOL_FALSE, is_hermitian = RSB_BOOL_FALSE, is_vector = RSB_BOOL_FALSE;/* FIXME : no expansion support for hermitian */
+	rsb_bool_t aja = RSB_BOOL_FALSE, ava = RSB_BOOL_FALSE, aia = RSB_BOOL_FALSE;
+	rsb_flags_t otype = typecode;/* original type */
+	rsb_time_t frt = 0;
+	char matcode[RSB_MMIOH_CL]; // !?
+	rsb_bool_t is_gz = RSB_BOOL_FALSE;
+
+	#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+	double  **dval = NULL;
+	#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+	#ifdef RSB_NUMERICAL_TYPE_FLOAT
+	float **fval = NULL;
+	#endif /* RSB_NUMERICAL_TYPE_FLOAT */
+	#ifdef RSB_NUMERICAL_TYPE_CHAR
+	char **cval = NULL;
+	#endif /* RSB_NUMERICAL_TYPE_CHAR */
+	#ifdef RSB_NUMERICAL_TYPE_INT
+	int  **ival = NULL;
+	#endif /* RSB_NUMERICAL_TYPE_INT */
+	#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX
+	float complex  **zval = NULL;
+	#endif /* RSB_NUMERICAL_TYPE_FLOAT_COMPLEX */
+	#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX
+	double complex  **Zval = NULL;
+	#endif /* RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX */
+	int innz = 0;	/* FIXME */
+	int _m = 0,_k = 0;
+#if RSB_20120321_IOBUFFERING
+	char*iobuf = NULL;
+	size_t iobbs = 16*1024*1024;
+#endif /* RSB_20120321_IOBUFFERING */
+
+	frt = - rsb_time();
+
+	if ( RSB_MATRIX_UNSUPPORTED_TYPE(typecode) )return RSB_ERR_UNSUPPORTED_TYPE	;
+
+	//if(!VA || !JA || !IA) return RSB_ERR_BADARGS;
+	if( (!(VA && JA && IA)) && (!(VA && (!JA) && (!IA))) ) return RSB_ERR_BADARGS;
+	if(IA)aia = RSB_BOOL_IS_POINTER_NON_NULL(*IA);
+	if(JA)aja = RSB_BOOL_IS_POINTER_NON_NULL(*JA);
+	if(VA)ava = RSB_BOOL_IS_POINTER_NON_NULL(*VA);
+	//if(!nnz || !k || !m) return RSB_ERR_BADARGS;
+	if((!(nnz && k && m)) && (!(nnz && (!k) && (!m)))) return RSB_ERR_BADARGS;
+	if(*nnz)
+		annz = *nnz;// if user set
+
+	errval = rsb__util_mm_info_matrix_f(fn,m,k,nnz,&typecode,&is_symmetric,&is_hermitian,&is_pattern,&is_lower,&is_upper,&is_vector);
+	if(RSB_SOME_ERROR(errval))
+		goto prerr;
+	if(annz==0)
+		annz = *nnz;
+	if(annz<*nnz)
+	{
+		RSB_ERROR("user-set array size (%d) does not fit actual input (%d)\n",(int)annz,(int)*nnz);
+		errval = RSB_ERR_BADARGS;
+		goto prerr;
+	}
+	
+	if(is_pattern)
+	#ifdef RSB_NUMERICAL_TYPE_PATTERN
+		typecode = RSB_NUMERICAL_TYPE_PATTERN;
+	#else /* RSB_NUMERICAL_TYPE_PATTERN */
+		return RSB_ERR_UNSUPPORTED_FEATURE;
+	#endif /* RSB_NUMERICAL_TYPE_PATTERN */
+
+	#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+	if (otype == RSB_NUMERICAL_TYPE_DOUBLE)dval = (double**)(VA);
+	else
+	#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+	#ifdef RSB_NUMERICAL_TYPE_FLOAT
+	if (otype == RSB_NUMERICAL_TYPE_FLOAT)fval = (float **)(VA);
+	else
+	#endif /* RSB_NUMERICAL_TYPE_FLOAT */
+	#ifdef RSB_NUMERICAL_TYPE_INT
+	if (otype == RSB_NUMERICAL_TYPE_INT)ival = (int **)(VA);
+	else
+	#endif /* RSB_NUMERICAL_TYPE_INT */
+	#ifdef RSB_NUMERICAL_TYPE_CHAR
+	if (otype == RSB_NUMERICAL_TYPE_CHAR)cval = (char **)(VA);
+	else
+	#endif /* RSB_NUMERICAL_TYPE_CHAR */
+	#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX
+	if (otype == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX)Zval = (double complex **)(VA);
+	else
+	#endif /* RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX */
+	#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX
+	if (otype == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX)zval = (float complex **)(VA);
+	else
+	#endif /* RSB_NUMERICAL_TYPE_FLOAT_COMPLEX */
+	#ifdef RSB_NUMERICAL_TYPE_PATTERN
+	if (otype == RSB_NUMERICAL_TYPE_PATTERN){/* nothing to do */}
+	else
+	#endif /* RSB_NUMERICAL_TYPE_PATTERN */
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+
+	if(IA)
+	*IA = NULL;
+	if(JA)
+	*JA = NULL;
+  	
+	{
+		// THIS IS A CODE DUPLICATION ..
+	is_gz = RSB_BOOL_FALSE;
+#if RSB_WANT_ZLIB_SUPPORT
+	if ((fd = gzopen(fn,"r")) != NULL)
+	{
+		if(gzdirect(fd))
+		{
+			gzclose(fd);
+		}
+		else
+			is_gz = RSB_BOOL_TRUE;
+	}
+#endif /* RSB_WANT_ZLIB_SUPPORT */
+
+#if RSB_WANT_ZLIB_SUPPORT
+	if(is_gz)
+	{
+		if((fd = gzopen(fn,"r")) == NULL)
+		{
+			/* TODO: the following code is not robust, shall fix it. */
+#ifndef RSB_HAVE_DUP
+#error Functions 'dup'/'fileno' are not present? Reconfigure without the Z library then!
+			int fnum = 0;/* */
+			ngzfd = NULL;
+#else /* RSB_HAVE_DUP */
+			int fnum = dup( rsb__fileno(fd) );
+			ngzfd = gzdopen(fnum,"r");
+#endif /* RSB_HAVE_DUP */
+			if(!ngzfd)
+			{
+				gzclose(fd);
+				RSB_ERROR(RSB_ERRMSG_FILEOPENPGZ"\n");
+				return RSB_ERR_GENERIC_ERROR;
+			}
+		}
+	}
+	else
+#endif /* RSB_WANT_ZLIB_SUPPORT */
+#if !RSB_PATCH_GILLES_20130906
+	if ((fd = fopen(fn,"r")) == NULL)
+#else /* RSB_PATCH_GILLES_20130906 */
+{
+		int _fd;
+		if ((_fd = open(fn,O_RDONLY|O_DIRECT)) < 0)
+		{
+			RSB_STDERR(RSB_ERRMSG_FILEOPENP" %s\n",fn);
+			return RSB_ERR_GENERIC_ERROR;
+		}
+		else if ((fd = fdopen(_fd,"r")) == NULL) 
+#endif /* RSB_PATCH_GILLES_20130906 */
+	{
+		RSB_STDERR(RSB_ERRMSG_FILEOPENP" %s\n",fn);
+		return RSB_ERR_GENERIC_ERROR;
+	}
+	else
+		ngzfd = fd;
+#if RSB_20120321_IOBUFFERING
+	//iobbs = BUFSIZ;
+	if(iobbs>0)
+	if(((iobuf = rsb__malloc(iobbs))==NULL) 
+			//|| (0!=setvbuf(ngzfd,NULL,_IOLBF,0))// line buffering: slow
+			//|| (0!=setvbuf(ngzfd,NULL,_IONBF,0))// no buffering: super-slow
+			|| (0!=setvbuf(ngzfd,iobuf,_IOFBF,iobbs)) // seems not to work
+				)
+	{
+		RSB_STDERR("problems setting up a buffer for file ""%s\n",fn);
+	}
+	//setbuf(ngzfd,iobbs);
+	//setbuffer(ngzfd,iobuf,iobbs);
+#endif /* RSB_20120321_IOBUFFERING */
+#if RSB_PATCH_GILLES_20130906
+}
+#endif /* RSB_PATCH_GILLES_20130906 */
+
+	if (rsb__mm_read_banner(fd,ngzfd,&(matcode)) != 0)
+	{
+        	RSB_STDERR(RSB_ERRMSG_TMXMKTBANNER".\n");
+		RSB_FCLOSE(fd);
+		return RSB_ERR_GENERIC_ERROR;
+	}
+
+	if( ((is_vector) && (rsb__mm_read_mtx_array_size(fd,ngzfd,&_m,&_k) !=0)) || ((!is_vector) && (rsb__mm_read_mtx_crd_size(fd,ngzfd,&_m,&_k,&innz)) !=0) )
+	{
+		RSB_FCLOSE(fd);
+        	return RSB_ERR_GENERIC_ERROR;
+	}
+	if(m)
+	*m = (rsb_coo_idx_t)_m;
+	if(k)
+	*k = (rsb_coo_idx_t)_k;
+	}
+
+	#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+	if (otype == RSB_NUMERICAL_TYPE_DOUBLE)*dval = *VA?*VA: rsb__calloc(sizeof(double)*annz);
+	else
+	#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+	#ifdef RSB_NUMERICAL_TYPE_FLOAT
+	if (otype == RSB_NUMERICAL_TYPE_FLOAT) *fval =*VA?*VA: rsb__calloc(sizeof(float) *annz);
+	else
+	#endif /* RSB_NUMERICAL_TYPE_FLOAT */
+	#ifdef RSB_NUMERICAL_TYPE_INT
+	if (otype == RSB_NUMERICAL_TYPE_INT) *ival =*VA?*VA: rsb__calloc(sizeof(int) *annz);
+	else
+	#endif /* RSB_NUMERICAL_TYPE_INT */
+	#ifdef RSB_NUMERICAL_TYPE_CHAR
+	if (otype == RSB_NUMERICAL_TYPE_CHAR) *cval =*VA?*VA: rsb__calloc(sizeof(char) *annz);
+	else
+	#endif /* RSB_NUMERICAL_TYPE_CHAR */
+	#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX
+	if (otype == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX) *zval =*VA?*VA: rsb__calloc(sizeof(float complex) *annz);
+	else
+	#endif /* RSB_NUMERICAL_TYPE_FLOAT_COMPLEX */
+	#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX
+	if (otype == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX) *Zval =*VA?*VA: rsb__calloc(sizeof(double complex) *annz);
+	else
+	#endif /* RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX */
+	#ifdef RSB_NUMERICAL_TYPE_PATTERN
+	if (otype == RSB_NUMERICAL_TYPE_PATTERN){/* no zval allocation (TODO : document this) */}
+	else
+	#endif /* RSB_NUMERICAL_TYPE_PATTERN */
+	{errval = RSB_ERR_UNSUPPORTED_TYPE;RSB_PERR_GOTO(err,RSB_ERRM_ES);}
+
+	if(IA && JA)
+	{
+		if( flags & RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR )
+		{
+			RSB_WARN("RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR will break with non matching index types\n");
+			/* 	Allocating slightly oversized arrays.
+				FIXME : potential type/size mismatches here ! 
+			*/
+			if(!*IA)
+				*IA  = rsb__calloc(sizeof(rsb_nnz_idx_t)*((annz>*m?annz:*m)+1));
+			if(!*JA)
+				*JA   = rsb__calloc(sizeof(rsb_nnz_idx_t)*(annz+1));
+		}
+		else
+		{
+			if(!*IA)
+			*IA   = rsb__calloc(sizeof(rsb_coo_idx_t)*annz);
+			if(!*JA)
+			*JA   = rsb__calloc(sizeof(rsb_coo_idx_t)*annz);
+		}
+    		if( !*IA || !*JA)
+			goto err;
+	}
+
+	#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+	if (otype == RSB_NUMERICAL_TYPE_DOUBLE) if(!dval) {RSB_PERR_GOTO(err,RSB_ERRM_ES)}
+	#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+	#ifdef RSB_NUMERICAL_TYPE_FLOAT
+	if (otype == RSB_NUMERICAL_TYPE_FLOAT ) if(!fval) {RSB_PERR_GOTO(err,RSB_ERRM_ES)}
+	#endif /* RSB_NUMERICAL_TYPE_FLOAT */
+	#ifdef RSB_NUMERICAL_TYPE_INT
+	if (otype == RSB_NUMERICAL_TYPE_INT   ) if(!ival) {RSB_PERR_GOTO(err,RSB_ERRM_ES)}
+	#endif /* RSB_NUMERICAL_TYPE_INT */
+	#ifdef RSB_NUMERICAL_TYPE_CHAR
+	if (otype == RSB_NUMERICAL_TYPE_CHAR  ) if(!cval) {RSB_PERR_GOTO(err,RSB_ERRM_ES)}
+	#endif /* RSB_NUMERICAL_TYPE_CHAR */
+	#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX
+	if (otype == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ) if(!zval) {RSB_PERR_GOTO(err,RSB_ERRM_ES)}
+	#endif /* RSB_NUMERICAL_TYPE_FLOAT_COMPLEX */
+	#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX
+	if (otype == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ) if(!Zval) {RSB_PERR_GOTO(err,RSB_ERRM_ES)}
+	#endif /* RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX */
+	#ifdef RSB_NUMERICAL_TYPE_PATTERN
+	/* :) */
+	#endif /* RSB_NUMERICAL_TYPE_PATTERN */
+
+    	if( IA && JA)
+		goto full_scan;
+
+#if RSB_WANT_OMPIO_SUPPORT
+	{errval = RSB_ERR_UNIMPLEMENTED_YET;RSB_PERR_GOTO(err,RSB_ERRM_ES);}
+#endif /* RSB_WANT_OMPIO_SUPPORT */
+
+	#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+	if (typecode == RSB_NUMERICAL_TYPE_DOUBLE)
+	{
+		double iv;
+		if(rsb_mm_is_complex(matcode))
+		for (i=0; i<*nnz; i++)
+		{
+			re += (rsb_zfscanf(fd,"%lg %lg\n",NULL,NULL,*dval+i,&(iv),ngzfd)==2);
+			RSB_IO_VERBOSE_MSG(i,*nnz);
+		}
+		else
+		for (i=0; i<*nnz; i++)
+		{
+			re += (rsb_zfscanf(fd,"%lg\n",NULL,NULL,*dval+i,NULL,ngzfd)==1);
+			RSB_IO_VERBOSE_MSG(i,*nnz);
+		}
+	}
+	#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+
+	#ifdef RSB_NUMERICAL_TYPE_FLOAT
+	if (typecode == RSB_NUMERICAL_TYPE_FLOAT)
+	{
+		float iv;
+		if(rsb_mm_is_complex(matcode))
+		for (i=0; i<*nnz; i++)
+		{
+			re += (rsb_zfscanf(fd,"%g %g\n",NULL,NULL,*fval+i,&(iv),ngzfd)==2);
+			RSB_IO_VERBOSE_MSG(i,*nnz);
+		}
+		else
+		for (i=0; i<*nnz; i++)
+		{
+			re += (rsb_zfscanf(fd, "%g\n",NULL,NULL,*fval+i,NULL,ngzfd)==1);
+			RSB_IO_VERBOSE_MSG(i,*nnz);
+		}
+	}
+	#endif /* RSB_NUMERICAL_TYPE_FLOAT */
+
+	#ifdef RSB_NUMERICAL_TYPE_INT
+	if (typecode == RSB_NUMERICAL_TYPE_INT)
+	for (i=0; i<*nnz; i++)
+	{
+		double fv;
+		re += (rsb_zfscanf(fd,"%lg\n",NULL,NULL,&fv,NULL,ngzfd)==1);
+		(*ival)[i] = (int)fv;
+		RSB_IO_VERBOSE_MSG(i,*nnz);
+	}
+	#endif /* RSB_NUMERICAL_TYPE_INT */
+
+	#ifdef RSB_NUMERICAL_TYPE_CHAR
+	if (typecode == RSB_NUMERICAL_TYPE_CHAR)
+	for (i=0; i<*nnz; i++)
+	{
+		double fv;
+		re += (rsb_zfscanf(fd,"%g\n",NULL,NULL,&fv,NULL,ngzfd)==1);
+		(*cval)[i] = (char)fv;
+		RSB_IO_VERBOSE_MSG(i,*nnz);
+	}
+	#endif /* RSB_NUMERICAL_TYPE_CHAR */
+
+	#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX
+	if (typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX)
+	{
+		if(rsb_mm_is_complex(matcode))
+		for (i=0; i<*nnz; i++)
+		{
+			float rv,iv;
+			re += (rsb_zfscanf(fd,"%g %g\n",NULL,NULL,&(rv),&(iv),ngzfd)==2);
+			(*zval)[i] = (rv + I * iv);
+			RSB_IO_VERBOSE_MSG(i,*nnz);
+		}
+		else
+		for (i=0; i<*nnz; i++)
+		{
+			float rv;
+			re += (rsb_zfscanf(fd,"%g\n",NULL,NULL,&(rv),NULL,ngzfd)==1);
+			(*zval)[i] = (rv + I * 0);
+			RSB_IO_VERBOSE_MSG(i,*nnz);
+		}
+	}
+	#endif /* RSB_NUMERICAL_TYPE_FLOAT_COMPLEX */
+
+	#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX
+	if (typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX)
+	{
+		if(rsb_mm_is_complex(matcode))
+		for (i=0; i<*nnz; i++)
+		{
+			double rv,iv;
+			re += (rsb_zfscanf(fd,"%lg %lg\n",NULL,NULL,&(rv),&(iv),ngzfd)==2);
+			(*Zval)[i] = (rv + I * iv);
+			RSB_IO_VERBOSE_MSG(i,*nnz);
+		}
+		else
+		for (i=0; i<*nnz; i++)
+		{
+			double rv;
+			re += (rsb_zfscanf(fd,"%lg\n",NULL,NULL,&(rv),NULL,ngzfd)==1);
+			(*Zval)[i] = (rv + I * 0);
+			RSB_IO_VERBOSE_MSG(i,*nnz);
+		}
+	}
+	#endif /* RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX */
+	goto scan_done;
+full_scan:
+	/* NOTE: when reading in doubles, ANSI C requires the use of the "l"  */
+	/*   specifier as in "%lg", "%lf", "%le", otherwise errors will occur */
+	/*  (ANSI C X3.159-1989, Sec. 4.9.6.2, p. 136 lines 13-15)            */
+
+	#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+	if (typecode == RSB_NUMERICAL_TYPE_DOUBLE)
+	{
+		if(rsb_mm_is_complex(matcode))
+#if RSB_WANT_OMPIO_SUPPORT
+		{errval = RSB_ERR_UNIMPLEMENTED_YET;RSB_PERR_GOTO(err,RSB_ERRM_ES);}
+#else /* RSB_WANT_OMPIO_SUPPORT */
+		for (i=0; i<*nnz; i++)
+		{
+			int iI,iJ;
+			double iv;
+			re += (rsb_zfscanf(fd,"%d %d %lg %lg\n",&iI,&iJ,*dval+i,&(iv),ngzfd)==4);
+			(*IA)[i] = (rsb_coo_idx_t)iI;
+			(*JA)[i] = (rsb_coo_idx_t)iJ;
+	        	(*IA)[i]--;  /* adjust from 1-based to 0-based */
+	        	(*JA)[i]--;
+			RSB_IO_VERBOSE_MSG(i,*nnz);
+		}
+#endif /* RSB_WANT_OMPIO_SUPPORT */
+		else
+#if RSB_WANT_OMPIO_SUPPORT
+			rsb__ompio_DOUBLE(nnz,fd,ngzfd,dval,IA,JA,&re);
+#else /* RSB_WANT_OMPIO_SUPPORT */
+		for (i=0; i<*nnz; i++)
+		{
+			int iI,iJ;
+			re += (rsb_zfscanf(fd,"%d %d %lg\n",&iI,&iJ,*dval+i,NULL,ngzfd)==3);
+			(*IA)[i] = (rsb_coo_idx_t)iI;
+			(*JA)[i] = (rsb_coo_idx_t)iJ;
+	        	(*IA)[i]--;  /* adjust from 1-based to 0-based */
+        		(*JA)[i]--;
+			RSB_IO_VERBOSE_MSG(i,*nnz);
+		}
+#endif /* RSB_WANT_OMPIO_SUPPORT */
+	}
+	#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+
+	#ifdef RSB_NUMERICAL_TYPE_FLOAT
+	if (typecode == RSB_NUMERICAL_TYPE_FLOAT)
+	{
+		if(rsb_mm_is_complex(matcode))
+#if RSB_WANT_OMPIO_SUPPORT
+		{errval = RSB_ERR_UNIMPLEMENTED_YET;RSB_PERR_GOTO(err,RSB_ERRM_ES);}
+#else /* RSB_WANT_OMPIO_SUPPORT */
+		for (i=0; i<*nnz; i++)
+		{
+			int iI,iJ;
+			float iv;
+			re += (rsb_zfscanf(fd,"%d %d %g %g\n",&iI,&iJ,*fval+i,&(iv),ngzfd)==4);
+			(*IA)[i] = (rsb_coo_idx_t)iI;
+			(*JA)[i] = (rsb_coo_idx_t)iJ;
+	        	(*IA)[i]--;  /* adjust from 1-based to 0-based */
+	        	(*JA)[i]--;
+			RSB_IO_VERBOSE_MSG(i,*nnz);
+		}
+#endif /* RSB_WANT_OMPIO_SUPPORT */
+		else
+#if RSB_WANT_OMPIO_SUPPORT
+			rsb_ompio_FLOAT(nnz,fd,ngzfd,fval,IA,JA,&re);
+#else /* RSB_WANT_OMPIO_SUPPORT */
+		for (i=0; i<*nnz; i++)
+		{
+			int iI,iJ;
+			re += (rsb_zfscanf(fd, "%d %d %g\n",&iI,&iJ,*fval+i,NULL,ngzfd)==3);
+			(*IA)[i] = (rsb_coo_idx_t)iI;
+			(*JA)[i] = (rsb_coo_idx_t)iJ;
+	        	(*IA)[i]--;  /* adjust from 1-based to 0-based */
+	        	(*JA)[i]--;
+			RSB_IO_VERBOSE_MSG(i,*nnz);
+		}
+#endif /* RSB_WANT_OMPIO_SUPPORT */
+	}
+	#endif /* RSB_NUMERICAL_TYPE_FLOAT */
+
+	#ifdef RSB_NUMERICAL_TYPE_INT
+	if (typecode == RSB_NUMERICAL_TYPE_INT)
+#if RSB_WANT_OMPIO_SUPPORT
+			rsb_ompio_INT(nnz,fd,ngzfd,ival,IA,JA,&re);
+#else /* RSB_WANT_OMPIO_SUPPORT */
+	for (i=0; i<*nnz; i++)
+	{
+		int iI,iJ;
+		double fv;
+		re += (rsb_zfscanf(fd,"%d %d %lg\n",&iI,&iJ,&fv,NULL,ngzfd)==3);
+		(*IA)[i] = (rsb_coo_idx_t)iI;
+		(*JA)[i] = (rsb_coo_idx_t)iJ;
+        	(*IA)[i]--;  /* adjust from 1-based to 0-based */
+        	(*JA)[i]--;
+		(*ival)[i] = (int)fv;
+		RSB_IO_VERBOSE_MSG(i,*nnz);
+	}
+#endif /* RSB_WANT_OMPIO_SUPPORT */
+	#endif /* RSB_NUMERICAL_TYPE_INT */
+
+	#ifdef RSB_NUMERICAL_TYPE_CHAR
+	if (typecode == RSB_NUMERICAL_TYPE_CHAR)
+#if RSB_WANT_OMPIO_SUPPORT
+			rsb_ompio_CHAR(nnz,fd,ngzfd,cval,IA,JA,&re);
+#else /* RSB_WANT_OMPIO_SUPPORT */
+	for (i=0; i<*nnz; i++)
+	{
+		int iI,iJ;
+		double fv;
+		re += (rsb_zfscanf(fd,"%d %d %g\n",&iI,&iJ,&fv,NULL,ngzfd)==3);
+		(*IA)[i] = (rsb_coo_idx_t)iI;
+		(*JA)[i] = (rsb_coo_idx_t)iJ;
+        	(*IA)[i]--;  /* adjust from 1-based to 0-based */
+        	(*JA)[i]--;
+		(*cval)[i] = (char)fv;
+		RSB_IO_VERBOSE_MSG(i,*nnz);
+	}
+#endif /* RSB_WANT_OMPIO_SUPPORT */
+	#endif /* RSB_NUMERICAL_TYPE_CHAR */
+
+	#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX
+	if (typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX)
+	{
+		if(rsb_mm_is_complex(matcode))
+#if RSB_WANT_OMPIO_SUPPORT
+			rsb_ompio_FLOAT_COMPLEX(nnz,fd,ngzfd,zval,IA,JA,&re);
+#else /* RSB_WANT_OMPIO_SUPPORT */
+		for (i=0; i<*nnz; i++)
+		{
+			int iI,iJ;
+			float rv,iv;
+			re += (rsb_zfscanf(fd,"%d %d %g %g\n",&iI,&iJ,&(rv),&(iv),ngzfd)==4);
+			(*IA)[i] = (rsb_coo_idx_t)iI;
+			(*JA)[i] = (rsb_coo_idx_t)iJ;
+	        	(*IA)[i]--;  /* adjust from 1-based to 0-based */
+	        	(*JA)[i]--;
+			(*zval)[i] = (rv + I * iv);
+			RSB_IO_VERBOSE_MSG(i,*nnz);
+		}
+#endif /* RSB_WANT_OMPIO_SUPPORT */
+		else
+#if RSB_WANT_OMPIO_SUPPORT
+		{errval = RSB_ERR_UNIMPLEMENTED_YET;RSB_PERR_GOTO(err,RSB_ERRM_ES);}
+#else /* RSB_WANT_OMPIO_SUPPORT */
+		for (i=0; i<*nnz; i++)
+		{
+			int iI,iJ;
+			float rv;
+			re += (rsb_zfscanf(fd,"%d %d %g\n",&iI,&iJ,&(rv),NULL,ngzfd)==3);
+			(*IA)[i] = (rsb_coo_idx_t)iI;
+			(*JA)[i] = (rsb_coo_idx_t)iJ;
+	        	(*IA)[i]--;  /* adjust from 1-based to 0-based */
+	        	(*JA)[i]--;
+			(*zval)[i] = (rv + I * 0);
+			RSB_IO_VERBOSE_MSG(i,*nnz);
+		}
+#endif /* RSB_WANT_OMPIO_SUPPORT */
+	}
+	#endif /* RSB_NUMERICAL_TYPE_FLOAT_COMPLEX */
+
+	#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX
+	if (typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX)
+	{
+		if(rsb_mm_is_complex(matcode))
+#if RSB_WANT_OMPIO_SUPPORT
+			rsb__ompio_DOUBLE_COMPLEX(nnz,fd,ngzfd,Zval,IA,JA,&re);
+#else /* RSB_WANT_OMPIO_SUPPORT */
+		for (i=0; i<*nnz; i++)
+		{
+			int iI,iJ;
+			double rv,iv;
+			re += (rsb_zfscanf(fd,"%d %d %lg %lg\n",&iI,&iJ,&(rv),&(iv),ngzfd)==4);
+			(*IA)[i] = (rsb_coo_idx_t)iI;
+			(*JA)[i] = (rsb_coo_idx_t)iJ;
+	        	(*IA)[i]--;  /* adjust from 1-based to 0-based */
+	        	(*JA)[i]--;
+			(*Zval)[i] = (rv + I * iv);
+			RSB_IO_VERBOSE_MSG(i,*nnz);
+		}
+#endif /* RSB_WANT_OMPIO_SUPPORT */
+		else
+		for (i=0; i<*nnz; i++)
+		{
+			int iI,iJ;
+			double rv;
+			re += (rsb_zfscanf(fd,"%d %d %lg\n",&iI,&iJ,&(rv),NULL,ngzfd)==3);
+			(*IA)[i] = (rsb_coo_idx_t)iI;
+			(*JA)[i] = (rsb_coo_idx_t)iJ;
+	        	(*IA)[i]--;  /* adjust from 1-based to 0-based */
+	        	(*JA)[i]--;
+			(*Zval)[i] = (rv + I * 0);
+			RSB_IO_VERBOSE_MSG(i,*nnz);
+		}
+	}
+	#endif /* RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX */
+
+	#ifdef RSB_NUMERICAL_TYPE_PATTERN
+	if (typecode == RSB_NUMERICAL_TYPE_PATTERN)
+#if RSB_WANT_OMPIO_SUPPORT
+			rsb__ompio_PATTERN(nnz,fd,ngzfd,IA,JA,&re);
+#else /* RSB_WANT_OMPIO_SUPPORT */
+	for (i=0; i<*nnz; i++)
+	{
+		int iI,iJ;
+		re += (rsb_zfscanf(fd,"%d %d\n",&iI,&iJ,NULL,NULL,ngzfd)==2);
+		(*IA)[i] = (rsb_coo_idx_t)iI;
+		(*JA)[i] = (rsb_coo_idx_t)iJ;
+        	(*IA)[i]--;  /* adjust from 1-based to 0-based */
+        	(*JA)[i]--;
+		RSB_IO_VERBOSE_MSG(i,*nnz);
+	}
+#endif /* RSB_WANT_OMPIO_SUPPORT */
+	#endif /* RSB_NUMERICAL_TYPE_PATTERN */
+
+	if( is_lowerp || is_upperp )
+	{
+		rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+
+		flags = rsb__do_detect_and_add_triangular_flags(*IA,*JA,*nnz,flags);
+		if( is_lowerp )
+			*is_lowerp = RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER);
+		if( is_upperp )
+			*is_upperp = RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER);
+	}
+
+	#ifdef RSB_NUMERICAL_TYPE_PATTERN	
+	if(typecode == RSB_NUMERICAL_TYPE_PATTERN && otype!=typecode && VA)
+		rsb__fill_with_ones(*VA,otype,*nnz,1);
+	#endif /* RSB_NUMERICAL_TYPE_PATTERN */
+scan_done:
+	if (fd !=stdin)
+	{
+		// FIXME
+		if(ngzfd)
+			fclose(fd);
+		else
+			RSB_FCLOSE(fd);
+	}
+	if(re!=*nnz)
+	{
+		/* FIXME : this can happen when reading as double a complex matrix, now. */
+		RSB_STDERR("read only %zu out of %d matrix elements (incomplete or not a matrix file?)!\n",re,*nnz);
+		goto err;
+	}
+
+	if( _m != _k && is_symmetric )
+	{
+		RSB_STDERR("matrix declared as symmetric but not square!\n");
+		goto err;
+	}
+
+	if(!(IA && JA))
+		goto afterpmtxchecks;
+	if(is_symmetric && RSB_EXPERIMENTAL_EXPAND_SYMMETRIC_MATRICES_BY_DEFAULT)
+	{
+		/* FIXME: this breaks reallocation tricks !! */
+		if( rsb__reallocate_with_symmetry(IA,JA,VA,nnz,(otype)) )
+		{
+			RSB_STDERR("problems handling matrix symmetry!\n");
+			goto err;
+		}
+	}
+
+	if(is_symmetric && !RSB_EXPERIMENTAL_EXPAND_SYMMETRIC_MATRICES_BY_DEFAULT)
+	{
+		rsb_bool_t has_diagonal_elements = RSB_BOOL_FALSE;
+
+		if(rsb__util_coo_check_if_triangle_non_empty(*IA,*JA,*nnz,RSB_FLAG_UPPER))
+		{
+			RSB_STDERR("#converting upper to lower triangle..\n");
+			rsb__util_coo_upper_to_lower_symmetric(*IA,*JA,*nnz);
+			if(is_lower)
+				is_lower = RSB_BOOL_TRUE;
+		}
+
+		if(rsb__util_coo_check_if_triangle_non_empty(*IA,*JA,*nnz,RSB_FLAG_UPPER))
+		{
+			RSB_STDERR("input declared as symmetric, but it is unsymmetric!\n");
+			goto err;
+		}
+
+		errval = rsb__util_coo_check_if_has_diagonal_elements(*IA,*JA,*nnz,*m,&has_diagonal_elements);
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_STDERR("error while checking diagonal elements!\n");
+			goto err;
+		}
+
+		if(!has_diagonal_elements)
+		{
+			RSB_STDERR("Input has missing elements on the diagonal.\n"); /* FIXME: emit this in a verbose mode only */
+		}
+	}
+afterpmtxchecks:
+	frt += rsb_time();
+	/* this should be a disk read only routine, not an output reporting one */
+	if(0)
+		RSB_IO_NOTICE("file I/O took %lf s (%lf nnz, %lf nnz/s ) \n",frt,((double)*nnz),(((double)*nnz)/frt));
+
+	if(IA && JA)
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER))
+		RSB_SWAP(rsb_coo_idx_t*,*IA,*JA);
+
+	errval = RSB_ERR_NO_ERROR;
+prerr:
+	goto ret;
+err:
+#if RSB_20120321_IOBUFFERING
+	RSB_CONDITIONAL_FREE(iobuf);
+#endif /* RSB_20120321_IOBUFFERING */
+	/* FIXME: this will free also already allocated arrays */
+	if(!aia) if(IA)
+	RSB_CONDITIONAL_FREE(*IA);
+	if(!aja) if(JA)
+	RSB_CONDITIONAL_FREE(*JA);
+	if(!ava){
+	#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+	if(dval) RSB_CONDITIONAL_FREE(*dval);
+	#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+	#ifdef RSB_NUMERICAL_TYPE_FLOAT
+	if(fval) RSB_CONDITIONAL_FREE(*fval);
+	#endif /* RSB_NUMERICAL_TYPE_FLOAT */
+	#ifdef RSB_NUMERICAL_TYPE_INT
+	if(ival) RSB_CONDITIONAL_FREE(*ival);
+	#endif /* RSB_NUMERICAL_TYPE_INT */
+	#ifdef RSB_NUMERICAL_TYPE_CHAR
+	if(cval) RSB_CONDITIONAL_FREE(*cval);
+	#endif /* RSB_NUMERICAL_TYPE_CHAR */
+	#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX
+	if(zval) RSB_CONDITIONAL_FREE(*zval);
+	#endif /* RSB_NUMERICAL_TYPE_FLOAT_COMPLEX */
+	#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX
+	if(Zval) RSB_CONDITIONAL_FREE(*Zval);
+	#endif /* RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX */
+	}
+	errval = RSB_ERR_GENERIC_ERROR;
+ret:
+	RSB_DO_ERR_RETURN(errval);
+}
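+
+/* An illustrative call sketch for rsb__util_mm_load_matrix_f (not a library
+ * sample; note that *nnz shall be zero on entry unless user-allocated arrays
+ * of that size are being passed in):
+ *
+ *	rsb_coo_idx_t *IA = NULL, *JA = NULL; void *VA = NULL;
+ *	rsb_coo_idx_t m = 0, k = 0; rsb_nnz_idx_t nnz = 0;
+ *	rsb_err_t errval = rsb__util_mm_load_matrix_f("A.mtx", &IA, &JA, &VA,
+ *		&m, &k, &nnz, RSB_NUMERICAL_TYPE_DEFAULT, RSB_FLAG_NOFLAGS,
+ *		NULL, NULL);
+ */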
+#endif /* RSB_WITH_MM */
+
+rsb_err_t rsb__do_util_get_matrix_dimensions(const char * filename, size_t * cols, size_t * rows, size_t * nnzp, rsb_flags_t*flagsp)
+{
+	/**
+	 * \ingroup gr_internals
+	 *
+	 * FIXME : needs error handling
+	 */
+	rsb_coo_idx_t m,k;
+	rsb_nnz_idx_t nnz;
+	rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_DEFAULT;
+	rsb_bool_t is_symmetric = RSB_BOOL_FALSE;
+	rsb_bool_t is_hermitian = RSB_BOOL_FALSE;
+	rsb_bool_t is_pattern = RSB_BOOL_FALSE;
+	rsb_bool_t is_lower = RSB_BOOL_FALSE;
+	rsb_bool_t is_upper = RSB_BOOL_FALSE;
+	rsb_bool_t is_vector = RSB_BOOL_FALSE;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	errval = rsb__util_mm_info_matrix_f(filename,&m,&k,&nnz,&typecode,&is_symmetric,&is_hermitian,&is_pattern,&is_lower,&is_upper,&is_vector);
+	if(cols)
+		*cols = k;
+	if(rows)
+		*rows = m;
+	if(nnzp)
+		*nnzp = nnz;
+	if(is_symmetric)
+		RSB_DO_FLAG_ADD(flags,RSB_FLAG_SYMMETRIC);
+	if(is_hermitian)
+		RSB_DO_FLAG_ADD(flags,RSB_FLAG_HERMITIAN);
+	/* if(is_pattern) ... */
+	if(is_lower)
+		RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER);
+	if(is_upper)
+		RSB_DO_FLAG_ADD(flags,RSB_FLAG_UPPER);
+	if(flagsp)
+		*(flagsp) = flags;
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__util_mm_load_matrix_f_as_csr(const char *filename, rsb_nnz_idx_t ** INDX, rsb_coo_idx_t ** JA, void **VA, rsb_coo_idx_t *m, rsb_coo_idx_t *k , rsb_nnz_idx_t *nnz, rsb_type_t typecode, rsb_flags_t flags/*, rsb_bool_t *is_lowerp, rsb_bool_t *is_upperp*/)
+{
+	/**
+	 * \ingroup gr_internals
+	 * FIXME : should optimize
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+       	struct rsb_coo_matrix_t coo;
+
+	RSB_DO_FLAG_ADD(flags,RSB_FLAG_WANT_BCSS_STORAGE);
+	*INDX = NULL;
+	*JA = NULL;
+	*VA = NULL;
+	RSB_BZERO_P(&coo);
+	coo.typecode = typecode;
+       	errval = rsb_util_mm_load_coo_matrix(filename,&coo);
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+	errval = rsb_util_sort_row_major_inner(coo.VA,coo.IA,coo.JA,coo.nnz,coo.nr,coo.nc,coo.typecode,flags);
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+	errval = rsb__allocate_csr_arrays_from_coo_sorted(coo.VA,coo.IA,coo.JA,coo.nnz,coo.nr,coo.nc,coo.typecode,VA,JA,INDX);
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+	*m = coo.nr; *k = coo.nc; *nnz = coo.nnz;
+err:
+	rsb__destroy_coo_matrix_t(&coo);
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__util_mm_load_matrix_f_as_csc(const char *filename, rsb_nnz_idx_t ** INDX, rsb_coo_idx_t ** IA, void **VA, rsb_coo_idx_t *m, rsb_coo_idx_t *k , rsb_nnz_idx_t *nnz, rsb_type_t typecode, rsb_flags_t flags/*, rsb_bool_t *is_lowerp, rsb_bool_t *is_upperp*/)
+{
+	/** 
+	 * \ingroup gr_internals
+	 * FIXME : should optimize
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+       	struct rsb_coo_matrix_t coo;
+
+	RSB_DO_FLAG_ADD(flags,RSB_FLAG_WANT_BCSS_STORAGE);
+	*INDX = NULL;*IA = NULL;*VA = NULL;
+	RSB_BZERO_P(&coo);
+	coo.typecode = typecode;
+       	errval = rsb_util_mm_load_coo_matrix(filename,&coo);
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+	errval = rsb_util_sort_column_major(coo.VA,coo.IA,coo.JA,coo.nnz,coo.nr,coo.nc,coo.typecode,flags);
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+	errval = rsb__allocate_csc_arrays_from_coo_sorted(coo.VA,coo.IA,coo.JA,coo.nnz,coo.nr,coo.nc,coo.typecode,VA,IA,INDX);
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+	*m = coo.nr; *k = coo.nc; *nnz = coo.nnz;
+err:
+	rsb__destroy_coo_matrix_t(&coo);
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb_util_mm_fill_arrays_for_csc(const char *filename, rsb_nnz_idx_t * INDX, rsb_coo_idx_t * IA, void *VA, rsb_type_t typecode, rsb_flags_t flags)
+{
+	/** 
+	 * \ingroup gr_internals
+	 * FIXME : should optimize
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+       	struct rsb_coo_matrix_t coo;
+	rsb_nnz_idx_t *iINDX = NULL;
+	rsb_coo_idx_t *iIA = NULL;
+	void *iVA = NULL;
+
+	RSB_DO_FLAG_ADD(flags,RSB_FLAG_WANT_BCSS_STORAGE);
+	if(!INDX || !IA || !VA)
+		return RSB_ERR_BADARGS;
+
+	RSB_BZERO_P(&coo);
+	coo.typecode = typecode;
+       	errval = rsb_util_mm_load_coo_matrix(filename,&coo);
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+	errval = rsb_util_sort_column_major(coo.VA,coo.IA,coo.JA,coo.nnz,coo.nr,coo.nc,coo.typecode,flags);
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+	errval = rsb__allocate_csc_arrays_from_coo_sorted(coo.VA,coo.IA,coo.JA,coo.nnz,coo.nr,coo.nc,coo.typecode,&iVA,&iIA,&iINDX);
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+	errval = rsb__copy_css_arrays(iVA,iINDX,iIA,coo.nnz,coo.nc,typecode,VA,INDX,IA);
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+err:
+	rsb__destroy_coo_matrix_t(&coo);
+	RSB_DO_ERR_RETURN(errval)
+}
+
+size_t rsb_sys_filesize(const char *filename)
+{
+	/**
+	 * file size, in bytes
+	 * FIXME
+	 * TODO : to sys.c
+	 * TODO: rsb__do_util_get_matrix_dimensions shall invoke this.
+	 * */
+#ifdef RSB_HAVE_SYS_STAT_H
+	struct stat ss;
+#endif /* RSB_HAVE_SYS_STAT_H */
+	if(!filename)
+		goto err;
+#ifdef RSB_HAVE_SYS_STAT_H
+	stat(filename,&ss);
+	return ss.st_size;
+#else /* RSB_HAVE_SYS_STAT_H */
+#endif /* RSB_HAVE_SYS_STAT_H */
+err:
+	return 0;
+}
+
+rsb_err_t rsb__do_file_mtx_get_dims(const char * filename, rsb_coo_idx_t* nrp, rsb_coo_idx_t *ncp, rsb_coo_idx_t *nzp, rsb_flags_t*flagsp)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	size_t nrA = 0,ncA = 0,nzA = 0;
+	rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+
+	errval = rsb__do_util_get_matrix_dimensions(filename,&ncA,&nrA,&nzA,&flags);
+
+	if(RSB_SOME_ERROR(errval))
+		goto ret;
+
+	/* RSB_STDOUT("%d / %d  %d / %d  %d / %d\n",nrA,RSB_MAX_MATRIX_DIM,ncA,RSB_MAX_MATRIX_DIM,nnzA,RSB_MAX_MATRIX_NNZ); */
+	if( nrp && RSB_INVALID_COO_INDEX(nrA) )
+		errval |= RSB_ERR_LIMITS;
+	if( ncp && RSB_INVALID_COO_INDEX(ncA) )
+		errval |= RSB_ERR_LIMITS;
+	if( nzp && RSB_INVALID_NNZ_INDEX(nzA) )
+		errval |= RSB_ERR_LIMITS;
+	if(nrp)
+		*nrp = (rsb_coo_idx_t)nrA;
+	if(ncp)
+		*ncp = (rsb_coo_idx_t)ncA;
+	if(nzp)
+		*nzp = (rsb_nnz_idx_t)nzA;
+	if(flagsp)
+		*flagsp = (rsb_flags_t)flags;
+	/* FIXME: need overflow check here */
+ret:
+	return errval;
+}
+
+/* @endcond */
diff --git a/rsb_mio.h b/rsb_mio.h
new file mode 100644
index 0000000..a0bd7a5
--- /dev/null
+++ b/rsb_mio.h
@@ -0,0 +1,52 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains matrix I/O functions.
+ * */
+
+#ifndef RSB_IO_H_INCLUDED
+#define RSB_IO_H_INCLUDED
+
+/*#include "rsb_internals.h"*/
+/*#include "rsb.h"*/
+#include "rsb_common.h"
+
+#ifdef RSB_WITH_MM
+rsb_err_t rsb__util_mm_load_matrix_f(const char *fn, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, void **VA, rsb_coo_idx_t *m, rsb_coo_idx_t *k , rsb_nnz_idx_t *nnz, rsb_type_t typecode, rsb_flags_t flags, rsb_bool_t *is_lowerp, rsb_bool_t *is_upperp);
+rsb_err_t rsb__util_mm_load_vector_f(const char *fn, void **VA, rsb_nnz_idx_t *nnz, rsb_type_t typecode);
+rsb_err_t rsb__util_mm_load_matrix_f_as_csr(const char *fn, rsb_nnz_idx_t ** INDX, rsb_coo_idx_t ** JA, void **VA, rsb_coo_idx_t *m, rsb_coo_idx_t *k , rsb_nnz_idx_t *nnz, rsb_type_t typecode, rsb_flags_t flags);
+rsb_err_t rsb__util_mm_load_matrix_f_as_csc(const char *fn, rsb_nnz_idx_t ** INDX, rsb_coo_idx_t ** IA, void **VA, rsb_coo_idx_t *m, rsb_coo_idx_t *k , rsb_nnz_idx_t *nnz, rsb_type_t typecode, rsb_flags_t flags);
+rsb_err_t rsb__do_util_get_matrix_dimensions(const char * filename, size_t * cols, size_t * rows, size_t * nnzp, rsb_flags_t*flagsp);
+rsb_err_t rsb__util_mm_info_matrix_f(const char *fn,  rsb_coo_idx_t *m, rsb_coo_idx_t *k , rsb_nnz_idx_t *nnz, rsb_type_t *typecode, rsb_bool_t * is_symmetric, rsb_bool_t * is_hermitian, rsb_bool_t * is_pattern, rsb_bool_t * is_lower, rsb_bool_t * is_upper , rsb_bool_t * is_vector );
+rsb_err_t rsb_util_mm_load_coo_matrix(const char *filename, struct rsb_coo_matrix_t * cmp);
+rsb_err_t rsb_util_mm_fill_arrays_for_csc(const char *filename, rsb_nnz_idx_t * INDX, rsb_coo_idx_t * IA, void *VA, rsb_type_t typecode, rsb_flags_t flags);
+size_t rsb_sys_filesize(const char *filename);
+int rsb_fscanf(FILE * fd,const char * fs,rsb_coo_idx_t *IV, rsb_coo_idx_t *JV, void * VAR, void * VAI);
+char * rsb_fgets(char* RSB_RESTRICT buf, int len, FILE * RSB_RESTRICT fd);
+rsb_err_t rsb__do_file_mtx_get_dims(const char * filename, rsb_coo_idx_t* nrp, rsb_coo_idx_t *ncp, rsb_coo_idx_t *nzp, rsb_flags_t*flagsp);
+#endif /* RSB_WITH_MM */
+#endif /* RSB_IO_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_misc.m4 b/rsb_misc.m4
new file mode 100644
index 0000000..c5c8917
--- /dev/null
+++ b/rsb_misc.m4
@@ -0,0 +1,981 @@
+dnl
+dnl	@author: Michele Martone
+dnl
+dnl	forloop(TOKEN,LOWERI,UPPERI,ACTION)
+dnl	-----------------------------------
+dnl	Expands every occurrence of TOKEN in ACTION to each numerical
+dnl	value in the [LOWERI,UPPERI] interval.
+dnl	Therefore, ACTION is expanded [UPPERI-LOWERI+1] times, with the
+dnl	varying index value substituted for TOKEN.
+dnl
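+dnl	e.g. (illustrative): forloop(`i',`1',`3',`i ') expands to `1 2 3 '.
+dnl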
+divert(`-1')dnl
+# forloop(var, from, to, stmt) - simple version
+dnl define(`forloop', `pushdef(`$1', `$2')_forloop($@)popdef(`$1')')dnl
+define(`forloop',`pushdef(`$1',`$2')_forloop($@)popdef(`$1')')dnl
+dnl define(`_forloop',dnl
+dnl        `$4`'ifelse($1, `$3', `', `define(`$1', incr($1))$0($@)')')dnl
+define(`_forloop',dnl
+`$4`'ifelse($1,`$3',`',`define(`$1',incr($1))$0($@)')')dnl
+divert`'dnl
+dnl	this `foreach' macro is the one in the ./examples  directory distributed with the M4 package
+dnl
+dnl
+dnl
+include(`rsb_config.m4')dnl		we include essential directives there
+dnl
+define(`RSB_M4_HEADER_MESSAGE',`
+include(`rsb_license_header.inc')dnl
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+dnl File generated syscmd(`date')
+ */
+')dnl
+dnl
+dnl
+dnl
+dnl
+dnl	foreach(TOKEN,VALUES,ACTION)
+dnl	----------------------------
+dnl	Expands each occurrence of TOKEN in ACTION to each value in the
+dnl	VALUES list.
+dnl	Therefore, ACTION is expanded a number of times equal to the
+dnl	number of items in the VALUES list.
+dnl
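+dnl	e.g. (illustrative): foreach(`x',(`a',`b',`c'),`x;') expands to `a;b;c;'.
+dnl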
+divert(`-1')dnl
+# foreach(x, (item_1, item_2, ..., item_n), stmt)
+#   parenthesized list, simple version
+define(`foreach',`pushdef(`$1')_foreach($@)popdef(`$1')')dnl
+define(`_arg1',`$1')dnl
+define(`_foreach',`ifelse(`$2',`()',`',dnl
+`define(`$1',_arg1$2)$3`'$0(`$1',(shift$2),`$3')')')dnl
+divert`'dnl
+dnl
+dnl 
+dnl	tolowercase(TOKEN)
+dnl	------------------
+dnl	Expands TOKEN to lowercase.
+dnl 
+define(`tolowercase',`translit($1,`A-Z',`a-z')')dnl
+dnl 
+dnl 
+dnl	touppercase(TOKEN)
+dnl	------------------
+dnl	Expands TOKEN to uppercase.
+dnl 
+define(`touppercase',`translit($1,`a-z',`A-Z')')dnl
+dnl 
+dnl 
+dnl	touppercase_(TOKEN)
+dnl	------------------
+dnl	Expands TOKEN to uppercase.
+dnl
+changequote([,])dnl
+define([touppercase_],dnl
+[dnl comment
+translit([$1], [a-z], [A-Z])
+])dnl
+dnl 
+dnl
+dnl	singlequote(TOKEN)
+dnl	------------------
+dnl	Expands to TOKEN surrounded by single quotes.
+dnl	That is, 'TOKEN'.
+define([singlequote],
+[dnl comment
+'$1'])dnl
+dnl 
+dnl 
+dnl	tolowercase_(TOKEN)
+dnl	------------------
+dnl	Expands TOKEN to lowercase
+dnl	FIXME : remove this
+dnl
+define([tolowercase_],
+[dnl comment
+translit([$1], [A-Z], [a-z])
+])dnl
+dnl 
+dnl 
+changequote(`,')dnl
+dnl 
+dnl	RSB_M4_CHOPTRAILINGSPACES(STRING)
+dnl	-------------------------
+dnl	FIXME : document
+dnl
+define(`RSB_M4_CHOPTRAILINGSPACES',`dnl
+pushdef(`type',$1)dnl
+patsubst($1,` *$',)`'dnl
+popdef(`type')dnl
+')dnl
+dnl 
+dnl	RSB_M4_CHOPSPACES(STRING)
+dnl	-------------------------
+dnl	Expands to the input STRING with underscores ('_') substituted
+dnl	for spaces (' ').
+dnl
+define(`RSB_M4_CHOPSPACES',`dnl
+pushdef(`type',$1)dnl
+patsubst($1,` ',_)`'dnl
+popdef(`type')dnl
+')dnl
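+dnl	e.g. (illustrative): RSB_M4_CHOPSPACES(`double complex') expands to `double_complex'.
+dnl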
+dnl
+dnl	
+dnl	RSB_M4_TRANSPOSITION_CODE(transposition)
+dnl	----------------------
+dnl	DOCUMENT ME
+dnl
+define(`RSB_M4_TRANSPOSITION_CODE',`dnl
+pushdef(`transposition',$1)dnl
+`'touppercase(transposition)`'dnl
+popdef(`transposition')dnl
+')dnl
+dnl
+dnl	
+dnl	RSB_M4_TYPE_CODE(TYPE)
+dnl	----------------------
+dnl	Expands to a code assigned to the specified numerical type.
+dnl
+define(`RSB_M4_TYPE_CODE',`dnl
+pushdef(`type',$1)dnl
+RSB_M4_CHOPSPACES(type)dnl
+popdef(`type')dnl
+')dnl
+dnl 
+dnl	RSB_M4_HAVE_TYPE(TYPE)
+dnl	----------------------
+dnl	Expands to 1 if TYPE is among the configured numerical types
+dnl	(WANT_TYPES), to 0 otherwise.
+dnl
+define(`RSB_M4_HAVE_TYPE',`dnl
+pushdef(`type',$1)dnl
+RSB_M4_MEMBER(mtype,WANT_TYPES)dnl
+popdef(`type')dnl
+')dnl
+dnl 
+dnl
+dnl
+dnl	RSB_M4_HAVE_TYPE_PREPROCESSOR_SYMBOL(TYPE)
+dnl	------------------------------------------
+dnl	Converts a matrix type code in a preprocessor symbol used to
+dnl	indicate the type availability.
+dnl
+dnl #define RSB_M4_HAVE_TYPE_PREPROCESSOR_SYMBOL(mop)
+dnl
+define(`RSB_M4_HAVE_TYPE_PREPROCESSOR_SYMBOL',`dnl
+pushdef(`type',$1)`'dnl
+`RSB_HAVE_TYPE_'touppercase( RSB_M4_CHOPSPACES(type) )dnl
+popdef(`type')`'dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_TYPE_INDEX_PREPROCESSOR_SYMBOL(type)
+dnl	--------------------------------------------
+dnl	Converts a matrix type code in a preprocessor symbol used to
+dnl	index various mop-related arrays.
+dnl
+define(`RSB_M4_TYPE_INDEX_PREPROCESSOR_SYMBOL',`dnl
+pushdef(`type',$1)`'dnl
+`RSB_TYPE_INDEX_'touppercase( RSB_M4_CHOPSPACES(type) )dnl
+popdef(`type')`'dnl
+')dnl
+dnl
+dnl 
+dnl
+dnl	RSB_M4_OPTYPE_INDEX_PREPROCESSOR_SYMBOL(MOP)
+dnl	--------------------------------------------
+dnl	Converts a matrix operation code in a preprocessor symbol used to
+dnl	index various mop-related arrays.
+dnl
+define(`RSB_M4_OPTYPE_INDEX_PREPROCESSOR_SYMBOL',`dnl
+pushdef(`mop',$1)`'dnl
+`RSB_OPTYPE_INDEX_'touppercase( RSB_M4_CHOPSPACES(mop) )dnl
+popdef(`mop')`'dnl
+')dnl
+dnl
+dnl 
+dnl
+dnl	RSB_M4_HAVE_OPTYPE_PREPROCESSOR_SYMBOL(MOP)
+dnl	-------------------------------------------
+dnl	Converts a matrix operation code in a preprocessor symbol used to
+dnl	indicate the operation availability.
+dnl
+dnl #define RSB_M4_HAVE_OPTYPE_PREPROCESSOR_SYMBOL(mop)
+dnl
+define(`RSB_M4_HAVE_OPTYPE_PREPROCESSOR_SYMBOL',`dnl
+pushdef(`mop',$1)`'dnl
+`RSB_HAVE_OPTYPE_'touppercase( RSB_M4_CHOPSPACES(mop) )dnl
+popdef(`mop')`'dnl
+')dnl
+dnl
+dnl
+dnl	RSB_M4_DEBUGINFO(``MACRO_SYMBOL'')dnl
+dnl	-------------------------------------
+dnl	Will expand to a C comment stating debug info about the given macro.
+dnl
+dnl	e.g.:
+dnl	RSB_M4_DEBUGINFO(``RSB_M4_UNROLL_KERNEL'')dnl
+dnl
+dnl
+define(`RSB_M4_DEBUGINFO',dnl
+ifdef(`RSB_M4_DEBUG',`/* generated by the $1 macro */'
+,`')dnl
+)dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_SPACED_LIST
+dnl	-----------------
+dnl	Expands the given list inserting spaces between each consecutive element.
+dnl
+define(`RSB_M4_SPACED_LIST',`dnl
+patsubst(`'dnl
+patsubst(`'dnl
+foreach(`listel',$1,listel )`'dnl
+,` $',`')`'dnl
+,` ',` ')`'dnl
+')dnl
+dnl
+dnl
+dnl	RSB_M4_COMMA_LIST
+dnl	-----------------
+dnl	Expands the given list inserting commas between each consecutive element.
+dnl
+define(`RSB_M4_COMMA_LIST',`dnl
+patsubst(`'dnl
+patsubst(`'dnl
+foreach(`listel',$1,listel )`'dnl
+,` $',`')`'dnl
+,` ',`,')`'dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_QUOTED_COMMA_LIST
+dnl	------------------------
+dnl
+define(`RSB_M4_QUOTED_COMMA_LIST',`dnl
+dnl
+patsubst(`'dnl
+patsubst(`'dnl
+foreach(`listel',$1,"listel" )`'dnl
+,` $',`')`'dnl
+,` ',`,')`'dnl
+')dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_ARG_TO_ACTUAL_ARG',`patsubst($1,`.*\(\<[a-zA-Z_0-9]+$\)',`\1')')dnl
+dnl
+dnl	RSB_M4_ARGS_TO_ACTUAL_ARGS
+dnl	--------------------------
+dnl	Takes a C prototype string and cleans it from the type and array declarators,
+dnl	making from it an argument list.
+dnl	e.g.:
+dnl	RSB_M4_ARGS_TO_ACTUAL_ARGS(`const struct rsb_mtx_t * m, const struct rsb_options_t *o, const void * rhs, void * out')
+dnl	=> m, o, rhs, out
+dnl
+dnl was:
+dnl `'patsubst( patsubst( foreach(`arg',$1,`patsubst(patsubst(arg`',`.+[ *]',` '),`\[.*\]',`')')`', `^ ', `'),` ',`,')`'dnl
+dnl
+define(`RSB_M4_ARGS_TO_ACTUAL_ARGS',`dnl
+dnl
+dnl	WARNING : this is THIN ICE :)
+pushdef(`firstarg',`0')dnl
+foreach(`arg',`$1',`ifelse(firstarg,`0',`pushdef(`firstarg',1)',`,')`'RSB_M4_ARG_TO_ACTUAL_ARG(arg)')`'dnl
+ifelse(firstarg,`1',`popdef(`firstarg')')dnl
+popdef(`firstarg')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_ACTUAL_ARGS_APPLY_MEMBERSHIP
+dnl	-----------------------------------
+dnl	Takes a string extracted from applying RSB_M4_ARGS_TO_ACTUAL_ARGS, and then
+dnl	prepending patterns looking as rsb_mtx_t members with a "mtxAp->".
+dnl
+dnl	e.g.:
+dnl	RSB_M4_ACTUAL_ARGS_APPLY_MEMBERSHIP(`const struct rsb_mtx_t * m, const struct rsb_options_t *o, const void * rhs, void * out, const int rpntr')
+dnl	=> m, o, rhs, out, mtxAp->rpntr
+dnl
+define(`RSB_M4_ACTUAL_ARGS_APPLY_MEMBERSHIP',`dnl
+dnl
+dnl	WARNING : this is THIN ICE :)
+dnl	patsubst(`$@',`\<flags\>\|\<rpntr\>\|\<cpntr\>\|\<bindx\>\|\<bpntr\>\|\<VA\>\|\<indptr\>\|\<Mdim\>\|\<mdim\>\|\<br\>\|\<bc\>\|\<roff\>\|\<coff\>\|\<nnz\>',`mtxAp->\&')`'dnl was \0, but gnu m4 told me to use \&
+ifelse(RSB_M4_WANT_20110206_BOUNDED_BOX_PATCH,1,`dnl
+	patsubst(`patsubst(`patsubst(`patsubst(`$@',`\<flags\>\|\<rpntr\>\|\<cpntr\>\|\<bpntr\>\|\<VA\>\|\<indptr\>\|\<Mdim\>\|\<mdim\>\|\<br\>\|\<bc\>\|\<roff\>\|\<coff\>\|\<nnz\>',`mtxAp->\&')',`\<bindx\>',(citype*)`mtxAp->\&')',`\<br\>',`broff-mtxAp->roff')',`\<bc\>',`bm')`'dnl was \0, but gnu m4 told me to use \&
+',`dnl
+	patsubst(`patsubst(`$@',`\<flags\>\|\<rpntr\>\|\<cpntr\>\|\<bpntr\>\|\<VA\>\|\<indptr\>\|\<Mdim\>\|\<mdim\>\|\<br\>\|\<bc\>\|\<roff\>\|\<coff\>\|\<nnz\>',`mtxAp->\&')',`\<bindx\>',(citype*)`mtxAp->\&')`'dnl was \0, but gnu m4 told me to use \&
+')dnl
+')dnl
+dnl
+dnl
+dnl	RSB_M4_LIST_LENGTH(LIST)
+dnl	------------------------
+dnl	This macro returns the input list length.
+dnl
+define(`RSB_M4_LIST_LENGTH',`$#')dnl
+dnl
+dnl
+dnl	The following values specify the row and column unroll factors
+dnl	to be chosen when a specialized submatrix size function is not found.
+dnl
+dnl	FIXME : these default unrollings should be in!
+define(`RSB_M4_ITH_LIST_ELEMENT',`ifelse($#,1,$2,`ifelse($1,`1',$2,`RSB_M4_ITH_LIST_ELEMENT(decr($1),shift(shift($@)))')')')dnl
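+dnl	e.g. (illustrative): RSB_M4_ITH_LIST_ELEMENT(`2',`a',`b',`c') expands to `b'.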
+dnl
+define(`RSB_M4_MIDDLE_LIST_ELEMENT',`RSB_M4_ITH_LIST_ELEMENT(eval(($#+1)/2),$@)')dnl
+dnl
+define(`RSB_M4_LAST_LIST_ELEMENT',`RSB_M4_ITH_LIST_ELEMENT($#,$@)')dnl
+dnl
+define(`RSB_M4_FIRST_LIST_ELEMENT',`RSB_M4_ITH_LIST_ELEMENT(1,$@)')dnl
+dnl
+dnl	RSB_M4_EMPTY_LIST
+dnl	-----------------
+dnl	Gives 1 if the list is empty, otherwise 0
+dnl
+define(`RSB_M4_EMPTY_LIST',`ifelse($#,1,0)')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_FIRST
+dnl	------------
+dnl	This macro returns the first input argument.
+define(`RSB_M4_FIRST',`$1')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_MAX2
+dnl	-----------
+dnl	This macro returns the maximum of its 2 arguments.
+dnl
+define(`RSB_M4_MAX2',`ifelse(eval(`$1>$2'),1,`$1',`$2')')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_MIN2
+dnl	-----------
+dnl	This macro returns the minimum of its 2 arguments.
+dnl
+define(`RSB_M4_MIN2',`ifelse($2,,$1,`ifelse(eval(`$1<$2'),1,`$1',`$2')')')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_MAXN
+dnl	-----------
+dnl	This macro returns the maximum among its input arguments.
+dnl
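+dnl	e.g.: RSB_M4_MAXN(1,5,3) => 5
+dnl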
+define(`RSB_M4_MAXN',`ifelse($#,1,$1,`ifelse($#,2,`RSB_M4_MAX2($1,$2)',`RSB_M4_MAX2($1,RSB_M4_MAXN(shift($@)))')')')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_MINN
+dnl	-----------
+dnl	This macro returns the minimum among its input arguments.
+dnl
+define(`RSB_M4_MINN',`ifelse($2,,$1,`ifelse($#,2,`RSB_M4_MIN2($1,$2)',`RSB_M4_MIN2($1,RSB_M4_MINN(shift($@)))')')')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_EXCEPT
+dnl	-------------
+dnl	This macro processes the input arguments ($@) removing all occurrences of the first element ($1).
+dnl
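+dnl	e.g. (expansion sketched by hand): RSB_M4_EXCEPT(`a',`a',`b',`a',`c') => b,c
+dnl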
+define(`RSB_M4_EXCEPT',`pushdef(`GOT',`0')ifelse($#,0,,`foreach(`exel',($@),`ifelse(exel,$1,,`ifelse(GOT,`1',`,')`'exel`'ifelse(GOT,`0',`popdef(`GOT')pushdef(`GOT',`1')')')')')popdef(`GOT')')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_SORT
+dnl	-----------
+dnl	This macro sorts the input arguments.
+dnl
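+dnl	e.g. (expansion sketched by hand; ascending order, via RSB_M4_MINN):
+dnl	RSB_M4_SORT(3,1,2) => 1,2,3
+dnl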
+define(`RSB_M4_SORT',`ifelse($#,1,$1,`pushdef(`ALL',`$@')pushdef(`REST',RSB_M4_MINN(ALL))REST,RSB_M4_SORT(RSB_M4_EXCEPT(REST,ALL))popdef(`ALL')popdef(`REST')')')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_SAME
+dnl	-----------
+dnl
+define(`RSB_M4_SAME',`ifelse($1,$2,`1',`0')')dnl
+dnl
+dnl
+dnl	RSB_M4_OR
+dnl	---------
+dnl
+define(`RSB_M4_OR',`pushdef(`GOT',`0')foreach(`exel',($@),`ifelse(exel,`1',`ifelse(GOT,`0',`popdef(`GOT')pushdef(`GOT',`1')')')')GOT`'popdef(`GOT')dnl
+')dnl
+dnl
+dnl
+dnl	RSB_M4_AND
+dnl	----------
+dnl
+define(`RSB_M4_AND',`pushdef(`GOT',`1')foreach(`exel',($@),`ifelse(exel,`0',`ifelse(GOT,`1',`popdef(`GOT')pushdef(`GOT',`0')')')')GOT`'popdef(`GOT')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_XOR
+dnl	----------
+dnl
+define(`RSB_M4_XOR',`RSB_M4_NOT(RSB_M4_OR(RSB_M4_AND($1,$2),RSB_M4_AND(RSB_M4_NOT($1),RSB_M4_NOT($2))))')dnl
+dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_IMPLY
+dnl	------------
+dnl
+define(`RSB_M4_IMPLY',`RSB_M4_OR(RSB_M4_NOT($1),$2)')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_NOT
+dnl	----------
+dnl
+define(`RSB_M4_NOT',`ifelse($1,`0',`1',`0')`'foreach(`exel',(shift($@)),`ifelse(exel,`0',`,1',`,0')')')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_MEMBER
+dnl	-------------
+dnl	if $1 is among (shift($@)), returns 1, otherwise 0
+dnl
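+dnl	e.g.: RSB_M4_MEMBER(`b',`a',`b',`c') => 1 ; RSB_M4_MEMBER(`d',`a',`b',`c') => 0
+dnl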
+define(`RSB_M4_MEMBER',`pushdef(`GOT',`0')foreach(`exel',(shift($@)),`ifelse(exel,$1,`ifelse(GOT,`0',`popdef(`GOT')pushdef(`GOT',`1')')')')GOT`'popdef(`GOT')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_ALL_LIST_ARGS
+dnl	--------------------
+dnl	Expands to all of its macro arguments; used as a syntactic trick.
+define(`RSB_M4_ALL_LIST_ARGS',`$@')dnl
+dnl
+dnl
+dnl
+dnl
+dnl	RSB_M4_LIST_PUSH_BACK
+dnl	---------------------
+dnl	Gives back the first argument (a parenthesized list) with the second argument appended.
+dnl	Will not work with an empty list.
+dnl
+define(`RSB_M4_LIST_PUSH_BACK',`RSB_M4_ALL_LIST_ARGS$1,$2')dnl
+dnl
+dnl
+dnl
+dnl
+dnl	ROW AND COLUMN UNROLL FACTORS
+dnl	-----------------------------
+dnl	Unroll count / type parameterizations
+dnl
+define(`RSB_M4_ROWS_UNROLL',(RSB_M4_SORT(WANT_ROW_UNLOOP_FACTORS)))dnl
+define(`RSB_M4_COLUMNS_UNROLL',(RSB_M4_SORT(WANT_COLUMN_UNLOOP_FACTORS)))dnl
+define(`RSB_M4_MATRIX_TYPES',(WANT_TYPES))dnl
+define(`RSB_M4_ALL_MATRIX_TYPES',(WANT_MATRIX_ALL_TYPES))dnl
+dnl
+define(`RSB_M4_INVALID_TYPE',`invalid_type')dnl
+define(`RSB_M4_DEFAULT_TYPE',RSB_M4_FIRST(WANT_TYPES))dnl
+define(`RSB_M4_DEFAULT_POSSIBLY_INTEGER_TYPE',ifelse(RSB_M4_MEMBER(`int',WANT_TYPES),`1',`int',RSB_M4_FIRST(WANT_TYPES)))dnl
+dnl
+define(`RSB_M4_DEFAULT_SYMMETRY',RSB_M4_FIRST(RSB_M4_WANT_MATRIX_SYMMETRY))dnl
+define(`RSB_M4_DEFAULT_TRANSPOSITION',RSB_M4_FIRST(RSB_M4_WANT_MATRIX_TRANSPOSITIONS))dnl
+dnl
+define(`RSB_M4_MATRIX_OPS',(WANT_MATRIX_OPS))dnl
+dnl
+define(`RSB_M4_TRANS_N',`n')dnl
+define(`RSB_M4_TRANS_T',`t')dnl
+define(`RSB_M4_TRANS_C',`c')dnl
+dnl
+define(`RSB_M4_WANT_MATRIX_TRANSPOSITIONS',`RSB_M4_TRANS_N,RSB_M4_TRANS_T,RSB_M4_TRANS_C')dnl
+dnl define(`RSB_M4_WANT_MATRIX_TRANSPOSITIONS',``n',`t',`h'')dnl
+dnl define(`RSB_M4_WANT_MATRIX_TRANSPOSITIONS',``n',`t'')dnl
+dnl define(`RSB_M4_WANT_MATRIX_TRANSPOSITIONS',``n'')dnl
+define(`RSB_M4_SYMBOL_SYMMETRIC',`S')dnl
+define(`RSB_M4_SYMBOL_UNSYMMETRIC',`U')dnl
+define(`RSB_M4_SYMBOL_HERMITIAN',`H')dnl
+define(`RSB_M4_WANT_MATRIX_SYMMETRY',`RSB_M4_SYMBOL_UNSYMMETRIC,RSB_M4_SYMBOL_SYMMETRIC,RSB_M4_SYMBOL_HERMITIAN')dnl
+dnl define(`RSB_M4_WANT_MATRIX_SYMMETRY',`RSB_M4_SYMBOL_UNSYMMETRIC')dnl
+dnl define(`RSB_M4_WANT_MATRIX_SYMMETRY',`RSB_M4_SYMBOL_SYMMETRIC')dnl
+dnl
+define(`RSB_M4_IS_NOT_UNSYMMETRIC',`pushdef(`k_symmetry',$1)RSB_M4_NOT(RSB_M4_SAME(k_symmetry,RSB_M4_SYMBOL_UNSYMMETRIC))popdef(`k_symmetry')')dnl
+define(`RSB_M4_IS_UNSYMMETRIC',`pushdef(`k_symmetry',$1)RSB_M4_SAME(k_symmetry,RSB_M4_SYMBOL_UNSYMMETRIC)popdef(`k_symmetry')')dnl
+define(`RSB_M4_IS_SYMMETRIC',`pushdef(`k_symmetry',$1)RSB_M4_SAME(k_symmetry,RSB_M4_SYMBOL_SYMMETRIC)popdef(`k_symmetry')')dnl
+dnl
+dnl
+define(`RSB_M4_MATRIX_TRANSPOSITIONS',(RSB_M4_WANT_MATRIX_TRANSPOSITIONS))dnl
+dnl
+define(`RSB_M4_MATRIX_UPLO_TYPES',(`u',`l',`g'))dnl
+dnl define(`RSB_M4_MATRIX_UPLO_TYPES',(`g'))dnl
+dnl
+dnl
+define(`RSB_M4_MATRIX_DIAGONAL_DENOMINATION',``diagonal 'ifelse(RSB_M4_IS_DIAGONAL_IMPLICIT(k_diagonal),1,`implicit',`explicit')')dnl
+define(`RSB_M4_MATRIX_DIAGONAL_TYPES',(`e',`i'))dnl
+define(`RSB_M4_DEFAULT_DIAGONAL_TYPE',`e')dnl
+define(`RSB_M4_IS_DIAGONAL_IMPLICIT',`pushdef(`k_diagonal',$1)RSB_M4_SAME(k_diagonal,`i')popdef(`k_diagonal')')dnl
+dnl
+dnl define(`RSB_M4_MATRIX_COORDINATE_TYPES',(`rsb_coo_idx_t',`rsb_half_idx_t'))dnl	FIXME : new
+dnl define(`RSB_M4_MATRIX_COORDINATE_TYPES',(`rsb_half_idx_t'))dnl	FIXME : new
+define(`RSB_M4_MATRIX_COORDINATE_TYPES',(`rsb_coo_idx_t'ifelse(WANT_HALFWORD_INDICES,`yes',`,rsb_half_idx_t',`')))dnl	FIXME : new
+define(`RSB_M4_WANT_SPSM_DIAG_CHECK',ifelse(WANT_SPSM_DIAG_CHECK,`yes',`1',`0'))dnl	FIXME : new
+dnl define(`RSB_M4_WANT_SPSM_DIAG_CHECK',1)dnl	FIXME : new
+define(RSB_M4_TRANSPOSITION_OP_EFFECT,`dnl
+dnl
+pushdef(`transposition',$1)`'dnl
+pushdef(`operand',$2)`'dnl
+dnl
+`{'dnl
+ifelse(transposition,RSB_M4_TRANS_T,`'operand`^T',`dnl
+ifelse(transposition,RSB_M4_TRANS_C,`'operand`^H',`dnl
+ifelse(transposition,RSB_M4_TRANS_N,`'operand`',dnl
+`'operand`')')')`'dnl
+`}'dnl
+dnl
+dnl
+popdef(`operand')`'dnl
+popdef(`transposition')`'dnl
+')dnl
+dnl
+define(`RSB_M4_REALT',`dnl
+pushdef(`type',$1)`'dnl
+dnl
+ifelse(type,`long double complex',`long double')`'dnl
+ifelse(type,`double complex',`double')`'dnl
+ifelse(type,`float complex',`float')`'dnl
+ifelse(type,`long double',`long double')`'dnl
+ifelse(type,`double',`double')`'dnl
+ifelse(type,`float',`float')`'dnl
+ifelse(type,`int',`int')`'dnl
+dnl FIXME : and, for other, non canonical types ? FIXME 
+dnl
+popdef(`type')`'dnl
+dnl
+')dnl
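+dnl	Maps a numerical type to its real counterpart, e.g.:
+dnl	RSB_M4_REALT(`double complex') => double
+dnl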
+dnl
+dnl
+dnl
+define(`RSB_M4_CREAL',`dnl
+pushdef(`type',$1)`'dnl
+pushdef(`value',$2)`'dnl
+dnl
+ifelse(type,`long double complex',`creall(value)')`'dnl
+ifelse(type,`double complex',`creal(value)')`'dnl
+ifelse(type,`float complex',`crealf(value)')`'dnl
+ifelse(type,`long double',`(value)')`'dnl
+ifelse(type,`double',`(value)')`'dnl
+ifelse(type,`float',`(value)')`'dnl
+ifelse(type,`int',`(value)')`'dnl
+dnl FIXME : and, for other, non canonical types ? FIXME 
+dnl
+popdef(`value')`'dnl
+popdef(`type')`'dnl
+dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_CIMAG',`dnl
+pushdef(`type',$1)`'dnl
+pushdef(`value',$2)`'dnl
+dnl
+ifelse(type,`long double complex',`cimagl(value)')`'dnl
+ifelse(type,`double complex',`cimag(value)')`'dnl
+ifelse(type,`float complex',`cimagf(value)')`'dnl
+ifelse(type,`long double',`(value)')`'dnl
+ifelse(type,`double',`(value)')`'dnl
+ifelse(type,`float',`(value)')`'dnl
+ifelse(type,`int',`(value)')`'dnl
+dnl FIXME : and, for other, non canonical types ? FIXME 
+dnl
+popdef(`value')`'dnl
+popdef(`type')`'dnl
+dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_BACKUP_SIZEOF',`dnl
+pushdef(`type',$1)`'dnl
+dnl
+ifelse(type,`double complex',`16')`'dnl
+ifelse(type,`float complex',`8')`'dnl
+ifelse(type,`double',`8')`'dnl
+ifelse(type,`float',`4')`'dnl
+ifelse(type,`int',`4')`'dnl
+dnl
+popdef(`type')`'dnl
+dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_ASSIGN',`dnl
+pushdef(`dtype',$1)`'dnl
+pushdef(`stype',$2)`'dnl
+pushdef(`lval',$3)`'dnl
+pushdef(`rval',$4)`'dnl
+dnl
+dnl
+ifelse(RSB_M4_AND(RSB_M4_IS_COMPLEX_TYPE(dtype),RSB_M4_NOT(RSB_M4_IS_COMPLEX_TYPE(stype))),1,`lval = rval + 0*I;',`')dnl
+ifelse(RSB_M4_AND(RSB_M4_NOT(RSB_M4_IS_COMPLEX_TYPE(dtype)),RSB_M4_IS_COMPLEX_TYPE(stype)),1,`lval = RSB_M4_CREAL(stype,rval);',`')dnl
+ifelse(RSB_M4_NOT(RSB_M4_XOR(RSB_M4_IS_COMPLEX_TYPE(dtype),RSB_M4_IS_COMPLEX_TYPE(stype))),1,`lval = rval;',`')dnl
+dnl
+dnl FIXME : and, for other, non canonical types ? FIXME 
+dnl
+popdef(`rval')`'dnl
+popdef(`lval')`'dnl
+popdef(`stype')`'dnl
+popdef(`dtype')`'dnl
+dnl
+')dnl
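+dnl	e.g. (expansion sketched by hand; a complex-to-real assignment keeps the real part):
+dnl	RSB_M4_ASSIGN(`double',`double complex',`a',`b') => a = creal(b);
+dnl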
+dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_ABS',`dnl
+pushdef(`type',$1)`'dnl
+pushdef(`value',$2)`'dnl
+dnl
+ifelse(type,`long double complex',`cabsl(value)')`'dnl
+ifelse(type,`double complex',`cabs(value)')`'dnl
+ifelse(type,`float complex',`cabsf(value)')`'dnl
+ifelse(type,`long double',`fabsl(value)')`'dnl
+ifelse(type,`double',`fabs(value)')`'dnl
+ifelse(type,`float',`fabsf(value)')`'dnl
+ifelse(type,`int',`abs(value)')`'dnl
+dnl FIXME : and, for other, non canonical types ? FIXME 
+dnl
+popdef(`value')`'dnl
+popdef(`type')`'dnl
+dnl
+')dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_ABS_IF_1',`dnl
+pushdef(`type',$1)`'dnl
+pushdef(`value',$2)`'dnl
+pushdef(`condition',$3)`'dnl
+dnl
+ifelse(condition,`1',`RSB_M4_ABS(type,value)',`value')`'dnl
+dnl
+popdef(`condition')`'dnl
+popdef(`value')`'dnl
+popdef(`type')`'dnl
+dnl
+')dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_POW',`dnl
+pushdef(`type',$1)`'dnl
+pushdef(`value',$2)`'dnl
+pushdef(`exp',$3)`'dnl
+dnl
+dnl	FIXME
+dnl
+ifelse(type,`long double complex',`cpowl(value,exp)')`'dnl
+ifelse(type,`double complex',`cpow(value,exp)')`'dnl
+ifelse(type,`float complex',`cpowf(value,exp)')`'dnl
+dnl
+ifelse(type,`long double',`powl(value,exp)')`'dnl
+ifelse(type,`double',`pow(value,exp)')`'dnl
+ifelse(type,`float',`powf(value,exp)')`'dnl
+ifelse(type,`int',`(int)pow((int)(value),(int)(exp))')`'dnl	yeah, it is dumb.
+dnl
+popdef(`exp')`'dnl
+popdef(`value')`'dnl
+popdef(`type')`'dnl
+dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_SQRT',`dnl
+pushdef(`type',$1)`'dnl
+pushdef(`value',$2)`'dnl
+dnl
+dnl	FIXME
+dnl
+ifelse(type,`long double complex',`csqrtl(value)')`'dnl
+ifelse(type,`double complex',`csqrt(value)')`'dnl
+ifelse(type,`float complex',`csqrtf(value)')`'dnl
+dnl
+ifelse(type,`long double',`sqrtl(value)')`'dnl
+ifelse(type,`double',`sqrt(value)')`'dnl
+ifelse(type,`float',`sqrtf(value)')`'dnl
+ifelse(type,`int',`(int)sqrt((int)(value))')`'dnl
+dnl
+popdef(`value')`'dnl
+popdef(`type')`'dnl
+dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_ONE',`dnl
+pushdef(`type',$1)`'dnl
+`'((type)(1.0))`'dnl
+popdef(`type')`'dnl
+dnl
+')dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_ZERO',`dnl
+pushdef(`type',$1)`'dnl
+`'((type)(0))`'dnl
+popdef(`type')`'dnl
+dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_CONJ_SYM',`dnl
+pushdef(`type',$1)`'dnl
+pushdef(`transposition',$2)`'dnl
+pushdef(`k_symmetry',$3)`'dnl
+dnl
+ifelse(dnl
+RSB_M4_AND(RSB_M4_IS_COMPLEX_TYPE(type),dnl
+RSB_M4_XOR(RSB_M4_SAME(transposition,RSB_M4_TRANS_C),RSB_M4_SAME(k_symmetry,RSB_M4_SYMBOL_HERMITIAN))),1,`dnl
+ifelse(type,`long double complex',`conjl')`'dnl FIXME : long double complex is not supported
+ifelse(type,`double complex',`conj')`'dnl
+ifelse(type,`float complex',`conjf')`'dnl
+',`dnl
+`'dnl
+')`'dnl
+dnl
+popdef(`k_symmetry')`'dnl
+popdef(`transposition')`'dnl
+popdef(`type')`'dnl
+')dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_CONJ',`dnl
+pushdef(`exp',$1)`'dnl
+pushdef(`type',$2)`'dnl
+pushdef(`transposition',$3)`'dnl
+pushdef(`k_symmetry',$4)`'dnl
+dnl
+ifelse(dnl
+RSB_M4_AND(RSB_M4_IS_COMPLEX_TYPE(type),dnl
+RSB_M4_XOR(RSB_M4_SAME(transposition,RSB_M4_TRANS_C),RSB_M4_SAME(k_symmetry,RSB_M4_SYMBOL_HERMITIAN))),1,`dnl
+ifelse(type,`long double complex',`conjl(exp)')`'dnl FIXME : long double complex is not supported
+ifelse(type,`double complex',`conj(exp)')`'dnl
+ifelse(type,`float complex',`conjf(exp)')`'dnl
+',`dnl
+exp`'dnl
+')`'dnl
+dnl
+popdef(`k_symmetry')`'dnl
+popdef(`transposition')`'dnl
+popdef(`type')`'dnl
+popdef(`exp')`'dnl
+')dnl
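+dnl	e.g. (expansions sketched by hand; conjugation only applies to complex types):
+dnl	RSB_M4_CONJ(`x',`double complex',RSB_M4_TRANS_C,RSB_M4_SYMBOL_UNSYMMETRIC) => conj(x)
+dnl	RSB_M4_CONJ(`x',`double',RSB_M4_TRANS_C,RSB_M4_SYMBOL_UNSYMMETRIC) => x
+dnl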
+dnl
+dnl
+define(`RSB_M4_H2T_TRANSPOSITION',`dnl
+pushdef(`transposition',$1)`'dnl
+dnl
+ifelse(transposition,RSB_M4_TRANS_T,RSB_M4_TRANS_C)`'dnl
+ifelse(transposition,RSB_M4_TRANS_C,RSB_M4_TRANS_T)`'dnl
+dnl
+popdef(`transposition')`'dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_TRANSPOSE_SYMMETRY',`dnl
+pushdef(`k_symmetry',$1)`'dnl
+dnl
+ifelse(k_symmetry,RSB_M4_SYMBOL_HERMITIAN,RSB_M4_SYMBOL_UNSYMMETRIC)`'dnl
+ifelse(k_symmetry,RSB_M4_SYMBOL_UNSYMMETRIC,RSB_M4_SYMBOL_HERMITIAN)`'dnl
+ifelse(k_symmetry,RSB_M4_SYMBOL_SYMMETRIC,RSB_M4_SYMBOL_HERMITIAN)`'dnl
+dnl
+popdef(`k_symmetry')`'dnl
+')dnl
+dnl
+define(`RSB_M4_TRANSPOSE_TRANSPOSITION',`dnl
+pushdef(`transposition',$1)`'dnl
+dnl
+ifelse(transposition,RSB_M4_TRANS_T,RSB_M4_TRANS_N)`'dnl
+ifelse(transposition,RSB_M4_TRANS_C,RSB_M4_TRANS_N)`'dnl
+ifelse(transposition,RSB_M4_TRANS_N,RSB_M4_TRANS_T)`'dnl
+dnl
+popdef(`transposition')`'dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_MATRIX_SYMMETRY',(RSB_M4_WANT_MATRIX_SYMMETRY))dnl
+dnl
+dnl
+define(RSB_M4_SYMMETRY_SWITCH,`dnl
+dnl
+pushdef(`k_symmetry',$1)`'dnl
+dnl
+dnl	FIXME
+dnl
+ifelse(k_symmetry,RSB_M4_SYMBOL_UNSYMMETRIC,RSB_M4_SYMBOL_UNSYMMETRIC,dnl
+ifelse(k_symmetry,RSB_M4_SYMBOL_SYMMETRIC,RSB_M4_SYMBOL_UNSYMMETRIC,dnl
+ifelse(k_symmetry,RSB_M4_SYMBOL_HERMITIAN,RSB_M4_SYMBOL_UNSYMMETRIC,dnl
+`')))`'dnl
+dnl
+popdef(`k_symmetry')`'dnl
+')dnl
+dnl
+dnl
+define(RSB_M4_SYMMETRY_EFFECT,`dnl
+dnl
+dnl
+pushdef(`k_symmetry',$1)`'dnl
+pushdef(`operand',$2)`'dnl
+dnl
+ifelse(k_symmetry,RSB_M4_SYMBOL_UNSYMMETRIC,`'operand` \neq 'operand`^T',dnl
+ifelse(k_symmetry,RSB_M4_SYMBOL_SYMMETRIC,`'operand` == 'operand`^T',dnl
+ifelse(k_symmetry,RSB_M4_SYMBOL_HERMITIAN,`'operand` == 'operand`^H',dnl
+`')))`'dnl
+dnl
+dnl
+popdef(`operand')`'dnl
+popdef(`k_symmetry')`'dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_IS_COMPLEX_TYPE',`dnl
+pushdef(`type',$1)dnl
+RSB_M4_MEMBER(type,`double complex',`float complex',`long double complex')`'dnl
+popdef(`type')dnl
+')dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_MATRIX_ALL_OPS',(WANT_MATRIX_ALL_OPS))dnl
+define(`RSB_M4_MATRIX_STORAGE',(WANT_MATRIX_STORAGE))dnl
+define(`RSB_M4_ROWS_FALLBACK_UNROLL',RSB_M4_MIDDLE_LIST_ELEMENT(RSB_M4_SORT(WANT_ROW_UNLOOP_FACTORS)))dnl
+define(`RSB_M4_COLUMNS_FALLBACK_UNROLL',RSB_M4_MIDDLE_LIST_ELEMENT(RSB_M4_SORT(WANT_COLUMN_UNLOOP_FACTORS)))dnl
+dnl
+define(`RSB_M4_BCOO_FORMATS',(WANT_MATRIX_BCOO_STORAGE))dnl
+define(`RSB_M4_BCSS_FORMATS',(WANT_MATRIX_BCSS_STORAGE))dnl
+define(`RSB_M4_VB_FORMATS',(WANT_MATRIX_VB_STORAGE))dnl
+dnl
+define(`RSB_M4_IS_IMPLEMENTED_MOP',`RSB_M4_MEMBER($1,WANT_MATRIX_OPS)')dnl
+define(`RSB_M4_IS_FORMAT_BCSS',`RSB_M4_MEMBER($1,`BCSR',`BCSC')')dnl
+define(`RSB_M4_IS_FORMAT_BCOO',`RSB_M4_MEMBER($1,`BCOR',`BCOC')')dnl
+define(`RSB_M4_IS_FORMAT_ROW_MAJOR',`RSB_M4_MEMBER($1,`BCSR',`VBR',`LR',`BCOR')')dnl
+define(`RSB_M4_IS_FORMAT_COLUMN_MAJOR',`RSB_M4_MEMBER($1,`BCSC',`VBC',`LC',`BCOC')')dnl
+define(`RSB_M4_IS_FORMAT_LINKED_LIST',`RSB_M4_MEMBER($1,`LR',`LC')')dnl NEW
+dnl
+dnl define(`RSB_M4_PREFIX',`rsb_')dnl
+define(`RSB_M4_PREFIX',`rsb__')dnl
+dnl
+dnl	RSB_M4_MATRIX_META_OPS
+dnl	----------------------
+dnl
+define(`RSB_M4_MATRIX_META_OPS',dnl
+(RSB_M4_LIST_PUSH_BACK(dnl
+RSB_M4_MATRIX_OPS`'dnl
+dnl (RSB_M4_LIST_PUSH_BACK(RSB_M4_MATRIX_OPS,`sort_only'))dnl
+,`mat_stats'))dnl
+)dnl
+dnl
+dnl	RSB_M4_MATRIX_META_OPS_REDUCED
+dnl	------------------------------
+dnl
+define(`RSB_M4_MATRIX_META_OPS_REDUCED',dnl
+(RSB_M4_LIST_PUSH_BACK(dnl
+(`spmv_uaua',`spsv_uxua')`'dnl
+,`mat_stats'))dnl
+)dnl
+dnl
+dnl
+dnl	RSB_M4_MATRIX_OP_IS_META_OP(OP)
+dnl	-------------------------------
+dnl	TODO: should differentiate from RSB_M4_IS_IMPLEMENTED_MOP
+dnl
+define(`RSB_M4_MATRIX_OP_IS_META_OP',`dnl
+pushdef(`mop',$1)`'dnl
+ifelse(RSB_M4_MEMBER(mop,WANT_MATRIX_OPS),`1',`0',`1')dnl
+popdef(`mop')`'dnl
+')dnl
+dnl
+dnl	RSB_M4_INTERVAL_LIST(LOWER_INDEX,UPPER_INDEX,[INCREMENT])
+dnl	---------------------------------------------------------
+dnl
+dnl
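+dnl	e.g. (expansions sketched by hand):
+dnl	RSB_M4_INTERVAL_LIST(1,4) => 1,2,3,4
+dnl	RSB_M4_INTERVAL_LIST(0,8,4) => 0,4,8
+dnl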
+define(`RSB_M4_INTERVAL_LIST',`dnl
+ifelse($3,,`dnl
+forloop(`i',$1,decr($2),i`,')$2`'dnl
+',`dnl
+forloop(`i',0,decr(eval(($2-$1)/$3)),`eval($1+i*$3),')eval($1+eval(($2-$1)/$3)*$3)`'dnl
+')dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_ERROR_UNIMPLEMENTED',`#error "missing implementation! Contact the author!"')dnl
+dnl
+define(`RSB_M4_MATRIX_TYPES_ARRAY',`RSB_MATRIX_TYPES_ARRAY')dnl
+define(`RSB_M4_MATRIX_META_OPS_ARRAY',`RSB_MATRIX_OPS_ARRAY')dnl dnl
+dnl
+define(`RSB_M4_ZEROS_ARRAY',`dnl
+`{'`0'forloop(`__dummy',0,decr(eval($1-1)),``,0'')`}'dnl
+')dnl dnl
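+dnl	e.g.: RSB_M4_ZEROS_ARRAY(4) => {0,0,0,0}
+dnl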
+dnl
+dnl	RSB_M4_MAKE_FUNCTION_POINTER_TABLE()
+dnl	------------------------------------
+dnl	The resulting table should be easily addressable by C code.
+dnl
+dnl	UNFINISHED : YOU COULD DELETE THIS CODE NOW AND NO ONE WOULD NOTICE
+dnl
+define(`RSB_M4_MAKE_FUNCTION_POINTER_TABLE',`dnl
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+foreach(`mtype',RSB_M4_MATRIX_TYPES,`dnl
+dnl RSB_M4_DIRECT_KERNEL_DISPATCH_FULLRANGEBENCHMARK_FUNCTION_ARGS(mop,mtype)dnl
+')dnl
+')dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_IS_BCSR',dnl
+pushdef(`rows_unroll',$1)dnl
+pushdef(`cols_unroll',$2)dnl
+dnl
+ifelse(`ifelse(rows_unroll,1,1,0),ifelse(cols_unroll,1,1,0)',11,1,0)`'dnl
+dnl
+popdef(`rows_unroll')dnl
+popdef(`cols_unroll')dnl
+)dnl
+dnl
+define(`RSB_M4_FAKE_DIAG_IMPLICIT_MSG',`/* NOTE: Diagonal implicit is not really handled here: look at caller level. */')dnl
+dnl
+dnl	------------------------------------------------- 20110206
+dnl
+define(`RSB_M4_EARLY_EVICT_INSTRUCTION',`dnl
+dnl	foreach(`location',$1,_mm_prefetch(location+24,_MM_HINT_NTA);
+dnl	)`'dnl
+')dnl dnl
+dnl
+define(`RSB_M4_HEADER_EXTRA_DECLARATIONS',`dnl
+dnl	#include <xmmintrin.h>
+')dnl dnl
+dnl
+dnl	------------------------------------------------- 20110206
+dnl
+dnl	define(`RSB_M4_FORTRAN_SYMBOL_ADD_TO_C',`_f_')dnl
+define(`RSB_M4_FORTRAN_SYMBOL_ADD_TO_C',`_')dnl
+define(`RSB_M4_FORTRAN_SYMBOL_ADD_TO_F',`')dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_INCLUDE_HEADERS',`dnl
+#include "rsb.h"
+#include "rsb_common.h"
+#include "rsb_internals.h"
+')dnl
+dnl
+dnl
diff --git a/rsb_mkl.c b/rsb_mkl.c
new file mode 100644
index 0000000..7bd69ca
--- /dev/null
+++ b/rsb_mkl.c
@@ -0,0 +1,646 @@
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains some MKL interfacing functions.
+ * */
+
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+#include "rsb_mkl.h"
+#if RSB_WANT_MKL
+
+
+
+rsb_err_t rsb__mkl_gemv(rsb_type_t typecode, const void * Mp, const void*Bp, void*Xp, rsb_nnz_idx_t mdim, rsb_coo_idx_t vdim, rsb_coo_idx_t*udimp)
+{
+	/* FIXME: TODO: incX != 1 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	const MKL_INT dim=(rsb_coo_idx_t)sqrt((double)mdim);
+	const MKL_INT incX=1;
+	char transA_mkl=110;
+	if(!Mp || !Xp || !Bp)
+		goto err;
+	if(dim<1 || dim>vdim)
+		goto err;
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		const float alpha=((float)(1.0)), beta=((float)(1.0));
+		sgemv(&transA_mkl,&dim,&dim, (float*)(&alpha),(const float*)Mp,&dim,(const float*)Bp,&incX,(float*)&beta,(float*)Xp,&incX);
+	}
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+	else 
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		const double alpha=((double)(1.0)), beta=((double)(1.0));
+		dgemv(&transA_mkl,&dim,&dim, (double*)(&alpha),(const double*)Mp,&dim,(const double*)Bp,&incX,(double*)&beta,(double*)Xp,&incX);
+	}
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+	else 
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		const float complex alpha=((float complex)(1.0)), beta=((float complex)(1.0));
+		cgemv(&transA_mkl,&dim,&dim, (MKL_Complex8*)(&alpha),(const MKL_Complex8*)Mp,&dim,(const MKL_Complex8*)Bp,&incX,(MKL_Complex8*)&beta,(MKL_Complex8*)Xp,&incX);
+	}
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+	else 
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		const double complex alpha=((double complex)(1.0)), beta=((double complex)(1.0));
+		zgemv(&transA_mkl,&dim,&dim, (MKL_Complex16*)(&alpha),(const MKL_Complex16*)Mp,&dim,(const MKL_Complex16*)Bp,&incX,(MKL_Complex16*)&beta,(MKL_Complex16*)Xp,&incX);
+	}
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+	else 
+		errval=RSB_ERR_BADARGS;
+
+	if(udimp)
+		*udimp=dim;
+err:
+	return errval;
+}
+
+static char rsb_rsb_to_mkl_trans(rsb_trans_t transA_mkl)
+{
+	/**
+	 * \ingroup gr_internals
+	 */
+	switch(transA_mkl)
+	{
+		case(RSB_TRANSPOSITION_N):
+		return 'n';
+		break;
+		case(RSB_TRANSPOSITION_T):
+		return 't';
+		break;
+		case(RSB_TRANSPOSITION_C):
+		return 'c';
+		break;
+		default:
+		return 'n';	// FIXME
+	}
+}
+
+static char rsb_rsb_to_mkl_sym(rsb_flags_t flags)
+{
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_SYMMETRIC))
+		return 's';
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_TRIANGULAR))
+		return 't';
+	else
+		return 'g';
+}
+
+static char rsb_rsb_to_mkl_upl(rsb_flags_t flags)
+{
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER))
+		return 'l';
+	else
+		return 'u';
+}
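+
+/* The three helpers above fill MKL's matdescra descriptor (see the call sites
+ * below): matdescra[0] is the structure (general/symmetric/triangular),
+ * matdescra[1] the upper/lower part, matdescra[2] the unit-diagonal flag, and
+ * matdescra[3] the index base ('c' for zero based, 'f' for one based). */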
+
+rsb_err_t rsb__mkl_coo_spmv(const void *VA, const MKL_INT m, const MKL_INT k, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * x, void * y, const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags)
+{
+	char transA_mkl = rsb_rsb_to_mkl_trans(transA);
+	char matdescra[]={RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL};
+	matdescra[0] = rsb_rsb_to_mkl_sym(flags); // general ?
+	matdescra[1] = rsb_rsb_to_mkl_upl(flags); // up or lo ?
+	matdescra[2] = 'n'; // not unit diagonal
+	matdescra[3] = 'c'; // zero based indexing
+
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+	mkl_scoomv(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&k),(float*)alphap,matdescra,(float*)VA,(MKL_INT*)IA,(MKL_INT*)JA,(MKL_INT*)(&nnz),(float*)x,(float*)betap,(float*)y);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+	mkl_dcoomv(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&k),(double*)alphap,matdescra,(double*)VA,(MKL_INT*)IA,(MKL_INT*)JA,(MKL_INT*)(&nnz),(double*)x,(double*)betap,(double*)y);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	mkl_ccoomv(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&k),(MKL_Complex8*)alphap,matdescra,(MKL_Complex8*)VA,(MKL_INT*)IA,(MKL_INT*)JA,(MKL_INT*)(&nnz),(MKL_Complex8*)x,(MKL_Complex8*)betap,(MKL_Complex8*)y);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	mkl_zcoomv(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&k),(MKL_Complex16*)alphap,matdescra,(MKL_Complex16*)VA,(MKL_INT*)IA,(MKL_INT*)JA,(MKL_INT*)(&nnz),(MKL_Complex16*)x,(MKL_Complex16*)betap,(MKL_Complex16*)y);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__mkl_coo_spmm(const void *VA, const MKL_INT m, const MKL_INT k, const MKL_INT nrhs, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * b, const MKL_INT ldb, void * c, const MKL_INT ldc, const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags)
+{
+	char transA_mkl = rsb_rsb_to_mkl_trans(transA);
+	char matdescra[]={RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL};
+	matdescra[0] = rsb_rsb_to_mkl_sym(flags); // general ?
+	matdescra[1] = rsb_rsb_to_mkl_upl(flags); // up or lo ?
+	matdescra[2] = 'n'; // not unit diagonal
+	matdescra[3] = 'c'; // zero based indexing
+
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+	mkl_scoomm(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&nrhs),(MKL_INT*)(&k),(float*)alphap,matdescra,(float*)VA,(MKL_INT*)IA,(MKL_INT*)JA,(MKL_INT*)(&nnz),(float*)b,(MKL_INT*)(&ldb),(float*)betap,(float*)c,(MKL_INT*)(&ldc));
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+	mkl_dcoomm(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&nrhs),(MKL_INT*)(&k),(double*)alphap,matdescra,(double*)VA,(MKL_INT*)IA,(MKL_INT*)JA,(MKL_INT*)(&nnz),(double*)b,(MKL_INT*)(&ldb),(double*)betap,(double*)c,(MKL_INT*)(&ldc));
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	mkl_ccoomm(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&nrhs),(MKL_INT*)(&k),(MKL_Complex8*)alphap,matdescra,(MKL_Complex8*)VA,(MKL_INT*)IA,(MKL_INT*)JA,(MKL_INT*)(&nnz),(MKL_Complex8*)b,(MKL_INT*)(&ldb),(MKL_Complex8*)betap,(MKL_Complex8*)c,(MKL_INT*)(&ldc));
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	mkl_zcoomm(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&nrhs),(MKL_INT*)(&k),(MKL_Complex16*)alphap,matdescra,(MKL_Complex16*)VA,(MKL_INT*)IA,(MKL_INT*)JA,(MKL_INT*)(&nnz),(MKL_Complex16*)b,(MKL_INT*)(&ldb),(MKL_Complex16*)betap,(MKL_Complex16*)c,(MKL_INT*)(&ldc));
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__mkl_coo_spsv(const void *VA, const MKL_INT m, const MKL_INT k, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * x, void * y, const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags)
+{
+	char transA_mkl = rsb_rsb_to_mkl_trans(transA);
+	char matdescra[]={RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL};
+	matdescra[0] = rsb_rsb_to_mkl_sym(flags); // general ?
+	matdescra[1] = rsb_rsb_to_mkl_upl(flags); // up or lo ?
+	matdescra[2] = 'n'; // not unit diagonal
+	matdescra[3] = 'c'; // zero based indexing
+	/* 20101118	MKL 9.1 reference manual declares also k among the parameters */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+	mkl_scoosv(&transA_mkl,(MKL_INT*)(&m),(float*)alphap,matdescra,(float*)VA,(MKL_INT*)IA,(MKL_INT*)JA,(MKL_INT*)(&nnz),(float*)x,(float*)y);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+	mkl_dcoosv(&transA_mkl,(MKL_INT*)(&m),(double*)alphap,matdescra,(double*)VA,(MKL_INT*)IA,(MKL_INT*)JA,(MKL_INT*)(&nnz),(double*)x,(double*)y);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	mkl_ccoosv(&transA_mkl,(MKL_INT*)(&m),(MKL_Complex8*)alphap,matdescra,(MKL_Complex8*)VA,(MKL_INT*)IA,(MKL_INT*)JA,(MKL_INT*)(&nnz),(MKL_Complex8*)x,(MKL_Complex8*)y);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	mkl_zcoosv(&transA_mkl,(MKL_INT*)(&m),(MKL_Complex16*)alphap,matdescra,(MKL_Complex16*)VA,(MKL_INT*)IA,(MKL_INT*)JA,(MKL_INT*)(&nnz),(MKL_Complex16*)x,(MKL_Complex16*)y);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t rsb__do_mkl_csr_spmv(const void *VA, const MKL_INT m, const MKL_INT k, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * x, void * y, const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags){
+	char transA_mkl = rsb_rsb_to_mkl_trans(transA);
+	char matdescra[]={RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL};
+	matdescra[0] = rsb_rsb_to_mkl_sym(flags); // general ?
+	matdescra[1] = rsb_rsb_to_mkl_upl(flags); // up or lo ?
+	matdescra[2] = 'n'; // not unit diagonal
+	matdescra[3] = 'c'; // zero based indexing
+
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+	mkl_scsrmv(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&k),(float*)alphap,matdescra,(float*)VA,(MKL_INT*)JA,(MKL_INT*)IA,(MKL_INT*)(IA+1),(float*)x,(float*)betap,(float*)y);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+	mkl_dcsrmv(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&k),(double*)alphap,matdescra,(double*)VA,(MKL_INT*)JA,(MKL_INT*)IA,(MKL_INT*)(IA+1),(double*)x,(double*)betap,(double*)y);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	mkl_ccsrmv(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&k),(MKL_Complex8*)alphap,matdescra,(MKL_Complex8*)VA,(MKL_INT*)JA,(MKL_INT*)IA,(MKL_INT*)(IA+1),(MKL_Complex8*)x,(MKL_Complex8*)betap,(MKL_Complex8*)y);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	mkl_zcsrmv(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&k),(MKL_Complex16*)alphap,matdescra,(MKL_Complex16*)VA,(MKL_INT*)JA,(MKL_INT*)IA,(MKL_INT*)(IA+1),(MKL_Complex16*)x,(MKL_Complex16*)betap,(MKL_Complex16*)y);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+
+/* The following three look weird, I know. */
+#define RSB_GET_MKL_MAX_THREADS rsb__set_num_threads(RSB_THREADS_GET_MAX_SYS)
+#define RSB_GET_MKL_BASE_THREADS 1 /* FIXME: no mkl_get_num_threads */
+#define RSB_GET_MKL_DEFAULT_THREADS mkl_get_max_threads() /* omp_get_num_threads(); */
+#define RSB_MKL_MAX_AT_TIME 1.0 /* FIXME */
+
+#define RSB_MKL_SET_THREADS_RANGE(lnt,unt,otnp)				\
+		if(RSB_DT_SAME_THREADS_TNP(otnp))			\
+			lnt = unt = 0; 					\
+		else							\
+		{							\
+			if(RSB_DT_THREADS_TUNE_TNP(otnp))		\
+				; /* ok */				\
+			else						\
+				if(RSB_DT_SPEC_THREADS_TNP(otnp))	\
+			lnt = unt = *otnp;				\
+		}
+
+#define RSB_MKL_THREADS_TUNING_ODECLS					\
+		rsb_time_t tinf = rsb__timer_granularity();		\
+		rsb_time_t best = RSB_CONST_IMPOSSIBLY_BIG_TIME;	\
+		rsb_thread_t ont = RSB_GET_MKL_BASE_THREADS;		\
+		rsb_thread_t nt, lnt = 1, unt = RSB_GET_MKL_MAX_THREADS;\
+		rsb_thread_t otn = ont;					\
+		rsb_thread_t dtn = RSB_GET_MKL_DEFAULT_THREADS;
+
+
+#define RSB_MKL_THREADS_TUNING_IDECLS									\
+			rsb_time_t it = rsb_time(), ct = RSB_TIME_ZERO;	/* initial/current time */	\
+			rsb_time_t dt = it, tt = RSB_TIME_ZERO; /* elapsed (delta) / total  time */	\
+			rsb_time_t bt = RSB_CONST_IMPOSSIBLY_BIG_TIME, wt = RSB_TIME_ZERO; /* best / worst  time */	\
+			rsb_time_t ss = RSB_TIME_ZERO; /* sum of squares */				\
+			rsb_time_t mint = RSB_TIME_ZERO; /* minimal time */				\
+			rsb_int_t times = 0, mintimes = RSB_AT_MIN_TIMES, maxtimes = RSB_AUT0_TUNING_DEFAULT_TIMES ;	\
+			rsb_time_t maxt = RSB_AT_MAX_TIME/* RSB_MKL_MAX_AT_TIME*/;
+
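+/* Usage sketch (this is how the rsb__mkl_*_bench functions below employ the
+ * two macro groups above): the O-declarations set up the candidate thread
+ * range and the best-time tracking, the I-declarations reset the per-thread-
+ * count timing statistics, and each candidate thread count is then timed by
+ * repeating the operation until RSB_REPEAT deems the sample sufficient. */
+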
+rsb_err_t rsb__mkl_csr_spmv_bench(const void *VA, const MKL_INT m, const MKL_INT k, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * x, void * y, const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags, rsb_thread_t *otnp, rsb_time_t *tpop, struct rsb_tattr_t* ttrp, struct rsb_ts_t*tstp)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_time_t dt, tpo;
+
+	if(otnp)
+	{
+		RSB_MKL_THREADS_TUNING_ODECLS
+		RSB_MKL_SET_THREADS_RANGE(lnt,unt,otnp)
+
+		for(nt=lnt;nt<=unt;++nt)
+		{
+			RSB_MKL_THREADS_TUNING_IDECLS
+			if(nt) mkl_set_num_threads(nt);
+
+			do
+			{
+				errval = rsb__do_mkl_csr_spmv(VA, m, k, nnz, IA, JA, x, y, alphap, betap, transA, typecode, flags);
+				RSB_SAMPLE_STAT(it,ct,dt,tt,bt,wt,ss,tinf,times);
+			}
+			while(RSB_REPEAT(ct-it,times,mint,mintimes,maxt,maxtimes));
+
+			dt = bt;
+			if(dt < best )
+			{
+				otn = nt;
+				best = RSB_MIN_ABOVE_INF(best,dt,tinf);
+				RSB_STAT_TAKE(it,otn,ct,dt,tt,bt,wt,ss,times,tstp);
+			}
+			rsb__tattr_sets(ttrp,dtn,nt,dt,otn,times);/* FIXME: if no threads tuning, shall set dtpo = btpo, as well as ttrp.optt=0 */
+			if(dtn == nt) RSB_STAT_TAKE(it,otn,ct,dt,tt,bt,wt,ss,times,tstp+1);
+		}
+		mkl_set_num_threads(ont);
+done:
+		ttrp->ttt += rsb_time(); /* ttrp->ttt = tt; */
+		tpo = best; /* tpo = 1.0 / best; */
+		*otnp = otn;
+	}
+	else
+	{
+		dt = -rsb_time();
+		errval = rsb__do_mkl_csr_spmv(VA, m, k, nnz, IA, JA, x, y, alphap, betap, transA, typecode, flags);
+		dt += rsb_time();
+		/* tpo = 1.0 / dt; */
+		tpo = dt;
+	}
+	if(tpop)
+		*tpop = tpo;
+	return errval;
+}
+
+static rsb_err_t rsb__do_mkl_csr_spmm(const void *VA, const MKL_INT m, const MKL_INT k, const MKL_INT n, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * b, const MKL_INT ldb, void * c, const MKL_INT ldc, const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags){
+	char transA_mkl = rsb_rsb_to_mkl_trans(transA);
+	char matdescra[]={RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL};
+	matdescra[0] = rsb_rsb_to_mkl_sym(flags); // general ?
+	matdescra[1] = rsb_rsb_to_mkl_upl(flags); // up or lo ?
+	matdescra[2] = 'n'; // not unit diagonal
+	matdescra[3] = 'c'; // zero based indexing
+	MKL_INT ldb_ = n, ldc_ = n; /* for zero based indexing */
+
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE))
+		ldb_ = k, ldc_ = m, /* for one based indexing */
+		matdescra[3] = 'f'; // one based indexing
+
+	#if 1
+	/* n = nrhs */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+	mkl_scsrmm(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&n),(MKL_INT*)(&k),(float*)alphap,matdescra,(float*)VA,(MKL_INT*)JA,(MKL_INT*)IA,(MKL_INT*)(IA+1),(float*)b,(MKL_INT*)(&ldb_),(float*)betap,(float*)c,(MKL_INT*)(&ldc_));
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+	mkl_dcsrmm(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&n),(MKL_INT*)(&k),(double*)alphap,matdescra,(double*)VA,(MKL_INT*)JA,(MKL_INT*)IA,(MKL_INT*)(IA+1),(double*)b,(MKL_INT*)(&ldb_),(double*)betap,(double*)c,(MKL_INT*)(&ldc_));
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	mkl_ccsrmm(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&n),(MKL_INT*)(&k),(MKL_Complex8*)alphap,matdescra,(MKL_Complex8*)VA,(MKL_INT*)JA,(MKL_INT*)IA,(MKL_INT*)(IA+1),(MKL_Complex8*)b,(MKL_INT*)(&ldb_),(MKL_Complex8*)betap,(MKL_Complex8*)c,(MKL_INT*)(&ldc_));
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	mkl_zcsrmm(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&n),(MKL_INT*)(&k),(MKL_Complex16*)alphap,matdescra,(MKL_Complex16*)VA,(MKL_INT*)JA,(MKL_INT*)IA,(MKL_INT*)(IA+1),(MKL_Complex16*)b,(MKL_INT*)(&ldb_),(MKL_Complex16*)betap,(MKL_Complex16*)c,(MKL_INT*)(&ldc_));
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+#endif /* 1 */
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__mkl_csr_spmm_bench(const void *VA, const MKL_INT m, const MKL_INT k, const MKL_INT n, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * b, const MKL_INT ldb, void * c, const MKL_INT ldc, const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags, rsb_thread_t *otnp, rsb_time_t *tpop, struct rsb_tattr_t* ttrp, struct rsb_ts_t*tstp)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_time_t dt, tpo;
+
+	if(otnp)
+	{
+		RSB_MKL_THREADS_TUNING_ODECLS
+		RSB_MKL_SET_THREADS_RANGE(lnt,unt,otnp)
+
+		for(nt=lnt;nt<=unt;++nt)
+		{
+			RSB_MKL_THREADS_TUNING_IDECLS
+			if(nt) mkl_set_num_threads(nt);
+
+			do
+			{
+				errval = rsb__do_mkl_csr_spmm(VA, m, k, n, nnz, IA, JA, b, ldb, c, ldc, alphap, betap, transA, typecode, flags);
+				RSB_SAMPLE_STAT(it,ct,dt,tt,bt,wt,ss,tinf,times);
+			}
+			while(RSB_REPEAT(ct-it,times,mint,mintimes,maxt,maxtimes));
+
+			dt = bt;
+			if(dt < best )
+			{
+				otn = nt;
+				best = RSB_MIN_ABOVE_INF(best,dt,tinf);
+				RSB_STAT_TAKE(it,otn,ct,dt,tt,bt,wt,ss,times,tstp);
+			}
+			rsb__tattr_sets(ttrp,dtn,nt,dt,otn,times);/* FIXME: if no threads tuning, shall set dtpo = btpo, as well as ttrp.optt=0 */
+			if(dtn == nt) RSB_STAT_TAKE(it,otn,ct,dt,tt,bt,wt,ss,times,tstp+1);
+		}
+		mkl_set_num_threads(ont);
+done:
+		ttrp->ttt += rsb_time(); /* ttrp->ttt = tt; */
+		tpo = best; /* tpo = 1.0 / best; */
+		*otnp = otn;
+	}
+	else
+	{
+		dt = -rsb_time();
+		errval = rsb__do_mkl_csr_spmm(VA, m, k, n, nnz, IA, JA, b, ldb, c, ldc, alphap, betap, transA, typecode, flags);
+		dt += rsb_time();
+		/* tpo = 1.0 / dt; */
+		tpo = dt;
+	}
+	if(tpop)
+		*tpop = tpo;
+	return errval;
+}
+
+rsb_err_t rsb__do_mkl_csr_spsv(const void *VA, const MKL_INT m, const MKL_INT k, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * x, void * y, const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags)
+{
+	char transA_mkl = rsb_rsb_to_mkl_trans(transA);
+	char matdescra[]={RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL};
+	matdescra[0] = rsb_rsb_to_mkl_sym(flags); // general ?
+	matdescra[1] = rsb_rsb_to_mkl_upl(flags); // up or lo ?
+	matdescra[2] = 'n'; // not unit diagonal
+	matdescra[3] = 'c'; // zero based indexing
+
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+	mkl_scsrsv(&transA_mkl,(MKL_INT*)(&m),(float*)alphap,matdescra,(float*)VA,(MKL_INT*)JA,(MKL_INT*)IA,(MKL_INT*)(IA+1),(float*)x,(float*)y);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+	mkl_dcsrsv(&transA_mkl,(MKL_INT*)(&m),(double*)alphap,matdescra,(double*)VA,(MKL_INT*)JA,(MKL_INT*)IA,(MKL_INT*)(IA+1),(double*)x,(double*)y);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	mkl_ccsrsv(&transA_mkl,(MKL_INT*)(&m),(MKL_Complex8*)alphap,matdescra,(MKL_Complex8*)VA,(MKL_INT*)JA,(MKL_INT*)IA,(MKL_INT*)(IA+1),(MKL_Complex8*)x,(MKL_Complex8*)y);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	mkl_zcsrsv(&transA_mkl,(MKL_INT*)(&m),(MKL_Complex16*)alphap,matdescra,(MKL_Complex16*)VA,(MKL_INT*)JA,(MKL_INT*)IA,(MKL_INT*)(IA+1),(MKL_Complex16*)x,(MKL_Complex16*)y);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_mkl_csr_spsm(const void *VA, const MKL_INT m, const MKL_INT nrhs, const MKL_INT * IA, const MKL_INT *JA, const void * b, void * c, const void *alphap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags, const MKL_INT ldb, const MKL_INT ldc)
+{
+	char transA_mkl = rsb_rsb_to_mkl_trans(transA);
+	char matdescra[]={RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL};
+	matdescra[0] = rsb_rsb_to_mkl_sym(flags); // general ?
+	matdescra[1] = rsb_rsb_to_mkl_upl(flags); // up or lo ?
+	matdescra[2] = 'n'; // not unit diagonal
+	matdescra[3] = 'c'; // zero based indexing
+
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+	mkl_scsrsm(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&nrhs),(float*)alphap,matdescra,(float*)VA,(MKL_INT*)JA,(MKL_INT*)IA,(MKL_INT*)(IA+1),(float*)b,(MKL_INT*)&ldb,(float*)c,(MKL_INT*)&ldc);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+	mkl_dcsrsm(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&nrhs),(double*)alphap,matdescra,(double*)VA,(MKL_INT*)JA,(MKL_INT*)IA,(MKL_INT*)(IA+1),(double*)b,(MKL_INT*)&ldb,(double*)c,(MKL_INT*)&ldc);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	mkl_ccsrsm(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&nrhs),(MKL_Complex8*)alphap,matdescra,(MKL_Complex8*)VA,(MKL_INT*)JA,(MKL_INT*)IA,(MKL_INT*)(IA+1),(MKL_Complex8*)b,(MKL_INT*)&ldb,(MKL_Complex8*)c,(MKL_INT*)&ldc);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	mkl_zcsrsm(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&nrhs),(MKL_Complex16*)alphap,matdescra,(MKL_Complex16*)VA,(MKL_INT*)JA,(MKL_INT*)IA,(MKL_INT*)(IA+1),(MKL_Complex16*)b,(MKL_INT*)&ldb,(MKL_Complex16*)c,(MKL_INT*)&ldc);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__mkl_csr_spsv_bench(const void *VA, const MKL_INT m, const MKL_INT k/*, const MKL_INT n*/, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * b, /*const MKL_INT ldb,*/ void * c,/* const MKL_INT ldc,*/ const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags, rsb_thread_t *otnp, rsb_time_t *tpop, struct rsb_tattr_t* ttrp, struct rsb_ts_t*tstp)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_time_t dt, tpo;
+
+	if(otnp)
+	{
+		RSB_MKL_THREADS_TUNING_ODECLS
+		RSB_MKL_SET_THREADS_RANGE(lnt,unt,otnp)
+
+		for(nt=lnt;nt<=unt;++nt)
+		{
+			RSB_MKL_THREADS_TUNING_IDECLS
+			if(nt) mkl_set_num_threads(nt);
+
+			do
+			{
+				errval = rsb__do_mkl_csr_spsv(VA, m, k, nnz, IA, JA, b, c, alphap, betap, transA, typecode, flags);
+				RSB_SAMPLE_STAT(it,ct,dt,tt,bt,wt,ss,tinf,times);
+			}
+			while(RSB_REPEAT(ct-it,times,mint,mintimes,maxt,maxtimes));
+
+			dt = bt;
+			if(dt < best )
+			{
+				otn = nt;
+				best = RSB_MIN_ABOVE_INF(best,dt,tinf);
+				RSB_STAT_TAKE(it,otn,ct,dt,tt,bt,wt,ss,times,tstp);
+			}
+			rsb__tattr_sets(ttrp,dtn,nt,dt,otn,times);/* FIXME: if no threads tuning, shall set dtpo = btpo, as well as ttrp.optt=0 */
+			if(dtn == nt) RSB_STAT_TAKE(it,otn,ct,dt,tt,bt,wt,ss,times,tstp+1);
+		}
+		mkl_set_num_threads(ont);
+done:
+		ttrp->ttt += rsb_time(); /* ttrp->ttt = tt; */
+		tpo = best; /* tpo = 1.0 / best; */
+		*otnp = otn;
+	}
+	else
+	{
+		dt = -rsb_time();
+		errval = rsb__do_mkl_csr_spsv(VA, m, k, nnz, IA, JA, b, c, alphap, betap, transA, typecode, flags);
+		dt += rsb_time();
+		/* tpo = 1.0 / dt; */
+		tpo = dt;
+	}
+	if(tpop)
+		*tpop = tpo;
+	return errval;
+}
+
+rsb_err_t rsb__mkl_csr_spsm_bench(const void *VA, const MKL_INT m, const MKL_INT nrhs, const MKL_INT * IA, const MKL_INT *JA, const void * b, void * c, const void *alphap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags, const MKL_INT ldb, const MKL_INT ldc, rsb_thread_t *otnp, rsb_time_t *tpop, struct rsb_tattr_t* ttrp, struct rsb_ts_t*tstp)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_time_t dt, tpo;
+
+	if(otnp)
+	{
+		RSB_MKL_THREADS_TUNING_ODECLS
+		RSB_MKL_SET_THREADS_RANGE(lnt,unt,otnp)
+
+		for(nt=lnt;nt<=unt;++nt)
+		{
+			RSB_MKL_THREADS_TUNING_IDECLS
+			if(nt) mkl_set_num_threads(nt);
+
+			do
+			{
+				errval = rsb__do_mkl_csr_spsm(VA, m, nrhs, IA, JA, b, c, alphap, transA, typecode, flags, ldb, ldc);
+				RSB_SAMPLE_STAT(it,ct,dt,tt,bt,wt,ss,tinf,times);
+			}
+			while(RSB_REPEAT(ct-it,times,mint,mintimes,maxt,maxtimes));
+
+			dt = bt;
+			if(dt < best )
+			{
+				otn = nt;
+				best = RSB_MIN_ABOVE_INF(best,dt,tinf);
+				RSB_STAT_TAKE(it,otn,ct,dt,tt,bt,wt,ss,times,tstp);
+			}
+			rsb__tattr_sets(ttrp,dtn,nt,dt,otn,times);/* FIXME: if no threads tuning, shall set dtpo = btpo, as well as ttrp.optt=0 */
+			if(dtn == nt) RSB_STAT_TAKE(it,otn,ct,dt,tt,bt,wt,ss,times,tstp+1);
+		}
+		mkl_set_num_threads(ont);
+done:
+		ttrp->ttt += rsb_time(); /* ttrp->ttt = tt; */
+		tpo = best; /* tpo = 1.0 / best; */
+		*otnp = otn;
+	}
+	else
+	{
+		dt = -rsb_time();
+		errval = rsb__do_mkl_csr_spsm(VA, m, nrhs, IA, JA, b, c, alphap, transA, typecode, flags, ldb, ldc);
+		dt += rsb_time();
+		/* tpo = 1.0 / dt; */
+		tpo = dt;
+	}
+	if(tpop)
+		*tpop = tpo;
+	return errval;
+}
+
+rsb_err_t rsb__mkl_coo2csr(const MKL_INT m, const MKL_INT k, const MKL_INT nnz, const void *IVA, const MKL_INT * IIA, const MKL_INT *IJA, const void *OVA, const MKL_INT * OIA, const MKL_INT *OJA, rsb_type_t typecode, const MKL_INT mib)
+{
+	int info;
+	int job[6];
+	job[0] = 1; // coo2csr (=1: convert the COO matrix to CSR; =2: convert to CSR and also sort the column indices in increasing order within each row)
+	job[1] = mib; // 0 based csr
+	job[2] = 0; // 0 based coo
+	job[3] = 0; // ignored
+	job[4] = nnz; // ignored here
+	job[5] = 0; // fill all three arrays
+
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+	mkl_scsrcoo(job,(MKL_INT*)(&m),(float*)OVA,(MKL_INT*)OJA,(MKL_INT*)OIA,(MKL_INT*)(&nnz),(float*)(IVA),(MKL_INT*)IIA,(MKL_INT*)IJA,&info);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+	mkl_dcsrcoo(job,(MKL_INT*)(&m),(double*)OVA,(MKL_INT*)OJA,(MKL_INT*)OIA,(MKL_INT*)(&nnz),(double*)(IVA),(MKL_INT*)IIA,(MKL_INT*)IJA,&info);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	mkl_ccsrcoo(job,(MKL_INT*)(&m),(MKL_Complex8*)OVA,(MKL_INT*)OJA,(MKL_INT*)OIA,(MKL_INT*)(&nnz),(MKL_Complex8*)(IVA),(MKL_INT*)IIA,(MKL_INT*)IJA,&info);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	mkl_zcsrcoo(job,(MKL_INT*)(&m),(MKL_Complex16*)OVA,(MKL_INT*)OJA,(MKL_INT*)OIA,(MKL_INT*)(&nnz),(MKL_Complex16*)(IVA),(MKL_INT*)IIA,(MKL_INT*)IJA,&info);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
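+
+/* A minimal usage sketch (hypothetical names; the caller allocates the output
+ * arrays, since mkl_?csrcoo fills but does not allocate them):
+ *
+ *	MKL_INT IP[m+1], JA_csr[nnz];	// CSR row pointers and column indices
+ *	double  VA_csr[nnz];		// CSR values
+ *	rsb__mkl_coo2csr(m, k, nnz, VA, IA, JA, VA_csr, IP, JA_csr,
+ *		RSB_NUMERICAL_TYPE_DOUBLE, 0);	// 0: zero based CSR
+ */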
+
+#endif /* RSB_WANT_MKL */
+/* @endcond */
+
diff --git a/rsb_mkl.h b/rsb_mkl.h
new file mode 100644
index 0000000..73140d3
--- /dev/null
+++ b/rsb_mkl.h
@@ -0,0 +1,70 @@
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains some MKL interfacing functions.
+ * */
+
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+
+#ifndef RSB_RSB_MKL_H_INCLUDED
+#define RSB_RSB_MKL_H_INCLUDED
+#include "rsb_internals.h"
+#if RSB_WANT_MKL
+#include <mkl.h>
+#include <mkl_blas.h>	/* dgemm, ... */
+#include <mkl_spblas.h>
+/* #include <mkl_types.h> */
+/* #include <mkl_service.h> */ /* mkl_get_version */
+
+
+
+rsb_err_t rsb__mkl_gemv(rsb_type_t typecode, const void * Mp, const void*Bp, void*Xp, rsb_nnz_idx_t mdim, rsb_coo_idx_t vdim, rsb_coo_idx_t*udimp);
+
+
+
+rsb_err_t rsb__mkl_coo_spmv(const void *VA, const MKL_INT m, const MKL_INT k, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * x, void * y, const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags);
+rsb_err_t rsb__mkl_coo_spmm(const void *VA, const MKL_INT m, const MKL_INT k, const MKL_INT nrhs, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * b, const MKL_INT ldb, void * c, const MKL_INT ldc, const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags);
+rsb_err_t rsb__mkl_coo_spsv(const void *VA, const MKL_INT m, const MKL_INT k, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * x, void * y, const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags);
+
+
+rsb_err_t rsb__mkl_csr_spmv_bench(const void *VA, const MKL_INT m, const MKL_INT k, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * x, void * y, const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags, rsb_thread_t *otnp, rsb_time_t *tpop, struct rsb_tattr_t* ttrp, struct rsb_ts_t*tstp);
+
+rsb_err_t rsb__mkl_csr_spmm_bench(const void *VA, const MKL_INT m, const MKL_INT k, const MKL_INT n, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * b, const MKL_INT ldb, void * c, const MKL_INT ldc, const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags, rsb_thread_t *otnp, rsb_time_t *tpop, struct rsb_tattr_t* ttrp, struct rsb_ts_t*tstp);
+rsb_err_t rsb__do_mkl_csr_spsv(const void *VA, const MKL_INT m, const MKL_INT k, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * x, void * y, const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags);
+rsb_err_t rsb__do_mkl_csr_spsm(const void *VA, const MKL_INT m, const MKL_INT nrhs, const MKL_INT * IA, const MKL_INT *JA, const void * b, void * c, const void *alphap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags, const MKL_INT ldb, const MKL_INT ldc);
+rsb_err_t rsb__mkl_csr_spsv_bench(const void *VA, const MKL_INT m, const MKL_INT k/*, const MKL_INT n*/, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * b, /*const MKL_INT ldb,*/ void * c,/* const MKL_INT ldc,*/ const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags, rsb_thread_t *otnp, rsb_time_t *tpop, struct rsb_tattr_t* ttrp, struct rsb_ts_t*tstp);
+rsb_err_t rsb__mkl_csr_spsm_bench(const void *VA, const MKL_INT m, const MKL_INT nrhs, const MKL_INT * IA, const MKL_INT *JA, const void * b, void * c, const void *alphap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags, const MKL_INT ldb, const MKL_INT ldc, rsb_thread_t *otnp, rsb_time_t *tpop, struct rsb_tattr_t* ttrp, struct rsb_ts_t*tstp);
+rsb_err_t rsb__mkl_coo2csr(const MKL_INT m, const MKL_INT k, const MKL_INT nnz, const void *IVA, const MKL_INT * IIA, const MKL_INT *IJA, const void *OVA, const MKL_INT * OIA, const MKL_INT *OJA, rsb_type_t typecode, const MKL_INT mib);
+#endif /* RSB_WANT_MKL */
+#endif  /* RSB_RSB_MKL_H_INCLUDED */
+/* @endcond */
+
diff --git a/rsb_mkl.m4 b/rsb_mkl.m4
new file mode 100644
index 0000000..8fb9cfd
--- /dev/null
+++ b/rsb_mkl.m4
@@ -0,0 +1,600 @@
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains some MKL interfacing functions.
+ * */
+include(`rsb_misc.m4')dnl
+include(`do_unroll.m4')dnl
+include(`libspblas_macros.m4')dnl
+RSB_M4_HEADER_MESSAGE()dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`
+#ifndef RSB_RSB_MKL_H_INCLUDED
+#define RSB_RSB_MKL_H_INCLUDED
+#include "rsb_internals.h"
+#if RSB_WANT_MKL
+#include <mkl.h>
+#include <mkl_blas.h>	/* dgemm, ... */
+#include <mkl_spblas.h>
+/* #include <mkl_types.h> */
+/* #include <mkl_service.h> */ /* mkl_get_version */
+',`dnl
+#include "rsb_mkl.h"
+#if RSB_WANT_MKL
+')
+dnl
+
+dnl
+define(`RSB_M4_RSB_TYPE_TO_MKL_TYPE',`dnl
+pushdef(`type',$1)`'dnl
+dnl
+ifelse(type,`double complex',`MKL_Complex16')`'dnl
+ifelse(type,`float complex',`MKL_Complex8')`'dnl
+ifelse(type,`long double',`long double')`'dnl
+ifelse(type,`double',`double')`'dnl
+ifelse(type,`float',`float')`'dnl
+ifelse(type,`int',`MKL_INT')`'dnl
+dnl
+popdef(`type')`'dnl
+dnl
+')dnl
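+dnl	e.g.: RSB_M4_RSB_TYPE_TO_MKL_TYPE(`double complex') => MKL_Complex16
+dnl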
+dnl
+
+rsb_err_t rsb__mkl_gemv(rsb_type_t typecode, const void * Mp, const void*Bp, void*Xp, rsb_nnz_idx_t mdim, rsb_coo_idx_t vdim, rsb_coo_idx_t*udimp)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`
+{
+	/* FIXME: TODO: incX != 1 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	const MKL_INT dim=(rsb_coo_idx_t)sqrt((double)mdim);
+	const MKL_INT incX=1;
+	char transA_mkl=110;
+dnl ,transB=110;
+	if(!Mp || !Xp || !Bp)
+		goto err;
+	if(dim<1 || dim>vdim)
+		goto err;
+foreach(`mtype',RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+		const mtype alpha=RSB_M4_ONE(mtype), beta=RSB_M4_ONE(mtype);
+		`'RSB_M4_SPBLAS_TYPE_CHARCODE(mtype)`gemv'(&transA_mkl,&dim,&dim, (RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)(&alpha),(const RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)Mp,&dim,(const RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)Bp,&incX,(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)&beta,(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)Xp,&incX);
+dnl		`'RSB_M4_SPBLAS_TYPE_CHARCODE(mtype)`gemm'(&transA_mkl,&transB,&dim,&dim,&dim,&alpha,(const mtype*)Mp,&dim,(const mtype*)Bp,&dim,&beta,(mtype*)Xp,&dim);
+	}
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+	else 
+')dnl
+		errval=RSB_ERR_BADARGS;
+
+	if(udimp)
+		*udimp=dim;
+err:
+	return errval;
+}
+')dnl
+dnl
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+static char rsb_rsb_to_mkl_trans(rsb_trans_t transA_mkl)
+{
+	/**
+	 * \ingroup gr_internals
+	 */
+	switch(transA_mkl)
+	{
+		case(RSB_TRANSPOSITION_N):
+		return singlequote(n);
+		break;
+		case(RSB_TRANSPOSITION_T):
+		return singlequote(t);
+		break;
+		case(RSB_TRANSPOSITION_C):
+		return singlequote(c);
+		break;
+		default:
+		return singlequote(n);	// FIXME
+	}
+}
+')dnl
+dnl
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+static char rsb_rsb_to_mkl_sym(rsb_flags_t flags)
+{
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_SYMMETRIC))
+		return singlequote(s);
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_TRIANGULAR))
+		return singlequote(t);
+	else
+		return singlequote(g);
+}
+')dnl
+dnl
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+static char rsb_rsb_to_mkl_upl(rsb_flags_t flags)
+{
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER))
+		return singlequote(l);
+	else
+		return singlequote(u);
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__mkl_coo_spmv(const void *VA, const MKL_INT m, const MKL_INT k, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * x, void * y, const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`
+{
+	char transA_mkl = rsb_rsb_to_mkl_trans(transA);
+	char matdescra[]={RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL};
+	matdescra[0] = rsb_rsb_to_mkl_sym(flags); // general ?
+	matdescra[1] = rsb_rsb_to_mkl_upl(flags); // up or lo ?
+	matdescra[2] = singlequote(n); // not unit diagonal
+	matdescra[3] = singlequote(c); // zero based indexing
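+	/* MKL matdescra convention: [0] structure (G=general, S=symmetric,
+	   T=triangular), [1] U/L triangle, [2] N=non-unit / U=unit diagonal,
+	   [3] C=zero / F=one based indexing; the last two entries are unused. */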
+
+foreach(`mtype',RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	`mkl_'RSB_M4_SPBLAS_TYPE_CHARCODE(mtype)`coomv'(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&k),(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)alphap,matdescra,(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)VA,(MKL_INT*)IA,(MKL_INT*)JA,(MKL_INT*)(&nnz),(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)x,(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)betap,(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)y);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__mkl_coo_spmm(const void *VA, const MKL_INT m, const MKL_INT k, const MKL_INT nrhs, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * b, const MKL_INT ldb, void * c, const MKL_INT ldc, const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`
+{
+	char transA_mkl = rsb_rsb_to_mkl_trans(transA);
+	char matdescra[]={RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL};
+	matdescra[0] = rsb_rsb_to_mkl_sym(flags); // general ?
+	matdescra[1] = rsb_rsb_to_mkl_upl(flags); // up or lo ?
+	matdescra[2] = singlequote(n); // not unit diagonal
+	matdescra[3] = singlequote(c); // zero based indexing
+
+foreach(`mtype',RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	`mkl_'RSB_M4_SPBLAS_TYPE_CHARCODE(mtype)`coomm'(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&nrhs),(MKL_INT*)(&k),(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)alphap,matdescra,(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)VA,(MKL_INT*)IA,(MKL_INT*)JA,(MKL_INT*)(&nnz),(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)b,(MKL_INT*)(&ldb),(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)betap,(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)c,(MKL_INT*)(&ldc));
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__mkl_coo_spsv(const void *VA, const MKL_INT m, const MKL_INT k, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * x, void * y, const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`
+{
+	char transA_mkl = rsb_rsb_to_mkl_trans(transA);
+	char matdescra[]={RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL};
+	matdescra[0] = rsb_rsb_to_mkl_sym(flags); // general ?
+	matdescra[1] = rsb_rsb_to_mkl_upl(flags); // up or lo ?
+	matdescra[2] = singlequote(n); // not unit diagonal
+	matdescra[3] = singlequote(c); // zero based indexing
+	/* 20101118	MKL 9.1 reference manual declares also k among the parameters */
+foreach(`mtype',RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	`mkl_'RSB_M4_SPBLAS_TYPE_CHARCODE(mtype)`coosv'(&transA_mkl,(MKL_INT*)(&m),(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)alphap,matdescra,(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)VA,(MKL_INT*)IA,(MKL_INT*)JA,(MKL_INT*)(&nnz),(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)x,(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)y);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+static rsb_err_t rsb__do_mkl_csr_spmv(const void *VA, const MKL_INT m, const MKL_INT k, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * x, void * y, const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags)dnl
+{
+	char transA_mkl = rsb_rsb_to_mkl_trans(transA);
+	char matdescra[]={RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL};
+	matdescra[0] = rsb_rsb_to_mkl_sym(flags); // general ?
+	matdescra[1] = rsb_rsb_to_mkl_upl(flags); // up or lo ?
+	matdescra[2] = singlequote(n); // not unit diagonal
+	matdescra[3] = singlequote(c); // zero based indexing
+
+foreach(`mtype',RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	`mkl_'RSB_M4_SPBLAS_TYPE_CHARCODE(mtype)`csrmv'(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&k),(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)alphap,matdescra,(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)VA,(MKL_INT*)JA,(MKL_INT*)IA,(MKL_INT*)(IA+1),(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)x,(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)betap,(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)y);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+/* The following definitions look weird, I know. */
+#define RSB_GET_MKL_MAX_THREADS rsb__set_num_threads(RSB_THREADS_GET_MAX_SYS)
+#define RSB_GET_MKL_BASE_THREADS 1 /* FIXME: no mkl_get_num_threads */
+#define RSB_GET_MKL_DEFAULT_THREADS mkl_get_max_threads() /* omp_get_num_threads(); */
+#define RSB_MKL_MAX_AT_TIME 1.0 /* FIXME */
+
+#define RSB_MKL_SET_THREADS_RANGE(lnt,unt,otnp)				\
+		if(RSB_DT_SAME_THREADS_TNP(otnp))			\
+			lnt = unt = 0; 					\
+		else							\
+		{							\
+			if(RSB_DT_THREADS_TUNE_TNP(otnp))		\
+				; /* ok */				\
+			else						\
+				if(RSB_DT_SPEC_THREADS_TNP(otnp))	\
+			lnt = unt = *otnp;				\
+		}
+
+#define RSB_MKL_THREADS_TUNING_ODECLS					\
+		rsb_time_t tinf = rsb__timer_granularity();		\
+		rsb_time_t best = RSB_CONST_IMPOSSIBLY_BIG_TIME;	\
+		rsb_thread_t ont = RSB_GET_MKL_BASE_THREADS;		\
+		rsb_thread_t nt, lnt = 1, unt = RSB_GET_MKL_MAX_THREADS;\
+		rsb_thread_t otn = ont;					\
+		rsb_thread_t dtn = RSB_GET_MKL_DEFAULT_THREADS;
+
+
+#define RSB_MKL_THREADS_TUNING_IDECLS									\
+			rsb_time_t it = rsb_time(), ct = RSB_TIME_ZERO;	/* initial/current time */	\
+			rsb_time_t dt = it, tt = RSB_TIME_ZERO; /* elapsed (delta) / total  time */	\
+			rsb_time_t bt = RSB_CONST_IMPOSSIBLY_BIG_TIME, wt = RSB_TIME_ZERO; /* best / worst  time */	\
+			rsb_time_t ss = RSB_TIME_ZERO; /* sum of squares */				\
+			rsb_time_t mint = RSB_TIME_ZERO; /* minimal time */				\
+			rsb_int_t times = 0, mintimes = RSB_AT_MIN_TIMES, maxtimes = RSB_AUT0_TUNING_DEFAULT_TIMES ;	\
+			rsb_time_t maxt = RSB_AT_MAX_TIME/* RSB_MKL_MAX_AT_TIME*/;
+')dnl
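+dnl
+dnl The macros above drive an exhaustive thread-count sweep. A minimal
+dnl stand-alone sketch of the same pattern (illustrative only; bench() and
+dnl now() are hypothetical stand-ins for the timed kernel and the timer):
+dnl
+dnl	rsb_thread_t best_nt = 1; double best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+dnl	for (rsb_thread_t nt = 1; nt <= mkl_get_max_threads(); ++nt)
+dnl	{
+dnl		mkl_set_num_threads(nt);
+dnl		double t = now(); bench(); t = now() - t; /* best-of-several repeats in the real code */
+dnl		if (t < best_t) { best_t = t; best_nt = nt; }
+dnl	}
+dnl	mkl_set_num_threads(best_nt); /* the real code restores the original thread count instead */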
+dnl
+
+dnl
+rsb_err_t rsb__mkl_csr_spmv_bench(const void *VA, const MKL_INT m, const MKL_INT k, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * x, void * y, const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags, rsb_thread_t *otnp, rsb_time_t *tpop, struct rsb_tattr_t* ttrp, struct rsb_ts_t*tstp)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`
+{
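+	/* With otnp non-NULL: sweep the admissible thread counts, time
+	   repeated runs at each count, return the winning count in *otnp and
+	   its best time via *tpop (the same protocol holds for the other
+	   *_bench routines below). With otnp NULL: a single timed call. */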
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_time_t dt, tpo;
+
+	if(otnp)
+	{
+		RSB_MKL_THREADS_TUNING_ODECLS
+		RSB_MKL_SET_THREADS_RANGE(lnt,unt,otnp)
+
+		for(nt=lnt;nt<=unt;++nt)
+		{
+			RSB_MKL_THREADS_TUNING_IDECLS
+			if(nt) mkl_set_num_threads(nt);
+
+			do
+			{
+				errval = rsb__do_mkl_csr_spmv(VA, m, k, nnz, IA, JA, x, y, alphap, betap, transA, typecode, flags);
+				RSB_SAMPLE_STAT(it,ct,dt,tt,bt,wt,ss,tinf,times);
+			}
+			while(RSB_REPEAT(ct-it,times,mint,mintimes,maxt,maxtimes));
+
+			dt = bt;
+			if(dt < best )
+			{
+				otn = nt;
+				best = RSB_MIN_ABOVE_INF(best,dt,tinf);
+				RSB_STAT_TAKE(it,otn,ct,dt,tt,bt,wt,ss,times,tstp);
+			}
+			rsb__tattr_sets(ttrp,dtn,nt,dt,otn,times);/* FIXME: if no threads tuning, shall set dtpo = btpo, as well as ttrp.optt=0 */
+			if(dtn == nt) RSB_STAT_TAKE(it,otn,ct,dt,tt,bt,wt,ss,times,tstp+1);
+		}
+		mkl_set_num_threads(ont);
+done:
+		ttrp->ttt += rsb_time(); /* ttrp->ttt = tt; */
+		tpo = best; /* tpo = 1.0 / best; */
+		*otnp = otn;
+	}
+	else
+	{
+		dt = -rsb_time();
+		errval = rsb__do_mkl_csr_spmv(VA, m, k, nnz, IA, JA, x, y, alphap, betap, transA, typecode, flags);
+		dt += rsb_time();
+		/* tpo = 1.0 / dt; */
+		tpo = dt;
+	}
+	if(tpop)
+		*tpop = tpo;
+	return errval;
+}
+')dnl
+dnl
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+static rsb_err_t rsb__do_mkl_csr_spmm(const void *VA, const MKL_INT m, const MKL_INT k, const MKL_INT n, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * b, const MKL_INT ldb, void * c, const MKL_INT ldc, const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags)dnl
+{
+	char transA_mkl = rsb_rsb_to_mkl_trans(transA);
+	char matdescra[]={RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL};
+	matdescra[0] = rsb_rsb_to_mkl_sym(flags); // general ?
+	matdescra[1] = rsb_rsb_to_mkl_upl(flags); // up or lo ?
+	matdescra[2] = singlequote(n); // not unit diagonal
+	matdescra[3] = singlequote(c); // zero based indexing
+	MKL_INT ldb_ = n, ldc_ = n; /* for zero based indexing */
+
+	if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE))
+		ldb_ = k, ldc_ = m, /* for one based indexing */
+		matdescra[3] = singlequote(f); // one based indexing
+
+	#if 1
+	/* n = nrhs */
+foreach(`mtype',RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	`mkl_'RSB_M4_SPBLAS_TYPE_CHARCODE(mtype)`csrmm'(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&n),(MKL_INT*)(&k),(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)alphap,matdescra,(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)VA,(MKL_INT*)JA,(MKL_INT*)IA,(MKL_INT*)(IA+1),(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)b,(MKL_INT*)(&ldb_),(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)betap,(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)c,(MKL_INT*)(&ldc_));
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+#endif /* 1 */
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__mkl_csr_spmm_bench(const void *VA, const MKL_INT m, const MKL_INT k, const MKL_INT n, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * b, const MKL_INT ldb, void * c, const MKL_INT ldc, const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags, rsb_thread_t *otnp, rsb_time_t *tpop, struct rsb_tattr_t* ttrp, struct rsb_ts_t*tstp)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_time_t dt, tpo;
+
+	if(otnp)
+	{
+		RSB_MKL_THREADS_TUNING_ODECLS
+		RSB_MKL_SET_THREADS_RANGE(lnt,unt,otnp)
+
+		for(nt=lnt;nt<=unt;++nt)
+		{
+			RSB_MKL_THREADS_TUNING_IDECLS
+			if(nt) mkl_set_num_threads(nt);
+
+			do
+			{
+				errval = rsb__do_mkl_csr_spmm(VA, m, k, n, nnz, IA, JA, b, ldb, c, ldc, alphap, betap, transA, typecode, flags);
+				RSB_SAMPLE_STAT(it,ct,dt,tt,bt,wt,ss,tinf,times);
+			}
+			while(RSB_REPEAT(ct-it,times,mint,mintimes,maxt,maxtimes));
+
+			dt = bt;
+			if(dt < best )
+			{
+				otn = nt;
+				best = RSB_MIN_ABOVE_INF(best,dt,tinf);
+				RSB_STAT_TAKE(it,otn,ct,dt,tt,bt,wt,ss,times,tstp);
+			}
+			rsb__tattr_sets(ttrp,dtn,nt,dt,otn,times);/* FIXME: if no threads tuning, shall set dtpo = btpo, as well as ttrp.optt=0 */
+			if(dtn == nt) RSB_STAT_TAKE(it,otn,ct,dt,tt,bt,wt,ss,times,tstp+1);
+		}
+		mkl_set_num_threads(ont);
+done:
+		ttrp->ttt += rsb_time(); /* ttrp->ttt = tt; */
+		tpo = best; /* tpo = 1.0 / best; */
+		*otnp = otn;
+	}
+	else
+	{
+		dt = -rsb_time();
+		errval = rsb__do_mkl_csr_spmm(VA, m, k, n, nnz, IA, JA, b, ldb, c, ldc, alphap, betap, transA, typecode, flags);
+		dt += rsb_time();
+		/* tpo = 1.0 / dt; */
+		tpo = dt;
+	}
+	if(tpop)
+		*tpop = tpo;
+	return errval;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__do_mkl_csr_spsv(const void *VA, const MKL_INT m, const MKL_INT k, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * x, void * y, const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`
+{
+	char transA_mkl = rsb_rsb_to_mkl_trans(transA);
+	char matdescra[]={RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL};
+	matdescra[0] = rsb_rsb_to_mkl_sym(flags); // general ?
+	matdescra[1] = rsb_rsb_to_mkl_upl(flags); // up or lo ?
+	matdescra[2] = singlequote(n); // not unit diagonal
+	matdescra[3] = singlequote(c); // zero based indexing
+
+foreach(`mtype',RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	`mkl_'RSB_M4_SPBLAS_TYPE_CHARCODE(mtype)`csrsv'(&transA_mkl,(MKL_INT*)(&m),(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)alphap,matdescra,(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)VA,(MKL_INT*)JA,(MKL_INT*)IA,(MKL_INT*)(IA+1),(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)x,(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)y);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__do_mkl_csr_spsm(const void *VA, const MKL_INT m, const MKL_INT nrhs, const MKL_INT * IA, const MKL_INT *JA, const void * b, void * c, const void *alphap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags, const MKL_INT ldb, const MKL_INT ldc)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`
+{
+	char transA_mkl = rsb_rsb_to_mkl_trans(transA);
+	char matdescra[]={RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL,RSB_NUL};
+	matdescra[0] = rsb_rsb_to_mkl_sym(flags); // general ?
+	matdescra[1] = rsb_rsb_to_mkl_upl(flags); // up or lo ?
+	matdescra[2] = singlequote(n); // not unit diagonal
+	matdescra[3] = singlequote(c); // zero based indexing
+
+foreach(`mtype',RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	`mkl_'RSB_M4_SPBLAS_TYPE_CHARCODE(mtype)`csrsm'(&transA_mkl,(MKL_INT*)(&m),(MKL_INT*)(&nrhs),(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)alphap,matdescra,(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)VA,(MKL_INT*)JA,(MKL_INT*)IA,(MKL_INT*)(IA+1),(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)b,(RSB_M4_RSB_TYPE_TO_MKL_TYPE(int)*)&ldb,(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)c,(RSB_M4_RSB_TYPE_TO_MKL_TYPE(int)*)&ldc);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__mkl_csr_spsv_bench(const void *VA, const MKL_INT m, const MKL_INT k/*, const MKL_INT n*/, const MKL_INT nnz, const MKL_INT * IA, const MKL_INT *JA, const void * b, /*const MKL_INT ldb,*/ void * c,/* const MKL_INT ldc,*/ const void *alphap, const void * betap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags, rsb_thread_t *otnp, rsb_time_t *tpop, struct rsb_tattr_t* ttrp, struct rsb_ts_t*tstp)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_time_t dt, tpo;
+
+	if(otnp)
+	{
+		RSB_MKL_THREADS_TUNING_ODECLS
+		RSB_MKL_SET_THREADS_RANGE(lnt,unt,otnp)
+
+		for(nt=lnt;nt<=unt;++nt)
+		{
+			RSB_MKL_THREADS_TUNING_IDECLS
+			if(nt) mkl_set_num_threads(nt);
+
+			do
+			{
+				errval = rsb__do_mkl_csr_spsv(VA, m, k, nnz, IA, JA, b, c, alphap, betap, transA, typecode, flags);
+				RSB_SAMPLE_STAT(it,ct,dt,tt,bt,wt,ss,tinf,times);
+			}
+			while(RSB_REPEAT(ct-it,times,mint,mintimes,maxt,maxtimes));
+
+			dt = bt;
+			if(dt < best )
+			{
+				otn = nt;
+				best = RSB_MIN_ABOVE_INF(best,dt,tinf);
+				RSB_STAT_TAKE(it,otn,ct,dt,tt,bt,wt,ss,times,tstp);
+			}
+			rsb__tattr_sets(ttrp,dtn,nt,dt,otn,times);/* FIXME: if no threads tuning, shall set dtpo = btpo, as well as ttrp.optt=0 */
+			if(dtn == nt) RSB_STAT_TAKE(it,otn,ct,dt,tt,bt,wt,ss,times,tstp+1);
+		}
+		mkl_set_num_threads(ont);
+done:
+		ttrp->ttt += rsb_time(); /* ttrp->ttt = tt; */
+		tpo = best; /* tpo = 1.0 / best; */
+		*otnp = otn;
+	}
+	else
+	{
+		dt = -rsb_time();
+		errval = rsb__do_mkl_csr_spsv(VA, m, k, nnz, IA, JA, b, c, alphap, betap, transA, typecode, flags);
+		dt += rsb_time();
+		/* tpo = 1.0 / dt; */
+		tpo = dt;
+	}
+	if(tpop)
+		*tpop = tpo;
+	return errval;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__mkl_csr_spsm_bench(const void *VA, const MKL_INT m, const MKL_INT nrhs, const MKL_INT * IA, const MKL_INT *JA, const void * b, void * c, const void *alphap, rsb_trans_t transA, rsb_type_t typecode, rsb_flags_t flags, const MKL_INT ldb, const MKL_INT ldc, rsb_thread_t *otnp, rsb_time_t *tpop, struct rsb_tattr_t* ttrp, struct rsb_ts_t*tstp)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_time_t dt, tpo;
+
+	if(otnp)
+	{
+		RSB_MKL_THREADS_TUNING_ODECLS
+		RSB_MKL_SET_THREADS_RANGE(lnt,unt,otnp)
+
+		for(nt=lnt;nt<=unt;++nt)
+		{
+			RSB_MKL_THREADS_TUNING_IDECLS
+			if(nt) mkl_set_num_threads(nt);
+
+			do
+			{
+				errval = rsb__do_mkl_csr_spsm(VA, m, nrhs, IA, JA, b, c, alphap, transA, typecode, flags, ldb, ldc);
+				RSB_SAMPLE_STAT(it,ct,dt,tt,bt,wt,ss,tinf,times);
+			}
+			while(RSB_REPEAT(ct-it,times,mint,mintimes,maxt,maxtimes));
+
+			dt = bt;
+			if(dt < best )
+			{
+				otn = nt;
+				best = RSB_MIN_ABOVE_INF(best,dt,tinf);
+				RSB_STAT_TAKE(it,otn,ct,dt,tt,bt,wt,ss,times,tstp);
+			}
+			rsb__tattr_sets(ttrp,dtn,nt,dt,otn,times);/* FIXME: if no threads tuning, shall set dtpo = btpo, as well as ttrp.optt=0 */
+			if(dtn == nt) RSB_STAT_TAKE(it,otn,ct,dt,tt,bt,wt,ss,times,tstp+1);
+		}
+		mkl_set_num_threads(ont);
+done:
+		ttrp->ttt += rsb_time(); /* ttrp->ttt = tt; */
+		tpo = best; /* tpo = 1.0 / best; */
+		*otnp = otn;
+	}
+	else
+	{
+		dt = -rsb_time();
+		errval = rsb__do_mkl_csr_spsm(VA, m, nrhs, IA, JA, b, c, alphap, transA, typecode, flags, ldb, ldc);
+		dt += rsb_time();
+		/* tpo = 1.0 / dt; */
+		tpo = dt;
+	}
+	if(tpop)
+		*tpop = tpo;
+	return errval;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__mkl_coo2csr(const MKL_INT m, const MKL_INT k, const MKL_INT nnz, const void *IVA, const MKL_INT * IIA, const MKL_INT *IJA, const void *OVA, const MKL_INT * OIA, const MKL_INT *OJA, rsb_type_t typecode, const MKL_INT mib)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`
+{
+	int info;
+	int job[6];
+	job[0] = 1; // coo2csr (=1: convert COO to CSR; =2: also sort the CSR column indices in increasing order within each row)
+	job[1] = mib; // index base of the CSR arrays (mib may be 0 or 1)
+	job[2] = 0; // 0 based coo
+	job[3] = 0; // ignored
+	job[4] = nnz; // ignored here
+	job[5] = 0; // fill all three arrays
+
+foreach(`mtype',RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	`mkl_'RSB_M4_SPBLAS_TYPE_CHARCODE(mtype)`csrcoo'(job,(MKL_INT*)(&m),(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)OVA,(MKL_INT*)OJA,(MKL_INT*)OIA,(MKL_INT*)(&nnz),(RSB_M4_RSB_TYPE_TO_MKL_TYPE(mtype)*)(IVA),(MKL_INT*)IIA,(MKL_INT*)IJA,&info);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+#endif /* RSB_WANT_MKL */
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#endif  /* RSB_RSB_MKL_H_INCLUDED */
+')dnl
+dnl
+/* @endcond */
+
diff --git a/rsb_mmio.c b/rsb_mmio.c
new file mode 100644
index 0000000..72d7189
--- /dev/null
+++ b/rsb_mmio.c
@@ -0,0 +1,519 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/**
+ * @file
+ * @author Michele Martone
+ * @brief Matrix Market I/O library for ANSI C.
+ */
+/*
+ *   See http://math.nist.gov/MatrixMarket for details.
+ *
+ *   FIXME : remove dangling printf's
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <ctype.h>
+
+#include "rsb_mmio.h"
+#include "rsb_mio.h"
+
+int rsb__mm_is_valid(MM_typecode matcode)
+{
+    if (!rsb_mm_is_matrix(matcode)) return 0;
+    if (rsb_mm_is_dense(matcode) && rsb_mm_is_pattern(matcode)) return 0;
+    if (rsb_mm_is_real(matcode) && rsb_mm_is_hermitian(matcode)) return 0;
+    if (rsb_mm_is_pattern(matcode) && (rsb_mm_is_hermitian(matcode) || 
+                rsb_mm_is_skew(matcode))) return 0;
+    return 1;
+}
+
+//int rsb__mm_read_banner(FILE *f, MM_typecode *matcode)
+int rsb__mm_read_banner(FILE *f, FILE * ngzfd, MM_typecode *matcode)
+{
+    char line[MM_MAX_LINE_LENGTH];
+    char banner[MM_MAX_TOKEN_LENGTH];
+    char mtxs[MM_MAX_TOKEN_LENGTH]; 
+    char crd[MM_MAX_TOKEN_LENGTH];
+    char data_type[MM_MAX_TOKEN_LENGTH];
+    char storage_scheme[MM_MAX_TOKEN_LENGTH];
+    char *p;
+
+
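+    /* A valid banner line reads e.g.: "%%MatrixMarket matrix coordinate real general". */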
+    rsb_mm_clear_typecode(matcode);  
+
+    if(ngzfd)
+    {
+    if (fgets(line,MM_MAX_LINE_LENGTH,ngzfd) == NULL) 
+        return MM_PREMATURE_EOF;
+    }
+    else
+    {
+    		if (((char*)rsb_fgets(line,MM_MAX_LINE_LENGTH,f)) == NULL) /* stupid cast for PGI hairiness */
+        return MM_PREMATURE_EOF;
+    }
+
+    if (sscanf(line, "%s %s %s %s %s", banner, mtxs, crd, data_type, 
+        storage_scheme) != 5)
+        return MM_PREMATURE_EOF;
+
+    for (p=mtxs; *p!='\0'; *p=tolower(*p),p++);  /* convert to lower case */
+    for (p=crd; *p!='\0'; *p=tolower(*p),p++);  
+    for (p=data_type; *p!='\0'; *p=tolower(*p),p++);
+    for (p=storage_scheme; *p!='\0'; *p=tolower(*p),p++);
+
+    /* check for banner */
+    if (strncmp(banner, MatrixMarketBanner, strlen(MatrixMarketBanner)) != 0)
+        return MM_NO_HEADER;
+
+    /* first field should be "mtx" */
+    if (strcmp(mtxs, MM_MTX_STR) != 0)
+        return  MM_UNSUPPORTED_TYPE;
+    rsb_mm_set_matrix(matcode);
+
+
+    /* second field describes whether this is a sparse matrix (in coordinate
+            storage) or a dense array */
+
+
+    if (strcmp(crd, MM_SPARSE_STR) == 0)
+        rsb_mm_set_sparse(matcode);
+    else
+    if (strcmp(crd, MM_DENSE_STR) == 0)
+            rsb_mm_set_dense(matcode);
+    else
+        return MM_UNSUPPORTED_TYPE;
+    
+
+    /* third field */
+
+    if (strcmp(data_type, MM_REAL_STR) == 0)
+        rsb_mm_set_real(matcode);
+    else
+    if (strcmp(data_type, MM_COMPLEX_STR) == 0)
+        rsb_mm_set_complex(matcode);
+    else
+    if (strcmp(data_type, MM_PATTERN_STR) == 0)
+        rsb_mm_set_pattern(matcode);
+    else
+    if (strcmp(data_type, MM_INT_STR) == 0)
+        rsb_mm_set_integer(matcode);
+    else
+        return MM_UNSUPPORTED_TYPE;
+    
+
+    /* fourth field */
+
+    if (strcmp(storage_scheme, MM_GENERAL_STR) == 0)
+        rsb_mm_set_general(matcode);
+    else
+    if (strcmp(storage_scheme, MM_SYMM_STR) == 0)
+        rsb_mm_set_symmetric(matcode);
+    else
+    if (strcmp(storage_scheme, MM_HERM_STR) == 0)
+        rsb_mm_set_hermitian(matcode);
+    else
+    if (strcmp(storage_scheme, MM_SKEW_STR) == 0)
+        rsb_mm_set_skew(matcode);
+    else
+        return MM_UNSUPPORTED_TYPE;
+        
+
+    return 0;
+}
+
+int rsb__mm_write_mtx_crd_size(FILE *f, int M, int N, int nz)
+{
+    /* fprintf returns the number of characters written (negative on error),
+       not an item count, so test for a write error */
+    if (fprintf(f, "%d %d %d\n", M, N, nz) < 0)
+        return MM_COULD_NOT_WRITE_FILE;
+    else 
+        return 0;
+}
+
+//int rsb__mm_read_mtx_crd_size(FILE *f, int *M, int *N, int *nz )
+int rsb__mm_read_mtx_crd_size(FILE *f, FILE * ngzfd, int *M, int *N, int *nz)
+{
+    char line[MM_MAX_LINE_LENGTH];
+    int num_items_read;
+
+    /* set return null parameter values, in case we exit with errors */
+    *M = *N = *nz = 0;
+
+    /* now continue scanning until you reach the end-of-comments */
+    do 
+    {
+	    if(ngzfd)
+	    {
+	        if (fgets(line,MM_MAX_LINE_LENGTH,ngzfd) == NULL) 
+	            return MM_PREMATURE_EOF;
+	    }
+	    else
+	    {
+    		if (((char*)rsb_fgets(line,MM_MAX_LINE_LENGTH,f)) == NULL) /* stupid cast for PGI hairiness */
+	            return MM_PREMATURE_EOF;
+	    }
+    }while (line[0] == '%');
+
+    /* line[] is either blank or has M,N, nz */
+    if (sscanf(line, "%d %d %d", M, N, nz) == 3)
+        return 0;
+        
+    else
+    do
+    { 
+	    if(ngzfd)
+        num_items_read = fscanf(ngzfd,"%d %d %d",M,N,nz); 
+	    else
+        num_items_read = rsb_fscanf(f,"%d %d %d",M,N,nz,NULL); 
+        if (num_items_read == EOF) return MM_PREMATURE_EOF;
+    }
+    while (num_items_read != 3);
+
+    return 0;
+}
+
+
+//int rsb__mm_read_mtx_array_size(FILE *f, int *M, int *N)
+int rsb__mm_read_mtx_array_size(FILE *f, FILE *ngzfd, int *M, int *N)
+{
+    char line[MM_MAX_LINE_LENGTH];
+    int num_items_read;
+    /* set return null parameter values, in case we exit with errors */
+    *M = *N = 0;
+	
+    /* now continue scanning until you reach the end-of-comments */
+    if(ngzfd)
+    {
+    do 
+    {
+        if (fgets(line,MM_MAX_LINE_LENGTH,ngzfd) == NULL) 
+            return MM_PREMATURE_EOF;
+    }while (line[0] == '%');
+
+    /* line[] is either blank or has M,N, nz */
+    if (sscanf(line, "%d %d", M, N) == 2)
+        return 0;
+        
+    else /* we have a blank line */
+    do
+    { 
+        num_items_read = fscanf(ngzfd, "%d %d", M, N); 
+        if (num_items_read == EOF) return MM_PREMATURE_EOF;
+    }
+    while (num_items_read != 2);
+    }
+    else
+    if(f)
+    {
+    do 
+    {
+        if ((char*)rsb_fgets(line,MM_MAX_LINE_LENGTH,f) == NULL) 
+            return MM_PREMATURE_EOF;
+    }while (line[0] == '%');
+
+    /* line[] is either blank or has M,N, nz */
+    if (sscanf(line, "%d %d", M, N) == 2)
+        return 0;
+        
+    else /* we have a blank line */
+    do
+    { 
+        num_items_read = rsb_fscanf(f, "%d %d", M, N, NULL, NULL); 
+        if (num_items_read == EOF) return MM_PREMATURE_EOF;
+    }
+    while (num_items_read != 2);
+    }
+
+    return 0;
+}
+
+int rsb__mm_write_mtx_array_size(FILE *f, int M, int N)
+{
+    /* as above, test fprintf for a negative (error) return */
+    if (fprintf(f, "%d %d\n", M, N) < 0)
+        return MM_COULD_NOT_WRITE_FILE;
+    else 
+        return 0;
+}
+
+
+
+/*-------------------------------------------------------------------------*/
+
+/******************************************************************/
+/* use when IA[], JA[], and VA[] are already allocated */
+/******************************************************************/
+
+int rsb_mm_read_mtx_crd_data(FILE *f, int M, int N, int nz, rsb_coo_idx_t IA[], rsb_coo_idx_t JA[],
+        double VA[], MM_typecode matcode)
+{
+    int i;
+    if (rsb_mm_is_complex(matcode))
+    {
+        for (i=0; i<nz; i++)
+            if (fscanf(f, "%d %d %lg %lg", &IA[i], &JA[i], &VA[2*i], &VA[2*i+1])
+                != 4) return MM_PREMATURE_EOF;
+    }
+    else if (rsb_mm_is_real(matcode))
+    {
+        for (i=0; i<nz; i++)
+        {
+            if (fscanf(f, "%d %d %lg\n", &IA[i], &JA[i], &VA[i])
+                != 3) return MM_PREMATURE_EOF;
+
+        }
+    }
+
+    else if (rsb_mm_is_pattern(matcode))
+    {
+        for (i=0; i<nz; i++)
+            if (fscanf(f, "%d %d", &IA[i], &JA[i])
+                != 2) return MM_PREMATURE_EOF;
+    }
+    else
+        return MM_UNSUPPORTED_TYPE;
+
+    return 0;
+        
+}
+
+int rsb__mm_read_mtx_crd_entry(FILE *f, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA,
+        double *real, double *imag, MM_typecode matcode)
+{
+    int iI,iJ;
+    if (rsb_mm_is_complex(matcode))
+    {
+            if (fscanf(f, "%d %d %lg %lg", &iI, &iJ, real, imag)
+                != 4) return MM_PREMATURE_EOF;
+		IA[0]=(rsb_coo_idx_t)iI;
+		JA[0]=(rsb_coo_idx_t)iJ;
+    }
+    else if (rsb_mm_is_real(matcode))
+    {
+            if (fscanf(f, "%d %d %lg\n", &iI, &iJ, real)
+                != 3) return MM_PREMATURE_EOF;
+		IA[0]=(rsb_coo_idx_t)iI;
+		JA[0]=(rsb_coo_idx_t)iJ;
+    }
+
+    else if (rsb_mm_is_pattern(matcode))
+    {
+            if (fscanf(f, "%d %d", &iI, &iJ ) != 2) return MM_PREMATURE_EOF;
+		IA[0]=(rsb_coo_idx_t)iI;
+		JA[0]=(rsb_coo_idx_t)iJ;
+    }
+    else
+        return MM_UNSUPPORTED_TYPE;
+
+    return 0;
+        
+}
+
+
+/************************************************************************
+    rsb_mm_read_mtx_crd()  fills M, N, nz, the value array, and returns the
+                        typecode, e.g. 'MCRS'
+
+                        if matrix is complex, values[] is of size 2*nz,
+                            (nz pairs of real/imaginary values)
+************************************************************************/
+
+int rsb_mm_read_mtx_crd(char *fname, int *M, int *N, int *nz, rsb_coo_idx_t **IA, rsb_coo_idx_t **JA, 
+        double **VA, MM_typecode *matcode)
+{
+    int ret_code;
+    FILE *f;
+
+    if (strcmp(fname, "stdin") == 0) f=stdin;
+    else
+    if ((f = fopen(fname, "r")) == NULL)
+        return MM_COULD_NOT_READ_FILE;
+
+
+    if ((ret_code = rsb__mm_read_banner(NULL,f, matcode)) != 0)
+        return ret_code;
+
+    if (!(rsb__mm_is_valid(*matcode) && rsb_mm_is_sparse(*matcode) && 
+            rsb_mm_is_matrix(*matcode)))
+        return MM_UNSUPPORTED_TYPE;
+
+    if ((ret_code = rsb__mm_read_mtx_crd_size(f,NULL,M,N,nz)) != 0)
+        return ret_code;
+
+
+    *IA = (rsb_coo_idx_t *)  malloc(*nz * sizeof(rsb_coo_idx_t));
+    *JA = (rsb_coo_idx_t *)  malloc(*nz * sizeof(rsb_coo_idx_t));
+    *VA = NULL;
+
+    if (rsb_mm_is_complex(*matcode))
+    {
+        *VA = (double *) malloc(*nz * 2 * sizeof(double));
+        ret_code = rsb_mm_read_mtx_crd_data(f, *M, *N, *nz, *IA, *JA, *VA, 
+                *matcode);
+        if (ret_code != 0) return ret_code;
+    }
+    else if (rsb_mm_is_real(*matcode))
+    {
+        *VA = (double *) malloc(*nz * sizeof(double));
+        ret_code = rsb_mm_read_mtx_crd_data(f, *M, *N, *nz, *IA, *JA, *VA, 
+                *matcode);
+        if (ret_code != 0) return ret_code;
+    }
+
+    else if (rsb_mm_is_pattern(*matcode))
+    {
+        ret_code = rsb_mm_read_mtx_crd_data(f, *M, *N, *nz, *IA, *JA, *VA, 
+                *matcode);
+        if (ret_code != 0) return ret_code;
+    }
+
+    if (f != stdin) fclose(f);
+    return 0;
+}
+
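+/*
+ * A minimal usage sketch for rsb_mm_read_mtx_crd() (illustrative only;
+ * "A.mtx" is a placeholder file name and error handling is elided):
+ *
+ *     int M, N, nz;
+ *     rsb_coo_idx_t *IA, *JA;
+ *     double *VA;
+ *     MM_typecode mc;
+ *     if (rsb_mm_read_mtx_crd("A.mtx", &M, &N, &nz, &IA, &JA, &VA, &mc) == 0)
+ *     {
+ *         // nz triplets (IA[i], JA[i], VA[i]); complex data interleaves 2*nz doubles in VA
+ *         free(IA); free(JA); free(VA);
+ *     }
+ */
+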
+int rsb__mm_write_banner(FILE *f, MM_typecode matcode)
+{
+    char *str = rsb__mm_typecode_to_str(matcode);
+    int ret_code;
+
+    if (str == NULL)
+        return MM_COULD_NOT_WRITE_FILE;
+    /* fprintf returns a character count (negative on error), not an
+       argument count, so test for a negative return */
+    ret_code = fprintf(f, "%s %s\n", MatrixMarketBanner, str);
+    free(str);
+    if (ret_code < 0)
+        return MM_COULD_NOT_WRITE_FILE;
+    else
+        return 0;
+}
+
+int rsb_mm_write_mtx_crd(char fname[], int M, int N, int nz, int IA[], int JA[],
+        double VA[], MM_typecode matcode)
+{
+    FILE *f;
+    int i;
+
+    if (strcmp(fname, "stdout") == 0) 
+        f = stdout;
+    else
+    if ((f = fopen(fname, "w")) == NULL)
+        return MM_COULD_NOT_WRITE_FILE;
+    
+    /* print banner followed by typecode */
+    fprintf(f, "%s ", MatrixMarketBanner);
+    fprintf(f, "%s\n", rsb__mm_typecode_to_str(matcode));
+
+    /* print matrix sizes and nonzeros */
+    fprintf(f, "%d %d %d\n", M, N, nz);
+
+    /* print values */
+    if (rsb_mm_is_pattern(matcode))
+        for (i=0; i<nz; i++)
+            fprintf(f, "%d %d\n", IA[i], JA[i]);
+    else
+    if (rsb_mm_is_real(matcode))
+        for (i=0; i<nz; i++)
+            fprintf(f, "%d %d %20.16g\n", IA[i], JA[i], VA[i]);
+    else
+    if (rsb_mm_is_complex(matcode))
+        for (i=0; i<nz; i++)
+            fprintf(f, "%d %d %20.16g %20.16g\n", IA[i], JA[i], VA[2*i], 
+                        VA[2*i+1]);
+    else
+    {
+        if (f != stdout) fclose(f);
+        return MM_UNSUPPORTED_TYPE;
+    }
+
+    if (f !=stdout) fclose(f);
+
+    return 0;
+}
+  
+
+/**
+*  Create a new copy of a string s.  rsb_mm_strdup() is a common routine, but
+*  not part of ANSI C, so it is included here.  Used by rsb__mm_typecode_to_str().
+*
+*/
+char *rsb_mm_strdup(const char *s)
+{
+	size_t len = strlen(s);
+	char *s2 = (char *) malloc((len+1)*sizeof(char));
+	return s2 ? strcpy(s2, s) : NULL; /* tolerate malloc failure */
+}
+
+char  *rsb__mm_typecode_to_str(MM_typecode matcode)
+{
+    char buffer[MM_MAX_LINE_LENGTH];
+    char *types[4];
+    int error =0;
+
+    /* check for MTX type */
+    if (rsb_mm_is_matrix(matcode)) 
+        types[0] = MM_MTX_STR;
+    else
+        error=1;
+
+    /* check for CRD or ARR matrix */
+    if (rsb_mm_is_sparse(matcode))
+        types[1] = MM_SPARSE_STR;
+    else
+    if (rsb_mm_is_dense(matcode))
+        types[1] = MM_DENSE_STR;
+    else
+        return NULL;
+
+    /* check for element data type */
+    if (rsb_mm_is_real(matcode))
+        types[2] = MM_REAL_STR;
+    else
+    if (rsb_mm_is_complex(matcode))
+        types[2] = MM_COMPLEX_STR;
+    else
+    if (rsb_mm_is_pattern(matcode))
+        types[2] = MM_PATTERN_STR;
+    else
+    if (rsb_mm_is_integer(matcode))
+        types[2] = MM_INT_STR;
+    else
+        return NULL;
+
+
+    /* check for symmetry type */
+    if (rsb_mm_is_general(matcode))
+        types[3] = MM_GENERAL_STR;
+    else
+    if (rsb_mm_is_symmetric(matcode))
+        types[3] = MM_SYMM_STR;
+    else 
+    if (rsb_mm_is_hermitian(matcode))
+        types[3] = MM_HERM_STR;
+    else 
+    if (rsb_mm_is_skew(matcode))
+        types[3] = MM_SKEW_STR;
+    else
+        return NULL;
+
+    sprintf(buffer,"%s %s %s %s", types[0], types[1], types[2], types[3]);
+    return rsb_mm_strdup(buffer);
+
+}
+/* @endcond */
diff --git a/rsb_mmio.h b/rsb_mmio.h
new file mode 100644
index 0000000..24b261a
--- /dev/null
+++ b/rsb_mmio.h
@@ -0,0 +1,150 @@
+/*
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/**
+ * @file
+ * @brief
+ *   Matrix Market I/O library for ANSI C.
+ *   See http://math.nist.gov/MatrixMarket for details.
+ * @author Michele Martone
+ */
+
+#ifndef MM_IO_H_INCLUDED
+#define MM_IO_H_INCLUDED
+
+#include "rsb.h"	/* just for some type compatibility */
+
+#define MM_MAX_LINE_LENGTH 1025
+#define MatrixMarketBanner "%%MatrixMarket"
+#define MM_MAX_TOKEN_LENGTH 64
+
+typedef char MM_typecode[];
+
+char *rsb__mm_typecode_to_str(MM_typecode matcode);
+
+int rsb__mm_read_banner(FILE *f, FILE * ngzfd, MM_typecode *matcode);
+int rsb__mm_read_mtx_crd_size(FILE *f, FILE * ngzfd, int *M, int *N, int *nz);
+int rsb__mm_read_mtx_array_size(FILE *f, FILE * ngzfd, int *M, int *N);
+
+int rsb__mm_write_banner(FILE *f, MM_typecode matcode);
+int rsb__mm_write_mtx_crd_size(FILE *f, int M, int N, int nz);
+int rsb__mm_write_mtx_array_size(FILE *f, int M, int N);
+
+
+/********************* MM_typecode query functions ***************************/
+
+#define rsb_mm_is_matrix(typecode)	((typecode)[0]=='M')
+
+#define rsb_mm_is_sparse(typecode)	((typecode)[1]=='C')
+#define rsb_mm_is_coordinate(typecode)((typecode)[1]=='C')
+#define rsb_mm_is_dense(typecode)	((typecode)[1]=='A')
+#define rsb_mm_is_array(typecode)	((typecode)[1]=='A')
+
+#define rsb_mm_is_complex(typecode)	((typecode)[2]=='C')
+#define rsb_mm_is_real(typecode)		((typecode)[2]=='R')
+#define rsb_mm_is_pattern(typecode)	((typecode)[2]=='P')
+#define rsb_mm_is_integer(typecode) ((typecode)[2]=='I')
+
+#define rsb_mm_is_symmetric(typecode)((typecode)[3]=='S')
+#define rsb_mm_is_general(typecode)	((typecode)[3]=='G')
+#define rsb_mm_is_skew(typecode)	((typecode)[3]=='K')
+#define rsb_mm_is_hermitian(typecode)((typecode)[3]=='H')
+
+int rsb__mm_is_valid(MM_typecode matcode);		/* too complex for a macro */
+
+
+/********************* MM_typecode modify functions ***************************/
+
+#define rsb_mm_set_matrix(typecode)	((*typecode)[0]='M')
+#define rsb_mm_set_coordinate(typecode)	((*typecode)[1]='C')
+#define rsb_mm_set_array(typecode)	((*typecode)[1]='A')
+#define rsb_mm_set_dense(typecode)	rsb_mm_set_array(typecode)
+#define rsb_mm_set_sparse(typecode)	rsb_mm_set_coordinate(typecode)
+
+#define rsb_mm_set_complex(typecode)((*typecode)[2]='C')
+#define rsb_mm_set_real(typecode)	((*typecode)[2]='R')
+#define rsb_mm_set_pattern(typecode)((*typecode)[2]='P')
+#define rsb_mm_set_integer(typecode)((*typecode)[2]='I')
+
+
+#define rsb_mm_set_symmetric(typecode)((*typecode)[3]='S')
+#define rsb_mm_set_general(typecode)((*typecode)[3]='G')
+#define rsb_mm_set_skew(typecode)	((*typecode)[3]='K')
+#define rsb_mm_set_hermitian(typecode)((*typecode)[3]='H')
+
+#define rsb_mm_clear_typecode(typecode) ((*typecode)[0]=(*typecode)[1]= \
+									(*typecode)[2]=' ',(*typecode)[3]='G')
+
+#define rsb_mm_initialize_typecode(typecode) rsb_mm_clear_typecode(typecode)
+
+
+/********************* Matrix Market error codes ***************************/
+
+
+#define MM_COULD_NOT_READ_FILE	11
+#define MM_PREMATURE_EOF		12
+#define MM_NOT_MTX				13
+#define MM_NO_HEADER			14
+#define MM_UNSUPPORTED_TYPE		15
+#define MM_LINE_TOO_LONG		16
+#define MM_COULD_NOT_WRITE_FILE	17
+
+
+/******************** Matrix Market internal definitions ********************
+
+   MM_matrix_typecode: 4-character sequence
+
+				    object 		sparse/   	data        storage
+						  		dense     	type        scheme
+
+   string position:	 [0]        [1]			[2]         [3]
+
+   Matrix typecode:  M(atrix)  C(oord)		R(eal)   	G(eneral)
+						        A(array)	C(omplex)   H(ermitian)
+											P(attern)   S(ymmetric)
+								    		I(nteger)	K(skew)
+
+ ***********************************************************************/
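+
+/* Example: the header "%%MatrixMarket matrix coordinate real general"
+   corresponds to the 4-character typecode {'M','C','R','G'}. */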
+
+#define MM_MTX_STR		"matrix"
+#define MM_ARRAY_STR	"array"
+#define MM_DENSE_STR	"array"
+#define MM_COORDINATE_STR "coordinate" 
+#define MM_SPARSE_STR	"coordinate"
+#define MM_COMPLEX_STR	"complex"
+#define MM_REAL_STR		"real"
+#define MM_INT_STR		"integer"
+#define MM_GENERAL_STR  "general"
+#define MM_SYMM_STR		"symmetric"
+#define MM_HERM_STR		"hermitian"
+#define MM_SKEW_STR		"skew-symmetric"
+#define MM_PATTERN_STR  "pattern"
+
+
+/*  high level routines */
+
+int rsb_mm_write_mtx_crd(char fname[], int M, int N, int nz, int IA[], int JA[],
+		 double VA[], MM_typecode matcode);
+int rsb_mm_read_mtx_crd_data(FILE *f, int M, int N, int nz, rsb_coo_idx_t IA[], rsb_coo_idx_t JA[],
+		double VA[], MM_typecode matcode);
+int rsb__mm_read_mtx_crd_entry(FILE *f, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, double *real, double *img, MM_typecode matcode);
+#endif /* MM_IO_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_mmls.c b/rsb_mmls.c
new file mode 100644
index 0000000..fa18c78
--- /dev/null
+++ b/rsb_mmls.c
@@ -0,0 +1,125 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief A Matrix Market files oriented `ls' program.
+ */
+#include "rsb_common.h"
+#include "rsb_internals.h"
+int rsb_mtx_ls_main(const int argc, char * argv[])
+{
+#if 0
+	rsb_option options[] = {
+	    {"matrix-ls",		no_argument, NULL,  0x006D6C73},/* should be synced to rsb_mtx_ls_main */
+	    {0,0,0,0}
+	};
+	int opt_index = 0;
+	const char * flags="";
+	int c;
+    	for (;;)
+	{
+		c = rsb_getopt_long(argc, argv, flags , options, &opt_index);
+		if (c == -1)break;
+
+		switch (c)
+		{
+			case 0x006D6C73:
+				++a0;
+			default:
+			break;
+	    	}
+	}
+#endif
+	rsb_bool_t want_latex = RSB_BOOL_TRUE;
+	int a,a0=1;
+
+	if(want_latex)
+		RSB_STDOUT(
+			"\\begin{table}[]"
+			"\\begin{footnotesize}"
+			"\\begin{center} \\begin{tabular}"
+//			"{l@{\extracolsep{.5em}}l@{\extracolsep{.5em}}l@{\extracolsep{.5em}}l@{\extracolsep{.5em}}l@{\extracolsep{.5em}}l}\hline"
+			"{lllll}\\hline\n"
+			"matrix & rows & columns & nnz & nnz/row \\\\\\hline\n"
+			  );
+
+	for(a=a0;a<argc;++a)
+	if(argv[a][0]!='-')
+	{
+		const char * filename = argv[a];
+		rsb_bool_t is_symmetric = RSB_BOOL_FALSE;
+		rsb_bool_t is_hermitian = RSB_BOOL_FALSE;
+		rsb_bool_t is_pattern = RSB_BOOL_FALSE;
+		rsb_bool_t is_lower = RSB_BOOL_FALSE;
+		rsb_bool_t is_upper = RSB_BOOL_FALSE;
+		rsb_bool_t is_vector = RSB_BOOL_FALSE;
+		rsb_nnz_idx_t nnz;
+		rsb_coo_idx_t m,k;
+		rsb_type_t typecode = RSB_NUMERICAL_TYPE_INVALID_TYPE;
+
+		if(RSB_SOME_ERROR(rsb__util_mm_info_matrix_f(filename,&m,&k,&nnz,&typecode,&is_symmetric,&is_hermitian,&is_pattern,&is_lower,&is_upper,&is_vector)) || is_vector)
+			RSB_STDERR("problems with \"%s\"\n",filename);
+		else
+		{
+			if(want_latex)
+			RSB_STDOUT("%s & %zd & %zd & %zd & %.0lf"
+				"\\\\%s\n"
+				//,filename
+				,rsb__basename(filename)
+				,(size_t)m,(size_t)k,(size_t)nnz
+				,((double)nnz)/m
+				,is_symmetric?"%%symm":"%%unsymm"
+			);
+			else
+			RSB_STDOUT("%s\t%zd\t%zd\t%zd"
+				"\t%s\t%s\t%s\n"
+				,rsb__basename(filename)
+				,(size_t)m,(size_t)k,(size_t)nnz
+				,is_pattern?  "pattern":""
+				,is_symmetric?"symmetric":""
+				,is_hermitian?"hermitian":""
+			);
+		}
+	}
+	if(want_latex)
+		RSB_STDOUT(
+			"\\hline \\end{tabular} \\caption{Caption.}"
+			"\\label{testbed_matrices}"
+			"\\end{center}"
+			"\\end{footnotesize}"
+			"\\end{table}\n"
+			);
+
+
+	return 0;
+}
+
+/*
+int main(const int argc, char * const argv[])
+{
+	return rsb_mtx_ls_main(argc,argv);
+}
+*/
+
+/* @endcond */
diff --git a/rsb_mod.m4 b/rsb_mod.m4
new file mode 100644
index 0000000..0737bb0
--- /dev/null
+++ b/rsb_mod.m4
@@ -0,0 +1,107 @@
+dnl
+dnl
+include(`rsb_fortran_macros.m4')dnl
+dnl
+dnl	@author: Michele Martone
+dnl
+dnl	FIXME: this code is OBSOLETE and marked for deletion.
+dnl
+!
+dnl ! author: Michele Martone
+!
+! This is a Sparse BLAS interface.
+! It has been generated by a M4 script.
+!
+! supported types       : foreach(`mtype',RSB_M4_MATRIX_TYPES,` RSB_M4_C2F_TYPE(mtype)')
+! supported operations  : foreach(`pmop',RSB_M4_RSBLAS_INTERFACE_OPS,` RSB_M4_RSBLAS_INTERFACE_IDENTIFIER(pmop)')
+!
+!
+dnl      module blas_sparse
+      module rsb_mod
+        implicit none
+
+public
+        foreach(`pmop',RSB_M4_RSBLAS_INTERFACE_OPS,`
+        interface RSB_M4_RSBLAS_INTERFACE_IDENTIFIER(pmop)
+         ! RSB_M4_RSBLAS_SUBROUTINE_HELP_COMMENT(pmop,`*')
+         module procedure  RSB_M4_INTERFACE_LIST(RSB_M4_COMMA_LIST((RSB_M4_CHOPTRAILINGSPACES(foreach(`mtype',RSB_M4_MATRIX_TYPES,`RSB_M4_RSBLAS_SUBROUTINE_IDENTIFIER(pmop,mtype) ')))))dnl
+        end interface
+        ')
+
+        integer, parameter :: rsb_const_success=0
+        integer, parameter :: rsb_const_failure=-1 ! value returned by this interface on failure
+        integer, parameter :: rsb_const_not_available=-9999 ! value returned by this interface when deactivated
+
+contains
+
+        subroutine RSB_M4_RSBLAS_INTERFACE_RADIX`_'init(info)
+          implicit none
+          integer::info
+          info = rsb_const_success
+#ifdef RSB_HAVE_RSB_KERNELS
+          call RSB_M4_RSBLAS2VBR_SUBROUTINE_RADIX`'init(info)
+          if(info.ne.rsb_const_success)info=rsb_const_failure
+#else /* RSB_HAVE_RSB_KERNELS */
+          info = rsb_const_not_available
+#endif /* RSB_HAVE_RSB_KERNELS */
+        end subroutine
+
+        subroutine RSB_M4_RSBLAS_INTERFACE_RADIX`_'exit(info)
+          implicit none
+          integer::info
+          info = rsb_const_success
+#ifdef RSB_HAVE_RSB_KERNELS
+          call RSB_M4_RSBLAS2VBR_SUBROUTINE_RADIX`'exit(info)
+          if(info.ne.rsb_const_success)info=rsb_const_failure
+#else /* RSB_HAVE_RSB_KERNELS */
+          info = rsb_const_not_available
+#endif /* RSB_HAVE_RSB_KERNELS */
+        end subroutine
+
+        foreach(`pmop',RSB_M4_RSBLAS_INTERFACE_OPS,`
+        foreach(`mtype',RSB_M4_MATRIX_TYPES,`
+        subroutine RSB_M4_RSBLAS_SUBROUTINE_IDENTIFIER(pmop,mtype)`'RSB_M4_RSBLAS_SUBROUTINE_ARGS(pmop,mtype)
+          ! RSB_M4_RSBLAS_SUBROUTINE_HELP_COMMENT(pmop,mtype)
+          implicit none
+RSB_M4_RSBLAS_SUBROUTINE_INFO_DECLARATION(info)dnl
+RSB_M4_RSBLAS_SUBROUTINE_ARGS_DECLARATION(pmop,mtype)dnl
+#ifdef RSB_HAVE_RSB_KERNELS
+          info = rsb_const_success
+ifelse(pmop,`get_rows_sparse',`dnl
+          if(append)appendi=1
+          if(has_iren)has_ireni=1
+')`'dnl
+ifelse(pmop,`ussm',`dnl
+dnl          itrans=78 ! FIXME: temporary
+')dnl
+ifelse(pmop,`usmm',`dnl
+dnl          itrans=78 ! FIXME: temporary
+')dnl
+ifelse(pmop,`usmv',`dnl
+dnl          itrans=78 ! FIXME: temporary
+')dnl
+ifelse(pmop,`infinity_norm',`dnl
+dnl          itrans=78 ! FIXME: temporary
+')dnl
+          call RSB_M4_RSBLAS2VBR_SUBROUTINE_IDENTIFIER(pmop,mtype)RSB_M4_ARGS_TO_ACTUAL_ARGS_FOR_RSB_INTERFACE((RSB_M4_RSBLAS_SUBROUTINE_ARGS(pmop,mtype)))
+ifelse(pmop,`destroy_sparse_matrix',`dnl
+')`'dnl
+ifelse(pmop,`allocate_sparse_matrix',`dnl
+')`'dnl
+ifelse(pmop,`get_matrix_nnz',`dnl
+')`'dnl
+ifelse(pmop,`infinity_norm',`dnl
+           real_in=in ! FIXME : this is a conversion
+')dnl
+          if(info.ne.rsb_const_success)info = rsb_const_failure
+#else /* RSB_HAVE_RSB_KERNELS */
+          info = rsb_const_not_available
+#endif /* RSB_HAVE_RSB_KERNELS */
+        end subroutine
+        ')
+        ')
+dnl      end module blas_sparse
+      end module rsb_mod
+dnl
+dnl
+
diff --git a/rsb_msort_up.c b/rsb_msort_up.c
new file mode 100644
index 0000000..5ef175d
--- /dev/null
+++ b/rsb_msort_up.c
@@ -0,0 +1,333 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/**
+ * @file
+ * @author Michele Martone
+ * @brief Sorting routines.
+ */
+#include "rsb_msort_up.h"
+#include "rsb_internals.h"
+
+static rsb_nnz_idx_t iabs(rsb_nnz_idx_t i)
+{
+	/**
+	 * \ingroup gr_internals
+	 * Fortran style iabs().
+  	 */
+	return i<0?-i:i;
+}
+
+static rsb_nnz_idx_t isign(rsb_nnz_idx_t i,rsb_nnz_idx_t j)
+{
+	/**
+	 * \ingroup gr_internals
+	 * Fortran style isign():
+	 * "Returns the absolute value of A times the sign of B."
+  	 */
+	register rsb_nnz_idx_t ia=i<0?-i:i;
+	return j>=0?ia:-ia;
+}
+
+int rsb_do_msort_up(rsb_nnz_idx_t n, const rsb_nnz_idx_t * RSB_RESTRICT k, rsb_nnz_idx_t * RSB_RESTRICT l)
+{
+	/**
+	 	\ingroup gr_internals
+		Adapted C code from PSBLAS Fortran msort_up routine.
+		\param n
+		\param k an n   sized array, for input indices
+		\param l an n+2 sized array, for permutation links
+	
+		TODO : document
+	*/
+	/* integer k(n),l(0:n+1) */
+	rsb_nnz_idx_t p,q,s,t;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	/* intrinsic iabs,isign */
+	/*
+		first step: we are preparing ordered sublists, exploiting
+		what order was already in the input data; negative links
+	  	mark the end of the sublists
+	*/
+
+	--k;	/* pointer fix for C */
+
+	l[0] = 1;
+	t = n + 1;
+	for(p = 1; RSB_LIKELY(p<=n - 1);++p)
+	{
+		if (k[p] <= k[p+1])
+		{
+			l[p] = p + 1;
+		}
+		else
+		{
+			l[t] = - (p+1);
+			t = p;
+		}
+	}
+	l[t] = 0;
+	l[n] = 0;
+	// see if the input was already sorted
+	if (l[n+1] == 0)
+	{
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+	else
+	{
+		l[n+1] = iabs(l[n+1]);
+	}
+
+mergepass:
+	/*
+		 otherwise, begin a pass through the list.
+		 throughout all the subroutine we have:
+		  p, q: pointing to the sublists being merged
+		  s: pointing to the most recently processed record
+		  t: pointing to the end of previously completed sublist
+	*/
+	s = 0;
+	t = n + 1;
+	p = l[s];
+	q = l[t];
+	if (RSB_UNLIKELY(q == 0)) goto mergepass_exit;
+
+outer:
+
+	if (k[p] > k[q])
+	{
+		l[s] = isign(q,l[s]);
+		s = q;
+		q = l[q];
+		if (q > 0)
+		{
+			while(1)
+			{
+				if (k[p]<= k[q]) goto outer;
+				s = q;
+				q = l[q];
+				if (q <= 0) break;
+			}
+		}
+		l[s] = p;
+		s = t;
+		while(1)
+		{
+			t = p;
+			p = l[p];
+			if (p <= 0) break;
+		}
+	}
+	else
+	{
+		l[s] = isign(p,l[s]);
+		s = p;
+		p = l[p];
+		if (p>0)
+		{
+			while(1)
+			{
+				if (k[p] > k[q]) goto outer;
+				s = p;
+				p = l[p];
+				if (p <= 0) break;
+			}
+		}
+		//  otherwise, one sublist ended, and we append to it the rest
+		// of the other one.
+		l[s] = q;
+		s = t;
+		while(1)
+		{
+			t = q;
+			q = l[q];
+			if (q <= 0) break;
+		}
+	}
+
+	p = -p;
+	q = -q;
+	if (q == 0)
+	{
+
+		l[s] = isign(p,l[s]);
+		l[t] = 0;
+		goto outer_out;
+	}
+
+	goto outer;
+outer_out:
+
+	goto mergepass;
+mergepass_exit:
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
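+/*
+ * Assumed output convention (as in PSBLAS msort_up, not re-verified here):
+ * on RSB_ERR_NO_ERROR, l[0] holds the 1-based index of the smallest key and
+ * l[i] that of the successor of element i, with 0 terminating the chain
+ * (iabs() guards against leftover sublist sign marks); RSB_ERR_BADARGS
+ * signals that the input was already sorted. An illustrative traversal,
+ * visit() being a hypothetical callback:
+ *
+ *     for (rsb_nnz_idx_t p = iabs(l[0]); p != 0; p = iabs(l[p]))
+ *         visit(k[p-1]);
+ */
+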
+int rsb__do_msort_up2coo(rsb_nnz_idx_t n, const rsb_coo_idx_t * k, rsb_nnz_idx_t * l)
+{
+	/**
+	 	\ingroup gr_internals
+		Adapted C code from PSBLAS Fortran msort_up routine.
+		Modified to handle lexicographical order.
+		The only difference with rsb_do_msort_up is the comparison on k.
+
+		\param n
+		\param k an 2*n   sized array, for input indices
+		\param l an n+2 sized array, for permutation links
+	
+		TODO : document
+		FIXME : UNTESTED, UNFINISHED
+	*/
+	/* integer k(n),l(0:n+1) */
+	rsb_nnz_idx_t p,q,s,t;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	/* intrinsic iabs,isign */
+	/*
+		first step: we are preparing ordered sublists, exploiting
+		what order was already in the input data; negative links
+	  	mark the end of the sublists
+	*/
+
+	--k;	/* pointer fix for C */
+	--k;	/* pointer fix for C */
+
+	l[0] = 1;
+	t = n + 1;
+	for(p = 1; p<=n - 1;++p)
+	{
+/* lexicographical order, less than or equal */
+#define RSB_LO_LTOE(H1,L1,H2,L2)	(H1<H2 || (H1==H2 && L1<=L2))
+/* lexicographical order, greater than */
+#define RSB_LO_GT(H1,L1,H2,L2)		(H1>H2 || (H1==H2 && L1> L2))
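+/* E.g. (2,3) <= (2,5) and (2,5) < (3,1): high (row) indices compare first, low (column) indices break ties. */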
+
+		if( RSB_LO_LTOE(k[2*p],k[2*p+1],k[2*p+2],k[2*p+2+1]) )
+		{
+			l[p] = p + 1;
+		}
+		else
+		{
+			l[t] = - (p+1);
+			t = p;
+		}
+	}
+	l[t] = 0;
+	l[n] = 0;
+	// see if the input was already sorted
+	if (l[n+1] == 0)
+	{
+		errval = RSB_ERR_BADARGS;
+		goto err ;
+	}
+	else
+	{
+		l[n+1] = iabs(l[n+1]);
+	}
+
+mergepass:
+	/*
+		 otherwise, begin a pass through the list.
+		 throughout all the subroutine we have:
+		  p, q: pointing to the sublists being merged
+		  s: pointing to the most recently processed record
+		  t: pointing to the end of previously completed sublist
+	*/
+	s = 0;
+	t = n + 1;
+	p = l[s];
+	q = l[t];
+	if (RSB_UNLIKELY(q == 0)) goto mergepass_exit;
+
+outer:
+
+	if( RSB_LO_GT(k[2*p],k[2*p+1],k[2*q],k[2*q+1]) )
+	{
+		l[s] = isign(q,l[s]);
+		s = q;
+		q = l[q];
+		if (q > 0)
+		{
+			while(1)
+			{
+				if( RSB_LO_LTOE(k[2*p],k[2*p+1],k[2*q],k[2*q+1]) ) goto outer;
+				s = q;
+				q = l[q];
+				if (q <= 0) break;
+			}
+		}
+		l[s] = p;
+		s = t;
+		while(1)
+		{
+			t = p;
+			p = l[p];
+			if (p <= 0) break;
+		}
+	}
+	else
+	{
+		l[s] = isign(p,l[s]);
+		s = p;
+		p = l[p];
+		if (p>0)
+		{
+			while(1)
+			{
+				if( RSB_LO_GT(k[2*p],k[2*p+1],k[2*q],k[2*q+1]) ) goto outer;
+				s = p;
+				p = l[p];
+				if (p <= 0) break;
+			}
+		}
+		//  otherwise, one sublist ended, and we append to it the rest
+		// of the other one.
+		l[s] = q;
+		s = t;
+		while(1)
+		{
+			t = q;
+			q = l[q];
+			if (q <= 0) break;
+		}
+	}
+
+	p = -p;
+	q = -q;
+	if (q == 0)
+	{
+
+		l[s] = isign(p,l[s]);
+		l[t] = 0;
+		goto outer_out;
+	}
+
+	goto outer;
+outer_out:
+
+	goto mergepass;
+mergepass_exit:
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+/* @endcond */
diff --git a/rsb_msort_up.h b/rsb_msort_up.h
new file mode 100644
index 0000000..89cd20d
--- /dev/null
+++ b/rsb_msort_up.h
@@ -0,0 +1,39 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+
+/**
+ * @file
+ * @author Michele Martone
+ * @brief Sorting routines.
+ */
+
+#ifndef RSB_MSORT_UP_H_INCLUDED
+#define RSB_MSORT_UP_H_INCLUDED
+
+#include "rsb_internals.h"
+
+int rsb_do_msort_up(rsb_nnz_idx_t n, const rsb_nnz_idx_t * RSB_RESTRICT k, rsb_nnz_idx_t * RSB_RESTRICT l);
+int rsb__do_msort_up2coo(rsb_nnz_idx_t n, const rsb_coo_idx_t * k, rsb_nnz_idx_t * l);
+
+#endif /* RSB_MSORT_UP_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_ompio.c b/rsb_ompio.c
new file mode 100644
index 0000000..a570a99
--- /dev/null
+++ b/rsb_ompio.c
@@ -0,0 +1,293 @@
+/* @cond INNERDOC */
+
+/*
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file is an adaptation of Gilles Gouaillardet OpenMP+fgets_unlocked suggested implementation
+ * This code is still experimental and untested.
+ */
+#include "rsb_internals.h"
+#include "rsb_lock.h"
+
+#ifdef RSB_HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif /* RSB_HAVE_SYS_STAT_H */
+#if RSB_WANT_ZLIB_SUPPORT
+#include <zlib.h>
+#endif /* RSB_WANT_ZLIB_SUPPORT */
+#include <stdio.h>
+#include "rsb_internals.h"
+#include "rsb_ompio.h"
+RSB_INTERNALS_COMMON_HEAD_DECLS
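+/*
+ * Reading scheme (summary added for clarity): each thread loops, (1)
+ * entering a critical section to grab a private batch of up to
+ * RSB_MAX_FGETS_LINES raw lines from the stream -- via gzgets() for
+ * gzipped input, fgets_unlocked() otherwise -- while advancing the shared
+ * cursor `i`, then (2) parsing its batch in parallel, converting the
+ * 1-based Matrix Market coordinates to 0-based ones. Only the line
+ * grabbing is serialized; each per-thread tally `res` is merged into *_re
+ * in a final critical section.
+ */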
+#if RSB_WANT_OMPIO_SUPPORT
+void rsb_ompio_DOUBLE (rsb_nnz_idx_t *nnz, FILE * fd, FILE * ngzfd,double**dval, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, size_t *_re){
+	rsb_nnz_idx_t i=0;
+# pragma omp parallel RSB_NTC
+{
+	int index = 0, j=0, toread=0;
+	size_t res = 0;
+	char line[RSB_MAX_FGETS_LINES][MM_MAX_LINE_LENGTH];
+	while (index < *nnz) {
+#pragma omp critical
+{
+		index=i;
+		if (i < *nnz) {
+			toread = ((*nnz-index)>RSB_MAX_FGETS_LINES)?RSB_MAX_FGETS_LINES:(*nnz-index);
+			for (j=0;j<toread;j++)
+#if RSB_WANT_ZLIB_SUPPORT
+				if (!ngzfd)
+					gzgets(fd,line[j],MM_MAX_LINE_LENGTH);
+				else
+#endif /* RSB_WANT_ZLIB_SUPPORT */
+					fgets_unlocked(line[j],MM_MAX_LINE_LENGTH,ngzfd);
+			i += toread;
+		}
+}
+		if (index < *nnz) {
+			for(j=0;j<toread;j++,index++) {
+				int iI,iJ;
+				char *p1,*p2;
+				p1=line[j];
+
+				iI=strtol(p1,&p2,10);
+				iJ=strtol(p2,&p1,10);
+				(*dval)[index]=strtod(p1,NULL);
+				res += 1;
+				(*IA)[index]=(rsb_coo_idx_t)iI;
+				(*JA)[index]=(rsb_coo_idx_t)iJ;
+				(*IA)[index]--;  // adjust from 1-based to 0-based
+				(*JA)[index]--;
+			}
+		}
+	}
+# pragma omp critical
+{
+	*_re+=res;
+}
+}
+}
+
+void rsb_ompio_FLOAT (rsb_nnz_idx_t *nnz, FILE * fd, FILE * ngzfd,float**dval, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, size_t *_re){
+	rsb_nnz_idx_t i=0;
+# pragma omp parallel RSB_NTC
+{
+	int index = 0, j=0, toread=0;
+	size_t res = 0;
+	char line[RSB_MAX_FGETS_LINES][MM_MAX_LINE_LENGTH];
+	while (index < *nnz) {
+#pragma omp critical
+{
+		index=i;
+		if (i < *nnz) {
+			toread = ((*nnz-index)>RSB_MAX_FGETS_LINES)?RSB_MAX_FGETS_LINES:(*nnz-index);
+			for (j=0;j<toread;j++)
+#if RSB_WANT_ZLIB_SUPPORT
+				if (!ngzfd)
+					gzgets(fd,line[j],MM_MAX_LINE_LENGTH);
+				else
+#endif /* RSB_WANT_ZLIB_SUPPORT */
+					fgets_unlocked(line[j],MM_MAX_LINE_LENGTH,ngzfd);
+			i += toread;
+		}
+}
+		if (index < *nnz) {
+			for(j=0;j<toread;j++,index++) {
+				int iI,iJ;
+				char *p1,*p2;
+				p1=line[j];
+
+				iI=strtol(p1,&p2,10);
+				iJ=strtol(p2,&p1,10);
+				(*dval)[index]=strtof(p1,NULL);
+				res += 1;
+				(*IA)[index]=(rsb_coo_idx_t)iI;
+				(*JA)[index]=(rsb_coo_idx_t)iJ;
+				(*IA)[index]--;  // adjust from 1-based to 0-based
+				(*JA)[index]--;
+			}
+		}
+	}
+# pragma omp critical
+{
+	*_re+=res;
+}
+}
+}
+
+void rsb_ompio_FLOAT_COMPLEX (rsb_nnz_idx_t *nnz, FILE * fd, FILE * ngzfd,float complex**dval, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, size_t *_re){
+	rsb_nnz_idx_t i=0;
+# pragma omp parallel RSB_NTC
+{
+	int index = 0, j=0, toread=0;
+	size_t res = 0;
+	char line[RSB_MAX_FGETS_LINES][MM_MAX_LINE_LENGTH];
+	while (index < *nnz) {
+#pragma omp critical
+{
+		index=i;
+		if (i < *nnz) {
+			toread = ((*nnz-index)>RSB_MAX_FGETS_LINES)?RSB_MAX_FGETS_LINES:(*nnz-index);
+			for (j=0;j<toread;j++)
+#if RSB_WANT_ZLIB_SUPPORT
+				if (!ngzfd)
+					gzgets(fd,line[j],MM_MAX_LINE_LENGTH);
+				else
+#endif /* RSB_WANT_ZLIB_SUPPORT */
+					fgets_unlocked(line[j],MM_MAX_LINE_LENGTH,ngzfd);
+			i += toread;
+		}
+}
+		if (index < *nnz) {
+			for(j=0;j<toread;j++,index++) {
+				int iI,iJ;
+				char *p1,*p2;
+				p1=line[j];
+
+				iI=strtol(p1,&p2,10);
+				iJ=strtol(p2,&p1,10);
+				/* *dval points to float complex data: store as a float pair */
+				((float*)(*dval))[2*index+0]=strtof(p1,&p2);
+				((float*)(*dval))[2*index+1]=strtof(p2,&p1);
+				res += 1;
+				(*IA)[index]=(rsb_coo_idx_t)iI;
+				(*JA)[index]=(rsb_coo_idx_t)iJ;
+				(*IA)[index]--;  // adjust from 1-based to 0-based
+				(*JA)[index]--;
+			}
+		}
+	}
+# pragma omp critical
+{
+	*_re+=res;
+}
+}
+}
+
+void rsb_ompio_DOUBLE_COMPLEX (rsb_nnz_idx_t *nnz, FILE * fd, FILE * ngzfd,double complex**dval, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, size_t *_re){
+	rsb_nnz_idx_t i=0;
+# pragma omp parallel RSB_NTC
+{
+	int index = 0, j=0, toread=0;
+	size_t res = 0;
+	char line[RSB_MAX_FGETS_LINES][MM_MAX_LINE_LENGTH];
+	while (index < *nnz) {
+#pragma omp critical
+{
+		index=i;
+		if (i < *nnz) {
+			toread = ((*nnz-index)>RSB_MAX_FGETS_LINES)?RSB_MAX_FGETS_LINES:(*nnz-index);
+			for (j=0;j<toread;j++)
+#if RSB_WANT_ZLIB_SUPPORT
+				if (!ngzfd)
+					gzgets(fd,line[j],MM_MAX_LINE_LENGTH);
+				else
+#endif /* RSB_WANT_ZLIB_SUPPORT */
+					fgets_unlocked(line[j],MM_MAX_LINE_LENGTH,ngzfd);
+			i += toread;
+		}
+}
+		if (index < *nnz) {
+			for(j=0;j<toread;j++,index++) {
+				int iI,iJ;
+				char *p1,*p2;
+				p1=line[j];
+
+				iI=strtol(p1,&p2,10);
+				iJ=strtol(p2,&p1,10);
+				/* *dval points to double complex data: store as a double pair */
+				((double*)(*dval))[2*index+0]=strtod(p1,&p2);
+				((double*)(*dval))[2*index+1]=strtod(p2,&p1);
+				res += 1;
+				(*IA)[index]=(rsb_coo_idx_t)iI;
+				(*JA)[index]=(rsb_coo_idx_t)iJ;
+				(*IA)[index]--;  // adjust from 1-based to 0-based
+				(*JA)[index]--;
+			}
+		}
+	}
+# pragma omp critical
+{
+	*_re+=res;
+}
+}
+}
+
+void rsb_ompio_PATTERN (rsb_nnz_idx_t *nnz, FILE * fd, FILE * ngzfd, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, size_t *_re){
+	rsb_nnz_idx_t i=0;
+# pragma omp parallel RSB_NTC
+{
+	int index = 0, j=0, toread=0;
+	size_t res = 0;
+	char line[RSB_MAX_FGETS_LINES][MM_MAX_LINE_LENGTH];
+	while (index < *nnz) {
+#pragma omp critical
+{
+		index=i;
+		if (i < *nnz) {
+			toread = ((*nnz-index)>RSB_MAX_FGETS_LINES)?RSB_MAX_FGETS_LINES:(*nnz-index);
+			for (j=0;j<toread;j++)
+#if RSB_WANT_ZLIB_SUPPORT
+				if (!ngzfd)
+					gzgets(fd,line[j],MM_MAX_LINE_LENGTH);
+				else
+#endif /* RSB_WANT_ZLIB_SUPPORT */
+					fgets_unlocked(line[j],MM_MAX_LINE_LENGTH,ngzfd);
+			i += toread;
+		}
+}
+		if (index < *nnz) {
+			for(j=0;j<toread;j++,index++) {
+				int iI,iJ;
+				char *p1,*p2;
+				p1=line[j];
+
+				iI=strtol(p1,&p2,10);
+				iJ=strtol(p2,&p1,10);
+				res += 1;
+				(*IA)[index]=(rsb_coo_idx_t)iI;
+				(*JA)[index]=(rsb_coo_idx_t)iJ;
+				(*IA)[index]--;  // adjust from 1-based to 0-based
+				(*JA)[index]--;
+			}
+		}
+	}
+# pragma omp critical
+{
+	*_re+=res;
+}
+}
+}
+
+
+#endif	/* RSB_WANT_OMPIO_SUPPORT */
+
+/* @endcond */
diff --git a/rsb_ompio.h b/rsb_ompio.h
new file mode 100644
index 0000000..8b02568
--- /dev/null
+++ b/rsb_ompio.h
@@ -0,0 +1,57 @@
+/* @cond INNERDOC */
+/*!
+ @file
+ @brief
+ Prototypes for the parallel Matrix Market input code (one reader routine
+ per numerical type), generated from rsb_ompio.m4.
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file is an adaptation of Gilles Gouaillardet's suggested OpenMP+fgets_unlocked implementation.
+ * This code is still experimental and untested.
+ */
+#include "rsb_internals.h"
+#include "rsb_lock.h"
+
+#ifndef RSB_OMPIO_H_INCLUDED
+#define RSB_OMPIO_H_INCLUDED
+
+#define RSB_MAX_FGETS_LINES 1536
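+/* Lines grabbed per critical section: a larger batch amortizes the
+ * serialization cost of line grabbing over the parallel parsing work
+ * (descriptive comment; see the reading scheme summarized in rsb_ompio.c). */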
+#if RSB_WANT_OMPIO_SUPPORT
+void rsb_ompio_DOUBLE (rsb_nnz_idx_t *nnz, FILE * fd, FILE * ngzfd,double**dval, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, size_t *_re);
+void rsb_ompio_FLOAT (rsb_nnz_idx_t *nnz, FILE * fd, FILE * ngzfd,float**dval, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, size_t *_re);
+void rsb_ompio_FLOAT_COMPLEX (rsb_nnz_idx_t *nnz, FILE * fd, FILE * ngzfd,float complex**dval, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, size_t *_re);
+void rsb_ompio_DOUBLE_COMPLEX (rsb_nnz_idx_t *nnz, FILE * fd, FILE * ngzfd,double complex**dval, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, size_t *_re);
+void rsb_ompio_PATTERN (rsb_nnz_idx_t *nnz, FILE * fd, FILE * ngzfd, rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, size_t *_re);
+#endif	/* RSB_WANT_OMPIO_SUPPORT */
+
+#endif	/* RSB_OMPIO_H_INCLUDED */
+
+/* @endcond */
diff --git a/rsb_ompio.m4 b/rsb_ompio.m4
new file mode 100644
index 0000000..fa5313a
--- /dev/null
+++ b/rsb_ompio.m4
@@ -0,0 +1,130 @@
+dnl
+dnl
+dnl	@author: Michele Martone
+dnl
+/* @cond INNERDOC */
+include(`rsb_krnl_macros.m4')dnl
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file is an adaptation of Gilles Gouaillardet's suggested OpenMP+fgets_unlocked implementation.
+ * This code is still experimental and untested.
+ */
+dnl RSB_M4_HEADER_MESSAGE()dnl
+dnl
+dnl
+#include "rsb_internals.h"
+#include "rsb_lock.h"
+dnl
+ifdef(`ONLY_WANT_HEADERS',`
+#ifndef RSB_OMPIO_H_INCLUDED
+#define RSB_OMPIO_H_INCLUDED
+')
+dnl
+dnl 
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#define RSB_MAX_FGETS_LINES 1536
+',`dnl
+#ifdef RSB_HAVE_SYS_STAT_H
+#include <sys/stat.h>
+#endif /* RSB_HAVE_SYS_STAT_H */
+#if RSB_WANT_ZLIB_SUPPORT
+#include <zlib.h>
+#endif /* RSB_WANT_ZLIB_SUPPORT */
+#include <stdio.h>
+#include "rsb_internals.h"
+#include "rsb_ompio.h"
+RSB_INTERNALS_COMMON_HEAD_DECLS
+')dnl
+dnl
+`#if RSB_WANT_OMPIO_SUPPORT'
+dnl
+foreach(`mtype',(WANT_TYPES,pattern),`dnl
+void rsb_ompio_`'touppercase(RSB_M4_CHOPSPACES(mtype))`' (rsb_nnz_idx_t *nnz, FILE * fd, FILE * ngzfd`'ifelse(mtype,`pattern',`',`,'mtype`**dval'), rsb_coo_idx_t ** IA, rsb_coo_idx_t ** JA, size_t *_re)`'dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	rsb_nnz_idx_t i=0;
+# pragma omp parallel RSB_NTC
+{
+	int index = 0, j=0, toread=0;
+	size_t res = 0;
+	char line[RSB_MAX_FGETS_LINES][MM_MAX_LINE_LENGTH];
+	while (index < *nnz) {
+#pragma omp critical
+{
+		index=i;
+		if (i < *nnz) {
+			toread = ((*nnz-index)>RSB_MAX_FGETS_LINES)?RSB_MAX_FGETS_LINES:(*nnz-index);
+			for (j=0;j<toread;j++)
+#if RSB_WANT_ZLIB_SUPPORT
+				if (!ngzfd)
+					gzgets(fd,line[j],MM_MAX_LINE_LENGTH);
+				else
+#endif /* RSB_WANT_ZLIB_SUPPORT */
+					fgets_unlocked(line[j],MM_MAX_LINE_LENGTH,ngzfd);
+			i += toread;
+		}
+}
+		if (index < *nnz) {
+			for(j=0;j<toread;j++,index++) {
+			int iI,iJ;
+			char * p1, *p2;
+			p1=line[j];
+
+			iI=strtol(p1,&p2,10);
+			iJ=strtol(p2,&p1,10);
+ifelse(mtype,`pattern',`',`dnl
+`'dnl
+ifelse(mtype,`double',`*(*dval+index)=strtod(p1,NULL);',`dnl
+ifelse(mtype,`float',`*(*dval+index)=strtof(p1,NULL);',`dnl
+ifelse(mtype,`double complex',`dnl
+*(((double*)(*dval))+2*index+0)=strtod(p1,&p2);
+*(((double*)(*dval))+2*index+1)=strtod(p2,&p1);dnl
+',`dnl
+ifelse(mtype,`float complex',`dnl
+*(((float*)(*dval))+2*index+0)=strtof(p1,&p2);
+*(((float*)(*dval))+2*index+1)=strtof(p2,&p1);dnl
+',`dnl
+ifelse(mtype,`int',`*(*dval+index)=strtol(p1,NULL,10);',`dnl
+ifelse(mtype,`char',`*(*dval+index)=strtol(p1,NULL,10);',`dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+dnl
+')dnl
+			res += 1;
+			(*IA)[index]=(rsb_coo_idx_t)iI;
+			(*JA)[index]=(rsb_coo_idx_t)iJ;
+			(*IA)[index]--;  // adjust from 1-based to 0-based
+			(*JA)[index]--;
+			}
+		}
+	}
+# pragma omp critical
+{
+	*_re+=res;
+}
+}
+}
+
+')dnl
+')dnl
+dnl
+
+`#endif'	/* RSB_WANT_OMPIO_SUPPORT */
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+')dnl
+dnl
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`
+#endif	/* RSB_OMPIO_H_INCLUDED */
+')
+dnl
+/* @endcond */
+dnl
diff --git a/rsb_op.c b/rsb_op.c
new file mode 100644
index 0000000..eeb96b5
--- /dev/null
+++ b/rsb_op.c
@@ -0,0 +1,35 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains generic operation representation structures.
+ * */
+
+/* TODO: find a more elegant solution for the following gcc -Wall oriented hack */
+static int goo(void);
+static int foo(void){return goo();}
+static int goo(void){return foo();}
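+
+/* Interpretive note (not from the original source): the mutual references
+ * above make each function formally "used", which keeps gcc -Wall from
+ * emitting unused-function warnings for this otherwise empty translation
+ * unit. */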
+
+/* @endcond */
diff --git a/rsb_op.h b/rsb_op.h
new file mode 100644
index 0000000..5ca2b90
--- /dev/null
+++ b/rsb_op.h
@@ -0,0 +1,72 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains generic operation representation structures.
+ * */
+
+#ifndef RSB_OP_H_INCLUDED
+#define RSB_OP_H_INCLUDED
+
+#include "rsb_common.h"
+
+	/* FIXME: NEW */
+/*!
+ * \ingroup gr_internals
+ * \brief An internal, helper enumeration.
+ */
+enum rsb_opname_t{
+            rsb_opn_spmv = 1,
+            rsb_opn_spsv = 2,
+            rsb_opn_scal = 3,	/* NOTE: this is a non const matrix op */
+            rsb_opn_inrm = 4,
+            rsb_opn_nop = 0
+};
+
+/*!
+ * \ingroup gr_internals
+ * \brief An internal, helper structure.
+ */
+struct rsb_c_mop_t
+{
+	/**
+	 * const matrix op, of the form:
+	 * y <- alpha A^(trans)  * x + beta * y	(spmv)
+	 * y <- alpha A^(-trans) * y		(spsv: triangular solve)
+	 * ...
+	 */
+	enum rsb_opname_t op;	/* the operation at hand				*/
+	const struct rsb_mtx_t * matrix;	/* the operand matrix 			*/
+	const void * alphap;	/* result vector post-scaling				*/
+	const void * betap;	/* output vector pre-scaling (only if y!=x)	 	*/
+	void * y;		/* (input) output vector 				*/
+	const void * x;		/* input vector 					*/
+	size_t nrhs;		/* number of right hand sides -- applies to both x,y 	*/
+	rsb_trans_t trans;	/* transposition parameter 				*/
+};
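+
+/* Hypothetical usage sketch (illustrative only; mtxAp, x, y, alpha and
+ * beta are assumed to exist -- this is not code from the library):
+ *
+ *	struct rsb_c_mop_t mop;
+ *	mop.op = rsb_opn_spmv;			// y <- alpha*A*x + beta*y
+ *	mop.matrix = mtxAp;			// an assembled rsb_mtx_t
+ *	mop.alphap = &alpha;
+ *	mop.betap = &beta;
+ *	mop.x = x;
+ *	mop.y = y;
+ *	mop.nrhs = 1;				// single right hand side
+ *	mop.trans = RSB_TRANSPOSITION_N;	// no transposition
+ */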
+
+#endif /* RSB_OP_H_INCLUDED */
+/* @endcond */
+
diff --git a/rsb_partition.c b/rsb_partition.c
new file mode 100644
index 0000000..407bbc5
--- /dev/null
+++ b/rsb_partition.c
@@ -0,0 +1,262 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/**
+ * @file
+ * @author Michele Martone
+ * @brief
+ * Auxiliary functionalities file.
+ */
+
+#include <stdlib.h>
+#include "rsb.h"
+#include "rsb_internals.h"
+
+/* void qsort(void *base, size_t nmemb, size_t size,
+	int(*compar)(const void *, const void *)); */
+
+extern struct rsb_session_handle_t rsb_global_session_handle;
+#if 0
+/* 20121001 old code, declaring static vars */
+/**
+ * \ingroup gr_internals
+ * \brief An internal use only structure.
+ * */
+struct nzinfo
+{
+	int i;	/* index */
+	int n;	/* value */
+	rsb_flags_t flags;
+};
+#define BLOCK_START 1
+
+const struct nzinfo *g_sort_array;
+
+static int rsb_compar_nzinfo(const void * ap, const void * bp)
+{
+	int a=*(int*)ap;
+	int b=*(int*)bp;
+	return ( g_sort_array[a].n - g_sort_array[b].n );
+}
+
+static void rsb_qsort_nzinfo(const struct nzinfo*nnzpr, const struct nzinfo*nnzpc, rsb_coo_idx_t *nnzpr_s, rsb_coo_idx_t *nnzpc_s, size_t rows, size_t columns)
+{
+	/* sort by smallest */
+	g_sort_array=nnzpr;
+	qsort( nnzpr_s , (size_t)rows   , sizeof(rsb_coo_idx_t), &rsb_compar_nzinfo );
+	g_sort_array=nnzpc;
+	qsort( nnzpc_s , (size_t)columns, sizeof(rsb_coo_idx_t), &rsb_compar_nzinfo );
+	g_sort_array=NULL;
+}
+
+#define MEDBLOCKSIZE	8		/* FIXME */
+#define MAXBLOCKSIZE	16
+
+static int rsb_do_partition(struct nzinfo * nnzpx,rsb_blk_idx_t * p_x,rsb_blk_idx_t *X_b, rsb_blk_idx_t maxk, int blocksize)
+{
+	int k;
+	rsb_coo_idx_t last_i=0;
+
+	
+	*X_b=0;
+	if( ! ( nnzpx[0].flags & BLOCK_START ) || nnzpx[0].i!=0 )
+	{
+		p_x[*X_b]=0;/* if not here, this will be performed in the next loop */
+		(*X_b)++;
+	}
+
+	for(k=0;k<maxk;++k)
+	{
+		if(
+			( nnzpx[k].flags & BLOCK_START) || 
+			( nnzpx[k].i-last_i >= blocksize) ||
+			( k>0 && k+1<maxk && nnzpx[k-1].n>nnzpx[k].n && 	/* local minimum */
+				nnzpx[k+1].n>nnzpx[k].n && nnzpx[k].i-last_i /* >= MEDBLOCKSIZE */ ) 
+		)
+		{
+			last_i=nnzpx[k].i;
+			p_x[*X_b]=last_i;
+			(*X_b)++;
+			RSB_DO_FLAG_ADD(nnzpx[k].flags,BLOCK_START);	/* we mark a new block */
+		}
+	}
+//	p_x[*X_b]=0;
+//	for(k=0;k<*X_b;++k) p_x[k+1]=p_x[k+1]+p_x[k];
+	p_x[*X_b]=maxk;
+	return 0;
+}
+
+/* 
+ * This implementation partitions a matrix, specified through its nonzero coordinates, into a VBR (variable block row) partitioning.
+ * */
+int rsb__util_nnz2vbr(const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const rsb_nnz_idx_t nnz, const rsb_blk_idx_t rows, const rsb_blk_idx_t columns, rsb_blk_idx_t **p_rp, rsb_blk_idx_t **p_cp, rsb_blk_idx_t *M_b, rsb_blk_idx_t *K_b, int blockrowsize, int blockcolumnsize)
+{
+/**
+	FIXME : UNTESTED
+
+*/
+
+/* Example driver program:
+#include <stdlib.h>
+#include "rsb_partition.h"
+
+int main(void)
+{
+	const int rows=10,columns=10;
+	const rsb_coo_idx_t IA[]={0,1,1,2,3,4,5,6,7,8,9};
+	const rsb_coo_idx_t JA[]={0,2,1,2,3,4,5,6,7,8,9};
+	const rsb_nnz_idx_t NNZ=11;
+	rsb_blk_idx_t *p_r=NULL,*p_c=NULL;
+	rsb_blk_idx_t M_b,K_b;
+	/* 0,0: let the routine pick default block sizes */
+	return rsb__util_nnz2vbr(IA,JA,NNZ,rows,columns,&p_r,&p_c,&M_b,&K_b,0,0);
+}
+*/
+
+	struct nzinfo *nnzpc=NULL  ,*nnzpr=NULL  ;
+	rsb_coo_idx_t *nnzpc_s,*nnzpr_s;
+	int k;
+	rsb_blk_idx_t *p_r,*p_c;
+	if(!M_b||!K_b||!IA||!JA||nnz<1||rows<1||columns<1||!p_rp||!p_cp)return -1;
+
+	nnzpr=calloc(rows   ,sizeof(struct nzinfo));
+	nnzpc=calloc(columns,sizeof(struct nzinfo));
+	nnzpr_s=calloc(rows   ,sizeof(rsb_coo_idx_t));
+	nnzpc_s=calloc(columns,sizeof(rsb_coo_idx_t));
+	p_r=calloc(1+rows,sizeof(rsb_coo_idx_t));
+	p_c=calloc(1+columns,sizeof(rsb_coo_idx_t));
+
+	/* FIXME */
+	if(!nnzpr_s)goto err;
+	if(!nnzpc_s)goto err;
+	if(!nnzpr)goto err;
+	if(!nnzpc)goto err;
+//	if(!p_r)goto err;
+//	if(!p_c)goto err;
+
+	/* check */
+	for(k=0;k<nnz;++k) if(IA[k]>=rows   )goto err;
+	for(k=0;k<nnz;++k) if(JA[k]>=columns)goto err;
+	for(k=0;k<nnz;++k) if(IA[k]< 0   )goto err;
+	for(k=0;k<nnz;++k) if(JA[k]< 0   )goto err;
+
+	/* we count the nonzeros on each column and on each row */
+	for(k=0;k<nnz;++k) nnzpr[IA[k]].i=IA[k], nnzpc[JA[k]].i=JA[k];
+	for(k=0;k<nnz;++k) nnzpr[IA[k]].n++, nnzpc[JA[k]].n++;
+	for(k=0;k<rows   ;++k) nnzpr_s[k]=k;
+	for(k=0;k<columns;++k) nnzpc_s[k]=k;
+
+	/* we sort the index arrays on the basis of the count value */
+	rsb_qsort_nzinfo(nnzpr, nnzpc, nnzpr_s, nnzpc_s, rows, columns);
+//	for(k=0;k<columns;++k) RSB_STDERR("(%d -> %d)\n", nnzpc[nnzpc_s[k]].i, nnzpc[nnzpc_s[k]].n) ; RSB_STDERR("---\n");
+//	for(k=0;k<rows   ;++k) RSB_STDERR("(%d -> %d)\n", nnzpr[nnzpr_s[k]].i, nnzpr[nnzpr_s[k]].n) ; RSB_STDERR("---\n");
+//	for(k=0;k<columns;++k) RSB_STDERR("%d ", nnzpc[nnzpc_s[k]].n) ; RSB_STDERR("\n");
+//	for(k=0;k<rows   ;++k) RSB_STDERR("%d ", nnzpr[nnzpr_s[k]].n) ; RSB_STDERR("\n");
+
+	
+//	RSB_STDERR("c=[0"); for(k=0;k<columns;++k) RSB_STDERR(",%d", nnzpc[k].n) ; RSB_STDERR("];\n");
+//	RSB_STDERR("r=[0"); for(k=0;k<rows   ;++k) RSB_STDERR(",%d", nnzpr[k].n) ; RSB_STDERR("];\n");
+
+	/* now we determine blocks dimensions, using least populated columns/rows as split points */
+
+	/* first heuristic : partition tightly, then expand */
+	for(k=0;k<rows   ;++k) p_r[k]=k;
+	for(k=0;k<columns;++k) p_c[k]=k;
+	
+	/* we sorted the arrays with per-column and per-row nnz counts */
+
+	
+	/* we start with a minimal partitioning (bogus) */
+	*M_b= (rows    / ( MAXBLOCKSIZE/4 ));
+	*K_b= (columns / ( MAXBLOCKSIZE/4 ));
+	/* we use the minimal column and row indices as first block starting indices */
+	for(k=0;k<*M_b;++k) { RSB_DO_FLAG_ADD(nnzpr[nnzpr_s[k]].flags,BLOCK_START); }
+	for(k=0;k<*K_b;++k) { RSB_DO_FLAG_ADD(nnzpc[nnzpc_s[k]].flags,BLOCK_START); }
+//	for(k=0;k<*M_b;++k) RSB_STDERR("break at %d\n",nnzpr[nnzpr_s[k]].i);
+
+	/* 
+	 * we create (right now, with no further strategy, though we would like to use local minimality criteria)
+	 * intermediate blocks of average size
+	 * */
+
+	if(blockrowsize   <1)blockrowsize   =MEDBLOCKSIZE;/* FIXME */
+	if(blockcolumnsize<1)blockcolumnsize=MEDBLOCKSIZE;/* FIXME */
+
+	*M_b=0;
+	*K_b=0;
+	rsb_do_partition(nnzpr,p_r,M_b,rows   ,blockrowsize);
+	rsb_do_partition(nnzpc,p_c,K_b,columns,blockcolumnsize);
+//	RSB_STDERR("M_b:%d\n",*M_b);
+//	RSB_STDERR("K_b:%d\n",*K_b);
+
+
+//	for(k=0;k<*K_b;++k) p_c[k]=p_c[k+1]-p_c[k];
+//	for(k=0;k<*M_b;++k) p_r[k]=p_r[k+1]-p_r[k];
+
+//	for(k=0;k<M_b   ;++k) RSB_STDERR(" %d ", p_r[k]);RSB_STDERR("\n");
+//	for(k=0;k<K_b   ;++k) RSB_STDERR(" %d ", p_c[k]);RSB_STDERR("\n");
+
+	if(nnzpr_s) free(nnzpr_s);
+	if(nnzpc_s) free(nnzpc_s);
+	if(nnzpr) free(nnzpr);
+	if(nnzpc) free(nnzpc);
+//	if(p_r) free(p_r);
+//	if(p_c) free(p_c);
+
+	/* FIXME : MISSING REALLOC FOR P_R, P_C ! */
+	*p_rp=p_r,
+	*p_cp=p_c;
+
+	/* FIXME : FREE MEMORY ! */
+	return 0;
+err:
+	if(nnzpr_s) free(nnzpr_s);
+	if(nnzpc_s) free(nnzpc_s);
+	if(nnzpr) free(nnzpr);
+	if(nnzpc) free(nnzpc);
+//	if(p_r) free(p_r);
+//	if(p_c) free(p_c);
+	/* FIXME : FREE MEMORY ! */
+	return -1;
+}
+#endif
+
+rsb_bool_t rsb__should_rejoin_small_leaf(rsb_nnz_idx_t nnz, rsb_nnz_idx_t mk, rsb_nnz_idx_t uk, rsb_nnz_idx_t lk, rsb_type_t typecode)
+{
+	/**
+	 * Tells whether the candidate leaf quadrants (holding uk, mk-uk,
+	 * lk-mk and nnz-lk nonzeroes respectively) mix a leaf smaller than
+	 * the configured minimum leaf size with one larger than the average
+	 * leaf size -- in which case rejoining the small leaf is advisable.
+	 */
+	return
+		(
+		(( nnz-lk>0 && nnz-lk < ((rsb_global_session_handle.min_leaf_matrix_bytes)/RSB_SIZEOF(typecode))) ||
+		( lk-mk>0 && lk-mk < ((rsb_global_session_handle.min_leaf_matrix_bytes)/RSB_SIZEOF(typecode))) ||
+		( mk-uk>0 && mk-uk < ((rsb_global_session_handle.min_leaf_matrix_bytes)/RSB_SIZEOF(typecode))) ||
+		( uk>0 && uk < ((rsb_global_session_handle.min_leaf_matrix_bytes)/RSB_SIZEOF(typecode))))
+		&&
+		(( nnz-lk>0 && nnz-lk > ((rsb_global_session_handle.avg_leaf_matrix_bytes)/RSB_SIZEOF(typecode))) ||
+		( lk-mk>0 && lk-mk > ((rsb_global_session_handle.avg_leaf_matrix_bytes)/RSB_SIZEOF(typecode))) ||
+		( mk-uk>0 && mk-uk > ((rsb_global_session_handle.avg_leaf_matrix_bytes)/RSB_SIZEOF(typecode))) ||
+		( uk>0 && uk > ((rsb_global_session_handle.avg_leaf_matrix_bytes)/RSB_SIZEOF(typecode))))
+		);
+}
+
+/* @endcond */
diff --git a/rsb_partition.h b/rsb_partition.h
new file mode 100644
index 0000000..9922712
--- /dev/null
+++ b/rsb_partition.h
@@ -0,0 +1,38 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/**
+ * @file
+ * @author Michele Martone
+ * @brief
+ * Auxiliary functionalities header file.
+ */
+#ifndef RSB_PARTITION_H_INCLUDED
+#define RSB_PARTITION_H_INCLUDED
+
+#include "rsb_internals.h"
+
+int rsb__util_nnz2vbr(const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const rsb_nnz_idx_t nnz, const rsb_blk_idx_t rows, const rsb_blk_idx_t columns, rsb_blk_idx_t **p_rp, rsb_blk_idx_t **p_cp, rsb_blk_idx_t *M_b, rsb_blk_idx_t *K_b, int blockrowsize, int blockcolumnsize);
+rsb_bool_t rsb__should_rejoin_small_leaf(rsb_nnz_idx_t nnz, rsb_nnz_idx_t mk, rsb_nnz_idx_t uk, rsb_nnz_idx_t lk, rsb_type_t typecode);
+
+#endif /* RSB_PARTITION_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_pcnt.c b/rsb_pcnt.c
new file mode 100644
index 0000000..5d550da
--- /dev/null
+++ b/rsb_pcnt.c
@@ -0,0 +1,463 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/**
+ * @file
+ * @brief Performance counters related code.
+ * @author Michele Martone
+ * */
+
+#include "rsb_internals.h"
+#include "rsb-config.h"
+#if RSB_HAVE_PTHREAD_H
+#include <pthread.h>
+#endif /* RSB_HAVE_PTHREAD_H */
+
+#ifdef RSB_HAVE_PAPI
+static const rsb_papi_int_t rsb_papi_eventlist [] = {
+#if 0
+	PAPI_L1_DCA,
+	PAPI_L1_DCM,
+	PAPI_L1_DCH,
+	PAPI_L1_DCR,
+	PAPI_L1_DCW,
+
+	PAPI_L1_TCA,
+	PAPI_L1_TCR,
+	PAPI_L1_TCW,
+	PAPI_L1_TCH,
+	PAPI_L1_TCM,
+
+	PAPI_L2_DCA,
+	PAPI_L2_DCM,
+	PAPI_L2_DCH,
+	PAPI_L2_DCR,
+#endif /* 0 */
+#define RSB_WANT_PAPI_P4_COUNTERS 0
+#if RSB_WANT_PAPI_P4_COUNTERS
+	PAPI_L2_TCH,	/* p4 */
+	PAPI_L2_TCM,	/* p4 */
+	
+//	PAPI_PRF_DM,	/* Prefetch data instruction caused a miss */
+	PAPI_L1_LDM,	/* Level 1 load misses */
+#if 0
+	PAPI_L1_STM,
+	PAPI_L2_LDM,
+	PAPI_L2_STM,
+
+	PAPI_L2_TCM,	/* p4 */
+	PAPI_L2_TCH,	/* p4 */
+#endif
+	PAPI_TOT_CYC,	/* p4 */
+	PAPI_TOT_INS,	/* p4 */
+	PAPI_TLB_IM,	/* p4 */
+#if 0
+	PAPI_TLB_TL,
+	PAPI_FMA_INS,	/* p4 */
+#endif
+//	PAPI_TOT_IIS,	/* p4 */
+	PAPI_FP_INS,	/* p4 */
+#if 1
+//	PAPI_INT_INS,	/* p4 */
+//	PAPI_LD_INS,	/* p4 */
+//	PAPI_SR_INS,	/* p4 */
+//	PAPI_VEC_INS,	/* p4 */
+#endif
+	PAPI_RES_STL,	/* p4 */
+#endif
+//	PAPI_FP_STAL,	/* p4 */
+#if 0
+
+	PAPI_FML_INS,
+	PAPI_FAD_INS,
+	PAPI_FDV_INS,
+	PAPI_FSQ_INS,
+	PAPI_FNV_INS,
+#endif
+//	PAPI_FP_OPS,	/* p4 */
+#define RSB_WANT_PAPI_ATOM_COUNTERS 1
+#if RSB_WANT_PAPI_ATOM_COUNTERS
+	/*PAPI_L1_LDM,*/	/* Level 1 load misses */
+	/* PAPI_L2_TCM, */	/* Level 2 load misses */
+	PAPI_L1_TCM,PAPI_L2_TCM,	/* Level 1 load misses */
+#endif /* RSB_WANT_PAPI_ATOM_COUNTERS */
+	0
+};
+#define rsb_num_hwcntrs (sizeof(rsb_papi_eventlist)/sizeof(rsb_papi_int_t)-1)
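+/* Number of PAPI events actually requested: the length of the list above,
+ * minus its terminating 0 sentinel (descriptive comment). */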
+#if 0
+rsb_papi_long eventvals [ rsb_num_hwcntrs ];	/* FIXME */
+rsb_papi_int_t EventSet=PAPI_NULL;
+float real_time=0,proc_time=0,mflips=0;
+float mflops=0,ips=0,ipc=0;/* FIXME : temporary */
+char descr[PAPI_MAX_STR_LEN];
+
+rsb_papi_long flpins=0,flpops=0;
+rsb_papi_long ins=0;
+const PAPI_hw_info_t *hwinfo = NULL;
+#else
+struct rsb_papi_stuff_t{
+rsb_papi_int_t eventlist [rsb_num_hwcntrs];
+rsb_papi_long eventvals [ rsb_num_hwcntrs ];	/* FIXME */
+rsb_papi_int_t EventSet;
+float real_time,proc_time,mflips;
+float mflops,ips,ipc;/* FIXME : temporary */
+char descr[PAPI_MAX_STR_LEN];
+rsb_papi_long flpins,flpops;
+rsb_papi_long ins;
+const PAPI_hw_info_t *hwinfo;
+};
+static struct rsb_papi_stuff_t rps;
+#endif
+#define rsb_papi_mode 0
+
+static rsb_err_t rsb_perf_counters_call(void)
+{
+#define E(M)  {RSB_STDERR("rsb_perf_counters_call : "M);/*goto err;*/}
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if((errval=PAPI_flips(&rps.real_time, &rps.proc_time, &rps.flpins, &rps.mflips))!=PAPI_OK)
+	E("problem calling PAPI_flips()\n")
+	if((errval=PAPI_flops(&rps.real_time, &rps.proc_time, &rps.flpops, &rps.mflops))!=PAPI_OK)
+	E("problem calling PAPI_flops()\n")
+	if((errval=PAPI_ipc(&rps.real_time, &rps.proc_time, &rps.ins, &rps.ipc))!=PAPI_OK)
+	E("problem calling PAPI_ipc()\n")
+#undef E
+
+	RSB_DO_ERR_RETURN(errval)
+	//err:
+	/* should print the error code */
+	//RSB_DO_ERR_RETURN(errval)
+}
+#endif
+
+rsb_err_t rsb_perf_counters_init(void)
+#ifndef RSB_HAVE_PAPI
+{
+	return RSB_ERR_UNSUPPORTED_FEATURE;
+}
+#else /* RSB_HAVE_PAPI */
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_papi_err_t perrval=PAPI_OK;
+	RSB_BZERO_P(&rps);
+	rps.EventSet=PAPI_NULL;	/* set after zeroing: PAPI_create_eventset() expects PAPI_NULL here */
+	RSB_MEMCPY(&rps.eventlist,&rsb_papi_eventlist,sizeof(rsb_papi_eventlist));
+
+#if 0
+	/* Initialize the PAPI library and get the number of counters available */
+	if ((rsb_num_hwcntrs = PAPI_num_counters()) <= PAPI_OK) 
+		{errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+
+	/* FIXME :  RSB_STDOUT seems flawed!  */
+	RSB_STDOUT("This system has %d available counters.\n", rsb_num_hwcntrs);
+
+	if (rsb_num_hwcntrs > 2)
+		rsb_num_hwcntrs = 2;
+
+#endif
+
+	if(rsb_papi_mode!=0)
+	{
+		int i;
+
+		if (((perrval=PAPI_library_init(PAPI_VER_CURRENT)) != PAPI_VER_CURRENT) && perrval > 0 )
+			{RSB_ERROR("PAPI_library_init() failed with code %d\n",(int)perrval);errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+
+		if ((rps.hwinfo = PAPI_get_hardware_info()) == NULL)
+			{RSB_ERROR("PAPI_get_hardware_info() failed\n");errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+
+#if 0
+		if ( perrval=PAPI_set_granularity(PAPI_GRN_PROCG ) != PAPI_OK )
+			{RSB_ERROR(RSB_ERRM_NL);errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+		if ( perrval=PAPI_set_cmp_granularity(PAPI_GRN_PROCG /*PAPI_GRN_MAX*/,0) != PAPI_OK )
+			{RSB_ERROR(RSB_ERRM_NL);errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+#endif
+		/* Start counting events */
+		if ((perrval=PAPI_create_eventset(&rps.EventSet)) != PAPI_OK)
+			{RSB_ERROR("PAPI_create_eventset() failed\n");errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+
+		for (i = 0; rps.eventlist[i] != 0; i++)
+		{
+			PAPI_event_code_to_name(rps.eventlist[i], rps.descr);
+			if((perrval=PAPI_add_event(rps.EventSet,rps.eventlist[i])) != PAPI_OK)
+			{
+				//RSB_STDERR("PAPI_add_event(%d=%s) failed\n",rps.eventlist[i],descr);continue;
+			}
+			else
+			{
+				//RSB_STDERR("PAPI_add_event(%d=%s) ok\n",rps.eventlist[i],descr);
+			}
+	//		if(PAPI_remove_event(EventSet,rps.eventlist[i]) != PAPI_OK)
+	//		{RSB_STDERR("PAPI_remove_event(%d) failed\n",rps.eventlist[i]);errval = RSB_ERR_INTERNAL_ERROR;continue;}
+		}
+
+		if ((perrval=PAPI_start(rps.EventSet)) != PAPI_OK)
+		{
+			RSB_ERROR("PAPI_start() failed : %s\n",PAPI_strerror(perrval));
+			errval = RSB_ERR_INTERNAL_ERROR;goto err;
+		}
+
+	}
+	else
+	{
+		int num_events = PAPI_num_counters();
+		if(num_events<2)
+			{RSB_ERROR("PAPI_num_counters() = %d < 2 \n",num_events);errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+		//else
+		//	RSB_STDERR("PAPI_num_counters() = %d\n",num_events);
+
+		perrval = PAPI_library_init(PAPI_VER_CURRENT);
+		if (perrval != PAPI_VER_CURRENT) {RSB_ERROR("PAPI_library_init() failed: %x\n",perrval);errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+#if 0
+		// perrval = PAPI_set_cmp_granularity(PAPI_GRN_THD, 0);
+		perrval = PAPI_set_cmp_granularity(PAPI_GRN_PROC, 0);
+		/* perrval = PAPI_set_cmp_granularity(PAPI_GRN_SYS, 0); */
+		if (perrval != PAPI_OK) {RSB_ERROR("PAPI_set_cmp_granularity() failed: %x\n",perrval);errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+		// perrval = PAPI_create_eventset(&EventSet);
+		// if (perrval != PAPI_OK) {RSB_ERROR(" \n");errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+#endif
+		/* PAPI_set_domain( PAPI_DOM_ALL ); */
+		/* Start counting events */
+		if ((perrval = PAPI_start_counters(rps.eventlist, rsb_num_hwcntrs)) != PAPI_OK)
+			{RSB_ERROR("PAPI_start_counters() failed\n");errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+#if RSB_HAVE_PTHREAD_H
+		/* FIXME: in this version of librsb, PAPI threading support is NOT complete. */
+		/* omp_get_thread_num is not adequate, see doc*/
+	//	if (((perrval= PAPI_thread_init(omp_get_thread_num)) != PAPI_OK) )
+		//if (((perrval= PAPI_thread_init(&pthread_self)) != PAPI_OK) )
+		if (((perrval = PAPI_thread_init(pthread_self)) != PAPI_OK) )
+			{RSB_ERROR("PAPI_thread_init() failed with code %d\n",(int)perrval);errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+#endif /* RSB_HAVE_PTHREAD_H */
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+
+
+	}
+
+	/* TODO : finish here */
+//	rsb_perf_counters_call();
+
+	RSB_STDERR("rsb_perf_counters_init: PAPI initialization ok.\n");
+	return RSB_ERR_NO_ERROR;
+err:
+	{
+		char pes[RSB_MAX_STRERRLEN];/* Flawfinder: ignore */
+		/* PAPI_perror(perrval,pes,sizeof(pes)); */
+		PAPI_perror("");
+		pes[RSB_MAX_STRERRLEN-1]=RSB_NUL;
+		RSB_ERROR("error in rsb_perf_counters_init(\"%s\")\n",pes);
+	}
+	rsb__do_perror(NULL,errval);
+	/* error message printout */
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb_perf_counters_reset(void)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+#if 0
+	if (PAPI_stop_counters ( eventvals, rsb_num_hwcntrs ) != PAPI_OK)
+	{RSB_STDERR("PAPI_stop_counters() failed\n");errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+	if (PAPI_start_counters ( eventvals, rsb_num_hwcntrs ) != PAPI_OK)
+	{RSB_STDERR("PAPI_start_counters() failed\n");errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+#else
+	if (PAPI_reset ( rps.EventSet ) != PAPI_OK)
+	{RSB_STDERR("PAPI_reset() failed\n");errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+#endif
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb_perf_counters_dump(const rsb_char_t *premsg, const rsb_char_t *postmsg, rsb_int_t tdiv, struct rsb_pci_t *pcip)
+{
+	/*
+	  \ingroup gr_internals
+	 hardware counters information update
+	 */
+#if RSB_ALLOW_STDOUT
+	const rsb_char_t *prmstr="";
+	const rsb_char_t *pomstr="";
+	int i;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(premsg)prmstr=premsg;
+	if(postmsg)pomstr=postmsg;
+
+#ifdef RSB_HAVE_PAPI
+	if(pcip)
+		RSB_BZERO_P(pcip);
+	if(pcip)
+	for (i = 0; i < RSB_MIN(rsb_num_hwcntrs,RSB_PC_MAX_ITEMS); i++) /* FIXME: shall put a stringent limit */
+	{
+		pcip->eventvals[i]=rps.eventvals[i]/tdiv;
+		pcip->eventlist[i]=rps.eventlist[i];
+		PAPI_event_code_to_name(pcip->eventlist[i],pcip->eventdesc[i]);
+		pcip->eventnum=i+1; /* ehm...  */
+	}
+#endif /* RSB_HAVE_PAPI */
+	else
+	/*RSB_STDOUT("summary of PAPI output: \n");*/
+	for (i = 0; i < rsb_num_hwcntrs; i++)
+	{	
+		PAPI_event_code_to_name(rps.eventlist[i], rps.descr);
+		/*RSB_STDOUT("counters values : %s%s %lld\n",prmstr,descr,eventvals[i]);*/
+		RSB_STDOUT("%s%s:\t%lld%s\n",prmstr,rps.descr,rps.eventvals[i]/tdiv,pomstr);
+	}
+	RSB_DO_ERR_RETURN(errval)
+#else /* RSB_ALLOW_STDOUT */
+	RSB_DO_ERR_RETURN(RSB_ERR_UNSUPPORTED_FEATURE)
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+rsb_err_t rsb_perf_counters_update(void)
+{
+	/**
+	  \ingroup gr_internals
+	   
+	 */
+
+	/* hardware counters information update */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	/* fixme ! this accumulates, and does not reset! */
+	/* unlike said in http://icl.cs.utk.edu/projects/papi/files/html_man3/papi_read_counters.html */
+	if (PAPI_read_counters ( rps.eventvals, rsb_num_hwcntrs ) != PAPI_OK)
+	{RSB_STDERR("PAPI_read_counters() failed\n");errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+
+//	if (PAPI_accum_counters( eventvals, rsb_num_hwcntrs ) != PAPI_OK)
+//	{RSB_STDERR("PAPI_accum_counters() failed\n");errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+#endif /* RSB_HAVE_PAPI */
+
+rsb_err_t rsb_perf_counters_finalize(void)
+#ifndef RSB_HAVE_PAPI
+{ return RSB_ERR_UNSUPPORTED_FEATURE; }
+#else /* RSB_HAVE_PAPI */
+{
+	/* will finalize and maybe dump some performance info */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+//	RSB_STDERR("rsb_perf_counters_finalize..\n");
+
+	/* WRITE ME */
+	/* TODO : finish here */
+//	rsb_perf_counters_call();
+
+#if 0
+	{
+	RSB_STDERR("sizeof(rsb_papi_long)  %d\n",sizeof(rsb_papi_long));
+	RSB_STDERR("real time          %g\n",real_time);
+	RSB_STDERR("processor time     %g\n",proc_time);
+	RSB_STDERR("fl.p. instructions %d\n",flpins);
+	RSB_STDERR("fl.p. ops          %d\n",flpops);
+	RSB_STDERR("mflips             %g\n",mflips);
+	RSB_STDERR("mflops             %g\n",mflops);
+	RSB_STDERR("instructions       %d\n",ins);
+	RSB_STDERR("ins. per cycle     %g\n",ipc);
+	RSB_STDERR("ins. per second    %g\n",ips);
+	}
+#endif
+
+	RSB_BZERO(rps.eventvals,sizeof(rps.eventvals));
+
+	//rsb_perf_counters_update();
+	//rsb_perf_counters_dump();
+
+	if(rsb_papi_mode!=0)
+	{
+		int code=0;
+		if (PAPI_stop(rps.EventSet, rps.eventvals ) != PAPI_OK)
+		{RSB_STDERR("PAPI_stop() failed\n");errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+
+		if (PAPI_cleanup_eventset(rps.EventSet ) != PAPI_OK)
+		{RSB_STDERR("PAPI_cleanup_eventset() failed\n");errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+
+		if ((code=PAPI_destroy_eventset(&rps.EventSet)) != PAPI_OK)
+		{
+			RSB_STDERR("PAPI_destroy_eventset() failed : %s\n",PAPI_strerror(code));
+			errval = RSB_ERR_INTERNAL_ERROR;goto err;
+		}
+	}
+	else
+	{
+		if (PAPI_stop_counters ( rps.eventvals, rsb_num_hwcntrs ) != PAPI_OK)
+		{RSB_STDERR("PAPI_stop_counters() failed\n");errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+	}
+
+
+	goto ok;
+err:
+	RSB_STDERR("error in rsb_perf_counters_finalize()\n");
+	rsb__do_perror(NULL,errval);
+	/* error message printout */
+ok:
+	RSB_DO_ERR_RETURN(errval)
+}
+#endif /* RSB_HAVE_PAPI */
+
+rsb_err_t rsb_hc_main(void)		/* preliminary */
+{
+	/**
+	  \ingroup gr_internals
+	   UNFINISHED
+	   A miniprogram for preliminary play with hardware counters.
+	  
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_int i,j;
+	//register double f=0;
+	//register float f=0;
+	//register char f=0;
+	register int f=0;
+
+
+	errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS);	
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+	
+	for(j=0;j<10;++j)
+	{
+		// see diff for 1.0 or 2.0 add !
+		//for(i=0;i<100000;++i) f=f+1.0f;
+		for(i=0;i<100000;++i) f=f+1;
+		//for(i=0;i<100000;++i) f=f+2.0f;
+		//rsb_perf_counters_update();
+		//rsb_perf_counters_dump();
+		//rsb_perf_counters_reset();
+	}
+
+	errval = rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);	
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+
+	RSB_STDERR("ignore this printout :) hc: %lf\n",(double)f);
+
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+
+
+/* @endcond */
diff --git a/rsb_pcnt.h b/rsb_pcnt.h
new file mode 100644
index 0000000..3607d47
--- /dev/null
+++ b/rsb_pcnt.h
@@ -0,0 +1,68 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/**
+ * @file
+ * @brief Performance tuning or measuring code.
+ * @author Michele Martone
+ * */
+
+#ifndef RSB_PCNT_H_INCLUDED
+#define RSB_PCNT_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include <stdio.h>
+#include "rsb_internals.h"
+#include "rsb_perf.h"
+
+#ifdef RSB_HAVE_PAPI
+#include <papi.h>		/*  http://icl.cs.utk.edu/papi/ */
+typedef  long_long rsb_papi_long; /* long_long is a typedef originating in the papi headers */
+typedef int rsb_papi_int_t;
+typedef int rsb_papi_err_t;
+#define RSB_PC_MAX_ITEMS 3
+#endif /* RSB_HAVE_PAPI */
+struct rsb_pci_t
+{
+	int eventnum;
+#ifdef RSB_HAVE_PAPI
+	rsb_papi_int_t eventlist[RSB_PC_MAX_ITEMS];
+	rsb_papi_long eventvals[RSB_PC_MAX_ITEMS];
+	char          eventdesc[RSB_PC_MAX_ITEMS][PAPI_MAX_STR_LEN];
+#endif /* RSB_HAVE_PAPI */
+};
+
+rsb_err_t rsb_perf_counters_init(void);
+rsb_err_t rsb_perf_counters_finalize(void);
+rsb_err_t rsb_perf_counters_dump(const rsb_char_t *premsg, const rsb_char_t *postmsg, rsb_int_t tdiv, struct rsb_pci_t *pcip);
+
+rsb_err_t rsb_hc_main(void);	/* preliminary */
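+
+/* Hypothetical usage sketch (illustrative only, not from the original
+ * source; requires a PAPI-enabled build):
+ *
+ *	if( rsb_perf_counters_init() == RSB_ERR_NO_ERROR )
+ *	{
+ *		// ... run the code section being measured ...
+ *		rsb_perf_counters_dump("spmv: ", NULL, 1, NULL); // tdiv=1: raw counts
+ *		rsb_perf_counters_finalize();
+ *	}
+ */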
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+#endif /* RSB_PCNT_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_perf.c b/rsb_perf.c
new file mode 100644
index 0000000..ead1437
--- /dev/null
+++ b/rsb_perf.c
@@ -0,0 +1,1211 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/**
+ * @file
+ * @brief Performance measuring/reporting code (this is mostly devel stuff).
+ * @author Michele Martone
+ * */
+
+#include <strings.h>		/* bzero */
+#include "rsb_internals.h"
+#include "rsb-config.h"
+#include <stdint.h> /* int64_t / uint64_t */
+
+static struct rsb_global_reference_performance_info_t rsb_gpi;
+static struct rsb_mbw_cm_t rsb_gmpi;		/**< a memory bandwidth benchmark record for each level+extra_level */
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+#if RSB_WANT_PERFORMANCE_FILE
+static rsb_err_t rsb_load_bw_info(struct rsb_mbw_cm_t *mi, FILE *fp);	/* defined below; needed by the reader */
+static rsb_err_t rsb__read_global_reference_performance_info(struct rsb_global_reference_performance_info_t *gpip)
+{
+	/*!
+	  \ingroup gr_internals
+	   Reads in the system specific performance information binary file, which
+	   has been created by exactly this library instance on this system.
+	   TODO: Error handling is insufficient, e.g. in closing files.
+	 */
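+	/* On-disk layout, as implemented here and in
+	 * rsb__save_global_reference_performance_info(): a signature string,
+	 * then a size_t holding sizeof(*gpip) as a consistency check, then
+	 * the struct itself, then the memory bandwidth records. */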
+	FILE *fp = rsb__util_fopen(rsb_global_session_handle.performance_binary_dump_file,"r+b");
+	char sigbuf[RSB_PERFORMANCE_BINARY_DUMP_FILE_SIGNATURE_MAX_CHARS];
+	rsb_int sl = rsb__strlen(RSB_PERFORMANCE_BINARY_DUMP_FILE_SIGNATURE);
+	size_t so = 0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!fp)
+	{
+		/* for a while, this will not be displayed so loudly */
+		/*RSB_ERROR("error opening performance file (no such file?).\n");*/
+		errval = RSB_ERR_NO_USER_CONFIGURATION;
+		goto err;
+	}
+	if( fread(sigbuf,sl,1,fp)!=1 )
+	{
+		RSB_ERROR("problems reading performance file signature .\n");
+		errval = RSB_ERR_GENERIC_ERROR;
+		goto err;
+	}
+	sigbuf[sizeof(sigbuf)-1]='\0';
+	if( strncmp(sigbuf,RSB_PERFORMANCE_BINARY_DUMP_FILE_SIGNATURE,sl) != 0 )
+	{
+		/* Warning: This could print out unterminated junk */
+		RSB_ERROR("read an unknown performance file signature: %s.\n",sigbuf);
+		errval = RSB_ERR_GENERIC_ERROR;
+		goto err;
+	}
+	if( fread(&so,sizeof(size_t),1,fp)!=1 )
+	{
+		RSB_ERROR("problems reading performance file size.\n");
+		errval = RSB_ERR_GENERIC_ERROR;
+		goto err;
+	}
+	if( so != sizeof(*gpip) )
+	{
+		RSB_STDERR("perfomance file size (%zd) should be %zd!\n",so,sizeof(*gpip));
+		errval = RSB_ERR_GENERIC_ERROR;
+		goto err;
+	}
+	if(
+		fread(gpip,sizeof(*gpip),1,fp)!=1 
+	)
+	{
+		RSB_ERROR(RSB_ERRM_EQRPF);
+		errval = RSB_ERR_GENERIC_ERROR;
+		fclose(fp);
+		goto err;
+	}
+
+	if(RSB_SOME_ERROR(rsb_load_bw_info(&rsb_gmpi, fp)))
+	{
+		errval = RSB_ERR_GENERIC_ERROR;
+		goto err;
+/*	if(rsb__print_mem_hier_timings(&rsb_gmpi))
+  		goto err;*/
+	}
+
+	if( fclose(fp) != 0)
+		RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_GENERIC_ERROR);
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+#endif /* RSB_WANT_PERFORMANCE_FILE */
+
+static rsb_err_t rsb__load_performance_info(rsb_bool_t force_benchmark)
+{
+	/**
+	  \ingroup gr_internals
+	   loads performance info from file.
+	 */
+
+#if RSB_ALLOW_STDOUT
+	/* NOTE : just a shortcut, not a real check */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(rsb_gmpi.mb!=NULL)
+		return RSB_ERR_NO_ERROR;/* assume already loaded */
+
+#if RSB_WANT_PERFORMANCE_FILE
+	if(RSB_ERR_NO_ERROR != rsb__read_global_reference_performance_info(&rsb_gpi))
+#else /* RSB_WANT_PERFORMANCE_FILE */
+#endif /* RSB_WANT_PERFORMANCE_FILE */
+	{
+	
+		if(!force_benchmark)
+			return RSB_ERR_NO_ERROR;/* assume we'll load it later */
+
+		RSB_STDOUT("there is no reference performance information available.\n");
+		RSB_STDOUT("running reference performance benchmark ...\n");
+		errval = rsb__do_referencebenchmark();
+		RSB_STDOUT("..done.\n");
+		if(RSB_ERR_NO_ERROR!=errval)
+			goto err;/* NEW */
+#if RSB_WANT_PERFORMANCE_FILE
+		if(RSB_ERR_NO_ERROR != rsb__read_global_reference_performance_info(&rsb_gpi))
+		{
+			RSB_ERROR("A problem occurred reading global reference performance info.\n");
+			/* filesystem may be full : it is not necessarily an internal error */
+			return RSB_ERR_GENERIC_ERROR;
+		/*	return RSB_ERR_INTERNAL_ERROR; */
+		}
+#else /* RSB_WANT_PERFORMANCE_FILE */
+#endif /* RSB_WANT_PERFORMANCE_FILE */
+	}
+	rsb_gpi.initialized=1;
+err:
+	RSB_DO_ERR_RETURN(errval)
+#else /* RSB_ALLOW_STDOUT */
+	RSB_DO_ERR_RETURN(RSB_ERR_UNSUPPORTED_FEATURE)
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+rsb_err_t rsb__perf_init(void)
+{
+	/**
+	  \ingroup gr_internals
+
+	  Blanks performance info structures.
+	  Loads some performance info.
+	*/
+	RSB_BZERO_P(&rsb_gmpi);
+	RSB_BZERO_P(&rsb_gpi);
+	return rsb__load_performance_info(0);
+}
+
+rsb_err_t rsb__perf_exit(void)
+{
+	/**
+	  \ingroup gr_internals
+	  Frees performance info structures.
+	 */
+	if(rsb_gmpi.mb!=NULL)
+	{
+		rsb__free(rsb_gmpi.mb); /*  we'll rather need a destructor function */
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__print_mop_reference_performance_info_header(void)
+{
+#if RSB_ALLOW_STDOUT
+	RSB_INFO("#type\top\t");
+	RSB_INFO("rows\tcols\tbr\tbc\tmflops p.s.\te_mflops p.s.\tnnz\tfillin\n");
+	return RSB_ERR_NO_ERROR;
+#else /* RSB_ALLOW_STDOUT */
+	return RSB_ERR_UNSUPPORTED_FEATURE;
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+static rsb_err_t rsb_print_mop_maxmins(const struct rsb_mop_reference_performance_info_t *pi)
+{
+	/**
+	  \ingroup gr_internals
+	   By analyzing this data further, one can bound the attainable optimization gains.
+ 	 */
+#if RSB_ALLOW_STDOUT
+	rsb_int si=0,ci=0,ri=0;
+	rsb_int rua[] = RSB_ROWS_UNROLL_ARRAY;
+	rsb_int cua[] = RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_int mri=0,mci=0,Mri=0,Mci=0;
+
+	for(si=0;si<RSB_FITTING_SAMPLES;++si)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+		{
+			if( pi->pipfs[si].m_flops[ri][ci] > pi->pipfs[si].m_flops[Mri][Mci] )
+				Mri = ri,Mci=ci;
+		}
+		
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+		{
+			if( pi->pipfs[si].m_flops[ri][ci] < pi->pipfs[si].m_flops[mri][mci] )
+				mri = ri,mci=ci;
+		}
+		RSB_STDOUT("sample %zd : %zd %zd : max:%lg\n",
+		(rsb_printf_int_t)si,(rsb_printf_int_t)rua[Mri],(rsb_printf_int_t)cua[Mci],pi->pipfs[si].m_flops[Mri][Mci]);
+		RSB_STDOUT("sample %zd : %zd %zd : min:%lg\n",
+		(rsb_printf_int_t)si,(rsb_printf_int_t)rua[mri],(rsb_printf_int_t)cua[mci],pi->pipfs[si].m_flops[mri][mci]);
+	}
+	RSB_DO_ERR_RETURN(errval)
+#else /* RSB_ALLOW_STDOUT */
+	RSB_DO_ERR_RETURN(RSB_ERR_UNSUPPORTED_FEATURE)
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+rsb_err_t rsb__print_mop_reference_performance_info(const struct rsb_mop_reference_performance_info_t *pi, char *s)
+{
+	/**
+	  \ingroup gr_internals
+	 * NEW
+	 * but OBSOLETE
+	 */
+#if RSB_ALLOW_STDOUT
+	rsb_int si=0,ci=0,ri=0;
+	rsb_int rua[] = RSB_ROWS_UNROLL_ARRAY;
+	rsb_int cua[] = RSB_COLUMNS_UNROLL_ARRAY;
+	/*char *s_="";
+	if(s)s_=s;*/
+	for(si=0;si<RSB_FITTING_SAMPLES;++si)
+	for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		RSB_STDOUT("%s"
+			"%zd\t%zd\t"
+			"%zd\t%zd\t"
+			"%lg\t%lg\t"
+			"%zd\t%lg\n",s,
+			(rsb_printf_int_t)pi->pipfs[si].rows,
+			(rsb_printf_int_t)pi->pipfs[si].cols,
+			(rsb_printf_int_t)rua[ri],
+			(rsb_printf_int_t)cua[ci],
+			pi->pipfs[si].m_flops[ri][ci] /pi->pipfs[si].seconds[ri][ci],
+			pi->pipfs[si].e_mflops[ri][ci]/pi->pipfs[si].seconds[ri][ci],
+			(rsb_printf_int_t)pi->pipfs[si].nnz,
+			pi->pipfs[si].fillin[ri][ci]
+		);
+	}
+	return RSB_ERR_NO_ERROR;
+#else /* RSB_ALLOW_STDOUT */
+	return RSB_ERR_UNSUPPORTED_FEATURE;
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+static rsb_int rsb_dump_mops_performance_info(const struct rsb_mops_performance_info_t *mpi)
+{
+	/**
+	  \ingroup gr_internals
+	   Dumps the whole struct in C format.
+	   but OBSOLETE
+	 */
+#if RSB_ALLOW_STDOUT
+	rsb_int oi;
+	RSB_STDOUT("{\n");
+	RSB_STDOUT(".pipmo={\n");
+	for(oi=0;oi<RSB_IMPLEMENTED_META_MOPS;++oi)
+	{
+		rsb__dump_performance_info(mpi->pipmo+oi,"");
+		RSB_STDOUT(",\n");
+	}
+	RSB_STDOUT("}\n");
+	RSB_STDOUT("}\n");
+	return 0;
+#else /* RSB_ALLOW_STDOUT */
+	return -1;
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+rsb_err_t rsb__dump_global_performance_info(const struct rsb_global_performance_info_t *gpip)
+{
+	/**
+	  \ingroup gr_internals
+	 * Dumps the whole struct in C format.
+	 * but OBSOLETE
+	 */
+#if RSB_ALLOW_STDOUT
+	rsb_int ti;
+	RSB_STDOUT("#include \"rsb_krnl.h\"\n");
+	RSB_STDOUT("struct rsb_global_performance_info_t gpi=\n");
+	RSB_STDOUT("{\n");
+	RSB_STDOUT(".gpi={\n");
+	for(ti=0;ti<RSB_IMPLEMENTED_TYPES;++ti)
+	{
+		rsb_dump_mops_performance_info(gpip->gpi+ti);
+		RSB_STDOUT(",\n");
+	}
+	RSB_STDOUT("}\n");
+	RSB_STDOUT("}\n");
+	return RSB_ERR_NO_ERROR;
+#else /* RSB_ALLOW_STDOUT */
+	return RSB_ERR_UNSUPPORTED_FEATURE;
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+static rsb_int rsb_dump_reference_mop_performance_info(const struct rsb_mop_reference_performance_info_t *mpi)
+{
+	/**
+	  \ingroup gr_internals
+	   Dumps the whole struct in C format.
+	   but OBSOLETE
+	 */
+#if RSB_ALLOW_STDOUT
+	rsb_int oi;
+	RSB_STDOUT("{ /* struct rsb_mop_reference_performance_info_t  */ \n");
+	RSB_STDOUT(".pipfs={\n");
+	for(oi=0;oi<RSB_FITTING_SAMPLES;++oi)
+	{
+		rsb__dump_performance_info(mpi->pipfs+oi,"");
+		RSB_STDOUT(",\n");
+	}
+	RSB_STDOUT("},\n");
+
+	RSB_STDOUT(".blocks_per_row=\n{");
+	for(oi=0;oi<RSB_FITTING_SAMPLES;++oi)
+		RSB_STDOUT("%lg,",mpi->blocks_per_row[oi]);
+	RSB_STDOUT("},\n");
+
+	/** alpha, beta, gamma parameterization as in the accels experimental setup*/
+	rsb__dump_performance_array("alpha" ,(const double*)mpi->alpha);
+	rsb__dump_performance_array("beta"  ,(const double*)mpi->beta);
+	rsb__dump_performance_array("gamma" ,(const double*)mpi->gamma);
+
+	RSB_STDOUT("}\n");
+	return 0;
+#else /* RSB_ALLOW_STDOUT */
+	return -1;
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+static rsb_int rsb_dump_reference_mops_performance_info(const struct rsb_mops_reference_performance_info_t *mpi)
+{
+	/**
+	  \ingroup gr_internals
+	   Dumps the whole struct in C format.
+	   but OBSOLETE
+	 */
+#if RSB_ALLOW_STDOUT
+	rsb_int oi;
+	const char * mops[] = RSB_MATRIX_OPS_ARRAY;
+
+	RSB_STDOUT("{ /* struct rsb_mops_reference_performance_info_t  */ \n");
+	RSB_STDOUT(".pipmo={\n");
+	for(oi=0;oi<RSB_IMPLEMENTED_META_MOPS;++oi)
+	{
+		RSB_STDOUT("/* mop is %s */\n",mops[oi]);
+		rsb_dump_reference_mop_performance_info(mpi->pipmo+oi);
+		RSB_STDOUT(",\n");
+	}
+	RSB_STDOUT("}\n");
+	RSB_STDOUT("}\n");
+	return 0;
+#else /* RSB_ALLOW_STDOUT */
+	return -1;
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+FILE *rsb__util_fopen(const char *path, const char *mode)
+{
+	/**
+	  \ingroup gr_internals
+	  A fopen wrapper.
+	 */
+/*	struct stat stat_s;
+	if(-1==stat(path,&stat_s))return NULL;
+	if( S_IFREG(stat_s.st_mode))return NULL;*/
+	return fopen(path,mode);/* Flawfinder: ignore */
+}
+
+rsb_err_t rsb_save_bw_info(const struct rsb_mbw_cm_t *mi, FILE *fp)
+{
+	/*!
+	  \ingroup gr_internals
+	  Saves performance information on memory bandwidth.
+	 */
+	size_t sw=0;
+
+	if(!fp || !mi)
+	{
+		RSB_ERROR("error.\n");
+		return RSB_ERR_BADARGS;
+	}
+
+	sw = sizeof(*(mi->mb))*(mi->cln+mi->extra_level);
+
+	if( fwrite(mi,sizeof(*mi),1,fp)!=1 )
+		goto err;
+
+	if(!mi->mb)
+		goto err;/* should give an internal error ? */
+
+	if( fwrite(mi->mb,sw,1,fp)!=1 )
+		goto err;
+
+	return RSB_ERR_NO_ERROR;
+err:
+	RSB_ERROR("error writing memory performance file.\n");
+	return RSB_ERR_GENERIC_ERROR;
+}
+
+#if RSB_WANT_PERFORMANCE_FILE
+rsb_err_t rsb__save_global_reference_performance_info(const struct rsb_global_reference_performance_info_t *gpip)
+{
+	/*!
+	  \ingroup gr_internals
+	 * Writes out the system specific performance information binary file, which
+	 * should be read by exactly this library instance on this system.
+	 */
+	FILE *fp = rsb__util_fopen(rsb_global_session_handle.performance_binary_dump_file,"w+b");
+	size_t so=sizeof(*gpip);
+	rsb_int sl=rsb__strlen(RSB_PERFORMANCE_BINARY_DUMP_FILE_SIGNATURE);
+
+	if(!fp)
+	{
+		RSB_ERROR("error opening performance file %s.\n",rsb_global_session_handle.performance_binary_dump_file);
+		return RSB_ERR_GENERIC_ERROR;
+	}
+
+	if( fwrite(RSB_PERFORMANCE_BINARY_DUMP_FILE_SIGNATURE,sl,1,fp)!=1 )
+		goto err;
+
+	if( fwrite(&so,sizeof(size_t),1,fp)!=1 )
+		goto err;
+	if( fwrite(gpip,sizeof(*gpip),1,fp)!=1 )
+		goto err;
+
+	if(rsb__mem_hier_timings(&rsb_gmpi))
+		goto err;
+
+/*	if(rsb__print_mem_hier_timings(&rsb_gmpi))
+  		goto err;*/
+
+	if(RSB_SOME_ERROR(rsb_save_bw_info(&rsb_gmpi, fp)))
+		goto err;
+
+	if( fclose(fp) == 0)
+		return RSB_ERR_NO_ERROR;
+	else
+		return RSB_ERR_GENERIC_ERROR;
+
+err:
+	RSB_ERROR("error writing performance file.\n");
+	fclose(fp);
+	return RSB_ERR_GENERIC_ERROR;
+}
+#endif /* RSB_WANT_PERFORMANCE_FILE */
+
+#if RSB_WANT_PERFORMANCE_FILE
+static rsb_err_t rsb_load_bw_info(struct rsb_mbw_cm_t *mi, FILE *fp)
+{
+	/*!
+	  \ingroup gr_internals
+	  Loads performance information on memory bandwidth.
+	  TODO: Error handling is insufficient.
+	 */
+	size_t sr=0;
+
+	if(!fp || !mi)
+	{
+		RSB_ERROR(RSB_ERRM_ERROR);
+		return RSB_ERR_BADARGS;
+	}
+
+	if( fread(mi,sizeof(*mi),1,fp)!=1 )
+		goto err;
+
+	sr = sizeof(*(mi->mb))*(mi->cln+mi->extra_level);
+	mi->mb = rsb__calloc(sr);
+
+	if(!mi->mb)
+		goto ferr;
+	
+	if( fread(mi->mb,sr,1,fp)!=1 )
+		goto ferr;
+
+	return RSB_ERR_NO_ERROR;
+ferr:
+	RSB_CONDITIONAL_FREE(mi->mb);
+err:
+	RSB_BZERO_P(mi);
+	RSB_ERROR(RSB_ERRM_ELMPF);
+	return RSB_ERR_GENERIC_ERROR;
+}
+#endif /* RSB_WANT_PERFORMANCE_FILE */
+
+rsb_err_t rsb__dump_global_reference_performance_info(const struct rsb_global_reference_performance_info_t *gpip)
+{
+	/**
+	  \ingroup gr_internals
+	   Dumps the whole struct in C format.
+	   OBSOLETE.
+	 */
+#if RSB_ALLOW_STDOUT
+	rsb_int ti;
+	const char * types[] = RSB_MATRIX_TYPES_ARRAY;
+	RSB_STDOUT("{ /* struct rsb_global_reference_performance_info_t */ \n");
+	RSB_STDOUT(".initialized=%d,\n",gpip->initialized);
+	RSB_STDOUT(".gpi={ \n");
+	for(ti=0;ti<RSB_IMPLEMENTED_TYPES;++ti)
+	{
+		RSB_STDOUT("/* type is %s */\n",types[ti]);
+		rsb_dump_reference_mops_performance_info(gpip->gpi+ti);
+		RSB_STDOUT(",\n");
+	}
+	RSB_STDOUT("}\n");
+	RSB_STDOUT("}\n");
+	return RSB_ERR_NO_ERROR;
+#else /* RSB_ALLOW_STDOUT */
+	return RSB_ERR_UNSUPPORTED_FEATURE;
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+rsb_err_t rsb__dump_current_global_reference_performance_info(void)
+{
+	/**
+	  \ingroup gr_internals
+	   OBSOLETE.
+	 */
+#ifdef 	RSB_WITH_FEEDBACK
+	/* Warning: this is a dirty hack */
+	return rsb__dump_global_reference_performance_info(&rsb_gpi);
+#else /* RSB_WITH_FEEDBACK */
+	RSB_BZERO_P(&rsb_gpi);
+	return rsb__dump_global_reference_performance_info(&rsb_gpi);
+#endif /* RSB_WITH_FEEDBACK */
+}
+
+rsb_err_t rsb__dump_performance_info_line(const struct rsb_mop_performance_info_t * pi)
+{
+	/**
+	  \ingroup gr_internals
+	   NEW
+	 */
+#if RSB_ALLOW_STDOUT
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	rsb_blk_idx_t ri,ci;	/* row index, columns index */
+	rsb_blk_idx_t rua[] = RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[] = RSB_COLUMNS_UNROLL_ARRAY;
+
+	RSB_STDOUT(
+		"#m\tk\t"
+		"br\tbc\t"
+		"nnz\t"
+		"fillin\tm_flops\t"
+		"e_mflops\t"
+		"m.p.s.\t"
+		"seconds\t"
+		"\n"
+		);
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+		{
+
+			double perf;
+			if( !pi->seconds[ri][ci] )continue;/* this for handling special cases of quasi empty records */
+
+			perf= ( pi->m_flops[ri][ci]/pi->seconds[ri][ci])/pi->fillin[ri][ci] ;
+
+			RSB_STDOUT(
+				"%zd\t%zd\t"
+				"%zd\t%zd\t"
+				"%zd\t"
+				"%lg\t"
+				"%lg\t"
+				"%lg\t"
+				"%lg\t"
+				"%lg"
+				"\n"
+				//"\n"
+				,
+				(rsb_printf_int_t)pi->rows,(rsb_printf_int_t)pi->cols,
+				(rsb_printf_int_t)rua[ri],(rsb_printf_int_t)cua[ci],
+				(rsb_printf_int_t)pi->nnz,
+				pi->fillin[ri][ci],
+				pi->m_flops[ri][ci],
+				pi->e_mflops[ri][ci],/* effective mflops */
+				perf,
+				pi->seconds[ri][ci]
+				);
+		}
+	}
+	RSB_DO_ERR_RETURN(errval)
+#else /* RSB_ALLOW_STDOUT */
+	RSB_DO_ERR_RETURN(RSB_ERR_UNSUPPORTED_FEATURE)
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+rsb_err_t rsb__dump_performance_info(const struct rsb_mop_performance_info_t * pi, const char * pid)
+{
+	/*!
+	  \ingroup gr_internals
+	   Another benchmark info dumping function.
+          
+           FIXME : UNFINISHED
+          
+	 * \return RSB_ERR_NO_ERROR on correct operation, an error code (see \ref errors_section) otherwise.
+	 */
+#if RSB_ALLOW_STDOUT
+	if(!pi)
+		return RSB_ERR_BADARGS;
+	if(!pid)
+		pid="pi";
+	
+	if(0)
+	RSB_STDOUT("\n"
+	"#define RSB_ROWS_UNROLL_ARRAY_LENGTH 4\n"
+	"#define RSB_COLUMNS_UNROLL_ARRAY_LENGTH 4\n");
+
+	RSB_STDOUT("{\n");
+	RSB_STDOUT("/* rsb_mop_performance_info_t */\n");
+	RSB_STDOUT(".rows=%zd,.cols=%zd,.nnz=%zd, /** some matrix info : size_t rows,cols,nnz; */\n",pi->rows,pi->cols,pi->nnz);
+	rsb__dump_performance_array("m_flops" ,(const double*)pi->m_flops);
+	rsb__dump_performance_array("e_mflops",(const double*)pi->e_mflops);
+	rsb__dump_performance_array("fillin"  ,(const double*)pi->fillin);
+	rsb__dump_performance_array("seconds" ,(const double*)pi->seconds);
+	RSB_STDOUT("}\n");
+	return RSB_ERR_NO_ERROR;
+#else /* RSB_ALLOW_STDOUT */
+	return RSB_ERR_UNSUPPORTED_FEATURE;
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+#if 0
+rsb_err_t rsb_print_all_system_info(void)
+{
+	/*
+	  \ingroup gr_internals
+	   NEW, but OBSOLETE.
+	 */
+
+	rsb__sys_info();
+
+	/* temporary dumpout */
+	if(rsb_gpi.initialized)	/* FIXME : only partially */
+		return rsb__dump_global_reference_performance_info(&rsb_gpi);
+	else
+		RSB_INFO("There is no hardcoded machine performance information in this build.\n");
+	return RSB_ERR_NO_ERROR;
+}
+#endif
+
+rsb_nnz_idx_t rsb__fillin_estimation_nnz_count(
+	const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, 
+	const  rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_flags_t flags, rsb_int nprobes
+)
+{
+	rsb_nnz_idx_t pnnz=0;/* probing non zeros */
+	// size_t el_size=0;
+	rsb_int fraction=100;
+//	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	/* NEW */
+	/**
+	  \ingroup gr_internals
+		FIXME : unfinished
+		\return 0 in case of error
+
+		TODO : should follow some cache blocking/throughput based criteria
+	*/
+	const rsb_nnz_idx_t minnnz=10000;
+
+	if(!IA || !JA || nnz<1)
+	{
+		RSB_ERROR("bad args to rsb__fillin_estimation_nnz_count()!");
+		return 0;
+	}
+
+	// el_size = RSB_SIZEOF(typecode);
+
+	/* FIXME : should be a FRACTION ! :) */
+	pnnz=(nnz>minnnz)?minnnz:nnz;
+
+	if(pnnz<minnnz)
+		goto ok;
+
+	if((nnz/fraction)*nprobes<=nnz)/* this is one among many probes */
+	{
+		pnnz=nnz/fraction;
+	}
+
+	if(pnnz<minnnz)
+		pnnz=minnnz;
+		
+ok:
+	return pnnz;
+}
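+
+/*
+ * Worked example (editorial): with the defaults above (minnnz=10000,
+ * fraction=100) and nprobes=1, an input with nnz=2000000 is first clamped
+ * to pnnz=10000; since (nnz/100)*1 <= nnz, pnnz then becomes nnz/100=20000,
+ * which already exceeds minnnz, so 20000 nonzeroes get probed. Inputs with
+ * nnz below minnnz are probed in full.
+ */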
+
+rsb_err_t rsb__estimate_expected_raw_performance_for_blocking(
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_coo_idx_t mB, rsb_coo_idx_t kB,
+	const  rsb_nnz_idx_t nnz, rsb_type_t typecode, 
+	rsb_flags_t flags,
+	double efillin,
+	double*eperf)
+{
+	/**
+		FIXME : only BCSR !	
+		FIXME : unfinished
+	*/
+#ifdef RSB_OPTYPE_INDEX_SPMV_UAUZ
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_blk_idx_t ri=0,ci=0;	/* row index, columns index */
+	rsb_blk_idx_t rua[] = RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[] = RSB_COLUMNS_UNROLL_ARRAY;
+	rsb_int si=0,oi = RSB_NUMERICAL_OP_INDEX_FROM_CODE(RSB_OPTYPE_INDEX_SPMV_UAUZ);
+	rsb_int ti = RSB_NUMERICAL_TYPE_INDEX_FROM_CODE(typecode);
+	/* FIXME */
+
+	for(ri=0;ri<RSB_ROWS_UNROLL_ARRAY_LENGTH;++ri)
+	{
+		if(rua[ri]==mB)
+			goto okr;
+	}
+	goto failr;
+okr:
+	for(ci=0;ci<RSB_COLUMNS_UNROLL_ARRAY_LENGTH;++ci)
+	{
+		if(cua[ci]==kB)
+			goto okc;
+	}
+	goto failc;
+okc:
+
+	#if 1
+	*eperf = rsb_gpi.gpi[ti].pipmo[oi].pipfs[si].m_flops[ri][ci];
+	#else
+	/* mB per s */
+	*eperf= (rsb_gmpi.mb[rsb_gmpi.cln].nr[RSB_MB_READ].t/
+		 rsb_gmpi.mb[rsb_gmpi.cln].times) 
+		* rsb_gmpi.mb[rsb_gmpi.cln].sz ;
+	/* spmv per s  */
+	*eperf/=((double)rsb_spmv_memory_accessed_bytes_(
+		mB, kB,
+		m,k,
+		efillin*nnz,
+		(efillin*nnz)*mB*kB,
+		m/mB,
+		RSB_SIZEOF(typecode)
+		));
+
+	/* mflops */
+	*eperf *= ((2*nnz)*efillin)*(1.e-6);
+
+	//RSB_STDERR("cache levels : %d + %d\n",rsb_gmpi.cln,rsb_gmpi.extra_level);
+	rsb_int i=0;
+	//for(i=0;i<rsb_gmpi.cln; ++i)
+	//	RSB_STDERR("time at %d : %lg\n",i,rsb_gmpi.mb[i].nr[RSB_MB_READ].t);
+	#endif
+	goto err;
+failr:
+failc:
+	*eperf=0;
+	RSB_DO_ERR_RETURN(errval)
+#else /* RSB_OPTYPE_INDEX_SPMV_UAUZ */
+	rsb_err_t errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_OPTYPE_INDEX_SPMV_UAUZ */
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
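+
+/*
+ * Editorial note on the disabled (#else) branch above, as read from the code:
+ * it sketches a bandwidth-bound estimate. The measured memory throughput of
+ * the deepest level sampled (index cln) is divided by the bytes one SpMV is
+ * expected to touch (rsb_spmv_memory_accessed_bytes_()), giving SpMV
+ * operations per second; multiplying by the (2*nnz*efillin)*1e-6 Mflop each
+ * SpMV performs converts that into Mflops. The enabled branch instead looks
+ * the figure up in the rsb_gpi reference performance tables.
+ */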
+
+rsb_err_t rsb__estimate_expected_fillin_for_blocking(
+	const void * VA, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, 
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	const  rsb_nnz_idx_t nnz, rsb_type_t typecode, 
+	rsb_flags_t flags,
+	rsb_coo_idx_t mB, rsb_coo_idx_t kB,
+	double *efillinp)
+{
+	/**
+	  	\ingroup gr_internals
+
+		Should estimate the fillin for a single matrix blocking.
+		Assumes the input arrays are already sorted for this blocking.
+
+		FIXME : unfinished
+		TODO  : should use const array arguments.
+		FIXME : should not re-sort if the sorted flag is on.
+		FIXME : only BCSR !
+		FIXME : to work safely, should support Z ordering (which should perform nicely for this purpose)
+			AND copy the probing area into some temporary array.
+		FIXME : IT IS SLOW SLOW SLOW
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	rsb_time_t mt;
+//	rsb_int verbose=1;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_nnz_idx_t pnnz=0;
+
+	void *new_VA  = NULL;
+	rsb_coo_idx_t *new_IA = NULL, *new_JA = NULL;
+
+	if(!VA || !IA || !JA || nnz<1 || !efillinp)
+	{
+		RSB_ERROR(RSB_ERRM_ES);
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+				
+	if(
+	 ( flags & RSB_FLAG_SORTED_INPUT ) || 
+	 ( flags & RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR ) 
+	)
+	{
+		/* FIXME : disabled, because we are not prepared for Z sorted input ... */
+		errval = RSB_ERR_UNIMPLEMENTED_YET;
+		RSB_ERROR(RSB_ERRM_ES);
+		goto err;
+	}
+
+#if 0
+	if((!(flags & RSB_FLAG_SORTED_INPUT)) || (flags & RSB_FLAG_SORT_INPUT) )
+	{
+		RSB_ERROR(RSB_ERRM_ES);
+		/* we want sorted input */
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}				
+#endif
+	RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORT_INPUT);
+
+	new_VA = rsb__clone_area( VA , RSB_SIZEOF(typecode)    * nnz );
+	new_IA = rsb__clone_area( IA , sizeof(rsb_coo_idx_t) * nnz );
+	new_JA = rsb__clone_area( JA , sizeof(rsb_coo_idx_t) * nnz );
+
+	if( !new_VA || !new_IA || !new_JA )
+	{
+		RSB_ERROR(RSB_ERRM_ES);
+		errval = RSB_ERR_ENOMEM;goto err;
+	}
+	
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_ERROR(RSB_ERRM_SLIINS);
+		goto err;
+	}
+
+	pnnz = rsb__fillin_estimation_nnz_count( new_IA, new_JA, nnz, typecode, flags, 1 );
+
+	if(pnnz<1)
+	{
+		RSB_WARN("rsb__fillin_estimation_nnz_count() gave pnnz<1!\n");
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}				
+	
+#if 1
+	if( flags & RSB_FLAG_QUAD_PARTITIONING)
+		RSB_WARN("ignoring RSB_FLAG_QUAD_PARTITIONING in %s\n",__func__);
+	if( flags & RSB_FLAG_AUTO_BLOCKING)
+		RSB_WARN("ignoring RSB_FLAG_AUTO_BLOCKING in %s\n",__func__);
+	if( flags & RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR)
+		RSB_WARN("ignoring RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR in %s\n",__func__);
+	RSB_DO_FLAG_DEL(flags,RSB_FLAG_AUTO_BLOCKING);
+	RSB_DO_FLAG_DEL(flags,RSB_FLAG_QUAD_PARTITIONING);	/* problems otherwise */
+	RSB_DO_FLAG_DEL(flags,RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR);	/* problems otherwise */
+#else
+	if( flags & RSB_FLAG_AUTO_BLOCKING && 0) /* if 1, segfault */
+	{
+		*efillinp = RSB_REAL_ZERO;
+		//RSB_DO_FLAG_DEL(flags,RSB_FLAG_AUTO_BLOCKING); // this flags causes trouble here (FIXME)
+		goto ok;
+	}
+#endif
+		
+	RSB_DO_FLAG_DEL(flags,RSB_FLAG_OWN_PARTITIONING_ARRAYS);
+
+	mt = - rsb_time();
+	mtxAp = rsb__do_mtx_alloc_from_coo_const(new_VA,new_IA,new_JA,nnz,typecode,m,k,mB,kB,flags,&errval);
+	mt += rsb_time();
+
+	if(!mtxAp || (RSB_SOME_ERROR(errval)))
+	{
+		RSB_ERROR(RSB_ERRM_MBE);
+		errval = RSB_ERR_INTERNAL_ERROR;
+		goto err;
+	}
+
+	*efillinp = rsb__do_get_matrix_fillin(mtxAp);
+
+
+	if(mtxAp)
+		rsb__do_mtx_free(mtxAp);
+err:
+	rsb__do_perror(NULL,errval);
+	RSB_CONDITIONAL_FREE(new_VA);
+	RSB_CONDITIONAL_FREE(new_IA);
+	RSB_CONDITIONAL_FREE(new_JA);
+	goto ok;
+ok:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+void rsb__pinfo_init(struct rsb_mtx_partitioning_info_t * pinfop,
+	rsb_blk_idx_t M_b, rsb_blk_idx_t K_b,
+	rsb_coo_idx_t *rpntr,rsb_coo_idx_t *cpntr,
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_blk_idx_t br, rsb_blk_idx_t bc)
+{
+	/**
+	  \ingroup gr_internals
+           Using this function as an initializer serves as a reminder
+	   when changing this datatype definition.
+	 */
+	if(!pinfop)return;
+
+	pinfop->rpntr = rpntr;
+	pinfop->cpntr=cpntr;
+	pinfop->nr=m;
+	pinfop->nc=k;
+	pinfop->M_b=M_b;
+	pinfop->K_b=K_b;
+	pinfop->br=br;
+	pinfop->bc=bc;
+}
+
+rsb_err_t rsb__dump_system_performance_summary(void)
+{
+	/* TODO: find a better placement for this. */
+#if RSB_ALLOW_STDOUT
+	rsb_int oi, ti;
+	const char * types[] = RSB_MATRIX_TYPES_ARRAY;
+	const char * mops[] = RSB_MATRIX_OPS_ARRAY;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+#if RSB_WANT_PERFORMANCE_FILE
+	errval = rsb__read_global_reference_performance_info(&rsb_gpi);
+#else /* RSB_WANT_PERFORMANCE_FILE */
+	errval = RSB_ERR_NO_USER_CONFIGURATION;
+#endif /* RSB_WANT_PERFORMANCE_FILE */
+	if(errval == RSB_ERR_NO_USER_CONFIGURATION)
+	{
+		/* not a critical error; restore the no-error condition */
+		errval = RSB_ERR_NO_ERROR;
+		goto err;
+	}
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+
+	errval = rsb__dump_global_reference_performance_info(&rsb_gpi);
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+
+	for(oi=0;oi<RSB_IMPLEMENTED_META_MOPS;++oi)
+	for(ti=0;ti<RSB_IMPLEMENTED_TYPES;++ti)
+	{
+		RSB_STDOUT("%s %s:\n",types[ti],mops[oi]);
+		errval = rsb_print_mop_maxmins(&(rsb_gpi.gpi[ti].pipmo[oi]));
+		if(RSB_SOME_ERROR(errval))
+			goto err;
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+#else /* RSB_ALLOW_STDOUT */
+	RSB_DO_ERR_RETURN(RSB_ERR_UNSUPPORTED_FEATURE)
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+size_t rsb_spmv_memory_accessed_bytes_max(const struct rsb_mtx_t * mtxAp)
+{
+	/** 
+		NEW : EXPERIMENTAL (ONLY BCSR) (UNFINISHED)
+	*/
+	rsb_blk_idx_t columns;
+	rsb_blk_idx_t rows;
+
+	if(!mtxAp)
+	{
+		RSB_ERROR("rsb_spmv_memory_accessed_bytes_max : null matrix\n");
+		return 0;
+	}
+	if(!rsb__is_bcsr_matrix(mtxAp))
+	{
+		//RSB_ERROR("rsb_spmv_memory_accessed_bytes_max: matrix is not rsb__is_bcsr_matrix!\n");
+		return 0;
+	}
+
+	rsb__get_blocking_size(mtxAp, &rows, &columns);
+
+	if(rows < 0 || columns < 0)
+	{
+		RSB_ERROR(RSB_ERRM_NL);
+		return 0; /* error */
+	}
+
+	/* pessimistic, in the sense that it assumes many accesses */
+	return 	/* FIXME : possible overflow */
+		mtxAp->el_size *
+			(
+			 mtxAp->element_count	/* 1 time each element */
+			+(mtxAp->element_count /*/ rows*/)	/* the rhs, multiple times (one time per block width)  */
+			+mtxAp->Mdim		/* the out vector, one time */
+			)
+		+
+		sizeof(rsb_nnz_idx_t) * ( mtxAp->Mdim	/* bpntr */)*mtxAp->block_count
+		+
+		sizeof(rsb_nnz_idx_t) * ( mtxAp->block_count	/* bindx */)*mtxAp->block_count
+		;
+}
+
+size_t rsb_spmv_memory_accessed_bytes_min(const struct rsb_mtx_t * mtxAp)
+{
+	/** NEW : EXPERIMENTAL (ONLY BCSR) (UNFINISHED) */
+	rsb_blk_idx_t columns;
+	rsb_blk_idx_t rows;
+
+	if(!mtxAp)
+		return 0;
+	if(!rsb__is_bcsr_matrix(mtxAp))
+		return 0;
+
+	rsb__get_blocking_size(mtxAp, &rows, &columns);
+
+	if(rows < 0 || columns < 0)
+		return 0; /* error */
+
+	/* optimistic, in the sense that there are few accesses */
+	return 	/* FIXME : possible overflow */
+		mtxAp->el_size *
+			(
+			 mtxAp->element_count	/* 1 time each element */
+			+mtxAp->Mdim		/* the out vector, one time */
+			+mtxAp->mdim		/* the rhs vector, one time */
+			)
+		+
+		sizeof(rsb_nnz_idx_t) * ( mtxAp->Mdim	/* bpntr */)
+		+
+		sizeof(rsb_nnz_idx_t) * ( mtxAp->block_count	/* bindx */)
+		;
+}
+
+size_t rsb_spmv_memory_accessed_bytes_(
+	rsb_coo_idx_t mB, rsb_coo_idx_t kB,
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_nnz_idx_t element_count,
+	rsb_nnz_idx_t block_count,
+	rsb_blk_idx_t Mdim,
+	size_t el_size
+)
+{
+	if(mB < 0 || kB < 0)
+	{
+		RSB_ERROR("no blocking info supplied : can't estimate memory footprint.");
+		return 0; /* error */
+	}
+
+	/* (quasi) pessimistic, in the sense that it assumes many accesses */
+	return 	/* FIXME : possible overflow */
+		el_size *
+			(
+			 element_count	/* 1 time each element */
+			+(element_count / mB)	/* the rhs, multiple times (one time per block width)  */
+			+m		/* the out vector, one time */
+			)
+		+
+		sizeof(rsb_nnz_idx_t) * ( Mdim	/* bpntr */)
+		+
+		sizeof(rsb_nnz_idx_t) * ( block_count	/* bindx */)
+		;
+}
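+
+/*
+ * Worked example (editorial, assuming 8-byte elements and 4-byte
+ * rsb_nnz_idx_t): with mB=kB=4, m=k=10000, element_count=1000000,
+ * block_count=62500 and Mdim=2500, the estimate above gives
+ * 8*(1000000 + 1000000/4 + 10000) + 4*2500 + 4*62500 = 10340000 bytes,
+ * i.e. roughly 9.9 MiB touched per multiplication.
+ */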
+
+static size_t rsb_spmv_memory_accessed_bytes_leaf(const struct rsb_mtx_t * mtxAp)
+{
+	/** NEW : EXPERIMENTAL (ONLY BCSR) (UNFINISHED) */
+	rsb_blk_idx_t bcolumns;
+	rsb_blk_idx_t brows;
+	
+	if(!mtxAp)
+		return 0;
+
+	if(!rsb__is_bcsr_matrix(mtxAp))
+		return 0;
+
+	rsb__get_blocking_size(mtxAp, &brows, &bcolumns);
+
+	if(brows < 0 || bcolumns < 0)
+		return 0; /* error */
+
+	/* (quasi) pessimistic, in the sense that it assumes many accesses */
+	return   /* FIXME : possible overflow */
+		mtxAp->el_size *
+			(
+			 mtxAp->element_count	/* 1 time each element */
+			//+(mtxAp->element_count / brows)	/* the rhs, multiple times (one time per block width)  */
+			+(mtxAp->element_count)	/* the rhs, multiple times (one time per block width)  */
+			+mtxAp->nr		/* the out vector, one time */
+			)
+		+
+		sizeof(rsb_nnz_idx_t) * ( mtxAp->Mdim	/* bpntr */)
+		+
+		sizeof(rsb_nnz_idx_t) * ( mtxAp->block_count	/* bindx */)
+		;
+}
+
+size_t rsb_spmv_memory_accessed_bytes(const struct rsb_mtx_t * mtxAp)
+{
+	/** NEW : EXPERIMENTAL (ONLY BCSR) (UNFINISHED) */
+	rsb_submatrix_idx_t i,j;
+	const struct rsb_mtx_t * submatrix;
+	size_t sum=0;
+
+	if(!mtxAp)
+		return 0;
+
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix)
+			sum += rsb_spmv_memory_accessed_bytes(submatrix);
+	}
+	else
+	{
+		sum = rsb_spmv_memory_accessed_bytes_leaf(mtxAp);
+	}
+	
+	return sum;
+}
+
+double rsb_spmv_memory_accessed_bytes_wr_ratio(const struct rsb_mtx_t * mtxAp)
+{
+	/** NEW : EXPERIMENTAL (ONLY BCSR) (UNFINISHED) */
+	rsb_blk_idx_t columns;
+	rsb_blk_idx_t rows;
+	double rb,wb;
+
+	if(!mtxAp)
+		return 0;
+	if(!rsb__is_bcsr_matrix(mtxAp))
+		return 0;
+
+	rsb__get_blocking_size(mtxAp, &rows, &columns);
+
+	if(rows < 0 || columns < 0)
+		return 0; /* error */
+
+	/* (quasi) pessimistic, in the sense that it assumes many accesses */
+	rb=(double) 	/* FIXME : possible overflow */
+		mtxAp->el_size *
+			(
+			 mtxAp->element_count	/* 1 time each element */
+			+(mtxAp->element_count / rows)	/* the rhs, multiple times (one time per block width)  */
+			)
+		+
+		sizeof(rsb_nnz_idx_t) * ( mtxAp->Mdim	/* bpntr */)
+		+
+		sizeof(rsb_nnz_idx_t) * ( mtxAp->block_count	/* bindx */)
+		;
+	wb=(double)mtxAp->el_size*mtxAp->Mdim;		/* the out vector, one time */
+
+	return wb/rb;
+}
+
+rsb_err_t rsb__dump_performance_record(const char * s, const struct rsb_mtx_t * mtxAp, rsb_real_t rsb_NMflops_ps, rsb_real_t rsb_RMflops_ps, const char *op, rsb_flags_t inflags)
+{
+	/**
+		\ingroup gr_internals
+		writes on stdout a line suitable for plotting and later analysis
+		s and matrix are optional
+	*/
+#if RSB_ALLOW_STDOUT
+	/* FIXME : buffer overflow risk */
+	char buf[RSB_CONST_MATRIX_IMPLEMENTATION_CODE_STRING_MAX_LENGTH];/* Flawfinder: ignore */
+	/* rsb_NMflops_ps, rsb_RMflops_ps :
+	   algorithmic millions of ops per second */
+
+	/* single line output, ideal for benchmark data to be processed later */
+	RSB_STDOUT ("%-20s	%s",s,rsb__sprint_matrix_implementation_code2(mtxAp,buf,inflags));
+	RSB_STDOUT ("	%.3lf	%lg",rsb_RMflops_ps,rsb_NMflops_ps);
+	{rsb_char_t buf[RSB_MAX_LINE_LENGTH];
+	RSB_STDOUT ("	%s",rsb__sprint_matrix_implementation_code(mtxAp,op,inflags,buf));}
+	RSB_STDOUT ("\n");
+
+	return RSB_ERR_NO_ERROR ;
+#else /* RSB_ALLOW_STDOUT */
+	return RSB_ERR_UNSUPPORTED_FEATURE;
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+/* @endcond */
diff --git a/rsb_perf.h b/rsb_perf.h
new file mode 100644
index 0000000..bcd434d
--- /dev/null
+++ b/rsb_perf.h
@@ -0,0 +1,171 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/**
+ * @file
+ * @brief Performance tuning and measurement code.
+ * @author Michele Martone
+ * */
+
+#ifndef RSB_PERF_H_INCLUDED
+#define RSB_PERF_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include <stdio.h>
+#include "rsb_internals.h"
+
+/*! \brief Performance info for a single matrix operation, all possible unrollings. */
+struct rsb_mop_performance_info_t
+{
+	/* TODO : should we add 'flags' and 'runs' field ? */
+
+	/** some matrix info */
+	size_t rows,cols,nnz,element_count;
+
+	/** training matrix info */
+	rsb_flags_t flags,storage;
+	rsb_type_t typecode;
+
+	/** millions of floating point operations */
+	double m_flops[RSB_ROWS_UNROLL_ARRAY_LENGTH][RSB_COLUMNS_UNROLL_ARRAY_LENGTH];
+
+	/** millions of effective floating point operations (==m_flops/fillin) */
+	double e_mflops[RSB_ROWS_UNROLL_ARRAY_LENGTH][RSB_COLUMNS_UNROLL_ARRAY_LENGTH];
+
+	/** fillin */
+	double fillin[RSB_ROWS_UNROLL_ARRAY_LENGTH][RSB_COLUMNS_UNROLL_ARRAY_LENGTH];
+
+	/** time in seconds */
+	double seconds[RSB_ROWS_UNROLL_ARRAY_LENGTH][RSB_COLUMNS_UNROLL_ARRAY_LENGTH];
+};
+/*! \brief Reference performance info for a single matrix operation, all possible unrollings. */
+struct rsb_mop_reference_performance_info_t
+{
+	/** performance info per fitting sample */
+	struct rsb_mop_performance_info_t pipfs[RSB_FITTING_SAMPLES];
+
+	/** blocks per row density              */
+	double                        blocks_per_row[RSB_FITTING_SAMPLES];
+
+	/** alpha, beta, gamma parameterization as in the accels experimental setup*/
+	double alpha[RSB_ROWS_UNROLL_ARRAY_LENGTH][RSB_COLUMNS_UNROLL_ARRAY_LENGTH];
+	double beta [RSB_ROWS_UNROLL_ARRAY_LENGTH][RSB_COLUMNS_UNROLL_ARRAY_LENGTH];
+	double gamma[RSB_ROWS_UNROLL_ARRAY_LENGTH][RSB_COLUMNS_UNROLL_ARRAY_LENGTH];
+};
+/*! \brief Performance info for multiple matrix operations. */
+struct rsb_mops_performance_info_t
+{
+	/** performance info per matrix operation */
+	struct rsb_mop_performance_info_t	pipmo[RSB_IMPLEMENTED_META_MOPS];
+};
+/*! \brief Reference performance info for multiple matrix operations. */
+struct rsb_mops_reference_performance_info_t
+{
+	/** performance info per matrix operation */
+	struct rsb_mop_reference_performance_info_t pipmo[RSB_IMPLEMENTED_META_MOPS];
+};
+/*! \brief Global performance info for all matrix operations and types. */
+struct rsb_global_performance_info_t
+{
+	/** global performance info */
+	struct rsb_mops_performance_info_t	gpi[RSB_IMPLEMENTED_TYPES];
+};
+/*! \brief Global reference performance info for all matrix operations and types. */
+struct rsb_global_reference_performance_info_t
+{
+	rsb_bool_t initialized; /**< if not zero, measurements should be considered valid */
+	/** global performance info */
+	struct rsb_mops_reference_performance_info_t gpi[RSB_IMPLEMENTED_TYPES];
+};
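+
+/*
+ * Editorial summary of the nesting above: rsb_global_reference_performance_info_t
+ * holds one rsb_mops_reference_performance_info_t per numerical type, each
+ * holding one rsb_mop_reference_performance_info_t per matrix operation, each
+ * holding one rsb_mop_performance_info_t per fitting sample; every such record
+ * stores an RSB_ROWS_UNROLL_ARRAY_LENGTH x RSB_COLUMNS_UNROLL_ARRAY_LENGTH
+ * grid of measurements (Mflops, effective Mflops, fillin, seconds).
+ */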
+
+rsb_err_t rsb__print_mop_reference_performance_info_header(void);	/* temporary */
+rsb_err_t rsb__print_mop_reference_performance_info(const struct rsb_mop_reference_performance_info_t *pi, char *s);	/* temporary */
+rsb_err_t rsb__dump_global_performance_info(const struct rsb_global_performance_info_t *gpip);	/* temporary */
+rsb_err_t rsb__dump_global_reference_performance_info(const struct rsb_global_reference_performance_info_t *gpip);	/* temporary */
+#if RSB_WANT_PERFORMANCE_FILE
+rsb_err_t rsb__save_global_reference_performance_info(const struct rsb_global_reference_performance_info_t *gpip);
+#endif /* RSB_WANT_PERFORMANCE_FILE */
+rsb_err_t rsb__dump_performance_info(const struct rsb_mop_performance_info_t * pi, const char * pid);
+rsb_err_t rsb__dump_performance_info_line(const struct rsb_mop_performance_info_t * pi);/* new */
+rsb_err_t rsb__dump_current_global_reference_performance_info(void);
+void rsb__pinfo_init(struct rsb_mtx_partitioning_info_t * pinfop,
+	rsb_blk_idx_t M_b, rsb_blk_idx_t K_b,
+	rsb_coo_idx_t *rpntr,rsb_coo_idx_t *cpntr,
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_blk_idx_t br, rsb_blk_idx_t bc);
+rsb_err_t rsb__dump_system_performance_summary(void);
+
+#define RSB_PERFORMANCE_BINARY_DUMP_FILE "rsb_performance_profile.bin"
+
+rsb_err_t rsb__perf_init(void);
+rsb_err_t rsb__perf_exit(void);
+size_t rsb_spmv_memory_accessed_bytes(const struct rsb_mtx_t * mtxAp);
+size_t rsb_spmv_memory_accessed_bytes_min(const struct rsb_mtx_t * mtxAp);
+size_t rsb_spmv_memory_accessed_bytes_max(const struct rsb_mtx_t * mtxAp);
+double rsb_spmv_memory_accessed_bytes_wr_ratio(const struct rsb_mtx_t * mtxAp);
+rsb_nnz_idx_t rsb__fillin_estimation_nnz_count(
+	const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, 
+	const  rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_flags_t flags, rsb_int nprobes
+);/* NEW */
+rsb_err_t rsb__estimate_expected_fillin_for_blocking(
+	const void * VA, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, 
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	const  rsb_nnz_idx_t nnz, rsb_type_t typecode, 
+	rsb_flags_t flags,
+	rsb_coo_idx_t mB, rsb_coo_idx_t kB,
+	double *efillinp);/* NEW */
+
+rsb_err_t rsb__estimate_expected_raw_performance_for_blocking(
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_coo_idx_t mB, rsb_coo_idx_t kB,
+	const  rsb_nnz_idx_t nnz, rsb_type_t typecode, 
+	rsb_flags_t flags,
+	double efillin,
+	double*eperf);
+
+size_t rsb_spmv_memory_accessed_bytes_(
+	rsb_coo_idx_t mB, rsb_coo_idx_t kB,
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_nnz_idx_t element_count,
+	rsb_nnz_idx_t block_count,
+	rsb_blk_idx_t Mdim,
+	size_t el_size
+);
+rsb_err_t rsb__dump_performance_record(const char * s, const struct rsb_mtx_t * mtxAp, rsb_real_t rsb_NMflops_ps, rsb_real_t rsb_RMflops_ps, const char *op, rsb_flags_t inflags);
+FILE *rsb__util_fopen(const char *path, const char *mode);
+
+/* 
+	NEW, EXPERIMENTAL
+	TODO : should be user-specified
+ */
+#define RSB_FIRST_FITTING_SAMPLE_BW_MIN 10
+#define RSB_FIRST_FITTING_SAMPLE_BW_MAX 100
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+#endif /* RSB_PERF_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_permute.c b/rsb_permute.c
new file mode 100644
index 0000000..07c19e2
--- /dev/null
+++ b/rsb_permute.c
@@ -0,0 +1,1169 @@
+/* @cond INNERDOC */
+/**
+ * @file
+ * @brief
+ * Permutation functions.
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#include "rsb_common.h"
+
+rsb_err_t rsb__do_permute_values_in_place_with_coo_index(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t * K, rsb_nnz_idx_t nnz, rsb_type_t type){
+	/**
+		In-place permutation via swap chains; much slower than the
+		out-of-place variant.
+
+		FIXME : document
+
+		Sadly, this is O(nnz^2) in the worst case (or perhaps closer
+		to qsort's order).
+	 */
+	rsb_coo_idx_t n;/* this is the case where coo cannot overflow */
+
+	switch(type)
+	{
+		/* supported (double,float,float complex,double complex) */
+	case RSB_NUMERICAL_TYPE_DOUBLE 	:
+
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		register rsb_coo_idx_t t,m=n;
+		RSB_DEBUG_ASSERT(K[n]>=0);
+		if(K[n]==n)
+			continue;
+		if(K[n]>n)
+			t=K[n];
+		else
+		{
+			/* follow swap chain */
+			while(K[K[m]]<n)
+				m=K[m];
+
+			t=K[K[m]];
+			RSB_DEBUG_ASSERT(t>=0);
+
+#if RSB_DEBUG_SORT_STUFF 
+			K[K[m]]=-1;	// just a debug measure
+#endif /* RSB_DEBUG_SORT_STUFF */
+			K[m]=t;
+		}
+		/* perform the swap */
+		RSB_SWAP(rsb_coo_idx_t,IA[n],IA[t]);
+		RSB_SWAP(rsb_coo_idx_t,JA[n],JA[t]);
+		RSB_SWAP(double,((double*)VA)[n],((double*)VA)[t]);
+	}
+			break;
+	case RSB_NUMERICAL_TYPE_FLOAT 	:
+
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		register rsb_coo_idx_t t,m=n;
+		RSB_DEBUG_ASSERT(K[n]>=0);
+		if(K[n]==n)
+			continue;
+		if(K[n]>n)
+			t=K[n];
+		else
+		{
+			/* follow swap chain */
+			while(K[K[m]]<n)
+				m=K[m];
+
+			t=K[K[m]];
+			RSB_DEBUG_ASSERT(t>=0);
+
+#if RSB_DEBUG_SORT_STUFF 
+			K[K[m]]=-1;	// just a debug measure
+#endif /* RSB_DEBUG_SORT_STUFF */
+			K[m]=t;
+		}
+		/* perform the swap */
+		RSB_SWAP(rsb_coo_idx_t,IA[n],IA[t]);
+		RSB_SWAP(rsb_coo_idx_t,JA[n],JA[t]);
+		RSB_SWAP(float,((float*)VA)[n],((float*)VA)[t]);
+	}
+			break;
+	case RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 	:
+
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		register rsb_coo_idx_t t,m=n;
+		RSB_DEBUG_ASSERT(K[n]>=0);
+		if(K[n]==n)
+			continue;
+		if(K[n]>n)
+			t=K[n];
+		else
+		{
+			/* follow swap chain */
+			while(K[K[m]]<n)
+				m=K[m];
+
+			t=K[K[m]];
+			RSB_DEBUG_ASSERT(t>=0);
+
+#if RSB_DEBUG_SORT_STUFF 
+			K[K[m]]=-1;	// just a debug measure
+#endif /* RSB_DEBUG_SORT_STUFF */
+			K[m]=t;
+		}
+		/* perform the swap */
+		RSB_SWAP(rsb_coo_idx_t,IA[n],IA[t]);
+		RSB_SWAP(rsb_coo_idx_t,JA[n],JA[t]);
+		RSB_SWAP(float complex,((float complex*)VA)[n],((float complex*)VA)[t]);
+	}
+			break;
+	case RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 	:
+
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		register rsb_coo_idx_t t,m=n;
+		RSB_DEBUG_ASSERT(K[n]>=0);
+		if(K[n]==n)
+			continue;
+		if(K[n]>n)
+			t=K[n];
+		else
+		{
+			/* follow swap chain */
+			while(K[K[m]]<n)
+				m=K[m];
+
+			t=K[K[m]];
+			RSB_DEBUG_ASSERT(t>=0);
+
+#if RSB_DEBUG_SORT_STUFF 
+			K[K[m]]=-1;	// just a debug measure
+#endif /* RSB_DEBUG_SORT_STUFF */
+			K[m]=t;
+		}
+		/* perform the swap */
+		RSB_SWAP(rsb_coo_idx_t,IA[n],IA[t]);
+		RSB_SWAP(rsb_coo_idx_t,JA[n],JA[t]);
+		RSB_SWAP(double complex,((double complex*)VA)[n],((double complex*)VA)[t]);
+	}
+			break;
+			/* unsupported type */
+		default :
+				return RSB_ERR_UNSUPPORTED_TYPE	;
+	}
+	return RSB_ERR_NO_ERROR;
+}
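+
+#if 0
+/* Illustrative sketch (editorial, not part of the library): a textbook
+ * cycle-following formulation of the same task as above, i.e. applying a
+ * permutation K in place so that afterwards A[i] equals the old A[K[i]].
+ * Unlike the swap-chain code above it destroys K, using negative entries
+ * to mark visited positions; K must be a valid permutation of 0..n-1. */
+static void rsb_perm_apply_in_place_sketch(int *A, int *K, int n)
+{
+	int s;
+
+	for(s=0;s<n;++s)
+	{
+		int i = s;
+		const int t = A[s];	/* saved head of the cycle */
+
+		if(K[s] < 0)
+			continue;	/* already placed by an earlier cycle */
+		while(K[i] != s)
+		{
+			const int j = K[i];
+
+			A[i] = A[j];	/* pull the element this slot wants */
+			K[i] = -1;	/* mark as done */
+			i = j;		/* advance along the cycle */
+		}
+		A[i] = t;	/* close the cycle with the saved head */
+		K[i] = -1;
+	}
+}
+#endif /* 0 */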
+
+rsb_err_t rsb__do_permute_values_in_place_with_nnz_index(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t * K, rsb_nnz_idx_t nnz, rsb_type_t type){
+		/*	
+			In-place permutation via swap chains; much slower than
+			the out-of-place variant.
+
+			FIXME : document and finish (s/double/ * /).
+
+			Sadly, this is O(nnz^2) in the worst case (or perhaps
+			closer to qsort's order).
+		 */
+	rsb_nnz_idx_t n;
+
+	switch(type)
+	{
+		/* supported (double,float,float complex,double complex) */
+	case RSB_NUMERICAL_TYPE_DOUBLE 	:
+
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		register rsb_nnz_idx_t t,m=n;
+		RSB_DEBUG_ASSERT(K[n]>=0);
+		if(K[n]==n)
+			continue;
+		if(K[n]>n)
+			t=K[n];
+		else
+		{
+			/* follow swap chain */
+			while(K[K[m]]<n)
+				m=K[m];
+
+			t=K[K[m]];
+			RSB_DEBUG_ASSERT(t>=0);
+
+#if RSB_DEBUG_SORT_STUFF 
+			K[K[m]]=-1;	// just a debug measure
+#endif /* RSB_DEBUG_SORT_STUFF */
+			K[m]=t;
+		}
+		/* perform the swap */
+		RSB_SWAP(rsb_coo_idx_t,IA[n],IA[t]);
+		RSB_SWAP(rsb_coo_idx_t,JA[n],JA[t]);
+		RSB_SWAP(double,((double*)VA)[n],((double*)VA)[t]);
+	}
+			break;
+	case RSB_NUMERICAL_TYPE_FLOAT 	:
+
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		register rsb_nnz_idx_t t,m=n;
+		RSB_DEBUG_ASSERT(K[n]>=0);
+		if(K[n]==n)
+			continue;
+		if(K[n]>n)
+			t=K[n];
+		else
+		{
+			/* follow swap chain */
+			while(K[K[m]]<n)
+				m=K[m];
+
+			t=K[K[m]];
+			RSB_DEBUG_ASSERT(t>=0);
+
+#if RSB_DEBUG_SORT_STUFF 
+			K[K[m]]=-1;	// just a debug measure
+#endif /* RSB_DEBUG_SORT_STUFF */
+			K[m]=t;
+		}
+		/* perform the swap */
+		RSB_SWAP(rsb_coo_idx_t,IA[n],IA[t]);
+		RSB_SWAP(rsb_coo_idx_t,JA[n],JA[t]);
+		RSB_SWAP(float,((float*)VA)[n],((float*)VA)[t]);
+	}
+			break;
+	case RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 	:
+
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		register rsb_nnz_idx_t t,m=n;
+		RSB_DEBUG_ASSERT(K[n]>=0);
+		if(K[n]==n)
+			continue;
+		if(K[n]>n)
+			t=K[n];
+		else
+		{
+			/* follow swap chain */
+			while(K[K[m]]<n)
+				m=K[m];
+
+			t=K[K[m]];
+			RSB_DEBUG_ASSERT(t>=0);
+
+#if RSB_DEBUG_SORT_STUFF 
+			K[K[m]]=-1;	// just a debug measure
+#endif /* RSB_DEBUG_SORT_STUFF */
+			K[m]=t;
+		}
+		/* perform the swap */
+		RSB_SWAP(rsb_coo_idx_t,IA[n],IA[t]);
+		RSB_SWAP(rsb_coo_idx_t,JA[n],JA[t]);
+		RSB_SWAP(float complex,((float complex*)VA)[n],((float complex*)VA)[t]);
+	}
+			break;
+	case RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 	:
+
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		register rsb_nnz_idx_t t,m=n;
+		RSB_DEBUG_ASSERT(K[n]>=0);
+		if(K[n]==n)
+			continue;
+		if(K[n]>n)
+			t=K[n];
+		else
+		{
+			/* follow swap chain */
+			while(K[K[m]]<n)
+				m=K[m];
+
+			t=K[K[m]];
+			RSB_DEBUG_ASSERT(t>=0);
+
+#if RSB_DEBUG_SORT_STUFF 
+			K[K[m]]=-1;	// just a debug measure
+#endif /* RSB_DEBUG_SORT_STUFF */
+			K[m]=t;
+		}
+		/* perform the swap */
+		RSB_SWAP(rsb_coo_idx_t,IA[n],IA[t]);
+		RSB_SWAP(rsb_coo_idx_t,JA[n],JA[t]);
+		RSB_SWAP(double complex,((double complex*)VA)[n],((double complex*)VA)[t]);
+	}
+			break;
+			/* unsupported type */
+		default :
+				return RSB_ERR_UNSUPPORTED_TYPE	;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_permute_values_with_coo_index( void * rVA, const void *VA, rsb_coo_idx_t * rIA, const rsb_coo_idx_t * IA, rsb_coo_idx_t * rJA, const rsb_coo_idx_t * JA, const rsb_coo_idx_t * K, rsb_nnz_idx_t nnz, rsb_type_t type)	{
+		/*
+		 * FIXME : UNOPTIMIZED !
+		 */
+		rsb_coo_idx_t i;/* in this algorithm, coo cannot overflow */
+
+		/* should permute here */
+		for(i=0;RSB_LIKELY(i<nnz);++i)
+		{
+			RSB_DEBUG_ASSERT(K[i]>=0);
+			RSB_DEBUG_ASSERT(K[i]<nnz);
+
+			rIA [i]=IA [K[i]];
+			rJA [i]=JA [K[i]];
+		}
+
+		switch(type)
+		{
+			/* supported (double,float,float complex,double complex) */
+			case RSB_NUMERICAL_TYPE_DOUBLE 	:
+		{
+for(i=0;i+15<nnz;i+=16){
+((double*)rVA)[i+0 ]=((double*)VA)[K[(i+0 )]];
+	((double*)rVA)[i+1 ]=((double*)VA)[K[(i+1 )]];
+	((double*)rVA)[i+2 ]=((double*)VA)[K[(i+2 )]];
+	((double*)rVA)[i+3 ]=((double*)VA)[K[(i+3 )]];
+	((double*)rVA)[i+4 ]=((double*)VA)[K[(i+4 )]];
+	((double*)rVA)[i+5 ]=((double*)VA)[K[(i+5 )]];
+	((double*)rVA)[i+6 ]=((double*)VA)[K[(i+6 )]];
+	((double*)rVA)[i+7 ]=((double*)VA)[K[(i+7 )]];
+	((double*)rVA)[i+8 ]=((double*)VA)[K[(i+8 )]];
+	((double*)rVA)[i+9 ]=((double*)VA)[K[(i+9 )]];
+	((double*)rVA)[i+10 ]=((double*)VA)[K[(i+10 )]];
+	((double*)rVA)[i+11 ]=((double*)VA)[K[(i+11 )]];
+	((double*)rVA)[i+12 ]=((double*)VA)[K[(i+12 )]];
+	((double*)rVA)[i+13 ]=((double*)VA)[K[(i+13 )]];
+	((double*)rVA)[i+14 ]=((double*)VA)[K[(i+14 )]];
+	((double*)rVA)[i+15 ]=((double*)VA)[K[(i+15 )]];
+	}
+for(     ;i<nnz;++i){ ((double*)rVA)[i+0 ]=((double*)VA)[K[(i+0 )]];
+	 }
+}
+
+			
+			break;
+			case RSB_NUMERICAL_TYPE_FLOAT 	:
+		{
+for(i=0;i+15<nnz;i+=16){
+((float*)rVA)[i+0 ]=((float*)VA)[K[(i+0 )]];
+	((float*)rVA)[i+1 ]=((float*)VA)[K[(i+1 )]];
+	((float*)rVA)[i+2 ]=((float*)VA)[K[(i+2 )]];
+	((float*)rVA)[i+3 ]=((float*)VA)[K[(i+3 )]];
+	((float*)rVA)[i+4 ]=((float*)VA)[K[(i+4 )]];
+	((float*)rVA)[i+5 ]=((float*)VA)[K[(i+5 )]];
+	((float*)rVA)[i+6 ]=((float*)VA)[K[(i+6 )]];
+	((float*)rVA)[i+7 ]=((float*)VA)[K[(i+7 )]];
+	((float*)rVA)[i+8 ]=((float*)VA)[K[(i+8 )]];
+	((float*)rVA)[i+9 ]=((float*)VA)[K[(i+9 )]];
+	((float*)rVA)[i+10 ]=((float*)VA)[K[(i+10 )]];
+	((float*)rVA)[i+11 ]=((float*)VA)[K[(i+11 )]];
+	((float*)rVA)[i+12 ]=((float*)VA)[K[(i+12 )]];
+	((float*)rVA)[i+13 ]=((float*)VA)[K[(i+13 )]];
+	((float*)rVA)[i+14 ]=((float*)VA)[K[(i+14 )]];
+	((float*)rVA)[i+15 ]=((float*)VA)[K[(i+15 )]];
+	}
+for(     ;i<nnz;++i){ ((float*)rVA)[i+0 ]=((float*)VA)[K[(i+0 )]];
+	 }
+}
+
+			
+			break;
+			case RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 	:
+		{
+for(i=0;i+15<nnz;i+=16){
+((float complex*)rVA)[i+0 ]=((float complex*)VA)[K[(i+0 )]];
+	((float complex*)rVA)[i+1 ]=((float complex*)VA)[K[(i+1 )]];
+	((float complex*)rVA)[i+2 ]=((float complex*)VA)[K[(i+2 )]];
+	((float complex*)rVA)[i+3 ]=((float complex*)VA)[K[(i+3 )]];
+	((float complex*)rVA)[i+4 ]=((float complex*)VA)[K[(i+4 )]];
+	((float complex*)rVA)[i+5 ]=((float complex*)VA)[K[(i+5 )]];
+	((float complex*)rVA)[i+6 ]=((float complex*)VA)[K[(i+6 )]];
+	((float complex*)rVA)[i+7 ]=((float complex*)VA)[K[(i+7 )]];
+	((float complex*)rVA)[i+8 ]=((float complex*)VA)[K[(i+8 )]];
+	((float complex*)rVA)[i+9 ]=((float complex*)VA)[K[(i+9 )]];
+	((float complex*)rVA)[i+10 ]=((float complex*)VA)[K[(i+10 )]];
+	((float complex*)rVA)[i+11 ]=((float complex*)VA)[K[(i+11 )]];
+	((float complex*)rVA)[i+12 ]=((float complex*)VA)[K[(i+12 )]];
+	((float complex*)rVA)[i+13 ]=((float complex*)VA)[K[(i+13 )]];
+	((float complex*)rVA)[i+14 ]=((float complex*)VA)[K[(i+14 )]];
+	((float complex*)rVA)[i+15 ]=((float complex*)VA)[K[(i+15 )]];
+	}
+for(     ;i<nnz;++i){ ((float complex*)rVA)[i+0 ]=((float complex*)VA)[K[(i+0 )]];
+	 }
+}
+
+			
+			break;
+			case RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 	:
+		{
+for(i=0;i+15<nnz;i+=16){
+((double complex*)rVA)[i+0 ]=((double complex*)VA)[K[(i+0 )]];
+	((double complex*)rVA)[i+1 ]=((double complex*)VA)[K[(i+1 )]];
+	((double complex*)rVA)[i+2 ]=((double complex*)VA)[K[(i+2 )]];
+	((double complex*)rVA)[i+3 ]=((double complex*)VA)[K[(i+3 )]];
+	((double complex*)rVA)[i+4 ]=((double complex*)VA)[K[(i+4 )]];
+	((double complex*)rVA)[i+5 ]=((double complex*)VA)[K[(i+5 )]];
+	((double complex*)rVA)[i+6 ]=((double complex*)VA)[K[(i+6 )]];
+	((double complex*)rVA)[i+7 ]=((double complex*)VA)[K[(i+7 )]];
+	((double complex*)rVA)[i+8 ]=((double complex*)VA)[K[(i+8 )]];
+	((double complex*)rVA)[i+9 ]=((double complex*)VA)[K[(i+9 )]];
+	((double complex*)rVA)[i+10 ]=((double complex*)VA)[K[(i+10 )]];
+	((double complex*)rVA)[i+11 ]=((double complex*)VA)[K[(i+11 )]];
+	((double complex*)rVA)[i+12 ]=((double complex*)VA)[K[(i+12 )]];
+	((double complex*)rVA)[i+13 ]=((double complex*)VA)[K[(i+13 )]];
+	((double complex*)rVA)[i+14 ]=((double complex*)VA)[K[(i+14 )]];
+	((double complex*)rVA)[i+15 ]=((double complex*)VA)[K[(i+15 )]];
+	}
+for(     ;i<nnz;++i){ ((double complex*)rVA)[i+0 ]=((double complex*)VA)[K[(i+0 )]];
+	 }
+}
+
+			
+			break;
+			/* unsupported type */
+			default :
+				return RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+		return RSB_ERR_NO_ERROR;
+	}
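+
+/*
+ * Editorial note: each switch case above (and in the analogous nnz-index
+ * variant below) is the M4-expanded, 16-way unrolled form of the generic
+ * gather
+ *
+ *	for(i=0;i<nnz;++i)
+ *		rVA[i] = VA[K[i]];
+ *
+ * on the respective numerical type, plus a scalar tail loop for the
+ * remaining nnz%16 entries.
+ */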
+
+
+rsb_err_t rsb__do_permute_rows_with_coo_index( rsb_coo_idx_t * IA, const rsb_coo_idx_t * K, rsb_nnz_idx_t nnz)	{
+		/*
+		 * FIXME : UNOPTIMIZED !
+		 */
+		rsb_coo_idx_t i;/* in this algorithm, coo cannot overflow */
+
+		/* should permute here */
+		for(i=0;RSB_LIKELY(i<nnz);++i)
+		{
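+			/* FIXME : these asserts check K[i], yet the statement
+			   below indexes K by IA[i]; they look copy-pasted from
+			   the gather variants above. */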
+			RSB_DEBUG_ASSERT(K[i]>=0);
+			RSB_DEBUG_ASSERT(K[i]<nnz);
+
+			IA [i]=K[IA[i]];
+		}
+		return RSB_ERR_NO_ERROR;
+	}
+
+
+rsb_err_t rsb__do_permute_values_with_nnz_index( void * rVA, const void *VA, rsb_coo_idx_t * rIA, const rsb_coo_idx_t * IA, rsb_coo_idx_t * rJA, const rsb_coo_idx_t * JA, const rsb_nnz_idx_t * K, rsb_nnz_idx_t nnz, rsb_type_t typecode)	{
+		/*
+		 * FIXME : UNOPTIMIZED !
+		 */
+		rsb_nnz_idx_t i;
+
+		/* should permute here */
+		for(i=0;RSB_LIKELY(i<nnz);++i)
+		{
+			RSB_DEBUG_ASSERT(K[i]>=0);
+			RSB_DEBUG_ASSERT(K[i]<nnz);
+
+			rIA [i]=IA [K[i]];
+			rJA [i]=JA [K[i]];
+		}
+
+		switch(typecode)
+		{
+			/* supported (double,float,float complex,double complex) */
+			case RSB_NUMERICAL_TYPE_DOUBLE 	:
+		{
+for(i=0;i+15<nnz;i+=16){
+((double*)rVA)[i+0 ]=((double*)VA)[K[(i+0 )]];
+	((double*)rVA)[i+1 ]=((double*)VA)[K[(i+1 )]];
+	((double*)rVA)[i+2 ]=((double*)VA)[K[(i+2 )]];
+	((double*)rVA)[i+3 ]=((double*)VA)[K[(i+3 )]];
+	((double*)rVA)[i+4 ]=((double*)VA)[K[(i+4 )]];
+	((double*)rVA)[i+5 ]=((double*)VA)[K[(i+5 )]];
+	((double*)rVA)[i+6 ]=((double*)VA)[K[(i+6 )]];
+	((double*)rVA)[i+7 ]=((double*)VA)[K[(i+7 )]];
+	((double*)rVA)[i+8 ]=((double*)VA)[K[(i+8 )]];
+	((double*)rVA)[i+9 ]=((double*)VA)[K[(i+9 )]];
+	((double*)rVA)[i+10 ]=((double*)VA)[K[(i+10 )]];
+	((double*)rVA)[i+11 ]=((double*)VA)[K[(i+11 )]];
+	((double*)rVA)[i+12 ]=((double*)VA)[K[(i+12 )]];
+	((double*)rVA)[i+13 ]=((double*)VA)[K[(i+13 )]];
+	((double*)rVA)[i+14 ]=((double*)VA)[K[(i+14 )]];
+	((double*)rVA)[i+15 ]=((double*)VA)[K[(i+15 )]];
+	}
+for(     ;i<nnz;++i){ ((double*)rVA)[i+0 ]=((double*)VA)[K[(i+0 )]];
+	 }
+}
+
+			
+			break;
+			case RSB_NUMERICAL_TYPE_FLOAT 	:
+		{
+for(i=0;i+15<nnz;i+=16){
+((float*)rVA)[i+0 ]=((float*)VA)[K[(i+0 )]];
+	((float*)rVA)[i+1 ]=((float*)VA)[K[(i+1 )]];
+	((float*)rVA)[i+2 ]=((float*)VA)[K[(i+2 )]];
+	((float*)rVA)[i+3 ]=((float*)VA)[K[(i+3 )]];
+	((float*)rVA)[i+4 ]=((float*)VA)[K[(i+4 )]];
+	((float*)rVA)[i+5 ]=((float*)VA)[K[(i+5 )]];
+	((float*)rVA)[i+6 ]=((float*)VA)[K[(i+6 )]];
+	((float*)rVA)[i+7 ]=((float*)VA)[K[(i+7 )]];
+	((float*)rVA)[i+8 ]=((float*)VA)[K[(i+8 )]];
+	((float*)rVA)[i+9 ]=((float*)VA)[K[(i+9 )]];
+	((float*)rVA)[i+10 ]=((float*)VA)[K[(i+10 )]];
+	((float*)rVA)[i+11 ]=((float*)VA)[K[(i+11 )]];
+	((float*)rVA)[i+12 ]=((float*)VA)[K[(i+12 )]];
+	((float*)rVA)[i+13 ]=((float*)VA)[K[(i+13 )]];
+	((float*)rVA)[i+14 ]=((float*)VA)[K[(i+14 )]];
+	((float*)rVA)[i+15 ]=((float*)VA)[K[(i+15 )]];
+	}
+for(     ;i<nnz;++i){ ((float*)rVA)[i+0 ]=((float*)VA)[K[(i+0 )]];
+	 }
+}
+
+			
+			break;
+			case RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 	:
+		{
+for(i=0;i+15<nnz;i+=16){
+((float complex*)rVA)[i+0 ]=((float complex*)VA)[K[(i+0 )]];
+	((float complex*)rVA)[i+1 ]=((float complex*)VA)[K[(i+1 )]];
+	((float complex*)rVA)[i+2 ]=((float complex*)VA)[K[(i+2 )]];
+	((float complex*)rVA)[i+3 ]=((float complex*)VA)[K[(i+3 )]];
+	((float complex*)rVA)[i+4 ]=((float complex*)VA)[K[(i+4 )]];
+	((float complex*)rVA)[i+5 ]=((float complex*)VA)[K[(i+5 )]];
+	((float complex*)rVA)[i+6 ]=((float complex*)VA)[K[(i+6 )]];
+	((float complex*)rVA)[i+7 ]=((float complex*)VA)[K[(i+7 )]];
+	((float complex*)rVA)[i+8 ]=((float complex*)VA)[K[(i+8 )]];
+	((float complex*)rVA)[i+9 ]=((float complex*)VA)[K[(i+9 )]];
+	((float complex*)rVA)[i+10 ]=((float complex*)VA)[K[(i+10 )]];
+	((float complex*)rVA)[i+11 ]=((float complex*)VA)[K[(i+11 )]];
+	((float complex*)rVA)[i+12 ]=((float complex*)VA)[K[(i+12 )]];
+	((float complex*)rVA)[i+13 ]=((float complex*)VA)[K[(i+13 )]];
+	((float complex*)rVA)[i+14 ]=((float complex*)VA)[K[(i+14 )]];
+	((float complex*)rVA)[i+15 ]=((float complex*)VA)[K[(i+15 )]];
+	}
+for(     ;i<nnz;++i){ ((float complex*)rVA)[i+0 ]=((float complex*)VA)[K[(i+0 )]];
+	 }
+}
+
+			
+			break;
+			case RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 	:
+		{
+for(i=0;i+15<nnz;i+=16){
+((double complex*)rVA)[i+0 ]=((double complex*)VA)[K[(i+0 )]];
+	((double complex*)rVA)[i+1 ]=((double complex*)VA)[K[(i+1 )]];
+	((double complex*)rVA)[i+2 ]=((double complex*)VA)[K[(i+2 )]];
+	((double complex*)rVA)[i+3 ]=((double complex*)VA)[K[(i+3 )]];
+	((double complex*)rVA)[i+4 ]=((double complex*)VA)[K[(i+4 )]];
+	((double complex*)rVA)[i+5 ]=((double complex*)VA)[K[(i+5 )]];
+	((double complex*)rVA)[i+6 ]=((double complex*)VA)[K[(i+6 )]];
+	((double complex*)rVA)[i+7 ]=((double complex*)VA)[K[(i+7 )]];
+	((double complex*)rVA)[i+8 ]=((double complex*)VA)[K[(i+8 )]];
+	((double complex*)rVA)[i+9 ]=((double complex*)VA)[K[(i+9 )]];
+	((double complex*)rVA)[i+10 ]=((double complex*)VA)[K[(i+10 )]];
+	((double complex*)rVA)[i+11 ]=((double complex*)VA)[K[(i+11 )]];
+	((double complex*)rVA)[i+12 ]=((double complex*)VA)[K[(i+12 )]];
+	((double complex*)rVA)[i+13 ]=((double complex*)VA)[K[(i+13 )]];
+	((double complex*)rVA)[i+14 ]=((double complex*)VA)[K[(i+14 )]];
+	((double complex*)rVA)[i+15 ]=((double complex*)VA)[K[(i+15 )]];
+	}
+for(     ;i<nnz;++i){ ((double complex*)rVA)[i+0 ]=((double complex*)VA)[K[(i+0 )]];
+	 }
+}
+
+			
+			break;
+			/* unsupported type */
+			default :
+				return RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+		return RSB_ERR_NO_ERROR;
+	}
+
+
+void rsb_ip_reord(rsb_nnz_idx_t n, void * VAp, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t * P, rsb_type_t typecode){
+	/**
+		This is an adapted PSBLAS psb_ip_reord_d1i2 routine.
+		(The --lp/--k and ++k/k=k+1 adjustments appear to compensate
+		for the Fortran original's 1-based indexing.)
+	*/
+	
+	switch(typecode)
+	{
+			/* supported (double,float,float complex,double complex) */
+	case RSB_NUMERICAL_TYPE_DOUBLE 	:
+	{
+		rsb_coo_idx_t isw1, isw2;
+		rsb_nnz_idx_t lswap, lp, k;
+		double swap;
+		double * VA=VAp;
+
+		lp = P[0];
+		k  = 1;
+		while(1)
+		{
+			if (RSB_UNLIKELY((lp==0) || (k>n))) break;
+			while(1)
+			{
+				if (lp >= k) break;
+				lp = P[lp];
+			}
+			lswap    = P[lp];
+			P[lp]  = P[k];
+			P[k]   = lp;
+			--lp;
+			--k;
+			swap   = VA[lp];
+			VA[lp] = VA[k];
+			VA[k]  = swap;
+			isw1   = IA[lp];
+			IA[lp] = IA[k];
+			IA[k]  = isw1;
+			isw2   = JA[lp];
+			JA[lp] = JA[k];
+			JA[k]  = isw2;
+			++k;
+			lp = lswap ;
+			k  = k + 1;
+		}
+	}
+		break;
+		case RSB_NUMERICAL_TYPE_FLOAT 	:
+	{
+		rsb_coo_idx_t isw1, isw2;
+		rsb_nnz_idx_t lswap, lp, k;
+		float swap;
+		float * VA=VAp;
+
+		lp = P[0];
+		k  = 1;
+		while(1)
+		{
+			if (RSB_UNLIKELY((lp==0) || (k>n))) break;
+			while(1)
+			{
+				if (lp >= k) break;
+				lp = P[lp];
+			}
+			lswap    = P[lp];
+			P[lp]  = P[k];
+			P[k]   = lp;
+			--lp;
+			--k;
+			swap   = VA[lp];
+			VA[lp] = VA[k];
+			VA[k]  = swap;
+			isw1   = IA[lp];
+			IA[lp] = IA[k];
+			IA[k]  = isw1;
+			isw2   = JA[lp];
+			JA[lp] = JA[k];
+			JA[k]  = isw2;
+			++k;
+			lp = lswap ;
+			k  = k + 1;
+		}
+	}
+		break;
+		case RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 	:
+	{
+		rsb_coo_idx_t isw1, isw2;
+		rsb_nnz_idx_t lswap, lp, k;
+		float complex swap;
+		float complex * VA=VAp;
+
+		lp = P[0];
+		k  = 1;
+		while(1)
+		{
+			if (RSB_UNLIKELY((lp==0) || (k>n))) break;
+			while(1)
+			{
+				if (lp >= k) break;
+				lp = P[lp];
+			}
+			lswap    = P[lp];
+			P[lp]  = P[k];
+			P[k]   = lp;
+			--lp;
+			--k;
+			swap   = VA[lp];
+			VA[lp] = VA[k];
+			VA[k]  = swap;
+			isw1   = IA[lp];
+			IA[lp] = IA[k];
+			IA[k]  = isw1;
+			isw2   = JA[lp];
+			JA[lp] = JA[k];
+			JA[k]  = isw2;
+			++k;
+			lp = lswap ;
+			k  = k + 1;
+		}
+	}
+		break;
+		case RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 	:
+	{
+		rsb_coo_idx_t isw1, isw2;
+		rsb_nnz_idx_t lswap, lp, k;
+		double complex swap;
+		double complex * VA=VAp;
+
+		lp = P[0];
+		k  = 1;
+		while(1)
+		{
+			if (RSB_UNLIKELY((lp==0) || (k>n))) break;
+			while(1)
+			{
+				if (lp >= k) break;
+				lp = P[lp];
+			}
+			lswap    = P[lp];
+			P[lp]  = P[k];
+			P[k]   = lp;
+			--lp;
+			--k;
+			swap   = VA[lp];
+			VA[lp] = VA[k];
+			VA[k]  = swap;
+			isw1   = IA[lp];
+			IA[lp] = IA[k];
+			IA[k]  = isw1;
+			isw2   = JA[lp];
+			JA[lp] = JA[k];
+			JA[k]  = isw2;
+			++k;
+			lp = lswap ;
+			k  = k + 1;
+		}
+	}
+		break;
+	
+		/* unsupported type */
+		default :
+			return;
+	}
+
+
+}
+
+void rsb_util_do_scatter_rows(void * RSB_RESTRICT oVA, rsb_coo_idx_t * RSB_RESTRICT oIA, rsb_coo_idx_t * RSB_RESTRICT oJA, const void * RSB_RESTRICT iVA, const rsb_coo_idx_t * RSB_RESTRICT IA, const rsb_coo_idx_t * RSB_RESTRICT JA, rsb_nnz_idx_t * RSB_RESTRICT PA, const rsb_nnz_idx_t nnz, const rsb_type_t typecode){
+	/**
+		Scatters each input nonzero to the output position PA[IA[n]],
+		advancing PA as it goes. (The psb_ip_reord_d1i2 reference looks
+		copy-pasted from rsb_ip_reord above.)
+	*/
+	
+	switch(typecode)
+	{
+			/* supported (double,float,float complex,double complex) */
+	case RSB_NUMERICAL_TYPE_DOUBLE 	:
+	{
+		rsb_nnz_idx_t nzi;
+		double*VA=(double*)oVA;
+		{
+for(nzi=0;nzi+15<nnz;nzi+=16){
+VA[PA[IA[nzi+0 ]]]=((double*)iVA)[nzi+0 ];
+			oIA[PA[IA[nzi+0 ]]]=IA[nzi+0 ];
+			oJA[PA[IA[nzi+0 ]]]=JA[nzi+0 ];
+			PA[IA[nzi+0 ]]++;
+		VA[PA[IA[nzi+1 ]]]=((double*)iVA)[nzi+1 ];
+			oIA[PA[IA[nzi+1 ]]]=IA[nzi+1 ];
+			oJA[PA[IA[nzi+1 ]]]=JA[nzi+1 ];
+			PA[IA[nzi+1 ]]++;
+		VA[PA[IA[nzi+2 ]]]=((double*)iVA)[nzi+2 ];
+			oIA[PA[IA[nzi+2 ]]]=IA[nzi+2 ];
+			oJA[PA[IA[nzi+2 ]]]=JA[nzi+2 ];
+			PA[IA[nzi+2 ]]++;
+		VA[PA[IA[nzi+3 ]]]=((double*)iVA)[nzi+3 ];
+			oIA[PA[IA[nzi+3 ]]]=IA[nzi+3 ];
+			oJA[PA[IA[nzi+3 ]]]=JA[nzi+3 ];
+			PA[IA[nzi+3 ]]++;
+		VA[PA[IA[nzi+4 ]]]=((double*)iVA)[nzi+4 ];
+			oIA[PA[IA[nzi+4 ]]]=IA[nzi+4 ];
+			oJA[PA[IA[nzi+4 ]]]=JA[nzi+4 ];
+			PA[IA[nzi+4 ]]++;
+		VA[PA[IA[nzi+5 ]]]=((double*)iVA)[nzi+5 ];
+			oIA[PA[IA[nzi+5 ]]]=IA[nzi+5 ];
+			oJA[PA[IA[nzi+5 ]]]=JA[nzi+5 ];
+			PA[IA[nzi+5 ]]++;
+		VA[PA[IA[nzi+6 ]]]=((double*)iVA)[nzi+6 ];
+			oIA[PA[IA[nzi+6 ]]]=IA[nzi+6 ];
+			oJA[PA[IA[nzi+6 ]]]=JA[nzi+6 ];
+			PA[IA[nzi+6 ]]++;
+		VA[PA[IA[nzi+7 ]]]=((double*)iVA)[nzi+7 ];
+			oIA[PA[IA[nzi+7 ]]]=IA[nzi+7 ];
+			oJA[PA[IA[nzi+7 ]]]=JA[nzi+7 ];
+			PA[IA[nzi+7 ]]++;
+		VA[PA[IA[nzi+8 ]]]=((double*)iVA)[nzi+8 ];
+			oIA[PA[IA[nzi+8 ]]]=IA[nzi+8 ];
+			oJA[PA[IA[nzi+8 ]]]=JA[nzi+8 ];
+			PA[IA[nzi+8 ]]++;
+		VA[PA[IA[nzi+9 ]]]=((double*)iVA)[nzi+9 ];
+			oIA[PA[IA[nzi+9 ]]]=IA[nzi+9 ];
+			oJA[PA[IA[nzi+9 ]]]=JA[nzi+9 ];
+			PA[IA[nzi+9 ]]++;
+		VA[PA[IA[nzi+10 ]]]=((double*)iVA)[nzi+10 ];
+			oIA[PA[IA[nzi+10 ]]]=IA[nzi+10 ];
+			oJA[PA[IA[nzi+10 ]]]=JA[nzi+10 ];
+			PA[IA[nzi+10 ]]++;
+		VA[PA[IA[nzi+11 ]]]=((double*)iVA)[nzi+11 ];
+			oIA[PA[IA[nzi+11 ]]]=IA[nzi+11 ];
+			oJA[PA[IA[nzi+11 ]]]=JA[nzi+11 ];
+			PA[IA[nzi+11 ]]++;
+		VA[PA[IA[nzi+12 ]]]=((double*)iVA)[nzi+12 ];
+			oIA[PA[IA[nzi+12 ]]]=IA[nzi+12 ];
+			oJA[PA[IA[nzi+12 ]]]=JA[nzi+12 ];
+			PA[IA[nzi+12 ]]++;
+		VA[PA[IA[nzi+13 ]]]=((double*)iVA)[nzi+13 ];
+			oIA[PA[IA[nzi+13 ]]]=IA[nzi+13 ];
+			oJA[PA[IA[nzi+13 ]]]=JA[nzi+13 ];
+			PA[IA[nzi+13 ]]++;
+		VA[PA[IA[nzi+14 ]]]=((double*)iVA)[nzi+14 ];
+			oIA[PA[IA[nzi+14 ]]]=IA[nzi+14 ];
+			oJA[PA[IA[nzi+14 ]]]=JA[nzi+14 ];
+			PA[IA[nzi+14 ]]++;
+		VA[PA[IA[nzi+15 ]]]=((double*)iVA)[nzi+15 ];
+			oIA[PA[IA[nzi+15 ]]]=IA[nzi+15 ];
+			oJA[PA[IA[nzi+15 ]]]=JA[nzi+15 ];
+			PA[IA[nzi+15 ]]++;
+		}
+for(     ;nzi<nnz;++nzi){ VA[PA[IA[nzi+0 ]]]=((double*)iVA)[nzi+0 ];
+			oIA[PA[IA[nzi+0 ]]]=IA[nzi+0 ];
+			oJA[PA[IA[nzi+0 ]]]=JA[nzi+0 ];
+			PA[IA[nzi+0 ]]++;
+		 }
+}
+
+	}
+		break;
+		case RSB_NUMERICAL_TYPE_FLOAT 	:
+	{
+		rsb_nnz_idx_t nzi;
+		float*VA=(float*)oVA;
+		{
+for(nzi=0;nzi+15<nnz;nzi+=16){
+VA[PA[IA[nzi+0 ]]]=((float*)iVA)[nzi+0 ];
+			oIA[PA[IA[nzi+0 ]]]=IA[nzi+0 ];
+			oJA[PA[IA[nzi+0 ]]]=JA[nzi+0 ];
+			PA[IA[nzi+0 ]]++;
+		VA[PA[IA[nzi+1 ]]]=((float*)iVA)[nzi+1 ];
+			oIA[PA[IA[nzi+1 ]]]=IA[nzi+1 ];
+			oJA[PA[IA[nzi+1 ]]]=JA[nzi+1 ];
+			PA[IA[nzi+1 ]]++;
+		VA[PA[IA[nzi+2 ]]]=((float*)iVA)[nzi+2 ];
+			oIA[PA[IA[nzi+2 ]]]=IA[nzi+2 ];
+			oJA[PA[IA[nzi+2 ]]]=JA[nzi+2 ];
+			PA[IA[nzi+2 ]]++;
+		VA[PA[IA[nzi+3 ]]]=((float*)iVA)[nzi+3 ];
+			oIA[PA[IA[nzi+3 ]]]=IA[nzi+3 ];
+			oJA[PA[IA[nzi+3 ]]]=JA[nzi+3 ];
+			PA[IA[nzi+3 ]]++;
+		VA[PA[IA[nzi+4 ]]]=((float*)iVA)[nzi+4 ];
+			oIA[PA[IA[nzi+4 ]]]=IA[nzi+4 ];
+			oJA[PA[IA[nzi+4 ]]]=JA[nzi+4 ];
+			PA[IA[nzi+4 ]]++;
+		VA[PA[IA[nzi+5 ]]]=((float*)iVA)[nzi+5 ];
+			oIA[PA[IA[nzi+5 ]]]=IA[nzi+5 ];
+			oJA[PA[IA[nzi+5 ]]]=JA[nzi+5 ];
+			PA[IA[nzi+5 ]]++;
+		VA[PA[IA[nzi+6 ]]]=((float*)iVA)[nzi+6 ];
+			oIA[PA[IA[nzi+6 ]]]=IA[nzi+6 ];
+			oJA[PA[IA[nzi+6 ]]]=JA[nzi+6 ];
+			PA[IA[nzi+6 ]]++;
+		VA[PA[IA[nzi+7 ]]]=((float*)iVA)[nzi+7 ];
+			oIA[PA[IA[nzi+7 ]]]=IA[nzi+7 ];
+			oJA[PA[IA[nzi+7 ]]]=JA[nzi+7 ];
+			PA[IA[nzi+7 ]]++;
+		VA[PA[IA[nzi+8 ]]]=((float*)iVA)[nzi+8 ];
+			oIA[PA[IA[nzi+8 ]]]=IA[nzi+8 ];
+			oJA[PA[IA[nzi+8 ]]]=JA[nzi+8 ];
+			PA[IA[nzi+8 ]]++;
+		VA[PA[IA[nzi+9 ]]]=((float*)iVA)[nzi+9 ];
+			oIA[PA[IA[nzi+9 ]]]=IA[nzi+9 ];
+			oJA[PA[IA[nzi+9 ]]]=JA[nzi+9 ];
+			PA[IA[nzi+9 ]]++;
+		VA[PA[IA[nzi+10 ]]]=((float*)iVA)[nzi+10 ];
+			oIA[PA[IA[nzi+10 ]]]=IA[nzi+10 ];
+			oJA[PA[IA[nzi+10 ]]]=JA[nzi+10 ];
+			PA[IA[nzi+10 ]]++;
+		VA[PA[IA[nzi+11 ]]]=((float*)iVA)[nzi+11 ];
+			oIA[PA[IA[nzi+11 ]]]=IA[nzi+11 ];
+			oJA[PA[IA[nzi+11 ]]]=JA[nzi+11 ];
+			PA[IA[nzi+11 ]]++;
+		VA[PA[IA[nzi+12 ]]]=((float*)iVA)[nzi+12 ];
+			oIA[PA[IA[nzi+12 ]]]=IA[nzi+12 ];
+			oJA[PA[IA[nzi+12 ]]]=JA[nzi+12 ];
+			PA[IA[nzi+12 ]]++;
+		VA[PA[IA[nzi+13 ]]]=((float*)iVA)[nzi+13 ];
+			oIA[PA[IA[nzi+13 ]]]=IA[nzi+13 ];
+			oJA[PA[IA[nzi+13 ]]]=JA[nzi+13 ];
+			PA[IA[nzi+13 ]]++;
+		VA[PA[IA[nzi+14 ]]]=((float*)iVA)[nzi+14 ];
+			oIA[PA[IA[nzi+14 ]]]=IA[nzi+14 ];
+			oJA[PA[IA[nzi+14 ]]]=JA[nzi+14 ];
+			PA[IA[nzi+14 ]]++;
+		VA[PA[IA[nzi+15 ]]]=((float*)iVA)[nzi+15 ];
+			oIA[PA[IA[nzi+15 ]]]=IA[nzi+15 ];
+			oJA[PA[IA[nzi+15 ]]]=JA[nzi+15 ];
+			PA[IA[nzi+15 ]]++;
+		}
+for(     ;nzi<nnz;++nzi){ VA[PA[IA[nzi+0 ]]]=((float*)iVA)[nzi+0 ];
+			oIA[PA[IA[nzi+0 ]]]=IA[nzi+0 ];
+			oJA[PA[IA[nzi+0 ]]]=JA[nzi+0 ];
+			PA[IA[nzi+0 ]]++;
+		 }
+}
+
+	}
+		break;
+		case RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 	:
+	{
+		rsb_nnz_idx_t nzi;
+		float complex*VA=(float complex*)oVA;
+		{
+for(nzi=0;nzi+15<nnz;nzi+=16){
+VA[PA[IA[nzi+0 ]]]=((float complex*)iVA)[nzi+0 ];
+			oIA[PA[IA[nzi+0 ]]]=IA[nzi+0 ];
+			oJA[PA[IA[nzi+0 ]]]=JA[nzi+0 ];
+			PA[IA[nzi+0 ]]++;
+		VA[PA[IA[nzi+1 ]]]=((float complex*)iVA)[nzi+1 ];
+			oIA[PA[IA[nzi+1 ]]]=IA[nzi+1 ];
+			oJA[PA[IA[nzi+1 ]]]=JA[nzi+1 ];
+			PA[IA[nzi+1 ]]++;
+		VA[PA[IA[nzi+2 ]]]=((float complex*)iVA)[nzi+2 ];
+			oIA[PA[IA[nzi+2 ]]]=IA[nzi+2 ];
+			oJA[PA[IA[nzi+2 ]]]=JA[nzi+2 ];
+			PA[IA[nzi+2 ]]++;
+		VA[PA[IA[nzi+3 ]]]=((float complex*)iVA)[nzi+3 ];
+			oIA[PA[IA[nzi+3 ]]]=IA[nzi+3 ];
+			oJA[PA[IA[nzi+3 ]]]=JA[nzi+3 ];
+			PA[IA[nzi+3 ]]++;
+		VA[PA[IA[nzi+4 ]]]=((float complex*)iVA)[nzi+4 ];
+			oIA[PA[IA[nzi+4 ]]]=IA[nzi+4 ];
+			oJA[PA[IA[nzi+4 ]]]=JA[nzi+4 ];
+			PA[IA[nzi+4 ]]++;
+		VA[PA[IA[nzi+5 ]]]=((float complex*)iVA)[nzi+5 ];
+			oIA[PA[IA[nzi+5 ]]]=IA[nzi+5 ];
+			oJA[PA[IA[nzi+5 ]]]=JA[nzi+5 ];
+			PA[IA[nzi+5 ]]++;
+		VA[PA[IA[nzi+6 ]]]=((float complex*)iVA)[nzi+6 ];
+			oIA[PA[IA[nzi+6 ]]]=IA[nzi+6 ];
+			oJA[PA[IA[nzi+6 ]]]=JA[nzi+6 ];
+			PA[IA[nzi+6 ]]++;
+		VA[PA[IA[nzi+7 ]]]=((float complex*)iVA)[nzi+7 ];
+			oIA[PA[IA[nzi+7 ]]]=IA[nzi+7 ];
+			oJA[PA[IA[nzi+7 ]]]=JA[nzi+7 ];
+			PA[IA[nzi+7 ]]++;
+		VA[PA[IA[nzi+8 ]]]=((float complex*)iVA)[nzi+8 ];
+			oIA[PA[IA[nzi+8 ]]]=IA[nzi+8 ];
+			oJA[PA[IA[nzi+8 ]]]=JA[nzi+8 ];
+			PA[IA[nzi+8 ]]++;
+		VA[PA[IA[nzi+9 ]]]=((float complex*)iVA)[nzi+9 ];
+			oIA[PA[IA[nzi+9 ]]]=IA[nzi+9 ];
+			oJA[PA[IA[nzi+9 ]]]=JA[nzi+9 ];
+			PA[IA[nzi+9 ]]++;
+		VA[PA[IA[nzi+10 ]]]=((float complex*)iVA)[nzi+10 ];
+			oIA[PA[IA[nzi+10 ]]]=IA[nzi+10 ];
+			oJA[PA[IA[nzi+10 ]]]=JA[nzi+10 ];
+			PA[IA[nzi+10 ]]++;
+		VA[PA[IA[nzi+11 ]]]=((float complex*)iVA)[nzi+11 ];
+			oIA[PA[IA[nzi+11 ]]]=IA[nzi+11 ];
+			oJA[PA[IA[nzi+11 ]]]=JA[nzi+11 ];
+			PA[IA[nzi+11 ]]++;
+		VA[PA[IA[nzi+12 ]]]=((float complex*)iVA)[nzi+12 ];
+			oIA[PA[IA[nzi+12 ]]]=IA[nzi+12 ];
+			oJA[PA[IA[nzi+12 ]]]=JA[nzi+12 ];
+			PA[IA[nzi+12 ]]++;
+		VA[PA[IA[nzi+13 ]]]=((float complex*)iVA)[nzi+13 ];
+			oIA[PA[IA[nzi+13 ]]]=IA[nzi+13 ];
+			oJA[PA[IA[nzi+13 ]]]=JA[nzi+13 ];
+			PA[IA[nzi+13 ]]++;
+		VA[PA[IA[nzi+14 ]]]=((float complex*)iVA)[nzi+14 ];
+			oIA[PA[IA[nzi+14 ]]]=IA[nzi+14 ];
+			oJA[PA[IA[nzi+14 ]]]=JA[nzi+14 ];
+			PA[IA[nzi+14 ]]++;
+		VA[PA[IA[nzi+15 ]]]=((float complex*)iVA)[nzi+15 ];
+			oIA[PA[IA[nzi+15 ]]]=IA[nzi+15 ];
+			oJA[PA[IA[nzi+15 ]]]=JA[nzi+15 ];
+			PA[IA[nzi+15 ]]++;
+		}
+for(     ;nzi<nnz;++nzi){ VA[PA[IA[nzi+0 ]]]=((float complex*)iVA)[nzi+0 ];
+			oIA[PA[IA[nzi+0 ]]]=IA[nzi+0 ];
+			oJA[PA[IA[nzi+0 ]]]=JA[nzi+0 ];
+			PA[IA[nzi+0 ]]++;
+		 }
+}
+
+	}
+		break;
+		case RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 	:
+	{
+		rsb_nnz_idx_t nzi;
+		double complex*VA=(double complex*)oVA;
+		{
+for(nzi=0;nzi+15<nnz;nzi+=16){
+VA[PA[IA[nzi+0 ]]]=((double complex*)iVA)[nzi+0 ];
+			oIA[PA[IA[nzi+0 ]]]=IA[nzi+0 ];
+			oJA[PA[IA[nzi+0 ]]]=JA[nzi+0 ];
+			PA[IA[nzi+0 ]]++;
+		VA[PA[IA[nzi+1 ]]]=((double complex*)iVA)[nzi+1 ];
+			oIA[PA[IA[nzi+1 ]]]=IA[nzi+1 ];
+			oJA[PA[IA[nzi+1 ]]]=JA[nzi+1 ];
+			PA[IA[nzi+1 ]]++;
+		VA[PA[IA[nzi+2 ]]]=((double complex*)iVA)[nzi+2 ];
+			oIA[PA[IA[nzi+2 ]]]=IA[nzi+2 ];
+			oJA[PA[IA[nzi+2 ]]]=JA[nzi+2 ];
+			PA[IA[nzi+2 ]]++;
+		VA[PA[IA[nzi+3 ]]]=((double complex*)iVA)[nzi+3 ];
+			oIA[PA[IA[nzi+3 ]]]=IA[nzi+3 ];
+			oJA[PA[IA[nzi+3 ]]]=JA[nzi+3 ];
+			PA[IA[nzi+3 ]]++;
+		VA[PA[IA[nzi+4 ]]]=((double complex*)iVA)[nzi+4 ];
+			oIA[PA[IA[nzi+4 ]]]=IA[nzi+4 ];
+			oJA[PA[IA[nzi+4 ]]]=JA[nzi+4 ];
+			PA[IA[nzi+4 ]]++;
+		VA[PA[IA[nzi+5 ]]]=((double complex*)iVA)[nzi+5 ];
+			oIA[PA[IA[nzi+5 ]]]=IA[nzi+5 ];
+			oJA[PA[IA[nzi+5 ]]]=JA[nzi+5 ];
+			PA[IA[nzi+5 ]]++;
+		VA[PA[IA[nzi+6 ]]]=((double complex*)iVA)[nzi+6 ];
+			oIA[PA[IA[nzi+6 ]]]=IA[nzi+6 ];
+			oJA[PA[IA[nzi+6 ]]]=JA[nzi+6 ];
+			PA[IA[nzi+6 ]]++;
+		VA[PA[IA[nzi+7 ]]]=((double complex*)iVA)[nzi+7 ];
+			oIA[PA[IA[nzi+7 ]]]=IA[nzi+7 ];
+			oJA[PA[IA[nzi+7 ]]]=JA[nzi+7 ];
+			PA[IA[nzi+7 ]]++;
+		VA[PA[IA[nzi+8 ]]]=((double complex*)iVA)[nzi+8 ];
+			oIA[PA[IA[nzi+8 ]]]=IA[nzi+8 ];
+			oJA[PA[IA[nzi+8 ]]]=JA[nzi+8 ];
+			PA[IA[nzi+8 ]]++;
+		VA[PA[IA[nzi+9 ]]]=((double complex*)iVA)[nzi+9 ];
+			oIA[PA[IA[nzi+9 ]]]=IA[nzi+9 ];
+			oJA[PA[IA[nzi+9 ]]]=JA[nzi+9 ];
+			PA[IA[nzi+9 ]]++;
+		VA[PA[IA[nzi+10 ]]]=((double complex*)iVA)[nzi+10 ];
+			oIA[PA[IA[nzi+10 ]]]=IA[nzi+10 ];
+			oJA[PA[IA[nzi+10 ]]]=JA[nzi+10 ];
+			PA[IA[nzi+10 ]]++;
+		VA[PA[IA[nzi+11 ]]]=((double complex*)iVA)[nzi+11 ];
+			oIA[PA[IA[nzi+11 ]]]=IA[nzi+11 ];
+			oJA[PA[IA[nzi+11 ]]]=JA[nzi+11 ];
+			PA[IA[nzi+11 ]]++;
+		VA[PA[IA[nzi+12 ]]]=((double complex*)iVA)[nzi+12 ];
+			oIA[PA[IA[nzi+12 ]]]=IA[nzi+12 ];
+			oJA[PA[IA[nzi+12 ]]]=JA[nzi+12 ];
+			PA[IA[nzi+12 ]]++;
+		VA[PA[IA[nzi+13 ]]]=((double complex*)iVA)[nzi+13 ];
+			oIA[PA[IA[nzi+13 ]]]=IA[nzi+13 ];
+			oJA[PA[IA[nzi+13 ]]]=JA[nzi+13 ];
+			PA[IA[nzi+13 ]]++;
+		VA[PA[IA[nzi+14 ]]]=((double complex*)iVA)[nzi+14 ];
+			oIA[PA[IA[nzi+14 ]]]=IA[nzi+14 ];
+			oJA[PA[IA[nzi+14 ]]]=JA[nzi+14 ];
+			PA[IA[nzi+14 ]]++;
+		VA[PA[IA[nzi+15 ]]]=((double complex*)iVA)[nzi+15 ];
+			oIA[PA[IA[nzi+15 ]]]=IA[nzi+15 ];
+			oJA[PA[IA[nzi+15 ]]]=JA[nzi+15 ];
+			PA[IA[nzi+15 ]]++;
+		}
+for(     ;nzi<nnz;++nzi){ VA[PA[IA[nzi+0 ]]]=((double complex*)iVA)[nzi+0 ];
+			oIA[PA[IA[nzi+0 ]]]=IA[nzi+0 ];
+			oJA[PA[IA[nzi+0 ]]]=JA[nzi+0 ];
+			PA[IA[nzi+0 ]]++;
+		 }
+}
+
+	}
+		break;
+	
+		/* unsupported type */
+		default :
+			return;
+	}
+}
+
+#if 0
+void rsb_util_do_scatter_rows(void * RSB_RESTRICT oVA, rsb_coo_idx_t * RSB_RESTRICT oIA, rsb_coo_idx_t * RSB_RESTRICT oJA, void * RSB_RESTRICT VA, rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t * RSB_RESTRICT JA, rsb_nnz_idx_t * RSB_RESTRICT PA, const rsb_nnz_idx_t nnz, const rsb_type_t typecode){
+	/**
+		This is an adapted PSBLAS psb_ip_reord_d1i2 routine.
+	*/
+	
+	switch(typecode)
+	{
+			/* supported (double,float,float complex,double complex) */
+	case RSB_NUMERICAL_TYPE_DOUBLE 	:
+	{
+		rsb_nnz_idx_t n;
+		for(n=0;RSB_LIKELY(n<nnz);++n)
+			((double*)oVA)[PA[IA[n]]]=((double*)VA)[n],
+			oIA[PA[IA[n]]]=IA[n],
+			oJA[PA[IA[n]]]=JA[n],
+			PA[IA[n]]++;
+		}
+		break;
+		case RSB_NUMERICAL_TYPE_FLOAT 	:
+	{
+		rsb_nnz_idx_t n;
+		for(n=0;RSB_LIKELY(n<nnz);++n)
+			((float*)oVA)[PA[IA[n]]]=((float*)VA)[n],
+			oIA[PA[IA[n]]]=IA[n],
+			oJA[PA[IA[n]]]=JA[n],
+			PA[IA[n]]++;
+		}
+		break;
+		case RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 	:
+	{
+		rsb_nnz_idx_t n;
+		for(n=0;RSB_LIKELY(n<nnz);++n)
+			((float complex*)oVA)[PA[IA[n]]]=((float complex*)VA)[n],
+			oIA[PA[IA[n]]]=IA[n],
+			oJA[PA[IA[n]]]=JA[n],
+			PA[IA[n]]++;
+		}
+		break;
+		case RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 	:
+	{
+		rsb_nnz_idx_t n;
+		for(n=0;RSB_LIKELY(n<nnz);++n)
+			((double complex*)oVA)[PA[IA[n]]]=((double complex*)VA)[n],
+			oIA[PA[IA[n]]]=IA[n],
+			oJA[PA[IA[n]]]=JA[n],
+			PA[IA[n]]++;
+		}
+		break;
+	
+		/* unsupported type */
+		default :
+			return;
+	}
+}
+
+#endif /* 0 */
+
+
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+/* @endcond */
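
For orientation: the scatter loops in the generated file above are the distribution pass of a counting sort over row indices. PA[i] holds the next free slot for row i in the output arrays and is advanced after every write, so nonzeroes end up grouped by row while keeping their relative order. A minimal self-contained sketch of the same pattern, using plain int/double arrays in place of librsb's index and value types (all names here are illustrative, not library API):

    #include <stdio.h>

    /* Scatter nnz (IA[n],JA[n],VA[n]) triples into row-grouped order.
       On entry PA[i] is the offset where row i's block starts; on
       return it points one past the end of row i's block. */
    static void scatter_rows(double *oVA, int *oIA, int *oJA,
                             const double *VA, const int *IA, const int *JA,
                             int *PA, int nnz)
    {
        int n;
        for (n = 0; n < nnz; ++n)
        {
            const int p = PA[IA[n]]++; /* next free slot of row IA[n] */
            oVA[p] = VA[n];
            oIA[p] = IA[n];
            oJA[p] = JA[n];
        }
    }

    int main(void)
    {
        /* three rows with counts {1,2,1}: prefix sums give PA = {0,1,3} */
        const int    IA[] = { 1, 0, 2, 1 };
        const int    JA[] = { 5, 7, 2, 3 };
        const double VA[] = { 1.0, 2.0, 3.0, 4.0 };
        int    PA[]  = { 0, 1, 3 };
        int    oIA[4], oJA[4];
        double oVA[4];
        int n;

        scatter_rows(oVA, oIA, oJA, VA, IA, JA, PA, 4);
        for (n = 0; n < 4; ++n) /* prints rows in order 0,1,1,2 */
            printf("%d %d %g\n", oIA[n], oJA[n], oVA[n]);
        return 0;
    }

With PA rebuilt from per-row counts via a prefix sum, this produces the row-grouped (CSR-like) ordering in one linear pass.
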
diff --git a/rsb_permute.h b/rsb_permute.h
new file mode 100644
index 0000000..2a2215e
--- /dev/null
+++ b/rsb_permute.h
@@ -0,0 +1,68 @@
+/* @cond INNERDOC */
+/**
+ * @file
+ * @brief
+ * Permutation functions.
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script.
+ It is not meant to be used as an API (Application Programming Interface).
+ P.S.: right now, only row-major matrix access is considered.
+
+ */
+
+
+
+#ifndef RSB_PERMUTE_H_INCLUDED
+#define RSB_PERMUTE_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#include "rsb_common.h"
+
+rsb_err_t rsb__do_permute_values_in_place_with_coo_index(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t * K, rsb_nnz_idx_t nnz, rsb_type_t type);
+rsb_err_t rsb__do_permute_values_in_place_with_nnz_index(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t * K, rsb_nnz_idx_t nnz, rsb_type_t type);
+rsb_err_t rsb__do_permute_values_with_coo_index( void * rVA, const void *VA, rsb_coo_idx_t * rIA, const rsb_coo_idx_t * IA, rsb_coo_idx_t * rJA, const rsb_coo_idx_t * JA, const rsb_coo_idx_t * K, rsb_nnz_idx_t nnz, rsb_type_t type);
+
+rsb_err_t rsb__do_permute_rows_with_coo_index( rsb_coo_idx_t * IA, const rsb_coo_idx_t * K, rsb_nnz_idx_t nnz);
+
+rsb_err_t rsb__do_permute_values_with_nnz_index( void * rVA, const void *VA, rsb_coo_idx_t * rIA, const rsb_coo_idx_t * IA, rsb_coo_idx_t * rJA, const rsb_coo_idx_t * JA, const rsb_nnz_idx_t * K, rsb_nnz_idx_t nnz, rsb_type_t typecode);
+
+void rsb_ip_reord(rsb_nnz_idx_t n, void * VAp, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t * P, rsb_type_t typecode);
+void rsb_util_do_scatter_rows(void * RSB_RESTRICT oVA, rsb_coo_idx_t * RSB_RESTRICT oIA, rsb_coo_idx_t * RSB_RESTRICT oJA, const void * RSB_RESTRICT iVA, const rsb_coo_idx_t * RSB_RESTRICT IA, const rsb_coo_idx_t * RSB_RESTRICT JA, rsb_nnz_idx_t * RSB_RESTRICT PA, const rsb_nnz_idx_t nnz, const rsb_type_t typecode);
+#if 0
+void rsb_util_do_scatter_rows(void * RSB_RESTRICT oVA, rsb_coo_idx_t * RSB_RESTRICT oIA, rsb_coo_idx_t * RSB_RESTRICT oJA, void * RSB_RESTRICT VA, rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t * RSB_RESTRICT JA, rsb_nnz_idx_t * RSB_RESTRICT PA, const rsb_nnz_idx_t nnz, const rsb_type_t typecode);
+#endif /* 0 */
+
+
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+#endif /* RSB_PERMUTE_H_INCLUDED */
+
+/* @endcond */
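
For orientation, the out-of-place prototypes above (rsb__do_permute_values_with_coo_index and rsb__do_permute_values_with_nnz_index) apply a gather permutation: element i of each output array comes from position K[i] of the corresponding input array. A minimal sketch of the pattern with plain arrays (illustrative names, not the library API):

    #include <stdio.h>

    /* Gather-permute: out[i] = in[K[i]] for indices and values alike. */
    static void permute_coo(double *rVA, int *rIA, int *rJA,
                            const double *VA, const int *IA, const int *JA,
                            const int *K, int nnz)
    {
        int i;
        for (i = 0; i < nnz; ++i)
        {
            rIA[i] = IA[K[i]];
            rJA[i] = JA[K[i]];
            rVA[i] = VA[K[i]];
        }
    }

    int main(void)
    {
        const int    IA[] = { 2, 0, 1 }, JA[] = { 9, 8, 7 };
        const double VA[] = { 2.0, 0.0, 1.0 };
        const int    K [] = { 1, 2, 0 }; /* e.g. a sorting permutation on IA */
        int    rIA[3], rJA[3];
        double rVA[3];
        int i;

        permute_coo(rVA, rIA, rJA, VA, IA, JA, K, 3);
        for (i = 0; i < 3; ++i)
            printf("%d %d %g\n", rIA[i], rJA[i], rVA[i]); /* 0 8 0 / 1 7 1 / 2 9 2 */
        return 0;
    }
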
diff --git a/rsb_permute.m4 b/rsb_permute.m4
new file mode 100644
index 0000000..d6c1fed
--- /dev/null
+++ b/rsb_permute.m4
@@ -0,0 +1,372 @@
+dnl
+dnl
+include(`rsb_misc.m4')dnl
+include(`do_unroll.m4')dnl
+dnl
+/* @cond INNERDOC */
+dnl
+/**
+ * @file
+ * @brief
+ * Permutation functions.
+ */
+RSB_M4_HEADER_MESSAGE()dnl
+
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`
+#ifndef RSB_PERMUTE_H_INCLUDED
+#define RSB_PERMUTE_H_INCLUDED
+')
+dnl
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+dnl
+#include "rsb_common.h"
+dnl 
+
+rsb_err_t rsb__do_permute_values_in_place_with_coo_index(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t * K, rsb_nnz_idx_t nnz, rsb_type_t type)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/**
+		This is in-place swapping, and it is much slower than the out-of-place variant.
+
+		FIXME: document.
+
+		Sadly, this is O(nnz^2).
+		... or is it qsort's order?
+	 */
+	rsb_coo_idx_t n;/* this is the case where coo cannot overflow */
+
+	switch(type)
+	{
+		/* supported RSB_M4_MATRIX_TYPES */
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+	case RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type)	:
+
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		register rsb_coo_idx_t t,m=n;
+		RSB_DEBUG_ASSERT(K[n]>=0);
+		if(K[n]==n)
+			continue;
+		if(K[n]>n)
+			t=K[n];
+		else
+		{
+			/* follow swap chain */
+			while(K[K[m]]<n)
+				m=K[m];
+
+			t=K[K[m]];
+			RSB_DEBUG_ASSERT(t>=0);
+
+#if RSB_DEBUG_SORT_STUFF 
+			K[K[m]]=-1;	// just a debug measure
+#endif /* RSB_DEBUG_SORT_STUFF */
+			K[m]=t;
+		}
+		/* perform the swap */
+		RSB_SWAP(rsb_coo_idx_t,IA[n],IA[t]);
+		RSB_SWAP(rsb_coo_idx_t,JA[n],JA[t]);
+		RSB_SWAP(type,((type*)VA)[n],((type*)VA)[t]);
+	}
+			break;
+')dnl
+			/* unsupported type */
+		default :
+				return RSB_ERR_UNSUPPORTED_TYPE	;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+rsb_err_t rsb__do_permute_values_in_place_with_nnz_index(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t * K, rsb_nnz_idx_t nnz, rsb_type_t type)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+		/*	
+			This is in-place swapping, and it is much slower than the out-of-place variant.
+
+			FIXME: document and finish (s/double/ * /).
+
+			Sadly, this is O(nnz^2).
+			... or is it qsort's order?
+		 */
+	rsb_nnz_idx_t n;
+
+	switch(type)
+	{
+		/* supported RSB_M4_MATRIX_TYPES */
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+	case RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type)	:
+
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		register rsb_nnz_idx_t t,m=n;
+		RSB_DEBUG_ASSERT(K[n]>=0);
+		if(K[n]==n)
+			continue;
+		if(K[n]>n)
+			t=K[n];
+		else
+		{
+			/* follow swap chain */
+			while(K[K[m]]<n)
+				m=K[m];
+
+			t=K[K[m]];
+			RSB_DEBUG_ASSERT(t>=0);
+
+#if RSB_DEBUG_SORT_STUFF 
+			K[K[m]]=-1;	// just a debug measure
+#endif /* RSB_DEBUG_SORT_STUFF */
+			K[m]=t;
+		}
+		/* perform the swap */
+		RSB_SWAP(rsb_coo_idx_t,IA[n],IA[t]);
+		RSB_SWAP(rsb_coo_idx_t,JA[n],JA[t]);
+		RSB_SWAP(type,((type*)VA)[n],((type*)VA)[t]);
+	}
+			break;
+')dnl
+			/* unsupported type */
+		default :
+				return RSB_ERR_UNSUPPORTED_TYPE	;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
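
The two in-place variants above chase swap chains without any scratch space, which is what the O(nnz^2) remark in their comments refers to. A common linear-time alternative, shown here on a single value array, follows each permutation cycle once and marks visited slots inside the permutation array itself. This is a textbook cycle-following technique given for comparison, not the routine librsb generates:

    #include <stdio.h>

    /* In-place gather permutation a[i] <- a[P[i]], one pass per cycle.
       Visited slots are marked by storing -index-1 in P; a final pass
       restores P, so no extra scratch array is needed. */
    static void permute_in_place(double *a, int *P, int n)
    {
        int i;
        for (i = 0; i < n; ++i)
        {
            int cur = i, next;
            double tmp;

            if (P[i] < 0)
                continue;          /* part of an already handled cycle */
            tmp = a[i];
            while ((next = P[cur]) != i)
            {
                a[cur] = a[next];  /* pull the next element along the cycle */
                P[cur] = -next - 1;
                cur = next;
            }
            a[cur] = tmp;
            P[cur] = -i - 1;
        }
        for (i = 0; i < n; ++i)    /* undo the marking */
            P[i] = -P[i] - 1;
    }

    int main(void)
    {
        double a[] = { 10.0, 20.0, 30.0 };
        int    P[] = { 1, 2, 0 };
        int i;

        permute_in_place(a, P, 3);
        for (i = 0; i < 3; ++i)
            printf("%g ", a[i]);   /* prints: 20 30 10 */
        printf("\n");
        return 0;
    }
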
+
+rsb_err_t rsb__do_permute_values_with_coo_index( void * rVA, const void *VA, rsb_coo_idx_t * rIA, const rsb_coo_idx_t * IA, rsb_coo_idx_t * rJA, const rsb_coo_idx_t * JA, const rsb_coo_idx_t * K, rsb_nnz_idx_t nnz, rsb_type_t type)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+	{
+		/*
+		 * FIXME : UNOPTIMIZED !
+		 */
+		rsb_coo_idx_t i;/* in this algorithm, coo cannot overflow */
+
+		/* should permute here */
+		for(i=0;RSB_LIKELY(i<nnz);++i)
+		{
+			RSB_DEBUG_ASSERT(K[i]>=0);
+			RSB_DEBUG_ASSERT(K[i]<nnz);
+
+			rIA [i]=IA [K[i]];
+			rJA [i]=JA [K[i]];
+		}
+
+		switch(type)
+		{
+			/* supported RSB_M4_MATRIX_TYPES */
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+			case RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type)	:
+		RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`nnz',` ((type*)rVA)[i+LI]=((type*)VA)[K[(i+LI)]];
+	')
+			
+			break;
+')dnl
+			/* unsupported type */
+			default :
+				return RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+		return RSB_ERR_NO_ERROR;
+	}
+')dnl
+
+
+rsb_err_t rsb__do_permute_rows_with_coo_index( rsb_coo_idx_t * IA, const rsb_coo_idx_t * K, rsb_nnz_idx_t nnz)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+	{
+		/*
+		 * FIXME : UNOPTIMIZED !
+		 */
+		rsb_coo_idx_t i;/* in this algorithm, coo cannot overflow */
+
+		/* should permute here */
+		for(i=0;RSB_LIKELY(i<nnz);++i)
+		{
+			RSB_DEBUG_ASSERT(K[i]>=0);
+			RSB_DEBUG_ASSERT(K[i]<nnz);
+
+			IA [i]=K[IA[i]];
+		}
+		return RSB_ERR_NO_ERROR;
+	}
+')dnl
+
+
+rsb_err_t rsb__do_permute_values_with_nnz_index( void * rVA, const void *VA, rsb_coo_idx_t * rIA, const rsb_coo_idx_t * IA, rsb_coo_idx_t * rJA, const rsb_coo_idx_t * JA, const rsb_nnz_idx_t * K, rsb_nnz_idx_t nnz, rsb_type_t typecode)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+	{
+		/*
+		 * FIXME : UNOPTIMIZED !
+		 */
+		rsb_nnz_idx_t i;
+
+		/* should permute here */
+		for(i=0;RSB_LIKELY(i<nnz);++i)
+		{
+			RSB_DEBUG_ASSERT(K[i]>=0);
+			RSB_DEBUG_ASSERT(K[i]<nnz);
+
+			rIA [i]=IA [K[i]];
+			rJA [i]=JA [K[i]];
+		}
+
+		switch(typecode)
+		{
+			/* supported RSB_M4_MATRIX_TYPES */
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+			case RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type)	:
+		RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`nnz',` ((type*)rVA)[i+LI]=((type*)VA)[K[(i+LI)]];
+	')
+			
+			break;
+')dnl
+			/* unsupported type */
+			default :
+				return RSB_ERR_UNSUPPORTED_TYPE	;
+		}
+		return RSB_ERR_NO_ERROR;
+	}
+')dnl
+
+
+void rsb_ip_reord(rsb_nnz_idx_t n, void * VAp, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t * P, rsb_type_t typecode)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/**
+		This is an adapted PSBLAS psb_ip_reord_d1i2 routine.
+	*/
+	
+	switch(typecode)
+	{
+			/* supported RSB_M4_MATRIX_TYPES */
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+	case RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type)	:
+	{
+		rsb_coo_idx_t isw1, isw2;
+		rsb_nnz_idx_t lswap, lp, k;
+		type swap;
+		type * VA=VAp;
+
+		lp = P[0];
+		k  = 1;
+		while(1)
+		{
+			if (RSB_UNLIKELY((lp==0) || (k>n))) break;
+			while(1)
+			{
+				if (lp >= k) break;
+				lp = P[lp];
+			}
+			lswap    = P[lp];
+			P[lp]  = P[k];
+			P[k]   = lp;
+			--lp;
+			--k;
+			swap   = VA[lp];
+			VA[lp] = VA[k];
+			VA[k]  = swap;
+			isw1   = IA[lp];
+			IA[lp] = IA[k];
+			IA[k]  = isw1;
+			isw2   = JA[lp];
+			JA[lp] = JA[k];
+			JA[k]  = isw2;
+			++k;
+			lp = lswap ;
+			k  = k + 1;
+		}
+	}
+		break;
+	')
+		/* unsupported type */
+		default :
+			return;
+	}
+
+
+}
+')dnl
+
+void rsb_util_do_scatter_rows(void * RSB_RESTRICT oVA, rsb_coo_idx_t * RSB_RESTRICT oIA, rsb_coo_idx_t * RSB_RESTRICT oJA, const void * RSB_RESTRICT iVA, const rsb_coo_idx_t * RSB_RESTRICT IA, const rsb_coo_idx_t * RSB_RESTRICT JA, rsb_nnz_idx_t * RSB_RESTRICT PA, const rsb_nnz_idx_t nnz, const rsb_type_t typecode)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/**
+		This is an adapted PSBLAS psb_ip_reord_d1i2 routine.
+	*/
+	
+	switch(typecode)
+	{
+			/* supported RSB_M4_MATRIX_TYPES */
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+	case RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type)	:
+	{
+		rsb_nnz_idx_t nzi;
+		type*VA=(type*)oVA;
+		RSB_M4_SIMPLE_LOOP_UNROLL(`nzi',`LI',`0',`nnz',` 
+			VA[PA[IA[nzi+LI]]]=((type*)iVA)[nzi+LI];
+			oIA[PA[IA[nzi+LI]]]=IA[nzi+LI];
+			oJA[PA[IA[nzi+LI]]]=JA[nzi+LI];
+			PA[IA[nzi+LI]]++;
+		')
+	}
+		break;
+	')
+		/* unsupported type */
+		default :
+			return;
+	}
+}
+')dnl
+
+#if 0
+void rsb_util_do_scatter_rows(void * RSB_RESTRICT oVA, rsb_coo_idx_t * RSB_RESTRICT oIA, rsb_coo_idx_t * RSB_RESTRICT oJA, void * RSB_RESTRICT VA, rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t * RSB_RESTRICT JA, rsb_nnz_idx_t * RSB_RESTRICT PA, const rsb_nnz_idx_t nnz, const rsb_type_t typecode)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/**
+		This is an adapted PSBLAS psb_ip_reord_d1i2 routine.
+	*/
+	
+	switch(typecode)
+	{
+			/* supported RSB_M4_MATRIX_TYPES */
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+	case RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type)	:
+	{
+		rsb_nnz_idx_t n;
+		for(n=0;RSB_LIKELY(n<nnz);++n)
+			((type*)oVA)[PA[IA[n]]]=((type*)VA)[n],
+			oIA[PA[IA[n]]]=IA[n],
+			oJA[PA[IA[n]]]=JA[n],
+			PA[IA[n]]++;
+		}
+		break;
+	')
+		/* unsupported type */
+		default :
+			return;
+	}
+}
+')dnl
+
+#endif /* 0 */
+
+
+
+dnl
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`
+#endif /* RSB_PERMUTE_H_INCLUDED */
+')
+dnl
+/* @endcond */
+dnl
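
RSB_M4_SIMPLE_LOOP_UNROLL is the macro that produced the long repetitive loops visible in the generated rsb_permute.c earlier in this patch: a main loop whose body is replicated a fixed number of times (16 in the generated file) plus a scalar remainder loop. A hand-written C sketch of the same shape, with the unroll factor matching the generated output and everything else illustrative:

    /* Unrolled copy in the shape RSB_M4_SIMPLE_LOOP_UNROLL appears to
       emit: a 16-way main loop plus a scalar remainder loop. */
    static void copy_unrolled(double *dst, const double *src, int n)
    {
        int i;
        for (i = 0; i + 15 < n; i += 16)
        {
            dst[i +  0] = src[i +  0]; dst[i +  1] = src[i +  1];
            dst[i +  2] = src[i +  2]; dst[i +  3] = src[i +  3];
            dst[i +  4] = src[i +  4]; dst[i +  5] = src[i +  5];
            dst[i +  6] = src[i +  6]; dst[i +  7] = src[i +  7];
            dst[i +  8] = src[i +  8]; dst[i +  9] = src[i +  9];
            dst[i + 10] = src[i + 10]; dst[i + 11] = src[i + 11];
            dst[i + 12] = src[i + 12]; dst[i + 13] = src[i + 13];
            dst[i + 14] = src[i + 14]; dst[i + 15] = src[i + 15];
        }
        for ( ; i < n; ++i)  /* remainder: fewer than 16 elements left */
            dst[i] = src[i];
    }
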
diff --git a/rsb_pr.c b/rsb_pr.c
new file mode 100644
index 0000000..5d5f86f
--- /dev/null
+++ b/rsb_pr.c
@@ -0,0 +1,2566 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/**
+ * @file
+ * @brief Performance reporting code. This source uses rsb__getenv(), and should therefore be linked into rsbench directly, not into librsb.
+ * @author Michele Martone
+ * */
+
+#include <strings.h>		/* bzero */
+#include "rsb_internals.h"
+#include "rsb-config.h"
+#include <stdint.h> /* int64_t / uint64_t */
+
+#define rsb__strcpy(A,B) strcpy((rsb_char_t*)A,B)
+#define rsb__strlen(A) strlen((rsb_char_t*)A)
+#define RSB_RMEMCPY(DEST,SRC,N) RSB_MEMCPY((void*RSB_RESTRICT)(DEST),(const void*RSB_RESTRICT)(SRC),(N))
+#define RSB__PR_FREE(P) {rsb__pr_free(P);(P)=NULL;}
+#define RSB_XFLOPS(TIME,NRHS,CANONICAL_MATRIX_OP_FLOPS)  ( (TIME)?(((double)NRHS)*(CANONICAL_MATRIX_OP_FLOPS))/(TIME):RSB_TIME_ZERO )
+#define RSB_PRD_STYLE_TBL 0
+#define RSB_PRD_STYLE_CMP 1
+#define RSB_PRD_STYLE_PLT_BASE 2 /* new, experimental */
+#define RSB_PRD_STYLE_PLT_AT_SPEEDUP_RSB 2 /* new, experimental */
+#define RSB_PRD_STYLE_PLT_SUBM_BS 3 /* new, experimental */
+#define RSB_PRD_CMP_MDUMP -1
+#define RSB_PRD_CMP_DFLT 0
+#define RSB_PRD_CMP_DIV 1
+#define RSB_PRD_CMP_DIFF 2
+#define RSB_PRD_CMP_APPEND 3
+#define RSB_PRD_WANT_CODE_BALANCE_AND_BANDWIDTH 1
+
+#define RSB_ON_IF_LEM(X,Y,ONIFLESS,ONIFEQUAL,ONIFMORE)	\
+	( (X)==(Y) ? (ONIFEQUAL) : ((X)<(Y)? (ONIFLESS) : (ONIFMORE) ))
+
+/* rsb sampled performance sample structure (internal) */
+/* to keep I/O portable, don't use pointers or non-portable types within it */
+struct rsb_rsps_t
+{
+	rsb_perf_t op_time;
+	rsb_perf_t mkl_csr_op_time;
+	rsb_perf_t at_op_time;
+	rsb_time_t at_t;
+	rsb_perf_t at_mkl_csr_op_time;
+	rsb_trans_t transA;
+	double cmflops; /* canonical mflops considering nrhs==1 */
+	rsb_flags_t flagsA;
+	rsb_submatrix_idx_t nsubm, at_nsubm;
+	/*size_t*/ int64_t /*uint64_t*/ isa, at_isa;
+	rsb_int_t at_cn, at_mkl_csr_cn;
+	rsb_int_t uc; /* updates count */
+	rsb_coo_idx_t nrA,ncA;
+	rsb_nnz_idx_t nnzA;
+	rsb_int_t at_eps; /* effective steps */
+        struct rsb_ts_t otpos, btpos; /* dumpable with RSB_STAT_DUMP_TS */
+        struct rsb_ts_t otpms, btpms;
+};
+
+/* rsb sampled performance record structure  (internal)*/
+struct rsb_rspr_t
+{
+	rsb_int_t  filenamen,   cn,   incXn,   incYn,   nrhsn,  ntypecodes,   tn, csf /* count so far */;
+        rsb_int_t filenamebl, cabl, incXabl, incYabl, nrhsabl, typecodesbl, tabl; /* ... byte length */
+        /* the following shall not be saved */
+        rsb_bool_t ror; /* representing only ratios */
+	struct rsb_rsps_t * psa; /* performance samples array */
+        struct rsb_rspra_t * rsprap; /* performance samples record arrays */
+};
+
+#define RSB_PRL_TCS "pr: "
+#define RSB_PRL_LCC_IE rsb__getenv("RSB_PR_WLTC") ? '%' : ( rsb__getenv("RSB_PR_PRL_LCC") ? *rsb__getenv("RSB_PR_PRL_LCC") : '#') /*  line comment char */
+#define RSB_PRL_TCS_IE rsb__getenv("RSB_PR_WLTC") ? " " : ( rsb__getenv("RSB_PR_PRL_TCS") ?  rsb__getenv("RSB_PR_PRL_TCS") : RSB_PRL_TCS ) /* table comment string */
+#define RSB_PRL_ENDLSTR_IE rsb__getenv("RSB_PR_WLTC") ? "\\\\" : ( rsb__getenv("RSB_PR_ENDLSTR") ? rsb__getenv("RSB_PR_ENDLSTR") : "" )
+#define RSB_PRL_FSEPSTR_IE rsb__getenv("RSB_PR_WLTC") ? " & " : (rsb__getenv("RSB_PR_FSEPSTR") ? rsb__getenv("RSB_PR_FSEPSTR") : " ")
+#define RSB_PR_NOC(RSPRP) ((RSPRP)->filenamen * (RSPRP)->cn * (RSPRP)->incXn * (RSPRP)->incYn * (RSPRP)->nrhsn * (RSPRP)->ntypecodes * (RSPRP)->tn )
+#define RSB_PRC RSB_STDOUT
+#define RSB_PRL RSB_PRC("%c%s",rsb_prl_lcc,RSB_PRL_TCS),RSB_PRC
+#define RSB_PRT RSB_PRC("%s",rsb_prl_tcs),RSB_PRC
+#define RSB_PRL_SEP RSB_STDOUT("%cpr: ======== ",rsb_prl_lcc),RSB_STDOUT
+#define RSB_PRWL RSB_PRC("#pr: Warning:"),RSB_PRC
+
+static rsb_err_t rsb__pr_alloc(struct rsb_rspr_t ** rsprpp, const struct rsb_rspr_t * rsprcp, rsb_int_t filenamen, rsb_int_t cn, rsb_int_t incXn, rsb_int_t incYn, rsb_int_t nrhsn, rsb_int_t ntypecodes, rsb_int_t tn)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_rspr_t * rsprp = NULL;
+	rsb_int_t noc = 0; /* number of combinations */
+	size_t ab = 0; /* allocated bytes */
+
+	rsprp = rsb__calloc(sizeof(struct rsb_rspr_t));
+	if( ! rsprp )
+       	{
+	       	errval = RSB_ERR_ENOMEM;
+	       	goto err;
+       	}
+
+        rsprp->ror = RSB_BOOL_FALSE;
+
+        if( rsprcp )
+                *rsprp = *rsprcp;
+
+	rsprp->filenamen = filenamen;
+	rsprp->cn = cn;
+	rsprp->incXn = incXn;
+	rsprp->incYn = incYn;
+	rsprp->nrhsn = nrhsn;
+	rsprp->ntypecodes = ntypecodes;
+	rsprp->tn = tn;
+
+	noc = RSB_PR_NOC(rsprp);
+	ab = sizeof(struct rsb_rsps_t)*noc;
+	rsprp->psa = rsb__calloc(ab);
+	if( ! rsprp->psa )
+	{
+	       	errval = RSB_ERR_ENOMEM;
+	       	goto err;
+       	}
+	RSB_ASSIGN_IF(rsprpp,rsprp)
+err:
+        return errval;
+}
+
+struct rsb_rspra_t /* ... record arrays */
+{
+        const rsb_char_t**filenamea; rsb_int_t*ca; const rsb_int_t*incXa; const rsb_int_t*incYa; const rsb_int_t*nrhsa; const rsb_type_t*typecodes; const rsb_int_t*ta;
+};
+
+#define RSB_RPR_FILE_HDR "%RPR-0..""        ""        ""        "
+#define RSB_RPR_FILE_HDL 32
+#define RSB_PR_WR RSB_BOOL_FALSE
+#define RSB_PR_RD RSB_BOOL_TRUE
+#define RSB_RW(ROW,PTR,SIZE,NMEMB,STREAM)                               \
+        {                                                               \
+                sh = (SIZE) * (NMEMB);                                  \
+                if(ROW)                                                 \
+                        hd = fread((PTR),(SIZE),(NMEMB),(STREAM));           \
+                else                                                    \
+                        hd = fwrite ((PTR),(SIZE),(NMEMB),(STREAM));         \
+                hd *= (SIZE);                                           \
+                if( hd != sh ) RSB_PERR_GOTO(err,RSB_ERRM_ES);     \
+        }
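
The RSB_RW macro lets one function act as both serializer and deserializer: every field is listed exactly once, and the row flag selects fread or fwrite, so the on-disk field order cannot drift between the two directions. A minimal sketch of the same trick with a hypothetical two-field struct (not the librsb types):

    #include <stdio.h>

    struct sample { int id; double value; };

    /* Per-field helper: the reading flag picks fread or fwrite, so the
       field list below is written once and shared by both directions. */
    static size_t field_rw(void *p, size_t size, FILE *stream, int reading)
    {
        return reading ? fread(p, size, 1, stream) : fwrite(p, size, 1, stream);
    }

    /* Returns 0 on success, -1 on a short read or write. */
    static int sample_rw(struct sample *s, FILE *stream, int reading)
    {
        size_t done = 0;

        done += field_rw(&s->id,    sizeof(s->id),    stream, reading);
        done += field_rw(&s->value, sizeof(s->value), stream, reading);
        return (done == 2) ? 0 : -1;
    }
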
+
+static rsb_err_t rsb__rsprp_rw(struct rsb_rspr_t * rsprp, FILE * stream, rsb_bool_t row)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+        size_t hd = 0, sh = 0; /* have done, should have */
+
+	RSB_RW(row,&rsprp->filenamen,sizeof(rsprp->filenamen),1,stream);
+	RSB_RW(row,&rsprp->cn,sizeof(rsprp->cn),1,stream);
+	RSB_RW(row,&rsprp->incXn,sizeof(rsprp->incXn),1,stream);
+	RSB_RW(row,&rsprp->incYn,sizeof(rsprp->incYn),1,stream);
+	RSB_RW(row,&rsprp->nrhsn,sizeof(rsprp->nrhsn),1,stream);
+	RSB_RW(row,&rsprp->ntypecodes,sizeof(rsprp->ntypecodes),1,stream);
+	RSB_RW(row,&rsprp->tn,sizeof(rsprp->tn),1,stream);
+	RSB_RW(row,&rsprp->csf,sizeof(rsprp->csf),1,stream);
+	RSB_RW(row,&rsprp->filenamebl,sizeof(rsprp->filenamebl),1,stream);
+	RSB_RW(row,&rsprp->cabl,sizeof(rsprp->cabl),1,stream);
+	RSB_RW(row,&rsprp->incXabl,sizeof(rsprp->incXabl),1,stream);
+	RSB_RW(row,&rsprp->incYabl,sizeof(rsprp->incYabl),1,stream);
+	RSB_RW(row,&rsprp->nrhsabl,sizeof(rsprp->nrhsabl),1,stream);
+	RSB_RW(row,&rsprp->typecodesbl,sizeof(rsprp->typecodesbl),1,stream);
+	RSB_RW(row,&rsprp->tabl,sizeof(rsprp->tabl),1,stream);
+        goto ret;
+err:
+        errval = RSB_ERR_INTERNAL_ERROR;
+        RSB_ERROR("%s only %zd bytes instead of %zd !\n",row?"read":"wrote",hd,sh);
+ret:
+        return errval;
+}
+
+static rsb_err_t rsb__ts_rw(struct rsb_ts_t * tsp, FILE * stream, rsb_bool_t row)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+        size_t hd = 0, sh = 0; /* have done, should have */
+
+	RSB_RW(row,&tsp->avg,sizeof(tsp->avg),1,stream);
+	RSB_RW(row,&tsp->min,sizeof(tsp->min),1,stream);
+	RSB_RW(row,&tsp->max,sizeof(tsp->max),1,stream);
+	RSB_RW(row,&tsp->sd ,sizeof(tsp->sd ),1,stream);
+	RSB_RW(row,&tsp->ns ,sizeof(tsp->ns ),1,stream);
+        goto ret;
+err:
+        errval = RSB_ERR_INTERNAL_ERROR;
+        RSB_ERROR("%s only %zd bytes instead of %zd !\n",row?"read":"wrote",hd,sh);
+ret:
+        return errval;
+}
+
+static rsb_err_t rsb__psp_rw(struct rsb_rsps_t * psp, FILE * stream, rsb_bool_t row)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+        size_t hd = 0, sh = 0; /* have done, should have */
+
+	RSB_RW(row,&psp->op_time,sizeof(psp->op_time),1,stream);
+	RSB_RW(row,&psp->mkl_csr_op_time,sizeof(psp->mkl_csr_op_time),1,stream);
+	RSB_RW(row,&psp->at_op_time,sizeof(psp->at_op_time),1,stream);
+	RSB_RW(row,&psp->at_t,sizeof(psp->at_t),1,stream);
+	RSB_RW(row,&psp->at_mkl_csr_op_time,sizeof(psp->at_mkl_csr_op_time),1,stream);
+	RSB_RW(row,&psp->transA,sizeof(psp->transA),1,stream);
+	RSB_RW(row,&psp->cmflops,sizeof(psp->cmflops),1,stream);
+	RSB_RW(row,&psp->flagsA,sizeof(psp->flagsA),1,stream);
+	RSB_RW(row,&psp->nsubm,sizeof(psp->nsubm),1,stream);
+	RSB_RW(row,&psp->at_nsubm,sizeof(psp->at_nsubm),1,stream);
+	RSB_RW(row,&psp->isa,sizeof(psp->isa),1,stream);
+	RSB_RW(row,&psp->at_isa,sizeof(psp->at_isa),1,stream);
+	RSB_RW(row,&psp->at_cn,sizeof(psp->at_cn),1,stream);
+	RSB_RW(row,&psp->at_mkl_csr_cn,sizeof(psp->at_mkl_csr_cn),1,stream);
+	RSB_RW(row,&psp->uc,sizeof(psp->uc),1,stream);
+	RSB_RW(row,&psp->nrA,sizeof(psp->nrA),1,stream);
+	RSB_RW(row,&psp->ncA,sizeof(psp->ncA),1,stream);
+	RSB_RW(row,&psp->nnzA,sizeof(psp->nnzA),1,stream);
+	RSB_RW(row,&psp->at_eps,sizeof(psp->at_eps),1,stream);
+
+        errval = rsb__ts_rw(&psp->otpos,stream,row);
+        if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(ret,RSB_ERRM_ES);
+        errval = rsb__ts_rw(&psp->btpos,stream,row);
+        if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(ret,RSB_ERRM_ES);
+        errval = rsb__ts_rw(&psp->otpms,stream,row);
+        if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(ret,RSB_ERRM_ES);
+        errval = rsb__ts_rw(&psp->btpms,stream,row);
+        if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(ret,RSB_ERRM_ES);
+
+        goto ret;
+err:
+        errval = RSB_ERR_INTERNAL_ERROR;
+        RSB_ERROR("%s only %zd bytes instead of %zd !\n",row?"read":"wrote",hd,sh);
+ret:
+        return errval;
+}
+
+static int rsb__file_exists(const rsb_char_t * RSB_RESTRICT filename)
+{
+	FILE*stream = NULL;
+
+        stream = fopen(filename,"r");
+        if(stream != NULL)
+        {
+                fclose(stream);
+                return 1;
+        }
+        return 0;
+}
+
+rsb_err_t rsb__pr_save(const rsb_char_t * RSB_RESTRICT filename, /*const*/ void * RSB_RESTRICT rsprpv,
+        const rsb_char_t**RSB_RESTRICT filenamea, rsb_int_t*RSB_RESTRICT ca, const rsb_int_t*RSB_RESTRICT incXa, const rsb_int_t*RSB_RESTRICT incYa, const rsb_int_t*RSB_RESTRICT nrhsa, const rsb_type_t*RSB_RESTRICT typecodes, const rsb_int_t*RSB_RESTRICT ta,
+        rsb_bool_t can_overwrite)
+{
+        /*
+                Saves a performance record.
+                FIXME: TODO: error handling can be improved.
+                TODO: join common code with rsb__pr_load.
+        */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+        /*const*/ struct rsb_rspr_t * rsprp = rsprpv; /* FIXME: this shall be const */
+	rsb_int_t noc = RSB_PR_NOC(rsprp);
+	FILE*stream = NULL;
+        int filenamei;
+        struct rsb_rspra_t rspra;
+        struct rsb_rspra_t * rsprap = NULL;
+        rsb_byte_t * bbp = NULL; /* binary blob pointer */
+        size_t bbo = 0, bbl = 0, bbs = 0; /* binary blob offset/length/skip */
+        rsb_int_t idx;
+        const rsb_char_t * sgntr = RSB_RPR_FILE_HDR;
+	const char rsb_prl_lcc = RSB_PRL_LCC_IE;
+
+       	if(filename == NULL)
+		stream = RSB_DEFAULT_FD;
+	else
+        {
+                if(can_overwrite == RSB_BOOL_FALSE )
+                if(rsb__file_exists(filename))
+                {
+		        RSB_WARN("File %s already exists! Refusing to overwrite.\n",filename);
+		        errval = RSB_ERR_INTERNAL_ERROR;
+                        RSB_PERR_GOTO(err,RSB_ERRM_ES);
+                }
+		stream = fopen(filename,"wb");
+        }
+ 
+        if( stream == NULL )
+        {
+		errval = RSB_ERR_INTERNAL_ERROR;
+	        RSB_PERR_GOTO(err,RSB_ERRM_ES);
+        }
+
+        RSB_BZERO_P(&rspra);
+
+        rsprp->filenamebl = 0;
+        bbs = sizeof(rspra) + sizeof(filenamea[0])*rsprp->filenamen;
+        for(     filenamei=0;     filenamei<rsprp->filenamen ;++filenamei     )
+                rsprp->filenamebl += rsb__strlen(filenamea[filenamei]) + 1;
+        rsprp->cabl = sizeof(*rsprap->ca)*rsprp->cn;
+        rsprp->incXabl = sizeof(*rsprap->incXa)*rsprp->incXn;
+        rsprp->incYabl = sizeof(*rsprap->incYa)*rsprp->incYn;
+        rsprp->nrhsabl = sizeof(*rsprap->nrhsa)*rsprp->nrhsn;
+        rsprp->typecodesbl = sizeof(*rsprap->typecodes)*( rsprp->ntypecodes + 1 );
+        rsprp->tabl = ta ? sizeof(*rsprap->ta)*rsprp->tn : 0;
+        bbl = rsprp->filenamebl  + rsprp->cabl + rsprp->incXabl + rsprp->incYabl + rsprp->nrhsabl + rsprp->typecodesbl + rsprp->tabl;
+
+        fwrite(sgntr,RSB_RPR_FILE_HDL,1,stream);
+
+        errval = rsb__rsprp_rw(rsprp, stream, RSB_PR_WR);
+        if(RSB_SOME_ERROR(errval))
+                RSB_PERR_GOTO(err,RSB_ERRM_ES);
+
+        for(idx=0;idx<noc;++idx)
+        {
+                struct rsb_rsps_t*psp = &(rsprp->psa[idx]);
+
+                errval = rsb__psp_rw(psp, stream, RSB_PR_WR);
+                if(RSB_SOME_ERROR(errval))
+                        RSB_PERR_GOTO(err,RSB_ERRM_ES);
+        }
+
+	RSB_ASSERT(rsprp->filenamebl);
+
+	rsprap = rsb__calloc( bbl + bbs );
+	bbp = (void*) rsprap;
+        if (! bbp ) { errval = RSB_ERR_ENOMEM; RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+        bbp += sizeof(rspra);
+        rspra.filenamea = (void*) bbp;
+        bbp += sizeof(filenamea[0])*rsprp->filenamen;
+        bbp += rsprp->filenamebl;
+        rspra.ca = (void*) bbp;
+        bbp += rsprp->cabl;
+        rspra.incXa = (void*) bbp;
+        bbp += rsprp->incXabl;
+        rspra.incYa = (void*) bbp;
+        bbp += rsprp->incYabl;
+        rspra.nrhsa = (void*) bbp;
+        bbp += rsprp->nrhsabl;
+        rspra.typecodes = (rsb_char_t*) bbp;
+        bbp += rsprp->typecodesbl;
+        if(rsprp->tabl)
+                        rspra.ta = (void*) bbp;
+        bbp += rsprp->tabl;
+
+        if(!rsprap)
+        {
+                errval = RSB_ERR_ENOMEM;
+                RSB_PERR_GOTO(err,RSB_ERRM_ES);
+        }
+        *rsprap = rspra;
+
+        bbp = (void*) rsprap;
+
+        bbo = 0;
+        for(    filenamei=0;     filenamei<rsprp->filenamen ;++filenamei     )
+        {
+                rsb__strcpy(bbp+bbs+bbo,filenamea[filenamei]);
+                bbo += rsb__strlen(filenamea[filenamei]) + 1;
+        }
+	RSB_ASSERT(bbl);
+        RSB_ASSERT(bbo == rsprp->filenamebl);
+        RSB_RMEMCPY(rspra.ca       ,ca       ,rsprp->cabl       );
+        RSB_RMEMCPY(rspra.incXa    ,incXa    ,rsprp->incXabl    );
+        RSB_RMEMCPY(rspra.incYa    ,incYa    ,rsprp->incYabl    );
+        RSB_RMEMCPY(rspra.nrhsa    ,nrhsa    ,rsprp->nrhsabl    );
+        RSB_RMEMCPY(rspra.typecodes,typecodes,rsprp->typecodesbl);
+        if(ta)
+                        RSB_RMEMCPY(rspra.ta       ,ta       ,rsprp->tabl       );
+
+        if( 1 != fwrite(((rsb_byte_t*)(rsprap))+bbs, bbl, 1, stream) )
+        {
+                errval = RSB_ERR_INTERNAL_ERROR; /* a short write is an I/O error, not ENOMEM */
+                RSB_PERR_GOTO(err,RSB_ERRM_ES);
+        }
+
+       	if(filename == NULL)
+		;
+	else
+		errval = ( EOF == fclose(stream) ) ? RSB_ERR_INTERNAL_ERROR : errval;
+err:
+	if(!RSB_SOME_ERROR(errval))
+                RSB_PRL_SEP ("Saved a performance record of %d samples to %s\n",noc,filename);
+
+	RSB_CONDITIONAL_FREE(rsprap);
+        return errval;
+}
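
rsb__pr_save above (and rsb__pr_load below) pack the pointer-table struct, the filename pointer array, and all variable-length payloads into one contiguous calloc'd block, advancing a byte cursor past each region to carve it up; a single free then releases everything. A minimal sketch of that carving pattern with illustrative types (here the fixed-size regions are placed before the strings so each one stays naturally aligned):

    #include <stdlib.h>

    struct blob { char **names; int *counts; };

    /* Carve one allocation into: the header struct, a table of n string
       pointers, n ints, and sl bytes of string payload (returned via
       *strp). Everything is released by a single free() of the result. */
    static struct blob * blob_alloc(int n, size_t sl, char **strp)
    {
        const size_t bytes = sizeof(struct blob)
                           + n * sizeof(char*) + n * sizeof(int) + sl;
        unsigned char *bp = calloc(1, bytes);
        struct blob *b = (struct blob*) bp;

        if (!bp)
            return NULL;
        bp += sizeof(struct blob);
        b->names  = (char**) bp;  bp += n * sizeof(char*);
        b->counts = (int*)   bp;  bp += n * sizeof(int);
        *strp = (char*) bp;       /* string payload occupies the tail */
        return b;
    }
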
+
+static rsb_int_t rsb__pr_idx(const void*rsprpv, rsb_int_t filenamei, rsb_int_t ci, rsb_int_t incXi, rsb_int_t incYi, rsb_int_t nrhsi, rsb_int_t ntypecodei, rsb_int_t ti)
+{
+	/* 
+	 * compute performance record index
+	 * */
+	const struct rsb_rspr_t * rsprp = rsprpv;
+	rsb_int_t
+	off6 = 1    * rsprp->tn,
+	off5 = off6 * rsprp->ntypecodes,
+	off4 = off5 * rsprp->nrhsn,
+	off3 = off4 * rsprp->incYn,
+	off2 = off3 * rsprp->incXn,
+	off1 = off2 * rsprp->cn;
+
+	return ti + ntypecodei * off6 + nrhsi * off5 + incYi * off4 + incXi * off3 + ci * off2 + filenamei * off1;
+}
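
rsb__pr_idx linearizes a seven-dimensional coordinate in row-major order: ti varies fastest, and each stride off6..off1 is the product of all dimension sizes to its right, so the result ranges over exactly RSB_PR_NOC(rsprp) combinations. A small self-contained check of the same computation with made-up dimensions (illustrative names only):

    #include <assert.h>

    /* Row-major linearization of (f,c,x,y,r,t,ti) over dimensions
       (fn,cn,xn,yn,rn,tnn,tn); mirrors the off1..off6 stride products. */
    static int pr_idx(int fn, int cn, int xn, int yn, int rn, int tnn, int tn,
                      int f, int c, int x, int y, int r, int t, int ti)
    {
        const int off6 = tn;
        const int off5 = off6 * tnn;
        const int off4 = off5 * rn;
        const int off3 = off4 * yn;
        const int off2 = off3 * xn;
        const int off1 = off2 * cn;

        (void)fn; /* the outermost dimension size never enters the strides */
        return ti + t * off6 + r * off5 + y * off4 + x * off3 + c * off2 + f * off1;
    }

    int main(void)
    {
        /* 2 files, 3 thread counts, singleton middle dims, 2 t values */
        assert(pr_idx(2,3,1,1,1,1,2, 0,0,0,0,0,0,0) == 0);
        assert(pr_idx(2,3,1,1,1,1,2, 0,0,0,0,0,0,1) == 1);
        assert(pr_idx(2,3,1,1,1,1,2, 0,1,0,0,0,0,0) == 2);
        assert(pr_idx(2,3,1,1,1,1,2, 1,0,0,0,0,0,0) == 6);
        return 0;
    }
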
+
+static rsb_err_t rsb__pr_load(const rsb_char_t * filename, struct rsb_rspr_t ** rsprpvp)
+{
+        /* 
+                Loads a performance record.
+                Overwrites the target pointer.
+                If *rsprpvp is set, then only the first struct will be loaded.
+
+                TODO: error handling can be improved.
+                TODO: relax the typecode checks, as values from a differently configured build may have to be read.
+         */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_rspr_t rspr;
+	struct rsb_rspr_t * rsprp = NULL;
+	struct rsb_rspr_t ** rsprpp = rsprpvp;
+	FILE*stream = NULL;
+	rsb_int_t noc = 0, idx; /* number of combinations */
+        rsb_char_t sgntr [ RSB_RPR_FILE_HDL ];
+        struct rsb_rspra_t rspra;
+        struct rsb_rspra_t * rsprap = NULL;
+        rsb_byte_t * bbp = NULL; /* binary blob pointer */
+        size_t bbo = 0, bbl = 0, bbs = 0; /* binary blob offset/length/skip */
+	rsb_int_t filenamei, ci, incXi, incYi, nrhsi, typecodesi, ti;
+
+        RSB_BZERO_P(&rspr);
+
+       	if(filename == NULL)
+		stream = stdin;
+	else
+		stream = fopen(filename,"rb");
+
+        if( stream == NULL )
+        {
+		errval = RSB_ERR_INTERNAL_ERROR;
+	        RSB_PERR_GOTO(err,"Error opening performance record file \"%s\" for reading !\n",filename?filename:"stdin");
+        }
+
+        if( 1 != fread(sgntr,RSB_RPR_FILE_HDL,1,stream) )
+        {
+		errval = RSB_ERR_INTERNAL_ERROR;
+                RSB_PERR_GOTO(err,"Unable to read header!\n");
+        }
+
+        if ( strncmp(sgntr,RSB_RPR_FILE_HDR,RSB_RPR_FILE_HDL) ) 
+        {
+                /* TODO: need support for different versions ... */
+		errval = RSB_ERR_INTERNAL_ERROR;
+                RSB_PERR_GOTO(err,"File deconding error!\n");
+        }
+
+        rsprp = &rspr;
+        errval = rsb__rsprp_rw(rsprp, stream, RSB_PR_RD);
+	if(RSB_SOME_ERROR(errval))
+                RSB_PERR_GOTO(err,RSB_ERRM_ES);
+
+	if(*rsprpp)
+        {
+                /* ok to return now */
+	        **rsprpp = rspr;
+                goto cret;
+        }
+
+        errval = rsb__pr_alloc(&rsprp, &rspr, rspr.filenamen, rspr.cn, rspr.incXn, rspr.incYn, rspr.nrhsn, rspr.ntypecodes, rspr.tn);
+	if(RSB_SOME_ERROR(errval))
+                RSB_PERR_GOTO(err,RSB_ERRM_ES);
+
+        if(rsprp->csf == 0)
+	{
+                errval = RSB_ERR_CORRUPT_INPUT_DATA;
+                RSB_PERR_GOTO(err,"Seems like the input performance record is empty!\n");
+	}
+        RSB_ASSERT(rsprp->csf);
+
+	noc = RSB_PR_NOC(rsprp);
+
+        //for(idx=0;idx<noc;++idx)
+	for(     filenamei=0;     filenamei<rsprp->filenamen ;++filenamei     )
+	for(ci=0;ci<rsprp->cn;++ci)
+	for(     incXi=0;     incXi<rsprp->incXn     ;++incXi     )
+	for(     incYi=0;     incYi<rsprp->incYn     ;++incYi     )
+	for(     nrhsi=0;     nrhsi<rsprp->nrhsn     ;++nrhsi     )
+	for(typecodesi=0;typecodesi<rsprp->ntypecodes;++typecodesi)
+	for(ti=0;ti<rsprp->tn;++ti)
+	{
+		size_t idx = rsb__pr_idx(rsprp, filenamei, ci, incXi, incYi, nrhsi, typecodesi, ti);
+	        struct rsb_rsps_t*psp = &(rsprp->psa[idx]);
+
+                errval = rsb__psp_rw(psp, stream, RSB_PR_RD);
+               	if(RSB_SOME_ERROR(errval))
+                        RSB_PERR_GOTO(err,RSB_ERRM_ES);
+                /* compatibility patch for old (pre-1.2) values of RSB_TRANSPOSITION_*  */
+		if(psp->transA==0x00) psp->transA = RSB_TRANSPOSITION_N;
+		if(psp->transA==0x01) psp->transA = RSB_TRANSPOSITION_T;
+		if(psp->transA==0x02) psp->transA = RSB_TRANSPOSITION_C;
+
+                if ( rsb__getenv("RSB_PR_RD_NULLIFY_FILENAMEI") ) /* proof of concept */
+                {
+                        int nidx = rsb__util_atoi(rsb__getenv("RSB_PR_RD_NULLIFY_FILENAMEI"));
+                        if ( idx / (noc/rspr.filenamen) == nidx )
+			       	psp->uc = 0;
+                }
+                if ( rsb__getenv("RSB_PR_RD_RESTRICT_FILENAMEI") ) /* proof of concept */
+                {
+                        int nidx = rsb__util_atoi(rsb__getenv("RSB_PR_RD_RESTRICT_FILENAMEI"));
+                        if ( idx / (noc/rspr.filenamen) != nidx )
+			       	psp->uc = 0;
+                }
+                if ( rsb__getenv("RSB_PR_RD_NULLIFY_SAMPLEIDX") ) /* proof of concept */
+                {
+                        int nidx = rsb__util_atoi(rsb__getenv("RSB_PR_RD_NULLIFY_SAMPLEIDX"));
+                        if ( idx == nidx )
+			       	psp->uc = 0;
+                }
+                if ( rsb__getenv("RSB_PR_RD_RESTRICT_SAMPLEIDX") ) /* proof of concept */
+                {
+                        int nidx = rsb__util_atoi(rsb__getenv("RSB_PR_RD_RESTRICT_SAMPLEIDX"));
+                        if ( idx != nidx )
+			       	psp->uc = 0;
+                }
+                if ( rsb__getenv("RSB_PR_RD_RESTRICT_TRANSA") ) /* proof of concept */
+                {
+                        rsb_trans_t no_transA = (*rsb__getenv("RSB_PR_RD_RESTRICT_TRANSA"));
+                        if ( psp->transA != no_transA )
+			       	psp->uc = 0;
+                }
+                if ( rsb__getenv("RSB_PR_RD_NULLIFY_TRANSA") ) /* proof of concept */
+                {
+                        rsb_trans_t no_transA = (*rsb__getenv("RSB_PR_RD_NULLIFY_TRANSA"));
+                        if ( psp->transA == no_transA )
+			       	psp->uc = 0;
+                }
+                if ( rsb__getenv("RSB_PR_RD_NULLIFY_NRHSI") ) /* proof of concept */
+                {
+                        int nidx = rsb__util_atoi(rsb__getenv("RSB_PR_RD_NULLIFY_NRHSI"));
+			if(nrhsi==nidx)
+			       	psp->uc = 0;
+                }
+                if ( rsb__getenv("RSB_PR_RD_RESTRICT_NRHSI") ) /* proof of concept */
+                {
+                        int nidx = rsb__util_atoi(rsb__getenv("RSB_PR_RD_RESTRICT_NRHSI"));
+			if(nrhsi!=nidx)
+			       	psp->uc = 0;
+                }
+        }
+
+        RSB_BZERO_P(&rspra);
+	
+	bbl = rspr.filenamebl + rspr.cabl + rspr.incXabl + rspr.incYabl + rspr.nrhsabl + rspr.typecodesbl + rspr.tabl;
+	bbs = sizeof(rspra) + sizeof(rspra.filenamea[0])*rspr.filenamen;
+	
+        rsprap = rsb__calloc( bbl + bbs );
+        bbp = (void*) rsprap;
+	if (! bbp ) { errval = RSB_ERR_ENOMEM; RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+	bbp += sizeof(rspra);
+	rspra.filenamea = (void*) bbp;
+	bbp += sizeof(rspra.filenamea[0])*rspr.filenamen;
+	bbp += rspr.filenamebl;
+	rspra.ca = (void*) bbp;
+	bbp += rspr.cabl;
+	rspra.incXa = (void*) bbp;
+	bbp += rspr.incXabl;
+	rspra.incYa = (void*) bbp;
+	bbp += rspr.incYabl;
+	rspra.nrhsa = (void*) bbp;
+	bbp += rspr.nrhsabl;
+	rspra.typecodes = (rsb_char_t*) (void*) bbp;
+	bbp += rspr.typecodesbl;
+	if(rspr.tabl)
+	        rspra.ta = (void*) bbp;
+	bbp += rspr.tabl;
+	
+        RSB_ASSERT(rspr.filenamebl);
+        RSB_ASSERT(rspr.nrhsabl);
+	*rsprap = rspra;
+	
+	if( 1 != fread( ((rsb_byte_t*)rsprap)+bbs, bbl, 1, stream) )
+	{
+                errval = RSB_ERR_INTERNAL_ERROR;
+                RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	bbp = (void*) rsprap;
+	bbp += bbs;
+	bbo = 0;
+
+        for(     filenamei=0;     filenamei<rsprp->filenamen ;++filenamei     )
+	{
+                rspra.filenamea[filenamei] = (rsb_char_t*) bbp + bbo;
+                bbo += rsb__strlen(bbp + bbo) + 1;
+	}
+
+	rsprp->rsprap = rsprap;
+
+err:
+ 	if (rsprp)
+ 	        RSB_ASSIGN_IF(rsprpp,rsprp)
+
+ 	if(rsprpp == NULL)
+                RSB__PR_FREE(rsprp); /* bogus load ... */
+cret:
+       	if(filename == NULL)
+		;
+	else
+		errval = ( stream && EOF == fclose(stream) ) ? RSB_ERR_INTERNAL_ERROR : errval;
+
+        return errval;
+}
+
+#if 0
+static rsb_err_t rsb__pr_dumpfile(const rsb_char_t *filename)
+{
+	/* Obsoleted by rsb__pr_dumpfiles. */
+        /*
+                Due to subtle build-configuration-dependent problems, it is better to declare this experimental.
+                FIXME: error handling is insufficient.
+        */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_rspr_t * rsprp = NULL;
+
+        errval = rsb__pr_load(filename,&rsprp);
+	if(RSB_SOME_ERROR(errval))
+                RSB_PERR_GOTO(err,RSB_ERRM_ES);
+        RSB_PRL_SEP("\n");
+	errval = rsb__pr_dump(rsprp, rsprp->rsprap->filenamea, rsprp->rsprap->ca, rsprp->rsprap->incXa, rsprp->rsprap->incYa, rsprp->rsprap->nrhsa, rsprp->rsprap->typecodes, NULL );
+        RSB_PRL_SEP("\n");
+err:
+	RSB__PR_FREE(rsprp);
+        return errval;
+}
+#endif
+
+#if 0
+static rsb_err_t rsb__pr_sort(struct rsb_rspr_t * rsprp)
+{
+	/* 
+	 * TODO: this function is yet incomplete.
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+        if(!rsprp->rsprap)
+                goto err;
+
+        if(rsprp->rsprap->filenamea);
+        if(rsprp->rsprap->ca);
+        if(rsprp->rsprap->incXa);
+        if(rsprp->rsprap->incYa);
+        if(rsprp->rsprap->nrhsa);
+        if(rsprp->rsprap->typecodes);
+        if(rsprp->rsprap->ta);
+
+	rsb_int_t  filenamen,   cn,   incXn,   incYn,   nrhsn,  ntypecodes,   tn, csf /* count so far */;
+err:
+        return errval;
+}
+#endif
+
+static rsb_err_t rsb__pr_cmp(/*const*/ struct rsb_rspr_t * rspr0p, const struct rsb_rspr_t * rspr1p, int wr)
+{
+	/* 
+	 * wr: RSB_PRD_CMP_DIV (1) for a ratio, RSB_PRD_CMP_DIFF (2) for a difference.
+	 * TODO: this function is still experimental.
+	 * TODO: input error checking is missing.
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_rsps_t dps; /* difference performance sample */
+        rsb_int_t idx = 0, noc = 0, pr = 0, csf = 0;
+	const char rsb_prl_lcc = '#';
+
+        RSB_ASSERT(rspr0p);
+        RSB_ASSERT(rspr1p);
+
+        RSB_ASSERT(rspr0p->filenamen == rspr1p->filenamen);
+        RSB_ASSERT(rspr0p->filenamebl == rspr1p->filenamebl);
+        RSB_ASSERT(rspr0p->cn == rspr1p->cn);
+        /* RSB_ASSERT(rspr0p->csf == rspr1p->csf); */
+        RSB_ASSERT(rspr0p->csf > 0 && rspr1p->csf > 0);
+        RSB_ASSERT(rspr0p->cabl == rspr1p->cabl);
+        RSB_ASSERT(rspr0p->incXn == rspr1p->incXn );
+        RSB_ASSERT(rspr0p->incXabl == rspr1p->incXabl );
+        RSB_ASSERT(rspr0p->incYn == rspr1p->incYn );
+        RSB_ASSERT(rspr0p->incYabl == rspr1p->incYabl );
+        RSB_ASSERT(rspr0p->nrhsn == rspr1p->nrhsn );
+        RSB_ASSERT(rspr0p->nrhsabl == rspr1p->nrhsabl );
+        RSB_ASSERT(rspr0p->ntypecodes == rspr1p->ntypecodes );
+        RSB_ASSERT(rspr0p->typecodesbl == rspr1p->typecodesbl );
+        RSB_ASSERT(rspr0p->tn == rspr1p->tn );
+        RSB_ASSERT(rspr0p->tabl == rspr1p->tabl );
+
+	csf = noc = RSB_PR_NOC(rspr0p);
+
+        if(noc != RSB_PR_NOC(rspr1p))
+        {
+		errval = RSB_ERR_INTERNAL_ERROR;
+	        RSB_PERR_GOTO(err,RSB_ERRM_ES);
+        }
+
+	if( rspr0p->csf != rspr1p->csf )
+	{
+		csf = RSB_MIN(rspr0p->csf, rspr1p->csf);
+               	RSB_PRL("Out of %d samples, one record has %d and the other %d (incomplete record ?). Limiting to the minimum of the two (EXPERIMENTAL!).\n", noc, rspr0p->csf, rspr1p->csf);
+	}
+
+        rspr0p->ror = RSB_BOOL_TRUE;
+
+        switch(wr){
+        case(RSB_PRD_CMP_DIV):
+        for(idx=0;idx<csf;++idx)
+        if(rspr0p->psa[idx].uc && rspr1p->psa[idx].uc)
+        {
+	        dps = rspr0p->psa[idx];
+                dps.op_time = rspr0p->psa[idx].op_time / rspr1p->psa[idx].op_time;
+                dps.mkl_csr_op_time = rspr0p->psa[idx].mkl_csr_op_time / rspr1p->psa[idx].mkl_csr_op_time;
+                dps.at_mkl_csr_op_time = rspr0p->psa[idx].at_mkl_csr_op_time / rspr1p->psa[idx].at_mkl_csr_op_time;
+                dps.at_t = rspr0p->psa[idx].at_t / rspr1p->psa[idx].at_t;
+                dps.at_op_time = rspr0p->psa[idx].at_op_time / rspr1p->psa[idx].at_op_time ;
+	        dps.at_eps = 0;
+	        rspr0p->psa[idx] = dps;
+                ++pr;
+        }
+        break;
+        case(RSB_PRD_CMP_DIFF):
+        for(idx=0;idx<csf;++idx)
+        if(rspr0p->psa[idx].uc && rspr1p->psa[idx].uc)
+        {
+	        dps = rspr0p->psa[idx];
+                dps.op_time = rspr0p->psa[idx].op_time - rspr1p->psa[idx].op_time;
+                dps.mkl_csr_op_time = rspr0p->psa[idx].mkl_csr_op_time - rspr1p->psa[idx].mkl_csr_op_time;
+                dps.at_mkl_csr_op_time = rspr0p->psa[idx].at_mkl_csr_op_time - rspr1p->psa[idx].at_mkl_csr_op_time;
+                dps.at_t = rspr0p->psa[idx].at_t - rspr1p->psa[idx].at_t;
+                dps.at_op_time = rspr0p->psa[idx].at_op_time - rspr1p->psa[idx].at_op_time ;
+                /* and: */
+                dps.at_eps = rspr0p->psa[idx].at_eps - rspr1p->psa[idx].at_eps ;
+                dps.at_isa = rspr0p->psa[idx].at_isa - rspr1p->psa[idx].at_isa ;
+	        rspr0p->psa[idx] = dps;
+                ++pr;
+        }
+        break;
+        default:
+		errval = RSB_ERR_INTERNAL_ERROR;
+                RSB_PERR_GOTO(err,RSB_ERRM_ES);
+        }
+
+        if(pr == 0)
+	{
+		RSB_WARN("No pair of samples has been found to be conformable!\n");
+		// errval = RSB_ERR_BADARGS; goto err;
+	}
+err:
+        return errval;
+}
+
+static rsb_err_t rsb__pr_merge(struct rsb_rspr_t * rspr0p, const struct rsb_rspr_t * rspr1p)
+{
+	/* 
+         * Merges the second record into the first in place, filling in the samples the first is missing.
+	 * TODO: this function is still experimental.
+         * TODO: extend the merging mechanism to arbitrarily many records.
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_int_t noc0, noc1;
+        rsb_int_t idx;
+	const char rsb_prl_lcc = '#';
+
+        RSB_PRL("Warning: joining assuming ALL parameters are conformant (except filenames)\n");
+
+        RSB_ASSERT(rspr0p);
+        RSB_ASSERT(rspr1p);
+
+        RSB_ASSERT(rspr0p->filenamen == rspr1p->filenamen);
+        RSB_ASSERT(rspr0p->filenamebl == rspr1p->filenamebl);
+        RSB_ASSERT(rspr0p->csf>0);
+        RSB_ASSERT(rspr1p->csf>0);
+        RSB_ASSERT(rspr0p->cn == rspr1p->cn);
+        RSB_ASSERT(rspr0p->cabl == rspr1p->cabl);
+        RSB_ASSERT(rspr0p->incXn == rspr1p->incXn );
+        RSB_ASSERT(rspr0p->incXabl == rspr1p->incXabl );
+        RSB_ASSERT(rspr0p->incYn == rspr1p->incYn );
+        RSB_ASSERT(rspr0p->incYabl == rspr1p->incYabl );
+        RSB_ASSERT(rspr0p->nrhsn == rspr1p->nrhsn );
+        RSB_ASSERT(rspr0p->nrhsabl == rspr1p->nrhsabl );
+        RSB_ASSERT(rspr0p->ntypecodes == rspr1p->ntypecodes );
+        RSB_ASSERT(rspr0p->typecodesbl == rspr1p->typecodesbl );
+        RSB_ASSERT(rspr0p->tn == rspr1p->tn );
+        RSB_ASSERT(rspr0p->tabl == rspr1p->tabl );
+
+        if(!rspr0p->rsprap)
+                goto err;
+        if(!rspr1p->rsprap)
+                goto err;
+
+	noc0 = RSB_PR_NOC(rspr0p);
+	noc1 = RSB_PR_NOC(rspr1p);
+
+        if( noc0 != noc1 )
+        {
+                errval = RSB_ERR_INTERNAL_ERROR;
+                RSB_PERR_GOTO(err,RSB_ERRM_ES);
+        }
+
+        RSB_ASSERT( noc0 == noc1 );
+
+        for(idx=0;idx<noc0;++idx)
+        {
+                struct rsb_rsps_t*psp = &(rspr0p->psa[idx]);
+
+                if( psp->uc == 0 )
+                        *psp = (rspr1p->psa[idx]);
+        }
+err:
+        return errval;
+}
+
+static rsb_err_t rsb__pr_join(struct rsb_rspr_t ** rsprpp, const struct rsb_rspr_t * rspr0p, const struct rsb_rspr_t * rspr1p)
+{
+	/* 
+         * Joins two records by appending one to the other and returning a new one.
+	 * TODO: this function is still experimental.
+         * TODO: extend the joining mechanism to arbitrarily many records.
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+        struct rsb_rspr_t * rsprp = NULL;
+        struct rsb_rspra_t rspra;
+	rsb_int_t noc0, noc1;
+        size_t bbo = 0, bbl = 0, bbs = 0; /* binary blob offset/length/skip */
+        rsb_byte_t * bbp = NULL; /* binary blob pointer */
+	rsb_int_t filenamei;
+	const char rsb_prl_lcc = '#';
+
+        RSB_PRL("Warning: joining assuming ALL parameters are conformant (except filenames)\n");
+
+        RSB_ASSERT(rspr0p);
+        RSB_ASSERT(rspr1p);
+
+        /* RSB_ASSERT(rspr0p->filenamen == rspr1p->filenamen); */
+        /* RSB_ASSERT(rspr0p->filenamebl == rspr1p->filenamebl); */
+        RSB_ASSERT(rspr0p->csf>0);
+        RSB_ASSERT(rspr1p->csf>0);
+        RSB_ASSERT(rspr0p->cn == rspr1p->cn);
+        RSB_ASSERT(rspr0p->cabl == rspr1p->cabl);
+        RSB_ASSERT(rspr0p->incXn == rspr1p->incXn );
+        RSB_ASSERT(rspr0p->incXabl == rspr1p->incXabl );
+        RSB_ASSERT(rspr0p->incYn == rspr1p->incYn );
+        RSB_ASSERT(rspr0p->incYabl == rspr1p->incYabl );
+        RSB_ASSERT(rspr0p->nrhsn == rspr1p->nrhsn );
+        RSB_ASSERT(rspr0p->nrhsabl == rspr1p->nrhsabl );
+        RSB_ASSERT(rspr0p->ntypecodes == rspr1p->ntypecodes );
+        RSB_ASSERT(rspr0p->typecodesbl == rspr1p->typecodesbl );
+        RSB_ASSERT(rspr0p->tn == rspr1p->tn );
+        RSB_ASSERT(rspr0p->tabl == rspr1p->tabl );
+
+        if(!rspr0p->rsprap)
+                goto err;
+        if(!rspr1p->rsprap)
+                goto err;
+
+        errval = rsb__pr_alloc(&rsprp, rspr0p, rspr0p->filenamen + rspr1p->filenamen, rspr0p->cn, rspr0p->incXn, rspr0p->incYn, rspr0p->nrhsn, rspr0p->ntypecodes, rspr0p->tn);
+
+	if(RSB_SOME_ERROR(errval))
+                RSB_PERR_GOTO(err,RSB_ERRM_ES);
+
+        rsprp->csf = rspr0p->csf + rspr1p->csf;
+        rsprp->filenamen = rspr0p->filenamen + rspr1p->filenamen;
+        rsprp->filenamebl = rspr0p->filenamebl + rspr1p->filenamebl;
+
+	noc0 = RSB_PR_NOC(rspr0p);
+	noc1 = RSB_PR_NOC(rspr1p);
+
+        RSB_RMEMCPY(rsprp->psa+0*noc0,rspr0p->psa,sizeof(*rsprp->psa)*noc0);
+        RSB_RMEMCPY(rsprp->psa+1*noc0,rspr1p->psa,sizeof(*rsprp->psa)*noc1);
+
+        RSB_BZERO_P(&rspra);
+	
+	bbl = rsprp->filenamebl + rsprp->cabl + rsprp->incXabl + rsprp->incYabl + rsprp->nrhsabl + rsprp->typecodesbl + rsprp->tabl;
+	bbs = sizeof(rspra) + sizeof(rspra.filenamea[0])*rsprp->filenamen;
+	
+        /* FIXME: encapsulate the following in a function */
+        rsprp->rsprap = rsb__calloc( bbl + bbs );
+        bbp = (void*) rsprp->rsprap;
+	if (! bbp ) { errval = RSB_ERR_ENOMEM; RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+	bbp += sizeof(rspra);
+	rspra.filenamea = (void*) bbp;
+	bbp += sizeof(rspra.filenamea[0])*rsprp->filenamen;
+	bbp += rsprp->filenamebl;
+	rspra.ca = (void*) bbp;
+	bbp += rsprp->cabl;
+	rspra.incXa = (void*) bbp;
+	bbp += rsprp->incXabl;
+	rspra.incYa = (void*) bbp;
+	bbp += rsprp->incYabl;
+	rspra.nrhsa = (void*) bbp;
+	bbp += rsprp->nrhsabl;
+	rspra.typecodes = (rsb_char_t*) (void*) bbp;
+	bbp += rsprp->typecodesbl;
+	if(rsprp->tabl)
+	        rspra.ta = (void*) bbp;
+	bbp += rsprp->tabl;
+	
+        RSB_ASSERT(rsprp->filenamebl);
+        RSB_ASSERT(rsprp->nrhsabl);
+	
+	bbp = (void*) rsprp->rsprap;
+	bbp += bbs;
+	bbo = 0;
+
+        for(     filenamei=0;     filenamei<rspr0p->filenamen ;++filenamei     )
+	{
+                rspra.filenamea[rspr0p->filenamen*0+filenamei] = (rsb_char_t*) bbp + bbo;
+                rsb__strcpy(bbp+bbo,rspr0p->rsprap->filenamea[filenamei]);
+                bbo += rsb__strlen(bbp + bbo) + 1;
+	}
+
+        for(     filenamei=0;     filenamei<rspr1p->filenamen ;++filenamei     )
+	{
+                rspra.filenamea[rspr0p->filenamen*1+filenamei] = (rsb_char_t*) bbp + bbo;
+                rsb__strcpy(bbp+bbo,rspr1p->rsprap->filenamea[filenamei]);
+                bbo += rsb__strlen(bbp + bbo) + 1;
+	}
+
+        RSB_ASSERT(bbo == rsprp->filenamebl);
+        RSB_RMEMCPY(rspra.ca       ,rspr0p->rsprap->ca       ,rsprp->cabl       );
+        RSB_RMEMCPY(rspra.incXa    ,rspr0p->rsprap->incXa    ,rsprp->incXabl    );
+        RSB_RMEMCPY(rspra.incYa    ,rspr0p->rsprap->incYa    ,rsprp->incYabl    );
+        RSB_RMEMCPY(rspra.nrhsa    ,rspr0p->rsprap->nrhsa    ,rsprp->nrhsabl    );
+        RSB_RMEMCPY(rspra.typecodes,rspr0p->rsprap->typecodes,rsprp->typecodesbl);
+
+        *rsprp->rsprap = rspra;
+	RSB_ASSIGN_IF(rsprpp,rsprp)
+
+        /* TODO: may also adjoin the remaining arrays, extending to cabl, incXabl, incYabl, nrhsabl, typecodesbl, tabl */
+err:
+        return errval;
+}
+
+rsb_err_t rsb__pr_dumpfiles(const rsb_char_t **argv, const int argc)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+        int ds = RSB_PRD_CMP_DFLT;
+        struct rsb_rspr_t * rsprp = NULL;
+        int argi;
+	rsb_int_t noc = 0, noc0; /* number of combinations */
+	char rsb_prl_lcc = RSB_PRL_LCC_IE ;
+
+        if( !argv || argc < 1 || strlen(argv[0]) < 1 )
+        {
+	        RSB_PRL("No performance record files to dump !? Please specify at least one.\n");
+	        RSB_PRL("Consider further options, specifiable via environment variables:\n");
+		RSB_PRL("# begin of help message\n");
+		RSB_PRL("# This feature of librsb is not ufficially supported.\n");
+		RSB_PRL("# threshold (expressed as ratio) between values:\n");
+		RSB_PRL("RSB_CMP_THR # nearly same threshold\n");
+		RSB_PRL("RSB_APE_THR # close values threshold\n");
+		RSB_PRL("RSB_RLD_THR # relevant difference threshold\n");
+		RSB_PRL("RSB_HUD_THR # huge difference threshold\n");
+		RSB_PRL("RSB_PRD_STYLE_PLT_FMT # (if RSB_PR_SR=2) plot file format: EPS if set, PNG otherwise\n");
+		RSB_PRL("RSB_PRD_STYLE_PLT_PFN # (if RSB_PR_SR=2) plot file name\n");
+		RSB_PRL("RSB_PR_FSEPSTR # Field separator string\n");
+		RSB_PRL("RSB_PR_ENDLSTR # End of line separator string\n");
+		RSB_PRL("RSB_PR_PRL_CC  # Beginning of line comment char\n");
+		RSB_PRL("RSB_PR_PRL_LCC # \n");
+		RSB_PRL("RSB_PR_PRL_TCS # \n");
+		RSB_PRL("RSB_PR_WLTC # If > 0 and RSB_PR_SR=0, will emit LaTeX tables	(setting accordingly RSB_PR_PRL_LCC, RSB_PR_PRL_TCS, RSB_PR_ENDLSTR, RSB_PR_FSEPSTR); if > 1 output will be colored\n");
+                RSB_PRL("RSB_PR_MULTIDUMP #  %d=dump %d=auto/append %d=ratio %d=diff %d=merge.\n",RSB_PRD_CMP_MDUMP,RSB_PRD_CMP_DFLT,RSB_PRD_CMP_DIV,RSB_PRD_CMP_DIFF,RSB_PRD_CMP_APPEND);
+		RSB_PRL("RSB_PR_RD_NULLIFY_FILENAMEI # exclude a matrix' index\n");
+		RSB_PRL("RSB_PR_RD_RESTRICT_FILENAMEI # restrict to one matrix' index\n");
+		RSB_PRL("RSB_PR_RD_NULLIFY_TRANSA # exclude a transposition\n");
+		RSB_PRL("RSB_PR_RD_RESTRICT_TRANSA # restrict to one transposition\n");
+		RSB_PRL("RSB_PR_RD_NULLIFY_NRHSI # exclude a nrhs index\n");
+		RSB_PRL("RSB_PR_RD_RESTRICT_NRHSI # restrict to one nrhs index\n");
+		RSB_PRL("RSB_PR_RD_NULLIFY_SAMPLEIDX # exclude a matrix' index\n");
+		RSB_PRL("RSB_PR_RD_RESTRICT_SAMPLEIDX # restrict to one matrix' index\n");
+		RSB_PRL("RSB_PR_ONLY_TOTAL_TABLE # only the total table, not the 'limited' slices\n");
+		RSB_PRL("RSB_PR_SAVE_MULTIDUMP # output performance record filename\n");
+		RSB_PRL("RSB_PR_SR # 0 for table output, 1 for comparison table output, 2 for plot\n");
+		RSB_PRL("# end of help message\n");
+		goto err;
+        }
+        RSB_PRL_SEP("\n");
+
+        if(argc > 1)
+        {
+                RSB_PRL("You can control multiple files dump with RSB_PR_MULTIDUMP= %d=dump %d=auto/append %d=ratio %d=diff %d=merge.\n",RSB_PRD_CMP_MDUMP,RSB_PRD_CMP_DFLT,RSB_PRD_CMP_DIV,RSB_PRD_CMP_DIFF,RSB_PRD_CMP_APPEND);
+        }
+
+        if( NULL != rsb__getenv("RSB_PR_MULTIDUMP") )
+                ds = rsb__util_atoi(rsb__getenv("RSB_PR_MULTIDUMP"));
+        else
+        {
+                for(argi=0;argi<argc;++argi)
+                {
+                        struct rsb_rspr_t rspr;
+                        rsprp = &rspr;
+                        RSB_BZERO_P(&rspr);
+                        errval = rsb__pr_load(argv[argi],&rsprp);
+		        if(RSB_SOME_ERROR(errval))
+			{
+                        	rsprp = NULL;
+				RSB_PERR_GOTO(err,RSB_ERRM_ES);
+			}
+                        noc = RSB_PR_NOC(&rspr); /* FIXME: this is quite a weak test. */
+                        if(argi == 0) noc0 = noc;
+                        if(argi  > 0 && noc != noc0) { noc = 0; break; }
+                        rsprp = NULL;
+                }
+
+                if ( argc > 1 &&  noc != 0 )
+                {
+        	        RSB_PRL("Warning: hazarding the guess you are working with complementary performance record files, therefore attempting merging!.\n");
+                        ds = RSB_PRD_CMP_APPEND;
+                }
+        }
+
+        if(ds < RSB_PRD_CMP_MDUMP || ds > RSB_PRD_CMP_APPEND)
+        {
+		RSB_ERROR("Set RSB_PR_MULTIDUMP to a bad value !\n");
+		return RSB_ERR_BADARGS;
+                goto err;
+        }
+
+        if(ds >= RSB_PRD_CMP_DFLT)
+        {
+                errval = rsb__pr_load(argv[0],&rsprp);
+	        if(RSB_SOME_ERROR(errval))
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+        }
+
+        if(argc > 1)
+                RSB_PRL("Will display summary of %d performance records\n", argc);
+
+        if(ds == RSB_PRD_CMP_DFLT || ds == RSB_PRD_CMP_APPEND)
+        {
+                for(argi=1;argi<argc;++argi)
+                if(ds==RSB_PRD_CMP_DFLT)
+                {
+                        struct rsb_rspr_t * rspr1p = NULL, * rspr0p = NULL;
+                        RSB_PRL("Will append performance records of file %d/%d: %s to that of %s.\n",argi+1,argc,argv[argi],argv[0]);
+                        errval = rsb__pr_load(argv[argi],&rspr0p);
+                        if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+                        errval = rsb__pr_join(&rspr1p, rsprp, rspr0p);
+                        RSB__PR_FREE(rspr0p);
+                        RSB__PR_FREE(rsprp);
+                        if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+                        rsprp = rspr1p;
+                        rspr1p = NULL;
+                }
+                else
+                if(ds==RSB_PRD_CMP_APPEND)
+                {
+                        struct rsb_rspr_t * rspr0p = NULL;
+                        RSB_PRL("Will merge performance records of file %d/%d: %s to that of %s.\n",argi+1,argc,argv[argi],argv[0]);
+                        errval = rsb__pr_load(argv[argi],&rspr0p);
+                        if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+                        errval = rsb__pr_merge(rsprp, rspr0p);
+                        RSB__PR_FREE(rspr0p);
+                        if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+                }
+                errval = rsb__pr_dump(rsprp, rsprp->rsprap->filenamea, rsprp->rsprap->ca, rsprp->rsprap->incXa, rsprp->rsprap->incYa, rsprp->rsprap->nrhsa, rsprp->rsprap->typecodes, NULL );
+                if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+                
+                if ( rsb__getenv("RSB_PR_SAVE_MULTIDUMP") )
+                {
+                        const char * of = rsb__getenv("RSB_PR_SAVE_MULTIDUMP");
+                        /* errval = rsb__pr_save(of, rsprp, NULL, NULL, NULL, NULL, NULL, NULL, NULL ); */
+                        errval = rsb__pr_save(of, rsprp, rsprp->rsprap->filenamea, rsprp->rsprap->ca, rsprp->rsprap->incXa, rsprp->rsprap->incYa, rsprp->rsprap->nrhsa, rsprp->rsprap->typecodes, rsprp->rsprap->ta, RSB_BOOL_FALSE);
+                        if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+                }
+        }
+
+        if(ds == RSB_PRD_CMP_DIV || ds == RSB_PRD_CMP_DIFF)
+        for(argi=1;argi<argc;++argi)
+        {
+                struct rsb_rspr_t * rspr0p = NULL;
+                RSB_PRL_SEP("\n");
+                RSB_PRL("Will compare performance records of file %d/%d: %s to that of %s (first divided by second). Warning: assuming ALL parameters are conformant\n",argi+1,argc,argv[argi],argv[0]);
+                errval = rsb__pr_load(argv[argi],&rspr0p);
+                if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		if( rsprp->csf != rspr0p->csf && RSB_PR_NOC(rsprp) == RSB_PR_NOC(rspr0p))
+                	RSB_PRL("It seems like one of the two records is incomplete!\n");
+                errval = rsb__pr_cmp(rsprp,rspr0p,ds);
+                RSB__PR_FREE(rspr0p);
+                if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+                errval = rsb__pr_dump(rsprp, rsprp->rsprap->filenamea, rsprp->rsprap->ca, rsprp->rsprap->incXa, rsprp->rsprap->incYa, rsprp->rsprap->nrhsa, rsprp->rsprap->typecodes, NULL );
+                RSB__PR_FREE(rsprp);
+                if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+                errval = rsb__pr_load(argv[0],&rsprp);
+	        if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+        }
+
+        if(ds == RSB_PRD_CMP_MDUMP)
+        for(argi=0;argi<argc;++argi)
+        {
+                RSB_PRL_SEP("\n");
+                RSB_PRL("Dumping performance records of file %d/%d: %s\n",argi+1,argc,argv[argi]);
+                errval = rsb__pr_load(argv[argi],&rsprp);
+                if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+                errval = rsb__pr_dump(rsprp, rsprp->rsprap->filenamea, rsprp->rsprap->ca, rsprp->rsprap->incXa, rsprp->rsprap->incYa, rsprp->rsprap->nrhsa, rsprp->rsprap->typecodes, NULL );
+                RSB__PR_FREE(rsprp);
+	        if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+        }
+
+        RSB_PRL_SEP("\n");
+err:
+        RSB__PR_FREE(rsprp);
+        return errval;
+}
+
+/* performance samples recording / dumping facility for rsbench : begin */
+rsb_err_t rsb__pr_init(void**rsprpv, const struct rsb_mtx_t *mtxAp, rsb_int_t filenamen, rsb_int_t cn, rsb_int_t incXn, rsb_int_t incYn, rsb_int_t nrhsn, rsb_int_t ntypecodes, rsb_int_t tn)
+{
+	/* 
+	 * initialize a performance record 
+	 * */
+	struct rsb_rspr_t * rsprp = NULL;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_int_t noc = 0; /* number of combinations */
+	size_t ab = 0; /* allocated bytes */
+	const char rsb_prl_lcc = '#';
+
+#if 1
+	if( ! rsprpv )
+       	{
+	       	errval = RSB_ERR_ENOMEM;
+	        RSB_PERR_GOTO(err,RSB_ERRM_ES);
+       	}
+        errval = rsb__pr_alloc(&rsprp, NULL, filenamen, cn, incXn, incYn, nrhsn, ntypecodes, tn);
+	if(RSB_SOME_ERROR(errval))
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	*rsprpv = rsprp;
+	noc = RSB_PR_NOC(rsprp);
+	ab = sizeof(struct rsb_rsps_t)*noc; /* sample storage, as in the fallback branch below; reported by the RSB_PRL below */
+#else
+	rsprp = rsb__calloc(sizeof(struct rsb_rspr_t));
+	if( ! rsprpv )
+       	{
+	       	errval = RSB_ERR_ENOMEM;
+	       	goto err;
+       	}
+	*rsprpv = rsprp;
+
+	rsprp->filenamen = filenamen;
+	rsprp->cn = cn;
+	rsprp->incXn = incXn;
+	rsprp->incYn = incYn;
+	rsprp->nrhsn = nrhsn;
+	rsprp->ntypecodes = ntypecodes;
+	rsprp->tn = tn;
+
+	noc = RSB_PR_NOC(rsprp);
+	ab = sizeof(struct rsb_rsps_t)*noc;
+	rsprp->psa = rsb__calloc(ab);
+	if( ! rsprp->psa )
+	{
+	       	errval = RSB_ERR_ENOMEM;
+	       	goto err;
+       	}
+#endif
+	RSB_PRL("allocated a performance record for %d samples (%zd bytes).\n",noc,ab);
+
+	return RSB_ERR_NO_ERROR;
+err:
+	RSB__PR_FREE(rsprp);
+	return errval;
+}
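+
+/*
+ * Lifecycle sketch for this facility (a non-normative illustration based on
+ * the functions in this file; error handling omitted):
+ *
+ *   void *rsprpv = NULL;
+ *   rsb__pr_init(&rsprpv, NULL, filenamen, cn, incXn, incYn, nrhsn, ntypecodes, tn);
+ *   // one rsb__pr_set() per benchmarked (file,cores,incX,incY,nrhs,type,trans) combination
+ *   rsb__pr_dump(rsprpv, filenamea, ca, incXa, incYa, nrhsa, typecodes, NULL);
+ *   RSB__PR_FREE(rsprpv);
+ */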
+
+static rsb_err_t rsb__pr_set_idx(void*rsprpv, size_t idx, const struct rsb_mtx_t *mtxAp, const struct rsb_mtx_t *at_mtxAp, rsb_trans_t transA, rsb_perf_t op_time_best, rsb_perf_t mkl_csr_op_time_best, rsb_perf_t at_op_time_best, rsb_perf_t at_mkl_csr_op_time_best, rsb_int_t at_cn, rsb_int_t at_mkl_csr_cn, rsb_time_t at_t, rsb_int_t at_eps, const struct rsb_ts_t*otposp, const struct rsb_ts_t*btposp, const struct rsb_ts_t*otpmsp, const struct rsb_ts_t*btpmsp)
+{
+	/* 
+	 * set performance record information
+	 * Note: This inner, idx-based version can be invoked by internal, index-agnostic functions.
+	 * */
+	struct rsb_rspr_t * rsprp = rsprpv;
+	
+	rsb_bool_t have_own = RSB_BOOL_FALSE;
+
+	if( rsprp->psa[idx].at_nsubm && rsprp->psa[idx].nsubm  )
+		have_own = RSB_BOOL_TRUE; /* only mkl missing */
+
+	if(RSB_CONST_IMPOSSIBLY_BIG_TIME != mkl_csr_op_time_best)
+		rsprp->psa[idx].mkl_csr_op_time = mkl_csr_op_time_best;
+	if(RSB_CONST_IMPOSSIBLY_BIG_TIME != op_time_best)
+		rsprp->psa[idx].op_time = op_time_best;
+	if(RSB_CONST_IMPOSSIBLY_BIG_TIME != at_mkl_csr_op_time_best)
+		rsprp->psa[idx].at_mkl_csr_op_time = at_mkl_csr_op_time_best;
+	if(RSB_IS_VALID_THREAD_COUNT( at_mkl_csr_cn )  )
+		rsprp->psa[idx].at_mkl_csr_cn = at_mkl_csr_cn;
+
+       	RSB_ASSIGN_IF_SP(rsprp->psa[idx].btpms,btpmsp)
+       	RSB_ASSIGN_IF_SP(rsprp->psa[idx].otpms,otpmsp)
+
+	if(!have_own)
+	{
+		if(RSB_CONST_IMPOSSIBLY_BIG_TIME != at_op_time_best)
+			rsprp->psa[idx].at_op_time = at_op_time_best;
+		if(RSB_CONST_IMPOSSIBLY_BIG_TIME != at_t)
+			rsprp->psa[idx].at_t = at_t;
+		if(RSB_IS_VALID_THREAD_COUNT( at_cn) )
+			rsprp->psa[idx].at_cn = at_cn;
+		if( -1 != at_eps ) 
+			rsprp->psa[idx].at_eps = at_eps;
+
+		rsprp->psa[idx].transA = transA;
+
+        	RSB_ASSIGN_IF_SP(rsprp->psa[idx].btpos,btposp)
+        	RSB_ASSIGN_IF_SP(rsprp->psa[idx].otpos,otposp)
+	}
+
+	if(!have_own)
+	if(mtxAp)
+	{
+		rsprp->psa[idx].cmflops = rsb__estimate_mflops_per_op_spmv_uaua(mtxAp);
+		rsprp->psa[idx].flagsA = mtxAp->flags;
+		rsprp->psa[idx].nsubm = mtxAp->all_leaf_matrices_n;
+		rsprp->psa[idx].nrA = mtxAp->nr;
+		rsprp->psa[idx].ncA = mtxAp->nc;
+		rsprp->psa[idx].nnzA = mtxAp->nnz;
+#if RSB_STORE_IDXSA
+		rsprp->psa[idx].isa = mtxAp->idxsa;
+#else
+		rsprp->psa[idx].isa = rsb__get_index_storage_amount(mtxAp);
+#endif
+	}
+
+	if( at_mtxAp == NULL )
+		at_mtxAp = mtxAp; /* FIXME: this shall be handled better  */
+
+	if(!have_own)
+	if( at_mtxAp )
+	{
+#if RSB_STORE_IDXSA
+		rsprp->psa[idx].at_isa = at_mtxAp->idxsa;
+#else
+		rsprp->psa[idx].at_isa = rsb__get_index_storage_amount(at_mtxAp);
+#endif
+		rsprp->psa[idx].at_nsubm = at_mtxAp->all_leaf_matrices_n;
+	}
+
+	if( 0 == rsprp->psa[idx].uc ) /* on the first encounter of this sample, increment the filled-samples counter */
+		rsprp->csf ++;
+
+	rsprp->psa[idx].uc ++ ;
+
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__pr_set(void*rsprpv, const struct rsb_mtx_t *mtxAp, const struct rsb_mtx_t *at_mtxAp, rsb_int_t filenamei, rsb_int_t ci, rsb_int_t incXi, rsb_int_t incYi, rsb_int_t nrhsi, rsb_int_t typecodesi, rsb_int_t ti, rsb_trans_t transA, rsb_perf_t op_time_best, rsb_perf_t mkl_csr_op_time_best, rsb_perf_t at_op_time_best, rsb_perf_t at_mkl_csr_op_time_best, rsb_int_t at_cn, rsb_int_t at_mkl_csr_cn, rsb_time_t at_t, rsb_int_t at_eps, const struct rsb_ts_t*otposp, const struct rsb_ts_t [...]
+{
+	/* 
+	 * set performance record information
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_rspr_t * rsprp = rsprpv;
+	size_t idx = rsb__pr_idx(rsprpv, filenamei, ci, incXi, incYi, nrhsi, typecodesi, ti);
+	const char rsb_prl_lcc = '#';
+#if 1
+	RSB_PRL("updating sample at index %zd (%d^th of %d), %d^th touch for (%d,%d,%d,%d,%d,%d,%d).\n",idx+1,rsprp->csf,RSB_PR_NOC(rsprp),rsprp->psa[idx].uc,filenamei,ci,incXi,incYi,nrhsi,typecodesi,ti);
+	errval = rsb__pr_set_idx(rsprpv, idx, mtxAp, at_mtxAp, transA, op_time_best, mkl_csr_op_time_best, at_op_time_best, at_mkl_csr_op_time_best, at_cn, at_mkl_csr_cn, at_t, at_eps,otposp,btposp,otpmsp,btpmsp);
+	return errval;
+#else
+	rsb_bool_t have_own = RSB_BOOL_FALSE;
+
+	if( rsprp->psa[idx].at_nsubm && rsprp->psa[idx].nsubm  )
+		have_own = RSB_BOOL_TRUE; /* only mkl missing */
+
+	if(RSB_CONST_IMPOSSIBLY_BIG_TIME != mkl_csr_op_time_best)
+		rsprp->psa[idx].mkl_csr_op_time = mkl_csr_op_time_best;
+	if(RSB_CONST_IMPOSSIBLY_BIG_TIME != op_time_best)
+		rsprp->psa[idx].op_time = op_time_best;
+	if(RSB_CONST_IMPOSSIBLY_BIG_TIME != at_mkl_csr_op_time_best)
+		rsprp->psa[idx].at_mkl_csr_op_time = at_mkl_csr_op_time_best;
+	if(RSB_IS_VALID_THREAD_COUNT( at_mkl_csr_cn )  )
+		rsprp->psa[idx].at_mkl_csr_cn = at_mkl_csr_cn;
+
+	if(!have_own)
+	{
+		if(RSB_CONST_IMPOSSIBLY_BIG_TIME != at_op_time_best)
+			rsprp->psa[idx].at_op_time = at_op_time_best;
+		if(RSB_CONST_IMPOSSIBLY_BIG_TIME != at_t)
+			rsprp->psa[idx].at_t = at_t;
+		if(RSB_IS_VALID_THREAD_COUNT( at_cn) )
+			rsprp->psa[idx].at_cn = at_cn;
+		if( -1 != at_eps ) 
+			rsprp->psa[idx].at_eps = at_eps;
+
+		rsprp->psa[idx].transA = transA;
+	}
+
+	if(!have_own)
+	if(mtxAp)
+	{
+		rsprp->psa[idx].cmflops = rsb__estimate_mflops_per_op_spmv_uaua(mtxAp);
+		rsprp->psa[idx].flagsA = mtxAp->flags;
+		rsprp->psa[idx].nsubm = mtxAp->all_leaf_matrices_n;
+		rsprp->psa[idx].nrA = mtxAp->nr;
+		rsprp->psa[idx].ncA = mtxAp->nc;
+		rsprp->psa[idx].nnzA = mtxAp->nnz;
+		rsprp->psa[idx].isa = rsb__get_index_storage_amount(mtxAp);
+	}
+
+	if( at_mtxAp == NULL )
+		at_mtxAp = mtxAp; /* FIXME: this shall be handled better  */
+
+	if(!have_own)
+	if( at_mtxAp )
+	{
+		rsprp->psa[idx].at_isa = rsb__get_index_storage_amount(at_mtxAp);
+		rsprp->psa[idx].at_nsubm = at_mtxAp->all_leaf_matrices_n;
+	}
+
+	if( 0 == rsprp->psa[idx].uc ) /* on the first encounter of this sample, increment the filled-samples counter */
+		rsprp->csf ++;
+
+	rsprp->psa[idx].uc ++ ;
+	RSB_PRL("updating sample at index %zd (%d^th of %d), %d^th touch for (%d,%d,%d,%d,%d,%d,%d).\n",idx+1,rsprp->csf,RSB_PR_NOC(rsprp),rsprp->psa[idx].uc,filenamei,ci,incXi,incYi,nrhsi,typecodesi,ti);
+
+	return RSB_ERR_NO_ERROR;
+#endif
+}
+
+#define RSB_SYMCHAR(FLAGS) ( (RSB_DO_FLAG_HAS(FLAGS,RSB_FLAG_SYMMETRIC)) ? 'S' : ( (RSB_DO_FLAG_HAS(FLAGS,RSB_FLAG_HERMITIAN)) ? 'H' : 'G') )
+#define RSB_DIV_NOT_BY_ZERO(D,Q) D = ( (Q) ? (D) / (Q) : 0.0 )
+#define RSB_UPD_TO_MAX(VAR,VAL) (VAR)=RSB_MAX((VAR),(VAL))
+#define RSB_UPD_TO_MIN(VAR,VAL) (VAR)=RSB_MIN((VAR),(VAL))
+
+#define RSB_UPD_AMM(WHAT,ACC,MIN,MAX) (ACC)+=(WHAT); RSB_UPD_TO_MIN((MIN),(WHAT)); RSB_UPD_TO_MAX((MAX),(WHAT));
+/* #define RSB_CMP_THR 1.00  */ /* 0% */
+#define RSB_CMP_THR 1.01 /* below this we ignore the difference */
+#define RSB_APE_THR 1.05 /* approximately equal or small difference  */
+#define RSB_RLD_THR 2.00 /* relevant difference threshold */
+#define RSB_HUD_THR 10.00 /* huge difference threshold */
+#define RSB_CMP_THR_EXP /* "nearly same" threshold */ rsb__getenv("RSB_CMP_THR") ? rsb__util_atof(rsb__getenv("RSB_CMP_THR")): RSB_CMP_THR
+#define RSB_APE_THR_EXP /* "approximately close" threshold */ rsb__getenv("RSB_APE_THR") ? rsb__util_atof(rsb__getenv("RSB_APE_THR")): RSB_APE_THR
+#define RSB_RLD_THR_EXP /* "relevant difference" threshold */ rsb__getenv("RSB_RLD_THR") ? rsb__util_atof(rsb__getenv("RSB_RLD_THR")): RSB_RLD_THR
+#define RSB_HUD_THR_EXP /* "huge difference" threshold */ rsb__getenv("RSB_HUD_THR") ? rsb__util_atof(rsb__getenv("RSB_HUD_THR")): RSB_HUD_THR
+
+#define RSB_FSTR_THN_THR(T1,T2,CMPT) ( (T1) * (CMPT) < (T2) ) /* faster only according to a small threshold; e.g. CMPT = RSB_CMP_THR */
+#define RSB_SLWR_THN_THR(T1,T2) ( (T1) * RSB_CMP_THR >=(T2) && (T1) != (T2)  ) /* slower only according to a small threshold */
+#define RSB_APPROX_EQUAL(T1,T2,APPT) ( ( (T1) * (APPT) >= (T2) ) && ( (T2) * (APPT) >= (T1) ) ) /* e.g. APPT = RSB_APE_THR  */
+#define RSB_BOTH_FINITE(T1,T2)  ( ((T1)!=RSB_TIME_ZERO) && ((T2)!=RSB_TIME_ZERO) )
+#define RSB_BIG_DIFF(T1,T2,RLDT) ( ( (T1) * (RLDT) < (T2) ) || ( (T2) * (RLDT) < (T1) ) ) /* e.g. RLDT = RSB_RLD_THR */
+#define RSB_HUGE_DIFF(T1,T2,HGDT) ( ( (T1) * (HGDT) < (T2) ) || ( (T2) * (HGDT) < (T1) ) ) /* e.g. HGDT = RSB_HUD_THR  */
+#define RSB_MIN_FINITE(X,Y) ( ((X)==(Y)) ? (X) : ( RSB_MIN(RSB_MAX(X,RSB_TIME_ZERO),RSB_MAX(Y,RSB_TIME_ZERO))) )
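+
+/*
+ * Worked example for the thresholds above (timings are illustrative): with
+ * T1=1.00s and T2=1.03s, RSB_FSTR_THN_THR(T1,T2,RSB_CMP_THR) holds
+ * (1.00*1.01 < 1.03), and so does RSB_APPROX_EQUAL(T1,T2,RSB_APE_THR)
+ * (both times within the 5% band); with T2=2.50s instead,
+ * RSB_BIG_DIFF(T1,T2,RSB_RLD_THR) holds too, as the ratio exceeds 2.00.
+ */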
+
+static void rsb__mtxfn_bncp(char* dst, const char*src, int lm)
+{
+	/* 
+	 * Matrix file name base name copy.
+	 * This shall NOT be a library function!
+	 * */
+	size_t sl = 0;
+		
+	if(!dst || !src)
+		goto ret;
+
+	if(lm==0)
+		rsb__strcpy(dst,rsb__basename(src));
+	else
+	{
+		/* LaTeX underscore sanitization */
+		const char*sp = rsb__basename(src);
+		char*dp = dst;
+		while(*sp)
+		{
+			*dp = *sp;
+			if(*dp=='_')
+			{
+				dp[0] = '\\';
+				dp[1] = '_';
+				++dp;
+			}
+			++sp;
+			++dp;
+		}
+		*dp = *sp;
+	}
+	sl = rsb__strlen(dst);
+	if( sl >= 7 && strcmp(dst+sl-7,".mtx.gz") == 0 )
+		dst[sl-7] = '\0';
+	else
+	if( sl >= 4 && strcmp(dst+sl-4,".mtx") == 0 )
+		dst[sl-4] = '\0';
+ret:
+	return;
+}
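+
+/*
+ * Effect of rsb__mtxfn_bncp on a made-up path:
+ *   ("/tmp/my_mtx.mtx.gz", lm==0) -> "my_mtx"
+ *   ("/tmp/my_mtx.mtx.gz", lm!=0) -> "my\_mtx"  (underscore escaped for LaTeX)
+ */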
+
+static rsb_err_t rsb__pr_dump_sample(const void*rsprpv, const rsb_char_t**filenamea, rsb_int_t*ca, const rsb_int_t*incXa, const rsb_int_t*incYa, const rsb_int_t*nrhsa, const rsb_type_t*typecodes, const rsb_int_t*ta, const int*filenameifp, const int*ifilenameifp, const int*cifp , const int*incXifp , const int*incYifp , const int*nrhsifp , const int*typecodefip , const int*tifp, const rsb_trans_t*tfp, rsb_flags_t flagsA, rsb_flags_t nflagsA, rsb_int_t filenamei, rsb_int_t ci, rsb_int_t inc [...]
+{
+        /* TODO: may print a different record if( rsprp->ror == RSB_BOOL_TRUE ) */
+
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	const struct rsb_rspr_t * rsprp = rsprpv;
+
+	{
+		size_t idx = rsb__pr_idx(rsprpv, filenamei, ci, incXi, incYi, nrhsi, typecodesi, ti);
+		struct rsb_rsps_t*psp = &(rsprp->psa[idx]);
+		rsb_int_t nnzA = psp->nnzA, nrA = psp->nrA, ncA = psp->ncA;
+		rsb_char_t fnbuf[RSB_MAX_FILENAME_LENGTH];
+		rsb_perf_t brsb_op_time = psp->op_time;
+		rsb_perf_t bmkl_op_time = psp->mkl_csr_op_time;
+
+		rsb__mtxfn_bncp(fnbuf,filenamea[filenamei],wltm);
+		
+		if( psp->at_op_time != RSB_TIME_ZERO )
+			brsb_op_time = RSB_MIN( brsb_op_time, psp->at_op_time );
+
+		if( psp->at_mkl_csr_op_time != RSB_TIME_ZERO )
+			bmkl_op_time = RSB_MIN( bmkl_op_time, psp->at_mkl_csr_op_time );
+#if 0
+                /* TODO: will have to start here for the detailed stats: */ 
+                RSB_STAT_DUMP_TS(psp->otpos); 
+                RSB_STAT_DUMP_TS(psp->btpos); 
+                RSB_STAT_DUMP_TS(psp->otpms); 
+                RSB_STAT_DUMP_TS(psp->btpms);
+#endif
+        	if(rds==RSB_PRD_STYLE_TBL)
+		{
+        		double cmpt = RSB_CMP_THR_EXP, appt = RSB_APE_THR_EXP, rldt = RSB_RLD_THR_EXP, hgdt = RSB_HUD_THR_EXP;
+			const char * ss = RSB_PRL_FSEPSTR_IE; /* separator string */
+			const char * ts = RSB_PRL_ENDLSTR_IE; /* terminator string */
+			const char * rcc = "\\cellcolor{red}"; /* red colored cell */
+			const char * gcc = "\\cellcolor{PaleGreen1}"; /* requires x11names */
+			const char * Gcc = "\\cellcolor{green}"; /* green (greener!) colored cell */
+			//const char * gcc = "\\cellcolor{green}"; /* green colored cell */
+			const char * bcc = "\\cellcolor{blue}"; /* blue colored cell */
+			//const char * ycc = "\\cellcolor{yellow}"; /* yellow colored cell */
+			const char * ycc = "\\cellcolor{LightGoldenrod1}"; /* requires x11names */
+			const char * pcc = "\\cellcolor{pink}"; /* pink colored cell */
+			const char * ncc = ""; /* no color cell  (no LaTeX Markup) */
+			const char * bfs = "\\bfseries "; /* bold font series */
+			const char * rlns = psp->mkl_csr_op_time ? RSB_ON_IF_LEM( psp->op_time*rldt ,psp->mkl_csr_op_time, bfs , bfs, ncc) : ""; /* relevant non-autotuned speedup over mkl */
+			const char * rlas = psp->at_mkl_csr_op_time ? RSB_ON_IF_LEM( psp->at_op_time*rldt ,psp->at_mkl_csr_op_time,bfs , bfs, ncc) : ""; /* relevant autotuned speedup over mkl */
+
+			if(wltm < 2)
+				rcc = gcc = Gcc = bcc = ycc = pcc = bfs = rlns = rlas = ncc; /* nullify LaTeX markup */
+
+			RSB_STDOUT("%s", RSB_ON_IF_LEM(psp->at_op_time, psp->at_mkl_csr_op_time , gcc, ncc, rcc));/* EXPERIMENTAL */
+			//RSB_STDOUT("%s", RSB_ON_IF_LEM(psp->at_op_time, psp->at_mkl_csr_op_time , RSB_ON_IF_LEM( psp->at_op_time*rldt ,psp->at_mkl_csr_op_time,Gcc , gcc, gcc), ncc, rcc));/* EXPERIMENTAL */
+			RSB_STDOUT("%s",ss);
+			RSB_STDOUT("%s%s%d%s%d%s%d%s",
+				fnbuf,ss,
+				nrA,ss,
+				ncA,ss,
+				nnzA,ss
+			);
+			if(rsprp->incXn > 1 && rsprp->incYn > 1)
+			RSB_STDOUT("%d%s%d%s",
+				incXa[incXi],ss,
+				incYa[incYi],ss
+			);
+			RSB_STDOUT("%d%s%c%s%c%s%c%s",
+			       	nrhsa[nrhsi],ss,
+			       	typecodes[typecodesi],ss,
+				RSB_SYMCHAR(psp->flagsA),ss,
+				RSB_TRANSPOSITION_AS_CHAR(psp->transA),ss
+			);
+			RSB_STDOUT(
+				"%2d%s%s%2d%s%s%2d%s",
+				ca[ci],ss,
+				ca[ci] == psp->at_cn ? ncc : (ca[ci] / 2 >= psp->at_cn ? rcc : ycc),/* EXPERIMENTAL */
+				psp->at_cn,ss,
+				ca[ci] == psp->at_mkl_csr_cn ? ncc : (ca[ci] / 2 >= psp->at_mkl_csr_cn ? rcc : ycc),/* EXPERIMENTAL */
+				psp->at_mkl_csr_cn,ss
+				);
+			RSB_STDOUT(
+				"%.4lf%s%s%.4lf%s",
+				((rsb_perf_t)(psp->isa))/nnzA,ss,
+				RSB_ON_IF_LEM(psp->at_isa , psp->isa , gcc, ncc, pcc),/* EXPERIMENTAL */
+				((rsb_perf_t)(psp->at_isa))/nnzA,ss
+				);
+			RSB_STDOUT(
+				"%d%s%s%d%s",
+				psp->nsubm,ss,
+				RSB_ON_IF_LEM(psp->at_nsubm , psp->nsubm , gcc, ncc, pcc),/* EXPERIMENTAL */
+				psp->at_nsubm,ss
+				);
+			RSB_STDOUT(
+				"%.2lf%s%2.3le%s%s%2.3le%s",
+                                RSB_XFLOPS(brsb_op_time,nrhsa[nrhsi],psp->cmflops),ss,
+				psp->op_time,ss,
+				rlns,
+				psp->mkl_csr_op_time,ss
+				);
+			RSB_STDOUT(
+				"%s%2.3le%s%s%s%2.3le%s%2.3le%s",
+				RSB_ON_IF_LEM(psp->at_op_time, psp->op_time, gcc, ncc, pcc),/* EXPERIMENTAL */
+				psp->at_op_time,ss,
+				RSB_ON_IF_LEM(psp->at_mkl_csr_op_time, psp->mkl_csr_op_time, gcc, ncc, pcc),/* EXPERIMENTAL */
+				rlas,
+				psp->at_mkl_csr_op_time,ss,
+				psp->at_t,ss
+				);
+#if RSB_PRD_WANT_CODE_BALANCE_AND_BANDWIDTH
+{
+			size_t so = RSB_SIZEOF_BACKUP(typecodes[typecodesi]); /* size of */
+			size_t mo = (so*psp->nnzA+((rsb_perf_t)(psp->at_isa))); /* matrix occupation */
+			size_t oo = so*nrhsa[nrhsi]*(psp->nrA+psp->ncA); /* operands occupation */
+			size_t owt = so*nrhsa[nrhsi]*((RSB_DOES_TRANSPOSE(psp->transA)?0:1)*psp->nrA+(RSB_DOES_TRANSPOSE(psp->transA)?1:0)*psp->ncA); /* operands write traffic */
+			/* size_t mrt = oo + mo; */ /* minimal read traffic */
+			size_t mwt = oo + mo + owt; /* minimal read+write traffic */
+			/* rsb_perf_t mrb = ((rsb_perf_t)mrt)/(psp->at_op_time*1e9);*/ /* minimal read bandwidth, GBps */
+			rsb_perf_t mwb = ((rsb_perf_t)mwt)/(psp->at_op_time*1e9); /* minimal read+write bandwidth, GBps */
+			/* RSB_STDOUT( "%3.2le%s", mrb, ss); */ /* BW/RDminBWIDTH: minimal bandwidth, GB/s */
+			RSB_STDOUT( "%3.2le%s", mwb, ss); /* BW/RWminBW: minimal bandwidth, GB/s */
+}
+{
+			size_t so = RSB_SIZEOF_BACKUP(typecodes[typecodesi]); /* size of */
+			size_t mo = (so*psp->nnzA+((rsb_perf_t)(psp->at_isa))); /* matrix occupation */
+			size_t oo = so*nrhsa[nrhsi]*(nrA+ncA); /* operands occupation */
+			size_t mt = oo + mo; /* minimal traffic */
+			rsb_perf_t om = 1e6 *(psp->cmflops * nrhsa[nrhsi]); /* operation flops */
+			rsb_perf_t bm = (1.0 / om) * mt; /* bytes per flop */
+			RSB_STDOUT( "%3.2le%s", bm, ss); /* CB: code balance, bpf = bytes per flop */
+}
+#endif /* RSB_PRD_WANT_CODE_BALANCE_AND_BANDWIDTH */
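+			/*
+			 * Illustrative magnitudes for the two blocks above (made-up
+			 * sample): a double matrix (so=8) with nnzA=1e6, at_isa=2e6 B,
+			 * nrA=ncA=1e5, nrhs=1, cmflops=2 gives mo=1e7 B, oo=1.6e6 B,
+			 * mt=1.16e7 B and om=2e6 flops, hence bm ~= 5.8 bytes per flop.
+			 */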
+			RSB_STDOUT(
+				"%s%d%s%3.2le%s\n",
+				psp->at_eps == 0 ? rcc : ncc,/* EXPERIMENTAL */
+				psp->at_eps,ss,
+				psp->cmflops*nrhsa[nrhsi],
+				ts
+			);
+			if(wltm > 1)
+				RSB_STDOUT("%%...\n");
+
+		}
+
+        	if(rds==RSB_PRD_STYLE_CMP)
+                {
+		RSB_STDOUT("%d:%s %d %d %d %d %d %d %c %c %c",
+				filenamei,fnbuf, 
+				nrA,ncA,nnzA,
+				incXa[incXi], 
+				incYa[incYi],
+			       	nrhsa[nrhsi],
+			       	typecodes[typecodesi],
+				RSB_SYMCHAR(psp->flagsA),
+				RSB_TRANSPOSITION_AS_CHAR(psp->transA)
+			);
+		RSB_STDOUT(" %.2lf", bmkl_op_time/ brsb_op_time);
+		RSB_STDOUT(" %.2lf %.2lf %.2lf %.2lf",
+                                RSB_XFLOPS(brsb_op_time,nrhsa[nrhsi],psp->cmflops),
+                                (psp->        op_time/brsb_op_time),
+                                RSB_XFLOPS(bmkl_op_time,nrhsa[nrhsi],psp->cmflops),
+                                (psp->mkl_csr_op_time/bmkl_op_time)
+			);
+		RSB_STDOUT("\n");
+                }
+	}
+	return errval;
+}
+
+static rsb_err_t rsb__pr_filter(struct rsb_rsps_t*psp, const rsb_int_t*ta, const int*filenameifp, const int*ifilenameifp, const int*cifp , const int*incXifp , const int*incYifp , const int*nrhsifp , const int*typecodefip , const int*tifp, const rsb_trans_t*tfp, rsb_flags_t flagsA, rsb_flags_t nflagsA,
+	       	rsb_int_t filenamei, rsb_int_t ci, rsb_int_t incXi, rsb_int_t incYi, rsb_int_t nrhsi, rsb_int_t typecodesi, rsb_int_t ti)
+{
+		if( filenameifp && ( *filenameifp != filenamei  ) )
+			goto skipit;
+		if(ifilenameifp && (*ifilenameifp <  filenamei  ) )
+			goto skipit;
+		if( cifp        && (        *cifp != ci         ) )
+			goto skipit;
+		if( incXifp     && (     *incXifp != incXi      ) )
+			goto skipit;
+		if( incYifp     && (     *incYifp != incYi      ) )
+			goto skipit;
+		if( nrhsifp     && (     *nrhsifp !=    nrhsi   ) )
+			goto skipit;
+		if( typecodefip && ( *typecodefip != typecodesi ) )
+			goto skipit;
+		if( ( flagsA != RSB_FLAG_NOFLAGS) && ! (psp->flagsA &  flagsA) )
+			goto skipit;
+		if( (nflagsA != RSB_FLAG_NOFLAGS) &&   (psp->flagsA & nflagsA) )
+			goto skipit;
+		if(ta)
+		{
+			if( tifp        && (     *tifp    !=    ti      ) )
+				goto skipit;
+		}
+		else
+		{
+			if( tfp         && (      *tfp    !=    psp->transA ) )
+				goto skipit;
+		}
+
+		if( !( psp->uc > 0 && psp->uc < 3 ) )
+			goto skipit; /* we skip this iteration's sample */
+
+		return RSB_ERR_NO_ERROR;
+skipit:
+		return RSB_ERR_GENERIC_ERROR;
+}
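+
+/*
+ * Filter semantics, in brief: a non-NULL filter pointer restricts samples to a
+ * single index value (e.g. with int one = 1, passing nrhsifp = &one keeps only
+ * nrhs index 1), except ifilenameifp, which acts as an inclusive upper bound;
+ * flagsA/nflagsA respectively require/forbid matrix flags; samples whose
+ * update count is not 1 or 2 are always skipped.
+ */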
+
+rsb_err_t rsb__pr_dump_inner(const void*rsprpv, const rsb_char_t**filenamea, rsb_int_t*ca, const rsb_int_t*incXa, const rsb_int_t*incYa, const rsb_int_t*nrhsa, const rsb_type_t*typecodes, const rsb_int_t*ta, const int*filenameifp, const int*ifilenameifp, const int*cifp , const int*incXifp , const int*incYifp , const int*nrhsifp , const int*typecodefip , const int*tifp, const rsb_trans_t*tfp, rsb_flags_t flagsA, rsb_flags_t nflagsA, rsb_char_t *ltag)
+{
+	/*
+	 * dump a performance record (inner function).
+	 * TODO: may introduce strict checks or verbosity options.
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	const struct rsb_rspr_t * rsprp = rsprpv;
+	rsb_int_t filenamei, ci, incXi, incYi, nrhsi, typecodesi, ti;
+	rsb_int_t nocsa = 0; /* number of considered samples */
+	rsb_int_t noats = 0, noatf = 0; /* number of auto tuning successes / failures */
+	rsb_int_t ntufm = 0, ntusm = 0; /* number (of) times untuned rsb (was) faster/slower (than) mkl */
+	rsb_int_t nttfm = 0, nttsm = 0; /* number (of) times  tuned  rsb (was) faster/slower (than   tuned) mkl */
+	rsb_int_t nttfu = 0, nttsu = 0; /* number (of) times  tuned  rsb (was) faster/slower (than untuned) mkl */
+	rsb_int_t ntmfm = 0, ntmsm = 0; /* number (of times)  tuned mkl (was) faster/slower (than untuned) mkl */
+	rsb_int_t ntasl = 0, ntasm = 0, ntase = 0; /* number (of times)  autotuned (was) subdivided less/more/equally */
+	rsb_int_t ntatl = 0, ntatm = 0, ntate = 0; /* number (of times)  autotuned used  less/more/equal threads */
+	rsb_int_t ntsrf = 0; /* number of times multi-nrhs rsb was faster (than rsb unstrided)  */
+	rsb_int_t ntsmf = 0; /* number of times multi-nrhs mkl was faster (than mkl unstrided)  */
+	rsb_int_t vscm = 0; /* valid samples containing mkl */
+	double aoatsp = 0.0; /* average of auto tuning speedup (percentage) */
+	double aoatsr = 0.0; /* average of auto tuning speedup (ratio) */
+	double aoatac = 0.0, miatac = RSB_CONST_IMPOSSIBLY_BIG_TIME, maatac = 0.0; /* average,min,max of auto tuning amortization cost */
+	double aoatam = 0.0, miatam = RSB_CONST_IMPOSSIBLY_BIG_TIME, maatam = 0.0; /* average,min,max of auto tuning amortization to (tuned) mkl (cost) */
+	double aoatau = 0.0, miatau = RSB_CONST_IMPOSSIBLY_BIG_TIME, maatau = 0.0; /* average,min,max of auto tuning amortization to (untuned) mkl (cost) */
+	double aoatuo = 0.0, miatuo = RSB_CONST_IMPOSSIBLY_BIG_TIME, maatuo = 0.0, toatuo = 0.0; /* average,min,max,total of auto tuning untuned ops */
+	double aoatto = 0.0, miatto = RSB_CONST_IMPOSSIBLY_BIG_TIME, maatto = 0.0, toatto = 0.0; /* average,min,max,total of auto tuning  tuned  ops */
+	double aouatc = 0.0, miuatc = RSB_CONST_IMPOSSIBLY_BIG_TIME, mauatc = 0.0, touatc = 0.0; /* average,min,max,total of unsuccessful auto tuning cost  */
+	double aotstm = 0.0; /* average of  tuned  speedup with respect to mkl  */
+	double aotstu = 0.0; /* average of  tuned  speedup with respect to (untuned) mkl  */
+	double aoustm = 0.0; /* average of untuned speedup with respect to mkl  */
+	double aotssm = 0.0; /* average of  tuned  speedup (of mkl to rsb, when) slower (than) mkl */
+	double aoussm = 0.0; /* average of untuned speedup (of mkl to rsb, when) slower (than) mkl */
+	double aotsmm = 0.0; /* average of  tuned  speedup (of mkl to ) to mkl (always) */
+	double msurwm = 0.0; /* maximal speedup (of) untuned rsb w.r.t. mkl  */
+	double mstrwm = 0.0; /* maximal speedup (of)   tuned rsb w.r.t. mkl  */
+	double mstrwu = 0.0; /* maximal speedup (of)   tuned rsb w.r.t. untuned mkl  */
+	double mstmwm = 0.0; /* maximal speedup (of)   tuned mkl w.r.t. mkl  */
+	double msumwr = 0.0; /* maximal speedup (of) untuned mkl w.r.t. rsb  */
+	double mstmwr = 0.0; /* maximal speedup (of)   tuned mkl w.r.t. rsb  */
+	double mstrwr = 0.0; /* maximal speedup (of)   tuned rsb w.r.t. rsb  */
+	double aoratt = 0.0, miratt = RSB_CONST_IMPOSSIBLY_BIG_TIME, maratt = 0.0, toratt = 0.0; /* average,min,max,total of rsb auto tuning time */
+	double aosatt = 0.0, misatt = RSB_CONST_IMPOSSIBLY_BIG_TIME, masatt = 0.0, tosatt = 0.0; /* average,min,max,total of rsb auto tuning time (when   successful) */
+	double aouatt = 0.0, miuatt = RSB_CONST_IMPOSSIBLY_BIG_TIME, mauatt = 0.0, touatt = 0.0; /* average,min,max,total of rsb auto tuning time (when unsuccessful) */
+	/* double aomatt = 0.0, mimatt = RSB_CONST_IMPOSSIBLY_BIG_TIME, mamatt = 0.0, tomatt = 0.0;*/ /* average,min,max,total of mkl auto tuning time */
+	double avrmps = 0.0, mirmps = RSB_CONST_IMPOSSIBLY_BIG_TIME, marmps = 0.0; /* average,min,max rsb (canonical) mflops per second (tuned) */
+	double avRmps = 0.0, miRmps = RSB_CONST_IMPOSSIBLY_BIG_TIME, maRmps = 0.0; /* average,min,max rsb (canonical) mflops per second (untuned) */
+	double avmmps = 0.0, mimmps = RSB_CONST_IMPOSSIBLY_BIG_TIME, mammps = 0.0; /* average,min,max mkl (canonical) mflops per second (tuned) */
+	double avMmps = 0.0, miMmps = RSB_CONST_IMPOSSIBLY_BIG_TIME, maMmps = 0.0; /* average,min,max mkl (canonical) mflops per second (untuned) */
+	double aorott = 0.0, mirott = RSB_CONST_IMPOSSIBLY_BIG_TIME, marott = 0.0, torott = 0.0; /* average,min,max,total of rsb auto operation time (tuned) */
+	double aoRott = 0.0, miRott = RSB_CONST_IMPOSSIBLY_BIG_TIME, maRott = 0.0, toRott = 0.0; /* average,min,max,total of rsb auto operation time (untuned) */
+	double aomott = 0.0, mimott = RSB_CONST_IMPOSSIBLY_BIG_TIME, mamott = 0.0, tomott = 0.0; /* average,min,max,total of mkl auto operation time (tuned) */
+	double aoMott = 0.0, miMott = RSB_CONST_IMPOSSIBLY_BIG_TIME, maMott = 0.0, toMott = 0.0; /* average,min,max,total of mkl auto operation time (untuned) */
+	double avrsmv = 0.0, mirsmv = RSB_CONST_IMPOSSIBLY_BIG_TIME, marsmv = 0.0; /* average,min,max of rsb speedup for multi-vector */
+	double avmsmv = 0.0, mimsmv = RSB_CONST_IMPOSSIBLY_BIG_TIME, mamsmv = 0.0; /* average,min,max of mkl speedup for multi-vector */
+	double avnzbt = 0.0, minzbt = RSB_CONST_IMPOSSIBLY_BIG_TIME, manzbt = 0.0; /* average,min,max of nnz p.s. in untuned before tuning */
+	double avnzat = 0.0, minzat = RSB_CONST_IMPOSSIBLY_BIG_TIME, manzat = 0.0; /* average,min,max of nnz p.s. in successfully tuned */
+	double avbybt = 0.0, mibybt = RSB_CONST_IMPOSSIBLY_BIG_TIME, mabybt = 0.0; /* average,min,max of bytes p.s. in untuned before tuning */
+	double avbyat = 0.0, mibyat = RSB_CONST_IMPOSSIBLY_BIG_TIME, mabyat = 0.0; /* average,min,max of bytes p.s. in successfully tuned */
+	double avbpna = 0.0, mibpna = RSB_CONST_IMPOSSIBLY_BIG_TIME, mabpna = 0.0; /* average,min,max of bytes p.nnz in untuned before tuning */
+	double avbpnb = 0.0, mibpnb = RSB_CONST_IMPOSSIBLY_BIG_TIME, mabpnb = 0.0; /* average,min,max of bytes p.nnz in successfully tuned */
+#if RSB_PRD_WANT_CODE_BALANCE_AND_BANDWIDTH
+	double avlorb = 0.0, milorb = RSB_CONST_IMPOSSIBLY_BIG_TIME, malorb = 0.0; /* average,min,max (autotuned) liminal/minimal operands reading bandwidth */
+	double avlowb = 0.0, milowb = RSB_CONST_IMPOSSIBLY_BIG_TIME, malowb = 0.0; /* average,min,max (autotuned) liminal/minimal operands   r/w   bandwidth */
+	double avcoba = 0.0, micoba = RSB_CONST_IMPOSSIBLY_BIG_TIME, macoba = 0.0; /* average,min,max code balance */
+#endif /* RSB_PRD_WANT_CODE_BALANCE_AND_BANDWIDTH */
+	rsb_int_t noc = 0;
+        rsb_int_t phase = 0; /* first count samples, then dump */
+        double cmpt = RSB_CMP_THR_EXP, appt = RSB_APE_THR_EXP, rldt = RSB_RLD_THR_EXP, hgdt = RSB_HUD_THR_EXP;
+        const rsb_int_t wdbg = 0; /* want debug */
+        int rds = rsb__getenv("RSB_PR_SR") ? rsb__util_atoi(rsb__getenv("RSB_PR_SR")) : RSB_PRD_STYLE_TBL;
+        int wltm = rsb__getenv("RSB_PR_WLTC") ? rsb__util_atoi(rsb__getenv("RSB_PR_WLTC")) : 0; /* Want LaTeX tables mode */
+	const char * ss = RSB_PRL_FSEPSTR_IE; /* separator string */
+	const char * ts = RSB_PRL_ENDLSTR_IE; /* terminator string */
+	const char rsb_prl_lcc = RSB_PRL_LCC_IE;
+	const char*rsb_prl_tcs = RSB_PRL_TCS_IE;
+
+	if(0 /* FIXME: move this notice to an outer function */)
+        if(rds!=RSB_PRD_STYLE_TBL)
+	{
+        	RSB_PRL("Further environment variables:\n");
+        	RSB_PRL("RSB_CMP_THR\n");
+        	RSB_PRL("RSB_APE_THR\n");
+        	RSB_PRL("RSB_RLD_THR\n");
+        	RSB_PRL("RSB_HUD_THR\n");
+	}
+
+	RSB_DEBUG_ASSERT(rsprp);
+
+	noc = RSB_PR_NOC(rsprp);
+gop2:
+        if(phase == 1)
+	{
+		if(rds==RSB_PRD_STYLE_TBL && wltm > 0)
+		{
+			rsb_char_t fnbuf[RSB_MAX_FILENAME_LENGTH];
+			rsb__mtxfn_bncp(fnbuf,rsb__basename(ltag),1);
+			if(ltag)
+				RSB_PRT("\\section{Record: %s}\n",fnbuf);
+		}
+        	RSB_PRL("Dump from a base of %d samples (of max %d) ordered by ",rsprp->csf,noc);
+        	RSB_STDOUT("(%d,%d,%d,%d,%d,%d,%d) = (%s).\n",
+        		rsprp->filenamen, rsprp->cn, rsprp->incXn, rsprp->incYn,
+        	       	rsprp->nrhsn, rsprp->ntypecodes, rsprp->tn,
+        		"filename x cores x incX x incY x nrhs x typecode x transA");
+        if(rds==RSB_PRD_STYLE_TBL)
+        {
+		if(wltm > 0)
+			//RSB_PRT("\\begin{table}[ht]\\begin{center}\\begin{tabular}{r*{26}{r}r}\\hline\n");
+			RSB_PRT("\\begin{longtabu}{r*{26}{r}r}\\hline\n");
+        	if(!ifilenameifp) /* no printout of records in this mode */
+        	{
+			// RSB_PRL("Each sample:\n"); 
+			RSB_PRT("BESTCODE%sMTX%sNR%sNC%sNNZ%s", ss,ss,ss,ss,ss);
+			if(rsprp->incXn > 1 && rsprp->incYn > 1)
+				RSB_PRT("INCX%sINCY%s",ss,ss);
+			RSB_PRT("NRHS%sTYPE%sSYM%sTRANS%sNT%sAT-NT%sAT-MKL-NT%sBPNZ%sAT-BPNZ%sNSUBM%sAT-SUBM%sRSBBEST-MFLOPS%sOPTIME%sMKL-OPTIME%sAT-OPTIME%sAT-MKL-OPTIME%sAT-TIME%s""RWminBW-GBps%s""CB-bpf%sAT-MS%sCMFLOPS%s\n",ss,ss,ss,ss,ss,ss,ss,ss,ss, ss,ss,ss,ss,ss,ss,ss,ss,ss,ss, ss,ts); /* FIXME: RWminBW and CB depend on RSB_PRD_WANT_CODE_BALANCE_AND_BANDWIDTH=0 */
+        	}
+		if(wltm > 0)
+			RSB_PRT("\\hline\n");
+        }
+        if(rds==RSB_PRD_STYLE_CMP)
+        {
+        	if(!ifilenameifp) /* no printout of records in this mode */
+        	{
+        		RSB_PRL("Each sample: BESTCODE MTX NR NC NNZ INCX INCY NRHS TYPE SYM TRANS MKL_OP_T/RSB_OP_T RSB_OP_T RSB_MFLOPS MKL_OP_T MKL_MFLOPS\n");
+        	}
+        }
+	}
+
+	for(     filenamei=0;     filenamei<rsprp->filenamen ;++filenamei     )
+	for(ci=0;ci<rsprp->cn;++ci)
+	for(     incXi=0;     incXi<rsprp->incXn     ;++incXi     )
+	for(     incYi=0;     incYi<rsprp->incYn     ;++incYi     )
+	for(     nrhsi=0;     nrhsi<rsprp->nrhsn     ;++nrhsi     )
+	for(typecodesi=0;typecodesi<rsprp->ntypecodes;++typecodesi)
+	for(ti=0;ti<rsprp->tn;++ti)
+	{
+		size_t idx = rsb__pr_idx(rsprpv, filenamei, ci, incXi, incYi, nrhsi, typecodesi, ti);
+		struct rsb_rsps_t*psp = &(rsprp->psa[idx]);
+		/* rsb_int_t nnzA = psp->nnzA, nrA = psp->nrA, ncA = psp->ncA; */
+		rsb_bool_t atweost = ( ( psp->nsubm != psp->at_nsubm ) && ( psp->at_nsubm != 0 ) ); /* autotuning was effective on structure */
+		rsb_bool_t atweoth = ( ( ca[ci] != psp->at_cn ) && ( psp->at_cn != 0 ) ); /* autotuning was effective on threads   */
+		rsb_bool_t atweoti = ( ( psp->op_time > psp->at_op_time ) && ( psp->at_op_time != RSB_TIME_ZERO ) ); /* autotuning was effective on time (this is here for testing purposes) */
+		rsb_bool_t atwe = RSB_BOOL_OR/*RSB_BOOL_AND*/(atweoti,RSB_BOOL_OR(atweost,atweoth));  /* autotuning was effective; 'or' condition is for testing purposes */
+#if 1
+		if( RSB_ERR_NO_ERROR != rsb__pr_filter(psp, ta, filenameifp, ifilenameifp, cifp , incXifp , incYifp , nrhsifp , typecodefip , tifp, tfp, flagsA, nflagsA, filenamei, ci, incXi, incYi, nrhsi, typecodesi, ti) )
+			continue;
+#else
+		/* obsolete code */
+		if( filenameifp && ( *filenameifp != filenamei  ) )
+			continue;
+		if(ifilenameifp && (*ifilenameifp <  filenamei  ) )
+			continue;
+		if( cifp        && (        *cifp != ci         ) )
+			continue;
+		if( incXifp     && (     *incXifp != incXi      ) )
+			continue;
+		if( incYifp     && (     *incYifp != incYi      ) )
+			continue;
+		if( nrhsifp     && (     *nrhsifp !=    nrhsi   ) )
+			continue;
+		if( typecodefip && ( *typecodefip != typecodesi ) )
+			continue;
+		if( ( flagsA != RSB_FLAG_NOFLAGS) && ! (psp->flagsA &  flagsA) )
+			continue;
+		if( (nflagsA != RSB_FLAG_NOFLAGS) &&   (psp->flagsA & nflagsA) )
+			continue;
+		if(ta)
+		{
+			if( tifp        && (     *tifp    !=    ti      ) )
+				continue;
+		}
+		else
+		{
+			if( tfp         && (      *tfp    !=    psp->transA ) )
+				continue;
+		}
+#endif
+		
+	if( psp->uc > 2 )
+	{
+		errval = RSB_ERR_INTERNAL_ERROR;
+		RSB_ERROR("Updates count (%d) illegal --- max is 2 (internal error)!\n",psp->uc);
+	       	goto ret;
+	}
+
+	if( psp->uc > 0 && psp->uc < 3 )
+		nocsa ++; /* we consider this iteration's sample */
+	else
+		continue; /* we skip this iteration's sample */
+
+        if( phase == 0)
+                continue; /* we only want to evaluate nocsa */
+
+	if( psp->mkl_csr_op_time )
+		vscm++;
+
+	if(!ifilenameifp) /* no printout of records in this mode */
+	if( psp->uc > 0 && psp->uc < 3 ) /* TODO: now useless loop; merge this ... */
+	{
+		rsb_char_t fnbuf[RSB_MAX_FILENAME_LENGTH];
+		rsb_char_t usdc,asdc,csdc = '_'; /* untuned/autotuned/comparison  score descriptor char  */
+		rsb_time_t best_rsb_op_time = RSB_MIN_FINITE(psp->op_time,        psp->at_op_time        );
+		rsb_time_t best_mkl_op_time = RSB_MIN_FINITE(psp->mkl_csr_op_time,psp->at_mkl_csr_op_time);
+
+		usdc = ( psp->op_time    <    psp->mkl_csr_op_time || psp->mkl_csr_op_time == RSB_TIME_ZERO )?(psp->op_time > psp->at_op_time ? 'R' : 'r' ) : ( psp->mkl_csr_op_time > psp->at_mkl_csr_op_time ? 'M' : 'm' );
+		asdc = ( psp->at_op_time < psp->at_mkl_csr_op_time || psp->at_mkl_csr_op_time == RSB_TIME_ZERO )?(psp->op_time > psp->at_op_time ? 'R' : 'r' ) : ( psp->mkl_csr_op_time > psp->at_mkl_csr_op_time ? 'M' : 'm' );
+	
+		if( RSB_APPROX_EQUAL(best_rsb_op_time, best_mkl_op_time, appt ) )
+			csdc = '~';
+		if( RSB_BOTH_FINITE( best_rsb_op_time, best_mkl_op_time) && RSB_BIG_DIFF( best_rsb_op_time, best_mkl_op_time, rldt ) )
+			csdc = '.';
+		if( RSB_BOTH_FINITE( best_rsb_op_time, best_mkl_op_time) && RSB_HUGE_DIFF( best_rsb_op_time, best_mkl_op_time, hgdt ) )
+			csdc = '!';
+/*
+		if( RSB_BOTH_FINITE( best_rsb_op_time, best_mkl_op_time) && csdc == ' ')
+			csdc = '*';
+*/
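+		/*
+		 * Illustrative prefix as printed below: "  12:R!R " would mark
+		 * sample 12 with rsb ahead of mkl both untuned and tuned (uppercase
+		 * 'R' meaning autotuning improved the rsb time), the best rsb and
+		 * mkl times being hugely apart ('!').
+		 */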
+		rsb__mtxfn_bncp(fnbuf,filenamea[filenamei],0);
+		RSB_PRT("%4zd:%c%s%c%c ",idx+1, usdc, ((wltm && csdc=='_')?"\\":""), csdc, asdc);
+		rsb__pr_dump_sample(rsprpv, filenamea, ca, incXa, incYa, nrhsa, typecodes, ta, filenameifp, ifilenameifp, cifp, incXifp , incYifp , nrhsifp , typecodefip , tifp, tfp, flagsA, nflagsA, filenamei, ci, incXi, incYi, nrhsi, typecodesi, ti, rds, wltm);
+	}
+
+		if(     /* nrhsi==0 && */ rsprp->nrhsn > 1 )
+		{
+			rsb_coo_idx_t miv = rsb__util_find_min_index(nrhsa, rsprp->nrhsn);
+			rsb_coo_idx_t mav = rsb__util_find_max_index(nrhsa, rsprp->nrhsn);
+
+			RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(mav));
+			RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(miv));
+
+			if( nrhsa[miv] < nrhsa[mav] && nrhsi != miv ) /* if min and max differ */
+			{
+                                rsb_coo_idx_t nrhsi0 = /*miv*/ miv;
+                                rsb_coo_idx_t nrhsi1 = /*mav*/ nrhsi;
+                                size_t idx0 = rsb__pr_idx(rsprpv, filenamei, ci, incXi, incYi, nrhsi0, typecodesi, ti);
+                                size_t idx1 = rsb__pr_idx(rsprpv, filenamei, ci, incXi, incYi, nrhsi1, typecodesi, ti);
+                                /* ratio of performances (canonical flops per time) */
+                                RSB_UPD_AMM( (rsprp->psa[idx0].at_op_time*nrhsa[nrhsi1] )     / (         rsprp->psa[idx1].at_op_time*nrhsa[nrhsi0] ) ,avrsmv, mirsmv , marsmv);
+                                RSB_UPD_AMM( (rsprp->psa[idx0].mkl_csr_op_time*nrhsa[nrhsi1]) / ( rsprp->psa[idx1].at_mkl_csr_op_time*nrhsa[nrhsi0] ) ,avmsmv, mimsmv , mamsmv);
+				ntsrf++;
+				ntsmf++;
+	       		}
+	       	}
+
+		RSB_UPD_AMM(psp->at_t,                                               aoratt, miratt, maratt );
+		/* RSB_UPD_AMM(psp->at_mkl_csr_t,                                    aomatt, mimatt, mamatt ); */
+		RSB_UPD_AMM(psp->at_op_time,                                         aorott, mirott, marott );
+		RSB_UPD_AMM(psp->op_time,                                            aoRott, miRott, maRott );
+		RSB_UPD_AMM(psp->at_mkl_csr_op_time,                                 aomott, mimott, mamott );
+		RSB_UPD_AMM(psp->mkl_csr_op_time,                                    aoMott, miMott, maMott );
+		RSB_UPD_AMM((psp->cmflops * nrhsa[nrhsi]) / psp->at_op_time,         avrmps, mirmps, marmps );
+		RSB_UPD_AMM((psp->cmflops * nrhsa[nrhsi]) / psp->op_time,            avRmps, miRmps, maRmps );
+		RSB_UPD_AMM((psp->cmflops * nrhsa[nrhsi]) / psp->at_mkl_csr_op_time, avmmps, mimmps, mammps );
+		RSB_UPD_AMM((psp->cmflops * nrhsa[nrhsi]) / psp->mkl_csr_op_time,    avMmps, miMmps, maMmps );
+
+		if( psp->nsubm && psp->at_nsubm )
+		if( psp->nsubm != psp->at_nsubm )
+		if( psp->isa == psp->at_isa )
+		if( psp->isa != ((double)(8.0)) * psp->nnzA && psp->isa != ((double)(4.0)) * psp->nnzA )
+		{
+			RSB_PRWL("both auto tuned (%zd subm) and non autotuned (%zd subm) matrices use %zd bytes (%lg bpnz) of indices --- isn't that suspect ?\n",(size_t)psp->at_nsubm,(size_t)psp->nsubm,(size_t)psp->isa,((double)psp->isa)/psp->nnzA);
+		}
+
+		if( psp->nsubm && psp->at_nsubm )
+		{
+			if( RSB_FSTR_THN_THR(psp->at_op_time,psp->op_time,cmpt) && atwe )
+			{
+				double ratio = ( psp->op_time / psp->at_op_time );
+				const rsb_type_t typecode = typecodes[typecodesi];
+				size_t so = RSB_SIZEOF(typecode);
+
+				if( so == 0 ) /* reading the record of a differently configured build */
+                                {
+                                        rsb_type_t gtc = toupper(typecode);
+
+                                        so = RSB_SIZEOF_BACKUP(gtc);
+                                        if(wdbg)
+		                                RSB_PRWL("reading a file originating from a differently configured build: guessed type code size of '%c' to be %zd.\n",gtc,so);
+				        if( so == 0 ) /* reading the record of a differently configured build */
+                                        {
+		                                RSB_PRWL(" Warning: reading a file originating from a differently configured build; unable to guess the correct type size for type code '%c'.\n",gtc);
+                                        }
+                                }
+
+				noats ++;
+				aoatsp += RSB_SPEEDUP_TO_PCT(ratio);
+				aoatsr += ratio;
+				RSB_UPD_TO_MAX(mstrwr, ratio);
+				RSB_UPD_AMM(psp->at_t / ( psp->op_time - psp->at_op_time),aoatac,miatac,maatac);
+				/* aoatuo += ( psp->at_t / psp->op_time ); */
+		                RSB_UPD_AMM(psp->at_t / psp->op_time, aoatuo, miatuo, maatuo);
+				/*aoatto += ( psp->at_t / psp->at_op_time );*/
+		                RSB_UPD_AMM(psp->at_t / psp->at_op_time, aoatto, miatto, maatto);
+				RSB_UPD_AMM( (((double)    psp->nnzA)  / psp->nsubm   ), avnzbt, minzbt, manzbt);
+				RSB_UPD_AMM( (((double)    psp->nnzA)  / psp->at_nsubm), avnzat, minzat, manzat);
+				RSB_UPD_AMM( (((double)(so*psp->nnzA)) / psp->nsubm   ), avbybt, mibybt, mabybt);
+				RSB_UPD_AMM( (((double)(so*psp->nnzA)) / psp->at_nsubm), avbyat, mibyat, mabyat);
+				RSB_UPD_AMM( (((double)(psp->isa))     / psp->nnzA    ), avbpnb, mibpnb, mabpnb);
+#if RSB_PRD_WANT_CODE_BALANCE_AND_BANDWIDTH
+				{
+			size_t so = RSB_SIZEOF_BACKUP(typecodes[typecodesi]); /* size of */
+			size_t mo = (so*psp->nnzA+psp->at_isa); /* matrix occupation */
+			size_t oo = so*nrhsa[nrhsi]*(psp->nrA+psp->ncA); /* operands occupation */
+			size_t owt = so*nrhsa[nrhsi]*((RSB_DOES_TRANSPOSE(psp->transA)?0:1)*psp->nrA+(RSB_DOES_TRANSPOSE(psp->transA)?1:0)*psp->ncA); /* operands write traffic */
+			size_t mrt = oo + mo, mwt = oo + mo + owt; /* minimal read / read+write traffic */
+			rsb_perf_t mrb = ((rsb_perf_t)mrt)/(psp->at_op_time*1e9); /* minimal read bandwidth,       GBps */
+			rsb_perf_t mwb = ((rsb_perf_t)mwt)/(psp->at_op_time*1e9); /* minimal read/write bandwidth, GBps */
+			RSB_UPD_AMM( mwb, avlowb, milowb, malowb);
+			RSB_UPD_AMM( mrb, avlorb, milorb, malorb);
+				}
+				{
+			size_t so = RSB_SIZEOF_BACKUP(typecodes[typecodesi]); /* size of */
+			size_t mo = (so*psp->nnzA+((rsb_perf_t)(psp->at_isa))); /* matrix occupation */
+			size_t oo = so*nrhsa[nrhsi]*(psp->nrA+psp->ncA); /* operands occupation */
+			size_t mt = oo + mo; /* minimal traffic */
+			rsb_perf_t om = 1e6 *(psp->cmflops * nrhsa[nrhsi]); /* operation flops */
+			rsb_perf_t bm = (1.0 / om) * mt; /* bytes per flop */
+			RSB_UPD_AMM( bm, avcoba, micoba, macoba);
+				}
+#endif /* RSB_PRD_WANT_CODE_BALANCE_AND_BANDWIDTH */
+				RSB_UPD_AMM( (((double)(psp->at_isa))  / psp->nnzA    ), avbpna, mibpna, mabpna);
+		                RSB_UPD_AMM(psp->at_t,                                   aosatt, misatt, masatt );
+
+				RSB_ASSERT(psp->at_op_time != RSB_TIME_ZERO);
+				RSB_ASSERT(avbyat != RSB_TIME_ZERO);
+				RSB_ASSERT(avbybt != RSB_TIME_ZERO);
+				RSB_ASSERT(psp->at_cn != 0);
+				RSB_ASSERT(psp->at_nsubm != 0);
+				RSB_ASSERT(psp->   op_time != RSB_TIME_ZERO);
+				RSB_ASSERT(psp->   nsubm != 0);
+
+				if( psp->nsubm > psp->at_nsubm )
+					ntasl++;
+				else
+				{
+					if( psp->nsubm < psp->at_nsubm )
+						ntasm++;
+					else
+						ntase++;
+				}
+
+				if( ca[ci] > psp->at_cn )
+					ntatl++;
+				else
+				{
+					if( ca[ci] < psp->at_cn )
+						ntatm++;
+					else
+						ntate++;
+				}
+			}
+			else
+#if 0
+			if( RSB_SLWR_THN_THR(psp->at_op_time, psp->op_time) )
+#endif
+			{
+				/* rsb tuning unsuccess */
+				noatf ++;
+		                RSB_UPD_AMM(psp->at_t / psp->op_time, aouatc, miuatc, mauatc );
+		                RSB_UPD_AMM(psp->at_t,                aouatt, miuatt, mauatt );
+			}
+
+			if( RSB_FSTR_THN_THR(psp->at_op_time, psp->mkl_csr_op_time, cmpt ) )
+			{
+				/* tuned rsb success over untuned mkl */
+				double ratio = ( psp->mkl_csr_op_time / psp->at_op_time );
+				nttfu ++;
+				aotstu += ratio;
+				RSB_UPD_TO_MAX(mstrwu,ratio);
+				RSB_UPD_AMM(psp->at_t / (psp->mkl_csr_op_time - psp->at_op_time),aoatau, miatau, maatau);
+			}
+			else
+			if( RSB_SLWR_THN_THR(psp->at_op_time, psp->mkl_csr_op_time ) )
+				nttsu ++;
+
+			if( RSB_FSTR_THN_THR(psp->op_time, psp->mkl_csr_op_time, cmpt) )
+			{
+				/* untuned rsb success over mkl */
+				double ratio = ( psp->mkl_csr_op_time / psp->op_time );
+				ntufm ++;
+				aoustm += ratio;
+				RSB_UPD_TO_MAX(msurwm,ratio);
+			}
+			else
+			if( RSB_SLWR_THN_THR(psp->op_time, psp->mkl_csr_op_time) )
+			{
+				/* untuned rsb unsuccess over mkl */
+				double ratio = ( psp->op_time / psp->mkl_csr_op_time );
+			       	ntusm ++;
+				aoussm += ratio;
+				RSB_UPD_TO_MAX(msumwr,ratio);
+			}
+
+			if( RSB_FSTR_THN_THR(psp->at_op_time, psp->at_mkl_csr_op_time, cmpt ) )
+			{
+				/* tuned rsb success over mkl */
+				double ratio = ( psp->at_mkl_csr_op_time / psp->at_op_time );
+				nttfm ++;
+				aotstm += ratio;
+				RSB_UPD_TO_MAX(mstrwm,ratio);
+				RSB_UPD_AMM(psp->at_t / (psp->at_mkl_csr_op_time - psp->at_op_time),aoatam, miatam, maatam);
+			}
+			else
+			if( RSB_SLWR_THN_THR(psp->at_op_time, psp->at_mkl_csr_op_time) )
+			{
+				/* tuned rsb unsuccess over mkl */
+				double ratio = ( psp->at_op_time / psp->at_mkl_csr_op_time );
+			       	nttsm ++;
+				aotssm += ratio;
+				RSB_UPD_TO_MAX(mstmwr,ratio);
+			}
+
+		} /* ... nsubm ...  */
+
+		if( RSB_FSTR_THN_THR(psp->at_mkl_csr_op_time, psp->mkl_csr_op_time, cmpt ) )
+		{
+			/* mkl tuning success */
+			double ratio = ( psp->mkl_csr_op_time / psp->at_mkl_csr_op_time );
+			ntmfm ++;
+			aotsmm += ratio;
+			RSB_UPD_TO_MAX(mstmwm,ratio);
+
+		}
+		else
+		if( RSB_SLWR_THN_THR(psp->at_mkl_csr_op_time, psp->mkl_csr_op_time ) )
+			ntmsm ++;
+	} /* ti, typecodesi, ... */
+
+
+        if(phase == 1)
+        if(rds==RSB_PRD_STYLE_TBL)
+        if(nocsa > 0)
+	if(wltm > 0)
+	{
+		rsb_char_t fnbuf[RSB_MAX_FILENAME_LENGTH];
+		rsb__mtxfn_bncp(fnbuf,rsb__basename(ltag),1);
+		//RSB_PRT("\\hline\\end{tabular}\\end{center}CAPTION\\end{table}\n");
+		RSB_PRT("\\hline\\caption{%s}\\\\\\hline\\end{longtabu}\n",ltag?fnbuf:"...");
+	}
+
+
+	/* begin plot subcase */
+        if(nocsa > 0)
+        if(rds>=RSB_PRD_STYLE_PLT_BASE)
+        {
+		const rsb_char_t * pl = NULL;
+		const rsb_char_t * ppl = "";
+		rsb_char_t pfn[RSB_MAX_FILENAME_LENGTH];
+
+                if(rsb__getenv("RSB_PRD_STYLE_PLT_PFN"))
+			ppl = rsb__getenv("RSB_PRD_STYLE_PLT_PFN");
+
+                if( rsb__util_atoi(rsb__getenv("RSB_PRD_STYLE_PLT_FMT")) )
+		{
+			pl = "set term postscript eps color;";
+			sprintf(pfn,"%s%s.eps",ppl,ltag?ltag:"plot");
+		}
+		else
+		{
+			pl = "set term png;";
+			sprintf(pfn,"%s%s.png",ppl,ltag?ltag:"plot");
+		}
+
+        	if(rds==RSB_PRD_STYLE_PLT_AT_SPEEDUP_RSB)
+		{
+       			RSB_STDOUT("%sset output '%s'; set title 'autotuning effect'; unset ytics;set yrange [0: 2];\n",pl,pfn);
+			RSB_STDOUT("plot '-' using 1:2 title 'rsb' lt rgb 'red'\n");
+			RSB_STDOUT("set xlabel 'speedup'\n");
+			RSB_STDOUT("set ylabel ' '\n");
+		}
+
+        	if(rds==RSB_PRD_STYLE_PLT_SUBM_BS)
+		{
+       			RSB_STDOUT("%sset output '%s';",pl,pfn);
+		       	//RSB_STDOUT("set title 'autotuning effect'; unset ytics;set yrange [0: 3];\n");
+			RSB_STDOUT("set xlabel 'bytes per submatrix'\n");
+			RSB_STDOUT("set ylabel 'performance, Mflops/s'\n");
+			RSB_STDOUT("set xtics rotate by -45\n");
+			RSB_STDOUT("plot '-' using 1:2:3:4 with vectors title 'rsb' lt rgb 'red'\n");
+		}
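+
+		/*
+		 * The directives emitted above ("set term ...", "plot '-' ...") are
+		 * gnuplot syntax: the output is presumably meant to be piped into
+		 * gnuplot, with the inline data terminated by the 'e' lines below.
+		 */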
+
+		for(     filenamei=0;     filenamei<rsprp->filenamen ;++filenamei     )
+		for(ci=0;ci<rsprp->cn;++ci)
+		for(     incXi=0;     incXi<rsprp->incXn     ;++incXi     )
+		for(     incYi=0;     incYi<rsprp->incYn     ;++incYi     )
+		for(     nrhsi=0;     nrhsi<rsprp->nrhsn     ;++nrhsi     )
+		for(typecodesi=0;typecodesi<rsprp->ntypecodes;++typecodesi)
+		for(ti=0;ti<rsprp->tn;++ti)
+		{
+			size_t idx = rsb__pr_idx(rsprpv, filenamei, ci, incXi, incYi, nrhsi, typecodesi, ti);
+			struct rsb_rsps_t*psp = &(rsprp->psa[idx]);
+			const rsb_type_t typecode = typecodes[typecodesi];
+			size_t so = RSB_SIZEOF(typecode);
+			so = RSB_SIZEOF_BACKUP(toupper(typecode));
+
+			if( RSB_ERR_NO_ERROR != rsb__pr_filter(psp, ta, filenameifp, ifilenameifp, cifp , incXifp , incYifp , nrhsifp , typecodefip , tifp, tfp, flagsA, nflagsA, filenamei, ci, incXi, incYi, nrhsi, typecodesi, ti) )
+				continue;
+        		if(rds==RSB_PRD_STYLE_PLT_AT_SPEEDUP_RSB)
+        			RSB_STDOUT("%le %d\n",psp->op_time/psp->at_op_time,1);
+        		if(rds==RSB_PRD_STYLE_PLT_SUBM_BS)
+			{
+				double avmbybt = (((double)(so*psp->nnzA)) / psp->nsubm   );
+				double avmbyat = (((double)(so*psp->nnzA)) / psp->at_nsubm);
+				//double avmbpnb = (((double)(psp->isa))     / psp->nnzA    );
+				//double avmbpna = (((double)(psp->at_isa))     / psp->nnzA    );
+				double avmrmps = ((psp->cmflops * nrhsa[nrhsi]) / psp->at_op_time);
+		     		double avmRmps = ((psp->cmflops * nrhsa[nrhsi]) / psp->op_time);
+				//double avmmmps = ((psp->cmflops * nrhsa[nrhsi]) / psp->at_mkl_csr_op_time);
+			     	//double avmMmps = ((psp->cmflops * nrhsa[nrhsi]) / psp->mkl_csr_op_time);
+
+        			RSB_STDOUT("%le %le %le %le\n",avmbybt,avmRmps,avmbyat,avmrmps-avmRmps);
+        			//RSB_STDOUT("%le %d %le %d\n",avmbybt,1,avmbyat,1);
+        			//RSB_STDOUT("%le %le %le %le\n",avmbybt,avmbpnb,avmbyat,(avmbpna-avmbpnb));
+			}
+		}
+        	if(rds==RSB_PRD_STYLE_PLT_AT_SPEEDUP_RSB)
+        		RSB_STDOUT("e\n");
+        	if(rds==RSB_PRD_STYLE_PLT_SUBM_BS)
+        		RSB_STDOUT("e\n");
+		goto ret;
+        }
+	/* end plot subcase */
+
+	if( nocsa <= 0 )
+	{
+		RSB_PRL(" No sample (out of %d) matched the dump criteria -- skipping dump round.\n",rsprp->csf);
+		goto ret; /* skip any further printout */
+	}
+	else 
+        {
+	        if( rsprp->csf != nocsa )
+	        {
+                        RSB_PRL(" %d samples (out of %d) matched the dump limiting criteria.\n",nocsa,rsprp->csf);
+                }
+                if(phase == 0)
+                {
+                        phase = 1;
+                        nocsa = 0;
+                        goto gop2;
+                }
+        }
+        if( rsprp->ror == RSB_BOOL_TRUE )
+        {
+                goto ret;
+        }
+        
+        if(rds==RSB_PRD_STYLE_TBL) if(wltm > 0) RSB_PRT("\\begin{verbatim}\n");
+
+	if( ( noats > 0 ) || ( noatf > 0 ) )
+	{
+
+		RSB_DIV_NOT_BY_ZERO(aoatsr, noats);
+		/* RSB_DIV_NOT_BY_ZERO(mstrwr, noats); */
+		RSB_DIV_NOT_BY_ZERO(aoatsp, noats);
+		RSB_DIV_NOT_BY_ZERO(aoatac, noats);
+		RSB_DIV_NOT_BY_ZERO(avnzbt, noats);
+		RSB_DIV_NOT_BY_ZERO(avnzat, noats);
+		RSB_DIV_NOT_BY_ZERO(avbybt, noats);
+		RSB_DIV_NOT_BY_ZERO(avbyat, noats);
+		RSB_DIV_NOT_BY_ZERO(avbpna, noats);
+		RSB_DIV_NOT_BY_ZERO(avbpnb, noats);
+#if RSB_PRD_WANT_CODE_BALANCE_AND_BANDWIDTH
+		RSB_DIV_NOT_BY_ZERO(avlorb, noats);
+		RSB_DIV_NOT_BY_ZERO(avcoba, noats);
+#endif /* RSB_PRD_WANT_CODE_BALANCE_AND_BANDWIDTH */
+		toatuo = aoatuo;
+		RSB_DIV_NOT_BY_ZERO(aoatuo, noats);
+		toatto = aoatto;
+		RSB_DIV_NOT_BY_ZERO(aoatto, noats);
+		touatc = aouatc;
+		RSB_DIV_NOT_BY_ZERO(aouatc, noatf);
+		touatt = aouatt;
+		RSB_DIV_NOT_BY_ZERO(aouatt, noatf);
+		tosatt = aosatt;
+		RSB_DIV_NOT_BY_ZERO(aosatt, noats);
+
+		RSB_PRL("above, '~' marks that rsb and mkl are close within %lgx, '.' marks that rsb is better than mkl by >%lgx, '!' marks that rsb is better than mkl by >%lgx\n",appt,rldt,hgdt);
+		RSB_PRL("below, we define 'successful' autotuning when speedup of %lfx is exceeded, and 'tuned' results even the ones which are same as untuned\n", cmpt);
+		
+	       	if(noats >  0)
+		{
+			RSB_PRL("rsb autotuning was successful in %5d cases (%3.2lf %%) and unsuccessful in %d cases (%3.2lf %%)\n", noats, RSB_PCT(noats,noats+noatf), noatf, RSB_PCT(noatf,noats+noatf) );
+			RSB_PRL(" (in succ. cases was  avg. %5.1lf %% faster, avg. sp. ratio %5.3lf, max sp. ratio %5.3lf)\n", aoatsp, aoatsr, mstrwr );
+			RSB_PRL(" (in succ. cases rsb autotuning took an avg/min/max/tot of: %5.1lf/%5.1lf/%5.1lf/%5.1lf   tuned ops)\n", aoatto, miatto, maatto, toatto);
+			RSB_PRL(" (in succ. cases rsb autotuning took an avg/min/max/tot of: %5.1lf/%5.1lf/%5.1lf/%5.1lf untuned ops)\n", aoatuo, miatuo, maatuo, toatuo);
+	       		RSB_PRL(" (and amortizes from untuned rsb in avg. %5.1lf, min. %5.1lf, max. %5.1lf ops)\n",aoatac,miatac,maatac);
+			RSB_PRL(" (avg/min/max (avg) nnz   per subm before successful tuning were %10.0lf/%10.0lf/%10.0lf)\n", avnzbt, minzbt, manzbt );
+			RSB_PRL(" (avg/min/max (avg) nnz   per subm after  successful tuning were %10.0lf/%10.0lf/%10.0lf)\n", avnzat, minzat, manzat );
+			RSB_PRL(" (avg/min/max (avg) bytes per subm before successful tuning were %10.0lf/%10.0lf/%10.0lf)\n", avbybt, mibybt, mabybt );
+			RSB_PRL(" (avg/min/max (avg) bytes per subm after  successful tuning were %10.0lf/%10.0lf/%10.0lf)\n", avbyat, mibyat, mabyat );
+			RSB_PRL(" (avg/min/max (avg) bytes per nnz  before successful tuning were %10.3lf/%10.3lf/%10.3lf)\n", avbpnb, mibpnb, mabpnb );
+#if RSB_PRD_WANT_CODE_BALANCE_AND_BANDWIDTH
+			RSB_PRL(" (avg/min/max operands (mtx,lhs,rhs) read bandwidth lower bound  %10.3lf/%10.3lf/%10.3lf,GBps)\n", avlorb, milorb, malorb);
+			RSB_PRL(" (avg/min/max operands (mtx,rhs:r;lhs:rw) bandwidth lower bound  %10.3lf/%10.3lf/%10.3lf,GBps)\n", avlowb, milowb, malowb);
+			RSB_PRL(" (avg/min/max code balance (bytes read at least once per flop)   %10.3lf/%10.3lf/%10.3lf)\n", avcoba, micoba, macoba);
+#endif /* RSB_PRD_WANT_CODE_BALANCE_AND_BANDWIDTH */
+			RSB_PRL(" (avg/min/max (avg) bytes per nnz  after  successful tuning were %10.3lf/%10.3lf/%10.3lf)\n", avbpna, mibpna, mabpna );
+			RSB_PRL(" (matrix has been subdivided  more/less/same            in resp.  %d / %d /%d cases)\n", ntasm,ntasl,ntase );
+			RSB_PRL(" (matrix has used             more/less/same    threads in resp.  %d / %d /%d cases)\n", ntatm,ntatl,ntate );
+		}
+	}
+	if(noats == 0)
+		RSB_PRL("no successful rsb autotuning attempt (according to %5.3lgx threshold)\n",RSB_CMP_THR );
+	if(noatf == 0)
+		RSB_PRL("no unsuccessful rsb autotuning attempt (according to %5.3lgx threshold) \n",RSB_CMP_THR );
+	if(noatf >  0)
+		RSB_PRL("unsuccessful rsb autotuning attempts (%5d cases) took avg/min/max/tot of equivalent %5.1lf/%5.1lf/%5.1lf/%5.1lf ops\n", noatf, aouatc, miuatc, mauatc, touatc );
+
+	RSB_DIV_NOT_BY_ZERO(aotsmm, ntmfm);
+	if(vscm)
+	if( ntmfm || ntmsm )
+		RSB_PRL("mkl threads tuning was successful in %5d cases (avg. sp. ratio %5.3lf, max sp. ratio %5.3lf) and unsuccessful in %5d cases\n", ntmfm, aotsmm, mstmwm, ntmsm);
+
+	RSB_DIV_NOT_BY_ZERO(aoustm, ntufm);
+	RSB_DIV_NOT_BY_ZERO(aoussm, ntusm);
+	RSB_DIV_NOT_BY_ZERO(aotstm, nttfm);
+	RSB_DIV_NOT_BY_ZERO(aotssm, nttsm);
+	RSB_DIV_NOT_BY_ZERO(aoatam, nttfm);
+	RSB_DIV_NOT_BY_ZERO(aoatau, nttfu);
+	RSB_DIV_NOT_BY_ZERO(aotstu, nttfu);
+
+	if(vscm)
+	{
+		RSB_PRL("untuned rsb has been faster than untuned mkl %5d times",ntufm);
+		if( ntufm )
+			RSB_PRC(", avg. sp. %2.3lf x, max %2.3lf x",aoustm,msurwm);
+		RSB_PRC("\n");
+
+		RSB_PRL("untuned rsb has been slower than untuned mkl %5d times",ntusm);
+		if( ntusm )
+			RSB_PRC(", avg. sl. %2.3lf x, max %2.3lf x",aoussm,msumwr);
+		RSB_PRC("\n");
+
+		RSB_PRL("tuned   rsb has been faster than   tuned mkl %5d times",nttfm);
+		if( nttfm )
+			RSB_PRC(", avg. sp. %2.3lf x, max %2.3lf x",aotstm,mstrwm);
+		RSB_PRC("\n");
+		if( nttfm )
+			RSB_PRL(" (in these cases autotuning amortizes in avg. %5.1lf, min. %5.1lf, max. %5.1lf   tuned mkl ops)\n",aoatam,miatam,maatam);
+
+		RSB_PRL("tuned   rsb has been faster than untuned mkl %5d times",nttfu);
+		if( nttfu )
+			RSB_PRC(", avg. sp. %2.3lf x, max %2.3lf x",aotstu,mstrwu);
+		RSB_PRC("\n");
+		if( nttfu )
+			RSB_PRL(" (in these cases autotuning amortizes in avg. %5.1lf, min. %5.1lf, max. %5.1lf untuned mkl ops)\n",aoatau,miatau,maatau);
+
+		RSB_PRL("tuned   rsb has been slower than   tuned mkl %5d times",nttsm);
+		if( nttsm )
+			RSB_PRC(", avg. sl. %2.3lf x, max %2.3lf x",aotssm,mstmwr);
+		RSB_PRC("\n");
+	}
+        
+        torott = aorott;
+        toRott = aoRott;
+        tomott = aomott;
+        toMott = aoMott;
+	toratt = aoratt;
+#if 0
+	tomatt = aomatt;
+	RSB_DIV_NOT_BY_ZERO(aomatt, nocsa);
+#endif
+	RSB_DIV_NOT_BY_ZERO(aoratt, nocsa);
+	RSB_DIV_NOT_BY_ZERO(aomott, nocsa);
+	RSB_DIV_NOT_BY_ZERO(aoMott, nocsa);
+	RSB_DIV_NOT_BY_ZERO(aorott, nocsa);
+	RSB_DIV_NOT_BY_ZERO(aoRott, nocsa);
+	RSB_DIV_NOT_BY_ZERO(avrmps, nocsa);
+	RSB_DIV_NOT_BY_ZERO(avRmps, nocsa);
+	RSB_DIV_NOT_BY_ZERO(avmmps, nocsa);
+	RSB_DIV_NOT_BY_ZERO(avMmps, nocsa);
+	RSB_DIV_NOT_BY_ZERO(avrsmv, ntsrf);
+	RSB_DIV_NOT_BY_ZERO(avmsmv, ntsmf);
+
+        if(noats || noatf)
+	RSB_PRL("rsb auto tuning (either succ. or uns.) time was: on avg.: %5.2lf s, min %5.2lf s, max %5.2lf s, tot %5.2lf s (%d samples)\n",aoratt,miratt,maratt,toratt,nocsa );
+        if(noats)
+	RSB_PRL("rsb auto tuning (   only successful  ) time was: on avg.: %5.2lf s, min %5.2lf s, max %5.2lf s, tot %5.2lf s (%d samples)\n",aosatt,misatt,masatt,tosatt,noats );
+        if(noatf)
+	RSB_PRL("rsb auto tuning ( only unsuccessful  ) time was: on avg.: %5.2lf s, min %5.2lf s, max %5.2lf s, tot %5.2lf s (%d samples)\n",aouatt,miuatt,mauatt,touatt,noatf );
+#if 0
+	if(vscm)
+	RSB_PRL("mkl auto tuning (either succ. or uns.) time was: on avg.: %5.2lf s, min %5.2lf s, max %5.2lf s, tot %5.2lf s (%d samples)\n",aomatt,mimatt,mamatt,tomatt,nocsa );
+#endif
+
+	if(noats) /* TODO: noats != nocsa */
+	RSB_PRL(" best tun. rsb canon. mflops were: on avg. %2.3le,  min %2.3le,  max %2.3le  (%d samples)\n",avrmps, mirmps, marmps, nocsa);
+
+	RSB_PRL(" ref. unt. rsb canon. mflops were: on avg. %2.3le,  min %2.3le,  max %2.3le  (%d samples)\n",avRmps, miRmps, maRmps, nocsa);
+	if(vscm)
+	{
+		RSB_PRL(" best tun. mkl canon. mflops were: on avg. %2.3le,  min %2.3le,  max %2.3le  (%d samples)\n",avmmps, mimmps, mammps, nocsa);
+		RSB_PRL(" ref. unt. mkl canon. mflops were: on avg. %2.3le,  min %2.3le,  max %2.3le  (%d samples)\n",avMmps, miMmps, maMmps, nocsa);
+	}
+
+	if(noats) /* TODO: noats != nocsa */
+	RSB_PRL(" best tun. rsb operation time was: on avg. %2.3les, min %2.3les, max %2.3les, tot %2.3les (%d samples)\n",aorott,mirott,marott,torott, nocsa );
+	RSB_PRL(" ref. unt. rsb operation time was: on avg. %2.3les, min %2.3les, max %2.3les, tot %2.3les (%d samples)\n",aoRott,miRott,maRott,toRott, nocsa );
+        /* TODO: 'sum' would be a more appropriate label than 'tot' */
+	if(vscm)
+	{
+		RSB_PRL(" best tun. mkl operation time was: on avg. %2.3les, min %2.3les, max %2.3les, tot %2.3les (%d samples)\n",aomott,mimott,mamott,tomott, nocsa );
+		RSB_PRL(" ref. unt. mkl operation time was: on avg. %2.3les, min %2.3les, max %2.3les, tot %2.3les (%d samples)\n",aoMott,miMott,maMott,toMott, nocsa );
+	}
+
+	if(ntsrf > 0)
+		RSB_PRL(" rsb nrhs-to-overall-min-rhs speed ratio was: on avg.    %2.3le x, min %2.3le x, max %2.3le x (%d samples, the non-min-nrhs ones)\n",avrsmv, mirsmv, marsmv, ntsrf);
+	if(vscm) /* vscm does not properly apply here; but ntsmf alone is not enough */
+	if(ntsmf)
+		RSB_PRL(" mkl nrhs-to-overall-min-rhs speed ratio was: on avg.    %2.3le x, min %2.3le x, max %2.3le x (%d samples, the non-min-nrhs ones)\n",avmsmv, mimsmv, mamsmv, ntsmf);
+
+#define RSB_PR_LOOP_EL(IVAR,CVAL,UVAL) for(IVAR=(CVAL==-1?0:CVAL);IVAR<(CVAL==-1?UVAL:(CVAL+1));++IVAR)
+#define RSB_PR_LOOP(FNV,CNV,IXV,IYV,NRV,TCV,TNV) \
+	RSB_PR_LOOP_EL(filenamei,FNV,rsprp->filenamen) \
+	RSB_PR_LOOP_EL(ci,CNV,rsprp->cn) \
+	RSB_PR_LOOP_EL(incXi,IXV,rsprp->incXn     ) \
+	RSB_PR_LOOP_EL(incYi,IYV,rsprp->incYn     ) \
+	RSB_PR_LOOP_EL(nrhsi,NRV,rsprp->nrhsn) \
+	RSB_PR_LOOP_EL(typecodesi,TCV,rsprp->ntypecodes) \
+	RSB_PR_LOOP_EL(ti,TNV,rsprp->tn) \
+	{ \
+	} /* TODO: still unused; complete this... */
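+	/* A minimal expansion sketch (not upstream usage; the loop macro above is
+	 * still unused): RSB_PR_LOOP_EL(IVAR,CVAL,UVAL) visits the whole range
+	 * [0,UVAL) when CVAL == -1, and the single index CVAL otherwise, e.g.:
+	 *
+	 *	rsb_int_t ci;
+	 *	RSB_PR_LOOP_EL(ci, -1, rsprp->cn) { } // ci = 0 .. rsprp->cn - 1
+	 *	RSB_PR_LOOP_EL(ci,  2, rsprp->cn) { } // ci = 2 only
+	 */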
+
+	if(rsprp->filenamen > 0)
+		;/* plot for all matrices, performance for each case */
+	if(rsprp->cn > 0)
+		;/* plot for all matrices, performance for increasing cores */
+	if(rsprp->incXn > 0)
+		;/* plot per matrix, performance for increasing incX */
+	if(rsprp->incYn > 0)
+		;/* plot per matrix, performance for increasing incY */
+	if(rsprp->nrhsn > 0)
+		;/* plot per matrix, performance for increasing nrhs */
+	if(rsprp->ntypecodes > 0)
+		;/* plot per matrix, performance for different types */
+	if(rsprp->tn > 0)
+		;/* plot per matrix, different transpositions: TODO: incorporate in the usual plots .. */
+
+	/* TODO: for each matrix, performance + MKL performance / speedup */
+	/* TODO: for each matrix, thread tuned performance to normal performance / speedup */
+	/* TODO: for each matrix, structure tuned performance to normal performance / speedup */
+	/* TODO: for each matrix, total tuned performance to normal performance / speedup */
+	/* TODO: label each of these with timestamp;
+	 *
+	 * plot-DATE-mtrx-TYPE-CORES-INCX-INCY-NRHS-TRANS.eps
+	 * plot-DATE-nrhs-TYPE-CORES-MTRX-INCX-INCY-TRANS.eps 
+	 * plot-DATE-incx-TYPE-CORES-MTRX-INCY-NRHS-TRANS.eps 
+	 * plot-DATE-incy-TYPE-CORES-MTRX-INCX-NRHS-TRANS.eps 
+	 * ...
+	 * */
+
+	/* wishlist: */
+	/* plot per matrix, then different indexing per nonzero */
+	/* plot for all matrices, then different indexing per nonzero */
+        if(rds==RSB_PRD_STYLE_TBL) if(wltm > 0) RSB_PRT("\\end{verbatim}\n");
+ret:
+	return errval;
+}
+
+rsb_err_t rsb__pr_dump(const void*rsprpv, const rsb_char_t**filenamea, rsb_int_t*ca, const rsb_int_t*incXa, const rsb_int_t*incYa, const rsb_int_t*nrhsa, const rsb_type_t*typecodes, const rsb_int_t*ta)
+{
+	/*
+	 * dump a performance record
+         * TODO: use rsb__basename().
+         * TODO: use a systematic combinations enumeration algorithm.
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	const int*filenameifp = NULL; /* filename index ? pointer */
+	const int*cifp = NULL; /* [used] cores index ? pointer */
+	const int*incXifp = NULL; /* incX index ? pointer */
+	const int*incYifp = NULL; /* incY index ? pointer */
+	const int*nrhsifp = NULL; /* nrhs index ? pointer */
+	const int*typecodefip = NULL; /* typecode index ? pointer */
+	const int*tifp = NULL; /* transposition index ? pointer */
+	const rsb_trans_t*tfp = NULL; /* transposition ? pointer */
+	const struct rsb_rspr_t * rsprp = rsprpv;
+	rsb_int_t filenamei, /* ci, incXi, incYi,*/ nrhsi, typecodesi, ti = 0;
+	rsb_trans_t transAa [] = { RSB_TRANSPOSITION_N, RSB_TRANSPOSITION_T, RSB_TRANSPOSITION_C };
+	rsb_char_t tag[RSB_MAX_FILENAME_LENGTH];
+	rsb_char_t bfn[RSB_MAX_FILENAME_LENGTH];
+	rsb_int_t noc = 0; /* number of combinations */
+	char rsb_prl_lcc = RSB_PRL_LCC_IE ;
+	const char*rsb_prl_tcs = RSB_PRL_TCS_IE;
+        int rds = rsb__getenv("RSB_PR_SR") ? rsb__util_atof(rsb__getenv("RSB_PR_SR")): RSB_PRD_STYLE_TBL;
+        int wltm = rsb__getenv("RSB_PR_WLTC") ? rsb__util_atof(rsb__getenv("RSB_PR_WLTC")) : 0; /* Want LaTeX tables mode */
+        if(rds==RSB_PRD_STYLE_TBL && wltm > 0)
+		RSB_PRT( "\\documentclass[a1,portrait,plainsections]{sciposter} \\usepackage{longtable,tabu,url,color} \\usepackage[cm]{fullpage} \\usepackage[table,x11names]{xcolor} \\usepackage[hyperindex,bookmarks]{hyperref}%% bookmarks do not seem to work\n\\begin{document}\\title{" RSB_PACKAGE_NAME " performance, postprocessed with " RSB_PACKAGE_STRING ".}\\author{} \\begin{tiny} \\rowcolors{1}{white!80!gray}{white}\n");
+
+	sprintf(tag,"all");
+	noc = RSB_PR_NOC(rsprp);
+	errval = rsb__pr_dump_inner(rsprpv, filenamea, ca, incXa, incYa, nrhsa, typecodes, ta, 
+						  filenameifp, NULL,cifp, incXifp, incYifp, nrhsifp, typecodefip, tifp, tfp, RSB_FLAG_NOFLAGS, RSB_FLAG_NOFLAGS, tag );
+	if(RSB_SOME_ERROR(errval))
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+
+	if( rsb__util_atoi(rsb__getenv("RSB_PR_ONLY_TOTAL_TABLE")) )
+		goto err;
+
+	if( filenamea )
+	if( rsprp->filenamen > 1 )
+	if( rsprp->filenamen < noc )
+	for(     filenamei=0;     filenamei<rsprp->filenamen ;++filenamei     )
+	{
+                rsb_int is_symm = RSB_DO_FLAG_HAS(((rsprp->psa[rsb__pr_idx(rsprpv, filenamei, 0, 0, 0, 0, 0, 0)]).flagsA ),RSB_FLAG_SYMMETRIC);
+        	int etn = rsprp->tn;
+		rsb__mtxfn_bncp(bfn,rsb__basename(filenamea[filenamei]),0);
+		sprintf(tag,"file-%d-%s",filenamei+1,bfn);
+		RSB_PRL_SEP(" Limiting to file %d/%d --- %s:\n",filenamei+1,rsprp->filenamen,filenamea[filenamei]);
+		errval = rsb__pr_dump_inner(rsprpv, filenamea, ca, incXa, incYa, nrhsa, typecodes, ta, 
+					  &filenamei, NULL,cifp, incXifp, incYifp, nrhsifp, typecodefip, tifp, tfp, RSB_FLAG_NOFLAGS, RSB_FLAG_NOFLAGS, tag );
+
+                if(is_symm && rsprp->tn == 2 )
+                        etn = 1;
+
+        	if( etn > 1 ) /* otherwise the above dump suffices */
+        	if( etn * rsprp->filenamen < noc )
+        	for(     ti=0;     ti<RSB_MIN(etn,3)     ;++ti     )
+        	{
+        		const rsb_trans_t tf = transAa[ti];
+			rsb__mtxfn_bncp(bfn,rsb__basename(filenamea[filenamei]),0);
+			sprintf(tag,"file-%d-%s-transA-%c",filenamei+1,bfn,RSB_TRANSPOSITION_AS_CHAR(tf));
+        		RSB_PRL_SEP(" Limiting to both file %d/%d --- %s and transA=%c:\n",filenamei+1,rsprp->filenamen,filenamea[filenamei],RSB_TRANSPOSITION_AS_CHAR(tf));
+        		errval = rsb__pr_dump_inner(rsprpv, filenamea, ca, incXa, incYa, nrhsa, typecodes, ta, 
+					  &filenamei, NULL,cifp, incXifp, incYifp, nrhsifp, typecodefip, &ti, &tf, RSB_FLAG_NOFLAGS, RSB_FLAG_NOFLAGS, tag );
+        	}
+
+	        if( typecodes )
+        	if( rsprp->ntypecodes > 1 )
+        	if( rsprp->ntypecodes * rsprp->filenamen < noc )
+        	for(typecodesi=0;typecodesi<rsprp->ntypecodes;++typecodesi)
+        	{
+			rsb__mtxfn_bncp(bfn,rsb__basename(filenamea[filenamei]),0);
+			sprintf(tag,"file-%d-%s-type-%c",filenamei+1,bfn,typecodes[typecodesi]);
+        		RSB_PRL_SEP(" Limiting to both file %d/%d --- %s and type %c:\n",filenamei+1,rsprp->filenamen,filenamea[filenamei],typecodes[typecodesi]);
+        		errval = rsb__pr_dump_inner(rsprpv, filenamea, ca, incXa, incYa, nrhsa, typecodes, ta, 
+					  &filenamei, NULL,cifp, incXifp, incYifp, nrhsifp, &typecodesi, tifp, tfp, RSB_FLAG_NOFLAGS, RSB_FLAG_NOFLAGS, tag );
+        	}
+       	}
+
+	if( filenamea )
+	if( rsprp->filenamen > 1 )
+	{
+		rsb_flags_t sf[3] = {RSB_FLAG_NOFLAGS,RSB_FLAG_NOFLAGS,RSB_FLAG_NOFLAGS}, nf[3] = {RSB_FLAG_NOFLAGS,RSB_FLAG_NOFLAGS,RSB_FLAG_NOFLAGS};
+		rsb_char_t scb[3] = {0x0,0x0,0x0};
+		int sc = 0, si;
+		rsb_flags_t flag;
+
+		for(     flag = RSB_FLAG_SYMMETRIC, filenamei=0;     filenamei<rsprp->filenamen ;++filenamei     )
+			if(rsprp->psa[rsb__pr_idx(rsprpv, filenamei, 0, 0, 0, 0, 0, 0)].flagsA & flag)
+			{
+				scb[sc] = RSB_SYMCHAR(flag); nf[sc] = RSB_FLAG_NOFLAGS;  sf[sc++] = flag; break;
+			}
+
+		for(     flag = RSB_FLAG_HERMITIAN, filenamei=0;     filenamei<rsprp->filenamen ;++filenamei     )
+			if(rsprp->psa[rsb__pr_idx(rsprpv, filenamei, 0, 0, 0, 0, 0, 0)].flagsA & flag)
+			{
+				scb[sc] = RSB_SYMCHAR(flag); nf[sc] = RSB_FLAG_NOFLAGS;  sf[sc++] = flag; break;
+			}
+
+		for(     flag = (RSB_FLAG_SYMMETRIC|RSB_FLAG_HERMITIAN), filenamei=0;     filenamei<rsprp->filenamen ;++filenamei     )
+			if(rsprp->psa[rsb__pr_idx(rsprpv, filenamei, 0, 0, 0, 0, 0, 0)].flagsA & flag)
+				;
+			else
+			{
+				scb[sc] = RSB_SYMCHAR(RSB_FLAG_NOFLAGS); sf[sc] = RSB_FLAG_NOFLAGS;  nf[sc++] = flag; break;
+			}
+
+                if(sc > 1) /* TODO: should be stricter and check whether more than one matrix applies for each given sc */
+                if(sc * rsprp->filenamen < noc)
+	        for(     si = 0; si < sc ; ++si )
+        	{
+			sprintf(tag,"symmetry-%c",scb[si]);
+        		RSB_PRL_SEP(" Limiting to symmetry %c (0x%x) \n",scb[si],sf[si]);
+        		errval = rsb__pr_dump_inner(rsprpv, filenamea, ca, incXa, incYa, nrhsa, typecodes, ta, 
+        					  filenameifp, NULL,cifp, incXifp, incYifp, nrhsifp, typecodefip, tifp, tfp, sf[si], nf[si], tag);
+        	}
+	}
+
+	if( typecodes )
+	if( rsprp->ntypecodes > 1 )
+	if( rsprp->ntypecodes < noc )
+	for(typecodesi=0;typecodesi<rsprp->ntypecodes;++typecodesi)
+	{
+		sprintf(tag,"type-%c",typecodes[typecodesi]);
+		RSB_PRL_SEP(" Limiting to type %c:\n",typecodes[typecodesi]);
+		errval = rsb__pr_dump_inner(rsprpv, filenamea, ca, incXa, incYa, nrhsa, typecodes, ta, 
+					  filenameifp, NULL,cifp, incXifp, incYifp, nrhsifp, &typecodesi, tifp, tfp, RSB_FLAG_NOFLAGS, RSB_FLAG_NOFLAGS, tag );
+	}
+
+	if( nrhsa )
+	if( rsprp->nrhsn > 1 )
+	if( rsprp->nrhsn < noc )
+	for(     nrhsi=0;     nrhsi<rsprp->nrhsn     ;++nrhsi     )
+	{
+		sprintf(tag,"nrhs-%d",nrhsa[nrhsi]);
+		RSB_PRL_SEP(" Limiting to nrhs=%d:\n",nrhsa[nrhsi]);
+		errval = rsb__pr_dump_inner(rsprpv, filenamea, ca, incXa, incYa, nrhsa, typecodes, ta, 
+					  filenameifp, NULL,cifp, incXifp, incYifp, &nrhsi, typecodefip, tifp, tfp, RSB_FLAG_NOFLAGS, RSB_FLAG_NOFLAGS, tag );
+	}
+
+	if( ta && rsprp->tn > 1 )
+        {
+	if( ta && rsprp->tn < noc )
+	for(     ti=0;     ti<rsprp->tn     ;++ti     ) /** FIXME: why this case ? */
+	{
+		sprintf(tag,"transA-%c",RSB_TRANSPOSITION_AS_CHAR(ta[ti]));
+		RSB_PRL_SEP(" Limiting to transA=%d:\n",RSB_TRANSPOSITION_AS_CHAR(ta[ti]));
+		errval = rsb__pr_dump_inner(rsprpv, filenamea, ca, incXa, incYa, nrhsa, typecodes, ta, 
+					  filenameifp, NULL,cifp, incXifp, incYifp, nrhsifp, typecodefip, &ti, tfp, RSB_FLAG_NOFLAGS, RSB_FLAG_NOFLAGS, tag );
+	}
+        }
+	else
+	if( rsprp->tn > 1 )
+        {
+	if( rsprp->tn < noc )
+	for(     ti=0;     ti<RSB_MIN(rsprp->tn,3)     ;++ti     )
+	{
+		const rsb_trans_t tf = transAa[ti];
+		sprintf(tag,"transA-%c",RSB_TRANSPOSITION_AS_CHAR(tf));
+		RSB_PRL_SEP(" Limiting to transA=%c:\n",RSB_TRANSPOSITION_AS_CHAR(tf));
+		errval = rsb__pr_dump_inner(rsprpv, filenamea, ca, incXa, incYa, nrhsa, typecodes, ta, 
+					  filenameifp, NULL, cifp, incXifp, incYifp, nrhsifp, typecodefip, &ti, &tf, RSB_FLAG_NOFLAGS, RSB_FLAG_NOFLAGS, tag );
+        	if( nrhsa )
+        	if( rsprp->nrhsn > 1 )
+	        if( rsprp->tn * rsprp->nrhsn < noc )
+        	for(     nrhsi=0;     nrhsi<rsprp->nrhsn     ;++nrhsi     )
+        	{
+			sprintf(tag,"transA-%c-nrhs-%d",RSB_TRANSPOSITION_AS_CHAR(tf),nrhsa[nrhsi]);
+        		RSB_PRL_SEP(" Limiting to both transA=%c and nrhs=%d:\n",RSB_TRANSPOSITION_AS_CHAR(tf),nrhsa[nrhsi]);
+        		errval = rsb__pr_dump_inner(rsprpv, filenamea, ca, incXa, incYa, nrhsa, typecodes, ta, 
+        					  filenameifp, NULL,cifp, incXifp, incYifp, &nrhsi, typecodefip, &ti, &tf, RSB_FLAG_NOFLAGS, RSB_FLAG_NOFLAGS, tag );
+        	}
+	}
+        }
+
+        if(rds==RSB_PRD_STYLE_TBL && wltm > 0)
+		RSB_PRT("\\end{tiny}\\end{document}\n");
+/*
+	for(ci=0;ci<rsprp->cn;++ci)
+	for(     incXi=0;     incXi<rsprp->incXn     ;++incXi     )
+	for(     incYi=0;     incYi<rsprp->incYn     ;++incYi     )
+*/
+err:
+	return errval;
+}
+
+rsb_err_t rsb__pr_free(void * rsprpv)
+{
+	/*
+	 * free a performance record
+	 * */
+	struct rsb_rspr_t * rsprp = rsprpv;
+        if(!rsprp)
+                goto err;
+	RSB_CONDITIONAL_FREE(rsprp->psa);
+	RSB_CONDITIONAL_FREE(rsprp->rsprap);
+	RSB_CONDITIONAL_FREE(rsprp);
+err:
+	return RSB_ERR_NO_ERROR;
+}
+
+
+/* performance samples reporting / dumping facility for rsbench : end */
+/* @endcond */
diff --git a/rsb_pr.h b/rsb_pr.h
new file mode 100644
index 0000000..00114c1
--- /dev/null
+++ b/rsb_pr.h
@@ -0,0 +1,59 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/**
+ * @file
+ * @brief Performance reporting code.
+ * @author Michele Martone
+ * */
+
+#ifndef RSB_PR_H_INCLUDED
+#define RSB_PR_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include <stdio.h>
+#include "rsb_internals.h"
+
+struct rsb_ts_t /* time statistics struct; dumpable with RSB_STAT_DUMP_TS */
+{
+	rsb_time_t avg,min,max,sd; /* average, minimum, maximum, standard deviation */
+	rsb_int_t ns; /* number of samples */
+};
+
+rsb_err_t rsb__pr_init(void**rsprpv, const struct rsb_mtx_t *mtxAp, rsb_int_t filenamen, rsb_int_t cn, rsb_int_t incXn, rsb_int_t incYn, rsb_int_t nrhsn, rsb_int_t ntypecodes, rsb_int_t tn);
+rsb_err_t rsb__pr_set(void*rsprpv, const struct rsb_mtx_t *mtxAp, const struct rsb_mtx_t *at_mtxAp, rsb_int_t filenamei, rsb_int_t ci, rsb_int_t incXi, rsb_int_t incYi, rsb_int_t nrhsi, rsb_int_t typecodesi, rsb_int_t ti, rsb_trans_t transA, rsb_perf_t op_time_best, rsb_perf_t mkl_csr_op_time_best, rsb_perf_t at_op_time_best, rsb_perf_t at_mkl_csr_op_time_best, rsb_int_t at_cn, rsb_int_t at_mkl_csr_cn, rsb_time_t at_t, rsb_int_t at_eps, const struct rsb_ts_t*otposp, const struct rsb_ts_t [...]
+rsb_err_t rsb__pr_dump(const void*rsprpv, const rsb_char_t**filenamea, rsb_int_t*ca, const rsb_int_t*incXa, const rsb_int_t*incYa, const rsb_int_t*nrhsa, const rsb_type_t*typecodes, const rsb_int_t *ta);
+rsb_err_t rsb__pr_dump_inner(const void*rsprpv, const rsb_char_t**filenamea, rsb_int_t*ca, const rsb_int_t*incXa, const rsb_int_t*incYa, const rsb_int_t*nrhsa, const rsb_type_t*typecodes, const rsb_int_t*ta, const int*filenameifp, const int*ifilenameifp, const int*cifp , const int*incXifp , const int*incYifp , const int*nrhsifp , const int*typecodefip , const int*tifp, const rsb_trans_t*tfp, rsb_flags_t flagsA, rsb_flags_t nflagsA, rsb_char_t *ltag);
+rsb_err_t rsb__pr_free(void*rsprpv);
+rsb_err_t rsb__pr_dumpfiles(const rsb_char_t **argv, const int argc);
+rsb_err_t rsb__pr_save(const rsb_char_t * RSB_RESTRICT filename, /*const*/ void * RSB_RESTRICT rsprpv, const rsb_char_t**RSB_RESTRICT filenamea, rsb_int_t*RSB_RESTRICT ca, const rsb_int_t*RSB_RESTRICT incXa, const rsb_int_t*RSB_RESTRICT incYa, const rsb_int_t*RSB_RESTRICT nrhsa, const rsb_type_t*RSB_RESTRICT typecodes, const rsb_int_t*RSB_RESTRICT ta, rsb_bool_t can_overwrite);
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+#endif /* RSB_PR_H_INCLUDED */
+
+/* @endcond */
diff --git a/rsb_prec.c b/rsb_prec.c
new file mode 100644
index 0000000..9ffe21f
--- /dev/null
+++ b/rsb_prec.c
@@ -0,0 +1,341 @@
+
+/* @cond INNERDOC */
+/**
+ * @file
+ * @brief
+ * Auxiliary functions.
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#define RSB_WANT_OMP        1
+#define RSB_MAX_OMP_THREADS 4
+#include <omp.h>       /* OpenMP parallelism (EXPERIMENTAL) */
+
+
+#include "rsb_common.h"
+rsb_err_t rsb_do_csr_ilu0_DOUBLE(struct rsb_coo_matrix_t * coop){
+	/**
+	 * \ingroup gr_internals
+		FIXME: INCOMPLETE, EXPERIMENTAL, TEMPORARILY HERE
+		On exit, the matrix will contain the L and U factors of a pattern preserving incomplete LU factorization (ILU 0).
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t i;
+
+{
+	double *VA = coop->VA;
+	const rsb_coo_idx_t *PA = coop->IA;
+	const rsb_coo_idx_t *JA = coop->JA;
+	for(i=1;i<coop->nr;++i)
+	{
+		const rsb_nnz_idx_t ifp = PA[i],ilp = PA[i+1],irnz = ilp-ifp;
+		rsb_nnz_idx_t idp = RSB_MARKER_NNZ_VALUE,ikp = RSB_MARKER_NNZ_VALUE;
+		if(irnz)
+		{
+
+			idp = rsb__nnz_split_coo_bsearch(JA+ifp,i,irnz)+ifp;
+			assert(idp<=ilp);
+			assert(idp>=ifp);
+			for(ikp=ifp;ikp<idp;++ikp)// k = 1...i-1
+			{
+				/* FIXME: write a sparse vectors dot product macro and apply it here */
+				const rsb_nnz_idx_t k = JA[ikp],kfp = PA[k],klp = PA[k+1],krnz = klp-kfp;
+				const int kdp = rsb__nnz_split_coo_bsearch(JA+kfp,k,krnz)+kfp;
+				rsb_nnz_idx_t kjp = kfp,ijp = ikp+1;
+				VA[ikp]/=VA[kdp];
+				/* FIXME: to optimize this phase, we should loop on the shorter row */
+				for(;ijp<ilp;++ijp)// j = k+1...n
+				{
+					for(;JA[kjp]<JA[ijp] && kjp<klp;++kjp)
+						;
+					if(kjp==klp)
+						goto out;
+					/* JA[kjp]>=JA[ijp] */
+					for(;JA[kjp]>JA[ijp] && ijp<ilp;++ijp)
+						;
+					if(ijp==ilp)
+						goto out;
+					/* JA[kjp]==JA[ijp] */
+					VA[ijp]-=VA[ikp]*VA[kjp];
+				}
+out:
+				RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+				
+			}
+		}
+	}
+}
+	RSB_DO_ERR_RETURN(errval)
+}
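+/* A minimal calling sketch (illustrative only, not part of the library;
+ * the array contents and helper name are hypothetical). For the 2 x 2 CSR
+ * matrix A = [ 4 2 ; 1 3 ], ILU(0) yields l21 = 1/4 = 0.25 and
+ * u22 = 3 - 0.25*2 = 2.5, stored in place of the original values:
+ */
+#if 0
+static void rsb_ilu0_sketch(void)
+{
+	double VA[] = { 4.0, 2.0, 1.0, 3.0 };   /* values, row by row */
+	rsb_coo_idx_t IA[] = { 0, 2, 4 };       /* CSR row pointers */
+	rsb_coo_idx_t JA[] = { 0, 1, 0, 1 };    /* column indices */
+	struct rsb_coo_matrix_t coo = { .VA = VA, .IA = IA, .JA = JA, .nr = 2 };
+	rsb_do_csr_ilu0_DOUBLE(&coo);           /* VA becomes { 4, 2, 0.25, 2.5 } */
+}
+#endif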
+rsb_err_t rsb_do_csr_ilu0_FLOAT(struct rsb_coo_matrix_t * coop){
+	/**
+	 * \ingroup gr_internals
+		FIXME: INCOMPLETE, EXPERIMENTAL, TEMPORARILY HERE
+		On exit, the matrix will contain the L and U factors of a pattern preserving incomplete LU factorization (ILU 0).
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t i;
+
+{
+	float *VA = coop->VA;
+	const rsb_coo_idx_t *PA = coop->IA;
+	const rsb_coo_idx_t *JA = coop->JA;
+	for(i=1;i<coop->nr;++i)
+	{
+		const rsb_nnz_idx_t ifp = PA[i],ilp = PA[i+1],irnz = ilp-ifp;
+		rsb_nnz_idx_t idp = RSB_MARKER_NNZ_VALUE,ikp = RSB_MARKER_NNZ_VALUE;
+		if(irnz)
+		{
+
+			idp = rsb__nnz_split_coo_bsearch(JA+ifp,i,irnz)+ifp;
+			assert(idp<=ilp);
+			assert(idp>=ifp);
+			for(ikp=ifp;ikp<idp;++ikp)// k = 1...i-1
+			{
+				/* FIXME: write a sparse vectors dot product macro and apply it here */
+				const rsb_nnz_idx_t k = JA[ikp],kfp = PA[k],klp = PA[k+1],krnz = klp-kfp;
+				const int kdp = rsb__nnz_split_coo_bsearch(JA+kfp,k,krnz)+kfp;
+				rsb_nnz_idx_t kjp = kfp,ijp = ikp+1;
+				VA[ikp]/=VA[kdp];
+				/* FIXME: to optimize this phase, we should loop on the shorter row */
+				for(;ijp<ilp;++ijp)// j = k+1...n
+				{
+					for(;JA[kjp]<JA[ijp] && kjp<klp;++kjp)
+						;
+					if(kjp==klp)
+						goto out;
+					/* JA[kjp]>=JA[ijp] */
+					for(;JA[kjp]>JA[ijp] && ijp<ilp;++ijp)
+						;
+					if(ijp==ilp)
+						goto out;
+					/* JA[kjp]==JA[ijp] */
+					VA[ijp]-=VA[ikp]*VA[kjp];
+				}
+out:
+				RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+				
+			}
+		}
+	}
+}
+	RSB_DO_ERR_RETURN(errval)
+}
+rsb_err_t rsb_do_csr_ilu0_FLOAT_COMPLEX(struct rsb_coo_matrix_t * coop){
+	/**
+	 * \ingroup gr_internals
+		FIXME: INCOMPLETE, EXPERIMENTAL, TEMPORARILY HERE
+		On exit, the matrix will contain the L and U factors of a pattern preserving incomplete LU factorization (ILU 0).
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t i;
+
+{
+	float complex *VA = coop->VA;
+	const rsb_coo_idx_t *PA = coop->IA;
+	const rsb_coo_idx_t *JA = coop->JA;
+	for(i=1;i<coop->nr;++i)
+	{
+		const rsb_nnz_idx_t ifp = PA[i],ilp = PA[i+1],irnz = ilp-ifp;
+		rsb_nnz_idx_t idp = RSB_MARKER_NNZ_VALUE,ikp = RSB_MARKER_NNZ_VALUE;
+		if(irnz)
+		{
+
+			idp = rsb__nnz_split_coo_bsearch(JA+ifp,i,irnz)+ifp;
+			assert(idp<=ilp);
+			assert(idp>=ifp);
+			for(ikp=ifp;ikp<idp;++ikp)// k = 1...i-1
+			{
+				/* FIXME: write a sparse vectors dot product macro and apply it here */
+				const rsb_nnz_idx_t k = JA[ikp],kfp = PA[k],klp = PA[k+1],krnz = klp-kfp;
+				const int kdp = rsb__nnz_split_coo_bsearch(JA+kfp,k,krnz)+kfp;
+				rsb_nnz_idx_t kjp = kfp,ijp = ikp+1;
+				VA[ikp]/=VA[kdp];
+				/* FIXME: to optimize this phase, we should loop on the shorter row */
+				for(;ijp<ilp;++ijp)// j = k+1...n
+				{
+					for(;JA[kjp]<JA[ijp] && kjp<klp;++kjp)
+						;
+					if(kjp==klp)
+						goto out;
+					/* JA[kjp]>=JA[ijp] */
+					for(;JA[kjp]>JA[ijp] && ijp<ilp;++ijp)
+						;
+					if(ijp==ilp)
+						goto out;
+					/* JA[kjp]==JA[ijp] */
+					VA[ijp]-=VA[ikp]*VA[kjp];
+				}
+out:
+				RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+				
+			}
+		}
+	}
+}
+	RSB_DO_ERR_RETURN(errval)
+}
+rsb_err_t rsb_do_csr_ilu0_DOUBLE_COMPLEX(struct rsb_coo_matrix_t * coop){
+	/**
+	 * \ingroup gr_internals
+		FIXME: INCOMPLETE, EXPERIMENTAL, TEMPORARILY HERE
+		On exit, the matrix will contain the L and U factors of a pattern preserving incomplete LU factorization (ILU 0).
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t i;
+
+{
+	double complex *VA = coop->VA;
+	const rsb_coo_idx_t *PA = coop->IA;
+	const rsb_coo_idx_t *JA = coop->JA;
+	for(i=1;i<coop->nr;++i)
+	{
+		const rsb_nnz_idx_t ifp = PA[i],ilp = PA[i+1],irnz = ilp-ifp;
+		rsb_nnz_idx_t idp = RSB_MARKER_NNZ_VALUE,ikp = RSB_MARKER_NNZ_VALUE;
+		if(irnz)
+		{
+
+			idp = rsb__nnz_split_coo_bsearch(JA+ifp,i,irnz)+ifp;
+			assert(idp<=ilp);
+			assert(idp>=ifp);
+			for(ikp=ifp;ikp<idp;++ikp)// k = 1...i-1
+			{
+				/* FIXME: write a sparse vectors dot product macro and apply it here */
+				const rsb_nnz_idx_t k = JA[ikp],kfp = PA[k],klp = PA[k+1],krnz = klp-kfp;
+				const int kdp = rsb__nnz_split_coo_bsearch(JA+kfp,k,krnz)+kfp;
+				rsb_nnz_idx_t kjp = kfp,ijp = ikp+1;
+				VA[ikp]/=VA[kdp];
+				/* FIXME: to optimize this phase, we should loop on the shorter row */
+				for(;ijp<ilp;++ijp)// j = k+1...n
+				{
+					for(;JA[kjp]<JA[ijp] && kjp<klp;++kjp)
+						;
+					if(kjp==klp)
+						goto out;
+					/* JA[kjp]>=JA[ijp] */
+					for(;JA[kjp]>JA[ijp] && ijp<ilp;++ijp)
+						;
+					if(ijp==ilp)
+						goto out;
+					/* JA[kjp]==JA[ijp] */
+					VA[ijp]-=VA[ikp]*VA[kjp];
+				}
+out:
+				RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+				
+			}
+		}
+	}
+}
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__prec_ilu0(struct rsb_mtx_t * mtxAp){
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_coo_matrix_t coo;
+
+	if(!mtxAp || !rsb__is_terminal_recursive_matrix(mtxAp) ||
+		 !rsb__is_css_matrix(mtxAp) || (mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES) ||
+		 /*mtxAp->typecode != RSB_NUMERICAL_TYPE_DOUBLE  || */!rsb__is_square(mtxAp) || rsb__is_symmetric(mtxAp) ||
+ 		RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_UNIT_DIAG_IMPLICIT)
+		)
+	{
+		RSB_ERROR(RSB_ERRM_ES);
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+	if(mtxAp->nr==1)
+		goto err;
+	if((errval = rsb__project_rsb_to_coo(mtxAp,&coo))!=RSB_ERR_NO_ERROR)
+		goto err;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+		return rsb_do_csr_ilu0_DOUBLE(&coo);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+		return rsb_do_csr_ilu0_FLOAT(&coo);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+		return rsb_do_csr_ilu0_FLOAT_COMPLEX(&coo);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+		return rsb_do_csr_ilu0_DOUBLE_COMPLEX(&coo);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+	errval = RSB_ERR_INTERNAL_ERROR;
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__prec_csr_ilu0(struct rsb_coo_matrix_t * coop){
+	// FIXME: temporary
+	if(coop->nr==1)
+		goto err;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( coop->typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+				return rsb_do_csr_ilu0_DOUBLE(coop);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( coop->typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+				return rsb_do_csr_ilu0_FLOAT(coop);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( coop->typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+				return rsb_do_csr_ilu0_FLOAT_COMPLEX(coop);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( coop->typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+				return rsb_do_csr_ilu0_DOUBLE_COMPLEX(coop);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+err:
+	return RSB_ERR_INTERNAL_ERROR;
+}
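+/* Dispatch note (a sketch of the logic above): with coop->typecode equal to
+ * RSB_NUMERICAL_TYPE_DOUBLE, rsb__prec_csr_ilu0(coop) forwards to
+ * rsb_do_csr_ilu0_DOUBLE(coop); a typecode matching none of the enabled
+ * numerical types falls through to RSB_ERR_INTERNAL_ERROR. */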
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+
+/* @endcond */
diff --git a/rsb_prec.h b/rsb_prec.h
new file mode 100644
index 0000000..0e0ef11
--- /dev/null
+++ b/rsb_prec.h
@@ -0,0 +1,66 @@
+
+/* @cond INNERDOC */
+/**
+ * @file
+ * @brief
+ * Auxiliary functions.
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+
+
+#ifndef RSB_PREC_H_INCLUDED
+#define RSB_PREC_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#define RSB_WANT_OMP        1
+#define RSB_MAX_OMP_THREADS 4
+
+
+#include "rsb_common.h"
+rsb_err_t rsb_do_csr_ilu0_DOUBLE(struct rsb_coo_matrix_t * coop);
+rsb_err_t rsb_do_csr_ilu0_FLOAT(struct rsb_coo_matrix_t * coop);
+rsb_err_t rsb_do_csr_ilu0_FLOAT_COMPLEX(struct rsb_coo_matrix_t * coop);
+rsb_err_t rsb_do_csr_ilu0_DOUBLE_COMPLEX(struct rsb_coo_matrix_t * coop);
+
+rsb_err_t rsb__prec_ilu0(struct rsb_mtx_t * mtxAp);
+
+rsb_err_t rsb__prec_csr_ilu0(struct rsb_coo_matrix_t * coop);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* RSB_PREC_H_INCLUDED */
+
+
+/* @endcond */
diff --git a/rsb_prec.m4 b/rsb_prec.m4
new file mode 100644
index 0000000..549e365
--- /dev/null
+++ b/rsb_prec.m4
@@ -0,0 +1,186 @@
+dnl
+dnl
+dnl	@author: Michele Martone
+dnl
+ifelse(LIBMMVBR_INCLUDED_PREC_M4,1,`',`
+include(`rsb_misc.m4')dnl
+include(`do_unroll.m4')dnl
+/* @cond INNERDOC */
+dnl
+/**
+ * @file
+ * @brief
+ * Auxiliary functions.
+ */
+RSB_M4_HEADER_MESSAGE()dnl
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`
+#ifndef RSB_PREC_H_INCLUDED
+#define RSB_PREC_H_INCLUDED
+')
+dnl
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+ifdef(`RSB_M4_WANT_OMP',`dnl
+dnl	FIXME : this should be moved elsewhere
+`#define RSB_WANT_OMP        '1
+`#define RSB_MAX_OMP_THREADS 'RSB_M4_MAX_OMP_THREADS
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+#include <omp.h>       /* OpenMP parallelism (EXPERIMENTAL) */
+')
+')dnl
+
+dnl
+#include "rsb_common.h"
+dnl #include "rsb_internals.h"
+dnl #include "rsb_types.h"
+dnl 
+dnl
+dnl
+dnl	FIXME : COMMENT THIS FILE
+dnl	-------------------------
+dnl
+dnl
+foreach(`mtype',RSB_M4_TYPES,`dnl
+dnl
+dnl `rsb_err_t rsb_do_csr_ilu0_'touppercase(RSB_M4_CHOPSPACES(mtype))`(struct rsb_mtx_t * mtxAp)'dnl
+`rsb_err_t rsb_do_csr_ilu0_'touppercase(RSB_M4_CHOPSPACES(mtype))`(struct rsb_coo_matrix_t * coop)'dnl
+dnl `rsb_err_t rsb_do_csr_ilu0_'touppercase(RSB_M4_CHOPSPACES(mtype))(mtype `*VA, const rsb_coo_idx_t *PA, const rsb_coo_idx_t *JA)'dnl
+ifdef(`ONLY_WANT_HEADERS',`;
+',`dnl
+{
+	/**
+	 * \ingroup gr_internals
+		FIXME: INCOMPLETE, EXPERIMENTAL, TEMPORARILY HERE
+		On exit, the matrix will contain the L and U factors of a pattern preserving incomplete LU factorization (ILU 0).
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t i;
+
+{
+	mtype *VA = coop->VA;
+	const rsb_coo_idx_t *PA = coop->IA;
+	const rsb_coo_idx_t *JA = coop->JA;
+dnl	const rsb_coo_idx_t *PA = mtxAp->bpntr;
+dnl	const rsb_coo_idx_t *JA = mtxAp->bindx;
+	for(i=1;i<coop->nr;++i)
+	{
+		const rsb_nnz_idx_t ifp = PA[i],ilp = PA[i+1],irnz = ilp-ifp;
+		rsb_nnz_idx_t idp = RSB_MARKER_NNZ_VALUE,ikp = RSB_MARKER_NNZ_VALUE;
+		if(irnz)
+		{
+
+			idp = rsb__nnz_split_coo_bsearch(JA+ifp,i,irnz)+ifp;
+			assert(idp<=ilp);
+			assert(idp>=ifp);
+			for(ikp=ifp;ikp<idp;++ikp)// k = 1...i-1
+			{
+				/* FIXME: write a sparse vectors dot product macro and apply it here */
+				const rsb_nnz_idx_t k = JA[ikp],kfp = PA[k],klp = PA[k+1],krnz = klp-kfp;
+				const int kdp = rsb__nnz_split_coo_bsearch(JA+kfp,k,krnz)+kfp;
+				rsb_nnz_idx_t kjp = kfp,ijp = ikp+1;
+				VA[ikp]/=VA[kdp];
+				/* FIXME: to optimize this phase, we should loop on the shorter row */
+				for(;ijp<ilp;++ijp)// j = k+1...n
+				{
+					for(;JA[kjp]<JA[ijp] && kjp<klp;++kjp)
+						;
+					if(kjp==klp)
+						goto out;
+					/* JA[kjp]>=JA[ijp] */
+					for(;JA[kjp]>JA[ijp] && ijp<ilp;++ijp)
+						;
+					if(ijp==ilp)
+						goto out;
+					/* JA[kjp]==JA[ijp] */
+					VA[ijp]-=VA[ikp]*VA[kjp];
+				}
+out:
+				RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+				
+			}
+		}
+	}
+}
+dnl err:
+	RSB_DO_ERR_RETURN(errval)
+}
+dnl
+dnl
+')dnl
+')dnl
+dnl
+
+dnl const void * rsb__prec_ilu0(struct rsb_mtx_t * mtxAp)`'dnl
+rsb_err_t rsb__prec_ilu0(struct rsb_mtx_t * mtxAp)`'dnl
+ifdef(`ONLY_WANT_HEADERS',`;
+',`dnl
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_coo_matrix_t coo;
+
+	if(!mtxAp || !rsb__is_terminal_recursive_matrix(mtxAp) ||
+		 !rsb__is_css_matrix(mtxAp) || (mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES) ||
+		 /*mtxAp->typecode != RSB_NUMERICAL_TYPE_DOUBLE  || */!rsb__is_square(mtxAp) || rsb__is_symmetric(mtxAp) ||
+ 		RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_UNIT_DIAG_IMPLICIT)
+		)
+	{
+		RSB_ERROR(RSB_ERRM_ES);
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+	if(mtxAp->nr==1)
+		goto err;
+	if((errval = rsb__project_rsb_to_coo(mtxAp,&coo))!=RSB_ERR_NO_ERROR)
+		goto err;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( mtxAp->typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+		return rsb_do_csr_ilu0_`'touppercase(RSB_M4_CHOPSPACES(mtype))(&coo);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+')dnl
+	errval = RSB_ERR_INTERNAL_ERROR;
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+')dnl
+dnl
+
+rsb_err_t rsb__prec_csr_ilu0(struct rsb_coo_matrix_t * coop)`'dnl
+ifdef(`ONLY_WANT_HEADERS',`;
+',`dnl
+{
+	// FIXME: temporary
+	if(coop->nr==1)
+		goto err;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( coop->typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+		dnl return rsb_do_csr_ilu0_`'touppercase(RSB_M4_CHOPSPACES(mtype))(coo->VA,coo->IA,coo->JA);
+		return rsb_do_csr_ilu0_`'touppercase(RSB_M4_CHOPSPACES(mtype))(coop);
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+')dnl
+err:
+	return RSB_ERR_INTERNAL_ERROR;
+}
+')dnl
+dnl
+
+dnl
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`
+#endif /* RSB_PREC_H_INCLUDED */
+')
+')
+dnl
+/* @endcond */
+dnl
diff --git a/rsb_psblas.h b/rsb_psblas.h
new file mode 100644
index 0000000..66caadf
--- /dev/null
+++ b/rsb_psblas.h
@@ -0,0 +1,33 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/** @file
+ *  @brief
+ *  Some definitions pertaining to PSBLAS.
+ * */
+#ifndef RSB_PSBLAS_H_INCLUDED
+#define RSB_PSBLAS_H_INCLUDED
+#define  RSB_PSBLAS_TRANS_N  'N'
+#define  RSB_PSBLAS_TRANS_T  'T'
+#define  RSB_PSBLAS_TRANS_C  'C'
+#endif /* RSB_PSBLAS_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_rec.c b/rsb_rec.c
new file mode 100644
index 0000000..3d6aa02
--- /dev/null
+++ b/rsb_rec.c
@@ -0,0 +1,1940 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/**
+ * @file
+ * @brief Recursion handling code
+ * @author Michele Martone
+ * */
+#include "rsb_common.h"
+#include <string.h>	/*memcmp, strchr*/
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+#define RSB_MTX_CMP_NNZ_ASC 0 /* ascending  */
+#define RSB_MTX_CMP_NNZ_DES 1 /* descending */
+#define RSB_MERGE_USE_TMP_COOMTX 1 /* */
+
+/* Macros to locate a free submatrix pointer after merging has carved holes in the submatrices array. */
+#define RSB_REC_FREE_SUBM_FLAG (!0x0) /* 0x0 is forbidden -- because it would rule out zeroing of the struct in RSB_MTX_INIT_LEAF ! */
+#define RSB_REC_USED_SUBM_FLAG (!(RSB_REC_FREE_SUBM_FLAG))  /* Anything different from RSB_REC_FREE_SUBM_FLAG */
+#define RSB_REC_MARK_SUBM_FREE(SM) if(SM)((SM)->flags=RSB_REC_FREE_SUBM_FLAG);(SM)=NULL; /* FIXME: for safety, one might BZERO former leaves here. */
+#define RSB_REC_IS_SUBM_FREE(SM) ((SM)->flags==RSB_REC_FREE_SUBM_FLAG)
+#define RSB_REC_MARK_SUBM_USED(SM) if(SM)(SM)->flags=RSB_REC_USED_SUBM_FLAG;
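+/* Usage sketch (illustrative): a leaf pointer sm left over after merging is
+ * released via RSB_REC_MARK_SUBM_FREE(sm), which tags the struct's flags as
+ * free and NULLs the local pointer; such slots can later be recognized with
+ * RSB_REC_IS_SUBM_FREE() and recycled via RSB_REC_MARK_SUBM_USED(). */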
+
+int rsb__compar_rcsr_matrix_for_spsvl(const void * ap, const void * bp)
+{
+	/**
+		\ingroup gr_internals
+		A compare function to be used with qsort.
+		Useful for Lower Triangular Solve.
+	*/
+	struct rsb_translated_matrix_t *mtxAp = (struct rsb_translated_matrix_t*)ap;
+	struct rsb_translated_matrix_t *mtxBp = (struct rsb_translated_matrix_t*)bp;
+	rsb_coo_idx_t aro = mtxAp->roff, aco = mtxAp->coff, ar = mtxAp->nr;
+	rsb_coo_idx_t bro = mtxBp->roff, bco = mtxBp->coff, br = mtxBp->nr;
+
+	// the block that ends before the other one begins comes first
+	if(  aro+ar <= bro )
+		return -1;
+	if(  bro+br <= aro )
+		return 1;
+	// the one beginning later comes later, unless the other matrix is on the diagonal
+	if(  aro > bro )
+		return bro==bco?-1:1;
+	if(  aro < bro )
+		return aro==aco?1:-1;
+	// if aligned, the one beginning later comes after
+	if(  aco > bco )
+		return 1;
+	else
+		return -1;
+}
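+/* Worked example (illustrative): under this ordering a leaf covering rows
+ * [0,10) sorts before any leaf whose rows start at 10 or later (first rule);
+ * among row-overlapping leaves, being on the diagonal and then the column
+ * offset break the tie, as encoded above. */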
+
+static int rsb_compar_rcsr_matrix_for_get_csr(const void * ap, const void * bp)
+{
+	/**
+		\ingroup gr_internals
+		A compare function to be used with qsort.
+	*/
+	struct rsb_translated_matrix_t *mtxAp = (struct rsb_translated_matrix_t*)ap;
+	struct rsb_translated_matrix_t *mtxBp = (struct rsb_translated_matrix_t*)bp;
+	rsb_coo_idx_t aro = mtxAp->roff, aco = mtxAp->coff;
+	rsb_coo_idx_t bro = mtxBp->roff, bco = mtxBp->coff;
+
+	if(  aro > bro )
+		return 1;
+	if(  aro < bro )
+		return -1;
+	if(  aco > bco )
+		return 1;
+	if(  aco < bco )
+		return -1;
+	return 0;
+}
+
+static int rsb_compar_rcsr_matrix_for_spsvut(const void * ap, const void * bp)
+{
+	/**
+		\ingroup gr_internals
+		A compare function to be used with qsort.
+		Useful for Upper Triangular Solve Transposed.
+
+		It is similar to
+		rsb__compar_rcsr_matrix_for_spsvl, with the substitutions:
+			aro -> bco+bc
+			bro -> aro+ar
+			aco -> bco+bc
+			bco -> aro+ar
+			... <and vice versa>
+	*/
+	struct rsb_translated_matrix_t *mtxAp = (struct rsb_translated_matrix_t*)ap;
+	struct rsb_translated_matrix_t *mtxBp = (struct rsb_translated_matrix_t*)bp;
+	rsb_coo_idx_t aro = mtxAp->roff, aco = mtxAp->coff, ac = mtxAp->nc;
+	rsb_coo_idx_t bro = mtxBp->roff, bco = mtxBp->coff, bc = mtxBp->nc;
+
+	// the block that ends before the other one begins comes first
+	if(  bco+bc <= aco )
+		return 1;
+	if(  aco+ac <= bco )
+		return -1;
+	// the one beginning later comes later, unless the other matrix is on the diagonal
+	if(  aco > bco )
+		return bro==bco?-1:1;
+	if(  aco < bco )
+		return aro==aco?1:-1;
+	// if aligned, the one beginning later comes after
+	if(  aro > bro )
+		return 1;
+	else
+		return -1;
+}
+
+static int rsb__compar_rcsr_matrix_for_spsvlt(const void * ap, const void * bp)
+{
+	/**
+		\ingroup gr_internals
+		A compare function to be used with qsort.
+		Useful for Lower Triangular Solve Transposed.
+
+		It is similar to
+		rsb__compar_rcsr_matrix_for_spsvl, with the substitutions:
+			aro -> bco+bc
+			bro -> aro+ar
+			aco -> bco+bc
+			bco -> aro+ar
+			... <and vice versa>
+	*/
+	struct rsb_translated_matrix_t *mtxAp = (struct rsb_translated_matrix_t*)ap;
+	struct rsb_translated_matrix_t *mtxBp = (struct rsb_translated_matrix_t*)bp;
+	rsb_coo_idx_t aro = mtxAp->roff, aco = mtxAp->coff, ar = mtxAp->nr, ac = mtxAp->nc;
+	rsb_coo_idx_t bro = mtxBp->roff, bco = mtxBp->coff, /*br = mtxBp->nr,*/ bc = mtxBp->nc;
+
+	// the block that ends before the other one begins comes first
+	if(  aco >= bco+bc )
+		return -1;
+	if(  bco >= aco+ac )
+		return 1;
+	// the one beginning later comes later, unless the other matrix is on the diagonal
+	if(  aco+ac < bco+bc )
+		return bro==bco?-1:1;
+	if(  aco+ac > bco+bc )
+		return aro==aco?1:-1;
+	// if aligned, the one beginning later comes after
+	if(  aro+ar > bro+ar )
+		return -1;
+	else
+		return 1;
+}
+
+static int rsb_compar_rcsr_matrix_for_spsvu(const void * ap, const void * bp)
+{
+	/**
+		\ingroup gr_internals
+		A compare function to be used with qsort.
+	*/
+	struct rsb_translated_matrix_t *mtxAp = (struct rsb_translated_matrix_t*)ap;
+	struct rsb_translated_matrix_t *mtxBp = (struct rsb_translated_matrix_t*)bp;
+	rsb_coo_idx_t aro = mtxAp->roff, aco = mtxAp->coff, ar = mtxAp->nr, ac = mtxAp->nc;
+	rsb_coo_idx_t bro = mtxBp->roff, bco = mtxBp->coff, br = mtxBp->nr;
+
+	// the block that ends before the other one begins comes first
+	if(  aro >= bro+br )
+		return -1;
+	if(  bro >= aro+ar )
+		return 1;
+	// the one beginning later comes later, unless the other matrix is on the diagonal
+	if(  aro+ar < bro+br )
+		return bro==bco?-1:1;
+	if(  aro+ar > bro+br )
+		return aro==aco?1:-1;
+	// if aligned, the one beginning later comes after
+	if(  aco+ac > bco+ac )
+		return -1;
+	else
+		return 1;
+}
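+/* Worked example (illustrative): for the upper triangular case the ordering
+ * is reversed with respect to rsb__compar_rcsr_matrix_for_spsvl: a leaf whose
+ * rows start at or after another leaf's row range ends sorts first, i.e. row
+ * blocks are visited bottom-up. */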
+
+#define RSB_ASC_CMP_FOR_QSRT(A,B) ( ( (A) > (B) ) ? (1) : (( (A) == (B) ) ? 0 : -1) )
+
+static int rsb_compar_nnz_idx_t(const void * ap, const void * bp)
+{
+	/**
+		\ingroup gr_internals
+		A compare function to be used with qsort.
+	*/
+	rsb_nnz_idx_t a=*(rsb_nnz_idx_t*)ap;
+	rsb_nnz_idx_t b=*(rsb_nnz_idx_t*)bp;
+
+        return RSB_ASC_CMP_FOR_QSRT(a,b);
+}
+
+
+static int rsb__compar_mtx_nnz_des(const void * ap, const void * bp)
+{
+	/**
+		\ingroup gr_internals
+		A compare function to be used with qsort.
+		Compare submatrices pointers in descending order of nnz occupation.
+	*/
+	const struct rsb_mtx_t*mtxAp = *(struct rsb_mtx_t**)ap;
+	const struct rsb_mtx_t*mtxBp = *(struct rsb_mtx_t**)bp;
+	rsb_nnz_idx_t nnzA = mtxAp->nnz;
+	rsb_nnz_idx_t nnzB = mtxBp->nnz;
+
+	return -RSB_ASC_CMP_FOR_QSRT(nnzA,nnzB);
+}
+
+static int rsb__compar_mtx_nnz_asc(const void * ap, const void * bp)
+{
+	/**
+		\ingroup gr_internals
+		A compare function to be used with qsort.
+		Compare submatrices pointers in ascending order of nnz occupation.
+	*/
+	const struct rsb_mtx_t*mtxAp = *(struct rsb_mtx_t**)ap;
+	const struct rsb_mtx_t*mtxBp = *(struct rsb_mtx_t**)bp;
+	rsb_nnz_idx_t nnzA = mtxAp->nnz;
+	rsb_nnz_idx_t nnzB = mtxBp->nnz;
+
+	return  RSB_ASC_CMP_FOR_QSRT(nnzA,nnzB);
+}
+
+rsb_err_t rsb__srt_subm_ptr_array(struct rsb_mtx_t ** mtxApp, rsb_submatrix_idx_t nsm, int criteria)
+{
+	/**
+		\ingroup gr_internals
+		Sort submatrices pointers.
+		TODO: introduce other sorting criteria (e.g. index occupation, ...).
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	
+	switch(criteria)
+	{
+		case(RSB_MTX_CMP_NNZ_ASC):
+		qsort( mtxApp, (size_t) nsm, sizeof(struct rsb_mtx_t*), &rsb__compar_mtx_nnz_asc);
+		break;
+
+		case(RSB_MTX_CMP_NNZ_DES):
+		qsort( mtxApp, (size_t) nsm, sizeof(struct rsb_mtx_t*), &rsb__compar_mtx_nnz_des);
+		break;
+	}
+
+	return errval;
+}
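+/* A minimal usage sketch (illustrative only; the subm[] contents are
+ * hypothetical leaf pointers):
+ */
+#if 0
+	struct rsb_mtx_t *subm[3] = { mtxAp1, mtxAp2, mtxAp3 };
+	/* heaviest leaves first, e.g. to schedule large submatrices early: */
+	rsb__srt_subm_ptr_array(subm, 3, RSB_MTX_CMP_NNZ_DES);
+#endif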
+
+rsb_err_t rsb__sort_array_of_leaf_matrices_for_ussv(const struct rsb_mtx_t * mtxAp, struct rsb_translated_matrix_t *leaf_matrices, rsb_submatrix_idx_t n, rsb_trans_t transl)
+{
+	/**
+		\ingroup gr_internals
+		Sort rsb_translated_matrix_t structures.
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	if(rsb__is_upper_triangle(mtxAp->flags))
+	{
+		if(RSB_DOES_TRANSPOSE(transl))
+			errval = rsb__sort_array_of_leaf_matrices(NULL,leaf_matrices,n,rsb_op_spsvut);
+		else
+			errval = rsb__sort_array_of_leaf_matrices(NULL,leaf_matrices,n,rsb_op_spsvu);
+	}
+	else
+	if(rsb__is_lower_triangle(mtxAp->flags))
+	{
+		if(RSB_DOES_TRANSPOSE(transl))
+			errval = rsb__sort_array_of_leaf_matrices(NULL,leaf_matrices,n,rsb_op_spsvlt);
+		else
+			errval = rsb__sort_array_of_leaf_matrices(NULL,leaf_matrices,n,rsb_op_spsvl);
+	}
+	else
+	{
+		/*
+		RSB_ERROR(RSB_ERRM_ES);
+		errval = RSB_ERR_BADARGS;
+		*/
+	}
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__sort_array_of_leaf_matrices(const struct rsb_translated_matrix_t *rmatrix,struct rsb_translated_matrix_t *matrices, rsb_submatrix_idx_t n, enum rsb_op_t op)
+{
+	/**
+		\ingroup gr_internals
+	  	Sorts an array of leaf matrices in an order which will be suitable for SpMV, SpSV, ... later on.
+		FIXME: rmatrix is the root matrix, and is currently unused.
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_submatrix_idx_t ij;
+	rsb_nnz_idx_t * idx = NULL;
+	struct rsb_translated_matrix_t *smatrices=NULL;
+
+	if(!matrices)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_EM);
+	}
+
+
+	switch(op)
+	{
+	case(rsb_op_spmv):
+	{
+		/* NOTE : this code is braindead : should use qsort directly instead */
+		smatrices = rsb__malloc(sizeof(struct rsb_translated_matrix_t) * n);
+		idx = rsb__malloc(2*sizeof(rsb_nnz_idx_t) * n);
+	
+		if(!smatrices || !idx){errval = RSB_ERR_ENOMEM; RSB_PERR_GOTO(err,RSB_ERRM_EM); }
+	
+		for(ij=0;ij<n;++ij)
+		{
+			/* currently, the sorting criteria is the base row only */
+			idx[2*ij+0]=matrices[ij].roff;
+			idx[2*ij+1]=ij;
+		}
+		qsort( idx , (size_t) n, 2*sizeof(rsb_nnz_idx_t), &rsb_compar_nnz_idx_t );
+		rsb__do_util_compact_permutation_nnz_idx_t_array(idx, n);
+		/* permutation */
+		for(ij=0;ij<n;++ij) smatrices[ij]=matrices[idx[ij]];
+		rsb_memcpy(matrices,smatrices,sizeof(struct rsb_translated_matrix_t)*n);
+	}
+	break;
+		case(rsb_op_spsvl):
+		{
+			qsort( matrices , (size_t) n, sizeof(struct rsb_translated_matrix_t), &rsb__compar_rcsr_matrix_for_spsvl);
+		}
+		break;
+		case(rsb_op_spsvlt):
+		{
+			qsort( matrices , (size_t) n, sizeof(struct rsb_translated_matrix_t), &rsb__compar_rcsr_matrix_for_spsvlt);
+		}
+		break;
+		case(rsb_op_spsvu):
+		{
+			qsort( matrices , (size_t) n, sizeof(struct rsb_translated_matrix_t), &rsb_compar_rcsr_matrix_for_spsvu);
+		}
+		break;
+		case(rsb_op_spsvut):
+		{
+			qsort( matrices , (size_t) n, sizeof(struct rsb_translated_matrix_t), &rsb_compar_rcsr_matrix_for_spsvut);
+		}
+		break;
+		case(rsb_op_get_csr):
+		{
+			qsort( matrices , (size_t) n, sizeof(struct rsb_translated_matrix_t), &rsb_compar_rcsr_matrix_for_get_csr);
+		}
+		break;
+		default:
+		errval = RSB_ERR_INTERNAL_ERROR;
+		break;
+	}
+err:
+	RSB_CONDITIONAL_FREE(idx);
+	RSB_CONDITIONAL_FREE(smatrices);
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__fill_array_of_leaf_matrices(const struct rsb_translated_matrix_t *tmatrix, struct rsb_translated_matrix_t *matrices, rsb_submatrix_idx_t * sip)
+{
+	/**
+		\ingroup gr_internals
+		This function fills the input array with pointers to leaf matrices.
+		The ordering of the array matters to SpMV performance; it is dealt with later.
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_submatrix_idx_t si = *sip;
+
+	if(!tmatrix || !matrices || !tmatrix->mtxlp)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	if(rsb__is_terminal_recursive_matrix(tmatrix->mtxlp))
+	{
+		/* if this is the only matrix  */
+		matrices[si].mtxlp = tmatrix->mtxlp;
+		matrices[si].roff = tmatrix->roff;
+		matrices[si].coff = tmatrix->coff;
+		matrices[si].nr = tmatrix->nr;
+		matrices[si].nc = tmatrix->nc;
+		matrices[si].level = tmatrix->level;
+		++*sip;
+	       	++si;
+		goto ok;
+	}
+	else
+	{
+		/* if tmatrix has submatrices  */
+		rsb_submatrix_idx_t i,j;
+		struct rsb_mtx_t * submatrix = NULL;
+//		rsb_coo_idx_t mB=(tmatrix->mtxlp->rpntr[rsb__recursive_middle_block_index(tmatrix->mtxlp->M_b)]);
+//		rsb_coo_idx_t kB=(tmatrix->mtxlp->cpntr[rsb__recursive_middle_block_index(tmatrix->mtxlp->K_b)]);
+
+		RSB_SUBMATRIX_FOREACH(tmatrix->mtxlp,submatrix,i,j)
+		if(submatrix)
+		{
+			/* we update submatrices with positioning info */
+			struct rsb_translated_matrix_t tsubmatrix;
+
+			tsubmatrix.mtxlp = submatrix;
+//			tsubmatrix.roff = tmatrix->roff+i*mB;
+//			tsubmatrix.coff = tmatrix->coff+j*kB;
+			tsubmatrix.nr = submatrix->nr;
+			tsubmatrix.nc = submatrix->nc;
+			tsubmatrix.roff = submatrix->roff;
+			tsubmatrix.coff = submatrix->coff;
+			tsubmatrix.level = tmatrix->level+1;
+
+			errval = rsb__fill_array_of_leaf_matrices(&tsubmatrix, matrices, sip);
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_PERR_GOTO(err,RSB_ERRM_ES);
+			}
+		}
+		goto ok;
+	}
+ok:
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__get_array_of_leaf_matrices(struct rsb_mtx_t *mtxAp, struct rsb_translated_matrix_t ** tmatricesp, rsb_submatrix_idx_t *countp)
+{
+	/**
+		\ingroup gr_internals
+	   	\return an array of leaf matrices, ordered so as to ease workload balancing on multicore platforms.
+		If *tmatricesp==NULL, the array will be allocated here; otherwise the given pointer will be used.
+	*/
+	
+	long lmc = 0; /* leaf matrices count */
+	struct rsb_translated_matrix_t * tmatrices = NULL;
+	struct rsb_translated_matrix_t tmatrix;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_submatrix_idx_t count = 0;
+
+	RSB_BZERO_P(&tmatrix);
+
+	if(!tmatricesp /*|| !countp */|| !mtxAp)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	tmatrix.mtxlp = mtxAp;
+	tmatrix.roff = tmatrix.coff = 0;
+	tmatrix.level = 0;
+
+	lmc = rsb__terminal_recursive_matrix_count(mtxAp);
+
+	if(lmc>0)
+	{
+	//	rsb_submatrix_idx_t i,j,ij;
+	//	struct rsb_mtx_t * submatrix = NULL;
+
+		if(*tmatricesp)
+			tmatrices = *tmatricesp;
+		else
+		{
+			tmatrices = rsb__malloc(sizeof(struct rsb_translated_matrix_t) * (lmc));
+			if(!tmatrices)
+			{
+				errval = RSB_ERR_ENOMEM;
+				RSB_PERR_GOTO(err,RSB_ERRM_ES);
+			}
+		}
+
+		errval = rsb__fill_array_of_leaf_matrices(&tmatrix,tmatrices,&count);
+		if(RSB_SOME_ERROR(errval))
+	       	{
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}
+#if 0
+		/*  sorting breaks Z ordering, really */
+		errval = rsb__sort_array_of_leaf_matrices(&tmatrix,tmatrices, count, rsb_op_spmv );
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	       	}
+#endif
+#if 0
+		/* debug dump */
+		for(ij=0;ij<count;++ij)
+		{
+			RSB_INFO("submatrix: %d @ (%d %d) (level %d) (nnz %d)\n",
+				ij,tmatrices[ij].roff,tmatrices[ij].coff,tmatrices[ij].level,tmatrices[ij].mtxlp->nnz);
+		}
+#endif
+
+	}
+	goto ok;
+ok:
+	if(countp)
+		*countp = count;
+	else
+		mtxAp->all_leaf_matrices_n = count;
+	*tmatricesp = tmatrices;
+	goto ret;
+err:
+	if(!*tmatricesp)
+		RSB_CONDITIONAL_FREE(tmatrices);
+ret:
+	RSB_DO_ERR_RETURN(errval)
+}
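+/* A minimal usage sketch (illustrative only; mtxAp is assumed in scope):
+ * with leaves == NULL on entry the callee allocates the array, which the
+ * caller must eventually free.
+ */
+#if 0
+	struct rsb_translated_matrix_t *leaves = NULL;
+	rsb_submatrix_idx_t nleaves = 0;
+	if( !RSB_SOME_ERROR(rsb__get_array_of_leaf_matrices(mtxAp, &leaves, &nleaves)) )
+	{
+		/* ... use leaves[0 .. nleaves-1] ... */
+	}
+	RSB_CONDITIONAL_FREE(leaves);
+#endif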
+
+static rsb_err_t rsb__refresh_array_of_leaf_matrices(struct rsb_mtx_t *mtxAp)
+{
+	/*
+	 * On error, matrix shall be unaffected.
+	 * */
+
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+#if 1
+	struct rsb_mtx_t mtxB  = *mtxAp;
+
+	/* FIXME: this method is not efficient; it should rather use realloc and pointer differences. */
+	mtxAp->all_leaf_matrices_n = 0;
+	mtxAp->all_leaf_matrices = NULL;
+
+	errval = rsb__get_array_of_leaf_matrices(mtxAp, &(mtxAp->all_leaf_matrices), &(mtxAp->all_leaf_matrices_n));
+
+	if(RSB_SOME_ERROR(errval))
+	{
+		/* restore */
+		mtxAp->all_leaf_matrices_n = mtxB.all_leaf_matrices_n;
+		mtxAp->all_leaf_matrices = mtxB.all_leaf_matrices;
+	}
+	else
+	{
+		RSB_CONDITIONAL_FREE(mtxB.all_leaf_matrices);
+	}
+
+#else
+		/* Note: this method (no reallocation) would be better, but is incomplete ...  */
+		smu = 0;
+		for(sml=0;sml<mtxAp->all_leaf_matrices_n;sml++)
+			if( mtxAp->all_leaf_matrices[sml].mtxlp != NULL )
+			{
+				mtxAp->all_leaf_matrices[smu++] = mtxAp->all_leaf_matrices[sml];
+				// mtxAp->all_leaf_matrices[smu++].mtxlp = mtxAp->all_leaf_matrices[sml].mtxlp;
+			}
+		printf("Merged %d leaves (from %d to %d).\n",sml-smu,sml,smu);
+		mtxAp->all_leaf_matrices_n = smu;
+		if(mtxAp->all_leaf_matrices_n==0)
+			RSB_CONDITIONAL_FREE(mtxAp->all_leaf_matrices);
+#endif
+
+	return errval;
+}
+	
+size_t rsb__get_index_storage_amount(const struct rsb_mtx_t *mtxAp)
+{
+	/**
+		\ingroup gr_experimental
+	   	\return the amount of allocated bytes for storage of the matrix
+		NOTE: valid only for (recursive) CSR
+		NOTE: we don't include the matrix struct size.
+	 */
+	rsb_submatrix_idx_t i,j;
+	struct rsb_mtx_t * submatrix = NULL;
+	size_t isa = 0;
+
+	if(!mtxAp)
+		goto done;
+
+	if(rsb__is_terminal_recursive_matrix(mtxAp))
+	{	
+		size_t is;
+
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_USE_HALFWORD_INDICES))
+			is=sizeof(rsb_half_idx_t);
+		else
+			is=sizeof(rsb_coo_idx_t);
+
+//		if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_WANT_BCSS_STORAGE))
+		if(mtxAp->matrix_storage == RSB_MATRIX_STORAGE_BCSR)
+			isa += (is*mtxAp->nnz)+(sizeof(rsb_coo_idx_t)*(mtxAp->Mdim+1));
+		else
+		if(mtxAp->matrix_storage == RSB_MATRIX_STORAGE_BCOR)
+//		if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_WANT_COO_STORAGE))
+			isa += 2*(is*mtxAp->nnz);
+
+		//isa += sizeof(struct rsb_mtx_t); // FIXME: should this be here ? NO: this is not index storage.
+	}
+	else
+	{
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix)
+			isa += rsb__get_index_storage_amount(submatrix);
+	}
+done:
+	return isa;
+}
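+/* Worked example (illustrative; assumes a 2-byte rsb_half_idx_t and a 4-byte
+ * rsb_coo_idx_t): a halfword-indexed CSR leaf with nnz = 1000 and Mdim = 100
+ * accounts for 2*1000 + 4*(100+1) = 2404 bytes of index storage, while the
+ * same leaf stored as halfword COO would account for 2*(2*1000) = 4000. */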
+
+rsb_submatrix_idx_t rsb__get_diagonal_elements_count(const struct rsb_mtx_t *mtxAp)
+{
+	/**
+		\ingroup gr_internals
+	   	\return the number of nonzeros in diagonal blocks aligned with the main diagonal
+	 */
+	rsb_submatrix_idx_t i,j;
+	struct rsb_mtx_t * submatrix = NULL;
+	rsb_submatrix_idx_t dse = 0;
+
+	if(!mtxAp)
+		goto done;
+
+	if(rsb__is_terminal_recursive_matrix(mtxAp) && mtxAp->roff == mtxAp->coff)
+	{
+		dse=mtxAp->nnz;
+	}
+	else
+	{
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if( submatrix && i==j && RSB_SUBMATRIX_IS_ON_DIAG(submatrix) )
+			dse += rsb__get_diagonal_elements_count(submatrix);
+	}
+done:
+	return dse;
+}
+
+static rsb_bool_t rsb_is_node_pre_last(const struct rsb_mtx_t *mtxAp)
+{
+	/* rsb_err_t errval = RSB_ERR_NO_ERROR; */
+	rsb_bool_t inpl = RSB_BOOL_FALSE;
+
+	if(rsb__is_terminal_recursive_matrix(mtxAp))
+		goto ret;
+	else
+	{
+		struct rsb_mtx_t * submatrix = NULL;
+		rsb_submatrix_idx_t i,j;
+		inpl = RSB_BOOL_TRUE;
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix && !rsb__is_terminal_recursive_matrix(submatrix))
+			inpl = RSB_BOOL_FALSE;
+	}
+
+ret:
+	return inpl;
+}
+
+struct rsb_mtx_list_t
+{
+	/* size_t sa[10000];
+	struct rsb_mtx_t*mp[10000]; */
+	size_t*sa; /* submatrices/scores array */
+	struct rsb_mtx_t**mp; /* matrices pointer array */
+	rsb_submatrix_idx_t mc; /* (leaf) matrices count (0...) */
+};
+
+static rsb_err_t rsb__leaves_analysis_rec(struct rsb_mtx_t *mtxAp, struct rsb_mtx_list_t *mlp, const int wv, rsb_bool_t wpl)
+{
+	/**
+		\ingroup gr_internals
+		Analyze submatrices and compute a score.
+		If wpl ("want pre-leaf") is set, pre-leaf groups are considered (i.e. for merging); otherwise, leaves are.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_submatrix_idx_t i,j;
+	struct rsb_mtx_t * submatrix = NULL;
+	const int miac = 1; // merge in any case --- even when no saving is gained
+
+	/* RSB_ASSERT(mtxAp->VA); RSB_ASSERT(mtxAp->bindx); RSB_ASSERT(mtxAp->bpntr); */
+
+	if(rsb__is_terminal_recursive_matrix(mtxAp))
+	{
+		if(wpl)
+			; /* merge case: nothing to do */
+		else
+		{
+			/* split case */
+			mlp->sa[mlp->mc  ]=mtxAp->nnz;
+			mlp->mp[mlp->mc++]=mtxAp;
+		}
+		goto ret;
+	}
+	else
+	{
+		if(!wpl)
+		{
+			RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+				if(submatrix)
+					errval |= rsb__leaves_analysis_rec(submatrix,mlp,wv,wpl);
+			goto ret;
+		}
+
+		if(rsb_is_node_pre_last(mtxAp))
+		{
+			int sol = 0;
+			const int vl = 2;
+			rsb_coo_idx_t /*smnr[4]={0,0,0,0},*/nr=mtxAp->nr;
+			rsb_coo_idx_t /*smnc[4]={0,0,0,0},*/nc=mtxAp->nc;
+			rsb_nnz_idx_t /*smnz[4]={0,0,0,0},*/nz=mtxAp->nnz;
+			size_t hcooio=0,hcsrio=0;
+			size_t fcooio=0,fcsrio=0;
+			size_t rsbio = rsb__get_index_storage_amount(mtxAp),bestio=rsbio;
+			const rsb_char_t sp=' ', bettermark='.'/*, bestmark='*'*/;
+			rsb_char_t hcoof=sp,hcsrf=sp,fcoof=sp,fcsrf=sp,rsbf=sp;
+			double savepcnt = 0.0;
+
+			RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+				if(submatrix)
+				{
+					/*
+					int idx=2*i+j;
+					smnr[idx]=submatrix->nr;
+					smnc[idx]=submatrix->nc;
+					smnz[idx]=submatrix->nnz; */
+					++sol;
+				}
+			//sol = mtxAp->all_leaf_matrices_n;
+			if(wv>vl)
+			RSB_STDOUT("sub-leaf: %p is %d x %d and contains %d nnz in %d leaves ('.'=fewer indices)\n",(const void*)mtxAp,nr,nc,nz,sol),
+			RSB_STDOUT("as   is:%10zu %c\n",rsbio ,rsbf);
+			if(RSB_INDICES_FIT_IN_HALFWORD(nr,nc))
+			{
+				hcooio=sizeof(rsb_half_idx_t)*2*nz;
+				hcsrio=sizeof(rsb_half_idx_t)*nz+sizeof(rsb_nnz_idx_t)*(1+nr);
+				if(hcooio<rsbio )hcoof=bettermark;
+				if(hcooio<bestio)bestio=hcooio;
+				if(hcsrio<rsbio )hcsrf=bettermark;
+				if(hcsrio<bestio)bestio=hcsrio;
+				if(wv>vl)
+				RSB_STDOUT("as HCOO:%10zu %c\n",hcooio,hcoof),
+				RSB_STDOUT("as HCSR:%10zu %c\n",hcsrio,hcsrf);
+			}
+				fcooio=sizeof(rsb_coo_idx_t)*2*nz;
+				if(fcooio<rsbio)fcoof=bettermark;
+				if(fcooio<bestio)bestio=fcooio;
+				fcsrio=sizeof(rsb_coo_idx_t)*nz+sizeof(rsb_nnz_idx_t)*(1+nr);
+				if(fcsrio<rsbio )fcsrf=bettermark;
+				if(fcsrio<bestio)bestio=fcsrio;
+				if(wv>vl)
+				RSB_STDOUT("as  COO:%10zu %c\n",fcooio,fcoof),
+				RSB_STDOUT("as  CSR:%10zu %c\n",fcsrio,fcsrf);
+				savepcnt=100.0*(((double)(rsbio-bestio))/(double)rsbio);
+				if(savepcnt>0.0 || miac)
+				{
+					if(wv>vl)
+					RSB_STDOUT("potential saving is: %3.2lg%% (%zu bytes out of %zu)\n",savepcnt,rsbio-bestio,rsbio);
+					mlp->sa[mlp->mc  ]=rsbio-bestio;
+					mlp->mp[mlp->mc++]=mtxAp;
+				}
+		}
+		else
+		{
+			RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+				if(submatrix)
+					rsb__leaves_analysis_rec(submatrix,mlp,wv,wpl);
+		}
+	}
+
+ret:	return errval;
+}
+
+static rsb_err_t rsb__cor_merge(rsb_type_t typecode, void* RSB_RESTRICT VA, rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t * RSB_RESTRICT JA, rsb_nnz_idx_t offB, rsb_nnz_idx_t nnzB, rsb_nnz_idx_t nnzC, const int wv, int wp, struct rsb_coo_matrix_t*RSB_RESTRICT coop)
+{
+	/**
+	 * \ingroup gr_internals
+	 * Merges two non-overlapping, totally ordered COO sequences.
+	 * This is a naive version using an nnzB+nnzC temporary array.
+	 * If coop is supplied, its buffers will be used and no temporary RSB_MIN(nnzB,nnzC) allocation will occur.
+	 * It would be nice to have a no-alloc version, but that would be considerably more complicated.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	void *VB = NULL, *VC = NULL, *VT = NULL;
+	rsb_coo_idx_t * IB = NULL, *JB = NULL;
+	rsb_coo_idx_t * IC = NULL, *JC = NULL;
+	rsb_coo_idx_t * IT = NULL, *JT = NULL;
+	rsb_nnz_idx_t bi = 0, ci = 0, ti = 0;
+	rsb_nnz_idx_t b0 = 0, c0 = 0, t0 = 0;
+	struct rsb_coo_matrix_t coo;
+	size_t es = RSB_SIZEOF(typecode);
+
+	if( nnzB == 0 || nnzC == 0 )
+	{
+		goto ret;
+	}
+
+	b0 = offB;
+	c0 = offB + nnzB;
+	VB = RSB_TYPED_OFF_PTR(typecode,VA,b0);
+	VC = RSB_TYPED_OFF_PTR(typecode,VA,c0);
+	IB = IA + b0;
+	IC = IA + c0;
+	JB = JA + b0;
+	JC = JA + c0;
+
+	RSB_BZERO_P(&coo);
+	coo.nnz = nnzB + nnzC;
+	coo.typecode = typecode;
+
+	if( coop && coop->nnz)
+	{
+		coo = *coop;
+		coo.nnz = nnzB + nnzC; /* necessary */
+	}
+	else
+	{
+		if( NULL == rsb__allocate_coo_matrix_t(&coo) )
+			goto err;
+	}
+
+	IT = coo.IA;
+	JT = coo.JA;
+	VT = coo.VA;
+
+again:
+	t0 = ti;
+       	while( bi<nnzB && ci<nnzC && RSB_COO_LT(IB[bi],JB[bi],IC[ci],JC[ci]) )
+	{
+		IT[ti] = IB[bi];
+		JT[ti] = JB[bi];
+		++bi,++ti;
+	}
+	//if(ti>t0) RSB_STDOUT("t0:%d t1:%d bi:%d ci:%d bnz:%d cnz:%d\n",t0,ti,bi,ci,nnzB,nnzC);
+	if(ti>t0)
+		RSB_A_MEMCPY(VT,VB,t0,(bi-(ti-t0)),(ti-t0),es);
+
+	t0 = ti;
+       	while( bi<nnzB && ci<nnzC && RSB_COO_GE(IB[bi],JB[bi],IC[ci],JC[ci]) )
+	{
+		IT[ti] = IC[ci];
+		JT[ti] = JC[ci];
+		++ci,++ti;
+	}
+
+	//if(ti>t0) RSB_STDOUT("t0:%d t1:%d bi:%d ci:%d bnz:%d cnz:%d\n",t0,ti,bi,ci,nnzB,nnzC);
+	if(ti>t0)
+		RSB_A_MEMCPY(VT,VC,t0,(ci-(ti-t0)),(ti-t0),es);
+
+	if( ci < nnzC && bi < nnzB )
+		goto again;
+
+       	if( bi<nnzB && ci==nnzC )
+	{
+		RSB_COA_MEMCPY(IT,IB,ti,bi,(nnzB-bi));
+		RSB_COA_MEMCPY(JT,JB,ti,bi,(nnzB-bi));
+		RSB_A_MEMCPY  (VT,VB,ti,bi,(nnzB-bi),es);
+		ti += (nnzB - bi);
+		bi = nnzB;
+	}
+
+       	if( ci<nnzC && bi==nnzB )
+	{
+		RSB_COA_MEMCPY(IT,IC,ti,ci,(nnzC-ci));
+		RSB_COA_MEMCPY(JT,JC,ti,ci,(nnzC-ci));
+		RSB_A_MEMCPY  (VT,VC,ti,ci,(nnzC-ci),es);
+		ti += (nnzC - ci);
+		ci = nnzC;
+	}
+
+	RSB_COA_MEMCPY(IA,IT,offB,0,(coo.nnz));
+	RSB_COA_MEMCPY(JA,JT,offB,0,(coo.nnz));
+	if(wp)
+	{
+		RSB_A_MEMCPY_parallel(  VA,VT,offB,0,(coo.nnz),es);
+	}
+	else
+	{
+		RSB_A_MEMCPY(  VA,VT,offB,0,(coo.nnz),es);
+	}
+
+	RSB_ASSERT(rsb__util_is_coo_array_sorted_up_partial_order(IA,coo.nnz));
+	goto done;
+err:
+	errval = RSB_ERR_ENOMEM;
+done:
+	if( coop && coop->nnz)
+		;
+	else
+		rsb__destroy_coo_matrix_t(&coo);
+ret:
+	return errval;
+}
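+
+/* A minimal, self-contained sketch, not library code, of the two-run COO
+ * merge performed above, on (i,j) index pairs only (values omitted); all
+ * names here are illustrative. Runs B and C are each sorted and disjoint. */
+#if 0
+static void example_coo_merge(const int *IB, const int *JB, int nnzB,
+		const int *IC, const int *JC, int nnzC, int *IT, int *JT)
+{
+	int bi = 0, ci = 0, ti = 0;
+
+	while(bi < nnzB && ci < nnzC) /* take the lexicographically smaller pair */
+		if( IB[bi] < IC[ci] || ( IB[bi] == IC[ci] && JB[bi] <= JC[ci] ) )
+			IT[ti] = IB[bi], JT[ti] = JB[bi], ++bi, ++ti;
+		else
+			IT[ti] = IC[ci], JT[ti] = JC[ci], ++ci, ++ti;
+	for( ; bi < nnzB; ++bi, ++ti) IT[ti] = IB[bi], JT[ti] = JB[bi]; /* drain B */
+	for( ; ci < nnzC; ++ci, ++ti) IT[ti] = IC[ci], JT[ti] = JC[ci]; /* drain C */
+}
+#endif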
+
+rsb_err_t rsb__leaves_merge_multiple(struct rsb_mtx_t *mtxAp, rsb_time_t *stp, rsb_time_t *atp, rsb_time_t *ltp, const int wv, int kc)
+{
+	/* FIXME: is this used ? where ? why ? */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_time_t st = RSB_TIME_ZERO, lt = RSB_TIME_ZERO, at = RSB_TIME_ZERO;
+
+	while(mtxAp->all_leaf_matrices_n > 1)
+	{
+		rsb_time_t mst = RSB_TIME_ZERO, mlt = RSB_TIME_ZERO, mat = RSB_TIME_ZERO; /* merge: sort,elapsed,analysis time */
+
+		errval = rsb__leaves_merge(mtxAp, mtxAp->all_leaf_matrices_n, &mst, &mat, &mlt, wv, kc);
+
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_PERR_GOTO(err, RSB_ERRM_ES);
+		}
+		st += mst;
+	       	lt += mlt;
+		at += mat;
+	}
+
+	RSB_DEBUG_ASSERT( mtxAp->all_leaf_matrices_n == 1 );
+
+	if(kc)
+	{
+#ifdef RSB_USE_ASSERT
+		struct rsb_mtx_t * submatrix = NULL;
+		rsb_submatrix_idx_t smi;
+
+		RSB_SUBMATRIX_FOREACH_LEAF(mtxAp,submatrix,smi) 
+		{
+			RSB_DEBUG_ASSERT( submatrix->matrix_storage == RSB_MATRIX_STORAGE_BCOR );
+			RSB_DEBUG_ASSERT( RSB_DO_FLAG_HAS( submatrix->flags , RSB_FLAG_WANT_COO_STORAGE) );
+		}
+#endif /* RSB_USE_ASSERT */
+	}
+
+	if (mtxAp->all_leaf_matrices_n > 1)
+	{
+		RSB_ERROR("Merge did not work: matrix still has %d submatrices.\n", mtxAp->all_leaf_matrices_n);
+		errval = RSB_ERR_INTERNAL_ERROR;
+		RSB_PERR_GOTO(err, RSB_ERRM_ES);
+	}
+err:
+	RSB_ASSIGN_IF(stp,st)
+	RSB_ASSIGN_IF(atp,at)
+	RSB_ASSIGN_IF(ltp,lt)
+	return errval;
+}
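+
+/* Illustrative usage, not library code: to collapse a matrix to a single
+ * leaf while keeping COO storage (kc=1) and discarding the timings:
+ *   errval = rsb__leaves_merge_multiple(mtxAp, NULL, NULL, NULL, 0, 1);
+ */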
+
+#if 0
+static void rsb__mtx_list_print(struct rsb_mtx_list_t * mlp, const int wv)
+{
+	rsb_submatrix_idx_t smi;
+
+	RSB_DEBUG_ASSERT(mlp);
+
+	RSB_STDOUT("Selected %d matrices:\n", mlp->mc);
+
+	for(smi=0;smi<mlp->mc;++smi)
+	{
+		struct rsb_mtx_t * mtxMp = mlp->mp[smi]; 
+		//if(wv>1)
+			RSB_STDOUT(RSB_PRINTF_MTX_SUMMARY_ARGS(mtxMp)),
+			RSB_STDOUT(" -> %zd\n",mlp->sa[smi]);
+	}
+}
+#endif
+
+static void rsb__mtx_list_free(struct rsb_mtx_list_t * mlp)
+{
+	RSB_DEBUG_ASSERT(mlp);
+
+	RSB_CONDITIONAL_FREE(mlp->mp);
+	RSB_CONDITIONAL_FREE(mlp->sa);
+}
+
+static void rsb__mtx_list_init(struct rsb_mtx_list_t * mlp)
+{
+	RSB_DEBUG_ASSERT(mlp);
+
+	RSB_BZERO_P(mlp);
+}
+
+static rsb_err_t rsb__mtx_list_bld(struct rsb_mtx_list_t * mlp, struct rsb_mtx_t *mtxAp)
+{
+	struct rsb_mtx_list_t ml; /* matrix list */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	RSB_DEBUG_ASSERT(mtxAp);
+	RSB_DEBUG_ASSERT(mlp);
+
+	RSB_BZERO_P(&ml);
+
+	ml.mc = rsb__submatrices(mtxAp);
+	ml.sa = rsb__calloc(sizeof(*ml.sa) * ml.mc); /* after rsb__srt_subm_ptr_array, this won't make sense anymore */
+	ml.mp = rsb__calloc(sizeof(*ml.mp) * ml.mc);
+	ml.mc = 0; // for rsb__leaves_analysis_rec
+
+	if(!ml.sa || !ml.mp)
+       	{
+		errval = RSB_ERR_ENOMEM;
+		rsb__mtx_list_free(&ml);
+	       	RSB_PERR_GOTO(err,RSB_ERRM_EM);
+       	}
+
+	*mlp = ml;
+err:
+	return errval;
+}
+
+#define RSB_SPLIT_IS_EXPERIMENTAL 1
+#define RSB_LS_PARANOIA 0
+#if ( RSB_LS_PARANOIA > 0 )
+#define RSB_LS_ASSERT RSB_ASSERT
+#else /* RSB_LS_PARANOIA */
+#define RSB_LS_ASSERT 
+#endif /* RSB_LS_PARANOIA */
+
+#ifdef RSB_ALLOW_INTERNAL_GETENVS
+#define RSB_AT_ALLOW_GETENV RSB_ALLOW_INTERNAL_GETENVS /* activate this only for testing purposes */
+#else /* RSB_ALLOW_INTERNAL_GETENVS */
+#define RSB_AT_ALLOW_GETENV 0 /* activate this only for testing purposes */
+#endif /* RSB_ALLOW_INTERNAL_GETENVS */
+
+static void rsb__scale_subm_idx_on_env_var(const char *envv, double * mftsp, rsb_submatrix_idx_t * mctsp, const int wv)
+{
+#if RSB_AT_ALLOW_GETENV
+	const char * msss = NULL; /* matrix split specification string */
+	if( ( msss = getenv(envv) ) != NULL )
+	{
+		int nom=0,den=0;
+	       	char c=0;
+
+		if( 2 == sscanf(msss,"%d/%d",&nom,&den) )
+			*mftsp = ((double)(nom))/(den);
+		else
+		if( 1 == sscanf(msss,"0.%lf",mftsp ) )
+			*mftsp /= 10.0;
+		else
+		if( 2 == sscanf(msss,"%lf%[%]",mftsp ,&c) )
+		{
+			*mftsp /= 100.0;
+		}
+		else
+		if( 1 == sscanf(msss,"%d",mctsp) )
+			;
+		else
+		{
+			RSB_STDOUT("\"%s\" is an invalid value for %s. Use e.g. 1/3, 0.4, 10%%, or 5.\n",msss,envv);
+		}
+		if(wv>0)
+			RSB_STDOUT("Will split/merge a fraction %g of the original submatrices (set via env. var. %s).\n",*mftsp,envv);
+
+	}
+#endif /* RSB_AT_ALLOW_GETENV */
+}
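+
+/* Illustrative usage, not library code (and effective only when internal
+ * getenv's are enabled at build time). Accepted formats for e.g. RSB_SPLIT_SF
+ * are a fraction ("1/3"), a decimal ("0.4"), a percentage ("10%"), or an
+ * absolute submatrix count ("5"):
+ *   double mfts = 0.5; rsb_submatrix_idx_t mcts = 0;
+ *   rsb__scale_subm_idx_on_env_var("RSB_SPLIT_SF", &mfts, &mcts, 1);
+ */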
+
+rsb_err_t rsb__mtx_split(struct rsb_mtx_t * RSB_RESTRICT mtxAp, rsb_submatrix_idx_t manp, rsb_time_t * RSB_RESTRICT stp, rsb_time_t * RSB_RESTRICT atp, rsb_time_t * RSB_RESTRICT ltp, const int wv, int kc)
+{
+	/* 
+	 	Splits the leaves of a matrix further.
+	 	The matrix stays in a consistent state even on error.
+		However, it may end up in a different state than it started in.
+		TODO: need to document work memory requirements.
+		FIXME: need to use manp.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_time_t mt = - rsb_time(), st = RSB_TIME_ZERO, lt = RSB_TIME_ZERO, at = RSB_TIME_ZERO; /* merge,sort,elapsed,analysis time */
+	struct rsb_mtx_list_t ml; /* matrix list */
+	rsb_submatrix_idx_t smi = 0, nsm = 0;
+	int sc = RSB_MTX_CMP_NNZ_DES;
+	rsb_long_t smc = rsb__submatrices(mtxAp);
+	rsb_submatrix_idx_t nsbs,nsas; /* number of submatrices [before/after] split */
+	rsb_thread_t rnt = rsb_get_num_threads();
+	rsb_thread_t nst = rnt; /* number of threads active during splitting */
+
+	// rsb_submatrix_idx_t mmts = 0; /* max matrices to split */
+	rsb_submatrix_idx_t mcts = 0; /* matrices count to split */
+	double mfts = 0.5; /* matrices fraction to split */
+
+	rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+
+	rsb__mtx_list_init(&ml);
+
+	if(!mtxAp)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(ret, RSB_ERRM_E_MTXAP);
+	}
+
+	nsbs = mtxAp->all_leaf_matrices_n;
+	flags = mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES;
+	if(nsbs == 1)
+		RSB_DO_FLAG_ADD(flags, RSB_FLAG_USE_HALFWORD_INDICES);
+#if RSB_SPLIT_IS_EXPERIMENTAL
+	if(! RSB_SOME_ERROR(errval) )
+	if(!rsb__mtx_chk(mtxAp))
+	{
+		errval = RSB_ERR_INTERNAL_ERROR;
+		RSB_ERROR(RSB_ERRM_ES);
+		goto ret;
+	}
+#endif
+
+	if(wv>2)
+		RSB_STDOUT("# experimental leaves analysis & split: "),
+		RSB_STDOUT(RSB_PRINTF_MATRIX_AT_SUMMARY_ARGS(mtxAp)),
+		RSB_STDOUT("\n");
+	if(wv>2)
+		RSB_STDOUT("# max ptr diff is %zd units\n",rsb__submatrices_max_ptr_diff(mtxAp));
+/*
+	for(smi=0;smi<mtxAp->all_leaf_matrices_n;++smi)
+	{
+		int smj;
+		for(smj=smi+1;smj<mtxAp->all_leaf_matrices_n;++smj)
+		if(mtxAp->all_leaf_matrices[smi].mtxlp == mtxAp->all_leaf_matrices[smj].mtxlp)
+		{
+			printf("Duplicate submatrices: p:%p i:%d j:%d max:%d\n",0x0,smi,smj,mtxAp->all_leaf_matrices_n); 
+			RSB_STDOUT(RSB_PRINTF_MATRIX_AT_SUMMARY_ARGS(mtxAp->all_leaf_matrices[smj].mtxlp));
+		}
+	}
+*/
+	/* Determine largest / heaviest leaf. */
+	at = -rsb_time();
+	errval = rsb__mtx_list_bld(&ml, mtxAp);
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(ret, RSB_ERRM_E_MTXAP);
+	}
+	errval = rsb__leaves_analysis_rec(mtxAp, &ml, wv, RSB_BOOL_FALSE);
+	errval = rsb__srt_subm_ptr_array(ml.mp, ml.mc, sc);
+	at += rsb_time();
+	RSB_ASSERT(ml.mc>0);
+
+	rsb__scale_subm_idx_on_env_var("RSB_SPLIT_SF",&mfts,&mcts,wv);
+#if RSB_AT_ALLOW_GETENV
+	nst = getenv("RSB_SPLIT_NT") ? rsb__util_atoi(getenv("RSB_SPLIT_NT")) : nst;
+#endif /* RSB_AT_ALLOW_GETENV */
+	/* TODO: can we have recursive split for corner-concentrated-nonzeroes matrices ? */
+
+	if(mcts == 0)
+		mcts = (rsb_submatrix_idx_t)(mfts*ml.mc);
+	mcts = RSB_MIN(RSB_MAX(1, mcts), ml.mc); /* 1 ... ml.mc */
+       	nst = RSB_MIN(mcts, nst);
+
+	if(manp > 0)
+	       	mcts = RSB_MIN(mcts, manp);
+
+	#pragma omp parallel for schedule(static,1) reduction(|:errval) reduction(+:lt) reduction(+:st) shared(nsm) shared(ml)  num_threads(nst)
+	for(smi=0;smi<mcts;++smi)
+	{
+		struct rsb_mtx_t * mtxMp = ml.mp[smi]; 
+		rsb_coo_idx_t nrA = mtxMp->nr, ncA = mtxMp->nc;
+		const rsb_coo_idx_t msz = 2;
+		rsb_nnz_idx_t nzul = 0, nzur = 0, nzll = 0, nzlr = 0;
+		rsb_nnz_idx_t nzu = 0, nzl = 0;
+		struct rsb_coo_matrix_t coa, cot;
+		rsb_coo_idx_t mr = RSB_MIDDLE(nrA), mc = RSB_MIDDLE(ncA);
+		rsb_time_t lst = RSB_TIME_ZERO, llt = RSB_TIME_ZERO; /* local st,lt */
+		rsb_coo_idx_t * TA = NULL;
+		/* RSB_STDOUT("thread %d handles matrix %d\n",omp_get_thread_num(),smi); */
+
+		llt = -rsb_time();
+
+		/* Skip processing if no split possible or convenient. */
+		if( nrA < msz || ncA < msz || mtxMp->nnz < 4 )
+		{
+			/* TODO: shall we communicate this somehow to the outside ? */
+			if(wv>2)
+			RSB_ERROR("Matrix is too small for splitting:"),
+			RSB_ERROR(RSB_PRINTF_MATRIX_AT_SUMMARY_ARGS(mtxMp)),
+			RSB_ERROR("\n");
+			goto nerr;
+		}
+
+		if( nsm + 3 >= RSB_TMP_OVERALLOC_MTX * ml.mc )
+		{
+			RSB_PERR_GOTO(lerr,"Exceeded inner limits !");
+		}
+
+		/* Switch to COO, then split. */
+		RSB_LS_ASSERT( rsb__mtx_chk(mtxMp)==RSB_BOOL_TRUE);
+		#pragma omp critical (rsb__mtx_split_cr)
+		{
+			/* TODO: one can minimize this number further */
+			TA = rsb__malloc( sizeof(rsb_coo_idx_t) * RSB_MIN(mtxMp->nnz,1+nrA) );
+			if(!TA)
+			{
+				errval = RSB_ERR_ENOMEM;
+			}
+		}
+		if(RSB_SOME_ERROR(errval))
+		{
+		       	RSB_PERR_GOTO(lerr,RSB_ERRM_PAL	); /* !! */
+		}
+		errval = rsb__do_switch_leaf(mtxMp, RSB_MATRIX_STORAGE_BCOR, RSB_FLAG_USE_FULLWORD_INDICES, 0, 0, TA);
+		RSB_LS_ASSERT(!RSB_SOME_ERROR(errval));
+		RSB_LS_ASSERT( rsb__mtx_chk(mtxMp)==RSB_BOOL_TRUE);
+		/*if( rsb__util_is_nnz_array_sorted_up_partial_order(mtxMp->bpntr,mtxMp->nnz)!=RSB_BOOL_TRUE)
+			rsb__mtx_chk(mtxMp); */
+
+		RSB_LS_ASSERT( rsb__util_is_nnz_array_sorted_up_partial_order(mtxMp->bpntr,mtxMp->nnz)==RSB_BOOL_TRUE);
+		if(wv>2)
+			RSB_STDOUT("# switched the largest leaf to COO: "),
+			RSB_STDOUT(RSB_PRINTF_MATRIX_AT_SUMMARY_ARGS(mtxMp)),
+			RSB_STDOUT("\n");
+		rsb__project_rsb_to_coo(mtxMp,&coa);
+
+		/* Count elements in each quadrant */
+		nzu = rsb__nnz_split_coo_bsearch(coa.IA,mr,mtxMp->nnz);
+		nzl = mtxMp->nnz - nzu;
+
+		cot.typecode = mtxMp->typecode;
+		cot.nnz = RSB_MAX(nzu,nzl); /* TODO: one can find better solutions ... */
+
+		#pragma omp critical (rsb__mtx_split_cr)
+		{
+			if( NULL == rsb__allocate_coo_matrix_t(&cot) )
+			{
+				errval = RSB_ERR_ENOMEM;
+			}
+		}
+		if(RSB_SOME_ERROR(errval))
+		{
+		       	RSB_PERR_GOTO(lerr,RSB_ERRM_PAL	); /* !! */
+		}
+		lst = -rsb_time();
+		RSB_LS_ASSERT( rsb__mtx_chk(mtxMp)==RSB_BOOL_TRUE);
+		if( RSB_LS_PARANOIA )
+		{
+			/* FIXME: DO NOT COMMIT THIS */
+			rsb_coo_idx_t li, ui;
+			rsb_coo_idx_t lj, uj;
+			rsb__util_find_extremal_full_index_val(coa.IA,nzu+nzl,0,-1,&li,&ui);
+			rsb__util_find_extremal_full_index_val(coa.JA,nzu+nzl,0,-1,&lj,&uj);
+			//RSB_STDOUT("1 nr=%d nc=%d li=%d ui=%d lj=%d uj=%d mr=%d mc=%d nzu=%d nzl=%d.\n",nrA,ncA,li,ui,lj,uj,mr,mc,nzu,nzl);
+			RSB_ASSERT( (ui>=mr && nzl>0) || (ui<mr && nzl==0) );
+		}
+		if( RSB_LS_PARANOIA )
+		{
+			/* FIXME: DO NOT COMMIT THIS */
+			rsb_coo_idx_t li, ui;
+			rsb_coo_idx_t lj, uj;
+			rsb__util_find_extremal_full_index_val(coa.IA,nzu,0,-1,&li,&ui);
+			rsb__util_find_extremal_full_index_val(coa.JA,nzu,0,-1,&lj,&uj);
+			//RSB_STDOUT("2 nr=%d nc=%d li=%d ui=%d lj=%d uj=%d mr=%d mc=%d nzu=%d nzl=%d.\n",nrA,ncA,li,ui,lj,uj,mr,mc,nzu,nzl);
+			//RSB_ASSERT( (ui>=mr && nzl>0) || (ui<mr && nzl==0) );
+		}
+		rsb__coo_to_lr( cot.VA, cot.IA, cot.JA, coa.VA, coa.IA, coa.JA, mc, nzu, 0, 0,   &nzul, &nzur,  0,-mc, mtxAp->typecode);
+		if( RSB_LS_PARANOIA )
+		{
+			/* FIXME: DO NOT COMMIT THIS */
+			rsb_coo_idx_t li, ui;
+			rsb_coo_idx_t lj, uj;
+			rsb__util_find_extremal_full_index_val(coa.IA,nzu,0,-1,&li,&ui);
+			rsb__util_find_extremal_full_index_val(coa.JA,nzu,0,-1,&lj,&uj);
+			//RSB_STDOUT("3 nr=%d nc=%d li=%d ui=%d lj=%d uj=%d mr=%d mc=%d nzu=%d nzl=%d.\n",nrA,ncA,li,ui,lj,uj,mr,mc,nzu,nzl);
+			RSB_ASSERT( ui < mr );
+		}
+		if( RSB_LS_PARANOIA )
+		{
+			/* FIXME: DO NOT COMMIT THIS */
+			rsb_coo_idx_t li, ui;
+			rsb_coo_idx_t lj, uj;
+			rsb__util_find_extremal_full_index_val(coa.IA,nzul,0,-1,&li,&ui);
+			rsb__util_find_extremal_full_index_val(coa.JA,nzul,0,-1,&lj,&uj);
+			//RSB_STDOUT("3 nr=%d nc=%d li=%d ui=%d lj=%d uj=%d mr=%d mc=%d nzu=%d nzl=%d.\n",nrA,ncA,li,ui,lj,uj,mr,mc,nzu,nzl);
+			RSB_ASSERT( ui < mr );
+			RSB_ASSERT( uj < mc );
+		}
+		rsb__coo_to_lr( cot.VA, cot.IA, cot.JA, coa.VA, coa.IA, coa.JA, mc, nzl, 0, nzu, &nzll, &nzlr,-mr,-mc, mtxAp->typecode);
+		/* RSB_ASSERT( rsb__mtx_chk(mtxMp)==RSB_BOOL_TRUE); */
+		lst += rsb_time();
+		#pragma omp critical (rsb__mtx_split_cr)
+		{
+			rsb__destroy_coo_matrix_t(&cot);
+		}
+		RSB_ASSERT( mtxMp->nnz == nzu + nzl );
+		RSB_ASSERT( mtxMp->nnz == nzul + nzur + nzll + nzlr );
+		RSB_ASSERT( nzul + nzur == nzu );
+		RSB_ASSERT( nzll + nzlr == nzl );
+
+		if(wv>2)
+			RSB_STDOUT("# nzu=%d nzl=%d nzul=%d nzur=%d nzll=%d nzlr=%d.\n",nzu,nzl,nzul,nzur,nzll,nzlr);
+		/* make sure we have further one to four submatrices */
+
+		if(1)
+		{
+			struct rsb_mtx_t * mtxQp = NULL;
+			struct rsb_mtx_t *mtxQ1p = NULL,*mtxQ2p = NULL, *mtxQ3p = NULL, *mtxQ4p = NULL;
+			/* rename this in the fashion of rsb__do_set_in_place_submatrices_offsets */
+#define RSB_MTX_INIT_LEAF(MTXAP,MTXLP,NNZ,NZOFF,NR,NC,ROFF,COFF)	\
+			RSB_BZERO_P((MTXLP));				\
+			rsb__set_init_flags_and_stuff(MTXLP,NULL,NULL,NR,NC,NNZ,NNZ,NNZ,(MTXAP)->typecode,(MTXAP)->flags); \
+			(MTXLP)->matrix_storage = RSB_MATRIX_STORAGE_BCOR;			\
+			(MTXLP)->roff =  (MTXAP)->roff + (ROFF),				\
+			(MTXLP)->coff =  (MTXAP)->coff + (COFF),				\
+			(MTXLP)->nzoff = (MTXAP)->nzoff + NZOFF, 			\
+			(MTXLP)->bpntr = (MTXAP)->bpntr + NZOFF, 			\
+			(MTXLP)->bindx = (MTXAP)->bindx + NZOFF,			\
+			(MTXLP)->VA = RSB_VA_OFFSET_POINTER((MTXAP)->VA, RSB_SIZEOF((MTXAP)->typecode), (NZOFF)),	\
+			RSB_DO_FLAG_ADD((MTXLP)->flags,RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS );	\
+			RSB_DO_FLAG_DEL((MTXLP)->flags,RSB_FLAG_USE_HALFWORD_INDICES);	/* COO */	\
+			rsb__compute_bounded_box((MTXLP));				\
+			errval = rsb__do_switch_leaf((MTXLP), RSB_MATRIX_STORAGE_AUTO, flags, 0, 0, TA);	\
+			RSB_DO_FLAG_ADD((MTXLP)->flags,RSB_FLAG_NON_ROOT_MATRIX);	\
+			if(RSB_SOME_ERROR(errval)) { RSB_PERR_GOTO(lerr,RSB_ERRM_PAL	);}
+
+			#pragma omp critical (rsb__mtx_split_cr)
+			{
+			       	/* this is a 'matrix reservation' mechanism */
+				/* mtxQp = (mtxAp) + smc + nsm; */ /* can use this only if there are no holes */
+				mtxQp = mtxAp;
+#define RSB_REC_NEXT_FREE(NSM) while((!RSB_REC_IS_SUBM_FREE(mtxQp)) && ((mtxQp)-mtxAp<(smc+NSM)))++mtxQp; /* TODO: move out of here */
+				RSB_REC_NEXT_FREE(nsm);
+				/* RSB_STDOUT("%d starts at %d\n",omp_get_thread_num(),(mtxQp-mtxAp)); */
+				if(nzul){ mtxQ1p = mtxQp; RSB_REC_MARK_SUBM_USED(mtxQp); mtxQp++; RSB_REC_NEXT_FREE(nsm); }
+				if(nzur){ mtxQ2p = mtxQp; RSB_REC_MARK_SUBM_USED(mtxQp); mtxQp++; RSB_REC_NEXT_FREE(nsm); }
+				if(nzll){ mtxQ3p = mtxQp; RSB_REC_MARK_SUBM_USED(mtxQp); mtxQp++; RSB_REC_NEXT_FREE(nsm); }
+				if(nzlr){ mtxQ4p = mtxQp; RSB_REC_MARK_SUBM_USED(mtxQp); mtxQp++; RSB_REC_NEXT_FREE(nsm); }
+				nsm += nzul ? 1 : 0;
+				nsm += nzur ? 1 : 0;
+				nsm += nzll ? 1 : 0;
+				nsm += nzlr ? 1 : 0;
+#undef RSB_REC_NEXT_FREE
+			}
+
+			if(nzul)
+			{
+				mtxQp=mtxQ1p;
+				/* RSB_STDOUT("thread %d divides quadrant %p / subquadrant %p = %d\n",omp_get_thread_num(),mtxMp,mtxQp,(mtxQp-mtxAp)); */
+				/* mtxMp->sm[0] = mtxQp++; */
+				mtxMp->sm[0] = mtxQp;
+				RSB_MTX_INIT_LEAF(mtxMp,mtxMp->sm[0],nzul,0        ,    mr,    mc, 0,0 );
+				RSB_LS_ASSERT( rsb__mtx_chk(mtxMp->sm[0])==RSB_BOOL_TRUE);
+			}
+			if(nzur)
+			{
+				mtxQp=mtxQ2p;
+				/* RSB_STDOUT("thread %d divides quadrant %p / subquadrant %p = %d\n",omp_get_thread_num(),mtxMp,mtxQp,(mtxQp-mtxAp)); */
+				/* mtxMp->sm[1] = mtxQp++; */
+				mtxMp->sm[1] = mtxQp;
+				RSB_MTX_INIT_LEAF(mtxMp,mtxMp->sm[1],nzur,nzul     ,    mr,ncA-mc, 0,mc);
+				RSB_LS_ASSERT( rsb__mtx_chk(mtxMp->sm[1])==RSB_BOOL_TRUE);
+				RSB_DO_FLAG_DEL((mtxMp->sm[1])->flags,RSB_FLAG_UPPTRI|RSB_FLAG_TRIANGULAR);
+			}
+			if(nzll)
+			{
+				mtxQp=mtxQ3p;
+				/* RSB_STDOUT("thread %d divides quadrant %p / subquadrant %p = %d\n",omp_get_thread_num(),mtxMp,mtxQp,(mtxQp-mtxAp)); */
+				/* mtxMp->sm[2] = mtxQp++; */
+				mtxMp->sm[2] = mtxQp;
+				RSB_MTX_INIT_LEAF(mtxMp,mtxMp->sm[2],nzll,nzu      ,nrA-mr,    mc,mr,0 );
+				RSB_LS_ASSERT( rsb__mtx_chk(mtxMp->sm[2])==RSB_BOOL_TRUE);
+				RSB_DO_FLAG_DEL((mtxMp->sm[2])->flags,RSB_FLAG_UPPTRI|RSB_FLAG_TRIANGULAR);
+			}
+			if(nzlr)
+			{
+				mtxQp=mtxQ4p;
+				/* RSB_STDOUT("thread %d divides quadrant %p / subquadrant %p = %d\n",omp_get_thread_num(),mtxMp,mtxQp,(mtxQp-mtxAp)); */
+				/* mtxMp->sm[3] = mtxQp++; */
+				mtxMp->sm[3] = mtxQp;
+				RSB_MTX_INIT_LEAF(mtxMp,mtxMp->sm[3],nzlr,nzu+nzll ,nrA-mr,ncA-mc,mr,mc);
+				RSB_LS_ASSERT( rsb__mtx_chk(mtxMp->sm[3])==RSB_BOOL_TRUE);
+			}
+#undef RSB_MTX_INIT_LEAF
+			if(wv>2)
+				RSB_STDOUT("# just split %ld -> %ld subms (max %ld splits allowed, hence at most +%ld subms).\n",(long int)(smc/*+nsm*/),(long int)(mtxQp-mtxAp),(long int)mcts,4*(long int)mcts);
+			/* leaf recompression, bounds have been recomputed on each leaf */
+			/* marked present matrix as non terminal and assign nonzeroes to leaves */
+		}
+		/* RSB_DO_FLAG_DEL(mtxMp->flags,RSB_FLAG_NON_ROOT_MATRIX); */
+		RSB_DO_FLAG_ADD(mtxMp->flags,RSB_FLAG_QUAD_PARTITIONING);
+		/* TODO: what if nonzeroes are concentrated in a corner ? EXPERIMENTAL OVER-SUBDIVIDE !? */
+		mtxMp->VA = NULL;
+		mtxMp->bindx = NULL;
+		mtxMp->bpntr = NULL;
+		llt += rsb_time();
+		lt += llt;
+		st += lst;
+
+		mtxMp->est = lst;
+		mtxMp->tat = llt;
+		mtxMp->sat = llt - lst; /* TODO: this is to be completed */
+		#pragma omp critical (rsb__mtx_split_cr)
+	       	{
+			RSB_CONDITIONAL_FREE(TA);
+	       	}
+		continue;
+lerr:
+		RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+		RSB_ASSERT(0); /* critical */
+nerr:		/* Not an error condition. */
+		RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+		/* FIXME: good quality code would provide a mechanism here for reverting the matrix to its original state. */
+	} /* smi */
+
+	if(nsm > 0)
+		RSB_DO_FLAG_ADD(mtxAp->flags, RSB_FLAG_USE_HALFWORD_INDICES);
+
+	errval = rsb__refresh_array_of_leaf_matrices(mtxAp);
+
+	if(RSB_SOME_ERROR(errval))
+	{
+	       	RSB_ERROR(RSB_ERRM_EM);
+	       	/*
+		 * TODO: complete this part !
+		 * One would need a matrix revert/recovery mechanism!
+		 * */
+	}	
+	mt += rsb_time();
+	nsas = mtxAp->all_leaf_matrices_n;
+#if RSB_STORE_IDXSA
+	mtxAp->idxsa = rsb__get_index_storage_amount(mtxAp);
+#endif
+	if(wv>0)
+		RSB_STDOUT("Split (%d -> %d leaves, %d -> %d subms) took %0.4lg s.\n",nsbs,nsas,(int)smc,(int)smc+nsm,mt);
+
+/* The following shall be used to locate bugs in the split loop locking mechanism. */
+/*
+	for(smi=0;smi<mtxAp->all_leaf_matrices_n;++smi)
+	{
+		printf("%p : ",mtxAp->all_leaf_matrices[smi].mtxlp);
+		RSB_STDOUT(RSB_PRINTF_MATRIX_AT_SUMMARY_ARGS(mtxAp->all_leaf_matrices[smi].mtxlp));
+		printf("\n");
+	}
+	for(smi=0;smi<mtxAp->all_leaf_matrices_n;++smi)
+	{
+		int smj;
+		for(smj=smi+1;smj<mtxAp->all_leaf_matrices_n;++smj)
+		if(mtxAp->all_leaf_matrices[smi].mtxlp == mtxAp->all_leaf_matrices[smj].mtxlp)
+		{
+			printf("Oops. Duplicate submatrices: p: %p  i:%d j:%d max:%d\n",mtxAp->all_leaf_matrices[smj].mtxlp,smi,smj,mtxAp->all_leaf_matrices_n); 
+			RSB_STDOUT(RSB_PRINTF_MATRIX_AT_SUMMARY_ARGS(mtxAp));
+			printf("\n");
+			RSB_STDOUT(RSB_PRINTF_MATRIX_AT_SUMMARY_ARGS(mtxAp->all_leaf_matrices[smj].mtxlp));
+			printf("\n");
+			printf("This is BAD. Terminating\n");
+			RSB_DEBUG_ASSERT(0);
+			exit(-1);
+		}
+	}
+*/
+
+#if RSB_SPLIT_IS_EXPERIMENTAL
+	if(! RSB_SOME_ERROR(errval) )
+	if(!rsb__mtx_chk(mtxAp))
+	{
+		errval = RSB_ERR_INTERNAL_ERROR;
+		RSB_ERROR(RSB_ERRM_ES);
+		goto ret;
+	}
+#endif
+ret:
+	rsb__mtx_list_free(&ml);
+	RSB_ASSIGN_IF(stp, st)
+	RSB_ASSIGN_IF(atp, at)
+	RSB_ASSIGN_IF(ltp, lt)
+	return errval;
+}
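+
+/* A simplified sketch, not library code, of the quadrant split above on a
+ * row-major sorted COO leaf: nonzeroes are first divided at the middle row
+ * (the library uses a binary search for this); each half is then partitioned
+ * by the middle column via rsb__coo_to_lr(). Names here are illustrative. */
+#if 0
+static int example_count_upper_half(const int *IA, int nnz, int mr)
+{
+	int nzu = 0;
+
+	while(nzu < nnz && IA[nzu] < mr) /* rows are sorted: linear scan for clarity */
+		++nzu;
+	return nzu; /* nonzeroes above the middle row; the rest is the lower half */
+}
+#endif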
+
+rsb_err_t rsb__leaves_merge(struct rsb_mtx_t * RSB_RESTRICT mtxAp, rsb_submatrix_idx_t manp, rsb_time_t * RSB_RESTRICT stp, rsb_time_t *RSB_RESTRICT atp, rsb_time_t *RSB_RESTRICT ltp, const int wv, int kc)
+{
+	/**
+		\ingroup gr_internals
+		Merges leaf-level sparse blocks, one level per call.
+		It preserves the original VA,IA,JA arrays.
+
+	 	TODO: rename to rsb__mtx_merge
+	 	TODO: need to document memory requirements.
+		TODO: at the moment, errors are considered to be critical (matrix destructive).
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_time_t mt = - rsb_time(), 
+		   st = RSB_TIME_ZERO, 
+		   lt = RSB_TIME_ZERO, 
+		   at = RSB_TIME_ZERO; /* merge,sort,elapsed,analysis time */
+	/*const int wv = 0;*/ /* want verbose */
+	rsb_submatrix_idx_t nsbp, nsap; /* number of submatrices before and after merge  */
+	struct rsb_mtx_list_t ml; /* matrix list */
+	const int vl = 2; /* ?? */
+	/* int kc = 0; */ /* keep coo */
+
+	rsb__mtx_list_init(&ml);
+
+	if(!mtxAp)
+	{
+		RSB_PERR_GOTO(err, RSB_ERRM_E_MTXAP);
+		goto ret;
+	}
+
+	nsbp = mtxAp->all_leaf_matrices_n;
+	if(wv>vl)
+		RSB_STDOUT("# experimental leaves analysis: "),
+		RSB_STDOUT(RSB_PRINTF_MATRIX_AT_SUMMARY_ARGS(mtxAp)),
+		RSB_STDOUT("\n");
+
+	if(! rsb__is_terminal_recursive_matrix(mtxAp))
+	{
+		rsb_submatrix_idx_t smi;
+		const size_t rsbio = rsb__get_index_storage_amount(mtxAp);
+		int rnt = rsb_get_num_threads();
+		int nmt = rnt;
+		int sc = RSB_MTX_CMP_NNZ_ASC;
+		rsb_submatrix_idx_t mctm = 0; /* matrices count to merge */
+		double mftm = 0.5; /* matrices fraction to merge */
+
+#if RSB_AT_ALLOW_GETENV
+		nmt = getenv("RSB_MERGE_NT") ? rsb__util_atoi(getenv("RSB_MERGE_NT")) : nmt;
+		sc = getenv("RSB_MERGE_SC")  ? rsb__util_atoi(getenv("RSB_MERGE_SC")) : sc;
+#endif /* RSB_AT_ALLOW_GETENV */
+
+		RSB_DEBUG_ASSERT(nsbp>0);
+
+		at = -rsb_time();
+		errval = rsb__mtx_list_bld(&ml, mtxAp);
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_PERR_GOTO(ret, RSB_ERRM_EM);
+		}
+		errval = rsb__leaves_analysis_rec(mtxAp, &ml, wv, RSB_BOOL_TRUE);
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_PERR_GOTO(err, RSB_ERRM_EM);
+		}
+		errval = rsb__srt_subm_ptr_array(ml.mp, ml.mc, sc);
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_PERR_GOTO(err, RSB_ERRM_EM);
+		}
+		at += rsb_time();
+
+		if(manp > 0)
+		       	ml.mc = RSB_MIN( ml.mc, manp);
+
+		rsb__scale_subm_idx_on_env_var("RSB_MERGE_SF",&mftm,&mctm,wv);
+		if(mctm == 0)
+			mctm = (rsb_submatrix_idx_t)(mftm*ml.mc);
+		mctm = RSB_MIN(RSB_MAX(1, mctm), ml.mc); /* 1 ... ml.mc */
+       		nmt = RSB_MIN(mctm, nmt);
+
+		if(wv>vl)
+			RSB_STDOUT("Basic storage uses %zu bytes (%2.3lf bpnz).\n", rsbio, ((double)rsbio)/mtxAp->nnz),
+			RSB_STDOUT("We have %d merge candidate pre-leaves of which %d will be processed with %d threads.\n",ml.mc,mctm,nmt);
+
+		/* The following parallel loop omits the RSB_NTC thread specification. */
+		#pragma omp parallel for schedule(static,1) reduction(|:errval) reduction(+:lt) reduction(+:st) shared(ml)  num_threads(nmt)
+		for(smi=0;smi</*ml.mc*/mctm;++smi)
+		{
+			struct rsb_mtx_t * submatrix = NULL;
+			rsb_submatrix_idx_t sml;
+			rsb_submatrix_idx_t i,j;
+			struct rsb_mtx_t * mtxMp = ml.mp[smi];
+			size_t rsbio = rsb__get_index_storage_amount(mtxAp); /* rsb indices occupation */
+			size_t subio = rsb__get_index_storage_amount(mtxMp); /* submatrices indices occupation */
+			size_t jmios = ml.sa[smi]; /* join matrix index occupation save */
+			double rsavepcnt = 100.0*(((double)(jmios))/(double)subio);
+			double asavepcnt = 100.0*(((double)(jmios))/(double)rsbio);
+			rsb_coo_idx_t roffM = mtxMp->roff, coffM = mtxMp->coff;
+			rsb_coo_idx_t broffM = mtxMp->roff+mtxMp->nr, bcoffM = mtxMp->coff+mtxMp->nc;
+			rsb_nnz_idx_t nzul = RSB_NNZ_OF(mtxMp->sm[0]), nzur = RSB_NNZ_OF(mtxMp->sm[1]);
+			rsb_nnz_idx_t nzll = RSB_NNZ_OF(mtxMp->sm[2]), nzlr = RSB_NNZ_OF(mtxMp->sm[3]);
+			rsb_bool_t ifq = RSB_BOOL_FALSE; /* is first quadrant ? */
+			rsb_time_t lst = RSB_TIME_ZERO, llt = RSB_TIME_ZERO; /* local st,lt */
+			rsb_coo_idx_t * TA = NULL;
+			struct rsb_coo_matrix_t tcoo;
+
+			RSB_BZERO_P(&tcoo);
+
+#if RSB_AT_ALLOW_GETENV
+			if(getenv("RSB_MERGE_KEEP_COO"))
+				kc = rsb__util_atoi(getenv("RSB_MERGE_KEEP_COO"));
+#endif /* RSB_AT_ALLOW_GETENV */
+
+			if(wv>vl)
+				RSB_STDOUT(RSB_PRINTF_MTX_SUMMARY_ARGS(mtxMp)),
+				RSB_STDOUT("\n");
+
+			llt = -rsb_time();
+			if(wv>vl)
+				RSB_STDOUT("By merging %p [%d+%d+%d+%d=%d], the relative gain is %3.2lg%%, the absolute gain %3.2lg%% (%zu bytes)\n", (const void*)mtxMp, nzul, nzur, nzll, nzlr, mtxMp->nnz, rsavepcnt, asavepcnt, jmios);
+			RSB_ASSERT( nzul + nzur + nzll + nzlr == mtxMp->nnz );
+
+			mtxMp->bpntr = NULL;
+			mtxMp->bindx = NULL;
+			mtxMp->VA = NULL;
+
+			#pragma omp critical (rsb__mtx_split_cr)
+			{
+				/* TODO: one can minimize this number further */
+				rsb_nnz_idx_t tamnz = RSB_MIN(mtxMp->nnz,1+mtxMp->nr);
+#if RSB_MERGE_USE_TMP_COOMTX
+				tcoo.nnz = RSB_MAX( RSB_MAX(nzul+nzur,nzll+nzlr), tamnz );
+				tcoo.typecode = mtxMp->typecode;
+				if( NULL == rsb__allocate_coo_matrix_t(&tcoo) )
+					errval = RSB_ERR_ENOMEM;
+				TA = tcoo.IA;
+#else /* RSB_MERGE_USE_TMP_COOMTX */
+				TA = rsb__malloc( sizeof(rsb_coo_idx_t) * tamnz );
+				if(!TA)
+					errval = RSB_ERR_ENOMEM;
+#endif /* RSB_MERGE_USE_TMP_COOMTX */
+			}
+
+			RSB_SUBMATRIX_FOREACH(mtxMp, submatrix, i, j)
+			if(submatrix)
+			{
+				ifq = RSB_BOOL_FALSE;
+				if(wv>3)
+					RSB_STDOUT("lmax in IA/JA: %d/%d.\n", rsb__util_find_max_index_val(submatrix->bpntr, submatrix->nnz), rsb__util_find_max_index_val(submatrix->bindx, submatrix->nnz));
+				errval = rsb__do_switch_leaf(submatrix, RSB_MATRIX_STORAGE_BCOR, RSB_FLAG_USE_FULLWORD_INDICES, submatrix->roff-roffM, submatrix->coff-coffM, TA);
+				if(wv>3)
+					RSB_STDOUT("smax in IA/JA: %d/%d.\n", rsb__util_find_max_index_val(submatrix->bpntr, submatrix->nnz), rsb__util_find_max_index_val(submatrix->bindx, submatrix->nnz));
+
+				if(RSB_SOME_ERROR(errval))
+				{
+				       	/* RSB_PERR_GOTO(done,RSB_ERRM_ES); */
+					RSB_ASSERT(!(RSB_SOME_ERROR(errval)));
+				}
+				if( mtxMp->bpntr == NULL && mtxMp->bindx == NULL )
+				{
+					ifq = RSB_BOOL_TRUE;
+					mtxMp->bpntr = submatrix->bpntr;
+					mtxMp->bindx = submatrix->bindx;
+					mtxMp->VA = submatrix->VA;
+					mtxMp->flags = submatrix->flags;
+					mtxMp->matrix_storage = submatrix->matrix_storage;
+				}
+				mtxMp->roff = RSB_MIN(mtxMp->roff, submatrix->roff);
+				mtxMp->coff = RSB_MIN(mtxMp->coff, submatrix->coff);
+				bcoffM = RSB_MIN(bcoffM, submatrix->bcoff);
+				broffM = RSB_MIN(broffM, submatrix->broff);
+				/* broff, bcoff remain the same (local indices) */
+				/* base bm, bk */
+				mtxMp->bm   = mtxMp->nr;
+				mtxMp->bk   = mtxMp->nc;
+				/* tighten bm, bk (local indices) */
+				mtxMp->bm   = RSB_MAX(mtxMp->bm  ,roffM+submatrix->bm-submatrix->roff  );
+				mtxMp->bk   = RSB_MAX(mtxMp->bk  ,coffM+submatrix->bk-submatrix->coff  );
+				/* br, bc remain the same */
+				/*RSB_STDOUT("br/bc : %d/%d: %d/%d.\n", mtxMp->br,mtxMp->bc,submatrix->br,submatrix->bc); */
+
+				for(sml=0;sml<mtxAp->all_leaf_matrices_n;sml++)
+				if(mtxAp->all_leaf_matrices[sml].mtxlp == submatrix)
+				{
+					/* In order to get rid of this loop one shall reorder the submatrices appropriately */
+					if(ifq == RSB_BOOL_TRUE)
+					{
+						mtxAp->all_leaf_matrices[sml].mtxlp = NULL;
+						if(wv>3)
+							RSB_STDOUT("Nullified leaf %d [%d,%d] and substituted with merged (%d).\n",sml,i,j,smi);
+					}
+					else
+					{
+						mtxAp->all_leaf_matrices[sml].mtxlp = NULL;
+						if(wv>3)
+							RSB_STDOUT("Nullified leaf %d [%d,%d].\n",sml,i,j);
+					}
+				}
+			} /* submatrix */
+			RSB_SUBMATRIX_FOREACH(mtxMp, submatrix, i, j)
+			if(submatrix)
+			{
+				RSB_BZERO_P(submatrix);
+			}
+			RSB_DO_FLAG_DEL(mtxMp->flags, RSB_FLAG_WANT_BCSS_STORAGE);
+			RSB_DO_FLAG_DEL(mtxMp->flags, RSB_FLAG_USE_HALFWORD_INDICES);
+			RSB_DO_FLAG_ADD(mtxMp->flags, RSB_FLAG_WANT_COO_STORAGE);
+			mtxMp->matrix_storage = RSB_MATRIX_STORAGE_BCOR;
+			mtxMp->broff = broffM;
+			mtxMp->bcoff = bcoffM;
+			lst = -rsb_time();
+			{
+				int mo = 0, wp = 0;
+#if RSB_AT_ALLOW_GETENV
+				if(getenv("RSB_MERGE_USRT"))
+					mo = rsb__util_atoi(getenv("RSB_MERGE_USRT"));
+#endif /* RSB_AT_ALLOW_GETENV */
+				/* Note: we are inside a parallel outer loop, therefore the sort algorithm here can only be serial. */
+				if(mo == 0)
+				{
+					errval += rsb__cor_merge(mtxMp->typecode, mtxMp->VA, mtxMp->bpntr, mtxMp->bindx, 0        , nzul, nzur, 1, wp, &tcoo);
+					RSB_ASSERT(!(RSB_SOME_ERROR(errval)));
+					errval += rsb__cor_merge(mtxMp->typecode, mtxMp->VA, mtxMp->bpntr, mtxMp->bindx, nzul+nzur, nzll, nzlr, 1, wp, &tcoo);
+					RSB_ASSERT(!(RSB_SOME_ERROR(errval)));
+				}
+
+				if(mo == 1)
+				{
+					struct rsb_mtx_t * mtxCp = mtxMp->sm[2];
+
+					// RSB_STDOUT("Merging of %p %p %p %p\n",mtxMp->sm[0],mtxMp->sm[1],mtxMp->sm[2],mtxMp->sm[3]);
+					/* in the invocations below, one has to use the nr of M, not of the submatrices */
+					if( mtxMp->sm[0] && mtxMp->sm[1] )
+					{
+						rsb_nnz_idx_t nnzB = nzul + nzur;
+						rsb_coo_idx_t*IB = mtxMp->bpntr, *JB = mtxMp->bindx;
+						void * VB = mtxMp->VA;
+						errval = rsb_util_sort_row_major_inner(VB, IB, JB, nnzB,mtxMp->nr,mtxMp->nc,mtxMp->typecode,mtxMp->flags);
+						RSB_ASSERT(!(RSB_SOME_ERROR(errval)));
+					}
+
+					if( mtxMp->sm[2] && mtxMp->sm[3] )
+					{
+						rsb_nnz_idx_t nnzC = nzll + nzlr;
+						rsb_coo_idx_t*IC = mtxMp->bpntr + mtxCp->nzoff - mtxMp->nzoff,*JC = mtxMp->bindx + mtxCp->nzoff - mtxMp->nzoff;
+						void * VC = RSB_TYPED_OFF_PTR(mtxMp->typecode, mtxMp->VA, (mtxCp->nzoff-mtxMp->nzoff));
+						errval = rsb_util_sort_row_major_inner(VC, IC, JC, nnzC, mtxMp->nr, mtxMp->nc, mtxMp->typecode, mtxMp->flags);
+						RSB_ASSERT(!(RSB_SOME_ERROR(errval)));
+					}
+				}
+
+				if(mo >= 2)
+				{
+					errval = rsb_util_sort_row_major_inner(mtxMp->VA, mtxMp->bpntr, mtxMp->bindx, mtxMp->nnz, mtxMp->nr, mtxMp->nc, mtxMp->typecode, mtxMp->flags);
+				}
+				RSB_ASSERT(rsb__util_is_coo_array_sorted_up_partial_order(mtxMp->bpntr, mtxMp->nnz));
+				RSB_ASSERT(nzul+nzur+nzll+nzlr == mtxMp->nnz);
+			}
+			lst += rsb_time();
+			st += lst;
+			if(RSB_SOME_ERROR(errval))
+		       	{
+			       	RSB_PERR_GOTO(lerr, RSB_ERRM_ES);
+		       	}
+
+			RSB_REC_MARK_SUBM_FREE(mtxMp->sm[0]);
+			RSB_REC_MARK_SUBM_FREE(mtxMp->sm[1]);
+			RSB_REC_MARK_SUBM_FREE(mtxMp->sm[2]);
+			RSB_REC_MARK_SUBM_FREE(mtxMp->sm[3]);
+
+			if(!kc)
+			{
+				/* FIXME: shall implement a format selection policy right here */
+				rsb_fmt_t dms = RSB_MATRIX_STORAGE_BCOR;
+				rsb_flags_t flagsM = RSB_FLAG_USE_FULLWORD_INDICES;
+				if(RSB_INDICES_FIT_IN_HALFWORD(mtxMp->nr, mtxMp->nc))
+					flagsM = RSB_FLAG_USE_HALFWORD_INDICES;
+				if( mtxMp->nr+1 <= mtxMp->nnz )
+					dms = RSB_MATRIX_STORAGE_BCSR;
+				errval = rsb__do_switch_leaf(mtxMp, dms, flagsM, 0, 0, TA);
+				RSB_ASSERT(!(RSB_SOME_ERROR(errval)));
+				/* TODO: shall harmonize with rsb_do_switch_fresh_recursive_matrix_to_halfword_storages_parallel */
+			}
+
+			if(RSB_SOME_ERROR(errval))
+			{ 
+				/* TODO:error reporting is missing */
+				RSB_PERR_GOTO(lerr, RSB_ERRM_ES);
+		       	}
+			if(wv>3)
+				RSB_STDOUT("tmax in IA/JA: %d/%d.\n", rsb__util_find_max_index_val(mtxMp->bpntr, mtxMp->nnz), rsb__util_find_max_index_val(mtxMp->bindx, mtxMp->nnz) );
+
+			llt += rsb_time();
+			lt += llt;
+
+			mtxMp->est = lst;
+			mtxMp->tat = llt;
+			mtxMp->sat = llt - lst; /* TODO: this is to be completed */
+
+			#pragma omp critical (rsb__mtx_split_cr)
+		       	{
+#if RSB_MERGE_USE_TMP_COOMTX
+				rsb__destroy_coo_matrix_t(&tcoo);
+#else
+				RSB_CONDITIONAL_FREE(TA);
+#endif /* RSB_MERGE_USE_TMP_COOMTX */
+		       	}
+			continue;
+lerr:
+			RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS
+			// goto done;
+		} /* smi */
+
+		if(wv>vl)
+			/* RSB_STDOUT("Now: %2.3lf bpnz\n", ((double)rsb__get_index_storage_amount(mtxAp))/mtxAp->nnz), */
+			RSB_STDOUT("Now: "),
+			RSB_STDOUT(RSB_PRINTF_MATRIX_AT_SUMMARY_ARGS(mtxAp)),
+			RSB_STDOUT("\n");
+
+		errval = rsb__refresh_array_of_leaf_matrices(mtxAp);
+		if(RSB_SOME_ERROR(errval))
+	       	{
+			RSB_ERROR("Need code to recover the old (pre-merge) matrix here !\n"); /* FIXME */
+			/* Essentially, reorder the coefficients and restore the old order */
+		       	RSB_PERR_GOTO(cer,RSB_ERRM_ES);
+		}
+		RSB_DO_FLAG_DEL(mtxAp->flags, RSB_FLAG_NON_ROOT_MATRIX);
+#if RSB_STORE_IDXSA
+		mtxAp->idxsa = rsb__get_index_storage_amount(mtxAp);
+#endif
+	}
+	mt += rsb_time(); 
+	nsap = mtxAp->all_leaf_matrices_n;
+	RSB_ASSIGN_IF(stp, st)
+	RSB_ASSIGN_IF(atp, at)
+	RSB_ASSIGN_IF(ltp, lt)
+	if(wv>0)
+		RSB_STDOUT("Merge (%d -> %d leaves) took w.c.t. of %0.4lgs, ~%0.4lgs of computing time (of which %0.4lgs sorting, %0.4lgs analysis)\n", nsbp, nsap, mt, lt, st, at);
+	goto err;
+cer:
+	RSB_ERROR("Critical merge error: cannot proceed. Merged matrix is in an inconsistent state!\n");
+err:
+	rsb__mtx_list_free(&ml);
+ret:
+	return errval;
+}
+
+static rsb_err_t rsb__mtx_adjust_subm_ptrs(struct rsb_mtx_t *RSB_RESTRICT  mtxCp, const struct rsb_mtx_t *RSB_RESTRICT  mtxAp, rsb_long_t smc)
+{
+	/* 
+	 * Adjusts pointer displacements in the matrix tree and in the leaf pointer list.
+	 */
+
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_submatrix_idx_t smi;
+	rsb_submatrix_idx_t n,si;
+
+	for(n=0;n<smc;++n)
+	{
+		if(mtxCp[n].nnz) /* If valid. FIXME: IF NOT (E.G. MERGED), SHALL BE COMPLETELY ZEROED. */
+		for(si=0;si<RSB_FOUR;++si)
+			if(mtxCp[n].sm[si])
+			{
+				RSB_PTR_SHIFT( mtxCp[n].sm[si], mtxAp, mtxCp, (struct rsb_mtx_t*) );
+			/*	RSB_STDOUT("%03d/%03d: %p\n",n,si,mtxCp[n].sm[si]); */
+			}
+	} /* n */
+
+	for(	smi=0; smi < mtxCp->all_leaf_matrices_n; ++smi )
+	{
+		RSB_PTR_SHIFT( mtxCp->all_leaf_matrices[smi].mtxlp, mtxAp, mtxCp, (struct rsb_mtx_t*)  );
+	}
+
+	return errval;
+}
+
+rsb_err_t rsb__mtx_realloc_with_spare_leaves(struct rsb_mtx_t **mtxApp, rsb_submatrix_idx_t slc)
+{
+	/*
+	 * Will return RSB_ERR_ENOMEM (or RSB_ERR_BADARGS) on failure, in which case the matrix will stay unchanged.
+	 * TODO: to get rid of this, need guaranteed preallocation in rsb__allocate_recursive_sparse_matrix_from_row_major_coo
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t *mtxAp = NULL;
+	rsb_submatrix_idx_t smc = 1 + rsb__submatrices_max_ptr_diff(*mtxApp);
+
+	if( slc <= 0 )
+	{
+		errval = RSB_ERR_BADARGS;
+		goto ret;
+	}
+
+	RSB_DEBUG_ASSERT( mtxApp);
+	RSB_DEBUG_ASSERT(*mtxApp);
+
+	mtxAp = rsb__realloc(*mtxApp,sizeof(*mtxAp)*(smc+slc));
+
+	if(mtxAp == NULL)
+	{
+		errval = RSB_ERR_ENOMEM;
+		goto ret;
+	}
+
+	if( mtxAp == *mtxApp )
+		goto ret;
+
+	if(0)
+		RSB_STDOUT("in (%d -> %d) realloc, pointers of %d matrices have to be readjusted: %p -> %p  (%+d bytes)\n",smc,slc,smc,*mtxApp,mtxAp,(int)((rsb_byte_t*)mtxAp-(rsb_byte_t*)*mtxApp));
+	errval = rsb__mtx_adjust_subm_ptrs( mtxAp, *mtxApp, smc );
+ret:
+	if(mtxAp != NULL) /* on error (mtxAp==NULL), leave the matrix unchanged, as documented */
+		*mtxApp = mtxAp;
+	return errval;
+}
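+
+/* A generic sketch, not library code, of the pointer rebasing that
+ * rsb__mtx_adjust_subm_ptrs() performs (via RSB_PTR_SHIFT) after a
+ * relocating realloc(): each pointer into the old block keeps its byte
+ * offset, re-expressed against the new base. Names are illustrative. */
+#if 0
+static void * example_rebase(void *ptr, const void *oldbase, void *newbase)
+{
+	ptrdiff_t off = (const char*)ptr - (const char*)oldbase; /* offset in the old block */
+
+	return (char*)newbase + off; /* same offset, relative to the new block */
+}
+#endif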
+
+rsb_submatrix_idx_t rsb__get_diagonal_submatrices_count(const struct rsb_mtx_t *mtxAp)
+{
+	/**
+	  \ingroup gr_internals
+	  \return the count of submatrices lying on the diagonal, if the matrix is recursive; zero otherwise.
+	  */
+	rsb_submatrix_idx_t i,j;
+	struct rsb_mtx_t * submatrix = NULL;
+	rsb_submatrix_idx_t dsc = 0;
+
+	if(!mtxAp)
+		goto done;
+
+	if(rsb__is_terminal_recursive_matrix(mtxAp))
+	{
+		dsc = 1;	
+	}
+	else
+	{
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if( submatrix && i==j && RSB_SUBMATRIX_IS_ON_DIAG(submatrix) )
+			dsc += rsb__get_diagonal_submatrices_count(submatrix);
+	}
+done:
+	return dsc;
+}
+
+rsb_err_t rsb__init_set_quad_submatrices_info(const struct rsb_mtx_partitioning_info_t * pinfop, struct rsb_mtx_t ** matrices, rsb_nnz_idx_t uuk, rsb_nnz_idx_t mk, rsb_nnz_idx_t uk, rsb_nnz_idx_t lk, rsb_nnz_idx_t llk, rsb_coo_idx_t mB, rsb_coo_idx_t kB, rsb_coo_idx_t roff, rsb_coo_idx_t coff)
+{
+	/**
+	 *	\ingroup gr_internals
+	 *
+	 * */
+	rsb_submatrix_idx_t i,j,ij=0;
+	rsb_nnz_idx_t nzoff[RSB_FOUR+1]={uuk,uk,mk,lk,llk};
+	rsb_nnz_idx_t hnnz = 0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	for(i=0;i<2;++i)
+	for(j=0;j<2;++j)
+		if((hnnz=nzoff[i*2+j+1]-nzoff[i*2+j])>0)
+		{
+			RSB_DEBUG_ASSERT(hnnz>0);
+
+			matrices[ij]->roff=i*mB+roff;
+			matrices[ij]->coff=j*kB+coff;
+			matrices[ij]->nr=i?pinfop->nr-mB:mB;
+			matrices[ij]->nc=j?pinfop->nc-kB:kB;
+
+			RSB_DEBUG_ASSERT(matrices[i*2+j]->nr>0);
+			RSB_DEBUG_ASSERT(matrices[i*2+j]->nc>0);
+
+			matrices[ij]->M_b=0;
+			matrices[ij]->K_b=0;
+
+			matrices[ij]->br=pinfop->br;
+			matrices[ij]->bc=pinfop->bc;
+
+			matrices[ij]->nnz=nzoff[i*2+j+1]-nzoff[i*2+j];
+			matrices[ij]->block_count=nzoff[i*2+j]; /* TODO: this is a hack: in this function, block_count is used as the first nnz index */
+
+			//RSB_INFO("+\n");
+			++ij;
+		}
+		else
+			;//RSB_INFO("-\n");
+
+	RSB_DO_ERR_RETURN(errval)
+}
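+
+/* Illustrative only: the nzoff[] prefix array above encodes the quadrant
+ * boundaries, so quadrant (i,j) holds nzoff[i*2+j+1]-nzoff[i*2+j] nonzeroes;
+ * e.g. with {uuk,uk,mk,lk,llk} = {0,3,5,9,12} the four quadrants get
+ * 3, 2, 4 and 3 nonzeroes respectively. */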
+
+/* @endcond */
diff --git a/rsb_rec.h b/rsb_rec.h
new file mode 100644
index 0000000..91450da
--- /dev/null
+++ b/rsb_rec.h
@@ -0,0 +1,66 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/**
+ * @file
+ * @brief Recursion handling code
+ * @author Michele Martone
+ * */
+
+#ifndef RSB_REC_H_INCLUDED
+#define RSB_REC_H_INCLUDED
+
+#include "rsb_internals.h"
+
+enum rsb_op_t{	/* FIXME: temporary, experimental */
+            rsb_op_spmv = 1,
+            rsb_op_spsvl = 2,
+            rsb_op_spsvlt = 3,
+            rsb_op_spsvu = 4,
+            rsb_op_spsvut = 5,
+            rsb_op_get_csr = 6,
+            rsb_op_nop = 0
+};
+#define RSB_PTR_SHIFT(DPTR,MPTR,PPTR,TC)	/* FIXME: temporarily here */ \
+		if( (PPTR) > (MPTR) )	\
+		{ ( DPTR) = TC (( (const rsb_byte_t *) (DPTR) ) + ( ( (const rsb_byte_t *) (PPTR) ) - ( (const rsb_byte_t *) (MPTR) )) ); }	\
+		else	\
+		{ ( DPTR) = TC (( (const rsb_byte_t *) (DPTR) ) - ( ( (const rsb_byte_t *) (MPTR) ) - ( (const rsb_byte_t *) (PPTR) )) ); }
+
+#define RSB_TMP_OVERALLOC_MTX 4 /* 1 < RSB_TMP_OVERALLOC_MTX < 4; a temporary measure */
+
+rsb_err_t rsb__init_set_quad_submatrices_info(const struct rsb_mtx_partitioning_info_t * pinfop, struct rsb_mtx_t ** matrices, rsb_nnz_idx_t uuk, rsb_nnz_idx_t mk, rsb_nnz_idx_t uk, rsb_nnz_idx_t lk, rsb_nnz_idx_t llk, rsb_coo_idx_t mB, rsb_coo_idx_t kB, rsb_coo_idx_t roff, rsb_coo_idx_t coff);
+rsb_err_t rsb__get_array_of_leaf_matrices(struct rsb_mtx_t *mtxAp, struct rsb_translated_matrix_t ** tmatricesp, rsb_submatrix_idx_t *countp);
+rsb_err_t rsb__fill_array_of_leaf_matrices(const struct rsb_translated_matrix_t *tmatrix, struct rsb_translated_matrix_t *matrices, rsb_submatrix_idx_t * n);
+rsb_err_t rsb__sort_array_of_leaf_matrices(const struct rsb_translated_matrix_t *rmatrix,struct rsb_translated_matrix_t *matrices, rsb_submatrix_idx_t n, enum rsb_op_t op);
+int rsb__compar_rcsr_matrix_for_spsvl(const void * ap, const void * bp);
+size_t rsb__get_index_storage_amount(const struct rsb_mtx_t *mtxAp);
+rsb_submatrix_idx_t rsb__get_diagonal_elements_count(const struct rsb_mtx_t *mtxAp);
+rsb_submatrix_idx_t rsb__get_diagonal_submatrices_count(const struct rsb_mtx_t *mtxAp);
+rsb_err_t rsb__sort_array_of_leaf_matrices_for_ussv(const struct rsb_mtx_t * mtxAp, struct rsb_translated_matrix_t *leaf_matrices, rsb_submatrix_idx_t n, rsb_trans_t transl);
+rsb_err_t rsb__leaves_merge(struct rsb_mtx_t * RSB_RESTRICT mtxAp, rsb_submatrix_idx_t manp, rsb_time_t * RSB_RESTRICT stp, rsb_time_t *RSB_RESTRICT atp, rsb_time_t *RSB_RESTRICT ltp, const int wv, int kc);
+rsb_err_t rsb__leaves_merge_multiple(struct rsb_mtx_t *mtxAp, rsb_time_t *stp, rsb_time_t *atp, rsb_time_t *ltp, const int wv, int kc);
+rsb_err_t rsb__mtx_split(struct rsb_mtx_t * RSB_RESTRICT mtxAp, rsb_submatrix_idx_t manp, rsb_time_t * RSB_RESTRICT stp, rsb_time_t * RSB_RESTRICT atp, rsb_time_t * RSB_RESTRICT ltp, const int wv, int kc);
+rsb_err_t rsb__mtx_realloc_with_spare_leaves(struct rsb_mtx_t **mtxApp, rsb_submatrix_idx_t slc);
+
+#endif /* RSB_REC_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_rec2coo.c b/rsb_rec2coo.c
new file mode 100644
index 0000000..deac58a
--- /dev/null
+++ b/rsb_rec2coo.c
@@ -0,0 +1,309 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+ /**
+ * @file
+ * @author Michele Martone
+ * @brief Code for matrix format conversion. 
+ * */
+#include "rsb_common.h"
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+static rsb_err_t rsb__do_switch_recursive_in_place_matrix_to_in_place_rcoo_leaf(struct rsb_mtx_t * mtxAp, rsb_bool_t do_shift)
+{
+	/**
+		\ingroup gr_internals
+		TODO: move somewhere else
+		TODO: flags checks
+		FIXME: UNTESTED
+
+	// to free the unnecessary data:
+	// RSB_CONDITIONAL_FREE(mtxAp)
+	// RSB_CONDITIONAL_FREE(mtxAp->all_leaf_matrices)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(rsb__is_coo_matrix(mtxAp))
+	{
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,(RSB_FLAG_USE_HALFWORD_INDICES)))
+			rsb__do_switch_array_to_fullword_coo((rsb_half_idx_t*)mtxAp->bpntr,mtxAp->nnz,do_shift?mtxAp->roff:0),
+			rsb__do_switch_array_to_fullword_coo((rsb_half_idx_t*)mtxAp->bindx,mtxAp->nnz,do_shift?mtxAp->coff:0);
+		goto err;
+	}
+	errval = rsb__do_switch_compressed_array_to_fullword_coo(mtxAp->bpntr,mtxAp->Mdim,do_shift?mtxAp->roff:0,NULL);
+	mtxAp->matrix_storage = RSB_MATRIX_STORAGE_BCOR;
+	RSB_DO_FLAG_DEL(mtxAp->flags,(RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS));
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	if(RSB_DO_FLAG_HAS(mtxAp->flags,(RSB_FLAG_USE_HALFWORD_INDICES)))
+		rsb__do_switch_array_to_fullword_coo((rsb_half_idx_t*)mtxAp->bindx,mtxAp->nnz,do_shift?mtxAp->coff:0);
+err:
+	RSB_DO_FLAG_SUBST(mtxAp->flags,(RSB_FLAG_USE_HALFWORD_INDICES),(RSB_FLAG_WANT_COO_STORAGE));
+	// rsb__do_print_matrix_stats(mtxAp, RSB_CONST_DUMP_MATRIX_MARKET RSB_CONST_DUMP_RECURSION_BRIEF, NULL);
+	// rsb__do_print_matrix_stats(mtxAp, RSB_CONST_DUMP_MATRIX_MARKET RSB_CONST_DUMP_RECURSION_BRIEF, NULL);
+	RSB_DO_ERR_RETURN(errval)
+}
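+
+/* A conceptual sketch, not library code, of the halfword-to-fullword index
+ * widening with offset addition used above; whether the library helper
+ * widens in place is an assumption here. In-place widening must proceed
+ * backwards, so not-yet-read halfword entries are not overwritten. */
+#if 0
+static void example_widen_indices(unsigned short *H, int *F, int n, int off)
+{
+	int k;
+
+	for(k = n - 1; k >= 0; --k) /* backwards: F may overlay H's storage */
+		F[k] = (int)H[k] + off;
+}
+#endif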
+
+rsb_err_t rsb__do_switch_recursive_in_place_matrix_to_in_place_rcoo(struct rsb_mtx_t * mtxAp, rsb_bool_t do_shift)
+{
+	/**
+		\ingroup gr_internals
+		TODO: move somewhere else
+		TODO: flags checks
+		FIXME: UNTESTED
+
+	// to free the unnecessary data:
+	// RSB_CONDITIONAL_FREE(mtxAp)
+	// RSB_CONDITIONAL_FREE(mtxAp->all_leaf_matrices)
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(RSB_UNLIKELY(!mtxAp))
+	{
+		RSB_ERROR(RSB_ERRM_ES);
+		return RSB_ERR_BADARGS;
+	}
+
+	if(RSB_UNLIKELY(rsb__is_root_matrix(mtxAp)))
+	{
+		if(!RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_ASSEMBLED_IN_COO_ARRAYS))
+			errval = RSB_ERR_BADARGS;
+		else
+			errval = rsb__do_switch_recursive_matrix_to_fullword_storage(mtxAp);
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}
+	}
+
+	/*deleted by rsb__do_switch_recursive_matrix_to_fullword_storage*/
+	//RSB_DO_FLAG_ADD(mtxAp->flags,RSB_FLAG_WANT_COO_STORAGE);
+
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		rsb_submatrix_idx_t i,j;
+		struct rsb_mtx_t * submatrix;
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+			if(submatrix)
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_switch_recursive_in_place_matrix_to_in_place_rcoo(submatrix,do_shift));
+	}
+	else
+		errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_rcoo_leaf(mtxAp,do_shift);
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_switch_recursive_in_place_matrix_to_in_place_rcoo_parallel(struct rsb_mtx_t * mtxAp, rsb_bool_t do_shift)
+{
+	/**
+		\ingroup gr_internals
+		FIXME: UNTESTED
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_submatrix_idx_t n,all_leaf_matrices_n;
+
+	if(RSB_UNLIKELY(!mtxAp))
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	all_leaf_matrices_n = mtxAp->all_leaf_matrices_n;
+
+	//rsb__do_print_matrix_stats(mtxAp, RSB_CONST_DUMP_RECURSION_BRIEF, NULL);
+	#pragma omp parallel for schedule(static,1) reduction(|:errval)  shared(mtxAp) RSB_NTC
+	for(n=0;n<all_leaf_matrices_n;++n)
+	{
+		struct rsb_mtx_t *submatrix = mtxAp->all_leaf_matrices[n].mtxlp;
+		RSB_DO_ERROR_CUMULATE(errval,rsb__do_switch_recursive_in_place_matrix_to_in_place_rcoo_leaf(submatrix,do_shift));
+	}
+	//rsb__do_print_matrix_stats(mtxAp, RSB_CONST_DUMP_MATRIX_MARKET , NULL);
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_switch_recursive_in_place_matrix_to_in_place_coo_sorted(struct rsb_mtx_t * mtxAp, struct rsb_coo_matrix_t * coop)
+{
+	/**
+		\ingroup gr_internals
+		TODO: Move somewhere else
+		FIXME: UNTESTED, TEMPORARY; makes sense only for in-place allocated matrices
+		This conversion gives you sorted coordinates.
+		On exit, the pointer matrix is deallocated.
+		FIXME: error behaviour is undefined
+		FIXME: Here it would make sense to use a recursive merge algorithm.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t *fsm = NULL;
+	//rsb_flags_t flags;
+	struct rsb_coo_matrix_t coo;
+	int wmb = 1; /* want merge based (new: 20140727) */
+
+	RSB_BZERO_P(&coo);
+
+	if(RSB_UNLIKELY(!mtxAp))
+	{
+		RSB_ERROR(RSB_ERRM_E_MTXAP);
+		return RSB_ERR_BADARGS;
+	}
+	
+	if(mtxAp->all_leaf_matrices_n == 1)
+		wmb = 0; /* merge routine will not convert a single leaf's format */
+
+#if 0
+	fsm = rsb__do_get_first_submatrix(mtxAp);
+	if(!fsm)
+	{
+		errval = RSB_ERR_INTERNAL_ERROR;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	flags = mtxAp->flags;
+	errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_rcoo(mtxAp,RSB_BOOL_TRUE);
+	RSB_CONDITIONAL_FREE(mtxAp->all_leaf_matrices);
+	RSB_INIT_COO_FROM_MTX(coop,mtxAp);
+	RSB_BIND_COO_TO_MTX(coop,fsm);
+	RSB_CONDITIONAL_FREE(mtxAp);
+	//if((errval = rsb__util_sort_row_major_parallel(coop->VA,coop->IA,coop->JA,coop->nnz,coop->nr,coop->nc,coop->typecode,flags))!=RSB_ERR_NO_ERROR)
+	if((errval = rsb_util_sort_row_major_bucket_based_parallel(coop->VA,coop->IA,coop->JA,coop->nnz,coop->nr,coop->nc,coop->typecode,flags))!=RSB_ERR_NO_ERROR)
+		goto err;
+#else
+	if(wmb)
+	{
+		errval = rsb__leaves_merge_multiple(mtxAp, NULL, NULL, NULL, 0, 1);
+
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_PERR_GOTO(err, RSB_ERRM_ES);
+		}
+
+		fsm = rsb__do_get_first_submatrix(mtxAp);
+		if(!fsm)
+		{
+			errval = RSB_ERR_INTERNAL_ERROR;
+			RSB_PERR_GOTO(err, RSB_ERRM_ES);
+		}
+	
+		RSB_INIT_COO_FROM_MTX(coop, mtxAp);
+		RSB_BIND_COO_TO_MTX(coop, fsm);
+		RSB_ASSERT(coop->VA || coop->nnz == 0);
+		RSB_ASSERT(coop->IA || coop->nnz == 0);
+		RSB_ASSERT(coop->JA || coop->nnz == 0);
+	}
+	else
+	{
+		fsm = rsb__do_get_first_submatrix(mtxAp);
+		if(!fsm)
+		{
+			errval = RSB_ERR_INTERNAL_ERROR;
+			RSB_PERR_GOTO(err, RSB_ERRM_ES);
+		}
+	
+		RSB_INIT_CXX_FROM_MTX(&coo, mtxAp);
+		coo.nr = coo.nc = 0;
+		if(rsb__allocate_coo_matrix_t(&coo) != &coo)
+		{
+			errval = RSB_ERR_INTERNAL_ERROR;
+			RSB_PERR_GOTO(err, RSB_ERRM_ES);
+		}
+		errval = rsb__do_get_rows_sparse(RSB_TRANSPOSITION_N, NULL, mtxAp, coo.VA, coo.IA, coo.JA, 0, mtxAp->nr-1, &coo.nnz, RSB_FLAG_NOFLAGS);
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_PERR_GOTO(err, RSB_ERRM_ES);
+		}
+		RSB_INIT_COO_FROM_MTX(coop, mtxAp);
+		RSB_BIND_COO_TO_MTX(coop, fsm);
+		RSB_COO_MEMCPY_parallel(coop->VA, coop->IA, coop->JA, coo.VA, coo.IA, coo.JA, 0, 0, coo.nnz, mtxAp->el_size);
+		rsb__destroy_coo_matrix_t(&coo);
+	}
+	fsm->VA = NULL;
+	fsm->bpntr = NULL;
+	fsm->bindx = NULL;
+	rsb__destroy_inner(mtxAp);
+#endif
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_switch_recursive_in_place_matrix_to_in_place_coo_unsorted(struct rsb_mtx_t * mtxAp, struct rsb_coo_matrix_t * coop)
+{
+	/**
+		\ingroup gr_internals
+		TODO: move somewhere else
+		FIXME: UNTESTED, TEMPORARY; makes sense only for in-place allocated matrices
+		This conversion does not guarantee sorted coordinates (cf. the _sorted variant above).
+		On exit, the pointer matrix is deallocated.
+		FIXME: error behaviour is undefined
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	//struct rsb_coo_matrix_t coo;
+	struct rsb_mtx_t *fsm = NULL;
+
+	if(RSB_UNLIKELY(!mtxAp))
+	{
+		RSB_ERROR(RSB_ERRM_ES);
+		return RSB_ERR_BADARGS;
+	}
+#if 0
+	RSB_INIT_CXX_FROM_MTX(&coo,mtxAp);
+	if(rsb__allocate_coo_matrix_t(&coo)!=&coo)
+	{
+		errval = RSB_ERR_ENOMEM;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	errval = rsb__do_get_rows_sparse(RSB_TRANSPOSITION_N,NULL,mtxAp,coo.VA,coo.IA,coo.JA,0,mtxAp->nr-1,&coo.nnz,RSB_FLAG_NOFLAGS);
+	if(RSB_SOME_ERROR(errval)) goto err;
+	//rsb__destroy_inner(mtxAp);
+	rsb__do_mtx_free(mtxAp);
+	coop->VA = coo.VA;
+	coop->IA = coo.IA;
+	coop->JA = coo.JA;
+	RSB_INIT_COO_FROM_MTX(coop,&coo);
+//	mtxAp->VA = coo.VA;
+//	mtxAp->bpntr = coo.IA;
+//	mtxAp->bindx = coo.JA;
+#else
+	fsm = rsb__do_get_first_submatrix(mtxAp);
+	if(!fsm)
+	{
+		errval = RSB_ERR_INTERNAL_ERROR;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_rcoo_parallel(mtxAp,RSB_BOOL_TRUE);
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	RSB_BIND_COO_TO_MTX(coop,fsm);
+	RSB_INIT_COO_FROM_MTX(coop,mtxAp);
+	fsm->VA = NULL;
+	fsm->bpntr = NULL;
+	fsm->bindx = NULL;
+	rsb__destroy_inner(mtxAp);
+#endif
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+/* @endcond */
diff --git a/rsb_rec2coo.h b/rsb_rec2coo.h
new file mode 100644
index 0000000..61fdaa0
--- /dev/null
+++ b/rsb_rec2coo.h
@@ -0,0 +1,35 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+ /**
+ * @file
+ * @author Michele Martone
+ * @brief Code for matrix format conversion. 
+ * */
+#ifndef RSB_REC2COO_H_INCLUDED
+#define RSB_REC2COO_H_INCLUDED
+#include "rsb_common.h"
+rsb_err_t rsb__do_switch_recursive_in_place_matrix_to_in_place_rcoo(struct rsb_mtx_t * mtxAp, rsb_bool_t do_shift);
+rsb_err_t rsb__do_switch_recursive_in_place_matrix_to_in_place_coo_sorted(struct rsb_mtx_t * mtxAp, struct rsb_coo_matrix_t * coop);
+rsb_err_t rsb__do_switch_recursive_in_place_matrix_to_in_place_coo_unsorted(struct rsb_mtx_t * mtxAp, struct rsb_coo_matrix_t * coop);
+#endif /* RSB_REC2COO_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_rec2csr.c b/rsb_rec2csr.c
new file mode 100644
index 0000000..02976bb
--- /dev/null
+++ b/rsb_rec2csr.c
@@ -0,0 +1,122 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+ /**
+ * @file
+ * @brief Code for matrix format conversion. 
+ * @author Michele Martone
+ * */
+#include "rsb_common.h"
+
+rsb_err_t rsb__do_switch_recursive_in_place_matrix_to_in_place_csr(struct rsb_mtx_t * mtxAp, struct rsb_coo_matrix_t * coop)
+{
+	/**
+		\ingroup gr_internals
+		TODO: move somewhere else
+		FIXME: UNTESTED, TEMPORARY; makes sense only for in-place allocated matrices.
+		This conversion gives you sorted coordinates.
+		On exit, the input matrix structure is deallocated.
+		FIXME: error behaviour is undefined.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	//struct rsb_coo_matrix_t coo;
+	//struct rsb_mtx_t *fsm=NULL;
+
+	if(RSB_UNLIKELY(!mtxAp))
+	{
+		RSB_ERROR(RSB_ERRM_ES);
+		return RSB_ERR_BADARGS;
+	}
+	if(RSB_DO_TOOFEWNNZFORCSR(mtxAp->nnz,mtxAp->nr))
+	{
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+#if 1
+	errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_coo_sorted(mtxAp,coop);
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_ERROR(RSB_ERRM_ES);
+		goto err;
+	}
+	errval = rsb__do_switch_fullword_array_to_compressed(coop->IA,coop->nnz,coop->nr);
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_ERROR(RSB_ERRM_ES);
+		goto err;
+	}
+#else
+#endif
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_switch_recursive_in_place_matrix_to_in_place_csc(struct rsb_mtx_t * mtxAp, struct rsb_coo_matrix_t * coop)
+{
+	/**
+		\ingroup gr_internals
+		TODO: move somewhere else
+		FIXME: UNTESTED, TEMPORARY; makes sense only for in-place allocated matrices.
+		This conversion gives you sorted coordinates.
+		On exit, the input matrix structure is deallocated.
+		FIXME: error behaviour is undefined.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_coo_matrix_t coo;
+	//struct rsb_mtx_t *fsm=NULL;
+
+	if(RSB_UNLIKELY(!mtxAp))
+	{
+		RSB_ERROR(RSB_ERRM_ES);
+		return RSB_ERR_BADARGS;
+	}
+	if(RSB_DO_TOOFEWNNZFORCSR(mtxAp->nnz,mtxAp->nc))
+	{
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+	RSB_INIT_CXX_FROM_MTX(&coo,mtxAp);
+	coo.nr=coo.nc=0;/* FIXME: why ? */
+	if(rsb__allocate_coo_matrix_t(&coo)!=&coo)
+		goto err;
+	rsb__util_coo_array_set(coo.IA,coo.nnz,0);
+	errval = rsb__do_get_csc(mtxAp,(rsb_byte_t**)(&coo.VA),&coo.JA,&coo.IA);
+	coo.nr=mtxAp->nr;
+	coo.nc=mtxAp->nc;
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_ERROR(RSB_ERRM_ES);
+		goto err;
+	}
+	/* copy the CSC arrays and dimensions to coop before freeing mtxAp,
+	 * so that no field is read from freed memory */
+	coop->typecode=mtxAp->typecode;
+	coop->nnz=coo.nnz;
+	coop->VA=coo.VA;
+	coop->IA=coo.IA;
+	coop->JA=coo.JA;
+	coop->nr=coo.nr;
+	coop->nc=coo.nc;
+	rsb__do_mtx_free(mtxAp);
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+/* @endcond */
diff --git a/rsb_rec2csr.h b/rsb_rec2csr.h
new file mode 100644
index 0000000..4cb5d0b
--- /dev/null
+++ b/rsb_rec2csr.h
@@ -0,0 +1,34 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+ /**
+ * @file
+ * @brief Code for matrix format conversion. 
+ * @author Michele Martone
+ * */
+#ifndef RSB_REC2CSR_H_INCLUDED
+#define RSB_REC2CSR_H_INCLUDED
+#include "rsb_common.h"
+rsb_err_t rsb__do_switch_recursive_in_place_matrix_to_in_place_csr(struct rsb_mtx_t * mtxAp, struct rsb_coo_matrix_t * coop);
+rsb_err_t rsb__do_switch_recursive_in_place_matrix_to_in_place_csc(struct rsb_mtx_t * mtxAp, struct rsb_coo_matrix_t * coop);
+#endif /* RSB_REC2CSR_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_render.c b/rsb_render.c
new file mode 100644
index 0000000..3eea197
--- /dev/null
+++ b/rsb_render.c
@@ -0,0 +1,297 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains pixmap rendering functions.
+ * */
+/*
+ * this code is EXPERIMENTAL and UNFINISHED
+ *
+ * TODO : with very little effort, we could introduce
+ *        - a mipmap based rsb_mtx_mipmap_t struct
+ * 	  - column major, too
+ * 	  - remove extra headers dumpout
+ * */
+
+#include "rsb_internals.h"	/* rsb_coo_matrix_t */
+
+rsb_err_t rsb__do_print_postscript_header(FILE*fd, int width, int height, float csw, float csh)
+{
+	/**
+	   \ingroup gr_internals
+	   Prints an Encapsulated PostScript header for an area originating at (0 0),
+	   bound by (width,height), and defining 'csquare', a square-drawing macro.
+
+	   FIXME: this duplicates functionality from rsb_eps.c.
+	*/
+#if RSB_ALLOW_STDOUT
+	RSB_FPRINTF(fd,
+"%%!PS-Adobe-3.0 EPSF-3.0\n"
+"%%%%Creator: "RSB_PACKAGE_STRING"\n"
+"%%%%Title: matrix plot\n"
+"%%%%CreationDate: \n"
+"%%%%DocumentData: Clean7Bit\n"
+"%%%%Origin: 0 0\n"
+"%%%%BoundingBox: 0 0 %d %d\n"
+"%%%%LanguageLevel: 2 \n"
+"%%%%Pages: 1\n"
+"%%%%Page: 1 1\n"
+"\n"
+"/csquare {\n"
+"        newpath\n"
+"        0 0 moveto\n"
+"        0 -%g rlineto\n"
+"        %g 0 rlineto\n"
+"        0 %g rlineto\n"
+"        closepath\n"
+"       setrgbcolor\n"
+"        fill\n"
+"} def\n"
+"\n"
+"0 0 moveto\n"
+"\n",
+(int)width, (int)height,
+csw,csh,csw
+);
+		RSB_FPRINTF(fd,"save /$LIBRSB_DICT 3 dict def $LIBRSB_DICT begin /M {moveto} bind def /Z {gsave currentpoint lineto %g setlinewidth 1 setlinecap stroke grestore} bind def /D {M Z} bind def /K {0.5 0.5 setrgbcolor} bind def\n",1.0);
+		RSB_FPRINTF(fd,"/R {rlineto} bind def\n");
+		RSB_FPRINTF(fd,"/N {newpath} bind def\n");
+		RSB_FPRINTF(fd,"/L {lineto} bind def\n");
+		RSB_FPRINTF(fd,"/C {closepath} bind def\n");
+		RSB_FPRINTF(fd,"/SLW {setlinewidth} bind def\n");
+		RSB_FPRINTF(fd,"/SRGB {setrgbcolor} bind def\n");
+		RSB_FPRINTF(fd,"/SCF {scalefont} bind def\n");
+		RSB_FPRINTF(fd,"/SF {setfont} bind def\n");
+	return RSB_ERR_NO_ERROR;
+#else /* RSB_ALLOW_STDOUT */
+	return RSB_ERR_UNSUPPORTED_FEATURE;
+#endif /* RSB_ALLOW_STDOUT */
+}
+
+static rsb_err_t rsb_get_pixmap_RGB_from_coo(const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t rows, rsb_coo_idx_t cols, void * pixmap, rsb_coo_idx_t p_rows, rsb_coo_idx_t p_cols, int br, int bc, rsb_flags_t render_flags)
+{
+	/**
+	 * \ingroup gr_internals
+	 *
+	 * Fills the specified pixmap (assumed RGB, sized 3*p_rows*p_cols bytes) with
+	 * foreground pixels for nonzeroes and background pixels for zeroes.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	char * dst;
+//	rsb_coo_idx_t i,j;
+	rsb_nnz_idx_t k;
+	size_t sz;
+	size_t bpp=3;
+	char fgc=0x00,bgc=0x0;
+
+	if(!pixmap || !IA || !JA)
+		return RSB_ERR_BADARGS;
+	/* should check IA and JA and rows and cols and nnz */
+//	if(cols>p_cols)
+//		return RSB_ERR_BADARGS;
+	
+	/* DANGER : overflow is possible : FIXME */
+	if( RSB_COO_ADD_OVERFLOW(p_rows, p_cols) )
+	{
+		errval = RSB_ERR_LIMITS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	dst=pixmap;
+	sz =p_cols;
+	sz*=p_rows;
+	sz*=bpp;
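+	/* sz now counts the pixmap bytes: p_rows * p_cols pixels, bpp (3 for RGB) bytes each */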
+
+	/* 
+	 * NOTE : inserting unsorted coefficients is SLOW!
+	 * wouldn't be faster to copy and sort ? 
+	 * FIXME
+	 */
+	//RSB_STDOUT("br %d bc %d\n",br,bc);
+
+	if(render_flags & 0x01)
+	{
+		bgc=~bgc;
+	}
+	fgc=~bgc;
+
+	if(br==bc && br==1)
+	{
+		memset(dst,bgc,sz);
+		for(k=0;k<nnz;++k)
+		{
+			dst[bpp*(IA[k]*p_cols+JA[k])+0]=fgc;
+			dst[bpp*(IA[k]*p_cols+JA[k])+1]=fgc;
+			dst[bpp*(IA[k]*p_cols+JA[k])+2]=fgc;
+		}
+	}
+	else
+	{
+		memset(dst,bgc,sz);
+		for(k=0;k<nnz;++k)
+		{
+			if(IA[k]/br < 0 || IA[k]/br>p_rows-1 || JA[k]/bc < 0 || JA[k]/bc>p_cols-1)
+				RSB_ERROR("I %ld JA %ld %ld %ld\n",(long)IA[k]/br,(long)JA[k]/bc,(long)p_rows,(long)p_cols);
+			dst[bpp*((IA[k]/br)*p_cols+JA[k]/bc)+0]=fgc;
+			dst[bpp*((IA[k]/br)*p_cols+JA[k]/bc)+1]=fgc;
+			dst[bpp*((IA[k]/br)*p_cols+JA[k]/bc)+2]=fgc;
+		}
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_get_pixmap_RGB_from_matrix(const char * filename, void * pixmap, int width, int height)
+{
+	/**
+	 * \ingroup gr_internals
+
+	 * FIXME : needs error handling
+
+	 * This function is experimentally used to render the sparse matrix;
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t *IA=NULL, *JA=NULL;
+	void *VA=NULL;
+	rsb_coo_idx_t m=0,k=0;
+	rsb_nnz_idx_t nnz=0;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_DOUBLE ;
+#else /* RSB_NUMERICAL_TYPE_DOUBLE */
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_DEFAULT;
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+	rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+	rsb_flags_t render_flags=0x01; /* 0x01 inverts the background color (see rsb_get_pixmap_RGB_from_coo) */
+	int br=1,bc=1;
+	rsb_time_t t=0;
+
+	RSB_DO_FLAG_ADD(flags,RSB_FLAG_WANT_BCSS_STORAGE);
+	RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORTED_INPUT);
+
+	if(!filename || !pixmap)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	t = - rsb_time();
+	if((errval = rsb__util_mm_load_matrix_f(filename,&IA,&JA,&VA,&m,&k,&nnz,typecode,flags,NULL,NULL)))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+	t += rsb_time();
+//	RSB_STDOUT("load : %lg\n",t);
+	
+/*	sorting doesn't seem to speed up sparse matrix rendering	*/
+	t = - rsb_time();
+//	if((errval = rsb_util_sort_row_major_inner(VA,IA,JA,nnz,m,k,typecode,flags)))
+//	{
+//		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+//	}
+	t += rsb_time();
+//	RSB_STDOUT("sort : %lg\n",t);
+
+	if(width <k)
+		bc=(k+width-1)/width ;
+
+	if(height<m)
+		br=(m+height-1)/height;
+
+	if(br<1 || bc<1)
+	{
+		// errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	/* rsb__mtx_as_pixmap_resize is optional */
+	if( (errval = rsb__mtx_as_pixmap_resize(VA, IA, JA, nnz, &nnz, m, k, height, width, typecode, flags)))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+	else
+	{
+		m=height;
+		k=width;
+		br=bc=1;
+	}
+
+	t = - rsb_time();
+	if( (errval = rsb_get_pixmap_RGB_from_coo(IA, JA, nnz, m, k, pixmap, height, width, br, bc, render_flags)) )
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+	t += rsb_time();
+//	RSB_STDOUT("render : %lg\n",t);
+
+err:
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_CONDITIONAL_FREE(JA);
+	RSB_CONDITIONAL_FREE(VA);
+
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__mtx_as_pixmap_resize(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_nnz_idx_t *rnnz, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_coo_idx_t p_rows, rsb_coo_idx_t p_cols, rsb_type_t typecode, rsb_flags_t render_flags)
+{
+	/*
+	 * \ingroup gr_internals
+	 * User shall be allowed to provide RSB_FLAG_SORTED_INPUT even on unsorted input.
+	 * However, in that case only contiguous duplicates will be caught.
+	 * FIXME : untested
+	 * FIXME : missing comments and error handling.
+	 * FIXME : missing input sanitizing
+	 */
+
+	double rf,cf;/* row and column factors */
+	rsb_nnz_idx_t n;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	rf=((double)p_rows)/(double)m;
+	cf=((double)p_cols)/(double)k;
+
+	for(n=0;n<nnz;++n)
+	{
+		IA[n]= (rsb_coo_idx_t)(((double)IA[n]) * rf);
+		JA[n]= (rsb_coo_idx_t)(((double)JA[n]) * cf);
+	}
+	m=p_rows;
+	k=p_cols;
+	
+	render_flags &= RSB_FLAG_SORTED_INPUT;
+	if(!RSB_DO_FLAG_HAS(render_flags,RSB_FLAG_SORTED_INPUT))
+	if((errval = rsb_util_sort_row_major_inner(VA,IA,JA,nnz,m,k,typecode,render_flags/*RSB_FLAG_NOFLAGS*/))!=RSB_ERR_NO_ERROR)
+	{
+		errval = RSB_ERR_GENERIC_ERROR;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	*rnnz = rsb_weed_out_duplicates(IA,JA,VA,nnz,typecode,RSB_FLAG_DUPLICATES_SUM/*RSB_FLAG_DUPLICATES_DEFAULT_HANDLE*/|RSB_FLAG_SORTED_INPUT);
+
+	/* missing error handling here */	
+err:
+	return errval;
+}
+
+/* @endcond */
diff --git a/rsb_render.h b/rsb_render.h
new file mode 100644
index 0000000..23ecfd2
--- /dev/null
+++ b/rsb_render.h
@@ -0,0 +1,44 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains pixmap rendering functions.
+ * */
+
+#ifndef RSB_RENDER_H_INCLUDED
+#define RSB_RENDER_H_INCLUDED
+
+#include "rsb_internals.h"	/* rsb_coo_matrix_t */
+
+rsb_err_t rsb__do_get_pixmap_RGB_from_matrix(const char * filename, void * pixmap, int width, int height);
+rsb_err_t rsb__mtx_as_pixmap_resize(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_nnz_idx_t *rnnz, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_coo_idx_t p_rows, rsb_coo_idx_t p_cols, rsb_type_t typecode, rsb_flags_t render_flags);
+rsb_err_t rsb__do_print_postscript_header(FILE*fd, int width, int height, float csw, float csh);
+
+#define RSB_DEFAULT_MATRIX_RENDERING_ROWS 512
+#define RSB_DEFAULT_MATRIX_RENDERING_COLS 512
+
+#endif /* RSB_RENDER_H_INCLUDED */
+
+/* @endcond */
diff --git a/rsb_rsb.c b/rsb_rsb.c
new file mode 100644
index 0000000..d781e0e
--- /dev/null
+++ b/rsb_rsb.c
@@ -0,0 +1,1671 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*! @file
+ *  @author Michele Martone
+ *  @brief Implementation of the library user interface.
+ */
+/*
+ *  The user interface functions and data structures for this library implementation.
+ *  (Functions declared as static are not intended to be part of the user interface.)
+ *  Internals should not be present in this file.
+ *  As a rule, each interface function defined in this file shall NOT call another interface function, but only its internal wrapper.
+ * */
+#include "rsb_internals.h"
+#include <stdio.h>
+#include "rsb_do.h"
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+#define RSB_INTERFACE_RETURN_MTX_ERRP(MTXAP,ERRVAL,ERRVALP) \
+	                                 RSB_INTERFACE_ENDCMD \
+	RSB_CONDITIONAL_ERRPSET(ERRVALP,ERRVAL) RSB_DO_MTX_RETURN_INTERFACE(MTXAP,ERRVAL);
+#define RSB_INTERFACE_RETURN_MTX(MTXAP)  RSB_INTERFACE_ENDCMD return MTXAP;
+#define RSB_INTERFACE_RETURN_ERR(ERRVAL) 	RSB_INTERFACE_ENDCMD RSB_DO_ERR_RETURN_INTERFACE(ERRVAL)
+/* #define RSB_INTERFACE_RETURN_ERR_SILENT(ERRVAL) RSB_INTERFACE_ENDCMD return (ERRVAL); */
+#define RSB_INTERFACE_RETURN_VAL(VAL)    RSB_INTERFACE_ENDCMD {return (VAL);}
+
+/*!
+ * \internal
+ * This library, currently, can be used by only one master thread.
+ * Therefore it uses no handle for library execution instances.
+ * */
+
+rsb_err_t rsb_lib_init(struct rsb_initopts * iop)
+{
+	/*!
+	   \ingroup rsb_doc_library rsb_doc_rsb 
+ 	   \brief
+	   This is the library initialization function.
+	   \n
+	   It must be called only once before using any other library function.
+	   \n
+	   It is allowed to call it again after \ref rsb_lib_exit().
+	   \n
+	   To fine-tune the library behaviour, one may specify a number of options via the \c iop parameter.
+	   \n
+	   Options may be specified also after \ref rsb_lib_init() by calling \ref rsb_lib_reinit().
+	   \n
+	   One may call #RSB_REINIT_SINGLE_VALUE_GET  with flag  #RSB_IO_WANT_IS_INITIALIZED_MARKER  to verify whether the library has been initialized or not.
+	  
+	   \param \rsb_io_str_msg
+	   \return \rsberrcodemsg
+	   \see_lib_init
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_init(iop);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
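+
+/* A minimal usage sketch added for illustration (not from the original
+ * sources; the function name example_init_exit is hypothetical): it shows
+ * the init/exit pairing described above, with minimal error handling. */
+#if 0 /* illustrative only */
+static rsb_err_t example_init_exit(void)
+{
+	rsb_err_t errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS);
+
+	if(errval != RSB_ERR_NO_ERROR)
+		return errval;
+	/* ... allocate matrices, compute, free matrices ... */
+	return rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
+}
+#endif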
+
+/* @cond INNERDOC  */
+/* TODO: this is a "in development" function, not yet declared in rsb.h ; shall make it official when complete */
+rsb_err_t rsb__lib_get_info_str(int what, rsb_char_t* sbuf, size_t buflen)
+{
+	/* \see_lib_init */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_lib_get_info_str(what,sbuf,buflen);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+/* @endcond */
+
+rsb_err_t rsb_lib_set_opt(enum rsb_opt_t iof, const void*iop)
+{
+	/*!
+	 Sets the value of a library option.
+ 	 A value specified by the request flag \c iof  will be fetched from \c *iop and will be used to update the selected option in the library internal state.
+
+	 \rsb_iof_param_msg
+	 \rsb_iop_out_param_msg
+	 \see \rsb_iof_macros
+	 \see_lib_init
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	RSB_DO_REINIT_SINGLE_VALUE_C_IOP(iof,iop,RSB_IO_SPECIFIER_SET,errval);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_lib_get_opt(enum rsb_opt_t iof, void*iop)
+{
+	/*!
+	 Gets the value of a library option.
+ 	 A value specified by the request flag \c iof  will be fetched from the library internal state and \c *iop will be updated accordingly.
+
+	 \rsb_iof_param_msg
+	 \rsb_iop_out_param_msg
+	 \see \rsb_iof_macros
+	 \see_lib_init
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	RSB_DO_REINIT_SINGLE_VALUE_GET(iof,iop,errval);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_lib_set_opt_str(const rsb_char_t* opnp, const rsb_char_t* opvp)
+{
+	/*!
+	   \ingroup rsb_doc_library rsb_doc_rsb
+
+	   Specifies individual library options in order to fine-tune the library behaviour.
+	   Both the option name and the value shall be expressed as strings, identical to their preprocessor identifiers (see #rsb_opt_t ).
+	   The \c opnp string will be translated internally to the corresponding request flag values, and the passed value will be parsed out of the \c opvp string.
+	   \n
+	  
+	   \param \rsb_io_str_msg_opnp
+	   \param \rsb_io_str_msg_opvp
+	   \return \rsberrcodemsg
+
+	   \see_lib_init
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_set_initopt_as_string(opnp,opvp);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
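+
+/* A brief usage sketch added for illustration (not from the original
+ * sources; example_set_opt_str is a hypothetical name, and the specific
+ * option used here is only an assumed example). Option name and value
+ * are both passed as strings, the name matching its rsb_opt_t
+ * preprocessor identifier. */
+#if 0 /* illustrative only */
+static rsb_err_t example_set_opt_str(void)
+{
+	/* equivalent to setting the corresponding rsb_opt_t option to 1 */
+	return rsb_lib_set_opt_str("RSB_IO_WANT_VERBOSE_TUNING", "1");
+}
+#endif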
+
+rsb_err_t rsb_lib_reinit(struct rsb_initopts * iop)
+{
+	/*!
+	   \ingroup rsb_doc_library rsb_doc_rsb
+	  
+	   Changes the library operation options which were set at initialization time either by a user or as defaults.
+	   \n
+	   Not all options may be supported, depending on build time library settings. 
+	   \n
+	   If an unsupported option was specified, an appropriate error (e.g.: #RSB_ERR_UNSUPPORTED_OPERATION) will be returned.  
+	   \n
+	   On the first error, option processing is interrupted and the remaining options (if any) are not processed.
+	   \n
+	   Program execution may continue safely even if an error code is returned (that is, library status should be consistent).
+	   \n
+	   
+	   \param \rsb_io_str_msg
+	   \return \rsberrcodemsg
+	   \see_lib_init
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_reinit(iop);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_lib_exit(struct rsb_initopts * iop)
+{
+	/*!
+	   \ingroup rsb_doc_library rsb_doc_rsb
+	  
+	   Finalize \librsb.
+	   \n
+	   #rsb_lib_exit should be called after having freed all matrices.
+	   \n
+	   If not all of the data structures were properly deallocated before, this function may still attempt finalizing the library and return the #RSB_ERR_MEMORY_LEAK error code (this depends on the \c --enable-allocator-wrapper configure time option).
+	   Any allocated memory will be lost (\librsb does not keep track of allocated matrices).
+	   \n
+	   Internal library state will be cleared.
+	   After this call, it is legal to initialize the library again, by calling \ref rsb_lib_init().
+	   \n
+	   On an error, the library state may be inconsistent, so it is advisable to
+	   terminate program execution rather than forcing a new initialization with \ref rsb_lib_init().
+	   \n
+	   Parameter  \c iop  is reserved for future use; for now it is safe to pass #RSB_NULL_EXIT_OPTIONS.
+
+	   \param \rsb_io_str_msg
+	   \return \rsberrcodemsg
+	   \see_lib_init
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_exit();
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+struct rsb_mtx_t * rsb_mtx_alloc_from_coo_const(const void *VA, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_blk_idx_t brA, rsb_blk_idx_t bcA, rsb_flags_t flagsA, rsb_err_t * errvalp)
+{
+	/*!
+ 	   \ingroup rsb_doc_matrix_assembly rsb_doc_rsb
+
+	   Given as input COO arrays \c VA,IA,JA, allocates and assembles an RSB matrix using separate arrays.
+	  
+	   \param \rsb_ro_va_ia_ja_desc_msg
+	   \param \rsb_nnzA_inp_param_msg
+	   \param \rsb_type_param_msg
+	   \param \rsb_nrcows_A_sparse_inp_param_msg
+	   \param \rsb_nrbows_A_sparse_inp_param_msg
+	   \param \rsb_flagsa_coc_param_msg
+	   \param \rsb_errvp_inp_param_msg
+	   \return \rsbmtxpmessage
+	   \see_lib_alloc
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	RSB_INTERFACE_PREAMBLE
+	mtxAp = rsb__do_mtx_alloc_from_coo_const(VA,IA,JA,nnzA,typecode,nrA,ncA,brA,bcA,flagsA,&errval);
+	RSB_INTERFACE_RETURN_MTX_ERRP(mtxAp,errval,errvalp);
+}
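+
+/* A minimal assembly sketch added for illustration (not from the original
+ * sources; example_coo_assembly is a hypothetical name). It builds a 2x2
+ * double-precision identity matrix from COO arrays and frees it. */
+#if 0 /* illustrative only */
+static rsb_err_t example_coo_assembly(void)
+{
+	const double VA[] = { 1.0, 1.0 };
+	const rsb_coo_idx_t IA[] = { 0, 1 }, JA[] = { 0, 1 };
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = rsb_mtx_alloc_from_coo_const(
+		VA, IA, JA, 2, RSB_NUMERICAL_TYPE_DOUBLE,
+		2, 2, 0, 0 /* default blocking */, RSB_FLAG_NOFLAGS, &errval);
+
+	if(mtxAp)
+		rsb_mtx_free(mtxAp);
+	return errval;
+}
+#endif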
+
+struct rsb_mtx_t * rsb_mtx_alloc_from_coo_inplace(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_blk_idx_t brA, rsb_blk_idx_t bcA, rsb_flags_t flagsA, rsb_err_t * errvalp)
+{
+	/*!
+	   \ingroup rsb_doc_matrix_assembly rsb_doc_rsb
+	   
+	   \rsb_mtx_alloc_coo_inplace_msg
+	   \n
+	   \rsb_note_assume_nnz_sized
+
+	   \param \rsb_rw_va_ia_ja_desc_msg
+	   \param \rsb_nnzA_inp_param_msg
+	   \param \rsb_type_param_msg
+	   \param \rsb_nrcows_A_sparse_inp_param_msg
+	   \param \rsb_nrbows_A_sparse_inp_param_msg
+	   \param \rsb_flagsa_coi_param_msg
+	   \param \rsb_errvp_inp_param_msg
+	   \return \rsbmtxpmessage
+
+	   \see_lib_alloc
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	RSB_INTERFACE_PREAMBLE
+	mtxAp = rsb__do_mtx_alloc_from_coo_inplace(VA,IA,JA,nnzA,typecode,nrA,ncA,brA,bcA,flagsA,&errval);
+	RSB_INTERFACE_RETURN_MTX_ERRP(mtxAp,errval,errvalp);
+}
+
+struct rsb_mtx_t * rsb_mtx_free(struct rsb_mtx_t * mtxAp)
+{
+	/*!
+	   \ingroup rsb_doc_matrix_assembly rsb_doc_rsb
+
+	   Frees a previously allocated sparse matrix structure.
+	   \n
+	   In the case the matrix has the #RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS flag, the main three data arrays \rsb_va_ia_ja_decl will not be freed by #rsb_mtx_free (see \rsb_lib_alloc_in_place).
+
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \return \rsb_ret_null
+
+	   \see_lib_alloc
+	 */
+	struct rsb_mtx_t * mtxBp = NULL;
+	RSB_INTERFACE_PREAMBLE
+	mtxBp = rsb__do_mtx_free(mtxAp);
+	RSB_INTERFACE_RETURN_MTX(mtxBp);
+}
+
+rsb_err_t rsb_mtx_clone(struct rsb_mtx_t ** mtxBpp, rsb_type_t typecode, rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp, rsb_flags_t flags)
+{
+	/*!
+	   \ingroup rsb_doc_matrix_assembly rsb_doc_rsb
+
+	   This function clones a given matrix, allocating a fresh data structure or overwriting an existing one.
+	   \n
+	   Target type (specified by \c typecode) can be different from that in the matrix.
+	   \n
+	   If \c alphap=NULL, the cloned matrix will not be scaled.
+	   \n
+	   This new structure will be completely separated and independent from the original one.
+	   \n
+	   Examples:
+	   \code
+// will clone the matrix exactly
+errval = rsb_mtx_clone(&mtxBp,RSB_NUMERICAL_TYPE_SAME_TYPE,RSB_TRANSPOSITION_N,NULL,mtxAp,RSB_FLAG_IDENTICAL_FLAGS);
+// will clone the transpose of the matrix
+errval = rsb_mtx_clone(&mtxBp,RSB_NUMERICAL_TYPE_SAME_TYPE,RSB_TRANSPOSITION_T,NULL,mtxAp,RSB_FLAG_IDENTICAL_FLAGS);
+// will clone the lower triangle of the matrix
+errval = rsb_mtx_clone(&mtxBp,RSB_NUMERICAL_TYPE_SAME_TYPE,RSB_TRANSPOSITION_N,NULL,mtxAp,RSB_FLAG_TRIANGULAR|RSB_FLAG_LOWER);
+	   \endcode
+	  
+	   \param \rsb_mtxtpp_inp_param_msg_b If \c *mtxBpp==NULL, a fresh clone will be assigned there; if not, the existing matrix structure will be freed and reallocated to host the new one. The case \c *mtxBpp==mtxAp is supported.
+	   \param \rsb_type_o_param_msg
+	   \param \rsb_transa_inp_param_msg
+	   \param \rsb_alpha_s_inp_param_msg Of the type code of \c mtxAp.
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \param \rsb_flags_stru_fla_msg
+	   \return \rsberrcodemsg
+
+	   \see_lib_alloc
+	 */
+	/* FIXME: what if RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS ? */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__clone(mtxBpp,typecode,transA,alphap,mtxAp,flags);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+#if 0
+rsb_err_t rsb_get_rows_dense(const struct rsb_mtx_t * mtxAp, void* row, rsb_coo_idx_t frA, rsb_coo_idx_t lrA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t *rnzp, rsb_flags_t flags )
+{
+        /*!
+	 * \ingroup rsb_doc_matrix_operations rsb_doc_rsb
+	 * \return \rsberrcodemsg
+         * FIXME : missing test case, document
+         * */
+        return rsb__do_get_rows_dense(mtxAp,row,frA,lrA,IA,JA,rnzp,flags);
+}
+#endif
+
+
+
+#define RSB_EXPOSE_NEW_GENERAL_INTERFACE 1	/* temporary (internals) to delimit the new interface which supersedes the deprecated one */
+#if RSB_EXPOSE_NEW_GENERAL_INTERFACE
+#if 0
+/* #define RSB_EXTF_NONE		0x00000000*/			/* */
+#define RSB_EXTF_SLOWTRI	0x00000001			/*!< Flag values for extracting the strictly lower submatrix*/
+#define RSB_EXTF_SUPPTRI	0x00000002			/*!< Flag values for extracting the strictly upper submatrix .*/
+#define RSB_EXTF_DIAG		0x00000004			/*!< Flag values for extracting the diagonal submatrix.*/
+#define RSB_EXTF_LOWTRI		(RSB_EXTF_SLOWTRI|RSB_EXTF_DIAG)/*!< Flag values for extracting the lower submatrix.*/
+#define RSB_EXTF_UPPTRI		(RSB_EXTF_SUPPTRI|RSB_EXTF_DIAG)/*!< Flag values for extracting the upper submatrix.*/
+#define RSB_EXTF_OFFDIAG	(RSB_EXTF_SUPPTRI|RSB_EXTF_SLOWTRI)/*!< Flag values for extracting the whole matrix.*/
+#define RSB_EXTF_EXPSYMM	0x00000008			/*!< */
+#define RSB_EXTF_EXPDIAG	0x00000010			/*!< */
+#define RSB_EXTF_EXP		(RSB_EXTF_EXPDIAG|RSB_EXTF_EXPSYMM)	/*!< */
+#define RSB_EXTF_ALL		(RSB_EXTF_OFFDIAG|RSB_EXTF_DIAG)/*!< Flag values for extracting the whole matrix.*/
+#define RSB_EXTF_EXPALL		(RSB_EXTF_ALL|RSB_EXTF_EXP)	/*!< */
+#define RSB_EXTF_DEFAULT	RSB_EXTF_ALL			/*!< Flag values for extracting the whole matrix. */
+/* #define RSB_EXTF_SYMMEXP	0x00000020*/
+rsb_err_t rsb_get_submatrix_as_coo(rsb_type_t typecode, rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t *mtxAp, void* VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t *rnzp, rsb_flags_t flags);/* NEW, unfinished */
+
+rsb_err_t rsb_get_submatrix_as_coo(rsb_type_t typecode, rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t *mtxAp, void* VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t *rnzp, rsb_flags_t flags/* , rsb_extff_t eflags*/)/* NEW, unfinished */
+{
+	/*!
+	   \ingroup rsb_doc_matrix_conversion rsb_doc_rsb
+
+	   Extracts a submatrix.
+	   Call this function with VA,IA,JA NULL in order to get nonzeroes count.
+
+	   \param \rsb_type_param_msg
+	   \param \rsb_transa_inp_param_msg
+	   \param \rsb_alpha_inp_param_msg
+	   \param \rsb_wr_va_ia_ja_desc_msg
+	   \param \rsb_inp_rnz_msg
+	   \param \rsb_flags_idc_param_msg
+	   \return \rsberrcodemsg
+
+	   \warning \rsb_warn_unfinished_msg 
+	   \warning \rsb_warn_unfinished_flags_doc_msg
+	 */
+	 /*
+	   \todo: Shall document eflags.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb_do_get_submatrix_as_coo(typecode, transA, alphap, mtxAp, VA, IA, JA, rnzp, flags/* , eflags*/);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+#endif
+#endif /* RSB_EXPOSE_NEW_GENERAL_INTERFACE */
+
+#if 0
+rsb_err_t rsb_spmv_nt(const void *alphap, const struct rsb_mtx_t * mtxAp, const void * x1p, const void * x2p, rsb_coo_idx_t incX, const void * betap, void * y1p, void * y2p, rsb_coo_idx_t incY);
+rsb_err_t rsb_spmv_ata(const void *alphap, const struct rsb_mtx_t * mtxAp, const void * Xp, rsb_coo_idx_t incX, const void * betap, void * Yp, rsb_coo_idx_t incY);
+rsb_err_t rsb_spmv_power(rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp,  rsb_int_t exp, const void * Xp, rsb_coo_idx_t incX, const void * betap, void * Yp, rsb_coo_idx_t incY);
+
+rsb_err_t rsb_spmv_nt(const void *alphap, const struct rsb_mtx_t * mtxAp, const void * x1p, const void * x2p, rsb_coo_idx_t incX, const void * betap, void * y1p, void * y2p, rsb_coo_idx_t incY)
+{
+	/*!
+	   \ingroup rsb_doc_matrix_operations rsb_doc_rsb
+
+	   Computes \f$Y_1 \leftarrow \beta Y_1 + \alpha {A}     \cdot X_1 \f$
+	   and      \f$Y_2 \leftarrow \beta Y_2 + \alpha {A}^{T} \cdot X_2 \f$.
+
+	   \param \rsb_beta_inp_param_msg
+	   \param \rsb_transa_inp_param_msg
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \param \rsb_incx_inp_param_msg
+	   \param \rsb_incy_inp_param_msg
+	   \param \rsb_y1y2_inp_param_msg
+	   \param \rsb_x1x2_inp_param_msg
+	   \return \rsberrcodemsg
+
+	   \warning \rsb_warn_untested_msg
+	 */
+
+	// FIXME: this is only a placeholder, waiting for a combined implementation.
+	// once done, should speedup methods like Biconjugate Gradient (BiCG).
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb_spmv(RSB_TRANSPOSITION_N,alphap,mtxAp,x1p,incX,betap,y1p,incY)|
+		rsb_spmv(RSB_TRANSPOSITION_T,alphap,mtxAp,x2p,incX,betap,y2p,incY);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_spmv_ata(const void *alphap, const struct rsb_mtx_t * mtxAp, const void * Xp, rsb_coo_idx_t incX, const void * betap, void * Yp, rsb_coo_idx_t incY)
+{
+	/*!
+	   \ingroup rsb_doc_matrix_operations rsb_doc_rsb
+
+	   Computes \f$Y \leftarrow \beta Y + \alpha {A}^{T} {A} \cdot X \f$.
+
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \param \rsb_x_inp_param_msg
+	   \param \rsb_y_out_param_msg
+	   \param \rsb_incx_inp_param_msg
+	   \param \rsb_incy_inp_param_msg
+	   \param \rsb_alpha_inp_param_msg
+	   \param \rsb_beta_inp_param_msg
+	   \return \rsberrcodemsg
+
+	   \warning \rsb_warn_unimplemented_msg
+	   \warning \rsb_warn_untested_msg
+	 */
+	rsb_err_t errval = RSB_ERR_UNIMPLEMENTED_YET;
+	RSB_INTERFACE_PREAMBLE
+	RSB_INTERFACE_RETURN_ERR(errval)
+	// FIXME: this is only a placeholder, waiting for a combined implementation.
+}
+
+rsb_err_t rsb_spmv_power(rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp,  rsb_int_t exp, const void * Xp, rsb_coo_idx_t incX, const void * betap, void * Y, rsb_coo_idx_t incY)
+{
+	/*!
+	   \ingroup rsb_doc_matrix_operations rsb_doc_rsb
+
+	   Computes \f$Y \leftarrow \beta Y + \alpha ({A}^{T})^{exp} {A} \cdot X \f$.
+
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \param \rsb_x_inp_param_msg
+	   \param \rsb_y_out_param_msg
+	   \param \rsb_incx_inp_param_msg
+	   \param \rsb_incy_inp_param_msg
+	   \param \rsb_alpha_inp_param_msg
+	   \param \rsb_beta_inp_param_msg
+	   \param \rsb_exp_inp_param_msg
+	   \return \rsberrcodemsg
+
+	   \warning \rsb_warn_unimplemented_msg
+	   \warning \rsb_warn_untested_msg
+	 */
+
+	rsb_err_t errval = RSB_ERR_UNIMPLEMENTED_YET;
+	RSB_INTERFACE_PREAMBLE
+	RSB_INTERFACE_RETURN_ERR(errval)
+	// FIXME: this is only a placeholder, waiting for a combined implementation.
+}
+#endif
+
+rsb_err_t rsb_spmv(rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp, const void * Xp, rsb_coo_idx_t incX, const void * betap, void * Yp, rsb_coo_idx_t incY)
+{
+	/*!
+	   \ingroup rsb_doc_matrix_operations rsb_doc_rsb
+
+	   Multiplies a sparse matrix \f$opa(A)\f$ by a vector \f$X\f$, updating vector \f$Y\f$.
+	   \n
+	   Computes \f$Y \leftarrow \beta Y + \alpha \cdot opa(A) \cdot X \f$.
+	   \n
+	   It is not allowed to supply the same \c Xp and \c Yp  (that is, \c Xp==Yp).
+	   \n
+
+	   \rsb_transa_mtx_msg
+	   \rsb_num_threads
+
+	   \param \rsb_transa_inp_param_msg
+	   \param \rsb_alpha_inp_param_msg
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \param \rsb_x_inp_param_msg
+	   \param \rsb_incx_inp_param_msg
+	   \param \rsb_beta_inp_param_msg
+	   \param \rsb_y_out_param_msg
+	   \param \rsb_incy_inp_param_msg
+	   \return \rsberrcodemsg
+	   \see_lib_spmx
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+       	errval = rsb_do_spmv(transA, alphap, mtxAp, Xp, incX, betap, Yp, incY);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
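+
+/* A brief usage sketch added for illustration (not from the original
+ * sources; example_spmv is a hypothetical name, and a double typecode is
+ * assumed). It computes Y <- beta*Y + alpha*A*X with unit strides. */
+#if 0 /* illustrative only */
+static rsb_err_t example_spmv(const struct rsb_mtx_t * mtxAp,
+		const double * Xp, double * Yp)
+{
+	const double alpha = 2.0, beta = 1.0;
+
+	return rsb_spmv(RSB_TRANSPOSITION_N, &alpha, mtxAp, Xp, 1, &beta, Yp, 1);
+}
+#endif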
+
+#if 0
+rsb_err_t rsb_spmv_sa(const struct rsb_mtx_t * mtxAp, const void * Xp, void * Yp, const void *alphap, rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup rsb_doc_matrix_operations rsb_doc_rsb
+	 * computes \f$Y \leftarrow Y + \alpha op(A) \cdot X \f$
+	 * \return \rsberrcodemsg
+	 * 
+	 * */
+	if(!alphap || !mtxAp)
+		return RSB_ERR_BADARGS;
+	return rsb_do_spmv_general(transA,alphap,mtxAp,Xp,1,NULL,Yp,1,RSB_OP_FLAG_DEFAULT RSB_DEFAULT_OUTER_NRHS_SPMV_ARGS);
+}
+#endif
+
+rsb_err_t rsb_spsv(rsb_trans_t transT, const void * alphap, const struct rsb_mtx_t * mtxTp, const void * Xp, rsb_coo_idx_t incX, void * Yp, rsb_coo_idx_t incY)
+{
+	/*!
+	   \ingroup rsb_doc_matrix_operations rsb_doc_rsb
+
+	   Computes \f$Y \leftarrow \alpha \cdot opt( T )^{-1} \cdot X \f$, with upper or lower triangular \f$T\f$.
+	   It is allowed to supply the same \c Xp and \c Yp  (that is, \c Xp==Yp).
+
+	   \rsb_transt_mtx_msg
+
+	   \param \rsb_transt_inp_param_msg
+	   \param \rsb_alpha_inp_param_msg
+	   \param \rsb_mtxt_inp_param_msg_t
+	   \param \rsb_x_inp_param_msg
+	   \param \rsb_incx_inp_param_msg
+	   \param \rsb_y_out_param_msg
+	   \param \rsb_incy_inp_param_msg
+	   \return \rsberrcodemsg
+	   \rsb_spsv_no_zero
+	   \see_lib_spsx
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_spsv(transT, alphap, mtxTp, Xp, incX, Yp, incY);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
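+
+/* A brief usage sketch added for illustration (not from the original
+ * sources; example_spsv is a hypothetical name, and a double typecode is
+ * assumed). It solves Y <- alpha * inv(T) * X for a triangular T. */
+#if 0 /* illustrative only */
+static rsb_err_t example_spsv(const struct rsb_mtx_t * mtxTp,
+		const double * Xp, double * Yp)
+{
+	const double alpha = 1.0;
+
+	return rsb_spsv(RSB_TRANSPOSITION_N, &alpha, mtxTp, Xp, 1, Yp, 1);
+}
+#endif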
+
+#if 0
+static rsb_err_t rsb__do_spsv_sxsx(const struct rsb_mtx_t * mtxAp, void * Yp, const void * alphap, rsb_coo_idx_t incX, rsb_trans_t transl)
+{
+	/*!
+	   \ingroup rsb_doc_matrix_operations rsb_doc_rsb
+	   computes \f$Y \leftarrow \alpha op(A)^{-1} \cdot Y \f$.
+	   \return \rsberrcodemsg
+	  
+	   It is allowed to use rhs == out, but in this case beta should be set to 1 and incX=incY, or the result will be undefined.
+	 */
+	return rsb__do_spsv_general(transl,alphap,mtxAp,Yp,1,Yp,1,RSB_OP_FLAG_DEFAULT RSB_INNER_NRHS_SPSV_ARGS_IDS);
+}
+#endif
+
+#if 0
+rsb_err_t rsb_spmv_uxux(const struct rsb_mtx_t * mtxAp, const void * Xp, void * Yp, const void *alphap, const void * betap, rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup rsb_doc_matrix_operations rsb_doc_rsb
+	 * computes \f$Y \leftarrow \beta \cdot Y + \alpha\cdot A\cdot X\f$
+	 * \return \rsberrcodemsg
+	 * */
+	if(!alphap || !betap)
+		return RSB_ERR_BADARGS;
+	return rsb_do_spmv_general(transA,alphap,mtxAp,Xp,1,betap,Yp,1,RSB_OP_FLAG_DEFAULT RSB_DEFAULT_OUTER_NRHS_SPMV_ARGS);
+}
+#endif
+
+#if 0
+rsb_err_t rsb_spmm_az(const struct rsb_mtx_t * mtxAp, const void * mrhs, void *mout, rsb_int_t bstride, rsb_int_t cstride, rsb_int_t nrhs, rsb_trans_t transA)
+{
+	/*!
+	 * \ingroup rsb_doc_matrix_operations rsb_doc_rsb
+	 * computes \f$Y \leftarrow op(A) \cdot X \f$
+	 * when X is a multi-vector with nrhs elements, mrhs elements having stride bstride and mout elements having stride cstride
+	 * \return \rsberrcodemsg
+	 * */
+	 /* FIXME : and error detection ? **/
+#ifdef RSB_HAVE_OPTYPE_SPMM_AZ
+	if(!mtxAp || !mout)
+		return -1;
+
+	rsb__cblas_Xscal(mtxAp->typecode,nrhs*mtxAp->nr,NULL,mout,1);	/*FIXME:temporary*/
+
+	return rsb_spmm_inner(mtxAp,mrhs,mout,bstride,cstride,nrhs,transA);
+#else
+	return RSB_ERR_UNSUPPORTED_OPERATION;
+#endif
+}
+
+rsb_err_t rsb_spmm_sxsx(const struct rsb_mtx_t * mtxAp, const void * Bp, void * Cp, rsb_nnz_idx_t ldB, rsb_nnz_idx_t ldC, rsb_coo_idx_t nrhs, rsb_trans_t transA, const void * alphap, const void * betap, rsb_flags_t order)
+{
+	/*!
+	   \return \rsberrcodemsg
+	 */
+	return rsb__do_spmm(transA,alphap,mtxAp,nrhs,order,Bp,ldB,betap,Cp,ldC,RSB_OP_FLAG_DEFAULT);
+}
+#endif
+
+// rsb_err_t rsb_spsm_sxsx(const struct rsb_mtx_t * mtxAp, void * Bp, rsb_nnz_idx_t ldB, rsb_coo_idx_t nrhs, rsb_trans_t transT, const void * alphap, const void * betap, rsb_flags_t order)
+
+rsb_err_t rsb_spsm(rsb_trans_t transT, const void * alphap, const struct rsb_mtx_t * mtxTp, rsb_coo_idx_t nrhs, rsb_flags_t order, const void * betap, const void * Bp, rsb_nnz_idx_t ldB, void * Cp, rsb_nnz_idx_t ldC)
+{
+	/*!
+	   \ingroup rsb_doc_matrix_operations rsb_doc_rsb
+
+	   Computes \f$Y \leftarrow \alpha \cdot opt( T )^{-1} \cdot B \f$, with upper or lower triangular \f$T\f$.
+
+	   \rsb_transt_mtx_msg
+
+	   \param \rsb_transt_inp_param_msg
+	   \param \rsb_alpha_inp_param_msg
+	   \param \rsb_mtxt_inp_param_msg_t
+	   \param \rsb_nrhs_inp_param_msg
+	   \param \rsb_order_inp_param_msg
+	   \param \rsb_beta_inp_param_msg
+	   \param \rsb_b_inp_param_msg
+	   \param \rsb_ldb_inp_param_msg
+	   \param \rsb_c_inp_param_msg
+	   \param \rsb_ldc_inp_param_msg
+	   \return \rsberrcodemsg
+	   \see_lib_spsx
+	 */
+	   // \param \rsb_incx_inp_param_msg \param \rsb_incy_inp_param_msg
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_spsm(transT,alphap,mtxTp,nrhs,order,betap,Bp,ldB,Cp,ldC);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_coo_sort(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnzA, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA,  rsb_type_t typecode, rsb_flags_t flagsA )
+{
+	/*!
+	   \ingroup gr_util rsb_doc_rsb
+
+	   Sorts in row-major order the given COO input arrays representing a sparse matrix \f$A\f$.
+
+	   \param \rsb_wr_va_ia_ja_desc_msg
+	   \param \rsb_nnzA_inp_param_msg
+	   \param \rsb_nrcows_A_sparse_inp_param_msg
+	   \param \rsb_type_param_msg
+	   \param \rsb_flagsa_inp_param_msg If unsure, use #RSB_FLAG_NOFLAGS.
+	   \return \rsberrcodemsg
+	   \see_lib_util
+
+	   \note By invoking with swapped \c IA and \c JA (and swapping \c nrA and \c ncA as well) one can obtain column major order.
+	 */
+	/* \warning \rsb_warn_unfinished_flags_doc_msg */
+	/* Does it support Fortran flags ? */
+	/* In the future, one may reuse this interface for:
+	 * - cleaning up nonzeroes
+	 * - sorting in different ways
+	 * - compacting
+	 * - checking only if input is sorted; e.g.: using the RSB_FLAG_SORTED_INPUT flag
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+#if 0
+	/* This is not the default and shall be rechecked. */
+	errval = rsb__util_sort_row_major_buffered(VA,IA,JA,nnzA,nrA,ncA,typecode,flags,NULL,0);
+#else
+	/* This is the default path, well tested. */
+	errval = rsb_util_sort_row_major_inner(VA,IA,JA,nnzA,nrA,ncA,typecode,flagsA);
+#endif
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
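+
+/* A minimal sorting sketch added for illustration (not from the original
+ * sources; example_coo_sort is a hypothetical name). On return the three
+ * arrays are permuted consistently into row-major order. */
+#if 0 /* illustrative only */
+static rsb_err_t example_coo_sort(void)
+{
+	double VA[] = { 3.0, 1.0, 2.0 };
+	rsb_coo_idx_t IA[] = { 2, 0, 1 }, JA[] = { 2, 0, 1 };
+
+	return rsb_coo_sort(VA, IA, JA, 3, 3, 3,
+		RSB_NUMERICAL_TYPE_DOUBLE, RSB_FLAG_NOFLAGS);
+}
+#endif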
+
+rsb_err_t rsb_file_mtx_get_dims(const char * filename, rsb_coo_idx_t* nrp, rsb_coo_idx_t *ncp, rsb_coo_idx_t *nzp, rsb_flags_t*flagsp)
+{
+	/*!
+	   Reads structural information (dimensions, structural flags) for a matrix file into user-specified (and optionally \c NULL) variables.
+
+	   \ingroup rsb_doc_misc rsb_doc_rsb
+	   \param \rsb_filename_inp_param_msg
+	   \param \rsb_nrcowsp_inp_param_msg
+	   \param \rsb_nnzp_inp_param_msg
+	   \param \rsb_flagsp_inp_param_msg
+	   \return \rsberrcodemsg If read dimensions are illegal (see #rsb_coo_idx_t,#rsb_nnz_idx_t), #RSB_ERR_LIMITS will be returned.
+
+	   \rsb_matrixmarketonlynote_m
+	   \see_lib_get
+	*/
+	/* TODO: do we detect/read Hermitian-ness ? */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_file_mtx_get_dims(filename, nrp, ncp, nzp, flagsp);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_perror(void *stream, rsb_err_t errval)
+{
+	/*!
+	   \ingroup rsb_doc_error_handling rsb_doc_rsb
+	  
+	   Prints out to the specified \c stream  a string corresponding to the error code (using \c <stdio.h>'s \c fprintf).
+	   If \c stream==NULL, will print out to the default output stream; see #RSB_IO_WANT_OUTPUT_STREAM .
+	   
+	   \param stream A \c (FILE*) pointer, as declared in \c <stdio.h>; can be \c NULL.
+	   \param \rsb_errval_inp_param_msg
+	   \return \rsberrcodemsg
+	   \see_lib_error
+	 */
+	// \warning \rsb_warn_soon_to_be_updated_msg.
+	//   \todo : Should use all bits of the errval variable.
+	//   \todo : Should rename the function or make a new one matching perror().
+	//   \todo : Could invoke this function from rsb_strerror_r(*,NULL,*)
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_perror(stream,errval);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_strerror_r(rsb_err_t errval, rsb_char_t * buf, size_t buflen)
+{
+	/*!
+	   \ingroup rsb_doc_error_handling rsb_doc_rsb
+
+	   Writes a textual description of an error code in the specified string buffer.
+	   No more than \c buflen characters will be written (including the terminating \c NUL character).
+	  
+	   \param \rsb_errval_inp_param_msg
+	   \param \rsb_buf_inp_param_msg
+	   \param \rsb_buflen_inp_param_msg
+
+	   \return \rsberrcodemsg
+	   \see_lib_error
+	 */
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_strerror_r(errval,buf,buflen);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
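+
+/* A brief error-reporting sketch added for illustration (not from the
+ * original sources; example_report_error is a hypothetical name). */
+#if 0 /* illustrative only */
+static void example_report_error(rsb_err_t errval)
+{
+	rsb_char_t buf[128];
+
+	if(rsb_strerror_r(errval, buf, sizeof(buf)) == RSB_ERR_NO_ERROR)
+		printf("librsb error: %s\n", buf);
+}
+#endif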
+
+rsb_err_t rsb_mtx_upd_vals(struct rsb_mtx_t * mtxAp, enum rsb_elopf_t elop_flags, const void * omegap)
+{
+	/*!
+	   \ingroup rsb_doc_matrix_handling rsb_doc_rsb
+
+	   \f$ A \leftarrow op (A,\Omega) \f$
+	   Updates the matrix \f$A\f$ by applying either a rowwise or an elemental operation \f$op\f$, which is determined by \c elop_flags.
+	   If a unary operation is selected, \c omegap can be \c NULL.
+
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \param \rsb_flags_elop_param_msg
+	   \param \rsb_omega_inp_param_msg
+	   \return \rsberrcodemsg
+
+	   \see_lib_set
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_upd_vals(mtxAp, elop_flags, omegap);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
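+
+/* A brief usage sketch added for illustration (not from the original
+ * sources; example_scale_matrix is a hypothetical name, and a double
+ * typecode plus the RSB_ELOPF_MUL elemental flag are assumed). */
+#if 0 /* illustrative only */
+static rsb_err_t example_scale_matrix(struct rsb_mtx_t * mtxAp)
+{
+	const double omega = 0.5; /* A <- A * 0.5, elementwise */
+
+	return rsb_mtx_upd_vals(mtxAp, RSB_ELOPF_MUL, &omega);
+}
+#endif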
+
+rsb_err_t rsb_mtx_set_vals(struct rsb_mtx_t * mtxAp, const void * VA, const rsb_coo_idx_t *IA, const rsb_coo_idx_t *JA, rsb_nnz_idx_t nnz, rsb_flags_t flags)
+{
+	/*!
+	   \ingroup rsb_doc_matrix_handling rsb_doc_rsb
+
+	   Updates the specified matrix elements, if found in the nonzero pattern.
+
+	   In the special case of a matrix in assembly state (that is, one that has been created as empty with #rsb_mtx_alloc_from_coo_begin() and not yet assembled with #rsb_mtx_alloc_from_coo_end() ) all the supplied matrix elements will be accepted, whether already present or not.
+
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \param \rsb_ro_va_ia_ja_desc_msg
+	   \param \rsb_nnz_inp_param_msg
+	   \param \rsb_flags_setv_inp_param_msg
+	   \return \rsberrcodemsg
+
+	   \see_lib_set
+	 */
+
+	/* FIXME: new, UNFINISHED */
+	/* FIXME: shall document what will do on out-of-pattern elements */
+	/* should support sum, max, etc .. */
+//	RSB_ERROR("!!\n");
+//	if(flags == RSB_FLAG_DUPLICATES_SUM)
+//		return RSB_ERR_UNIMPLEMENTED_YET;
+//	else
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_set_elements(mtxAp,VA,IA,JA,nnz,flags);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_mtx_get_vals(const struct rsb_mtx_t * mtxAp, void * VA, const rsb_coo_idx_t *IA, const rsb_coo_idx_t *JA, rsb_nnz_idx_t nnz, rsb_flags_t flags)
+{
+	/*!
+	   \ingroup rsb_doc_matrix_handling rsb_doc_rsb
+
+	   Gets the specified matrix elements, if found.
+	   Please note that unlike #rsb_mtx_set_vals, the matrix has to be fully assembled here.
+
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \param \rsb_wr_va_rd_ia_ja_desc_msg
+	   \param \rsb_nnz_inp_param_msg
+	   \param \rsb_flags_getv_inp_param_msg
+	   \return \rsberrcodemsg
+
+           \see_lib_get
+	 */
+	/* may return an ...UNFINALIZED... error here ... */
+	/* TODO: could document better error behaviour (e.g.: what if all updated except one ? ) */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_get_elements(mtxAp,VA,IA,JA,nnz,flags);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_file_mtx_save(const struct rsb_mtx_t * mtxAp, const rsb_char_t * filename)
+{
+	/*!
+	   \ingroup rsb_doc_input_output rsb_doc_rsb
+
+	   Saves the given matrix to the specified matrix file.
+
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \param \rsb_filename_out_param_msg
+	   \return \rsberrcodemsg
+
+	   \warning \rsb_warn_flags_not_complete_msg
+
+	   \rsb_matrixmarketonlynote_m
+
+           \see_lib_info
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_file_mtx_save(mtxAp,filename);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_file_vec_save(const rsb_char_t * filename, rsb_type_t typecode, const void * Yp, rsb_coo_idx_t yvl)
+{
+	/*!
+	   \ingroup rsb_doc_input_output rsb_doc_rsb
+
+	   Saves a dense vector to the specified file, using the numerical type representation as specified by the user.
+	   This function assumes \c Yp!=NULL and \c yvl>0.
+
+	   \param \rsb_filename_inv_param_msg
+	   \param \rsb_type_param_msg
+	   \param \rsb_y_out_param_msg
+	   \param \rsb_yvl_param_msg
+	   \return \rsberrcodemsg
+
+	   \rsb_matrixmarketonlynote_v
+           \see_lib_info
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_vec_save(filename, typecode, Yp, yvl);
+	RSB_INTERFACE_RETURN_ERR(errval);
+}
+
+rsb_err_t rsb_file_vec_load(const rsb_char_t * filename, rsb_type_t typecode, void * Yp, rsb_coo_idx_t *yvlp)
+{
+	/*!
+	   \ingroup rsb_doc_input_output rsb_doc_rsb
+
+	   Loads a dense vector from the specified file, using the numerical type representation as specified by the user.
+	   This function is intended to be called in two steps: first with \c Yp=NULL, in order to write the vector length to \c *yvlp ; then, with \c yvlp=NULL, to get \c Yp written.
+
+	   \param \rsb_filename_inv_param_msg
+	   \param \rsb_type_param_msg
+	   \param \rsb_y_inp_param_msg
+	   \param \rsb_yvlp_param_msg
+	   \return \rsberrcodemsg
+
+	   \rsb_matrixmarketonlynote_v
+           \see_lib_info
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_load_vector_file_as_matrix_market(filename,typecode,Yp,yvlp);
+	RSB_INTERFACE_RETURN_ERR(errval);
+}
+
+struct rsb_mtx_t * rsb_file_mtx_load(const rsb_char_t * filename, rsb_flags_t flagsA, rsb_type_t typecode, rsb_err_t *errvalp)
+{
+	/*!
+	   \ingroup rsb_doc_input_output rsb_doc_rsb
+
+	   Loads a sparse matrix from the specified matrix file, assembling it in the format specified by \rsb_flags, using the numerical type representation as specified by the user.
+
+	   \param \rsb_filename_inp_param_msg
+	   \param \rsb_flagsa_inp_param_msg
+	   \param \rsb_type_param_msg
+	   \param \rsb_errvp_inp_param_msg
+	   \return \rsbmtxpmessage
+
+	   \rsb_matrixmarketonlynote_m
+	   \see_lib_info
+	 */
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	mtxAp = rsb__dodo_load_matrix_file_as_matrix_market(filename, flagsA, typecode, &errval);
+	RSB_INTERFACE_RETURN_MTX_ERRP(mtxAp,errval,errvalp);
+}
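+
+/* A brief loading sketch added for illustration (not from the original
+ * sources; example_load is a hypothetical name, and a double typecode is
+ * assumed). On failure it prints the error and returns NULL. */
+#if 0 /* illustrative only */
+static struct rsb_mtx_t * example_load(const rsb_char_t * filename)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = rsb_file_mtx_load(filename,
+		RSB_FLAG_NOFLAGS, RSB_NUMERICAL_TYPE_DOUBLE, &errval);
+
+	if(!mtxAp)
+		rsb_perror(NULL, errval);
+	return mtxAp;
+}
+#endif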
+
+struct rsb_mtx_t * rsb_sppsp(rsb_type_t typecode, rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp, rsb_trans_t transB, const void *betap, const struct rsb_mtx_t * mtxBp, rsb_err_t * errvalp)
+{
+	/*!
+	   \ingroup rsb_doc_matrix_handling rsb_doc_rsb
+
+	   Computes the weighted sum of two sparse matrices, returning a new matrix:
+	   \f$C \leftarrow \alpha\cdot opa(A) + \beta\cdot opb(B) \f$
+	   Symmetry flags are ignored in this operation.
+
+	   \rsb_transa_mtx_msg
+	   \rsb_transb_mtx_msg
+
+	   \param \rsb_type_param_msg
+	   \param \rsb_transa_inp_param_msg
+	   \param \rsb_alpha_inp_param_msg
+	   \param \rsb_mtxt_abi_param_msg_a
+	   \param \rsb_transb_inp_param_msg
+	   \param \rsb_beta_inp_param_msg
+	   \param \rsb_mtxt_abi_param_msg_b
+	   \param \rsb_errvp_inp_param_msg
+	   \return \rsbmtxpmessage
+
+	   \see_lib_gemm
+
+	   \warning \rsb_warn_not_th_tested_msg
+	   \warning \rsb_warn_unoptimized_msg 
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxCp = NULL;
+	RSB_INTERFACE_PREAMBLE
+	mtxCp = rsb__do_matrix_sum(typecode,transA,alphap,mtxAp,transB,betap,mtxBp,&errval);
+	RSB_INTERFACE_RETURN_MTX_ERRP(mtxCp,errval,errvalp);
+}
+
+struct rsb_mtx_t * rsb_spmsp(rsb_type_t typecode, rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp, rsb_trans_t transB, const void *betap, const struct rsb_mtx_t * mtxBp, rsb_err_t * errvalp)
+{
+	/*!
+	   \ingroup rsb_doc_matrix_handling rsb_doc_rsb
+
+	   Computes the weighted product of two sparse matrices in a new sparse matrix (also known as SpGEMM operation):
+	   \f$C \leftarrow \alpha \cdot opa(A) \cdot \beta \cdot opb(B) \f$
+	   Symmetry/Hermitian flags are ignored by this operation.
+
+	   \rsb_transa_mtx_msg
+	   \rsb_transb_mtx_msg
+
+	   \param \rsb_type_param_msg
+	   \param \rsb_transa_inp_param_msg
+	   \param \rsb_alpha_inp_param_msg
+	   \param \rsb_mtxt_abi_param_msg_a
+	   \param \rsb_transb_inp_param_msg
+	   \param \rsb_beta_inp_param_msg
+	   \param \rsb_mtxt_abi_param_msg_b
+	   \param \rsb_errvp_inp_param_msg
+	   \return \rsbmtxpmessage
+
+	   \warning Parameters \c alphap,betap,transA,transB  are not yet taken into consideration. The following defaults are valid: \f$\alpha=1.0\f$ and \f$\beta=1.0\f$, and \c transA=transB=#RSB_TRANSPOSITION_N.
+
+	   \see_lib_gemm
+	 */
+	/* FIXME: NEW, UNFINISHED, UNTESTED, UNSECURED */
+	/* \warning \rsb_warn_not_th_tested_msg \warning \rsb_warn_unoptimized_msg  */
+	struct rsb_mtx_t * mtxCp = NULL;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	mtxCp = rsb__do_matrix_mul(typecode,transA,alphap,mtxAp,transB,betap,mtxBp,&errval);
+	RSB_INTERFACE_RETURN_MTX_ERRP(mtxCp,errval,errvalp);
+}
+
+rsb_err_t rsb_mtx_add_to_dense(const void *alphap, const struct rsb_mtx_t * mtxAp, rsb_nnz_idx_t ldB, rsb_nnz_idx_t nrB, rsb_nnz_idx_t ncB, rsb_bool_t rowmajorB, void * Bp)
+{
+	/*!
+	   \ingroup rsb_doc_matrix_operations rsb_doc_rsb
+	   
+	   Dense matrix B is updated by adding scaled sparse matrix \f${A}\f$ to it:
+	   \f$B \leftarrow B + \alpha {A} \f$
+
+	   \param \rsb_alpha_inp_param_msg
+	   \param \rsb_mtxt_abi_param_msg_a
+	   \param \rsb_ldb_inp_param_msg
+	   \param \rsb_nrcows_B_dense_inp_param_msg
+	   \param \rsb_rowmajor_B_inp_param_msg
+	   \param \rsb_dmtx_abi_param_msg_b
+	   \return \rsberrcodemsg
+	   \warning \rsb_warn_not_th_tested_msg
+
+	   \note It suffices to 'transpose' \c Bp's description parameters to get \f$A\f$ summed in transposed.
+	   \see_lib_gemm
+	 */
+	/* TODO: add transA */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_matrix_add_to_dense(alphap, mtxAp, ldB, nrB, ncB, rowmajorB, Bp);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_trans_t rsb_psblas_trans_to_rsb_trans(const char psbtrans)
+{
+	/*!
+	   \ingroup rsb_doc_misc rsb_doc_rsb
+	
+	    "Translates" a PSBLAS transposition value character to a \librsb one. 
+	    \n
+	    See the PSBLAS library website/documentation for valid input values.
+
+	   \param \rsb_psb_trans_inp_param_msg 
+	   \return A valid transposition code; that is #RSB_TRANSPOSITION_N for 'N', #RSB_TRANSPOSITION_T for 'T', #RSB_TRANSPOSITION_C for 'C' (see \ref matrix_transposition_flags_section).
+	   \see_lib_psblas
+	 */
+	rsb_trans_t rsbtrans = RSB_INVALID_TRANS_CHAR;
+	RSB_INTERFACE_PREAMBLE
+	rsbtrans = rsb_do_psblas_trans_to_rsb_trans(psbtrans);
+	RSB_INTERFACE_RETURN_VAL(rsbtrans)
+}
+
+struct rsb_mtx_t * rsb_mtx_alloc_from_csr_const(const void *VA, const rsb_coo_idx_t * RP, const rsb_coo_idx_t * JA, rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_blk_idx_t brA, rsb_blk_idx_t bcA, rsb_flags_t flagsA, rsb_err_t * errvalp)
+{
+	/*!
+ 	   \ingroup rsb_doc_matrix_assembly rsb_doc_rsb
+
+	   Given read-only input CSR format arrays, allocates and assembles an RSB matrix (stored in separate arrays).
+	  
+	   \param \rsb_ro_va_rp_ja_desc_msg
+	   \param \rsb_nnzA_inp_param_msg
+	   \param \rsb_type_param_msg
+	   \param \rsb_nrcows_A_sparse_inp_param_msg
+	   \param \rsb_nrbows_A_sparse_inp_param_msg
+	   \param \rsb_flagsa_csr_param_msg
+	   \param \rsb_errvp_inp_param_msg
+	   \return \rsbmtxpmessage
+	   \see_lib_alloc
+	 */
+	// FIXME: flags and index and alloc mangling, here 
+	// FIXME: UNTESTED, AND NNZ<M ?
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	RSB_INTERFACE_PREAMBLE
+	mtxAp = rsb__do_mtx_alloc_from_csr_const(VA,RP,JA,nnzA,typecode,nrA,ncA,brA,bcA,flagsA,&errval);
+	RSB_INTERFACE_RETURN_MTX_ERRP(mtxAp,errval,errvalp);
+}
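+
+/* A minimal CSR assembly sketch added for illustration (not from the
+ * original sources; example_from_csr is a hypothetical name). It builds
+ * a 2x2 double-precision identity matrix from CSR arrays. */
+#if 0 /* illustrative only */
+static struct rsb_mtx_t * example_from_csr(rsb_err_t * errvalp)
+{
+	const double VA[] = { 1.0, 1.0 };
+	const rsb_coo_idx_t RP[] = { 0, 1, 2 }; /* row pointers, nrA+1 entries */
+	const rsb_coo_idx_t JA[] = { 0, 1 };    /* column indices */
+
+	return rsb_mtx_alloc_from_csr_const(VA, RP, JA, 2,
+		RSB_NUMERICAL_TYPE_DOUBLE, 2, 2, 0, 0, RSB_FLAG_NOFLAGS, errvalp);
+}
+#endif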
+
+struct rsb_mtx_t * rsb_mtx_alloc_from_csc_const(const void *VA, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * CP, rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_blk_idx_t brA, rsb_blk_idx_t bcA, rsb_flags_t flagsA, rsb_err_t * errvalp)
+{
+	/*!
+ 	   \ingroup rsb_doc_matrix_assembly rsb_doc_rsb
+
+	   Given read-only input CSC format arrays, allocates and assembles an RSB matrix (stored in separate arrays).
+	  
+	   \param \rsb_ro_va_ia_cp_desc_msg
+	   \param \rsb_nnzA_inp_param_msg
+	   \param \rsb_type_param_msg
+	   \param \rsb_nrcows_A_sparse_inp_param_msg
+	   \param \rsb_nrbows_A_sparse_inp_param_msg
+	   \param \rsb_flagsa_csc_param_msg
+	   \param \rsb_errvp_inp_param_msg
+	   \return \rsbmtxpmessage
+	   \see_lib_alloc
+	 */
+	// FIXME: flags and index and alloc mangling, here 
+	// FIXME: UNTESTED, AND NNZ<M ?
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	RSB_INTERFACE_PREAMBLE
+	mtxAp = rsb__do_mtx_alloc_from_csc_const(VA,IA,CP,nnzA,typecode,nrA,ncA,brA,bcA,flagsA,&errval);
+	RSB_INTERFACE_RETURN_MTX_ERRP(mtxAp,errval,errvalp);
+}
+
+struct rsb_mtx_t * rsb_mtx_alloc_from_csr_inplace(void *VA, rsb_nnz_idx_t * RP, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_blk_idx_t brA, rsb_blk_idx_t bcA, rsb_flags_t flagsA, rsb_err_t * errvalp)
+{
+	/*!
+ 	   \ingroup rsb_doc_matrix_assembly rsb_doc_rsb
+
+	   \rsb_mtx_alloc_csr_inplace_msg
+	   \n
+	   \rsb_note_assume_nnz_sized
+	  
+	   \param \rsb_wr_va_rp_ja_desc_msg
+	   \param \rsb_nnzA_inp_param_msg
+	   \param \rsb_type_param_msg
+	   \param \rsb_nrcows_A_sparse_inp_param_msg
+	   \param \rsb_nrbows_A_sparse_inp_param_msg
+	   \param \rsb_flagsa_csr_param_msg
+	   \param \rsb_errvp_inp_param_msg
+	   \return \rsbmtxpmessage
+	   \see_lib_alloc
+	 */
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	mtxAp = rsb__do_mtx_alloc_from_csr_inplace(VA,RP,JA,nnzA,typecode,nrA,ncA,brA,bcA,flagsA,&errval);
+	RSB_INTERFACE_RETURN_MTX_ERRP(mtxAp,errval,errvalp);
+}
+
+rsb_err_t rsb_mtx_switch_to_csr(struct rsb_mtx_t * mtxAp, void ** VAp, rsb_coo_idx_t ** IAp, rsb_coo_idx_t ** JAp, rsb_flags_t flags)
+{
+	/*!
+ 	   \ingroup rsb_doc_matrix_conversion rsb_doc_rsb
+	   
+	   Switches the matrix to the CSR format, in-place. 
+
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \param \rsb_wr_va_ia_ja_p_desc_msg
+	   \param \rsb_flags_idc_param_msg Flags #RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS are forbidden.
+	   \return \rsberrcodemsg
+
+	   \note \rsb_note_switch_in_place
+	   \warning \rsb_warn_not_th_tested_msg
+	   \see_lib_conv
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_switch_rsb_mtx_to_csr_sorted(mtxAp, VAp, IAp, JAp, flags);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_mtx_get_coo(const struct rsb_mtx_t * mtxAp, void * VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_flags_t flags )
+{
+	rsb_nnz_idx_t nnz = 0;
+	/*! 
+	   \ingroup rsb_doc_matrix_operations rsb_doc_rsb
+
+	   Returns the matrix converted to a coordinate storage format.
+	   \n
+	   Elements will be stored in no particular order.
+	   \n
+	   If there are structural or fill-in zero elements, these will be skipped.
+	   \n
+	   Writes as many entries as there are nonzeroes. Use #rsb_mtx_get_info(mtxAp,#RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T,&nnz) to find out how many, in order to allocate the arrays appropriately.
+	  
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \param \rsb_wr_va_ia_ja_desc_msg
+	   \param \rsb_flags_getco_inp_param_msg
+	   \return \rsberrcodemsg
+
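+	   Example (a sketch following the above recipe; error checking omitted):
+	   \code
+rsb_nnz_idx_t nnz = 0;
+errval = rsb_mtx_get_info(mtxAp,RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T,&nnz);
+// allocate VA, IA, JA to nnz elements each, then:
+errval = rsb_mtx_get_coo(mtxAp,VA,IA,JA,RSB_FLAG_C_INDICES_INTERFACE);
+	   \endcode
+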
+	   \see_lib_get
+	   */
+	   /*
+	    No more than mtxAp->nnz elements will be written.
+	   \todo Allow optional VA,IA,JA, for pattern matrices, or other purposes.
+	   */
+	 // FIXME: does not support misc flags !
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_get_coo_noalloc(mtxAp,VA,IA,JA,&nnz,flags);
+//err:
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_mtx_get_csr(rsb_type_t typecode, const struct rsb_mtx_t *mtxAp, void * VA, rsb_nnz_idx_t * RP, rsb_coo_idx_t * JA, rsb_flags_t flags )
+{
+	/*!
+ 	   \ingroup rsb_doc_matrix_conversion rsb_doc_rsb
+
+	   Fills the given arrays with the matrix expressed in the CSR format. 
+
+	   \param \rsb_type_param_msg
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \param \rsb_wo_va_rp_ja_desc_msg
+	   \param \rsb_flags_getcs_inp_param_msg
+	   \return \rsberrcodemsg
+
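+	   Example (a sketch; assumes \c mtxAp has \c nrA rows, with \c VA and \c JA sized one entry per nonzero and \c RP holding \c nrA+1 entries; error checking omitted):
+	   \code
+errval = rsb_mtx_get_csr(RSB_NUMERICAL_TYPE_DOUBLE,mtxAp,VA,RP,JA,RSB_FLAG_C_INDICES_INTERFACE);
+	   \endcode
+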
+	   \see_lib_get
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_get_csr(typecode,mtxAp,VA,RP,JA,flags);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_mtx_get_rows_sparse(rsb_trans_t transA, const void * alphap, const struct rsb_mtx_t * mtxAp, void* VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t frA, rsb_coo_idx_t lrA, rsb_nnz_idx_t *rnzp, rsb_flags_t flags)
+{
+        /*!
+	   \ingroup rsb_doc_matrix_operations rsb_doc_rsb
+
+	   Writes to the given COO arrays the specified submatrix.
+
+	   Invoke with \c VA,IA,JA set to \c NULL in order to get the nonzeroes count written to \c *rnzp, and thus know how large the arrays should be.
+
+	   \rsb_IA_can_null_msg (in this case it will be ignored).
+	   The written rows are ordered.
+	  
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \param \rsb_wr_va_rd_ia_ja_desc_msg
+	   \param \rsb_inp_frlr_msg
+	   \param \rsb_inp_rnz_msg
+	   \param \rsb_alpha_inp_param_msg
+	   \param \rsb_transa_inp_param_msg
+	   \param \rsb_flags_getrs_inp_param_msg
+	   \return \rsberrcodemsg
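+
+	   Example (a two-stage sketch; assumes \c transA, \c alpha, \c frA, \c lrA and \c flags are set up as for a regular call; error checking omitted):
+	   \code
+// first call: the count of nonzeroes in rows frA..lrA gets written to rnz
+errval = rsb_mtx_get_rows_sparse(transA,&alpha,mtxAp,NULL,NULL,NULL,frA,lrA,&rnz,flags);
+// allocate VA, IA, JA to rnz elements each, then extract:
+errval = rsb_mtx_get_rows_sparse(transA,&alpha,mtxAp,VA,IA,JA,frA,lrA,&rnz,flags);
+	   \endcode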
+	   \see_lib_get
+         */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_get_rows_sparse(transA, alphap, mtxAp, VA, IA, JA, frA, lrA, rnzp, flags);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_mtx_get_coo_block(const struct rsb_mtx_t * mtxAp, void* VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t frA, rsb_coo_idx_t lrA, rsb_coo_idx_t fcA, rsb_coo_idx_t lcA, rsb_coo_idx_t * IREN, rsb_coo_idx_t * JREN, rsb_nnz_idx_t *rnzp, rsb_flags_t flags )
+{
+	/*!
+	   \ingroup rsb_doc_matrix_conversion rsb_doc_rsb
+
+	   Writes in COO format the specified submatrix.
+	   Works in two stages: first the user invokes it with \c VA,IA,JA set to \c NULL to get \c *rnzp.
+	   Then the \c VA,IA,JA arrays can be allocated, and the function called again, this time with \c rnzp=NULL but with the \c VA,IA,JA array pointers non-\c NULL (or at least one of them).
+
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \param \rsb_wr_va_ia_ja_desc_msg
+	   \param \rsb_inp_frlr_msg
+	   \param \rsb_inp_fclc_msg
+	   \param \rsb_xren_inp_param_msg
+	   \param \rsb_inp_rnz_msg
+	   \param \rsb_flags_getcb_inp_param_msg
+	   \return \rsberrcodemsg
+	   Examples:
+	   \code
+// get the nnz count first:
+errval = rsb_mtx_get_coo_block(mtxAp,NULL,NULL,NULL,frA,lrA,fcA,lcA,NULL,NULL,&rnz,flags);
+// allocate VA, IA, JA to rnz elements
+...
+// then get the rnz values:
+errval = rsb_mtx_get_coo_block(mtxAp,  VA,  IA,  JA,frA,lrA,fcA,lcA,NULL,NULL,NULL,flags);
+	   \endcode
+
+	   \warning Expect this function to change soon (e.g.: have scaling parameters, etc.). Contact the author if you intend to use it.
+	   \see_lib_get
+	 */
+	/* \rsb_VA_can_null_msg (in such case, only the pattern information is extracted). */
+	/* FIXME: shall test rsb_mtx_get_coo_block with VA=NULL */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_get_block_sparse(mtxAp,VA,IA,JA,frA,lrA,fcA,lcA,IREN,JREN,rnzp,flags);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_spmm(rsb_trans_t transA, const void * alphap, const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t nrhs, rsb_flags_t order, const void * Bp, rsb_nnz_idx_t ldB, const void * betap, void * Cp, rsb_nnz_idx_t ldC)
+{
+	/*!
+	   \ingroup rsb_doc_matrix_operations rsb_doc_rsb
+
+	   Updates a dense matrix with the product of a sparse matrix and a dense matrix;
+	   that is, computes \f$ C \leftarrow \beta\cdot C + \alpha\cdot opa(A) \cdot B \f$.
+
+	   \rsb_transa_mtx_msg
+	   \rsb_num_threads
+
+	   \param \rsb_transa_inp_param_msg
+	   \param \rsb_alpha_inp_param_msg
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \param \rsb_nrhs_inp_param_msg
+	   \param \rsb_order_inp_param_msg
+	   \param \rsb_b_inp_param_msg
+	   \param \rsb_ldb_inp_param_msg
+	   \param \rsb_beta_inp_param_msg
+	   \param \rsb_c_inp_param_msg
+	   \param \rsb_ldc_inp_param_msg
+ 	   \return \rsberrcodemsg
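+
+	   Example (a sketch, assuming type \c double, \c mtxAp of size \c nrA x \c ncA, and column-major operands; error checking omitted):
+	   \code
+const double alpha = 1.0, beta = 1.0;
+// Bp: ncA x nrhs with leading dimension ldB>=ncA; Cp: nrA x nrhs with ldC>=nrA
+errval = rsb_spmm(RSB_TRANSPOSITION_N,&alpha,mtxAp,nrhs,RSB_FLAG_WANT_COLUMN_MAJOR_ORDER,Bp,ldB,&beta,Cp,ldC);
+	   \endcode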
+	   \see_lib_spmx
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_spmm(transA,alphap,mtxAp,nrhs,order,Bp,ldB,betap,Cp,ldC,RSB_OP_FLAG_DEFAULT);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_spmsp_to_dense(rsb_type_t typecode, rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp, rsb_trans_t transB, const void *betap, const struct rsb_mtx_t * mtxBp , rsb_nnz_idx_t ldC, rsb_nnz_idx_t nrC, rsb_nnz_idx_t ncC, rsb_bool_t rowmajorC, void *Cp)
+{
+	/*!
+	   \ingroup rsb_doc_matrix_operations rsb_doc_rsb
+
+	   Computes the product of two sparse matrices and adds it to a dense matrix:
+	   \f$C \leftarrow C + \alpha \cdot opa(A) \cdot \beta \cdot opb(B) \f$.
+
+	   \rsb_transa_mtx_msg
+	   \rsb_transb_mtx_msg
+
+	   \param \rsb_type_param_msg
+	   \param \rsb_transa_inp_param_msg
+	   \param \rsb_alpha_inp_param_msg
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \param \rsb_transb_inp_param_msg
+	   \param \rsb_beta_inp_param_msg
+	   \param \rsb_mtxt_inp_param_msg_b
+	   \param \rsb_ldc_inp_param_msg
+	   \param \rsb_nrcows_C_dense_inp_param_msg
+	   \param \rsb_rowmajor_C_inp_param_msg
+	   \param \rsb_dmtx_abi_param_msg_c
+ 	   \return \rsberrcodemsg
+
+	   \warning Parameters \c alphap,betap,transA,transB are not yet taken into consideration. The following defaults apply: \f$\alpha=1.0\f$, \f$\beta=1.0\f$, and \c transA=transB=#RSB_TRANSPOSITION_N.
+
+	   \see_lib_gemm
+	 */
+	/* \todo \rsb_todo_unfinished_inc_msg */
+	/* \warning \rsb_warn_unfinished_msg \warning \rsb_warn_unfinished_noerr_msg */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_spgemm_to_dense(typecode,transA,alphap,mtxAp,transB,betap,mtxBp,ldC,nrC,ncC,!rowmajorC,Cp,NULL,NULL);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_mtx_rndr(const char * filename, const struct rsb_mtx_t*mtxAp, rsb_coo_idx_t pmWidth, rsb_coo_idx_t pmHeight, rsb_marf_t rflags)
+{
+	/*!
+	   \ingroup rsb_doc_matrix_operations rsb_doc_rsb
+	   Renders a matrix to a file.
+	   Currently, only Encapsulated PostScript (EPS) output is supported.
+	
+	   \param \rsb_filename_out_param_msg
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \param \rsb_render_pmwidth_inp_param_msg
+	   \param \rsb_render_pmheight_inp_param_msg
+	   \param \rsb_render_rflags_inp_param_msg
+
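+	   \return \rsberrcodemsg
+
+	   Example (a sketch, rendering \c mtxAp to a 512 x 512 EPS file via the #RSB_MARF_EPS rendering flag; error checking omitted):
+	   \code
+errval = rsb_mtx_rndr("mtx.eps",mtxAp,512,512,RSB_MARF_EPS);
+	   \endcode
+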
+	   \see_lib_rndr
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_mtx_render(filename, mtxAp, pmWidth, pmHeight, rflags);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_file_mtx_rndr(void * pmp, const char * filename, rsb_coo_idx_t pmlWidth, rsb_coo_idx_t pmWidth, rsb_coo_idx_t pmHeight, rsb_marf_t rflags)
+{
+	/*!
+	   \ingroup rsb_doc_misc rsb_doc_rsb
+
+	   Renders the matrix contained in a matrix file as a pixel map.
+
+	   \param \rsb_render_pmp_inp_param_msg
+	   \param \rsb_filename_inp_param_msg
+	   \param \rsb_render_pmlwidth_inp_param_msg
+	   \param \rsb_render_pmwidth_inp_param_msg
+	   \param \rsb_render_pmheight_inp_param_msg
+	   \param \rsb_render_rflags_inp_param_msg
+	   \return \rsberrcodemsg
+	   
+	   \warning \rsb_warn_not_th_tested_msg
+
+	   \note At present, \c pmlWidth is required to be equal to \c pmWidth.
+	   \see_lib_rndr
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_file_mtx_rndr(pmp, filename, pmlWidth, pmWidth, pmHeight, rflags);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_mtx_switch_to_coo(struct rsb_mtx_t * mtxAp, void ** VAp, rsb_coo_idx_t ** IAp, rsb_coo_idx_t ** JAp, rsb_flags_t flags)
+{
+	/*!
+ 	   \ingroup rsb_doc_matrix_conversion rsb_doc_rsb
+
+	   Switches a matrix to COO arrays in place.
+
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \param \rsb_wr_va_ia_ja_p_desc_msg
+	   \param \rsb_flags_swcoo_inp_param_msg
+	   \return \rsberrcodemsg
+
+	   \note \rsb_note_switch_in_place
+	   \warning \rsb_warn_not_th_tested_msg
+	   \see_lib_conv
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_switch_rsb_mtx_to_coo(mtxAp, VAp, IAp, JAp, flags);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_mtx_get_prec(void *opdp, const struct rsb_mtx_t * mtxAp, rsb_precf_t prec_flags, const void *ipdp)
+{
+	/*!
+	   \ingroup rsb_doc_misc rsb_doc_rsb
+
+	   A function computing a simple preconditioner out of \c mtxAp.
+
+	   \param opdp Preconditioner data pointer (output).
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \param prec_flags Valid preconditioner request flags (currently, only #RSB_PRECF_ILU0 is supported; with it, \c *opdp will be overwritten with two \c rsb_mtx_t pointers, respectively to a lower and an upper triangular matrix).
+	   \param ipdp  Preconditioner data pointer (input).
+
+	   \return \rsberrcodemsg
+
+	   \note Matrix should be square, have at least two rows, and have at least one nonzero.
+	   \see_lib_get
+	*/
+	/*
+	   \warning \rsb_warn_not_th_tested_msg
+	*/
+	/* FIXME: temporary interface */
+	rsb_err_t errval = RSB_ERR_UNIMPLEMENTED_YET;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_get_preconditioner(opdp,mtxAp,prec_flags,ipdp);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_mtx_get_info(const struct rsb_mtx_t *mtxAp, enum rsb_mif_t miflags, void* minfop)
+{
+	/*!
+	   \ingroup rsb_doc_misc rsb_doc_rsb
+
+	   \rsb_mtx_getinfo_msg.
+
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \param \rsb_miflags_inp_param_msg
+	   \param \rsb_minfop_inp_param_msg
+
+	   \return \rsberrcodemsg
+
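+	   Example:
+	   \code
+rsb_nnz_idx_t nnzA = 0;
+errval = rsb_mtx_get_info(mtxAp,RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T,&nnzA);
+	   \endcode
+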
+	   \warning \rsb_warn_not_th_tested_msg
+	   \see_lib_info
+	*/
+	rsb_err_t errval = RSB_ERR_UNIMPLEMENTED_YET;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_mtx_get_info(mtxAp, miflags, minfop);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_mtx_get_info_str(const struct rsb_mtx_t *mtxAp, const rsb_char_t *mis, void* minfop, size_t buflen)
+{
+	/*!
+	   \ingroup rsb_doc_misc rsb_doc_rsb
+
+	   \rsb_mtx_getinfo_msg, via a string form query.
+
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \param mis A string specifying any identifier among the matrix info ones. See #rsb_mif_t for a list of valid identifiers that can be supplied in string form.
+	   \param \rsb_minfop_inp_param_msg
+	   \param buflen If greater than 0, \c minfop will be treated as a string of length \c buflen and filled with the desired value via the standard \c snprintf() function.
+
+	   \return \rsberrcodemsg
+
+	   \see_lib_info
+	*/
+	/* \warning \rsb_warn_not_th_tested_msg */
+	rsb_err_t errval = RSB_ERR_UNIMPLEMENTED_YET;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_get_matrix_info_from_string(mtxAp,mis,minfop,buflen);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_mtx_get_nrm(const struct rsb_mtx_t * mtxAp , void * Np, enum rsb_extff_t flags)
+{
+	/*!
+	   \ingroup rsb_doc_misc rsb_doc_rsb
+
+	   Computes a matrix norm (either the 1-norm, the 2-norm, or the infinity-norm).
+
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \param Np  Points to a scalar value which will be overwritten with the selected norm.
+	   \param flags Either #RSB_EXTF_NORM_ONE or #RSB_EXTF_NORM_TWO or #RSB_EXTF_NORM_INF.
+
+	   In case of a complex type, only the real part will be written to \c Np. 
+
+	   \return \rsberrcodemsg
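+
+	   Example (assuming \c mtxAp has type \c double):
+	   \code
+double nrmA = 0.0;
+errval = rsb_mtx_get_nrm(mtxAp,&nrmA,RSB_EXTF_NORM_INF);
+	   \endcode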
+	   \see_lib_get
+	*/
+	rsb_err_t errval = RSB_ERR_BADARGS;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_matrix_norm(mtxAp, Np, flags);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_mtx_get_vec(const struct rsb_mtx_t * mtxAp , void * Dp, enum rsb_extff_t flags)
+{
+	/*!
+	   \ingroup rsb_doc_misc rsb_doc_rsb
+
+	   Overwrites a supplied array with a specific vector quantity.
+
+	   \param \rsb_mtxt_inp_param_msg_a
+	   \param \rsb_d_inp_param_msg
+	   \param flags Either one of the different extraction filter flags (e.g.: #RSB_EXTF_DIAG, #RSB_EXTF_SUMS_ROW, ...) .
+	   \return \rsberrcodemsg
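+
+	   Example (a sketch; with #RSB_EXTF_DIAG, \c Dp is assumed to hold one entry per row of a square matrix):
+	   \code
+errval = rsb_mtx_get_vec(mtxAp,Dp,RSB_EXTF_DIAG);
+	   \endcode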
+	   \see_lib_get
+	*/
+	rsb_err_t errval = RSB_ERR_BADARGS;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_matrix_compute(mtxAp,Dp,flags);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_time_t rsb_time(void)
+{
+	/*!
+	   \ingroup rsb_doc_misc rsb_doc_rsb
+
+	   Returns the current time in seconds.
+	   This function is meant to be used for computing wall clock time intervals (e.g.: for benchmarking purposes). 
+	   The user should not rely on this function for absolute time computations.
+
+	   \return A value for the current time, in seconds.
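+
+	   Example:
+	   \code
+rsb_time_t dt = -rsb_time();
+// ... the operation being timed ...
+dt += rsb_time(); // dt now holds the elapsed wall clock time, in seconds
+	   \endcode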
+	   \see_lib_util
+	 */
+	return rsb_do_time();
+}
+
+#if RSB_WANT_COO_BEGIN 
+struct rsb_mtx_t * rsb_mtx_alloc_from_coo_begin(rsb_nnz_idx_t nnzA, rsb_type_t typecode, rsb_coo_idx_t nrA, rsb_coo_idx_t ncA, rsb_flags_t flagsA, rsb_err_t * errvalp)
+{
+	/*!
+ 	   \ingroup rsb_doc_matrix_assembly rsb_doc_rsb
+
+	   Creates an empty matrix structure in assembly state.
+	   The user then populates it using #rsb_mtx_set_vals() repeatedly; then assembles it with #rsb_mtx_alloc_from_coo_end().
+	  
+	   \param \rsb_nnzA_inp_param_msg_i
+	   \param \rsb_type_param_msg
+	   \param \rsb_nrcows_A_sparse_inp_param_msg
+	   \param \rsb_flagsa_coc_param_msg
+	   \param \rsb_errvp_inp_param_msg
+	   \return \rsbmtxapmessage
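+
+	   Example (a sketch of the assembly workflow; assumes \c VA,IA,JA specify \c nnzA coefficients; error checking omitted):
+	   \code
+struct rsb_mtx_t * mtxAp = rsb_mtx_alloc_from_coo_begin(nnzA,typecode,nrA,ncA,RSB_FLAG_NOFLAGS,&errval);
+// one or more calls populating the matrix:
+errval = rsb_mtx_set_vals(mtxAp,VA,IA,JA,nnzA,RSB_FLAG_NOFLAGS);
+// assemble; note mtxAp is updated by this call:
+errval = rsb_mtx_alloc_from_coo_end(&mtxAp);
+	   \endcode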
+	   \warning \rsb_warn_not_th_tested_msg
+	   \see_lib_alloc
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_mtx_t * mtxAp = NULL;
+	RSB_INTERFACE_PREAMBLE
+	mtxAp = rsb__do_mtx_alloc_from_coo_begin(nnzA,typecode,nrA,ncA,flagsA,&errval);
+	RSB_INTERFACE_RETURN_MTX_ERRP(mtxAp,errval,errvalp);
+}
+
+rsb_err_t rsb_mtx_alloc_from_coo_end(struct rsb_mtx_t ** mtxApp)
+{
+	/*!
+ 	   \ingroup rsb_doc_matrix_assembly rsb_doc_rsb
+
+	   Assembles RSB arrays for a matrix in build state created with #rsb_mtx_alloc_from_coo_begin() and populated with #rsb_mtx_set_vals().
+	   After assembly, any operation on the matrix is allowed.
+	  
+	   \param \rsb_mtxt_inp_param_msg_i
+	   \return \rsberrcodemsg
+	   \warning \rsb_warn_not_th_tested_msg
+	   \note The memory location of the matrix will be changed by this call, so the (old) \c *mtxApp address value will no longer be valid.
+	   \see_lib_alloc
+	 */
+	rsb_err_t errval = RSB_ERR_BADARGS;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_mtx_alloc_from_coo_end(mtxApp);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+#endif
+
+#if 0
+rsb_err_t rsb_tune_wrt(struct rsb_mtx_t ** mtxOpp, rsb_real_t *sfp, rsb_int_t *tnp, rsb_int_t maxr, rsb_time_t maxt, const struct rsb_mtx_t * mtxAp)
+{
+	/*!
+ 	\ingroup rsb_doc_matrix_assembly rsb_doc_rsb
+
+	Tunes matrix with respect to a user specified "benchmark" or "performance oracle" function.
+	...
+	\rsb_version_12
+	*/
+
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	/* rsb__tune_spxx_bos (...) */
+	return errval;
+}
+#endif
+
+rsb_err_t rsb_tune_spmm(struct rsb_mtx_t ** mtxOpp, rsb_real_t *sfp, rsb_int_t *tnp, rsb_int_t maxr, rsb_time_t maxt, rsb_trans_t transA, const void * alphap, const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t nrhs, rsb_flags_t order, const void * Bp, rsb_nnz_idx_t ldB, const void * betap, void * Cp, rsb_nnz_idx_t ldC)
+{
+	/*!
+ 	\ingroup rsb_doc_matrix_assembly rsb_doc_rsb
+
+	An auto-tuner: optimizes either the matrix instance, the thread count, or both for the #rsb_spmm operation.
+
+	\rsb_tune__doc_msg
+	\param \rsb_tune_mtxOpp_iou_param_msg
+	\param \rsb_tune_sfp_iou_param_msg
+	\param \rsb_tune_tnp_iou_param_msg
+	\param \rsb_tune_maxr_iou_param_msg
+	\param \rsb_tune_maxt_iou_param_msg
+	\param \rsb_transa_inp_param_msg
+	\param \rsb_alpha_inp_param_msg
+	\param \rsb_mtxt_inp_param_msg_a
+	\param \rsb_nrhs_inp_param_msg
+	\param \rsb_order_inp_param_msg
+	\param \rsb_b_tune_inp_param_msg
+	\param \rsb_ldb_inp_param_msg
+	\param \rsb_beta_inp_param_msg
+	\param \rsb_c_tune_inp_param_msg
+	\param \rsb_ldc_inp_param_msg
+	\return \rsberrcodemsg
+
+	   Examples:
+	   \code
+// obtain best thread count for mtxAp:
+errval = rsb_tune_spmm(NULL  ,&sf,&tn ,maxr,maxt,transA,&alpha,mtxAp,nrhs,order,Bp,ldB,&beta,Cp,ldC);
+
+// obtain best thread count for mtxAp; Bp and Cp will be allocated by the tuner:
+errval = rsb_tune_spmm(NULL  ,&sf,&tn ,maxr,maxt,transA,&alpha,mtxAp,nrhs,order,NULL,0,&beta,NULL,0);
+
+// obtain best clone of mtxAp (for current thread count):
+assert(mtxOp == NULL && mtxAp != NULL);
+errval = rsb_tune_spmm(&mtxOp,&sf,NULL,maxr,maxt,transA,&alpha,mtxAp,nrhs,order,Bp,ldB,&beta,Cp,ldC);
+
+// obtain best clone of mtxAp and best thread count:
+assert(mtxOp == NULL && mtxAp != NULL);
+errval = rsb_tune_spmm(&mtxOp,&sf,&tn ,maxr,maxt,transA,&alpha,mtxAp,nrhs,order,Bp,ldB,&beta,Cp,ldC);
+
+// replace mtxAp with best clone (if any):
+errval = rsb_tune_spmm(&mtxAp,&sf,NULL,maxr,maxt,transA,&alpha,NULL ,nrhs,order,Bp,ldB,&beta,Cp,ldC);
+
+// replace mtxAp with best clone (if any) and obtain best thread count:
+errval = rsb_tune_spmm(&mtxAp,&sf,&tn ,maxr,maxt,transA,&alpha,NULL ,nrhs,order,Bp,ldB,&beta,Cp,ldC);
+
+// illegal call:
+assert(mtxOp != NULL && mtxAp != NULL);
+errval = rsb_tune_spmm(&mtxOp,&sf,&tn ,maxr,maxt,transA,&alpha,mtxAp,nrhs,order,Bp,ldB,&beta,Cp,ldC);
+	   \endcode
+
+	\warning 
+	\rsb_tune_warning_doc_msg
+	\todo
+	\rsb_tune_todo_doc_msg
+	\see_lib_spmx
+	*/
+	rsb_err_t errval = RSB_ERR_BADARGS;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_tune_spmm( mtxOpp, sfp, tnp, maxr, maxt, transA, alphap, mtxAp, nrhs, order, Bp, ldB, betap, Cp, ldC);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+rsb_err_t rsb_tune_spsm(struct rsb_mtx_t ** mtxOpp, rsb_real_t *sfp, rsb_int_t *tnp, rsb_int_t maxr, rsb_time_t maxt, rsb_trans_t transA, const void * alphap, const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t nrhs, rsb_flags_t order, const void * Bp, rsb_nnz_idx_t ldB, const void * betap, void * Cp, rsb_nnz_idx_t ldC)
+{
+	/*!
+ 	\ingroup rsb_doc_matrix_assembly rsb_doc_rsb
+
+	An auto-tuner: optimizes either the matrix instance, the thread count, or both for the #rsb_spsm operation.
+
+	\rsb_tune__doc_msg
+	\param \rsb_tune_mtxOpp_iou_param_msg
+	\param \rsb_tune_sfp_iou_param_msg
+	\param \rsb_tune_tnp_iou_param_msg
+	\param \rsb_tune_maxr_iou_param_msg
+	\param \rsb_tune_maxt_iou_param_msg
+	\param \rsb_transa_inp_param_msg
+	\param \rsb_alpha_inp_param_msg
+	\param \rsb_mtxt_inp_param_msg_a
+	\param \rsb_nrhs_inp_param_msg
+	\param \rsb_order_inp_param_msg
+	\param \rsb_b_tune_inp_param_msg
+	\param \rsb_ldb_inp_param_msg
+	\param \rsb_beta_inp_param_msg
+	\param \rsb_c_tune_inp_param_msg
+	\param \rsb_ldc_inp_param_msg
+	\return \rsberrcodemsg
+
+	\rsb_spsv_no_zero
+	\warning 
+	\rsb_tune_warning_doc_msg
+	\todo
+	\rsb_tune_todo_doc_msg
+	\see_lib_spsx
+	\see rsb_tune_spmm
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_INTERFACE_PREAMBLE
+	errval = rsb__do_tune_spsm( mtxOpp, sfp, tnp, maxr, maxt, transA, alphap, mtxAp, nrhs, order, Bp, ldB, betap, Cp, ldC);
+	RSB_INTERFACE_RETURN_ERR(errval)
+}
+
+/*
+struct rsb_mtx_t * rsb_BLAS_get_mtx(blas_sparse_matrix handle)
+{
+	struct rsb_mtx_t * mtxAp = NULL;
+	RSB_INTERFACE_PREAMBLE
+	mtxAp = rsb_do_BLAS_get_mtx(handle);
+	RSB_INTERFACE_RETURN_MTX(mtxAp);
+}
+*/
+
diff --git a/rsb_set.c b/rsb_set.c
new file mode 100644
index 0000000..fc50fa0
--- /dev/null
+++ b/rsb_set.c
@@ -0,0 +1,494 @@
+/*
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains matrix setter functions.
+ * */
+
+#include "rsb_internals.h"
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+#define RSB_ALLOW_MTX_UPD 1
+
+static const void * rsb_do_has_coo_element_inner(const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t i, rsb_coo_idx_t j)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * FIXME: unfinished
+	 *
+	 * Should return a pointer to the value at (i,j), if present, and NULL if not present.
+	 * Only for CSR/CSC.
+	 *
+	 * */
+	RSB_DEBUG_ASSERT(mtxAp);
+	RSB_DEBUG_ASSERT(!RSB_INVALID_COO_INDEX(i));
+	RSB_DEBUG_ASSERT(!RSB_INVALID_COO_INDEX(j));
+	RSB_DEBUG_ASSERT(rsb__is_css_matrix(mtxAp));
+
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		const struct rsb_mtx_t * submatrix = RSB_FIND_SUBMATRIX_CONTAINING(mtxAp,i+mtxAp->roff,j+mtxAp->coff);
+		rsb_coo_idx_t moff;
+		rsb_coo_idx_t koff;
+
+		if(!submatrix)
+			return NULL;
+		moff = submatrix->roff-mtxAp->roff;
+		koff = submatrix->coff-mtxAp->coff;
+		return rsb_do_has_coo_element_inner(submatrix,i-moff,j-koff);
+	}
+	else
+	{
+		rsb_nnz_idx_t si;
+		rsb_coo_idx_t Mi,mi;
+		const rsb_nnz_idx_t * bpntr = mtxAp->bpntr;
+		const rsb_coo_idx_t * bindx = mtxAp->bindx;
+
+		if( mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER )
+			Mi = j, mi = i;
+		else
+			Mi = i, mi = j;
+
+		if(rsb__is_coo_matrix(mtxAp))
+		{
+			rsb_nnz_idx_t nnz1,nnz0,nnz = mtxAp->nnz;
+			if( mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES)
+			{
+				// delimit the current row
+				const rsb_half_idx_t *IA = (const rsb_half_idx_t *)mtxAp->bpntr;
+				const rsb_half_idx_t *JA = (const rsb_half_idx_t *)mtxAp->bindx;
+				// we search the beginning of line Mi
+				nnz0 = rsb__nnz_split_hcoo_bsearch(IA,Mi,nnz);
+				// we search the end of line Mi
+				nnz1 = nnz0+rsb__nnz_split_hcoo_bsearch(IA+nnz0,Mi+1,nnz-nnz0);
+				if(nnz1-nnz0<1)
+					return NULL;// no row Mi
+				// in line Mi, we search the index of mi, if any
+				nnz0 += rsb__nnz_split_hcoo_bsearch(JA+nnz0,mi,nnz1-nnz0);
+				if(nnz1-nnz0<1 || JA[nnz0]!=mi)
+					return NULL;// no element mi			
+			}
+			else
+			{
+				// delimit the current row
+				const rsb_coo_idx_t *IA = mtxAp->bpntr;
+				const rsb_coo_idx_t *JA = mtxAp->bindx;
+				// we search the beginning of line Mi
+				nnz0 = rsb__nnz_split_coo_bsearch(IA,Mi,nnz);
+				// we search the end of line Mi
+				nnz1 = nnz0+rsb__nnz_split_coo_bsearch(IA+nnz0,Mi+1,nnz-nnz0);
+				if(nnz1-nnz0<1)
+					return NULL;// no row Mi
+				// in line Mi, we search the index of mi, if any
+				nnz0 += rsb__nnz_split_coo_bsearch(JA+nnz0,mi,nnz1-nnz0);
+				if(nnz1-nnz0<1 || JA[nnz0]!=mi)
+					return NULL;// no element mi
+			}
+			return ((const rsb_char_t*)(mtxAp->VA))+mtxAp->el_size*(nnz0);
+		}
+		else
+		{
+			if(mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES_CSR)
+			       	si = rsb__seek_half_idx_t(((rsb_half_idx_t*)bindx)+bpntr[Mi],mi,bpntr[Mi+1]-bpntr[Mi]);
+			else
+			       	si = rsb__seek_nnz_idx_t(bindx+bpntr[Mi],mi,bpntr[Mi+1]-bpntr[Mi]);
+		}
+
+		if(si == RSB_MARKER_NNZ_VALUE)
+			return NULL;
+		else
+			return ((const rsb_char_t*)(mtxAp->VA))+mtxAp->el_size*(bpntr[Mi]+si);
+	}
+}
+
+const void * rsb__do_coo_element_inner_address(const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t i, rsb_coo_idx_t j)
+{
+	return rsb_do_has_coo_element_inner(mtxAp,i,j);
+}
+
+rsb_err_t rsb__do_set_coo_elements(struct rsb_mtx_t * mtxAp, const void * VA, const rsb_coo_idx_t *IA, const rsb_coo_idx_t *JA, rsb_nnz_idx_t nnz)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * FIXME: undocumented
+	 * FIXME: should parallelize meaningfully
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t n;
+
+	if(nnz<rsb_global_session_handle.rsb_want_threads)
+	{
+		for(n=0;RSB_LIKELY(n<nnz);++n)
+		{
+			RSB_DO_ERROR_CUMULATE(errval,rsb__do_set_coo_element(mtxAp,((rsb_char_t*)VA)+mtxAp->el_size*n,IA[n],JA[n]));
+			if(RSB_SOME_ERROR(errval))
+			{
+			       	RSB_ERROR("error updating %dth element of %d: %d %d\n",n,nnz,IA[n],JA[n]);
+				RSB_PERR_GOTO(err,RSB_ERRM_ES)
+			}
+		}
+	}
+	else
+	{
+		#pragma omp parallel for schedule(static,1) reduction(|:errval)  RSB_NTC
+		for(n=0;n<nnz;++n)
+		{
+			RSB_DO_ERROR_CUMULATE(errval,rsb__do_set_coo_element(mtxAp,((rsb_char_t*)VA)+mtxAp->el_size*n,IA[n],JA[n]));
+			if(RSB_SOME_ERROR(errval))
+			{
+			       	RSB_ERROR("error updating %dth element of %d: %d %d\n",n,nnz,IA[n],JA[n]);
+				// RSB_PERR_GOTO(err,RSB_ERRM_ES)
+			}
+		}
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_set_coo_element(struct rsb_mtx_t * mtxAp, const void * vp, const rsb_coo_idx_t i, const rsb_coo_idx_t j)
+{
+	return rsb__do_upd_coo_element(mtxAp, vp, i, j, RSB_FLAG_DUPLICATES_DEFAULT_HANDLE);
+}
+
+rsb_err_t rsb__do_upd_coo_element(struct rsb_mtx_t * mtxAp, const void * vp, const rsb_coo_idx_t i, const rsb_coo_idx_t j, rsb_flags_t flags)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * Overwrites the element at (i,j), if present.
+	 * If not present, returns RSB_ERR_GENERIC_ERROR.
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	void * OV = NULL;
+
+	if(!mtxAp || !vp || RSB_INVALID_COO_INDEX(i) || RSB_INVALID_COO_INDEX(j))
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	if(!rsb__is_css_matrix(mtxAp))
+	{
+		errval = RSB_ERR_UNIMPLEMENTED_YET;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+	
+	if(!RSB_MATRIX_CONTAINS(mtxAp,i,j))
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	if( i == j && rsb__get_diagonal_type_flag(mtxAp)==RSB_DIAGONAL_I )
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	OV = (void*) rsb_do_has_coo_element_inner(mtxAp,i,j);
+
+	if(OV)
+	{
+#if RSB_ALLOW_MTX_UPD
+		rsb_flags_t cflag = ( mtxAp->flags & RSB_FLAG_ALL_DUPLICATE_FLAGS )
+		                  | (        flags & RSB_FLAG_ALL_DUPLICATE_FLAGS );
+
+		if(RSB_DO_FLAG_HAS(cflag,RSB_FLAG_DUPLICATES_SUM))
+		{ RSB_NUMERICAL_TYPE_SUM_AND_STORE_ELEMENTS(OV,vp,mtxAp->typecode);}
+		else
+#endif
+		{ RSB_NUMERICAL_TYPE_SET_ELEMENT(OV,vp,mtxAp->typecode); }
+	}
+	else
+		errval = RSB_ERR_GENERIC_ERROR;
+
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+#if 1
+/* rsb_do_locate_nnz_element and rsb_do_get_nnz_element are used (experimentally) by sparsersb  */
+static rsb_err_t rsb_do_locate_nnz_element(const struct rsb_mtx_t * mtxAp, void ** vpp, rsb_coo_idx_t*ip, rsb_coo_idx_t*jp, rsb_nnz_idx_t nzi)
+{
+	/* FIXME: * new, unfinished, untested */
+	rsb_err_t errval = RSB_ERR_BADARGS;
+	rsb_submatrix_idx_t i,j;
+	struct rsb_mtx_t * submatrix = NULL;
+
+	if(!mtxAp)
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	if(rsb__is_terminal_recursive_matrix(mtxAp)
+		       	&& ( nzi >= mtxAp->nzoff )
+		       	&& ( nzi <  mtxAp->nzoff+mtxAp->nnz )
+			)
+	{
+		rsb_nnz_idx_t lnz = nzi-mtxAp->nzoff;
+		rsb_byte_t*OV = mtxAp->VA;
+		OV += mtxAp->el_size * lnz;
+
+		if( (!ip) || (!jp) )
+			goto noij;
+
+		if(rsb__is_coo_matrix(mtxAp))
+		{
+			if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_USE_HALFWORD_INDICES))
+			{
+				RSB_DECLARE_CONST_HALFCOO_ARRAYS_FROM_MATRIX(mIA,mJA,mtxAp)
+				i = mIA[lnz];
+				j = mJA[lnz];
+			}
+			else
+			{
+				RSB_DECLARE_CONST_FULLCOO_ARRAYS_FROM_MATRIX(mIA,mJA,mtxAp)
+				i = mIA[lnz];
+				j = mJA[lnz];
+			}
+		}
+		else
+		{
+			if(mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES_CSR)
+			{
+				RSB_DECLARE_CONST_HALFCSR_ARRAYS_FROM_MATRIX(mPA,mJA,mtxAp);
+				j = mJA[lnz];
+				i = rsb__nnz_split_coo_bsearch(mPA,lnz,mtxAp->nnz);
+			}
+			else
+			{
+				RSB_DECLARE_CONST_FULLCSR_ARRAYS_FROM_MATRIX(mPA,mJA,mtxAp);
+				j = mJA[lnz];
+				i = rsb__nnz_split_coo_bsearch(mPA,lnz,mtxAp->nnz);
+			}
+		}
+		if(ip)*ip = i;
+		if(jp)*jp = j;
+noij:
+		if(vpp)*vpp = OV;
+		errval = RSB_ERR_NO_ERROR;
+	}
+	else
+	{
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix
+			       	&& ( nzi >= submatrix->nzoff ) 
+			       	&& ( nzi <  submatrix->nnz+submatrix->nzoff )
+		  )
+		{
+		       	errval = rsb_do_locate_nnz_element(submatrix,vpp,ip,jp,nzi);
+			RSB_PERR_GOTO(err,RSB_ERRM_ES)
+		}
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb_do_get_nnz_element(const struct rsb_mtx_t * mtxAp, void * vp, rsb_coo_idx_t*ip, rsb_coo_idx_t*jp, rsb_nnz_idx_t nzi)
+{
+	/* FIXME: * new, unfinished (20130331) */
+	void * OV = NULL;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	errval = rsb_do_locate_nnz_element(mtxAp,&OV,ip,jp,nzi);
+	if((!RSB_SOME_ERROR(errval)) && OV)
+	{
+		RSB_NUMERICAL_TYPE_SET_ELEMENT(vp,OV,mtxAp->typecode);
+	}
+	RSB_DO_ERR_RETURN(errval)
+}
+#endif
+
+#if 0
+rsb_err_t rsb_do_set_nnz_element(const struct rsb_mtx_t * mtxAp, const void * vp, rsb_nnz_idx_t nzi)
+{
+	/* FIXME: * new, unfinished (20130331) */
+	void * OV = NULL;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	errval = rsb_do_locate_nnz_element(mtxAp,&OV,NULL,NULL,nzi);
+	if((!RSB_SOME_ERROR(errval)) && OV)
+	{
+		RSB_NUMERICAL_TYPE_SET_ELEMENT(OV,vp,mtxAp->typecode);
+	}
+	RSB_DO_ERR_RETURN(errval)
+}
+#endif
+
+rsb_err_t rsb__do_get_coo_element(const struct rsb_mtx_t * mtxAp, void * vp, rsb_coo_idx_t i, rsb_coo_idx_t j)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * FIXME: undocumented
+	 * Gets the element at (i,j), if present.
+	 * If not present, returns RSB_ERR_GENERIC_ERROR and zeros the area.
+	 * */
+	const void * OV = NULL;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+#if RSB_ALLOW_ZERO_DIM
+	if(RSB_ANY_MTX_DIM_ZERO(mtxAp))
+		goto err; /* FIXME: skipping further error checks */
+#endif
+	if(!mtxAp || !vp || RSB_INVALID_COO_INDEX(i) || RSB_INVALID_COO_INDEX(j))
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	if(!rsb__is_css_matrix(mtxAp))
+	{
+		errval = RSB_ERR_UNIMPLEMENTED_YET;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+	
+	if(!RSB_MATRIX_CONTAINS(mtxAp,i,j))
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	if( i == j && rsb__get_diagonal_type_flag(mtxAp)==RSB_DIAGONAL_I )
+		return rsb__fill_with_ones(vp,mtxAp->typecode,1,1);
+
+	OV = rsb_do_has_coo_element_inner(mtxAp,i,j);
+	if(!OV)
+	{
+		errval = RSB_ERR_GENERIC_ERROR;
+		rsb__cblas_Xscal(mtxAp->typecode,1,NULL,vp,1);
+	}
+	else
+		{RSB_NUMERICAL_TYPE_SET_ELEMENT(vp,OV,mtxAp->typecode);}
+
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_reverse_odd_rows(struct rsb_mtx_t * mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \rsb_warn_unoptimized_msg
+	 * \note Works only for csr leaves.
+	 * */
+	RSB_DEBUG_ASSERT(mtxAp);
+
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		struct rsb_mtx_t * submatrix;
+		rsb_submatrix_idx_t i,j;
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+			rsb__do_reverse_odd_rows(submatrix);
+	}
+	else
+	{
+		if(rsb__is_coo_matrix(mtxAp))
+		{
+			if( mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES)
+			{
+				//RSB_DECLARE_CONST_HALFCOO_ARRAYS_FROM_MATRIX(IA,JA,mtxAp)
+				/* FIXME: not implemented yet for COO */
+			}
+			else
+			{
+				//RSB_DECLARE_CONST_FULLCOO_ARRAYS_FROM_MATRIX(IA,JA,mtxAp)
+				/* FIXME: not implemented yet for COO */
+			}
+		}
+		else
+		{
+			rsb_coo_idx_t i;
+			rsb_nnz_idx_t ib,ie;
+			if(mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES)
+			{
+				RSB_DECLARE_HALFCSR_ARRAYS_FROM_MATRIX(IA,JA,mtxAp)
+				for(i=1;i<mtxAp->nr;i+=2)
+					ib = IA[i],ie = IA[i+1],
+					rsb__util_reverse_halfword_coo_array(JA+ib,ie-ib);
+			}
+			else
+			{
+				RSB_DECLARE_FULLCSR_ARRAYS_FROM_MATRIX(IA,JA,mtxAp)
+				for(i=1;i<mtxAp->nr;i+=2)
+					ib = IA[i],ie = IA[i+1],
+					rsb__util_reverse_fullword_coo_array(JA+ib,ie-ib);
+			}
+		}
+	}
+	return RSB_ERR_NO_ERROR; 
+}
+
+rsb_err_t rsb__do_zsort_coo_submatrices(struct rsb_mtx_t * mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * \rsb_warn_unoptimized_msg
+	 * */
+	RSB_DEBUG_ASSERT(mtxAp);
+
+	if(rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		struct rsb_mtx_t * submatrix;
+		rsb_submatrix_idx_t i,j;
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+    			if(submatrix)
+				rsb__do_zsort_coo_submatrices(submatrix);
+	}
+	else
+	{
+		if(rsb__is_coo_matrix(mtxAp))
+		{
+			rsb_err_t errval = RSB_ERR_NO_ERROR;
+			if( mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES)
+			{
+				RSB_DECLARE_FULLCOO_ARRAYS_FROM_MATRIX(IA,JA,mtxAp)
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_switch_to_fullword_coo(mtxAp));
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_index_based_z_morton_sort(NULL,NULL,NULL,IA,JA,mtxAp->VA,mtxAp->nr,mtxAp->nc,mtxAp->nnz,mtxAp->typecode,RSB_OP_FLAG_DEFAULT));
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_switch_to_halfword_coo(mtxAp));
+			}
+			else
+			{
+				RSB_DECLARE_FULLCOO_ARRAYS_FROM_MATRIX(IA,JA,mtxAp)
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_index_based_z_morton_sort(NULL,NULL,NULL,IA,JA,mtxAp->VA,mtxAp->nr,mtxAp->nc,mtxAp->nnz,mtxAp->typecode,RSB_OP_FLAG_DEFAULT));
+			}
+			return errval;
+		}
+		else
+		{
+			if(mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES)
+			{
+				/* nothing to do */
+			}
+			else
+			{
+				/* nothing to do */
+			}
+		}
+	}
+	return RSB_ERR_NO_ERROR; 
+}
+
+/* @endcond */
diff --git a/rsb_set.h b/rsb_set.h
new file mode 100644
index 0000000..73f013a
--- /dev/null
+++ b/rsb_set.h
@@ -0,0 +1,43 @@
+/*
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains matrix setter functions.
+ * */
+
+#ifndef RSB_SET_H_INCLUDED
+#define RSB_SET_H_INCLUDED
+
+#include "rsb_internals.h"
+
+rsb_err_t rsb__do_set_coo_element(struct rsb_mtx_t * mtxAp, const void * vp, const rsb_coo_idx_t i, const rsb_coo_idx_t j);
+rsb_err_t rsb__do_upd_coo_element(struct rsb_mtx_t * mtxAp, const void * vp, const rsb_coo_idx_t i, const rsb_coo_idx_t j, rsb_flags_t flags);
+rsb_err_t rsb__do_get_coo_element(const struct rsb_mtx_t * mtxAp, void * vp, rsb_coo_idx_t i, rsb_coo_idx_t j);
+rsb_err_t rsb__do_set_coo_elements(struct rsb_mtx_t * mtxAp, const void * VA, const rsb_coo_idx_t *IA, const rsb_coo_idx_t *JA, rsb_nnz_idx_t nnz);
+const void * rsb__do_coo_element_inner_address(const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t i, rsb_coo_idx_t j);
+rsb_err_t rsb__do_reverse_odd_rows(struct rsb_mtx_t * mtxAp);
+rsb_err_t rsb__do_zsort_coo_submatrices(struct rsb_mtx_t * mtxAp);
+#endif /* RSB_SET_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_spgemm.c b/rsb_spgemm.c
new file mode 100644
index 0000000..7cbaa18
--- /dev/null
+++ b/rsb_spgemm.c
@@ -0,0 +1,741 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains functions for sparse matrices multiplication.
+ */
+/* FIXME : UNFINISHED, UNCHECKED, UNSECURED, preliminary code
+ * TODO : spscatter
+ *  	  should parallelize this code (not difficult; based on nproc accumulator arrays, one external omp loop)
+ * */
+
+#include "rsb_common.h"
+#include "rsb_clone.h"
+
+#define RSB_WANT_SPGEMM_MFLOPS 1
+#define RSB_WANT_SPGEMM_VERBOSE (RSB_WANT_SPGEMM_MFLOPS&&0)  
+#define RSB_WANT_SPGEMM_VERBOSE_PROGRAM 1
+#define WANT_SPGEMM_FULL 1
+// take care that having more threads will not give you benefits if memory access is slow 
+#define RSB_WANT_OMP_RECURSIVE_SPGEMM_KERNELS RSB_WANT_OMP_RECURSIVE_KERNELS
+
+#if 0
+/* 20120930	unused ?! */
+static rsb_err_t rsb_spgemm_sym_count_blocks(const struct rsb_mtx_t * mtxAp, const struct rsb_mtx_t * mtxBp, rsb_nnz_idx_t * cblocksp)
+{
+	/**
+	 * \ingroup gr_unfinished
+	 * Counts an upper bound estimate of the number of output blocks of the product of the conformant sparse matrices A and B.
+	 * It is a valid estimate for the expected work.
+	 * Matrices must be pairwise CSR and CSC formats or BCSR br x bc and BCSC bc x br formats to conform.
+	 * Matrices should be unsymmetric (although no error would be issued on symmetric matrices as input).
+	 *
+	 * ...
+	 * */
+	rsb_nnz_idx_t cblocks=0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t bi,bj;
+	rsb_coo_idx_t cm,ck;
+
+	if(!mtxAp || !mtxBp || !cblocksp || !rsb_are_matrices_spgemm_block_conformant(mtxAp,mtxBp))
+	{errval = RSB_ERR_BADARGS;goto err;}
+	
+	cm=mtxAp->nr;
+	ck=mtxBp->nc;
+
+	for(bi=0;bi<cm;++bi)
+	for(bj=0;bj<ck;++bj)
+	{
+		rsb_nnz_idx_t aro=mtxAp->bpntr[bi];
+		rsb_nnz_idx_t are=mtxAp->bpntr[bi+1];
+//		rsb_nnz_idx_t arb=mtxAp->bpntr[bi+1] - mtxAp->bpntr[bi];
+		rsb_nnz_idx_t bro=mtxBp->bpntr[bj];
+		rsb_nnz_idx_t bre=mtxBp->bpntr[bj+1];
+//		rsb_nnz_idx_t bcb=mtxBp->bpntr[bj+1] - mtxBp->bpntr[bj];
+		rsb_nnz_idx_t al=0,bl=0;
+
+		if(/*  arb<bcb*/ 1 )
+		{
+			for(al=aro,bl=bro;al<are&&bl<bre;)
+			{
+				while(mtxBp->bindx[bl]<mtxAp->bindx[al] && bl<bre)
+					++bl;
+
+				/* TODO : a fast binary search codelet here */
+
+				while(mtxBp->bindx[bl]>mtxAp->bindx[al] && al<are)
+					++al;
+
+				if(mtxAp->bindx[al]==mtxBp->bindx[bl])
+					++cblocks,++al;
+			}
+		}
+		else
+		{
+			/* FIXME : write me */
+		}
+	}
+	
+	*cblocksp=cblocks;
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+#endif
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+static rsb_err_t rsb_spgemm_inner(const struct rsb_coo_matrix_t * acoo, const struct rsb_coo_matrix_t * bcoo, rsb_nnz_idx_t * cblocksp, rsb_coo_idx_t ** PA, rsb_coo_idx_t ** JA, void ** VA, size_t * opsp)
+{
+	/**
+	 * \ingroup gr_unfinished
+	 * Counts an upper bound estimate of the number of output blocks of the product of the conformant sparse matrices A and B.
+	 * It is a valid estimate for the expected work.
+	 * Matrices must be pairwise CSR formats or CSC formats to conform.
+	 * Matrices should be unsymmetric (although no error would be issued on symmetric matrices as input).
+	 *
+	 * CSR-CSR or CSC-CSC (and swapped A/B)
+	 * ...
+	 * */
+	rsb_nnz_idx_t cblocks=0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t bi;
+	rsb_coo_idx_t ai,aj;
+//	rsb_coo_idx_t ci,cj;
+	rsb_coo_idx_t cm,ck;
+	/*rsb_coo_idx_t am,ak*/;
+	rsb_coo_idx_t /*bm,*/bk;
+	rsb_coo_idx_t al,bl;
+	size_t el_size=0;
+	rsb_coo_idx_t*abpntr=NULL,*bbpntr=NULL;
+	rsb_coo_idx_t*abindx=NULL,*bbindx=NULL;
+
+	void *acc=NULL;
+	rsb_nnz_idx_t * p=NULL;
+#if RSB_WANT_SPGEMM_MFLOPS
+	rsb_nnz_idx_t ops=0;
+	rsb_time_t t=0;
+#endif /* RSB_WANT_SPGEMM_MFLOPS */
+
+	if(!acoo || !bcoo || !cblocksp /* || !rsb_are_matrices_spgemm_block_conformant(acoo,bcoo)*/)
+	{errval = RSB_ERR_BADARGS;goto err;}
+	el_size = RSB_SIZEOF(acoo->typecode);
+
+	abpntr=acoo->IA;
+	bbpntr=bcoo->IA;
+	abindx=acoo->JA;
+	bbindx=bcoo->JA;
+
+#if RSB_WANT_SPGEMM_MFLOPS
+	t = - rsb_time();
+#endif /* RSB_WANT_SPGEMM_MFLOPS */
+
+	cm=acoo->nr;
+	/*bm=bcoo->nr;*/
+	bk=bcoo->nc;
+	ck=bcoo->nc;
+	/*ak=acoo->nc;*/
+
+	acc = rsb__calloc(el_size*bk);
+	if(!acc) {errval = RSB_ERR_BADARGS;goto err;}
+	p = rsb__calloc(sizeof(rsb_nnz_idx_t)*(bk+1));
+	if(!p) {errval = RSB_ERR_BADARGS;goto err;}
+
+	if(PA && JA && VA)
+		*PA = rsb__calloc(sizeof(rsb_coo_idx_t)*(cm+1));
+
+	if(!*PA)
+	{
+		RSB_CONDITIONAL_FREE(*PA);
+		errval = RSB_ERR_BADARGS;goto err;
+	}
+
+	for(ai=0;ai<cm;++ai)
+	{
+		rsb_nnz_idx_t aro;
+		rsb_nnz_idx_t are;
+		/* rsb_nnz_idx_t arb; */
+		rsb_nnz_idx_t marker;
+		rsb_nnz_idx_t rcblocks=0;
+
+		marker=cblocks+1;
+		aro=abpntr[ai];
+		are=abpntr[ai+1];
+		/* arb=abpntr[ai+1] - abpntr[ai]; */
+		/* we start row ai of target matrix C */
+		for(al=aro;al<are;++al)
+		{
+			rsb_nnz_idx_t bro;
+			rsb_nnz_idx_t bre;
+			/* rsb_nnz_idx_t bcb; */
+			aj=abindx[al];
+			bi=aj;
+			bro=bbpntr[bi];
+			bre=bbpntr[bi+1];
+			/* bcb=bbpntr[bi+1] - bbpntr[bi]; */
+			for(bl=bro;bl<bre;++bl)
+			{
+				rsb_coo_idx_t bj;
+				bj=bbindx[bl];
+			//	RSB_STDOUT("(%d %d) x (%d %d)\n",ai,aj,bi,bj);
+				if(p[bj]<marker)
+					p[bj]=marker,
+					rcblocks++;
+				else
+					;
+			}
+		}
+		if(!RSB_IS_VALID_NNZ_SUM(cblocks,rcblocks))
+		{
+			RSB_ERROR("number of nonzeroes in the matrix product may exceed the maximum allowed.\n");
+			errval = RSB_ERR_LIMITS;
+			goto err;
+		}
+		cblocks += rcblocks;
+		(*PA)[ai+1]=cblocks;
+	}
+	if(cblocksp)
+		*cblocksp=cblocks;
+
+	/* FIXME: and what to do if cblocks == 0 ? */
+	if(PA && JA && VA)
+	{
+		*JA = rsb__calloc(sizeof(rsb_coo_idx_t)*cblocks);
+		*VA = rsb__calloc(el_size*cblocks);
+		if(!*PA || !*JA || !*VA)
+		{
+			RSB_CONDITIONAL_FREE(*PA);
+			RSB_CONDITIONAL_FREE(*JA);
+			RSB_CONDITIONAL_FREE(*VA);
+			errval = RSB_ERR_BADARGS;goto err;
+		}
+#if RSB_WANT_OMP_RECURSIVE_SPGEMM_KERNELS
+	#pragma omp parallel RSB_NTC
+	{
+		const rsb_thread_t nt = omp_get_num_threads();
+		const rsb_thread_t tn = omp_get_thread_num();
+		rsb_coo_idx_t bj;
+		void *acc_=NULL; rsb_nnz_idx_t *p_=NULL;
+		#pragma omp critical (spgemm_alloc)
+	{
+#if RSB_WANT_SPGEMM_VERBOSE
+		//if(!tn)RSB_STDOUT("parallel SPGEMM with %d threads\n",nt);
+#endif /* RSB_WANT_SPGEMM_VERBOSE */
+		if(tn)
+		{
+			acc_=rsb__calloc(el_size*bk);
+			if(!acc_) {errval = RSB_ERR_BADARGS;}
+			p_=rsb__calloc(sizeof(rsb_nnz_idx_t)*(bk+1));
+			if(!p_) {errval = RSB_ERR_BADARGS;}
+		}
+		else
+		{
+			acc_=acc;p_=p;
+			for(bj=0;bj<bk;++bj) p_[bj]=0;
+		}
+	}
+		if(RSB_SOME_ERROR(errval))
+			goto ierr;
+		/* FIXME: ops in parallel section ! */
+		rsb__do_util_csr_csr_sparse_mul_serial(*PA,*JA,*VA,abpntr,bbpntr,abindx,bbindx,acoo->VA,bcoo->VA,cm,ck,p_,acc_,&ops,acoo->typecode,tn,nt);
+ierr:
+		#pragma omp critical (spgemm_alloc)
+		{ if(tn) { RSB_CONDITIONAL_FREE(acc_);RSB_CONDITIONAL_FREE(p_); } }
+	}
+#else /* RSB_WANT_OMP_RECURSIVE_SPGEMM_KERNELS */
+	{
+		rsb_coo_idx_t bj;
+		for(bj=0;bj<bk;++bj)
+			p[bj]=0;
+		
+		//rsb__do_util_csr_csr_sparse_mul_serial(*PA,*JA,*VA,acoo->bpntr,bcoo->bpntr,acoo->bindx,bcoo->bindx,acoo->VA,bcoo->VA,cm,ck,p,acc,&ops,acoo->typecode,0,1);
+		rsb__do_util_csr_csr_sparse_mul_serial(*PA,*JA,*VA,acoo->IA,bcoo->IA,acoo->JA,bcoo->JA,acoo->VA,bcoo->VA,cm,ck,p,acc,&ops,acoo->typecode,0,1);
+	}
+#endif /* RSB_WANT_OMP_RECURSIVE_SPGEMM_KERNELS */
+	}
+	if(1)
+	{
+		/* CSR to COO conversion */
+		/* FIXME: these lines are NEW, and should be error-checked */
+		/* FIXME */
+		/* FIXME */
+		/* FIXME */
+		/* FIXME */
+		*PA = rsb__realloc(*PA,RSB_MAX(cblocks,RSB_MAX(cm,ck))*sizeof(rsb_nnz_idx_t));
+		RSB_DO_ERROR_CUMULATE(errval,rsb__do_switch_compressed_array_to_fullword_coo(*PA,cm,0,NULL));
+	}
+#if RSB_WANT_SPGEMM_MFLOPS
+	ops*=2;
+	t += rsb_time();
+#endif /* RSB_WANT_SPGEMM_MFLOPS */
+#if RSB_WANT_SPGEMM_VERBOSE
+#if RSB_ALLOW_STDOUT
+	RSB_STDOUT("%zd nonzeros, %zd x %zd\n",(size_t)cblocks,(size_t)cm,(size_t)ck);
+	RSB_STDOUT("%g MFLOPS\n",((double)(ops))/(t*1000000));
+#endif /* RSB_ALLOW_STDOUT */
+#endif /* RSB_WANT_SPGEMM_VERBOSE */
+err:
+	RSB_CONDITIONAL_FREE(acc);
+	RSB_CONDITIONAL_FREE(p);
+	if(opsp)*opsp=ops;
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_CONDITIONAL_FREE(*PA);
+		RSB_CONDITIONAL_FREE(*JA);
+		RSB_CONDITIONAL_FREE(*VA);
+	}
+	RSB_DO_ERR_RETURN(errval)
+}
+
+static rsb_err_t rsb_do_spgemm_dense_inner(rsb_coo_idx_t ldc, rsb_coo_idx_t nr, rsb_coo_idx_t nc, rsb_bool_t isccolmajor, void *cVA_, const struct rsb_coo_matrix_t * acoo, const struct rsb_coo_matrix_t * bcoo, rsb_nnz_idx_t * cblocksp, size_t * opsp)
+{
+	/**
+	 * \ingroup gr_unfinished
+	 * */
+	//rsb_nnz_idx_t cblocks=0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	//rsb_coo_idx_t bi;
+	//rsb_coo_idx_t ai,aj;
+	rsb_coo_idx_t cm,ck;
+	rsb_coo_idx_t /*am,*/ak;
+	rsb_coo_idx_t bm,bk;
+	//rsb_coo_idx_t al,bl;
+	size_t el_size=0;
+
+#if RSB_WANT_SPGEMM_MFLOPS
+	rsb_nnz_idx_t ops=0;
+	rsb_time_t t=0;
+#endif /* RSB_WANT_SPGEMM_MFLOPS */
+	rsb_coo_idx_t*abpntr=NULL,*bbpntr=NULL;
+	rsb_coo_idx_t*abindx=NULL,*bbindx=NULL;
+
+	if(!acoo || !bcoo || !cblocksp /* || !rsb_are_matrices_spgemm_block_conformant(acoo,bcoo)*/)
+	{errval = RSB_ERR_BADARGS;goto err;}
+	el_size = RSB_SIZEOF(acoo->typecode);
+
+	if( (isccolmajor && (ldc<nr)) ||  ((!isccolmajor) && (ldc<nc))
+	 || (acoo->nc != bcoo->nr) || (acoo->nr > nr) || (bcoo->nc > nc) 
+	  )
+	{
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+#if RSB_WANT_SPGEMM_MFLOPS
+	t = - rsb_time();
+#endif /* RSB_WANT_SPGEMM_MFLOPS */
+
+	abpntr=acoo->IA;
+	bbpntr=bcoo->IA;
+	abindx=acoo->JA;
+	bbindx=bcoo->JA;
+
+	cm=acoo->nr;
+	bm=bcoo->nr;
+	bk=bcoo->nc;
+	ck=bcoo->nc;
+	ak=acoo->nc;
+
+	{
+		rsb_nnz_idx_t opss=0;
+#if RSB_WANT_OMP_RECURSIVE_SPGEMM_KERNELS
+#if RSB_WANT_SPGEMM_MFLOPS
+		//size_t opss=0;
+#else /* RSB_WANT_SPGEMM_MFLOPS */
+		//size_t opss=NULL;
+#endif /* RSB_WANT_SPGEMM_MFLOPS */
+	#pragma omp parallel reduction(+:opss) RSB_NTC
+	{
+		const rsb_thread_t nt = omp_get_num_threads();
+		const rsb_thread_t tn = omp_get_thread_num();
+		//rsb_coo_idx_t bj;
+		rsb__do_util_csr_csr_dense_mul_serial(ldc,nr,nc,isccolmajor,cVA_,abpntr,bbpntr,abindx,bbindx,acoo->VA,bcoo->VA,cm,ck,&opss,acoo->typecode,tn,nt);
+	}
+#else /* RSB_WANT_OMP_RECURSIVE_SPGEMM_KERNELS */
+	{
+		rsb__do_util_csr_csr_dense_mul_serial(ldc,nr,nc,isccolmajor,cVA_,abpntr,bbpntr,abindx,bbindx,acoo->VA,bcoo->VA,cm,ck,&opss,acoo->typecode,0,1);
+	}
+#endif /* RSB_WANT_OMP_RECURSIVE_SPGEMM_KERNELS */
+		ops+=opss;
+	}
+
+#if RSB_WANT_SPGEMM_MFLOPS
+	ops*=2;
+	t += rsb_time();
+#endif /* RSB_WANT_SPGEMM_MFLOPS */
+#if RSB_WANT_SPGEMM_VERBOSE
+#if RSB_ALLOW_STDOUT
+	//RSB_STDOUT("%zd nonzeros , %zd x %zd\n",(size_t)cblocks,cm,ck);
+	RSB_STDOUT("%g MFLOPS\n",((double)(ops))/(t*1000000));
+#endif /* RSB_ALLOW_STDOUT */
+#endif /* RSB_WANT_SPGEMM_VERBOSE */
+	if(opsp)*opsp=ops;
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+static struct rsb_mtx_t * rsb_spgemm_tmp(rsb_type_t typecode, const struct rsb_mtx_t * mtxAp, const struct rsb_mtx_t * mtxBp, rsb_trans_t transA, rsb_trans_t transB, const  void *alphap, const void *betap, rsb_err_t *errvalp, rsb_time_t * dtp, size_t * opsp)
+{
+	/**
+	 * \ingroup gr_unfinished
+	 * FIXME: requires CSR format everywhere !
+	 * TODO need to-CSR cloning, here!
+	 * FIXME: missing a smart approach for handling symmetric matrices (current policy is explicit expansion) 
+	 * */
+	// FIXME: WON'T WORK FOR SOME VERY SPARSE MATRICES
+	void * VA = NULL;
+	rsb_coo_idx_t * IA = NULL, *JA = NULL;
+	rsb_nnz_idx_t nnz = 0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_flags_t flags = RSB_FLAG_DEFAULT_STORAGE_FLAGS;
+	struct rsb_coo_matrix_t acsr,bcsr;
+	struct rsb_mtx_t *mtxCp = NULL;
+	rsb_time_t dt = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_coo_idx_t cm = 0,ck = 0;
+	rsb_coo_idx_t tak = 0, tbm = 0;
+
+	RSB_BZERO_P(&acsr);
+	RSB_BZERO_P(&bcsr);
+
+	if(RSB_MATRIX_UNSUPPORTED_TYPE(typecode))
+	{
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		RSB_PERR_GOTO(err,RSB_ERRM_EM);
+	}
+
+	if( !mtxAp || !mtxBp )
+	{
+		RSB_ERROR("Supplied a NULL matrix pointer.\n");
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+	RSB_DO_FLAG_DEL(flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE);
+
+#if 0
+	if( mtxAp->typecode != mtxBp->typecode )
+	{
+		RSB_ERROR("Matrix types do not match.\n");
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+#else
+#endif
+
+	cm = RSB_MTX_TRANSPOSED_ROWS(mtxAp,transA);
+	ck = RSB_MTX_TRANSPOSED_COLS(mtxBp,transB);
+	tak = RSB_MTX_TRANSPOSED_COLS(mtxAp,transA);
+	tbm = RSB_MTX_TRANSPOSED_ROWS(mtxBp,transB);
+
+	if( (transA != RSB_TRANSPOSITION_N) || (transB != RSB_TRANSPOSITION_N) )
+	{
+		RSB_ERROR("Transposition parameter not yet supported!\n");
+		errval = RSB_ERR_UNIMPLEMENTED_YET;
+		goto err;
+	}
+
+	if(tak!=tbm)
+	{
+		RSB_ERROR("Matrix sizes do not match.\n");
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+
+	acsr.nr = mtxAp->nr;
+	acsr.nc = mtxAp->nc;
+	acsr.nnz = RSB_MAX(mtxAp->nnz,RSB_MAX(mtxAp->nr+1,mtxAp->nc+1)); /* FIXME: temporary !*/
+	acsr.nnz += mtxAp->nnz+RSB_MIN(mtxAp->nr+1,mtxAp->nc+1); /* FIXME: temporary, in case of symmetry & diagonal !*/
+	acsr.typecode = typecode;
+	if((rsb__allocate_coo_matrix_t(&acsr)!=&acsr))
+	{
+		RSB_ERROR("problem converting the A matrix\n");
+       		errval = RSB_ERR_INTERNAL_ERROR; goto err;
+	}
+	//acsr.nnz=mtxAp->nnz;
+	bcsr.nr = mtxBp->nr;
+	bcsr.nc = mtxBp->nc;
+	bcsr.nnz = RSB_MAX(mtxBp->nnz,RSB_MAX(mtxBp->nr+1,mtxBp->nc+1)); /* FIXME: temporary !*/
+	bcsr.nnz += mtxBp->nnz+RSB_MIN(mtxBp->nr+1,mtxBp->nc+1); /* FIXME: temporary, in case of symmetry & diagonal !*/
+	bcsr.typecode = typecode;
+	if((rsb__allocate_coo_matrix_t(&bcsr)!=&bcsr))
+	{
+		RSB_ERROR("problem converting the B matrix\n");
+       		errval = RSB_ERR_INTERNAL_ERROR; goto err;
+	}
+	//bcsr.nnz=mtxBp->nnz;
+	errval = rsb__do_get_csr(typecode,mtxAp,acsr.VA,acsr.IA,acsr.JA,RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS);
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_ERROR("csr extraction problems from matrix A\n");
+	      	errval = RSB_ERR_INTERNAL_ERROR; goto err;
+	}
+	errval = rsb__do_get_csr(typecode,mtxBp,bcsr.VA,bcsr.IA,bcsr.JA,RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS);
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_ERROR("csr extraction problems from matrix B\n");
+	      	errval = RSB_ERR_INTERNAL_ERROR; goto err;
+	}
+	acsr.nnz=acsr.IA[acsr.nr];
+	bcsr.nnz=bcsr.IA[bcsr.nr];
+
+	if(!mtxAp || !mtxBp)
+	{
+		RSB_ERROR(RSB_ERRM_ES);
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+
+	if(dtp)dt = - rsb_time();
+	if((errval = rsb_spgemm_inner(&acsr,&bcsr,&nnz,&IA,&JA,&VA,opsp))!=RSB_ERR_NO_ERROR)
+	/* FIXME: warning: allocation size may not be max(nnz,m) in mtxCp arrays, now ! */
+	{
+		//RSB_ERROR(RSB_ERRM_ES);
+		rsb__do_perror(NULL,errval);
+		goto err;
+	}
+	if(dtp)dt += rsb_time();
+	//else rsb__test_print_coo_mm(mtxAp->typecode,mtxAp->flags,IA,JA,VA,mtxAp->nr,mtxBp->nc,nnz,RSB_BOOL_TRUE,RSB_DEFAULT_STREAM);
+
+	mtxCp = rsb__do_mtx_alloc_from_coo_inplace(VA,IA,JA,nnz,typecode,cm,ck,RSB_DEFAULT_ROW_BLOCKING,RSB_DEFAULT_COL_BLOCKING,flags,&errval);
+	if(mtxCp)RSB_DO_FLAG_DEL(mtxCp->flags,RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS);
+	if(!mtxCp)
+	{
+		RSB_ERROR("Failed allocating a matrix.\n");
+		if(errval == RSB_ERR_NO_ERROR)
+			errval = RSB_ERR_INTERNAL_ERROR;
+		RSB_CONDITIONAL_FREE(IA);
+		RSB_CONDITIONAL_FREE(JA);
+		RSB_CONDITIONAL_FREE(VA);
+		goto err;
+	}
+	//else rsb__print_matrix_unsorted_coo(mtxCp);
+err:
+	if(dtp)*dtp=dt;
+	rsb__destroy_coo_matrix_t(&acsr);
+	rsb__destroy_coo_matrix_t(&bcsr);
+	RSB_CONDITIONAL_ERRPSET(errvalp,errval);
+	return mtxCp;
+}
+
+rsb_err_t rsb__do_spgemm_to_dense(rsb_type_t typecode, rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp, rsb_trans_t transB, const void *betap, const struct rsb_mtx_t * mtxBp, rsb_coo_idx_t ldc, rsb_coo_idx_t nr, rsb_coo_idx_t nc, rsb_bool_t isccolmajor, void *cVA, rsb_time_t * dtp, size_t * opsp)
+{
+	/**
+	 * \ingroup gr_unfinished
+	 * TODO: missing input validation
+	 * FIXME: WON'T WORK FOR SOME VERY SPARSE MATRICES
+	 * FIXME: alphap,betap,transA,transB still unused
+	 * */
+	rsb_nnz_idx_t nnz=0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_flags_t flags = RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS;
+	struct rsb_coo_matrix_t acsr,bcsr;
+	rsb_time_t dt=RSB_CONST_IMPOSSIBLY_BIG_TIME;
+
+	RSB_BZERO_P(&acsr);
+	RSB_BZERO_P(&bcsr);
+
+	if( RSB_INVALID_COO_INDEX(nr) || RSB_INVALID_COO_INDEX(nc) || RSB_INVALID_COO_INDEX(ldc) || !mtxAp || !mtxBp
+#if 0
+	|| (mtxAp->typecode != mtxBp->typecode)
+#endif
+       	)
+	{
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+	if( (transA != RSB_TRANSPOSITION_N) || (transB != RSB_TRANSPOSITION_N) )
+	{
+		RSB_ERROR("Transposition parameter not yet supported!\n");
+		errval = RSB_ERR_UNIMPLEMENTED_YET;
+		goto err;
+	}
+	acsr.nr=mtxAp->nr;
+	acsr.nc=mtxAp->nc;
+	acsr.nnz = RSB_MAX(mtxAp->nnz,RSB_MAX(mtxAp->nr+1,mtxAp->nc+1)); /* FIXME: temporary !*/
+	//acsr.typecode=mtxAp->typecode;
+	acsr.typecode=typecode;
+	if((rsb__allocate_coo_matrix_t(&acsr)!=&acsr))
+	{
+		RSB_ERROR("allocation problem\n");
+       		errval = RSB_ERR_INTERNAL_ERROR; goto err;
+	}
+	acsr.nnz=mtxAp->nnz;
+	bcsr.nr=mtxBp->nr;
+	bcsr.nc=mtxBp->nc;
+	bcsr.nnz = RSB_MAX(mtxBp->nnz,RSB_MAX(mtxBp->nr+1,mtxBp->nc+1)); /* FIXME: temporary !*/
+	//bcsr.typecode=mtxBp->typecode;
+	bcsr.typecode=typecode;
+	if((rsb__allocate_coo_matrix_t(&bcsr)!=&bcsr))
+	{
+		RSB_ERROR("allocation problem\n");
+       		errval = RSB_ERR_INTERNAL_ERROR; goto err;
+	}
+	bcsr.nnz=mtxBp->nnz;
+	errval = rsb__do_get_csr(typecode,mtxAp,acsr.VA,acsr.IA,acsr.JA,flags);
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_ERROR("CSR extraction from matrix A failed\n");
+	      	errval = RSB_ERR_INTERNAL_ERROR; goto err;
+	}
+	errval = rsb__do_get_csr(typecode,mtxBp,bcsr.VA,bcsr.IA,bcsr.JA,flags);
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_ERROR("CSR extraction from matrix B failed\n");
+	      	errval = RSB_ERR_INTERNAL_ERROR; goto err;
+	}
+
+	if(dtp)dt = - rsb_time();
+	if((errval = rsb_do_spgemm_dense_inner(ldc,nr,nc,isccolmajor,cVA,&acsr,&bcsr,&nnz,opsp))!=RSB_ERR_NO_ERROR)
+	{
+		RSB_ERROR(RSB_ERRM_ES);
+		goto err;
+	}
+	if(dtp)dt += rsb_time();
+
+err:
+	if(dtp)*dtp=dt;
+	rsb__destroy_coo_matrix_t(&acsr);
+	rsb__destroy_coo_matrix_t(&bcsr);
+	return errval;
+}
+
+rsb_err_t rsb_do_spgemm_test_code(const int argc, char * const argv[])
+{
+	/**
+	 * \ingroup gr_unfinished
+	 * FIXME : temporary, testing code */
+#if WANT_SPGEMM_FULL
+	struct rsb_mtx_t *mtxCp = NULL;
+#else /* WANT_SPGEMM_FULL */
+	void * CVA=NULL; rsb_coo_idx_t * CIA=NULL,*CJA=NULL;
+#endif /* WANT_SPGEMM_FULL */
+	//rsb_coo_idx_t m=0,k=0,nnz=0;
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_DEFAULT;
+	//rsb_flags_t flags = 0;
+	//rsb_flags_t flags = RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS;		 	
+	rsb_flags_t flags = RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	const char * filename=NULL;
+	const char * cfilename=NULL;
+	struct rsb_mtx_t *mtxAp = NULL;
+	struct rsb_mtx_t *mtxBp = NULL;
+	int br,bc;
+	/* 4x4 and 1x8 blockings give differing results, as a numerical side effect */
+	//rsb_nnz_idx_t cblocks=0;
+	rsb_trans_t transA = RSB_TRANSPOSITION_N;
+	rsb_trans_t transB = RSB_TRANSPOSITION_N;
+	//rsb_aligned_t errnorm[];
+#if 0
+	rsb_aligned_t alpha[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_aligned_t beta[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_byte_t * alphap=alpha;
+	rsb_byte_t * betap=beta;
+#else
+	rsb_byte_t * alphap=NULL;
+	rsb_byte_t * betap=NULL;
+#endif
+	rsb_time_t rsb_spg_time = RSB_CONST_IMPOSSIBLY_BIG_TIME,csr_spg_time = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t t = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_thread_t nt = rsb_get_num_threads(); 
+	size_t ops=0;
+	//rsb_option options[] = { RSB_BENCH_PROG_OPTS {0,0,0,0} };
+	br=1,bc=8; br=4,bc=4; br=1,bc=1; /* the last assignment wins: effectively 1x1 blocking */
+
+	RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORT_INPUT);
+	filename = "pd.mtx";
+
+	if(argc>=2)
+		filename=argv[1];
+
+	if((errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS)) != RSB_ERR_NO_ERROR)
+		goto err;
+
+	if((errval = rsb__do_load_matrix_file_as_matrix_market(&mtxAp,filename,flags,typecode))!=RSB_ERR_NO_ERROR)
+		goto err;
+
+	if(argc>=3 && strcmp(argv[2],argv[1]))
+		errval = rsb__do_load_matrix_file_as_matrix_market(&mtxBp,argv[2],flags,typecode);
+	else
+		mtxBp=mtxAp;
+
+	if(argc>=4)
+		cfilename = argv[3];
+	else
+		cfilename = "pd.tmp.mtx";
+
+	t = - rsb_time();
+	if((mtxCp = rsb_spgemm_tmp(typecode,mtxAp,mtxBp,transA,transB,alphap,betap,&errval,&csr_spg_time,&ops))==NULL)
+		goto err;
+
+#if RSB_WANT_SPGEMM_VERBOSE
+	//RSB_STDOUT("%zd nonzeros, %g s (%g Mnnz/s)\n",(size_t)cblocks,t,((double)(cblocks))/(t*1000000));
+#endif /* RSB_WANT_SPGEMM_VERBOSE */
+
+err:
+	t += rsb_time();
+	rsb_spg_time=t;
+#if RSB_WANT_SPGEMM_VERBOSE_PROGRAM
+#if RSB_ALLOW_STDOUT
+	RSB_STDOUT_MATRIX_SUMMARY(mtxAp);
+	RSB_STDOUT("\n * \n");
+	RSB_STDOUT_MATRIX_SUMMARY(mtxBp);
+	RSB_STDOUT("\n = \n");
+	RSB_STDOUT_MATRIX_SUMMARY(mtxCp);
+	RSB_STDOUT("\n");
+	RSB_STDOUT("%%:CSR_SPGEMM_PERFORMANCE:");RSB_STDOUT_MATRIX_ESSENTIALS(mtxCp,filename,nt);RSB_STDOUT("\t%10.6lf\n",(RSB_FPINV(csr_spg_time)*ops)/RSB_MILLION_I );
+	RSB_STDOUT("%%:RSB_SPGEMM_PERFORMANCE:");RSB_STDOUT_MATRIX_ESSENTIALS(mtxCp,filename,nt);RSB_STDOUT("\t%10.6lf\n",(RSB_FPINV(rsb_spg_time)*ops)/RSB_MILLION_I );
+	RSB_STDOUT("%%:CSR_SPGEMM_TIME:");RSB_STDOUT_MATRIX_ESSENTIALS(mtxCp,filename,nt);RSB_STDOUT("\t%10.6lf\n",(csr_spg_time));
+	RSB_STDOUT("%%:RSB_SPGEMM_TIME:");RSB_STDOUT_MATRIX_ESSENTIALS(mtxCp,filename,nt);RSB_STDOUT("\t%10.6lf\n",(rsb_spg_time));
+	RSB_STDOUT("%%:SPGEMM_OPS:");RSB_STDOUT_MATRIX_ESSENTIALS(mtxCp,filename,nt);RSB_STDOUT("\t%zd\n",ops);
+#endif /* RSB_ALLOW_STDOUT */
+#endif /* RSB_WANT_SPGEMM_VERBOSE_PROGRAM */
+
+	rsb__do_perror(NULL,errval);
+	if(mtxBp && mtxBp!=mtxAp)
+		RSB_MTX_FREE(mtxBp);
+	if(mtxAp)
+	       	RSB_MTX_FREE(mtxAp);
+
+	errval = rsb_file_mtx_save(mtxCp, cfilename);
+	rsb__do_perror(NULL,errval);
+
+#if WANT_SPGEMM_FULL
+	if(mtxCp)
+		RSB_MTX_FREE(mtxCp);
+#else /* WANT_SPGEMM_FULL */
+	RSB_CONDITIONAL_FREE(CIA);
+	RSB_CONDITIONAL_FREE(CJA);
+	RSB_CONDITIONAL_FREE(CVA);
+#endif /* WANT_SPGEMM_FULL */
+
+	if((errval = rsb_lib_exit(RSB_NULL_EXIT_OPTIONS)) != RSB_ERR_NO_ERROR)
+		; /* errval is reported via RSB_DO_ERR_RETURN below */
+
+	RSB_DO_ERR_RETURN(errval)
+}
+
+struct rsb_mtx_t * rsb__do_matrix_mul(rsb_type_t typecode, rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp, rsb_trans_t transB, const void *betap, const struct rsb_mtx_t * mtxBp, rsb_err_t * errvalp)
+{
+	return rsb_spgemm_tmp(typecode,mtxAp,mtxBp,transA,transB,alphap,betap,errvalp,NULL,NULL);
+}
+
+/* @endcond */
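
The flow exercised by rsb_do_spgemm_test_code() above (initialize the library,
load A and B from Matrix Market files, multiply, save C) can also be reached
through the public rsb.h interface. A minimal sketch, assuming the rsb_spmsp()
and rsb_file_mtx_load() entry points declared by this release, and passing NULL
scaling factors as the test code above does:

    #include <rsb.h>

    int main(int argc, char *argv[])
    {
        rsb_err_t errval = RSB_ERR_NO_ERROR;
        struct rsb_mtx_t *mtxAp = NULL, *mtxBp = NULL, *mtxCp = NULL;

        if (rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
            return 1;
        /* load A and B (defaulting to the same file, as the test code does) */
        mtxAp = rsb_file_mtx_load(argc > 1 ? argv[1] : "pd.mtx",
                RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS, RSB_NUMERICAL_TYPE_DEFAULT, &errval);
        mtxBp = rsb_file_mtx_load(argc > 2 ? argv[2] : "pd.mtx",
                RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS, RSB_NUMERICAL_TYPE_DEFAULT, &errval);
        if (mtxAp && mtxBp) /* C = A * B */
            mtxCp = rsb_spmsp(RSB_NUMERICAL_TYPE_DEFAULT, RSB_TRANSPOSITION_N, NULL, mtxAp,
                    RSB_TRANSPOSITION_N, NULL, mtxBp, &errval);
        if (mtxCp)
            errval = rsb_file_mtx_save(mtxCp, argc > 3 ? argv[3] : "pd.tmp.mtx");
        if (mtxCp) rsb_mtx_free(mtxCp);
        if (mtxBp) rsb_mtx_free(mtxBp);
        if (mtxAp) rsb_mtx_free(mtxAp);
        rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
        return (errval == RSB_ERR_NO_ERROR) ? 0 : 1;
    }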
diff --git a/rsb_spgemm.h b/rsb_spgemm.h
new file mode 100644
index 0000000..30579c1
--- /dev/null
+++ b/rsb_spgemm.h
@@ -0,0 +1,38 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains functions for sparse matrix multiply.
+ * FIXME : unfinished code.
+ * */
+
+#ifndef RSB_SPGEMM_H_INCLUDED
+#define RSB_SPGEMM_H_INCLUDED
+#include "rsb_internals.h"	/* rsb_coo_matrix_t */
+rsb_err_t rsb_do_spgemm_test_code(const int argc, char * const argv[]);
+struct rsb_mtx_t * rsb__do_matrix_mul(rsb_type_t typecode, rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp, rsb_trans_t transB, const void *betap, const struct rsb_mtx_t * mtxBp, rsb_err_t * errvalp);
+rsb_err_t rsb__do_spgemm_to_dense(rsb_type_t typecode, rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp, rsb_trans_t transB, const void *betap, const struct rsb_mtx_t * mtxBp, rsb_coo_idx_t ldc, rsb_coo_idx_t nr, rsb_coo_idx_t nc, rsb_bool_t isccolmajor, void *cVA, rsb_time_t * dtp, size_t * opsp);
+#endif /* RSB_SPGEMM_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_spgemm_csr.c b/rsb_spgemm_csr.c
new file mode 100644
index 0000000..16546f5
--- /dev/null
+++ b/rsb_spgemm_csr.c
@@ -0,0 +1,396 @@
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains some CSR sparse matrices multiplication code.
+ * */
+
+/*
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+#include "rsb_internals.h"
+
+
+rsb_err_t rsb__do_util_csr_csr_sparse_mul_serial(rsb_nnz_idx_t * PA, rsb_coo_idx_t * JA, void *VA_, const rsb_nnz_idx_t *ARP, const rsb_nnz_idx_t *BRP, const rsb_coo_idx_t *AJA, const rsb_coo_idx_t *BJA, const void * aVA_, const void * bVA_, const rsb_coo_idx_t cm, const rsb_coo_idx_t ck, rsb_nnz_idx_t * p, void * acc_, rsb_nnz_idx_t * opsp , rsb_type_t typecode, const rsb_coo_idx_t afr, const rsb_coo_idx_t ars)
+{
+	rsb_nnz_idx_t cblocks=0; 
+	rsb_nnz_idx_t ops=0; 
+	rsb_coo_idx_t ai,aj;
+	rsb_coo_idx_t al,bl,cl;
+	rsb_coo_idx_t bi,bj;
+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+	double *VA=VA_,*acc=acc_;
+	const double *aVA=aVA_,*bVA=bVA_;
+	for(ai=afr;ai<cm;ai+=ars)
+	{
+		rsb_nnz_idx_t aro;
+		rsb_nnz_idx_t are;
+		rsb_nnz_idx_t arb;
+		rsb_nnz_idx_t marker;
+
+		//assert(cblocks==PA[ai]);	// this is true on the serial execution of this loop
+		cblocks=PA[ai];		// this shall work even in a parallel execution of this loop (with differing acc/p arrays)
+		marker=cblocks+1;
+		aro=ARP[ai];
+		are=ARP[ai+1];
+		arb=ARP[ai+1]-ARP[ai];
+		/* we start row ai of target matrix C */
+		for(al=aro;al<are;++al)
+		{
+			rsb_nnz_idx_t bro=BRP[aj=AJA[al]];
+			rsb_nnz_idx_t bre=BRP[aj+1];
+/*			rsb_nnz_idx_t bcb=BRP[aj+1] - BRP[aj];*/
+			for(bl=bro;bl<bre;++bl)
+			{
+				bi=aj; bj=BJA[bl];
+				if(p[bj]<marker)
+					p[bj]=marker,
+					(JA)[cblocks++]=bj,
+					acc[bj] =aVA[al]*bVA[bl];
+				else
+					acc[bj]+=aVA[al]*bVA[bl];
+			}
+/*#if RSB_WANT_SPGEMM_MFLOPS*/
+			ops+=(bre-bro);
+/*#endif*/
+		}
+
+		for(cl=(PA)[ai];cl<(PA)[ai+1];++cl)
+		{
+			((double*)(VA))[cl]=acc[(JA)[cl]];	/* FIXME */
+		}
+	}
+	if(opsp)*opsp=ops;
+	}
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+	float *VA=VA_,*acc=acc_;
+	const float *aVA=aVA_,*bVA=bVA_;
+	for(ai=afr;ai<cm;ai+=ars)
+	{
+		rsb_nnz_idx_t aro;
+		rsb_nnz_idx_t are;
+		rsb_nnz_idx_t arb;
+		rsb_nnz_idx_t marker;
+
+		//assert(cblocks==PA[ai]);	// this is true on the serial execution of this loop
+		cblocks=PA[ai];		// this shall work even in a parallel execution of this loop (with differing acc/p arrays)
+		marker=cblocks+1;
+		aro=ARP[ai];
+		are=ARP[ai+1];
+		arb=ARP[ai+1]-ARP[ai];
+		/* we start row ai of target matrix C */
+		for(al=aro;al<are;++al)
+		{
+			rsb_nnz_idx_t bro=BRP[aj=AJA[al]];
+			rsb_nnz_idx_t bre=BRP[aj+1];
+/*			rsb_nnz_idx_t bcb=BRP[aj+1] - BRP[aj];*/
+			for(bl=bro;bl<bre;++bl)
+			{
+				bi=aj; bj=BJA[bl];
+				if(p[bj]<marker)
+					p[bj]=marker,
+					(JA)[cblocks++]=bj,
+					acc[bj] =aVA[al]*bVA[bl];
+				else
+					acc[bj]+=aVA[al]*bVA[bl];
+			}
+/*#if RSB_WANT_SPGEMM_MFLOPS*/
+			ops+=(bre-bro);
+/*#endif*/
+		}
+
+		for(cl=(PA)[ai];cl<(PA)[ai+1];++cl)
+		{
+			((float*)(VA))[cl]=acc[(JA)[cl]];	/* FIXME */
+		}
+	}
+	if(opsp)*opsp=ops;
+	}
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+	float complex *VA=VA_,*acc=acc_;
+	const float complex *aVA=aVA_,*bVA=bVA_;
+	for(ai=afr;ai<cm;ai+=ars)
+	{
+		rsb_nnz_idx_t aro;
+		rsb_nnz_idx_t are;
+		rsb_nnz_idx_t arb;
+		rsb_nnz_idx_t marker;
+
+		//assert(cblocks==PA[ai]);	// this is true on the serial execution of this loop
+		cblocks=PA[ai];		// this shall work even in a parallel execution of this loop (with differing acc/p arrays)
+		marker=cblocks+1;
+		aro=ARP[ai];
+		are=ARP[ai+1];
+		arb=ARP[ai+1]-ARP[ai];
+		/* we start row ai of target matrix C */
+		for(al=aro;al<are;++al)
+		{
+			rsb_nnz_idx_t bro=BRP[aj=AJA[al]];
+			rsb_nnz_idx_t bre=BRP[aj+1];
+/*			rsb_nnz_idx_t bcb=BRP[aj+1] - BRP[aj];*/
+			for(bl=bro;bl<bre;++bl)
+			{
+				bi=aj; bj=BJA[bl];
+				if(p[bj]<marker)
+					p[bj]=marker,
+					(JA)[cblocks++]=bj,
+					acc[bj] =aVA[al]*bVA[bl];
+				else
+					acc[bj]+=aVA[al]*bVA[bl];
+			}
+/*#if RSB_WANT_SPGEMM_MFLOPS*/
+			ops+=(bre-bro);
+/*#endif*/
+		}
+
+		for(cl=(PA)[ai];cl<(PA)[ai+1];++cl)
+		{
+			((float complex*)(VA))[cl]=acc[(JA)[cl]];	/* FIXME */
+		}
+	}
+	if(opsp)*opsp=ops;
+	}
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+	double complex *VA=VA_,*acc=acc_;
+	const double complex *aVA=aVA_,*bVA=bVA_;
+	for(ai=afr;ai<cm;ai+=ars)
+	{
+		rsb_nnz_idx_t aro;
+		rsb_nnz_idx_t are;
+		rsb_nnz_idx_t arb;
+		rsb_nnz_idx_t marker;
+
+		//assert(cblocks==PA[ai]);	// this is true on the serial execution of this loop
+		cblocks=PA[ai];		// this shall work even in a parallel execution of this loop (with differing acc/p arrays)
+		marker=cblocks+1;
+		aro=ARP[ai];
+		are=ARP[ai+1];
+		arb=ARP[ai+1]-ARP[ai];
+		/* we start row ai of target matrix C */
+		for(al=aro;al<are;++al)
+		{
+			rsb_nnz_idx_t bro=BRP[aj=AJA[al]];
+			rsb_nnz_idx_t bre=BRP[aj+1];
+/*			rsb_nnz_idx_t bcb=BRP[aj+1] - BRP[aj];*/
+			for(bl=bro;bl<bre;++bl)
+			{
+				bi=aj; bj=BJA[bl];
+				if(p[bj]<marker)
+					p[bj]=marker,
+					(JA)[cblocks++]=bj,
+					acc[bj] =aVA[al]*bVA[bl];
+				else
+					acc[bj]+=aVA[al]*bVA[bl];
+			}
+/*#if RSB_WANT_SPGEMM_MFLOPS*/
+			ops+=(bre-bro);
+/*#endif*/
+		}
+
+		for(cl=(PA)[ai];cl<(PA)[ai+1];++cl)
+		{
+			((double complex*)(VA))[cl]=acc[(JA)[cl]];	/* FIXME */
+		}
+	}
+	if(opsp)*opsp=ops;
+	}
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+	return RSB_ERR_UNSUPPORTED_TYPE;
+	return RSB_ERR_NO_ERROR;
+}
+
+
+rsb_err_t rsb__do_util_csr_csr_dense_mul_serial(rsb_coo_idx_t ldc, rsb_coo_idx_t nr, rsb_coo_idx_t nc, rsb_bool_t isccolmajor, void *cVA_, const rsb_nnz_idx_t *ARP, const rsb_nnz_idx_t *BRP, const rsb_coo_idx_t *AJA, const rsb_coo_idx_t *BJA, const void * aVA_, const void * bVA_, const rsb_coo_idx_t cm, const rsb_coo_idx_t ck, rsb_nnz_idx_t * opsp , rsb_type_t typecode, const rsb_coo_idx_t afr, const rsb_coo_idx_t ars)
+{
+	rsb_nnz_idx_t ops=0; 
+	rsb_coo_idx_t ai,aj;
+	rsb_coo_idx_t al,bl;
+	rsb_coo_idx_t bi,bj;
+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+	double *cVA=cVA_;
+	const double *aVA=aVA_,*bVA=bVA_;
+	for(ai=afr;ai<cm;ai+=ars)
+	{
+		rsb_nnz_idx_t aro;
+		rsb_nnz_idx_t are;
+		rsb_nnz_idx_t arb;
+
+		aro=ARP[ai];
+		are=ARP[ai+1];
+		arb=ARP[ai+1]-ARP[ai];
+		/* we start row ai of target matrix C */
+		for(al=aro;al<are;++al)
+		{
+			rsb_nnz_idx_t bro=BRP[aj=AJA[al]];
+			rsb_nnz_idx_t bre=BRP[aj+1];
+/*			rsb_nnz_idx_t bcb=BRP[aj+1] - BRP[aj];*/
+			for(bl=bro;bl<bre;++bl)
+			{
+				bi=aj; bj=BJA[bl];
+
+				RSB_BLOCK_X_MAJOR_REFERENCE(cVA,ldc,ai,bj,isccolmajor)+=aVA[al]*bVA[bl];
+			}
+/*#if RSB_WANT_SPGEMM_MFLOPS*/
+			ops+=(bre-bro);
+/*#endif*/
+		}
+	}
+	if(opsp)*opsp=ops;
+	}
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+	float *cVA=cVA_;
+	const float *aVA=aVA_,*bVA=bVA_;
+	for(ai=afr;ai<cm;ai+=ars)
+	{
+		rsb_nnz_idx_t aro;
+		rsb_nnz_idx_t are;
+		rsb_nnz_idx_t arb;
+
+		aro=ARP[ai];
+		are=ARP[ai+1];
+		arb=ARP[ai+1]-ARP[ai];
+		/* we start row ai of target matrix C */
+		for(al=aro;al<are;++al)
+		{
+			rsb_nnz_idx_t bro=BRP[aj=AJA[al]];
+			rsb_nnz_idx_t bre=BRP[aj+1];
+/*			rsb_nnz_idx_t bcb=BRP[aj+1] - BRP[aj];*/
+			for(bl=bro;bl<bre;++bl)
+			{
+				bi=aj; bj=BJA[bl];
+
+				RSB_BLOCK_X_MAJOR_REFERENCE(cVA,ldc,ai,bj,isccolmajor)+=aVA[al]*bVA[bl];
+			}
+/*#if RSB_WANT_SPGEMM_MFLOPS*/
+			ops+=(bre-bro);
+/*#endif*/
+		}
+	}
+	if(opsp)*opsp=ops;
+	}
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+	float complex *cVA=cVA_;
+	const float complex *aVA=aVA_,*bVA=bVA_;
+	for(ai=afr;ai<cm;ai+=ars)
+	{
+		rsb_nnz_idx_t aro;
+		rsb_nnz_idx_t are;
+		rsb_nnz_idx_t arb;
+
+		aro=ARP[ai];
+		are=ARP[ai+1];
+		arb=ARP[ai+1]-ARP[ai];
+		/* we start row ai of target matrix C */
+		for(al=aro;al<are;++al)
+		{
+			rsb_nnz_idx_t bro=BRP[aj=AJA[al]];
+			rsb_nnz_idx_t bre=BRP[aj+1];
+/*			rsb_nnz_idx_t bcb=BRP[aj+1] - BRP[aj];*/
+			for(bl=bro;bl<bre;++bl)
+			{
+				bi=aj; bj=BJA[bl];
+
+				RSB_BLOCK_X_MAJOR_REFERENCE(cVA,ldc,ai,bj,isccolmajor)+=aVA[al]*bVA[bl];
+			}
+/*#if RSB_WANT_SPGEMM_MFLOPS*/
+			ops+=(bre-bro);
+/*#endif*/
+		}
+	}
+	if(opsp)*opsp=ops;
+	}
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+	double complex *cVA=cVA_;
+	const double complex *aVA=aVA_,*bVA=bVA_;
+	for(ai=afr;ai<cm;ai+=ars)
+	{
+		rsb_nnz_idx_t aro;
+		rsb_nnz_idx_t are;
+		rsb_nnz_idx_t arb;
+
+		aro=ARP[ai];
+		are=ARP[ai+1];
+		arb=ARP[ai+1]-ARP[ai];
+		/* we start row ai of target matrix C */
+		for(al=aro;al<are;++al)
+		{
+			rsb_nnz_idx_t bro=BRP[aj=AJA[al]];
+			rsb_nnz_idx_t bre=BRP[aj+1];
+/*			rsb_nnz_idx_t bcb=BRP[aj+1] - BRP[aj];*/
+			for(bl=bro;bl<bre;++bl)
+			{
+				bi=aj; bj=BJA[bl];
+
+				RSB_BLOCK_X_MAJOR_REFERENCE(cVA,ldc,ai,bj,isccolmajor)+=aVA[al]*bVA[bl];
+			}
+/*#if RSB_WANT_SPGEMM_MFLOPS*/
+			ops+=(bre-bro);
+/*#endif*/
+		}
+	}
+	if(opsp)*opsp=ops;
+	}
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+	return RSB_ERR_UNSUPPORTED_TYPE;
+	return RSB_ERR_NO_ERROR;
+}
+
+
+/* @endcond */
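
All four type-specialized kernels above implement the same Gustavson-style row
merge: a dense accumulator acc[] collects partial products for the current
output row, while the marker array p[] records which columns were already
appended to JA, so each output row costs time proportional to the number of
partial products rather than to the full row width. A self-contained,
double-only sketch of the idea (using a simplified marker scheme -- a per-row
mark value with the array initialized to -1 -- rather than the exact PA[ai]+1
scheme of the kernels above):

    #include <stdio.h>

    /* merge row i of CSR A with the rows of CSR B it selects */
    static int csr_mul_row(const int *ARP, const int *AJA, const double *AVA,
                           const int *BRP, const int *BJA, const double *BVA,
                           int i, int *marker, double *acc,
                           int *CJA, double *CVA, int mark)
    {
        int cnnz = 0, al, bl;
        for (al = ARP[i]; al < ARP[i + 1]; ++al) {
            const int aj = AJA[al];
            for (bl = BRP[aj]; bl < BRP[aj + 1]; ++bl) {
                const int bj = BJA[bl];
                if (marker[bj] != mark) {   /* first hit of column bj in this row */
                    marker[bj] = mark;
                    CJA[cnnz++] = bj;
                    acc[bj] = AVA[al] * BVA[bl];
                } else                      /* column bj already present: accumulate */
                    acc[bj] += AVA[al] * BVA[bl];
            }
        }
        for (al = 0; al < cnnz; ++al)       /* gather the accumulated values */
            CVA[al] = acc[CJA[al]];
        return cnnz;
    }

    int main(void)
    {
        /* A = [1 2; 0 3], B = [4 0; 5 6], both 2x2 in 0-based CSR */
        const int    ARP[] = {0, 2, 3}, AJA[] = {0, 1, 1};
        const double AVA[] = {1, 2, 3};
        const int    BRP[] = {0, 1, 3}, BJA[] = {0, 0, 1};
        const double BVA[] = {4, 5, 6};
        int marker[2] = {-1, -1}, CJA[2], i, l, n;
        double acc[2], CVA[2];

        for (i = 0; i < 2; ++i) {
            n = csr_mul_row(ARP, AJA, AVA, BRP, BJA, BVA, i,
                            marker, acc, CJA, CVA, /* mark = */ i);
            for (l = 0; l < n; ++l)         /* expect row 0: 14 12; row 1: 15 18 */
                printf("C(%d,%d) = %g\n", i, CJA[l], CVA[l]);
        }
        return 0;
    }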
diff --git a/rsb_spgemm_csr.h b/rsb_spgemm_csr.h
new file mode 100644
index 0000000..e1903f8
--- /dev/null
+++ b/rsb_spgemm_csr.h
@@ -0,0 +1,49 @@
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains some CSR sparse matrices multiplication code.
+ * */
+
+/*
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+
+#ifndef RSB_SPGEMM_CSR_H_INCLUDED
+#define RSB_SPGEMM_CSR_H_INCLUDED
+#include "rsb_internals.h"
+
+
+rsb_err_t rsb__do_util_csr_csr_sparse_mul_serial(rsb_nnz_idx_t * PA, rsb_coo_idx_t * JA, void *VA_, const rsb_nnz_idx_t *ARP, const rsb_nnz_idx_t *BRP, const rsb_coo_idx_t *AJA, const rsb_coo_idx_t *BJA, const void * aVA_, const void * bVA_, const rsb_coo_idx_t cm, const rsb_coo_idx_t ck, rsb_nnz_idx_t * p, void * acc_, rsb_nnz_idx_t * opsp , rsb_type_t typecode, const rsb_coo_idx_t afr, const rsb_coo_idx_t ars)
+;
+
+rsb_err_t rsb__do_util_csr_csr_dense_mul_serial(rsb_coo_idx_t ldc, rsb_coo_idx_t nr, rsb_coo_idx_t nc, rsb_bool_t isccolmajor, void *cVA_, const rsb_nnz_idx_t *ARP, const rsb_nnz_idx_t *BRP, const rsb_coo_idx_t *AJA, const rsb_coo_idx_t *BJA, const void * aVA_, const void * bVA_, const rsb_coo_idx_t cm, const rsb_coo_idx_t ck, rsb_nnz_idx_t * opsp , rsb_type_t typecode, const rsb_coo_idx_t afr, const rsb_coo_idx_t ars)
+;
+
+#endif /* RSB_SPGEMM_CSR_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_spgemm_csr.m4 b/rsb_spgemm_csr.m4
new file mode 100644
index 0000000..027ea04
--- /dev/null
+++ b/rsb_spgemm_csr.m4
@@ -0,0 +1,149 @@
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains some CSR sparse matrices multiplication code.
+ * */
+include(`rsb_misc.m4')dnl
+include(`do_unroll.m4')dnl
+RSB_M4_HEADER_MESSAGE()dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`
+#ifndef RSB_SPGEMM_CSR_H_INCLUDED
+#define RSB_SPGEMM_CSR_H_INCLUDED
+#include "rsb_internals.h"
+',`dnl
+#include "rsb_internals.h"
+')
+dnl
+
+rsb_err_t rsb__do_util_csr_csr_sparse_mul_serial(rsb_nnz_idx_t * PA, rsb_coo_idx_t * JA, void *VA_, const rsb_nnz_idx_t *ARP, const rsb_nnz_idx_t *BRP, const rsb_coo_idx_t *AJA, const rsb_coo_idx_t *BJA, const void * aVA_, const void * bVA_, const rsb_coo_idx_t cm, const rsb_coo_idx_t ck, rsb_nnz_idx_t * p, void * acc_, rsb_nnz_idx_t * opsp , rsb_type_t typecode, const rsb_coo_idx_t afr, const rsb_coo_idx_t ars)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	rsb_nnz_idx_t cblocks=0; 
+	rsb_nnz_idx_t ops=0; 
+	rsb_coo_idx_t ai,aj;
+	rsb_coo_idx_t al,bl,cl;
+	rsb_coo_idx_t bi,bj;
+
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+dnl
+	{
+	mtype *VA=VA_,*acc=acc_;
+	const mtype *aVA=aVA_,*bVA=bVA_;
+dnl	for(ai=0;ai<cm;++ai)
+	for(ai=afr;ai<cm;ai+=ars)
+	{
+		rsb_nnz_idx_t aro;
+		rsb_nnz_idx_t are;
+		rsb_nnz_idx_t arb;
+		rsb_nnz_idx_t marker;
+
+		//assert(cblocks==PA[ai]);	// this is true on the serial execution of this loop
+		cblocks=PA[ai];		// this shall work even in a parallel execution of this loop (with differing acc/p arrays)
+		marker=cblocks+1;
+		aro=ARP[ai];
+		are=ARP[ai+1];
+		arb=ARP[ai+1]-ARP[ai];
+		/* we start row ai of target matrix C */
+		for(al=aro;al<are;++al)
+		{
+			rsb_nnz_idx_t bro=BRP[aj=AJA[al]];
+			rsb_nnz_idx_t bre=BRP[aj+1];
+/*			rsb_nnz_idx_t bcb=BRP[aj+1] - BRP[aj];*/
+			for(bl=bro;bl<bre;++bl)
+			{
+				bi=aj; bj=BJA[bl];
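+				/* p[] is the sparse-accumulator marker: the first time column bj
+				   occurs in this output row it is appended to JA and acc[bj] is
+				   initialized; later occurrences just accumulate into acc[bj] */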
+				if(p[bj]<marker)
+					p[bj]=marker,
+					(JA)[cblocks++]=bj,
+					acc[bj] =aVA[al]*bVA[bl];
+				else
+					acc[bj]+=aVA[al]*bVA[bl];
+			}
+/*#if RSB_WANT_SPGEMM_MFLOPS*/
+			ops+=(bre-bro);
+/*#endif*/
+		}
+
+		for(cl=(PA)[ai];cl<(PA)[ai+1];++cl)
+		{
+			((mtype*)(VA))[cl]=acc[(JA)[cl]];	/* FIXME */
+		}
+	}
+	if(opsp)*opsp=ops;
+	}
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+
+dnl
+
+rsb_err_t rsb__do_util_csr_csr_dense_mul_serial(rsb_coo_idx_t ldc, rsb_coo_idx_t nr, rsb_coo_idx_t nc, rsb_bool_t isccolmajor, void *cVA_, const rsb_nnz_idx_t *ARP, const rsb_nnz_idx_t *BRP, const rsb_coo_idx_t *AJA, const rsb_coo_idx_t *BJA, const void * aVA_, const void * bVA_, const rsb_coo_idx_t cm, const rsb_coo_idx_t ck, rsb_nnz_idx_t * opsp , rsb_type_t typecode, const rsb_coo_idx_t afr, const rsb_coo_idx_t ars)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	rsb_nnz_idx_t ops=0; 
+	rsb_coo_idx_t ai,aj;
+	rsb_coo_idx_t al,bl;
+	rsb_coo_idx_t bi,bj;
+
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+dnl
+	{
+	mtype *cVA=cVA_;
+	const mtype *aVA=aVA_,*bVA=bVA_;
+dnl	for(ai=0;ai<cm;++ai)
+	for(ai=afr;ai<cm;ai+=ars)
+	{
+		rsb_nnz_idx_t aro;
+		rsb_nnz_idx_t are;
+		rsb_nnz_idx_t arb;
+
+		aro=ARP[ai];
+		are=ARP[ai+1];
+		arb=ARP[ai+1]-ARP[ai];
+		/* we start row ai of target matrix C */
+		for(al=aro;al<are;++al)
+		{
+			rsb_nnz_idx_t bro=BRP[aj=AJA[al]];
+			rsb_nnz_idx_t bre=BRP[aj+1];
+/*			rsb_nnz_idx_t bcb=BRP[aj+1] - BRP[aj];*/
+			for(bl=bro;bl<bre;++bl)
+			{
+				bi=aj; bj=BJA[bl];
+dnl				*(mtype*)(RSB_BLOCK_ROWMAJOR_ADDRESS(cVA,ldc,nr,nc,ai,bj,(sizeof(mtype))))+=aVA[al]*bVA[bl];
+
+				RSB_BLOCK_X_MAJOR_REFERENCE(cVA,ldc,ai,bj,isccolmajor)+=aVA[al]*bVA[bl];
+			}
+/*#if RSB_WANT_SPGEMM_MFLOPS*/
+			ops+=(bre-bro);
+/*#endif*/
+		}
+	}
+	if(opsp)*opsp=ops;
+	}
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+
+dnl
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#endif /* RSB_SPGEMM_CSR_H_INCLUDED */
+')dnl
+dnl
+/* @endcond */
diff --git a/rsb_spmv.c b/rsb_spmv.c
new file mode 100644
index 0000000..ab1c1b8
--- /dev/null
+++ b/rsb_spmv.c
@@ -0,0 +1,437 @@
+/* @cond INNERDOC */
+/*!
+ @file
+ @brief
+ Performance kernel dispatching code, for each type, submatrix size and operation,
+ for the block compressed sparse stripes format.
+ Kernels are unrolled, with no loops, for user-specified blockings only.
+ */
+
+/*
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains functions for sparse recursive multicore matrix vector multiplication.
+ */
+/*
+ * FIXME: many beta-related operations are NOT parallel, which hurts scalability.
+ *
+ * */
+
+
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#include "rsb_internals.h"
+#include "rsb_lock.h"
+RSB_INTERNALS_COMMON_HEAD_DECLS
+/* FIXME: move these macros to a single header in order to avoid identifier clashes */
+/* #define RSB_CBLAS_X_SCAL_SPMV rsb__cblas_Xscal */
+#define RSB__FOREACH_NRHS(NRHSI, NRHS) for (NRHSI=0;NRHSI<NRHS;++NRHSI)
+#define RSB_CBLAS_X_SCAL_SPMV(TYPECODE,N,ALPHAP,A,STRIDE) rsb__cblas_Xscal_parallel((TYPECODE),(N),(ALPHAP),(A),(STRIDE))
+#if RSB_ENABLE_INNER_NRHS_SPMV
+#define RSB_CBLAS_X_SCAL_SPMM(TYPECODE,N,ALPHAP,A,STRIDE) 					\
+{	/* FIXME: this interacts with RSB_INNER_NRHS_SPMV_ARGS */					\
+	rsb_int_t nrhsi = 0;										\
+	RSB__FOREACH_NRHS(nrhsi,nrhs)									\
+	{												\
+		RSB_CBLAS_X_SCAL_SPMV(TYPECODE, N, ALPHAP, RSB_TYPED_OFF_PTR(TYPECODE,A,nrhsi*(outnri)), STRIDE); 	\
+	}												\
+}										/* RSB_CBLAS_X_SCAL_SPMM */
+#else  /* RSB_ENABLE_INNER_NRHS_SPMV */
+#define RSB_CBLAS_X_SCAL_SPMM(TYPECODE,N,ALPHAP,A,STRIDE) RSB_CBLAS_X_SCAL_SPMV(TYPECODE,N,ALPHAP,A,STRIDE) 
+#endif /* RSB_ENABLE_INNER_NRHS_SPMV */
+
+rsb_err_t rsb_do_spmv_non_recursive(const struct rsb_mtx_t * mtxAp, const void * x, void * y, const void *alphap, const void * betap, rsb_coo_idx_t incx, rsb_coo_idx_t incy, rsb_trans_t transA RSB_INNER_NRHS_SPMV_ARGS)
+{
+	/**
+	  	\ingroup gr_internals
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_bool_t nostride = ( incx == 1 && incy == 1 )?RSB_BOOL_TRUE:RSB_BOOL_FALSE;
+	rsb_bool_t should_scale_y = ( betap && !RSB_IS_ELEMENT_ONE( betap,mtxAp->typecode))?RSB_BOOL_TRUE:RSB_BOOL_FALSE;
+	rsb_bool_t use_alpha_one = (!alphap || RSB_IS_ELEMENT_ONE(alphap,mtxAp->typecode))?RSB_BOOL_TRUE:RSB_BOOL_FALSE;
+	rsb_bool_t use_y_zeroing_kernel = (should_scale_y && RSB_IS_ELEMENT_ZERO(betap,mtxAp->typecode) && nostride && use_alpha_one)?RSB_BOOL_TRUE:RSB_BOOL_FALSE;
+
+	/*
+		FIXME : should handle beta in a more specialized fashion.
+		FIXME: beta should be handled in a more specialized fashion,
+		and more specialized alphap cases should be handled as well.
+#if RSB_ENABLE_INNER_NRHS_SPMV
+	/*const size_t outtot=0,rhstot=0;
+	const size_t outnri=0,rhsnri=0;
+	const rsb_int_t nrhs=1;*/
+	/* FIXME: the above should be passed in via arguments */
+	const size_t lenx=(mtxAp->el_size*rhsnri);
+	const size_t leny=(mtxAp->el_size*outnri);
+	rsb_int_t nrhsi=0;
+	for(nrhsi=0;nrhsi<nrhs;++nrhsi)
+	{
+		void      *out=((      rsb_byte_t*)y)+(leny*nrhsi);
+		const void*rhs=((const rsb_byte_t*)x)+(lenx*nrhsi);
+#else /* RSB_ENABLE_INNER_NRHS_SPMV */
+		void      *out=((      rsb_byte_t*)y);
+		const void*rhs=((const rsb_byte_t*)x);
+#endif /* RSB_ENABLE_INNER_NRHS_SPMV */
+
+	if(should_scale_y && !use_y_zeroing_kernel)
+		RSB_CBLAS_X_SCAL_SPMV(mtxAp->typecode,rsb_do_get_rows_of(mtxAp,transA),betap,out,incy);
+	/* no beta specified counts as beta=1, and so no scaling is needed */
+
+	if(use_alpha_one)
+	{
+		/* no alpha specified counts as alpha=1 */
+		if(nostride)
+		{
+			if(use_y_zeroing_kernel)
+				/* y <- a * x  */
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_spmv_uauz(mtxAp,rhs,out,transA));
+			else
+				/* y <- y + a * x  */
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_spmv_uaua(mtxAp,rhs,out,transA));
+		}
+		else
+			/* y <- a * x  , with stride */
+			RSB_DO_ERROR_CUMULATE(errval,rsb__do_spmv_sasa(mtxAp,rhs,out,incx,incy,transA));
+	}
+	else
+	{
+		if(nostride)
+		{
+			/* y <- - a * x  */
+			if(RSB_IS_ELEMENT_MINUS_ONE(alphap,mtxAp->typecode))
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_spmv_unua(mtxAp,rhs,out,transA));
+			/* y <- alpha * a * x  */
+			else
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_spmv_uxua(mtxAp,rhs,out,alphap,transA));
+		}
+		else
+			/* y <- alpha * a * x  , with stride */
+			RSB_DO_ERROR_CUMULATE(errval,rsb__do_spmv_sxsa(mtxAp,rhs,out,alphap,incx,incy,transA));
+	}
+#if RSB_ENABLE_INNER_NRHS_SPMV
+	}
+#endif /* RSB_ENABLE_INNER_NRHS_SPMV */
+	RSB_DO_ERR_RETURN(errval)
+}
+#if 0
+#define RSB_SPMV_VS_DECL	int*mivr=NULL,mivi=0;
+#define RSB_SPMV_VS_ALLOC(MTXAP,ERRVAL,ERRL)	op_flags|=RSB_OP_FLAG_WANT_TRACE_PLOT;if(op_flags & RSB_OP_FLAG_WANT_TRACE_PLOT){ mivr=rsb__calloc(sizeof(int)*((MTXAP)->all_leaf_matrices_n));if(!mivr){ERRVAL=RSB_ERR_ENOMEM;goto ERRL;} }
+#define RSB_SPMV_VS_MARK(MI)		if(op_flags & RSB_OP_FLAG_WANT_TRACE_PLOT){ mivr[mivi++]=(MI);}
+#define RSB_SPMV_VS_DUMP(MTXAP)		if(op_flags & RSB_OP_FLAG_WANT_TRACE_PLOT){ /* for(mivi=0;mivi<((MTXAP)->all_leaf_matrices_n);++mivi) printf("%d ",mivr[mivi]);printf("\n"); */ rsb__dump_postscript_recursion_from_mtx_t(NULL,"spmv-dump.eps",(MTXAP),1,1,512,512,RSB_FLAG_NOFLAGS,0,1,0,mivr); }
+#define RSB_SPMV_VS_DEALLOC		RSB_CONDITIONAL_FREE(mivr);
+#else /* 0 */
+#define RSB_SPMV_VS_DECL
+#define RSB_SPMV_VS_ALLOC(MTXAP,ERRVAL,ERRL)
+#define RSB_SPMV_VS_MARK(MI)
+#define RSB_SPMV_VS_DUMP(MTXAP)
+#define RSB_SPMV_VS_DEALLOC
+#endif /* 0 */
+
+rsb_err_t rsb_do_spmv_recursive_parallel(const struct rsb_mtx_t * mtxAp, const void * x, void * y, const void *alphap, const void * betap, rsb_coo_idx_t incx, rsb_coo_idx_t incy, rsb_trans_t transA, enum rsb_op_flags_t op_flags RSB_INNER_NRHS_SPMV_ARGS)
+{
+	/**
+	  	\ingroup gr_internals
+	*/
+
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_SPMV_VS_DECL
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+
+	const struct rsb_translated_matrix_t * all_leaf_matrices=NULL;
+	struct rsb_spmv_lock_struct_t lock;
+	rsb_submatrix_idx_t all_leaf_matrices_n=0;
+
+
+	if(!rsb__is_recursive_matrix(mtxAp->flags))
+		return rsb_do_spmv_non_recursive(mtxAp,x,y,alphap,betap,incx,incy,transA RSB_INNER_NRHS_SPMV_ARGS_IDS);
+
+	all_leaf_matrices  =mtxAp->all_leaf_matrices;
+	all_leaf_matrices_n=mtxAp->all_leaf_matrices_n;
+
+	if(!all_leaf_matrices || all_leaf_matrices_n<1)
+	{errval = RSB_ERR_ENOMEM;goto err;}
+
+	errval = rsb_do_spmv_lock_init(&lock,rsb_global_session_handle.rsb_want_threads,all_leaf_matrices_n,mtxAp,op_flags,transA,y,incy);
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+
+#if 0
+	if(betap && !RSB_IS_ELEMENT_ONE(betap,mtxAp->typecode))
+	{
+	#pragma omp parallel shared(y,mtxAp,rsb_global_session_handle)  RSB_NTC 
+	{
+		rsb_nnz_idx_t tdim = rsb_do_get_rows_of(mtxAp,transA),dim,chunk;
+		rsb_char_t * yy=y;
+		rsb_thr_t th_id = omp_get_thread_num();
+		if(th_id >= rsb_global_session_handle.rsb_want_threads)
+			goto scaled;
+		chunk=tdim/rsb_global_session_handle.rsb_want_threads;
+		yy+=th_id*chunk*incy;
+		if(th_id == rsb_global_session_handle.rsb_want_threads-1)
+			dim=tdim-th_id*chunk;
+		else		
+			dim=chunk;
+
+		if(RSB_IS_ELEMENT_ZERO(betap,mtxAp->typecode))
+			RSB_CBLAS_X_SCAL_SPMM(mtxAp->typecode,dim,NULL,yy,incy);
+		else
+			RSB_CBLAS_X_SCAL_SPMM(mtxAp->typecode,dim,betap,yy,incy);
+		scaled:
+		RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS;
+	}
+	}
+	#pragma omp barrier
+#else /* 0 */
+	/* TODO: make the following parallel */
+	if(betap && !RSB_IS_ELEMENT_ONE(betap,mtxAp->typecode))
+		RSB_CBLAS_X_SCAL_SPMM(mtxAp->typecode,rsb_do_get_rows_of(mtxAp,transA),betap,y,incy);
+#endif /* 0 */
+	RSB_SPMV_VS_ALLOC(mtxAp,errval,err)
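+	/* Each thread repeatedly scans the leaf submatrix list, claims (via the lock)
+	   any leaf whose output interval is free, multiplies it, and releases it,
+	   until all all_leaf_matrices_n leaves have been dispatched. */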
+	#pragma omp parallel reduction(|:errval) shared(lock,all_leaf_matrices,mtxAp)  RSB_NTC 
+{
+	rsb_thr_t th_id = omp_get_thread_num();
+	rsb_submatrix_idx_t n=0;
+	rsb_submatrix_idx_t dm=0;
+
+	if(th_id >= rsb_global_session_handle.rsb_want_threads)
+		goto skip;
+
+	if(th_id>=all_leaf_matrices_n)
+		goto skip;
+
+again:
+	for(n=0;RSB_LIKELY(n<all_leaf_matrices_n);++n)
+#if RSB_WANT_SM_TO_THREAD_MOD_MAPPING && !RSB_WANT_EARLY_PARALLEL_REGION_JUMPOUT_SPMV 
+	if( (n % RSB_MIN(all_leaf_matrices_n,rsb_global_session_handle.rsb_want_threads)) == th_id )
+#endif /* RSB_WANT_SM_TO_THREAD_MOD_MAPPING */
+	{
+		const struct rsb_mtx_t *submatrix=all_leaf_matrices[n].mtxlp;
+		char *ov=y;
+		rsb_bool_t gomv = RSB_BOOL_FALSE;
+		rsb_coo_idx_t oincy=incy;
+#if RSB_WANT_SPMV_WITH_REDUCE
+		rsb_coo_idx_t rh,r0;	/* new */
+#endif /* RSB_WANT_SPMV_WITH_REDUCE */
+		#pragma omp critical (rsb_spmv_crs)
+#if RSB_WANT_BOUNDED_BOXES_SPMV
+		{ gomv=(rsb_do_spmv_lock_get(&lock,th_id,submatrix->broff,submatrix->bm,submatrix->bcoff,submatrix->bk,n,transA,&ov,&oincy)==RSB_BOOL_TRUE); if(gomv==RSB_BOOL_TRUE){RSB_SPMV_VS_MARK(n);} }
+#else /* RSB_WANT_BOUNDED_BOXES_SPMV */
+		{ gomv=(rsb_do_spmv_lock_get(&lock,th_id,submatrix->roff,submatrix->nr,submatrix->coff,submatrix->nc,n,transA,&ov,&oincy)==RSB_BOOL_TRUE); if(gomv==RSB_BOOL_TRUE){RSB_SPMV_VS_MARK(n);} }
+#endif /* RSB_WANT_BOUNDED_BOXES_SPMV */
+		if(gomv == RSB_BOOL_TRUE)
+		{
+			const char * offx=NULL; char *offy=NULL;
+			const size_t scoff=submatrix->coff-mtxAp->coff;
+			const size_t sroff=submatrix->roff-mtxAp->roff;
+			offy=((char*)ov)+(mtxAp->el_size*sroff)*oincy,offx=((const char*)x)+(mtxAp->el_size*scoff)*incx;
+
+			/* FIXME */
+			RSB_ASSERT(scoff>=0);
+			RSB_ASSERT(sroff>=0);
+			RSB_DO_ERROR_CUMULATE(errval,rsb_do_spmv_non_recursive(submatrix,offx,offy,alphap,NULL,incx,oincy,transA RSB_INNER_NRHS_SPMV_ARGS_IDS));
+                       	#pragma omp critical (rsb_spmv_crs)
+			{rsb_do_spmv_lock_release(&lock,th_id,ov);RSB_DO_SPMV_LOCK_DM_INC(lock);}
+		}
+#if RSB_WANT_SPMV_WITH_REDUCE
+		if(gomv == RSB_BOOL_ALMOST_TRUE)
+		{
+                       	#pragma omp critical (rsb_spmv_crs)
+			{rsb__do_pick_candidate_interval_for_reduce(&lock,th_id,&ov,&r0,&rh);}
+
+			if(ov && ov!=y)
+			{
+				rsb__vectors_left_sum_reduce_and_zero(y,ov,mtxAp->typecode,rh,oincy,r0);
+                       		#pragma omp critical (rsb_spmv_crs)
+                       		{ rsb__do_release_candidate_interval_for_reduce(&lock,th_id,ov,r0,rh);}
+			}
+		}
+#endif /* RSB_WANT_SPMV_WITH_REDUCE*/
+	}
+		#pragma omp critical (rsb_spmv_crs)
+		{ dm = RSB_DO_SPMV_LOCK_DM(lock); }
+		if(dm<all_leaf_matrices_n
+#if RSB_WANT_EARLY_PARALLEL_REGION_JUMPOUT_SPMV
+			&& ((all_leaf_matrices_n-dm)>th_id)
+#endif	/* RSB_WANT_EARLY_PARALLEL_REGION_JUMPOUT_SPMV */
+		)goto again;
+skip:
+	RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS;
+	/* done */
+}
+	RSB_SPMV_VS_DUMP(mtxAp)
+err:
+	RSB_SPMV_VS_DEALLOC
+#if   !defined(__xlC__)
+	RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS;
+	/* FIXME: xlc does not allow this barrier, but we have experienced problems without it */
+	#pragma omp barrier
+#endif /* __xlC__ */
+	RSB_DO_ERROR_CUMULATE(errval,rsb_do_spmv_lock_free(&lock));
+#else /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+	errval = RSB_ERR_UNIMPLEMENTED_YET;
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb_do_spmv_recursive_serial(const struct rsb_mtx_t * mtxAp, const void * x, void * y, const void *alphap, const void * betap, rsb_coo_idx_t incx, rsb_coo_idx_t incy, rsb_trans_t transA RSB_INNER_NRHS_SPMV_ARGS)
+{
+	/**
+	  	\ingroup gr_internals
+		This function does not offer result vector accumulation in the case of implicit-diagonal matrices.
+	*/
+	struct rsb_mtx_t * submatrix=NULL;
+	rsb_submatrix_idx_t i,j;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if( rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		void*offy=NULL;
+		const void *offx=NULL;
+
+		if(betap && !RSB_IS_ELEMENT_ONE(betap,mtxAp->typecode))
+		{
+			/* should scale the output vector */
+			RSB_CBLAS_X_SCAL_SPMM(mtxAp->typecode,rsb_do_get_rows_of(mtxAp,transA),betap,y,incy);
+		}
+
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix)
+		{
+			size_t scoff=submatrix->coff-mtxAp->coff;
+			size_t sroff=submatrix->roff-mtxAp->roff;
+
+			/* FIXME */
+			RSB_ASSERT(scoff>=0);
+			RSB_ASSERT(sroff>=0);
+
+			offy=((char*)y)+(mtxAp->el_size*sroff)*incy,offx=((const char*)x)+(mtxAp->el_size*scoff)*incx;
+			RSB_DO_ERROR_CUMULATE(errval,rsb_do_spmv_recursive_serial(submatrix,offx,offy,alphap,NULL,incx,incy,transA RSB_INNER_NRHS_SPMV_ARGS_IDS));
+		}
+	}
+	else
+	{
+		RSB_DO_ERROR_CUMULATE(errval,rsb_do_spmv_non_recursive(mtxAp,x,y,alphap,betap,incx,incy,transA RSB_INNER_NRHS_SPMV_ARGS_IDS));
+	}
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb_do_spmv_general(rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp, const void * x, rsb_coo_idx_t incx, const void * betap, void * y, rsb_coo_idx_t incy, enum rsb_op_flags_t op_flags RSB_OUTER_NRHS_SPMV_ARGS)
+{
+	/**
+	  	\ingroup gr_internals
+	*/
+	rsb_err_t errval = RSB_ERR_BADARGS;
+
+#if RSB_ALLOW_ZERO_DIM
+	if(RSB_ANY_MTX_DIM_ZERO(mtxAp))
+	{
+		errval = RSB_ERR_NO_ERROR;
+		goto err; /* FIXME: skipping further checks */
+	}
+#endif
+	if(x==y)
+		goto err;
+
+	if(incx<1 || incy<1)
+		goto err;
+
+/*	we tolerate NULL alphap and betap */
+#if 0
+	if(!alphap || !betap)
+		goto err;
+#endif /* 0 */
+
+	if(!mtxAp || !x || !y || transA == RSB_INVALID_FLAGS)
+		goto err;
+
+#if 0
+	errval = rsb_do_spmv_recursive_serial(mtxAp,x,y,alphap,betap,incx,incy,transA RSB_INNER_NRHS_SPMV_ARGS_IDS);
+	goto done;
+#endif /* 0 */
+
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+	if(RSB_UNLIKELY(op_flags == RSB_OP_FLAG_WANT_SERIAL))
+		errval = rsb_do_spmv_recursive_serial(mtxAp,x,y,alphap,betap,incx,incy,transA RSB_INNER_NRHS_SPMV_ARGS_IDS);
+	else
+	{
+		RSB_NUM_THREADS_DECL
+		RSB_NUM_THREADS_PUSH
+		errval = rsb_do_spmv_recursive_parallel(mtxAp,x,y,alphap,betap,incx,incy,transA,op_flags RSB_OUTER_NRHS_SPMV_ARGS_IDS	);
+		RSB_NUM_THREADS_POP
+	}
+	/* the RSB_OP_FLAG_FAKE_LOCK case is handled by rsb_do_spmv_recursive_parallel */
+#else /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+	errval = rsb_do_spmv_recursive_serial(mtxAp,x,y,alphap,betap,incx,incy,transA RSB_INNER_NRHS_SPMV_ARGS_IDS);
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+	goto done;
+done:
+	if(!RSB_UNLIKELY(op_flags&RSB_OP_FLAG_DIAGONAL_OVERRIDE_EXPLICIT)) // NEW: fix for odd spsv / implicit-diagonal / non-parallel cases
+	if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_UNIT_DIAG_IMPLICIT))
+		rsb__BLAS_Xaxpy_parallel(rsb_do_get_rows_of(mtxAp,transA),alphap,y,incy,x,incx,mtxAp->typecode);
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+/* @endcond */
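
The argument conventions enforced by rsb_do_spmv_general() above (NULL alphap
or betap count as one, incx/incy must be positive, x must not alias y) are the
ones seen by callers of the public multiplier. A minimal sketch, assuming the
rsb_spmv() and rsb_mtx_alloc_from_coo_const() entry points declared in the
rsb.h of this release:

    #include <rsb.h>
    #include <stdio.h>

    int main(void)
    {
        /* y := alpha*A*x + beta*y, with A = [1 0; 0 2] given in COO form */
        const rsb_coo_idx_t IA[] = {0, 1}, JA[] = {0, 1};
        const double VA[] = {1.0, 2.0}, x[] = {1.0, 1.0};
        double y[] = {10.0, 10.0};
        const double alpha = 2.0, beta = 1.0;
        rsb_err_t errval = RSB_ERR_NO_ERROR;
        struct rsb_mtx_t *mtxAp = NULL;

        if (rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
            return 1;
        mtxAp = rsb_mtx_alloc_from_coo_const(VA, IA, JA, 2,
                RSB_NUMERICAL_TYPE_DOUBLE, 2, 2,
                RSB_DEFAULT_ROW_BLOCKING, RSB_DEFAULT_COL_BLOCKING,
                RSB_FLAG_DEFAULT_RSB_MATRIX_FLAGS, &errval);
        if (mtxAp)
        {
            /* passing NULL alphap/betap would count as alpha = beta = 1 */
            errval = rsb_spmv(RSB_TRANSPOSITION_N, &alpha, mtxAp, x, 1, &beta, y, 1);
            printf("y = [%g, %g]\n", y[0], y[1]); /* expect [12, 14] */
            rsb_mtx_free(mtxAp);
        }
        rsb_lib_exit(RSB_NULL_EXIT_OPTIONS);
        return (errval == RSB_ERR_NO_ERROR) ? 0 : 1;
    }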
diff --git a/rsb_spmv.h b/rsb_spmv.h
new file mode 100644
index 0000000..f4d25a4
--- /dev/null
+++ b/rsb_spmv.h
@@ -0,0 +1,115 @@
+/* @cond INNERDOC */
+/*!
+ @file
+ @brief
+ Performance kernel dispatching code, for each type, submatrix size and operation,
+ for the block compressed sparse stripes format.
+ Kernels are unrolled, with no loops, for user-specified blockings only.
+ */
+
+/*
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains functions for sparse recursive multicore matrix vector multiplication.
+ */
+/*
+ * FIXME: many beta-related operations are NOT parallel, which hurts scalability.
+ *
+ * */
+
+
+
+
+#ifndef RSB_SPMV_H_INCLUDED
+#define RSB_SPMV_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#include "rsb_internals.h"
+#include "rsb_lock.h"
+#define RSB_ENABLE_INNER_NRHS_SPMV 1
+#if RSB_ENABLE_INNER_NRHS_SPMV
+#define RSB_INNER_NRHS_SPMV_ARGS	,const rsb_int_t nrhs, /*const size_t outtot, const size_t rhstot,*/ const size_t outnri, const size_t rhsnri
+#define RSB_INNER_NRHS_SPMV_ARGS_IDS	,nrhs/*,outtot,rhstot*/,outnri,rhsnri
+#define RSB_INNER_NRHS_SPMV_YSCA_IDS	,nrhs,outnri
+#define RSB_OUTER_NRHS_SPMV_ARGS	,const rsb_int_t nrhs, const size_t outnri, const size_t rhsnri
+#define RSB_OUTER_NRHS_SPMV_ARGS_IDS	,nrhs,outnri,rhsnri
+#else /* RSB_ENABLE_INNER_NRHS_SPMV */
+#define RSB_INNER_NRHS_SPMV_ARGS	
+#define RSB_INNER_NRHS_SPMV_YSCA_IDS		/* */
+#endif /* RSB_ENABLE_INNER_NRHS_SPMV */
+#define RSB_DEFAULT_INNER_NRHS_SPMV_ARGS	,1,/*0,0,*/0,0
+#define RSB_DEFAULT_OUTER_NRHS_SPMV_ARGS	,1,0,0
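+/* The *_NRHS_* macros thread the right-hand-side count (nrhs) and the per-vector
+   strides (outnri, rhsnri) through the SpMV call chain. Note that the #else
+   branch above leaves RSB_INNER_NRHS_SPMV_ARGS_IDS and the two OUTER macros
+   undefined. */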
+
+rsb_err_t rsb_do_spmv_non_recursive(const struct rsb_mtx_t * mtxAp, const void * x, void * y, const void *alphap, const void * betap, rsb_coo_idx_t incx, rsb_coo_idx_t incy, rsb_trans_t transA RSB_INNER_NRHS_SPMV_ARGS)
+;
+rsb_err_t rsb_do_spmv_recursive_parallel(const struct rsb_mtx_t * mtxAp, const void * x, void * y, const void *alphap, const void * betap, rsb_coo_idx_t incx, rsb_coo_idx_t incy, rsb_trans_t transA, enum rsb_op_flags_t op_flags RSB_INNER_NRHS_SPMV_ARGS)
+;
+rsb_err_t rsb_do_spmv_recursive_serial(const struct rsb_mtx_t * mtxAp, const void * x, void * y, const void *alphap, const void * betap, rsb_coo_idx_t incx, rsb_coo_idx_t incy, rsb_trans_t transA RSB_INNER_NRHS_SPMV_ARGS)
+;
+rsb_err_t rsb_do_spmv_general(rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp, const void * x, rsb_coo_idx_t incx, const void * betap, void * y, rsb_coo_idx_t incy, enum rsb_op_flags_t op_flags RSB_OUTER_NRHS_SPMV_ARGS)
+;
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+#endif /* RSB_SPMV_H_INCLUDED */
+
+/* @endcond */
diff --git a/rsb_spmv.m4 b/rsb_spmv.m4
new file mode 100644
index 0000000..fbfdc20
--- /dev/null
+++ b/rsb_spmv.m4
@@ -0,0 +1,451 @@
+dnl
+dnl
+dnl	@author: Michele Martone
+dnl
+/* @cond INNERDOC */
+include(`rsb_krnl_macros.m4')dnl
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains functions for sparse recursive multicore matrix vector multiplication.
+ */
+/*
+ * FIXME: many beta-related operations are NOT parallel, which hurts scalability.
+ *
+ * */
+RSB_M4_HEADER_MESSAGE()dnl
+
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`
+#ifndef RSB_SPMV_H_INCLUDED
+#define RSB_SPMV_H_INCLUDED
+')
+dnl
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+dnl
+#include "rsb_internals.h"
+#include "rsb_lock.h"
+dnl 
+ifdef(`ONLY_WANT_HEADERS',`dnl
+dnl
+#define RSB_ENABLE_INNER_NRHS_SPMV 1
+dnl
+#if RSB_ENABLE_INNER_NRHS_SPMV
+#define RSB_INNER_NRHS_SPMV_ARGS	,const rsb_int_t nrhs, /*const size_t outtot, const size_t rhstot,*/ const size_t outnri, const size_t rhsnri
+#define RSB_INNER_NRHS_SPMV_ARGS_IDS	,nrhs/*,outtot,rhstot*/,outnri,rhsnri
+#define RSB_INNER_NRHS_SPMV_YSCA_IDS	,nrhs,outnri
+#define RSB_OUTER_NRHS_SPMV_ARGS	,const rsb_int_t nrhs, const size_t outnri, const size_t rhsnri
+#define RSB_OUTER_NRHS_SPMV_ARGS_IDS	,nrhs,outnri,rhsnri
+#else /* RSB_ENABLE_INNER_NRHS_SPMV */
+#define RSB_INNER_NRHS_SPMV_ARGS	
+#define RSB_INNER_NRHS_SPMV_YSCA_IDS		/* */
+#endif /* RSB_ENABLE_INNER_NRHS_SPMV */
+#define RSB_DEFAULT_INNER_NRHS_SPMV_ARGS	,1,/*0,0,*/0,0
+#define RSB_DEFAULT_OUTER_NRHS_SPMV_ARGS	,1,0,0
+dnl
+',`dnl
+RSB_INTERNALS_COMMON_HEAD_DECLS
+dnl
+dnl
+/* FIXME: move these macros to a single header in order to avoid identifier clashes */
+/* #define RSB_CBLAS_X_SCAL_SPMV rsb__cblas_Xscal */
+#define RSB__FOREACH_NRHS(NRHSI, NRHS) for (NRHSI=0;NRHSI<NRHS;++NRHSI)
+#define RSB_CBLAS_X_SCAL_SPMV(TYPECODE,N,ALPHAP,A,STRIDE) rsb__cblas_Xscal_parallel((TYPECODE),(N),(ALPHAP),(A),(STRIDE))
+#if RSB_ENABLE_INNER_NRHS_SPMV
+#define RSB_CBLAS_X_SCAL_SPMM(TYPECODE,N,ALPHAP,A,STRIDE) 					\
+{	/* FIXME: this interacts with RSB_INNER_NRHS_SPMV_ARGS */					\
+	rsb_int_t nrhsi = 0;										\
+	RSB__FOREACH_NRHS(nrhsi,nrhs)									\
+	{												\
+		RSB_CBLAS_X_SCAL_SPMV(TYPECODE, N, ALPHAP, RSB_TYPED_OFF_PTR(TYPECODE,A,nrhsi*(outnri)), STRIDE); 	\
+	}												\
+}										/* RSB_CBLAS_X_SCAL_SPMM */
+#else  /* RSB_ENABLE_INNER_NRHS_SPMV */
+#define RSB_CBLAS_X_SCAL_SPMM(TYPECODE,N,ALPHAP,A,STRIDE) RSB_CBLAS_X_SCAL_SPMV(TYPECODE,N,ALPHAP,A,STRIDE) 
+#endif /* RSB_ENABLE_INNER_NRHS_SPMV */
+')dnl
+
+dnl
+rsb_err_t rsb_do_spmv_non_recursive(const struct rsb_mtx_t * mtxAp, const void * x, void * y, const void *alphap, const void * betap, rsb_coo_idx_t incx, rsb_coo_idx_t incy, rsb_trans_t transA RSB_INNER_NRHS_SPMV_ARGS)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/**
+	  	\ingroup gr_internals
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_bool_t nostride = ( incx == 1 && incy == 1 )?RSB_BOOL_TRUE:RSB_BOOL_FALSE;
+	rsb_bool_t should_scale_y = ( betap && !RSB_IS_ELEMENT_ONE( betap,mtxAp->typecode))?RSB_BOOL_TRUE:RSB_BOOL_FALSE;
+	rsb_bool_t use_alpha_one = (!alphap || RSB_IS_ELEMENT_ONE(alphap,mtxAp->typecode))?RSB_BOOL_TRUE:RSB_BOOL_FALSE;
+	rsb_bool_t use_y_zeroing_kernel = (should_scale_y && RSB_IS_ELEMENT_ZERO(betap,mtxAp->typecode) && nostride && use_alpha_one)?RSB_BOOL_TRUE:RSB_BOOL_FALSE;
+
+	/*
+		FIXME: beta should be handled in a more specialized fashion,
+		and more specialized alphap cases should be handled as well.
+	*/
+#if RSB_ENABLE_INNER_NRHS_SPMV
+	/*const size_t outtot=0,rhstot=0;
+	const size_t outnri=0,rhsnri=0;
+	const rsb_int_t nrhs=1;*/
+	/* FIXME: the above should be passed in via arguments */
+	const size_t lenx=(mtxAp->el_size*rhsnri);
+	const size_t leny=(mtxAp->el_size*outnri);
+	rsb_int_t nrhsi=0;
+	for(nrhsi=0;nrhsi<nrhs;++nrhsi)
+	{
+		void      *out=((      rsb_byte_t*)y)+(leny*nrhsi);
+		const void*rhs=((const rsb_byte_t*)x)+(lenx*nrhsi);
+#else /* RSB_ENABLE_INNER_NRHS_SPMV */
+		void      *out=((      rsb_byte_t*)y);
+		const void*rhs=((const rsb_byte_t*)x);
+#endif /* RSB_ENABLE_INNER_NRHS_SPMV */
+
+	if(should_scale_y && !use_y_zeroing_kernel)
+		RSB_CBLAS_X_SCAL_SPMV(mtxAp->typecode,rsb_do_get_rows_of(mtxAp,transA),betap,out,incy);
+	/* no beta specified counts as beta=1, and so no scaling is needed */
+
+dnl
+dnl		FIXME: RSB_M4_ARGS_TO_ACTUAL_ARGS is applied twice here; we were forced to do so, probably due to a bug in RSB_M4_ARGS_TO_ACTUAL_ARGS.
+dnl
+	if(use_alpha_one)
+	{
+		/* no alpha specified counts as alpha=1 */
+		if(nostride)
+		{
+			if(use_y_zeroing_kernel)
+				/* y <- a * x  */
+				RSB_DO_ERROR_CUMULATE(errval,RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_IDENTIFIER(`spmv_uauz')(RSB_M4_ARGS_TO_ACTUAL_ARGS(RSB_M4_ARGS_TO_ACTUAL_ARGS((RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_ARGS(`spmv_uauz'))))));
+			else
+				/* y <- y + a * x  */
+				RSB_DO_ERROR_CUMULATE(errval,RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_IDENTIFIER(`spmv_uaua')(RSB_M4_ARGS_TO_ACTUAL_ARGS(RSB_M4_ARGS_TO_ACTUAL_ARGS((RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_ARGS(`spmv_uaua'))))));
+		}
+		else
+			/* y <- a * x  , with stride */
+			RSB_DO_ERROR_CUMULATE(errval,RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_IDENTIFIER(`spmv_sasa')(RSB_M4_ARGS_TO_ACTUAL_ARGS(RSB_M4_ARGS_TO_ACTUAL_ARGS((RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_ARGS(`spmv_sasa'))))));
+	}
+	else
+	{
+		if(nostride)
+		{
+			/* y <- - a * x  */
+			if(RSB_IS_ELEMENT_MINUS_ONE(alphap,mtxAp->typecode))
+				RSB_DO_ERROR_CUMULATE(errval,RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_IDENTIFIER(`spmv_unua')(RSB_M4_ARGS_TO_ACTUAL_ARGS(RSB_M4_ARGS_TO_ACTUAL_ARGS((RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_ARGS(`spmv_unua'))))));
+			/* y <- alpha * a * x  */
+			else
+				RSB_DO_ERROR_CUMULATE(errval,RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_IDENTIFIER(`spmv_uxua')(RSB_M4_ARGS_TO_ACTUAL_ARGS(RSB_M4_ARGS_TO_ACTUAL_ARGS((RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_ARGS(`spmv_uxua'))))));
+		}
+		else
+			/* y <- alpha * a * x  , with stride */
+			RSB_DO_ERROR_CUMULATE(errval,RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_IDENTIFIER(`spmv_sxsa')(RSB_M4_ARGS_TO_ACTUAL_ARGS(RSB_M4_ARGS_TO_ACTUAL_ARGS((RSB_M4_DIRECT_KERNEL_DISPATCH_FUNCTION_ARGS(`spmv_sxsa'))))));
+	}
+dnl
+dnl	FIXME: we deliberately ignore other useful kernels we have:
+dnl	extra_blas_matrix_ops=spmv_sxsx,spmv_uauz,spmv_uxux
+dnl
+#if RSB_ENABLE_INNER_NRHS_SPMV
+	}
+#endif /* RSB_ENABLE_INNER_NRHS_SPMV */
+	RSB_DO_ERR_RETURN(errval)
+}
+')dnl
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+#if 0
+#define RSB_SPMV_VS_DECL	int*mivr=NULL,mivi=0;
+#define RSB_SPMV_VS_ALLOC(MTXAP,ERRVAL,ERRL)	op_flags|=RSB_OP_FLAG_WANT_TRACE_PLOT;if(op_flags & RSB_OP_FLAG_WANT_TRACE_PLOT){ mivr=rsb__calloc(sizeof(int)*((MTXAP)->all_leaf_matrices_n));if(!mivr){ERRVAL=RSB_ERR_ENOMEM;goto ERRL;} }
+#define RSB_SPMV_VS_MARK(MI)		if(op_flags & RSB_OP_FLAG_WANT_TRACE_PLOT){ mivr[mivi++]=(MI);}
+#define RSB_SPMV_VS_DUMP(MTXAP)		if(op_flags & RSB_OP_FLAG_WANT_TRACE_PLOT){ /* for(mivi=0;mivi<((MTXAP)->all_leaf_matrices_n);++mivi) printf("%d ",mivr[mivi]);printf("\n"); */ rsb__dump_postscript_recursion_from_mtx_t(NULL,"spmv-dump.eps",(MTXAP),1,1,512,512,RSB_FLAG_NOFLAGS,0,1,0,mivr); }
+#define RSB_SPMV_VS_DEALLOC		RSB_CONDITIONAL_FREE(mivr);
+#else /* 0 */
+#define RSB_SPMV_VS_DECL
+#define RSB_SPMV_VS_ALLOC(MTXAP,ERRVAL,ERRL)
+#define RSB_SPMV_VS_MARK(MI)
+#define RSB_SPMV_VS_DUMP(MTXAP)
+#define RSB_SPMV_VS_DEALLOC
+#endif /* 0 */
+')dnl
+
+rsb_err_t rsb_do_spmv_recursive_parallel(const struct rsb_mtx_t * mtxAp, const void * x, void * y, const void *alphap, const void * betap, rsb_coo_idx_t incx, rsb_coo_idx_t incy, rsb_trans_t transA, enum rsb_op_flags_t op_flags RSB_INNER_NRHS_SPMV_ARGS)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/**
+	  	\ingroup gr_internals
+	*/
+
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_SPMV_VS_DECL
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+
+	const struct rsb_translated_matrix_t * all_leaf_matrices=NULL;
+	struct rsb_spmv_lock_struct_t lock;
+	rsb_submatrix_idx_t all_leaf_matrices_n=0;
+
+dnl	if(alphap || betap || incx>1 || incy>1 || transA != RSB_TRANSPOSITION_N)	/* FIXME */
+dnl	{errval = RSB_ERR_UNIMPLEMENTED_YET;goto err;}
+
+	if(!rsb__is_recursive_matrix(mtxAp->flags))
+		return rsb_do_spmv_non_recursive(mtxAp,x,y,alphap,betap,incx,incy,transA RSB_INNER_NRHS_SPMV_ARGS_IDS);
+
+	all_leaf_matrices  =mtxAp->all_leaf_matrices;
+	all_leaf_matrices_n=mtxAp->all_leaf_matrices_n;
+
+	if(!all_leaf_matrices || all_leaf_matrices_n<1)
+	{errval = RSB_ERR_ENOMEM;goto err;}
+
+	errval = rsb_do_spmv_lock_init(&lock,rsb_global_session_handle.rsb_want_threads,all_leaf_matrices_n,mtxAp,op_flags,transA,y,incy);
+	if(RSB_SOME_ERROR(errval))
+		goto err;
+
+#if 0
+	if(betap && !RSB_IS_ELEMENT_ONE(betap,mtxAp->typecode))
+	{
+	#pragma omp parallel shared(y,mtxAp,rsb_global_session_handle)  RSB_NTC 
+	{
+		rsb_nnz_idx_t tdim = rsb_do_get_rows_of(mtxAp,transA),dim,chunk;
+		rsb_char_t * yy=y;
+		rsb_thr_t th_id = omp_get_thread_num();
+		if(th_id >= rsb_global_session_handle.rsb_want_threads)
+			goto scaled;
+		chunk=tdim/rsb_global_session_handle.rsb_want_threads;
+		yy+=th_id*chunk*incy;
+		if(th_id == rsb_global_session_handle.rsb_want_threads-1)
+			dim=tdim-th_id*chunk;
+		else		
+			dim=chunk;
+
+		if(RSB_IS_ELEMENT_ZERO(betap,mtxAp->typecode))
+			RSB_CBLAS_X_SCAL_SPMM(mtxAp->typecode,dim,NULL,yy,incy);
+		else
+			RSB_CBLAS_X_SCAL_SPMM(mtxAp->typecode,dim,betap,yy,incy);
+		scaled:
+		RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS;
+	}
+	}
+	#pragma omp barrier
+#else /* 0 */
+	/* TODO: make the following parallel */
+	if(betap && !RSB_IS_ELEMENT_ONE(betap,mtxAp->typecode))
+		RSB_CBLAS_X_SCAL_SPMM(mtxAp->typecode,rsb_do_get_rows_of(mtxAp,transA),betap,y,incy);
+#endif /* 0 */
+	RSB_SPMV_VS_ALLOC(mtxAp,errval,err)
+	#pragma omp parallel reduction(|:errval) shared(lock,all_leaf_matrices,mtxAp)  RSB_NTC 
+{
+	rsb_thr_t th_id = omp_get_thread_num();
+	rsb_submatrix_idx_t n=0;
+	rsb_submatrix_idx_t dm=0;
+
+	if(th_id >= rsb_global_session_handle.rsb_want_threads)
+		goto skip;
+
+	if(th_id>=all_leaf_matrices_n)
+		goto skip;
+
+again:
+	for(n=0;RSB_LIKELY(n<all_leaf_matrices_n);++n)
+dnl	//if(!RSB_BITMAP_GET(lock.bmap,1,lock.subms,0,n))
+#if RSB_WANT_SM_TO_THREAD_MOD_MAPPING && !RSB_WANT_EARLY_PARALLEL_REGION_JUMPOUT_SPMV 
+	if( (n % RSB_MIN(all_leaf_matrices_n,rsb_global_session_handle.rsb_want_threads)) == th_id )
+#endif /* RSB_WANT_SM_TO_THREAD_MOD_MAPPING */
+	{
+		const struct rsb_mtx_t *submatrix=all_leaf_matrices[n].mtxlp;
+		char *ov=y;
+		rsb_bool_t gomv = RSB_BOOL_FALSE;
+		rsb_coo_idx_t oincy=incy;
+#if RSB_WANT_SPMV_WITH_REDUCE
+		rsb_coo_idx_t rh,r0;	/* new */
+#endif /* RSB_WANT_SPMV_WITH_REDUCE */
+		#pragma omp critical (rsb_spmv_crs)
+#if RSB_WANT_BOUNDED_BOXES_SPMV
+		{ gomv=(rsb_do_spmv_lock_get(&lock,th_id,submatrix->broff,submatrix->bm,submatrix->bcoff,submatrix->bk,n,transA,&ov,&oincy)==RSB_BOOL_TRUE); if(gomv==RSB_BOOL_TRUE){RSB_SPMV_VS_MARK(n);} }
+#else /* RSB_WANT_BOUNDED_BOXES_SPMV */
+		{ gomv=(rsb_do_spmv_lock_get(&lock,th_id,submatrix->roff,submatrix->nr,submatrix->coff,submatrix->nc,n,transA,&ov,&oincy)==RSB_BOOL_TRUE); if(gomv==RSB_BOOL_TRUE){RSB_SPMV_VS_MARK(n);} }
+#endif /* RSB_WANT_BOUNDED_BOXES_SPMV */
+		if(gomv == RSB_BOOL_TRUE)
+		{
+			const char * offx=NULL; char *offy=NULL;
+			const size_t scoff=submatrix->coff-mtxAp->coff;
+			const size_t sroff=submatrix->roff-mtxAp->roff;
+dnl			offy=((char*)y)+(mtxAp->el_size*sroff)*incy,offx=((const char*)x)+(mtxAp->el_size*scoff)*incx;
+			offy=((char*)ov)+(mtxAp->el_size*sroff)*oincy,offx=((const char*)x)+(mtxAp->el_size*scoff)*incx;
+
+			/* FIXME */
+			RSB_ASSERT(scoff>=0);
+			RSB_ASSERT(sroff>=0);
+dnl #if 1
+			RSB_DO_ERROR_CUMULATE(errval,rsb_do_spmv_non_recursive(submatrix,offx,offy,alphap,NULL,incx,oincy,transA RSB_INNER_NRHS_SPMV_ARGS_IDS));
+dnl #else
+dnl			RSB_DO_ERROR_CUMULATE(errval,rsb__do_spmv_uaua(submatrix,offx,offy,transA));
+dnl #endif
+                       	#pragma omp critical (rsb_spmv_crs)
+			{rsb_do_spmv_lock_release(&lock,th_id,ov);RSB_DO_SPMV_LOCK_DM_INC(lock);}
+		}
+#if RSB_WANT_SPMV_WITH_REDUCE
+		if(gomv == RSB_BOOL_ALMOST_TRUE)
+		{
+                       	#pragma omp critical (rsb_spmv_crs)
+			{rsb__do_pick_candidate_interval_for_reduce(&lock,th_id,&ov,&r0,&rh);}
+
+			if(ov && ov!=y)
+			{
+				rsb__vectors_left_sum_reduce_and_zero(y,ov,mtxAp->typecode,rh,oincy,r0);
+                       		#pragma omp critical (rsb_spmv_crs)
+                       		{ rsb__do_release_candidate_interval_for_reduce(&lock,th_id,ov,r0,rh);}
+			}
+		}
+#endif /* RSB_WANT_SPMV_WITH_REDUCE*/
+	}
+		#pragma omp critical (rsb_spmv_crs)
+		{ dm = RSB_DO_SPMV_LOCK_DM(lock); }
+		if(dm<all_leaf_matrices_n
+#if RSB_WANT_EARLY_PARALLEL_REGION_JUMPOUT_SPMV
+			&& ((all_leaf_matrices_n-dm)>th_id)
+#endif	/* RSB_WANT_EARLY_PARALLEL_REGION_JUMPOUT_SPMV */
+		)goto again;
+skip:
+	RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS;
+	/* done */
+}
+	RSB_SPMV_VS_DUMP(mtxAp)
+err:
+	RSB_SPMV_VS_DEALLOC
+#if   !defined(__xlC__)
+	RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS;
+	/* FIXME: xlc does not allow a barrier here, yet we have experienced problems without it */
+	#pragma omp barrier
+#endif /* __xlC__ */
+	RSB_DO_ERROR_CUMULATE(errval,rsb_do_spmv_lock_free(&lock));
+#else /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+	errval = RSB_ERR_UNIMPLEMENTED_YET;
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+	RSB_DO_ERR_RETURN(errval)
+}
+')dnl
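+
+/*
+ Illustration of the claiming scheme used by rsb_do_spmv_recursive_parallel()
+ above: each thread repeatedly scans the leaf list, claims (under a critical
+ section) a leaf that is free, multiplies it, releases it, and retries until
+ every leaf has been processed.  A minimal standalone sketch follows; the
+ demo_* names are hypothetical and not part of librsb's API, and the real
+ lock additionally checks row-interval overlap between concurrently active
+ leaves, which is omitted here.
+*/
+#if 0
+#include <stdbool.h>
+
+struct demo_leaf { int roff, nr; bool busy, done; };
+
+static void demo_spmv_parallel(struct demo_leaf *leaf, int nleaves,
+	void (*leaf_spmv)(const struct demo_leaf*))
+{
+	int dm = 0;	/* leaves completed so far */
+	#pragma omp parallel shared(leaf,dm)
+	{
+		int n, done = 0;
+		do
+		{
+			for(n = 0; n < nleaves; ++n)
+			{
+				bool got = false;
+				#pragma omp critical (demo_lock)
+				{ if(!leaf[n].busy && !leaf[n].done) { leaf[n].busy = true; got = true; } }
+				if(!got)
+					continue;
+				leaf_spmv(&leaf[n]);	/* y[roff..roff+nr) += A_n * x */
+				#pragma omp critical (demo_lock)
+				{ leaf[n].busy = false; leaf[n].done = true; ++dm; }
+			}
+			#pragma omp critical (demo_lock)
+			{ done = dm; }
+		} while(done < nleaves);
+	}
+}
+#endif /* 0 */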
+dnl
+dnl
+
+dnl
+rsb_err_t rsb_do_spmv_recursive_serial(const struct rsb_mtx_t * mtxAp, const void * x, void * y, const void *alphap, const void * betap, rsb_coo_idx_t incx, rsb_coo_idx_t incy, rsb_trans_t transA RSB_INNER_NRHS_SPMV_ARGS)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/**
+	  	\ingroup gr_internals
+		This function does not accumulate into the result vector in the case of implicit-diagonal matrices.
+	*/
+	struct rsb_mtx_t * submatrix=NULL;
+	rsb_submatrix_idx_t i,j;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if( rsb__is_recursive_matrix(mtxAp->flags))
+	{
+		void*offy=NULL;
+		const void *offx=NULL;
+
+		if(betap && !RSB_IS_ELEMENT_ONE(betap,mtxAp->typecode))
+		{
+			/* should scale the output vector */
+			RSB_CBLAS_X_SCAL_SPMM(mtxAp->typecode,rsb_do_get_rows_of(mtxAp,transA),betap,y,incy);
+		}
+
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix)
+		{
+			size_t scoff=submatrix->coff-mtxAp->coff;
+			size_t sroff=submatrix->roff-mtxAp->roff;
+
+			/* FIXME */
+			RSB_ASSERT(scoff>=0);
+			RSB_ASSERT(sroff>=0);
+
+			offy=((char*)y)+(mtxAp->el_size*sroff)*incy,offx=((const char*)x)+(mtxAp->el_size*scoff)*incx;
+			RSB_DO_ERROR_CUMULATE(errval,rsb_do_spmv_recursive_serial(submatrix,offx,offy,alphap,NULL,incx,incy,transA RSB_INNER_NRHS_SPMV_ARGS_IDS));
+		}
+	}
+	else
+	{
+		RSB_DO_ERROR_CUMULATE(errval,rsb_do_spmv_non_recursive(mtxAp,x,y,alphap,betap,incx,incy,transA RSB_INNER_NRHS_SPMV_ARGS_IDS));
+	}
+	RSB_DO_ERR_RETURN(errval)
+}
+')dnl
+dnl
+dnl
+
+dnl
+rsb_err_t rsb_do_spmv_general(rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp, const void * x, rsb_coo_idx_t incx, const void * betap, void * y, rsb_coo_idx_t incy, enum rsb_op_flags_t op_flags RSB_OUTER_NRHS_SPMV_ARGS)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/**
+	  	\ingroup gr_internals
+	*/
+	rsb_err_t errval = RSB_ERR_BADARGS;
+
+#if RSB_ALLOW_ZERO_DIM
+	if(RSB_ANY_MTX_DIM_ZERO(mtxAp))
+	{
+		errval = RSB_ERR_NO_ERROR;
+		goto err; /* FIXME: skipping further checks */
+	}
+#endif
+	if(x==y)
+		goto err;
+
+	if(incx<1 || incy<1)
+		goto err;
+
+/*	we tolerate NULL alphap and betap */
+#if 0
+	if(!alphap || !betap)
+		goto err;
+#endif /* 0 */
+
+	if(!mtxAp || !x || !y || transA == RSB_INVALID_FLAGS)
+		goto err;
+
+#if 0
+	errval = rsb_do_spmv_recursive_serial(mtxAp,x,y,alphap,betap,incx,incy,transA RSB_INNER_NRHS_SPMV_ARGS_IDS);
+	goto done;
+#endif /* 0 */
+
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+	if(RSB_UNLIKELY(op_flags == RSB_OP_FLAG_WANT_SERIAL))
+		errval = rsb_do_spmv_recursive_serial(mtxAp,x,y,alphap,betap,incx,incy,transA RSB_INNER_NRHS_SPMV_ARGS_IDS);
+	else
+	{
+		RSB_NUM_THREADS_DECL
+		RSB_NUM_THREADS_PUSH
+		errval = rsb_do_spmv_recursive_parallel(mtxAp,x,y,alphap,betap,incx,incy,transA,op_flags RSB_OUTER_NRHS_SPMV_ARGS_IDS	);
+		RSB_NUM_THREADS_POP
+	}
+	/* the RSB_OP_FLAG_FAKE_LOCK case is handled by rsb_do_spmv_recursive_parallel */
+#else /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+	errval = rsb_do_spmv_recursive_serial(mtxAp,x,y,alphap,betap,incx,incy,transA RSB_INNER_NRHS_SPMV_ARGS_IDS);
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+	goto done;
+done:
+	if(!RSB_UNLIKELY(op_flags&RSB_OP_FLAG_DIAGONAL_OVERRIDE_EXPLICIT)) /* NEW: fix for odd spsv / implicit-diagonal / non-parallel cases */
+	if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_UNIT_DIAG_IMPLICIT))
+		rsb__BLAS_Xaxpy_parallel(rsb_do_get_rows_of(mtxAp,transA),alphap,y,incy,x,incx,mtxAp->typecode);
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+')dnl
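+
+/*
+ Note on the axpy fixup above: for matrices stored with an implicit unit
+ diagonal (RSB_FLAG_UNIT_DIAG_IMPLICIT) the stored part of A lacks the
+ diagonal, so after y <- beta*y + alpha*op(A)*x the term alpha*I*x is added
+ separately.  A plain-C sketch of that correction for doubles (the demo_
+ name is hypothetical):
+*/
+#if 0
+#include <stddef.h>
+
+static void demo_unit_diag_fixup(int n, double alpha,
+	double *y, int incy, const double *x, int incx)
+{
+	int i;
+
+	for(i = 0; i < n; ++i)	/* y += alpha * x, i.e. the implicit I*x term */
+		y[(size_t)i*incy] += alpha * x[(size_t)i*incx];
+}
+#endif /* 0 */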
+dnl
+dnl
+
+
+dnl
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`
+#endif /* RSB_SPMV_H_INCLUDED */
+')
+dnl
+/* @endcond */
+dnl
diff --git a/rsb_spsum.c b/rsb_spsum.c
new file mode 100644
index 0000000..f869f4a
--- /dev/null
+++ b/rsb_spsum.c
@@ -0,0 +1,144 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains functions for summing sparse matrices.
+ */
+/* FIXME : UNFINISHED, UNCHECKED, UNSECURED, preliminary code
+ * TODO : spscatter
+ * */
+
+#include "rsb_internals.h"
+
+#define RSB_SPSUM_VERBOSITY 0
+
+struct rsb_mtx_t * rsb__do_matrix_sum(rsb_type_t typecode, rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp, rsb_trans_t transB, const void *betap, const struct rsb_mtx_t * mtxBp, rsb_err_t * errvalp)
+{
+	/*!
+	 * \todo: unfinished
+	 * TODO: overflows are possible; need checks.
+	 * TODO: need a specialized approach for symmetric matrices (e.g.: sum of two symmetric ones is symmetric)!
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+#if RSB_SPSUM_VERBOSITY
+	rsb_nnz_idx_t rnz=0,an,bn,cn;
+#endif /* RSB_SPSUM_VERBOSITY */
+	struct rsb_coo_matrix_t cooa,coob,cooc;
+	struct rsb_mtx_t * mtxCp = NULL;
+	rsb_flags_t flags = RSB_FLAG_DEFAULT_STORAGE_FLAGS|RSB_FLAG_DISCARD_ZEROS|RSB_FLAG_SORTED_INPUT;
+	rsb_coo_idx_t tam,tak,tbm,tbk;
+
+	RSB_BZERO_P(&cooa);
+	RSB_BZERO_P(&coob);
+	RSB_BZERO_P(&cooc);
+
+	if( !mtxAp /*|| !alphap || !betap*/ || !mtxBp )
+	{
+		/* Note: alphap==NULL and betap==NULL are allowed */
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	tam = RSB_MTX_TRANSPOSED_ROWS(mtxAp,transA);
+	tak = RSB_MTX_TRANSPOSED_COLS(mtxAp,transA);
+	tbm = RSB_MTX_TRANSPOSED_ROWS(mtxBp,transB);
+	tbk = RSB_MTX_TRANSPOSED_COLS(mtxBp,transB);
+
+	if( tam != tbm )
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	if( tak != tbk )
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	if(!mtxAp) { errval = RSB_ERR_GENERIC_ERROR; RSB_PERR_GOTO(err,RSB_ERRM_ES);}
+	if(!mtxBp) { errval = RSB_ERR_GENERIC_ERROR; RSB_PERR_GOTO(err,RSB_ERRM_ES);}
+
+	if(!RSB_IS_VALID_NNZ_SUM(mtxAp->nnz,mtxBp->nnz))
+	{
+		errval = RSB_ERR_LIMITS;
+		RSB_PERR_GOTO(err,"number of matrices sum nnz may exceed maximum allowed.\n");
+	}
+
+	/*
+	 * TODO: if the operands are the same matrix, with the same type and transposition, one may simply clone and scale.
+	 * */
+	cooc.nnz = 2*RSB_MAX(mtxAp->nnz+mtxBp->nnz,tam+1)+2*(tam+1); /* FIXME: this is excess allocation for symmetry handling */
+	cooc.typecode=mtxAp->typecode;
+	if(rsb__callocate_coo_matrix_t(&cooc)!=&cooc) { RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+
+	/* filtering with ...->flags & RSB_FLAG_SOME_SYMMETRY avoids symmetry expansion in the clones */
+	RSB_DO_ERROR_CUMULATE(errval,rsb__clone_coo(mtxAp,transA,alphap,typecode,&cooa,flags|(mtxAp->flags&(RSB_FLAG_SOME_SYMMETRY))));
+	if(RSB_SOME_ERROR(errval)){ RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+
+	RSB_DO_ERROR_CUMULATE(errval,rsb__clone_coo(mtxBp,transB, betap,typecode,&coob,flags|(mtxBp->flags&(RSB_FLAG_SOME_SYMMETRY))));
+	if(RSB_SOME_ERROR(errval)){ RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+
+	RSB_COO_MEMCPY(cooc.VA,cooc.IA,cooc.JA,cooa.VA,cooa.IA,cooa.JA,0       ,0,cooa.nnz,RSB_SIZEOF(typecode));
+	RSB_COO_MEMCPY(cooc.VA,cooc.IA,cooc.JA,coob.VA,coob.IA,coob.JA,cooa.nnz,0,coob.nnz,RSB_SIZEOF(typecode));
+
+	cooc.nnz=cooa.nnz+coob.nnz;
+	RSB_DO_FLAG_DEL(flags,RSB_FLAG_SORTED_INPUT);
+
+	{
+		rsb_nnz_idx_t dnz = 0;
+		errval = rsb__cor_merge_dups(typecode, cooc.VA, cooc.IA, cooc.JA, 0, cooa.nnz, coob.nnz, 0, 1, &dnz, NULL);
+		cooc.nnz-=dnz;
+		RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORTED_INPUT);
+	}
+
+#if RSB_SPSUM_VERBOSITY
+	RSB_STDOUT("sum output will have %d nnz\n",rnz);
+#endif /* RSB_SPSUM_VERBOSITY */
+	cooc.nr = mtxAp->nr;
+	cooc.nc = mtxAp->nc;
+	mtxCp = rsb__do_mtx_alloc_from_coo_inplace(cooc.VA,cooc.IA,cooc.JA,cooc.nnz,cooc.typecode,cooc.nr,cooc.nc,RSB_DEFAULT_ROW_BLOCKING,RSB_DEFAULT_COL_BLOCKING,flags|RSB_FLAG_DUPLICATES_SUM,&errval);
+	if(!mtxCp||RSB_SOME_ERROR(errval))
+	{
+	       	RSB_PERR_GOTO(err,RSB_ERRM_ES);
+       	}
+	RSB_DO_FLAG_DEL(mtxCp->flags,RSB_FLAG_EXTERNALLY_ALLOCATED_ARRAYS);
+	RSB_DO_FLAG_DEL(mtxCp->flags,RSB_FLAG_DUPLICATES_SUM);
+
+#if RSB_SPSUM_VERBOSITY
+	RSB_STDOUT("sum output will have %d nnz\n",rnz);
+#endif /* RSB_SPSUM_VERBOSITY */
+	goto ok;
+err:
+	rsb__do_perror(NULL,errval);
+	RSB_ERROR("!\n");
+	rsb__destroy_coo_matrix_t(&cooc);
+ok:
+	rsb__destroy_coo_matrix_t(&cooa);
+	rsb__destroy_coo_matrix_t(&coob);
+	RSB_CONDITIONAL_ERRPSET(errvalp,errval);
+	RSB_DO_MTX_RETURN(mtxCp,errval);
+}
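+
+/*
+ The routine above computes C = alpha*op(A) + beta*op(B) by concatenating the
+ two scaled COO clones and folding duplicate (i,j) coordinates (via
+ rsb__cor_merge_dups, which merges two already-sorted runs).  A standalone
+ plain-C analogue of the fold step, simply qsort-ing the concatenation
+ instead of merging (the demo_* names are hypothetical):
+*/
+#if 0
+#include <stdlib.h>
+
+struct demo_coo { int i, j; double v; };
+
+static int demo_cmp(const void *pa, const void *pb)
+{
+	const struct demo_coo *a = pa, *b = pb;
+
+	if(a->i != b->i) return a->i < b->i ? -1 : 1;
+	if(a->j != b->j) return a->j < b->j ? -1 : 1;
+	return 0;
+}
+
+/* Sort the concatenated entries and sum duplicates in place; return the new count. */
+static int demo_coo_sum_dups(struct demo_coo *t, int nnz)
+{
+	int n, w = 0;
+
+	qsort(t, nnz, sizeof(*t), demo_cmp);
+	for(n = 0; n < nnz; ++n)
+		if(w > 0 && t[w-1].i == t[n].i && t[w-1].j == t[n].j)
+			t[w-1].v += t[n].v;	/* duplicate coordinate: accumulate */
+		else
+			t[w++] = t[n];		/* new coordinate: keep */
+	return w;
+}
+#endif /* 0 */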
+/* @endcond */
diff --git a/rsb_spsum.h b/rsb_spsum.h
new file mode 100644
index 0000000..77a4575
--- /dev/null
+++ b/rsb_spsum.h
@@ -0,0 +1,35 @@
+/*
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This header file contains prototypes for sparse matrix sum functions.
+ * */
+
+#ifndef RSB_SPSUM_H_INCLUDED
+#define RSB_SPSUM_H_INCLUDED
+#include "rsb_internals.h"	/* rsb_coo_matrix_t */
+struct rsb_mtx_t * rsb__do_matrix_sum(rsb_type_t typecode, rsb_trans_t transA, const void *alphap, const struct rsb_mtx_t * mtxAp, rsb_trans_t transB, const void *betap, const struct rsb_mtx_t * mtxBp, rsb_err_t * errvalp);
+#endif /* RSB_SPSUM_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_spsum_misc.c b/rsb_spsum_misc.c
new file mode 100644
index 0000000..a02082a
--- /dev/null
+++ b/rsb_spsum_misc.c
@@ -0,0 +1,349 @@
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * */
+
+/*
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ P.s.: right now, only row-major matrix access is considered.
+
+ */
+#include "rsb_common.h"
+
+
+rsb_err_t rsb__do_add_submatrix_to_dense(const struct rsb_mtx_t * mtxAp, const void *alphap, void * Bp, rsb_nnz_idx_t ldb, rsb_nnz_idx_t nr, rsb_nnz_idx_t nc, rsb_bool_t rowmajor)
+{
+	rsb_nnz_idx_t n;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t	roff=0, coff=0;
+
+	if(!mtxAp || !Bp || !alphap ) {errval = RSB_ERR_BADARGS; goto err;}
+	roff=mtxAp->roff, coff=mtxAp->coff;
+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+{
+	double *VA=mtxAp->VA;
+
+	if(rsb__is_coo_matrix(mtxAp))
+	{
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_USE_HALFWORD_INDICES))
+		{
+			RSB_DECLARE_CONST_HALFCOO_ARRAYS_FROM_MATRIX(IA,JA,mtxAp)
+			if(rowmajor)
+			for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				*(double*)(RSB_BLOCK_ROWMAJOR_ADDRESS(Bp,ldb,nr,nc,IA[n]+roff,JA[n]+coff,mtxAp->el_size))+=(*(double*)alphap)*(VA[n]);
+			else
+			for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				*(double*)(RSB_BLOCK_COLMAJOR_ADDRESS(Bp,ldb,nr,nc,IA[n]+roff,JA[n]+coff,mtxAp->el_size))+=(*(double*)alphap)*(VA[n]);
+		}
+		else
+		{
+			RSB_DECLARE_CONST_FULLCOO_ARRAYS_FROM_MATRIX(IA,JA,mtxAp)
+			if(rowmajor)
+			for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				*(double*)(RSB_BLOCK_ROWMAJOR_ADDRESS(Bp,ldb,nr,nc,IA[n]+roff,JA[n]+coff,mtxAp->el_size))+=(*(double*)alphap)*(VA[n]);
+			else
+			for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				*(double*)(RSB_BLOCK_COLMAJOR_ADDRESS(Bp,ldb,nr,nc,IA[n]+roff,JA[n]+coff,mtxAp->el_size))+=(*(double*)alphap)*(VA[n]);
+		}
+	}
+	else
+	if(rsb__is_csr_matrix(mtxAp))
+	{
+		rsb_nnz_idx_t n,i;
+
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,(RSB_FLAG_USE_HALFWORD_INDICES)))
+		{
+			RSB_DECLARE_CONST_HALFCSR_ARRAYS_FROM_MATRIX(PA,JA,mtxAp)
+			if(rowmajor)
+			{
+				for(i=0;RSB_LIKELY(i<mtxAp->nr);++i)
+				for(n=PA[i];RSB_LIKELY(n<PA[i+1]);++n)
+					*(double*)(RSB_BLOCK_ROWMAJOR_ADDRESS(Bp,ldb,nr,nc,i+roff,JA[n]+coff,mtxAp->el_size))+=(*(double*)alphap)*(VA[n]);
+			}
+			else
+			{
+				for(i=0;RSB_LIKELY(i<mtxAp->nr);++i)
+				for(n=PA[i];RSB_LIKELY(n<PA[i+1]);++n)
+					*(double*)(RSB_BLOCK_COLMAJOR_ADDRESS(Bp,ldb,nr,nc,i+roff,JA[n]+coff,mtxAp->el_size))+=(*(double*)alphap)*(VA[n]);
+			}
+		}
+		else
+		{
+			RSB_DECLARE_CONST_FULLCSR_ARRAYS_FROM_MATRIX(PA,JA,mtxAp)
+			if(rowmajor)
+			{
+				for(i=0;RSB_LIKELY(i<mtxAp->nr);++i)
+				for(n=PA[i];RSB_LIKELY(n<PA[i+1]);++n)
+					*(double*)(RSB_BLOCK_ROWMAJOR_ADDRESS(Bp,ldb,nr,nc,i+roff,JA[n]+coff,mtxAp->el_size))+=(*(double*)alphap)*(VA[n]);
+			}
+			else
+			{
+				for(i=0;RSB_LIKELY(i<mtxAp->nr);++i)
+				for(n=PA[i];RSB_LIKELY(n<PA[i+1]);++n)
+					*(double*)(RSB_BLOCK_COLMAJOR_ADDRESS(Bp,ldb,nr,nc,i+roff,JA[n]+coff,mtxAp->el_size))+=(*(double*)alphap)*(VA[n]);
+			}		
+		}
+	}
+	else
+		RSB_ERROR(RSB_ERRM_NL);
+}
+
+	}
+	else 
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+{
+	float *VA=mtxAp->VA;
+
+	if(rsb__is_coo_matrix(mtxAp))
+	{
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_USE_HALFWORD_INDICES))
+		{
+			RSB_DECLARE_CONST_HALFCOO_ARRAYS_FROM_MATRIX(IA,JA,mtxAp)
+			if(rowmajor)
+			for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				*(float*)(RSB_BLOCK_ROWMAJOR_ADDRESS(Bp,ldb,nr,nc,IA[n]+roff,JA[n]+coff,mtxAp->el_size))+=(*(float*)alphap)*(VA[n]);
+			else
+			for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				*(float*)(RSB_BLOCK_COLMAJOR_ADDRESS(Bp,ldb,nr,nc,IA[n]+roff,JA[n]+coff,mtxAp->el_size))+=(*(float*)alphap)*(VA[n]);
+		}
+		else
+		{
+			RSB_DECLARE_CONST_FULLCOO_ARRAYS_FROM_MATRIX(IA,JA,mtxAp)
+			if(rowmajor)
+			for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				*(float*)(RSB_BLOCK_ROWMAJOR_ADDRESS(Bp,ldb,nr,nc,IA[n]+roff,JA[n]+coff,mtxAp->el_size))+=(*(float*)alphap)*(VA[n]);
+			else
+			for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				*(float*)(RSB_BLOCK_COLMAJOR_ADDRESS(Bp,ldb,nr,nc,IA[n]+roff,JA[n]+coff,mtxAp->el_size))+=(*(float*)alphap)*(VA[n]);
+		}
+	}
+	else
+	if(rsb__is_csr_matrix(mtxAp))
+	{
+		rsb_nnz_idx_t n,i;
+
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,(RSB_FLAG_USE_HALFWORD_INDICES)))
+		{
+			RSB_DECLARE_CONST_HALFCSR_ARRAYS_FROM_MATRIX(PA,JA,mtxAp)
+			if(rowmajor)
+			{
+				for(i=0;RSB_LIKELY(i<mtxAp->nr);++i)
+				for(n=PA[i];RSB_LIKELY(n<PA[i+1]);++n)
+					*(float*)(RSB_BLOCK_ROWMAJOR_ADDRESS(Bp,ldb,nr,nc,i+roff,JA[n]+coff,mtxAp->el_size))+=(*(float*)alphap)*(VA[n]);
+			}
+			else
+			{
+				for(i=0;RSB_LIKELY(i<mtxAp->nr);++i)
+				for(n=PA[i];RSB_LIKELY(n<PA[i+1]);++n)
+					*(float*)(RSB_BLOCK_COLMAJOR_ADDRESS(Bp,ldb,nr,nc,i+roff,JA[n]+coff,mtxAp->el_size))+=(*(float*)alphap)*(VA[n]);
+			}
+		}
+		else
+		{
+			RSB_DECLARE_CONST_FULLCSR_ARRAYS_FROM_MATRIX(PA,JA,mtxAp)
+			if(rowmajor)
+			{
+				for(i=0;RSB_LIKELY(i<mtxAp->nr);++i)
+				for(n=PA[i];RSB_LIKELY(n<PA[i+1]);++n)
+					*(float*)(RSB_BLOCK_ROWMAJOR_ADDRESS(Bp,ldb,nr,nc,i+roff,JA[n]+coff,mtxAp->el_size))+=(*(float*)alphap)*(VA[n]);
+			}
+			else
+			{
+				for(i=0;RSB_LIKELY(i<mtxAp->nr);++i)
+				for(n=PA[i];RSB_LIKELY(n<PA[i+1]);++n)
+					*(float*)(RSB_BLOCK_COLMAJOR_ADDRESS(Bp,ldb,nr,nc,i+roff,JA[n]+coff,mtxAp->el_size))+=(*(float*)alphap)*(VA[n]);
+			}		
+		}
+	}
+	else
+		RSB_ERROR(RSB_ERRM_NL);
+}
+
+	}
+	else 
+#endif /* RSB_NUMERICAL_TYPE_FLOAT */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+{
+	float complex *VA=mtxAp->VA;
+
+	if(rsb__is_coo_matrix(mtxAp))
+	{
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_USE_HALFWORD_INDICES))
+		{
+			RSB_DECLARE_CONST_HALFCOO_ARRAYS_FROM_MATRIX(IA,JA,mtxAp)
+			if(rowmajor)
+			for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				*(float complex*)(RSB_BLOCK_ROWMAJOR_ADDRESS(Bp,ldb,nr,nc,IA[n]+roff,JA[n]+coff,mtxAp->el_size))+=(*(float complex*)alphap)*(VA[n]);
+			else
+			for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				*(float complex*)(RSB_BLOCK_COLMAJOR_ADDRESS(Bp,ldb,nr,nc,IA[n]+roff,JA[n]+coff,mtxAp->el_size))+=(*(float complex*)alphap)*(VA[n]);
+		}
+		else
+		{
+			RSB_DECLARE_CONST_FULLCOO_ARRAYS_FROM_MATRIX(IA,JA,mtxAp)
+			if(rowmajor)
+			for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				*(float complex*)(RSB_BLOCK_ROWMAJOR_ADDRESS(Bp,ldb,nr,nc,IA[n]+roff,JA[n]+coff,mtxAp->el_size))+=(*(float complex*)alphap)*(VA[n]);
+			else
+			for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				*(float complex*)(RSB_BLOCK_COLMAJOR_ADDRESS(Bp,ldb,nr,nc,IA[n]+roff,JA[n]+coff,mtxAp->el_size))+=(*(float complex*)alphap)*(VA[n]);
+		}
+	}
+	else
+	if(rsb__is_csr_matrix(mtxAp))
+	{
+		rsb_nnz_idx_t n,i;
+
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,(RSB_FLAG_USE_HALFWORD_INDICES)))
+		{
+			RSB_DECLARE_CONST_HALFCSR_ARRAYS_FROM_MATRIX(PA,JA,mtxAp)
+			if(rowmajor)
+			{
+				for(i=0;RSB_LIKELY(i<mtxAp->nr);++i)
+				for(n=PA[i];RSB_LIKELY(n<PA[i+1]);++n)
+					*(float complex*)(RSB_BLOCK_ROWMAJOR_ADDRESS(Bp,ldb,nr,nc,i+roff,JA[n]+coff,mtxAp->el_size))+=(*(float complex*)alphap)*(VA[n]);
+			}
+			else
+			{
+				for(i=0;RSB_LIKELY(i<mtxAp->nr);++i)
+				for(n=PA[i];RSB_LIKELY(n<PA[i+1]);++n)
+					*(float complex*)(RSB_BLOCK_COLMAJOR_ADDRESS(Bp,ldb,nr,nc,i+roff,JA[n]+coff,mtxAp->el_size))+=(*(float complex*)alphap)*(VA[n]);
+			}
+		}
+		else
+		{
+			RSB_DECLARE_CONST_FULLCSR_ARRAYS_FROM_MATRIX(PA,JA,mtxAp)
+			if(rowmajor)
+			{
+				for(i=0;RSB_LIKELY(i<mtxAp->nr);++i)
+				for(n=PA[i];RSB_LIKELY(n<PA[i+1]);++n)
+					*(float complex*)(RSB_BLOCK_ROWMAJOR_ADDRESS(Bp,ldb,nr,nc,i+roff,JA[n]+coff,mtxAp->el_size))+=(*(float complex*)alphap)*(VA[n]);
+			}
+			else
+			{
+				for(i=0;RSB_LIKELY(i<mtxAp->nr);++i)
+				for(n=PA[i];RSB_LIKELY(n<PA[i+1]);++n)
+					*(float complex*)(RSB_BLOCK_COLMAJOR_ADDRESS(Bp,ldb,nr,nc,i+roff,JA[n]+coff,mtxAp->el_size))+=(*(float complex*)alphap)*(VA[n]);
+			}		
+		}
+	}
+	else
+		RSB_ERROR(RSB_ERRM_NL);
+}
+
+	}
+	else 
+#endif /* RSB_NUMERICAL_TYPE_FLOAT_COMPLEX */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+{
+	double complex *VA=mtxAp->VA;
+
+	if(rsb__is_coo_matrix(mtxAp))
+	{
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_USE_HALFWORD_INDICES))
+		{
+			RSB_DECLARE_CONST_HALFCOO_ARRAYS_FROM_MATRIX(IA,JA,mtxAp)
+			if(rowmajor)
+			for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				*(double complex*)(RSB_BLOCK_ROWMAJOR_ADDRESS(Bp,ldb,nr,nc,IA[n]+roff,JA[n]+coff,mtxAp->el_size))+=(*(double complex*)alphap)*(VA[n]);
+			else
+			for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				*(double complex*)(RSB_BLOCK_COLMAJOR_ADDRESS(Bp,ldb,nr,nc,IA[n]+roff,JA[n]+coff,mtxAp->el_size))+=(*(double complex*)alphap)*(VA[n]);
+		}
+		else
+		{
+			RSB_DECLARE_CONST_FULLCOO_ARRAYS_FROM_MATRIX(IA,JA,mtxAp)
+			if(rowmajor)
+			for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				*(double complex*)(RSB_BLOCK_ROWMAJOR_ADDRESS(Bp,ldb,nr,nc,IA[n]+roff,JA[n]+coff,mtxAp->el_size))+=(*(double complex*)alphap)*(VA[n]);
+			else
+			for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				*(double complex*)(RSB_BLOCK_COLMAJOR_ADDRESS(Bp,ldb,nr,nc,IA[n]+roff,JA[n]+coff,mtxAp->el_size))+=(*(double complex*)alphap)*(VA[n]);
+		}
+	}
+	else
+	if(rsb__is_csr_matrix(mtxAp))
+	{
+		rsb_nnz_idx_t n,i;
+
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,(RSB_FLAG_USE_HALFWORD_INDICES)))
+		{
+			RSB_DECLARE_CONST_HALFCSR_ARRAYS_FROM_MATRIX(PA,JA,mtxAp)
+			if(rowmajor)
+			{
+				for(i=0;RSB_LIKELY(i<mtxAp->nr);++i)
+				for(n=PA[i];RSB_LIKELY(n<PA[i+1]);++n)
+					*(double complex*)(RSB_BLOCK_ROWMAJOR_ADDRESS(Bp,ldb,nr,nc,i+roff,JA[n]+coff,mtxAp->el_size))+=(*(double complex*)alphap)*(VA[n]);
+			}
+			else
+			{
+				for(i=0;RSB_LIKELY(i<mtxAp->nr);++i)
+				for(n=PA[i];RSB_LIKELY(n<PA[i+1]);++n)
+					*(double complex*)(RSB_BLOCK_COLMAJOR_ADDRESS(Bp,ldb,nr,nc,i+roff,JA[n]+coff,mtxAp->el_size))+=(*(double complex*)alphap)*(VA[n]);
+			}
+		}
+		else
+		{
+			RSB_DECLARE_CONST_FULLCSR_ARRAYS_FROM_MATRIX(PA,JA,mtxAp)
+			if(rowmajor)
+			{
+				for(i=0;RSB_LIKELY(i<mtxAp->nr);++i)
+				for(n=PA[i];RSB_LIKELY(n<PA[i+1]);++n)
+					*(double complex*)(RSB_BLOCK_ROWMAJOR_ADDRESS(Bp,ldb,nr,nc,i+roff,JA[n]+coff,mtxAp->el_size))+=(*(double complex*)alphap)*(VA[n]);
+			}
+			else
+			{
+				for(i=0;RSB_LIKELY(i<mtxAp->nr);++i)
+				for(n=PA[i];RSB_LIKELY(n<PA[i+1]);++n)
+					*(double complex*)(RSB_BLOCK_COLMAJOR_ADDRESS(Bp,ldb,nr,nc,i+roff,JA[n]+coff,mtxAp->el_size))+=(*(double complex*)alphap)*(VA[n]);
+			}		
+		}
+	}
+	else
+		RSB_ERROR(RSB_ERRM_NL);
+}
+
+	}
+	else 
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX */
+	return RSB_ERR_UNSUPPORTED_TYPE;
+	return RSB_ERR_NO_ERROR;
+err:
+	return RSB_ERR_GENERIC_ERROR;
+}
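+
+/*
+ For the double/COO/full-word/row-major case, the RSB_BLOCK_ROWMAJOR_ADDRESS()
+ accesses above reduce to a plain scatter-add into a dense array B with
+ leading dimension ldb; a sketch (the demo_ name is hypothetical):
+*/
+#if 0
+#include <stddef.h>
+
+static void demo_coo_add_to_dense(double alpha, const double *VA,
+	const int *IA, const int *JA, int nnz, int roff, int coff,
+	double *B, int ldb)
+{
+	int n;
+
+	for(n = 0; n < nnz; ++n)	/* B[i][j] += alpha * A(i,j), with submatrix offsets */
+		B[(size_t)(IA[n] + roff) * ldb + (JA[n] + coff)] += alpha * VA[n];
+}
+#endif /* 0 */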
+
+/* @endcond */
diff --git a/rsb_spsum_misc.h b/rsb_spsum_misc.h
new file mode 100644
index 0000000..36a79f7
--- /dev/null
+++ b/rsb_spsum_misc.h
@@ -0,0 +1,44 @@
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * */
+
+/*
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ P.s.: right now, only row-major matrix access is considered.
+
+ */
+
+#ifndef RSB_SPSUM_MISC_H_INCLUDED
+#define RSB_SPSUM_MISC_H_INCLUDED
+#include "rsb_common.h"
+
+
+rsb_err_t rsb__do_add_submatrix_to_dense(const struct rsb_mtx_t * mtxAp, const void *alphap, void * Bp, rsb_nnz_idx_t ldb, rsb_nnz_idx_t nr, rsb_nnz_idx_t nc, rsb_bool_t rowmajor)
+;
+#endif /* RSB_SPSUM_MISC_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_spsum_misc.m4 b/rsb_spsum_misc.m4
new file mode 100644
index 0000000..1c4afc7
--- /dev/null
+++ b/rsb_spsum_misc.m4
@@ -0,0 +1,120 @@
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * */
+include(`rsb_misc.m4')dnl
+include(`do_unroll.m4')dnl
+RSB_M4_HEADER_MESSAGE()dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`
+#ifndef RSB_SPSUM_MISC_H_INCLUDED
+#define RSB_SPSUM_MISC_H_INCLUDED
+#include "rsb_common.h"
+',`dnl
+#include "rsb_common.h"
+')
+dnl
+
+rsb_err_t rsb__do_add_submatrix_to_dense(const struct rsb_mtx_t * mtxAp, const void *alphap, void * Bp, rsb_nnz_idx_t ldb, rsb_nnz_idx_t nr, rsb_nnz_idx_t nc, rsb_bool_t rowmajor)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	rsb_nnz_idx_t n;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t	roff=0, coff=0;
+
+	if(!mtxAp || !Bp || !alphap ) {errval = RSB_ERR_BADARGS; goto err;}
+	roff=mtxAp->roff, coff=mtxAp->coff;
+
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( mtxAp->typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+dnl
+	{
+{
+	mtype *VA=mtxAp->VA;
+
+	if(rsb__is_coo_matrix(mtxAp))
+	{
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_USE_HALFWORD_INDICES))
+		{
+			RSB_DECLARE_CONST_HALFCOO_ARRAYS_FROM_MATRIX(IA,JA,mtxAp)
+			if(rowmajor)
+			for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				*(mtype*)(RSB_BLOCK_ROWMAJOR_ADDRESS(Bp,ldb,nr,nc,IA[n]+roff,JA[n]+coff,mtxAp->el_size))+=(*(mtype*)alphap)*(VA[n]);
+			else
+			for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				*(mtype*)(RSB_BLOCK_COLMAJOR_ADDRESS(Bp,ldb,nr,nc,IA[n]+roff,JA[n]+coff,mtxAp->el_size))+=(*(mtype*)alphap)*(VA[n]);
+		}
+		else
+		{
+			RSB_DECLARE_CONST_FULLCOO_ARRAYS_FROM_MATRIX(IA,JA,mtxAp)
+			if(rowmajor)
+			for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				*(mtype*)(RSB_BLOCK_ROWMAJOR_ADDRESS(Bp,ldb,nr,nc,IA[n]+roff,JA[n]+coff,mtxAp->el_size))+=(*(mtype*)alphap)*(VA[n]);
+			else
+			for(n=0;RSB_LIKELY(n<mtxAp->nnz);++n)
+				*(mtype*)(RSB_BLOCK_COLMAJOR_ADDRESS(Bp,ldb,nr,nc,IA[n]+roff,JA[n]+coff,mtxAp->el_size))+=(*(mtype*)alphap)*(VA[n]);
+		}
+	}
+	else
+	if(rsb__is_csr_matrix(mtxAp))
+	{
+		rsb_nnz_idx_t n,i;
+
+		if(RSB_DO_FLAG_HAS(mtxAp->flags,(RSB_FLAG_USE_HALFWORD_INDICES)))
+		{
+			RSB_DECLARE_CONST_HALFCSR_ARRAYS_FROM_MATRIX(PA,JA,mtxAp)
+			if(rowmajor)
+			{
+				for(i=0;RSB_LIKELY(i<mtxAp->nr);++i)
+				for(n=PA[i];RSB_LIKELY(n<PA[i+1]);++n)
+					*(mtype*)(RSB_BLOCK_ROWMAJOR_ADDRESS(Bp,ldb,nr,nc,i+roff,JA[n]+coff,mtxAp->el_size))+=(*(mtype*)alphap)*(VA[n]);
+			}
+			else
+			{
+				for(i=0;RSB_LIKELY(i<mtxAp->nr);++i)
+				for(n=PA[i];RSB_LIKELY(n<PA[i+1]);++n)
+					*(mtype*)(RSB_BLOCK_COLMAJOR_ADDRESS(Bp,ldb,nr,nc,i+roff,JA[n]+coff,mtxAp->el_size))+=(*(mtype*)alphap)*(VA[n]);
+			}
+		}
+		else
+		{
+			RSB_DECLARE_CONST_FULLCSR_ARRAYS_FROM_MATRIX(PA,JA,mtxAp)
+			if(rowmajor)
+			{
+				for(i=0;RSB_LIKELY(i<mtxAp->nr);++i)
+				for(n=PA[i];RSB_LIKELY(n<PA[i+1]);++n)
+					*(mtype*)(RSB_BLOCK_ROWMAJOR_ADDRESS(Bp,ldb,nr,nc,i+roff,JA[n]+coff,mtxAp->el_size))+=(*(mtype*)alphap)*(VA[n]);
+			}
+			else
+			{
+				for(i=0;RSB_LIKELY(i<mtxAp->nr);++i)
+				for(n=PA[i];RSB_LIKELY(n<PA[i+1]);++n)
+					*(mtype*)(RSB_BLOCK_COLMAJOR_ADDRESS(Bp,ldb,nr,nc,i+roff,JA[n]+coff,mtxAp->el_size))+=(*(mtype*)alphap)*(VA[n]);
+			}		
+		}
+	}
+	else
+		RSB_ERROR(RSB_ERRM_NL);
+}
+
+	}
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE;
+	return RSB_ERR_NO_ERROR;
+err:
+	return RSB_ERR_GENERIC_ERROR;
+}
+')dnl
+
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#endif /* RSB_SPSUM_MISC_H_INCLUDED */
+')dnl
+dnl
+/* @endcond */
diff --git a/rsb_spsv.c b/rsb_spsv.c
new file mode 100644
index 0000000..e15b1a6
--- /dev/null
+++ b/rsb_spsv.c
@@ -0,0 +1,867 @@
+/*
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains functions for sparse recursive multicore triangular solve.
+ */
+/*
+ * FIXME: the submatrices sorting routines are buggy.
+ * */
+#include "rsb_internals.h"		/* */
+#include "rsb_lock.h"		/* */
+#include "rsb_spsv.h"		/* */
+
+#define RSB_WANT_VERBOSE_SPSV	0
+/* #define RSB_CBLAS_X_SCAL_SPSV rsb__cblas_Xscal */
+#define RSB_CBLAS_X_SCAL_SPSV rsb__cblas_Xscal_parallel
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+static rsb_err_t rsb_do_spsv_terminal(const struct rsb_mtx_t * mtxAp, const void * x, void * y, const void * alphap, rsb_coo_idx_t incx, rsb_coo_idx_t incy, rsb_trans_t transl RSB_INNER_NRHS_SPSV_ARGS)
+{
+	/**
+	  	\ingroup gr_internals
+		Terminal (leaf-level) solve function for SPSV.
+		alphap can be NULL.
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!mtxAp || !y || !x || transl == RSB_INVALID_FLAGS || !rsb__is_square(mtxAp) || !RSB_IS_VALID_INCX_VALUE(incx) || !RSB_IS_VALID_INCX_VALUE(incy))
+	{
+		errval = RSB_ERR_BADARGS;
+		goto ret;
+	}
+
+	/*
+		FIXME : should handle alphap in a more specialized fashion.
+	*/
+
+#if 0
+	if(betap && !RSB_IS_ELEMENT_ONE(betap,mtxAp->typecode))
+	{
+		if(incy>1)
+			rsb__cblas_Xscal(mtxAp->typecode,rsb_do_get_columns_of(mtxAp,transl),betap,y,incy);
+		else
+		{
+			/* if we should zero the output vector */
+			if(RSB_IS_ELEMENT_ZERO(betap,mtxAp->typecode))
+				rsb__cblas_Xscal(mtxAp->typecode,rsb_do_get_columns_of(mtxAp,transl),NULL,y,incy);
+			else
+			/* if we should scale the output vector */
+			if(!RSB_IS_ELEMENT_ONE(betap,mtxAp->typecode))
+				rsb_vector_scale(y,betap,mtxAp->typecode,rsb_do_get_columns_of(mtxAp,transl));
+		}
+	}
+#endif
+
+
+#if RSB_ENABLE_INNER_NRHS_SPSV
+	{
+	const size_t lenx=(mtxAp->el_size*rhsnri);
+	const size_t leny=(mtxAp->el_size*outnri);
+	rsb_int_t nrhsi=0;
+	for(nrhsi=0;nrhsi<nrhs;++nrhsi)
+#endif
+	{
+#if RSB_ENABLE_INNER_NRHS_SPSV
+		void      *out=((      rsb_byte_t*)y)+(leny*nrhsi);
+		const void*rhs=((const rsb_byte_t*)x)+(lenx*nrhsi);
+#else
+		void      *out=((      rsb_byte_t*)y);
+		const void*rhs=((const rsb_byte_t*)x);
+#endif /* RSB_ENABLE_INNER_NRHS_SPSV */
+	if(!alphap || RSB_IS_ELEMENT_ONE(alphap,mtxAp->typecode))
+	{
+		if(incy==1 && incx==1)
+			errval = rsb__do_spsv_uxua(mtxAp,rhs,out,transl);
+		else
+			errval = rsb__do_spsv_sxsx(mtxAp,rhs,out,alphap,incx,incy,transl);
+	}
+	else
+		errval = rsb__do_spsv_sxsx(mtxAp,rhs,out,alphap,incx,incy,transl);
+	}
+#if RSB_ENABLE_INNER_NRHS_SPSV
+	}
+#endif /* RSB_ENABLE_INNER_NRHS_SPSV */
+ret:
+	return errval;
+}
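+
+/*
+ With RSB_ENABLE_INNER_NRHS_SPSV, the nrhs right-hand sides are laid out as
+ consecutive strided vectors, so the per-RHS base pointers computed above are
+ just x + el_size*rhsnri*k and y + el_size*outnri*k for the k-th RHS.  As a
+ sketch (the demo_ name is hypothetical):
+*/
+#if 0
+#include <stddef.h>
+
+static void * demo_nrhs_ptr(void *base, size_t el_size, size_t stride_nelems, int k)
+{
+	return (char*)base + el_size * stride_nelems * (size_t)k;	/* k-th RHS vector */
+}
+#endif /* 0 */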
+
+static rsb_err_t rsb_do_spsv_recursive_serial(const struct rsb_mtx_t * mtxAp, const void * x, void * y, const void * alphap, rsb_coo_idx_t incx, rsb_coo_idx_t incy, rsb_trans_t transl, enum rsb_op_flags_t op_flags RSB_INNER_NRHS_SPSV_ARGS)
+{
+	/**
+	  	\ingroup gr_internals
+	 *
+	 *	FIXME : document
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_aligned_t mone[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_aligned_t pone[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb__util_set_area_to_converted_integer(&mone[0],mtxAp->typecode,-1);
+	rsb__util_set_area_to_converted_integer(&pone[0],mtxAp->typecode,+1);
+	rsb_int_t nrhsi = 0;
+
+	if(mtxAp->roff == mtxAp->coff)
+	if(rsb__is_root_matrix(mtxAp))
+#if RSB_ENABLE_INNER_NRHS_SPSV
+	for (nrhsi=0;nrhsi<nrhs;++nrhsi)
+#endif /* RSB_ENABLE_INNER_NRHS_SPSV */
+		RSB_CBLAS_X_SCAL_SPSV(mtxAp->typecode,rsb_do_get_rows_of(mtxAp,transl),alphap,RSB_TYPED_OFF_PTR(mtxAp->typecode,y,nrhsi*(outnri)*incy),incy);
+
+	if( rsb__is_recursive_matrix(mtxAp->flags))
+#if 1
+	{
+		void*offy=NULL; const void *offx=NULL;
+//		rsb_coo_idx_t scoff=submatrix->coff; rsb_coo_idx_t sroff=submatrix->roff;
+		rsb_bool_t isupp = rsb__is_upper_triangle(mtxAp->flags);
+		rsb_bool_t istrans = (RSB_DOES_NOT_TRANSPOSE(transl))?0:1;
+		rsb_coo_idx_t half;
+//		offy=((rsb_byte_t*)y)+(mtxAp->el_size*sroff)*incy,offx=((const rsb_byte_t*)x)+(mtxAp->el_size*scoff)*incx;
+		if(mtxAp->coff!=mtxAp->roff)
+		{	
+			RSB_ERROR("!\n");
+			errval = RSB_ERR_BADARGS;goto err;
+		}
+		if(!(RSB_SUBMATRIX_INDEX(mtxAp,0,0)) || !(RSB_SUBMATRIX_INDEX(mtxAp,1,1)))
+		{
+			RSB_ERROR("@ %d %d and with null diagonal elements, %p %p %p %p\n",mtxAp->roff,mtxAp->coff,
+					mtxAp->sm[0],
+					mtxAp->sm[1],
+					mtxAp->sm[2],
+					mtxAp->sm[3]
+					);
+			errval = RSB_ERR_BADARGS;goto err;
+		}
+		half = RSB_SUBMATRIX_INDEX(mtxAp,1,1)->roff-mtxAp->roff;
+		offy=((      rsb_byte_t*)y)+(mtxAp->el_size*half)*incy;
+		offx=((const rsb_byte_t*)x)+(mtxAp->el_size*half)*incx;
+	
+		switch(isupp)
+		{
+		case RSB_BOOL_TRUE:
+		switch(istrans)
+		{
+		case RSB_BOOL_TRUE:
+			RSB_DO_ERROR_CUMULATE(errval,rsb_do_spsv_recursive_serial(RSB_SUBMATRIX_INDEX(mtxAp,0,0),x,y,&pone[0],incx,incy,transl,op_flags RSB_INNER_NRHS_SPSV_ARGS_IDS));
+			if(RSB_SUBMATRIX_INDEX(mtxAp,0,1) && op_flags != RSB_OP_FLAG_INFINITE_PARALLELISM_EMULATE)
+				RSB_DO_ERROR_CUMULATE(errval,rsb_do_spmv_general(transl,&mone[0],RSB_SUBMATRIX_INDEX(mtxAp,0,1),offx,incx,NULL,y,incy,RSB_OP_FLAG_DIAGONAL_OVERRIDE_EXPLICIT_SERIAL RSB_INNER_NRHS_SPSV_ARGS_IDS));
+			RSB_DO_ERROR_CUMULATE(errval,rsb_do_spsv_recursive_serial(RSB_SUBMATRIX_INDEX(mtxAp,1,1),offx,offy,&pone[0],incx,incy,transl,op_flags RSB_INNER_NRHS_SPSV_ARGS_IDS));
+		break;
+		default:
+			RSB_DO_ERROR_CUMULATE(errval,rsb_do_spsv_recursive_serial(RSB_SUBMATRIX_INDEX(mtxAp,1,1),offx,offy,&pone[0],incx,incy,transl,op_flags RSB_INNER_NRHS_SPSV_ARGS_IDS));
+			if(RSB_SUBMATRIX_INDEX(mtxAp,0,1) && op_flags != RSB_OP_FLAG_INFINITE_PARALLELISM_EMULATE)
+				RSB_DO_ERROR_CUMULATE(errval,rsb_do_spmv_general(transl,&mone[0],RSB_SUBMATRIX_INDEX(mtxAp,0,1),offx,incx,NULL,y,incy,RSB_OP_FLAG_DIAGONAL_OVERRIDE_EXPLICIT_SERIAL RSB_INNER_NRHS_SPSV_ARGS_IDS));
+			RSB_DO_ERROR_CUMULATE(errval,rsb_do_spsv_recursive_serial(RSB_SUBMATRIX_INDEX(mtxAp,0,0),x,y,&pone[0],incx,incy,transl,op_flags RSB_INNER_NRHS_SPSV_ARGS_IDS));
+		break;
+		}
+		break;
+		case RSB_BOOL_FALSE:
+		switch(istrans)
+		{
+		case RSB_BOOL_TRUE:
+			RSB_DO_ERROR_CUMULATE(errval,rsb_do_spsv_recursive_serial(RSB_SUBMATRIX_INDEX(mtxAp,1,1),offx,offy,&pone[0],incx,incy,transl,op_flags RSB_INNER_NRHS_SPSV_ARGS_IDS));
+			if(RSB_SUBMATRIX_INDEX(mtxAp,1,0) && op_flags != RSB_OP_FLAG_INFINITE_PARALLELISM_EMULATE)
+				RSB_DO_ERROR_CUMULATE(errval,rsb_do_spmv_general(transl,&mone[0],RSB_SUBMATRIX_INDEX(mtxAp,1,0),x,incx,NULL,offy,incy,RSB_OP_FLAG_DIAGONAL_OVERRIDE_EXPLICIT_SERIAL RSB_INNER_NRHS_SPSV_ARGS_IDS));
+			RSB_DO_ERROR_CUMULATE(errval,rsb_do_spsv_recursive_serial(RSB_SUBMATRIX_INDEX(mtxAp,0,0),x,y,&pone[0],incx,incy,transl,op_flags RSB_INNER_NRHS_SPSV_ARGS_IDS));
+		break;
+		default:
+			RSB_DO_ERROR_CUMULATE(errval,rsb_do_spsv_recursive_serial(RSB_SUBMATRIX_INDEX(mtxAp,0,0),x,y,&pone[0],incx,incy,transl,op_flags RSB_INNER_NRHS_SPSV_ARGS_IDS));
+			if(RSB_SUBMATRIX_INDEX(mtxAp,1,0) && op_flags != RSB_OP_FLAG_INFINITE_PARALLELISM_EMULATE)
+				RSB_DO_ERROR_CUMULATE(errval,rsb_do_spmv_general(transl,&mone[0],RSB_SUBMATRIX_INDEX(mtxAp,1,0),x,incx,NULL,offy,incy,RSB_OP_FLAG_DIAGONAL_OVERRIDE_EXPLICIT_SERIAL RSB_INNER_NRHS_SPSV_ARGS_IDS));
+			RSB_DO_ERROR_CUMULATE(errval,rsb_do_spsv_recursive_serial(RSB_SUBMATRIX_INDEX(mtxAp,1,1),offx,offy,&pone[0],incx,incy,transl,op_flags RSB_INNER_NRHS_SPSV_ARGS_IDS));
+		break;
+		}
+		break;
+		default:
+			errval = RSB_ERR_INTERNAL_ERROR;goto err;
+		break;
+		}
+	}
+#else
+	{
+		rsb_submatrix_idx_t i,j;
+		struct rsb_mtx_t * submatrix=NULL;
+		void*offy=NULL; const void *offx=NULL;
+
+		if( RSB_DOES_NOT_TRANSPOSE( transl ) )
+		{
+		RSB_SUBMATRIX_FOREACH(mtxAp,submatrix,i,j)
+		if(submatrix)
+		{
+//			RSB_STDOUT("%d %d \n",i,j);
+			rsb_coo_idx_t scoff=submatrix->coff; rsb_coo_idx_t sroff=submatrix->roff;
+
+			RSB_DEBUG_ASSERT(scoff>=0);
+			RSB_DEBUG_ASSERT(sroff>=0);
+
+			//RSB_ERROR("-> 0x%p %d %d (%d) (%d)\n",submatrix,submatrix->roff,submatrix->coff,submatrix->nnz, rsb__is_recursive_matrix(mtxAp->flags));
+
+				offy=((rsb_byte_t*)y)+(mtxAp->el_size*sroff)*incy,offx=((const rsb_byte_t*)x)+(mtxAp->el_size*scoff)*incx;
+			if(submatrix->roff==submatrix->coff && (i==((RSB_DOES_NOT_TRANSPOSE(transl))?0:1)) )
+			{
+				RSB_DO_ERROR_CUMULATE(errval,rsb_do_spsv_recursive_serial(submatrix,x,y,alphap,incx,incy,transl,op_flags RSB_INNER_NRHS_SPSV_ARGS_IDS));
+			}
+			else
+			if(submatrix->roff==submatrix->coff && (i==((RSB_DOES_NOT_TRANSPOSE(transl))?1:0)) )
+			{
+				RSB_DO_ERROR_CUMULATE(errval,rsb_do_spsv_recursive_serial(submatrix,x,y,alphap,incx,incy,transl,op_flags RSB_INNER_NRHS_SPSV_ARGS_IDS));
+			}
+			else
+			//if(i==((RSB_DOES_NOT_TRANSPOSE(transl))?1:0))
+			if(i==1 && op_flags != RSB_OP_FLAG_INFINITE_PARALLELISM_EMULATE)
+			{
+			//	RSB_STDOUT("offx %g offy %g\n",*(double*)offx,*(double*)offy);
+//				RSB_STDOUT("spmv %d %d\n",submatrix->roff,submatrix->coff);
+				/* FIXME : DOES NOT TAKE INTO ACCOUNT INCX,INCY */
+				/* transposition is not relevant, as long as we work with square matrices everywhere */
+			//	if(y!=x)
+			//		RSB_DO_ERROR_CUMULATE(errval,rsb__xcopy(offy,x,0,0,sroff,mtxAp->el_size));
+				// FIXME : the following lines should be equivalent, but they are not. Why?
+				RSB_DO_ERROR_CUMULATE(errval,rsb_do_spmv_recursive_serial(submatrix,offx,offy,&mone[0],NULL,1,1,transl RSB_INNER_NRHS_SPMV_ARGS_IDS));
+			//	RSB_DO_ERROR_CUMULATE(errval,rsb_do_spmv_recursive_parallel(submatrix,offx,offy,&mone[0],NULL,1,1,transl));
+				//RSB_DO_ERROR_CUMULATE(errval,rsb_spmv_unua(submatrix,offx,offy,transl));
+			//	RSB_STDOUT("offx %g offy %g\n",*(double*)offx,*(double*)offy);
+			}
+			if(RSB_SOME_ERROR(errval))
+				goto err;
+		}}
+		else
+		{
+		RSB_SUBMATRIX_FOREACH_REVERSE(mtxAp,submatrix,i,j)
+		{
+		//	RSB_STDOUT("%d %d \n",i,j);
+		if(submatrix)
+		{
+//			RSB_STDOUT("%d %d \n",i,j);
+			rsb_coo_idx_t scoff=submatrix->coff; rsb_coo_idx_t sroff=submatrix->roff;
+
+			RSB_DEBUG_ASSERT(scoff>=0);
+			RSB_DEBUG_ASSERT(sroff>=0);
+
+			offy=((rsb_byte_t*)y)+(mtxAp->el_size*sroff)*incy,offx=((const rsb_byte_t*)x)+(mtxAp->el_size*scoff)*incx;
+			if(submatrix->roff==submatrix->coff && (i==((RSB_DOES_NOT_TRANSPOSE(transl))?0:1)) )
+				RSB_DO_ERROR_CUMULATE(errval,rsb_do_spsv_recursive_serial(submatrix,x,y,alphap,incx,incy,transl,op_flags RSB_INNER_NRHS_SPSV_ARGS_IDS));
+			else
+			if(submatrix->roff==submatrix->coff && (i==((RSB_DOES_NOT_TRANSPOSE(transl))?1:0)) )
+				RSB_DO_ERROR_CUMULATE(errval,rsb_do_spsv_recursive_serial(submatrix,x,y,alphap,incx,incy,transl,op_flags RSB_INNER_NRHS_SPSV_ARGS_IDS));
+			else
+			if(i==1 && op_flags != RSB_OP_FLAG_INFINITE_PARALLELISM_EMULATE)
+			{
+				RSB_DO_ERROR_CUMULATE(errval,rsb_do_spmv_recursive_serial(submatrix,offx,offy,&mone[0],NULL,1,1,transl RSB_INNER_NRHS_SPMV_ARGS_IDS));
+			}
+			if(errval != RSB_ERR_NO_ERROR)goto err;
+		}}
+		}
+	}
+#endif
+	else
+	{
+		void*offy=NULL; const void *offx=NULL;
+		rsb_coo_idx_t scoff=0;
+		rsb_coo_idx_t sroff=0;
+		RSB_DEBUG_ASSERT(scoff>=0);
+		RSB_DEBUG_ASSERT(sroff>=0);
+		offy=((rsb_byte_t*)y)+(mtxAp->el_size*sroff)*incy,offx=((const rsb_byte_t*)x)+(mtxAp->el_size*scoff)*incx;
+		RSB_DO_ERROR_CUMULATE(errval,rsb_do_spsv_terminal(mtxAp,offx,offy,alphap,incx,incy,transl RSB_OUTER_NRHS_SPSV_ARGS_IDS));
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
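+
+/*
+ The recursion above implements block forward/backward substitution: with a
+ lower-triangular 2x2 quadrant splitting
+     [ A00  0   ] [x0]   [b0]
+     [ A10  A11 ] [x1] = [b1]
+ it solves A00*x0 = b0, updates b1 -= A10*x0 via spmv, then solves
+ A11*x1 = b1.  The dense analogue of the same idea, as a sketch (the demo_
+ name is hypothetical):
+*/
+#if 0
+#include <stddef.h>
+
+/* In-place forward substitution on a dense row-major lower-triangular L. */
+static void demo_trsv_lower(const double *L, int ld, int n, double *b)
+{
+	int i, j;
+
+	for(i = 0; i < n; ++i)
+	{
+		double s = b[i];
+
+		for(j = 0; j < i; ++j)
+			s -= L[(size_t)i*ld + j] * b[j];	/* subtract solved components */
+		b[i] = s / L[(size_t)i*ld + i];		/* divide by the diagonal entry */
+	}
+}
+#endif /* 0 */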
+
+rsb_err_t rsb__do_get_submatrices_block_for_get_csr(const struct rsb_mtx_t * mtxAp, struct rsb_translated_matrix_t ** all_leaf_matricesp, rsb_submatrix_idx_t * all_leaf_matrices_np)
+{
+	/**	
+	 * \ingroup gr_internals
+	 * FIXME: rename : csr -> csc
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_submatrix_idx_t all_leaf_matrices_n=0;
+	struct rsb_translated_matrix_t * all_leaf_matrices=NULL;
+
+	all_leaf_matrices_n=mtxAp->all_leaf_matrices_n;
+	all_leaf_matrices = rsb__clone_area(mtxAp->all_leaf_matrices,sizeof(struct rsb_translated_matrix_t)*all_leaf_matrices_n);
+	errval = rsb__sort_array_of_leaf_matrices(NULL,all_leaf_matrices,all_leaf_matrices_n,rsb_op_get_csr);
+
+	*all_leaf_matrices_np=all_leaf_matrices_n;
+	*all_leaf_matricesp=all_leaf_matrices;
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_get_submatrices_for_ussv( const struct rsb_mtx_t * mtxAp, struct rsb_translated_matrix_t ** all_leaf_matricesp, rsb_submatrix_idx_t * all_leaf_matrices_np, rsb_trans_t transT)
+{
+	/**
+	  	\ingroup gr_internals
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_submatrix_idx_t all_leaf_matrices_n=0;
+	struct rsb_translated_matrix_t * all_leaf_matrices=NULL;
+
+	all_leaf_matrices_n=mtxAp->all_leaf_matrices_n;
+	all_leaf_matrices = rsb__clone_area(mtxAp->all_leaf_matrices,sizeof(struct rsb_translated_matrix_t)*all_leaf_matrices_n);
+	rsb__submatrices_exclude_nontriangular(all_leaf_matrices,&all_leaf_matrices_n,mtxAp);
+	errval = rsb__sort_array_of_leaf_matrices_for_ussv(mtxAp,all_leaf_matrices,all_leaf_matrices_n,transT);
+
+	*all_leaf_matrices_np=all_leaf_matrices_n;
+	*all_leaf_matricesp=all_leaf_matrices;
+	RSB_DO_ERR_RETURN(errval)
+}
+
+void rsb__submatrices_exclude_nontriangular(struct rsb_translated_matrix_t * all_leaf_matrices, rsb_submatrix_idx_t * all_leaf_matrices_np, const struct rsb_mtx_t * mtxAp)
+{
+	/**
+	  	\ingroup gr_internals
+	*/
+	rsb_submatrix_idx_t n, all_leaf_matrices_n=0;
+	RSB_DEBUG_ASSERT(mtxAp);
+	RSB_DEBUG_ASSERT(all_leaf_matrices);
+	RSB_DEBUG_ASSERT(all_leaf_matrices_np);
+	if(rsb__is_upper_triangle(mtxAp->flags))
+	{
+		for(n=0;n<mtxAp->all_leaf_matrices_n;++n)
+			if (mtxAp->all_leaf_matrices[n].roff<=mtxAp->all_leaf_matrices[n].coff)
+				all_leaf_matrices[all_leaf_matrices_n++]=mtxAp->all_leaf_matrices[n];
+	}
+	else
+	{
+		for(n=0;n<mtxAp->all_leaf_matrices_n;++n)
+			if (mtxAp->all_leaf_matrices[n].roff>=mtxAp->all_leaf_matrices[n].coff)
+				all_leaf_matrices[all_leaf_matrices_n++]=mtxAp->all_leaf_matrices[n];
+	}
+//	if(all_leaf_matrices_n<mtxAp->all_leaf_matrices_n)
+//	;
+	//;RSB_STDOUT("FIX : discarded %d upper diagonal matrices out of %d \n",mtxAp->all_leaf_matrices_n-all_leaf_matrices_n,mtxAp->all_leaf_matrices_n);
+	*all_leaf_matrices_np=all_leaf_matrices_n;
+}
+
+static rsb_err_t rsb__do_spsv_uxua_recursive_parallel(const struct rsb_mtx_t * mtxAp, const void * x, void * y, const void * alphap, rsb_coo_idx_t incx, rsb_coo_idx_t incy, rsb_trans_t transl, enum rsb_op_flags_t op_flags RSB_INNER_NRHS_SPSV_ARGS)
+{
+	/**
+	  	\ingroup gr_internals
+	 * triangular solve for recursive
+	 *
+	 * each submatrix starting at (i,j) depends on :
+	   
+	   i if i=j
+	   
+	   1 2 3 4 5 6
+	  +-+---------+
+	  +-+-+       | 1
+	  +-+-+-+     | 2
+	  +-+-+-+-+   | 3
+	  +-+-+-+-+-+ | 4
+	  +-+-+-+-+-+-+ 5
+	  +-+-+-+-+-+-+ 6
+
+	  The rows active in an spmv are locked using interval information.
+	  Since the row interval active in the trsv is never concurrently touched by an spmv, no further lock is needed.
+
+	  We need a ...
+
+	  alphap NULL means alpha = 1
+
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+#if	RSB_WANT_VERBOSE_SPSV
+	rsb_time_t sv_time = RSB_CONST_IMPOSSIBLY_BIG_TIME,dp_time = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+#endif /* RSB_WANT_VERBOSE_SPSV */
+	struct rsb_translated_matrix_t * all_leaf_matrices=NULL;	/** NEW, EXPERIMENTAL */
+	struct rsb_rows_lock_struct_t lock;
+	rsb_submatrix_idx_t * deps=NULL;	/** NEW, EXPERIMENTAL */
+	rsb_submatrix_idx_t all_leaf_matrices_n=0;
+	rsb_bool_t backdeps,isupptri;
+	rsb_aligned_t alpha_inv[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_aligned_t mone[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_aligned_t pone[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb__util_set_area_to_converted_integer(&mone[0],mtxAp->typecode,-1);
+	rsb__util_set_area_to_converted_integer(&pone[0],mtxAp->typecode,+1);
+
+	RSB_BZERO_P(&lock);
+	if( !mtxAp)
+	{errval = RSB_ERR_GENERIC_ERROR;goto err;}
+	if( !rsb__is_recursive_matrix(mtxAp->flags) || !mtxAp)
+	{errval = RSB_ERR_GENERIC_ERROR;goto err;}
+	if(x!=y)/* FIXME */
+	{errval = RSB_ERR_GENERIC_ERROR;goto err;}
+	
+	isupptri = rsb__is_upper_triangle(mtxAp->flags);
+	backdeps = RSB_BOOL_XOR(RSB_DOES_TRANSPOSE(transl),isupptri);
+	rsb__util_set_area_to_negated_fraction(alpha_inv,alphap,mtxAp->typecode);
+
+#if	RSB_WANT_VERBOSE_SPSV
+	dp_time = rsb_time();
+#endif /* RSB_WANT_VERBOSE_SPSV */
+
+	errval = rsb__do_get_submatrices_for_ussv(mtxAp,&all_leaf_matrices,&all_leaf_matrices_n,transl);
+	deps = rsb__malloc(sizeof(rsb_submatrix_idx_t)*all_leaf_matrices_n);
+	if(RSB_SOME_ERROR(errval) || !all_leaf_matrices || !deps)
+	{errval = RSB_ERR_ENOMEM;goto err;}
+
+#if 0
+	{
+		/* printout */
+		rsb_submatrix_idx_t n;
+			for(n=0;n<all_leaf_matrices_n;++n)
+				RSB_STDOUT("got %d/%d:%d~%d,%d~%d\n",
+					n,all_leaf_matrices_n,
+					all_leaf_matrices[n].roff,all_leaf_matrices[n].mtxlp->nr+all_leaf_matrices[n].roff,
+					all_leaf_matrices[n].coff,all_leaf_matrices[n].mtxlp->nc+all_leaf_matrices[n].coff);
+	}
+#endif
+#if 0
+	{
+	rsb_submatrix_idx_t n;
+	rsb_coo_idx_t s=0;
+	for(n=0;n<all_leaf_matrices_n;++n)
+		if(all_leaf_matrices[n].roff==all_leaf_matrices[n].coff)
+		{
+			s+=all_leaf_matrices[n].mtxlp->nr;
+//			RSB_STDOUT("%d/%d [%d~%d,%d~%d] (on diag)\n",n,all_leaf_matrices_n,
+//				all_leaf_matrices[n].roff, all_leaf_matrices[n].roff+all_leaf_matrices[n].mtxlp->nr-1,
+//				all_leaf_matrices[n].coff, all_leaf_matrices[n].coff+all_leaf_matrices[n].mtxlp->nc-1);
+		}
+		else
+			;
+//			RSB_STDOUT("%d/%d [%d~%d,%d~%d] (not on diag)\n",n,all_leaf_matrices_n,
+//				all_leaf_matrices[n].roff, all_leaf_matrices[n].roff+all_leaf_matrices[n].mtxlp->nr-1,
+//				all_leaf_matrices[n].coff, all_leaf_matrices[n].coff+all_leaf_matrices[n].mtxlp->nc-1);
+	if(mtxAp->nr != s)
+	{	RSB_STDOUT("FATAL : sum of diagonal matrices rows %d != %d \n",s,mtxAp->nr);
+		goto err;
+}
+	}
+#endif
+#if 0
+	if(RSB_DOES_TRANSPOSE(transl))
+	{
+		rsb_submatrix_idx_t n; for(n=0;n<all_leaf_matrices_n;++n) {	
+			RSB_SWAP(rsb_coo_idx_t,all_leaf_matrices[n].roff,all_leaf_matrices[n].coff);
+		}
+	}
+#endif
+
+	if(0)
+	if(RSB_DOES_TRANSPOSE(transl))
+	{
+		rsb_submatrix_idx_t n;
+		for(n=0;n<all_leaf_matrices_n;++n)
+		{
+			all_leaf_matrices[n].roff=mtxAp->nc-(all_leaf_matrices[n].coff+all_leaf_matrices[n].mtxlp->nc*1);
+			all_leaf_matrices[n].coff=mtxAp->nr-(all_leaf_matrices[n].roff+all_leaf_matrices[n].mtxlp->nr*1);
+			all_leaf_matrices[n].nr=all_leaf_matrices[n].mtxlp->nc;
+			all_leaf_matrices[n].nc=all_leaf_matrices[n].mtxlp->nr;
+		}
+	}
+
+	if(errval != RSB_ERR_NO_ERROR)
+		goto err;
+#if 0
+	{
+		/* printout */
+		rsb_submatrix_idx_t n;
+			for(n=0;n<all_leaf_matrices_n;++n)
+				RSB_STDOUT("got %d/%d:%d~%d,%d~%d\n",
+					n,all_leaf_matrices_n,
+//					all_leaf_matrices[n].mtxlp->roff,all_leaf_matrices[n].mtxlp->nr+all_leaf_matrices[n].mtxlp->roff,
+//					all_leaf_matrices[n].mtxlp->coff,all_leaf_matrices[n].mtxlp->nc+all_leaf_matrices[n].mtxlp->coff);
+					all_leaf_matrices[n].roff,all_leaf_matrices[n].nr+all_leaf_matrices[n].roff,
+					all_leaf_matrices[n].coff,all_leaf_matrices[n].nc+all_leaf_matrices[n].coff);
+	}
+#endif
+#if 1
+	{
+		rsb_submatrix_idx_t n; 
+		for(n=1;n<all_leaf_matrices_n;++n)
+		{
+			rsb_submatrix_idx_t np=n-1;
+			if(all_leaf_matrices[n].roff==all_leaf_matrices[n].coff)
+			{
+				while(np>0 && all_leaf_matrices[np].roff!=all_leaf_matrices[np].coff)
+					--np;
+				//for(;np<n;++np)
+				//	RSB_STDOUT("%d vs %d ? %d\n",n,np,rsb__compar_rcsr_matrix_for_spsvl(all_leaf_matrices+np,all_leaf_matrices+n));
+			}
+			else
+			{
+				while(np>0
+				 && !(
+					(  all_leaf_matrices[np].roff==all_leaf_matrices[np].coff ) /*&&
+					( (all_leaf_matrices[np].coff+ all_leaf_matrices[np].mtxlp->nc)>=
+					  (all_leaf_matrices[n ].coff+ all_leaf_matrices[n ].mtxlp->nc) )*/
+					))
+					--np;
+			}
+			deps[n]=np;
+//#define RSB__TRSV_OUT__ 1
+			if(RSB__TRSV_OUT__)RSB_STDOUT("dep: %d ->  %d\n",n,np);
+		}
+	}
+#endif
+#if 0
+	if(RSB_DOES_TRANSPOSE(transl))
+	{
+		rsb_submatrix_idx_t n; for(n=0;n<all_leaf_matrices_n;++n) {	
+			RSB_SWAP(rsb_coo_idx_t,all_leaf_matrices[n].roff,all_leaf_matrices[n].coff);
+		}
+#if 1
+		for(n=0;n<all_leaf_matrices_n/2;++n) {	RSB_SWAP(struct rsb_translated_matrix_t,all_leaf_matrices[n],all_leaf_matrices[(all_leaf_matrices_n-1)-n]);}
+	}
+#endif
+#endif
+#if 0
+	{
+	rsb_submatrix_idx_t n;
+	for(n=0;n<all_leaf_matrices_n;++n)
+		if(all_leaf_matrices[n].roff<all_leaf_matrices[n].coff)
+		{
+			RSB_ERROR("all_leaf_matrices[n].roff<all_leaf_matrices[n].coff (%d<%d) in a lower triangular matrix!\n",
+			all_leaf_matrices[n].roff,all_leaf_matrices[n].coff);
+			{errval = RSB_ERR_GENERIC_ERROR;goto err;}
+		}
+	}
+#endif
+#if 0
+	{
+	rsb_submatrix_idx_t n,ad=0,d=0,pad=0,pda=0;
+//	int cppad=0,ncppad=0;
+	for(n=0;n<mtxAp->all_leaf_matrices_n;++n)
+	{
+		if(all_leaf_matrices[n].roff==all_leaf_matrices[n].coff)
+		{
+			rsb_submatrix_idx_t nn;
+			rsb_submatrix_idx_t dr=all_leaf_matrices[n].roff+all_leaf_matrices[n].mtxlp->nr;
+			++d;
+			for(nn=n;nn<mtxAp->all_leaf_matrices_n;++nn)
+			if(
+				all_leaf_matrices[nn].roff >= dr &&
+				all_leaf_matrices[nn].roff > all_leaf_matrices[nn].coff &&
+				all_leaf_matrices[nn].coff + all_leaf_matrices[nn].mtxlp->nc <= dr &&
+				1
+			)
+			{
+				pad++;	/* parallelizable anti diagonal */
+			}
+			for(nn=n+1;nn<mtxAp->all_leaf_matrices_n && 
+				all_leaf_matrices[nn].roff > all_leaf_matrices[nn].coff ;++nn)
+				pda++;
+		}
+	}
+		RSB_STDOUT(
+			"    diagonal blocks : %d\n"
+			"antidiagonal blocks : %d\n"
+			"prediagonal blocks : %d\n"
+	//		"antidiagonal blocks on the critical path : %d\n"
+	//		"antidiagonal blocks not on the critical path : %d\n"
+			,d,pad,pda//,cppad,ncppad
+			);
+	}
+#endif
+
+	errval = rsb__do_lock_init(&lock,rsb_global_session_handle.rsb_want_threads,all_leaf_matrices_n,mtxAp,op_flags);
+	if(errval != RSB_ERR_NO_ERROR)
+		goto err;
+
+
+{
+#if RSB_ENABLE_INNER_NRHS_SPSV
+	const size_t leny=(mtxAp->el_size*outnri);
+	rsb_int_t nrhsi=0;
+	for(nrhsi=0;nrhsi<nrhs;++nrhsi)
+#endif /* RSB_ENABLE_INNER_NRHS_SPSV */
+	{
+#if RSB_ENABLE_INNER_NRHS_SPSV
+		void      *out=((      rsb_byte_t*)y)+(leny*nrhsi);
+#else /* RSB_ENABLE_INNER_NRHS_SPSV */
+		void      *out=((      rsb_byte_t*)y);
+#endif /* RSB_ENABLE_INNER_NRHS_SPSV */
+		RSB_CBLAS_X_SCAL_SPSV(mtxAp->typecode,rsb_do_get_rows_of(mtxAp,transl),alphap,out,incy);
+
+#if 1
+	/* corner triangle solve */
+	if(all_leaf_matrices_n)
+	{
+		rsb_submatrix_idx_t n=0;
+		void * offy=((rsb_byte_t*)out)+(mtxAp->el_size*all_leaf_matrices[n].roff)*incy;
+		RSB_DO_ERROR_CUMULATE(errval,rsb_do_spsv_terminal(all_leaf_matrices[n].mtxlp,offy,offy,&pone[0],incy,incy,transl RSB_DEFAULT_OUTER_NRHS_SPMV_ARGS));
+		rsb__do_lock_get(&lock,0,all_leaf_matrices[n].mtxlp->nr+all_leaf_matrices[n].roff,all_leaf_matrices[n].mtxlp->nr,all_leaf_matrices[n].coff,all_leaf_matrices[n].mtxlp->nc,n,transl);
+		rsb__do_lock_release(&lock,0);
+		lock.dm=1; /* first matrix processed */
+		if(!backdeps)
+			lock.dr=all_leaf_matrices[n].mtxlp->nr+all_leaf_matrices[n].roff;
+		else
+			lock.dr=all_leaf_matrices[n].roff;
+	}
+#else
+	if(all_leaf_matrices_n)
+	{
+		if(backdeps)
+			lock.dr=all_leaf_matrices[0].mtxlp->nr+all_leaf_matrices[0].roff;
+		else
+			lock.dr=all_leaf_matrices[0].mtxlp->nr;
+	}
+#endif
+	}
+}
+
+#if 	RSB_WANT_VERBOSE_SPSV
+	sv_time = rsb_time();
+	dp_time=-dp_time+sv_time;
+#endif /* RSB_WANT_VERBOSE_SPSV */
+	#pragma omp parallel reduction(|:errval) shared(lock,all_leaf_matrices,mtxAp)  RSB_NTC 
+	{
+	const rsb_thr_t th_id = omp_get_thread_num();
+	rsb_submatrix_idx_t n=0;
+	rsb_submatrix_idx_t dm=0;
+	#pragma omp barrier
+
+	if(th_id >= rsb_global_session_handle.rsb_want_threads)
+		goto skip;
+
+	#pragma omp critical (rsb_spsv_crs)
+	{ dm=lock.dm; }
+
+again:
+	for(n=0;n<all_leaf_matrices_n;++n)
+	//if(!RSB_BITMAP_GET(lock.bmap,1,lock.subms,0,n))
+	{
+		struct rsb_mtx_t *submatrix=all_leaf_matrices[n].mtxlp;
+		const rsb_byte_t* trhs=((rsb_byte_t*)y)+mtxAp->el_size*all_leaf_matrices[n].coff*incx;
+		rsb_byte_t* tout=((rsb_byte_t*)y)+mtxAp->el_size*all_leaf_matrices[n].roff*incy;
+		enum rsb_op_t op = rsb_op_nop;
+		rsb_coo_idx_t roff=all_leaf_matrices[n].roff;
+		rsb_coo_idx_t coff=all_leaf_matrices[n].coff;
+
+//#define RSB__TRSV_OUT__ 1
+		#pragma omp critical (rsb_spsv_crs)
+		{
+		if( th_id==0 )
+		{
+			if(	
+			 	(
+				 	(( backdeps) && coff+all_leaf_matrices[n].mtxlp->nc==lock.dr ) ||
+				 	((!backdeps) && roff==lock.dr ) 
+				)
+				&& roff==coff  && 
+					(!RSB_BITMAP_GET(lock.bmap,1,lock.subms,0,n))
+				)
+				{
+					rsb_submatrix_idx_t np=deps[n];
+					while(RSB_BITMAP_GET(lock.bmap,1,lock.subms,0,np))
+					{
+						++np;
+						if(RSB__TRSV_OUT__) RSB_STDOUT("%d -> %d\n",n,np);
+					}
+					if(np==n && (rsb__do_lock_get(&lock,th_id,roff,all_leaf_matrices[n].mtxlp->nr,coff,all_leaf_matrices[n].mtxlp->nc,n,transl)==RSB_BOOL_TRUE))
+						op = rsb_op_spsvl;
+				}
+		}
+
+		if(op == rsb_op_nop)
+		{
+//			if(RSB__TRSV_OUT)RSB_STDOUT("%d@%d %d %d %d %d\n",n,th_id,op,all_leaf_matrices[n].roff,all_leaf_matrices[n].coff,omp_get_num_threads());
+			if(
+			 	(((!backdeps) && 
+				(
+				 (roff >=lock.dr && !isupptri &&
+				roff != coff &&
+				coff + all_leaf_matrices[n].mtxlp->nc <= lock.dr) ||
+				 (coff >=lock.dr && isupptri &&
+				roff != coff &&
+				roff + all_leaf_matrices[n].mtxlp->nr <= lock.dr)
+				)
+			       	) 
+				 ||
+			  	(( backdeps) && 
+				(( isupptri &&(coff >= lock.dr)) ||
+				((!isupptri)&&(roff >= lock.dr))) &&
+			       	roff != coff //&&
+				//coff + all_leaf_matrices[n].mtxlp->nc <=lock.dr 
+				))
+			       	&& 
+#if RSB_WANT_BOUNDED_BOXES_SPSV
+				(rsb__do_lock_get(&lock,th_id,all_leaf_matrices[n].mtxlp->broff,all_leaf_matrices[n].mtxlp->bm,all_leaf_matrices[n].mtxlp->bcoff,all_leaf_matrices[n].mtxlp->bk,n,transl)==RSB_BOOL_TRUE)&&
+#else /* RSB_WANT_BOUNDED_BOXES_SPSV */
+				(rsb__do_lock_get(&lock,th_id,all_leaf_matrices[n].roff,all_leaf_matrices[n].mtxlp->nr,all_leaf_matrices[n].coff,all_leaf_matrices[n].mtxlp->nc,n,transl)==RSB_BOOL_TRUE)&&
+#endif /* RSB_WANT_BOUNDED_BOXES_SPSV */
+				1
+				)
+				op = rsb_op_spmv ;
+		}
+		}
+//			if(RSB__TRSV_OUT)RSB_STDOUT("%d@%d %d %d %d %d\n",n,th_id,op,all_leaf_matrices[n].roff,all_leaf_matrices[n].coff,omp_get_num_threads());
+		if(RSB__TRSV_OUT__)dm=lock.dm;
+		if(RSB__TRSV_OUT__)
+		if(
+				(!RSB_BITMAP_GET(lock.bmap,1,lock.subms,0,n) && op == rsb_op_nop)||
+				( RSB_BITMAP_GET(lock.bmap,1,lock.subms,0,n) && op != rsb_op_nop)
+				)
+		RSB_STDOUT("%d/%d [%d~%d,%d~%d] on th.%d -> op %d (dr:%d) (done:%d)\n",n,all_leaf_matrices_n,
+				all_leaf_matrices[n].roff, all_leaf_matrices[n].roff+submatrix->nr-1,
+				all_leaf_matrices[n].coff, all_leaf_matrices[n].coff+submatrix->nc-1,
+				th_id,op,lock.dr,dm);
+
+		switch(op){
+		case rsb_op_spsvl:
+		{
+			/* diagonal blocks */
+			if(RSB__TRSV_OUT__)RSB_STDOUT("spsv on %d on %d \n",n,th_id);
+			RSB_DO_ERROR_CUMULATE(errval,rsb_do_spsv_terminal(submatrix,trhs,tout,&pone[0],incx,incy,transl RSB_OUTER_NRHS_SPSV_ARGS_IDS));
+			//RSB_DO_ERROR_CUMULATE(errval,rsb_do_spsv_terminal(submatrix,trhs,tout,alphap,incx,incy,transl RSB_OUTER_NRHS_SPSV_ARGS_IDS));
+                       	#pragma omp critical (rsb_spsv_crs)
+			{
+				if(!backdeps)
+			       		lock.dr=submatrix->nr+roff;
+				else
+			       		lock.dr=submatrix->roff;
+				rsb__do_lock_release(&lock,th_id); ++lock.dm; dm=lock.dm;
+			}
+		}
+		break;
+		case rsb_op_spmv:
+		{
+			/* off-diagonal update blocks */
+			if(RSB__TRSV_OUT__)RSB_STDOUT("spmv on %d on %d \n",n,th_id);
+			//RSB_DO_ERROR_CUMULATE(errval,rsb_spmv_unua(submatrix,trhs,tout,transl));
+			RSB_DO_ERROR_CUMULATE(errval,rsb_do_spmv_non_recursive(submatrix,trhs,tout,&mone[0],NULL,incx,incy,transl RSB_OUTER_NRHS_SPSV_ARGS_IDS));
+                       	#pragma omp critical (rsb_spsv_crs)
+			{rsb__do_lock_release(&lock,th_id);++lock.dm;dm=lock.dm;}
+		}
+		break;
+		}
+		//if(errval != RSB_ERR_NO_ERROR)
+		//	break;
+		//if(op != rsb_op_nop)
+	
+	}
+	
+	#pragma omp critical (rsb_spsv_crs)
+	{ dm=lock.dm; }
+	if(RSB__TRSV_OUT__)RSB_STDOUT("on thread %d : done %d/%d \n",th_id,lock.dm,all_leaf_matrices_n);
+	if(dm<all_leaf_matrices_n
+#if RSB_WANT_EARLY_PARALLEL_REGION_JUMPOUT_SPSV
+			&& ((all_leaf_matrices_n-dm)>th_id)
+#endif /* RSB_WANT_EARLY_PARALLEL_REGION_JUMPOUT_SPSV */
+	)goto again;
+skip:
+		RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS;
+		/* FIXME : could place a barrier here. */
+		#pragma omp barrier
+		/* now we can leave the parallel region safely */
+	}
+err:
+	RSB_CONDITIONAL_FREE(all_leaf_matrices);
+	RSB_CONDITIONAL_FREE(deps);
+	RSB_DO_ERROR_CUMULATE(errval,rsb__do_lock_free(&lock));
+#if	RSB_WANT_VERBOSE_SPSV
+	sv_time=-sv_time+rsb_time();
+	RSB_INFO("SPSV: solve time:%lg   deps time:%lg\n",sv_time,dp_time);
+#endif /* RSB_WANT_VERBOSE_SPSV */
+#else /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+	errval = RSB_ERR_UNSUPPORTED_OPERATION;
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_spsv_general(rsb_trans_t transl, const void * alphap, const struct rsb_mtx_t * mtxAp, const void * x, rsb_coo_idx_t incx, void * y, rsb_coo_idx_t incy, enum rsb_op_flags_t op_flags RSB_INNER_NRHS_SPSV_ARGS)
+{
+	/**
+	  	\ingroup gr_internals
+	 	computes \f$y \leftarrow \alpha op(A)^{-1} \cdot y \f$
+
+		Entry function for SPSV.
+		alphap can be NULL.
+
+		FIXME : the incx and incy checks should be stricter
+		FIXME : the x!=y case is only emulated: x is first copied onto y, then the solve runs in place
+	 * \return RSB_ERR_NO_ERROR on correct operation, an error code (see \ref errors_section) otherwise.
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!mtxAp || !y || !x || transl == RSB_INVALID_FLAGS || !rsb__is_square(mtxAp) || incx<1 || incy<1)
+	{
+		errval = RSB_ERR_BADARGS;goto err;
+	}
+	if(!RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_TRIANGULAR))
+	{
+		errval = RSB_ERR_BADARGS;goto err;
+	}
+	
+#if 1
+	if(x!=y)
+	{
+			// FIXME: should parallelize this
+		rsb_int_t nrhsi = 0;
+#if RSB_ENABLE_INNER_NRHS_SPSV
+		for (nrhsi=0;nrhsi<nrhs;++nrhsi)
+#endif /* RSB_ENABLE_INNER_NRHS_SPSV */
+		{
+			//RSB_DO_ERROR_CUMULATE(errval,rsb__xcopy_strided_typed(y,x,0,0,mtxAp->nr,mtxAp->typecode,incy,incx));
+			RSB_DO_ERROR_CUMULATE(errval,rsb__cblas_Xcopy(mtxAp->typecode,mtxAp->nr,RSB_TYPED_OFF_PTR(mtxAp->typecode,x,nrhsi*(rhsnri)*incx),incx,RSB_TYPED_OFF_PTR(mtxAp->typecode,y,nrhsi*(outnri)*incy),incy));
+		}
+	}
+#endif
+
+	if(op_flags == RSB_OP_FLAG_INFINITE_PARALLELISM_EMULATE)
+	{
+		errval = rsb_do_spsv_recursive_serial(mtxAp,y,y,alphap,incy,incy,transl,op_flags RSB_INNER_NRHS_SPSV_ARGS_IDS);
+		goto done;
+	}
+	if( !rsb__is_recursive_matrix(mtxAp->flags))
+		errval = rsb_do_spsv_terminal(mtxAp,y,y,alphap,incy,incy,transl RSB_OUTER_NRHS_SPSV_ARGS_IDS);
+	else
+	{
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+		if(op_flags == RSB_OP_FLAG_WANT_SERIAL)
+			errval = rsb_do_spsv_recursive_serial(mtxAp,y,y,alphap,incy,incy,transl,op_flags RSB_INNER_NRHS_SPSV_ARGS_IDS);
+		else
+			errval = rsb__do_spsv_uxua_recursive_parallel(mtxAp,y,y,alphap,incy,incy,transl,op_flags RSB_INNER_NRHS_SPSV_ARGS_IDS);
+#else /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+		errval = rsb_do_spsv_recursive_serial(mtxAp,y,y,alphap,incy,incy,transl,op_flags RSB_INNER_NRHS_SPSV_ARGS_IDS);
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+	}
+	goto done;
+done:
+#if 0
+                {
+			/* FIXME : won't work with when incx or incy is not 1 */
+			rsb_aligned_t checksum[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+			RSB_CBLAS_X_SCAL_SPSV(mtxAp->typecode,1,NULL,checksum,1);
+			rsb_nnz_idx_t n;
+			rsb__util_vector_sum(checksum,y,mtxAp->typecode,mtxAp->nr);
+			RSB_STDOUT("#spsv checksum:\n");
+			rsb__debug_print_value(checksum,typecode);
+			RSB_STDOUT("\n");
+                }
+#endif
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_spsv(rsb_trans_t transT, const void * alphap, const struct rsb_mtx_t * mtxTp, const void * Xp, rsb_coo_idx_t incX, void * Yp, rsb_coo_idx_t incY)
+{
+	return rsb__do_spsv_general(transT,alphap,mtxTp,Xp,incX,Yp,incY,RSB_OP_FLAG_DEFAULT RSB_DEFAULT_OUTER_NRHS_SPSV_ARGS	);
+}
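+/*
+ * Usage sketch (illustrative only; mtxTp, Y and alpha are hypothetical
+ * locals). For a triangular matrix of doubles with the right hand side
+ * already stored in Y, a forward solve Y := alpha * op(T)^{-1} * Y could read:
+ *
+ *	double alpha = 1.0;
+ *	rsb_err_t errval = rsb__do_spsv(RSB_TRANSPOSITION_N, &alpha, mtxTp, Y, 1, Y, 1);
+ *
+ * Passing Y as both operands avoids the initial copy which
+ * rsb__do_spsv_general() otherwise performs when x != y.
+ */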
+
+/* @endcond */
diff --git a/rsb_spsv.h b/rsb_spsv.h
new file mode 100644
index 0000000..347264d
--- /dev/null
+++ b/rsb_spsv.h
@@ -0,0 +1,56 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains functions for sparse triangular solve.
+ * */
+
+#ifndef RSB_SPTRSV_H_INCLUDED
+#define RSB_SPTRSV_H_INCLUDED
+#include "rsb_internals.h"		/* */
+
+/* #define RSB_ENABLE_INNER_NRHS_SPSV 1 */
+#define RSB_ENABLE_INNER_NRHS_SPSV RSB_ENABLE_INNER_NRHS_SPMV 
+#if RSB_ENABLE_INNER_NRHS_SPSV
+#define RSB_INNER_NRHS_SPSV_ARGS	,const rsb_int_t nrhs, /*const size_t outtot, const size_t rhstot,*/ const size_t outnri, const size_t rhsnri
+#define RSB_INNER_NRHS_SPSV_ARGS_IDS	,nrhs/*,outtot,rhstot*/,outnri,rhsnri
+#define RSB_OUTER_NRHS_SPSV_ARGS	,const rsb_int_t nrhs, const size_t outnri, const size_t rhsnri
+#define RSB_OUTER_NRHS_SPSV_ARGS_IDS	,nrhs,outnri,rhsnri
+#else
+#define RSB_INNER_NRHS_SPSV_ARGS
+#define RSB_INNER_NRHS_SPSV_ARGS_IDS
+#define RSB_OUTER_NRHS_SPSV_ARGS	/* empty: the multiple-RHS arguments are compiled out */
+#define RSB_OUTER_NRHS_SPSV_ARGS_IDS
+#endif /* RSB_ENABLE_INNER_NRHS_SPSV */
+
+#define RSB_DEFAULT_INNER_NRHS_SPSV_ARGS	,1,/*0,0,*/0,0
+#define RSB_DEFAULT_OUTER_NRHS_SPSV_ARGS	,1,0,0
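+/*
+ * Expansion sketch (illustrative only): with RSB_ENABLE_INNER_NRHS_SPSV
+ * enabled, a prototype like
+ *	rsb_err_t f(int a RSB_INNER_NRHS_SPSV_ARGS);
+ * expands to
+ *	rsb_err_t f(int a, const rsb_int_t nrhs, const size_t outnri, const size_t rhsnri);
+ * and a single-RHS call site may append RSB_DEFAULT_INNER_NRHS_SPSV_ARGS,
+ * which reduces to ",1,0,0"; with the feature disabled, the argument macros
+ * expand to nothing.
+ */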
+
+rsb_err_t rsb__do_get_submatrices_for_ussv( const struct rsb_mtx_t * mtxAp, struct rsb_translated_matrix_t ** all_leaf_matricesp, rsb_submatrix_idx_t * all_leaf_matrices_np, rsb_trans_t transT);
+void rsb__submatrices_exclude_nontriangular(struct rsb_translated_matrix_t * all_leaf_matrices, rsb_submatrix_idx_t * all_leaf_matrices_np, const struct rsb_mtx_t * mtxAp);
+rsb_err_t rsb__do_spsv(rsb_trans_t transT, const void * alphap, const struct rsb_mtx_t * mtxTp, const void * Xp, rsb_coo_idx_t incX, void * Yp, rsb_coo_idx_t incY);
+rsb_err_t rsb__do_spsv_general(rsb_trans_t transl, const void * alphap, const struct rsb_mtx_t * mtxAp, const void * x, rsb_coo_idx_t incx, void * y, rsb_coo_idx_t incy, enum rsb_op_flags_t op_flags RSB_INNER_NRHS_SPSV_ARGS);
+rsb_err_t rsb__do_get_submatrices_block_for_get_csr(const struct rsb_mtx_t * mtxAp, struct rsb_translated_matrix_t ** all_leaf_matricesp, rsb_submatrix_idx_t * all_leaf_matrices_np);
+#endif /* RSB_SPTRSV_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_src.c b/rsb_src.c
new file mode 100644
index 0000000..c6dca1e
--- /dev/null
+++ b/rsb_src.c
@@ -0,0 +1,419 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains searching functions.
+ * */
+#include "rsb_internals.h"
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+rsb_nnz_idx_t rsb__nnz_split_hcoo_bsearch(const rsb_half_idx_t *A, const rsb_half_idx_t S,const rsb_nnz_idx_t n)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+ 	 * \return the index of the first element which is >= S; n if no such element exists; 0 if n<1
+	 *
+	 * Performs a binary search in the given array (assumed sorted in
+  	 * nondecreasing order) to find the first element which is >= S, and returns its index.
+	 * \note : n<1 yields 0, not an error
+	*/
+	register rsb_nnz_idx_t l=0,h=n-1,mid;
+
+	if(n<1)
+		return 0;
+
+	if( S > A[h]  )
+		return n;/* no such element */
+
+	if( A[l]>=S )/* the point we look for could be before */
+		return l;
+
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(S));
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(n>0));
+
+	do
+	{
+		mid=l + ((h+1)-l)/2;
+
+		RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(A[mid]));
+		RSB_DEBUG_ASSERT( A[mid] >= A[0] );
+		RSB_DEBUG_ASSERT( A[mid] <= A[n-1] );
+
+//		RSB_INFO("hop %d, at %d\n",hop,base+hop);
+//		RSB_INFO("h %d l %d m %d\n",h,l,mid);
+		if( A[mid]<S )/* the point we search is after this */
+		{
+			l=mid;
+//		RSB_INFO("*+\n");
+		}
+		else
+		if( A[mid]>=S )/* the point we look for could be before */
+		{
+			if(h==mid)
+				goto ok;
+			h=mid;
+//			RSB_INFO("*-\n");
+		}
+	}
+	while(RSB_LIKELY(l!=h));
+	
+	ok:
+
+//	RSB_INFO(" \n");
+
+	return mid;
+}
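+/*
+ * Worked example (illustrative only): with A = {1,3,3,7} and n = 4,
+ * rsb__nnz_split_hcoo_bsearch(A,3,4) yields 1 (the first element >= 3),
+ * rsb__nnz_split_hcoo_bsearch(A,8,4) yields 4 == n (no such element), and
+ * rsb__nnz_split_hcoo_bsearch(A,0,4) yields 0 (A[0] is already >= 0).
+ * The two sibling routines below behave identically on their index types.
+ */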
+
+rsb_nnz_idx_t rsb__nnz_split_nnz_bsearch(const rsb_nnz_idx_t*A,const rsb_nnz_idx_t S,const rsb_nnz_idx_t n)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+ 	 * \return the index of the first element which is >= S; n if no such element exists; 0 if n<1
+	 *
+	 * Performs a binary search in the given array (assumed sorted in
+  	 * nondecreasing order) to find the first element which is >= S, and returns its index.
+	 * \note : n<1 yields 0, not an error
+	*/
+	register rsb_nnz_idx_t l=0,h=n-1,mid;
+
+	if(n<1)
+		return 0;
+
+	if( S > A[h]  )
+		return n;/* no such element */
+
+	if( A[l]>=S )/* the point we look for could be before */
+		return l;
+
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(S));
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(n>0));
+
+	do
+	{
+		mid=l + ((h+1)-l)/2;
+
+		RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(A[mid]));
+		RSB_DEBUG_ASSERT( A[mid] >= A[0] );
+		RSB_DEBUG_ASSERT( A[mid] <= A[n-1] );
+
+//		RSB_INFO("hop %d, at %d\n",hop,base+hop);
+//		RSB_INFO("h %d l %d m %d\n",h,l,mid);
+		if( A[mid]<S )/* the point we search is after this */
+		{
+			l=mid;
+//		RSB_INFO("*+\n");
+		}
+		else
+		if( A[mid]>=S )/* the point we look for could be before */
+		{
+			if(h==mid)
+				goto ok;
+			h=mid;
+//			RSB_INFO("*-\n");
+		}
+	}
+	while(RSB_LIKELY(l!=h));
+	
+	ok:
+
+//	RSB_INFO(" \n");
+
+	return mid;
+}
+
+rsb_nnz_idx_t rsb__nnz_split_coo_bsearch(const rsb_coo_idx_t*A,const rsb_coo_idx_t S,const rsb_nnz_idx_t n)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+ 	 * \return the index of the first element which is >= S; n if no such element exists; 0 if n<1
+	 *
+	 * Performs a binary search in the given array (assumed sorted in
+  	 * nondecreasing order) to find the first element which is >= S, and returns its index.
+	 * \note : n<1 yields 0, not an error
+	*/
+	register rsb_nnz_idx_t l=0,h=n-1,mid;
+
+	if(n<1)
+		return 0;
+
+	if( S > A[h]  )
+		return n;/* no such element */
+
+	if( A[l]>=S )/* the point we look for could be before */
+		return l;
+
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(S));
+	RSB_DEBUG_ASSERT(RSB_IS_VALID_NNZ_INDEX(n>0));
+
+	do
+	{
+		mid=l + ((h+1)-l)/2;
+
+		RSB_DEBUG_ASSERT(RSB_IS_VALID_COO_INDEX(A[mid]));
+		RSB_DEBUG_ASSERT( A[mid] >= A[0] );
+		RSB_DEBUG_ASSERT( A[mid] <= A[n-1] );
+
+//		RSB_INFO("hop %d, at %d\n",hop,base+hop);
+//		RSB_INFO("h %d l %d m %d\n",h,l,mid);
+		if( A[mid]<S )/* the point we search is after this */
+		{
+			l=mid;
+//		RSB_INFO("*+\n");
+		}
+		else
+		if( A[mid]>=S )/* the point we look for could be before */
+		{
+			if(h==mid)
+				goto ok;
+			h=mid;
+//			RSB_INFO("*-\n");
+		}
+	}
+	while(RSB_LIKELY(l!=h));
+	
+	ok:
+
+//	RSB_INFO(" \n");
+
+	return mid;
+}
+
+rsb_nnz_idx_t rsb__seek_coo_idx_t(const rsb_coo_idx_t *p, rsb_nnz_idx_t v, rsb_nnz_idx_t n)
+{
+	/**
+	 	\ingroup gr_internals
+
+		Assuming p is an array sorted in monotonically increasing order, returns the index of
+		an entry equal to v among the first n (with duplicates, not necessarily the first such one).
+		Returns RSB_MARKER_NNZ_VALUE if no such value exists.
+		Ready to handle unsigned indices.
+	*/
+	rsb_nnz_idx_t d=n;
+	rsb_nnz_idx_t e=1;
+
+	RSB_DEBUG_ASSERT(v>=0);
+	RSB_DEBUG_ASSERT(n>=0);
+	RSB_DEBUG_ASSERT(p);
+
+	if(n<1)
+		goto head_not_found;
+	if(n==1)
+		{d=0;goto maybe;}
+	/* n>1 */
+
+	while(e<((n-1)/2)+1)
+		e*=2;
+	d=e;
+	e/=2;
+
+	/* now e < n */
+	RSB_DEBUG_ASSERT(e<n);
+	RSB_DEBUG_ASSERT(d>0);
+	RSB_DEBUG_ASSERT(d>=e);
+
+	do
+	{
+		if(v>=p[d])
+		{
+			if(v==p[d])
+				goto head_found;
+			else
+				if(d+e<n)
+					d+=e;
+		}
+		else
+			d-=e;
+		e/=2;
+	}
+	while(RSB_LIKELY(e>0));
+maybe:
+	if(v==p[d])
+		goto head_found;
+
+	if(d==1 && p[d=0]==v)
+		goto head_found;
+
+head_not_found:
+		return RSB_MARKER_NNZ_VALUE;
+head_found:
+		return d;
+}
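+/*
+ * Worked example (illustrative only): with p = {2,4,6,9} and n = 4, the
+ * stride e grows to 2, so probing starts at d = 2: rsb__seek_coo_idx_t(p,6,4)
+ * finds p[2] == 6 and returns 2, while rsb__seek_coo_idx_t(p,5,4) exhausts
+ * the halving strides without a match and returns RSB_MARKER_NNZ_VALUE.
+ */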
+
+rsb_nnz_idx_t rsb__seek_half_idx_t(const rsb_half_idx_t *p, rsb_nnz_idx_t v, rsb_nnz_idx_t n)
+{
+	/**
+	 	\ingroup gr_internals
+
+		Assuming p is an array sorted in monotonically increasing order, returns the index of
+		an entry equal to v among the first n (with duplicates, not necessarily the first such one).
+		Returns RSB_MARKER_NNZ_VALUE if no such value exists.
+		Ready to handle unsigned indices.
+	*/
+	rsb_nnz_idx_t d=n;
+	rsb_nnz_idx_t e=1;
+
+	RSB_DEBUG_ASSERT(v>=0);
+	RSB_DEBUG_ASSERT(n>=0);
+	RSB_DEBUG_ASSERT(p);
+
+	if(n<1)
+		goto head_not_found;
+	if(n==1)
+		{d=0;goto maybe;}
+	/* n>1 */
+
+	while(e<((n-1)/2)+1)
+		e*=2;
+	d=e;
+	e/=2;
+
+	/* now e < n */
+	RSB_DEBUG_ASSERT(e<n);
+	RSB_DEBUG_ASSERT(d>0);
+	RSB_DEBUG_ASSERT(d>=e);
+
+	do
+	{
+		if(v>=p[d])
+		{
+			if(v==p[d])
+				goto head_found;
+			else
+				if(d+e<n)
+					d+=e;
+		}
+		else
+			d-=e;
+		e/=2;
+	}
+	while(RSB_LIKELY(e>0));
+maybe:
+	if(v==p[d])
+		goto head_found;
+
+	if(d==1 && p[d=0]==v)
+		goto head_found;
+
+head_not_found:
+		return RSB_MARKER_NNZ_VALUE;
+head_found:
+		return d;
+}
+
+rsb_nnz_idx_t rsb__seek_nnz_idx_t(const rsb_nnz_idx_t *p, rsb_nnz_idx_t v, rsb_nnz_idx_t n)
+{
+	/**
+	 	\ingroup gr_internals
+
+		Assuming p is an array sorted in monotonically increasing order, returns the index of
+		an entry equal to v among the first n (with duplicates, not necessarily the first such one).
+		Returns RSB_MARKER_NNZ_VALUE if no such value exists.
+		Ready to handle unsigned indices.
+	*/
+#if 1
+	rsb_nnz_idx_t d=n;
+	rsb_nnz_idx_t e=1;
+
+	RSB_DEBUG_ASSERT(v>=0);
+	RSB_DEBUG_ASSERT(n>=0);
+	RSB_DEBUG_ASSERT(p);
+
+	if(n<1)
+		goto head_not_found;
+	if(n==1)
+		{d=0;goto maybe;}
+	/* n>1 */
+
+	while(e<((n-1)/2)+1)
+		e*=2;
+	d=e;
+	e/=2;
+
+	/* now e < n */
+	RSB_DEBUG_ASSERT(e<n);
+	RSB_DEBUG_ASSERT(d>0);
+	RSB_DEBUG_ASSERT(d>=e);
+
+	do
+	{
+		if(v>=p[d])
+		{
+			if(v==p[d])
+				goto head_found;
+			else
+				if(d+e<n)
+					d+=e;
+		}
+		else
+			d-=e;
+		e/=2;
+	}
+	while(RSB_LIKELY(e>0));
+maybe:
+	if(v==p[d])
+		goto head_found;
+
+	if(d==1 && p[d=0]==v)
+		goto head_found;
+
+head_not_found:
+		return RSB_MARKER_NNZ_VALUE;
+head_found:
+		return d;
+#else
+	RSB_DEBUG_ASSERT(v>=0);
+	RSB_DEBUG_ASSERT(n>=0);
+	RSB_DEBUG_ASSERT(p);
+	rsb_nnz_idx_t k=0;
+	/* fallback, slow */
+	for(k=0;RSB_LIKELY(k<n);++k)
+		if(p[k]==v)
+			return k;
+#endif
+	return RSB_MARKER_NNZ_VALUE;
+}
+
+rsb_nnz_idx_t rsb__seek_nnz_idx_t_linear(const rsb_nnz_idx_t *p, rsb_nnz_idx_t v, rsb_nnz_idx_t n)
+{
+	/**
+	 	\ingroup gr_internals
+	*/
+	rsb_nnz_idx_t k=0;
+	RSB_DEBUG_ASSERT(v>=0);
+	RSB_DEBUG_ASSERT(n>=0);
+	RSB_DEBUG_ASSERT(p);
+	/* fallback, slow */
+	for(k=0;RSB_LIKELY(k<n);++k)
+		if(p[k]==v)
+			return k;
+	return RSB_MARKER_NNZ_VALUE;
+}
+
+/* @endcond */
+
diff --git a/rsb_src.h b/rsb_src.h
new file mode 100644
index 0000000..e25b74c
--- /dev/null
+++ b/rsb_src.h
@@ -0,0 +1,44 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains searching functions.
+ * */
+
+#ifndef RSB_SEARCH_H_INCLUDED
+#define RSB_SEARCH_H_INCLUDED
+
+#include "rsb_swt.h"	/* rsb_half_idx_t */
+#include "rsb_common.h"	/* rsb_coo_matrix_t */
+
+rsb_nnz_idx_t rsb__nnz_split_nnz_bsearch(const rsb_nnz_idx_t*A,const rsb_nnz_idx_t S,const rsb_nnz_idx_t n);
+rsb_nnz_idx_t rsb__nnz_split_coo_bsearch(const rsb_coo_idx_t*A,const rsb_coo_idx_t S,const rsb_nnz_idx_t n);
+rsb_nnz_idx_t rsb__nnz_split_hcoo_bsearch(const rsb_half_idx_t *A, const rsb_half_idx_t S,const rsb_nnz_idx_t n);
+rsb_nnz_idx_t rsb__seek_nnz_idx_t(const rsb_nnz_idx_t *p, rsb_nnz_idx_t v, rsb_nnz_idx_t n);
+rsb_nnz_idx_t rsb__seek_coo_idx_t(const rsb_coo_idx_t *p, rsb_nnz_idx_t v, rsb_nnz_idx_t n);
+rsb_nnz_idx_t rsb__seek_nnz_idx_t_linear(const rsb_nnz_idx_t *p, rsb_nnz_idx_t v, rsb_nnz_idx_t n);
+rsb_nnz_idx_t rsb__seek_half_idx_t(const rsb_half_idx_t *p, rsb_nnz_idx_t v, rsb_nnz_idx_t n);
+#endif /* RSB_SEARCH_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_srt.c b/rsb_srt.c
new file mode 100644
index 0000000..bfdffbc
--- /dev/null
+++ b/rsb_srt.c
@@ -0,0 +1,1903 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains many sorting functions, with variations on the index type and algorithm.
+ **/
+/*
+ * 
+ * FIXME : There are idiosyncrasies in these sorting routines:
+ *         some work in place, some do not, and some need a
+ *         wider index type.
+ * */
+#include "rsb_internals.h"	/* rsb_coo_matrix_t	*/
+#include "rsb_msort_up.h"	/* msort_up		*/
+#ifdef RSB_HAVE_GSL
+#include <gsl/gsl_sort.h>
+#endif /* RSB_HAVE_GSL */
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+rsb_err_t rsb__do_util_sortcoo(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnz, rsb_type_t typecode, const struct rsb_mtx_partitioning_info_t * pinfop , rsb_flags_t flags, void * WA, size_t wb)
+{
+	/**
+	 * \ingroup gr_internals
+	 * Will sort the input coefficients of type typecode.
+	 *
+	 * \param VA	a pointer to a valid coefficients array
+	 * \param IA	a pointer to a valid rows coefficients array
+	 * \param JA	a pointer to a valid columns coefficients array
+	 * \param nnz	the coefficients count
+	 * \param typecode	the coefficients typecode
+	 * \param pinfop	valid partitioning info structure pointer or NULL
+	 * \return RSB_ERR_NO_ERROR on correct operation, an error code (see \ref errors_section) otherwise.
+	 *
+	 * If pinfop is not NULL, its partitioning information will be used to
+	 * sort the arrays in a blockwise fashion (i.e.: inside a block,
+	 * coefficients relative order doesn't matter).
+	 * 
+	 * Note that the RSB_SORT_IN_PLACE flag isn't supported, as it is not suitable for mergesort
+	 *
+	 * */
+	void *rVA=NULL;
+	rsb_coo_idx_t * rIA=NULL,*rJA=NULL;
+	rsb_coo_idx_t * bIA=NULL, *bJA=NULL;	/* block coordinates for each nonzero */
+	rsb_coo_idx_t * brIA=NULL,*brJA=NULL;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_time_t t;
+	rsb_blk_idx_t br = 1, bc = 1;	/* default, if ! pinfop*/
+	/* aliases */
+	rsb_blk_idx_t MIb = 0, mIb = 0;
+	rsb_blk_idx_t Mdim = 0, mdim = 0;
+	rsb_coo_idx_t * Mindx=NULL,*mindx=NULL;
+	rsb_coo_idx_t * rMindx=NULL,*rmindx=NULL;
+	rsb_coo_idx_t * bMindx=NULL, *bmindx=NULL;
+	rsb_coo_idx_t * brMindx=NULL,*brmindx=NULL;
+//	enum rsb_op_flags_t op_flags = RSB_OP_FLAG_WANT_PARALLEL_SORT;
+//	enum rsb_op_flags_t op_flags = RSB_OP_FLAG_WANT_SERIAL_SORT;
+	enum rsb_op_flags_t op_flags = RSB_WANT_OMP_RECURSIVE_KERNELS?RSB_OP_FLAG_WANT_PARALLEL_SORT:RSB_OP_FLAG_WANT_SERIAL_SORT;
+	
+	if(nnz==0)
+		goto err;/* a special case */
+	if(!VA || !IA || !JA || RSB_INVALID_NNZ_INDEX(nnz) )
+		return RSB_ERR_BADARGS;
+
+#if !RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS 
+//	if(!pinfop)return RSB_ERR_BADARGS;// pinfop is optional (as used now in the lib)
+#endif /* RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS  */
+
+	t = - rsb_time();
+	if( pinfop )
+		rsb__do_get_blocking_from_pinfo(pinfop, flags, &br, &bc);
+
+	if( pinfop && ( flags & RSB_FLAG_SHOULD_DEBUG ) )
+	{
+		if((errval = rsb__do_is_valid_pinfo_t(pinfop))!=RSB_ERR_NO_ERROR)
+		{
+			RSB_PERR_GOTO(err,RSB_ERRM_ESIIB);
+		}
+		else
+		{
+			if(RSB_WANT_VERBOSE_MESSAGES)
+				RSB_STDERR("sorting input seems ok \n");
+		}
+	}	
+
+	if( flags & RSB_FLAG_WANT_BCSS_STORAGE )
+		/* FIXME : NEW. IT BREAKS NON-BCSS */
+		RSB_DO_FLAG_ADD(flags,RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT);
+
+	if( ! ( flags & RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT ) )
+	if( pinfop && (( flags & RSB_FLAG_WANT_BCSS_STORAGE ) == 0) )
+	{
+		rsb_time_t p;
+		rsb_nnz_idx_t k=0;
+		p = - rsb_time();
+
+		bIA    = rsb__malloc(sizeof(rsb_coo_idx_t)*(nnz));
+		bJA    = rsb__malloc(sizeof(rsb_coo_idx_t)*(nnz));
+		brIA   = rsb__malloc(sizeof(rsb_coo_idx_t)*(nnz));
+		brJA   = rsb__malloc(sizeof(rsb_coo_idx_t)*(nnz));
+
+		if(!brIA || !brJA ){RSB_PERR_GOTO(err,RSB_ERRM_ES)}
+
+		for(k=0;RSB_LIKELY(k<nnz);++k)
+		{
+			/*
+			 * warning : the following code is slow and should be optimized
+			 * */
+			bJA[k]=RSB_GET_BLOCK_COL_FOR_NZ_(JA+k,pinfop->cpntr,pinfop->K_b);
+			bIA[k]=RSB_GET_BLOCK_ROW_FOR_NZ_(IA+k,pinfop->rpntr,pinfop->M_b);
+
+	                RSB_DEBUG_ASSERT(bIA[k]>=0);
+	                RSB_DEBUG_ASSERT(bJA[k]>=0);
+		}
+		p += rsb_time();
+		if(RSB_WANT_VERBOSE_MESSAGES)
+			RSB_STDERR("slow pre-sorting took %lg seconds\n",p);
+	}
+
+	if( ! ( flags & RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT ) )
+	{
+		errval = rsb_util_coo_alloc(&rVA,&rIA,&rJA,nnz,typecode,RSB_BOOL_FALSE);
+	}
+	else
+	{
+		rIA = IA;
+		rJA = JA;
+		rVA = VA;
+	}
+
+	if(!rIA || !rJA || !rVA)
+	{
+		errval = RSB_ERR_ENOMEM;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	if(flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)
+	{
+		mindx=IA,Mindx=JA;
+		rmindx = rIA, rMindx = rJA;
+		bmindx=bIA, bMindx=bJA;
+		brmindx=brIA,brMindx=brJA;
+		mdim=m, Mdim=k;
+		mIb=br, MIb=bc;
+	}
+	else
+	{	
+		Mindx=IA,mindx=JA;
+		rMindx = rIA, rmindx = rJA;
+		bMindx=bIA, bmindx=bJA;
+		brMindx=brIA,brmindx=brJA;
+		Mdim=m, mdim=k;
+		MIb=br, mIb=bc;
+	}
+
+#if RSB_WANT_INDEX_BASED_SORT && defined(RSB_MATRIX_STORAGE_BCSR)
+	if( ( flags & RSB_FLAG_WANT_BCSS_STORAGE ) || ( flags & RSB_FLAG_WANT_FIXED_BLOCKING_VBR ) )
+	{
+
+		if( flags & RSB_FLAG_OBSOLETE_BLOCK_ASYMMETRIC_Z_SORTING )
+			errval = rsb__do_index_based_recursive_bcsr_sort(Mindx,mindx,VA,rMindx,rmindx,rVA,Mdim,mdim,MIb,mIb,nnz,typecode,flags,op_flags);
+		else
+		{
+			if( flags & RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT )
+			errval = rsb__do_index_based_bcsr_msort(rMindx,rmindx,rVA,Mdim,mdim,MIb,mIb,nnz,typecode,flags,op_flags,WA,wb);
+			else
+			errval = rsb__do_index_based_bcsr_sort(Mindx,mindx,VA,rMindx,rmindx,rVA,Mdim,mdim,MIb,mIb,nnz,typecode,flags,op_flags,WA,wb);
+		}
+
+		if(errval == RSB_ERR_NO_ERROR)
+		{	
+			if( flags & RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT )
+				/* FIXME : in this case we'll have a segfault, because we did not allocate copies ! */
+				goto sorted_in_place;
+			goto sorted;
+		}
+		else
+		if(errval == RSB_ERR_LIMITS && !( flags & RSB_FLAG_OBSOLETE_BLOCK_ASYMMETRIC_Z_SORTING ) )
+		{
+			/* WARNING : switching back to our traditional, slower sorting */
+			errval = RSB_ERR_NO_ERROR;
+		}
+		else
+			goto err;
+	}
+#endif /* RSB_WANT_INDEX_BASED_SORT && defined(RSB_MATRIX_STORAGE_BCSR) */
+	
+#ifdef RSB_MATRIX_STORAGE_BCSR
+	if( ( flags & RSB_FLAG_WANT_BCSS_STORAGE ) != 0)
+	{
+		/* FIXME : temporary fix (need ad-hoc variables)  */
+
+		if(bc==1 && br==1)
+			rsb__do_mergesort_CSR( Mindx, mindx, VA, nnz, rMindx, rmindx, rVA, typecode);
+		else
+			rsb__do_mergesort_BCSR( Mindx, mindx, VA, nnz, MIb,mIb, rMindx, rmindx, rVA, typecode);
+	}
+	else
+#endif /* RSB_MATRIX_STORAGE_BCSR */
+	if( bMindx && bmindx )
+		rsb__do_mergesort_VBR( Mindx, mindx, bMindx, bmindx, VA, nnz, rMindx, rmindx, brMindx, brmindx, rVA, typecode);
+	else
+		rsb__do_mergesort_CSR( Mindx, mindx, VA, nnz, rMindx, rmindx, rVA, typecode);
+sorted:
+
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	if( ! ( flags & RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT ) )
+	{
+		/* we copy back the sorted arrays to the input arrays */
+		RSB_COO_MEMCPY(rVA,rIA,rJA,VA,IA,JA,0,0,nnz,RSB_NUMERICAL_TYPE_SIZE(typecode));
+	}
+
+sorted_in_place:
+
+	if( pinfop && ( flags & RSB_FLAG_SHOULD_DEBUG ) )
+	{
+		if((errval = rsb__do_is_valid_pinfo_t(pinfop))!=RSB_ERR_NO_ERROR)
+		{
+			RSB_PERR_GOTO(err,RSB_ERRM_SLSIB);
+		}
+		else
+		{
+			if(RSB_WANT_VERBOSE_MESSAGES)
+				RSB_STDERR("sorting seems ok (1/2)\n");
+		}
+	}	
+	
+/*	if( ( flags & RSB_FLAG_SHOULD_DEBUG ) && ! ( flags & RSB_FLAG_QUAD_PARTITIONING ) )
+	{
+		RSB_WARN("skipping recursive sort check!\n");
+	}*/
+
+/*	if( ( flags & RSB_FLAG_SHOULD_DEBUG ) && ! ( flags & RSB_FLAG_QUAD_PARTITIONING ) )*/
+	if( flags & RSB_FLAG_SHOULD_DEBUG )
+	{
+		if(RSB_WANT_VERBOSE_MESSAGES)
+			RSB_STDERR("just sorted. let's check\n");
+
+		errval= rsb__util_is_sorted_coo(VA, Mindx,  mindx, nnz, typecode, pinfop, flags ) ;
+
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_STDERR(RSB_ERRM_SLIINS);
+			goto err;
+		}
+		else
+		{
+			if(RSB_WANT_VERBOSE_MESSAGES)
+				RSB_STDERR("sorting seems ok (2/2)\n");
+		}
+	}
+
+err:
+	t += rsb_time();
+
+	if(RSB_SOME_ERROR(errval))
+		RSB_ERROR("!\n");
+
+//	if(RSB_WANT_VERBOSE_MESSAGES)
+//		RSB_STDERR("matrix sorted in %lf seconds \n", t);
+	if( ! ( flags & RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT ) )
+	{
+		RSB_CONDITIONAL_FREE(bIA);
+		RSB_CONDITIONAL_FREE(bJA);
+		RSB_CONDITIONAL_FREE(brIA);
+		RSB_CONDITIONAL_FREE(brJA);
+		RSB_CONDITIONAL_FREE(rIA);
+		RSB_CONDITIONAL_FREE(rJA);
+		RSB_CONDITIONAL_FREE(rVA);
+	}
+	RSB_DO_ERR_RETURN(errval)
+}
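+/*
+ * Usage sketch (illustrative only; VA, IA, JA, nr, nc, nnz are hypothetical
+ * locals): sorting COO triples of doubles into row-major (CSR friendly)
+ * order, with no blocking information and no scratch workspace:
+ *
+ *	rsb_err_t errval = rsb__do_util_sortcoo(VA, IA, JA, nr, nc, nnz,
+ *		RSB_NUMERICAL_TYPE_DOUBLE, NULL, RSB_FLAG_NOFLAGS, NULL, 0);
+ */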
+
+static int rsb_compar_coo_idx_t(const void * ap, const void * bp)
+{
+	/**
+		\ingroup gr_internals
+	*/
+	rsb_coo_idx_t a=*(rsb_coo_idx_t*)ap;
+	rsb_coo_idx_t b=*(rsb_coo_idx_t*)bp;
+        return
+                 ( a >  b ) ? 1 :
+                 (( a == b ) ? 0 : -1);
+}
+
+static int rsb_compar_nnz_idx_t(const void * ap, const void * bp)
+{
+	/**
+		\ingroup gr_internals
+	*/
+	rsb_nnz_idx_t a=*(rsb_nnz_idx_t*)ap;
+	rsb_nnz_idx_t b=*(rsb_nnz_idx_t*)bp;
+        return
+                 ( a >  b ) ? 1 :
+                 (( a == b ) ? 0 : -1);
+}
+
+static inline rsb_nnz_idx_t rsb_coo_index_bit_interleave(rsb_coo_idx_t o, rsb_coo_idx_t e)
+{
+	/**
+		\ingroup gr_internals
+		
+		Interleaves two index values.
+		Could be performed in one assembly instruction, if available.
+
+		assumes sizeof(rsb_nnz_idx_t) >= sizeof(rsb_coo_idx_t)
+		assumes the sizeof() dispatch below resolves at compile time, so no branches will occur.
+	*/
+	rsb_nnz_idx_t i = 0, O=o, E=e;
+
+	//RSB_ASSERT(sizeof(rsb_nnz_idx_t) >= sizeof(rsb_coo_idx_t));
+
+	RSB_DEBUG_ASSERT(O>=0);
+	RSB_DEBUG_ASSERT(E>=0);
+
+	if (sizeof(rsb_nnz_idx_t)==1)
+	{
+		E = (E | (E << 2)) & 0x33;
+		E = (E | (E << 1)) & 0x55;
+		O = (O | (O << 2)) & 0x33;
+		O = (O | (O << 1)) & 0x55;
+	}
+	else
+	if (sizeof(rsb_nnz_idx_t)==2)
+	{
+		E = (E | (E << 4)) & 0x0F0F;
+		E = (E | (E << 2)) & 0x3333;
+		E = (E | (E << 1)) & 0x5555;
+		O = (O | (O << 4)) & 0x0F0F;
+		O = (O | (O << 2)) & 0x3333;
+		O = (O | (O << 1)) & 0x5555;
+	}
+	else
+	if (sizeof(rsb_nnz_idx_t)==4)
+	{
+		E = (E | (E << 8)) & 0x00FF00FF;
+		E = (E | (E << 4)) & 0x0F0F0F0F;
+		E = (E | (E << 2)) & 0x33333333;
+		E = (E | (E << 1)) & 0x55555555;
+		O = (O | (O << 8)) & 0x00FF00FF;
+		O = (O | (O << 4)) & 0x0F0F0F0F;
+		O = (O | (O << 2)) & 0x33333333;
+		O = (O | (O << 1)) & 0x55555555;
+	}
+	else
+	if (sizeof(rsb_nnz_idx_t)==8)
+	{
+		E = (E | (E <<16)) & 0x0000FFFF0000FFFF;
+		E = (E | (E << 8)) & 0x00FF00FF00FF00FF;
+		E = (E | (E << 4)) & 0x0F0F0F0F0F0F0F0F;
+		E = (E | (E << 2)) & 0x3333333333333333;
+		E = (E | (E << 1)) & 0x5555555555555555;
+		O = (O | (O <<16)) & 0x0000FFFF0000FFFF;
+		O = (O | (O << 8)) & 0x00FF00FF00FF00FF;
+		O = (O | (O << 4)) & 0x0F0F0F0F0F0F0F0F;
+		O = (O | (O << 2)) & 0x3333333333333333;
+		O = (O | (O << 1)) & 0x5555555555555555;
+	}
+	else
+	{
+		RSB_ERROR(RSB_ERRM_FYCITINS);
+		/* FIXME : fatal! */
+	}
+
+	i = (E | (O << 1));
+/*	if(i<0)
+	{
+		printf("OVERFLOW %d %d %d %d %d\n",i,e,o,E,O);
+	}*/
+	RSB_DEBUG_ASSERT((i & ~-1)>=0);
+	return i;
+}
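+/*
+ * Worked example (illustrative only): with o = 0b101 and e = 0b011, the
+ * spread stages yield O = 0b10001 and E = 0b00101, so the result is
+ * E | (O << 1) = 0b100111 = 39: the bits of e land on the even positions
+ * and the bits of o on the odd positions.
+ */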
+
+static void rsb_expand_coo_indices( const rsb_coo_idx_t i, const rsb_coo_idx_t j, rsb_coo_idx_t m, rsb_coo_idx_t k	, int ml, int kl, rsb_coo_idx_t * mzp, rsb_coo_idx_t * kzp)
+{
+	rsb_coo_idx_t mz=0,kz=0;
+	rsb_coo_idx_t mh=m,kh=k,mc=i,kc=j,khb,mhb;
+	register int im=0,ik=0,lm=ml,lk=kl;
+
+	RSB_DEBUG_ASSERT(mzp);
+	RSB_DEBUG_ASSERT(kzp);
+
+	while( mh >= 2 )
+	{
+		mhb=mh;
+		mh=(mh+1)/2;
+		if(mc >= mh)
+		{
+			mz = (mz<<1) | 1;
+			mc-= mh;
+			mh = mhb-mh;
+		}
+		else
+		{
+			mz = (mz<<1);
+		}
+		++im;
+	}
+
+	while( kh >= 2 )
+	{
+		khb=kh;
+		kh=(kh+1)/2;
+		if(kc >= kh)
+		{
+			kz = (kz<<1) | 1;
+			kc-= kh;
+			kh = khb-kh;
+		}
+		else
+		{
+			kz = (kz<<1);
+		}
+		++ik;
+	}
+
+//			RSB_STDERR("shifts : %d %d  %d %d\n",lm,lk,im,ik);
+//			RSB_STDERR("shifts : %d %d\n",lm,lk);
+//			RSB_STDERR("shifts : %d %d\n",im,ik);
+//			RSB_STDERR("Z : %d %d -> %d %d : %d\n",i,j,mz,kz,-1);
+#if 0
+			/* FIXME : seems like REMOVING these assertions slows down the code a lot ! */
+			RSB_ASSERT(lm>=im);
+			RSB_ASSERT(lk>=ik);
+#endif
+	mz<<=(lm-im);
+	kz<<=(lk-ik);
+//			if(lm-im<lk-ik)mz<<=(lk-ik); else kz<<=(lm-im);
+//			mz<<=(lm); kz<<=(lk);
+
+	if(lm<lk)mz<<=(lk-lm); else kz<<=(lm-lk);
+
+	*kzp=kz;
+	*mzp=mz;
+}
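+/*
+ * Note: the two loops above record, per coordinate, the upper/lower decision
+ * path taken while recursively halving the [0,m) and [0,k) ranges (halves
+ * are rounded upwards, keeping odd extents balanced); the trailing shifts
+ * align paths of different depths to a common width before interleaving.
+ */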
+
+
+static inline void rsb_asymmetric_z_indices( const rsb_coo_idx_t i, const rsb_coo_idx_t j, rsb_coo_idx_t m, rsb_coo_idx_t k	, int ml, int kl , rsb_coo_idx_t *h, rsb_coo_idx_t *l)
+{
+	/**
+		Interleaves two rsb_coo_idx_t words, bitwise, regardless of sizeof(rsb_coo_idx_t).
+	in:
+		e:	   o:
+		+-+-+-+-+  +-+-+-+-+
+		|0|2|4|6|  |1|3|5|7|
+		+-+-+-+-+  +-+-+-+-+
+	out:
+		+-+-+-+-+  +-+-+-+-+
+		|0|1|2|3|  |4|5|6|7|
+		+-+-+-+-+  +-+-+-+-+
+
+		FIXME : UNFINISHED
+	*/
+
+#if 0
+	RSB_DEBUG_ASSERT(h);
+	RSB_DEBUG_ASSERT(l);
+	
+	unsigned a;
+	int b;
+	a = ( (~0) << 16 ) >>16;
+	b = ( (~0) << 16 ) >>16;
+	printf("%x %x\n",a,b);
+	a = ( (~0) << 16 ) ;
+	b = ( (~0) << 16 ) ;
+	printf("%x %x\n",a,b);
+	a = ~ a ;
+	b = ~ b ;
+	printf("%x %x\n",a,b);
+	a = ( (~0) >> 16 ) ;
+	b = ( (~0) >> 16 ) ;
+	printf("%x %x\n",a,b);
+
+#else
+	/* the compiler should be smart enough here */
+	rsb_coo_idx_t mz=0,kz=0;
+	const int hcb=(sizeof(rsb_coo_idx_t)*RSB_CHAR_BIT)/2;	/* half coo bytes */
+	const rsb_coo_idx_t fm =~((rsb_coo_idx_t)0);	/* full bits mask */
+	const rsb_coo_idx_t lm = ~(fm<<(hcb-1));
+	const rsb_coo_idx_t hm = ~lm;
+	int hs = hcb-1;
+	RSB_DEBUG_ASSERT(h);
+	RSB_DEBUG_ASSERT(l);
+	
+
+//	printf("%x %x %x\n",fm,lm,hs);
+
+
+//	printf("halfword : %x %x %x: \n",lm,~lm,1<<30);
+
+	rsb_expand_coo_indices( i, j, m, k, ml, kl, &mz, &kz );
+	
+	/* to avoid trouble, we move the highest bit from l to h */
+	/* FIXME : we deliberately ignore the highest two bits of h */
+	*l = rsb_coo_index_bit_interleave( mz&lm     , kz&lm     );
+	*h = rsb_coo_index_bit_interleave((mz&hm)>>hs,(kz&hm)>>hs);
+	RSB_DEBUG_ASSERT(*h>=0);
+	RSB_DEBUG_ASSERT(*l>=0);
+#endif
+
+}
+
+void rsb__asymmetric_z_indices_encode( const rsb_coo_idx_t i, const rsb_coo_idx_t j, rsb_coo_idx_t m, rsb_coo_idx_t k	, int ml, int kl , rsb_coo_idx_t *h, rsb_coo_idx_t *l)
+{
+	rsb_asymmetric_z_indices(i,j,m,k,ml,kl,h,l);
+}
+
+rsb_nnz_idx_t rsb__nearest_power_of_two( const rsb_nnz_idx_t n )
+{
+	/**
+	 	\ingroup gr_internals
+		\param n a positive (n>0) index
+ 		\return the nearest power of two not less than n (n itself if already a power of two); when that is not representable, the result overflows into the highest bit
+	*/
+	register int bits=1;
+//	RSB_DEBUG_ASSERT(i>0);
+
+	while(n>>(bits+1))
+		++bits;
+	if((1<<bits) < n)
+		return 1 << (bits+1);
+	else
+		return n;/* n was a power of two */
+}
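+/*
+ * Worked example (illustrative only): rsb__nearest_power_of_two(5) returns 8,
+ * rsb__nearest_power_of_two(8) returns 8 (already a power of two), and
+ * rsb__nearest_power_of_two(1) returns 1.
+ */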
+
+rsb_nnz_idx_t rsb__asymmetric_z_index( const rsb_coo_idx_t i, const rsb_coo_idx_t j, rsb_coo_idx_t m, rsb_coo_idx_t k	, int ml, int kl)
+{
+	/**
+	 	\ingroup gr_internals
+
+		Given (block) coordinate indices, computes a suitable asymmetric Z index for the coordinate.
+		The order defined by this index is suitable to recursively partition matrices.
+
+		FIXME : there are probably still limits and overflow-related problems!
+		\todo : should be inline ?
+		
+		\todo this routine is all about bit mangling, and it is performance critical, although non-obvious.
+   		
+
+                \code 
+		If sorting 2D coordinates with this index, the elements will get sorted as follows:
+
+ 			->->->->->->->->	--->--->--->--->	--------------->
+ 			<-<-<-<-<-<-<-<-	 /    /   /    /	            /  
+ 			->->->->->->->->	/    /   /    /                   /  
+ 			<-<-<-<-<-<-<-<-	<---<---<---<---	        /       
+ 			->->->->->->->->	--->--->--->--->	      /        
+ 			<-<-<-<-<-<-<-<-	 /    /   /    /	    /          
+ 			->->->->->->->->	/    /   /    /           /          
+ 			<-<-<-<-<-<-<-<-	<---<---<---<---	<---------------
+  
+                \endcode 
+	*/
+	rsb_coo_idx_t mz=0,kz=0;
+#if 1
+	rsb_expand_coo_indices( i, j, m, k, ml, kl, &mz, &kz );
+	return rsb_coo_index_bit_interleave(mz,kz);
+#else
+	rsb_coo_idx_t mh=m,kh=k,mc=i,kc=j,khb,mhb;
+	register int im=0,ik=0,lm=ml,lk=kl;
+
+	while( mh >= 2 )
+	{
+		mhb=mh;
+		mh=(mh+1)/2;
+		if(mc >= mh)
+		{
+			mz = (mz<<1) | 1;
+			mc-= mh;
+			mh = mhb-mh;
+		}
+		else
+		{
+			mz = (mz<<1);
+		}
+		++im;
+	}
+
+	while( kh >= 2 )
+	{
+		khb=kh;
+		kh=(kh+1)/2;
+		if(kc >= kh)
+		{
+			kz = (kz<<1) | 1;
+			kc-= kh;
+			kh = khb-kh;
+		}
+		else
+		{
+			kz = (kz<<1);
+		}
+		++ik;
+	}
+
+//			RSB_STDERR("shifts : %d %d  %d %d\n",lm,lk,im,ik);
+//			RSB_STDERR("shifts : %d %d\n",lm,lk);
+//			RSB_STDERR("shifts : %d %d\n",im,ik);
+//			RSB_STDERR("Z : %d %d -> %d %d : %d\n",i,j,mz,kz,-1);
+#if 0
+			/* FIXME : seems like REMOVING these assertions slows down the code a lot ! */
+			RSB_ASSERT(lm>=im);
+			RSB_ASSERT(lk>=ik);
+#endif
+	mz<<=(lm-im);
+	kz<<=(lk-ik);
+//			if(lm-im<lk-ik)mz<<=(lk-ik); else kz<<=(lm-im);
+//			mz<<=(lm); kz<<=(lk);
+
+	if(lm<lk)mz<<=(lk-lm); else kz<<=(lm-lk);
+//			if(lm<lk)kz>>=(lk-lm); else mz>>=(lm-lk);
+//			if(im<ik)kz>>=(ik-im); else mz>>=(im-ik);
+//			if(im<ik)mz<<=(ik-im); else kz<<=(im-ik);
+//			if(lm<lk)kz<<=(lk-ik); else mz<<=(lm-ik);
+//			if(lm>lk)mz<<=(lk); else kz<<=(lm);
+//			if(im>ik)kz<<=(im-ik); else mz<<=(ik-im);
+			//RSB_STDERR("Z : %d %d -> %d %d \n",IA[n]/br,JA[n]/bc,mz,kz);
+
+//			RSB_STDERR("Z : %d %d -> %d %d : %d\n",i,j,mz,kz,rsb_coo_index_bit_interleave(mz,kz));
+	return rsb_coo_index_bit_interleave(mz,kz);
+#endif
+}
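+/*
+ * Usage sketch (illustrative only; m, k, nnz, IA, JA and K are hypothetical
+ * locals): building a (key, position) permutation array in recursive Z
+ * order, much as rsb__do_nnz_index_sort_array_make() below does:
+ *
+ *	int ml = 0, kl = 0;
+ *	rsb_nnz_idx_t n;
+ *	while ((1 << ml) < m) ml++;	/* bits needed for the row range */
+ *	while ((1 << kl) < k) kl++;	/* bits needed for the column range */
+ *	for (n = 0; n < nnz; ++n)
+ *		K[2*n+0] = rsb__asymmetric_z_index(IA[n], JA[n], m, k, ml, kl),
+ *		K[2*n+1] = n;
+ */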
+
+void rsb__asymmetric_z_nnz_indices( const rsb_coo_idx_t i, const rsb_coo_idx_t j, rsb_coo_idx_t m, rsb_coo_idx_t k	, int ml, int kl, rsb_nnz_idx_t * a , rsb_nnz_idx_t * b )
+{
+	/**
+		\note : this function was written for cases in which sizeof(rsb_coo_idx_t)==sizeof(rsb_nnz_idx_t)
+	*/
+	rsb_coo_idx_t mz=0,kz=0;
+
+	RSB_DEBUG_ASSERT(sizeof(rsb_coo_idx_t)==sizeof(rsb_nnz_idx_t));
+	rsb_expand_coo_indices( i, j, m, k, ml, kl, &mz, &kz );
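+	/* FIXME : the output parameters a and b are never written here:
+	   the expanded indices mz and kz are computed and then discarded. */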
+}
+
+rsb_err_t rsb__do_coo_index_sort_on_rows_array_make( 
+	rsb_coo_idx_t * K, const rsb_coo_idx_t * IA,
+	const rsb_coo_idx_t m, const rsb_coo_idx_t br,
+	const rsb_nnz_idx_t nnz, const rsb_type_t typecode)
+{
+	rsb_coo_idx_t Idim=(m+(br-1))/br;
+	rsb_nnz_idx_t n;
+
+	/**
+	 	\ingroup gr_internals
+
+ 		Fills an array with block indices corresponding to given row indices.
+
+		Block row size is fixed.
+		\param br block row size
+		\param m row count
+		\param nnz length of IA
+		\param IA the row index array
+		\param typecode
+
+		TODO : document
+	*/
+	if(!IA || RSB_INVALID_BLK_INDEX(br) || RSB_INVALID_COO_INDEX(m) || RSB_INVALID_COO_INDEX(Idim) || RSB_INVALID_NNZ_INDEX(nnz))
+		return RSB_ERR_BADARGS;
+
+	if(br==1)
+		for(n=0;RSB_LIKELY(n<nnz);++n)
+		{
+			K[2*n+0] =(IA[n]);
+			K[2*n+1] =n;
+
+			RSB_DEBUG_ASSERT(IA[n]<Idim);
+			RSB_DEBUG_ASSERT(K[2*n+0]>=0);
+		}
+	else
+		for(n=0;RSB_LIKELY(n<nnz);++n)
+		{
+			K[2*n+0] =((IA[n]+0)/br);
+			K[2*n+1] =n;
+
+			RSB_DEBUG_ASSERT(K[2*n+0]>=0);
+		}
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_double_coo_index_sort_array_make( 
+	rsb_coo_idx_t * K, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA,
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_coo_idx_t roffset,
+	rsb_coo_idx_t br, rsb_coo_idx_t bc,
+	rsb_nnz_idx_t nnz,
+	rsb_type_t typecode,
+	rsb_flags_t flags
+	,int want_recursive_sort
+	,enum rsb_op_flags_t op_flags
+	/*, int want_rows_sort */)
+{
+	/**
+	 	\ingroup gr_internals
+ 		Fills an array with block indices corresponding to given row and column indices.
+
+		TODO : document
+	*/
+	rsb_coo_idx_t Idim=(m+(br-1))/br;
+	rsb_coo_idx_t Jdim=(k+(bc-1))/bc;
+	rsb_nnz_idx_t n;
+
+	if( want_recursive_sort )
+	{
+		int ml=0, kl=0;
+
+		while( (1<<ml) < Idim ) ml++;
+		while( (1<<kl) < Jdim ) kl++;
+
+		RSB_DEBUG_ASSERT(ml>=0);
+		RSB_DEBUG_ASSERT(kl>=0);
+
+		//op_flags = RSB_OP_FLAG_WANT_PARALLEL_SORT;
+		//op_flags = RSB_OP_FLAG_WANT_SERIAL_SORT;
+		/* note : integer division is quite fast .. */
+		if(op_flags == RSB_OP_FLAG_WANT_PARALLEL_SORT)
+		{
+		if(br==1 && bc==1)
+		{
+#pragma omp parallel for schedule(static) RSB_NTC
+			for(n=0;n<nnz;++n)
+		//	for(n=0;RSB_LIKELY(n<nnz);++n)
+			{
+				/* this is balanced Z block sort, unlike plain rsb_coo_index_bit_interleave */
+				rsb_asymmetric_z_indices(IA[n],JA[n],Idim,Jdim,ml,kl,K+2*n,K+2*n+1);
+//				printf("%d %d -> %d %d\n",mz,kz,*h,*l);
+				RSB_DEBUG_ASSERT(K[2*n+0]>=0);
+				RSB_DEBUG_ASSERT(K[2*n+1]>=0);
+			}
+		}
+		else
+		{
+#pragma omp parallel for schedule(static) RSB_NTC
+			for(n=0;n<nnz;++n)
+		//	for(n=0;RSB_LIKELY(n<nnz);++n)
+			{
+				/* this is balanced Z block sort, unlike plain rsb_coo_index_bit_interleave */
+				rsb_asymmetric_z_indices((IA[n])/br,(JA[n])/bc,Idim,Jdim,ml,kl,K+2*n,K+2*n+1);
+
+				RSB_DEBUG_ASSERT(K[2*n+0]>=0);
+				RSB_DEBUG_ASSERT(K[2*n+1]>=0);
+			}
+		}
+		}
+		else
+		{
+		if(br==1 && bc==1)
+		{
+			for(n=0;RSB_LIKELY(n<nnz);++n)
+			{
+				/* this is balanced Z block sort, unlike plain rsb_coo_index_bit_interleave */
+				rsb_asymmetric_z_indices(IA[n],JA[n],Idim,Jdim,ml,kl,K+2*n,K+2*n+1);
+//				printf("%d %d -> %d %d\n",mz,kz,*h,*l);
+				RSB_DEBUG_ASSERT(K[2*n+0]>=0);
+				RSB_DEBUG_ASSERT(K[2*n+1]>=0);
+			}
+		}
+		else
+		{
+			for(n=0;RSB_LIKELY(n<nnz);++n)
+			{
+				/* this is balanced Z block sort, unlike plain rsb_coo_index_bit_interleave */
+				rsb_asymmetric_z_indices((IA[n])/br,(JA[n])/bc,Idim,Jdim,ml,kl,K+2*n,K+2*n+1);
+
+				RSB_DEBUG_ASSERT(K[2*n+0]>=0);
+				RSB_DEBUG_ASSERT(K[2*n+1]>=0);
+			}
+		}
+		}
+	}
+	else
+	{
+		/* FIXME : UNFINISHED */
+		return RSB_ERR_UNIMPLEMENTED_YET;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_nnz_index_sort_array_make( 
+	rsb_nnz_idx_t * K, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA,
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_coo_idx_t roffset,
+	rsb_coo_idx_t br, rsb_coo_idx_t bc,
+	rsb_nnz_idx_t nnz,
+	rsb_type_t typecode,
+	rsb_flags_t flags,
+	int want_recursive_sort/*, int want_rows_sort */
+	,enum rsb_op_flags_t op_flags
+	)
+{
+	/**
+	 	\ingroup gr_internals
+ 		Fills an array with block indices corresponding to given row and column indices.
+
+		TODO : document
+	*/
+	rsb_coo_idx_t Idim=(m+(br-1))/br;
+	rsb_coo_idx_t Jdim=(k+(bc-1))/bc;
+	rsb_nnz_idx_t n;
+
+	if( want_recursive_sort )
+	{
+		int ml=0, kl=0;
+
+		while( (1<<ml) < Idim ) ml++;
+		while( (1<<kl) < Jdim ) kl++;
+
+		RSB_DEBUG_ASSERT(ml>=0);
+		RSB_DEBUG_ASSERT(kl>=0);
+		if(op_flags == RSB_OP_FLAG_WANT_PARALLEL_SORT)
+		{
+		/* note : integer division is quite fast .. */
+		if(br==1 && bc==1)
+		{
+			#pragma omp parallel for schedule(static) RSB_NTC
+		//	for(n=0;RSB_LIKELY(n<nnz);++n)
+			for(n=0;n<nnz;++n)
+			{
+				/* this is balanced Z block sort, unlike plain rsb_coo_index_bit_interleave */
+				K[2*n+0]=rsb__asymmetric_z_index((IA[n]),(JA[n]),Idim,Jdim,ml,kl);
+				K[2*n+1]=n;
+				RSB_DEBUG_ASSERT(K[2*n+0]>=0); //if(!RSB_IS_SIGNED(rsb_coo_idx_t)) { RSB_DEBUG_ASSERT(K[2*n+0]>=0); }
+			}
+		}
+		else
+		{
+			#pragma omp parallel for schedule(static) RSB_NTC
+		//	for(n=0;RSB_LIKELY(n<nnz);++n)
+			for(n=0;n<nnz;++n)
+			{
+				/* this is balanced Z block sort, unlike plain rsb_coo_index_bit_interleave */
+				K[2*n+0]=rsb__asymmetric_z_index((IA[n])/br,(JA[n])/bc,Idim,Jdim,ml,kl);
+				K[2*n+1]=n;
+				RSB_DEBUG_ASSERT(K[2*n+0]>=0); //if(!RSB_IS_SIGNED(rsb_coo_idx_t)) { RSB_DEBUG_ASSERT(K[2*n+0]>=0); }
+			}
+		}
+		}
+		else
+		{
+		/* note : integer division is quite fast .. */
+		if(br==1 && bc==1)
+			for(n=0;RSB_LIKELY(n<nnz);++n)
+			{
+				/* this is balanced Z block sort, unlike plain rsb_coo_index_bit_interleave */
+				K[2*n+0]=rsb__asymmetric_z_index((IA[n]),(JA[n]),Idim,Jdim,ml,kl);
+				K[2*n+1]=n;
+				RSB_DEBUG_ASSERT(K[2*n+0]>=0); //if(!RSB_IS_SIGNED(rsb_coo_idx_t)) { RSB_DEBUG_ASSERT(K[2*n+0]>=0); }
+			}
+		else
+			for(n=0;RSB_LIKELY(n<nnz);++n)
+			{
+				/* this is balanced Z block sort, unlike plain rsb_coo_index_bit_interleave */
+				K[2*n+0]=rsb__asymmetric_z_index((IA[n])/br,(JA[n])/bc,Idim,Jdim,ml,kl);
+				K[2*n+1]=n;
+				RSB_DEBUG_ASSERT(K[2*n+0]>=0); //if(!RSB_IS_SIGNED(rsb_coo_idx_t)) { RSB_DEBUG_ASSERT(K[2*n+0]>=0); }
+			}
+		}
+#if 0
+	else
+	if( want_rows_sort )
+	{
+		/* FIXME : still unused */
+		if(br==1)
+			for(n=0;RSB_LIKELY(n<nnz);++n)
+			{
+				K[2*n+0] =(IA[n]);
+				K[2*n+1] =n;
+
+                                RSB_DEBUG_ASSERT(IA[n]<Idim);
+				RSB_DEBUG_ASSERT(K[2*n+0]>=0);
+			}
+		else
+			for(n=0;RSB_LIKELY(n<nnz);++n)
+			{
+				K[2*n+0] =((IA[n]+0)/br);
+				K[2*n+1] =n;
+
+				RSB_DEBUG_ASSERT(K[2*n+0]>=0);
+			}
+	}
+#endif
+	}
+	else
+	{
+		/**
+			FIXME : WARNING : overflow can occur here!
+		 */
+		if(br==1 && bc==1)
+			for(n=0;RSB_LIKELY(n<nnz);++n)
+			{
+				K[2*n+0] =(IA[n]-roffset);
+				K[2*n+0]*= Jdim;
+				K[2*n+0]+=(JA[n]);
+				K[2*n+1] =n;
+
+                                RSB_DEBUG_ASSERT(JA[n]<Jdim);
+                                RSB_DEBUG_ASSERT(IA[n]<Idim);
+
+				RSB_DEBUG_ASSERT(K[2*n+0]>=0);
+			}
+		else
+			for(n=0;RSB_LIKELY(n<nnz);++n)
+			{
+				K[2*n+0] =((IA[n]-roffset)/br);
+				K[2*n+0]*=Jdim;
+				K[2*n+0]+=((JA[n]+0)/bc);
+				K[2*n+1] =n;
+
+				RSB_DEBUG_ASSERT(K[2*n+0]>=0);
+			}
+	}
+
+	return RSB_ERR_NO_ERROR;
+}
+
+void rsb__do_util_compact_permutation_coo_idx_t_array(rsb_coo_idx_t * K, rsb_nnz_idx_t nnz)
+{
+	/**
+	 	\ingroup gr_internals
+
+		Compacts a permutation vector.
+		TODO : document
+	*/
+	rsb_nnz_idx_t n;
+	/* compacting K into its first half */
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+		K[n]=K[2*n+1];
+}
+
+void rsb__do_util_compact_permutation_nnz_idx_t_array(rsb_nnz_idx_t * K, rsb_nnz_idx_t nnz)
+{
+	/**
+	 	\ingroup gr_internals
+
+		Compacts a permutation vector.
+		TODO : document
+	*/
+	rsb_nnz_idx_t n;
+	/* compacting K into its first half */
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+		K[n]=K[2*n+1];
+}
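+/*
+ * Layout note: the K arrays produced by the *_sort_array_make routines
+ * interleave sort keys with original positions,
+ *	K = { key_0, 0, key_1, 1, ..., key_{nnz-1}, nnz-1 },
+ * and, once sorted by key, the two compaction routines above keep only the
+ * permutation { p_0, p_1, ..., p_{nnz-1} } in the first half of K.
+ */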
+
+rsb_err_t rsb_do_double_pass_coo_index_based_bcsr_msort( 
+	rsb_coo_idx_t * rIA, rsb_coo_idx_t * rJA, void * rVA,
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_coo_idx_t br, rsb_coo_idx_t bc,
+	rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_flags_t flags)
+{
+	/* FIXME */
+	/**
+		\ingroup gr_internals
+
+		FIXME : FINISH ME
+	*/
+#if 0
+	rsb_nnz_idx_t n1=0,n2=0;
+	rsb_nnz_idx_t *K;
+	size_t el_size = RSB_SIZEOF(typecode);
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	K = rsb__malloc( (nnz+1) * sizeof(rsb_nnz_idx_t)  * 2 );
+	if(!K)
+	{
+		errval = RSB_ERR_ENOMEM;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+	errval = rsb_do_nnz_index_sort_on_rows_array_make(K,rIA,m,br,nnz,typecode);
+
+	errval = rsb__do_nnz_index_based_sort_and_permute(rIA,rJA,rVA,rIA,rJA,rVA,K,nnz,typecode,flags);
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	RSB_CONDITIONAL_FREE(K);
+	K = rsb__malloc( k*br * sizeof(rsb_nnz_idx_t)  * 2 );
+	/* FIXME ! overflows could still happen here! */
+
+	if(!K)
+	{
+		errval = RSB_ERR_ENOMEM;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	while(n1!=nnz)
+	{
+		/* FIXME : need specialized code here */
+		while( n2+1<nnz && IA[n1]/br==IA[n2+1]/br )
+			++n2;
+
+		errval = rsb__do_nnz_index_sort_array_make(K,IA+n1,JA+n1,m,k,IA[n1],br,bc,(n2+1)-n1,typecode,flags,0,op_flags);
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_PERR_GOTO(err,RSB_ERRM_ES)
+		}
+		errval = rsb__do_nnz_index_based_sort_and_permute(IA+n1,JA+n1,((char*)VA)+n1*mtxAp->el_size,rIA+n1,rJA+n1,((char*)rVA)+n1*mtxAp->el_size,K,(n2+1)-n1,typecode,flags);
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_PERR_GOTO(err,RSB_ERRM_ES)
+		}
+
+		++n2;n1=n2;
+	}
+err:
+	RSB_CONDITIONAL_FREE(K);
+
+	return RSB_ERR_NO_ERROR;
+#else
+	return RSB_ERR_GENERIC_ERROR;
+#endif
+}
+
+rsb_err_t rsb__do_nnz_index_based_bcsr_msort( 
+	rsb_coo_idx_t * rIA, rsb_coo_idx_t * rJA, void * rVA,
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_coo_idx_t br, rsb_coo_idx_t bc,
+	rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_flags_t flags
+	,enum rsb_op_flags_t op_flags
+	,void * WA, size_t wb)
+{
+	/**
+		\ingroup gr_internals
+
+		FIXME : could optimize a bit more!
+		FIXME : should implement double pass msort!
+	*/
+	/* nothing to do for RSB_FLAG_WANT_COLUMN_MAJOR_ORDER :
+	 the calling routine should already have swapped input arguments accordingly */
+	rsb_nnz_idx_t * K=NULL;
+	rsb_coo_idx_t Idim;
+	rsb_coo_idx_t Jdim;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_time_t st=0,it=0;
+
+	RSB_DEBUG_ASSERT(m);
+	RSB_DEBUG_ASSERT(k);
+	RSB_DEBUG_ASSERT(br);
+	RSB_DEBUG_ASSERT(bc);
+
+	Idim=(m+(br-1))/br;
+	Jdim=(k+(bc-1))/bc;
+
+	RSB_DEBUG_ASSERT(Idim>0);
+	RSB_DEBUG_ASSERT(Jdim>0);
+
+	if(nnz<2)
+		goto err;
+
+	if(br<1 || bc<1 || m<1 || k<1)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+	
+	/* this check implies a cast to rsb_nnz_idx_t */
+	if(
+	 RSB_NNZ_MUL_OVERFLOW(Idim,Jdim) != 0  ||
+	 RSB_NNZ_MUL_OVERFLOW(Idim,Idim) != 0  ||
+	 RSB_NNZ_MUL_OVERFLOW(Jdim,Jdim) != 0 
+	)
+
+	{
+		/* should resort to a double pass algorithm */
+		return RSB_ERR_LIMITS;
+	}
+
+	if(WA && wb >= RSB_DO_REQUIRE_BYTES_FOR_INDEX_BASED_SORT(nnz,m,k,br,bc))
+		K=WA;
+	else
+		K = rsb__malloc(RSB_DO_REQUIRE_BYTES_FOR_INDEX_BASED_SORT(nnz,m,k,br,bc));
+
+	if(!K)
+	{
+		errval = RSB_ERR_ENOMEM;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+	it = - rsb_time();
+	errval = rsb__do_nnz_index_sort_array_make(K,rIA,rJA,m,k,0,br,bc,nnz,typecode,flags,0,op_flags);
+	it += rsb_time();
+	
+	if(RSB_SOME_ERROR(errval))
+	{ RSB_PERR_GOTO(err,RSB_ERRM_ES) }
+
+	st = - rsb_time();
+	errval = rsb__do_nnz_index_based_sort_and_permute(rIA,rJA,rVA,rIA,rJA,rVA,K,nnz,typecode,flags,op_flags);
+	st += rsb_time();
+
+	if(RSB_SOME_ERROR(errval))
+	{ RSB_PERR_GOTO(err,RSB_ERRM_ES) }
+
+err:
+	if(WA && wb >= RSB_DO_REQUIRE_BYTES_FOR_INDEX_BASED_SORT(nnz,m,k,br,bc))
+		;
+	else
+		RSB_CONDITIONAL_FREE(K);
+
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_index_based_bcsr_msort( 
+	rsb_coo_idx_t * rIA, rsb_coo_idx_t * rJA, void * rVA,
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_coo_idx_t br, rsb_coo_idx_t bc,
+	rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_flags_t flags
+	,enum rsb_op_flags_t op_flags
+	,void * WA, size_t wb
+	)
+{
+	/**
+		\ingroup gr_internals
+
+		FIXME : could optimize a bit more!
+	*/
+	/* nothing to do for RSB_FLAG_WANT_COLUMN_MAJOR_ORDER :
+	 the calling routine should already have swapped input arguments accordingly */
+	rsb_err_t errval = rsb__do_nnz_index_based_bcsr_msort(rIA,rJA,rVA,m,k,br,bc,nnz,typecode,flags,op_flags,WA,wb);
+
+	if(errval == RSB_ERR_LIMITS)
+		/* FIXME : the following is not msort based ! */
+		/* FIXME : should implement a double pass msort ! */
+		errval = rsb__do_index_based_bcsr_sort(rIA,rJA,rVA,rIA,rJA,rVA,m,k,br,bc,nnz,typecode,flags,op_flags,WA,wb);
+		//return rsb_do_double_pass_coo_index_based_bcsr_msort( rIA, rJA, rVA, rIA, rJA, rVA, m, k, br, bc, nnz, typecode, flags);
+	else
+		;
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_nnz_index_based_sort_and_permute(
+	const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const void * VA,
+	rsb_coo_idx_t * rIA, rsb_coo_idx_t * rJA, void * rVA,
+	rsb_nnz_idx_t * K, rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_flags_t flags
+	,enum rsb_op_flags_t op_flags
+	)
+{
+	/**
+	 	\ingroup gr_internals
+
+		Sort and permute with a rsb_nnz_idx_t index-based permutation array.
+	*/
+
+	rsb_bool_t want_msort=1;	/* FIXME : should choose it in some other way */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!want_msort)
+	{
+#ifdef RSB_HAVE_GSL
+		gsl_heapsort( K , (size_t) nnz, 2*sizeof(rsb_nnz_idx_t), &rsb_compar_nnz_idx_t );
+#else /* RSB_HAVE_GSL */
+		qsort( K , (size_t) nnz, 2*sizeof(rsb_nnz_idx_t), &rsb_compar_nnz_idx_t );
+#endif /* RSB_HAVE_GSL */
+		rsb__do_util_compact_permutation_nnz_idx_t_array(K, nnz);
+
+		if(flags & RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT)
+			errval = rsb__do_permute_values_in_place_with_nnz_index(
+				rVA,rIA,rJA,K,nnz,typecode);
+		else
+			errval = rsb__do_permute_values_with_nnz_index(
+				rVA, VA, rIA, IA, rJA, JA, K, nnz, typecode);
+	}
+	else
+	{
+		rsb_bool_t was_already_sorted=0;
+		rsb__do_util_compact_permutation_nnz_idx_t_array(K+1, nnz-1);	/* FIXME : a hack ! */
+
+		was_already_sorted = RSB_SOME_ERROR(rsb_do_msort_up(nnz,K,K+nnz))?RSB_BOOL_TRUE:RSB_BOOL_FALSE;
+
+		if(flags & RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT)
+			;/* if in place, o data to copy */
+		else
+		{
+			/* if not in place, we copy first */
+			RSB_COO_MEMCPY(rVA,rIA,rJA,VA,IA,JA,0,0,nnz,RSB_NUMERICAL_TYPE_SIZE(typecode));
+		}
+
+		if(!was_already_sorted)
+			rsb_ip_reord(nnz, rVA, rIA, rJA, K+nnz, typecode);
+	}
+	
+	if(RSB_SOME_ERROR(errval))
+	{ RSB_PERR_GOTO(err,RSB_ERRM_ES) }
+
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_index_based_recursive_bcsr_sort( 
+	const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const void * VA,
+	rsb_coo_idx_t * rIA, rsb_coo_idx_t * rJA, void * rVA,
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_coo_idx_t br, rsb_coo_idx_t bc,
+	rsb_nnz_idx_t nnz,
+	rsb_type_t typecode,
+	rsb_flags_t flags
+	,enum rsb_op_flags_t op_flags
+	)
+{
+	/**
+		\ingroup gr_internals
+		
+		An index based coordinate sorting routine.
+		Usually faster than merge sort.
+		Will allocate 2 * nnz * sizeof(rsb_nnz_idx_t) bytes for a permutation vector.
+		FIXME : in some cases will allocate 3 * nnz * sizeof(rsb_nnz_idx_t) bytes for a permutation vector.
+
+		\attention : limited to smaller matrices (will bail out otherwise) due to overflow problems.
+		\todo : it could be modified to work around the potential overflow problem, but
+			that would need an estimate of the maximum nnz per row.
+		FIXME : needs more error checks (e.g.: overflow of the 2*nnz index)
+		FIXME : it may wrongly conclude that no overflow is possible if the last positive bit is found set
+	*/
+	/* nothing to do for RSB_FLAG_WANT_COLUMN_MAJOR_ORDER :
+	 the calling routine should already have swapped input arguments accordingly */
+	rsb_nnz_idx_t * K=NULL;
+	rsb_coo_idx_t Idim;
+	rsb_coo_idx_t Jdim;
+	//rsb_nnz_idx_t n;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_time_t it=0;
+	rsb_nnz_idx_t nIdim,nJdim;
+
+	RSB_DEBUG_ASSERT(m);
+	RSB_DEBUG_ASSERT(k);
+	RSB_DEBUG_ASSERT(br);
+	RSB_DEBUG_ASSERT(bc);
+
+	Idim=(m+(br-1))/br;
+	Jdim=(k+(bc-1))/bc;
+
+	RSB_DEBUG_ASSERT(Idim>0);
+	RSB_DEBUG_ASSERT(Jdim>0);
+
+	if( ! ( flags & RSB_FLAG_OBSOLETE_BLOCK_ASYMMETRIC_Z_SORTING ) )
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	if(nnz<2)
+	{
+		if(nnz==1 && rIA && rJA && rVA)
+		{
+			/* nothing to sort; only one copy is needed. */
+			rIA[0]=IA[0];
+			rJA[0]=JA[0];
+			rsb_memcpy(rVA,VA,RSB_NUMERICAL_TYPE_SIZE(typecode));
+		}
+		/* now it is ok to return */
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	if(br<1 || bc<1 || m<1 || k<1)
+	{
+		/* FIXME : use macros for this check */
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+	
+	/*
+		FIXME : coo index overflow is NOT the menace here;
+		nnz index overflow is THE problem.
+	*/
+	if(0)
+	{
+	size_t a=Idim,b=Jdim,c=a*b;
+	printf("OFLOW ? %ld %ld -> %ld  %d %d %d  %zd %zd %zd\n",(long)(Idim),(long)Jdim,(long)(Idim*Jdim),
+//		(size_t)Idim*(size_t)(Jdim),
+//		(size_t)Idim*(size_t)(Jdim)== (size_t)(RSB_MAX_MATRIX_DIM),
+		(Idim*Jdim)== (size_t)(RSB_MAX_MATRIX_DIM),
+		(Idim*Jdim) < (size_t)(RSB_MAX_MATRIX_DIM),
+		(size_t)((size_t)Idim)*((size_t)Jdim) > (size_t)(Idim*Jdim),
+		(size_t)(((size_t)Idim)*((size_t)Jdim)) , (size_t)(Idim*Jdim),
+		c
+		);
+	}
+
+	nIdim = rsb__nearest_power_of_two(Idim);
+	nJdim = rsb__nearest_power_of_two(Jdim);
+
+	if(
+	 nIdim<Idim || nJdim<Jdim || /* FIXME: this is the check that should not be */
+	 RSB_NNZ_MUL_OVERFLOW(nIdim,nJdim) != 0  ||
+	 RSB_NNZ_MUL_OVERFLOW(nIdim,nIdim) != 0  ||
+	 RSB_NNZ_MUL_OVERFLOW(nJdim,nJdim) != 0 
+	)
+	{
+		/* FIXME : NEW */
+		goto double_coo_index;
+
+/*
+		errval = RSB_ERR_LIMITS;
+		RSB_PERR_GOTO(err,"ERROR : index overflow\n");*/
+	}
+
+	if( RSB_NNZ_MUL_OVERFLOW(Idim,Jdim) != 0 )/* this check implies a cast to rsb_nnz_idx_t */
+	{
+		/* overflow. should work around this. */
+/*		RSB_INFO("NO OVERFLOW ? : %ld * %ld = %ld (%ld)  (%zd), Idim=%ld Jdim=%ld\n",
+			(long)m,(long)k,(long)(m*k),(long)(m)*(long)(k),(size_t)((size_t)m)*((size_t)k),(long)Idim,(long)Jdim);
+*/
+		errval = RSB_ERR_LIMITS;
+/*
+		RSB_ERROR(RSB_ERRM_WOPSTASA);
+*/
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	goto single_nnz_index;
+
+single_nnz_index:
+
+	K = rsb__malloc( ((nnz+2)+nnz) * sizeof(rsb_nnz_idx_t) );
+
+	if(!K)
+	{
+		errval = RSB_ERR_ENOMEM;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	it = - rsb_time();
+	errval = rsb__do_nnz_index_sort_array_make(K,IA,JA,m,k,0,br,bc,nnz,typecode,flags,1,op_flags);
+	it += rsb_time();
+
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	errval = rsb__do_nnz_index_based_sort_and_permute(IA,JA,VA,rIA,rJA,rVA,K,nnz,typecode,flags,op_flags);
+
+	goto done;
+
+double_coo_index:
+	/* NEW : for msort_up2 only */
+	K = rsb__malloc( (nnz+2) * sizeof(rsb_nnz_idx_t) + nnz * ( 2 * sizeof(rsb_coo_idx_t) ) );
+
+	if(!K)
+	{
+		errval = RSB_ERR_ENOMEM;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	it = - rsb_time();
+	errval = rsb__do_double_coo_index_sort_array_make((rsb_coo_idx_t*)K,IA,JA,m,k,0,br,bc,nnz,typecode,flags,1,op_flags);
+	it += rsb_time();
+
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	{
+		rsb_bool_t was_already_sorted=0;
+
+		was_already_sorted = rsb__do_msort_up2coo(nnz,K,((rsb_coo_idx_t*)K)+2*nnz);
+
+		if(flags & RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT)
+			;/* if in place, no data to copy */
+		else
+		{
+			/* if not in place, we copy first */
+			RSB_COO_MEMCPY(rVA,rIA,rJA,VA,IA,JA,0,0,nnz,RSB_NUMERICAL_TYPE_SIZE(typecode));
+		}
+
+		if(!was_already_sorted)
+			rsb_ip_reord(nnz, rVA, rIA, rJA, ((rsb_coo_idx_t*)K)+2*nnz, typecode);
+	}
+
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	/* FIXME : UNFINISHED */
+
+	goto done;
+done:
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+err:
+	RSB_CONDITIONAL_FREE(K);
+
+	RSB_DO_ERR_RETURN(errval)
+}
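+/*
+ * The routine above packs each nonzero's block coordinates into a single
+ * integer key, sorts a (key, position) permutation array, and then permutes
+ * VA/IA/JA accordingly. The disabled sketch below illustrates that technique
+ * in miniature (error checks omitted); the demo_* names are hypothetical and
+ * not part of librsb.
+ */
+#if 0
+#include <stdlib.h>
+#include <string.h>
+
+struct demo_kp { long key; long pos; };
+
+static int demo_cmp(const void *a, const void *b)
+{
+	const struct demo_kp *ka = a, *kb = b;
+	return (ka->key > kb->key) - (ka->key < kb->key);
+}
+
+/* Sort nnz coordinates (IA,JA) with double values VA in row-major order of
+ * a k-column matrix. Note i*k+j may overflow for large matrices: this is
+ * exactly the overflow the routine above checks for before proceeding. */
+static void demo_index_sort(double *VA, long *IA, long *JA, long nnz, long k)
+{
+	struct demo_kp *K = malloc(nnz * sizeof(*K));
+	double *tV = malloc(nnz * sizeof(*tV));
+	long *tI = malloc(nnz * sizeof(*tI)), *tJ = malloc(nnz * sizeof(*tJ)), n;
+
+	for (n = 0; n < nnz; ++n)
+		K[n].key = IA[n] * k + JA[n], K[n].pos = n;
+	qsort(K, nnz, sizeof(*K), demo_cmp);	/* sort the permutation */
+	for (n = 0; n < nnz; ++n)		/* apply it out of place */
+		tV[n] = VA[K[n].pos], tI[n] = IA[K[n].pos], tJ[n] = JA[K[n].pos];
+	memcpy(VA, tV, nnz * sizeof(*tV));
+	memcpy(IA, tI, nnz * sizeof(*tI));
+	memcpy(JA, tJ, nnz * sizeof(*tJ));
+	free(K); free(tV); free(tI); free(tJ);
+}
+#endif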
+
+rsb_err_t rsb__do_index_based_bcsr_sort( 
+	const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const void * VA,
+	rsb_coo_idx_t * rIA, rsb_coo_idx_t * rJA, void * rVA,
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_coo_idx_t br, rsb_coo_idx_t bc,
+	rsb_nnz_idx_t nnz,
+	rsb_type_t typecode,
+	rsb_flags_t flags
+	,enum rsb_op_flags_t op_flags
+	,void * WA, size_t wb
+	)
+{
+	/**
+ 		FIXME : DEPRECATED; should be restructured
+ 
+		\ingroup gr_internals
+		
+		An index-based coordinate sorting routine.
+		Usually faster than merge sort.
+		Will allocate 2 * nnz * sizeof(rsb_nnz_idx_t) bytes for a permutation vector.
+
+		\attention : limited to smaller matrices (will bail out otherwise) due to potential index overflow.
+		\todo : it could be modified to work around the potential overflow problem, but
+			would need an estimate of the maximum nnz per row.
+		FIXME : needs more error checks (e.g.: overflow of the 2*nnz index ...)
+	*/
+	/* nothing to do for RSB_FLAG_WANT_COLUMN_MAJOR_ORDER :
+	 the calling routine should already have swapped input arguments accordingly */
+	rsb_nnz_idx_t * K=NULL;
+	rsb_coo_idx_t * CP=NULL;
+	rsb_coo_idx_t Idim;
+	rsb_coo_idx_t Jdim;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_time_t st=0,pt=0,it=0;
+	rsb_bool_t want_two_pass_sort = 0;	/* cannot be combined with recursive sort for now */
+	RSB_DEBUG_ASSERT(m);
+	RSB_DEBUG_ASSERT(k);
+	RSB_DEBUG_ASSERT(br);
+	RSB_DEBUG_ASSERT(bc);
+
+	Idim=(m+(br-1))/br;
+	Jdim=(k+(bc-1))/bc;
+
+	RSB_DEBUG_ASSERT(Idim>0);
+	RSB_DEBUG_ASSERT(Jdim>0);
+
+	if( flags & RSB_FLAG_OBSOLETE_BLOCK_ASYMMETRIC_Z_SORTING )
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+		
+	if(nnz<2)
+	{
+		if( nnz==1 && rIA && rJA && rVA && rIA!=IA && rJA!=JA && rVA!=VA )
+		{
+			/* nothing to sort; only one copy is needed. */
+			rIA[0]=IA[0];
+			rJA[0]=JA[0];
+			rsb_memcpy(rVA,VA,RSB_NUMERICAL_TYPE_SIZE(typecode));
+		}
+		/* now it is ok to return */
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	if(br<1 || bc<1 || m<1 || k<1)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+	
+	/*
+		FIXME : coo overflow is NOT a menace.
+		nnz_index is THE problem.
+	*/
+	if(0)
+	{
+		size_t a=Idim,b=Jdim,c=a*b;
+		printf("OFLOW ? %ld %ld -> %ld  %d %d %d  %zd %zd %zd\n",
+			(long)(Idim),(long)Jdim,(long)(Idim*Jdim),
+//		(size_t)Idim*(size_t)(Jdim),
+//		(size_t)Idim*(size_t)(Jdim)== (size_t)(RSB_MAX_MATRIX_DIM),
+		(Idim*Jdim)== (size_t)(RSB_MAX_MATRIX_DIM),
+		(Idim*Jdim) < (size_t)(RSB_MAX_MATRIX_DIM),
+		(size_t)((size_t)Idim)*((size_t)Jdim) > (size_t)(Idim*Jdim),
+		(size_t)(((size_t)Idim)*((size_t)Jdim)) , (size_t)(Idim*Jdim),
+		c
+		);
+	}
+
+	/* this check implies a cast to rsb_nnz_idx_t */
+	if(
+	 RSB_NNZ_MUL_OVERFLOW(Idim,Jdim) != 0  ||
+	 RSB_NNZ_MUL_OVERFLOW(Idim,Idim) != 0  ||
+	 RSB_NNZ_MUL_OVERFLOW(Jdim,Jdim) != 0 
+	)
+	{
+		/* overflow. should work around this. */
+/*		RSB_INFO("NO OVERFLOW ? : %ld * %ld = %ld (%ld)  (%zd), Idim=%ld Jdim=%ld\n",
+			(long)m,(long)k,(long)(m*k),(long)(m)*(long)(k),(size_t)((size_t)m)*((size_t)k),(long)Idim,(long)Jdim);
+
+		RSB_ERROR(	"WARNING : Coordinate index overflow possible for a single pass sort."
+					"Switching to double pass.\n");
+*/
+		want_two_pass_sort = 1;
+	}
+
+	if(want_two_pass_sort)
+	{
+		if(WA && wb >= RSB_DO_REQUIRE_BYTES_FOR_INDEX_BASED_SORT_TWO_PASS(nnz,m,k,br,bc))
+			CP=WA;
+		else
+			CP = rsb__malloc(RSB_DO_REQUIRE_BYTES_FOR_INDEX_BASED_SORT_TWO_PASS(nnz,m,k,br,bc));
+		if(!CP)
+		{
+			errval = RSB_ERR_ENOMEM;
+			RSB_PERR_GOTO(err,RSB_ERRM_ES)
+		}
+		it = - rsb_time();
+		errval = rsb__do_coo_index_sort_on_rows_array_make(CP,IA,m,br,nnz,typecode);
+		it += rsb_time();
+	}
+	else
+	{
+		if(WA && wb >= RSB_DO_REQUIRE_BYTES_FOR_INDEX_BASED_SORT_ONE_PASS(nnz,m,k,br,bc))
+			K=WA;
+		else
+			K = rsb__malloc(RSB_DO_REQUIRE_BYTES_FOR_INDEX_BASED_SORT_ONE_PASS(nnz,m,k,br,bc));
+		if(!K)
+		{
+			errval = RSB_ERR_ENOMEM;
+			RSB_PERR_GOTO(err,RSB_ERRM_ES)
+		}
+		it = - rsb_time();
+		errval = rsb__do_nnz_index_sort_array_make(K,IA,JA,m,k,0,br,bc,nnz,typecode,flags,0,op_flags);
+
+		it += rsb_time();
+	}
+
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	/* 
+	   note : sorting nnz elements of half the size barely improves sort times:
+	   this suggests most of the cost is overhead...
+	 */
+
+	/*
+	 * On my x86 machines I have measured roughly a 15% gain over qsort by recompiling glibc's
+	 * sort with a macro comparison argument, and a paradoxical slowdown when making the record
+	 * size a compile-time constant.
+	 * This may not hold on other architectures, however.
+	 */
+
+	/* should sort here : FIXME : this could be faster */
+
+	st = - rsb_time();
+#ifdef RSB_HAVE_GSL
+	/* uhm, slow */
+	if(want_two_pass_sort)
+		gsl_heapsort( CP , (size_t) nnz, 2*sizeof(rsb_coo_idx_t), &rsb_compar_coo_idx_t );
+	else
+		gsl_heapsort( K , (size_t) nnz, 2*sizeof(rsb_nnz_idx_t), &rsb_compar_nnz_idx_t );
+#else /* RSB_HAVE_GSL */
+	if(want_two_pass_sort)
+		qsort( CP , (size_t) nnz, 2*sizeof(rsb_coo_idx_t), &rsb_compar_coo_idx_t );
+	else
+		qsort( K , (size_t) nnz, 2*sizeof(rsb_nnz_idx_t), &rsb_compar_nnz_idx_t );
+#endif /* RSB_HAVE_GSL */
+	st += rsb_time();
+
+	pt = - rsb_time();
+	/* compacting K into its first half */
+	if(want_two_pass_sort)
+		rsb__do_util_compact_permutation_coo_idx_t_array(CP, nnz);
+	else
+		rsb__do_util_compact_permutation_nnz_idx_t_array(K , nnz);
+
+	/* TODO : we should do this in place. */
+	if(want_two_pass_sort)
+	{
+		if(flags & RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT)
+		{
+			errval = rsb__do_permute_values_in_place_with_coo_index(rVA, rIA, rJA, CP, nnz, typecode);
+		}
+		else
+		{
+			errval = rsb__do_permute_values_with_coo_index(rVA, VA, rIA, IA, rJA, JA, CP, nnz, typecode);
+			RSB_COO_MEMCPY(rVA,rIA,rJA,VA,IA,JA,0,0,nnz,RSB_NUMERICAL_TYPE_SIZE(typecode));
+		}
+	}
+	else
+	{
+		if(flags & RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT)
+			errval = rsb__do_permute_values_in_place_with_nnz_index(rVA, rIA, rJA, K, nnz, typecode);
+		else
+			errval = rsb__do_permute_values_with_nnz_index(rVA, VA, rIA, IA, rJA, JA, K, nnz, typecode);
+	}
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	pt += rsb_time();
+
+	if(want_two_pass_sort)
+	{
+		rsb_nnz_idx_t n1=0,n2=0;
+		size_t el_size = RSB_SIZEOF(typecode);
+
+		if(!CP)
+		{
+			errval = RSB_ERR_ENOMEM;
+			RSB_PERR_GOTO(err,RSB_ERRM_ES)
+		}
+		it = - rsb_time();
+		while(n1!=nnz)
+		{
+			/* FIXME : qsort is slow. should use the faster routines we already have. */
+			/* FIXME : need specialized code */
+#if 0
+			while( n2+1<nnz && IA[n1]/br==IA[n2+1]/br )
+				++n2;
+#else
+			/* EXPERIMENTAL */
+			/*
+				we don't know in advance how many elements belong to this block row.
+				we first go fast forward, then slow down :)
+			 */
+			rsb_nnz_idx_t delta=1;
+			while( n2+delta<nnz && IA[n1]/br==IA[n2+delta]/br )
+				n2+=delta,delta*=2;
+
+			/* now, n2+delta>=nnz  ||  IA[n1]/br!=IA[n2+delta]/br */
+	                RSB_DEBUG_ASSERT(n2+delta>=nnz  ||  IA[n1]/br!=IA[n2+delta]/br);
+			/* if delta == 0, we are done. */
+			while( delta>0 )
+			{
+				if( n2>=nnz || IA[n1]/br!=IA[n2]/br )
+					delta/=2, n2-=delta;
+				else
+				if( n2+delta<nnz && IA[n1]/br==IA[n2+delta]/br )
+					n2+=delta, delta/=2;
+				else
+					delta/=2;
+			}
+			RSB_DEBUG_ASSERT( n2  < nnz && IA[n1]/br==IA[n2  ]/br );
+			RSB_DEBUG_ASSERT( n2+1>=nnz || IA[n1]/br!=IA[n2+1]/br );
+	                RSB_DEBUG_ASSERT(n2<nnz);
+	                RSB_DEBUG_ASSERT(IA[n1]/br==IA[n2]/br);
+#endif
+
+			/* FIXME : WE NEED A SPECIALIZED CODE , rsb_nnz_idx_t != rsb_coo_idx_t */
+			if(0)RSB_INFO("sorting : %zd .. %zd\n",(rsb_printf_int_t)n1,(rsb_printf_int_t)n2);
+			//errval = rsb__do_coo_index_sort_on_rows_array_make(CP,JA+n1,k,bc,(n2+1)-n1,typecode);
+			errval = rsb__do_nnz_index_sort_array_make(CP,IA+n1,JA+n1,m,k,IA[n1],br,bc,(n2+1)-n1,typecode,flags,0,op_flags);
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_PERR_GOTO(err,RSB_ERRM_ES)
+			}
+			//qsort( CP , (size_t) (n2+1)-n1, 2*sizeof(rsb_coo_idx_t), &rsb_compar_coo_idx_t );
+			qsort( CP , (size_t) (n2+1)-n1, 2*sizeof(rsb_nnz_idx_t), &rsb_compar_nnz_idx_t );
+			rsb__do_util_compact_permutation_nnz_idx_t_array(CP, (n2+1)-n1);
+			if(flags & RSB_FLAG_EXPERIMENTAL_IN_PLACE_PERMUTATION_SORT)
+				errval = rsb__do_permute_values_in_place_with_nnz_index(((char*)rVA)+el_size*n1, rIA+n1, rJA+n1, CP, (n2+1)-n1, typecode);
+			else
+				errval = rsb__do_permute_values_with_nnz_index(((char*)rVA)+el_size*n1,((char*)VA)+el_size*n1, rIA+n1, IA+n1, rJA+n1, JA+n1, CP, (n2+1)-n1, typecode);
+
+			//errval = rsb__do_permute_values_in_place_with_coo_index(((char*)VA)+el_size*n1, IA+n1, JA+n1, CP, (n2+1)-n1, typecode);
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_PERR_GOTO(err,RSB_ERRM_ES)
+			}
+
+//	for(n=n1;RSB_LIKELY(n<=n2);++n) printf("%d : %d %d\n",n1,IA[n],JA[n]); printf("\n");
+
+			++n2;n1=n2;
+		}
+	}
+
+//	RSB_INFO("#sorting : nnz/s : %lg\n",((double)nnz)/(st));
+//	RSB_INFO("#sorting : nnz*log(nnz)/s : %lg\n",((double)nnz)*log((double)nnz)/(st));
+
+	// sorting/permutation time = 4 ~ 10 
+	if( RSB_WANT_VERBOSE_MESSAGES )
+	RSB_INFO(	"# sorting times:\n"
+			"#index init 		: %lg\n"
+			"#index sorting (qsort)	: %lg\n"
+			"#data permutation	: %lg\n"
+			"#sorting/permutation	: %lg\n",
+			it,st,pt,st/pt
+	);
+	//RSB_INFO("#allocation time   : %lg\n",at);
+
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+/*	{rsb_nnz_idx_t i; for(i=0;i<nnz;++i)RSB_INFO("%d : %d \n",i,K[nnz+i]);}
+	{rsb_nnz_idx_t i; for(i=0;i<nnz;++i)RSB_INFO("%d , %d \n",IA[i],JA[i]);}*/
+//	{rsb_nnz_idx_t i; for(i=0;i<nnz;++i)RSB_INFO("%d , %d \n",rIA[i],rJA[i]);}
+
+err:
+	if(WA && want_two_pass_sort)
+	{
+		if( wb >= RSB_DO_REQUIRE_BYTES_FOR_INDEX_BASED_SORT_TWO_PASS(nnz,m,k,br,bc))
+			;
+		else
+			RSB_CONDITIONAL_FREE(CP);
+	} 
+	if(WA && !want_two_pass_sort)
+	{
+		if( wb >= RSB_DO_REQUIRE_BYTES_FOR_INDEX_BASED_SORT_ONE_PASS(nnz,m,k,br,bc))
+			;
+		else
+			RSB_CONDITIONAL_FREE(K);
+	} 
+
+	RSB_DO_ERR_RETURN(errval)
+}
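+/*
+ * The block-row boundary scan in the routine above first doubles a stride
+ * ("fast forward"), then halves it ("slow down"): an exponential search
+ * followed by binary refinement. A self-contained sketch of the same idea
+ * follows (disabled; the demo_ name is hypothetical, not part of librsb).
+ */
+#if 0
+/* Return the last index e in [b, n) with A[e] == A[b], assuming A is
+ * non-decreasing: cost is O(log(e - b)) instead of O(e - b). */
+static long demo_gallop_run_end(const long *A, long b, long n)
+{
+	long e = b, delta = 1;
+
+	while (e + delta < n && A[e + delta] == A[b])
+		e += delta, delta *= 2;	/* fast forward */
+	while (delta > 0)		/* then slow down */
+	{
+		if (e + delta < n && A[e + delta] == A[b])
+			e += delta;
+		delta /= 2;
+	}
+	return e;	/* A[e]==A[b], and either e+1==n or A[e+1]!=A[b] */
+}
+#endif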
+
+
+
+static RSB_INLINE rsb_coo_idx_t rsb_do_undilate_coo_odd(rsb_coo_idx_t w)
+{
+	rsb_coo_idx_t E=w;
+	if (sizeof(rsb_coo_idx_t)==1)
+	{
+		E = (E & 0x11) | ((E & 0x44)>>1);
+		E = (E & 0x0F) | ((E & 0x30)>>2);
+	}
+	else
+	if (sizeof(rsb_coo_idx_t)==2)
+	{
+		E = (E & 0x1111) | ((E & 0x4444)>>1);
+		E = (E & 0x0303) | ((E & 0x3030)>>2);
+		E = (E & 0x000F) | ((E & 0x0F00)>>4);
+	}
+	else
+	if (sizeof(rsb_coo_idx_t)==4)
+	{
+		E = (E & 0x11111111) | ((E & 0x44444444)>>1);
+		E = (E & 0x03030303) | ((E & 0x30303030)>>2);
+		E = (E & 0x000F000F) | ((E & 0x0F000F00)>>4);
+		E = (E & 0x000000FF) | ((E & 0x00FF0000)>>8);
+	}
+	else
+	if (sizeof(rsb_coo_idx_t)==8)
+	{
+		E = (E & 0x1111111111111111) | ((E & 0x4444444444444444)>>1 );
+		E = (E & 0x0303030303030303) | ((E & 0x3030303030303030)>>2 );
+		E = (E & 0x000F000F000F000F) | ((E & 0x0F000F000F000F00)>>4 );
+		E = (E & 0x000000FF000000FF) | ((E & 0x00FF000000FF0000)>>8 );
+		E = (E & 0x000000000000FFFF) | ((E & 0x000000FFFF000000)>>16);
+		RSB_ERROR(RSB_ERRM_FYCITINS);
+	}
+	else
+	{
+		RSB_ERROR(RSB_ERRM_FYCITINS);
+		/* FIXME : fatal! */
+	}
+	return E;
+}
+
+static RSB_INLINE rsb_coo_idx_t rsb_do_undilate_coo_even(rsb_coo_idx_t w)
+{
+	return rsb_do_undilate_coo_odd(w>>1);
+}
+
+static RSB_INLINE rsb_coo_idx_t rsb_do_dilate_coo(rsb_coo_idx_t w)
+{
+	rsb_coo_idx_t E=w;
+	if (sizeof(rsb_coo_idx_t)==1)
+	{
+		E = (E | (E << 2)) & 0x33;
+		E = (E | (E << 1)) & 0x55;
+	}
+	else
+	if (sizeof(rsb_coo_idx_t)==2)
+	{
+		E = (E | (E << 4)) & 0x0F0F;
+		E = (E | (E << 2)) & 0x3333;
+		E = (E | (E << 1)) & 0x5555;
+	}
+	else
+	if (sizeof(rsb_coo_idx_t)==4)
+	{
+		E = (E | (E << 8)) & 0x00FF00FF;
+		E = (E | (E << 4)) & 0x0F0F0F0F;
+		E = (E | (E << 2)) & 0x33333333;
+		E = (E | (E << 1)) & 0x55555555;
+	}
+	else
+	if (sizeof(rsb_coo_idx_t)==8)
+	{
+		E = (E | (E <<16)) & 0x0000FFFF0000FFFF;
+		E = (E | (E << 8)) & 0x00FF00FF00FF00FF;
+		E = (E | (E << 4)) & 0x0F0F0F0F0F0F0F0F;
+		E = (E | (E << 2)) & 0x3333333333333333;
+		E = (E | (E << 1)) & 0x5555555555555555;
+	}
+	else
+	{
+		RSB_ERROR(RSB_ERRM_FYCITINS);
+		/* FIXME : fatal! */
+	}
+	return E;
+}
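+/*
+ * A worked example of the dilation above, on a 4-bit toy case (illustrative
+ * only; the demo_* names are hypothetical): dilate(abcd) = 0a0b0c0d, so two
+ * dilated half-words interleave into one Morton (Z-order) key.
+ */
+#if 0
+#include <assert.h>
+
+static unsigned demo_dilate4(unsigned x)	/* dilate 4 bits into 8 */
+{
+	x = (x | (x << 2)) & 0x33;
+	x = (x | (x << 1)) & 0x55;
+	return x;
+}
+
+static void demo_morton_example(void)
+{
+	unsigned i = 0x6 /* 0110 */, j = 0xB /* 1011 */;
+	unsigned z = (demo_dilate4(i) << 1) | demo_dilate4(j);
+
+	assert(demo_dilate4(i) == 0x14);	/* 00010100 */
+	assert(demo_dilate4(j) == 0x45);	/* 01000101 */
+	assert(z == 0x6D);	/* 01101101 = i3 j3 i2 j2 i1 j1 i0 j0 */
+}
+#endif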
+
+#define RSB_COO_INDEX_LO_MASK	0x0000FFFF
+#define RSB_COO_INDEX_HI_MASK	0xFFFF0000
+#define RSB_COO_INDEX_HBITSOF    ((RSB_CHAR_BIT*(sizeof(rsb_coo_idx_t))/2))
+#define RSB_COO_INDEX_HI_SHIFTED(X) (((X)&RSB_COO_INDEX_HI_MASK)>>RSB_COO_INDEX_HBITSOF)
+#define RSB_COO_INDEX_EVEN_MASK	0xAAAAAAAA
+#define RSB_COO_INDEX_ODD_MASK	0x55555555
+
+static RSB_INLINE rsb_coo_idx_t RSB_Z_2_COO_HI_WORD(rsb_coo_idx_t i, rsb_coo_idx_t j)
+{
+	return rsb_do_undilate_coo_even(j) |(rsb_do_undilate_coo_even(i)<<RSB_COO_INDEX_HBITSOF);
+}
+static RSB_INLINE rsb_coo_idx_t RSB_Z_2_COO_LO_WORD(rsb_coo_idx_t i, rsb_coo_idx_t j)
+{
+	return rsb_do_undilate_coo_odd(j) |(rsb_do_undilate_coo_odd(i)<<RSB_COO_INDEX_HBITSOF);
+}
+static RSB_INLINE rsb_coo_idx_t RSB_COO_2_Z_HI_WORD(rsb_coo_idx_t i, rsb_coo_idx_t j)
+{
+	return ( (rsb_do_dilate_coo(RSB_COO_INDEX_HI_SHIFTED(i))<<1)| (rsb_do_dilate_coo(RSB_COO_INDEX_HI_SHIFTED(j))));
+}
+static RSB_INLINE rsb_coo_idx_t RSB_COO_2_Z_LO_WORD(rsb_coo_idx_t i, rsb_coo_idx_t j)
+{
+	return ( (rsb_do_dilate_coo(i&RSB_COO_INDEX_LO_MASK)<<1)| (rsb_do_dilate_coo(j&RSB_COO_INDEX_LO_MASK) ));
+}	
+
+rsb_err_t rsb__do_index_based_z_morton_sort( 
+	const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const void * VA,
+	rsb_coo_idx_t * rIA, rsb_coo_idx_t * rJA, void * rVA,
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_nnz_idx_t nnz,
+	rsb_type_t typecode
+	,enum rsb_op_flags_t op_flags
+	)
+{
+	/**
+		\ingroup gr_internals
+	*/
+	//rsb_nnz_idx_t * K=NULL;
+	//rsb_coo_idx_t Idim;
+	//rsb_coo_idx_t Jdim;
+	//const rsb_coo_idx_t br=1,bc=1;
+	rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+	
+	//rsb_nnz_idx_t n;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	//rsb_time_t it=0;
+	//rsb_nnz_idx_t nIdim,nJdim;
+
+	RSB_DEBUG_ASSERT(m);
+	RSB_DEBUG_ASSERT(k);
+	//RSB_DEBUG_ASSERT(br);
+	//RSB_DEBUG_ASSERT(bc);
+
+	{
+		rsb_nnz_idx_t n=0;
+		rsb_coo_idx_t t;
+		for(n=0;n<nnz;++n)
+		{
+//			printf("%d: %0x %0x -> ",n,rIA[n],rJA[n]);
+			t = RSB_COO_2_Z_HI_WORD(rIA[n],rJA[n]);
+			rJA[n]=RSB_COO_2_Z_LO_WORD(rIA[n],rJA[n]);
+			rIA[n]=t;
+//			printf(" %0x %0x\n",rIA[n],rJA[n]) ;
+		}
+	//	for(n=0;n<nnz;++n) printf("%d: %d %d\n",n,rIA[n],rJA[n]);
+		errval = rsb_util_sort_row_major_inner(rVA,rIA,rJA,nnz,m,k,typecode,flags);
+		for(n=0;n<nnz;++n)
+		{
+//			printf("%x: %0x %0x -> ",n,rIA[n],rJA[n]);
+			t = RSB_Z_2_COO_HI_WORD(rIA[n],rJA[n]);
+			rJA[n]=RSB_Z_2_COO_LO_WORD(rIA[n],rJA[n]);
+			rIA[n]=t;
+//			printf(" %0x %0x\n",rIA[n],rJA[n]) ;
+		}
+//		for(n=0;n<nnz;++n) printf("%d: %d %d\n",n,rIA[n],rJA[n]);
+	}
+	RSB_DO_ERR_RETURN(errval)
+}
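+/*
+ * The routine above reduces Morton sorting to row-major sorting: each (i,j)
+ * is encoded into Z-order words whose lexicographic order is the Z-curve
+ * order, the generic row-major sorter runs on those, and the words are then
+ * decoded back. Below, a naive (bit-by-bit) encoder showing the same mapping,
+ * for illustration only (the demo_ name is hypothetical).
+ */
+#if 0
+/* Pack 16-bit (i,j) into one 32-bit Z-order key: row bits at odd
+ * positions, column bits at even positions. Sorting nonzeros by
+ * demo_z_encode(IA[n],JA[n]) yields the Z-curve traversal order. */
+static unsigned demo_z_encode(unsigned short i, unsigned short j)
+{
+	unsigned z = 0, b;
+
+	for (b = 0; b < 16; ++b)
+	{
+		z |= ((unsigned)(i >> b) & 1u) << (2 * b + 1);
+		z |= ((unsigned)(j >> b) & 1u) << (2 * b);
+	}
+	return z;
+}
+#endif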
+
+/* @endcond */
diff --git a/rsb_srt.h b/rsb_srt.h
new file mode 100644
index 0000000..5e6a6da
--- /dev/null
+++ b/rsb_srt.h
@@ -0,0 +1,149 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains sorting functions.
+ * */
+
+#ifndef RSB_SRT_H_INCLUDED
+#define RSB_SRT_H_INCLUDED
+
+#include "rsb_internals.h"	/* rsb_coo_matrix_t */
+
+rsb_err_t rsb__do_util_sortcoo(
+	void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA,
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_nnz_idx_t nnz, rsb_type_t typecode,
+	const struct rsb_mtx_partitioning_info_t * pinfop , rsb_flags_t flags, void * WA, size_t wb);
+
+rsb_err_t rsb__do_index_based_bcsr_sort( 
+	const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const void * VA,
+	rsb_coo_idx_t * rIA, rsb_coo_idx_t * rJA, void * rVA,
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_coo_idx_t br, rsb_coo_idx_t bc,
+	rsb_nnz_idx_t nnz,
+	rsb_type_t typecode,
+	rsb_flags_t flags
+	,enum rsb_op_flags_t op_flags
+       	, void * WA, size_t wb);
+
+rsb_nnz_idx_t rsb__asymmetric_z_index( const rsb_coo_idx_t i, const rsb_coo_idx_t j, rsb_coo_idx_t m, rsb_coo_idx_t k	, int ml, int kl);
+void rsb__asymmetric_z_nnz_indices( const rsb_coo_idx_t i, const rsb_coo_idx_t j, rsb_coo_idx_t m, rsb_coo_idx_t k	, int ml, int kl, rsb_nnz_idx_t * a , rsb_nnz_idx_t * b );
+
+rsb_err_t rsb__do_index_based_bcsr_msort( 
+	rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, void * VA,
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_coo_idx_t br, rsb_coo_idx_t bc,
+	rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_flags_t flags
+	,enum rsb_op_flags_t op_flags
+	,void * WA, size_t wb
+	);
+
+rsb_err_t rsb__do_index_based_recursive_bcsr_sort( 
+	const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const void * VA,
+	rsb_coo_idx_t * rIA, rsb_coo_idx_t * rJA, void * rVA,
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_coo_idx_t br, rsb_coo_idx_t bc,
+	rsb_nnz_idx_t nnz,
+	rsb_type_t typecode,
+	rsb_flags_t flags
+	,enum rsb_op_flags_t op_flags
+	);
+
+rsb_err_t rsb__do_nnz_index_based_sort_and_permute( 
+	const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const void * VA,
+	rsb_coo_idx_t * rIA, rsb_coo_idx_t * rJA, void * rVA,
+	rsb_nnz_idx_t * K, rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_flags_t flags
+	,enum rsb_op_flags_t op_flags
+	);
+
+void rsb__do_util_compact_permutation_nnz_idx_t_array(rsb_nnz_idx_t * K, rsb_nnz_idx_t nnz);
+void rsb__do_util_compact_permutation_coo_idx_t_array(rsb_coo_idx_t * K, rsb_nnz_idx_t nnz);
+rsb_err_t rsb__do_coo_index_sort_on_rows_array_make( 
+	rsb_coo_idx_t * K, const rsb_coo_idx_t * IA,
+	const rsb_coo_idx_t m, const rsb_coo_idx_t br,
+	const rsb_nnz_idx_t nnz, const rsb_type_t typecode);
+
+rsb_err_t rsb__do_nnz_index_based_bcsr_msort( 
+	rsb_coo_idx_t * rIA, rsb_coo_idx_t * rJA, void * rVA,
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_coo_idx_t br, rsb_coo_idx_t bc,
+	rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_flags_t flags
+	,enum rsb_op_flags_t op_flags
+	,void * WA, size_t wb);
+
+rsb_err_t rsb__do_double_pass_nnz_index_based_bcsr_msort( 
+	rsb_coo_idx_t * rIA, rsb_coo_idx_t * rJA, void * rVA,
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_coo_idx_t br, rsb_coo_idx_t bc,
+	rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_flags_t flags);/* FIXME */
+
+rsb_err_t rsb__do_nnz_index_sort_array_make( 
+	rsb_nnz_idx_t * K, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA,
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_coo_idx_t roffset,
+	rsb_coo_idx_t br, rsb_coo_idx_t bc,
+	rsb_nnz_idx_t nnz,
+	rsb_type_t typecode,
+	rsb_flags_t flags,
+	int want_recursive_sort
+	,enum rsb_op_flags_t op_flags
+	/*, int want_rows_sort */);
+
+rsb_err_t rsb__do_double_coo_index_sort_array_make( 
+	rsb_coo_idx_t * K, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA,
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_coo_idx_t roffset,
+	rsb_coo_idx_t br, rsb_coo_idx_t bc,
+	rsb_nnz_idx_t nnz,
+	rsb_type_t typecode,
+	rsb_flags_t flags,
+	int want_recursive_sort
+	,enum rsb_op_flags_t op_flags
+	/*, int want_rows_sort */);
+
+rsb_nnz_idx_t rsb__nearest_power_of_two( const rsb_nnz_idx_t n );
+
+void rsb__asymmetric_z_indices_encode( const rsb_coo_idx_t i, const rsb_coo_idx_t j, rsb_coo_idx_t m, rsb_coo_idx_t k	, int ml, int kl , rsb_coo_idx_t *h, rsb_coo_idx_t *l);
+rsb_err_t rsb__do_index_based_z_morton_sort( 
+	const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const void * VA,
+	rsb_coo_idx_t * rIA, rsb_coo_idx_t * rJA, void * rVA,
+	rsb_coo_idx_t m, rsb_coo_idx_t k,
+	rsb_nnz_idx_t nnz,
+	rsb_type_t typecode
+	,enum rsb_op_flags_t op_flags
+	);
+
+#define RSB_DO_REQUIRE_BYTES_FOR_INDEX_BASED_SORT_ONE_PASS(NNZ,M,K,BR,BC) (((NNZ)+1) * sizeof(rsb_nnz_idx_t)  * 2)
+#define RSB_DO_REQUIRE_BYTES_FOR_INDEX_BASED_SORT_TWO_PASS(NNZ,M,K,BR,BC)  \
+	RSB_MAX((((NNZ)+1) * sizeof(rsb_coo_idx_t)  * 2),(K)*(BR) * sizeof(rsb_nnz_idx_t) * 2)
+
+
+#define RSB_DO_REQUIRE_BYTES_FOR_INDEX_BASED_SORT(NNZ,M,K,BR,BC) \
+	RSB_MAX( \
+		RSB_DO_REQUIRE_BYTES_FOR_INDEX_BASED_SORT_ONE_PASS(NNZ,M,K,BR,BC), \
+		RSB_DO_REQUIRE_BYTES_FOR_INDEX_BASED_SORT_TWO_PASS(NNZ,M,K,BR,BC))
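+/* Worked example (illustrative arithmetic, assuming 4-byte rsb_nnz_idx_t and
+   rsb_coo_idx_t): for NNZ=1000, K=500, BR=1,
+     ONE_PASS = (1000+1)*4*2         = 8008 bytes,
+     TWO_PASS = max(8008, 500*1*4*2) = 8008 bytes,
+   so a workspace of RSB_DO_REQUIRE_BYTES_FOR_INDEX_BASED_SORT(...) bytes
+   suffices for either strategy. */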
+#endif /* RSB_SRT_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_srtp.c b/rsb_srtp.c
new file mode 100644
index 0000000..5275bb0
--- /dev/null
+++ b/rsb_srtp.c
@@ -0,0 +1,421 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains parallel sorting functions.
+ * */
+
+#include "rsb_common.h"
+
+#define RSB_DO_WANT_PSORT_VERBOSE 0	/* set this to >0 to print parallel sort statistics */
+#define RSB_DO_WANT_PSORT_TIMING (RSB_DO_WANT_PSORT_VERBOSE+0) 	/* set this to 1 to print some statistics */
+#define RSB_DO_WANT_PSORT_FASTER_BUT_RISKY 0	/* FIXME: STILL UNFINISHED  */
+#define RSB_WANT_SORT_PARALLEL_BUT_SLOW 1
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+rsb_err_t rsb__util_sort_row_major_parallel(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t m, rsb_coo_idx_t k,  rsb_type_t typecode, rsb_flags_t flags)
+{
+	/**
+	 * \ingroup gr_util
+	 * TODO: should describe somewhere our technique: this is a mixed counting sort + merge sort.
+	*/
+
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int cfi;
+	size_t el_size;
+	float cfa[] = {2}; // will use threads count as a reference
+//	float cfa[] = {1}; // will use cache size as a reference
+//	float cfa[] = {-1};  // will use as much memory as possible
+	const long wet = rsb_get_num_threads(); /* want executing threads */
+
+	if(nnz<RSB_MIN_THREAD_SORT_NNZ*wet)
+	{
+		// FIXME: it is known that very small matrices (e.g.: 2x2 from `make tests`) are not handled here.
+		// however, this should be handled in a better way :)
+		return rsb_util_sort_row_major_inner(VA,IA,JA,nnz,m,k,typecode,flags);
+	}
+
+	if(!IA || !JA || !VA)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	el_size = RSB_SIZEOF(typecode);
+
+	for(cfi=0;cfi<sizeof(cfa)/sizeof(float);++cfi)
+	{
+		void *W = NULL;
+		rsb_char_t *IW = NULL;
+		int ti;
+		long cs,bs,tc,ns,tcs;
+		rsb_nnz_idx_t cnnz = 0;
+		size_t bnnz = 0,fsm = rsb__sys_free_system_memory();
+		size_t wb = 0;
+
+#if RSB_DO_WANT_PSORT_TIMING
+		rsb_time_t dt,st,mt,tt;
+#endif /* RSB_DO_WANT_PSORT_TIMING */
+		// compute how many bytes are necessary for an element
+		ns=2*sizeof(rsb_coo_idx_t)+el_size;
+		// compute how many bytes are necessary for the whole processing
+		bs=nnz*ns;
+		if(cfa[cfi]>1)
+			tcs=bs+(wet*ns);
+		else if(cfa[cfi]>0)
+			tcs = rsb__get_lastlevel_c_size()*cfa[cfi]*wet;/* FIXME: '*wet' is a hack just for benchmark-related issues */
+		else
+			tcs=fsm/2; /* could be 0 */
+
+		if(tcs<1)
+			tcs=bs;	/* could happen, for an interfacing problem */
+		else
+		if(fsm>0)
+		{
+			tcs = RSB_MIN(fsm,tcs);
+		}
+
+		// prepare a buffer
+		W = rsb__malloc(tcs);
+		if(!W)
+		{
+			errval = RSB_ERR_ENOMEM;
+			RSB_PERR_GOTO(erri,RSB_ERRM_ES)
+		}	
+		cs=tcs/wet;
+		//RSB_INFO("cache is %d bytes\n",cs);
+
+		// compute the nnz fitting in the buffer
+		cnnz=cs/ns;
+		// compute the total count of necessary passes
+		//tc=(bs+cs-1)/cs;
+		tc=(nnz+(cnnz-1))/(cnnz);
+		
+		wb = RSB_DO_REQUIRE_BYTES_FOR_INDEX_BASED_SORT(cnnz,m,k,1,1);
+		IW = rsb__malloc(wb*wet);
+		if(!IW)
+		{
+			errval = RSB_ERR_ENOMEM;
+			RSB_PERR_GOTO(erri,RSB_ERRM_ES);
+		}	
+
+		//RSB_INFO("there are %d nnz (%d bytes), %d times the cache (%z bytes), %d nnz per cache\n",nnz,bs,tc,cs,cnnz);
+#if RSB_DO_WANT_PSORT_TIMING
+		dt = rsb_time();
+		st=-dt;
+#endif /* RSB_DO_WANT_PSORT_TIMING */
+
+		// this phase is potentially parallel, and is the slower one.
+		// NOTE: before parallelization, one should avoid allocations during the sort, or serialize them in some way!
+		#pragma omp parallel for schedule(static,1) shared(IA,JA,VA)   RSB_NTC 
+		for(ti=0;ti<tc;++ti)
+		{
+			size_t fnnz=ti*cnnz;
+			rsb_nnz_idx_t bnnz=(ti==tc-1)?(nnz-fnnz):cnnz;
+//			RSB_INFO("s:%d..%d (bnnz=%d)\n",fnnz,fnnz+bnnz-1,bnnz);
+			rsb__util_sort_row_major_buffered(((rsb_byte_t*)VA)+el_size*fnnz,IA+fnnz,JA+fnnz,bnnz,m,k,typecode,flags,IW+wb*ti,wb);
+			RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(VA+fnnz,IA+fnnz,JA+fnnz,bnnz,typecode,NULL,flags));
+		}
+
+#if RSB_DO_WANT_PSORT_TIMING
+		dt = rsb_time();
+		st+=dt;
+#endif /* RSB_DO_WANT_PSORT_TIMING */
+		// this phase is potentially parallel, too
+		for(bnnz=cnnz;bnnz<nnz;)
+		{
+		//	size_t fnnz;
+			int fi, fn = ((nnz-bnnz)+(2*bnnz-1))/(2*bnnz);
+			#pragma omp parallel for schedule(static,1) shared(IA,JA,VA)   RSB_NTC 
+//			for(fnnz=0;fnnz<nnz-bnnz;fnnz+=2*bnnz)
+			for(fi=0;fi<fn;++fi)
+			{
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+				rsb_char_t * lW=((rsb_char_t*)W)+cs*omp_get_thread_num();
+#else /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+				rsb_char_t * lW=((rsb_char_t*)W)+cs*0;
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+				size_t fnnz=fi*2*bnnz;
+				size_t lnnz=(fnnz+2*bnnz>nnz)?(nnz-(fnnz+bnnz)):bnnz;
+				void *fVA=((rsb_char_t*)VA)+el_size*fnnz;
+				void *fIA=IA+fnnz;
+				void *fJA=JA+fnnz;
+#if RSB_PS_ASSERT
+				void *bVA=((rsb_char_t*)VA)+el_size*(fnnz+bnnz);
+				void *bIA=IA+fnnz+bnnz;
+				void *bJA=JA+fnnz+bnnz;
+#endif /* RSB_PS_ASSERT */
+				//RSB_INFO("m:%d..%d %d..%d (%d) (bnnz=%d) (lnnz=%d)\n",fnnz,fnnz+bnnz-1,fnnz+bnnz,fnnz+bnnz+lnnz-1,cs,bnnz,lnnz);
+//				RSB_INFO("sentinel:%x %d %d\n",IA+fnnz+bnnz+lnnz,IA[fnnz+bnnz+lnnz],JA[fnnz+bnnz+lnnz]);
+				RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(fVA,fIA,fJA,bnnz,typecode,NULL,flags));
+				RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(bVA,bIA,bJA,lnnz,typecode,NULL,flags));
+				rsb__do_util_merge_sorted_subarrays_in_place(fVA,fIA,fJA,lW,bnnz,lnnz,cs,flags,typecode);
+//				RSB_INFO("sentinel:%x %d %d\n",IA+fnnz+bnnz+lnnz,IA[fnnz+bnnz+lnnz],JA[fnnz+bnnz+lnnz]);
+				RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(fVA,fIA,fJA,bnnz,typecode,NULL,flags));
+				RSB_PS_ASSERT(!rsb__util_is_sorted_coo_as_row_major(fVA,fIA,fJA,bnnz+lnnz,typecode,NULL,flags));
+			}
+			#pragma omp barrier
+			bnnz *= 2;
+		}
+#if RSB_DO_WANT_PSORT_TIMING
+		mt = - dt;
+		dt = rsb_time();
+		mt += dt;
+		tt = mt + st;
+		RSB_INFO("using %zd partitions, (sort=%.5lg+merge=%.5lg)=%.5lg, on %d threads\n",(size_t)tc,st,mt,tt,wet);
+#endif /* RSB_DO_WANT_PSORT_TIMING */
+
+//		assert(!rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,nnz,typecode,NULL,flags));
+//		if(rsb__util_is_sorted_coo_as_row_major(VA,IA,JA,nnz,typecode,NULL,flags))
+//			RSB_PERR_GOTO(err,RSB_ERRM_EM);
+
+erri:
+		RSB_CONDITIONAL_FREE(W);
+		RSB_CONDITIONAL_FREE(IW);
+	}
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
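+/*
+ * The merge phase above in miniature: once chunks of width w are each sorted,
+ * adjacent pairs of runs are merged (independently, hence in parallel) and w
+ * doubles until a single sorted run remains. The disabled sketch below shows
+ * the idea on plain ints with an out-of-place merge; all demo_* names are
+ * hypothetical, not part of librsb (W must hold at least n elements).
+ */
+#if 0
+#include <string.h>
+
+static void demo_merge(int *A, long la, long lb, int *W)
+{
+	long i = 0, j = la, o = 0;
+
+	while (i < la && j < la + lb)
+		W[o++] = (A[i] <= A[j]) ? A[i++] : A[j++];
+	while (i < la) W[o++] = A[i++];
+	while (j < la + lb) W[o++] = A[j++];
+	memcpy(A, W, (size_t)(la + lb) * sizeof(*A));
+}
+
+static void demo_merge_passes(int *A, int *W, long n, long w)
+{
+	for (; w < n; w *= 2)	/* run width doubles at each pass */
+	{
+		long f;
+		/* the merges below are independent: parallelizable */
+		for (f = 0; f + w < n; f += 2 * w)
+			demo_merge(A + f, w,
+				(f + 2 * w > n) ? n - (f + w) : w, W);
+	}
+}
+#endif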
+
+rsb_err_t rsb_util_sort_row_major_inner(void * RSB_RESTRICT VA, rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t * RSB_RESTRICT JA, const rsb_nnz_idx_t nnz, const rsb_coo_idx_t m, const rsb_coo_idx_t k, const  rsb_type_t typecode , const rsb_flags_t flags /*, void * WA, size_t wb */)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+#if RSB_WANT_SORT_PARALLEL_BUT_SLOW
+		if( rsb_global_session_handle.asm_sort_method > 0 )
+		/* parallel and scaling but slow */
+			errval = rsb__util_sort_row_major_parallel(VA,IA,JA,nnz,m,k,typecode,flags);
+		else
+#else /* RSB_WANT_SORT_PARALLEL_BUT_SLOW */
+#endif  /* RSB_WANT_SORT_PARALLEL_BUT_SLOW */
+			/* not so parallel nor scaling but fast */
+			errval = rsb_util_sort_row_major_bucket_based_parallel(VA,IA,JA,nnz,m,k,typecode,flags);
+		return errval;
+}
+
+rsb_err_t rsb_util_sort_row_major_bucket_based_parallel(void * RSB_RESTRICT VA, rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t * RSB_RESTRICT JA, const rsb_nnz_idx_t nnz, const rsb_coo_idx_t m, const rsb_coo_idx_t k, const  rsb_type_t typecode , const rsb_flags_t flags /*, void * WA, size_t wb */)
+{
+	/**
+		\ingroup gr_internals
+		FIXME: EXPERIMENTAL, DOCUMENT ME 
+		FIXME: shall tolerate duplicates, and consequently e.g. m*n<nnz (such input actually occurs, e.g. from sparse matrix sums).
+	*/
+	int psc = RSB_PSORT_CHUNK;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+//	rsb_nnz_idx_t frnz = 0;
+	rsb_nnz_idx_t mnzpr = 0;
+	rsb_nnz_idx_t *PA = NULL;
+	void *WA = NULL;
+	rsb_coo_idx_t *iWA = NULL;
+	rsb_coo_idx_t *jWA = NULL;
+	rsb_coo_idx_t *nWA = NULL;
+	void *vWA = NULL;
+	rsb_nnz_idx_t n = 0;
+	size_t el_size = RSB_SIZEOF(typecode);
+	//struct rsb_mtx_partitioning_info_t pinfop;
+	/* const long wet = rsb_get_num_threads();*/ /* want executing threads */
+	const long wet = rsb__set_num_threads(RSB_THREADS_GET_MAX_SYS); /* want executing threads; FIXME: it seems there is a severe bug with the definition of RSB_NTC */
+#if RSB_DO_WANT_PSORT_TIMING
+	rsb_time_t dt,pt,st,mt,ct;
+	rsb_time_t tt = - rsb_time();
+#endif /* RSB_DO_WANT_PSORT_TIMING */
+	rsb_int_t ei = RSB_DO_FLAG_HAS(flags,RSB_FLAG_FORTRAN_INDICES_INTERFACE) ? 1 : 0;
+
+	if(RSB_MATRIX_UNSUPPORTED_TYPE(typecode))
+	{
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+		RSB_PERR_GOTO(ret,"\n");
+	}
+
+	if(nnz<2)
+		goto ret;
+
+	if(RSB_MUL_OVERFLOW(sizeof(rsb_nnz_idx_t),(m+2),size_t,rsb_non_overflowing_t)
+	|| RSB_MUL_OVERFLOW(sizeof(rsb_nnz_idx_t)*2+el_size,(nnz),size_t,rsb_non_overflowing_t))
+	{
+		errval = RSB_ERR_LIMITS;
+		RSB_PERR_GOTO(err,"sorry, allocating that much memory would cause overflows\n");
+	}
+	PA = rsb__calloc(sizeof(rsb_nnz_idx_t)*(m+2));
+//	WA = rsb__calloc(RSB_MAX(sizeof(rsb_coo_idx_t),el_size)*(nnz+1));
+//	WA = rsb__calloc((2+3*sizeof(rsb_coo_idx_t)+el_size)*nnz);
+//	WA = rsb__calloc((2*sizeof(rsb_coo_idx_t)+el_size)*nnz);
+	WA = rsb__calloc_parallel((sizeof(rsb_coo_idx_t)*2+el_size)*nnz); // NEW 20101201
+
+	if(!PA || !WA)
+	{
+		errval = RSB_ERR_ENOMEM;
+		RSB_PERR_GOTO(err,"after calloc, pa=%p, wa=%p\n",PA,WA);
+	}
+	iWA=((rsb_coo_idx_t*) WA);
+	jWA=((rsb_coo_idx_t*)iWA)+nnz;
+	vWA=((rsb_coo_idx_t*)jWA)+nnz;
+//	nWA=((rsb_char_t*)vWA)+(el_size*nnz);
+
+	/* saving one head element with a trick */
+	++PA;
+#if RSB_DO_WANT_PSORT_TIMING
+	dt = rsb_time();
+	mt=-dt;
+#endif /* RSB_DO_WANT_PSORT_TIMING */
+
+#if RSB_DO_WANT_PSORT_FASTER_BUT_RISKY
+# if 1
+	/* FIXME: unfinished and incorrect code */
+	#pragma omp parallel for schedule(static,psc) shared(PA,IA) RSB_NTC 
+	for(n=0;n<nnz;++n)
+		PA[IA[n]+1-ei]++;
+# else
+	/* actually, this code is VERY SLOW :) */
+	#pragma omp parallel reduction(|:errval) shared(PA,IA) 
+	{
+		rsb_nnz_idx_t n;
+		rsb_thr_t th_id = omp_get_thread_num();
+		rsb_thr_t tn = omp_get_num_threads();
+
+		for(n=0;RSB_LIKELY(n<nnz);++n)
+			if(IA[n]%tn==th_id)
+				PA[IA[n]+1-ei]++;
+	}
+	#pragma omp barrier
+#endif
+#else /* RSB_DO_WANT_PSORT_FASTER_BUT_RISKY */
+	/* setting PA[i] to contain the count of elements on row i */
+	for(n=0;RSB_LIKELY(n<nnz);++n)
+	{
+		RSB_ASSERT(IA[n]>=0);
+		RSB_ASSERT(IA[n]<=m);
+		PA[IA[n]+1-ei]++;
+#if RSB_DO_WANT_PSORT_VERBOSE>1
+		RSB_INFO("PA[m] = %d\n",PA[m]);
+		RSB_INFO("IA[%d] = %d   PA[%d] = %d\n",n,IA[n],IA[n]+1-ei,PA[IA[n]+1-ei]);
+#endif /* RSB_DO_WANT_PSORT_VERBOSE */
+	}
+#endif /* RSB_DO_WANT_PSORT_FASTER_BUT_RISKY */
+	/* setting PA[i] to contain the count of elements before row i */
+	for(n=0;RSB_LIKELY(n<m);++n)
+	{
+		PA[n+1] += PA[n];
+#if RSB_DO_WANT_PSORT_VERBOSE>1
+		RSB_INFO("PA[%d] = %d\n",n,PA[n]);
+#endif /* RSB_DO_WANT_PSORT_VERBOSE */
+	}
+#if RSB_DO_WANT_PSORT_VERBOSE>1
+	RSB_INFO("PA[%d] = %d\n",n,PA[n]);
+#endif /* RSB_DO_WANT_PSORT_VERBOSE */
+
+#if RSB_DO_WANT_PSORT_TIMING
+	dt = rsb_time();
+	mt+=dt;
+	pt=-dt;
+#endif /* RSB_DO_WANT_PSORT_TIMING */
+	/* shuffling elements on the basis of their row 
+	 * FIXME : this is the slowest part of this code;
+	 * its performance largely depends on cache line lengths and latencies. */
+	/* FIXME : parallelization of this is challenging */
+	rsb_util_do_scatter_rows(vWA,iWA,jWA,VA,IA,JA,PA-ei,nnz,typecode);
+	--PA; /* PA has been modified. */
+#if RSB_DO_WANT_PSORT_TIMING
+	dt = rsb_time();
+	pt+=dt;
+	st=-dt;
+#endif /* RSB_DO_WANT_PSORT_TIMING */
+//	RSB_COA_MEMCPY(IA,iWA,0,0,nnz);
+//	RSB_COA_MEMCPY(JA,jWA,0,0,nnz);
+//	RSB_A_MEMCPY(VA,vWA,0,0,nnz,el_size);
+//	SB_COA_MEMCPY_parallel(IA,iWA,0,0,nnz);
+//	RSB_COA_MEMCPY_parallel(JA,jWA,0,0,nnz);
+//	RSB_A_MEMCPY_parallel(VA,vWA,0,0,nnz,el_size);
+	/* restore the row pointers with a trick */
+
+	RSB_ASSERT(PA[m]==nnz);
+
+	/* TODO: parallelization of this ? FIXME: is this necessary ? */
+	for(n=0;n<m;++n)
+		mnzpr = RSB_MAX(mnzpr,PA[n+1]-PA[n]);
+
+	nWA = rsb__malloc(sizeof(rsb_nnz_idx_t)*(mnzpr+2)*wet);/* rsb__malloc is evil inside openmp */
+	if(!nWA) { RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_ENOMEM);RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+
+	psc = RSB_MIN(psc,m);
+	/* the rows are ready to be sorted (FIXME: this is slow, and could be optimized considerably) */
+//	#pragma omp parallel for reduction(|:errval)
+//	#pragma omp parallel for
+//	#pragma omp parallel for schedule(static,10)
+//	#pragma omp parallel for schedule(static,1)
+//	#pragma omp parallel for schedule(static,psc) shared(iWA,jWA,vWA,nWA,PA)   num_threads(wet)
+	#pragma omp parallel for schedule(static,psc) shared(iWA,jWA,vWA,nWA,PA)   RSB_NTC 
+	for(n=0;n<m;++n)
+	{
+		rsb_nnz_idx_t nnz1,nnz0;
+#if 1
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+		rsb_thread_t th_id = omp_get_thread_num();
+#else /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+		rsb_thread_t th_id=0;
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+		rsb_nnz_idx_t tnoff=th_id*(mnzpr+2);
+		nnz1=PA[n+1];
+		nnz0=PA[n];
+#if RSB_DO_WANT_PSORT_VERBOSE
+		RSB_INFO("psort row %d/%d: nonzeros [%d .. %d/%d] on thread %d\n",(int)n,m,(int)nnz0,(int)nnz1,(int)nnz,(int)th_id);
+#endif /* RSB_DO_WANT_PSORT_VERBOSE */
+		if(nnz1-nnz0<2)
+			continue;/* skip an empty or single-entry row. TODO: could use specialized macro sorting algorithms for few nnz */
+		if(!RSB_SOME_ERROR(rsb_do_msort_up(nnz1-nnz0,jWA+nnz0,nWA+tnoff)))
+			rsb_ip_reord(nnz1-nnz0,((rsb_char_t*)vWA)+el_size*nnz0,iWA+nnz0,jWA+nnz0,nWA+tnoff,typecode);
+#else
+		nnz1=PA[n+1];
+		nnz0=PA[n];
+		rsb__do_util_sortcoo(vWA+nnz0,iWA+nnz0,jWA+nnz0,m,k,nnz1-nnz0,typecode,NULL,flags,NULL,0);
+#endif
+	}
+
+#if RSB_DO_WANT_PSORT_TIMING
+	dt = rsb_time();
+	st+=dt;
+	ct=-dt;
+#endif /* RSB_DO_WANT_PSORT_TIMING */
+
+	RSB_COA_MEMCPY_parallel(IA,iWA,0,0,nnz);
+	RSB_COA_MEMCPY_parallel(JA,jWA,0,0,nnz);
+	RSB_A_MEMCPY_parallel(VA,vWA,0,0,nnz,el_size);
+#if RSB_DO_WANT_PSORT_TIMING
+	dt = rsb_time();
+	ct+=dt;
+#endif /* RSB_DO_WANT_PSORT_TIMING */
+err:
+	RSB_CONDITIONAL_FREE(PA);
+	RSB_CONDITIONAL_FREE(WA);
+	RSB_CONDITIONAL_FREE(nWA);
+#if RSB_DO_WANT_PSORT_TIMING
+	dt = rsb_time();
+	tt+=dt;
+	RSB_INFO("pt:%lg  st:%lg  tt:%lg  mt:%lg ct:%lg\n",pt,st,tt,mt,ct);
+#endif /* RSB_DO_WANT_PSORT_TIMING */
+ret:
+	RSB_DO_ERR_RETURN(errval)
+}
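+/*
+ * The bucketing above in miniature: count nonzeros per row into a shifted
+ * array, prefix-sum it into row pointers, then scatter; afterwards only the
+ * short per-row column sorts remain, which parallelize well. Disabled sketch
+ * on plain ints (the demo_ name is hypothetical, not part of librsb).
+ */
+#if 0
+/* On return, perm[q] is the source index of the q-th element in row-major
+ * bucket order, and PA[i] has been advanced to the end of row i (the
+ * "++PA/--PA trick" above undoes the same shift with one pointer move). */
+static void demo_bucket_by_row(const int *IA, int *perm, int *PA,
+		int nnz, int m)
+{
+	int n;
+
+	for (n = 0; n < m + 1; ++n) PA[n] = 0;
+	for (n = 0; n < nnz; ++n) PA[IA[n] + 1]++;	/* counts, shifted */
+	for (n = 0; n < m; ++n) PA[n + 1] += PA[n];	/* prefix sums */
+	for (n = 0; n < nnz; ++n)			/* stable scatter */
+		perm[PA[IA[n]]++] = n;
+}
+#endif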
+/* @endcond */
diff --git a/rsb_srtp.h b/rsb_srtp.h
new file mode 100644
index 0000000..7c630c3
--- /dev/null
+++ b/rsb_srtp.h
@@ -0,0 +1,40 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains parallel sorting functions.
+ * */
+
+#ifndef RSB_PSORT_H_INCLUDED
+#define RSB_PSORT_H_INCLUDED
+
+#include "rsb_common.h"
+
+rsb_err_t rsb_util_sort_row_major_inner(void * RSB_RESTRICT VA, rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t * RSB_RESTRICT JA, const rsb_nnz_idx_t nnz, const rsb_coo_idx_t m, const rsb_coo_idx_t k, const  rsb_type_t typecode , const rsb_flags_t flags /*, void * WA, size_t wb */);
+rsb_err_t rsb__util_sort_row_major_parallel(void *VA, rsb_coo_idx_t * IA, rsb_coo_idx_t * JA, rsb_nnz_idx_t nnz, rsb_coo_idx_t m, rsb_coo_idx_t k,  rsb_type_t typecode, rsb_flags_t flags);
+rsb_err_t rsb_util_sort_row_major_bucket_based_parallel(void * RSB_RESTRICT VA, rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t * RSB_RESTRICT JA, const rsb_nnz_idx_t nnz, const rsb_coo_idx_t m, const rsb_coo_idx_t k, const  rsb_type_t typecode , const rsb_flags_t flags /*, void * WA, size_t wb */);
+
+#endif /* RSB_PSORT_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_strmif.c b/rsb_strmif.c
new file mode 100644
index 0000000..29b89a6
--- /dev/null
+++ b/rsb_strmif.c
@@ -0,0 +1,43 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/* This file was generated by the Makefile */
+#include "rsb.h"
+#include "rsb_common.h"
+#include "rsb_do.h"
+rsb_err_t rsb__do_get_matrix_info_from_string(const struct rsb_mtx_t *matrix, const rsb_char_t *mis, void* info, size_t buflen)
+{ rsb_err_t errval=RSB_ERR_BADARGS; if(!matrix || !mis || !info)goto err;
+if(0 == strcmp(mis,"RSB_MIF_INDEX_STORAGE_IN_BYTES__TO__SIZE_T")){ errval = rsb__do_get_matrix_info(matrix,RSB_MIF_INDEX_STORAGE_IN_BYTES__TO__SIZE_T,info,buflen); goto done;}
+if(0 == strcmp(mis,"RSB_MIF_INDEX_STORAGE_IN_BYTES_PER_NNZ__TO__RSB_REAL_T")){ errval = rsb__do_get_matrix_info(matrix,RSB_MIF_INDEX_STORAGE_IN_BYTES_PER_NNZ__TO__RSB_REAL_T,info,buflen); goto done;}
+if(0 == strcmp(mis,"RSB_MIF_MATRIX_ROWS__TO__RSB_COO_INDEX_T")){ errval = rsb__do_get_matrix_info(matrix,RSB_MIF_MATRIX_ROWS__TO__RSB_COO_INDEX_T,info,buflen); goto done;}
+if(0 == strcmp(mis,"RSB_MIF_MATRIX_COLS__TO__RSB_COO_INDEX_T")){ errval = rsb__do_get_matrix_info(matrix,RSB_MIF_MATRIX_COLS__TO__RSB_COO_INDEX_T,info,buflen); goto done;}
+if(0 == strcmp(mis,"RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T")){ errval = rsb__do_get_matrix_info(matrix,RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T,info,buflen); goto done;}
+if(0 == strcmp(mis,"RSB_MIF_TOTAL_SIZE__TO__SIZE_T")){ errval = rsb__do_get_matrix_info(matrix,RSB_MIF_TOTAL_SIZE__TO__SIZE_T,info,buflen); goto done;}
+if(0 == strcmp(mis,"RSB_MIF_MATRIX_FLAGS__TO__RSB_FLAGS_T")){ errval = rsb__do_get_matrix_info(matrix,RSB_MIF_MATRIX_FLAGS__TO__RSB_FLAGS_T,info,buflen); goto done;}
+if(0 == strcmp(mis,"RSB_MIF_MATRIX_TYPECODE__TO__RSB_TYPE_T")){ errval = rsb__do_get_matrix_info(matrix,RSB_MIF_MATRIX_TYPECODE__TO__RSB_TYPE_T,info,buflen); goto done;}
+if(0 == strcmp(mis,"RSB_MIF_MATRIX_INFO__TO__CHAR_P")){ errval = rsb__do_get_matrix_info(matrix,RSB_MIF_MATRIX_INFO__TO__CHAR_P,info,buflen); goto done;}
+if(0 == strcmp(mis,"RSB_MIF_LEAVES_COUNT__TO__RSB_BLK_INDEX_T")){ errval = rsb__do_get_matrix_info(matrix,RSB_MIF_LEAVES_COUNT__TO__RSB_BLK_INDEX_T,info,buflen); goto done;}
+done:
+return errval;
+err: return RSB_ERR_GENERIC_ERROR;
+}
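+/* Example use (illustrative): fetching the nonzero count via its string id,
+   with buflen forwarded to rsb__do_get_matrix_info():
+     rsb_nnz_idx_t nnz;
+     rsb__do_get_matrix_info_from_string(mtxAp,
+         "RSB_MIF_MATRIX_NNZ__TO__RSB_NNZ_INDEX_T", &nnz, 0);
+*/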
+/* @endcond */
diff --git a/rsb_stropts.c b/rsb_stropts.c
new file mode 100644
index 0000000..2c535de
--- /dev/null
+++ b/rsb_stropts.c
@@ -0,0 +1,41 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/* This file was generated by the Makefile */
+#include "rsb.h"
+#include "rsb_common.h"
+rsb_err_t rsb__stropts_set(const rsb_char_t *opn, const rsb_char_t *arg)
+{ rsb_err_t errval=RSB_ERR_NO_ERROR; if(!opn || !arg)goto err;
+if(0 == strcmp(opn,"RSB_IO_WANT_SORT_METHOD")){ rsb_int_t val = rsb__util_atoi(arg); RSB_DO_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_SORT_METHOD,&val,errval); goto done;}
+if(0 == strcmp(opn,"RSB_IO_WANT_CACHE_BLOCKING_METHOD")){ rsb_int_t val = rsb__util_atoi(arg); RSB_DO_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_CACHE_BLOCKING_METHOD,&val,errval); goto done;}
+if(0 == strcmp(opn,"RSB_IO_WANT_SUBDIVISION_MULTIPLIER")){ rsb_real_t val = rsb__util_atof(arg); RSB_DO_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_SUBDIVISION_MULTIPLIER,&val,errval); goto done;}
+if(0 == strcmp(opn,"RSB_IO_WANT_BOUNDED_BOX_COMPUTATION")){ rsb_int_t val = rsb__util_atoi(arg); RSB_DO_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_BOUNDED_BOX_COMPUTATION,&val,errval); goto done;}
+if(0 == strcmp(opn,"RSB_IO_WANT_EXECUTING_THREADS")){ rsb_int_t val = rsb__util_atoi(arg); RSB_DO_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_EXECUTING_THREADS,&val,errval); goto done;}
+if(0 == strcmp(opn,"RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE")){ rsb_int_t val = rsb__util_atoi(arg); RSB_DO_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_EXTRA_VERBOSE_INTERFACE,&val,errval); goto done;}
+if(0 == strcmp(opn,"RSB_IO_WANT_MEMORY_HIERARCHY_INFO_STRING")){ const rsb_char_t* val = arg; RSB_DO_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_MEMORY_HIERARCHY_INFO_STRING,&val,errval); goto done;}
+if(0 == strcmp(opn,"RSB_IO_WANT_LEAF_LEVEL_MULTIVEC")){ rsb_int_t val = rsb__util_atoi(arg); RSB_DO_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_LEAF_LEVEL_MULTIVEC,&val,errval); goto done;}
+if(0 == strcmp(opn,"RSB_IO_WANT_VERBOSE_TUNING")){ rsb_int_t val = rsb__util_atoi(arg); RSB_DO_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_VERBOSE_TUNING,&val,errval); goto done;}
+done:
+return errval;
+err: return RSB_ERR_GENERIC_ERROR;
+}
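+/* Example use (illustrative): setting an option from its string id,
+   e.g. the executing threads count:
+     rsb__stropts_set("RSB_IO_WANT_EXECUTING_THREADS", "4");
+*/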
+/* @endcond */
diff --git a/rsb_struct.h b/rsb_struct.h
new file mode 100644
index 0000000..f684d77
--- /dev/null
+++ b/rsb_struct.h
@@ -0,0 +1,359 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+#ifndef RSB_RSB_STRUCT_H_INCLUDED
+#define RSB_RSB_STRUCT_H_INCLUDED
+
+/* @cond INNERDOC  */
+
+/*! 
+ * A type for the size (in bytes) of memory areas.  */
+typedef size_t rsb_size_t;
+
+/*!
+ * A typedef for printing non negative integers.
+ */
+typedef rsb_size_t rsb_printf_int_t;
+
+/*!
+ * The inner datatype used for the bitmap structure.
+ * */
+typedef signed int rsb_bitmap_data_t;
+
+/*! A type for specifying a sparse matrix format. */
+typedef rsb_flags_t rsb_fmt_t;
+
+/*!  A floating point numerical type for performance (MFLOPS) measurements.  */
+typedef rsb_real_t rsb_perf_t;
+
+/*!  A floating point numerical type for fillin measurements (obsolete).  */
+typedef rsb_real_t rsb_fillin_t;
+
+/*!
+ An integer type for submatrix indices.
+ */
+typedef int rsb_submatrix_idx_t;
+
+#define RSB_MAX_SUBM_COUNT (RSB_MAX_VALUE_FOR_TYPE(rsb_submatrix_idx_t))
+#define RSB_SUBM_IDX_MARKER (RSB_MAX_SUBM_COUNT)
+
+/*!
+ \internal
+ An integer type which by definition should not overflow, in most cases of interest.
+ */
+typedef int rsb_non_overflowing_t;
+
+
+#define RSB_BLK_MUL_OVERFLOW(R,C) RSB_MUL_OVERFLOW((R),(C),rsb_blk_idx_t,rsb_non_overflowing_t)
+#define RSB_COO_MUL_OVERFLOW(R,C) RSB_MUL_OVERFLOW((R),(C),rsb_coo_idx_t,rsb_non_overflowing_t)
+#define RSB_NNZ_MUL_OVERFLOW(R,C) RSB_MUL_OVERFLOW(R,C,rsb_nnz_idx_t,rsb_non_overflowing_t)
+
+/*!
+ A type for byte strings.
+ */
+typedef unsigned char rsb_byte_t;
+
+
+/* @cond INNERDOC  */
+/*!
+ \name Macros for overflow detection in common (INTERNALS) operations. 
+
+ They are tricky because should serve both signed and unsigned typedefs.
+ The following macros should be handled with care.
+ */
+#define RSB_INDEX_OF_SAFE_EXTRA 2 	/*< this is the value that could be added with no overflow to indices values */
+#define RSB_ADD_OVERFLOW(R,C,T) ((int)((T)(((T)(R))+((T)(C)))<((T)(R))) || (int)((T)(((T)(R))+((T)(C)))<((T)(C))))
+#define RSB_MUL_OVERFLOW(R,C,T,H) ((R)?((((R)*(C))/(R))==(C)?0:1):0)
+#define RSB_BLK_ADD_OVERFLOW(R,C) RSB_ADD_OVERFLOW((R),(C),rsb_blk_idx_t)
+#define RSB_COO_ADD_OVERFLOW(R,C) RSB_ADD_OVERFLOW((R),(C),rsb_coo_idx_t)
+#define RSB_NNZ_ADD_OVERFLOW(R,C) RSB_ADD_OVERFLOW((R),(C),rsb_nnz_idx_t)
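+/* For instance (illustrative, with a 32-bit signed type T): in practice
+   RSB_MUL_OVERFLOW(65536,65536,T,H) is 1, since 65536*65536 does not survive
+   the ((R)*(C))/(R)==(C) round trip, while RSB_MUL_OVERFLOW(46340,46340,T,H)
+   is 0 (46340*46340 = 2147395600 < 2^31). */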
+
+/*!
+ Macros to get indices types liminal values (which we often use as markers).
+*/
+#define RSB_IS_UNSIGNED(T) (!RSB_IS_SIGNED(T))
+#define RSB_PROBABLY_SAME_TYPES(T1,T2) ((RSB_IS_SIGNED(T1)==RSB_IS_SIGNED(T2)) && sizeof(T1)==sizeof(T2))
+#define RSB_MIN_SIGNED(T) (-1 - RSB_MAX_SIGNED(T))
+
+#define RSB_IS_VALUE_MORE_THAN_HALF_BITS_LONG(V,T)	(((V)>>RSB_COO_HALF_BITS_SIZE)>0) /*< */
+#define RSB_IS_COO_VALUE_MORE_THAN_HALF_BITS_LONG(V) RSB_IS_VALUE_MORE_THAN_HALF_BITS_LONG(V,rsb_coo_idx_t) /*< */
+
+#define RSB_MIN(X,Y) ((X)<(Y)?(X):(Y))		/*!< quick macro for minimum */
+#define RSB_MAX(X,Y) ((X)<(Y)?(Y):(X))		/*!< quick macro for maximum */
+#define RSB_ABS(X) ((X)<0?-(X):(X))		/*!< quick macro for abs()*/
+
+#define RSB_FOUR 4			/*!< a constant with the number of quadrants */
+/* @endcond */
+
+/* @cond INNERDOC  */
+#define RSB_REAL_ZERO 0.0		/*!< \internal internal */
+#define RSB_TIME_ZERO RSB_REAL_ZERO 	/*!< \internal internal */	
+#define RSB_BOOL_MAYBE	(-1) /*!< a reserved, "maybe" value for rsb_bool_t */
+#define RSB_INVALID_FLAGS	(-1)		/*!< \internal internal */
+#define RSB_INVALID_TRANS RSB_INVALID_FLAGS	/*!< \internal internal */
+#define RSB_INVALID_TRANS_CHAR '?'		/*!< \internal internal */
+#define RSB_XOR(X,Y) 	(((X)!=0)^ ((Y)!=0))	/*!< \internal internal */
+#define RSB_AND(X,Y) 	(((X)!=0)&&((Y)!=0))	/*!< \internal internal */
+#define RSB_OR(X,Y) 	(((X)!=0)||((Y)!=0))	/*!< \internal internal */
+#define RSB_NAND(X,Y)   (!RSB_AND(X,Y))		/*!< \internal internal */
+/* @endcond */
+
+/* @cond INNERDOC  */
+#define RSB_BOOL_XOR(X,Y) 	((X)^(Y)) /*!< A logical XOR for rsb_bool_t values. */
+#define RSB_BOOL_OR(X,Y) 	((X)||(Y)) /*!< A logical OR for rsb_bool_t values. */
+#define RSB_BOOL_AND(X,Y) 	((X)&&(Y)) /*!< A logical OR for rsb_bool_t values. */
+#define RSB_BOOL_NOT(X) 	(!(X)) /*!< A logical NOT for rsb_bool_t values. */
+#define RSB_BOOL_NAND(X,Y) 	RSB_BOOL_NOT(RSB_BOOL_AND(X,Y)) /*!< A logical NAND for rsb_bool_t values. */
+#define RSB_BOOL_NOR(X,Y) 	RSB_BOOL_NOT(RSB_BOOL_OR(X,Y)) /*!< A logical NOR for rsb_bool_t values. */
+/* @endcond */
+
+
+
+/* @cond INNERDOC  */
+/*!
+ * \internal
+ * \ingroup gr_internals
+ * \brief An internal, helper structure (OBSOLETE).
+ * \internal
+ */
+struct rsb_expected_info_t{
+	/*! Expected fillin */
+	/* FIXME : here should also be a map of expected fillin */
+	rsb_fillin_t efillin;
+};
+/* @endcond */
+
+/* @cond INNERDOC  */
+/*!
+ * \internal
+ * \ingroup gr_internals
+ * \brief An internal, helper structure (not for end users).
+ * \internal
+ */
+struct rsb_translated_matrix_t
+{
+	struct rsb_mtx_t * mtxlp;
+	rsb_submatrix_idx_t level;
+	rsb_coo_idx_t	roff,coff;
+	rsb_coo_idx_t	nr,nc;
+};
+/* @endcond */
+
+/*!
+ * \ingroup rsb_doc_matrix_assembly
+ * \brief A structure for the RSB (Recursive Sparse Blocks) representation of sparse matrices.
+ * \n
+ * This is an opaque container for a recursive storage of COO/CSR submatrices. 
+ * \n
+ * The user is not supposed to manipulate this structure directly.
+ * \n
+ * This structure shall be only manipulated through the use of appropriate functions. 
+ * \n
+ * Knowledge of this structure is not required at all (in any case) to use the library.
+ * \see rsb_doc_matrix_assembly on how to instantiate/destroy this structure.
+ * \see rsb_doc_matrix_operations for computational operations using it.
+ *
+ * \note: VBR and BCSR submatrices are not supported.
+ */
+struct rsb_mtx_t
+{
+	/*!
+		values of matrix coefficients.
+		array sized ( element_count == nnz * fillin ) * el_size (CSR,BCSR,VBR) 
+	 */
+	void * VA;
+
+	/*!  bpntr[bri] points to the location of bindx of the first nonzero block entry of block row bri.
+			   if the ith block row contains only zeros then bpntr[i]==bpntr[i+1] (VBR,BCSR,CSR) */
+	rsb_nnz_idx_t *bpntr;
+
+	/*!  bindx[bi] contains the block column index of the bi^th nonzero block (VBR,BCSR,CSR) */
+	rsb_coo_idx_t	*bindx;	/* bindx[m->block_count] should be zero, for technical reasons (for the last 'virtual' block) */
+
+	rsb_nnz_idx_t nnz;	/*! matrix (declared) nonzeros */
+	rsb_coo_idx_t nr,nc;	/*! matrix rows, columns */
+	rsb_flags_t flags; 	/*! structural flags, describing some optional features */
+	rsb_blk_idx_t br, bc;	/*! block row and column size (only if BCSR) */
+	rsb_type_t typecode; 	/*! as specified in the RSB_NUMERICAL_TYPE_* preprocessor symbols in types.h (See \ref matrix_type_symbols_section)	*/
+	rsb_fmt_t matrix_storage; /*! as specified in the RSB_MATRIX_STORAGE_* preprocessor symbols in types.h 	*/
+
+	/*!
+		indptr[bi] points (logically: in terms of numerical elements count) to the location in VA of the (0,0) entry in the bi^th block entry (VBR).
+		array sized 
+	*/
+	rsb_nnz_idx_t  *indptr;
+
+	/*!  rpntr[bri] contains the row index of first row in the bri^th block row
+	          ( row    partitioning indices : M_b +1 elements )  (CSR,BCSR,VBR)
+	     note that rpntr[Mdim] could be more than m.
+	*/
+	rsb_coo_idx_t	*rpntr;
+
+	/*!  cpntr[bcj] contains the column index of the first column in the bcj^th block column
+	          ( column partitioning indices : K_b +1 elements ) (VBR) */
+	rsb_coo_idx_t *cpntr;
+
+	/*!  these are aliases for rpntr and cpntr for the major dimension (Mpntr) and minor one (mpntr) 
+	 */
+	rsb_coo_idx_t *mpntr,*Mpntr;
+
+	/* int  *mpntr,*Mpntr;*/	/* aliases for rpntr and cpntr (M stays for major, m for minor) */
+	
+	/*! block row and column counts */
+	rsb_blk_idx_t M_b, K_b;
+
+	/*!  these are aliases for M_b and K_b for the major dimension (Mdim) and minor one (mdim) 
+	 *  if RSB_FLAG_WANT_COLUMN_MAJOR_ORDER is set, the aliasing is swapped.
+	 * */
+	rsb_blk_idx_t Mdim,mdim;
+
+	/*! The count of blocks (regardless their size) : <= nnz */
+	rsb_nnz_idx_t block_count;
+
+	/*! The overall number of elements el_size bytes each (>=nnz) */
+	rsb_size_t element_count;
+	
+	/*! the size >= 1, in bytes, of the sparse matrix numerical elements type */
+	rsb_size_t el_size;
+
+	/*! Time needed for matrix structure analysis, during construction */
+	rsb_time_t sat;
+
+	/*! Time needed for elements insertion, during construction */
+	rsb_time_t eit;
+
+	/*! Time needed for sorting elements (if sorted), during construction */
+	rsb_time_t est;
+
+	/*! Performance estimation time */
+	rsb_time_t pet;
+
+	/*! Coordinate cleanup time */
+	rsb_time_t ect;
+
+	/*! Coordinate partitioning time */
+	rsb_time_t cpt;
+
+	/*! Recursive sort time  */
+	rsb_time_t rpt;
+
+	/*! Total assembly time */
+	rsb_time_t tat;
+
+	/*! Submatrix pointers for recursion storage */
+	struct rsb_mtx_t * sm[RSB_FOUR];
+
+/* #if RSB_STORE_IDXSA */
+	/*! Index storage amount. Temporarily here: FIXME. */
+	rsb_size_t idxsa;
+	/*
+#else */
+	/*! A structure with expectation info during construction (FIXME: this member is obsolete and will be deleted soon) */
+	/* struct rsb_expected_info_t einfo; */
+/* #endif */
+
+	/*! A pointer to an array of leaf submatrices pointers (only valid on root) */
+	struct rsb_translated_matrix_t * all_leaf_matrices;
+
+	/*! The number of leaf submatrices pointers in all_leaf_matrices (only valid on root) */
+	rsb_submatrix_idx_t all_leaf_matrices_n;
+
+	/*! In a recursive representation, the offset of the submatrix with respect to the original one (respectively, rows and columns)  */
+	rsb_coo_idx_t	roff,coff;
+
+	/*! In a recursive representation, with the RSB_FLAG_ASSEMBLED_IN_COO_ARRAYS flag, the offset of these data arrays from the beginning of the global ones  */
+	rsb_nnz_idx_t	nzoff;
+
+	/*! In a recursive representation, broff (bcoff) is the offset of the submatrix first non empty row (column) with respect to the matrix.  */
+	rsb_coo_idx_t	broff,bcoff;
+
+	/*! In a recursive representation, bm (bk) is the last non-empty row (column) in the submatrix.  */
+	rsb_coo_idx_t bm,bk;
+};
+
+/*!
+ * Macros for printing out summary info about a matrix.
+ * Each accepts a valid \ref rsb_mtx_t pointer as an argument.
+ *
+ * Usage example:
+ * \code
+ * printf(RSB_PRINTF_MTX_SUMMARY_ARGS(mtxAp));
+ * \endcode
+ */
+#define RSB_PRINTF_MTX_SUMMARY_ARGS(MTXAP)  \
+			"(%d x %d)[%p]{%c} @ (%d(%d..%d),%d(%d..%d)) (%d nnz, %.2lg nnz/r) flags 0x%x (coo:%d, csr:%d, hw:%d, ic:%d), storage: %x, subm: %d, symflags:'"\
+					"%s"	\
+					"%s"	\
+					"%s"	\
+					"%s"	\
+					"%s"	\
+					"'"	\
+					, \
+				(MTXAP)->nr, (MTXAP)->nc, (const void*)(MTXAP),				\
+				(MTXAP)->typecode,						\
+				(MTXAP)->roff,						\
+				(MTXAP)->broff,						\
+				(MTXAP)->roff+(MTXAP)->bm,						\
+				(MTXAP)->coff,						\
+				(MTXAP)->bcoff,						\
+				(MTXAP)->coff+(MTXAP)->bk,						\
+			       	(MTXAP)->nnz,									\
+			       	((double)(MTXAP)->nnz)/(MTXAP)->nr,							\
+			       	(MTXAP)->flags,								\
+				RSB_DO_FLAG_HAS((MTXAP)->flags,RSB_FLAG_WANT_COO_STORAGE),			\
+				RSB_DO_FLAG_HAS((MTXAP)->flags,RSB_FLAG_WANT_BCSS_STORAGE),			\
+				RSB_DO_FLAG_HAS((MTXAP)->flags,RSB_FLAG_USE_HALFWORD_INDICES),		\
+				RSB_DO_FLAG_HAS((MTXAP)->flags,RSB_FLAG_ASSEMBLED_IN_COO_ARRAYS),			\
+				(MTXAP)->matrix_storage,							\
+				(MTXAP)->all_leaf_matrices_n,							\
+				RSB_DO_FLAG_HAS((MTXAP)->flags,RSB_FLAG_UPPER)?"U":"",			\
+				RSB_DO_FLAG_HAS((MTXAP)->flags,RSB_FLAG_LOWER)?"L":"",			\
+				RSB_DO_FLAG_HAS((MTXAP)->flags,RSB_FLAG_TRIANGULAR)?"T":"",			\
+				RSB_DO_FLAG_HAS((MTXAP)->flags,RSB_FLAG_SYMMETRIC)?"S":"",			\
+				RSB_DO_FLAG_HAS((MTXAP)->flags,RSB_FLAG_HERMITIAN)?"H":""
+
+#define RSB_PRINTF_MATRIX_AT_SUMMARY_ARGS(MTXAP)  \
+			"%d x %d, type %c, %d nnz, %.2lg nnz/r, %ld subms, %d lsubms, %2.4lf bpnz"\
+					, 								\
+				(MTXAP)->nr, (MTXAP)->nc, 						\
+				(MTXAP)->typecode,							\
+			       	(MTXAP)->nnz,								\
+			       	((double)(MTXAP)->nnz)/(MTXAP)->nr,					\
+				rsb__submatrices(MTXAP),							\
+				(MTXAP)->all_leaf_matrices_n,						\
+				((double)rsb__get_index_storage_amount(MTXAP)) / ((MTXAP)->nnz)
+
+#define RSB_PRINTF_MATRIX_BOUNDS_SUMMARY_ARGS(MTXAP)  \
+			"(nr=%d x nc=%d, nnz=%d)[%p]{type=%c} @ (nzoff=%d, roff=%d,broff=%d,bm=%d, coff=%d,bcoff=%d,bk=%d) " \
+					, \
+				(MTXAP)->nr, (MTXAP)->nc, (MTXAP)->nnz, (const void*)(MTXAP),				\
+				(MTXAP)->typecode,						\
+				(MTXAP)->nzoff,						\
+				(MTXAP)->roff,						\
+				(MTXAP)->broff,						\
+				(MTXAP)->bm,						\
+				(MTXAP)->coff,						\
+				(MTXAP)->bcoff,						\
+				(MTXAP)->bk
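+
+/*
+ * Editorial note: like RSB_PRINTF_MTX_SUMMARY_ARGS above, each of these
+ * macros expands to a format string followed by its matching argument list,
+ * so it can be passed directly to a printf-like function, e.g. (a sketch):
+ *
+ *	printf(RSB_PRINTF_MATRIX_BOUNDS_SUMMARY_ARGS(mtxAp));
+ */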
+/* @endcond */
+
+
+#endif
diff --git a/rsb_swt.c b/rsb_swt.c
new file mode 100644
index 0000000..d9f874b
--- /dev/null
+++ b/rsb_swt.c
@@ -0,0 +1,970 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief This source file contains experimental functions 
+ * */
+#include "rsb_internals.h"		/* */
+#include "rsb_swt.h"		/* */
+#define RSB_INNER_CAST(X) (X)
+
+rsb_err_t rsb__do_switch_leaf(struct rsb_mtx_t * mtxAp, rsb_fmt_t matrix_storage, rsb_flags_t flags, rsb_coo_idx_t roff, rsb_coo_idx_t coff, rsb_coo_idx_t *TA)
+{
+	/*
+	 * In-place switch of row-ordered COO to COO or CSR, either halfword-compressed or not.
+	 * Does not require submatrix bounds to be computed.
+	 * If TA is non-NULL, no allocations shall originate from here.
+	 * If a reasonable conversion is being requested (e.g. no to-CSR conversion with nnzA<nrA+1), (sizeof(rsb_coo_idx_t) * RSB_MIN(mtxAp->nnz,1+mtxAp->nr)) should suffice for TA.
+	 * TODO: this function calls OpenMP-enabled functions (e.g. rsb__util_compress_to_row_pointers_array); fix this in an appropriate way.
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	void * VA = mtxAp->VA, *IA = mtxAp->bpntr, *JA = mtxAp->bindx;
+	rsb_nnz_idx_t nnzA = mtxAp->nnz;
+	rsb_coo_idx_t nrA = mtxAp->nr, ncA = mtxAp->nc;
+
+	/* RSB_STDOUT("switch with off %d/%d, flag %d, ms %d.\n", roff, coff, flags & RSB_FLAG_USE_HALFWORD_INDICES, matrix_storage); */
+
+	if(matrix_storage == RSB_MATRIX_STORAGE_AUTO )
+	{
+		matrix_storage = RSB_MATRIX_STORAGE_BCOR;
+		if( nnzA >= nrA+1 && nnzA >= ncA+1 )
+			matrix_storage = RSB_MATRIX_STORAGE_BCSR;
+
+		/*
+		RSB_DO_FLAG_DEL(flags,RSB_FLAG_USE_HALFWORD_INDICES);
+		matrix_storage = RSB_MATRIX_STORAGE_BCOR;
+		*/
+
+		/*
+		 * Todo: enable:
+		 *
+		if( RSB_INDICES_FIT_IN_HALFWORD(nrA, ncA))
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_USE_HALFWORD_INDICES);
+		*/
+	}
+
+	if(!RSB_INDICES_FIT_IN_HALFWORD(nrA, ncA))
+		RSB_DO_FLAG_DEL(flags,RSB_FLAG_USE_HALFWORD_INDICES);
+
+	switch(matrix_storage)
+	{
+		case( RSB_MATRIX_STORAGE_BCSR ):	/* ... -> CSR */
+		if( roff != 0 || coff != 0 )
+		{
+			errval = RSB_ERR_BADARGS;
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}
+		if( nnzA < nrA+1 )
+		{
+			errval = RSB_ERR_BADARGS;
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}
+		switch(flags & RSB_FLAG_USE_HALFWORD_INDICES)
+		{
+			case(RSB_FLAG_USE_HALFWORD_INDICES): /* ... -> HCSR */
+
+			if(mtxAp->matrix_storage == RSB_MATRIX_STORAGE_BCSR) /* CSR -> HCSR */
+			{
+				/* row pointers are ok */
+				if(!(mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES))
+					rsb__do_switch_array_to_halfword_coo(JA,nnzA,0);
+				if( (mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES))
+					; /* column indices are ok */
+			}
+			if(mtxAp->matrix_storage == RSB_MATRIX_STORAGE_BCOR) /* COO -> HCSR */
+			{
+				if( (mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES))
+				{
+					rsb__do_switch_array_to_fullword_coo(RSB_INNER_CAST(rsb_half_idx_t*) IA,nnzA,0);
+				}
+				errval = rsb__util_compress_to_row_pointers_array(TA,nnzA,nrA,RSB_FLAG_C_INDICES_INTERFACE,RSB_FLAG_C_INDICES_INTERFACE,IA);
+				if(!(mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES))
+				{
+					rsb__do_switch_array_to_halfword_coo(JA,nnzA,0);
+				}
+			}
+			mtxAp->matrix_storage = RSB_MATRIX_STORAGE_BCSR;
+			RSB_DO_FLAG_SUBST(mtxAp->flags,RSB_FLAG_DEFAULT_COO_MATRIX_FLAGS,RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS|RSB_FLAG_USE_HALFWORD_INDICES);
+			RSB_DO_FLAG_ADD(mtxAp->flags,(RSB_FLAG_USE_HALFWORD_INDICES_CSR));
+			break;
+
+			case(RSB_FLAG_USE_FULLWORD_INDICES):	/* -> FCSR */
+
+			if(mtxAp->matrix_storage == RSB_MATRIX_STORAGE_BCSR) /* CSR -> FCSR */
+			{
+				/* row pointers are ok */
+				if(!(mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES))
+					; /* all done: CSR -> FCSR */
+				if( (mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES))
+					rsb__do_switch_array_to_fullword_coo(RSB_INNER_CAST(rsb_half_idx_t*) JA,nnzA,0); /* HCSR -> FCSR */
+			}
+
+			if(mtxAp->matrix_storage == RSB_MATRIX_STORAGE_BCOR) /* COO -> FCSR */
+			{
+				if( (mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES)) /* HCOO -> FCSR */
+				{
+					rsb__do_switch_array_to_fullword_coo(RSB_INNER_CAST(rsb_half_idx_t*) IA,nnzA,0); /* HCSR -> FCSR */
+					rsb__do_switch_array_to_fullword_coo(RSB_INNER_CAST(rsb_half_idx_t*) JA,nnzA,0); /* HCSR -> FCSR */
+				}
+ 				/* FCOO -> FCSR */
+				errval = rsb__util_compress_to_row_pointers_array(TA,nnzA,nrA,RSB_FLAG_C_INDICES_INTERFACE,RSB_FLAG_C_INDICES_INTERFACE,IA);
+			}
+			mtxAp->matrix_storage = RSB_MATRIX_STORAGE_BCSR;
+			RSB_DO_FLAG_SUBST(mtxAp->flags,RSB_FLAG_DEFAULT_COO_MATRIX_FLAGS|RSB_FLAG_USE_HALFWORD_INDICES,RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS);
+			RSB_DO_FLAG_ADD(mtxAp->flags,(RSB_FLAG_USE_CSR_RESERVED)); /* ! */
+			break;
+			default:
+			errval = RSB_ERR_UNIMPLEMENTED_YET;
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+			break;
+		}
+		if(nnzA)
+		{
+			RSB_ASSERT( mtxAp->bpntr[0] == 0 );
+			RSB_ASSERT( mtxAp->bpntr[nrA] == nnzA );
+		}
+		break;
+
+		case( RSB_MATRIX_STORAGE_BCOR ): /* COO -> ... */
+		switch(flags & RSB_FLAG_USE_HALFWORD_INDICES) /* COO -> H... */
+		{
+			case(RSB_FLAG_USE_HALFWORD_INDICES):	/* -> HCOO */
+			if(mtxAp->matrix_storage == RSB_MATRIX_STORAGE_BCSR)
+			{
+				errval = rsb__do_switch_compressed_array_to_fullword_coo(IA,nrA,roff,TA);
+				rsb__do_switch_array_to_halfword_coo(IA,nnzA,0);
+			}
+			if(mtxAp->matrix_storage == RSB_MATRIX_STORAGE_BCOR)
+			{
+				if( (mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES))
+					rsb__util_hcoo_array_add(RSB_INNER_CAST(rsb_half_idx_t*) IA,nnzA,roff);
+				else
+					rsb__do_switch_array_to_halfword_coo(IA,nnzA,roff);
+			}
+
+			if(!(mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES))
+			{
+				rsb__do_switch_array_to_halfword_coo(JA,nnzA,coff);
+			}
+			else
+			if( (mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES))
+			{
+				rsb__util_hcoo_array_add(RSB_INNER_CAST(rsb_half_idx_t*) JA,nnzA,coff);
+			}
+			mtxAp->matrix_storage = RSB_MATRIX_STORAGE_BCOR;
+			RSB_DO_FLAG_SUBST(mtxAp->flags,RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS,RSB_FLAG_DEFAULT_COO_MATRIX_FLAGS|RSB_FLAG_USE_HALFWORD_INDICES_COO);
+			break;
+
+			case(RSB_FLAG_USE_FULLWORD_INDICES):	/* -> FCOO */
+			if(mtxAp->matrix_storage == RSB_MATRIX_STORAGE_BCSR)
+			{
+				errval = rsb__do_switch_compressed_array_to_fullword_coo(IA,nrA,roff,TA);
+			}
+			if(mtxAp->matrix_storage == RSB_MATRIX_STORAGE_BCOR)
+			{
+				if(mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES)
+					rsb__do_switch_array_to_fullword_coo(RSB_INNER_CAST(rsb_half_idx_t*) IA,nnzA,roff);
+				else
+					rsb__util_coo_array_add(IA,nnzA,roff);
+			}
+			if(mtxAp->flags & RSB_FLAG_USE_HALFWORD_INDICES)
+				rsb__do_switch_array_to_fullword_coo(RSB_INNER_CAST(rsb_half_idx_t*) JA,nnzA,coff);
+			else
+				rsb__util_coo_array_add(JA,nnzA,coff);
+			mtxAp->matrix_storage = RSB_MATRIX_STORAGE_BCOR;
+			RSB_DO_FLAG_SUBST(mtxAp->flags,RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS|RSB_FLAG_USE_HALFWORD_INDICES,RSB_FLAG_DEFAULT_COO_MATRIX_FLAGS);
+			break;
+			default:
+			errval = RSB_ERR_UNIMPLEMENTED_YET;
+			RSB_PERR_GOTO(ret,RSB_ERRM_ES);
+			break;
+		}
+		break;
+	}
+ret:
+	RSB_DO_FLAG_DEL(mtxAp->flags,RSB_FLAG_QUAD_PARTITIONING);
+err:
+	return errval;
+}
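+
+/*
+ * Editorial sketch (not library API; names and error handling are
+ * illustrative only): providing rsb__do_switch_leaf with a temporary array
+ * TA sized as suggested in its comment above, then switching a leaf to
+ * halfword CSR with no row/column offset adjustment.
+ *
+ *	rsb_coo_idx_t *TA = rsb__malloc(sizeof(rsb_coo_idx_t) *
+ *			RSB_MIN(mtxAp->nnz, 1 + mtxAp->nr));
+ *	if(TA != NULL)
+ *	{
+ *		errval = rsb__do_switch_leaf(mtxAp, RSB_MATRIX_STORAGE_BCSR,
+ *				RSB_FLAG_USE_HALFWORD_INDICES, 0, 0, TA);
+ *		rsb__free(TA);
+ *	}
+ */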
+
+rsb_bool_t rsb__do_is_candidate_size_for_halfword_coo(rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_flags_t flags)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * TODO: get rid of this.
+	 * */
+#if 0
+{
+	rsb_coo_idx_t i,j,ij;
+	i=m;
+	j=k;
+	ij = RSB_COO_HALFWORDS_VALUES_PACK(i,j);
+	i = RSB_COO_HALFWORDS_VALUES_UNPACK_LI(ij);
+	j = RSB_COO_HALFWORDS_VALUES_UNPACK_UJ(ij);
+	RSB_INFO("(%d %d) -> (%d %d) (%d)\n",m,k,i,j,ij);
+}
+#endif
+	rsb_bool_t is = RSB_BOOL_FALSE;
+
+	if( RSB_DO_FLAG_HAS(flags,RSB_FLAG_USE_HALFWORD_INDICES_COO))
+		is=(!RSB_IS_COO_VALUE_MORE_THAN_HALF_BITS_LONG(m) && !RSB_IS_COO_VALUE_MORE_THAN_HALF_BITS_LONG(k));
+	else
+		is = RSB_BOOL_FALSE;
+	return is;
+}
+
+rsb_bool_t rsb__do_is_candidate_size_for_halfword_csr(rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnz, rsb_flags_t flags)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * TODO: get rid of this.
+	 * */
+	rsb_bool_t is = RSB_BOOL_FALSE;
+	if( RSB_DO_FLAG_HAS(flags,RSB_FLAG_USE_HALFWORD_INDICES))
+		is=(/*!RSB_IS_COO_VALUE_MORE_THAN_HALF_BITS_LONG(m) && */!RSB_IS_COO_VALUE_MORE_THAN_HALF_BITS_LONG(k));
+	else
+		is = RSB_BOOL_FALSE;
+	return is;
+}
+
+rsb_bool_t rsb__do_is_candidate_size_for_halfword(rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnz, rsb_flags_t flags)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * TODO: get rid of this.
+	 * */
+	rsb_bool_t is = RSB_BOOL_FALSE;
+	is = rsb__do_is_candidate_size_for_halfword_csr(m,k,nnz,flags) || rsb__do_is_candidate_size_for_halfword_coo(m,k,flags);
+	return is;
+}
+
+rsb_bool_t rsb_do_is_candidate_for_fullword_coo(const struct rsb_mtx_t * mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * TODO: get rid of this.
+	 * */
+	rsb_bool_t is = RSB_BOOL_FALSE;
+	if(!mtxAp || !rsb__is_terminal_recursive_matrix(mtxAp) || !rsb__is_css_matrix(mtxAp) /* || rsb__is_not_unsymmetric(mtxAp)*/)
+		return RSB_BOOL_FALSE;
+
+	if( RSB_DO_FLAG_HAS(mtxAp->flags,RSB_FLAG_WANT_COO_STORAGE))
+		is = RSB_BOOL_TRUE;
+	else
+		is = RSB_BOOL_FALSE;
+	return is;
+}
+
+rsb_bool_t rsb__do_is_candidate_for_halfword_coo(const struct rsb_mtx_t * mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * TODO: get rid of this.
+	 * */
+	if(!mtxAp || !rsb__is_terminal_recursive_matrix(mtxAp) || !rsb__is_coo_matrix(mtxAp) )
+		return RSB_BOOL_FALSE;
+
+	if((mtxAp->nnz/mtxAp->Mdim) > RSB_CONST_MIN_NNZ_PER_ROW_FOR_COO_SWITCH)
+		return RSB_BOOL_FALSE;
+
+	return rsb__do_is_candidate_size_for_halfword_coo(mtxAp->nr,mtxAp->nc,mtxAp->flags);
+}
+
+rsb_bool_t rsb__do_is_candidate_for_halfword_csr(const struct rsb_mtx_t * mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * TODO: get rid of this.
+	 * */
+	if(!mtxAp || !rsb__is_terminal_recursive_matrix(mtxAp) || (!rsb__is_css_matrix(mtxAp)/* || rsb__is_not_unsymmetric(mtxAp)*/
+			&& !rsb__is_bcss_matrix(mtxAp)))
+		return RSB_BOOL_FALSE;
+
+	return rsb__do_is_candidate_size_for_halfword_csr(mtxAp->nr,mtxAp->nc,mtxAp->nnz,mtxAp->flags);
+}
+
+void rsb__do_switch_array_to_fullword_coo(rsb_half_idx_t *hp, rsb_nnz_idx_t n, const rsb_coo_idx_t off)
+{
+        /*! 
+         * \ingroup gr_experimental
+         * */
+#if 0
+        /* FIXME: with icc -fast, this produces bad results (on an array of length 2 with [0,1], produces zeros)! */
+        rsb_coo_idx_t *p=(rsb_coo_idx_t*)hp;
+        register rsb_nnz_idx_t k;
+        for(k=n;k>0;--k)
+                p[k-1]=(rsb_coo_idx_t) hp[k-1];
+#else
+#if !defined(__INTEL_COMPILER)
+	register	/* with debug compile mode on, icc -O0 had problems here, too */ 
+#endif /* __INTEL_COMPILER */
+        rsb_nnz_idx_t k;
+	
+        if(n<1)
+		return;
+	if(off==0)
+#if defined(__INTEL_COMPILER)
+	/* using Intel(R) C Intel(R) 64 Compiler XE for applications running on Intel(R) 64, Version 12.0.0.084 Build 20101006 we noticed a wrong operation (zeroes and/or junk ones were computed), if not using the 'novector' pragma. */
+	#pragma novector
+#endif /* __INTEL_COMPILER */
+        for(k=n;RSB_LIKELY(k>1);--k)
+        {   
+                ((rsb_coo_idx_t*)hp)[k-1]=hp[k-1];
+        }   
+	else
+#if defined(__INTEL_COMPILER)
+	/* using Intel(R) C Intel(R) 64 Compiler XE for applications running on Intel(R) 64, Version 12.0.0.084 Build 20101006 we noticed a wrong operation (zeroes and/or junk ones were computed), if not using the 'novector' pragma. */
+	#pragma novector
+#endif /* __INTEL_COMPILER */
+        for(k=n;RSB_LIKELY(k>1);--k)
+        {   
+                ((rsb_coo_idx_t*)hp)[k-1]=off+hp[k-1];
+        }   
+        ((rsb_coo_idx_t*)hp)[0]=off+hp[0];
+#endif
+}
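+
+/*
+ * Editorial note on the conversion above (assuming sizeof(rsb_coo_idx_t) is
+ * twice sizeof(rsb_half_idx_t)): since the fullword array overlays the
+ * halfword one in place, the loop runs from the last element towards the
+ * first; the fullword store at index k-1 only overwrites halfwords at
+ * indices 2k-2 and 2k-1, never one still to be read. Element 0 is handled
+ * after the loop, reading hp[0] before the widened store to the same bytes.
+ */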
+
+void rsb__do_switch_array_to_halfword_coo(rsb_coo_idx_t *p, rsb_nnz_idx_t n, const rsb_half_idx_t off)
+{
+	/*!
+	 * \ingroup gr_experimental
+	 * */
+	rsb_half_idx_t *hp=(rsb_half_idx_t*)p;
+	register rsb_nnz_idx_t k;
+	if(off)
+	for(k=0;RSB_LIKELY(k<n);++k)
+		hp[k]=((rsb_half_idx_t)p[k])+off;
+	else
+	for(k=0;RSB_LIKELY(k<n);++k)
+		hp[k]=((rsb_half_idx_t)p[k]);
+}
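+
+/*
+ * Editorial note (same size assumption as above): unlike the widening case,
+ * this in-place narrowing can proceed forward: the halfword store at hp[k]
+ * lands at byte offset 2k, strictly below any fullword p[k'] (k' > k, byte
+ * offset 4k') still to be read, so no pending element is overwritten.
+ */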
+
+rsb_err_t rsb__do_switch_to_halfword_csr(struct rsb_mtx_t * mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * TODO: get rid of this.
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!mtxAp || !rsb__do_is_candidate_for_halfword_csr(mtxAp))
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+/*	RSB_INFO("HCSR for %d %d\n",mtxAp->roff,mtxAp->coff); */
+	rsb__do_switch_array_to_halfword_coo(mtxAp->bindx,mtxAp->nnz,0);
+	RSB_DO_FLAG_SUBST(mtxAp->flags,RSB_FLAG_MUTUALLY_EXCLUSIVE_SWITCHES,RSB_FLAG_USE_HALFWORD_INDICES_CSR);
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_switch_to_halfword_coo(struct rsb_mtx_t * mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * TODO: get rid of this.
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(!mtxAp || !rsb__do_is_candidate_for_halfword_coo(mtxAp))
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+#if 0
+  	/* RSB_INFO("HCOO for %d %d\n",mtxAp->roff,mtxAp->coff); */
+	for(i=0;i<mtxAp->Mdim;++i)
+	{
+		for(k=mtxAp->bpntr[i];k<mtxAp->bpntr[i+1]  ;++k)
+		{
+		       	j=mtxAp->bindx[k];
+			ij = RSB_COO_HALFWORDS_VALUES_PACK(i,j);
+			mtxAp->bindx[k]=ij;
+#if 0
+			RSB_ASSERT(RSB_COO_HALFWORDS_VALUES_UNPACK_LI(ij)==i);
+			RSB_ASSERT(RSB_COO_HALFWORDS_VALUES_UNPACK_UJ(ij)==j);
+#endif
+		}
+	}
+#else
+	rsb__do_switch_array_to_halfword_coo(mtxAp->bindx,mtxAp->nnz,0);
+	rsb__do_switch_array_to_halfword_coo(mtxAp->bpntr,mtxAp->nnz,0);
+#endif
+	RSB_DO_FLAG_SUBST(mtxAp->flags,RSB_FLAG_MUTUALLY_EXCLUSIVE_SWITCHES,RSB_FLAG_USE_HALFWORD_INDICES_COO);
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
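+
+/*
+ * Editorial note: in the COO leaf representation both bpntr and bindx hold
+ * one entry per nonzero (row and column indices, respectively; cf. the
+ * IA/JA aliasing in rsb__do_switch_leaf), hence both arrays are converted
+ * above with length mtxAp->nnz.
+ */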
+
+rsb_err_t rsb__do_switch_to_fullword_csr(struct rsb_mtx_t * mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * TODO: get rid of this.
+	 * TODO:RENAME: rsb__do_switch_to_fullword_csr -> rsb__mtx_rsb2csr
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_half_idx_t *hbindx;
+
+	if(!mtxAp || !rsb__do_is_candidate_for_halfword_csr(mtxAp))
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	hbindx=(rsb_half_idx_t*)mtxAp->bindx;
+
+	rsb__do_switch_array_to_fullword_coo(hbindx,mtxAp->nnz,0);
+	RSB_DO_FLAG_DEL(mtxAp->flags,RSB_FLAG_MUTUALLY_EXCLUSIVE_SWITCHES);
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+rsb_err_t rsb__do_switch_to_fullword_coo(struct rsb_mtx_t * mtxAp)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * TODO: get rid of this.
+	 * TODO:RENAME: rsb__do_switch_to_fullword_coo -> rsb__do_switch_to_fullword_csr_from_halfword_coo/rsb__mtx_rsb2coo
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	register rsb_nnz_idx_t k;
+
+	if(!mtxAp || !rsb__do_is_candidate_for_halfword_coo(mtxAp))
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	for(k=0;k<mtxAp->nnz;++k)
+	{
+		mtxAp->bindx[k]=RSB_COO_HALFWORDS_VALUES_UNPACK_UJ(mtxAp->bindx[k]);
+	}
+	mtxAp->bindx[mtxAp->nnz]=0;
+	RSB_DO_FLAG_DEL(mtxAp->flags,RSB_FLAG_MUTUALLY_EXCLUSIVE_SWITCHES);
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+#if 0
+
+rsb_err_t rsb__do_EXPERIMENTAL_halfword_coo_spmv_unua_double(
+	const double * restrict VA, const double * restrict rhs, double * restrict out,
+	const rsb_coo_idx_t  Mdim, const rsb_coo_idx_t  mdim, const rsb_nnz_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t *restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * FIXME : UNFINISHED,EXPERIMENTAL
+	 *
+
+	if (flags & RSB_FLAG_USE_HALFWORD_INDICES_COO)
+	in rsb_BCSR_spmv_uaua_double_N_r1_c1_u_U :
+	if(rsb__do_is_candidate_size_for_halfword_coo(Mdim,mdim))
+		return rsb__do_EXPERIMENTAL_halfword_coo_spmv_uaua_double( VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff);
+
+	 *
+	 * */
+	register rsb_nnz_idx_t k,nnz;
+	register rsb_coo_idx_t i,j,ij;
+	double acc=0;
+	register rsb_coo_idx_t i0;
+	nnz=bpntr[Mdim];
+
+#if 0
+	for(k=0;k<nnz;++k)
+	{
+		ij=bindx[k];
+		j = RSB_COO_HALFWORDS_VALUES_UNPACK_UJ(ij);
+		i = RSB_COO_HALFWORDS_VALUES_UNPACK_LI(ij);
+		out[i]-=rhs[j]*VA[k];
+	}
+#else
+
+	if(nnz<1)
+		goto err;
+	k=0;
+	ij=bindx[k];
+	i = RSB_COO_HALFWORDS_VALUES_UNPACK_LI(ij);
+	j = RSB_COO_HALFWORDS_VALUES_UNPACK_UJ(ij);
+	i0=i;
+
+	//while(ij != RSB_MARKER_COO_VALUE)
+	while(k<nnz)
+	{
+		acc=0;
+		for(;i==i0;)
+		{
+			acc += rhs[j]*VA[k];
+			++k;
+			ij=bindx[k];
+			i = RSB_COO_HALFWORDS_VALUES_UNPACK_LI(ij);
+			j = RSB_COO_HALFWORDS_VALUES_UNPACK_UJ(ij);
+		}
+		out[i0]-=acc;
+		i0=i;
+	}
+#endif
+err:
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_EXPERIMENTAL_halfword_coo_spmv_uaua_double_sym(
+	const double * restrict VA, const double * restrict rhs, double * restrict out,
+	const rsb_coo_idx_t  Mdim, const rsb_coo_idx_t  mdim, const rsb_nnz_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t *restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff)
+{
+	/*!
+	 * \ingroup gr_experimental
+	 * FIXME : EXPERIMENTAL
+	 *
+
+	if (flags & RSB_FLAG_USE_HALFWORD_INDICES_COO)
+	in rsb_BCSR_spmv_uaua_double_N_r1_c1_u_U :
+	if(rsb__do_is_candidate_size_for_halfword_coo(Mdim,mdim))
+		return rsb__do_EXPERIMENTAL_halfword_coo_spmv_uaua_double( VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff);
+
+	 *
+	 * */
+	register rsb_nnz_idx_t k,nnz;
+	register rsb_coo_idx_t i,j,ij;
+	double acc=0,lacc=0;
+	double tacc=0,tlacc=0;
+	register rsb_coo_idx_t i0,j0;
+	nnz=bpntr[Mdim];
+
+#if 0
+	for(k=0;k<nnz;++k)
+	{
+		ij=bindx[k];
+		j = RSB_COO_HALFWORDS_VALUES_UNPACK_UJ(ij);
+		i = RSB_COO_HALFWORDS_VALUES_UNPACK_LI(ij);
+		out[i]+=rhs[j]*VA[k];
+	}
+#else
+
+	if(nnz<1)
+		goto err;
+	k=0;
+	ij=bindx[k];
+	i = RSB_COO_HALFWORDS_VALUES_UNPACK_LI(ij);
+	j = RSB_COO_HALFWORDS_VALUES_UNPACK_UJ(ij);
+	i0=i;
+
+	if(roff==coff)
+	//while(ij != RSB_MARKER_COO_VALUE)
+	while(k<nnz)
+	{
+		tacc=0;acc=0;lacc=0;tlacc=0;
+		for(;i==i0;)
+		{
+			lacc =rhs[j]*VA[k];
+			tlacc = rhs[i]*VA[k];
+			acc +=lacc;
+			++k;
+			ij=bindx[k];
+			i = RSB_COO_HALFWORDS_VALUES_UNPACK_LI(ij);
+			j0=j;
+			j = RSB_COO_HALFWORDS_VALUES_UNPACK_UJ(ij);
+			out[j0]+=tlacc;
+		}
+		if(i0==j0)
+			acc -= lacc,// on the diagonal
+			tacc-=tlacc;// on the diagonal
+		out[i0]+= acc;
+		i0=i;
+	}
+	else
+	//while(ij != RSB_MARKER_COO_VALUE)
+	while(k<nnz)
+	{
+		double * tout=(out+coff)-roff;
+		tacc=0;acc=0;lacc=0;tlacc=0;
+		for(;i==i0;)
+		{
+			const double * trhs=(rhs+roff)-coff;
+			lacc = rhs[j]*VA[k];
+			tlacc=trhs[i]*VA[k];
+			acc +=lacc;
+			++k;
+			ij=bindx[k];
+			i = RSB_COO_HALFWORDS_VALUES_UNPACK_LI(ij);
+			j0=j;
+			j = RSB_COO_HALFWORDS_VALUES_UNPACK_UJ(ij);
+			tout[j0]+=tlacc;
+		}
+		out[i0] += acc;
+		i0=i;
+	}
+#endif
+err:
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_EXPERIMENTAL_halfword_coo_spmv_uaua_double(
+	const double * restrict VA, const double * restrict rhs, double * restrict out,
+	const rsb_coo_idx_t  Mdim, const rsb_coo_idx_t  mdim, const rsb_nnz_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t *restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff)
+{
+	/*!
+	 * \ingroup gr_experimental
+	 * FIXME : EXPERIMENTAL
+	 *
+
+	if (flags & RSB_FLAG_USE_HALFWORD_INDICES_COO)
+	in rsb_BCSR_spmv_uaua_double_N_r1_c1_u_U :
+	if(rsb__do_is_candidate_size_for_halfword_coo(Mdim,mdim))
+		return rsb__do_EXPERIMENTAL_halfword_coo_spmv_uaua_double( VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff);
+
+	 *
+	 * */
+	register rsb_nnz_idx_t k,nnz;
+	register rsb_coo_idx_t i,j,ij;
+	nnz=bpntr[Mdim];
+
+#if 0
+	for(k=0;k<nnz;++k)
+	{
+		ij=bindx[k];
+		j = RSB_COO_HALFWORDS_VALUES_UNPACK_UJ(ij);
+		i = RSB_COO_HALFWORDS_VALUES_UNPACK_LI(ij);
+		out[i]+=rhs[j]*VA[k];
+	}
+#else
+	double acc=0;
+	register rsb_coo_idx_t i0;
+
+	if(nnz<1)
+		goto err;
+	k=0;
+	ij=bindx[k];
+	i = RSB_COO_HALFWORDS_VALUES_UNPACK_LI(ij);
+	j = RSB_COO_HALFWORDS_VALUES_UNPACK_UJ(ij);
+	i0=i;
+
+	while(k<nnz)
+	//while(ij != RSB_MARKER_COO_VALUE)
+	{
+		acc=0;
+		for(;i==i0;)
+		{
+			acc += rhs[j]*VA[k];
+			++k;
+			ij=bindx[k];
+			i = RSB_COO_HALFWORDS_VALUES_UNPACK_LI(ij);
+			j = RSB_COO_HALFWORDS_VALUES_UNPACK_UJ(ij);
+		}
+		out[i0]+=acc;
+		i0=i;
+	}
+#endif
+err:
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_EXPERIMENTAL_halfword_coo_spsv_uxua_double(
+	const double * restrict VA, const double * restrict rhs, double * restrict out,
+	const rsb_coo_idx_t  Mdim, const rsb_coo_idx_t  mdim, const rsb_nnz_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t *restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * FIXME : UNFINISHED,EXPERIMENTAL
+	 *
+
+	if (flags & RSB_FLAG_USE_HALFWORD_INDICES_COO)
+	in rsb_BCSR_spmv_uaua_double_N_r1_c1_u_U :
+	if(rsb__do_is_candidate_size_for_halfword_coo(Mdim,mdim))
+		return rsb__do_EXPERIMENTAL_halfword_coo_spmv_uaua_double( VA, rhs, out, Mdim, mdim, bindx, bpntr, indptr, rpntr, cpntr, br, bc, roff, coff);
+
+	 *
+	 * */
+	register rsb_nnz_idx_t k,nnz;
+	register rsb_coo_idx_t i,j,ij;
+#if 0
+	nnz=bpntr[Mdim];
+	for(k=0;k<nnz;++k)
+	{
+		ij=bindx[k];
+		j = RSB_COO_HALFWORDS_VALUES_UNPACK_UJ(ij);
+		i = RSB_COO_HALFWORDS_VALUES_UNPACK_LI(ij);
+		out[i]+=rhs[j]*VA[k];
+	}
+#else
+	register double acc=0;
+	register rsb_coo_idx_t i0;
+	nnz=bpntr[Mdim];
+
+	if(nnz<1)
+		goto err;
+	k=0;
+	ij=bindx[k];
+	i = RSB_COO_HALFWORDS_VALUES_UNPACK_LI(ij);
+	j = RSB_COO_HALFWORDS_VALUES_UNPACK_UJ(ij);
+	i0=i;
+	RSB_ASSERT(!i);
+	RSB_ASSERT(!j);
+
+	out[i]=(out[i])/VA[k];
+
+	//while(ij != RSB_MARKER_COO_VALUE)
+	while(k<nnz)
+	{
+		acc=0;
+		for(;j<i;)
+		{
+			acc += rhs[j]*VA[k];
+	//		RSB_INFO("%d %d\n",i,j);
+			++k;
+			ij=bindx[k];
+			i = RSB_COO_HALFWORDS_VALUES_UNPACK_LI(ij);
+			j = RSB_COO_HALFWORDS_VALUES_UNPACK_UJ(ij);
+		}
+		/* j==i */
+//		RSB_ASSERT(j==i);
+		out[i]=(out[i]-acc)/VA[k];
+	//	RSB_INFO("%d %d\n",i,j);
+
+		++k;
+		ij=bindx[k];
+		i = RSB_COO_HALFWORDS_VALUES_UNPACK_LI(ij);
+		j = RSB_COO_HALFWORDS_VALUES_UNPACK_UJ(ij);
+		i0=i;
+	}
+#endif
+err:
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_EXPERIMENTAL_fullword_coo_spmv_unua_double(
+	const double * restrict VA, const double * restrict rhs, double * restrict out,
+	const rsb_coo_idx_t  Mdim, const rsb_coo_idx_t  mdim, const rsb_nnz_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t *restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * FIXME : UNFINISHED,EXPERIMENTAL
+	 *
+	 *
+	 * */
+	register rsb_nnz_idx_t k,nnz;
+	register rsb_coo_idx_t i,j;
+	double acc=0;
+	register rsb_coo_idx_t i0;
+	register rsb_coo_idx_t * IA;
+	register rsb_coo_idx_t * JA;
+	nnz=*indptr;//FIXME: a trick
+
+	if(nnz<1)
+		goto err;
+	k=0;
+	i=IA[k];
+	j=JA[k];
+	i0=i;
+
+	while(k<nnz)
+	{
+		acc=0;
+		for(;i==i0;)
+		{
+			acc += rhs[j]*VA[k];
+			++k;
+			i=IA[k];
+			j=JA[k];
+		}
+		out[i0]-=acc;
+		i0=i;
+	}
+err:
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_EXPERIMENTAL_fullword_coo_spmv_uaua_double_sym(
+	const double * restrict VA, const double * restrict rhs, double * restrict out,
+	const rsb_coo_idx_t  Mdim, const rsb_coo_idx_t  mdim, const rsb_nnz_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t *restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff)
+{
+	/*!
+	 * \ingroup gr_experimental
+	 * FIXME : EXPERIMENTAL
+	 *
+	 *
+	 * */
+	register rsb_nnz_idx_t k,nnz;
+	register rsb_coo_idx_t i,j;
+	double acc=0,lacc=0;
+	double tacc=0,tlacc=0;
+	register rsb_coo_idx_t i0,j0;
+	register rsb_coo_idx_t * IA;
+	register rsb_coo_idx_t * JA;
+	nnz=*indptr;//FIXME: a trick
+
+	if(nnz<1)
+		goto err;
+	k=0;
+	i=IA[k];
+	j=JA[k];
+	i0=i;
+
+	if(roff==coff)
+	//while(ij != RSB_MARKER_COO_VALUE)
+	while(k<nnz)
+	{
+		tacc=0;acc=0;lacc=0;tlacc=0;
+		for(;i==i0;)
+		{
+			lacc =rhs[j]*VA[k];
+			tlacc = rhs[i]*VA[k];
+			acc +=lacc;
+			++k;
+			i=IA[k];
+			j0=j;
+			j=JA[k];
+			out[j0]+=tlacc;
+		}
+		if(i0==j0)
+			acc -= lacc,// on the diagonal
+			tacc-=tlacc;// on the diagonal
+		out[i0]+= acc;
+		i0=i;
+	}
+	else
+	//while(ij != RSB_MARKER_COO_VALUE)
+	while(k<nnz)
+	{
+		double * tout=(out+coff)-roff;
+		tacc=0;acc=0;lacc=0;tlacc=0;
+		for(;i==i0;)
+		{
+			const double * trhs=(rhs+roff)-coff;
+			lacc = rhs[j]*VA[k];
+			tlacc=trhs[i]*VA[k];
+			acc +=lacc;
+			++k;
+			i=IA[k];
+			j0=j;
+			j=JA[k];
+			tout[j0]+=tlacc;
+		}
+		out[i0] += acc;
+		i0=i;
+	}
+err:
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_EXPERIMENTAL_fullword_coo_spmv_uaua_double(
+	const double * restrict VA, const double * restrict rhs, double * restrict out,
+	const rsb_coo_idx_t  Mdim, const rsb_coo_idx_t  mdim, const rsb_nnz_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t *restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff)
+{
+	/*!
+	 * \ingroup gr_experimental
+	 * FIXME : EXPERIMENTAL
+	 *
+	 *
+	 * */
+	register rsb_nnz_idx_t k,nnz;
+	register rsb_coo_idx_t i,j;
+	register rsb_coo_idx_t * IA;
+	register rsb_coo_idx_t * JA;
+	double acc=0;
+	register rsb_coo_idx_t i0;
+	nnz=*indptr;//FIXME: a trick
+
+	if(nnz<1)
+		goto err;
+	k=0;
+	i=IA[k];
+	j=JA[k];
+	i0=i;
+
+	while(k<nnz)
+	//while(ij != RSB_MARKER_COO_VALUE)
+	{
+		acc=0;
+		for(;i==i0;)
+		{
+			acc += rhs[j]*VA[k];
+			++k;
+			i=IA[k];
+			j=JA[k];
+		}
+		out[i0]+=acc;
+		i0=i;
+	}
+err:
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__do_EXPERIMENTAL_fullword_coo_spsv_uxua_double(
+	const double * restrict VA, const double * restrict rhs, double * restrict out,
+	const rsb_coo_idx_t  Mdim, const rsb_coo_idx_t  mdim, const rsb_nnz_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t *restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * FIXME : UNFINISHED,EXPERIMENTAL
+	 *
+	 *
+	 * */
+	register rsb_nnz_idx_t k,nnz;
+	register rsb_coo_idx_t i,j;
+	register double acc=0;
+	register rsb_coo_idx_t i0;
+	register rsb_coo_idx_t * IA;
+	register rsb_coo_idx_t * JA;
+	nnz=*indptr;//FIXME: a trick
+
+	if(nnz<1)
+		goto err;
+	k=0;
+	i=IA[k];
+	j=JA[k];
+	i0=i;
+	RSB_ASSERT(!i);
+	RSB_ASSERT(!j);
+
+	out[i]=(out[i])/VA[k];
+
+	//while(ij != RSB_MARKER_COO_VALUE)
+	while(k<nnz)
+	{
+		acc=0;
+		for(;j<i;)
+		{
+			acc += rhs[j]*VA[k];
+	//		RSB_INFO("%d %d\n",i,j);
+			++k;
+			i=IA[k];
+			j=JA[k];
+		}
+		/* j==i */
+//		RSB_ASSERT(j==i);
+		out[i]=(out[i]-acc)/VA[k];
+	//	RSB_INFO("%d %d\n",i,j);
+
+		++k;
+		i=IA[k];
+		j=JA[k];
+		i0=i;
+	}
+err:
+	return RSB_ERR_NO_ERROR;
+}
+
+
+#endif
+/* @endcond */
diff --git a/rsb_swt.h b/rsb_swt.h
new file mode 100644
index 0000000..2a08c73
--- /dev/null
+++ b/rsb_swt.h
@@ -0,0 +1,87 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief This source file contains experimental functions 
+ * */
+
+#ifndef RSB_SWT_H_INCLUDED
+#define RSB_SWT_H_INCLUDED
+#define RSB_MATRIX_STORAGE_AUTO 0x0	/* TODO: move to rsb_types.h */
+#include "rsb_internals.h"		/* */
+/*#define RSB_CONST_MIN_NNZ_PER_ROW_FOR_COO_SWITCH 4*/
+#define RSB_CONST_MIN_NNZ_PER_ROW_FOR_COO_SWITCH 2
+
+typedef unsigned short int rsb_half_idx_t;
+
+rsb_bool_t rsb__do_is_candidate_size_for_halfword_coo(rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_flags_t flags);
+rsb_bool_t rsb__do_is_candidate_for_halfword_coo(const struct rsb_mtx_t * mtxAp);
+rsb_err_t rsb__do_switch_to_halfword_coo(struct rsb_mtx_t * mtxAp);
+rsb_err_t rsb__do_switch_to_fullword_coo(struct rsb_mtx_t * mtxAp);
+#if 0
+rsb_err_t rsb__do_EXPERIMENTAL_halfword_coo_spmv_aa_double_sym(
+	const double * restrict VA, const double * restrict rhs, double * restrict out,
+	const rsb_coo_idx_t  Mdim, const rsb_coo_idx_t  mdim, const rsb_nnz_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t *restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff);
+rsb_err_t rsb__do_EXPERIMENTAL_halfword_coo_spmv_aa_double(
+	const double * restrict VA, const double * restrict rhs, double * restrict out,
+	const rsb_coo_idx_t  Mdim, const rsb_coo_idx_t  mdim, const rsb_nnz_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t *restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff);
+rsb_err_t rsb__do_EXPERIMENTAL_halfword_coo_spsv_uxua_double(
+	const double * restrict VA, const double * restrict rhs, double * restrict out,
+	const rsb_coo_idx_t  Mdim, const rsb_coo_idx_t  mdim, const rsb_nnz_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t *restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff);
+#endif
+
+
+rsb_bool_t rsb__do_is_candidate_size_for_halfword(rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnz, rsb_flags_t flags);
+rsb_bool_t rsb__do_is_candidate_size_for_halfword_csr(rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnz, rsb_flags_t flags);
+rsb_bool_t rsb__do_is_candidate_for_halfword_csr(const struct rsb_mtx_t * mtxAp);
+#define RSB_FLAG_USE_FULLWORD_INDICES	0x00000000
+rsb_err_t rsb__do_switch_leaf(struct rsb_mtx_t * mtxAp, rsb_fmt_t matrix_storage, rsb_flags_t flags, rsb_coo_idx_t roff, rsb_coo_idx_t coff, rsb_coo_idx_t *TA);
+rsb_err_t rsb__do_switch_to_halfword_csr(struct rsb_mtx_t * mtxAp);
+rsb_err_t rsb__do_switch_to_fullword_csr(struct rsb_mtx_t * mtxAp);
+
+#define RSB_COO_HALFWORDS_VALUES_PACK(LI,LJ)	((LJ)|((LI)<<RSB_COO_HALF_BITS_SIZE))/* logical row and column index pack   FIXME */
+#define RSB_COO_HALFWORDS_VALUES_UNPACK_LI(LIJ)	((((LIJ))>>RSB_COO_HALF_BITS_SIZE)&~((-1)<<RSB_COO_HALF_BITS_SIZE))	/* logical row index unpack FIXME */
+#define RSB_COO_HALFWORDS_VALUES_UNPACK_UJ(LIJ)	(((LIJ) &~((-1)<<RSB_COO_HALF_BITS_SIZE)))	/* logical column index unpack FIXME */
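+/*
+ * Illustrative round trip for the pack/unpack macros above (a sketch: i and
+ * j are hypothetical indices, each assumed to fit in RSB_COO_HALF_BITS_SIZE
+ * bits):
+ *
+ *	rsb_coo_idx_t ij = RSB_COO_HALFWORDS_VALUES_PACK(i,j);
+ *	RSB_ASSERT( RSB_COO_HALFWORDS_VALUES_UNPACK_LI(ij) == i );
+ *	RSB_ASSERT( RSB_COO_HALFWORDS_VALUES_UNPACK_UJ(ij) == j );
+ */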
+void rsb__do_switch_array_to_halfword_coo(rsb_coo_idx_t  *p, rsb_nnz_idx_t n, const rsb_half_idx_t off);
+void rsb__do_switch_array_to_fullword_coo(rsb_half_idx_t *p, rsb_nnz_idx_t n, const rsb_coo_idx_t off);
+
+
+rsb_err_t rsb__do_EXPERIMENTAL_fullword_coo_spmv_unua_double(
+	const double * restrict VA, const double * restrict rhs, double * restrict out,
+	const rsb_coo_idx_t  Mdim, const rsb_coo_idx_t  mdim, const rsb_nnz_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t *restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff);
+
+rsb_err_t rsb__do_EXPERIMENTAL_fullword_coo_spmv_aa_double_sym(
+	const double * restrict VA, const double * restrict rhs, double * restrict out,
+	const rsb_coo_idx_t  Mdim, const rsb_coo_idx_t  mdim, const rsb_nnz_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t *restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff);
+
+rsb_err_t rsb__do_EXPERIMENTAL_fullword_coo_spmv_aa_double(
+	const double * restrict VA, const double * restrict rhs, double * restrict out,
+	const rsb_coo_idx_t  Mdim, const rsb_coo_idx_t  mdim, const rsb_nnz_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t *restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff);
+
+rsb_err_t rsb__do_EXPERIMENTAL_fullword_coo_spsv_uxua_double(
+	const double * restrict VA, const double * restrict rhs, double * restrict out,
+	const rsb_coo_idx_t  Mdim, const rsb_coo_idx_t  mdim, const rsb_nnz_idx_t * restrict bindx, const rsb_nnz_idx_t * restrict bpntr, const rsb_nnz_idx_t *restrict indptr, const rsb_coo_idx_t * restrict rpntr, const rsb_coo_idx_t * restrict cpntr, const rsb_coo_idx_t br, const rsb_coo_idx_t bc, const rsb_coo_idx_t roff, const rsb_coo_idx_t coff);
+rsb_bool_t rsb_do_is_candidate_for_fullword_coo(const struct rsb_mtx_t * mtxAp);
+#endif /* RSB_SWT_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_sys.c b/rsb_sys.c
new file mode 100644
index 0000000..a2f0738
--- /dev/null
+++ b/rsb_sys.c
@@ -0,0 +1,1343 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief System, or standard library related functions.
+ * */
+
+#include <unistd.h>	/* sysconf */
+#include "rsb_internals.h"
+#include "rsb.h"
+#ifdef RSB_HAVE_LIMITS_H
+#include <limits.h>	/* CHAR_BIT */
+#endif /* RSB_HAVE_LIMITS_H */
+#include <assert.h>	/* assert */
+#ifdef RSB_HAVE_STDLIB_H
+#include <stdlib.h>	/* posix_memalign */
+#endif /* RSB_HAVE_STDLIB_H */
+#ifdef RSB_HAVE_MALLOC_H
+#include <malloc.h>	/* posix_memalign */
+#endif /* RSB_HAVE_MALLOC_H */
+#ifdef RSB_HAVE_SYS_SYSTEMCFG_H 
+#include <sys/systemcfg.h>	/* for _H_SYSTEMCFG */
+#endif /* RSB_HAVE_SYS_SYSTEMCFG_H  */
+#ifdef RSB_HAVE_SYS_MMAN_H
+#if RSB_HAVE_SYS_MMAN_H
+#include <sys/mman.h>	/* for mlockall */
+#endif /* RSB_HAVE_SYS_MMAN_H */
+#endif /* RSB_HAVE_SYS_MMAN_H */
+#if defined(RSB_HAVE_DMALLOC_H) && defined(RSB_WANT_DMALLOC) && (RSB_WANT_DMALLOC!=0)
+#include <dmalloc.h>	/* a debug library */
+#endif /* defined(RSB_HAVE_DMALLOC_H) && defined(RSB_WANT_DMALLOC) && (RSB_WANT_DMALLOC!=0) */
+#if defined(RSB_HAVE_DUMA_H) && defined(RSB_WANT_DUMA) && (RSB_WANT_DUMA!=0)
+#include <duma.h>	/* a debug library */
+#endif /* defined(RSB_HAVE_DUMA_H) && defined(RSB_WANT_DUMA) && (RSB_WANT_DUMA!=0) */
+/* #include <stdio.h> */	/* fileno */
+#if RSB_WITH_HWLOC
+#include <hwloc.h>
+#endif	/* RSB_WITH_HWLOC */
+
+/* set the following to 1 to get some real fun */
+#define RSB_WANT_RANDOM_MALLOC_FAULT_INJECTION 0
+#if RSB_WANT_RANDOM_MALLOC_FAULT_INJECTION
+/* FIXME: need a true random number generator interface */
+#define RSB_SHOULD_RANDOMLY_FAIL (rsb__rand_coo_index(1349)==42)
+#else /* RSB_WANT_RANDOM_MALLOC_FAULT_INJECTION */
+#define RSB_SHOULD_RANDOMLY_FAIL 0
+#endif /* RSB_WANT_RANDOM_MALLOC_FAULT_INJECTION */
+
+#define RSB_SUSPECT_ALLOCATION_SIZE 1024*1024*16 /* i.e. of notable size. */
+#define RSB_MEM_DEBUG 0 /* verbosity */
+#define RSB_MEM_DEBUG_REALLOC 0 /* verbosity */
+#define RSB_CHEAP_DEBUG 0 /* cheap extra checks on suspect memory wrapper related values */
+#define RSB_DEBUG_MARKER_AFTER_FREE 0 /* double free protection */
+#define RSB_DEBUG_SHRED_AFTER_FREE 0 /* protection against re-use of freed areas */
+#define RSB_SHRED_BYTE /* 0xFF */ 0xF0 /* byte value to use when shredding memory */
+#define RSB_SHRED_WORD ( RSB_SHRED_BYTE | ( RSB_SHRED_BYTE<<8 ) | ( RSB_SHRED_BYTE<<16 ) | ( RSB_SHRED_BYTE<<24 ) )
+#define RSB_FREE_MARKER 0xDEADBEEF /* ( 3735928559) */ /* a marker for detecting double free */
+#define RSB_OVW_MARKER  0xBEEFBABE /* (-1091585346) */ /* a marker for detecting accidental overwrites */
+#define RSB_MW_ODMO ( 0) /* memory wrapper overwrite detection marker offset (set to 0 to deactivate; to activate, set to a value not overlapping the other *MW* offsets) */
+#define RSB_MW_SHMO (-1) /* memory wrapper shift marker offset */
+#define RSB_MW_SZMO (-2) /* memory wrapper size  marker offset */
+#define RSB_MW_ESLC (-RSB_MIN(RSB_MIN(RSB_MW_ODMO,RSB_MW_ODMO),RSB_MIN(RSB_MW_SZMO,RSB_MW_SHMO))) /* memory wrapper extra 'sizeof' locations count */
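+
+/*
+ * Editorial sketch of the wrapper layout implied by the offsets above (an
+ * interpretation of the code in this file, not normative documentation):
+ * the user pointer p returned by rsb__aligned_malloc lies RSB_MW_ESLC
+ * size_t slots plus an alignment shift past the raw malloc() area, with the
+ * bookkeeping values stored just below it:
+ *
+ *	[raw malloc area][RSB_MW_ESLC size_t slots][shift bytes][user area...]
+ *	                                                        ^-- p
+ *	((size_t*)p)[RSB_MW_SHMO]  holds the alignment shift
+ *	((size_t*)p)[RSB_MW_SZMO]  holds the user-visible size
+ *	((size_t*)p)[RSB_MW_ODMO]  holds RSB_OVW_MARKER (only if RSB_MW_ODMO != 0)
+ */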
+
+#define RSB_MD_ASSERT RSB_ASSERT  /* memory debug assert macro */
+
+#if RSB_DEBUG_SHRED_AFTER_FREE
+#include <string.h>	/* memset */
+#endif
+
+#ifdef RSB_HAVE_SYS_RESOURCE_H 
+#define RSB_USE_RUSAGE 1
+#else
+#define RSB_USE_RUSAGE 0
+#endif
+
+#if RSB_USE_RUSAGE
+#include <sys/resource.h>	/* getrusage */
+#endif
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+#if RSB_WANT_ALLOCATOR_LIMITS
+#define RSB_ALLOC_MEMAAA_LIMIT rsb_global_session_handle.memory_count_max
+#define RSB_ALLOC_MEMAAC_LIMIT rsb_global_session_handle.allocations_count_max
+#define RSB_ALLOC_LIMITS_TRESPASSED(AAA,AAC) RSB_UNLIKELY( ( ( RSB_ALLOC_MEMAAA_LIMIT > 0 && rsb_global_session_handle.allocated_memory+(AAA) >= RSB_ALLOC_MEMAAA_LIMIT ) || ( RSB_ALLOC_MEMAAC_LIMIT > 0 && rsb_global_session_handle.allocations_count+(AAC) > RSB_ALLOC_MEMAAC_LIMIT ) ) ? 1 : 0 )
+#else /* RSB_WANT_ALLOCATOR_LIMITS */
+#define RSB_ALLOC_LIMITS_TRESPASSED(AAA,AAC) 0
+#endif /* RSB_WANT_ALLOCATOR_LIMITS */
+#define RSB_ZERO_BYTE_ALLOC_CHECK	0
+
+#define RSB_TIME_SET_THREADS 0
+
+void rsb__g_rsb_memory_counter_init(void)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * Memory counter reset.
+	 */
+#ifndef RSB_DISABLE_ALLOCATOR_WRAPPER
+	rsb_global_session_handle.allocated_memory=0;
+	rsb_global_session_handle.allocations_count=0;
+#endif /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+}
+
+size_t rsb__get_g_rsb_memory_count(void)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * A mere accessor function.
+	 */
+#ifndef RSB_DISABLE_ALLOCATOR_WRAPPER
+	return rsb_global_session_handle.allocated_memory;
+#else /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+	return 0;
+#endif /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+}
+
+size_t rsb__get_g_rsb_allocations_count(void)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * A mere accessor function.
+	 */
+#ifndef RSB_DISABLE_ALLOCATOR_WRAPPER
+	return rsb_global_session_handle.allocations_count;
+#else /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+	return 0;
+#endif /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+}
+
+#if 1
+
+static void * rsb_aligned_free(void *p)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * \param p a generic pointer allocated with rsb__aligned_malloc
+	 * \return the input pointer, in case of correct operation, NULL in case of error
+	 *
+	 * frees a memory area previously allocated with rsb__aligned_malloc,
+	 * and returns the pointer in case of successful free,
+	 * or NULL in case of suspect error.
+	 * */
+#ifndef RSB_DISABLE_ALLOCATOR_WRAPPER
+	size_t size;
+	size_t shift;
+	if( p == NULL )
+		return p;
+	#if RSB_DEBUG_SHRED_AFTER_FREE
+	if( (((rsb_int_t)p) & RSB_SHRED_WORD ) == RSB_SHRED_WORD ) /* shred-area read pointer detection */
+	{
+		RSB_STDERR("Warning: it is likely that pointer %p is invalid and was read from a previously freed area. Expect a crash now.\n",p);
+		RSB_MD_ASSERT(0);
+	}
+	#endif /* RSB_DEBUG_SHRED_AFTER_FREE */
+	size  = ((size_t*)p)[RSB_MW_SZMO];
+	#if RSB_MEM_DEBUG
+	RSB_STDERR("freeing   %zu bytes at %p (in hex:0x%0zx bytes)\n",size,p,size);
+	#endif
+	shift = ((size_t*)p)[RSB_MW_SHMO];
+	#if RSB_DEBUG_MARKER_AFTER_FREE
+	if(size == RSB_FREE_MARKER || shift == RSB_FREE_MARKER)
+	{
+		RSB_STDERR("Warning: it is almost certain that memory at %p has been already deallocated! Expect a crash now.\n",p);
+		RSB_MD_ASSERT(0);
+	}
+	((size_t*)p)[RSB_MW_SZMO] = RSB_FREE_MARKER;
+	((size_t*)p)[RSB_MW_SHMO] = RSB_FREE_MARKER;
+	#endif /* RSB_DEBUG_MARKER_AFTER_FREE */
+	if( RSB_MW_ODMO && ((size_t*)p)[RSB_MW_ODMO] != RSB_OVW_MARKER ) 
+	{
+		RSB_STDERR("Warning: memory at %p has been overwritten (marker value is 0x%zx instead of 0x%x) ! Expect crashes.\n",p,((size_t*)p)[RSB_MW_ODMO],RSB_OVW_MARKER );
+		RSB_MD_ASSERT(0);
+	}
+	#if RSB_DEBUG_SHRED_AFTER_FREE
+	if( ( size >= sizeof(rsb_int_t) ) && ( *(rsb_int_t*)p == RSB_SHRED_WORD ) ) /* shredded area re-free detection */
+	{
+		RSB_STDERR("Warning: it is possible that the %zu-byte area at %p was recently freed (points to {0x%x, ...}). A crash may follow.\n",size,p,*(rsb_int_t*)p);
+		/* RSB_MD_ASSERT(0); */
+	}
+	memset(p, RSB_SHRED_BYTE, size);
+	#endif /* RSB_DEBUG_SHRED_AFTER_FREE */
+	p = (( char *)p)-(shift);
+	p = ((size_t*)p)-RSB_MW_ESLC;	/* we make room for the markers */
+	rsb_global_session_handle.allocated_memory -= size;
+	rsb_global_session_handle.allocations_count--;
+	free(p);
+	return p;
+#else /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+	free(p);
+	return p;
+#endif /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+}
+
+void * rsb__realloc(void *rsb_data, size_t size)
+{
+#ifdef RSB_WANT_DOUBLE_ALIGNED
+	return rsb__do_realloc(rsb_data,size,sizeof(double)*2);
+#else /* RSB_WANT_DOUBLE_ALIGNED */
+	return rsb__do_realloc(rsb_data,size,1);
+#endif /* RSB_WANT_DOUBLE_ALIGNED */
+}
+
+void * rsb__do_realloc(void *rsb_data, size_t size, size_t alignment)
+{
+	void * p = rsb_data;
+	size_t extra = 0;
+#ifndef RSB_DISABLE_ALLOCATOR_WRAPPER
+	size_t osize;
+	size_t shift;
+	extra = sizeof(size_t)*RSB_MW_ESLC+alignment;
+
+	if(p==NULL)
+		return p;
+	osize  = ((size_t*)p)[RSB_MW_SZMO];
+	#if ( RSB_MEM_DEBUG || RSB_MEM_DEBUG_REALLOC )
+	RSB_STDERR("reallocating from %zu to %zu bytes (%+zd) at 0x%p (in hex: to 0x%0zx bytes)\n",osize,size,(size-osize),p,size);
+	#endif
+	shift = ((size_t*)p)[RSB_MW_SHMO];
+	p = (( char *)p)-(shift);
+	p = ((size_t*)p)-RSB_MW_ESLC;	/* we make room for the markers */
+	/* no free was performed, since extra>0 */
+#else /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+#endif /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+	if(size==0) /* a free shall be performed */
+		extra=0;
+
+#ifndef RSB_DISABLE_ALLOCATOR_WRAPPER
+	if(size==0 && p) /* a free shall be performed */
+		rsb_global_session_handle.allocations_count--;
+
+	if(size>osize)
+		rsb_global_session_handle.allocated_memory+=size-osize;
+	else
+	if(p) /* a free shall be performed */
+		rsb_global_session_handle.allocated_memory-=osize-size;
+#endif /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+	p = realloc(p,size+extra);/* if freeing, either p or NULL will be returned */
+
+	if(!p)
+		return p;/* failure */
+
+#ifndef RSB_DISABLE_ALLOCATOR_WRAPPER
+	/*!
+	 * \ingroup gr_internals
+	 * TODO : is there no way to preserve the alignment ?
+	 * */
+#if 1
+	/* restoring back allocation info (and potentially losing alignment, because we have to keep the same shift!) */
+	p = ((size_t*)p)+RSB_MW_ESLC;	/* we make room for markers */
+	p = (( char *)p)+(shift);
+	/* to restore alignment, should perform a memmove */
+	((size_t*)p)[RSB_MW_SHMO] = shift;
+	((size_t*)p)[RSB_MW_SZMO] = size;
+	if( RSB_MW_ODMO ) ((size_t*)p)[RSB_MW_ODMO] = RSB_OVW_MARKER; 
+#else
+	/* buggy way */
+
+	{
+		size_t off;
+		off=((size_t)(((size_t*)p)+2))%(alignment); /* to the return address from ((size_t*)p)+2 */
+		shift = (alignment-off);
+		p = ((size_t*)p)+2;	/* we make room for two markers */
+		p = (( char *)p)+(shift);
+		((size_t*)p)[RSB_MW_SHMO] = shift;
+		((size_t*)p)[RSB_MW_SZMO] = size;
+		if( RSB_MW_ODMO ) ((size_t*)p)[RSB_MW_ODMO] = RSB_OVW_MARKER; 
+	}
+#endif
+	return p;
+#else /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+	return p; /* success */
+#endif /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+	return NULL;
+}
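+
+/*
+ * Illustrative usage of the wrapper above (a sketch; p and sz are
+ * hypothetical, with p assumed to come from a previous rsb__malloc, since
+ * unlike realloc() this wrapper does not accept a NULL input pointer):
+ *
+ *	void *np = rsb__realloc(p, 2 * sz);
+ *	if(np != NULL)
+ *	{
+ *		p = np;
+ *		sz *= 2;
+ *	}
+ */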
+
+void * rsb__aligned_malloc(size_t size, size_t alignment)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * allocates size bytes and an integer, in a way to keep track of the allocated chunk size
+	 * \param size is the amount of needed bytes to allocate
+	 *
+	 * the returned area will be alignment bytes aligned.
+	 * this area should be deallocated with rsb_aligned_free.
+	 *
+	 * note that although the address will be aligned as asked,
+	 * a full guarantee of alignment and contiguity holds only on
+	 * machines with no virtual memory (on Linux there seems to be no
+	 * user-space contiguous memory allocator akin to the kernel's vmalloc).
+	 *
+	 * if none of RSB_DISABLE_ALLOCATOR_WRAPPER, RSB_HAVE_POSIX_MEMALIGN and RSB_HAVE_MEMALIGN is available, malloc() will be used.
+	 * */
+#ifndef RSB_DISABLE_ALLOCATOR_WRAPPER
+	void * p;
+	size_t extra = sizeof(size_t)*RSB_MW_ESLC+alignment;
+	size_t off;
+	size_t shift;
+
+	if(RSB_ALLOC_LIMITS_TRESPASSED(size+extra,1))
+	{
+		rsb__print_memory_allocation_info(); 
+		return NULL;
+	}
+
+#if RSB_ZERO_BYTE_ALLOC_CHECK
+	if(size == 0 && extra == 0)
+	{
+		RSB_ERROR(RSB_ERRM_ZSM);
+	}
+#endif
+	p = malloc( size + extra );
+	if(!p)
+		return p;/* failure */
+	RSB_DEBUG_ASSERT(rsb_global_session_handle.allocated_memory	<=(rsb_global_session_handle.allocated_memory+size  ));
+	/* the following could trigger during very very long/big runs, so .. */
+	RSB_DEBUG_ASSERT((rsb_global_session_handle.allocations_count	< (rsb_global_session_handle.allocations_count+1))
+			/* ... this line should fix that cases */
+			|| (rsb_global_session_handle.allocations_count+1)==0);
+
+	rsb_global_session_handle.allocated_memory+=size;
+	rsb_global_session_handle.allocations_count++;
+	off=((size_t)(((size_t*)p)+RSB_MW_ESLC))%(alignment); /* to the return address from ((size_t*)p)+RSB_MW_ESLC */
+	shift = (alignment-off);
+	p = ((size_t*)p)+RSB_MW_ESLC;	/* we make room for the markers */
+	p = (( char *)p)+(shift);
+	((size_t*)p)[RSB_MW_SHMO] = shift;
+	((size_t*)p)[RSB_MW_SZMO] = size;
+	if( RSB_MW_ODMO ) ((size_t*)p)[RSB_MW_ODMO] = RSB_OVW_MARKER; 
+	return p;
+#else /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+	#if RSB_HAVE_POSIX_MEMALIGN 
+	void * p = NULL;
+        size_t ca = sizeof(void*); /* corrected alignment */
+        while(ca<alignment)
+                ca*=2;
+        alignment = ca; /* "The address  of  the  allocated  memory  will be a multiple of alignment, which must be a power of two and a multiple of sizeof(void *)." */                                                                                      
+	if(posix_memalign(&p, alignment, size))
+		return p; /* failure */
+	else
+		return p; /* success */
+	#elif RSB_HAVE_MEMALIGN 
+	return memalign( alignment, size);
+	#else
+	return malloc(size); /* no platform support for aligned alloc */
+	#endif
+#endif /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+}
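+
+/*
+ * Illustrative usage (a sketch, with error handling elided; n is a
+ * hypothetical element count): allocating a double-aligned area and
+ * releasing it through rsb__free, which forwards to rsb_aligned_free when
+ * the allocator wrapper is enabled.
+ *
+ *	double *x = rsb__aligned_malloc(n * sizeof(double), 2 * sizeof(double));
+ *	if(x != NULL)
+ *	{
+ *		... use x[0..n-1] ...
+ *		rsb__free(x);
+ *	}
+ */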
+
+void * rsb__free(void *p)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * see rsb_aligned_free
+	 * */
+	#if RSB_MEM_DEBUG
+	RSB_STDERR("freeing %p\n",p);
+	#endif
+#ifndef RSB_DISABLE_ALLOCATOR_WRAPPER
+	return rsb_aligned_free(p);
+#else /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+	free(p);
+	return p;
+#endif /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+}
+
+void * rsb__malloc(size_t size)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * see rsb__aligned_malloc
+	 * */
+	void * data=NULL;
+
+#if RSB_ZERO_BYTE_ALLOC_CHECK
+	if(size == 0)
+	{
+		RSB_ERROR(RSB_ERRM_ZSM);
+	}
+#endif
+	if(size >= RSB_MAX_ALLOCATABLE_MEMORY_CHUNK)
+	{	
+		#if RSB_MEM_DEBUG
+		RSB_STDERR("cannot allocate %zu bytes since it is more than the maximum allowed %zu\n",size,RSB_MAX_ALLOCATABLE_MEMORY_CHUNK);
+		#endif
+		return data;
+	}
+#ifdef RSB_WANT_DOUBLE_ALIGNED
+	data = rsb__aligned_malloc(size,sizeof(double)*2);
+#else /* RSB_WANT_DOUBLE_ALIGNED */
+#ifndef RSB_DISABLE_ALLOCATOR_WRAPPER
+	data = rsb__aligned_malloc(size,1);
+#else /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+	data = malloc(size);
+#endif /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+#endif /* RSB_WANT_DOUBLE_ALIGNED */
+	#if RSB_MEM_DEBUG
+	RSB_STDERR("allocated %zu bytes to %p (in hex:0x%0zx bytes)\n",size,data,size);
+	#endif /* RSB_MEM_DEBUG */
+
+#if RSB_WANT_RANDOM_MALLOC_FAULT_INJECTION
+	if(data)if(RSB_SHOULD_RANDOMLY_FAIL){rsb__free(data);data=NULL;}
+#endif /* RSB_WANT_RANDOM_MALLOC_FAULT_INJECTION */
+	return data;
+}
+
+#else
+/* BEGIN OF DEAD CODE */
+void * rsb__free(void *rsb_data)
+{
+	/*!
+	 * TODO: DELETE THIS DEAD CODE
+	 *
+	 * \param rsb_data a generic pointer allocated with rsb__malloc
+	 * \return the input pointer, in case of correct operation, NULL in case of error
+	 *
+	 * deletes a memory area previously allocated with rsb__malloc,
+	 * and returns the pointer in case of successful free,
+	 * or NULL in case of suspect error.
+	 * */
+#ifndef RSB_DISABLE_ALLOCATOR_WRAPPER
+	size_t size;
+	if(!rsb_data)return rsb_data;
+
+#ifdef RSB_WANT_DOUBLE_ALIGNED
+	/*!
+	 * This is a trick : we take the offset written in the byte just 
+	 * behind the allocated area and use it to restore the original
+	 * [m|c]alloc-ated area.
+	 * */
+	size_t off = ((unsigned rsb_byte_t*)rsb_data)[RSB_MW_SHMO];
+	/* we decode back the whole allocated area size */
+	size=((size_t*)((unsigned rsb_byte_t*)(rsb_data)-(0x10-off)))[RSB_MW_SHMO];
+	/*
+	RSB_STDERR("freeing address after offset %d ... \n",off);
+	RSB_STDERR("freeing  %d bytes ... \n",size);
+	*/
+#else /* RSB_WANT_DOUBLE_ALIGNED */
+	/* we decode back the whole allocated area size */
+	size=*(((size_t*)(rsb_data))+RSB_MW_SHMO);
+#endif /* RSB_WANT_DOUBLE_ALIGNED */
+
+	#if RSB_CHEAP_DEBUG
+	if( size < 0 || size > RSB_SUSPECT_ALLOCATION_SIZE )/* this is a BAD sign and a warning should be issued */
+	{
+		RSB_ERROR("WARNING : pointer x%08x[%d] contains (has been overwritten with ?) a suspect value : %08x==%d", rsb_data,RSB_MW_SHMO,size,size );
+		return NULL;
+	}
+	#endif /* RSB_CHEAP_DEBUG */
+	/* We update the global memory bookkeeping counter */
+	rsb_global_session_handle.allocated_memory-=size;
+	rsb_global_session_handle.allocations_count--;
+#ifdef RSB_WANT_DOUBLE_ALIGNED
+	/* We use the offset to restore the original pointer to free. */
+	free(((size_t*)((unsigned rsb_byte_t*)(rsb_data)-(0x10-off)))+RSB_MW_SHMO);
+#else /* RSB_WANT_DOUBLE_ALIGNED */
+	free((((size_t*)(rsb_data))+RSB_MW_SHMO));
+#endif /* RSB_WANT_DOUBLE_ALIGNED */
+	return rsb_data;
+#else /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+	/* RSB_DISABLE_ALLOCATOR_WRAPPER undefined */
+#ifdef RSB_WANT_DOUBLE_ALIGNED
+	return free(rsb_data);
+#else /* RSB_WANT_DOUBLE_ALIGNED */
+	return free(rsb_data);
+#endif /* RSB_WANT_DOUBLE_ALIGNED */
+#endif /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+}
+
+void * rsb__malloc(size_t size)
+{
+	/*!
+	 * TODO: DELETE THIS DEAD CODE
+	 * 
+	 * (c)allocates size bytes and an integer, in a way to keep track of the allocated chunk size
+	 * \param size is the amount of needed bytes to allocate
+	 *
+	 * if RSB_WANT_DOUBLE_ALIGNED is defined, the returned area will be double (64 bits) aligned.
+	 * this area should be deallocated with rsb__free.
+	 * */
+#ifndef RSB_DISABLE_ALLOCATOR_WRAPPER
+	void * p;
+#ifdef RSB_HAVE_POSIX_MEMALIGN
+	 /* we could integrate/replace with posix_memalign, memalign or continue using our custom code */
+#endif /* RSB_HAVE_POSIX_MEMALIGN */
+#ifdef RSB_WANT_DOUBLE_ALIGNED
+	/*
+	 * This is an explicit trick to give the user a double aligned memory area.
+	 * To achieve this, we allocate one extra double element plus a byte (four, really, for alignment reasons)
+	 * and write there the shift amount.
+	 *
+	 * Of course, we rely on sizeof(size_t) > sizeof(char).
+	 * */
+	size_t extra = sizeof(double)+sizeof(size_t)*2;
+#else /* RSB_WANT_DOUBLE_ALIGNED */
+	/*
+	 * We allocate one size_t element for storing allocation information (for explicit memory leak checking).
+	 * */
+	size_t extra = sizeof(size_t);
+#endif /* RSB_WANT_DOUBLE_ALIGNED */
+	if(RSB_ALLOC_LIMITS_TRESPASSED(size,1))
+	{rsb__print_memory_allocation_info(); return NULL;}
+	p = calloc( size + extra, 1 );
+	if(!p)return p;
+	*(size_t*)p=size;/* note : not size + extra */
+	rsb_global_session_handle.allocated_memory+=size;
+	rsb_global_session_handle.allocations_count++;
+#ifdef RSB_WANT_DOUBLE_ALIGNED
+	/*
+	 * WARNING : We determine the current alignment of p; we are interested in its last 4 bits,
+	 * really.
+	 * DANGER  : is this portable ?
+	 * */
+	size_t off=(((size_t)p)+sizeof(size_t))&0xF;	/* can be 0 ... F */
+	/*
+	RSB_STDERR("allocated totally %d bytes \n",size+extra);
+	RSB_STDERR("allocated %d ... \n",size);
+	RSB_STDERR("allocation offset %d ... \n",off);
+	*/
+	((unsigned rsb_byte_t*)(p))[sizeof(size_t)+((0x10-off)RSB_MW_SHMO)]=(unsigned char)off;/* will be used to compute back the base pointer allocated */
+	return ((unsigned rsb_byte_t*)p)+(0x10-off)+sizeof(size_t);
+#else /* RSB_WANT_DOUBLE_ALIGNED */
+	return ((size_t*)p)+1;
+#endif /* RSB_WANT_DOUBLE_ALIGNED */
+#else /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+	/* RSB_DISABLE_ALLOCATOR_WRAPPER undefined */
+#ifdef RSB_WANT_DOUBLE_ALIGNED
+	return calloc(size,1);
+#else /* RSB_WANT_DOUBLE_ALIGNED */
+	return calloc(size,1);
+#endif /* RSB_WANT_DOUBLE_ALIGNED */
+#endif /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+}
+/* END OF DEAD CODE */
+#endif /* 1 */
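+#if 0
+/* Illustrative sketch (not part of the library; all names hypothetical):
+ * the dead code above keeps the allocation size in a leading size_t, then
+ * shifts the user pointer up to the next 16-byte boundary and records the
+ * shift in the byte just before the returned address, so that the free
+ * routine can recover both. A minimal standalone rendition, relying only
+ * on stdlib's calloc/free, might read: */
+static void * toy_aligned_malloc(size_t size)
+{
+	unsigned char * p = calloc(size + sizeof(size_t) + 0x10, 1);
+	size_t off;
+
+	if(!p) return NULL;
+	*(size_t*)p = size;			/* bookkeeping: chunk size at the base */
+	off = (((size_t)p) + sizeof(size_t)) & 0xF;
+	p += sizeof(size_t) + (0x10 - off);	/* now 16-byte aligned */
+	p[-1] = (unsigned char)off;		/* remember the shift */
+	return p;
+}
+
+static void toy_aligned_free(void * q)
+{
+	unsigned char * p = q;
+	size_t off = p[-1];
+
+	free(p - (0x10 - off) - sizeof(size_t));	/* restore the calloc-ated base */
+}
+#endif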
+
+rsb_time_t rsb_do_time(void)
+{
+	/*!
+	   \ingroup gr_internals
+
+	   Returns a current relative time in seconds.
+	   The user should rely on this function only for time difference computations.
+	 */
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+	/* return omp_get_wtime(); */ /* for future use */
+#endif
+	/* SVr4, 4.3BSD.  POSIX.1-2001 */
+	/* ( could also use psb_wtime or mpi_wtime )*/
+	/* FIXME : gettimeofday() gives pessimistic estimates ! */
+	/* FIXME : gettimeofday() could be in time.h or sys/times.h */
+	/* FIXME : timer sanity is of paramount importance ! Should check for its sanity at startup! */
+#if defined(RSB_HAVE_GETTIMEOFDAY)
+	register double t = RSB_REAL_ZERO;
+	struct timeval tv1;
+	gettimeofday(&tv1, NULL);
+	t  =  (double)(tv1.tv_sec) + ((double)(tv1.tv_usec))*1.e-6;
+	return t;
+#elif defined(RSB_HAVE_TIMES) && defined(RSB_HAVE_SYSCONF) && defined(_SC_CLK_TCK)
+	/* POSIX.1 */
+	struct tms buffer;
+	times(&buffer);
+	return ( (rsb_time_t) ((clock_t)buffer.tms_utime) ) / ( (rsb_time_t)sysconf(_SC_CLK_TCK) );
+#else /* defined(RSB_HAVE_TIMES) && defined(RSB_HAVE_SYSCONF) && defined(_SC_CLK_TCK) */
+#error("You should better find timing routine, dude.\n")
+	return -1;/* this is bad */
+#endif /* defined(RSB_HAVE_TIMES) && defined(RSB_HAVE_SYSCONF) && defined(_SC_CLK_TCK) */
+}
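+/* Usage sketch (illustrative): since rsb_do_time() returns a relative time,
+ * callers measure durations by differencing, as e.g. rsb__timer_sanity()
+ * does below:
+ *
+ *	rsb_time_t dt = - rsb_do_time();
+ *	// ... the operation to be timed ...
+ *	dt += rsb_do_time(); // dt now holds the elapsed seconds
+ */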
+
+rsb_time_t rsb__timer_sanity(void)
+{
+	/*!
+		\ingroup gr_internals
+		
+		Could the timer lead to negative time intervals ? (we found it can happen, sadly)
+		\return the minimum interval length after a bunch of timer calls
+
+		TODO : could we do something about this ?
+	*/
+	rsb_time_t md,d;
+	int i;
+
+	md = - rsb_time();
+	md += rsb_time();
+	for(i=0;i<RSB_TIMER_SANITY_TEST_TIMES;++i)
+	{
+		d = - rsb_time();
+		d += rsb_time();
+		md=d<md?d:md;
+	}
+	return md;
+}
+
+rsb_time_t rsb__timer_granularity(void)
+{
+	/*!
+	   \ingroup gr_internals
+
+	 * Tries to estimate the granularity of the timing function (that is, its overhead) by measuring it.
+	 * This value is important when estimating minimal times for various self-benchmarking operations.
+	 *
+	 * A test measuring the average and deviation of this parameter may also be useful.
+	 * Note: it is not guaranteed to return if the timing function is broken.
+	 * */
+	register double t = RSB_TIME_ZERO, t0 = RSB_TIME_ZERO;
+	int times = RSB_TIMER_GRANULARITY_TEST_TIMES;
+	register int i = times;
+
+#if 0
+	i = times;
+	/* the results of the following two code snippets differ; this is likely due to numerical roundoff. */
+	while(i--)
+	{
+		t -= rsb_time();
+		/* no op, only call overhead */
+		t += rsb_time();
+	}
+	return t/(times*2);
+#else
+	/* this is more accurate (in particular: slower) but could be optimized out without an accumulator cookie (FIXME) */
+	t0 = rsb_time();
+	--i;
+	t = -t0;
+
+	while(i--)
+	{
+		/* no op, only call overhead */
+		rsb_time();
+		rsb_time();
+	}
+
+	t += rsb_time();
+	
+	t = t/(times*2);
+
+	if(t <= RSB_TIME_ZERO)
+		goto so_fast;
+	else
+		goto ret;
+so_fast: /* FIXME: No guarantee of return with a broken timing function. */
+	while( ( t = rsb_time() ) <= t0)
+		;
+	t -= t0;
+ret:
+	return t;
+#endif
+}
+
+rsb_err_t rsb__print_memory_allocation_info(void)
+{
+#ifndef RSB_DISABLE_ALLOCATOR_WRAPPER
+/*!
+ \ingroup gr_internals
+
+ * Prints the global memory allocation counters, for debugging purposes.
+ * */
+#if RSB_ALLOW_STDOUT
+	RSB_STDOUT("rsb_global_session_handle.allocated_memory       \t:%zu\n",(rsb_printf_int_t)rsb_global_session_handle.allocated_memory);
+	RSB_STDOUT("rsb_global_session_handle.allocations_count  \t:%zu\n",(rsb_printf_int_t)rsb_global_session_handle.allocations_count);
+	return RSB_ERR_NO_ERROR; 
+#endif /* RSB_ALLOW_STDOUT */
+#endif /* RSB_DISABLE_ALLOCATOR_WRAPPER */
+	return RSB_ERR_UNSUPPORTED_FEATURE;
+}
+
+void * rsb__calloc(size_t n)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * Allocates n bytes, set to zero.
+	 *
+	 * \param n is the amount of bytes to allocate
+	 * \return the newly allocated area.
+	 *
+	 * This memory area should be freed with rsb__free.
+	 * */
+	void * p = rsb__malloc(n);
+	if(p)
+		RSB_BZERO(p,n);
+#if(!RSB_QUIET_MEM_ERRORS)
+	/* TODO : it should be possible to opt out of message diagnostics, or make them debug-level based */
+        else
+	{
+                RSB_ERROR("cannot allocate %zu bytes!\n",n);
+		rsb__print_memory_allocation_info();
+	}
+#endif /* (!RSB_QUIET_MEM_ERRORS) */
+	/* should be ((int*)p)[RSB_MW_SHMO]==n */
+	return p;
+}
+
+void * rsb__calloc_parallel(size_t n)
+{
+	/*!
+	 * \ingroup gr_internals
+	 */
+	void *p = rsb__calloc(n);
+
+	if(p)
+		RSB_BZERO_parallel(p,n);
+	return p;
+}
+
+int rsb_error(const char * format, ...)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * \param format the printf-style format string.
+	 * \return an error code or 0
+	 *
+	 * For now, a wrapper around printf.
+	 * It will print the given arguments on stdout.
+	 * */
+	va_list ap;
+	int rc=0;
+
+	va_start(ap,format);
+#ifndef RSB_QUIET
+	rc = vprintf(format,ap);
+#endif /* RSB_QUIET */
+	va_end(ap);
+	return rc;
+}
+
+long rsb__get_lnc_size(int n)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * \returns the nth level data cache size if known (> 0), -1 on error, 0 if there is no cache at that level
+	 * Cache levels start from 1.
+	 * */
+	long cs=0;
+
+	if(rsb_global_session_handle.memory_hierarchy_levels>0)
+		return rsb_global_session_handle.caches[n].size;
+
+	switch(n)
+	{
+		case 1:
+		{
+#ifdef RSB_HAVE_SYSCONF 
+#ifdef _SC_LEVEL1_DCACHE_SIZE
+			cs=sysconf(_SC_LEVEL1_DCACHE_SIZE);
+#endif /* _SC_LEVEL1_DCACHE_SIZE */
+#endif /* RSB_HAVE_SYSCONF  */
+#ifdef _H_SYSTEMCFG
+			cs=_system_configuration.dcache_size;
+#endif /* _H_SYSTEMCFG */
+			if(cs == 0) cs = rsb__get_lnc_size_hwloc(n);
+		}
+		break;
+		case 2:
+		{
+#ifdef RSB_HAVE_SYSCONF 
+#ifdef _SC_LEVEL2_CACHE_SIZE
+			cs=sysconf(_SC_LEVEL2_CACHE_SIZE);
+#endif /* _SC_LEVEL2_CACHE_SIZE */
+#endif /* RSB_HAVE_SYSCONF */
+#ifdef _H_SYSTEMCFG
+			cs=_system_configuration.L2_cache_size;
+#endif /* _H_SYSTEMCFG */
+			if(cs == 0) cs = rsb__get_lnc_size_hwloc(n);
+		}
+		break;
+		case 3:
+		{
+#ifdef RSB_HAVE_SYSCONF 
+#ifdef _SC_LEVEL3_CACHE_SIZE
+			cs=sysconf(_SC_LEVEL3_CACHE_SIZE);
+#endif /* _SC_LEVEL3_CACHE_SIZE */
+#endif /* RSB_HAVE_SYSCONF */
+#ifdef _H_SYSTEMCFG
+	//		cs=_system_configuration.L3_cache_size; // Does not exist :(
+#endif /* _H_SYSTEMCFG */
+			if(cs == 0) cs = rsb__get_lnc_size_hwloc(n);
+		}
+		break;
+		default :
+		/* For now, we don't handle more cache levels */
+		cs=-1;
+	}
+	cs=cs<0?0:cs;
+	return cs;
+}
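+/* Usage sketch (illustrative): the data cache hierarchy can be probed level
+ * by level; a zero return means no cache is known at that level:
+ *
+ *	long l1 = rsb__get_lnc_size(1), l2 = rsb__get_lnc_size(2);
+ *	if(l2 > 0)
+ *		printf("L2 data cache: %ld bytes\n", l2);
+ */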
+
+long rsb__get_l1c_size(void)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * \return the first level data cache size.
+	 * \note see rsb__get_lnc_size
+	 * */
+	return rsb__get_lnc_size(1);
+}
+
+long rsb__get_l2c_size(void)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * \return the second level data cache size.
+	 * \note see rsb__get_lnc_size
+	 * */
+	return rsb__get_lnc_size(2);
+}
+
+long rsb__get_l3c_size(void)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * \return the third level data cache size.
+	 * \note see rsb__get_lnc_size
+	 * */
+	return rsb__get_lnc_size(3);
+}
+
+long rsb__get_l4c_size(void)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * \return the fourth level data cache size.
+	 * \note see rsb__get_lnc_size
+	 * */
+	return rsb__get_lnc_size(4);
+}
+
+long rsb__know_cache_sizes(void)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * \return whether the cache sizes are known
+	 * */
+	return rsb__get_cache_levels_num() > 0;
+}
+
+long rsb__get_first_level_c_size(void)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * \return the first level data cache size
+	 * or zero if not known or not available.
+	 * */
+	rsb_int_t cln = rsb__get_cache_levels_num();
+
+	if(rsb_global_session_handle.memory_hierarchy_levels>0)
+		return rsb_global_session_handle.caches[1].size;
+
+	if(cln>0)
+		return rsb__get_lnc_size(1);
+	else
+		return 0;
+}
+
+long rsb__get_lastlevel_c_size(void)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * \return the last level data cache size
+	 * or zero if not known or not available.
+	 * */
+	rsb_int_t cln = rsb__get_cache_levels_num();
+
+	if(rsb_global_session_handle.memory_hierarchy_levels>0)
+		return rsb_global_session_handle.caches[rsb_global_session_handle.memory_hierarchy_levels].size;
+
+	if(cln>0)
+		return rsb__get_lnc_size(cln);
+	else
+		return 0;
+}
+
+long rsb__get_cache_block_byte_size(void)
+{
+	/*!
+	 * \ingroup gr_internals
+	 * */
+	long flc = RSB_MIN(RSB_MAX(1,rsb_global_session_handle.memory_hierarchy_levels),2);
+	long cbs = RSB_MIN(rsb__get_lnc_size(flc),rsb__get_lastlevel_c_size_per_thread());
+
+	switch(rsb_global_session_handle.cache_blocking_method)
+	{
+		case -1: return cbs/2; break;
+		case  1: return cbs*2; break;
+		default:
+		case  0: return cbs;
+	}
+}
+
+static long rsb_want_executing_threads(void)
+{
+	/*!
+	  	\ingroup gr_internals
+		Will always return a value 1 <= N <= RSB_CONST_MAX_SUPPORTED_CORES
+		FIXME: make it so that
+		rsb_want_executing_threads() == rsb_set_executing_threads(),
+		or rather write
+		rsb__set_num_threads(RSB_THREADS_GET)
+	*/
+	long wt = 1;
+
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+	wt = rsb_global_session_handle.rsb_want_threads;
+	if(wt<RSB_CONST_MIN_SUPPORTED_CORES)
+		return RSB_CONST_MIN_SUPPORTED_CORES;
+	wt = RSB_MIN(wt,RSB_CONST_MAX_SUPPORTED_CORES);
+#endif
+	return wt;
+}
+
+long rsb__get_lastlevel_c_size_per_thread(void)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * \return the last level data cache size divided by the number of active threads
+	 * or zero if not known or not available.
+	 * */
+	return (rsb__get_lastlevel_c_size()/rsb_want_executing_threads());
+//	return rsb__get_lastlevel_c_size()/sqrt(rsb_want_executing_threads());
+}
+
+rsb_int_t rsb__get_cache_levels_num(void)
+{
+	/*!
+	 \ingroup gr_internals
+	 \return the count of cache levels if >0, -1 if unknown, 0 if no caches
+	*/
+	long cs,l=1;
+
+	for(l=1;l<RSB_MAX_SUPPORTED_CACHE_LEVELS;++l)
+	{
+		cs = rsb__get_lnc_size(l);
+		if(!cs){--l;break;}
+	}
+	return l;
+}
+
+int rsb_getopt_long(
+	int argc, char * const argv[], const char *optstring,
+	const rsb_option *longopts, int *longindex)
+{
+	/*!
+	   \ingroup gr_internals
+	  
+	   A compatibility wrapper.
+	 */
+#ifdef RSB_HAVE_GETOPT_LONG
+	return getopt_long(argc,argv,optstring,longopts,longindex);
+#else /* RSB_HAVE_GETOPT_LONG */
+	return getopt(argc,argv,optstring);	/* a remedy */
+#endif /* RSB_HAVE_GETOPT_LONG */
+}
+
+rsb_err_t rsb__sys_init(void)
+{
+	/*!
+	 \ingroup gr_internals
+
+	 checks some system-related initialization invariants,
+	 to prevent nasty errors during execution.
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	if(sizeof(rsb_err_t)<4)
+		RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_INTERNAL_ERROR);
+
+	if(sizeof(rsb_flags_t)<4)
+		RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_INTERNAL_ERROR);
+
+	/*! \todo : we could check for 'base' overflow cases to define
+	 	    some limit cases.
+	 */
+	if(sizeof(rsb_coo_idx_t)>sizeof(rsb_nnz_idx_t))
+		RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_INTERNAL_ERROR);
+
+	if(sizeof(rsb_blk_idx_t)>sizeof(rsb_coo_idx_t))
+		RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_INTERNAL_ERROR);
+
+	if(rsb__get_l1c_size()<=0)
+		rsb_global_session_handle.min_leaf_matrix_bytes = RSB_EXPERIMENTAL_MIN_LEAF_ELEMENTS*sizeof(double);
+	else
+		rsb_global_session_handle.min_leaf_matrix_bytes = rsb__get_l1c_size();
+
+#ifdef CHAR_BIT	/* limits.h */
+	if( RSB_CHAR_BIT != CHAR_BIT )
+		RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_INTERNAL_ERROR);
+#endif /* CHAR_BIT */
+
+	if(rsb__get_lastlevel_c_size()<0)
+		rsb_global_session_handle.avg_leaf_matrix_bytes=4*RSB_EXPERIMENTAL_MIN_LEAF_ELEMENTS*sizeof(double);
+	else
+		rsb_global_session_handle.avg_leaf_matrix_bytes = rsb__get_lastlevel_c_size()*2;
+#if RSB_WANT_RANDOM_MALLOC_FAULT_INJECTION
+	{
+		rsb_time_t tseed = rsb_time();
+		unsigned int uiseed=(*(unsigned int*)(&tseed));
+		RSB_WARN("#Starting library with enabled malloc fault injection.\n# Initializing with random seed of value: %u\n",uiseed);
+		/* In this way, the user may introduce faults by recompiling the code. TODO: rsb_lib_init() based seed passing. */
+		srand(uiseed);
+	}
+#endif /* RSB_WANT_RANDOM_MALLOC_FAULT_INJECTION */
+
+	RSB_DO_ERR_RETURN(errval)
+}
+
+void *rsb_memcpy(void *RSB_RESTRICT dest, const void *RSB_RESTRICT src, size_t n)
+{
+	/*!
+	  	\ingroup gr_internals
+
+		Say you want to use a custom memcpy function
+		or perform some statistics measurement:
+		this is the place to hack.
+	*/
+#if 0
+	{
+		register unsigned char*dp=NULL;
+		register const unsigned char*sp=NULL;
+		for(dp=dest,sp=src;RSB_LIKELY(dp<dest+n);++dp,++sp)
+			*dp=*sp;
+	}
+#else
+	return memcpy(dest,src,n);
+#endif
+}
+
+size_t rsb__sys_free_system_memory(void)
+{
+	/*!
+	  	\ingroup gr_internals
+
+		\return the free system memory in bytes, or 0 if unknown.
+	*/
+        size_t free_mem=0;
+        long int pagesize =0;
+        long int mem_pages=0;
+
+#ifdef RSB_HAVE_SYSCONF
+#if   defined(PAGESIZE)
+        pagesize=sysconf(PAGESIZE);
+#elif defined(_SC_PAGESIZE)
+        pagesize=sysconf(_SC_PAGESIZE);
+#elif defined(PAGE_SIZE)
+        pagesize=sysconf(PAGE_SIZE);
+#else /* PAGESIZE */
+#endif /* PAGESIZE */
+#if   defined(_SC_AVPHYS_PAGES)
+        mem_pages=sysconf(_SC_AVPHYS_PAGES);
+#endif /* _SC_AVPHYS_PAGES */
+#endif /* RSB_HAVE_SYSCONF */
+	if(pagesize<1 || mem_pages<1)
+		free_mem=0;
+	else
+		free_mem=((size_t)pagesize)*((size_t)mem_pages);
+	return free_mem;
+}
+
+size_t rsb__sys_total_system_memory(void)
+{
+	/*!
+	  	\ingroup gr_internals
+		\return the total system memory in bytes, or 0 if unknown.
+	*/
+        size_t tot_mem=0;
+        long int pagesize =0;
+        long int mem_pages=0;
+
+#ifdef RSB_HAVE_SYSCONF
+#if   defined(PAGESIZE)
+        pagesize=sysconf(PAGESIZE);
+#elif defined(_SC_PAGESIZE)
+        pagesize=sysconf(_SC_PAGESIZE);
+#elif defined(PAGE_SIZE)
+        pagesize=sysconf(PAGE_SIZE);
+#else /* PAGE_SIZE */
+#endif /* PAGE_SIZE */
+#if   defined(_SC_PHYS_PAGES)
+        mem_pages = sysconf(_SC_PHYS_PAGES);
+#endif /* _SC_PHYS_PAGES */
+#endif /* RSB_HAVE_SYSCONF */
+	if(pagesize<1 || mem_pages<1)
+		tot_mem=0;
+	else
+		tot_mem=((size_t)pagesize)*((size_t)mem_pages);
+	return tot_mem;
+}
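+/* Example (illustrative numbers): with a 4096-byte page size and 2097152
+ * available pages, rsb__sys_free_system_memory() reports
+ * 4096 * 2097152 = 8589934592 bytes, i.e. 8 GiB. */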
+
+static long rsb_set_executing_threads(long tn)
+{
+	/*!
+	 	FIXME: new
+	  	\ingroup gr_internals
+	*/
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+	/* multi threaded case */
+	if(tn > RSB_CONST_MAX_SUPPORTED_CORES)
+	{
+		RSB_ERROR("cannot set %ld threads: a maximum of %ld is supported\n",tn,RSB_CONST_MAX_SUPPORTED_CORES);
+		return RSB_CONST_MIN_SUPPORTED_CORES;
+	}
+	tn = RSB_MIN(tn,RSB_CONST_MAX_SUPPORTED_CORES);
+	if(tn < RSB_CONST_MIN_SUPPORTED_CORES)
+	{
+		/* a value < 0 means the user wants the threads count to be set automatically */
+	//	return 1;
+		tn = rsb_global_session_handle.rsb_g_threads;
+	}
+
+#if (RSB_TIME_SET_THREADS==1)
+	rsb_time_t dt = -rsb_time();
+#endif
+#if (RSB_USE_OMP_SET_NUM_THREADS==1)
+	omp_set_num_threads(tn);
+#endif
+#if (RSB_TIME_SET_THREADS==1)
+	dt += rsb_time();
+	RSB_STDOUT("setting threads (%d) took %lf s\n",tn,dt);
+#endif
+	/* FIXME : 20101111 on my GNU box, the following does not return tn, but seems to have an effect. Weird */
+	//tn = omp_get_num_threads();
+	rsb_global_session_handle.rsb_want_threads = tn;/* FIXME : a hack */
+	return tn;
+#else /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+	/* single threaded case */
+	return RSB_CONST_MIN_SUPPORTED_CORES;
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+}
+
+rsb_err_t rsb__lock_as_memory_resident(rsb_bool_t dolock)
+{
+	/* FIXME: need mechanisms to get/set/restore this setting */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+#if RSB_HAVE_MLOCKALL
+	int retval = 0;
+	/* "mlockall() locks all pages mapped into the address space of the calling process. "*/
+	if(dolock)
+	{
+		retval = mlockall(MCL_FUTURE|MCL_CURRENT); 
+	}
+	else
+	{
+		retval = munlockall(); 
+	}
+	if(retval)
+	{
+		errval = RSB_ERR_INTERNAL_ERROR;
+		/* FIXME: shall introduce RSB_ERR_SYSCALL_ERROR */
+		RSB_ERROR(RSB_ERRM_FCOVMU);
+	}
+#else /* RSB_HAVE_MLOCKALL */
+	RSB_ERROR(RSB_ERRM_COVMUINS);
+	errval = RSB_ERR_UNSUPPORTED_FEATURE;
+#endif /* RSB_HAVE_MLOCKALL */
+	return errval; /* what about retval ? */
+}
+
+int rsb__fileno(FILE *stream)
+{
+#ifndef RSB_HAVE_FILENO
+	return -1;
+#else /* RSB_HAVE_FILENO */
+	return fileno(stream);
+#endif /* RSB_HAVE_FILENO */
+}
+
+rsb_int_t rsb__set_num_threads(rsb_int_t tn)
+{
+	/*!
+	   	\ingroup rsb_doc_library rsb_doc_rsb 
+
+	 	Gets and/or sets the number of librsb running threads.
+		If \a tn is RSB_THREADS_AUTO, sets the thread count to a default value.
+		If \a tn is RSB_THREADS_GET, only returns the count of active threads.
+		If \a tn is RSB_THREADS_GET_MAX, returns the maximal count of supported threads.
+	 	\todo: shall promote this to an rsb.h function, make all similar ones static, and put them in thread.c !
+		\return: number of running threads.
+	 */
+
+	long rtn=0;
+
+	switch(tn)
+	{
+		case(RSB_THREADS_GET):
+		rtn = rsb_want_executing_threads();
+		break;
+		case(RSB_THREADS_AUTO):
+		tn=0;
+		break;
+		case(RSB_THREADS_GET_MAX_SYS):
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+		/* rtn = omp_get_max_threads(); */
+		/* rtn = omp_get_thread_limit(); */
+		rtn = rsb_global_session_handle.rsb_g_threads;
+#else /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+		rtn = 1;
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+		break;
+		case(RSB_THREADS_GET_MAX_LIB):
+		rtn = RSB_CONST_MAX_SUPPORTED_CORES;
+		break;
+		case(RSB_THREADS_GET_MAX):
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+		rtn = omp_get_max_threads();
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+		rtn = RSB_MIN(rtn,RSB_CONST_MAX_SUPPORTED_CORES);
+		break;
+		default:
+		rtn = rsb_set_executing_threads(tn);
+	}
+	RSB_DEBUG_ASSERT(rtn>0);
+	return (rsb_int_t) rtn;
+}
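+/* Usage sketch (illustrative): the special negative values declared in
+ * rsb_sys.h select query semantics instead of setting a thread count:
+ *
+ *	rsb_int_t cur = rsb__set_num_threads(RSB_THREADS_GET);     // query only
+ *	rsb_int_t max = rsb__set_num_threads(RSB_THREADS_GET_MAX); // supported maximum
+ *	rsb__set_num_threads(cur);                                 // set explicitly
+ */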
+
+#if 0
+#include <execinfo.h>
+void rsb_print_trace (void)
+{
+	/* according to a glibc docs example */
+	void *array[10];
+	size_t size;
+	char **strings;
+	size_t i;
+	size = backtrace (array, 10);
+	strings = backtrace_symbols (array, size);
+	printf ("Obtained %zd stack frames.\n", size);
+	for (i = 0; i < size; i++)
+		printf ("%s\n", strings[i]);
+	free (strings);
+}
+#endif
+
+#if RSB_USE_RUSAGE
+static rsb_time_t rsb_rstv(struct timeval*tvp)
+{
+        register double t = 0.0;
+        t  =  (double)(tvp->tv_sec) + ((double)(tvp->tv_usec))*1.e-6;
+        return t;
+}
+
+#define RSB_K 1024
+rsb_err_t rsb__getrusage(void)
+{
+	/*
+	 * Shall work independently of rsb_lib_init/rsb_lib_exit.
+	 * */
+	struct rusage usage;
+	int gru = getrusage(RUSAGE_SELF,&usage);
+
+	RSB_STDOUT("getrusage() stats:\n");
+	/*("ru_ixrss : %ld (integral shared memory size)\n",usage.ru_ixrss);*/
+	RSB_STDOUT("ru_maxrss: %ld (maximum resident set size -- MB)\n",usage.ru_maxrss / RSB_K);
+	RSB_STDOUT("ru_stime : %0.4lgs (system CPU time used)\n",rsb_rstv(&usage.ru_stime));
+	RSB_STDOUT("ru_utime : %0.4lgs (user CPU time used)\n",rsb_rstv(&usage.ru_utime));
+#if 0
+	RSB_STDOUT("ru_utime : %0.4lg (user page faults (hard page faults))\n",rsb_rstv(&usage.ru_majflt));
+	RSB_STDOUT("ru_utime : %0.4lg (page reclaims (soft page faults))\n",rsb_rstv(&usage.ru_minflt));
+#endif
+
+	return gru == 0 ? RSB_ERR_NO_ERROR : RSB_ERR_GENERIC_ERROR;
+}
+#else /* RSB_USE_RUSAGE */
+rsb_err_t rsb__getrusage(void)
+{
+	return RSB_ERR_NO_ERROR;
+}
+#endif /* RSB_USE_RUSAGE */
+
+const rsb_char_t * rsb__getenv(const rsb_char_t * name)
+{
+	const rsb_char_t * evv = NULL;
+	
+#ifdef RSB_HAVE_GETENV
+	evv = getenv(name);
+#endif /* RSB_HAVE_GETENV */
+
+	return evv;
+}
+
+const rsb_char_t * rsb__getenv_nnr(const rsb_char_t * name)
+{
+	RSB_DEBUG_ASSERT( name != NULL );
+	return rsb__getenv(name) ? rsb__getenv(name) : name + strlen(name);
+}
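+/* Note (illustrative; "nnr" presumably standing for "no NULL return"):
+ * rsb__getenv_nnr never returns NULL -- if the variable is unset it falls
+ * back to the empty string terminating its argument -- so it is safe in
+ * printf-like contexts:
+ *
+ *	printf("host: %s\n", rsb__getenv_nnr("HOSTNAME"));
+ */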
+
+long rsb__get_lnc_size_hwloc(int n)
+{
+	/* Gets cache size using hwloc.h. EXPERIMENTAL */
+	long size = 0;
+#if RSB_WITH_HWLOC
+	int levels = 0;
+	hwloc_topology_t topology;
+	hwloc_cpuset_t cpuset;
+	hwloc_obj_t obj;
+	hwloc_topology_init(&topology);
+	hwloc_topology_load(topology);
+	for (obj = hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 0); obj; obj = obj->parent)
+		if (obj->type == HWLOC_OBJ_CACHE)
+			if(++levels == n)
+        			size = obj->attr->cache.size;
+    	hwloc_topology_destroy(topology);
+#endif	/* RSB_WITH_HWLOC */
+	return size;
+}
+
+/* @endcond */
diff --git a/rsb_sys.h b/rsb_sys.h
new file mode 100644
index 0000000..091d2e8
--- /dev/null
+++ b/rsb_sys.h
@@ -0,0 +1,259 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/**
+ * @file
+ * @brief System, or standard library related functions.
+ * @author Michele Martone
+ * */
+#ifndef RSB_SYS_H_INCLUDED
+#define RSB_SYS_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include <stdlib.h>
+#include <time.h>	/* clock, difftime */
+#include <sys/time.h>	/* timeval,gettimeofday */
+#include <stdio.h>	/* printf */
+#include <strings.h>   /* formerly bzero (now using memset) */
+#include <string.h>	/* memset, memcpy */
+#include <stdarg.h>	/* vprintf, va_start, va_end */
+#if RSB_HAVE_LIMITS_H 
+#include <limits.h>	/* CHAR_BIT */
+#endif /* RSB_HAVE_LIMITS_H  */
+#include "rsb_perf.h"
+
+void * rsb__malloc(size_t size);
+void * rsb__calloc(size_t n);
+void * rsb__calloc_parallel(size_t n);
+rsb_time_t rsb_do_time(void);
+rsb_time_t rsb__timer_granularity(void );
+void * rsb__free(void *rsb_data);
+void * rsb__realloc(void *rsb_data, size_t size);
+void * rsb__do_realloc(void *rsb_data, size_t size, size_t alignment);
+void * rsb__aligned_malloc(size_t size, size_t alignment);
+rsb_err_t rsb__sys_info(void);
+long rsb__get_l1c_size(void);
+long rsb__get_l2c_size(void);
+long rsb__get_l3c_size(void);
+long rsb__get_l4c_size(void);
+long rsb__get_lastlevel_c_size(void);
+long rsb__get_first_level_c_size(void);
+rsb_int_t rsb__get_cache_levels_num(void);
+long rsb__get_lnc_size(int n);
+long rsb__know_cache_sizes(void);
+rsb_err_t rsb__sys_init(void);
+/* long rsb_want_executing_threads(void); */
+
+#ifndef RSB_CONDITIONAL_FREE_FAKE
+#define RSB_CONDITIONAL_FREE_FAKE(p) {if((p))/*rsb__free((p))*/;(p)=NULL;}
+#endif /* RSB_CONDITIONAL_FREE_FAKE */
+/* A useful macro */
+#ifndef RSB_CONDITIONAL_FREE
+#define RSB_CONDITIONAL_FREE(p) {if((p)){rsb__free((p));(p)=NULL;}} /* frees and nullifies the associated pointer. */
+/*#define RSB_CONDITIONAL_FREE(p) RSB_CONDITIONAL_FREE_FAKE(p) */
+#endif /* RSB_CONDITIONAL_FREE */
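+/* Usage sketch (illustrative): RSB_CONDITIONAL_FREE both frees and nullifies,
+ * so repeated invocations on the same pointer are harmless:
+ *
+ *	void * buf = rsb__malloc(1024);
+ *	RSB_CONDITIONAL_FREE(buf); // frees; buf is now NULL
+ *	RSB_CONDITIONAL_FREE(buf); // no-op: buf is already NULL
+ */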
+
+extern int rsb_error(const char * format, ...);
+void rsb__g_rsb_memory_counter_init(void);
+size_t rsb__get_g_rsb_allocations_count(void);
+
+/* ... and what about __PRETTY_FUNCTION__ ? */
+#ifndef __func__
+#ifdef __STDC_VERSION__
+#if __STDC_VERSION__ < 199901L
+# if __GNUC__ >= 2
+#  define __func__ __FUNCTION__
+# else /* __GNUC__  */
+#  define __func__ "<unknown>"
+# endif /* __GNUC__  */
+#endif /* __STDC_VERSION__ */
+#else /* __STDC_VERSION__ */
+# define __func__ "<unknown>"
+#endif /* __STDC_VERSION__ */
+#endif /* __func__ */
+
+/* '1' is better than '{;}' in situations like : if(..)RSB_INFO(...);else RSB_INFO(...); */
+#define RSB_NULL_EXPRESSION_FOR_ZEN_HAPPINESS 1
+#define RSB_NULL_COMMA_STATEMENT_FOR_ZEN_HAPPINESS RSB_NULL_EXPRESSION_FOR_ZEN_HAPPINESS
+
+/* FIXME : write a RSB_PRINTF */
+/* note that the first argument should be a format string! */
+/* note that variadic macros should not be supported by standard C++ */
+/* note that putting brackets around this macro in the macro itself would break
+	conditionals like:
+	if(foo)
+		RSB_ERROR("bar");
+	else
+		RSB_ERROR("baz");
+ */
+#if RSB_INT_ERR_VERBOSITY==1
+
+/* #define RSB_FFL_PRINTF printf("In file %20s (in %s) at line %10d:\n",__FILE__,__func__,__LINE__) */
+#define RSB_FFL_PRINTF printf("In %s located in %20s:%d :\n",__func__,__FILE__,__LINE__)
+
+#define RSB_DEPRECATED( ... ) \
+	RSB_FFL_PRINTF,	\
+	rsb_error( __VA_ARGS__ ), \
+	printf(" is DEPRECATED !!\n") \
+
+#define RSB_ERROR( ... ) \
+	RSB_FFL_PRINTF,	\
+	rsb_error( __VA_ARGS__ )
+
+#define RSB_OCTAVE_ERROR( ... ) \
+	RSB_ERROR("ERROR:"), RSB_ERROR( __VA_ARGS__ ),octave_failed_tests++;
+
+#define RSB_FATAL( ... ) {RSB_ERROR( __VA_ARGS__ );exit(-1);}
+#else /* RSB_INT_ERR_VERBOSITY */
+#define RSB_DEPRECATED( ... )  RSB_NULL_COMMA_STATEMENT_FOR_ZEN_HAPPINESS 
+#define RSB_ERROR( ... ) RSB_NULL_COMMA_STATEMENT_FOR_ZEN_HAPPINESS 
+#define RSB_OCTAVE_ERROR( ... ) RSB_NULL_COMMA_STATEMENT_FOR_ZEN_HAPPINESS 
+#define RSB_FATAL( ... )  RSB_NULL_COMMA_STATEMENT_FOR_ZEN_HAPPINESS 
+#endif /* RSB_INT_ERR_VERBOSITY */
+/* FIXME: RSB_FATAL is obsolete */
+/* FIXME: RSB_ERROR shall be diversified */
+
+
+#define RSB_IOLEVEL RSB_WANT_IO_LEVEL 
+/*#define RSB_IOLEVEL 7*/ /* FIXME: EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL EXPERIMENTAL */
+#define RSB_ALLOW_FPRINTF (RSB_IOLEVEL&4)
+#define RSB_ALLOW_STDOUT  (RSB_IOLEVEL&1)
+#define RSB_ALLOW_STDERR  (RSB_IOLEVEL&2)
+
+#if RSB_ALLOW_STDERR
+/* WARNING : calling this without arguments causes segfaults! */
+#define RSB_STDERR( ... ) fprintf(stderr, __VA_ARGS__ )
+#else /* RSB_ALLOW_STDERR */
+#define RSB_STDERR( ... ) RSB_NULL_COMMA_STATEMENT_FOR_ZEN_HAPPINESS 
+#endif /* RSB_ALLOW_STDERR */
+#define RSB_IO_ERROR RSB_STDERR
+#define RSB_IO_NOTICE RSB_STDERR
+
+#if RSB_ALLOW_STDOUT
+/* explicit standard output printout */
+#define RSB_STDOUT( ... ) fprintf(stdout, __VA_ARGS__ )
+
+/* */
+/*#define RSB_DEBUGINFO( ... ) printf("%s @ %10d (%s):\n",__FILE__,__LINE__,__func__),RSB_STDOUT(__VA_ARGS__)*/
+
+/** RSB_WARN is used for not-yet-implemented features, where a warning is preferable to triggering an error.  */
+#define RSB_WARN( ... ) \
+	RSB_STDOUT("%s\n#","#*****************************************************************************"),\
+	RSB_STDOUT( __VA_ARGS__ ),\
+	RSB_STDOUT("%s\n","#*****************************************************************************")
+#else
+#define RSB_STDOUT( ... ) RSB_NULL_COMMA_STATEMENT_FOR_ZEN_HAPPINESS 
+#define RSB_WARN( ... ) RSB_NULL_COMMA_STATEMENT_FOR_ZEN_HAPPINESS 
+#endif /* RSB_ALLOW_STDOUT */
+
+
+#if (RSB_WANT_IO_LEVEL==0)
+#define RSB_QUIET 1
+#endif /* RSB_WANT_IO_LEVEL */
+
+/* RSB_INFO is the stream of informative messages which are user requested and expected (that is, not errors). */
+#ifdef RSB_QUIET
+#define RSB_INFO( ... ) RSB_NULL_COMMA_STATEMENT_FOR_ZEN_HAPPINESS 	
+#else /* RSB_QUIET */
+#define RSB_INFO( ... ) ((rsb_global_session_handle.out_stream)?fprintf(rsb_global_session_handle.out_stream, __VA_ARGS__ ):RSB_NULL_EXPRESSION_FOR_ZEN_HAPPINESS)
+#endif /* RSB_QUIET */
+/* RSB_FPRINTF is just a tool */
+#define RSB_FPRINTF( ... ) fprintf( __VA_ARGS__ )
+
+#if   defined(__GNUC__)
+	/* GCC */
+        #define RSB_UNLIKELY(expr) __builtin_expect(!!(expr),0)
+        #define RSB_LIKELY(expr)   __builtin_expect(!!(expr),1)
+        #define RSB_ALIGNED __attribute__((aligned (sizeof(double)*sizeof(unsigned char))))
+/*        #define RSB_ALIGNED __attribute__((aligned (64)))	*/
+#else /* __GNUC__ */
+        #define RSB_UNLIKELY(expr)  (expr)
+        #define RSB_LIKELY(expr)   (expr)
+        #define RSB_ALIGNED
+#endif /* __GNUC__ */
+#define RSB_PERFORMANCE_BINARY_DUMP_FILE_SIGNATURE_MAX_CHARS 128
+#define RSB_PERFORMANCE_BINARY_DUMP_FILE_SIGNATURE \
+"this is a non portable performance dump file, dude........\x40\x40\x40\x40"
+
+#define RSB_TIMER_GRANULARITY_TEST_TIMES (1024*128)
+#define RSB_TIMER_SANITY_TEST_TIMES (1024)
+#define RSB_MIN_ALLOWED_CACHE_BLOCK_SIZE (1024)	/* in bytes */
+#define RSB_MAX_ALLOWED_CACHE_BLOCK_SIZE ((1024)*(1024)*(1024))	/* in bytes */
+
+#define RSB_BZERO(b,len) (memset((b), '\0', (len)), (void) 0) /* recommendation from IEEE Std 1003.1 since bzero has been made legacy */
+
+#define RSB_BZERO_P(P) RSB_BZERO(P,sizeof(*(P)))
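+/* Usage sketch (illustrative): RSB_BZERO_P zeroes the object a pointer refers
+ * to, deriving the length from the pointed-to type:
+ *
+ *	struct rsb_coo_matrix_t coo;
+ *	RSB_BZERO_P(&coo); // same as RSB_BZERO(&coo,sizeof(coo))
+ */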
+
+#define RSB_MEMMOVE memmove	/**< leaves the chance of substituting a custom memmove function */
+
+#ifdef RSB_HAVE_STRCPY
+#define RSB_STRCPY strcpy	/**< leaves the chance of substituting a custom strcpy function */
+#else /* RSB_HAVE_STRCPY */
+#error "missing a definition of RSB_STRCPY!"
+#endif /* RSB_HAVE_STRCPY */
+
+#ifdef RSB_HAVE_MEMCMP
+#define RSB_MEMCMP memcmp	/**< leaves the chance of substituting a custom memcmp function */
+#else /* RSB_HAVE_MEMCMP */
+#error "missing a definition of RSB_MEMCMP!"
+#endif /* RSB_HAVE_MEMCMP */
+
+int rsb_getopt_long( int argc, char * const argv[], const char *optstring, const rsb_option *longopts, int *longindex);
+#define rsb_numerical_memcpy(TYPECODE,DST,DOFF,SRC,SOFF,N) {size_t es = RSB_NUMERICAL_TYPE_SIZE(TYPECODE); 	\
+	rsb_memcpy( 					\
+			((rsb_byte_t*)(DST)+es*(DOFF)) ,	\
+			((const rsb_byte_t*)(SRC)+es*(SOFF)) ,	\
+			es*(N));	\
+} 		/* see rsb__xcopy(DST,SRC,DOFF,SOFF,N,size_t el_size) */
+void *rsb_memcpy(void *RSB_RESTRICT dest, const void *RSB_RESTRICT src, size_t n);
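+/* Usage sketch (illustrative; RSB_NUMERICAL_TYPE_DOUBLE assumed to be among
+ * the configured type codes): copy N numerical elements of a given type
+ * between element offsets of two arrays:
+ *
+ *	double src[8], dst[8];
+ *	rsb_numerical_memcpy(RSB_NUMERICAL_TYPE_DOUBLE,dst,0,src,2,4);
+ *	// copies src[2..5] into dst[0..3]
+ */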
+
+rsb_time_t rsb__timer_sanity(void);
+size_t rsb__sys_free_system_memory(void);
+size_t rsb__sys_total_system_memory(void);
+long rsb__get_lastlevel_c_size_per_thread(void);
+long rsb__get_cache_block_byte_size(void);
+/* long rsb_set_executing_threads(long tn); */
+rsb_err_t rsb__print_memory_allocation_info(void);
+rsb_err_t rsb__lock_as_memory_resident(rsb_bool_t dolock);
+int rsb__fileno(FILE *stream);
+rsb_err_t rsb__getrusage(void);
+const rsb_char_t * rsb__getenv(const rsb_char_t * name);
+const rsb_char_t * rsb__getenv_nnr(const rsb_char_t * name);
+long rsb__get_lnc_size_hwloc(int n);
+
+#define RSB_THREADS_GET_MAX_LIB	-5
+#define RSB_THREADS_GET_MAX_SYS	-4
+#define RSB_THREADS_GET_MAX	-3
+#define RSB_THREADS_GET		-2
+#define RSB_THREADS_AUTO	-1
+rsb_int_t rsb__set_num_threads(rsb_int_t tn);
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+#endif /* RSB_SYS_H_INCLUDED */
+
+/* @endcond */
diff --git a/rsb_test_accuracy.c b/rsb_test_accuracy.c
new file mode 100644
index 0000000..362a058
--- /dev/null
+++ b/rsb_test_accuracy.c
@@ -0,0 +1,237 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This source file contains some functions testing accuracy.
+ * */
+
+#include "rsb_internals.h"
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+#define RSB_NORMWISE_BACKWARD_ERROR_TOLERANCE 1.e-6
+#define RSB_WANT_VERBOSE_ACCURACY_TESTS 0
+
+rsb_err_t rsb__vectors_reinit(void *rhs, void *out, rsb_type_t typecode, rsb_nnz_idx_t rn, rsb_nnz_idx_t on, size_t incr, size_t inco) 
+{
+	if(RSB_SOME_ERROR(rsb__cblas_Xscal(typecode,on,NULL,out,inco))){ return RSB_ERR_INTERNAL_ERROR;  }
+	if(RSB_SOME_ERROR(rsb__fill_with_ones( rhs,typecode,rn,incr))){ return RSB_ERR_INTERNAL_ERROR; }
+	return RSB_ERR_NO_ERROR;
+}
+
+void * rsb__calloc_vector(rsb_nnz_idx_t n, rsb_type_t typecode)
+{
+       	size_t so = RSB_SIZEOF(typecode);
+	return rsb__calloc(so*n);
+}
+
+void * rsb__malloc_vector(rsb_nnz_idx_t n, rsb_type_t typecode)
+{
+       	size_t so = RSB_SIZEOF(typecode);
+	return rsb__malloc(so*n);
+}
+
+void * rsb__realloc_vector(void* p, rsb_nnz_idx_t n, rsb_type_t typecode)
+{
+       	size_t so = RSB_SIZEOF(typecode);
+	return rsb__realloc(p,so*n);
+}
+
+rsb_err_t rsb__init_rsb_struct_from_coo(struct rsb_mtx_t *mtxAp, const struct rsb_coo_matrix_t *coop)
+{
+	/* FIXME: UNFINISHED  */
+	/* FIXME: static, local, new, and thus no error checking! */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb__init_struct(mtxAp);
+	mtxAp->bpntr=coop->IA;
+	mtxAp->bindx=coop->JA;
+	mtxAp->VA=coop->VA;
+	mtxAp->typecode=coop->typecode;
+	mtxAp->nr=coop->nr;
+	mtxAp->nc=coop->nc;
+	mtxAp->nnz=coop->nnz;
+	mtxAp->br=1;
+	mtxAp->bc=1;
+	mtxAp->cpntr=0;
+	mtxAp->rpntr=0;
+	mtxAp->roff=0;
+	mtxAp->coff=0;
+	mtxAp->broff=0;
+	mtxAp->bcoff=0;
+	return errval;
+}
+
+rsb_err_t rsb__do_spmv_fullword_coo(const struct rsb_coo_matrix_t*coop, rsb_flags_t flags, const void * x, void * y, const void *alphap, const void * betap, rsb_coo_idx_t incx, rsb_coo_idx_t incy, rsb_trans_t transA)
+{
+	/* FIXME: UNFINISHED  */
+	struct rsb_mtx_t mtxA;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	RSB_DO_ERROR_CUMULATE(errval,rsb__init_rsb_struct_from_coo(&mtxA,coop));
+	if(RSB_SOME_ERROR(errval)) goto err;
+	flags = RSB_FLAG_DEFAULT_COO_MATRIX_FLAGS|RSB_DO_FLAG_FILTEROUT((flags),RSB_DO_FLAGS_EXTRACT_STORAGE(flags));
+	if((errval = rsb__do_set_init_storage_flags(&mtxA,flags))!=RSB_ERR_NO_ERROR)
+		goto err;
+	RSB_DO_FLAG_ADD(mtxA.flags,RSB_DO_FLAG_FILTERONLY(flags,RSB_FLAGS_RSB_AGNOSTIC));
+	RSB_DO_ERROR_CUMULATE(errval,rsb_do_spmv_non_recursive(&mtxA,x,y,alphap,betap,incx,incy,transA RSB_DEFAULT_INNER_NRHS_SPMV_ARGS	) );
+err:
+	return errval;
+}
+
+static rsb_err_t rsb_do_check_normwise_backward_error(const struct rsb_mtx_t*mtxAp, const void *X, const void *AX, const void * B, rsb_trans_t transA)
+{
+	/* normwise backward error in the infinity norm */
+	/* FIXME: UNFINISHED  */
+	/* FIXME: this won't work for integer, of course  */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_aligned_t Xinorm[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_aligned_t Binorm[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_aligned_t Ainorm[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_aligned_t denominator[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_aligned_t err[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_aligned_t eps[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_type_t typecode = mtxAp->typecode;
+	rsb_coo_idx_t tm = RSB_MTX_TRANSPOSED_ROWS(mtxAp,transA);
+	rsb_coo_idx_t tk = RSB_MTX_TRANSPOSED_COLS(mtxAp,transA);
+	RSB_NUMERICAL_TYPE_SET_ELEMENT_FROM_DOUBLE(eps,RSB_NORMWISE_BACKWARD_ERROR_TOLERANCE,typecode);
+	if(RSB_SOME_ERROR(errval = rsb__do_matrix_norm(mtxAp,Ainorm,RSB_EXTF_NORM_INF))){goto err;};
+	if(RSB_SOME_ERROR(errval = rsb__vector_sum_of_abs(Xinorm,X,typecode,tk))){goto err;}
+	if(RSB_SOME_ERROR(errval = rsb__vector_sum_of_abs(Binorm,B,typecode,tm))){goto err;}
+	if(RSB_SOME_ERROR(errval = rsb__vector_mult(Ainorm,Xinorm,denominator,typecode,1))){goto err;};;
+	if(RSB_SOME_ERROR(errval = rsb__util_vector_add(denominator,Binorm,typecode,1))){goto err;};;
+	if(RSB_SOME_ERROR(errval = rsb__vector_sum_of_abs_diffs(err,AX,B,typecode,tk))){goto err;};
+	if(RSB_SOME_ERROR(errval = rsb__util_vector_div(err,denominator,typecode,1))){goto err;};;
+	// checking if the backward error exceeds the tolerance
+	if(!RSB_IS_ELEMENT_LESS_THAN(err,eps,typecode))
+	{
+		errval = RSB_ERR_INTERNAL_ERROR;
+		if(RSB_WANT_VERBOSE_ACCURACY_TESTS)
+		RSB_ERROR("error is %lg, more than %lg!\n",*(double*)(&err[0]),*(double*)(&eps[0]));
+		goto err;
+	}
+	else
+	{
+		if(RSB_WANT_VERBOSE_ACCURACY_TESTS)
+		RSB_STDOUT("error is %lg, less than %lg.\n",*(double*)(&err[0]),*(double*)(&eps[0]));
+	}
+err:
+	return errval;
+}
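+/* Schematically (illustrative), the check above accepts the computed solution
+ * when the normwise backward error
+ *
+ *	eta = |A*x - b| / ( |A| * |x| + |b| )
+ *
+ * (with the norms computed as above) stays below
+ * RSB_NORMWISE_BACKWARD_ERROR_TOLERANCE, i.e. 1.e-6. */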
+
+rsb_err_t rsb__do_spmv_accuracy_test(const struct rsb_coo_matrix_t*coop, rsb_thread_t * ca, rsb_thread_t cn, rsb_flags_t flags)
+{
+	/* FIXME: UNFINISHED  */
+	/* this is mostly a debug function */
+	/* FIXME: what about incx/incy/trans ? */
+	/* FIXME: shall support ca=NULL, cn=0  */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_thread_t ci=0;
+	void*X=NULL,*Y=NULL,*Z=NULL;
+	rsb_coo_idx_t vd;
+	rsb_coo_idx_t incx=1,incy=1;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	rsb_aligned_t alpha[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_aligned_t twoalpha[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_aligned_t zsum[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_thread_t default_ca[2],default_cn=2;
+	void*alphap=alpha,*betap=NULL/*,*twoalphap=twoalpha*/;
+	if(RSB_SOME_ERROR(errval = rsb__util_is_valid_coo_struct(coop)))
+	{
+		goto err;
+	}
+	rsb__util_set_area_to_converted_integer(alpha,coop->typecode,1);
+	rsb__util_set_area_to_converted_integer(twoalpha,coop->typecode,2);
+	vd = RSB_MAX(coop->nr,coop->nc);
+	X = rsb__malloc_vector(vd,coop->typecode);
+	Y = rsb__malloc_vector(vd,coop->typecode);
+	Z = rsb__calloc_vector(vd,coop->typecode);
+	if(!X||!Y||!Z){RSB_ERROR("vector allocation problems!\n");RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_ENOMEM);goto err;}
+	rsb__fill_with_ones(X,coop->typecode,vd,1);
+	/* we compute spmv on this coo instance */
+	errval = rsb__do_spmv_fullword_coo(coop,flags,X,Z,alphap,betap,incx,incy,transA);
+	if(RSB_SOME_ERROR(errval)){RSB_ERROR("!\n");goto err;}
+	errval = rsb__util_vector_sum(zsum,Z,coop->typecode,vd);
+	if(RSB_SOME_ERROR(errval)){RSB_ERROR("!\n");goto err;}
+	rsb__vector_to_abs(zsum,coop->typecode,1);
+
+	/* FIXME: need clone + cleanup here! */
+	if(cn==0 || ca==NULL)
+	{
+		ca = default_ca;
+		cn = default_cn;
+		ca[0] = 1;
+		ca[1] = rsb_get_num_threads();
+		if(ca[1] <= ca[0]) cn = 1;
+	}
+	for(ci=0;ci<cn;++ci)
+	{
+		struct rsb_mtx_t*mtxAp=NULL;
+		rsb_aligned_t ysum[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+		rsb__set_num_threads(ca[ci]);
+		if(RSB_WANT_VERBOSE_ACCURACY_TESTS)
+		RSB_STDOUT("for %d threads:\n",ca[ci]);
+		mtxAp = rsb__do_mtx_alloc_from_coo_const(coop->VA,coop->IA,coop->JA,coop->nnz,coop->typecode,coop->nr,coop->nc,RSB_DEFAULT_ROW_BLOCKING,RSB_DEFAULT_COL_BLOCKING,flags,&errval);
+		if(RSB_SOME_ERROR(errval)){RSB_ERROR("!\n");goto err;}
+		if(!mtxAp){errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR("!\n");goto err;}
+		rsb__cblas_Xscal(coop->typecode,vd,NULL,Y,1);
+		/* TODO: may iterate the following in order to test indeterminism */
+		if(mtxAp->nnz != coop->nnz)
+		{
+			/* input not cleaned up? */
+			if(RSB_WANT_VERBOSE_ACCURACY_TESTS)
+			RSB_STDOUT("%d vs %d nnz ? input not cleaned up?\n",mtxAp->nnz,coop->nnz);
+			errval = RSB_ERR_BADARGS;
+			goto ierr;
+		}
+		errval = rsb_do_spmv(transA,alphap,mtxAp,X,incx,betap,Y,incy);
+		if(RSB_SOME_ERROR(errval)){RSB_ERROR("!\n");goto ierr;}
+		errval = rsb__util_vector_sum(ysum,Y,coop->typecode,vd);
+		if(RSB_SOME_ERROR(errval)){RSB_ERROR("!\n");goto ierr;}
+		rsb__vector_to_abs(ysum,coop->typecode,1);
+		//rsb__debug_print_vector(ysum,1,coop->typecode,1);
+		if(!RSB_SOME_ERROR(rsb__do_are_same(ysum,zsum,1,coop->typecode,1,1)))
+		{
+			/* same result (no numerical error at all). no further check needed. */
+			if(RSB_WANT_VERBOSE_ACCURACY_TESTS)
+			RSB_STDOUT("Identical values.\n");
+		}
+		else
+		{
+			if(RSB_WANT_VERBOSE_ACCURACY_TESTS)
+			RSB_STDOUT("Non identical values. Checking backward error.\n");
+			errval = rsb_do_check_normwise_backward_error(mtxAp,X,Y,Z,transA);
+			if(RSB_SOME_ERROR(errval)){goto ierr;}
+		}
+ierr:
+		RSB_MTX_FREE(mtxAp);
+	}
+err:
+	RSB_CONDITIONAL_FREE(X);
+	RSB_CONDITIONAL_FREE(Y);
+	RSB_CONDITIONAL_FREE(Z);
+	return errval;
+}
+
+/* @endcond */
+
diff --git a/rsb_test_accuracy.h b/rsb_test_accuracy.h
new file mode 100644
index 0000000..17b0470
--- /dev/null
+++ b/rsb_test_accuracy.h
@@ -0,0 +1,42 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief
+ * This header file declares functions for testing numerical accuracy.
+ * */
+
+#ifndef RSB_TEST_ACCURACY_H_INCLUDED
+#define RSB_TEST_ACCURACY_H_INCLUDED
+
+#include "rsb_internals.h"
+rsb_err_t rsb__vectors_reinit(void *rhs, void *out, rsb_type_t typecode, rsb_nnz_idx_t rn, rsb_nnz_idx_t on, size_t incr, size_t inco); 
+void * rsb__calloc_vector(rsb_nnz_idx_t n, rsb_type_t typecode);
+void * rsb__malloc_vector(rsb_nnz_idx_t n, rsb_type_t typecode);
+void * rsb__realloc_vector(void* p, rsb_nnz_idx_t n, rsb_type_t typecode);
+rsb_err_t rsb__do_spmv_accuracy_test(const struct rsb_coo_matrix_t*coop, rsb_thread_t * ca, rsb_thread_t cn, rsb_flags_t flags);
+rsb_err_t rsb__do_spmv_fullword_coo(const struct rsb_coo_matrix_t*coop, rsb_flags_t flags, const void * x, void * y, const void *alphap, const void * betap, rsb_coo_idx_t incx, rsb_coo_idx_t incy, rsb_trans_t transA);
+rsb_err_t rsb__init_rsb_struct_from_coo(struct rsb_mtx_t *mtxAp, const struct rsb_coo_matrix_t *coop);
+#endif /* RSB_TEST_ACCURACY_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_test_matops.c b/rsb_test_matops.c
new file mode 100644
index 0000000..9178f1c
--- /dev/null
+++ b/rsb_test_matops.c
@@ -0,0 +1,9914 @@
+/* @cond INNERDOC */
+/*! 
+ @file
+ @brief 
+
+ Matrix Operations testing code source file.
+ This is NOT part of the library: only of companion programs.
+
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+/*!
+ @file
+ @brief
+ Performance kernels dispatching code, for each type, submatrix size, operation.
+ But for block compressed sparse stripes format.
+ Kernels unrolled, with no loops, for only user-specified blockings.
+ */
+
+#include "rsb_test_matops.h"
+
+/* FIXME: necessary, until we use so many #ifdefs in this program */
+#include "rsb-config.h"
+#include "rsb_common.h"
+#include "rsb_mkl.h"
+
+#if RSB_HAVE_LIBGEN_H
+#include <libgen.h>	/* for basename (20101226 FIXME : superseded by rsb__basename usage)*/
+#endif /* RSB_HAVE_LIBGEN_H */
+
+#define RSB_HAVE_METIS 0 /* FIXME: unfinished */
+#if RSB_HAVE_METIS
+#include <metis/metis.h>
+#endif /* RSB_HAVE_METIS */
+
+#ifdef RSB_WANT_OSKI_BENCHMARKING 
+#ifdef RSB_HAVE_OSKI_OSKI_H 
+#include <oski/oski.h>
+#else /* RSB_HAVE_OSKI_OSKI_H */
+#error "you should disable oski benchmarking at configure time!"
+#endif /* RSB_HAVE_OSKI_OSKI_H */
+#endif /* RSB_WANT_OSKI_BENCHMARKING */
+#ifdef RSB_HAVE_UNISTD_H
+#include <unistd.h>
+#endif /* RSB_HAVE_UNISTD_H */
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#define RSB_UTIL_CSR_IDX_OCCUPATION(R,C,NNZ) (sizeof(rsb_coo_idx_t)*nnz+sizeof(rsb_nnz_idx_t)*nrA)
+#define RSB_UTIL_COO_IDX_OCCUPATION(R,C,NNZ) (sizeof(rsb_coo_idx_t)*2*nnz)
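+/* note (editorial): the two occupation macros above expand to the local
+ * variables nnz and nrA rather than to their R,C,NNZ parameters, so they
+ * only work where such variables are in scope */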
+#define RSB_UTIL_COO_OCCUPATION(R,C,NNZ,TYPE) (RSB_UTIL_COO_IDX_OCCUPATION(R,C,NNZ)+(NNZ)*(RSB_SIZEOF(TYPE)))
+#define RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH() RSB_FPRINTF_MATRIX_ESSENTIALS(stdout,mtxAp,filename,cc) 
+#define RSB_DIV(Q,D) ( ( (Q)+(D)-1 ) / (D) )
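+/* e.g. RSB_DIV(10,4) == 3: integer division rounding upwards (illustrative note) */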
+extern struct rsb_session_handle_t rsb_global_session_handle;
+#define RSB_NEGATED_EXAGGERATED_TUNER_TIMES -999999.0
+#define RSB_MKL_APPROPRIATE_AT_TIME_SPEC(TS) ( (TS) != RSB_NEGATED_EXAGGERATED_TUNER_TIMES )
+RSB_INTERNALS_RSBENCH_HEAD_DECLS
+#define RSBENCH_MAY_SQUIT(LABEL,ACTION) { if(RSB_SHALL_QUIT) { RSB_INFO("Terminating execution earlier due to interactive user request.\n"); ACTION; goto LABEL; } }
+#define RSBENCH_MAY_TQUIT(LABEL,ACTION) { if(maxtprt > RSB_TIME_ZERO && maxtprt < rsb_time()+totprt) { RSB_INFO("Terminating execution earlier due to user set max timer of %2.3lg s.\n",maxtprt); ACTION; goto LABEL; } }
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+	#define RSB_PERFORMANCE_COUNTERS_DUMP_MEAN(MSG,PMSG,TIMES,PCIP) if(want_perf_counters>0){rsb_perf_counters_update(); if(PMSG)rsb_perf_counters_dump(MSG,NULL,TIMES,PCIP); rsb_perf_counters_reset();/* TEMPORARY */}
+	#define RSB_PERFORMANCE_COUNTERS_DUMP(MSG,PMSG) if(want_perf_counters>1)RSB_PERFORMANCE_COUNTERS_DUMP_MEAN(MSG,PMSG,1,NULL) 
+#else /* RSB_WANT_PERFORMANCE_COUNTERS */
+	#define RSB_PERFORMANCE_COUNTERS_DUMP_MEAN(MSG,PMSG,TIMES,PCIP) 
+	#define RSB_PERFORMANCE_COUNTERS_DUMP(MSG,PMSG)
+#endif /* RSB_WANT_PERFORMANCE_COUNTERS */
+
+#if RSB_WITH_LIKWID
+#define RSB_TM_LIKWID_MARKER_R_START(R) if(want_likwid == RSB_BOOL_TRUE)RSB_LIKWID_MARKER_R_START(R)
+#define RSB_TM_LIKWID_MARKER_R_STOP(R)  if(want_likwid == RSB_BOOL_TRUE)RSB_LIKWID_MARKER_R_STOP(R)
+#else
+#define RSB_TM_LIKWID_MARKER_R_START(R)
+#define RSB_TM_LIKWID_MARKER_R_STOP(R)
+#endif /* RSB_WITH_LIKWID */
+
+#ifdef RSB_HAVE_REGEX_H 
+#include <regex.h>
+#endif /* RSB_HAVE_REGEX_H */
+#define RSBENCH_STDERR RSB_STDERR
+
+#define RSB_WANT_PERFORMANCE_COUNTERS_IN_RSBENCH  defined(RSB_WANT_PERFORMANCE_COUNTERS) && (RSB_WANT_PERFORMANCE_COUNTERS==1)
+
+static int rsb__echo_cargs(const int argc, rsb_char_t * const argv[])
+{
+	int argci;
+
+	if(argc > 0)
+		RSBENCH_STDOUT("# %s",argv[0]);
+	for(argci=1; argci<argc; ++argci)
+	{
+		RSBENCH_STDOUT(" %s",argv[argci]);
+	}
+	RSBENCH_STDOUT("\n");
+	return 0;
+}
+
+#ifdef RSB_HAVE_REGEX_H 
+static	rsb_bool_t rsb_regexp_match(const rsb_char_t*s, const rsb_char_t*r)
+	{
+		regex_t regex;
+		const int nmatch = 1;
+		regmatch_t pmatch[nmatch];
+		rsb_bool_t match = RSB_BOOL_FALSE;
+		int ignorecase = 0;
+		int ignorenewlines = 0;
+
+		if(!r || !strlen(r))
+			goto ret;
+
+		if(regcomp(&regex,r, 0 | REG_EXTENDED | (ignorecase==0?0:REG_ICASE) )!=0)
+		{
+			RSB_ERROR("error calling regcomp; invalid regexp: %s\n",s);
+			goto ret;
+		}
+
+		if(regexec(&regex,s+0,nmatch,pmatch,0)!=REG_NOMATCH)
+		{
+			match = RSB_BOOL_TRUE;
+		}
+		regfree(&regex);
+ret:
+		return match;
+	}
+#endif /* RSB_HAVE_REGEX_H */
+
+static void rsb__echo_timeandlabel(const char*l, const char*r, rsb_time_t *stp)
+{
+	rsb_time_t ct = rsb_time();
+
+	if(stp && *stp)
+		RSBENCH_STDOUT("#%s%.0lf (after %.1lfs of w.c.t.)%s",l?l:"",ct,ct-*stp,r?r:"");
+	else
+		RSBENCH_STDOUT("#%s%.0lf%s",l?l:"",ct,r?r:"");
+	if(stp)
+		*stp = ct;
+}
+
+static void rsb__impcdstr(char * dst, const char * h, const char *t, const char * pp, const char * ap)
+{
+	/* There is some overlap with rsb__cat_compver and rsb__sprint_matrix_implementation_code that shall be resolved. */
+	rsb_char_t buf[RSB_CONST_MATRIX_IMPLEMENTATION_CODE_STRING_MAX_LENGTH];/* Flawfinder: ignore */
+
+	rsb__cat_compver(buf);
+	strcat(buf,"");
+	rsb__sprintf(dst,"%s%s_%s_%.0lf_%s%s%s",pp?pp:"",h,rsb__getenv_nnr("HOSTNAME"),rsb_time(),buf,ap?ap:"",t);
+}
+
+int rsb_test_help_and_exit(rsb_char_t *argv0, rsb_option *o, int code){
+	    size_t i=0;
+
+            printf("%s %s",argv0," where OPTIONS are taken from :\n");
+            for(i=0;o[i].val;++i)
+            {
+                if(o[i].val<RSB_MAX_VALUE_FOR_TYPE(rsb_char_t) && isprint(o[i].val)  )/* please do not swap conditions : some isprint() implementations segfault on this */
+		{
+                	printf("\t-%c",(rsb_char_t)(o[i].val));
+		}
+		else
+			printf("\t");
+                printf("\t\t");
+		if(o[i].name)
+	                printf("--%s",o[i].name);
+                switch(o[i].has_arg)
+		{
+	                case no_argument:
+	                break;
+	                case required_argument:
+	                printf(" <arg>");
+	                break;
+	                case optional_argument:
+	                printf(" [=arg]");
+	                break;
+	                default:
+        	        ;
+                };
+                printf("\n");
+	    }
+            printf("\n");
+	    printf("Arguments to --want-autotune of the format \"%s\", where S is the autotuning time in seconds, X is the number of tries, T the number of starting threads, V can be either q for quiet autotuning or v for a verbose one (can be specified twice). Valid examples: 3.0s2x4tv, 3.0s2x0tq, 3.0s, 2.0s10x . See documentation of rsb_tune_spmm for a full explanation of these parameters role in auto-tuning.\n",RSB_WAT_FMT_H);
+            printf("Report bugs to %s.\n",RSB_PACKAGE_BUGREPORT);
+            return code;
+}
+
+/* one function for each of (spmv_uaua,spsv_uxua,mat_stats)*/
+int rsb__main_block_partitioned_spmv_uaua(const int argc, rsb_char_t * const argv[])
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This function implements a complete program for using our variable block
+	 * rows sparse matrix storage as it was a fixed block size format.
+	 * It is useful for benchmark against fixed block sparse matrix codes.
+	 * 
+	 * This function will benchmark the "spmv_uaua" matrix operation.
+	 * */
+
+	/*
+	 * This example main program reads in a Matrix Market file in block format and multiplies it against a unit vector.
+	 **/
+	rsb_option options[] = {
+	    {"all-flags",	0 , NULL, 0x51},/* Q */  
+	    {"allow-any-transposition-combination",	0 , NULL, 0x61617463 },/* aatc */  
+	    {"alpha",	required_argument, NULL , 0x414C},/* AL */
+	    {"alternate-sort",	no_argument, NULL , 0x4153},/* AS */
+	    {"auto-blocking",	0 , NULL, 0x41},/* A */
+	    {"be-verbose",		0, NULL, 0x76},	/* v */
+	    {"beta",	required_argument, NULL ,  0x4246},/* BE */
+	    {"block-columnsize",	required_argument, NULL, 0x63},/* c */  
+	    {"block-rowsize",   required_argument, NULL, 0x72 },/* r */
+	    {"cache-blocking",	required_argument, NULL , 0x4342},/* CB */
+/*	    {"cache-flush",	no_argument, NULL, 0x4343},*/ /*   */
+	    {"column-expand",	required_argument, NULL, 0x6B},/* k */  
+	    {"compare-competitors",	no_argument, NULL, 0x6363},/* cc */  
+	    {"convert",	0, NULL, 0x4B},/* K */  
+/*	    {"convert",	required_argument, NULL, 0x4B},*//* K   */
+	    {"dense",	required_argument, NULL, 0x64 },   /* d */
+	    {"diagonal-dominance-check",	no_argument , NULL, 0x4444},/* DD */  /* new */
+	    {"dump-n-lhs-elements",	required_argument , NULL, 0x444444},/* DDD */  /* new */
+	    {"echo-arguments",	no_argument , NULL, 0x6563686f},/* echo */  /* new */
+	    {"flush-cache-in-iterations",	no_argument, NULL, 0x4343},/*  */  
+	    {"impatient",	no_argument, NULL, 0x696d7061},/* impa[tient] */  
+	    {"no-flush-cache-in-iterations",	no_argument, NULL, 0x434E},/*  */  
+	    {"flush-cache-around-loop",	no_argument, NULL, 0x434343},/*  */  
+	    {"want-ancillary-execs",	no_argument, NULL, 0x767646},/*  */  
+	    {"no-want-ancillary-execs",	no_argument, NULL, 0x42767646},/*  */  
+	    {"no-flush-cache-around-loop", no_argument	, NULL, 0x43434E},/*  */  
+	    {"want-no-recursive",	no_argument, NULL, 0x776e720a},/*  */  
+	    {"guess-blocking",	no_argument , NULL, 0x47},/* G */
+	    {"help",	no_argument , NULL, 0x68},	/* h */
+	    {"ilu0",	no_argument , NULL, 0x494B55},/* ILU */  /* new */
+	    {"incx",	required_argument, NULL, 0xb1bb0 },/* */  
+	    {"incy",	required_argument, NULL, 0xb1bb1 },/* */  
+	    {"in-place-assembly-experimental",	no_argument , NULL, 0x6970},/* i */  
+	    {"in-place-csr",	0 , NULL, 0x69},/* i */  
+	    {"in-place-permutation",	no_argument, NULL, 0x50},   /* P */
+#if RSB_WITH_LIKWID
+	    {"likwid",	no_argument, NULL, 0x6c696b77},   /* likw */
+#endif /* RSB_WITH_LIKWID */
+	    {"lower",	required_argument, NULL, 0x6c},   /* l */
+	    {"lower-dense",	required_argument, NULL, 0x6c64},   /* ld */
+	    {"generate-lowerband",	required_argument, NULL, 0x6c6c},   /* ll */
+	    {"gen-lband",	required_argument, NULL, 0x6c6c},   /* ll */
+	    {"generate-spacing",	required_argument, NULL, 0xbabb2 },   /* */
+	    {"matrix-dump",	0 , NULL, 0x44044},/* D */  
+	    {"matrix-dump-graph",	required_argument , NULL, 0x44047},/* DG */  
+	    {"matrix-dump-internals",	0 , NULL, 0x49049},/* I */  
+	    {"merge-experimental",	required_argument , NULL, 0x6d656578},/* meex */  
+	    {"split-experimental",	required_argument , NULL, 0x73706578},/* spex */  
+	    {"ms-experimental",	required_argument , NULL, 0x6d736578},/* msex */  
+	    {"matrix-filename",	required_argument, NULL, 0x66},/* f */  
+	    {"matrix-storage",	required_argument, NULL, 0x46},/* F */  
+	    {"matrix-time",	0 , NULL, 0x4D},/* M */  /* new */
+	    {"mem-hierarchy-info",	required_argument , NULL, 0x4D4D},/* MM */  /* new */
+	    {"max-runtime",	required_argument , NULL, 0x6d617275},/* maru */
+	    {"no-op",		0 , NULL, 0x4E},	/* N */
+	    {"notranspose",	no_argument, NULL, 0x5051},   /* do not transpose the operation */
+	    {"nrhs",	required_argument, NULL, 0x6e726873},   /* */
+	    {"one-nonunit-incx-incy-nrhs-per-type",	no_argument, NULL, 0x6e697270},   /* */
+	    RSB_BENCH_PROG_OPTS
+	    {"oski-benchmark",	0 , NULL, 0x42},/* B: only long option *//* comparative benchmarking agains OSKI */
+	    {"mkl-benchmark",	0 , NULL, 0x4C},/* L: only long option *//* comparative benchmarking agains MKL */
+	    {"out-lhs",		0 , NULL, 0x6F},/* o */	/* should accept an output file name, optionally */
+	    {"out-rhs",		0 , NULL, 0x6F6F},/* o */	/* should accept an output file name, optionally */
+	    {"override-matrix-name",	required_argument , NULL, 0x6F6D6E},/* omn */	
+	    {"pattern-mark",	0 , NULL, 0x70},/* p */
+	    {"pre-transpose",	no_argument, NULL, 0x5454},   /* transpose the matrix before assembly  */
+	    {"read-as-binary",		required_argument, NULL, 0x62},/* b */
+	    {"repeat-constructor",	required_argument , NULL, 0x4A4A},
+	    {"reuse-io-arrays",	no_argument , NULL, 0x726961}, /* ria */
+	    {"no-reuse-io-arrays",	no_argument , NULL, 0x6e726961 }, /* nria */
+	    {"reverse-alternate-rows",	no_argument , NULL, 0x4A4A4A},
+	    {"generate-upperband",	required_argument, NULL, 0x7575},   /* uu */
+	    {"gen-uband",	required_argument, NULL, 0x7575},   /* uu */
+	    {"generate-diagonal",	required_argument, NULL, 0x6464 },   /* dd */
+	    {"gen-diag",	required_argument, NULL, 0x6464 },   /* dd */
+	    {"zig-zag",	no_argument , NULL, 0x4A4A4A},
+	    {"subdivision-multiplier",	required_argument, NULL , 0x534D},/* SM */
+#if RSB_WANT_BOUNDED_BOXES
+	    {"bounded-box",	required_argument, NULL , 0x4242},/* BB */
+#endif /* RSB_WANT_BOUNDED_BOXES */
+	    {"sort",		0 , NULL, 0x73},	/* s */
+	    {"no-leaf-multivec",	no_argument, NULL , 0x6e6c6d6d},/* nlmm */
+	    {"with-leaf-multivec",	no_argument, NULL , 0x636c6d6d},/* wlmm */
+	    {"sort-after-load",	no_argument, NULL, 0x7373},/* ss */  
+	    {"skip-loading-symmetric-matrices",	 no_argument, NULL, 0x736c736d},/* slsm */  
+	    {"skip-loading-unsymmetric-matrices",no_argument, NULL, 0x736c756d},/* slum */  
+	    {"skip-loading-hermitian-matrices",no_argument, NULL, 0x736c686d},/* slhm */  
+	    {"skip-loading-not-unsymmetric-matrices",no_argument, NULL, 0x736c6e75},/* slnu */  
+	    {"skip-loading-if-more-nnz-matrices",required_argument, NULL, 0x736c6d6},/* slmn */  
+	    {"skip-loading-if-less-nnz-matrices",required_argument, NULL, 0x736c6e6e},/* slnn */  
+	    {"skip-loading-if-more-filesize-kb-matrices",required_argument, NULL, 0x736c6d73},/* slms */  
+#ifdef RSB_HAVE_REGEX_H 
+	    {"skip-loading-if-matching-regex",required_argument, NULL, 0x736c6d72},/* slmr */  
+#endif /* RSB_HAVE_REGEX_H */
+	    {"skip-loading-if-matching-substr",required_argument, NULL, 0x736c7373},/* slss */  
+	    {"times",		required_argument, NULL, 0x74},/* t */  
+	    {"transpose-as",	required_argument, NULL, 0x5040},   /* do transpose the operation */
+	    {"transpose",	no_argument, NULL, 0x5050},   /* do transpose the operation */
+	    {"also-transpose",	no_argument, NULL, 0x4150},  /* N,T: do transpose the operation after no transposition */
+	    {"all-transposes",	no_argument, NULL, 0x616c6c74},  /* N,T,C */
+	    {"type",		required_argument, NULL, 0x54},/* T */  
+	    {"types",		required_argument, NULL, 0x54},/* T */  
+	    {"update",		0 , NULL, 0x55},	/* U */
+	    {"as-unsymmetric",		0 , NULL, 0x5555},	/* UU: TODO: to insert such a test in as default, in order to quantify the benefit of symmetry */
+	    {"as-symmetric",		0 , NULL, 0x5353},	/* SS */
+	    {"only-lower-triangle",		0 , NULL, 0x4F4C54},	/* OLT */
+   	    {"only-upper-triangle",		0 , NULL, 0x4F4554},	/* OUT */
+	    {"verbose",	no_argument , NULL, 0x56},/* V */
+	    {"want-io-only",	no_argument , NULL, 0x4949},/* --want-io-only */
+	    {"want-nonzeroes-distplot",	no_argument, NULL, 0x776E68},/* wnh */  
+	    {"want-accuracy-test",	no_argument, NULL, 0x776174},/* wat */  
+	    {"want-getdiag-bench",	no_argument , NULL, 0x774446},/* wde */  /* FIXME: obsolete ? */
+	    {"want-getrow-bench",	no_argument , NULL, 0x777246},/* wre */  /* FIXME: obsolete ? */
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+	    {"want-perf-counters",	no_argument , NULL, 0x707763},/* wpc */
+#endif
+	    {"want-print-per-subm-stats",	no_argument , NULL, 0x77707373},/* wpss */
+	    {"want-only-accuracy-test",	no_argument, NULL, 0x776F6174},/* woat */  
+	    {"want-autotune",	required_argument, NULL, 0x7772740a},/* wrt */  
+	    {"want-no-autotune",	no_argument, NULL, 0x776e7274},/* wnrt */  
+#if RSB_HAVE_METIS
+	    {"want-metis-reordering",	no_argument, NULL, 0x776d6272 },/* wmbr */  
+#endif
+	    {"want-mkl-autotune",	required_argument, NULL, 0x776d6174},/* wmat */  
+	    {"want-mkl-one-based-indexing",	no_argument, NULL, 0x776d6f62 },/* wmob */  
+	    {"want-unordered-coo-test",	no_argument, NULL, 0x775563},/* */  
+	    {"with-flags",	required_argument, NULL, 0x71},/* q */  
+	    {"write-as-binary",	required_argument, NULL, 0x77 }, /* w */
+	    {"write-as-csr",	required_argument, NULL,  0x63777273 }, /* wcsr */
+	    {"write-performance-record",	required_argument, NULL, 0x77707266 }, /* write performance record file  */
+	    {"performance-record-name-append",	required_argument, NULL, 0x77707261 }, /* ...append  */
+	    {"performance-record-name-prepend",	required_argument, NULL, 0x77707270 }, /* ...prepend  */
+	    {"write-no-performance-record",	no_argument, NULL, 0x776e7072 }, /* write no performance record */
+	    {"discard-read-zeros",	no_argument, NULL,  0x64697a65 }, /* dize */
+	    {"z-sorted-coo",	no_argument, NULL , 0x7A},/* z */
+	    {0,0,0,0}	};
+
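+	/* Benchmark state: values parsed from the options above, plus timing and performance accumulators. */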
+	rsb_nnz_idx_t nnz = 0;/* was 0 */
+	int c;
+	int opt_index = 0;
+
+	rsb_coo_idx_t *IA = NULL, *JA = NULL;
+	void *VA = NULL;
+
+	int g_estimate_matrix_construction_time = 0;
+	int g_all_flags = 0;
+	int g_sort_only = 0;
+	int repeat_construction = 1;	/* times to call the matrix constructor (the more times, the more accurate measurements) */
+
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_DEFAULT, typecode_old = RSB_NUMERICAL_TYPE_DEFAULT;
+	rsb_int ntypecodes = 0,typecodesi;
+	const rsb_int maxtypes = 2*RSB_IMPLEMENTED_TYPES;
+	rsb_type_t typecodes[maxtypes+1] ;
+
+	rsb_blk_idx_t br = 1;
+	rsb_blk_idx_t bc = 1;
+	char * bcs = NULL, *brs = NULL, *cns = NULL, *mhs = NULL;
+	rsb_blk_idx_t * brv = NULL;
+	rsb_blk_idx_t * bcv = NULL;
+	int brl = 0;
+	int bcl = 0;
+	rsb_thread_t ca_[1] = {1};
+	rsb_thread_t * ca = ca_;
+	rsb_thread_t cn = 1, ci = 0, cc = ca[ci];
+
+	int times = 100;	/* the default number of times to perform spmv_uaua */
+	rsb_coo_idx_t nrA = 0, ncA = 0, ndA = 0;
+	int filenamen = 0, filenamei = 0;
+#define RSB_RSBENCH_STATIC_FILENAMEA 1
+#if RSB_RSBENCH_STATIC_FILENAMEA
+#define RSB_RSBENCH_MAX_MTXFILES 256
+	const rsb_char_t *filenamea[RSB_RSBENCH_MAX_MTXFILES];
+#else
+	const rsb_char_t **filenamea = NULL;
+#endif
+	const rsb_char_t *filename = NULL;
+	const rsb_char_t *filename_old = NULL;
+	const rsb_char_t *usfnbuf = NULL;
+	rsb_char_t*fprfn = NULL, *cprfn = NULL, *apprfn = NULL, *ppprfn = NULL; /* final/checkpoint      performance file name , append/prepend */
+	rsb_char_t fprfnb[RSB_MAX_FILENAME_LENGTH], cprfnb[RSB_MAX_FILENAME_LENGTH];/* final/checkpoint      performance file name buffers */
+	rsb_char_t fnbuf[RSB_MAX_FILENAME_LENGTH];
+	rsb_char_t*fnbufp[1]={&(fnbuf[0])};
+	rsb_char_t * dump_graph_file=NULL;
+	rsb_flags_t flags_o = RSB_FLAG_NOFLAGS|RSB_FLAG_OWN_PARTITIONING_ARRAYS;
+/*	RSB_DO_FLAG_ADD(flags_o,RSB_FLAG_DISCARD_ZEROS)	;	*/ /* FIXME : EXPERIMENTAL (watch nnz count on a multi blocking run ...) */
+	rsb_flags_t flagsa[128] = { RSB_FLAG_NOFLAGS }; /* zero-initializes all 128 entries */
+	rsb_flags_t r_flags = RSB_FLAG_NOFLAGS; /* recycling flags */
+	int fn = 1, fi = 0;/* for flags */
+	int tn = 1, ti = 0;/* for transposition */
+	int g_debug = 0;
+	int be_verbose = 0;
+	int pattern_only = 0;
+	int dumpout = 0;
+	int dumpout_internals = 0, merge_experimental = 0, split_experimental = 0;
+	int just_enter_tuning = 1;
+	rsb_char_t * csr_w_filename = NULL;
+	rsb_char_t * b_w_filename = NULL;
+	rsb_char_t * b_r_filename = NULL;
+	int dumpvec = rsb_dumpvec_no;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_blk_idx_t rua[] = RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[] = RSB_COLUMNS_UNROLL_ARRAY;
+	int guess_blocking_test = 0;		/* guess test stuff */
+	rsb_int want_column_expand = 0;
+	rsb_perf_t bperf=0,wperf=0,cperf=0;			/* guess test stuff */
+	rsb_fillin_t egfillin=0,ebfillin=0,bfillin=0,maxfillin=0;	/* guess test stuff */
+	rsb_blk_idx_t bri=0,bci=0;		/* guess test stuff */
+	rsb_perf_t omta = RSB_REAL_ZERO; /* op memory traffic amount */
+	rsb_fillin_t fillin = RSB_REAL_ZERO;
+	rsb_perf_t raw_Mflops = RSB_REAL_ZERO,true_Mflops = RSB_REAL_ZERO, true_gem_Mflops = RSB_REAL_ZERO;
+	rsb_char_t buf[RSB_CONST_MATRIX_IMPLEMENTATION_CODE_STRING_MAX_LENGTH];/* Flawfinder: ignore */
+	rsb_fillin_t efillin = RSB_REAL_ZERO;
+	rsb_perf_t eperf = RSB_REAL_ZERO;
+
+	rsb_bool_t should_recycle_matrix = RSB_BOOL_FALSE; /* reuse the matrix across measurements */
+	rsb_bool_t should_recycle_io = RSB_BOOL_TRUE;/* reuse the input arrays */
+	rsb_bool_t g_allow_any_tr_comb = RSB_BOOL_FALSE; /* allow any transposition combination */
+	
+	rsb_trans_t transAo = RSB_DEFAULT_TRANSPOSITION;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	rsb_nnz_idx_t should_generate_dense = 0;
+	rsb_nnz_idx_t should_generate_dense_nc = 0;
+	rsb_nnz_idx_t should_generate_lband = -1, should_generate_uband = -1;
+	rsb_nnz_idx_t want_generated_spacing = 0;
+	rsb_bool_t want_only_star_scan = RSB_BOOL_FALSE;
+	rsb_blk_idx_t nrhs = 1, nrhsn = 1, nrhsi = 1, nrhsl = 1;
+	const char*nrhss = NULL;
+	rsb_blk_idx_t *nrhsa = NULL;
+	size_t outnri = 0, rhsnri = 0;
+	rsb_nnz_idx_t n_dumpres = 0;
+	rsb_nnz_idx_t n_dumprhs = 0;
+	rsb_bool_t ignore_failed_fio = RSB_BOOL_TRUE; /* FIXME 20140912 experimental */
+	rsb_bool_t want_convert = RSB_BOOL_FALSE;
+	rsb_bool_t want_update = RSB_BOOL_FALSE;
+	rsb_int_t want_impatiently_soon_pre_results = 0; /* FIXME: temporary */
+	rsb_bool_t want_inner_flush = RSB_BOOL_FALSE;
+	rsb_bool_t want_outer_flush = RSB_BOOL_TRUE;
+	rsb_bool_t want_ancillary_execs = RSB_BOOL_FALSE;
+	rsb_time_t st = RSB_TIME_ZERO;
+	rsb_time_t totiot = RSB_TIME_ZERO; /* total I/O time */
+	rsb_time_t totatt = RSB_TIME_ZERO; /* total ancillary tests time */ /* FIXME: is this complete ? */
+	rsb_time_t totct = RSB_TIME_ZERO; /* total conversions time */ /* FIXME: is this complete ? */
+	rsb_time_t tottt = RSB_TIME_ZERO; /* total tuning time */
+	rsb_time_t totht = RSB_TIME_ZERO; /* total checks time */ /* FIXME: is this complete ? */
+	rsb_time_t maxtprt = RSB_TIME_ZERO; /* max total program run time */
+	const rsb_time_t totprt = - rsb_time(); /* total program run time */
+	rsb_bool_t want_as_unsymmetric = RSB_BOOL_FALSE;
+	rsb_bool_t want_as_symmetric = RSB_BOOL_FALSE;
+	rsb_bool_t want_only_lowtri = RSB_BOOL_FALSE;
+	rsb_bool_t want_only_upptri = RSB_BOOL_FALSE;
+	rsb_bool_t want_sort_after_load = RSB_BOOL_FALSE;
+	rsb_bool_t want_slsm = RSB_BOOL_FALSE, want_slum = RSB_BOOL_FALSE, want_slnu = RSB_BOOL_FALSE, want_slhm = RSB_BOOL_FALSE;
+	rsb_nnz_idx_t want_slmn = 0,  want_slnn = 0,  want_slms = 0;
+#ifdef RSB_HAVE_REGEX_H
+	const rsb_char_t * want_slmr = NULL;
+#endif /* RSB_HAVE_REGEX_H */
+	const rsb_char_t * want_slss = NULL;
+	rsb_bool_t do_perform_ilu = RSB_BOOL_FALSE;
+	rsb_bool_t do_perform_ddc = RSB_BOOL_FALSE;
+	rsb_bool_t want_in_place_assembly = RSB_BOOL_FALSE;
+	rsb_bool_t want_accuracy_test = 0;	/* FIXME-EXPERIMENTAL */
+	rsb_bool_t want_nonzeroes_distplot = 0;	/* FIXME-EXPERIMENTAL */
+	rsb_bool_t want_getdiag_bench = 0;	/* FIXME-EXPERIMENTAL */
+	rsb_bool_t want_getrow_bench = 0;	/* FIXME-EXPERIMENTAL */
+	rsb_coo_idx_t mib = 0; /* MKL index base (FIXME: declared here and not within RSB_WANT_MKL because CSR copy made even with no MKL) */
+#if RSB_WANT_MKL
+	rsb_bool_t want_mkl_bench = RSB_BOOL_FALSE;
+	rsb_bool_t want_mkl_bench_csr = RSB_BOOL_TRUE;
+	rsb_bool_t want_mkl_bench_gem = RSB_BOOL_TRUE;
+	rsb_bool_t want_mkl_bench_coo = RSB_BOOL_FALSE;
+#endif /* RSB_WANT_MKL */
+	rsb_time_t totmt = RSB_TIME_ZERO; /* total mkl/competitors (tuning) time */
+	rsb_bool_t want_perf_dump = RSB_BOOL_FALSE;
+	void*rspr = NULL; /* rsb sampled performance record structure pointer */
+
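+	/* Scalars alpha and beta live in type-agnostic aligned buffers; they are converted to the active numerical type before each measurement. */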
+	rsb_aligned_t alpha[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_aligned_t beta[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_aligned_t errnorm[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_aligned_t * alphap = &(alpha[0]);
+	rsb_aligned_t * betap = &(beta[0]);
+	rsb_int alphai = 1, betai = 1;
+	rsb_coo_idx_t incX = 1, incY = 1;
+	rsb_blk_idx_t incXn = 1, incXi = 1;
+	rsb_blk_idx_t incYn = 1, incYi = 1;
+	rsb_blk_idx_t *incXa = NULL, *incYa = NULL;
+	rsb_coo_idx_t ldX = 0, ldY = 0;
+	rsb_bool_t want_incX = RSB_BOOL_FALSE,want_incY = RSB_BOOL_FALSE;
+	rsb_bool_t want_verbose = RSB_BOOL_FALSE;
+	rsb_int_t want_verbose_tuning = 0;
+	rsb_bool_t want_transpose = RSB_BOOL_FALSE;
+	#if 1
+	const int max_io = 10;
+	struct rsb_initopts io={NULL,NULL,0,RSB_IO_SPECIFIER_SET},*iop=&io;
+	rsb_int_t should_use_cb_method = 0;
+	rsb_real_t subdivision_multiplier = 0.0;
+#if RSB_WANT_BOUNDED_BOXES
+	rsb_int_t want_bounded_box=1;
+#endif /* RSB_WANT_BOUNDED_BOXES */
+	rsb_int_t want_no_leaf_spmm=0;
+	void * io_values[max_io];
+	enum rsb_opt_t io_keys[max_io];
+	#else /* 1 */
+	struct rsb_initopts *iop = RSB_NULL_INIT_OPTIONS;
+	#endif /* 1 */
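+	/* In the enabled branch above, iop points at the local io struct, whose key/value pairs are registered below and passed to rsb_lib_init(). */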
+	rsb_bool_t should_use_alternate_sort = RSB_BOOL_FALSE;
+	rsb_bool_t reverse_odd_rows = RSB_BOOL_FALSE;
+	rsb_bool_t zsort_for_coo = RSB_BOOL_FALSE;
+	rsb_bool_t want_unordered_coo_bench = RSB_BOOL_FALSE;
+	rsb_time_t unordered_coo_op_tot_time = RSB_CONST_IMPOSSIBLY_BIG_TIME, unordered_coo_op_time = RSB_CONST_IMPOSSIBLY_BIG_TIME, unordered_coo_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+#ifdef RSB_WANT_OSKI_BENCHMARKING 
+	/* FIXME : unfinished */
+	rsb_time_t oski_t = RSB_TIME_ZERO,oski_m_t = RSB_TIME_ZERO,oski_a_t = RSB_TIME_ZERO,oski_t_t = RSB_TIME_ZERO;
+	oski_idx_t * Aptr=NULL;
+	oski_idx_t * Aind=NULL;
+	oski_value_t * Aval=NULL;
+	oski_matrix_t A_tunable;
+	oski_vecview_t x_view;
+	oski_vecview_t y_view;
+	void * Oval = NULL;
+	rsb_coo_idx_t *OIA=NULL,*OJA=NULL;
+	rsb_char_t oxform[256];
+	double oalpha = 1, obeta = 0;
+	rsb_bool_t want_oski_bench=0;
+	#ifdef RSB_HAVE_SETENV
+	setenv("OSKI_LUA_PATH",OSKI_LUA_PATH,0/* if 0, will not override. if 1, it would. */);
+	#endif /* RSB_HAVE_SETENV */
+#endif /* RSB_WANT_OSKI_BENCHMARKING */
+	rsb_time_t tinf = rsb__timer_granularity();
+	rsb_aligned_t pone[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_bool_t want_likwid = RSB_BOOL_FALSE;
+	rsb_flags_t order = RSB_FLAG_WANT_COLUMN_MAJOR_ORDER;
+	rsb_time_t want_autotuner = RSB_NEGATED_EXAGGERATED_TUNER_TIMES, want_mkl_autotuner = RSB_NEGATED_EXAGGERATED_TUNER_TIMES;
+	rsb_bool_t want_io_only = RSB_BOOL_FALSE;
+	rsb_int wat = 1;	/* want autotuning threads choice */
+	rsb_int wai = 1;	/* want autotuning rounds */
+	char wav = 0x56;	/* want autotuning verbose */
+	int wavf = RSB_AUT0_TUNING_VERBOSE;
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+	int want_perf_counters = 0;
+#endif
+	rsb_bool_t want_print_per_subm_stats = RSB_BOOL_FALSE;
+#if RSB_HAVE_METIS
+	rsb_bool_t want_wmbr = RSB_BOOL_FALSE;
+#endif
+	rsb_bool_t want_recursive = RSB_BOOL_TRUE;
+
+	io.keys = io_keys;
+	io.values = io_values;
+	io.n_pairs = 0;
+
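+	/* Option-parsing loop: each case constant below is the ASCII packing of the option mnemonic (e.g. 0x736c736d spells "slsm"). */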
+	for (;;)
+	{
+		c = rsb_getopt_long(argc,argv,RSB_SAMPLE_PROGRAM_OPTIONS_GET_FLAGS"b:w:BGht:f:r:c:vpn:MNS:Bk:KU" /* Flawfinder: ignore */
+		/* the 's' option is included anyway, via RSB_SAMPLE_PROGRAM_OPTIONS_GET_FLAGS */
+		"o:O:"
+		, options, &opt_index);
+		if (c == -1)break;
+
+		RSB_DO_FLAG_ADD(flags_o,rsb__sample_program_options_get_flags(c,optarg));
+
+		switch (c)
+		{
+			case 0x62:	/* b */
+			b_r_filename = optarg;
+			break;
+			case  0xb1bb0:
+#if 0
+				incX = rsb__util_atoi(optarg);
+				if(incX<1){errval = RSB_ERR_BADARGS;goto err;}
+				if(incX>1)RSBENCH_STDOUT("# setting incX=%d\n",incX);
+				want_incX = RSB_BOOL_TRUE;
+#else
+			if(RSB_SOME_ERROR(rsb__util_get_bx_array(optarg,&incXn,&incXa)))
+				{RSB_ERROR(RSB_ERRM_ES);goto err;}
+#endif
+			break;
+			case  0x6970:
+				RSBENCH_STDOUT("# WARNING: in place assembly is an UNFINISHED, EXPERIMENTAL feature\n");
+				want_in_place_assembly = RSB_BOOL_TRUE;
+			break;
+			case  0xb1bb1:
+#if 0
+				incY = rsb__util_atoi(optarg);
+				if(incY<1){errval = RSB_ERR_BADARGS;goto err;}
+				if(incY>1)RSBENCH_STDOUT("# setting incY=%d\n",incY);
+				want_incY = RSB_BOOL_TRUE;
+#else
+			if(RSB_SOME_ERROR(rsb__util_get_bx_array(optarg,&incYn,&incYa)))
+				{RSB_ERROR(RSB_ERRM_ES);goto err;}
+#endif
+			break;
+			case 0x6c:
+			case 0x6c64: /* lower-dense */
+			{
+				should_generate_dense = - rsb__util_atoi(optarg); // FIXME ! PROBLEMS
+			}
+			break;
+			case 0x6c696b77:
+#if RSB_WITH_LIKWID
+				want_likwid = RSB_BOOL_TRUE;
+				#else /* RSB_WITH_LIKWID */
+				#endif /* RSB_WITH_LIKWID */
+			break;
+			case 0x6c6c:
+			{
+				should_generate_lband = rsb__util_atoi(optarg); // FIXME ! PROBLEMS
+				if(should_generate_uband==-1)should_generate_uband=0;
+			}
+			break;
+			case 0x7575:
+			{
+				should_generate_uband = rsb__util_atoi(optarg); // FIXME ! PROBLEMS
+				if(should_generate_lband==-1)should_generate_lband=0;
+			}
+			break;
+			case 0x6464: /* gen-diag */
+			{
+				should_generate_uband = 0;
+				should_generate_lband = 0;
+				should_generate_dense = rsb__util_atoi(optarg); // FIXME ! PROBLEMS
+			}
+			break;
+			case 0xbabb2:
+			{
+				want_generated_spacing = rsb__util_atoi(optarg);
+			}
+			break;
+			case 0x6e697270:
+			want_only_star_scan = RSB_BOOL_TRUE;
+			break;
+			case 0x64: /* dense */
+			{
+				/* should_generate_dense = rsb__util_atoi(optarg); */  // FIXME ! PROBLEMS
+				int sargs = sscanf(optarg,"%dx%d",&should_generate_dense,&should_generate_dense_nc);
+				if( should_generate_dense_nc == 0)
+					should_generate_dense_nc = should_generate_dense;
+				/* RSBENCH_STDOUT("# Requested generation of a %d by %d matrix\n",should_generate_dense,should_generate_dense_nc); */
+			}
+			break;
+			/* FIXME : please note that specifying -r or -c two or more times will cause memory leaks */
+			case 0x72:/* r */
+			brs=optarg;
+			break;
+			case 0x63: /* c */
+			bcs=optarg;
+			break;
+			case 0x42: /* oski : B */
+#ifdef RSB_WANT_OSKI_BENCHMARKING 
+			want_oski_bench = RSB_BOOL_TRUE;
+#else /* RSB_WANT_OSKI_BENCHMARKING */
+			RSB_ERROR("Sorry, OSKI comparative benchmarking was opted out at compile time\n");
+			goto err;
+#endif /* RSB_WANT_OSKI_BENCHMARKING */
+			break;
+			case 0x4C: /* MKL : L */
+#if RSB_WANT_MKL
+			want_mkl_bench = RSB_BOOL_TRUE;
+#else /* RSB_WANT_MKL */
+			RSB_ERROR("Sorry, MKL comparative benchmarking was opted out at compile time\n");
+			goto err;
+#endif /* RSB_WANT_MKL */
+			break;
+			case 0x61617463:
+			g_allow_any_tr_comb = RSB_BOOL_TRUE;
+			break;
+			case 0x51: /* Q (do not ask me why) */
+			g_all_flags = 1;
+			break;
+			case 0x44044: /* D */
+			dumpout = 1;
+			break;
+			case 0x5040: /*  */
+			transAo = rsb__do_transposition_from_char(*optarg);	/* */
+			break;
+			case 0x4150:
+			tn = 2;
+			break;
+			case 0x616c6c74:
+			tn = 3;
+			break;
+			case 0x5050: /*  */
+			transAo = rsb__do_transpose_transposition(transAo);
+			break;
+			case 0x5051: /*  */
+			transAo = RSB_TRANSPOSITION_N;
+			break;
+			case 0x6e726873: /*  */
+#if 0
+			nrhs = rsb__util_atoi(optarg);
+			/* if(nrhs>1){ RSB_ERROR("Sorry, nrhs > 1 still unsupported!\n"); goto err; } */
+#else
+			nrhss = optarg;
+			if(RSB_SOME_ERROR(rsb__util_get_bx_array(nrhss,&nrhsn,&nrhsa)))
+				{RSB_ERROR(RSB_ERRM_ES);goto err;}
+#endif
+
+			break;
+			case 0x5454: /*  */
+			want_transpose = !want_transpose;
+			break;
+			case 0x44047: /* DG */
+			dump_graph_file = optarg;
+			break;
+			case 0x49049: /* I */
+			dumpout_internals = 1;
+			break;
+			case 0x6d656578: /* meex */
+			merge_experimental = rsb__util_atoi(optarg);
+			RSB_ASSIGN_IF_ZERO(merge_experimental,RSB_CONST_MS_AT_AUTO_STEPS);
+			break;
+			case 0x73706578: /* spex */
+			split_experimental = rsb__util_atoi(optarg);
+			RSB_ASSIGN_IF_ZERO(split_experimental,RSB_CONST_MS_AT_AUTO_STEPS);
+			break;
+			case 0x6d736578: /* msex */
+			merge_experimental = split_experimental = rsb__util_atoi(optarg);
+			RSB_ASSIGN_IF_ZERO(merge_experimental,RSB_CONST_MS_AT_AUTO_STEPS);
+			RSB_ASSIGN_IF_ZERO(split_experimental,RSB_CONST_MS_AT_AUTO_STEPS);
+			break;
+			case 0x4444 : /* DD */
+			do_perform_ddc = RSB_BOOL_TRUE;
+			break;
+			case 0x444444 : /* DDD */
+			n_dumprhs = n_dumpres = rsb__util_atoi(optarg);
+			break;
+			case 0x6563686f: /* echo */
+			{
+				rsb_int argi=0;
+				if(argc>0) printf("#args: %s",argv[0]);
+				for(argi=1;argi<argc;++argi)
+					printf(" %s",argv[argi]);
+				printf("\n");
+			}
+			break;
+			case 0x494B55 : /* ILU */
+			do_perform_ilu = RSB_BOOL_TRUE;
+			break;
+			case 0x696d7061: /* */
+			want_impatiently_soon_pre_results = 1;
+			break;
+			case 0x4343: /* */
+			want_inner_flush = RSB_BOOL_TRUE;
+			break;
+			case 0x434E: /* */
+			want_inner_flush = RSB_BOOL_FALSE;
+			break;
+			case 0x434343: /*  */
+			want_outer_flush = RSB_BOOL_TRUE;
+			break;
+			case 0x43434E: /*  */
+			want_outer_flush = RSB_BOOL_FALSE;
+			break;
+			case 0x776e720a: /*  */
+			want_recursive = RSB_BOOL_FALSE;
+			break;
+			case 0x4D: /* M */
+			g_estimate_matrix_construction_time=1;
+			break;
+			case 0x7A:
+			zsort_for_coo = RSB_BOOL_TRUE;
+			RSBENCH_STDOUT("# WARNING: the now active Z sort feature will only apply to COO submatrices\n");
+			break;
+			case 0x726961:
+			RSBENCH_STDOUT("# setting the reuse I/O arrays option in e.g.: type transitions\n");
+			should_recycle_io = RSB_BOOL_TRUE;
+			break;
+			case 0x6e726961:
+			RSBENCH_STDOUT("# unsetting the reuse I/O arrays option in e.g.: type transitions\n");
+			should_recycle_io = RSB_BOOL_FALSE;
+			break;
+			case 0x4A4A4A:
+			reverse_odd_rows = RSB_BOOL_TRUE;
+			RSBENCH_STDOUT("# WARNING: the row reversal feature only applies to CSR submatrices, and on indices only\n");
+			break;
+			case 0x6F6D6E:
+			usfnbuf = optarg;
+			break;
+			case 0x4A4A:
+			repeat_construction = rsb__util_atoi(optarg);
+			if(repeat_construction<1)
+			{
+				RSB_ERROR("Constructor repetition times should be a positive number!\n");goto err;
+			}
+			break;
+			case 0x4342: /* CB */
+			should_use_cb_method = rsb__util_atoi(optarg);
+			break;
+			case 0x4153: /* AS */
+			should_use_alternate_sort = RSB_BOOL_TRUE;
+			break;
+			case 0x534D: /* SM */
+			subdivision_multiplier = rsb__util_atof(optarg);
+			break;
+#if RSB_WANT_BOUNDED_BOXES
+			case 0x4242: /* BB */
+			want_bounded_box = rsb__util_atoi(optarg);
+			break;
+#endif /* RSB_WANT_BOUNDED_BOXES */
+			case 0x6e6c6d6d: /* nlmm */
+			want_no_leaf_spmm = /*rsb__util_atoi(optarg)*/ -1;
+			break;
+			case 0x636c6d6d: /* wlmm */
+#if RSB_ENABLE_INNER_NRHS_SPMV
+			want_no_leaf_spmm = 0;
+#else
+			RSB_ERROR("Cannot activate the RSB_IO_WANT_LEAF_LEVEL_MULTIVEC option because RSB_ENABLE_INNER_NRHS_SPMV is opted out!\n");goto err;
+#endif
+			break;
+			case 0x4D4D: /* MM */
+			mhs = optarg;
+			break;
+			case 0x6d617275:
+			maxtprt = rsb__util_atof(optarg);
+			maxtprt = RSB_MAX( RSB_TIME_ZERO, maxtprt  );
+			break;
+			case 0x6F: /* o */
+			dumpvec = rsb_dumpvec_res;
+			break;
+			case 0x6F6F: /* o */
+			dumpvec = rsb_dumpvec_rhs;
+			break;
+			case 0x70: /* p */
+			pattern_only = 1;
+			break;
+			case 0x4E: /* N */
+			g_sort_only = 1;
+			break;
+			/* handled by rsb__sample_program_options_get_flags() */
+			case 0x73: /* s */
+				RSB_DEPRECATED("use of the sort flag");
+				/* nothing further to do: the flag was already added by rsb__sample_program_options_get_flags() */
+			break;
+			case 0x7373: /* ss */
+			want_sort_after_load = RSB_BOOL_TRUE;
+			break;
+			case 0x736c736d: /* slsm */
+			want_slsm = RSB_BOOL_TRUE;
+			break;
+			case 0x736c756d: /* slum */
+			want_slum = RSB_BOOL_TRUE;
+			break;
+			case 0x736c686d: /* slhm */
+			want_slhm = RSB_BOOL_TRUE;
+			break;
+			case 0x736c6e75: /* slnu */
+			want_slnu = RSB_BOOL_TRUE;
+			break;
+			case 0x736c6d6: /* slmn */
+			want_slmn = rsb__util_atoi_km10(optarg);
+			break;
+			case 0x736c6e6e: /* slnn */
+			want_slnn = rsb__util_atoi_km10(optarg);
+			break;
+			case 0x736c6d73: /* slms */
+			want_slms = rsb__util_atoi_km2(optarg);
+			break;
+#ifdef RSB_HAVE_REGEX_H
+			case 0x736c6d72: /* slmr */
+			want_slmr = (optarg);
+			break;
+#endif /* RSB_HAVE_REGEX_H */
+			case 0x736c7373: /* slss */
+			want_slss = (optarg);
+			break;
+			case 0x74: /* t */
+			times = rsb__util_atoi(optarg);
+			break;
+			case 0x47: /* G */
+			guess_blocking_test = 1;
+			break;
+			case 0x54: /* T */
+			{
+				const char*toa = optarg;
+				ntypecodes=0; /* this neutralizes former -T ... option */
+				/* if( *optarg == 0x3A || *optarg == 0x2A ) */ /* : or * aka colon or asterisk */
+				if( ( ! isalpha(*optarg) ) || ( strstr(optarg,"all") != NULL ) )
+					toa = RSB_NUMERICAL_TYPE_PREPROCESSOR_SYMBOLS ;
+				for(;*toa;++toa)
+				if(isalpha(*toa))
+				{
+					if(ntypecodes<maxtypes)
+						typecodes[ntypecodes++]=typecode=toupper(*toa);
+					else
+					{
+						RSB_ERROR("Up to %d types supported! P.s.: Use a punctuation symbol to ask for all supported types.\n",maxtypes);
+						goto err;
+					}
+				}
+				typecodes[ntypecodes] = RSB_NUL;
+			}
+			break;
+			case 0x56: /* V */
+			want_verbose = RSB_BOOL_TRUE;
+			want_verbose_tuning ++;
+			break;
+			case 0x4949: /* II */
+			want_io_only = RSB_BOOL_TRUE;
+			break;
+			case 0x66: /* f */
+			filename = optarg;
+#if RSB_RSBENCH_STATIC_FILENAMEA
+#define RSB_RSBENCH_ADDF(FILENAME)	if(filenamen<RSB_RSBENCH_MAX_MTXFILES)filenamea[filenamen++] = (FILENAME); else {errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR("Please increase RSB_RSBENCH_MAX_MTXFILES (%d) and recompile !!\n",RSB_RSBENCH_MAX_MTXFILES);goto err;}
+#else
+ /* FIXME: for some reason, this seems to break e.g.  ./rsbench -oa -Ob --nrhs 1,2 -f pd.mtx -f A.mtx.
+    Of course this is wrong also w.r.t. rsb_calloc/rsb_lib_init, but that is not a problem.
+    Using calloc / realloc does not solve the problem.  */
+#define RSB_RSBENCH_ADDF(FILENAME)		if(filenamen==0) \
+				filenamea = rsb__calloc(sizeof(filenamea)*(filenamen+1)); \
+			else \
+				filenamea = rsb__do_realloc(filenamea, sizeof(filenamea)*(filenamen+1), sizeof(filenamea)); \
+			filenamea[filenamen++] = (FILENAME);
+#endif
+			RSB_RSBENCH_ADDF(filename) /* FIXME */
+			break;
+			case 0x414C: /* AL */
+			alphai = rsb__util_atoi(optarg);
+			break;
+			case 0x4246: /* BE */
+			betai = rsb__util_atoi(optarg);
+			break;
+			case 0x4B: /* K */
+			want_convert = RSB_BOOL_TRUE; /* FIXME: ignoring argument */
+			break;
+			case 0x55: /* U */
+			want_update = RSB_BOOL_TRUE; /* FIXME: ignoring argument */
+			break;
+			case 0x5353: /* SS */
+			want_as_symmetric = RSB_BOOL_TRUE;
+			break;
+			case 0x5555: /* UU */
+			want_as_unsymmetric = RSB_BOOL_TRUE;
+			break;
+			case 0x4F4C54: /* OLT */
+			want_only_lowtri = RSB_BOOL_TRUE;
+			break;
+			case 0x4F4554: /* OUT */
+			want_only_upptri = RSB_BOOL_TRUE;
+			break;
+			case 0x6363:
+			/* this flag activates all interfaced libraries (if any) */
+#if RSB_WANT_MKL
+			want_mkl_bench = RSB_BOOL_TRUE;
+#endif /* RSB_WANT_MKL */
+			break;
+			case 0x6B: /* ncA */
+			want_column_expand = rsb__util_atoi(optarg);
+			break;
+			case 0x6E: /* n */
+			cns = optarg; /* cores (threads) numbers (specification) string */
+			break;
+			case 0x76: /* spmv_uauz */
+			be_verbose = 1;
+			break;
+			case 0x774446:	/* wde */
+			want_getdiag_bench = 1;
+			break;
+			case 0x776E68:	/* wnh */
+			want_nonzeroes_distplot = 1;
+			break;
+			case 0x777246:	/* wre */
+			want_getrow_bench = 1;
+			break;
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+			case 0x707763:	/* wpc */
+			want_perf_counters = 1; /* 1 is what user wants; 2 is for debug purposes */
+			break;
+#endif
+			case 0x77707373:	/* wpss */
+			want_print_per_subm_stats = RSB_BOOL_TRUE;
+			break;
+			case 0x776F6174:	/* woat */
+			want_accuracy_test = 2;
+			break;
+			case 0x776e7274:	/* wnrt */
+			want_autotuner = RSB_TIME_ZERO;
+			wai=wat=0;
+			want_autotuner = merge_experimental = split_experimental = RSB_NEGATED_EXAGGERATED_TUNER_TIMES;
+			break;
+			case 0x7772740a:	/* wrt */
+			/* want_autotuner = rsb__util_atof(optarg); */
+			{
+				char wavv = 0x0;
+				int sargs = sscanf(optarg,"%lfs%dx%dt%c%c",&want_autotuner,&wai,&wat,&wav,&wavv);
+
+				if(!*optarg)
+					sargs = 0;
+				RSBENCH_STDOUT(" Passed %d arguments via autotuning string \"%s\" (an empty string requests defaults)\n",sargs,optarg);
+				if(sargs < 0)
+				{
+					RSBENCH_STDOUT("Wrong autotuning string detected!\n");
+					rsb_test_help_and_exit(argv[0],options, 0);
+					exit(0);
+				}
+				switch(sargs)
+				{
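+					/* deliberate fall-through: every field the sscanf() above did not match receives its default below */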
+					case(EOF):
+					case(0):
+						want_autotuner = 10.0;
+					case(1):
+						wai = 1;
+					case(2):
+						wat = 0;
+					case(3):
+						wav = 0;
+					case(4):
+						wavv = 0;
+					case(5):
+					break;
+				}
+				/* RSBENCH_STDOUT("Got an autotuning string: %lfs%dx%dt%c%c\n",want_autotuner,wai,wat,wav,wavv); */
+				if(toupper(wav)==0x56) /* V */
+					wavf = RSB_AUT0_TUNING_VERBOSE;
+				else
+					wavf = RSB_AUT0_TUNING_SILENT ;
+				if(toupper(wavv)==0x56) /* V */
+					wavf++;
+				if(wai>RSB_CONST_MAX_TUNING_ROUNDS)
+				{
+					RSBENCH_STDOUT("Restricting the number of tuning rounds to %d (%d is too much!).\n",RSB_CONST_MAX_TUNING_ROUNDS,wai);
+					wai = RSB_CONST_MAX_TUNING_ROUNDS;
+				}
+				RSBENCH_STDOUT("Will invoke autotuning for ~%lf s x %d rounds, specifying verbosity=%d and threads=%d. (>0 means no structure tuning; 0 means only structure tuning, <0 means tuning of both with (negated) thread count suggestion).\n",want_autotuner,wai,wavf,wat);
+			}
+			want_mkl_autotuner = want_autotuner;
+			break;
+#if RSB_HAVE_METIS
+			case 0x776d6272:	/* wmbr */
+			want_wmbr = RSB_BOOL_TRUE;
+			break;
+#endif
+			case 0x776d6174:	/* wmat */
+			sscanf(optarg,"%lf",&want_mkl_autotuner);
+			want_mkl_autotuner = RSB_MAX(1.0,want_mkl_autotuner); /* FIXME: actual value is unimportant as long as it is positive ! */
+			break;
+			case 0x776d6f62:	/* wmob */
+			mib = 1;
+			break;
+			case 0x776174:	/* wat */
+			want_accuracy_test = 1;
+			break;
+			case 0x775563:
+			want_unordered_coo_bench = RSB_BOOL_TRUE;
+			break;
+			case 0x767646:	/* wae */
+			want_ancillary_execs = RSB_BOOL_TRUE;
+			break;
+			case 0x42767646:	/* nwae */
+			want_ancillary_execs = RSB_BOOL_FALSE;
+			break;
+			case 0x77:	/* w */
+			b_w_filename = optarg;
+			break;
+			case 0x63777273:	/* wcsr */
+			csr_w_filename = optarg;
+			break;
+			case 0x77707266:
+			fprfn = optarg;
+			want_perf_dump = RSB_BOOL_TRUE;
+			if(optarg && !*optarg)
+				fprfn = NULL;
+			break;
+			case 0x776e7072:
+			fprfn = NULL;
+			want_perf_dump = RSB_BOOL_FALSE;
+			break;
+			case 0x77707261:
+			apprfn = optarg;
+			break;
+			case 0x77707270:
+			ppprfn = optarg;
+			break;
+			case 0x64697a65 :	/* dize */
+			RSB_DO_FLAG_ADD(flags_o,RSB_FLAG_DISCARD_ZEROS);
+			break;
+			case 0x68: /* h */
+			/* should use rsb_test_help_and_exit */
+			RSBENCH_STDERR(
+				"%s "RSB_INFOMSG_SAK".\n"
+				"You can use it to perform sparse matrix - unitary vector multiplication, "
+				"specifying the blocking parameters, the times to perform multiplication.\n"
+				"\n"
+				"Additional debugging flags (-d, -p) are present.\n"
+				"\n"
+				"Usage : %s [OPTIONS]\n where OPTIONS are taken from "
+				"[ -f filename ] \n"
+				"[ -F matrix_storage=[b|c|bc] ] \n"
+				"[ -r br ] \n"
+				"[ -c bc ] \n"
+				"[ -t TIMES ]\n"
+				"[ -n OPENMP_THREADS ]\n"
+				"[ -T ( S | D | I | C ) /* float, double, integer, character*/ ] \n"
+				"[ -s /* will internally sort out nnzs */ ] \n"
+				"[ -p /* will set to 1 nonzeros */ ] \n"
+				"[-d /* if debugging on */]: \n"
+				"[-A /* for auto-blocking */]: \n"
+				"[ -h ] \n"
+				"\n"
+				"please note that not all of the suggested numerical types could be compiled in right now and/or work well.default is double.\n"
+				"\n"
+				"\n"
+				"e.g.: %s -f raefsky4.mtx -t 10 -T :   # 10 times for each of the supported numerical types\n",
+				argv[0],
+				argv[0],
+				argv[0]);
+			rsb_test_help_and_exit(argv[0],options, 0);
+			exit(0);
+		}
+	}
+
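+	/* Post-parse fixups: reconcile recursion flags and treat any remaining command-line arguments as matrix files. */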
+	if( (!RSB_DO_FLAG_HAS(flags_o,RSB_FLAG_QUAD_PARTITIONING)) && want_recursive != RSB_BOOL_FALSE )
+	{
+		RSB_WARN("Assuming a recursive matrix structure is requested...\n");
+		RSB_DO_FLAG_ADD(flags_o,RSB_FLAG_QUAD_PARTITIONING);
+	}
+	for (c = optind; c < argc; c++)                                                     
+	{
+		RSB_RSBENCH_ADDF(argv[c])
+	}
+	if(want_verbose == RSB_BOOL_TRUE)
+	{
+		rsb_char_t cbuf[RSB_MAX_COMPILE_COMMAND_LENGTH];
+		rsb__echo_timeandlabel(" beginning run at ","\n",&st);
+		rsb__echo_cargs(argc, argv);
+		errval = rsb__do_lib_get_info_str(0, &cbuf[0], sizeof(cbuf)-1);
+		if(RSB_SOME_ERROR(errval))
+			errval = RSB_ERR_NO_ERROR;
+		else
+			RSBENCH_STDOUT("# compiled with: %s\n",cbuf);
+	}
+	printf("# average timer granularity: %2.3lg s\n",tinf);
+	if(want_perf_dump)
+	{
+		if(!fprfn)
+		{
+			rsb__impcdstr(fprfnb+strlen(fprfnb),"rsbench_pr",".rpr",ppprfn,apprfn);
+			fprfn = fprfnb;
+		}
+		if(!cprfn)
+			rsb__sprintf(cprfnb,"%s.tmp",fprfn),
+			cprfn = cprfnb;
+		printf("# Will write a final performance record to file %s and periodic checkpoints to %s\n",fprfn,cprfn);
+	}
+	if( maxtprt > RSB_TIME_ZERO )
+		printf("# If program run time will exceed %2.3lg s, will attempt early termination.\n",maxtprt );
+
+	RSBENCH_STDOUT("# will %s""perform ancillary tests.\n", want_ancillary_execs ?"":"NOT ");
+	RSBENCH_STDOUT("# will flush cache memory: %s between each operation measurement series, and %s between each operation.\n", want_outer_flush?"":"NOT", want_inner_flush?"":"NOT");
+	RSBENCH_STDOUT("# will %s any zero encountered in the matrix.\n", ( RSB_DO_FLAG_HAS(flags_o,RSB_FLAG_DISCARD_ZEROS) )?"discard":"keep");
+	if( nrhsa == NULL ) nrhsa = &nrhs;
+	if( incXa == NULL ) incXa = &incX;
+	if( incYa == NULL ) incYa = &incY;
+	if(want_likwid == RSB_BOOL_TRUE){RSB_LIKWID_MARKER_INIT;}
+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+	if(ntypecodes==0)
+		typecode = RSB_NUMERICAL_TYPE_DOUBLE ;
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+	if(ntypecodes==0)
+	{
+		typecodes[ntypecodes++] = typecode;
+		typecodes[ntypecodes] = RSB_NUL;
+	}
+
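+	/* Register the requested tuning knobs as key/value pairs to be passed to rsb_lib_init() via iop. */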
+	io.n_pairs=0;
+	if(should_use_alternate_sort)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&should_use_cb_method;
+		io.keys[io.n_pairs]=RSB_IO_WANT_SORT_METHOD;
+		io.n_pairs++;
+	}
+	if(should_use_cb_method!=0)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&should_use_cb_method;
+		io.keys[io.n_pairs]=RSB_IO_WANT_CACHE_BLOCKING_METHOD;
+		io.n_pairs++;
+	}
+	if(mhs!=NULL)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&mhs;
+		io.keys[io.n_pairs]=RSB_IO_WANT_MEMORY_HIERARCHY_INFO_STRING;
+		io.n_pairs++;
+	}
+	if(subdivision_multiplier!=0.0)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&subdivision_multiplier;
+		io.keys[io.n_pairs]=RSB_IO_WANT_SUBDIVISION_MULTIPLIER;
+		io.n_pairs++;
+	}
+#if RSB_WANT_BOUNDED_BOXES
+	if(want_bounded_box==0)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&want_bounded_box;
+		io.keys[io.n_pairs]=RSB_IO_WANT_BOUNDED_BOX_COMPUTATION;
+		io.n_pairs++;
+	}
+#endif /* RSB_WANT_BOUNDED_BOXES */
+	if(want_no_leaf_spmm!=0)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&want_no_leaf_spmm;
+		io.keys[io.n_pairs]=RSB_IO_WANT_LEAF_LEVEL_MULTIVEC;
+		io.n_pairs++;
+	}
+
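+	/* For reproducibility, echo RSB_-prefixed and thread-affinity-related environment variables. */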
+#ifdef RSB_HAVE_UNISTD_H
+{
+	extern char **environ;
+	char **me = NULL;
+	rsb_int_t rpevc = 0; /* RSB_ prefixed environment variables count */
+
+	for(me=environ;*me;++me)
+		if( strstr(*me,"RSB_") == *me )
+			rpevc++;
+
+	if( rpevc )
+	{
+		RSB_STDOUT("# The user specified %d RSB_ prefixed environment variables:\n",rpevc);
+		for(me=environ;*me;++me)
+			if( strstr(*me,"RSB_") == *me )
+				RSB_STDOUT("#  export %s\n",*me);
+	}
+}
+#endif /* RSB_HAVE_UNISTD_H */
+	
+	
+	if( rsb__getenv("KMP_AFFINITY") )
+		RSB_STDOUT("# export KMP_AFFINITY=%s\n",rsb__getenv("KMP_AFFINITY"));
+	if( rsb__getenv("OMP_PROC_BIND") )
+		RSB_STDOUT("# export OMP_PROC_BIND=%s\n",rsb__getenv("OMP_PROC_BIND"));
+	if( rsb__getenv("OMP_NUM_THREADS") )
+		RSB_STDOUT("# export OMP_NUM_THREADS=%s\n",rsb__getenv("OMP_NUM_THREADS"));
+
+	if( want_verbose != RSB_BOOL_FALSE )
+		RSBENCH_STDOUT("# user specified a verbosity level of %d (each --verbose occurrence counts +1)\n",want_verbose_tuning );
+	else
+		RSBENCH_STDOUT("# user did not specify any verbosity level (each --verbose occurrence counts +1)\n");
+
+	if((errval = rsb_lib_init(iop))!=RSB_ERR_NO_ERROR)
+	{
+		RSB_ERROR("Error while initializing the library.");
+		goto err;
+	}
+#if RSB_WANT_PERFORMANCE_COUNTERS_IN_RSBENCH 
+	if((errval = rsb_perf_counters_init())!=RSB_ERR_NO_ERROR)
+	{
+		RSBENCH_STDERR("problem initializing performance counters (rsb_perf_counters_init gave %d)\n",(int)errval);
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+#endif
+
+	if( RSB_MKL_APPROPRIATE_AT_TIME_SPEC( want_autotuner ) || RSB_MKL_APPROPRIATE_AT_TIME_SPEC( merge_experimental ) || RSB_MKL_APPROPRIATE_AT_TIME_SPEC( split_experimental ) )
+	{
+		RSB_STDOUT("# auto-tuning oriented output implies  times==0 iterations and sort-after-load.\n");
+		times = 0;
+		/* if(want_verbose) */
+		want_impatiently_soon_pre_results = 1;
+		want_sort_after_load = RSB_BOOL_TRUE;
+	}
+	else
+	if( times < 1 )
+	{
+		RSB_STDOUT("# The iteration times should be specified as a positive number!\n");
+		RSB_ERROR(RSB_ERRM_ES);
+		goto err;
+	}
+	else
+		RSB_STDOUT("# Will measure on times=%d iterations.\n",times);
+
+	if( 0 == filenamen )
+#if RSB_RSBENCH_STATIC_FILENAMEA
+	       	filenamea[0] = fnbufp[0];
+#else
+	       	filenamea = &fnbufp;
+#endif
+	filenamen = RSB_MAX(1,filenamen);
+
+	if(cns)
+	{
+		ca = NULL;
+		cn = 0;
+		if(RSB_SOME_ERROR(rsb__util_get_bx_array(cns,&cn,&ca)))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+	}
+	else
+	{
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+		/* #define rsb_get_max_threads omp_get_max_threads */
+		cn = 1;
+		ca_[0] = omp_get_max_threads ();
+		RSBENCH_STDOUT("# User did not specify threads; assuming %d.\n", cn );
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+	}
+
+#if RSB_WANT_MKL
+	if( RSB_MKL_APPROPRIATE_AT_TIME_SPEC( want_mkl_autotuner ) )
+		want_mkl_bench_csr = RSB_BOOL_FALSE;
+#endif /* RSB_WANT_MKL */
+
+	RSBENCH_STDOUT("# Using alpha=%d beta=%d for rsb_spmv/rsb_spsv/rsb_spmm/rsb_spsm.\n",alphai,betai);
+
+	if(want_perf_dump) 
+		rsb__pr_init(&rspr, NULL, filenamen, cn, incXn, incYn, nrhsn, ntypecodes, tn);
+
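+	/* Main benchmark loop nest: all combinations of matrix file, incX, incY, nrhs and numerical typecode. */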
+	for(     filenamei=0;     filenamei<filenamen+want_impatiently_soon_pre_results  ;++filenamei     )
+	{
+		if( filenamea && ( filenamea[filenamei] != filename_old) && filename_old && want_impatiently_soon_pre_results && want_perf_dump && filenamei>0 && filenamen>1) 
+		{
+			int filenameif = filenamei-1;
+			RSBENCH_STDOUT("# ====== BEGIN Impatient results record for matrix %d/%d: %s.\n",filenamei,filenamen,rsb__basename(filename_old));
+			errval = rsb__pr_dump_inner(rspr, filenamea, ca, incXa, incYa, nrhsa, typecodes, NULL,&filenameif, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, RSB_FLAG_NOFLAGS, RSB_FLAG_NOFLAGS, NULL);
+			RSBENCH_STDOUT("# ======  END  Impatient results record for matrix %d/%d: %s.\n",filenamei,filenamen,rsb__basename(filename_old));
+			if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+			if( filenameif > 0 && filenameif < filenamen-1) /* not after first and not at last */
+				RSBENCH_STDOUT("# ====== BEGIN Impatient summary record for the %d/%d matrices so far.\n", filenameif+1,filenamen),
+				errval = rsb__pr_dump_inner(rspr, filenamea, ca, incXa, incYa, nrhsa, typecodes, NULL, NULL,&filenameif, NULL, NULL, NULL, NULL, NULL, NULL, NULL, RSB_FLAG_NOFLAGS, RSB_FLAG_NOFLAGS, NULL),
+				RSBENCH_STDOUT("# ======  END  Impatient summary record for the %d/%d matrices so far.\n", filenameif+1,filenamen);
+			if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+			errval = rsb__pr_save(cprfn, rspr, filenamea, ca, incXa, incYa, nrhsa, typecodes, NULL, RSB_BOOL_TRUE );
+			if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}
+
+		if( filenamei >= filenamen )
+			continue; /* temporary: only for the want_impatiently_soon_pre_results trick */
+
+		if(filenamea)
+		{
+			filename = filenamea[filenamei];
+		}
+
+		if(filenamen>1)
+		{
+			RSBENCH_STDOUT("# multi-file benchmarking (file %d/%d) -- now using %s\n",filenamei+1,filenamen,rsb__basename(filename));
+		}
+
+	for(     incXi=0;     incXi<incXn     ;++incXi     )
+	{
+	for(     incYi=0;     incYi<incYn     ;++incYi     )
+	{
+	for(     nrhsi=0;     nrhsi<nrhsn     ;++nrhsi     )
+	{
+	for(typecodesi=0;typecodesi<ntypecodes;++typecodesi)
+	{
+	rsb_flags_t flags = flags_o;
+	rsb_thread_t cl; /* cores number last (overrides cn for this typecode cycle) */
+	typecode = typecodes[typecodesi];
+
+	if(ntypecodes>1)
+	{
+		RSBENCH_STDOUT("# multi-type benchmarking (%s) -- now using typecode %c (last was %c).\n",typecodes,typecode,typecode_old);
+		if( RSB_MATRIX_UNSUPPORTED_TYPE ( typecode ) )
+		{
+			RSBENCH_STDOUT("# Skipping unsupported type \"%c\" -- please choose from \"%s\".\n",typecode,RSB_NUMERICAL_TYPE_PREPROCESSOR_SYMBOLS );
+			continue;
+		}
+	}
+
+	nrhs = nrhsa[nrhsi];
+	if( nrhsn > 1 && nrhss )
+	{
+		RSBENCH_STDOUT("# multi-nrhs benchmarking (%s) -- now using nrhs %d.\n",nrhss,nrhs);
+	}
+	incX = incXa[incXi];
+	incY = incYa[incYi];
+	if(incXn>1)
+	{
+		RSBENCH_STDOUT("# multi-incX benchmarking (%d/%d) -- now using incX=%d.\n",incXi+1,incXn,incX);
+	}
+	if(incYn>1)
+	{
+		RSBENCH_STDOUT("# multi-incY benchmarking (%d/%d) -- now using incY=%d.\n",incYi+1,incYn,incY);
+	}
+
+	if( want_only_star_scan )
+		if( RSB_MIN(incXi,1) + RSB_MIN(incYi,1) + RSB_MIN(nrhsi,1) > 1 ) /* two or more exceed index one */
+		{
+			RSBENCH_STDOUT("# Skipping a case with incX=%d incY=%d nrhs=%d.\n",incX,incY,nrhs);
+			goto frv;
+		}
+ 	RSBENCH_STDOUT("# so far, program took %.3lfs of wall clock time; ancillary tests %.3lfs; I/O %.3lfs; checks %.3lfs; conversions %.3lfs; rsb/mkl tuning %.3lfs/%.3lfs ",totprt + rsb_time(),totatt,totiot,totht,totct,tottt,totmt);
+	/* rsb__getrusage(); */ /* FIXME: new (20140727) */
+#ifndef RSB_DISABLE_ALLOCATOR_WRAPPER
+	RSBENCH_STDOUT("( allocated_memory:%zd allocations_count:%zd)",rsb_global_session_handle.allocated_memory,rsb_global_session_handle.allocations_count);
+#endif
+	RSBENCH_STDOUT(".\n"); /* FIXME: this takes too much space here ! */
+
+	if(cns)
+	{
+		cc = ca[ci];
+	}
+	cl=cn;
+	if(bcs)
+		if(RSB_SOME_ERROR(rsb__util_get_bx_array(bcs,&bcl,&bcv)))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+	if(brs)
+		if(RSB_SOME_ERROR(rsb__util_get_bx_array(brs,&brl,&brv)))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+
+
+
+	if(RSB_SOME_ERROR(errval = rsb__fill_with_ones(beta,typecode,1,1))){ RSB_ERROR(RSB_ERRM_ES);goto err;}
+	if(RSB_SOME_ERROR(errval = rsb__fill_with_ones(alpha,typecode,1,1))){ RSB_ERROR(RSB_ERRM_ES);goto err;}
+	/* FIXME: the following collides with the former */
+	rsb__util_set_area_to_converted_integer(alphap,typecode,alphai);
+	rsb__util_set_area_to_converted_integer(betap ,typecode,betai);
+
+#ifdef RSB_WANT_OSKI_BENCHMARKING 
+	/* FIXME : note that this option is not compatible with g_sort_only .. */
+        oski_Init();
+#endif /* RSB_WANT_OSKI_BENCHMARKING */
+	g_debug = ((flags & RSB_FLAG_SHOULD_DEBUG) != 0);
+
+	if(g_sort_only)RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORT_INPUT);
+
+	if(typecode==-1)
+	{
+		RSBENCH_STDERR("error : please recompile with double precision floating point numbers supported! \n");
+		return RSB_ERR_GENERIC_ERROR;
+	}
+	rsb__util_set_area_to_converted_integer(&pone[0],typecode,+1);
+
+
+
+	if(brl<1) { /* this is a hack */ brv = rua; brl = RSB_ROWS_UNROLL_ARRAY_LENGTH;}
+	if(bcl<1) { /* this is a hack */ bcv = cua; bcl = RSB_COLUMNS_UNROLL_ARRAY_LENGTH;}
+
+	if(RSB_MATRIX_UNSUPPORTED_TYPE(typecode))
+	{
+		RSBENCH_STDERR("This numerical type is not supported.\n");
+		goto err;
+	}
+
+	/* CONDITIONALLY, GENERATING A MATRIX */
+	if(should_generate_dense!=0)
+	{
+		rsb_nnz_idx_t dim = RSB_FABS(should_generate_dense);
+		rsb_nnz_idx_t spacing = want_generated_spacing>1?want_generated_spacing:1;
+		
+		if(((should_generate_lband>-1) || (should_generate_uband>-1)) && should_generate_dense>0)
+		{
+			rsb__sprintf(fnbuf,"banded-%dx%d-%d+%d-%dnz-spaced-%d",dim*spacing,dim*spacing,should_generate_lband,should_generate_uband,RSB_NNZ_OF_BANDED(dim,should_generate_lband,should_generate_uband),spacing);
+		}
+		else
+		{
+		if(want_generated_spacing>0)
+		{
+			if(should_generate_dense>0)
+				rsb__sprintf(fnbuf,"dense-%dx%d-%dnz",dim*spacing,should_generate_dense_nc*spacing/*dim*spacing*/,dim*dim);
+			else
+				rsb__sprintf(fnbuf,"lower-%dx%d-%dnz-spaced-%d",dim*spacing,dim*spacing,(dim*(dim-1))/2+dim,spacing);
+		}
+		else
+		{
+			if(should_generate_dense>0)
+				rsb__sprintf(fnbuf,"dense-%dx%d-%dnz",dim*spacing,should_generate_dense_nc*spacing/*dim*spacing*/,dim*should_generate_dense_nc);
+			else
+				rsb__sprintf(fnbuf,"lower-%dx%d-%dnz",dim*spacing,dim*spacing,(dim*(dim-1))/2+dim);
+		}
+		}
+		if(want_incX)
+				rsb__sprintf(fnbuf+strlen(fnbuf),"-incX-%d",incX);
+		if(want_incY)
+				rsb__sprintf(fnbuf+strlen(fnbuf),"-incY-%d",incY);
+/*		rsb__sprintf(fnbuf,"dense-%dx%d-%dnz",dim,dim,dim*dim);*/
+/*		rsb__sprintf(fnbuf,"dense-%dx%d",dim,dim);*/
+		filename=&(fnbuf[0]);
+	}
+
+	if(usfnbuf)
+		filename=usfnbuf;
+
+	/* CONDITIONALLY, READING A MATRIX FROM FILE */
+if(filename || b_r_filename)
+{
+
+	rsb_blk_idx_t M_b=0;/* was 0 */
+	rsb_blk_idx_t K_b=0;
+	rsb_nnz_idx_t i=0;
+
+	rsb_coo_idx_t *p_r=NULL,*p_c=NULL;	/* FIXME : get rid of these */
+	void *lhs=NULL,*rhs=NULL;
+	int bcvi=0;
+	int brvi=0;
+	rsb_time_t frt = RSB_TIME_ZERO;
+
+	if( filename != filename_old )
+	{
+		RSB_CONDITIONAL_FREE(IA);
+		RSB_CONDITIONAL_FREE(JA);
+		RSB_CONDITIONAL_FREE(VA);
+	}
+	if(!should_recycle_io) { RSB_DEBUG_ASSERT( VA == NULL ); }
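+	/* With I/O recycling enabled and an unchanged file, reuse (possibly type-converting) the arrays loaded in the previous iteration. */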
+	if( should_recycle_io && VA && filename == filename_old )
+	{
+		flags = r_flags;
+		if( typecode != typecode_old )
+		{
+			void *VA_ = rsb__malloc_vector(nnz,typecode);
+			errval = rsb__do_copy_converted_scaled(VA, VA_, NULL, typecode_old, typecode, nnz, RSB_DEFAULT_TRANSPOSITION);
+			if(RSB_SOME_ERROR(errval)) { RSB_ERROR(RSB_ERRM_ES);goto err; }
+			RSB_CONDITIONAL_FREE(VA);
+			VA = VA_;
+			RSBENCH_STDOUT("# Reusing type converted (%c->%c) arrays from last iteration instead of reloading matrix file.\n",typecode_old,typecode);
+			typecode_old = typecode;
+		}
+		else
+		{
+			RSBENCH_STDOUT("# Reusing same type     (type %c) arrays from last iteration instead of reloading matrix file.\n",typecode);
+		}
+		goto have_va_ia_ja;
+	}
+	if((!should_generate_dense) && (!b_r_filename))
+	{
+		rsb_bool_t is_symmetric = RSB_BOOL_FALSE;
+		rsb_bool_t is_hermitian = RSB_BOOL_FALSE;
+		size_t fsz = rsb_sys_filesize(filename);
+
+		frt = - rsb_time();
+
+#ifdef RSB_HAVE_REGEX_H
+		if( want_slmr && rsb_regexp_match(rsb__basename(filename),want_slmr) == RSB_BOOL_TRUE )
+		{
+			RSB_STDOUT("# skipping loading matrix file %s, because it matches regex /%s/.\n",filename,want_slmr);
+			goto nfnm;
+		}
+#endif /* RSB_HAVE_REGEX_H */
+		if( want_slss && ( strstr( rsb__basename(filename), want_slss ) != NULL ) )
+		{
+			RSB_STDOUT("# skipping loading matrix file %s, because it matches substring %s.\n",filename,want_slss);
+			goto nfnm;
+		}
+		/* if(RSB_SOME_ERROR(rsb__do_util_get_matrix_dimensions(filename,&ncA,&nrA,&nnz,NULL)) ) */
+		if(RSB_SOME_ERROR(rsb__util_mm_info_matrix_f(filename,&nrA,&ncA,&nnz,NULL,&is_symmetric,&is_hermitian,NULL,NULL,NULL,NULL)) )
+		{
+			RSBENCH_STDERR(RSB_ERRMSG_PROIFAMM ": %s ..\n",filename);
+			if( ignore_failed_fio )
+			{
+				RSBENCH_STDERR("Will ignore error and continue with the following files.\n");
+				errval = RSB_ERR_NO_ERROR;
+				goto nfnm;
+			}
+			goto err;
+		}
+		if( want_slnu == RSB_BOOL_TRUE && ( is_hermitian || is_symmetric ) )
+		{
+			RSB_STDOUT("# skipping loading not unsymmetric matrix %s, as requested.\n",filename);
+			goto nfnm;
+		}
+		if( want_slsm == RSB_BOOL_TRUE && is_symmetric )
+		{
+			RSB_STDOUT("# skipping loading symmetric matrix %s, as requested.\n",filename);
+			goto nfnm;
+		}
+		if( want_slhm == RSB_BOOL_TRUE && is_hermitian )
+		{
+			RSB_STDOUT("# skipping loading hermitian matrix %s, as requested.\n",filename);
+			goto nfnm;
+		}
+		if( want_slum == RSB_BOOL_TRUE && !is_symmetric )
+		{
+			RSB_STDOUT("# skipping loading unsymmetric matrix %s, as requested.\n",filename);
+			goto nfnm;
+		}
+		if( want_slmn > 0 && want_slmn <  nnz )
+		{
+			RSB_STDOUT("# skipping loading matrix %s, having %d > %d allowed nonzeroes.\n",filename,nnz,want_slmn);
+			goto nfnm;
+		}
+		if( want_slms > 0 && want_slms <= fsz / 1024 )
+		{
+			RSB_STDOUT("# skipping loading matrix %s, having %zd>=%zd allowed filesize (KiB).\n",filename,fsz,want_slms);
+			goto nfnm;
+		}
+		if( want_slnn > 0 && want_slnn >  nnz )
+		{
+			RSB_STDOUT("# skipping loading matrix %s, having %d < %d allowed nonzeroes.\n",filename,nnz,want_slnn);
+			goto nfnm;
+		}
+	
+		RSB_STDOUT("# reading %s (%zd bytes / %zd "RSB_MEGABYTE_SYM" / %zd nnz / %zd rows / %zd columns / %zd MiB COO) as type %c...\n",rsb__basename(filename),fsz,RSB_DIV(fsz,RSB_MEGABYTE),(size_t)nnz,(size_t)nrA,(size_t)ncA,RSB_DIV(RSB_UTIL_COO_OCCUPATION(nrA,ncA,nnz,typecode),RSB_MEGABYTE),typecode);
+
+		if( ( nrA == ncA ) && ( nrA > 1 ) && ( want_only_lowtri || want_only_upptri ) )
+			nnz += nrA;	/* the loading routine shall allocate nnz+nrA */
+		else
+ 			nnz = 0;	/* the loading routine should determine nnz */
+
+		totiot -= rsb_time();
+		errval = rsb__util_mm_load_matrix_f(filename,&IA,&JA,&VA,&nrA,&ncA,&nnz,typecode,flags,NULL,NULL);
+		totiot += rsb_time();
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSBENCH_STDERR(RSB_ERRMSG_NOTMTXMKT" : %s ..\n",filename);
+			goto err;
+		}
+		else
+		{
+			rsb_bool_t is_lower = RSB_BOOL_FALSE;
+			rsb_bool_t is_upper = RSB_BOOL_FALSE;
+			rsb_bool_t is_vector = RSB_BOOL_FALSE;
+
+			filename_old = filename;
+			typecode_old = typecode;
+
+			frt += rsb_time();
+			RSB_STDOUT("# file input of %s took %6.2lf s (%.0lf nnz, %.0lf nnz/s ) (%.2lf MB/s ) \n",rsb__basename(filename),frt,
+				(((double)nnz)),
+				(((double)nnz)/frt),
+				(((double)rsb_sys_filesize(filename))/(frt*RSB_INT_MILLION))
+			);
+
+			if (want_io_only)
+			{
+				/*  */
+				goto err;
+			}
+
+			if(want_transpose)
+			{
+				RSB_SWAP(rsb_coo_idx_t*,IA,JA);
+				RSB_SWAP(rsb_coo_idx_t,nrA,ncA);
+				flags = rsb__do_flip_uplo_flags(flags);
+			}
+
+			if( nrA==ncA && nrA>1 && ( want_only_lowtri || want_only_upptri ) )
+			{
+				rsb_nnz_idx_t discarded = 0;
+				/*
+				rsb__util_coo_array_set_sequence(IA+nnz,nrA,0,1);
+				rsb__util_coo_array_set_sequence(JA+nnz,nrA,0,1);
+				 */
+				RSB_FCOO_ISET(IA+nnz,0,nrA);
+				RSB_FCOO_ISET(JA+nnz,0,nrA);
+				rsb__fill_with_ones(((rsb_byte_t*)VA)+RSB_SIZEOF(typecode)*nnz,typecode,nrA,1);
+				nnz += nrA;	/* account for the nrA diagonal elements just appended */
+				if( want_only_lowtri )
+				{
+					RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER_TRIANGULAR);
+					errval = rsb_weed_out_non_lowtri(VA,IA,JA,nnz,typecode,NULL,&discarded);
+					RSBENCH_STDOUT("# discarding %d non lower elements of %d.\n",discarded,nnz);
+					nnz-=discarded;
+				}
+				if( want_only_upptri )
+				{
+					RSB_DO_FLAG_ADD(flags,RSB_FLAG_UPPER_TRIANGULAR);
+					errval = rsb_weed_out_non_upptri(VA,IA,JA,nnz,typecode,NULL,&discarded);
+					RSBENCH_STDOUT("# discarding %d non upper elements of %d.\n",discarded,nnz);
+					nnz-=discarded;
+				}
+
+				if(RSB_SOME_ERROR(errval))
+				{RSB_ERROR(RSB_ERRM_ES);goto err;}
+			}
+
+			if(RSB_SOME_ERROR(rsb__util_mm_info_matrix_f(filename,NULL,NULL,NULL,NULL,&is_symmetric,&is_hermitian,NULL,&is_lower,&is_upper,&is_vector) ))
+			{
+				RSBENCH_STDERR(RSB_ERRMSG_PROIFAMM ": %s ..\n",filename);
+				goto err;
+			}
+			if( is_vector )
+			{
+				RSBENCH_STDERR("file %s seems to store a vector\n",filename);
+				goto err;
+			}
+			if(RSB_BOOL_AND(want_as_unsymmetric,want_as_symmetric))
+			{
+				RSBENCH_STDERR("requiring both symmetric and unsymmetric flags is contradictory!\n");
+				goto err;
+			}
+			if(want_as_unsymmetric)
+			{
+				is_symmetric = RSB_BOOL_FALSE;
+				is_hermitian = RSB_BOOL_FALSE;
+			}
+			if(want_as_symmetric)
+			{
+				is_symmetric = RSB_BOOL_TRUE;
+				is_hermitian = RSB_BOOL_TRUE;
+			}
+			if(!RSB_IS_MATRIX_TYPE_COMPLEX(typecode) && is_hermitian)
+			{
+				RSBENCH_STDOUT("# Warning: non complex matrix with hermitian flags! Converting to symmetric!\n");
+				is_hermitian = RSB_BOOL_FALSE;
+				is_symmetric = RSB_BOOL_TRUE;
+			}
+			/* TODO: use rsb__flags_from_props() */
+			if(is_hermitian == RSB_BOOL_TRUE && !RSB_EXPERIMENTAL_EXPAND_SYMMETRIC_MATRICES_BY_DEFAULT)
+			{
+				RSB_DO_FLAG_ADD(flags,RSB_FLAG_HERMITIAN);
+			}
+			if(is_symmetric == RSB_BOOL_TRUE && !RSB_EXPERIMENTAL_EXPAND_SYMMETRIC_MATRICES_BY_DEFAULT)
+			{
+				RSB_DO_FLAG_ADD(flags,RSB_FLAG_SYMMETRIC);
+			}
+
+			if( (!RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER)) && (!RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER)) )
+			{
+				/* is_upper and is_lower as declared in the matrix file */
+				if(is_upper)
+ 					RSB_DO_FLAG_ADD(flags,RSB_FLAG_UPPER);
+				if(is_lower)
+ 					RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER);
+			}
+			RSB_DO_ERROR_CUMULATE(errval,rsb__do_cleanup_nnz(VA,IA,JA,nnz,0,0,nrA,ncA,&nnz,typecode,flags)); /* NEW */
+			if(RSB_SOME_ERROR(errval))
+			{ RSB_ERROR(RSB_ERRM_ES); goto err; }
+			if(want_sort_after_load)
+			{
+				rsb_time_t dt = RSB_TIME_ZERO;
+				dt = - rsb_time();
+				if((errval = rsb__util_sort_row_major_parallel(VA,IA,JA,nnz,nrA,ncA,typecode,RSB_FLAG_NOFLAGS))!=RSB_ERR_NO_ERROR)
+				{ RSB_ERROR(RSB_ERRM_ES); goto err; }
+				dt += rsb_time();
+				RSBENCH_STDOUT("#pre-sorting took %lg s\n",dt);
+				RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORTED_INPUT);
+			}
+#if RSB_HAVE_METIS
+			if(want_wmbr)
+			{
+				/* FIXME: unfinished */
+				rsb_coo_idx_t *perm = NULL,*iperm = NULL,*vwgt = NULL;
+
+				perm  = rsb__calloc(sizeof(rsb_coo_idx_t)*(nrA+1));
+				iperm = rsb__calloc(sizeof(rsb_coo_idx_t)*(nrA+1));
+#if 1
+				vwgt  = rsb__calloc(sizeof(rsb_coo_idx_t)*(nnz));
+				rsb__util_coo_array_set(vwgt,nnz,0);
+#else
+				vwgt  = rsb__clone_area(JA,nnz*sizeof(rsb_coo_idx_t));
+#endif
+				if( !perm || !iperm || !vwgt )
+				{
+					RSB_CONDITIONAL_FREE(iperm);
+					RSB_CONDITIONAL_FREE(perm);
+					RSB_CONDITIONAL_FREE(vwgt);
+				}
+				errval = rsb__util_sort_row_major_parallel(VA,IA,JA,nnz,nrA,ncA,typecode,RSB_FLAG_NOFLAGS);
+				errval = rsb__do_switch_fullword_array_to_compressed(IA,nnz,nrA);
+				RSBENCH_STDOUT("Calling METIS_NodeND\n");
+				/*errval = */ METIS_NodeND(&nrA,IA,JA,vwgt,NULL,perm,iperm); /* the Scotch compatibility wrapper crashes on vwgt==NULL, and returns void */
+				RSBENCH_STDOUT("Exited METIS_NodeND with code %d\n",errval);
+				/* if(errval == METIS_OK) */
+				{
+					RSBENCH_STDOUT("Permuting..\n");
+					errval = rsb__do_switch_compressed_array_to_fullword_coo(IA, nrA, 0, NULL);
+					errval = rsb__do_permute_rows_with_coo_index( IA, perm, nnz);
+					RSBENCH_STDOUT("Permuted.\n");
+					/* 
+					 */
+					for(i=0;i<nrA;++i){ RSB_STDOUT("%d\n",perm[i]);}
+				}
+				RSB_CONDITIONAL_FREE(vwgt);
+				RSB_CONDITIONAL_FREE(perm);
+				RSB_CONDITIONAL_FREE(iperm);
+			}
+			
+#endif /* RSB_HAVE_METIS */
+		}
+	}
+	else
+	if(should_generate_dense!=0)
+	{
+		rsb_nnz_idx_t dim = RSB_FABS(should_generate_dense),spacing=1;
+		if(want_generated_spacing>1)
+			spacing = want_generated_spacing;
+		dim *= spacing;
+
+		if(((should_generate_lband>-1) || (should_generate_uband>-1)) && should_generate_dense>0)
+		{
+			rsb_nnz_idx_t lbw=should_generate_lband,ubw=should_generate_uband;
+			nrA = ncA = dim;
+			errval = rsb__generate_blocked_banded_coo(dim/spacing,spacing,lbw,ubw,&IA,&JA,&VA,&nnz,typecode);
+			if(RSB_SOME_ERROR(errval))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+		}
+		else
+		{
+		if(should_generate_dense>0)
+		{
+			RSB_DEBUG_ASSERT( should_generate_dense_nc != 0 );
+			/* full dense, no diag */
+			nrA = dim;
+			ncA = should_generate_dense_nc * spacing;
+			errval = rsb__generate_dense_full(nrA/spacing,ncA/spacing,spacing,&IA,&JA,&VA,&nnz,typecode);
+			if(RSB_SOME_ERROR(errval))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+		}
+		else
+		{
+			/* trick: lower triangular */
+			nrA=ncA=dim;
+			errval = rsb__generate_dense_lower_triangular_coo(dim/spacing,spacing,&IA,&JA,&VA,&nnz,typecode);
+			if(RSB_SOME_ERROR(errval))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER); /* 20121223	*/
+		}
+		}
+
+		if(want_sort_after_load)	
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORTED_INPUT);
+
+		if(want_as_symmetric)
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_SYMMETRIC);
+	} /* should_generate_dense */
+have_va_ia_ja:
+	RSB_DEBUG_ASSERT( VA != NULL );
+	RSB_DEBUG_ASSERT( IA != NULL );
+	RSB_DEBUG_ASSERT( JA != NULL );
+	r_flags = flags;
+
+	/* CONDITIONALLY, PROCESSING THE INPUT */
+	if(!b_r_filename)
+	{
+		if(want_column_expand)
+		{
+			errval = rsb__do_column_expand(JA,nnz,&ncA,want_column_expand);
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR(RSB_ERRM_ES);
+				goto err;
+			}
+		}
+
+		if( pattern_only )
+			rsb__fill_with_ones(VA,typecode,nnz,1);
+
+		if( dumpout )
+		{
+			errval = rsb__test_print_coo_mm(typecode,flags,IA,JA,VA,nrA,ncA,nnz,RSB_BOOL_TRUE,RSB_DEFAULT_STREAM);
+			//COO equivalent for rsb_file_mtx_save(mtxAp,NULL);
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR(RSB_ERRM_ES);
+				goto err;
+			}
+			goto ret;
+		}
+	}
+#if 1
+	if(want_nonzeroes_distplot)
+	{
+		/* FIXME: Unfinished: printout not adequate ! */
+		/* FIXME: Shall use a separate routine for this! Please regard this code as temporary */
+		rsb_coo_idx_t median_m=0,median_k=0,stdd_m=0,stdd_k=0,nzp_m=nnz/nrA,nzp_k=nnz/ncA;
+		rsb_coo_idx_t*idxv=NULL;
+		rsb_coo_idx_t mm=0;
+		rsb_nnz_idx_t cs=0;
+		rsb_bool_t po = RSB_BOOL_TRUE;
+		const int histres=100;
+		const rsb_char_t*pmsg="\n\nplot \"-\" using 1:2 title \"cumulative %s population (nnz)\"\n";
+		RSBENCH_STDOUT("set xtics rotate\n");
+		RSBENCH_STDOUT("set term postscript eps color\n");
+		RSBENCH_STDOUT("set output \"%s-distplot.eps\"\n", rsb__basename(filename));
+		RSBENCH_STDOUT("set multiplot layout 1,2 title \"%s (%d x %d, %d nnz)\"\n", rsb__basename(filename),nrA,ncA,nnz);
+
+		outnri = rhsnri = ndA = RSB_MAX(nrA,ncA);
+
+		mm=nrA<histres?1:nrA/histres;
+		idxv = rsb__calloc(sizeof(rsb_coo_idx_t)*(ndA));
+		if(!idxv)
+			goto nohists;
+
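+		/*
+		 * What follows: idxv[] accumulates the per-row nonzero counts; the
+		 * emitted gnuplot data is the cumulative nnz population over rows
+		 * (and later, columns); median_m/median_k end up holding the row or
+		 * column index at which half of the nonzeroes has been accumulated,
+		 * and stdd_m/stdd_k the (integer-truncated) sample standard deviation
+		 * of the per-row / per-column counts around the mean nzp_m / nzp_k.
+		 */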
+		for(i=0;i<nnz;++i)
+			if(IA[i] < nrA && IA[i] >= 0 )
+				idxv[IA[i]]++;
+		for(i=0;i<nrA;++i)
+			if(median_m<nnz/2)
+				{ median_m+=idxv[i]; }
+			else
+				{ break; }
+		median_m=i; 
+
+		RSB_STDOUT(pmsg,"rows");
+		if(po) for(i=0;i<nrA;++i){ cs+=idxv[i]; if(i%mm==0)RSB_STDOUT("%d %d\n",i,cs);}
+		RSB_STDOUT("e\n");
+
+		mm=ncA<histres?1:ncA/histres;
+
+		for(i=0;i<nrA;++i)
+			stdd_m+=(idxv[i]-nzp_m)*(idxv[i]-nzp_m);
+		stdd_m=nrA<2?0:sqrt(((double)stdd_m)/(double)(nrA-1));
+
+
+		for(i=0;i<ncA;++i)
+			idxv[i]=0;
+
+		for(i=0;i<nnz;++i)
+			if(JA[i] < ncA && JA[i] >= 0 )
+				idxv[JA[i]]++;
+		for(i=0;i<ncA;++i)
+			if(median_k<nnz/2)
+				{ median_k+=idxv[i]; }
+			else
+				{ break; }
+		median_k=i; 
+
+		cs=0;
+		RSB_STDOUT(pmsg,"columns");
+		if(po) for(i=0;i<ncA;++i){ cs+=idxv[i]; if(i%mm==0)RSB_STDOUT("%d %d\n",i,cs);}
+		RSB_STDOUT("e\n");
+
+		for(i=0;i<ncA;++i)
+			stdd_k+=(idxv[i]-nzp_k)*(idxv[i]-nzp_k);
+		stdd_k=ncA<2?0:sqrt(((double)stdd_k)/(double)(ncA-1));
+
+		RSBENCH_STDOUT("unset multiplot\n");
+		RSBENCH_STDOUT("#%%:NNZ_PER_ROW_STDDEV:");/* RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(); */
+		RSBENCH_STDOUT("\t%10d\n",stdd_m);
+		RSBENCH_STDOUT("#%%:ROWS_MEDIAN:");/* RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(); */
+		RSBENCH_STDOUT("\t%10.0g\n",((double)median_m/(double)nrA));
+		RSBENCH_STDOUT("#%%:NNZ_PER_COL_STDDEV:");/* RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(); */
+		RSBENCH_STDOUT("\t%10d\n",stdd_k);
+		RSBENCH_STDOUT("#%%:COLS_MEDIAN:");/* RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(); */
+		RSBENCH_STDOUT("\t%10.0g\n",((double)median_k/(double)ncA));
+nohists:
+		RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS;
+		RSB_CONDITIONAL_FREE(idxv);
+		goto ret;
+	}
+	#endif /* 1 */
+	if(want_unordered_coo_bench)
+	{
+		struct rsb_coo_matrix_t coo;
+		rsb__fill_coo_struct(&coo,VA,IA,JA,nrA,ncA,nnz,typecode);
+		outnri = rhsnri = ndA = RSB_MAX(nrA,ncA);
+		lhs = rsb__calloc_vector(ndA*nrhs*incY,typecode);
+		rhs = rsb__calloc_vector(ndA*nrhs*incX,typecode);
+
+		if(!lhs || !rhs)
+		{
+			RSB_ERROR("problems allocating vectors");
+			RSB_CONDITIONAL_FREE(lhs); RSB_CONDITIONAL_FREE(rhs);
+			{ errval = RSB_ERR_INTERNAL_ERROR; goto err; }
+		}
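+		/* the loop below benchmarks SpMV straight on the (possibly unordered) COO arrays, with no RSB assembly involved: best and cumulative times are tracked, optionally flushing caches around and between the iterations */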
+
+		if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+		for(i=0;i<times;++i)
+		{
+			if(want_inner_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+			unordered_coo_op_time = - rsb_time();
+			if((errval = rsb__do_spmv_fullword_coo(&coo,flags,rhs,lhs,alphap,betap,incX,incY,transA))!=RSB_ERR_NO_ERROR) { goto erru; }
+			unordered_coo_op_time += rsb_time();
+			unordered_coo_op_time_best = RSB_MIN_ABOVE_INF(unordered_coo_op_time_best,unordered_coo_op_time,tinf);
+			unordered_coo_op_tot_time+=unordered_coo_op_time;
+		}
+		if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+erru:
+		RSB_CONDITIONAL_FREE(lhs); RSB_CONDITIONAL_FREE(rhs);
+		if(want_verbose == RSB_BOOL_TRUE)
+		{
+			/* FIXME ! 20110427 */
+			struct rsb_mtx_t matrixs;
+			mtxAp=&matrixs;
+			rsb__init_rsb_struct_from_coo(mtxAp,&coo);
+			mtxAp->flags = RSB_FLAG_DEFAULT_COO_MATRIX_FLAGS|RSB_DO_FLAG_FILTEROUT((flags),RSB_DO_FLAGS_EXTRACT_STORAGE(flags));
+			rsb__do_set_init_storage_flags(mtxAp,mtxAp->flags);
+			raw_Mflops=nnz*2;
+			RSBENCH_STDOUT("%%:UNORDERED_COO_PERFORMANCE:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+			RSBENCH_STDOUT("\t%10.2lf\n",((rsb_time_t)raw_Mflops)/(RSB_REAL_MILLION*unordered_coo_op_time_best));
+			mtxAp=NULL;
+		}
+	}
+	/* CONDITIONALLY, PERFORMING SOME TEST ON THE INPUT */
+	if(want_accuracy_test>=1)
+	{
+		struct rsb_coo_matrix_t coo;
+		rsb__fill_coo_struct(&coo,VA,IA,JA,nrA,ncA,nnz,typecode);
+		RSB_DO_ERROR_CUMULATE(errval,rsb__do_spmv_accuracy_test(&coo,ca,cn,flags));
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_ERROR("accuracy based test failed!\n");
+			goto err;
+		}
+		if(want_accuracy_test>1)
+		{
+			goto done;
+		}
+	}
+
+		if( (flags & RSB_FLAG_QUAD_PARTITIONING) && g_all_flags==1)
+		{
+			int /*ci=0,*/hi=0,oi=0;
+			fn=0;
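+			/* enumerate the flag variants to benchmark into flagsa[]: for each of three (currently placeholder) cache-policy cases (ci), toggle the halfword COO (oi) and halfword CSR (hi) index flags; fn counts the variants generated */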
+			for(ci=0;ci<3;++ci)
+/*			for(di=0;di<2;++di)*/
+			for(oi=0;oi<2;++oi)
+			for(hi=0;hi<2;++hi)
+/*			for(li=0;li<2;++li)*/
+			{
+#if 0
+				flagsa[di+hi*2+li*4+ci*8]=flags;
+				//RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],hi?RSB_FLAG_USE_HALFWORD_INDICES_COO:0);
+				RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],hi?RSB_FLAG_USE_HALFWORD_INDICES_CSR:0);
+#ifdef RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES
+				RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],li?RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES:0);
+#endif /* RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES */
+				RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],di?RSB_FLAG_RECURSIVE_SUBDIVIDE_MORE_ON_DIAG:0);
+	
+#if 0
+				RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],ci==1?RSB_FLAG_RECURSIVE_HALF_DETECTED_CACHE:0);
+				RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],ci==2?RSB_FLAG_RECURSIVE_DOUBLE_DETECTED_CACHE:0);
+#endif /* 0 */
+#else /* 0 */
+				flagsa[fn]=flags;
+				//RSB_DO_FLAG_ADD(flagsa[fn],li?RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES:0);
+				//RSB_DO_FLAG_ADD(flagsa[fn],di?RSB_FLAG_RECURSIVE_SUBDIVIDE_MORE_ON_DIAG:0);
+				RSB_DO_FLAG_ADD(flagsa[fn],oi?RSB_FLAG_USE_HALFWORD_INDICES_COO:0);
+				RSB_DO_FLAG_ADD(flagsa[fn],hi?RSB_FLAG_USE_HALFWORD_INDICES_CSR:0);
+#if 0
+				RSB_DO_FLAG_ADD(flagsa[fn],ci==1?RSB_FLAG_RECURSIVE_HALF_DETECTED_CACHE:0);
+				RSB_DO_FLAG_ADD(flagsa[fn],ci==2?RSB_FLAG_RECURSIVE_DOUBLE_DETECTED_CACHE:0);
+#endif /* 0 */
+#endif /* 0 */
+				++fn;
+			}
+		}
+		else
+		{
+			fn=1;
+			flagsa[fn-1]=flags;
+		}
+
+		if(!want_perf_dump)
+		if(!( RSB_MKL_APPROPRIATE_AT_TIME_SPEC( want_autotuner ) || RSB_MKL_APPROPRIATE_AT_TIME_SPEC( merge_experimental ) || RSB_MKL_APPROPRIATE_AT_TIME_SPEC( split_experimental ) )) /* otherwise pr__set.. cannot distinguish samples */
+		if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING))
+		{
+			/* adds a no-recursion flag case */
+			RSB_DO_FLAG_DEL(flags,RSB_FLAG_QUAD_PARTITIONING);
+/*			if(fn)*/
+/*				flags=flagsa[fn-1];	*//* copy from the last */
+/*			else*/
+/*				flagsa[fn]=flags;	*//* impose these flags */
+			for(fi=fn;fi>0;--fi)
+				flagsa[fi]=flagsa[fi-1];/* shift forward */
+			RSB_DO_FLAG_DEL(flagsa[0],RSB_FLAG_QUAD_PARTITIONING);
+			++fn;	/* add ours */
+		}
+
+		for(ti=0;ti<tn;++ti)
+		{
+
+	rsb_time_t op_t = RSB_TIME_ZERO;
+	rsb_time_t mct = RSB_TIME_ZERO;	/* matrix construction time */
+	rsb_time_t fet = RSB_TIME_ZERO;	/* fillin estimation time */
+
+	rsb_time_t sct = RSB_TIME_ZERO;	/* serial (if minimum number of cores is 1) matrix construction time */
+	rsb_time_t pct = RSB_TIME_ZERO;	/* parallel (if maximum number of cores > 1) matrix construction time */
+
+	rsb_time_t smt = RSB_TIME_ZERO;	/* serial multiplication time */
+	rsb_time_t pmt = RSB_TIME_ZERO;	/* parallel multiplication time */
+
+	
+	rsb_time_t sest = RSB_TIME_ZERO;	/* apparently the serial-run counterparts of the m* times below */
+	//rsb_time_t sect = RSB_TIME_ZERO;
+	rsb_time_t ssat = RSB_TIME_ZERO;
+	rsb_time_t seit = RSB_TIME_ZERO;
+	rsb_time_t scpt = RSB_TIME_ZERO;
+
+	rsb_time_t mest = RSB_TIME_ZERO;	/* minima, over the constructor repetitions, of mtxAp->est */
+	rsb_time_t mect = RSB_TIME_ZERO;	/* ... of mtxAp->ect */
+	rsb_time_t msat = RSB_TIME_ZERO;	/* ... of mtxAp->sat */
+	rsb_time_t meit = RSB_TIME_ZERO;	/* ... of mtxAp->eit */
+	rsb_time_t mcpt = RSB_TIME_ZERO;	/* ... of mtxAp->cpt */
+
+	rsb_time_t me_best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME, me_at_best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;     /* experimental merge */
+	rsb_time_t at_best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME, at_mkl_csr_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME; /* experimental merge */
+	rsb_thread_t at_mkl_csr_nt = RSB_AT_THREADS_AUTO, me_at_nt = RSB_AT_THREADS_AUTO;
+#if RSB_EXPERIMENTAL_WANT_BEST_TIMES
+	rsb_time_t best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t base_best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;	/* for comparative benchmarking */
+	rsb_time_t serial_best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;	/* for comparative benchmarking */
+	rsb_time_t spmv_t = RSB_TIME_ZERO;
+	rsb_time_t tot_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+	rsb_time_t spsv_d_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+	rsb_time_t spsv_spmv_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+	rsb_time_t best_spsv_spmv_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t spsv_f_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+#endif
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+	struct rsb_pci_t rsb_pci;
+#endif /* RSB_WANT_PERFORMANCE_COUNTERS */
+#if RSB_WANT_MKL
+	void *M_VA=NULL; MKL_INT *M_IA=NULL,*M_JA=NULL;
+	void *M_VAC=NULL; MKL_INT *M_IAC=NULL,*M_JAC=NULL;
+	rsb_time_t mkl_coo2csr_time = RSB_TIME_ZERO;
+	rsb_time_t mkl_coo_op_tot_time = RSB_TIME_ZERO;
+	rsb_time_t mkl_coo_op_time = RSB_TIME_ZERO;
+	rsb_time_t mkl_coo_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t mkl_coo_op_time_best_serial = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t mkl_csr_op_tot_time = RSB_TIME_ZERO;
+	rsb_time_t mkl_csr_op_time = RSB_TIME_ZERO;
+	rsb_time_t mkl_csr_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t mkl_csr_op_time_best_serial = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+
+	rsb_time_t mkl_gem_op_tot_time = RSB_TIME_ZERO;
+	rsb_time_t mkl_gem_op_time = RSB_TIME_ZERO;
+	rsb_time_t mkl_gem_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t mkl_gem_op_time_best_serial = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	struct rsb_ts_t btpms[2]; /* first is tuned, second is not */
+	rsb_flags_t mif = ( mib == 0 ) ? RSB_FLAG_NOFLAGS : RSB_FLAG_FORTRAN_INDICES_INTERFACE; /* MKL index flags */
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+	struct rsb_pci_t mkl_coo_pci,mkl_csr_pci,mkl_gem_pci;
+#endif /* RSB_WANT_PERFORMANCE_COUNTERS */
+#endif /* RSB_WANT_MKL */
+	struct rsb_attr_t attr;	/* this structure is rather large (100k, as of 20140223); with future parameters it should rather be heap allocated */
+	struct rsb_ts_t otpos, btpos;
+
+	RSB_BZERO_P((&otpos));
+	RSB_BZERO_P((&btpos));
+	RSB_BZERO_P((&attr));
+		transA = transAo;
+		if(ti>0)
+			transA = rsb__do_transpose_transposition(transAo);
+		if(ti==2)
+			transA = RSB_TRANSPOSITION_C;
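+		/* i.e. the ti loop cycles through the requested transposition, its transpose, and (for ti==2) the conjugate transpose */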
+		if(!  (
+			( RSB_IS_MATRIX_TYPE_COMPLEX(typecode) && (ti!=0) && ( flags & RSB_FLAG_SOME_SYMMETRY ) )  ||
+		       ((!RSB_IS_MATRIX_TYPE_COMPLEX(typecode))&& (ti!=0) && ( flags & RSB_FLAG_SYMMETRIC) )  ||
+		       ((!RSB_IS_MATRIX_TYPE_COMPLEX(typecode))&& (ti==2) &&!( flags & RSB_FLAG_SOME_SYMMETRY) )  ||
+			g_allow_any_tr_comb
+		))
+		if(tn>1)
+		{
+			RSBENCH_STDOUT("# multi-transpose benchmarking -- now using transA = %c.\n",RSB_TRANSPOSITION_AS_CHAR(transA));
+		}
+		if( /* transA != RSB_TRANSPOSITION_N */ ti>0 && RSB_DO_FLAG_HAS(flags,RSB_FLAG_SYMMETRIC) )
+		{
+			RSBENCH_STDOUT("# symmetric matrix --- skipping transposed benchmarking\n");
+			continue;
+		}
+		for(fi=0;fi<fn;++fi)
+		for(brvi=-1;brvi<brl;++brvi)
+		for(bcvi=-1;bcvi<bcl;++bcvi)
+#ifndef  RSB_COORDINATE_TYPE_H
+		if(!(flagsa[fi] & RSB_FLAG_USE_HALFWORD_INDICES_CSR))
+#endif /* RSB_COORDINATE_TYPE_H */
+		for(ci=0;ci<cn;++ci)	/* here just for should_recycle_matrix */
+		if(!(ca[ci]>1 && !(RSB_DO_FLAG_HAS(flagsa[fi],RSB_FLAG_QUAD_PARTITIONING)))) /* no need for more than one core without recursion */
+		{
+			cc = ca[ci];
+	rsb_time_t diag_op_tot_time = RSB_TIME_ZERO;
+	rsb_time_t diag_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t getrow_op_tot_time = RSB_TIME_ZERO;
+	rsb_time_t getrow_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t diag_op_time_best_serial = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t getrow_op_time_best_serial = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t no_lock_op_time = RSB_CONST_IMPOSSIBLY_BIG_TIME, no_lock_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME,
+	serial_no_lock_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME, no_lock_op_tot_time = RSB_TIME_ZERO;
+	rsb_time_t qt_op_time = RSB_CONST_IMPOSSIBLY_BIG_TIME, qt_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME,
+	qt_op_tot_time = RSB_TIME_ZERO;
+			should_recycle_matrix=(ci>0)?RSB_BOOL_TRUE:RSB_BOOL_FALSE;
+			/* if this is the special "vanilla CSR" run after/before recursive runs ... */
+			if(rsb__set_num_threads(cc)!=cc)
+			{
+				RSB_ERROR("failed setting %d threads!\n",cc);
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto err;
+			}
+			flags=flagsa[fi];
+			if(cn>1 && !RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING))
+				RSB_DO_FLAG_DEL(flags,RSB_FLAG_USE_HALFWORD_INDICES);
+
+			best_spsv_spmv_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+			best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+			op_t = RSB_TIME_ZERO;
+#if RSB_EXPERIMENTAL_WANT_BEST_TIMES
+			best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+			spmv_t = RSB_TIME_ZERO;
+			tot_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+			spsv_d_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+			spsv_spmv_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+			spsv_f_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+#endif /* RSB_EXPERIMENTAL_WANT_BEST_TIMES */
+
+			if(brl>0 && bcl>0)
+			{
+				/* this is a trick and an unclean programming practice */
+				if(brvi==-1)++brvi;
+				if(bcvi==-1)++bcvi;
+				br = brv[brvi];
+				bc = bcv[bcvi];
+			}
+			else
+			{	
+				/* br, bc already set */
+			}
+
+#if RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS 
+			/*	
+			* FIXME : laziness
+			*/
+			if( br!=1 || bc!=1 || !rsb__util_are_flags_suitable_for_optimized_1x1_constructor(flags) )
+#endif /* RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS */
+#if RSB_WANT_RSB_AS_ONLY_ALLOWED_FORMAT
+			if(0)
+#endif /* RSB_WANT_RSB_AS_ONLY_ALLOWED_FORMAT */
+			{
+				p_r = rsb__util_get_partitioning_array(br,nrA,&M_b,flags);
+				p_c = rsb__util_get_partitioning_array(bc,ncA,&K_b,flags);
+
+				if((! p_r) || (! p_c))
+				{
+					RSB_ERROR(RSB_ERRM_ES);
+					errval = RSB_ERR_ENOMEM;
+					goto erri;
+				}
+			}
+
+			if(  ( br!=1 || bc!=1 || p_r || p_c ) && ( flags & RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR ))
+			{
+				/*  */
+				RSB_WARN("WARNING : disabling in place allocation flag : it is only allowed for 1x1!\n");
+				RSB_DO_FLAG_DEL(flags,RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR) ;
+			}
+
+
+
+
+
+			if(!mtxAp)
+			{
+				int mci=0;
+				if(b_r_filename)
+				{
+					rsb_err_t errval_;
+					mct = - rsb_time();
+					mtxAp = rsb__load_matrix_file_as_binary(b_r_filename,&errval_);
+					mct += rsb_time();
+					if((RSB_SOME_ERROR(errval_)) || !mtxAp )
+					{
+						errval = errval_;
+						RSB_ERROR(RSB_ERRM_ES);
+						goto err;
+					}
+					else
+					{
+						nnz = mtxAp->nnz;
+						nrA = mtxAp->nr;
+						ncA = mtxAp->nc;
+					}
+
+					filename=b_r_filename;// for info purposes
+					flags=mtxAp->flags;
+				}
+				else
+				{
+				mect=mest=msat=meit=mcpt = RSB_TIME_ZERO;	/* resetting all values */
+
+				for(mci=0;mci<repeat_construction;++mci)
+				{
+					if(repeat_construction>1 && mci==0)
+						RSBENCH_STDOUT("# will repeat constructor %d times\n",repeat_construction);
+					mct = - rsb_time();
+					if(want_in_place_assembly)
+					{
+						mtxAp = rsb__do_mtx_alloc_from_coo_inplace(VA,IA,JA,nnz,typecode,nrA,ncA,br,bc,flags,&errval);
+					}
+					else
+						mtxAp = rsb_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,nrA,ncA,br,bc,flags,&errval);
+					mct += rsb_time();
+					if((RSB_SOME_ERROR(errval)) || !mtxAp )
+					{
+						RSB_ERROR(RSB_ERRM_ES);
+						goto err;
+					}
+
+/*					RSBENCH_STDOUT("running constructor for time %d/%d\n",mci+1,repeat_construction);*/
+					if(mect == RSB_TIME_ZERO || mect>mtxAp->ect)
+						mect=mtxAp->ect;
+					if(mest == RSB_TIME_ZERO || mest>mtxAp->est)
+						mest=mtxAp->est;
+					if(msat == RSB_TIME_ZERO || msat>mtxAp->sat)
+						msat=mtxAp->sat;
+					if(meit == RSB_TIME_ZERO || meit>mtxAp->eit)
+						meit=mtxAp->eit;
+					if(mcpt == RSB_TIME_ZERO || mcpt>mtxAp->cpt)
+						mcpt=mtxAp->cpt;
+					if(mci != repeat_construction-1)
+					{ RSB_MTX_FREE(mtxAp);	/* we only wanted timings */ }
+					else
+					{
+						/* we keep the mtxAp, and set best individual times */;
+						mtxAp->est=mest;
+						mtxAp->ect=mect;
+						mtxAp->sat=msat;
+						mtxAp->eit=meit;
+						mtxAp->cpt=mcpt;
+					}
+				}
+				}
+				if(ci==0 && sct == RSB_TIME_ZERO)
+					//sct=mct;
+					sct=mtxAp->tat;
+				if(ci==cn-1 && pct == RSB_TIME_ZERO)
+					//pct=mct;
+					pct=mtxAp->tat;
+			} /* !mtxAp */
+			
+			if(do_perform_ddc == RSB_BOOL_TRUE)
+			{
+			if(rsb__is_square(mtxAp))
+			{
+				/* FIXME: experimental, new. should write a test with octave for this */
+				void * DV = rsb__calloc_vector(mtxAp->nr,mtxAp->typecode);
+				void * RS = rsb__calloc_vector(mtxAp->nr,mtxAp->typecode);
+				rsb_aligned_t mtwo[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+				if(!RS||!DV) { errval = RSB_ERR_ENOMEM; goto noddc; }
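+				/*
+				 * RS apparently receives the per-row absolute sums RS_i = sum_j |a_ij|
+				 * (diagonal included) and DV the |d_ii|; after the scal/axpy steps below,
+				 * RS_i - 2*|d_ii| < 0 on every row, i.e. sum_{j!=i} |a_ij| < |d_ii|,
+				 * is exactly strict row-wise diagonal dominance.
+				 */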
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_infty_norm(mtxAp,RSB_TRANSPOSITION_N,RS));
+				rsb__util_set_area_to_converted_integer(mtwo,mtxAp->typecode,-2);
+				RSB_DO_ERROR_CUMULATE(errval,rsb__dodo_getdiag(mtxAp,DV));
+				RSB_DO_ERROR_CUMULATE(errval,rsb__vector_to_abs(DV,mtxAp->typecode,mtxAp->nr));
+				RSB_DO_ERROR_CUMULATE(errval,rsb__cblas_Xscal(mtxAp->typecode,mtxAp->nr,mtwo,DV,1));
+				RSB_DO_ERROR_CUMULATE(errval,rsb__cblas_Xaxpy(mtxAp->typecode,mtxAp->nr,NULL,DV,1,RS,1));
+				if(rsb__util_count_negative(RS,mtxAp->typecode,mtxAp->nr)==mtxAp->nr)
+					RSBENCH_STDOUT("#matrix is diagonally dominant\n");
+				else
+					RSBENCH_STDOUT("#matrix is not diagonally dominant\n");
+				RSBENCH_STDOUT("#diagonal dominance computed in ? s\n");
+noddc:
+				RSB_CONDITIONAL_FREE(DV); RSB_CONDITIONAL_FREE(RS);
+				if(RSB_SOME_ERROR(errval))
+					goto err;
+			}
+			else
+			{
+				RSB_ERROR("input matrix is not square: cannot compute the diagonal dominance check\n");
+			}
+			}
+
+			if( dump_graph_file )
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_print_matrix_stats(mtxAp,RSB_CONST_DUMP_DOT,dump_graph_file));
+
+			if(do_perform_ilu == RSB_BOOL_TRUE)
+			{
+				/* FIXME: experimental */
+				rsb_time_t ilut = - rsb_time();
+				RSB_STDOUT("performing EXPERIMENTAL ILU-0\n");
+				errval = rsb__prec_ilu0(mtxAp);//TODO: actually, only for CSR
+				ilut += rsb_time();
+				if(RSB_SOME_ERROR(errval))
+				{
+					RSB_ERROR(RSB_ERRM_ES);
+					goto err;
+				}
+				else
+					RSB_STDOUT("performed EXPERIMENTAL ILU-0 successfully in %lg s.\n",ilut);
+				rsb_file_mtx_save(mtxAp,NULL);
+				goto ret;
+			} /* do_perform_ilu */
+
+			if(want_update && mtxAp)
+			{
+				rsb_time_t ct = - rsb_time();
+				/* FIXME: this is update, not conversion, so it should not be here */
+				errval = rsb__do_set_coo_elements(mtxAp,VA,IA,JA,nnz);
+				if(RSB_SOME_ERROR(errval))
+				{ RSB_ERROR(RSB_ERRM_ES);goto erri;}
+				ct += rsb_time();
+				/* missing check */
+				RSBENCH_STDOUT("#individual update of %d elements in assembled RSB took %2.5f s: %2.5f%% of construction time\n",nnz,ct,(100*ct)/mtxAp->tat);
+			} /* want_update */
+
+			if(want_convert && mtxAp)
+			{
+				/* FIXME: here all conversions should occur, and be benchmarked */
+				rsb_time_t ct;
+				rsb_nnz_idx_t rnz=0;
+				struct rsb_coo_matrix_t coo;
+
+				coo.nnz = RSB_MAX(mtxAp->nnz,RSB_MAX(nrA,ncA));
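+				/* note: coo.nnz is oversized to max(nnz,nr,nc) so that the same IA/JA buffers can also host the nr+1 (resp. nc+1) CSR/CSC pointer arrays used below */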
+				coo.typecode=mtxAp->typecode;
+				if(rsb__allocate_coo_matrix_t(&coo)!=&coo)
+				{
+					RSB_ERROR(RSB_ERRM_ES);
+					goto errc;
+				}
+				coo.nr = mtxAp->nr;
+				coo.nc = mtxAp->nc;
+
+				ct = - rsb_time();
+				errval = rsb__do_get_rows_sparse(RSB_TRANSPOSITION_N,NULL,mtxAp,coo.VA,coo.IA,coo.JA,0,mtxAp->nr-1,&rnz,RSB_FLAG_NOFLAGS);
+				if(RSB_SOME_ERROR(errval))
+				{ RSB_ERROR(RSB_ERRM_ES);goto erri;}
+				ct += rsb_time();
+				if(RSB_SOME_ERROR(rsb__util_is_sorted_coo_as_row_major(coo.VA,coo.IA,coo.JA,coo.nnz,coo.typecode,
+					NULL,RSB_FLAG_NOFLAGS)))
+					{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+				RSBENCH_STDOUT("#extraction of %d elements in sorted COO took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+				RSBENCH_STDOUT("#extraction to unsorted COO unimplemented\n");
+				//RSBENCH_STDOUT("#extraction of %d elements in unsorted COO took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+
+				RSB_DO_ERROR_CUMULATE(errval,rsb_mtx_get_coo(mtxAp,VA,IA,JA,RSB_FLAG_C_INDICES_INTERFACE));
+
+				rsb__util_coo_array_set(coo.JA,coo.nnz,0);
+				RSB_DO_ERROR_CUMULATE(errval,rsb_coo_sort(VA,IA,JA,mtxAp->nnz,nrA,ncA,typecode,RSB_FLAG_NOFLAGS));
+				if(RSB_SOME_ERROR(errval))
+				{ RSB_ERROR(RSB_ERRM_ES);goto erri;}
+
+				ct = - rsb_time();
+				errval = rsb_mtx_get_csr(typecode,mtxAp, coo.VA, coo.IA, coo.JA,RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS);
+				if(RSB_SOME_ERROR(errval))
+				{ RSB_ERROR(RSB_ERRM_ES);goto erri;}
+				ct += rsb_time();
+				for(i=0;i<mtxAp->nnz;++i)if(coo.JA[i]!=JA[i]){RSB_ERROR("@%d: %d != %d!\n",i,coo.JA[i],JA[i]);errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+				if(RSB_SOME_ERROR(errval=rsb__csr_chk(coo.IA,coo.JA,coo.nr,coo.nc,coo.nnz,mib)))
+					{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+				RSBENCH_STDOUT("#extraction of %d elements in CSR took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+
+/*				ct = - rsb_time();*/
+/*				errval = rsb__do_get_coo(mtxAp,&coo.VA,&coo.IA,&coo.JA);	// FIXME : bugged ?*/
+/*				if(RSB_SOME_ERROR(errval)) goto erri;*/
+/*				ct += rsb_time();*/
+/*				if(RSB_SOME_ERROR(rsb__util_is_sorted_coo_as_row_major(coo.VA,coo.IA,coo.JA,coo.nnz,coo.typecode,*/
+/*					NULL,RSB_FLAG_NOFLAGS)))*/
+/*					{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}*/
+/*				RSBENCH_STDOUT("#extraction of %d elements in sorted COO took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);*/
+
+				rsb__util_coo_array_set(coo.IA,coo.nnz,0);
+				RSB_DO_ERROR_CUMULATE(errval,rsb_coo_sort(VA,JA,IA,mtxAp->nnz,ncA,nrA,typecode,RSB_FLAG_NOFLAGS));
+				ct = - rsb_time();
+				errval = rsb__do_get_csc(mtxAp,(rsb_byte_t**) &coo.VA,&coo.JA,&coo.IA);
+				if(RSB_SOME_ERROR(errval))
+					{goto erri;}
+				ct += rsb_time();
+				for(i=0;i<mtxAp->nnz;++i)if(coo.IA[i]!=IA[i]){RSB_ERROR("@%d: %d != %d!\n",i,coo.IA[i],IA[i]);errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+				if(RSB_SOME_ERROR(rsb__csc_chk(coo.JA,coo.IA,coo.nr,coo.nc,coo.nnz,mib)))
+					{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+				RSBENCH_STDOUT("#extraction of %d elements in CSC took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+
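+				/* each of the blocks below clones the assembled matrix, switches the clone in place to another representation (RCOO, sorted COO, CSR, CSC, unsorted COO), sanity-checks the result where a checker exists, and reports the conversion time relative to the total construction time (mtxAp->tat) */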
+				{
+					struct rsb_mtx_t * cmatrix=NULL;
+					ct = - rsb_time();
+					cmatrix = rsb__clone_simple(mtxAp);
+					ct += rsb_time();
+					if(!cmatrix){errval = RSB_ERR_ENOMEM;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					if(!rsb__mtx_chk(cmatrix))
+						{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					RSB_MTX_FREE(cmatrix);
+				}
+				RSBENCH_STDOUT("#cloning of %d elements took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+				{
+					struct rsb_mtx_t * cmatrix=NULL;
+					cmatrix = rsb__clone_simple(mtxAp);
+					if(!cmatrix){errval = RSB_ERR_ENOMEM;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					ct = - rsb_time();
+					errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_rcoo(cmatrix,RSB_BOOL_FALSE);
+					ct += rsb_time();
+					if(!rsb__mtx_chk(cmatrix))
+						{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					if(
+rsb__terminal_recursive_matrix_count_with_storage_and_no_flags(cmatrix,RSB_MATRIX_STORAGE_BCOR,RSB_FLAG_USE_HALFWORD_INDICES_CSR)
+					!= rsb__terminal_recursive_matrix_count(cmatrix))
+						{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+
+					RSBENCH_STDOUT("#conversion of %d elements to RCOO took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+					RSB_MTX_FREE(cmatrix);
+				}
+
+				{
+					struct rsb_mtx_t * cmatrix=NULL;
+					struct rsb_coo_matrix_t icoo;
+					cmatrix = rsb__clone_simple(mtxAp);
+					if(!cmatrix){errval = RSB_ERR_ENOMEM;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					ct = - rsb_time();
+					errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_coo_sorted(cmatrix,&icoo);
+					ct += rsb_time();
+
+					if(RSB_SOME_ERROR(rsb__util_is_sorted_coo_as_row_major(icoo.VA,icoo.IA,icoo.JA,icoo.nnz,icoo.typecode,NULL,RSB_FLAG_NOFLAGS)))
+						{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					RSBENCH_STDOUT("#conversion of %d elements to sorted COO took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+					rsb__destroy_coo_matrix_t(&icoo);
+				}
+				
+				if(!RSB_DO_TOOFEWNNZFORCSR(mtxAp->nnz,mtxAp->nr))
+				{
+					struct rsb_mtx_t * cmatrix=NULL;
+					struct rsb_coo_matrix_t icoo;
+					cmatrix = rsb__clone_simple(mtxAp);
+					if(!cmatrix){errval = RSB_ERR_ENOMEM;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					ct = - rsb_time();
+					errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_csr(cmatrix,&icoo);
+					ct += rsb_time();
+					if(RSB_SOME_ERROR(rsb__csr_chk(icoo.IA,icoo.JA,icoo.nr,icoo.nc,icoo.nnz,mib)))
+						{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					RSBENCH_STDOUT("#conversion of %d elements to CSR took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+					rsb__destroy_coo_matrix_t(&icoo);
+				}
+
+				if(!RSB_DO_TOOFEWNNZFORCSR(mtxAp->nnz,mtxAp->nc))
+				{
+					struct rsb_mtx_t * cmatrix=NULL;
+					struct rsb_coo_matrix_t icoo;
+					cmatrix = rsb__clone_simple(mtxAp);
+					if(!cmatrix){errval = RSB_ERR_ENOMEM;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					ct = - rsb_time();
+					errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_csc(cmatrix,&icoo);
+					ct += rsb_time();
+					if(RSB_SOME_ERROR(rsb__csc_chk(icoo.JA,icoo.IA,icoo.nr,icoo.nc,icoo.nnz,mib)))
+						{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+
+					RSBENCH_STDOUT("#conversion of %d elements to CSC took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+					rsb__destroy_coo_matrix_t(&icoo);
+				}
+
+				{
+					struct rsb_mtx_t * cmatrix=NULL;
+					struct rsb_coo_matrix_t icoo;
+					cmatrix = rsb__clone_simple(mtxAp);
+					if(!cmatrix){errval = RSB_ERR_ENOMEM;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					ct = - rsb_time();
+					errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_coo_unsorted(cmatrix,&icoo);
+					ct += rsb_time();
+
+					RSBENCH_STDOUT("#conversion of %d elements to unsorted COO took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+					rsb__destroy_coo_matrix_t(&icoo);
+				}
+errc:
+				rsb__destroy_coo_matrix_t(&coo);
+			} /* want_convert */
+
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR("problems assembling / converting matrix\n");
+				goto erri;
+			}
+
+			if(!mtxAp)
+			{
+				errval = RSB_ERR_INTERNAL_ERROR;
+				RSB_ERROR("problems assembling matrix\n");
+				goto erri;
+			}
+
+			totht -= rsb_time();
+			if(!rsb__mtx_chk(mtxAp))
+			{
+				RSB_ERROR("matrix does not seem to be built correctly\n");
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+			totht += rsb_time();
+
+
+			if(zsort_for_coo)
+				rsb__do_zsort_coo_submatrices(mtxAp);
+			if(reverse_odd_rows)
+				rsb__do_reverse_odd_rows(mtxAp);
+
+			//rsb_file_mtx_save(mtxAp,NULL);
+			//rsb__dump_blocks(mtxAp);
+
+			if(b_w_filename || csr_w_filename)
+			{
+				const char * w_filename = b_w_filename ;
+				rsb_dump_flags_t dflags = RSB_CONST_DUMP_RSB;
+
+				if(csr_w_filename)
+				{
+					w_filename = csr_w_filename;
+					dflags = RSB_CONST_DUMP_CSR;
+				}
+
+				frt = -rsb_time();
+				errval = rsb__do_print_matrix_stats(mtxAp,dflags,w_filename);
+				frt += rsb_time();
+				rsb_perror(NULL,errval);
+				if(RSB_SOME_ERROR(errval)) { RSB_PERR_GOTO(err,RSB_ERRM_NO_XDR); }
+				RSB_STDOUT("#file output of %s took %lf s (%.0lf nnz, %.0lf nnz/s ) (%.5lf MB/s ) \n",rsb__basename(w_filename),frt,
+					(((double)mtxAp->nnz)),
+					(((double)mtxAp->nnz)/frt),
+					(((double)rsb_sys_filesize(w_filename))/(frt*RSB_INT_MILLION))
+				);
+				goto ret;
+			}
+
+			if(dumpout_internals)
+			{
+				errval = rsb__do_print_matrix_stats(mtxAp,RSB_CONST_DUMP_RECURSION,NULL);
+				if(RSB_SOME_ERROR(errval))goto err;
+				//goto ret; /* we want to continue */
+			}
+
+			errval = rsb__get_blocking_size(mtxAp,&br,&bc);
+
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR("problems getting blocking size");
+				goto erri;
+			}
+
+			/* NOTE: the matrix constructor could have removed duplicates or zeros */
+			/* nnz=mtxAp->nnz; */ /* 20120922 commented out: in case of removed entries, it would remember this number in spite of unchanged IA,JA,VA arrays */ 
+			if(!RSB_IS_VALID_NNZ_COUNT(nnz)){errval = RSB_ERR_INTERNAL_ERROR;goto erri;}
+			/* NOTE: if loading from a binary dump, we need to set nrA,ncA */
+			nrA = mtxAp->nr;
+			ncA = mtxAp->nc;
+			ndA = RSB_MAX(nrA,ncA);
+			outnri = rhsnri = ndA;
+			ldX = (RSB_DOES_NOT_TRANSPOSE(transA) ? ncA : nrA) * incX; 	/* FIXME: still unused, e.g. in rsb__do_spmm_general */
+			ldY = (RSB_DOES_NOT_TRANSPOSE(transA) ? nrA : ncA) * incY; 	/* X spans the columns of op(A), Y its rows */
+			lhs = rsb__calloc((mtxAp->el_size*(ndA+br))*nrhs*incY);
+			rhs = rsb__calloc((mtxAp->el_size*(ndA+bc))*nrhs*incX);
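+			/* presumably the br/bc padding leaves blocked kernels headroom to read or write slightly past the logical vector ends */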
+
+			if(!lhs || !rhs)
+			{
+				RSB_ERROR("problems allocating vectors");
+				RSB_CONDITIONAL_FREE(lhs);
+				RSB_CONDITIONAL_FREE(rhs);
+				{ errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+			}
+
+			if(RSB_SOME_ERROR(rsb__vectors_reinit(rhs,lhs,typecode,ndA,ndA,incX,incY))) { errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+			if(merge_experimental || split_experimental || just_enter_tuning) /* FIXME: pass parameter */
+			{
+				struct rsb_mtx_t*mtxOp = NULL;
+				int wvmbat = RSB_AUT0_TUNING_SILENT; /* wanted verbosity in merge based autotuning */
+				int eps = 0; /* effective partitioning steps */
+				rsb_time_t btt = RSB_TIME_ZERO; /* blocks tuning time */
+				rsb_submatrix_idx_t maxms = merge_experimental, maxss = split_experimental;
+				int maxr = RSB_CONST_AUTO_TUNING_ROUNDS;
+				enum rsb_op_t op = rsb_op_spmv;
+				int mintimes = RSB_AT_MIN_TIMES/*RSB_AT_NTIMES_AUTO*/;
+				rsb_time_t maxtime = /* RSB_AT_TIME_AUTO*/ RSB_AT_MAX_TIME;
+				struct rsb_mtx_t mtxA = *mtxAp;
+
+				/* please note at_mkl_csr_nt in the following... */
+				if(maxms < 0 || maxss < 0) { at_mkl_csr_nt = me_at_nt = RSB_THREADS_AUTO; }
+				if(maxms < 0) maxms *= -1;
+				if(maxss < 0) maxss *= -1;
+				
+				RSBENCH_STDOUT("RSB Sparse Blocks Autotuner invoked requesting max %d splits and max %d merges in %d rounds, threads spec.%d (specify negative values to enable threads tuning).\n",maxss,maxms,maxr,me_at_nt);
+
+				if (want_verbose_tuning > 0)
+					wvmbat = RSB_AUT0_TUNING_VERBOSE;
+				if (want_verbose_tuning > 1)
+					wvmbat = RSB_AUT0_TUNING_QUATSCH ;
+				if (want_verbose_tuning > 2)
+					wvmbat = RSB_AUT0_TUNING_QUATSCH + 1;
+				btt -= rsb_time(); 
+
+				if( just_enter_tuning == 0 || ( merge_experimental == 0 && split_experimental == 0 ) )
+					maxr = 0;
+				mtxOp = mtxAp;
+				errval = rsb__tune_spxx(&mtxOp,NULL,&me_at_nt,maxr,maxms,maxss,RSB_CONST_MS_AT_AUTO_STEPS,RSB_AUT0_TUNING_DEFAULT_TIMES,maxtime,transA,alphap,NULL,nrhs,order,rhs,rhsnri,betap,lhs,outnri,op,&eps,&me_best_t,&me_at_best_t,wvmbat,rsb__basename(filename),&attr,&otpos,&btpos);
+
+				btt += rsb_time(); 
+				tottt += btt;
+				if(want_perf_dump) /* FIXME: shall give only values from the tuning routine */
+				if(RSB_DO_FLAG_HAS(/*mtxAp->*/flags,RSB_FLAG_QUAD_PARTITIONING))
+					rsb__pr_set(rspr, &mtxA, me_at_best_t<me_best_t?mtxOp:NULL, filenamei, ci, incXi, incYi, nrhsi, typecodesi, ti, transA, me_best_t, RSB_CONST_IMPOSSIBLY_BIG_TIME, me_at_best_t, RSB_CONST_IMPOSSIBLY_BIG_TIME, me_at_nt, RSB_THREADS_AUTO, btt, eps, &otpos, &btpos, NULL, NULL);
+				if( mtxAp != mtxOp && mtxOp )
+			 	{
+					RSBENCH_STDOUT("RSB Autotuner suggested a new clone.\n");
+#if RSB_AT_DESTROYS_MTX
+					mtxAp = mtxOp;
+#else  /* RSB_AT_DESTROYS_MTX */
+#if 1
+ 					/* FIXME: this is to have mtxAp address constant. */
+					errval = rsb__mtx_transplant_from_clone(&mtxAp, mtxOp);
+					mtxOp = NULL;
+					if(RSB_SOME_ERROR(errval)) { errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+#else
+				 	RSB_MTX_FREE(mtxAp); mtxAp = mtxOp;
+#endif
+#endif /* RSB_AT_DESTROYS_MTX */
+				 }
+			}
+
+			if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING))
+			if(RSB_MKL_APPROPRIATE_AT_TIME_SPEC( want_autotuner ))
+			{
+				rsb_int_t otn = wat;
+				rsb_int_t*otnp = NULL;
+				rsb_real_t sf = RSB_REAL_ZERO;
+				rsb_time_t att = - rsb_time();
+				struct rsb_mtx_t * mtxOp = NULL;
+				struct rsb_mtx_t ** mtxOpp = NULL;
+				enum rsb_op_t op = rsb_op_spmv;
+
+				if(wat >  0)
+					otnp = &otn; /* starting thread suggestion */
+				if(wat == 0)
+				{
+					otnp = NULL; /* current thread count */
+					mtxOpp = &mtxOp; /* matrix structure tuning */
+				}
+				if(wat <  0)
+				{
+					otn = -wat; /* ;-) */
+					otnp = &otn; /* starting thread suggestion */
+					mtxOpp = &mtxOp; /* matrix structure tuning */
+				}
+				errval = rsb__tune_spxx(mtxOpp, &sf, otnp, wai, 0, 0, RSB_CONST_MS_AT_AUTO_STEPS, RSB_AUT0_TUNING_DEFAULT_TIMES, want_autotuner, transA, alphap, mtxAp, nrhs, order, rhs, rhsnri, betap, lhs, outnri, op , NULL, NULL, NULL, wavf, rsb__basename(filename), &attr, &otpos, &btpos);
+				if(mtxOpp && *mtxOpp)
+				{
+					RSBENCH_STDOUT("RSB Autotuner suggested a new matrix: freeing the existing one.\n");
+					RSB_MTX_FREE(mtxAp);
+					mtxAp = mtxOp;
+					mtxOp = NULL;
+					mtxOpp = NULL;
+				}
+				att += rsb_time();
+				RSBENCH_STDOUT("RSB Autotuner took %lg s and estimated a speedup of %lf x\n",att,sf);
+				if(wat && otn > 0)
+				{
+					/* FIXME: this breaks consistency! Shall skip further cycles!  */
+					RSBENCH_STDOUT("Setting autotuning suggested thread count of %d (will skip further thread number configurations!)\n",otn);
+					/* rsb__set_num_threads(otn); */
+					RSB_DO_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_EXECUTING_THREADS,&otn,errval);
+					if(want_ancillary_execs == RSB_BOOL_TRUE)
+					if(incX == 1 && incY == 1)
+					{
+						totatt -= rsb_time();
+						RSBENCH_STDOUT("# Post-autotuning performance recheck:\n");
+						/* errval = */ rsb__do_bench_spxm(NULL,NULL,transA,alphap,mtxAp,nrhs,order,rhs,rhsnri,betap,lhs,outnri,RSB_AT_TIME_AUTO,RSB_AT_NTIMES_AUTO,op,10,RSB_AUT0_TUNING_QUATSCH,NULL,NULL); /* just for check purposes */
+						totatt += rsb_time();
+					}
+					cc=otn;cl=ci+1;
+				}
+			}	/* want_autotuner */
+
+			if(RSB_SOME_ERROR(errval)) { errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+				if(n_dumpres)
+				{
+					RSBENCH_STDOUT("##RSB LHS %d elements pre-peek:\n",n_dumpres);
+					rsb__debug_print_vector(rhs,RSB_MIN(ndA*nrhs,n_dumpres),typecode,incX);
+				}
+				if(n_dumprhs)
+				{
+					RSBENCH_STDOUT("##RSB RHS %d elements pre-peek:\n",n_dumprhs);
+					rsb__debug_print_vector(rhs,RSB_MIN(ndA*nrhs,n_dumprhs),typecode,incX);
+				}
+			if ( times >= 0 ) /* benchmark of spmv_uaua */
+			{
+				/* 20140616 use this in conjunction with --dump-n-lhs-elements .. */
+				for(nrhsl=0;nrhsl<nrhs;++nrhsl)
+					rsb__util_set_array_to_converted_integer(((rsb_byte_t*)rhs)+mtxAp->el_size*ndA*nrhsl,mtxAp->typecode,ndA,incX,nrhsl+1),
+					rsb__util_set_array_to_converted_integer(((rsb_byte_t*)lhs)+mtxAp->el_size*ndA*nrhsl,mtxAp->typecode,ndA,incY,nrhsl+1);
+				if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("PRE_RSB_SPMV_",0,times,NULL);
+				op_t = - rsb_time();
+				RSB_TM_LIKWID_MARKER_R_START("RSB_SPMV");
+				for(i=0;i<times;++i)  /* benchmark loop of spmv_uaua begin */
+				{
+#if RSB_EXPERIMENTAL_WANT_BEST_TIMES
+				spmv_t = - rsb_time();
+#endif /* RSB_EXPERIMENTAL_WANT_BEST_TIMES */
+				RSB_PERFORMANCE_COUNTERS_DUMP("PRE_RSB_SPMV_",0);
+				if((errval = rsb__do_spmm_general(mtxAp,rhs,lhs,alphap,betap,incX,incY,transA,RSB_OP_FLAG_DEFAULT,order RSB_OUTER_NRHS_SPMV_ARGS_IDS))!=RSB_ERR_NO_ERROR) /* benchmark -- mop is spmv_uaua */
+				{
+					RSBENCH_STDERR("[!] "RSB_ERRM_MV);
+					goto erri;
+				}
+				RSB_PERFORMANCE_COUNTERS_DUMP("POST_RSB_SPMV_",1);
+#if RSB_EXPERIMENTAL_WANT_BEST_TIMES
+				spmv_t += rsb_time();
+				tot_t += spmv_t;
+				best_t = RSB_MIN_ABOVE_INF(spmv_t,best_t,tinf);
+#endif /* RSB_EXPERIMENTAL_WANT_BEST_TIMES */
+				if(want_inner_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+
+	#ifdef RSB_WANT_KERNELS_DEBUG
+				/* ... */
+	#endif /* RSB_WANT_KERNELS_DEBUG */
+				}  /* times: benchmark loop of spmv_uaua end */
+				RSB_TM_LIKWID_MARKER_R_STOP("RSB_SPMV");
+				RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("POST_RSB_SPMV_",1,times,&rsb_pci);
+				if((g_debug || 1) /*&& i==times-1*/)
+				{
+					/* this is debug information, very cheap to include */
+					RSB_DO_ERROR_CUMULATE(errval,rsb__do_print_some_vector_stats(lhs,typecode,nrA,incY));
+				}
+			if(rsb__set_num_threads(cc)!=cc)
+			{
+				RSB_ERROR("failed setting %d threads!\n",cc);
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto err;
+			}
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+			if(want_ancillary_execs == RSB_BOOL_TRUE)
+			for(i=0;i<times;++i)
+			{
+				if(want_inner_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				no_lock_op_time = - rsb_time();
+				if((errval = rsb__do_spmm_general(mtxAp,rhs,lhs,alphap,betap,incX,incY,transA,RSB_OP_FLAG_FAKE_LOCK,order RSB_OUTER_NRHS_SPMV_ARGS_IDS))!=RSB_ERR_NO_ERROR) { goto erri; }
+				no_lock_op_time += rsb_time();
+				no_lock_op_time_best = RSB_MIN_ABOVE_INF(no_lock_op_time_best,no_lock_op_time,tinf);
+				no_lock_op_tot_time += no_lock_op_time;
+			}
+			if(cc==1)serial_no_lock_op_time_best=no_lock_op_time_best;
+			totatt += no_lock_op_tot_time;
+
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+
+			if(want_ancillary_execs == RSB_BOOL_TRUE)
+			if(cc==1)
+			for(i=0;i<times;++i)
+			{
+				if(want_inner_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				qt_op_time = - rsb_time();
+				if((errval = rsb__do_spmm_general(mtxAp,rhs,lhs,alphap,betap,incX,incY,transA,RSB_OP_FLAG_WANT_SERIAL,order RSB_OUTER_NRHS_SPMV_ARGS_IDS))!=RSB_ERR_NO_ERROR) { goto erri; }
+				qt_op_time += rsb_time();
+				qt_op_time_best = RSB_MIN_ABOVE_INF(qt_op_time_best,qt_op_time,tinf);
+				qt_op_tot_time += qt_op_time;
+			}
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+			totatt += qt_op_tot_time;
+
+				if((g_debug) /*&& i==times-1*/)
+				{
+					rsb_byte_t * out2=NULL;
+					out2=rsb__calloc(mtxAp->el_size*(RSB_MAX(nrA,ncA)+br)*nrhs);
+					if(!out2 /* || rsb__cblas_Xscal(mtxAp->typecode,nrA+br,NULL,out2,incY)*/) { errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+
+					RSB_DO_FLAG_ADD(mtxAp->flags,RSB_FLAG_SHOULD_DEBUG);
+/*					rsb_spmv_uaua_testing( mtxAp, rhs, out2,transA );	*//* FIXME : INCOMPLETE */
+					RSB_DO_FLAG_DEL(mtxAp->flags,RSB_FLAG_SHOULD_DEBUG);
+					/* bit-per-bit checking */
+					
+					rsb__util_vector_sum(errnorm,lhs,typecode,nrA);
+					RSBENCH_STDOUT("#sum:");
+					rsb__debug_print_vector(errnorm,1,typecode,1);
+					RSBENCH_STDOUT("\n");
+
+					if(dumpvec&rsb_dumpvec_res)/* new */
+						rsb__debug_print_vectors(lhs,out2,nrA,1,1,typecode);
+					
+					if(dumpvec&rsb_dumpvec_res)/* new */
+					{
+					if(RSB_MEMCMP(lhs,out2,mtxAp->el_size*(nrA+br*0))!=0)
+					{
+						RSB_ERROR("sparse matrix vector product cross check failed. diff (bad,good):\n");
+						rsb__debug_print_vectors_diff(lhs,out2,nrA,typecode,incY,incY,RSB_VECTORS_DIFF_DISPLAY_N);
+
+						if(out2)
+							rsb__free(out2);
+						{ errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+					}
+					else
+						RSBENCH_STDOUT("sparse matrix vector product cross check succeeded\n");
+					}
+					if(out2)rsb__free(out2);
+				}
+				if(dumpvec&rsb_dumpvec_res)
+					rsb__debug_print_vector(lhs,nrA,typecode,incY);
+				if(dumpvec&rsb_dumpvec_rhs)
+					rsb__debug_print_vector(rhs,nrA,typecode,incX);
+
+				if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				if(n_dumpres)
+				{
+					RSBENCH_STDOUT("##RSB LHS %d elements post-peek:\n",n_dumpres);
+					rsb__debug_print_vector(lhs,RSB_MIN(ndA*nrhs,n_dumpres),typecode,incY);
+				}
+				if(n_dumprhs)
+				{
+					RSBENCH_STDOUT("##RSB RHS %d elements post-peek:\n",n_dumprhs);
+					rsb__debug_print_vector(rhs,RSB_MIN(ndA*nrhs,n_dumprhs),typecode,incY);
+				}
+				if(!g_sort_only)
+				{
+					op_t += rsb_time();
+					op_t /= (double)times;
+					/*
+				if(RSB_WANT_VERBOSE_MESSAGES)
+				{RSBENCH_STDOUT("performed %lf Mflops in %lf seconds (%lf Mflops)\n",raw_Mflops, op_t, (raw_Mflops)/(op_t));
+				RSBENCH_STDOUT("raw data rate of (%lf Gbytes/sec)\n", ((double)(raw_Mflops)*(mtxAp->el_size))/(op_t*1000.0));	}*/
+				/*
+				if(RSB_WANT_VERBOSE_MESSAGES)
+				RSBENCH_STDOUT("nonzero data rate of (%lf Gbytes/sec, or %lf Mflops)\n",
+				(true_Mflops*(mtxAp->el_size))/(op_t*1000.0),
+				true_Mflops/(op_t)
+				);*/
+				}
+
+                                fillin = rsb__do_get_matrix_fillin(mtxAp);
+				if(g_sort_only)
+				{
+				/* FIXME :
+				 * please note that in this rudimentary model we also take into account the matrix creation time.
+				 */
+                	                raw_Mflops= (rsb_perf_t) mtxAp->element_count;
+        	                        true_Mflops=(((double)mtxAp->nnz)*log((double)mtxAp->nnz))/RSB_REAL_MILLION;
+					op_t=mct;	/* our timed operation is matrix construction */
+				}
+				else
+				{
+	                                raw_Mflops = rsb__estimate_mflops_per_op_spmv_uaua(mtxAp);
+	                                true_Mflops = raw_Mflops/fillin;
+	                                raw_Mflops *=nrhs;
+	                                true_Mflops*=nrhs;
+				}
+
+
+#if RSB_WANT_MKL
+	if(want_mkl_bench && !(cc==1 && mkl_coo_op_time_best_serial != RSB_CONST_IMPOSSIBLY_BIG_TIME))
+	{
+			rsb_nnz_idx_t annz = RSB_MAX(nnz,nrA+1),rnz=0,mklnz=nnz;
+			/* please note that mkl routines do not support stride */
+			/* FIXME: a non monotonically-increasing order will do harm */
+			mkl_coo2csr_time = RSB_TIME_ZERO;
+			mkl_coo_op_tot_time = RSB_TIME_ZERO;
+			mkl_coo_op_time = RSB_TIME_ZERO;
+			mkl_coo_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+			//mkl_coo_op_time_best_serial = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+			mkl_csr_op_tot_time = RSB_TIME_ZERO;
+			mkl_csr_op_time = RSB_TIME_ZERO;
+			mkl_csr_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+			//mkl_csr_op_time_best_serial = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+			
+			if(nrhs>1)
+				want_mkl_bench_coo = RSB_BOOL_FALSE;/* 20130401 FIXME: this circumvents an Intel MKL bug */
+#if 1
+			//mkl_set_dynamic(1);
+			//RSBENCH_STDOUT("MKL failed enabling dynamic thread number control\n");
+			mkl_set_num_threads(cc);
+			//RSBENCH_STDOUT("MKL has %d threads now\n",mkl_get_num_threads());
+#else /* 1 */
+			if(rsb__set_num_threads(cc)!=cc)
+			{
+				RSB_ERROR("failed setting %d threads!\n",cc);
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto err;
+			}
+#endif /* 1 */
+			if(!want_sort_after_load)
+			if(!want_in_place_assembly)
+			{
+				errval = rsb__util_sort_row_major_parallel(VA,IA,JA,nnz,nrA,ncA,typecode,RSB_FLAG_NOFLAGS);
+				mklnz = rsb_weed_out_duplicates (IA,JA,VA,nnz,typecode,RSB_FLAG_SORTED_INPUT);
+				if((!RSB_IS_VALID_NNZ_COUNT(mklnz)) || (!mklnz) || (RSB_SOME_ERROR(errval)))
+				{
+					RSB_PERR_GOTO(err,RSB_ERRM_EM);
+				}
+				annz = RSB_MAX(mklnz,nrA+1);
+			}
+			mkl_set_num_threads(cc); // necessary, or MKL will get puzzled
+
+		if(want_mkl_bench_coo)
+		{
+			totct -= rsb_time();
+			errval = rsb_util_coo_alloc_copy_and_stats(&M_VA,&M_IA,&M_JA,want_in_place_assembly?NULL:VA,want_in_place_assembly?NULL:IA,want_in_place_assembly?NULL:JA,NULL,NULL,mklnz,(annz-mklnz),typecode,0,mib,RSB_FLAG_NOFLAGS,NULL);
+			if(RSB_SOME_ERROR(errval)){RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_ENOMEM);goto mklerr;}
+			//errval = rsb_mtx_get_coo(mtxAp,M_VA,M_IA,M_JA,flags); /* FIXME: use this */
+			errval = rsb__do_get_rows_sparse(RSB_DEFAULT_TRANSPOSITION,NULL,mtxAp,M_VA,M_IA,M_JA,0,mtxAp->nr-1,&rnz,RSB_FLAG_NOFLAGS|mif);
+			totct += rsb_time();
+	
+			if(!M_VA  || !M_IA  || !M_JA ){RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_ENOMEM);goto mklerr;}
+
+			RSB_DO_ERROR_CUMULATE(errval,rsb__vectors_reinit(rhs,lhs,typecode,ndA,ndA,incX,incY));
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("PRE_MKL_COO_SPXV_",0,times,NULL);
+			RSB_TM_LIKWID_MARKER_R_START("MKL_COO_SPMV");
+			for(i=0;i<times;++i)
+			{
+				if(want_inner_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				mkl_coo_op_time = - rsb_time();
+				RSB_PERFORMANCE_COUNTERS_DUMP("PRE_MKL_COO_SPXV_",0);
+				if(nrhs>1)
+					RSB_DO_ERROR_CUMULATE(errval,rsb__mkl_coo_spmm(M_VA,nrA,ncA,nrhs,mklnz,M_IA,M_JA,rhs,rhsnri,lhs,outnri,alphap,betap,transA,typecode,flags));
+				else
+
+					RSB_DO_ERROR_CUMULATE(errval,rsb__mkl_coo_spmv(M_VA,nrA,ncA,mklnz,M_IA,M_JA,rhs,lhs,alphap,betap,transA,typecode,flags));
+				RSB_PERFORMANCE_COUNTERS_DUMP("POST_MKL_COO_SPXV_",1);
+				mkl_coo_op_time += rsb_time();
+				mkl_coo_op_time_best = RSB_MIN_ABOVE_INF(mkl_coo_op_time_best,mkl_coo_op_time,tinf);
+				mkl_coo_op_tot_time+=mkl_coo_op_time;
+			}
+			RSB_TM_LIKWID_MARKER_R_STOP("MKL_COO_SPMV");
+				RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("POST_MKL_COO_SPXV_",1,times,&mkl_coo_pci);
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+			if(n_dumpres)
+			{
+				RSBENCH_STDOUT("##MKL COO LHS %d elements post-peek:\n",n_dumpres);
+				rsb__debug_print_vector(lhs,RSB_MIN(ndA*nrhs,n_dumpres),typecode,incY);
+			}
+			if(cc==1) 
+				mkl_coo_op_time_best_serial = mkl_coo_op_time_best;
+
+			RSB_CONDITIONAL_FREE(M_VA);
+			RSB_CONDITIONAL_FREE(M_IA);
+			RSB_CONDITIONAL_FREE(M_JA);
+		} /* want_mkl_bench_coo */
+
+		if(want_mkl_bench_csr || RSB_MKL_APPROPRIATE_AT_TIME_SPEC( want_mkl_autotuner ) )
+		{
+			totct -= rsb_time();
+			errval = rsb_util_coo_alloc_copy_and_stats(&M_VAC,&M_IAC,&M_JAC,want_in_place_assembly?NULL:VA,want_in_place_assembly?NULL:IA,want_in_place_assembly?NULL:JA,NULL,NULL,mklnz,(annz-mklnz),typecode,0,mib,RSB_FLAG_NOFLAGS,NULL);
+			errval = rsb_mtx_get_csr(mtxAp->typecode,mtxAp,M_VAC,M_IAC,M_JAC,flags|mif);
+			totct += rsb_time();
+	
+			if(!M_VAC || !M_IAC || !M_JAC) {RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_ENOMEM);goto mklerr;}
+				// FIXME: Missing error handling !
+
+                        if(0)/* if want bogus contents (for debug/inspection) */
+                        {
+                                rsb_coo_idx_t i,npr=(mklnz+nrA-1)/nrA;
+                                rsb_nnz_idx_t l;
+                                M_IAC[0]=0;
+                                for(i=1;i<nrA;++i)
+                                        M_IAC[i]=M_IAC[i-1]+npr;
+                                for(i=0;i<nrA;++i)
+                                        for(l=M_IAC[i];l<M_IAC[i+1];++l)
+                                                M_JAC[l]=l-M_IAC[i];
+                                M_IAC[nrA]=mklnz;
+                        }
+
+			totct -= rsb_time();
+			if(!want_in_place_assembly)
+			{
+				mkl_coo2csr_time = - rsb_time();
+				RSB_DO_ERROR_CUMULATE(errval,rsb__mkl_coo2csr(nrA,ncA,mklnz,VA,IA,JA,M_VAC,M_IAC,M_JAC,typecode,mib));
+				mkl_coo2csr_time += rsb_time();
+				if(RSB_SOME_ERROR(rsb__csr_chk(M_IAC,M_JAC,nrA,ncA,mklnz,mib)))
+				{
+      					RSB_PERR_GOTO(err,RSB_ERRM_EM)
+				}
+			}
+			else
+			{
+				RSB_WARN("warning : skipping MKL coo2csr conversion (user chose in-place RSB build) \n");
+			}
+			totct += rsb_time();
+		} /* want_mkl_bench_csr || want_mkl_autotuner */
+
+			if(n_dumpres)
+			{
+				RSBENCH_STDOUT("##MKL CSR LHS %d elements pre-peek:\n",n_dumpres);
+				rsb__debug_print_vector(rhs,RSB_MIN(ndA*nrhs,n_dumpres),typecode,incX);
+			}
+			RSB_DO_ERROR_CUMULATE(errval,rsb__vectors_reinit(rhs,lhs,typecode,ndA,ndA,incX,incY));
+			if(n_dumprhs)
+			{
+				RSBENCH_STDOUT("##MKL CSR RHS %d elements pre-peek:\n",n_dumprhs);
+				rsb__debug_print_vector(lhs,RSB_MIN(ndA*nrhs,n_dumprhs),typecode,incY);
+			}
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+			if(want_mkl_bench_csr)
+			{
+			RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("PRE_MKL_CSR_SPXV_",0,times,NULL);
+			RSB_TM_LIKWID_MARKER_R_START("MKL_CSR_SPMV");
+			for(i=0;i<times;++i)
+			{
+				if(want_inner_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				mkl_csr_op_time = - rsb_time();
+				RSB_PERFORMANCE_COUNTERS_DUMP("PRE_MKL_CSR_SPXV_",0);
+				if(nrhs>1)
+					RSB_DO_ERROR_CUMULATE(errval,rsb__mkl_csr_spmm_bench(M_VAC,nrA,ncA,nrhs,mklnz,M_IAC,M_JAC,rhs,rhsnri,lhs,outnri,alphap,betap,transA,typecode,flags|mif,NULL,NULL,NULL,NULL));
+				else
+					RSB_DO_ERROR_CUMULATE(errval,rsb__mkl_csr_spmv_bench(M_VAC,nrA,ncA,mklnz,M_IAC,M_JAC,rhs,lhs,alphap,betap,transA,typecode,flags,NULL,NULL,NULL /* &mkl_csr_op_time */,NULL ));
+				RSB_PERFORMANCE_COUNTERS_DUMP("POST_MKL_CSR_SPXV_",1);
+				mkl_csr_op_time += rsb_time();
+				mkl_csr_op_time_best = RSB_MIN_ABOVE_INF(mkl_csr_op_time_best,mkl_csr_op_time,tinf);
+				mkl_csr_op_tot_time+=mkl_csr_op_time;
+			}
+			RSB_TM_LIKWID_MARKER_R_STOP("MKL_CSR_SPMV");
+			RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("POST_MKL_CSR_SPXV_",1,times,&mkl_csr_pci);
+			} /* want_mkl_bench_csr */
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+			if(cc==1)mkl_csr_op_time_best_serial=mkl_csr_op_time_best;
+			if(n_dumpres)
+			{
+				RSBENCH_STDOUT("##MKL CSR LHS %d elements post-peek:\n",n_dumpres);
+				rsb__debug_print_vector(lhs,RSB_MIN(ndA*nrhs,n_dumpres),typecode,incY);
+			}
+			if(n_dumprhs)
+			{
+				RSBENCH_STDOUT("##MKL CSR RHS %d elements post-peek:\n",n_dumprhs);
+				rsb__debug_print_vector(rhs,RSB_MIN(ndA*nrhs,n_dumprhs),typecode,incY);
+			}
+			if( mkl_csr_op_time_best != RSB_CONST_IMPOSSIBLY_BIG_TIME )
+				RSBENCH_STDOUT("##MKL STUFF DEBUG omp_set_num_threads():%d==omp_get_num_threads():%d  bestserialcsr:%0.5lf vs bestcsr:%0.5lf\n",omp_get_num_threads(),cc,mkl_csr_op_time_best_serial,mkl_csr_op_time_best);
+			if( mkl_coo_op_time_best != RSB_CONST_IMPOSSIBLY_BIG_TIME )
+				RSBENCH_STDOUT("##MKL STUFF DEBUG omp_set_num_threads():%d==omp_get_num_threads():%d  bestserialcoo:%0.5lf vs bestcoo:%0.5lf\n",omp_get_num_threads(),cc,mkl_coo_op_time_best_serial,mkl_coo_op_time_best);
+
+			if( RSB_MKL_APPROPRIATE_AT_TIME_SPEC( want_mkl_autotuner ) && want_mkl_autotuner > RSB_TIME_ZERO )
+			{
+				rsb_time_t btime = RSB_TIME_ZERO, matt = -rsb_time();
+				rsb_thread_t bthreads = at_mkl_csr_nt;
+				rsb_real_t sf = RSB_REAL_ZERO;
+				rsb_char_t * ops = "SPMV";
+
+				rsb__tattr_init(&(attr.clattr), NULL, nrA, mklnz, typecode, flags, nrhs);
+				attr.clattr.vl = 1; /* FIXME: new */
+				RSBENCH_STDOUT("# MKL CSR %s autotuning for thread spec. %d  trans %c (0=current (=%d),<0=auto,>0=specified)\n",ops,bthreads,RSB_TRANSPOSITION_AS_CHAR(transA),cc);
+				if(nrhs>1)
+					RSB_DO_ERROR_CUMULATE(errval,rsb__mkl_csr_spmm_bench(M_VAC,nrA,ncA,nrhs,mklnz,M_IAC,M_JAC,rhs,rhsnri,lhs,outnri,alphap,betap,transA,typecode,flags|mif,&bthreads,&btime,&(attr.clattr),&btpms));
+				else
+					RSB_DO_ERROR_CUMULATE(errval,rsb__mkl_csr_spmv_bench(M_VAC,nrA,ncA,mklnz,M_IAC,M_JAC,rhs,lhs,alphap,betap,transA,typecode,flags,&bthreads,&btime,&(attr.clattr),&btpms));
+				bthreads = bthreads ? bthreads : cc;
+				RSBENCH_STDOUT("# MKL CSR %s best threads / time / perf. were: %d / %lg / %lg\n",ops,bthreads,btime,(rsb__estimate_mflops_per_op_spmv_uaua(mtxAp)*nrhs)/btime);
+				matt += rsb_time();
+				RSBENCH_STDOUT("MKL CSR Autotuner took %.2lgs and estimated a speedup of %lf / %lf = %lf x (best round %d samples at %d threads)\n",matt,(attr.clattr).dtpo,(attr.clattr).btpo,(attr.clattr).dtpo/(attr.clattr).btpo,attr.clattr.nit[attr.clattr.optt],attr.clattr.optt);
+				at_mkl_csr_op_time_best = btime;
+				at_mkl_csr_nt = bthreads;
+				mkl_csr_op_time_best = (attr.clattr).dtpo;
+				totmt += matt;
+				RSB_ASSERT( bthreads > 0 );
+			} /* want_mkl_autotuner */
+
+			if(want_mkl_bench_gem)
+			{
+				rsb_coo_idx_t gemdim=0;
+			RSB_DO_ERROR_CUMULATE(errval,rsb__vectors_reinit(rhs,lhs,typecode,ndA,ndA,incX,incY));
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+			RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("PRE_MKL_GEMV_",0,times,NULL);
+			RSB_TM_LIKWID_MARKER_R_START("MKL_GEMV");
+			for(i=0;i<times;++i)
+			{
+				if(want_inner_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				mkl_gem_op_time = - rsb_time();
+				RSB_PERFORMANCE_COUNTERS_DUMP("PRE_MKL_GEMV_",0);
+				if(nrhs>1)
+					; /* FIXME */
+				/* FIXME: missing error handling */
+				rsb__mkl_gemv(typecode,VA,rhs,lhs,nnz,ndA,&gemdim);
+				RSB_PERFORMANCE_COUNTERS_DUMP("POST_MKL_GEMV_",1);
+				mkl_gem_op_time += rsb_time();
+				mkl_gem_op_time_best = RSB_MIN_ABOVE_INF(mkl_gem_op_time_best,mkl_gem_op_time,tinf);
+				mkl_gem_op_tot_time+=mkl_gem_op_time;
+			}
+			true_gem_Mflops=2.0*(double)gemdim*(double)gemdim; /* computed in double, to avoid integer overflow on large dimensions */
+			true_gem_Mflops/=RSB_REAL_MILLION;
+			RSB_TM_LIKWID_MARKER_R_STOP("MKL_GEMV");
+			RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("POST_MKL_GEMV_",1,times,&mkl_gem_pci);
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+			if(cc==1)mkl_gem_op_time_best_serial=mkl_gem_op_time_best;
+			if(n_dumpres)
+			{
+				RSBENCH_STDOUT("##MKL GEMX LHS %d elements peek:\n",n_dumpres);
+				rsb__debug_print_vector(lhs,RSB_MIN(ndA*nrhs,n_dumpres),typecode,incY);
+			}
+			} /* want_mkl_bench_gem */
+mklerr:
+			RSB_CONDITIONAL_FREE(M_VAC);
+			RSB_CONDITIONAL_FREE(M_IAC);
+			RSB_CONDITIONAL_FREE(M_JAC);
+			RSB_CONDITIONAL_FREE(M_VA);
+			RSB_CONDITIONAL_FREE(M_IA);
+			RSB_CONDITIONAL_FREE(M_JA);
+			rsb_perror(NULL,errval);
+		} /* want_mkl_bench  */
+#endif /* RSB_WANT_MKL */
+#ifdef RSB_WANT_OSKI_BENCHMARKING 
+			/* FIXME : should only exist for double as type */
+			if(want_oski_bench && guess_blocking_test!=2 /* g.b.t=2 is an extra run*/) 
+			{
+
+			rsb__sprintf(oxform,"return BCSR(InputMat, %zd, %zd)",(rsb_printf_int_t)br,(rsb_printf_int_t)bc);
+			//rsb__sprintf(oxform,"return BCSR(InputMat, %d, %d)",1,1);
+			/* FIXME : ncA and nrA are not enough : we should account for br and bc excess ! */
+
+			Oval = rsb__clone_area(VA,nnz*mtxAp->el_size);
+			OIA = rsb__clone_area(IA,nnz*sizeof(rsb_coo_idx_t));
+			OJA = rsb__clone_area(JA,nnz*sizeof(rsb_coo_idx_t));
+
+			/* we need duplicates, for we later will use VA as it is */
+			if(!Oval || !OIA || !OJA)
+			{
+				RSB_ERROR("failed aux arrays allocation !\n");goto err;
+			}
+
+			/*
+				Unfortunately, Oski does not have native BCSR constructors, but
+				relies on conversion from CSR.
+				So the measured time is more than it should be, but still a better
+				approximation than timing oski_CreateMatCSR alone.
+			*/
+
+			oski_a_t = -rsb_time();
+			if(RSB_SOME_ERROR(rsb__allocate_csr_arrays_from_coo_sorted(Oval, OIA, OJA, nnz, nrA, ncA, typecode, &Aval, &Aptr, &Aind)))
+			{
+				RSB_ERROR("failed csr allocation !\n");goto err;
+			}
+			oski_a_t += rsb_time();
+
+			if(!Aval || !Aptr || !Aind)
+			{
+				RSB_ERROR("failed csr arrays allocation !\n");goto err;
+			}
+
+			oski_m_t = -rsb_time();
+			A_tunable = oski_CreateMatCSR (Aptr, Aind, Aval, nrA, ncA,	/* CSR arrays */
+				// SHARE_INPUTMAT /*COPY_INPUTMAT*/,	/* "copy mode" */
+				/*SHARE_INPUTMAT*/ COPY_INPUTMAT,	/* "copy mode" */
+				1, INDEX_ZERO_BASED);
+				// we should also pass: INDEX_SORTED, INDEX_UNIQUE
+				// e.g.: 3, INDEX_ZERO_BASED, MAT_TRI_LOWER, MAT_UNIT_DIAG_IMPLICIT);
+			oski_m_t += rsb_time();
+
+			if(A_tunable==INVALID_MAT)
+			{
+				RSB_ERROR("invalid oski matrix!\n");goto err;
+			}
+
+			oski_t_t = -rsb_time();
+			if( oski_ApplyMatTransforms (A_tunable, oxform) )
+			{
+				RSB_ERROR("invalid transform!\n");goto err;
+			}
+			oski_t_t += rsb_time();
+
+			if(A_tunable==INVALID_MAT)
+			{
+				RSB_ERROR("invalid oski tuned matrix!\n");goto err;
+			}
+
+				/* FIXME : should error-check these steps */
+			//	RSBENCH_STDOUT("# oski : ncA=%zd, nrA=%zd\n",(rsb_printf_int_t)ncA,(rsb_printf_int_t)nrA);
+				x_view = oski_CreateVecView( rhs, ncA, STRIDE_UNIT );
+				y_view = oski_CreateVecView( lhs, nrA, STRIDE_UNIT );
+				if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				oski_t = - rsb_time();
+				for(i=0;i<times;++i)
+				{
+#error FIXME: flush breaks measured time
+					if(want_inner_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+					/* y <- alpha A * x + beta * y */
+					if(oski_MatMult( A_tunable, OP_NORMAL, oalpha, x_view, obeta, y_view ))
+					{
+							RSB_ERROR("failed oski_MatMult !\n");goto err;
+					}
+				}
+				oski_t += rsb_time();
+				if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				if(n_dumpres)
+					rsb__debug_print_vector(lhs,RSB_MIN(ndA*nrhs,n_dumpres),typecode,incY);
+				/* FIXME */
+	
+				oski_DestroyMat( A_tunable );
+				oski_DestroyVecView( x_view );
+				oski_DestroyVecView( y_view );
+				RSB_CONDITIONAL_FREE(Aptr);
+				RSB_CONDITIONAL_FREE(Aind);
+				RSB_CONDITIONAL_FREE(Aval);
+				RSB_CONDITIONAL_FREE(Oval);
+				RSB_CONDITIONAL_FREE(OJA  );
+				RSB_CONDITIONAL_FREE(OIA );
+				Aptr= Aind= Aval= NULL;
+			} /* want_oski_bench  */
+#endif /* RSB_WANT_OSKI_BENCHMARKING */
+			if(ti>0)
+				want_getrow_bench=0;
+			if(want_getrow_bench)
+			{
+				const rsb_coo_idx_t nr=1;
+				void * RVA = NULL;
+				rsb_coo_idx_t*RIA = NULL;
+				rsb_coo_idx_t*RJA = NULL;
+
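+				/* fetch the matrix back one row at a time (nr=1) into COO
+				   arrays preallocated for up to nc*nr nonzeroes per extraction */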
+				if(RSB_SOME_ERROR(errval = rsb_util_coo_alloc(&RVA,&RIA,&RJA,mtxAp->nc*nr,typecode,RSB_BOOL_FALSE))){goto errgr;}
+				for(i=0;i<times;++i)
+				{
+					rsb_time_t getrow_op_time = RSB_TIME_ZERO;
+					rsb_coo_idx_t ri=0;
+					rsb_nnz_idx_t rnz=0;
+					getrow_op_time = - rsb_time();
+					for(ri=0;ri+nr-1<mtxAp->nr;ri+=nr)
+						RSB_DO_ERROR_CUMULATE(errval,rsb_mtx_get_coo_block(mtxAp,RVA,RIA,RJA,ri,RSB_MIN(mtxAp->nr-1,ri+nr-1),0,mtxAp->nc-1,NULL,NULL,&rnz,mtxAp->flags));
+					getrow_op_time += rsb_time();
+					getrow_op_time_best = RSB_MIN_ABOVE_INF(getrow_op_time_best,getrow_op_time,tinf);
+					getrow_op_tot_time+=getrow_op_time;
+				}
+				if(cc==1)getrow_op_time_best_serial=getrow_op_time_best;
+errgr:
+				RSB_CONDITIONAL_FREE(RVA);
+				RSB_CONDITIONAL_FREE(RIA);
+				RSB_CONDITIONAL_FREE(RJA);
+				if(RSB_SOME_ERROR(errval))
+				{goto err;}
+			} /* want_getrow_bench */
+
+			if(ti>0)
+				want_getdiag_bench=0;
+			if(want_getdiag_bench)
+			{
+				void * DV = rsb__calloc_vector(mtxAp->nr,mtxAp->typecode);
+				if(!DV) { errval = RSB_ERR_ENOMEM; goto err; }
+				for(i=0;i<times;++i)
+				{
+					rsb_time_t diag_op_time = RSB_TIME_ZERO;
+					diag_op_time = - rsb_time();
+					RSB_DO_ERROR_CUMULATE(errval,rsb__dodo_getdiag(mtxAp,DV));
+					diag_op_time += rsb_time();
+					diag_op_time_best = RSB_MIN_ABOVE_INF(diag_op_time_best,diag_op_time,tinf);
+					diag_op_tot_time+=diag_op_time;
+				}
+				if(cc==1)diag_op_time_best_serial=diag_op_time_best;
+				RSB_CONDITIONAL_FREE(DV);
+				if(RSB_SOME_ERROR(errval))
+				{goto err;}
+			} /* want_getdiag_bench */
+
+			if(g_sort_only)
+			{
+				/* single line output, ideal for benchmark data to be processed later */
+				RSBENCH_STDOUT ( "%-20s	%s", rsb__basename(filename),rsb__sprint_matrix_implementation_code2(mtxAp,buf,flags));
+
+				RSBENCH_STDOUT ( "	%.3lf	%lg",
+				//raw_Mflops/op_t,	/* please note that in the sort case, it is an absolutely meaningless value */
+				true_Mflops/op_t,	/* algorithmic millions of ops per second (not an accurate model)  */
+				op_t/true_Mflops	/* the sorting algorithmic constant (not an accurate model) */
+				);
+			}
+			else
+			if(!g_estimate_matrix_construction_time)
+			{
+#if RSB_EXPERIMENTAL_WANT_BEST_TIMES
+				rsb__dump_performance_record(rsb__basename(filename),mtxAp,true_Mflops/best_t,raw_Mflops/best_t,"spmv_uaua",flags);
+#else /* RSB_EXPERIMENTAL_WANT_BEST_TIMES */
+				rsb__dump_performance_record(rsb__basename(filename),mtxAp,true_Mflops/op_t,raw_Mflops/op_t,"spmv_uaua",flags);
+#endif /* RSB_EXPERIMENTAL_WANT_BEST_TIMES */
+			}
+			if(g_estimate_matrix_construction_time)
+			{
+				/* in this case the user also asked for:
+				   * matrix construction Mflops
+				   * the ratio of the selected op time to the matrix construction time
+				 */
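+				/* i.e. nnz/(mct*1e6): millions of nonzeroes constructed per
+				   second, and mct/op_t: how many op executions one matrix
+				   construction costs */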
+				RSBENCH_STDOUT("\t%.3lg\t%.3lg	", ((double)nnz)/(mct*RSB_REAL_MILLION), mct/op_t);
+				rsb__fprint_matrix_implementation_code(mtxAp, "spmv_uaua", flags, RSB_STDOUT_FD);
+				RSBENCH_STDOUT ( "\n");
+			}
+			omta=((double)rsb_spmv_memory_accessed_bytes(mtxAp));
+			
+#if RSB_WANT_MKL
+			if(want_mkl_bench)
+			{
+			if(want_mkl_bench_coo)
+			{
+				RSBENCH_STDOUT ( "#MKL_COO_VS_US-SPMV:%-20s\t%s\t%10.2lf\t%10.2lf\n", rsb__basename(filename),rsb__sprint_matrix_implementation_code2(mtxAp,buf,RSB_FLAG_NOFLAGS),raw_Mflops/(mkl_coo_op_tot_time/times),raw_Mflops/op_t);
+				RSBENCH_STDOUT ( "#MKL_COO2CSR2SPMV_VS_US:%-20s\t%s\t%10.2lf\t%10.2lf\n", rsb__basename(filename),rsb__sprint_matrix_implementation_code2(mtxAp,buf,RSB_FLAG_NOFLAGS),(mkl_coo2csr_time)/(mkl_csr_op_tot_time/times),-1.0);
+			}
+			if(want_mkl_bench_csr)
+			{
+				RSBENCH_STDOUT ( "#MKL_CSR_VS_US-SPMV:%-20s\t%s\t%10.2lf\t%10.2lf\n", rsb__basename(filename),rsb__sprint_matrix_implementation_code2(mtxAp,buf,RSB_FLAG_NOFLAGS),raw_Mflops/(mkl_csr_op_tot_time/times),raw_Mflops/op_t);
+			}
+			}
+#endif /* RSB_WANT_MKL */
+#ifdef RSB_WANT_OSKI_BENCHMARKING 
+			if(want_oski_bench)
+			{
+				RSBENCH_STDOUT ( "#OSKI_VS_US-SPMV:%-20s\t%s\t%10.2lf\t%10.2lf\n", rsb__basename(filename),rsb__sprint_matrix_implementation_code2(mtxAp,buf,RSB_FLAG_NOFLAGS),raw_Mflops/(oski_t/times),raw_Mflops/op_t);
+				RSBENCH_STDOUT ( "#OSKI_VS_US-ASM~:%-20s\t%s\t%10.2lf\t%10.2lf\n", rsb__basename(filename),rsb__sprint_matrix_implementation_code2(mtxAp,buf,RSB_FLAG_NOFLAGS),oski_m_t+oski_t_t+oski_a_t,mct);
+			}
+#endif /* RSB_WANT_OSKI_BENCHMARKING  */
+			/* WARNING : we cannot use RSB_FLAG_SORTED_INPUT in the recursive case
+				     until the following routine is able to use Z-sorted values. */
+			efillin = RSB_REAL_ZERO,eperf = RSB_REAL_ZERO;
+
+			/* FIXME : dies with ct20stif.mtx, now */
+			#if 0
+			RSB_WARN("warning : skipping rsb__estimate_expected_fillin_for_blocking\n");
+			fet = - rsb_time();
+			//rsb__estimate_expected_fillin_for_blocking(VA,IA,JA,nrA,ncA,nnz,typecode,flags/*|RSB_FLAG_SORTED_INPUT*/,br,bc,&efillin);/*TODO:thiscouldbedangerous:fixit!*/
+			efillin=mtxAp->einfo.efillin;	/* NEW */
+			fet += rsb_time();
+			#else /* 0 */
+			fet = RSB_TIME_ZERO;
+			#endif /* 0 */
+			rsb__estimate_expected_raw_performance_for_blocking(nrA,ncA,br,bc,nnz,typecode,flags,efillin,&eperf);
+
+			if(cc==1)
+			{
+				/* we need input flags, not instantiated matrix flags (which might not have that flag) */
+				if(!RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING))
+					base_best_t=best_t;
+				else
+					serial_best_t=best_t;
+			}
+	
+			if(want_perf_dump) 
+			if(RSB_DO_FLAG_HAS(/*mtxAp->*/flags,RSB_FLAG_QUAD_PARTITIONING))
+			{
+#if RSB_WANT_MKL
+				/* FIXME: this #if is horrible */
+				rsb__pr_set(rspr, mtxAp/*NULL */ /* FIXME */, NULL, filenamei, ci, incXi, incYi, nrhsi, typecodesi, ti, transA, RSB_CONST_IMPOSSIBLY_BIG_TIME, mkl_csr_op_time_best, RSB_CONST_IMPOSSIBLY_BIG_TIME, at_mkl_csr_op_time_best, RSB_THREADS_AUTO, at_mkl_csr_nt, RSB_CONST_IMPOSSIBLY_BIG_TIME, -1, NULL, NULL, &btpms[1], &btpms);
+#endif
+			}
+
+#if RSB_EXPERIMENTAL_WANT_BEST_TIMES
+			RSBENCH_STDOUT ( "#	%10.2lf	%10.2lf	( best, average net performance in %d tries ); diff:%2.0lf%%\n",
+				((double)true_Mflops/best_t), ((double)true_Mflops/op_t),
+				(int)times,
+				/* for marcin : */
+				((((double)true_Mflops/best_t)-((double)true_Mflops/op_t))*100)/((double)true_Mflops/op_t)
+				);
+#endif /* RSB_EXPERIMENTAL_WANT_BEST_TIMES */
+
+			RSBENCH_STDOUT ( "#	%10.2lf	%10.2lf	%10.2lf %10.6lf (min bw, reasonable bw, overestimated max bw, w/r ratio) (MB/s)\n"
+				     "#	%10.2lf (MB per mop) %10.2lf (rhs loads, with a variable degree of locality)\n"
+				     "#	%10.2lf (MB per mop, estimated)\n"
+				     "#	%10.2lf (assembly + extra to (best) mop time ratio) (%10.2lf s)\n"
+				     "#	%10.2lf (assembly (p.e.+s.a.+e.i.+e.s.+...) to mop time ratio)\n"
+/*				     "#	%10.2lf (performance estimation to mop time ratio)\n"*/
+/*				     "#	%10.2lf (gross fillin estimation to mop time ratio)\n"*/
+				     "#	%10.2lf (structure analysis to mop time ratio)\n"
+				     "#	%10.2lf (elements insertion to mop time ratio)\n"
+				     "#	%10.2lf (elements sorting to mop time ratio) (%10.2lf s)\n"
+				     "#	%10.2lf (elements partitioning to mop time ratio)\n"
+				     "#	%10.2lf (recursion sort to mop time ratio)\t%10ld (max recursion depth)\n"
+				     "#	%10.2lf	%10.2lf (nnz per row/column)\n"
+					,
+				((double)rsb_spmv_memory_accessed_bytes_min(mtxAp))*(1.e-6/best_t) ,
+				((double)omta)*(1.e-6/best_t) ,
+				((double)rsb_spmv_memory_accessed_bytes_max(mtxAp))*(1.e-6/best_t) ,
+				((double)rsb_spmv_memory_accessed_bytes_wr_ratio(mtxAp)),
+				((double)omta)*(1.e-6),
+				(1.0>((fillin*nnz)/(br*ncA))?1.0:((fillin*nnz)/(br*ncA))),
+				((double)rsb_spmv_memory_accessed_bytes_(br,bc,nrA,ncA,efillin*nnz,((efillin*nnz)/br)/bc,nrA/br,mtxAp->el_size))*(1.e-6),
+				(mct)/(best_t),
+				(mtxAp->tat),
+				(mtxAp->tat)/(best_t),
+/*				(mtxAp->pet)/(best_t),*/
+/*				(fet)/(best_t),*/
+				(mtxAp->sat)/(best_t),
+				(mtxAp->eit)/(best_t),
+				(mtxAp->est)/(best_t), (mtxAp->est),
+				(mtxAp->cpt)/(best_t),
+				((mtxAp->rpt)/(best_t)),((long)rsb__get_recursive_matrix_depth(mtxAp)),
+				(double)nnz/nrA, (double)nnz/ncA
+				);
+				if(RSB_MAXIMAL_CONFIGURED_BLOCK_SIZE>1)
+				RSBENCH_STDOUT ( 
+				     "#	%10.2lf (estimated fillin)"
+				     "#	%10.2lf (estimated fillin error)\n"
+				     "#	%10.2lf (estimated raw performance)"
+				     "#	%10.2lf (estimated raw performance error)\n"
+				     "#	%10.2lf (estimated net performance)"
+				     "#	%10.2lf (estimated net performance error)\n",
+				efillin, (efillin-fillin)/fillin,
+				eperf, (eperf-raw_Mflops/best_t)/(raw_Mflops/best_t),
+				efillin?(eperf/efillin):-1,efillin?(((eperf/efillin)-(true_Mflops/best_t))/(true_Mflops/best_t)):-1
+				);
+				RSBENCH_STDOUT( "#used index storage compared to COO:%zd vs %zd bytes (%.02lf%%) "
+					,(size_t)rsb__get_index_storage_amount(mtxAp),sizeof(rsb_coo_idx_t)*2*nnz
+					,(100*(double)rsb__get_index_storage_amount(mtxAp))/RSB_UTIL_COO_IDX_OCCUPATION(mtxAp->nr,mtxAp->nc,mtxAp->nnz)
+				);
+				RSBENCH_STDOUT( "; compared to CSR:%zd vs %zd bytes (%.02lf%%)\n"
+					,(size_t)rsb__get_index_storage_amount(mtxAp),
+					 (sizeof(rsb_coo_idx_t)*nnz+sizeof(rsb_nnz_idx_t)*(mtxAp->nr+1))
+					,(100*(double)rsb__get_index_storage_amount(mtxAp))/RSB_UTIL_CSR_IDX_OCCUPATION(mtxAp->nr,mtxAp->nc,mtxAp->nnz)
+				);
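+				/* reference index costs: COO needs 2*nnz index words (one row
+				   and one column index per nonzero); CSR needs nnz column
+				   indices plus nr+1 row pointers */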
+			rsb__attr_dump(&attr);
+			RSB_BZERO_P((&attr));
+			if(ci==0 && smt == RSB_TIME_ZERO && RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING))
+			{
+				smt=best_t;
+				sest=mest;
+				//sect=mect;
+				ssat=msat;
+				seit=meit;
+				scpt=mcpt;
+			}
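+			/* The s-prefixed times recorded above (sest, ssat, seit, scpt) are
+			   the serial-run snapshots; the *_SCALING records printed further
+			   below divide them by the corresponding m-prefixed times. */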
+			if(ci==cl-1 && pmt == RSB_TIME_ZERO)
+			{
+				pmt=best_t;
+			}
+				if(want_verbose == RSB_BOOL_TRUE && (RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING)||fn==1))
+				{
+					rsb_nnz_idx_t minnz=0,maxnz=0,avgnz=0;
+					rsb_bool_t vrpr = (times != 0) ? RSB_BOOL_TRUE : RSB_BOOL_FALSE;
+
+					if(vrpr)
+					{
+					RSBENCH_STDOUT("%%:PERFORMANCE:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",true_Mflops/best_t);
+					RSBENCH_STDOUT("\t%le\t%le\n",true_Mflops,best_t);
+
+					RSBENCH_STDOUT("%%:OP_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",best_t);
+					}
+
+					if( no_lock_op_time_best != RSB_CONST_IMPOSSIBLY_BIG_TIME )
+					{
+					RSBENCH_STDOUT("%%:FAKE_LOCK_PERFORMANCE:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",true_Mflops/no_lock_op_time_best);
+
+					RSBENCH_STDOUT("%%:FAKE_LOCK_OP_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",no_lock_op_time_best);
+
+					RSBENCH_STDOUT("%%:FAKE_LOCK_PERF_SCALING:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",serial_no_lock_op_time_best/no_lock_op_time_best);
+					}
+
+					if(qt_op_time_best != RSB_CONST_IMPOSSIBLY_BIG_TIME && cc==1)
+					{
+					RSBENCH_STDOUT("%%:RECURSIVE_SERIAL_PERFORMANCE:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",true_Mflops/qt_op_time_best);
+
+					RSBENCH_STDOUT("%%:RECURSIVE_SERIAL_OP_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",qt_op_time_best);
+					}
+
+
+					if(vrpr)
+					{
+					if( serial_best_t != RSB_CONST_IMPOSSIBLY_BIG_TIME )
+					RSBENCH_STDOUT("%%:PERF_SCALING:"),RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(),
+					RSBENCH_STDOUT("\t%10.2lf\n",serial_best_t/best_t);
+					}
+
+					RSBENCH_STDOUT("#%%:CONSTRUCTOR_*:SORT	SCAN	INSERT	SCAN+INSERT\n");
+					RSBENCH_STDOUT("%%:CONSTRUCTOR_TIMES:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\t%10.6lf\t%10.6lf\t%10.6lf\n",mest,msat,meit,msat+meit);
+
+					RSBENCH_STDOUT("%%:UNSORTEDCOO2RSB_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n", mest+msat+meit);
+
+					RSBENCH_STDOUT("%%:RSB_SUBDIVISION_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n", msat);
+
+					RSBENCH_STDOUT("%%:RSB_SHUFFLE_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n", meit);
+
+					RSBENCH_STDOUT("%%:ROW_MAJOR_SORT_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n", mest);
+
+					RSBENCH_STDOUT("%%:ROW_MAJOR_SORT_SCALING:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.3lf\n", sest/mest);
+
+					RSBENCH_STDOUT("%%:SORTEDCOO2RSB_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n", msat+meit);
+
+					RSBENCH_STDOUT("%%:ROW_MAJOR_SORT_TO_MOP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.3lf\n", mest/best_t);
+
+					if(vrpr)
+					{
+					RSBENCH_STDOUT("%%:CLEANUP_TO_MOP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",mect/best_t);
+
+					RSBENCH_STDOUT("%%:CONSTRUCTOR_TO_MOP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\t%10.2lf\t%10.2lf\t%10.2lf\n",mest/best_t,msat/best_t,meit/best_t,(msat+meit)/best_t);
+
+
+					RSBENCH_STDOUT("%%:UNSORTEDCOO2RSB_TO_MOP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(msat+meit+mest)/best_t);
+
+					RSBENCH_STDOUT("%%:SORTEDCOO2RSB_TO_MOP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(msat+meit)/best_t);
+
+					RSBENCH_STDOUT("%%:RSB_SUBDIVISION_TO_MOP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(msat)/best_t);
+
+					RSBENCH_STDOUT("%%:RSB_SHUFFLE_TO_MOP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(meit)/best_t);
+					}
+
+					RSBENCH_STDOUT("%%:UNSORTEDCOO2RSB_SCALING:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(ssat+seit+sest)/(msat+meit+mest));
+
+					RSBENCH_STDOUT("%%:SORTEDCOO2RSB_SCALING:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(ssat+seit)/(msat+meit));
+
+					RSBENCH_STDOUT("%%:RSB_SUBDIVISION_SCALING:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(ssat)/(msat));
+
+					RSBENCH_STDOUT("%%:RSB_SHUFFLE_SCALING:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(seit)/(meit));
+
+					RSBENCH_STDOUT("%%:CONSTRUCTOR_SCALING:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\t%10.2lf\t%10.2lf\t%10.2lf\n",sest/mest,ssat/msat,seit/meit,(ssat+seit)/(meit+msat));
+
+					if( base_best_t != RSB_CONST_IMPOSSIBLY_BIG_TIME )
+					RSBENCH_STDOUT("%%:PERF_SCALING2CSR:"),RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(),
+					RSBENCH_STDOUT("\t%10.2lf\n",base_best_t/best_t);
+
+
+					RSBENCH_STDOUT("#%%:SM_COUNTS:	Tot	HalfwordCsr	FullwordCsr	HalfwordCoo	FullwordCoo\n");
+					RSBENCH_STDOUT("%%:SM_COUNTS:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					//RSBENCH_STDOUT("\t%d\t%d\t%d\t%d\t%d\n",
+					RSBENCH_STDOUT("\t%ld\t%ld\t%ld\t%ld\t%ld\n",
+rsb__terminal_recursive_matrix_count(mtxAp),
+rsb__terminal_recursive_matrix_count_with_storage_and_flags(mtxAp,RSB_MATRIX_STORAGE_BCSR,RSB_FLAG_USE_HALFWORD_INDICES_CSR),
+rsb__terminal_recursive_matrix_count_with_storage_and_no_flags(mtxAp,RSB_MATRIX_STORAGE_BCSR,RSB_FLAG_USE_HALFWORD_INDICES_CSR),
+rsb__terminal_recursive_matrix_count_with_storage_and_flags(mtxAp,RSB_MATRIX_STORAGE_BCOR,RSB_FLAG_USE_HALFWORD_INDICES_COO),
+rsb__terminal_recursive_matrix_count_with_storage_and_no_flags(mtxAp,RSB_MATRIX_STORAGE_BCOR,RSB_FLAG_USE_HALFWORD_INDICES_COO)
+						);
+
+					RSBENCH_STDOUT("%%:SM_IDXOCCUPATIONRSBVSCOOANDCSR:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%zd\t%zd\t%zd\n",rsb__get_index_storage_amount(mtxAp),
+						RSB_UTIL_COO_IDX_OCCUPATION(mtxAp->nr,mtxAp->nc,mtxAp->nnz),
+						RSB_UTIL_CSR_IDX_OCCUPATION(mtxAp->nr,mtxAp->nc,mtxAp->nnz)
+						);
+
+					RSBENCH_STDOUT("%%:SM_IDXOCCUPATION:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%zd\n",rsb__get_index_storage_amount(mtxAp));
+
+					RSBENCH_STDOUT("%%:SM_MEMTRAFFIC:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.0lf\n",omta);
+#if 0
+					/* new, elegant */
+					RSBENCH_STDOUT("%%:SM_MINMAXAVGSUBMNNZ:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					{
+						rsb_submatrix_idx_t i=0;
+						rsb_real_t avgnz = ((rsb_real_t)mtxAp->nnz) / mtxAp->all_leaf_matrices_n;
+						rsb_coo_idx_t maxnz = 0, minnz = RSB_MAX_MATRIX_NNZ ;
+
+						for(i=0;i<mtxAp->all_leaf_matrices_n;++i)
+						{
+							struct rsb_mtx_t * submatrix = mtxAp->all_leaf_matrices[i].mtxlp;
+							maxnz = RSB_MAX(maxnz,submatrix->nnz);
+							minnz = RSB_MIN(minnz,submatrix->nnz);
+						}
+						RSBENCH_STDOUT(" %d %d %.2lf %d\n",minnz,maxnz,avgnz,mtxAp->all_leaf_matrices_n);
+					}
+#else
+					/* old, obsolete */
+					rsb__do_compute_terminal_nnz_min_max_avg_count(mtxAp,&minnz,&maxnz,&avgnz);
+					RSBENCH_STDOUT("%%:SM_MINMAXAVGNNZ:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%d\t%d\t%d\n",minnz,maxnz,avgnz);
+#endif
+
+				if(want_print_per_subm_stats)
+				{
+					RSBENCH_STDOUT("%%:SM_NNZ_HISTOGRAM:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					if(!mtxAp->all_leaf_matrices)
+						RSBENCH_STDOUT(" %zd\n",(size_t)mtxAp->nnz);
+					else
+					{
+						rsb_submatrix_idx_t i=0;
+						for(i=0;i<mtxAp->all_leaf_matrices_n;++i)
+							RSBENCH_STDOUT(" %zd",(size_t)mtxAp->all_leaf_matrices[i].mtxlp->nnz);
+						RSBENCH_STDOUT("\n");
+					}
+
+					RSBENCH_STDOUT("%%:SM_NNZ_PER_ROW:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					if(!mtxAp->all_leaf_matrices)
+						RSBENCH_STDOUT(" %lf\n",((double)mtxAp->nnz)/mtxAp->nr);
+					else
+					{
+						rsb_submatrix_idx_t i=0;
+						for(i=0;i<mtxAp->all_leaf_matrices_n;++i)
+							RSBENCH_STDOUT(" %.2lf",((double)mtxAp->all_leaf_matrices[i].mtxlp->nnz)/mtxAp->all_leaf_matrices[i].mtxlp->nr);
+						RSBENCH_STDOUT("\n");
+					}
+				} /* want_print_per_subm_stats */
+
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+			if(want_perf_counters)
+				{
+					int i;
+					for(i=0;i<rsb_pci.eventnum;++i)
+					{
+						RSBENCH_STDOUT("%%:RSB_%s:",rsb_pci.eventdesc[i]);
+						RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+						RSBENCH_STDOUT("\t%zd\n",(size_t)(rsb_pci.eventvals[i]));
+					}
+				} /* want_perf_counters */
+#endif /* RSB_WANT_PERFORMANCE_COUNTERS */
+				}
+			} /* times */
+#if RSB_WANT_MKL
+				if(want_mkl_bench) /* 20110428 */
+				if(want_verbose == RSB_BOOL_TRUE && (RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING)||fn==1))
+				{
+#ifdef mkl_get_version
+					MKLVersion mv;
+					mkl_get_version(&mv);
+					RSBENCH_STDOUT("#%%:MKL %d.%d-%d, %s, %s, %s, %s\n",mv.MajorVersion,mv.MinorVersion,mv.UpdateVersion,mv.ProductStatus,mv.Build,mv.Processor,mv.Platform);
+#else /* mkl_get_version */
+					RSBENCH_STDOUT("#%%:MKL, version unknown\n");
+#endif /* mkl_get_version */
+			if(want_mkl_bench_coo)
+			{
+					RSBENCH_STDOUT("%%:MKL_COO_PERFORMANCE:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",true_Mflops/mkl_coo_op_time_best);
+
+					RSBENCH_STDOUT("%%:MKL_COO_OP_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(); RSBENCH_STDOUT("\t%10.6lf\n",mkl_coo_op_time_best);
+
+					if( mkl_coo_op_time_best_serial != RSB_CONST_IMPOSSIBLY_BIG_TIME )
+					RSBENCH_STDOUT("%%:MKL_COO_PERF_SCALING:"),RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(),
+					RSBENCH_STDOUT("\t%10.2lf\n",mkl_coo_op_time_best_serial/mkl_coo_op_time_best);
+			}
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+			if(want_perf_counters)
+				{
+					int i;
+					for(i=0;i<mkl_csr_pci.eventnum;++i)
+					{
+						RSBENCH_STDOUT("%%:MKL_CSR_%s:",mkl_csr_pci.eventdesc[i]);
+						RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+						RSBENCH_STDOUT("\t%zd\n",mkl_csr_pci.eventvals[i]);
+					}
+					if(want_mkl_bench_coo)
+					for(i=0;i<mkl_coo_pci.eventnum;++i)
+					{
+						RSBENCH_STDOUT("%%:MKL_COO_%s:",mkl_coo_pci.eventdesc[i]);
+						RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+						RSBENCH_STDOUT("\t%zd\n",mkl_coo_pci.eventvals[i]);
+					}
+				}
+#endif /* RSB_WANT_PERFORMANCE_COUNTERS */
+			if(want_mkl_bench_csr)
+			{
+					RSBENCH_STDOUT("%%:MKL_CSR_PERFORMANCE:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",true_Mflops/mkl_csr_op_time_best);
+
+					RSBENCH_STDOUT("%%:MKL_CSR_OP_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",mkl_csr_op_time_best);
+
+					if( mkl_csr_op_time_best_serial != RSB_CONST_IMPOSSIBLY_BIG_TIME )
+					RSBENCH_STDOUT("%%:MKL_CSR_PERF_SCALING:"),RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(),
+					RSBENCH_STDOUT("\t%10.2lf\n",mkl_csr_op_time_best_serial/mkl_csr_op_time_best);
+			}
+			if(want_mkl_bench_gem)
+			{
+					RSBENCH_STDOUT("%%:MKL_GEMV_PERFORMANCE:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",true_gem_Mflops/mkl_gem_op_time_best);
+
+					RSBENCH_STDOUT("%%:MKL_GEMV_OP_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",mkl_gem_op_time_best);
+
+					if( mkl_gem_op_time_best_serial != RSB_CONST_IMPOSSIBLY_BIG_TIME )
+					RSBENCH_STDOUT("%%:MKL_GEMV_PERF_SCALING:"),RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(),
+					RSBENCH_STDOUT("\t%10.2lf\n",mkl_gem_op_time_best_serial/mkl_gem_op_time_best);
+			}
+
+					if( mkl_coo2csr_time != RSB_TIME_ZERO )
+					{
+					RSBENCH_STDOUT("%%:MKL_COO2CSR_T0_CSR_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",mkl_coo2csr_time);
+					RSBENCH_STDOUT("%%:MKL_COO2CSR_T0_CSR_OP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",mkl_coo2csr_time/mkl_csr_op_time_best);
+
+
+					RSBENCH_STDOUT("%%:SORTEDCOO2RSB_VS_MKLCOO2CSR:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.3lf\n", (msat+meit)/(mkl_coo2csr_time));
+					}
+				} /* want_mkl_bench */
+#endif /* RSB_WANT_MKL */
+				if(want_getrow_bench)
+				{
+					const char*norsbnotice="";
+					const char*rsbnotice="NORSB_";
+					const char*notice=norsbnotice;
+				if(want_verbose == RSB_BOOL_TRUE && (RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING)||fn==1))
+					{}
+				else
+					notice = rsbnotice;
+
+					RSBENCH_STDOUT("%%:%sGETROW_PERFORMANCE:",notice);RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",((rsb_time_t)mtxAp->nnz)/(RSB_REAL_MILLION*getrow_op_time_best));
+					RSBENCH_STDOUT("%%:%sGETROW_OP_TIME:",notice);RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",getrow_op_time_best);
+					RSBENCH_STDOUT("%%:%sGETROW_TO_SPMV_OP_TIME:",notice);RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",getrow_op_time_best/best_t);
+
+				}
+				if(want_getdiag_bench)
+				{
+					const char*norsbnotice="";
+					const char*rsbnotice="NORSB_";
+					const char*notice=norsbnotice;
+				if(want_verbose == RSB_BOOL_TRUE && (RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING)||fn==1))
+					{}
+				else
+					notice = rsbnotice;
+
+					RSBENCH_STDOUT("%%:%sGETDIAG_PERFORMANCE:",notice);RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",((rsb_time_t)mtxAp->nr)/(RSB_REAL_MILLION*diag_op_time_best));
+					RSBENCH_STDOUT("%%:%sGETDIAG_OP_TIME:",notice);RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",diag_op_time_best);
+					RSBENCH_STDOUT("%%:%sGETDIAG_TO_SPMV_OP_TIME:",notice);RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",diag_op_time_best/best_t);
+
+				}
+				RSBENCH_STDOUT( "#\n");/* end of record */
+				if(guess_blocking_test)
+				{
+					rsb_flags_t oflags = RSB_FLAG_NOFLAGS;
+					/* TODO : should keep info of the worst one, too */
+					rsb_perf_t nrp=(true_Mflops/op_t),bomta = RSB_REAL_ZERO /* best op memory traffic amount */;
+
+					if(guess_blocking_test==1)
+					{
+						if( nrp>RSB_REAL_ZERO && nrp>bperf)
+						{
+							bperf=nrp;
+							bomta=omta;
+							bfillin=fillin;
+							ebfillin=efillin;
+							bri=brvi;
+							bci=bcvi;
+						}
+					
+						if(brv[brvi]==1 && bcv[bcvi]==1)/* IF ANY! */
+						{
+							cperf=nrp;
+						}
+ 
+						if((nrp>RSB_REAL_ZERO && nrp<wperf) || wperf == RSB_REAL_ZERO)
+						{
+							wperf=nrp;
+						}
+
+						if( fillin > maxfillin )
+						{
+							maxfillin=fillin;
+						}
+					}
+
+					if( guess_blocking_test==2) 
+					{
+						egfillin=efillin;
+						RSBENCH_STDOUT("# GUESS DATA:  best performance was       :	%zd	%zd\n", (size_t)brv[bri], (size_t)bcv[bci] );
+						RSBENCH_STDOUT("# GUESS DATA:  guessed was                :	%zd	%zd\n", (size_t)br, (size_t)bc );
+						RSBENCH_STDOUT("# GUESS DATA:  performance diff from best :	%lg\n", (nrp-bperf)/bperf );
+						RSBENCH_STDOUT("# GUESS DATA:  performance diff from worst:	%lg\n", (nrp-wperf)/wperf );
+						if(cperf)
+						RSBENCH_STDOUT("# GUESS DATA:  performance diff over CSR:	%lg\n", (nrp-cperf)/cperf );
+						RSBENCH_STDOUT("# GUESS DATA:  best/guessed op matrix traffic amount:	%lg	%lg\n", bomta,omta);
+						RSBENCH_STDOUT("#GUESS_TEST_:%-20s\t%20s\t%zd\t%zd\t%zd\t%zd\t%zd\t%zd\n",
+							rsb__basename(filename),
+							rsb__sprint_matrix_implementation_code2(mtxAp,buf,flags),
+				(rsb_printf_int_t)((nrp>=bperf*.95) || (brv[bri]==br && bcv[bci]==bc)),	/* (fuzzy WIN) */
+				(rsb_printf_int_t)((nrp>=bperf) || (brv[bri]==br && bcv[bci]==bc)),	/* if 1, best blocking guess (WIN) */
+				(rsb_printf_int_t)(nrp>=bperf),			/* if 1, best performance guess */
+				(rsb_printf_int_t)(brv[bri]==br && bcv[bci]==bc),	/* if 1, best blocking guess */
+				(rsb_printf_int_t)(nrp>=cperf),	/* if 0, we lose over (our) plain CSR  */
+				(rsb_printf_int_t)(nrp> wperf)	/* if 0, we performed as the worst blocking! */
+							);
+					flags=oflags;
+
+					RSBENCH_STDOUT(	"#GUESS_TEST:%-20s\t%-20s"
+						"\t%10.2lf"
+						"\t%10.2lf"
+						"\t%zd" "\t%zd"
+						"\t%10.4lf" "\t%10.2lf" "\t%10.4lf" "\t%10.2lf" "\t%10.4lf" "\n"
+						,
+						rsb__basename(filename),
+						rsb__sprint_matrix_implementation_code2(mtxAp,buf,flags),	
+						/* grmflops */
+						raw_Mflops/op_t,
+						/* egfillin */
+						egfillin,
+						/* bbr */
+						(rsb_printf_int_t)brv[bri],
+						/* bbc */
+						(rsb_printf_int_t)bcv[bci],
+						/* bfillin */
+						bfillin,
+						/* brmflops */
+						bperf*bfillin,
+						/* ebfillin */
+						ebfillin,
+						/* csrmflops */
+						cperf,
+						/* maxfillin */
+						maxfillin);
+
+						flags=oflags;
+					}
+				
+
+					if(brvi==brl-1 && bcvi==bcl-1 && guess_blocking_test==1)
+					{
+						oflags=flags;
+						RSB_DO_FLAG_ADD(flags,RSB_FLAG_AUTO_BLOCKING);
+						guess_blocking_test++;
+						--bcvi;	/* one more round :) */
+					}
+				} /* guess_blocking_test */
+		erri:
+			if(want_in_place_assembly && mtxAp)
+			{
+				rsb_time_t st = -rsb_time();
+				errval = rsb_mtx_switch_to_coo(mtxAp,&VA,&IA,&JA,RSB_FLAG_SORTED_INPUT),mtxAp=NULL;
+				st += rsb_time();
+				RSBENCH_STDOUT("# rsb_mtx_switch_to_coo time: %lg.\n",st);
+				if(RSB_SOME_ERROR(errval)) { RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+			}
+			RSB_MTX_FREE(mtxAp);
+			RSB_CONDITIONAL_FREE(lhs);
+			RSB_CONDITIONAL_FREE(rhs);
+
+			RSB_CONDITIONAL_FREE(p_r);
+			RSB_CONDITIONAL_FREE(p_c);
+			
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR(RSB_ERRM_ES);goto err;
+			}
+			if(brl==0 || bcl==0) break;
+		} /* ci : core (count) index */
+
+			if(want_verbose == RSB_BOOL_TRUE)
+			{
+            			RSBENCH_STDOUT("%%operation:matrix	CONSTRUCTOR[%d]	SPMV[%d]	SPMV[%d]\n",ca[0],ca[0],ca[cl-1]);
+            			RSBENCH_STDOUT("%%operation:%s	%lg	%lg	%lg\n",
+					rsb__basename(filename),sct,smt,pmt);
+            			RSBENCH_STDOUT("%%constructor:matrix	SORT[%d]	SCAN[%d]	SHUFFLE[%d]	INSERT[%d]\n",
+					ca[0],ca[0],ca[0],ca[0]);
+            			RSBENCH_STDOUT("%%constructor:%s	%lg	%lg	%lg	%lg\n",
+					rsb__basename(filename),sest,ssat,scpt,seit);
+			}
+		} /* ti (transposition index) */
+	}
+	else
+	{
+		RSBENCH_STDOUT("%s (spmv_uaua) : Please specify a matrix filename (with -f)\n",argv[0]);
+	}
+ 	RSBENCH_STDOUT("# so far, program took %.3lfs of wall clock time; ancillary tests %.3lfs; I/O %.3lfs; checks %.3lfs; conversions %.3lfs; rsb/mkl tuning %.3lfs/%.3lfs ",totprt + rsb_time(),totatt,totiot,totht,totct,tottt,totmt);
+	RSBENCH_STDOUT(".\n"); /* FIXME: this takes too much space here ! */
+	rsb__getrusage();
+done:
+frv:
+	if( !should_recycle_io )
+	{
+		RSBENCH_STDOUT("# Freeing I/O arrays.\n");
+		RSB_CONDITIONAL_FREE(IA);
+		RSB_CONDITIONAL_FREE(JA);
+		RSB_CONDITIONAL_FREE(VA);
+	}
+	
+	if(mtxAp && !should_recycle_matrix){RSB_MTX_FREE(mtxAp)}
+	if( brv != rua ) {RSB_CONDITIONAL_FREE(brv);}
+	if( bcv != cua ) {RSB_CONDITIONAL_FREE(bcv);}
+		RSBENCH_MAY_SQUIT(ret,{}) /* early end of program */
+		RSBENCH_MAY_TQUIT(ret,{}) /* early end of program */
+	}	/* typecodesi */
+	}	/* nrhsi */
+	}	/* incXi */
+	}	/* incYi */
+nfnm:	RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS;
+	}	/* filenamei */
+	RSBENCH_STDOUT("# benchmarking terminated --- finalizing run.\n");
+#if RSB_WANT_PERFORMANCE_COUNTERS_IN_RSBENCH 
+	errval = rsb_perf_counters_finalize();
+	if(RSB_SOME_ERROR(errval)) { RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+#endif
+ret:
+	errval = RSB_ERR_NO_ERROR;
+goto rret;
+err:
+	rsb_perror(NULL,errval);
+	errval = RSB_ERR_GENERIC_ERROR;
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_CONDITIONAL_FREE(JA);
+	RSB_CONDITIONAL_FREE(VA);
+	if(want_in_place_assembly && mtxAp)rsb_mtx_switch_to_coo(mtxAp,&VA,&IA,&JA,RSB_FLAG_SORTED_INPUT),mtxAp=NULL;
+	RSB_MTX_FREE(mtxAp);
+	if( brv != rua ) {RSB_CONDITIONAL_FREE(brv);}
+	if( bcv != cua ) {RSB_CONDITIONAL_FREE(bcv);}
+	if(RSB_SOME_ERROR(rsb_lib_exit(RSB_NULL_EXIT_OPTIONS)))return RSB_ERR_GENERIC_ERROR;
+rret:
+	if(want_perf_dump) 
+	{
+		RSBENCH_STDOUT("# ====== BEGIN Total summary record.\n");
+		errval = rsb__pr_dump(rspr, filenamea, ca, incXa, incYa, nrhsa, typecodes, NULL );
+		RSBENCH_STDOUT("# ======  END  Total summary record.\n");
+		if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		errval = rsb__pr_save(fprfn, rspr, filenamea, ca, incXa, incYa, nrhsa, typecodes, NULL, RSB_BOOL_TRUE );
+		if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		RSBENCH_STDOUT("# Removing the temporary record file %s.\n",cprfn);
+		remove(cprfn);
+	}
+	if( ca  != ca_ ) {RSB_CONDITIONAL_FREE(ca);}
+#if !RSB_RSBENCH_STATIC_FILENAMEA
+	/* if(filenamea!=&fnbufp)RSB_CONDITIONAL_FREE(filenamea); */
+	if(filenamea!=&fnbufp)free(filenamea); /* FIXME */
+#endif
+	if(nrhsa!=(&nrhs))RSB_CONDITIONAL_FREE(nrhsa); /* FIXME: they get allocated (and thus shall be deallocated) before init */
+	if(incXa!=(&incX))RSB_CONDITIONAL_FREE(incXa);
+ 	if(incYa!=(&incY))RSB_CONDITIONAL_FREE(incYa); 
+	if(want_likwid == RSB_BOOL_TRUE){RSB_LIKWID_MARKER_EXIT;} /* FIXME: and other cases ? */
+	if(want_verbose == RSB_BOOL_TRUE)
+		rsb__echo_timeandlabel(" terminating run at ","\n",&st);
+	return errval;
+}
+
+int rsb__main_block_partitioned_spsv_uxua(const int argc, rsb_char_t * const argv[])
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This function implements a complete program for using our variable block
+	 * rows sparse matrix storage as if it were a fixed block size format.
+	 * It is useful for benchmarking against fixed block sparse matrix codes.
+	 * 
+	 * This function will benchmark the "spsv_uxua" matrix operation.
+	 * */
+
+	/*
+	 * This example main program reads a Matrix Market file in block format and multiplies it by a unit vector.
+	 **/
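+	/* A hypothetical invocation (option names taken from the table below;
+	 * the program name is a placeholder) could be:
+	 *   rsbench -f A.mtx -t 100 -T D -v
+	 * i.e. load A.mtx and run the benchmarked operation 100 times with
+	 * type 'D', verbosely. */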
+	rsb_option options[] = {
+	    {"all-flags",	0 , NULL, 0x51},/* Q */  
+	    {"allow-any-transposition-combination",	0 , NULL, 0x61617463 },/* aatc */  
+	    {"alpha",	required_argument, NULL , 0x414C},/* AL */
+	    {"alternate-sort",	no_argument, NULL , 0x4153},/* AS */
+	    {"auto-blocking",	0 , NULL, 0x41},/* A */
+	    {"be-verbose",		0, NULL, 0x76},	/* v */
+	    {"beta",	required_argument, NULL ,  0x4246},/* BE */
+	    {"block-columnsize",	required_argument, NULL, 0x63},/* c */  
+	    {"block-rowsize",   required_argument, NULL, 0x72 },/* r */
+	    {"cache-blocking",	required_argument, NULL , 0x4342},/* CB */
+/*	    {"cache-flush",	no_argument, NULL, 0x4343},*/ /*   */
+	    {"column-expand",	required_argument, NULL, 0x6B},/* k */  
+	    {"compare-competitors",	no_argument, NULL, 0x6363},/* cc */  
+	    {"convert",	0, NULL, 0x4B},/* K */  
+/*	    {"convert",	required_argument, NULL, 0x4B},*//* K   */
+	    {"dense",	required_argument, NULL, 0x64 },   /* d */
+	    {"diagonal-dominance-check",	no_argument , NULL, 0x4444},/* DD */  /* new */
+	    {"dump-n-lhs-elements",	required_argument , NULL, 0x444444},/* DDD */  /* new */
+	    {"echo-arguments",	no_argument , NULL, 0x6563686f},/* echo */  /* new */
+	    {"flush-cache-in-iterations",	no_argument, NULL, 0x4343},/*  */  
+	    {"impatient",	no_argument, NULL, 0x696d7061},/* impa[tient] */  
+	    {"no-flush-cache-in-iterations",	no_argument, NULL, 0x434E},/*  */  
+	    {"flush-cache-around-loop",	no_argument, NULL, 0x434343},/*  */  
+	    {"want-ancillary-execs",	no_argument, NULL, 0x767646},/*  */  
+	    {"no-want-ancillary-execs",	no_argument, NULL, 0x42767646},/*  */  
+	    {"no-flush-cache-around-loop", no_argument	, NULL, 0x43434E},/*  */  
+	    {"want-no-recursive",	no_argument, NULL, 0x776e720a},/*  */  
+	    {"guess-blocking",	no_argument , NULL, 0x47},/* G */
+	    {"help",	no_argument , NULL, 0x68},	/* h */
+	    {"ilu0",	no_argument , NULL, 0x494B55},/* ILU */  /* new */
+	    {"incx",	required_argument, NULL, 0xb1bb0 },/* */  
+	    {"incy",	required_argument, NULL, 0xb1bb1 },/* */  
+	    {"in-place-assembly-experimental",	no_argument , NULL, 0x6970},/* i */  
+	    {"in-place-csr",	0 , NULL, 0x69},/* i */  
+	    {"in-place-permutation",	no_argument, NULL, 0x50},   /* P */
+#if RSB_WITH_LIKWID
+	    {"likwid",	no_argument, NULL, 0x6c696b77},   /* likw */
+#endif /* RSB_WITH_LIKWID */
+	    {"lower",	required_argument, NULL, 0x6c},   /* l */
+	    {"lower-dense",	required_argument, NULL, 0x6c64},   /* ld */
+	    {"generate-lowerband",	required_argument, NULL, 0x6c6c},   /* ll */
+	    {"gen-lband",	required_argument, NULL, 0x6c6c},   /* ll */
+	    {"generate-spacing",	required_argument, NULL, 0xbabb2 },   /* */
+	    {"matrix-dump",	0 , NULL, 0x44044},/* D */  
+	    {"matrix-dump-graph",	required_argument , NULL, 0x44047},/* DG */  
+	    {"matrix-dump-internals",	0 , NULL, 0x49049},/* I */  
+	    {"merge-experimental",	required_argument , NULL, 0x6d656578},/* meex */  
+	    {"split-experimental",	required_argument , NULL, 0x73706578},/* spex */  
+	    {"ms-experimental",	required_argument , NULL, 0x6d736578},/* msex */  
+	    {"matrix-filename",	required_argument, NULL, 0x66},/* f */  
+	    {"matrix-storage",	required_argument, NULL, 0x46},/* F */  
+	    {"matrix-time",	0 , NULL, 0x4D},/* M */  /* new */
+	    {"mem-hierarchy-info",	required_argument , NULL, 0x4D4D},/* MM */  /* new */
+	    {"max-runtime",	required_argument , NULL, 0x6d617275},/* maru */
+	    {"no-op",		0 , NULL, 0x4E},	/* N */
+	    {"notranspose",	no_argument, NULL, 0x5051},   /* do not transpose the operation */
+	    {"nrhs",	required_argument, NULL, 0x6e726873},   /* */
+	    {"one-nonunit-incx-incy-nrhs-per-type",	no_argument, NULL, 0x6e697270},   /* */
+	    RSB_BENCH_PROG_OPTS
+	    {"oski-benchmark",	0 , NULL, 0x42},/* B: only long option *//* comparative benchmarking against OSKI */
+	    {"mkl-benchmark",	0 , NULL, 0x4C},/* L: only long option *//* comparative benchmarking against MKL */
+	    {"out-lhs",		0 , NULL, 0x6F},/* o */	/* should accept an output file name, optionally */
+	    {"out-rhs",		0 , NULL, 0x6F6F},/* o */	/* should accept an output file name, optionally */
+	    {"override-matrix-name",	required_argument , NULL, 0x6F6D6E},/* omn */	
+	    {"pattern-mark",	0 , NULL, 0x70},/* p */
+	    {"pre-transpose",	no_argument, NULL, 0x5454},   /* transpose the matrix before assembly  */
+	    {"read-as-binary",		required_argument, NULL, 0x62},/* b */
+	    {"repeat-constructor",	required_argument , NULL, 0x4A4A},
+	    {"reuse-io-arrays",	no_argument , NULL, 0x726961}, /* ria */
+	    {"no-reuse-io-arrays",	no_argument , NULL, 0x6e726961 }, /* nria */
+	    {"reverse-alternate-rows",	no_argument , NULL, 0x4A4A4A},
+	    {"generate-upperband",	required_argument, NULL, 0x7575},   /* uu */
+	    {"gen-uband",	required_argument, NULL, 0x7575},   /* uu */
+	    {"generate-diagonal",	required_argument, NULL, 0x6464 },   /* dd */
+	    {"gen-diag",	required_argument, NULL, 0x6464 },   /* dd */
+	    {"zig-zag",	no_argument , NULL, 0x4A4A4A},
+	    {"subdivision-multiplier",	required_argument, NULL , 0x534D},/* SM */
+#if RSB_WANT_BOUNDED_BOXES
+	    {"bounded-box",	required_argument, NULL , 0x4242},/* BB */
+#endif /* RSB_WANT_BOUNDED_BOXES */
+	    {"sort",		0 , NULL, 0x73},	/* s */
+	    {"no-leaf-multivec",	no_argument, NULL , 0x6e6c6d6d},/* nlmm */
+	    {"with-leaf-multivec",	no_argument, NULL , 0x636c6d6d},/* wlmm */
+	    {"sort-after-load",	no_argument, NULL, 0x7373},/* ss */  
+	    {"skip-loading-symmetric-matrices",	 no_argument, NULL, 0x736c736d},/* slsm */  
+	    {"skip-loading-unsymmetric-matrices",no_argument, NULL, 0x736c756d},/* slum */  
+	    {"skip-loading-hermitian-matrices",no_argument, NULL, 0x736c686d},/* slhm */  
+	    {"skip-loading-not-unsymmetric-matrices",no_argument, NULL, 0x736c6e75},/* slnu */  
+	    {"skip-loading-if-more-nnz-matrices",required_argument, NULL, 0x736c6d6},/* slmn */  
+	    {"skip-loading-if-less-nnz-matrices",required_argument, NULL, 0x736c6e6e},/* slnn */  
+	    {"skip-loading-if-more-filesize-kb-matrices",required_argument, NULL, 0x736c6d73},/* slms */  
+#ifdef RSB_HAVE_REGEX_H 
+	    {"skip-loading-if-matching-regex",required_argument, NULL, 0x736c6d72},/* slmr */  
+#endif /* RSB_HAVE_REGEX_H */
+	    {"skip-loading-if-matching-substr",required_argument, NULL, 0x736c7373},/* slss */  
+	    {"times",		required_argument, NULL, 0x74},/* t */  
+	    {"transpose-as",	required_argument, NULL, 0x5040},   /* do transpose the operation */
+	    {"transpose",	no_argument, NULL, 0x5050},   /* do transpose the operation */
+	    {"also-transpose",	no_argument, NULL, 0x4150},  /* N,T: do transpose the operation after no transposition */
+	    {"all-transposes",	no_argument, NULL, 0x616c6c74},  /* N,T,C */
+	    {"type",		required_argument, NULL, 0x54},/* T */  
+	    {"types",		required_argument, NULL, 0x54},/* T */  
+	    {"update",		0 , NULL, 0x55},	/* U */
+	    {"as-unsymmetric",		0 , NULL, 0x5555},	/* UU: TODO: to insert such a test in as default, in order to quantify the benefit of symmetry */
+	    {"as-symmetric",		0 , NULL, 0x5353},	/* SS */
+	    {"only-lower-triangle",		0 , NULL, 0x4F4C54},	/* OLT */
+   	    {"only-upper-triangle",		0 , NULL, 0x4F4554},	/* OUT */
+	    {"verbose",	no_argument , NULL, 0x56},/* V */
+	    {"want-io-only",	no_argument , NULL, 0x4949},/* --want-io-only */
+	    {"want-nonzeroes-distplot",	no_argument, NULL, 0x776E68},/* wnh */  
+	    {"want-accuracy-test",	no_argument, NULL, 0x776174},/* wat */  
+	    {"want-getdiag-bench",	no_argument , NULL, 0x774446},/* wde */  /* FIXME: obsolete ? */
+	    {"want-getrow-bench",	no_argument , NULL, 0x777246},/* wre */  /* FIXME: obsolete ? */
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+	    {"want-perf-counters",	no_argument , NULL, 0x707763},/* wpc */
+#endif
+	    {"want-print-per-subm-stats",	no_argument , NULL, 0x77707373},/* wpss */
+	    {"want-only-accuracy-test",	no_argument, NULL, 0x776F6174},/* woat */  
+	    {"want-autotune",	required_argument, NULL, 0x7772740a},/* wrt */  
+	    {"want-no-autotune",	no_argument, NULL, 0x776e7274},/* wnrt */  
+#if RSB_HAVE_METIS
+	    {"want-metis-reordering",	no_argument, NULL, 0x776d6272 },/* wmbr */  
+#endif
+	    {"want-mkl-autotune",	required_argument, NULL, 0x776d6174},/* wmat */  
+	    {"want-mkl-one-based-indexing",	no_argument, NULL, 0x776d6f62 },/* wmob */  
+	    {"want-unordered-coo-test",	no_argument, NULL, 0x775563},/* */  
+	    {"with-flags",	required_argument, NULL, 0x71},/* q */  
+	    {"write-as-binary",	required_argument, NULL, 0x77 }, /* w */
+	    {"write-as-csr",	required_argument, NULL,  0x63777273 }, /* wcsr */
+	    {"write-performance-record",	required_argument, NULL, 0x77707266 }, /* write performance record file  */
+	    {"performance-record-name-append",	required_argument, NULL, 0x77707261 }, /* ...append  */
+	    {"performance-record-name-prepend",	required_argument, NULL, 0x77707270 }, /* ...prepend  */
+	    {"write-no-performance-record",	no_argument, NULL, 0x776e7072 }, /* write no performance record */
+	    {"discard-read-zeros",	no_argument, NULL,  0x64697a65 }, /* dize */
+	    {"z-sorted-coo",	no_argument, NULL , 0x7A},/* z */
+	    {0,0,0,0}	};
+
+	rsb_nnz_idx_t nnz = 0;/* was 0 */
+	int c;
+	int opt_index = 0;
+
+	rsb_coo_idx_t *IA = NULL, *JA = NULL;
+	void *VA = NULL;
+
+	int g_estimate_matrix_construction_time = 0;
+	int g_all_flags = 0;
+	int g_sort_only = 0;
+	int repeat_construction = 1;	/* times to call the matrix constructor (the more times, the more accurate the measurements) */
+
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_DEFAULT, typecode_old = RSB_NUMERICAL_TYPE_DEFAULT;
+	rsb_int ntypecodes = 0,typecodesi;
+	const rsb_int maxtypes = 2*RSB_IMPLEMENTED_TYPES;
+	rsb_type_t typecodes[maxtypes+1] ;
+
+	rsb_blk_idx_t br = 1;
+	rsb_blk_idx_t bc = 1;
+	char * bcs = NULL, *brs = NULL, *cns = NULL, *mhs = NULL;
+	rsb_blk_idx_t * brv = NULL;
+	rsb_blk_idx_t * bcv = NULL;
+	int brl = 0;
+	int bcl = 0;
+	rsb_thread_t ca_[1] = {1};
+	rsb_thread_t * ca = ca_;
+	rsb_thread_t cn = 1, ci = 0, cc = ca[ci];
+
+	int times = 100;	/* the default number of times to perform spsv_uxua */
+	rsb_coo_idx_t nrA = 0, ncA = 0, ndA = 0;
+	int filenamen = 0, filenamei = 0;
+#define RSB_RSBENCH_STATIC_FILENAMEA 1
+#if RSB_RSBENCH_STATIC_FILENAMEA
+#define RSB_RSBENCH_MAX_MTXFILES 256
+	const rsb_char_t *filenamea[RSB_RSBENCH_MAX_MTXFILES];
+#else
+	const rsb_char_t **filenamea = NULL;
+#endif
+	const rsb_char_t *filename = NULL;
+	const rsb_char_t *filename_old = NULL;
+	const rsb_char_t *usfnbuf = NULL;
+	rsb_char_t*fprfn = NULL, *cprfn = NULL, *apprfn = NULL, *ppprfn = NULL; /* final/checkpoint      performance file name , append/prepend */
+	rsb_char_t fprfnb[RSB_MAX_FILENAME_LENGTH], cprfnb[RSB_MAX_FILENAME_LENGTH];/* final/checkpoint      performance file name buffers */
+	rsb_char_t fnbuf[RSB_MAX_FILENAME_LENGTH];
+	rsb_char_t*fnbufp[1]={&(fnbuf[0])};
+	rsb_char_t * dump_graph_file=NULL;
+	rsb_flags_t flags_o = RSB_FLAG_NOFLAGS|RSB_FLAG_OWN_PARTITIONING_ARRAYS;
+/*	RSB_DO_FLAG_ADD(flags_o,RSB_FLAG_DISCARD_ZEROS)	;	*/ /* FIXME : EXPERIMENTAL (watch nnz count on a multi blocking run ...) */
+	rsb_flags_t flagsa[128] = { RSB_FLAG_NOFLAGS }; /* remaining entries are zero-initialized */
+	rsb_flags_t r_flags = RSB_FLAG_NOFLAGS; /* recycling flags */
+	int fn = 1, fi = 0;/* for flags */
+	int tn = 1, ti = 0;/* for transposition */
+	int g_debug = 0;
+	int be_verbose = 0;
+	int pattern_only = 0;
+	int dumpout = 0;
+	int dumpout_internals = 0, merge_experimental = 0, split_experimental = 0;
+	int just_enter_tuning = 1;
+	rsb_char_t * csr_w_filename = NULL;
+	rsb_char_t * b_w_filename = NULL;
+	rsb_char_t * b_r_filename = NULL;
+	int dumpvec = rsb_dumpvec_no;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_blk_idx_t rua[] = RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[] = RSB_COLUMNS_UNROLL_ARRAY;
+	int guess_blocking_test = 0;		/* guess test stuff */
+	rsb_int want_column_expand = 0;
+	rsb_perf_t bperf=0,wperf=0,cperf=0;			/* guess test stuff */
+	rsb_fillin_t egfillin=0,ebfillin=0,bfillin=0,maxfillin=0;	/* guess test stuff */
+	rsb_blk_idx_t bri=0,bci=0;		/* guess test stuff */
+	rsb_perf_t omta = RSB_REAL_ZERO; /* op memory traffic amount */
+	rsb_fillin_t fillin = RSB_REAL_ZERO;
+	rsb_perf_t raw_Mflops = RSB_REAL_ZERO,true_Mflops = RSB_REAL_ZERO, true_gem_Mflops = RSB_REAL_ZERO;
+	rsb_char_t buf[RSB_CONST_MATRIX_IMPLEMENTATION_CODE_STRING_MAX_LENGTH];/* Flawfinder: ignore */
+	rsb_fillin_t efillin = RSB_REAL_ZERO;
+	rsb_perf_t eperf = RSB_REAL_ZERO;
+
+	rsb_bool_t should_recycle_matrix = RSB_BOOL_FALSE; /* reuse the matrix across measurements */
+	rsb_bool_t should_recycle_io = RSB_BOOL_TRUE;/* reuse the input arrays */
+	rsb_bool_t g_allow_any_tr_comb = RSB_BOOL_FALSE; /* allow any transposition combination */
+	
+	rsb_trans_t transAo = RSB_DEFAULT_TRANSPOSITION;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	rsb_nnz_idx_t should_generate_dense = 0;
+	rsb_nnz_idx_t should_generate_dense_nc = 0;
+	rsb_nnz_idx_t should_generate_lband = -1, should_generate_uband = -1;
+	rsb_nnz_idx_t want_generated_spacing = 0;
+	rsb_bool_t want_only_star_scan = RSB_BOOL_FALSE;
+	rsb_blk_idx_t nrhs = 1, nrhsn = 1, nrhsi = 1, nrhsl = 1;
+	const char*nrhss = NULL;
+	rsb_blk_idx_t *nrhsa = NULL;
+	size_t outnri = 0, rhsnri = 0;
+	rsb_nnz_idx_t n_dumpres = 0;
+	rsb_nnz_idx_t n_dumprhs = 0;
+	rsb_bool_t ignore_failed_fio = RSB_BOOL_TRUE; /* FIXME 20140912 experimental */
+	rsb_bool_t want_convert = RSB_BOOL_FALSE;
+	rsb_bool_t want_update = RSB_BOOL_FALSE;
+	rsb_int_t want_impatiently_soon_pre_results = 0; /* FIXME: temporary */
+	rsb_bool_t want_inner_flush = RSB_BOOL_FALSE;
+	rsb_bool_t want_outer_flush = RSB_BOOL_TRUE;
+	rsb_bool_t want_ancillary_execs = RSB_BOOL_FALSE;
+	rsb_time_t st = RSB_TIME_ZERO;
+	rsb_time_t totiot = RSB_TIME_ZERO; /* total I/O time */
+	rsb_time_t totatt = RSB_TIME_ZERO; /* total ancillary tests time */ /* FIXME: is this complete ? */
+	rsb_time_t totct = RSB_TIME_ZERO; /* total conversions time */ /* FIXME: is this complete ? */
+	rsb_time_t tottt = RSB_TIME_ZERO; /* total tuning time */
+	rsb_time_t totht = RSB_TIME_ZERO; /* total checks time */ /* FIXME: is this complete ? */
+	rsb_time_t maxtprt = RSB_TIME_ZERO; /* max total program run time */
+	const rsb_time_t totprt = - rsb_time(); /* total program run time */
+	rsb_bool_t want_as_unsymmetric = RSB_BOOL_FALSE;
+	rsb_bool_t want_as_symmetric = RSB_BOOL_FALSE;
+	rsb_bool_t want_only_lowtri = RSB_BOOL_FALSE;
+	rsb_bool_t want_only_upptri = RSB_BOOL_FALSE;
+	rsb_bool_t want_sort_after_load = RSB_BOOL_FALSE;
+	rsb_bool_t want_slsm = RSB_BOOL_FALSE, want_slum = RSB_BOOL_FALSE, want_slnu = RSB_BOOL_FALSE, want_slhm = RSB_BOOL_FALSE;
+	rsb_nnz_idx_t want_slmn = 0,  want_slnn = 0,  want_slms = 0;
+#ifdef RSB_HAVE_REGEX_H
+	const rsb_char_t * want_slmr = NULL;
+#endif /* RSB_HAVE_REGEX_H */
+	const rsb_char_t * want_slss = NULL;
+	rsb_bool_t do_perform_ilu = RSB_BOOL_FALSE;
+	rsb_bool_t do_perform_ddc = RSB_BOOL_FALSE;
+	rsb_bool_t want_in_place_assembly = RSB_BOOL_FALSE;
+	rsb_bool_t want_accuracy_test = 0;	/* FIXME-EXPERIMENTAL */
+	rsb_bool_t want_nonzeroes_distplot = 0;	/* FIXME-EXPERIMENTAL */
+	rsb_bool_t want_getdiag_bench = 0;	/* FIXME-EXPERIMENTAL */
+	rsb_bool_t want_getrow_bench = 0;	/* FIXME-EXPERIMENTAL */
+	rsb_coo_idx_t mib = 0; /* MKL index base (FIXME: declared here and not within RSB_WANT_MKL because CSR copy made even with no MKL) */
+#if RSB_WANT_MKL
+	rsb_bool_t want_mkl_bench = RSB_BOOL_FALSE;
+	rsb_bool_t want_mkl_bench_csr = RSB_BOOL_TRUE;
+	rsb_bool_t want_mkl_bench_gem = RSB_BOOL_TRUE;
+	rsb_bool_t want_mkl_bench_coo = RSB_BOOL_FALSE;
+#endif /* RSB_WANT_MKL */
+	rsb_time_t totmt = RSB_TIME_ZERO; /* total mkl/competitors (tuning) time */
+	rsb_bool_t want_perf_dump = RSB_BOOL_FALSE;
+	void*rspr = NULL; /* rsb sampled performance record structure pointer */
+
+	rsb_aligned_t alpha[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_aligned_t beta[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_aligned_t errnorm[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_aligned_t * alphap = &(alpha[0]);
+	rsb_aligned_t * betap = &(beta[0]);
+	rsb_int alphai = 1, betai = 1;
+	rsb_coo_idx_t incX = 1, incY = 1;
+	rsb_blk_idx_t incXn = 1, incXi = 1;
+	rsb_blk_idx_t incYn = 1, incYi = 1;
+	rsb_blk_idx_t *incXa = NULL, *incYa = NULL;
+	rsb_coo_idx_t ldX = 0, ldY = 0;
+	rsb_bool_t want_incX = RSB_BOOL_FALSE,want_incY = RSB_BOOL_FALSE;
+	rsb_bool_t want_verbose = RSB_BOOL_FALSE;
+	rsb_int_t want_verbose_tuning = 0;
+	rsb_bool_t want_transpose = RSB_BOOL_FALSE;
+	#if 1
+	const int max_io = 10;
+	struct rsb_initopts io={NULL,NULL,0,RSB_IO_SPECIFIER_SET},*iop=&io;
+	rsb_int_t should_use_cb_method = 0;
+	rsb_real_t subdivision_multiplier = 0.0;
+#if RSB_WANT_BOUNDED_BOXES
+	rsb_int_t want_bounded_box=1;
+#endif /* RSB_WANT_BOUNDED_BOXES */
+	rsb_int_t want_no_leaf_spmm=0;
+	void * io_values[max_io];
+	enum rsb_opt_t io_keys[max_io];
+	#else /* 1 */
+	struct rsb_initopts *iop = RSB_NULL_INIT_OPTIONS;
+	#endif /* 1 */
+	rsb_bool_t should_use_alternate_sort = RSB_BOOL_FALSE;
+	rsb_bool_t reverse_odd_rows = RSB_BOOL_FALSE;
+	rsb_bool_t zsort_for_coo = RSB_BOOL_FALSE;
+	rsb_bool_t want_unordered_coo_bench = RSB_BOOL_FALSE;
+	rsb_time_t unordered_coo_op_tot_time = RSB_CONST_IMPOSSIBLY_BIG_TIME, unordered_coo_op_time = RSB_CONST_IMPOSSIBLY_BIG_TIME, unordered_coo_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+#ifdef RSB_WANT_OSKI_BENCHMARKING 
+	/* FIXME : unfinished */
+	rsb_time_t oski_t = RSB_TIME_ZERO,oski_m_t = RSB_TIME_ZERO,oski_a_t = RSB_TIME_ZERO,oski_t_t = RSB_TIME_ZERO;
+	oski_idx_t * Aptr=NULL;
+	oski_idx_t * Aind=NULL;
+	oski_value_t * Aval=NULL;
+	oski_matrix_t A_tunable;
+        oski_vecview_t x_view;
+        oski_vecview_t y_view;
+	void * Oval = NULL;
+	rsb_coo_idx_t *OIA=NULL,*OJA=NULL;
+        rsb_char_t oxform[256];
+        double oalpha = 1, obeta = 0;
+	rsb_bool_t want_oski_bench=0;
+	#ifdef RSB_HAVE_SETENV
+	setenv("OSKI_LUA_PATH",OSKI_LUA_PATH,0/* if 0, will not override; if 1, it will */);
+	#endif /* RSB_HAVE_SETENV */
+#endif /* RSB_WANT_OSKI_BENCHMARKING */
+	rsb_time_t tinf = rsb__timer_granularity();
+	rsb_aligned_t pone[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_bool_t want_likwid = RSB_BOOL_FALSE;
+	rsb_flags_t order = RSB_FLAG_WANT_COLUMN_MAJOR_ORDER;
+	rsb_time_t want_autotuner = RSB_NEGATED_EXAGGERATED_TUNER_TIMES, want_mkl_autotuner = RSB_NEGATED_EXAGGERATED_TUNER_TIMES;
+	rsb_bool_t want_io_only = RSB_BOOL_FALSE;
+	rsb_int wat = 1;	/* want autotuning threads choice */
+	rsb_int wai = 1;	/* want autotuning rounds */
+	char wav = 0x56;	/* want autotuning verbose */
+	int wavf = RSB_AUT0_TUNING_VERBOSE;
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+	int want_perf_counters = 0;
+#endif
+	rsb_bool_t want_print_per_subm_stats = RSB_BOOL_FALSE;
+#if RSB_HAVE_METIS
+	rsb_bool_t want_wmbr = RSB_BOOL_FALSE;
+#endif
+	rsb_bool_t want_recursive = RSB_BOOL_TRUE;
+
+	io.keys = io_keys;
+	io.values = io_values;
+	io.n_pairs = 0;
+
+    	for (;;)
+	{
+		c = rsb_getopt_long(argc,argv,RSB_SAMPLE_PROGRAM_OPTIONS_GET_FLAGS"b:w:BGht:f:r:c:vpn:MNS:Bk:KU" /* Flawfinder: ignore */
+		/* s is in anyway, with RSB_SAMPLE_PROGRAM_OPTIONS_GET_FLAGS */
+		"o:O:"
+		, options, &opt_index);
+		if (c == -1)break;
+
+		RSB_DO_FLAG_ADD(flags_o,rsb__sample_program_options_get_flags(c,optarg));
+
+		switch (c)
+		{
+			case 0x62:	/* b */
+			b_r_filename = optarg;
+			break;
+			case  0xb1bb0:
+#if 0
+				incX = rsb__util_atoi(optarg);
+				if(incX<1){errval = RSB_ERR_BADARGS;goto err;}
+				if(incX>1)RSBENCH_STDOUT("# setting incX=%d\n",incX);
+				want_incX = RSB_BOOL_TRUE;
+#else
+			if(RSB_SOME_ERROR(rsb__util_get_bx_array(optarg,&incXn,&incXa)))
+				{RSB_ERROR(RSB_ERRM_ES);goto err;}
+#endif
+			break;
+			case  0x6970:
+				RSBENCH_STDOUT("# WARNING: in place assembly is an UNFINISHED, EXPERIMENTAL feature\n");
+				want_in_place_assembly = RSB_BOOL_TRUE;
+			break;
+			case  0xb1bb1:
+#if 0
+				incY = rsb__util_atoi(optarg);
+				if(incY<1){errval = RSB_ERR_BADARGS;goto err;}
+				if(incY>1)RSBENCH_STDOUT("# setting incY=%d\n",incY);
+				want_incY = RSB_BOOL_TRUE;
+#else
+			if(RSB_SOME_ERROR(rsb__util_get_bx_array(optarg,&incYn,&incYa)))
+				{RSB_ERROR(RSB_ERRM_ES);goto err;}
+#endif
+			break;
+			case 0x6c:
+			case 0x6c64: /* lower-dense */
+			{
+				should_generate_dense = - rsb__util_atoi(optarg); // FIXME ! PROBLEMS
+			}
+			break;
+			case 0x6c696b77:
+#if RSB_WITH_LIKWID
+				want_likwid = RSB_BOOL_TRUE;
+				#else /* RSB_WITH_LIKWID */
+				#endif /* RSB_WITH_LIKWID */
+			break;
+			case 0x6c6c:
+			{
+				should_generate_lband = rsb__util_atoi(optarg); // FIXME ! PROBLEMS
+				if(should_generate_uband==-1)should_generate_uband=0;
+			}
+			break;
+			case 0x7575:
+			{
+				should_generate_uband = rsb__util_atoi(optarg); // FIXME ! PROBLEMS
+				if(should_generate_lband==-1)should_generate_lband=0;
+			}
+			break;
+			case 0x6464: /* gen-diag */
+			{
+				should_generate_uband = 0;
+				should_generate_lband = 0;
+				should_generate_dense = rsb__util_atoi(optarg); // FIXME ! PROBLEMS
+			}
+			break;
+			case 0xbabb2:
+			{
+				want_generated_spacing = rsb__util_atoi(optarg);
+			}
+			break;
+			case 0x6e697270:
+			want_only_star_scan = RSB_BOOL_TRUE;
+			break;
+			case 0x64: /* dense */
+			{
+				/* should_generate_dense = rsb__util_atoi(optarg); */  // FIXME ! PROBLEMS
+				int sargs = sscanf(optarg,"%dx%d",&should_generate_dense,&should_generate_dense_nc);
+				if( should_generate_dense_nc == 0)
+					should_generate_dense_nc = should_generate_dense;
+				/* RSBENCH_STDOUT("# Requested generation of a %d by %d matrix\n",should_generate_dense,should_generate_dense_nc); */
+			}
+			break;
+			/* FIXME : please note that specifying -r or -c two or more times will cause memory leaks */
+			case 0x72:/* r */
+			brs=optarg;
+			break;
+			case 0x63: /* c */
+			bcs=optarg;
+			break;
+			case 0x42: /* oski : B */
+#ifdef RSB_WANT_OSKI_BENCHMARKING 
+			want_oski_bench = RSB_BOOL_TRUE;
+#else /* RSB_WANT_OSKI_BENCHMARKING */
+			RSB_ERROR("Sorry, OSKI comparative benchmarking was opted out at compile time\n");
+			goto err;
+#endif /* RSB_WANT_OSKI_BENCHMARKING */
+			break;
+			case 0x4C: /* MKL : L */
+#if RSB_WANT_MKL
+			want_mkl_bench = RSB_BOOL_TRUE;
+#else /* RSB_WANT_MKL */
+			RSB_ERROR("Sorry, MKL comparative benchmarking was opted out at compile time\n");
+			goto err;
+#endif /* RSB_WANT_MKL */
+			break;
+			case 0x61617463:
+			g_allow_any_tr_comb = RSB_BOOL_TRUE;
+			break;
+			case 0x51: /* Q (do not ask me why) */
+			g_all_flags = 1;
+			break;
+			case 0x44044: /* D */
+			dumpout = 1;
+			break;
+			case 0x5040: /*  */
+			transAo = rsb__do_transposition_from_char(*optarg);	/* */
+			break;
+			case 0x4150:
+			tn = 2;
+			break;
+			case 0x616c6c74:
+			tn = 3;
+			break;
+			case 0x5050: /*  */
+			transAo = rsb__do_transpose_transposition(transAo);
+			break;
+			case 0x5051: /*  */
+			transAo = RSB_TRANSPOSITION_N;
+			break;
+			case 0x6e726873: /* nrhs */
+#if 0
+			nrhs = rsb__util_atoi(optarg);
+			/* if(nrhs>1){ RSB_ERROR("Sorry, nrhs > 1 still unsupported!\n"); goto err; } */
+#else
+			nrhss = optarg;
+			if(RSB_SOME_ERROR(rsb__util_get_bx_array(nrhss,&nrhsn,&nrhsa)))
+				{RSB_ERROR(RSB_ERRM_ES);goto err;}
+#endif
+
+			break;
+			case 0x5454: /*  */
+			want_transpose = !want_transpose;
+			break;
+			case 0x44047: /* DG */
+			dump_graph_file = optarg;
+			break;
+			case 0x49049: /* I */
+			dumpout_internals = 1;
+			break;
+			case 0x6d656578: /* meex */
+			merge_experimental = rsb__util_atoi(optarg);
+			RSB_ASSIGN_IF_ZERO(merge_experimental,RSB_CONST_MS_AT_AUTO_STEPS);
+			break;
+			case 0x73706578: /* spex */
+			split_experimental = rsb__util_atoi(optarg);
+			RSB_ASSIGN_IF_ZERO(split_experimental,RSB_CONST_MS_AT_AUTO_STEPS);
+			break;
+			case 0x6d736578: /* msex */
+			merge_experimental = split_experimental = rsb__util_atoi(optarg);
+			RSB_ASSIGN_IF_ZERO(merge_experimental,RSB_CONST_MS_AT_AUTO_STEPS);
+			RSB_ASSIGN_IF_ZERO(split_experimental,RSB_CONST_MS_AT_AUTO_STEPS);
+			break;
+			case 0x4444 : /* DD */
+			do_perform_ddc = RSB_BOOL_TRUE;
+			break;
+			case 0x444444 : /* DDD */
+			n_dumprhs = n_dumpres = rsb__util_atoi(optarg);
+			break;
+			case 0x6563686f: /* echo */
+			{
+				rsb_int argi=0;
+				if(argc>0) printf("#args: %s",argv[0]);
+				for(argi=1;argi<argc;++argi)
+					printf(" %s",argv[argi]);
+				printf("\n");
+			}
+			break;
+			case 0x494B55 : /* ILU */
+			do_perform_ilu = RSB_BOOL_TRUE;
+			break;
+			case 0x696d7061: /* */
+			want_impatiently_soon_pre_results = 1;
+			break;
+			case 0x4343: /* */
+			want_inner_flush = RSB_BOOL_TRUE;
+			break;
+			case 0x434E: /* */
+			want_inner_flush = RSB_BOOL_FALSE;
+			break;
+			case 0x434343: /*  */
+			want_outer_flush = RSB_BOOL_TRUE;
+			break;
+			case 0x43434E: /*  */
+			want_outer_flush = RSB_BOOL_FALSE;
+			break;
+			case 0x776e720a: /*  */
+			want_recursive = RSB_BOOL_FALSE;
+			break;
+			case 0x4D: /* M */
+			g_estimate_matrix_construction_time=1;
+			break;
+			case 0x7A:
+			zsort_for_coo = RSB_BOOL_TRUE;
+			RSBENCH_STDOUT("# WARNING: the now active Z sort feature will only apply to COO submatrices\n");
+			break;
+			case 0x726961:
+			RSBENCH_STDOUT("# setting the reuse I/O arrays option (e.g. for type transitions)\n");
+			should_recycle_io = RSB_BOOL_TRUE;
+			break;
+			case 0x6e726961:
+			RSBENCH_STDOUT("# unsetting the reuse I/O arrays option (e.g. for type transitions)\n");
+			should_recycle_io = RSB_BOOL_FALSE;
+			break;
+			case 0x4A4A4A:
+			reverse_odd_rows = RSB_BOOL_TRUE;
+			RSBENCH_STDOUT("# WARNING: the row reversal feature only applies to CSR submatrices, and on indices only\n");
+			break;
+			case 0x6F6D6E:
+			usfnbuf = optarg;
+			break;
+			case 0x4A4A:
+			repeat_construction = rsb__util_atoi(optarg);
+			if(repeat_construction<1)
+			{
+				RSB_ERROR("Constructor repetition times should be a positive number!\n");goto err;
+			}
+			break;
+			case 0x4342: /* CB */
+			should_use_cb_method = rsb__util_atoi(optarg);
+			break;
+			case 0x4153: /* AS */
+			should_use_alternate_sort = RSB_BOOL_TRUE;
+			break;
+			case 0x534D: /* SM */
+			subdivision_multiplier = rsb__util_atof(optarg);
+			break;
+#if RSB_WANT_BOUNDED_BOXES
+			case 0x4242: /* BB */
+			want_bounded_box = rsb__util_atoi(optarg);
+			break;
+#endif /* RSB_WANT_BOUNDED_BOXES */
+			case 0x6e6c6d6d: /* nlmm */
+			want_no_leaf_spmm = /*rsb__util_atoi(optarg)*/ -1;
+			break;
+			case 0x636c6d6d: /* wlmm */
+#if RSB_ENABLE_INNER_NRHS_SPMV
+			want_no_leaf_spmm = 0;
+#else
+			RSB_ERROR("Cannot activate the RSB_IO_WANT_LEAF_LEVEL_MULTIVEC option because RSB_ENABLE_INNER_NRHS_SPMV is opted out!\n");goto err;
+#endif
+			break;
+			case 0x4D4D: /* MM */
+			mhs = optarg;
+			break;
+			case 0x6d617275:
+			maxtprt = rsb__util_atof(optarg);
+			maxtprt = RSB_MAX( RSB_TIME_ZERO, maxtprt  );
+			break;
+			case 0x6F: /* o */
+			dumpvec = rsb_dumpvec_res;
+			break;
+			case 0x6F6F: /* o */
+			dumpvec = rsb_dumpvec_rhs;
+			break;
+			case 0x70: /* p */
+			pattern_only = 1;
+			break;
+			case 0x4E: /* N */
+			g_sort_only = 1;
+			break;
+			/* handled by rsb__sample_program_options_get_flags() */
+			case 0x73: /* s */
+				RSB_DEPRECATED("use of the sort flag");
+			break;
+			case 0x7373: /* ss */
+			want_sort_after_load = RSB_BOOL_TRUE;
+			break;
+			case 0x736c736d: /* slsm */
+			want_slsm = RSB_BOOL_TRUE;
+			break;
+			case 0x736c756d: /* slum */
+			want_slum = RSB_BOOL_TRUE;
+			break;
+			case 0x736c686d: /* slhm */
+			want_slhm = RSB_BOOL_TRUE;
+			break;
+			case 0x736c6e75: /* slnu */
+			want_slnu = RSB_BOOL_TRUE;
+			break;
+			case 0x736c6d6: /* slmn */
+			want_slmn = rsb__util_atoi_km10(optarg);
+			break;
+			case 0x736c6e6e: /* slnn */
+			want_slnn = rsb__util_atoi_km10(optarg);
+			break;
+			case 0x736c6d73: /* slms */
+			want_slms = rsb__util_atoi_km2(optarg);
+			break;
+#ifdef RSB_HAVE_REGEX_H
+			case 0x736c6d72: /* slmr */
+			want_slmr = (optarg);
+			break;
+#endif /* RSB_HAVE_REGEX_H */
+			case 0x736c7373: /* slss */
+			want_slss = (optarg);
+			break;
+			case 0x74: /* t */
+			times = rsb__util_atoi(optarg);
+			break;
+			case 0x47: /* G */
+			guess_blocking_test = 1;
+			break;
+			case 0x54: /* T */
+			{
+				const char*toa = optarg;
+				ntypecodes=0; /* this neutralizes former -T ... option */
+				/* if( *optarg == 0x3A || *optarg == 0x2A ) */ /* : or * aka colon or asterisk */
+				if( ( ! isalpha(*optarg) ) || ( strstr(optarg,"all") != NULL ) )
+					toa = RSB_NUMERICAL_TYPE_PREPROCESSOR_SYMBOLS ;
+				for(;*toa;++toa)
+				if(isalpha(*toa))
+				{
+					if(ntypecodes<maxtypes)
+						typecodes[ntypecodes++]=typecode=toupper(*toa);
+					else
+					{
+						RSB_ERROR("Up to %d types supported! P.s.: Use a punctuation symbol to ask for all supported types.\n",maxtypes);
+						goto err;
+					}
+				}
+				typecodes[ntypecodes] = RSB_NUL;
+			}
+			break;
+			case 0x56: /* V */
+			want_verbose = RSB_BOOL_TRUE;
+			want_verbose_tuning ++;
+			break;
+			case 0x4949: /* II */
+			want_io_only = RSB_BOOL_TRUE;
+			break;
+			case 0x66: /* f */
+			filename = optarg;
+#if RSB_RSBENCH_STATIC_FILENAMEA
+#define RSB_RSBENCH_ADDF(FILENAME)	if(filenamen<RSB_RSBENCH_MAX_MTXFILES)filenamea[filenamen++] = (FILENAME); else {errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR("Please increase RSB_RSBENCH_MAX_MTXFILES (%d) and recompile !!\n",RSB_RSBENCH_MAX_MTXFILES);goto err;}
+#else
+ /* FIXME: for some reason, this seems to break e.g.  ./rsbench -oa -Ob --nrhs 1,2 -f pd.mtx -f A.mtx.
+    Of course this is wrong also w.r.t. rsb_calloc/rsb_lib_init, but that is not a problem.
+    Using calloc / realloc does not solve the problem.  */
+#define RSB_RSBENCH_ADDF(FILENAME)		if(filenamen==0) \
+				filenamea = rsb__calloc(sizeof(*filenamea)*(filenamen+1)); \
+			else \
+				filenamea = rsb__do_realloc(filenamea, sizeof(*filenamea)*(filenamen+1), sizeof(*filenamea)); \
+			filenamea[filenamen++] = (FILENAME);
+#endif
+			RSB_RSBENCH_ADDF(filename) /* FIXME */
+			break;
+			case 0x414C: /* AL */
+			alphai = rsb__util_atoi(optarg);
+			break;
+			case 0x4246: /* BE */
+			betai = rsb__util_atoi(optarg);
+			break;
+			case 0x4B: /* K */
+			want_convert = RSB_BOOL_TRUE; /* FIXME: ignoring argument */
+			break;
+			case 0x55: /* U */
+			want_update = RSB_BOOL_TRUE; /* FIXME: ignoring argument */
+			break;
+			case 0x5353: /* SS */
+			want_as_symmetric = RSB_BOOL_TRUE;
+			break;
+			case 0x5555: /* UU */
+			want_as_unsymmetric = RSB_BOOL_TRUE;
+			break;
+			case 0x4F4C54: /* OLT */
+			want_only_lowtri = RSB_BOOL_TRUE;
+			break;
+			case 0x4F4554: /* OUT */
+			want_only_upptri = RSB_BOOL_TRUE;
+			break;
+			case 0x6363:
+			/* this flag activates all interfaced libraries (if any) */
+#if RSB_WANT_MKL
+			want_mkl_bench = RSB_BOOL_TRUE;
+#endif /* RSB_WANT_MKL */
+			break;
+			case 0x6B: /* ncA */
+			want_column_expand = rsb__util_atoi(optarg);
+			break;
+			case 0x6E: /* n */
+			cns = optarg; /* cores (threads) numbers (specification) string */
+			break;
+			case 0x76: /* spmv_uauz */
+			be_verbose = 1;
+			break;
+			case 0x774446:	/* wde */
+			want_getdiag_bench = 1;
+			break;
+			case 0x776E68:	/* wnh */
+			want_nonzeroes_distplot = 1;
+			break;
+			case 0x777246:	/* wre */
+			want_getrow_bench = 1;
+			break;
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+			case 0x707763:	/* wpc */
+			want_perf_counters = 1; /* 1 is what user wants; 2 is for debug purposes */
+			break;
+#endif
+			case 0x77707373:	/* wpss */
+			want_print_per_subm_stats = RSB_BOOL_TRUE;
+			break;
+			case 0x776F6174:	/* woac */
+			want_accuracy_test = 2;
+			break;
+			case 0x776e7274:	/* wnrt */
+			wai=wat=0;
+			want_autotuner = merge_experimental = split_experimental = RSB_NEGATED_EXAGGERATED_TUNER_TIMES;
+			break;
+			case 0x7772740a:	/* wrt */
+			/* want_autotuner = rsb__util_atof(optarg); */
+			{
+				char wavv = 0x0;
+				int sargs = sscanf(optarg,"%lfs%dx%dt%c%c",&want_autotuner,&wai,&wat,&wav,&wavv);
+
+				if(!*optarg)
+					sargs = 0;
+				RSBENCH_STDOUT(" Passed %d arguments via autotuning string \"%s\" (an empty string requests defaults)\n",sargs,optarg);
+				if(sargs < 0)
+				{
+					RSBENCH_STDOUT("Wrong autotuning string detected!\n");
+					rsb_test_help_and_exit(argv[0],options, 0);
+					exit(0);
+				}
+				switch(sargs)
+				{
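+					/* deliberate fall-through: each case below also executes the
+					 * following ones, so every field the user omitted from the
+					 * autotuning string gets its default value */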
+					case(EOF):
+					case(0):
+						want_autotuner = 10.0;
+					case(1):
+						wai = 1;
+					case(2):
+						wat = 0;
+					case(3):
+						wav = 0;
+					case(4):
+						wavv = 0;
+					case(5):
+					break;
+				}
+				/* RSBENCH_STDOUT("Got an autotuning string: %lfs%dx%dt%c%c\n",want_autotuner,wai,wat,wav,wavv); */
+				if(toupper(wav)==0x56) /* V */
+					wavf = RSB_AUT0_TUNING_VERBOSE;
+				else
+					wavf = RSB_AUT0_TUNING_SILENT ;
+				if(toupper(wavv)==0x56) /* V */
+					wavf++;
+				if(wai>RSB_CONST_MAX_TUNING_ROUNDS)
+				{
+					RSBENCH_STDOUT("Restricting the number of tuning rounds to %d (%d is too much!).\n",RSB_CONST_MAX_TUNING_ROUNDS,wai);
+					wai = RSB_CONST_MAX_TUNING_ROUNDS;
+				}
+				RSBENCH_STDOUT("Will invoke autotuning for ~%lf s x %d rounds, specifying verbosity=%d and threads=%d. (>0 means no structure tuning; 0 means only structure tuning, <0 means tuning of both with (negated) thread count suggestion).\n",want_autotuner,wai,wavf,wat);
+			}
+			want_mkl_autotuner = want_autotuner;
+			break;
+#if RSB_HAVE_METIS
+			case 0x776d6272:	/* wmbr */
+			want_wmbr = RSB_BOOL_TRUE;
+			break;
+#endif
+			case 0x776d6174:	/* wmat */
+			sscanf(optarg,"%lf",&want_mkl_autotuner);
+			want_mkl_autotuner = RSB_MAX(1.0,want_mkl_autotuner); /* FIXME: actual value is unimportant as long as it is positive ! */
+			break;
+			case 0x776d6f62:	/* wmob */
+			mib = 1;
+			break;
+			case 0x776174:	/* wac */
+			want_accuracy_test = 1;
+			break;
+			case 0x775563:
+			want_unordered_coo_bench = RSB_BOOL_TRUE;
+			break;
+			case 0x767646:	/* wae */
+			want_ancillary_execs = RSB_BOOL_TRUE;
+			break;
+			case 0x42767646:	/* nwae */
+			want_ancillary_execs = RSB_BOOL_FALSE;
+			break;
+			case 0x77:	/* w */
+			b_w_filename = optarg;
+			break;
+			case 0x63777273:	/* wcsr */
+			csr_w_filename = optarg;
+			break;
+			case 0x77707266:
+			fprfn = optarg;
+			want_perf_dump = RSB_BOOL_TRUE;
+			if(optarg && !*optarg)
+				fprfn = NULL;
+			break;
+			case 0x776e7072:
+			fprfn = NULL;
+			want_perf_dump = RSB_BOOL_FALSE;
+			break;
+			case 0x77707261:
+			apprfn = optarg;
+			break;
+			case 0x77707270:
+			ppprfn = optarg;
+			break;
+			case 0x64697a65 :	/* dize */
+			RSB_DO_FLAG_ADD(flags_o,RSB_FLAG_DISCARD_ZEROS);
+			break;
+			case 0x68: /* h */
+			/* should use rsb_test_help_and_exit */
+			RSBENCH_STDERR(
+				"%s "RSB_INFOMSG_SAK".\n"
+				"You can use it to perform sparse matrix - unitary vector multiplication, "
+				"specifying the blocking parameters, the times to perform multiplication.\n"
+				"\n"
+				"Additional debugging flags (-d, -p) are present.\n"
+				"\n"
+				"Usage : %s [OPTIONS]\n where OPTIONS are taken from "
+				"[ -f filename ] \n"
+				"[ -F matrix_storage=[b|c|bc] ] \n"
+				"[ -r br ] \n"
+				"[ -c bc ] \n"
+				"[ -t TIMES ]\n"
+				"[ -n OPENMP_THREADS ]\n"
+				"[ -T ( S | D | I | C ) /* float, double, integer, character*/ ] \n"
+				"[ -s /* will internally sort out nnzs */ ] \n"
+				"[ -p /* will set to 1 nonzeros */ ] \n"
+				"[-d /* if debugging on */]: \n"
+				"[-A /* for auto-blocking */]: \n"
+				"[ -h ] \n"
+				"\n"
+				"please note that not all of the suggested numerical types could be compiled in right now and/or work well.default is double.\n"
+				"\n"
+				"\n"
+				"e.g.: %s -f raefsky4.mtx -t 10 -T :   # 10 times for each of the supported numerical types\n",
+				argv[0],
+				argv[0],
+				argv[0]);
+			rsb_test_help_and_exit(argv[0],options, 0);
+			exit(0);
+		}
+	}
+
+	if( (!RSB_DO_FLAG_HAS(flags_o,RSB_FLAG_QUAD_PARTITIONING)) && want_recursive != RSB_BOOL_FALSE )
+	{
+		RSB_WARN("Assuming a recursive matrix structure is requested...\n");
+		RSB_DO_FLAG_ADD(flags_o,RSB_FLAG_QUAD_PARTITIONING);
+	}
+	for (c = optind; c < argc; c++)                                                     
+	{
+		RSB_RSBENCH_ADDF(argv[c])
+	}
+	if(want_verbose == RSB_BOOL_TRUE)
+	{
+		rsb_char_t cbuf[RSB_MAX_COMPILE_COMMAND_LENGTH];
+		rsb__echo_timeandlabel(" beginning run at ","\n",&st);
+		rsb__echo_cargs(argc, argv);
+		errval = rsb__do_lib_get_info_str(0, &cbuf[0], sizeof(cbuf)-1);
+		if(RSB_SOME_ERROR(errval))
+			errval = RSB_ERR_NO_ERROR;
+		else
+			RSBENCH_STDOUT("# compiled with: %s\n",cbuf);
+	}
+	printf("# average timer granularity: %2.3lg s\n",tinf);
+	if(want_perf_dump)
+	{
+		if(!fprfn)
+		{
+			rsb__impcdstr(fprfnb+strlen(fprfnb),"rsbench_pr",".rpr",ppprfn,apprfn);
+			fprfn = fprfnb;
+		}
+		if(!cprfn)
+			rsb__sprintf(cprfnb,"%s.tmp",fprfn),
+			cprfn = cprfnb;
+		printf("# Will write a final performance record to file %s and periodic checkpoints to %s\n",fprfn,cprfn);
+	}
+	if( maxtprt > RSB_TIME_ZERO )
+		printf("# If program run time will exceed %2.3lg s, will attempt early termination.\n",maxtprt );
+
+	RSBENCH_STDOUT("# will %s""perform ancillary tests.\n", want_ancillary_execs ?"":"NOT ");
+	RSBENCH_STDOUT("# will flush cache memory: %s between each operation measurement series, and %s between each operation.\n", want_outer_flush?"":"NOT", want_inner_flush?"":"NOT");
+	RSBENCH_STDOUT("# will %s any zero encountered in the matrix.\n", ( RSB_DO_FLAG_HAS(flags_o,RSB_FLAG_DISCARD_ZEROS) )?"discard":"keep");
+	if( nrhsa == NULL ) nrhsa = &nrhs;
+	if( incXa == NULL ) incXa = &incX;
+	if( incYa == NULL ) incYa = &incY;
+	if(want_likwid == RSB_BOOL_TRUE){RSB_LIKWID_MARKER_INIT;}
+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+	if(ntypecodes==0)
+		typecode = RSB_NUMERICAL_TYPE_DOUBLE ;
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+	if(ntypecodes==0)
+	{
+		typecodes[ntypecodes++] = typecode;
+		typecodes[ntypecodes] = RSB_NUL;
+	}
+
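+	/* Optional runtime settings are collected below as key/value pairs in io;
+	 * iop stays NULL (library defaults) unless at least one pair is appended,
+	 * and is later handed to rsb_lib_init(). */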
+	io.n_pairs=0;
+	if(should_use_alternate_sort)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&should_use_cb_method;
+		io.keys[io.n_pairs]=RSB_IO_WANT_SORT_METHOD;
+		io.n_pairs++;
+	}
+	if(should_use_cb_method!=0)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&should_use_cb_method;
+		io.keys[io.n_pairs]=RSB_IO_WANT_CACHE_BLOCKING_METHOD;
+		io.n_pairs++;
+	}
+	if(mhs!=NULL)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&mhs;
+		io.keys[io.n_pairs]=RSB_IO_WANT_MEMORY_HIERARCHY_INFO_STRING;
+		io.n_pairs++;
+	}
+	if(subdivision_multiplier!=0.0)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&subdivision_multiplier;
+		io.keys[io.n_pairs]=RSB_IO_WANT_SUBDIVISION_MULTIPLIER;
+		io.n_pairs++;
+	}
+#if RSB_WANT_BOUNDED_BOXES
+	if(want_bounded_box==0)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&want_bounded_box;
+		io.keys[io.n_pairs]=RSB_IO_WANT_BOUNDED_BOX_COMPUTATION;
+		io.n_pairs++;
+	}
+#endif /* RSB_WANT_BOUNDED_BOXES */
+	if(want_no_leaf_spmm!=0)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&want_no_leaf_spmm;
+		io.keys[io.n_pairs]=RSB_IO_WANT_LEAF_LEVEL_MULTIVEC;
+		io.n_pairs++;
+	}
+
+#ifdef RSB_HAVE_UNISTD_H
+{
+	extern char **environ;
+	char **me = NULL;
+	rsb_int_t rpevc = 0; /* RSB_ prefixed environment variables count */
+
+	for(me=environ;*me;++me)
+		if( strstr(*me,"RSB_") == *me )
+			rpevc++;
+
+	if( rpevc )
+	{
+		RSB_STDOUT("# The user specified %d RSB_ prefixed environment variables:\n",rpevc);
+		for(me=environ;*me;++me)
+			if( strstr(*me,"RSB_") == *me )
+				RSB_STDOUT("#  export %s\n",*me);
+	}
+}
+#endif /* RSB_HAVE_UNISTD_H */
+
+	if( rsb__getenv("KMP_AFFINITY") )
+		RSB_STDOUT("# export KMP_AFFINITY=%s\n",rsb__getenv("KMP_AFFINITY"));
+	if( rsb__getenv("OMP_PROC_BIND") )
+		RSB_STDOUT("# export OMP_PROC_BIND=%s\n",rsb__getenv("OMP_PROC_BIND"));
+	if( rsb__getenv("OMP_NUM_THREADS") )
+		RSB_STDOUT("# export OMP_NUM_THREADS=%s\n",rsb__getenv("OMP_NUM_THREADS"));
+
+	if( want_verbose != RSB_BOOL_FALSE )
+		RSBENCH_STDOUT("# user specified a verbosity level of %d (each --verbose occurrence counts +1)\n",want_verbose_tuning );
+	else
+		RSBENCH_STDOUT("# user did not specify any verbosity level (each --verbose occurrence counts +1)\n");
+
+	if((errval = rsb_lib_init(iop))!=RSB_ERR_NO_ERROR)
+	{
+		RSB_ERROR("Error while initializing the library.");
+		goto err;
+	}
+#if RSB_WANT_PERFORMANCE_COUNTERS_IN_RSBENCH 
+	if((errval = rsb_perf_counters_init())!=RSB_ERR_NO_ERROR)
+	{
+		RSBENCH_STDERR("problem initializing performance counters (rsb_perf_counters_init gave %d)\n",(int)errval);
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+#endif
+
+	if( RSB_MKL_APPROPRIATE_AT_TIME_SPEC( want_autotuner ) || RSB_MKL_APPROPRIATE_AT_TIME_SPEC( merge_experimental ) || RSB_MKL_APPROPRIATE_AT_TIME_SPEC( split_experimental ) )
+	{
+		RSB_STDOUT("# auto-tuning oriented output implies  times==0 iterations and sort-after-load.\n");
+		times = 0;
+		/* if(want_verbose) */
+		want_impatiently_soon_pre_results = 1;
+		want_sort_after_load = RSB_BOOL_TRUE;
+	}
+	else
+	if( times < 1 )
+	{
+		RSB_STDOUT("# The iteration times should be specified as a positive number!\n");
+		RSB_ERROR(RSB_ERRM_ES);
+		goto err;
+	}
+	else
+		RSB_STDOUT("# Will measure on times=%d iterations.\n",times);
+
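+	/* no matrix file was specified: fall back to the default in fnbufp */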
+	if( 0 == filenamen )
+#if RSB_RSBENCH_STATIC_FILENAMEA
+	       	filenamea[0] = fnbufp[0];
+#else
+	       	filenamea = &fnbufp;
+#endif
+	filenamen = RSB_MAX(1,filenamen);
+
+	if(cns)
+	{
+		ca = NULL;
+		cn = 0;
+		if(RSB_SOME_ERROR(rsb__util_get_bx_array(cns,&cn,&ca)))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+	}
+	else
+	{
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+		/* #define rsb_get_max_threads omp_get_max_threads */
+		cn = 1;
+		ca_[0] = omp_get_max_threads ();
+		RSBENCH_STDOUT("# User did not specify threads; assuming %d.\n", cn );
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+	}
+
+#if RSB_WANT_MKL
+	if( RSB_MKL_APPROPRIATE_AT_TIME_SPEC( want_mkl_autotuner ) )
+		want_mkl_bench_csr = RSB_BOOL_FALSE;
+#endif /* RSB_WANT_MKL */
+
+	RSBENCH_STDOUT("# Using alpha=%d beta=%d for rsb_spmv/rsb_spsv/rsb_spmm/rsb_spsm.\n",alphai,betai);
+
+	if(want_perf_dump) 
+		rsb__pr_init(&rspr, NULL, filenamen, cn, incXn, incYn, nrhsn, ntypecodes, tn);
+
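+	/* Note: with want_impatiently_soon_pre_results set, the loop below runs one
+	 * extra iteration, used only to dump the pending record of the last matrix;
+	 * the filenamei >= filenamen check inside skips benchmarking in that pass. */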
+	for(     filenamei=0;     filenamei<filenamen+want_impatiently_soon_pre_results  ;++filenamei     )
+	{
+		if( filenamea && ( filenamea[filenamei] != filename_old) && filename_old && want_impatiently_soon_pre_results && want_perf_dump && filenamei>0 && filenamen>1) 
+		{
+			int filenameif = filenamei-1;
+			RSBENCH_STDOUT("# ====== BEGIN Impatient results record for matrix %d/%d: %s.\n",filenamei,filenamen,rsb__basename(filename_old));
+			errval = rsb__pr_dump_inner(rspr, filenamea, ca, incXa, incYa, nrhsa, typecodes, NULL,&filenameif, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, RSB_FLAG_NOFLAGS, RSB_FLAG_NOFLAGS, NULL);
+			RSBENCH_STDOUT("# ======  END  Impatient results record for matrix %d/%d: %s.\n",filenamei,filenamen,rsb__basename(filename_old));
+			if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+			if( filenameif > 0 && filenameif < filenamen-1) /* not after first and not at last */
+				RSBENCH_STDOUT("# ====== BEGIN Impatient summary record for the %d/%d matrices so far.\n", filenameif+1,filenamen),
+				errval = rsb__pr_dump_inner(rspr, filenamea, ca, incXa, incYa, nrhsa, typecodes, NULL, NULL,&filenameif, NULL, NULL, NULL, NULL, NULL, NULL, NULL, RSB_FLAG_NOFLAGS, RSB_FLAG_NOFLAGS, NULL),
+				RSBENCH_STDOUT("# ======  END  Impatient summary record for the %d/%d matrices so far.\n", filenameif+1,filenamen);
+			if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+			errval = rsb__pr_save(cprfn, rspr, filenamea, ca, incXa, incYa, nrhsa, typecodes, NULL, RSB_BOOL_TRUE );
+			if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}
+
+		if( filenamei >= filenamen )
+			continue; /* temporary: only for the want_impatiently_soon_pre_results trick */
+
+		if(filenamea)
+		{
+			filename = filenamea[filenamei];
+		}
+
+		if(filenamen>1)
+		{
+			RSBENCH_STDOUT("# multi-file benchmarking (file %d/%d) -- now using %s\n",filenamei+1,filenamen,rsb__basename(filename));
+		}
+
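+	/* benchmark loop nest: measure every requested combination of incX, incY,
+	 * nrhs and numerical typecode for the current matrix */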
+	for(     incXi=0;     incXi<incXn     ;++incXi     )
+	{
+	for(     incYi=0;     incYi<incYn     ;++incYi     )
+	{
+	for(     nrhsi=0;     nrhsi<nrhsn     ;++nrhsi     )
+	{
+	for(typecodesi=0;typecodesi<ntypecodes;++typecodesi)
+	{
+	rsb_flags_t flags = flags_o;
+	rsb_thread_t cl; /* cores number last (overrides cn for this typecode cycle) */
+	typecode = typecodes[typecodesi];
+
+	if(ntypecodes>1)
+	{
+		RSBENCH_STDOUT("# multi-type benchmarking (%s) -- now using typecode %c (last was %c).\n",typecodes,typecode,typecode_old);
+		if( RSB_MATRIX_UNSUPPORTED_TYPE ( typecode ) )
+		{
+			RSBENCH_STDOUT("# Skipping unsupported type \"%c\" -- please choose from \"%s\".\n",typecode,RSB_NUMERICAL_TYPE_PREPROCESSOR_SYMBOLS );
+			continue;
+		}
+	}
+
+	nrhs = nrhsa[nrhsi];
+	if( nrhsn > 1 && nrhss )
+	{
+		RSBENCH_STDOUT("# multi-nrhs benchmarking (%s) -- now using nrhs %d.\n",nrhss,nrhs);
+	}
+	incX = incXa[incXi];
+	incY = incYa[incYi];
+	if(incXn>1)
+	{
+		RSBENCH_STDOUT("# multi-incX benchmarking (%d/%d) -- now using incX=%d.\n",incXi+1,incXn,incX);
+	}
+	if(incYn>1)
+	{
+		RSBENCH_STDOUT("# multi-incY benchmarking (%d/%d) -- now using incY=%d.\n",incYi+1,incYn,incY);
+	}
+
+	if( want_only_star_scan )
+		if( RSB_MIN(incXi,1) + RSB_MIN(incYi,1) + RSB_MIN(nrhsi,1) > 1 ) /* two or more exceed index one */
+		{
+			RSBENCH_STDOUT("# Skipping a case with incX=%d incY=%d nrhs=%d.\n",incX,incY,nrhs);
+			goto frv;
+		}
+ 	RSBENCH_STDOUT("# so far, program took %.3lfs of wall clock time; ancillary tests %.3lfs; I/O %.3lfs; checks %.3lfs; conversions %.3lfs; rsb/mkl tuning %.3lfs/%.3lfs ",totprt + rsb_time(),totatt,totiot,totht,totct,tottt,totmt);
+	/* rsb__getrusage(); */ /* FIXME: new (20140727) */
+#ifndef RSB_DISABLE_ALLOCATOR_WRAPPER
+	RSBENCH_STDOUT("( allocated_memory:%zd allocations_count:%zd)",rsb_global_session_handle.allocated_memory,rsb_global_session_handle.allocations_count);
+#endif
+	RSBENCH_STDOUT(".\n"); /* FIXME: this takes too much space here ! */
+
+	if(cns)
+	{
+		cc = ca[ci];
+	}
+	cl=cn;
+	if(bcs)
+		if(RSB_SOME_ERROR(rsb__util_get_bx_array(bcs,&bcl,&bcv)))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+	if(brs)
+		if(RSB_SOME_ERROR(rsb__util_get_bx_array(brs,&brl,&brv)))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+
+	if(incX!=incY)
+	{
+		RSB_ERROR("setting (incX=%d) != (incY=%d) in triangular solve is unsupported in this program\n",incX,incY);
+		errval = RSB_ERR_BADARGS;goto err;
+	}
+
+
+	if(RSB_SOME_ERROR(errval = rsb__fill_with_ones(beta,typecode,1,1))){ RSB_ERROR(RSB_ERRM_ES);goto err;}
+	if(RSB_SOME_ERROR(errval = rsb__fill_with_ones(alpha,typecode,1,1))){ RSB_ERROR(RSB_ERRM_ES);goto err;}
+	/* FIXME: the following collides with the former */
+	rsb__util_set_area_to_converted_integer(alphap,typecode,alphai);
+	rsb__util_set_area_to_converted_integer(betap ,typecode,betai);
+
+#ifdef RSB_WANT_OSKI_BENCHMARKING 
+	/* FIXME : note that this option is not compatible with g_sort_only .. */
+        oski_Init();
+#endif /* RSB_WANT_OSKI_BENCHMARKING */
+	g_debug = ((flags & RSB_FLAG_SHOULD_DEBUG) != 0);
+
+	if(g_sort_only)RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORT_INPUT);
+
+	if(typecode==-1)
+	{
+		RSBENCH_STDERR("error : please recompile with double precision floating point numbers supported! \n");
+		return RSB_ERR_GENERIC_ERROR;
+	}
+	rsb__util_set_area_to_converted_integer(&pone[0],typecode,+1);
+
+
+
+	if(brl<1) { /* this is a hack */ brv = rua; brl = RSB_ROWS_UNROLL_ARRAY_LENGTH;}
+	if(bcl<1) { /* this is a hack */ bcv = cua; bcl = RSB_COLUMNS_UNROLL_ARRAY_LENGTH;}
+
+	if(RSB_MATRIX_UNSUPPORTED_TYPE(typecode))
+	{
+		RSBENCH_STDERR("This numerical type is not supported.\n");
+		goto err;
+	}
+
+	/* CONDITIONALLY, GENERATING A MATRIX */
+	if(should_generate_dense!=0)
+	{
+		rsb_nnz_idx_t dim = RSB_FABS(should_generate_dense);
+		rsb_nnz_idx_t spacing = want_generated_spacing>1?want_generated_spacing:1;
+		
+		if(((should_generate_lband>-1) || (should_generate_uband>-1)) && should_generate_dense>0)
+		{
+			rsb__sprintf(fnbuf,"banded-%dx%d-%d+%d-%dnz-spaced-%d",dim*spacing,dim*spacing,should_generate_lband,should_generate_uband,RSB_NNZ_OF_BANDED(dim,should_generate_lband,should_generate_uband),spacing);
+		}
+		else
+		{
+		if(want_generated_spacing>0)
+		{
+			if(should_generate_dense>0)
+				rsb__sprintf(fnbuf,"dense-%dx%d-%dnz",dim*spacing,should_generate_dense_nc*spacing/*dim*spacing*/,dim*dim);
+			else
+				rsb__sprintf(fnbuf,"lower-%dx%d-%dnz-spaced-%d",dim*spacing,dim*spacing,(dim*(dim-1))/2+dim,spacing);
+		}
+		else
+		{
+			if(should_generate_dense>0)
+				rsb__sprintf(fnbuf,"dense-%dx%d-%dnz",dim*spacing,should_generate_dense_nc*spacing/*dim*spacing*/,dim*should_generate_dense_nc);
+			else
+				rsb__sprintf(fnbuf,"lower-%dx%d-%dnz",dim*spacing,dim*spacing,(dim*(dim-1))/2+dim);
+		}
+		}
+		if(want_incX)
+				rsb__sprintf(fnbuf+strlen(fnbuf),"-incX-%d",incX);
+		if(want_incY)
+				rsb__sprintf(fnbuf+strlen(fnbuf),"-incY-%d",incY);
+/*		rsb__sprintf(fnbuf,"dense-%dx%d-%dnz",dim,dim,dim*dim);*/
+/*		rsb__sprintf(fnbuf,"dense-%dx%d",dim,dim);*/
+		filename=&(fnbuf[0]);
+	}
+
+	if(usfnbuf)
+		filename=usfnbuf;
+
+	/* CONDITIONALLY, READING A MATRIX FROM FILE */
+if(filename || b_r_filename)
+{
+
+	rsb_blk_idx_t M_b=0;/* was 0 */
+	rsb_blk_idx_t K_b=0;
+	rsb_nnz_idx_t i=0;
+
+	rsb_coo_idx_t *p_r=NULL,*p_c=NULL;	/* FIXME : get rid of these */
+	void *lhs=NULL,*rhs=NULL;
+	int bcvi=0;
+	int brvi=0;
+	rsb_time_t frt = RSB_TIME_ZERO;
+
+	if( filename != filename_old )
+	{
+		RSB_CONDITIONAL_FREE(IA);
+		RSB_CONDITIONAL_FREE(JA);
+		RSB_CONDITIONAL_FREE(VA);
+	}
+	if(!should_recycle_io) { RSB_DEBUG_ASSERT( VA == NULL ); }
+	if( should_recycle_io && VA && filename == filename_old )
+	{
+		flags = r_flags;
+		if( typecode != typecode_old )
+		{
+			void *VA_ = rsb__malloc_vector(nnz,typecode);
+			errval = rsb__do_copy_converted_scaled(VA, VA_, NULL, typecode_old, typecode, nnz, RSB_DEFAULT_TRANSPOSITION);
+			if(RSB_SOME_ERROR(errval)) { RSB_ERROR(RSB_ERRM_ES);goto err; }
+			RSB_CONDITIONAL_FREE(VA);
+			VA = VA_;
+			RSBENCH_STDOUT("# Reusing type converted (%c->%c) arrays from last iteration instead of reloading matrix file.\n",typecode_old,typecode);
+			typecode_old = typecode;
+		}
+		else
+		{
+			RSBENCH_STDOUT("# Reusing same type     (type %c) arrays from last iteration instead of reloading matrix file.\n",typecode);
+		}
+		goto have_va_ia_ja;
+	}
+	if((!should_generate_dense) && (!b_r_filename))
+	{
+		rsb_bool_t is_symmetric = RSB_BOOL_FALSE;
+		rsb_bool_t is_hermitian = RSB_BOOL_FALSE;
+		size_t fsz = rsb_sys_filesize(filename);
+
+		frt = - rsb_time();
+
+			{
+				/* FIXME : we remove symmetry flags, for they are incompatible with triangular solve */
+				RSB_DO_FLAG_DEL(flags,RSB_FLAG_SYMMETRIC);
+				RSB_DO_FLAG_DEL(flags,RSB_FLAG_HERMITIAN);
+			/*
+				if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER))
+				{
+					RSB_DO_FLAG_ADD(flags,RSB_FLAG_UPPER_TRIANGULAR);
+				}
+				else
+					RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER_TRIANGULAR);
+			*/
+				//RSB_DO_FLAG_ADD(flags,RSB_FLAG_DISCARD_ZEROS) ;//problematic : FIXME
+			}
+#ifdef RSB_HAVE_REGEX_H
+		if( want_slmr && rsb_regexp_match(rsb__basename(filename),want_slmr) == RSB_BOOL_TRUE )
+		{
+			RSB_STDOUT("# skipping loading matrix file %s, because it matches regex /%s/.\n",filename,want_slmr);
+			goto nfnm;
+		}
+#endif /* RSB_HAVE_REGEX_H */
+		if( want_slss && ( strstr( rsb__basename(filename), want_slss ) != NULL ) )
+		{
+			RSB_STDOUT("# skipping loading matrix file %s, because it matches substring %s.\n",filename,want_slss);
+			goto nfnm;
+		}
+		/* if(RSB_SOME_ERROR(rsb__do_util_get_matrix_dimensions(filename,&ncA,&nrA,&nnz,NULL)) ) */
+		if(RSB_SOME_ERROR(rsb__util_mm_info_matrix_f(filename,&nrA,&ncA,&nnz,NULL,&is_symmetric,&is_hermitian,NULL,NULL,NULL,NULL)) )
+		{
+			RSBENCH_STDERR(RSB_ERRMSG_PROIFAMM ": %s ..\n",filename);
+			if( ignore_failed_fio )
+			{
+				RSBENCH_STDERR("Will ignore error and continue with the following files.\n");
+				errval = RSB_ERR_NO_ERROR;
+				goto nfnm;
+			}
+			goto err;
+		}
+		if( want_slnu == RSB_BOOL_TRUE && ( is_hermitian || is_symmetric ) )
+		{
+			RSB_STDOUT("# skipping loading not unsymmetric matrix %s, as requested.\n",filename);
+			goto nfnm;
+		}
+		if( want_slsm == RSB_BOOL_TRUE && is_symmetric )
+		{
+			RSB_STDOUT("# skipping loading symmetric matrix %s, as requested.\n",filename);
+			goto nfnm;
+		}
+		if( want_slhm == RSB_BOOL_TRUE && is_hermitian )
+		{
+			RSB_STDOUT("# skipping loading hermitian matrix %s, as requested.\n",filename);
+			goto nfnm;
+		}
+		if( want_slum == RSB_BOOL_TRUE && !is_symmetric )
+		{
+			RSB_STDOUT("# skipping loading unsymmetric matrix %s, as requested.\n",filename);
+			goto nfnm;
+		}
+		if( want_slmn > 0 && want_slmn <  nnz )
+		{
+			RSB_STDOUT("# skipping loading matrix %s, having %d > %d allowed nonzeroes.\n",filename,nnz,want_slmn);
+			goto nfnm;
+		}
+		if( want_slms > 0 && want_slms <= fsz / 1024 )
+		{
+			RSB_STDOUT("# skipping loading matrix %s, having %zd>=%zd allowed filesize (KiB).\n",filename,fsz,want_slms);
+			goto nfnm;
+		}
+		if( want_slnn > 0 && want_slnn >  nnz )
+		{
+			RSB_STDOUT("# skipping loading matrix %s, having %d < %d allowed nonzeroes.\n",filename,nnz,want_slnn);
+			goto nfnm;
+		}
+	
+		RSB_STDOUT("# reading %s (%zd bytes / %zd "RSB_MEGABYTE_SYM" / %zd nnz / %zd rows / %zd columns / %zd MiB COO) as type %c...\n",rsb__basename(filename),fsz,RSB_DIV(fsz,RSB_MEGABYTE),(size_t)nnz,(size_t)nrA,(size_t)ncA,RSB_DIV(RSB_UTIL_COO_OCCUPATION(nrA,ncA,nnz,typecode),RSB_MEGABYTE),typecode);
+
+		if( ( nrA == ncA ) && ( nrA > 1 ) && ( want_only_lowtri || want_only_upptri ) )
+			nnz += nrA;	/* the loading routine shall allocate nnz+nrA */
+		else
+ 			nnz = 0;	/* the loading routine should determine nnz */
+
+		totiot -= rsb_time();
+		errval = rsb__util_mm_load_matrix_f(filename,&IA,&JA,&VA,&nrA,&ncA,&nnz,typecode,flags,NULL,NULL);
+		totiot += rsb_time();
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSBENCH_STDERR(RSB_ERRMSG_NOTMTXMKT" : %s ..\n",filename);
+			goto err;
+		}
+		else
+		{
+			rsb_bool_t is_lower = RSB_BOOL_FALSE;
+			rsb_bool_t is_upper = RSB_BOOL_FALSE;
+			rsb_bool_t is_vector = RSB_BOOL_FALSE;
+
+			filename_old = filename;
+			typecode_old = typecode;
+
+			frt += rsb_time();
+			RSB_STDOUT("# file input of %s took %6.2lf s (%.0lf nnz, %.0lf nnz/s ) (%.2lf MB/s ) \n",rsb__basename(filename),frt,
+				(((double)nnz)),
+				(((double)nnz)/frt),
+				(((double)rsb_sys_filesize(filename))/(frt*RSB_INT_MILLION))
+			);
+
+			if (want_io_only)
+			{
+				/*  */
+				goto err;
+			}
+
+			if(want_transpose)
+			{
+				RSB_SWAP(rsb_coo_idx_t*,IA,JA);
+				RSB_SWAP(rsb_coo_idx_t,nrA,ncA);
+				flags = rsb__do_flip_uplo_flags(flags);
+			}
+
+			if( nrA==ncA && nrA>1 && ( want_only_lowtri || want_only_upptri ) )
+			{
+				rsb_nnz_idx_t discarded = 0;
+				/*
+				rsb__util_coo_array_set_sequence(IA+nnz,nrA,0,1);
+				rsb__util_coo_array_set_sequence(JA+nnz,nrA,0,1);
+				 */
+				RSB_FCOO_ISET(IA+nnz,0,nrA);
+				RSB_FCOO_ISET(JA+nnz,0,nrA);
+				rsb__fill_with_ones(((rsb_byte_t*)VA)+RSB_SIZEOF(typecode)*nnz,typecode,nrA,1);
+				nnz += nrA;	/* nnz+nrA this number has been overwritten as nnz */
+				if( want_only_lowtri )
+				{
+					RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER_TRIANGULAR);
+					errval = rsb_weed_out_non_lowtri(VA,IA,JA,nnz,typecode,NULL,&discarded);
+					RSBENCH_STDOUT("# discarding %d non lower elements of %d.\n",discarded,nnz);
+					nnz-=discarded;
+				}
+				if( want_only_upptri )
+				{
+					RSB_DO_FLAG_ADD(flags,RSB_FLAG_UPPER_TRIANGULAR);
+					errval = rsb_weed_out_non_upptri(VA,IA,JA,nnz,typecode,NULL,&discarded);
+					RSBENCH_STDOUT("# discarding %d non upper elements of %d.\n",discarded,nnz);
+					nnz-=discarded;
+				}
+
+				if(RSB_SOME_ERROR(errval))
+				{RSB_ERROR(RSB_ERRM_ES);goto err;}
+			}
+
+			if(RSB_SOME_ERROR(rsb__util_mm_info_matrix_f(filename,NULL,NULL,NULL,NULL,&is_symmetric,&is_hermitian,NULL,&is_lower,&is_upper,&is_vector) ))
+			{
+				RSBENCH_STDERR(RSB_ERRMSG_PROIFAMM ": %s ..\n",filename);
+				goto err;
+			}
+			if( is_vector )
+			{
+				RSBENCH_STDERR("file %s seems to store a vector\n",filename);
+				goto err;
+			}
+			if(RSB_BOOL_AND(want_as_unsymmetric,want_as_symmetric))
+			{
+				RSBENCH_STDERR("requiring both symmetric and unsymmetric flags is contradictory!\n");
+				goto err;
+			}
+			if(want_as_unsymmetric)
+			{
+				is_symmetric = RSB_BOOL_FALSE;
+				is_hermitian = RSB_BOOL_FALSE;
+			}
+			if(want_as_symmetric)
+			{
+				is_symmetric = RSB_BOOL_TRUE;
+				is_hermitian = RSB_BOOL_TRUE;
+			}
+			if(!RSB_IS_MATRIX_TYPE_COMPLEX(typecode) && is_hermitian)
+			{
+				RSBENCH_STDOUT("# Warning: non complex matrix with hermitian flags! Converting to symmetric!\n");
+				is_hermitian = RSB_BOOL_FALSE;
+				is_symmetric = RSB_BOOL_TRUE;
+			}
+			/* TODO: use rsb__flags_from_props() */
+			if(is_hermitian == RSB_BOOL_TRUE && !RSB_EXPERIMENTAL_EXPAND_SYMMETRIC_MATRICES_BY_DEFAULT)
+			{
+				RSB_DO_FLAG_ADD(flags,RSB_FLAG_HERMITIAN);
+			}
+			if(is_symmetric == RSB_BOOL_TRUE && !RSB_EXPERIMENTAL_EXPAND_SYMMETRIC_MATRICES_BY_DEFAULT)
+			{
+				RSB_DO_FLAG_ADD(flags,RSB_FLAG_SYMMETRIC);
+			}
+
+			if( (!RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER)) && (!RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER)) )
+			{
+				/* is_upper and is_lower as declared in the matrix file */
+				if(is_upper)
+ 					RSB_DO_FLAG_ADD(flags,RSB_FLAG_UPPER);
+				if(is_lower)
+ 					RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER);
+			}
+			RSB_DO_ERROR_CUMULATE(errval,rsb__do_cleanup_nnz(VA,IA,JA,nnz,0,0,nrA,ncA,&nnz,typecode,flags)); /* NEW */
+			if(RSB_SOME_ERROR(errval))
+			{ RSB_ERROR(RSB_ERRM_ES); goto err; }
+			if(want_sort_after_load)
+			{
+				rsb_time_t dt = RSB_TIME_ZERO;
+				dt = - rsb_time();
+				if((errval = rsb__util_sort_row_major_parallel(VA,IA,JA,nnz,nrA,ncA,typecode,RSB_FLAG_NOFLAGS))!=RSB_ERR_NO_ERROR)
+				{ RSB_ERROR(RSB_ERRM_ES); goto err; }
+				dt += rsb_time();
+				RSBENCH_STDOUT("#pre-sorting took %lg s\n",dt);
+				RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORTED_INPUT);
+			}
+#if RSB_HAVE_METIS
+			if(want_wmbr)
+			{
+				/* FIXME: unfinished */
+				rsb_coo_idx_t *perm = NULL,*iperm = NULL,*vwgt = NULL;
+
+				perm  = rsb__calloc(sizeof(rsb_coo_idx_t)*(nrA+1));
+				iperm = rsb__calloc(sizeof(rsb_coo_idx_t)*(nrA+1));
+#if 1
+				vwgt  = rsb__calloc(sizeof(rsb_coo_idx_t)*(nnz));
+				rsb__util_coo_array_set(vwgt,nnz,0);
+#else
+				vwgt  = rsb__clone_area(JA,nnz*sizeof(rsb_coo_idx_t));
+#endif
+				if( !perm || !iperm || !vwgt )
+				{
+					RSB_CONDITIONAL_FREE(iperm);
+					RSB_CONDITIONAL_FREE(perm);
+					RSB_CONDITIONAL_FREE(vwgt);
+					errval = RSB_ERR_ENOMEM;
+					RSB_ERROR(RSB_ERRM_ES);
+					goto err;
+				}
+				errval = rsb__util_sort_row_major_parallel(VA,IA,JA,nnz,nrA,ncA,typecode,RSB_FLAG_NOFLAGS);
+				errval = rsb__do_switch_fullword_array_to_compressed(IA,nnz,nrA);
+				RSBENCH_STDOUT("Calling METIS_NodeND\n");
+				/*errval = */ METIS_NodeND(&nrA,IA,JA,vwgt,NULL,perm,iperm); /* Scotch wrapper crashes on vwgt=NULL. and is void */
+				RSBENCH_STDOUT("Exited  METIS_NodeND with code %d\n",errval);
+				/* if(errval == METIS_OK) */
+				{
+					RSBENCH_STDOUT("Permuting..\n");
+					errval = rsb__do_switch_compressed_array_to_fullword_coo(IA, nrA, 0, NULL);
+					errval = rsb__do_permute_rows_with_coo_index( IA, perm, nnz);
+					RSBENCH_STDOUT("Permuted.\n");
+					for(i=0;i<nrA;++i){ RSB_STDOUT("%d\n",perm[i]);}
+				}
+				RSB_CONDITIONAL_FREE(vwgt);
+				RSB_CONDITIONAL_FREE(perm);
+				RSB_CONDITIONAL_FREE(iperm);
+			}
+			
+#endif /* RSB_HAVE_METIS */
+		}
+	}
+	else
+	if(should_generate_dense!=0)
+	{
+		rsb_nnz_idx_t dim = RSB_FABS(should_generate_dense),spacing=1;
+		if(want_generated_spacing>1)
+			spacing = want_generated_spacing;
+		dim *= spacing;
+
+		if(((should_generate_lband>-1) || (should_generate_uband>-1)) && should_generate_dense>0)
+		{
+			rsb_nnz_idx_t lbw=should_generate_lband,ubw=should_generate_uband;
+			nrA = ncA = dim;
+			errval = rsb__generate_blocked_banded_coo(dim/spacing,spacing,lbw,ubw,&IA,&JA,&VA,&nnz,typecode);
+			if(RSB_SOME_ERROR(errval))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+		}
+		else
+		{
+		if(should_generate_dense>0)
+		{
+			RSBENCH_STDOUT("Interpreting --dense as --lower-dense (full dense makes no sense for triangular solve).\n");
+			should_generate_dense = -should_generate_dense;
+			should_generate_dense_nc = 0;
+		}
+		if(should_generate_dense>0)
+		{
+			RSB_DEBUG_ASSERT( should_generate_dense_nc != 0 );
+			/* full dense, no diag */
+			nrA = dim;
+			ncA = should_generate_dense_nc * spacing;
+			errval = rsb__generate_dense_full(nrA/spacing,ncA/spacing,spacing,&IA,&JA,&VA,&nnz,typecode);
+			if(RSB_SOME_ERROR(errval))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+		}
+		else
+		{
+			/* trick: lower triangular */
+			nrA=ncA=dim;
+			errval = rsb__generate_dense_lower_triangular_coo(dim/spacing,spacing,&IA,&JA,&VA,&nnz,typecode);
+			if(RSB_SOME_ERROR(errval))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER); /* 20121223	*/
+		}
+		}
+
+		if(want_sort_after_load)	
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORTED_INPUT);
+
+		if(want_as_symmetric)
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_SYMMETRIC);
+	} /* should_generate_dense */
+have_va_ia_ja:
+	RSB_DEBUG_ASSERT( VA != NULL );
+	RSB_DEBUG_ASSERT( IA != NULL );
+	RSB_DEBUG_ASSERT( JA != NULL );
+	r_flags = flags;
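+			/* spsv benchmarking needs a strictly lower or strictly upper
+			 * triangular matrix: detect triangularity and bail out otherwise */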
+			flags = rsb__do_detect_and_add_triangular_flags(IA,JA,nnz,flags);
+			if(
+		(RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER_TRIANGULAR) && RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER_TRIANGULAR)) ||
+		(!RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER_TRIANGULAR)&&!RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER_TRIANGULAR))
+			)
+			{
+				RSB_ERROR("Matrix contains both upper and lower elements ? It is not suited for spsv_uxua, then!\n");
+				errval = RSB_ERR_CORRUPT_INPUT_DATA;	/* uhm */
+				{RSB_ERROR(RSB_ERRM_ES);goto err;}
+			}
+
+	/* CONDITIONALLY, PROCESSING THE INPUT */
+	if(!b_r_filename)
+	{
+		if(want_column_expand)
+		{
+			errval = rsb__do_column_expand(JA,nnz,&ncA,want_column_expand);
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR(RSB_ERRM_ES);
+				goto err;
+			}
+		}
+
+		if( pattern_only )
+			rsb__fill_with_ones(VA,typecode,nnz,1);
+
+		if( dumpout )
+		{
+			errval = rsb__test_print_coo_mm(typecode,flags,IA,JA,VA,nrA,ncA,nnz,RSB_BOOL_TRUE,RSB_DEFAULT_STREAM);
+			//COO equivalent for rsb_file_mtx_save(mtxAp,NULL);
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR(RSB_ERRM_ES);
+				goto err;
+			}
+			goto ret;
+		}
+	}
+#if 1
+	if(want_nonzeroes_distplot)
+	{
+		/* FIXME: Unfinished: printout not adequate ! */
+		/* FIXME: Shall use a separate routine for this! Please regard this code as temporary */
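+		/* the output is a self-contained gnuplot script: two cumulative nnz
+		 * population plots plus per-row/column median and stddev summary lines */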
+		rsb_coo_idx_t median_m=0,median_k=0,stdd_m=0,stdd_k=0,nzp_m=nnz/nrA,nzp_k=nnz/ncA;
+		rsb_coo_idx_t*idxv=NULL;
+		rsb_coo_idx_t mm=0;
+		rsb_nnz_idx_t cs=0;
+		rsb_bool_t po = RSB_BOOL_TRUE;
+		const int histres=100;
+		const rsb_char_t*pmsg="\n\nplot \"-\" using 1:2 title \"cumulative %s population (nnz)\"\n";
+		RSBENCH_STDOUT("set xtics rotate\n");
+		RSBENCH_STDOUT("set term postscript eps color\n");
+		RSBENCH_STDOUT("set output \"%s-distplot.eps\"\n", rsb__basename(filename));
+		RSBENCH_STDOUT("set multiplot layout 1,2 title \"%s (%d x %d, %d nnz)\"\n", rsb__basename(filename),nrA,ncA,nnz);
+
+		outnri = rhsnri = ndA = RSB_MAX(nrA,ncA);
+
+		mm=nrA<histres?1:nrA/histres;
+		idxv = rsb__calloc(sizeof(rsb_coo_idx_t)*(ndA));
+		if(!idxv)
+			goto nohists;
+
+		for(i=0;i<nnz;++i)
+			if(IA[i] < nrA && IA[i] >= 0 )
+				idxv[IA[i]]++;
+		for(i=0;i<nrA;++i)
+			if(median_m<nnz/2)
+				{ median_m+=idxv[i]; }
+			else
+				{ break; }
+		median_m=i; 
+
+		RSB_STDOUT(pmsg,"rows");
+		if(po) for(i=0;i<nrA;++i){ cs+=idxv[i]; if(i%mm==0)RSB_STDOUT("%d %d\n",i,cs);}
+		RSB_STDOUT("e\n");
+
+		mm=ncA<histres?1:ncA/histres;
+
+		for(i=0;i<nrA;++i)
+			stdd_m+=(idxv[i]-nzp_m)*(idxv[i]-nzp_m);
+		stdd_m=nrA<2?0:sqrt(stdd_m/(nrA-1));
+
+
+		for(i=0;i<ncA;++i)
+			idxv[i]=0;
+
+		for(i=0;i<nnz;++i)
+			if(JA[i] < ncA && JA[i] >= 0 )
+				idxv[JA[i]]++;
+		for(i=0;i<ncA;++i)
+			if(median_k<nnz/2)
+				{ median_k+=idxv[i]; }
+			else
+				{ break; }
+		median_k=i; 
+
+		cs=0;
+		RSB_STDOUT(pmsg,"columns");
+		if(po) for(i=0;i<ncA;++i){ cs+=idxv[i]; if(i%mm==0)RSB_STDOUT("%d %d\n",i,cs);}
+		RSB_STDOUT("e\n");
+
+		for(i=0;i<ncA;++i)
+			stdd_k+=(idxv[i]-nzp_k)*(idxv[i]-nzp_k);
+		stdd_k=ncA<2?0:sqrt(stdd_k/(ncA-1));
+
+		RSBENCH_STDOUT("unset multiplot\n");
+		RSBENCH_STDOUT("#%%:NNZ_PER_ROW_STDDEV:");/* RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(); */
+		RSBENCH_STDOUT("\t%10.0d\n",stdd_m);
+		RSBENCH_STDOUT("#%%:ROWS_MEDIAN:");/* RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(); */
+		RSBENCH_STDOUT("\t%10.0g\n",((double)median_m/(double)nrA));
+		RSBENCH_STDOUT("#%%:NNZ_PER_COL_STDDEV:");/* RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(); */
+		RSBENCH_STDOUT("\t%10.0d\n",stdd_k);
+		RSBENCH_STDOUT("#%%:COLS_MEDIAN:");/* RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(); */
+		RSBENCH_STDOUT("\t%10.0g\n",((double)median_k/(double)ncA));
+nohists:
+		RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS;
+		RSB_CONDITIONAL_FREE(idxv);
+		goto ret;
+	}
+	#endif /* 1 */
+	if(want_unordered_coo_bench)
+	{
+		struct rsb_coo_matrix_t coo;
+		rsb__fill_coo_struct(&coo,VA,IA,JA,nrA,ncA,nnz,typecode);
+		outnri = rhsnri = ndA = RSB_MAX(nrA,ncA);
+		lhs = rsb__calloc_vector(ndA*nrhs*incY,typecode);
+		rhs = rsb__calloc_vector(ndA*nrhs*incX,typecode);
+
+		if(!lhs || !rhs)
+		{
+			RSB_ERROR("problems allocating vectors");
+			RSB_CONDITIONAL_FREE(lhs); RSB_CONDITIONAL_FREE(rhs);
+			{ errval = RSB_ERR_INTERNAL_ERROR; goto err; }
+		}
+
+		if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+		for(i=0;i<times;++i)
+		{
+			if(want_inner_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+			unordered_coo_op_time = - rsb_time();
+			if((errval = rsb__do_spmv_fullword_coo(&coo,flags,rhs,lhs,alphap,betap,incX,incY,transA))!=RSB_ERR_NO_ERROR) { goto erru; }
+			unordered_coo_op_time += rsb_time();
+			unordered_coo_op_time_best = RSB_MIN_ABOVE_INF(unordered_coo_op_time_best,unordered_coo_op_time,tinf);
+			unordered_coo_op_tot_time+=unordered_coo_op_time;
+		}
+		if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+erru:
+		RSB_CONDITIONAL_FREE(lhs); RSB_CONDITIONAL_FREE(rhs);
+		if(want_verbose == RSB_BOOL_TRUE)
+		{
+			/* FIXME ! 20110427 */
+			struct rsb_mtx_t matrixs;
+			mtxAp=&matrixs;
+			rsb__init_rsb_struct_from_coo(mtxAp,&coo);
+			mtxAp->flags = RSB_FLAG_DEFAULT_COO_MATRIX_FLAGS|RSB_DO_FLAG_FILTEROUT((flags),RSB_DO_FLAGS_EXTRACT_STORAGE(flags));
+			rsb__do_set_init_storage_flags(mtxAp,mtxAp->flags);
+			raw_Mflops=nnz*2;
+			RSBENCH_STDOUT("%%:UNORDERED_COO_PERFORMANCE:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+			RSBENCH_STDOUT("\t%10.2lf\n",((rsb_time_t)raw_Mflops)/(RSB_REAL_MILLION*unordered_coo_op_time_best));
+			mtxAp=NULL;
+		}
+	}
+	/* CONDITIONALLY, PERFORMING SOME TEST ON THE INPUT */
+	if(want_accuracy_test>=1)
+	{
+		struct rsb_coo_matrix_t coo;
+		rsb__fill_coo_struct(&coo,VA,IA,JA,nrA,ncA,nnz,typecode);
+		RSB_DO_ERROR_CUMULATE(errval,rsb__do_spmv_accuracy_test(&coo,ca,cn,flags));
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_ERROR("accuracy based test failed!\n");
+			goto err;
+		}
+		if(want_accuracy_test>1)
+		{
+			goto done;
+		}
+	}
+
+		if( (flags & RSB_FLAG_QUAD_PARTITIONING) && g_all_flags==1)
+		{
+			int /*ci=0,*/hi=0,oi=0;
+			fn=0;
+			for(ci=0;ci<3;++ci)
+/*			for(di=0;di<2;++di)*/
+			for(oi=0;oi<2;++oi)
+			for(hi=0;hi<2;++hi)
+/*			for(li=0;li<2;++li)*/
+			{
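+				/* enumerate flag combinations: oi toggles halfword COO indices,
+				 * hi toggles halfword CSR indices; ci spans the (currently
+				 * disabled) cache-size variants */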
+#if 0
+				flagsa[di+hi*2+li*4+ci*8]=flags;
+				//RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],hi?RSB_FLAG_USE_HALFWORD_INDICES_COO:0);
+				RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],hi?RSB_FLAG_USE_HALFWORD_INDICES_CSR:0);
+#ifdef RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES
+				RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],li?RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES:0);
+#endif /* RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES */
+				RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],di?RSB_FLAG_RECURSIVE_SUBDIVIDE_MORE_ON_DIAG:0);
+	
+#if 0
+				RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],ci==1?RSB_FLAG_RECURSIVE_HALF_DETECTED_CACHE:0);
+				RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],ci==2?RSB_FLAG_RECURSIVE_DOUBLE_DETECTED_CACHE:0);
+#endif /* 0 */
+#else /* 0 */
+				flagsa[fn]=flags;
+				//RSB_DO_FLAG_ADD(flagsa[fn],li?RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES:0);
+				//RSB_DO_FLAG_ADD(flagsa[fn],di?RSB_FLAG_RECURSIVE_SUBDIVIDE_MORE_ON_DIAG:0);
+				RSB_DO_FLAG_ADD(flagsa[fn],oi?RSB_FLAG_USE_HALFWORD_INDICES_COO:0);
+				RSB_DO_FLAG_ADD(flagsa[fn],hi?RSB_FLAG_USE_HALFWORD_INDICES_CSR:0);
+#if 0
+				RSB_DO_FLAG_ADD(flagsa[fn],ci==1?RSB_FLAG_RECURSIVE_HALF_DETECTED_CACHE:0);
+				RSB_DO_FLAG_ADD(flagsa[fn],ci==2?RSB_FLAG_RECURSIVE_DOUBLE_DETECTED_CACHE:0);
+#endif /* 0 */
+#endif /* 0 */
+				++fn;
+			}
+		}
+		else
+		{
+			fn=1;
+			flagsa[fn-1]=flags;
+		}
+
+		if(!want_perf_dump)
+		if(!( RSB_MKL_APPROPRIATE_AT_TIME_SPEC( want_autotuner ) || RSB_MKL_APPROPRIATE_AT_TIME_SPEC( merge_experimental ) || RSB_MKL_APPROPRIATE_AT_TIME_SPEC( split_experimental ) )) /* otherwise pr__set.. cannot distinguish samples */
+		if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING))
+		{
+			/* adds a no-recursion flag case */
+			RSB_DO_FLAG_DEL(flags,RSB_FLAG_QUAD_PARTITIONING);
+/*			if(fn)*/
+/*				flags=flagsa[fn-1];	*//* copy from the last */
+/*			else*/
+/*				flagsa[fn]=flags;	*//* impose these flags */
+			for(fi=fn;fi>0;--fi)
+				flagsa[fi]=flagsa[fi-1];/* shift forward */
+			RSB_DO_FLAG_DEL(flagsa[0],RSB_FLAG_QUAD_PARTITIONING);
+			++fn;	/* add ours */
+		}
+
+		for(ti=0;ti<tn;++ti)
+		{
+
+	rsb_time_t op_t = RSB_TIME_ZERO;
+	rsb_time_t mct = RSB_TIME_ZERO;	/* matrix construction time */
+	rsb_time_t fet = RSB_TIME_ZERO;	/* fillin estimation time */
+
+	rsb_time_t sct = RSB_TIME_ZERO;	/* serial (if minimum number of cores is 1) matrix construction time */
+	rsb_time_t pct = RSB_TIME_ZERO;	/* parallel (if maximum number of cores > 1) matrix construction time */
+
+	rsb_time_t smt = RSB_TIME_ZERO;	/* serial multiplication time */
+	rsb_time_t pmt = RSB_TIME_ZERO;	/* parallel multiplication time */
+
+	rsb_time_t sst = RSB_TIME_ZERO;	/* serial solve time */
+	rsb_time_t pst = RSB_TIME_ZERO;	/* parallel solve time */
+	
+	rsb_time_t sest = RSB_TIME_ZERO;	/**/
+	//rsb_time_t sect = RSB_TIME_ZERO;	/**/
+	rsb_time_t ssat = RSB_TIME_ZERO;	/**/
+	rsb_time_t seit = RSB_TIME_ZERO;	/**/
+	rsb_time_t scpt = RSB_TIME_ZERO;	/**/
+
+	rsb_time_t mest = RSB_TIME_ZERO;	/**/
+	rsb_time_t mect = RSB_TIME_ZERO;	/**/
+	rsb_time_t msat = RSB_TIME_ZERO;	/**/
+	rsb_time_t meit = RSB_TIME_ZERO;	/**/
+	rsb_time_t mcpt = RSB_TIME_ZERO;	/**/
+
+	rsb_time_t me_best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME, me_at_best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;     /* experimental merge */
+	rsb_time_t at_best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME, at_mkl_csr_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME; /* experimental merge */
+	rsb_thread_t at_mkl_csr_nt = RSB_AT_THREADS_AUTO, me_at_nt = RSB_AT_THREADS_AUTO;
+#if RSB_EXPERIMENTAL_WANT_BEST_TIMES
+	rsb_time_t best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t base_best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;	/* for comparative benchmarking */
+	rsb_time_t serial_best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;	/* for comparative benchmarking */
+	rsb_time_t spmv_t = RSB_TIME_ZERO;
+	rsb_time_t tot_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+	rsb_time_t spsv_d_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+	rsb_time_t spsv_spmv_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+	rsb_time_t best_spsv_spmv_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t spsv_f_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+#endif
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+	struct rsb_pci_t rsb_pci;
+#endif /* RSB_WANT_PERFORMANCE_COUNTERS */
+#if RSB_WANT_MKL
+	void *M_VA=NULL; MKL_INT *M_IA=NULL,*M_JA=NULL;
+	void *M_VAC=NULL; MKL_INT *M_IAC=NULL,*M_JAC=NULL;
+	rsb_time_t mkl_coo2csr_time = RSB_TIME_ZERO;
+	rsb_time_t mkl_coo_op_tot_time = RSB_TIME_ZERO;
+	rsb_time_t mkl_coo_op_time = RSB_TIME_ZERO;
+	rsb_time_t mkl_coo_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t mkl_coo_op_time_best_serial = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t mkl_csr_op_tot_time = RSB_TIME_ZERO;
+	rsb_time_t mkl_csr_op_time = RSB_TIME_ZERO;
+	rsb_time_t mkl_csr_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t mkl_csr_op_time_best_serial = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+
+	rsb_time_t mkl_gem_op_tot_time = RSB_TIME_ZERO;
+	rsb_time_t mkl_gem_op_time = RSB_TIME_ZERO;
+	rsb_time_t mkl_gem_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t mkl_gem_op_time_best_serial = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	struct rsb_ts_t btpms[2]; /* first is tuned, second is not */
+	rsb_flags_t mif = ( mib == 0 ) ? RSB_FLAG_NOFLAGS : RSB_FLAG_FORTRAN_INDICES_INTERFACE; /* MKL index flags */
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+	struct rsb_pci_t mkl_coo_pci,mkl_csr_pci,mkl_gem_pci;
+#endif /* RSB_WANT_PERFORMANCE_COUNTERS */
+#endif /* RSB_WANT_MKL */
+	struct rsb_attr_t attr;	/* this structure is rather large (100k, as of 20140223); with future parameters it shall be rather heap allocated */
+	struct rsb_ts_t otpos, btpos;
+
+	RSB_BZERO_P((&otpos));
+	RSB_BZERO_P((&btpos));
+	RSB_BZERO_P((&attr));
+		transA = transAo;
+		if(ti>0)
+			transA = rsb__do_transpose_transposition(transAo);
+		if(ti==2)
+			transA = RSB_TRANSPOSITION_C;
+		if(!  (
+			( RSB_IS_MATRIX_TYPE_COMPLEX(typecode) && (ti!=0) && ( flags & RSB_FLAG_SOME_SYMMETRY ) )  ||
+		       ((!RSB_IS_MATRIX_TYPE_COMPLEX(typecode))&& (ti!=0) && ( flags & RSB_FLAG_SYMMETRIC) )  ||
+		       ((!RSB_IS_MATRIX_TYPE_COMPLEX(typecode))&& (ti==2) &&!( flags & RSB_FLAG_SOME_SYMMETRY) )  ||
+			g_allow_any_tr_comb
+		))
+		if(tn>1)
+		{
+			RSBENCH_STDOUT("# multi-transpose benchmarking -- now using transA = %c.\n",RSB_TRANSPOSITION_AS_CHAR(transA));
+		}
+		if( /* transA != RSB_TRANSPOSITION_N */ ti>0 && RSB_DO_FLAG_HAS(flags,RSB_FLAG_SYMMETRIC) )
+		{
+			RSBENCH_STDOUT("# symmetric matrix --- skipping transposed benchmarking\n");
+			continue;
+		}
+		for(fi=0;fi<fn;++fi)
+		for(brvi=-1;brvi<brl;++brvi)
+		for(bcvi=-1;bcvi<bcl;++bcvi)
+#ifndef  RSB_COORDINATE_TYPE_H
+		if(!(flagsa[fi] & RSB_FLAG_USE_HALFWORD_INDICES_CSR))
+#endif /* RSB_COORDINATE_TYPE_H */
+		for(ci=0;ci<cn;++ci)	/* here just for should_recycle_matrix */
+		if(!(ca[ci]>1 && !(RSB_DO_FLAG_HAS(flagsa[fi],RSB_FLAG_QUAD_PARTITIONING)))) /* no need for more than one core without recursion */
+		{
+			cc = ca[ci];
+	rsb_time_t diag_op_tot_time = RSB_TIME_ZERO;
+	rsb_time_t diag_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t getrow_op_tot_time = RSB_TIME_ZERO;
+	rsb_time_t getrow_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t diag_op_time_best_serial = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t getrow_op_time_best_serial = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+			should_recycle_matrix=(ci>0)?RSB_BOOL_TRUE:RSB_BOOL_FALSE;
+			/* if this is the special "vanilla CSR" run after/before recursive runs ... */
+			if(rsb__set_num_threads(cc)!=cc)
+			{
+				RSB_ERROR("failed setting %d threads!\n",cc);
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto err;
+			}
+			flags=flagsa[fi];
+			if(cn>1 && !RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING))
+				RSB_DO_FLAG_DEL(flags,RSB_FLAG_USE_HALFWORD_INDICES);
+
+			best_spsv_spmv_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+			best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+			op_t = RSB_TIME_ZERO;
+#if RSB_EXPERIMENTAL_WANT_BEST_TIMES
+			best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+			spmv_t = RSB_TIME_ZERO;
+			tot_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+			spsv_d_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+			spsv_spmv_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+			spsv_f_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+#endif /* RSB_EXPERIMENTAL_WANT_BEST_TIMES */
+
+			if(brl>0 && bcl>0)
+			{
+				/* this is a trick and an unclean programming practice */
+				if(brvi==-1)++brvi;
+				if(bcvi==-1)++bcvi;
+				br = brv[brvi];
+				bc = bcv[bcvi];
+			}
+			else
+			{	
+				/* br, bc already set */
+			}
+
+#if RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS 
+			/*	
+			* FIXME : laziness
+			*/
+						if( br!=1 || bc!=1 || !rsb__util_are_flags_suitable_for_optimized_1x1_constructor(flags) )
+#endif /* RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS */
+#if RSB_WANT_RSB_AS_ONLY_ALLOWED_FORMAT
+			if(0)
+#endif /* RSB_WANT_RSB_AS_ONLY_ALLOWED_FORMAT */
+			{
+				p_r = rsb__util_get_partitioning_array(br,nrA,&M_b,flags);
+				p_c = rsb__util_get_partitioning_array(bc,ncA,&K_b,flags);
+
+				if((! p_r) || (! p_c))
+				{
+					RSB_ERROR(RSB_ERRM_ES);
+					errval = RSB_ERR_ENOMEM;
+					goto erri;
+				}
+			}
+
+			if(  ( br!=1 || bc!=1 || p_r || p_c ) && ( flags & RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR ))
+			{
+				/*  */
+				RSB_WARN("WARNING : disabling in place allocation flag : it is only allowed for 1x1!\n");
+				RSB_DO_FLAG_DEL(flags,RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR) ;
+			}
+
+
+
+
+#define RSB_WANT_SPSV_STABILITY_FIX 1
+#if RSB_WANT_SPSV_STABILITY_FIX
+#if 0
+			/* FIXME : fix for numerical stability */
+#if 0
+			if(RSB_SOME_ERROR(rsb__fill_with_ones(VA,typecode,nnz,1))) { errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+#else /* 0 */
+			/* FIXME : temporary fix */
+			double uthreshold=.0001;
+			double athreshold=10000000;
+			if(RSB_SOME_ERROR(rsb__util_drop_to_zero_if_under_threshold(VA,typecode,nnz,&uthreshold))) { errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+			if(RSB_SOME_ERROR(rsb__util_drop_to_zero_if_above_threshold(VA,typecode,nnz,&athreshold))) { errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+#endif /* 0 */
+#else /* 0 */
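+			/* active fix: overwrite each diagonal entry with one, to keep the
+			 * triangular solve numerically stable */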
+			{rsb_nnz_idx_t n;for(n=0;n<nnz;++n)if(IA[n]==JA[n])rsb__fill_with_ones(((rsb_byte_t*)VA)+RSB_SIZEOF(typecode)*n,typecode,1,1);}
+#endif /* 0 */
+#endif /* RSB_WANT_SPSV_STABILITY_FIX */
+
+			if(!mtxAp)
+			{
+				int mci=0;
+				if(b_r_filename)
+				{
+					rsb_err_t errval_;
+					mct = - rsb_time();
+					mtxAp = rsb__load_matrix_file_as_binary(b_r_filename,&errval_);
+					mct += rsb_time();
+					errval = errval_;
+					if((RSB_SOME_ERROR(errval)) || !mtxAp )
+					{
+						RSB_ERROR(RSB_ERRM_ES);
+						goto err;
+					}
+					else
+					{
+						nnz = mtxAp->nnz;
+						nrA = mtxAp->nr;
+						ncA = mtxAp->nc;
+					}
+
+					filename=b_r_filename;// for info purposes
+					flags=mtxAp->flags;
+				}
+				else
+				{
+				mect=mest=msat=meit=mcpt = RSB_TIME_ZERO;	/* resetting all values */
+
+				for(mci=0;mci<repeat_construction;++mci)
+				{
+					if(repeat_construction>1 && mci==0)
+						RSBENCH_STDOUT("# will repeat constructor %d times\n",repeat_construction);
+					mct = - rsb_time();
+					if(want_in_place_assembly)
+					{
+						mtxAp = rsb__do_mtx_alloc_from_coo_inplace(VA,IA,JA,nnz,typecode,nrA,ncA,br,bc,flags,&errval);
+					}
+					else
+						mtxAp = rsb_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,nrA,ncA,br,bc,flags,&errval);
+					mct += rsb_time();
+					if((RSB_SOME_ERROR(errval)) || !mtxAp )
+					{
+						RSB_ERROR(RSB_ERRM_ES);
+						goto err;
+					}
+
+/*					RSBENCH_STDOUT("running constructor for time %d/%d\n",mci+1,repeat_construction);*/
+					if(mect == RSB_TIME_ZERO || mect>mtxAp->ect)
+						mect=mtxAp->ect;
+					if(mest == RSB_TIME_ZERO || mest>mtxAp->est)
+						mest=mtxAp->est;
+					if(msat == RSB_TIME_ZERO || msat>mtxAp->sat)
+						msat=mtxAp->sat;
+					if(meit == RSB_TIME_ZERO || meit>mtxAp->eit)
+						meit=mtxAp->eit;
+					if(mcpt == RSB_TIME_ZERO || mcpt>mtxAp->cpt)
+						mcpt=mtxAp->cpt;
+					if(mci != repeat_construction-1)
+					{ RSB_MTX_FREE(mtxAp);	/* we only wanted timings */ }
+					else
+					{
+						/* we keep the mtxAp, and set best individual times */
+						mtxAp->est=mest;
+						mtxAp->ect=mect;
+						mtxAp->sat=msat;
+						mtxAp->eit=meit;
+						mtxAp->cpt=mcpt;
+					}
+				}
+				}
+				if(ci==0 && sct == RSB_TIME_ZERO)
+					//sct=mct;
+					sct=mtxAp->tat;
+				if(ci==cn-1 && pct == RSB_TIME_ZERO)
+					//pct=mct;
+					pct=mtxAp->tat;
+			} /* !mtxAp */
+			
+			if(do_perform_ddc == RSB_BOOL_TRUE)
+			{
+			if(rsb__is_square(mtxAp))
+			{
+				/* FIXME: experimental, new. should write a test with octave for this */
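+				/* Test: RS_i = sum_j |a_ij| (infinity-norm row sums); adding
+				 * -2*|a_ii| makes RS_i < 0 for every i exactly when
+				 * |a_ii| > sum_{j!=i} |a_ij|, i.e. strict diagonal dominance. */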
+				void * DV = rsb__calloc_vector(mtxAp->nr,mtxAp->typecode);
+				void * RS = rsb__calloc_vector(mtxAp->nr,mtxAp->typecode);
+				rsb_aligned_t mtwo[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+				if(!RS||!DV) { errval = RSB_ERR_ENOMEM; goto noddc; }
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_infty_norm(mtxAp,RSB_TRANSPOSITION_N,RS));
+				rsb__util_set_area_to_converted_integer(mtwo,mtxAp->typecode,-2);
+				RSB_DO_ERROR_CUMULATE(errval,rsb__dodo_getdiag(mtxAp,DV));
+				RSB_DO_ERROR_CUMULATE(errval,rsb__vector_to_abs(DV,mtxAp->typecode,mtxAp->nr));
+				RSB_DO_ERROR_CUMULATE(errval,rsb__cblas_Xscal(mtxAp->typecode,mtxAp->nr,mtwo,DV,1));
+				RSB_DO_ERROR_CUMULATE(errval,rsb__cblas_Xaxpy(mtxAp->typecode,mtxAp->nr,NULL,DV,1,RS,1));
+				if(rsb__util_count_negative(RS,mtxAp->typecode,mtxAp->nr)==mtxAp->nr)
+					RSBENCH_STDOUT("#matrix is diagonally dominant\n");
+				else
+					RSBENCH_STDOUT("#matrix is not diagonally dominant\n");
+				RSBENCH_STDOUT("#diagonal dominance computed in ? s\n");
+noddc:
+				RSB_CONDITIONAL_FREE(DV); RSB_CONDITIONAL_FREE(RS);
+				if(RSB_SOME_ERROR(errval))
+					goto err;
+			}
+			else
+			{
+				RSB_ERROR("input matrix is not square: cannot perform the diagonal dominance check\n");
+			}
+			}
+
+			if( dump_graph_file )
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_print_matrix_stats(mtxAp,RSB_CONST_DUMP_DOT,dump_graph_file));
+
+			if(do_perform_ilu == RSB_BOOL_TRUE)
+			{
+				/* FIXME: experimental */
+				rsb_time_t ilut = - rsb_time();
+				RSB_STDOUT("performing EXPERIMENTAL ILU-0\n");
+				errval = rsb__prec_ilu0(mtxAp);//TODO: actually, only for CSR
+				ilut += rsb_time();
+				if(RSB_SOME_ERROR(errval))
+				{
+					RSB_ERROR(RSB_ERRM_ES);
+					goto err;
+				}
+				else
+					RSB_STDOUT("performed EXPERIMENTAL ILU-0 with success in %lg s.\n",ilut);
+				rsb_file_mtx_save(mtxAp,NULL);
+				goto ret;
+			} /* do_perform_ilu */
+
+			if(want_update && mtxAp)
+			{
+				rsb_time_t ct = - rsb_time();
+				/* FIXME: this is update, not conversion, so it should not be here */
+				errval = rsb__do_set_coo_elements(mtxAp,VA,IA,JA,nnz);
+				if(RSB_SOME_ERROR(errval))
+				{ RSB_ERROR(RSB_ERRM_ES);goto erri;}
+				ct += rsb_time();
+				/* missing check */
+				RSBENCH_STDOUT("#individual update of %d elements in assembled RSB took %2.5f s: %2.5f%% of construction time\n",nnz,ct,(100*ct)/mtxAp->tat);
+			} /* want_update */
+
+			if(want_convert && mtxAp)
+			{
+				/* FIXME: here all conversions should occur, and be benchmarked */
+				rsb_time_t ct;
+				rsb_nnz_idx_t rnz=0;
+				struct rsb_coo_matrix_t coo;
+
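+				/* sized as max(nnz,nr,nc): the same IA/JA buffers are reused below
+				 * as CSR/CSC pointer arrays, which need nr+1 (resp. nc+1) entries
+				 * (inferred from the extractions that follow) */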
+				coo.nnz = RSB_MAX(mtxAp->nnz,RSB_MAX(nrA,ncA));
+				coo.typecode=mtxAp->typecode;
+				if(rsb__allocate_coo_matrix_t(&coo)!=&coo)
+				{
+					RSB_ERROR(RSB_ERRM_ES);
+					goto errc;
+				}
+				coo.nr = mtxAp->nr;
+				coo.nc = mtxAp->nc;
+
+				ct = - rsb_time();
+				errval = rsb__do_get_rows_sparse(RSB_TRANSPOSITION_N,NULL,mtxAp,coo.VA,coo.IA,coo.JA,0,mtxAp->nr-1,&rnz,RSB_FLAG_NOFLAGS);
+				if(RSB_SOME_ERROR(errval))
+				{ RSB_ERROR(RSB_ERRM_ES);goto erri;}
+				ct += rsb_time();
+				if(RSB_SOME_ERROR(rsb__util_is_sorted_coo_as_row_major(coo.VA,coo.IA,coo.JA,coo.nnz,coo.typecode,
+					NULL,RSB_FLAG_NOFLAGS)))
+					{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+				RSBENCH_STDOUT("#extraction of %d elements in sorted COO took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+				RSBENCH_STDOUT("#extraction to unsorted COO unimplemented\n");
+				//RSBENCH_STDOUT("#extraction of %d elements in unsorted COO took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+
+				RSB_DO_ERROR_CUMULATE(errval,rsb_mtx_get_coo(mtxAp,VA,IA,JA,RSB_FLAG_C_INDICES_INTERFACE));
+
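+				/* the next two lines clear coo.JA and re-sort VA,IA,JA row-major,
+				 * so the CSR extraction below can be verified element by element */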
+				rsb__util_coo_array_set(coo.JA,coo.nnz,0);
+				RSB_DO_ERROR_CUMULATE(errval,rsb_coo_sort(VA,IA,JA,mtxAp->nnz,nrA,ncA,typecode,RSB_FLAG_NOFLAGS));
+				if(RSB_SOME_ERROR(errval))
+				{ RSB_ERROR(RSB_ERRM_ES);goto erri;}
+
+				ct = - rsb_time();
+				errval = rsb_mtx_get_csr(typecode,mtxAp, coo.VA, coo.IA, coo.JA,RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS);
+				if(RSB_SOME_ERROR(errval))
+				{ RSB_ERROR(RSB_ERRM_ES);goto erri;}
+				ct += rsb_time();
+				for(i=0;i<mtxAp->nnz;++i)if(coo.JA[i]!=JA[i]){RSB_ERROR("@%d: %d != %d!\n",i,coo.JA[i],JA[i]);errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+				if(RSB_SOME_ERROR(errval=rsb__csr_chk(coo.IA,coo.JA,coo.nr,coo.nc,coo.nnz,mib)))
+					{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+				RSBENCH_STDOUT("#extraction of %d elements in CSR took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+
+/*				ct = - rsb_time();*/
+/*				errval = rsb__do_get_coo(mtxAp,&coo.VA,&coo.IA,&coo.JA);	// FIXME : bugged ?*/
+/*				if(RSB_SOME_ERROR(errval)) goto erri;*/
+/*				ct += rsb_time();*/
+/*				if(RSB_SOME_ERROR(rsb__util_is_sorted_coo_as_row_major(coo.VA,coo.IA,coo.JA,coo.nnz,coo.typecode,*/
+/*					NULL,RSB_FLAG_NOFLAGS)))*/
+/*					{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}*/
+/*				RSBENCH_STDOUT("#extraction of %d elements in sorted COO took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);*/
+
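+				/* symmetric check for CSC: clear coo.IA, sort column-major, then
+				 * compare against the row indices returned by the CSC extraction */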
+				rsb__util_coo_array_set(coo.IA,coo.nnz,0);
+				RSB_DO_ERROR_CUMULATE(errval,rsb_coo_sort(VA,JA,IA,mtxAp->nnz,ncA,nrA,typecode,RSB_FLAG_NOFLAGS));
+				ct = - rsb_time();
+				errval = rsb__do_get_csc(mtxAp,(rsb_byte_t**) &coo.VA,&coo.JA,&coo.IA);
+				if(RSB_SOME_ERROR(errval))
+					{goto erri;}
+				ct += rsb_time();
+				for(i=0;i<mtxAp->nnz;++i)if(coo.IA[i]!=IA[i]){RSB_ERROR("@%d: %d != %d!\n",i,coo.IA[i],IA[i]);errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+				if(RSB_SOME_ERROR(rsb__csc_chk(coo.JA,coo.IA,coo.nr,coo.nc,coo.nnz,mib)))
+					{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+				RSBENCH_STDOUT("#extraction of %d elements in CSC took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+
+				{
+					struct rsb_mtx_t * cmatrix=NULL;
+					ct = - rsb_time();
+					cmatrix = rsb__clone_simple(mtxAp);
+					ct += rsb_time();
+					if(!cmatrix){errval = RSB_ERR_ENOMEM;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					if(!rsb__mtx_chk(cmatrix))
+						{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					RSB_MTX_FREE(cmatrix);
+				}
+				RSBENCH_STDOUT("#cloning of %d elements took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+				{
+					struct rsb_mtx_t * cmatrix=NULL;
+					cmatrix = rsb__clone_simple(mtxAp);
+					if(!cmatrix){errval = RSB_ERR_ENOMEM;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					ct = - rsb_time();
+					errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_rcoo(cmatrix,RSB_BOOL_FALSE);
+					ct += rsb_time();
+					if(!rsb__mtx_chk(cmatrix))
+						{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					if(
+rsb__terminal_recursive_matrix_count_with_storage_and_no_flags(cmatrix,RSB_MATRIX_STORAGE_BCOR,RSB_FLAG_USE_HALFWORD_INDICES_CSR)
+					!= rsb__terminal_recursive_matrix_count(cmatrix))
+						{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+
+					RSBENCH_STDOUT("#conversion of %d elements to RCOO took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+					RSB_MTX_FREE(cmatrix);
+				}
+
+				{
+					struct rsb_mtx_t * cmatrix=NULL;
+					struct rsb_coo_matrix_t icoo;
+					cmatrix = rsb__clone_simple(mtxAp);
+					if(!cmatrix){errval = RSB_ERR_ENOMEM;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					ct = - rsb_time();
+					errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_coo_sorted(cmatrix,&icoo);
+					ct += rsb_time();
+
+					if(RSB_SOME_ERROR(rsb__util_is_sorted_coo_as_row_major(icoo.VA,icoo.IA,icoo.JA,icoo.nnz,icoo.typecode,NULL,RSB_FLAG_NOFLAGS)))
+						{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					RSBENCH_STDOUT("#conversion of %d elements to sorted COO took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+					rsb__destroy_coo_matrix_t(&icoo);
+				}
+				
+				if(!RSB_DO_TOOFEWNNZFORCSR(mtxAp->nnz,mtxAp->nr))
+				{
+					struct rsb_mtx_t * cmatrix=NULL;
+					struct rsb_coo_matrix_t icoo;
+					cmatrix = rsb__clone_simple(mtxAp);
+					if(!cmatrix){errval = RSB_ERR_ENOMEM;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					ct = - rsb_time();
+					errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_csr(cmatrix,&icoo);
+					ct += rsb_time();
+					if(RSB_SOME_ERROR(rsb__csr_chk(icoo.IA,icoo.JA,icoo.nr,icoo.nc,icoo.nnz,mib)))
+						{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					RSBENCH_STDOUT("#conversion of %d elements to CSR took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+					rsb__destroy_coo_matrix_t(&icoo);
+				}
+
+				if(!RSB_DO_TOOFEWNNZFORCSR(mtxAp->nnz,mtxAp->nc))
+				{
+					struct rsb_mtx_t * cmatrix=NULL;
+					struct rsb_coo_matrix_t icoo;
+					cmatrix = rsb__clone_simple(mtxAp);
+					if(!cmatrix){errval = RSB_ERR_ENOMEM;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					ct = - rsb_time();
+					errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_csc(cmatrix,&icoo);
+					ct += rsb_time();
+					if(RSB_SOME_ERROR(rsb__csc_chk(icoo.JA,icoo.IA,icoo.nr,icoo.nc,icoo.nnz,mib)))
+						{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+
+					RSBENCH_STDOUT("#conversion of %d elements to CSC took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+					rsb__destroy_coo_matrix_t(&icoo);
+				}
+
+				{
+					struct rsb_mtx_t * cmatrix=NULL;
+					struct rsb_coo_matrix_t icoo;
+					cmatrix = rsb__clone_simple(mtxAp);
+					if(!cmatrix){errval = RSB_ERR_ENOMEM;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					ct = - rsb_time();
+					errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_coo_unsorted(cmatrix,&icoo);
+					ct += rsb_time();
+
+					RSBENCH_STDOUT("#conversion of %d elements to unsorted COO took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+					rsb__destroy_coo_matrix_t(&icoo);
+				}
+errc:
+				rsb__destroy_coo_matrix_t(&coo);
+			} /* want_convert */
+
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR("problems assembling / converting matrix\n");
+				goto erri;
+			}
+
+			if(!mtxAp)
+			{
+				errval = RSB_ERR_INTERNAL_ERROR;
+				RSB_ERROR("problems assembling matrix\n");
+				goto erri;
+			}
+
+			totht -= rsb_time();
+			if(!rsb__mtx_chk(mtxAp))
+			{
+				RSB_ERROR("matrix does not seem to be built correctly\n");
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+			totht += rsb_time();
+
+
+			if(zsort_for_coo)
+				rsb__do_zsort_coo_submatrices(mtxAp);
+			if(reverse_odd_rows)
+				rsb__do_reverse_odd_rows(mtxAp);
+
+			//rsb_file_mtx_save(mtxAp,NULL);
+			//rsb__dump_blocks(mtxAp);
+
+			if(b_w_filename || csr_w_filename)
+			{
+				const char * w_filename = b_w_filename ;
+				rsb_dump_flags_t dflags = RSB_CONST_DUMP_RSB;
+
+				if(csr_w_filename)
+				{
+					w_filename = csr_w_filename;
+					dflags = RSB_CONST_DUMP_CSR;
+				}
+
+				frt = -rsb_time();
+				errval = rsb__do_print_matrix_stats(mtxAp,dflags,w_filename);
+				frt += rsb_time();
+				rsb_perror(NULL,errval);
+				if(RSB_SOME_ERROR(errval)) { RSB_PERR_GOTO(err,RSB_ERRM_NO_XDR); }
+				RSB_STDOUT("#file output of %s took %lf s (%.0lf nnz, %.0lf nnz/s ) (%.5lf MB/s ) \n",rsb__basename(w_filename),frt,
+					(((double)mtxAp->nnz)),
+					(((double)mtxAp->nnz)/frt),
+					(((double)rsb_sys_filesize(w_filename))/(frt*RSB_INT_MILLION))
+				);
+				goto ret;
+			}
+
+			if(dumpout_internals)
+			{
+				errval = rsb__do_print_matrix_stats(mtxAp,RSB_CONST_DUMP_RECURSION,NULL);
+				if(RSB_SOME_ERROR(errval))goto err;
+				//goto ret; /* we want to continue */
+			}
+
+			errval = rsb__get_blocking_size(mtxAp,&br,&bc);
+
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR("problems getting blocking size");
+				goto erri;
+			}
+
+			/* NOTE: the matrix constructor could have removed duplicates or zeros */
+			/* nnz=mtxAp->nnz; */ /* 20120922 commented out: in case of removed entries, it would remember this number in spite of unchanged IA,JA,VA arrays */ 
+			if(!RSB_IS_VALID_NNZ_COUNT(nnz)){errval = RSB_ERR_INTERNAL_ERROR;goto erri;}
+			/* NOTE: if loading from a binary dump, we need to set nrA,ncA */
+			nrA = mtxAp->nr;
+			ncA = mtxAp->nc;
+			ndA = RSB_MAX(nrA,ncA);
+			outnri = rhsnri = ndA;
+			ldX = (RSB_DOES_NOT_TRANSPOSE(transA) ? nrA : ncA) * incX; 	/* FIXME: still unused, e.g. in rsb__do_spmm_general */
+			ldY = (RSB_DOES_NOT_TRANSPOSE(transA) ? ncA : nrA) * incY; 
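+			/* note: the br/bc padding in the allocations below presumably leaves
+			 * room for blocked kernels touching slightly past the logical vector
+			 * end (an assumption, not verified here) */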
+			lhs = rsb__calloc((mtxAp->el_size*(ndA+br))*nrhs*incY);
+			rhs = rsb__calloc((mtxAp->el_size*(ndA+bc))*nrhs*incX);
+
+			if(!lhs || !rhs)
+			{
+				RSB_ERROR("problems allocating vectors");
+				RSB_CONDITIONAL_FREE(lhs);
+				RSB_CONDITIONAL_FREE(rhs);
+				{ errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+			}
+
+			if(RSB_SOME_ERROR(rsb__vectors_reinit(rhs,lhs,typecode,ndA,ndA,incX,incY))) { errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+			if(merge_experimental || split_experimental || just_enter_tuning) /* FIXME: pass parameter */
+			{
+				struct rsb_mtx_t*mtxOp = NULL;
+				int wvmbat = RSB_AUT0_TUNING_SILENT; /* wanted verbosity in merge based autotuning */
+				int eps = 0; /* effective partitioning steps */
+				rsb_time_t btt = RSB_TIME_ZERO; /* blocks tuning time */
+				rsb_submatrix_idx_t maxms = merge_experimental, maxss = split_experimental;
+				int maxr = RSB_CONST_AUTO_TUNING_ROUNDS;
+				enum rsb_op_t op = rsb_op_spsvlt;
+				int mintimes = RSB_AT_MIN_TIMES/*RSB_AT_NTIMES_AUTO*/;
+				rsb_time_t maxtime = /* RSB_AT_TIME_AUTO*/ RSB_AT_MAX_TIME;
+				struct rsb_mtx_t mtxA = *mtxAp;
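+				/* shallow copy: keeps a handle to the original matrix for the
+				 * rsb__pr_set record below, in case tuning replaces mtxAp */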
+
+				/* please note at_mkl_csr_nt in the following... */
+				if(maxms < 0 || maxss < 0) { at_mkl_csr_nt = me_at_nt = RSB_THREADS_AUTO; }
+				if(maxms < 0) maxms *= -1;
+				if(maxss < 0) maxss *= -1;
+				
+				RSBENCH_STDOUT("RSB Sparse Blocks Autotuner invoked requesting max %d splits and max %d merges in %d rounds, threads spec.%d (specify negative values to enable threads tuning).\n",maxss,maxms,maxr,me_at_nt);
+
+				if (want_verbose_tuning > 0)
+					wvmbat = RSB_AUT0_TUNING_VERBOSE;
+				if (want_verbose_tuning > 1)
+					wvmbat = RSB_AUT0_TUNING_QUATSCH ;
+				if (want_verbose_tuning > 2)
+					wvmbat = RSB_AUT0_TUNING_QUATSCH + 1;
+				btt -= rsb_time(); 
+
+				if( just_enter_tuning == 0 || ( merge_experimental == 0 && split_experimental == 0 ) )
+					maxr = 0;
+				mtxOp = mtxAp;
+				errval = rsb__tune_spxx(&mtxOp,NULL,&me_at_nt,maxr,maxms,maxss,RSB_CONST_MS_AT_AUTO_STEPS,RSB_AUT0_TUNING_DEFAULT_TIMES,maxtime,transA,alphap,NULL,nrhs,order,rhs,rhsnri,betap,lhs,outnri,op,&eps,&me_best_t,&me_at_best_t,wvmbat,rsb__basename(filename),&attr,&otpos,&btpos);
+
+				btt += rsb_time(); 
+				tottt += btt;
+				if(want_perf_dump) /* FIXME: shall give only values from the tuning routine */
+				if(RSB_DO_FLAG_HAS(/*mtxAp->*/flags,RSB_FLAG_QUAD_PARTITIONING))
+					rsb__pr_set(rspr, &mtxA, me_at_best_t<me_best_t?mtxOp:NULL, filenamei, ci, incXi, incYi, nrhsi, typecodesi, ti, transA, me_best_t, RSB_CONST_IMPOSSIBLY_BIG_TIME, me_at_best_t, RSB_CONST_IMPOSSIBLY_BIG_TIME, me_at_nt, RSB_THREADS_AUTO, btt, eps, &otpos, &btpos, NULL, NULL);
+				if( mtxAp != mtxOp && mtxOp )
+			 	{
+					RSBENCH_STDOUT("RSB Autotuner suggested a new clone.\n");
+#if RSB_AT_DESTROYS_MTX
+					mtxAp = mtxOp;
+#else  /* RSB_AT_DESTROYS_MTX */
+#if 1
+ 					/* FIXME: this is to have mtxAp address constant. */
+					errval = rsb__mtx_transplant_from_clone(&mtxAp, mtxOp);
+					mtxOp = NULL;
+					if(RSB_SOME_ERROR(errval)) { errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+#else
+				 	RSB_MTX_FREE(mtxAp); mtxAp = mtxOp;
+#endif
+#endif /* RSB_AT_DESTROYS_MTX */
+				 }
+			}
+
+			if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING))
+			if(RSB_MKL_APPROPRIATE_AT_TIME_SPEC( want_autotuner ))
+			{
+				rsb_int_t otn = wat;
+				rsb_int_t*otnp = NULL;
+				rsb_real_t sf = RSB_REAL_ZERO;
+				rsb_time_t att = - rsb_time();
+				struct rsb_mtx_t * mtxOp = NULL;
+				struct rsb_mtx_t ** mtxOpp = NULL;
+				enum rsb_op_t op = rsb_op_spsvlt;
+
+				if(wat >  0)
+					otnp = &otn; /* starting thread suggestion */
+				if(wat == 0)
+				{
+					otnp = NULL; /* current thread count */
+					mtxOpp = &mtxOp; /* matrix structure tuning */
+				}
+				if(wat <  0)
+				{
+					otn = -wat; /* ;-) */
+					otnp = &otn; /* starting thread suggestion */
+					mtxOpp = &mtxOp; /* matrix structure tuning */
+				}
+				errval = rsb__tune_spxx(mtxOpp, &sf, otnp, wai, 0, 0, RSB_CONST_MS_AT_AUTO_STEPS, RSB_AUT0_TUNING_DEFAULT_TIMES, want_autotuner, transA, alphap, mtxAp, nrhs, order, rhs, rhsnri, betap, lhs, outnri, op , NULL, NULL, NULL, wavf, rsb__basename(filename), &attr, &otpos, &btpos);
+				if(mtxOpp && *mtxOpp)
+				{
+					RSBENCH_STDOUT("RSB Autotuner suggested a new matrix: freeing the existing one.\n");
+					RSB_MTX_FREE(mtxAp);
+					mtxAp = mtxOp;
+					mtxOp = NULL;
+					mtxOpp = NULL;
+				}
+				att += rsb_time();
+				RSBENCH_STDOUT("RSB Autotuner took %lg s and estimated a speedup of %lf x\n",att,sf);
+				if(wat && otn > 0)
+				{
+					/* FIXME: this breaks consistency! Shall skip further cycles!  */
+					RSBENCH_STDOUT("Setting autotuning suggested thread count of %d (will skip further thread number configurations!)\n",otn);
+					/* rsb__set_num_threads(otn); */
+					RSB_DO_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_EXECUTING_THREADS,&otn,errval);
+					if(want_ancillary_execs == RSB_BOOL_TRUE)
+					if(incX == 1 && incY == 1)
+					{
+						totatt -= rsb_time();
+						RSBENCH_STDOUT("# Post-autotuning performance recheck:\n");
+						/* errval = */ rsb__do_bench_spxm(NULL,NULL,transA,alphap,mtxAp,nrhs,order,rhs,rhsnri,betap,lhs,outnri,RSB_AT_TIME_AUTO,RSB_AT_NTIMES_AUTO,op,10,RSB_AUT0_TUNING_QUATSCH,NULL,NULL); /* just for check purposes */
+						totatt += rsb_time();
+					}
+					cc=otn;cl=ci+1;
+				}
+			}	/* want_autotuner */
+
+			if(RSB_SOME_ERROR(errval)) { errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+				if(n_dumpres)
+				{
+					RSBENCH_STDOUT("##RSB LHS %d elements pre-peek:\n",n_dumpres);
+					rsb__debug_print_vector(rhs,RSB_MIN(ndA*nrhs,n_dumpres),typecode,incX);
+				}
+				if(n_dumprhs)
+				{
+					RSBENCH_STDOUT("##RSB RHS %d elements pre-peek:\n",n_dumprhs);
+					rsb__debug_print_vector(rhs,RSB_MIN(ndA*nrhs,n_dumprhs),typecode,incX);
+				}
+			if ( times >= 0 ) /* benchmark of spsv_uxua */
+			{
+				if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("POST_RSB_SPMV_",0,times,NULL);
+				op_t = - rsb_time();
+				RSB_TM_LIKWID_MARKER_R_START("RSB_SPMV");
+				for(i=0;i<times;++i)  /* benchmark loop of spsv_uxua begin */
+				{
+#if 0
+	{
+				/* an extreme debugging measure */
+				rsb_nnz_idx_t ii;
+				if(RSB_SOME_ERROR(rsb__cblas_Xscal(mtxAp->typecode,ndA,NULL,rhs,incX))) { errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+				for(ii=0;ii<nnz;++ii)rsb__util_increase_by_one(rhs,IA[ii],typecode);
+				RSB_DO_ERROR_CUMULATE(errval,rsb__xcopy(lhs,rhs,0,0,mtxAp->nr,mtxAp->el_size));
+	}
+#else /* 0 */
+				if(RSB_SOME_ERROR(rsb__fill_with_ones(rhs,mtxAp->typecode,ndA,incX))){ errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+				RSB_DO_ERROR_CUMULATE(errval,rsb__xcopy(lhs,rhs,0,0,mtxAp->nr,mtxAp->el_size));
+#endif /* 0 */
+#if RSB_EXPERIMENTAL_WANT_BEST_TIMES
+				spsv_d_t -= rsb_time();
+
+				if((errval = rsb__do_spsv_general(transA,alphap,mtxAp,lhs,incX,lhs,incY,RSB_OP_FLAG_INFINITE_PARALLELISM_EMULATE RSB_OUTER_NRHS_SPMV_ARGS_IDS))!=RSB_ERR_NO_ERROR)
+				{
+					RSB_ERROR(RSB_ERRM_ES);
+					goto err;
+				}
+
+				spsv_d_t += rsb_time();
+				if(RSB_SOME_ERROR(rsb__fill_with_ones(rhs,mtxAp->typecode,ndA,incX))){ errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+				RSB_DO_ERROR_CUMULATE(errval,rsb__xcopy(lhs,rhs,0,0,mtxAp->nr,mtxAp->el_size));
+
+				spsv_spmv_t -= rsb_time();
+				/* y <- y + A x */
+				if((errval = rsb__do_spmm_general(mtxAp,rhs,lhs,&pone[0],&pone[0],incX,incY,transA,RSB_OP_FLAG_DEFAULT,order RSB_OUTER_NRHS_SPMV_ARGS_IDS))!=RSB_ERR_NO_ERROR)
+					goto err;
+				spsv_spmv_t += rsb_time();
+				best_spsv_spmv_t = RSB_MIN_ABOVE_INF(spsv_spmv_t,best_spsv_spmv_t,tinf);
+				if(RSB_SOME_ERROR(rsb__fill_with_ones(rhs,mtxAp->typecode,ndA*nrhs,incX))){ errval = RSB_ERR_INTERNAL_ERROR; goto erri; } 
+				RSB_DO_ERROR_CUMULATE(errval,rsb__xcopy(lhs,rhs,0,0,mtxAp->nr,mtxAp->el_size));
+
+				spsv_f_t -= rsb_time();
+				if(want_ancillary_execs == RSB_BOOL_TRUE)
+				if((errval = rsb__do_spsv_general(transA,alphap,mtxAp,lhs,incX,lhs,incY,RSB_OP_FLAG_FAKE_LOCK RSB_OUTER_NRHS_SPMV_ARGS_IDS))!=RSB_ERR_NO_ERROR)
+				{
+					RSB_ERROR(RSB_ERRM_ES);
+					goto err;
+				}
+				/* FIXME: if RSB_OUTER_NRHS_SPMV_ARGS_IDS defined to empty string, will not handle properly nrhs! */
+#if 0
+				if((errval = rsb__do_spsv_general(transA,alphap,mtxAp,lhs,incX,lhs,incY,RSB_OP_FLAG_DEFAULT RSB_OUTER_NRHS_SPMV_ARGS_IDS))!=RSB_ERR_NO_ERROR)/* mop is spsv_uxua*/
+				{
+					RSB_ERROR(RSB_ERRM_ES);
+					goto err;
+				}
+#endif
+				spsv_f_t += rsb_time();
+				/* if(RSB_SOME_ERROR(rsb__fill_with_ones(rhs,mtxAp->typecode,ndA,incX))){ errval = RSB_ERR_INTERNAL_ERROR; goto erri; } */
+				for(nrhsl=0;nrhsl<nrhs;++nrhsl)
+				{
+					rsb__util_set_array_to_converted_integer(((rsb_byte_t*)rhs)+mtxAp->el_size*ndA*nrhsl,mtxAp->typecode,ndA,incX,nrhsl+1);
+					rsb__util_set_array_to_converted_integer(((rsb_byte_t*)lhs)+mtxAp->el_size*ndA*nrhsl,mtxAp->typecode,ndA,incY,nrhsl+1);
+				}
+				/* RSB_DO_ERROR_CUMULATE(errval,rsb__xcopy(lhs,rhs,0,0,mtxAp->nr,mtxAp->el_size)); */
+#endif /* RSB_EXPERIMENTAL_WANT_BEST_TIMES */
+#if RSB_EXPERIMENTAL_WANT_BEST_TIMES
+				spmv_t = - rsb_time();
+#endif /* RSB_EXPERIMENTAL_WANT_BEST_TIMES */
+				if((errval = rsb__do_spsm(transA,alphap,mtxAp,nrhs,order,betap,lhs,incY*mtxAp->nr,lhs,incY*mtxAp->nr))!=RSB_ERR_NO_ERROR) /* benchmark -- mop is spsv_uxua*/
+				{
+					RSBENCH_STDERR("[!] "RSB_ERRM_TS);
+					goto erri;
+				}
+#if RSB_EXPERIMENTAL_WANT_BEST_TIMES
+				spmv_t += rsb_time();
+				tot_t += spmv_t;
+				best_t = RSB_MIN_ABOVE_INF(spmv_t,best_t,tinf);
+#endif /* RSB_EXPERIMENTAL_WANT_BEST_TIMES */
+				if(want_inner_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				if((g_debug || 1) && i==times-1)
+				{
+					/* this is debug information, very cheap to include */
+					rsb_byte_t * out2=NULL;
+					rsb_aligned_t mbetap[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+					out2 = rsb__calloc(mtxAp->el_size*(RSB_MAX(nrA,ncA)+br)*nrhs);
+					if(!out2 /* || rsb__cblas_Xscal(mtxAp->typecode,nrA+br,NULL,out2,incY)*/) { errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+					if(RSB_SOME_ERROR(rsb__fill_with_ones(alphap,typecode,1,1))){ errval = RSB_ERR_INTERNAL_ERROR; goto err;}
+					if(RSB_SOME_ERROR(rsb__fill_with_ones(mbetap,typecode,1,1))){ errval = RSB_ERR_INTERNAL_ERROR; goto err;}
+					if(RSB_SOME_ERROR(rsb__cblas_Xscal(typecode,1,NULL,errnorm,1))){ errval = RSB_ERR_INTERNAL_ERROR; goto err;}
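+					/* residual check: after the in-place solve lhs = inv(op(A))*rhs,
+					 * so out2 = op(A)*lhs should reproduce rhs; the norm printed
+					 * below is sqrt(sum |rhs - out2|) */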
+					if((errval = rsb__do_spmm_general(mtxAp,lhs,out2,alphap,mbetap,incX,incY,transA,RSB_OP_FLAG_DEFAULT,order RSB_OUTER_NRHS_SPMV_ARGS_IDS))!=RSB_ERR_NO_ERROR)
+					{
+						/* e is our error code*/
+						RSBENCH_STDERR("[!] some problem occurred in sparse matrix vector product!\n");
+						goto erri;
+					}
+					RSB_DO_ERROR_CUMULATE(errval,rsb__sqrt_of_sum_of_fabs_diffs(rhs,out2,errnorm,typecode,nrA+br));
+					RSBENCH_STDOUT("#error norm:");
+					RSB_DO_ERROR_CUMULATE(errval,rsb__debug_print_value(errnorm,typecode));
+					RSBENCH_STDOUT("\n");
+					if(out2)rsb__free(out2);
+				}
+
+	#ifdef RSB_WANT_KERNELS_DEBUG
+				/* ... */
+	#endif /* RSB_WANT_KERNELS_DEBUG */
+				}  /* times: benchmark loop of spsv_uxua end */
+				RSB_TM_LIKWID_MARKER_R_STOP("RSB_SPMV");
+				RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("POST_RSB_SPMV_",1,times,&rsb_pci);
+				if((g_debug || 1) /*&& i==times-1*/)
+				{
+					/* this is debug information, very cheap to include */
+					RSB_DO_ERROR_CUMULATE(errval,rsb__do_print_some_vector_stats(lhs,typecode,nrA,incY));
+				}
+
+				if(dumpvec&rsb_dumpvec_res)
+					rsb__debug_print_vector(lhs,nrA,typecode,incY);
+				if(dumpvec&rsb_dumpvec_rhs)
+					rsb__debug_print_vector(rhs,nrA,typecode,incX);
+
+				if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				if(n_dumpres)
+				{
+					RSBENCH_STDOUT("##RSB LHS %d elements post-peek:\n",n_dumpres);
+					rsb__debug_print_vector(lhs,RSB_MIN(ndA*nrhs,n_dumpres),typecode,incY);
+				}
+				if(n_dumprhs)
+				{
+					RSBENCH_STDOUT("##RSB RHS %d elements post-peek:\n",n_dumprhs);
+					rsb__debug_print_vector(rhs,RSB_MIN(ndA*nrhs,n_dumprhs),typecode,incY);
+				}
+				if(!g_sort_only)
+				{
+					op_t += rsb_time();
+					op_t /= (double)times;
+					/*
+				if(RSB_WANT_VERBOSE_MESSAGES)
+				{RSBENCH_STDOUT("performed %lf Mflops in %lf seconds (%lf Mflops)\n",raw_Mflops, op_t, (raw_Mflops)/(op_t));
+				RSBENCH_STDOUT("raw data rate of (%lf Gbytes/sec)\n", ((double)(raw_Mflops)*(mtxAp->el_size))/(op_t*1000.0));	}*/
+				/*
+				if(RSB_WANT_VERBOSE_MESSAGES)
+				RSBENCH_STDOUT("nonzero data rate of (%lf Gbytes/sec, or %lf Mflops)\n",
+				(true_Mflops*(mtxAp->el_size))/(op_t*1000.0),
+				true_Mflops/(op_t)
+				);*/
+				}
+
+                                fillin = rsb__do_get_matrix_fillin(mtxAp);
+				if(g_sort_only)
+				{
+				/* FIXME :
+				 * please note that this rudimentary model also takes into account the matrix creation time.
+				 */
+                	                raw_Mflops= (rsb_perf_t) mtxAp->element_count;
+        	                        true_Mflops=(((double)mtxAp->nnz)*log((double)mtxAp->nnz))/RSB_REAL_MILLION;
+					op_t=mct;	/* our timed operation is matrix construction */
+				}
+				else
+				{
+	                                raw_Mflops = rsb__estimate_mflops_per_op_spsv_uxua(mtxAp);
+	                                true_Mflops = raw_Mflops/fillin;
+	                                raw_Mflops *=nrhs;
+	                                true_Mflops*=nrhs;
+				}
+
+
+#if RSB_WANT_MKL
+	if(want_mkl_bench && !(cc==1 && mkl_coo_op_time_best_serial != RSB_CONST_IMPOSSIBLY_BIG_TIME))
+	{
+			rsb_nnz_idx_t annz = RSB_MAX(nnz,nrA+1),rnz=0,mklnz=nnz;
+			/* please note that mkl routines do not support stride */
+			/* FIXME: a non-monotonically-increasing order will do harm */
+			mkl_coo2csr_time = RSB_TIME_ZERO;
+			mkl_coo_op_tot_time = RSB_TIME_ZERO;
+			mkl_coo_op_time = RSB_TIME_ZERO;
+			mkl_coo_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+			//mkl_coo_op_time_best_serial = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+			mkl_csr_op_tot_time = RSB_TIME_ZERO;
+			mkl_csr_op_time = RSB_TIME_ZERO;
+			mkl_csr_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+			//mkl_csr_op_time_best_serial = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+			
+			if(nrhs>1)
+				want_mkl_bench_coo = RSB_BOOL_FALSE;/* 20130401 FIXME: this circumvents an Intel MKL bug */
+#if 1
+			//mkl_set_dynamic(1);
+			//RSBENCH_STDOUT("MKL failed enabling dynamic thread number control\n");
+			mkl_set_num_threads(cc);
+			//RSBENCH_STDOUT("MKL has %d threads now\n",mkl_get_num_threads());
+#else /* 1 */
+			if(rsb__set_num_threads(cc)!=cc)
+			{
+				RSB_ERROR("failed setting %d threads!\n",cc);
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto err;
+			}
+#endif /* 1 */
+			if(!want_sort_after_load)
+			if(!want_in_place_assembly)
+			{
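+				/* MKL's COO/CSR kernels expect sorted, duplicate-free input,
+				 * hence the row-major sort and duplicate removal below */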
+				errval = rsb__util_sort_row_major_parallel(VA,IA,JA,nnz,nrA,ncA,typecode,RSB_FLAG_NOFLAGS);
+				mklnz = rsb_weed_out_duplicates (IA,JA,VA,nnz,typecode,RSB_FLAG_SORTED_INPUT);
+				if((!RSB_IS_VALID_NNZ_COUNT(mklnz)) || (!mklnz) || (RSB_SOME_ERROR(errval)))
+				{
+					RSB_PERR_GOTO(err,RSB_ERRM_EM);
+				}
+				annz = RSB_MAX(mklnz,nrA+1);
+			}
+			mkl_set_num_threads(cc); // necessary, or MKL will get puzzled
+
+		if(want_mkl_bench_coo)
+		{
+			totct -= rsb_time();
+			errval = rsb_util_coo_alloc_copy_and_stats(&M_VA,&M_IA,&M_JA,want_in_place_assembly?NULL:VA,want_in_place_assembly?NULL:IA,want_in_place_assembly?NULL:JA,NULL,NULL,mklnz,(annz-mklnz),typecode,0,mib,RSB_FLAG_NOFLAGS,NULL);
+			if(RSB_SOME_ERROR(errval)){RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_ENOMEM);goto mklerr;}
+			//errval = rsb_mtx_get_coo(mtxAp,M_VA,M_IA,M_JA,flags); /* FIXME: use this */
+			errval = rsb__do_get_rows_sparse(RSB_DEFAULT_TRANSPOSITION,NULL,mtxAp,M_VA,M_IA,M_JA,0,mtxAp->nr-1,&rnz,RSB_FLAG_NOFLAGS|mif);
+			totct += rsb_time();
+	
+			if(!M_VA  || !M_IA  || !M_JA ){RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_ENOMEM);goto mklerr;}
+
+			RSB_DO_ERROR_CUMULATE(errval,rsb__vectors_reinit(rhs,lhs,typecode,ndA,ndA,incX,incY));
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("PRE_MKL_COO_SPXV_",0,times,NULL);
+			RSB_TM_LIKWID_MARKER_R_START("MKL_COO_SPMV");
+			for(i=0;i<times;++i)
+			{
+				if(want_inner_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				mkl_coo_op_time = - rsb_time();
+				RSB_PERFORMANCE_COUNTERS_DUMP("PRE_MKL_COO_SPXV_",0);
+				RSB_DO_ERROR_CUMULATE(errval,rsb__mkl_coo_spsv(M_VA,nrA,ncA,mklnz,M_IA,M_JA,rhs,lhs,alphap,betap,transA,typecode,flags));
+				RSB_PERFORMANCE_COUNTERS_DUMP("POST_MKL_COO_SPXV_",1);
+				mkl_coo_op_time += rsb_time();
+				mkl_coo_op_time_best = RSB_MIN_ABOVE_INF(mkl_coo_op_time_best,mkl_coo_op_time,tinf);
+				mkl_coo_op_tot_time+=mkl_coo_op_time;
+			}
+			RSB_TM_LIKWID_MARKER_R_STOP("MKL_COO_SPMV");
+				RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("POST_MKL_COO_SPXV_",1,times,&mkl_coo_pci);
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+			if(n_dumpres)
+			{
+				RSBENCH_STDOUT("##MKL COO LHS %d elements post-peek:\n",n_dumpres);
+				rsb__debug_print_vector(lhs,RSB_MIN(ndA*nrhs,n_dumpres),typecode,incY);
+			}
+			if(cc==1) 
+				mkl_coo_op_time_best_serial = mkl_coo_op_time_best;
+
+			RSB_CONDITIONAL_FREE(M_VA);
+			RSB_CONDITIONAL_FREE(M_IA);
+			RSB_CONDITIONAL_FREE(M_JA);
+		} /* want_mkl_bench_coo */
+
+		if(want_mkl_bench_csr || RSB_MKL_APPROPRIATE_AT_TIME_SPEC( want_mkl_autotuner ) )
+		{
+			totct -= rsb_time();
+			errval = rsb_util_coo_alloc_copy_and_stats(&M_VAC,&M_IAC,&M_JAC,want_in_place_assembly?NULL:VA,want_in_place_assembly?NULL:IA,want_in_place_assembly?NULL:JA,NULL,NULL,mklnz,(annz-mklnz),typecode,0,mib,RSB_FLAG_NOFLAGS,NULL);
+			RSB_DO_ERROR_CUMULATE(errval,rsb_mtx_get_csr(mtxAp->typecode,mtxAp,M_VAC,M_IAC,M_JAC,flags|mif));
+			totct += rsb_time();
+	
+			if(!M_VAC || !M_IAC || !M_JAC) {RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_ENOMEM);goto mklerr;}
+			if(RSB_SOME_ERROR(errval)) {goto mklerr;}
+
+                        if(0)/* if want bogus contents (for debug/inspection) */
+                        {
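+                                /* builds a synthetic CSR pattern: about mklnz/nrA entries per
+                                 * row, with column indices 0,1,2,... within each row */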
+                                rsb_coo_idx_t i,npr=(mklnz+nrA-1)/nrA;
+                                rsb_nnz_idx_t l;
+                                M_IAC[0]=0;
+                                for(i=1;i<nrA;++i)
+                                        M_IAC[i]=M_IAC[i-1]+npr;
+                                for(i=0;i<nrA;++i)
+                                        for(l=M_IAC[i];l<M_IAC[i+1];++l)
+                                                M_JAC[l]=l-M_IAC[i];
+                                M_IAC[nrA]=mklnz;
+                        }
+
+			totct -= rsb_time();
+			if(!want_in_place_assembly)
+			{
+				mkl_coo2csr_time = - rsb_time();
+				RSB_DO_ERROR_CUMULATE(errval,rsb__mkl_coo2csr(nrA,ncA,mklnz,VA,IA,JA,M_VAC,M_IAC,M_JAC,typecode,mib));
+				mkl_coo2csr_time += rsb_time();
+				if(RSB_SOME_ERROR(rsb__csr_chk(M_IAC,M_JAC,nrA,ncA,mklnz,mib)))
+				{
+      					RSB_PERR_GOTO(err,RSB_ERRM_EM)
+				}
+			}
+			else
+			{
+				RSB_WARN("warning : skipping MKL coo2csr conversion (user chose in-place RSB build) \n");
+			}
+			totct += rsb_time();
+		} /* want_mkl_bench_csr || want_mkl_autotuner */
+
+			if(n_dumpres)
+			{
+				RSBENCH_STDOUT("##MKL CSR LHS %d elements pre-peek:\n",n_dumpres);
+				rsb__debug_print_vector(rhs,RSB_MIN(ndA*nrhs,n_dumpres),typecode,incX);
+			}
+			RSB_DO_ERROR_CUMULATE(errval,rsb__vectors_reinit(rhs,lhs,typecode,ndA,ndA,incX,incY));
+			if(n_dumprhs)
+			{
+				RSBENCH_STDOUT("##MKL CSR RHS %d elements pre-peek:\n",n_dumprhs);
+				rsb__debug_print_vector(lhs,RSB_MIN(ndA*nrhs,n_dumprhs),typecode,incY);
+			}
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+			if(want_mkl_bench_csr)
+			{
+			RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("PRE_MKL_CSR_SPXV_",0,times,NULL);
+			RSB_TM_LIKWID_MARKER_R_START("MKL_CSR_SPMV");
+			for(i=0;i<times;++i)
+			{
+				if(want_inner_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				mkl_csr_op_time = - rsb_time();
+				RSB_PERFORMANCE_COUNTERS_DUMP("PRE_MKL_CSR_SPXV_",0);
+				if(nrhs>1)
+					RSB_DO_ERROR_CUMULATE(errval,rsb__do_mkl_csr_spsm(M_VAC,nrA,nrhs,M_IAC,M_JAC,rhs,lhs,alphap,transA,typecode,flags,nrhs/*ldX*/,nrhs/*ldY*/));
+					/* FIXME: rsb__mkl_csr_spsm_bench is there */
+				else
+					RSB_DO_ERROR_CUMULATE(errval,rsb__mkl_csr_spsv_bench(M_VAC,nrA,ncA,mklnz,M_IAC,M_JAC,rhs,lhs,alphap,betap,transA,typecode,flags,NULL,NULL,NULL /* &mkl_csr_op_time */,NULL));
+				RSB_PERFORMANCE_COUNTERS_DUMP("POST_MKL_CSR_SPXV_",1);
+				mkl_csr_op_time += rsb_time();
+				mkl_csr_op_time_best = RSB_MIN_ABOVE_INF(mkl_csr_op_time_best,mkl_csr_op_time,tinf);
+				mkl_csr_op_tot_time+=mkl_csr_op_time;
+			}
+			RSB_TM_LIKWID_MARKER_R_STOP("MKL_CSR_SPMV");
+			RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("POST_MKL_CSR_SPXV_",1,times,&mkl_csr_pci);
+			} /* want_mkl_bench_csr */
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+			if(cc==1)mkl_csr_op_time_best_serial=mkl_csr_op_time_best;
+			if(n_dumpres)
+			{
+				RSBENCH_STDOUT("##MKL CSR LHS %d elements post-peek:\n",n_dumpres);
+				rsb__debug_print_vector(lhs,RSB_MIN(ndA*nrhs,n_dumpres),typecode,incY);
+			}
+			if(n_dumprhs)
+			{
+				RSBENCH_STDOUT("##MKL CSR RHS %d elements post-peek:\n",n_dumprhs);
+				rsb__debug_print_vector(rhs,RSB_MIN(ndA*nrhs,n_dumprhs),typecode,incY);
+			}
+			if( mkl_csr_op_time_best != RSB_CONST_IMPOSSIBLY_BIG_TIME )
+				RSBENCH_STDOUT("##MKL STUFF DEBUG omp_set_num_threads():%d==omp_get_num_threads():%d  bestserialcsr:%0.5lf vs bestcsr:%0.5lf\n",omp_get_num_threads(),cc,mkl_csr_op_time_best_serial,mkl_csr_op_time_best);
+			if( mkl_coo_op_time_best != RSB_CONST_IMPOSSIBLY_BIG_TIME )
+				RSBENCH_STDOUT("##MKL STUFF DEBUG omp_set_num_threads():%d==omp_get_num_threads():%d  bestserialcoo:%0.5lf vs bestcoo:%0.5lf\n",omp_get_num_threads(),cc,mkl_coo_op_time_best_serial,mkl_coo_op_time_best);
+
+			if( RSB_MKL_APPROPRIATE_AT_TIME_SPEC( want_mkl_autotuner ) && want_mkl_autotuner > RSB_TIME_ZERO )
+			{
+				rsb_time_t btime = RSB_TIME_ZERO, matt = -rsb_time();
+				rsb_thread_t bthreads = at_mkl_csr_nt;
+				rsb_real_t sf = RSB_REAL_ZERO;
+				rsb_char_t * ops = "SPSV";	/* set here, before the printout below uses it */
+
+				rsb__tattr_init(&(attr.clattr), NULL, nrA, mklnz, typecode, flags, nrhs);
+				attr.clattr.vl = 1; /* FIXME: new */
+				RSBENCH_STDOUT("# MKL CSR %s autotuning for thread spec. %d  trans %c (0=current (=%d),<0=auto,>0=specified)\n",ops,bthreads,RSB_TRANSPOSITION_AS_CHAR(transA),cc);
+#if 1
+				if(nrhs>1)
+					RSB_DO_ERROR_CUMULATE(errval,rsb__mkl_csr_spsm_bench(M_VAC,nrA,nrhs,M_IAC,M_JAC,rhs,lhs,alphap,transA,typecode,flags,nrhs/*ldX*/,nrhs/*ldY*/,&bthreads,&btime,&(attr.clattr),&btpms));
+				else
+					RSB_DO_ERROR_CUMULATE(errval,rsb__mkl_csr_spsv_bench(M_VAC,nrA,ncA,mklnz,M_IAC,M_JAC,rhs,lhs,alphap,betap,transA,typecode,flags,&bthreads,&btime,&(attr.clattr),&btpms));
+#endif
+				bthreads = bthreads ? bthreads : cc;
+				RSBENCH_STDOUT("# MKL CSR %s best threads / time / perf. were: %d / %lg / %lg\n",ops,bthreads,btime,(rsb__estimate_mflops_per_op_spmv_uaua(mtxAp)*nrhs)/btime);
+				matt += rsb_time();
+				RSBENCH_STDOUT("MKL CSR Autotuner took %.2lgs and estimated a speedup of %lf / %lf = %lf x (best round %d samples at %d threads)\n",matt,(attr.clattr).dtpo,(attr.clattr).btpo,(attr.clattr).dtpo/(attr.clattr).btpo,attr.clattr.nit[attr.clattr.optt],attr.clattr.optt);
+				at_mkl_csr_op_time_best = btime;
+				at_mkl_csr_nt = bthreads;
+				mkl_csr_op_time_best = (attr.clattr).dtpo;
+				totmt += matt;
+				RSB_ASSERT( bthreads > 0 );
+			} /* want_mkl_autotuner */
+
+			if(want_mkl_bench_gem)
+			{
+				rsb_coo_idx_t gemdim=0;
+			RSB_DO_ERROR_CUMULATE(errval,rsb__vectors_reinit(rhs,lhs,typecode,ndA,ndA,incX,incY));
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+			RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("PRE_MKL_GEMV_",0,times,NULL);
+			RSB_TM_LIKWID_MARKER_R_START("MKL_GEMV");
+			for(i=0;i<times;++i)
+			{
+				if(want_inner_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				mkl_gem_op_time = - rsb_time();
+				RSB_PERFORMANCE_COUNTERS_DUMP("PRE_MKL_GEMV_",0);
+				RSB_PERFORMANCE_COUNTERS_DUMP("POST_MKL_GEMV_",1);
+				mkl_gem_op_time += rsb_time();
+				mkl_gem_op_time_best = RSB_MIN_ABOVE_INF(mkl_gem_op_time_best,mkl_gem_op_time,tinf);
+				mkl_gem_op_tot_time+=mkl_gem_op_time;
+			}
+			true_gem_Mflops=2*gemdim*gemdim;
+			true_gem_Mflops/=RSB_REAL_MILLION;
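+			/* 2*gemdim^2 is the flop count of a dense gemv on a gemdim x gemdim
+			 * matrix; note that no MKL gemv call is issued in the loop above, so
+			 * only timing/counter overhead is measured here */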
+			RSB_TM_LIKWID_MARKER_R_STOP("MKL_GEMV");
+			RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("POST_MKL_GEMV_",1,times,&mkl_gem_pci);
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+			if(cc==1)mkl_gem_op_time_best_serial=mkl_gem_op_time_best;
+			if(n_dumpres)
+			{
+				RSBENCH_STDOUT("##MKL GEMX LHS %d elements peek:\n",n_dumpres);
+				rsb__debug_print_vector(lhs,RSB_MIN(ndA*nrhs,n_dumpres),typecode,incY);
+			}
+			} /* want_mkl_bench_gem */
+mklerr:
+			RSB_CONDITIONAL_FREE(M_VAC);
+			RSB_CONDITIONAL_FREE(M_IAC);
+			RSB_CONDITIONAL_FREE(M_JAC);
+			RSB_CONDITIONAL_FREE(M_VA);
+			RSB_CONDITIONAL_FREE(M_IA);
+			RSB_CONDITIONAL_FREE(M_JA);
+			rsb_perror(NULL,errval);
+		} /* want_mkl_bench  */
+#endif /* RSB_WANT_MKL */
+#ifdef RSB_WANT_OSKI_BENCHMARKING 
+			/* FIXME : should only exist for double as type */
+			if(want_oski_bench && guess_blocking_test!=2 /* g.b.t=2 is an extra run*/) 
+			{
+
+			rsb__sprintf(oxform,"return BCSR(InputMat, %zd, %zd)",(rsb_printf_int_t)br,(rsb_printf_int_t)bc);
+			//rsb__sprintf(oxform,"return BCSR(InputMat, %d, %d)",1,1);
+			/* FIXME : ncA and nrA are not enough : we should account for br and bc excess ! */
+
+			Oval = rsb__clone_area(VA,nnz*mtxAp->el_size);
+			OIA = rsb__clone_area(IA,nnz*sizeof(rsb_coo_idx_t));
+			OJA = rsb__clone_area(JA,nnz*sizeof(rsb_coo_idx_t));
+
+			/* we need duplicates, because we will later use VA as it is */
+			if(!Oval || !OIA || !OJA)
+			{
+				RSB_ERROR("failed aux arrays allocation !\n");goto err;
+			}
+
+			/*
+				Unfortunately, Oski does not have native BCSR constructors, but
+				relies on conversion from CSR.
+				So the measured time is more than it should be, yet a better
+				approximation than timing oski_CreateMatCSR alone.
+			*/
+
+			oski_a_t = -rsb_time();
+			if(RSB_SOME_ERROR(rsb__allocate_csr_arrays_from_coo_sorted(Oval, OIA, OJA, nnz, nrA, ncA, typecode, &Aval, &Aptr, &Aind)))
+			{
+				RSB_ERROR("failed csr allocation !\n");goto err;
+			}
+			oski_a_t += rsb_time();
+
+			if(!Aval || !Aptr || !Aind)
+			{
+				RSB_ERROR("failed csr arrays allocation !\n");goto err;
+			}
+
+			oski_m_t = -rsb_time();
+			A_tunable = oski_CreateMatCSR (Aptr, Aind, Aval, nrA, ncA,        /* CSR arrays */
+                                // SHARE_INPUTMAT /*COPY_INPUTMAT*/,        /* "copy mode" */
+				 /*SHARE_INPUTMAT*/ COPY_INPUTMAT,        /* "copy mode" */
+                                 1, INDEX_ZERO_BASED);
+				// we should add : INDEX_SORTED, INDEX_UNIQUE
+				// 3, INDEX_ZERO_BASED, MAT_TRI_LOWER, MAT_UNIT_DIAG_IMPLICIT);
+			oski_m_t += rsb_time();
+
+		        if(A_tunable==INVALID_MAT)
+                	{
+				RSB_ERROR("invalid oski matrix!\n");goto err;
+			}
+
+			oski_t_t = -rsb_time();
+			if( oski_ApplyMatTransforms (A_tunable, oxform) )
+			{
+				RSB_ERROR("invalid transform!\n");goto err;
+			}
+			oski_t_t += rsb_time();
+
+			if(A_tunable==INVALID_MAT)
+			{
+				RSB_ERROR("invalid oski tuned matrix!\n");goto err;
+			}
+
+				/* FIXME : should error-check these steps */
+			//	RSBENCH_STDOUT("# oski : ncA=%zd, nrA=%zd\n",(rsb_printf_int_t)ncA,(rsb_printf_int_t)nrA);
+			        x_view = oski_CreateVecView( rhs, ncA, STRIDE_UNIT );
+			        y_view = oski_CreateVecView( lhs, nrA, STRIDE_UNIT );
+				if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				oski_t = - rsb_time();
+				for(i=0;i<times;++i)
+				{
+#error FIXME: flush breaks measured time
+					if(want_inner_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+					/* y <- alpha A * x + beta * y */
+					if(oski_MatMult( A_tunable, OP_NORMAL, oalpha, x_view, obeta, y_view ))
+					{
+							RSB_ERROR("failed oski_MatMult !\n");goto err;
+					}
+				}
+				oski_t += rsb_time();
+				if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				if(n_dumpres)
+					rsb__debug_print_vector(lhs,RSB_MIN(ndA*nrhs,n_dumpres),typecode,incY);
+				/* FIXME */
+	
+				oski_DestroyMat( A_tunable );
+				oski_DestroyVecView( x_view );
+				oski_DestroyVecView( y_view );
+				RSB_CONDITIONAL_FREE(Aptr);
+				RSB_CONDITIONAL_FREE(Aind);
+				RSB_CONDITIONAL_FREE(Aval);
+				RSB_CONDITIONAL_FREE(Oval);
+				RSB_CONDITIONAL_FREE(OJA  );
+				RSB_CONDITIONAL_FREE(OIA );
+				Aptr= Aind= Aval= NULL;
+			} /* want_oski_bench  */
+#endif /* RSB_WANT_OSKI_BENCHMARKING */
+			if(ti>0)
+				want_getrow_bench=0;
+			if(want_getrow_bench)
+			{
+				const rsb_coo_idx_t nr=1;
+				void * RVA = NULL;
+				rsb_coo_idx_t*RIA = NULL;
+				rsb_coo_idx_t*RJA = NULL;
+
+				if(RSB_SOME_ERROR(errval = rsb_util_coo_alloc(&RVA,&RIA,&RJA,mtxAp->nc*nr,typecode,RSB_BOOL_FALSE))){goto errgr;}
+				for(i=0;i<times;++i)
+				{
+					rsb_time_t getrow_op_time = RSB_TIME_ZERO;
+					rsb_coo_idx_t ri=0;
+					rsb_nnz_idx_t rnz=0;
+					getrow_op_time = - rsb_time();
+					for(ri=0;ri+nr-1<mtxAp->nr;ri+=nr)
+						RSB_DO_ERROR_CUMULATE(errval,rsb_mtx_get_coo_block(mtxAp,RVA,RIA,RJA,ri,RSB_MIN(mtxAp->nr-1,ri+nr-1),0,mtxAp->nc-1,NULL,NULL,&rnz,mtxAp->flags));
+					getrow_op_time += rsb_time();
+					getrow_op_time_best = RSB_MIN_ABOVE_INF(getrow_op_time_best,getrow_op_time,tinf);
+					getrow_op_tot_time+=getrow_op_time;
+				}
+				if(cc==1)getrow_op_time_best_serial=getrow_op_time_best;
+errgr:
+				RSB_CONDITIONAL_FREE(RVA);
+				RSB_CONDITIONAL_FREE(RIA);
+				RSB_CONDITIONAL_FREE(RJA);
+				if(RSB_SOME_ERROR(errval))
+				{goto err;}
+			} /* want_getrow_bench */
+
+			if(ti>0)
+				want_getdiag_bench=0;
+			if(want_getdiag_bench)
+			{
+				void * DV = rsb__calloc_vector(mtxAp->nr,mtxAp->typecode);
+				if(!DV) { errval = RSB_ERR_ENOMEM; goto err; }
+				for(i=0;i<times;++i)
+				{
+					rsb_time_t diag_op_time = RSB_TIME_ZERO;
+					diag_op_time = - rsb_time();
+					RSB_DO_ERROR_CUMULATE(errval,rsb__dodo_getdiag(mtxAp,DV));
+					diag_op_time += rsb_time();
+					diag_op_time_best = RSB_MIN_ABOVE_INF(diag_op_time_best,diag_op_time,tinf);
+					diag_op_tot_time+=diag_op_time;
+				}
+				if(cc==1)diag_op_time_best_serial=diag_op_time_best;
+				RSB_CONDITIONAL_FREE(DV);
+				if(RSB_SOME_ERROR(errval))
+				{goto err;}
+			} /* want_getdiag_bench */
+
+			if(g_sort_only)
+			{
+				/* single line output, ideal for benchmark data to be processed later */
+				RSBENCH_STDOUT ( "%-20s	%s", rsb__basename(filename),rsb__sprint_matrix_implementation_code2(mtxAp,buf,flags));
+
+				RSBENCH_STDOUT ( "	%.3lf	%lg",
+				//raw_Mflops/op_t,	/* please note that in the sort case, it is an absolutely meaningless value */
+				true_Mflops/op_t,	/* algorithmic millions of ops per second (not an accurate model)  */
+				op_t/true_Mflops	/* the sorting algorithmic constant (not an accurate model) */
+				);
+			}
+			else
+			if(!g_estimate_matrix_construction_time)
+			{
+#if RSB_EXPERIMENTAL_WANT_BEST_TIMES
+				rsb__dump_performance_record(rsb__basename(filename),mtxAp,true_Mflops/best_t,raw_Mflops/best_t,"spsv_uxua",flags);
+				if( spsv_spmv_t != RSB_TIME_ZERO )
+				printf("# (extra) SpMV performance record:\n"),
+				rsb__dump_performance_record(rsb__basename(filename),mtxAp,(true_Mflops/best_t)*(tot_t/spsv_spmv_t),raw_Mflops/best_t*(tot_t/spsv_spmv_t),"spmv_uaua*",flags);
+#else /* RSB_EXPERIMENTAL_WANT_BEST_TIMES */
+				rsb__dump_performance_record(rsb__basename(filename),mtxAp,true_Mflops/op_t,raw_Mflops/op_t,"spsv_uxua",flags);
+#endif /* RSB_EXPERIMENTAL_WANT_BEST_TIMES */
+			}
+			if(g_estimate_matrix_construction_time)
+			{
+				/* in this case the user also asked us for:
+				   * matrix construction Mflops
+				   * the ratio of the selected op time to the matrix construction time
+				 */
+				RSBENCH_STDOUT("\t%.3lg\t%.3lg	", ((double)nnz)/(mct*RSB_REAL_MILLION), mct/op_t);
+				rsb__fprint_matrix_implementation_code(mtxAp, "spsv_uxua", flags, RSB_STDOUT_FD);
+				RSBENCH_STDOUT ( "\n");
+			}
+			omta=((double)rsb_spmv_memory_accessed_bytes(mtxAp));
+			
+#ifdef RSB_WANT_OSKI_BENCHMARKING 
+			if(want_oski_bench)
+			{
+				RSBENCH_STDOUT ( "#OSKI_VS_US-SPMV:%-20s\t%s\t%10.2lf\t%10.2lf\n", rsb__basename(filename),rsb__sprint_matrix_implementation_code2(mtxAp,buf,RSB_FLAG_NOFLAGS),raw_Mflops/(oski_t/times),raw_Mflops/op_t);
+				RSBENCH_STDOUT ( "#OSKI_VS_US-ASM~:%-20s\t%s\t%10.2lf\t%10.2lf\n", rsb__basename(filename),rsb__sprint_matrix_implementation_code2(mtxAp,buf,RSB_FLAG_NOFLAGS),oski_m_t+oski_t_t+oski_a_t,mct);
+			}
+#endif /* RSB_WANT_OSKI_BENCHMARKING  */
+			/* WARNING : we cannot use RSB_FLAG_SORTED_INPUT in the recursive case
+				     until the following routine is able to use Z-sorted values. */
+			efillin = RSB_REAL_ZERO,eperf = RSB_REAL_ZERO;
+
+			/* FIXME : dies with ct20stif.mtx, now */
+			#if 0
+			RSB_WARN("warning : skipping rsb__estimate_expected_fillin_for_blocking\n");
+			fet = - rsb_time();
+			//rsb__estimate_expected_fillin_for_blocking(VA,IA,JA,nrA,ncA,nnz,typecode,flags/*|RSB_FLAG_SORTED_INPUT*/,br,bc,&efillin);/*TODO:thiscouldbedangerous:fixit!*/
+			efillin=mtxAp->einfo.efillin;	/* NEW */
+			fet += rsb_time();
+			#else /* 0 */
+			fet = RSB_TIME_ZERO;
+			#endif /* 0 */
+			rsb__estimate_expected_raw_performance_for_blocking(nrA,ncA,br,bc,nnz,typecode,flags,efillin,&eperf);
+
+			if(cc==1)
+			{
+				/* we need input flags, not instantiated matrix flags (which might not have that flag) */
+				if(!RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING))
+					base_best_t=best_t;
+				else
+					serial_best_t=best_t;
+			}
+	
+			if(want_perf_dump) 
+			if(RSB_DO_FLAG_HAS(/*mtxAp->*/flags,RSB_FLAG_QUAD_PARTITIONING))
+			{
+#if RSB_WANT_MKL
+				/* FIXME: this #if is horrible */
+				rsb__pr_set(rspr, mtxAp/*NULL */ /* FIXME */, NULL, filenamei, ci, incXi, incYi, nrhsi, typecodesi, ti, transA, RSB_CONST_IMPOSSIBLY_BIG_TIME, mkl_csr_op_time_best, RSB_CONST_IMPOSSIBLY_BIG_TIME, at_mkl_csr_op_time_best, RSB_THREADS_AUTO, at_mkl_csr_nt, RSB_CONST_IMPOSSIBLY_BIG_TIME, -1, NULL, NULL, &btpms[1], &btpms);
+#endif
+			}
+
+#if RSB_EXPERIMENTAL_WANT_BEST_TIMES
+			RSBENCH_STDOUT ( "#	%10.2lf	%10.2lf	( best, average net performance in %d tries ); diff:%2.0lf%%\n",
+				((double)true_Mflops/best_t), ((double)true_Mflops/op_t),
+				(int)times,
+				/* for marcin : */
+				((((double)true_Mflops/best_t)-((double)true_Mflops/op_t))*100)/((double)true_Mflops/op_t)
+				);
+#endif /* RSB_EXPERIMENTAL_WANT_BEST_TIMES */
+
+			RSBENCH_STDOUT ( "#	%10.2lf	%10.2lf	%10.2lf %10.6lf (min bw, reasonable bw, exceedingly max bw, w/r ratio) (MB/s)\n"
+				     "#	%10.2lf (MB per mop) %10.2lf (rhs loads, with a variable degree of locality)\n"
+				     "#	%10.2lf (MB per mop, estimated)\n"
+				     "#	%10.2lf (assembly + extra to (best) mop time ratio) (%10.2lf s)\n"
+				     "#	%10.2lf (assembly (p.e.+s.a.+e.i.+e.s.+...) to mop time ratio)\n"
+/*				     "#	%10.2lf (performance estimation to mop time ratio)\n"*/
+/*				     "#	%10.2lf (gross fillin estimation to mop time ratio)\n"*/
+				     "#	%10.2lf (structure analysis to mop time ratio)\n"
+				     "#	%10.2lf (elements insertion to mop time ratio)\n"
+				     "#	%10.2lf (elements sorting to mop time ratio) (%10.2lf s)\n"
+				     "#	%10.2lf (elements partitioning to mop time ratio)\n"
+				     "#	%10.2lf (recursion sort to mop time ratio)\t%10.ld (max recursion depth)\n"
+				     "#	%10.2lf	%10.2lf (nnz per row/column)\n"
+					,
+				((double)rsb_spmv_memory_accessed_bytes_min(mtxAp))*(1.e-6/best_t) ,
+				((double)omta)*(1.e-6/best_t) ,
+				((double)rsb_spmv_memory_accessed_bytes_max(mtxAp))*(1.e-6/best_t) ,
+				((double)rsb_spmv_memory_accessed_bytes_wr_ratio(mtxAp)),
+				((double)omta)*(1.e-6),
+				(1.0>((fillin*nnz)/(br*ncA))?1.0:((fillin*nnz)/(br*ncA))),
+				((double)rsb_spmv_memory_accessed_bytes_(br,bc,nrA,ncA,efillin*nnz,((efillin*nnz)/br)/bc,nrA/br,mtxAp->el_size))*(1.e-6),
+				(mct)/(best_t),
+				(mtxAp->tat),
+				(mtxAp->tat)/(best_t),
+/*				(mtxAp->pet)/(best_t),*/
+/*				(fet)/(best_t),*/
+				(mtxAp->sat)/(best_t),
+				(mtxAp->eit)/(best_t),
+				(mtxAp->est)/(best_t), (mtxAp->est),
+				(mtxAp->cpt)/(best_t),
+				((mtxAp->rpt)/(best_t)),((long)rsb__get_recursive_matrix_depth(mtxAp)),
+				(double)nnz/nrA, (double)nnz/ncA
+				);
+				if(RSB_MAXIMAL_CONFIGURED_BLOCK_SIZE>1)
+				RSBENCH_STDOUT ( 
+				     "#	%10.2lf (estimated fillin)"
+				     "#	%10.2lf (estimated fillin error)\n"
+				     "#	%10.2lf (estimated raw performance)"
+				     "#	%10.2lf (estimated raw performance error)\n"
+				     "#	%10.2lf (estimated net performance)"
+				     "#	%10.2lf (estimated net performance error)\n",
+				efillin, (efillin-fillin)/fillin,
+				eperf, (eperf-raw_Mflops/best_t)/(raw_Mflops/best_t),
+				efillin?(eperf/efillin):-1,efillin?(((eperf/efillin)-(true_Mflops/best_t))/(true_Mflops/best_t)):-1
+				);
+				RSBENCH_STDOUT( "#used index storage compared to COO:%zd vs %zd bytes (%.02lf%%) "
+					,(size_t)rsb__get_index_storage_amount(mtxAp),sizeof(rsb_coo_idx_t)*2*nnz
+					,(100*(double)rsb__get_index_storage_amount(mtxAp))/RSB_UTIL_COO_IDX_OCCUPATION(mtxAp->nr,mtxAp->nc,mtxAp->nnz)
+				);
+				RSBENCH_STDOUT( "; compared to CSR:%zd vs %zd bytes (%.02lf%%)\n"
+					,(size_t)rsb__get_index_storage_amount(mtxAp),
+					 (sizeof(rsb_coo_idx_t)*nnz+sizeof(rsb_nnz_idx_t)*(mtxAp->nr+1))
+					,(100*(double)rsb__get_index_storage_amount(mtxAp))/RSB_UTIL_CSR_IDX_OCCUPATION(mtxAp->nr,mtxAp->nc,mtxAp->nnz)
+				);
+				totatt += spsv_f_t;
+				if( spsv_d_t != RSB_TIME_ZERO)
+				RSBENCH_STDOUT( "#gain for spsv if we had infinite spmv-workers:%lf\n",((double)tot_t)/((double)(spsv_d_t)));
+				if( spsv_spmv_t != RSB_TIME_ZERO)
+				RSBENCH_STDOUT( "#spsv performance vs spmv_uaua*:%lf\n",spsv_spmv_t/tot_t);
+				if( spsv_f_t != RSB_TIME_ZERO)
+				RSBENCH_STDOUT( "#gain for spsv if we had no concurrent writes preventing locks at all:%lf\n",((double)tot_t)/((double)(spsv_f_t)));
+							
+			if(ci==0 && smt == RSB_TIME_ZERO && RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING))
+				smt=best_spsv_spmv_t;
+			if(ci==cl-1 && pmt == RSB_TIME_ZERO)
+				pmt=best_spsv_spmv_t;
+			if(ci==0 && sst == RSB_TIME_ZERO && RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING))
+				sst=best_t;
+			if(ci==cl-1 && pst == RSB_TIME_ZERO)
+				pst=best_t;
+			rsb__attr_dump(&attr);
+			RSB_BZERO_P((&attr));
+				if(want_verbose == RSB_BOOL_TRUE && (RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING)||fn==1))
+				{
+					rsb_nnz_idx_t minnz=0,maxnz=0,avgnz=0;
+					rsb_bool_t vrpr = (times != 0) ? RSB_BOOL_TRUE : RSB_BOOL_FALSE;
+
+					if(vrpr)
+					{
+					RSBENCH_STDOUT("%%:PERFORMANCE:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",true_Mflops/best_t);
+					RSBENCH_STDOUT("\t%le\t%le\n",true_Mflops,best_t);
+
+					RSBENCH_STDOUT("%%:OP_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",best_t);
+					}
+
+
+					if(vrpr)
+					{
+					if( serial_best_t != RSB_CONST_IMPOSSIBLY_BIG_TIME )
+					RSBENCH_STDOUT("%%:PERF_SCALING:"),RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(),
+					RSBENCH_STDOUT("\t%10.2lf\n",serial_best_t/best_t);
+					}
+
+					RSBENCH_STDOUT("#%%:CONSTRUCTOR_*:SORT	SCAN	INSERT	SCAN+INSERT\n");
+					RSBENCH_STDOUT("%%:CONSTRUCTOR_TIMES:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\t%10.6lf\t%10.6lf\t%10.6lf\n",mest,msat,meit,msat+meit);
+
+					RSBENCH_STDOUT("%%:UNSORTEDCOO2RSB_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n", mest+msat+meit);
+
+					RSBENCH_STDOUT("%%:RSB_SUBDIVISION_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n", msat);
+
+					RSBENCH_STDOUT("%%:RSB_SHUFFLE_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n", meit);
+
+					RSBENCH_STDOUT("%%:ROW_MAJOR_SORT_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n", mest);
+
+					RSBENCH_STDOUT("%%:ROW_MAJOR_SORT_SCALING:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.3lf\n", sest/mest);
+
+					RSBENCH_STDOUT("%%:SORTEDCOO2RSB_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n", msat+meit);
+
+					RSBENCH_STDOUT("%%:ROW_MAJOR_SORT_TO_MOP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.3lf\n", mest/best_t);
+
+					if(vrpr)
+					{
+					RSBENCH_STDOUT("%%:CLEANUP_TO_MOP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",mect/best_t);
+
+					RSBENCH_STDOUT("%%:CONSTRUCTOR_TO_MOP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\t%10.2lf\t%10.2lf\t%10.2lf\n",mest/best_t,msat/best_t,meit/best_t,(msat+meit)/best_t);
+
+
+					RSBENCH_STDOUT("%%:UNSORTEDCOO2RSB_TO_MOP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(msat+meit+mest)/best_t);
+
+					RSBENCH_STDOUT("%%:SORTEDCOO2RSB_TO_MOP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(msat+meit)/best_t);
+
+					RSBENCH_STDOUT("%%:RSB_SUBDIVISION_TO_MOP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(msat)/best_t);
+
+					RSBENCH_STDOUT("%%:RSB_SHUFFLE_TO_MOP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(meit)/best_t);
+					}
+
+					RSBENCH_STDOUT("%%:UNSORTEDCOO2RSB_SCALING:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(ssat+seit+sest)/(msat+meit+mest));
+
+					RSBENCH_STDOUT("%%:SORTEDCOO2RSB_SCALING:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(ssat+seit)/(msat+meit));
+
+					RSBENCH_STDOUT("%%:RSB_SUBDIVISION_SCALING:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(ssat)/(msat));
+
+					RSBENCH_STDOUT("%%:RSB_SHUFFLE_SCALING:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(seit)/(meit));
+
+					RSBENCH_STDOUT("%%:CONSTRUCTOR_SCALING:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\t%10.2lf\t%10.2lf\t%10.2lf\n",sest/mest,ssat/msat,seit/meit,(ssat+seit)/(meit+msat));
+
+					if( base_best_t != RSB_CONST_IMPOSSIBLY_BIG_TIME )
+					RSBENCH_STDOUT("%%:PERF_SCALING2CSR:"),RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(),
+					RSBENCH_STDOUT("\t%10.2lf\n",base_best_t/best_t);
+
+
+					RSBENCH_STDOUT("#%%:SM_COUNTS:	Tot	HalfwordCsr	FullwordCsr	HalfwordCoo	FullwordCoo\n");
+					RSBENCH_STDOUT("%%:SM_COUNTS:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					//RSBENCH_STDOUT("\t%d\t%d\t%d\t%d\t%d\n",
+					RSBENCH_STDOUT("\t%ld\t%ld\t%ld\t%ld\t%ld\n",
+						rsb__terminal_recursive_matrix_count(mtxAp),
+						rsb__terminal_recursive_matrix_count_with_storage_and_flags(mtxAp,RSB_MATRIX_STORAGE_BCSR,RSB_FLAG_USE_HALFWORD_INDICES_CSR),
+						rsb__terminal_recursive_matrix_count_with_storage_and_no_flags(mtxAp,RSB_MATRIX_STORAGE_BCSR,RSB_FLAG_USE_HALFWORD_INDICES_CSR),
+						rsb__terminal_recursive_matrix_count_with_storage_and_flags(mtxAp,RSB_MATRIX_STORAGE_BCOR,RSB_FLAG_USE_HALFWORD_INDICES_COO),
+						rsb__terminal_recursive_matrix_count_with_storage_and_no_flags(mtxAp,RSB_MATRIX_STORAGE_BCOR,RSB_FLAG_USE_HALFWORD_INDICES_COO)
+						);
+
+					RSBENCH_STDOUT("%%:SM_IDXOCCUPATIONRSBVSCOOANDCSR:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%zd\t%zd\t%zd\n",rsb__get_index_storage_amount(mtxAp),
+						RSB_UTIL_COO_IDX_OCCUPATION(mtxAp->nr,mtxAp->nc,mtxAp->nnz),
+						RSB_UTIL_CSR_IDX_OCCUPATION(mtxAp->nr,mtxAp->nc,mtxAp->nnz)
+						);
+
+					RSBENCH_STDOUT("%%:SM_IDXOCCUPATION:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%zd\n",rsb__get_index_storage_amount(mtxAp));
+
+					RSBENCH_STDOUT("%%:SM_MEMTRAFFIC:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.0lf\n",omta);
+#if 0
+					/* new, elegant */
+					RSBENCH_STDOUT("%%:SM_MINMAXAVGSUBMNNZ:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					{
+						rsb_submatrix_idx_t i=0;
+						rsb_real_t avgnz = ((rsb_real_t)mtxAp->nnz) / mtxAp->all_leaf_matrices_n;
+						rsb_coo_idx_t maxnz = 0, minnz = RSB_MAX_MATRIX_NNZ ;
+
+						for(i=0;i<mtxAp->all_leaf_matrices_n;++i)
+						{
+							struct rsb_mtx_t * submatrix = mtxAp->all_leaf_matrices[i].mtxlp;
+							maxnz = RSB_MAX(maxnz,submatrix->nnz);
+							minnz = RSB_MIN(minnz,submatrix->nnz);
+						}
+						RSBENCH_STDOUT(" %d %d %.2lf %d\n",minnz,maxnz,avgnz,mtxAp->all_leaf_matrices_n);
+					}
+#else
+					/* old, obsolete */
+					rsb__do_compute_terminal_nnz_min_max_avg_count(mtxAp,&minnz,&maxnz,&avgnz);
+					RSBENCH_STDOUT("%%:SM_MINMAXAVGNNZ:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%d\t%d\t%d\n",minnz,maxnz,avgnz);
+#endif
+
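+				/*
+				 * Optional per-leaf dumps: the raw nnz count of each leaf (a crude
+				 * histogram) and each leaf's average nnz per row.
+				 */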
+				if(want_print_per_subm_stats)
+				{
+					RSBENCH_STDOUT("%%:SM_NNZ_HISTOGRAM:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					if(!mtxAp->all_leaf_matrices)
+						RSBENCH_STDOUT(" %zd\n",(size_t)mtxAp->nnz);
+					else
+					{
+						rsb_submatrix_idx_t i=0;
+						for(i=0;i<mtxAp->all_leaf_matrices_n;++i)
+							RSBENCH_STDOUT(" %zd",(size_t)mtxAp->all_leaf_matrices[i].mtxlp->nnz);
+						RSBENCH_STDOUT("\n");
+					}
+
+					RSBENCH_STDOUT("%%:SM_NNZ_PER_ROW:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					if(!mtxAp->all_leaf_matrices)
+						RSBENCH_STDOUT(" %lf\n",((double)mtxAp->nnz)/mtxAp->nr);
+					else
+					{
+						rsb_submatrix_idx_t i=0;
+						for(i=0;i<mtxAp->all_leaf_matrices_n;++i)
+							RSBENCH_STDOUT(" %.2lf",((double)mtxAp->all_leaf_matrices[i].mtxlp->nnz)/mtxAp->all_leaf_matrices[i].mtxlp->nr);
+						RSBENCH_STDOUT("\n");
+					}
+				} /* want_print_per_subm_stats */
+
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+			if(want_perf_counters)
+				{
+					int i;
+					for(i=0;i<rsb_pci.eventnum;++i)
+					{
+						RSBENCH_STDOUT("%%:RSB_%s:",rsb_pci.eventdesc[i]);
+						RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+						RSBENCH_STDOUT("\t%zd\n",(size_t)(rsb_pci.eventvals[i]));
+					}
+				} /* want_perf_counters */
+#endif /* RSB_WANT_PERFORMANCE_COUNTERS */
+				}
+			} /* times */
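+			/*
+			 * Comparative MKL figures follow (only when built with MKL support):
+			 * per-format (COO/CSR/GEMV) best operation times, the derived Mflops
+			 * rates and serial-to-parallel scalings, plus the COO-to-CSR conversion
+			 * cost, to be compared with the RSB constructor times printed above.
+			 */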
+#if RSB_WANT_MKL
+				if(want_mkl_bench) /* 20110428 */
+				if(want_verbose == RSB_BOOL_TRUE && (RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING)||fn==1))
+				{
+#ifdef mkl_get_version
+					MKLVersion mv;
+					mkl_get_version(&mv);
+					RSBENCH_STDOUT("#%%:MKL %d.%d-%d, %s, %s, %s, %s\n",mv.MajorVersion,mv.MinorVersion,mv.UpdateVersion,mv.ProductStatus,mv.Build,mv.Processor,mv.Platform);
+#else /* mkl_get_version */
+					RSBENCH_STDOUT("#%%:MKL, version unknown\n");
+#endif /* mkl_get_version */
+			if(want_mkl_bench_coo)
+			{
+					RSBENCH_STDOUT("%%:MKL_COO_PERFORMANCE:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",true_Mflops/mkl_coo_op_time_best);
+
+					RSBENCH_STDOUT("%%:MKL_COO_OP_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(); RSBENCH_STDOUT("\t%10.6lf\n",mkl_coo_op_time_best);
+
+					if( mkl_coo_op_time_best_serial != RSB_CONST_IMPOSSIBLY_BIG_TIME )
+					RSBENCH_STDOUT("%%:MKL_COO_PERF_SCALING:"),RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(),
+					RSBENCH_STDOUT("\t%10.2lf\n",mkl_coo_op_time_best_serial/mkl_coo_op_time_best);
+			}
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+			if(want_perf_counters)
+				{
+					int i;
+					for(i=0;i<mkl_csr_pci.eventnum;++i)
+					{
+						RSBENCH_STDOUT("%%:MKL_CSR_%s:",mkl_csr_pci.eventdesc[i]);
+						RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+						RSBENCH_STDOUT("\t%zd\n",mkl_csr_pci.eventvals[i]);
+					}
+					if(want_mkl_bench_coo)
+					for(i=0;i<mkl_coo_pci.eventnum;++i)
+					{
+						RSBENCH_STDOUT("%%:MKL_COO_%s:",mkl_coo_pci.eventdesc[i]);
+						RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+						RSBENCH_STDOUT("\t%zd\n",mkl_coo_pci.eventvals[i]);
+					}
+				}
+#endif /* RSB_WANT_PERFORMANCE_COUNTERS */
+			if(want_mkl_bench_csr)
+			{
+					RSBENCH_STDOUT("%%:MKL_CSR_PERFORMANCE:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",true_Mflops/mkl_csr_op_time_best);
+
+					RSBENCH_STDOUT("%%:MKL_CSR_OP_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",mkl_csr_op_time_best);
+
+					if( mkl_csr_op_time_best_serial != RSB_CONST_IMPOSSIBLY_BIG_TIME )
+					RSBENCH_STDOUT("%%:MKL_CSR_PERF_SCALING:"),RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(),
+					RSBENCH_STDOUT("\t%10.2lf\n",mkl_csr_op_time_best_serial/mkl_csr_op_time_best);
+			}
+			if(want_mkl_bench_gem)
+			{
+					RSBENCH_STDOUT("%%:MKL_GEMV_PERFORMANCE:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",true_gem_Mflops/mkl_gem_op_time_best);
+
+					RSBENCH_STDOUT("%%:MKL_GEMV_OP_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",mkl_gem_op_time_best);
+
+					if( mkl_gem_op_time_best_serial != RSB_CONST_IMPOSSIBLY_BIG_TIME )
+					RSBENCH_STDOUT("%%:MKL_GEMV_PERF_SCALING:"),RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(),
+					RSBENCH_STDOUT("\t%10.2lf\n",mkl_gem_op_time_best_serial/mkl_gem_op_time_best);
+			}
+
+					if( mkl_coo2csr_time != RSB_TIME_ZERO )
+					{
+					RSBENCH_STDOUT("%%:MKL_COO2CSR_TO_CSR_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",mkl_coo2csr_time);
+					RSBENCH_STDOUT("%%:MKL_COO2CSR_TO_CSR_OP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",mkl_coo2csr_time/mkl_csr_op_time_best);
+
+
+					RSBENCH_STDOUT("%%:SORTEDCOO2RSB_VS_MKLCOO2CSR:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.3lf\n", (msat+meit)/(mkl_coo2csr_time));
+					}
+				} /* want_mkl_bench */
+#endif /* RSB_WANT_MKL */
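+				/*
+				 * The getrow/getdiag micro-benchmarks below report throughput in
+				 * millions of elements per second, i.e. nnz/(1e6*t) and nr/(1e6*t)
+				 * respectively, plus the ratio of the extraction time to the best
+				 * SPMV time.
+				 */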
+				if(want_getrow_bench)
+				{
+					const char*norsbnotice="";
+					const char*rsbnotice="NORSB_";
+					const char*notice=norsbnotice;
+				if(want_verbose == RSB_BOOL_TRUE && (RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING)||fn==1))
+					{}
+				else
+					notice = rsbnotice;
+
+					RSBENCH_STDOUT("%%:%sGETROW_PERFORMANCE:",notice);RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",((rsb_time_t)mtxAp->nnz)/(RSB_REAL_MILLION*getrow_op_time_best));
+					RSBENCH_STDOUT("%%:%sGETROW_OP_TIME:",notice);RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",getrow_op_time_best);
+					RSBENCH_STDOUT("%%:%sGETROW_TO_SPMV_OP_TIME:",notice);RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",getrow_op_time_best/best_t);
+
+				}
+				if(want_getdiag_bench)
+				{
+					const char*norsbnotice="";
+					const char*rsbnotice="NORSB_";
+					const char*notice=norsbnotice;
+				if(want_verbose == RSB_BOOL_TRUE && (RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING)||fn==1))
+					{}
+				else
+					notice = rsbnotice;
+
+					RSBENCH_STDOUT("%%:%sGETDIAG_PERFORMANCE:",notice);RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",((rsb_time_t)mtxAp->nr)/(RSB_REAL_MILLION*diag_op_time_best));
+					RSBENCH_STDOUT("%%:%sGETDIAG_OP_TIME:",notice);RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",diag_op_time_best);
+					RSBENCH_STDOUT("%%:%sGETDIAG_TO_SPMV_OP_TIME:",notice);RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",diag_op_time_best/best_t);
+
+				}
+				RSBENCH_STDOUT( "#\n");/* end of record */
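+				/*
+				 * The guess-blocking test appears to proceed in two phases: while
+				 * guess_blocking_test==1, every (brv[brvi],bcv[bcvi]) blocking is
+				 * scanned, recording best/worst/CSR performance and fill-in; at the
+				 * last blocking, RSB_FLAG_AUTO_BLOCKING is set and one extra round
+				 * (guess_blocking_test==2) compares the guessed blocking against the
+				 * recorded extremes.
+				 */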
+				if(guess_blocking_test)
+				{
+					rsb_flags_t oflags = RSB_FLAG_NOFLAGS;
+					/* TODO : should keep info of the worst, to */
+					rsb_perf_t nrp=(true_Mflops/op_t),bomta = RSB_REAL_ZERO /* best op memory traffic amount */;
+
+					if(guess_blocking_test==1)
+					{
+						if( nrp>RSB_REAL_ZERO && nrp>bperf)
+						{
+							bperf=nrp;
+							bomta=omta;
+							bfillin=fillin;
+							ebfillin=efillin;
+							bri=brvi;
+							bci=bcvi;
+						}
+					
+						if(brv[brvi]==1 && bcv[bcvi]==1)/* IF ANY! */
+						{
+							cperf=nrp;
+						}
+ 
+						if((nrp>RSB_REAL_ZERO && nrp<wperf) || wperf == RSB_REAL_ZERO)
+						{
+							wperf=nrp;
+						}
+
+						if( fillin > maxfillin )
+						{
+							maxfillin=fillin;
+						}
+					}
+
+					if( guess_blocking_test==2) 
+					{
+						egfillin=efillin;
+						RSBENCH_STDOUT("# GUESS DATA:  best performance was       :	%zd	%zd\n", (size_t)brv[bri], (size_t)bcv[bci] );
+						RSBENCH_STDOUT("# GUESS DATA:  guessed was                :	%zd	%zd\n", (size_t)br, (size_t)bc );
+						RSBENCH_STDOUT("# GUESS DATA:  performance diff from best :	%lg\n", (nrp-bperf)/bperf );
+						RSBENCH_STDOUT("# GUESS DATA:  performance diff from worst:	%lg\n", (nrp-wperf)/wperf );
+						if(cperf)
+						RSBENCH_STDOUT("# GUESS DATA:  performance diff over CSR:	%lg\n", (nrp-cperf)/cperf );
+						RSBENCH_STDOUT("# GUESS DATA:  best/guessed op matrix traffic amount:	%lg	%lg\n", bomta,omta);
+						RSBENCH_STDOUT("#GUESS_TEST_:%-20s\t%20s\t%zd\t%zd\t%zd\t%zd\t%zd\t%zd\n",
+							rsb__basename(filename),
+							rsb__sprint_matrix_implementation_code2(mtxAp,buf,flags),
+				(rsb_printf_int_t)((nrp>=bperf*.95) || (brv[bri]==br && bcv[bci]==bc)),	/* (fuzzy WIN) */
+				(rsb_printf_int_t)((nrp>=bperf) || (brv[bri]==br && bcv[bci]==bc)),	/* if 1, best blocking guess (WIN) */
+				(rsb_printf_int_t)(nrp>=bperf),			/* if 1, best performance guess */
+				(rsb_printf_int_t)(brv[bri]==br && bcv[bci]==bc),	/* if 1, best blocking guess */
+				(rsb_printf_int_t)(nrp>=cperf),	/* if 0, we lose over (our) plain CSR  */
+				(rsb_printf_int_t)(nrp> wperf)	/* if 0, we performed as the worst blocking! */
+							);
+					flags=oflags;
+
+					RSBENCH_STDOUT(	"#GUESS_TEST:%-20s\t%-20s"
+						"\t%10.2lf"
+						"\t%10.2lf"
+						"\t%zd" "\t%zd"
+						"\t%10.4lf" "\t%10.2lf" "\t%10.4lf" "\t%10.2lf" "\t%10.4lf" "\n"
+						,
+						rsb__basename(filename),
+						rsb__sprint_matrix_implementation_code2(mtxAp,buf,flags),	
+						/* grmflops */
+						raw_Mflops/op_t,
+						/* egfillin */
+						egfillin,
+						/* bbr */
+						(rsb_printf_int_t)brv[bri],
+						/* bbc */
+						(rsb_printf_int_t)bcv[bci],
+						/* bfillin */
+						bfillin,
+						/* brmflops */
+						bperf*bfillin,
+						/* ebfillin */
+						ebfillin,
+						/* csrmflops */
+						cperf,
+						/* maxfillin */
+						maxfillin);
+
+						flags=oflags;
+					}
+				
+
+					if(brvi==brl-1 && bcvi==bcl-1 && guess_blocking_test==1)
+					{
+						oflags=flags;
+						RSB_DO_FLAG_ADD(flags,RSB_FLAG_AUTO_BLOCKING);
+						guess_blocking_test++;
+						--bcvi;	/* another round :) */
+					}
+				} /* guess_blocking_test */
+		erri:
+			if(want_in_place_assembly && mtxAp)
+			{
+				rsb_time_t st = -rsb_time();
+				errval = rsb_mtx_switch_to_coo(mtxAp,&VA,&IA,&JA,RSB_FLAG_SORTED_INPUT),mtxAp=NULL;
+				st += rsb_time();
+				RSBENCH_STDOUT("# rsb_mtx_switch_to_coo time: %lg.\n",st);
+				if(RSB_SOME_ERROR(errval)) { RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+			}
+			RSB_MTX_FREE(mtxAp);
+			RSB_CONDITIONAL_FREE(lhs);
+			RSB_CONDITIONAL_FREE(rhs);
+
+			RSB_CONDITIONAL_FREE(p_r);
+			RSB_CONDITIONAL_FREE(p_c);
+			
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR(RSB_ERRM_ES);goto err;
+			}
+			if(brl==0 || bcl==0) break;
+		} /* ci : core (count) index */
+
+			if(want_verbose == RSB_BOOL_TRUE)
+			{
+            			RSBENCH_STDOUT("%%operation:matrix	CONSTRUCTOR[%d]	SPMV[%d]	SPMV[%d]	STSV[%d]	STSV[%d]\n",
+					ca[0], ca[0], ca[cl-1], ca[0], ca[cl-1]);
+            			RSBENCH_STDOUT("%%operation:%s	%lg	%lg	%lg	%lg	%lg\n",
+					rsb__basename(filename),sct,smt,pmt,sst,pst);
+            			RSBENCH_STDOUT("%%constructor:matrix	SORT[%d]	SCAN[%d]	SHUFFLE[%d]	INSERT[%d]\n",
+					ca[0],ca[0],ca[0],ca[0]);
+            			RSBENCH_STDOUT("%%constructor:%s	%lg	%lg	%lg	%lg\n",
+					rsb__basename(filename),sest,ssat,scpt,seit);
+			}
+		} /* ti (transposition index) */
+	}
+	else
+	{
+		RSBENCH_STDOUT("%s (spsv_uxua) : Please specify a matrix filename (with -f)\n",argv[0]);
+	}
+ 	RSBENCH_STDOUT("# so far, program took %.3lfs of wall clock time; ancillary tests %.3lfs; I/O %.3lfs; checks %.3lfs; conversions %.3lfs; rsb/mkl tuning %.3lfs/%.3lfs ",totprt + rsb_time(),totatt,totiot,totht,totct,tottt,totmt);
+	RSBENCH_STDOUT(".\n"); /* FIXME: this takes too much space here ! */
+	rsb__getrusage();
+done:
+frv:
+	if( !should_recycle_io )
+	{
+		RSBENCH_STDOUT("# Freeing I/O arrays.\n");
+		RSB_CONDITIONAL_FREE(IA);
+		RSB_CONDITIONAL_FREE(JA);
+		RSB_CONDITIONAL_FREE(VA);
+	}
+	
+	if(mtxAp && !should_recycle_matrix){RSB_MTX_FREE(mtxAp)}
+	if( brv != rua ) {RSB_CONDITIONAL_FREE(brv);}
+	if( bcv != cua ) {RSB_CONDITIONAL_FREE(bcv);}
+		RSBENCH_MAY_SQUIT(ret,{}) /* early end of program */
+		RSBENCH_MAY_TQUIT(ret,{}) /* early end of program */
+	}	/* typecodesi */
+	}	/* nrhsi */
+	}	/* incXi */
+	}	/* incYi */
+nfnm:	RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS;
+	}	/* filenamei */
+	RSBENCH_STDOUT("# benchmarking terminated --- finalizing run.\n");
+#if RSB_WANT_PERFORMANCE_COUNTERS_IN_RSBENCH 
+	errval = rsb_perf_counters_finalize();
+	if(RSB_SOME_ERROR(errval)) { RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+#endif
+ret:
+	errval = RSB_ERR_NO_ERROR;
+goto rret;
+err:
+	rsb_perror(NULL,errval);
+	errval = RSB_ERR_GENERIC_ERROR;
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_CONDITIONAL_FREE(JA);
+	RSB_CONDITIONAL_FREE(VA);
+	if(want_in_place_assembly && mtxAp)rsb_mtx_switch_to_coo(mtxAp,&VA,&IA,&JA,RSB_FLAG_SORTED_INPUT),mtxAp=NULL;
+	RSB_MTX_FREE(mtxAp);
+	if( brv != rua ) {RSB_CONDITIONAL_FREE(brv);}
+	if( bcv != cua ) {RSB_CONDITIONAL_FREE(bcv);}
+	if(RSB_SOME_ERROR(rsb_lib_exit(RSB_NULL_EXIT_OPTIONS)))return RSB_ERR_GENERIC_ERROR;
+rret:
+	if(want_perf_dump) 
+	{
+		RSBENCH_STDOUT("# ====== BEGIN Total summary record.\n");
+		errval = rsb__pr_dump(rspr, filenamea, ca, incXa, incYa, nrhsa, typecodes, NULL );
+		RSBENCH_STDOUT("# ======  END  Total summary record.\n");
+		if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		errval = rsb__pr_save(fprfn, rspr, filenamea, ca, incXa, incYa, nrhsa, typecodes, NULL, RSB_BOOL_TRUE );
+		if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		RSBENCH_STDOUT("# Removing the temporary record file %s.\n",cprfn);
+		remove(cprfn);
+	}
+	if( ca  != ca_ ) {RSB_CONDITIONAL_FREE(ca);}
+#if !RSB_RSBENCH_STATIC_FILENAMEA
+	/* if(filenamea!=&fnbufp)RSB_CONDITIONAL_FREE(filenamea); */
+	if(filenamea!=&fnbufp)free(filenamea); /* FIXME */
+#endif
+	if(nrhsa!=(&nrhs))RSB_CONDITIONAL_FREE(nrhsa); /* FIXME: they get allocated (and thus shall be deallocated) before init */
+	if(incXa!=(&incX))RSB_CONDITIONAL_FREE(incXa);
+ 	if(incYa!=(&incY))RSB_CONDITIONAL_FREE(incYa); 
+	if(want_likwid == RSB_BOOL_TRUE){RSB_LIKWID_MARKER_EXIT;} /* FIXME: and other cases ? */
+	if(want_verbose == RSB_BOOL_TRUE)
+		rsb__echo_timeandlabel(" terminating run at ","\n",&st);
+	return errval;
+}
+
+int rsb__main_block_partitioned_mat_stats(const int argc, rsb_char_t * const argv[])
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This function implements a complete program for using our variable block
+	 * rows sparse matrix storage as if it were a fixed block size format.
+	 * It is useful for benchmarking against fixed block sparse matrix codes.
+	 * 
+	 * This function will benchmark the "mat_stats" matrix operation.
+	 * */
+
+	/*
+	 * This example main program reads in a Matrix Market file in block format and multiplies it by a unit vector.
+	 **/
+	rsb_option options[] = {
+	    {"all-flags",	0 , NULL, 0x51},/* Q */  
+	    {"allow-any-transposition-combination",	0 , NULL, 0x61617463 },/* aatc */  
+	    {"alternate-sort",	no_argument, NULL , 0x4153},/* AS */
+	    {"auto-blocking",	0 , NULL, 0x41},/* A */
+	    {"be-verbose",		0, NULL, 0x76},	/* v */
+	    {"block-columnsize",	required_argument, NULL, 0x63},/* c */  
+	    {"block-rowsize",   required_argument, NULL, 0x72 },/* r */
+	    {"cache-blocking",	required_argument, NULL , 0x4342},/* CB */
+/*	    {"cache-flush",	no_argument, NULL, 0x4343},*/ /*   */
+	    {"column-expand",	required_argument, NULL, 0x6B},/* k */  
+	    {"compare-competitors",	no_argument, NULL, 0x6363},/* cc */  
+	    {"convert",	0, NULL, 0x4B},/* K */  
+/*	    {"convert",	required_argument, NULL, 0x4B},*//* K   */
+	    {"dense",	required_argument, NULL, 0x64 },   /* d */
+	    {"diagonal-dominance-check",	no_argument , NULL, 0x4444},/* DD */  /* new */
+	    {"dump-n-lhs-elements",	required_argument , NULL, 0x444444},/* DDD */  /* new */
+	    {"echo-arguments",	no_argument , NULL, 0x6563686f},/* echo */  /* new */
+	    {"estimate-samples",		required_argument, NULL, 0x53},	/* S */
+	    {"estimate-fillin",required_argument, NULL, 0x65},	/* e */
+	    {"flush-cache-in-iterations",	no_argument, NULL, 0x4343},/*  */  
+	    {"impatient",	no_argument, NULL, 0x696d7061},/* impa[tient] */  
+	    {"no-flush-cache-in-iterations",	no_argument, NULL, 0x434E},/*  */  
+	    {"flush-cache-around-loop",	no_argument, NULL, 0x434343},/*  */  
+	    {"want-ancillary-execs",	no_argument, NULL, 0x767646},/*  */  
+	    {"no-want-ancillary-execs",	no_argument, NULL, 0x42767646},/*  */  
+	    {"no-flush-cache-around-loop", no_argument	, NULL, 0x43434E},/*  */  
+	    {"want-no-recursive",	no_argument, NULL, 0x776e720a},/*  */  
+	    {"guess-blocking",	no_argument , NULL, 0x47},/* G */
+	    {"help",	no_argument , NULL, 0x68},	/* h */
+	    {"ilu0",	no_argument , NULL, 0x494B55},/* ILU */  /* new */
+	    {"incx",	required_argument, NULL, 0xb1bb0 },/* */  
+	    {"incy",	required_argument, NULL, 0xb1bb1 },/* */  
+	    {"in-place-assembly-experimental",	no_argument , NULL, 0x6970},/* i */  
+	    {"in-place-csr",	0 , NULL, 0x69},/* i */  
+	    {"in-place-permutation",	no_argument, NULL, 0x50},   /* P */
+#if RSB_WITH_LIKWID
+	    {"likwid",	no_argument, NULL, 0x6c696b77},   /* likw */
+#endif /* RSB_WITH_LIKWID */
+	    {"lower",	required_argument, NULL, 0x6c},   /* l */
+	    {"lower-dense",	required_argument, NULL, 0x6c64},   /* ld */
+	    {"generate-lowerband",	required_argument, NULL, 0x6c6c},   /* ll */
+	    {"gen-lband",	required_argument, NULL, 0x6c6c},   /* ll */
+	    {"generate-spacing",	required_argument, NULL, 0xbabb2 },   /* */
+	    {"matrix-dump",	0 , NULL, 0x44044},/* D */  
+	    {"matrix-dump-graph",	required_argument , NULL, 0x44047},/* DG */  
+	    {"matrix-dump-internals",	0 , NULL, 0x49049},/* I */  
+	    {"merge-experimental",	required_argument , NULL, 0x6d656578},/* meex */  
+	    {"split-experimental",	required_argument , NULL, 0x73706578},/* spex */  
+	    {"ms-experimental",	required_argument , NULL, 0x6d736578},/* msex */  
+	    {"matrix-filename",	required_argument, NULL, 0x66},/* f */  
+	    {"matrix-storage",	required_argument, NULL, 0x46},/* F */  
+	    {"matrix-time",	0 , NULL, 0x4D},/* M */  /* new */
+	    {"mem-hierarchy-info",	required_argument , NULL, 0x4D4D},/* MM */  /* new */
+	    {"max-runtime",	required_argument , NULL, 0x6d617275},/* maru */
+	    {"no-op",		0 , NULL, 0x4E},	/* N */
+	    {"notranspose",	no_argument, NULL, 0x5051},   /* do not transpose the operation */
+	    {"nrhs",	required_argument, NULL, 0x6e726873},   /* */
+	    {"one-nonunit-incx-incy-nrhs-per-type",	no_argument, NULL, 0x6e697270},   /* */
+	    RSB_BENCH_PROG_OPTS
+	    {"oski-benchmark",	0 , NULL, 0x42},/* B: only long option *//* comparative benchmarking against OSKI */
+	    {"out-lhs",		0 , NULL, 0x6F},/* o */	/* should accept an output file name, optionally */
+	    {"out-rhs",		0 , NULL, 0x6F6F},/* o */	/* should accept an output file name, optionally */
+	    {"override-matrix-name",	required_argument , NULL, 0x6F6D6E},/* omn */	
+	    {"pattern-mark",	0 , NULL, 0x70},/* p */
+	    {"pre-transpose",	no_argument, NULL, 0x5454},   /* transpose the matrix before assembly  */
+	    {"read-as-binary",		required_argument, NULL, 0x62},/* b */
+	    {"repeat-constructor",	required_argument , NULL, 0x4A4A},
+	    {"reuse-io-arrays",	no_argument , NULL, 0x726961}, /* ria */
+	    {"no-reuse-io-arrays",	no_argument , NULL, 0x6e726961 }, /* nria */
+	    {"reverse-alternate-rows",	no_argument , NULL, 0x4A4A4A},
+	    {"generate-upperband",	required_argument, NULL, 0x7575},   /* uu */
+	    {"gen-uband",	required_argument, NULL, 0x7575},   /* uu */
+	    {"generate-diagonal",	required_argument, NULL, 0x6464 },   /* dd */
+	    {"gen-diag",	required_argument, NULL, 0x6464 },   /* dd */
+	    {"zig-zag",	no_argument , NULL, 0x4A4A4A},
+	    {"subdivision-multiplier",	required_argument, NULL , 0x534D},/* SM */
+#if RSB_WANT_BOUNDED_BOXES
+	    {"bounded-box",	required_argument, NULL , 0x4242},/* BB */
+#endif /* RSB_WANT_BOUNDED_BOXES */
+	    {"max-nnz-samples",	required_argument, NULL, 0x73},	/* s */
+	    {"no-leaf-multivec",	no_argument, NULL , 0x6e6c6d6d},/* nlmm */
+	    {"with-leaf-multivec",	no_argument, NULL , 0x636c6d6d},/* wlmm */
+	    {"sort-after-load",	no_argument, NULL, 0x7373},/* ss */  
+	    {"skip-loading-symmetric-matrices",	 no_argument, NULL, 0x736c736d},/* slsm */  
+	    {"skip-loading-unsymmetric-matrices",no_argument, NULL, 0x736c756d},/* slum */  
+	    {"skip-loading-hermitian-matrices",no_argument, NULL, 0x736c686d},/* slhm */  
+	    {"skip-loading-not-unsymmetric-matrices",no_argument, NULL, 0x736c6e75},/* slnu */  
+	    {"skip-loading-if-more-nnz-matrices",required_argument, NULL, 0x736c6d6e},/* slmn */  
+	    {"skip-loading-if-less-nnz-matrices",required_argument, NULL, 0x736c6e6e},/* slnn */  
+	    {"skip-loading-if-more-filesize-kb-matrices",required_argument, NULL, 0x736c6d73},/* slms */  
+#ifdef RSB_HAVE_REGEX_H 
+	    {"skip-loading-if-matching-regex",required_argument, NULL, 0x736c6d72},/* slmr */  
+#endif /* RSB_HAVE_REGEX_H */
+	    {"skip-loading-if-matching-substr",required_argument, NULL, 0x736c7373},/* slss */  
+	    {"times",		required_argument, NULL, 0x74},/* t */  
+	    {"transpose-as",	required_argument, NULL, 0x5040},   /* do transpose the operation */
+	    {"transpose",	no_argument, NULL, 0x5050},   /* do transpose the operation */
+	    {"also-transpose",	no_argument, NULL, 0x4150},  /* N,T: do transpose the operation after no transposition */
+	    {"all-transposes",	no_argument, NULL, 0x616c6c74},  /* N,T,C */
+	    {"type",		required_argument, NULL, 0x54},/* T */  
+	    {"types",		required_argument, NULL, 0x54},/* T */  
+	    {"update",		0 , NULL, 0x55},	/* U */
+	    {"as-unsymmetric",		0 , NULL, 0x5555},	/* UU: TODO: insert such a test by default, in order to quantify the benefit of symmetry */
+	    {"as-symmetric",		0 , NULL, 0x5353},	/* SS */
+	    {"only-lower-triangle",		0 , NULL, 0x4F4C54},	/* OLT */
+   	    {"only-upper-triangle",		0 , NULL, 0x4F4554},	/* OUT */
+	    {"verbose",	no_argument , NULL, 0x56},/* V */
+	    {"want-io-only",	no_argument , NULL, 0x4949},/* --want-io-only */
+	    {"want-nonzeroes-distplot",	no_argument, NULL, 0x776E68},/* wnh */  
+	    {"want-accuracy-test",	no_argument, NULL, 0x776174},/* wat */  
+	    {"want-getdiag-bench",	no_argument , NULL, 0x774446},/* wde */  /* FIXME: obsolete ? */
+	    {"want-getrow-bench",	no_argument , NULL, 0x777246},/* wre */  /* FIXME: obsolete ? */
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+	    {"want-perf-counters",	no_argument , NULL, 0x707763},/* wpc */
+#endif
+	    {"want-print-per-subm-stats",	no_argument , NULL, 0x77707373},/* wpss */
+	    {"want-only-accuracy-test",	no_argument, NULL, 0x776F6174},/* woat */  
+	    {"want-autotune",	required_argument, NULL, 0x7772740a},/* wrt */  
+	    {"want-no-autotune",	no_argument, NULL, 0x776e7274},/* wnrt */  
+#if RSB_HAVE_METIS
+	    {"want-metis-reordering",	no_argument, NULL, 0x776d6272 },/* wmbr */  
+#endif
+	    {"want-mkl-autotune",	required_argument, NULL, 0x776d6174},/* wmat */  
+	    {"want-mkl-one-based-indexing",	no_argument, NULL, 0x776d6f62 },/* wmob */  
+	    {"want-unordered-coo-test",	no_argument, NULL, 0x775563},/* */  
+	    {"with-flags",	required_argument, NULL, 0x71},/* q */  
+	    {"write-as-binary",	required_argument, NULL, 0x77 }, /* w */
+	    {"write-as-csr",	required_argument, NULL,  0x63777273 }, /* wcsr */
+	    {"write-performance-record",	required_argument, NULL, 0x77707266 }, /* write performance record file  */
+	    {"performance-record-name-append",	required_argument, NULL, 0x77707261 }, /* ...append  */
+	    {"performance-record-name-prepend",	required_argument, NULL, 0x77707270 }, /* ...prepend  */
+	    {"write-no-performance-record",	no_argument, NULL, 0x776e7072 }, /* write no performance record */
+	    {"discard-read-zeros",	no_argument, NULL,  0x64697a65 }, /* dize */
+	    {"z-sorted-coo",	no_argument, NULL , 0x7A},/* z */
+	    {0,0,0,0}	};
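+	/*
+	 * Note on the table above: options lacking a short form pack an ASCII
+	 * mnemonic into their int value, so rsb_getopt_long() can dispatch on
+	 * them like on ordinary option characters; e.g.:
+	 *   0x736c736d == ('s'<<24)|('l'<<16)|('s'<<8)|'m'   i.e. "slsm",
+	 * the mnemonic of --skip-loading-symmetric-matrices.
+	 */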
+
+	rsb_nnz_idx_t nnz = 0;/* was 0 */
+	int c;
+	int opt_index = 0;
+
+	rsb_coo_idx_t *IA = NULL, *JA = NULL;
+	void *VA = NULL;
+
+	int g_estimate_matrix_construction_time = 0;
+	int g_all_flags = 0;
+	int g_sort_only = 0;
+	int repeat_construction = 1;	/* times to call the matrix constructor (the more times, the more accurate measurements) */
+
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_DEFAULT, typecode_old = RSB_NUMERICAL_TYPE_DEFAULT;
+	rsb_int ntypecodes = 0,typecodesi;
+	const rsb_int maxtypes = 2*RSB_IMPLEMENTED_TYPES;
+	rsb_type_t typecodes[maxtypes+1] ;
+
+	rsb_blk_idx_t br = 1;
+	rsb_blk_idx_t bc = 1;
+	char * bcs = NULL, *brs = NULL, *cns = NULL, *mhs = NULL;
+	rsb_blk_idx_t * brv = NULL;
+	rsb_blk_idx_t * bcv = NULL;
+	int brl = 0;
+	int bcl = 0;
+	rsb_thread_t ca_[1] = {1};
+	rsb_thread_t * ca = ca_;
+	rsb_thread_t cn = 1, ci = 0, cc = ca[ci];
+
+	int times = 100;	/* the default number of times to perform mat_stats */
+	rsb_coo_idx_t nrA = 0, ncA = 0, ndA = 0;
+	int filenamen = 0, filenamei = 0;
+#define RSB_RSBENCH_STATIC_FILENAMEA 1
+#if RSB_RSBENCH_STATIC_FILENAMEA
+#define RSB_RSBENCH_MAX_MTXFILES 256
+	const rsb_char_t *filenamea[RSB_RSBENCH_MAX_MTXFILES];
+#else
+	const rsb_char_t **filenamea = NULL;
+#endif
+	const rsb_char_t *filename = NULL;
+	const rsb_char_t *filename_old = NULL;
+	const rsb_char_t *usfnbuf = NULL;
+	rsb_char_t*fprfn = NULL, *cprfn = NULL, *apprfn = NULL, *ppprfn = NULL; /* final/checkpoint performance file name, append/prepend */
+	rsb_char_t fprfnb[RSB_MAX_FILENAME_LENGTH], cprfnb[RSB_MAX_FILENAME_LENGTH];/* final/checkpoint performance file name buffers */
+	rsb_char_t fnbuf[RSB_MAX_FILENAME_LENGTH];
+	rsb_char_t*fnbufp[1]={&(fnbuf[0])};
+	rsb_char_t * dump_graph_file=NULL;
+	rsb_flags_t flags_o = RSB_FLAG_NOFLAGS|RSB_FLAG_OWN_PARTITIONING_ARRAYS;
+/*	RSB_DO_FLAG_ADD(flags_o,RSB_FLAG_DISCARD_ZEROS)	;	*/ /* FIXME : EXPERIMENTAL (watch nnz count on a multi blocking run ...) */
+	rsb_flags_t flagsa[128] = {RSB_FLAG_NOFLAGS}; /* remaining elements are zero-initialized */
+	rsb_flags_t r_flags = RSB_FLAG_NOFLAGS; /* recycling flags */
+	int fn = 1, fi = 0;/* for flags */
+	int tn = 1, ti = 0;/* for transposition */
+	int g_debug = 0;
+	int be_verbose = 0;
+	int pattern_only = 0;
+	int dumpout = 0;
+	int dumpout_internals = 0, merge_experimental = 0, split_experimental = 0;
+	int just_enter_tuning = 1;
+	rsb_char_t * csr_w_filename = NULL;
+	rsb_char_t * b_w_filename = NULL;
+	rsb_char_t * b_r_filename = NULL;
+	int dumpvec = rsb_dumpvec_no;
+	struct rsb_mtx_t * mtxAp = NULL;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_blk_idx_t rua[] = RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[] = RSB_COLUMNS_UNROLL_ARRAY;
+	int guess_blocking_test = 0;		/* guess test stuff */
+	rsb_int want_column_expand = 0;
+
+	rsb_bool_t should_recycle_matrix = RSB_BOOL_FALSE; /* reuse the matrix across measurements */
+	rsb_bool_t should_recycle_io = RSB_BOOL_TRUE;/* reuse the input arrays */
+	rsb_bool_t g_allow_any_tr_comb = RSB_BOOL_FALSE; /* allow any transposition combination */
+	
+	int g_estimate_fillin = 0;
+	int want_percentage = 0;
+	double until_confidence = 0;
+
+	rsb_nnz_idx_t  max_nnzs = 0;
+	rsb_nnz_idx_t nnzn = 10;
+	rsb_nnz_idx_t * nnzs = NULL;
+	size_t * element_count = NULL;
+	size_t * block_count = NULL;
+	//rsb_nnz_idx_t i = 0;
+	struct rsb_mtx_partitioning_info_t pinfo;
+	rsb_trans_t transAo = RSB_DEFAULT_TRANSPOSITION;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	rsb_nnz_idx_t should_generate_dense = 0;
+	rsb_nnz_idx_t should_generate_dense_nc = 0;
+	rsb_nnz_idx_t should_generate_lband = -1, should_generate_uband = -1;
+	rsb_nnz_idx_t want_generated_spacing = 0;
+	rsb_bool_t want_only_star_scan = RSB_BOOL_FALSE;
+	rsb_blk_idx_t nrhs = 1, nrhsn = 1, nrhsi = 1, nrhsl = 1;
+	const char*nrhss = NULL;
+	rsb_blk_idx_t *nrhsa = NULL;
+	size_t outnri = 0, rhsnri = 0;
+	rsb_nnz_idx_t n_dumpres = 0;
+	rsb_nnz_idx_t n_dumprhs = 0;
+	rsb_bool_t ignore_failed_fio = RSB_BOOL_TRUE; /* FIXME 20140912 experimental */
+	rsb_bool_t want_convert = RSB_BOOL_FALSE;
+	rsb_bool_t want_update = RSB_BOOL_FALSE;
+	rsb_int_t want_impatiently_soon_pre_results = 0; /* FIXME: temporary */
+	rsb_bool_t want_inner_flush = RSB_BOOL_FALSE;
+	rsb_bool_t want_outer_flush = RSB_BOOL_TRUE;
+	rsb_bool_t want_ancillary_execs = RSB_BOOL_FALSE;
+	rsb_time_t st = RSB_TIME_ZERO;
+	rsb_time_t totiot = RSB_TIME_ZERO; /* total I/O time */
+	rsb_time_t totatt = RSB_TIME_ZERO; /* total ancillary tests time */ /* FIXME: is this complete ? */
+	rsb_time_t totct = RSB_TIME_ZERO; /* total conversions time */ /* FIXME: is this complete ? */
+	rsb_time_t tottt = RSB_TIME_ZERO; /* total tuning time */
+	rsb_time_t totht = RSB_TIME_ZERO; /* total checks time */ /* FIXME: is this complete ? */
+	rsb_time_t maxtprt = RSB_TIME_ZERO; /* max total program run time */
+	const rsb_time_t totprt = - rsb_time(); /* total program run time */
+	rsb_bool_t want_as_unsymmetric = RSB_BOOL_FALSE;
+	rsb_bool_t want_as_symmetric = RSB_BOOL_FALSE;
+	rsb_bool_t want_only_lowtri = RSB_BOOL_FALSE;
+	rsb_bool_t want_only_upptri = RSB_BOOL_FALSE;
+	rsb_bool_t want_sort_after_load = RSB_BOOL_FALSE;
+	rsb_bool_t want_slsm = RSB_BOOL_FALSE, want_slum = RSB_BOOL_FALSE, want_slnu = RSB_BOOL_FALSE, want_slhm = RSB_BOOL_FALSE;
+	rsb_nnz_idx_t want_slmn = 0,  want_slnn = 0,  want_slms = 0;
+#ifdef RSB_HAVE_REGEX_H
+	const rsb_char_t * want_slmr = NULL;
+#endif /* RSB_HAVE_REGEX_H */
+	const rsb_char_t * want_slss = NULL;
+	rsb_bool_t do_perform_ilu = RSB_BOOL_FALSE;
+	rsb_bool_t do_perform_ddc = RSB_BOOL_FALSE;
+	rsb_bool_t want_in_place_assembly = RSB_BOOL_FALSE;
+	rsb_bool_t want_accuracy_test = 0;	/* FIXME-EXPERIMENTAL */
+	rsb_bool_t want_nonzeroes_distplot = 0;	/* FIXME-EXPERIMENTAL */
+	rsb_bool_t want_getdiag_bench = 0;	/* FIXME-EXPERIMENTAL */
+	rsb_bool_t want_getrow_bench = 0;	/* FIXME-EXPERIMENTAL */
+	rsb_coo_idx_t mib = 0; /* MKL index base (FIXME: declared here and not within RSB_WANT_MKL because CSR copy made even with no MKL) */
+	rsb_time_t totmt = RSB_TIME_ZERO; /* total mkl/competitors (tuning) time */
+	rsb_bool_t want_perf_dump = RSB_BOOL_FALSE;
+	void*rspr = NULL; /* rsb sampled performance record structure pointer */
+
+	rsb_coo_idx_t incX = 1, incY = 1;
+	rsb_blk_idx_t incXn = 1, incXi = 1;
+	rsb_blk_idx_t incYn = 1, incYi = 1;
+	rsb_blk_idx_t *incXa = NULL, *incYa = NULL;
+	rsb_coo_idx_t ldX = 0, ldY = 0;
+	rsb_bool_t want_incX = RSB_BOOL_FALSE,want_incY = RSB_BOOL_FALSE;
+	rsb_bool_t want_verbose = RSB_BOOL_FALSE;
+	rsb_int_t want_verbose_tuning = 0;
+	rsb_bool_t want_transpose = RSB_BOOL_FALSE;
+	#if 1
+	const int max_io = 10;
+	struct rsb_initopts io={NULL,NULL,0,RSB_IO_SPECIFIER_SET},*iop=&io;
+	rsb_int_t should_use_cb_method = 0;
+	rsb_real_t subdivision_multiplier = 0.0;
+#if RSB_WANT_BOUNDED_BOXES
+	rsb_int_t want_bounded_box=1;
+#endif /* RSB_WANT_BOUNDED_BOXES */
+	rsb_int_t want_no_leaf_spmm=0;
+	void * io_values[max_io];
+	enum rsb_opt_t io_keys[max_io];
+	#else /* 1 */
+	struct rsb_initopts *iop = RSB_NULL_INIT_OPTIONS;
+	#endif /* 1 */
+	rsb_bool_t should_use_alternate_sort = RSB_BOOL_FALSE;
+	rsb_bool_t reverse_odd_rows = RSB_BOOL_FALSE;
+	rsb_bool_t zsort_for_coo = RSB_BOOL_FALSE;
+#ifdef RSB_WANT_OSKI_BENCHMARKING 
+	/* FIXME : unfinished */
+	rsb_time_t oski_t = RSB_TIME_ZERO,oski_m_t = RSB_TIME_ZERO,oski_a_t = RSB_TIME_ZERO,oski_t_t = RSB_TIME_ZERO;
+	oski_idx_t * Aptr=NULL;
+	oski_idx_t * Aind=NULL;
+	oski_value_t * Aval=NULL;
+	oski_matrix_t A_tunable;
+        oski_vecview_t x_view;
+        oski_vecview_t y_view;
+	void * Oval = NULL;
+	rsb_coo_idx_t *OIA=NULL,*OJA=NULL;
+        rsb_char_t oxform[256];
+        double oalpha = 1, obeta = 0;
+	rsb_bool_t want_oski_bench=0;
+	#ifdef RSB_HAVE_SETENV
+	setenv("OSKI_LUA_PATH",OSKI_LUA_PATH,0/* if 0, will not override. if 1, it would. */);
+	#endif /* RSB_HAVE_SETENV */
+#endif /* RSB_WANT_OSKI_BENCHMARKING */
+	rsb_time_t tinf = rsb__timer_granularity();
+	rsb_aligned_t pone[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_bool_t want_likwid = RSB_BOOL_FALSE;
+	rsb_time_t want_autotuner = RSB_NEGATED_EXAGGERATED_TUNER_TIMES, want_mkl_autotuner = RSB_NEGATED_EXAGGERATED_TUNER_TIMES;
+	rsb_bool_t want_io_only = RSB_BOOL_FALSE;
+	rsb_int wat = 1;	/* want autotuning threads choice */
+	rsb_int wai = 1;	/* want autotuning rounds */
+	char wav = 0x56;	/* want autotuning verbose */
+	int wavf = RSB_AUT0_TUNING_VERBOSE;
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+	int want_perf_counters = 0;
+#endif
+	rsb_bool_t want_print_per_subm_stats = RSB_BOOL_FALSE;
+#if RSB_HAVE_METIS
+	rsb_bool_t want_wmbr = RSB_BOOL_FALSE;
+#endif
+	rsb_bool_t want_recursive = RSB_BOOL_TRUE;
+
+	io.keys = io_keys;
+	io.values = io_values;
+	io.n_pairs = 0;
+
+    	for (;;)
+	{
+		c = rsb_getopt_long(argc,argv,RSB_SAMPLE_PROGRAM_OPTIONS_GET_FLAGS"b:w:BGht:f:r:c:vpn:MNS:Bk:KU" /* Flawfinder: ignore */
+		"s:e"
+		"o:O:"
+		, options, &opt_index);
+		if (c == -1)break;
+
+		RSB_DO_FLAG_ADD(flags_o,rsb__sample_program_options_get_flags(c,optarg));
+
+		switch (c)
+		{
+			case 0x62:	/* b */
+			b_r_filename = optarg;
+			break;
+			case  0xb1bb0:
+#if 0
+				incX = rsb__util_atoi(optarg);
+				if(incX<1){errval = RSB_ERR_BADARGS;goto err;}
+				if(incX>1)RSBENCH_STDOUT("# setting incX=%d\n",incX);
+				want_incX = RSB_BOOL_TRUE;
+#else
+			if(RSB_SOME_ERROR(rsb__util_get_bx_array(optarg,&incXn,&incXa)))
+				{RSB_ERROR(RSB_ERRM_ES);goto err;}
+#endif
+			break;
+			case  0x6970:
+				RSBENCH_STDOUT("# WARNING: in place assembly is an UNFINISHED, EXPERIMENTAL feature\n");
+				want_in_place_assembly = RSB_BOOL_TRUE;
+			break;
+			case  0xb1bb1:
+#if 0
+				incY = rsb__util_atoi(optarg);
+				if(incY<1){errval = RSB_ERR_BADARGS;goto err;}
+				if(incY>1)RSBENCH_STDOUT("# setting incY=%d\n",incY);
+				want_incY = RSB_BOOL_TRUE;
+#else
+			if(RSB_SOME_ERROR(rsb__util_get_bx_array(optarg,&incYn,&incYa)))
+				{RSB_ERROR(RSB_ERRM_ES);goto err;}
+#endif
+			break;
+			case 0x6c:
+			case 0x6c64: /* lower-dense */
+			{
+				should_generate_dense = - rsb__util_atoi(optarg); // FIXME ! PROBLEMS
+			}
+			break;
+			case 0x6c696b77:
+#if RSB_WITH_LIKWID
+				want_likwid = RSB_BOOL_TRUE;
+				#else /* RSB_WITH_LIKWID */
+				#endif /* RSB_WITH_LIKWID */
+			break;
+			case 0x6c6c:
+			{
+				should_generate_lband = rsb__util_atoi(optarg); // FIXME ! PROBLEMS
+				if(should_generate_uband==-1)should_generate_uband=0;
+			}
+			break;
+			case 0x7575:
+			{
+				should_generate_uband = rsb__util_atoi(optarg); // FIXME ! PROBLEMS
+				if(should_generate_lband==-1)should_generate_lband=0;
+			}
+			break;
+			case 0x6464: /* gen-diag */
+			{
+				should_generate_uband = 0;
+				should_generate_lband = 0;
+				should_generate_dense = rsb__util_atoi(optarg); // FIXME ! PROBLEMS
+			}
+			break;
+			case 0xbabb2:
+			{
+				want_generated_spacing = rsb__util_atoi(optarg);
+			}
+			break;
+			case 0x6e697270:
+			want_only_star_scan = RSB_BOOL_TRUE;
+			break;
+			case 0x64: /* dense */
+			{
+				/* should_generate_dense = rsb__util_atoi(optarg); */  // FIXME ! PROBLEMS
+				int sargs = sscanf(optarg,"%dx%d",&should_generate_dense,&should_generate_dense_nc);
+				if( should_generate_dense_nc == 0)
+					should_generate_dense_nc = should_generate_dense;
+				/* RSBENCH_STDOUT("# Requested generation of a %d by %d matrix\n",should_generate_dense,should_generate_dense_nc); */
+			}
+			break;
+			/* FIXME : please note that specifying -r or -c two or more times will cause memory leaks */
+			case 0x72:/* r */
+			brs=optarg;
+			break;
+			case 0x63: /* c */
+			bcs=optarg;
+			break;
+			case 0x42: /* oski : B */
+#ifdef RSB_WANT_OSKI_BENCHMARKING 
+			want_oski_bench = RSB_BOOL_TRUE;
+#else /* RSB_WANT_OSKI_BENCHMARKING */
+			RSB_ERROR("Sorry, OSKI comparative benchmarking was opted out at compile time\n");
+			goto err;
+#endif /* RSB_WANT_OSKI_BENCHMARKING */
+			break;
+			case 0x61617463:
+			g_allow_any_tr_comb = RSB_BOOL_TRUE;
+			break;
+			case 0x51: /* Q (do not ask me why) */
+			g_all_flags = 1;
+			break;
+			case 0x44044: /* D */
+			dumpout = 1;
+			break;
+			case 0x5040: /*  */
+			transAo = rsb__do_transposition_from_char(*optarg);	/* */
+			break;
+			case 0x4150:
+			tn = 2;
+			break;
+			case 0x616c6c74:
+			tn = 3;
+			break;
+			case 0x5050: /*  */
+			transAo = rsb__do_transpose_transposition(transAo);
+			break;
+			case 0x5051: /*  */
+			transAo = RSB_TRANSPOSITION_N;
+			break;
+			case 0x6e726873: /*  */
+#if 0
+			nrhs = rsb__util_atoi(optarg);
+			/* if(nrhs>1){ RSB_ERROR("Sorry, nrhs > 1 still unsupported!\n"); goto err; } */
+#else
+			nrhss = optarg;
+			if(RSB_SOME_ERROR(rsb__util_get_bx_array(nrhss,&nrhsn,&nrhsa)))
+				{RSB_ERROR(RSB_ERRM_ES);goto err;}
+#endif
+
+			break;
+			case 0x5454: /*  */
+			want_transpose = !want_transpose;
+			break;
+			case 0x44047: /* DG */
+			dump_graph_file = optarg;
+			break;
+			case 0x49049: /* I */
+			dumpout_internals = 1;
+			break;
+			case 0x6d656578: /* meex */
+			merge_experimental = rsb__util_atoi(optarg);
+			RSB_ASSIGN_IF_ZERO(merge_experimental,RSB_CONST_MS_AT_AUTO_STEPS);
+			break;
+			case 0x73706578: /* spex */
+			split_experimental = rsb__util_atoi(optarg);
+			RSB_ASSIGN_IF_ZERO(split_experimental,RSB_CONST_MS_AT_AUTO_STEPS);
+			break;
+			case 0x6d736578: /* msex */
+			merge_experimental = split_experimental = rsb__util_atoi(optarg);
+			RSB_ASSIGN_IF_ZERO(merge_experimental,RSB_CONST_MS_AT_AUTO_STEPS);
+			RSB_ASSIGN_IF_ZERO(split_experimental,RSB_CONST_MS_AT_AUTO_STEPS);
+			break;
+			case 0x4444 : /* DD */
+			do_perform_ddc = RSB_BOOL_TRUE;
+			break;
+			case 0x444444 : /* DDD */
+			n_dumprhs = n_dumpres = rsb__util_atoi(optarg);
+			break;
+			case 0x6563686f: /* echo */
+			{
+				rsb_int argi=0;
+				if(argc>0) printf("#args: %s",argv[0]);
+				for(argi=1;argi<argc;++argi)
+					printf(" %s",argv[argi]);
+				printf("\n");
+			}
+			break;
+			case 0x494B55 : /* ILU */
+			do_perform_ilu = RSB_BOOL_TRUE;
+			break;
+			case 0x696d7061: /* */
+			want_impatiently_soon_pre_results = 1;
+			break;
+			case 0x4343: /* */
+			want_inner_flush = RSB_BOOL_TRUE;
+			break;
+			case 0x434E: /* */
+			want_inner_flush = RSB_BOOL_FALSE;
+			break;
+			case 0x434343: /*  */
+			want_outer_flush = RSB_BOOL_TRUE;
+			break;
+			case 0x43434E: /*  */
+			want_outer_flush = RSB_BOOL_FALSE;
+			break;
+			case 0x776e720a: /*  */
+			want_recursive = RSB_BOOL_FALSE;
+			break;
+			case 0x4D: /* M */
+			g_estimate_matrix_construction_time=1;
+			break;
+			case 0x65: /* e */
+			g_estimate_fillin=1;
+			break;
+			case 0x7A:
+			zsort_for_coo = RSB_BOOL_TRUE;
+			RSBENCH_STDOUT("# WARNING: the now active Z sort feature will only apply to COO submatrices\n");
+			break;
+			case 0x726961:
+			RSBENCH_STDOUT("# enabling the reuse of I/O arrays (e.g. across type transitions)\n");
+			should_recycle_io = RSB_BOOL_TRUE;
+			break;
+			case 0x6e726961:
+			RSBENCH_STDOUT("# disabling the reuse of I/O arrays (e.g. across type transitions)\n");
+			should_recycle_io = RSB_BOOL_FALSE;
+			break;
+			case 0x4A4A4A:
+			reverse_odd_rows = RSB_BOOL_TRUE;
+			RSBENCH_STDOUT("# WARNING: the row reversal feature only applies to CSR submatrices, and to indices only\n");
+			break;
+			case 0x6F6D6E:
+			usfnbuf = optarg;
+			break;
+			case 0x4A4A:
+			repeat_construction = rsb__util_atoi(optarg);
+			if(repeat_construction<1)
+			{
+				RSB_ERROR("Constructor repetition times should be a positive number!\n");goto err;
+			}
+			break;
+			case 0x4342: /* CB */
+			should_use_cb_method = rsb__util_atoi(optarg);
+			break;
+			case 0x4153: /* AS */
+			should_use_alternate_sort = RSB_BOOL_TRUE;
+			break;
+			case 0x534D: /* SM */
+			subdivision_multiplier = rsb__util_atof(optarg);
+			break;
+#if RSB_WANT_BOUNDED_BOXES
+			case 0x4242: /* BB */
+			want_bounded_box = rsb__util_atoi(optarg);
+			break;
+#endif /* RSB_WANT_BOUNDED_BOXES */
+			case 0x6e6c6d6d: /* nlmm */
+			want_no_leaf_spmm = /*rsb__util_atoi(optarg)*/ -1;
+			break;
+			case 0x636c6d6d: /* wlmm */
+#if RSB_ENABLE_INNER_NRHS_SPMV
+			want_no_leaf_spmm = 0;
+#else
+			RSB_ERROR("Cannot activate the RSB_IO_WANT_LEAF_LEVEL_MULTIVEC option because RSB_ENABLE_INNER_NRHS_SPMV is opted out!\n");goto err;
+#endif
+			break;
+			case 0x4D4D: /* MM */
+			mhs = optarg;
+			break;
+			case 0x6d617275:
+			maxtprt = rsb__util_atof(optarg);
+			maxtprt = RSB_MAX( RSB_TIME_ZERO, maxtprt  );
+			break;
+			case 0x6F: /* o */
+			dumpvec = rsb_dumpvec_res;
+			break;
+			case 0x6F6F: /* o */
+			dumpvec = rsb_dumpvec_rhs;
+			break;
+			case 0x70: /* p */
+			pattern_only = 1;
+			break;
+			case 0x4E: /* N */
+			g_sort_only = 1;
+			break;
+			case 0x73: /* s */
+			/* FIXME : BROKEN! */
+			max_nnzs = rsb__util_atonnz(optarg);
+			if(*optarg && optarg[rsb__util_strlen(optarg)-1]==0x25)want_percentage=1;/* 0x25 == % */
+			break;
+			case 0x53: /* S */
+			nnzn = rsb__util_atonnz(optarg);
+			if(nnzn<1){RSB_ERROR(RSB_ERRM_ES);goto err;}
+			break;
+			case 0x7373: /* ss */
+			want_sort_after_load = RSB_BOOL_TRUE;
+			break;
+			case 0x736c736d: /* slsm */
+			want_slsm = RSB_BOOL_TRUE;
+			break;
+			case 0x736c756d: /* slum */
+			want_slum = RSB_BOOL_TRUE;
+			break;
+			case 0x736c686d: /* slhm */
+			want_slhm = RSB_BOOL_TRUE;
+			break;
+			case 0x736c6e75: /* slnu */
+			want_slnu = RSB_BOOL_TRUE;
+			break;
+			case 0x736c6d6e: /* slmn */
+			want_slmn = rsb__util_atoi_km10(optarg);
+			break;
+			case 0x736c6e6e: /* slnn */
+			want_slnn = rsb__util_atoi_km10(optarg);
+			break;
+			case 0x736c6d73: /* slms */
+			want_slms = rsb__util_atoi_km2(optarg);
+			break;
+#ifdef RSB_HAVE_REGEX_H
+			case 0x736c6d72: /* slmr */
+			want_slmr = (optarg);
+			break;
+#endif /* RSB_HAVE_REGEX_H */
+			case 0x736c7373: /* slss */
+			want_slss = (optarg);
+			break;
+			case 0x74: /* t */
+			times = rsb__util_atoi(optarg);
+			break;
+			case 0x47: /* G */
+			guess_blocking_test = 1;
+			break;
+			case 0x54: /* T */
+			{
+				const char*toa = optarg;
+				ntypecodes=0; /* this neutralizes former -T ... option */
+				/* if( *optarg == 0x3A || *optarg == 0x2A ) */ /* : or * aka colon or asterisk */
+				if( ( ! isalpha(*optarg) ) || ( strstr(optarg,"all") != NULL ) )
+					toa = RSB_NUMERICAL_TYPE_PREPROCESSOR_SYMBOLS ;
+				for(;*toa;++toa)
+				if(isalpha(*toa))
+				{
+					if(ntypecodes<maxtypes)
+						typecodes[ntypecodes++]=typecode=toupper(*toa);
+					else
+					{
+						RSB_ERROR("Up to %d types supported! P.s.: Use a punctuation symbol to ask for all supported types.\n",maxtypes);
+						goto err;
+					}
+				}
+				typecodes[ntypecodes] = RSB_NUL;
+			}
+			break;
+			case 0x56: /* V */
+			want_verbose = RSB_BOOL_TRUE;
+			want_verbose_tuning ++;
+			break;
+			case 0x4949: /* II */
+			want_io_only = RSB_BOOL_TRUE;
+			break;
+			case 0x66: /* f */
+			filename = optarg;
+#if RSB_RSBENCH_STATIC_FILENAMEA
+#define RSB_RSBENCH_ADDF(FILENAME)	if(filenamen<RSB_RSBENCH_MAX_MTXFILES)filenamea[filenamen++] = (FILENAME); else {errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR("Please increase RSB_RSBENCH_MAX_MTXFILES (%d) and recompile !!\n",RSB_RSBENCH_MAX_MTXFILES);goto err;}
+#else
+ /* FIXME: for some reason, this seems to break e.g.  ./rsbench -oa -Ob --nrhs 1,2 -f pd.mtx -f A.mtx.
+    Of course this is wrong also w.r.t. rsb_calloc/rsb_lib_init, but that is not a problem.
+    Using calloc / realloc does not solve the problem.  */
+#define RSB_RSBENCH_ADDF(FILENAME)		if(filenamen==0) \
+				filenamea = rsb__calloc(sizeof(filenamea)*(filenamen+1)); \
+			else \
+				filenamea = rsb__do_realloc(filenamea, sizeof(filenamea)*(filenamen+1), sizeof(filenamea)); \
+			filenamea[filenamen++] = (FILENAME);
+#endif
+			RSB_RSBENCH_ADDF(filename) /* FIXME */
+			break;
+			case 0x4B: /* K */
+			want_convert = RSB_BOOL_TRUE; /* FIXME: ignoring argument */
+			break;
+			case 0x55: /* U */
+			want_update = RSB_BOOL_TRUE; /* FIXME: ignoring argument */
+			break;
+			case 0x5353: /* SS */
+			want_as_symmetric = RSB_BOOL_TRUE;
+			break;
+			case 0x5555: /* UU */
+			want_as_unsymmetric = RSB_BOOL_TRUE;
+			break;
+			case 0x4F4C54: /* OLT */
+			want_only_lowtri = RSB_BOOL_TRUE;
+			break;
+			case 0x4F4554: /* OUT */
+			want_only_upptri = RSB_BOOL_TRUE;
+			break;
+			case 0x6363:
+			/* this flag activates all interfaced libraries (if any) */
+			break;
+			case 0x6B: /* ncA */
+			want_column_expand = rsb__util_atoi(optarg);
+			break;
+			case 0x6E: /* n */
+			cns = optarg; /* cores (threads) numbers (specification) string */
+			break;
+			case 0x75 :	/* u */
+			until_confidence = rsb__util_atof(optarg);
+			break;
+			case 0x76: /* v: be-verbose */
+			be_verbose = 1;
+			break;
+			case 0x774446:	/* wde */
+			want_getdiag_bench = 1;
+			break;
+			case 0x776E68:	/* wnh */
+			want_nonzeroes_distplot = 1;
+			break;
+			case 0x777246:	/* wre */
+			want_getrow_bench = 1;
+			break;
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+			case 0x707763:	/* wpc */
+			want_perf_counters = 1; /* 1 is what user wants; 2 is for debug purposes */
+			break;
+#endif
+			case 0x77707373:	/* wpss */
+			want_print_per_subm_stats = RSB_BOOL_TRUE;
+			break;
+			case 0x776F6174:	/* woat */
+			want_accuracy_test = 2;
+			break;
+			case 0x776e7274:	/* wnrt */
+			wai=wat=0;
+			want_autotuner = merge_experimental = split_experimental = RSB_NEGATED_EXAGGERATED_TUNER_TIMES;
+			break;
+			case 0x7772740a:	/* wrt */
+			/* want_autotuner = rsb__util_atof(optarg); */
+			{
+				char wavv = 0x0;
+				int sargs = sscanf(optarg,"%lfs%dx%dt%c%c",&want_autotuner,&wai,&wat,&wav,&wavv);
+
+				if(!*optarg)
+					sargs = 0;
+				RSBENCH_STDOUT(" Passed %d arguments via autotuning string \"%s\" (an empty string requests defaults)\n",sargs,optarg);
+				if(sargs < 0)
+				{
+					RSBENCH_STDOUT("Wrong autotuning string detected!\n");
+					rsb_test_help_and_exit(argv[0],options, 0);
+					exit(0);
+				}
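+				/*
+				 * The autotuning string was parsed above as "%lfs%dx%dt%c%c": e.g.
+				 * "2.0s3x4tVV" requests ~2.0 s per round, 3 rounds, 4 threads,
+				 * doubly verbose. The fall-through cases below fill in defaults
+				 * for whatever trailing fields sscanf did not match.
+				 */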
+				switch(sargs)
+				{
+					case(EOF):
+					case(0):
+						want_autotuner = 10.0;
+					case(1):
+						wai = 1;
+					case(2):
+						wat = 0;
+					case(3):
+						wav = 0;
+					case(4):
+						wavv = 0;
+					case(5):
+					break;
+				}
+				/* RSBENCH_STDOUT("Got an autotuning string: %lfs%dx%dt%c%c\n",want_autotuner,wai,wat,wav,wavv); */
+				if(toupper(wav)==0x56) /* V */
+					wavf = RSB_AUT0_TUNING_VERBOSE;
+				else
+					wavf = RSB_AUT0_TUNING_SILENT ;
+				if(toupper(wavv)==0x56) /* V */
+					wavf++;
+				if(wai>RSB_CONST_MAX_TUNING_ROUNDS)
+				{
+					RSBENCH_STDOUT("Restricting the number of tuning rounds to %d (%d is too much!).\n",RSB_CONST_MAX_TUNING_ROUNDS,wai);
+					wai = RSB_CONST_MAX_TUNING_ROUNDS;
+				}
+				RSBENCH_STDOUT("Will invoke autotuning for ~%lf s x %d rounds, specifying verbosity=%d and threads=%d (threads > 0 means no structure tuning; 0 means structure tuning only; < 0 means tuning of both, with the negated value as a thread count suggestion).\n",want_autotuner,wai,wavf,wat);
+			}
+			want_mkl_autotuner = want_autotuner;
+			break;
+#if RSB_HAVE_METIS
+			case 0x776d6272:	/* wmbr */
+			want_wmbr = RSB_BOOL_TRUE;
+			break;
+#endif
+			case 0x776d6174:	/* wmat */
+			sscanf(optarg,"%lf",&want_mkl_autotuner);
+			want_mkl_autotuner = RSB_MAX(1.0,want_mkl_autotuner); /* FIXME: actual value is unimportant as long as it is positive ! */
+			break;
+			case 0x776d6f62:	/* wmob */
+			mib = 1;
+			break;
+			case 0x776174:	/* wat */
+			want_accuracy_test = 1;
+			break;
+			case 0x767646:	/* wae */
+			want_ancillary_execs = RSB_BOOL_TRUE;
+			break;
+			case 0x42767646:	/* nwae */
+			want_ancillary_execs = RSB_BOOL_FALSE;
+			break;
+			case 0x77:	/* w */
+			b_w_filename = optarg;
+			break;
+			case 0x63777273:	/* wcsr */
+			csr_w_filename = optarg;
+			break;
+			case 0x77707266:
+			fprfn = optarg;
+			want_perf_dump = RSB_BOOL_TRUE;
+			if(optarg && !*optarg)
+				fprfn = NULL;
+			break;
+			case 0x776e7072:
+			fprfn = NULL;
+			want_perf_dump = RSB_BOOL_FALSE;
+			break;
+			case 0x77707261:
+			apprfn = optarg;
+			break;
+			case 0x77707270:
+			ppprfn = optarg;
+			break;
+			case 0x64697a65 :	/* dize */
+			RSB_DO_FLAG_ADD(flags_o,RSB_FLAG_DISCARD_ZEROS);
+			break;
+			case 0x68: /* h */
+			/* should use rsb_test_help_and_exit */
+			RSBENCH_STDERR(
+				"%s "RSB_INFOMSG_SAK".\n"
+				"You can use it to perform sparse matrix - unitary vector multiplication, "
+				"specifying the blocking parameters and the number of times to perform the multiplication.\n"
+				"\n"
+				"Additional debugging flags (-d, -p) are present.\n"
+				"\n"
+				"Usage : %s [OPTIONS]\n where OPTIONS are taken from "
+				"[ -f filename ] \n"
+				"[ -F matrix_storage=[b|c|bc] ] \n"
+				"[ -r br ] \n"
+				"[ -c bc ] \n"
+				"[ -t TIMES ]\n"
+				"[ -n OPENMP_THREADS ]\n"
+				"[ -T ( S | D | I | C ) /* float, double, integer, character */ ] \n"
+				"[ -s /* will internally sort out nnzs */ ] \n"
+				"[ -p /* will set to 1 nonzeros */ ] \n"
+				"[-d /* if debugging on */]: \n"
+				"[-A /* for auto-blocking */]: \n"
+				"[ -h ] \n"
+				"\n"
+				"Please note that not all of the suggested numerical types may be compiled in and/or work well. The default is double.\n"
+				"\n"
+				"\n"
+				"e.g.: %s -f raefsky4.mtx -t 10 -T :   # 10 times for each of the supported numerical types\n",
+				argv[0],
+				argv[0],
+				argv[0]);
+			rsb_test_help_and_exit(argv[0],options, 0);
+			exit(0);
+	    	}
+	}
+
+	if( (!RSB_DO_FLAG_HAS(flags_o,RSB_FLAG_QUAD_PARTITIONING)) && want_recursive != RSB_BOOL_FALSE )
+	{
+		RSB_WARN("Assuming a recursive matrix structure is requested...\n");
+		RSB_DO_FLAG_ADD(flags_o,RSB_FLAG_QUAD_PARTITIONING);
+	}
+	for (c = optind; c < argc; c++)                                                     
+	{
+		RSB_RSBENCH_ADDF(argv[c])
+	}
+	if(want_verbose == RSB_BOOL_TRUE)
+	{
+		rsb_char_t cbuf[RSB_MAX_COMPILE_COMMAND_LENGTH];
+		rsb__echo_timeandlabel(" beginning run at ","\n",&st);
+		rsb__echo_cargs(argc, argv);
+		errval = rsb__do_lib_get_info_str(0, &cbuf[0], sizeof(cbuf)-1);
+		if(RSB_SOME_ERROR(errval))
+			errval = RSB_ERR_NO_ERROR;
+		else
+			RSBENCH_STDOUT("# compiled with: %s\n",cbuf);
+	}
+	printf("# average timer granularity: %2.3lg s\n",tinf);
+	if(want_perf_dump)
+	{
+		if(!fprfn)
+		{
+			rsb__impcdstr(fprfnb+strlen(fprfnb),"rsbench_pr",".rpr",ppprfn,apprfn);
+			fprfn = fprfnb;
+		}
+		if(!cprfn)
+			rsb__sprintf(cprfnb,"%s.tmp",fprfn),
+			cprfn = cprfnb;
+		printf("# Will write a final performance record to file %s and periodic checkpoints to %s\n",fprfn,cprfn);
+	}
+	if( maxtprt > RSB_TIME_ZERO )
+		printf("# If the program run time exceeds %2.3lg s, early termination will be attempted.\n",maxtprt );
+
+	RSBENCH_STDOUT("# will %s""perform ancillary tests.\n", want_ancillary_execs ?"":"NOT ");
+	RSBENCH_STDOUT("# will flush cache memory: %s between each operation measurement series, and %s between each operation.\n", want_outer_flush?"":"NOT", want_inner_flush?"":"NOT");
+	RSBENCH_STDOUT("# will %s any zero encountered in the matrix.\n", ( RSB_DO_FLAG_HAS(flags_o,RSB_FLAG_DISCARD_ZEROS) )?"discard":"keep");
+	if( nrhsa == NULL ) nrhsa = &nrhs;
+	if( incXa == NULL ) incXa = &incX;
+	if( incYa == NULL ) incYa = &incY;
+	if(want_likwid == RSB_BOOL_TRUE){RSB_LIKWID_MARKER_INIT;}
+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+	if(ntypecodes==0)
+		typecode = RSB_NUMERICAL_TYPE_DOUBLE ;
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+	if(ntypecodes==0)
+	{
+		typecodes[ntypecodes++] = typecode;
+		typecodes[ntypecodes] = RSB_NUL;
+	}
+
+	io.n_pairs=0;
+	if(should_use_alternate_sort)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&should_use_cb_method;
+		io.keys[io.n_pairs]=RSB_IO_WANT_SORT_METHOD;
+		io.n_pairs++;
+	}
+	if(should_use_cb_method!=0)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&should_use_cb_method;
+		io.keys[io.n_pairs]=RSB_IO_WANT_CACHE_BLOCKING_METHOD;
+		io.n_pairs++;
+	}
+	if(mhs!=NULL)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&mhs;
+		io.keys[io.n_pairs]=RSB_IO_WANT_MEMORY_HIERARCHY_INFO_STRING;
+		io.n_pairs++;
+	}
+	if(subdivision_multiplier!=0.0)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&subdivision_multiplier;
+		io.keys[io.n_pairs]=RSB_IO_WANT_SUBDIVISION_MULTIPLIER;
+		io.n_pairs++;
+	}
+#if RSB_WANT_BOUNDED_BOXES
+	if(want_bounded_box==0)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&want_bounded_box;
+		io.keys[io.n_pairs]=RSB_IO_WANT_BOUNDED_BOX_COMPUTATION;
+		io.n_pairs++;
+	}
+#endif /* RSB_WANT_BOUNDED_BOXES */
+	if(want_no_leaf_spmm!=0)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&want_no_leaf_spmm;
+		io.keys[io.n_pairs]=RSB_IO_WANT_LEAF_LEVEL_MULTIVEC;
+		io.n_pairs++;
+	}
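+	/*
+	 * Each tunable recognized above appended a (key,value) pair to the io
+	 * struct; rsb_lib_init(iop) below thus only receives the options the user
+	 * actually set (iop points to io, or to RSB_NULL_INIT_OPTIONS when the
+	 * alternate preprocessor branch above is compiled in).
+	 */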
+
+#ifdef RSB_HAVE_UNISTD_H
+{
+	extern char **environ;
+	char **me = NULL;
+	rsb_int_t rpevc = 0; /* RSB_ prefixed environment variables count */
+
+	for(me=environ;*me;++me)
+		if( strstr(*me,"RSB_") == *me )
+			rpevc++;
+
+	if( rpevc )
+	{
+		RSB_STDOUT("# The user specified %d RSB_ prefixed environment variables:\n",rpevc);
+		for(me=environ;*me;++me)
+			if( strstr(*me,"RSB_") == *me )
+				RSB_STDOUT("#  export %s\n",*me);
+	}
+}
+#endif /* RSB_HAVE_UNISTD_H */
+	
+	
+	if( rsb__getenv("KMP_AFFINITY") )
+		RSB_STDOUT("# export KMP_AFFINITY=%s\n",rsb__getenv("KMP_AFFINITY"));
+	if( rsb__getenv("OMP_PROC_BIND") )
+		RSB_STDOUT("# export OMP_PROC_BIND=%s\n",rsb__getenv("OMP_PROC_BIND"));
+	if( rsb__getenv("OMP_NUM_THREADS") )
+		RSB_STDOUT("# export OMP_NUM_THREADS=%s\n",rsb__getenv("OMP_NUM_THREADS"));
+
+	if( want_verbose != RSB_BOOL_FALSE )
+		RSBENCH_STDOUT("# user specified a verbosity level of %d (each --verbose occurrence counts +1)\n",want_verbose_tuning );
+	else
+		RSBENCH_STDOUT("# user did not specify any verbosity level (each --verbose occurrence counts +1)\n");
+
+	if((errval = rsb_lib_init(iop))!=RSB_ERR_NO_ERROR)
+	{
+		RSB_ERROR("Error while initializing the library.");
+		goto err;
+	}
+#if RSB_WANT_PERFORMANCE_COUNTERS_IN_RSBENCH 
+	if((errval = rsb_perf_counters_init())!=RSB_ERR_NO_ERROR)
+	{
+		RSBENCH_STDERR("problem initializing performance counters (rsb_perf_counters_init gave %d)\n",(int)errval);
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+#endif
+
+	if( RSB_MKL_APPROPRIATE_AT_TIME_SPEC( want_autotuner ) || RSB_MKL_APPROPRIATE_AT_TIME_SPEC( merge_experimental ) || RSB_MKL_APPROPRIATE_AT_TIME_SPEC( split_experimental ) )
+	{
+		RSB_STDOUT("# auto-tuning oriented output implies times==0 iterations and sort-after-load.\n");
+		times = 0;
+		/* if(want_verbose) */
+		want_impatiently_soon_pre_results = 1;
+		want_sort_after_load = RSB_BOOL_TRUE;
+	}
+	else
+	if( times < 1 )
+	{
+		RSB_STDOUT("# The iteration times should be specified as a positive number!\n");
+		RSB_ERROR(RSB_ERRM_ES);
+		goto err;
+	}
+	else
+		RSB_STDOUT("# Will measure on times=%d iterations.\n",times);
+
+	if( 0 == filenamen )
+#if RSB_RSBENCH_STATIC_FILENAMEA
+	       	filenamea[0] = fnbufp[0];
+#else
+	       	filenamea = &fnbufp;
+#endif
+	filenamen = RSB_MAX(1,filenamen);
+
+	if(cns)
+	{
+		ca = NULL;
+		cn = 0;
+		if(RSB_SOME_ERROR(rsb__util_get_bx_array(cns,&cn,&ca)))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+	}
+	else
+	{
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+		/* #define rsb_get_max_threads omp_get_max_threads */
+		cn = 1;
+		ca_[0] = omp_get_max_threads ();
+		RSBENCH_STDOUT("# User did not specify threads; assuming %d.\n", cn );
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+	}
+
+
+
+	if(want_perf_dump) 
+		rsb__pr_init(&rspr, NULL, filenamen, cn, incXn, incYn, nrhsn, ntypecodes, tn);
+
+	for(     filenamei=0;     filenamei<filenamen+want_impatiently_soon_pre_results  ;++filenamei     )
+	{
+		if( filenamea && ( filenamea[filenamei] != filename_old) && filename_old && want_impatiently_soon_pre_results && want_perf_dump && filenamei>0 && filenamen>1) 
+		{
+			int filenameif = filenamei-1;
+			RSBENCH_STDOUT("# ====== BEGIN Impatient results record for matrix %d/%d: %s.\n",filenamei,filenamen,rsb__basename(filename_old));
+			errval = rsb__pr_dump_inner(rspr, filenamea, ca, incXa, incYa, nrhsa, typecodes, NULL,&filenameif, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, RSB_FLAG_NOFLAGS, RSB_FLAG_NOFLAGS, NULL);
+			RSBENCH_STDOUT("# ======  END  Impatient results record for matrix %d/%d: %s.\n",filenamei,filenamen,rsb__basename(filename_old));
+			if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+			if( filenameif > 0 && filenameif < filenamen-1) /* not after the first and not at the last */
+			{
+				RSBENCH_STDOUT("# ====== BEGIN Impatient summary record for the %d/%d matrices so far.\n", filenameif+1,filenamen);
+				errval = rsb__pr_dump_inner(rspr, filenamea, ca, incXa, incYa, nrhsa, typecodes, NULL, NULL,&filenameif, NULL, NULL, NULL, NULL, NULL, NULL, NULL, RSB_FLAG_NOFLAGS, RSB_FLAG_NOFLAGS, NULL);
+				RSBENCH_STDOUT("# ======  END  Impatient summary record for the %d/%d matrices so far.\n", filenameif+1,filenamen);
+			}
+			if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+			errval = rsb__pr_save(cprfn, rspr, filenamea, ca, incXa, incYa, nrhsa, typecodes, NULL, RSB_BOOL_TRUE );
+			if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}
+
+		if( filenamei >= filenamen )
+			continue; /* temporary: only for the want_impatiently_soon_pre_results trick */
+
+		if(filenamea)
+		{
+			filename = filenamea[filenamei];
+		}
+
+		if(filenamen>1)
+		{
+			RSBENCH_STDOUT("# multi-file benchmarking (file %d/%d) -- now using %s\n",filenamei+1,filenamen,rsb__basename(filename));
+		}
+
+	for(     incXi=0;     incXi<incXn     ;++incXi     )
+	{
+	for(     incYi=0;     incYi<incYn     ;++incYi     )
+	{
+	for(     nrhsi=0;     nrhsi<nrhsn     ;++nrhsi     )
+	{
+	for(typecodesi=0;typecodesi<ntypecodes;++typecodesi)
+	{
+	rsb_flags_t flags = flags_o;
+	rsb_thread_t cl; /* cores number last (overrides cn for this typecode cycle) */
+	typecode = typecodes[typecodesi];
+
+	if(ntypecodes>1)
+	{
+		RSBENCH_STDOUT("# multi-type benchmarking (%s) -- now using typecode %c (last was %c).\n",typecodes,typecode,typecode_old);
+		if( RSB_MATRIX_UNSUPPORTED_TYPE ( typecode ) )
+		{
+			RSBENCH_STDOUT("# Skipping unsupported type \"%c\" -- please choose from \"%s\".\n",typecode,RSB_NUMERICAL_TYPE_PREPROCESSOR_SYMBOLS );
+			continue;
+		}
+	}
+
+	nrhs = nrhsa[nrhsi];
+	if( nrhsn > 1 && nrhss )
+	{
+		RSBENCH_STDOUT("# multi-nrhs benchmarking (%s) -- now using nrhs %d.\n",nrhss,nrhs);
+	}
+	incX = incXa[incXi];
+	incY = incYa[incYi];
+	if(incXn>1)
+	{
+		RSBENCH_STDOUT("# multi-incX benchmarking (%d/%d) -- now using incX=%d.\n",incXi+1,incXn,incX);
+	}
+	if(incYn>1)
+	{
+		RSBENCH_STDOUT("# multi-incY benchmarking (%d/%d) -- now using incY=%d.\n",incYi+1,incYn,incY);
+	}
+
+	if( want_only_star_scan )
+		if( RSB_MIN(incXi,1) + RSB_MIN(incYi,1) + RSB_MIN(nrhsi,1) > 1 ) /* two or more exceed index one */
+		{
+			RSBENCH_STDOUT("# Skipping a case with incX=%d incY=%d nrhs=%d.\n",incX,incY,nrhs);
+			goto frv;
+		}
+ 	RSBENCH_STDOUT("# so far, program took %.3lfs of wall clock time; ancillary tests %.3lfs; I/O %.3lfs; checks %.3lfs; conversions %.3lfs; rsb/mkl tuning %.3lfs/%.3lfs ",totprt + rsb_time(),totatt,totiot,totht,totct,tottt,totmt);
+	/* rsb__getrusage(); */ /* FIXME: new (20140727) */
+#ifndef RSB_DISABLE_ALLOCATOR_WRAPPER
+	RSBENCH_STDOUT("( allocated_memory:%zd allocations_count:%zd)",rsb_global_session_handle.allocated_memory,rsb_global_session_handle.allocations_count);
+#endif
+	RSBENCH_STDOUT(".\n"); /* FIXME: this takes too much space here ! */
+
+	if(cns)
+	{
+		cc = ca[ci];
+	}
+	cl=cn;
+	if(bcs)
+		if(RSB_SOME_ERROR(rsb__util_get_bx_array(bcs,&bcl,&bcv)))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+	if(brs)
+		if(RSB_SOME_ERROR(rsb__util_get_bx_array(brs,&brl,&brv)))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+
+
+
+#ifdef RSB_WANT_OSKI_BENCHMARKING 
+	/* FIXME : note that this option is not compatible with g_sort_only .. */
+        oski_Init();
+#endif /* RSB_WANT_OSKI_BENCHMARKING */
+	g_debug = ((flags & RSB_FLAG_SHOULD_DEBUG) != 0);
+
+	if(g_sort_only)RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORT_INPUT);
+
+	if(typecode==-1)
+	{
+		RSBENCH_STDERR("error : please recompile with double precision floating point numbers supported! \n");
+		return RSB_ERR_GENERIC_ERROR;
+	}
+	rsb__util_set_area_to_converted_integer(&pone[0],typecode,+1);
+
+
+	if(until_confidence && g_estimate_fillin)
+	{
+		RSBENCH_STDERR("cannot perform -e functionality in one run. one at a time please..\n");
+		goto err;
+	}
+
+	if(brl<1) { /* this is a hack */ brv = rua; brl = RSB_ROWS_UNROLL_ARRAY_LENGTH;}
+	if(bcl<1) { /* this is a hack */ bcv = cua; bcl = RSB_COLUMNS_UNROLL_ARRAY_LENGTH;}
+
+	if(RSB_MATRIX_UNSUPPORTED_TYPE(typecode))
+	{
+		RSBENCH_STDERR("This numerical type is not supported.\n");
+		goto err;
+	}
+
+	/* CONDITIONALLY, GENERATING A MATRIX */
+	if(should_generate_dense!=0)
+	{
+		rsb_nnz_idx_t dim = RSB_FABS(should_generate_dense);
+		rsb_nnz_idx_t spacing = want_generated_spacing>1?want_generated_spacing:1;
+		
+		if(((should_generate_lband>-1) || (should_generate_uband>-1)) && should_generate_dense>0)
+		{
+			rsb__sprintf(fnbuf,"banded-%dx%d-%d+%d-%dnz-spaced-%d",dim*spacing,dim*spacing,should_generate_lband,should_generate_uband,RSB_NNZ_OF_BANDED(dim,should_generate_lband,should_generate_uband),spacing);
+		}
+		else
+		{
+		if(want_generated_spacing>0)
+		{
+			if(should_generate_dense>0)
+				rsb__sprintf(fnbuf,"dense-%dx%d-%dnz",dim*spacing,should_generate_dense_nc*spacing/*dim*spacing*/,dim*dim);
+			else
+				rsb__sprintf(fnbuf,"lower-%dx%d-%dnz-spaced-%d",dim*spacing,dim*spacing,(dim*(dim-1))/2+dim,spacing);
+		}
+		else
+		{
+			if(should_generate_dense>0)
+				rsb__sprintf(fnbuf,"dense-%dx%d-%dnz",dim*spacing,should_generate_dense_nc*spacing/*dim*spacing*/,dim*should_generate_dense_nc);
+			else
+				rsb__sprintf(fnbuf,"lower-%dx%d-%dnz",dim*spacing,dim*spacing,(dim*(dim-1))/2+dim);
+		}
+		}
+		if(want_incX)
+				rsb__sprintf(fnbuf+strlen(fnbuf),"-incX-%d",incX);
+		if(want_incY)
+				rsb__sprintf(fnbuf+strlen(fnbuf),"-incY-%d",incY);
+/*		rsb__sprintf(fnbuf,"dense-%dx%d-%dnz",dim,dim,dim*dim);*/
+/*		rsb__sprintf(fnbuf,"dense-%dx%d",dim,dim);*/
+		filename=&(fnbuf[0]);
+	}
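+	/*
+	 * For instance, a square request like "--dense 1000" with default
+	 * spacing should yield the synthetic label "dense-1000x1000-1000000nz"
+	 * (an illustrative value; fnbuf then stands in for a file name in the
+	 * reports below).
+	 */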
+
+	if(usfnbuf)
+		filename=usfnbuf;
+
+	/* CONDITIONALLY, READING A MATRIX FROM FILE */
+if(filename || b_r_filename)
+{
+
+	rsb_blk_idx_t M_b=0;/* was 0 */
+	rsb_blk_idx_t K_b=0;
+	rsb_nnz_idx_t i=0;
+
+	rsb_coo_idx_t *p_r=NULL,*p_c=NULL;	/* FIXME : get rid of these */
+	void *lhs=NULL,*rhs=NULL;
+	int bcvi=0;
+	int brvi=0;
+	rsb_time_t frt = RSB_TIME_ZERO;
+
+	if( filename != filename_old )
+	{
+		RSB_CONDITIONAL_FREE(IA);
+		RSB_CONDITIONAL_FREE(JA);
+		RSB_CONDITIONAL_FREE(VA);
+	}
+	if(!should_recycle_io) { RSB_DEBUG_ASSERT( VA == NULL ); }
+	if( should_recycle_io && VA && filename == filename_old )
+	{
+		flags = r_flags;
+		if( typecode != typecode_old )
+		{
+			void *VA_ = rsb__malloc_vector(nnz,typecode);
+			errval = rsb__do_copy_converted_scaled(VA, VA_, NULL, typecode_old, typecode, nnz, RSB_DEFAULT_TRANSPOSITION);
+			if(RSB_SOME_ERROR(errval)) { RSB_ERROR(RSB_ERRM_ES);goto err; }
+			RSB_CONDITIONAL_FREE(VA);
+			VA = VA_;
+			RSBENCH_STDOUT("# Reusing type converted (%c->%c) arrays from last iteration instead of reloading matrix file.\n",typecode_old,typecode);
+			typecode_old = typecode;
+		}
+		else
+		{
+			RSBENCH_STDOUT("# Reusing same type     (type %c) arrays from last iteration instead of reloading matrix file.\n",typecode);
+		}
+		goto have_va_ia_ja;
+	}
+	if((!should_generate_dense) && (!b_r_filename))
+	{
+		rsb_bool_t is_symmetric = RSB_BOOL_FALSE;
+		rsb_bool_t is_hermitian = RSB_BOOL_FALSE;
+		size_t fsz = rsb_sys_filesize(filename);
+
+		frt = - rsb_time();
+
+#ifdef RSB_HAVE_REGEX_H
+		if( want_slmr && rsb_regexp_match(rsb__basename(filename),want_slmr) == RSB_BOOL_TRUE )
+		{
+			RSB_STDOUT("# skipping loading matrix file %s, because it matches regex /%s/.\n",filename,want_slmr);
+			goto nfnm;
+		}
+#endif /* RSB_HAVE_REGEX_H */
+		if( want_slss && ( strstr( rsb__basename(filename), want_slss ) != NULL ) )
+		{
+			RSB_STDOUT("# skipping loading matrix file %s, because it matches substring %s.\n",filename,want_slss);
+			goto nfnm;
+		}
+		/* if(RSB_SOME_ERROR(rsb__do_util_get_matrix_dimensions(filename,&ncA,&nrA,&nnz,NULL)) ) */
+		if(RSB_SOME_ERROR(rsb__util_mm_info_matrix_f(filename,&nrA,&ncA,&nnz,NULL,&is_symmetric,&is_hermitian,NULL,NULL,NULL,NULL)) )
+		{
+			RSBENCH_STDERR(RSB_ERRMSG_PROIFAMM ": %s ..\n",filename);
+			if( ignore_failed_fio )
+			{
+				RSBENCH_STDERR("Will ignore error and continue with the following files.\n");
+				errval = RSB_ERR_NO_ERROR;
+				goto nfnm;
+			}
+			goto err;
+		}
+		if( want_slnu == RSB_BOOL_TRUE && ( is_hermitian || is_symmetric ) )
+		{
+			RSB_STDOUT("# skipping loading not unsymmetric matrix %s, as requested.\n",filename);
+			goto nfnm;
+		}
+		if( want_slsm == RSB_BOOL_TRUE && is_symmetric )
+		{
+			RSB_STDOUT("# skipping loading symmetric matrix %s, as requested.\n",filename);
+			goto nfnm;
+		}
+		if( want_slhm == RSB_BOOL_TRUE && is_hermitian )
+		{
+			RSB_STDOUT("# skipping loading hermitian matrix %s, as requested.\n",filename);
+			goto nfnm;
+		}
+		if( want_slum == RSB_BOOL_TRUE && !is_symmetric )
+		{
+			RSB_STDOUT("# skipping loading unsymmetric matrix %s, as requested.\n",filename);
+			goto nfnm;
+		}
+		if( want_slmn > 0 && want_slmn <  nnz )
+		{
+			RSB_STDOUT("# skipping loading matrix %s, having %d > %d allowed nonzeroes.\n",filename,nnz,want_slmn);
+			goto nfnm;
+		}
+		if( want_slms > 0 && want_slms <= fsz / 1024 )
+		{
+			RSB_STDOUT("# skipping loading matrix %s, having %zd>=%zd allowed filesize (KiB).\n",filename,fsz,want_slms);
+			goto nfnm;
+		}
+		if( want_slnn > 0 && want_slnn >  nnz )
+		{
+			RSB_STDOUT("# skipping loading matrix %s, having %d < %d allowed nonzeroes.\n",filename,nnz,want_slnn);
+			goto nfnm;
+		}
+	
+		RSB_STDOUT("# reading %s (%zd bytes / %zd "RSB_MEGABYTE_SYM" / %zd nnz / %zd rows / %zd columns / %zd MiB COO) as type %c...\n",rsb__basename(filename),fsz,RSB_DIV(fsz,RSB_MEGABYTE),(size_t)nnz,(size_t)nrA,(size_t)ncA,RSB_DIV(RSB_UTIL_COO_OCCUPATION(nrA,ncA,nnz,typecode),RSB_MEGABYTE),typecode);
+
+		if( ( nrA == ncA ) && ( nrA > 1 ) && ( want_only_lowtri || want_only_upptri ) )
+			nnz += nrA;	/* the loading routine shall allocate nnz+nrA */
+		else
+ 			nnz = 0;	/* the loading routine should determine nnz */
+
+		totiot -= rsb_time();
+		errval = rsb__util_mm_load_matrix_f(filename,&IA,&JA,&VA,&nrA,&ncA,&nnz,typecode,flags,NULL,NULL);
+		totiot += rsb_time();
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSBENCH_STDERR(RSB_ERRMSG_NOTMTXMKT" : %s ..\n",filename);
+			goto err;
+		}
+		else
+		{
+			rsb_bool_t is_lower = RSB_BOOL_FALSE;
+			rsb_bool_t is_upper = RSB_BOOL_FALSE;
+			rsb_bool_t is_vector = RSB_BOOL_FALSE;
+
+			filename_old = filename;
+			typecode_old = typecode;
+
+			frt += rsb_time();
+			RSB_STDOUT("# file input of %s took %6.2lf s (%.0lf nnz, %.0lf nnz/s ) (%.2lf MB/s ) \n",rsb__basename(filename),frt,
+				(((double)nnz)),
+				(((double)nnz)/frt),
+				(((double)rsb_sys_filesize(filename))/(frt*RSB_INT_MILLION))
+			);
+
+			if (want_io_only)
+			{
+				/*  */
+				goto err;
+			}
+
+			if(want_transpose)
+			{
+				RSB_SWAP(rsb_coo_idx_t*,IA,JA);
+				RSB_SWAP(rsb_coo_idx_t,nrA,ncA);
+				flags = rsb__do_flip_uplo_flags(flags);
+			}
+
+			if( nrA==ncA && nrA>1 && ( want_only_lowtri || want_only_upptri ) )
+			{
+				rsb_nnz_idx_t discarded = 0;
+				/*
+				rsb__util_coo_array_set_sequence(IA+nnz,nrA,0,1);
+				rsb__util_coo_array_set_sequence(JA+nnz,nrA,0,1);
+				 */
+				RSB_FCOO_ISET(IA+nnz,0,nrA);
+				RSB_FCOO_ISET(JA+nnz,0,nrA);
+				rsb__fill_with_ones(((rsb_byte_t*)VA)+RSB_SIZEOF(typecode)*nnz,typecode,nrA,1);
+				nnz += nrA;	/* account for the nrA diagonal elements just appended (the loading routine had overwritten the earlier nnz estimate) */
+				if( want_only_lowtri )
+				{
+					RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER_TRIANGULAR);
+					errval = rsb_weed_out_non_lowtri(VA,IA,JA,nnz,typecode,NULL,&discarded);
+					RSBENCH_STDOUT("# discarding %d non lower elements of %d.\n",discarded,nnz);
+					nnz-=discarded;
+				}
+				if( want_only_upptri )
+				{
+					RSB_DO_FLAG_ADD(flags,RSB_FLAG_UPPER_TRIANGULAR);
+					errval = rsb_weed_out_non_upptri(VA,IA,JA,nnz,typecode,NULL,&discarded);
+					RSBENCH_STDOUT("# discarding %d non upper elements of %d.\n",discarded,nnz);
+					nnz-=discarded;
+				}
+
+				if(RSB_SOME_ERROR(errval))
+				{RSB_ERROR(RSB_ERRM_ES);goto err;}
+			}
+
+			if(RSB_SOME_ERROR(rsb__util_mm_info_matrix_f(filename,NULL,NULL,NULL,NULL,&is_symmetric,&is_hermitian,NULL,&is_lower,&is_upper,&is_vector) ))
+			{
+				RSBENCH_STDERR(RSB_ERRMSG_PROIFAMM ": %s ..\n",filename);
+				goto err;
+			}
+			if( is_vector )
+			{
+				RSBENCH_STDERR("file %s seems to store a vector\n",filename);
+				goto err;
+			}
+			if(RSB_BOOL_AND(want_as_unsymmetric,want_as_symmetric))
+			{
+				RSBENCH_STDERR("requiring both symmetric and unsymmetric flags is contradictory!\n");
+				goto err;
+			}
+			if(want_as_unsymmetric)
+			{
+				is_symmetric = RSB_BOOL_FALSE;
+				is_hermitian = RSB_BOOL_FALSE;
+			}
+			if(want_as_symmetric)
+			{
+				is_symmetric = RSB_BOOL_TRUE;
+				is_hermitian = RSB_BOOL_TRUE;
+			}
+			if(!RSB_IS_MATRIX_TYPE_COMPLEX(typecode) && is_hermitian)
+			{
+				RSBENCH_STDOUT("# Warning: non complex matrix with hermitian flags! Converting to symmetric!\n");
+				is_hermitian = RSB_BOOL_FALSE;
+				is_symmetric = RSB_BOOL_TRUE;
+			}
+			/* TODO: use rsb__flags_from_props() */
+			if(is_hermitian == RSB_BOOL_TRUE && !RSB_EXPERIMENTAL_EXPAND_SYMMETRIC_MATRICES_BY_DEFAULT)
+			{
+				RSB_DO_FLAG_ADD(flags,RSB_FLAG_HERMITIAN);
+			}
+			if(is_symmetric == RSB_BOOL_TRUE && !RSB_EXPERIMENTAL_EXPAND_SYMMETRIC_MATRICES_BY_DEFAULT)
+			{
+				RSB_DO_FLAG_ADD(flags,RSB_FLAG_SYMMETRIC);
+			}
+
+			if( (!RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER)) && (!RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER)) )
+			{
+				/* is_upper and is_lower as declared in the matrix file */
+				if(is_upper)
+ 					RSB_DO_FLAG_ADD(flags,RSB_FLAG_UPPER);
+				if(is_lower)
+ 					RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER);
+			}
+			RSB_DO_ERROR_CUMULATE(errval,rsb__do_cleanup_nnz(VA,IA,JA,nnz,0,0,nrA,ncA,&nnz,typecode,flags)); /* NEW */
+			if(RSB_SOME_ERROR(errval))
+			{ RSB_ERROR(RSB_ERRM_ES); goto err; }
+			if(want_sort_after_load)
+			{
+				rsb_time_t dt = RSB_TIME_ZERO;
+				dt = - rsb_time();
+				if((errval = rsb__util_sort_row_major_parallel(VA,IA,JA,nnz,nrA,ncA,typecode,RSB_FLAG_NOFLAGS))!=RSB_ERR_NO_ERROR)
+				{ RSB_ERROR(RSB_ERRM_ES); goto err; }
+				dt += rsb_time();
+				RSBENCH_STDOUT("#pre-sorting took %lg s\n",dt);
+				RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORTED_INPUT);
+			}
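+			/*
+			 * Note: RSB_FLAG_SORTED_INPUT declares the coordinate
+			 * arrays as already row-major sorted, so the assembly
+			 * below may exploit the ordering instead of re-sorting;
+			 * this is the point of the --sort-after-load option.
+			 */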
+#if RSB_HAVE_METIS
+			if(want_wmbr)
+			{
+				/* FIXME: unfinished */
+				rsb_coo_idx_t *perm = NULL,*iperm = NULL,*vwgt = NULL;
+
+				perm  = rsb__calloc(sizeof(rsb_coo_idx_t)*(nrA+1));
+				iperm = rsb__calloc(sizeof(rsb_coo_idx_t)*(nrA+1));
+#if 1
+				vwgt  = rsb__calloc(sizeof(rsb_coo_idx_t)*(nnz));
+				rsb__util_coo_array_set(vwgt,nnz,0);
+#else
+				vwgt  = rsb__clone_area(JA,nnz*sizeof(rsb_coo_idx_t));
+#endif
+				if( !perm || !iperm || !vwgt )
+				{
+					RSB_CONDITIONAL_FREE(iperm);
+					RSB_CONDITIONAL_FREE(perm);
+					RSB_CONDITIONAL_FREE(vwgt);
+				}
+				errval = rsb__util_sort_row_major_parallel(VA,IA,JA,nnz,nrA,ncA,typecode,RSB_FLAG_NOFLAGS);
+				errval = rsb__do_switch_fullword_array_to_compressed(IA,nnz,nrA);
+				RSBENCH_STDOUT("Calling METIS_NodeND\n");
+				/*errval = */ METIS_NodeND(&nrA,IA,JA,vwgt,NULL,perm,iperm); /* the Scotch wrapper crashes on vwgt==NULL, and returns void */
+				RSBENCH_STDOUT("Exited  METIS_NodeND with code %d\n",errval);
+				/* if(errval == METIS_OK) */
+				{
+					RSBENCH_STDOUT("Permuting..\n");
+					errval = rsb__do_switch_compressed_array_to_fullword_coo(IA, nrA, 0, NULL);
+					errval = rsb__do_permute_rows_with_coo_index( IA, perm, nnz);
+					RSBENCH_STDOUT("Permuted.\n");
+					/* 
+					 */
+					for(i=0;i<nrA;++i){ RSB_STDOUT("%d\n",perm[i]);}
+				}
+				RSB_CONDITIONAL_FREE(vwgt);
+				RSB_CONDITIONAL_FREE(perm);
+				RSB_CONDITIONAL_FREE(iperm);
+			}
+			
+#endif /* RSB_HAVE_METIS */
+		}
+	}
+	else
+	if(should_generate_dense!=0)
+	{
+		rsb_nnz_idx_t dim = RSB_FABS(should_generate_dense),spacing=1;
+		if(want_generated_spacing>1)
+			spacing = want_generated_spacing;
+		dim *= spacing;
+
+		if(((should_generate_lband>-1) || (should_generate_uband>-1)) && should_generate_dense>0)
+		{
+			rsb_nnz_idx_t lbw=should_generate_lband,ubw=should_generate_uband;
+			nrA = ncA = dim;
+			errval = rsb__generate_blocked_banded_coo(dim/spacing,spacing,lbw,ubw,&IA,&JA,&VA,&nnz,typecode);
+			if(RSB_SOME_ERROR(errval))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+		}
+		else
+		{
+		if(should_generate_dense>0)
+		{
+			RSB_DEBUG_ASSERT( should_generate_dense_nc != 0 );
+			/* full dense, no diag */
+			nrA = dim;
+			ncA = should_generate_dense_nc * spacing;
+			errval = rsb__generate_dense_full(nrA/spacing,ncA/spacing,spacing,&IA,&JA,&VA,&nnz,typecode);
+			if(RSB_SOME_ERROR(errval))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+		}
+		else
+		{
+			/* trick: lower triangular */
+			nrA=ncA=dim;
+			errval = rsb__generate_dense_lower_triangular_coo(dim/spacing,spacing,&IA,&JA,&VA,&nnz,typecode);
+			if(RSB_SOME_ERROR(errval))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER); /* 20121223	*/
+		}
+		}
+
+		if(want_sort_after_load)	
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORTED_INPUT);
+
+		if(want_as_symmetric)
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_SYMMETRIC);
+	} /* should_generate_dense */
+have_va_ia_ja:
+	RSB_DEBUG_ASSERT( VA != NULL );
+	RSB_DEBUG_ASSERT( IA != NULL );
+	RSB_DEBUG_ASSERT( JA != NULL );
+	r_flags = flags;
+
+	/* CONDITIONALLY, PROCESSING THE INPUT */
+	if(!b_r_filename)
+	{
+		if(want_column_expand)
+		{
+			errval = rsb__do_column_expand(JA,nnz,&ncA,want_column_expand);
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR(RSB_ERRM_ES);
+				goto err;
+			}
+		}
+
+		if( pattern_only )
+			rsb__fill_with_ones(VA,typecode,nnz,1);
+
+		if( dumpout )
+		{
+			errval = rsb__test_print_coo_mm(typecode,flags,IA,JA,VA,nrA,ncA,nnz,RSB_BOOL_TRUE,RSB_DEFAULT_STREAM);
+			//COO equivalent for rsb_file_mtx_save(mtxAp,NULL);
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR(RSB_ERRM_ES);
+				goto err;
+			}
+			goto ret;
+		}
+	}
+#if 1
+	if(want_nonzeroes_distplot)
+	{
+		/* FIXME: Unfinished: printout not adequate ! */
+		/* FIXME: Shall use a separate routine for this! Please regard this code as temporary */
+		rsb_coo_idx_t median_m=0,median_k=0,stdd_m=0,stdd_k=0,nzp_m=nnz/nrA,nzp_k=nnz/ncA;
+		rsb_coo_idx_t*idxv=NULL;
+		rsb_coo_idx_t mm=0;
+		rsb_nnz_idx_t cs=0;
+		rsb_bool_t po = RSB_BOOL_TRUE;
+		const int histres=100;
+		const rsb_char_t*pmsg="\n\nplot \"-\" using 1:2 title \"cumulative %s population (nnz)\"\n";
+		RSBENCH_STDOUT("set xtics rotate\n");
+		RSBENCH_STDOUT("set term postscript eps color\n");
+		RSBENCH_STDOUT("set output \"%s-distplot.eps\"\n", rsb__basename(filename));
+		RSBENCH_STDOUT("set multiplot layout 1,2 title \"%s (%d x %d, %d nnz)\"\n", rsb__basename(filename),nrA,ncA,nnz);
+
+		outnri = rhsnri = ndA = RSB_MAX(nrA,ncA);
+
+		mm=nrA<histres?1:nrA/histres;
+		idxv = rsb__calloc(sizeof(rsb_coo_idx_t)*(ndA));
+		if(!idxv)
+			goto nohists;
+
+		for(i=0;i<nnz;++i)
+			if(IA[i] < nrA && IA[i] >= 0 )
+				idxv[IA[i]]++;
+		for(i=0;i<nrA;++i)
+			if(median_m<nnz/2)
+				{ median_m+=idxv[i]; }
+			else
+				{ break; }
+		median_m=i; 
+
+		RSB_STDOUT(pmsg,"rows");
+		if(po) for(i=0;i<nrA;++i){ cs+=idxv[i]; if(i%mm==0)RSB_STDOUT("%d %d\n",i,cs);}
+		RSB_STDOUT("e\n");
+
+		mm=ncA<histres?1:ncA/histres;
+
+		for(i=0;i<nrA;++i)
+			stdd_m+=(idxv[i]-nzp_m)*(idxv[i]-nzp_m);
+		stdd_m=nrA<2?0:sqrt(stdd_m/(nrA-1));
+
+
+		for(i=0;i<ncA;++i)
+			idxv[i]=0;
+
+		for(i=0;i<nnz;++i)
+			if(JA[i] < ncA && JA[i] >= 0 )
+				idxv[JA[i]]++;
+		for(i=0;i<ncA;++i)
+			if(median_k<nnz/2)
+				{ median_k+=idxv[i]; }
+			else
+				{ break; }
+		median_k=i; 
+
+		cs=0;
+		RSB_STDOUT(pmsg,"columns");
+		if(po) for(i=0;i<ncA;++i){ cs+=idxv[i]; if(i%mm==0)RSB_STDOUT("%d %d\n",i,cs);}
+		RSB_STDOUT("e\n");
+
+		for(i=0;i<ncA;++i)
+			stdd_k+=(idxv[i]-nzp_k)*(idxv[i]-nzp_k);
+		stdd_k=ncA<2?0:sqrt(stdd_k/(ncA-1));
+
+		RSBENCH_STDOUT("unset multiplot\n");
+		RSBENCH_STDOUT("#%%:NNZ_PER_ROW_STDDEV:");/* RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(); */
+		RSBENCH_STDOUT("\t%10.0d\n",stdd_m);
+		RSBENCH_STDOUT("#%%:ROWS_MEDIAN:");/* RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(); */
+		RSBENCH_STDOUT("\t%10.0g\n",((double)median_m/(double)nrA));
+		RSBENCH_STDOUT("#%%:NNZ_PER_COL_STDDEV:");/* RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(); */
+		RSBENCH_STDOUT("\t%10.0d\n",stdd_k);
+		RSBENCH_STDOUT("#%%:COLS_MEDIAN:");/* RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(); */
+		RSBENCH_STDOUT("\t%10.0g\n",((double)median_k/(double)ncA));
+nohists:
+		RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS;
+		RSB_CONDITIONAL_FREE(idxv);
+		goto ret;
+	}
+#endif /* 1 */
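+	/*
+	 * The branch above prints a gnuplot script to standard output; assuming
+	 * the usual benchmark binary name (illustrative invocation only), it
+	 * can be rendered with e.g.:
+	 *   ./rsbench ... --want-nonzeroes-distplot -f matrix.mtx | gnuplot
+	 */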
+	/* CONDITIONALLY, PERFORMING SOME TEST ON THE INPUT */
+	if(want_accuracy_test>=1)
+	{
+		struct rsb_coo_matrix_t coo;
+		rsb__fill_coo_struct(&coo,VA,IA,JA,nrA,ncA,nnz,typecode);
+		RSB_DO_ERROR_CUMULATE(errval,rsb__do_spmv_accuracy_test(&coo,ca,cn,flags));
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_ERROR("accuracy based test failed!\n");
+			goto err;
+		}
+		if(want_accuracy_test>1)
+		{
+			goto done;
+		}
+	}
+
+		if( (flags & RSB_FLAG_QUAD_PARTITIONING) && g_all_flags==1)
+		{
+			int /*ci=0,*/hi=0,oi=0;
+			fn=0;
+			for(ci=0;ci<3;++ci)
+/*			for(di=0;di<2;++di)*/
+			for(oi=0;oi<2;++oi)
+			for(hi=0;hi<2;++hi)
+/*			for(li=0;li<2;++li)*/
+			{
+#if 0
+				flagsa[di+hi*2+li*4+ci*8]=flags;
+				//RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],hi?RSB_FLAG_USE_HALFWORD_INDICES_COO:0);
+				RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],hi?RSB_FLAG_USE_HALFWORD_INDICES_CSR:0);
+#ifdef RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES
+				RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],li?RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES:0);
+#endif /* RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES */
+				RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],di?RSB_FLAG_RECURSIVE_SUBDIVIDE_MORE_ON_DIAG:0);
+	
+#if 0
+				RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],ci==1?RSB_FLAG_RECURSIVE_HALF_DETECTED_CACHE:0);
+				RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],ci==2?RSB_FLAG_RECURSIVE_DOUBLE_DETECTED_CACHE:0);
+#endif /* 0 */
+#else /* 0 */
+				flagsa[fn]=flags;
+				//RSB_DO_FLAG_ADD(flagsa[fn],li?RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES:0);
+				//RSB_DO_FLAG_ADD(flagsa[fn],di?RSB_FLAG_RECURSIVE_SUBDIVIDE_MORE_ON_DIAG:0);
+				RSB_DO_FLAG_ADD(flagsa[fn],oi?RSB_FLAG_USE_HALFWORD_INDICES_COO:0);
+				RSB_DO_FLAG_ADD(flagsa[fn],hi?RSB_FLAG_USE_HALFWORD_INDICES_CSR:0);
+#if 0
+				RSB_DO_FLAG_ADD(flagsa[fn],ci==1?RSB_FLAG_RECURSIVE_HALF_DETECTED_CACHE:0);
+				RSB_DO_FLAG_ADD(flagsa[fn],ci==2?RSB_FLAG_RECURSIVE_DOUBLE_DETECTED_CACHE:0);
+#endif /* 0 */
+#endif /* 0 */
+				++fn;
+			}
+		}
+		else
+		{
+			fn=1;
+			flagsa[fn-1]=flags;
+		}
+
+		if(!want_perf_dump)
+		if(!( RSB_MKL_APPROPRIATE_AT_TIME_SPEC( want_autotuner ) || RSB_MKL_APPROPRIATE_AT_TIME_SPEC( merge_experimental ) || RSB_MKL_APPROPRIATE_AT_TIME_SPEC( split_experimental ) )) /* otherwise pr__set.. cannot distinguish samples */
+		if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING))
+		{
+			/* adds a no-recursion flag case */
+			RSB_DO_FLAG_DEL(flags,RSB_FLAG_QUAD_PARTITIONING);
+/*			if(fn)*/
+/*				flags=flagsa[fn-1];	*//* copy from the last */
+/*			else*/
+/*				flagsa[fn]=flags;	*//* impose these flags */
+			for(fi=fn;fi>0;--fi)
+				flagsa[fi]=flagsa[fi-1];/* shift forward */
+			RSB_DO_FLAG_DEL(flagsa[0],RSB_FLAG_QUAD_PARTITIONING);
+			++fn;	/* add ours */
+		}
+
+		for(ti=0;ti<tn;++ti)
+		{
+
+		transA = transAo;
+		if(ti>0)
+			transA = rsb__do_transpose_transposition(transAo);
+		if(ti==2)
+			transA = RSB_TRANSPOSITION_C;
+		if(!  (
+			( RSB_IS_MATRIX_TYPE_COMPLEX(typecode) && (ti!=0) && ( flags & RSB_FLAG_SOME_SYMMETRY ) )  ||
+		       ((!RSB_IS_MATRIX_TYPE_COMPLEX(typecode))&& (ti!=0) && ( flags & RSB_FLAG_SYMMETRIC) )  ||
+		       ((!RSB_IS_MATRIX_TYPE_COMPLEX(typecode))&& (ti==2) &&!( flags & RSB_FLAG_SOME_SYMMETRY) )  ||
+			g_allow_any_tr_comb
+		))
+		if(tn>1)
+		{
+			RSBENCH_STDOUT("# multi-transpose benchmarking -- now using transA = %c.\n",RSB_TRANSPOSITION_AS_CHAR(transA));
+		}
+		if( /* transA != RSB_TRANSPOSITION_N */ ti>0 && RSB_DO_FLAG_HAS(flags,RSB_FLAG_SYMMETRIC) )
+		{
+			RSBENCH_STDOUT("# symmetric matrix --- skipping transposed benchmarking\n");
+			continue;
+		}
+		for(fi=0;fi<fn;++fi)
+		for(brvi=-1;brvi<brl;++brvi)
+		for(bcvi=-1;bcvi<bcl;++bcvi)
+#ifndef  RSB_COORDINATE_TYPE_H
+		if(!(flagsa[fi] & RSB_FLAG_USE_HALFWORD_INDICES_CSR))
+#endif /* RSB_COORDINATE_TYPE_H */
+		for(ci=0;ci<cn;++ci)	/* here just for should_recycle_matrix */
+		if(!(ca[ci]>1 && !(RSB_DO_FLAG_HAS(flagsa[fi],RSB_FLAG_QUAD_PARTITIONING)))) /* no need for more than one core without recursion */
+		{
+			cc = ca[ci];
+			should_recycle_matrix=(ci>0)?RSB_BOOL_TRUE:RSB_BOOL_FALSE;
+			/* if this is the special "vanilla CSR" run after/before recursive runs ... */
+			if(rsb__set_num_threads(cc)!=cc)
+			{
+				RSB_ERROR("failed setting %d threads!\n",cc);
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto err;
+			}
+			flags=flagsa[fi];
+			if(cn>1 && !RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING))
+				RSB_DO_FLAG_DEL(flags,RSB_FLAG_USE_HALFWORD_INDICES);
+
+
+			if(brl>0 && bcl>0)
+			{
+				/* this is a trick and an unclean programming practice */
+				if(brvi==-1)++brvi;
+				if(bcvi==-1)++bcvi;
+				br = brv[brvi];
+				bc = bcv[bcvi];
+			}
+			else
+			{	
+				/* br, bc already set */
+			}
+
+#if RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS 
+			/*	
+			* FIXME : laziness
+			*/
+						if( br!=1 || bc!=1 || !rsb__util_are_flags_suitable_for_optimized_1x1_constructor(flags) )
+#endif /* RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS */
+#if RSB_WANT_RSB_AS_ONLY_ALLOWED_FORMAT
+			if(0)
+#endif /* RSB_WANT_RSB_AS_ONLY_ALLOWED_FORMAT */
+			{
+				p_r = rsb__util_get_partitioning_array(br,nrA,&M_b,flags);
+				p_c = rsb__util_get_partitioning_array(bc,ncA,&K_b,flags);
+
+				if((! p_r) || (! p_c))
+				{
+					RSB_ERROR(RSB_ERRM_ES);
+					errval = RSB_ERR_ENOMEM;
+					goto erri;
+				}
+			}
+
+			if(  ( br!=1 || bc!=1 || p_r || p_c ) && ( flags & RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR ))
+			{
+				/*  */
+				RSB_WARN("WARNING : disabling in place allocation flag : it is only allowed for 1x1!\n");
+				RSB_DO_FLAG_DEL(flags,RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR) ;
+			}
+
+			pinfo.M_b=M_b;
+			pinfo.K_b=K_b;
+			pinfo.rpntr=p_r;
+			pinfo.cpntr=p_c;
+
+
+			if(max_nnzs==0)
+				max_nnzs=nnz;
+	if(until_confidence && g_estimate_fillin)
+	{
+		if( want_percentage && ( max_nnzs > 100 || max_nnzs < 1) ) 
+		{RSBENCH_STDERR("given percentage = %zd ?\n",(rsb_printf_int_t)max_nnzs);goto err;}
+		else
+		{
+			if( want_percentage ) max_nnzs =(rsb_nnz_idx_t ) (((double)nnz/100.0) *(double) max_nnzs );
+
+			if(max_nnzs>nnz)
+			{RSBENCH_STDERR("max_nnzs (%zd) cannot exceed the nonzeroes count (%zd)!\n",(rsb_printf_int_t)max_nnzs,(rsb_printf_int_t)nnz);goto err;}
+			else
+			if(max_nnzs<nnzn)
+			{RSBENCH_STDERR("max_nnzs (%zd) should be at least the number of samples (%zd)!\n",(rsb_printf_int_t)max_nnzs,(rsb_printf_int_t)nnzn);goto err;}
+		}
+	}
+
+#if 0
+	if(!until_confidence && !g_estimate_fillin)
+	{
+		{RSBENCH_STDERR("should choose an option : [ -S points] (-e)!\n");goto err;}
+		goto err;
+	}
+#else /* 0 */
+	g_estimate_fillin=1;
+#endif /* 0 */
+		if( until_confidence && ( until_confidence > 100 || until_confidence < 1) ) 
+		{RSBENCH_STDERR("given percentage (%zd) is outside the 1..100 range!\n",(rsb_printf_int_t)until_confidence ); RSB_ERROR(RSB_ERRM_ES); goto err;}
+
+			if(g_estimate_fillin)
+			{
+				size_t total_element_count=0;
+				size_t total_block_count=0;
+				rsb_fillin_t fillin;
+
+				nnzs = rsb__calloc(nnzn * sizeof(size_t));
+				element_count = rsb__calloc(nnzn * sizeof(size_t));
+				block_count = rsb__calloc(nnzn * sizeof(size_t));
+
+				if(!nnzs || !element_count || !block_count)
+				{
+					errval = RSB_ERR_ENOMEM;
+					RSB_ERROR(RSB_ERRM_ES);
+					goto erri;
+				}
+
+				for(i=1;i<=nnzn;++i) nnzs[i-1]=(max_nnzs/nnzn) * i;/* beware: integer arithmetic truncates here */
+				nnzs[nnzn-1]=nnz; /* the last sample covers all of the nonzeroes */
+	
+				errval = rsb__compute_partial_fillin_for_nnz_fractions(IA, JA, nnzs, nnzn, &pinfo, element_count, block_count);
+				if(RSB_SOME_ERROR(errval))
+				{
+					RSB_ERROR(RSB_ERRM_ES);
+					goto erri;
+				}
+
+				errval = rsb__compute_partial_fillin_for_nnz_fractions(IA, JA, &nnz, 1, &pinfo, &total_element_count, &total_block_count);
+				if(RSB_SOME_ERROR(errval))
+				{
+					RSB_ERROR(RSB_ERRM_ES);
+					goto erri;
+				}
+				fillin = ((double)total_element_count)/((double)nnz);
+	
+				//RSB_STDOUT("#using %d up to %d nonzeros out of %d, we estimate the fillin as:\n",nnzs[0],nnzs[nnzn-1],nnz);
+				RSBENCH_STDOUT("#matrix	rows	cols	br	bc	nnz	fillin	fraction	rel.error\n");
+				for(i=0;i< nnzn;++i)
+				{
+					rsb_fillin_t partial_fillin=0;
+/*					RSBENCH_STDOUT("#%d\n",nnzs[i]);*/
+/*					RSBENCH_STDOUT("#%d / %d\n",element_count[i],total_element_count);*/
+					RSBENCH_STDOUT("%s\t%zd\t%zd\t%zd\t%zd\t%zd\t%lg",filename,
+					(rsb_printf_int_t)nrA,(rsb_printf_int_t)ncA,(rsb_printf_int_t)br,(rsb_printf_int_t)bc,(rsb_printf_int_t)nnz,fillin);
+					//RSBENCH_STDOUT(" (%d,%d)",element_count[i],block_count[i]);
+					partial_fillin = (element_count[i])/(double)(nnzs[i]);
+					RSBENCH_STDOUT("\t%.3lg\t%+.3lg\n",
+						((double)nnzs[i])/(double)nnz,
+						(partial_fillin-fillin)/fillin
+					);
+				}
+				//RSBENCH_STDOUT("\n");
+			}
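+			/*
+			 * Worked example for the estimate above: if blocking
+			 * stores total_element_count=1500 elements for nnz=1000
+			 * nonzeroes, then fillin = 1500/1000 = 1.5, i.e. a 50%
+			 * storage overhead of explicit zeros.
+			 */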
+
+
+		erri:
+			if(want_in_place_assembly && mtxAp)
+			{
+				rsb_time_t st = -rsb_time();
+				errval = rsb_mtx_switch_to_coo(mtxAp,&VA,&IA,&JA,RSB_FLAG_SORTED_INPUT),mtxAp=NULL;
+				st += rsb_time();
+				RSBENCH_STDOUT("# rsb_mtx_switch_to_coo time: %lg.\n",st);
+				if(RSB_SOME_ERROR(errval)) { RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+			}
+			RSB_MTX_FREE(mtxAp);
+			RSB_CONDITIONAL_FREE(lhs);
+			RSB_CONDITIONAL_FREE(rhs);
+
+			RSB_CONDITIONAL_FREE(p_r);
+			RSB_CONDITIONAL_FREE(p_c);
+			
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR(RSB_ERRM_ES);goto err;
+			}
+			if(brl==0 || bcl==0) break;
+		} /* ci : core (count) index */
+
+			if(want_verbose == RSB_BOOL_TRUE)
+			{
+            			RSBENCH_STDOUT("%%constructor:matrix	SORT[%d]	SCAN[%d]	SHUFFLE[%d]	INSERT[%d]\n",
+					ca[0],ca[0],ca[0],ca[0]);
+			}
+		} /* ti (transposition index) */
+	}
+	else
+	{
+		RSBENCH_STDOUT("%s (mat_stats) : Please specify a matrix filename (with -f)\n",argv[0]);
+	}
+ 	RSBENCH_STDOUT("# so far, program took %.3lfs of wall clock time; ancillary tests %.3lfs; I/O %.3lfs; checks %.3lfs; conversions %.3lfs; rsb/mkl tuning %.3lfs/%.3lfs ",totprt + rsb_time(),totatt,totiot,totht,totct,tottt,totmt);
+	RSBENCH_STDOUT(".\n"); /* FIXME: this takes too much space here ! */
+	rsb__getrusage();
+done:
+	RSB_CONDITIONAL_FREE(nnzs);
+	RSB_CONDITIONAL_FREE(element_count );
+	RSB_CONDITIONAL_FREE(block_count   );
+frv:
+	if( !should_recycle_io )
+	{
+		RSBENCH_STDOUT("# Freeing I/O arrays.\n");
+		RSB_CONDITIONAL_FREE(IA);
+		RSB_CONDITIONAL_FREE(JA);
+		RSB_CONDITIONAL_FREE(VA);
+	}
+	
+	if(mtxAp && !should_recycle_matrix){RSB_MTX_FREE(mtxAp)}
+	if( brv != rua ) {RSB_CONDITIONAL_FREE(brv);}
+	if( bcv != cua ) {RSB_CONDITIONAL_FREE(bcv);}
+		RSBENCH_MAY_SQUIT(ret,{}) /* early end of program */
+		RSBENCH_MAY_TQUIT(ret,{}) /* early end of program */
+	}	/* typecodesi */
+	}	/* nrhsi */
+	}	/* incXi */
+	}	/* incYi */
+nfnm:	RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS;
+	}	/* filenamei */
+	RSBENCH_STDOUT("# benchmarking terminated --- finalizing run.\n");
+#if RSB_WANT_PERFORMANCE_COUNTERS_IN_RSBENCH 
+	errval = rsb_perf_counters_finalize();
+	if(RSB_SOME_ERROR(errval)) { RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+#endif
+ret:
+	errval = RSB_ERR_NO_ERROR;
+goto rret;
+err:
+	rsb_perror(NULL,errval);
+	errval = RSB_ERR_GENERIC_ERROR;
+	RSB_CONDITIONAL_FREE(nnzs);
+	RSB_CONDITIONAL_FREE(element_count );
+	RSB_CONDITIONAL_FREE(block_count   );
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_CONDITIONAL_FREE(JA);
+	RSB_CONDITIONAL_FREE(VA);
+	if(want_in_place_assembly && mtxAp)rsb_mtx_switch_to_coo(mtxAp,&VA,&IA,&JA,RSB_FLAG_SORTED_INPUT),mtxAp=NULL;
+	RSB_MTX_FREE(mtxAp);
+	if( brv != rua ) {RSB_CONDITIONAL_FREE(brv);}
+	if( bcv != cua ) {RSB_CONDITIONAL_FREE(bcv);}
+	if(RSB_SOME_ERROR(rsb_lib_exit(RSB_NULL_EXIT_OPTIONS)))return RSB_ERR_GENERIC_ERROR;
+rret:
+	if(want_perf_dump) 
+	{
+		RSBENCH_STDOUT("# ====== BEGIN Total summary record.\n");
+		errval = rsb__pr_dump(rspr, filenamea, ca, incXa, incYa, nrhsa, typecodes, NULL );
+		RSBENCH_STDOUT("# ======  END  Total summary record.\n");
+		if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		errval = rsb__pr_save(fprfn, rspr, filenamea, ca, incXa, incYa, nrhsa, typecodes, NULL, RSB_BOOL_TRUE );
+		if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		RSBENCH_STDOUT("# Removing the temporary record file %s.\n",cprfn);
+		remove(cprfn);
+	}
+	if( ca  != ca_ ) {RSB_CONDITIONAL_FREE(ca);}
+#if !RSB_RSBENCH_STATIC_FILENAMEA
+	/* if(filenamea!=&fnbufp)RSB_CONDITIONAL_FREE(filenamea); */
+	if(filenamea!=&fnbufp)free(filenamea); /* FIXME */
+#endif
+	if(nrhsa!=(&nrhs))RSB_CONDITIONAL_FREE(nrhsa); /* FIXME: they get allocated (and thus shall be deallocated) before init */
+	if(incXa!=(&incX))RSB_CONDITIONAL_FREE(incXa);
+ 	if(incYa!=(&incY))RSB_CONDITIONAL_FREE(incYa); 
+	if(want_likwid == RSB_BOOL_TRUE){RSB_LIKWID_MARKER_EXIT;} /* FIXME: and other cases ? */
+	if(want_verbose == RSB_BOOL_TRUE)
+		rsb__echo_timeandlabel(" terminating run at ","\n",&st);
+	return errval;
+}
+
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+/* @endcond */
diff --git a/rsb_test_matops.h b/rsb_test_matops.h
new file mode 100644
index 0000000..6a11c80
--- /dev/null
+++ b/rsb_test_matops.h
@@ -0,0 +1,165 @@
+/* @cond INNERDOC */
+/*! 
+ @file
+ @brief 
+
+ Matrix Operations testing code source file.
+ This is NOT part of the library: only of companion programs.
+
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+/*!
+ @file
+ @brief
+ Performance kernels dispatching code, for each type, submatrix size, operation.
+ But for block compressed sparse stripes format.
+ Kernels unrolled, with no loops, for only user-specified blockings.
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+#ifndef RSB_TEST_MATOPS_H_INCLUDED
+#define RSB_TEST_MATOPS_H_INCLUDED
+
+/* FIXME: necessary, until we use so many #ifdefs in this program */
+#include "rsb-config.h"
+#include "rsb_common.h"
+#include "rsb_mkl.h"
+
+#if RSB_WITH_LIKWID
+#include <likwid.h>
+#define RSB_LIKWID_MARKER_INIT	{RSBENCH_STDOUT("# Initializing the LIKWID API with likwid_markerInit().\n");likwid_markerInit();}
+#define RSB_LIKWID_MARKER_EXIT {RSBENCH_STDOUT("# Finalizing the LIKWID API with likwid_markerClose().\n");likwid_markerClose();}
+#define RSB_LIKWID_MARKER_R_START(R) likwid_markerStartRegion(R)
+#define RSB_LIKWID_MARKER_R_STOP(R) likwid_markerStopRegion(R)
+#else /* RSB_WITH_LIKWID */
+#define RSB_LIKWID_MARKER_INIT
+#define RSB_LIKWID_MARKER_EXIT
+#define RSB_LIKWID_MARKER_R_START(R)
+#define RSB_LIKWID_MARKER_R_STOP(R)
+#endif /* RSB_WITH_LIKWID */
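+/*
+ * Editorial sketch of the intended marker usage (the region name "spmv" is
+ * illustrative only):
+ */
+#if 0
+	RSB_LIKWID_MARKER_INIT;
+	RSB_LIKWID_MARKER_R_START("spmv");
+	/* ... timed kernel invocations ... */
+	RSB_LIKWID_MARKER_R_STOP("spmv");
+	RSB_LIKWID_MARKER_EXIT;
+#endif /* 0 */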
+
+#define RSB_STDOUT_FD stdout
+#define RSBENCH_STDOUT( ... ) fprintf(RSB_STDOUT_FD, __VA_ARGS__ )
+#define RSB_WAT_FMT_H "Ss[Xx[Tt[V[V]]]]"
+#define RSB_WAT_FMT "%lfs[%dx[%dt[%c]]]"
+
+#define RSB_MIN_ABOVE_INF(X,Y,MIN) RSB_MAX(RSB_MIN(X,Y),MIN)
+#define RSB_INT_MILLION 1000000
+#define RSB_REAL_MILLION 1000000.0 
+enum rsb_dumpvec_enum { rsb_dumpvec_no= 0, rsb_dumpvec_res= 1, rsb_dumpvec_rhs= 2 };
+
+#if RSB_HAVE_LIBGEN_H
+#include <libgen.h>	/* for basename (20101226 FIXME : superseded by rsb__basename usage)*/
+#endif /* RSB_HAVE_LIBGEN_H */
+
+#define RSB_HAVE_METIS 0 /* FIXME: unfinished */
+#if RSB_HAVE_METIS
+#include <metis/metis.h>
+#endif /* RSB_HAVE_METIS */
+
+#ifdef RSB_WANT_OSKI_BENCHMARKING 
+#ifdef RSB_HAVE_OSKI_OSKI_H 
+#include <oski/oski.h>
+#else /* RSB_HAVE_OSKI_OSKI_H */
+#error "you should disable oski benchmarking at configure time!"
+#endif /* RSB_HAVE_OSKI_OSKI_H */
+#endif /* RSB_WANT_OSKI_BENCHMARKING */
+#ifdef RSB_HAVE_UNISTD_H
+#include <unistd.h>
+#endif /* RSB_HAVE_UNISTD_H */
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+	#define RSB_PERFORMANCE_COUNTERS_DUMP_MEAN(MSG,PMSG,TIMES,PCIP) if(want_perf_counters>0){rsb_perf_counters_update(); if(PMSG)rsb_perf_counters_dump(MSG,NULL,TIMES,PCIP); rsb_perf_counters_reset();/* TEMPORARY */}
+	#define RSB_PERFORMANCE_COUNTERS_DUMP(MSG,PMSG) if(want_perf_counters>1)RSB_PERFORMANCE_COUNTERS_DUMP_MEAN(MSG,PMSG,1,NULL) 
+#else /* RSB_WANT_PERFORMANCE_COUNTERS */
+	#define RSB_PERFORMANCE_COUNTERS_DUMP_MEAN(MSG,PMSG,TIMES,PCIP) 
+	#define RSB_PERFORMANCE_COUNTERS_DUMP(MSG,PMSG)
+#endif /* RSB_WANT_PERFORMANCE_COUNTERS */
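+/*
+ * Editorial sketch of intended use (want_perf_counters and times are set by
+ * the benchmark's option parser; values here are illustrative):
+ */
+#if 0
+	/* ... a benchmarked loop of `times' iterations ... */
+	RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("spmv loop",1,times,NULL);
+#endif /* 0 */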
+
+#if RSB_WITH_LIKWID
+#define RSB_TM_LIKWID_MARKER_R_START(R) if(want_likwid == RSB_BOOL_TRUE)RSB_LIKWID_MARKER_R_START(R)
+#define RSB_TM_LIKWID_MARKER_R_STOP(R)  if(want_likwid == RSB_BOOL_TRUE)RSB_LIKWID_MARKER_R_STOP(R)
+#else
+#define RSB_TM_LIKWID_MARKER_R_START(R)
+#define RSB_TM_LIKWID_MARKER_R_STOP(R)
+#endif /* RSB_WITH_LIKWID */
+
+
+#define RSB_WANT_PERFORMANCE_COUNTERS_IN_RSBENCH  defined(RSB_WANT_PERFORMANCE_COUNTERS) && (RSB_WANT_PERFORMANCE_COUNTERS==1)
+
+
+
+
+
+int rsb_test_help_and_exit(rsb_char_t *argv0, rsb_option *o, int code);
+/* one function for each of (spmv_uaua,spsv_uxua,mat_stats)*/
+int rsb__main_block_partitioned_spmv_uaua(const int argc, rsb_char_t * const argv[])
+;
+int rsb__main_block_partitioned_spsv_uxua(const int argc, rsb_char_t * const argv[])
+;
+int rsb__main_block_partitioned_mat_stats(const int argc, rsb_char_t * const argv[])
+;
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+#endif	/* RSB_TEST_MATOPS_H_INCLUDED */
+
+/* @endcond */
diff --git a/rsb_test_matops.m4 b/rsb_test_matops.m4
new file mode 100644
index 0000000..323cd5e
--- /dev/null
+++ b/rsb_test_matops.m4
@@ -0,0 +1,4843 @@
+dnl
+dnl
+dnl	@author: Michele Martone
+dnl
+/* @cond INNERDOC */
+dnl
+/*! 
+ @file
+ @brief 
+
+ Matrix Operations testing code source file.
+ This is NOT part of the library: only of companion programs.
+
+ */
+dnl
+include(`rsb_misc.m4')dnl
+RSB_M4_HEADER_MESSAGE()dnl
+include(`rsb_krnl_macros.m4')dnl
+dnl
+dnl
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#ifndef RSB_TEST_MATOPS_H_INCLUDED
+#define RSB_TEST_MATOPS_H_INCLUDED
+')dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+#include "rsb_test_matops.h"
+')dnl
+dnl
+
+/* FIXME: necessary, until we use so many #ifdefs in this program */
+#include "rsb-config.h"
+#include "rsb_common.h"
+#include "rsb_mkl.h"
+
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#if RSB_WITH_LIKWID
+#include <likwid.h>
+#define RSB_LIKWID_MARKER_INIT	{RSBENCH_STDOUT("# Initializing the LIKWID API with likwid_markerInit().\n");likwid_markerInit();}
+#define RSB_LIKWID_MARKER_EXIT {RSBENCH_STDOUT("# Finalizing the LIKWID API with likwid_markerClose().\n");likwid_markerClose();}
+#define RSB_LIKWID_MARKER_R_START(R) likwid_markerStartRegion(R)
+#define RSB_LIKWID_MARKER_R_STOP(R) likwid_markerStopRegion(R)
+#else /* RSB_WITH_LIKWID */
+#define RSB_LIKWID_MARKER_INIT
+#define RSB_LIKWID_MARKER_EXIT
+#define RSB_LIKWID_MARKER_R_START(R)
+#define RSB_LIKWID_MARKER_R_STOP(R)
+#endif /* RSB_WITH_LIKWID */
+
+#define RSB_STDOUT_FD stdout
+#define RSBENCH_STDOUT( ... ) fprintf(RSB_STDOUT_FD, __VA_ARGS__ )
+#define RSB_WAT_FMT_H "Ss[Xx[Tt[V[V]]]]"
+#define RSB_WAT_FMT "%lfs[%dx[%dt[%c]]]"
+
+#define RSB_MIN_ABOVE_INF(X,Y,MIN) RSB_MAX(RSB_MIN(X,Y),MIN)
+#define RSB_INT_MILLION 1000000
+#define RSB_REAL_MILLION 1000000.0 
+enum rsb_dumpvec_enum { rsb_dumpvec_no= 0, rsb_dumpvec_res= 1, rsb_dumpvec_rhs= 2 };
+
+')dnl
+dnl
+#if RSB_HAVE_LIBGEN_H
+#include <libgen.h>	/* for basename (20101226 FIXME : superseded by rsb__basename usage)*/
+#endif /* RSB_HAVE_LIBGEN_H */
+
+#define RSB_HAVE_METIS 0 /* FIXME: unfinished */
+#if RSB_HAVE_METIS
+#include <metis/metis.h>
+#endif /* RSB_HAVE_METIS */
+
+#ifdef RSB_WANT_OSKI_BENCHMARKING 
+#ifdef RSB_HAVE_OSKI_OSKI_H 
+#include <oski/oski.h>
+#else /* RSB_HAVE_OSKI_OSKI_H */
+#error "you should disable oski benchmarking at configure time!"
+#endif /* RSB_HAVE_OSKI_OSKI_H */
+#endif /* RSB_WANT_OSKI_BENCHMARKING */
+#ifdef RSB_HAVE_UNISTD_H
+#include <unistd.h>
+#endif /* RSB_HAVE_UNISTD_H */
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+#define RSB_UTIL_CSR_IDX_OCCUPATION(R,C,NNZ) (sizeof(rsb_coo_idx_t)*nnz+sizeof(rsb_nnz_idx_t)*nrA)
+#define RSB_UTIL_COO_IDX_OCCUPATION(R,C,NNZ) (sizeof(rsb_coo_idx_t)*2*nnz)
+#define RSB_UTIL_COO_OCCUPATION(R,C,NNZ,TYPE) (RSB_UTIL_COO_IDX_OCCUPATION(R,C,NNZ)+(NNZ)*(RSB_SIZEOF(TYPE)))
+#define RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH() RSB_FPRINTF_MATRIX_ESSENTIALS(stdout,mtxAp,filename,cc) 
+#define RSB_DIV(Q,D) ( ( (Q)+(D)-1 ) / (D) )
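+/* Note: RSB_DIV rounds upwards, e.g. RSB_DIV(5,2) == 3; it is used above to report sizes in whole megabytes. */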
+')dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+extern struct rsb_session_handle_t rsb_global_session_handle;
+#define RSB_NEGATED_EXAGGERATED_TUNER_TIMES -999999.0
+#define RSB_MKL_APPROPRIATE_AT_TIME_SPEC(TS) ( (TS) != RSB_NEGATED_EXAGGERATED_TUNER_TIMES )
+RSB_INTERNALS_RSBENCH_HEAD_DECLS
+#define RSBENCH_MAY_SQUIT(LABEL,ACTION) { if(RSB_SHALL_QUIT) { RSB_INFO("Terminating execution earlier due to interactive user request.\n"); ACTION; goto LABEL; } }
+#define RSBENCH_MAY_TQUIT(LABEL,ACTION) { if(maxtprt > RSB_TIME_ZERO && maxtprt < rsb_time()+totprt) { RSB_INFO("Terminating execution earlier due to user set max timer of %2.3lg s.\n",maxtprt); ACTION; goto LABEL; } }
+')dnl
+dnl
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+	#define RSB_PERFORMANCE_COUNTERS_DUMP_MEAN(MSG,PMSG,TIMES,PCIP) if(want_perf_counters>0){rsb_perf_counters_update(); if(PMSG)rsb_perf_counters_dump(MSG,NULL,TIMES,PCIP); rsb_perf_counters_reset();/* TEMPORARY */}
+	#define RSB_PERFORMANCE_COUNTERS_DUMP(MSG,PMSG) if(want_perf_counters>1)RSB_PERFORMANCE_COUNTERS_DUMP_MEAN(MSG,PMSG,1,NULL) 
+#else /* RSB_WANT_PERFORMANCE_COUNTERS */
+	#define RSB_PERFORMANCE_COUNTERS_DUMP_MEAN(MSG,PMSG,TIMES,PCIP) 
+	#define RSB_PERFORMANCE_COUNTERS_DUMP(MSG,PMSG)
+#endif /* RSB_WANT_PERFORMANCE_COUNTERS */
+
+#if RSB_WITH_LIKWID
+#define RSB_TM_LIKWID_MARKER_R_START(R) if(want_likwid == RSB_BOOL_TRUE)RSB_LIKWID_MARKER_R_START(R)
+#define RSB_TM_LIKWID_MARKER_R_STOP(R)  if(want_likwid == RSB_BOOL_TRUE)RSB_LIKWID_MARKER_R_STOP(R)
+#else
+#define RSB_TM_LIKWID_MARKER_R_START(R)
+#define RSB_TM_LIKWID_MARKER_R_STOP(R)
+#endif /* RSB_WITH_LIKWID */
+
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+#ifdef RSB_HAVE_REGEX_H 
+#include <regex.h>
+#endif /* RSB_HAVE_REGEX_H */
+#define RSBENCH_STDERR RSB_STDERR
+')dnl
+
+#define RSB_WANT_PERFORMANCE_COUNTERS_IN_RSBENCH  defined(RSB_WANT_PERFORMANCE_COUNTERS) && (RSB_WANT_PERFORMANCE_COUNTERS==1)
+
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+static int rsb__echo_cargs(const int argc, rsb_char_t * const argv[])
+{
+	int argci;
+
+	if(argc > 0)
+		RSBENCH_STDOUT("# %s",argv[0]);
+	for(argci=1; argci<argc; ++argci)
+	{
+		RSBENCH_STDOUT(" %s",argv[argci]);
+	}
+	RSBENCH_STDOUT("\n");
+	return 0;
+}
+')dnl
+
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+#ifdef RSB_HAVE_REGEX_H 
+static	rsb_bool_t rsb_regexp_match(const rsb_char_t*s, const rsb_char_t*r)
+	{
+		regex_t regex;
+		const int nmatch = 1;
+		regmatch_t pmatch[nmatch];
+		rsb_bool_t match = RSB_BOOL_FALSE;
+		int ignorecase = 0;
+		int ignorenewlines = 0;
+
+		if(!r || !strlen(r))
+			goto ret;
+
+		if(regcomp(&regex,r, 0 | REG_EXTENDED | (ignorecase==0?0:REG_ICASE) )!=0)
+		{
+			RSB_ERROR("error calling regcomp; invalid regexp: %s\n",s);
+			goto ret;
+		}
+
+		if(regexec(&regex,s+0,nmatch,pmatch,0)!=REG_NOMATCH)
+		{
+			match = RSB_BOOL_TRUE;
+		}
+		regfree(&regex);
+ret:
+		return match;
+	}
+#endif /* RSB_HAVE_REGEX_H */
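+/*
+ * Example (illustrative pattern): rsb_regexp_match("audikw_1.mtx",".*\\.mtx$")
+ * yields RSB_BOOL_TRUE; this is the helper behind
+ * --skip-loading-if-matching-regex.
+ */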
+')dnl
+
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+static void rsb__echo_timeandlabel(const char*l, const char*r, rsb_time_t *stp)
+{
+	rsb_time_t ct = rsb_time();
+
+	if(stp && *stp)
+		RSBENCH_STDOUT("#%s%.0lf (after %.1lfs of w.c.t.)%s",l?l:"",ct,ct-*stp,r?r:"");
+	else
+		RSBENCH_STDOUT("#%s%.0lf%s",l?l:"",ct,r?r:"");
+	if(stp)
+		*stp = ct;
+}
+')dnl
+
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+static void rsb__impcdstr(char * dst, const char * h, const char *t, const char * pp, const char * ap)
+{
+	/* There is some overlap with rsb__cat_compver and rsb__sprint_matrix_implementation_code that shall be resolved. */
+	rsb_char_t buf[RSB_CONST_MATRIX_IMPLEMENTATION_CODE_STRING_MAX_LENGTH];/* Flawfinder: ignore */
+
+	rsb__cat_compver(buf);
+	strcat(buf,"");
+	rsb__sprintf(dst,"%s%s_%s_%.0lf_%s%s%s",pp?pp:"",h,rsb__getenv_nnr("HOSTNAME"),rsb_time(),buf,ap?ap:"",t);
+}
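+/*
+ * Example: with pp="pr_", h="rsbench", t=".rpr" and HOSTNAME=node01, dst
+ * becomes something like "pr_rsbench_node01_1474500000_<compver>.rpr"
+ * (timestamp and compiler-version fields vary; purely illustrative).
+ */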
+')dnl
+
+int rsb_test_help_and_exit(rsb_char_t *argv0, rsb_option *o, int code)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	    size_t i=0;
+
+            printf("%s %s",argv0," where OPTIONS are taken from :\n");
+            for(i=0;o[i].val;++i)
+            {
+                if(o[i].val<RSB_MAX_VALUE_FOR_TYPE(rsb_char_t) && isprint(o[i].val)  )/* please do not swap conditions : some isprint() implementations segfault on this */
+		{
+                	printf("\t-%c",(rsb_char_t)(o[i].val));
+		}
+		else
+			printf("\t");
+                printf("\t\t");
+		if(o[i].name)
+	                printf("--%s",o[i].name);
+                switch(o[i].has_arg)
+		{
+	                case no_argument:
+	                break;
+	                case required_argument:
+	                printf(" <arg>");
+	                break;
+	                case optional_argument:
+	                printf(" [=arg]");
+	                break;
+	                default:
+        	        ;
+                };
+                printf("\n");
+	    }
+            printf("\n");
+	    printf("Arguments to --want-autotune of the format \"%s\", where S is the autotuning time in seconds, X is the number of tries, T the number of starting threads, V can be either q for quiet autotuning or v for a verbose one (can be specified twice). Valid examples: 3.0s2x4tv, 3.0s2x0tq, 3.0s, 2.0s10x . See documentation of rsb_tune_spmm for a full explanation of these parameters role in auto-tuning.\n",RSB_WAT_FMT_H);
+            printf("Report bugs to %s.\n",RSB_PACKAGE_BUGREPORT);
+            return code;
+}
+')dnl
+
+define(`RSB_M4_COMPLETE_TEST_PROGRAM_FUNCTION',`dnl
+pushdef(`mop',$1)dnl
+int rsb__main_block_partitioned_`'mop`'(const int argc, rsb_char_t * const argv[])
+ifdef(`ONLY_WANT_HEADERS',`dnl
+;',`dnl
+{
+	/*!
+	 * \ingroup gr_bench
+	 * This function implements a complete program for using our variable block
+	 * rows sparse matrix storage as if it were a fixed block size format.
+	 * It is useful for benchmarking against fixed block sparse matrix codes.
+	 * 
+	 * This function will benchmark the "mop" matrix operation.
+	 * */
+
+	/*
+	 * This example main program reads in a Matrix Market file in block format and multiplies the matrix by a unit vector.
+	 **/
+	rsb_option options[] = {
+	    {"all-flags",	0 , NULL, 0x51},/* Q */  
+	    {"allow-any-transposition-combination",	0 , NULL, 0x61617463 },/* aatc */  
+ifelse(RSB_M4_IS_SPXX_TWO_VECTORS_OPERATING_KERNEL_MOP(mop),1,`dnl
+	    {"alpha",	required_argument, NULL , 0x414C},/* AL */
+')dnl
+	    {"alternate-sort",	no_argument, NULL , 0x4153},/* AS */
+	    {"auto-blocking",	0 , NULL, 0x41},/* A */
+	    {"be-verbose",		0, NULL, 0x76},	/* v */
+ifelse(RSB_M4_IS_SPXX_TWO_VECTORS_OPERATING_KERNEL_MOP(mop),1,`dnl
+	    {"beta",	required_argument, NULL ,  0x4246},/* BE */
+')dnl
+	    {"block-columnsize",	required_argument, NULL, 0x63},/* c */  
+	    {"block-rowsize",   required_argument, NULL, 0x72 },/* r */
+	    {"cache-blocking",	required_argument, NULL , 0x4342},/* CB */
+/*	    {"cache-flush",	no_argument, NULL, 0x4343},*/ /*   */
+	    {"column-expand",	required_argument, NULL, 0x6B},/* k */  
+	    {"compare-competitors",	no_argument, NULL, 0x6363},/* cc */  
+	    {"convert",	0, NULL, 0x4B},/* K */  
+/*	    {"convert",	required_argument, NULL, 0x4B},*//* K   */
+dnl	    {"dense",	required_argument, NULL, 0xbabb0 },   /* */
+	    {"dense",	required_argument, NULL, 0x64 },   /* d */
+	    {"diagonal-dominance-check",	no_argument , NULL, 0x4444},/* DD */  /* new */
+dnl	    {"dump-profile",	0 , NULL, 0x4F},/* O */  
+	    {"dump-n-lhs-elements",	required_argument , NULL, 0x444444},/* DDD */  /* new */
+	    {"echo-arguments",	no_argument , NULL, 0x6563686f},/* echo */  /* new */
+ifelse(mop,`mat_stats',`dnl
+	    {"estimate-samples",		required_argument, NULL, 0x53},	/* S */
+dnl 	    {"until-confidence",required_argument, NULL, 0x75},	/* u */	/* dead option */
+	    {"estimate-fillin",required_argument, NULL, 0x65},	/* e */
+')dnl
+	    {"flush-cache-in-iterations",	no_argument, NULL, 0x4343},/*  */  
+	    {"impatient",	no_argument, NULL, 0x696d7061},/* impa[tient] */  
+	    {"no-flush-cache-in-iterations",	no_argument, NULL, 0x434E},/*  */  
+	    {"flush-cache-around-loop",	no_argument, NULL, 0x434343},/*  */  
+	    {"want-ancillary-execs",	no_argument, NULL, 0x767646},/*  */  
+	    {"no-want-ancillary-execs",	no_argument, NULL, 0x42767646},/*  */  
+	    {"no-flush-cache-around-loop", no_argument	, NULL, 0x43434E},/*  */  
+	    {"want-no-recursive",	no_argument, NULL, 0x776e720a},/*  */  
+	    {"guess-blocking",	no_argument , NULL, 0x47},/* G */
+	    {"help",	no_argument , NULL, 0x68},	/* h */
+	    {"ilu0",	no_argument , NULL, 0x494B55},/* ILU */  /* new */
+	    {"incx",	required_argument, NULL, 0xb1bb0 },/* */  
+	    {"incy",	required_argument, NULL, 0xb1bb1 },/* */  
+	    {"in-place-assembly-experimental",	no_argument , NULL, 0x6970},/* i */  
+	    {"in-place-csr",	0 , NULL, 0x69},/* i */  
+	    {"in-place-permutation",	no_argument, NULL, 0x50},   /* P */
+#if RSB_WITH_LIKWID
+	    {"likwid",	no_argument, NULL, 0x6c696b77},   /* likw */
+#endif /* RSB_WITH_LIKWID */
+dnl	    {"lower",	required_argument, NULL, 0xbabb1 },   /* */
+	    {"lower",	required_argument, NULL, 0x6c},   /* l */
+	    {"lower-dense",	required_argument, NULL, 0x6c64},   /* ld */
+	    {"generate-lowerband",	required_argument, NULL, 0x6c6c},   /* ll */
+	    {"gen-lband",	required_argument, NULL, 0x6c6c},   /* ll */
+	    {"generate-spacing",	required_argument, NULL, 0xbabb2 },   /* */
+	    {"matrix-dump",	0 , NULL, 0x44044},/* D */  
+	    {"matrix-dump-graph",	required_argument , NULL, 0x44047},/* DG */  
+	    {"matrix-dump-internals",	0 , NULL, 0x49049},/* I */  
+	    {"merge-experimental",	required_argument , NULL, 0x6d656578},/* meex */  
+	    {"split-experimental",	required_argument , NULL, 0x73706578},/* spex */  
+	    {"ms-experimental",	required_argument , NULL, 0x6d736578},/* msex */  
+	    {"matrix-filename",	required_argument, NULL, 0x66},/* f */  
+	    {"matrix-storage",	required_argument, NULL, 0x46},/* F */  
+	    {"matrix-time",	0 , NULL, 0x4D},/* M */  /* new */
+	    {"mem-hierarchy-info",	required_argument , NULL, 0x4D4D},/* MM */  /* new */
+	    {"max-runtime",	required_argument , NULL, 0x6d617275},/* maru */
+	    {"no-op",		0 , NULL, 0x4E},	/* N */
+	    {"notranspose",	no_argument, NULL, 0x5051},   /* do not transpose the operation */
+	    {"nrhs",	required_argument, NULL, 0x6e726873},   /* */
+	    {"one-nonunit-incx-incy-nrhs-per-type",	no_argument, NULL, 0x6e697270},   /* */
+	    RSB_BENCH_PROG_OPTS
+	    {"oski-benchmark",	0 , NULL, 0x42},/* B: only long option *//* comparative benchmarking agains OSKI */
+ifelse(RSB_M4_IS_SPXX_KERNEL_MOP(mop),`1',`dnl
+	    {"mkl-benchmark",	0 , NULL, 0x4C},/* L: only long option *//* comparative benchmarking agains MKL */
+')dnl
+	    {"out-lhs",		0 , NULL, 0x6F},/* o */	/* should accept an output file name, optionally */
+	    {"out-rhs",		0 , NULL, 0x6F6F},/* o */	/* should accept an output file name, optionally */
+	    {"override-matrix-name",	required_argument , NULL, 0x6F6D6E},/* omn */	
+	    {"pattern-mark",	0 , NULL, 0x70},/* p */
+	    {"pre-transpose",	no_argument, NULL, 0x5454},   /* transpose the matrix before assembly  */
+	    {"read-as-binary",		required_argument, NULL, 0x62},/* b */
+	    {"repeat-constructor",	required_argument , NULL, 0x4A4A},
+	    {"reuse-io-arrays",	no_argument , NULL, 0x726961}, /* ria */
+	    {"no-reuse-io-arrays",	no_argument , NULL, 0x6e726961 }, /* nria */
+	    {"reverse-alternate-rows",	no_argument , NULL, 0x4A4A4A},
+	    {"generate-upperband",	required_argument, NULL, 0x7575},   /* uu */
+	    {"gen-uband",	required_argument, NULL, 0x7575},   /* uu */
+	    {"generate-diagonal",	required_argument, NULL, 0x6464 },   /* dd */
+	    {"gen-diag",	required_argument, NULL, 0x6464 },   /* dd */
+	    {"zig-zag",	no_argument , NULL, 0x4A4A4A},
+	    {"subdivision-multiplier",	required_argument, NULL , 0x534D},/* SM */
+#if RSB_WANT_BOUNDED_BOXES
+	    {"bounded-box",	required_argument, NULL , 0x4242},/* BB */
+#endif /* RSB_WANT_BOUNDED_BOXES */
+ifelse(mop,`mat_stats',`dnl
+	    {"max-nnz-samples",	required_argument, NULL, 0x73},	/* s */
+',`dnl
+	    {"sort",		0 , NULL, 0x73},	/* s */
+')dnl
+	    {"no-leaf-multivec",	no_argument, NULL , 0x6e6c6d6d},/* nlmm */
+	    {"with-leaf-multivec",	no_argument, NULL , 0x636c6d6d},/* wlmm */
+	    {"sort-after-load",	no_argument, NULL, 0x7373},/* ss */  
+	    {"skip-loading-symmetric-matrices",	 no_argument, NULL, 0x736c736d},/* slsm */  
+	    {"skip-loading-unsymmetric-matrices",no_argument, NULL, 0x736c756d},/* slum */  
+	    {"skip-loading-hermitian-matrices",no_argument, NULL, 0x736c686d},/* slhm */  
+	    {"skip-loading-not-unsymmetric-matrices",no_argument, NULL, 0x736c6e75},/* slnu */  
+	    {"skip-loading-if-more-nnz-matrices",required_argument, NULL, 0x736c6d6e},/* slmn */
+	    {"skip-loading-if-less-nnz-matrices",required_argument, NULL, 0x736c6e6e},/* slnn */  
+	    {"skip-loading-if-more-filesize-kb-matrices",required_argument, NULL, 0x736c6d73},/* slms */  
+#ifdef RSB_HAVE_REGEX_H 
+	    {"skip-loading-if-matching-regex",required_argument, NULL, 0x736c6d72},/* slmr */  
+#endif /* RSB_HAVE_REGEX_H */
+	    {"skip-loading-if-matching-substr",required_argument, NULL, 0x736c7373},/* slss */  
+	    {"times",		required_argument, NULL, 0x74},/* t */  
+	    {"transpose-as",	required_argument, NULL, 0x5040},   /* do transpose the operation */
+	    {"transpose",	no_argument, NULL, 0x5050},   /* do transpose the operation */
+	    {"also-transpose",	no_argument, NULL, 0x4150},  /* N,T: do transpose the operation after no transposition */
+	    {"all-transposes",	no_argument, NULL, 0x616c6c74},  /* N,T,C */
+	    {"type",		required_argument, NULL, 0x54},/* T */  
+	    {"types",		required_argument, NULL, 0x54},/* T */  
+	    {"update",		0 , NULL, 0x55},	/* U */
+	    {"as-unsymmetric",		0 , NULL, 0x5555},	/* UU: TODO: insert such a test by default, in order to quantify the benefit of symmetry */
+	    {"as-symmetric",		0 , NULL, 0x5353},	/* SS */
+	    {"only-lower-triangle",		0 , NULL, 0x4F4C54},	/* OLT */
+	    {"only-upper-triangle",		0 , NULL, 0x4F5554},	/* OUT */
+	    {"verbose",	no_argument , NULL, 0x56},/* V */
+	    {"want-io-only",	no_argument , NULL, 0x4949},/* --want-io-only */
+	    {"want-nonzeroes-distplot",	no_argument, NULL, 0x776E68},/* wnh */  
+	    {"want-accuracy-test",	no_argument, NULL, 0x776174},/* wat */  
+	    {"want-getdiag-bench",	no_argument , NULL, 0x774446},/* wde */  /* FIXME: obsolete ? */
+	    {"want-getrow-bench",	no_argument , NULL, 0x777246},/* wre */  /* FIXME: obsolete ? */
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+	    {"want-perf-counters",	no_argument , NULL, 0x707763},/* wpc */
+#endif
+	    {"want-print-per-subm-stats",	no_argument , NULL, 0x77707373},/* wpss */
+	    {"want-only-accuracy-test",	no_argument, NULL, 0x776F6174},/* woat */  
+	    {"want-autotune",	required_argument, NULL, 0x7772740a},/* wrt */  
+	    {"want-no-autotune",	no_argument, NULL, 0x776e7274},/* wnrt */  
+#if RSB_HAVE_METIS
+	    {"want-metis-reordering",	no_argument, NULL, 0x776d6272 },/* wmbr */  
+#endif
+	    {"want-mkl-autotune",	required_argument, NULL, 0x776d6174},/* wmat */  
+	    {"want-mkl-one-based-indexing",	no_argument, NULL, 0x776d6f62 },/* wmob */  
+	    {"want-unordered-coo-test",	no_argument, NULL, 0x775563},/* */  
+	    {"with-flags",	required_argument, NULL, 0x71},/* q */  
+	    {"write-as-binary",	required_argument, NULL, 0x77 }, /* w */
+	    {"write-as-csr",	required_argument, NULL,  0x63777273 }, /* wcsr */
+	    {"write-performance-record",	required_argument, NULL, 0x77707266 }, /* write performance record file  */
+	    {"performance-record-name-append",	required_argument, NULL, 0x77707261 }, /* ...append  */
+	    {"performance-record-name-prepend",	required_argument, NULL, 0x77707270 }, /* ...prepend  */
+	    {"write-no-performance-record",	no_argument, NULL, 0x776e7072 }, /* write no performance record */
+	    {"discard-read-zeros",	no_argument, NULL,  0x64697a65 }, /* dize */
+	    {"z-sorted-coo",	no_argument, NULL , 0x7A},/* z */
+	    {0,0,0,0}	};
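+
+	/*
+	 * A note on the option values above: short options use the ASCII code
+	 * of their letter (e.g. 0x66 == 'f'), while long-only options pack a
+	 * multi-letter mnemonic into a single int, one byte per character:
+	 * e.g. 0x6c64 packs 'l','d' for --lower-dense and 0x736c736d packs
+	 * 's','l','s','m' for --skip-loading-symmetric-matrices.  This keeps
+	 * every case label in the option switch below unique and greppable.
+	 */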
+
+	rsb_nnz_idx_t nnz = 0;
+	int c;
+	int opt_index = 0;
+
+	rsb_coo_idx_t *IA = NULL, *JA = NULL;
+	void *VA = NULL;
+
+	int g_estimate_matrix_construction_time = 0;
+dnl	int g_dump_performance_profile = 0;
+	int g_all_flags = 0;
+	int g_sort_only = 0;
+	int repeat_construction = 1;	/* times to call the matrix constructor (the more times, the more accurate measurements) */
+
+	rsb_type_t typecode = RSB_NUMERICAL_TYPE_DEFAULT, typecode_old = RSB_NUMERICAL_TYPE_DEFAULT;
+	rsb_int ntypecodes = 0,typecodesi;
+	const rsb_int maxtypes = 2*RSB_IMPLEMENTED_TYPES;
+	rsb_type_t typecodes[maxtypes+1] ;
+
+	rsb_blk_idx_t br = 1;
+	rsb_blk_idx_t bc = 1;
+	char * bcs = NULL, *brs = NULL, *cns = NULL, *mhs = NULL;
+	rsb_blk_idx_t * brv = NULL;
+	rsb_blk_idx_t * bcv = NULL;
+	int brl = 0;
+	int bcl = 0;
+	rsb_thread_t ca_[1] = {1};
+	rsb_thread_t * ca = ca_;
+	rsb_thread_t cn = 1, ci = 0, cc = ca[ci];
+
+	int times = 100;	/* the default number of times to perform mop */
+	rsb_coo_idx_t nrA = 0, ncA = 0, ndA = 0;
+	int filenamen = 0, filenamei = 0;
+#define RSB_RSBENCH_STATIC_FILENAMEA 1
+#if RSB_RSBENCH_STATIC_FILENAMEA
+#define RSB_RSBENCH_MAX_MTXFILES 256
+	const rsb_char_t *filenamea[RSB_RSBENCH_MAX_MTXFILES];
+#else
+	const rsb_char_t **filenamea = NULL;
+#endif
+	const rsb_char_t *filename = NULL;
+	const rsb_char_t *filename_old = NULL;
+	const rsb_char_t *usfnbuf = NULL;
+	rsb_char_t*fprfn = NULL, *cprfn = NULL, *apprfn = NULL, *ppprfn = NULL; /* final/checkpoint performance file name, append/prepend */
+	rsb_char_t fprfnb[RSB_MAX_FILENAME_LENGTH], cprfnb[RSB_MAX_FILENAME_LENGTH];/* final/checkpoint performance file name buffers */
+	rsb_char_t fnbuf[RSB_MAX_FILENAME_LENGTH];
+	rsb_char_t*fnbufp[1]={&(fnbuf[0])};
+	rsb_char_t * dump_graph_file=NULL;
+	rsb_flags_t flags_o = RSB_FLAG_NOFLAGS|RSB_FLAG_OWN_PARTITIONING_ARRAYS;
+/*	RSB_DO_FLAG_ADD(flags_o,RSB_FLAG_DISCARD_ZEROS)	;	*/ /* FIXME : EXPERIMENTAL (watch nnz count on a multi blocking run ...) */
+	rsb_flags_t flagsa[128] = RSB_M4_ZEROS_ARRAY(128);
+	rsb_flags_t r_flags = RSB_FLAG_NOFLAGS; /* recycling flags */
+	int fn = 1, fi = 0;/* for flags */
+	int tn = 1, ti = 0;/* for transposition */
+	int g_debug = 0;
+	int be_verbose = 0;
+	int pattern_only = 0;
+	int dumpout = 0;
+	int dumpout_internals = 0, merge_experimental = 0, split_experimental = 0;
+	int just_enter_tuning = 1;
+	rsb_char_t * csr_w_filename = NULL;
+	rsb_char_t * b_w_filename = NULL;
+	rsb_char_t * b_r_filename = NULL;
+	int dumpvec = rsb_dumpvec_no;
+	struct rsb_mtx_t * mtxAp = NULL;
+dnl
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+dnl
+	rsb_blk_idx_t rua[] = RSB_ROWS_UNROLL_ARRAY;
+	rsb_blk_idx_t cua[] = RSB_COLUMNS_UNROLL_ARRAY;
+dnl
+	int guess_blocking_test = 0;		/* guess test stuff */
+	rsb_int want_column_expand = 0;
+dnl
+ifelse(mop,`mat_stats',`',`dnl
+	rsb_perf_t bperf=0,wperf=0,cperf=0;			/* guess test stuff */
+	rsb_fillin_t egfillin=0,ebfillin=0,bfillin=0,maxfillin=0;	/* guess test stuff */
+	rsb_blk_idx_t bri=0,bci=0;		/* guess test stuff */
+	rsb_perf_t omta = RSB_REAL_ZERO; /* op memory traffic amount */
+	rsb_fillin_t fillin = RSB_REAL_ZERO;
+	rsb_perf_t raw_Mflops = RSB_REAL_ZERO,true_Mflops = RSB_REAL_ZERO, true_gem_Mflops = RSB_REAL_ZERO;
+	rsb_char_t buf[RSB_CONST_MATRIX_IMPLEMENTATION_CODE_STRING_MAX_LENGTH];/* Flawfinder: ignore */
+	rsb_fillin_t efillin = RSB_REAL_ZERO;
+	rsb_perf_t eperf = RSB_REAL_ZERO;
+')dnl
+
+	rsb_bool_t should_recycle_matrix = RSB_BOOL_FALSE; /* reuse the matrix across measurements */
+	rsb_bool_t should_recycle_io = RSB_BOOL_TRUE;/* reuse the input arrays */
+	rsb_bool_t g_allow_any_tr_comb = RSB_BOOL_FALSE; /* allow any transposition combination */
+	
+ifelse(mop,`mat_stats',`dnl
+	int g_estimate_fillin = 0;
+	int want_percentage = 0;
+	double until_confidence = 0;
+
+	rsb_nnz_idx_t  max_nnzs = 0;
+	rsb_nnz_idx_t nnzn = 10;
+	rsb_nnz_idx_t * nnzs = NULL;
+	size_t * element_count = NULL;
+	size_t * block_count = NULL;
+	//rsb_nnz_idx_t i = 0;
+	struct rsb_mtx_partitioning_info_t pinfo;
+dnl	struct rsb_mop_performance_info_t mpi;
+')dnl
+	rsb_trans_t transAo = RSB_DEFAULT_TRANSPOSITION;
+	rsb_trans_t transA = RSB_DEFAULT_TRANSPOSITION;
+	rsb_nnz_idx_t should_generate_dense = 0;
+	rsb_nnz_idx_t should_generate_dense_nc = 0;
+	rsb_nnz_idx_t should_generate_lband = -1, should_generate_uband = -1;
+	rsb_nnz_idx_t want_generated_spacing = 0;
+	rsb_bool_t want_only_star_scan = RSB_BOOL_FALSE;
+dnl
+	rsb_blk_idx_t nrhs = 1, nrhsn = 1, nrhsi = 1, nrhsl = 1;
+	const char*nrhss = NULL;
+	rsb_blk_idx_t *nrhsa = NULL;
+dnl
+	size_t outnri = 0, rhsnri = 0;
+	rsb_nnz_idx_t n_dumpres = 0;
+	rsb_nnz_idx_t n_dumprhs = 0;
+	rsb_bool_t ignore_failed_fio = RSB_BOOL_TRUE; /* FIXME 20140912 experimental */
+	rsb_bool_t want_convert = RSB_BOOL_FALSE;
+	rsb_bool_t want_update = RSB_BOOL_FALSE;
+	rsb_int_t want_impatiently_soon_pre_results = 0; /* FIXME: temporary */
+	rsb_bool_t want_inner_flush = RSB_BOOL_FALSE;
+	rsb_bool_t want_outer_flush = RSB_BOOL_TRUE;
+	rsb_bool_t want_ancillary_execs = RSB_BOOL_FALSE;
+	rsb_time_t st = RSB_TIME_ZERO;
+	rsb_time_t totiot = RSB_TIME_ZERO; /* total I/O time */
+	rsb_time_t totatt = RSB_TIME_ZERO; /* total ancillary tests time */ /* FIXME: is this complete ? */
+	rsb_time_t totct = RSB_TIME_ZERO; /* total conversions time */ /* FIXME: is this complete ? */
+	rsb_time_t tottt = RSB_TIME_ZERO; /* total tuning time */
+	rsb_time_t totht = RSB_TIME_ZERO; /* total checks time */ /* FIXME: is this complete ? */
+	rsb_time_t maxtprt = RSB_TIME_ZERO; /* max total program run time */
+	const rsb_time_t totprt = - rsb_time(); /* total program run time */
+	rsb_bool_t want_as_unsymmetric = RSB_BOOL_FALSE;
+	rsb_bool_t want_as_symmetric = RSB_BOOL_FALSE;
+	rsb_bool_t want_only_lowtri = RSB_BOOL_FALSE;
+	rsb_bool_t want_only_upptri = RSB_BOOL_FALSE;
+	rsb_bool_t want_sort_after_load = RSB_BOOL_FALSE;
+	rsb_bool_t want_slsm = RSB_BOOL_FALSE, want_slum = RSB_BOOL_FALSE, want_slnu = RSB_BOOL_FALSE, want_slhm = RSB_BOOL_FALSE;
+	rsb_nnz_idx_t want_slmn = 0,  want_slnn = 0,  want_slms = 0;
+#ifdef RSB_HAVE_REGEX_H
+	const rsb_char_t * want_slmr = NULL;
+#endif /* RSB_HAVE_REGEX_H */
+	const rsb_char_t * want_slss = NULL;
+	rsb_bool_t do_perform_ilu = RSB_BOOL_FALSE;
+	rsb_bool_t do_perform_ddc = RSB_BOOL_FALSE;
+	rsb_bool_t want_in_place_assembly = RSB_BOOL_FALSE;
+	rsb_bool_t want_accuracy_test = 0;	/* FIXME-EXPERIMENTAL */
+	rsb_bool_t want_nonzeroes_distplot = 0;	/* FIXME-EXPERIMENTAL */
+	rsb_bool_t want_getdiag_bench = 0;	/* FIXME-EXPERIMENTAL */
+	rsb_bool_t want_getrow_bench = 0;	/* FIXME-EXPERIMENTAL */
+dnl
+	rsb_coo_idx_t mib = 0; /* MKL index base (FIXME: declared here and not within RSB_WANT_MKL because CSR copy made even with no MKL) */
+dnl
+ifelse(RSB_M4_IS_SPXX_KERNEL_MOP(mop),`1',`dnl
+#if RSB_WANT_MKL
+	rsb_bool_t want_mkl_bench = RSB_BOOL_FALSE;
+	rsb_bool_t want_mkl_bench_csr = RSB_BOOL_TRUE;
+	rsb_bool_t want_mkl_bench_gem = RSB_BOOL_TRUE;
+	rsb_bool_t want_mkl_bench_coo = RSB_BOOL_FALSE;
+#endif /* RSB_WANT_MKL */
+')dnl
+dnl
+dnl
+	rsb_time_t totmt = RSB_TIME_ZERO; /* total mkl/competitors (tuning) time */
+	rsb_bool_t want_perf_dump = RSB_BOOL_FALSE;
+	void*rspr = NULL; /* rsb sampled performance record structure pointer */
+
+ifelse(RSB_M4_IS_SPXX_TWO_VECTORS_OPERATING_KERNEL_MOP(mop),1,`dnl
+	rsb_aligned_t alpha[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_aligned_t beta[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_aligned_t errnorm[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_aligned_t * alphap = &(alpha[0]);
+	rsb_aligned_t * betap = &(beta[0]);
+	rsb_int alphai = 1, betai = 1;
+')dnl
+dnl
+	rsb_coo_idx_t incX = 1, incY = 1;
+	rsb_blk_idx_t incXn = 1, incXi = 1;
+	rsb_blk_idx_t incYn = 1, incYi = 1;
+	rsb_blk_idx_t *incXa = NULL, *incYa = NULL;
+dnl
+	rsb_coo_idx_t ldX = 0, ldY = 0;
+	rsb_bool_t want_incX = RSB_BOOL_FALSE,want_incY = RSB_BOOL_FALSE;
+	rsb_bool_t want_verbose = RSB_BOOL_FALSE;
+	rsb_int_t want_verbose_tuning = 0;
+	rsb_bool_t want_transpose = RSB_BOOL_FALSE;
+	#if 1
+	const int max_io = 10;
+	struct rsb_initopts io={NULL,NULL,0,RSB_IO_SPECIFIER_SET},*iop=&io;
+dnl	rsb_int_t preferred_sorting_method=1;
+	rsb_int_t should_use_cb_method = 0;
+	rsb_real_t subdivision_multiplier = 0.0;
+#if RSB_WANT_BOUNDED_BOXES
+	rsb_int_t want_bounded_box=1;
+#endif /* RSB_WANT_BOUNDED_BOXES */
+	rsb_int_t want_no_leaf_spmm=0;
+	void * io_values[max_io];
+	enum rsb_opt_t io_keys[max_io];
+	#else /* 1 */
+	struct rsb_initopts *iop = RSB_NULL_INIT_OPTIONS;
+	#endif /* 1 */
+	rsb_bool_t should_use_alternate_sort = RSB_BOOL_FALSE;
+	rsb_bool_t reverse_odd_rows = RSB_BOOL_FALSE;
+	rsb_bool_t zsort_for_coo = RSB_BOOL_FALSE;
+ifelse(RSB_M4_IS_SPXX_TWO_VECTORS_OPERATING_KERNEL_MOP(mop),1,`dnl
+	rsb_bool_t want_unordered_coo_bench = RSB_BOOL_FALSE;
+	rsb_time_t unordered_coo_op_tot_time = RSB_CONST_IMPOSSIBLY_BIG_TIME, unordered_coo_op_time = RSB_CONST_IMPOSSIBLY_BIG_TIME, unordered_coo_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+')dnl
+dnl
+#ifdef RSB_WANT_OSKI_BENCHMARKING 
+	/* FIXME : unfinished */
+	rsb_time_t oski_t = RSB_TIME_ZERO,oski_m_t = RSB_TIME_ZERO,oski_a_t = RSB_TIME_ZERO,oski_t_t = RSB_TIME_ZERO;
+	oski_idx_t * Aptr=NULL;
+	oski_idx_t * Aind=NULL;
+	oski_value_t * Aval=NULL;
+	oski_matrix_t A_tunable;
+        oski_vecview_t x_view;
+        oski_vecview_t y_view;
+	void * Oval = NULL;
+	rsb_coo_idx_t *OIA=NULL,*OJA=NULL;
+        rsb_char_t oxform[256];
+        double oalpha = 1, obeta = 0;
+	rsb_bool_t want_oski_bench=0;
+	#ifdef RSB_HAVE_SETENV
+	setenv("OSKI_LUA_PATH",OSKI_LUA_PATH,0/* 0: do not override an existing value; 1: override it */);
+	#endif /* RSB_HAVE_SETENV */
+#endif /* RSB_WANT_OSKI_BENCHMARKING */
+	rsb_time_t tinf = rsb__timer_granularity();
+dnl
+	rsb_aligned_t pone[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+dnl
+	rsb_bool_t want_likwid = RSB_BOOL_FALSE;
+dnl
+ifelse(RSB_M4_IS_SPXX_KERNEL_MOP(mop),1,`dnl
+	rsb_flags_t order = RSB_FLAG_WANT_COLUMN_MAJOR_ORDER;
+')dnl
+dnl
+	rsb_time_t want_autotuner = RSB_NEGATED_EXAGGERATED_TUNER_TIMES, want_mkl_autotuner = RSB_NEGATED_EXAGGERATED_TUNER_TIMES;
+	rsb_bool_t want_io_only = RSB_BOOL_FALSE;
+	rsb_int wat = 1;	/* want autotuning threads choice */
+	rsb_int wai = 1;	/* want autotuning rounds */
+	char wav = 0x56;	/* want autotuning verbose */
+	int wavf = RSB_AUT0_TUNING_VERBOSE;
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+	int want_perf_counters = 0;
+#endif
+	rsb_bool_t want_print_per_subm_stats = RSB_BOOL_FALSE;
+#if RSB_HAVE_METIS
+	rsb_bool_t want_wmbr = RSB_BOOL_FALSE;
+#endif
+	rsb_bool_t want_recursive = RSB_BOOL_TRUE;
+
+	io.keys = io_keys;
+	io.values = io_values;
+	io.n_pairs = 0;
+
+    	for (;;)
+	{
+		c = rsb_getopt_long(argc,argv,RSB_SAMPLE_PROGRAM_OPTIONS_GET_FLAGS"b:w:BGht:f:r:c:vpn:MNS:Bk:KU" /* Flawfinder: ignore */
+ifelse(mop,`mat_stats',`dnl
+		"s:e"
+',`dnl
+		/* s is in anyway, with RSB_SAMPLE_PROGRAM_OPTIONS_GET_FLAGS */
+')dnl
+		"o:O:"
+		, options, &opt_index);
+		if (c == -1)break;
+
+		RSB_DO_FLAG_ADD(flags_o,rsb__sample_program_options_get_flags(c,optarg));
+
+		switch (c)
+		{
+			case 0x62:	/* b */
+			b_r_filename = optarg;
+			break;
+			case  0xb1bb0:
+#if 0
+				incX = rsb__util_atoi(optarg);
+				if(incX<1){errval = RSB_ERR_BADARGS;goto err;}
+				if(incX>1)RSBENCH_STDOUT("# setting incX=%d\n",incX);
+				want_incX = RSB_BOOL_TRUE;
+#else
+			if(RSB_SOME_ERROR(rsb__util_get_bx_array(optarg,&incXn,&incXa)))
+				{RSB_ERROR(RSB_ERRM_ES);goto err;}
+#endif
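+			/* rsb__util_get_bx_array parses the option argument
+			   (presumably a comma-separated list, e.g. "1,2,4")
+			   into the (incXn,incXa) count/array pair scanned by
+			   the benchmark loops below. */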
+			break;
+			case  0x6970:
+				RSBENCH_STDOUT("# WARNING: in place assembly is an UNFINISHED, EXPERIMENTAL feature\n");
+				want_in_place_assembly = RSB_BOOL_TRUE;
+			break;
+			case  0xb1bb1:
+#if 0
+				incY = rsb__util_atoi(optarg);
+				if(incY<1){errval = RSB_ERR_BADARGS;goto err;}
+				if(incY>1)RSBENCH_STDOUT("# setting incY=%d\n",incY);
+				want_incY = RSB_BOOL_TRUE;
+#else
+			if(RSB_SOME_ERROR(rsb__util_get_bx_array(optarg,&incYn,&incYa)))
+				{RSB_ERROR(RSB_ERRM_ES);goto err;}
+#endif
+			break;
+			case 0x6c:
+			case 0x6c64: /* lower-dense */
+dnl			case 0xbabb1:
+			{
+				should_generate_dense = - rsb__util_atoi(optarg); // FIXME ! PROBLEMS
+			}
+			break;
+			case 0x6c696b77:
+#if RSB_WITH_LIKWID
+				want_likwid = RSB_BOOL_TRUE;
+				dnl RSBENCH_STDOUT("Usage of the LIKWID API requested.\n");
+#else /* RSB_WITH_LIKWID */
+				dnl RSBENCH_STDOUT("Sorry, LIKWID has not been configured in !\n");
+#endif /* RSB_WITH_LIKWID */
+			break;
+			case 0x6c6c:
+			{
+				should_generate_lband = rsb__util_atoi(optarg); // FIXME ! PROBLEMS
+				if(should_generate_uband==-1)should_generate_uband=0;
+			}
+			break;
+			case 0x7575:
+			{
+				should_generate_uband = rsb__util_atoi(optarg); // FIXME ! PROBLEMS
+				if(should_generate_lband==-1)should_generate_lband=0;
+			}
+			break;
+			case 0x6464: /* gen-diag */
+			{
+				should_generate_uband = 0;
+				should_generate_lband = 0;
+				should_generate_dense = rsb__util_atoi(optarg); // FIXME ! PROBLEMS
+			}
+			break;
+			case 0xbabb2:
+			{
+				want_generated_spacing = rsb__util_atoi(optarg);
+			}
+			break;
+			case 0x6e697270:
+dnl
+			want_only_star_scan = RSB_BOOL_TRUE;
+			break;
+dnl
+			case 0x64: /* dense */
+dnl			case 0xbabb0:
+			{
+				/* should_generate_dense = rsb__util_atoi(optarg); */  // FIXME ! PROBLEMS
+				int sargs = sscanf(optarg,"%dx%d",&should_generate_dense,&should_generate_dense_nc);
+				if( should_generate_dense_nc == 0)
+					should_generate_dense_nc = should_generate_dense;
+				/* RSBENCH_STDOUT("# Requested generation of a %d by %d matrix\n",should_generate_dense,should_generate_dense_nc); */
+			}
+			break;
+			/* FIXME : please note that specifying two or more times -r or -c will cause memory leaks */
+			case 0x72:/* r */
+			brs=optarg;
+			break;
+			case 0x63: /* c */
+			bcs=optarg;
+			break;
+			case 0x42: /* oski : B */
+#ifdef RSB_WANT_OSKI_BENCHMARKING 
+			want_oski_bench = RSB_BOOL_TRUE;
+#else /* RSB_WANT_OSKI_BENCHMARKING */
+			RSB_ERROR("Sorry, OSKI comparative benchmarking was opted out at compile time\n");
+			goto err;
+#endif /* RSB_WANT_OSKI_BENCHMARKING */
+			break;
+ifelse(RSB_M4_IS_SPXX_KERNEL_MOP(mop),`1',`dnl
+			case 0x4C: /* MKL : L */
+#if RSB_WANT_MKL
+			want_mkl_bench = RSB_BOOL_TRUE;
+#else /* RSB_WANT_MKL */
+			RSB_ERROR("Sorry, MKL comparative benchmarking was opted out at compile time\n");
+			goto err;
+#endif /* RSB_WANT_MKL */
+			break;
+')dnl
+			case 0x61617463:
+			g_allow_any_tr_comb = RSB_BOOL_TRUE;
+			break;
+			case 0x51: /* Q (do not ask me why) */
+			g_all_flags = 1;
+			break;
+dnl			break;
+dnl			case 0x4F: /* O */
+dnl			g_dump_performance_profile=1;
+dnl			break;
+			case 0x44044: /* D */
+			dumpout = 1;
+			break;
+			case 0x5040: /*  */
+			transAo = rsb__do_transposition_from_char(*optarg);	/* */
+			break;
+			case 0x4150:
+			tn = 2;
+			break;
+			case 0x616c6c74:
+			tn = 3;
+			break;
+			case 0x5050: /*  */
+			transAo = rsb__do_transpose_transposition(transAo);
+			break;
+			case 0x5051: /*  */
+			transAo = RSB_TRANSPOSITION_N;
+			break;
+			case 0x6e726873: /*  */
+#if 0
+			nrhs = rsb__util_atoi(optarg);
+			/* if(nrhs>1){ RSB_ERROR("Sorry, nrhs > 1 still unsupported!\n"); goto err; } */
+#else
+			nrhss = optarg;
+			if(RSB_SOME_ERROR(rsb__util_get_bx_array(nrhss,&nrhsn,&nrhsa)))
+				{RSB_ERROR(RSB_ERRM_ES);goto err;}
+#endif
+
+			break;
+			case 0x5454: /*  */
+			want_transpose = !want_transpose;
+			break;
+			case 0x44047: /* DG */
+			dump_graph_file = optarg;
+			break;
+			case 0x49049: /* I */
+			dumpout_internals = 1;
+			break;
+			case 0x6d656578: /* meex */
+			merge_experimental = rsb__util_atoi(optarg);
+			RSB_ASSIGN_IF_ZERO(merge_experimental,RSB_CONST_MS_AT_AUTO_STEPS);
+			break;
+			case 0x73706578: /* spex */
+			split_experimental = rsb__util_atoi(optarg);
+			RSB_ASSIGN_IF_ZERO(split_experimental,RSB_CONST_MS_AT_AUTO_STEPS);
+			break;
+			case 0x6d736578: /* msex */
+			merge_experimental = split_experimental = rsb__util_atoi(optarg);
+			RSB_ASSIGN_IF_ZERO(merge_experimental,RSB_CONST_MS_AT_AUTO_STEPS);
+			RSB_ASSIGN_IF_ZERO(split_experimental,RSB_CONST_MS_AT_AUTO_STEPS);
+			break;
+			case 0x4444 : /* DD */
+			do_perform_ddc = RSB_BOOL_TRUE;
+			break;
+			case 0x444444 : /* DDD */
+			n_dumprhs = n_dumpres = rsb__util_atoi(optarg);
+			break;
+			case 0x6563686f: /* echo */
+			{
+				rsb_int argi=0;
+				if(argc>0) printf("#args: %s",argv[0]);
+				for(argi=1;argi<argc;++argi)
+					printf(" %s",argv[argi]);
+				printf("\n");
+			}
+			break;
+			case 0x494B55 : /* ILU */
+			do_perform_ilu = RSB_BOOL_TRUE;
+			break;
+			case 0x696d7061: /* */
+			want_impatiently_soon_pre_results = 1;
+			break;
+			case 0x4343: /* */
+			want_inner_flush = RSB_BOOL_TRUE;
+			break;
+			case 0x434E: /* */
+			want_inner_flush = RSB_BOOL_FALSE;
+			break;
+			case 0x434343: /*  */
+			want_outer_flush = RSB_BOOL_TRUE;
+			break;
+			case 0x43434E: /*  */
+			want_outer_flush = RSB_BOOL_FALSE;
+			break;
+			case 0x776e720a: /*  */
+			want_recursive = RSB_BOOL_FALSE;
+			break;
+			case 0x4D: /* M */
+			g_estimate_matrix_construction_time=1;
+			break;
+ifelse(mop,`mat_stats',`dnl
+			case 0x65: /* e */
+			g_estimate_fillin=1;
+			break;
+')dnl
+			case 0x7A:
+			zsort_for_coo = RSB_BOOL_TRUE;
+			RSBENCH_STDOUT("# WARNING: the now-active Z sort feature will apply only to COO submatrices\n");
+			break;
+			case 0x726961:
+			RSBENCH_STDOUT("# enabling reuse of I/O arrays (e.g. across type transitions)\n");
+			should_recycle_io = RSB_BOOL_TRUE;
+			break;
+			case 0x6e726961:
+			RSBENCH_STDOUT("# disabling reuse of I/O arrays (e.g. across type transitions)\n");
+			should_recycle_io = RSB_BOOL_FALSE;
+			break;
+			case 0x4A4A4A:
+			reverse_odd_rows = RSB_BOOL_TRUE;
+			RSBENCH_STDOUT("# WARNING: the row reversal feature applies only to CSR submatrices, and only to their indices\n");
+			break;
+			case 0x6F6D6E:
+			usfnbuf = optarg;
+			break;
+			case 0x4A4A:
+			repeat_construction = rsb__util_atoi(optarg);
+			if(repeat_construction<1)
+			{
+				RSB_ERROR("Constructor repetition times should be a positive number!\n");goto err;
+			}
+			break;
+			case 0x4342: /* CB */
+			should_use_cb_method = rsb__util_atoi(optarg);
+			break;
+			case 0x4153: /* AS */
+			should_use_alternate_sort = RSB_BOOL_TRUE;
+			break;
+			case 0x534D: /* SM */
+			subdivision_multiplier = rsb__util_atof(optarg);
+			break;
+#if RSB_WANT_BOUNDED_BOXES
+			case 0x4242: /* BB */
+			want_bounded_box = rsb__util_atoi(optarg);
+			break;
+#endif /* RSB_WANT_BOUNDED_BOXES */
+			case 0x6e6c6d6d: /* nlmm */
+			want_no_leaf_spmm = /*rsb__util_atoi(optarg)*/ -1;
+			break;
+			case 0x636c6d6d: /* wlmm */
+#if RSB_ENABLE_INNER_NRHS_SPMV
+			want_no_leaf_spmm = 0;
+#else
+			RSB_ERROR("Cannot activate the RSB_IO_WANT_LEAF_LEVEL_MULTIVEC option because RSB_ENABLE_INNER_NRHS_SPMV is opted out!\n");goto err;
+#endif
+			break;
+			case 0x4D4D: /* MM */
+			mhs = optarg;
+			break;
+			case 0x6d617275:
+			maxtprt = rsb__util_atof(optarg);
+			maxtprt = RSB_MAX( RSB_TIME_ZERO, maxtprt  );
+			break;
+			case 0x6F: /* o */
+			dumpvec = rsb_dumpvec_res;
+			break;
+			case 0x6F6F: /* oo */
+			dumpvec = rsb_dumpvec_rhs;
+			break;
+			case 0x70: /* p */
+			pattern_only = 1;
+			break;
+			case 0x4E: /* N */
+			g_sort_only = 1;
+			break;
+ifelse(mop,`mat_stats',`dnl
+			case 0x73: /* s */
+			/* FIXME : BROKEN! */
+			max_nnzs = rsb__util_atonnz(optarg);
+			if(*optarg && optarg[rsb__util_strlen(optarg)-1]==0x25)want_percentage=1;/* 0x25 == % */
+			break;
+',`dnl	
+			/* handled by rsb__sample_program_options_get_flags() */
+			case 0x73: /* s */
+				RSB_DEPRECATED("use of the sort flag");
+				/* no-op: the flag was already applied via rsb__sample_program_options_get_flags() */
+			break;
+')dnl
+ifelse(mop,`mat_stats',`dnl
+			case 0x53: /* S */
+			nnzn = rsb__util_atonnz(optarg);
+			if(nnzn<1){RSB_ERROR(RSB_ERRM_ES);goto err;}
+			break;
+')dnl
+			case 0x7373: /* ss */
+			want_sort_after_load = RSB_BOOL_TRUE;
+			break;
+			case 0x736c736d: /* slsm */
+			want_slsm = RSB_BOOL_TRUE;
+			break;
+			case 0x736c756d: /* slum */
+			want_slum = RSB_BOOL_TRUE;
+			break;
+			case 0x736c686d: /* slhm */
+			want_slhm = RSB_BOOL_TRUE;
+			break;
+			case 0x736c6e75: /* slnu */
+			want_slnu = RSB_BOOL_TRUE;
+			break;
+			case 0x736c6d6e: /* slmn */
+			want_slmn = rsb__util_atoi_km10(optarg);
+			break;
+			case 0x736c6e6e: /* slnn */
+			want_slnn = rsb__util_atoi_km10(optarg);
+			break;
+			case 0x736c6d73: /* slms */
+			want_slms = rsb__util_atoi_km2(optarg);
+			break;
+#ifdef RSB_HAVE_REGEX_H
+			case 0x736c6d72: /* slmr */
+			want_slmr = (optarg);
+			break;
+#endif /* RSB_HAVE_REGEX_H */
+			case 0x736c7373: /* slss */
+			want_slss = (optarg);
+			break;
+			case 0x74: /* t */
+			times = rsb__util_atoi(optarg);
+			break;
+			case 0x47: /* G */
+			guess_blocking_test = 1;
+			break;
+			case 0x54: /* T */
+			{
+				const char*toa = optarg;
+				ntypecodes=0; /* this neutralizes former -T ... option */
+				/* if( *optarg == 0x3A || *optarg == 0x2A ) */ /* : or * aka colon or asterisk */
+				if( ( ! isalpha(*optarg) ) || ( strstr(optarg,"all") != NULL ) )
+					toa = RSB_NUMERICAL_TYPE_PREPROCESSOR_SYMBOLS ;
+				for(;*toa;++toa)
+				if(isalpha(*toa))
+				{
+					if(ntypecodes<maxtypes)
+						typecodes[ntypecodes++]=typecode=toupper(*toa);
+					else
+					{
+						RSB_ERROR("Up to %d types supported! P.s.: Use a punctuation symbol to ask for all supported types.\n",maxtypes);
+						goto err;
+					}
+				}
+				typecodes[ntypecodes] = RSB_NUL;
+			}
+			break;
+			case 0x56: /* V */
+			want_verbose = RSB_BOOL_TRUE;
+			want_verbose_tuning ++;
+			break;
+			case 0x4949: /* II */
+			want_io_only = RSB_BOOL_TRUE;
+			break;
+			case 0x66: /* f */
+			filename = optarg;
+#if RSB_RSBENCH_STATIC_FILENAMEA
+#define RSB_RSBENCH_ADDF(FILENAME)	if(filenamen<RSB_RSBENCH_MAX_MTXFILES)filenamea[filenamen++] = (FILENAME); else {errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR("Please increase RSB_RSBENCH_MAX_MTXFILES (%d) and recompile !!\n",RSB_RSBENCH_MAX_MTXFILES);goto err;}
+#else
+ /* FIXME: for some reason, this seems to break e.g.  ./rsbench -oa -Ob --nrhs 1,2 -f pd.mtx -f A.mtx.
+    Of course this is wrong also w.r.t. rsb_calloc/rsb_lib_init, but that is not a problem.
+    Using calloc / realloc does not solve the problem.  */
+#define RSB_RSBENCH_ADDF(FILENAME)		if(filenamen==0) \
+				filenamea = rsb__calloc(sizeof(filenamea)*(filenamen+1)); \
+			else \
+				filenamea = rsb__do_realloc(filenamea, sizeof(filenamea)*(filenamen+1), sizeof(filenamea)); \
+			filenamea[filenamen++] = (FILENAME);
+#endif
+			RSB_RSBENCH_ADDF(filename) /* FIXME */
+			break;
+ifelse(RSB_M4_IS_SPXX_TWO_VECTORS_OPERATING_KERNEL_MOP(mop),1,`dnl
+			case 0x414C: /* AL */
+			alphai = rsb__util_atoi(optarg);
+			break;
+			case 0x4246: /* BE */
+			betai = rsb__util_atoi(optarg);
+			break;
+')dnl
+			case 0x4B: /* K */
+			want_convert = RSB_BOOL_TRUE; /* FIXME: ignoring argument */
+			break;
+			case 0x55: /* U */
+			want_update = RSB_BOOL_TRUE; /* FIXME: ignoring argument */
+			break;
+			case 0x5353: /* SS */
+			want_as_symmetric = RSB_BOOL_TRUE;
+			break;
+			case 0x5555: /* UU */
+			want_as_unsymmetric = RSB_BOOL_TRUE;
+			break;
+			case 0x4F4C54: /* OLT */
+			want_only_lowtri = RSB_BOOL_TRUE;
+			break;
+			case 0x4F5554: /* OUT */
+			want_only_upptri = RSB_BOOL_TRUE;
+			break;
+			case 0x6363:
+			/* this flag activates all interfaced libraries (if any) */
+ifelse(RSB_M4_IS_SPXX_KERNEL_MOP(mop),`1',`dnl
+#if RSB_WANT_MKL
+			want_mkl_bench = RSB_BOOL_TRUE;
+#endif /* RSB_WANT_MKL */
+')dnl
+			break;
+			case 0x6B: /* ncA */
+			want_column_expand = rsb__util_atoi(optarg);
+			break;
+			case 0x6E: /* n */
+			cns = optarg; /* cores (threads) numbers (specification) string */
+			break;
+ifelse(mop,`mat_stats',`dnl
+			case 0x75 :	/* u */
+			until_confidence = rsb__util_atof(optarg);
+			break;
+')dnl
+			case 0x76: /* spmv_uauz */
+			be_verbose = 1;
+			break;
+			case 0x774446:	/* wde */
+			want_getdiag_bench = 1;
+			break;
+			case 0x776E68:	/* wnh */
+			want_nonzeroes_distplot = 1;
+			break;
+			case 0x777246:	/* wre */
+			want_getrow_bench = 1;
+			break;
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+			case 0x707763:	/* wpc */
+			want_perf_counters = 1; /* 1 is what user wants; 2 is for debug purposes */
+			break;
+#endif
+			case 0x77707373:	/* wpss */
+			want_print_per_subm_stats = RSB_BOOL_TRUE;
+			break;
+			case 0x776F6174:	/* woat */
+			want_accuracy_test = 2;
+			break;
+			case 0x776e7274:	/* wnrt */
+			wai=wat=0;
+			want_autotuner = merge_experimental = split_experimental = RSB_NEGATED_EXAGGERATED_TUNER_TIMES;
+			break;
+			case 0x7772740a:	/* wrt */
+			/* want_autotuner = rsb__util_atof(optarg); */
+			{
+				char wavv = 0x0;
+				int sargs = sscanf(optarg,"%lfs%dx%dt%c%c",&want_autotuner,&wai,&wat,&wav,&wavv);
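+				/* For instance, an argument like "2.0s10x4tVv" yields
+				   want_autotuner=2.0 (seconds), wai=10 (rounds),
+				   wat=4 (threads), wav='V' and wavv='v'; sargs counts
+				   how many of the five fields were matched. */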
+
+				if(!*optarg)
+					sargs = 0;
+				RSBENCH_STDOUT(" Passed %d arguments via autotuning string \"%s\" (an empty string requests defaults)\n",sargs,optarg);
+				if(sargs < 0)
+				{
+					RSBENCH_STDOUT("Wrong autotuning string detected!\n");
+					rsb_test_help_and_exit(argv[0],options, 0);
+					exit(0);
+				}
+				switch(sargs)
+				{	/* each case falls through, defaulting every field the string did not specify */
+					case(EOF):
+					case(0):
+						want_autotuner = 10.0;
+						/* fall through */
+					case(1):
+						wai = 1;
+						/* fall through */
+					case(2):
+						wat = 0;
+						/* fall through */
+					case(3):
+						wav = 0;
+						/* fall through */
+					case(4):
+						wavv = 0;
+						/* fall through */
+					case(5):
+					break;
+				}
+				/* RSBENCH_STDOUT("Got an autotuning string: %lfs%dx%dt%c%c\n",want_autotuner,wai,wat,wav,wavv); */
+				if(toupper(wav)==0x56) /* V */
+					wavf = RSB_AUT0_TUNING_VERBOSE;
+				else
+					wavf = RSB_AUT0_TUNING_SILENT ;
+				if(toupper(wavv)==0x56) /* V */
+					wavf++;
+				if(wai>RSB_CONST_MAX_TUNING_ROUNDS)
+				{
+					RSBENCH_STDOUT("Restricting the number of tuning rounds to %d (%d is too many!).\n",RSB_CONST_MAX_TUNING_ROUNDS,wai);
+					wai = RSB_CONST_MAX_TUNING_ROUNDS;
+				}
+				RSBENCH_STDOUT("Will invoke autotuning for ~%lf s x %d rounds, specifying verbosity=%d and threads=%d (threads >0 means no structure tuning; 0 means structure tuning only; <0 means tuning of both, with the negated value as a thread count suggestion).\n",want_autotuner,wai,wavf,wat);
+			}
+			want_mkl_autotuner = want_autotuner;
+			break;
+#if RSB_HAVE_METIS
+			case 0x776d6272:	/* wmbr */
+			want_wmbr = RSB_BOOL_TRUE;
+			break;
+#endif
+			case 0x776d6174:	/* wmat */
+			sscanf(optarg,"%lf",&want_mkl_autotuner);
+			want_mkl_autotuner = RSB_MAX(1.0,want_mkl_autotuner); /* FIXME: actual value is unimportant as long as it is positive ! */
+			break;
+			case 0x776d6f62:	/* wmob */
+			mib = 1;
+			break;
+			case 0x776174:	/* wat */
+			want_accuracy_test = 1;
+			break;
+ifelse(RSB_M4_IS_SPXX_TWO_VECTORS_OPERATING_KERNEL_MOP(mop),1,`dnl
+			case 0x775563:
+			want_unordered_coo_bench = RSB_BOOL_TRUE;
+			break;
+')dnl
+			case 0x767646:	/* wae */
+			want_ancillary_execs = RSB_BOOL_TRUE;
+			break;
+			case 0x42767646:	/* nwae */
+			want_ancillary_execs = RSB_BOOL_FALSE;
+			break;
+			case 0x77:	/* w */
+			b_w_filename = optarg;
+			break;
+			case 0x63777273:	/* wcsr */
+			csr_w_filename = optarg;
+			break;
+			case 0x77707266:
+			fprfn = optarg;
+			want_perf_dump = RSB_BOOL_TRUE;
+			if(optarg && !*optarg)
+				fprfn = NULL;
+			break;
+			case 0x776e7072:
+			fprfn = NULL;
+			want_perf_dump = RSB_BOOL_FALSE;
+			break;
+			case 0x77707261:
+			apprfn = optarg;
+			break;
+			case 0x77707270:
+			ppprfn = optarg;
+			break;
+			case 0x64697a65 :	/* dize */
+			RSB_DO_FLAG_ADD(flags_o,RSB_FLAG_DISCARD_ZEROS);
+			break;
+			case 0x68: /* h */
+			/* should use rsb_test_help_and_exit */
+			RSBENCH_STDERR(
+				"%s "RSB_INFOMSG_SAK".\n"
+				"You can use it to perform sparse matrix - unitary vector multiplication, "
+				"specifying the blocking parameters and the number of times to perform the multiplication.\n"
+				"\n"
+				"Additional debugging flags (-d, -p) are present.\n"
+				"\n"
+				"Usage: %s [OPTIONS]\nwhere OPTIONS are taken from "
+				"[ -f filename ] \n"
+				"[ -F matrix_storage=[b|c|bc] ] \n"
+				"[ -r br ] \n"
+				"[ -c bc ] \n"
+				"[ -t TIMES ]\n"
+				"[ -n OPENMP_THREADS ]\n"
+				"[ -T ( S | D | I | C ) /* float, double, integer, character*/ ] \n"
+				"[ -s /* will internally sort out nnzs */ ] \n"
+				"[ -p /* will set to 1 nonzeros */ ] \n"
+				"[ -d /* turn debugging on */ ]\n"
+				"[ -A /* for auto-blocking */ ]\n"
+				"[ -h ] \n"
+				"\n"
+				"Please note that not all of the listed numerical types may have been compiled in, and some may not work well. The default is double.\n"
+				"\n"
+				"\n"
+				"e.g.: %s -f raefsky4.mtx -t 10 -T :   # 10 times for each of the supported numerical types\n",
+				argv[0],
+				argv[0],
+				argv[0]);
+			rsb_test_help_and_exit(argv[0],options, 0);
+			exit(0);
+	    	}
+	}
+
+	if( (!RSB_DO_FLAG_HAS(flags_o,RSB_FLAG_QUAD_PARTITIONING)) && want_recursive != RSB_BOOL_FALSE )
+	{
+		RSB_WARN("Assuming a recursive matrix structure is requested...\n");
+		RSB_DO_FLAG_ADD(flags_o,RSB_FLAG_QUAD_PARTITIONING);
+	}
+dnl
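+	/* Any remaining non-option command line arguments are taken as matrix file names. */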
+	for (c = optind; c < argc; c++)                                                     
+	{
+		RSB_RSBENCH_ADDF(argv[c])
+	}
+dnl
+	if(want_verbose == RSB_BOOL_TRUE)
+	{
+		rsb_char_t cbuf[RSB_MAX_COMPILE_COMMAND_LENGTH];
+		rsb__echo_timeandlabel(" beginning run at ","\n",&st);
+		rsb__echo_cargs(argc, argv);
+		errval = rsb__do_lib_get_info_str(0, &cbuf[0], sizeof(cbuf)-1);
+		if(RSB_SOME_ERROR(errval))
+			errval = RSB_ERR_NO_ERROR;
+		else
+			RSBENCH_STDOUT("# compiled with: %s\n",cbuf);
+	}
+dnl
+	printf("# average timer granularity: %2.3lg s\n",tinf);
+	if(want_perf_dump)
+	{
+		if(!fprfn)
+		{
+			rsb__impcdstr(fprfnb+strlen(fprfnb),"rsbench_pr",".rpr",ppprfn,apprfn);
+			fprfn = fprfnb;
+		}
+		if(!cprfn)
+		{
+			rsb__sprintf(cprfnb,"%s.tmp",fprfn);
+			cprfn = cprfnb;
+		}
+		printf("# Will write a final performance record to file %s and periodic checkpoints to %s\n",fprfn,cprfn);
+	}
+	if( maxtprt > RSB_TIME_ZERO )
+		printf("# If the program run time exceeds %2.3lg s, early termination will be attempted.\n",maxtprt );
+
+dnl	printf("# average OpenMP timer granularity: %lg\n",omp_get_wtick());
+dnl
+	RSBENCH_STDOUT("# will %s""perform ancillary tests.\n", want_ancillary_execs ?"":"NOT ");
+	RSBENCH_STDOUT("# will flush cache memory: %s between each operation measurement series, and %s between each operation.\n", want_outer_flush?"":"NOT", want_inner_flush?"":"NOT");
+	RSBENCH_STDOUT("# will %s any zero encountered in the matrix.\n", ( RSB_DO_FLAG_HAS(flags_o,RSB_FLAG_DISCARD_ZEROS) )?"discard":"keep");
+dnl
+	if( nrhsa == NULL ) nrhsa = &nrhs;
+	if( incXa == NULL ) incXa = &incX;
+	if( incYa == NULL ) incYa = &incY;
+dnl
+	if(want_likwid == RSB_BOOL_TRUE){RSB_LIKWID_MARKER_INIT;}
+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+	if(ntypecodes==0)
+		typecode = RSB_NUMERICAL_TYPE_DOUBLE ;
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */
+	if(ntypecodes==0)
+	{
+		typecodes[ntypecodes++] = typecode;
+		typecodes[ntypecodes] = RSB_NUL;
+	}
+
+	io.n_pairs=0;
+	if(should_use_alternate_sort)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&should_use_cb_method;
+		io.keys[io.n_pairs]=RSB_IO_WANT_SORT_METHOD;
+		io.n_pairs++;
+	}
+	if(should_use_cb_method!=0)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&should_use_cb_method;
+		io.keys[io.n_pairs]=RSB_IO_WANT_CACHE_BLOCKING_METHOD;
+		io.n_pairs++;
+	}
+	if(mhs!=NULL)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&mhs;
+		io.keys[io.n_pairs]=RSB_IO_WANT_MEMORY_HIERARCHY_INFO_STRING;
+		io.n_pairs++;
+	}
+	if(subdivision_multiplier!=0.0)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&subdivision_multiplier;
+		io.keys[io.n_pairs]=RSB_IO_WANT_SUBDIVISION_MULTIPLIER;
+		io.n_pairs++;
+	}
+#if RSB_WANT_BOUNDED_BOXES
+	if(want_bounded_box==0)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&want_bounded_box;
+		io.keys[io.n_pairs]=RSB_IO_WANT_BOUNDED_BOX_COMPUTATION;
+		io.n_pairs++;
+	}
+#endif /* RSB_WANT_BOUNDED_BOXES */
+	if(want_no_leaf_spmm!=0)
+	{
+		iop=&io;
+		io.values[io.n_pairs]=&want_no_leaf_spmm;
+		io.keys[io.n_pairs]=RSB_IO_WANT_LEAF_LEVEL_MULTIVEC;
+		io.n_pairs++;
+	}
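+	/* Each of the blocks above appends one (key,value) pair to the
+	 * rsb_initopts structure; iop is then passed to rsb_lib_init() below.
+	 * The value slots point at local variables, so these must remain
+	 * valid until rsb_lib_init() has read them. */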
+
+#ifdef RSB_HAVE_UNISTD_H
+{
+	extern char **environ;
+	char **me = NULL;
+	rsb_int_t rpevc = 0; /* RSB_ prefixed environment variables count */
+
+	for(me=environ;*me;++me)
+		if( strstr(*me,"RSB_") == *me )
+			rpevc++;
+
+	if( rpevc )
+	{
+		RSB_STDOUT("# The user specified %d RSB_ prefixed environment variables:\n",rpevc);
+		for(me=environ;*me;++me)
+			if( strstr(*me,"RSB_") == *me )
+				RSB_STDOUT("#  export %s\n",*me);
+	}
+}
+#endif /* RSB_HAVE_UNISTD_H */
+	
+	
+	if( rsb__getenv("KMP_AFFINITY") )
+		RSB_STDOUT("# export KMP_AFFINITY=%s\n",rsb__getenv("KMP_AFFINITY"));
+	if( rsb__getenv("OMP_PROC_BIND") )
+		RSB_STDOUT("# export OMP_PROC_BIND=%s\n",rsb__getenv("OMP_PROC_BIND"));
+	if( rsb__getenv("OMP_NUM_THREADS") )
+		RSB_STDOUT("# export OMP_NUM_THREADS=%s\n",rsb__getenv("OMP_NUM_THREADS"));
+
+	if( want_verbose != RSB_BOOL_FALSE )
+		RSBENCH_STDOUT("# user specified a verbosity level of %d (each --verbose occurrence counts +1)\n",want_verbose_tuning );
+	else
+		RSBENCH_STDOUT("# user did not specify any verbosity level (each --verbose occurrence counts +1)\n");
+
+	if((errval = rsb_lib_init(iop))!=RSB_ERR_NO_ERROR)
+	{
+		RSB_ERROR("Error while initializing the library.");
+		goto err;
+	}
+#if RSB_WANT_PERFORMANCE_COUNTERS_IN_RSBENCH 
+	if((errval = rsb_perf_counters_init())!=RSB_ERR_NO_ERROR)
+	{
+		RSBENCH_STDERR("problem initializing performance counters (rsb_perf_counters_init gave %d)\n",(int)errval);
+		RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+#endif
+
+	if( RSB_MKL_APPROPRIATE_AT_TIME_SPEC( want_autotuner ) || RSB_MKL_APPROPRIATE_AT_TIME_SPEC( merge_experimental ) || RSB_MKL_APPROPRIATE_AT_TIME_SPEC( split_experimental ) )
+	{
+		RSB_STDOUT("# auto-tuning oriented output implies times==0 iterations and sort-after-load.\n");
+		times = 0;
+		/* if(want_verbose) */
+		want_impatiently_soon_pre_results = 1;
+		want_sort_after_load = RSB_BOOL_TRUE;
+	}
+	else
+	if( times < 1 )
+	{
+		RSB_STDOUT("# The iteration times should be specified as a positive number!\n");
+		RSB_ERROR(RSB_ERRM_ES);
+		goto err;
+	}
+	else
+		RSB_STDOUT("# Will measure on times=%d iterations.\n",times);
+
+	if( 0 == filenamen )
+#if RSB_RSBENCH_STATIC_FILENAMEA
+	       	filenamea[0] = fnbufp[0];
+#else
+	       	filenamea = &fnbufp;
+#endif
+	filenamen = RSB_MAX(1,filenamen);
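+	/* With no input files given, the single placeholder entry above keeps
+	 * the per-file benchmarking loop below running once, e.g. for
+	 * generated matrices. */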
+
+	if(cns)
+	{
+		ca = NULL;
+		cn = 0;
+		if(RSB_SOME_ERROR(rsb__util_get_bx_array(cns,&cn,&ca)))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+	}
+	else
+	{
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+		/* #define rsb_get_max_threads omp_get_max_threads */
+		cn = 1;
+		ca_[0] = omp_get_max_threads ();
+		RSBENCH_STDOUT("# User did not specify threads; assuming %d.\n", ca_[0] );
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+	}
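+	/* At this point ca[0..cn-1] holds the thread counts to benchmark:
+	 * either parsed from the -n specification string, or defaulted to
+	 * the OpenMP maximum. */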
+
+ifelse(RSB_M4_IS_SPXX_KERNEL_MOP(mop),`1',`dnl
+#if RSB_WANT_MKL
+	if( RSB_MKL_APPROPRIATE_AT_TIME_SPEC( want_mkl_autotuner ) )
+		want_mkl_bench_csr = RSB_BOOL_FALSE;
+#endif /* RSB_WANT_MKL */
+')dnl
+
+ifelse(RSB_M4_IS_SPXX_TWO_VECTORS_OPERATING_KERNEL_MOP(mop),1,`dnl
+	RSBENCH_STDOUT("# Using alpha=%d beta=%d for rsb_spmv/rsb_spsv/rsb_spmm/rsb_spsm.\n",alphai,betai);
+')
+	if(want_perf_dump) 
+		rsb__pr_init(&rspr, NULL, filenamen, cn, incXn, incYn, nrhsn, ntypecodes, tn);
+
+	for(     filenamei=0;     filenamei<filenamen+want_impatiently_soon_pre_results  ;++filenamei     )
+	{
+		if( filenamea && ( filenamea[filenamei] != filename_old) && filename_old && want_impatiently_soon_pre_results && want_perf_dump && filenamei>0 && filenamen>1) 
+		{
+			int filenameif = filenamei-1;
+			RSBENCH_STDOUT("# ====== BEGIN Impatient results record for matrix %d/%d: %s.\n",filenamei,filenamen,rsb__basename(filename_old));
+			errval = rsb__pr_dump_inner(rspr, filenamea, ca, incXa, incYa, nrhsa, typecodes, NULL,&filenameif, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, RSB_FLAG_NOFLAGS, RSB_FLAG_NOFLAGS, NULL);
+			RSBENCH_STDOUT("# ======  END  Impatient results record for matrix %d/%d: %s.\n",filenamei,filenamen,rsb__basename(filename_old));
+			if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+			if( filenameif > 0 && filenameif < filenamen-1) /* not after first and not at last */
+			{
+				RSBENCH_STDOUT("# ====== BEGIN Impatient summary record for the %d/%d matrices so far.\n", filenameif+1,filenamen);
+				errval = rsb__pr_dump_inner(rspr, filenamea, ca, incXa, incYa, nrhsa, typecodes, NULL, NULL,&filenameif, NULL, NULL, NULL, NULL, NULL, NULL, NULL, RSB_FLAG_NOFLAGS, RSB_FLAG_NOFLAGS, NULL);
+				RSBENCH_STDOUT("# ======  END  Impatient summary record for the %d/%d matrices so far.\n", filenameif+1,filenamen);
+			}
+			if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+			errval = rsb__pr_save(cprfn, rspr, filenamea, ca, incXa, incYa, nrhsa, typecodes, NULL, RSB_BOOL_TRUE );
+			if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}
+
+		if( filenamei >= filenamen )
+			continue; /* temporary: only for the want_impatiently_soon_pre_results trick */
+
+		if(filenamea)
+		{
+			filename = filenamea[filenamei];
+		}
+
+		if(filenamen>1)
+		{
+			RSBENCH_STDOUT("# multi-file benchmarking (file %d/%d) -- now using %s\n",filenamei+1,filenamen,rsb__basename(filename));
+		}
+
+	for(     incXi=0;     incXi<incXn     ;++incXi     )
+	{
+	for(     incYi=0;     incYi<incYn     ;++incYi     )
+	{
+	for(     nrhsi=0;     nrhsi<nrhsn     ;++nrhsi     )
+	{
+	for(typecodesi=0;typecodesi<ntypecodes;++typecodesi)
+	{
+	rsb_flags_t flags = flags_o;
+	rsb_thread_t cl; /* cores number last (overrides cn for this typecode cycle) */
+	typecode = typecodes[typecodesi];
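+	/* The four nested loops above enumerate the benchmark grid for each
+	 * input matrix: incX x incY x nrhs x numerical type. */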
+
+	if(ntypecodes>1)
+	{
+		RSBENCH_STDOUT("# multi-type benchmarking (%s) -- now using typecode %c (last was %c).\n",typecodes,typecode,typecode_old);
+		if( RSB_MATRIX_UNSUPPORTED_TYPE ( typecode ) )
+		{
+			RSBENCH_STDOUT("# Skipping unsupported type \"%c\" -- please choose from \"%s\".\n",typecode,RSB_NUMERICAL_TYPE_PREPROCESSOR_SYMBOLS );
+			continue;
+		}
+	}
+
+	nrhs = nrhsa[nrhsi];
+	if( nrhsn > 1 && nrhss )
+	{
+		RSBENCH_STDOUT("# multi-nrhs benchmarking (%s) -- now using nrhs %d.\n",nrhss,nrhs);
+	}
+	incX = incXa[incXi];
+	incY = incYa[incYi];
+	if(incXn>1)
+	{
+		RSBENCH_STDOUT("# multi-incX benchmarking (%d/%d) -- now using incX=%d.\n",incXi+1,incXn,incX);
+	}
+	if(incYn>1)
+	{
+		RSBENCH_STDOUT("# multi-incY benchmarking (%d/%d) -- now using incY=%d.\n",incYi+1,incYn,incY);
+	}
+
+	if( want_only_star_scan )
+		if( RSB_MIN(incXi,1) + RSB_MIN(incYi,1) + RSB_MIN(nrhsi,1) > 1 ) /* two or more exceed index one */
+		{
+			RSBENCH_STDOUT("# Skipping a case with incX=%d incY=%d nrhs=%d.\n",incX,incY,nrhs);
+			goto frv;
+		}
+ 	RSBENCH_STDOUT("# so far, program took %.3lfs of wall clock time; ancillary tests %.3lfs; I/O %.3lfs; checks %.3lfs; conversions %.3lfs; rsb/mkl tuning %.3lfs/%.3lfs ",totprt + rsb_time(),totatt,totiot,totht,totct,tottt,totmt);
+	/* rsb__getrusage(); */ /* FIXME: new (20140727) */
+#ifndef RSB_DISABLE_ALLOCATOR_WRAPPER
+	RSBENCH_STDOUT("( allocated_memory:%zd allocations_count:%zd)",rsb_global_session_handle.allocated_memory,rsb_global_session_handle.allocations_count);
+#endif
+	RSBENCH_STDOUT(".\n"); /* FIXME: this takes too much space here ! */
+
+	if(cns)
+	{
+		cc = ca[ci];
+	}
+	cl=cn;
+	if(bcs)
+		if(RSB_SOME_ERROR(rsb__util_get_bx_array(bcs,&bcl,&bcv)))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+	if(brs)
+		if(RSB_SOME_ERROR(rsb__util_get_bx_array(brs,&brl,&brv)))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+
+ifelse(RSB_M4_IS_SPSV_KERNEL_MOP(mop),1,`dnl
+dnl	if(flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)	/* this is here only for easing triangular solve benchmarking (to avoid each time ) */
+dnl		transA = RSB_TRANSPOSITION_T;
+dnl	else
+dnl		transA = RSB_TRANSPOSITION_N;
+dnl	20110412	vectors shall be transA-independent---now both transposed and untransposed operations could be executed in the same run
+	if(incX!=incY)
+	{
+		RSB_ERROR("setting (incX=%d) != (incY=%d) in triangular solve is unsupported in this program\n",incX,incY);
+		errval = RSB_ERR_BADARGS;goto err;
+	}
+')dnl
+
+ifelse(RSB_M4_IS_SPXX_TWO_VECTORS_OPERATING_KERNEL_MOP(mop),1,`dnl
+ifelse(RSB_M4_IS_ZEROING_KERNEL_MOP(mop),1,`dnl
+	if(RSB_SOME_ERROR(errval = rsb__cblas_Xscal(typecode,1,NULL,beta,1))){ RSB_ERROR(RSB_ERRM_ES);goto err;}
+',`
+	if(RSB_SOME_ERROR(errval = rsb__fill_with_ones(beta,typecode,1,1))){ RSB_ERROR(RSB_ERRM_ES);goto err;}
+')dnl
+ifelse(RSB_M4_IS_OP_ADDING_KERNEL_MOP(mop),1,`dnl
+	if(RSB_SOME_ERROR(errval = rsb__fill_with_ones(alpha,typecode,1,1))){ RSB_ERROR(RSB_ERRM_ES);goto err;}
+')dnl
+ifelse(RSB_M4_IS_OP_NEGATING_KERNEL_MOP(mop),1,`dnl
+	if(RSB_SOME_ERROR(errval = rsb__fill_with_ones(alpha,typecode,1,1))){ RSB_ERROR(RSB_ERRM_ES);goto err;}
+')dnl
+	/* FIXME: the following collides with the former */
+	rsb__util_set_area_to_converted_integer(alphap,typecode,alphai);
+	rsb__util_set_area_to_converted_integer(betap ,typecode,betai);
+')dnl
+dnl
+
+#ifdef RSB_WANT_OSKI_BENCHMARKING 
+	/* FIXME : note that this option is not compatible with g_sort_only .. */
+        oski_Init();
+#endif /* RSB_WANT_OSKI_BENCHMARKING */
+	g_debug = ((flags & RSB_FLAG_SHOULD_DEBUG) != 0);
+
+	if(g_sort_only)RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORT_INPUT);
+
+	if(typecode==-1)
+	{
+		RSBENCH_STDERR("error : please recompile with double precision floating point numbers supported! \n");
+		return RSB_ERR_GENERIC_ERROR;
+	}
+	rsb__util_set_area_to_converted_integer(&pone[0],typecode,+1);
+
+dnl	if(g_dump_performance_profile)
+dnl	{
+dnl		if((errval = rsb_do_dump_performance_record_for_op_and_type(
+dnl			RSB_NUMERICAL_TYPE_INDEX_FROM_CODE(typecode), 
+dnl			RSB_NUMERICAL_OP_INDEX_FROM_CODE(RSB_M4_OPTYPE_INDEX_PREPROCESSOR_SYMBOL(mop))))!=RSB_ERR_NO_ERROR)
+dnl			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+dnl		goto done;
+dnl	}
+
+ifelse(mop,`mat_stats',`dnl
+	if(until_confidence && g_estimate_fillin)
+	{
+		RSBENCH_STDERR("cannot perform -e functionality in one run. one at a time please..\n");
+		goto err;
+	}
+')dnl
+
+	if(brl<1) { /* fallback hack: use the compiled-in row unroll array */ brv = rua; brl = RSB_ROWS_UNROLL_ARRAY_LENGTH;}
+	if(bcl<1) { /* fallback hack: use the compiled-in column unroll array */ bcv = cua; bcl = RSB_COLUMNS_UNROLL_ARRAY_LENGTH;}
+
+	if(RSB_MATRIX_UNSUPPORTED_TYPE(typecode))
+	{
+		RSBENCH_STDERR("This numerical type is not supported.\n");
+		goto err;
+	}
+
+	/* CONDITIONALLY, GENERATING A MATRIX */
+	if(should_generate_dense!=0)
+	{
+		rsb_nnz_idx_t dim = RSB_FABS(should_generate_dense);
+		rsb_nnz_idx_t spacing = want_generated_spacing>1?want_generated_spacing:1;
+		
+		if(((should_generate_lband>-1) || (should_generate_uband>-1)) && should_generate_dense>0)
+		{
+			rsb__sprintf(fnbuf,"banded-%dx%d-%d+%d-%dnz-spaced-%d",dim*spacing,dim*spacing,should_generate_lband,should_generate_uband,RSB_NNZ_OF_BANDED(dim,should_generate_lband,should_generate_uband),spacing);
+		}
+		else
+		{
+		if(want_generated_spacing>0)
+		{
+			if(should_generate_dense>0)
+				rsb__sprintf(fnbuf,"dense-%dx%d-%dnz",dim*spacing,should_generate_dense_nc*spacing/*dim*spacing*/,dim*dim);
+			else
+				rsb__sprintf(fnbuf,"lower-%dx%d-%dnz-spaced-%d",dim*spacing,dim*spacing,(dim*(dim-1))/2+dim,spacing);
+		}
+		else
+		{
+			if(should_generate_dense>0)
+				rsb__sprintf(fnbuf,"dense-%dx%d-%dnz",dim*spacing,should_generate_dense_nc*spacing/*dim*spacing*/,dim*should_generate_dense_nc);
+			else
+				rsb__sprintf(fnbuf,"lower-%dx%d-%dnz",dim*spacing,dim*spacing,(dim*(dim-1))/2+dim);
+		}
+		}
+		if(want_incX)
+				rsb__sprintf(fnbuf+strlen(fnbuf),"-incX-%d",incX);
+		if(want_incY)
+				rsb__sprintf(fnbuf+strlen(fnbuf),"-incY-%d",incY);
+/*		rsb__sprintf(fnbuf,"dense-%dx%d-%dnz",dim,dim,dim*dim);*/
+/*		rsb__sprintf(fnbuf,"dense-%dx%d",dim,dim);*/
+		filename=&(fnbuf[0]);
+	}
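+	/* The name synthesized above is only a label used in reports: the
+	 * actual matrix generation happens further below, from the same
+	 * should_generate_dense / lband / uband parameters. */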
+
+	if(usfnbuf)
+		filename=usfnbuf;
+
+	/* CONDITIONALLY, READING A MATRIX FROM FILE */
+if(filename || b_r_filename)
+{
+
+	rsb_blk_idx_t M_b=0;
+	rsb_blk_idx_t K_b=0;
+	rsb_nnz_idx_t i=0;
+
+	rsb_coo_idx_t *p_r=NULL,*p_c=NULL;	/* FIXME : get rid of these */
+	void *lhs=NULL,*rhs=NULL;
+	int bcvi=0;
+	int brvi=0;
+	rsb_time_t frt = RSB_TIME_ZERO;
+
+	if( filename != filename_old )
+	{
+		RSB_CONDITIONAL_FREE(IA);
+		RSB_CONDITIONAL_FREE(JA);
+		RSB_CONDITIONAL_FREE(VA);
+	}
+	if(!should_recycle_io) { RSB_DEBUG_ASSERT( VA == NULL ); }
+	if( should_recycle_io && VA && filename == filename_old )
+	{
+		flags = r_flags;
+dnl
+		if( typecode != typecode_old )
+		{
+			void *VA_ = rsb__malloc_vector(nnz,typecode);
+			errval = rsb__do_copy_converted_scaled(VA, VA_, NULL, typecode_old, typecode, nnz, RSB_DEFAULT_TRANSPOSITION);
+			if(RSB_SOME_ERROR(errval)) { RSB_ERROR(RSB_ERRM_ES);goto err; }
+			RSB_CONDITIONAL_FREE(VA);
+			VA = VA_;
+			RSBENCH_STDOUT("# Reusing type converted (%c->%c) arrays from last iteration instead of reloading matrix file.\n",typecode_old,typecode);
+			typecode_old = typecode;
+		}
+		else
+		{
+			RSBENCH_STDOUT("# Reusing same type     (type %c) arrays from last iteration instead of reloading matrix file.\n",typecode);
+		}
+dnl
+		goto have_va_ia_ja;
+	}
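+	/* When I/O recycling is enabled and the same file is requested again,
+	 * the VA/IA/JA arrays from the previous iteration are reused (with an
+	 * on-the-fly conversion if the typecode changed), skipping the reload
+	 * entirely. */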
+dnl
+	if((!should_generate_dense) && (!b_r_filename))
+	{
+		rsb_bool_t is_symmetric = RSB_BOOL_FALSE;
+		rsb_bool_t is_hermitian = RSB_BOOL_FALSE;
+		size_t fsz = rsb_sys_filesize(filename);
+
+		frt = - rsb_time();
+
+ifelse(RSB_M4_IS_SPSV_KERNEL_MOP(mop),1,`dnl
+			{
+				/* FIXME : we remove symmetry flags, for they are incompatible with triangular solve */
+				RSB_DO_FLAG_DEL(flags,RSB_FLAG_SYMMETRIC);
+				RSB_DO_FLAG_DEL(flags,RSB_FLAG_HERMITIAN);
+			/*
+				if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER))
+				{
+					RSB_DO_FLAG_ADD(flags,RSB_FLAG_UPPER_TRIANGULAR);
+				}
+				else
+					RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER_TRIANGULAR);
+			*/
+				//RSB_DO_FLAG_ADD(flags,RSB_FLAG_DISCARD_ZEROS) ;//problematic : FIXME
+			}
+')dnl
+#ifdef RSB_HAVE_REGEX_H
+		if( want_slmr && rsb_regexp_match(rsb__basename(filename),want_slmr) == RSB_BOOL_TRUE )
+		{
+			RSB_STDOUT("# skipping loading matrix file %s, because it matches regex /%s/.\n",filename,want_slmr);
+			goto nfnm;
+		}
+#endif /* RSB_HAVE_REGEX_H */
+		if( want_slss && ( strstr( rsb__basename(filename), want_slss ) != NULL ) )
+		{
+			RSB_STDOUT("# skipping loading matrix file %s, because it matches substring %s.\n",filename,want_slss);
+			goto nfnm;
+		}
+		/* if(RSB_SOME_ERROR(rsb__do_util_get_matrix_dimensions(filename,&ncA,&nrA,&nnz,NULL)) ) */
+		if(RSB_SOME_ERROR(rsb__util_mm_info_matrix_f(filename,&nrA,&ncA,&nnz,NULL,&is_symmetric,&is_hermitian,NULL,NULL,NULL,NULL)) )
+		{
+			RSBENCH_STDERR(RSB_ERRMSG_PROIFAMM ": %s ..\n",filename);
+			if( ignore_failed_fio )
+			{
+				RSBENCH_STDERR("Will ignore error and continue with the following files.\n");
+				errval = RSB_ERR_NO_ERROR;
+				goto nfnm;
+			}
+			goto err;
+		}
+		if( want_slnu == RSB_BOOL_TRUE && ( is_hermitian || is_symmetric ) )
+		{
+			RSB_STDOUT("# skipping loading not unsymmetric matrix %s, as requested.\n",filename);
+			goto nfnm;
+		}
+		if( want_slsm == RSB_BOOL_TRUE && is_symmetric )
+		{
+			RSB_STDOUT("# skipping loading symmetric matrix %s, as requested.\n",filename);
+			goto nfnm;
+		}
+		if( want_slhm == RSB_BOOL_TRUE && is_hermitian )
+		{
+			RSB_STDOUT("# skipping loading hermitian matrix %s, as requested.\n",filename);
+			goto nfnm;
+		}
+		if( want_slum == RSB_BOOL_TRUE && !is_symmetric )
+		{
+			RSB_STDOUT("# skipping loading unsymmetric matrix %s, as requested.\n",filename);
+			goto nfnm;
+		}
+		if( want_slmn > 0 && want_slmn <  nnz )
+		{
+			RSB_STDOUT("# skipping loading matrix %s: its %d nonzeroes exceed the %d allowed.\n",filename,nnz,want_slmn);
+			goto nfnm;
+		}
+		if( want_slms > 0 && want_slms <= fsz / 1024 )
+		{
+			RSB_STDOUT("# skipping loading matrix %s: its filesize of %zd KiB meets or exceeds the %zd KiB limit.\n",filename,fsz/1024,(size_t)want_slms);
+			goto nfnm;
+		}
+		if( want_slnn > 0 && want_slnn >  nnz )
+		{
+			RSB_STDOUT("# skipping loading matrix %s: its %d nonzeroes are fewer than the %d required.\n",filename,nnz,want_slnn);
+			goto nfnm;
+		}
+	
+		RSB_STDOUT("# reading %s (%zd bytes / %zd "RSB_MEGABYTE_SYM" / %zd nnz / %zd rows / %zd columns / %zd MiB COO) as type %c...\n",rsb__basename(filename),fsz,RSB_DIV(fsz,RSB_MEGABYTE),(size_t)nnz,(size_t)nrA,(size_t)ncA,RSB_DIV(RSB_UTIL_COO_OCCUPATION(nrA,ncA,nnz,typecode),RSB_MEGABYTE),typecode);
+
+		if( ( nrA == ncA ) && ( nrA > 1 ) && ( want_only_lowtri || want_only_upptri ) )
+			nnz += nrA;	/* the loading routine shall allocate nnz+nrA */
+		else
+ 			nnz = 0;	/* the loading routine should determine nnz */
+
+		totiot -= rsb_time();
+		errval = rsb__util_mm_load_matrix_f(filename,&IA,&JA,&VA,&nrA,&ncA,&nnz,typecode,flags,NULL,NULL);
+		totiot += rsb_time();
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSBENCH_STDERR(RSB_ERRMSG_NOTMTXMKT" : %s ..\n",filename);
+			goto err;
+		}
+		else
+		{
+			rsb_bool_t is_lower = RSB_BOOL_FALSE;
+			rsb_bool_t is_upper = RSB_BOOL_FALSE;
+			rsb_bool_t is_vector = RSB_BOOL_FALSE;
+
+			filename_old = filename;
+			typecode_old = typecode;
+
+			frt += rsb_time();
+			RSB_STDOUT("# file input of %s took %6.2lf s (%.0lf nnz, %.0lf nnz/s ) (%.2lf MB/s ) \n",rsb__basename(filename),frt,
+				(((double)nnz)),
+				(((double)nnz)/frt),
+				(((double)rsb_sys_filesize(filename))/(frt*RSB_INT_MILLION))
+			);
+
+			if (want_io_only)
+			{
+				/*  */
+				goto err;
+			}
+
+			if(want_transpose)
+			{
+				RSB_SWAP(rsb_coo_idx_t*,IA,JA);
+				RSB_SWAP(rsb_coo_idx_t,nrA,ncA);
+				flags = rsb__do_flip_uplo_flags(flags);
+			}
+
+			if( nrA==ncA && nrA>1 && ( want_only_lowtri || want_only_upptri ) )
+			{
+				rsb_nnz_idx_t discarded = 0;
+				/*
+				rsb__util_coo_array_set_sequence(IA+nnz,nrA,0,1);
+				rsb__util_coo_array_set_sequence(JA+nnz,nrA,0,1);
+				 */
+				RSB_FCOO_ISET(IA+nnz,0,nrA);
+				RSB_FCOO_ISET(JA+nnz,0,nrA);
+				rsb__fill_with_ones(((rsb_byte_t*)VA)+RSB_SIZEOF(typecode)*nnz,typecode,nrA,1);
+				nnz += nrA;	/* account for the nrA unit-diagonal elements just appended */
+				if( want_only_lowtri )
+				{
+					RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER_TRIANGULAR);
+					errval = rsb_weed_out_non_lowtri(VA,IA,JA,nnz,typecode,NULL,&discarded);
+					RSBENCH_STDOUT("# discarding %d non-lower elements of %d.\n",discarded,nnz);
+					nnz-=discarded;
+				}
+				if( want_only_upptri )
+				{
+					RSB_DO_FLAG_ADD(flags,RSB_FLAG_UPPER_TRIANGULAR);
+					errval = rsb_weed_out_non_upptri(VA,IA,JA,nnz,typecode,NULL,&discarded);
+					RSBENCH_STDOUT("# discarding %d non-upper elements of %d.\n",discarded,nnz);
+					nnz-=discarded;
+				}
+
+				if(RSB_SOME_ERROR(errval))
+				{RSB_ERROR(RSB_ERRM_ES);goto err;}
+			}
+
+			if(RSB_SOME_ERROR(rsb__util_mm_info_matrix_f(filename,NULL,NULL,NULL,NULL,&is_symmetric,&is_hermitian,NULL,&is_lower,&is_upper,&is_vector) ))
+			{
+				RSBENCH_STDERR(RSB_ERRMSG_PROIFAMM ": %s ..\n",filename);
+				goto err;
+			}
+			if( is_vector )
+			{
+				RSBENCH_STDERR("file %s seems to store a vector\n",filename);
+				goto err;
+			}
+			if(RSB_BOOL_AND(want_as_unsymmetric,want_as_symmetric))
+			{
+				RSBENCH_STDERR("requiring both symmetric and unsymmetric flags is contradictory!\n");
+				goto err;
+			}
+			if(want_as_unsymmetric)
+			{
+				is_symmetric = RSB_BOOL_FALSE;
+				is_hermitian = RSB_BOOL_FALSE;
+			}
+			if(want_as_symmetric)
+			{
+				is_symmetric = RSB_BOOL_TRUE;
+				is_hermitian = RSB_BOOL_TRUE;
+			}
+			if(!RSB_IS_MATRIX_TYPE_COMPLEX(typecode) && is_hermitian)
+			{
+				RSBENCH_STDOUT("# Warning: non-complex matrix with hermitian flags! Converting to symmetric!\n");
+				is_hermitian = RSB_BOOL_FALSE;
+				is_symmetric = RSB_BOOL_TRUE;
+			}
+			/* TODO: use rsb__flags_from_props() */
+			if(is_hermitian == RSB_BOOL_TRUE && !RSB_EXPERIMENTAL_EXPAND_SYMMETRIC_MATRICES_BY_DEFAULT)
+			{
+dnl				RSBENCH_STDOUT("# exploiting EXPERIMENTAL symmetry\n");
+				RSB_DO_FLAG_ADD(flags,RSB_FLAG_HERMITIAN);
+			}
+			if(is_symmetric == RSB_BOOL_TRUE && !RSB_EXPERIMENTAL_EXPAND_SYMMETRIC_MATRICES_BY_DEFAULT)
+			{
+dnl				RSBENCH_STDOUT("# exploiting EXPERIMENTAL symmetry\n");
+				RSB_DO_FLAG_ADD(flags,RSB_FLAG_SYMMETRIC);
+			}
+
+			if( (!RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER)) && (!RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER)) )
+			{
+				/* is_upper and is_lower as declared in the matrix file */
+				if(is_upper)
+ 					RSB_DO_FLAG_ADD(flags,RSB_FLAG_UPPER);
+				if(is_lower)
+ 					RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER);
+			}
+			RSB_DO_ERROR_CUMULATE(errval,rsb__do_cleanup_nnz(VA,IA,JA,nnz,0,0,nrA,ncA,&nnz,typecode,flags)); /* NEW */
+			if(RSB_SOME_ERROR(errval))
+			{ RSB_ERROR(RSB_ERRM_ES); goto err; }
+			if(want_sort_after_load)
+			{
+				rsb_time_t dt = RSB_TIME_ZERO;
+				dt = - rsb_time();
+dnl				//if((errval = rsb_util_sort_row_major_inner(VA,IA,JA,nnz,nrA,ncA,typecode,flags))!=RSB_ERR_NO_ERROR)
+				if((errval = rsb__util_sort_row_major_parallel(VA,IA,JA,nnz,nrA,ncA,typecode,RSB_FLAG_NOFLAGS))!=RSB_ERR_NO_ERROR)
+				{ RSB_ERROR(RSB_ERRM_ES); goto err; }
+				dt += rsb_time();
+				RSBENCH_STDOUT("# pre-sorting took %lg s\n",dt);
+				RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORTED_INPUT);
+			}
+#if RSB_HAVE_METIS
+			if(want_wmbr)
+			{
+				/* FIXME: unfinished */
+				rsb_coo_idx_t *perm = NULL,*iperm = NULL,*vwgt = NULL;
+
+				perm  = rsb__calloc(sizeof(rsb_coo_idx_t)*(nrA+1));
+				iperm = rsb__calloc(sizeof(rsb_coo_idx_t)*(nrA+1));
+#if 1
+				vwgt  = rsb__calloc(sizeof(rsb_coo_idx_t)*(nnz));
+				rsb__util_coo_array_set(vwgt,nnz,0);
+#else
+				vwgt  = rsb__clone_area(JA,nnz*sizeof(rsb_coo_idx_t));
+#endif
+				if( !perm || !iperm || !vwgt )
+				{
+					RSB_CONDITIONAL_FREE(iperm);
+					RSB_CONDITIONAL_FREE(perm);
+					RSB_CONDITIONAL_FREE(vwgt);
+					errval = RSB_ERR_ENOMEM;	/* allocation failed: do not proceed with the freed arrays */
+					RSB_ERROR(RSB_ERRM_ES);
+					goto err;
+				}
+				errval = rsb__util_sort_row_major_parallel(VA,IA,JA,nnz,nrA,ncA,typecode,RSB_FLAG_NOFLAGS);
+				errval = rsb__do_switch_fullword_array_to_compressed(IA,nnz,nrA);
+				RSBENCH_STDOUT("Calling METIS_NodeND\n");
+				/*errval = */ METIS_NodeND(&nrA,IA,JA,vwgt,NULL,perm,iperm); /* the Scotch wrapper crashes on vwgt=NULL, and returns void */
+				RSBENCH_STDOUT("Exited METIS_NodeND\n");
+				/* if(errval == METIS_OK) */
+				{
+					RSBENCH_STDOUT("Permuting..\n");
+					errval = rsb__do_switch_compressed_array_to_fullword_coo(IA, nrA, 0, NULL);
+					errval = rsb__do_permute_rows_with_coo_index( IA, perm, nnz);
+					RSBENCH_STDOUT("Permuted.\n");
+					/* 
+					 */
+					for(i=0;i<nrA;++i){ RSB_STDOUT("%d\n",perm[i]);}
+				}
+				RSB_CONDITIONAL_FREE(vwgt);
+				RSB_CONDITIONAL_FREE(perm);
+				RSB_CONDITIONAL_FREE(iperm);
+			}
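+			/* METIS_NodeND computes a fill-reducing node ordering: it expects a
+			 * CSR-like adjacency structure (hence the fullword-to-compressed
+			 * switch above) and fills perm and iperm with the permutation and
+			 * its inverse. */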
+			
+#endif /* RSB_HAVE_METIS */
+		}
+	}
+	else
+	if(should_generate_dense!=0)
+	{
+		rsb_nnz_idx_t dim = RSB_FABS(should_generate_dense),spacing=1;
+		if(want_generated_spacing>1)
+			spacing = want_generated_spacing;
+		dim *= spacing;
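+		/* presumably, a spacing of s spreads the generated entries s rows and
+		 * columns apart, so the effective dimension grows to dim*s while the
+		 * nonzero count stays the same */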
+
+		if(((should_generate_lband>-1) || (should_generate_uband>-1)) && should_generate_dense>0)
+		{
+			rsb_nnz_idx_t lbw=should_generate_lband,ubw=should_generate_uband;
+			nrA = ncA = dim;
+			errval = rsb__generate_blocked_banded_coo(dim/spacing,spacing,lbw,ubw,&IA,&JA,&VA,&nnz,typecode);
+			if(RSB_SOME_ERROR(errval))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+		}
+		else
+		{
+ifelse(RSB_M4_IS_SPSX_KERNEL_MOP(mop),1,`dnl
+		if(should_generate_dense>0)
+		{
+			RSBENCH_STDOUT("Interpreting --dense as --lower-dense (full dense makes no sense for triangular solve).\n");
+			should_generate_dense = -should_generate_dense;
+			should_generate_dense_nc = 0;
+		}
+')dnl
+		if(should_generate_dense>0)
+		{
+			RSB_DEBUG_ASSERT( should_generate_dense_nc != 0 );
+			/* full dense, no diag */
+			nrA = dim;
+			ncA = should_generate_dense_nc * spacing;
+			errval = rsb__generate_dense_full(nrA/spacing,ncA/spacing,spacing,&IA,&JA,&VA,&nnz,typecode);
+			if(RSB_SOME_ERROR(errval))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+		}
+		else
+		{
+			/* trick: lower triangular */
+			nrA=ncA=dim;
+			errval = rsb__generate_dense_lower_triangular_coo(dim/spacing,spacing,&IA,&JA,&VA,&nnz,typecode);
+			if(RSB_SOME_ERROR(errval))
+			{RSB_ERROR(RSB_ERRM_ES);goto err;}
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER); /* 20121223	*/
+		}
+		}
+
+		if(want_sort_after_load)	
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_SORTED_INPUT);
+
+		if(want_as_symmetric)
+			RSB_DO_FLAG_ADD(flags,RSB_FLAG_SYMMETRIC);
+	} /* should_generate_dense */
+dnl
+have_va_ia_ja:
+dnl
+	RSB_DEBUG_ASSERT( VA != NULL );
+	RSB_DEBUG_ASSERT( IA != NULL );
+	RSB_DEBUG_ASSERT( JA != NULL );
+	r_flags = flags;
+dnl
+ifelse(RSB_M4_IS_SPSV_KERNEL_MOP(mop),1,`dnl
+			flags = rsb__do_detect_and_add_triangular_flags(IA,JA,nnz,flags);
+dnl			RSB_DO_FLAG_ADD(flags,RSB_FLAG_TRIANGULAR);
+			if(
+		(RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER_TRIANGULAR) && RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER_TRIANGULAR)) ||
+		(!RSB_DO_FLAG_HAS(flags,RSB_FLAG_UPPER_TRIANGULAR)&&!RSB_DO_FLAG_HAS(flags,RSB_FLAG_LOWER_TRIANGULAR))
+			)
+			{
+				RSB_ERROR("Matrix contains both upper and lower elements? Then it is not suited for mop!\n");
+				errval = RSB_ERR_CORRUPT_INPUT_DATA;	/* uhm */
+				{RSB_ERROR(RSB_ERRM_ES);goto err;}
+			}
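+			/* the check above rejects input which is not exactly one of lower
+			 * or upper triangular: a triangular solve cannot operate on a
+			 * matrix with both upper and lower elements, nor on one where
+			 * neither triangle is detectable */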
+')dnl
+dnl
+
+	/* CONDITIONALLY, PROCESSING THE INPUT */
+	if(!b_r_filename)
+	{
+		if(want_column_expand)
+		{
+			errval = rsb__do_column_expand(JA,nnz,&ncA,want_column_expand);
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR(RSB_ERRM_ES);
+				goto err;
+			}
+		}
+
+		if( pattern_only )
+			rsb__fill_with_ones(VA,typecode,nnz,1);
+
+		if( dumpout )
+		{
+			errval = rsb__test_print_coo_mm(typecode,flags,IA,JA,VA,nrA,ncA,nnz,RSB_BOOL_TRUE,RSB_DEFAULT_STREAM);
+			//COO equivalent for rsb_file_mtx_save(mtxAp,NULL);
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR(RSB_ERRM_ES);
+				goto err;
+			}
+			goto ret;
+		}
+	}
+#if 1
+	if(want_nonzeroes_distplot)
+	{
+		/* FIXME: Unfinished: printout not adequate ! */
+		/* FIXME: Shall use a separate routine for this! Please regard this code as temporary */
+		rsb_coo_idx_t median_m=0,median_k=0,stdd_m=0,stdd_k=0,nzp_m=nnz/nrA,nzp_k=nnz/ncA;
+		rsb_coo_idx_t*idxv=NULL;
+		rsb_coo_idx_t mm=0;
+		rsb_nnz_idx_t cs=0;
+		rsb_bool_t po = RSB_BOOL_TRUE;
+		const int histres=100;
+		const rsb_char_t*pmsg="\n\nplot \"-\" using 1:2 title \"cumulative %s population (nnz)\"\n";
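+		/* the following output forms a gnuplot script: cumulative nnz-per-row
+		 * and nnz-per-column populations, each sampled at about histres points */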
+		RSBENCH_STDOUT("set xtics rotate\n");
+		RSBENCH_STDOUT("set term postscript eps color\n");
+		RSBENCH_STDOUT("set output \"%s-distplot.eps\"\n", rsb__basename(filename));
+		RSBENCH_STDOUT("set multiplot layout 1,2 title \"%s (%d x %d, %d nnz)\"\n", rsb__basename(filename),nrA,ncA,nnz);
+
+		outnri = rhsnri = ndA = RSB_MAX(nrA,ncA);
+
+		mm=nrA<histres?1:nrA/histres;
+		idxv = rsb__calloc(sizeof(rsb_coo_idx_t)*(ndA));
+		if(!idxv)
+			goto nohists;
+
+		for(i=0;i<nnz;++i)
+			if(IA[i] < nrA && IA[i] >= 0 )
+				idxv[IA[i]]++;
+		for(i=0;i<nrA;++i)
+			if(median_m<nnz/2)
+				{ median_m+=idxv[i]; }
+			else
+				{ break; }
+		median_m=i; 
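+		/* median_m is the first row index at which the cumulative nonzero
+		 * count reaches nnz/2, i.e. the median row of the nonzero distribution */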
+
+		RSB_STDOUT(pmsg,"rows");
+		if(po) for(i=0;i<nrA;++i){ cs+=idxv[i]; if(i%mm==0)RSB_STDOUT("%d %d\n",i,cs);}
+		RSB_STDOUT("e\n");
+
+		mm=ncA<histres?1:ncA/histres;
+
+		for(i=0;i<nrA;++i)
+			stdd_m+=(idxv[i]-nzp_m)*(idxv[i]-nzp_m);
+		stdd_m=nrA<2?0:sqrt(stdd_m/(nrA-1));
+
+
+		for(i=0;i<ncA;++i)
+			idxv[i]=0;
+
+		for(i=0;i<nnz;++i)
+			if(JA[i] < ncA && JA[i] >= 0 )
+				idxv[JA[i]]++;
+		for(i=0;i<ncA;++i)
+			if(median_k<nnz/2)
+				{ median_k+=idxv[i]; }
+			else
+				{ break; }
+		median_k=i; 
+
+		cs=0;
+		RSB_STDOUT(pmsg,"columns");
+		if(po) for(i=0;i<ncA;++i){ cs+=idxv[i]; if(i%mm==0)RSB_STDOUT("%d %d\n",i,cs);}
+		RSB_STDOUT("e\n");
+
+		for(i=0;i<ncA;++i)
+			stdd_k+=(idxv[i]-nzp_k)*(idxv[i]-nzp_k);
+		stdd_k=ncA<2?0:sqrt(stdd_k/(ncA-1));
+
+		RSBENCH_STDOUT("unset multiplot\n");
+		RSBENCH_STDOUT("#%%:NNZ_PER_ROW_STDDEV:");/* RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(); */
+		RSBENCH_STDOUT("\t%10d\n",stdd_m);
+		RSBENCH_STDOUT("#%%:ROWS_MEDIAN:");/* RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(); */
+		RSBENCH_STDOUT("\t%10.0g\n",((double)median_m/(double)nrA));
+		RSBENCH_STDOUT("#%%:NNZ_PER_COL_STDDEV:");/* RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(); */
+		RSBENCH_STDOUT("\t%10d\n",stdd_k);
+		RSBENCH_STDOUT("#%%:COLS_MEDIAN:");/* RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(); */
+		RSBENCH_STDOUT("\t%10.0g\n",((double)median_k/(double)ncA));
+nohists:
+		RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS;
+		RSB_CONDITIONAL_FREE(idxv);
+		goto ret;
+	}
+	#endif /* 1 */
+dnl
+ifelse(RSB_M4_IS_SPXX_TWO_VECTORS_OPERATING_KERNEL_MOP(mop),1,`dnl
+	if(want_unordered_coo_bench)
+	{
+		struct rsb_coo_matrix_t coo;
+		rsb__fill_coo_struct(&coo,VA,IA,JA,nrA,ncA,nnz,typecode);
+		outnri = rhsnri = ndA = RSB_MAX(nrA,ncA);
+		lhs = rsb__calloc_vector(ndA*nrhs*incY,typecode);
+		rhs = rsb__calloc_vector(ndA*nrhs*incX,typecode);
+
+		if(!lhs || !rhs)
+		{
+			RSB_ERROR("problems allocating vectors");
+			RSB_CONDITIONAL_FREE(lhs); RSB_CONDITIONAL_FREE(rhs);
+			{ errval = RSB_ERR_INTERNAL_ERROR; goto err; }
+		}
+
+		if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+		for(i=0;i<times;++i)
+		{
+			if(want_inner_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+			unordered_coo_op_time = - rsb_time();
+			if((errval = rsb__do_spmv_fullword_coo(&coo,flags,rhs,lhs,alphap,betap,incX,incY,transA))!=RSB_ERR_NO_ERROR) { goto erru; }
+			unordered_coo_op_time += rsb_time();
+			unordered_coo_op_time_best = RSB_MIN_ABOVE_INF(unordered_coo_op_time_best,unordered_coo_op_time,tinf);
+			unordered_coo_op_tot_time+=unordered_coo_op_time;
+		}
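+		/* best-of-N timing: RSB_MIN_ABOVE_INF keeps the minimal measured time
+		 * (discarding values above tinf), filtering out timing noise from the
+		 * repeated runs */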
+		if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+erru:
+		RSB_CONDITIONAL_FREE(lhs); RSB_CONDITIONAL_FREE(rhs);
+dnl
+		if(want_verbose == RSB_BOOL_TRUE)
+		{
+			/* FIXME ! 20110427 */
+			struct rsb_mtx_t matrixs;
+			mtxAp=&matrixs;
+			rsb__init_rsb_struct_from_coo(mtxAp,&coo);
+			mtxAp->flags = RSB_FLAG_DEFAULT_COO_MATRIX_FLAGS|RSB_DO_FLAG_FILTEROUT((flags),RSB_DO_FLAGS_EXTRACT_STORAGE(flags));
+			rsb__do_set_init_storage_flags(mtxAp,mtxAp->flags);
+			raw_Mflops=nnz*2;
+			RSBENCH_STDOUT("%%:UNORDERED_COO_PERFORMANCE:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+			RSBENCH_STDOUT("\t%10.2lf\n",((rsb_time_t)raw_Mflops)/(RSB_REAL_MILLION*unordered_coo_op_time_best));
+			mtxAp=NULL;
+		}
+	}
+dnl
+')dnl
+dnl
+	/* CONDITIONALLY, PERFORMING SOME TEST ON THE INPUT */
+	if(want_accuracy_test>=1)
+	{
+		struct rsb_coo_matrix_t coo;
+		rsb__fill_coo_struct(&coo,VA,IA,JA,nrA,ncA,nnz,typecode);
+		RSB_DO_ERROR_CUMULATE(errval,rsb__do_spmv_accuracy_test(&coo,ca,cn,flags));
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_ERROR("accuracy based test failed!\n");
+			goto err;
+		}
+		if(want_accuracy_test>1)
+		{
+			goto done;
+		}
+	}
+
+		if( (flags & RSB_FLAG_QUAD_PARTITIONING) && g_all_flags==1)
+		{
+dnl			int ci=0,di=0,hi=0,li=0,oi=0;
+			int /*ci=0,*/hi=0,oi=0;
+			fn=0;
+			for(ci=0;ci<3;++ci)
+/*			for(di=0;di<2;++di)*/
+			for(oi=0;oi<2;++oi)
+			for(hi=0;hi<2;++hi)
+/*			for(li=0;li<2;++li)*/
+			{
+#if 0
+				flagsa[di+hi*2+li*4+ci*8]=flags;
+				//RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],hi?RSB_FLAG_USE_HALFWORD_INDICES_COO:0);
+				RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],hi?RSB_FLAG_USE_HALFWORD_INDICES_CSR:0);
+#ifdef RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES
+				RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],li?RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES:0);
+#endif /* RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES */
+				RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],di?RSB_FLAG_RECURSIVE_SUBDIVIDE_MORE_ON_DIAG:0);
+	
+#if 0
+				RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],ci==1?RSB_FLAG_RECURSIVE_HALF_DETECTED_CACHE:0);
+				RSB_DO_FLAG_ADD(flagsa[di+hi*2+li*4+ci*8],ci==2?RSB_FLAG_RECURSIVE_DOUBLE_DETECTED_CACHE:0);
+#endif /* 0 */
+#else /* 0 */
+				flagsa[fn]=flags;
+				//RSB_DO_FLAG_ADD(flagsa[fn],li?RSB_FLAG_EXPERIMENTAL_NO_MICRO_LEAVES:0);
+				//RSB_DO_FLAG_ADD(flagsa[fn],di?RSB_FLAG_RECURSIVE_SUBDIVIDE_MORE_ON_DIAG:0);
+				RSB_DO_FLAG_ADD(flagsa[fn],oi?RSB_FLAG_USE_HALFWORD_INDICES_COO:0);
+				RSB_DO_FLAG_ADD(flagsa[fn],hi?RSB_FLAG_USE_HALFWORD_INDICES_CSR:0);
+#if 0
+				RSB_DO_FLAG_ADD(flagsa[fn],ci==1?RSB_FLAG_RECURSIVE_HALF_DETECTED_CACHE:0);
+				RSB_DO_FLAG_ADD(flagsa[fn],ci==2?RSB_FLAG_RECURSIVE_DOUBLE_DETECTED_CACHE:0);
+#endif /* 0 */
+#endif /* 0 */
+				++fn;
+			}
+		}
+		else
+		{
+			fn=1;
+			flagsa[fn-1]=flags;
+		}
+
+		if(!want_perf_dump)
+		if(!( RSB_MKL_APPROPRIATE_AT_TIME_SPEC( want_autotuner ) || RSB_MKL_APPROPRIATE_AT_TIME_SPEC( merge_experimental ) || RSB_MKL_APPROPRIATE_AT_TIME_SPEC( split_experimental ) )) /* otherwise pr__set.. cannot distinguish samples */
+		if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING))
+		{
+			/* adds a no-recursion flag case */
+			RSB_DO_FLAG_DEL(flags,RSB_FLAG_QUAD_PARTITIONING);
+/*			if(fn)*/
+/*				flags=flagsa[fn-1];	*//* copy from the last */
+/*			else*/
+/*				flagsa[fn]=flags;	*//* impose these flags */
+			for(fi=fn;fi>0;--fi)
+				flagsa[fi]=flagsa[fi-1];/* shift forward */
+			RSB_DO_FLAG_DEL(flagsa[0],RSB_FLAG_QUAD_PARTITIONING);
+			++fn;	/* add ours */
+		}
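+		/* flagsa[] now holds one flags combination per benchmark run: the loop
+		 * above enumerated the halfword COO/CSR index variants, and the
+		 * conditional shift just above may prepend an extra non-recursive case */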
+
+		for(ti=0;ti<tn;++ti)
+dnl		if(!((ti>=1)&&(RSB_DO_FLAG_HAS(flags,RSB_FLAG_SYMMETRIC)||RSB_DO_FLAG_HAS(flags,RSB_FLAG_HERMITIAN))))
+		{
+
+ifelse(mop,`mat_stats',`',`dnl
+	rsb_time_t op_t = RSB_TIME_ZERO;
+	rsb_time_t mct = RSB_TIME_ZERO;	/* matrix construction time */
+	rsb_time_t fet = RSB_TIME_ZERO;	/* fillin estimation time */
+
+	rsb_time_t sct = RSB_TIME_ZERO;	/* serial (if minimum number of cores is 1) matrix construction time */
+	rsb_time_t pct = RSB_TIME_ZERO;	/* parallel (if maximum number of cores > 1) matrix construction time */
+
+ifelse(mop,`infty_norm',`',`dnl
+ifelse(mop,`rowssums',`',`dnl
+ifelse(mop,`scale',`',`dnl
+	rsb_time_t smt = RSB_TIME_ZERO;	/* serial multiplication time */
+	rsb_time_t pmt = RSB_TIME_ZERO;	/* parallel multiplication time */
+')dnl
+')dnl
+')dnl
+
+ifelse(RSB_M4_IS_SPSV_KERNEL_MOP(mop),1,`dnl
+	rsb_time_t sst = RSB_TIME_ZERO;	/* serial solve time */
+	rsb_time_t pst = RSB_TIME_ZERO;	/* parallel solve time */
+')dnl
+	
+	rsb_time_t sest = RSB_TIME_ZERO;	/**/
+	//rsb_time_t sect = RSB_TIME_ZERO;	/**/
+	rsb_time_t ssat = RSB_TIME_ZERO;	/**/
+	rsb_time_t seit = RSB_TIME_ZERO;	/**/
+	rsb_time_t scpt = RSB_TIME_ZERO;	/**/
+
+	rsb_time_t mest = RSB_TIME_ZERO;	/**/
+	rsb_time_t mect = RSB_TIME_ZERO;	/**/
+	rsb_time_t msat = RSB_TIME_ZERO;	/**/
+	rsb_time_t meit = RSB_TIME_ZERO;	/**/
+	rsb_time_t mcpt = RSB_TIME_ZERO;	/**/
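+	/* the mest/mect/msat/meit/mcpt variables collect, over repeated
+	 * constructions, the minima of the corresponding rsb_mtx_t timing fields
+	 * (est/ect/sat/eit/cpt); see the constructor loop below */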
+
+	rsb_time_t me_best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME, me_at_best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;     /* experimental merge */
+	rsb_time_t at_best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME, at_mkl_csr_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME; /* experimental merge */
+	rsb_thread_t at_mkl_csr_nt = RSB_AT_THREADS_AUTO, me_at_nt = RSB_AT_THREADS_AUTO;
+#if RSB_EXPERIMENTAL_WANT_BEST_TIMES
+	rsb_time_t best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t base_best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;	/* for comparative benchmarking */
+	rsb_time_t serial_best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;	/* for comparative benchmarking */
+	rsb_time_t spmv_t = RSB_TIME_ZERO;
+	rsb_time_t tot_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+	rsb_time_t spsv_d_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+	rsb_time_t spsv_spmv_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+	rsb_time_t best_spsv_spmv_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t spsv_f_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+#endif /* RSB_EXPERIMENTAL_WANT_BEST_TIMES */
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+	struct rsb_pci_t rsb_pci;
+#endif /* RSB_WANT_PERFORMANCE_COUNTERS */
+ifelse(RSB_M4_IS_SPXX_KERNEL_MOP(mop),`1',`dnl
+#if RSB_WANT_MKL
+	void *M_VA=NULL; MKL_INT *M_IA=NULL,*M_JA=NULL;
+	void *M_VAC=NULL; MKL_INT *M_IAC=NULL,*M_JAC=NULL;
+	rsb_time_t mkl_coo2csr_time = RSB_TIME_ZERO;
+	rsb_time_t mkl_coo_op_tot_time = RSB_TIME_ZERO;
+	rsb_time_t mkl_coo_op_time = RSB_TIME_ZERO;
+	rsb_time_t mkl_coo_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t mkl_coo_op_time_best_serial = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t mkl_csr_op_tot_time = RSB_TIME_ZERO;
+	rsb_time_t mkl_csr_op_time = RSB_TIME_ZERO;
+	rsb_time_t mkl_csr_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t mkl_csr_op_time_best_serial = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+
+	rsb_time_t mkl_gem_op_tot_time = RSB_TIME_ZERO;
+	rsb_time_t mkl_gem_op_time = RSB_TIME_ZERO;
+	rsb_time_t mkl_gem_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t mkl_gem_op_time_best_serial = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	struct rsb_ts_t btpms[2]; /* first is tuned, second is not */
+	rsb_flags_t mif = ( mib == 0 ) ? RSB_FLAG_NOFLAGS : RSB_FLAG_FORTRAN_INDICES_INTERFACE; /* MKL index flags */
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+	struct rsb_pci_t mkl_coo_pci,mkl_csr_pci,mkl_gem_pci;
+#endif /* RSB_WANT_PERFORMANCE_COUNTERS */
+#endif /* RSB_WANT_MKL */
+	struct rsb_attr_t attr;	/* this structure is rather large (100k, as of 20140223); as more parameters get added it should rather be heap allocated */
+	struct rsb_ts_t otpos, btpos;
+
+	RSB_BZERO_P((&otpos));
+	RSB_BZERO_P((&btpos));
+')dnl
+dnl
+')dnl
+dnl
+ifelse(RSB_M4_IS_SPXX_KERNEL_MOP(mop),`1',`dnl
+	RSB_BZERO_P((&attr));
+')dnl
+dnl
+		transA = transAo;
+dnl
+		if(ti>0)
+			transA = rsb__do_transpose_transposition(transAo);
+dnl
+		if(ti==2)
+			transA = RSB_TRANSPOSITION_C;
+dnl
+		if(!  (
+			( RSB_IS_MATRIX_TYPE_COMPLEX(typecode) && (ti!=0) && ( flags & RSB_FLAG_SOME_SYMMETRY ) )  ||
+		       ((!RSB_IS_MATRIX_TYPE_COMPLEX(typecode))&& (ti!=0) && ( flags & RSB_FLAG_SYMMETRIC) )  ||
+		       ((!RSB_IS_MATRIX_TYPE_COMPLEX(typecode))&& (ti==2) &&!( flags & RSB_FLAG_SOME_SYMMETRY) )  ||
+			g_allow_any_tr_comb
+		))
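+		/* note: the negated condition above has no braces, so it guards only
+		 * the statement which follows (the multi-transpose printout) */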
+dnl
+		if(tn>1)
+		{
+			RSBENCH_STDOUT("# multi-transpose benchmarking -- now using transA = %c.\n",RSB_TRANSPOSITION_AS_CHAR(transA));
+		}
+dnl
+		if( /* transA != RSB_TRANSPOSITION_N */ ti>0 && RSB_DO_FLAG_HAS(flags,RSB_FLAG_SYMMETRIC) )
+		{
+			RSBENCH_STDOUT("# symmetric matrix --- skipping transposed benchmarking\n");
+			continue;
+		}
+dnl
+		for(fi=0;fi<fn;++fi)
+		for(brvi=-1;brvi<brl;++brvi)
+		for(bcvi=-1;bcvi<bcl;++bcvi)
+#ifndef  RSB_COORDINATE_TYPE_H
+		if(!(flagsa[fi] & RSB_FLAG_USE_HALFWORD_INDICES_CSR))
+#endif /* RSB_COORDINATE_TYPE_H */
+		for(ci=0;ci<cn;++ci)	/* here just for should_recycle_matrix */
+		if(!(ca[ci]>1 && !(RSB_DO_FLAG_HAS(flagsa[fi],RSB_FLAG_QUAD_PARTITIONING)))) /* no need for more than one core without recursion */
+		{
+			cc = ca[ci];
+ifelse(mop,`mat_stats',`',`dnl
+	rsb_time_t diag_op_tot_time = RSB_TIME_ZERO;
+	rsb_time_t diag_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t getrow_op_tot_time = RSB_TIME_ZERO;
+	rsb_time_t getrow_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+')dnl
+dnl
+ifelse(mop,`mat_stats',`',`dnl
+	rsb_time_t diag_op_time_best_serial = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	rsb_time_t getrow_op_time_best_serial = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+')dnl
+dnl
+ifelse(RSB_M4_IS_SPMV_KERNEL_MOP(mop),1,`dnl
+dnl
+	rsb_time_t no_lock_op_time = RSB_CONST_IMPOSSIBLY_BIG_TIME, no_lock_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME,
+	serial_no_lock_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME, no_lock_op_tot_time = RSB_TIME_ZERO;
+dnl
+	rsb_time_t qt_op_time = RSB_CONST_IMPOSSIBLY_BIG_TIME, qt_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME,
+	qt_op_tot_time = RSB_TIME_ZERO;
+dnl
+')dnl
+dnl
+			should_recycle_matrix=(ci>0)?RSB_BOOL_TRUE:RSB_BOOL_FALSE;
+			/* if this is the special "vanilla CSR" run after/before recursive runs ... */
+			if(rsb__set_num_threads(cc)!=cc)
+			{
+				RSB_ERROR("failed setting %d threads!\n",cc);
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto err;
+			}
+			flags=flagsa[fi];
+			if(cn>1 && !RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING))
+				RSB_DO_FLAG_DEL(flags,RSB_FLAG_USE_HALFWORD_INDICES);
+
+ifelse(mop,`mat_stats',`',`dnl
+			best_spsv_spmv_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+			best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+			op_t = RSB_TIME_ZERO;
+#if RSB_EXPERIMENTAL_WANT_BEST_TIMES
+			best_t = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+			spmv_t = RSB_TIME_ZERO;
+			tot_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+			spsv_d_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+			spsv_spmv_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+			spsv_f_t = RSB_TIME_ZERO;	/* cumulative time (not best one)*/
+#endif /* RSB_EXPERIMENTAL_WANT_BEST_TIMES */
+')dnl
+
+			if(brl>0 && bcl>0)
+			{
+				/* this is a trick and an unclean programming practice */
+				if(brvi==-1)++brvi;
+				if(bcvi==-1)++bcvi;
+				br = brv[brvi];
+				bc = bcv[bcvi];
+			}
+			else
+			{	
+				/* br, bc already set */
+			}
+
+#if RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS 
+			/*	
+			* FIXME : laziness
+			*/
+			dnl RSB_WARN("using RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS\n");
+			if( br!=1 || bc!=1 || !rsb__util_are_flags_suitable_for_optimized_1x1_constructor(flags) )
+#endif /* RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS */
+#if RSB_WANT_RSB_AS_ONLY_ALLOWED_FORMAT
+			if(0)
+#endif /* RSB_WANT_RSB_AS_ONLY_ALLOWED_FORMAT */
+			{
+				p_r = rsb__util_get_partitioning_array(br,nrA,&M_b,flags);
+				p_c = rsb__util_get_partitioning_array(bc,ncA,&K_b,flags);
+
+				if((! p_r) || (! p_c))
+				{
+					RSB_ERROR(RSB_ERRM_ES);
+					errval = RSB_ERR_ENOMEM;
+					goto erri;
+				}
+			}
+
+			if(  ( br!=1 || bc!=1 || p_r || p_c ) && ( flags & RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR ))
+			{
+				/*  */
+				RSB_WARN("WARNING : disabling in-place allocation flag : it is only allowed for 1x1!\n");
+				RSB_DO_FLAG_DEL(flags,RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR) ;
+			}
+
+ifelse(mop,`mat_stats',`dnl
+			pinfo.M_b=M_b;
+			pinfo.K_b=K_b;
+			pinfo.rpntr=p_r;
+			pinfo.cpntr=p_c;
+')dnl
+
+
+ifelse(mop,`mat_stats',`dnl
+			if(max_nnzs==0)
+				max_nnzs=nnz;
+	if(until_confidence && g_estimate_fillin)
+	{
+		if( want_percentage && ( max_nnzs > 100 || max_nnzs < 1) ) 
+		{RSBENCH_STDERR("given percentage = %zd ?\n",(rsb_printf_int_t)max_nnzs);goto err;}
+		else
+		{
+			if( want_percentage ) max_nnzs =(rsb_nnz_idx_t ) (((double)nnz/100.0) *(double) max_nnzs );
+
+			if(max_nnzs>nnz)
+			{RSBENCH_STDERR("want more max_nnzs (%zd) than nonzeros (%zd) !\n",(rsb_printf_int_t)max_nnzs,(rsb_printf_int_t)nnz);goto err;}
+			else
+			if(max_nnzs<nnzn)
+			{RSBENCH_STDERR("want max_nnzs (%zd) less than %zd ?\n",(rsb_printf_int_t)max_nnzs,(rsb_printf_int_t)nnzn);goto err;}
+		}
+	}
+
+#if 0
+	if(!until_confidence && !g_estimate_fillin)
+	{
+		{RSBENCH_STDERR("should choose an option : [ -S points] (-e)!\n");goto err;}
+		goto err;
+	}
+#else /* 0 */
+	g_estimate_fillin=1;
+#endif /* 0 */
+		if( until_confidence && ( until_confidence > 100 || until_confidence < 1) ) 
+		{RSBENCH_STDERR("given percentage = %zd ?\n",(rsb_printf_int_t)until_confidence ); {RSB_ERROR(RSB_ERRM_ES);goto err;} ;}
+
+			if(g_estimate_fillin)
+			{
+				size_t total_element_count=0;
+				size_t total_block_count=0;
+				rsb_fillin_t fillin;
+
+				nnzs = rsb__calloc(nnzn * sizeof(size_t));
+				element_count = rsb__calloc(nnzn * sizeof(size_t));
+				block_count = rsb__calloc(nnzn * sizeof(size_t));
+
+				if(!nnzs || !element_count || !block_count)
+				{
+					errval = RSB_ERR_ENOMEM;
+					RSB_ERROR(RSB_ERRM_ES);
+					goto erri;
+				}
+
+				for(i=1;i<=nnzn;++i) nnzs[i-1]=(max_nnzs/nnzn) * i;/* ach, integer arithmetic ! */
+				nnzs[nnzn-1]=max_nnzs;
+				nnzs[nnzn-1]=nnz;
+	
+				errval = rsb__compute_partial_fillin_for_nnz_fractions(IA, JA, nnzs, nnzn, &pinfo, element_count, block_count);
+				if(RSB_SOME_ERROR(errval))
+				{
+					RSB_ERROR(RSB_ERRM_ES);
+					goto erri;
+				}
+
+				errval = rsb__compute_partial_fillin_for_nnz_fractions(IA, JA, &nnz, 1, &pinfo, &total_element_count, &total_block_count);
+				if(RSB_SOME_ERROR(errval))
+				{
+					RSB_ERROR(RSB_ERRM_ES);
+					goto erri;
+				}
+				fillin = ((double)total_element_count)/((double)nnz);
+	
+				//RSB_STDOUT("#using %d up to %d nonzeros out of %d, we estimate the fillin as:\n",nnzs[0],nnzs[nnzn-1],nnz);
+				RSBENCH_STDOUT("#matrix	rows	cols	br	bc	nnz	fillin	fraction	rel.error\n");
+				for(i=0;i< nnzn;++i)
+				{
+					rsb_fillin_t partial_fillin=0;
+/*					RSBENCH_STDOUT("#%d\n",nnzs[i]);*/
+/*					RSBENCH_STDOUT("#%d / %d\n",element_count[i],total_element_count);*/
+					RSBENCH_STDOUT("%s\t%zd\t%zd\t%zd\t%zd\t%zd\t%lg",filename,
+					(rsb_printf_int_t)nrA,(rsb_printf_int_t)ncA,(rsb_printf_int_t)br,(rsb_printf_int_t)bc,(rsb_printf_int_t)nnz,fillin);
+					//RSBENCH_STDOUT(" (%d,%d)",element_count[i],block_count[i]);
+					partial_fillin = (element_count[i])/(double)(nnzs[i]);
+					RSBENCH_STDOUT("\t%.3lg\t%+.3lg\n",
+						((double)nnzs[i])/(double)nnz,
+						(partial_fillin-fillin)/fillin
+					);
+				}
+				//RSBENCH_STDOUT("\n");
+			}
+
+
+',`dnl
+
+ifelse(RSB_M4_IS_SPSV_KERNEL_MOP(mop),1,`dnl
+#define RSB_WANT_SPSV_STABILITY_FIX 1
+#if RSB_WANT_SPSV_STABILITY_FIX
+#if 0
+			/* FIXME : fix for numerical stability */
+#if 0
+			if(RSB_SOME_ERROR(rsb__fill_with_ones(VA,typecode,nnz,1))) { errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+#else /* 0 */
+			/* FIXME : temporary fix */
+			double uthreshold=.0001;
+			double athreshold=10000000;
+			if(RSB_SOME_ERROR(rsb__util_drop_to_zero_if_under_threshold(VA,typecode,nnz,&uthreshold))) { errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+			if(RSB_SOME_ERROR(rsb__util_drop_to_zero_if_above_threshold(VA,typecode,nnz,&athreshold))) { errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+#endif /* 0 */
+#else /* 0 */
+			{rsb_nnz_idx_t n;for(n=0;n<nnz;++n)if(IA[n]==JA[n])rsb__fill_with_ones(((rsb_byte_t*)VA)+RSB_SIZEOF(typecode)*n,typecode,1,1);}
+#endif /* 0 */
+#endif /* RSB_WANT_SPSV_STABILITY_FIX */
+')dnl
+
+			if(!mtxAp)
+			{
+				int mci=0;
+				if(b_r_filename)
+				{
+					rsb_err_t errval_;
+					mct = - rsb_time();
+					mtxAp = rsb__load_matrix_file_as_binary(b_r_filename,&errval_);
+					mct += rsb_time();
+					if((RSB_SOME_ERROR(errval_)) || !mtxAp )
+					{
+						errval = errval_;	/* propagate the load error */
+						RSB_ERROR(RSB_ERRM_ES);
+						goto err;
+					}
+					else
+					{
+						nnz = mtxAp->nnz;
+						nrA = mtxAp->nr;
+						ncA = mtxAp->nc;
+					}
+
+					filename=b_r_filename;// for info purposes
+					flags=mtxAp->flags;
+				}
+				else
+				{
+				mect=mest=msat=meit=mcpt = RSB_TIME_ZERO;	/* resetting all values */
+
+				for(mci=0;mci<repeat_construction;++mci)
+				{
+					if(repeat_construction>1 && mci==0)
+						RSBENCH_STDOUT("# will repeat constructor %d times\n",repeat_construction);
+					mct = - rsb_time();
+					if(want_in_place_assembly)
+					{
+						mtxAp = rsb__do_mtx_alloc_from_coo_inplace(VA,IA,JA,nnz,typecode,nrA,ncA,br,bc,flags,&errval);
+					}
+					else
+						mtxAp = rsb_mtx_alloc_from_coo_const(VA,IA,JA,nnz,typecode,nrA,ncA,br,bc,flags,&errval);
+					mct += rsb_time();
+					if((RSB_SOME_ERROR(errval)) || !mtxAp )
+					{
+						RSB_ERROR(RSB_ERRM_ES);
+						goto err;
+					}
+
+/*					RSBENCH_STDOUT("running constructor for time %d/%d\n",mci+1,repeat_construction);*/
+					if(mect == RSB_TIME_ZERO || mect>mtxAp->ect)
+						mect=mtxAp->ect;
+					if(mest == RSB_TIME_ZERO || mest>mtxAp->est)
+						mest=mtxAp->est;
+					if(msat == RSB_TIME_ZERO || msat>mtxAp->sat)
+						msat=mtxAp->sat;
+					if(meit == RSB_TIME_ZERO || meit>mtxAp->eit)
+						meit=mtxAp->eit;
+					if(mcpt == RSB_TIME_ZERO || mcpt>mtxAp->cpt)
+						mcpt=mtxAp->cpt;
+					if(mci != repeat_construction-1)
+					{ RSB_MTX_FREE(mtxAp);	/* we only wanted timings */ }
+					else
+					{
+						/* we keep the mtxAp, and set best individual times */;
+						mtxAp->est=mest;
+						mtxAp->ect=mect;
+						mtxAp->sat=msat;
+						mtxAp->eit=meit;
+						mtxAp->cpt=mcpt;
+					}
+				}
+				}
+				if(ci==0 && sct == RSB_TIME_ZERO)
+					//sct=mct;
+					sct=mtxAp->tat;
+				if(ci==cn-1 && pct == RSB_TIME_ZERO)
+					//pct=mct;
+					pct=mtxAp->tat;
+			} /* !mtxAp */
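+			/* at this point mtxAp carries the best per-phase timings seen over
+			 * repeat_construction runs; sct and pct record the total assembly
+			 * time at the first and last configured thread counts */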
+			
+			if(do_perform_ddc == RSB_BOOL_TRUE)
+			{
+			if(rsb__is_square(mtxAp))
+			{
+				/* FIXME: experimental, new. should write a test with octave for this */
+				void * DV = rsb__calloc_vector(mtxAp->nr,mtxAp->typecode);
+				void * RS = rsb__calloc_vector(mtxAp->nr,mtxAp->typecode);
+				rsb_aligned_t mtwo[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+				if(!RS||!DV) { errval = RSB_ERR_ENOMEM; goto noddc; }
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_infty_norm(mtxAp,RSB_TRANSPOSITION_N,RS));
+				rsb__util_set_area_to_converted_integer(mtwo,mtxAp->typecode,-2);
+				RSB_DO_ERROR_CUMULATE(errval,rsb__dodo_getdiag(mtxAp,DV));
+				RSB_DO_ERROR_CUMULATE(errval,rsb__vector_to_abs(DV,mtxAp->typecode,mtxAp->nr));
+				RSB_DO_ERROR_CUMULATE(errval,rsb__cblas_Xscal(mtxAp->typecode,mtxAp->nr,mtwo,DV,1));
+				RSB_DO_ERROR_CUMULATE(errval,rsb__cblas_Xaxpy(mtxAp->typecode,mtxAp->nr,NULL,DV,1,RS,1));
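+				/* assuming a NULL alpha defaults to one, RS now holds per row
+				 * the absolute row sum minus twice the absolute diagonal entry:
+				 * negative exactly when the row is strictly diagonally dominant */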
+				if(rsb__util_count_negative(RS,mtxAp->typecode,mtxAp->nr)==mtxAp->nr)
+					RSBENCH_STDOUT("#matrix is diagonally dominant\n");
+				else
+					RSBENCH_STDOUT("#matrix is not diagonally dominant\n");
+				RSBENCH_STDOUT("#diagonal dominance computed in ? s\n");
+noddc:
+				RSB_CONDITIONAL_FREE(DV); RSB_CONDITIONAL_FREE(RS);
+				if(RSB_SOME_ERROR(errval))
+					goto err;
+			}
+			else
+			{
+				RSB_ERROR("input matrix is not square: cannot perform the diagonal dominance check\n");
+			}
+			}
+
+			if( dump_graph_file )
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_print_matrix_stats(mtxAp,RSB_CONST_DUMP_DOT,dump_graph_file));
+
+			if(do_perform_ilu == RSB_BOOL_TRUE)
+			{
+				/* FIXME: experimental */
+				rsb_time_t ilut = - rsb_time();
+				RSB_STDOUT("performing EXPERIMENTAL ILU-0\n");
+				errval = rsb__prec_ilu0(mtxAp);//TODO: actually, only for CSR
+				ilut += rsb_time();
+				if(RSB_SOME_ERROR(errval))
+				{
+					RSB_ERROR(RSB_ERRM_ES);
+					goto err;
+				}
+				else
+					RSB_STDOUT("performed EXPERIMENTAL ILU-0 with success in %lg s.\n",ilut);
+				rsb_file_mtx_save(mtxAp,NULL);
+				goto ret;
+			} /* do_perform_ilu */
+
+			if(want_update && mtxAp)
+			{
+				rsb_time_t ct = - rsb_time();
+				/* FIXME: this is update, not conversion, so it should not be here */
+				errval = rsb__do_set_coo_elements(mtxAp,VA,IA,JA,nnz);
+				if(RSB_SOME_ERROR(errval))
+				{ RSB_ERROR(RSB_ERRM_ES);goto erri;}
+				ct += rsb_time();
+				/* missing check */
+				RSBENCH_STDOUT("#individual update of %d elements in assembled RSB took %2.5f s: %2.5f%% of construction time\n",nnz,ct,(100*ct)/mtxAp->tat);
+			} /* want_update */
+
+			if(want_convert && mtxAp)
+			{
+				/* FIXME: here all conversions should occur, and be benchmarked */
+				rsb_time_t ct;
+				rsb_nnz_idx_t rnz=0;
+				struct rsb_coo_matrix_t coo;
+
+				coo.nnz = RSB_MAX(mtxAp->nnz,RSB_MAX(nrA,ncA));
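+				/* the scratch area is sized max(nnz,nr,nc), presumably so that
+				 * the same arrays can also host the CSR/CSC pointer vectors
+				 * used by the conversions below */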
+				coo.typecode=mtxAp->typecode;
+				if(rsb__allocate_coo_matrix_t(&coo)!=&coo)
+				{
+					RSB_ERROR(RSB_ERRM_ES);
+					goto errc;
+				}
+				coo.nr = mtxAp->nr;
+				coo.nc = mtxAp->nc;
+
+				ct = - rsb_time();
+				errval = rsb__do_get_rows_sparse(RSB_TRANSPOSITION_N,NULL,mtxAp,coo.VA,coo.IA,coo.JA,0,mtxAp->nr-1,&rnz,RSB_FLAG_NOFLAGS);
+				if(RSB_SOME_ERROR(errval))
+				{ RSB_ERROR(RSB_ERRM_ES);goto erri;}
+				ct += rsb_time();
+				if(RSB_SOME_ERROR(rsb__util_is_sorted_coo_as_row_major(coo.VA,coo.IA,coo.JA,coo.nnz,coo.typecode,
+					NULL,RSB_FLAG_NOFLAGS)))
+					{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+				RSBENCH_STDOUT("#extraction of %d elements in sorted COO took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+				RSBENCH_STDOUT("#extraction to unsorted COO unimplemented\n");
+				//RSBENCH_STDOUT("#extraction of %d elements in unsorted COO took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+
+				RSB_DO_ERROR_CUMULATE(errval,rsb_mtx_get_coo(mtxAp,VA,IA,JA,RSB_FLAG_C_INDICES_INTERFACE));
+
+				rsb__util_coo_array_set(coo.JA,coo.nnz,0);
+				rsb_coo_sort(VA,IA,JA,mtxAp->nnz,nrA,ncA,typecode,RSB_FLAG_NOFLAGS);
+				if(RSB_SOME_ERROR(errval))
+				{ RSB_ERROR(RSB_ERRM_ES);goto erri;}
+
+				ct = - rsb_time();
+				errval = rsb_mtx_get_csr(typecode,mtxAp, coo.VA, coo.IA, coo.JA,RSB_FLAG_DEFAULT_CSR_MATRIX_FLAGS);
+				if(RSB_SOME_ERROR(errval))
+				{ RSB_ERROR(RSB_ERRM_ES);goto erri;}
+				ct += rsb_time();
+				for(i=0;i<mtxAp->nnz;++i)if(coo.JA[i]!=JA[i]){RSB_ERROR("@%d: %d != %d!\n",i,coo.JA[i],JA[i]);errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+				if(RSB_SOME_ERROR(errval=rsb__csr_chk(coo.IA,coo.JA,coo.nr,coo.nc,coo.nnz,mib)))
+					{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+				RSBENCH_STDOUT("#extraction of %d elements in CSR took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+
+/*				ct = - rsb_time();*/
+/*				errval = rsb__do_get_coo(mtxAp,&coo.VA,&coo.IA,&coo.JA);	// FIXME : bugged ?*/
+/*				if(RSB_SOME_ERROR(errval)) goto erri;*/
+/*				ct += rsb_time();*/
+/*				if(RSB_SOME_ERROR(rsb__util_is_sorted_coo_as_row_major(coo.VA,coo.IA,coo.JA,coo.nnz,coo.typecode,*/
+/*					NULL,RSB_FLAG_NOFLAGS)))*/
+/*					{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}*/
+/*				RSBENCH_STDOUT("#extraction of %d elements in sorted COO took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);*/
+
+				rsb__util_coo_array_set(coo.IA,coo.nnz,0);
+				rsb_coo_sort(VA,JA,IA,mtxAp->nnz,ncA,nrA,typecode,RSB_FLAG_NOFLAGS);
+				ct = - rsb_time();
+				errval = rsb__do_get_csc(mtxAp,(rsb_byte_t**) &coo.VA,&coo.JA,&coo.IA);
+				if(RSB_SOME_ERROR(errval))
+					{goto erri;}
+				ct += rsb_time();
+				for(i=0;i<mtxAp->nnz;++i)if(coo.IA[i]!=IA[i]){RSB_ERROR("@%d: %d != %d!\n",i,coo.IA[i],IA[i]);errval = RSB_ERR_INTERNAL_ERROR;goto err;}
+				if(RSB_SOME_ERROR(rsb__csc_chk(coo.JA,coo.IA,coo.nr,coo.nc,coo.nnz,mib)))
+					{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+				RSBENCH_STDOUT("#extraction of %d elements in CSC took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+
+				{
+					struct rsb_mtx_t * cmatrix=NULL;
+					ct = - rsb_time();
+					cmatrix = rsb__clone_simple(mtxAp);
+					ct += rsb_time();
+					if(!cmatrix){errval = RSB_ERR_ENOMEM;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					if(!rsb__mtx_chk(cmatrix))
+						{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					RSB_MTX_FREE(cmatrix);
+				}
+				RSBENCH_STDOUT("#cloning of %d elements took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+				{
+					struct rsb_mtx_t * cmatrix=NULL;
+					cmatrix = rsb__clone_simple(mtxAp);
+					if(!cmatrix){errval = RSB_ERR_ENOMEM;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					ct = - rsb_time();
+					errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_rcoo(cmatrix,RSB_BOOL_FALSE);
+					ct += rsb_time();
+					if(!rsb__mtx_chk(cmatrix))
+						{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					if(
+rsb__terminal_recursive_matrix_count_with_storage_and_no_flags(cmatrix,RSB_MATRIX_STORAGE_BCOR,RSB_FLAG_USE_HALFWORD_INDICES_CSR)
+					!= rsb__terminal_recursive_matrix_count(cmatrix))
+						{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+
+					RSBENCH_STDOUT("#conversion of %d elements to RCOO took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+					RSB_MTX_FREE(cmatrix);
+				}
+
+				{
+					struct rsb_mtx_t * cmatrix=NULL;
+					struct rsb_coo_matrix_t icoo;
+					cmatrix = rsb__clone_simple(mtxAp);
+					if(!cmatrix){errval = RSB_ERR_ENOMEM;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					ct = - rsb_time();
+					errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_coo_sorted(cmatrix,&icoo);
+					ct += rsb_time();
+
+					if(RSB_SOME_ERROR(rsb__util_is_sorted_coo_as_row_major(icoo.VA,icoo.IA,icoo.JA,icoo.nnz,icoo.typecode,NULL,RSB_FLAG_NOFLAGS)))
+						{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					RSBENCH_STDOUT("#conversion of %d elements to sorted COO took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+					rsb__destroy_coo_matrix_t(&icoo);
+				}
+				
+				if(!RSB_DO_TOOFEWNNZFORCSR(mtxAp->nnz,mtxAp->nr))
+				{
+					struct rsb_mtx_t * cmatrix=NULL;
+					struct rsb_coo_matrix_t icoo;
+					cmatrix = rsb__clone_simple(mtxAp);
+					if(!cmatrix){errval = RSB_ERR_ENOMEM;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					ct = - rsb_time();
+					errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_csr(cmatrix,&icoo);
+					ct += rsb_time();
+					if(RSB_SOME_ERROR(rsb__csr_chk(icoo.IA,icoo.JA,icoo.nr,icoo.nc,icoo.nnz,mib)))
+						{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					RSBENCH_STDOUT("#conversion of %d elements to CSR took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+					rsb__destroy_coo_matrix_t(&icoo);
+				}
+
+				if(!RSB_DO_TOOFEWNNZFORCSR(mtxAp->nnz,mtxAp->nc))
+				{
+					struct rsb_mtx_t * cmatrix=NULL;
+					struct rsb_coo_matrix_t icoo;
+					cmatrix = rsb__clone_simple(mtxAp);
+					if(!cmatrix){errval = RSB_ERR_ENOMEM;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					ct = - rsb_time();
+					errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_csc(cmatrix,&icoo);
+					ct += rsb_time();
+					if(RSB_SOME_ERROR(rsb__csc_chk(icoo.JA,icoo.IA,icoo.nr,icoo.nc,icoo.nnz,mib)))
+						{errval = RSB_ERR_INTERNAL_ERROR;RSB_ERROR(RSB_ERRM_ES);goto err;}
+
+					RSBENCH_STDOUT("#conversion of %d elements to CSC took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+					rsb__destroy_coo_matrix_t(&icoo);
+				}
+
+				{
+					struct rsb_mtx_t * cmatrix=NULL;
+					struct rsb_coo_matrix_t icoo;
+					cmatrix = rsb__clone_simple(mtxAp);
+					if(!cmatrix){errval = RSB_ERR_ENOMEM;RSB_ERROR(RSB_ERRM_ES);goto err;}
+					ct = - rsb_time();
+					errval = rsb__do_switch_recursive_in_place_matrix_to_in_place_coo_unsorted(cmatrix,&icoo);
+					ct += rsb_time();
+
+					RSBENCH_STDOUT("#conversion of %d elements to unsorted COO took %2.5f s: %2.5f%% of construction time\n",rnz,ct,(100*ct)/mtxAp->tat);
+					rsb__destroy_coo_matrix_t(&icoo);
+				}
+errc:
+				rsb__destroy_coo_matrix_t(&coo);
+			} /* want_convert */
+
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR("problems assembling / converting matrix\n");
+				goto erri;
+			}
+
+			if(!mtxAp)
+			{
+				errval = RSB_ERR_INTERNAL_ERROR;
+				RSB_ERROR("problems assembling matrix\n");
+				goto erri;
+			}
+
+			totht -= rsb_time();
+			if(!rsb__mtx_chk(mtxAp))
+			{
+				RSB_ERROR("matrix does not seem to be built correctly\n");
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto erri;
+			}
+			totht += rsb_time();
+
+dnl			RSB_DO_ERROR_CUMULATE(errval,rsb__do_print_matrix_stats(mtxAp,RSB_CONST_DUMP_RECURSION_BRIEF,NULL));
+dnl			if(RSB_SOME_ERROR(errval)){goto err;}
+
+			if(zsort_for_coo)
+				rsb__do_zsort_coo_submatrices(mtxAp);
+			if(reverse_odd_rows)
+				rsb__do_reverse_odd_rows(mtxAp);
+
+			//rsb_file_mtx_save(mtxAp,NULL);
+			//rsb__dump_blocks(mtxAp);
+
+			if(b_w_filename || csr_w_filename)
+			{
+				const char * w_filename = b_w_filename ;
+				rsb_dump_flags_t dflags = RSB_CONST_DUMP_RSB;
+
+				if(csr_w_filename)
+				{
+					w_filename = csr_w_filename;
+					dflags = RSB_CONST_DUMP_CSR;
+				}
+
+				frt = -rsb_time();
+				errval = rsb__do_print_matrix_stats(mtxAp,dflags,w_filename);
+				frt += rsb_time();
+				rsb_perror(NULL,errval);
+				if(RSB_SOME_ERROR(errval)) { RSB_PERR_GOTO(err,RSB_ERRM_NO_XDR); }
+				RSB_STDOUT("#file output of %s took %lf s (%.0lf nnz, %.0lf nnz/s ) (%.5lf MB/s ) \n",rsb__basename(w_filename),frt,
+					(((double)mtxAp->nnz)),
+					(((double)mtxAp->nnz)/frt),
+					(((double)rsb_sys_filesize(w_filename))/(frt*RSB_INT_MILLION))
+				);
+				goto ret;
+			}
+
+			if(dumpout_internals)
+			{
+				errval = rsb__do_print_matrix_stats(mtxAp,RSB_CONST_DUMP_RECURSION,NULL);
+				if(RSB_SOME_ERROR(errval))goto err;
+				//goto ret; /* we want to continue */
+			}
+
+			errval = rsb__get_blocking_size(mtxAp,&br,&bc);
+
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR("problems getting blocking size");
+				goto erri;
+			}
+
+			/* NOTE: the matrix constructor could have removed duplicates or zeros */
+			/* nnz=mtxAp->nnz; */ /* 20120922 commented out: in case of removed entries, it would remember this number in spite of unchanged IA,JA,VA arrays */ 
+			if(!RSB_IS_VALID_NNZ_COUNT(nnz)){errval = RSB_ERR_INTERNAL_ERROR;goto erri;}
+			/* NOTE: if loading from a binary dump, we need to set nrA,ncA */
+			nrA = mtxAp->nr;
+			ncA = mtxAp->nc;
+			ndA = RSB_MAX(nrA,ncA);
+			outnri = rhsnri = ndA;
+dnl
+			ldX = (RSB_DOES_NOT_TRANSPOSE(transA) ? nrA : ncA) * incX; 	/* FIXME: still unused, e.g. in rsb__do_spmm_general */
+			ldY = (RSB_DOES_NOT_TRANSPOSE(transA) ? ncA : nrA) * incY; 
+dnl
+			lhs = rsb__calloc((mtxAp->el_size*(ndA+br))*nrhs*incY);
+			rhs = rsb__calloc((mtxAp->el_size*(ndA+bc))*nrhs*incX);
+
+			if(!lhs || !rhs)
+			{
+				RSB_ERROR("problems allocating vectors");
+				RSB_CONDITIONAL_FREE(lhs);
+				RSB_CONDITIONAL_FREE(rhs);
+				{ errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+			}
+
+			if(RSB_SOME_ERROR(rsb__vectors_reinit(rhs,lhs,typecode,ndA,ndA,incX,incY))) { errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+dnl
+dnl
+ifelse(RSB_M4_IS_SPXX_KERNEL_MOP(mop),1,`dnl
+			if(merge_experimental || split_experimental || just_enter_tuning) /* FIXME: pass parameter */
+			{
+				struct rsb_mtx_t*mtxOp = NULL;
+				int wvmbat = RSB_AUT0_TUNING_SILENT; /* wanted verbosity in merge based autotuning */
+				int eps = 0; /* effective partitioning steps */
+				rsb_time_t btt = RSB_TIME_ZERO; /* blocks tuning time */
+				rsb_submatrix_idx_t maxms = merge_experimental, maxss = split_experimental;
+				int maxr = RSB_CONST_AUTO_TUNING_ROUNDS;
+ifelse(RSB_M4_IS_SPMV_KERNEL_MOP(mop),1,`dnl
+				enum rsb_op_t op = rsb_op_spmv;
+')dnl
+ifelse(RSB_M4_IS_SPSV_KERNEL_MOP(mop),1,`dnl
+				enum rsb_op_t op = rsb_op_spsvlt;
+')dnl
+dnl
+				int mintimes = RSB_AT_MIN_TIMES/*RSB_AT_NTIMES_AUTO*/;
+				rsb_time_t maxtime = /* RSB_AT_TIME_AUTO*/ RSB_AT_MAX_TIME;
+				struct rsb_mtx_t mtxA = *mtxAp;
+
+				/* please note at_mkl_csr_nt in the following... */
+				if(maxms < 0 || maxss < 0) { at_mkl_csr_nt = me_at_nt = RSB_THREADS_AUTO; }
+				if(maxms < 0) maxms *= -1;
+				if(maxss < 0) maxss *= -1;
+				
+				RSBENCH_STDOUT("RSB Sparse Blocks Autotuner invoked requesting max %d splits and max %d merges in %d rounds, thread spec. %d (specify negative values to enable thread count tuning).\n",maxss,maxms,maxr,me_at_nt);
+
+				if (want_verbose_tuning > 0)
+					wvmbat = RSB_AUT0_TUNING_VERBOSE;
+				if (want_verbose_tuning > 1)
+					wvmbat = RSB_AUT0_TUNING_QUATSCH ;
+				if (want_verbose_tuning > 2)
+					wvmbat = RSB_AUT0_TUNING_QUATSCH + 1;
+				btt -= rsb_time(); 
+
+				if( just_enter_tuning == 0 || ( merge_experimental == 0 && split_experimental == 0 ) )
+					maxr = 0;
+				mtxOp = mtxAp;
+				errval = rsb__tune_spxx(&mtxOp,NULL,&me_at_nt,maxr,maxms,maxss,RSB_CONST_MS_AT_AUTO_STEPS,RSB_AUT0_TUNING_DEFAULT_TIMES,maxtime,transA,alphap,NULL,nrhs,order,rhs,rhsnri,betap,lhs,outnri,op,&eps,&me_best_t,&me_at_best_t,wvmbat,rsb__basename(filename),&attr,&otpos,&btpos);
+
+				btt += rsb_time(); 
+				tottt += btt;
+dnl
+				if(want_perf_dump) /* FIXME: shall give only values from the tuning routine */
+				if(RSB_DO_FLAG_HAS(/*mtxAp->*/flags,RSB_FLAG_QUAD_PARTITIONING))
+					rsb__pr_set(rspr, &mtxA, me_at_best_t<me_best_t?mtxOp:NULL, filenamei, ci, incXi, incYi, nrhsi, typecodesi, ti, transA, me_best_t, RSB_CONST_IMPOSSIBLY_BIG_TIME, me_at_best_t, RSB_CONST_IMPOSSIBLY_BIG_TIME, me_at_nt, RSB_THREADS_AUTO, btt, eps, &otpos, &btpos, NULL, NULL);
+dnl
+				if( mtxAp != mtxOp && mtxOp )
+			 	{
+					RSBENCH_STDOUT("RSB Autotuner suggested a new clone.\n");
+#if RSB_AT_DESTROYS_MTX
+					mtxAp = mtxOp;
+#else  /* RSB_AT_DESTROYS_MTX */
+#if 1
+ 					/* FIXME: this is to have mtxAp address constant. */
+					errval = rsb__mtx_transplant_from_clone(&mtxAp, mtxOp);
+					mtxOp = NULL;
+					if(RSB_SOME_ERROR(errval)) { errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+#else
+				 	RSB_MTX_FREE(mtxAp); mtxAp = mtxOp;
+#endif
+#endif /* RSB_AT_DESTROYS_MTX */
+				 }
+dnl
+			}
+')dnl
+dnl
+
+dnl
+ifelse(RSB_M4_IS_SPXX_KERNEL_MOP(mop),1,`dnl
+			if(RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING))
+			if(RSB_MKL_APPROPRIATE_AT_TIME_SPEC( want_autotuner ))
+			{
+				rsb_int_t otn = wat;
+				rsb_int_t*otnp = NULL;
+				rsb_real_t sf = RSB_REAL_ZERO;
+				rsb_time_t att = - rsb_time();
+				struct rsb_mtx_t * mtxOp = NULL;
+				struct rsb_mtx_t ** mtxOpp = NULL;
+ifelse(RSB_M4_IS_SPMX_KERNEL_MOP(mop),1,`dnl
+				enum rsb_op_t op = rsb_op_spmv;
+',`dnl
+				enum rsb_op_t op = rsb_op_spsvlt;
+')dnl
+
+				if(wat >  0)
+					otnp = &otn; /* starting thread suggestion */
+				if(wat == 0)
+				{
+					otnp = NULL; /* current thread count */
+					mtxOpp = &mtxOp; /* matrix structure tuning */
+				}
+				if(wat <  0)
+				{
+					otn = -wat; /* ;-) */
+					otnp = &otn; /* starting thread suggestion */
+					mtxOpp = &mtxOp; /* matrix structure tuning */
+				}
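+				/* wat convention, as per the cases above: wat>0 tunes the
+				 * thread count only, starting from wat; wat==0 tunes the matrix
+				 * structure at the current thread count; wat<0 tunes both */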
+				errval = rsb__tune_spxx(mtxOpp, &sf, otnp, wai, 0, 0, RSB_CONST_MS_AT_AUTO_STEPS, RSB_AUT0_TUNING_DEFAULT_TIMES, want_autotuner, transA, alphap, mtxAp, nrhs, order, rhs, rhsnri, betap, lhs, outnri, op , NULL, NULL, NULL, wavf, rsb__basename(filename), &attr, &otpos, &btpos);
+				if(mtxOpp && *mtxOpp)
+				{
+					RSBENCH_STDOUT("RSB Autotuner suggested a new matrix: freeing the existing one.\n");
+					RSB_MTX_FREE(mtxAp);
+					mtxAp = mtxOp;
+					mtxOp = NULL;
+					mtxOpp = NULL;
+				}
+				att += rsb_time();
+				RSBENCH_STDOUT("RSB Autotuner took %lg s and estimated a speedup of %lf x\n",att,sf);
+				if(wat && otn > 0)
+				{
+					/* FIXME: this breaks consistency! Shall skip further cycles!  */
+					RSBENCH_STDOUT("Setting autotuning suggested thread count of %d (will skip further thread number configurations!)\n",otn);
+					/* rsb__set_num_threads(otn); */
+					RSB_DO_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_EXECUTING_THREADS,&otn,errval);
+dnl
+					if(want_ancillary_execs == RSB_BOOL_TRUE)
+					if(incX == 1 && incY == 1)
+					{
+						totatt -= rsb_time();
+						RSBENCH_STDOUT("# Post-autotuning performance recheck:\n");
+						/* errval = */ rsb__do_bench_spxm(NULL,NULL,transA,alphap,mtxAp,nrhs,order,rhs,rhsnri,betap,lhs,outnri,RSB_AT_TIME_AUTO,RSB_AT_NTIMES_AUTO,op,10,RSB_AUT0_TUNING_QUATSCH,NULL,NULL); /* just for check purposes */
+						totatt += rsb_time();
+					}
+dnl
+					cc=otn;cl=ci+1;
+				}
+			}	/* want_autotuner */
+
+			if(RSB_SOME_ERROR(errval)) { errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+')dnl
+dnl
+				if(n_dumpres)
+				{
+					RSBENCH_STDOUT("##RSB LHS %d elements pre-peek:\n",n_dumpres);
+					rsb__debug_print_vector(rhs,RSB_MIN(ndA*nrhs,n_dumpres),typecode,incX);
+				}
+				if(n_dumprhs)
+				{
+					RSBENCH_STDOUT("##RSB RHS %d elements pre-peek:\n",n_dumprhs);
+					rsb__debug_print_vector(rhs,RSB_MIN(ndA*nrhs,n_dumprhs),typecode,incX);
+				}
+dnl
+			if ( times >= 0 ) /* benchmark of mop */
+			{
+dnl
+ifelse(RSB_M4_IS_SPMV_KERNEL_MOP(mop),1,`dnl
+				/* 20140616 use this in conjunction with --dump-n-lhs-elements .. */
+				for(nrhsl=0;nrhsl<nrhs;++nrhsl)
+				{
+					rsb__util_set_array_to_converted_integer(((rsb_byte_t*)rhs)+mtxAp->el_size*ndA*nrhsl,mtxAp->typecode,ndA,incX,nrhsl+1);
+					rsb__util_set_array_to_converted_integer(((rsb_byte_t*)lhs)+mtxAp->el_size*ndA*nrhsl,mtxAp->typecode,ndA,incY,nrhsl+1);
+				}
+dnl
+')dnl
+dnl
+				if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("POST_RSB_SPMV_",0,times,NULL);
+				op_t = - rsb_time();
+dnl
+				RSB_TM_LIKWID_MARKER_R_START("RSB_SPMV");
+				for(i=0;i<times;++i)  /* benchmark loop of mop begin */
+				{
+dnl				int e=0;
+dnl				// FIXME : the following should be used only as a bugfix
+dnl				//if(RSB_SOME_ERROR(rsb__cblas_Xscal(mtxAp->typecode,nrA+br,NULL,lhs,incY))) { errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+dnl
+ifelse(RSB_M4_IS_SPSV_KERNEL_MOP(mop),1,`dnl
+dnl //#if RSB_WANT_SPSV_STABILITY_FIX
+#if 0
+	{
+				/* an extreme debugging measure */
+				rsb_nnz_idx_t ii;
+				if(RSB_SOME_ERROR(rsb__cblas_Xscal(mtxAp->typecode,ndA,NULL,rhs,incX))) { errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+				for(ii=0;ii<nnz;++ii)rsb__util_increase_by_one(rhs,IA[ii],typecode);
+				RSB_DO_ERROR_CUMULATE(errval,rsb__xcopy(lhs,rhs,0,0,mtxAp->nr,mtxAp->el_size));
+	}
+#else /* 0 */
+				if(RSB_SOME_ERROR(rsb__fill_with_ones(rhs,mtxAp->typecode,ndA,incX))){ errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+				RSB_DO_ERROR_CUMULATE(errval,rsb__xcopy(lhs,rhs,0,0,mtxAp->nr,mtxAp->el_size));
+#endif /* 0 */
+')dnl
+dnl
+dnl
+ifelse(RSB_M4_IS_SPSV_KERNEL_MOP(mop),1,`dnl
+#if RSB_EXPERIMENTAL_WANT_BEST_TIMES
+				spsv_d_t -= rsb_time();
+
+				if((errval = rsb__do_spsv_general(transA,alphap,mtxAp,lhs,incX,lhs,incY,RSB_OP_FLAG_INFINITE_PARALLELISM_EMULATE RSB_OUTER_NRHS_SPMV_ARGS_IDS))!=RSB_ERR_NO_ERROR)
+				{
+					RSB_ERROR(RSB_ERRM_ES);
+					goto err;
+				}
+
+				spsv_d_t += rsb_time();
+				if(RSB_SOME_ERROR(rsb__fill_with_ones(rhs,mtxAp->typecode,ndA,incX))){ errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+				RSB_DO_ERROR_CUMULATE(errval,rsb__xcopy(lhs,rhs,0,0,mtxAp->nr,mtxAp->el_size));
+
+				spsv_spmv_t -= rsb_time();
+				/* y <- y + A x */
+				if((errval = rsb__do_spmm_general(mtxAp,rhs,lhs,&pone[0],&pone[0],incX,incY,transA,RSB_OP_FLAG_DEFAULT,order RSB_OUTER_NRHS_SPMV_ARGS_IDS))!=RSB_ERR_NO_ERROR)
+					goto err;
+				spsv_spmv_t += rsb_time();
+				best_spsv_spmv_t = RSB_MIN_ABOVE_INF(spsv_spmv_t,best_spsv_spmv_t,tinf);
+				if(RSB_SOME_ERROR(rsb__fill_with_ones(rhs,mtxAp->typecode,ndA*nrhs,incX))){ errval = RSB_ERR_INTERNAL_ERROR; goto erri; } 
+				RSB_DO_ERROR_CUMULATE(errval,rsb__xcopy(lhs,rhs,0,0,mtxAp->nr,mtxAp->el_size));
+
+				spsv_f_t -= rsb_time();
+				if(want_ancillary_execs == RSB_BOOL_TRUE)
+				if((errval = rsb__do_spsv_general(transA,alphap,mtxAp,lhs,incX,lhs,incY,RSB_OP_FLAG_FAKE_LOCK RSB_OUTER_NRHS_SPMV_ARGS_IDS))!=RSB_ERR_NO_ERROR)
+				{
+					RSB_ERROR(RSB_ERRM_ES);
+					goto err;
+				}
+				/* FIXME: if RSB_OUTER_NRHS_SPMV_ARGS_IDS is defined to an empty string, nrhs will not be handled properly! */
+#if 0
+				if((errval = rsb__do_spsv_general(transA,alphap,mtxAp,lhs,incX,lhs,incY,RSB_OP_FLAG_DEFAULT RSB_OUTER_NRHS_SPMV_ARGS_IDS))!=RSB_ERR_NO_ERROR)/* `mop' is mop*/
+				{
+					RSB_ERROR(RSB_ERRM_ES);
+					goto err;
+				}
+#endif
+				spsv_f_t += rsb_time();
+				/* if(RSB_SOME_ERROR(rsb__fill_with_ones(rhs,mtxAp->typecode,ndA,incX))){ errval = RSB_ERR_INTERNAL_ERROR; goto erri; } */
+				for(nrhsl=0;nrhsl<nrhs;++nrhsl)
+				{
+					rsb__util_set_array_to_converted_integer(((rsb_byte_t*)rhs)+mtxAp->el_size*ndA*nrhsl,mtxAp->typecode,ndA,incX,nrhsl+1);
+					rsb__util_set_array_to_converted_integer(((rsb_byte_t*)lhs)+mtxAp->el_size*ndA*nrhsl,mtxAp->typecode,ndA,incY,nrhsl+1);
+				}
+				/* RSB_DO_ERROR_CUMULATE(errval,rsb__xcopy(lhs,rhs,0,0,mtxAp->nr,mtxAp->el_size)); */
+#endif /* RSB_EXPERIMENTAL_WANT_BEST_TIMES */
+')dnl
+dnl
+#if RSB_EXPERIMENTAL_WANT_BEST_TIMES
+				spmv_t = - rsb_time();
+#endif /* RSB_EXPERIMENTAL_WANT_BEST_TIMES */
+dnl spmv_uauz
+dnl
+ifelse(RSB_M4_IS_SPSV_KERNEL_MOP(mop),1,`dnl
+dnl
+dnl	yes, we are passing the same vector (lhs)
+dnl	
+				if((errval = rsb__do_spsm(transA,alphap,mtxAp,nrhs,order,betap,lhs,incY*mtxAp->nr,lhs,incY*mtxAp->nr))!=RSB_ERR_NO_ERROR) /* benchmark -- `mop' is mop*/
+				{
+					RSBENCH_STDERR("[!] "RSB_ERRM_TS);
+					goto erri;
+				}
+')dnl
+dnl
+ifelse(RSB_M4_IS_SPMV_KERNEL_MOP(mop),1,`dnl
+dnl
+				RSB_PERFORMANCE_COUNTERS_DUMP("PRE_RSB_SPMV_",0);
+dnl				if((e = rsb__do_spmm_general(mtxAp,rhs,lhs,alphap,betap,incX,incY,transA,RSB_OP_FLAG_DEFAULT,order RSB_OUTER_NRHS_SPMV_ARGS_IDS))!=RSB_ERR_NO_ERROR)
+				if((errval = rsb__do_spmm_general(mtxAp,rhs,lhs,alphap,betap,incX,incY,transA,RSB_OP_FLAG_DEFAULT,order RSB_OUTER_NRHS_SPMV_ARGS_IDS))!=RSB_ERR_NO_ERROR) /* benchmark -- `mop' is mop */
+				{
+dnl					/* e is our error code*/
+					RSBENCH_STDERR("[!] "RSB_ERRM_MV);
+					goto erri;
+				}
+dnl				else
+dnl
+				RSB_PERFORMANCE_COUNTERS_DUMP("POST_RSB_SPMV_",1);
+')dnl
+dnl
+ifelse(mop,`infty_norm',`dnl
+				{
+				double infinity_norm=RSB_REAL_ZERO;
+				double infinity_norm_2=RSB_REAL_ZERO;
+
+				if((errval = rsb__do_matrix_norm( mtxAp, &infinity_norm, RSB_EXTF_NORM_INF))!=RSB_ERR_NO_ERROR)
+				{
+					RSBENCH_STDERR("[!] some problem occurred in rsb_infinity_norm!\n");
+					errval = RSB_ERR_INTERNAL_ERROR;
+					goto erri;
+				}
+				else
+				if(g_debug)
+				{
+					RSBENCH_STDOUT("matrix infinity norm is %lg\n",infinity_norm);
+					if((errval = rsb__do_infinity_norm(mtxAp,&infinity_norm_2,1,transA))!=RSB_ERR_NO_ERROR)
+					{
+						RSBENCH_STDERR("[!] some problem occurred in rsb__do_infinity_norm!\n");
+						errval = RSB_ERR_INTERNAL_ERROR;
+						goto erri;
+					}
+					if(infinity_norm != infinity_norm_2)
+					{
+						RSB_ERROR("Mismatch while computing infinity norm : \n"
+							"%lg != %lg\n",
+							infinity_norm,infinity_norm_2);
+						errval = RSB_ERR_INTERNAL_ERROR;
+						goto erri;
+					}
+					RSBENCH_STDOUT("Infinity norm check passed.\n");
+
+				}}
+')dnl
+ifelse(mop,`negation',`dnl
+				/* FIXME: this section is obsolete and shall be removed (with the corresponding m4 macros). */
+				int please_fix_RSB_M4_ARGS_TO_ACTUAL_ARGS=-1;/* here to fix negation */
+				if(g_debug)
+				{
+					struct rsb_mtx_t * matrix2 = rsb__clone_simple(mtxAp);
+					void *new_VA=NULL;
+					rsb_coo_idx_t *new_IA=NULL, *new_JA=NULL;
+					RSBENCH_STDOUT("cloning ...\n");
+
+					if(RSB_SOME_ERROR(errval = rsb_util_coo_alloc(&new_VA,&new_IA,&new_JA,nnz,typecode,RSB_BOOL_TRUE))){ RSB_ERROR("an allocation problem occurred\n"); errval = RSB_ERR_INTERNAL_ERROR;goto erri;}
+		
+					RSBENCH_STDOUT("getting back matrix in coo format ... \n");
+					if(RSB_SOME_ERROR(rsb_mtx_get_coo(mtxAp,VA,new_IA,new_JA,RSB_FLAG_C_INDICES_INTERFACE) ))
+					{
+						RSB_ERROR("rsb_mtx_get_coo returned with an error code\n");
+						errval = RSB_ERR_INTERNAL_ERROR;
+						goto erri;
+					}
+					if(RSB_SOME_ERROR(rsb_mtx_get_coo(matrix2,new_VA,new_IA,new_JA,RSB_FLAG_C_INDICES_INTERFACE) ))
+					{
+						RSB_ERROR("rsb_mtx_get_coo returned with an error code\n");
+						errval = RSB_ERR_INTERNAL_ERROR;
+						goto erri;
+					}
+					RSBENCH_STDOUT("sorting back reconstructed matrix in blocks  ... \n");
+					if( RSB_SOME_ERROR(rsb_util_sortcoo(    VA, new_IA, new_JA, nnz, typecode, M_b, K_b, p_r, p_c , flags )) ||
+					    RSB_SOME_ERROR(rsb_util_sortcoo(new_VA, new_IA, new_JA, nnz, typecode, M_b, K_b, p_r, p_c , flags )))
+						{errval = RSB_ERR_INTERNAL_ERROR; goto erri;}
+	
+					RSB_MTX_FREE(matrix2);
+
+					if((errval = rsb_do_negation( mtxAp, please_fix_RSB_M4_ARGS_TO_ACTUAL_ARGS,transA ))!=RSB_ERR_NO_ERROR)
+					{
+						RSBENCH_STDERR("[!] some problem occurred in negation!\n");
+						errval = RSB_ERR_INTERNAL_ERROR;
+						goto erri;
+					}
+					else
+					{
+						if(RSB_MEMCMP(new_VA,VA,mtxAp->el_size*mtxAp->element_count)!=0)
+						{
+							RSB_ERROR("negation cross check failed!\n");
+							errval = RSB_ERR_INTERNAL_ERROR;
+							goto erri;
+						}
+						else
+							RSBENCH_STDOUT("matrix negation cross check successfully passed\n");
+					}
+
+					RSB_CONDITIONAL_FREE(new_VA);
+					RSB_CONDITIONAL_FREE(new_IA);
+					RSB_CONDITIONAL_FREE(new_JA);
+				}
+				else
+				{
+					if((errval = rsb_do_negation( mtxAp, please_fix_RSB_M4_ARGS_TO_ACTUAL_ARGS, transA  ))!=RSB_ERR_NO_ERROR)
+					{
+						RSBENCH_STDERR("[!] some problem occurred in negation!\n");
+						errval = RSB_ERR_INTERNAL_ERROR;
+						goto erri;
+					}
+				
+				}
+
+')dnl
+dnl				RSBENCH_STDERR("[!] Unimplemented!\n");
+dnl				goto erri;
+#if RSB_EXPERIMENTAL_WANT_BEST_TIMES
+				spmv_t += rsb_time();
+				tot_t += spmv_t;
+				best_t = RSB_MIN_ABOVE_INF(spmv_t,best_t,tinf);
+#endif /* RSB_EXPERIMENTAL_WANT_BEST_TIMES */
+				if(want_inner_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+dnl
+dnl
+dnl
+dnl
+dnl
+dnl	After sampling time.
+dnl
+dnl
+ifelse(RSB_M4_IS_SPSV_KERNEL_MOP(mop),1,`dnl
+				if((g_debug || 1) && i==times-1)
+				{
+					/* this is debug information, very cheap to include */
+					rsb_byte_t * out2=NULL;
+					rsb_aligned_t mbetap[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+					out2 = rsb__calloc(mtxAp->el_size*(RSB_MAX(nrA,ncA)+br)*nrhs);
+					if(!out2 /* || rsb__cblas_Xscal(mtxAp->typecode,nrA+br,NULL,out2,incY)*/) { errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+					if(RSB_SOME_ERROR(rsb__fill_with_ones(alphap,typecode,1,1))){ errval = RSB_ERR_INTERNAL_ERROR; goto err;}
+					if(RSB_SOME_ERROR(rsb__fill_with_ones(mbetap,typecode,1,1))){ errval = RSB_ERR_INTERNAL_ERROR; goto err;}
+					if(RSB_SOME_ERROR(rsb__cblas_Xscal(typecode,1,NULL,errnorm,1))){ errval = RSB_ERR_INTERNAL_ERROR; goto err;}
+					if((errval = rsb__do_spmm_general(mtxAp,lhs,out2,alphap,mbetap,incX,incY,transA,RSB_OP_FLAG_DEFAULT,order RSB_OUTER_NRHS_SPMV_ARGS_IDS))!=RSB_ERR_NO_ERROR)
+					{
+						/* e is our error code*/
+						RSBENCH_STDERR("[!] some problem occurred in sparse matrix vector product!\n");
+						goto erri;
+					}
+					RSB_DO_ERROR_CUMULATE(errval,rsb__sqrt_of_sum_of_fabs_diffs(rhs,out2,errnorm,typecode,nrA+br));
+					RSBENCH_STDOUT("#error norm:");
+					RSB_DO_ERROR_CUMULATE(errval,rsb__debug_print_value(errnorm,typecode));
+					RSBENCH_STDOUT("\n");
+dnl					RSBENCH_STDOUT("#computed rhs:");
+dnl					RSB_DO_ERROR_CUMULATE(errval,rsb__debug_print_vector(out2,nrA,typecode,1));
+					if(out2)rsb__free(out2);
+				}
+')
+dnl
+	#ifdef RSB_WANT_KERNELS_DEBUG
+				/* ... */
+	#endif /* RSB_WANT_KERNELS_DEBUG */
+				}  /* times: benchmark loop of mop end */
+				RSB_TM_LIKWID_MARKER_R_STOP("RSB_SPMV");
+				RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("POST_RSB_SPMV_",1,times,&rsb_pci);
+dnl
+ifelse(RSB_M4_IS_SPXV_KERNEL_MOP(mop),1,`dnl
+				if((g_debug || 1) /*&& i==times-1*/)
+				{
+					/* this is debug information, very cheap to include */
+					RSB_DO_ERROR_CUMULATE(errval,rsb__do_print_some_vector_stats(lhs,typecode,nrA,incY));
+				}
+')dnl
+dnl
+ifelse(RSB_M4_IS_SPMV_KERNEL_MOP(mop),1,`dnl
+dnl
+			if(rsb__set_num_threads(cc)!=cc)
+			{
+				RSB_ERROR("failed setting %d threads!\n",cc);
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto err;
+			}
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+dnl
+			if(want_ancillary_execs == RSB_BOOL_TRUE)
+			for(i=0;i<times;++i)
+			{
+				if(want_inner_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				no_lock_op_time = - rsb_time();
+				if((errval = rsb__do_spmm_general(mtxAp,rhs,lhs,alphap,betap,incX,incY,transA,RSB_OP_FLAG_FAKE_LOCK,order RSB_OUTER_NRHS_SPMV_ARGS_IDS))!=RSB_ERR_NO_ERROR) { goto erri; }
+				no_lock_op_time += rsb_time();
+				no_lock_op_time_best = RSB_MIN_ABOVE_INF(no_lock_op_time_best,no_lock_op_time,tinf);
+				no_lock_op_tot_time += no_lock_op_time;
+			}
+			if(cc==1)serial_no_lock_op_time_best=no_lock_op_time_best;
+			totatt += no_lock_op_tot_time;
+
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+
+			if(want_ancillary_execs == RSB_BOOL_TRUE)
+			if(cc==1)
+			for(i=0;i<times;++i)
+			{
+				if(want_inner_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				qt_op_time = - rsb_time();
+				if((errval = rsb__do_spmm_general(mtxAp,rhs,lhs,alphap,betap,incX,incY,transA,RSB_OP_FLAG_WANT_SERIAL,order RSB_OUTER_NRHS_SPMV_ARGS_IDS))!=RSB_ERR_NO_ERROR) { goto erri; }
+				qt_op_time += rsb_time();
+				qt_op_time_best = RSB_MIN_ABOVE_INF(qt_op_time_best,qt_op_time,tinf);
+				qt_op_tot_time += qt_op_time;
+			}
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+			totatt += qt_op_tot_time;
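+			/*
+			 * The two ancillary loops above (RSB_OP_FLAG_FAKE_LOCK, and
+			 * RSB_OP_FLAG_WANT_SERIAL at cc==1) feed the FAKE_LOCK_* and
+			 * RECURSIVE_SERIAL_* records printed later on; presumably they
+			 * estimate the locking overhead and the cost of a serial traversal
+			 * of the recursive structure, respectively.
+			 */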
+dnl
+')dnl
+dnl
+
+dnl
+ifelse(RSB_M4_IS_SPMV_KERNEL_MOP(mop),1,`dnl
+				if((g_debug) /*&& i==times-1*/)
+				{
+					rsb_byte_t * out2=NULL;
+					out2=rsb__calloc(mtxAp->el_size*(RSB_MAX(nrA,ncA)+br)*nrhs);
+					if(!out2 /* || rsb__cblas_Xscal(mtxAp->typecode,nrA+br,NULL,out2,incY)*/) { errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+
+					RSB_DO_FLAG_ADD(mtxAp->flags,RSB_FLAG_SHOULD_DEBUG);
+/*					rsb_spmv_uaua_testing( mtxAp, rhs, out2,transA );	*//* FIXME : INCOMPLETE */
+					RSB_DO_FLAG_DEL(mtxAp->flags,RSB_FLAG_SHOULD_DEBUG);
+					/* bit-per-bit checking */
+					
+					rsb__util_vector_sum(errnorm,lhs,typecode,nrA);
+					RSBENCH_STDOUT("#sum:");
+					rsb__debug_print_vector(errnorm,1,typecode,1);
+					RSBENCH_STDOUT("\n");
+
+					if(dumpvec&rsb_dumpvec_res)/* new */
+						rsb__debug_print_vectors(lhs,out2,nrA,1,1,typecode);
+					
+					if(dumpvec&rsb_dumpvec_res)/* new */
+					{
+					if(RSB_MEMCMP(lhs,out2,mtxAp->el_size*(nrA+br*0))!=0)
+					{
+						RSB_ERROR("sparse matrix vector product cross check failed. diff (bad,good):\n");
+						rsb__debug_print_vectors_diff(lhs,out2,nrA,typecode,incY,incY,RSB_VECTORS_DIFF_DISPLAY_N);
+
+						if(out2)
+							rsb__free(out2);
+						{ errval = RSB_ERR_INTERNAL_ERROR; goto erri; }
+					}
+					else
+						RSBENCH_STDOUT("sparse matrix vector product cross check succeeded\n");
+					}
+					if(out2)rsb__free(out2);
+				}
+')dnl
+dnl
+				if(dumpvec&rsb_dumpvec_res)
+					rsb__debug_print_vector(lhs,nrA,typecode,incY);
+				if(dumpvec&rsb_dumpvec_rhs)
+					rsb__debug_print_vector(rhs,nrA,typecode,incX);
+
+				if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				if(n_dumpres)
+				{
+					RSBENCH_STDOUT("##RSB LHS %d elements post-peek:\n",n_dumpres);
+					rsb__debug_print_vector(lhs,RSB_MIN(ndA*nrhs,n_dumpres),typecode,incY);
+				}
+				if(n_dumprhs)
+				{
+					RSBENCH_STDOUT("##RSB RHS %d elements post-peek:\n",n_dumprhs);
+					rsb__debug_print_vector(rhs,RSB_MIN(ndA*nrhs,n_dumprhs),typecode,incX);
+				}
+				if(!g_sort_only)
+				{
+					op_t += rsb_time();
+					op_t /= (double)times;
+					/*
+				if(RSB_WANT_VERBOSE_MESSAGES)
+				{RSBENCH_STDOUT("performed %lf Mflops in %lf seconds (%lf Mflops)\n",raw_Mflops, op_t, (raw_Mflops)/(op_t));
+				RSBENCH_STDOUT("raw data rate of (%lf Gbytes/sec)\n", ((double)(raw_Mflops)*(mtxAp->el_size))/(op_t*1000.0));	}*/
+				/*
+				if(RSB_WANT_VERBOSE_MESSAGES)
+				RSBENCH_STDOUT("nonzero data rate of (%lf Gbytes/sec, or %lf Mflops)\n",
+				(true_Mflops*(mtxAp->el_size))/(op_t*1000.0),
+				true_Mflops/(op_t)
+				);*/
+				}
+
+ifelse(mop,`mat_stats',`',`dnl
+                                fillin = rsb__do_get_matrix_fillin(mtxAp);
+				if(g_sort_only)
+				{
+				/* FIXME :
+				 * please note that in this rudimentary model we also take into account the matrix creation time:
+				 * the nnz*log(nnz)/10^6 figure below models the comparison-sort work in millions of operations.
+				 */
+                	                raw_Mflops= (rsb_perf_t) mtxAp->element_count;
+        	                        true_Mflops=(((double)mtxAp->nnz)*log((double)mtxAp->nnz))/RSB_REAL_MILLION;
+					op_t=mct;	/* our timed operation is matrix construction */
+				}
+				else
+				{
+	                                raw_Mflops = RSB_M4_ESTIMATE_MFLOPS_PER_MOP_FUNCTION_IDENTIFIER(mop)(mtxAp);
+	                                true_Mflops = raw_Mflops/fillin;
+	                                raw_Mflops *=nrhs;
+	                                true_Mflops*=nrhs;
+				}
+
+
+dnl spmv_uauz
+ifelse(RSB_M4_IS_SPXX_KERNEL_MOP(mop),1,`dnl
+#if RSB_WANT_MKL
+	if(want_mkl_bench && !(cc==1 && mkl_coo_op_time_best_serial != RSB_CONST_IMPOSSIBLY_BIG_TIME))
+	{
+			rsb_nnz_idx_t annz = RSB_MAX(nnz,nrA+1),rnz=0,mklnz=nnz;
+			/* please note that mkl routines do not support stride */
+			/* FIXME: a non-monotonically-increasing order will do harm */
+			mkl_coo2csr_time = RSB_TIME_ZERO;
+			mkl_coo_op_tot_time = RSB_TIME_ZERO;
+			mkl_coo_op_time = RSB_TIME_ZERO;
+			mkl_coo_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+			//mkl_coo_op_time_best_serial = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+			mkl_csr_op_tot_time = RSB_TIME_ZERO;
+			mkl_csr_op_time = RSB_TIME_ZERO;
+			mkl_csr_op_time_best = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+			//mkl_csr_op_time_best_serial = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+			
+			if(nrhs>1)
+				want_mkl_bench_coo = RSB_BOOL_FALSE;/* 20130401 FIXME: this circumvents an Intel MKL bug */
+#if 1
+			//mkl_set_dynamic(1);
+			//RSBENCH_STDOUT("MKL failed enabling dynamic thread number control\n");
+			mkl_set_num_threads(cc);
+			//RSBENCH_STDOUT("MKL has %d threads now\n",mkl_get_num_threads());
+#else /* 1 */
+			if(rsb__set_num_threads(cc)!=cc)
+			{
+				RSB_ERROR("failed setting %d threads!\n",cc);
+				errval = RSB_ERR_INTERNAL_ERROR;
+				goto err;
+			}
+#endif /* 1 */
+dnl			/* FIXME: for some matrices, the following invocation of rsb_coo_sort() causes memory leaks */
+dnl			//errval = rsb_coo_sort(VA,IA,JA,nnz,nrA,ncA,typecode,RSB_FLAG_NOFLAGS);
+			if(!want_sort_after_load)
+			if(!want_in_place_assembly)
+			{
+				errval = rsb__util_sort_row_major_parallel(VA,IA,JA,nnz,nrA,ncA,typecode,RSB_FLAG_NOFLAGS);
+				mklnz = rsb_weed_out_duplicates (IA,JA,VA,nnz,typecode,RSB_FLAG_SORTED_INPUT);
+				if((!RSB_IS_VALID_NNZ_COUNT(mklnz)) || (!mklnz) || (RSB_SOME_ERROR(errval)))
+				{
+					RSB_PERR_GOTO(err,RSB_ERRM_EM);
+				}
+				annz = RSB_MAX(mklnz,nrA+1);
+			}
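+			/*
+			 * The row-major sort and duplicate removal above are a
+			 * precondition for the CSR conversion below: as the comments above
+			 * note, the MKL routines need a monotonically increasing,
+			 * duplicate-free coordinate order.
+			 */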
+			mkl_set_num_threads(cc); // necessary, or MKL will get puzzled
+
+		if(want_mkl_bench_coo)
+		{
+			totct -= rsb_time();
+			errval = rsb_util_coo_alloc_copy_and_stats(&M_VA,&M_IA,&M_JA,want_in_place_assembly?NULL:VA,want_in_place_assembly?NULL:IA,want_in_place_assembly?NULL:JA,NULL,NULL,mklnz,(annz-mklnz),typecode,0,mib,RSB_FLAG_NOFLAGS,NULL);
+			if(RSB_SOME_ERROR(errval)){RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_ENOMEM);goto mklerr;}
+			//errval = rsb_mtx_get_coo(mtxAp,M_VA,M_IA,M_JA,flags); /* FIXME: use this */
+			errval = rsb__do_get_rows_sparse(RSB_DEFAULT_TRANSPOSITION,NULL,mtxAp,M_VA,M_IA,M_JA,0,mtxAp->nr-1,&rnz,RSB_FLAG_NOFLAGS|mif);
+			totct += rsb_time();
+	
+			if(!M_VA  || !M_IA  || !M_JA ){RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_ENOMEM);goto mklerr;}
+
+			RSB_DO_ERROR_CUMULATE(errval,rsb__vectors_reinit(rhs,lhs,typecode,ndA,ndA,incX,incY));
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("PRE_MKL_COO_SPXV_",0,times,NULL);
+			RSB_TM_LIKWID_MARKER_R_START("MKL_COO_SPMV");
+			for(i=0;i<times;++i)
+			{
+				if(want_inner_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				mkl_coo_op_time = - rsb_time();
+				RSB_PERFORMANCE_COUNTERS_DUMP("PRE_MKL_COO_SPXV_",0);
+ifelse(RSB_M4_IS_SPMX_KERNEL_MOP(mop),1,`dnl
+				if(nrhs>1)
+					RSB_DO_ERROR_CUMULATE(errval,rsb__mkl_coo_spmm(M_VA,nrA,ncA,nrhs,mklnz,M_IA,M_JA,rhs,rhsnri,lhs,outnri,alphap,betap,transA,typecode,flags));
+				else
+					RSB_DO_ERROR_CUMULATE(errval,rsb__mkl_coo_spmv(M_VA,nrA,ncA,mklnz,M_IA,M_JA,rhs,lhs,alphap,betap,transA,typecode,flags));
+')dnl
+ifelse(RSB_M4_IS_SPSX_KERNEL_MOP(mop),1,`dnl
+				RSB_DO_ERROR_CUMULATE(errval,rsb__mkl_coo_spsv(M_VA,nrA,ncA,mklnz,M_IA,M_JA,rhs,lhs,alphap,betap,transA,typecode,flags));
+')dnl
+				RSB_PERFORMANCE_COUNTERS_DUMP("POST_MKL_COO_SPXV_",1);
+				mkl_coo_op_time += rsb_time();
+				mkl_coo_op_time_best = RSB_MIN_ABOVE_INF(mkl_coo_op_time_best,mkl_coo_op_time,tinf);
+				mkl_coo_op_tot_time+=mkl_coo_op_time;
+			}
+			RSB_TM_LIKWID_MARKER_R_STOP("MKL_COO_SPMV");
+				RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("POST_MKL_COO_SPXV_",1,times,&mkl_coo_pci);
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+			if(n_dumpres)
+			{
+				RSBENCH_STDOUT("##MKL COO LHS %d elements post-peek:\n",n_dumpres);
+				rsb__debug_print_vector(lhs,RSB_MIN(ndA*nrhs,n_dumpres),typecode,incY);
+			}
+			if(cc==1) 
+				mkl_coo_op_time_best_serial = mkl_coo_op_time_best;
+
+			RSB_CONDITIONAL_FREE(M_VA);
+			RSB_CONDITIONAL_FREE(M_IA);
+			RSB_CONDITIONAL_FREE(M_JA);
+		} /* want_mkl_bench_coo */
+
+		if(want_mkl_bench_csr || RSB_MKL_APPROPRIATE_AT_TIME_SPEC( want_mkl_autotuner ) )
+		{
+			totct -= rsb_time();
+			errval = rsb_util_coo_alloc_copy_and_stats(&M_VAC,&M_IAC,&M_JAC,want_in_place_assembly?NULL:VA,want_in_place_assembly?NULL:IA,want_in_place_assembly?NULL:JA,NULL,NULL,mklnz,(annz-mklnz),typecode,0,mib,RSB_FLAG_NOFLAGS,NULL);
+			errval = rsb_mtx_get_csr(mtxAp->typecode,mtxAp,M_VAC,M_IAC,M_JAC,flags|mif);
+			totct += rsb_time();
+	
+			if(!M_VAC || !M_IAC || !M_JAC) {RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_ENOMEM);goto mklerr;}
+				// FIXME: Missing error handling !
+
+                        if(0)/* if want bogus contents (for debug/inspection) */
+                        {
+                                rsb_coo_idx_t i,npr=(mklnz+nrA-1)/nrA;
+                                rsb_nnz_idx_t l;
+                                M_IAC[0]=0;
+                                for(i=1;i<nrA;++i)
+                                        M_IAC[i]=M_IAC[i-1]+npr;
+                                M_IAC[nrA]=mklnz;	/* set before being read in the loop below */
+                                for(i=0;i<nrA;++i)
+                                        for(l=M_IAC[i];l<M_IAC[i+1];++l)
+                                                M_JAC[l]=l-M_IAC[i];
+                        }
+
+			totct -= rsb_time();
+			if(!want_in_place_assembly)
+			{
+				mkl_coo2csr_time = - rsb_time();
+				RSB_DO_ERROR_CUMULATE(errval,rsb__mkl_coo2csr(nrA,ncA,mklnz,VA,IA,JA,M_VAC,M_IAC,M_JAC,typecode,mib));
+				mkl_coo2csr_time += rsb_time();
+				if(RSB_SOME_ERROR(rsb__csr_chk(M_IAC,M_JAC,nrA,ncA,mklnz,mib)))
+				{
+      					RSB_PERR_GOTO(err,RSB_ERRM_EM)
+				}
+			}
+			else
+			{
+				RSB_WARN("warning: skipping MKL coo2csr conversion (user chose in-place RSB build)\n");
+			}
+			totct += rsb_time();
+		} /* want_mkl_bench_csr || want_mkl_autotuner */
+
+			if(n_dumpres)
+			{
+				RSBENCH_STDOUT("##MKL CSR LHS %d elements pre-peek:\n",n_dumpres);
+				rsb__debug_print_vector(lhs,RSB_MIN(ndA*nrhs,n_dumpres),typecode,incY);
+			}
+			RSB_DO_ERROR_CUMULATE(errval,rsb__vectors_reinit(rhs,lhs,typecode,ndA,ndA,incX,incY));
+			if(n_dumprhs)
+			{
+				RSBENCH_STDOUT("##MKL CSR RHS %d elements pre-peek:\n",n_dumprhs);
+				rsb__debug_print_vector(rhs,RSB_MIN(ndA*nrhs,n_dumprhs),typecode,incX);
+			}
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+			if(want_mkl_bench_csr)
+			{
+			RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("PRE_MKL_CSR_SPXV_",0,times,NULL);
+			RSB_TM_LIKWID_MARKER_R_START("MKL_CSR_SPMV");
+			for(i=0;i<times;++i)
+			{
+				if(want_inner_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				mkl_csr_op_time = - rsb_time();
+				RSB_PERFORMANCE_COUNTERS_DUMP("PRE_MKL_CSR_SPXV_",0);
+ifelse(RSB_M4_IS_SPMX_KERNEL_MOP(mop),1,`dnl
+				if(nrhs>1)
+					RSB_DO_ERROR_CUMULATE(errval,rsb__mkl_csr_spmm_bench(M_VAC,nrA,ncA,nrhs,mklnz,M_IAC,M_JAC,rhs,rhsnri,lhs,outnri,alphap,betap,transA,typecode,flags|mif,NULL,NULL,NULL,NULL));
+				else
+					RSB_DO_ERROR_CUMULATE(errval,rsb__mkl_csr_spmv_bench(M_VAC,nrA,ncA,mklnz,M_IAC,M_JAC,rhs,lhs,alphap,betap,transA,typecode,flags,NULL,NULL,NULL /* &mkl_csr_op_time */,NULL ));
+')dnl
+ifelse(RSB_M4_IS_SPSX_KERNEL_MOP(mop),1,`dnl
+				if(nrhs>1)
+					RSB_DO_ERROR_CUMULATE(errval,rsb__do_mkl_csr_spsm(M_VAC,nrA,nrhs,M_IAC,M_JAC,rhs,lhs,alphap,transA,typecode,flags,nrhs/*ldX*/,nrhs/*ldY*/));
+					/* FIXME: rsb__mkl_csr_spsm_bench is there */
+				else
+					RSB_DO_ERROR_CUMULATE(errval,rsb__mkl_csr_spsv_bench(M_VAC,nrA,ncA,mklnz,M_IAC,M_JAC,rhs,lhs,alphap,betap,transA,typecode,flags,NULL,NULL,NULL /* &mkl_csr_op_time */,NULL));
+')dnl
+				RSB_PERFORMANCE_COUNTERS_DUMP("POST_MKL_CSR_SPXV_",1);
+				mkl_csr_op_time += rsb_time();
+				mkl_csr_op_time_best = RSB_MIN_ABOVE_INF(mkl_csr_op_time_best,mkl_csr_op_time,tinf);
+				mkl_csr_op_tot_time+=mkl_csr_op_time;
+			}
+			RSB_TM_LIKWID_MARKER_R_STOP("MKL_CSR_SPMV");
+			RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("POST_MKL_CSR_SPXV_",1,times,&mkl_csr_pci);
+			} /* want_mkl_bench_csr */
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+			if(cc==1)mkl_csr_op_time_best_serial=mkl_csr_op_time_best;
+			if(n_dumpres)
+			{
+				RSBENCH_STDOUT("##MKL CSR LHS %d elements post-peek:\n",n_dumpres);
+				rsb__debug_print_vector(lhs,RSB_MIN(ndA*nrhs,n_dumpres),typecode,incY);
+			}
+			if(n_dumprhs)
+			{
+				RSBENCH_STDOUT("##MKL CSR RHS %d elements post-peek:\n",n_dumprhs);
+				rsb__debug_print_vector(rhs,RSB_MIN(ndA*nrhs,n_dumprhs),typecode,incX);
+			}
+			if( mkl_csr_op_time_best != RSB_CONST_IMPOSSIBLY_BIG_TIME )
+				RSBENCH_STDOUT("##MKL STUFF DEBUG omp_set_num_threads():%d==omp_get_num_threads():%d  bestserialcsr:%0.5lf vs bestcsr:%0.5lf\n",omp_get_num_threads(),cc,mkl_csr_op_time_best_serial,mkl_csr_op_time_best);
+			if( mkl_coo_op_time_best != RSB_CONST_IMPOSSIBLY_BIG_TIME )
+				RSBENCH_STDOUT("##MKL STUFF DEBUG omp_set_num_threads():%d==omp_get_num_threads():%d  bestserialcoo:%0.5lf vs bestcoo:%0.5lf\n",omp_get_num_threads(),cc,mkl_coo_op_time_best_serial,mkl_coo_op_time_best);
+
+			if( RSB_MKL_APPROPRIATE_AT_TIME_SPEC( want_mkl_autotuner ) && want_mkl_autotuner > RSB_TIME_ZERO )
+			{
+				rsb_time_t btime = RSB_TIME_ZERO, matt = -rsb_time();
+				rsb_thread_t bthreads = at_mkl_csr_nt;
+				rsb_real_t sf = RSB_REAL_ZERO;
+				rsb_char_t * ops = "";
+
+ifelse(RSB_M4_IS_SPXX_KERNEL_MOP(mop),`1',`dnl
+				rsb__tattr_init(&(attr.clattr), NULL, nrA, mklnz, typecode, flags, nrhs);
+				attr.clattr.vl = 1; /* FIXME: new */
+')dnl
+				RSBENCH_STDOUT("# MKL CSR %s autotuning for thread spec. %d  trans %c (0=current (=%d),<0=auto,>0=specified)\n",ops,bthreads,RSB_TRANSPOSITION_AS_CHAR(transA),cc);
+ifelse(RSB_M4_IS_SPMX_KERNEL_MOP(mop),1,`dnl
+				if(nrhs>1)
+					RSB_DO_ERROR_CUMULATE(errval,rsb__mkl_csr_spmm_bench(M_VAC,nrA,ncA,nrhs,mklnz,M_IAC,M_JAC,rhs,rhsnri,lhs,outnri,alphap,betap,transA,typecode,flags|mif,&bthreads,&btime,&(attr.clattr),&btpms));
+				else
+					RSB_DO_ERROR_CUMULATE(errval,rsb__mkl_csr_spmv_bench(M_VAC,nrA,ncA,mklnz,M_IAC,M_JAC,rhs,lhs,alphap,betap,transA,typecode,flags,&bthreads,&btime,&(attr.clattr),&btpms));
+				ops = "SPMV";
+')dnl
+ifelse(RSB_M4_IS_SPSX_KERNEL_MOP(mop),1,`dnl
+dnl				RSBENCH_STDOUT("# MKL SPSV/SPSM appears to be serial --- skipping MKL threads autotuning !\n");
+#if 1
+				if(nrhs>1)
+					RSB_DO_ERROR_CUMULATE(errval,rsb__mkl_csr_spsm_bench(M_VAC,nrA,nrhs,M_IAC,M_JAC,rhs,lhs,alphap,transA,typecode,flags,nrhs/*ldX*/,nrhs/*ldY*/,&bthreads,&btime,&(attr.clattr),&btpms));
+				else
+					RSB_DO_ERROR_CUMULATE(errval,rsb__mkl_csr_spsv_bench(M_VAC,nrA,ncA,mklnz,M_IAC,M_JAC,rhs,lhs,alphap,betap,transA,typecode,flags,&bthreads,&btime,&(attr.clattr),&btpms));
+				ops = "SPSV";
+#endif
+')dnl
+ifelse(RSB_M4_IS_SPXX_KERNEL_MOP(mop),`1',`dnl
+				bthreads = bthreads ? bthreads : cc;
+')dnl
+				RSBENCH_STDOUT("# MKL CSR %s best threads / time / perf. were: %d / %lg / %lg\n",ops,bthreads,btime,(rsb__estimate_mflops_per_op_spmv_uaua(mtxAp)*nrhs)/btime);
+				matt += rsb_time();
+				RSBENCH_STDOUT("MKL CSR Autotuner took %.2lgs and estimated a speedup of %lf / %lf = %lf x (best round %d samples at %d threads)\n",matt,(attr.clattr).dtpo,(attr.clattr).btpo,(attr.clattr).dtpo/(attr.clattr).btpo,attr.clattr.nit[attr.clattr.optt],attr.clattr.optt);
+				at_mkl_csr_op_time_best = btime;
+				at_mkl_csr_nt = bthreads;
+				mkl_csr_op_time_best = (attr.clattr).dtpo;
+				totmt += matt;
+				RSB_ASSERT( bthreads > 0 );
+			} /* want_mkl_autotuner */
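+			/*
+			 * The best thread count and time found above are kept in
+			 * at_mkl_csr_nt / at_mkl_csr_op_time_best and later merged into
+			 * the performance record via rsb__pr_set (see the want_perf_dump
+			 * branch further below).
+			 */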
+
+			if(want_mkl_bench_gem)
+			{
+				rsb_coo_idx_t gemdim=0;
+			RSB_DO_ERROR_CUMULATE(errval,rsb__vectors_reinit(rhs,lhs,typecode,ndA,ndA,incX,incY));
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+			RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("PRE_MKL_GEMV_",0,times,NULL);
+			RSB_TM_LIKWID_MARKER_R_START("MKL_GEMV");
+			for(i=0;i<times;++i)
+			{
+				if(want_inner_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				mkl_gem_op_time = - rsb_time();
+				RSB_PERFORMANCE_COUNTERS_DUMP("PRE_MKL_GEMV_",0);
+ifelse(RSB_M4_IS_SPMX_KERNEL_MOP(mop),1,`dnl
+				if(nrhs>1)
+					; /* FIXME */
+				/* FIXME: missing error handling */
+				rsb__mkl_gemv(typecode,VA,rhs,lhs,nnz,ndA,&gemdim);
+')dnl
+				RSB_PERFORMANCE_COUNTERS_DUMP("POST_MKL_GEMV_",1);
+				mkl_gem_op_time += rsb_time();
+				mkl_gem_op_time_best = RSB_MIN_ABOVE_INF(mkl_gem_op_time_best,mkl_gem_op_time,tinf);
+				mkl_gem_op_tot_time+=mkl_gem_op_time;
+			}
+			true_gem_Mflops=2*gemdim*gemdim;
+			true_gem_Mflops/=RSB_REAL_MILLION;
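+			/* A dense GEMV on a gemdim x gemdim matrix performs about
+			 * 2*gemdim^2 flops (one multiply and one add per element),
+			 * hence the 2*gemdim*gemdim/10^6 figure above. */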
+			RSB_TM_LIKWID_MARKER_R_STOP("MKL_GEMV");
+			RSB_PERFORMANCE_COUNTERS_DUMP_MEAN("POST_MKL_GEMV_",1,times,&mkl_gem_pci);
+			if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+			if(cc==1)mkl_gem_op_time_best_serial=mkl_gem_op_time_best;
+			if(n_dumpres)
+			{
+				RSBENCH_STDOUT("##MKL GEMX LHS %d elements peek:\n",n_dumpres);
+				rsb__debug_print_vector(lhs,RSB_MIN(ndA*nrhs,n_dumpres),typecode,incY);
+			}
+			} /* want_mkl_bench_gem */
+mklerr:
+			RSB_CONDITIONAL_FREE(M_VAC);
+			RSB_CONDITIONAL_FREE(M_IAC);
+			RSB_CONDITIONAL_FREE(M_JAC);
+			RSB_CONDITIONAL_FREE(M_VA);
+			RSB_CONDITIONAL_FREE(M_IA);
+			RSB_CONDITIONAL_FREE(M_JA);
+			rsb_perror(NULL,errval);
+		} /* want_mkl_bench  */
+#endif /* RSB_WANT_MKL */
+#ifdef RSB_WANT_OSKI_BENCHMARKING 
+			/* FIXME : should only exist for the double type */
+			if(want_oski_bench && guess_blocking_test!=2 /* g.b.t=2 is an extra run*/) 
+			{
+
+			rsb__sprintf(oxform,"return BCSR(InputMat, %zd, %zd)",(rsb_printf_int_t)br,(rsb_printf_int_t)bc);
+			//rsb__sprintf(oxform,"return BCSR(InputMat, %d, %d)",1,1);
+			/* FIXME : ncA and nrA are not enough : we should account for br and bc excess ! */
+
+			Oval = rsb__clone_area(VA,nnz*mtxAp->el_size);
+			OIA = rsb__clone_area(IA,nnz*sizeof(rsb_coo_idx_t));
+			OJA = rsb__clone_area(JA,nnz*sizeof(rsb_coo_idx_t));
+
+			/* we need duplicates, since later we will use VA as it is */
+			if(!Oval || !OIA || !OJA)
+			{
+				RSB_ERROR("failed aux arrays allocation!\n");goto err;
+			}
+
+			/*
+				Unfortunately, OSKI does not have native BCSR constructors, but
+				relies on conversion from CSR.
+				So the measured time is more than it should be, yet a better
+				approximation than timing oski_CreateMatCSR alone.
+			*/
+
+			oski_a_t = -rsb_time();
+			if(RSB_SOME_ERROR(rsb__allocate_csr_arrays_from_coo_sorted(Oval, OIA, OJA, nnz, nrA, ncA, typecode, &Aval, &Aptr, &Aind)))
+			{
+				RSB_ERROR("failed CSR allocation!\n");goto err;
+			}
+			oski_a_t += rsb_time();
+
+			if(!Aval || !Aptr || !Aind)
+			{
+				RSB_ERROR("failed CSR arrays allocation!\n");goto err;
+			}
+
+			oski_m_t = -rsb_time();
+			A_tunable = oski_CreateMatCSR (Aptr, Aind, Aval, nrA, ncA,        /* CSR arrays */
+                                // SHARE_INPUTMAT /*COPY_INPUTMAT*/,        /* "copy mode" */
+				 /*SHARE_INPUTMAT*/ COPY_INPUTMAT,        /* "copy mode" */
+                                 1, INDEX_ZERO_BASED);
+				// we should add : INDEX_SORTED, INDEX_UNIQUE
+				// 3, INDEX_ZERO_BASED, MAT_TRI_LOWER, MAT_UNIT_DIAG_IMPLICIT);
+			oski_m_t += rsb_time();
+
+		        if(A_tunable==INVALID_MAT)
+                	{
+				RSB_ERROR("invalid oski matrix!\n");goto err;
+			}
+
+			oski_t_t = -rsb_time();
+			if( oski_ApplyMatTransforms (A_tunable, oxform) )
+			{
+				RSB_ERROR("invalid transform!\n");goto err;
+			}
+			oski_t_t += rsb_time();
+
+			if(A_tunable==INVALID_MAT)
+			{
+				RSB_ERROR("invalid oski tuned matrix!\n");goto err;
+			}
+
+				/* FIXME : should error - check these steps */
+			//	RSBENCH_STDOUT("# oski : ncA=%zd, nrA=%zd\n",(rsb_printf_int_t)ncA,(rsb_printf_int_t)nrA);
+			        x_view = oski_CreateVecView( rhs, ncA, STRIDE_UNIT );
+			        y_view = oski_CreateVecView( lhs, nrA, STRIDE_UNIT );
+				if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				oski_t = - rsb_time();
+				for(i=0;i<times;++i)
+				{
+#error FIXME: flush breaks measured time
+					if(want_inner_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+					/* y <- alpha A * x + beta * y */
+					if(oski_MatMult( A_tunable, OP_NORMAL, oalpha, x_view, obeta, y_view ))
+					{
+							RSB_ERROR("failed oski_MatMult!\n");goto err;
+					}
+				}
+				oski_t += rsb_time();
+				if(want_outer_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+				if(n_dumpres)
+					rsb__debug_print_vector(lhs,RSB_MIN(ndA*nrhs,n_dumpres),typecode,incY);
+				/* FIXME */
+	
+				oski_DestroyMat( A_tunable );
+				oski_DestroyVecView( x_view );
+				oski_DestroyVecView( y_view );
+				RSB_CONDITIONAL_FREE(Aptr);
+				RSB_CONDITIONAL_FREE(Aind);
+				RSB_CONDITIONAL_FREE(Aval);
+				RSB_CONDITIONAL_FREE(Oval);
+				RSB_CONDITIONAL_FREE(OJA  );
+				RSB_CONDITIONAL_FREE(OIA );
+				Aptr= Aind= Aval= NULL;
+			} /* want_oski_bench  */
+#endif /* RSB_WANT_OSKI_BENCHMARKING */
+')dnl
+			if(ti>0)
+				want_getrow_bench=0;
+			if(want_getrow_bench)
+			{
+				const rsb_coo_idx_t nr=1;
+				void * RVA = NULL;
+				rsb_coo_idx_t*RIA = NULL;
+				rsb_coo_idx_t*RJA = NULL;
+
+				if(RSB_SOME_ERROR(errval = rsb_util_coo_alloc(&RVA,&RIA,&RJA,mtxAp->nc*nr,typecode,RSB_BOOL_FALSE))){goto errgr;}
+				for(i=0;i<times;++i)
+				{
+					rsb_time_t getrow_op_time = RSB_TIME_ZERO;
+					rsb_coo_idx_t ri=0;
+					rsb_nnz_idx_t rnz=0;
+					getrow_op_time = - rsb_time();
+					for(ri=0;ri+nr-1<mtxAp->nr;ri+=nr)
+						RSB_DO_ERROR_CUMULATE(errval,rsb_mtx_get_coo_block(mtxAp,RVA,RIA,RJA,ri,RSB_MIN(mtxAp->nr-1,ri+nr-1),0,mtxAp->nc-1,NULL,NULL,&rnz,mtxAp->flags));
+					getrow_op_time += rsb_time();
+					getrow_op_time_best = RSB_MIN_ABOVE_INF(getrow_op_time_best,getrow_op_time,tinf);
+					getrow_op_tot_time+=getrow_op_time;
+				}
+				if(cc==1)getrow_op_time_best_serial=getrow_op_time_best;
+errgr:
+				RSB_CONDITIONAL_FREE(RVA);
+				RSB_CONDITIONAL_FREE(RIA);
+				RSB_CONDITIONAL_FREE(RJA);
+				if(RSB_SOME_ERROR(errval))
+				{goto err;}
+			} /* want_getrow_bench */
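+			/*
+			 * The getrow benchmark above extracts the matrix one block of nr
+			 * rows at a time via rsb_mtx_get_coo_block; the GETROW_PERFORMANCE
+			 * record printed later reports this as millions of nonzeroes
+			 * extracted per second.
+			 */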
+
+			if(ti>0)
+				want_getdiag_bench=0;
+			if(want_getdiag_bench)
+			{
+				void * DV = rsb__calloc_vector(mtxAp->nr,mtxAp->typecode);
+				if(!DV) { errval = RSB_ERR_ENOMEM; goto err; }
+				for(i=0;i<times;++i)
+				{
+					rsb_time_t diag_op_time = RSB_TIME_ZERO;
+					diag_op_time = - rsb_time();
+					RSB_DO_ERROR_CUMULATE(errval,rsb__dodo_getdiag(mtxAp,DV));
+					diag_op_time += rsb_time();
+					diag_op_time_best = RSB_MIN_ABOVE_INF(diag_op_time_best,diag_op_time,tinf);
+					diag_op_tot_time+=diag_op_time;
+				}
+				if(cc==1)diag_op_time_best_serial=diag_op_time_best;
+				RSB_CONDITIONAL_FREE(DV);
+				if(RSB_SOME_ERROR(errval))
+				{goto err;}
+			} /* want_getdiag_bench */
+
+			if(g_sort_only)
+			{
+				/* single line output, ideal for benchmark data to be processed later */
+				RSBENCH_STDOUT ( "%-20s	%s", rsb__basename(filename),rsb__sprint_matrix_implementation_code2(mtxAp,buf,flags));
+
+				RSBENCH_STDOUT ( "	%.3lf	%lg",
+				//raw_Mflops/op_t,	/* please note that in the sort case, it is an absolutely meaningless value */
+				true_Mflops/op_t,	/* algorithmic millions of ops per second (not an accurate model)  */
+				op_t/true_Mflops	/* the sorting algorithmic constant (not an accurate model) */
+				);
+			}
+			else
+			if(!g_estimate_matrix_construction_time)
+			{
+#if RSB_EXPERIMENTAL_WANT_BEST_TIMES
+				rsb__dump_performance_record(rsb__basename(filename),mtxAp,true_Mflops/best_t,raw_Mflops/best_t,"mop",flags);
+ifelse(RSB_M4_IS_SPSV_KERNEL_MOP(mop),1,`dnl
+				if( spsv_spmv_t != RSB_TIME_ZERO )
+				printf("# (extra) SpMV performance record:\n"),
+				rsb__dump_performance_record(rsb__basename(filename),mtxAp,(true_Mflops/best_t)*(tot_t/spsv_spmv_t),raw_Mflops/best_t*(tot_t/spsv_spmv_t),"spmv_uaua*",flags);
+')dnl
+#else /* RSB_EXPERIMENTAL_WANT_BEST_TIMES */
+				rsb__dump_performance_record(rsb__basename(filename),mtxAp,true_Mflops/op_t,raw_Mflops/op_t,"mop",flags);
+#endif /* RSB_EXPERIMENTAL_WANT_BEST_TIMES */
+			}
+			if(g_estimate_matrix_construction_time)
+			{
+				/* in this case the user also asked for:
+				   * matrix construction Mflops
+				   * the ratio of the selected op time to the matrix construction time
+				 */
+				RSBENCH_STDOUT("\t%.3lg\t%.3lg	", ((double)nnz)/(mct*RSB_REAL_MILLION), mct/op_t);
+				rsb__fprint_matrix_implementation_code(mtxAp, "mop", flags, RSB_STDOUT_FD);
+				RSBENCH_STDOUT ( "\n");
+			}
+			omta=((double)rsb_spmv_memory_accessed_bytes(mtxAp));
+			
+ifelse(RSB_M4_IS_SPMX_KERNEL_MOP(mop),`1',`dnl
+#if RSB_WANT_MKL
+			if(want_mkl_bench)
+			{
+			if(want_mkl_bench_coo)
+			{
+				RSBENCH_STDOUT ( "#MKL_COO_VS_US-SPMV:%-20s\t%s\t%10.2lf\t%10.2lf\n", rsb__basename(filename),rsb__sprint_matrix_implementation_code2(mtxAp,buf,RSB_FLAG_NOFLAGS),raw_Mflops/(mkl_coo_op_tot_time/times),raw_Mflops/op_t);
+				RSBENCH_STDOUT ( "#MKL_COO2CSR2SPMV_VS_US:%-20s\t%s\t%10.2lf\t%10.2lf\n", rsb__basename(filename),rsb__sprint_matrix_implementation_code2(mtxAp,buf,RSB_FLAG_NOFLAGS),(mkl_coo2csr_time)/(mkl_csr_op_tot_time/times),-1.0);
+			}
+			if(want_mkl_bench_csr)
+			{
+				RSBENCH_STDOUT ( "#MKL_CSR_VS_US-SPMV:%-20s\t%s\t%10.2lf\t%10.2lf\n", rsb__basename(filename),rsb__sprint_matrix_implementation_code2(mtxAp,buf,RSB_FLAG_NOFLAGS),raw_Mflops/(mkl_csr_op_tot_time/times),raw_Mflops/op_t);
+			}
+			}
+#endif /* RSB_WANT_MKL */
+')dnl
+#ifdef RSB_WANT_OSKI_BENCHMARKING 
+			if(want_oski_bench)
+			{
+				RSBENCH_STDOUT ( "#OSKI_VS_US-SPMV:%-20s\t%s\t%10.2lf\t%10.2lf\n", rsb__basename(filename),rsb__sprint_matrix_implementation_code2(mtxAp,buf,RSB_FLAG_NOFLAGS),raw_Mflops/(oski_t/times),raw_Mflops/op_t);
+				RSBENCH_STDOUT ( "#OSKI_VS_US-ASM~:%-20s\t%s\t%10.2lf\t%10.2lf\n", rsb__basename(filename),rsb__sprint_matrix_implementation_code2(mtxAp,buf,RSB_FLAG_NOFLAGS),oski_m_t+oski_t_t+oski_a_t,mct);
+			}
+#endif /* RSB_WANT_OSKI_BENCHMARKING  */
+			/* WARNING : we cannot use RSB_FLAG_SORTED_INPUT in the recursive case
+				     until the following routine is able to use Z-sorted values. */
+			efillin = RSB_REAL_ZERO,eperf = RSB_REAL_ZERO;
+
+			/* FIXME : dies with ct20stif.mtx, now */
+			#if 0
+			RSB_WARN("warning : skipping rsb__estimate_expected_fillin_for_blocking\n");
+			fet = - rsb_time();
+			//rsb__estimate_expected_fillin_for_blocking(VA,IA,JA,nrA,ncA,nnz,typecode,flags/*|RSB_FLAG_SORTED_INPUT*/,br,bc,&efillin);/*TODO:thiscouldbedangerous:fixit!*/
+			efillin=mtxAp->einfo.efillin;	/* NEW */
+			fet += rsb_time();
+			#else /* 0 */
+			fet = RSB_TIME_ZERO;
+			#endif /* 0 */
+			rsb__estimate_expected_raw_performance_for_blocking(nrA,ncA,br,bc,nnz,typecode,flags,efillin,&eperf);
+
+			if(cc==1)
+			{
+				/* we need input flags, not instantiated matrix flags (which might not have that flag) */
+				if(!RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING))
+					base_best_t=best_t;
+				else
+					serial_best_t=best_t;
+			}
+	
+			if(want_perf_dump) 
+			if(RSB_DO_FLAG_HAS(/*mtxAp->*/flags,RSB_FLAG_QUAD_PARTITIONING))
+			{
+#if RSB_WANT_MKL
+				/* FIXME: this #if is horrible */
+dnl				if( /* at_mkl_csr_op_time_best<mkl_csr_op_time_best && */ cc == at_mkl_csr_nt )
+dnl					mkl_csr_op_time_best = at_mkl_csr_op_time_best = RSB_MIN( mkl_csr_op_time_best, at_mkl_csr_op_time_best); /* FIXME: this is just a parachute */
+				rsb__pr_set(rspr, mtxAp/*NULL */ /* FIXME */, NULL, filenamei, ci, incXi, incYi, nrhsi, typecodesi, ti, transA, RSB_CONST_IMPOSSIBLY_BIG_TIME, mkl_csr_op_time_best, RSB_CONST_IMPOSSIBLY_BIG_TIME, at_mkl_csr_op_time_best, RSB_THREADS_AUTO, at_mkl_csr_nt, RSB_CONST_IMPOSSIBLY_BIG_TIME, -1, NULL, NULL, &btpms[1], &btpms);
+#endif
+			}
+
+#if RSB_EXPERIMENTAL_WANT_BEST_TIMES
+			RSBENCH_STDOUT ( "#	%10.2lf	%10.2lf	( best, average net performance in %d tries ); diff:%2.0lf%%\n",
+				((double)true_Mflops/best_t), ((double)true_Mflops/op_t),
+				(int)times,
+				/* for marcin : */
+				((((double)true_Mflops/best_t)-((double)true_Mflops/op_t))*100)/((double)true_Mflops/op_t)
+				);
+#endif /* RSB_EXPERIMENTAL_WANT_BEST_TIMES */
+
+			RSBENCH_STDOUT ( "#	%10.2lf	%10.2lf	%10.2lf %10.6lf (min bw, reasonable bw, exceedingly max bw, w/r ratio) (MB/s)\n"
+				     "#	%10.2lf (MB per mop) %10.2lf (rhs loads, with a variable degree of locality)\n"
+				     "#	%10.2lf (MB per mop, estimated)\n"
+				     "#	%10.2lf (assembly + extra to (best) mop time ratio) (%10.2lf s)\n"
+				     "#	%10.2lf (assembly (p.e.+s.a.+e.i.+e.s.+...) to mop time ratio)\n"
+/*				     "#	%10.2lf (performance estimation to mop time ratio)\n"*/
+/*				     "#	%10.2lf (gross fillin estimation to mop time ratio)\n"*/
+				     "#	%10.2lf (structure analysis to mop time ratio)\n"
+				     "#	%10.2lf (elements insertion to mop time ratio)\n"
+				     "#	%10.2lf (elements sorting to mop time ratio) (%10.2lf s)\n"
+				     "#	%10.2lf (elements partitioning to mop time ratio)\n"
+				     "#	%10.2lf (recursion sort to mop time ratio)\t%10.ld (max recursion depth)\n"
+				     "#	%10.2lf	%10.2lf (nnz per row/column)\n"
+					,
+				((double)rsb_spmv_memory_accessed_bytes_min(mtxAp))*(1.e-6/best_t) ,
+				((double)omta)*(1.e-6/best_t) ,
+				((double)rsb_spmv_memory_accessed_bytes_max(mtxAp))*(1.e-6/best_t) ,
+				((double)rsb_spmv_memory_accessed_bytes_wr_ratio(mtxAp)),
+				((double)omta)*(1.e-6),
+				(1.0>((fillin*nnz)/(br*ncA))?1.0:((fillin*nnz)/(br*ncA))),
+				((double)rsb_spmv_memory_accessed_bytes_(br,bc,nrA,ncA,efillin*nnz,((efillin*nnz)/br)/bc,nrA/br,mtxAp->el_size))*(1.e-6),
+				(mct)/(best_t),
+				(mtxAp->tat),
+				(mtxAp->tat)/(best_t),
+/*				(mtxAp->pet)/(best_t),*/
+/*				(fet)/(best_t),*/
+				(mtxAp->sat)/(best_t),
+				(mtxAp->eit)/(best_t),
+				(mtxAp->est)/(best_t), (mtxAp->est),
+				(mtxAp->cpt)/(best_t),
+				((mtxAp->rpt)/(best_t)),((long)rsb__get_recursive_matrix_depth(mtxAp)),
+				(double)nnz/nrA, (double)nnz/ncA
+				);
+				if(RSB_MAXIMAL_CONFIGURED_BLOCK_SIZE>1)
+				RSBENCH_STDOUT ( 
+				     "#	%10.2lf (estimated fillin)"
+				     "#	%10.2lf (estimated fillin error)\n"
+				     "#	%10.2lf (estimated raw performance)"
+				     "#	%10.2lf (estimated raw performance error)\n"
+				     "#	%10.2lf (estimated net performance)"
+				     "#	%10.2lf (estimated net performance error)\n",
+				efillin, (efillin-fillin)/fillin,
+				eperf, (eperf-raw_Mflops/best_t)/(raw_Mflops/best_t),
+				efillin?(eperf/efillin):-1,efillin?(((eperf/efillin)-(true_Mflops/best_t))/(true_Mflops/best_t)):-1
+				);
+				RSBENCH_STDOUT( "#used index storage compared to COO:%zd vs %zd bytes (%.02lf%%) "
+					,(size_t)rsb__get_index_storage_amount(mtxAp),sizeof(rsb_coo_idx_t)*2*nnz
+					,(100*(double)rsb__get_index_storage_amount(mtxAp))/RSB_UTIL_COO_IDX_OCCUPATION(mtxAp->nr,mtxAp->nc,mtxAp->nnz)
+				);
+				RSBENCH_STDOUT( "; compared to CSR:%zd vs %zd bytes (%.02lf%%)\n"
+					,(size_t)rsb__get_index_storage_amount(mtxAp),
+					 (sizeof(rsb_coo_idx_t)*nnz+sizeof(rsb_nnz_idx_t)*(mtxAp->nr+1))
+					,(100*(double)rsb__get_index_storage_amount(mtxAp))/RSB_UTIL_CSR_IDX_OCCUPATION(mtxAp->nr,mtxAp->nc,mtxAp->nnz)
+				);
+dnl				if(0)//very verbose and annoying!
+dnl				RSBENCH_STDOUT( "#"),rsb_do_print_nnz_per_row_for_each_submatrix(mtxAp);
+ifelse(RSB_M4_IS_SPSV_KERNEL_MOP(mop),1,`dnl
+				totatt += spsv_f_t;
+				if( spsv_d_t != RSB_TIME_ZERO)
+				RSBENCH_STDOUT( "#gain for spsv if we had infinite spmv-workers:%lf\n",((double)tot_t)/((double)(spsv_d_t)));
+				if( spsv_spmv_t != RSB_TIME_ZERO)
+				RSBENCH_STDOUT( "#spsv performance vs spmv_uaua*:%lf\n",spsv_spmv_t/tot_t);
+				if( spsv_f_t != RSB_TIME_ZERO)
+				RSBENCH_STDOUT( "#gain for spsv if we had no concurrent writes preventing locks at all:%lf\n",((double)tot_t)/((double)(spsv_f_t)));
+							
+')dnl
+dnl
+dnl
+dnl
+dnl
+dnl
+ifelse(RSB_M4_IS_SPSV_KERNEL_MOP(mop),1,`dnl
+			if(ci==0 && smt == RSB_TIME_ZERO && RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING))
+				smt=best_spsv_spmv_t;
+			if(ci==cl-1 && pmt == RSB_TIME_ZERO)
+				pmt=best_spsv_spmv_t;
+			if(ci==0 && sst == RSB_TIME_ZERO && RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING))
+				sst=best_t;
+			if(ci==cl-1 && pst == RSB_TIME_ZERO)
+				pst=best_t;
+')dnl
+dnl
+			rsb__attr_dump(&attr);
+			RSB_BZERO_P((&attr));
+dnl
+ifelse(RSB_M4_IS_SPMV_KERNEL_MOP(mop),1,`dnl
+dnl
+dnl	FIXME: and what about non SPMV ?
+dnl
+			if(ci==0 && smt == RSB_TIME_ZERO && RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING))
+			{
+				smt=best_t;
+				sest=mest;
+				//sect=mect;
+				ssat=msat;
+				seit=meit;
+				scpt=mcpt;
+			}
+			if(ci==cl-1 && pmt == RSB_TIME_ZERO)
+			{
+				pmt=best_t;
+			}
+')dnl
+dnl
+dnl
+				if(want_verbose == RSB_BOOL_TRUE && (RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING)||fn==1))
+				{
+					rsb_nnz_idx_t minnz=0,maxnz=0,avgnz=0;
+					rsb_bool_t vrpr = (times != 0) ? RSB_BOOL_TRUE : RSB_BOOL_FALSE;
+
+					if(vrpr)
+					{
+					RSBENCH_STDOUT("%%:PERFORMANCE:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",true_Mflops/best_t);
+					RSBENCH_STDOUT("\t%le\t%le\n",true_Mflops,best_t);
+
+					RSBENCH_STDOUT("%%:OP_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",best_t);
+					}
+
+ifelse(RSB_M4_IS_SPMV_KERNEL_MOP(mop),1,`dnl
+					if( no_lock_op_time_best != RSB_CONST_IMPOSSIBLY_BIG_TIME )
+					{
+					RSBENCH_STDOUT("%%:FAKE_LOCK_PERFORMANCE:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",true_Mflops/no_lock_op_time_best);
+
+					RSBENCH_STDOUT("%%:FAKE_LOCK_OP_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",no_lock_op_time_best);
+
+					RSBENCH_STDOUT("%%:FAKE_LOCK_PERF_SCALING:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",serial_no_lock_op_time_best/no_lock_op_time_best);
+					}
+
+					if(qt_op_time_best != RSB_CONST_IMPOSSIBLY_BIG_TIME && cc==1)
+					{
+					RSBENCH_STDOUT("%%:RECURSIVE_SERIAL_PERFORMANCE:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",true_Mflops/qt_op_time_best);
+
+					RSBENCH_STDOUT("%%:RECURSIVE_SERIAL_OP_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",qt_op_time_best);
+					}
+
+')
+					if(vrpr)
+					{
+					if( serial_best_t != RSB_CONST_IMPOSSIBLY_BIG_TIME )
+					RSBENCH_STDOUT("%%:PERF_SCALING:"),RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(),
+					RSBENCH_STDOUT("\t%10.2lf\n",serial_best_t/best_t);
+					}
+
+					RSBENCH_STDOUT("#%%:CONSTRUCTOR_*:SORT	SCAN	INSERT	SCAN+INSERT\n");
+					RSBENCH_STDOUT("%%:CONSTRUCTOR_TIMES:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\t%10.6lf\t%10.6lf\t%10.6lf\n",mest,msat,meit,msat+meit);
+
+					RSBENCH_STDOUT("%%:UNSORTEDCOO2RSB_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n", mest+msat+meit);
+
+					RSBENCH_STDOUT("%%:RSB_SUBDIVISION_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n", msat);
+
+					RSBENCH_STDOUT("%%:RSB_SHUFFLE_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n", meit);
+
+					RSBENCH_STDOUT("%%:ROW_MAJOR_SORT_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n", mest);
+
+					RSBENCH_STDOUT("%%:ROW_MAJOR_SORT_SCALING:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.3lf\n", sest/mest);
+
+					RSBENCH_STDOUT("%%:SORTEDCOO2RSB_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n", msat+meit);
+
+					RSBENCH_STDOUT("%%:ROW_MAJOR_SORT_TO_MOP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.3lf\n", mest/best_t);
+
+					if(vrpr)
+					{
+					RSBENCH_STDOUT("%%:CLEANUP_TO_MOP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",mect/best_t);
+
+					RSBENCH_STDOUT("%%:CONSTRUCTOR_TO_MOP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\t%10.2lf\t%10.2lf\t%10.2lf\n",mest/best_t,msat/best_t,meit/best_t,(msat+meit)/best_t);
+
+dnl					RSBENCH_STDOUT("%%:TOTCOO2RSB_TO_MOP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+dnl					RSBENCH_STDOUT("\t%10.2lf\n",(mtat)/best_t);
+
+					RSBENCH_STDOUT("%%:UNSORTEDCOO2RSB_TO_MOP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(msat+meit+mest)/best_t);
+
+					RSBENCH_STDOUT("%%:SORTEDCOO2RSB_TO_MOP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(msat+meit)/best_t);
+
+					RSBENCH_STDOUT("%%:RSB_SUBDIVISION_TO_MOP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(msat)/best_t);
+
+					RSBENCH_STDOUT("%%:RSB_SHUFFLE_TO_MOP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(meit)/best_t);
+					}
+
+					RSBENCH_STDOUT("%%:UNSORTEDCOO2RSB_SCALING:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(ssat+seit+sest)/(msat+meit+mest));
+
+					RSBENCH_STDOUT("%%:SORTEDCOO2RSB_SCALING:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(ssat+seit)/(msat+meit));
+
+					RSBENCH_STDOUT("%%:RSB_SUBDIVISION_SCALING:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(ssat)/(msat));
+
+					RSBENCH_STDOUT("%%:RSB_SHUFFLE_SCALING:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",(seit)/(meit));
+
+					RSBENCH_STDOUT("%%:CONSTRUCTOR_SCALING:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\t%10.2lf\t%10.2lf\t%10.2lf\n",sest/mest,ssat/msat,seit/meit,(ssat+seit)/(meit+msat));
+
+					if( base_best_t != RSB_CONST_IMPOSSIBLY_BIG_TIME )
+					RSBENCH_STDOUT("%%:PERF_SCALING2CSR:"),RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(),
+					RSBENCH_STDOUT("\t%10.2lf\n",base_best_t/best_t);
+
+dnl					RSBENCH_STDOUT("%%:FM_FRACTIONS:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+dnl					RSBENCH_STDOUT("\tTODO\n");
+
+					RSBENCH_STDOUT("#%%:SM_COUNTS:	Tot	HalfwordCsr	FullwordCsr	HalfwordCoo	FullwordCoo\n");
+					RSBENCH_STDOUT("%%:SM_COUNTS:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					//RSBENCH_STDOUT("\t%d\t%d\t%d\t%d\t%d\n",
+					RSBENCH_STDOUT("\t%ld\t%ld\t%ld\t%ld\t%ld\n",
+rsb__terminal_recursive_matrix_count(mtxAp),
+rsb__terminal_recursive_matrix_count_with_storage_and_flags(mtxAp,RSB_MATRIX_STORAGE_BCSR,RSB_FLAG_USE_HALFWORD_INDICES_CSR),
+rsb__terminal_recursive_matrix_count_with_storage_and_no_flags(mtxAp,RSB_MATRIX_STORAGE_BCSR,RSB_FLAG_USE_HALFWORD_INDICES_CSR),
+rsb__terminal_recursive_matrix_count_with_storage_and_flags(mtxAp,RSB_MATRIX_STORAGE_BCOR,RSB_FLAG_USE_HALFWORD_INDICES_COO),
+rsb__terminal_recursive_matrix_count_with_storage_and_no_flags(mtxAp,RSB_MATRIX_STORAGE_BCOR,RSB_FLAG_USE_HALFWORD_INDICES_COO)
+						);
+
+					RSBENCH_STDOUT("%%:SM_IDXOCCUPATIONRSBVSCOOANDCSR:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%zd\t%zd\t%zd\n",rsb__get_index_storage_amount(mtxAp),
+						RSB_UTIL_COO_IDX_OCCUPATION(mtxAp->nr,mtxAp->nc,mtxAp->nnz),
+						RSB_UTIL_CSR_IDX_OCCUPATION(mtxAp->nr,mtxAp->nc,mtxAp->nnz)
+						);
+
+					RSBENCH_STDOUT("%%:SM_IDXOCCUPATION:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%zd\n",rsb__get_index_storage_amount(mtxAp));
+
+					RSBENCH_STDOUT("%%:SM_MEMTRAFFIC:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.0lf\n",omta);
+#if 0
+					/* new, elegant */
+					RSBENCH_STDOUT("%%:SM_MINMAXAVGSUBMNNZ:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					{
+						rsb_submatrix_idx_t i=0;
+						rsb_real_t avgnz = ((rsb_real_t)mtxAp->nnz) / mtxAp->all_leaf_matrices_n;
+						rsb_coo_idx_t maxnz = 0, minnz = RSB_MAX_MATRIX_NNZ ;
+
+						for(i=0;i<mtxAp->all_leaf_matrices_n;++i)
+						{
+							struct rsb_mtx_t * submatrix = mtxAp->all_leaf_matrices[i].mtxlp;
+							maxnz = RSB_MAX(maxnz,submatrix->nnz);
+							minnz = RSB_MIN(minnz,submatrix->nnz);
+						}
+						RSBENCH_STDOUT(" %d %d %.2lf %d\n",minnz,maxnz,avgnz,mtxAp->all_leaf_matrices_n);
+					}
+#else
+					/* old, obsolete */
+					rsb__do_compute_terminal_nnz_min_max_avg_count(mtxAp,&minnz,&maxnz,&avgnz);
+					RSBENCH_STDOUT("%%:SM_MINMAXAVGNNZ:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%d\t%d\t%d\n",minnz,maxnz,avgnz);
+#endif
+
+				if(want_print_per_subm_stats)
+				{
+					RSBENCH_STDOUT("%%:SM_NNZ_HISTOGRAM:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					if(!mtxAp->all_leaf_matrices)
+						RSBENCH_STDOUT(" %zd\n",(size_t)mtxAp->nnz);
+					else
+					{
+						rsb_submatrix_idx_t i=0;
+						for(i=0;i<mtxAp->all_leaf_matrices_n;++i)
+							RSBENCH_STDOUT(" %zd",(size_t)mtxAp->all_leaf_matrices[i].mtxlp->nnz);
+						RSBENCH_STDOUT("\n");
+					}
+
+					RSBENCH_STDOUT("%%:SM_NNZ_PER_ROW:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					if(!mtxAp->all_leaf_matrices)
+						RSBENCH_STDOUT(" %lf\n",((double)mtxAp->nnz)/mtxAp->nr);
+					else
+					{
+						rsb_submatrix_idx_t i=0;
+						for(i=0;i<mtxAp->all_leaf_matrices_n;++i)
+							RSBENCH_STDOUT(" %.2lf",((double)mtxAp->all_leaf_matrices[i].mtxlp->nnz)/mtxAp->all_leaf_matrices[i].mtxlp->nr);
+						RSBENCH_STDOUT("\n");
+					}
+				} /* want_print_per_subm_stats */
+
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+			if(want_perf_counters)
+				{
+					int i;
+					for(i=0;i<rsb_pci.eventnum;++i)
+					{
+						RSBENCH_STDOUT("%%:RSB_%s:",rsb_pci.eventdesc[i]);
+						RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+						RSBENCH_STDOUT("\t%zd\n",(size_t)(rsb_pci.eventvals[i]));
+					}
+				} /* want_perf_counters */
+#endif /* RSB_WANT_PERFORMANCE_COUNTERS */
+				}
+dnl
+			} /* times */
+dnl
+ifelse(RSB_M4_IS_SPXX_KERNEL_MOP(mop),`1',`dnl
+#if RSB_WANT_MKL
+				if(want_mkl_bench) /* 20110428 */
+				if(want_verbose == RSB_BOOL_TRUE && (RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING)||fn==1))
+				{
+#ifdef mkl_get_version
+					MKLVersion mv;
+					mkl_get_version(&mv);
+					RSBENCH_STDOUT("#%%:MKL %d.%d-%d, %s, %s, %s, %s\n",mv.MajorVersion,mv.MinorVersion,mv.UpdateVersion,mv.ProductStatus,mv.Build,mv.Processor,mv.Platform);
+#else /* mkl_get_version */
+					RSBENCH_STDOUT("#%%:MKL, version unknown\n");
+#endif /* mkl_get_version */
+			if(want_mkl_bench_coo)
+			{
+					RSBENCH_STDOUT("%%:MKL_COO_PERFORMANCE:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",true_Mflops/mkl_coo_op_time_best);
+
+					RSBENCH_STDOUT("%%:MKL_COO_OP_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(); RSBENCH_STDOUT("\t%10.6lf\n",mkl_coo_op_time_best);
+
+					if( mkl_coo_op_time_best_serial != RSB_CONST_IMPOSSIBLY_BIG_TIME )
+					RSBENCH_STDOUT("%%:MKL_COO_PERF_SCALING:"),RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(),
+					RSBENCH_STDOUT("\t%10.2lf\n",mkl_coo_op_time_best_serial/mkl_coo_op_time_best);
+			}
+#ifdef RSB_WANT_PERFORMANCE_COUNTERS
+			if(want_perf_counters)
+				{
+					int i;
+					for(i=0;i<mkl_csr_pci.eventnum;++i)
+					{
+						RSBENCH_STDOUT("%%:MKL_CSR_%s:",mkl_csr_pci.eventdesc[i]);
+						RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+						RSBENCH_STDOUT("\t%zd\n",mkl_csr_pci.eventvals[i]);
+					}
+					if(want_mkl_bench_coo)
+					for(i=0;i<mkl_coo_pci.eventnum;++i)
+					{
+						RSBENCH_STDOUT("%%:MKL_COO_%s:",mkl_coo_pci.eventdesc[i]);
+						RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+						RSBENCH_STDOUT("\t%zd\n",mkl_coo_pci.eventvals[i]);
+					}
+				}
+#endif /* RSB_WANT_PERFORMANCE_COUNTERS */
+			if(want_mkl_bench_csr)
+			{
+					RSBENCH_STDOUT("%%:MKL_CSR_PERFORMANCE:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",true_Mflops/mkl_csr_op_time_best);
+
+					RSBENCH_STDOUT("%%:MKL_CSR_OP_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",mkl_csr_op_time_best);
+
+					if( mkl_csr_op_time_best_serial != RSB_CONST_IMPOSSIBLY_BIG_TIME )
+					RSBENCH_STDOUT("%%:MKL_CSR_PERF_SCALING:"),RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(),
+					RSBENCH_STDOUT("\t%10.2lf\n",mkl_csr_op_time_best_serial/mkl_csr_op_time_best);
+			}
+			if(want_mkl_bench_gem)
+			{
+					RSBENCH_STDOUT("%%:MKL_GEMV_PERFORMANCE:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",true_gem_Mflops/mkl_gem_op_time_best);
+
+					RSBENCH_STDOUT("%%:MKL_GEMV_OP_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",mkl_gem_op_time_best);
+
+					if( mkl_gem_op_time_best_serial != RSB_CONST_IMPOSSIBLY_BIG_TIME )
+					RSBENCH_STDOUT("%%:MKL_GEMV_PERF_SCALING:"),RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH(),
+					RSBENCH_STDOUT("\t%10.2lf\n",mkl_gem_op_time_best_serial/mkl_gem_op_time_best);
+			}
+
+					if( mkl_coo2csr_time != RSB_TIME_ZERO )
+					{
+					RSBENCH_STDOUT("%%:MKL_COO2CSR_T0_CSR_TIME:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",mkl_coo2csr_time);
+					RSBENCH_STDOUT("%%:MKL_COO2CSR_T0_CSR_OP:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",mkl_coo2csr_time/mkl_csr_op_time_best);
+
+
+					RSBENCH_STDOUT("%%:SORTEDCOO2RSB_VS_MKLCOO2CSR:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.3lf\n", (msat+meit)/(mkl_coo2csr_time));
+					}
+				} /* want_mkl_bench */
+#endif /* RSB_WANT_MKL */
+')dnl
+dnl
+dnl
+				if(want_getrow_bench)
+				{
+					const char*norsbnotice="";
+					const char*rsbnotice="NORSB_";
+					const char*notice=norsbnotice;
+				if(!(want_verbose == RSB_BOOL_TRUE && (RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING)||fn==1)))
+					notice = rsbnotice;
+
+					RSBENCH_STDOUT("%%:%sGETROW_PERFORMANCE:",notice);RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",((rsb_time_t)mtxAp->nnz)/(RSB_REAL_MILLION*getrow_op_time_best));
+					RSBENCH_STDOUT("%%:%sGETROW_OP_TIME:",notice);RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",getrow_op_time_best);
+					RSBENCH_STDOUT("%%:%sGETROW_TO_SPMV_OP_TIME:",notice);RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",getrow_op_time_best/best_t);
+dnl					RSBENCH_STDOUT("%%:GETROW_PERF_SCALING:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+dnl					RSBENCH_STDOUT("\t%10.2lf\n",/getrow_op_time_best);
+
+				}
+dnl
+dnl
+				if(want_getdiag_bench)
+				{
+					const char*norsbnotice="";
+					const char*rsbnotice="NORSB_";
+					const char*notice=norsbnotice;
+				if(!(want_verbose == RSB_BOOL_TRUE && (RSB_DO_FLAG_HAS(flags,RSB_FLAG_QUAD_PARTITIONING)||fn==1)))
+					notice = rsbnotice;
+
+					RSBENCH_STDOUT("%%:%sGETDIAG_PERFORMANCE:",notice);RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.2lf\n",((rsb_time_t)mtxAp->nr)/(RSB_REAL_MILLION*diag_op_time_best));
+					RSBENCH_STDOUT("%%:%sGETDIAG_OP_TIME:",notice);RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",diag_op_time_best);
+					RSBENCH_STDOUT("%%:%sGETDIAG_TO_SPMV_OP_TIME:",notice);RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+					RSBENCH_STDOUT("\t%10.6lf\n",diag_op_time_best/best_t);
+dnl					RSBENCH_STDOUT("%%:GETDIAG_PERF_SCALING:");RSBENCH_STDOUT_MATRIX_ESSENTIALS_RSBENCH();
+dnl					RSBENCH_STDOUT("\t%10.2lf\n",/diag_op_time_best);
+
+				}
+dnl
+dnl
+				RSBENCH_STDOUT( "#\n");/* end of record */
+dnl
+dnl
+				if(guess_blocking_test)
+				{
+					rsb_flags_t oflags = RSB_FLAG_NOFLAGS;
+					/* TODO : should keep info of the worst, too */
+					rsb_perf_t nrp=(true_Mflops/op_t),bomta = RSB_REAL_ZERO /* best op memory traffic amount */;
+
+					if(guess_blocking_test==1)
+					{
+						if( nrp>RSB_REAL_ZERO && nrp>bperf)
+						{
+							bperf=nrp;
+							bomta=omta;
+							bfillin=fillin;
+							ebfillin=efillin;
+							bri=brvi;
+							bci=bcvi;
+						}
+					
+						if(brv[brvi]==1 && bcv[bcvi]==1)/* IF ANY! */
+						{
+							cperf=nrp;
+						}
+ 
+						if((nrp>RSB_REAL_ZERO && nrp<wperf) || wperf == RSB_REAL_ZERO)
+						{
+							wperf=nrp;
+						}
+
+						if( fillin > maxfillin )
+						{
+							maxfillin=fillin;
+						}
+					}
+
+					if( guess_blocking_test==2) 
+					{
+						egfillin=efillin;
+						RSBENCH_STDOUT("# GUESS DATA:  best performance was       :	%zd	%zd\n", (size_t)brv[bri], (size_t)bcv[bci] );
+						RSBENCH_STDOUT("# GUESS DATA:  guessed was                :	%zd	%zd\n", (size_t)br, (size_t)bc );
+						RSBENCH_STDOUT("# GUESS DATA:  performance diff from best :	%lg\n", (nrp-bperf)/bperf );
+						RSBENCH_STDOUT("# GUESS DATA:  performance diff from worst:	%lg\n", (nrp-wperf)/wperf );
+						if(cperf)
+						RSBENCH_STDOUT("# GUESS DATA:  performance diff over CSR:	%lg\n", (nrp-cperf)/cperf );
+						RSBENCH_STDOUT("# GUESS DATA:  best/guessed op matrix traffic amount:	%lg	%lg\n", bomta,omta);
+						RSBENCH_STDOUT("#GUESS_TEST_:%-20s\t%20s\t%zd\t%zd\t%zd\t%zd\t%zd\t%zd\n",
+							rsb__basename(filename),
+							rsb__sprint_matrix_implementation_code2(mtxAp,buf,flags),
+				(rsb_printf_int_t)((nrp>=bperf*.95) || (brv[bri]==br && bcv[bci]==bc)),	/* (fuzzy WIN) */
+				(rsb_printf_int_t)((nrp>=bperf) || (brv[bri]==br && bcv[bci]==bc)),	/* if 1, best blocking guess (WIN) */
+				(rsb_printf_int_t)(nrp>=bperf),			/* if 1, best performance guess */
+				(rsb_printf_int_t)(brv[bri]==br && bcv[bci]==bc),	/* if 1, best blocking guess */
+				(rsb_printf_int_t)(nrp>=cperf),	/* if 0, we lose over (our) plain CSR  */
+				(rsb_printf_int_t)(nrp> wperf)	/* if 0, we performed as the worst blocking! */
+							);
+					flags=oflags;
+
+					RSBENCH_STDOUT(	"#GUESS_TEST:%-20s\t%-20s"
+						"\t%10.2lf"
+						"\t%10.2lf"
+						"\t%zd" "\t%zd"
+						"\t%10.4lf" "\t%10.2lf" "\t%10.4lf" "\t%10.2lf" "\t%10.4lf" "\n"
+						,
+						rsb__basename(filename),
+						rsb__sprint_matrix_implementation_code2(mtxAp,buf,flags),	
+						/* grmflops */
+						raw_Mflops/op_t,
+						/* egfillin */
+						egfillin,
+						/* bbr */
+						(rsb_printf_int_t)brv[bri],
+						/* bbc */
+						(rsb_printf_int_t)bcv[bci],
+						/* bfillin */
+						bfillin,
+						/* brmflops */
+						bperf*bfillin,
+						/* ebfillin */
+						ebfillin,
+						/* csrmflops */
+						cperf,
+						/* maxfillin */
+						maxfillin);
+
+						flags=oflags;
+					}
+				
+
+					if(brvi==brl-1 && bcvi==bcl-1 && guess_blocking_test==1)
+					{
+						oflags=flags;
+						RSB_DO_FLAG_ADD(flags,RSB_FLAG_AUTO_BLOCKING);
+						guess_blocking_test++;
+						--bcvi;	/* one more round :) */
+					}
+				} /* guess_blocking_test */
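+				/*
+				 * The guess_blocking_test logic above runs two phases: in phase
+				 * 1, every (br,bc) blocking candidate is benchmarked and the
+				 * best, worst and CSR (1x1) performances recorded; once the
+				 * scan is over, an extra run is triggered with
+				 * RSB_FLAG_AUTO_BLOCKING set (phase 2), whose outcome is then
+				 * compared against those records.
+				 */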
+dnl
+dnl
+')dnl
+dnl
+')dnl
+dnl
+		erri:
+			if(want_in_place_assembly && mtxAp)
+			{
+dnl				rsb_coo_sort(VA,IA,JA,mtxAp->nnz,nrA,ncA,typecode,RSB_FLAG_NOFLAGS);
+				rsb_time_t st = -rsb_time();
+				errval = rsb_mtx_switch_to_coo(mtxAp,&VA,&IA,&JA,RSB_FLAG_SORTED_INPUT),mtxAp=NULL;
+				st += rsb_time();
+				RSBENCH_STDOUT("# rsb_mtx_switch_to_coo time: %lg.\n",st);
+				if(RSB_SOME_ERROR(errval)) { RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+			}
+			RSB_MTX_FREE(mtxAp);
+			RSB_CONDITIONAL_FREE(lhs);
+			RSB_CONDITIONAL_FREE(rhs);
+
+			RSB_CONDITIONAL_FREE(p_r);
+			RSB_CONDITIONAL_FREE(p_c);
+			
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_ERROR(RSB_ERRM_ES);goto err;
+			}
+			if(brl==0 || bcl==0) break;
+		} /* ci : core (count) index */
+
+			if(want_verbose == RSB_BOOL_TRUE)
+			{
+ifelse(RSB_M4_IS_SPSV_KERNEL_MOP(mop),1,`dnl
+            			RSBENCH_STDOUT("%%operation:matrix	CONSTRUCTOR[%d]	SPMV[%d]	SPMV[%d]	STSV[%d]	STSV[%d]\n",
+					ca[0], ca[0], ca[cl-1], ca[0], ca[cl-1]);
+            			RSBENCH_STDOUT("%%operation:%s	%lg	%lg	%lg	%lg	%lg\n",
+					rsb__basename(filename),sct,smt,pmt,sst,pst);
+')dnl
+ifelse(RSB_M4_IS_SPMV_KERNEL_MOP(mop),1,`dnl
+            			RSBENCH_STDOUT("%%operation:matrix	CONSTRUCTOR[%d]	SPMV[%d]	SPMV[%d]\n",ca[0],ca[0],ca[cl-1]);
+            			RSBENCH_STDOUT("%%operation:%s	%lg	%lg	%lg\n",
+					rsb__basename(filename),sct,smt,pmt);
+')dnl
+            			RSBENCH_STDOUT("%%constructor:matrix	SORT[%d]	SCAN[%d]	SHUFFLE[%d]	INSERT[%d]\n",
+					ca[0],ca[0],ca[0],ca[0]);
+ifelse(mop,`mat_stats',`',`dnl
+            			RSBENCH_STDOUT("%%constructor:%s	%lg	%lg	%lg	%lg\n",
+					rsb__basename(filename),sest,ssat,scpt,seit);
+')dnl
+			}
+		} /* ti (transposition index) */
+	}
+	else
+	{
+		RSBENCH_STDOUT("%s (mop) : Please specify a matrix filename (with -f)\n",argv[0]);
+	}
+dnl
+ 	RSBENCH_STDOUT("# so far, the program took %.3lfs of wall clock time; ancillary tests %.3lfs; I/O %.3lfs; checks %.3lfs; conversions %.3lfs; rsb/mkl tuning %.3lfs/%.3lfs ",totprt + rsb_time(),totatt,totiot,totht,totct,tottt,totmt);
+	RSBENCH_STDOUT(".\n"); /* FIXME: this takes too much space here ! */
+	rsb__getrusage();
+dnl
+done:
+dnl
+ifelse(mop,`mat_stats',`dnl
+	RSB_CONDITIONAL_FREE(nnzs);
+	RSB_CONDITIONAL_FREE(element_count );
+	RSB_CONDITIONAL_FREE(block_count   );
+')dnl
+dnl
+frv:
+dnl
+	if( !should_recycle_io )
+	{
+		RSBENCH_STDOUT("# Freeing I/O arrays.\n");
+		RSB_CONDITIONAL_FREE(IA);
+		RSB_CONDITIONAL_FREE(JA);
+		RSB_CONDITIONAL_FREE(VA);
+	}
+	
+	if(mtxAp && !should_recycle_matrix){RSB_MTX_FREE(mtxAp)}
+	if( brv != rua ) {RSB_CONDITIONAL_FREE(brv);}
+	if( bcv != cua ) {RSB_CONDITIONAL_FREE(bcv);}
+dnl
+		RSBENCH_MAY_SQUIT(ret,{}) /* early end of program */
+		RSBENCH_MAY_TQUIT(ret,{}) /* early end of program */
+dnl
+	}	/* typecodesi */
+	}	/* nrhsi */
+	}	/* incXi */
+	}	/* incYi */
+nfnm:	RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS;
+	}	/* filenamei */
+dnl
+	RSBENCH_STDOUT("# benchmarking terminated --- finalizing run.\n");
+dnl
+#if RSB_WANT_PERFORMANCE_COUNTERS_IN_RSBENCH 
+	errval = rsb_perf_counters_finalize();
+	if(RSB_SOME_ERROR(errval)) { RSB_PERR_GOTO(err,RSB_ERRM_ES); }
+#endif
+dnl
+ret:
+dnl
+	errval = RSB_ERR_NO_ERROR;
+goto rret;
+dnl
+err:
+dnl
+	rsb_perror(NULL,errval);
+	errval = RSB_ERR_GENERIC_ERROR;
+dnl
+ifelse(mop,`mat_stats',`dnl
+	RSB_CONDITIONAL_FREE(nnzs);
+	RSB_CONDITIONAL_FREE(element_count );
+	RSB_CONDITIONAL_FREE(block_count   );
+')dnl
+	RSB_CONDITIONAL_FREE(IA);
+	RSB_CONDITIONAL_FREE(JA);
+	RSB_CONDITIONAL_FREE(VA);
+dnl	if(want_in_place_assembly && mtxAp)rsb_coo_sort(VA,IA,JA,mtxAp->nnz,nrA,ncA,typecode,RSB_FLAG_NOFLAGS);
+	if(want_in_place_assembly && mtxAp)rsb_mtx_switch_to_coo(mtxAp,&VA,&IA,&JA,RSB_FLAG_SORTED_INPUT),mtxAp=NULL;
+	RSB_MTX_FREE(mtxAp);
+	if( brv != rua ) {RSB_CONDITIONAL_FREE(brv);}
+	if( bcv != cua ) {RSB_CONDITIONAL_FREE(bcv);}
+	if(RSB_SOME_ERROR(rsb_lib_exit(RSB_NULL_EXIT_OPTIONS)))return RSB_ERR_GENERIC_ERROR;
+dnl
+rret:
+	if(want_perf_dump) 
+	{
+		RSBENCH_STDOUT("# ====== BEGIN Total summary record.\n");
+		errval = rsb__pr_dump(rspr, filenamea, ca, incXa, incYa, nrhsa, typecodes, NULL );
+		RSBENCH_STDOUT("# ======  END  Total summary record.\n");
+		if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		errval = rsb__pr_save(fprfn, rspr, filenamea, ca, incXa, incYa, nrhsa, typecodes, NULL, RSB_BOOL_TRUE );
+		if(RSB_SOME_ERROR(errval)) RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		RSBENCH_STDOUT("# Removing the temporary record file %s.\n",cprfn);
+		remove(cprfn);
+	}
+	if( ca  != ca_ ) {RSB_CONDITIONAL_FREE(ca);}
+#if !RSB_RSBENCH_STATIC_FILENAMEA
+	/* if(filenamea!=&fnbufp)RSB_CONDITIONAL_FREE(filenamea); */
+	if(filenamea!=&fnbufp)free(filenamea); /* FIXME */
+#endif
+	if(nrhsa!=(&nrhs))RSB_CONDITIONAL_FREE(nrhsa); /* FIXME: they get allocated (and thus shall be deallocated) before init */
+	if(incXa!=(&incX))RSB_CONDITIONAL_FREE(incXa);
+ 	if(incYa!=(&incY))RSB_CONDITIONAL_FREE(incYa); 
+dnl
+	if(want_likwid == RSB_BOOL_TRUE){RSB_LIKWID_MARKER_EXIT;} /* FIXME: and other cases ? */
+dnl
+	if(want_verbose == RSB_BOOL_TRUE)
+		rsb__echo_timeandlabel(" terminating run at ","\n",&st);
+dnl
+	return errval;
+}
+')dnl
+dnl popdef(`mtype')dnl
+popdef(`mop')dnl
+')dnl
+dnl
+dnl
+dnl
+dnl
+dnl define(`RSB_M4_MATRIX_META_OPS',(RSB_M4_LIST_PUSH_BACK(RSB_M4_MATRIX_META_OPS,`matrix_stats')))dnl
+dnl
+/* one function for each of RSB_M4_MATRIX_META_OPS_REDUCED*/
+foreach(`mop',RSB_M4_MATRIX_META_OPS_REDUCED,`dnl
+RSB_M4_COMPLETE_TEST_PROGRAM_FUNCTION(mop)
+')dnl
+dnl
+
+dnl
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+ifdef(`ONLY_WANT_HEADERS',`
+#endif	/* RSB_TEST_MATOPS_H_INCLUDED */
+')
+/* @endcond */
+dnl
diff --git a/rsb_tune.c b/rsb_tune.c
new file mode 100644
index 0000000..aa80d1e
--- /dev/null
+++ b/rsb_tune.c
@@ -0,0 +1,1776 @@
+/*
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief Auto tuning mechanisms.
+ * */
+
+#include <unistd.h>	/* sysconf */
+#include "rsb_internals.h"
+#include "rsb_lock.h"
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+#define RSB_TUNE_WITH_LIKWID 0 /* TODO: for future reference */
+
+#if RSB_TUNE_WITH_LIKWID
+#include <likwid.h>
+#define RSB_LIKWID_MARKER_INIT	likwid_markerInit()
+#define RSB_LIKWID_MARKER_EXIT likwid_markerClose()
+#define RSB_LIKWID_MARKER_R_START(R) likwid_markerStartRegion(R)
+#define RSB_LIKWID_MARKER_R_STOP(R) likwid_markerStopRegion(R)
+#else /* RSB_TUNE_WITH_LIKWID */
+#define RSB_LIKWID_MARKER_INIT
+#define RSB_LIKWID_MARKER_EXIT
+#define RSB_LIKWID_MARKER_R_START(R)
+#define RSB_LIKWID_MARKER_R_STOP(R)
+#endif /* RSB_TUNE_WITH_LIKWID */
+
+#if RSB_TUNE_WITH_LIKWID
+#define RSB_TM_LIKWID_MARKER_R_START(R) if(want_likwid == RSB_BOOL_TRUE)RSB_LIKWID_MARKER_R_START(R)
+#define RSB_TM_LIKWID_MARKER_R_STOP(R)  if(want_likwid == RSB_BOOL_TRUE)RSB_LIKWID_MARKER_R_STOP(R)
+#else
+#define RSB_TM_LIKWID_MARKER_R_START(R)
+#define RSB_TM_LIKWID_MARKER_R_STOP(R)
+#endif /* RSB_TUNE_WITH_LIKWID */
+#if RSB_TUNE_WITH_LIKWID
+//#define want_likwid RSB_BOOL_TRUE
+#endif /* RSB_TUNE_WITH_LIKWID */
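+
+/*
+ * When RSB_TUNE_WITH_LIKWID is enabled, the marker macros above delimit named
+ * code regions for external measurement, e.g. (assuming the usual LIKWID
+ * marker API workflow) by running the tuning benchmark under
+ * "likwid-perfctr -m -g FLOPS_DP ...".
+ */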
+
+#define rsb__calloca_vector rsb__calloc_vector
+#define RSB_CONDITIONAL_FREE_ALLOCA RSB_CONDITIONAL_FREE
+
+#if 0
+#define RSB_THREAD_STATS RSB_PRINT_THREAD_STATS
+#else
+#define RSB_THREAD_STATS
+#endif
+
+#define RSB_AT_NO_VERBOSE(VERBOSE) ((VERBOSE)<1)
+#define RSB_VERBOSE_FOR_MSG(VERBOSE) ((VERBOSE)>2)
+#define RSB_VERBOSE_FOR_SAVING(VERBOSE) ((VERBOSE)>1)
+
+#define RSB_ATR_SAVENAME(FN,ERRVAL,BASEFILENAME,TYPECODE,TRANSA,NRHS,SVAL,ROP,RVAL,TVAL,MTX,IOT,W_SPMM)	 { \
+       	char ename[RSB_MAX_FILENAME_LENGTH]; \
+	if(TVAL>0) \
+	{	\
+	       	sprintf(ename,"--%s-tuned_%s%d_%0lgx%0dth.eps",(W_SPMM?"mv":"sv"),ROP,RVAL,SVAL,TVAL); \
+       	} \
+       	else \
+       	{ \
+		sprintf(ename,"--base.eps"); \
+	} \
+	sprintf(FN,"%s--%c-%c-%d%s",BASEFILENAME,TYPECODE,RSB_TRANSPOSITION_AS_CHAR(TRANSA),NRHS,ename); \
+	}
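+
+/*
+ * For instance, given BASEFILENAME "A.mtx", TYPECODE 'D', a TRANSA for which
+ * RSB_TRANSPOSITION_AS_CHAR yields 'N', NRHS 1, SVAL 2, ROP "r", RVAL 0,
+ * TVAL 4 and W_SPMM true, the macro above composes the plot file name
+ * "A.mtx--D-N-1--mv-tuned_r0_2x4th.eps"; with TVAL <= 0 it composes
+ * "A.mtx--D-N-1--base.eps" instead.
+ */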
+
+#if 0
+#define RSB_ATR_SAVE(ERRVAL,BASEFILENAME,TYPECODE,TRANSA,NRHS,SVAL,ROP,TVAL,MTX,IOT,W_SPMM) { RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS; }
+#else
+#define RSB_ATR_SAVE(ERRVAL,BASEFILENAME,TYPECODE,TRANSA,NRHS,SVAL,ROP,RVAL,TVAL,MTX,IOT,W_SPMM) { \
+       	char fname[RSB_MAX_FILENAME_LENGTH]; \
+	RSB_ATR_SAVENAME(fname,ERRVAL,BASEFILENAME,TYPECODE,TRANSA,NRHS,SVAL,ROP,RVAL,TVAL,MTX,IOT,W_SPMM)	  \
+       	(ERRVAL) = rsb_mtx_rndr(fname,MTX,RSB_DEFAULT_MATRIX_RENDERING_ROWS,RSB_DEFAULT_MATRIX_RENDERING_COLS,/*RSB_MARF_EPS_B*/RSB_MARF_EPS_L); \
+      	RSB_STDOUT("Saved plot to  %s\n",fname); \
+       	(IOT) += rsb_time(); \
+}
+#endif
+
+#define RSB_AP "" /* FIXME */
+#define RSB_INVALID_SVAL -1.0 /* */
+#define RSB_PERF_ZERO RSB_REAL_ZERO /* TODO: to rsb_tune.h */
+
+static double rsb__estimate_mflops_per_op_spmv(size_t Ec, rsb_type_t typecode, rsb_flags_t flags)
+/* generated by the RSB_M4_ESTIMATE_MFLOPS_PER_MOP_FUNCTION macro */
+{
+	/*!
+	 * \ingroup gr_internals
+	 * A function which returns the approximate count of floating point operations
+	 * needed to perform the "spmv_uaua" matrix operation.
+	 * In the symmetric/hermitian case, the operation count is multiplied by two.
+	 * In the complex case, each nonzero accounts for eight real operations instead of two:
+	 * the product (a+bi)*(c+di) = (ac-bd)+(ad+bc)i
+	 * costs four real multiplications and two real additions, plus two more additions for the accumulation.
+	 * FIXME: complexity is NOT taken into consideration for non-SPMV/SPSV operations.
+	 */
+
+	const double M_  = 1000000.0;
+	double Me = Ec;
+
+	if(RSB_IS_MATRIX_TYPE_COMPLEX(typecode)) { Me=8*Ec; } else { Me=2*Ec; }
+	if(( flags & RSB_FLAG_SOME_SYMMETRY )){ Me*=2; }/* slightly optimistic : should subtract the diagonal elements count */
+	Me /= M_;
+	return Me;
+}
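+
+/*
+ * A worked instance of the estimate above (an illustrative sketch, not
+ * compiled in; the type and flag macro names are the usual librsb ones and
+ * are assumed to be in scope):
+ */
+#if 0
+	{
+		/* 1e6 nonzeroes, double complex, symmetric: (8*1e6)*2/1e6 = 16 Mflops per SPMV */
+		double mf_z = rsb__estimate_mflops_per_op_spmv(1000000, RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX, RSB_FLAG_SYMMETRIC);
+		/* 1e6 nonzeroes, double, unsymmetric: (2*1e6)/1e6 = 2 Mflops per SPMV */
+		double mf_d = rsb__estimate_mflops_per_op_spmv(1000000, RSB_NUMERICAL_TYPE_DOUBLE, RSB_FLAG_NOFLAGS);
+	}
+#endif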
+
+void rsb__tattr_init(struct rsb_tattr_t* TTRP, const struct rsb_mtx_t*MTXAP, rsb_coo_idx_t nrA, rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_flags_t flags, rsb_int_t nrhs)
+{
+	if(!TTRP)
+		goto err;
+
+	RSB_BZERO_P(TTRP);
+	(TTRP)->mint = RSB_TRACE_MAX_THREADS_P1;
+	(TTRP)->maxt = 0;
+	if(MTXAP)
+	{
+		(TTRP)->mtxAc = *(MTXAP); /*(TTRP)->mtxAp = (MTXAP);*/
+		rsb__init_blank_pointers(&(TTRP)->mtxAc);
+       		(TTRP)->bpn = ((rsb_real_t)rsb__get_index_storage_amount(MTXAP))/((MTXAP)->nnz);
+       		(TTRP)->ofe = rsb__estimate_mflops_per_op_spmv_uaua(MTXAP);
+	}
+	else
+	{
+		size_t idx = 4;
+		idx *= nrA + nnz ;
+       		(TTRP)->bpn = idx;
+       		(TTRP)->bpn /= nnz;
+       		(TTRP)->ofe = rsb__estimate_mflops_per_op_spmv(nnz, typecode, flags);
+	}
+       	(TTRP)->ttt = -rsb_time(); /* FIXME: if there is no rsb__tattr_finalize, who shall update this? */
+       	(TTRP)->ofe *= nrhs;
+	(TTRP)->btpo = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+	(TTRP)->dtpo = RSB_TIME_ZERO;
+	(TTRP)->vl = 0;
+err:
+	return;
+}
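+
+/*
+ * A worked instance of the fallback bpn (index bytes per nonzero) estimate
+ * above, a sketch assuming the 4-byte indices its constant suggests: with
+ * nrA = 1e5 and nnz = 1e6, bpn = 4*(1e5+1e6)/1e6 = 4.4 bytes per nonzero,
+ * i.e. roughly what a CSR-like layout with one row pointer per row and one
+ * column index per nonzero would occupy.
+ */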
+
+void rsb__tattr_sets(struct rsb_tattr_t* ttrp, rsb_int_t dnt, rsb_int_t nt, rsb_time_t tpo, rsb_int_t bnt, rsb_int_t nits)
+{
+	/* TODO: verbose to a stream */
+
+	if(!ttrp)
+		goto ret;
+
+	if(nt == 0)
+	{
+		nt = dnt = bnt = rsb_get_num_threads();
+	}
+
+	if(nt>0 && nt < RSB_TRACE_MAX_THREADS_P1)
+	{
+       		(ttrp)->tpo[(nt)] = (tpo),
+       		(ttrp)->nit[(nt)] = (nits);
+	}
+	else
+	{
+		RSB_ERROR("Threads specification (%d) is wrong!",nt);
+		RSB_ASSERT(0);/* TODO: write error case here */
+	}
+	(ttrp)->mint = RSB_MIN((ttrp)->mint,( nt));
+	(ttrp)->maxt = RSB_MAX((ttrp)->maxt,( nt));
+	(ttrp)->optt = (bnt);
+	(ttrp)->deft = (dnt);
+	(ttrp)->btpo = RSB_MIN((ttrp)->btpo,tpo);
+	if((dnt)==(nt))
+	       	(ttrp)->dtpo = (tpo);
+
+	if(ttrp->vl>0)
+		RSB_STDOUT("# for %d threads, %d best threads, %d default threads, %0.4lgs p.op, %0.4lg Mflops\n",nt,bnt,dnt,tpo,(ttrp->ofe)/tpo);
+ret:
+	return;
+}
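+
+/*
+ * Example of the bookkeeping above: a call rsb__tattr_sets(ttrp, 4, 2, 0.01, 2, 20)
+ * records that 20 iterations at 2 threads took 0.01s per operation, widens the
+ * sampled [mint,maxt] thread range to include 2, marks 2 as the best and 4 as
+ * the default thread count, and leaves dtpo untouched (since dnt != nt here).
+ */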
+
+// #define RSB_ITS "%d" /* operations */
+#define RSB_ITS "%6.3lf" /* operations (e.g. needed to amortize tuning time) */
+#define RSB_BPS "%6.3lf" /* bytes per non zero */
+#define RSB_PPS "%6.2le" /* performance (mflops) */
+#define RSB_SPS "%6.3lf" /* speedup */
+#define RSB_CPS "%6.1lf" /* percentage */
+#define RSB_TPS "%8.2le" /* time */
+
+#define RSB_TTRP_OK(TTRP) ( (TTRP)->mint <= (TTRP)->maxt && (TTRP)->maxt > 0 )
+
+static void rsb__tattr_dump(FILE*FP, struct rsb_tattr_t* TTRP)
+{
+	rsb_int_t curt; /* FIXME: this shall go to a pair of .dat/.gp files */ /* FIXME: bpnz printout ? */
+
+	if(!TTRP)
+		goto ret;
+
+       	if( RSB_TTRP_OK (TTRP) )
+	{
+       	RSB_FPRINTF(FP,"%s",RSB_AP);
+	if(!RSB_MATRIX_UNSUPPORTED_TYPE(TTRP->mtxAc.typecode))
+		RSB_FPRINTF(FP,"# " RSB_PRINTF_MATRIX_AT_SUMMARY_ARGS( &((TTRP)->mtxAc))), RSB_FPRINTF(FP,"\n");
+       	RSB_FPRINTF(FP,RSB_AP"# [mint:%d ... optt:%d ... maxt:%d], btpo:"RSB_PPS" bpn:"RSB_BPS" dtn:%d\n",(TTRP)->mint,(TTRP)->optt,(TTRP)->maxt,(TTRP)->btpo,(TTRP)->bpn,(TTRP)->deft);
+       	RSB_FPRINTF(FP,RSB_AP"# threads perf bpn tpo subm times\n");
+       	for(curt=(TTRP)->mint;curt<=(TTRP)->maxt;curt++)
+	{
+		RSB_FPRINTF(FP,RSB_AP" %d "RSB_TPS" "RSB_BPS" "RSB_TPS" %d %d\n",curt,(TTRP)->ofe/(TTRP)->tpo[curt],(TTRP)->bpn,(TTRP)->tpo[curt],
+				RSB_MAX(1,(TTRP)->mtxAc.all_leaf_matrices_n),(TTRP)->nit[curt]
+				);
+	}
+	RSB_FPRINTF(FP,RSB_AP"\n"RSB_AP"\n");
+	}
+ret:
+	return;
+}
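+
+/*
+ * Shape of the record emitted above (a sketch: the values are placeholders,
+ * laid out as the RSB_PPS/RSB_BPS/RSB_TPS formats prescribe):
+ *
+ *   # [mint:1 ... optt:4 ... maxt:8], btpo:2.50e-03 bpn: 4.400 dtn:8
+ *   # threads perf bpn tpo subm times
+ *    1 4.00e+00  4.400 5.00e-03 16 20
+ *    2 7.40e+00  4.400 2.70e-03 16 20
+ */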
+
+static void rsb__attr_init(struct rsb_attr_t* TTRP, rsb_bool_t DT, const struct rsb_mtx_t*MTXAP, const char*BASEFILENAME, rsb_trans_t TRANSA, rsb_int_t NRHS, enum rsb_op_t op)
+{
+	if(!MTXAP || ! TTRP)
+		goto ret;
+
+	RSB_BZERO_P(TTRP);
+	(TTRP)->lfp = stdout;
+	(TTRP)->pfp = stdout;
+	(TTRP)->dtr = (DT);
+	(TTRP)->nrhs = (NRHS);
+	(TTRP)->transA = (TRANSA);
+
+	if((TTRP)->dtr)
+	{
+       		/*char ename[RSB_MAX_FILENAME_LENGTH];*/
+       		char fname[RSB_MAX_FILENAME_LENGTH];
+		char * ocs = op == rsb_op_spmv ? "mv" : "sv";
+
+		(TTRP)->typecode = (MTXAP)->typecode;
+		sprintf(fname,"%s--%c-%c-%d--%s%s",BASEFILENAME,(MTXAP)->typecode,RSB_TRANSPOSITION_AS_CHAR(TRANSA),NRHS,ocs,/*ename*/"-tuning_trace.dat");
+		(TTRP)->lfp = rsb__util_fopen(fname,"w");
+		sprintf(fname,"%s--%c-%c-%d--%s%s",BASEFILENAME,(MTXAP)->typecode,RSB_TRANSPOSITION_AS_CHAR(TRANSA),NRHS,ocs,/*ename*/"-tuning_trace.gnu");
+		sprintf((TTRP)->bname,"%s--%c-%c-%d--%s%s",BASEFILENAME,(MTXAP)->typecode,RSB_TRANSPOSITION_AS_CHAR(TRANSA),NRHS,ocs,/*ename*/"-tuning_trace");
+		(TTRP)->pfp = rsb__util_fopen(fname,"w");
+		sprintf((TTRP)->mname,"%s",BASEFILENAME);
+	}
+ret:
+	return;
+}
+
+void rsb__attr_dump(struct rsb_attr_t*TTRP)
+{
+	if(TTRP && TTRP->dtr)
+	{
+	       	rsb_int_t tri; /* FIXME: this shall go to a pair of .dat/.gp files */
+	       	rsb_int_t hmkl = ( RSB_TTRP_OK ( &((TTRP)->clattr)) );
+		rsb_int_t smaxt = rsb__set_num_threads(/*RSB_THREADS_GET_MAX*/RSB_THREADS_GET_MAX_SYS),msmaxth = smaxt;
+		rsb_time_t bptpo = RSB_CONST_IMPOSSIBLY_BIG_TIME, fdtpo = RSB_CONST_IMPOSSIBLY_BIG_TIME;
+		rsb_perf_t ofe = RSB_PERF_ZERO;
+		rsb_int_t br = 0;
+		rsb_time_t ttt = RSB_TIME_ZERO, stt = RSB_TIME_ZERO;
+		struct rsb_mtx_t mtxAc = (TTRP)->tattra[0].mtxAc ;
+      	 	RSB_FPRINTF((TTRP)->lfp,RSB_AP"# BEGIN TRACE RECORD [%d entries]\n",(TTRP)->trc);
+
+		rsb__tattr_dump((TTRP)->lfp, (& ((TTRP)->clattr)) );
+
+       		for(tri=0;tri<(TTRP)->trc;tri++) {
+       			RSB_FPRINTF((TTRP)->lfp,RSB_AP"# RECORD %d:\n",tri+1);
+			rsb__tattr_dump((TTRP)->lfp, (&((TTRP)->tattra[tri])) );
+			if( (TTRP)->tattra[ br ].btpo > (TTRP)->tattra[ tri ].btpo ) br = tri;
+		}
+		((TTRP)->br) = br;
+       		RSB_FPRINTF((TTRP)->lfp,RSB_AP"# best path (threads perf bpn tpo subm spdp spdppcnt): \n");
+
+       		for(tri=0;tri<(TTRP)->trc;tri++)
+		{
+       			if( tri == 0 )
+				fdtpo = (TTRP)->tattra[0].dtpo;
+			if( bptpo > (TTRP)->tattra[tri].btpo )
+			{
+	       			ofe = (TTRP)->tattra[tri].ofe;
+				bptpo = (TTRP)->tattra[tri].btpo;
+				RSB_FPRINTF((TTRP)->lfp,RSB_AP" %d "RSB_TPS" "RSB_BPS" "RSB_TPS" %d "RSB_SPS" "RSB_CPS,(TTRP)->tattra[tri].optt,ofe/(TTRP)->tattra[tri].btpo,(TTRP)->tattra[tri].bpn,(TTRP)->tattra[tri].btpo,(TTRP)->tattra[tri].mtxAc.all_leaf_matrices_n,fdtpo/(TTRP)->tattra[tri].btpo, RSB_SPEEDUP_TO_PCT(fdtpo/(TTRP)->tattra[tri].btpo)
+						);
+				/* FIXME: may put here speedup over MKL */
+				RSB_FPRINTF((TTRP)->lfp,"\n");
+			}
+		}
+		ttt = ((TTRP)->tattra[0 ].ttt);
+       		for(tri=1;tri<(TTRP)->trc;tri++)
+			stt += ((TTRP)->tattra[tri].ttt);
+		RSB_FPRINTF((TTRP)->lfp,RSB_AP"\n"RSB_AP"\n");
+       		RSB_FPRINTF((TTRP)->lfp,RSB_AP"# SUMMARY AUTOTUNING ttt="RSB_TPS" stt="RSB_TPS" \n",ttt,stt);
+       		RSB_FPRINTF((TTRP)->lfp,RSB_AP"# SUMMARY: mtx ");
+       		RSB_FPRINTF((TTRP)->lfp," sym transa typecode nr nc nnz nrhs");
+       		if( hmkl )
+       			RSB_FPRINTF((TTRP)->lfp," mkl-d mkl-t rsb-vs-mkl-d rsb-vs-mkl-t rsb-vs-mkl-s"),
+       			RSB_FPRINTF((TTRP)->lfp," k-mkl-rsb-t k-mkl-vs-rsb-s k-rsb-rsb-t k-rsb-vs-rsb-s");
+       		RSB_FPRINTF((TTRP)->lfp," rsb-d rsb-t rsb-s rsb-vs-rsb-t rsb-vs-rsb-s");
+       		RSB_FPRINTF((TTRP)->lfp," bpn-d bpn-s");
+       		RSB_FPRINTF((TTRP)->lfp," subm-d subm-s");
+       		RSB_FPRINTF((TTRP)->lfp," speedup-d speedup-s");
+       		RSB_FPRINTF((TTRP)->lfp,"\n");
+       		RSB_FPRINTF((TTRP)->lfp,RSB_AP"# SUMMARY: %s ",(TTRP)->mname);
+       		RSB_FPRINTF((TTRP)->lfp," %c %c %c %d %d %d %d",
+				rsb__do_get_symmetry_char(&mtxAc), RSB_TRANSPOSITION_AS_CHAR((TTRP)->transA), mtxAc.typecode,
+				mtxAc.nr,
+				mtxAc.nc,
+				mtxAc.nnz,
+				(TTRP)->nrhs
+				);
+       		if( hmkl )
+       			RSB_FPRINTF((TTRP)->lfp," "RSB_PPS " "RSB_PPS" "RSB_SPS" "RSB_SPS" "RSB_SPS" "
+				,ofe/(TTRP)->clattr.dtpo, ofe/(TTRP)->clattr.btpo
+				,(TTRP)->clattr.dtpo/(TTRP)->tattra[0 ].dtpo
+				,(TTRP)->clattr.btpo/(TTRP)->tattra[0 ].btpo
+				,(TTRP)->clattr.btpo/(TTRP)->tattra[br].btpo
+					),
+       			RSB_FPRINTF((TTRP)->lfp," "RSB_ITS" "RSB_ITS" "RSB_ITS" "RSB_ITS" "
+				,( ttt / ((TTRP)->clattr.dtpo-(TTRP)->tattra[0 ].btpo) )
+				,( stt / ((TTRP)->clattr.dtpo-(TTRP)->tattra[br].btpo) )
+				,( ttt / ((TTRP)->tattra[0 ].dtpo-(TTRP)->tattra[0 ].btpo) )
+				,( stt / ((TTRP)->tattra[0 ].dtpo-(TTRP)->tattra[br].btpo) )
+				//,(TTRP)->clattr.dtpo-(TTRP)->tattra[0 ].dtpo
+				   );
+       		RSB_FPRINTF((TTRP)->lfp," "RSB_PPS" "RSB_PPS" "RSB_PPS" "RSB_SPS" "RSB_SPS,
+				ofe/(TTRP)->tattra[0].dtpo,ofe/(TTRP)->tattra[0].btpo,ofe/(TTRP)->tattra[(TTRP)->br].btpo
+				,(TTRP)->tattra[0].dtpo/(TTRP)->tattra[0 ].btpo
+				,(TTRP)->tattra[0].dtpo/(TTRP)->tattra[br].btpo
+				);
+       		RSB_FPRINTF((TTRP)->lfp," "RSB_BPS" "RSB_BPS,(TTRP)->tattra[0].bpn,(TTRP)->tattra[br].bpn);
+       		RSB_FPRINTF((TTRP)->lfp," %d %d",(TTRP)->tattra[0 ].mtxAc.all_leaf_matrices_n,(TTRP)->tattra[br].mtxAc.all_leaf_matrices_n);
+       		RSB_FPRINTF((TTRP)->lfp," "RSB_BPS" "RSB_BPS,
+				(TTRP)->tattra[0 ].tpo[1]/(TTRP)->tattra[0 ].tpo[(TTRP)->tattra[0 ].optt],
+				(TTRP)->tattra[br].tpo[1]/(TTRP)->tattra[br].tpo[(TTRP)->tattra[br].optt]
+				);
+       		RSB_FPRINTF((TTRP)->lfp,"\n");
+       		RSB_FPRINTF((TTRP)->lfp,RSB_AP"\n"RSB_AP"# END TRACE RECORD\n");
+
+  	 	RSB_FPRINTF((TTRP)->pfp,
+"set terminal postscript eps color\n\
+set output '%s.eps'\n\
+set xlabel 'threads'\n\
+set ylabel 'Mflops'\n\
+set title  'Trace of empirical auto-tuning of %s in the RSB format'\n\
+#set title  'Trace of empirical auto-tuning of matrix %s in the RSB format'\n\
+set xrange [ 1 : %d ] noreverse nowriteback\n\
+plot ",(TTRP)->bname,(TTRP)->bname,(TTRP)->mname,smaxt);
+
+ if(hmkl)
+ 	RSB_FPRINTF((TTRP)->pfp,"'%s.dat' index 0   u 1:2    with linespoints lw 2 lc rgb 'blue'   title 'MKL', ", (TTRP)->bname),
+ 	RSB_FPRINTF((TTRP)->pfp,"'%s.dat' index 0   u 1:2:((%lg*$3)) with circles  lc rgb 'blue'  fs border 0.15 noborder notitle, ",(TTRP)->bname,0.2); // 0.2 -> 0.1
+ if((TTRP)->trc > 0)
+  RSB_FPRINTF((TTRP)->pfp,"'%s.dat' index %d   u 1:2    with linespoints lw 2 lc rgb 'red' title 'RSB-t', ", (TTRP)->bname,hmkl?1:0),
+  RSB_FPRINTF((TTRP)->pfp,"'%s.dat' index %d   u 1:2:((%lg*$3)) with circles  lc rgb 'red'  fs border 0.15 noborder notitle, ",(TTRP)->bname,hmkl?1:0,0.2); // 0.2 -> 0.1
+ if((TTRP)->trc > 1)
+  RSB_FPRINTF((TTRP)->pfp,"'%s.dat' index %d:%d:1 u 1:2 with linespoints lw 2 lc    rgb 'green' title 'RSB-s', ",
+		  (TTRP)->bname, hmkl?2:1, (TTRP)->trc-(hmkl?0:1) ),
+  RSB_FPRINTF((TTRP)->pfp,"'%s.dat' index %d:%d:1 u 1:2:((%lg*$3)) with circles  lc rgb 'green' fs border 0.15 noborder notitle, ",
+		  (TTRP)->bname, hmkl?2:1, (TTRP)->trc-(hmkl?0:1), 0.2 ),
+  RSB_FPRINTF((TTRP)->pfp,"'%s.dat' index %d u 1:2 with linespoints lw 2 lc    rgb 'black' title 'top RSB', ",
+		  (TTRP)->bname, (TTRP)->trc+(hmkl?1:0) );
+ // '%s.dat' index 2   u 1:2:3:4 with vectors lw    2 lc rgb 'pink'  title 'pink', 
+					//, bname
+  for(tri=0;tri<(TTRP)->trc;tri++) {
+   msmaxth = RSB_MIN(msmaxth,((TTRP)->tattra[tri].maxt-(TTRP)->tattra[tri].mint)/2);
+  }
+  RSB_FPRINTF((TTRP)->pfp,"'%s.dat' index %d:%d using 1:2:(sprintf(\"%%d subm\",$5)) every %d::%d with labels left offset 1 notitle",
+		  (TTRP)->bname,0,(TTRP)->trc+(hmkl?0:-1), RSB_TRACE_MAX_THREADS_P1, msmaxth);
+  RSB_FPRINTF((TTRP)->pfp,",'%s.dat' index %d using 1:2:(sprintf(\"%%6.3f x\",$6)) "/*every %d::%d*/" with labels left offset 0 notitle",
+		  (TTRP)->bname,  (TTRP)->trc+(hmkl?1: 0)/*, RSB_TRACE_MAX_THREADS_P1, 1*/);
+
+		if((TTRP)->dtr)
+			fclose((TTRP)->lfp),
+			fclose((TTRP)->pfp);
+	}
+}
+
+#define RSB_ATTR_PREPARE(TTRP,ATTRP,ERRVAL) { if(ATTRP && ATTRP->dtr){  (TTRP)=&((ATTRP)->tattra[(ATTRP)->trc]); /*(TTRP)->ttt=-rsb_time();*//* (ATTRP)->trc++;*/ if ((ATTRP)->trc >= RSB_CONST_MAX_TUNING_SAMPLES )ERRVAL=RSB_ERR_INTERNAL_ERROR;} else {(TTRP)=NULL;} }
+#define RSB_ATTR_ADVANCE(TTRP,ATTRP,ET) { if(ATTRP && ATTRP->dtr){/*(TTRP)=&((ATTRP)->tattra[(ATTRP)->trc]);*/ (TTRP)->ttt += rsb_time(); (TTRP)->ttt += (ET); (ATTRP)->trc++;  } else {(TTRP)=NULL;} /* printf("ADVANCING TO %d/%d  (%zd bytes)\n",(ATTRP)->trc,RSB_CONST_MAX_TUNING_SAMPLES,sizeof(struct rsb_attr_t));*/}
+
+rsb_err_t rsb__do_bench_spxm(rsb_time_t *tpop, rsb_int_t *timesp, rsb_trans_t transA, const void * alphap, const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t nrhs, rsb_flags_t order, const void * Bp, rsb_nnz_idx_t ldB, const void * betap, void * Cp, rsb_nnz_idx_t ldC, rsb_time_t maxdt, rsb_int_t mintimes, enum rsb_op_t op, rsb_int_t maxtimes, int verbose, rsb_int_t *tnp, struct rsb_ts_t * tstp)
+{
+	/*!
+	       	Calls the computational kernel repeatedly.
+		Does not modify the matrix structure.
+		If RSB_DT_THREADS_TUNE_TNP(tnp), it temporarily modifies the executing threads count.
+		Unless RSB_SOME_ERROR(errval), shall output *tpop > RSB_TIME_ZERO.
+
+	 	TODO: can consider: const rsb_bool_t want_inner_flush = RSB_BOOL_FALSE; if(want_inner_flush == RSB_BOOL_TRUE) RSB_DO_ERROR_CUMULATE(errval,rsb__flush_cache(0));
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_time_t it = RSB_TIME_ZERO, ct = RSB_TIME_ZERO;	/* initial/current time */
+	rsb_time_t dt = it, tt = RSB_TIME_ZERO; /* elapsed (delta) / total  time */
+	rsb_time_t bt = RSB_CONST_IMPOSSIBLY_BIG_TIME, wt = RSB_TIME_ZERO; /* best / worst  time */
+	rsb_time_t ss = RSB_TIME_ZERO; /* sum of squares */
+	rsb_time_t jf = rsb__timer_granularity(); /* jiffy (timer granularity) */
+	rsb_time_t mindt = RSB_TIME_ZERO; /* minimal time */
+	rsb_int_t times = 0;
+	rsb_int_t otn = 0;
+
+	RSB_DO_REINIT_SINGLE_VALUE_GET(RSB_IO_WANT_EXECUTING_THREADS,&otn,errval);
+
+	if(tnp)
+	{
+		RSB_DO_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_EXECUTING_THREADS, tnp, errval);
+		if(RSB_SOME_ERROR(errval))
+		{
+		       	RSB_PERR_GOTO(err,RSB_ERRM_ES)
+		}
+	}
+
+	dt = it = rsb_time();
+
+	switch(op)
+	{
+		case(rsb_op_spmv):
+		do
+		{
+			errval = rsb__do_spmm(transA, alphap, mtxAp, nrhs, order, Bp, ldB, betap, Cp, ldC, RSB_OP_FLAG_DEFAULT);
+			RSB_SAMPLE_STAT(it,ct,dt,tt,bt,wt,ss,jf,times);
+		}
+		while(RSB_REPEAT(ct-it,times,mindt,mintimes,maxdt,maxtimes));
+		break;
+		case(rsb_op_spsvlt):
+		do
+		{
+			errval = rsb__do_spsm(transA, alphap, mtxAp, nrhs, order, betap, Bp, ldB, Cp, ldC);
+			RSB_SAMPLE_STAT(it,ct,dt,tt,bt,wt,ss,jf,times);
+		}
+		while(RSB_REPEAT(ct-it,times,mindt,mintimes,maxdt,maxtimes));
+		break;
+		case(rsb_op_nop):
+		/* no operation, no error */
+		break;
+		default:
+		/* no operation, error */
+	      	RSB_PERR_GOTO(done,RSB_ERRM_ES)
+	}
+
+done:
+	if(tnp)
+	{
+		RSB_DO_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_EXECUTING_THREADS,&otn,errval);
+		if(RSB_SOME_ERROR(errval))
+		{
+		       	RSB_PERR_GOTO(err,RSB_ERRM_ES)
+		}
+	}
+err:
+
+#if RSB_AT_WANT_BEST_TIME
+	RSB_ASSIGN_IF(tpop,bt)
+#else
+	RSB_ASSIGN_IF(tpop,( rsb_time() - it ) / times)
+#endif
+	RSB_ASSIGN_IF(timesp,times)
+
+#if RSB_ALLOW_INTERNAL_GETENVS
+	if(getenv("RSB_VERBOSE_TUNING_INNER"))
+		verbose = rsb__util_atoi(getenv("RSB_VERBOSE_TUNING_INNER"));
+#endif /* RSB_ALLOW_INTERNAL_GETENVS*/
+
+	if( RSB_UNLIKELY( verbose >= RSB_AUT0_TUNING_VERBOSE ) )
+		RSB_STAT_DUMP(it,tnp?*tnp:otn,ct,dt,tt,bt,wt,ss,times);
+	RSB_STAT_TAKE(it,tnp?*tnp:otn,ct,dt,tt,bt,wt,ss,times,tstp);
+
+	return errval;
+}
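+
+/*
+ * A minimal usage sketch of the routine above (not compiled in; mtxAp and the
+ * X, Y, alpha, beta operands are assumed to be set up as elsewhere in this
+ * file, with one right-hand side and untransposed operation):
+ */
+#if 0
+	{
+		rsb_time_t tpo = RSB_TIME_ZERO;	/* best time per op., per RSB_AT_WANT_BEST_TIME */
+		rsb_int_t times = 0;		/* iterations actually executed */
+		struct rsb_ts_t ts;		/* timing statistics sample */
+
+		errval = rsb__do_bench_spxm(&tpo, &times, RSB_TRANSPOSITION_N, &alpha, mtxAp,
+			1, RSB_FLAG_WANT_COLUMN_MAJOR_ORDER, X, mtxAp->nc, &beta, Y, mtxAp->nr,
+			/*maxdt*/1.0, /*mintimes*/10, rsb_op_spmv, /*maxtimes*/100,
+			/*verbose*/0, /*tnp: keep current thread count*/NULL, &ts);
+	}
+#endif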
+
+struct rsb_oas_t /* spmv/spsv/spmm/spsm op args structure, for rsb_dbf_t */
+{
+       	enum rsb_op_t op; /* e.g. may expand to combined N+T transposition benchmarking ... */
+       	rsb_trans_t transA;
+       	const void * alphap;
+       	rsb_coo_idx_t nrhs;
+       	rsb_flags_t order;
+       	const void * Bp;
+       	rsb_nnz_idx_t ldB;
+       	const void * betap;
+       	void * Cp;
+       	rsb_nnz_idx_t ldC;
+};
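+
+/*
+ * A sketch of how the argument structure above may be filled for a single
+ * right-hand-side SPMV benchmark (illustrative; mtxAp and the X, Y, alpha,
+ * beta operands are assumed to be allocated):
+ */
+#if 0
+	struct rsb_oas_t oas = { rsb_op_spmv, RSB_TRANSPOSITION_N, &alpha,
+		/*nrhs*/1, RSB_FLAG_WANT_COLUMN_MAJOR_ORDER,
+		/*Bp*/X, /*ldB*/mtxAp->nc, &beta, /*Cp*/Y, /*ldC*/mtxAp->nr };
+#endif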
+
+struct rsb_aoa_t /* autotuning output args */
+{
+	rsb_time_t otpo, btpo; /* original / best time per operation */
+	rsb_int_t cc; /* clones count (total) */
+	rsb_time_t ct; /* clones time */
+	struct rsb_ts_t otpos, btpos; /* original/best time per operation stats */
+};
+
+struct rsb_aia_t /* autotuning input args */
+{
+	/* non RSB specific */
+	rsb_time_t maxt; /* max operation iterations time */
+	rsb_int_t mintimes; /* min operation iterations */
+	rsb_int_t maxtimes; /* max operation iterations */
+	const char * mtxns; /* matrix name string */
+	rsb_perf_t ofe; /* operation flops estimate */
+	int verbose; /* TODO: indicate valid values */
+	rsb_int_t *tnp; /* threads number */
+	/* RSB specific */
+	const struct rsb_mtx_t * mtxAp;
+	rsb_int_t maxr; /* max rounds */
+	rsb_int_t maxms; /* max merge steps */
+	rsb_int_t maxss; /* max split steps */
+	rsb_int_t continue_partitioning; /* continue partitioning and pretend faster: mostly here for debug */
+ 	/* rsb_time_t *iotp, rsb_time_t *tttp, rsb_time_t *rctp */
+};
+
+#if 0
+struct rsb_ats_t /* autotuning times struct (FIXME: still unused) */
+{
+ 	rsb_time_t *iotp;
+	rsb_time_t *tttp;
+	rsb_time_t *rctp;
+};
+#endif
+
+struct rsb_bos_t /* benchmarkable operation structure, for rsb_dbf_t */
+{
+	struct rsb_oas_t oas;
+       	rsb_int_t *timesp;
+       	rsb_time_t maxdt;
+       	rsb_int_t mintimes;
+       	rsb_int_t maxtimes;
+       	int verbose;
+       	rsb_int_t *tnp;
+	struct rsb_ts_t*otsp;
+};
+
+typedef rsb_err_t (*rsb_bf_t) (rsb_time_t *tpop, const struct rsb_mtx_t * mtxAp, void*p) ;
+
+rsb_err_t rsb_dbf_t (rsb_time_t *tpop, const struct rsb_mtx_t * mtxAp, void*p)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	struct rsb_bos_t bos;
+
+	if(!p)
+	{
+		errval = RSB_ERR_BADARGS; 
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	bos = *(struct rsb_bos_t*) p;
+
+	errval = rsb__do_bench_spxm(tpop, bos.timesp, bos.oas.transA, bos.oas.alphap, mtxAp, bos.oas.nrhs, bos.oas.order, bos.oas.Bp, bos.oas.ldB, bos.oas.betap, bos.oas.Cp, bos.oas.ldC, bos.maxdt, bos.mintimes, bos.oas.op, bos.maxtimes, bos.verbose, bos.tnp, bos.otsp);
+err:
+	return errval;
+}
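+
+/*
+ * rsb_dbf_t matches the rsb_bf_t signature, so the tuning loops below can
+ * treat "run one benchmarked operation" as an interchangeable callback, as in:
+ *
+ *   rsb_bf_t bf = &rsb_dbf_t;
+ *   bf(&ctpo, mtxAp, &bos);
+ */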
+
+static rsb_err_t rsb__do_bench_spxm_t(rsb_time_t *otpop, rsb_time_t *tpop, rsb_int_t *tnp, const struct rsb_mtx_t * mtxAp, rsb_time_t maxt, rsb_int_t mintimes, rsb_int_t maxtimes, int verbose, struct rsb_oas_t*oasp, struct rsb_tattr_t*ttrp, struct rsb_ts_t * tstp)
+{
+	/*!
+		Thread tuning.
+	       	No matrix structure modification is attempted.
+
+		FIXME: Unfinished: errors may leave library status undefined.
+		FIXME: expand usage of rsb_dbf_t.
+	 */
+	/* const rsb_thread_t weta[]={1,2,3,4,5,6,7,8,9};*/
+	rsb_int_t otn = 0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_int_t btimes = 0, times = 0;
+	rsb_time_t ctpo = RSB_TIME_ZERO, btpo = RSB_TIME_ZERO, otpo = RSB_TIME_ZERO;
+	/* rsb_int_t mt = rsb_get_num_threads(); */
+	rsb_int_t mt = rsb__set_num_threads(/*RSB_THREADS_GET_MAX*/RSB_THREADS_GET_MAX_SYS);
+	rsb_perf_t ofe = RSB_PERF_ZERO; /* operation flops estimate */
+	struct rsb_ts_t ots;
+	struct rsb_ts_t otpos, btpos;
+
+	RSB_BZERO_P(&ots);
+
+	if(!tpop || !oasp )
+	{
+		errval = RSB_ERR_BADARGS; 
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	if( !oasp->alphap || !oasp->betap || !mtxAp || !oasp->Bp || !oasp->Cp )
+	{
+		errval = RSB_ERR_BADARGS; 
+		RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	ofe = rsb__estimate_mflops_per_op_spmv_uaua(mtxAp) * oasp->nrhs;
+
+	RSB_THREAD_STATS
+	RSB_DO_REINIT_SINGLE_VALUE_GET(RSB_IO_WANT_EXECUTING_THREADS,&otn,errval);
+	if(RSB_SOME_ERROR(errval))
+	{
+	       	RSB_PERR_GOTO(err,RSB_ERRM_ES)
+	}
+
+	rsb__tattr_init(ttrp,mtxAp,mtxAp->nr,mtxAp->nnz,mtxAp->typecode,mtxAp->flags,oasp->nrhs);
+
+	if( RSB_DT_THREADS_TUNE_TNP(tnp) )
+	{
+		const rsb_int_t tinc = -1; /* FIXME: experimental */
+		rsb_int_t mit, mat = mt, cut;
+		rsb_time_t wtpo = RSB_TIME_ZERO ; /* worst time per operation */
+		rsb_time_t ltpo = RSB_TIME_ZERO ; /* last time per operation */
+		rsb_time_t tst = -rsb_time();
+		rsb_int_t diar = 0; /* degradations in a row */
+		rsb_int_t mdiar = 0; /* maximal degradations in a row */
+
+		if(tinc > 1)
+			mit = 1, mat = mt, mdiar = mt+1;
+		else
+			mit = mt, mat = 1, mdiar = 2;
+
+		if(verbose > 1)
+			RSB_STDOUT("Will test in range (%d to %d) threads, with increase of %d and tolerate max %d consecutive degradations.\n",mit,mat,tinc,mdiar);
+
+		for(cut=mit;tinc>0?(cut<=mat):(cut>=1);cut+=tinc)
+		{
+#if 0
+			errval = rsb__do_bench_spxm(&ctpo, &times, oasp->transA, oasp->alphap, mtxAp, oasp->nrhs, oasp->order, oasp->Bp, oasp->ldB, oasp->betap, oasp->Cp, oasp->ldC, maxt, mintimes, oasp->op, maxtimes, verbose, &cut, NULL);
+#else
+			struct rsb_bos_t bos = { *oasp, &times, maxt, mintimes, maxtimes, verbose, &cut, &ots };
+			rsb_bf_t bf = &rsb_dbf_t; /* FIXME: in the future, bf shall be an argument */
+
+			bf(&ctpo,mtxAp,&bos);
+#endif
+
+			if( ltpo == RSB_TIME_ZERO ) 
+				ltpo = ctpo;
+
+			if( ctpo > wtpo || wtpo == RSB_TIME_ZERO ) 
+				wtpo = ctpo;
+
+			if( ctpo < btpo || btpo == RSB_TIME_ZERO ) 
+			{
+				*tnp = cut;
+				btpo = ctpo;
+				btpos = ots;
+				btimes = times;
+			}
+
+			/* if( ctpo > ltpo ) */
+			if( ctpo > btpo )
+				diar ++;
+			else
+				diar = 0;
+
+			rsb__tattr_sets(ttrp,otn,cut,ctpo,*tnp,times);
+
+			if(verbose)
+			RSB_STDOUT("%c %d threads: %0.4lgs  (%0.2lg Mflops) (%d/%d degradations so far)  %c%c\n",cut==otn?'~':' ',cut,ctpo,ofe/ctpo,
+					diar,mdiar,
+					ctpo<otpo ? '+' : (ctpo==otpo ?'=':'-'), ctpo<btpo ? '+' : ' ' );
+			ltpo = ctpo;
+
+			if( diar >= mdiar )
+				break;
+		} /* cut */
+
+		if(verbose)
+			RSB_STDOUT("Best threads choice is %d; starting threads were %d; max speed gap is %0.2lgx; search took %0.2lgs.\n",*tnp,otn,wtpo/btpo,tst+rsb_time());
+		goto done;
+	} /* RSB_DT_THREADS_TUNE_TNP(tnp) */
+
+	/* FIXME: need a better tuning + exploration mechanism. */
+
+	if(1)
+	{
+#if 0
+		errval = rsb__do_bench_spxm(&btpo, &btimes, oasp->transA, oasp->alphap, mtxAp, oasp->nrhs, oasp->order, oasp->Bp, oasp->ldB, oasp->betap, oasp->Cp, oasp->ldC, maxt, mintimes, oasp->op, maxtimes, verbose, NULL,NULL);
+#else
+		struct rsb_bos_t bos = { *oasp, &btimes, maxt, mintimes, maxtimes, verbose, NULL, &ots };
+		rsb_bf_t bf = &rsb_dbf_t; /* FIXME: in the future, bf shall be an argument */
+
+		bf(&btpo,mtxAp,&bos); /* replaces rsb__do_bench_spxm */
+#endif
+		if(RSB_SOME_ERROR(errval))
+		{
+		       	RSB_PERR_GOTO(err,RSB_ERRM_ES)
+		}
+		otpo = btpo;
+		btpos = ots;
+		rsb__tattr_sets(ttrp,otn,otn,btpo,otn,btimes);
+
+		if(verbose)
+			RSB_STDOUT("Reference operation time is %lg s (%0.4lg Mflops) with %d threads.\n",otpo,ofe/otpo,otn);
+		goto done;
+	}
+done:
+	*tpop = btpo;
+	RSB_ASSIGN_IF(otpop,otpo)
+	RSB_ASSIGN_IF(tstp,btpos)
+	goto err;
+err:
+	return errval;
+}
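+
+/*
+ * Concretely, the descending thread scan above proceeds as follows (a worked
+ * sketch): with tinc = -1 and, say, 8 system threads, samples are taken at
+ * 8, 7, 6, ... threads; since mdiar = 2 on this path, the scan stops early
+ * after two consecutive samples slower than the best one seen so far, and
+ * *tnp is left at the fastest thread count encountered.
+ */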
+
+static rsb_err_t rsb__do_tune_spxm_round( rsb_real_t*sfp, struct rsb_mtx_t ** mtxOpp, rsb_real_t*osvalp, const struct rsb_aia_t*aiap, rsb_time_t *iotp, rsb_time_t *tttp, rsb_time_t *rctp, struct rsb_aoa_t*aoap, struct rsb_oas_t*oasp, struct rsb_attr_t *attrp )
+{
+	/*
+	 * A round of auto-tuning.
+	 *
+	 * Can check for different blockings (by re-invoking the constructor).
+	 *
+	 * See RSB_DT_SAME_THREADS_TNP RSB_DT_THREADS_TUNE_TNP RSB_DT_SPEC_THREADS_TNP.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_real_t rsf = RSB_REAL_ZERO; /* result speedup factor */
+	double bsval = RSB_REAL_ZERO, /* best subdivision [multiplier] value */
+	       sval = RSB_INVALID_SVAL, ospv = RSB_REAL_ZERO, svalr = 4.0, svalm = 2.0;
+	rsb_time_t omaxt = aiap->maxt/(svalr*svalr), rtpo = RSB_TIME_ZERO;
+	struct rsb_mtx_t * mtxOp = NULL, *mtxCp = NULL; /* Output, Clone */
+	const struct rsb_mtx_t * mtxBp = NULL; /* Best */
+	rsb_flags_t sflags = RSB_FLAG_QUAD_PARTITIONING | RSB_FLAG_USE_HALFWORD_INDICES; /* structural flags */
+	rsb_int_t otn = 0,mt/*,tries=2*rsb_get_num_threads()*/;
+	rsb_perf_t ofe = RSB_PERF_ZERO; /* operation flops estimate */
+	rsb_time_t iot = RSB_TIME_ZERO,	/* I/O time */
+                   att = rsb_time(), 	/* auto tuning time */
+		   rct = RSB_TIME_ZERO; /* reference cloning  time */
+	rsb_thread_t btn = 0; /* best      threads number */
+	rsb_thread_t rtn = 0; /* requested threads number */
+	rsb_thread_t ctn = 0; /* current threads number */
+	rsb_bool_t only_one_sample = (mtxOpp == NULL) && RSB_DT_SAME_THREADS_TNP(aiap->tnp);
+	struct rsb_tattr_t * ttrp = NULL; /* thread tuning record pointer  */
+	struct rsb_aoa_t aoa; /* autotuning output args */
+
+	RSB_BZERO_P(&aoa);
+
+	mt = rsb__set_num_threads(/*RSB_THREADS_GET_MAX*/RSB_THREADS_GET_MAX_SYS); 
+
+	if(mtxOpp && *mtxOpp)
+		mtxBp = *mtxOpp;
+	else
+		mtxBp = aiap->mtxAp;
+
+	ofe = rsb__estimate_mflops_per_op_spmv_uaua(mtxBp) * oasp->nrhs; /* FIXME: oasp dep. */
+
+	if( !oasp || /*!mtxAp ||*/ /* *mtxOpp || */ (!RSB_IS_VALID_TRANS(oasp->transA)) || RSB_INVALID_COO_COUNT(oasp->nrhs)  /*|| !RSB_IS_VALID_THREAD_SPEC(ltn) || !RSB_IS_VALID_THREAD_SPEC(utn)*/)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(ret,RSB_ERRM_ES);
+	}
+	
+	RSB_DO_REINIT_SINGLE_VALUE_GET(RSB_IO_WANT_EXECUTING_THREADS,&otn,errval);
+	
+	if(RSB_DT_SPEC_THREADS_TNP(aiap->tnp))
+	       	ctn = rtn = *aiap->tnp;
+
+	if(aiap->verbose)
+	{
+		if( mtxOpp == NULL )
+		{
+			if( RSB_DT_SAME_THREADS_TNP(aiap->tnp) )
+				; /* RSB_STDOUT("Will sample performance at default threads.\n"); */
+			else
+				RSB_STDOUT("Started tuning inner round on given matrix instance.\n");
+		}
+		else
+		{
+			RSB_STDOUT("Started tuning inner round: will search for an optimal matrix instance."); /* with or without threads ?? */
+			RSB_STDOUT("\n""Starting with requested %d threads ; current default %d ; at most %d.\n",rtn,otn,mt);
+		}
+	}
+
+	//do
+	{
+		RSB_ATTR_PREPARE(ttrp,attrp,errval);
+		if(RSB_SOME_ERROR(errval))
+		{
+		       	RSB_PERR_GOTO(err,RSB_ERRM_EM); 
+		}
+		errval = rsb__do_bench_spxm_t(&aoa.otpo,&aoa.btpo, /*(mtxOpp) ? NULL :*/ aiap->tnp, mtxBp, omaxt, aiap->mintimes, aiap->maxtimes, aiap->verbose, oasp, ttrp, NULL);
+		RSB_ATTR_ADVANCE(ttrp,attrp,0.0);
+
+		if(RSB_SOME_ERROR(errval))
+		{
+		       	RSB_PERR_GOTO(err,RSB_ERRM_EM); 
+		}
+	}
+
+	if( mtxOpp == NULL )
+	{
+		if(aiap->verbose)
+		{
+			if(aiap->tnp && *aiap->tnp!=0)
+				RSB_STDOUT("Seems like best threads choice with this matrix is %d threads, with speedup %lg x.\n",*aiap->tnp,aoa.otpo/aoa.btpo);
+		}
+		RSB_DO_REINIT_SINGLE_VALUE_GET(RSB_IO_WANT_SUBDIVISION_MULTIPLIER,&bsval,errval);
+		goto ann; /* job done */
+	}
+
+	btn = otn;
+
+	rct -= rsb_time();
+	/* Make a pure COO clone beforehand, so as to make sure not to get 1-to-1 cloned copies afterwards. */
+	errval = rsb__clone(&mtxCp,mtxBp->typecode,RSB_TRANSPOSITION_N,NULL,mtxBp,RSB_DO_FLAG_FILTEROUT(mtxBp->flags,sflags));
+	rct += rsb_time();
+	/*if(RSB_VERBOSE_FOR_SAVING(aiap->verbose))
+		RSB_ATR_SAVE(errval,mtxns,mtxBp->typecode,transA,nrhs,"r",0,bsval,mtxCp,iot,op);
+	*/
+
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(ret,RSB_ERRM_NL);
+	}
+
+	if(RSB_VERBOSE_FOR_MSG(aiap->verbose))
+	{
+		RSB_STDOUT("Copied to a disposable COO clone: ");
+		RSB_STDOUT(RSB_PRINTF_MATRIX_AT_SUMMARY_ARGS(mtxCp));
+		RSB_STDOUT("\n");
+	}
+	/* Save subdivision multiplier values. */
+	if(osvalp)
+		ospv = *osvalp;
+	else
+		RSB_DO_REINIT_SINGLE_VALUE_GET(RSB_IO_WANT_SUBDIVISION_MULTIPLIER,&ospv,errval);
+	
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(ret,RSB_ERRM_NL);
+		goto ret;
+       	}
+	/* TODO: shall make sure that RSB_REAL_ZERO is never returned */
+	bsval = ospv;
+
+	aoa.otpo = aoa.btpo; /* best time with this matrix */
+
+	if(aiap->verbose)
+		RSB_STDOUT("Starting autotuning stage, with subdivision of %lg (current threads=%d, requested threads=%d, max threads = %d).\n",ospv,otn,rtn,mt);
+
+	/* For different thread counts */
+	/* ... */
+	// for(tni=...;tni<...;++tni)
+	/* For different cache blocking solutions */
+	for(sval=(ospv/svalr);sval<=(ospv*svalr);sval*=svalm)
+	{
+		rsb_time_t cct = - rsb_time();
+		/* Set cache blocking */
+		RSB_DO_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_SUBDIVISION_MULTIPLIER,&sval,errval);
+		if(RSB_SOME_ERROR(errval)){ RSB_PERR_GOTO(err,RSB_ERRM_EM); }
+
+		if(otn != ctn)
+			RSB_DO_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_EXECUTING_THREADS,&ctn,errval);
+		rct -= rsb_time();
+		errval = rsb__clone(&mtxOp,RSB_NUMERICAL_TYPE_SAME_TYPE,RSB_TRANSPOSITION_N,NULL,mtxCp,mtxBp->flags|sflags);
+		rct += rsb_time();
+		if(RSB_SOME_ERROR(errval)){ RSB_PERR_GOTO(err,RSB_ERRM_EM); }
+		if(otn != ctn)
+			RSB_DO_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_EXECUTING_THREADS,&otn,errval);
+		if(RSB_VERBOSE_FOR_MSG(aiap->verbose))
+		{
+			RSB_STDOUT("Considering a candidate clone: ");
+			RSB_STDOUT(RSB_PRINTF_MATRIX_AT_SUMMARY_ARGS(mtxOp));
+			RSB_STDOUT("\n");
+		}
+
+		if(RSB_VERBOSE_FOR_SAVING(aiap->verbose))
+			RSB_ATR_SAVE(errval,aiap->mtxns,mtxBp->typecode,oasp->transA,oasp->nrhs,sval,"r",0,ctn,mtxOp,iot,oasp->op);
+
+		/* FIXME: need to check whether the matrix is effectively more/less subdivided; if not, then skip */
+		RSB_ATTR_PREPARE(ttrp,attrp,errval);
+		if(RSB_SOME_ERROR(errval)){ RSB_PERR_GOTO(err,RSB_ERRM_IE); }
+		{
+			errval = rsb__do_bench_spxm_t( &aoa.otpo, &rtpo, aiap->tnp ? &ctn : NULL, mtxOp, omaxt, aiap->mintimes, aiap->maxtimes, aiap->verbose, oasp, ttrp, NULL);
+		}
+		cct += rsb_time();
+		RSB_ATTR_ADVANCE(ttrp,attrp,cct);
+		if(RSB_SOME_ERROR(errval)){ RSB_PERR_GOTO(err,RSB_ERRM_EM); }
+
+		/* Free the "slower" matrix, keep the "optimized" one. */
+		if(aiap->verbose)
+		{
+			rsb_real_t bpn = 0;
+			rsb_blk_idx_t lfc = 0;
+			errval = rsb__do_mtx_get_info(mtxOp, RSB_MIF_INDEX_STORAGE_IN_BYTES_PER_NNZ__TO__RSB_REAL_T, &bpn);
+			if(RSB_SOME_ERROR(errval)){ RSB_PERR_GOTO(err,RSB_ERRM_EM) }
+			errval = rsb__do_mtx_get_info(mtxOp, RSB_MIF_LEAVES_COUNT__TO__RSB_BLK_INDEX_T, &lfc);
+			if(RSB_SOME_ERROR(errval)){ RSB_PERR_GOTO(err,RSB_ERRM_EM) }
+			// RSB_STDOUT("For subdivision %lg (%d leaves, %5.4lg bytes/nz, %d threads), challenging %lg s with %lg s (speedup %lg x)\n",sval,(int)lfc,bpn,(int)(tnp?*tnp:otn),rbtpo,rtpo,rbtpo/rtpo);
+			RSB_STDOUT("Challenging best inner round reference (%lg s/%d threads) with: subdivision %lg, %d leaves, %5.4lg bytes/nz, %lg s/%d threads (speedup %lg x), same?%c.\n", aoa.btpo,(int)btn, sval, (int)lfc, bpn, rtpo,(int)ctn, aoa.btpo/rtpo,((mtxOp==*mtxOpp)?'y':'n'));
+		}
+
+		if(rtpo < aoa.btpo)
+		{
+			if( mtxOp != *mtxOpp )
+			{
+				if(/*RSB_VERBOSE_FOR_MSG */(aiap->verbose) && *mtxOpp)
+				{
+					RSB_STDOUT("New good candidate clone found: discarding old candidate clone: ");
+					RSB_STDOUT(RSB_PRINTF_MATRIX_AT_SUMMARY_ARGS(*mtxOpp));
+					RSB_STDOUT("\n");
+				}
+#if RSB_AT_DESTROYS_MTX
+				RSB_MTX_FREE(*mtxOpp);
+#endif /* RSB_AT_DESTROYS_MTX */
+			}
+			*mtxOpp = mtxOp;
+			mtxBp = mtxOp;
+			mtxOp = NULL;
+			aoa.btpo = rtpo;
+			bsval = sval;
+			btn = ctn;
+		}
+		else
+		{
+			if(/*RSB_VERBOSE_FOR_MSG*/(aiap->verbose))
+			{
+				RSB_STDOUT("New candidate clone performs slowly; discarding it: ");
+				RSB_STDOUT(RSB_PRINTF_MATRIX_AT_SUMMARY_ARGS(mtxOp));
+				RSB_STDOUT("\n");
+			}
+			RSB_MTX_FREE(mtxOp);
+		}
+		
+		if( aiap->tnp && *aiap->tnp > mt )
+		{
+			/*  */
+			break;
+		}
+	}
+
+	if(aiap->tnp)
+	{
+		*aiap->tnp = btn;
+	}
+ann:
+	if( !only_one_sample )
+	if(aiap->verbose)
+	{
+		RSB_STDOUT("%s %s performance",(mtxOpp==NULL && RSB_DT_SAME_THREADS_TNP(aiap->tnp))?"Measured":"Best",( oasp->op == rsb_op_spmv ?"sparse multiply":"sparse solve"));
+		if(mtxOpp!=NULL)
+			RSB_STDOUT(" with subdivision multiplier of %lg",bsval);
+		if(aiap->tnp && *aiap->tnp!=0)
+			RSB_STDOUT(" using %d threads",(int)btn );
+		RSB_STDOUT(": %lg Mflops.\n", (ofe)/(aoa.btpo) );
+	}
+	/* Pick up the best solution for return. */
+err:
+	/* FIXME: in some error cases the RSB_IO_WANT_SUBDIVISION_MULTIPLIER seems not to be reset. */
+	if(RSB_SOME_ERROR(errval))
+	{
+	       	RSB_MTX_FREE(mtxOp);
+	}
+	/* Restore subdivision multiplier values. */
+	if( ospv != RSB_REAL_ZERO )
+		RSB_DO_REINIT_SINGLE_VALUE_SET(RSB_IO_WANT_SUBDIVISION_MULTIPLIER,&ospv,errval);
+	RSB_ASSIGN_IF(osvalp,bsval);
+ret:
+	rsf = aoa.otpo / aoa.btpo;
+	att = ( rsb_time() - att ) - iot;
+	if( aiap->verbose && RSB_DT_THREADS_TUNE_TNP(aiap->tnp) )
+	{
+		RSB_STDOUT("Auto-tuning inner round complete. Gained a total speedup of %lg x (= %lg : %lg)).\n",rsf,aoa.otpo,aoa.btpo);
+	}
+	RSB_ASSIGN_IF(sfp,rsf) /* speedup factor */
+	RSB_ASSIGN_IF(iotp,iot)
+	RSB_ASSIGN_IF(tttp,att)
+	RSB_ASSIGN_IF(rctp,rct)
+	RSB_ASSIGN_IF(aoap,aoa)
+	RSB_MTX_FREE(mtxCp);
+	RSB_DO_ERR_RETURN(errval)
+}
+
+#define RSB_TIME_BETTER_THRESHOLD 0.01
+/* #define RSB_TIME_BETTER_THAN(TNEW,TOLD) ( (TNEW) < ((TOLD)*(1.0+RSB_TIME_BETTER_THRESHOLD)) ) */
+#define RSB_TIME_BETTER_THAN(TNEW,TOLD) ( (TOLD) > ((TNEW)*(1.0+RSB_TIME_BETTER_THRESHOLD)) )
+#define RSB_TIME_DEFINITELY_WORSE_THAN(TNEW,TOLD) ( (TNEW) > ((TOLD)*(1.0+0.02)) )
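+
+/*
+ * A worked instance of the thresholds above: with TOLD = 1.00s, a new sample
+ * counts as better only below ~0.990s (i.e. more than 1% faster), as
+ * definitely worse above 1.02s, and anything in between is treated by the
+ * re-blocking loop below as a negligible (neutral) change.
+ */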
+
+static rsb_err_t rsb__rblk_tune_inner(struct rsb_mtx_t **mtxOpp, rsb_submatrix_idx_t ms, const struct rsb_aia_t*aiap, rsb_int_t*epsp, struct rsb_ts_t*otposp, struct rsb_ts_t*btposp, rsb_time_t*otpopp, rsb_int_t *tnp,  rsb_time_t*btpopp, rsb_int_t *ccp, rsb_time_t *ctp, rsb_bool_t want_split, struct rsb_oas_t*oasp, struct rsb_attr_t *attrp )
+{
+	/**
+	 * Re-blocking (split/merge) based autotuning.
+	 * This function makes a clone of the original matrix and works on that.
+	 * Works by:
+	 *  - taking a first clone of a matrix, benchmarking it
+	 *  - taking a second clone and reblocking it
+	 *  - keeping the best and iterating
+	 * So in principle, three instances may exist at a given time.
+	 * Additionally, one should keep in mind that  rsb__mtx_split/rsb__leaves_merge  use temporary memory.
+	 * 
+	 * FIXME: in case of an error (e.g. insufficient memory), it is still possible that a new (repartitioned) matrix will be returned, and the error return code will be reset.
+	 * TODO: ideally, one would have reversible merge and split operators and no cloning, so that only reversal would be necessary (maybe in librsb-1.3); one may create such "reversal" structures: an array of transition actions.
+	 * TODO: support for truly (no clone) in place tuning, via the "reversal" based algorithms.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_time_t tmt = RSB_TIME_ZERO, tst = RSB_TIME_ZERO, /* total [merge/split,sort] time */
+		   tlt = RSB_TIME_ZERO, tat = RSB_TIME_ZERO, /* total [elapsed,analysis] */
+		   tpt = RSB_TIME_ZERO, tct = RSB_TIME_ZERO, /* total [partitioning(=splitting/merging),cloning] time */
+		   st = RSB_TIME_ZERO, at = RSB_TIME_ZERO, lt = RSB_TIME_ZERO; /* sort, analysis, elapsed, */
+	rsb_submatrix_idx_t eps = 0; /* effective (executed) partitioning steps */
+	rsb_submatrix_idx_t pps = 0; /* profitable (executed) partitioning steps */
+	rsb_submatrix_idx_t nsbp,nsap,nsdp; /* number of submatrices [before/after/during] partition */
+	rsb_submatrix_idx_t cc = 0; /* cloning count */
+	rsb_submatrix_idx_t nt = rsb_get_num_threads(); /*  */
+	rsb_submatrix_idx_t manp = 0; /* maximum number of passes (merges/splits) */
+	int wom = 0, db = /* tnp == NULL ? 1 : */ 0; /* want only merging, do bench */
+#if 0
+	int wom = da = 0; /* do autotuning */
+#endif
+	rsb_time_t tpop = RSB_TIME_ZERO, otpop = RSB_TIME_ZERO, btpop = RSB_TIME_ZERO; /* time per operation (t.p.op.), old t.p.op., best t.p.op. */
+	struct rsb_mtx_t *mtxWp = NULL, *mtxBp = NULL; /* matrices: work, best  */
+	rsb_int_t diar = 0; /* degradations in a row */
+	rsb_int_t niar = 0; /* neutral in a row */
+	rsb_int_t mdiar = 3; /* max [tolerated] degradations in a row [before skipping tuning] */
+	rsb_int_t itn = 0, otn = 0; /* initial/optimal threads number */
+	const rsb_char_t*pos = (want_split) ? "split" : "merge"; /* partition operation string  */
+	const int wv = /*1*/aiap->verbose; /* want verbose */
+	double mpoe = 0.0; /* estimated megaflops per operation */
+	struct rsb_tattr_t * ttrp = NULL; /* thread tuning record pointer  */
+	rsb_time_t iot = RSB_TIME_ZERO;	/* I/O time */
+        const struct rsb_mtx_t*mtxAp = ( aiap->mtxAp ? aiap->mtxAp : *mtxOpp );
+	struct rsb_ts_t otpos, btpos;
+	struct rsb_ts_t tpos;
+
+	RSB_BZERO_P(& tpos);
+	RSB_BZERO_P(&otpos);
+	RSB_BZERO_P(&btpos);
+
+        RSB_DEBUG_ASSERT(mtxAp);
+	RSB_ASSERT(nt>0);
+
+#if RSB_ALLOW_INTERNAL_GETENVS
+	if(getenv("RSB_MERGE_WOM")) wom = rsb__util_atoi(getenv("RSB_MERGE_WOM"));
+	if(getenv("RSB_MERGE_DB")) db = rsb__util_atoi(getenv("RSB_MERGE_DB"));
+	/*if(getenv("RSB_MERGE_DA")) da = rsb__util_atoi(getenv("RSB_MERGE_DA")); */
+#endif /* RSB_ALLOW_INTERNAL_GETENVS */
+
+	if(!mtxAp)
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(ret,RSB_ERRM_ES);
+	}
+
+	RSB_ASSERT( ! ( aiap->mtxAp != NULL && mtxOpp != NULL ) );
+	RSB_ASSERT( mtxAp->all_leaf_matrices_n > 0 );
+
+	if( aiap->mtxAp != NULL && mtxOpp != NULL )
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(ret,RSB_ERRM_ES);
+	}
+
+	nsap = nsbp = mtxAp->all_leaf_matrices_n;
+#if 0
+	{
+		const rsb_int_t maxpasses = 20; /* max number of merging passes (FIXME: this is an arbitrary constant ...) */
+		manp = RSB_MAX( nt, nsbp / maxpasses );
+	}
+#endif
+	mpoe = aiap->ofe;
+
+	tct -= rsb_time();
+	mtxWp = rsb__clone_simple(mtxAp);
+	tct += rsb_time();
+	cc++;
+
+	if(!mtxWp)
+	{
+		errval = RSB_ERR_ENOMEM;
+		RSB_PERR_GOTO(ret, RSB_ERRM_E_MTXAP);
+	}
+
+	RSB_ATTR_PREPARE(ttrp,attrp,errval);
+
+	if(RSB_SOME_ERROR(errval))
+	{
+	       	RSB_PERR_GOTO(err,RSB_ERRM_EM); 
+	}
+
+	if(db)
+		errval = rsb__do_bench_spxm(&tpop,NULL,oasp->transA,oasp->alphap,mtxWp,oasp->nrhs,oasp->order,oasp->Bp,oasp->ldB,oasp->betap,oasp->Cp,oasp->ldC,aiap->maxt,aiap->mintimes,oasp->op,aiap->maxtimes,wv,NULL,&tpos);
+	else
+	{
+		RSB_ASSIGN_IF_SP(otn,tnp);
+		errval = rsb__do_bench_spxm_t(NULL,&tpop,&otn,mtxAp,aiap->maxt,aiap->mintimes,aiap->maxtimes,wv,oasp,ttrp,&tpos);
+		itn = otn;
+	}
+	RSB_ATTR_ADVANCE(ttrp,attrp,0.0);
+	RSB_ASSERT( ( tpop != RSB_TIME_ZERO ) || RSB_SOME_ERROR(errval) );
+
+	if(RSB_SOME_ERROR(errval))
+	{
+	       	RSB_PERR_GOTO(err,RSB_ERRM_EM); 
+	}
+
+	btpop = otpop = tpop;
+	btpos = otpos = tpos;
+
+	mdiar = RSB_MIN(mdiar,ms);
+
+	if(wv)
+		RSB_STDOUT("Starting %s (%s threads) based auto-tuning procedure (transA=%c, nrhs=%d) (max %d steps, inclusive %d grace steps) on: ",pos,RSB_DT_SAME_THREADS_TNP(tnp)?"same":(RSB_DT_THREADS_TUNE_TNP(tnp)?"and":"user-supplied"),RSB_TRANSPOSITION_AS_CHAR(oasp->transA),oasp->nrhs,ms,mdiar),
+		RSB_STDOUT(RSB_PRINTF_MATRIX_AT_SUMMARY_ARGS(mtxWp)),
+	       	RSB_STDOUT(" (tpop: %0.4lg  Mflops: %3.3lf)\n",tpop,mpoe/tpop);
+
+	tpt -= rsb_time();
+
+	for(eps=0;eps<ms && ( want_split || ( mtxWp->all_leaf_matrices_n>manp ) ) ;++eps)
+	{
+		rsb_int_t ctn = 0;
+		rsb_time_t cct = - rsb_time();
+
+		tpop = RSB_TIME_ZERO;
+		tmt -= rsb_time();
+		nsdp = mtxWp->all_leaf_matrices_n;
+
+		if(want_split)
+		{
+			errval = rsb__mtx_realloc_with_spare_leaves(&mtxWp, RSB_TMP_OVERALLOC_MTX*rsb__submatrices(mtxWp));
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_PERR_GOTO(err,RSB_ERRM_ES);
+			}
+
+			errval = rsb__mtx_split(mtxWp,manp,&st,&at,&lt,wv,0); /* FIXME: manp is not used yet by rsb__mtx_split */
+
+			if(RSB_SOME_ERROR(errval))
+			{
+				RSB_PERR_GOTO(err,RSB_ERRM_ES);
+			}		
+		}
+		else
+		{
+			errval = rsb__leaves_merge(mtxWp,manp,&st,&at,&lt,wv,0);
+
+			if(RSB_SOME_ERROR(errval))
+			{
+				/* FIXME: this is a critical error and mtxWp shall be deallocated */
+				RSB_PERR_GOTO(err,RSB_ERRM_ES);
+			}
+		}
+
+		tst += st;
+		tlt += lt;
+		tat += at;
+		tmt += rsb_time(); 
+		nsap = mtxWp->all_leaf_matrices_n;
+		if(wom)
+		       	continue; /* want only merging/splitting */
+		/* full tuning: */
+#if 0
+		if(da)
+		/*otn = - manp;*/ errval = rsb__tune_spxx(NULL, NULL, &otn, mintimes, 0, 0, RSB_CONST_MS_AT_AUTO_STEPS, RSB_AUT0_TUNING_DEFAULT_TIMES, RSB_AT_TIME_AUTO, transA, alphap, mtxWp, nrhs, order, Bp, ldB, betap, Cp, ldC, op, /*RSB_AUT0_TUNING_SILENT*//*RSB_AUT0_TUNING_VERBOSE*/wv, NULL, NULL, NULL, NULL );
+#endif
+
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}
+
+		RSB_ATTR_PREPARE(ttrp,attrp,errval);
+		if(RSB_SOME_ERROR(errval))
+		{
+		       	RSB_PERR_GOTO(err,RSB_ERRM_EM); 
+		}
+
+		/* just benchmark: */
+		if(db)
+			errval = rsb__do_bench_spxm(&tpop,NULL,oasp->transA,oasp->alphap,mtxWp,oasp->nrhs,oasp->order,oasp->Bp,oasp->ldB,oasp->betap,oasp->Cp,oasp->ldC,aiap->maxt,aiap->mintimes,oasp->op,aiap->maxtimes,wv,NULL,&tpos);
+		else
+		{
+#if RSB_TUNE_WITH_LIKWID
+			static rsb_bool_t want_likwid = RSB_BOOL_TRUE;
+			char likwid_marker_string[RSB_MAX_FILENAME_LENGTH];
+			RSB_ATR_SAVENAME(likwid_marker_string,errval,aiap->mtxns,mtxWp->typecode,oasp->transA,oasp->nrhs,/*sval*/1.0,pos,pps,/*ctn*/otn,mtxWp,iot,oasp->op);
+			RSB_TM_LIKWID_MARKER_R_START(likwid_marker_string);
+#endif /* RSB_TUNE_WITH_LIKWID */
+			ctn = 0;
+			RSB_ASSIGN_IF_SP(ctn,tnp);
+			errval = rsb__do_bench_spxm_t(NULL,&tpop,&ctn,mtxWp,aiap->maxt,aiap->mintimes,aiap->maxtimes,wv,oasp,ttrp,&tpos); /* FIXME: need first argument */
+#if RSB_TUNE_WITH_LIKWID
+			RSB_TM_LIKWID_MARKER_R_STOP(likwid_marker_string);
+#endif /* RSB_TUNE_WITH_LIKWID */
+		}
+		RSB_ASSERT( ( tpop != RSB_TIME_ZERO ) || RSB_SOME_ERROR(errval) );
+		cct += rsb_time();
+		RSB_ATTR_ADVANCE(ttrp,attrp,cct); /* FIXME: need to adjust cct time */
+
+		if(RSB_SOME_ERROR(errval))
+		{
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}
+
+		if(ctn == 0)
+		{
+			RSB_DO_REINIT_SINGLE_VALUE_GET(RSB_IO_WANT_EXECUTING_THREADS,&ctn,errval);
+		}
+
+		if(tpop)
+		if(wv)
+		{
+			RSB_STDOUT("After %s step %d: tpop: %0.4lg s   ~Mflops: %3.3lf   nsubm:%d otn:%d\n",pos,eps+1,tpop,mpoe/tpop,nsap,ctn);
+		}
+
+		if( ! ( aiap->continue_partitioning < 0 ) )
+		if( mtxOpp )
+		{
+			if( RSB_TIME_BETTER_THAN(tpop,btpop) || ( aiap->continue_partitioning > 0 ) )
+			{
+				tct -= rsb_time(); 
+				RSB_MTX_FREE(mtxBp);
+				mtxBp = mtxWp;
+				mtxWp = rsb__clone_simple(mtxWp);
+				otn = ctn;
+				if( mtxWp == NULL )
+				{
+					RSB_STDOUT("Cloning failed: probably out of memory !\n"); /* FIXME: temporary. */
+					errval = RSB_ERR_ENOMEM;
+					/* A non-critical error: execution can continue. */
+					RSB_PERR_GOTO(ret,RSB_ERRM_EM);
+				}
+				tct += rsb_time(); 
+				cc ++;
+				++pps;
+				pps += diar; /* given the present speedup, count the preceding degradations and neutral steps as benign */
+				pps += niar;
+				diar = 0;
+				niar = 0;
+				if(wv)
+				RSB_STDOUT("Applying %s (%d -> %d leaves, %d th.) yielded SPEEDUP of %6.3lfx: %0.4lgs -> %0.4lgs, so taking this instance.\n",pos,nsdp,nsap,ctn,btpop/tpop,btpop,tpop);
+				btpop = tpop;
+				btpos = tpos;
+
+				if(RSB_VERBOSE_FOR_SAVING(wv))
+					RSB_ATR_SAVE(errval,aiap->mtxns,mtxWp->typecode,oasp->transA,oasp->nrhs,/*sval*/1.0,pos,pps,/*ctn*/otn,mtxWp,iot,oasp->op);
+				/* FIXME: subdivision parameter sval ignored here  ... */
+			}
+			else /* either niar++ or diar++ */
+#if 1
+			if( ! RSB_TIME_DEFINITELY_WORSE_THAN(tpop,btpop) )
+			{
+				++niar;
+				if(wv)
+					RSB_STDOUT("Applying %s (%d -> %d leaves, %d th.) yielded NEGLIGIBLE change (%dth in a row) (old/new=%.5lfx): %0.4lgs -> %0.4lgs, so IGNORING this instance.\n",pos,nsdp,nsap,ctn,niar,btpop/tpop,btpop,tpop);
+
+			}
+#else
+			if ( eps == ms-1 ) /* better, minimal threshold if last  */
+			{
+				diar = 0; // FIXME: here also makes sense to retain last matrix, even if no clone is necessary anymore
+				if(wv)
+				       	RSB_STDOUT("Negligible improvement: not taking a clone yet.\n");
+			}
+#endif
+		}
+
+		if( RSB_TIME_DEFINITELY_WORSE_THAN(tpop,btpop) || ( aiap->continue_partitioning < 0 ) )
+		{
+			++diar;
+			if(wv) RSB_STDOUT("Applying %s (%d -> %d leaves, %d th.) yielded SLOWDOWN (%dth of %d tolerable) of %6.3lfx: %0.4lgs -> %0.4lgs.\n",pos,nsdp,nsap,ctn,diar,mdiar,tpop/btpop,btpop,tpop);
+		}
+
+		if( diar > mdiar )
+		{
+			if(wv)
+			RSB_STDOUT("Skipping further %s based tests after %d definite performance degradations in a row.\n",pos,diar);
+			/* stop the process when there are no more submatrices to merge/split */
+			eps ++; /* effective steps */
+			goto noloop;
+		}
+
+		if ( (!want_split) && nsap == 1 )
+		{
+			if(wv)
+			RSB_STDOUT("Merged all the matrix leaves: no reason to continue merging.\n");
+			eps ++; /* effective steps */
+			goto noloop;
+		}
+	} /* eps */
+noloop:
+	tpt += rsb_time();
+
+	if(RSB_AT_NO_VERBOSE(wv))
+		goto ret;
+
+	RSB_STDOUT("A total of %d %s steps (of max %d) (%d -> %d subms) took %0.4lgs (of which %0.4lgs partitioning, %0.4lgs I/O); computing times: %0.4lgs in par. loops, %0.4lgs sorting, %0.4lgs analyzing)\n",eps,pos,ms,nsbp,nsap,tpt,tmt,iot,tlt,tst,tat);
+
+	if(tpop)
+	{
+		RSB_STDOUT("Total %s + benchmarking process took %0.4lgs, equivalent to %3.1lf/%3.1lf new/old ops (%0.4lgs for %d clones -- as %3.1lf/%3.1lf ops, or %3.1lf/%3.1lf ops per clone)",pos,tpt,tpt/btpop,tpt/otpop,tct,cc,tct/btpop,tct/otpop,(tct/cc)/btpop,(tct/cc)/otpop);
+		RSB_STDOUT(", SPEEDUP of %6.3lfx",otpop/btpop);
+		if(otpop>btpop)
+			RSB_STDOUT("\n");
+		else
+			RSB_STDOUT(" (NO SPEEDUP)\n");
+	}
+
+	if(otpop>btpop)
+	{
+		RSB_STDOUT("Applying multi-%s (%d -> %d leaves, %d steps, %d -> %d th.sp.) yielded SPEEDUP of %6.3lfx (%0.4lgs -> %0.4lgs), will amortize in %10.1lf ops by saving %0.4lgs per op.\n",pos,mtxAp->all_leaf_matrices_n,mtxBp->all_leaf_matrices_n,pps,itn,otn,otpop/btpop,otpop,btpop,((tpt)/(otpop-btpop)),otpop-btpop);
+	}
+	goto ret;
+err:
+	RSB_ERROR("A recoverable error happened during tuning!\n");
+ret:
+	RSB_MTX_FREE(mtxWp);
+        
+        if( mtxBp && mtxOpp && *mtxOpp != mtxBp )
+	{
+	        /* RSB_MTX_FREE(*mtxOpp); */
+		if( btpop >= otpop )
+		{
+			if( aiap->continue_partitioning > 0 )
+				btpop = otpop * 0.8; /* so as not to break consistency in callers. */
+			if( aiap->verbose && aiap->continue_partitioning > 0 )
+				RSB_STDOUT("Taking clone by force and pretending its time is %5.3lgx the original (%2.3es instead of %2.3es).\n",0.8,btpop,otpop);
+		}
+
+	        RSB_ASSIGN_IF(mtxOpp,mtxBp);
+	}
+
+	btpop = RSB_MIN(btpop,otpop); /* a parachute: signals the autotuner's failure without breaking consistency. */
+	RSB_ASSIGN_IF(btpopp,btpop)
+	RSB_ASSIGN_IF(btposp,btpos)
+	RSB_ASSIGN_IF(otpopp,otpop)
+	RSB_ASSIGN_IF(otposp,otpos)
+	RSB_ASSIGN_IF(ccp,*ccp+cc)
+	RSB_ASSIGN_IF(ctp,*ctp+tct)
+	otn = ( otn == 0 ) ? rsb__set_num_threads(RSB_THREADS_GET) /*rsb_want_executing_threads() */ /* rsb_get_num_threads()*/ : otn;
+	RSB_ASSIGN_IF(tnp,otn)
+	RSB_ASSIGN_IF(epsp,pps)
+
+	return errval;
+}
+
+static rsb_err_t rsb__tune_spxx_blk(struct rsb_mtx_t **mtxOpp, rsb_int_t*epsp, rsb_int_t *otcp, const struct rsb_aia_t*aiap, struct rsb_aoa_t*aoap, struct rsb_oas_t*oasp, struct rsb_attr_t *attrp )
+{
+	/*
+	 * Try to obtain a block autotuned clone of a matrix.
+	 *
+	 * otcp: optimal thread count (pointer)
+	 * ...
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	const int wv = 1; /* want verbose */
+	rsb_int_t ootc = otcp ? *otcp : 0;
+
+        RSB_DEBUG_ASSERT(aiap->mtxAp ? aiap->mtxAp : *mtxOpp);
+
+	if(! aiap->maxms )
+	{
+		goto wos;
+	}
+
+	errval = rsb__rblk_tune_inner(mtxOpp,aiap->maxms,aiap,epsp,&aoap->otpos,&aoap->btpos,&aoap->otpo,otcp,&aoap->btpo,&aoap->cc,&aoap->ct,RSB_BOOL_FALSE,oasp,attrp);
+
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(ret,RSB_ERRM_ES);
+	}
+
+	/* if( ( aiap->mtxAp && *mtxOpp ) && ( aiap->mtxAp != *mtxOpp ) ) */
+	if( aoap->otpo != aoap->btpo )
+	{
+		/* autotuning (probably) succeeded */
+		goto ret;
+	}
+
+	if(wv && aiap->maxss && aiap->verbose)
+	{
+		RSB_STDOUT("Merging based autotuning FAILED (=NO SPEEDUP); let's try splitting then...\n");
+	}
+
+	if( otcp && *otcp != ootc ) /* if 'polluted' *otcp contents... */
+		*otcp = ootc; /* ...revert them. TODO: do this within rsb__rblk_tune_inner. */
+wos:	/* want only split */
+	if(! aiap->maxss )
+	{
+		goto ret;
+	}
+
+	errval = rsb__rblk_tune_inner(mtxOpp,aiap->maxss,aiap,epsp,&aoap->otpos,&aoap->btpos,&aoap->otpo,otcp,&aoap->btpo,&aoap->cc,&aoap->ct,RSB_BOOL_TRUE,oasp,attrp);
+
+	if(RSB_SOME_ERROR(errval))
+	{
+		RSB_PERR_GOTO(ret,RSB_ERRM_ES);
+	}
+ret:
+	return errval;
+}
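+
+/*
+ * Strategy recap of the above, as a sketch: with e.g. maxms = maxss = 16, the
+ * tuner first attempts up to 16 leaf-merging steps; only if these bring no
+ * speedup (otpo == btpo) does it fall back to up to 16 splitting steps, first
+ * restoring any thread-count guess the failed merging stage may have changed.
+ */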
+
+static rsb_err_t rsb__tune_spxx_bos(struct rsb_mtx_t ** mtxOpp, rsb_real_t *tsfp, rsb_int_t*epsp, const struct rsb_aia_t*aiap, struct rsb_aoa_t*aoap, struct rsb_oas_t*oasp,  struct rsb_attr_t *attrp)
+{
+	/*
+	 * Original back-end function of librsb-1.1 auto tuning.
+	 * Enriched with librsb-1.2 auto tuning functionality.
+	 * It applies the auto tuning technique the user requested.
+	 * It allocates/deallocates the necessary vectors if these are not provided.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	const rsb_time_t tg = rsb__timer_granularity();
+	rsb_aligned_t alpha[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_aligned_t  beta[RSB_CONST_ENOUGH_ALIGNED_FOR_ANY_TYPE];
+	rsb_real_t tsf = 1.0, sfm = 1.0; /* total speed-up factor / speed up factor multiplier */
+	rsb_int_t ri, gr = RSB_MIN(2,aiap->maxr); /* round index / grace rounds */
+	const rsb_int_t rc = aiap->maxr; /* round count */
+	double ospv = RSB_REAL_ZERO; /* optimal subdivision parameter value */
+	void *tBp = NULL, *tCp = NULL; /* temporary  B / C  pointers */
+	const struct rsb_mtx_t * mtxBp = NULL; /* Base matrix */
+	rsb_time_t tatt = RSB_TIME_ZERO, tiot = RSB_TIME_ZERO; /* total  auto tuning / i.o. time */
+	rsb_time_t tct = RSB_TIME_ZERO; /* total constructor time */
+	rsb_time_t fotpo = RSB_TIME_ZERO; /* first original time per operation */
+	rsb_int_t mt = rsb__set_num_threads(RSB_THREADS_GET_MAX_SYS);
+	rsb_int_t tn = RSB_DT_SPEC_THREADS_TNP(aiap->tnp) ? *aiap->tnp : 0;
+	rsb_bool_t only_one_sample = (mtxOpp == NULL) && RSB_DT_SAME_THREADS_TNP(aiap->tnp);
+	struct rsb_aoa_t aoa; /* autotuning output args */
+#if RSB_ALLOW_INTERNAL_GETENVS
+	if(getenv("RSB_AT_GR" )) gr  = rsb__util_atoi(getenv("RSB_AT_GR" )); /* override: grace rounds */
+#endif /* RSB_ALLOW_INTERNAL_GETENVS */
+
+	RSB_BZERO_P(&aoa);
+	RSB_THREAD_STATS
+
+	rsb__attr_init(attrp,aiap->verbose>1?RSB_BOOL_TRUE:RSB_BOOL_FALSE,aiap->mtxAp,aiap->mtxns,oasp->transA,oasp->nrhs,oasp->op);
+
+	if( mtxOpp && *mtxOpp )
+		mtxBp = *mtxOpp;
+	else
+		mtxBp = aiap->mtxAp;
+
+	if( RSB_DT_SPEC_THREADS_TNP(aiap->tnp) && *aiap->tnp > mt )
+	{
+		/* if max thread count is exceeded, the code will crash (this shall be fixed for resilience purposes) */
+		errval = RSB_ERR_BADARGS;
+	       	RSB_PERR_GOTO(err,"Requested an excessive threads count !");
+	}
+	
+	if( ( mtxOpp != NULL ) && ( *mtxOpp != NULL ) && ( aiap->mtxAp != NULL ) )
+	{
+		errval = RSB_ERR_BADARGS;
+	       	RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	
+	if( mtxBp == NULL )
+	{
+		errval = RSB_ERR_BADARGS;
+	       	RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+	
+	if( ( mtxOpp == NULL ) && ( aiap->mtxAp != NULL ) && ( aiap->tnp == NULL) )
+	{
+		errval = RSB_ERR_BADARGS;
+	}
+	
+/*
+	if( tnp == NULL && mtxOpp == NULL )
+	{
+		errval = RSB_ERR_BADARGS;
+	       	RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+*/
+	if(/*RSB_VERBOSE_FOR_MSG*/(aiap->verbose))
+	{
+		if(only_one_sample)
+			RSB_STDOUT("Will sample matrix: ");
+		else
+			RSB_STDOUT("Will autotune matrix: ");
+		RSB_STDOUT(RSB_PRINTF_MATRIX_AT_SUMMARY_ARGS(mtxBp));
+		RSB_STDOUT(".\n");
+	}
+
+	if(aiap->tnp && mtxOpp == NULL && mtxBp->all_leaf_matrices_n<2 )
+	{
+		/* FIXME: special case */
+		if(RSB_VERBOSE_FOR_MSG(aiap->verbose))
+			RSB_STDOUT("Selected threads tuning but matrix has only %d leaves. Skipping.\n",mtxBp->all_leaf_matrices_n);
+	       	/* RSB_PERR_GOTO(err,RSB_ERRM_ES); */
+	}
+
+	if( oasp->alphap == NULL )
+		oasp->alphap = &alpha[0];
+	if( oasp->betap == NULL )
+		oasp->betap = &beta[0];
+	if( oasp->nrhs < 1 )
+		oasp->nrhs = 1;
+	if( oasp->ldC == 0 )
+		oasp->ldC = rsb_do_get_rows_of(mtxBp,oasp->transA);
+	if( oasp->ldB == 0 )
+		oasp->ldB = rsb_do_get_columns_of(mtxBp,oasp->transA);
+
+	if( oasp->ldC < rsb_do_get_rows_of(mtxBp,oasp->transA) || oasp->ldB < rsb_do_get_columns_of(mtxBp,oasp->transA) )
+	{
+		errval = RSB_ERR_BADARGS;
+	       	RSB_PERR_GOTO(err,RSB_ERRM_ES);
+	}
+
+	if( oasp->Bp == NULL )
+	{
+		oasp->Bp = tBp = rsb__calloca_vector(oasp->ldB*oasp->nrhs,mtxBp->typecode);
+		if( !tBp )
+		{
+			errval = RSB_ERR_INTERNAL_ERROR;
+		       	RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}
+	}
+
+	if( oasp->Cp == NULL )
+	{
+		oasp->Cp = tCp = rsb__calloca_vector(oasp->ldC*oasp->nrhs,mtxBp->typecode);
+		if( !tCp )
+		{
+			errval = RSB_ERR_INTERNAL_ERROR;
+			RSB_PERR_GOTO(err,RSB_ERRM_ES);
+		}
+	}
+
+	if(aiap->verbose)
+	if(aiap->maxr > 1) /* FIXME: this is for the case in which the routine is invoked from the merge/split based one. */
+	{
+		rsb_int_t ctn = 0;
+		RSB_DO_REINIT_SINGLE_VALUE_GET(RSB_IO_WANT_EXECUTING_THREADS,&ctn,errval);
+		if( only_one_sample )
+			RSB_STDOUT("Sampling");
+		else
+			RSB_STDOUT("Starting autotuning");
+		RSB_STDOUT(" (%d x %lg s stages, transA=%c, nrhs=%d, timer gran.=%lg), %d suggested as starting thread count%s.\n",aiap->maxr,aiap->maxt,RSB_TRANSPOSITION_AS_CHAR(oasp->transA),oasp->nrhs,tg,ctn,(tn==0)?"(default)":"");
+	}
+
+	if( mtxOpp )
+	if(RSB_VERBOSE_FOR_SAVING(aiap->verbose))
+		RSB_ATR_SAVE(errval,aiap->mtxns,mtxBp->typecode,oasp->transA,oasp->nrhs,ospv,"r",0,0,mtxBp,tiot,oasp->op);
+
+	if(RSB_SOME_ERROR(errval = rsb__fill_with_ones(beta ,mtxBp->typecode,1,1))){ RSB_PERR_GOTO(err,RSB_ERRM_ES)}
+	if(RSB_SOME_ERROR(errval = rsb__fill_with_ones(alpha,mtxBp->typecode,1,1))){ RSB_PERR_GOTO(err,RSB_ERRM_ES)}
+
+	if(aiap->maxr == 0)
+	{
+		/* Use this routine as benchmark only :-) */
+		tsf = 1.0;
+		if(/*RSB_VERBOSE_FOR_MSG*/(aiap->verbose))
+			RSB_STDOUT("Taking a single sample of performance.\n");
+		errval = rsb__do_bench_spxm_t(NULL,&fotpo,NULL,mtxBp,aiap->maxt,aiap->mintimes,aiap->maxtimes,aiap->verbose,oasp,NULL/* TODO: attrp/tattrp */,NULL);
+		aoa.otpo = aoa.btpo = fotpo;
+		RSB_ASSIGN_IF(aoap,aoa)
+		goto err;
+	}
+
+	RSB_DO_REINIT_SINGLE_VALUE_GET(RSB_IO_WANT_SUBDIVISION_MULTIPLIER,&ospv,errval);
+	for(ri=0;ri<rc;++ri)
+	{
+		rsb_time_t ratt = RSB_TIME_ZERO, riot = RSB_TIME_ZERO, rct = RSB_TIME_ZERO; /* round  auto tuning / i.o. / constructor  time */
+
+		if( aiap->maxms > 0 || aiap->maxss > 0 )
+		{
+			struct rsb_mtx_t * mtxOp = mtxOpp ? *mtxOpp : NULL;
+			rsb_int_t eps = 0; /* effective partitioning steps */
+
+			tatt = - rsb_time();
+		        RSB_DEBUG_ASSERT(aiap->mtxAp);
+			errval = rsb__tune_spxx_blk(mtxOpp,&eps,aiap->tnp,aiap,&aoa,oasp,attrp);
+			fotpo = aoa.otpo;
+#if RSB_AT_DESTROYS_MTX
+			if(mtxOpp && *mtxOpp != mtxOp)
+			{
+		 		RSB_MTX_FREE(mtxOp);
+			}
+#endif /* RSB_AT_DESTROYS_MTX */
+			RSB_ASSIGN_IF(epsp,eps)
+			RSB_ASSIGN_IF(aoap,aoa)
+			tatt += rsb_time();
+			tct += aoa.ct;
+			goto done; /* no reason to continue */
+		}
+
+		errval = rsb__do_tune_spxm_round(&sfm,mtxOpp,&ospv,aiap,&riot,&ratt,&rct,&aoa,oasp,attrp);
+		tatt += ratt;
+		tiot += riot;
+		tct += rct;
+
+		if( fotpo == RSB_TIME_ZERO ) /* at first iteration */
+			fotpo = aoa.otpo;
+
+		if(RSB_SOME_ERROR(errval)){ RSB_PERR_GOTO(err,RSB_ERRM_EM); }
+
+		tsf *= sfm;
+
+		if( mtxOpp && ( mtxBp != *mtxOpp ) )
+		{
+			if(RSB_VERBOSE_FOR_MSG(aiap->verbose))
+			{
+				RSB_STDOUT("Found a better candidate clone matrix (original is gone): ");
+				RSB_STDOUT(RSB_PRINTF_MATRIX_AT_SUMMARY_ARGS(*mtxOpp));
+				RSB_STDOUT("\n ");
+			}
+			mtxBp = *mtxOpp;
+		}
+
+		/* if(rc>1) */
+		if( !only_one_sample )
+		if(aiap->verbose)
+		{
+			RSB_STDOUT("Last tuner inner round (%d of %d) took %lg s (eq. to %6.1lg/%6.1lg old/new op.times), gained loc./glob. speedup of %lg x (%lg : %lg) / %lg x (%lg : %lg). ",
+					ri+1,rc,ratt,ratt/fotpo,ratt/aoa.btpo,/*tsf*/ aoa.otpo/aoa.btpo,aoa.otpo,aoa.btpo, fotpo/aoa.btpo,fotpo,aoa.btpo);
+			if(aoa.btpo < aoa.otpo)
+				RSB_STDOUT("This is amortizable in %zd op.times.\n", (size_t)ceil((ratt)/(aoa.otpo-aoa.btpo)));
+			else
+				RSB_STDOUT("This is not amortizable !\n");
+		}
+
+		if( ( ( sfm <= 1.0 ) && ( mtxOpp != NULL ) ) || ( ( mtxOpp == NULL ) && ( tn == *aiap->tnp ) ) )
+		{
+			if( !only_one_sample )
+			if(aiap->verbose)
+			{
+				RSB_STDOUT("Auto tuning inner round %d did not find a configuration better than the original.\n",ri+1);
+			}
+			if(gr > 1)
+			{
+				gr--;
+				if( !only_one_sample )
+				if(aiap->verbose)
+					RSB_STDOUT("Will attempt %d more 'grace' rounds.\n",gr);
+			}
+			else
+			{
+				if( !only_one_sample )
+				if(aiap->verbose)
+				if(ri+1<rc)
+					RSB_STDOUT("Skipping further auto tuning.\n");
+				goto done;
+			}
+		}
+		/* if(!mtxOpp)
+			tnpp = *tnp; */
+	} /* ri */
+done:
+	tsf = fotpo/aoa.btpo; /* FIXME */
+
+	if( ! only_one_sample )
+	if(aiap->verbose)
+	{
+		RSB_STDOUT("In %d tuning rounds (tot. %0.2lgs, %0.2lgs for constructor, %d clones) obtained ",RSB_MIN(ri+1,rc),tatt,tct,aoa.cc);
+		if(fotpo==aoa.btpo)
+	       		RSB_STDOUT("NO speedup (best stays %0.4lg Mflops).",aiap->ofe/fotpo);
+		else
+		{
+	      		RSB_STDOUT("%s %6.1lf%% (%0.4lgx) (from %0.4lg to %0.4lg Mflops).",
+				(fotpo < aoa.btpo) ? "a dubious SLOWDOWN of" : "a SPEEDUP of",
+				RSB_SPEEDUP_TO_PCT(fotpo/aoa.btpo),fotpo/aoa.btpo,aiap->ofe/fotpo,aiap->ofe/aoa.btpo);
+		}
+		if(tiot)
+			RSB_STDOUT(" Employed %0.2lgs for I/O of matrix plots.",tiot);
+		RSB_STDOUT("\n");
+	}
+err:
+	RSB_THREAD_STATS
+	RSB_ASSIGN_IF(tsfp,tsf)
+	RSB_CONDITIONAL_FREE_ALLOCA(tBp);
+	RSB_CONDITIONAL_FREE_ALLOCA(tCp);
+
+	return errval;
+}
+
+#define RSB_ATTEMPTING_MERGING_11_AND_12_AT 1
+
+rsb_err_t rsb__tune_spxx( struct rsb_mtx_t ** mtxOpp, rsb_real_t *tsfp, rsb_int_t *tnp, rsb_int_t maxr, rsb_int_t maxms, rsb_int_t maxss, rsb_int_t mintimes, rsb_int_t maxtimes, rsb_time_t maxt, rsb_trans_t transA, const void * alphap, const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t nrhs, rsb_flags_t order, const void * Bp, rsb_nnz_idx_t ldB, const void * betap, void * Cp, rsb_nnz_idx_t ldC, enum rsb_op_t op, rsb_int_t*epsp, rsb_time_t*otpopp, rsb_time_t*btpopp, int verbose, const char * m [...]
+{
+	/*
+		otpopp = original time per operation pointer
+		btpopp =   best   time per operation pointer
+	  
+	 	Note: it is possible that *mtxOpp will be overwritten and an error code returned.
+	 */
+	rsb_err_t errval = RSB_ERR_BADARGS;
+        
+	RSB_ASSERT( ! ( mtxAp != NULL && mtxOpp != NULL && *mtxOpp != NULL ) );
+
+	if( mtxAp != NULL && mtxOpp != NULL && *mtxOpp != NULL )
+	{
+		errval = RSB_ERR_BADARGS;
+		RSB_PERR_GOTO(ret,RSB_ERRM_ES);
+	}
+
+        if(!mtxOpp || !*mtxOpp)
+        {
+		if(verbose && ( maxms || maxss ) )
+			RSB_STDOUT("Setting old tuning style.");
+                maxms = maxss = 0; /* merge / split cannot apply in this case */
+        }
+
+#if RSB_ALLOW_INTERNAL_GETENVS
+	if(getenv("RSB_MAXMS")) maxms = rsb__util_atoi(getenv("RSB_MAXMS")); /* override: max merge steps */
+	if(getenv("RSB_MAXSS")) maxss = rsb__util_atoi(getenv("RSB_MAXSS")); /* override: max subdivide steps */
+	if(getenv("RSB_MAXR" )) maxr  = rsb__util_atoi(getenv("RSB_MAXR" )); /* override: max rounds */
+#endif /* RSB_ALLOW_INTERNAL_GETENVS */
+
+        if( mtxAp || ( mtxOpp && *mtxOpp ) )
+        {
+        	/*rsb_int_t mintimes = RSB_AUT0_TUNING_DEFAULT_TIMES;*/	/* min iterations for taking operation time samples */
+		/*rsb_int_t maxtimes = RSB_AUT0_TUNING_DEFAULT_TIMES;*/ /* samples when benchmarking */
+        	const rsb_time_t tg = rsb__timer_granularity();
+        	struct rsb_oas_t oas = { op, transA, alphap, nrhs, order, Bp, ldB, betap, Cp, ldC }; /* FIXME: these may be overwritten */
+        	struct rsb_aia_t aia = { maxt, mintimes, maxtimes, mtxns, RSB_PERF_ZERO, verbose, tnp, mtxAp, maxr, maxms, maxss, 0 /* =cont.part. */ };
+        	struct rsb_aoa_t aoa;
+
+		RSB_BZERO_P(&aoa);
+		aia.ofe = rsb__estimate_mflops_per_op_spmv_uaua(mtxAp ? mtxAp : *mtxOpp /*aia.mtxAp*/) * nrhs;
+	        /* maxt = ( maxt <= RSB_TIME_ZERO ) ? RSB_AUT0_TUNING_DEFAULT_TIME : maxt;*/
+
+	        if ( aia.maxt < RSB_TIME_ZERO ) aia.mintimes = - ceil( aia.maxt );
+        	aia.maxt = ( aia.maxt <= RSB_TIME_ZERO ) ? tg : aia.maxt; /* FIXME: this code seems obscure */
+
+        	aia.mtxns = ( aia.mtxns == NULL ) ? "rsb" : aia.mtxns; /* matrix name string */
+        	aia.verbose = RSB_MAX(rsb_global_session_handle.verbose_tuning,aia.verbose);
+#if RSB_ALLOW_INTERNAL_GETENVS
+		if(getenv("RSB_VERBOSE_TUNING")) aia.verbose = rsb__util_atoi(getenv("RSB_VERBOSE_TUNING"));
+#endif /* RSB_ALLOW_INTERNAL_GETENVS*/
+		if( aia.maxr > RSB_CONST_MAX_TUNING_ROUNDS )
+		{
+#if 1
+			if(RSB_VERBOSE_FOR_MSG(aia.verbose))
+				RSB_STDOUT("The specified %d tuning rounds is exxagerated: forcing down to %d ;)\n",aia.maxr,RSB_CONST_MAX_TUNING_ROUNDS);
+			aia.maxr = RSB_MIN(aia.maxr,RSB_CONST_MAX_TUNING_ROUNDS);
+#else
+			errval = RSB_ERR_INTERNAL_ERROR;
+		       	RSB_PERR_GOTO(err,RSB_ERRM_IE);
+#endif
+		}
+#if RSB_ALLOW_INTERNAL_GETENVS
+		if(getenv("RSB_MINTIMES")) aia.mintimes = rsb__util_atoi(getenv("RSB_MINTIMES"));
+		if(getenv("RSB_CONTINUE_PARTITIONING")) aia.continue_partitioning = rsb__util_atoi(getenv("RSB_CONTINUE_PARTITIONING"));
+		if(getenv("RSB_MAXTIMES")) aia.maxtimes = rsb__util_atoi(getenv("RSB_MAXTIMES"));
+#endif /* RSB_ALLOW_INTERNAL_GETENVS*/
+
+        	errval = rsb__tune_spxx_bos( mtxOpp, tsfp, epsp, &aia, &aoa, &oas, attrp );
+
+#if 0
+		//if(RSB_VERBOSE_FOR_MSG(aia.verbose))
+		if(aia.verbose)
+		{
+			RSB_STAT_DUMP_TS(aoa.btpos);
+			RSB_STAT_DUMP_TS(aoa.otpos);
+		}
+#endif
+        	RSB_ASSIGN_IF(btposp,aoa.btpos)
+        	RSB_ASSIGN_IF(otposp,aoa.otpos)
+        	RSB_ASSIGN_IF(btpopp,aoa.btpo)
+        	RSB_ASSIGN_IF(otpopp,aoa.otpo)
+        }
+        else
+                goto ret;
+ret:
+	return errval;
+}
+
+#if RSB_ATTEMPTING_MERGING_11_AND_12_AT
+#define RSB_CONST_DEF_MS_AT_AUTO_STEPS /*RSB_CONST_MS_AT_AUTO_STEPS*/6 /* 1.2 (merged/split clone) based autotuning */
+#else
+#define RSB_CONST_DEF_MS_AT_AUTO_STEPS 0 /* 1.1 (full clone) based autotuning */
+#endif
+
+rsb_err_t rsb__do_tune_spmm(struct rsb_mtx_t ** mtxOpp, rsb_real_t *sfp, rsb_int_t *tnp, rsb_int_t maxr, rsb_time_t maxt, rsb_trans_t transA, const void * alphap, const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t nrhs, rsb_flags_t order, const void * Bp, rsb_nnz_idx_t ldB, const void * betap, void * Cp, rsb_nnz_idx_t ldC)
+{
+	rsb_err_t errval = RSB_ERR_BADARGS;
+
+	errval = rsb__tune_spxx( mtxOpp, sfp, tnp, RSB_MAX(maxr,1), RSB_CONST_DEF_MS_AT_AUTO_STEPS, RSB_CONST_DEF_MS_AT_AUTO_STEPS, RSB_CONST_MS_AT_AUTO_STEPS, RSB_AUT0_TUNING_DEFAULT_TIMES, maxt, transA, alphap, mtxAp, nrhs, order, Bp, ldB, betap, Cp, ldC, rsb_op_spmv, NULL, NULL, NULL, RSB_AUT0_TUNING_SILENT, NULL, NULL, NULL, NULL);
+	return errval;
+}
+
+rsb_err_t rsb__do_tune_spsm(struct rsb_mtx_t ** mtxOpp, rsb_real_t *sfp, rsb_int_t *tnp, rsb_int_t maxr, rsb_time_t maxt, rsb_trans_t transA, const void * alphap, const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t nrhs, rsb_flags_t order, const void * Bp, rsb_nnz_idx_t ldB, const void * betap, void * Cp, rsb_nnz_idx_t ldC)
+{
+	rsb_err_t errval = RSB_ERR_BADARGS;
+
+	errval = rsb__tune_spxx( mtxOpp, sfp, tnp, RSB_MAX(maxr,1), RSB_CONST_DEF_MS_AT_AUTO_STEPS, RSB_CONST_DEF_MS_AT_AUTO_STEPS, RSB_CONST_MS_AT_AUTO_STEPS, RSB_AUT0_TUNING_DEFAULT_TIMES, maxt, transA, alphap, mtxAp, nrhs, order, Bp, ldB, betap, Cp, ldC, rsb_op_spsvlt, NULL, NULL, NULL, RSB_AUT0_TUNING_SILENT, NULL, NULL, NULL, NULL);
+	return errval;
+}
+
+/* @endcond */
diff --git a/rsb_tune.h b/rsb_tune.h
new file mode 100644
index 0000000..529714b
--- /dev/null
+++ b/rsb_tune.h
@@ -0,0 +1,160 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/**
+ * @file
+ * @brief Auto tuning related macros and declarations.
+ * @author Michele Martone
+ * */
+#ifndef RSB_TUNE_H_INCLUDED
+#define RSB_TUNE_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "rsb.h"
+#include "rsb_internals.h"
+
+/* FIXME: the following constants need a systematization... */
+#define RSB_AUT0_TUNING_DEFAULT_TIME 10.0
+#define RSB_AUT0_TUNING_DEFAULT_TIMES 10
+#define RSB_AUT0_TUNING_SILENT 0
+#define RSB_AUT0_TUNING_VERBOSE 1
+#define RSB_AUT0_TUNING_QUATSCH 2
+#define RSB_AT_TIME_AUTO 0.0
+#define RSB_AT_NTIMES_AUTO 0
+#define RSB_AT_MIN_TIMES 3
+#define RSB_AT_MAX_TIME 3.0
+#define RSB_TRACE_MAX_THREADS_P1 (1 + RSB_CONST_MAX_SUPPORTED_THREADS)
+#define RSB_AT_THREADS_AUTO 0 /* see RSB_THREADS_AUTO (different meaning): this one may be used for auto+overwrite */
+#define RSB_CONST_AUTO_TUNING_ROUNDS 1
+#define RSB_CONST_MAX_TUNING_SUBROUNDS 5
+#define RSB_CONST_MS_AT_AUTO_STEPS 10 /* measurement steps in the autotuning */
+#define RSB_CONST_MAX_TUNING_SAMPLES ((RSB_CONST_MAX_TUNING_ROUNDS)*(1+RSB_CONST_MAX_TUNING_SUBROUNDS))
+#define RSB_AT_WANT_BEST_TIME 1 /* Autotuning shall base on 'best time' for a given (matrix, op, sampling run) */
+#define RSB_DT_SAME_THREADS_TNP(TNP) ( (TNP)==NULL || *(TNP)== 0 ) /* On tnp==NULL will use the default thread count. */
+#define RSB_DT_THREADS_TUNE_TNP(TNP) ( (TNP)!=NULL && *(TNP) < 0 ) /* On tnp!=NULL && *tnp<0  will probe different thread counts. */
+#define RSB_DT_SPEC_THREADS_TNP(TNP) ( (TNP)!=NULL && *(TNP) > 0 ) /* On tnp!=NULL && *tnp>0  will use the given thread count. */
+#define RSB_AT_DESTROYS_MTX 1 /* whether autotuning is allowed to destroy a suboptimal matrix after tuning */
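+/*
+ Illustrative example of the three RSB_DT_* cases above, assuming a
+ caller-owned rsb_int_t tn (a sketch, not a librsb snippet):
+
+	tnp = NULL;          // RSB_DT_SAME_THREADS_TNP: default thread count
+	tn =  0; tnp = &tn;  // RSB_DT_SAME_THREADS_TNP: same as above
+	tn = -1; tnp = &tn;  // RSB_DT_THREADS_TUNE_TNP: probe different thread counts
+	tn =  4; tnp = &tn;  // RSB_DT_SPEC_THREADS_TNP: use exactly 4 threads
+*/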
+
+#define RSB_REPEAT_MIN_T(CUR_T,MIN_T,NOS) ( (MIN_T) > 0.0 ? ( (CUR_T) <= (MIN_T) ) : (NOS) )
+#define RSB_REPEAT_MIN_I(CUR_I,MIN_I,NOS) ( (MIN_I) > 0   ? ( (CUR_I) <  (MIN_I) ) : (NOS) )
+#define RSB_REPEAT_MAX_T(CUR_T,MAX_T,NOS) ( (MAX_T) > 0.0 ? ( (CUR_T) <= (MAX_T) ) : (NOS) )
+#define RSB_REPEAT_MAX_I(CUR_I,MAX_I,NOS) ( (MAX_I) > 0   ? ( (CUR_I) <  (MAX_I) ) : (NOS) )
+
+#define RSB_REPEAT(CUR_T,CUR_I,MIN_T,MIN_I,MAX_T,MAX_I) ( \
+	( /* conditions sufficient for continuation */ \
+	  RSB_REPEAT_MIN_T((CUR_T),(MIN_T),0) || \
+	  RSB_REPEAT_MIN_I((CUR_I),(MIN_I),0) \
+       	) || \
+	( /* conditions necessary for continuation */ \
+	  RSB_REPEAT_MAX_I((CUR_I),(MAX_I),1) && \
+	  RSB_REPEAT_MAX_T((CUR_T),(MAX_T),1) && \
+	1 ) )
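+/*
+ Illustrative sampling loop driven by RSB_REPEAT (a sketch, not a librsb
+ snippet); perform_op() and the min/max bounds are hypothetical:
+
+	rsb_time_t it = rsb_time(), ct = RSB_TIME_ZERO;
+	rsb_int_t i = 0;
+	do { perform_op(); ++i; ct = rsb_time() - it; }
+	while( RSB_REPEAT(ct, i, min_t, min_i, max_t, max_i) );
+
+ The loop always honours the min time/iterations bounds, and otherwise stops
+ as soon as either max bound is exceeded.
+*/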
+
+#define RSB_REPEAT_(CUR_T,CUR_I,MIN_T,MIN_I,MAX_T,MAX_I) ( \
+	  RSB_REPEAT_MAX_T((CUR_T),(MAX_T),0)  \
+ )
+
+/* A substitute for obsolete RSB_REPEAT_MAX_T. */
+#define RSB_SAMPLE_STAT(IT,CT,DT,TT,BT,WT,SS,JF,TI) { \
+		(CT) = rsb_time(); \
+		(DT) = (CT) - (DT); \
+		(BT) = RSB_MAX((JF),RSB_MIN((DT), (BT))); /* Flooring at JF (the timer granularity) avoids zero best times. */ \
+		(WT) = RSB_MAX((DT), (WT)); \
+		(TT) = (CT) - (IT); \
+		(SS)+= (DT)*(DT); \
+		(DT) = (CT); \
+		(TI) ++; \
+	}
+
+#define RSB_SPEEDUP_TO_PCT(X) ((((double)(X))-1.0)*100.0) /* best coupled to a %6.1lf printf code; FIXME: shall use this format throughout */
+#define RSB_PCT(F,T) ((((double)(F)) / ((double)(T)))*100.0) /* FIXME: PCT -> PCNT */
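+/* E.g.: RSB_SPEEDUP_TO_PCT(1.25) == 25.0 (a 1.25x speedup is +25%), and
+   RSB_PCT(1,4) == 25.0. */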
+#define RSB_STAT_DUMP(IT,TN,CT,DT,TT,BT,WT,SS,TI) { \
+	RSB_STDOUT("%zd iterations (%d th.) took %0.4lgs; avg %0.4lgs ( +/- %6.2lf/%6.2lf %%); best %0.4lgs; worst %0.4lgs; std dev. %0.4lg (taking %s).\n", \
+			(size_t)(TI),TN,TT,TT/TI, \
+			RSB_PCT(((TT/TI)-(BT)),(TT/TI)), \
+			RSB_PCT(((WT)-(TT/TI)),(TT/TI)), \
+			BT,WT, \
+			sqrt( (SS)/(TI) - (TT/TI)*(TT/TI) ),  \
+			RSB_AT_WANT_BEST_TIME ? "best":"avg" \
+			); \
+	}
+
+#define RSB_STAT_TAKE(IT,TN,CT,DT,TT,BT,WT,SS,TI,TSTP) if(TSTP) /* struct rsb_ts_t */ { \
+		(TSTP)->avg = TT/TI;	\
+		(TSTP)->min = BT;	\
+		(TSTP)->max = WT;	\
+		(TSTP)->sd  = sqrt( (SS)/(TI) - (TT/TI)*(TT/TI) );	\
+		(TSTP)->ns = TI;	\
+	}
+
+#define RSB_STAT_DUMP_TS(TS) { RSB_STDOUT("%zd iterations took avg %0.4lgs ( +/- %6.2lf/%6.2lf %%); best %0.4lgs; worst %0.4lgs; std dev. %0.4lg.\n", (TS).ns, (TS).avg, RSB_PCT((((TS).avg)-((TS).min)),((TS).avg)), RSB_PCT((((TS).max)-((TS).avg)),((TS).avg)), (TS).min, (TS).max, (TS).sd); }
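+
+#if 0
+/* Illustrative sketch (not part of librsb) combining the macros above:
+ * sample perform_op() (hypothetical) for about one second, then summarize.
+ * The 1e30 initial best-time bound is an assumed 'impossibly big' value. */
+extern void perform_op(void); /* hypothetical operation under measurement */
+static void rsb_sample_stat_example(void)
+{
+	struct rsb_ts_t ts;
+	const rsb_time_t jf = rsb__timer_granularity();
+	rsb_time_t it, ct = RSB_TIME_ZERO, dt, tt = RSB_TIME_ZERO;
+	rsb_time_t bt = 1e30, wt = RSB_TIME_ZERO, ss = RSB_TIME_ZERO;
+	rsb_int_t ti = 0;
+
+	dt = it = rsb_time();
+	do { perform_op(); RSB_SAMPLE_STAT(it,ct,dt,tt,bt,wt,ss,jf,ti); }
+	while( RSB_REPEAT_MAX_T(tt, 1.0, 0) );
+	RSB_STAT_TAKE(it,1/*threads*/,ct,dt,tt,bt,wt,ss,ti,&ts);
+	RSB_STAT_DUMP_TS(ts);
+}
+#endif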
+
+struct rsb_tattr_t /* threads auto tuning trace */
+{
+	rsb_time_t tpo[RSB_TRACE_MAX_THREADS_P1]; /* time per operation, per thread count */
+	rsb_int_t nit[RSB_TRACE_MAX_THREADS_P1]; /* number of iterations */
+	rsb_real_t bpn; /* bytes per nonzero */
+	rsb_perf_t ofe; /* operation mflops estimate */
+	rsb_perf_t btpo; /* best time per operation */
+	rsb_perf_t dtpo; /* time per operation */
+	rsb_time_t ttt; /* threads tuning time */
+	rsb_int_t mint,maxt,optt,deft; /* min/max/optimal/default threads */
+	/* what about bytes per non zero ? */
+	/*const struct rsb_mtx_t*mtxAp;*/
+	struct rsb_mtx_t mtxAc; /* TODO: rsb__blank_ptrs() */
+	rsb_int_t vl; /* verbosity level */
+};
+
+struct rsb_attr_t /* auto tuning trace */
+{
+	rsb_bool_t dtr;  /* dump trace? */
+	FILE*lfp,*pfp;	 /* log/plot file pointer */
+	rsb_int_t trc,br;   /* tuning rounds count (max allowed RSB_CONST_MAX_TUNING_ROUNDS-1), best round */
+	rsb_trans_t transA;
+	rsb_int_t nrhs;
+	rsb_int_t opid;  /* TODO: use this */
+	rsb_type_t typecode;
+	struct rsb_tattr_t clattr; /* competitor library's auto tuning trace */
+	struct rsb_tattr_t tattra[RSB_CONST_MAX_TUNING_SAMPLES]; /* threads auto tuning trace array */
+       	char bname[RSB_MAX_FILENAME_LENGTH];
+       	char mname[RSB_MAX_FILENAME_LENGTH];
+};
+
+void rsb__tattr_init(struct rsb_tattr_t* TTRP, const struct rsb_mtx_t*MTXAP, rsb_coo_idx_t nrA, rsb_nnz_idx_t nnz, rsb_type_t typecode, rsb_flags_t flags, rsb_int_t nrhs);
+void rsb__tattr_sets(struct rsb_tattr_t* ttrp, rsb_int_t dnt, rsb_int_t nt, rsb_time_t tpo, rsb_int_t bnt, rsb_int_t nits);
+void rsb__attr_dump(struct rsb_attr_t*TTRP);
+
+rsb_err_t rsb__tune_spxx( struct rsb_mtx_t ** mtxOpp, rsb_real_t *tsfp, rsb_int_t *tnp, rsb_int_t maxr, rsb_int_t maxms, rsb_int_t maxss, rsb_int_t mintimes, rsb_int_t maxtimes, rsb_time_t maxt, rsb_trans_t transA, const void * alphap, const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t nrhs, rsb_flags_t order, const void * Bp, rsb_nnz_idx_t ldB, const void * betap, void * Cp, rsb_nnz_idx_t ldC, enum rsb_op_t op, rsb_int_t*epsp, rsb_time_t*otpopp, rsb_time_t*btpopp, int verbose, const char * m [...]
+rsb_err_t rsb__do_tune_spmm(struct rsb_mtx_t ** mtxOpp, rsb_real_t *sfp, rsb_int_t *tnp, rsb_int_t maxr, rsb_time_t maxt, rsb_trans_t transA, const void * alphap, const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t nrhs, rsb_flags_t order, const void * Bp, rsb_nnz_idx_t ldB, const void * betap, void * Cp, rsb_nnz_idx_t ldC);
+rsb_err_t rsb__do_tune_spsm(struct rsb_mtx_t ** mtxOpp, rsb_real_t *sfp, rsb_int_t *tnp, rsb_int_t maxr, rsb_time_t maxt, rsb_trans_t transA, const void * alphap, const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t nrhs, rsb_flags_t order, const void * Bp, rsb_nnz_idx_t ldB, const void * betap, void * Cp, rsb_nnz_idx_t ldC);
+rsb_err_t rsb__do_bench_spxm(rsb_time_t *tpop, rsb_int_t *timesp, rsb_trans_t transA, const void * alphap, const struct rsb_mtx_t * mtxAp, rsb_coo_idx_t nrhs, rsb_flags_t order, const void * Bp, rsb_nnz_idx_t ldB, const void * betap, void * Cp, rsb_nnz_idx_t ldC, rsb_time_t maxdt, rsb_int_t mintimes, enum rsb_op_t op, rsb_int_t maxtimes, int verbose, rsb_int_t *tnp, struct rsb_ts_t * tstp);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* RSB_TUNE_H_INCLUDED */
+
+/* @endcond */
diff --git a/rsb_types.h b/rsb_types.h
new file mode 100644
index 0000000..d837c8a
--- /dev/null
+++ b/rsb_types.h
@@ -0,0 +1,579 @@
+
+
+/** @file
+    @brief
+    Macros and constants which are type specific.
+    \n
+    Here reside declarations related to the supported matrix numerical types, and other declarations
+    depending on the build time options.
+    \n
+    If you wish to use this library with different matrix numerical types, you shall regenerate
+    the library source code accordingly; see the README file for how to do this.
+    \n
+    Only a small part of these declarations is needed by the user (see \ref matrix_type_symbols_section).
+    \n
+    Therefore, only the documented declarations are actually meant to be used in functions;
+    please regard the remaining ones as internal.
+  */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+#ifndef RSB_TYPES_H_INCLUDED
+#define RSB_TYPES_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+
+#ifndef __cplusplus
+/* complex.h is ISO C99 */
+#include <complex.h>
+#endif /* __cplusplus */
+/* 
+   Each one of the following symbols is assigned to a type which is supported
+   by an option set at library code generation time.
+   Other types may be enabled by regenerating the whole library code.
+   To enable types, please read the documentation.
+ */
+
+/* Miscellaneous version strings.
+  Adopting a naming scheme similar to that of png.h.
+ */
+#define RSB_LIBRSB_VER_STRING		"1.2.0"	/*!< Library version string. */
+#define RSB_HEADER_VERSION_STRING		"librsb version 1.2.0-rc2 - June 30, 2015"	/*!< Library header version string. */
+#define RSB_LIBRSB_VER_MAJOR		1	/*!< Major version. */
+#define RSB_LIBRSB_VER_MINOR		2	/*!< Minor version. */
+#define RSB_LIBRSB_VER_PATCH		0	/*!< Patch version. */
+#define RSB_LIBRSB_VER		10200	/*!< Version number. */
+#define RSB_LIBRSB_VER_DATE		RSB_M4_WANT_RSB_LIBRSB_VER_DATE	/*!< Version release date. */
+
+#define RSB_HAVE_TYPE_DOUBLE  1 /*!< Type double is supported, so RSB_HAVE_TYPE_DOUBLE  is defined .*/
+#define RSB_HAVE_TYPE_FLOAT  1 /*!< Type float is supported, so RSB_HAVE_TYPE_FLOAT  is defined .*/
+#define RSB_HAVE_TYPE_FLOAT_COMPLEX  1 /*!< Type float complex is supported, so RSB_HAVE_TYPE_FLOAT_COMPLEX  is defined .*/
+#define RSB_HAVE_TYPE_DOUBLE_COMPLEX  1 /*!< Type double complex is supported, so RSB_HAVE_TYPE_DOUBLE_COMPLEX  is defined .*/
+#define RSB_DEFAULT_TYPE double	/*!< The default numerical matrix type (can be used for declarations), used in the example programs. */
+#define RSB_DEFAULT_POSSIBLY_INTEGER_TYPE double /*!< The default, integer if possible, numerical type (can be used for declarations). */
+#define RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE float  /*!< The default, BLAS if possible, numerical type (can be used for declarations). */
+#define RSB_DEFAULT_TYPE_STRING "double"	/*!< A string specifying the name of the default type. */
+#define RSB_DEFAULT_POSSIBLY_INTEGER_TYPE_STRING "double" /*!< A string specifying the name of the default possibly integer type.*/
+#define RSB_DEFAULT_SYMMETRY RSB_SYMMETRY_U	/*!< The default symmetry flag. */
+#define RSB_DEFAULT_TRANSPOSITION RSB_TRANSPOSITION_N	/*!< The default transposition flag (no transposition). */
+#define RSB_ROWS_TRANSPOSITIONS_ARRAY	{RSB_TRANSPOSITION_N, RSB_TRANSPOSITION_T, RSB_TRANSPOSITION_C, RSB_INVALID_TRANS } /*!< An array with transposition constants. */
+
+/*!  This preprocessor index can be used to address the double-related arrays.  */
+#define RSB_TYPE_INDEX_DOUBLE  0
+/*!  This preprocessor index can be used to address the float-related arrays.  */
+#define RSB_TYPE_INDEX_FLOAT  1
+/*!  This preprocessor index can be used to address the float complex-related arrays.  */
+#define RSB_TYPE_INDEX_FLOAT_COMPLEX  2
+/*!  This preprocessor index can be used to address the double complex-related arrays.  */
+#define RSB_TYPE_INDEX_DOUBLE_COMPLEX  3
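+/* E.g., with const char * tns[] = RSB_MATRIX_TYPES_ARRAY (defined further
+   below), tns[RSB_TYPE_INDEX_FLOAT_COMPLEX] is "float complex" (an
+   illustrative usage, not a librsb API). */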
+
+/* @cond INNERDOC  */
+/*
+   Each one of the following symbols is assigned to an operation which is supported
+   by an option set at library code generation time.
+   \n
+   Other operations may be enabled by regenerating the whole library code.
+   To enable operations, please read the documentation.
+ */
+#define RSB_HAVE_OPTYPE_SPMV_UAUA  1
+#define RSB_HAVE_OPTYPE_SPMV_UAUZ  1
+#define RSB_HAVE_OPTYPE_SPMV_UXUA  1
+#define RSB_HAVE_OPTYPE_SPMV_UNUA  1
+#define RSB_HAVE_OPTYPE_SPMV_SASA  1
+#define RSB_HAVE_OPTYPE_SPSV_UXUA  1
+#define RSB_HAVE_OPTYPE_SPMV_SXSA  1
+#define RSB_HAVE_OPTYPE_SPSV_SXSX  1
+#define RSB_HAVE_OPTYPE_INFTY_NORM  1
+#define RSB_HAVE_OPTYPE_ROWSSUMS  1
+#define RSB_HAVE_OPTYPE_SCALE  1
+
+/*!
+ * These preprocessor indices can be used to address various mop-related arrays.
+ */
+#define RSB_OPTYPE_INDEX_SPMV_UAUA  0
+#define RSB_OPTYPE_INDEX_SPMV_UAUZ  1
+#define RSB_OPTYPE_INDEX_SPMV_UXUA  2
+#define RSB_OPTYPE_INDEX_SPMV_UNUA  3
+#define RSB_OPTYPE_INDEX_SPMV_SASA  4
+#define RSB_OPTYPE_INDEX_SPSV_UXUA  5
+#define RSB_OPTYPE_INDEX_SPMV_SXSA  6
+#define RSB_OPTYPE_INDEX_SPSV_SXSX  7
+#define RSB_OPTYPE_INDEX_INFTY_NORM  8
+#define RSB_OPTYPE_INDEX_ROWSSUMS  9
+#define RSB_OPTYPE_INDEX_SCALE  10
+#define RSB_OPTYPE_INDEX_MAT_STATS  11
+
+/**
+ \name Values for valid matrix coordinate index types flags.
+ */
+#define  RSB_COORDINATE_TYPE_C 0x01 /*!< Character code for type rsb_coo_idx_t.*/
+#define  RSB_COORDINATE_TYPE_H 0x02 /*!< Character code for type rsb_half_idx_t.*/
+/* @endcond */
+/**
+ \name Values for valid matrix transposition flags.
+ \anchor matrix_transposition_flags_section
+ The Hermitian flag will act as simple transposed, for non complex types.
+ */
+#define  RSB_TRANSPOSITION_N 0x4E /*!< N: Non transposed flag, valid for \ref rsb_trans_t typed variables. */
+#define  RSB_TRANSPOSITION_T 0x54 /*!< T: Transposed flag value, valid for \ref rsb_trans_t valued variables. */
+#define  RSB_TRANSPOSITION_C 0x43 /*!< C: Conjugated transpose flag, valid for \ref rsb_trans_t typed variables. */
+/* @cond INNERDOC  */
+/**
+ \name Values for valid matrix symmetry flags.
+ \anchor matrix_symmetry_flags_section
+ */
+#define  RSB_SYMMETRY_U 0x00 /*  */
+#define  RSB_SYMMETRY_S RSB_FLAG_SYMMETRIC /*  */
+#define  RSB_SYMMETRY_H RSB_FLAG_HERMITIAN /*  */
+/* @endcond */
+/**
+\name Values for inner diagonal specification values.
+ \anchor matrix_diagonal_flags_section
+ */
+/* @cond INNERDOC  */
+#define  RSB_DIAGONAL_E 0x01 /*  */ /*!< */
+#define  RSB_DIAGONAL_I 0x02 /*  */ /*!< */
+/* @endcond INNERDOC  */
+/* @cond INNERDOC  */
+/**
+ \name Values for valid matrix storage formats.
+ \anchor matrix_storage_flags_section
+ */
+#define  RSB_MATRIX_STORAGE_BCOR 0x40 /* */
+#define  RSB_MATRIX_STORAGE_BCSR 0x01 /*  */
+/**
+ \name Values for valid matrix storage formats strings.
+ \anchor matrix_storage_strings_section
+ */
+#define  RSB_MATRIX_STORAGE_BCOR_STRING "BCOR"
+#define  RSB_MATRIX_STORAGE_BCSR_STRING "BCSR"
+/* @endcond */
+
+/**
+ \name Valid symbol values for matrix numerical type specification -- type codes -- (type \see #rsb_type_t).
+ \anchor matrix_type_symbols_section
+ */
+#define RSB_NUMERICAL_TYPE_SAME_TYPE  1 /*!< a bogus type flag for specifying no type conversion */
+#define  RSB_NUMERICAL_TYPE_DOUBLE  'D' /*!< Character code for type double. */
+#define  RSB_NUMERICAL_TYPE_FLOAT  'S' /*!< Character code for type float. */
+#define  RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  'C' /*!< Character code for type float complex. */
+#define  RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  'Z' /*!< Character code for type double complex. */
+
+#define  RSB_NUMERICAL_TYPE_FORTRAN_SAME_TYPE  1 /*!< a bogus type flag for specifying no type conversion */
+#define  RSB_NUMERICAL_TYPE_FORTRAN_INT  ICHAR('I') /*!< Character code for type int, to be used (only) from Fortran. */
+#define  RSB_NUMERICAL_TYPE_FORTRAN_DOUBLE  ICHAR('D') /*!< Character code for type double, to be used (only) from Fortran. */
+#define  RSB_NUMERICAL_TYPE_FORTRAN_FLOAT  ICHAR('S') /*!< Character code for type float, to be used (only) from Fortran. */
+#define  RSB_NUMERICAL_TYPE_FORTRAN_FLOAT_COMPLEX  ICHAR('C') /*!< Character code for type float complex, to be used (only) from Fortran. */
+#define  RSB_NUMERICAL_TYPE_FORTRAN_DOUBLE_COMPLEX  ICHAR('Z') /*!< Character code for type double complex, to be used (only) from Fortran. */
+
+#define  RSB_NUMERICAL_TYPE_DEFAULT   RSB_NUMERICAL_TYPE_DOUBLE   /*!< A default numerical matrix type. */
+#define  RSB_NUMERICAL_TYPE_DEFAULT_INTEGER   RSB_NUMERICAL_TYPE_DOUBLE   /*!< A default numerical matrix type; if possible, an integer one. */
+#define  RSB_NUMERICAL_TYPE_INVALID_TYPE  '?' /*!< By definition, an invalid type code. */
+#define  RSB_NUMERICAL_TYPE_FIRST_BLAS   RSB_NUMERICAL_TYPE_FLOAT   /*!< A default numerical matrix type; if possible, not an integer one. If no such type is configured in, then the invalid type. */
+
+#define  RSB_CHAR_AS_TRANSPOSITION(TRANSC)	\
+(														\
+		(TRANSC) == ('N') ? (RSB_TRANSPOSITION_N) : 		\
+		(TRANSC) == ('n') ? (RSB_TRANSPOSITION_N) : 		\
+		(TRANSC) == ('T') ? (RSB_TRANSPOSITION_T) : 		\
+		(TRANSC) == ('t') ? (RSB_TRANSPOSITION_T) : 		\
+		(TRANSC) == ('C') ? (RSB_TRANSPOSITION_C) : 		\
+		(TRANSC) == ('c') ? (RSB_TRANSPOSITION_C) : 		\
+		'?'												\
+) /*!< Get the right transposition flag out of either n, c, t chars. */
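+/* E.g. RSB_CHAR_AS_TRANSPOSITION('t') evaluates to RSB_TRANSPOSITION_T;
+   any character other than n/N/t/T/c/C yields '?', an invalid value. */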
+
+
+/**
+ \name Miscellaneous constants.
+ */
+#define RSB_CONST_MAX_TUNING_ROUNDS 16 /*!< Maximal count of tuning rounds in one invocation of (rsb_tune_spmm/rsb_tune_spsm). */
+
+/* @cond INNERDOC  */
+/**
+ \name Values for other numerical type related macros.
+*/
+#define  RSB_NUMERICAL_TYPE_PREPROCESSOR_SYMBOLS "D S C Z "
+
+/* a bogus type for pattern input (TODO : should also implement ANY, just for matrix input) */
+#define RSB_NUMERICAL_TYPE_PATTERN  0
+/* @endcond */
+/* @cond INNERDOC */
+
+#define  RSB_MATRIX_STORAGE_DOUBLE_PRINTF_STRING "%lg"
+#define  RSB_MATRIX_STORAGE_FLOAT_PRINTF_STRING "%g"
+#define  RSB_MATRIX_STORAGE_FLOAT_COMPLEX_PRINTF_STRING "%g %g"
+#define  RSB_MATRIX_STORAGE_DOUBLE_COMPLEX_PRINTF_STRING "%lg %lg"
+
+
+
+#if 1
+ 
+#define RSB_ROWS_TRANSPOSITIONS_ARRAY_AS_CHAR	{'n', 't', 'c', RSB_INVALID_TRANS_CHAR }
+
+
+#define  RSB_TRANSPOSITIONS_PREPROCESSOR_SYMBOLS "n t c "
+
+#define RSB_TRANSPOSITION_AS_CHAR(TRANSA) 										\
+(														\
+		(TRANSA) == (RSB_TRANSPOSITION_N) ? ('N') : 		\
+		(TRANSA) == (RSB_TRANSPOSITION_T) ? ('T') : 		\
+		(TRANSA) == (RSB_TRANSPOSITION_C) ? ('C') : 		\
+		'?'												\
+)
+
+
+#define RSB_NUMERICAL_TYPE_STRING(CSP,TYPE) \
+		{ \
+		switch(TYPE) \
+		{ \
+			/* supported (double,float,float complex,double complex) */ \
+			case RSB_NUMERICAL_TYPE_DOUBLE 	:CSP="double";break; 	\
+			case RSB_NUMERICAL_TYPE_FLOAT 	:CSP="float";break; 	\
+			case RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 	:CSP="float_complex";break; 	\
+			case RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 	:CSP="double_complex";break; 	\
+			/* unsupported type */ \
+			default : CSP="?"; \
+		} \
+		}
+
+
+
+#define RSB_NUMERICAL_TYPE_SIZE(TYPE) \
+	( (TYPE)==(RSB_NUMERICAL_TYPE_DOUBLE ) ?  sizeof(double) : \
+	(( (TYPE)==(RSB_NUMERICAL_TYPE_FLOAT ) ?  sizeof(float) : \
+	(( (TYPE)==(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ) ?  sizeof(float complex) : \
+	(( (TYPE)==(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ) ?  sizeof(double complex) : \
+	(0  ) )  ) )  ) )  ) ) 
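+/* E.g. RSB_NUMERICAL_TYPE_SIZE(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX) evaluates
+   to sizeof(float complex); an unsupported type code yields 0. */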
+
+#define RSB_SIZEOF_BACKUP(TYPE) /* This is for rsb__pr_load. Please feed in upper case char codes (toupper(...)). */ \
+    	( (TYPE)==(73) ?  4 : \
+	(( (TYPE)==(68) ?  8 : \
+	(( (TYPE)==(83) ?  4 : \
+	(( (TYPE)==(67) ?  8 : \
+	(( (TYPE)==(90) ?  16 : \
+	(0  ) )  ) )  ) )  ) )  ) ) 
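+/* The ASCII codes above stand for 73='I', 68='D', 83='S', 67='C', 90='Z'. */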
+
+#define RSB_NUMERICAL_TYPE_REAL_TYPE(TYPE) \
+	( (TYPE)==(RSB_NUMERICAL_TYPE_DOUBLE ) ?  RSB_NUMERICAL_TYPE_DOUBLE  : \
+	(( (TYPE)==(RSB_NUMERICAL_TYPE_FLOAT ) ?  RSB_NUMERICAL_TYPE_FLOAT  : \
+	(( (TYPE)==(RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ) ?  RSB_NUMERICAL_TYPE_FLOAT  : \
+	(( (TYPE)==(RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ) ?  RSB_NUMERICAL_TYPE_DOUBLE  : \
+	(0  ) )  ) )  ) )  ) ) 
+
+#define RSB_NUMERICAL_TYPE_CAST_TO_ANY_P(CTYPE,CVAR,TYPE,TP,TOFF) \
+		{ \
+		switch(TYPE) \
+		{ \
+			/* supported (double,float,float complex,double complex) */ \
+			case RSB_NUMERICAL_TYPE_DOUBLE 	:\
+				(CVAR)=(CTYPE)((double*)TP)[TOFF] ; break; 	\
+			case RSB_NUMERICAL_TYPE_FLOAT 	:\
+				(CVAR)=(CTYPE)((float*)TP)[TOFF] ; break; 	\
+			case RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 	:\
+				(CVAR)=(CTYPE)((float complex*)TP)[TOFF] ; break; 	\
+			case RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 	:\
+				(CVAR)=(CTYPE)((double complex*)TP)[TOFF] ; break; 	\
+			/* unsupported type */ \
+			default : ; \
+		} \
+		}
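+/* E.g., reading element 3 of a void-typed array VA (caller variables assumed)
+   as a double, whatever typecode says VA holds:
+	double v;
+	RSB_NUMERICAL_TYPE_CAST_TO_ANY_P(double, v, typecode, VA, 3);
+*/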
+
+/* *A += abs(*B) */
+#define RSB_NUMERICAL_TYPE_ABS_SUM_AND_STORE_ELEMENTS(A,B,TYPE) \
+		{ \
+		switch(TYPE) \
+		{ \
+			/* supported (double,float,float complex,double complex) */ \
+			case RSB_NUMERICAL_TYPE_DOUBLE 	:	*(double*)(A)+= (	\
+				*(double*)(B) < (double)(0) ? - *(double*)(B) : *(double*)(B) ); break; 	\
+			case RSB_NUMERICAL_TYPE_FLOAT 	:	*(float*)(A)+= (	\
+				*(float*)(B) < (float)(0) ? - *(float*)(B) : *(float*)(B) ); break; 	\
+			case RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 	:	*(float complex*)(A)+= (	\
+				*(float complex*)(B) < (float complex)(0) ? - *(float complex*)(B) : *(float complex*)(B) ); break; 	\
+			case RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 	:	*(double complex*)(A)+= (	\
+				*(double complex*)(B) < (double complex)(0) ? - *(double complex*)(B) : *(double complex*)(B) ); break; 	\
+			/* unsupported type */ \
+			default : ; \
+		} \
+		}
+
+/* *A += *B */
+#define RSB_NUMERICAL_TYPE_SUM_AND_STORE_ELEMENTS(A,B,TYPE) \
+		{ \
+		switch(TYPE) \
+		{ \
+			/* supported (double,float,float complex,double complex) */ \
+			case RSB_NUMERICAL_TYPE_DOUBLE 	:	*(double*)(A)+=*(double*)(B); break; \
+			case RSB_NUMERICAL_TYPE_FLOAT 	:	*(float*)(A)+=*(float*)(B); break; \
+			case RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 	:	*(float complex*)(A)+=*(float complex*)(B); break; \
+			case RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 	:	*(double complex*)(A)+=*(double complex*)(B); break; \
+			/* unsupported type */ \
+			default : ; \
+		} \
+		}
+
+#define RSB_NUMERICAL_TYPE_SET_ELEMENT(DST,SRC,TYPE) \
+		{ \
+		switch(TYPE) \
+		{ \
+			/* supported (double,float,float complex,double complex) */ \
+			case RSB_NUMERICAL_TYPE_DOUBLE 	:	*(double*)(DST)=*(double*)(SRC); break; \
+			case RSB_NUMERICAL_TYPE_FLOAT 	:	*(float*)(DST)=*(float*)(SRC); break; \
+			case RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 	:	*(float complex*)(DST)=*(float complex*)(SRC); break; \
+			case RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 	:	*(double complex*)(DST)=*(double complex*)(SRC); break; \
+			/* unsupported type */ \
+			default : ; \
+		} \
+		}
+
+#define RSB_NUMERICAL_TYPE_SET_ELEMENT_REAL(DST,SRC,TYPE) \
+		{ \
+		switch(TYPE) \
+		{ \
+			case RSB_NUMERICAL_TYPE_DOUBLE 	:	*(double*)(DST)=(*(double*)(SRC)); break; \
+			case RSB_NUMERICAL_TYPE_FLOAT 	:	*(float*)(DST)=(*(float*)(SRC)); break; \
+			case RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 	:	*(float*)(DST)=crealf(*(float complex*)(SRC)); break; \
+			case RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 	:	*(double*)(DST)=creal(*(double complex*)(SRC)); break; \
+			/* unsupported type */ \
+			default : ; \
+		} \
+		}
+
+#define RSB_NUMERICAL_TYPE_SET_ELEMENT_FROM_DOUBLE(DST,DSRC,TYPE) \
+		{ \
+		switch(TYPE) \
+		{ \
+			/* supported (double,float,float complex,double complex) */ \
+			case RSB_NUMERICAL_TYPE_DOUBLE 	:	*(double*)(DST)=(double)(DSRC); break; \
+			case RSB_NUMERICAL_TYPE_FLOAT 	:	*(float*)(DST)=(float)(DSRC); break; \
+			case RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 	:	*(float complex*)(DST)=(float complex)(DSRC); break; \
+			case RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 	:	*(double complex*)(DST)=(double complex)(DSRC); break; \
+			/* unsupported type */ \
+			default : ; \
+		} \
+		}
+
+/* CODE NOT DEBUGGED */
+#define RSB_VECTOR_FIND_MAXIMAL_ELEMENT(INDEX,ARRAY,ELEMENTS,TYPE) 								\
+		{ 													\
+		int _index;												\
+		switch(TYPE) 												\
+		{ 													\
+			/* supported (double,float,float complex,double complex) */ 									\
+			case RSB_NUMERICAL_TYPE_DOUBLE 	:						\
+			{												\
+				double * _array = (double*)(ARRAY);								\
+				double _maxel=(double)(0);									\
+				int  _maxindex=0;									\
+				_maxel=_maxel-_maxel;	/* could this be evil ? */					\
+				for(_index=0;_index<(ELEMENTS);++_index)						\
+					if(fabs(_maxel)<fabs(_array[_index])){_maxel=_array[_index];_maxindex=_index;}	\
+					(INDEX)=_maxindex;								\
+			}												\
+			break;			\
+			case RSB_NUMERICAL_TYPE_FLOAT 	:						\
+			{												\
+				float * _array = (float*)(ARRAY);								\
+				float _maxel=(float)(0);									\
+				int  _maxindex=0;									\
+				_maxel=_maxel-_maxel;	/* could this be evil ? */					\
+				for(_index=0;_index<(ELEMENTS);++_index)						\
+					if(fabsf(_maxel)<fabsf(_array[_index])){_maxel=_array[_index];_maxindex=_index;}	\
+					(INDEX)=_maxindex;								\
+			}												\
+			break;			\
+			case RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 	:						\
+			{												\
+				float complex * _array = (float complex*)(ARRAY);								\
+				float complex _maxel=(float complex)(0);									\
+				int  _maxindex=0;									\
+				_maxel=_maxel-_maxel;	/* could this be evil ? */					\
+				for(_index=0;_index<(ELEMENTS);++_index)						\
+					if(cabsf(_maxel)<cabsf(_array[_index])){_maxel=_array[_index];_maxindex=_index;}	\
+					(INDEX)=_maxindex;								\
+			}												\
+			break;			\
+			case RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 	:						\
+			{												\
+				double complex * _array = (double complex*)(ARRAY);								\
+				double complex _maxel=(double complex)(0);									\
+				int  _maxindex=0;									\
+				_maxel=_maxel-_maxel;	/* could this be evil ? */					\
+				for(_index=0;_index<(ELEMENTS);++_index)						\
+					if(cabs(_maxel)<cabs(_array[_index])){_maxel=_array[_index];_maxindex=_index;}	\
+					(INDEX)=_maxindex;								\
+			}												\
+			break;			\
+			/* unsupported type */ \
+			default :  (INDEX)=-1; \
+		} \
+		}
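+/* Illustrative usage (recall the code above is declared not debugged): with
+	double v[3] = {1.0, -4.0, 2.0};
+	int idx;
+	RSB_VECTOR_FIND_MAXIMAL_ELEMENT(idx, v, 3, RSB_NUMERICAL_TYPE_DOUBLE);
+   idx ends up as 1, the position of the largest entry in absolute value. */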
+
+#define RSB_NUMERICAL_OP_INDEX_FROM_CODE(CODE) 								\
+( ((CODE)==RSB_OPTYPE_INDEX_SPMV_UAUA )?(0):			\
+( ((CODE)==RSB_OPTYPE_INDEX_SPMV_UAUZ )?(1):			\
+( ((CODE)==RSB_OPTYPE_INDEX_SPMV_UXUA )?(2):			\
+( ((CODE)==RSB_OPTYPE_INDEX_SPMV_UNUA )?(3):			\
+( ((CODE)==RSB_OPTYPE_INDEX_SPMV_SASA )?(4):			\
+( ((CODE)==RSB_OPTYPE_INDEX_SPSV_UXUA )?(5):			\
+( ((CODE)==RSB_OPTYPE_INDEX_SPMV_SXSA )?(6):			\
+( ((CODE)==RSB_OPTYPE_INDEX_SPSV_SXSX )?(7):			\
+( ((CODE)==RSB_OPTYPE_INDEX_INFTY_NORM )?(8):			\
+( ((CODE)==RSB_OPTYPE_INDEX_ROWSSUMS )?(9):			\
+( ((CODE)==RSB_OPTYPE_INDEX_SCALE )?(10):			\
+( ((CODE)==RSB_OPTYPE_INDEX_MAT_STATS )?(11):			\
+-1 ) \
+) \
+) \
+) \
+) \
+) \
+) \
+) \
+) \
+) \
+) \
+) \
+/* Note: as written, this maps an op index to itself; kept for symmetry with the type variant below. */
+#define RSB_NUMERICAL_TYPE_INDEX_FROM_CODE(CODE) 								\
+( ((CODE)==RSB_NUMERICAL_TYPE_DOUBLE )?(0):			\
+( ((CODE)==RSB_NUMERICAL_TYPE_FLOAT )?(1):			\
+( ((CODE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX )?(2):			\
+( ((CODE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX )?(3):			\
+-1 ) \
+) \
+) \
+) \
+/* Unlike the above, this maps a type character code to an array index. */
+
+
+#define RSB_IS_ELEMENT_MINUS_ONE(SRC,TYPE) 										\
+(														\
+		(TYPE) == (RSB_NUMERICAL_TYPE_DOUBLE ) ? (*(double*)(SRC)==(double)(-1)) : 		\
+		(TYPE) == (RSB_NUMERICAL_TYPE_FLOAT ) ? (*(float*)(SRC)==(float)(-1)) : 		\
+		(TYPE) == (RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ) ? (*(float complex*)(SRC)==(float complex)(-1)) : 		\
+		(TYPE) == (RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ) ? (*(double complex*)(SRC)==(double complex)(-1)) : 		\
+		0												\
+)
+
+#define RSB_IS_ELEMENT_ONE(SRC,TYPE) 										\
+(														\
+		(TYPE) == (RSB_NUMERICAL_TYPE_DOUBLE ) ? (*(double*)(SRC)==(double)1) : 		\
+		(TYPE) == (RSB_NUMERICAL_TYPE_FLOAT ) ? (*(float*)(SRC)==(float)1) : 		\
+		(TYPE) == (RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ) ? (*(float complex*)(SRC)==(float complex)1) : 		\
+		(TYPE) == (RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ) ? (*(double complex*)(SRC)==(double complex)1) : 		\
+		0												\
+)
+
+#define RSB_IS_ELEMENT_ZERO(SRC,TYPE) 										\
+(														\
+		(TYPE) == (RSB_NUMERICAL_TYPE_DOUBLE ) ? (*(double*)(SRC)==(double)0) : 		\
+		(TYPE) == (RSB_NUMERICAL_TYPE_FLOAT ) ? (*(float*)(SRC)==(float)0) : 		\
+		(TYPE) == (RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ) ? (*(float complex*)(SRC)==(float complex)0) : 		\
+		(TYPE) == (RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ) ? (*(double complex*)(SRC)==(double complex)0) : 		\
+		0												\
+)
+
+#define RSB_IS_ELEMENT_NONZERO(SRC,TYPE) 		(!(RSB_IS_ELEMENT_ZERO(SRC,TYPE)))
+
+#define RSB_MATRIX_UNSUPPORTED_TYPE(TYPE) ( \
+			(TYPE)!=RSB_NUMERICAL_TYPE_DOUBLE  && \
+			(TYPE)!=RSB_NUMERICAL_TYPE_FLOAT  && \
+			(TYPE)!=RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  && \
+			(TYPE)!=RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  && \
+			1 )
+
+#define RSB_IS_MATRIX_TYPE_COMPLEX(TYPE) 										\
+(														\
+		(TYPE) == (RSB_NUMERICAL_TYPE_DOUBLE ) ? 0 : 		\
+		(TYPE) == (RSB_NUMERICAL_TYPE_FLOAT ) ? 0 : 		\
+		(TYPE) == (RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ) ? 1 : 		\
+		(TYPE) == (RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX ) ? 1 : 		\
+		0												\
+)
+
+#define RSB_IS_ELEMENT_LESS_THAN(SRC,CMPSRC,TYPE) \
+( 			( (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE  && (*(double*)(SRC))<(*(double*)(CMPSRC)) ) || \
+			( (TYPE)==RSB_NUMERICAL_TYPE_FLOAT  && (*(float*)(SRC))<(*(float*)(CMPSRC)) ) || \
+			( (TYPE)==RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  && crealf(*(float complex*)(SRC))<crealf(*(float complex*)(CMPSRC)) ) || \
+			( (TYPE)==RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  && creal(*(double complex*)(SRC))<creal(*(double complex*)(CMPSRC)) ) || \
+			0 )
+
+
+/** use RSB_MAXIMAL_CONFIGURED_BLOCK_SIZE to oversize your arrays safely */
+#define RSB_MAXIMAL_CONFIGURED_BLOCK_SIZE	 1 
+/** use RSB_MAXIMAL_CONFIGURED_BLOCK_SIZE_EXTRA to oversize your arrays safely */
+#define RSB_MAXIMAL_CONFIGURED_BLOCK_SIZE_EXTRA	 (1-1) 
+#define RSB_CONST_MATRIX_IMPLEMENTATION_CODE_STRING_MAX_LENGTH (2*1024)	/** chars to reserve for a matrix implementation code */
+
+/* Section dedicated to implemented operations on matrices. */
+
+
+
+#define RSB_ROWS_UNROLL_ARRAY		{ 1 }
+#define RSB_COLUMNS_UNROLL_ARRAY	{ 1 }
+
+
+#define RSB_ROWS_UNROLL_ARRAY_LENGTH		1
+#define RSB_COLUMNS_UNROLL_ARRAY_LENGTH		1
+#define RSB_IMPLEMENTED_META_MOPS		12
+#define RSB_IMPLEMENTED_MOPS		11
+#define RSB_IMPLEMENTED_TYPES		4
+#define RSB_IMPLEMENTED_SOME_BLAS_TYPES		1
+
+#define RSB_MATRIX_OPS_ARRAY	{ "spmv_uaua","spmv_uauz","spmv_uxua","spmv_unua","spmv_sasa","spsv_uxua","spmv_sxsa","spsv_sxsx","infty_norm","rowssums","scale","mat_stats" }
+#define RSB_MATRIX_TYPES_ARRAY	{ "double","float","float complex","double complex", }
+#define RSB_MATRIX_TYPE_CODES_ARRAY	{ RSB_NUMERICAL_TYPE_DOUBLE ,RSB_NUMERICAL_TYPE_FLOAT ,RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX , }
+#define RSB_MATRIX_SPBLAS_TYPE_CODES_ARRAY	{ RSB_NUMERICAL_TYPE_FLOAT ,RSB_NUMERICAL_TYPE_DOUBLE ,RSB_NUMERICAL_TYPE_FLOAT_COMPLEX ,RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX , }
+
+#define RSB_M4_MATRIX_META_OPS_STRING	"spmv_uaua,spmv_uauz,spmv_uxua,spmv_unua,spmv_sasa,spsv_uxua,spmv_sxsa,spsv_sxsx,infty_norm,rowssums,scale"
+#define RSB_M4_MATRIX_TYPES_STRING		"double,float,float complex,double complex"
+#define RSB_M4_WANT_COLUMN_UNLOOP_FACTORS_STRING		"1"
+#define RSB_M4_WANT_ROW_UNLOOP_FACTORS_STRING		"1"
+
+/**
+ \name Macro to check matrix storage flags correctness
+ */
+#define  RSB_IS_MATRIX_STORAGE_ALLOWED_FOR_LEAF(MATRIX_STORAGE)	(( \
+	((MATRIX_STORAGE)==RSB_MATRIX_STORAGE_BCOR) || \
+	((MATRIX_STORAGE)==RSB_MATRIX_STORAGE_BCSR) || \
+	0 ) ? RSB_BOOL_TRUE:RSB_BOOL_FALSE )
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+#endif
+#endif /* RSB_TYPES_H_INCLUDED */
+/* @endcond */
diff --git a/rsb_types.m4 b/rsb_types.m4
new file mode 100644
index 0000000..3418b7e
--- /dev/null
+++ b/rsb_types.m4
@@ -0,0 +1,628 @@
+dnl
+dnl
+dnl	@author: Michele Martone
+dnl
+ifelse(LIBMMVBR_INCLUDED_TYPES_M4,1,`',`
+define(`LIBMMVBR_INCLUDED_TYPES_M4',`1')dnl
+include(`rsb_misc.m4')dnl
+include(`do_unroll.m4')dnl
+include(`libspblas_macros.m4')dnl
+dnl
+dnl 
+dnl 
+dnl	FIXME : this should go out of here:
+dnl
+define(`RSB_M4_MATRIX_OPS',(WANT_MATRIX_OPS))dnl
+dnl
+dnl 
+dnl 
+dnl 
+/** @file
+    @brief
+    Macros and constants which are type specific.
+    \n
+    Here reside declarations related to the supported matrix numerical types, and other declarations
+    depending on the build time options.
+    \n
+    If you wish to use this library with different matrix numerical types, you shall regenerate
+    the library source code accordingly; see the README file for how to do this.
+    \n
+    Only a small part of these declarations is needed by the user (see \ref matrix_type_symbols_section).
+    \n
+    Therefore, only the documented declarations are actually meant to be used in functions;
+    please regard the remaining ones as internal.
+  */
+RSB_M4_HEADER_MESSAGE()dnl
+dnl 
+dnl 	FIXME: move RSB_TYPES to RSB_CONFIG
+dnl 
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#ifndef RSB_TYPES_H_INCLUDED
+#define RSB_TYPES_H_INCLUDED
+')dnl
+dnl 
+dnl
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+dnl
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+dnl #include "rsb.h"
+dnl #include <stdio.h>
+
+ifelse(RSB_M4_OR(RSB_M4_MEMBER(`long double complex',WANT_TYPES),RSB_M4_MEMBER(`double complex',WANT_TYPES),RSB_M4_MEMBER(`float complex',WANT_TYPES)),1,`dnl
+dnl	
+ifelse(RSB_M4_HAVE_COMPLEX_TYPE,`()',`',`dnl
+#ifndef __cplusplus
+/* complex.h is ISO C99 */
+dnl	#ifdef RSB_HAVE_COMPLEX_H
+#include <complex.h>
+dnl	#endif
+#endif /* __cplusplus */
+')dnl
+')dnl
+dnl
+dnl	***********************************************************************
+dnl
+/* 
+   Each one of the following symbols is assigned to a type which is supported
+   by an option set at library code generation time.
+   Other types may be enabled by regenerating the whole library code.
+   To enable types, please read the documentation.
+ */
+
+/* Miscellaneous version strings.
+  Adopting a naming scheme similar to that of png.h.
+ */
+`#define 'RSB_LIBRSB_VER_STRING`		'"RSB_M4_WANT_LIBRSB_VER_MAJOR.RSB_M4_WANT_LIBRSB_VER_MINOR.RSB_M4_WANT_LIBRSB_VER_PATCH`'"`'	/*!< Library version string. */
+`#define 'RSB_HEADER_VERSION_STRING`		'"librsb version RSB_M4_WANT_LIBRSB_VER_MAJOR.RSB_M4_WANT_LIBRSB_VER_MINOR.RSB_M4_WANT_LIBRSB_VER_PATCH'`'RSB_M4_WANT_LIBRSB_VER_PRERS` - RSB_M4_WANT_LIBRSB_VER_DATE"`'	/*!< Library header version string. */
+`#define 'RSB_LIBRSB_VER_MAJOR`		'RSB_M4_WANT_LIBRSB_VER_MAJOR`'	/*!< Major version. */
+`#define 'RSB_LIBRSB_VER_MINOR`		'RSB_M4_WANT_LIBRSB_VER_MINOR`'	/*!< Minor version. */
+`#define 'RSB_LIBRSB_VER_PATCH`		'RSB_M4_WANT_LIBRSB_VER_PATCH`'	/*!< Patch version. */
+`#define 'RSB_LIBRSB_VER`		'RSB_M4_WANT_LIBRSB_LIBRSB_VER`'	/*!< Version number. */
+`#define 'RSB_LIBRSB_VER_DATE`		'RSB_M4_WANT_RSB_LIBRSB_VER_DATE`'	/*!< Version release date. */
+
+dnl
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+dnl
+`#define' RSB_M4_HAVE_TYPE_PREPROCESSOR_SYMBOL(type) 1 /*!< Type type is supported, so RSB_M4_HAVE_TYPE_PREPROCESSOR_SYMBOL(type) is defined .*/
+dnl
+')dnl
+`#define' RSB_DEFAULT_TYPE RSB_M4_DEFAULT_TYPE	/*!< The default numerical matrix type (can be used for declarations), used in the example programs. */
+`#define' RSB_DEFAULT_POSSIBLY_INTEGER_TYPE RSB_M4_DEFAULT_POSSIBLY_INTEGER_TYPE /*!< The default, integer if possible, numerical type (can be used for declarations). */
+`#define' RSB_DEFAULT_POSSIBLY_FIRST_BLAS_TYPE RSB_M4_FIRST(RSB_M4_DEFAULT_POSSIBLY_BLAS_TYPE_OR_DEFAULT) ` '/*!< The default, BLAS if possible, numerical type (can be used for declarations). */
+`#define' RSB_DEFAULT_TYPE_STRING RSB_M4_QUOTED_COMMA_LIST((RSB_M4_DEFAULT_TYPE))	/*!< A string specifying the name of the default type. */
+`#define' RSB_DEFAULT_POSSIBLY_INTEGER_TYPE_STRING RSB_M4_QUOTED_COMMA_LIST((RSB_M4_DEFAULT_POSSIBLY_INTEGER_TYPE)) /*!< A string specifying the name of the default possibly integer type.*/
+`#define' RSB_DEFAULT_SYMMETRY RSB_M4_MATRIX_SYMMETRY_PREPROCESSOR_SYMBOL(RSB_M4_DEFAULT_SYMMETRY)	/*!< The default symmetry flag. */
+`#define' RSB_DEFAULT_TRANSPOSITION RSB_M4_MATRIX_TRANSPOSITION_PREPROCESSOR_SYMBOL(RSB_M4_DEFAULT_TRANSPOSITION)	/*!< The default transposition flag (no transposition). */
+dnl
+`#define RSB_ROWS_TRANSPOSITIONS_ARRAY	{'dnl
+foreach(`transposition',RSB_M4_MATRIX_TRANSPOSITIONS,`RSB_M4_MATRIX_TRANSPOSITION_PREPROCESSOR_SYMBOL(transposition), ')RSB_INVALID_TRANS } /*!< An array with transposition constants. */
+
+dnl
+pushdef(`counter',`0')dnl
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+/*!  This preprocessor index can be used to address the type-related arrays.  */
+`#define' RSB_M4_TYPE_INDEX_PREPROCESSOR_SYMBOL(type) counter
+pushdef(`counter',eval(counter+1))dnl
+')dnl
+foreach(`type',RSB_M4_MATRIX_TYPE,`popdef(`counter')')dnl
+popdef(`counter')dnl
+
+dnl
+dnl
+dnl	***********************************************************************
+/* @cond INNERDOC  */
+dnl
+/*
+   Each one of the following symbols is assigned to an operation which is supported
+   by an option set at library code generation time.
+   \n
+   Other operations may be enabled by regenerating the whole library code.
+   To enable operations, please read the documentation.
+ */
+dnl
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+dnl
+`#define' RSB_M4_HAVE_OPTYPE_PREPROCESSOR_SYMBOL(mop) 1
+dnl
+')dnl
+dnl
+
+/*!
+ * These preprocessor indices can be used to address various mop-related arrays.
+ */
+dnl
+pushdef(`counter',`0')dnl
+foreach(`mop',RSB_M4_MATRIX_META_OPS,`dnl
+`#define' RSB_M4_OPTYPE_INDEX_PREPROCESSOR_SYMBOL(mop) counter
+pushdef(`counter',eval(counter+1))dnl
+')dnl
+foreach(`mop',RSB_M4_MATRIX_META_OPS,`popdef(`counter')')dnl
+popdef(`counter')dnl
+dnl
+dnl
+
+dnl
+/**
+ \name Values for valid matrix coordinate index types flags.
+ */
+dnl
+foreach(`citype',RSB_M4_MATRIX_COORDINATE_TYPES,`dnl
+`#define ' RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_PREPROCESSOR_SYMBOL(citype) RSB_M4_MATRIX_INDEX_COORDINATE_TYPE_CHARCODE_(citype) /*!< Character code for type citype.*/
+')dnl
+dnl
+/* @endcond */
+dnl
+/**
+ \name Values for valid matrix transposition flags.
+ \anchor matrix_transposition_flags_section
+ The Hermitian flag will act as simple transposed, for non complex types.
+ */
+dnl
+foreach(`transposition',RSB_M4_MATRIX_TRANSPOSITIONS,`dnl
+`#define ' RSB_M4_MATRIX_TRANSPOSITION_PREPROCESSOR_SYMBOL(transposition) RSB_M4_MATRIX_TRANSPOSITION_CHARCODE(transposition)
+')dnl
+dnl
+/* @cond INNERDOC  */
+dnl
+/**
+ \name Values for valid matrix symmetry flags.
+ \anchor matrix_symmetry_flags_section
+ */
+dnl
+foreach(`symmetry',RSB_M4_MATRIX_SYMMETRY,`dnl
+`#define ' RSB_M4_MATRIX_SYMMETRY_PREPROCESSOR_SYMBOL(symmetry) RSB_M4_MATRIX_SYMMETRY_CHARCODE(symmetry)
+')dnl
+dnl
+/* @endcond */
+dnl
+/**
+dnl \name Values for valid matrix symmetry flags.
+\name Values for inner diagonal specification values.
+ \anchor matrix_diagonal_flags_section
+ */
+dnl
+/* @cond INNERDOC  */
+dnl
+foreach(`diagonal',RSB_M4_MATRIX_DIAGONAL_TYPES,`dnl
+`#define ' RSB_M4_MATRIX_DIAGONAL_PREPROCESSOR_SYMBOL(diagonal) RSB_M4_MATRIX_DIAGONAL_CHARCODE(diagonal) /*!< */
+')dnl
+dnl
+/* @endcond INNERDOC  */
+dnl
+/* @cond INNERDOC  */
+dnl
+/**
+ \name Values for valid matrix storage formats.
+ \anchor matrix_storage_flags_section
+ */
+dnl
+foreach(`matrix_storage',RSB_M4_MATRIX_STORAGE,`dnl
+`#define ' RSB_M4_MATRIX_STORAGE_PREPROCESSOR_SYMBOL(matrix_storage) RSB_M4_MATRIX_STORAGE_CHARCODE(matrix_storage)
+')dnl
+dnl
+/**
+ \name Values for valid matrix storage formats strings.
+ \anchor matrix_storage_strings_section
+ */
+dnl
+foreach(`matrix_storage',RSB_M4_MATRIX_STORAGE,`dnl
+`#define ' RSB_M4_MATRIX_STORAGE_PREPROCESSOR_STRING(matrix_storage) "touppercase(RSB_M4_CHOPSPACES(matrix_storage))"
+')dnl
+dnl
+/* @endcond */
+dnl
+
+dnl
+/**
+ \name Valid symbol values for matrix numerical type specification -- type codes -- (type \see #rsb_type_t).
+ \anchor matrix_type_symbols_section
+ */
+dnl
+`#define' RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(SAME_TYPE) 1 /*!< a bogus type flag for specifying no type conversion */
+foreach(`citype',RSB_M4_MATRIX_TYPES,`dnl
+`#define ' RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(citype) singlequote(RSB_M4_TYPE_CHARCODE(citype)) /*!< Character code for type citype. */
+')dnl
+
+`#define ' RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(`FORTRAN_'`SAME_TYPE') 1 /*!< a bogus type flag for specifying no type conversion */
+foreach(`citype',RSB_M4_ALL_MATRIX_TYPES,`dnl
+`#define ' RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(`FORTRAN_'citype) ICHAR(singlequote(RSB_M4_TYPE_CHARCODE(citype))) /*!< Character code for type citype, to be used (only) from Fortran. */
+')dnl
+
+`#define ' RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(`DEFAULT')  RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(RSB_M4_DEFAULT_TYPE)  /*!< A default numerical matrix type. */
+`#define ' RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(`DEFAULT_INTEGER')  RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(RSB_M4_DEFAULT_POSSIBLY_INTEGER_TYPE)  /*!< A default numerical matrix type; if possible, an integer one. */
+`#define ' RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(RSB_M4_INVALID_TYPE) singlequote(RSB_M4_TYPE_CHARCODE(RSB_M4_INVALID_TYPE)) /*!< By definition, an invalid type code. */
+`#define ' RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(`FIRST_BLAS')  RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(RSB_M4_DEFAULT_POSSIBLY_BLAS_TYPE)  /*!< A default numerical matrix type; if possible, not an integer one. If no such type is configured in, then the invalid type. */
+
+`#define ' RSB_CHAR_AS_TRANSPOSITION(TRANSC)	\
+(														\
+foreach(`transA',RSB_M4_MATRIX_TRANSPOSITIONS,`dnl
+		(TRANSC) == (touppercase(singlequote(transA))) ? (RSB_M4_MATRIX_TRANSPOSITION_PREPROCESSOR_SYMBOL(transA)) : 		\
+		(TRANSC) == (tolowercase(singlequote(transA))) ? (RSB_M4_MATRIX_TRANSPOSITION_PREPROCESSOR_SYMBOL(transA)) : 		\
+')dnl
+		singlequote(?)												\
+) /*!< Get the right transposition flag out of either n, c, t chars. */
+
+
+/**
+ \name Miscellaneous constants.
+ */
+dnl
+#define RSB_CONST_MAX_TUNING_ROUNDS 16 /*!< Maximal count of tuning rounds in one invocation of (rsb_tune_spmm/rsb_tune_spsm). */
+
+dnl
+/* @cond INNERDOC  */
+dnl
+/**
+ \name Values for other numerical type related macros.
+*/
+`#define ' RSB_NUMERICAL_TYPE_PREPROCESSOR_SYMBOLS "foreach(`type',RSB_M4_MATRIX_TYPES,`RSB_M4_TYPE_CHARCODE(type) ')"
+
+/* a bogus type for pattern input (TODO : should also implement ANY, just for matrix input) */
+`#define' RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(PATTERN) 0
+dnl
+/* @endcond */
+dnl
+/* @cond INNERDOC */
+dnl
+dnl
+
+dnl
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+`#define ' RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_PRINTF_STRING(type) dnl
+ifelse(type,`long double',"%Lg")dnl
+ifelse(type,`double',"%lg")dnl
+ifelse(type,`float',"%g")dnl
+ifelse(type,`int',"%d")dnl
+ifelse(type,`char',"%c")dnl
+dnl ifelse(type,`complex',"%g %g")dnl
+ifelse(type,`long double complex',"%Lg %Lg")dnl
+ifelse(type,`double complex',"%lg %lg")dnl
+ifelse(type,`float complex',"%g %g")dnl
+
+')dnl
+dnl
+dnl
+dnl	#define RSB_SIZEOF(type) rsb__do_sizeof(type)
+dnl
+dnl
+dnl	UNLOOP_PAIRS
+dnl	------------
+dnl
+define(`UNLOOP_PAIRS',`foreach(`type',RSB_M4_MATRIX_TYPES,`RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type),sizeof(type),')')
+dnl
+dnl define(`UNLOOP_PAIRS_ALL',`foreach(`type',RSB_M4_ALL_MATRIX_TYPES,`RSB_M4_TYPE_CHARCODE_ASCII_VALUE(type),sizeof(type),')') dnl depends on complex.h
+define(`UNLOOP_PAIRS_ALL_GUESSED',`foreach(`type',RSB_M4_ALL_MATRIX_TYPES,`RSB_M4_TYPE_CHARCODE_ASCII_VALUE(type),RSB_M4_BACKUP_SIZEOF(type),')')
+dnl
+define(`REALT__PAIRS',`foreach(`type',RSB_M4_MATRIX_TYPES,`RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type),RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(RSB_M4_REALT(type)),')')
+#if 1
+dnl
+dnl
+dnl	SINGLE_LINEAR_SEARCH
+dnl	--------------------
+dnl	Expands to a linear search implemented in the C macro preprocessor language.
+dnl
+define(`SINGLE_LINEAR_SEARCH',`ifelse($#,1,$1,`( (TYPE)==($1) ?  pushdef(`type',$1)$2 popdef(`type'): \
+	(SINGLE_LINEAR_SEARCH(shift(shift($@))) ) ) ')') 
+dnl 
+dnl
+dnl
+`#define RSB_ROWS_TRANSPOSITIONS_ARRAY_AS_CHAR	{'dnl
+foreach(`transposition',RSB_M4_MATRIX_TRANSPOSITIONS,`singlequote(transposition), ')RSB_INVALID_TRANS_CHAR }
+
+
+`#define ' RSB_TRANSPOSITIONS_PREPROCESSOR_SYMBOLS "foreach(`transposition',RSB_M4_MATRIX_TRANSPOSITIONS,`transposition ')"
+
+#define RSB_TRANSPOSITION_AS_CHAR(TRANSA) 										\
+(														\
+foreach(`transA',RSB_M4_MATRIX_TRANSPOSITIONS,`dnl
+		(TRANSA) == (RSB_M4_MATRIX_TRANSPOSITION_PREPROCESSOR_SYMBOL(transA)) ? (touppercase(singlequote(transA))) : 		\
+')dnl
+		singlequote(?)												\
+)
+
+
+#define RSB_NUMERICAL_TYPE_STRING(CSP,TYPE) \
+		{ \
+		switch(TYPE) \
+		{ \
+			/* supported RSB_M4_MATRIX_TYPES */ \
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+			case RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type)	:CSP="RSB_M4_CHOPSPACES(type)";break; 	\
+')dnl
+			/* unsupported type */ \
+			default : CSP="?"; \
+		} \
+		}
+
+
+
+#define RSB_NUMERICAL_TYPE_SIZE(TYPE) \
+	SINGLE_LINEAR_SEARCH( UNLOOP_PAIRS 0 )
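+dnl
+dnl	As an illustration (a sketch, assuming only `double' and `float' are
+dnl	configured in), the search above would expand to roughly:
+dnl	( (TYPE)==(RSB_NUMERICAL_TYPE_DOUBLE) ? sizeof(double) : \
+dnl	  ( (TYPE)==(RSB_NUMERICAL_TYPE_FLOAT) ? sizeof(float) : ( 0 ) ) )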
+
+#define RSB_SIZEOF_BACKUP(TYPE) /* This is for rsb__pr_load. Please feed in upper case char codes (toupper(...)). */ \
+    	SINGLE_LINEAR_SEARCH( UNLOOP_PAIRS_ALL_GUESSED 0 )
+
+#define RSB_NUMERICAL_TYPE_REAL_TYPE(TYPE) \
+	SINGLE_LINEAR_SEARCH( REALT__PAIRS 0 )
+dnl
+
+#define RSB_NUMERICAL_TYPE_CAST_TO_ANY_P(CTYPE,CVAR,TYPE,TP,TOFF) \
+		{ \
+		switch(TYPE) \
+		{ \
+			/* supported RSB_M4_MATRIX_TYPES */ \
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+			case RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type)	:\
+				(CVAR)=(CTYPE)((type*)TP)[TOFF] ; break; 	\
+')dnl
+			/* unsupported type */ \
+			default : ; \
+		} \
+		}
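+dnl
+dnl	Usage sketch (hypothetical variables): reading element OFF of a typed
+dnl	array VA into a double variable v:
+dnl	  double v; RSB_NUMERICAL_TYPE_CAST_TO_ANY_P(double,v,typecode,VA,OFF);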
+
+/* *A += abs(*B) */
+#define RSB_NUMERICAL_TYPE_ABS_SUM_AND_STORE_ELEMENTS(A,B,TYPE) \
+		{ \
+		switch(TYPE) \
+		{ \
+			/* supported RSB_M4_MATRIX_TYPES */ \
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+			case RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type)	:	*(type*)(A)+= (	\
+				*(type*)(B) < (type)(0) ? - *(type*)(B) : *(type*)(B) ); break; 	\
+')dnl
+			/* unsupported type */ \
+			default : ; \
+		} \
+		}
+
+/* *A += *B */
+#define RSB_NUMERICAL_TYPE_SUM_AND_STORE_ELEMENTS(A,B,TYPE) \
+		{ \
+		switch(TYPE) \
+		{ \
+			/* supported RSB_M4_MATRIX_TYPES */ \
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+			case RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type)	:	*(type*)(A)+=*(type*)(B); break; \
+')dnl
+			/* unsupported type */ \
+			default : ; \
+		} \
+		}
+
+#define RSB_NUMERICAL_TYPE_SET_ELEMENT(DST,SRC,TYPE) \
+		{ \
+		switch(TYPE) \
+		{ \
+			/* supported RSB_M4_MATRIX_TYPES */ \
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+			case RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type)	:	*(type*)(DST)=*(type*)(SRC); break; \
+')dnl
+			/* unsupported type */ \
+			default : ; \
+		} \
+		}
+
+#define RSB_NUMERICAL_TYPE_SET_ELEMENT_REAL(DST,SRC,TYPE) \
+		{ \
+		switch(TYPE) \
+		{ \
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+			case RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type)	:	*(RSB_M4_REALT(type)*)(DST)=RSB_M4_CREAL(type,*(type*)(SRC)); break; \
+')dnl
+			/* unsupported type */ \
+			default : ; \
+		} \
+		}
+
+#define RSB_NUMERICAL_TYPE_SET_ELEMENT_FROM_DOUBLE(DST,DSRC,TYPE) \
+		{ \
+		switch(TYPE) \
+		{ \
+			/* supported RSB_M4_MATRIX_TYPES */ \
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+			case RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type)	:	*(type*)(DST)=(type)(DSRC); break; \
+')dnl
+			/* unsupported type */ \
+			default : ; \
+		} \
+		}
+
+/* CODE NOT DEBUGGED */
+#define RSB_VECTOR_FIND_MAXIMAL_ELEMENT(INDEX,ARRAY,ELEMENTS,TYPE) 								\
+		{ 													\
+		int _index;												\
+		switch(TYPE) 												\
+		{ 													\
+			/* supported RSB_M4_MATRIX_TYPES */ 									\
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+			case RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type)	:						\
+			{												\
+				type * _array = (type*)(ARRAY);								\
+				type _maxel=(type)(0);									\
+				int  _maxindex=0;									\
+				_maxel=_maxel-_maxel;	/* could this be evil ? */					\
+				for(_index=0;_index<(ELEMENTS);++_index)						\
+					if(RSB_M4_ABS(type,_maxel)<RSB_M4_ABS(type,_array[_index])){_maxel=_array[_index];_maxindex=_index;}	\
+					(INDEX)=_maxindex;								\
+			}												\
+			break;			\
+')dnl
+			/* unsupported type */ \
+			default :  (INDEX)=-1; \
+		} \
+		}
+
+dnl
+dnl	***********************************************************************
+dnl
+#define RSB_NUMERICAL_OP_INDEX_FROM_CODE(CODE) 								\
+pushdef(`code',`0')dnl
+foreach(`op',RSB_M4_MATRIX_META_OPS,`dnl
+( ((CODE)==RSB_M4_OPTYPE_INDEX_PREPROCESSOR_SYMBOL(op))?(code):			\
+pushdef(`code',eval(code+1))dnl
+')dnl
+-1 dnl
+foreach(`op',RSB_M4_MATRIX_META_OPS,`dnl
+popdef(`code')dnl
+) \
+')dnl
+popdef(`code')dnl
+dnl
+/* uhm. does it seem redundant ? */
+dnl
+dnl	***********************************************************************
+dnl
+#define RSB_NUMERICAL_TYPE_INDEX_FROM_CODE(CODE) 								\
+pushdef(`code',`0')dnl
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+( ((CODE)==RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type))?(code):			\
+pushdef(`code',eval(code+1))dnl
+')dnl
+-1 dnl
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+popdef(`code')dnl
+) \
+')dnl
+popdef(`code')dnl
+dnl
+/* uhm. seems redundant ? */
+dnl
+dnl	***********************************************************************
+dnl
+
+
+#define RSB_IS_ELEMENT_MINUS_ONE(SRC,TYPE) 										\
+(														\
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+		(TYPE) == (RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type)) ? (*(type*)(SRC)==(type)(-1)) : 		\
+')dnl
+		0												\
+)
+
+#define RSB_IS_ELEMENT_ONE(SRC,TYPE) 										\
+(														\
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+		(TYPE) == (RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type)) ? (*(type*)(SRC)==(type)1) : 		\
+')dnl
+		0												\
+)
+
+#define RSB_IS_ELEMENT_ZERO(SRC,TYPE) 										\
+(														\
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+		(TYPE) == (RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type)) ? (*(type*)(SRC)==(type)0) : 		\
+')dnl
+		0												\
+)
+
+#define RSB_IS_ELEMENT_NONZERO(SRC,TYPE) 		(!(RSB_IS_ELEMENT_ZERO(SRC,TYPE)))
+
+#define RSB_MATRIX_UNSUPPORTED_TYPE(TYPE) ( \
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+			(TYPE)!=RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type) && \
+')dnl
+			1 )
+
+#define RSB_IS_MATRIX_TYPE_COMPLEX(TYPE) 										\
+(														\
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+		(TYPE) == (RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type)) ? RSB_M4_IS_COMPLEX_TYPE(type) : 		\
+')dnl
+		0												\
+)
+
+#define RSB_IS_ELEMENT_LESS_THAN(SRC,CMPSRC,TYPE) \
+( foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+			( (TYPE)==RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type) && RSB_M4_CREAL(type,*(type*)(SRC))<RSB_M4_CREAL(type,*(type*)(CMPSRC)) ) || \
+')dnl
+			0 )
+
+
+dnl
+dnl
+/** use RSB_MAXIMAL_CONFIGURED_BLOCK_SIZE to oversize your arrays safely */
+`#define RSB_MAXIMAL_CONFIGURED_BLOCK_SIZE	' RSB_M4_MAX2(RSB_M4_MAXN(WANT_COLUMN_UNLOOP_FACTORS),RSB_M4_MAXN(WANT_ROW_UNLOOP_FACTORS)) 
+dnl
+dnl
+dnl
+/** use RSB_MAXIMAL_CONFIGURED_BLOCK_SIZE_EXTRA to oversize your arrays safely */
+`#define RSB_MAXIMAL_CONFIGURED_BLOCK_SIZE_EXTRA	' (RSB_M4_MAX2(RSB_M4_MAXN(WANT_COLUMN_UNLOOP_FACTORS),RSB_M4_MAXN(WANT_ROW_UNLOOP_FACTORS))-1) 
+dnl
+dnl
+#define RSB_CONST_MATRIX_IMPLEMENTATION_CODE_STRING_MAX_LENGTH (2*1024)	/** chars to reserve for a matrix implementation code */
+
+/* Section dedicated to implemented operations on matrices. */
+
+
+
+dnl
+dnl
+`#define RSB_ROWS_UNROLL_ARRAY		{' RSB_M4_COMMA_LIST(RSB_M4_ROWS_UNROLL) }
+dnl
+`#define RSB_COLUMNS_UNROLL_ARRAY	{' RSB_M4_COMMA_LIST(RSB_M4_COLUMNS_UNROLL) }
+dnl
+
+
+`#define RSB_ROWS_UNROLL_ARRAY_LENGTH		'RSB_M4_LIST_LENGTH(WANT_ROW_UNLOOP_FACTORS)
+`#define RSB_COLUMNS_UNROLL_ARRAY_LENGTH		'RSB_M4_LIST_LENGTH(WANT_COLUMN_UNLOOP_FACTORS)
+`#define RSB_IMPLEMENTED_META_MOPS		'RSB_M4_LIST_LENGTH(RSB_M4_QUOTED_COMMA_LIST(RSB_M4_MATRIX_META_OPS))
+`#define RSB_IMPLEMENTED_MOPS		'RSB_M4_LIST_LENGTH(RSB_M4_QUOTED_COMMA_LIST(RSB_M4_MATRIX_OPS))
+`#define RSB_IMPLEMENTED_TYPES		'RSB_M4_LIST_LENGTH(WANT_TYPES)
+dnl `#define RSB_IMPLEMENTED_BLAS_TYPES		'RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES_LIST_LENGTH
+`#define RSB_IMPLEMENTED_SOME_BLAS_TYPES		'RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES_LIST_LENGTH
+
+`#define 'RSB_M4_MATRIX_META_OPS_ARRAY`	{' RSB_M4_QUOTED_COMMA_LIST(RSB_M4_MATRIX_META_OPS) }
+dnl
+dnl NOTE: the following maps "double complex" to "double,complex"
+dnl `#define 'RSB_M4_MATRIX_TYPES_ARRAY`	{' RSB_M4_QUOTED_COMMA_LIST(RSB_M4_MATRIX_TYPES) }
+dnl
+`#define 'RSB_M4_MATRIX_TYPES_ARRAY`	{' foreach(`type',RSB_M4_MATRIX_TYPES,`"type",') }
+`#define 'RSB_MATRIX_TYPE_CODES_ARRAY`	{' foreach(`type',RSB_M4_MATRIX_TYPES,`RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type),') }
+`#define 'RSB_MATRIX_SPBLAS_TYPE_CODES_ARRAY`	{' foreach(`type',RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES,`RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type),') }
+
+`#define 'RSB_M4_MATRIX_META_OPS_STRING`	'"WANT_MATRIX_OPS"
+`#define 'RSB_M4_MATRIX_TYPES_STRING`		'"WANT_TYPES"
+`#define 'RSB_M4_WANT_COLUMN_UNLOOP_FACTORS_STRING`		'"RSB_M4_SPACED_LIST((WANT_COLUMN_UNLOOP_FACTORS))"
+`#define 'RSB_M4_WANT_ROW_UNLOOP_FACTORS_STRING`		'"RSB_M4_SPACED_LIST((WANT_ROW_UNLOOP_FACTORS))"
+
+/**
+ \name Macro to check matrix storage flags correctness
+ */
+dnl
+`#define ' RSB_IS_MATRIX_STORAGE_ALLOWED_FOR_LEAF(MATRIX_STORAGE)	(( \
+foreach(`matrix_storage',RSB_M4_MATRIX_STORAGE,`dnl
+	((MATRIX_STORAGE)==RSB_M4_MATRIX_STORAGE_PREPROCESSOR_SYMBOL(matrix_storage)) || \
+')dnl
+	0 ) ? RSB_BOOL_TRUE:RSB_BOOL_FALSE )
+dnl
+
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+dnl
+#endif
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+dnl
+dnl	FIXME: WHY THE HECK DOES UNCOMMENTING THE FOLLOWING TRIGGER AN ERROR ?
+dnl
+dnl #endif
+#endif /* RSB_TYPES_H_INCLUDED */
+')dnl
+dnl
+/* @endcond */
+dnl
+',`dnl
+static int foo(){return 0;}
+')dnl ONLY_WANT_HEADERS
+dnl
+dnl
+')dnl the whole file
+dnl
+dnl
diff --git a/rsb_unroll.c b/rsb_unroll.c
new file mode 100644
index 0000000..5cda4c8
--- /dev/null
+++ b/rsb_unroll.c
@@ -0,0 +1,69 @@
+/* @cond INNERDOC */
+/**
+ * @file
+ * @brief
+ * Unrolled kernels, for each type, operation, submatrix.
+ * Right now, they are used for VBR and alike formats.
+ */
+/* Take care of compiling this code without loop unrolling optimizations (-fno-unroll-loops, or -ON with N<=2 on gcc) */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+
+#include "rsb_types.h"
+
+/* NULL should be defined. */
+#ifndef NULL
+#define NULL ((int*)(0))
+#endif /* NULL */
+/**
+ * No VBR/VBC formats compiled in.
+ */
+
+
+
+
+
+/**
+ * Loops unroll factors.
+ */
+#define RSB_MIN_ROW_UNLOOP_FACTOR	1
+#define RSB_MAX_ROW_UNLOOP_FACTOR	1
+#define RSB_MIN_COLUMN_UNLOOP_FACTOR	1
+#define RSB_MAX_COLUMN_UNLOOP_FACTOR	1
+
+/**
+ * No VBR/VBC formats compiled in.
+ */
+
+
+
+
+
+/* @endcond */
+
diff --git a/rsb_unroll.h b/rsb_unroll.h
new file mode 100644
index 0000000..b5ad7f7
--- /dev/null
+++ b/rsb_unroll.h
@@ -0,0 +1,74 @@
+/* @cond INNERDOC */
+/**
+ * @file
+ * @brief
+ * Unrolled kernels, for each type, operation, submatrix.
+ * Right now, they are used for VBR and alike formats.
+ */
+/* Take care of compiling this code without loop unrolling optimizations (-fno-unroll-loops, or -ON with N<=2 on gcc) */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+#ifndef RSB_UNROLL_H_INCLUDED
+#define RSB_UNROLL_H_INCLUDED
+
+#include "rsb_types.h"
+
+/* NULL should be defined. */
+#ifndef NULL
+#define NULL ((int*)(0))
+#endif /* NULL */
+/**
+ * No VBR/VBC formats compiled in.
+ */
+
+
+
+
+
+/**
+ * Loops unroll factors.
+ */
+#define RSB_MIN_ROW_UNLOOP_FACTOR	1
+#define RSB_MAX_ROW_UNLOOP_FACTOR	1
+#define RSB_MIN_COLUMN_UNLOOP_FACTOR	1
+#define RSB_MAX_COLUMN_UNLOOP_FACTOR	1
+
+/**
+ * No VBR/VBC formats compiled in.
+ */
+
+
+#endif  /* RSB_UNROLL_H_INCLUDED */
+
+
+
+
+#define RSB_FITTING_SAMPLES		/*12 8*/4
+/* @endcond */
+
diff --git a/rsb_unroll.m4 b/rsb_unroll.m4
new file mode 100644
index 0000000..4ee1543
--- /dev/null
+++ b/rsb_unroll.m4
@@ -0,0 +1,247 @@
+dnl
+dnl	@author: Michele Martone
+dnl
+dnl	execute this script with M4 to obtain a loop unrolled functions collection
+dnl	this `forloop' macro is the one in the ./examples  directory distributed with the M4 package
+dnl	TODO : Use gotos and labels to generate far less boundary-loop code!
+dnl	TODO : Introduce explicit index variables inside for loops: beyond a certain point, do not rely on the compiler...
+dnl
+dnl
+dnl
+dnl	The generated code will expand from here
+dnl
+dnl
+/* @cond INNERDOC */
+dnl
+/**
+ * @file
+ * @brief
+ * Unrolled kernels, for each type, operation, submatrix.
+ * Right now, they are used for VBR and alike formats.
+ */
+dnl
+/* Take care of compiling this code without loop unrolling optimizations (-fno-unroll-loops, or -ON with N<=2 on gcc) */
+include(`rsb_misc.m4')dnl
+RSB_M4_HEADER_MESSAGE()dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#ifndef RSB_UNROLL_H_INCLUDED
+#define RSB_UNROLL_H_INCLUDED
+')
+dnl 
+dnl
+dnl
+dnl
+dnl
+#include "rsb_types.h"
+include(`do_unroll.m4')dnl
+dnl
+
+/* NULL should be defined. */
+#ifndef NULL
+#define NULL ((int*)(0))
+#endif /* NULL */
+dnl
+dnl
+ifelse(dnl
+RSB_M4_MEMBER(`VBR',WANT_MATRIX_STORAGE)dnl
+RSB_M4_MEMBER(`VBC',WANT_MATRIX_STORAGE)dnl
+,`00',`dnl
+/**
+ * No VBR/VBC formats compiled in.
+ */
+',`dnl
+dnl
+
+/* FIXME : we only want this code if VB and L stuff is in */
+#if defined(RSB_MATRIX_STORAGE_LC) || defined(RSB_MATRIX_STORAGE_LR) || defined(WANT_MATRIX_VB_STORAGE)
+
+
+/*!
+ * This code instance has coverage for submatrices of sizes as in the 
+ * cartesian (rows) x (columns) product of RSB_M4_ROWS_UNROLL x RSB_M4_COLUMNS_UNROLL.
+ * 
+ *  For each submatrix operation, this code instance offers a macro for
+ *  dispatching a function pointer to the specialized function, regarding
+ *  rows and columns unrolling factors.
+ *
+ * In case a submatrix of a size not in the above cartesian product is 
+ * given, the dispatching macros will assign a pointer to a function
+ * unrolled RSB_M4_ROWS_FALLBACK_UNROLL times on the rows and RSB_M4_COLUMNS_FALLBACK_UNROLL times on the columns.
+ */
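+
+/* E.g. (hypothetical sizes): a 3 x 5 submatrix not covered by the above
+ * cartesian product would be dispatched to the kernel unrolled
+ * RSB_M4_ROWS_FALLBACK_UNROLL x RSB_M4_COLUMNS_FALLBACK_UNROLL times. */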
+
+/* Function headers */
+
+ifdef(`ONLY_WANT_HEADERS',`dnl
+dnl unrollm(`mrowsu',`Mrowsu',`mcolsu',`Mcolsu',
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+foreach(`rowsu',RSB_M4_ROWS_UNROLL,`dnl
+foreach(`colsu',RSB_M4_COLUMNS_UNROLL,`dnl
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+foreach(`transposition',RSB_M4_MATRIX_TRANSPOSITIONS,`dnl
+foreach(`symmetry',RSB_M4_MATRIX_SYMMETRY,`dnl
+foreach(`unrolling',(`u',`l'),`dnl
+RSB_M4_UNROLL_KERNEL(`row',`rows',rowsu,`column',`columns',colsu,`type',`h',mop,unrolling,transposition)dnl
+')')')')')')')dnl
+')dnl
+
+dnl
+dnl	
+ifdef(`ONLY_WANT_HEADERS',,`
+/* Function definitions */
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+foreach(`rowsu',RSB_M4_ROWS_UNROLL,`dnl
+foreach(`colsu',RSB_M4_COLUMNS_UNROLL,`dnl
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+foreach(`transposition',RSB_M4_MATRIX_TRANSPOSITIONS,`dnl
+foreach(`symmetry',RSB_M4_MATRIX_SYMMETRY,`dnl
+foreach(`unrolling',(`u',`l'),`dnl
+RSB_M4_UNROLL_KERNEL(`row',`rows',rowsu,`column',`columns',colsu,`type',,mop,unrolling,transposition)
+')')')')')')') ')dnl
+
+
+dnl
+dnl	Function dispatch table name generating macros
+dnl
+pushdef(`rsb_mx_dispatch_table_name',`dnl
+pushdef(`type',$1)dnl
+pushdef(`unrolling',ifelse($2,`l',`l',`'))dnl FIXME : this is a temporary fix (setting to `' instead of `u')
+pushdef(`optype',$3)dnl
+void(*RSB_M4_PREFIX`'optype`_'RSB_M4_TYPE_CODE(type)`_'unrolling`_pointer_table'[])
+RSB_M4_KERNEL_FUNCTION_ARGS(type,unrolling,optype)
+popdef(`unrolling')dnl
+popdef(`optype')dnl
+popdef(`type')
+')dnl
+
+dnl popdef(`rsb_mx_dispatch_table_name')
+dnl
+dnl
+')dnl	ifelse(RSB_M4_MEMBER..
+dnl
+
+dnl
+dnl	Extern declarations
+dnl
+dnl foreach(`mop',(m,spmv_uauz),
+dnl foreach(`looped',`(l,)',
+dnl ifdef(`ONLY_WANT_HEADERS',foreach(`type',RSB_M4_MATRIX_TYPES, ` extern rsb_mx_dispatch_table_name(type,looped,mop); /* Dispatch table */ '))))
+
+dnl
+dnl	Function dispatch table names generating macros
+dnl
+
+dnl dnl	THE FOLLOWING WAS COMMENTED BY HAND BECAUSE IT WAS TROUBLESOME TO USE ONLY M4 TO DEACTIVATE IT.
+dnl dnl THEREFORE, WHEN YOU RE-ACTIVATE THE VBR STUFF, YOU SHOULD UN-COMMENT THE FOLLOWING BLOCK! (FIXME, TODO)
+dnl dnl
+dnl dnl
+dnl dnl	leave glombo where it is : it is a dummy parameter, or 'l' handling macros won't work :)
+dnl foreach(`mop',RSB_M4_MATRIX_OPS,`
+dnl foreach(`looped',(`l',`u'),`
+dnl foreach(`transposition',RSB_M4_MATRIX_TRANSPOSITIONS,`dnl
+dnl ifdef(`ONLY_WANT_HEADERS',,`dnl
+dnl foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+dnl rsb_mx_dispatch_table_name(type,looped,mop)dnl
+dnl ={ /* Dispatch table compilation */
+dnl foreach(`rowsu',RSB_M4_ROWS_UNROLL, `foreach(`colsu',RSB_M4_COLUMNS_UNROLL,
+dnl `	RSB_M4_KERNEL_FUNCTION_NAME(type,rowsu,colsu,looped,mop,transposition,RSB_M4_DEFAULT_COORDINATE_INDEX_TYPE),
+dnl ')')	(void*)NULL
+dnl };
+dnl ') ') ') ') ')
+dnl
+dnl	
+
+dnl maximum element in a list
+dnl : FIXME : these macros are also defined in sort.m4; this redundancy should be removed!
+define(`max2',`ifelse(eval(`$1>$2'),1,`$1',`$2')')dnl
+define(`maxn',`ifelse($#,1,$1,`ifelse($#,2,`max2($1,$2)',`max2($1,maxn(shift($@)))')')')dnl
+define(`min2',`ifelse($2,,$1,`ifelse(eval(`$1<$2'),1,`$1',`$2')')')dnl
+define(`minn',`ifelse($2,,$1,`ifelse($#,2,`min2($1,$2)',`min2($1,minn(shift($@)))')')')dnl
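+dnl
+dnl	e.g.: maxn(1,2,4) expands to 4, while minn(1,2,4) expands to 1.
+dnl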
+
+/**
+ * Loops unroll factors.
+ */
+`#define RSB_MIN_ROW_UNLOOP_FACTOR'	minn(WANT_ROW_UNLOOP_FACTORS)
+`#define RSB_MAX_ROW_UNLOOP_FACTOR'	maxn(WANT_ROW_UNLOOP_FACTORS)
+`#define RSB_MIN_COLUMN_UNLOOP_FACTOR'	minn(WANT_COLUMN_UNLOOP_FACTORS)
+`#define RSB_MAX_COLUMN_UNLOOP_FACTOR'	maxn(WANT_COLUMN_UNLOOP_FACTORS)
+
+dnl
+dnl please automate generation of such macros
+dnl
+dnl #if 0
+dnl #define RSB_GET_MV_DOUBLE_KERNEL(rows,columns) \
+dnl 	( \
+dnl 	(columns)>7 \
+dnl 		? \
+dnl 		( \
+dnl 			(rows) > 2 ? \
+dnl 			rsb_mv_double_r4_c8_l: \
+dnl 			rsb_mv_double_r1_c8_l \
+dnl 		): \
+dnl 		( \
+dnl 			(columns)>1? \
+dnl 			rsb_mv_double_r4_c8_l: \
+dnl 			rsb_mv_double_r4_c4_l \
+dnl 		) \
+dnl 	)
+dnl //#else
+dnl #define RSB_GET_MV_DOUBLE_KERNEL(rows,columns) \
+dnl ( \
+dnl 			rsb_mv_double_r4_c8_l \
+dnl )
+dnl #endif
+dnl	
+dnl
+dnl
+dnl
+dnl
+ifelse(dnl
+RSB_M4_MEMBER(`VBR',WANT_MATRIX_STORAGE)dnl
+RSB_M4_MEMBER(`VBC',WANT_MATRIX_STORAGE)dnl
+,`00',`dnl
+/**
+ * No VBR/VBC formats compiled in.
+ */
+',`dnl
+dnl
+dnl
+dnl
+#if 1
+foreach(`mop',RSB_M4_MATRIX_OPS,`dnl
+foreach(`looped',(,),`dnl (,) or (l)
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+foreach(`transposition',RSB_M4_MATRIX_TRANSPOSITIONS,`dnl
+pushdef(`fix', `RSB_M4_KERNEL_FUNCTION_NAME(type,rowsu,colsu,looped,mop,transposition,RSB_M4_DEFAULT_COORDINATE_INDEX_TYPE) \
+' )
+DOUBLE_LINEAR_KERNEL_SEARCH_MACRO_(mop,type,looped,`UNLOOP_R_C_PAIRS`RSB_M4_KERNEL_FUNCTION_NAME(type,RSB_M4_ROWS_FALLBACK_UNROLL,RSB_M4_COLUMNS_FALLBACK_UNROLL,`l',mop,transposition,RSB_M4_DEFAULT_COORDINATE_INDEX_TYPE)'')
+popdef(`fix')
+')')')')
+dnl
+#endif
+dnl	TODO : implement a macro choosing a suboptimal kernel function
+dnl
+dnl #undef RSB_double_spmv
+dnl #define RSB_double_spmv(R,C) rsb_mv_double_r4_c4
+
+/* FIXME : we only want this code if VB and L stuff is in */
+#endif
+#
+dnl
+')dnl	ifelse(RSB_M4_MEMBER..
+dnl
+
+
+ifdef(`ONLY_WANT_HEADERS',`dnl
+#endif  /* RSB_UNROLL_H_INCLUDED */
+')
+
+
+ifdef(`ONLY_WANT_HEADERS',`
+`#define RSB_FITTING_SAMPLES		'dnl
+RSB_M4_FITTING_SAMPLES
+')dnl
+dnl
+/* @endcond */
+dnl
+
diff --git a/rsb_user.c b/rsb_user.c
new file mode 100644
index 0000000..041fa9c
--- /dev/null
+++ b/rsb_user.c
@@ -0,0 +1,445 @@
+/*                                                                                                                            
+
+Copyright (C) 2008-2015 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ * @file
+ * @author Michele Martone
+ * @brief Functions dumping system information to users.
+ * */
+
+#include <unistd.h>	/* sysconf */
+#include "rsb_internals.h"
+#include "rsb.h"
+#ifdef RSB_HAVE_LIMITS_H
+#include <limits.h>	/* CHAR_BIT */
+#endif /* RSB_HAVE_LIMITS_H */
+#include <assert.h>	/* assert */
+#ifdef RSB_HAVE_MALLOC_H
+#include <malloc.h>	/* posix_memalign */
+#endif /* RSB_HAVE_MALLOC_H */
+
+#ifdef RSB_HAVE_TIMES_H
+#include <sys/times.h>
+#endif /* RSB_HAVE_TIMES_H */
+#ifdef RSB_HAVE_SYS_SYSTEMCFG_H 
+#include <sys/systemcfg.h>	/* for _H_SYSTEMCFG */
+#endif /* RSB_HAVE_SYS_SYSTEMCFG_H */
+#ifdef RSB_HAVE_SCHED_H
+#include <sched.h>	/* sched_getaffinity; FIXME: move to sys.c */
+#include "rsb-config.h"
+#endif /* RSB_HAVE_SCHED_H */
+
+RSB_INTERNALS_COMMON_HEAD_DECLS
+
+#ifdef _H_SYSTEMCFG
+#if 0
+from systemcfg.h :
+extern struct {
+        int architecture;       /* processor architecture */
+        int implementation;     /* processor implementation */
+        int version;            /* processor version */
+        int width;              /* width (32 || 64) */
+        int ncpus;              /* 1 = UP, n = n-way MP */
+        int cache_attrib;       /* L1 cache attributes (bit flags)      */
+                                /* bit          0/1 meaning             */
+                                /* -------------------------------------*/
+                                /* 31    no cache / cache present       */
+                                /* 30    separate I and D / combined    */
+        int icache_size;        /* size of L1 instruction cache */
+        int dcache_size;        /* size of L1 data cache */
+        int icache_asc;         /* L1 instruction cache associativity */
+        int dcache_asc;         /* L1 data cache associativity */
+        int icache_block;       /* L1 instruction cache block size */
+        int dcache_block;       /* L1 data cache block size */
+        int icache_line;        /* L1 instruction cache line size */
+        int dcache_line;        /* L1 data cache line size */
+        int L2_cache_size;      /* size of L2 cache, 0 = No L2 cache */
+        int L2_cache_asc;       /* L2 cache associativity */
+        int tlb_attrib;         /* TLB attributes (bit flags)           */
+                                /* bit          0/1 meaning             */
+                                /* -------------------------------------*/
+                                /* 31    no TLB / TLB present           */
+                                /* 30    separate I and D / combined    */
+        int itlb_size;          /* entries in instruction TLB */
+        int dtlb_size;          /* entries in data TLB */
+        int itlb_asc;           /* instruction tlb associativity */
+        int dtlb_asc;           /* data tlb associativity */
+        long long physmem;      /* bytes of OS available memory             */
+..
+}_system_configuration;
+#endif
+
+static rsb_err_t aix_sys_info()
+{
+	/*!
+	 	\ingroup internals
+	*/
+	RSB_INFO("Working on an AIX system\n");
+	RSB_INFO("CPU		: %d \n",_system_configuration.ncpus);
+	RSB_INFO("cache_at	: %d \n",_system_configuration.cache_attrib);
+	RSB_INFO("L1		: %d \n",_system_configuration.dcache_size);
+	RSB_INFO("L2		: %d \n",_system_configuration.L2_cache_size);
+	RSB_INFO("MEM		: %lld \n",_system_configuration.physmem);
+	return RSB_ERR_NO_ERROR;
+}
+#endif /* _H_SYSTEMCFG */
+
+
+
+static rsb_err_t get_sysconf_cacheinfo( long *cpa, long *cpb, long *cpc, int cac,  int cbc,  int ccc, int cl)
+{
+	/*!
+	 \ingroup internals
+	*/
+	*cpa = sysconf(cac);
+	*cpb = sysconf(cbc);
+	*cpc = sysconf(ccc);
+	if(*cpa<1 || *cpb < 1 || *cpc < 1)
+		RSB_INFO("sysconf() : no level %d cache\n",cl);
+	else
+	{
+		RSB_INFO("sysconf() : level %d cache size %ld \n",cl,*cpc);
+		RSB_INFO("sysconf() : level %d cache associativity %ld \n",cl,*cpa);
+		RSB_INFO("sysconf() : level %d cache line size %ld \n",cl,*cpb);
+	}
+	return RSB_ERR_NO_ERROR;
+}
+
+static long rsb_max_threads(void)
+{
+	/*!
+	 * \ingroup gr_internals
+	 *
+	 * Just a user-oriented function.
+	 *
+	 * \return the maximum number of available hardware threads
+	 *
+	 * If on AIX, we use the native solution, as sysconf() gives values which are not usable as thread counts.
+	 * */
+#ifdef _H_SYSTEMCFG
+	return _system_configuration.ncpus;
+#else /* _H_SYSTEMCFG */
+#ifdef RSB_HAVE_SYSCONF 
+	/*
+	 * _SC_NPROCESSORS_ONLN : The number of processors currently online (available).
+	 * _SC_NPROCESSORS_CONF : The number of processors configured.
+	 */
+	//return sysconf(_SC_NPROCESSORS_CONF);
+	return sysconf(_SC_NPROCESSORS_ONLN);
+#else /* RSB_HAVE_SYSCONF  */
+	return 0;	/* this should be regarded as an error */
+#endif /* RSB_HAVE_SYSCONF  */
+#endif /* _H_SYSTEMCFG */
+}
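+
+// Usage sketch (hypothetical caller): treat a non-positive count as a
+// detection failure and fall back to a single thread:
+//   long nt = rsb_max_threads();
+//   if(nt < 1) nt = 1;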
+
+rsb_err_t rsb__sys_info()
+{
+	/*!
+	 \ingroup internals
+	 *
+	 * A function printing out information about the system.
+	 * It gives the user information about the library configuration.
+	 * It should be called after library initialization.
+	 *
+	 * \return an error code or RSB_ERR_NO_ERROR.
+	 * TODO: move to sys.c
+	 * */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+#if RSB_WITH_HWLOC
+	{
+		int i;
+		for(i=1;i<4;++i)
+		{
+			size_t sz = rsb__get_lnc_size_hwloc(i);
+			if(sz)
+       				RSB_INFO("hwloc size of cache level %d: %ld\n",i,sz);
+		}
+	}
+#endif	/* RSB_WITH_HWLOC */
+
+       	RSB_INFO("detected max available cores/threads : %ld\n",(long int)rsb_max_threads());
+#if RSB_WANT_OMP_RECURSIVE_KERNELS
+	#pragma omp parallel
+	{
+       	RSB_INFO("detected max OpenMP procs : %ld\n",(long int)omp_get_num_procs());
+	}
+#endif /* RSB_WANT_OMP_RECURSIVE_KERNELS */
+       	RSB_INFO("detected %ld levels of cache\n",(long int)rsb__get_cache_levels_num());
+	{
+		int i;
+		for(i=1;i <= rsb__get_cache_levels_num();++i)
+		       	RSB_INFO("L%d size: %ld \n",i,(long int)rsb__get_lnc_size(i));
+	}
+
+#ifdef _H_SYSTEMCFG
+	aix_sys_info();
+#endif /* _H_SYSTEMCFG */
+
+//       	RSB_INFO("LL size: %ld \n",(long int)rsb__get_lastlevel_c_size());
+
+#ifndef RSB_HAVE_SYSCONF
+        RSB_INFO("sysconf() not available\n");
+#else /* RSB_HAVE_SYSCONF */
+//       	RSB_INFO("detected %ld levels of cache\n",(long int)rsb__get_cache_levels_num());
+#endif /* RSB_HAVE_SYSCONF */
+	{
+#ifdef RSB_HAVE_SYSCONF
+        long int pagesize = 0;
+        long int mem_pages = 0;
+        size_t tot_mem = 0;
+#if   defined(PAGESIZE)
+        pagesize = sysconf(PAGESIZE);
+#elif defined(_SC_PAGESIZE)
+        pagesize = sysconf(_SC_PAGESIZE);
+#elif defined(PAGE_SIZE)
+        pagesize = sysconf(PAGE_SIZE);
+#else /* PAGE_SIZE */
+#endif /* PAGE_SIZE */
+        if( pagesize)RSB_INFO("sysconf() : %ld bytes per pagesize\n",pagesize);
+        if(!pagesize)RSB_INFO("sysconf() available, PAGESIZE _SC_PAGESIZE PAGE_SIZE undefined\n");
+
+	/* 
+	 _SC_AVPHYS_PAGES : The number of currently available pages of physical memory.
+	 _SC_PHYS_PAGES   : The  number  of pages of physical memory.
+	*/
+#if   defined(_SC_PHYS_PAGES)
+        mem_pages = sysconf(_SC_PHYS_PAGES);
+#else /* _SC_PHYS_PAGES */
+#endif /* _SC_PHYS_PAGES */
+        tot_mem = (size_t)mem_pages;
+	tot_mem *= (size_t)pagesize;
+        if( mem_pages)RSB_INFO("sysconf() : %ld physical pages\n",mem_pages);
+        if(!mem_pages)RSB_INFO("sysconf() available, _SC_PHYS_PAGES undefined\n");
+        if( mem_pages && pagesize)RSB_INFO("sysconf() : %zd bytes (%zd MB) of physical memory\n",tot_mem,(tot_mem)/(1024*1024));
+#if   defined(_SC_AVPHYS_PAGES)
+        RSB_INFO("sysconf() : %ld available (free) physical pages\n",sysconf(_SC_AVPHYS_PAGES));
+        RSB_INFO("sysconf() : %ld available (free) physical memory\n",sysconf(_SC_AVPHYS_PAGES)*pagesize);
+#endif /* _SC_AVPHYS_PAGES */
+#endif /* RSB_HAVE_SYSCONF */
+	}
+	{
+#ifdef RSB_HAVE_SYSCONF 
+	long int sc_nprocessors_conf;
+	long int sc_nprocessors_onln;
+	/*
+	 * _SC_NPROCESSORS_ONLN : The number of processors currently online (available).
+	 * _SC_NPROCESSORS_CONF : The number of processors configured.
+	 */
+	sc_nprocessors_conf = sysconf(_SC_NPROCESSORS_CONF);
+	sc_nprocessors_onln = sysconf(_SC_NPROCESSORS_ONLN);
+	RSB_INFO("sysconf() , processors : %ld\n",sc_nprocessors_conf);
+	RSB_INFO("sysconf() , processors online : %ld\n",sc_nprocessors_onln);
+#endif /* RSB_HAVE_SYSCONF  */
+	}
+#ifdef RSB_HAVE_SYSCONF 
+	{
+#ifdef _SC_LEVEL1_DCACHE_SIZE 
+	long int c1a,c1b,c1c;
+	c1a = sysconf(_SC_LEVEL1_DCACHE_ASSOC);
+	c1b = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
+	c1c = sysconf(_SC_LEVEL1_DCACHE_SIZE);
+	get_sysconf_cacheinfo(&c1a,&c1b,&c1c,_SC_LEVEL1_DCACHE_ASSOC,_SC_LEVEL1_DCACHE_LINESIZE,_SC_LEVEL1_DCACHE_SIZE,1);
+#else /* _SC_LEVEL1_DCACHE_SIZE */
+	RSB_INFO("sysconf() implementation obsolete: no L%d cache info\n",1);
+#endif /* _SC_LEVEL1_DCACHE_SIZE */
+	}
+	{
+#ifdef _SC_LEVEL2_CACHE_SIZE 
+	long int c2a,c2b,c2c;
+	c2a = sysconf(_SC_LEVEL2_CACHE_ASSOC);
+	c2b = sysconf(_SC_LEVEL2_CACHE_LINESIZE);
+	c2c = sysconf(_SC_LEVEL2_CACHE_SIZE);
+	get_sysconf_cacheinfo(&c2a,&c2b,&c2c,_SC_LEVEL2_CACHE_ASSOC,_SC_LEVEL2_CACHE_LINESIZE,_SC_LEVEL2_CACHE_SIZE,2);
+#else /* _SC_LEVEL2_CACHE_SIZE */
+	RSB_INFO("sysconf() implementation obsolete: no L%d cache info\n",2);
+#endif /* _SC_LEVEL2_CACHE_SIZE */
+	}
+	{
+#ifdef _SC_LEVEL3_CACHE_SIZE 
+	long int c3a,c3b,c3c;
+	c3a = sysconf(_SC_LEVEL3_CACHE_ASSOC);
+	c3b = sysconf(_SC_LEVEL3_CACHE_LINESIZE);
+	c3c = sysconf(_SC_LEVEL3_CACHE_SIZE);
+	get_sysconf_cacheinfo(&c3a,&c3b,&c3c,_SC_LEVEL3_CACHE_ASSOC,_SC_LEVEL3_CACHE_LINESIZE,_SC_LEVEL3_CACHE_SIZE,3);
+#else /* _SC_LEVEL3_CACHE_SIZE  */
+	RSB_INFO("sysconf() implementation obsolete: no L%d cache info\n",3);
+#endif /* _SC_LEVEL3_CACHE_SIZE  */
+	}
+	{
+#ifdef _SC_LEVEL4_CACHE_SIZE 
+	long int c4a,c4b,c4c;
+	c4a = sysconf(_SC_LEVEL4_CACHE_ASSOC);
+	c4b = sysconf(_SC_LEVEL4_CACHE_LINESIZE);
+	c4c = sysconf(_SC_LEVEL4_CACHE_SIZE);
+	get_sysconf_cacheinfo(&c4a,&c4b,&c4c,_SC_LEVEL4_CACHE_ASSOC,_SC_LEVEL4_CACHE_LINESIZE,_SC_LEVEL4_CACHE_SIZE,4);
+#else /* _SC_LEVEL4_CACHE_SIZE */
+	RSB_INFO("sysconf() implementation obsolete: no L%d cache info\n",4);
+#endif /* _SC_LEVEL4_CACHE_SIZE */
+	}
+#endif /* RSB_HAVE_SYSCONF */
+#ifdef CHAR_BIT
+	/* This should hold, but it might not. */
+	RSB_ASSERT(CHAR_BIT==sizeof(char)*8);
+
+	/* It should not happen, but it could. */
+	if(CHAR_BIT!=8)
+	{
+		RSB_DO_ERROR_CUMULATE(errval,RSB_ERR_INTERNAL_ERROR);
+		RSB_INFO("%d bits per byte! This is catastrophic.\n",CHAR_BIT);
+	}
+	else
+		RSB_INFO("8 bits per byte. Good.\n");
+#else /* CHAR_BIT */
+	RSB_INFO("We have no informations on bits per byte. Beware!\n");
+#endif /* CHAR_BIT */
+#if 1
+	{
+		long cbbs = rsb__get_cache_block_byte_size();
+		if(cbbs)
+			RSB_STDERR("cache block size		: %ld \n",cbbs);
+		else
+			RSB_STDERR("cache block size unknown (detected %ld: this is a problem!)\n",cbbs);
+	}
+#endif
+#ifdef INT_MAX
+	RSB_INFO("SHRT_MAX : %hd\n",SHRT_MAX);
+	RSB_INFO("SHRT_MIN : %hd\n",SHRT_MIN);
+	RSB_INFO("USHRT_MAX : %hu\n",USHRT_MAX);
+	RSB_INFO("INT_MIN : %d\n",INT_MIN);
+	RSB_INFO("INT_MAX : %d\n",INT_MAX);
+	RSB_INFO("UINT_MAX : %u\n",UINT_MAX);
+	RSB_INFO("LONG_MAX : %ld\n",LONG_MAX);
+	RSB_INFO("LONG_MIN : %ld\n",LONG_MIN);
+#ifdef ULONG_MAX
+	RSB_INFO("ULONG_MAX : %lu\n",ULONG_MAX);
+#else /* ULONG_MAX */
+	RSB_INFO("ULONG_MAX : undefined\n");
+#endif /* ULONG_MAX */
+#ifdef 	LLONG_MAX 
+	RSB_INFO("LLONG_MAX : %lld\n",LLONG_MAX);
+#else /* LLONG_MAX  */
+	RSB_INFO("LLONG_MAX : undefined\n");
+#endif /* LLONG_MAX  */
+#ifdef LLONG_MIN
+	RSB_INFO("LLONG_MIN : %lld\n",LLONG_MIN);
+#else /* LLONG_MIN */
+	RSB_INFO("LLONG_MIN : undefined\n");
+#endif /* LLONG_MIN */
+#ifdef ULLONG_MAX
+	RSB_INFO("ULLONG_MAX : %llu\n",ULLONG_MAX);
+#else /* ULLONG_MAX */
+	RSB_INFO("ULLONG_MAX : undefined\n");
+#endif /* ULLONG_MAX */
+#else /* INT_MAX */
+	RSB_INFO("INT_MAX : undefined\n");
+#endif /* INT_MAX */
+	RSB_INFO("RSB_MARKER_COO_VALUE : %llu\n",(long long unsigned)RSB_MARKER_COO_VALUE);
+	RSB_INFO("RSB_MARKER_NNZ_VALUE : %llu\n",(long long unsigned)RSB_MARKER_NNZ_VALUE);
+	RSB_INFO("RSB_SUBM_IDX_MARKER : %llu\n",(long long unsigned)RSB_SUBM_IDX_MARKER);
+	RSB_INFO("RSB_MAX_ALLOCATABLE_MEMORY_CHUNK: %llu\n",(long long unsigned)RSB_MAX_ALLOCATABLE_MEMORY_CHUNK);
+
+	RSB_INFO("timing min delta (if negative, don't complain with us)   : %lg s\n", rsb__timer_sanity());
+	RSB_INFO("timing granularity : %lg s\n", rsb__timer_granularity());
+#if   defined(RSB_CFLAGS)
+	RSB_INFO("CFLAGS   : %s\n",RSB_CFLAGS);
+#else /* RSB_CFLAGS */
+	RSB_INFO("no CFLAGS info\n");
+#endif /* RSB_CFLAGS */
+#if   defined(RSB_CC)
+	RSB_INFO("CC       : %s\n",RSB_CC);
+#else /* RSB_CC */
+	RSB_INFO("no CC info\n");
+#endif /* RSB_CC */
+#ifdef RSB_HAVE_SCHED_H
+#ifdef RSB_HAVE_SCHED_GETAFFINITY 
+#ifdef _GNU_SOURCE
+	{
+		size_t num_cpus = CPU_SETSIZE;
+		cpu_set_t cpuset;
+		CPU_ZERO(&cpuset);
+		if(1)
+		{
+			int sgar = 0;
+
+			if( (sgar = sched_getaffinity(0, num_cpus, &cpuset)) != 0 )
+			{
+				RSB_INFO("sched_getaffinity error : %d\n",sgar);
+			}
+			else
+			{
+				RSB_INFO("sched_getaffinity's CPU_COUNT() of set:    %d\n", CPU_COUNT_S(CPU_ALLOC_SIZE(1), &cpuset));
+				RSB_INFO("sched_getaffinity runnable : %zd\n",CPU_COUNT(&cpuset));
+			}
+		}
+	}
+#endif /* _GNU_SOURCE */
+#endif /* RSB_HAVE_SCHED_GETAFFINITY */
+#endif /* RSB_HAVE_SCHED_H */
+	{
+	rsb_char_t usmhib[RSB_MAX_LINE_LENGTH];
+	RSB_INFO("memhinfo : %s\n",rsb__get_mem_hierarchy_info_string(usmhib));
+	}
+        RSB_INFO("detected free  memory : %zd\n",(size_t)rsb__sys_free_system_memory());
+        RSB_INFO("detected total memory : %zd\n",(size_t)rsb__sys_total_system_memory());
+
+	{
+		rsb_nnz_idx_t *p = NULL;
+		rsb_nnz_idx_t n, i, maxtries = RSB_MAX_MATRIX_NNZ, res = 0, cookie = 0, tries, v, mintries = RSB_CONST_MIN_TIMES_FOR_MICRO_BENCHMARK;
+		n = 4*(rsb__get_lastlevel_c_size()/sizeof(rsb_nnz_idx_t));
+		if(n<2)goto failed;
+		p = rsb__malloc(sizeof(rsb_nnz_idx_t)*n);
+		if(!p)goto failed;
+		v = 1;
+		while(2*v <= n)v *= 2;
+		--v;
+		for(i=0;i<n;++i)p[i] = i;
+		while((v/2)>1)
+		{
+			rsb_time_t mtl = RSB_CONST_IMPOSSIBLY_BIG_TIME,mtb = RSB_CONST_IMPOSSIBLY_BIG_TIME,bt,mbt = RSB_CONST_TIME_FOR_MICRO_BENCHMARK,tt=0;
+			for(tt=0,tries=0;tries<mintries || (tt<mbt && tries<maxtries);++tries)
+			{
+				/* NOTE: we are not interested in flushing the cache, here */
+				bt = - rsb_time();
+				cookie += rsb__seek_nnz_idx_t(p,v,n);
+				bt += rsb_time();
+				mtb = RSB_MIN(mtb,bt);
+				tt += bt;
+				bt = - rsb_time();
+				cookie += rsb__seek_nnz_idx_t_linear(p,v,n);
+				bt += rsb_time();
+				mtl = RSB_MIN(mtl,bt);
+				tt += bt;
+			}
+			res = cookie;
+			RSB_INFO("for array sized %d elems, took %g s for linear search and %g s for binary search for element %d, in %d tries, for a total of %f s (ignore this:%d)\n", n,mtl,mtb,v,tries,tt,res);
+			v = v/2;
+		}
+failed:
+		RSB_CONDITIONAL_FREE(p);
+	}
+	errval = rsb__dump_system_performance_summary();
+	
+	goto err;
+err:
+	RSB_DO_ERR_RETURN(errval)
+}
+
+/* @endcond */
diff --git a/rsb_util.c b/rsb_util.c
new file mode 100644
index 0000000..2b34273
--- /dev/null
+++ b/rsb_util.c
@@ -0,0 +1,8240 @@
+/* @cond INNERDOC */
+
+/**
+ * @file
+ * @brief
+ * Auxiliary functions.
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#define RSB_WANT_OMP        1
+#define RSB_MAX_OMP_THREADS 4
+#include <omp.h>       /* OpenMP parallelism (EXPERIMENTAL) */
+
+
+#include "rsb_common.h"
+/* non blas-like functions */
+
+rsb_err_t rsb__util_m4_sanity_check(void){
+	/**
+		Bugs in the m4 macros, or a broken m4 implementation, will cause this test to fail.
+		We are interested in catching such cases, as we rely on a sane m4 environment.
+	*/
+	/* generated by the $0 macro */
+
+	if(
+		0!=0 ||
+		1!=1 || 
+		1!=1 || 
+		0!=0 ||
+		0!=0 ||
+		0!=0 ||
+		0!=0 ||
+		1!=1 ||
+		0!=0 ||
+		1!=1 ||
+		1!=1 ||
+		1!=1 ||
+		0
+		)
+		goto err;
+	return RSB_ERR_NO_ERROR;
+err:
+	return RSB_ERR_INTERNAL_ERROR;
+}
+
+const void * rsb__util_increase_by_one(void *p, rsb_nnz_idx_t n, rsb_flags_t typecode){
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  ) {(((double*)p)[n])+=1;return p;}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  ) {(((float*)p)[n])+=1;return p;}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ) {(((float complex*)p)[n])+=1;return p;}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ) {(((double complex*)p)[n])+=1;return p;}
+	else 
+#endif
+	return NULL;
+}
+
+void rsb__util_set_area_to_fraction_of_integer(void *p, const int alphai, rsb_flags_t typecode){
+	/*
+		Sets *p to the fraction 1/alphai, in the given type.
+	*/
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  ) {*(double*)p = 1;*(double*)p/=alphai;}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  ) {*(float*)p = 1;*(float*)p/=alphai;}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ) {*(float complex*)p = 1;*(float complex*)p/=alphai;}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ) {*(double complex*)p = 1;*(double complex*)p/=alphai;}
+	else 
+#endif
+	return;
+}
+
+void rsb__util_set_area_to_negated_fraction(void *p, const void *alpha, rsb_flags_t typecode){
+	/*
+		a NULL alpha is treated as 1 (yielding *p = -1)
+	*/
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  ) {*(double*)p = -1;if(alpha)*(double*)p/=(*(double*)alpha);}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  ) {*(float*)p = -1;if(alpha)*(float*)p/=(*(float*)alpha);}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ) {*(float complex*)p = -1;if(alpha)*(float complex*)p/=(*(float complex*)alpha);}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ) {*(double complex*)p = -1;if(alpha)*(double complex*)p/=(*(double complex*)alpha);}
+	else 
+#endif
+	return;
+}
+
+void rsb__util_set_area_to_converted_integer(void *p, rsb_flags_t typecode, const rsb_int n){
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  ) {*(double*)p = (double)n;}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  ) {*(float*)p = (float)n;}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ) {*(float complex*)p = (float complex)n;}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ) {*(double complex*)p = (double complex)n;}
+	else 
+#endif
+	return;
+}
+
+rsb_coo_idx_t * rsb__util_get_partitioning_array( size_t bs, size_t X , rsb_blk_idx_t * X_b, rsb_flags_t flags){
+	/*!
+	 * Given a block size bs (for rows or columns) and a dimension X
+	 * (the row or column count), returns an array containing the
+	 * starting index of each block.
+	 *
+	 * Therefore, the allocated array holds *X_b+1 indices.
+	 *
+	 * \param bs	the block size
+	 * \param X	the rows or columns count
+	 * \param X_b	on output, the allocated array elements count : (X+bs-1)/bs
+	 * \return NULL on error;  a valid array pointer on success
+	 *
+	 * FIXME : why not size_t ? or maybe rsb_size_t ?
+	 * */
+	size_t i;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t * p_x = NULL;
+
+	*X_b = (X+bs-1)/bs;
+
+	/* WARNING : 1 is the extreme limit before overflow :) */
+	if( ( ((size_t)(*X_b)) < ((size_t)((X+bs-1)/bs))) || (RSB_BLK_ADD_OVERFLOW(*X_b,1)) )
+	{
+		/* overflow. should print some message. */
+		errval = RSB_ERR_LIMITS;goto err;
+	}
+
+	p_x = rsb__malloc(sizeof(rsb_coo_idx_t)*(*X_b+1));
+	if(! p_x) goto err;
+	/* note: should use some perrno some day */
+
+	/* note the last block size : it is the same, regardless of congruences */
+	{
+for(i=0;i+15<*X_b;i+=16){
+p_x[i+0 ] = (i+0 )*bs;
+	p_x[i+1 ] = (i+1 )*bs;
+	p_x[i+2 ] = (i+2 )*bs;
+	p_x[i+3 ] = (i+3 )*bs;
+	p_x[i+4 ] = (i+4 )*bs;
+	p_x[i+5 ] = (i+5 )*bs;
+	p_x[i+6 ] = (i+6 )*bs;
+	p_x[i+7 ] = (i+7 )*bs;
+	p_x[i+8 ] = (i+8 )*bs;
+	p_x[i+9 ] = (i+9 )*bs;
+	p_x[i+10 ] = (i+10 )*bs;
+	p_x[i+11 ] = (i+11 )*bs;
+	p_x[i+12 ] = (i+12 )*bs;
+	p_x[i+13 ] = (i+13 )*bs;
+	p_x[i+14 ] = (i+14 )*bs;
+	p_x[i+15 ] = (i+15 )*bs;
+	}
+for(     ;i<*X_b;++i){ p_x[i+0 ] = (i+0 )*bs;
+	 }
+}
+
+
+	/* FIXME : this point should be remarked and documented way better ! */
+	if(flags&(RSB_FLAG_WANT_BCSS_STORAGE|RSB_FLAG_WANT_FIXED_BLOCKING_VBR))
+		p_x[*X_b] = *X_b*bs;	/* the last element of p_x is the index of the last matrix row/column    + 1  */
+	else
+		p_x[*X_b] = X;	/* the last element of p_x is the index of the last matrix row/column    + 1  */
+	
+	return p_x;
+err:
+	RSB_CONDITIONAL_FREE(p_x);
+	rsb__do_perror(NULL,errval);
+	return NULL;
+}
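+
+/* Worked example (hypothetical values): with bs=4, X=10 and no BCSS flag,
+ * *X_b becomes (10+4-1)/4 = 3 and the returned array is {0,4,8,10};
+ * with RSB_FLAG_WANT_BCSS_STORAGE the terminator is rounded up: {0,4,8,12}. */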
+
+rsb_err_t rsb__vector_diff(void * c, const void * a, const void * b, rsb_type_t type, size_t n){
+	/*!
+	 * c <- a-b
+         *
+	 * \param c,a,b	array pointers
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \note see daxpy,dcopy in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		const double*ta = a,*tb = b;double *tc = c;
+		{
+for(i=0;i+15<n;i+=16){
+		tc[i+0 ] = ta[i+0 ]-tb[i+0 ];
+			tc[i+1 ] = ta[i+1 ]-tb[i+1 ];
+			tc[i+2 ] = ta[i+2 ]-tb[i+2 ];
+			tc[i+3 ] = ta[i+3 ]-tb[i+3 ];
+			tc[i+4 ] = ta[i+4 ]-tb[i+4 ];
+			tc[i+5 ] = ta[i+5 ]-tb[i+5 ];
+			tc[i+6 ] = ta[i+6 ]-tb[i+6 ];
+			tc[i+7 ] = ta[i+7 ]-tb[i+7 ];
+			tc[i+8 ] = ta[i+8 ]-tb[i+8 ];
+			tc[i+9 ] = ta[i+9 ]-tb[i+9 ];
+			tc[i+10 ] = ta[i+10 ]-tb[i+10 ];
+			tc[i+11 ] = ta[i+11 ]-tb[i+11 ];
+			tc[i+12 ] = ta[i+12 ]-tb[i+12 ];
+			tc[i+13 ] = ta[i+13 ]-tb[i+13 ];
+			tc[i+14 ] = ta[i+14 ]-tb[i+14 ];
+			tc[i+15 ] = ta[i+15 ]-tb[i+15 ];
+	}
+for(     ;i<n;++i){ 		tc[i+0 ] = ta[i+0 ]-tb[i+0 ];
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		const float*ta = a,*tb = b;float *tc = c;
+		{
+for(i=0;i+15<n;i+=16){
+		tc[i+0 ] = ta[i+0 ]-tb[i+0 ];
+			tc[i+1 ] = ta[i+1 ]-tb[i+1 ];
+			tc[i+2 ] = ta[i+2 ]-tb[i+2 ];
+			tc[i+3 ] = ta[i+3 ]-tb[i+3 ];
+			tc[i+4 ] = ta[i+4 ]-tb[i+4 ];
+			tc[i+5 ] = ta[i+5 ]-tb[i+5 ];
+			tc[i+6 ] = ta[i+6 ]-tb[i+6 ];
+			tc[i+7 ] = ta[i+7 ]-tb[i+7 ];
+			tc[i+8 ] = ta[i+8 ]-tb[i+8 ];
+			tc[i+9 ] = ta[i+9 ]-tb[i+9 ];
+			tc[i+10 ] = ta[i+10 ]-tb[i+10 ];
+			tc[i+11 ] = ta[i+11 ]-tb[i+11 ];
+			tc[i+12 ] = ta[i+12 ]-tb[i+12 ];
+			tc[i+13 ] = ta[i+13 ]-tb[i+13 ];
+			tc[i+14 ] = ta[i+14 ]-tb[i+14 ];
+			tc[i+15 ] = ta[i+15 ]-tb[i+15 ];
+	}
+for(     ;i<n;++i){ 		tc[i+0 ] = ta[i+0 ]-tb[i+0 ];
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		const float complex*ta = a,*tb = b;float complex *tc = c;
+		{
+for(i=0;i+15<n;i+=16){
+		tc[i+0 ] = ta[i+0 ]-tb[i+0 ];
+			tc[i+1 ] = ta[i+1 ]-tb[i+1 ];
+			tc[i+2 ] = ta[i+2 ]-tb[i+2 ];
+			tc[i+3 ] = ta[i+3 ]-tb[i+3 ];
+			tc[i+4 ] = ta[i+4 ]-tb[i+4 ];
+			tc[i+5 ] = ta[i+5 ]-tb[i+5 ];
+			tc[i+6 ] = ta[i+6 ]-tb[i+6 ];
+			tc[i+7 ] = ta[i+7 ]-tb[i+7 ];
+			tc[i+8 ] = ta[i+8 ]-tb[i+8 ];
+			tc[i+9 ] = ta[i+9 ]-tb[i+9 ];
+			tc[i+10 ] = ta[i+10 ]-tb[i+10 ];
+			tc[i+11 ] = ta[i+11 ]-tb[i+11 ];
+			tc[i+12 ] = ta[i+12 ]-tb[i+12 ];
+			tc[i+13 ] = ta[i+13 ]-tb[i+13 ];
+			tc[i+14 ] = ta[i+14 ]-tb[i+14 ];
+			tc[i+15 ] = ta[i+15 ]-tb[i+15 ];
+	}
+for(     ;i<n;++i){ 		tc[i+0 ] = ta[i+0 ]-tb[i+0 ];
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		const double complex*ta = a,*tb = b;double complex *tc = c;
+		{
+for(i=0;i+15<n;i+=16){
+		tc[i+0 ] = ta[i+0 ]-tb[i+0 ];
+			tc[i+1 ] = ta[i+1 ]-tb[i+1 ];
+			tc[i+2 ] = ta[i+2 ]-tb[i+2 ];
+			tc[i+3 ] = ta[i+3 ]-tb[i+3 ];
+			tc[i+4 ] = ta[i+4 ]-tb[i+4 ];
+			tc[i+5 ] = ta[i+5 ]-tb[i+5 ];
+			tc[i+6 ] = ta[i+6 ]-tb[i+6 ];
+			tc[i+7 ] = ta[i+7 ]-tb[i+7 ];
+			tc[i+8 ] = ta[i+8 ]-tb[i+8 ];
+			tc[i+9 ] = ta[i+9 ]-tb[i+9 ];
+			tc[i+10 ] = ta[i+10 ]-tb[i+10 ];
+			tc[i+11 ] = ta[i+11 ]-tb[i+11 ];
+			tc[i+12 ] = ta[i+12 ]-tb[i+12 ];
+			tc[i+13 ] = ta[i+13 ]-tb[i+13 ];
+			tc[i+14 ] = ta[i+14 ]-tb[i+14 ];
+			tc[i+15 ] = ta[i+15 ]-tb[i+15 ];
+	}
+for(     ;i<n;++i){ 		tc[i+0 ] = ta[i+0 ]-tb[i+0 ];
+	 }
+}
+; 
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
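+
+/* E.g. (sketch): computing the residual r = x - y of two double arrays of
+ * length n:
+ *   rsb__vector_diff(r, x, y, RSB_NUMERICAL_TYPE_DOUBLE, n); */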
+
+static rsb_err_t rsb_vector_norm_square(void * c, const void * a, rsb_type_t type, size_t n)
+{
+	/*!
+	 * c <- a^T*a
+         *
+	 * \param a	an array pointer
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \note see ddot in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		const double*ta = a;double *tc = c;
+		tc[0] = ((double)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		tc[0]+=ta[i+0 ]*ta[i+0 ];
+			tc[0]+=ta[i+1 ]*ta[i+1 ];
+			tc[0]+=ta[i+2 ]*ta[i+2 ];
+			tc[0]+=ta[i+3 ]*ta[i+3 ];
+			tc[0]+=ta[i+4 ]*ta[i+4 ];
+			tc[0]+=ta[i+5 ]*ta[i+5 ];
+			tc[0]+=ta[i+6 ]*ta[i+6 ];
+			tc[0]+=ta[i+7 ]*ta[i+7 ];
+			tc[0]+=ta[i+8 ]*ta[i+8 ];
+			tc[0]+=ta[i+9 ]*ta[i+9 ];
+			tc[0]+=ta[i+10 ]*ta[i+10 ];
+			tc[0]+=ta[i+11 ]*ta[i+11 ];
+			tc[0]+=ta[i+12 ]*ta[i+12 ];
+			tc[0]+=ta[i+13 ]*ta[i+13 ];
+			tc[0]+=ta[i+14 ]*ta[i+14 ];
+			tc[0]+=ta[i+15 ]*ta[i+15 ];
+	}
+for(     ;i<n;++i){ 		tc[0]+=ta[i+0 ]*ta[i+0 ];
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		const float*ta = a;float *tc = c;
+		tc[0] = ((float)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		tc[0]+=ta[i+0 ]*ta[i+0 ];
+			tc[0]+=ta[i+1 ]*ta[i+1 ];
+			tc[0]+=ta[i+2 ]*ta[i+2 ];
+			tc[0]+=ta[i+3 ]*ta[i+3 ];
+			tc[0]+=ta[i+4 ]*ta[i+4 ];
+			tc[0]+=ta[i+5 ]*ta[i+5 ];
+			tc[0]+=ta[i+6 ]*ta[i+6 ];
+			tc[0]+=ta[i+7 ]*ta[i+7 ];
+			tc[0]+=ta[i+8 ]*ta[i+8 ];
+			tc[0]+=ta[i+9 ]*ta[i+9 ];
+			tc[0]+=ta[i+10 ]*ta[i+10 ];
+			tc[0]+=ta[i+11 ]*ta[i+11 ];
+			tc[0]+=ta[i+12 ]*ta[i+12 ];
+			tc[0]+=ta[i+13 ]*ta[i+13 ];
+			tc[0]+=ta[i+14 ]*ta[i+14 ];
+			tc[0]+=ta[i+15 ]*ta[i+15 ];
+	}
+for(     ;i<n;++i){ 		tc[0]+=ta[i+0 ]*ta[i+0 ];
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		const float complex*ta = a;float complex *tc = c;
+		tc[0] = ((float complex)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		tc[0]+=ta[i+0 ]*ta[i+0 ];
+			tc[0]+=ta[i+1 ]*ta[i+1 ];
+			tc[0]+=ta[i+2 ]*ta[i+2 ];
+			tc[0]+=ta[i+3 ]*ta[i+3 ];
+			tc[0]+=ta[i+4 ]*ta[i+4 ];
+			tc[0]+=ta[i+5 ]*ta[i+5 ];
+			tc[0]+=ta[i+6 ]*ta[i+6 ];
+			tc[0]+=ta[i+7 ]*ta[i+7 ];
+			tc[0]+=ta[i+8 ]*ta[i+8 ];
+			tc[0]+=ta[i+9 ]*ta[i+9 ];
+			tc[0]+=ta[i+10 ]*ta[i+10 ];
+			tc[0]+=ta[i+11 ]*ta[i+11 ];
+			tc[0]+=ta[i+12 ]*ta[i+12 ];
+			tc[0]+=ta[i+13 ]*ta[i+13 ];
+			tc[0]+=ta[i+14 ]*ta[i+14 ];
+			tc[0]+=ta[i+15 ]*ta[i+15 ];
+	}
+for(     ;i<n;++i){ 		tc[0]+=ta[i+0 ]*ta[i+0 ];
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		const double complex*ta = a;double complex *tc = c;
+		tc[0] = ((double complex)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		tc[0]+=ta[i+0 ]*ta[i+0 ];
+			tc[0]+=ta[i+1 ]*ta[i+1 ];
+			tc[0]+=ta[i+2 ]*ta[i+2 ];
+			tc[0]+=ta[i+3 ]*ta[i+3 ];
+			tc[0]+=ta[i+4 ]*ta[i+4 ];
+			tc[0]+=ta[i+5 ]*ta[i+5 ];
+			tc[0]+=ta[i+6 ]*ta[i+6 ];
+			tc[0]+=ta[i+7 ]*ta[i+7 ];
+			tc[0]+=ta[i+8 ]*ta[i+8 ];
+			tc[0]+=ta[i+9 ]*ta[i+9 ];
+			tc[0]+=ta[i+10 ]*ta[i+10 ];
+			tc[0]+=ta[i+11 ]*ta[i+11 ];
+			tc[0]+=ta[i+12 ]*ta[i+12 ];
+			tc[0]+=ta[i+13 ]*ta[i+13 ];
+			tc[0]+=ta[i+14 ]*ta[i+14 ];
+			tc[0]+=ta[i+15 ]*ta[i+15 ];
+	}
+for(     ;i<n;++i){ 		tc[0]+=ta[i+0 ]*ta[i+0 ];
+	 }
+}
+; 
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t rsb_vector_norm(void * c, const void * a, rsb_type_t type, size_t n)
+{
+	/*!
+	 * c <- sqrt(a^T*a)
+         *
+	 * \param a	an array pointer
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \note see ddot in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	rsb_err_t errval;
+	if(!c)
+		return RSB_ERR_BADARGS;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		double*cp = (double*)c;
+		errval = rsb_vector_norm_square(cp,a,type,n);
+		*cp = sqrt(*cp);
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		float*cp = (float*)c;
+		errval = rsb_vector_norm_square(cp,a,type,n);
+		*cp = sqrtf(*cp);
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		float complex*cp = (float complex*)c;
+		errval = rsb_vector_norm_square(cp,a,type,n);
+		*cp = csqrtf(*cp);
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		double complex*cp = (double complex*)c;
+		errval = rsb_vector_norm_square(cp,a,type,n);
+		*cp = csqrt(*cp);
+	}
+	else 
+#endif
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+	RSB_DO_ERR_RETURN(errval)
+}
+
+static rsb_err_t rsb_vector_norm_square_strided(void * c, const void * a, rsb_type_t type, size_t n, rsb_nnz_idx_t inc)
+{
+	/*!
+	 * c <- a^T*a
+         *
+	 * \param a	an array pointer
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \note see ddot in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+	if(inc==1)
+		return rsb_vector_norm_square(c,a,type,n);
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		const double*ta = a;double *tc = c;
+		tc[0] = ((double)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		tc[0]+=ta[(i+0 )*inc]*ta[(i+0 )*inc];
+			tc[0]+=ta[(i+1 )*inc]*ta[(i+1 )*inc];
+			tc[0]+=ta[(i+2 )*inc]*ta[(i+2 )*inc];
+			tc[0]+=ta[(i+3 )*inc]*ta[(i+3 )*inc];
+			tc[0]+=ta[(i+4 )*inc]*ta[(i+4 )*inc];
+			tc[0]+=ta[(i+5 )*inc]*ta[(i+5 )*inc];
+			tc[0]+=ta[(i+6 )*inc]*ta[(i+6 )*inc];
+			tc[0]+=ta[(i+7 )*inc]*ta[(i+7 )*inc];
+			tc[0]+=ta[(i+8 )*inc]*ta[(i+8 )*inc];
+			tc[0]+=ta[(i+9 )*inc]*ta[(i+9 )*inc];
+			tc[0]+=ta[(i+10 )*inc]*ta[(i+10 )*inc];
+			tc[0]+=ta[(i+11 )*inc]*ta[(i+11 )*inc];
+			tc[0]+=ta[(i+12 )*inc]*ta[(i+12 )*inc];
+			tc[0]+=ta[(i+13 )*inc]*ta[(i+13 )*inc];
+			tc[0]+=ta[(i+14 )*inc]*ta[(i+14 )*inc];
+			tc[0]+=ta[(i+15 )*inc]*ta[(i+15 )*inc];
+	}
+for(     ;i<n;++i){ 		tc[0]+=ta[(i+0 )*inc]*ta[(i+0 )*inc];
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		const float*ta = a;float *tc = c;
+		tc[0] = ((float)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		tc[0]+=ta[(i+0 )*inc]*ta[(i+0 )*inc];
+			tc[0]+=ta[(i+1 )*inc]*ta[(i+1 )*inc];
+			tc[0]+=ta[(i+2 )*inc]*ta[(i+2 )*inc];
+			tc[0]+=ta[(i+3 )*inc]*ta[(i+3 )*inc];
+			tc[0]+=ta[(i+4 )*inc]*ta[(i+4 )*inc];
+			tc[0]+=ta[(i+5 )*inc]*ta[(i+5 )*inc];
+			tc[0]+=ta[(i+6 )*inc]*ta[(i+6 )*inc];
+			tc[0]+=ta[(i+7 )*inc]*ta[(i+7 )*inc];
+			tc[0]+=ta[(i+8 )*inc]*ta[(i+8 )*inc];
+			tc[0]+=ta[(i+9 )*inc]*ta[(i+9 )*inc];
+			tc[0]+=ta[(i+10 )*inc]*ta[(i+10 )*inc];
+			tc[0]+=ta[(i+11 )*inc]*ta[(i+11 )*inc];
+			tc[0]+=ta[(i+12 )*inc]*ta[(i+12 )*inc];
+			tc[0]+=ta[(i+13 )*inc]*ta[(i+13 )*inc];
+			tc[0]+=ta[(i+14 )*inc]*ta[(i+14 )*inc];
+			tc[0]+=ta[(i+15 )*inc]*ta[(i+15 )*inc];
+	}
+for(     ;i<n;++i){ 		tc[0]+=ta[(i+0 )*inc]*ta[(i+0 )*inc];
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		const float complex*ta = a;float complex *tc = c;
+		tc[0] = ((float complex)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		tc[0]+=ta[(i+0 )*inc]*ta[(i+0 )*inc];
+			tc[0]+=ta[(i+1 )*inc]*ta[(i+1 )*inc];
+			tc[0]+=ta[(i+2 )*inc]*ta[(i+2 )*inc];
+			tc[0]+=ta[(i+3 )*inc]*ta[(i+3 )*inc];
+			tc[0]+=ta[(i+4 )*inc]*ta[(i+4 )*inc];
+			tc[0]+=ta[(i+5 )*inc]*ta[(i+5 )*inc];
+			tc[0]+=ta[(i+6 )*inc]*ta[(i+6 )*inc];
+			tc[0]+=ta[(i+7 )*inc]*ta[(i+7 )*inc];
+			tc[0]+=ta[(i+8 )*inc]*ta[(i+8 )*inc];
+			tc[0]+=ta[(i+9 )*inc]*ta[(i+9 )*inc];
+			tc[0]+=ta[(i+10 )*inc]*ta[(i+10 )*inc];
+			tc[0]+=ta[(i+11 )*inc]*ta[(i+11 )*inc];
+			tc[0]+=ta[(i+12 )*inc]*ta[(i+12 )*inc];
+			tc[0]+=ta[(i+13 )*inc]*ta[(i+13 )*inc];
+			tc[0]+=ta[(i+14 )*inc]*ta[(i+14 )*inc];
+			tc[0]+=ta[(i+15 )*inc]*ta[(i+15 )*inc];
+	}
+for(     ;i<n;++i){ 		tc[0]+=ta[(i+0 )*inc]*ta[(i+0 )*inc];
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		const double complex*ta = a;double complex *tc = c;
+		tc[0] = ((double complex)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		tc[0]+=ta[(i+0 )*inc]*ta[(i+0 )*inc];
+			tc[0]+=ta[(i+1 )*inc]*ta[(i+1 )*inc];
+			tc[0]+=ta[(i+2 )*inc]*ta[(i+2 )*inc];
+			tc[0]+=ta[(i+3 )*inc]*ta[(i+3 )*inc];
+			tc[0]+=ta[(i+4 )*inc]*ta[(i+4 )*inc];
+			tc[0]+=ta[(i+5 )*inc]*ta[(i+5 )*inc];
+			tc[0]+=ta[(i+6 )*inc]*ta[(i+6 )*inc];
+			tc[0]+=ta[(i+7 )*inc]*ta[(i+7 )*inc];
+			tc[0]+=ta[(i+8 )*inc]*ta[(i+8 )*inc];
+			tc[0]+=ta[(i+9 )*inc]*ta[(i+9 )*inc];
+			tc[0]+=ta[(i+10 )*inc]*ta[(i+10 )*inc];
+			tc[0]+=ta[(i+11 )*inc]*ta[(i+11 )*inc];
+			tc[0]+=ta[(i+12 )*inc]*ta[(i+12 )*inc];
+			tc[0]+=ta[(i+13 )*inc]*ta[(i+13 )*inc];
+			tc[0]+=ta[(i+14 )*inc]*ta[(i+14 )*inc];
+			tc[0]+=ta[(i+15 )*inc]*ta[(i+15 )*inc];
+	}
+for(     ;i<n;++i){ 		tc[0]+=ta[(i+0 )*inc]*ta[(i+0 )*inc];
+	 }
+}
+; 
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__vector_norm_strided(void * c, const void * a, rsb_type_t type, size_t n, rsb_nnz_idx_t inc)
+{
+	/*!
+	 * c <- sqrt(a^T*a)
+	 *
+	 * \param c	pointer receiving the result scalar
+	 * \param a	the input array pointer
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \param inc	the input array stride
+	 * \note see dnrm2 in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	rsb_err_t errval;
+	if(!c)
+		return RSB_ERR_BADARGS;
+	if(inc==1)
+		return rsb_vector_norm(c,a,type,n);
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		double*cp = (double*)c;
+		errval = rsb_vector_norm_square_strided(cp,a,type,n,inc);
+		*cp = sqrt(*cp);
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		float*cp = (float*)c;
+		errval = rsb_vector_norm_square_strided(cp,a,type,n,inc);
+		*cp = sqrtf(*cp);
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		float complex*cp = (float complex*)c;
+		errval = rsb_vector_norm_square_strided(cp,a,type,n,inc);
+		*cp = csqrtf(*cp);
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		double complex*cp = (double complex*)c;
+		errval = rsb_vector_norm_square_strided(cp,a,type,n,inc);
+		*cp = csqrt(*cp);
+	}
+	else 
+#endif
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+	RSB_DO_ERR_RETURN(errval)
+}
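+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+/* Illustrative sketch only: rsb_example_norm_strided_usage is a hypothetical
+ * helper, not part of librsb, showing a call to rsb__vector_norm_strided on
+ * the even-indexed elements of a made-up double array. */
+static void rsb_example_norm_strided_usage(void)
+{
+	const double v[6] = { 3.0, -1.0, 4.0, -1.0, 0.0, -1.0 };
+	double nrm = 0.0;
+
+	/* norm over v[0], v[2], v[4]: sqrt(9 + 16 + 0) = 5 */
+	(void)rsb__vector_norm_strided(&nrm, v, RSB_NUMERICAL_TYPE_DOUBLE, 3, 2);
+}
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */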
+
+rsb_err_t rsb__util_vector_sum_strided(void * c, const void * a, rsb_type_t type, size_t n, rsb_nnz_idx_t inc)
+{
+	/*!
+	 * c <- sum(a)
+	 *
+	 * \param c	pointer receiving the result scalar
+	 * \param a	the input array pointer
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \param inc	the input array stride
+	 * \note see dasum in BLAS (but without the absolute values)
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		register double acc = ((double)(0)); const double*ta = a; double*tc = c;
+	{
+for(i=0;i+15<n;i+=16){
+		acc+=ta[(i+0 )*inc];
+			acc+=ta[(i+1 )*inc];
+			acc+=ta[(i+2 )*inc];
+			acc+=ta[(i+3 )*inc];
+			acc+=ta[(i+4 )*inc];
+			acc+=ta[(i+5 )*inc];
+			acc+=ta[(i+6 )*inc];
+			acc+=ta[(i+7 )*inc];
+			acc+=ta[(i+8 )*inc];
+			acc+=ta[(i+9 )*inc];
+			acc+=ta[(i+10 )*inc];
+			acc+=ta[(i+11 )*inc];
+			acc+=ta[(i+12 )*inc];
+			acc+=ta[(i+13 )*inc];
+			acc+=ta[(i+14 )*inc];
+			acc+=ta[(i+15 )*inc];
+	}
+for(     ;i<n;++i){ 		acc+=ta[(i+0 )*inc];
+	 }
+}
+; 
+		tc[0] = acc;
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		register float acc = ((float)(0)); const float*ta = a; float*tc = c;
+	{
+for(i=0;i+15<n;i+=16){
+		acc+=ta[(i+0 )*inc];
+			acc+=ta[(i+1 )*inc];
+			acc+=ta[(i+2 )*inc];
+			acc+=ta[(i+3 )*inc];
+			acc+=ta[(i+4 )*inc];
+			acc+=ta[(i+5 )*inc];
+			acc+=ta[(i+6 )*inc];
+			acc+=ta[(i+7 )*inc];
+			acc+=ta[(i+8 )*inc];
+			acc+=ta[(i+9 )*inc];
+			acc+=ta[(i+10 )*inc];
+			acc+=ta[(i+11 )*inc];
+			acc+=ta[(i+12 )*inc];
+			acc+=ta[(i+13 )*inc];
+			acc+=ta[(i+14 )*inc];
+			acc+=ta[(i+15 )*inc];
+	}
+for(     ;i<n;++i){ 		acc+=ta[(i+0 )*inc];
+	 }
+}
+; 
+		tc[0] = acc;
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		register float complex acc = ((float complex)(0)); const float complex*ta = a; float complex*tc = c;
+	{
+for(i=0;i+15<n;i+=16){
+		acc+=ta[(i+0 )*inc];
+			acc+=ta[(i+1 )*inc];
+			acc+=ta[(i+2 )*inc];
+			acc+=ta[(i+3 )*inc];
+			acc+=ta[(i+4 )*inc];
+			acc+=ta[(i+5 )*inc];
+			acc+=ta[(i+6 )*inc];
+			acc+=ta[(i+7 )*inc];
+			acc+=ta[(i+8 )*inc];
+			acc+=ta[(i+9 )*inc];
+			acc+=ta[(i+10 )*inc];
+			acc+=ta[(i+11 )*inc];
+			acc+=ta[(i+12 )*inc];
+			acc+=ta[(i+13 )*inc];
+			acc+=ta[(i+14 )*inc];
+			acc+=ta[(i+15 )*inc];
+	}
+for(     ;i<n;++i){ 		acc+=ta[(i+0 )*inc];
+	 }
+}
+; 
+		tc[0] = acc;
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		register double complex acc = ((double complex)(0)); const double complex*ta = a; double complex*tc = c;
+	{
+for(i=0;i+15<n;i+=16){
+		acc+=ta[(i+0 )*inc];
+			acc+=ta[(i+1 )*inc];
+			acc+=ta[(i+2 )*inc];
+			acc+=ta[(i+3 )*inc];
+			acc+=ta[(i+4 )*inc];
+			acc+=ta[(i+5 )*inc];
+			acc+=ta[(i+6 )*inc];
+			acc+=ta[(i+7 )*inc];
+			acc+=ta[(i+8 )*inc];
+			acc+=ta[(i+9 )*inc];
+			acc+=ta[(i+10 )*inc];
+			acc+=ta[(i+11 )*inc];
+			acc+=ta[(i+12 )*inc];
+			acc+=ta[(i+13 )*inc];
+			acc+=ta[(i+14 )*inc];
+			acc+=ta[(i+15 )*inc];
+	}
+for(     ;i<n;++i){ 		acc+=ta[(i+0 )*inc];
+	 }
+}
+; 
+		tc[0] = acc;
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__util_vector_sum(void * c, const void * a, rsb_type_t type, size_t n)
+{
+	/*!
+	 * c <- sum(a)
+	 *
+	 * \param c	pointer receiving the result scalar
+	 * \param a	the input array pointer
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \note see dasum in BLAS (but without the absolute values)
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+	const double*ta = a; double*tc = c; tc[0] = ((double)(0));
+	{
+for(i=0;i+15<n;i+=16){
+	tc[0]+=ta[i+0 ];
+		tc[0]+=ta[i+1 ];
+		tc[0]+=ta[i+2 ];
+		tc[0]+=ta[i+3 ];
+		tc[0]+=ta[i+4 ];
+		tc[0]+=ta[i+5 ];
+		tc[0]+=ta[i+6 ];
+		tc[0]+=ta[i+7 ];
+		tc[0]+=ta[i+8 ];
+		tc[0]+=ta[i+9 ];
+		tc[0]+=ta[i+10 ];
+		tc[0]+=ta[i+11 ];
+		tc[0]+=ta[i+12 ];
+		tc[0]+=ta[i+13 ];
+		tc[0]+=ta[i+14 ];
+		tc[0]+=ta[i+15 ];
+	}
+for(     ;i<n;++i){ 	tc[0]+=ta[i+0 ];
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+	const float*ta = a; float*tc = c; tc[0] = ((float)(0));
+	{
+for(i=0;i+15<n;i+=16){
+	tc[0]+=ta[i+0 ];
+		tc[0]+=ta[i+1 ];
+		tc[0]+=ta[i+2 ];
+		tc[0]+=ta[i+3 ];
+		tc[0]+=ta[i+4 ];
+		tc[0]+=ta[i+5 ];
+		tc[0]+=ta[i+6 ];
+		tc[0]+=ta[i+7 ];
+		tc[0]+=ta[i+8 ];
+		tc[0]+=ta[i+9 ];
+		tc[0]+=ta[i+10 ];
+		tc[0]+=ta[i+11 ];
+		tc[0]+=ta[i+12 ];
+		tc[0]+=ta[i+13 ];
+		tc[0]+=ta[i+14 ];
+		tc[0]+=ta[i+15 ];
+	}
+for(     ;i<n;++i){ 	tc[0]+=ta[i+0 ];
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+	const float complex*ta = a; float complex*tc = c; tc[0] = ((float complex)(0));
+	{
+for(i=0;i+15<n;i+=16){
+	tc[0]+=ta[i+0 ];
+		tc[0]+=ta[i+1 ];
+		tc[0]+=ta[i+2 ];
+		tc[0]+=ta[i+3 ];
+		tc[0]+=ta[i+4 ];
+		tc[0]+=ta[i+5 ];
+		tc[0]+=ta[i+6 ];
+		tc[0]+=ta[i+7 ];
+		tc[0]+=ta[i+8 ];
+		tc[0]+=ta[i+9 ];
+		tc[0]+=ta[i+10 ];
+		tc[0]+=ta[i+11 ];
+		tc[0]+=ta[i+12 ];
+		tc[0]+=ta[i+13 ];
+		tc[0]+=ta[i+14 ];
+		tc[0]+=ta[i+15 ];
+	}
+for(     ;i<n;++i){ 	tc[0]+=ta[i+0 ];
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+	const double complex*ta = a; double complex*tc = c; tc[0] = ((double complex)(0));
+	{
+for(i=0;i+15<n;i+=16){
+	tc[0]+=ta[i+0 ];
+		tc[0]+=ta[i+1 ];
+		tc[0]+=ta[i+2 ];
+		tc[0]+=ta[i+3 ];
+		tc[0]+=ta[i+4 ];
+		tc[0]+=ta[i+5 ];
+		tc[0]+=ta[i+6 ];
+		tc[0]+=ta[i+7 ];
+		tc[0]+=ta[i+8 ];
+		tc[0]+=ta[i+9 ];
+		tc[0]+=ta[i+10 ];
+		tc[0]+=ta[i+11 ];
+		tc[0]+=ta[i+12 ];
+		tc[0]+=ta[i+13 ];
+		tc[0]+=ta[i+14 ];
+		tc[0]+=ta[i+15 ];
+	}
+for(     ;i<n;++i){ 	tc[0]+=ta[i+0 ];
+	 }
+}
+; 
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
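+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+/* Illustrative sketch only: rsb_example_vector_sum_usage is a hypothetical
+ * helper, not part of librsb, summing a contiguous made-up double array. */
+static void rsb_example_vector_sum_usage(void)
+{
+	const double v[4] = { 1.0, 2.0, 3.0, 4.0 };
+	double s = 0.0;
+
+	(void)rsb__util_vector_sum(&s, v, RSB_NUMERICAL_TYPE_DOUBLE, 4); /* s = 10 */
+}
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */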
+
+static rsb_err_t rsb__vector_mult_sum(const void * a, const void * b, void * c, rsb_type_t type, size_t n, const int inca, const int incb)
+{
+	/*!
+	 * c <- sum(a*b)
+	 * It is allowed to pass c == a, c == b, or a == b.
+	 *
+	 * \param a	the first input array pointer
+	 * \param b	the second input array pointer
+	 * \param c	pointer receiving the result scalar
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \param inca	the stride of a
+	 * \param incb	the stride of b
+	 * \note see ddot in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 *
+	 * P.S.: numerically speaking, this routine is a crime (plain
+	 * left-to-right accumulation, no compensation)!
+	 * 
+	 * */
+	size_t i;
+	if(a==b && inca==incb)
+		return rsb_vector_norm_square_strided(c,a,type,n,inca);
+	if(inca == 1 && incb == 1)
+	{
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		const double*tb = b; const double*ta = a; double*tc = c,cacc = ((double)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		cacc+=ta[i+0 ]*tb[i+0 ];
+			cacc+=ta[i+1 ]*tb[i+1 ];
+			cacc+=ta[i+2 ]*tb[i+2 ];
+			cacc+=ta[i+3 ]*tb[i+3 ];
+			cacc+=ta[i+4 ]*tb[i+4 ];
+			cacc+=ta[i+5 ]*tb[i+5 ];
+			cacc+=ta[i+6 ]*tb[i+6 ];
+			cacc+=ta[i+7 ]*tb[i+7 ];
+			cacc+=ta[i+8 ]*tb[i+8 ];
+			cacc+=ta[i+9 ]*tb[i+9 ];
+			cacc+=ta[i+10 ]*tb[i+10 ];
+			cacc+=ta[i+11 ]*tb[i+11 ];
+			cacc+=ta[i+12 ]*tb[i+12 ];
+			cacc+=ta[i+13 ]*tb[i+13 ];
+			cacc+=ta[i+14 ]*tb[i+14 ];
+			cacc+=ta[i+15 ]*tb[i+15 ];
+	}
+for(     ;i<n;++i){ 		cacc+=ta[i+0 ]*tb[i+0 ];
+	 }
+}
+; 
+		*tc = cacc;
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		const float*tb = b; const float*ta = a; float*tc = c,cacc = ((float)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		cacc+=ta[i+0 ]*tb[i+0 ];
+			cacc+=ta[i+1 ]*tb[i+1 ];
+			cacc+=ta[i+2 ]*tb[i+2 ];
+			cacc+=ta[i+3 ]*tb[i+3 ];
+			cacc+=ta[i+4 ]*tb[i+4 ];
+			cacc+=ta[i+5 ]*tb[i+5 ];
+			cacc+=ta[i+6 ]*tb[i+6 ];
+			cacc+=ta[i+7 ]*tb[i+7 ];
+			cacc+=ta[i+8 ]*tb[i+8 ];
+			cacc+=ta[i+9 ]*tb[i+9 ];
+			cacc+=ta[i+10 ]*tb[i+10 ];
+			cacc+=ta[i+11 ]*tb[i+11 ];
+			cacc+=ta[i+12 ]*tb[i+12 ];
+			cacc+=ta[i+13 ]*tb[i+13 ];
+			cacc+=ta[i+14 ]*tb[i+14 ];
+			cacc+=ta[i+15 ]*tb[i+15 ];
+	}
+for(     ;i<n;++i){ 		cacc+=ta[i+0 ]*tb[i+0 ];
+	 }
+}
+; 
+		*tc = cacc;
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		const float complex*tb = b; const float complex*ta = a; float complex*tc = c,cacc = ((float complex)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		cacc+=ta[i+0 ]*tb[i+0 ];
+			cacc+=ta[i+1 ]*tb[i+1 ];
+			cacc+=ta[i+2 ]*tb[i+2 ];
+			cacc+=ta[i+3 ]*tb[i+3 ];
+			cacc+=ta[i+4 ]*tb[i+4 ];
+			cacc+=ta[i+5 ]*tb[i+5 ];
+			cacc+=ta[i+6 ]*tb[i+6 ];
+			cacc+=ta[i+7 ]*tb[i+7 ];
+			cacc+=ta[i+8 ]*tb[i+8 ];
+			cacc+=ta[i+9 ]*tb[i+9 ];
+			cacc+=ta[i+10 ]*tb[i+10 ];
+			cacc+=ta[i+11 ]*tb[i+11 ];
+			cacc+=ta[i+12 ]*tb[i+12 ];
+			cacc+=ta[i+13 ]*tb[i+13 ];
+			cacc+=ta[i+14 ]*tb[i+14 ];
+			cacc+=ta[i+15 ]*tb[i+15 ];
+	}
+for(     ;i<n;++i){ 		cacc+=ta[i+0 ]*tb[i+0 ];
+	 }
+}
+; 
+		*tc = cacc;
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		const double complex*tb = b; const double complex*ta = a; double complex*tc = c,cacc = ((double complex)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		cacc+=ta[i+0 ]*tb[i+0 ];
+			cacc+=ta[i+1 ]*tb[i+1 ];
+			cacc+=ta[i+2 ]*tb[i+2 ];
+			cacc+=ta[i+3 ]*tb[i+3 ];
+			cacc+=ta[i+4 ]*tb[i+4 ];
+			cacc+=ta[i+5 ]*tb[i+5 ];
+			cacc+=ta[i+6 ]*tb[i+6 ];
+			cacc+=ta[i+7 ]*tb[i+7 ];
+			cacc+=ta[i+8 ]*tb[i+8 ];
+			cacc+=ta[i+9 ]*tb[i+9 ];
+			cacc+=ta[i+10 ]*tb[i+10 ];
+			cacc+=ta[i+11 ]*tb[i+11 ];
+			cacc+=ta[i+12 ]*tb[i+12 ];
+			cacc+=ta[i+13 ]*tb[i+13 ];
+			cacc+=ta[i+14 ]*tb[i+14 ];
+			cacc+=ta[i+15 ]*tb[i+15 ];
+	}
+for(     ;i<n;++i){ 		cacc+=ta[i+0 ]*tb[i+0 ];
+	 }
+}
+; 
+		*tc = cacc;
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	}
+	else
+	{
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		const double*tb = b; const double*ta = a; double*tc = c,cacc = ((double)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		cacc+=ta[inca*(i+0 )]*tb[incb*(i+0 )];
+			cacc+=ta[inca*(i+1 )]*tb[incb*(i+1 )];
+			cacc+=ta[inca*(i+2 )]*tb[incb*(i+2 )];
+			cacc+=ta[inca*(i+3 )]*tb[incb*(i+3 )];
+			cacc+=ta[inca*(i+4 )]*tb[incb*(i+4 )];
+			cacc+=ta[inca*(i+5 )]*tb[incb*(i+5 )];
+			cacc+=ta[inca*(i+6 )]*tb[incb*(i+6 )];
+			cacc+=ta[inca*(i+7 )]*tb[incb*(i+7 )];
+			cacc+=ta[inca*(i+8 )]*tb[incb*(i+8 )];
+			cacc+=ta[inca*(i+9 )]*tb[incb*(i+9 )];
+			cacc+=ta[inca*(i+10 )]*tb[incb*(i+10 )];
+			cacc+=ta[inca*(i+11 )]*tb[incb*(i+11 )];
+			cacc+=ta[inca*(i+12 )]*tb[incb*(i+12 )];
+			cacc+=ta[inca*(i+13 )]*tb[incb*(i+13 )];
+			cacc+=ta[inca*(i+14 )]*tb[incb*(i+14 )];
+			cacc+=ta[inca*(i+15 )]*tb[incb*(i+15 )];
+	}
+for(     ;i<n;++i){ 		cacc+=ta[inca*(i+0 )]*tb[incb*(i+0 )];
+	 }
+}
+; 
+		*tc = cacc;
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		const float*tb = b; const float*ta = a; float*tc = c,cacc = ((float)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		cacc+=ta[inca*(i+0 )]*tb[incb*(i+0 )];
+			cacc+=ta[inca*(i+1 )]*tb[incb*(i+1 )];
+			cacc+=ta[inca*(i+2 )]*tb[incb*(i+2 )];
+			cacc+=ta[inca*(i+3 )]*tb[incb*(i+3 )];
+			cacc+=ta[inca*(i+4 )]*tb[incb*(i+4 )];
+			cacc+=ta[inca*(i+5 )]*tb[incb*(i+5 )];
+			cacc+=ta[inca*(i+6 )]*tb[incb*(i+6 )];
+			cacc+=ta[inca*(i+7 )]*tb[incb*(i+7 )];
+			cacc+=ta[inca*(i+8 )]*tb[incb*(i+8 )];
+			cacc+=ta[inca*(i+9 )]*tb[incb*(i+9 )];
+			cacc+=ta[inca*(i+10 )]*tb[incb*(i+10 )];
+			cacc+=ta[inca*(i+11 )]*tb[incb*(i+11 )];
+			cacc+=ta[inca*(i+12 )]*tb[incb*(i+12 )];
+			cacc+=ta[inca*(i+13 )]*tb[incb*(i+13 )];
+			cacc+=ta[inca*(i+14 )]*tb[incb*(i+14 )];
+			cacc+=ta[inca*(i+15 )]*tb[incb*(i+15 )];
+	}
+for(     ;i<n;++i){ 		cacc+=ta[inca*(i+0 )]*tb[incb*(i+0 )];
+	 }
+}
+; 
+		*tc = cacc;
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		const float complex*tb = b; const float complex*ta = a; float complex*tc = c,cacc = ((float complex)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		cacc+=ta[inca*(i+0 )]*tb[incb*(i+0 )];
+			cacc+=ta[inca*(i+1 )]*tb[incb*(i+1 )];
+			cacc+=ta[inca*(i+2 )]*tb[incb*(i+2 )];
+			cacc+=ta[inca*(i+3 )]*tb[incb*(i+3 )];
+			cacc+=ta[inca*(i+4 )]*tb[incb*(i+4 )];
+			cacc+=ta[inca*(i+5 )]*tb[incb*(i+5 )];
+			cacc+=ta[inca*(i+6 )]*tb[incb*(i+6 )];
+			cacc+=ta[inca*(i+7 )]*tb[incb*(i+7 )];
+			cacc+=ta[inca*(i+8 )]*tb[incb*(i+8 )];
+			cacc+=ta[inca*(i+9 )]*tb[incb*(i+9 )];
+			cacc+=ta[inca*(i+10 )]*tb[incb*(i+10 )];
+			cacc+=ta[inca*(i+11 )]*tb[incb*(i+11 )];
+			cacc+=ta[inca*(i+12 )]*tb[incb*(i+12 )];
+			cacc+=ta[inca*(i+13 )]*tb[incb*(i+13 )];
+			cacc+=ta[inca*(i+14 )]*tb[incb*(i+14 )];
+			cacc+=ta[inca*(i+15 )]*tb[incb*(i+15 )];
+	}
+for(     ;i<n;++i){ 		cacc+=ta[inca*(i+0 )]*tb[incb*(i+0 )];
+	 }
+}
+; 
+		*tc = cacc;
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		const double complex*tb = b; const double complex*ta = a; double complex*tc = c,cacc = ((double complex)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		cacc+=ta[inca*(i+0 )]*tb[incb*(i+0 )];
+			cacc+=ta[inca*(i+1 )]*tb[incb*(i+1 )];
+			cacc+=ta[inca*(i+2 )]*tb[incb*(i+2 )];
+			cacc+=ta[inca*(i+3 )]*tb[incb*(i+3 )];
+			cacc+=ta[inca*(i+4 )]*tb[incb*(i+4 )];
+			cacc+=ta[inca*(i+5 )]*tb[incb*(i+5 )];
+			cacc+=ta[inca*(i+6 )]*tb[incb*(i+6 )];
+			cacc+=ta[inca*(i+7 )]*tb[incb*(i+7 )];
+			cacc+=ta[inca*(i+8 )]*tb[incb*(i+8 )];
+			cacc+=ta[inca*(i+9 )]*tb[incb*(i+9 )];
+			cacc+=ta[inca*(i+10 )]*tb[incb*(i+10 )];
+			cacc+=ta[inca*(i+11 )]*tb[incb*(i+11 )];
+			cacc+=ta[inca*(i+12 )]*tb[incb*(i+12 )];
+			cacc+=ta[inca*(i+13 )]*tb[incb*(i+13 )];
+			cacc+=ta[inca*(i+14 )]*tb[incb*(i+14 )];
+			cacc+=ta[inca*(i+15 )]*tb[incb*(i+15 )];
+	}
+for(     ;i<n;++i){ 		cacc+=ta[inca*(i+0 )]*tb[incb*(i+0 )];
+	 }
+}
+; 
+		*tc = cacc;
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	}
+	return RSB_ERR_NO_ERROR;
+}
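+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+/* Illustrative sketch only: rsb_example_mult_sum_usage is a hypothetical
+ * helper, not part of librsb, computing a unit-stride dot product with the
+ * static kernel above; with a == b and inca == incb the kernel instead
+ * falls back to rsb_vector_norm_square_strided. */
+static void rsb_example_mult_sum_usage(void)
+{
+	const double x[4] = { 1.0, 2.0, 3.0, 4.0 };
+	const double y[4] = { 4.0, 3.0, 2.0, 1.0 };
+	double dot = 0.0;
+
+	(void)rsb__vector_mult_sum(x, y, &dot, RSB_NUMERICAL_TYPE_DOUBLE, 4, 1, 1); /* dot = 20 */
+}
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */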
+
+static rsb_err_t rsb_fill_with_zeros_nostride(void * array, rsb_type_t type, size_t n)
+{
+	/*!
+	 * \ingroup gr_vec
+	 * Zeroes the first n elements of an array of the given type.
+	 * \param array	an array pointer
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  ){
+	double*ta = array;
+{
+for(i=0;i+15<n;i+=16){
+ta[i+0 ] = ((double)(0));ta[i+1 ] = ((double)(0));ta[i+2 ] = ((double)(0));ta[i+3 ] = ((double)(0));ta[i+4 ] = ((double)(0));ta[i+5 ] = ((double)(0));ta[i+6 ] = ((double)(0));ta[i+7 ] = ((double)(0));ta[i+8 ] = ((double)(0));ta[i+9 ] = ((double)(0));ta[i+10 ] = ((double)(0));ta[i+11 ] = ((double)(0));ta[i+12 ] = ((double)(0));ta[i+13 ] = ((double)(0));ta[i+14 ] = ((double)(0));ta[i+15 ] = ((double)(0));}
+for(     ;i<n;++i){ ta[i+0 ] = ((double)(0)); }
+}
+}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  ){
+	float*ta = array;
+{
+for(i=0;i+15<n;i+=16){
+ta[i+0 ] = ((float)(0));ta[i+1 ] = ((float)(0));ta[i+2 ] = ((float)(0));ta[i+3 ] = ((float)(0));ta[i+4 ] = ((float)(0));ta[i+5 ] = ((float)(0));ta[i+6 ] = ((float)(0));ta[i+7 ] = ((float)(0));ta[i+8 ] = ((float)(0));ta[i+9 ] = ((float)(0));ta[i+10 ] = ((float)(0));ta[i+11 ] = ((float)(0));ta[i+12 ] = ((float)(0));ta[i+13 ] = ((float)(0));ta[i+14 ] = ((float)(0));ta[i+15 ] = ((float)(0));}
+for(     ;i<n;++i){ ta[i+0 ] = ((float)(0)); }
+}
+}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ){
+	float complex*ta = array;
+{
+for(i=0;i+15<n;i+=16){
+ta[i+0 ] = ((float complex)(0));ta[i+1 ] = ((float complex)(0));ta[i+2 ] = ((float complex)(0));ta[i+3 ] = ((float complex)(0));ta[i+4 ] = ((float complex)(0));ta[i+5 ] = ((float complex)(0));ta[i+6 ] = ((float complex)(0));ta[i+7 ] = ((float complex)(0));ta[i+8 ] = ((float complex)(0));ta[i+9 ] = ((float complex)(0));ta[i+10 ] = ((float complex)(0));ta[i+11 ] = ((float complex)(0));ta[i+12 ] = ((float complex)(0));ta[i+13 ] = ((float complex)(0));ta[i+14 ] = ((float complex)(0));ta[i+15 ] = ((float complex)(0));}
+for(     ;i<n;++i){ ta[i+0 ] = ((float complex)(0)); }
+}
+}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ){
+	double complex*ta = array;
+{
+for(i=0;i+15<n;i+=16){
+ta[i+0 ] = ((double complex)(0));ta[i+1 ] = ((double complex)(0));ta[i+2 ] = ((double complex)(0));ta[i+3 ] = ((double complex)(0));ta[i+4 ] = ((double complex)(0));ta[i+5 ] = ((double complex)(0));ta[i+6 ] = ((double complex)(0));ta[i+7 ] = ((double complex)(0));ta[i+8 ] = ((double complex)(0));ta[i+9 ] = ((double complex)(0));ta[i+10 ] = ((double complex)(0));ta[i+11 ] = ((double complex)(0));ta[i+12 ] = ((double complex)(0));ta[i+13 ] = ((double complex)(0));ta[i+14 ] = ((double complex)(0));ta[i+15 ] = ((double complex)(0));}
+for(     ;i<n;++i){ ta[i+0 ] = ((double complex)(0)); }
+}
+}
+	else 
+#endif
+		return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t rsb_fill_with_zeros(void * array, rsb_type_t type, size_t n, size_t incx)
+{
+	/*!
+	 * \ingroup gr_vec
+	 * Zeroes n elements, spaced incx apart, of an array of the given type.
+	 * \param array	an array pointer
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \param incx	the input array stride
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+	if(incx==1)
+		return rsb_fill_with_zeros_nostride(array,type,n);
+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  ){
+	double*ta = array;
+{
+for(i=0;i+15<n;i+=16){
+ta[(i+0 )*incx] = ((double)(0));ta[(i+1 )*incx] = ((double)(0));ta[(i+2 )*incx] = ((double)(0));ta[(i+3 )*incx] = ((double)(0));ta[(i+4 )*incx] = ((double)(0));ta[(i+5 )*incx] = ((double)(0));ta[(i+6 )*incx] = ((double)(0));ta[(i+7 )*incx] = ((double)(0));ta[(i+8 )*incx] = ((double)(0));ta[(i+9 )*incx] = ((double)(0));ta[(i+10 )*incx] = ((double)(0));ta[(i+11 )*incx] = ((double)(0));ta[(i+12 )*incx] = ((double)(0));ta[(i+13 )*incx] = ((double)(0));ta[(i+14 )*incx] = ((double)(0));ta[(i+15 )*incx] = ((double)(0));}
+for(     ;i<n;++i){ ta[(i+0 )*incx] = ((double)(0)); }
+}
+}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  ){
+	float*ta = array;
+{
+for(i=0;i+15<n;i+=16){
+ta[(i+0 )*incx] = ((float)(0));ta[(i+1 )*incx] = ((float)(0));ta[(i+2 )*incx] = ((float)(0));ta[(i+3 )*incx] = ((float)(0));ta[(i+4 )*incx] = ((float)(0));ta[(i+5 )*incx] = ((float)(0));ta[(i+6 )*incx] = ((float)(0));ta[(i+7 )*incx] = ((float)(0));ta[(i+8 )*incx] = ((float)(0));ta[(i+9 )*incx] = ((float)(0));ta[(i+10 )*incx] = ((float)(0));ta[(i+11 )*incx] = ((float)(0));ta[(i+12 )*incx] = ((float)(0));ta[(i+13 )*incx] = ((float)(0));ta[(i+14 )*incx] = ((float)(0));ta[(i+15 )*incx] = ((float)(0));}
+for(     ;i<n;++i){ ta[(i+0 )*incx] = ((float)(0)); }
+}
+}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ){
+	float complex*ta = array;
+{
+for(i=0;i+15<n;i+=16){
+ta[(i+0 )*incx] = ((float complex)(0));ta[(i+1 )*incx] = ((float complex)(0));ta[(i+2 )*incx] = ((float complex)(0));ta[(i+3 )*incx] = ((float complex)(0));ta[(i+4 )*incx] = ((float complex)(0));ta[(i+5 )*incx] = ((float complex)(0));ta[(i+6 )*incx] = ((float complex)(0));ta[(i+7 )*incx] = ((float complex)(0));ta[(i+8 )*incx] = ((float complex)(0));ta[(i+9 )*incx] = ((float complex)(0));ta[(i+10 )*incx] = ((float complex)(0));ta[(i+11 )*incx] = ((float complex)(0));ta[(i+12 )*incx] = ((float complex)(0));ta[(i+13 )*incx] = ((float complex)(0));ta[(i+14 )*incx] = ((float complex)(0));ta[(i+15 )*incx] = ((float complex)(0));}
+for(     ;i<n;++i){ ta[(i+0 )*incx] = ((float complex)(0)); }
+}
+}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ){
+	double complex*ta = array;
+{
+for(i=0;i+15<n;i+=16){
+ta[(i+0 )*incx] = ((double complex)(0));ta[(i+1 )*incx] = ((double complex)(0));ta[(i+2 )*incx] = ((double complex)(0));ta[(i+3 )*incx] = ((double complex)(0));ta[(i+4 )*incx] = ((double complex)(0));ta[(i+5 )*incx] = ((double complex)(0));ta[(i+6 )*incx] = ((double complex)(0));ta[(i+7 )*incx] = ((double complex)(0));ta[(i+8 )*incx] = ((double complex)(0));ta[(i+9 )*incx] = ((double complex)(0));ta[(i+10 )*incx] = ((double complex)(0));ta[(i+11 )*incx] = ((double complex)(0));ta[(i+12 )*incx] = ((double complex)(0));ta[(i+13 )*incx] = ((double complex)(0));ta[(i+14 )*incx] = ((double complex)(0));ta[(i+15 )*incx] = ((double complex)(0));}
+for(     ;i<n;++i){ ta[(i+0 )*incx] = ((double complex)(0)); }
+}
+}
+	else 
+#endif
+		return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
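+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+/* Illustrative sketch only: rsb_example_fill_with_zeros_usage is a
+ * hypothetical helper, not part of librsb, zeroing every other element of a
+ * made-up double array via the strided entry point above. */
+static void rsb_example_fill_with_zeros_usage(void)
+{
+	double v[6] = { 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 };
+
+	/* zeroes v[0], v[2], v[4]; the odd-indexed elements stay 1.0 */
+	(void)rsb_fill_with_zeros(v, RSB_NUMERICAL_TYPE_DOUBLE, 3, 2);
+}
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */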
+
+static rsb_err_t rsb_vector_scale(void * a, const void * alphap, rsb_type_t type, size_t n)
+{
+	/*!
+	 * a <- a * alpha
+	 *
+	 * \param a	the array pointer
+	 * \param alphap	pointer to the scaling value (if NULL, assumed to be zero)
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \note see dscal in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+	if(alphap==NULL || RSB_IS_ELEMENT_ZERO(alphap,type))
+		return rsb_fill_with_zeros(a,type,n,1);
+		
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+	const double alpha = *(double*)alphap; double*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ]*=alpha;
+		ta[i+1 ]*=alpha;
+		ta[i+2 ]*=alpha;
+		ta[i+3 ]*=alpha;
+		ta[i+4 ]*=alpha;
+		ta[i+5 ]*=alpha;
+		ta[i+6 ]*=alpha;
+		ta[i+7 ]*=alpha;
+		ta[i+8 ]*=alpha;
+		ta[i+9 ]*=alpha;
+		ta[i+10 ]*=alpha;
+		ta[i+11 ]*=alpha;
+		ta[i+12 ]*=alpha;
+		ta[i+13 ]*=alpha;
+		ta[i+14 ]*=alpha;
+		ta[i+15 ]*=alpha;
+	}
+for(     ;i<n;++i){ 	ta[i+0 ]*=alpha;
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+	const float alpha = *(float*)alphap; float*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ]*=alpha;
+		ta[i+1 ]*=alpha;
+		ta[i+2 ]*=alpha;
+		ta[i+3 ]*=alpha;
+		ta[i+4 ]*=alpha;
+		ta[i+5 ]*=alpha;
+		ta[i+6 ]*=alpha;
+		ta[i+7 ]*=alpha;
+		ta[i+8 ]*=alpha;
+		ta[i+9 ]*=alpha;
+		ta[i+10 ]*=alpha;
+		ta[i+11 ]*=alpha;
+		ta[i+12 ]*=alpha;
+		ta[i+13 ]*=alpha;
+		ta[i+14 ]*=alpha;
+		ta[i+15 ]*=alpha;
+	}
+for(     ;i<n;++i){ 	ta[i+0 ]*=alpha;
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+	const float complex alpha = *(float complex*)alphap; float complex*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ]*=alpha;
+		ta[i+1 ]*=alpha;
+		ta[i+2 ]*=alpha;
+		ta[i+3 ]*=alpha;
+		ta[i+4 ]*=alpha;
+		ta[i+5 ]*=alpha;
+		ta[i+6 ]*=alpha;
+		ta[i+7 ]*=alpha;
+		ta[i+8 ]*=alpha;
+		ta[i+9 ]*=alpha;
+		ta[i+10 ]*=alpha;
+		ta[i+11 ]*=alpha;
+		ta[i+12 ]*=alpha;
+		ta[i+13 ]*=alpha;
+		ta[i+14 ]*=alpha;
+		ta[i+15 ]*=alpha;
+	}
+for(     ;i<n;++i){ 	ta[i+0 ]*=alpha;
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+	const double complex alpha = *(double complex*)alphap; double complex*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ]*=alpha;
+		ta[i+1 ]*=alpha;
+		ta[i+2 ]*=alpha;
+		ta[i+3 ]*=alpha;
+		ta[i+4 ]*=alpha;
+		ta[i+5 ]*=alpha;
+		ta[i+6 ]*=alpha;
+		ta[i+7 ]*=alpha;
+		ta[i+8 ]*=alpha;
+		ta[i+9 ]*=alpha;
+		ta[i+10 ]*=alpha;
+		ta[i+11 ]*=alpha;
+		ta[i+12 ]*=alpha;
+		ta[i+13 ]*=alpha;
+		ta[i+14 ]*=alpha;
+		ta[i+15 ]*=alpha;
+	}
+for(     ;i<n;++i){ 	ta[i+0 ]*=alpha;
+	 }
+}
+; 
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t rsb_strided_vector_scale(void * a, const void * alphap, rsb_type_t type, size_t n, size_t stride)
+{
+	/*!
+	 * a <- a * alpha
+	 *
+	 * \param a	the array pointer
+	 * \param alphap	pointer to the scaling value (if NULL, assumed to be zero)
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \param stride	the input array stride
+	 * \note see dscal in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+	if(stride==1)
+		return rsb_vector_scale(a,alphap,type,n);
+	if(alphap==NULL || RSB_IS_ELEMENT_ZERO(alphap,type))
+		return rsb_fill_with_zeros(a,type,n,stride);
+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		const double alpha = *(double*)alphap; double*ta = a;
+		{
+for(i=0;i+15<n;i+=16){
+		ta[stride*(i+0 )]*=alpha;
+			ta[stride*(i+1 )]*=alpha;
+			ta[stride*(i+2 )]*=alpha;
+			ta[stride*(i+3 )]*=alpha;
+			ta[stride*(i+4 )]*=alpha;
+			ta[stride*(i+5 )]*=alpha;
+			ta[stride*(i+6 )]*=alpha;
+			ta[stride*(i+7 )]*=alpha;
+			ta[stride*(i+8 )]*=alpha;
+			ta[stride*(i+9 )]*=alpha;
+			ta[stride*(i+10 )]*=alpha;
+			ta[stride*(i+11 )]*=alpha;
+			ta[stride*(i+12 )]*=alpha;
+			ta[stride*(i+13 )]*=alpha;
+			ta[stride*(i+14 )]*=alpha;
+			ta[stride*(i+15 )]*=alpha;
+	}
+for(     ;i<n;++i){ 		ta[stride*(i+0 )]*=alpha;
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		const float alpha = *(float*)alphap; float*ta = a;
+		{
+for(i=0;i+15<n;i+=16){
+		ta[stride*(i+0 )]*=alpha;
+			ta[stride*(i+1 )]*=alpha;
+			ta[stride*(i+2 )]*=alpha;
+			ta[stride*(i+3 )]*=alpha;
+			ta[stride*(i+4 )]*=alpha;
+			ta[stride*(i+5 )]*=alpha;
+			ta[stride*(i+6 )]*=alpha;
+			ta[stride*(i+7 )]*=alpha;
+			ta[stride*(i+8 )]*=alpha;
+			ta[stride*(i+9 )]*=alpha;
+			ta[stride*(i+10 )]*=alpha;
+			ta[stride*(i+11 )]*=alpha;
+			ta[stride*(i+12 )]*=alpha;
+			ta[stride*(i+13 )]*=alpha;
+			ta[stride*(i+14 )]*=alpha;
+			ta[stride*(i+15 )]*=alpha;
+	}
+for(     ;i<n;++i){ 		ta[stride*(i+0 )]*=alpha;
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		const float complex alpha = *(float complex*)alphap; float complex*ta = a;
+		{
+for(i=0;i+15<n;i+=16){
+		ta[stride*(i+0 )]*=alpha;
+			ta[stride*(i+1 )]*=alpha;
+			ta[stride*(i+2 )]*=alpha;
+			ta[stride*(i+3 )]*=alpha;
+			ta[stride*(i+4 )]*=alpha;
+			ta[stride*(i+5 )]*=alpha;
+			ta[stride*(i+6 )]*=alpha;
+			ta[stride*(i+7 )]*=alpha;
+			ta[stride*(i+8 )]*=alpha;
+			ta[stride*(i+9 )]*=alpha;
+			ta[stride*(i+10 )]*=alpha;
+			ta[stride*(i+11 )]*=alpha;
+			ta[stride*(i+12 )]*=alpha;
+			ta[stride*(i+13 )]*=alpha;
+			ta[stride*(i+14 )]*=alpha;
+			ta[stride*(i+15 )]*=alpha;
+	}
+for(     ;i<n;++i){ 		ta[stride*(i+0 )]*=alpha;
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		const double complex alpha = *(double complex*)alphap; double complex*ta = a;
+		{
+for(i=0;i+15<n;i+=16){
+		ta[stride*(i+0 )]*=alpha;
+			ta[stride*(i+1 )]*=alpha;
+			ta[stride*(i+2 )]*=alpha;
+			ta[stride*(i+3 )]*=alpha;
+			ta[stride*(i+4 )]*=alpha;
+			ta[stride*(i+5 )]*=alpha;
+			ta[stride*(i+6 )]*=alpha;
+			ta[stride*(i+7 )]*=alpha;
+			ta[stride*(i+8 )]*=alpha;
+			ta[stride*(i+9 )]*=alpha;
+			ta[stride*(i+10 )]*=alpha;
+			ta[stride*(i+11 )]*=alpha;
+			ta[stride*(i+12 )]*=alpha;
+			ta[stride*(i+13 )]*=alpha;
+			ta[stride*(i+14 )]*=alpha;
+			ta[stride*(i+15 )]*=alpha;
+	}
+for(     ;i<n;++i){ 		ta[stride*(i+0 )]*=alpha;
+	 }
+}
+; 
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
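+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+/* Illustrative sketch only: rsb_example_vector_scale_usage is a hypothetical
+ * helper, not part of librsb; with stride 1 the call takes the contiguous
+ * path (rsb_vector_scale), and a NULL alphap would zero the array instead. */
+static void rsb_example_vector_scale_usage(void)
+{
+	double v[3] = { 1.0, 2.0, 3.0 };
+	const double alpha = 2.0;
+
+	(void)rsb_strided_vector_scale(v, &alpha, RSB_NUMERICAL_TYPE_DOUBLE, 3, 1); /* v = {2,4,6} */
+}
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */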
+
+rsb_err_t rsb__util_vector_add(void * a, const void * alphap, rsb_type_t type, size_t n)
+{
+	/*!
+	 * a <- a + alpha
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+
+	if(!alphap)
+		return RSB_ERR_BADARGS;
+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		const double alpha = *(double*)alphap; double*ta = a;
+		{
+for(i=0;i+15<n;i+=16){
+		ta[i+0 ]+=alpha;
+			ta[i+1 ]+=alpha;
+			ta[i+2 ]+=alpha;
+			ta[i+3 ]+=alpha;
+			ta[i+4 ]+=alpha;
+			ta[i+5 ]+=alpha;
+			ta[i+6 ]+=alpha;
+			ta[i+7 ]+=alpha;
+			ta[i+8 ]+=alpha;
+			ta[i+9 ]+=alpha;
+			ta[i+10 ]+=alpha;
+			ta[i+11 ]+=alpha;
+			ta[i+12 ]+=alpha;
+			ta[i+13 ]+=alpha;
+			ta[i+14 ]+=alpha;
+			ta[i+15 ]+=alpha;
+	}
+for(     ;i<n;++i){ 		ta[i+0 ]+=alpha;
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		const float alpha = *(float*)alphap; float*ta = a;
+		{
+for(i=0;i+15<n;i+=16){
+		ta[i+0 ]+=alpha;
+			ta[i+1 ]+=alpha;
+			ta[i+2 ]+=alpha;
+			ta[i+3 ]+=alpha;
+			ta[i+4 ]+=alpha;
+			ta[i+5 ]+=alpha;
+			ta[i+6 ]+=alpha;
+			ta[i+7 ]+=alpha;
+			ta[i+8 ]+=alpha;
+			ta[i+9 ]+=alpha;
+			ta[i+10 ]+=alpha;
+			ta[i+11 ]+=alpha;
+			ta[i+12 ]+=alpha;
+			ta[i+13 ]+=alpha;
+			ta[i+14 ]+=alpha;
+			ta[i+15 ]+=alpha;
+	}
+for(     ;i<n;++i){ 		ta[i+0 ]+=alpha;
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		const float complex alpha = *(float complex*)alphap; float complex*ta = a;
+		{
+for(i=0;i+15<n;i+=16){
+		ta[i+0 ]+=alpha;
+			ta[i+1 ]+=alpha;
+			ta[i+2 ]+=alpha;
+			ta[i+3 ]+=alpha;
+			ta[i+4 ]+=alpha;
+			ta[i+5 ]+=alpha;
+			ta[i+6 ]+=alpha;
+			ta[i+7 ]+=alpha;
+			ta[i+8 ]+=alpha;
+			ta[i+9 ]+=alpha;
+			ta[i+10 ]+=alpha;
+			ta[i+11 ]+=alpha;
+			ta[i+12 ]+=alpha;
+			ta[i+13 ]+=alpha;
+			ta[i+14 ]+=alpha;
+			ta[i+15 ]+=alpha;
+	}
+for(     ;i<n;++i){ 		ta[i+0 ]+=alpha;
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		const double complex alpha = *(double complex*)alphap; double complex*ta = a;
+		{
+for(i=0;i+15<n;i+=16){
+		ta[i+0 ]+=alpha;
+			ta[i+1 ]+=alpha;
+			ta[i+2 ]+=alpha;
+			ta[i+3 ]+=alpha;
+			ta[i+4 ]+=alpha;
+			ta[i+5 ]+=alpha;
+			ta[i+6 ]+=alpha;
+			ta[i+7 ]+=alpha;
+			ta[i+8 ]+=alpha;
+			ta[i+9 ]+=alpha;
+			ta[i+10 ]+=alpha;
+			ta[i+11 ]+=alpha;
+			ta[i+12 ]+=alpha;
+			ta[i+13 ]+=alpha;
+			ta[i+14 ]+=alpha;
+			ta[i+15 ]+=alpha;
+	}
+for(     ;i<n;++i){ 		ta[i+0 ]+=alpha;
+	 }
+}
+; 
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
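+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+/* Illustrative sketch only: rsb_example_vector_add_usage is a hypothetical
+ * helper, not part of librsb, adding a scalar to every element. */
+static void rsb_example_vector_add_usage(void)
+{
+	double v[3] = { 1.0, 2.0, 3.0 };
+	const double alpha = 10.0;
+
+	(void)rsb__util_vector_add(v, &alpha, RSB_NUMERICAL_TYPE_DOUBLE, 3); /* v = {11,12,13} */
+}
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */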
+
+rsb_err_t rsb__util_vector_div(void * a, const void * alphap, rsb_type_t type, size_t n)
+{
+	/*!
+	 * a <- a / alpha
+	 * This is a benchmark-oriented function only.
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+
+	if(!alphap)
+		return RSB_ERR_BADARGS;
+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+	const double alpha = *(double*)alphap; double*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ]/=alpha;
+		ta[i+1 ]/=alpha;
+		ta[i+2 ]/=alpha;
+		ta[i+3 ]/=alpha;
+		ta[i+4 ]/=alpha;
+		ta[i+5 ]/=alpha;
+		ta[i+6 ]/=alpha;
+		ta[i+7 ]/=alpha;
+		ta[i+8 ]/=alpha;
+		ta[i+9 ]/=alpha;
+		ta[i+10 ]/=alpha;
+		ta[i+11 ]/=alpha;
+		ta[i+12 ]/=alpha;
+		ta[i+13 ]/=alpha;
+		ta[i+14 ]/=alpha;
+		ta[i+15 ]/=alpha;
+	}
+for(     ;i<n;++i){ 	ta[i+0 ]/=alpha;
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+	const float alpha = *(float*)alphap; float*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ]/=alpha;
+		ta[i+1 ]/=alpha;
+		ta[i+2 ]/=alpha;
+		ta[i+3 ]/=alpha;
+		ta[i+4 ]/=alpha;
+		ta[i+5 ]/=alpha;
+		ta[i+6 ]/=alpha;
+		ta[i+7 ]/=alpha;
+		ta[i+8 ]/=alpha;
+		ta[i+9 ]/=alpha;
+		ta[i+10 ]/=alpha;
+		ta[i+11 ]/=alpha;
+		ta[i+12 ]/=alpha;
+		ta[i+13 ]/=alpha;
+		ta[i+14 ]/=alpha;
+		ta[i+15 ]/=alpha;
+	}
+for(     ;i<n;++i){ 	ta[i+0 ]/=alpha;
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+	const float complex alpha = *(float complex*)alphap; float complex*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ]/=alpha;
+		ta[i+1 ]/=alpha;
+		ta[i+2 ]/=alpha;
+		ta[i+3 ]/=alpha;
+		ta[i+4 ]/=alpha;
+		ta[i+5 ]/=alpha;
+		ta[i+6 ]/=alpha;
+		ta[i+7 ]/=alpha;
+		ta[i+8 ]/=alpha;
+		ta[i+9 ]/=alpha;
+		ta[i+10 ]/=alpha;
+		ta[i+11 ]/=alpha;
+		ta[i+12 ]/=alpha;
+		ta[i+13 ]/=alpha;
+		ta[i+14 ]/=alpha;
+		ta[i+15 ]/=alpha;
+	}
+for(     ;i<n;++i){ 	ta[i+0 ]/=alpha;
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+	const double complex alpha = *(double complex*)alphap; double complex*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ]/=alpha;
+		ta[i+1 ]/=alpha;
+		ta[i+2 ]/=alpha;
+		ta[i+3 ]/=alpha;
+		ta[i+4 ]/=alpha;
+		ta[i+5 ]/=alpha;
+		ta[i+6 ]/=alpha;
+		ta[i+7 ]/=alpha;
+		ta[i+8 ]/=alpha;
+		ta[i+9 ]/=alpha;
+		ta[i+10 ]/=alpha;
+		ta[i+11 ]/=alpha;
+		ta[i+12 ]/=alpha;
+		ta[i+13 ]/=alpha;
+		ta[i+14 ]/=alpha;
+		ta[i+15 ]/=alpha;
+	}
+for(     ;i<n;++i){ 	ta[i+0 ]/=alpha;
+	 }
+}
+; 
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__vector_increase_by_one(void * a, rsb_type_t type, size_t n)
+{
+	/*!
+	 * a <- a + 1 (elementwise)
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+		
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{ double*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ]+=((double)(1.0));
+		ta[i+1 ]+=((double)(1.0));
+		ta[i+2 ]+=((double)(1.0));
+		ta[i+3 ]+=((double)(1.0));
+		ta[i+4 ]+=((double)(1.0));
+		ta[i+5 ]+=((double)(1.0));
+		ta[i+6 ]+=((double)(1.0));
+		ta[i+7 ]+=((double)(1.0));
+		ta[i+8 ]+=((double)(1.0));
+		ta[i+9 ]+=((double)(1.0));
+		ta[i+10 ]+=((double)(1.0));
+		ta[i+11 ]+=((double)(1.0));
+		ta[i+12 ]+=((double)(1.0));
+		ta[i+13 ]+=((double)(1.0));
+		ta[i+14 ]+=((double)(1.0));
+		ta[i+15 ]+=((double)(1.0));
+	}
+for(     ;i<n;++i){ 	ta[i+0 ]+=((double)(1.0));
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{ float*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ]+=((float)(1.0));
+		ta[i+1 ]+=((float)(1.0));
+		ta[i+2 ]+=((float)(1.0));
+		ta[i+3 ]+=((float)(1.0));
+		ta[i+4 ]+=((float)(1.0));
+		ta[i+5 ]+=((float)(1.0));
+		ta[i+6 ]+=((float)(1.0));
+		ta[i+7 ]+=((float)(1.0));
+		ta[i+8 ]+=((float)(1.0));
+		ta[i+9 ]+=((float)(1.0));
+		ta[i+10 ]+=((float)(1.0));
+		ta[i+11 ]+=((float)(1.0));
+		ta[i+12 ]+=((float)(1.0));
+		ta[i+13 ]+=((float)(1.0));
+		ta[i+14 ]+=((float)(1.0));
+		ta[i+15 ]+=((float)(1.0));
+	}
+for(     ;i<n;++i){ 	ta[i+0 ]+=((float)(1.0));
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{ float complex*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ]+=((float complex)(1.0));
+		ta[i+1 ]+=((float complex)(1.0));
+		ta[i+2 ]+=((float complex)(1.0));
+		ta[i+3 ]+=((float complex)(1.0));
+		ta[i+4 ]+=((float complex)(1.0));
+		ta[i+5 ]+=((float complex)(1.0));
+		ta[i+6 ]+=((float complex)(1.0));
+		ta[i+7 ]+=((float complex)(1.0));
+		ta[i+8 ]+=((float complex)(1.0));
+		ta[i+9 ]+=((float complex)(1.0));
+		ta[i+10 ]+=((float complex)(1.0));
+		ta[i+11 ]+=((float complex)(1.0));
+		ta[i+12 ]+=((float complex)(1.0));
+		ta[i+13 ]+=((float complex)(1.0));
+		ta[i+14 ]+=((float complex)(1.0));
+		ta[i+15 ]+=((float complex)(1.0));
+	}
+for(     ;i<n;++i){ 	ta[i+0 ]+=((float complex)(1.0));
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{ double complex*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ]+=((double complex)(1.0));
+		ta[i+1 ]+=((double complex)(1.0));
+		ta[i+2 ]+=((double complex)(1.0));
+		ta[i+3 ]+=((double complex)(1.0));
+		ta[i+4 ]+=((double complex)(1.0));
+		ta[i+5 ]+=((double complex)(1.0));
+		ta[i+6 ]+=((double complex)(1.0));
+		ta[i+7 ]+=((double complex)(1.0));
+		ta[i+8 ]+=((double complex)(1.0));
+		ta[i+9 ]+=((double complex)(1.0));
+		ta[i+10 ]+=((double complex)(1.0));
+		ta[i+11 ]+=((double complex)(1.0));
+		ta[i+12 ]+=((double complex)(1.0));
+		ta[i+13 ]+=((double complex)(1.0));
+		ta[i+14 ]+=((double complex)(1.0));
+		ta[i+15 ]+=((double complex)(1.0));
+	}
+for(     ;i<n;++i){ 	ta[i+0 ]+=((double complex)(1.0));
+	 }
+}
+; 
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__util_vector_pow(void * a, rsb_type_t type, const void *y, size_t n)
+{
+	/*!
+	 * a <- a^y (elementwise)
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+
+	if(!a || !y)
+		return RSB_ERR_BADARGS;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		double ty = *(double*)y,*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ] = pow(ta[i+0 ],ty);
+		ta[i+1 ] = pow(ta[i+1 ],ty);
+		ta[i+2 ] = pow(ta[i+2 ],ty);
+		ta[i+3 ] = pow(ta[i+3 ],ty);
+		ta[i+4 ] = pow(ta[i+4 ],ty);
+		ta[i+5 ] = pow(ta[i+5 ],ty);
+		ta[i+6 ] = pow(ta[i+6 ],ty);
+		ta[i+7 ] = pow(ta[i+7 ],ty);
+		ta[i+8 ] = pow(ta[i+8 ],ty);
+		ta[i+9 ] = pow(ta[i+9 ],ty);
+		ta[i+10 ] = pow(ta[i+10 ],ty);
+		ta[i+11 ] = pow(ta[i+11 ],ty);
+		ta[i+12 ] = pow(ta[i+12 ],ty);
+		ta[i+13 ] = pow(ta[i+13 ],ty);
+		ta[i+14 ] = pow(ta[i+14 ],ty);
+		ta[i+15 ] = pow(ta[i+15 ],ty);
+	}
+for(     ;i<n;++i){ 	ta[i+0 ] = pow(ta[i+0 ],ty);
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		float ty = *(float*)y,*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ] = powf(ta[i+0 ],ty);
+		ta[i+1 ] = powf(ta[i+1 ],ty);
+		ta[i+2 ] = powf(ta[i+2 ],ty);
+		ta[i+3 ] = powf(ta[i+3 ],ty);
+		ta[i+4 ] = powf(ta[i+4 ],ty);
+		ta[i+5 ] = powf(ta[i+5 ],ty);
+		ta[i+6 ] = powf(ta[i+6 ],ty);
+		ta[i+7 ] = powf(ta[i+7 ],ty);
+		ta[i+8 ] = powf(ta[i+8 ],ty);
+		ta[i+9 ] = powf(ta[i+9 ],ty);
+		ta[i+10 ] = powf(ta[i+10 ],ty);
+		ta[i+11 ] = powf(ta[i+11 ],ty);
+		ta[i+12 ] = powf(ta[i+12 ],ty);
+		ta[i+13 ] = powf(ta[i+13 ],ty);
+		ta[i+14 ] = powf(ta[i+14 ],ty);
+		ta[i+15 ] = powf(ta[i+15 ],ty);
+	}
+for(     ;i<n;++i){ 	ta[i+0 ] = powf(ta[i+0 ],ty);
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		float complex ty = *(float complex*)y,*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ] = cpowf(ta[i+0 ],ty);
+		ta[i+1 ] = cpowf(ta[i+1 ],ty);
+		ta[i+2 ] = cpowf(ta[i+2 ],ty);
+		ta[i+3 ] = cpowf(ta[i+3 ],ty);
+		ta[i+4 ] = cpowf(ta[i+4 ],ty);
+		ta[i+5 ] = cpowf(ta[i+5 ],ty);
+		ta[i+6 ] = cpowf(ta[i+6 ],ty);
+		ta[i+7 ] = cpowf(ta[i+7 ],ty);
+		ta[i+8 ] = cpowf(ta[i+8 ],ty);
+		ta[i+9 ] = cpowf(ta[i+9 ],ty);
+		ta[i+10 ] = cpowf(ta[i+10 ],ty);
+		ta[i+11 ] = cpowf(ta[i+11 ],ty);
+		ta[i+12 ] = cpowf(ta[i+12 ],ty);
+		ta[i+13 ] = cpowf(ta[i+13 ],ty);
+		ta[i+14 ] = cpowf(ta[i+14 ],ty);
+		ta[i+15 ] = cpowf(ta[i+15 ],ty);
+	}
+for(     ;i<n;++i){ 	ta[i+0 ] = cpowf(ta[i+0 ],ty);
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		double complex ty = *(double complex*)y,*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ] = cpow(ta[i+0 ],ty);
+		ta[i+1 ] = cpow(ta[i+1 ],ty);
+		ta[i+2 ] = cpow(ta[i+2 ],ty);
+		ta[i+3 ] = cpow(ta[i+3 ],ty);
+		ta[i+4 ] = cpow(ta[i+4 ],ty);
+		ta[i+5 ] = cpow(ta[i+5 ],ty);
+		ta[i+6 ] = cpow(ta[i+6 ],ty);
+		ta[i+7 ] = cpow(ta[i+7 ],ty);
+		ta[i+8 ] = cpow(ta[i+8 ],ty);
+		ta[i+9 ] = cpow(ta[i+9 ],ty);
+		ta[i+10 ] = cpow(ta[i+10 ],ty);
+		ta[i+11 ] = cpow(ta[i+11 ],ty);
+		ta[i+12 ] = cpow(ta[i+12 ],ty);
+		ta[i+13 ] = cpow(ta[i+13 ],ty);
+		ta[i+14 ] = cpow(ta[i+14 ],ty);
+		ta[i+15 ] = cpow(ta[i+15 ],ty);
+	}
+for(     ;i<n;++i){ 	ta[i+0 ] = cpow(ta[i+0 ],ty);
+	 }
+}
+; 
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE;
+	return RSB_ERR_NO_ERROR;
+}
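+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+/* Illustrative sketch only: rsb_example_vector_pow_usage is a hypothetical
+ * helper, not part of librsb; note the exponent is passed through a typed
+ * pointer, like the other scalar arguments in this file. */
+static void rsb_example_vector_pow_usage(void)
+{
+	double v[3] = { 1.0, 2.0, 3.0 };
+	const double y = 2.0;
+
+	(void)rsb__util_vector_pow(v, RSB_NUMERICAL_TYPE_DOUBLE, &y, 3); /* v = {1,4,9} */
+}
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */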
+
+rsb_err_t rsb__util_vector_sqrt(void * a, rsb_type_t type, size_t n)
+{
+	/*!
+	 * a <- sqrt(a) (elementwise)
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+
+	if(!a)
+		return RSB_ERR_BADARGS;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{double*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ] = sqrt(ta[i+0 ]);
+		ta[i+1 ] = sqrt(ta[i+1 ]);
+		ta[i+2 ] = sqrt(ta[i+2 ]);
+		ta[i+3 ] = sqrt(ta[i+3 ]);
+		ta[i+4 ] = sqrt(ta[i+4 ]);
+		ta[i+5 ] = sqrt(ta[i+5 ]);
+		ta[i+6 ] = sqrt(ta[i+6 ]);
+		ta[i+7 ] = sqrt(ta[i+7 ]);
+		ta[i+8 ] = sqrt(ta[i+8 ]);
+		ta[i+9 ] = sqrt(ta[i+9 ]);
+		ta[i+10 ] = sqrt(ta[i+10 ]);
+		ta[i+11 ] = sqrt(ta[i+11 ]);
+		ta[i+12 ] = sqrt(ta[i+12 ]);
+		ta[i+13 ] = sqrt(ta[i+13 ]);
+		ta[i+14 ] = sqrt(ta[i+14 ]);
+		ta[i+15 ] = sqrt(ta[i+15 ]);
+	}
+for(     ;i<n;++i){ 	ta[i+0 ] = sqrt(ta[i+0 ]);
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{float*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ] = sqrtf(ta[i+0 ]);
+		ta[i+1 ] = sqrtf(ta[i+1 ]);
+		ta[i+2 ] = sqrtf(ta[i+2 ]);
+		ta[i+3 ] = sqrtf(ta[i+3 ]);
+		ta[i+4 ] = sqrtf(ta[i+4 ]);
+		ta[i+5 ] = sqrtf(ta[i+5 ]);
+		ta[i+6 ] = sqrtf(ta[i+6 ]);
+		ta[i+7 ] = sqrtf(ta[i+7 ]);
+		ta[i+8 ] = sqrtf(ta[i+8 ]);
+		ta[i+9 ] = sqrtf(ta[i+9 ]);
+		ta[i+10 ] = sqrtf(ta[i+10 ]);
+		ta[i+11 ] = sqrtf(ta[i+11 ]);
+		ta[i+12 ] = sqrtf(ta[i+12 ]);
+		ta[i+13 ] = sqrtf(ta[i+13 ]);
+		ta[i+14 ] = sqrtf(ta[i+14 ]);
+		ta[i+15 ] = sqrtf(ta[i+15 ]);
+	}
+for(     ;i<n;++i){ 	ta[i+0 ] = sqrtf(ta[i+0 ]);
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{float complex*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ] = csqrtf(ta[i+0 ]);
+		ta[i+1 ] = csqrtf(ta[i+1 ]);
+		ta[i+2 ] = csqrtf(ta[i+2 ]);
+		ta[i+3 ] = csqrtf(ta[i+3 ]);
+		ta[i+4 ] = csqrtf(ta[i+4 ]);
+		ta[i+5 ] = csqrtf(ta[i+5 ]);
+		ta[i+6 ] = csqrtf(ta[i+6 ]);
+		ta[i+7 ] = csqrtf(ta[i+7 ]);
+		ta[i+8 ] = csqrtf(ta[i+8 ]);
+		ta[i+9 ] = csqrtf(ta[i+9 ]);
+		ta[i+10 ] = csqrtf(ta[i+10 ]);
+		ta[i+11 ] = csqrtf(ta[i+11 ]);
+		ta[i+12 ] = csqrtf(ta[i+12 ]);
+		ta[i+13 ] = csqrtf(ta[i+13 ]);
+		ta[i+14 ] = csqrtf(ta[i+14 ]);
+		ta[i+15 ] = csqrtf(ta[i+15 ]);
+	}
+for(     ;i<n;++i){ 	ta[i+0 ] = csqrtf(ta[i+0 ]);
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{double complex*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ] = csqrt(ta[i+0 ]);
+		ta[i+1 ] = csqrt(ta[i+1 ]);
+		ta[i+2 ] = csqrt(ta[i+2 ]);
+		ta[i+3 ] = csqrt(ta[i+3 ]);
+		ta[i+4 ] = csqrt(ta[i+4 ]);
+		ta[i+5 ] = csqrt(ta[i+5 ]);
+		ta[i+6 ] = csqrt(ta[i+6 ]);
+		ta[i+7 ] = csqrt(ta[i+7 ]);
+		ta[i+8 ] = csqrt(ta[i+8 ]);
+		ta[i+9 ] = csqrt(ta[i+9 ]);
+		ta[i+10 ] = csqrt(ta[i+10 ]);
+		ta[i+11 ] = csqrt(ta[i+11 ]);
+		ta[i+12 ] = csqrt(ta[i+12 ]);
+		ta[i+13 ] = csqrt(ta[i+13 ]);
+		ta[i+14 ] = csqrt(ta[i+14 ]);
+		ta[i+15 ] = csqrt(ta[i+15 ]);
+	}
+for(     ;i<n;++i){ 	ta[i+0 ] = csqrt(ta[i+0 ]);
+	 }
+}
+; 
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE;
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__vector_scale_inv(void * a, const void * alphap, rsb_type_t type, size_t n)
+{
+	/*!
+	 * a <- a * (1/alpha), i.e. a <- a / alpha
+	 *
+	 * \param a	the array pointer
+	 * \param alphap	pointer to the (non-NULL) scaling value
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \note see dscal in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	if(!alphap)
+		return RSB_ERR_BADARGS;
+		
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		double alphai = ((double)(1.0))/(*(double*)alphap);
+		return rsb_vector_scale(a,&alphai,type,n);
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		float alphai = ((float)(1.0))/(*(float*)alphap);
+		return rsb_vector_scale(a,&alphai,type,n);
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		float complex alphai = ((float complex)(1.0))/(*(float complex*)alphap);
+		return rsb_vector_scale(a,&alphai,type,n);
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		double complex alphai = ((double complex)(1.0))/(*(double complex*)alphap);
+		return rsb_vector_scale(a,&alphai,type,n);
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
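+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+/* Illustrative sketch only: rsb_example_scale_inv_usage is a hypothetical
+ * helper, not part of librsb, dividing an array by a scalar through the
+ * reciprocal-scaling wrapper above. */
+static void rsb_example_scale_inv_usage(void)
+{
+	double v[3] = { 2.0, 4.0, 6.0 };
+	const double alpha = 2.0;
+
+	(void)rsb__vector_scale_inv(v, &alpha, RSB_NUMERICAL_TYPE_DOUBLE, 3); /* v = {1,2,3} */
+}
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */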
+
+rsb_err_t rsb__vector_sum_of_abs_diffs(void * c, const void * a, const void * b, rsb_type_t type, size_t n)
+{
+	/*!
+	 * c <- sum(|a - b|)
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		const double*ap = a,*bp = b;
+		double ac = ((double)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		ac += fabs(ap[i+0 ]-bp[i+0 ]);
+				ac += fabs(ap[i+1 ]-bp[i+1 ]);
+				ac += fabs(ap[i+2 ]-bp[i+2 ]);
+				ac += fabs(ap[i+3 ]-bp[i+3 ]);
+				ac += fabs(ap[i+4 ]-bp[i+4 ]);
+				ac += fabs(ap[i+5 ]-bp[i+5 ]);
+				ac += fabs(ap[i+6 ]-bp[i+6 ]);
+				ac += fabs(ap[i+7 ]-bp[i+7 ]);
+				ac += fabs(ap[i+8 ]-bp[i+8 ]);
+				ac += fabs(ap[i+9 ]-bp[i+9 ]);
+				ac += fabs(ap[i+10 ]-bp[i+10 ]);
+				ac += fabs(ap[i+11 ]-bp[i+11 ]);
+				ac += fabs(ap[i+12 ]-bp[i+12 ]);
+				ac += fabs(ap[i+13 ]-bp[i+13 ]);
+				ac += fabs(ap[i+14 ]-bp[i+14 ]);
+				ac += fabs(ap[i+15 ]-bp[i+15 ]);
+		}
+for(     ;i<n;++i){ 		ac += fabs(ap[i+0 ]-bp[i+0 ]);
+		 }
+}
+; 
+		*((double*)(c)) = ac;
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		const float*ap = a,*bp = b;
+		float ac = ((float)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		ac += fabsf(ap[i+0 ]-bp[i+0 ]);
+				ac += fabsf(ap[i+1 ]-bp[i+1 ]);
+				ac += fabsf(ap[i+2 ]-bp[i+2 ]);
+				ac += fabsf(ap[i+3 ]-bp[i+3 ]);
+				ac += fabsf(ap[i+4 ]-bp[i+4 ]);
+				ac += fabsf(ap[i+5 ]-bp[i+5 ]);
+				ac += fabsf(ap[i+6 ]-bp[i+6 ]);
+				ac += fabsf(ap[i+7 ]-bp[i+7 ]);
+				ac += fabsf(ap[i+8 ]-bp[i+8 ]);
+				ac += fabsf(ap[i+9 ]-bp[i+9 ]);
+				ac += fabsf(ap[i+10 ]-bp[i+10 ]);
+				ac += fabsf(ap[i+11 ]-bp[i+11 ]);
+				ac += fabsf(ap[i+12 ]-bp[i+12 ]);
+				ac += fabsf(ap[i+13 ]-bp[i+13 ]);
+				ac += fabsf(ap[i+14 ]-bp[i+14 ]);
+				ac += fabsf(ap[i+15 ]-bp[i+15 ]);
+		}
+for(     ;i<n;++i){ 		ac += fabsf(ap[i+0 ]-bp[i+0 ]);
+		 }
+}
+; 
+		*((float*)(c)) = ac;
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		const float complex*ap = a,*bp = b;
+		float complex ac = ((float complex)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		ac += cabsf(ap[i+0 ]-bp[i+0 ]);
+				ac += cabsf(ap[i+1 ]-bp[i+1 ]);
+				ac += cabsf(ap[i+2 ]-bp[i+2 ]);
+				ac += cabsf(ap[i+3 ]-bp[i+3 ]);
+				ac += cabsf(ap[i+4 ]-bp[i+4 ]);
+				ac += cabsf(ap[i+5 ]-bp[i+5 ]);
+				ac += cabsf(ap[i+6 ]-bp[i+6 ]);
+				ac += cabsf(ap[i+7 ]-bp[i+7 ]);
+				ac += cabsf(ap[i+8 ]-bp[i+8 ]);
+				ac += cabsf(ap[i+9 ]-bp[i+9 ]);
+				ac += cabsf(ap[i+10 ]-bp[i+10 ]);
+				ac += cabsf(ap[i+11 ]-bp[i+11 ]);
+				ac += cabsf(ap[i+12 ]-bp[i+12 ]);
+				ac += cabsf(ap[i+13 ]-bp[i+13 ]);
+				ac += cabsf(ap[i+14 ]-bp[i+14 ]);
+				ac += cabsf(ap[i+15 ]-bp[i+15 ]);
+		}
+for(     ;i<n;++i){ 		ac += cabsf(ap[i+0 ]-bp[i+0 ]);
+		 }
+}
+; 
+		*((float complex*)(c)) = ac;
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		const double complex*ap = a,*bp = b;
+		double complex ac = ((double complex)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		ac += cabs(ap[i+0 ]-bp[i+0 ]);
+				ac += cabs(ap[i+1 ]-bp[i+1 ]);
+				ac += cabs(ap[i+2 ]-bp[i+2 ]);
+				ac += cabs(ap[i+3 ]-bp[i+3 ]);
+				ac += cabs(ap[i+4 ]-bp[i+4 ]);
+				ac += cabs(ap[i+5 ]-bp[i+5 ]);
+				ac += cabs(ap[i+6 ]-bp[i+6 ]);
+				ac += cabs(ap[i+7 ]-bp[i+7 ]);
+				ac += cabs(ap[i+8 ]-bp[i+8 ]);
+				ac += cabs(ap[i+9 ]-bp[i+9 ]);
+				ac += cabs(ap[i+10 ]-bp[i+10 ]);
+				ac += cabs(ap[i+11 ]-bp[i+11 ]);
+				ac += cabs(ap[i+12 ]-bp[i+12 ]);
+				ac += cabs(ap[i+13 ]-bp[i+13 ]);
+				ac += cabs(ap[i+14 ]-bp[i+14 ]);
+				ac += cabs(ap[i+15 ]-bp[i+15 ]);
+		}
+for(     ;i<n;++i){ 		ac += cabs(ap[i+0 ]-bp[i+0 ]);
+		 }
+}
+; 
+		*((double complex*)(c)) = ac;
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
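+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE
+/* Illustrative sketch only: rsb_example_sum_of_abs_diffs_usage is a
+ * hypothetical helper, not part of librsb, computing the 1-norm of the
+ * difference of two made-up arrays (e.g. as a cheap convergence check). */
+static void rsb_example_sum_of_abs_diffs_usage(void)
+{
+	const double x[3] = { 1.0, 2.0, 3.0 };
+	const double y[3] = { 1.5, 1.5, 3.0 };
+	double d = 0.0;
+
+	(void)rsb__vector_sum_of_abs_diffs(&d, x, y, RSB_NUMERICAL_TYPE_DOUBLE, 3); /* d = 1 */
+}
+#endif /* RSB_NUMERICAL_TYPE_DOUBLE */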
+
+rsb_err_t rsb__vector_sum_of_abs(void * c, const void * a, rsb_type_t type, size_t n)
+{
+	/*!
+	 * c <- sum(|a|)
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		const double*ap = a;
+		double ac = ((double)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		ac += fabs(ap[i+0 ]);
+				ac += fabs(ap[i+1 ]);
+				ac += fabs(ap[i+2 ]);
+				ac += fabs(ap[i+3 ]);
+				ac += fabs(ap[i+4 ]);
+				ac += fabs(ap[i+5 ]);
+				ac += fabs(ap[i+6 ]);
+				ac += fabs(ap[i+7 ]);
+				ac += fabs(ap[i+8 ]);
+				ac += fabs(ap[i+9 ]);
+				ac += fabs(ap[i+10 ]);
+				ac += fabs(ap[i+11 ]);
+				ac += fabs(ap[i+12 ]);
+				ac += fabs(ap[i+13 ]);
+				ac += fabs(ap[i+14 ]);
+				ac += fabs(ap[i+15 ]);
+		}
+for(     ;i<n;++i){ 		ac += fabs(ap[i+0 ]);
+		 }
+}
+; 
+		*((double*)(c)) = ac;
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		const float*ap = a;
+		float ac = ((float)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		ac += fabsf(ap[i+0 ]);
+				ac += fabsf(ap[i+1 ]);
+				ac += fabsf(ap[i+2 ]);
+				ac += fabsf(ap[i+3 ]);
+				ac += fabsf(ap[i+4 ]);
+				ac += fabsf(ap[i+5 ]);
+				ac += fabsf(ap[i+6 ]);
+				ac += fabsf(ap[i+7 ]);
+				ac += fabsf(ap[i+8 ]);
+				ac += fabsf(ap[i+9 ]);
+				ac += fabsf(ap[i+10 ]);
+				ac += fabsf(ap[i+11 ]);
+				ac += fabsf(ap[i+12 ]);
+				ac += fabsf(ap[i+13 ]);
+				ac += fabsf(ap[i+14 ]);
+				ac += fabsf(ap[i+15 ]);
+		}
+for(     ;i<n;++i){ 		ac += fabsf(ap[i+0 ]);
+		 }
+}
+; 
+		*((float*)(c)) = ac;
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		const float complex*ap = a;
+		float complex ac = ((float complex)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		ac += cabsf(ap[i+0 ]);
+				ac += cabsf(ap[i+1 ]);
+				ac += cabsf(ap[i+2 ]);
+				ac += cabsf(ap[i+3 ]);
+				ac += cabsf(ap[i+4 ]);
+				ac += cabsf(ap[i+5 ]);
+				ac += cabsf(ap[i+6 ]);
+				ac += cabsf(ap[i+7 ]);
+				ac += cabsf(ap[i+8 ]);
+				ac += cabsf(ap[i+9 ]);
+				ac += cabsf(ap[i+10 ]);
+				ac += cabsf(ap[i+11 ]);
+				ac += cabsf(ap[i+12 ]);
+				ac += cabsf(ap[i+13 ]);
+				ac += cabsf(ap[i+14 ]);
+				ac += cabsf(ap[i+15 ]);
+		}
+for(     ;i<n;++i){ 		ac += cabsf(ap[i+0 ]);
+		 }
+}
+; 
+		*((float complex*)(c)) = ac;
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		const double complex*ap = a;
+		double complex ac = ((double complex)(0));
+		{
+for(i=0;i+15<n;i+=16){
+		ac += cabs(ap[i+0 ]);
+				ac += cabs(ap[i+1 ]);
+				ac += cabs(ap[i+2 ]);
+				ac += cabs(ap[i+3 ]);
+				ac += cabs(ap[i+4 ]);
+				ac += cabs(ap[i+5 ]);
+				ac += cabs(ap[i+6 ]);
+				ac += cabs(ap[i+7 ]);
+				ac += cabs(ap[i+8 ]);
+				ac += cabs(ap[i+9 ]);
+				ac += cabs(ap[i+10 ]);
+				ac += cabs(ap[i+11 ]);
+				ac += cabs(ap[i+12 ]);
+				ac += cabs(ap[i+13 ]);
+				ac += cabs(ap[i+14 ]);
+				ac += cabs(ap[i+15 ]);
+		}
+for(     ;i<n;++i){ 		ac += cabs(ap[i+0 ]);
+		 }
+}
+; 
+		*((double complex*)(c)) = ac;
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__vector_to_abs(void * a, rsb_type_t type, size_t n)
+{
+	/*!
+	 * a <- |a| (elementwise)
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{double*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ] = fabs(ta[i+0 ]);
+		ta[i+1 ] = fabs(ta[i+1 ]);
+		ta[i+2 ] = fabs(ta[i+2 ]);
+		ta[i+3 ] = fabs(ta[i+3 ]);
+		ta[i+4 ] = fabs(ta[i+4 ]);
+		ta[i+5 ] = fabs(ta[i+5 ]);
+		ta[i+6 ] = fabs(ta[i+6 ]);
+		ta[i+7 ] = fabs(ta[i+7 ]);
+		ta[i+8 ] = fabs(ta[i+8 ]);
+		ta[i+9 ] = fabs(ta[i+9 ]);
+		ta[i+10 ] = fabs(ta[i+10 ]);
+		ta[i+11 ] = fabs(ta[i+11 ]);
+		ta[i+12 ] = fabs(ta[i+12 ]);
+		ta[i+13 ] = fabs(ta[i+13 ]);
+		ta[i+14 ] = fabs(ta[i+14 ]);
+		ta[i+15 ] = fabs(ta[i+15 ]);
+	}
+for(     ;i<n;++i){ 	ta[i+0 ] = fabs(ta[i+0 ]);
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{float*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ] = fabsf(ta[i+0 ]);
+		ta[i+1 ] = fabsf(ta[i+1 ]);
+		ta[i+2 ] = fabsf(ta[i+2 ]);
+		ta[i+3 ] = fabsf(ta[i+3 ]);
+		ta[i+4 ] = fabsf(ta[i+4 ]);
+		ta[i+5 ] = fabsf(ta[i+5 ]);
+		ta[i+6 ] = fabsf(ta[i+6 ]);
+		ta[i+7 ] = fabsf(ta[i+7 ]);
+		ta[i+8 ] = fabsf(ta[i+8 ]);
+		ta[i+9 ] = fabsf(ta[i+9 ]);
+		ta[i+10 ] = fabsf(ta[i+10 ]);
+		ta[i+11 ] = fabsf(ta[i+11 ]);
+		ta[i+12 ] = fabsf(ta[i+12 ]);
+		ta[i+13 ] = fabsf(ta[i+13 ]);
+		ta[i+14 ] = fabsf(ta[i+14 ]);
+		ta[i+15 ] = fabsf(ta[i+15 ]);
+	}
+for(     ;i<n;++i){ 	ta[i+0 ] = fabsf(ta[i+0 ]);
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{float complex*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ] = cabsf(ta[i+0 ]);
+		ta[i+1 ] = cabsf(ta[i+1 ]);
+		ta[i+2 ] = cabsf(ta[i+2 ]);
+		ta[i+3 ] = cabsf(ta[i+3 ]);
+		ta[i+4 ] = cabsf(ta[i+4 ]);
+		ta[i+5 ] = cabsf(ta[i+5 ]);
+		ta[i+6 ] = cabsf(ta[i+6 ]);
+		ta[i+7 ] = cabsf(ta[i+7 ]);
+		ta[i+8 ] = cabsf(ta[i+8 ]);
+		ta[i+9 ] = cabsf(ta[i+9 ]);
+		ta[i+10 ] = cabsf(ta[i+10 ]);
+		ta[i+11 ] = cabsf(ta[i+11 ]);
+		ta[i+12 ] = cabsf(ta[i+12 ]);
+		ta[i+13 ] = cabsf(ta[i+13 ]);
+		ta[i+14 ] = cabsf(ta[i+14 ]);
+		ta[i+15 ] = cabsf(ta[i+15 ]);
+	}
+for(     ;i<n;++i){ 	ta[i+0 ] = cabsf(ta[i+0 ]);
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{double complex*ta = a;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ] = cabs(ta[i+0 ]);
+		ta[i+1 ] = cabs(ta[i+1 ]);
+		ta[i+2 ] = cabs(ta[i+2 ]);
+		ta[i+3 ] = cabs(ta[i+3 ]);
+		ta[i+4 ] = cabs(ta[i+4 ]);
+		ta[i+5 ] = cabs(ta[i+5 ]);
+		ta[i+6 ] = cabs(ta[i+6 ]);
+		ta[i+7 ] = cabs(ta[i+7 ]);
+		ta[i+8 ] = cabs(ta[i+8 ]);
+		ta[i+9 ] = cabs(ta[i+9 ]);
+		ta[i+10 ] = cabs(ta[i+10 ]);
+		ta[i+11 ] = cabs(ta[i+11 ]);
+		ta[i+12 ] = cabs(ta[i+12 ]);
+		ta[i+13 ] = cabs(ta[i+13 ]);
+		ta[i+14 ] = cabs(ta[i+14 ]);
+		ta[i+15 ] = cabs(ta[i+15 ]);
+	}
+for(     ;i<n;++i){ 	ta[i+0 ] = cabs(ta[i+0 ]);
+	 }
+}
+; 
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t rsb_alpha_sum(void * a, const void * b, const void * alphap, rsb_type_t type, size_t n)
+{
+	/*!
+	 * a <- a + alpha * b
+	 *
+	 * \param a	the output array pointer
+	 * \param b	the input array pointer
+	 * \param alphap	pointer to the scaling value (if NULL, alpha = 1)
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \note see daxpy in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+	const double alpha = alphap ? *(double*)alphap : ((double)(1.0));
+	double*ta = a; const double*tb = b;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ]+=alpha*tb[i+0 ];
+		ta[i+1 ]+=alpha*tb[i+1 ];
+		ta[i+2 ]+=alpha*tb[i+2 ];
+		ta[i+3 ]+=alpha*tb[i+3 ];
+		ta[i+4 ]+=alpha*tb[i+4 ];
+		ta[i+5 ]+=alpha*tb[i+5 ];
+		ta[i+6 ]+=alpha*tb[i+6 ];
+		ta[i+7 ]+=alpha*tb[i+7 ];
+		ta[i+8 ]+=alpha*tb[i+8 ];
+		ta[i+9 ]+=alpha*tb[i+9 ];
+		ta[i+10 ]+=alpha*tb[i+10 ];
+		ta[i+11 ]+=alpha*tb[i+11 ];
+		ta[i+12 ]+=alpha*tb[i+12 ];
+		ta[i+13 ]+=alpha*tb[i+13 ];
+		ta[i+14 ]+=alpha*tb[i+14 ];
+		ta[i+15 ]+=alpha*tb[i+15 ];
+	}
+for(     ;i<n;++i){ 	ta[i+0 ]+=alpha*tb[i+0 ];
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+	const float alpha = alphap ? *(float*)alphap : ((float)(1.0));
+	float*ta = a; const float*tb = b;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ]+=alpha*tb[i+0 ];
+		ta[i+1 ]+=alpha*tb[i+1 ];
+		ta[i+2 ]+=alpha*tb[i+2 ];
+		ta[i+3 ]+=alpha*tb[i+3 ];
+		ta[i+4 ]+=alpha*tb[i+4 ];
+		ta[i+5 ]+=alpha*tb[i+5 ];
+		ta[i+6 ]+=alpha*tb[i+6 ];
+		ta[i+7 ]+=alpha*tb[i+7 ];
+		ta[i+8 ]+=alpha*tb[i+8 ];
+		ta[i+9 ]+=alpha*tb[i+9 ];
+		ta[i+10 ]+=alpha*tb[i+10 ];
+		ta[i+11 ]+=alpha*tb[i+11 ];
+		ta[i+12 ]+=alpha*tb[i+12 ];
+		ta[i+13 ]+=alpha*tb[i+13 ];
+		ta[i+14 ]+=alpha*tb[i+14 ];
+		ta[i+15 ]+=alpha*tb[i+15 ];
+	}
+for(     ;i<n;++i){ 	ta[i+0 ]+=alpha*tb[i+0 ];
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+	const float complex alpha = alphap ? *(float complex*)alphap : ((float complex)(1.0));
+	float complex*ta = a; const float complex*tb = b;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ]+=alpha*tb[i+0 ];
+		ta[i+1 ]+=alpha*tb[i+1 ];
+		ta[i+2 ]+=alpha*tb[i+2 ];
+		ta[i+3 ]+=alpha*tb[i+3 ];
+		ta[i+4 ]+=alpha*tb[i+4 ];
+		ta[i+5 ]+=alpha*tb[i+5 ];
+		ta[i+6 ]+=alpha*tb[i+6 ];
+		ta[i+7 ]+=alpha*tb[i+7 ];
+		ta[i+8 ]+=alpha*tb[i+8 ];
+		ta[i+9 ]+=alpha*tb[i+9 ];
+		ta[i+10 ]+=alpha*tb[i+10 ];
+		ta[i+11 ]+=alpha*tb[i+11 ];
+		ta[i+12 ]+=alpha*tb[i+12 ];
+		ta[i+13 ]+=alpha*tb[i+13 ];
+		ta[i+14 ]+=alpha*tb[i+14 ];
+		ta[i+15 ]+=alpha*tb[i+15 ];
+	}
+for(     ;i<n;++i){ 	ta[i+0 ]+=alpha*tb[i+0 ];
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+	const double complex alpha = alphap ? *(double complex*)alphap : ((double complex)(1.0));
+	double complex*ta = a; const double complex*tb = b;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[i+0 ]+=alpha*tb[i+0 ];
+		ta[i+1 ]+=alpha*tb[i+1 ];
+		ta[i+2 ]+=alpha*tb[i+2 ];
+		ta[i+3 ]+=alpha*tb[i+3 ];
+		ta[i+4 ]+=alpha*tb[i+4 ];
+		ta[i+5 ]+=alpha*tb[i+5 ];
+		ta[i+6 ]+=alpha*tb[i+6 ];
+		ta[i+7 ]+=alpha*tb[i+7 ];
+		ta[i+8 ]+=alpha*tb[i+8 ];
+		ta[i+9 ]+=alpha*tb[i+9 ];
+		ta[i+10 ]+=alpha*tb[i+10 ];
+		ta[i+11 ]+=alpha*tb[i+11 ];
+		ta[i+12 ]+=alpha*tb[i+12 ];
+		ta[i+13 ]+=alpha*tb[i+13 ];
+		ta[i+14 ]+=alpha*tb[i+14 ];
+		ta[i+15 ]+=alpha*tb[i+15 ];
+	}
+for(     ;i<n;++i){ 	ta[i+0 ]+=alpha*tb[i+0 ];
+	 }
+}
+; 
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
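+
+/* Illustrative usage sketch for the daxpy-like helper above; not part of the
+ * upstream sources and kept disabled so the generated file is unchanged.
+ * Assumes RSB_NUMERICAL_TYPE_DOUBLE is enabled. */
+#if 0
+static void rsb_alpha_sum_example(void)
+{
+	double a[4] = { 1.0, 2.0, 3.0, 4.0 };
+	const double b[4] = { 10.0, 20.0, 30.0, 40.0 };
+	const double alpha = 2.0;
+	/* a[i] += alpha * b[i]: a becomes { 21, 42, 63, 84 }. */
+	rsb_alpha_sum(a, b, &alpha, RSB_NUMERICAL_TYPE_DOUBLE, 4);
+	/* A NULL alphap defaults alpha to 1, i.e. a plain vector sum. */
+	rsb_alpha_sum(a, b, NULL, RSB_NUMERICAL_TYPE_DOUBLE, 4);
+}
+#endif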
+
+
+rsb_err_t rsb__util_set_array_to_converted_integer(void *p, rsb_flags_t typecode, const rsb_nnz_idx_t n, const rsb_nnz_idx_t incp, const rsb_int v)
+{
+	/*!
+	 * Sets each of the n incp-strided elements of the array p to the
+	 * integer value v, converted to the numerical type given by typecode.
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+	double*tp = p; const double tv = (double)v;
+	{
+for(i=0;i+15<n;i+=16){
+	tp[((i+0 )*incp)] = tv;
+		tp[((i+1 )*incp)] = tv;
+		tp[((i+2 )*incp)] = tv;
+		tp[((i+3 )*incp)] = tv;
+		tp[((i+4 )*incp)] = tv;
+		tp[((i+5 )*incp)] = tv;
+		tp[((i+6 )*incp)] = tv;
+		tp[((i+7 )*incp)] = tv;
+		tp[((i+8 )*incp)] = tv;
+		tp[((i+9 )*incp)] = tv;
+		tp[((i+10 )*incp)] = tv;
+		tp[((i+11 )*incp)] = tv;
+		tp[((i+12 )*incp)] = tv;
+		tp[((i+13 )*incp)] = tv;
+		tp[((i+14 )*incp)] = tv;
+		tp[((i+15 )*incp)] = tv;
+	}
+for(     ;i<n;++i){ 	tp[((i+0 )*incp)] = tv;
+	 }
+}
+; 
+	}
+	else
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+	float*tp = p; const float tv = (float)v;
+	{
+for(i=0;i+15<n;i+=16){
+	tp[((i+0 )*incp)] = tv;
+		tp[((i+1 )*incp)] = tv;
+		tp[((i+2 )*incp)] = tv;
+		tp[((i+3 )*incp)] = tv;
+		tp[((i+4 )*incp)] = tv;
+		tp[((i+5 )*incp)] = tv;
+		tp[((i+6 )*incp)] = tv;
+		tp[((i+7 )*incp)] = tv;
+		tp[((i+8 )*incp)] = tv;
+		tp[((i+9 )*incp)] = tv;
+		tp[((i+10 )*incp)] = tv;
+		tp[((i+11 )*incp)] = tv;
+		tp[((i+12 )*incp)] = tv;
+		tp[((i+13 )*incp)] = tv;
+		tp[((i+14 )*incp)] = tv;
+		tp[((i+15 )*incp)] = tv;
+	}
+for(     ;i<n;++i){ 	tp[((i+0 )*incp)] = tv;
+	 }
+}
+; 
+	}
+	else
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+	float complex*tp = p; const float complex tv = (float complex)v;
+	{
+for(i=0;i+15<n;i+=16){
+	tp[((i+0 )*incp)] = tv;
+		tp[((i+1 )*incp)] = tv;
+		tp[((i+2 )*incp)] = tv;
+		tp[((i+3 )*incp)] = tv;
+		tp[((i+4 )*incp)] = tv;
+		tp[((i+5 )*incp)] = tv;
+		tp[((i+6 )*incp)] = tv;
+		tp[((i+7 )*incp)] = tv;
+		tp[((i+8 )*incp)] = tv;
+		tp[((i+9 )*incp)] = tv;
+		tp[((i+10 )*incp)] = tv;
+		tp[((i+11 )*incp)] = tv;
+		tp[((i+12 )*incp)] = tv;
+		tp[((i+13 )*incp)] = tv;
+		tp[((i+14 )*incp)] = tv;
+		tp[((i+15 )*incp)] = tv;
+	}
+for(     ;i<n;++i){ 	tp[((i+0 )*incp)] = tv;
+	 }
+}
+; 
+	}
+	else
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+	double complex*tp = p; const double complex tv = (double complex)v;
+	{
+for(i=0;i+15<n;i+=16){
+	tp[((i+0 )*incp)] = tv;
+		tp[((i+1 )*incp)] = tv;
+		tp[((i+2 )*incp)] = tv;
+		tp[((i+3 )*incp)] = tv;
+		tp[((i+4 )*incp)] = tv;
+		tp[((i+5 )*incp)] = tv;
+		tp[((i+6 )*incp)] = tv;
+		tp[((i+7 )*incp)] = tv;
+		tp[((i+8 )*incp)] = tv;
+		tp[((i+9 )*incp)] = tv;
+		tp[((i+10 )*incp)] = tv;
+		tp[((i+11 )*incp)] = tv;
+		tp[((i+12 )*incp)] = tv;
+		tp[((i+13 )*incp)] = tv;
+		tp[((i+14 )*incp)] = tv;
+		tp[((i+15 )*incp)] = tv;
+	}
+for(     ;i<n;++i){ 	tp[((i+0 )*incp)] = tv;
+	 }
+}
+; 
+	}
+	else
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
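+
+/* Illustrative usage sketch (not part of the upstream sources, disabled):
+ * sets every incp-th element to an integer value converted to the type. */
+#if 0
+static void rsb__util_set_array_to_converted_integer_example(void)
+{
+	double p[6] = { 0, 0, 0, 0, 0, 0 };
+	/* Write 7.0 at stride 2, i.e. p[0], p[2], p[4]: p = { 7,0,7,0,7,0 }. */
+	rsb__util_set_array_to_converted_integer(p, RSB_NUMERICAL_TYPE_DOUBLE, 3, 2, 7);
+}
+#endif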
+
+rsb_err_t rsb__vectors_left_sum_reduce_and_zero(void * d, void * s, const rsb_type_t typecode, const size_t n, const size_t incd, const size_t off)
+{
+	/*!
+	 * d[(off+i)*incd] <- d[(off+i)*incd] + s[off+i], for i in 0..n-1
+	 * s[off:off+n-1] <- 0
+         *
+	 * \param d	the destination array pointer
+	 * \param s	the source array pointer, zeroed on return
+	 * \param typecode	a valid type code
+	 * \param incd	the stride of d
+	 * \param off	the offset in the vectors
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+	double*td = d,*ts = s;
+	{
+for(i=0;i+15<n;i+=16){
+	td[(off+i+0 )*incd]+=ts[(off+i+0 )];
+	ts[(off+i+0 )] = ((double)(0));
+		td[(off+i+1 )*incd]+=ts[(off+i+1 )];
+	ts[(off+i+1 )] = ((double)(0));
+		td[(off+i+2 )*incd]+=ts[(off+i+2 )];
+	ts[(off+i+2 )] = ((double)(0));
+		td[(off+i+3 )*incd]+=ts[(off+i+3 )];
+	ts[(off+i+3 )] = ((double)(0));
+		td[(off+i+4 )*incd]+=ts[(off+i+4 )];
+	ts[(off+i+4 )] = ((double)(0));
+		td[(off+i+5 )*incd]+=ts[(off+i+5 )];
+	ts[(off+i+5 )] = ((double)(0));
+		td[(off+i+6 )*incd]+=ts[(off+i+6 )];
+	ts[(off+i+6 )] = ((double)(0));
+		td[(off+i+7 )*incd]+=ts[(off+i+7 )];
+	ts[(off+i+7 )] = ((double)(0));
+		td[(off+i+8 )*incd]+=ts[(off+i+8 )];
+	ts[(off+i+8 )] = ((double)(0));
+		td[(off+i+9 )*incd]+=ts[(off+i+9 )];
+	ts[(off+i+9 )] = ((double)(0));
+		td[(off+i+10 )*incd]+=ts[(off+i+10 )];
+	ts[(off+i+10 )] = ((double)(0));
+		td[(off+i+11 )*incd]+=ts[(off+i+11 )];
+	ts[(off+i+11 )] = ((double)(0));
+		td[(off+i+12 )*incd]+=ts[(off+i+12 )];
+	ts[(off+i+12 )] = ((double)(0));
+		td[(off+i+13 )*incd]+=ts[(off+i+13 )];
+	ts[(off+i+13 )] = ((double)(0));
+		td[(off+i+14 )*incd]+=ts[(off+i+14 )];
+	ts[(off+i+14 )] = ((double)(0));
+		td[(off+i+15 )*incd]+=ts[(off+i+15 )];
+	ts[(off+i+15 )] = ((double)(0));
+	}
+for(     ;i<n;++i){ 	td[(off+i+0 )*incd]+=ts[(off+i+0 )];
+	ts[(off+i+0 )] = ((double)(0));
+	 }
+}
+; 
+	}
+	else
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+	float*td = d,*ts = s;
+	{
+for(i=0;i+15<n;i+=16){
+	td[(off+i+0 )*incd]+=ts[(off+i+0 )];
+	ts[(off+i+0 )] = ((float)(0));
+		td[(off+i+1 )*incd]+=ts[(off+i+1 )];
+	ts[(off+i+1 )] = ((float)(0));
+		td[(off+i+2 )*incd]+=ts[(off+i+2 )];
+	ts[(off+i+2 )] = ((float)(0));
+		td[(off+i+3 )*incd]+=ts[(off+i+3 )];
+	ts[(off+i+3 )] = ((float)(0));
+		td[(off+i+4 )*incd]+=ts[(off+i+4 )];
+	ts[(off+i+4 )] = ((float)(0));
+		td[(off+i+5 )*incd]+=ts[(off+i+5 )];
+	ts[(off+i+5 )] = ((float)(0));
+		td[(off+i+6 )*incd]+=ts[(off+i+6 )];
+	ts[(off+i+6 )] = ((float)(0));
+		td[(off+i+7 )*incd]+=ts[(off+i+7 )];
+	ts[(off+i+7 )] = ((float)(0));
+		td[(off+i+8 )*incd]+=ts[(off+i+8 )];
+	ts[(off+i+8 )] = ((float)(0));
+		td[(off+i+9 )*incd]+=ts[(off+i+9 )];
+	ts[(off+i+9 )] = ((float)(0));
+		td[(off+i+10 )*incd]+=ts[(off+i+10 )];
+	ts[(off+i+10 )] = ((float)(0));
+		td[(off+i+11 )*incd]+=ts[(off+i+11 )];
+	ts[(off+i+11 )] = ((float)(0));
+		td[(off+i+12 )*incd]+=ts[(off+i+12 )];
+	ts[(off+i+12 )] = ((float)(0));
+		td[(off+i+13 )*incd]+=ts[(off+i+13 )];
+	ts[(off+i+13 )] = ((float)(0));
+		td[(off+i+14 )*incd]+=ts[(off+i+14 )];
+	ts[(off+i+14 )] = ((float)(0));
+		td[(off+i+15 )*incd]+=ts[(off+i+15 )];
+	ts[(off+i+15 )] = ((float)(0));
+	}
+for(     ;i<n;++i){ 	td[(off+i+0 )*incd]+=ts[(off+i+0 )];
+	ts[(off+i+0 )] = ((float)(0));
+	 }
+}
+; 
+	}
+	else
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+	float complex*td = d,*ts = s;
+	{
+for(i=0;i+15<n;i+=16){
+	td[(off+i+0 )*incd]+=ts[(off+i+0 )];
+	ts[(off+i+0 )] = ((float complex)(0));
+		td[(off+i+1 )*incd]+=ts[(off+i+1 )];
+	ts[(off+i+1 )] = ((float complex)(0));
+		td[(off+i+2 )*incd]+=ts[(off+i+2 )];
+	ts[(off+i+2 )] = ((float complex)(0));
+		td[(off+i+3 )*incd]+=ts[(off+i+3 )];
+	ts[(off+i+3 )] = ((float complex)(0));
+		td[(off+i+4 )*incd]+=ts[(off+i+4 )];
+	ts[(off+i+4 )] = ((float complex)(0));
+		td[(off+i+5 )*incd]+=ts[(off+i+5 )];
+	ts[(off+i+5 )] = ((float complex)(0));
+		td[(off+i+6 )*incd]+=ts[(off+i+6 )];
+	ts[(off+i+6 )] = ((float complex)(0));
+		td[(off+i+7 )*incd]+=ts[(off+i+7 )];
+	ts[(off+i+7 )] = ((float complex)(0));
+		td[(off+i+8 )*incd]+=ts[(off+i+8 )];
+	ts[(off+i+8 )] = ((float complex)(0));
+		td[(off+i+9 )*incd]+=ts[(off+i+9 )];
+	ts[(off+i+9 )] = ((float complex)(0));
+		td[(off+i+10 )*incd]+=ts[(off+i+10 )];
+	ts[(off+i+10 )] = ((float complex)(0));
+		td[(off+i+11 )*incd]+=ts[(off+i+11 )];
+	ts[(off+i+11 )] = ((float complex)(0));
+		td[(off+i+12 )*incd]+=ts[(off+i+12 )];
+	ts[(off+i+12 )] = ((float complex)(0));
+		td[(off+i+13 )*incd]+=ts[(off+i+13 )];
+	ts[(off+i+13 )] = ((float complex)(0));
+		td[(off+i+14 )*incd]+=ts[(off+i+14 )];
+	ts[(off+i+14 )] = ((float complex)(0));
+		td[(off+i+15 )*incd]+=ts[(off+i+15 )];
+	ts[(off+i+15 )] = ((float complex)(0));
+	}
+for(     ;i<n;++i){ 	td[(off+i+0 )*incd]+=ts[(off+i+0 )];
+	ts[(off+i+0 )] = ((float complex)(0));
+	 }
+}
+; 
+	}
+	else
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+	double complex*td = d,*ts = s;
+	{
+for(i=0;i+15<n;i+=16){
+	td[(off+i+0 )*incd]+=ts[(off+i+0 )];
+	ts[(off+i+0 )] = ((double complex)(0));
+		td[(off+i+1 )*incd]+=ts[(off+i+1 )];
+	ts[(off+i+1 )] = ((double complex)(0));
+		td[(off+i+2 )*incd]+=ts[(off+i+2 )];
+	ts[(off+i+2 )] = ((double complex)(0));
+		td[(off+i+3 )*incd]+=ts[(off+i+3 )];
+	ts[(off+i+3 )] = ((double complex)(0));
+		td[(off+i+4 )*incd]+=ts[(off+i+4 )];
+	ts[(off+i+4 )] = ((double complex)(0));
+		td[(off+i+5 )*incd]+=ts[(off+i+5 )];
+	ts[(off+i+5 )] = ((double complex)(0));
+		td[(off+i+6 )*incd]+=ts[(off+i+6 )];
+	ts[(off+i+6 )] = ((double complex)(0));
+		td[(off+i+7 )*incd]+=ts[(off+i+7 )];
+	ts[(off+i+7 )] = ((double complex)(0));
+		td[(off+i+8 )*incd]+=ts[(off+i+8 )];
+	ts[(off+i+8 )] = ((double complex)(0));
+		td[(off+i+9 )*incd]+=ts[(off+i+9 )];
+	ts[(off+i+9 )] = ((double complex)(0));
+		td[(off+i+10 )*incd]+=ts[(off+i+10 )];
+	ts[(off+i+10 )] = ((double complex)(0));
+		td[(off+i+11 )*incd]+=ts[(off+i+11 )];
+	ts[(off+i+11 )] = ((double complex)(0));
+		td[(off+i+12 )*incd]+=ts[(off+i+12 )];
+	ts[(off+i+12 )] = ((double complex)(0));
+		td[(off+i+13 )*incd]+=ts[(off+i+13 )];
+	ts[(off+i+13 )] = ((double complex)(0));
+		td[(off+i+14 )*incd]+=ts[(off+i+14 )];
+	ts[(off+i+14 )] = ((double complex)(0));
+		td[(off+i+15 )*incd]+=ts[(off+i+15 )];
+	ts[(off+i+15 )] = ((double complex)(0));
+	}
+for(     ;i<n;++i){ 	td[(off+i+0 )*incd]+=ts[(off+i+0 )];
+	ts[(off+i+0 )] = ((double complex)(0));
+	 }
+}
+; 
+	}
+	else
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
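+
+/* Illustrative usage sketch (not part of the upstream sources, disabled):
+ * accumulates a window of s into d and zeroes that window of s, such as
+ * when reducing partial result vectors. */
+#if 0
+static void rsb__vectors_left_sum_reduce_and_zero_example(void)
+{
+	double d[4] = { 1.0, 1.0, 1.0, 1.0 };
+	double s[4] = { 5.0, 6.0, 7.0, 8.0 };
+	/* n = 2, incd = 1, off = 1: d[1] += s[1], d[2] += s[2], then
+	 * s[1] = s[2] = 0. Afterwards d = { 1,7,8,1 } and s = { 5,0,0,8 }. */
+	rsb__vectors_left_sum_reduce_and_zero(d, s, RSB_NUMERICAL_TYPE_DOUBLE, 2, 1, 1);
+}
+#endif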
+
+
+static rsb_err_t rsb_alpha_sum_strided(void * a, const void * b, const void * alphap, rsb_type_t type, size_t n, int inca, int incb)
+{
+	/*!
+	 * a <- a + alpha * b
+         *
+	 * \param a	the destination array pointer
+	 * \param b	the source array pointer
+	 * \param alphap	an optional pointer to the scaling factor (1 if NULL)
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \param inca,incb	the strides of a and b
+	 * \note see daxpy in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+	if(inca == 1 && incb == 1)
+		return rsb_alpha_sum(a,b,alphap,type,n);
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+	const double alpha = alphap ? *(double*)alphap : ((double)(1.0));
+	double*ta = a; const double*tb = b;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[inca*(i+0 )]+=alpha*tb[incb*(i+0 )];
+		ta[inca*(i+1 )]+=alpha*tb[incb*(i+1 )];
+		ta[inca*(i+2 )]+=alpha*tb[incb*(i+2 )];
+		ta[inca*(i+3 )]+=alpha*tb[incb*(i+3 )];
+		ta[inca*(i+4 )]+=alpha*tb[incb*(i+4 )];
+		ta[inca*(i+5 )]+=alpha*tb[incb*(i+5 )];
+		ta[inca*(i+6 )]+=alpha*tb[incb*(i+6 )];
+		ta[inca*(i+7 )]+=alpha*tb[incb*(i+7 )];
+		ta[inca*(i+8 )]+=alpha*tb[incb*(i+8 )];
+		ta[inca*(i+9 )]+=alpha*tb[incb*(i+9 )];
+		ta[inca*(i+10 )]+=alpha*tb[incb*(i+10 )];
+		ta[inca*(i+11 )]+=alpha*tb[incb*(i+11 )];
+		ta[inca*(i+12 )]+=alpha*tb[incb*(i+12 )];
+		ta[inca*(i+13 )]+=alpha*tb[incb*(i+13 )];
+		ta[inca*(i+14 )]+=alpha*tb[incb*(i+14 )];
+		ta[inca*(i+15 )]+=alpha*tb[incb*(i+15 )];
+	}
+for(     ;i<n;++i){ 	ta[inca*(i+0 )]+=alpha*tb[incb*(i+0 )];
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+	const float alpha = alphap ? *(float*)alphap : ((float)(1.0));
+	float*ta = a; const float*tb = b;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[inca*(i+0 )]+=alpha*tb[incb*(i+0 )];
+		ta[inca*(i+1 )]+=alpha*tb[incb*(i+1 )];
+		ta[inca*(i+2 )]+=alpha*tb[incb*(i+2 )];
+		ta[inca*(i+3 )]+=alpha*tb[incb*(i+3 )];
+		ta[inca*(i+4 )]+=alpha*tb[incb*(i+4 )];
+		ta[inca*(i+5 )]+=alpha*tb[incb*(i+5 )];
+		ta[inca*(i+6 )]+=alpha*tb[incb*(i+6 )];
+		ta[inca*(i+7 )]+=alpha*tb[incb*(i+7 )];
+		ta[inca*(i+8 )]+=alpha*tb[incb*(i+8 )];
+		ta[inca*(i+9 )]+=alpha*tb[incb*(i+9 )];
+		ta[inca*(i+10 )]+=alpha*tb[incb*(i+10 )];
+		ta[inca*(i+11 )]+=alpha*tb[incb*(i+11 )];
+		ta[inca*(i+12 )]+=alpha*tb[incb*(i+12 )];
+		ta[inca*(i+13 )]+=alpha*tb[incb*(i+13 )];
+		ta[inca*(i+14 )]+=alpha*tb[incb*(i+14 )];
+		ta[inca*(i+15 )]+=alpha*tb[incb*(i+15 )];
+	}
+for(     ;i<n;++i){ 	ta[inca*(i+0 )]+=alpha*tb[incb*(i+0 )];
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+	const float complex alpha = alphap ? *(float complex*)alphap : ((float complex)(1.0));
+	float complex*ta = a; const float complex*tb = b;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[inca*(i+0 )]+=alpha*tb[incb*(i+0 )];
+		ta[inca*(i+1 )]+=alpha*tb[incb*(i+1 )];
+		ta[inca*(i+2 )]+=alpha*tb[incb*(i+2 )];
+		ta[inca*(i+3 )]+=alpha*tb[incb*(i+3 )];
+		ta[inca*(i+4 )]+=alpha*tb[incb*(i+4 )];
+		ta[inca*(i+5 )]+=alpha*tb[incb*(i+5 )];
+		ta[inca*(i+6 )]+=alpha*tb[incb*(i+6 )];
+		ta[inca*(i+7 )]+=alpha*tb[incb*(i+7 )];
+		ta[inca*(i+8 )]+=alpha*tb[incb*(i+8 )];
+		ta[inca*(i+9 )]+=alpha*tb[incb*(i+9 )];
+		ta[inca*(i+10 )]+=alpha*tb[incb*(i+10 )];
+		ta[inca*(i+11 )]+=alpha*tb[incb*(i+11 )];
+		ta[inca*(i+12 )]+=alpha*tb[incb*(i+12 )];
+		ta[inca*(i+13 )]+=alpha*tb[incb*(i+13 )];
+		ta[inca*(i+14 )]+=alpha*tb[incb*(i+14 )];
+		ta[inca*(i+15 )]+=alpha*tb[incb*(i+15 )];
+	}
+for(     ;i<n;++i){ 	ta[inca*(i+0 )]+=alpha*tb[incb*(i+0 )];
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+	const double complex alpha = alphap ? *(double complex*)alphap : ((double complex)(1.0));
+	double complex*ta = a; const double complex*tb = b;
+	{
+for(i=0;i+15<n;i+=16){
+	ta[inca*(i+0 )]+=alpha*tb[incb*(i+0 )];
+		ta[inca*(i+1 )]+=alpha*tb[incb*(i+1 )];
+		ta[inca*(i+2 )]+=alpha*tb[incb*(i+2 )];
+		ta[inca*(i+3 )]+=alpha*tb[incb*(i+3 )];
+		ta[inca*(i+4 )]+=alpha*tb[incb*(i+4 )];
+		ta[inca*(i+5 )]+=alpha*tb[incb*(i+5 )];
+		ta[inca*(i+6 )]+=alpha*tb[incb*(i+6 )];
+		ta[inca*(i+7 )]+=alpha*tb[incb*(i+7 )];
+		ta[inca*(i+8 )]+=alpha*tb[incb*(i+8 )];
+		ta[inca*(i+9 )]+=alpha*tb[incb*(i+9 )];
+		ta[inca*(i+10 )]+=alpha*tb[incb*(i+10 )];
+		ta[inca*(i+11 )]+=alpha*tb[incb*(i+11 )];
+		ta[inca*(i+12 )]+=alpha*tb[incb*(i+12 )];
+		ta[inca*(i+13 )]+=alpha*tb[incb*(i+13 )];
+		ta[inca*(i+14 )]+=alpha*tb[incb*(i+14 )];
+		ta[inca*(i+15 )]+=alpha*tb[incb*(i+15 )];
+	}
+for(     ;i<n;++i){ 	ta[inca*(i+0 )]+=alpha*tb[incb*(i+0 )];
+	 }
+}
+; 
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__cblas_Xaxpy(rsb_type_t type, size_t n, const void * alphap, const void * x, const int incx, void * y, const int incy)
+{
+	/*!
+	 * y <- y + alpha * x
+         */
+	return rsb_alpha_sum_strided(y,x,alphap,type,n,incy,incx);
+}
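+
+/* Illustrative usage sketch (not part of the upstream sources, disabled):
+ * the wrapper above mirrors cblas_daxpy's y <- y + alpha * x, with the
+ * numerical type selected at run time via the type code. */
+#if 0
+static void rsb__cblas_Xaxpy_example(void)
+{
+	const double x[3] = { 2.0, 4.0, 6.0 };
+	double y[3] = { 1.0, 1.0, 1.0 };
+	const double alpha = 0.5;
+	/* y becomes { 2, 3, 4 }. */
+	rsb__cblas_Xaxpy(RSB_NUMERICAL_TYPE_DOUBLE, 3, &alpha, x, 1, y, 1);
+}
+#endif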
+
+rsb_err_t rsb__vector_mult(const void * a, const void * b, void * c, rsb_type_t type, size_t n)
+{
+	/*!
+	 * c <- a*b
+	 * It is allowed to give c == a or c == b or a == b
+         *
+	 * \param a,b	the input array pointers
+	 * \param c	the output array pointer
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * 
+	 * FIXME: possibly unused.
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+	const double*ta = a; const double*tb = b; double*tc = c;
+	{
+for(i=0;i+15<n;i+=16){
+	tc[i+0 ] = ta[i+0 ]*tb[i+0 ];
+		tc[i+1 ] = ta[i+1 ]*tb[i+1 ];
+		tc[i+2 ] = ta[i+2 ]*tb[i+2 ];
+		tc[i+3 ] = ta[i+3 ]*tb[i+3 ];
+		tc[i+4 ] = ta[i+4 ]*tb[i+4 ];
+		tc[i+5 ] = ta[i+5 ]*tb[i+5 ];
+		tc[i+6 ] = ta[i+6 ]*tb[i+6 ];
+		tc[i+7 ] = ta[i+7 ]*tb[i+7 ];
+		tc[i+8 ] = ta[i+8 ]*tb[i+8 ];
+		tc[i+9 ] = ta[i+9 ]*tb[i+9 ];
+		tc[i+10 ] = ta[i+10 ]*tb[i+10 ];
+		tc[i+11 ] = ta[i+11 ]*tb[i+11 ];
+		tc[i+12 ] = ta[i+12 ]*tb[i+12 ];
+		tc[i+13 ] = ta[i+13 ]*tb[i+13 ];
+		tc[i+14 ] = ta[i+14 ]*tb[i+14 ];
+		tc[i+15 ] = ta[i+15 ]*tb[i+15 ];
+	}
+for(     ;i<n;++i){ 	tc[i+0 ] = ta[i+0 ]*tb[i+0 ];
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+	const float*ta = a; const float*tb = b; float*tc = c;
+	{
+for(i=0;i+15<n;i+=16){
+	tc[i+0 ] = ta[i+0 ]*tb[i+0 ];
+		tc[i+1 ] = ta[i+1 ]*tb[i+1 ];
+		tc[i+2 ] = ta[i+2 ]*tb[i+2 ];
+		tc[i+3 ] = ta[i+3 ]*tb[i+3 ];
+		tc[i+4 ] = ta[i+4 ]*tb[i+4 ];
+		tc[i+5 ] = ta[i+5 ]*tb[i+5 ];
+		tc[i+6 ] = ta[i+6 ]*tb[i+6 ];
+		tc[i+7 ] = ta[i+7 ]*tb[i+7 ];
+		tc[i+8 ] = ta[i+8 ]*tb[i+8 ];
+		tc[i+9 ] = ta[i+9 ]*tb[i+9 ];
+		tc[i+10 ] = ta[i+10 ]*tb[i+10 ];
+		tc[i+11 ] = ta[i+11 ]*tb[i+11 ];
+		tc[i+12 ] = ta[i+12 ]*tb[i+12 ];
+		tc[i+13 ] = ta[i+13 ]*tb[i+13 ];
+		tc[i+14 ] = ta[i+14 ]*tb[i+14 ];
+		tc[i+15 ] = ta[i+15 ]*tb[i+15 ];
+	}
+for(     ;i<n;++i){ 	tc[i+0 ] = ta[i+0 ]*tb[i+0 ];
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+	const float complex*ta = a; const float complex*tb = b; float complex*tc = c;
+	{
+for(i=0;i+15<n;i+=16){
+	tc[i+0 ] = ta[i+0 ]*tb[i+0 ];
+		tc[i+1 ] = ta[i+1 ]*tb[i+1 ];
+		tc[i+2 ] = ta[i+2 ]*tb[i+2 ];
+		tc[i+3 ] = ta[i+3 ]*tb[i+3 ];
+		tc[i+4 ] = ta[i+4 ]*tb[i+4 ];
+		tc[i+5 ] = ta[i+5 ]*tb[i+5 ];
+		tc[i+6 ] = ta[i+6 ]*tb[i+6 ];
+		tc[i+7 ] = ta[i+7 ]*tb[i+7 ];
+		tc[i+8 ] = ta[i+8 ]*tb[i+8 ];
+		tc[i+9 ] = ta[i+9 ]*tb[i+9 ];
+		tc[i+10 ] = ta[i+10 ]*tb[i+10 ];
+		tc[i+11 ] = ta[i+11 ]*tb[i+11 ];
+		tc[i+12 ] = ta[i+12 ]*tb[i+12 ];
+		tc[i+13 ] = ta[i+13 ]*tb[i+13 ];
+		tc[i+14 ] = ta[i+14 ]*tb[i+14 ];
+		tc[i+15 ] = ta[i+15 ]*tb[i+15 ];
+	}
+for(     ;i<n;++i){ 	tc[i+0 ] = ta[i+0 ]*tb[i+0 ];
+	 }
+}
+; 
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+	const double complex*ta = a; const double complex*tb = b; double complex*tc = c;
+	{
+for(i=0;i+15<n;i+=16){
+	tc[i+0 ] = ta[i+0 ]*tb[i+0 ];
+		tc[i+1 ] = ta[i+1 ]*tb[i+1 ];
+		tc[i+2 ] = ta[i+2 ]*tb[i+2 ];
+		tc[i+3 ] = ta[i+3 ]*tb[i+3 ];
+		tc[i+4 ] = ta[i+4 ]*tb[i+4 ];
+		tc[i+5 ] = ta[i+5 ]*tb[i+5 ];
+		tc[i+6 ] = ta[i+6 ]*tb[i+6 ];
+		tc[i+7 ] = ta[i+7 ]*tb[i+7 ];
+		tc[i+8 ] = ta[i+8 ]*tb[i+8 ];
+		tc[i+9 ] = ta[i+9 ]*tb[i+9 ];
+		tc[i+10 ] = ta[i+10 ]*tb[i+10 ];
+		tc[i+11 ] = ta[i+11 ]*tb[i+11 ];
+		tc[i+12 ] = ta[i+12 ]*tb[i+12 ];
+		tc[i+13 ] = ta[i+13 ]*tb[i+13 ];
+		tc[i+14 ] = ta[i+14 ]*tb[i+14 ];
+		tc[i+15 ] = ta[i+15 ]*tb[i+15 ];
+	}
+for(     ;i<n;++i){ 	tc[i+0 ] = ta[i+0 ]*tb[i+0 ];
+	 }
+}
+; 
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
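+
+/* Illustrative usage sketch (not part of the upstream sources, disabled):
+ * elementwise (Hadamard) product; in-place use with c == a is allowed. */
+#if 0
+static void rsb__vector_mult_example(void)
+{
+	const double a[3] = { 1.0, 2.0, 3.0 };
+	const double b[3] = { 4.0, 5.0, 6.0 };
+	double c[3];
+	/* c becomes { 4, 10, 18 }. */
+	rsb__vector_mult(a, b, c, RSB_NUMERICAL_TYPE_DOUBLE, 3);
+}
+#endif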
+
+rsb_err_t rsb__xcopy(void * a, const void * b, rsb_nnz_idx_t toi, rsb_nnz_idx_t foi, rsb_nnz_idx_t n,size_t el_size)
+{
+	/*!
+	 * a[toi:toi+n-1] <- b[foi:foi+n-1] 
+         *
+	 * \param a,b	the destination and source array pointers
+	 * \param el_size	the size in bytes of a single array element
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	rsb_memcpy(((rsb_byte_t*)a)+el_size*toi,((const rsb_byte_t*)b)+el_size*foi,el_size*n);
+	return RSB_ERR_NO_ERROR;
+}
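+
+/* Illustrative usage sketch (not part of the upstream sources, disabled):
+ * a type-agnostic block copy expressed in elements, backed by memcpy. */
+#if 0
+static void rsb__xcopy_example(void)
+{
+	const double b[4] = { 10.0, 20.0, 30.0, 40.0 };
+	double a[4] = { 0.0, 0.0, 0.0, 0.0 };
+	/* Copy 2 elements from b[1] to a[0]: a becomes { 20, 30, 0, 0 }. */
+	rsb__xcopy(a, b, 0, 1, 2, sizeof(double));
+}
+#endif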
+
+rsb_err_t rsb__do_are_same(const void * ap, const void * bp, rsb_nnz_idx_t n,rsb_type_t typecode, rsb_nnz_idx_t incx, rsb_nnz_idx_t incy)
+{
+	/*!
+         *
+	 * \param ap,bp	the input array pointers
+	 * \param typecode	a valid type code
+	 * \param incx,incy	the strides of ap and bp
+	 *
+	 * \return \rsberrcodemsg
+	 *
+	 * \note The comparison is exact, so e.g. 1+0I and 1-0I count as differing.
+	 * */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+	rsb_nnz_idx_t i;
+	const double *a = ap; const double *b = bp;
+
+	{
+for(i=0;i+15<n;i+=16){
+	if( a[incx*(i+0 )]!=b[incy*(i+0 )]) goto differing;
+	if( a[incx*(i+1 )]!=b[incy*(i+1 )]) goto differing;
+	if( a[incx*(i+2 )]!=b[incy*(i+2 )]) goto differing;
+	if( a[incx*(i+3 )]!=b[incy*(i+3 )]) goto differing;
+	if( a[incx*(i+4 )]!=b[incy*(i+4 )]) goto differing;
+	if( a[incx*(i+5 )]!=b[incy*(i+5 )]) goto differing;
+	if( a[incx*(i+6 )]!=b[incy*(i+6 )]) goto differing;
+	if( a[incx*(i+7 )]!=b[incy*(i+7 )]) goto differing;
+	if( a[incx*(i+8 )]!=b[incy*(i+8 )]) goto differing;
+	if( a[incx*(i+9 )]!=b[incy*(i+9 )]) goto differing;
+	if( a[incx*(i+10 )]!=b[incy*(i+10 )]) goto differing;
+	if( a[incx*(i+11 )]!=b[incy*(i+11 )]) goto differing;
+	if( a[incx*(i+12 )]!=b[incy*(i+12 )]) goto differing;
+	if( a[incx*(i+13 )]!=b[incy*(i+13 )]) goto differing;
+	if( a[incx*(i+14 )]!=b[incy*(i+14 )]) goto differing;
+	if( a[incx*(i+15 )]!=b[incy*(i+15 )]) goto differing;
+}
+for(     ;i<n;++i){ 	if( a[incx*(i+0 )]!=b[incy*(i+0 )]) goto differing;
+ }
+}
+		return RSB_ERR_NO_ERROR;
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+	rsb_nnz_idx_t i;
+	const float *a = ap; const float *b = bp;
+
+	{
+for(i=0;i+15<n;i+=16){
+	if( a[incx*(i+0 )]!=b[incy*(i+0 )]) goto differing;
+	if( a[incx*(i+1 )]!=b[incy*(i+1 )]) goto differing;
+	if( a[incx*(i+2 )]!=b[incy*(i+2 )]) goto differing;
+	if( a[incx*(i+3 )]!=b[incy*(i+3 )]) goto differing;
+	if( a[incx*(i+4 )]!=b[incy*(i+4 )]) goto differing;
+	if( a[incx*(i+5 )]!=b[incy*(i+5 )]) goto differing;
+	if( a[incx*(i+6 )]!=b[incy*(i+6 )]) goto differing;
+	if( a[incx*(i+7 )]!=b[incy*(i+7 )]) goto differing;
+	if( a[incx*(i+8 )]!=b[incy*(i+8 )]) goto differing;
+	if( a[incx*(i+9 )]!=b[incy*(i+9 )]) goto differing;
+	if( a[incx*(i+10 )]!=b[incy*(i+10 )]) goto differing;
+	if( a[incx*(i+11 )]!=b[incy*(i+11 )]) goto differing;
+	if( a[incx*(i+12 )]!=b[incy*(i+12 )]) goto differing;
+	if( a[incx*(i+13 )]!=b[incy*(i+13 )]) goto differing;
+	if( a[incx*(i+14 )]!=b[incy*(i+14 )]) goto differing;
+	if( a[incx*(i+15 )]!=b[incy*(i+15 )]) goto differing;
+}
+for(     ;i<n;++i){ 	if( a[incx*(i+0 )]!=b[incy*(i+0 )]) goto differing;
+ }
+}
+		return RSB_ERR_NO_ERROR;
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+	rsb_nnz_idx_t i;
+	const float complex *a = ap; const float complex *b = bp;
+
+	{
+for(i=0;i+15<n;i+=16){
+	if( a[incx*(i+0 )]!=b[incy*(i+0 )]) goto differing;
+	if( a[incx*(i+1 )]!=b[incy*(i+1 )]) goto differing;
+	if( a[incx*(i+2 )]!=b[incy*(i+2 )]) goto differing;
+	if( a[incx*(i+3 )]!=b[incy*(i+3 )]) goto differing;
+	if( a[incx*(i+4 )]!=b[incy*(i+4 )]) goto differing;
+	if( a[incx*(i+5 )]!=b[incy*(i+5 )]) goto differing;
+	if( a[incx*(i+6 )]!=b[incy*(i+6 )]) goto differing;
+	if( a[incx*(i+7 )]!=b[incy*(i+7 )]) goto differing;
+	if( a[incx*(i+8 )]!=b[incy*(i+8 )]) goto differing;
+	if( a[incx*(i+9 )]!=b[incy*(i+9 )]) goto differing;
+	if( a[incx*(i+10 )]!=b[incy*(i+10 )]) goto differing;
+	if( a[incx*(i+11 )]!=b[incy*(i+11 )]) goto differing;
+	if( a[incx*(i+12 )]!=b[incy*(i+12 )]) goto differing;
+	if( a[incx*(i+13 )]!=b[incy*(i+13 )]) goto differing;
+	if( a[incx*(i+14 )]!=b[incy*(i+14 )]) goto differing;
+	if( a[incx*(i+15 )]!=b[incy*(i+15 )]) goto differing;
+}
+for(     ;i<n;++i){ 	if( a[incx*(i+0 )]!=b[incy*(i+0 )]) goto differing;
+ }
+}
+		return RSB_ERR_NO_ERROR;
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+	rsb_nnz_idx_t i;
+	const double complex *a = ap; const double complex *b = bp;
+
+	{
+for(i=0;i+15<n;i+=16){
+	if( a[incx*(i+0 )]!=b[incy*(i+0 )]) goto differing;
+	if( a[incx*(i+1 )]!=b[incy*(i+1 )]) goto differing;
+	if( a[incx*(i+2 )]!=b[incy*(i+2 )]) goto differing;
+	if( a[incx*(i+3 )]!=b[incy*(i+3 )]) goto differing;
+	if( a[incx*(i+4 )]!=b[incy*(i+4 )]) goto differing;
+	if( a[incx*(i+5 )]!=b[incy*(i+5 )]) goto differing;
+	if( a[incx*(i+6 )]!=b[incy*(i+6 )]) goto differing;
+	if( a[incx*(i+7 )]!=b[incy*(i+7 )]) goto differing;
+	if( a[incx*(i+8 )]!=b[incy*(i+8 )]) goto differing;
+	if( a[incx*(i+9 )]!=b[incy*(i+9 )]) goto differing;
+	if( a[incx*(i+10 )]!=b[incy*(i+10 )]) goto differing;
+	if( a[incx*(i+11 )]!=b[incy*(i+11 )]) goto differing;
+	if( a[incx*(i+12 )]!=b[incy*(i+12 )]) goto differing;
+	if( a[incx*(i+13 )]!=b[incy*(i+13 )]) goto differing;
+	if( a[incx*(i+14 )]!=b[incy*(i+14 )]) goto differing;
+	if( a[incx*(i+15 )]!=b[incy*(i+15 )]) goto differing;
+}
+for(     ;i<n;++i){ 	if( a[incx*(i+0 )]!=b[incy*(i+0 )]) goto differing;
+ }
+}
+		return RSB_ERR_NO_ERROR;
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE;
+differing:
+	return RSB_ERR_GENERIC_ERROR;
+}
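+
+/* Illustrative usage sketch (not part of the upstream sources, disabled):
+ * elementwise equality check; note it reports inequality through the
+ * error code rather than through a boolean. */
+#if 0
+static void rsb__do_are_same_example(void)
+{
+	const double a[3] = { 1.0, 2.0, 3.0 };
+	const double b[3] = { 1.0, 2.0, 3.0 };
+	if( rsb__do_are_same(a, b, 3, RSB_NUMERICAL_TYPE_DOUBLE, 1, 1)
+			== RSB_ERR_NO_ERROR )
+		; /* vectors match elementwise */
+}
+#endif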
+
+static rsb_err_t rsb__xcopy_strided_typed(void * a, const void * b, rsb_nnz_idx_t toi, rsb_nnz_idx_t foi, rsb_nnz_idx_t n,rsb_type_t typecode, rsb_nnz_idx_t incx, rsb_nnz_idx_t incy)
+{
+	/*!
+	 * a[toi:toi+n-1] <- b[foi:foi+n-1] 
+         *
+	 * \param a,b	the destination and source array pointers
+	 * \param typecode	a valid type code
+	 * \param incx,incy	the strides of a and b
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	if(incx==1 && incy==1)
+		return rsb__xcopy(a,b,toi,foi,n,RSB_SIZEOF(typecode));
+	/* else */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+	rsb_nnz_idx_t i;
+	double *ap = a; const double *bp = b;
+	ap+=toi;
+	bp+=foi;
+	{
+for(i=0;i+15<n;i+=16){
+	ap[(i+0 )*incx] = bp[(i+0 )*incy];
+		ap[(i+1 )*incx] = bp[(i+1 )*incy];
+		ap[(i+2 )*incx] = bp[(i+2 )*incy];
+		ap[(i+3 )*incx] = bp[(i+3 )*incy];
+		ap[(i+4 )*incx] = bp[(i+4 )*incy];
+		ap[(i+5 )*incx] = bp[(i+5 )*incy];
+		ap[(i+6 )*incx] = bp[(i+6 )*incy];
+		ap[(i+7 )*incx] = bp[(i+7 )*incy];
+		ap[(i+8 )*incx] = bp[(i+8 )*incy];
+		ap[(i+9 )*incx] = bp[(i+9 )*incy];
+		ap[(i+10 )*incx] = bp[(i+10 )*incy];
+		ap[(i+11 )*incx] = bp[(i+11 )*incy];
+		ap[(i+12 )*incx] = bp[(i+12 )*incy];
+		ap[(i+13 )*incx] = bp[(i+13 )*incy];
+		ap[(i+14 )*incx] = bp[(i+14 )*incy];
+		ap[(i+15 )*incx] = bp[(i+15 )*incy];
+	}
+for(     ;i<n;++i){ 	ap[(i+0 )*incx] = bp[(i+0 )*incy];
+	 }
+}
+; 
+		return RSB_ERR_NO_ERROR;
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+	rsb_nnz_idx_t i;
+	float *ap = a; const float *bp = b;
+	ap+=toi;
+	bp+=foi;
+	{
+for(i=0;i+15<n;i+=16){
+	ap[(i+0 )*incx] = bp[(i+0 )*incy];
+		ap[(i+1 )*incx] = bp[(i+1 )*incy];
+		ap[(i+2 )*incx] = bp[(i+2 )*incy];
+		ap[(i+3 )*incx] = bp[(i+3 )*incy];
+		ap[(i+4 )*incx] = bp[(i+4 )*incy];
+		ap[(i+5 )*incx] = bp[(i+5 )*incy];
+		ap[(i+6 )*incx] = bp[(i+6 )*incy];
+		ap[(i+7 )*incx] = bp[(i+7 )*incy];
+		ap[(i+8 )*incx] = bp[(i+8 )*incy];
+		ap[(i+9 )*incx] = bp[(i+9 )*incy];
+		ap[(i+10 )*incx] = bp[(i+10 )*incy];
+		ap[(i+11 )*incx] = bp[(i+11 )*incy];
+		ap[(i+12 )*incx] = bp[(i+12 )*incy];
+		ap[(i+13 )*incx] = bp[(i+13 )*incy];
+		ap[(i+14 )*incx] = bp[(i+14 )*incy];
+		ap[(i+15 )*incx] = bp[(i+15 )*incy];
+	}
+for(     ;i<n;++i){ 	ap[(i+0 )*incx] = bp[(i+0 )*incy];
+	 }
+}
+; 
+		return RSB_ERR_NO_ERROR;
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+	rsb_nnz_idx_t i;
+	float complex *ap = a; const float complex *bp = b;
+	ap+=toi;
+	bp+=foi;
+	{
+for(i=0;i+15<n;i+=16){
+	ap[(i+0 )*incx] = bp[(i+0 )*incy];
+		ap[(i+1 )*incx] = bp[(i+1 )*incy];
+		ap[(i+2 )*incx] = bp[(i+2 )*incy];
+		ap[(i+3 )*incx] = bp[(i+3 )*incy];
+		ap[(i+4 )*incx] = bp[(i+4 )*incy];
+		ap[(i+5 )*incx] = bp[(i+5 )*incy];
+		ap[(i+6 )*incx] = bp[(i+6 )*incy];
+		ap[(i+7 )*incx] = bp[(i+7 )*incy];
+		ap[(i+8 )*incx] = bp[(i+8 )*incy];
+		ap[(i+9 )*incx] = bp[(i+9 )*incy];
+		ap[(i+10 )*incx] = bp[(i+10 )*incy];
+		ap[(i+11 )*incx] = bp[(i+11 )*incy];
+		ap[(i+12 )*incx] = bp[(i+12 )*incy];
+		ap[(i+13 )*incx] = bp[(i+13 )*incy];
+		ap[(i+14 )*incx] = bp[(i+14 )*incy];
+		ap[(i+15 )*incx] = bp[(i+15 )*incy];
+	}
+for(     ;i<n;++i){ 	ap[(i+0 )*incx] = bp[(i+0 )*incy];
+	 }
+}
+; 
+		return RSB_ERR_NO_ERROR;
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+	rsb_nnz_idx_t i;
+	double complex *ap = a; const double complex *bp = b;
+	ap+=toi;
+	bp+=foi;
+	{
+for(i=0;i+15<n;i+=16){
+	ap[(i+0 )*incx] = bp[(i+0 )*incy];
+		ap[(i+1 )*incx] = bp[(i+1 )*incy];
+		ap[(i+2 )*incx] = bp[(i+2 )*incy];
+		ap[(i+3 )*incx] = bp[(i+3 )*incy];
+		ap[(i+4 )*incx] = bp[(i+4 )*incy];
+		ap[(i+5 )*incx] = bp[(i+5 )*incy];
+		ap[(i+6 )*incx] = bp[(i+6 )*incy];
+		ap[(i+7 )*incx] = bp[(i+7 )*incy];
+		ap[(i+8 )*incx] = bp[(i+8 )*incy];
+		ap[(i+9 )*incx] = bp[(i+9 )*incy];
+		ap[(i+10 )*incx] = bp[(i+10 )*incy];
+		ap[(i+11 )*incx] = bp[(i+11 )*incy];
+		ap[(i+12 )*incx] = bp[(i+12 )*incy];
+		ap[(i+13 )*incx] = bp[(i+13 )*incy];
+		ap[(i+14 )*incx] = bp[(i+14 )*incy];
+		ap[(i+15 )*incx] = bp[(i+15 )*incy];
+	}
+for(     ;i<n;++i){ 	ap[(i+0 )*incx] = bp[(i+0 )*incy];
+	 }
+}
+; 
+		return RSB_ERR_NO_ERROR;
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+}
+
+rsb_err_t rsb__sqrt_of_sum_of_fabs_diffs(const void * a, const void * b, void *err, rsb_type_t type, size_t n)
+{
+	/*!
+	 * Will compute the square root of the sum of the squared differences of
+	 * the two vectors' elements (for real types, the Euclidean norm of a - b).
+	 * \param a,b	the input array pointers
+	 * \param err	a pointer to the result, of the same numerical type
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+	const double*ta = a; const double*tb = b;
+	*((double*)err) = ((double)(0));
+	{
+for(i=0;i+15<n;i+=16){
+	*((double*)(err))+=(ta[i+0 ]-tb[i+0 ])*(ta[i+0 ]-tb[i+0 ]);
+		*((double*)(err))+=(ta[i+1 ]-tb[i+1 ])*(ta[i+1 ]-tb[i+1 ]);
+		*((double*)(err))+=(ta[i+2 ]-tb[i+2 ])*(ta[i+2 ]-tb[i+2 ]);
+		*((double*)(err))+=(ta[i+3 ]-tb[i+3 ])*(ta[i+3 ]-tb[i+3 ]);
+		*((double*)(err))+=(ta[i+4 ]-tb[i+4 ])*(ta[i+4 ]-tb[i+4 ]);
+		*((double*)(err))+=(ta[i+5 ]-tb[i+5 ])*(ta[i+5 ]-tb[i+5 ]);
+		*((double*)(err))+=(ta[i+6 ]-tb[i+6 ])*(ta[i+6 ]-tb[i+6 ]);
+		*((double*)(err))+=(ta[i+7 ]-tb[i+7 ])*(ta[i+7 ]-tb[i+7 ]);
+		*((double*)(err))+=(ta[i+8 ]-tb[i+8 ])*(ta[i+8 ]-tb[i+8 ]);
+		*((double*)(err))+=(ta[i+9 ]-tb[i+9 ])*(ta[i+9 ]-tb[i+9 ]);
+		*((double*)(err))+=(ta[i+10 ]-tb[i+10 ])*(ta[i+10 ]-tb[i+10 ]);
+		*((double*)(err))+=(ta[i+11 ]-tb[i+11 ])*(ta[i+11 ]-tb[i+11 ]);
+		*((double*)(err))+=(ta[i+12 ]-tb[i+12 ])*(ta[i+12 ]-tb[i+12 ]);
+		*((double*)(err))+=(ta[i+13 ]-tb[i+13 ])*(ta[i+13 ]-tb[i+13 ]);
+		*((double*)(err))+=(ta[i+14 ]-tb[i+14 ])*(ta[i+14 ]-tb[i+14 ]);
+		*((double*)(err))+=(ta[i+15 ]-tb[i+15 ])*(ta[i+15 ]-tb[i+15 ]);
+	}
+for(     ;i<n;++i){ 	*((double*)(err))+=(ta[i+0 ]-tb[i+0 ])*(ta[i+0 ]-tb[i+0 ]);
+	 }
+}
+; 
+	*((double*)err) = sqrt((*((double*)err)));
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+	const float*ta = a; const float*tb = b;
+	*((float*)err) = ((float)(0));
+	{
+for(i=0;i+15<n;i+=16){
+	*((float*)(err))+=(ta[i+0 ]-tb[i+0 ])*(ta[i+0 ]-tb[i+0 ]);
+		*((float*)(err))+=(ta[i+1 ]-tb[i+1 ])*(ta[i+1 ]-tb[i+1 ]);
+		*((float*)(err))+=(ta[i+2 ]-tb[i+2 ])*(ta[i+2 ]-tb[i+2 ]);
+		*((float*)(err))+=(ta[i+3 ]-tb[i+3 ])*(ta[i+3 ]-tb[i+3 ]);
+		*((float*)(err))+=(ta[i+4 ]-tb[i+4 ])*(ta[i+4 ]-tb[i+4 ]);
+		*((float*)(err))+=(ta[i+5 ]-tb[i+5 ])*(ta[i+5 ]-tb[i+5 ]);
+		*((float*)(err))+=(ta[i+6 ]-tb[i+6 ])*(ta[i+6 ]-tb[i+6 ]);
+		*((float*)(err))+=(ta[i+7 ]-tb[i+7 ])*(ta[i+7 ]-tb[i+7 ]);
+		*((float*)(err))+=(ta[i+8 ]-tb[i+8 ])*(ta[i+8 ]-tb[i+8 ]);
+		*((float*)(err))+=(ta[i+9 ]-tb[i+9 ])*(ta[i+9 ]-tb[i+9 ]);
+		*((float*)(err))+=(ta[i+10 ]-tb[i+10 ])*(ta[i+10 ]-tb[i+10 ]);
+		*((float*)(err))+=(ta[i+11 ]-tb[i+11 ])*(ta[i+11 ]-tb[i+11 ]);
+		*((float*)(err))+=(ta[i+12 ]-tb[i+12 ])*(ta[i+12 ]-tb[i+12 ]);
+		*((float*)(err))+=(ta[i+13 ]-tb[i+13 ])*(ta[i+13 ]-tb[i+13 ]);
+		*((float*)(err))+=(ta[i+14 ]-tb[i+14 ])*(ta[i+14 ]-tb[i+14 ]);
+		*((float*)(err))+=(ta[i+15 ]-tb[i+15 ])*(ta[i+15 ]-tb[i+15 ]);
+	}
+for(     ;i<n;++i){ 	*((float*)(err))+=(ta[i+0 ]-tb[i+0 ])*(ta[i+0 ]-tb[i+0 ]);
+	 }
+}
+; 
+	*((float*)err) = sqrtf((*((float*)err)));
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+	const float complex*ta = a; const float complex*tb = b;
+	*((float complex*)err) = ((float complex)(0));
+	{
+for(i=0;i+15<n;i+=16){
+	*((float complex*)(err))+=(ta[i+0 ]-tb[i+0 ])*(ta[i+0 ]-tb[i+0 ]);
+		*((float complex*)(err))+=(ta[i+1 ]-tb[i+1 ])*(ta[i+1 ]-tb[i+1 ]);
+		*((float complex*)(err))+=(ta[i+2 ]-tb[i+2 ])*(ta[i+2 ]-tb[i+2 ]);
+		*((float complex*)(err))+=(ta[i+3 ]-tb[i+3 ])*(ta[i+3 ]-tb[i+3 ]);
+		*((float complex*)(err))+=(ta[i+4 ]-tb[i+4 ])*(ta[i+4 ]-tb[i+4 ]);
+		*((float complex*)(err))+=(ta[i+5 ]-tb[i+5 ])*(ta[i+5 ]-tb[i+5 ]);
+		*((float complex*)(err))+=(ta[i+6 ]-tb[i+6 ])*(ta[i+6 ]-tb[i+6 ]);
+		*((float complex*)(err))+=(ta[i+7 ]-tb[i+7 ])*(ta[i+7 ]-tb[i+7 ]);
+		*((float complex*)(err))+=(ta[i+8 ]-tb[i+8 ])*(ta[i+8 ]-tb[i+8 ]);
+		*((float complex*)(err))+=(ta[i+9 ]-tb[i+9 ])*(ta[i+9 ]-tb[i+9 ]);
+		*((float complex*)(err))+=(ta[i+10 ]-tb[i+10 ])*(ta[i+10 ]-tb[i+10 ]);
+		*((float complex*)(err))+=(ta[i+11 ]-tb[i+11 ])*(ta[i+11 ]-tb[i+11 ]);
+		*((float complex*)(err))+=(ta[i+12 ]-tb[i+12 ])*(ta[i+12 ]-tb[i+12 ]);
+		*((float complex*)(err))+=(ta[i+13 ]-tb[i+13 ])*(ta[i+13 ]-tb[i+13 ]);
+		*((float complex*)(err))+=(ta[i+14 ]-tb[i+14 ])*(ta[i+14 ]-tb[i+14 ]);
+		*((float complex*)(err))+=(ta[i+15 ]-tb[i+15 ])*(ta[i+15 ]-tb[i+15 ]);
+	}
+for(     ;i<n;++i){ 	*((float complex*)(err))+=(ta[i+0 ]-tb[i+0 ])*(ta[i+0 ]-tb[i+0 ]);
+	 }
+}
+; 
+	*((float complex*)err) = csqrtf((*((float complex*)err)));
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+	const double complex*ta = a; const double complex*tb = b;
+	*((double complex*)err) = ((double complex)(0));
+	{
+for(i=0;i+15<n;i+=16){
+	*((double complex*)(err))+=(ta[i+0 ]-tb[i+0 ])*(ta[i+0 ]-tb[i+0 ]);
+		*((double complex*)(err))+=(ta[i+1 ]-tb[i+1 ])*(ta[i+1 ]-tb[i+1 ]);
+		*((double complex*)(err))+=(ta[i+2 ]-tb[i+2 ])*(ta[i+2 ]-tb[i+2 ]);
+		*((double complex*)(err))+=(ta[i+3 ]-tb[i+3 ])*(ta[i+3 ]-tb[i+3 ]);
+		*((double complex*)(err))+=(ta[i+4 ]-tb[i+4 ])*(ta[i+4 ]-tb[i+4 ]);
+		*((double complex*)(err))+=(ta[i+5 ]-tb[i+5 ])*(ta[i+5 ]-tb[i+5 ]);
+		*((double complex*)(err))+=(ta[i+6 ]-tb[i+6 ])*(ta[i+6 ]-tb[i+6 ]);
+		*((double complex*)(err))+=(ta[i+7 ]-tb[i+7 ])*(ta[i+7 ]-tb[i+7 ]);
+		*((double complex*)(err))+=(ta[i+8 ]-tb[i+8 ])*(ta[i+8 ]-tb[i+8 ]);
+		*((double complex*)(err))+=(ta[i+9 ]-tb[i+9 ])*(ta[i+9 ]-tb[i+9 ]);
+		*((double complex*)(err))+=(ta[i+10 ]-tb[i+10 ])*(ta[i+10 ]-tb[i+10 ]);
+		*((double complex*)(err))+=(ta[i+11 ]-tb[i+11 ])*(ta[i+11 ]-tb[i+11 ]);
+		*((double complex*)(err))+=(ta[i+12 ]-tb[i+12 ])*(ta[i+12 ]-tb[i+12 ]);
+		*((double complex*)(err))+=(ta[i+13 ]-tb[i+13 ])*(ta[i+13 ]-tb[i+13 ]);
+		*((double complex*)(err))+=(ta[i+14 ]-tb[i+14 ])*(ta[i+14 ]-tb[i+14 ]);
+		*((double complex*)(err))+=(ta[i+15 ]-tb[i+15 ])*(ta[i+15 ]-tb[i+15 ]);
+	}
+for(     ;i<n;++i){ 	*((double complex*)(err))+=(ta[i+0 ]-tb[i+0 ])*(ta[i+0 ]-tb[i+0 ]);
+	 }
+}
+; 
+	*((double complex*)err) = csqrt((*((double complex*)err)));
+	}
+	else 
+#endif
+		return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
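+
+/* Illustrative usage sketch (not part of the upstream sources, disabled):
+ * for real types this is the Euclidean norm of the difference vector. */
+#if 0
+static void rsb__sqrt_of_sum_of_fabs_diffs_example(void)
+{
+	const double a[2] = { 3.0, 0.0 };
+	const double b[2] = { 0.0, 4.0 };
+	double err = 0.0;
+	/* err becomes sqrt(3^2 + 4^2) = 5. */
+	rsb__sqrt_of_sum_of_fabs_diffs(a, b, &err, RSB_NUMERICAL_TYPE_DOUBLE, 2);
+}
+#endif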
+
+rsb_err_t rsb__fill_with_increasing_values(void * array, rsb_type_t type, size_t n)
+{
+	/*!
+	 * \ingroup gr_vec
+	 * Fills the array with the increasing values 1, 2, ..., n, converted
+	 * to the given numerical type.
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{ 
+	double*ta = array;
+{
+for(i=0;i+15<n;i+=16){
+ta[i+0 ] = (const double)(i+0 +1);ta[i+1 ] = (const double)(i+1 +1);ta[i+2 ] = (const double)(i+2 +1);ta[i+3 ] = (const double)(i+3 +1);ta[i+4 ] = (const double)(i+4 +1);ta[i+5 ] = (const double)(i+5 +1);ta[i+6 ] = (const double)(i+6 +1);ta[i+7 ] = (const double)(i+7 +1);ta[i+8 ] = (const double)(i+8 +1);ta[i+9 ] = (const double)(i+9 +1);ta[i+10 ] = (const double)(i+10 +1);ta[i+11 ] = (const double)(i+11 +1);ta[i+12 ] = (const double)(i+12 +1);ta[i+13 ] = (const double)(i+13 +1);ta[i+14 ] = (const double)(i+14 +1);ta[i+15 ] = (const double)(i+15 +1);}
+for(     ;i<n;++i){ ta[i+0 ] = (const double)(i+0 +1); }
+}
+
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{ 
+	float*ta = array;
+{
+for(i=0;i+15<n;i+=16){
+ta[i+0 ] = (const float)(i+0 +1);ta[i+1 ] = (const float)(i+1 +1);ta[i+2 ] = (const float)(i+2 +1);ta[i+3 ] = (const float)(i+3 +1);ta[i+4 ] = (const float)(i+4 +1);ta[i+5 ] = (const float)(i+5 +1);ta[i+6 ] = (const float)(i+6 +1);ta[i+7 ] = (const float)(i+7 +1);ta[i+8 ] = (const float)(i+8 +1);ta[i+9 ] = (const float)(i+9 +1);ta[i+10 ] = (const float)(i+10 +1);ta[i+11 ] = (const float)(i+11 +1);ta[i+12 ] = (const float)(i+12 +1);ta[i+13 ] = (const float)(i+13 +1);ta[i+14 ] = (const float)(i+14 +1);ta[i+15 ] = (const float)(i+15 +1);}
+for(     ;i<n;++i){ ta[i+0 ] = (const float)(i+0 +1); }
+}
+
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{ 
+	float complex*ta = array;
+{
+for(i=0;i+15<n;i+=16){
+ta[i+0 ] = (const float complex)(i+0 +1);ta[i+1 ] = (const float complex)(i+1 +1);ta[i+2 ] = (const float complex)(i+2 +1);ta[i+3 ] = (const float complex)(i+3 +1);ta[i+4 ] = (const float complex)(i+4 +1);ta[i+5 ] = (const float complex)(i+5 +1);ta[i+6 ] = (const float complex)(i+6 +1);ta[i+7 ] = (const float complex)(i+7 +1);ta[i+8 ] = (const float complex)(i+8 +1);ta[i+9 ] = (const float complex)(i+9 +1);ta[i+10 ] = (const float complex)(i+10 +1);ta[i+11 ] = (const float complex)(i+11 +1);ta[i+12 ] = (const float complex)(i+12 +1);ta[i+13 ] = (const float complex)(i+13 +1);ta[i+14 ] = (const float complex)(i+14 +1);ta[i+15 ] = (const float complex)(i+15 +1);}
+for(     ;i<n;++i){ ta[i+0 ] = (const float complex)(i+0 +1); }
+}
+
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{ 
+	double complex*ta = array;
+{
+for(i=0;i+15<n;i+=16){
+ta[i+0 ] = (const double complex)(i+0 +1);ta[i+1 ] = (const double complex)(i+1 +1);ta[i+2 ] = (const double complex)(i+2 +1);ta[i+3 ] = (const double complex)(i+3 +1);ta[i+4 ] = (const double complex)(i+4 +1);ta[i+5 ] = (const double complex)(i+5 +1);ta[i+6 ] = (const double complex)(i+6 +1);ta[i+7 ] = (const double complex)(i+7 +1);ta[i+8 ] = (const double complex)(i+8 +1);ta[i+9 ] = (const double complex)(i+9 +1);ta[i+10 ] = (const double complex)(i+10 +1);ta[i+11 ] = (const double complex)(i+11 +1);ta[i+12 ] = (const double complex)(i+12 +1);ta[i+13 ] = (const double complex)(i+13 +1);ta[i+14 ] = (const double complex)(i+14 +1);ta[i+15 ] = (const double complex)(i+15 +1);}
+for(     ;i<n;++i){ ta[i+0 ] = (const double complex)(i+0 +1); }
+}
+
+	}
+	else 
+#endif
+		return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__util_do_conjugate(void * array, rsb_type_t type, size_t n)
+{
+	/*!
+	 * \ingroup gr_vec
+	 * Conjugates the array in place (a plain in-place copy for real types).
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  ){
+	double*ta = array;
+{
+for(i=0;i+15<n;i+=16){
+ta[i+0 ] = (ta[i+0 ]);ta[i+1 ] = (ta[i+1 ]);ta[i+2 ] = (ta[i+2 ]);ta[i+3 ] = (ta[i+3 ]);ta[i+4 ] = (ta[i+4 ]);ta[i+5 ] = (ta[i+5 ]);ta[i+6 ] = (ta[i+6 ]);ta[i+7 ] = (ta[i+7 ]);ta[i+8 ] = (ta[i+8 ]);ta[i+9 ] = (ta[i+9 ]);ta[i+10 ] = (ta[i+10 ]);ta[i+11 ] = (ta[i+11 ]);ta[i+12 ] = (ta[i+12 ]);ta[i+13 ] = (ta[i+13 ]);ta[i+14 ] = (ta[i+14 ]);ta[i+15 ] = (ta[i+15 ]);}
+for(     ;i<n;++i){ ta[i+0 ] = (ta[i+0 ]); }
+}
+}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  ){
+	float*ta = array;
+{
+for(i=0;i+15<n;i+=16){
+ta[i+0 ] = (ta[i+0 ]);ta[i+1 ] = (ta[i+1 ]);ta[i+2 ] = (ta[i+2 ]);ta[i+3 ] = (ta[i+3 ]);ta[i+4 ] = (ta[i+4 ]);ta[i+5 ] = (ta[i+5 ]);ta[i+6 ] = (ta[i+6 ]);ta[i+7 ] = (ta[i+7 ]);ta[i+8 ] = (ta[i+8 ]);ta[i+9 ] = (ta[i+9 ]);ta[i+10 ] = (ta[i+10 ]);ta[i+11 ] = (ta[i+11 ]);ta[i+12 ] = (ta[i+12 ]);ta[i+13 ] = (ta[i+13 ]);ta[i+14 ] = (ta[i+14 ]);ta[i+15 ] = (ta[i+15 ]);}
+for(     ;i<n;++i){ ta[i+0 ] = (ta[i+0 ]); }
+}
+}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ){
+	float complex*ta = array;
+{
+for(i=0;i+15<n;i+=16){
+ta[i+0 ] = conjf(ta[i+0 ]);ta[i+1 ] = conjf(ta[i+1 ]);ta[i+2 ] = conjf(ta[i+2 ]);ta[i+3 ] = conjf(ta[i+3 ]);ta[i+4 ] = conjf(ta[i+4 ]);ta[i+5 ] = conjf(ta[i+5 ]);ta[i+6 ] = conjf(ta[i+6 ]);ta[i+7 ] = conjf(ta[i+7 ]);ta[i+8 ] = conjf(ta[i+8 ]);ta[i+9 ] = conjf(ta[i+9 ]);ta[i+10 ] = conjf(ta[i+10 ]);ta[i+11 ] = conjf(ta[i+11 ]);ta[i+12 ] = conjf(ta[i+12 ]);ta[i+13 ] = conjf(ta[i+13 ]);ta[i+14 ] = conjf(ta[i+14 ]);ta[i+15 ] = conjf(ta[i+15 ]);}
+for(     ;i<n;++i){ ta[i+0 ] = conjf(ta[i+0 ]); }
+}
+}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ){
+	double complex*ta = array;
+{
+for(i=0;i+15<n;i+=16){
+ta[i+0 ] = conj(ta[i+0 ]);ta[i+1 ] = conj(ta[i+1 ]);ta[i+2 ] = conj(ta[i+2 ]);ta[i+3 ] = conj(ta[i+3 ]);ta[i+4 ] = conj(ta[i+4 ]);ta[i+5 ] = conj(ta[i+5 ]);ta[i+6 ] = conj(ta[i+6 ]);ta[i+7 ] = conj(ta[i+7 ]);ta[i+8 ] = conj(ta[i+8 ]);ta[i+9 ] = conj(ta[i+9 ]);ta[i+10 ] = conj(ta[i+10 ]);ta[i+11 ] = conj(ta[i+11 ]);ta[i+12 ] = conj(ta[i+12 ]);ta[i+13 ] = conj(ta[i+13 ]);ta[i+14 ] = conj(ta[i+14 ]);ta[i+15 ] = conj(ta[i+15 ]);}
+for(     ;i<n;++i){ ta[i+0 ] = conj(ta[i+0 ]); }
+}
+}
+	else 
+#endif
+		return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
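+
+/* Illustrative usage sketch (not part of the upstream sources, disabled):
+ * in-place conjugation; the real-typed branches above reduce to copying
+ * each element over itself. */
+#if 0
+static void rsb__util_do_conjugate_example(void)
+{
+	double complex za[2] = { 1.0 + 2.0 * I, 3.0 - 4.0 * I };
+	/* za becomes { 1 - 2i, 3 + 4i }. */
+	rsb__util_do_conjugate(za, RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX, 2);
+}
+#endif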
+
+rsb_err_t rsb__util_do_negate(void * array, rsb_type_t type, size_t n)
+{
+	/*!
+	 * \ingroup gr_vec
+	 * Will negate the input n elements long array of type type.
+	 * \param array	an array pointer
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+{ 
+	double*ta = array;
+{
+for(i=0;i+15<n;i+=16){
+ta[i+0 ] = -ta[i+0 ];ta[i+1 ] = -ta[i+1 ];ta[i+2 ] = -ta[i+2 ];ta[i+3 ] = -ta[i+3 ];ta[i+4 ] = -ta[i+4 ];ta[i+5 ] = -ta[i+5 ];ta[i+6 ] = -ta[i+6 ];ta[i+7 ] = -ta[i+7 ];ta[i+8 ] = -ta[i+8 ];ta[i+9 ] = -ta[i+9 ];ta[i+10 ] = -ta[i+10 ];ta[i+11 ] = -ta[i+11 ];ta[i+12 ] = -ta[i+12 ];ta[i+13 ] = -ta[i+13 ];ta[i+14 ] = -ta[i+14 ];ta[i+15 ] = -ta[i+15 ];}
+for(     ;i<n;++i){ ta[i+0 ] = -ta[i+0 ]; }
+}
+}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+{ 
+	float*ta = array;
+{
+for(i=0;i+15<n;i+=16){
+ta[i+0 ] = -ta[i+0 ];ta[i+1 ] = -ta[i+1 ];ta[i+2 ] = -ta[i+2 ];ta[i+3 ] = -ta[i+3 ];ta[i+4 ] = -ta[i+4 ];ta[i+5 ] = -ta[i+5 ];ta[i+6 ] = -ta[i+6 ];ta[i+7 ] = -ta[i+7 ];ta[i+8 ] = -ta[i+8 ];ta[i+9 ] = -ta[i+9 ];ta[i+10 ] = -ta[i+10 ];ta[i+11 ] = -ta[i+11 ];ta[i+12 ] = -ta[i+12 ];ta[i+13 ] = -ta[i+13 ];ta[i+14 ] = -ta[i+14 ];ta[i+15 ] = -ta[i+15 ];}
+for(     ;i<n;++i){ ta[i+0 ] = -ta[i+0 ]; }
+}
+}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+{ 
+	float complex*ta = array;
+{
+for(i=0;i+15<n;i+=16){
+ta[i+0 ] = -ta[i+0 ];ta[i+1 ] = -ta[i+1 ];ta[i+2 ] = -ta[i+2 ];ta[i+3 ] = -ta[i+3 ];ta[i+4 ] = -ta[i+4 ];ta[i+5 ] = -ta[i+5 ];ta[i+6 ] = -ta[i+6 ];ta[i+7 ] = -ta[i+7 ];ta[i+8 ] = -ta[i+8 ];ta[i+9 ] = -ta[i+9 ];ta[i+10 ] = -ta[i+10 ];ta[i+11 ] = -ta[i+11 ];ta[i+12 ] = -ta[i+12 ];ta[i+13 ] = -ta[i+13 ];ta[i+14 ] = -ta[i+14 ];ta[i+15 ] = -ta[i+15 ];}
+for(     ;i<n;++i){ ta[i+0 ] = -ta[i+0 ]; }
+}
+}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+{ 
+	double complex*ta = array;
+{
+for(i=0;i+15<n;i+=16){
+ta[i+0 ] = -ta[i+0 ];ta[i+1 ] = -ta[i+1 ];ta[i+2 ] = -ta[i+2 ];ta[i+3 ] = -ta[i+3 ];ta[i+4 ] = -ta[i+4 ];ta[i+5 ] = -ta[i+5 ];ta[i+6 ] = -ta[i+6 ];ta[i+7 ] = -ta[i+7 ];ta[i+8 ] = -ta[i+8 ];ta[i+9 ] = -ta[i+9 ];ta[i+10 ] = -ta[i+10 ];ta[i+11 ] = -ta[i+11 ];ta[i+12 ] = -ta[i+12 ];ta[i+13 ] = -ta[i+13 ];ta[i+14 ] = -ta[i+14 ];ta[i+15 ] = -ta[i+15 ];}
+for(     ;i<n;++i){ ta[i+0 ] = -ta[i+0 ]; }
+}
+}
+	else 
+#endif
+		return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__util_find_min(void * minp, const void * array, rsb_type_t type, size_t n, rsb_nnz_idx_t inc)
+{
+	/*!
+	 * \ingroup gr_vec
+	 * Writes to *minp the array element of smallest absolute value.
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+	if(n<1)return RSB_ERR_BADARGS;
+	if(inc<1)return RSB_ERR_BADARGS;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{const double * ap = array;double *mp = minp;
+	*mp = *ap;for(i = 1;i<n;++i){if(fabs(ap[i*inc])<fabs(*mp) )*mp = ap[i*inc];
+	}}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{const float * ap = array;float *mp = minp;
+	*mp = *ap;for(i = 1;i<n;++i){if(fabsf(ap[i*inc])<fabsf(*mp) )*mp = ap[i*inc];
+	}}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{const float complex * ap = array;float complex *mp = minp;
+	*mp = *ap;for(i = 1;i<n;++i){if(cabsf(ap[i*inc])<cabsf(*mp) )*mp = ap[i*inc];
+	}}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{const double complex * ap = array;double complex *mp = minp;
+	*mp = *ap;for(i = 1;i<n;++i){if(cabs(ap[i*inc])<cabs(*mp) )*mp = ap[i*inc];
+	}}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_err_t rsb__util_find_max(void * maxp, const void * array, rsb_type_t type, size_t n, rsb_nnz_idx_t inc)
+{
+	/*!
+	 * \ingroup gr_vec
+	 * Writes to *maxp the array element of largest absolute value.
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+	if(n<1)return RSB_ERR_BADARGS;
+	if(inc<1)return RSB_ERR_BADARGS;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{const double * ap = array;double *mp = maxp;
+	*mp = *ap;for(i=1;i<n;++i){if(fabs(ap[i*inc])>fabs(*mp))*mp = ap[i*inc];
+	}}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{const float * ap = array;float *mp = maxp;
+	*mp = *ap;for(i=1;i<n;++i){if(fabsf(ap[i*inc])>fabsf(*mp))*mp = ap[i*inc];
+	}}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{const float complex * ap = array;float complex *mp = maxp;
+	*mp = *ap;for(i=1;i<n;++i){if(cabsf(ap[i*inc])>cabsf(*mp))*mp = ap[i*inc];
+	}}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{const double complex * ap = array;double complex *mp = maxp;
+	*mp = *ap;for(i=1;i<n;++i){if(cabs(ap[i*inc])>cabs(*mp))*mp = ap[i*inc];
+	}}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
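+
+/* Illustrative usage sketch (not part of the upstream sources, disabled):
+ * both helpers above compare by absolute value, so the "minimum" is the
+ * element of smallest magnitude, not the algebraically smallest one. */
+#if 0
+static void rsb__util_find_min_max_example(void)
+{
+	const double a[3] = { -5.0, 2.0, -1.0 };
+	double mn = 0.0, mx = 0.0;
+	rsb__util_find_min(&mn, a, RSB_NUMERICAL_TYPE_DOUBLE, 3, 1); /* mn = -1 */
+	rsb__util_find_max(&mx, a, RSB_NUMERICAL_TYPE_DOUBLE, 3, 1); /* mx = -5 */
+}
+#endif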
+
+rsb_err_t rsb__util_drop_to_zero_if_above_threshold(void * array, rsb_type_t type, size_t n, const void * threshold)
+{
+	/*!
+	 * \ingroup gr_vec
+	 * Sets to zero the elements whose absolute value exceeds that of the threshold.
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{const double th = (*(const double*)(threshold)); double*ta = array;
+	for(i = 0;i<n;++i)
+	{if(fabs(th)<fabs(ta[i]))ta[i] = ((double)(0));}}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{const float th = (*(const float*)(threshold)); float*ta = array;
+	for(i = 0;i<n;++i)
+	{if(fabsf(th)<fabsf(ta[i]))ta[i] = ((float)(0));}}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{const float complex th = (*(const float complex*)(threshold)); float complex*ta = array;
+	for(i = 0;i<n;++i)
+	{if(cabsf(th)<cabsf(ta[i]))ta[i] = ((float complex)(0));}}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{const double complex th = (*(const double complex*)(threshold)); double complex*ta = array;
+	for(i = 0;i<n;++i)
+	{if(cabs(th)<cabs(ta[i]))ta[i] = ((double complex)(0));}}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+
+rsb_nnz_idx_t rsb__util_count_positive(void * array, rsb_type_t type, size_t n)
+{
+	/*!
+	 * \ingroup gr_vec
+	 *
+	 * \return the count of elements whose (real part of the) value is positive
+	 * */
+	size_t i, c = 0;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{	double*ta = array;
+		 for(i=0;i<n;++i)
+			c+=((ta[i])>(double)0);
+	}else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{	float*ta = array;
+		 for(i=0;i<n;++i)
+			c+=((ta[i])>(float)0);
+	}else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{	float complex*ta = array;
+		 for(i=0;i<n;++i)
+			c+=(crealf(ta[i])>(float)0);
+	}else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{	double complex*ta = array;
+		 for(i=0;i<n;++i)
+			c+=(creal(ta[i])>(double)0);
+	}else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return c;
+}
+
+rsb_nnz_idx_t rsb__util_count_negative(void * array, rsb_type_t type, size_t n)
+{
+	/*!
+	 * \ingroup gr_vec
+	 *
+	 * \return the count of elements whose (real part of the) value is negative
+	 * */
+	size_t i, c = 0;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{	double*ta = array;
+		 for(i=0;i<n;++i)
+			c+=((ta[i])<(double)0);
+	}else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{	float*ta = array;
+		 for(i=0;i<n;++i)
+			c+=((ta[i])<(float)0);
+	}else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{	float complex*ta = array;
+		 for(i=0;i<n;++i)
+			c+=(crealf(ta[i])<(float)0);
+	}else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{	double complex*ta = array;
+		 for(i=0;i<n;++i)
+			c+=(creal(ta[i])<(double)0);
+	}else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return c;
+}
+
+rsb_err_t rsb__util_drop_to_zero_if_under_threshold(void * array, rsb_type_t type, size_t n, const void * threshold)
+{
+	/*!
+	 * \ingroup gr_vec
+	 * Sets to zero the elements whose absolute value is below that of the threshold.
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  ) {
+	const double th = (*(double*)(threshold)); double*ta = ((double*)(array));
+	for(i=0;i<n;++i){if(fabs(th)>fabs(ta[i]))ta[i] = ((double)(0));}}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  ) {
+	const float th = (*(float*)(threshold)); float*ta = ((float*)(array));
+	for(i=0;i<n;++i){if(fabsf(th)>fabsf(ta[i]))ta[i] = ((float)(0));}}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ) {
+	const float complex th = (*(float complex*)(threshold)); float complex*ta = ((float complex*)(array));
+	for(i=0;i<n;++i){if(cabsf(th)>cabsf(ta[i]))ta[i] = ((float complex)(0));}}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ) {
+	const double complex th = (*(double complex*)(threshold)); double complex*ta = ((double complex*)(array));
+	for(i=0;i<n;++i){if(cabs(th)>cabs(ta[i]))ta[i] = ((double complex)(0));}}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
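+
+/* Illustrative usage sketch (not part of the upstream sources, disabled):
+ * magnitude-based filtering; the "under" variant above zeroes the small
+ * entries, the earlier "above" variant zeroes the large ones. */
+#if 0
+static void rsb__util_drop_to_zero_example(void)
+{
+	double a[4] = { 0.1, -2.0, 0.5, 3.0 };
+	const double th = 1.0;
+	/* Zeroes entries with |a[i]| < |th|: a becomes { 0, -2, 0, 3 }. */
+	rsb__util_drop_to_zero_if_under_threshold(a, RSB_NUMERICAL_TYPE_DOUBLE, 4, &th);
+}
+#endif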
+
+rsb_err_t rsb__fill_with_ones(void * array, rsb_type_t type, size_t n, size_t incx){
+	/*!
+	 * \ingroup gr_vec
+	 * Will set to one each of the n incx-strided elements of the input array.
+	 * \param array	an array pointer
+	 * \param type	a valid type code
+	 * \param n	the number of elements to set
+	 * \param incx	the stride between consecutive elements
+	 *
+	 * \return \rsberrcodemsg
+	 * TODO:RENAME: rsb__fill_with_ones -> rsb__val_fill_with_ones.
+	 * */
+	size_t i;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  ){
+	double*ta = ((double*)(array));
+ for(i=0;i<n;++i) {ta[i*incx] = ((double)(1.0));}}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  ){
+	float*ta = ((float*)(array));
+ for(i=0;i<n;++i) {ta[i*incx] = ((float)(1.0));}}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  ){
+	float complex*ta = ((float complex*)(array));
+ for(i=0;i<n;++i) {ta[i*incx] = ((float complex)(1.0));}}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  ){
+	double complex*ta = ((double complex*)(array));
+ for(i=0;i<n;++i) {ta[i*incx] = ((double complex)(1.0));}}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
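+
+/* Illustrative usage sketch (not part of the upstream sources, disabled):
+ * strided fill with the multiplicative identity of the chosen type. */
+#if 0
+static void rsb__fill_with_ones_example(void)
+{
+	double a[6] = { 0, 0, 0, 0, 0, 0 };
+	/* n = 3 elements at stride 2: a becomes { 1, 0, 1, 0, 1, 0 }. */
+	rsb__fill_with_ones(a, RSB_NUMERICAL_TYPE_DOUBLE, 3, 2);
+}
+#endif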
+
+rsb_err_t rsb__debug_print_vectors_diff(const void * v1, const void * v2, size_t n, rsb_type_t type, size_t incx, size_t incy, int onlyfirst){
+	/*! 
+	 * A debug function for printing the difference of two vectors of a specified type, in parallel.
+	 * FIXME : It should take into account thresholds specific to each numerical type.
+	 **/
+#if RSB_ALLOW_STDOUT
+	size_t i, differing = 0;
+	if(!v1 || !v2)return RSB_ERR_BADARGS;
+
+	RSB_STDERR("\t vectors diff :\n");
+	
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		const double *v1p = v1,*v2p = v2; double th = 0.0001;
+		for(i=0;i<n ;++i) 
+										if(fabs((double)(v1p[i*incx]-v2p[i*incy]))>th)/*FIXME : incomplete check*/
+{		differing++;
+		if((onlyfirst==0)||(onlyfirst>differing))
+		RSB_STDOUT("%zd : "RSB_MATRIX_STORAGE_DOUBLE_PRINTF_STRING" "RSB_MATRIX_STORAGE_DOUBLE_PRINTF_STRING"\n",(rsb_printf_int_t)i,						v1p[i*incx],v2p[i*incy]		);
+}
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		const float *v1p = v1,*v2p = v2; float th = 0.0001;
+		for(i=0;i<n ;++i) 
+										if(fabs((double)(v1p[i*incx]-v2p[i*incy]))>th)/*FIXME : incomplete check*/
+{		differing++;
+		if((onlyfirst==0)||(onlyfirst>differing))
+		RSB_STDOUT("%zd : "RSB_MATRIX_STORAGE_FLOAT_PRINTF_STRING" "RSB_MATRIX_STORAGE_FLOAT_PRINTF_STRING"\n",(rsb_printf_int_t)i,						v1p[i*incx],v2p[i*incy]		);
+}
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		const float complex *v1p = v1,*v2p = v2; float th = 0.0001;
+		for(i=0;i<n ;++i) 
+				if(crealf(v1p[i*incx])-crealf(v2p[i*incy])>th)/*FIXME : incomplete check*/{		differing++;
+		if((onlyfirst==0)||(onlyfirst>differing))
+		RSB_STDOUT("%zd : "RSB_MATRIX_STORAGE_FLOAT_COMPLEX_PRINTF_STRING" "RSB_MATRIX_STORAGE_FLOAT_COMPLEX_PRINTF_STRING"\n",(rsb_printf_int_t)i,				crealf(v1p[i*incx]),cimagf(v1p[i*incx]),crealf(v2p[i*incy]),cimagf(v2p[i*incy])		);
+}
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		const double complex *v1p = v1,*v2p = v2; double th = 0.0001;
+		for(i=0;i<n ;++i) 
+		if(creal(v1p[i*incx])-creal(v2p[i*incy])>th)/*FIXME : incomplete check*/{		differing++;
+		if((onlyfirst==0)||(onlyfirst>differing))
+		RSB_STDOUT("%zd : "RSB_MATRIX_STORAGE_DOUBLE_COMPLEX_PRINTF_STRING" "RSB_MATRIX_STORAGE_DOUBLE_COMPLEX_PRINTF_STRING"\n",(rsb_printf_int_t)i,		creal(v1p[i*incx]),cimag(v1p[i*incx]),creal(v2p[i*incy]),cimag(v2p[i*incy])		);
+}
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	if(differing>onlyfirst)RSB_STDOUT("...(for a total of %zd differing entries)...\n",(rsb_printf_int_t)(differing-onlyfirst));
+	return RSB_ERR_NO_ERROR;
+#else
+	return RSB_ERR_UNSUPPORTED_FEATURE; 
+#endif
+}
+
+rsb_err_t rsb__debug_print_value(const void * v, rsb_type_t type){
+	/*! 
+	 **/
+#if RSB_ALLOW_STDOUT
+	if(!v)return RSB_ERR_BADARGS;
+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		const double *v1p = v;
+		RSB_STDOUT(RSB_MATRIX_STORAGE_DOUBLE_PRINTF_STRING,						v1p[0]		);
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		const float *v1p = v;
+		RSB_STDOUT(RSB_MATRIX_STORAGE_FLOAT_PRINTF_STRING,						v1p[0]		);
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		const float complex *v1p = v;
+		RSB_STDOUT(RSB_MATRIX_STORAGE_FLOAT_COMPLEX_PRINTF_STRING,				crealf(v1p[0]),cimagf(v1p[0])		);
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		const double complex *v1p = v;
+		RSB_STDOUT(RSB_MATRIX_STORAGE_DOUBLE_COMPLEX_PRINTF_STRING,		creal(v1p[0]),cimag(v1p[0])		);
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+#else
+	return RSB_ERR_UNSUPPORTED_FEATURE; 
+#endif
+}
+
+rsb_err_t rsb__debug_print_vector_extra(const void * v1, size_t n, rsb_type_t type, size_t inc, int style, FILE*stream){
+	/*! 
+	 * A debug function for printing a single vector of a specified type, optionally (style 0x1) preceded by a Matrix Market array header.
+	 **/
+#if RSB_ALLOW_STDOUT
+	rsb_nnz_idx_t i;
+	int want_header = ( style == 0x1 );
+	const char * ts = RSB_IS_MATRIX_TYPE_COMPLEX(type)?"complex":"real";
+	const char * ss = RSB_SYMMETRY_STRING(RSB_FLAG_NOFLAGS);
+	
+	if( n < 0 ) /* note : n is of unsigned type size_t, so this guard is vacuous */
+		goto errb;
+
+	if(!v1 || !stream)
+		goto errb;
+
+	/*if(!want_header)
+		RSB_STDERR("\t vectors  :\n");*/
+	
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		const double *v1p = v1;
+		if(want_header)RSB_FPRINTF(stream,"%%%%MatrixMarket matrix array %s %s\n%zd %zd\n",ts,ss,(rsb_printf_int_t)n,(rsb_printf_int_t)1);
+		for(i=0;i<n;++i) 
+		RSB_FPRINTF(stream,RSB_MATRIX_STORAGE_DOUBLE_PRINTF_STRING "\n",						v1p[i*inc]		);
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		const float *v1p = v1;
+		if(want_header)RSB_FPRINTF(stream,"%%%%MatrixMarket matrix array %s %s\n%zd %zd\n",ts,ss,(rsb_printf_int_t)n,(rsb_printf_int_t)1);
+		for(i=0;i<n;++i) 
+		RSB_FPRINTF(stream,RSB_MATRIX_STORAGE_FLOAT_PRINTF_STRING "\n",						v1p[i*inc]		);
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		const float complex *v1p = v1;
+		if(want_header)RSB_FPRINTF(stream,"%%%%MatrixMarket matrix array %s %s\n%zd %zd\n",ts,ss,(rsb_printf_int_t)n,(rsb_printf_int_t)1);
+		for(i=0;i<n;++i) 
+		RSB_FPRINTF(stream,RSB_MATRIX_STORAGE_FLOAT_COMPLEX_PRINTF_STRING "\n",				crealf(v1p[i*inc]),cimagf(v1p[i*inc])		);
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		const double complex *v1p = v1;
+		if(want_header)RSB_FPRINTF(stream,"%%%%MatrixMarket matrix array %s %s\n%zd %zd\n",ts,ss,(rsb_printf_int_t)n,(rsb_printf_int_t)1);
+		for(i=0;i<n;++i) 
+		RSB_FPRINTF(stream,RSB_MATRIX_STORAGE_DOUBLE_COMPLEX_PRINTF_STRING "\n",		creal(v1p[i*inc]),cimag(v1p[i*inc])		);
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+#else
+	return RSB_ERR_UNSUPPORTED_FEATURE; 
+#endif
+errb:
+	return RSB_ERR_BADARGS;
+}
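+
+/*
+ * Illustrative sketch (not compiled in): with style 0x1 the vector is
+ * emitted with a Matrix Market "array" header; e.g. for n == 2 and unit
+ * stride the output resembles (exact formatting depends on the
+ * RSB_MATRIX_STORAGE_*_PRINTF_STRING macros and on the symmetry string):
+ *
+ *   %%MatrixMarket matrix array real general
+ *   2 1
+ *   1.000000e+00
+ *   2.000000e+00
+ */
+#if 0
+static void rsb__debug_print_vector_extra_example(void)
+{
+	const double x[2] = { 1.0, 2.0 };
+	rsb__debug_print_vector_extra(x, 2, RSB_NUMERICAL_TYPE_DOUBLE, 1, 0x1, stdout);
+}
+#endif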
+
+rsb_err_t rsb__debug_print_vector(const void * v1, size_t n, rsb_type_t type, size_t inc){
+	return rsb__debug_print_vector_extra(v1, n, type, inc, 0x0, stdout);
+}
+
+rsb_err_t rsb__debug_print_vectors(const void * v1, const void * v2, size_t n, size_t incx, size_t incy, rsb_type_t type){
+	/*! 
+	 * A debug function for printing two vectors of a specified type, side by side.
+	 **/
+#if RSB_ALLOW_STDOUT
+	size_t i;
+	if(!v1 || !v2)return RSB_ERR_BADARGS;
+
+	RSB_STDERR("\t vectors  :\n");
+	
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		const double *v1p = v1,*v2p = v2;
+		for(i=0;i<n;++i) 
+		RSB_STDOUT(RSB_MATRIX_STORAGE_DOUBLE_PRINTF_STRING" "RSB_MATRIX_STORAGE_DOUBLE_PRINTF_STRING"\n",v1p[(i)*incx],v2p[(i)*incy]);
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		const float *v1p = v1,*v2p = v2;
+		for(i=0;i<n;++i) 
+		RSB_STDOUT(RSB_MATRIX_STORAGE_FLOAT_PRINTF_STRING" "RSB_MATRIX_STORAGE_FLOAT_PRINTF_STRING"\n",v1p[(i)*incx],v2p[(i)*incy]);
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		const float complex *v1p = v1,*v2p = v2;
+		for(i=0;i<n;++i) 
+		RSB_STDOUT(RSB_MATRIX_STORAGE_FLOAT_COMPLEX_PRINTF_STRING" "RSB_MATRIX_STORAGE_FLOAT_COMPLEX_PRINTF_STRING"\n",crealf(v1p[(i)*incx]),cimagf(v1p[(i)*incx]),crealf(v2p[(i)*incy]),cimagf(v2p[(i)*incy]));
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		const double complex *v1p = v1,*v2p = v2;
+		for(i=0;i<n;++i) 
+		RSB_STDOUT(RSB_MATRIX_STORAGE_DOUBLE_COMPLEX_PRINTF_STRING" "RSB_MATRIX_STORAGE_DOUBLE_COMPLEX_PRINTF_STRING"\n",creal(v1p[(i)*incx]),cimag(v1p[(i)*incx]),creal(v2p[(i)*incy]),cimag(v2p[(i)*incy]));
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+#else
+	return RSB_ERR_UNSUPPORTED_FEATURE; 
+#endif
+}
+
+
+rsb_err_t rsb__do_account_sorted_optimized_css(
+	 const rsb_coo_idx_t * MIndx, const rsb_coo_idx_t * mIndx,
+	 const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim,
+	 const rsb_nnz_idx_t nnz, rsb_nnz_idx_t * elements_per_block_row, rsb_nnz_idx_t * blocks_per_block_row
+)
+{
+	/**
+	 	\ingroup gr_internals
+
+		Counts the elements and blocks falling in each (block) row of a sorted coordinate input.
+		The elements_per_block_row and blocks_per_block_row arrays are expected to be zeroed by the caller.
+		FIXME : missing error handling.
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t n = 0;
+
+	if(blocks_per_block_row)
+	{
+		for(n=0;n<nnz;++n)
+		{
+			RSB_DEBUG_ASSERT(MIndx[n]<Mdim);
+			RSB_DEBUG_ASSERT(MIndx[n]>=0);
+			elements_per_block_row[MIndx[n]]++;
+			blocks_per_block_row  [MIndx[n]]++;
+		}
+	}
+	else
+	{
+		for(n=0;n<nnz;++n)
+		{
+			RSB_DEBUG_ASSERT(MIndx[n]<Mdim);
+			RSB_DEBUG_ASSERT(MIndx[n]>=0);
+			elements_per_block_row[MIndx[n]]++;
+		}
+	}
+	RSB_DO_ERR_RETURN(errval)
+}
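+
+/*
+ * Illustrative sketch (not compiled in): per-row accounting on a tiny sorted
+ * coordinate input; the output arrays are zeroed beforehand, as the function
+ * expects.
+ */
+#if 0
+static void rsb__do_account_sorted_optimized_css_example(void)
+{
+	const rsb_coo_idx_t IA[3] = { 0, 0, 2 }, JA[3] = { 0, 1, 2 };
+	rsb_nnz_idx_t epbr[3] = { 0, 0, 0 }, bpbr[3] = { 0, 0, 0 };
+	/* afterwards: epbr == {2,0,1} and bpbr == {2,0,1} (1x1 blocks) */
+	rsb__do_account_sorted_optimized_css(IA, JA, 3, 3, 3, epbr, bpbr);
+}
+#endif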
+
+rsb_err_t rsb__do_account_sorted_optimized(
+	 struct rsb_mtx_t * mtxAp,
+	 const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA,
+	 const rsb_coo_idx_t Idim, const rsb_coo_idx_t Jdim,
+	 const rsb_nnz_idx_t nnz, const struct rsb_mtx_partitioning_info_t * pinfop,
+rsb_nnz_idx_t * elements_per_block_row, 
+rsb_nnz_idx_t * blocks_per_block_row
+)
+{
+	/**
+	 *	\ingroup gr_internals
+	 * 	Accounts elements and blocks per block row for sorted coordinate input, dispatching on the matrix storage format (BCOR/BCSR).
+	 */
+	rsb_coo_idx_t blockrows = 0;
+	rsb_coo_idx_t blockcolumns = 0;
+	rsb_coo_idx_t baserow = 0;
+	rsb_coo_idx_t basecolumn = 0;
+	const rsb_coo_idx_t *Mpntr = NULL;
+	const rsb_coo_idx_t *mpntr = NULL;
+	const rsb_coo_idx_t *MIndx = NULL;
+	const rsb_coo_idx_t *mIndx = NULL;
+	rsb_blk_idx_t mI = 0, MI = 0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t k = 0;	/* will index a nnz sized array */
+	int K = 0;
+	
+	if(0)
+	//if( flags & RSB_FLAG_SHOULD_DEBUG )
+		errval = rsb__do_account_sorted( mtxAp, IA, JA, nnz, pinfop, elements_per_block_row, blocks_per_block_row);
+
+	if(nnz==0)
+	{
+		/* FIXME: new case, incomplete (useful for diagonal implicit matrices) */
+		return RSB_ERR_NO_ERROR;
+	}
+
+#if RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS
+	if(!pinfop)
+	{
+		/* a performance fix */
+		if(mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)
+			return rsb__do_account_sorted_optimized_css(JA,IA,Jdim,Idim,nnz,elements_per_block_row,blocks_per_block_row);
+		else
+			return rsb__do_account_sorted_optimized_css(IA,JA,Idim,Jdim,nnz,elements_per_block_row,blocks_per_block_row);
+	}
+#endif
+	
+	if(mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)
+	{
+		mpntr = pinfop->rpntr;
+		Mpntr = pinfop->cpntr;
+		mIndx = IA;
+		MIndx = JA;
+	}
+	else
+	{
+		Mpntr = pinfop->rpntr;
+		mpntr = pinfop->cpntr;
+		MIndx = IA;
+		mIndx = JA;
+	}
+
+	/*	storage BCOR	*/
+	if( mtxAp->matrix_storage==RSB_MATRIX_STORAGE_BCOR )
+{
+	k = mI = MI = 0;K = 0;
+#if RSB_EXPERIMENTAL_USE_PURE_BCSS_FOR_CONSTRUCTOR
+/*	rsb__get_blocking_size(mtxAp, &blockrows, &blockcolumns);*/
+	rsb__get_physical_blocking_size(mtxAp, &blockrows, &blockcolumns);
+	RSB_ASSERT( blockrows && blockcolumns);
+#else
+	blockrows    = Mpntr[MI+1] - Mpntr[MI];
+	blockcolumns = mpntr[mI+1] - mpntr[mI];
+#endif
+
+
+	k = mI = MI = K=0;
+	while( MIndx[k] >= Mpntr[MI+1] )++MI;	/* skipping preceding block rows .. */
+	while( mIndx[k] >= mpntr[mI+1] )++mI;	/* skipping preceding block columns .. */
+	blockrows    = Mpntr[MI+1] - Mpntr[MI];
+	blockcolumns = mpntr[mI+1] - mpntr[mI];
+	baserow = Mpntr[MI];
+	basecolumn = mpntr[mI];
+	*elements_per_block_row = 0;
+	*blocks_per_block_row   = 0;	
+	elements_per_block_row[0] += blockrows * blockcolumns;	/* i.e. *elements_per_block_row */
+	blocks_per_block_row[MI]   +=1;
+
+	while(RSB_LIKELY(k<nnz))
+	{
+#ifdef DEBUG
+		if( MIndx[k] < baserow  )
+		{
+			RSB_ERROR("k=%zd : (%zd %zd) is not ok\n",k,(rsb_printf_int_t) (MIndx[k]+1),(rsb_printf_int_t)(mIndx[k]+1));
+			RSB_STDERR("(minor dim. index %zd < base row %zd)\n",(rsb_printf_int_t)MIndx[k] , (rsb_printf_int_t)baserow);
+			errval = RSB_ERR_INTERNAL_ERROR;
+			goto ret;
+		}
+#endif
+
+		if( mIndx[k] >= basecolumn+blockcolumns  )
+		{
+			/* new block column, for sure */
+
+			while( mIndx[k] >= mpntr[mI+1] )++mI;
+			blockcolumns = mpntr[mI+1] - mpntr[mI];
+			basecolumn = mpntr[mI];
+
+			if( MIndx[k] >= baserow+blockrows  )
+			{
+				/* new block row  */
+
+				while( MIndx[k] >= Mpntr[MI+1] )++MI;
+				blockrows    = Mpntr[MI+1] - Mpntr[MI];
+				baserow = Mpntr[MI];
+			}
+			else
+			{
+				/* same block row  */
+			}
+			*elements_per_block_row += blockrows * blockcolumns;
+			blocks_per_block_row[MI]   +=1;
+			++K;
+		}
+		else
+		if( MIndx[k] >= baserow+blockrows  )
+		{
+			/* new row block, for sure */
+
+			while( MIndx[k] >= Mpntr[MI+1] )++MI;
+			blockrows    = Mpntr[MI+1] - Mpntr[MI];
+			baserow = Mpntr[MI];
+
+			if( mIndx[k] < basecolumn  )
+			{
+				/* new row block, new block column  */
+
+				mI = 0;
+				while( mIndx[k] >= mpntr[mI+1] )++mI;
+				blockcolumns = mpntr[mI+1] - mpntr[mI];
+				basecolumn = mpntr[mI];
+			}
+			else
+			{
+				/* new row block, same column  */
+			}
+			/* get rid of this var : elements_per_block_row */
+			*elements_per_block_row += blockrows * blockcolumns;
+			blocks_per_block_row[MI]   +=1;
+			++K;
+		}
+		else
+		{
+			/* same block row for sure */
+		}
+		++k;
+	}
+	errval = RSB_ERR_NO_ERROR;goto ret;
+	}
+	/*	storage BCSR	*/
+	if( mtxAp->matrix_storage==RSB_MATRIX_STORAGE_BCSR )
+{
+	k = mI = MI = 0;K = 0;
+#if RSB_EXPERIMENTAL_USE_PURE_BCSS_FOR_CONSTRUCTOR
+/*	rsb__get_blocking_size(mtxAp, &blockrows, &blockcolumns);*/
+	rsb__get_physical_blocking_size(mtxAp, &blockrows, &blockcolumns);
+	RSB_ASSERT( blockrows && blockcolumns);
+#else
+	blockrows    = Mpntr[MI+1] - Mpntr[MI];
+	blockcolumns = mpntr[mI+1] - mpntr[mI];
+#endif
+
+
+	k = mI = MI = K=0;
+	while( MIndx[k] >= (blockrows   *(MI+1)) )++MI;	/* skipping preceding block rows .. */
+	while( mIndx[k] >= (blockcolumns*(mI+1)) )++mI;	/* skipping preceding block columns .. */
+	blockrows    = (blockrows   *(MI+1)) - (blockrows   *(MI));
+	blockcolumns = (blockcolumns*(mI+1)) - (blockcolumns*(mI));
+	baserow = (blockrows   *(MI));
+	basecolumn = (blockcolumns*(mI));
+	*elements_per_block_row = 0;
+	*blocks_per_block_row   = 0;	
+	elements_per_block_row[0] += blockrows * blockcolumns;	/* i.e. *elements_per_block_row */
+	blocks_per_block_row[MI]   +=1;
+
+	while(RSB_LIKELY(k<nnz))
+	{
+#ifdef DEBUG
+		if( MIndx[k] < baserow  )
+		{
+			RSB_ERROR("k=%zd : (%zd %zd) is not ok\n",k,(rsb_printf_int_t) (MIndx[k]+1),(rsb_printf_int_t)(mIndx[k]+1));
+			RSB_STDERR("(minor dim. index %zd < base row %zd)\n",(rsb_printf_int_t)MIndx[k] , (rsb_printf_int_t)baserow);
+			errval = RSB_ERR_INTERNAL_ERROR;
+			goto ret;
+		}
+#endif
+
+		if( mIndx[k] >= basecolumn+blockcolumns  )
+		{
+			/* new block column, for sure */
+			mI = mIndx[k]/blockcolumns;
+			basecolumn = (blockcolumns*(mI));
+
+			if( MIndx[k] >= baserow+blockrows  )
+			{
+				/* new block row  */
+				MI = MIndx[k]/blockrows;
+				baserow = (blockrows   *(MI));
+			}
+			else
+			{
+				/* same block row  */
+			}
+			*elements_per_block_row += blockrows * blockcolumns;
+			blocks_per_block_row[MI]   +=1;
+			++K;
+		}
+		else
+		if( MIndx[k] >= baserow+blockrows  )
+		{
+			/* new row block, for sure */
+			MI = MIndx[k]/blockrows;
+			baserow = (blockrows   *(MI));
+
+			if( mIndx[k] < basecolumn  )
+			{
+				/* new row block, new block column  */
+				mI = mIndx[k]/blockcolumns;
+				basecolumn = (blockcolumns*(mI));
+			}
+			else
+			{
+				/* new row block, same column  */
+			}
+			/* get rid of this var : elements_per_block_row */
+			*elements_per_block_row += blockrows * blockcolumns;
+			blocks_per_block_row[MI]   +=1;
+			++K;
+		}
+		else
+		{
+			/* same block row for sure */
+		}
+		++k;
+	}
+	errval = RSB_ERR_NO_ERROR;goto ret;
+	}
+	errval = RSB_ERR_INTERNAL_ERROR;
+ret:	return errval;
+}
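+
+/*
+ * Worked example for the BCSR branch above (illustrative): with 2x2 blocks
+ * and the sorted input (0,0),(0,1),(1,3),(2,2), the entries (0,0) and (0,1)
+ * fall in block (0,0), (1,3) opens block (0,1) and (2,2) opens block (1,1);
+ * hence *elements_per_block_row accumulates 3 * 4 == 12 stored elements,
+ * while blocks_per_block_row counts 2 blocks in block row 0 and 1 block in
+ * block row 1.
+ */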
+
+rsb_err_t rsb__do_insert_sorted_optimized_css( struct rsb_mtx_t * mtxAp, const void *VA, const rsb_coo_idx_t * MIndx, const rsb_coo_idx_t * mIndx, const rsb_nnz_idx_t nnz)
+{
+	/**
+	 	\ingroup gr_internals
+
+		Fills the matrix arrays (VA, bindx, indptr) from sorted coordinate input, for the CSR/CSC (1x1 blocked) fast path.
+		FIXME : missing error handling.
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t n = 0;
+
+	/* in case of RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR, they are equal */
+	if(mtxAp->VA != VA)
+		rsb_memcpy(mtxAp->VA  ,VA  ,mtxAp->el_size*nnz);
+
+	for(n=0;n<nnz+1;++n)
+		mtxAp->indptr[n] = n;
+
+	for(n=0;n<mtxAp->nnz;++n)
+		mtxAp->bindx [n] = mIndx[n];
+	mtxAp->bindx [nnz] = 0;
+
+	/* bindx and indptr are set above; nothing else to fill here */
+	RSB_DO_ERR_RETURN(errval)
+}
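+
+/*
+ * Illustrative note: in the 1x1 CSS fast path above, insertion reduces to
+ * copying VA (unless operating in place), setting indptr to the identity map
+ * (indptr[n] == n) and bindx to the minor-dimension indices. E.g. for the
+ * sorted input (0,0), (0,1), (2,2) one gets indptr == {0,1,2,3} and
+ * bindx == {0,1,2,0} (the trailing 0 being the safety element).
+ */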
+
+rsb_err_t rsb__do_insert_sorted_optimized( struct rsb_mtx_t * mtxAp, const void *VA, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const rsb_nnz_idx_t nnz, const struct rsb_mtx_partitioning_info_t * pinfop)
+{
+	/*
+	 *	FIXME ! UNFINISHED 
+	 * 	Note : the linked format is incomplete, so it does not support block column major order well.
+	 */
+	rsb_coo_idx_t blockrows = 0;
+	rsb_coo_idx_t blockcolumns = 0;
+	rsb_coo_idx_t baserow = 0;
+	rsb_coo_idx_t basecolumn = 0;
+	rsb_nnz_idx_t *indptr = mtxAp->indptr;
+	rsb_nnz_idx_t *bindx = mtxAp->bindx;
+	const rsb_coo_idx_t *Mpntr = NULL;
+	const rsb_coo_idx_t *mpntr = NULL;
+	const rsb_coo_idx_t *MIndx = NULL;
+	const rsb_coo_idx_t *mIndx = NULL;
+	rsb_blk_idx_t mI = 0, MI = 0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t k = 0;	/* will index a nnz sized array */
+	rsb_nnz_idx_t K = 0;
+
+	if(nnz==0)
+	{
+		/* FIXME: new case, incomplete (useful for diagonal implicit matrices) */
+		K = 0;		/* if nnz == 0 then K == 0 */
+		bindx[K] = 0;	// the first element past the used part of bindx is set to a safe value
+		return RSB_ERR_NO_ERROR;
+	}
+
+	if(0)
+		return rsb__do_insert_sorted( mtxAp, VA, IA, JA, nnz, pinfop);
+
+#if RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS
+	if(!pinfop)
+	{
+		/* a performance fix */
+		if(mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)
+			return rsb__do_insert_sorted_optimized_css( mtxAp, VA, JA, IA, nnz );
+		else
+			return rsb__do_insert_sorted_optimized_css( mtxAp, VA, IA, JA, nnz );
+	}
+#endif
+
+	if(mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)
+	{
+		mpntr = pinfop->rpntr;
+		Mpntr = pinfop->cpntr;
+		mIndx = IA;
+		MIndx = JA;
+	}
+	else
+	{
+		Mpntr = pinfop->rpntr;
+		mpntr = pinfop->cpntr;
+		MIndx = IA;
+		mIndx = JA;
+	}
+
+
+	/*	type double, storage BCOR	*/
+	if( mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+	if( mtxAp->matrix_storage==RSB_MATRIX_STORAGE_BCOR )
+{
+	double * dst = mtxAp->VA;
+	k = mI = MI = 0;K = 0;
+#if RSB_EXPERIMENTAL_USE_PURE_BCSS_FOR_CONSTRUCTOR
+/*	rsb__get_blocking_size(mtxAp, &blockrows, &blockcolumns);*/
+	rsb__get_physical_blocking_size(mtxAp, &blockrows, &blockcolumns);
+	RSB_ASSERT( blockrows && blockcolumns);
+#else
+	blockrows    = Mpntr[MI+1] - Mpntr[MI];
+	blockcolumns = mpntr[mI+1] - mpntr[mI];
+#endif
+
+	while( MIndx[k] >= Mpntr[MI+1] )++MI;	/* skipping preceding block rows .. */
+	while( mIndx[k] >= mpntr[mI+1] )++mI;	/* skipping preceding block columns .. */
+	baserow = Mpntr[MI];
+	basecolumn = mpntr[mI];
+	bindx [ K ] = mI;			/* a new block */
+	indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;	/* FIXME : DUPLICATION ?! see later */
+
+
+
+	if( (mtxAp->flags & RSB_FLAG_SORTED_INPUT ) != 0 && 1 /* ONLY FOR 1 X 1 BLOCKED */)
+	{
+		//RSB_STDERR("rsb__do_insert_sorted_optimized : TODO : please specialize for specific blockings ! \n");
+	}
+
+while(RSB_LIKELY(k<nnz))
+	{
+#ifdef DEBUG
+		if( MIndx[k] < baserow  )
+		{
+			RSB_ERROR("k=%zd : (%zd %zd) is not ok\n",k, (rsb_printf_int_t)(MIndx[k]+1),(rsb_printf_int_t)(mIndx[k]+1));
+			RSB_STDERR("(minor dim. index %zd < base row %zd)\n",(rsb_printf_int_t)MIndx[k] , (rsb_printf_int_t)baserow);
+			errval = RSB_ERR_INTERNAL_ERROR;
+			goto err;/* NOTE : this jump could be evil */
+		}
+#endif
+
+		if( mIndx[k] >= basecolumn+blockcolumns  )
+		{
+			/* new block column, for sure */
+
+			while( mIndx[k] >= mpntr[mI+1] )++mI;
+			blockcolumns = mpntr[mI+1] - mpntr[mI];
+			basecolumn = mpntr[mI];
+
+			if( MIndx[k] >= baserow+blockrows  )
+			{
+				/* new block row  */
+
+				while( MIndx[k] >= Mpntr[MI+1] )++MI;
+				blockrows    = Mpntr[MI+1] - Mpntr[MI];
+				baserow = Mpntr[MI];
+			}
+			else
+			{
+				/* same block row  */
+			}
+			++K;
+			bindx [ K ] = mI;			/* a new block */
+			indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;
+		}
+		else
+		if( MIndx[k] >= baserow+blockrows  )
+		{
+			/* new row block, for sure */
+
+				while( MIndx[k] >= Mpntr[MI+1] )++MI;
+				blockrows    = Mpntr[MI+1] - Mpntr[MI];
+				baserow = Mpntr[MI];
+
+			if( mIndx[k] < basecolumn  )
+			{
+				/* new row block, new block column  */
+				mI = 0;
+
+				while( mIndx[k] >= mpntr[mI+1] )++mI;
+				blockcolumns = mpntr[mI+1] - mpntr[mI];
+				basecolumn = mpntr[mI];
+			}
+			else
+			{
+				/* new row block, same column  */
+			}
+			++K;
+			bindx [ K ] = mI;			/* a new block */
+			indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;
+		}
+		else
+		{
+			/* same block row for sure */
+		}
+		dst = mtxAp->VA;
+		RSB_DEBUG_ASSERT(mI>=0);
+		RSB_DEBUG_ASSERT(MI>=0);
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst += indptr[ K ]; /* alternatives: K * blockrows * blockcolumns; RSB_BLOCK_OFFSET(mtxAp,K)/mtxAp->el_size */ /* FIXME : unfinished ! */
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst += (MIndx[k]-baserow)*blockcolumns+(mIndx[k]-basecolumn);
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst[0] = ((const double*)VA)[k];
+		++k;
+	}
+	if(nnz)++K;	/* if nnz == 0 then K = 0 */
+	bindx[K] = 0;	// the first element past the used part of bindx is set to a safe value
+	return RSB_ERR_NO_ERROR;	/* FIXME ! */
+}
+	/*	type float, storage BCOR	*/
+	if( mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+	if( mtxAp->matrix_storage==RSB_MATRIX_STORAGE_BCOR )
+{
+	float * dst = mtxAp->VA;
+	k = mI = MI = 0;K = 0;
+#if RSB_EXPERIMENTAL_USE_PURE_BCSS_FOR_CONSTRUCTOR
+/*	rsb__get_blocking_size(mtxAp, &blockrows, &blockcolumns);*/
+	rsb__get_physical_blocking_size(mtxAp, &blockrows, &blockcolumns);
+	RSB_ASSERT( blockrows && blockcolumns);
+#else
+	blockrows    = Mpntr[MI+1] - Mpntr[MI];
+	blockcolumns = mpntr[mI+1] - mpntr[mI];
+#endif
+
+	while( MIndx[k] >= Mpntr[MI+1] )++MI;	/* skipping preceding block rows .. */
+	while( mIndx[k] >= mpntr[mI+1] )++mI;	/* skipping preceding block columns .. */
+	baserow = Mpntr[MI];
+	basecolumn = mpntr[mI];
+	bindx [ K ] = mI;			/* a new block */
+	indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;	/* FIXME : DUPLICATION ?! see later */
+
+
+
+	if( (mtxAp->flags & RSB_FLAG_SORTED_INPUT ) != 0 && 1 /* ONLY FOR 1 X 1 BLOCKED */)
+	{
+		//RSB_STDERR("rsb__do_insert_sorted_optimized : TODO : please specialize for specific blockings ! \n");
+	}
+
+while(RSB_LIKELY(k<nnz))
+	{
+#ifdef DEBUG
+		if( MIndx[k] < baserow  )
+		{
+			RSB_ERROR("k=%zd : (%zd %zd) is not ok\n",k, (rsb_printf_int_t)(MIndx[k]+1),(rsb_printf_int_t)(mIndx[k]+1));
+			RSB_STDERR("(minor dim. index %zd < base row %zd)\n",(rsb_printf_int_t)MIndx[k] , (rsb_printf_int_t)baserow);
+			errval = RSB_ERR_INTERNAL_ERROR;
+			goto err;/* NOTE : this jump could be evil */
+		}
+#endif
+
+		if( mIndx[k] >= basecolumn+blockcolumns  )
+		{
+			/* new block column, for sure */
+
+			while( mIndx[k] >= mpntr[mI+1] )++mI;
+			blockcolumns = mpntr[mI+1] - mpntr[mI];
+			basecolumn = mpntr[mI];
+
+			if( MIndx[k] >= baserow+blockrows  )
+			{
+				/* new block row  */
+
+				while( MIndx[k] >= Mpntr[MI+1] )++MI;
+				blockrows    = Mpntr[MI+1] - Mpntr[MI];
+				baserow = Mpntr[MI];
+			}
+			else
+			{
+				/* same block row  */
+			}
+			++K;
+			bindx [ K ] = mI;			/* a new block */
+			indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;
+		}
+		else
+		if( MIndx[k] >= baserow+blockrows  )
+		{
+			/* new row block, for sure */
+
+				while( MIndx[k] >= Mpntr[MI+1] )++MI;
+				blockrows    = Mpntr[MI+1] - Mpntr[MI];
+				baserow = Mpntr[MI];
+
+			if( mIndx[k] < basecolumn  )
+			{
+				/* new row block, new block column  */
+				mI = 0;
+
+				while( mIndx[k] >= mpntr[mI+1] )++mI;
+				blockcolumns = mpntr[mI+1] - mpntr[mI];
+				basecolumn = mpntr[mI];
+			}
+			else
+			{
+				/* new row block, same column  */
+			}
+			++K;
+			bindx [ K ] = mI;			/* a new block */
+			indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;
+		}
+		else
+		{
+			/* same block row for sure */
+		}
+		dst = mtxAp->VA;
+		RSB_DEBUG_ASSERT(mI>=0);
+		RSB_DEBUG_ASSERT(MI>=0);
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst += indptr[ K ]; /* alternatives: K * blockrows * blockcolumns; RSB_BLOCK_OFFSET(mtxAp,K)/mtxAp->el_size */ /* FIXME : unfinished ! */
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst += (MIndx[k]-baserow)*blockcolumns+(mIndx[k]-basecolumn);
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst[0] = ((const float*)VA)[k];
+		++k;
+	}
+	if(nnz)++K;	/* if nnz == 0 then K = 0 */
+	bindx[K] = 0;	// the first element past the used part of bindx is set to a safe value
+	return RSB_ERR_NO_ERROR;	/* FIXME ! */
+}
+	/*	type float complex, storage BCOR	*/
+	if( mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	if( mtxAp->matrix_storage==RSB_MATRIX_STORAGE_BCOR )
+{
+	float complex * dst = mtxAp->VA;
+	k = mI = MI = 0;K = 0;
+#if RSB_EXPERIMENTAL_USE_PURE_BCSS_FOR_CONSTRUCTOR
+/*	rsb__get_blocking_size(mtxAp, &blockrows, &blockcolumns);*/
+	rsb__get_physical_blocking_size(mtxAp, &blockrows, &blockcolumns);
+	RSB_ASSERT( blockrows && blockcolumns);
+#else
+	blockrows    = Mpntr[MI+1] - Mpntr[MI];
+	blockcolumns = mpntr[mI+1] - mpntr[mI];
+#endif
+
+	while( MIndx[k] >= Mpntr[MI+1] )++MI;	/* skipping preceding block rows .. */
+	while( mIndx[k] >= mpntr[mI+1] )++mI;	/* skipping preceding block columns .. */
+	baserow = Mpntr[MI];
+	basecolumn = mpntr[mI];
+	bindx [ K ] = mI;			/* a new block */
+	indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;	/* FIXME : DUPLICATION ?! see later */
+
+
+
+	if( (mtxAp->flags & RSB_FLAG_SORTED_INPUT ) != 0 && 1 /* ONLY FOR 1 X 1 BLOCKED */)
+	{
+		//RSB_STDERR("rsb__do_insert_sorted_optimized : TODO : please specialize for specific blockings ! \n");
+	}
+
+while(RSB_LIKELY(k<nnz))
+	{
+#ifdef DEBUG
+		if( MIndx[k] < baserow  )
+		{
+			RSB_ERROR("k=%zd : (%zd %zd) is not ok\n",k, (rsb_printf_int_t)(MIndx[k]+1),(rsb_printf_int_t)(mIndx[k]+1));
+			RSB_STDERR("(minor dim. index %zd < base row %zd)\n",(rsb_printf_int_t)MIndx[k] , (rsb_printf_int_t)baserow);
+			errval = RSB_ERR_INTERNAL_ERROR;
+			goto err;/* NOTE : this jump could be evil */
+		}
+#endif
+
+		if( mIndx[k] >= basecolumn+blockcolumns  )
+		{
+			/* new block column, for sure */
+
+			while( mIndx[k] >= mpntr[mI+1] )++mI;
+			blockcolumns = mpntr[mI+1] - mpntr[mI];
+			basecolumn = mpntr[mI];
+
+			if( MIndx[k] >= baserow+blockrows  )
+			{
+				/* new block row  */
+
+				while( MIndx[k] >= Mpntr[MI+1] )++MI;
+				blockrows    = Mpntr[MI+1] - Mpntr[MI];
+				baserow = Mpntr[MI];
+			}
+			else
+			{
+				/* same block row  */
+			}
+			++K;
+			bindx [ K ] = mI;			/* a new block */
+			indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;
+		}
+		else
+		if( MIndx[k] >= baserow+blockrows  )
+		{
+			/* new row block, for sure */
+
+				while( MIndx[k] >= Mpntr[MI+1] )++MI;
+				blockrows    = Mpntr[MI+1] - Mpntr[MI];
+				baserow = Mpntr[MI];
+
+			if( mIndx[k] < basecolumn  )
+			{
+				/* new row block, new block column  */
+				mI = 0;
+
+				while( mIndx[k] >= mpntr[mI+1] )++mI;
+				blockcolumns = mpntr[mI+1] - mpntr[mI];
+				basecolumn = mpntr[mI];
+			}
+			else
+			{
+				/* new row block, same column  */
+			}
+			++K;
+			bindx [ K ] = mI;			/* a new block */
+			indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;
+		}
+		else
+		{
+			/* same block row for sure */
+		}
+		dst = mtxAp->VA;
+		RSB_DEBUG_ASSERT(mI>=0);
+		RSB_DEBUG_ASSERT(MI>=0);
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst += indptr[ K ]; /* alternatives: K * blockrows * blockcolumns; RSB_BLOCK_OFFSET(mtxAp,K)/mtxAp->el_size */ /* FIXME : unfinished ! */
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst += (MIndx[k]-baserow)*blockcolumns+(mIndx[k]-basecolumn);
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst[0] = ((const float complex*)VA)[k];
+		++k;
+	}
+	if(nnz)++K;	/* if nnz == 0 then K = 0 */
+	bindx[K] = 0;	// the first element past the used part of bindx is set to a safe value
+	return RSB_ERR_NO_ERROR;	/* FIXME ! */
+}
+	/*	type double complex, storage BCOR	*/
+	if( mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	if( mtxAp->matrix_storage==RSB_MATRIX_STORAGE_BCOR )
+{
+	double complex * dst = mtxAp->VA;
+	k = mI = MI = 0;K = 0;
+#if RSB_EXPERIMENTAL_USE_PURE_BCSS_FOR_CONSTRUCTOR
+/*	rsb__get_blocking_size(mtxAp, &blockrows, &blockcolumns);*/
+	rsb__get_physical_blocking_size(mtxAp, &blockrows, &blockcolumns);
+	RSB_ASSERT( blockrows && blockcolumns);
+#else
+	blockrows    = Mpntr[MI+1] - Mpntr[MI];
+	blockcolumns = mpntr[mI+1] - mpntr[mI];
+#endif
+
+	while( MIndx[k] >= Mpntr[MI+1] )++MI;	/* skipping preceding block rows .. */
+	while( mIndx[k] >= mpntr[mI+1] )++mI;	/* skipping preceding block columns .. */
+	baserow = Mpntr[MI];
+	basecolumn = mpntr[mI];
+	bindx [ K ] = mI;			/* a new block */
+	indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;	/* FIXME : DUPLICATION ?! see later */
+
+
+
+	if( (mtxAp->flags & RSB_FLAG_SORTED_INPUT ) != 0 && 1 /* ONLY FOR 1 X 1 BLOCKED */)
+	{
+		//RSB_STDERR("rsb__do_insert_sorted_optimized : TODO : please specialize for specific blockings ! \n");
+	}
+
+while(RSB_LIKELY(k<nnz))
+	{
+#ifdef DEBUG
+		if( MIndx[k] < baserow  )
+		{
+			RSB_ERROR("k=%zd : (%zd %zd) is not ok\n",k, (rsb_printf_int_t)(MIndx[k]+1),(rsb_printf_int_t)(mIndx[k]+1));
+			RSB_STDERR("(minor dim. index %zd < base row %zd)\n",(rsb_printf_int_t)MIndx[k] , (rsb_printf_int_t)baserow);
+			errval = RSB_ERR_INTERNAL_ERROR;
+			goto err;/* NOTE : this jump could be evil */
+		}
+#endif
+
+		if( mIndx[k] >= basecolumn+blockcolumns  )
+		{
+			/* new block column, for sure */
+
+			while( mIndx[k] >= mpntr[mI+1] )++mI;
+			blockcolumns = mpntr[mI+1] - mpntr[mI];
+			basecolumn = mpntr[mI];
+
+			if( MIndx[k] >= baserow+blockrows  )
+			{
+				/* new block row  */
+
+				while( MIndx[k] >= Mpntr[MI+1] )++MI;
+				blockrows    = Mpntr[MI+1] - Mpntr[MI];
+				baserow = Mpntr[MI];
+			}
+			else
+			{
+				/* same block row  */
+			}
+			++K;
+			bindx [ K ] = mI;			/* a new block */
+			indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;
+		}
+		else
+		if( MIndx[k] >= baserow+blockrows  )
+		{
+			/* new row block, for sure */
+
+				while( MIndx[k] >= Mpntr[MI+1] )++MI;
+				blockrows    = Mpntr[MI+1] - Mpntr[MI];
+				baserow = Mpntr[MI];
+
+			if( mIndx[k] < basecolumn  )
+			{
+				/* new row block, new block column  */
+				mI = 0;
+
+				while( mIndx[k] >= mpntr[mI+1] )++mI;
+				blockcolumns = mpntr[mI+1] - mpntr[mI];
+				basecolumn = mpntr[mI];
+			}
+			else
+			{
+				/* new row block, same column  */
+			}
+			++K;
+			bindx [ K ] = mI;			/* a new block */
+			indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;
+		}
+		else
+		{
+			/* same block row for sure */
+		}
+		dst = mtxAp->VA;
+		RSB_DEBUG_ASSERT(mI>=0);
+		RSB_DEBUG_ASSERT(MI>=0);
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst += indptr[ K ]; /* alternatives: K * blockrows * blockcolumns; RSB_BLOCK_OFFSET(mtxAp,K)/mtxAp->el_size */ /* FIXME : unfinished ! */
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst += (MIndx[k]-baserow)*blockcolumns+(mIndx[k]-basecolumn);
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst[0] = ((const double complex*)VA)[k];
+		++k;
+	}
+	if(nnz)++K;	/* if nnz == 0 then K = 0 */
+	bindx[K] = 0;	// the first element past the used part of bindx is set to a safe value
+	return RSB_ERR_NO_ERROR;	/* FIXME ! */
+}
+	/*	type double, storage BCSR	*/
+	if( mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+	if( mtxAp->matrix_storage==RSB_MATRIX_STORAGE_BCSR )
+{
+	double * dst = mtxAp->VA;
+	k = mI = MI = 0;K = 0;
+#if RSB_EXPERIMENTAL_USE_PURE_BCSS_FOR_CONSTRUCTOR
+/*	rsb__get_blocking_size(mtxAp, &blockrows, &blockcolumns);*/
+	rsb__get_physical_blocking_size(mtxAp, &blockrows, &blockcolumns);
+	RSB_ASSERT( blockrows && blockcolumns);
+#else
+	blockrows    = Mpntr[MI+1] - Mpntr[MI];
+	blockcolumns = mpntr[mI+1] - mpntr[mI];
+#endif
+
+	while( MIndx[k] >= (blockrows   *(MI+1)) )++MI;	/* skipping preceding block rows .. */
+	while( mIndx[k] >= (blockcolumns*(mI+1)) )++mI;	/* skipping preceding block columns .. */
+	baserow = (blockrows   *(MI));
+	basecolumn = (blockcolumns*(mI));
+	bindx [ K ] = mI;			/* a new block */
+	indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;	/* FIXME : DUPLICATION ?! see later */
+
+
+
+	if( (mtxAp->flags & RSB_FLAG_SORTED_INPUT ) != 0 && 1 /* ONLY FOR 1 X 1 BLOCKED */)
+	{
+		//RSB_STDERR("rsb__do_insert_sorted_optimized : TODO : please specialize for specific blockings ! \n");
+	}
+
+while(RSB_LIKELY(k<nnz))
+	{
+#ifdef DEBUG
+		if( MIndx[k] < baserow  )
+		{
+			RSB_ERROR("k=%zd : (%zd %zd) is not ok\n",k, (rsb_printf_int_t)(MIndx[k]+1),(rsb_printf_int_t)(mIndx[k]+1));
+			RSB_STDERR("(minor dim. index %zd < base row %zd)\n",(rsb_printf_int_t)MIndx[k] , (rsb_printf_int_t)baserow);
+			errval = RSB_ERR_INTERNAL_ERROR;
+			goto err;/* NOTE : this jump could be evil */
+		}
+#endif
+
+		if( mIndx[k] >= basecolumn+blockcolumns  )
+		{
+			/* new block column, for sure */
+			mI = mIndx[k]/blockcolumns;
+			basecolumn = (blockcolumns*(mI));
+
+			if( MIndx[k] >= baserow+blockrows  )
+			{
+				/* new block row  */
+				MI = MIndx[k]/blockrows;
+				baserow = (blockrows   *(MI));
+			}
+			else
+			{
+				/* same block row  */
+			}
+			++K;
+			bindx [ K ] = mI;			/* a new block */
+			indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;
+		}
+		else
+		if( MIndx[k] >= baserow+blockrows  )
+		{
+			/* new row block, for sure */
+				MI = MIndx[k]/blockrows;
+				baserow = (blockrows   *(MI));
+
+			if( mIndx[k] < basecolumn  )
+			{
+				/* new row block, new block column  */
+				mI = mIndx[k]/blockcolumns;
+				basecolumn = (blockcolumns*(mI));
+			}
+			else
+			{
+				/* new row block, same column  */
+			}
+			++K;
+			bindx [ K ] = mI;			/* a new block */
+			indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;
+		}
+		else
+		{
+			/* same block row for sure */
+		}
+		dst = mtxAp->VA;
+		RSB_DEBUG_ASSERT(mI>=0);
+		RSB_DEBUG_ASSERT(MI>=0);
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst += K * blockrows * blockcolumns;
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst += (MIndx[k]-baserow)*blockcolumns+(mIndx[k]-basecolumn);
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst[0] = ((const double*)VA)[k];
+		++k;
+	}
+	if(nnz)++K;	/* if nnz == 0 then K = 0 */
+	bindx[K] = 0;	// the first element past the used part of bindx is set to a safe value
+	return RSB_ERR_NO_ERROR;	/* FIXME ! */
+}
+	/*	type float, storage BCSR	*/
+	if( mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+	if( mtxAp->matrix_storage==RSB_MATRIX_STORAGE_BCSR )
+{
+	float * dst = mtxAp->VA;
+	k = mI = MI = 0;K = 0;
+#if RSB_EXPERIMENTAL_USE_PURE_BCSS_FOR_CONSTRUCTOR
+/*	rsb__get_blocking_size(mtxAp, &blockrows, &blockcolumns);*/
+	rsb__get_physical_blocking_size(mtxAp, &blockrows, &blockcolumns);
+	RSB_ASSERT( blockrows && blockcolumns);
+#else
+	blockrows    = Mpntr[MI+1] - Mpntr[MI];
+	blockcolumns = mpntr[mI+1] - mpntr[mI];
+#endif
+
+	while( MIndx[k] >= (blockrows   *(MI+1)) )++MI;	/* skipping preceding block rows .. */
+	while( mIndx[k] >= (blockcolumns*(mI+1)) )++mI;	/* skipping preceding block columns .. */
+	baserow = (blockrows   *(MI));
+	basecolumn = (blockcolumns*(mI));
+	bindx [ K ] = mI;			/* a new block */
+	indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;	/* FIXME : DUPLICATION ?! see later */
+
+
+
+	if( (mtxAp->flags & RSB_FLAG_SORTED_INPUT ) != 0 && 1 /* ONLY FOR 1 X 1 BLOCKED */)
+	{
+		//RSB_STDERR("rsb__do_insert_sorted_optimized : TODO : please specialize for specific blockings ! \n");
+	}
+
+while(RSB_LIKELY(k<nnz))
+	{
+#ifdef DEBUG
+		if( MIndx[k] < baserow  )
+		{
+			RSB_ERROR("k=%zd : (%zd %zd) is not ok\n",k, (rsb_printf_int_t)(MIndx[k]+1),(rsb_printf_int_t)(mIndx[k]+1));
+			RSB_STDERR("(minor dim. index %zd < base row %zd)\n",(rsb_printf_int_t)MIndx[k] , (rsb_printf_int_t)baserow);
+			errval = RSB_ERR_INTERNAL_ERROR;
+			goto err;/* NOTE : this jump could be evil */
+		}
+#endif
+
+		if( mIndx[k] >= basecolumn+blockcolumns  )
+		{
+			/* new block column, for sure */
+			mI = mIndx[k]/blockcolumns;
+			basecolumn = (blockcolumns*(mI));
+
+			if( MIndx[k] >= baserow+blockrows  )
+			{
+				/* new block row  */
+				MI = MIndx[k]/blockrows;
+				baserow = (blockrows   *(MI));
+			}
+			else
+			{
+				/* same block row  */
+			}
+			++K;
+			bindx [ K ] = mI;			/* a new block */
+			indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;
+		}
+		else
+		if( MIndx[k] >= baserow+blockrows  )
+		{
+			/* new row block, for sure */
+				MI = MIndx[k]/blockrows;
+				baserow = (blockrows   *(MI));
+
+			if( mIndx[k] < basecolumn  )
+			{
+				/* new row block, new block column  */
+				mI = mIndx[k]/blockcolumns;
+				basecolumn = (blockcolumns*(mI));
+			}
+			else
+			{
+				/* new row block, same column  */
+			}
+			++K;
+			bindx [ K ] = mI;			/* a new block */
+			indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;
+		}
+		else
+		{
+			/* same block row for sure */
+		}
+		dst = mtxAp->VA;
+		RSB_DEBUG_ASSERT(mI>=0);
+		RSB_DEBUG_ASSERT(MI>=0);
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst += K * blockrows * blockcolumns;
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst += (MIndx[k]-baserow)*blockcolumns+(mIndx[k]-basecolumn);
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst[0] = ((const float*)VA)[k];
+		++k;
+	}
+	if(nnz)++K;	/* if nnz == 0 then K = 0 */
+	bindx[K] = 0;	// the first element past the used part of bindx is set to a safe value
+	return RSB_ERR_NO_ERROR;	/* FIXME ! */
+}
+	/*	type float complex, storage BCSR	*/
+	if( mtxAp->typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	if( mtxAp->matrix_storage==RSB_MATRIX_STORAGE_BCSR )
+{
+	float complex * dst = mtxAp->VA;
+	k = mI = MI = 0;K = 0;
+#if RSB_EXPERIMENTAL_USE_PURE_BCSS_FOR_CONSTRUCTOR
+/*	rsb__get_blocking_size(mtxAp, &blockrows, &blockcolumns);*/
+	rsb__get_physical_blocking_size(mtxAp, &blockrows, &blockcolumns);
+	RSB_ASSERT( blockrows && blockcolumns);
+#else
+	blockrows    = Mpntr[MI+1] - Mpntr[MI];
+	blockcolumns = mpntr[mI+1] - mpntr[mI];
+#endif
+
+	while( MIndx[k] >= (blockrows   *(MI+1)) )++MI;	/* skipping preceding block rows .. */
+	while( mIndx[k] >= (blockcolumns*(mI+1)) )++mI;	/* skipping preceding block columns .. */
+	baserow = (blockrows   *(MI));
+	basecolumn = (blockcolumns*(mI));
+	bindx [ K ] = mI;			/* a new block */
+	indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;	/* FIXME : DUPLICATION ?! see later */
+
+
+
+	if( (mtxAp->flags & RSB_FLAG_SORTED_INPUT ) != 0 && 1 /* ONLY FOR 1 X 1 BLOCKED */)
+	{
+		//RSB_STDERR("rsb__do_insert_sorted_optimized : TODO : please specialize for specific blockings ! \n");
+	}
+
+while(RSB_LIKELY(k<nnz))
+	{
+#ifdef DEBUG
+		if( MIndx[k] < baserow  )
+		{
+			RSB_ERROR("k=%zd : (%zd %zd) is not ok\n",k, (rsb_printf_int_t)(MIndx[k]+1),(rsb_printf_int_t)(mIndx[k]+1));
+			RSB_STDERR("(minor dim. index %zd < base row %zd)\n",(rsb_printf_int_t)MIndx[k] , (rsb_printf_int_t)baserow);
+			errval = RSB_ERR_INTERNAL_ERROR;
+			goto err;/* NOTE : this jump could be evil */
+		}
+#endif
+
+		if( mIndx[k] >= basecolumn+blockcolumns  )
+		{
+			/* new block column, for sure */
+			mI = mIndx[k]/blockcolumns;
+			basecolumn = (blockcolumns*(mI));
+
+			if( MIndx[k] >= baserow+blockrows  )
+			{
+				/* new block row  */
+				MI = MIndx[k]/blockrows;
+				baserow = (blockrows   *(MI));
+			}
+			else
+			{
+				/* same block row  */
+			}
+			++K;
+			bindx [ K ] = mI;			/* a new block */
+			indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;
+		}
+		else
+		if( MIndx[k] >= baserow+blockrows  )
+		{
+			/* new row block, for sure */
+				MI = MIndx[k]/blockrows;
+				baserow = (blockrows   *(MI));
+
+			if( mIndx[k] < basecolumn  )
+			{
+				/* new row block, new block column  */
+				mI = mIndx[k]/blockcolumns;
+				basecolumn = (blockcolumns*(mI));
+			}
+			else
+			{
+				/* new row block, same column  */
+			}
+			++K;
+			bindx [ K ] = mI;			/* a new block */
+			indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;
+		}
+		else
+		{
+			/* same block row for sure */
+		}
+		dst = mtxAp->VA;
+		RSB_DEBUG_ASSERT(mI>=0);
+		RSB_DEBUG_ASSERT(MI>=0);
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst += K * blockrows * blockcolumns;
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst += (MIndx[k]-baserow)*blockcolumns+(mIndx[k]-basecolumn);
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst[0] = ((const float complex*)VA)[k];
+		++k;
+	}
+	if(nnz)++K;	/* if nnz == 0 then K = 0 */
+	bindx[K] = 0;	// the first element past the used part of bindx is set to a safe value
+	return RSB_ERR_NO_ERROR;	/* FIXME ! */
+}
+	/*	type double complex, storage BCSR	*/
+	if( mtxAp->typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	if( mtxAp->matrix_storage==RSB_MATRIX_STORAGE_BCSR )
+{
+	double complex * dst = mtxAp->VA;
+	k = mI = MI = 0;K = 0;
+#if RSB_EXPERIMENTAL_USE_PURE_BCSS_FOR_CONSTRUCTOR
+/*	rsb__get_blocking_size(mtxAp, &blockrows, &blockcolumns);*/
+	rsb__get_physical_blocking_size(mtxAp, &blockrows, &blockcolumns);
+	RSB_ASSERT( blockrows && blockcolumns);
+#else
+	blockrows    = Mpntr[MI+1] - Mpntr[MI];
+	blockcolumns = mpntr[mI+1] - mpntr[mI];
+#endif
+
+	while( MIndx[k] >= (blockrows   *(MI+1)) )++MI;	/* skipping preceding block rows .. */
+	while( mIndx[k] >= (blockcolumns*(mI+1)) )++mI;	/* skipping preceding block columns .. */
+	baserow = (blockrows   *(MI));
+	basecolumn = (blockcolumns*(mI));
+	bindx [ K ] = mI;			/* a new block */
+	indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;	/* FIXME : DUPLICATION ?! see later */
+
+
+
+	if( (mtxAp->flags & RSB_FLAG_SORTED_INPUT ) != 0 && 1 /* ONLY FOR 1 X 1 BLOCKED */)
+	{
+		//RSB_STDERR("rsb__do_insert_sorted_optimized : TODO : please specialize for specific blockings ! \n");
+	}
+
+while(RSB_LIKELY(k<nnz))
+	{
+#ifdef DEBUG
+		if( MIndx[k] < baserow  )
+		{
+			RSB_ERROR("k=%zd : (%zd %zd) is not ok\n",k, (rsb_printf_int_t)(MIndx[k]+1),(rsb_printf_int_t)(mIndx[k]+1));
+			RSB_STDERR("(minor dim. index %zd < base row %zd)\n",(rsb_printf_int_t)MIndx[k] , (rsb_printf_int_t)baserow);
+			errval = RSB_ERR_INTERNAL_ERROR;
+			goto err;/* NOTE : this jump could be evil */
+		}
+#endif
+
+		if( mIndx[k] >= basecolumn+blockcolumns  )
+		{
+			/* new block column, for sure */
+			mI = mIndx[k]/blockcolumns;
+			basecolumn = (blockcolumns*(mI));
+
+			if( MIndx[k] >= baserow+blockrows  )
+			{
+				/* new block row  */
+				MI = MIndx[k]/blockrows;
+				baserow = (blockrows   *(MI));
+			}
+			else
+			{
+				/* same block row  */
+			}
+			++K;
+			bindx [ K ] = mI;			/* a new block */
+			indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;
+		}
+		else
+		if( MIndx[k] >= baserow+blockrows  )
+		{
+			/* new row block, for sure */
+				MI = MIndx[k]/blockrows;
+				baserow = (blockrows   *(MI));
+
+			if( mIndx[k] < basecolumn  )
+			{
+				/* new row block, new block column  */
+				mI = mIndx[k]/blockcolumns;
+				basecolumn = (blockcolumns*(mI));
+			}
+			else
+			{
+				/* new row block, same column  */
+			}
+			++K;
+			bindx [ K ] = mI;			/* a new block */
+			indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;
+		}
+		else
+		{
+			/* same block row for sure */
+		}
+		dst = mtxAp->VA;
+		RSB_DEBUG_ASSERT(mI>=0);
+		RSB_DEBUG_ASSERT(MI>=0);
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst += K * blockrows * blockcolumns;
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst += (MIndx[k]-baserow)*blockcolumns+(mIndx[k]-basecolumn);
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst[0] = ((const double complex*)VA)[k];
+		++k;
+	}
+	if(nnz)++K;	/* if nnz == 0 then K = 0 */
+	bindx[K] = 0;	// the first element past the used part of bindx is set to a safe value
+	return RSB_ERR_NO_ERROR;	/* FIXME ! */
+}
+	errval = RSB_ERR_INTERNAL_ERROR;
+err:
+	return errval;
+}
+
+rsb_err_t rsb__dump_block(rsb_type_t type, const void * VA, rsb_blk_idx_t roff, rsb_blk_idx_t coff, rsb_blk_idx_t rows, rsb_blk_idx_t cols )
+{
+	/*!
+	 * Dumps a dense block to stdout as one-based (row, column, value) triples, skipping zero entries.
+	 * Used for debugging purposes.
+	 *
+	 * FIXME : should be integrated with the macro subsystem in util.m4, and support column major order, and debugged.
+	 */
+#if RSB_ALLOW_STDOUT
+	register rsb_coo_idx_t i, j;
+
+	if(RSB_BLK_MUL_OVERFLOW(rows,cols))
+		return RSB_ERR_LIMITS;
+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if(type == RSB_NUMERICAL_TYPE_DOUBLE )
+	{
+		for(i=0;i<rows;++i)for(j=0;j<cols;++j)
+		if(((double*)VA)[cols*i+j]!=((double)(0)) )
+		{ RSB_STDOUT(""
+		"%zd"/* FIXME : this could be any index type! */
+		"\t"
+		"%zd"
+		"\t"
+		RSB_MATRIX_STORAGE_DOUBLE_PRINTF_STRING
+		"\n",(rsb_printf_int_t)(roff+i+1),(rsb_printf_int_t)(coff+j+1),
+((double*)VA)[cols*i+j]);
+		}
+		return RSB_ERR_NO_ERROR;
+	}
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if(type == RSB_NUMERICAL_TYPE_FLOAT )
+	{
+		for(i=0;i<rows;++i)for(j=0;j<cols;++j)
+		if(((float*)VA)[cols*i+j]!=((float)(0)) )
+		{ RSB_STDOUT(""
+		"%zd"/* FIXME : this could be any index type! */
+		"\t"
+		"%zd"
+		"\t"
+		RSB_MATRIX_STORAGE_FLOAT_PRINTF_STRING
+		"\n",(rsb_printf_int_t)(roff+i+1),(rsb_printf_int_t)(coff+j+1),
+((float*)VA)[cols*i+j]);
+		}
+		return RSB_ERR_NO_ERROR;
+	}
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if(type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX )
+	{
+		for(i=0;i<rows;++i)for(j=0;j<cols;++j)
+		if(((float complex*)VA)[cols*i+j]!=((float complex)(0)) )
+		{ RSB_STDOUT(""
+		"%zd"/* FIXME : this could be any index type! */
+		"\t"
+		"%zd"
+		"\t"
+		RSB_MATRIX_STORAGE_FLOAT_COMPLEX_PRINTF_STRING
+		"\n",(rsb_printf_int_t)(roff+i+1),(rsb_printf_int_t)(coff+j+1),
+crealf(((float complex*)VA)[cols*i+j]),cimagf(((float complex*)VA)[cols*i+j]));
+		}
+		return RSB_ERR_NO_ERROR;
+	}
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if(type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX )
+	{
+		for(i=0;i<rows;++i)for(j=0;j<cols;++j)
+		if(((double complex*)VA)[cols*i+j]!=((double complex)(0)) )
+		{ RSB_STDOUT(""
+		"%zd"/* FIXME : this could be any index type! */
+		"\t"
+		"%zd"
+		"\t"
+		RSB_MATRIX_STORAGE_DOUBLE_COMPLEX_PRINTF_STRING
+		"\n",(rsb_printf_int_t)(roff+i+1),(rsb_printf_int_t)(coff+j+1),
+creal(((double complex*)VA)[cols*i+j]),cimag(((double complex*)VA)[cols*i+j]));
+		}
+		return RSB_ERR_NO_ERROR;
+	}
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+#else
+	return RSB_ERR_UNSUPPORTED_FEATURE; 
+#endif
+}
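+
+/*
+ * Illustrative sketch (not compiled in): dumping a 2x2 dense block with
+ * offsets (roff,coff) == (2,4) prints one-based (row, column, value) triples
+ * for the nonzero entries only, e.g.:
+ *
+ *   3	5	1.000000e+00
+ *   4	6	2.000000e+00
+ */
+#if 0
+static void rsb__dump_block_example(void)
+{
+	const double blk[4] = { 1.0, 0.0, 0.0, 2.0 };
+	rsb__dump_block(RSB_NUMERICAL_TYPE_DOUBLE, blk, 2, 4, 2, 2);
+}
+#endif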
+
+rsb_err_t rsb__dump_blocks(const struct rsb_mtx_t *mtxAp)
+{
+	return RSB_ERR_UNIMPLEMENTED_YET;
+#if 0
+	/*! 
+	 * \ingroup gr_internals
+	 * A debug function for printing out the matrix structure.
+	 *
+	 * FIXME : UNFINISHED
+	 * Note : it is extremely slow.
+	 **/
+	rsb_blk_idx_t i,j;
+	if(!mtxAp)return RSB_ERR_BADARGS;
+	if(!mtxAp->options)return RSB_ERR_BADARGS;
+
+	RSB_STDERR("\t block structure :\n");
+	
+	/* this prints out the matrix blocks nnz structure */
+	for(i=0;i<mtxAp->M_b;++i)
+	{
+		for(j=0;j<mtxAp->K_b;++j)
+		if((RSB_BITMAP_GET(mtxAp->options->bitmap,mtxAp->M_b,mtxAp->K_b,i,j)))
+		{
+			RSB_STDERR("1");
+		}
+		else
+		{
+			RSB_STDERR("0");
+		}
+		RSB_STDERR("\n");
+	}
+	return RSB_ERR_NO_ERROR;
+#endif
+}
+
+rsb_err_t rsb__test_print_csr(rsb_type_t type, rsb_flags_t flags, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const void * VA, rsb_coo_idx_t rows, rsb_coo_idx_t cols, rsb_nnz_idx_t nnz, rsb_bool_t want_header, FILE*stream)
+{
+	/**
+	 * \ingroup gr_internals
+	 * Dumps out a whole matrix from its CSR representation, one number per line:
+	 * the row count, the one-based row pointers, the one-based column indices, then the values.
+	 * 
+	 * Warning : the nonzeros are expected to be sorted on input.
+	 */
+#if RSB_ALLOW_STDOUT
+	rsb_coo_idx_t k;
+	if( !stream )goto err;
+	if( !IA )goto err;
+	if( ( !JA || !VA ) && nnz>0  )goto err;
+
+	RSB_FPRINTF(stream,"%zd\n",(rsb_printf_int_t)rows);
+	/* RSB_FPRINTF(stream,"%zd\n",(rsb_printf_int_t) nnz); */
+	for(k=0;k<rows+1;++k) { RSB_FPRINTF(stream,"%zd\n",(rsb_printf_int_t)(IA[k]+1)); }
+	for(k=0;k<nnz   ;++k) { RSB_FPRINTF(stream,"%zd\n",(rsb_printf_int_t)(JA[k]+1)); }
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if(type == RSB_NUMERICAL_TYPE_DOUBLE )
+	{
+		for(k=0;k<nnz;++k)
+		{
+			RSB_FPRINTF(stream,
+				RSB_MATRIX_STORAGE_DOUBLE_PRINTF_STRING
+				"\n"
+				,((double*)VA)[k]);
+		}
+		return RSB_ERR_NO_ERROR;
+	}
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if(type == RSB_NUMERICAL_TYPE_FLOAT )
+	{
+		for(k=0;k<nnz;++k)
+		{
+			RSB_FPRINTF(stream,
+				RSB_MATRIX_STORAGE_FLOAT_PRINTF_STRING
+				"\n"
+				,((float*)VA)[k]);
+		}
+		return RSB_ERR_NO_ERROR;
+	}
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if(type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX )
+	{
+		for(k=0;k<nnz;++k)
+		{
+			RSB_FPRINTF(stream,
+				RSB_MATRIX_STORAGE_FLOAT_COMPLEX_PRINTF_STRING
+				"\n"
+				,crealf(((float complex*)VA)[k]),cimagf(((float complex*)VA)[k]));
+		}
+		return RSB_ERR_NO_ERROR;
+	}
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if(type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX )
+	{
+		for(k=0;k<nnz;++k)
+		{
+			RSB_FPRINTF(stream,
+				RSB_MATRIX_STORAGE_DOUBLE_COMPLEX_PRINTF_STRING
+				"\n"
+				,creal(((double complex*)VA)[k]),cimag(((double complex*)VA)[k]));
+		}
+		return RSB_ERR_NO_ERROR;
+	}
+#endif
+err:
+	return RSB_ERR_GENERIC_ERROR;
+#else
+	return RSB_ERR_UNSUPPORTED_FEATURE; 
+#endif
+}
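+
+/*
+ * Illustrative sketch (not compiled in): a 2x2 identity dumped via
+ * rsb__test_print_csr prints, one number per line, the row count (2), the
+ * one-based row pointers (1,2,3), the one-based column indices (1,2) and the
+ * two unit values. Note that the want_header argument is not used by this
+ * particular dump.
+ */
+#if 0
+static void rsb__test_print_csr_example(void)
+{
+	const rsb_coo_idx_t IP[3] = { 0, 1, 2 }, JA[2] = { 0, 1 };
+	const double VA[2] = { 1.0, 1.0 };
+	rsb__test_print_csr(RSB_NUMERICAL_TYPE_DOUBLE, RSB_FLAG_NOFLAGS, IP, JA, VA, 2, 2, 2, RSB_BOOL_TRUE, stdout);
+}
+#endif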
+
+rsb_err_t rsb__test_print_coo_mm(rsb_type_t type, rsb_flags_t flags, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const void * VA, rsb_coo_idx_t rows, rsb_coo_idx_t cols, rsb_nnz_idx_t nnz, rsb_bool_t want_header, FILE*stream)
+{
+	/**
+	 * \ingroup gr_internals
+	 * Dumps out a whole matrix, from its coordinates, in Matrix Market coordinate format.
+	 * 
+	 * Warning : the nonzeros are expected to be sorted on input.
+	 */
+#if RSB_ALLOW_STDOUT
+	rsb_coo_idx_t k;
+	const char * ts = RSB_IS_MATRIX_TYPE_COMPLEX(type)?"complex":"real";
+	const char * ss = RSB_SYMMETRY_STRING(flags);
+	
+	if( !stream )
+	{
+		goto err;
+	}
+
+	if( ( !IA || !JA || !VA ) && nnz > 0 )
+		goto err;
+	if( rows < 0 || cols < 0 || nnz < 0 )
+		goto err;
+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if(type == RSB_NUMERICAL_TYPE_DOUBLE )
+	{
+		if(want_header)RSB_FPRINTF(stream,"%%%%MatrixMarket matrix coordinate %s %s\n%zd %zd %zd\n",ts,ss,(rsb_printf_int_t)rows,(rsb_printf_int_t)cols,(rsb_printf_int_t)nnz);
+/*		for(k=0;k<nnz;++k) { RSB_FPRINTF(stream,"%6zd %6zd %20g\n",(rsb_printf_int_t)(IA[k]+1),(rsb_printf_int_t)(JA[k]+1),((float*)VA)[k]); }*/
+		for(k=0;k<nnz;++k)
+		{
+			RSB_FPRINTF(stream,
+				"%zd"
+				"\t"
+				"%zd"
+				"\t"
+				RSB_MATRIX_STORAGE_DOUBLE_PRINTF_STRING
+				"\n"
+				,(rsb_printf_int_t)(IA[k]+1),(rsb_printf_int_t)(JA[k]+1),((double*)VA)[k]);
+		}
+		return RSB_ERR_NO_ERROR;
+	}
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if(type == RSB_NUMERICAL_TYPE_FLOAT )
+	{
+		if(want_header)RSB_FPRINTF(stream,"%%%%MatrixMarket matrix coordinate %s %s\n%zd %zd %zd\n",ts,ss,(rsb_printf_int_t)rows,(rsb_printf_int_t)cols,(rsb_printf_int_t)nnz);
+/*		for(k=0;k<nnz;++k) { RSB_FPRINTF(stream,"%6zd %6zd %20g\n",(rsb_printf_int_t)(IA[k]+1),(rsb_printf_int_t)(JA[k]+1),((float*)VA)[k]); }*/
+		for(k=0;k<nnz;++k)
+		{
+			RSB_FPRINTF(stream,
+				"%zd"
+				"\t"
+				"%zd"
+				"\t"
+				RSB_MATRIX_STORAGE_FLOAT_PRINTF_STRING
+				"\n"
+				,(rsb_printf_int_t)(IA[k]+1),(rsb_printf_int_t)(JA[k]+1),((float*)VA)[k]);
+		}
+		return RSB_ERR_NO_ERROR;
+	}
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if(type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX )
+	{
+		if(want_header)RSB_FPRINTF(stream,"%%%%MatrixMarket matrix coordinate %s %s\n%zd %zd %zd\n",ts,ss,(rsb_printf_int_t)rows,(rsb_printf_int_t)cols,(rsb_printf_int_t)nnz);
+/*		for(k=0;k<nnz;++k) { RSB_FPRINTF(stream,"%6zd %6zd %20g\n",(rsb_printf_int_t)(IA[k]+1),(rsb_printf_int_t)(JA[k]+1),((float*)VA)[k]); }*/
+		for(k=0;k<nnz;++k)
+		{
+			RSB_FPRINTF(stream,
+				"%zd"
+				"\t"
+				"%zd"
+				"\t"
+				RSB_MATRIX_STORAGE_FLOAT_COMPLEX_PRINTF_STRING
+				"\n"
+				,(rsb_printf_int_t)(IA[k]+1),(rsb_printf_int_t)(JA[k]+1),crealf(((float complex*)VA)[k]),cimagf(((float complex*)VA)[k]));
+		}
+		return RSB_ERR_NO_ERROR;
+	}
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if(type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX )
+	{
+		if(want_header)RSB_FPRINTF(stream,"%%%%MatrixMarket matrix coordinate %s %s\n%zd %zd %zd\n",ts,ss,(rsb_printf_int_t)rows,(rsb_printf_int_t)cols,(rsb_printf_int_t)nnz);
+/*		for(k=0;k<nnz;++k) { RSB_FPRINTF(stream,"%6zd %6zd %20g\n",(rsb_printf_int_t)(IA[k]+1),(rsb_printf_int_t)(JA[k]+1),((float*)VA)[k]); }*/
+		for(k=0;k<nnz;++k)
+		{
+			RSB_FPRINTF(stream,
+				"%zd"
+				"\t"
+				"%zd"
+				"\t"
+				RSB_MATRIX_STORAGE_DOUBLE_COMPLEX_PRINTF_STRING
+				"\n"
+				,(rsb_printf_int_t)(IA[k]+1),(rsb_printf_int_t)(JA[k]+1),creal(((double complex*)VA)[k]),cimag(((double complex*)VA)[k]));
+		}
+		return RSB_ERR_NO_ERROR;
+	}
+#endif
+err:
+	return RSB_ERR_GENERIC_ERROR;
+#else
+	return RSB_ERR_UNSUPPORTED_FEATURE; 
+#endif
+}
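+
+/*
+ * Illustrative sketch (not compiled in): Matrix Market dump of a 2x2
+ * diagonal matrix; the header carries the field ("real"/"complex") and the
+ * symmetry string derived from the flags, e.g.:
+ *
+ *   %%MatrixMarket matrix coordinate real general
+ *   2 2 2
+ *   1	1	1.000000e+00
+ *   2	2	2.000000e+00
+ */
+#if 0
+static void rsb__test_print_coo_mm_example(void)
+{
+	const rsb_coo_idx_t IA[2] = { 0, 1 }, JA[2] = { 0, 1 };
+	const double VA[2] = { 1.0, 2.0 };
+	rsb__test_print_coo_mm(RSB_NUMERICAL_TYPE_DOUBLE, RSB_FLAG_NOFLAGS, IA, JA, VA, 2, 2, 2, RSB_BOOL_TRUE, stdout);
+}
+#endif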
+
+/*static*/ /*inline*/ size_t rsb__do_sizeof(rsb_type_t type)
+{
+		/*
+		 * Returns the size in bytes of the given numerical type, or 0 if the type is unsupported.
+		 * FIXME : UNUSED ?
+		 */
+		size_t so = 0;
+		switch(type)
+		{
+			/* supported (double,float,float complex,double complex) */
+			case RSB_NUMERICAL_TYPE_DOUBLE 	:
+				so = sizeof(double);
+			break;
+			case RSB_NUMERICAL_TYPE_FLOAT 	:
+				so = sizeof(float);
+			break;
+			case RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 	:
+				so = sizeof(float complex);
+			break;
+			case RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 	:
+				so = sizeof(double complex);
+			break;
+			/* unsupported type */
+			default :
+			RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS 
+		}
+		return so;
+	}
+
+rsb_err_t rsb__do_coo_sum( struct rsb_coo_matrix_t*coocp, const void *alphap, const struct rsb_coo_matrix_t*cooap, const void *betap,  const struct rsb_coo_matrix_t*coobp)
+{
+	/*
+	 * Computes C = alpha * A + beta * B on sorted coordinate (COO) operands,
+	 * combining entries with coinciding coordinates; alphap and betap
+	 * default to 1.0 when NULL.
+	 */
+	struct rsb_coo_matrix_t cooa = *cooap, coob = *coobp, cooc = *coocp;
+	rsb_nnz_idx_t /*rnz = 0,*/ an, bn, cn;
+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if(cooa.typecode == RSB_NUMERICAL_TYPE_DOUBLE )
+	{
+	double alpha = alphap?*(double*)alphap:((double)(1.0));
+	double beta  = betap ?*(double*)betap :((double)(1.0));
+	for(cn = 0, an = 0, bn = 0;an<cooa.nnz || bn<coob.nnz;)
+	{
+		rsb_nnz_idx_t ap = an, bp = bn;
+		if(cooa.IA[an]==coob.IA[bn] && cooa.JA[an]==coob.JA[bn])
+			cooc.IA[cn] = cooa.IA[an],cooc.JA[cn] = cooa.JA[an],
+			((double*)cooc.VA)[cn] = alpha * ((double*)cooa.VA)[an] + beta * ((double*)coob.VA)[bn],
+			ap = an, bp = bn, ++cn, ++an, ++bn;
+
+		for(;an<cooa.nnz && cooa.IA[an]==cooa.IA[ap] && cooa.JA[an]==cooa.JA[ap] ;++an)
+			//RSB_STDOUT("x> %d %d\n",cooa.IA[an],cooa.JA[an])
+			((double*)cooc.VA)[cn] += alpha * ((double*)cooa.VA)[an];
+
+		for(;bn<coob.nnz && coob.IA[bn]==coob.IA[bp] && coob.JA[bn]==coob.JA[bp] ;++bn)
+			//RSB_STDOUT("x> %d %d\n",coob.IA[bn],coob.JA[bn])
+			((double*)cooc.VA)[cn] += beta  * ((double*)coob.VA)[bn];
+
+		if( bn<coob.nnz )
+		for(;an<cooa.nnz && (cooa.IA[an]<coob.IA[bn] ||
+			       	(cooa.IA[an] <= coob.IA[bn] && cooa.JA[an]<coob.JA[bn]))
+			       	;++an)
+				//RSB_STDOUT("-> %d %d\n",cooa.IA[an],cooa.JA[an]),
+			cooc.IA[cn] = cooa.IA[an], cooc.JA[cn] = cooa.JA[an],
+			((double*)cooc.VA)[cn] = alpha * ((double*)cooa.VA)[an],
+			++cn;
+
+		if( an<cooa.nnz )
+		for(;bn<coob.nnz && (cooa.IA[an]>coob.IA[bn] ||
+			       	(cooa.IA[an]>=coob.IA[bn] && cooa.JA[an]>coob.JA[bn]))
+			       	;++bn)
+			//	RSB_STDOUT("-> %d %d\n",coob.IA[bn],coob.JA[bn]),
+			cooc.IA[cn] = coob.IA[bn],cooc.JA[cn] = coob.JA[bn],
+			((double*)cooc.VA)[cn] = beta * ((double*)coob.VA)[bn],
+			++cn;
+		//RSB_STDOUT("? %d %d\n",an,bn);
+	}
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if(cooa.typecode == RSB_NUMERICAL_TYPE_FLOAT )
+	{
+	float alpha = alphap?*(float*)alphap:((float)(1.0));
+	float beta  = betap ?*(float*)betap :((float)(1.0));
+	for(cn = 0, an = 0, bn = 0;an<cooa.nnz || bn<coob.nnz;)
+	{
+		rsb_nnz_idx_t ap = an, bp = bn;
+		if(cooa.IA[an]==coob.IA[bn] && cooa.JA[an]==coob.JA[bn])
+			cooc.IA[cn] = cooa.IA[an],cooc.JA[cn] = cooa.JA[an],
+			((float*)cooc.VA)[cn] = alpha * ((float*)cooa.VA)[an] + beta * ((float*)coob.VA)[bn],
+			ap = an, bp = bn, ++cn, ++an, ++bn;
+
+		for(;an<cooa.nnz && cooa.IA[an]==cooa.IA[ap] && cooa.JA[an]==cooa.JA[ap] ;++an)
+			//RSB_STDOUT("x> %d %d\n",cooa.IA[an],cooa.JA[an])
+			((float*)cooc.VA)[cn] += alpha * ((float*)cooa.VA)[an];
+
+		for(;bn<coob.nnz && coob.IA[bn]==coob.IA[bp] && coob.JA[bn]==coob.JA[bp] ;++bn)
+			//RSB_STDOUT("x> %d %d\n",coob.IA[bn],coob.JA[bn])
+			((float*)cooc.VA)[cn] += beta  * ((float*)coob.VA)[bn];
+
+		if( bn<coob.nnz )
+		for(;an<cooa.nnz && (cooa.IA[an]<coob.IA[bn] ||
+			       	(cooa.IA[an] <= coob.IA[bn] && cooa.JA[an]<coob.JA[bn]))
+			       	;++an)
+				//RSB_STDOUT("-> %d %d\n",cooa.IA[an],cooa.JA[an]),
+			cooc.IA[cn] = cooa.IA[an], cooc.JA[cn] = cooa.JA[an],
+			((float*)cooc.VA)[cn] = alpha * ((float*)cooa.VA)[an],
+			++cn;
+
+		if( an<cooa.nnz )
+		for(;bn<coob.nnz && (cooa.IA[an]>coob.IA[bn] ||
+			       	(cooa.IA[an]>=coob.IA[bn] && cooa.JA[an]>coob.JA[bn]))
+			       	;++bn)
+			//	RSB_STDOUT("-> %d %d\n",coob.IA[bn],coob.JA[bn]),
+			cooc.IA[cn] = coob.IA[bn],cooc.JA[cn] = coob.JA[bn],
+			((float*)cooc.VA)[cn] = beta * ((float*)coob.VA)[bn],
+			++cn;
+		//RSB_STDOUT("? %d %d\n",an,bn);
+	}
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if(cooa.typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX )
+	{
+	float complex alpha = alphap?*(float complex*)alphap:((float complex)(1.0));
+	float complex beta  = betap ?*(float complex*)betap :((float complex)(1.0));
+	for(cn = 0, an = 0, bn = 0;an<cooa.nnz || bn<coob.nnz;)
+	{
+		rsb_nnz_idx_t ap = an, bp = bn;
+		if(cooa.IA[an]==coob.IA[bn] && cooa.JA[an]==coob.JA[bn])
+			cooc.IA[cn] = cooa.IA[an],cooc.JA[cn] = cooa.JA[an],
+			((float complex*)cooc.VA)[cn] = alpha * ((float complex*)cooa.VA)[an] + beta * ((float complex*)coob.VA)[bn],
+			ap = an, bp = bn, ++cn, ++an, ++bn;
+
+		for(;an<cooa.nnz && cooa.IA[an]==cooa.IA[ap] && cooa.JA[an]==cooa.JA[ap] ;++an)
+			//RSB_STDOUT("x> %d %d\n",cooa.IA[an],cooa.JA[an])
+			((float complex*)cooc.VA)[cn] += alpha * ((float complex*)cooa.VA)[an];
+
+		for(;bn<coob.nnz && coob.IA[bn]==coob.IA[bp] && coob.JA[bn]==coob.JA[bp] ;++bn)
+			//RSB_STDOUT("x> %d %d\n",coob.IA[bn],coob.JA[bn])
+			((float complex*)cooc.VA)[cn] += beta  * ((float complex*)coob.VA)[bn];
+
+		if( bn<coob.nnz )
+		for(;an<cooa.nnz && (cooa.IA[an]<coob.IA[bn] ||
+			       	(cooa.IA[an] <= coob.IA[bn] && cooa.JA[an]<coob.JA[bn]))
+			       	;++an)
+				//RSB_STDOUT("-> %d %d\n",cooa.IA[an],cooa.JA[an]),
+			cooc.IA[cn] = cooa.IA[an], cooc.JA[cn] = cooa.JA[an],
+			((float complex*)cooc.VA)[cn] = alpha * ((float complex*)cooa.VA)[an],
+			++cn;
+
+		if( an<cooa.nnz )
+		for(;bn<coob.nnz && (cooa.IA[an]>coob.IA[bn] ||
+			       	(cooa.IA[an]>=coob.IA[bn] && cooa.JA[an]>coob.JA[bn]))
+			       	;++bn)
+			//	RSB_STDOUT("-> %d %d\n",coob.IA[bn],coob.JA[bn]),
+			cooc.IA[cn] = coob.IA[bn],cooc.JA[cn] = coob.JA[bn],
+			((float complex*)cooc.VA)[cn] = beta * ((float complex*)coob.VA)[bn],
+			++cn;
+		//RSB_STDOUT("? %d %d\n",an,bn);
+	}
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if(cooa.typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX )
+	{
+	double complex alpha = alphap?*(double complex*)alphap:((double complex)(1.0));
+	double complex beta  = betap ?*(double complex*)betap :((double complex)(1.0));
+	for(cn = 0, an = 0, bn = 0;an<cooa.nnz || bn<coob.nnz;)
+	{
+		rsb_nnz_idx_t ap = an, bp = bn;
+		if(cooa.IA[an]==coob.IA[bn] && cooa.JA[an]==coob.JA[bn])
+			cooc.IA[cn] = cooa.IA[an],cooc.JA[cn] = cooa.JA[an],
+			((double complex*)cooc.VA)[cn] = alpha * ((double complex*)cooa.VA)[an] + beta * ((double complex*)coob.VA)[bn],
+			ap = an, bp = bn, ++cn, ++an, ++bn;
+
+		for(;an<cooa.nnz && cooa.IA[an]==cooa.IA[ap] && cooa.JA[an]==cooa.JA[ap] ;++an)
+			//RSB_STDOUT("x> %d %d\n",cooa.IA[an],cooa.JA[an])
+			((double complex*)cooc.VA)[cn] += alpha * ((double complex*)cooa.VA)[an];
+
+		for(;bn<coob.nnz && coob.IA[bn]==coob.IA[bp] && coob.JA[bn]==coob.JA[bp] ;++bn)
+			//RSB_STDOUT("x> %d %d\n",coob.IA[bn],coob.JA[bn])
+			((double complex*)cooc.VA)[cn] += beta  * ((double complex*)coob.VA)[bn];
+
+		if( bn<coob.nnz )
+		for(;an<cooa.nnz && (cooa.IA[an]<coob.IA[bn] ||
+			       	(cooa.IA[an] <= coob.IA[bn] && cooa.JA[an]<coob.JA[bn]))
+			       	;++an)
+				//RSB_STDOUT("-> %d %d\n",cooa.IA[an],cooa.JA[an]),
+			cooc.IA[cn] = cooa.IA[an], cooc.JA[cn] = cooa.JA[an],
+			((double complex*)cooc.VA)[cn] = alpha * ((double complex*)cooa.VA)[an],
+			++cn;
+
+		if( an<cooa.nnz )
+		for(;bn<coob.nnz && (cooa.IA[an]>coob.IA[bn] ||
+			       	(cooa.IA[an]>=coob.IA[bn] && cooa.JA[an]>coob.JA[bn]))
+			       	;++bn)
+			//	RSB_STDOUT("-> %d %d\n",coob.IA[bn],coob.JA[bn]),
+			cooc.IA[cn] = coob.IA[bn],cooc.JA[cn] = coob.JA[bn],
+			((double complex*)cooc.VA)[cn] = beta * ((double complex*)coob.VA)[bn],
+			++cn;
+		//RSB_STDOUT("? %d %d\n",an,bn);
+	}
+	}
+	else 
+#endif
+	return RSB_ERR_UNSUPPORTED_TYPE;
+	return RSB_ERR_NO_ERROR;
+}
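+
+/*
+ * Illustrative sketch: the merge loop above (rsb__do_coo_sum, declared in
+ * rsb_util.h) combines two row-major sorted COO operands as
+ * C = alpha*A + beta*B; e.g., with alpha = beta = 1,
+ *   A: (0,0)=1 (0,1)=2   and   B: (0,1)=3 (1,0)=4
+ * are intended to yield C: (0,0)=1 (0,1)=5 (1,0)=4.
+ */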
+
+rsb_err_t rsb__cor_merge_dups(rsb_type_t typecode, void* RSB_RESTRICT VA, rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t * RSB_RESTRICT JA, rsb_nnz_idx_t offB, rsb_nnz_idx_t nnzB, rsb_nnz_idx_t nnzC, const int wv, int wp, rsb_nnz_idx_t *onzp, struct rsb_coo_matrix_t*RSB_RESTRICT coop)
+{
+	/**
+		Merges the two adjacent, individually sorted COO runs of
+		(VA,IA,JA) starting at offB (of nnzB and nnzC coefficients,
+		respectively), summing coefficients with equal coordinates.
+		See rsb__cor_merge.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	void *VB = NULL, *VC = NULL, *VT = NULL;
+	rsb_coo_idx_t * IB = NULL, *JB = NULL;
+	rsb_coo_idx_t * IC = NULL, *JC = NULL;
+	rsb_coo_idx_t * IT = NULL, *JT = NULL;
+	rsb_nnz_idx_t bi = 0, ci = 0, ti = 0;
+	rsb_nnz_idx_t b0 = 0, c0 = 0, t0 = 0;
+	rsb_nnz_idx_t onz = 0;
+	struct rsb_coo_matrix_t coo;
+	size_t es = RSB_SIZEOF(typecode);
+
+	if( nnzB == 0 || nnzC == 0 )
+	{
+		goto ret;
+	}
+
+	b0 = offB;
+	c0 = offB + nnzB;
+	VB = RSB_TYPED_OFF_PTR(typecode,VA,b0);
+	VC = RSB_TYPED_OFF_PTR(typecode,VA,c0);
+	IB = IA + b0;
+	IC = IA + c0;
+	JB = JA + b0;
+	JC = JA + c0;
+
+	RSB_BZERO_P(&coo);
+	coo.nnz = nnzB + nnzC;
+	coo.typecode = typecode;
+
+	if( coop && coop->nnz)
+	{
+		coo = *coop;
+		coo.nnz = nnzB + nnzC; /* necessary */
+	}
+	else
+	{
+		if( NULL == rsb__allocate_coo_matrix_t(&coo) )
+			goto err;
+	}
+
+	IT = coo.IA;
+	JT = coo.JA;
+	VT = coo.VA;
+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if(typecode == RSB_NUMERICAL_TYPE_DOUBLE )
+	{
+	double * vT = VT;
+	double * vB = VB;
+	double * vC = VC;
+
+again_double:
+	t0 = ti;
+
+       	if   ( bi<nnzB && ci<nnzC && RSB_COO_LT(IB[bi],JB[bi],IC[ci],JC[ci]) )
+	{
+		IT[ti] = IB[bi];
+		JT[ti] = JB[bi];
+		vT[ti] = vB[bi];
+		++bi,++ti;
+	}
+
+       	while( bi<nnzB && ci<nnzC && RSB_COO_LT(IB[bi],JB[bi],IC[ci],JC[ci]) && ti > 0 && RSB_COO_EQ(IB[bi],JB[bi],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		vT[ti] += vB[bi];
+		++bi;
+		++ti;
+		++onz;
+	}
+
+	/* FIXME: this behaves as RSB_FLAG_DUPLICATES_SUM, but should also support merge, last, first, ...  */
+	t0 = ti;
+       	if   ( bi<nnzB && ci<nnzC && RSB_COO_EQ(IB[bi],JB[bi],IC[ci],JC[ci]) )
+	{
+		IT[ti] = IB[bi];
+		JT[ti] = JB[bi];
+		vT[ti] = vB[bi] + vC[ci];
+		++bi,++ci,++ti;
+		++onz;
+	}
+
+       	while( bi<nnzB && ci<nnzC && RSB_COO_EQ(IB[bi],JB[bi],IC[ci],JC[ci]) && ti > 0 && RSB_COO_EQ(IB[bi],JB[bi],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		vT[ti] += vB[bi] + vC[ci];
+		++bi;
+		++ti;
+		++onz;
+	}
+
+	t0 = ti;
+       	if   ( bi<nnzB && ci<nnzC && RSB_COO_GT(IB[bi],JB[bi],IC[ci],JC[ci]) )
+	{
+		IT[ti] = IB[bi];
+		JT[ti] = JB[bi];
+		vT[ti] = vB[bi] + vC[ci];
+		++bi,++ci,++ti;
+	}
+
+       	while( bi<nnzB && ci<nnzC && RSB_COO_GT(IB[bi],JB[bi],IC[ci],JC[ci]) && ti > 0 && RSB_COO_EQ(IC[ci],JC[ci],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		vT[ti] += vB[bi] + vC[ci];
+		++ci;
+		++ti;
+		++onz;
+	}
+
+	if( ci < nnzC && bi < nnzB )
+		goto again_double;
+
+       	if   ( bi<nnzB && ci==nnzC )
+	{
+		IT[ti] = IB[bi];
+		JT[ti] = JB[bi];
+		vT[ti] = vB[bi];
+		++bi,++ti;
+	}
+
+       	while( bi<nnzB && ci==nnzC && ti > 0 && RSB_COO_EQ(IB[bi],JB[bi],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		vT[ti] += vB[bi];
+		++bi;
+		++ti;
+		++onz;
+	}
+
+       	if   ( ci<nnzC && bi==nnzB )
+	{
+		IT[ti] = IC[ci];
+		JT[ti] = JC[ci];
+		vT[ti] = vC[ci];
+		++ci,++ti;
+	}
+
+       	while( ci<nnzC && bi==nnzB && ti > 0 && RSB_COO_EQ(IC[ci],JC[ci],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		IT[ti] = IC[ci];
+		JT[ti] = JC[ci];
+		vT[ti]+= vC[ci];
+		++ci;
+		++ti;
+		++onz;
+	}
+
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if(typecode == RSB_NUMERICAL_TYPE_FLOAT )
+	{
+	float * vT = VT;
+	float * vB = VB;
+	float * vC = VC;
+
+again_float:
+	t0 = ti;
+
+       	if   ( bi<nnzB && ci<nnzC && RSB_COO_LT(IB[bi],JB[bi],IC[ci],JC[ci]) )
+	{
+		IT[ti] = IB[bi];
+		JT[ti] = JB[bi];
+		vT[ti] = vB[bi];
+		++bi,++ti;
+	}
+
+       	while( bi<nnzB && ci<nnzC && RSB_COO_LT(IB[bi],JB[bi],IC[ci],JC[ci]) && ti > 0 && RSB_COO_EQ(IB[bi],JB[bi],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		vT[ti] += vB[bi];
+		++bi;
+		++ti;
+		++onz;
+	}
+
+	/* FIXME: this behaves as RSB_FLAG_DUPLICATES_SUM, but should also support merge, last, first, ...  */
+	t0 = ti;
+       	if   ( bi<nnzB && ci<nnzC && RSB_COO_EQ(IB[bi],JB[bi],IC[ci],JC[ci]) )
+	{
+		IT[ti] = IB[bi];
+		JT[ti] = JB[bi];
+		vT[ti] = vB[bi] + vC[ci];
+		++bi,++ci,++ti;
+		++onz;
+	}
+
+       	while( bi<nnzB && ci<nnzC && RSB_COO_EQ(IB[bi],JB[bi],IC[ci],JC[ci]) && ti > 0 && RSB_COO_EQ(IB[bi],JB[bi],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		vT[ti] += vB[bi] + vC[ci];
+		++bi;
+		++ti;
+		++onz;
+	}
+
+	t0 = ti;
+       	if   ( bi<nnzB && ci<nnzC && RSB_COO_GT(IB[bi],JB[bi],IC[ci],JC[ci]) )
+	{
+		IT[ti] = IB[bi];
+		JT[ti] = JB[bi];
+		vT[ti] = vB[bi] + vC[ci];
+		++bi,++ci,++ti;
+	}
+
+       	while( bi<nnzB && ci<nnzC && RSB_COO_GT(IB[bi],JB[bi],IC[ci],JC[ci]) && ti > 0 && RSB_COO_EQ(IC[ci],JC[ci],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		vT[ti] += vB[bi] + vC[ci];
+		++ci;
+		++ti;
+		++onz;
+	}
+
+	if( ci < nnzC && bi < nnzB )
+		goto again_float;
+
+       	if   ( bi<nnzB && ci==nnzC )
+	{
+		IT[ti] = IB[bi];
+		JT[ti] = JB[bi];
+		vT[ti] = vB[bi];
+		++bi,++ti;
+	}
+
+       	while( bi<nnzB && ci==nnzC && ti > 0 && RSB_COO_EQ(IB[bi],JB[bi],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		vT[ti] += vB[bi];
+		++bi;
+		++ti;
+		++onz;
+	}
+
+       	if   ( ci<nnzC && bi==nnzB )
+	{
+		IT[ti] = IC[ci];
+		JT[ti] = JC[ci];
+		vT[ti] = vC[ci];
+		++ci,++ti;
+	}
+
+       	while( ci<nnzC && bi==nnzB && ti > 0 && RSB_COO_EQ(IC[ci],JC[ci],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		IT[ti] = IC[ci];
+		JT[ti] = JC[ci];
+		vT[ti]+= vC[ci];
+		++ci;
+		++ti;
+		++onz;
+	}
+
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if(typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX )
+	{
+	float complex * vT = VT;
+	float complex * vB = VB;
+	float complex * vC = VC;
+
+again_float_complex:
+	t0 = ti;
+
+       	if   ( bi<nnzB && ci<nnzC && RSB_COO_LT(IB[bi],JB[bi],IC[ci],JC[ci]) )
+	{
+		IT[ti] = IB[bi];
+		JT[ti] = JB[bi];
+		vT[ti] = vB[bi];
+		++bi,++ti;
+	}
+
+       	while( bi<nnzB && ci<nnzC && RSB_COO_LT(IB[bi],JB[bi],IC[ci],JC[ci]) && ti > 0 && RSB_COO_EQ(IB[bi],JB[bi],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		vT[ti] += vB[bi];
+		++bi;
+		++ti;
+		++onz;
+	}
+
+	/* FIXME: this behaves as RSB_FLAG_DUPLICATES_SUM, but should also support merge, last, first, ...  */
+	t0 = ti;
+       	if   ( bi<nnzB && ci<nnzC && RSB_COO_EQ(IB[bi],JB[bi],IC[ci],JC[ci]) )
+	{
+		IT[ti] = IB[bi];
+		JT[ti] = JB[bi];
+		vT[ti] = vB[bi] + vC[ci];
+		++bi,++ci,++ti;
+		++onz;
+	}
+
+       	while( bi<nnzB && ci<nnzC && RSB_COO_EQ(IB[bi],JB[bi],IC[ci],JC[ci]) && ti > 0 && RSB_COO_EQ(IB[bi],JB[bi],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		vT[ti] += vB[bi] + vC[ci];
+		++bi;
+		++ti;
+		++onz;
+	}
+
+	t0 = ti;
+       	if   ( bi<nnzB && ci<nnzC && RSB_COO_GT(IB[bi],JB[bi],IC[ci],JC[ci]) )
+	{
+		IT[ti] = IB[bi];
+		JT[ti] = JB[bi];
+		vT[ti] = vB[bi] + vC[ci];
+		++bi,++ci,++ti;
+	}
+
+       	while( bi<nnzB && ci<nnzC && RSB_COO_GT(IB[bi],JB[bi],IC[ci],JC[ci]) && ti > 0 && RSB_COO_EQ(IC[ci],JC[ci],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		vT[ti] += vB[bi] + vC[ci];
+		++ci;
+		++ti;
+		++onz;
+	}
+
+	if( ci < nnzC && bi < nnzB )
+		goto again_float_complex;
+
+       	if   ( bi<nnzB && ci==nnzC )
+	{
+		IT[ti] = IB[bi];
+		JT[ti] = JB[bi];
+		vT[ti] = vB[bi];
+		++bi,++ti;
+	}
+
+       	while( bi<nnzB && ci==nnzC && ti > 0 && RSB_COO_EQ(IB[bi],JB[bi],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		vT[ti] += vB[bi];
+		++bi;
+		++ti;
+		++onz;
+	}
+
+       	if   ( ci<nnzC && bi==nnzB )
+	{
+		IT[ti] = IC[ci];
+		JT[ti] = JC[ci];
+		vT[ti] = vC[ci];
+		++ci,++ti;
+	}
+
+       	while( ci<nnzC && bi==nnzB && ti > 0 && RSB_COO_EQ(IC[ci],JC[ci],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		IT[ti] = IC[ci];
+		JT[ti] = JC[ci];
+		vT[ti]+= vC[ci];
+		++ci;
+		++ti;
+		++onz;
+	}
+
+	}
+	else 
+#endif
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if(typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX )
+	{
+	double complex * vT = VT;
+	double complex * vB = VB;
+	double complex * vC = VC;
+
+again_double_complex:
+	t0 = ti;
+
+       	if   ( bi<nnzB && ci<nnzC && RSB_COO_LT(IB[bi],JB[bi],IC[ci],JC[ci]) )
+	{
+		IT[ti] = IB[bi];
+		JT[ti] = JB[bi];
+		vT[ti] = vB[bi];
+		++bi,++ti;
+	}
+
+       	while( bi<nnzB && ci<nnzC && RSB_COO_LT(IB[bi],JB[bi],IC[ci],JC[ci]) && ti > 0 && RSB_COO_EQ(IB[bi],JB[bi],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		vT[ti] += vB[bi];
+		++bi;
+		++ti;
+		++onz;
+	}
+
+	/* FIXME: this behaves as RSB_FLAG_DUPLICATES_SUM, but should also support merge, last, first, ...  */
+	t0 = ti;
+       	if   ( bi<nnzB && ci<nnzC && RSB_COO_EQ(IB[bi],JB[bi],IC[ci],JC[ci]) )
+	{
+		IT[ti] = IB[bi];
+		JT[ti] = JB[bi];
+		vT[ti] = vB[bi] + vC[ci];
+		++bi,++ci,++ti;
+		++onz;
+	}
+
+       	while( bi<nnzB && ci<nnzC && RSB_COO_EQ(IB[bi],JB[bi],IC[ci],JC[ci]) && ti > 0 && RSB_COO_EQ(IB[bi],JB[bi],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		vT[ti] += vB[bi] + vC[ci];
+		++bi;
+		++ti;
+		++onz;
+	}
+
+	t0 = ti;
+       	if   ( bi<nnzB && ci<nnzC && RSB_COO_GT(IB[bi],JB[bi],IC[ci],JC[ci]) )
+	{
+		IT[ti] = IB[bi];
+		JT[ti] = JB[bi];
+		vT[ti] = vB[bi] + vC[ci];
+		++bi,++ci,++ti;
+	}
+
+       	while( bi<nnzB && ci<nnzC && RSB_COO_GT(IB[bi],JB[bi],IC[ci],JC[ci]) && ti > 0 && RSB_COO_EQ(IC[ci],JC[ci],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		vT[ti] += vB[bi] + vC[ci];
+		++ci;
+		++ti;
+		++onz;
+	}
+
+	if( ci < nnzC && bi < nnzB )
+		goto again_double_complex;
+
+       	if   ( bi<nnzB && ci==nnzC )
+	{
+		IT[ti] = IB[bi];
+		JT[ti] = JB[bi];
+		vT[ti] = vB[bi];
+		++bi,++ti;
+	}
+
+       	while( bi<nnzB && ci==nnzC && ti > 0 && RSB_COO_EQ(IB[bi],JB[bi],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		vT[ti] += vB[bi];
+		++bi;
+		++ti;
+		++onz;
+	}
+
+       	if   ( ci<nnzC && bi==nnzB )
+	{
+		IT[ti] = IC[ci];
+		JT[ti] = JC[ci];
+		vT[ti] = vC[ci];
+		++ci,++ti;
+	}
+
+       	while( ci<nnzC && bi==nnzB && ti > 0 && RSB_COO_EQ(IC[ci],JC[ci],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		IT[ti] = IC[ci];
+		JT[ti] = JC[ci];
+		vT[ti]+= vC[ci];
+		++ci;
+		++ti;
+		++onz;
+	}
+
+	}
+	else 
+#endif
+		errval = RSB_ERR_INTERNAL_ERROR;
+
+	coo.nnz -= onz;
+	RSB_COA_MEMCPY(IA,IT,offB,0,(coo.nnz));
+	RSB_COA_MEMCPY(JA,JT,offB,0,(coo.nnz));
+	if(wp)
+	{
+		RSB_A_MEMCPY_parallel(  VA,VT,offB,0,(coo.nnz),es);
+	}
+	else
+	{
+		RSB_A_MEMCPY(  VA,VT,offB,0,(coo.nnz),es);
+	}
+	RSB_ASSERT(rsb__util_is_coo_array_sorted_up_partial_order(IA,coo.nnz));
+	goto done;
+err:
+	errval = RSB_ERR_ENOMEM;
+done:
+	if( !( coop && coop->nnz ) )
+		rsb__destroy_coo_matrix_t(&coo);
+	RSB_ASSIGN_IF(onzp,onz);
+ret:
+	return errval;
+}
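+
+/*
+ * Usage sketch (hypothetical arrays): rsb__cor_merge_dups merges the two
+ * sorted runs [offB,offB+nnzB) and [offB+nnzB,offB+nnzB+nnzC) of (VA,IA,JA)
+ * in place, summing duplicates; *onzp receives the count of coefficients
+ * eliminated.  E.g., run B: (0,0)=1 (0,2)=2 and run C: (0,0)=3 (1,1)=4
+ * merge into (0,0)=4 (0,2)=2 (1,1)=4, with *onzp == 1.
+ */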
+
+rsb_err_t rsb__do_copy_converted_scaled(const void *RSB_RESTRICT  src, void *RSB_RESTRICT dst, const void *RSB_RESTRICT  alphap, rsb_type_t stype,rsb_type_t dtype, size_t nnz, rsb_trans_t transA)
+{
+	/*!
+	 * Copies nnz coefficients from src to dst, converting from stype to
+	 * dtype, scaling by *alphap (read using the source type; 1 if NULL)
+	 * and conjugating when transA requests conjugation.
+	 * \return \rsberrcodemsg
+	 * */
+	rsb_nnz_idx_t nzi;
+
+	if((!dst) || (!src))
+		return RSB_ERR_BADARGS;
+
+	if( stype == RSB_NUMERICAL_TYPE_DOUBLE  && dtype == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		const double alpha = alphap?*(double*)alphap:((double)(1.0));
+		const double*tsrc = src;
+		double*tdst = dst;
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = (double)(alpha*tsrc[nzi]);
+	}
+	else 
+	if( stype == RSB_NUMERICAL_TYPE_DOUBLE  && dtype == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		const double alpha = alphap?*(double*)alphap:((double)(1.0));
+		const double*tsrc = src;
+		float*tdst = dst;
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = (float)(alpha*tsrc[nzi]);
+	}
+	else 
+	if( stype == RSB_NUMERICAL_TYPE_DOUBLE  && dtype == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		const double alpha = alphap?*(double*)alphap:((double)(1.0));
+		const double*tsrc = src;
+		float complex*tdst = dst;
+		if(RSB_DOES_CONJUGATE(transA))
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = conjf((float complex)(alpha*tsrc[nzi])) + 0*I;
+		else
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = (float complex)(alpha*tsrc[nzi]) + 0*I;
+	}
+	else 
+	if( stype == RSB_NUMERICAL_TYPE_DOUBLE  && dtype == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		const double alpha = alphap?*(double*)alphap:((double)(1.0));
+		const double*tsrc = src;
+		double complex*tdst = dst;
+		if(RSB_DOES_CONJUGATE(transA))
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = conj((double complex)(alpha*tsrc[nzi])) + 0*I;
+		else
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = (double complex)(alpha*tsrc[nzi]) + 0*I;
+	}
+	else 
+	if( stype == RSB_NUMERICAL_TYPE_FLOAT  && dtype == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		const float alpha = alphap?*(float*)alphap:((float)(1.0));
+		const float*tsrc = src;
+		double*tdst = dst;
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = (double)(alpha*tsrc[nzi]);
+	}
+	else 
+	if( stype == RSB_NUMERICAL_TYPE_FLOAT  && dtype == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		const float alpha = alphap?*(float*)alphap:((float)(1.0));
+		const float*tsrc = src;
+		float*tdst = dst;
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = (float)(alpha*tsrc[nzi]);
+	}
+	else 
+	if( stype == RSB_NUMERICAL_TYPE_FLOAT  && dtype == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		const float alpha = alphap?*(float*)alphap:((float)(1.0));
+		const float*tsrc = src;
+		float complex*tdst = dst;
+		if(RSB_DOES_CONJUGATE(transA))
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = conjf((float complex)(alpha*tsrc[nzi])) + 0*I;
+		else
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = (float complex)(alpha*tsrc[nzi]) + 0*I;
+	}
+	else 
+	if( stype == RSB_NUMERICAL_TYPE_FLOAT  && dtype == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		const float alpha = alphap?*(float*)alphap:((float)(1.0));
+		const float*tsrc = src;
+		double complex*tdst = dst;
+		if(RSB_DOES_CONJUGATE(transA))
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = conj((double complex)(alpha*tsrc[nzi])) + 0*I;
+		else
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = (double complex)(alpha*tsrc[nzi]) + 0*I;
+	}
+	else 
+	if( stype == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  && dtype == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		const float complex alpha = alphap?*(float complex*)alphap:((float complex)(1.0));
+		const float complex*tsrc = src;
+		double*tdst = dst;
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = crealf((double)(alpha*tsrc[nzi]));
+	}
+	else 
+	if( stype == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  && dtype == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		const float complex alpha = alphap?*(float complex*)alphap:((float complex)(1.0));
+		const float complex*tsrc = src;
+		float*tdst = dst;
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = crealf((float)(alpha*tsrc[nzi]));
+	}
+	else 
+	if( stype == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  && dtype == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		const float complex alpha = alphap?*(float complex*)alphap:((float complex)(1.0));
+		const float complex*tsrc = src;
+		float complex*tdst = dst;
+		if(RSB_DOES_CONJUGATE(transA))
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = conjf((float complex)(alpha*tsrc[nzi]));
+		else
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = (float complex)(alpha*tsrc[nzi]);
+	}
+	else 
+	if( stype == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  && dtype == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		const float complex alpha = alphap?*(float complex*)alphap:((float complex)(1.0));
+		const float complex*tsrc = src;
+		double complex*tdst = dst;
+		if(RSB_DOES_CONJUGATE(transA))
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = conj((double complex)(alpha*tsrc[nzi]));
+		else
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = (double complex)(alpha*tsrc[nzi]);
+	}
+	else 
+	if( stype == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  && dtype == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		const double complex alpha = alphap?*(double complex*)alphap:((double complex)(1.0));
+		const double complex*tsrc = src;
+		double*tdst = dst;
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = creal((double)(alpha*tsrc[nzi]));
+	}
+	else 
+	if( stype == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  && dtype == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		const double complex alpha = alphap?*(double complex*)alphap:((double complex)(1.0));
+		const double complex*tsrc = src;
+		float*tdst = dst;
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = creal((float)(alpha*tsrc[nzi]));
+	}
+	else 
+	if( stype == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  && dtype == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		const double complex alpha = alphap?*(double complex*)alphap:((double complex)(1.0));
+		const double complex*tsrc = src;
+		float complex*tdst = dst;
+		if(RSB_DOES_CONJUGATE(transA))
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = conjf((float complex)(alpha*tsrc[nzi]));
+		else
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = (float complex)(alpha*tsrc[nzi]);
+	}
+	else 
+	if( stype == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  && dtype == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		const double complex alpha = alphap?*(double complex*)alphap:((double complex)(1.0));
+		const double complex*tsrc = src;
+		double complex*tdst = dst;
+		if(RSB_DOES_CONJUGATE(transA))
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = conj((double complex)(alpha*tsrc[nzi]));
+		else
+			for(nzi=0;nzi<nnz;++nzi) tdst[nzi] = (double complex)(alpha*tsrc[nzi]);
+	}
+	else 
+	return RSB_ERR_UNSUPPORTED_TYPE;
+	return RSB_ERR_NO_ERROR;
+}
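+
+/*
+ * Minimal usage sketch (hypothetical buffers; constants as in rsb_types.h):
+ *
+ *   double src[2] = {1.0, -2.0};
+ *   float complex dst[2];
+ *   const double alpha = 2.0;
+ *   rsb__do_copy_converted_scaled(src, dst, &alpha,
+ *       RSB_NUMERICAL_TYPE_DOUBLE, RSB_NUMERICAL_TYPE_FLOAT_COMPLEX,
+ *       2, RSB_TRANSPOSITION_N);
+ *   // dst now holds {2+0i, -4+0i}
+ */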
+
+rsb_err_t rsb_util_csc2csr(const void *RSB_RESTRICT VA, const rsb_coo_idx_t * RSB_RESTRICT IA, const rsb_coo_idx_t * RSB_RESTRICT JA, void *RSB_RESTRICT oVA, rsb_coo_idx_t * RSB_RESTRICT oIA, rsb_coo_idx_t * RSB_RESTRICT oJA, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnz, rsb_type_t typecode, const rsb_coo_idx_t offi, const rsb_coo_idx_t offo, rsb_flags_t*flagsp)
+{
+	/*!
+	 * Converts a CSC matrix (JA: k+1 column pointers, IA: row indices)
+	 * to CSR (oIA: m+1 row pointers, oJA: column indices), translating
+	 * the indices from base offi to base offo and detecting triangularity.
+	 * */
+	rsb_nnz_idx_t nzi = 0, nzo;
+	rsb_coo_idx_t nr, nc;
+	rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+	rsb_bool_t islowtri = RSB_BOOL_TRUE, isupptri = RSB_BOOL_TRUE;
+
+	RSB_BZERO(oIA, sizeof(*oIA)*(m+1));
+	oIA[0] = offo;
+	for(nzi=0;nzi<nnz;++nzi)
+		oIA[IA[nzi]-offi+1]++;
+	for(nr=0;nr<m;++nr)
+		oIA[nr+1]+=oIA[nr];
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+	for(nc=0;nc<k;++nc)
+	for(nzi = JA[nc]-offi;nzi<JA[nc+1]-offi;++nzi)
+	{
+		nzo = oIA[IA[nzi]-offi]++;
+		oJA[nzo] = nc+offo;
+		((double*)oVA)[nzo] = ((const double*)VA)[nzi];
+	}
+	else 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+	for(nc=0;nc<k;++nc)
+	for(nzi = JA[nc]-offi;nzi<JA[nc+1]-offi;++nzi)
+	{
+		nzo = oIA[IA[nzi]-offi]++;
+		oJA[nzo] = nc+offo;
+		((float*)oVA)[nzo] = ((const float*)VA)[nzi];
+	}
+	else 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	for(nc=0;nc<k;++nc)
+	for(nzi = JA[nc]-offi;nzi<JA[nc+1]-offi;++nzi)
+	{
+		nzo = oIA[IA[nzi]-offi]++;
+		oJA[nzo] = nc+offo;
+		((float complex*)oVA)[nzo] = ((const float complex*)VA)[nzi];
+	}
+	else 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	for(nc=0;nc<k;++nc)
+	for(nzi = JA[nc]-offi;nzi<JA[nc+1]-offi;++nzi)
+	{
+		nzo = oIA[IA[nzi]-offi]++;
+		oJA[nzo] = nc+offo;
+		((double complex*)oVA)[nzo] = ((const double complex*)VA)[nzi];
+	}
+	else 
+	return RSB_ERR_UNSUPPORTED_TYPE;
+	for(nc=0;nc<k;++nc)
+	for(nzi=JA[nc]-offi;nzi<JA[nc+1]-offi;++nzi)
+	{
+		oIA[IA[nzi]-offi]--;
+		if(IA[nzi]-offi>nc)isupptri = RSB_BOOL_FALSE;
+		else if(IA[nzi]-offi<nc)islowtri = RSB_BOOL_FALSE;
+	}
+	if(isupptri) RSB_DO_FLAG_ADD(flags,RSB_FLAG_UPPER_TRIANGULAR);
+	if(islowtri) RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER_TRIANGULAR);
+	if(flagsp) RSB_DO_FLAG_ADD(*flagsp,flags); /* test the pointer for NULL, not the pointed-to flags value */
+	return RSB_ERR_NO_ERROR;
+}
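+
+/*
+ * Worked example (sketch): the 2x2 matrix [[10,0],[20,30]] in zero-based CSC
+ * form has JA = {0,2,3} (column pointers), IA = {0,1,1}, VA = {10,20,30};
+ * rsb_util_csc2csr yields CSR oIA = {0,1,3}, oJA = {0,0,1}, oVA = {10,20,30},
+ * and *flagsp gains RSB_FLAG_LOWER_TRIANGULAR.
+ */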
+
+rsb_err_t rsb_util_coo_copy_and_stats(const void *RSB_RESTRICT VA, const rsb_coo_idx_t * RSB_RESTRICT IA, const rsb_coo_idx_t * RSB_RESTRICT JA, void *RSB_RESTRICT oVA, rsb_coo_idx_t * RSB_RESTRICT oIA, rsb_coo_idx_t * RSB_RESTRICT oJA, rsb_coo_idx_t*m, rsb_coo_idx_t*k, const rsb_nnz_idx_t nnz, const rsb_type_t typecode, const rsb_coo_idx_t offi, const rsb_coo_idx_t offo, rsb_flags_t iflags, rsb_flags_t*flagsp)
+{
+	/*!
+	 * FIXME: unfinished! shall also support typecode-based removal of zeros.
+	 * */
+	rsb_nnz_idx_t nzi = 0;
+	rsb_coo_idx_t maxi = 0,maxj = 0;
+	rsb_bool_t islowtri = RSB_BOOL_TRUE,isupptri = RSB_BOOL_TRUE;
+	rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+	rsb_nnz_idx_t lowtrin = 0,upptrin = 0;
+
+	if(nnz<1)
+		goto done;
+
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+{
+	rsb_coo_idx_t i = IA[nzi], j = JA[nzi];
+	maxi = i, maxj = j;
+	((double*)oVA)[nzi] = ((double*)VA)[nzi];
+	oIA[nzi] = i-offi+offo;
+	oJA[nzi] = j-offi+offo;
+	lowtrin |= (i>j), upptrin |= (i<j);
+	for(nzi=1;RSB_LIKELY(nzi<nnz);++nzi)
+	{
+		rsb_coo_idx_t i = IA[nzi],j = JA[nzi];
+		maxi = RSB_MAX(maxi, i);
+		maxj = RSB_MAX(maxj, j);
+		((double*)oVA)[nzi] = ((double*)VA)[nzi];
+		oIA[nzi] = i-offi+offo;
+		oJA[nzi] = j-offi+offo;
+		lowtrin |= (i>j), upptrin |= (i<j);
+	}
+}
+	else
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+{
+	rsb_coo_idx_t i = IA[nzi], j = JA[nzi];
+	maxi = i, maxj = j;
+	((float*)oVA)[nzi] = ((float*)VA)[nzi];
+	oIA[nzi] = i-offi+offo;
+	oJA[nzi] = j-offi+offo;
+	lowtrin |= (i>j), upptrin |= (i<j);
+	for(nzi=1;RSB_LIKELY(nzi<nnz);++nzi)
+	{
+		rsb_coo_idx_t i = IA[nzi],j = JA[nzi];
+		maxi = RSB_MAX(maxi, i);
+		maxj = RSB_MAX(maxj, j);
+		((float*)oVA)[nzi] = ((float*)VA)[nzi];
+		oIA[nzi] = i-offi+offo;
+		oJA[nzi] = j-offi+offo;
+		lowtrin |= (i>j), upptrin |= (i<j);
+	}
+}
+	else
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+{
+	rsb_coo_idx_t i = IA[nzi], j = JA[nzi];
+	maxi = i, maxj = j;
+	((float complex*)oVA)[nzi] = ((float complex*)VA)[nzi];
+	oIA[nzi] = i-offi+offo;
+	oJA[nzi] = j-offi+offo;
+	lowtrin |= (i>j), upptrin |= (i<j);
+	for(nzi=1;RSB_LIKELY(nzi<nnz);++nzi)
+	{
+		rsb_coo_idx_t i = IA[nzi],j = JA[nzi];
+		maxi = RSB_MAX(maxi, i);
+		maxj = RSB_MAX(maxj, j);
+		((float complex*)oVA)[nzi] = ((float complex*)VA)[nzi];
+		oIA[nzi] = i-offi+offo;
+		oJA[nzi] = j-offi+offo;
+		lowtrin |= (i>j), upptrin |= (i<j);
+	}
+}
+	else
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+{
+	rsb_coo_idx_t i = IA[nzi], j = JA[nzi];
+	maxi = i, maxj = j;
+	((double complex*)oVA)[nzi] = ((double complex*)VA)[nzi];
+	oIA[nzi] = i-offi+offo;
+	oJA[nzi] = j-offi+offo;
+	lowtrin |= (i>j), upptrin |= (i<j);
+	for(nzi=1;RSB_LIKELY(nzi<nnz);++nzi)
+	{
+		rsb_coo_idx_t i = IA[nzi],j = JA[nzi];
+		maxi = RSB_MAX(maxi, i);
+		maxj = RSB_MAX(maxj, j);
+		((double complex*)oVA)[nzi] = ((double complex*)VA)[nzi];
+		oIA[nzi] = i-offi+offo;
+		oJA[nzi] = j-offi+offo;
+		lowtrin |= (i>j), upptrin |= (i<j);
+	}
+}
+	else
+	return RSB_ERR_UNSUPPORTED_TYPE;
+	if(upptrin) islowtri = RSB_BOOL_FALSE;
+	if(lowtrin) isupptri = RSB_BOOL_FALSE;
+	if(isupptri) RSB_DO_FLAG_ADD(flags,RSB_FLAG_UPPER_TRIANGULAR);
+	if(islowtri) RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER_TRIANGULAR);
+	if(flagsp) RSB_DO_FLAG_ADD(*flagsp,flags); /* guard the pointer, not the pointed-to flags value */
+	if(m) *m = maxi+1; /* likewise: test the output pointers for NULL before writing */
+	if(k) *k = maxj+1;
+done:
+	return RSB_ERR_NO_ERROR;
+}
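+
+/*
+ * Usage sketch (hypothetical zero-based arrays of length nnz):
+ *
+ *   rsb_coo_idx_t m = 0, k = 0;
+ *   rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+ *   rsb_util_coo_copy_and_stats(VA, IA, JA, oVA, oIA, oJA, &m, &k, nnz,
+ *       RSB_NUMERICAL_TYPE_DOUBLE, 0, 0, RSB_FLAG_NOFLAGS, &flags);
+ *   // m and k receive 1 + the largest row/column index encountered;
+ *   // flags gains the detected triangularity bits.
+ */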
+
+rsb_err_t rsb_util_coo_copy(const void *RSB_RESTRICT VA, const rsb_coo_idx_t * RSB_RESTRICT IA, const rsb_coo_idx_t * RSB_RESTRICT JA, void *RSB_RESTRICT oVA, rsb_coo_idx_t * RSB_RESTRICT oIA, rsb_coo_idx_t * RSB_RESTRICT oJA, const rsb_nnz_idx_t nnz, const rsb_type_t typecode, const rsb_coo_idx_t offi, const rsb_coo_idx_t offo)
+{
+	/*!
+	 * FIXME: unfinished! shall also support typecode-based removal of zeros.
+	 * */
+	rsb_nnz_idx_t nzi = 0;
+
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+{
+	for(nzi=0;RSB_LIKELY(nzi<nnz);++nzi)
+	{
+		rsb_coo_idx_t i = IA[nzi], j = JA[nzi];
+		((double*)oVA)[nzi] = ((double*)VA)[nzi];
+		oIA[nzi] = i-offi+offo;
+		oJA[nzi] = j-offi+offo;
+	}
+}
+	else
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+{
+	for(nzi=0;RSB_LIKELY(nzi<nnz);++nzi)
+	{
+		rsb_coo_idx_t i = IA[nzi], j = JA[nzi];
+		((float*)oVA)[nzi] = ((float*)VA)[nzi];
+		oIA[nzi] = i-offi+offo;
+		oJA[nzi] = j-offi+offo;
+	}
+}
+	else
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+{
+	for(nzi=0;RSB_LIKELY(nzi<nnz);++nzi)
+	{
+		rsb_coo_idx_t i = IA[nzi], j = JA[nzi];
+		((float complex*)oVA)[nzi] = ((float complex*)VA)[nzi];
+		oIA[nzi] = i-offi+offo;
+		oJA[nzi] = j-offi+offo;
+	}
+}
+	else
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+{
+	for(nzi=0;RSB_LIKELY(nzi<nnz);++nzi)
+	{
+		rsb_coo_idx_t i = IA[nzi], j = JA[nzi];
+		((double complex*)oVA)[nzi] = ((double complex*)VA)[nzi];
+		oIA[nzi] = i-offi+offo;
+		oJA[nzi] = j-offi+offo;
+	}
+}
+	else
+	return RSB_ERR_UNSUPPORTED_TYPE;
+	return RSB_ERR_NO_ERROR;
+}
+
+/* sparse blas level 1 equivalent functions */
+
+int rsb__BLAS_Xusdot(const rsb_type_t typecode, const enum blas_conj_type conj_arg, const rsb_blas_int_t nz, const void*x, const rsb_blas_int_t*indx, const void*y, const rsb_blas_int_t incy, void*r, const enum blas_base_type index_base)
+{
+	/*!
+		\rsb_spblasl1_dot_msg
+		\rsb_warn_untested_msg
+	*/
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+{
+	double*xa = (double*)x;
+	double*ya = (double*)y;
+	double*rp = (double*)r;
+	double ac = ((double)(0));
+	rsb_blas_int_t nzi, xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+    		ac += xa[nzi] * ya[xi*incy];
+	}
+	RSB_SET_IF_NOT_NULL(rp,ac);
+}
+	else
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+{
+	float*xa = (float*)x;
+	float*ya = (float*)y;
+	float*rp = (float*)r;
+	float ac = ((float)(0));
+	rsb_blas_int_t nzi, xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+    		ac += xa[nzi] * ya[xi*incy];
+	}
+	RSB_SET_IF_NOT_NULL(rp,ac);
+}
+	else
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+{
+	float complex*xa = (float complex*)x;
+	float complex*ya = (float complex*)y;
+	float complex*rp = (float complex*)r;
+	float complex ac = ((float complex)(0));
+	rsb_blas_int_t nzi, xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	if( conj_arg == blas_conj )
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+    		ac += conjf(xa[nzi]) * ya[xi*incy];
+	}
+	else
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+    		ac += xa[nzi] * ya[xi*incy];
+	}
+	RSB_SET_IF_NOT_NULL(rp,ac);
+}
+	else
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+{
+	double complex*xa = (double complex*)x;
+	double complex*ya = (double complex*)y;
+	double complex*rp = (double complex*)r;
+	double complex ac = ((double complex)(0));
+	rsb_blas_int_t nzi, xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	if( conj_arg == blas_conj )
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+    		ac += conj(xa[nzi]) * ya[xi*incy];
+	}
+	else
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+    		ac += xa[nzi] * ya[xi*incy];
+	}
+	RSB_SET_IF_NOT_NULL(rp,ac);
+}
+	else
+	return RSB_ERR_UNSUPPORTED_TYPE;
+	return RSB_ERR_NO_ERROR;
+}
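+
+/*
+ * Semantics sketch (Sparse BLAS USDOT): r = sum_k op(x(k)) * y(indx(k)),
+ * where op conjugates for complex types when conj_arg == blas_conj.
+ * E.g., x = {1,2}, indx = {0,3}, y = {1,0,0,5}, incy = 1, zero-based:
+ * r = 1*1 + 2*5 = 11.
+ */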
+
+int rsb__BLAS_Xusaxpy(const rsb_type_t typecode, const rsb_blas_int_t nz, const void*alpha, const void*x, const rsb_blas_int_t*indx, const void*y, const rsb_blas_int_t incy, const enum blas_base_type index_base)
+{
+	/*!
+		\rsb_spblasl1_axpy_msg
+		\rsb_warn_untested_msg
+	*/
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+{
+	const double*xa = (const double*)x;
+	double*ya = (double*)y;
+	const double alphav = *(const double*)alpha;
+	rsb_blas_int_t nzi, xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+		ya[xi*incy] += alphav*xa[nzi]; /* y(indx(k)) += alpha*x(k), as in Sparse BLAS USAXPY */
+	}
+}
+	else
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+{
+	const float*xa = (const float*)x;
+	float*ya = (float*)y;
+	const float alphav = *(const float*)alpha;
+	rsb_blas_int_t nzi, xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+		ya[xi*incy] += alphav*xa[nzi]; /* y(indx(k)) += alpha*x(k) */
+	}
+}
+	else
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+{
+	const float complex*xa = (const float complex*)x;
+	float complex*ya = (float complex*)y;
+	const float complex alphav = *(const float complex*)alpha;
+	rsb_blas_int_t nzi, xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+		ya[xi*incy] += alphav*xa[nzi]; /* y(indx(k)) += alpha*x(k) */
+	}
+}
+	else
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+{
+	const double complex*xa = (const double complex*)x;
+	double complex*ya = (double complex*)y;
+	const double complex alphav = *(const double complex*)alpha;
+	rsb_blas_int_t nzi, xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+		ya[xi*incy] += alphav*xa[nzi]; /* y(indx(k)) += alpha*x(k) */
+	}
+}
+	else
+	return RSB_ERR_UNSUPPORTED_TYPE;
+	return RSB_ERR_NO_ERROR;
+}
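+
+/*
+ * Semantics sketch (Sparse BLAS USAXPY): y(indx(k)) += alpha * x(k) for each
+ * of the nz stored entries.  E.g., alpha = 2, x = {1,3}, indx = {0,2},
+ * y = {0,0,0} (zero-based, incy = 1) leaves y = {2,0,6}.
+ */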
+
+int rsb__BLAS_Xusga(const rsb_type_t typecode, const rsb_blas_int_t nz, const void*y, const rsb_blas_int_t incy, void*x, const rsb_blas_int_t*indx, const enum blas_base_type index_base)
+{
+	/*!
+		\rsb_spblasl1_ga_msg
+		\rsb_warn_untested_msg
+	*/
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+{
+	double*xa = (double*)x;
+	const double*ya = (const double*)y;
+	rsb_blas_int_t nzi,xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+    		xa[nzi] = ya[xi*incy];
+	}
+}
+	else
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+{
+	float*xa = (float*)x;
+	const float*ya = (const float*)y;
+	rsb_blas_int_t nzi,xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+    		xa[nzi] = ya[xi*incy];
+	}
+}
+	else
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+{
+	float complex*xa = (float complex*)x;
+	const float complex*ya = (const float complex*)y;
+	rsb_blas_int_t nzi,xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+    		xa[nzi] = ya[xi*incy];
+	}
+}
+	else
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+{
+	double complex*xa = (double complex*)x;
+	const double complex*ya = (const double complex*)y;
+	rsb_blas_int_t nzi,xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+    		xa[nzi] = ya[xi*incy];
+	}
+}
+	else
+	return RSB_ERR_UNSUPPORTED_TYPE;
+	return RSB_ERR_NO_ERROR;
+}
+
+int rsb__BLAS_Xusgz(const rsb_type_t typecode, const rsb_blas_int_t nz, void*y, const rsb_blas_int_t incy, void*x, const rsb_blas_int_t*indx, const enum blas_base_type index_base)
+{
+	/*!
+		\rsb_spblasl1_gz_msg
+		\rsb_warn_untested_msg
+	*/
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+{
+	double*xa = (double*)x;
+	double*ya = (double*)y;
+	rsb_blas_int_t nzi,xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+    		xa[nzi] = ya[xi*incy];
+		ya[xi*incy] = ((double)(0));
+	}
+}
+	else
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+{
+	float*xa = (float*)x;
+	float*ya = (float*)y;
+	rsb_blas_int_t nzi,xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+    		xa[nzi] = ya[xi*incy];
+		ya[xi*incy] = ((float)(0));
+	}
+}
+	else
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+{
+	float complex*xa = (float complex*)x;
+	float complex*ya = (float complex*)y;
+	rsb_blas_int_t nzi,xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+    		xa[nzi] = ya[xi*incy];
+		ya[xi*incy] = ((float complex)(0));
+	}
+}
+	else
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+{
+	double complex*xa = (double complex*)x;
+	double complex*ya = (double complex*)y;
+	rsb_blas_int_t nzi,xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+    		xa[nzi] = ya[xi*incy];
+		ya[xi*incy] = ((double complex)(0));
+	}
+}
+	else
+	return RSB_ERR_UNSUPPORTED_TYPE;
+	return RSB_ERR_NO_ERROR;
+}
+
+int rsb__BLAS_Xussc(const rsb_type_t typecode, const rsb_blas_int_t nz, const void*x, void*y, const rsb_blas_int_t incy, const rsb_blas_int_t*indx, const enum blas_base_type index_base)
+{
+	/*!
+		\rsb_spblasl1_sc_msg
+		\rsb_warn_untested_msg
+	*/
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+{
+	const double*xa = (const double*)x;
+	double*ya = (double*)y;
+	rsb_blas_int_t nzi,xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+		ya[xi*incy] = xa[nzi];
+	}
+}
+	else
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+{
+	const float*xa = (const float*)x;
+	float*ya = (float*)y;
+	rsb_blas_int_t nzi,xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+		ya[xi*incy] = xa[nzi];
+	}
+}
+	else
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+{
+	const float complex*xa = (const float complex*)x;
+	float complex*ya = (float complex*)y;
+	rsb_blas_int_t nzi,xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+		ya[xi*incy] = xa[nzi];
+	}
+}
+	else
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+{
+	const double complex*xa = (const double complex*)x;
+	double complex*ya = (double complex*)y;
+	rsb_blas_int_t nzi,xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+		ya[xi*incy] = xa[nzi];
+	}
+}
+	else
+	return RSB_ERR_UNSUPPORTED_TYPE;
+	return RSB_ERR_NO_ERROR;
+}
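+
+/*
+ * The three routines above mirror the Sparse BLAS level 1 trio:
+ *   USGA: x(k) = y(indx(k))                       (gather)
+ *   USGZ: x(k) = y(indx(k)), y(indx(k)) = 0       (gather and zero)
+ *   USSC: y(indx(k)) = x(k)                       (scatter)
+ * E.g., with y = {7,8,9} and indx = {2,0} (zero-based), USGA yields x = {9,7}.
+ */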
+
+/* blas level 1 equivalent functions */
+
+rsb_err_t rsb__cblas_Xcopy(rsb_type_t typecode, rsb_nnz_idx_t n, const void * x, rsb_nnz_idx_t incx, void * y, rsb_nnz_idx_t incy)
+{
+	return rsb__xcopy_strided_typed(y,x,0,0,n,typecode,incy,incx);
+}
+
+rsb_err_t rsb__cblas_Xnrm2(rsb_type_t type, size_t n, const void * a, rsb_nnz_idx_t incA, void * c)
+{
+	/*!
+	 * c <- sqrt(sum(|a_i|^2))
+         *
+	 * \param a	an array pointer
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \note see dznrm2 in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+	const double*ta = a; double *tc = c, acc = 0.0, tmp = 0.0;
+	for(i=0;i+15<n;i+=16)
+	{
+		acc = fabs(ta[(i+0 )*incA]); tmp += acc*acc;
+		acc = fabs(ta[(i+1 )*incA]); tmp += acc*acc;
+		acc = fabs(ta[(i+2 )*incA]); tmp += acc*acc;
+		acc = fabs(ta[(i+3 )*incA]); tmp += acc*acc;
+		acc = fabs(ta[(i+4 )*incA]); tmp += acc*acc;
+		acc = fabs(ta[(i+5 )*incA]); tmp += acc*acc;
+		acc = fabs(ta[(i+6 )*incA]); tmp += acc*acc;
+		acc = fabs(ta[(i+7 )*incA]); tmp += acc*acc;
+		acc = fabs(ta[(i+8 )*incA]); tmp += acc*acc;
+		acc = fabs(ta[(i+9 )*incA]); tmp += acc*acc;
+		acc = fabs(ta[(i+10)*incA]); tmp += acc*acc;
+		acc = fabs(ta[(i+11)*incA]); tmp += acc*acc;
+		acc = fabs(ta[(i+12)*incA]); tmp += acc*acc;
+		acc = fabs(ta[(i+13)*incA]); tmp += acc*acc;
+		acc = fabs(ta[(i+14)*incA]); tmp += acc*acc;
+		acc = fabs(ta[(i+15)*incA]); tmp += acc*acc;
+	}
+	for(;i<n;++i)
+	{
+		acc = fabs(ta[i*incA]); tmp += acc*acc;
+	}
+	tc[0] = sqrt(tmp);
+	}
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+	const float*ta = a; float *tc = c, acc = 0.0f, tmp = 0.0f;
+	for(i=0;i+15<n;i+=16)
+	{
+		acc = fabsf(ta[(i+0 )*incA]); tmp += acc*acc;
+		acc = fabsf(ta[(i+1 )*incA]); tmp += acc*acc;
+		acc = fabsf(ta[(i+2 )*incA]); tmp += acc*acc;
+		acc = fabsf(ta[(i+3 )*incA]); tmp += acc*acc;
+		acc = fabsf(ta[(i+4 )*incA]); tmp += acc*acc;
+		acc = fabsf(ta[(i+5 )*incA]); tmp += acc*acc;
+		acc = fabsf(ta[(i+6 )*incA]); tmp += acc*acc;
+		acc = fabsf(ta[(i+7 )*incA]); tmp += acc*acc;
+		acc = fabsf(ta[(i+8 )*incA]); tmp += acc*acc;
+		acc = fabsf(ta[(i+9 )*incA]); tmp += acc*acc;
+		acc = fabsf(ta[(i+10)*incA]); tmp += acc*acc;
+		acc = fabsf(ta[(i+11)*incA]); tmp += acc*acc;
+		acc = fabsf(ta[(i+12)*incA]); tmp += acc*acc;
+		acc = fabsf(ta[(i+13)*incA]); tmp += acc*acc;
+		acc = fabsf(ta[(i+14)*incA]); tmp += acc*acc;
+		acc = fabsf(ta[(i+15)*incA]); tmp += acc*acc;
+	}
+	for(;i<n;++i)
+	{
+		acc = fabsf(ta[i*incA]); tmp += acc*acc;
+	}
+	tc[0] = sqrtf(tmp);
+	}
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+	const float complex*ta = a; float *tc = c, acc = 0.0f, tmp = 0.0f;
+	for(i=0;i+15<n;i+=16)
+	{
+		acc = cabsf(ta[(i+0 )*incA]); tmp += acc*acc;
+		acc = cabsf(ta[(i+1 )*incA]); tmp += acc*acc;
+		acc = cabsf(ta[(i+2 )*incA]); tmp += acc*acc;
+		acc = cabsf(ta[(i+3 )*incA]); tmp += acc*acc;
+		acc = cabsf(ta[(i+4 )*incA]); tmp += acc*acc;
+		acc = cabsf(ta[(i+5 )*incA]); tmp += acc*acc;
+		acc = cabsf(ta[(i+6 )*incA]); tmp += acc*acc;
+		acc = cabsf(ta[(i+7 )*incA]); tmp += acc*acc;
+		acc = cabsf(ta[(i+8 )*incA]); tmp += acc*acc;
+		acc = cabsf(ta[(i+9 )*incA]); tmp += acc*acc;
+		acc = cabsf(ta[(i+10)*incA]); tmp += acc*acc;
+		acc = cabsf(ta[(i+11)*incA]); tmp += acc*acc;
+		acc = cabsf(ta[(i+12)*incA]); tmp += acc*acc;
+		acc = cabsf(ta[(i+13)*incA]); tmp += acc*acc;
+		acc = cabsf(ta[(i+14)*incA]); tmp += acc*acc;
+		acc = cabsf(ta[(i+15)*incA]); tmp += acc*acc;
+	}
+	for(;i<n;++i)
+	{
+		acc = cabsf(ta[i*incA]); tmp += acc*acc;
+	}
+	tc[0] = crealf(csqrtf(tmp));
+	}
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( type == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+	const double complex*ta = a; double *tc = c, acc = 0.0, tmp = 0.0;
+	for(i=0;i+15<n;i+=16)
+	{
+		acc = cabs(ta[(i+0 )*incA]); tmp += acc*acc;
+		acc = cabs(ta[(i+1 )*incA]); tmp += acc*acc;
+		acc = cabs(ta[(i+2 )*incA]); tmp += acc*acc;
+		acc = cabs(ta[(i+3 )*incA]); tmp += acc*acc;
+		acc = cabs(ta[(i+4 )*incA]); tmp += acc*acc;
+		acc = cabs(ta[(i+5 )*incA]); tmp += acc*acc;
+		acc = cabs(ta[(i+6 )*incA]); tmp += acc*acc;
+		acc = cabs(ta[(i+7 )*incA]); tmp += acc*acc;
+		acc = cabs(ta[(i+8 )*incA]); tmp += acc*acc;
+		acc = cabs(ta[(i+9 )*incA]); tmp += acc*acc;
+		acc = cabs(ta[(i+10)*incA]); tmp += acc*acc;
+		acc = cabs(ta[(i+11)*incA]); tmp += acc*acc;
+		acc = cabs(ta[(i+12)*incA]); tmp += acc*acc;
+		acc = cabs(ta[(i+13)*incA]); tmp += acc*acc;
+		acc = cabs(ta[(i+14)*incA]); tmp += acc*acc;
+		acc = cabs(ta[(i+15)*incA]); tmp += acc*acc;
+	}
+	for(;i<n;++i)
+	{
+		acc = cabs(ta[i*incA]); tmp += acc*acc;
+	}
+	tc[0] = creal(csqrt(tmp));
+	}
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+	return RSB_ERR_UNSUPPORTED_TYPE;
+	return RSB_ERR_NO_ERROR;
+}
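+
+/*
+ * Usage sketch: the unrolled loops above accumulate sum(|a_i|^2) and take its
+ * square root, as in BLAS nrm2 (note: without the overflow-guarding scaling
+ * of the reference routines):
+ *
+ *   double a[3] = {3.0, 0.0, 4.0}, c = 0.0;
+ *   rsb__cblas_Xnrm2(RSB_NUMERICAL_TYPE_DOUBLE, 3, a, 1, &c); // c == 5.0
+ */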
+
+rsb_err_t rsb__cblas_Xdotu_sub(rsb_type_t type, size_t n, const void * x, rsb_nnz_idx_t incx, const void * y, rsb_nnz_idx_t incy, void *dotu)
+{
+	/*!
+	 * */
+	return rsb__vector_mult_sum(x,y,dotu,type,n,incx,incy);
+}
+
+rsb_err_t rsb__cblas_Xscal(rsb_type_t type, size_t n, const void * alphap, void * a, size_t stride)
+{
+	/*!
+	 * a <- a * alpha
+	 * */
+	return rsb_strided_vector_scale(a,alphap,type,n,stride);
+}
+
+rsb_err_t rsb__coo_insertion_sort(rsb_type_t typecode, void* VB, rsb_coo_idx_t * IB, rsb_coo_idx_t * JB, rsb_nnz_idx_t offA, rsb_nnz_idx_t nnzA)
+{
+	/* only for *small* arrays, where allocation of a temporary array is not justified */
+	rsb_coo_idx_t * IA = NULL, *JA = NULL;
+	rsb_nnz_idx_t i, j;
+
+	IA = IB + offA;
+	JA = JB + offA;
+
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE  )
+	{
+		double * VA = (double*) RSB_TYPED_OFF_PTR(typecode,VB,offA);
+		for(i=1;i<nnzA;++i)
+		for(j=i;j>0 && RSB_COO_LT(IA[j],JA[j],IA[j-1],JA[j-1]);--j)
+		{
+			RSB_SWAP(rsb_coo_idx_t,IA[j],IA[j-1]);
+			RSB_SWAP(rsb_coo_idx_t,JA[j],JA[j-1]);
+			RSB_SWAP(double        ,VA[j],VA[j-1]);
+		}
+	}
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT  )
+	{
+		float * VA = (float*) RSB_TYPED_OFF_PTR(typecode,VB,offA);
+		for(i=1;i<nnzA;++i)
+		for(j=i;j>0 && RSB_COO_LT(IA[j],JA[j],IA[j-1],JA[j-1]);--j)
+		{
+			RSB_SWAP(rsb_coo_idx_t,IA[j],IA[j-1]);
+			RSB_SWAP(rsb_coo_idx_t,JA[j],JA[j-1]);
+			RSB_SWAP(float        ,VA[j],VA[j-1]);
+		}
+	}
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_FLOAT_COMPLEX  )
+	{
+		float complex * VA = (float complex*) RSB_TYPED_OFF_PTR(typecode,VB,offA);
+		for(i=1;i<nnzA;++i)
+		for(j=i;j>0 && RSB_COO_LT(IA[j],JA[j],IA[j-1],JA[j-1]);--j)
+		{
+			RSB_SWAP(rsb_coo_idx_t,IA[j],IA[j-1]);
+			RSB_SWAP(rsb_coo_idx_t,JA[j],JA[j-1]);
+			RSB_SWAP(float complex        ,VA[j],VA[j-1]);
+		}
+	}
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+#ifdef RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 
+	if( typecode == RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX  )
+	{
+		double complex * VA = (double complex*) RSB_TYPED_OFF_PTR(typecode,VB,offA);
+		for(i=1;i<nnzA;++i)
+		for(j=i;j>0 && RSB_COO_LT(IA[j],JA[j],IA[j-1],JA[j-1]);--j)
+		{
+			RSB_SWAP(rsb_coo_idx_t,IA[j],IA[j-1]);
+			RSB_SWAP(rsb_coo_idx_t,JA[j],JA[j-1]);
+			RSB_SWAP(double complex        ,VA[j],VA[j-1]);
+		}
+	}
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+	return RSB_ERR_UNSUPPORTED_TYPE;
+	return RSB_ERR_NO_ERROR;
+}
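+
+/*
+ * Sketch: an in-place insertion sort of (IA,JA,VA) triples into row-major
+ * order, quadratic in the worst case and thus meant for small nnzA only.
+ * E.g., IA = {1,0}, JA = {0,1}, VA = {2.,1.} becomes
+ * IA = {0,1}, JA = {1,0}, VA = {1.,2.}.
+ */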
+
+void rsb__coo_to_lr( void * RSB_RESTRICT VBu, rsb_coo_idx_t*RSB_RESTRICT IB, rsb_coo_idx_t*RSB_RESTRICT JB, void * RSB_RESTRICT VAu, rsb_coo_idx_t*RSB_RESTRICT IA, rsb_coo_idx_t*RSB_RESTRICT JA, rsb_coo_idx_t mj, rsb_nnz_idx_t nnzA, rsb_nnz_idx_t nzoffB, rsb_nnz_idx_t nzoffA, rsb_nnz_idx_t*RSB_RESTRICT nzlp, rsb_nnz_idx_t*RSB_RESTRICT nzrp, rsb_coo_idx_t iadd, rsb_coo_idx_t jadd, rsb_flags_t typecode)
+{
+	/*
+	 * Given the COO arrays of matrix A and temporary arrays B, moves the
+	 * coefficients lying in columns left of the mj-th one before the
+	 * remaining ones, preserving row-major order within each group and
+	 * adding iadd to the row indices (and jadd to the column indices of
+	 * the right group).
+	 * A serial function.
+	 * */
+	rsb_nnz_idx_t nzl = 0, nzr = 0, nzi = 0;
+
+	RSB_DEBUG_ASSERT(IA!=IB);
+	RSB_DEBUG_ASSERT(JA!=JB);
+	RSB_DEBUG_ASSERT(nzlp && nzrp); /* no mtxAp is in scope here; these output pointers are dereferenced below */
+
+	IA += nzoffA;
+	JA += nzoffA;
+
+	IB += nzoffB;
+	JB += nzoffB;
+	
+switch(typecode)
+{
+			/* supported (double,float,float complex,double complex) */
+case RSB_NUMERICAL_TYPE_DOUBLE 	:
+{
+	double * RSB_RESTRICT VA = VAu; 
+	double * RSB_RESTRICT VB = VBu; 
+	RSB_DEBUG_ASSERT(VA!=VB);
+
+	VA += nzoffA;
+	VB += nzoffB;
+
+	for(nzi=0;nzi<nnzA;++nzi)
+	{
+		if( JA[nzi] < mj )
+		{
+			IB[nzl] = IA[nzi] + iadd;
+			JB[nzl] = JA[nzi] ;
+			VB[nzl] = VA[nzi];
+			nzl++;
+		}
+		else
+		{
+			nzr++;
+			IB[nnzA-nzr] = IA[nzi] + iadd;
+			JB[nnzA-nzr] = JA[nzi] + jadd;
+			VB[nnzA-nzr] = VA[nzi];
+		}
+	}
+
+	/* copy left quadrant back to A */
+	for(nzi=0;nzi<nzl ;++nzi)
+	{
+		IA[nzi] = IB[nzi];
+		JA[nzi] = JB[nzi];
+		VA[nzi] = VB[nzi];
+	}
+	
+	/* copy right quadrant back to A */
+	for(     ;nzi<nnzA;++nzi)
+	{
+		IA[nzi] = IB[nnzA-(1+nzi-nzl)];
+		JA[nzi] = JB[nnzA-(1+nzi-nzl)];
+		VA[nzi] = VB[nnzA-(1+nzi-nzl)];
+	}
+}
+	break;
+case RSB_NUMERICAL_TYPE_FLOAT 	:
+{
+	float * RSB_RESTRICT VA = VAu; 
+	float * RSB_RESTRICT VB = VBu; 
+	RSB_DEBUG_ASSERT(VA!=VB);
+
+	VA += nzoffA;
+	VB += nzoffB;
+
+	for(nzi=0;nzi<nnzA;++nzi)
+	{
+		if( JA[nzi] < mj )
+		{
+			IB[nzl] = IA[nzi] + iadd;
+			JB[nzl] = JA[nzi] ;
+			VB[nzl] = VA[nzi];
+			nzl++;
+		}
+		else
+		{
+			nzr++;
+			IB[nnzA-nzr] = IA[nzi] + iadd;
+			JB[nnzA-nzr] = JA[nzi] + jadd;
+			VB[nnzA-nzr] = VA[nzi];
+		}
+	}
+
+	/* copy left quadrant back to A */
+	for(nzi=0;nzi<nzl ;++nzi)
+	{
+		IA[nzi] = IB[nzi];
+		JA[nzi] = JB[nzi];
+		VA[nzi] = VB[nzi];
+	}
+	
+	/* copy right quadrant back to A */
+	for(     ;nzi<nnzA;++nzi)
+	{
+		IA[nzi] = IB[nnzA-(1+nzi-nzl)];
+		JA[nzi] = JB[nnzA-(1+nzi-nzl)];
+		VA[nzi] = VB[nnzA-(1+nzi-nzl)];
+	}
+}
+	break;
+case RSB_NUMERICAL_TYPE_FLOAT_COMPLEX 	:
+{
+	float complex * RSB_RESTRICT VA = VAu; 
+	float complex * RSB_RESTRICT VB = VBu; 
+	RSB_DEBUG_ASSERT(VA!=VB);
+
+	VA += nzoffA;
+	VB += nzoffB;
+
+	for(nzi=0;nzi<nnzA;++nzi)
+	{
+		if( JA[nzi] < mj )
+		{
+			IB[nzl] = IA[nzi] + iadd;
+			JB[nzl] = JA[nzi] ;
+			VB[nzl] = VA[nzi];
+			nzl++;
+		}
+		else
+		{
+			nzr++;
+			IB[nnzA-nzr] = IA[nzi] + iadd;
+			JB[nnzA-nzr] = JA[nzi] + jadd;
+			VB[nnzA-nzr] = VA[nzi];
+		}
+	}
+
+	/* copy left quadrant back to A */
+	for(nzi=0;nzi<nzl ;++nzi)
+	{
+		IA[nzi] = IB[nzi];
+		JA[nzi] = JB[nzi];
+		VA[nzi] = VB[nzi];
+	}
+	
+	/* copy right quadrant back to A */
+	for(     ;nzi<nnzA;++nzi)
+	{
+		IA[nzi] = IB[nnzA-(1+nzi-nzl)];
+		JA[nzi] = JB[nnzA-(1+nzi-nzl)];
+		VA[nzi] = VB[nnzA-(1+nzi-nzl)];
+	}
+}
+	break;
+case RSB_NUMERICAL_TYPE_DOUBLE_COMPLEX 	:
+{
+	double complex * RSB_RESTRICT VA = VAu; 
+	double complex * RSB_RESTRICT VB = VBu; 
+	RSB_DEBUG_ASSERT(VA!=VB);
+
+	VA += nzoffA;
+	VB += nzoffB;
+
+	for(nzi=0;nzi<nnzA;++nzi)
+	{
+		if( JA[nzi] < mj )
+		{
+			IB[nzl] = IA[nzi] + iadd;
+			JB[nzl] = JA[nzi] ;
+			VB[nzl] = VA[nzi];
+			nzl++;
+		}
+		else
+		{
+			nzr++;
+			IB[nnzA-nzr] = IA[nzi] + iadd;
+			JB[nnzA-nzr] = JA[nzi] + jadd;
+			VB[nnzA-nzr] = VA[nzi];
+		}
+	}
+
+	/* copy left quadrant back to A */
+	for(nzi=0;nzi<nzl ;++nzi)
+	{
+		IA[nzi] = IB[nzi];
+		JA[nzi] = JB[nzi];
+		VA[nzi] = VB[nzi];
+	}
+	
+	/* copy right quadrant back to A */
+	for(     ;nzi<nnzA;++nzi)
+	{
+		IA[nzi] = IB[nnzA-(1+nzi-nzl)];
+		JA[nzi] = JB[nnzA-(1+nzi-nzl)];
+		VA[nzi] = VB[nnzA-(1+nzi-nzl)];
+	}
+}
+	break;
+	/* unsupported type */
+	default :
+	RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS 
+}
+
+	*nzlp = nzl;
+	*nzrp = nzr;
+}
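+
+/*
+ * Sketch: with iadd = jadd = 0 and A holding the row-major sorted entries
+ * (0,0) (0,2) (1,1) and mj = 1, the single entry in columns < 1 ends up
+ * first, followed by (0,2) and (1,1); *nzlp == 1 and *nzrp == 2 on return.
+ */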
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+/* @endcond */
diff --git a/rsb_util.h b/rsb_util.h
new file mode 100644
index 0000000..5d657ba
--- /dev/null
+++ b/rsb_util.h
@@ -0,0 +1,204 @@
+/* @cond INNERDOC */
+
+/**
+ * @file
+ * @brief
+ * Auxiliary functions.
+ */
+
+/*                                                                                                                            
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/*
+ The code in this file was generated automatically by an M4 script. 
+ It is not meant to be used as an API (Application Programming Interface).
+ p.s.: right now, only row major matrix access is considered.
+
+ */
+
+
+#ifndef RSB_UTIL_H_INCLUDED
+#define RSB_UTIL_H_INCLUDED
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#define RSB_WANT_OMP        1
+#define RSB_MAX_OMP_THREADS 4
+
+
+#include "rsb_common.h"
+/* non blas-like functions */
+
+rsb_err_t rsb__util_m4_sanity_check(void);
+const void * rsb__util_increase_by_one(void *p, rsb_nnz_idx_t n, rsb_flags_t typecode);
+void rsb__util_set_area_to_fraction_of_integer(void *p, const int alphai, rsb_flags_t typecode);
+void rsb__util_set_area_to_negated_fraction(void *p, const void *alpha, rsb_flags_t typecode);
+void rsb__util_set_area_to_converted_integer(void *p, rsb_flags_t typecode, const rsb_int n);
+rsb_coo_idx_t * rsb__util_get_partitioning_array( size_t bs, size_t X , rsb_blk_idx_t * X_b, rsb_flags_t flags);
+rsb_err_t rsb__vector_diff(void * c, const void * a, const void * b, rsb_type_t type, size_t n);
+
+
+
+rsb_err_t rsb__vector_norm_strided(void * c, const void * a, rsb_type_t type, size_t n, rsb_nnz_idx_t inc)
+;
+rsb_err_t rsb__util_vector_sum_strided(void * c, const void * a, rsb_type_t type, size_t n, rsb_nnz_idx_t inc)
+;
+rsb_err_t rsb__util_vector_sum(void * c, const void * a, rsb_type_t type, size_t n)
+;
+
+
+
+
+
+rsb_err_t rsb__util_vector_add(void * a, const void * alphap, rsb_type_t type, size_t n)
+;
+rsb_err_t rsb__util_vector_div(void * a, const void * alphap, rsb_type_t type, size_t n)
+;
+rsb_err_t rsb__vector_increase_by_one(void * a, rsb_type_t type, size_t n)
+;
+rsb_err_t rsb__util_vector_pow(void * a, rsb_type_t type, const void *y, size_t n)
+;
+rsb_err_t rsb__util_vector_sqrt(void * a, rsb_type_t type, size_t n)
+;
+rsb_err_t rsb__vector_scale_inv(void * a, const void * alphap, rsb_type_t type, size_t n)
+;
+rsb_err_t rsb__vector_sum_of_abs_diffs(void * c, const void * a, const void * b, rsb_type_t type, size_t n)
+;
+rsb_err_t rsb__vector_sum_of_abs(void * c, const void * a, rsb_type_t type, size_t n)
+;
+rsb_err_t rsb__vector_to_abs(void * a, rsb_type_t type, size_t n)
+;
+
+
+rsb_err_t rsb__util_set_array_to_converted_integer(void *p, rsb_flags_t typecode, const rsb_nnz_idx_t n, const rsb_nnz_idx_t incp, const rsb_int v)
+;
+rsb_err_t rsb__vectors_left_sum_reduce_and_zero(void * d, void * s, const rsb_type_t typecode, const size_t n, const size_t incd, const size_t off)
+;
+
+
+rsb_err_t rsb__cblas_Xaxpy(rsb_type_t type, size_t n, const void * alphap, const void * x, const int incx, void * y, const int incy)
+;
+rsb_err_t rsb__vector_mult(const void * a, const void * b, void * c, rsb_type_t type, size_t n)
+;
+rsb_err_t rsb__xcopy(void * a, const void * b, rsb_nnz_idx_t toi, rsb_nnz_idx_t foi, rsb_nnz_idx_t n,size_t el_size)
+;
+rsb_err_t rsb__do_are_same(const void * ap, const void * bp, rsb_nnz_idx_t n,rsb_type_t typecode, rsb_nnz_idx_t incx, rsb_nnz_idx_t incy)
+;
+
+rsb_err_t rsb__sqrt_of_sum_of_fabs_diffs(const void * a, const void * b, void *err, rsb_type_t type, size_t n)
+;
+rsb_err_t rsb__fill_with_increasing_values(void * array, rsb_type_t type, size_t n)
+;
+rsb_err_t rsb__util_do_conjugate(void * array, rsb_type_t type, size_t n)
+;
+rsb_err_t rsb__util_do_negate(void * array, rsb_type_t type, size_t n)
+;
+rsb_err_t rsb__util_find_min(void * minp, const void * array, rsb_type_t type, size_t n, rsb_nnz_idx_t inc)
+;
+rsb_err_t rsb__util_find_max(void * maxp, const void * array, rsb_type_t type, size_t n, rsb_nnz_idx_t inc)
+;
+rsb_err_t rsb__util_drop_to_zero_if_above_threshold(void * array, rsb_type_t type, size_t n, const void * threshold)
+;
+rsb_nnz_idx_t rsb__util_count_positive(void * array, rsb_type_t type, size_t n)
+;
+rsb_nnz_idx_t rsb__util_count_negative(void * array, rsb_type_t type, size_t n)
+;
+rsb_err_t rsb__util_drop_to_zero_if_under_threshold(void * array, rsb_type_t type, size_t n, const void * threshold)
+;
+rsb_err_t rsb__fill_with_ones(void * array, rsb_type_t type, size_t n, size_t incx);
+rsb_err_t rsb__debug_print_vectors_diff(const void * v1, const void * v2, size_t n, rsb_type_t type, size_t incx, size_t incy, int onlyfirst);
+rsb_err_t rsb__debug_print_value(const void * v, rsb_type_t type);
+rsb_err_t rsb__debug_print_vector_extra(const void * v1, size_t n, rsb_type_t type, size_t inc, int style, FILE*stream);
+rsb_err_t rsb__debug_print_vector(const void * v1, size_t n, rsb_type_t type, size_t inc);
+rsb_err_t rsb__debug_print_vectors(const void * v1, const void * v2, size_t n, size_t incx, size_t incy, rsb_type_t type);
+
+rsb_err_t rsb__do_account_sorted_optimized_css(
+	 const rsb_coo_idx_t * MIndx, const rsb_coo_idx_t * mIndx,
+	 const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim,
+	 const rsb_nnz_idx_t nnz, rsb_nnz_idx_t * elements_per_block_row, rsb_nnz_idx_t * blocks_per_block_row
+)
+;
+rsb_err_t rsb__do_account_sorted_optimized(
+	 struct rsb_mtx_t * mtxAp,
+	 const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA,
+	 const rsb_coo_idx_t Idim, const rsb_coo_idx_t Jdim,
+	 const rsb_nnz_idx_t nnz, const struct rsb_mtx_partitioning_info_t * pinfop,
+rsb_nnz_idx_t * elements_per_block_row, 
+rsb_nnz_idx_t * blocks_per_block_row
+)
+;
+rsb_err_t rsb__do_insert_sorted_optimized_css( struct rsb_mtx_t * mtxAp, const void *VA, const rsb_coo_idx_t * MIndx, const rsb_coo_idx_t * mIndx, const rsb_nnz_idx_t nnz)
+;
+rsb_err_t rsb__do_insert_sorted_optimized( struct rsb_mtx_t * mtxAp, const void *VA, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const rsb_nnz_idx_t nnz, const struct rsb_mtx_partitioning_info_t * pinfop)
+;
+rsb_err_t rsb__dump_block(rsb_type_t type, const void * VA, rsb_blk_idx_t roff, rsb_blk_idx_t coff, rsb_blk_idx_t rows, rsb_blk_idx_t cols )
+;
+rsb_err_t rsb__dump_blocks(const struct rsb_mtx_t *mtxAp)
+;
+rsb_err_t rsb__test_print_csr(rsb_type_t type, rsb_flags_t flags, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const void * VA, rsb_coo_idx_t rows, rsb_coo_idx_t cols, rsb_nnz_idx_t nnz, rsb_bool_t want_header, FILE*stream)
+;
+rsb_err_t rsb__test_print_coo_mm(rsb_type_t type, rsb_flags_t flags, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const void * VA, rsb_coo_idx_t rows, rsb_coo_idx_t cols, rsb_nnz_idx_t nnz, rsb_bool_t want_header, FILE*stream)
+;
+/*static*/ /*inline*/ size_t rsb__do_sizeof(rsb_type_t type);
+rsb_err_t rsb__do_coo_sum( struct rsb_coo_matrix_t*coocp, const void *alphap, const struct rsb_coo_matrix_t*cooap, const void *betap,  const struct rsb_coo_matrix_t*coobp)
+;
+rsb_err_t rsb__cor_merge_dups(rsb_type_t typecode, void* RSB_RESTRICT VA, rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t * RSB_RESTRICT JA, rsb_nnz_idx_t offB, rsb_nnz_idx_t nnzB, rsb_nnz_idx_t nnzC, const int wv, int wp, rsb_nnz_idx_t *onzp, struct rsb_coo_matrix_t*RSB_RESTRICT coop)
+;
+rsb_err_t rsb__do_copy_converted_scaled(const void *RSB_RESTRICT  src, void *RSB_RESTRICT dst, const void *RSB_RESTRICT  alphap, rsb_type_t stype,rsb_type_t dtype, size_t nnz, rsb_trans_t transA)
+;
+rsb_err_t rsb_util_csc2csr(const void *RSB_RESTRICT VA, const rsb_coo_idx_t * RSB_RESTRICT IA, const rsb_coo_idx_t * RSB_RESTRICT JA, void *RSB_RESTRICT oVA, rsb_coo_idx_t * RSB_RESTRICT oIA, rsb_coo_idx_t * RSB_RESTRICT oJA, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnz, rsb_type_t typecode, const rsb_coo_idx_t offi, const rsb_coo_idx_t offo, rsb_flags_t*flagsp)
+;
+rsb_err_t rsb_util_coo_copy_and_stats(const void *RSB_RESTRICT VA, const rsb_coo_idx_t * RSB_RESTRICT IA, const rsb_coo_idx_t * RSB_RESTRICT JA, void *RSB_RESTRICT oVA, rsb_coo_idx_t * RSB_RESTRICT oIA, rsb_coo_idx_t * RSB_RESTRICT oJA, rsb_coo_idx_t*m, rsb_coo_idx_t*k, const rsb_nnz_idx_t nnz, const rsb_type_t typecode, const rsb_coo_idx_t offi, const rsb_coo_idx_t offo, rsb_flags_t iflags, rsb_flags_t*flagsp)
+;
+rsb_err_t rsb_util_coo_copy(const void *RSB_RESTRICT VA, const rsb_coo_idx_t * RSB_RESTRICT IA, const rsb_coo_idx_t * RSB_RESTRICT JA, void *RSB_RESTRICT oVA, rsb_coo_idx_t * RSB_RESTRICT oIA, rsb_coo_idx_t * RSB_RESTRICT oJA, const rsb_nnz_idx_t nnz, const rsb_type_t typecode, const rsb_coo_idx_t offi, const rsb_coo_idx_t offo)
+;
+/* sparse blas level 1 equivalent functions */
+
+int rsb__BLAS_Xusdot(const rsb_type_t typecode, const enum blas_conj_type conj_arg, const rsb_blas_int_t nz, const void*x, const rsb_blas_int_t*indx, const void*y, const rsb_blas_int_t incy, void*r, const enum blas_base_type index_base)
+;
+int rsb__BLAS_Xusaxpy(const rsb_type_t typecode, const rsb_blas_int_t nz, const void*alpha, const void*x, const rsb_blas_int_t*indx, const void*y, const rsb_blas_int_t incy, const enum blas_base_type index_base)
+;
+int rsb__BLAS_Xusga(const rsb_type_t typecode, const rsb_blas_int_t nz, const void*y, const rsb_blas_int_t incy, void*x, const rsb_blas_int_t*indx, const enum blas_base_type index_base)
+;
+int rsb__BLAS_Xusgz(const rsb_type_t typecode, const rsb_blas_int_t nz, void*y, const rsb_blas_int_t incy, void*x, const rsb_blas_int_t*indx, const enum blas_base_type index_base)
+;
+int rsb__BLAS_Xussc(const rsb_type_t typecode, const rsb_blas_int_t nz, const void*x, void*y, const rsb_blas_int_t incy, const rsb_blas_int_t*indx, const enum blas_base_type index_base)
+;
+/* blas level 1 equivalent functions */
+
+rsb_err_t rsb__cblas_Xcopy(rsb_type_t typecode, rsb_nnz_idx_t n, const void * x, rsb_nnz_idx_t incx, void * y, rsb_nnz_idx_t incy)
+;
+rsb_err_t rsb__cblas_Xnrm2(rsb_type_t type, size_t n, const void * a, rsb_nnz_idx_t incA, void * c);
+rsb_err_t rsb__cblas_Xdotu_sub(rsb_type_t type, size_t n, const void * x, rsb_nnz_idx_t incx, const void * y, rsb_nnz_idx_t incy, void *dotu);
+rsb_err_t rsb__cblas_Xscal(rsb_type_t type, size_t n, const void * alphap, void * a, size_t stride);
+rsb_err_t rsb__coo_insertion_sort(rsb_type_t typecode, void* VB, rsb_coo_idx_t * IB, rsb_coo_idx_t * JB, rsb_nnz_idx_t offA, rsb_nnz_idx_t nnzA)
+;
+void rsb__coo_to_lr( void * RSB_RESTRICT VBu, rsb_coo_idx_t*RSB_RESTRICT IB, rsb_coo_idx_t*RSB_RESTRICT JB, void * RSB_RESTRICT VAu, rsb_coo_idx_t*RSB_RESTRICT IA, rsb_coo_idx_t*RSB_RESTRICT JA, rsb_coo_idx_t mj, rsb_nnz_idx_t nnzA, rsb_nnz_idx_t nzoffB, rsb_nnz_idx_t nzoffA, rsb_nnz_idx_t*RSB_RESTRICT nzlp, rsb_nnz_idx_t*RSB_RESTRICT nzrp, rsb_coo_idx_t iadd, rsb_coo_idx_t jadd, rsb_flags_t typecode)
+;
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+
+#endif /* RSB_UTIL_H_INCLUDED */
+
+/* @endcond */
diff --git a/rsb_util.m4 b/rsb_util.m4
new file mode 100644
index 0000000..6b9ab63
--- /dev/null
+++ b/rsb_util.m4
@@ -0,0 +1,3345 @@
+dnl
+dnl
+dnl	@author: Michele Martone
+dnl
+/* @cond INNERDOC */
+dnl
+ifelse(LIBMMVBR_INCLUDED_UTIL_M4,1,`',`
+define(`LIBMMVBR_INCLUDED_TYPES_M4',`1')dnl
+dnl `PACK' format will be experimented with in the future :)
+include(`rsb_misc.m4')dnl
+include(`do_unroll.m4')dnl
+/**
+ * @file
+ * @brief
+ * Auxiliary functions.
+ */
+RSB_M4_HEADER_MESSAGE()dnl
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`
+#ifndef RSB_UTIL_H_INCLUDED
+#define RSB_UTIL_H_INCLUDED
+')
+dnl
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+ifdef(`RSB_M4_WANT_OMP',dnl
+dnl	FIXME : this should be moved elsewhere
+`#define RSB_WANT_OMP        '1
+`#define RSB_MAX_OMP_THREADS 'RSB_M4_MAX_OMP_THREADS
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+#include <omp.h>       /* OpenMP parallelism (EXPERIMENTAL) */
+')
+)dnl
+
+dnl
+#include "rsb_common.h"
+dnl #include "rsb_types.h"
+dnl 
+dnl
+dnl
+dnl	FIXME : COMMENT THIS FILE
+dnl	-------------------------
+dnl
+dnl
+dnl
+/* non blas-like functions */
+dnl
+
+dnl
+rsb_err_t rsb__util_m4_sanity_check(void)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/**
+		Bugs in the m4 macros, or a broken m4 implementation, will cause this test to fail.
+		We want to catch such problems early, since the library relies on a sane m4 environment.
+	*/
+	RSB_M4_DEBUGINFO(``$0'')
+	if(
+		RSB_M4_XOR(0,0)!=0 ||
+		RSB_M4_XOR(1,0)!=1 || 
+		RSB_M4_XOR(0,1)!=1 || 
+		RSB_M4_XOR(1,1)!=0 ||
+		RSB_M4_AND(0,0)!=0 ||
+		RSB_M4_AND(1,0)!=0 ||
+		RSB_M4_AND(0,1)!=0 ||
+		RSB_M4_AND(1,1)!=1 ||
+		RSB_M4_OR(0,0)!=0 ||
+		RSB_M4_OR(1,0)!=1 ||
+		RSB_M4_OR(0,1)!=1 ||
+		RSB_M4_OR(1,1)!=1 ||
+		0
+		)
+		goto err;
+	return RSB_ERR_NO_ERROR;
+err:
+	return RSB_ERR_INTERNAL_ERROR;
+}
+')dnl
+dnl
+
+dnl
+const void * rsb__util_increase_by_one(void *p, rsb_nnz_idx_t n, rsb_flags_t typecode)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) ) {(((mtype*)p)[n])+=1;return p;}
+	else 
+#endif
+')dnl
+	return NULL;
+}
+')dnl
+dnl
+
+dnl
+void rsb__util_set_area_to_fraction_of_integer(void *p, const int alphai, rsb_flags_t typecode)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*
+		Sets *p to 1/alphai, converted to the destination numerical type.
+	*/
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) ) {*(mtype*)p = 1;*(mtype*)p/=alphai;}
+	else 
+#endif
+')dnl
+	return;
+}
+')dnl
+dnl
+
+dnl
+void rsb__util_set_area_to_negated_fraction(void *p, const void *alpha, rsb_flags_t typecode)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*
+		Sets *p to -1/(*alpha); a NULL alpha is treated as 1.
+	*/
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) ) {*(mtype*)p = -1;if(alpha)*(mtype*)p/=(*(mtype*)alpha);}
+	else 
+#endif
+')dnl
+	return;
+}
+')dnl
+dnl
+
+dnl
+void rsb__util_set_area_to_converted_integer(void *p, rsb_flags_t typecode, const rsb_int n)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) ) {*(mtype*)p = (mtype)n;}
+	else 
+#endif
+')dnl
+	return;
+}
+')dnl
+dnl
+
+dnl
+rsb_coo_idx_t * rsb__util_get_partitioning_array( size_t bs, size_t X , rsb_blk_idx_t * X_b, rsb_flags_t flags)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * Given a block size bs (rows or columns) and a dimension X
+	 * (the rows or columns count), returns an array containing the
+	 * starting index of each block, plus a final terminator entry.
+	 *
+	 * \param bs	the block size
+	 * \param X	the rows or columns count
+	 * \param X_b	on output, the blocks count : (X+bs-1)/bs (the array has X_b+1 entries)
+	 * \return NULL on error;  a valid array pointer on success
+	 *
+	 * FIXME : why not size_t ? or maybe rsb_size_t ?
+	 * */
+	size_t i;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_coo_idx_t * p_x = NULL;
+
+	*X_b = (X+bs-1)/bs;
+
+	/* WARNING : 1 is the extreme limit before overflow :) */
+	if( ( ((size_t)(*X_b)) < ((size_t)((X+bs-1)/bs))) || (RSB_BLK_ADD_OVERFLOW(*X_b,1)) )
+	{
+		/* overflow. should print some message. */
+		errval = RSB_ERR_LIMITS;goto err;
+	}
+
+	p_x = rsb__malloc(sizeof(rsb_coo_idx_t)*(*X_b+1));
+	if(! p_x) goto err;
+	/* note: should use some perrno some day */
+
+	/* note the last block size : it is the same, regardless of congruences */
+	RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`*X_b',` p_x[i+LI] = (i+LI)*bs;
+	')
+dnl	for(i = 0;i<*X_b;++i)p_x[i] = i*bs;
+
+	/* FIXME : this point should be remarked and documented way better ! */
+	if(flags&(RSB_FLAG_WANT_BCSS_STORAGE|RSB_FLAG_WANT_FIXED_BLOCKING_VBR))
+		p_x[*X_b] = *X_b*bs;	/* the last element of p_x is the index of the last matrix row/column    + 1  */
+	else
+		p_x[*X_b] = X;	/* the last element of p_x is the index of the last matrix row/column    + 1  */
+	
+	return p_x;
+err:
+	RSB_CONDITIONAL_FREE(p_x);
+	rsb__do_perror(NULL,errval);
+	return NULL;
+}
+')dnl
+dnl
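+dnl
+dnl	Illustrative sketch (documentation only, not part of the build): with
+dnl	block size bs = 4 and dimension X = 10, *X_b = (10+4-1)/4 = 3 and the
+dnl	returned array holds {0,4,8,10}; with RSB_FLAG_WANT_BCSS_STORAGE the
+dnl	terminator is rounded up to 3*4 = 12 instead:
+dnl
+dnl	  rsb_blk_idx_t nb = 0;
+dnl	  rsb_coo_idx_t * p = rsb__util_get_partitioning_array(4, 10, &nb, RSB_FLAG_WANT_BCSS_STORAGE);
+dnl	  /* nb == 3 ; p[0..3] == {0,4,8,12} ; to be freed by the caller */
+dnl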
+
+dnl
+rsb_err_t rsb__vector_diff(void * c, const void * a, const void * b, rsb_type_t type, size_t n)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * c <- a-b
+         *
+	 * \param a,b	the input arrays
+	 * \param c	the output array
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \note see daxpy,dcopy in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+		const mtype*ta = a,*tb = b;mtype *tc = c;
+		RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+		tc[i+LI] = ta[i+LI]-tb[i+LI];
+	'); 
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+static rsb_err_t rsb_vector_norm_square(void * c, const void * a, rsb_type_t type, size_t n)
+{
+	/*!
+	 * c <- a^T*a
+         *
+	 * \param a	an array pointer
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \note see ddot in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+		const mtype*ta = a;mtype *tc = c;
+		tc[0] = RSB_M4_ZERO(mtype);
+		RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+		tc[0]+=ta[i+LI]*ta[i+LI];
+	'); 
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+static rsb_err_t rsb_vector_norm(void * c, const void * a, rsb_type_t type, size_t n)
+{
+	/*!
+	 * c <- sqrt(a^T*a)
+         *
+	 * \param a	the input array
+	 * \param c	the output scalar
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \note see dnrm2 in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	rsb_err_t errval;
+	if(!c)
+		return RSB_ERR_BADARGS;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+		mtype*cp = (mtype*)c;
+		errval = rsb_vector_norm_square(cp,a,type,n);
+		*cp = RSB_M4_SQRT(mtype,*cp);
+	}
+	else 
+#endif
+')dnl
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+	RSB_DO_ERR_RETURN(errval)
+}
+')dnl
+dnl
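+dnl
+dnl	Illustrative sketch (documentation only; rsb_vector_norm is static,
+dnl	so callable only within this unit; RSB_NUMERICAL_TYPE_DOUBLE is
+dnl	assumed to be among the configured types): for a = {3,4} the squared
+dnl	norm accumulates 3*3 + 4*4 = 25 and the result is its square root:
+dnl
+dnl	  double a[] = {3,4}, c = 0;
+dnl	  rsb_vector_norm(&c, a, RSB_NUMERICAL_TYPE_DOUBLE, 2); /* c == 5.0 */
+dnl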
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+static rsb_err_t rsb_vector_norm_square_strided(void * c, const void * a, rsb_type_t type, size_t n, rsb_nnz_idx_t inc)
+{
+	/*!
+	 * c <- a^T*a
+         *
+	 * \param a	the input array
+	 * \param c	the output scalar
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \note see ddot in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+	if(inc==1)
+		return rsb_vector_norm_square(c,a,type,n);
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+		const mtype*ta = a;mtype *tc = c;
+		tc[0] = RSB_M4_ZERO(mtype);
+		RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+		tc[0]+=ta[(i+LI)*inc]*ta[(i+LI)*inc];
+	'); 
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__vector_norm_strided(void * c, const void * a, rsb_type_t type, size_t n, rsb_nnz_idx_t inc)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * c <- sqrt(a^T*a)
+         *
+	 * \param a	the input array
+	 * \param c	the output scalar
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \note see dnrm2 in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	rsb_err_t errval;
+	if(!c)
+		return RSB_ERR_BADARGS;
+	if(inc==1)
+		return rsb_vector_norm(c,a,type,n);
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+		mtype*cp = (mtype*)c;
+		errval = rsb_vector_norm_square_strided(cp,a,type,n,inc);
+		*cp = RSB_M4_SQRT(mtype,*cp);
+	}
+	else 
+#endif
+')dnl
+		errval = RSB_ERR_UNSUPPORTED_TYPE;
+	RSB_DO_ERR_RETURN(errval)
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__util_vector_sum_strided(void * c, const void * a, rsb_type_t type, size_t n, rsb_nnz_idx_t inc)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * c <- sum(a)
+         *
+	 * \param a	the input array
+	 * \param c	the output scalar
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \note see dasum in BLAS (though this sums signed values)
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+		register mtype acc = RSB_M4_ZERO(mtype); const mtype*ta = a; mtype*tc = c;
+	RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+		acc+=ta[(i+LI)*inc];
+	'); 
+		tc[0] = acc;
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__util_vector_sum(void * c, const void * a, rsb_type_t type, size_t n)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * c <- sum(a)
+         *
+	 * \param a	the input array
+	 * \param c	the output scalar
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \note see dasum in BLAS (though this sums signed values)
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+	const mtype*ta = a; mtype*tc = c; tc[0] = RSB_M4_ZERO(mtype);
+	RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+	tc[0]+=ta[i+LI];
+	'); 
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+dnl
+dnl	rsb_err_t rsb_blas_Xdot(void * c, const void * a, rsb_type_t type, size_t n, rsb_nnz_idx_t inc)
+dnl	{
+dnl		cblas_ddot(n,a,1,a,1)
+dnl	}
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+static rsb_err_t rsb__vector_mult_sum(const void * a, const void * b, void * c, rsb_type_t type, size_t n, const int inca, const int incb)
+{
+	/*!
+	 * c <- sum(a*b)
+	 * It is allowed to give c == a or c == b or a==b
+         *
+	 * \param a,b	the input arrays
+	 * \param c	the output scalar
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \note see ddot in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * 
+	 * p.s.: this routine is, numerically speaking, a crime!
+	 * 
+	 * */
+	size_t i;
+	if(a==b && inca==incb)
+		return rsb_vector_norm_square_strided(c,a,type,n,inca);
+	if(inca == 1 && incb == 1)
+	{
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+		const mtype*tb = b; const mtype*ta = a; mtype*tc = c,cacc = RSB_M4_ZERO(mtype);
+		RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+		cacc+=ta[i+LI]*tb[i+LI];
+	'); 
+		*tc = cacc;
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	}
+	else
+	{
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+		const mtype*tb = b; const mtype*ta = a; mtype*tc = c,cacc = RSB_M4_ZERO(mtype);
+		RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+		cacc+=ta[inca*(i+LI)]*tb[incb*(i+LI)];
+	'); 
+		*tc = cacc;
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	}
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
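+dnl
+dnl	Illustrative sketch: rsb__vector_mult_sum is a plain sequential dot
+dnl	product, so with a = {1,2,3}, b = {4,5,6} and unit strides it stores
+dnl	1*4 + 2*5 + 3*6 = 32 into *c; the a == b, inca == incb case is routed
+dnl	to rsb_vector_norm_square_strided instead. Summation is not
+dnl	compensated, hence the accuracy caveat above.
+dnl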
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+static rsb_err_t rsb_fill_with_zeros_nostride(void * array, rsb_type_t type, size_t n)
+{
+	/*!
+	 * \ingroup gr_vec
+	 * Will zero the input n elements long array of type type.
+	 * \param array	an array pointer
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) ){
+	mtype*ta = array;
+RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`ta[i+LI] = RSB_M4_ZERO(mtype);')}
+	else 
+#endif
+')dnl
+		return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+static rsb_err_t rsb_fill_with_zeros(void * array, rsb_type_t type, size_t n, size_t incx)
+{
+	/*!
+	 * \ingroup gr_vec
+	 * Will zero the input n elements long array of type type.
+	 * \param array	an array pointer
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+	if(incx==1)
+		return rsb_fill_with_zeros_nostride(array,type,n);
+
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) ){
+	mtype*ta = array;
+RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`ta[(i+LI)*incx] = RSB_M4_ZERO(mtype);')}
+	else 
+#endif
+')dnl
+		return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+static rsb_err_t rsb_vector_scale(void * a, const void * alphap, rsb_type_t type, size_t n)
+{
+	/*!
+	 * a <- a * alpha
+         *
+	 * \param a	the array to scale
+	 * \param type	a valid type code
+	 * \param alphap scaling value (if NULL assumed to be zero)
+	 * \param n	the input array length
+	 * \note see dscal in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+	if(alphap==NULL || RSB_IS_ELEMENT_ZERO(alphap,type))
+		return rsb_fill_with_zeros(a,type,n,1);
+		
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+	const mtype alpha = *(mtype*)alphap; mtype*ta = a;
+	RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+	ta[i+LI]*=alpha;
+	'); 
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+static rsb_err_t rsb_strided_vector_scale(void * a, const void * alphap, rsb_type_t type, size_t n, size_t stride)
+{
+	/*!
+	 * a <- a * alpha
+         *
+	 * \param a	the array to scale
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \note see dscal in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+	if(stride==1)
+		return rsb_vector_scale(a,alphap,type,n);
+	if(alphap==NULL || RSB_IS_ELEMENT_ZERO(alphap,type))
+		return rsb_fill_with_zeros(a,type,n,stride);
+
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+		const mtype alpha = *(mtype*)alphap; mtype*ta = a;
+		RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+		ta[stride*(i+LI)]*=alpha;
+	'); 
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__util_vector_add(void * a, const void * alphap, rsb_type_t type, size_t n)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * a <- a + alpha
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+		
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+		const mtype alpha = *(mtype*)alphap; mtype*ta = a;
+		RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+		ta[i+LI]+=alpha;
+	'); 
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__util_vector_div(void * a, const void * alphap, rsb_type_t type, size_t n)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * this is a benchmark-oriented function only.
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+		
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+	const mtype alpha = *(mtype*)alphap; mtype*ta = a;
+	RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+	ta[i+LI]/=alpha;
+	'); 
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__vector_increase_by_one(void * a, rsb_type_t type, size_t n)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+		
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{ mtype*ta = a;
+	RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+	ta[i+LI]+=RSB_M4_ONE(mtype);
+	'); 
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__util_vector_pow(void * a, rsb_type_t type, const void *y, size_t n)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+	if(!a || !y)
+		return RSB_ERR_BADARGS;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+		mtype ty = *(mtype*)y,*ta = a;
+	RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+	ta[i+LI] = RSB_M4_POW(mtype,ta[i+LI],ty);
+	'); 
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__util_vector_sqrt(void * a, rsb_type_t type, size_t n)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+	if(!a)return RSB_ERR_BADARGS;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{mtype*ta = a;
+	RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+	ta[i+LI] = RSB_M4_SQRT(mtype,ta[i+LI]);
+	'); 
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__vector_scale_inv(void * a, const void * alphap, rsb_type_t type, size_t n)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * a <- a / alpha
+         *
+	 * \param a	the array to scale
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \note see dscal in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	if(!alphap)
+		return RSB_ERR_BADARGS;
+		
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+		mtype alphai = RSB_M4_ONE(mtype)/(*(mtype*)alphap);
+		return rsb_vector_scale(a,&alphai,type,n);
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__vector_sum_of_abs_diffs(void * c, const void * a, const void * b, rsb_type_t type, size_t n)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+		const mtype*ap = a,*bp = b;
+		mtype ac = RSB_M4_ZERO(mtype);
+		RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+		ac += RSB_M4_ABS(mtype,ap[i+LI]-bp[i+LI]);
+		'); 
+		*((mtype*)(c)) = ac;
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__vector_sum_of_abs(void * c, const void * a, rsb_type_t type, size_t n)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+		const mtype*ap = a;
+		mtype ac = RSB_M4_ZERO(mtype);
+		RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+		ac += RSB_M4_ABS(mtype,ap[i+LI]);
+		'); 
+		*((mtype*)(c)) = ac;
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__vector_to_abs(void * a, rsb_type_t type, size_t n)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{mtype*ta = a;
+	RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+	ta[i+LI] = RSB_M4_ABS(mtype,ta[i+LI]);
+	'); 
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+static rsb_err_t rsb_alpha_sum(void * a, const void * b, const void * alphap, rsb_type_t type, size_t n)
+{
+	/*!
+	 * a <- a + alpha * b
+         *
+	 * \param a	the accumulator array
+	 * \param b	the addend array
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \note see daxpy in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+	const mtype alpha = alphap ? *(mtype*)alphap : RSB_M4_ONE(mtype);
+	mtype*ta = a; const mtype*tb = b;
+	RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+	ta[i+LI]+=alpha*tb[i+LI];
+	'); 
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+ifelse(1,0,`dnl
+/* redundant code (see rsb__cblas_Xaxpy) */
+rsb_err_t rsb_vectors_sum(void * a, const void * b, rsb_type_t typecode, const void *alphap, size_t n)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * a <- a + alpha * b
+         *
+	 * \param array	an array pointer
+	 * \param typecode	a valid type code
+	 * \param n	the input array length
+	 * \note see daxpy in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+	if( !alphap || RSB_IS_ELEMENT_ONE(alphap,typecode))
+	{
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{mtype*ta = a; const mtype*tb = b;
+	RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+	ta[i+LI]+=tb[i+LI];
+	'); 
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	}
+	else
+		return rsb_alpha_sum(a,b,alphap,typecode,n);
+dnl 	{
+dnl foreach(`mtype',RSB_M4_TYPES,`dnl
+dnl `#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+dnl 	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+dnl 	{const mtype alpha = *((const mtype*)alphap);
+dnl 	mtype*ta = a; const mtype*tb = b;
+dnl 	RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+dnl 	ta[i+LI]+=alpha*tb[i+LI];
+dnl 	'); 
+dnl 	}
+dnl 	else 
+dnl #endif
+dnl ')dnl
+dnl 	return RSB_ERR_UNSUPPORTED_TYPE	;
+dnl 	}
+
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__util_set_array_to_converted_integer(void *p, rsb_flags_t typecode, const rsb_nnz_idx_t n, const rsb_nnz_idx_t incp, const rsb_int v)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * */
+	size_t i;
+
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+	mtype*tp = p; const mtype tv = (mtype)v;
+	RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+	tp[((i+LI)*incp)] = tv;
+	'); 
+	}
+	else
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__vectors_left_sum_reduce_and_zero(void * d, void * s, const rsb_type_t typecode, const size_t n, const size_t incd, const size_t off)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * d[off:off+n-1] += s[off:off+n-1] 
+	 * s[off:off+n-1] <- 0
+         *
+	 * \param d,s	the destination and source arrays
+	 * \param typecode	a valid type code
+	 * \param incd	the stride of d
+	 * \param off offset in the vectors
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+	mtype*td = d,*ts = s;
+	RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+	td[(off+i+LI)*incd]+=ts[(off+i+LI)];
+	ts[(off+i+LI)] = RSB_M4_ZERO(mtype);
+	'); 
+	}
+	else
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
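+dnl
+dnl	Illustrative sketch: with n = 2, off = 1, incd = 1, d = {9,9,9,9} and
+dnl	s = {0,5,7,0}, the call leaves d = {9,14,16,9} and s = {0,0,0,0}:
+dnl	the window d[1..2] accumulates s[1..2], which is then zeroed
+dnl	(presumably so per-thread partial results can be merged and reused).
+dnl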
+
+dnl
+ifelse(1,0,`dnl
+/* redundant code -- see rsb__cblas_Xaxpy */
+rsb_err_t rsb_vectors_sum_scale_strided(void * a, const void * b, rsb_type_t typecode, const void *alphap, size_t n, size_t inca, size_t incb)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * a <- a + alpha * b
+         *
+	 * \param array	an array pointer
+	 * \param typecode	a valid type code
+	 * \param n	the input array length
+	 * \note see daxpy in BLAS
+	 * TODO: declare alpha as a const local variable, so the compiler will not have to assume aliasing.
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+	if(inca==1 && incb==1 /*&& ( !alphap || RSB_IS_ELEMENT_ONE(alphap,typecode))*/ )
+		return rsb_vectors_sum(a,b,typecode,alphap,n);
+
+	if( !alphap || RSB_IS_ELEMENT_ONE(alphap,typecode))
+	{
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+	mtype*ta = a; const mtype*tb = b;
+	RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+	ta[(i+LI)*inca]+=tb[(i+LI)*incb];
+	'); 
+	}
+#endif
+')dnl
+	}
+	else
+	{
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{const mtype alpha = *((const mtype*)alphap);
+	mtype*ta = a; const mtype*tb = b;
+	RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+	ta[(i+LI)*inca]+=alpha*tb[(i+LI)*incb];
+	'); 
+	}
+#endif
+')dnl
+	}
+dnl	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+')dnl
+dnl
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+static rsb_err_t rsb_alpha_sum_strided(void * a, const void * b, const void * alphap, rsb_type_t type, size_t n, int inca, int incb)
+{
+	/*!
+	 * a <- a + alpha * b
+         *
+	 * \param a	the accumulator array
+	 * \param b	the addend array
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \note see daxpy in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+	if(inca == 1 && incb == 1)
+		return rsb_alpha_sum(a,b,alphap,type,n);
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+	const mtype alpha = alphap ? *(mtype*)alphap : RSB_M4_ONE(mtype);
+	mtype*ta = a; const mtype*tb = b;
+	RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+	ta[inca*(i+LI)]+=alpha*tb[incb*(i+LI)];
+	'); 
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__cblas_Xaxpy(rsb_type_t type, size_t n, const void * alphap, const void * x, const int incx, void * y, const int incy)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * y <- y + alpha * x
+         */
+	return rsb_alpha_sum_strided(y,x,alphap,type,n,incy,incx);
+}
+')dnl
+dnl
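+dnl
+dnl	Illustrative sketch (assuming the double type is configured in):
+dnl
+dnl	  double alpha = 2.0, x[] = {1,2}, y[] = {10,20};
+dnl	  rsb__cblas_Xaxpy(RSB_NUMERICAL_TYPE_DOUBLE, 2, &alpha, x, 1, y, 1);
+dnl	  /* y == {12,24} ; a NULL alphap is treated as 1 by rsb_alpha_sum_strided */
+dnl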
+
+dnl
+rsb_err_t rsb__vector_mult(const void * a, const void * b, void * c, rsb_type_t type, size_t n)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * c <- a*b
+	 * It is allowed to give c == a or c == b or a == b
+         *
+	 * \param a,b	the input arrays
+	 * \param c	the output array
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * 
+	 * FIXME : useless ?
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+	const mtype*ta = a; const mtype*tb = b; mtype*tc = c;
+dnl	//const mtype omega = *(mtype*)omegap;
+	RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+	tc[i+LI] = ta[i+LI]*tb[i+LI];
+	'); 
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__xcopy(void * a, const void * b, rsb_nnz_idx_t toi, rsb_nnz_idx_t foi, rsb_nnz_idx_t n,size_t el_size)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * a[toi:toi+n-1] <- b[foi:foi+n-1] 
+         *
+	 * \param a,b	the destination and source arrays
+	 * \param el_size	the element size in bytes
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	rsb_memcpy(((rsb_byte_t*)a)+el_size*toi,((const rsb_byte_t*)b)+el_size*foi,el_size*n);
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
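+dnl
+dnl	Illustrative sketch: rsb__xcopy is an untyped, offset-aware copy; for
+dnl	n = 3 doubles (el_size = 8), toi = 0 and foi = 2, it moves the 24
+dnl	bytes starting at byte offset 16 of b onto the first 24 bytes of a.
+dnl	Being memcpy based, source and destination should not overlap.
+dnl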
+
+dnl
+rsb_err_t rsb__do_are_same(const void * ap, const void * bp, rsb_nnz_idx_t n,rsb_type_t typecode, rsb_nnz_idx_t incx, rsb_nnz_idx_t incy)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+         *
+	 * \param ap,bp	the arrays to compare
+	 * \param typecode	a valid type code
+	 *
+	 * \return \rsberrcodemsg
+	 *
+	 * For cases like 1+0I differing from 1-0I ..
+	 * */
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+	rsb_nnz_idx_t i;
+	const mtype *a = ap; const mtype *b = bp;
+
+	RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+	if( a[incx*(i+LI)]!=b[incy*(i+LI)]) goto differing;
+')dnl
+dnl	for(i=0;i<n;++i)
+dnl		if( a[incx*i]!=b[incy*i])
+dnl			return RSB_ERR_GENERIC_ERROR;
+		return RSB_ERR_NO_ERROR;
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE;
+differing:
+	return RSB_ERR_GENERIC_ERROR;
+}
+')dnl
+dnl
+
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+static rsb_err_t rsb__xcopy_strided_typed(void * a, const void * b, rsb_nnz_idx_t toi, rsb_nnz_idx_t foi, rsb_nnz_idx_t n,rsb_type_t typecode, rsb_nnz_idx_t incx, rsb_nnz_idx_t incy)
+{
+	/*!
+	 * a[toi:toi+n-1] <- b[foi:foi+n-1] 
+         *
+	 * \param a,b	the destination and source arrays
+	 * \param typecode	a valid type code
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	if(incx==1 && incy==1)
+		return rsb__xcopy(a,b,toi,foi,n,RSB_SIZEOF(typecode));
+	/* else */
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+	rsb_nnz_idx_t i;
+	mtype *ap = a; const mtype *bp = b;
+	ap+=toi;
+	bp+=foi;
+	RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+	ap[(i+LI)*incx] = bp[(i+LI)*incy];
+	'); 
+		return RSB_ERR_NO_ERROR;
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__sqrt_of_sum_of_fabs_diffs(const void * a, const void * b, void *err, rsb_type_t type, size_t n)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * Computes the square root of the sum of squared differences between corresponding elements of the two input vectors, writing the result to *err.
+	 * \param a,b	the input arrays
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * 
+	 * FIXME
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+ifelse(mtype,`int',`dnl
+	/* UHM ...  */
+	{
+	double acc = RSB_M4_ZERO(double);
+	const mtype*ta = a, *tb = b;
+	*((double*)err) = RSB_M4_ZERO(double);
+	RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+	acc+=(ta[i+LI]-tb[i+LI])*(ta[i+LI]-tb[i+LI]);
+	'); 
+	*((double*)err) = sqrt(acc);
+	}
+',`dnl
+	{
+	const mtype*ta = a; const mtype*tb = b;
+	*((mtype*)err) = RSB_M4_ZERO(mtype);
+	RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+	*((mtype*)(err))+=(ta[i+LI]-tb[i+LI])*(ta[i+LI]-tb[i+LI]);
+	'); 
+	*((mtype*)err) = RSB_M4_SQRT(mtype,(*((mtype*)err)));
+	}
+')dnl
+	else 
+#endif
+')dnl
+		return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__fill_with_increasing_values(void * array, rsb_type_t type, size_t n)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * \ingroup gr_vec
+	 * Fills the array with increasing values, starting from one.
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{ 
+	mtype*ta = array;
+RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`ta[i+LI] = (const mtype)(i+LI+1);')
+	}
+	else 
+#endif
+')dnl
+		return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__util_do_conjugate(void * array, rsb_type_t type, size_t n)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * \ingroup gr_vec
+	 * Conjugates the array in place.
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) ){
+	mtype*ta = array;
+RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`ta[i+LI] = RSB_M4_CONJ_SYM(mtype,`n',RSB_M4_SYMBOL_HERMITIAN)(ta[i+LI]);')}
+	else 
+`#endif'
+')dnl
+		return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__util_do_negate(void * array, rsb_type_t type, size_t n)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * \ingroup gr_vec
+	 * Will negate the input n elements long array of type type.
+	 * \param array	an array pointer
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+{ 
+	mtype*ta = array;
+RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`ta[i+LI] = -ta[i+LI];')}
+	else 
+#endif
+')dnl
+		return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__util_find_min(void * minp, const void * array, rsb_type_t type, size_t n, rsb_nnz_idx_t inc)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * \ingroup gr_vec
+	 * Finds the element of smallest absolute value.
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+	if(n<1)return RSB_ERR_BADARGS;
+	if(inc<1)return RSB_ERR_BADARGS;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{const mtype * ap = array;mtype *mp = minp;
+	*mp = *ap;for(i = 1;i<n;++i){if(RSB_M4_ABS(mtype,ap[i*inc])<RSB_M4_ABS(mtype,*mp) )*mp = ap[i*inc];
+	}}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__util_find_max(void * maxp, const void * array, rsb_type_t type, size_t n, rsb_nnz_idx_t inc)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * \ingroup gr_vec
+	 * Finds the element of largest absolute value.
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+	if(n<1)return RSB_ERR_BADARGS;
+	if(inc<1)return RSB_ERR_BADARGS;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{const mtype * ap = array;mtype *mp = maxp;
+	*mp = *ap;for(i=1;i<n;++i){if(RSB_M4_ABS(mtype,ap[i*inc])>RSB_M4_ABS(mtype,*mp))*mp = ap[i*inc];
+	}}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__util_drop_to_zero_if_above_threshold(void * array, rsb_type_t type, size_t n, const void * threshold)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * \ingroup gr_vec
+	 * Sets to zero the elements whose absolute value exceeds that of the threshold.
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{const mtype th = (*(const mtype*)(threshold)); mtype*ta = array;
+	for(i = 0;i<n;++i)
+	{if(RSB_M4_ABS(mtype,th)<RSB_M4_ABS(mtype,ta[i]))ta[i] = RSB_M4_ZERO(mtype);}}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_nnz_idx_t rsb__util_count_positive(void * array, rsb_type_t type, size_t n)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * \ingroup gr_vec
+	 * Counts the elements whose real part is positive.
+	 *
+	 * \return the count; an error code value on unsupported type
+	 * */
+	size_t i, c = 0;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{	mtype*ta = array;
+		 for(i=0;i<n;++i)
+			c+=(RSB_M4_CREAL(mtype,ta[i])>(RSB_M4_REALT(mtype))0);
+	}else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return c;
+}
+')dnl
+dnl
+
+dnl
+rsb_nnz_idx_t rsb__util_count_negative(void * array, rsb_type_t type, size_t n)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * \ingroup gr_vec
+	 * Counts the elements whose real part is negative.
+	 *
+	 * \return the count; an error code value on unsupported type
+	 * */
+	size_t i, c = 0;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{	mtype*ta = array;
+		 for(i=0;i<n;++i)
+			c+=(RSB_M4_CREAL(mtype,ta[i])<(RSB_M4_REALT(mtype))0);
+	}else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return c;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__util_drop_to_zero_if_under_threshold(void * array, rsb_type_t type, size_t n, const void * threshold)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * \ingroup gr_vec
+	 * Sets to zero the elements whose absolute value is below that of the threshold.
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) ) {
+	const mtype th = (*(mtype*)(threshold)); mtype*ta = ((mtype*)(array));
+	for(i=0;i<n;++i){if(RSB_M4_ABS(mtype,th)>RSB_M4_ABS(mtype,ta[i]))ta[i] = RSB_M4_ZERO(mtype);}}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__fill_with_ones(void * array, rsb_type_t type, size_t n, size_t incx)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * \ingroup gr_vec
+	 * Will set to one the input n elements long array of type type.
+	 * \param array	an array pointer
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 *
+	 * \return \rsberrcodemsg
+	 * TODO:RENAME: rsb__fill_with_ones -> rsb__val_fill_with_ones.
+	 * */
+	size_t i;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) ){
+	mtype*ta = ((mtype*)(array));
+ for(i=0;i<n;++i) {ta[i*incx] = RSB_M4_ONE(mtype);}}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__debug_print_vectors_diff(const void * v1, const void * v2, size_t n, rsb_type_t type, size_t incx, size_t incy, int onlyfirst)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*! 
+	 * A debug function for printing, side by side, the differing entries of two vectors of a specified type.
+	 * FIXME : It should take into account thresholds specific to each numerical type.
+	 **/
+#if RSB_ALLOW_STDOUT
+	size_t i, differing = 0;
+	if(!v1 || !v2)return RSB_ERR_BADARGS;
+
+	RSB_STDERR("\t vectors diff :\n");
+	
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+		const mtype *v1p = v1,*v2p = v2; RSB_M4_REALT(mtype) th = 0.0001;
+		for(i=0;i<n ;++i) 
+		ifelse(mtype,`double complex',`if(creal(v1p[i*incx])-creal(v2p[i*incy])>th)/*FIXME : incomplete check*/',`dnl
+		ifelse(mtype,`float complex',`if(crealf(v1p[i*incx])-crealf(v2p[i*incy])>th)/*FIXME : incomplete check*/',`dnl
+		ifelse(mtype,`complex',       `if(creal(v1p[i*incx])-creal(v2p[i*incy])>th)/*FIXME : incomplete check*/',`dnl
+		ifelse(mtype,`int',       `if(v1p[i*incx]-v2p[i*incy])',`dnl
+		ifelse(mtype,`char',       `if(v1p[i*incx]-v2p[i*incy])',`dnl
+if(fabs((double)(v1p[i*incx]-v2p[i*incy]))>th)/*FIXME : incomplete check*/
+')dnl
+')dnl
+')dnl
+')dnl
+')dnl
+{		differing++;
+		if((onlyfirst==0)||(onlyfirst>differing))
+		RSB_STDOUT("%zd : "RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_PRINTF_STRING(mtype)" "RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_PRINTF_STRING(mtype)"\n",(rsb_printf_int_t)i,dnl
+		ifelse(mtype,`double complex',`creal(v1p[i*incx]),cimag(v1p[i*incx]),creal(v2p[i*incy]),cimag(v2p[i*incy])',`dnl
+		ifelse(mtype,`float complex',`crealf(v1p[i*incx]),cimagf(v1p[i*incx]),crealf(v2p[i*incy]),cimagf(v2p[i*incy])',`dnl
+		ifelse(mtype,`complex',`creal(v1p[i*incx]),cimag(v1p[i*incx]),creal(v2p[i*incy]),cimag(v2p[i*incy])',`dnl
+v1p[i*incx],v2p[i*incy]`'dnl
+')dnl
+')dnl
+')dnl
+		);
+}
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	if(differing>onlyfirst)RSB_STDOUT("...(for a total of %zd differing entries)...\n",(rsb_printf_int_t)(differing-onlyfirst));
+	return RSB_ERR_NO_ERROR;
+#else
+	return RSB_ERR_UNSUPPORTED_FEATURE; 
+#endif
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__debug_print_value(const void * v, rsb_type_t type)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*! 
+	 **/
+#if RSB_ALLOW_STDOUT
+	if(!v)return RSB_ERR_BADARGS;
+
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+		const mtype *v1p = v;
+		RSB_STDOUT(RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_PRINTF_STRING(mtype),dnl
+		ifelse(mtype,`double complex',`creal(v1p[0]),cimag(v1p[0])',`dnl
+		ifelse(mtype,`float complex',`crealf(v1p[0]),cimagf(v1p[0])',`dnl
+		ifelse(mtype,`complex',`creal(v1p[0]),cimag(v1p[0])',`dnl
+v1p[0]`'dnl
+')dnl
+')dnl
+')dnl
+		);
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+#else
+	return RSB_ERR_UNSUPPORTED_FEATURE; 
+#endif
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__debug_print_vector_extra(const void * v1, size_t n, rsb_type_t type, size_t inc, int style, FILE*stream)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*! 
+	 * A debug function for printing a single vector of a specified type, optionally preceded by a MatrixMarket header.
+	 **/
+#if RSB_ALLOW_STDOUT
+	rsb_nnz_idx_t i;
+	int want_header = ( style == 0x1 );
+	const char * ts = RSB_IS_MATRIX_TYPE_COMPLEX(type)?"complex":"real";
+	const char * ss = RSB_SYMMETRY_STRING(RSB_FLAG_NOFLAGS);
+	
+	if( n < 0 )
+		goto errb;
+
+	if(!v1 || !stream)
+		goto errb;
+
+	/*if(!want_header)
+		RSB_STDERR("\t vectors  :\n");*/
+	
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+		const mtype *v1p = v1;
+		if(want_header)RSB_FPRINTF(stream,"%%%%MatrixMarket matrix array %s %s\n%zd %zd\n",ts,ss,(rsb_printf_int_t)n,(rsb_printf_int_t)1);
+		for(i=0;i<n;++i) 
+		RSB_FPRINTF(stream,RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_PRINTF_STRING(mtype) "\n",dnl
+		ifelse(mtype,`double complex',`creal(v1p[i*inc]),cimag(v1p[i*inc])',`dnl
+		ifelse(mtype,`float complex',`crealf(v1p[i*inc]),cimagf(v1p[i*inc])',`dnl
+		ifelse(mtype,`complex',`creal(v1p[i*inc]),cimag(v1p[i*inc])',`dnl
+v1p[i*inc]`'dnl
+')dnl
+')dnl
+')dnl
+		);
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+#else
+	return RSB_ERR_UNSUPPORTED_FEATURE; 
+#endif
+errb:
+	return RSB_ERR_BADARGS;
+}
+')dnl
+dnl
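+dnl
+dnl	Illustrative sketch: with style == 0x1 and a real vector {1.5,2.5},
+dnl	the emitted stream would look like
+dnl
+dnl	  %%MatrixMarket matrix array real general
+dnl	  2 1
+dnl	  1.5
+dnl	  2.5
+dnl
+dnl	where the symmetry word is assumed here to expand to general for
+dnl	RSB_FLAG_NOFLAGS, and the value formatting follows the per-type
+dnl	printf string.
+dnl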
+
+dnl
+rsb_err_t rsb__debug_print_vector(const void * v1, size_t n, rsb_type_t type, size_t inc)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	return rsb__debug_print_vector_extra(v1, n, type, inc, 0x0, stdout);
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__debug_print_vectors(const void * v1, const void * v2, size_t n, size_t incx, size_t incy, rsb_type_t type)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*! 
+	 * A debug function for printing two vectors of a specified type, side by side.
+	 **/
+#if RSB_ALLOW_STDOUT
+	size_t i;
+	if(!v1 || !v2)return RSB_ERR_BADARGS;
+
+	RSB_STDERR("\t vectors  :\n");
+	
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+		const mtype *v1p = v1,*v2p = v2;
+		for(i=0;i<n;++i) 
+		RSB_STDOUT(RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_PRINTF_STRING(mtype)" "RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_PRINTF_STRING(mtype)"\n",dnl
+RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_PRINTF_ARG(mtype,`v1p[(i)*incx]'),dnl
+RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_PRINTF_ARG(mtype,`v2p[(i)*incy]')dnl
+);
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+#else
+	return RSB_ERR_UNSUPPORTED_FEATURE; 
+#endif
+}
+')dnl
+dnl
+dnl
+')
+dnl
+dnl
+dnl
+dnl ifdef(`ONLY_WANT_HEADERS',`dnl
+dnl #ifndef RSB_UTIL_H_INCLUDED
+dnl #define RSB_UTIL_H_INCLUDED
+dnl ')
+dnl
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`',`dnl
+dnl
+dnl
+')dnl
+dnl
+dnl
+dnl
+
+rsb_err_t rsb__do_account_sorted_optimized_css(
+	 const rsb_coo_idx_t * MIndx, const rsb_coo_idx_t * mIndx,
+	 const rsb_coo_idx_t Mdim, const rsb_coo_idx_t mdim,
+	 const rsb_nnz_idx_t nnz, rsb_nnz_idx_t * elements_per_block_row, rsb_nnz_idx_t * blocks_per_block_row
+)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/**
+	 	\ingroup gr_internals
+
+		The elements_per_block_row and blocks_per_block_row arrays must be zeroed by the caller.
+		FIXME : missing error handling.
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t n = 0;
+
+	if(blocks_per_block_row)
+	for(n=0;n<nnz;++n)
+	{
+		RSB_DEBUG_ASSERT(MIndx[n]<Mdim);
+		RSB_DEBUG_ASSERT(MIndx[n]>=0);
+		elements_per_block_row[MIndx[n]]++;
+		blocks_per_block_row  [MIndx[n]]++;
+	}
+	else
+	for(n=0;n<nnz;++n)
+	{
+		RSB_DEBUG_ASSERT(MIndx[n]<Mdim);
+		RSB_DEBUG_ASSERT(MIndx[n]>=0);
+		elements_per_block_row[MIndx[n]]++;
+	}
+	RSB_DO_ERR_RETURN(errval)
+}
+')dnl
+dnl
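+dnl
+dnl	Illustrative sketch: for sorted major indices MIndx = {0,0,2,2,2}
+dnl	(nnz = 5, Mdim = 3) and pre-zeroed output arrays, the loop leaves
+dnl	elements_per_block_row = {2,0,3}; when blocks_per_block_row is
+dnl	non-NULL it receives the same counts, since the loop increments both
+dnl	arrays once per nonzero.
+dnl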
+
+dnl
+rsb_err_t rsb__do_account_sorted_optimized(
+	 struct rsb_mtx_t * mtxAp,
+	 const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA,
+	 const rsb_coo_idx_t Idim, const rsb_coo_idx_t Jdim,
+	 const rsb_nnz_idx_t nnz, const struct rsb_mtx_partitioning_info_t * pinfop,
+	 rsb_nnz_idx_t * elements_per_block_row,
+	 rsb_nnz_idx_t * blocks_per_block_row
+)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/**
+	 *	\ingroup gr_internals
+	 * 	FIXME : document this
+	 */
+	rsb_coo_idx_t blockrows = 0;
+	rsb_coo_idx_t blockcolumns = 0;
+	rsb_coo_idx_t baserow = 0;
+	rsb_coo_idx_t basecolumn = 0;
+dnl	rsb_nnz_idx_t block_count = 0;
+dnl	rsb_nnz_idx_t *indptr = mtxAp->indptr;
+dnl	rsb_nnz_idx_t *bpntr = mtxAp->bpntr;
+dnl	rsb_nnz_idx_t *bindx = mtxAp->bindx;
+	const rsb_coo_idx_t *Mpntr = NULL;
+	const rsb_coo_idx_t *mpntr = NULL;
+	const rsb_coo_idx_t *MIndx = NULL;
+	const rsb_coo_idx_t *mIndx = NULL;
+	rsb_blk_idx_t mI = 0, MI = 0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t k = 0;	/* will index a nnz sized array */
+	int K = 0;
+	
+	if(0)
+	//if( flags & RSB_FLAG_SHOULD_DEBUG )
+		errval = rsb__do_account_sorted( mtxAp, IA, JA, nnz, pinfop, elements_per_block_row, blocks_per_block_row);
+
+	if(nnz==0)
+	{
+		/* FIXME: new case, incomplete (useful for diagonal implicit matrices) */
+		return RSB_ERR_NO_ERROR;
+	}
+
+#if RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS
+	if(!pinfop)
+	{
+		/* a performance fix */
+		if(mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)
+			return rsb__do_account_sorted_optimized_css(JA,IA,Jdim,Idim,nnz,elements_per_block_row,blocks_per_block_row);
+		else
+			return rsb__do_account_sorted_optimized_css(IA,JA,Idim,Jdim,nnz,elements_per_block_row,blocks_per_block_row);
+	}
+#endif
+	
+	if(mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)
+	{
+		mpntr = pinfop->rpntr;
+		Mpntr = pinfop->cpntr;
+		mIndx = IA;
+		MIndx = JA;
+	}
+	else
+	{
+		Mpntr = pinfop->rpntr;
+		mpntr = pinfop->cpntr;
+		MIndx = IA;
+		mIndx = JA;
+	}
+
+foreach(`matrix_storage',RSB_M4_MATRIX_STORAGE,`dnl
+	/*	storage matrix_storage	*/
+	if( mtxAp->`matrix_storage'==RSB_M4_MATRIX_STORAGE_PREPROCESSOR_SYMBOL(matrix_storage) )
+{
+	k = mI = MI = 0;K = 0;
+#if RSB_EXPERIMENTAL_USE_PURE_BCSS_FOR_CONSTRUCTOR
+/*	rsb__get_blocking_size(mtxAp, &blockrows, &blockcolumns);*/
+	rsb__get_physical_blocking_size(mtxAp, &blockrows, &blockcolumns);
+	RSB_ASSERT( blockrows && blockcolumns);
+#else
+	blockrows    = Mpntr[MI+1] - Mpntr[MI];
+	blockcolumns = mpntr[mI+1] - mpntr[mI];
+#endif
+ifelse(RSB_M4_IS_FORMAT_BCSS(matrix_storage),1,`dnl
+pushdef(`RSB_M4_Mpntr',`(blockrows   *($1))')dnl
+pushdef(`RSB_M4_mpntr',`(blockcolumns*($1))')dnl
+',`dnl
+pushdef(`RSB_M4_Mpntr',`Mpntr[$1]')dnl
+pushdef(`RSB_M4_mpntr',`mpntr[$1]')dnl
+')dnl
+
+
+	k = mI = MI = K=0;
+	while( MIndx[k] >= RSB_M4_Mpntr(MI+1) )++MI;	/* skipping preceding block rows .. */
+	while( mIndx[k] >= RSB_M4_mpntr(mI+1) )++mI;	/* skipping preceding block columns .. */
+	blockrows    = RSB_M4_Mpntr(MI+1) - RSB_M4_Mpntr(MI);
+	blockcolumns = RSB_M4_mpntr(mI+1) - RSB_M4_mpntr(mI);
+	baserow = RSB_M4_Mpntr(MI);
+	basecolumn = RSB_M4_mpntr(mI);
+	*elements_per_block_row = 0;
+	*blocks_per_block_row   = 0;	
+	elements_per_block_row[MI*0] += blockrows * blockcolumns;
+	blocks_per_block_row[MI]   +=1;
+
+	while(RSB_LIKELY(k<nnz))
+	{
+#ifdef DEBUG
+		if( MIndx[k] < baserow  )
+		{
+			RSB_ERROR("k=%zd : (%zd %zd) is not ok\n",(rsb_printf_int_t)k,(rsb_printf_int_t)(MIndx[k]+1),(rsb_printf_int_t)(mIndx[k]+1));
+			RSB_STDERR("(minor dim. index %zd < base row %zd)\n",(rsb_printf_int_t)MIndx[k] , (rsb_printf_int_t)baserow);
+			errval = RSB_ERR_INTERNAL_ERROR;
+			goto err;
+		}
+#endif
+
+		if( mIndx[k] >= basecolumn+blockcolumns  )
+		{
+			/* new block column, for sure */
+ifelse(RSB_M4_IS_FORMAT_BCSS(matrix_storage),1,`dnl
+			mI = mIndx[k]/blockcolumns;
+',`
+			while( mIndx[k] >= RSB_M4_mpntr(mI+1) )++mI;
+			blockcolumns = RSB_M4_mpntr(mI+1) - RSB_M4_mpntr(mI);
+')dnl
+			basecolumn = RSB_M4_mpntr(mI);
+
+			if( MIndx[k] >= baserow+blockrows  )
+			{
+				/* new block row  */
+ifelse(RSB_M4_IS_FORMAT_BCSS(matrix_storage),1,`dnl
+				MI = MIndx[k]/blockrows;
+',`
+				while( MIndx[k] >= RSB_M4_Mpntr(MI+1) )++MI;
+				blockrows    = RSB_M4_Mpntr(MI+1) - RSB_M4_Mpntr(MI);
+')dnl
+				baserow = RSB_M4_Mpntr(MI);
+			}
+			else
+			{
+				/* same block row  */
+			}
+			*elements_per_block_row += blockrows * blockcolumns;
+			blocks_per_block_row[MI]   +=1;
+			++K;
+		}
+		else
+		if( MIndx[k] >= baserow+blockrows  )
+		{
+			/* new row block, for sure */
+ifelse(RSB_M4_IS_FORMAT_BCSS(matrix_storage),1,`dnl
+			MI = MIndx[k]/blockrows;
+',`
+			while( MIndx[k] >= RSB_M4_Mpntr(MI+1) )++MI;
+			blockrows    = RSB_M4_Mpntr(MI+1) - RSB_M4_Mpntr(MI);
+')dnl
+			baserow = RSB_M4_Mpntr(MI);
+
+			if( mIndx[k] < basecolumn  )
+			{
+				/* new row block, new block column  */
+ifelse(RSB_M4_IS_FORMAT_BCSS(matrix_storage),1,`dnl
+				mI = mIndx[k]/blockcolumns;
+',`
+				mI = 0;
+				while( mIndx[k] >= RSB_M4_mpntr(mI+1) )++mI;
+				blockcolumns = RSB_M4_mpntr(mI+1) - RSB_M4_mpntr(mI);
+')dnl
+				basecolumn = RSB_M4_mpntr(mI);
+			}
+			else
+			{
+				/* new row block, same column  */
+			}
+			/* get rid of this var : elements_per_block_row */
+			*elements_per_block_row += blockrows * blockcolumns;
+			blocks_per_block_row[MI]   +=1;
+			++K;
+		}
+		else
+		{
+			/* same block row for sure */
+		}
+		++k;
+	}
+	errval = RSB_ERR_NO_ERROR;goto ret;
+	}
+popdef(`RSB_M4_Mpntr')dnl
+popdef(`RSB_M4_mpntr')dnl
+')dnl
+dnl
+	errval = RSB_ERR_INTERNAL_ERROR;
+ret:	return errval;
+}
+dnl
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__do_insert_sorted_optimized_css( struct rsb_mtx_t * mtxAp, const void *VA, const rsb_coo_idx_t * MIndx, const rsb_coo_idx_t * mIndx, const rsb_nnz_idx_t nnz)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/**
+	 	\ingroup gr_internals
+
+		Fills the values array and the trivial one-element-per-block index arrays of mtxAp.
+		FIXME : missing error handling.
+	*/
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t n = 0;
+
+	/* in case of RSB_FLAG_EXPERIMENTAL_IN_PLACE_CSR, they are equal */
+	if(mtxAp->VA != VA)
+		rsb_memcpy(mtxAp->VA  ,VA  ,mtxAp->el_size*nnz);
+
+	for(n=0;n<nnz+1;++n)
+		mtxAp->indptr[n] = n;
+
+	for(n=0;n<mtxAp->nnz;++n)
+		mtxAp->bindx [n] = mIndx[n];
+	mtxAp->bindx [nnz] = 0;
+
+	/* bindx and indptr have been set above */
+	RSB_DO_ERR_RETURN(errval)
+}
+')dnl
+dnl
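+dnl
+dnl	Illustrative sketch: after a call with nnz = 4 and minor indices
+dnl	mIndx = {0,2,1,3}, the matrix holds indptr = {0,1,2,3,4} (one element
+dnl	per block), bindx = {0,2,1,3,0} (the minor indices plus a trailing
+dnl	zero sentinel), and VA is a byte copy of the input values unless the
+dnl	two already alias (the in-place CSR case).
+dnl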
+
+dnl
+rsb_err_t rsb__do_insert_sorted_optimized( struct rsb_mtx_t * mtxAp, const void *VA, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const rsb_nnz_idx_t nnz, const struct rsb_mtx_partitioning_info_t * pinfop)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*
+	 *	FIXME ! UNFINISHED 
+	 * 	and please note that the linked list format is incomplete, so it does not fully support block column major order
+	 */
+	rsb_coo_idx_t blockrows = 0;
+	rsb_coo_idx_t blockcolumns = 0;
+	rsb_coo_idx_t baserow = 0;
+	rsb_coo_idx_t basecolumn = 0;
+dnl	rsb_nnz_idx_t block_count = 0;
+	rsb_nnz_idx_t *indptr = mtxAp->indptr;
+dnl	rsb_nnz_idx_t *bpntr = mtxAp->bpntr;
+	rsb_nnz_idx_t *bindx = mtxAp->bindx;
+	const rsb_coo_idx_t *Mpntr = NULL;
+	const rsb_coo_idx_t *mpntr = NULL;
+	const rsb_coo_idx_t *MIndx = NULL;
+	const rsb_coo_idx_t *mIndx = NULL;
+	rsb_blk_idx_t mI = 0, MI = 0;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	rsb_nnz_idx_t k = 0;	/* will index a nnz sized array */
+	rsb_nnz_idx_t K = 0;
+
+	if(nnz==0)
+	{
+		/* FIXME: new case, incomplete (useful for diagonal implicit matrices) */
+		K = 0;		/* if nnz == 0 then K == 0 */
+		bindx[K] = 0;	// the first element of the working bindx array should be set to a safe value
+		return RSB_ERR_NO_ERROR;
+	}
+
+	if(0)
+		return rsb__do_insert_sorted( mtxAp, VA, IA, JA, nnz, pinfop);
+
+#if RSB_WANT_EXPERIMENTAL_NO_EXTRA_CSR_ALLOCATIONS
+	if(!pinfop)
+	{
+		/* a performance fix */
+		if(mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)
+			return rsb__do_insert_sorted_optimized_css( mtxAp, VA, JA, IA, nnz );
+		else
+			return rsb__do_insert_sorted_optimized_css( mtxAp, VA, IA, JA, nnz );
+	}
+#endif
+
+	if(mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER)
+	{
+		mpntr = pinfop->rpntr;
+		Mpntr = pinfop->cpntr;
+		mIndx = IA;
+		MIndx = JA;
+	}
+	else
+	{
+		Mpntr = pinfop->rpntr;
+		mpntr = pinfop->cpntr;
+		MIndx = IA;
+		mIndx = JA;
+	}
+
+
+foreach(`matrix_storage',RSB_M4_MATRIX_STORAGE,`dnl
+foreach(`mtype',RSB_M4_TYPES,`dnl
+	/*	type mtype, storage matrix_storage	*/
+	if( mtxAp->typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	if( mtxAp->`matrix_storage'==RSB_M4_MATRIX_STORAGE_PREPROCESSOR_SYMBOL(matrix_storage) )
+{
+	mtype * dst = mtxAp->VA;
+	k = mI = MI = 0; K = 0;
+#if RSB_EXPERIMENTAL_USE_PURE_BCSS_FOR_CONSTRUCTOR
+/*	rsb__get_blocking_size(mtxAp, &blockrows, &blockcolumns);*/
+	rsb__get_physical_blocking_size(mtxAp, &blockrows, &blockcolumns);
+	RSB_ASSERT( blockrows && blockcolumns);
+#else
+	blockrows    = Mpntr[MI+1] - Mpntr[MI];
+	blockcolumns = mpntr[mI+1] - mpntr[mI];
+#endif
+ifelse(RSB_M4_IS_FORMAT_BCSS(matrix_storage),1,`dnl
+pushdef(`RSB_M4_Mpntr',`(blockrows   *($1))')dnl
+pushdef(`RSB_M4_mpntr',`(blockcolumns*($1))')dnl
+',`dnl
+pushdef(`RSB_M4_Mpntr',`Mpntr[$1]')dnl
+pushdef(`RSB_M4_mpntr',`mpntr[$1]')dnl
+')dnl
+ifelse(RSB_M4_IS_FORMAT_COLUMN_MAJOR(matrix_storage),1,`dnl
+pushdef(`RSB_M4_IBO',`(MIndx[k]-baserow)+(mIndx[k]-basecolumn)*blockrows')dnl
+',`dnl
+pushdef(`RSB_M4_IBO',`(MIndx[k]-baserow)*blockcolumns+(mIndx[k]-basecolumn)')dnl
+')dnl
+pushdef(`RSB_M4_BLOCK_OFFSET',`
+ifelse(RSB_M4_IS_FORMAT_BCSS(matrix_storage),1,`dnl
+	K * blockrows * blockcolumns
+',`dnl
+ifelse(RSB_M4_IS_FORMAT_LINKED_LIST(matrix_storage),1,`dnl
+	RSB_BLOCK_OFFSET(mtxAp,K)
+dnl	indptr[ K ] seems not adequate
+',`dnl
+	indptr[ K ]
+	/*K * blockrows * blockcolumns*/
+	/*RSB_BLOCK_OFFSET(mtxAp,K)/mtxAp->el_size*/ /* FIXME : unfinished ! */ 
+')dnl
+')dnl
+dnl
+')dnl
+
+	while( MIndx[k] >= RSB_M4_Mpntr(MI+1) )++MI;	/* skipping preceding block rows .. */
+	while( mIndx[k] >= RSB_M4_mpntr(mI+1) )++mI;	/* skipping preceding block columns .. */
+dnl	blockrows    = RSB_M4_Mpntr(MI+1) - RSB_M4_Mpntr(MI);
+dnl	blockcolumns = RSB_M4_mpntr(mI+1) - RSB_M4_mpntr(mI);
+	baserow = RSB_M4_Mpntr(MI);
+	basecolumn = RSB_M4_mpntr(mI);
+	bindx [ K ] = mI;			/* a new block */
+	indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;	/* FIXME : DUPLICATION ?! see later */
+
+dnl 	DELETE THIS
+ifelse(RSB_M4_IS_FORMAT_LINKED_LIST(matrix_storage),1,`dnl
+	{
+		if(RSB_WANT_VERBOSE_MESSAGES)
+			RSB_INFO("initializing linked lists stuff.\n");
+ifelse(RSB_M4_IS_FORMAT_COLUMN_MAJOR(matrix_storage),1,`dnl
+	RSB_BLOCK_TRAILING_STRUCT_SET(RSB_BLOCK_TRAILING_STRUCT_GET(mtxAp,K),mI,MI,blockcolumns,blockrows,basecolumn,baserow)
+',`dnl
+	RSB_BLOCK_TRAILING_STRUCT_SET(RSB_BLOCK_TRAILING_STRUCT_GET(mtxAp,K),MI,mI,blockrows,blockcolumns,baserow,basecolumn)
+')dnl
+	}
+')dnl
+
+dnl	dst =  mtxAp->VA;
+dnl	dst += RSB_M4_IBO;
+dnl	dst += RSB_M4_BLOCK_OFFSET;
+dnl	{rsb_blk_idx_t ibo = 0;/* FIXME */
+dnl ifelse(RSB_M4_IS_FORMAT_COLUMN_MAJOR(matrix_storage),1,`dnl
+dnl 		ibo = RSB_GET_INTRA_BLOCK_OFFSET(mIndx[k],MIndx[k],mI,MI,mtxAp) ;
+dnl ',`dnl
+dnl 		ibo = RSB_GET_INTRA_BLOCK_OFFSET(MIndx[k],mIndx[k],MI,mI,mtxAp) ;
+dnl ')dnl
+dnl 		dst += ibo;
+dnl	}
+
+	if( (mtxAp->flags & RSB_FLAG_SORTED_INPUT ) != 0 && 1 /* ONLY FOR 1 X 1 BLOCKED */)
+	{
+		//RSB_STDERR("rsb__do_insert_sorted_optimized : TODO : please specialize for specific blockings ! \n");
+	}
+
+while(RSB_LIKELY(k<nnz))
+	{
+#ifdef DEBUG
+		if( MIndx[k] < baserow  )
+		{
+			RSB_ERROR("k=%zd : (%zd %zd) is not ok\n",(rsb_printf_int_t)k,(rsb_printf_int_t)(MIndx[k]+1),(rsb_printf_int_t)(mIndx[k]+1));
+			RSB_STDERR("(major dim. index %zd < base row %zd)\n",(rsb_printf_int_t)MIndx[k],(rsb_printf_int_t)baserow);
+			errval = RSB_ERR_INTERNAL_ERROR;
+			goto err;/* NOTE : this jump could be evil */
+		}
+#endif
+
+		if( mIndx[k] >= basecolumn+blockcolumns  )
+		{
+			/* new block column, for sure */
+ifelse(RSB_M4_IS_FORMAT_BCSS(matrix_storage),1,`dnl
+			mI = mIndx[k]/blockcolumns;
+',`
+			while( mIndx[k] >= RSB_M4_mpntr(mI+1) )++mI;
+			blockcolumns = RSB_M4_mpntr(mI+1) - RSB_M4_mpntr(mI);
+')dnl
+			basecolumn = RSB_M4_mpntr(mI);
+
+			if( MIndx[k] >= baserow+blockrows  )
+			{
+				/* new block row  */
+ifelse(RSB_M4_IS_FORMAT_BCSS(matrix_storage),1,`dnl
+				MI = MIndx[k]/blockrows;
+',`
+				while( MIndx[k] >= RSB_M4_Mpntr(MI+1) )++MI;
+				blockrows    = RSB_M4_Mpntr(MI+1) - RSB_M4_Mpntr(MI);
+')dnl
+				baserow = RSB_M4_Mpntr(MI);
+			}
+			else
+			{
+				/* same block row  */
+			}
+			++K;
+			bindx [ K ] = mI;			/* a new block */
+			indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;
+ifelse(RSB_M4_IS_FORMAT_LINKED_LIST(matrix_storage),1,`dnl
+ifelse(RSB_M4_IS_FORMAT_COLUMN_MAJOR(matrix_storage),1,`dnl
+	RSB_BLOCK_TRAILING_STRUCT_SET(RSB_BLOCK_TRAILING_STRUCT_GET(mtxAp,K),mI,MI,blockcolumns,blockrows,basecolumn,baserow)
+',`dnl
+	RSB_BLOCK_TRAILING_STRUCT_SET(RSB_BLOCK_TRAILING_STRUCT_GET(mtxAp,K),MI,mI,blockrows,blockcolumns,baserow,basecolumn)
+')dnl
+')dnl
+		}
+		else
+		if( MIndx[k] >= baserow+blockrows  )
+		{
+			/* new row block, for sure */
+ifelse(RSB_M4_IS_FORMAT_BCSS(matrix_storage),1,`dnl
+				MI = MIndx[k]/blockrows;
+',`
+				while( MIndx[k] >= RSB_M4_Mpntr(MI+1) )++MI;
+				blockrows    = RSB_M4_Mpntr(MI+1) - RSB_M4_Mpntr(MI);
+')dnl
+				baserow = RSB_M4_Mpntr(MI);
+
+			if( mIndx[k] < basecolumn  )
+			{
+				/* new row block, new block column  */
+				mI = 0;
+ifelse(RSB_M4_IS_FORMAT_BCSS(matrix_storage),1,`dnl
+				mI = mIndx[k]/blockcolumns;
+',`
+				while( mIndx[k] >= RSB_M4_mpntr(mI+1) )++mI;
+				blockcolumns = RSB_M4_mpntr(mI+1) - RSB_M4_mpntr(mI);
+')dnl
+				basecolumn = RSB_M4_mpntr(mI);
+			}
+			else
+			{
+				/* new row block, same column  */
+			}
+			++K;
+			bindx [ K ] = mI;			/* a new block */
+			indptr[ K+1 ] = indptr[ K  ] + blockrows * blockcolumns;
+ifelse(RSB_M4_IS_FORMAT_LINKED_LIST(matrix_storage),1,`dnl
+ifelse(RSB_M4_IS_FORMAT_COLUMN_MAJOR(matrix_storage),1,`dnl
+	RSB_BLOCK_TRAILING_STRUCT_SET(RSB_BLOCK_TRAILING_STRUCT_GET(mtxAp,K),mI,MI,blockcolumns,blockrows,basecolumn,baserow)
+',`
+	RSB_BLOCK_TRAILING_STRUCT_SET(RSB_BLOCK_TRAILING_STRUCT_GET(mtxAp,K),MI,mI,blockrows,blockcolumns,baserow,basecolumn)
+')dnl
+')dnl
+		}
+		else
+		{
+			/* same block row for sure */
+		}
+		dst = mtxAp->VA;
+		RSB_DEBUG_ASSERT(mI>=0);
+		RSB_DEBUG_ASSERT(MI>=0);
+
+ifelse(RSB_M4_IS_FORMAT_LINKED_LIST(matrix_storage),1,`dnl
+		/* :( */
+		dst = (mtype*) ( ((rsb_byte_t*)dst)+RSB_BLOCK_OFFSET(mtxAp,K) );
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+dnl		dst = (mtype*)((rsb_byte_t*)dst)+(K+1)*RSB_BLOCK_EXTRA_BYTES;
+		rsb_nnz_idx_t ibo = 0;
+		if(RSB_UNLIKELY(mtxAp->flags & RSB_FLAG_WANT_COLUMN_MAJOR_ORDER))
+			ibo = RSB_GET_INTRA_BLOCK_OFFSET(mIndx[k],MIndx[k],mI,MI,mtxAp) ;
+		else
+			ibo = RSB_GET_INTRA_BLOCK_OFFSET(MIndx[k],mIndx[k],MI,mI,mtxAp) ;
+		dst = (mtype*) (((rsb_byte_t*)dst)+ibo);
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+',`
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst += RSB_M4_BLOCK_OFFSET;
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst += RSB_M4_IBO;
+')dnl
+		RSB_DEBUG_ASSERT(((rsb_byte_t*)dst)>=((rsb_byte_t*)mtxAp->VA));
+		dst[0] = ((const mtype*)VA)[k];
+		++k;
+	}
+	if(nnz)++K;	/* if nnz == 0 then K = 0 */
+	bindx[K] = 0;	// the first element past the end of the working bindx is set to a safe value
+	return RSB_ERR_NO_ERROR;	/* FIXME ! */
+}
+popdef(`RSB_M4_Mpntr')dnl
+popdef(`RSB_M4_mpntr')dnl
+popdef(`RSB_M4_IBO')dnl
+popdef(`RSB_M4_BLOCK_OFFSET')dnl
+dnl
+')dnl
+')dnl
+	errval = RSB_ERR_INTERNAL_ERROR;
+	return errval;
+}
+dnl
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__dump_block(rsb_type_t type, const void * VA, rsb_blk_idx_t roff, rsb_blk_idx_t coff, rsb_blk_idx_t rows, rsb_blk_idx_t cols )
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * Dumps a dense matrix to stdout.
+	 * Used for debugging purposes.
+	 *
+	 * FIXME : should be integrated with the macro subsystem in util.m4, support column major order, and be debugged.
+	 */
+#if RSB_ALLOW_STDOUT
+	register rsb_coo_idx_t i, j;
+
+	if(RSB_BLK_MUL_OVERFLOW(rows,cols))
+		return RSB_ERR_LIMITS;
+
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if(type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype))
+	{
+		for(i=0;i<rows;++i)for(j=0;j<cols;++j)
+		if(((mtype*)VA)[cols*i+j]!=RSB_M4_ZERO(mtype) )
+		{ RSB_STDOUT(""
+dnl :( were %10 %10 % 20 ...
+		"%zd"/* FIXME : this could be any index type! */
+		"\t"
+		"%zd"
+		"\t"
+		RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_PRINTF_STRING(mtype)
+		"\n",(rsb_printf_int_t)(roff+i+1),(rsb_printf_int_t)(coff+j+1),
+RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_PRINTF_ARG(mtype,`((mtype*)VA)[cols*i+j]'));
+		}
+		return RSB_ERR_NO_ERROR;
+	}
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+#else
+	return RSB_ERR_UNSUPPORTED_FEATURE; 
+#endif
+}
+')dnl
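+/* Output sketch (hypothetical values): dumping a 2x2 double block {1,0,0,2}
+   at roff=0, coff=0 prints only the nonzeros, as tab-separated one-based
+   triplets, one per line:
+     1	1	1
+     2	2	2
+   zero entries are skipped. */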
+dnl
+
+dnl
+rsb_err_t rsb__dump_blocks(const struct rsb_mtx_t *mtxAp)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	return RSB_ERR_UNIMPLEMENTED_YET;
+#if 0
+	/*! 
+	 * \ingroup gr_internals
+	 * A debug function for printing out the matrix structure.
+	 *
+	 * FIXME : UNFINISHED
+	 * Note : it is extremely slow.
+	 **/
+	rsb_blk_idx_t i,j;
+	if(!mtxAp)return RSB_ERR_BADARGS;
+	if(!mtxAp->options)return RSB_ERR_BADARGS;
+
+	RSB_STDERR("\t block structure :\n");
+	
+	/* this prints out the matrix blocks nnz structure */
+	for(i=0;i<mtxAp->M_b;++i)
+	{
+		for(j=0;j<mtxAp->K_b;++j)
+		if((RSB_BITMAP_GET(mtxAp->options->bitmap,mtxAp->M_b,mtxAp->K_b,i,j)))
+		{
+			RSB_STDERR("1");
+		}
+		else
+		{
+			RSB_STDERR("0");
+		}
+		RSB_STDERR("\n");
+	}
+	return RSB_ERR_NO_ERROR;
+#endif
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__test_print_csr(rsb_type_t type, rsb_flags_t flags, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const void * VA, rsb_coo_idx_t rows, rsb_coo_idx_t cols, rsb_nnz_idx_t nnz, rsb_bool_t want_header, FILE*stream)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/**
+	 * \ingroup gr_internals
+	 * Dumps out a whole matrix, from its CSR representation.
+	 * 
+	 * Warning : the nonzeros should be sorted on input.
+	 */
+#if RSB_ALLOW_STDOUT
+	rsb_coo_idx_t k;
+	if( !stream )goto err;
+	if( !IA )goto err;
+	if( ( !JA || !VA ) && nnz>0  )goto err;
+
+	RSB_FPRINTF(stream,"%zd\n",(rsb_printf_int_t)rows);
+	/* RSB_FPRINTF(stream,"%zd\n",(rsb_printf_int_t) nnz); */
+	for(k=0;k<rows+1;++k) { RSB_FPRINTF(stream,"%zd\n",(rsb_printf_int_t)(IA[k]+1)); }
+	for(k=0;k<nnz   ;++k) { RSB_FPRINTF(stream,"%zd\n",(rsb_printf_int_t)(JA[k]+1)); }
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if(type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype))
+	{
+		for(k=0;k<nnz;++k)
+		{
+			RSB_FPRINTF(stream,
+				RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_PRINTF_STRING(mtype)
+				"\n"
+				,RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_PRINTF_ARG(mtype,`((mtype*)VA)[k]'));
+		}
+		return RSB_ERR_NO_ERROR;
+	}
+#endif
+')dnl
+err:
+	return RSB_ERR_GENERIC_ERROR;
+#else
+	return RSB_ERR_UNSUPPORTED_FEATURE; 
+#endif
+}
+')dnl
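+/* Format sketch (hypothetical 2x2 identity in zero-based CSR: IA={0,1,2},
+   JA={0,1}, VA={1,1}): the stream receives the row count, then rows+1
+   one-based row pointers, then nnz one-based column indices, then nnz
+   values, one item per line: 2 1 2 3 1 2 1 1. */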
+dnl
+
+dnl
+rsb_err_t rsb__test_print_coo_mm(rsb_type_t type, rsb_flags_t flags, const rsb_coo_idx_t * IA, const rsb_coo_idx_t * JA, const void * VA, rsb_coo_idx_t rows, rsb_coo_idx_t cols, rsb_nnz_idx_t nnz, rsb_bool_t want_header, FILE*stream)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/**
+	 * \ingroup gr_internals
+	 * Dumps out a whole matrix, from its coordinates, in matrix market format.
+	 * 
+	 * Warning : the nonzeros should be sorted on input.
+	 */
+#if RSB_ALLOW_STDOUT
+	rsb_coo_idx_t k;
+	const char * ts = RSB_IS_MATRIX_TYPE_COMPLEX(type)?"complex":"real";
+	const char * ss = RSB_SYMMETRY_STRING(flags);
+	
+	if( !stream )
+	{
+		goto err;
+	}
+
+	if( ( !IA || !JA || !VA ) && nnz > 0 )
+		goto err;
+	if( rows < 0 || cols < 0 || nnz < 0 )
+		goto err;
+
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if(type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype))
+	{
+		if(want_header)RSB_FPRINTF(stream,"%%%%MatrixMarket matrix coordinate %s %s\n%zd %zd %zd\n",ts,ss,(rsb_printf_int_t)rows,(rsb_printf_int_t)cols,(rsb_printf_int_t)nnz);
+/*		for(k=0;k<nnz;++k) { RSB_FPRINTF(stream,"%6zd %6zd %20g\n",(rsb_printf_int_t)(IA[k]+1),(rsb_printf_int_t)(JA[k]+1),((float*)VA)[k]); }*/
+		for(k=0;k<nnz;++k)
+		{
+			RSB_FPRINTF(stream,
+				"%zd"
+				"\t"
+				"%zd"
+				"\t"
+				RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_PRINTF_STRING(mtype)
+				"\n"
+				,(rsb_printf_int_t)(IA[k]+1),(rsb_printf_int_t)(JA[k]+1),dnl
+RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_PRINTF_ARG(mtype,`((mtype*)VA)[k]'));
+		}
+		return RSB_ERR_NO_ERROR;
+	}
+#endif
+')dnl
+err:
+	return RSB_ERR_GENERIC_ERROR;
+#else
+	return RSB_ERR_UNSUPPORTED_FEATURE; 
+#endif
+}
+')dnl
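+/* Output sketch (hypothetical 2x2 real general matrix, nnz=2, want_header
+   set):
+     %%MatrixMarket matrix coordinate real general
+     2 2 2
+     1	1	1
+     2	2	2
+   the value format depends on the numerical type in use. */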
+dnl
+
+dnl
+/*static*/ /*inline*/ size_t rsb__do_sizeof(rsb_type_t type)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+	{
+		/*
+		 * FIXME : UNUSED ?
+		 */
+		size_t so = 0;
+		switch(type)
+		{
+			/* supported RSB_M4_MATRIX_TYPES */
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+			case RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type)	:
+				so = sizeof(type);
+			break;
+')dnl
+			/* unsupported type */
+			default :
+			RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS 
+		}
+		return so;
+	}
+')dnl
+dnl
+
+dnl
+ifdef(`1',`0',`dnl
+rsb_err_t rsb__do_coo_sum( struct rsb_coo_matrix_t*coocp, const void *alphap, const struct rsb_coo_matrix_t*cooap, const void *betap,  const struct rsb_coo_matrix_t*coobp)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	struct rsb_coo_matrix_t cooa = *cooap, coob = *coobp, cooc = *coocp;
+	rsb_nnz_idx_t /*rnz = 0,*/an, bn, cn;
+
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if(cooa.typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype))
+	{
+	mtype alpha = alphap?*(mtype*)alphap:RSB_M4_ONE(mtype);
+	mtype beta  = betap ?*(mtype*)betap :RSB_M4_ONE(mtype);
+	for(cn = 0, an = 0, bn = 0;an<cooa.nnz || bn<coob.nnz;)
+	{
+		rsb_nnz_idx_t ap = an, bp = bn;
+		if(cooa.IA[an]==coob.IA[bn] && cooa.JA[an]==coob.JA[bn])
+			cooc.IA[cn] = cooa.IA[an],cooc.JA[cn] = cooa.JA[an],
+			((mtype*)cooc.VA)[cn] = alpha * ((mtype*)cooa.VA)[an] + beta * ((mtype*)coob.VA)[bn],
+			ap = an, bp = bn, ++cn, ++an, ++bn;
+
+		for(;an<cooa.nnz && cooa.IA[an]==cooa.IA[ap] && cooa.JA[an]==cooa.JA[ap] ;++an)
+			//RSB_STDOUT("x> %d %d\n",cooa.IA[an],cooa.JA[an])
+			((mtype*)cooc.VA)[cn] += alpha * ((mtype*)cooa.VA)[an];
+
+		for(;bn<coob.nnz && coob.IA[bn]==coob.IA[bp] && coob.JA[bn]==coob.JA[bp] ;++bn)
+			//RSB_STDOUT("x> %d %d\n",coob.IA[bn],coob.JA[bn])
+			((mtype*)cooc.VA)[cn] += beta  * ((mtype*)coob.VA)[bn];
+
+		if( bn<coob.nnz )
+		for(;an<cooa.nnz && (cooa.IA[an]<coob.IA[bn] ||
+			       	(cooa.IA[an] <= coob.IA[bn] && cooa.JA[an]<coob.JA[bn]))
+			       	;++an)
+				//RSB_STDOUT("-> %d %d\n",cooa.IA[an],cooa.JA[an]),
+			cooc.IA[cn] = cooa.IA[an], cooc.JA[cn] = cooa.JA[an],
+			((mtype*)cooc.VA)[cn] = alpha * ((mtype*)cooa.VA)[an],
+			++cn;
+
+		if( an<cooa.nnz )
+		for(;bn<coob.nnz && (cooa.IA[an]>coob.IA[bn] ||
+			       	(cooa.IA[an]>=coob.IA[bn] && cooa.JA[an]>coob.JA[bn]))
+			       	;++bn)
+			//	RSB_STDOUT("-> %d %d\n",coob.IA[bn],coob.JA[bn]),
+			cooc.IA[cn] = coob.IA[bn],cooc.JA[cn] = coob.JA[bn],
+			((mtype*)cooc.VA)[cn] = beta * ((mtype*)coob.VA)[bn],
+			++cn;
+		//RSB_STDOUT("? %d %d\n",an,bn);
+	}
+	}
+	else 
+#endif
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+')dnl
+dnl
+
+dnl
+dnl
+rsb_err_t rsb__cor_merge_dups(rsb_type_t typecode, void* RSB_RESTRICT VA, rsb_coo_idx_t * RSB_RESTRICT IA, rsb_coo_idx_t * RSB_RESTRICT JA, rsb_nnz_idx_t offB, rsb_nnz_idx_t nnzB, rsb_nnz_idx_t nnzC, const int wv, int wp, rsb_nnz_idx_t *onzp, struct rsb_coo_matrix_t*RSB_RESTRICT coop)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/**
+		See rsb__cor_merge.
+	 */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	void *VB = NULL, *VC = NULL, *VT = NULL;
+	rsb_coo_idx_t * IB = NULL, *JB = NULL;
+	rsb_coo_idx_t * IC = NULL, *JC = NULL;
+	rsb_coo_idx_t * IT = NULL, *JT = NULL;
+	rsb_nnz_idx_t bi = 0, ci = 0, ti = 0;
+	rsb_nnz_idx_t b0 = 0, c0 = 0, t0 = 0;
+	rsb_nnz_idx_t onz = 0;
+	struct rsb_coo_matrix_t coo;
+	size_t es = RSB_SIZEOF(typecode);
+
+	if( nnzB == 0 || nnzC == 0 )
+	{
+		goto ret;
+	}
+
+	b0 = offB;
+	c0 = offB + nnzB;
+	VB = RSB_TYPED_OFF_PTR(typecode,VA,b0);
+	VC = RSB_TYPED_OFF_PTR(typecode,VA,c0);
+	IB = IA + b0;
+	IC = IA + c0;
+	JB = JA + b0;
+	JC = JA + c0;
+
+	RSB_BZERO_P(&coo);
+	coo.nnz = nnzB + nnzC;
+	coo.typecode = typecode;
+
+	if( coop && coop->nnz)
+	{
+		coo = *coop;
+		coo.nnz = nnzB + nnzC; /* necessary */
+	}
+	else
+	{
+		if( NULL == rsb__allocate_coo_matrix_t(&coo) )
+			goto err;
+	}
+
+	IT = coo.IA;
+	JT = coo.JA;
+	VT = coo.VA;
+
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if(typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype))
+	{
+	mtype * vT = VT;
+	mtype * vB = VB;
+	mtype * vC = VC;
+
+again`_'RSB_M4_CHOPSPACES(mtype):
+	t0 = ti;
+
+       	if   ( bi<nnzB && ci<nnzC && RSB_COO_LT(IB[bi],JB[bi],IC[ci],JC[ci]) )
+	{
+		IT[ti] = IB[bi];
+		JT[ti] = JB[bi];
+		vT[ti] = vB[bi];
+		++bi,++ti;
+	}
+
+       	while( bi<nnzB && ci<nnzC && RSB_COO_LT(IB[bi],JB[bi],IC[ci],JC[ci]) && ti > 0 && RSB_COO_EQ(IB[bi],JB[bi],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		vT[ti] += vB[bi];
+		++bi;
+		++ti;
+		++onz;
+	}
+
+	/* FIXME: this works as RSB_FLAG_DUPLICATES_SUM but should support either merge, last, first, ...  */
+	t0 = ti;
+       	if   ( bi<nnzB && ci<nnzC && RSB_COO_EQ(IB[bi],JB[bi],IC[ci],JC[ci]) )
+	{
+		IT[ti] = IB[bi];
+		JT[ti] = JB[bi];
+		vT[ti] = vB[bi] + vC[ci];
+		++bi,++ci,++ti;
+		++onz;
+	}
+
+       	while( bi<nnzB && ci<nnzC && RSB_COO_EQ(IB[bi],JB[bi],IC[ci],JC[ci]) && ti > 0 && RSB_COO_EQ(IB[bi],JB[bi],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		vT[ti] += vB[bi] + vC[ci];
+		++bi;
+		++ti;
+		++onz;
+	}
+
+	t0 = ti;
+       	if   ( bi<nnzB && ci<nnzC && RSB_COO_GT(IB[bi],JB[bi],IC[ci],JC[ci]) )
+	{
+		IT[ti] = IC[ci];
+		JT[ti] = JC[ci];
+		vT[ti] = vC[ci];
+		++ci,++ti;
+	}
+
+       	while( bi<nnzB && ci<nnzC && RSB_COO_GT(IB[bi],JB[bi],IC[ci],JC[ci]) && ti > 0 && RSB_COO_EQ(IC[ci],JC[ci],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		vT[ti] += vC[ci];
+		++ci;
+		++ti;
+		++onz;
+	}
+
+	if( ci < nnzC && bi < nnzB )
+		goto again`_'RSB_M4_CHOPSPACES(mtype);
+
+       	if   ( bi<nnzB && ci==nnzC )
+	{
+		IT[ti] = IB[bi];
+		JT[ti] = JB[bi];
+		vT[ti] = vB[bi];
+		++bi,++ti;
+	}
+
+       	while( bi<nnzB && ci==nnzC && ti > 0 && RSB_COO_EQ(IB[bi],JB[bi],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		vT[ti] += vB[bi];
+		++bi;
+		++ti;
+		++onz;
+	}
+
+       	if   ( ci<nnzC && bi==nnzB )
+	{
+		IT[ti] = IC[ci];
+		JT[ti] = JC[ci];
+		vT[ti] = vC[ci];
+		++ci,++ti;
+	}
+
+       	while( ci<nnzC && bi==nnzB && ti > 0 && RSB_COO_EQ(IC[ci],JC[ci],IT[ti-1],JT[ti-1]) )
+	{
+		--ti;
+		IT[ti] = IC[ci];
+		JT[ti] = JC[ci];
+		vT[ti]+= vC[ci];
+		++ci;
+		++ti;
+		++onz;
+	}
+
+	}
+	else 
+#endif
+')dnl
+		errval = RSB_ERR_INTERNAL_ERROR;
+
+	coo.nnz -= onz;
+	RSB_COA_MEMCPY(IA,IT,offB,0,(coo.nnz));
+	RSB_COA_MEMCPY(JA,JT,offB,0,(coo.nnz));
+	if(wp)
+	{
+		RSB_A_MEMCPY_parallel(  VA,VT,offB,0,(coo.nnz),es);
+	}
+	else
+	{
+		RSB_A_MEMCPY(  VA,VT,offB,0,(coo.nnz),es);
+	}
+	RSB_ASSERT(rsb__util_is_coo_array_sorted_up_partial_order(IA,coo.nnz));
+	goto done;
+err:
+	errval = RSB_ERR_ENOMEM;
+done:
+	if( coop && coop->nnz)
+		;
+	else
+		rsb__destroy_coo_matrix_t(&coo);
+	RSB_ASSIGN_IF(onzp,onz);
+ret:
+	return errval;
+}
+')dnl
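+/* A worked sketch (hypothetical input): merging sorted run B = {(0,0):1,
+   (0,1):2} with sorted run C = {(0,1):3, (1,0):4} yields {(0,0):1, (0,1):5,
+   (1,0):4}: the duplicate (0,1) is summed, onz becomes 1, and coo.nnz
+   shrinks from 4 to 3 before the merged arrays are copied back over
+   IA/JA/VA. */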
+dnl
+
+dnl
+rsb_err_t rsb__do_copy_converted_scaled(const void *RSB_RESTRICT  src, void *RSB_RESTRICT dst, const void *RSB_RESTRICT  alphap, rsb_type_t stype,rsb_type_t dtype, size_t nnz, rsb_trans_t transA)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * Copies scaled and conj-transposed.
+	 * alpha according to src code type.
+	 * \return \rsberrcodemsg
+	 * */
+	rsb_nnz_idx_t nzi;
+
+	if((!dst) || (!src))
+		return RSB_ERR_BADARGS;
+
+foreach(`mtypea',RSB_M4_TYPES,`dnl
+foreach(`mtypeb',RSB_M4_TYPES,`dnl
+	if( stype == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtypea) && dtype == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtypeb) )
+	{
+		const mtypea alpha = alphap?*(mtypea*)alphap:RSB_M4_ONE(mtypea);
+		const mtypea*tsrc = src;
+		mtypeb*tdst = dst;
+ifelse(RSB_M4_AND(RSB_M4_IS_COMPLEX_TYPE(mtypeb)),1,`dnl
+		if(RSB_DOES_CONJUGATE(transA))
+			for(nzi=0;nzi<nnz;++nzi) RSB_M4_ASSIGN(mtypeb,mtypea,`tdst[nzi]',`RSB_M4_CONJ(`(mtypeb)(alpha*tsrc[nzi])',mtypeb,RSB_M4_TRANS_C,RSB_M4_SYMBOL_UNSYMMETRIC)')
+		else
+')dnl
+			for(nzi=0;nzi<nnz;++nzi) RSB_M4_ASSIGN(mtypeb,mtypea,`tdst[nzi]',`(mtypeb)(alpha*tsrc[nzi])')
+	}
+	else 
+')dnl
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
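+/* Behavior sketch (hypothetical values): with stype == dtype == double,
+   alpha=2 and nnz=3, src={1,2,3} is written to dst as {2,4,6}; when the
+   destination type is complex and transA conjugates, each scaled value is
+   conjugated as well. */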
+dnl
+
+dnl
+rsb_err_t rsb_util_csc2csr(const void *RSB_RESTRICT VA, const rsb_coo_idx_t * RSB_RESTRICT IA, const rsb_coo_idx_t * RSB_RESTRICT JA, void *RSB_RESTRICT oVA, rsb_coo_idx_t * RSB_RESTRICT oIA, rsb_coo_idx_t * RSB_RESTRICT oJA, rsb_coo_idx_t m, rsb_coo_idx_t k, rsb_nnz_idx_t nnz, rsb_type_t typecode, const rsb_coo_idx_t offi, const rsb_coo_idx_t offo, rsb_flags_t*flagsp)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * */
+	rsb_nnz_idx_t nzi = 0, nzo;
+	rsb_coo_idx_t nr, nc;
+	rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+	rsb_bool_t islowtri = RSB_BOOL_TRUE, isupptri = RSB_BOOL_TRUE;
+
+	RSB_BZERO(oIA, sizeof(*oIA)*(m+1));
+	oIA[0] = offo;
+	for(nzi=0;nzi<nnz;++nzi)
+		oIA[IA[nzi]-offi+1]++;
+	for(nr=0;nr<m;++nr)
+		oIA[nr+1]+=oIA[nr];
+foreach(`mtype',RSB_M4_TYPES,`dnl
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	for(nc=0;nc<k;++nc)
+	for(nzi = JA[nc]-offi;nzi<JA[nc+1]-offi;++nzi)
+	{
+		nzo = oIA[IA[nzi]-offi]++;
+		oJA[nzo] = nc+offo;
+		((mtype*)oVA)[nzo] = ((const mtype*)VA)[nzi];
+	}
+	else 
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	for(nc=0;nc<k;++nc)
+	for(nzi=JA[nc]-offi;nzi<JA[nc+1]-offi;++nzi)
+	{
+		oIA[IA[nzi]-offi]--;
+		if(IA[nzi]-offi>nc)isupptri = RSB_BOOL_FALSE;
+		else if(IA[nzi]-offi<nc)islowtri = RSB_BOOL_FALSE;
+	}
+	if(isupptri) RSB_DO_FLAG_ADD(flags,RSB_FLAG_UPPER_TRIANGULAR);
+	if(islowtri) RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER_TRIANGULAR);
+	if(flagsp) RSB_DO_FLAG_ADD(*flagsp,flags);
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
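+/* A worked sketch (hypothetical 2x2 input [[1,2],[0,3]] in zero-based CSC:
+   JA={0,1,3}, IA={0,0,1}, VA={1,2,3}): the histogram and prefix sum above
+   build the row pointers, the column-wise scatter then yields the CSR
+   arrays oIA={0,2,3}, oJA={0,1,1}, oVA={1,2,3}, the final loop restores
+   oIA, and since no entry lies below the diagonal the upper triangular
+   flag is added. */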
+dnl
+
+dnl
+rsb_err_t rsb_util_coo_copy_and_stats(const void *RSB_RESTRICT VA, const rsb_coo_idx_t * RSB_RESTRICT IA, const rsb_coo_idx_t * RSB_RESTRICT JA, void *RSB_RESTRICT oVA, rsb_coo_idx_t * RSB_RESTRICT oIA, rsb_coo_idx_t * RSB_RESTRICT oJA, rsb_coo_idx_t*m, rsb_coo_idx_t*k, const rsb_nnz_idx_t nnz, const rsb_type_t typecode, const rsb_coo_idx_t offi, const rsb_coo_idx_t offo, rsb_flags_t iflags, rsb_flags_t*flagsp)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+         * FIXME: unfinished! shall also support typecode-based removal of zeros
+	 * */
+	rsb_nnz_idx_t nzi = 0;
+	rsb_coo_idx_t maxi = 0,maxj = 0;
+	rsb_bool_t islowtri = RSB_BOOL_TRUE,isupptri = RSB_BOOL_TRUE;
+	rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+	rsb_nnz_idx_t lowtrin = 0,upptrin = 0;
+
+	if(nnz<1)
+		goto done;
+
+foreach(`mtype',RSB_M4_TYPES,`dnl
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+{
+	rsb_coo_idx_t i = IA[nzi], j = JA[nzi];
+	maxi = i, maxj = j;
+	((mtype*)oVA)[nzi] = ((mtype*)VA)[nzi];
+	oIA[nzi] = i-offi+offo;
+	oJA[nzi] = j-offi+offo;
+	lowtrin |= (i>j), upptrin |= (i<j);
+	for(nzi=1;RSB_LIKELY(nzi<nnz);++nzi)
+dnl	RSB_M4_SIMPLE_LOOP_UNROLL(`nzi',`LI',`1',`nnz',`
+dnl	/* if ( is non zero ... ) */
+	{
+dnl		if(IA[nzi+LI]>maxi) maxi = IA[nzi+LI];
+dnl		if(JA[nzi+LI]>maxj) maxj = JA[nzi+LI];
+dnl		const rsb_coo_idx_t i = IA[nzi+LI],j = JA[nzi+LI];
+		rsb_coo_idx_t i = IA[nzi],j = JA[nzi];
+		maxi = RSB_MAX(maxi, i);
+		maxj = RSB_MAX(maxj, j);
+		((mtype*)oVA)[nzi] = ((mtype*)VA)[nzi];
+		oIA[nzi] = i-offi+offo;
+		oJA[nzi] = j-offi+offo;
+dnl		((mtype*)oVA)[nzi+LI] = ((mtype*)VA)[nzi+LI];
+dnl		oIA[nzi+LI] = i-offi+offo;
+dnl		oJA[nzi+LI] = j-offi+offo;
+dnl		if(IA[nzi+LI]>JA[nzi+LI])isupptri = RSB_BOOL_FALSE;
+dnl		else if(IA[nzi+LI]<JA[nzi+LI])islowtri = RSB_BOOL_FALSE;
+		lowtrin |= (i>j), upptrin |= (i<j);
+	}
+dnl	')
+}
+	else
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	if(upptrin)islowtri = RSB_BOOL_FALSE;
+	if(lowtrin)isupptri = RSB_BOOL_FALSE;
+	if(isupptri) RSB_DO_FLAG_ADD(flags,RSB_FLAG_UPPER_TRIANGULAR);
+	if(islowtri) RSB_DO_FLAG_ADD(flags,RSB_FLAG_LOWER_TRIANGULAR);
+	if(flagsp) RSB_DO_FLAG_ADD(*flagsp,flags);
+	if(m) *m = maxi+1;
+	if(k) *k = maxj+1;
+done:
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb_util_coo_copy(const void *RSB_RESTRICT VA, const rsb_coo_idx_t * RSB_RESTRICT IA, const rsb_coo_idx_t * RSB_RESTRICT JA, void *RSB_RESTRICT oVA, rsb_coo_idx_t * RSB_RESTRICT oIA, rsb_coo_idx_t * RSB_RESTRICT oJA, const rsb_nnz_idx_t nnz, const rsb_type_t typecode, const rsb_coo_idx_t offi, const rsb_coo_idx_t offo)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+         * FIXME: unfinished! shall also support typecode-based removal of zeros
+	 * */
+	rsb_nnz_idx_t nzi = 0;
+dnl	rsb_flags_t flags = RSB_FLAG_NOFLAGS;
+
+foreach(`mtype',RSB_M4_TYPES,`dnl
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+{
+	for(nzi=0;RSB_LIKELY(nzi<nnz);++nzi)
+dnl	RSB_M4_SIMPLE_LOOP_UNROLL(`nzi',`LI',`1',`nnz',`
+dnl	/* if ( is non zero ... ) */
+	{
+dnl		const rsb_coo_idx_t i = IA[nzi+LI],j = JA[nzi+LI];
+		rsb_coo_idx_t i = IA[nzi], j = JA[nzi];
+		((mtype*)oVA)[nzi] = ((mtype*)VA)[nzi];
+		oIA[nzi] = i-offi+offo;
+		oJA[nzi] = j-offi+offo;
+dnl		((mtype*)oVA)[nzi+LI] = ((mtype*)VA)[nzi+LI];
+dnl		oIA[nzi+LI] = i-offi+offo;
+dnl		oJA[nzi+LI] = j-offi+offo;
+	}
+dnl	')
+}
+	else
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+dnl done:
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+/* sparse blas level 1 equivalent functions */
+
+dnl
+int rsb__BLAS_Xusdot(const rsb_type_t typecode, const enum blas_conj_type conj_arg, const rsb_blas_int_t nz, const void*x, const rsb_blas_int_t*indx, const void*y, const rsb_blas_int_t incy, void*r, const enum blas_base_type index_base)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+		\rsb_spblasl1_dot_msg
+		\rsb_warn_untested_msg
+	*/
+foreach(`mtype',RSB_M4_TYPES,`dnl
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+{
+	mtype*xa = (mtype*)x;
+	mtype*ya = (mtype*)y;
+	mtype*rp = (mtype*)r;
+	mtype ac = RSB_M4_ZERO(mtype);
+	rsb_blas_int_t nzi, xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+ifelse(RSB_M4_AND(RSB_M4_IS_COMPLEX_TYPE(mtype)),1,`dnl
+	if( conj_arg == blas_conj )
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+    		ac += RSB_M4_CONJ(xa[nzi],mtype,RSB_M4_TRANS_C,RSB_M4_SYMBOL_UNSYMMETRIC) * ya[xi*incy];
+	}
+	else
+')dnl
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+    		ac += xa[nzi] * ya[xi*incy];
+	}
+	RSB_SET_IF_NOT_NULL(rp,ac);
+}
+	else
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
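+/* Usage sketch (hypothetical values, zero-based indices): with nz=2,
+   x={2,3}, indx={0,3}, a dense y and incy=1, the loop above accumulates
+   r = x[0]*y[0] + x[1]*y[3]; with blas_one_base the y pointer is shifted
+   first, so one-based indx entries address the same elements; for complex
+   types with blas_conj, the x values are conjugated. */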
+dnl
+
+dnl
+int rsb__BLAS_Xusaxpy(const rsb_type_t typecode, const rsb_blas_int_t nz, const void*alpha, const void*x, const rsb_blas_int_t*indx, const void*y, const rsb_blas_int_t incy, const enum blas_base_type index_base)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+		\rsb_spblasl1_axpy_msg
+		\rsb_warn_untested_msg
+	*/
+foreach(`mtype',RSB_M4_TYPES,`dnl
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+{
+	const mtype*xa = (const mtype*)x;
+	mtype*ya = (mtype*)y;
+	const mtype alphav = *(mtype*)alpha;
+	rsb_blas_int_t nzi, xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+		ya[nzi*incy] += alphav*xa[xi];
+	}
+}
+	else
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+int rsb__BLAS_Xusga(const rsb_type_t typecode, const rsb_blas_int_t nz, const void*y, const rsb_blas_int_t incy, void*x, const rsb_blas_int_t*indx, const enum blas_base_type index_base)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+		\rsb_spblasl1_ga_msg
+		\rsb_warn_untested_msg
+	*/
+foreach(`mtype',RSB_M4_TYPES,`dnl
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+{
+	mtype*xa = (mtype*)x;
+	const mtype*ya = (const mtype*)y;
+	rsb_blas_int_t nzi,xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+    		xa[nzi] = ya[xi*incy];
+	}
+}
+	else
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+int rsb__BLAS_Xusgz(const rsb_type_t typecode, const rsb_blas_int_t nz, void*y, const rsb_blas_int_t incy, void*x, const rsb_blas_int_t*indx, const enum blas_base_type index_base)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+		\rsb_spblasl1_gz_msg
+		\rsb_warn_untested_msg
+	*/
+foreach(`mtype',RSB_M4_TYPES,`dnl
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+{
+	mtype*xa = (mtype*)x;
+	mtype*ya = (mtype*)y;
+	rsb_blas_int_t nzi,xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+    		xa[nzi] = ya[xi*incy];
+		ya[xi*incy] = RSB_M4_ZERO(mtype);
+	}
+}
+	else
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
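+/* Behavior sketch (hypothetical values): with nz=1, indx={2}, incy=1 and
+   y={5,6,7,8}, the loop above gathers x[0]=7 and zeroes the source slot,
+   leaving y={5,6,0,8}. */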
+dnl
+
+dnl
+int rsb__BLAS_Xussc(const rsb_type_t typecode, const rsb_blas_int_t nz, const void*x, void*y, const rsb_blas_int_t incy, const rsb_blas_int_t*indx, const enum blas_base_type index_base)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+		\rsb_spblasl1_sc_msg
+		\rsb_warn_untested_msg
+	*/
+foreach(`mtype',RSB_M4_TYPES,`dnl
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+{
+	const mtype*xa = (const mtype*)x;
+	mtype*ya = (mtype*)y;
+	rsb_blas_int_t nzi,xi;
+	if( index_base == blas_one_base )
+		ya-=incy;
+	for(nzi=0;RSB_LIKELY(nzi<nz);++nzi)
+	{
+		xi = indx[nzi];
+		ya[xi*incy] = xa[nzi];
+	}
+}
+	else
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
+dnl
+
+dnl
+/* blas level 1 equivalent functions */
+dnl
+
+dnl
+rsb_err_t rsb__cblas_Xcopy(rsb_type_t typecode, rsb_nnz_idx_t n, const void * x, rsb_nnz_idx_t incx, void * y, rsb_nnz_idx_t incy)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	return rsb__xcopy_strided_typed(y,x,0,0,n,typecode,incy,incx);
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__cblas_Xnrm2(rsb_type_t type, size_t n, const void * a, rsb_nnz_idx_t incA, void * c)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * c <- sqrt(sum(|a_i|^2))
+         *
+	 * \param a	an array pointer
+	 * \param type	a valid type code
+	 * \param n	the input array length
+	 * \note see dznrm2 in BLAS
+	 *
+	 * \return \rsberrcodemsg
+	 * */
+	size_t i;
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( type == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+	const mtype*ta = a;RSB_M4_REALT(mtype) *tc = c,acc = RSB_M4_ZERO(mtype),tmp = RSB_M4_ZERO(mtype);
+	RSB_M4_SIMPLE_LOOP_UNROLL(`i',`LI',`0',`n',`dnl
+	acc = RSB_M4_ABS(mtype,ta[(i+LI)*incA]);tmp += acc*acc;
+	'); 
+	tc[0] = RSB_M4_CREAL(mtype,RSB_M4_SQRT(mtype,tmp));
+	}
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
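+/* Numerical sketch (hypothetical values): for double, n=2, incA=1 and
+   a={3,4}, the unrolled loop above accumulates tmp = 9 + 16 = 25 and
+   stores c = sqrt(25) = 5, matching the BLAS nrm2 semantics. */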
+dnl
+
+dnl
+rsb_err_t rsb__cblas_Xdotu_sub(rsb_type_t type, size_t n, const void * x, rsb_nnz_idx_t incx, const void * y, rsb_nnz_idx_t incy, void *dotu)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * */
+	return rsb__vector_mult_sum(x,y,dotu,type,n,incx,incy);
+}
+')dnl
+dnl
+
+dnl
+rsb_err_t rsb__cblas_Xscal(rsb_type_t type, size_t n, const void * alphap, void * a, size_t stride)dnl
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*!
+	 * a <- a * alpha
+	 * */
+	return rsb_strided_vector_scale(a,alphap,type,n,stride);
+}
+')dnl
+dnl
+
+dnl
+dnl
+rsb_err_t rsb__coo_insertion_sort(rsb_type_t typecode, void* VB, rsb_coo_idx_t * IB, rsb_coo_idx_t * JB, rsb_nnz_idx_t offA, rsb_nnz_idx_t nnzA)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/* only for *small* arrays, where allocation of a temporary array is not justified */
+	rsb_coo_idx_t * IA = NULL, *JA = NULL;
+	rsb_nnz_idx_t i, j;
+
+	IA = IB + offA;
+	JA = JB + offA;
+
+foreach(`mtype',RSB_M4_TYPES,`dnl
+`#ifdef 'RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype)
+	if( typecode == RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) )
+	{
+		mtype * VA = (mtype*) RSB_TYPED_OFF_PTR(typecode,VB,offA);
+		for(i=1;i<nnzA;++i)
+		for(j=i;j>0 && RSB_COO_LT(IA[j],JA[j],IA[j-1],JA[j-1]);--j)
+		{
+			RSB_SWAP(rsb_coo_idx_t,IA[j],IA[j-1]);
+			RSB_SWAP(rsb_coo_idx_t,JA[j],JA[j-1]);
+			RSB_SWAP(mtype        ,VA[j],VA[j-1]);
+		}
+	}
+	else 
+#endif /* RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(mtype) */
+')dnl
+	return RSB_ERR_UNSUPPORTED_TYPE	;
+	return RSB_ERR_NO_ERROR;
+}
+')dnl
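+/* Behavior sketch (hypothetical values): for offA=0, nnzA=2 and the
+   out-of-order pair IA={1,0}, JA={0,0}, the swaps above produce IA={0,1},
+   JA={0,0}, with VA permuted alongside; the cost is quadratic in nnzA,
+   hence the restriction to small runs noted above. */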
+dnl
+
+void rsb__coo_to_lr( void * RSB_RESTRICT VBu, rsb_coo_idx_t*RSB_RESTRICT IB, rsb_coo_idx_t*RSB_RESTRICT JB, void * RSB_RESTRICT VAu, rsb_coo_idx_t*RSB_RESTRICT IA, rsb_coo_idx_t*RSB_RESTRICT JA, rsb_coo_idx_t mj, rsb_nnz_idx_t nnzA, rsb_nnz_idx_t nzoffB, rsb_nnz_idx_t nzoffA, rsb_nnz_idx_t*RSB_RESTRICT nzlp, rsb_nnz_idx_t*RSB_RESTRICT nzrp, rsb_coo_idx_t iadd, rsb_coo_idx_t jadd, rsb_type_t typecode)
+ifdef(`ONLY_WANT_HEADERS',`;',`dnl
+{
+	/*
+	 * Given the COO arrays of matrix A and of a temporary matrix B, stores the coefficients lying left of the mj-th column before those lying right of it, preserving the row major ordering.
+	 * A serial function.
+	 * */
+	rsb_nnz_idx_t nzl = 0, nzr = 0, nzi = 0;
+
+	RSB_DEBUG_ASSERT(IA!=IB);
+	RSB_DEBUG_ASSERT(JA!=JB);
+	RSB_DEBUG_ASSERT(VAu!=VBu);
+
+	IA += nzoffA;
+	JA += nzoffA;
+
+	IB += nzoffB;
+	JB += nzoffB;
+	
+switch(typecode)
+{
+			/* supported RSB_M4_MATRIX_TYPES */
+foreach(`type',RSB_M4_MATRIX_TYPES,`dnl
+case RSB_M4_NUMERICAL_TYPE_PREPROCESSOR_SYMBOL(type)	:
+{
+	type * RSB_RESTRICT VA = VAu; 
+	type * RSB_RESTRICT VB = VBu; 
+	RSB_DEBUG_ASSERT(VA!=VB);
+
+	VA += nzoffA;
+	VB += nzoffB;
+
+	for(nzi=0;nzi<nnzA;++nzi)
+	{
+		if( JA[nzi] < mj )
+		{
+			IB[nzl] = IA[nzi] + iadd;
+			JB[nzl] = JA[nzi] ;
+			VB[nzl] = VA[nzi];
+			nzl++;
+		}
+		else
+		{
+			nzr++;
+			IB[nnzA-nzr] = IA[nzi] + iadd;
+			JB[nnzA-nzr] = JA[nzi] + jadd;
+			VB[nnzA-nzr] = VA[nzi];
+		}
+	}
+
+	/* copy left quadrant back to A */
+	for(nzi=0;nzi<nzl ;++nzi)
+	{
+		IA[nzi] = IB[nzi];
+		JA[nzi] = JB[nzi];
+		VA[nzi] = VB[nzi];
+	}
+	
+	/* copy right quadrant back to A */
+	for(     ;nzi<nnzA;++nzi)
+	{
+		IA[nzi] = IB[nnzA-(1+nzi-nzl)];
+		JA[nzi] = JB[nnzA-(1+nzi-nzl)];
+		VA[nzi] = VB[nnzA-(1+nzi-nzl)];
+	}
+}
+	break;
+')dnl
+	/* unsupported type */
+	default :
+	RSB_NULL_STATEMENT_FOR_COMPILER_HAPPINESS 
+}
+
+	*nzlp = nzl;
+	*nzrp = nzr;
+}')dnl
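+/* A worked sketch (hypothetical values): with mj=2, iadd=jadd=0 and A
+   holding {(0,1),(0,2),(1,0)}, the split above leaves the entries with
+   column index below 2 first: A becomes {(0,1),(1,0),(0,2)}, with nzl=2
+   and nzr=1; right-side entries are staged into B in reverse order and the
+   copy back reverses them again, preserving their original relative
+   order. */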
+dnl
+
+dnl
+#ifdef __cplusplus
+}
+#endif  /* __cplusplus */
+dnl
+dnl
+ifdef(`ONLY_WANT_HEADERS',`
+#endif /* RSB_UTIL_H_INCLUDED */
+')
+dnl
+/* @endcond */
+dnl
diff --git a/rsbench.c b/rsbench.c
new file mode 100644
index 0000000..807b239
--- /dev/null
+++ b/rsbench.c
@@ -0,0 +1,601 @@
+/*
+
+Copyright (C) 2008-2016 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC  */
+/*!
+ @file
+ @author Michele Martone
+ @brief
+ This is the main program used to benchmark and test our library.
+ This should be the swiss army knife program for our library.
+ */
+/*
+  This is not an example program: to be built, it needs all of the internal library headers.
+ */
+
+#include <stdlib.h>
+#include "rsb.h"
+#include "rsb_test_matops.h"
+#include "rsb_failure_tests.h"
+#include "rsb_internals.h"
+#if RSB_WITH_SPARSE_BLAS_INTERFACE 
+#include "rsb_libspblas_handle.h"
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE  */
+#include "rsb_libspblas_tests.h"
+//#include "rsb-config.h"
+#if RSB_WANT_ACTION
+#include <signal.h>
+#if defined(RSB_WANT_ACTION_SIGNAL)
+#else /* defined(RSB_WANT_ACTION_SIGNAL) */
+#include <bits/sigaction.h>
+#endif /* defined(RSB_WANT_ACTION_SIGNAL) */
+#endif /* RSB_WANT_ACTION */
+
+#define RSB_WANT_PERMISSIVE_RSBENCH 1
+#define RSB_SHALL_UPDATE_COMPLETEBENCHS 1
+#define RSB_WANT_REDUCED_RSB_M4_MATRIX_META_OPS 1 
+
+
+#if RSB_WITH_LIKWID
+#define RSB_RSBENCH_EXEC(FEXP) {rsb_err_t errval;RSB_LIKWID_MARKER_INIT;errval=RSB_ERR_TO_PROGRAM_ERROR(FEXP);RSB_LIKWID_MARKER_EXIT;return errval;}
+#else /* RSB_WITH_LIKWID */
+#define RSB_RSBENCH_EXEC(FEXP) {return RSB_ERR_TO_PROGRAM_ERROR(FEXP);}
+#endif /* RSB_WITH_LIKWID */
+
+#if RSB_WANT_ACTION
+	int rsb__quit_rsbench;
+#if defined(RSB_WANT_ACTION_SIGNAL)
+#else /* defined(RSB_WANT_ACTION_SIGNAL) */
+	struct sigaction rsb_osa;
+#endif /* defined(RSB_WANT_ACTION_SIGNAL) */
+
+void rsb__sigh(int signal)
+{
+	/* TODO: extend this mechanism optionally to the library itself. */
+
+	if( rsb__quit_rsbench == 0 )
+	{
+		RSBENCH_STDOUT("\n");
+		RSBENCH_STDOUT("====================================================\n");
+		RSBENCH_STDOUT("Caught signal %d: will terminate as soon as possible.\n",signal);
+		RSBENCH_STDOUT("  ( a further signal will no longer be caught ).\n");
+		RSBENCH_STDOUT("====================================================\n");
+		RSBENCH_STDOUT("\n");
+		rsb__quit_rsbench++;
+	}
+	else
+	if( rsb__quit_rsbench == 1 )
+	{
+#if defined(RSB_WANT_ACTION_SIGNAL)
+#else /* defined(RSB_WANT_ACTION_SIGNAL) */
+		sigaction(SIGINT,&rsb_osa,NULL);
+#endif /* defined(RSB_WANT_ACTION_SIGNAL) */
+	}
+}
+
+void rsb__sigr(void)
+{
+	rsb__quit_rsbench = 0;
+	{
+#if RSB_WANT_ACTION_SIGNAL
+		/* signal() is part of C99 */
+		signal(SIGINT,&rsb__sigh); /* not to be called from a threaded environment ... */
+#else /* RSB_WANT_ACTION_SIGNAL */
+		/* sigaction() is part of POSIX, not part of C99 */
+		struct sigaction act;
+		RSB_BZERO_P(&act);
+		RSB_BZERO_P(&rsb_osa);
+		act.sa_handler  = rsb__sigh;
+		sigemptyset(&act.sa_mask);
+    		sigaction(SIGINT, &act,&rsb_osa);
+/*
+		sigaction(SIGUSR1, &act, &rsb_osa);
+		sigaction(SIGUSR2, &act, &rsb_osa);
+
+		sigaction(SIGQUIT,&act,&rsb_osa);
+		sigaction(SIGTERM,&act,&rsb_osa);
+
+		sigaction(SIGABRT,&act,&rsb_osa);
+		sigaction(SIGTSTP,&act,&rsb_osa);
+
+		sigaction(SIGBUS, &act,&rsb_osa);
+		sigaction(SIGILL, &act,&rsb_osa);
+	    	sigaction(SIGSEGV,&act,&rsb_osa);
+*/
+#endif /* RSB_WANT_ACTION_SIGNAL */
+	}
+}
+#endif /* RSB_WANT_ACTION */
+
+rsb_err_t rsb__print_configuration_string_rsbench(const char *pn, rsb_char_t * cs, rsb_bool_t wci)
+{
+	/* TODO: output buffer length check */
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+#if RSB_WANT_MKL
+#ifdef mkl_get_version
+	MKLVersion mv;
+#endif /* mkl_get_version */
+#endif /* RSB_WANT_MKL */
+	if(!cs)
+	{
+		errval = RSB_ERR_BADARGS;
+		goto err;
+	}
+	errval = rsb__print_configuration_string(pn, cs, wci);
+	if(wci == RSB_BOOL_FALSE)
+		goto err;
+#if RSB_WANT_MKL
+#ifdef mkl_get_version
+	mkl_get_version(&mv);
+	sprintf(cs+strlen(cs),"MKL:%d.%d-%d, %s, %s, %s, %s\n",mv.MajorVersion,mv.MinorVersion,mv.UpdateVersion,mv.ProductStatus,mv.Build,mv.Processor,mv.Platform);
+#else /* mkl_get_version */
+	sprintf(cs+strlen(cs),"MKL:version unknown.\n");
+#endif /* mkl_get_version */
+#else /* RSB_WANT_MKL */
+	sprintf(cs+strlen(cs),"MKL:not linked.\n");
+#endif /* RSB_WANT_MKL */
+#if RSB_WANT_XDR_SUPPORT
+	sprintf(cs+strlen(cs),"XDR support: on.\n");
+#else /* RSB_WANT_XDR_SUPPORT */
+	sprintf(cs+strlen(cs),"XDR support: off.\n");
+#endif /* RSB_WANT_XDR_SUPPORT */
+err:
+	return errval;
+}
+
+static int rsb__main_help(const int argc, char * const argv[], int default_program_operation, const char * program_codes, rsb_option *options)
+{
+			//RSB_STDOUT(
+			printf(
+				/*"[OBSOLETE DOCUMENTATION] \n"*/
+				"Usage: %s [OPTIONS] \n"
+				"  or:  %s [ -o OPCODE] [ -O {subprogram-code}] [ {subprogram-specific-arguments} ] \n"
+				"%s "RSB_INFOMSG_SAK"."
+				"\n"
+				"\n"
+				//"\tOne may choose {option} among:\n"
+				//"\t-I for getting system information and some micro benchmarking\n"
+				"\t\n"
+				"\tChoose {subprogram-code} among:\n"
+				"\tr for the reference benchmark (will produce a machine specific file)\n"
+				"\tc for the complete benchmark\n"
+				"\te for the matrix experimentation code\n"
+				"\td for a single matrix dumpout\n"
+				"\tb for the (current, soon to be obsolete) benchmark\n"
+				"\tt for some matrix construction tests\n"
+				"\to obsolete, will soon be removed\n"
+				"\n"
+				"\t{subprogram-specific-arguments} will be available from the subprograms.\n"
+				"\te.g.: %s      -O b -h   will show the current benchmark subprogram's options\n"
+				"\te.g.: %s -o a -O b -h   will show the spmv     benchmark subprogram's options\n"
+				"\te.g.: %s -o n -O b -h   will show the negation benchmark subprogram's options\n"
+//				"\te.g.: %s -o A -O b    will run all of the benchmark programs.\n"
+				"\n\tThe default {subprogram-code} is '%c'\n"
+				"\n\tWith OPCODE among '%s'\n"/* TODO: fix this description, as it is too laconic. */
+				"\n"
+				,argv[0]
+				,argv[0]
+				,rsb__basename(argv[0])
+				,rsb__basename(argv[0])
+				,rsb__basename(argv[0])
+				,rsb__basename(argv[0])
+				,default_program_operation
+				,program_codes
+				);
+			if(options)
+				rsb_test_help_and_exit(argv[0],options,0);
+	return 0;
+}
+
+int rsb_genmm_main(int argc,char *argv[]);
+int rsb_mtx_ls_main(int argc,char *argv[]);
+
+int main(const int argc, char * argv[])
+{
+	rsb_option options[] = {
+	    {"help",			no_argument, NULL, 'h' },
+	    {"matrix-operation",	required_argument, NULL, 'o' },
+	    {"subprogram-operation",	required_argument, NULL, 'O' },
+	    {"information",		no_argument, NULL, 'I' },
+	    {"configuration",		no_argument, NULL, 'C' },
+	    {"hardware-counters",	no_argument, NULL, 'H' },
+	    {"experiments",		no_argument, NULL, 'e' },
+	    {"version",			no_argument, NULL, 'v' },
+	    {"blas-testing",		no_argument, NULL, 'B' },
+	    {"quick-blas-testing",		required_argument, NULL, 'Q' },
+	    {"error-testing",		required_argument, NULL, 'E' },
+	    {"fp-bench",		no_argument, NULL, 'F' },
+	    {"transpose-test",		no_argument, NULL, 't' },
+	    {"limits-testing",		no_argument, NULL, 0x6c696d74 },
+	    {"guess-blocking",		no_argument, NULL, 'G' },	/* will pass guess parameters here some day (FIXME: obsolete) */
+	    {"generate-matrix",		no_argument, NULL, 'g' }, /* should be synced to rsb_genmm_main */
+	    {"plot-matrix",		no_argument, NULL,  0x50505050},/* should be synced to rsb_dump_postscript */
+	    {"matrix-ls",		no_argument, NULL,  0x006D6C73},/* should be synced to rsb_mtx_ls_main */
+	    {"read-performance-record",		required_argument, NULL,  0x7270720a},/*  */
+	    {"help-read-performance-record",		no_argument, NULL,  0x72707268},/*  */
+	    {0,0,0,0}
+	};
+
+	/*
+	 * NOTE: this implies that unless an argument reset mechanism is implemented here,
+	 * the o and O options will be forwarded to the host program!
+	 * */
+	//const char default_operation='v';
+	const char default_operation='a';
+	char operation=default_operation;
+	//const char default_program_operation='r';
+	const char default_program_operation='b';
+	char program_operation=default_program_operation;
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+#if RSB_WANT_REDUCED_RSB_M4_MATRIX_META_OPS
+	const char * program_codes = "a"
+#else /* RSB_WANT_REDUCED_RSB_M4_MATRIX_META_OPS */
+	const char * program_codes = "avms"
+#endif /* RSB_WANT_REDUCED_RSB_M4_MATRIX_META_OPS */
+#if !RSB_SHALL_UPDATE_COMPLETEBENCHS
+		"c"
+#endif /* RSB_SHALL_UPDATE_COMPLETEBENCHS */
+#ifdef RSB_OPTYPE_INDEX_SPSV_UXUA
+		"t"
+#endif
+		"inS";
+	int program_not_chosen=1;
+	struct rsb_tester_options_t to;
+	rsb_char_t cs[RSB_MAX_VERSION_STRING_LENGTH];
+	int c;
+
+	rsb_blas_tester_options_init(&to);
+
+	for (;program_not_chosen;)
+	{
+		int opt_index = 0;
+		c = rsb_getopt_long(argc,argv,"CP:"
+				"gBGvMHIho:O:"
+/* #if RSB_WANT_EXPERIMENTS_CODE
+				"e"
+#endif */ /* RSB_WANT_EXPERIMENTS_CODE */
+				"FQ:E:",options,&opt_index);
+		if (c == -1)break;
+		switch (c)
+		{
+			case 'F':
+				/*
+				 * Floating point mini-benchmark
+				 * */
+				return (rsb_lib_init(RSB_NULL_INIT_OPTIONS) == RSB_ERR_NO_ERROR && rsb__fp_benchmark() == RSB_ERR_NO_ERROR) ?RSB_PROGRAM_SUCCESS:RSB_PROGRAM_ERROR;
+			break;
+			case 'G':
+				/*
+				 * Sparse GEMM preliminary code.
+				 * TODO: remove this temporary case, as it may break other functionality with the G flag.
+				 * */
+				return RSB_ERR_TO_PROGRAM_ERROR(rsb_do_spgemm_test_code(argc-1,argv+1));
+			case 'g':
+				return RSB_ERR_TO_PROGRAM_ERROR(rsb_genmm_main(argc,argv));
+			break;
+			case 0x006D6C73:
+				return RSB_ERR_TO_PROGRAM_ERROR(rsb_mtx_ls_main(argc,argv));
+			break;
+			case 0x7270720a:
+			case 0x72707268:
+			goto qos;
+			break;
+			case 0x50505050:
+				return RSB_ERR_TO_PROGRAM_ERROR(rsb_dump_postscript(argc,argv));
+			break;
+			case 0x6c696d74:
+			{
+				if((errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS)) != RSB_ERR_NO_ERROR)
+					goto berr;
+				RSB_DO_ERROR_CUMULATE(errval,rsb_blas_limit_cases_tester());
+				if((!RSB_WANT_PERMISSIVE_RSBENCH) && RSB_SOME_ERROR(errval))goto ferr;
+				return RSB_ERR_TO_PROGRAM_ERROR(errval);
+			}
+			break;
+			case 'E':
+			{
+				if((errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS)) != RSB_ERR_NO_ERROR)
+					goto berr;
+				RSB_DO_ERROR_CUMULATE(errval,rsb_blas_failure_tester(optarg));
+				if((!RSB_WANT_PERMISSIVE_RSBENCH) && RSB_SOME_ERROR(errval))goto ferr;
+				return RSB_ERR_TO_PROGRAM_ERROR(errval);
+			}
+			case 'Q':
+				to.mtt = rsb__util_atof(optarg);
+				if(strstr(optarg,"R")!=NULL)to.rrm=RSB_BOOL_TRUE;
+				if(strstr(optarg,"U")!=NULL)to.tur=RSB_BOOL_TRUE;
+				if(strstr(optarg,"Q")!=NULL)to.wqt=RSB_BOOL_TRUE;
+				if(strstr(optarg,"q")!=NULL)to.wqc=RSB_BOOL_TRUE;
+				if(strstr(optarg,"C")!=NULL)to.wcs=RSB_BOOL_TRUE;
+			case 'B':
+			RSB_SIGHR
+			/* Sparse BLAS test.  */
+//#if RSB_WITH_SPARSE_BLAS_INTERFACE 
+#if 1
+			{
+				if((errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS)) != RSB_ERR_NO_ERROR)
+					goto berr;
+#if RSB_ALLOW_INTERNAL_GETENVS
+				if(getenv("RSB_RSBENCH_BBMB") && rsb__util_atoi(getenv("RSB_RSBENCH_BBMB")) )
+					goto bbmb;
+#endif /* RSB_ALLOW_INTERNAL_GETENVS */
+				RSB_DO_ERROR_CUMULATE(errval,rsb_blas_runtime_limits_tester());
+				if((!RSB_WANT_PERMISSIVE_RSBENCH) && RSB_SOME_ERROR(errval))goto ferr;
+#if 0
+				/*  TODO: this is here temporarily */
+				RSB_DO_ERROR_CUMULATE(errval,rsb__do_lock_test());
+#endif
+				RSB_DO_ERROR_CUMULATE(errval,rsb_blas_mini_tester());
+				if((!RSB_WANT_PERMISSIVE_RSBENCH) && RSB_SOME_ERROR(errval))goto ferr;
+				//RSB_LIKWID_MARKER_INIT;
+				//RSB_LIKWID_MARKER_R_START("RSB-QUICKTEST");
+#if RSB_ALLOW_INTERNAL_GETENVS
+bbmb:
+#endif /* RSB_ALLOW_INTERNAL_GETENVS */
+				RSB_DO_ERROR_CUMULATE(errval,rsb_blas_bigger_matrices_tester(&to));/* TODO: options should be passed here */
+				//RSB_LIKWID_MARKER_R_STOP("RSB-QUICKTEST");
+				//RSB_LIKWID_MARKER_EXIT;
+				if((!RSB_WANT_PERMISSIVE_RSBENCH) && RSB_SOME_ERROR(errval))
+					goto ferr;
+				goto ferr;
+			}
+#else
+				RSB_STDERR("no Sparse BLAS interface built.\n");
+				//return -1;
+				return 0;
+#endif
+			break;
+			case 'C':
+				if((errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS)) != RSB_ERR_NO_ERROR)
+					goto err;
+				errval = rsb__print_configuration_string_rsbench(argv[0],cs,RSB_BOOL_TRUE);
+				printf("%s",cs);
+				goto verr;
+			break;
+			case 'v':
+				if((errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS)) != RSB_ERR_NO_ERROR)
+					goto err;
+				errval = rsb__print_configuration_string_rsbench(argv[0],cs,RSB_BOOL_FALSE);
+				printf("%s",cs);
+				goto verr;
+			break;
+			case 'M':
+				return
+				(rsb_lib_init(RSB_NULL_INIT_OPTIONS) == RSB_ERR_NO_ERROR && rsb__memory_benchmark() == RSB_ERR_NO_ERROR) ?RSB_PROGRAM_SUCCESS:RSB_PROGRAM_ERROR;
+			break;
+			case 'H':
+				return RSB_ERR_TO_PROGRAM_ERROR(rsb_hc_main());		/* preliminary */
+			break;
+			case 'I':
+				rsb_lib_init(RSB_NULL_INIT_OPTIONS);
+			return
+				RSB_ERR_TO_PROGRAM_ERROR(rsb_perror(NULL,rsb__sys_info()));
+			break;
+			/*
+#if RSB_WANT_EXPERIMENTS_CODE
+			case 'e':
+				return rsb_exp_bcsr_guess_experiments(argc,argv);
+			break;
+#endif */ /* RSB_WANT_EXPERIMENTS_CODE */
+			case 'P':
+		{
+			errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS);
+			if(RSB_SOME_ERROR(errval))
+				goto err;
+			errval = rsb_file_mtx_save(rsb_file_mtx_load(optarg,RSB_FLAG_DEFAULT_MATRIX_FLAGS,RSB_NUMERICAL_TYPE_DEFAULT,NULL),NULL);
+			RSB_MASK_OUT_SOME_ERRORS(errval)
+			goto verr;
+		}
+			break;
+			case 'o':
+				operation=*optarg;
+			break;
+			case 'O':
+				program_operation=*optarg;
+				program_not_chosen=0;
+			break;
+			case 'h':
+				/* getsubopt may come in help here */
+				return rsb__main_help(argc, argv,default_program_operation,program_codes,options);
+			break;
+			/*
+			case 't':
+				return rsb__main_transpose(argc,argv);
+			break;	    	
+			*/
+			default:
+			{
+			}
+		}
+	}
+
+qos:	/* quit option selection */
+
+	if(c == 0x72707268)
+	{
+		errval = rsb__pr_dumpfiles(NULL,0);
+		return RSB_ERR_TO_PROGRAM_ERROR(errval);
+	}
+	if(c == 0x7270720a)
+	{
+/*
+		if(argc == 3)
+			return rsb__pr_dumpfile(optarg);
+		if(argc == 3)
+			return rsb__pr_dumpfiles(&optarg,1);
+ */
+
+		if(argc >= 3)
+		{
+			const int RSB__PR_DUMP_MAXARGS = 1024; /* TODO: temporary */
+			const rsb_char_t*fna[RSB__PR_DUMP_MAXARGS];
+			int i;
+			for(i=2;i<RSB_MIN(RSB__PR_DUMP_MAXARGS,argc);++i)
+				fna[i-2] = argv[i];
+			errval = rsb__pr_dumpfiles(fna,i-2);
+			return RSB_ERR_TO_PROGRAM_ERROR(errval);
+		}
+	}
+
+	if(program_not_chosen)
+		return rsb__main_help(argc, argv,default_program_operation,program_codes,options);
+
+	switch (program_operation)	/* O */
+	{
+		case 'r':
+	{
+			/*
+			 * A benchmark to compute (machine,compiled) reference performance values.
+			 * */
+			errval = rsb_lib_init(RSB_NULL_INIT_OPTIONS);
+			if(errval == RSB_ERR_NO_ERROR)
+				goto verr;
+			errval = rsb__do_referencebenchmark(); /* FIXME: probably obsolete */
+			RSB_MASK_OUT_SOME_ERRORS(errval)
+			goto verr;
+	}
+		break;
+		case 'R':
+			/**/
+			/*
+			 * Dump current (hardcoded) performance info without computing anything.
+			 * */
+			errval = rsb__dump_current_global_reference_performance_info(); /* FIXME: probably obsolete */
+			RSB_MASK_OUT_SOME_ERRORS(errval)
+			goto verr;
+		break;
+#if !RSB_SHALL_UPDATE_COMPLETEBENCHS
+		case 'c':
+			/* A complete benchmark.  TODO: this is broken / old; needs a revamp, or oblivion  */
+			errval = rsb_do_completebenchmark(argc,argv); /* FIXME: probably obsolete */
+			RSB_MASK_OUT_SOME_ERRORS(errval)
+			goto verr;
+		break;
+#endif /* RSB_SHALL_UPDATE_COMPLETEBENCHS */
+		case 'd':
+			/*
+			 * A single matrix dump (almost useless).
+			 * */
+			return rsb_test_dump_main(argc,argv); /* FIXME: probably obsolete */
+		break;
+		case 'e':
+			/*
+			 * The matrix experimentation code.
+			 * */
+			RSB_STDERR("this option was obsoleted by -oS -Ob\n");
+			/*return rsb_test_main_block_partitioned_matrix_stats(argc,argv); */ /* FIXME: probably obsolete */
+			return -1;
+		break;
+		case 'b':
+		{
+			/*
+			 * The current reference benchmark.
+			 * */
+			RSB_SIGHR
+			switch(operation)	/* o */
+			{
+#ifdef RSB_HAVE_OPTYPE_SPMV_UAUA
+				case 'a':
+				return RSB_ERR_TO_PROGRAM_ERROR(rsb__main_block_partitioned_spmv_uaua(argc,argv));
+				break;
+#endif /* RSB_HAVE_OPTYPE_SPMV_UAUA */
+#if !RSB_WANT_REDUCED_RSB_M4_MATRIX_META_OPS
+#ifdef RSB_HAVE_OPTYPE_SPMV_UAUZ
+				case 'v':
+				return RSB_ERR_TO_PROGRAM_ERROR(rsb__main_block_partitioned_spmv_uauz(argc,argv));
+				break;
+#endif /* RSB_HAVE_OPTYPE_SPMV_UAUZ */
+#ifdef RSB_HAVE_OPTYPE_SPMM_AZ
+				case 'm':
+				return RSB_ERR_TO_PROGRAM_ERROR(rsb__main_block_partitioned_spmm_az(argc,argv));
+				break;
+#endif /* RSB_HAVE_OPTYPE_SPMM_AZ */
+#ifdef RSB_HAVE_OPTYPE_SCALE
+				case 's':
+				return RSB_ERR_TO_PROGRAM_ERROR(rsb__main_block_partitioned_scale(argc,argv));
+				break;
+#endif /* RSB_HAVE_OPTYPE_SCALE */
+#ifdef RSB_HAVE_OPTYPE_SPMV_UXUX
+				case 'c':
+				return RSB_ERR_TO_PROGRAM_ERROR(rsb__main_block_partitioned_spmv_uxux(argc,argv));
+				break;
+#endif /* RSB_HAVE_OPTYPE_SPMV_UXUX */
+#ifdef RSB_HAVE_OPTYPE_INFTY_NORM
+				case 'i':
+				return RSB_ERR_TO_PROGRAM_ERROR(rsb__main_block_partitioned_infty_norm(argc,argv));
+				break;
+#endif /* RSB_HAVE_OPTYPE_INFTY_NORM */
+#ifdef RSB_HAVE_OPTYPE_NEGATION
+				case 'n':
+				return RSB_ERR_TO_PROGRAM_ERROR(rsb__main_block_partitioned_negation(argc,argv));
+				break;
+#endif /* RSB_HAVE_OPTYPE_NEGATION */
+#endif /* RSB_WANT_REDUCED_RSB_M4_MATRIX_META_OPS */
+#ifdef RSB_OPTYPE_INDEX_SPSV_UXUA
+				case 't':
+				return RSB_ERR_TO_PROGRAM_ERROR(rsb__main_block_partitioned_spsv_uxua(argc,argv));
+				break;
+#endif /* RSB_OPTYPE_INDEX_SPSV_UXUA */
+#if 1	/* this is a special case */
+				case 'S':
+				//return rsb__main_block_partitioned_sort_only(argc,argv);//old
+				return RSB_ERR_TO_PROGRAM_ERROR(rsb__main_block_partitioned_mat_stats(argc,argv));//new
+				break;
+#endif
+				default:
+				RSB_STDERR(
+					"You did not choose a correct operation code.\n"
+					"Choose one among %s.\n",program_codes
+					);
+				errval = RSB_ERR_UNSUPPORTED_OPERATION;
+				RSB_DO_ERR_RETURN(errval)
+			}
+		}
+		break;
+#if 0
+		case 't': /* to reintegrate, add 't' to program_codes */
+			/*
+			 * A whole matrix repartitioning test.
+			 * */
+			return RSB_ERR_TO_PROGRAM_ERROR(rsb_test_main_block_partitioned_construction_test(argc,argv));
+		break;
+#endif
+		default:
+			RSB_STDERR("You did not choose an action. See help:\n");
+			return rsb__main_help(argc, argv,default_program_operation,program_codes,NULL);
+		return RSB_PROGRAM_SUCCESS;
+    	}
+	goto err;
+ferr:
+	/* rsb__getrusage(); */
+	RSB_DO_ERROR_CUMULATE(errval,rsb_lib_exit(RSB_NULL_EXIT_OPTIONS));
+berr:
+	if(RSB_SOME_ERROR(errval))
+		rsb_perror(NULL,errval);
+verr:
+	return RSB_ERR_TO_PROGRAM_ERROR(errval);
+err:
+	if(RSB_SOME_ERROR(errval))
+		rsb_perror(NULL,errval);
+	return RSB_PROGRAM_ERROR;
+}
+
+/* @endcond */
diff --git a/sbtc.c b/sbtc.c
new file mode 100644
index 0000000..399c55c
--- /dev/null
+++ b/sbtc.c
@@ -0,0 +1,96461 @@
+/*
+
+Copyright (C) 2008-2014 Michele Martone
+
+This file is part of librsb.
+
+librsb is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published
+by the Free Software Foundation; either version 3 of the License, or
+(at your option) any later version.
+
+librsb is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with librsb; see the file COPYING.
+If not, see <http://www.gnu.org/licenses/>.
+
+*/
+/* @cond INNERDOC */
+/*!
+ * @file
+ * @author Michele Martone 
+ * @brief This file is part of the Octave based test suite for librsb
+ */
+#include <stdio.h>
+#ifdef SBTC_USE_RSB_H
+#include <rsb.h>
+#endif /* SBTC_USE_RSB_H */
+#include <complex.h>
+#ifdef RSB_RSB_H_INCLUDED
+#include "rsb_internals.h"
+#define RSB_BLAS_SUPPORT_EMPTY 1
+#define RSB_BLAS_SUPPORTED_TYPE(T) ((errval = rsb__BLAS_is_type_supported(T)) != RSB_ERR_UNSUPPORTED_TYPE) 
+#endif /* RSB_RSB_H_INCLUDED */
+#ifndef RSB_RSB_H_INCLUDED
+#include <blas_sparse.h>
+#define RSB_PROGRAM_SUCCESS 0
+#define RSB_PROGRAM_ERROR (-1)
+#define RSB_ERR_NO_ERROR 0
+#define RSB_ERROR printf
+#define RSB_WITH_SPARSE_BLAS_INTERFACE 1
+#define RSB_BLAS_SUPPORTED_TYPE(T) 1
+#define rsb_err_t int
+#define RSB_ERR_UNSUPPORTED_TYPE 0x004
+#define rsb_nnz_idx_t int
+#define rsb_coo_idx_t int
+#define RSB_BLAS_ERROR -1
+#define RSB_BLAS_NO_ERROR 0
+#define RSB_BLAS_SUPPORT_EMPTY 0
+#define rsb__debug_print_vectors_diff(A1,A2,A3,A4,A5,A6,A7) RSB_ERR_NO_ERROR
+int rsb__do_are_same(void*v1_,void*v2_,int n,int typecode,int s1,int s2){ char*v1=(char*)v1_,*v2=(char*)v2_; int vi,bi,bs; switch(typecode){case('S'):bs=4;break;case('C'): case('D'):bs=8;break;case('Z'):bs=16;break;default: return RSB_ERR_NO_ERROR; } for(vi=0;vi< n;++vi) for(bi=0;bi<bs;++bi) if(v1[vi*bs*s1+bi] != v2[vi*bs*s2+bi]) return RSB_BLAS_ERROR; return RSB_ERR_NO_ERROR;}
+#endif /* RSB_RSB_H_INCLUDED */
+int rsb_sbtc_print_vec(void*v,int n,int typecode){ float*fv=(float*)v; double*dv=(double*)v; int vi,fl=1,fi; if(typecode=='C' || typecode=='Z')fl=2; if(typecode=='S' || typecode=='C')for(vi=0;vi<n;++vi){for(fi=0;fi<fl;++fi)printf("%f" ,fv[vi*fl+fi]);printf("\n");} if(typecode=='D' || typecode=='Z')for(vi=0;vi<n;++vi){for(fi=0;fi<fl;++fi)printf(" %lf",dv[vi*fl+fi]);printf("\n");} ; return RSB_ERR_NO_ERROR;}
+	static rsb_err_t ts_sg_de_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 1 2
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 2 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 6, 12 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 0\n 1 2\n"	" y' = \n 6\n 12\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
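+/* Checking the first reference result by hand: with A = [1 0; 1 2],
+ * x = [1 1]^T and y = [3 3]^T, A*x = [1 3]^T, hence
+ * y' = y + 3*A*x = [6 12]^T, which is the cy[] above. */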
+
+	static rsb_err_t ts_sg_de_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 6, 3 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 0\n"	" y' = \n 6\n 3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 0 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 9, 3 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 0\n 1 0\n"	" y' = \n 9\n 3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
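+/* Note on trans=c with a real type such as 's': conjugation is a no-op,
+ * so op(A) = A^H coincides with A^T; above, A^T*x = [2 0]^T and
+ * y + 3*[2 0]^T = [9 3]^T. */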
+
+	static rsb_err_t ts_sg_de_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 3
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 3 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 15, 0, 3, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 3\n 0 0\n"	" y' = \n 15\n 3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
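+/* With incy=2 only every second entry of y is an operand: the interleaved
+ * zeros in y[] and cy[] are untouched padding, and rsb__do_are_same() is
+ * called with matching strides (2,2). */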
+
+	static rsb_err_t ts_sg_de_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 2
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	float VA[]={ 1, 2, 3 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 15, 0, 9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 2\n 3 0\n"	" y' = \n 15\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 1
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	float VA[]={ 1, 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 9, 0, 6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 1\n 1 0\n"	" y' = \n 9\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 2
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	float VA[]={ 1, 2, 2 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 12, 9 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 2\n 2 0\n"	" y' = \n 12\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
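+/* Analogously with incx=2: x[] stores the logical vector [1 1]^T in its
+ * even positions, and BLAS_susmv() reads x with that stride. */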
+
+	static rsb_err_t ts_sg_de_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 0 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 9, 3 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 1 0\n"	" y' = \n 9\n 3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 3
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 3 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 12 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 0\n 0 3\n"	" y' = \n 6\n 12\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 3
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float VA[]={ 1, 3, 2, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 15, 0, 12, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 3\n 2 1\n"	" y' = \n 15\n 12\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 4
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	float VA[]={ 1, 4, 2 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 12, 0, 15, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 4\n 2 0\n"	" y' = \n 12\n 15\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 0 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 4, 4 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 0\n 1 0\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 1 3
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 3 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 5, 6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 0\n 1 3\n"	" y' = \n 5\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 3
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 3 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 4, 6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 0\n 0 3\n"	" y' = \n 4\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 4, 0, 3, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 0\n 0 0\n"	" y' = \n 4\n 3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 1
+ 0 3
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float VA[]={ 1, 1, 3 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 4, 0, 7, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 1\n 0 3\n"	" y' = \n 4\n 7\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 3
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 3 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 4, 0, 6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 0\n 0 3\n"	" y' = \n 4\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 3
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	float VA[]={ 1, 3, 2 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 7, 5 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 3\n 2 0\n"	" y' = \n 7\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 4, 3 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 0\n 0 0\n"	" y' = \n 4\n 3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 2 2
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 2, 2 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 5 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 0\n 2 2\n"	" y' = \n 6\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 2
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float VA[]={ 1, 2, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 0, 4, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 2\n 0 1\n"	" y' = \n 6\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 1
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	float VA[]={ 1, 1, 2 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 0, 4, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 1\n 2 0\n"	" y' = \n 6\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 3 2
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 3, 2 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 7, 0, 5, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 0\n 3 2\n"	" y' = \n 7\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 4
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 4 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 2, -1 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 0 4\n"	" y' = \n 2\n -1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
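+/* With alpha=-1 the update becomes a subtraction, y' = y - op(A)*x:
+ * here A*x = [1 4]^T, so cy = [3 3]^T - [1 4]^T = [2 -1]^T. */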
+
+	static rsb_err_t ts_sg_de_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 1
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float VA[]={ 1, 1, 2, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, 1 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 1\n 2 1\n"	" y' = \n 0\n 1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 2, 3 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 0\n 0 0\n"	" y' = \n 2\n 3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 1, 0, 3, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 1\n 0 0\n"	" y' = \n 1\n 3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
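+/*
+ * Note (editorial): when incx or incy is 2, the reference vectors are laid
+ * out with stride 2, so every second element is an unused 0 slot: e.g.
+ * cy[]={ 1, 0, 3, 0 } encodes the logical result (1, 3), and
+ * rsb__do_are_same(y,cy,nr,'S',2,2) compares both vectors with stride 2.
+ */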
+	static rsb_err_t ts_sg_de_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 0 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 1, 0, 3, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 1 0\n"	" y' = \n 1\n 3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 2, 0, 3, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 0\n 0 0\n"	" y' = \n 2\n 3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 3
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 3 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ -1, 3 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 3\n 0 0\n"	" y' = \n -1\n 3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 3
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	float VA[]={ 1, 3, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 1, 0 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 3\n 1 0\n"	" y' = \n 1\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 1
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	float VA[]={ 1, 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 1, 2 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 1\n 1 0\n"	" y' = \n 1\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 2
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 2 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0, 3, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 2\n 0 0\n"	" y' = \n 0\n 3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 2 3
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 2, 3 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 2 3\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 5
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	float VA[]={ 1, 5, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 1, 0, -2, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 5\n 1 0\n"	" y' = \n 1\n -2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, 3 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 0 0\n"	" y' = \n 0\n 3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 2
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	float VA[]={ 1, 2, 3 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ -9, -3 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 2\n 3 0\n"	" y' = \n -9\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 1
+ 0 2
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float VA[]={ 1, 1, 2 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, -6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 1\n 0 2\n"	" y' = \n 0\n -6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 0 };
+	float VA[]={ 1, 2 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, 0, -3, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 2 0\n"	" y' = \n 0\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 0 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 1 0\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 1
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float VA[]={ 1, 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, -3 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 1\n 0 1\n"	" y' = \n 0\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 3 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 0\n 0 0\n"	" y' = \n 0\n 3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 3
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 3 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0, -6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 0 3\n"	" y' = \n 0\n -6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 2
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 2 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0, -3, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 2\n 0 0\n"	" y' = \n 0\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_de_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0, 3, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 0\n 0 0\n"	" y' = \n 0\n 3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
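+/*
+ * Note (editorial): from here the generated cases switch from type 's'
+ * (float, BLAS_sus* entry points) to type 'd' (double, BLAS_dus* entry
+ * points); the test skeleton itself is unchanged.
+ */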
+	static rsb_err_t td_sg_de_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 3
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	double VA[]={ 1, 3, 2 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 15, 9 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 3\n 2 0\n"	" y' = \n 15\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 3
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 3 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 6, 12 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 3\n 0 0\n"	" y' = \n 6\n 12\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 1
+ 2 2
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double VA[]={ 1, 1, 2, 2 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 12, 12 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 1\n 2 2\n"	" y' = \n 12\n 12\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 6, 0, 3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 0\n 0 0\n"	" y' = \n 6\n 3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 4 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 0 };
+	double VA[]={ 1, 4 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 18, 0, 3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 4 0\n"	" y' = \n 18\n 3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 2
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 2, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 6, 0, 12, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 2\n 0 1\n"	" y' = \n 6\n 12\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 1
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	double VA[]={ 1, 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 9, 6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 1\n 1 0\n"	" y' = \n 9\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 0 };
+	double VA[]={ 1, 2 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 12, 3 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 2 0\n"" y' = \n 12\n 3\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 1
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 6, 9 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 1\n 0 1\n"" y' = \n 6\n 9\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
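+
+/*
+ * Note (illustrative, not generated): for the real type 'd', blas_conj_trans
+ * reduces to a plain transpose, so the case above computes y' = y + 3*A^T*x
+ * with A^T = { {1,0}, {1,1} }: A^T*x = { 1, 2 } and
+ * y' = { 3+3, 3+6 } = { 6, 9 }, matching cy[].
+ */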
+
+	static rsb_err_t td_sg_de_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 6, 0, 3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 0\n 0 0\n"" y' = \n 6\n 3\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 6, 0, 3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 0\n"" y' = \n 6\n 3\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 0 };
+	double VA[]={ 1, 3 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 15, 0, 3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 0\n 3 0\n"" y' = \n 15\n 3\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 1
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	double VA[]={ 1, 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 5, 4 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 1\n 1 0\n"" y' = \n 5\n 4\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 2
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	double VA[]={ 1, 2, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 5, 5 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 2\n 1 0\n"" y' = \n 5\n 5\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 1
+ 0 5
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 1, 5 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 4, 9 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 1\n 0 5\n"" y' = \n 4\n 9\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
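+
+/*
+ * Illustrative sketch only: run_a_few_usmv_cases() below is a hypothetical
+ * aggregator (not part of this generated file) showing how the static cases
+ * above compose. Each case returns RSB_ERR_NO_ERROR on success and
+ * RSB_ERR_UNSUPPORTED_TYPE when the numerical type was compiled out, so a
+ * driver should treat the latter as a skip, not a failure.
+ */
+#if 0
+static rsb_err_t run_a_few_usmv_cases(void)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+
+	/* untransposed, transposed, and conjugate-transposed variants */
+	errval = td_sg_de_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval != RSB_ERR_NO_ERROR && errval != RSB_ERR_UNSUPPORTED_TYPE )
+		return errval;
+	errval = td_sg_de_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval != RSB_ERR_NO_ERROR && errval != RSB_ERR_UNSUPPORTED_TYPE )
+		return errval;
+	errval = td_sg_de_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval != RSB_ERR_NO_ERROR && errval != RSB_ERR_UNSUPPORTED_TYPE )
+		return errval;
+	return RSB_ERR_NO_ERROR;
+}
+#endif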
+
+	static rsb_err_t td_sg_de_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 5
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 5 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 4, 0, 8, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 0\n 0 5\n"" y' = \n 4\n 8\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 0 };
+	double VA[]={ 1, 2 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 6, 0, 3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 0\n 2 0\n"" y' = \n 6\n 3\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 0\n 0 1\n"" y' = \n 4\n 4\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
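+
+/*
+ * Note (illustrative, not generated): with incy=2 the logical y occupies the
+ * even slots of y[]; the zeros in the odd slots of the reference cy[] double
+ * as a check that the strided usmv leaves the in-between elements untouched.
+ */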
+
+	static rsb_err_t td_sg_de_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 1
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	double VA[]={ 1, 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 5, 4 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 1\n 1 0\n"" y' = \n 5\n 4\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 2
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 2 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 4, 5 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 2\n 0 0\n"" y' = \n 4\n 5\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 3, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 7, 4 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 0\n 3 1\n"" y' = \n 7\n 4\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 0\n 0 1\n"" y' = \n 4\n 4\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 1
+ 0 4
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 1, 4 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 4, 0, 8, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 1\n 0 4\n"" y' = \n 4\n 8\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 3
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	double VA[]={ 1, 3, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 5, 0, 6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 3\n 1 0\n"" y' = \n 5\n 6\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 3
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 3, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ -1, 2 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 3\n 0 1\n"" y' = \n -1\n 2\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
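+
+/*
+ * Worked check (illustrative note, not generated): with alpha = -1 the
+ * update subtracts, i.e. y' = y - A*x = { 3-(1+3), 3-(0+1) } = { -1, 2 },
+ * matching cy[].
+ */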
+
+	static rsb_err_t td_sg_de_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 0 };
+	double VA[]={ 1, 2 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 3 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 2 0\n"" y' = \n 0\n 3\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 0 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 1, 3 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 0\n 1 0\n"" y' = \n 1\n 3\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 1, 0, 3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 1\n 0 0\n"" y' = \n 1\n 3\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 2, 0, 3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 0 0\n"" y' = \n 2\n 3\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 2, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0, 2, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 0\n 2 1\n"" y' = \n 0\n 2\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 2, 3 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 0 0\n"" y' = \n 2\n 3\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 0 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 1, 3 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 1 0\n"" y' = \n 1\n 3\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 1
+ 5 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double VA[]={ 1, 1, 5, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -3, 1 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 1\n 5 1\n"" y' = \n -3\n 1\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 0 };
+	double VA[]={ 1, 3 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 2, 0, 0, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 3 0\n"" y' = \n 2\n 0\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 4
+ 0 5
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 4, 5 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 2, 0, -6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 4\n 0 5\n"" y' = \n 2\n -6\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
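+/* Editor's note on the ix2/iy2 variants: with incx=2 or incy=2 only every
+ * second array slot belongs to the logical vector (x_i lives at x[i*incx]),
+ * so the zero fillers in the x, y and cy initializers above are padding that
+ * usmv must neither read as data nor clobber. A hypothetical helper making
+ * the addressing explicit: */
+static void strided_gather_sketch(const double *x, int n, int incx, double *out)
+{
+	int i;
+	for(i = 0; i < n; ++i)
+		out[i] = x[i*incx];	/* collect the n logical elements */
+}
+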
+	static rsb_err_t td_sg_de_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 1
+ 0 3
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 1, 3 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 2, 0, -1, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"
+		" A = \n 1 1\n 0 3\n" " y' = \n 2\n -1\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 0 };
+	double VA[]={ 1, 3 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, -6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"
+		" A = \n 1 0\n 3 0\n" " y' = \n 0\n -6\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 1
+ 0 2
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 1, 2 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, -6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"
+		" A = \n 1 1\n 0 2\n" " y' = \n 0\n -6\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 3
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 3 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, -6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"
+		" A = \n 1 3\n 0 0\n" " y' = \n 0\n -6\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
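+/* Editor's check of the trans=c case above: conjugation is a no-op on real
+ * 'd' data, so A^H reduces to A^T = [ 1 0 ; 3 0 ]; A^T*x = [ 1, 3 ] and
+ * y' = y + (-3)*A^T*x = [ 3-3, 3-9 ] = [ 0, -6 ], matching the hardcoded cy. */
+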
+	static rsb_err_t td_sg_de_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0, 3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"
+		" A = \n 1 0\n 0 0\n" " y' = \n 0\n 3\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"
+		" A = \n 1 0\n 0 1\n" " y' = \n 0\n 0\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0, 3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"
+		" A = \n 1 0\n 0 0\n" " y' = \n 0\n 3\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 1
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double VA[]={ 1, 1, 3, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -3, -9 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"
+		" A = \n 1 1\n 3 1\n" " y' = \n -3\n -9\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 3
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 3 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 0, -6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"
+		" A = \n 1 3\n 0 0\n" " y' = \n 0\n -6\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 2
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 2 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 0, -3 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"
+		" A = \n 1 2\n 0 0\n" " y' = \n 0\n -3\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 0 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"
+		" A = \n 1 0\n 1 0\n" " y' = \n 0\n 0\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 3
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 3 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 0, 0, -6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"
+		" A = \n 1 0\n 0 3\n" " y' = \n 0\n -6\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_de_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 2 2
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 2, 2 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -6, 0, -3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"
+		" A = \n 1 0\n 2 2\n" " y' = \n -6\n -3\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+2i 0+2i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 0+2*I, 0+2*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 6+12*I, 6+6*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1+2i 0+2i\n 0+2i 1+0i\n" " y' = \n 6+12i\n 6+6i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
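+/* Editor's aside: the complex-typed ('c') cases differ from the 'd' ones in
+ * one API detail visible above: BLAS_cusmv receives alpha by address rather
+ * than by value. A minimal sketch under the same assumptions as before
+ * (illustrative function name; float complex and I come from the headers this
+ * file already relies on): */
+static rsb_err_t usmv_c_sketch(void)
+{
+	/* A = [ 1+2i 0 ; 0 0 ] in COO form */
+	int nnz = 1, nr = 2, nc = 2;
+	int IA[] = { 0 }, JA[] = { 0 };
+	float complex VA[] = { 1+2*I };
+	float complex x[] = { 1, 1 };
+	float complex y[] = { 3, 3 };
+	float complex alpha = 3;
+	blas_sparse_matrix A = BLAS_cuscr_begin(nr, nc);
+
+	if( A == -1 )
+		return RSB_BLAS_ERROR;
+	if( BLAS_cuscr_insert_entries(A, nnz, VA, IA, JA) != RSB_BLAS_NO_ERROR
+	 || BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR
+	 || BLAS_cusmv(blas_no_trans, &alpha, A, x, 1, y, 1) != RSB_BLAS_NO_ERROR )
+		return RSB_BLAS_ERROR;	/* note: alpha passed by pointer */
+	/* y is now { 6+6i, 3 }: y + 3*A*x touches only row 0 here */
+	return BLAS_usds(A) == RSB_BLAS_NO_ERROR ? RSB_ERR_NO_ERROR : RSB_BLAS_ERROR;
+}
+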
+	static rsb_err_t tc_sg_de_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+2i 0+0i
+ 0+0i 0+2i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1+2*I, 0+2*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 6+6*I, 3+6*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1+2i 0+0i\n 0+0i 0+2i\n" " y' = \n 6+6i\n 3+6i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+2i 3+3i
+ 1+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 3+3*I, 1+3*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 9+-15*I, 15+-9*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1+2i 3+3i\n 1+3i 1+0i\n" " y' = \n 9+-15i\n 15+-9i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
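+/* Editor's check of the conjugate-transpose case above:
+ * A^H = [ 1-2i 1-3i ; 3-3i 1 ], so A^H*x = [ 2-5i, 4-3i ] and
+ * y' = y + 3*A^H*x = [ 3+6-15i, 3+12-9i ] = [ 9-15i, 15-9i ], i.e. the
+ * reference cy (rendered by the generator as "9+-15i" and "15+-9i"). */
+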
+	static rsb_err_t tc_sg_de_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+2i 4+2i
+ 5+2i 0+2i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 4+2*I, 5+2*I, 0+2*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 18+12*I, 0+0*I, 18+12*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1+2i 4+2i\n 5+2i 0+2i\n" " y' = \n 18+12i\n 18+12i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+2i 1+1i
+ 0+1i 3+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 1+1*I, 0+1*I, 3+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 6+9*I, 0+0*I, 15+3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1+2i 1+1i\n 0+1i 3+0i\n" " y' = \n 6+9i\n 15+3i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+2i 0+2i
+ 1+2i 0+8i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 0+2*I, 1+2*I, 0+8*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 9+-12*I, 0+0*I, 3+-30*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1+2i 0+2i\n 1+2i 0+8i\n" " y' = \n 9+-12i\n 3+-30i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+2i 0+3i
+ 0+3i 0+4i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 0+3*I, 0+3*I, 0+4*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 6+15*I, 3+21*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1+2i 0+3i\n 0+3i 0+4i\n" " y' = \n 6+15i\n 3+21i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+2i 1+4i
+ 0+4i 0+2i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 1+4*I, 0+4*I, 0+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 6+18*I, 6+18*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1+2i 1+4i\n 0+4i 0+2i\n" " y' = \n 6+18i\n 6+18i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+2i 2+6i
+ 3+6i 1+6i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 2+6*I, 3+6*I, 1+6*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 15+-24*I, 12+-36*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1+2i 2+6i\n 3+6i 1+6i\n" " y' = \n 15+-24i\n 12+-36i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+2i 1+1i
+ 5+1i 1+2i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 1+1*I, 5+1*I, 1+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 9+9*I, 0+0*I, 21+9*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1+2i 1+1i\n 5+1i 1+2i\n" " y' = \n 9+9i\n 21+9i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+2i 0+4i
+ 2+4i 3+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 0+4*I, 2+4*I, 3+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 12+18*I, 0+0*I, 12+12*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1+2i 0+4i\n 2+4i 3+0i\n" " y' = \n 12+18i\n 12+12i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+2i 0+1i
+ 1+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 0+1*I, 1+1*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 9+-9*I, 0+0*I, 6+-3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1+2i 0+1i\n 1+1i 1+0i\n" " y' = \n 9+-9i\n 6+-3i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+2i 0+2i
+ 0+2i 0+6i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 0+2*I, 0+2*I, 0+6*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 4+4*I, 3+8*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1+2i 0+2i\n 0+2i 0+6i\n" " y' = \n 4+4i\n 3+8i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
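+/* Editor's note: the "beta= 1" in these messages reflects usmv's fixed
+ * semantics, y <- y + alpha*op(A)*x: unlike dense GEMV there is no beta
+ * parameter, so y is only accumulated into, never scaled. */
+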
+	static rsb_err_t tc_sg_de_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+2i 0+1i
+ 0+1i 0+4i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 0+1*I, 0+1*I, 0+4*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 4+3*I, 3+5*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1+2i 0+1i\n 0+1i 0+4i\n" " y' = \n 4+3i\n 3+5i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+2i 0+4i
+ 3+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	float complex VA[]={ 1+2*I, 0+4*I, 3+4*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 7+-6*I, 3+-4*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1+2i 0+4i\n 3+4i 0+0i\n"	" y' = \n 7+-6i\n 3+-4i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
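+/*
+ * Worked check for the conjugate-transpose case above: op(A) = A^H
+ * conjugates the stored entries, so with x = (1,1):
+ *   y'[0] = 3 + conj(1+2i) + conj(3+4i) = 3 + (1-2i) + (3-4i) = 7-6i
+ *   y'[1] = 3 + conj(0+4i)              = 3-4i
+ * matching cy[] (which the generator prints as "7+-6i" and "3+-4i").
+ */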
+
+	static rsb_err_t tc_sg_de_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+2i 1+0i
+ 2+0i 0+10i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 1+0*I, 2+0*I, 0+10*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 5+2*I, 0+0*I, 5+10*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1+2i 1+0i\n 2+0i 0+10i\n"	" y' = \n 5+2i\n 5+10i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
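+/*
+ * Stride note for the incy=2 variants: y and cy are allocated with 2*nr
+ * elements, the logical result living in the even slots and the odd slots
+ * staying 0.  BLAS_cusmv() touches only every incy-th element, and
+ * rsb__do_are_same() is called with the matching strides (2,2).
+ */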
+
+	static rsb_err_t tc_sg_de_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+2i 1+5i
+ 0+5i 1+2i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 1+5*I, 0+5*I, 1+2*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 4+7*I, 0+0*I, 5+7*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1+2i 1+5i\n 0+5i 1+2i\n"	" y' = \n 4+7i\n 5+7i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+2i 0+5i
+ 0+5i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	float complex VA[]={ 1+2*I, 0+5*I, 0+5*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 4+-7*I, 0+0*I, 3+-5*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1+2i 0+5i\n 0+5i 0+0i\n"	" y' = \n 4+-7i\n 3+-5i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+2i 0+0i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1+2*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 4+2*I, 4+0*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1+2i 0+0i\n 0+0i 1+0i\n"	" y' = \n 4+2i\n 4+0i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
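+/*
+ * Likewise for the incx=2 variants: x holds 2*nc elements with the operand
+ * in the even slots, so the effective input vector is still (1,1).
+ */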
+
+	static rsb_err_t tc_sg_de_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+2i 3+4i
+ 0+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	float complex VA[]={ 1+2*I, 3+4*I, 0+4*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 4+6*I, 6+4*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1+2i 3+4i\n 0+4i 0+0i\n"	" y' = \n 4+6i\n 6+4i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+2i 0+1i
+ 0+1i 4+4i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 0+1*I, 0+1*I, 4+4*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 4+-3*I, 7+-5*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1+2i 0+1i\n 0+1i 4+4i\n"	" y' = \n 4+-3i\n 7+-5i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+2i 3+2i
+ 1+2i 3+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 3+2*I, 1+2*I, 3+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 7+4*I, 0+0*I, 7+2*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1+2i 3+2i\n 1+2i 3+0i\n"	" y' = \n 7+4i\n 7+2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+2i 3+0i
+ 3+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	float complex VA[]={ 1+2*I, 3+0*I, 3+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 7+2*I, 0+0*I, 6+0*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1+2i 3+0i\n 3+0i 0+0i\n"	" y' = \n 7+2i\n 6+0i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+2i 1+0i
+ 0+0i 1+2i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float complex VA[]={ 1+2*I, 1+0*I, 1+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 4+-2*I, 0+0*I, 5+-2*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1+2i 1+0i\n 0+0i 1+2i\n"	" y' = \n 4+-2i\n 5+-2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
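+/*
+ * A minimal, self-contained sketch of the call sequence these generated
+ * tests exercise.  Hedged assumptions: the Sparse BLAS C binding comes from
+ * "blas_sparse.h" with C99 complex support, and the creation/multiply
+ * routines return 0 on success (consistent with the RSB_BLAS_NO_ERROR
+ * comparisons above).  The function name example_cusmv is illustrative and
+ * not part of the library.
+ */
+#include <complex.h>
+#include <stdio.h>
+#include <blas_sparse.h>
+
+static int example_cusmv(void)
+{
+	int IA[]={ 0, 0, 1, 1 };	/* COO row indices    */
+	int JA[]={ 0, 1, 0, 1 };	/* COO column indices */
+	float complex VA[]={ 1+2*I, 0+2*I, 0+2*I, 0+6*I };
+	float complex x[]={ 1, 1 };
+	float complex y[]={ 3, 3 };
+	float complex alpha=1;
+	blas_sparse_matrix A;
+
+	A = BLAS_cuscr_begin(2,2);	/* begin assembling a 2x2 matrix */
+	if( A == -1 )
+		return -1;
+	if( BLAS_cuscr_insert_entries(A,4,VA,IA,JA) != 0 )
+		return -1;
+	if( BLAS_cuscr_end(A) != 0 )	/* finish assembly */
+		return -1;
+	/* y <- y + alpha * A * x, unit strides; expect y' = (4+4i, 3+8i) */
+	if( BLAS_cusmv(blas_no_trans,&alpha,A,x,1,y,1) != 0 )
+		return -1;
+	printf("y' = ( %g%+gi, %g%+gi )\n",
+		crealf(y[0]),cimagf(y[0]),crealf(y[1]),cimagf(y[1]));
+	return BLAS_usds(A);	/* release the matrix handle */
+}
+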
+	static rsb_err_t tc_sg_de_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+2i 1+1i
+ 2+1i 3+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 1+1*I, 2+1*I, 3+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 1+-3*I, -2+-1*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1+2i 1+1i\n 2+1i 3+0i\n"	" y' = \n 1+-3i\n -2+-1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
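+/*
+ * The test above opens the alpha = -1 (and, further below, alpha = -3)
+ * series: the template is unchanged; only the scalar passed by address to
+ * BLAS_cusmv() and the precomputed cy differ.  Worked check:
+ *   y'[0] = 3 - ((1+2i) + (1+1i)) = 3 - (2+3i) =  1-3i
+ *   y'[1] = 3 - ((2+1i) + (3+0i)) = 3 - (5+1i) = -2-1i
+ */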
+
+	static rsb_err_t tc_sg_de_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+2i 2+1i
+ 0+1i 0+2i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 2+1*I, 0+1*I, 0+2*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 2+-3*I, 1+-3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+2i 2+1i\n 0+1i 0+2i\n"	" y' = \n 2+-3i\n 1+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+2i 0+1i
+ 3+1i 5+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 0+1*I, 3+1*I, 5+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -1+3*I, -2+1*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+2i 0+1i\n 3+1i 5+0i\n"	" y' = \n -1+3i\n -2+1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+2i 2+1i
+ 5+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 2+1*I, 5+1*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 0+-3*I, 0+0*I, -3+-1*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1+2i 2+1i\n 5+1i 1+0i\n"	" y' = \n 0+-3i\n -3+-1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+2i 0+0i
+ 2+0i 1+2i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+2*I, 2+0*I, 1+2*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 0+-2*I, 0+0*I, 2+-2*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+2i 0+0i\n 2+0i 1+2i\n"	" y' = \n 0+-2i\n 2+-2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+2i 0+1i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 0+1*I, 0+1*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 2+3*I, 0+0*I, 2+1*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+2i 0+1i\n 0+1i 1+0i\n"	" y' = \n 2+3i\n 2+1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+2i 1+1i
+ 1+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	float complex VA[]={ 1+2*I, 1+1*I, 1+1*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 1+-3*I, 2+-1*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1+2i 1+1i\n 1+1i 0+0i\n"	" y' = \n 1+-3i\n 2+-1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+2i 1+1i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	float complex VA[]={ 1+2*I, 1+1*I, 0+1*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 2+-3*I, 2+-1*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+2i 1+1i\n 0+1i 0+0i\n"	" y' = \n 2+-3i\n 2+-1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+2i 0+1i
+ 0+1i 0+8i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 0+1*I, 0+1*I, 0+8*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 2+3*I, 3+9*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+2i 0+1i\n 0+1i 0+8i\n"	" y' = \n 2+3i\n 3+9i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+2i 0+5i
+ 0+5i 0+2i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 0+5*I, 0+5*I, 0+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 2+-7*I, 0+0*I, 3+-7*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1+2i 0+5i\n 0+5i 0+2i\n"	" y' = \n 2+-7i\n 3+-7i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+2i 2+2i
+ 0+2i 0+2i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 2+2*I, 0+2*I, 0+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 2+-4*I, 0+0*I, 1+-4*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+2i 2+2i\n 0+2i 0+2i\n"	" y' = \n 2+-4i\n 1+-4i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+2i 3+1i
+ 2+1i 2+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 3+1*I, 2+1*I, 2+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0+3*I, 0+0*I, -2+1*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+2i 3+1i\n 2+1i 2+0i\n"	" y' = \n 0+3i\n -2+1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+2i 0+2i
+ 2+2i 1+2i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 0+2*I, 2+2*I, 1+2*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 0+-12*I, -6+-12*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1+2i 0+2i\n 2+2i 1+2i\n"	" y' = \n 0+-12i\n -6+-12i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+2i 2+7i
+ 0+7i 2+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 2+7*I, 0+7*I, 2+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 0+-27*I, -9+-21*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+2i 2+7i\n 0+7i 2+0i\n"	" y' = \n 0+-27i\n -9+-21i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+2i 0+0i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1+2*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 0+6*I, 3+0*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+2i 0+0i\n 0+0i 0+0i\n"	" y' = \n 0+6i\n 3+0i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
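+/*
+ * nnz = 1 above: only A(0,0) = 1+2i is inserted, so the remaining entries of
+ * the 2x2 operand are implicit zeros; with alpha = -3 and trans=c,
+ * y'[0] = 3 - 3*conj(1+2i) = 0+6i and y'[1] stays at 3.
+ */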
+
+	static rsb_err_t tc_sg_de_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+2i 1+0i
+ 3+0i 2+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 1+0*I, 3+0*I, 2+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -3+-6*I, 0+0*I, -12+0*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1+2i 1+0i\n 3+0i 2+0i\n"	" y' = \n -3+-6i\n -12+0i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+2i 0+1i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	float complex VA[]={ 1+2*I, 0+1*I, 0+1*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 0+-9*I, 0+0*I, 3+-3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n" " A = \n 1+2i 0+1i\n 0+1i 0+0i\n" " y' = \n 0+-9i\n 3+-3i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+2i 1+5i
+ 0+5i 0+8i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 1+5*I, 0+5*I, 0+8*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 0+21*I, 0+0*I, 0+39*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n" " A = \n 1+2i 1+5i\n 0+5i 0+8i\n" " y' = \n 0+21i\n 0+39i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
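+/*
+ * Editorial note: the incx=2 variants below stride the input instead: x[] has
+ * 2*nc entries and the logical operand is (x[0], x[2]) = (1, 1); the padding
+ * zeros at odd indices are never read. Worked check for the next case
+ * (trans=n, alpha=-3): A*x = (1+2i, 3), so
+ * y' = (3,3) + (-3)*(1+2i, 3) = (0-6i, -6+0i) = cy[].
+ */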
+	static rsb_err_t tc_sg_de_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+2i 0+0i
+ 2+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+2*I, 2+0*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0+-6*I, -6+0*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n" " A = \n 1+2i 0+0i\n 2+0i 1+0i\n" " y' = \n 0+-6i\n -6+0i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+2i 0+2i
+ 0+2i 0+8i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 0+2*I, 0+2*I, 0+8*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0+-12*I, 3+-30*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n" " A = \n 1+2i 0+2i\n 0+2i 0+8i\n" " y' = \n 0+-12i\n 3+-30i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+2i 3+0i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float complex VA[]={ 1+2*I, 3+0*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0+6*I, -9+0*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n" " A = \n 1+2i 3+0i\n 0+0i 1+0i\n" " y' = \n 0+6i\n -9+0i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+2i 0+3i
+ 1+3i 3+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 0+3*I, 1+3*I, 3+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0+-15*I, 0+0*I, -9+-9*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n" " A = \n 1+2i 0+3i\n 1+3i 3+0i\n" " y' = \n 0+-15i\n -9+-9i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+2i 1+1i
+ 1+1i 0+4i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	float complex VA[]={ 1+2*I, 1+1*I, 1+1*I, 0+4*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ -3+-9*I, 0+0*I, 0+-15*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n" " A = \n 1+2i 1+1i\n 1+1i 0+4i\n" " y' = \n -3+-9i\n 0+-15i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_de_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+2i 0+0i
+ 0+0i 3+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1+2*I, 3+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0+6*I, 0+0*I, -6+0*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n" " A = \n 1+2i 0+0i\n 0+0i 3+0i\n" " y' = \n 0+6i\n -6+0i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
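+/*
+ * Editorial note: from here on the template switches from type 'c' (float
+ * complex, BLAS_cus* entry points) to type 'z' (double complex, BLAS_zus*);
+ * the structure of each test is otherwise unchanged. Worked check for the
+ * first 'z' case (trans=n, alpha=3): A*x = (2+2i, 4+6i), so
+ * y' = (3,3) + 3*(2+2i, 4+6i) = (9+6i, 15+18i) = cy[].
+ */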
+	static rsb_err_t tz_sg_de_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+2i 1+0i
+ 3+0i 1+6i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 1+0*I, 3+0*I, 1+6*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 9+6*I, 15+18*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n" " A = \n 1+2i 1+0i\n 3+0i 1+6i\n" " y' = \n 9+6i\n 15+18i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+2i 1+1i
+ 4+1i 1+4i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 1+1*I, 4+1*I, 1+4*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 18+9*I, 9+15*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n" " A = \n 1+2i 1+1i\n 4+1i 1+4i\n" " y' = \n 18+9i\n 9+15i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+2i 0+1i
+ 5+1i 1+8i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 0+1*I, 5+1*I, 1+8*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 21+-9*I, 6+-27*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n" " A = \n 1+2i 0+1i\n 5+1i 1+8i\n" " y' = \n 21+-9i\n 6+-27i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+2i 0+0i
+ 3+0i 0+2i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+2*I, 3+0*I, 0+2*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 6+6*I, 0+0*I, 12+6*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n" " A = \n 1+2i 0+0i\n 3+0i 0+2i\n" " y' = \n 6+6i\n 12+6i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+2i 0+1i
+ 0+1i 0+2i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 0+1*I, 0+1*I, 0+2*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 6+9*I, 0+0*I, 3+9*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n" " A = \n 1+2i 0+1i\n 0+1i 0+2i\n" " y' = \n 6+9i\n 3+9i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+2i 2+0i
+ 1+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 2+0*I, 1+0*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 9+-6*I, 0+0*I, 12+0*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n" " A = \n 1+2i 2+0i\n 1+0i 1+0i\n" " y' = \n 9+-6i\n 12+0i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+2i 1+3i
+ 2+3i 0+4i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 1+3*I, 2+3*I, 0+4*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 9+15*I, 9+21*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n" " A = \n 1+2i 1+3i\n 2+3i 0+4i\n" " y' = \n 9+15i\n 9+21i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+2i 0+0i
+ 3+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 0 };
+	double complex VA[]={ 1+2*I, 3+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 15+6*I, 3+0*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n" " A = \n 1+2i 0+0i\n 3+0i 0+0i\n" " y' = \n 15+6i\n 3+0i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+2i 3+0i
+ 0+0i 2+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double complex VA[]={ 1+2*I, 3+0*I, 2+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 6+-6*I, 18+0*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n" " A = \n 1+2i 3+0i\n 0+0i 2+0i\n" " y' = \n 6+-6i\n 18+0i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+2i 4+4i
+ 0+4i 2+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 4+4*I, 0+4*I, 2+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 18+18*I, 0+0*I, 9+12*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n" " A = \n 1+2i 4+4i\n 0+4i 2+0i\n" " y' = \n 18+18i\n 9+12i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+2i 2+0i
+ 3+0i 2+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 2+0*I, 3+0*I, 2+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 15+6*I, 0+0*I, 15+0*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n" " A = \n 1+2i 2+0i\n 3+0i 2+0i\n" " y' = \n 15+6i\n 15+0i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+2i 1+1i
+ 0+1i 3+4i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 1+1*I, 0+1*I, 3+4*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 6+-9*I, 0+0*I, 15+-15*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n" " A = \n 1+2i 1+1i\n 0+1i 3+4i\n" " y' = \n 6+-9i\n 15+-15i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+2i 1+0i
+ 0+0i 2+4i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double complex VA[]={ 1+2*I, 1+0*I, 2+4*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 5+2*I, 5+4*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n" " A = \n 1+2i 1+0i\n 0+0i 2+4i\n" " y' = \n 5+2i\n 5+4i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+2i 1+1i
+ 1+1i 1+2i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 1+1*I, 1+1*I, 1+2*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 5+3*I, 5+3*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n" " A = \n 1+2i 1+1i\n 1+1i 1+2i\n" " y' = \n 5+3i\n 5+3i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+2i 0+7i
+ 0+7i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 0+7*I, 0+7*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 4+-9*I, 4+-7*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n" " A = \n 1+2i 0+7i\n 0+7i 1+0i\n" " y' = \n 4+-9i\n 4+-7i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+2i 1+0i
+ 3+0i 3+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 1+0*I, 3+0*I, 3+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 5+2*I, 0+0*I, 9+0*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n" " A = \n 1+2i 1+0i\n 3+0i 3+0i\n" " y' = \n 5+2i\n 9+0i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+2i 2+1i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	double complex VA[]={ 1+2*I, 2+1*I, 0+1*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 4+3*I, 0+0*I, 5+1*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n" " A = \n 1+2i 2+1i\n 0+1i 0+0i\n" " y' = \n 4+3i\n 5+1i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+2i 1+4i
+ 3+4i 1+2i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 1+4*I, 3+4*I, 1+2*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 7+-6*I, 0+0*I, 5+-6*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n" " A = \n 1+2i 1+4i\n 3+4i 1+2i\n" " y' = \n 7+-6i\n 5+-6i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
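+/*
+ * Control flow shared by every case: the early "goto err" paths return
+ * RSB_ERR_UNSUPPORTED_TYPE without counting as a failure (type 'z'
+ * compiled out, or nnz==0 while empty matrices are unsupported);
+ * "goto ferr" is a real failure and dumps the expected system (lsc)
+ * plus the computed y before returning errval; the "ok" exit returns
+ * RSB_ERR_NO_ERROR.
+ */
+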
+	static rsb_err_t tz_sg_de_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+2i 2+1i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	double complex VA[]={ 1+2*I, 2+1*I, 0+1*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 6+3*I, 3+1*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1+2i 2+1i\n 0+1i 0+0i\n"	" y' = \n 6+3i\n 3+1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+2i 3+1i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	double complex VA[]={ 1+2*I, 3+1*I, 0+1*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 4+3*I, 6+1*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1+2i 3+1i\n 0+1i 0+0i\n"	" y' = \n 4+3i\n 6+1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+2i 0+0i
+ 2+0i 1+2i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+2*I, 2+0*I, 1+2*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 6+-2*I, 4+-2*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1+2i 0+0i\n 2+0i 1+2i\n"	" y' = \n 6+-2i\n 4+-2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+2i 1+2i
+ 3+2i 2+2i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 1+2*I, 3+2*I, 2+2*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 5+4*I, 0+0*I, 8+4*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1+2i 1+2i\n 3+2i 2+2i\n"	" y' = \n 5+4i\n 8+4i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+2i 0+1i
+ 2+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 0+1*I, 2+1*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 6+3*I, 0+0*I, 4+1*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1+2i 0+1i\n 2+1i 1+0i\n"	" y' = \n 6+3i\n 4+1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+2i 0+3i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	double complex VA[]={ 1+2*I, 0+3*I, 0+3*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 4+-5*I, 0+0*I, 3+-3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1+2i 0+3i\n 0+3i 0+0i\n"	" y' = \n 4+-5i\n 3+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
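+/*
+ * On strides, as exercised by the case above (incx=2, incy=2): only
+ * x[0], x[2] and y[0], y[2] take part; the interleaved zeros are
+ * padding.  There, A^H = [ 1-2i  0-3i ; 0-3i  0 ] and the effective
+ * x is (1,1), so
+ *   y'[0] = 3 + (1-2i) + (0-3i) = 4-5i
+ *   y'[2] = 3 + (0-3i) + 0      = 3-3i
+ * which is exactly the reference cy = { 4-5i, 0, 3-3i, 0 }.
+ */
+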
+	static rsb_err_t tz_sg_de_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+2i 4+1i
+ 1+1i 2+4i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 4+1*I, 1+1*I, 2+4*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -2+-3*I, 0+-5*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1+2i 4+1i\n 1+1i 2+4i\n"	" y' = \n -2+-3i\n 0+-5i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
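+/*
+ * From the case above onwards alpha is -1, and -3 further down; beta
+ * stays 1 throughout.  Reading the mechanical naming off these cases
+ * (an inference from the cases at hand, not from the generator):
+ * tz_sg_de_usmv_2_<t>_<a>_<b>_ix<incx>_iy<incy>, with t/z = test of
+ * type 'z'; sg, de = sym=g, diag=e as in the log strings; <t> in
+ * {n,t,c} the transposition; <a>, <b> the scalars (ap1 = +1,
+ * anr1 = -1, anr3 = -3, bp1 = +1).
+ */
+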
+	static rsb_err_t tz_sg_de_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+2i 0+0i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1+2*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 2+-2*I, 3+0*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+2i 0+0i\n 0+0i 0+0i\n"	" y' = \n 2+-2i\n 3+0i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+2i 0+0i
+ 0+0i 3+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1+2*I, 3+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 2+2*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+2i 0+0i\n 0+0i 3+0i\n"	" y' = \n 2+2i\n 0+0i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+2i 1+5i
+ 0+5i 3+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 1+5*I, 0+5*I, 3+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 1+-7*I, 0+0*I, 0+-5*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1+2i 1+5i\n 0+5i 3+0i\n"	" y' = \n 1+-7i\n 0+-5i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+2i 0+0i
+ 2+0i 5+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+2*I, 2+0*I, 5+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 0+-2*I, 0+0*I, -2+0*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+2i 0+0i\n 2+0i 5+0i\n"	" y' = \n 0+-2i\n -2+0i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+2i 1+3i
+ 3+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	double complex VA[]={ 1+2*I, 1+3*I, 3+3*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -1+5*I, 0+0*I, 2+3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+2i 1+3i\n 3+3i 0+0i\n"	" y' = \n -1+5i\n 2+3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+2i 0+0i
+ 1+0i 1+2i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+2*I, 1+0*I, 1+2*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 2+-2*I, 1+-2*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1+2i 0+0i\n 1+0i 1+2i\n"	" y' = \n 2+-2i\n 1+-2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+2i 0+0i
+ 0+0i 2+2i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1+2*I, 2+2*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 2+-2*I, 1+-2*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+2i 0+0i\n 0+0i 2+2i\n"	" y' = \n 2+-2i\n 1+-2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+2i 2+5i
+ 0+5i 0+6i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 2+5*I, 0+5*I, 0+6*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 2+7*I, 1+11*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+2i 2+5i\n 0+5i 0+6i\n"	" y' = \n 2+7i\n 1+11i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+2i 1+0i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1+2*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 1+-2*I, 0+0*I, 3+0*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1+2i 1+0i\n 0+0i 0+0i\n"	" y' = \n 1+-2i\n 3+0i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+2i 0+2i
+ 2+2i 1+2i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 0+2*I, 2+2*I, 1+2*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 0+-4*I, 0+0*I, 2+-4*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+2i 0+2i\n 2+2i 1+2i\n"	" y' = \n 0+-4i\n 2+-4i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+2i 0+2i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	double complex VA[]={ 1+2*I, 0+2*I, 0+2*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 2+4*I, 0+0*I, 3+2*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+2i 0+2i\n 0+2i 0+0i\n"	" y' = \n 2+4i\n 3+2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
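+/*
+ * Last group: alpha=-3, beta still 1.  In the first case below,
+ * A = [ 1+2i  0+1i ; 1+1i  3+6i ] and x = (1,1), so A*x = (1+3i, 4+7i)
+ * and
+ *   y'[0] = 3 - 3*(1+3i) =  0-9i
+ *   y'[1] = 3 - 3*(4+7i) = -9-21i
+ * i.e. the reference cy = { 0-9i, -9-21i }.
+ */
+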
+	static rsb_err_t tz_sg_de_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+2i 0+1i
+ 1+1i 3+6i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 0+1*I, 1+1*I, 3+6*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 0+-9*I, -9+-21*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1+2i 0+1i\n 1+1i 3+6i\n"	" y' = \n 0+-9i\n -9+-21i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+2i 1+1i
+ 2+1i 3+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 1+1*I, 2+1*I, 3+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -6+-9*I, -9+-3*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+2i 1+1i\n 2+1i 3+0i\n"	" y' = \n -6+-9i\n -9+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+2i 0+2i
+ 1+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 0+2*I, 1+2*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -3+12*I, 0+6*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+2i 0+2i\n 1+2i 1+0i\n"	" y' = \n -3+12i\n 0+6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+2i 2+0i
+ 1+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	double complex VA[]={ 1+2*I, 2+0*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -6+-6*I, 0+0*I, 0+0*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1+2i 2+0i\n 1+0i 0+0i\n"	" y' = \n -6+-6i\n 0+0i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+2i 0+3i
+ 0+3i 0+4i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 0+3*I, 0+3*I, 0+4*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 0+-15*I, 0+0*I, 3+-21*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+2i 0+3i\n 0+3i 0+4i\n"	" y' = \n 0+-15i\n 3+-21i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+2i 0+0i
+ 1+0i 2+4i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+2*I, 1+0*I, 2+4*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -3+6*I, 0+0*I, -3+12*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+2i 0+0i\n 1+0i 2+4i\n"	" y' = \n -3+6i\n -3+12i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+2i 1+3i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	double complex VA[]={ 1+2*I, 1+3*I, 0+3*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -3+-15*I, 3+-9*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1+2i 1+3i\n 0+3i 0+0i\n"	" y' = \n -3+-15i\n 3+-9i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+2i 1+3i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 0 };
+	double complex VA[]={ 1+2*I, 1+3*I, 0+3*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 0+-15*I, 0+-9*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+2i 1+3i\n 0+3i 0+0i\n"	" y' = \n 0+-15i\n 0+-9i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
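+
+/*
+ * Worked check for the transposed test above: with logical x = (1,1) and
+ * y = (3,3) (incx=2 strides over the padding zeros), A^T*x =
+ * ((1+2i)+(0+3i), (1+3i)+(0+0i)) = (1+5i, 1+3i), so
+ * y' = y - 3*A^T*x = (0-15i, 0-9i), which is exactly the hardcoded cy[].
+ */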
+
+	static rsb_err_t tz_sg_de_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+2i 3+0i
+ 1+0i 2+2i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 3+0*I, 1+0*I, 2+2*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -3+6*I, -12+6*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+2i 3+0i\n 1+0i 2+2i\n"	" y' = \n -3+6i\n -12+6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+2i 0+2i
+ 0+2i 3+4i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 0+2*I, 0+2*I, 3+4*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 0+-12*I, 0+0*I, -6+-18*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1+2i 0+2i\n 0+2i 3+4i\n"	" y' = \n 0+-12i\n -6+-18i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+2i 0+2i
+ 1+2i 3+4i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 0+2*I, 1+2*I, 3+4*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -3+-12*I, 0+0*I, -6+-18*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+2i 0+2i\n 1+2i 3+4i\n"	" y' = \n -3+-12i\n -6+-18i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_de_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+2i 3+2i
+ 0+2i 2+2i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=4;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1, 1 };
+	int JA[]={ 0, 1, 0, 1 };
+	double complex VA[]={ 1+2*I, 3+2*I, 0+2*I, 2+2*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 0+12*I, 0+0*I, -12+12*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+2i 3+2i\n 0+2i 2+2i\n"	" y' = \n 0+12i\n -12+12i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
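+/*
+ * The ussv tests below exercise the in-place triangular solve
+ * y <- alpha*inv(op(A))*y of BLAS_sussv(); each matrix is declared upper
+ * triangular via BLAS_ussp(A,blas_upper_triangular) before the entries are
+ * inserted.  The otherwise unused x[] array apparently records the reference
+ * input vector, mirroring the usmv tests, while cy[] holds the expected
+ * solution.
+ */
+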
+	static rsb_err_t ts_su_de_ussv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 1
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float VA[]={ 1, 1, 1 };/* type is float */
+
+	float x[]={ 6, 3 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ 6, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1 1\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n 6\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_de_ussv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };/* type is float */
+
+	float x[]={ 3, 3 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_de_ussv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };/* type is float */
+
+	float x[]={ 3, 3 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_de_ussv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 1
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float VA[]={ 1, 1, 1 };/* type is float */
+
+	float x[]={ 6, 0, 3, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ 6, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1 1\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n 6\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_de_ussv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 5
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float VA[]={ 1, 5, 1 };/* type is float */
+
+	float x[]={ 3, 0, 18, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 18, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1 5\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 18\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_de_ussv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 1
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float VA[]={ 1, 1, 1 };/* type is float */
+
+	float x[]={ 3, 0, 6, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 6, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1 1\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 6\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_de_ussv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 1
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float VA[]={ 1, 1, 1 };/* type is float */
+
+	float x[]={ 2, 1 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ 2, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1 1\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n 2\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_de_ussv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };/* type is float */
+
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ 1, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_de_ussv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };/* type is float */
+
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ 1, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_de_ussv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };/* type is float */
+
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ 1, 0, 1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_de_ussv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };/* type is float */
+
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ 1, 0, 1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_de_ussv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 3
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float VA[]={ 1, 3, 1 };/* type is float */
+
+	float x[]={ 1, 0, 4, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ 1, 0, 4, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1 3\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n 1\n 4\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_de_ussv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };/* type is float */
+
+	float x[]={ -1, -1 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ -1, -1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_de_ussv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };/* type is float */
+
+	float x[]={ -1, -1 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ -1, -1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_de_ussv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 1
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float VA[]={ 1, 1, 1 };/* type is float */
+
+	float x[]={ -1, -2 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ -1, -2 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1 1\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n -1\n -2\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_de_ussv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };/* type is float */
+
+	float x[]={ -1, 0, -1, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ -1, 0, -1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_de_ussv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 1
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float VA[]={ 1, 1, 1 };/* type is float */
+
+	float x[]={ -1, 0, -2, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ -1, 0, -2, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1 1\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n -1\n -2\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_de_ussv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 3
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float VA[]={ 1, 3, 1 };/* type is float */
+
+	float x[]={ -1, 0, -4, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ -1, 0, -4, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1 3\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n -1\n -4\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_de_ussv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };/* type is float */
+
+	float x[]={ -3, -3 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ -3, -3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_de_ussv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 1
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float VA[]={ 1, 1, 1 };/* type is float */
+
+	float x[]={ -3, -6 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ -3, -6 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1 1\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n -3\n -6\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_de_ussv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 4
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float VA[]={ 1, 4, 1 };/* type is float */
+
+	float x[]={ -3, -15 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ -3, -15 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1 4\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n -3\n -15\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_de_ussv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };/* type is float */
+
+	float x[]={ -3, 0, -3, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ -3, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_de_ussv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };/* type is float */
+
+	float x[]={ -3, 0, -3, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ -3, 0, -3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_de_ussv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };/* type is float */
+
+	float x[]={ -3, 0, -3, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ -3, 0, -3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 3
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 3, 1 };/* type is double */
+
+	double x[]={ 12, 3 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ 12, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1 3\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n 12\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };/* type is double */
+
+	double x[]={ 3, 3 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 3
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 3, 1 };/* type is double */
+
+	double x[]={ 3, 12 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ 3, 12 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1 3\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 12\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 1
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 1, 1 };/* type is double */
+
+	double x[]={ 6, 0, 3, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ 6, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1 1\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n 6\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 1
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 1, 1 };/* type is double */
+
+	double x[]={ 3, 0, 6, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ 3, 0, 6, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1 1\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 6\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 1
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 1, 1 };/* type is double */
+
+	double x[]={ 3, 0, 6, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ 3, 0, 6, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1 1\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 6\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 3
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 3, 1 };/* type is double */
+
+	double x[]={ 4, 1 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ 4, 1 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1 3\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n 4\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 4
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 4, 1 };/* type is double */
+
+	double x[]={ 1, 5 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ 1, 5 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1 4\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n 1\n 5\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 1
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 1, 1 };/* type is double */
+
+	double x[]={ 1, 2 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ 1, 2 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1 1\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n 1\n 2\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 3
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 3, 1 };/* type is double */
+
+	double x[]={ 4, 0, 1, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ 4, 0, 1, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1 3\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n 4\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 4
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 4, 1 };/* type is double */
+
+	double x[]={ 1, 0, 5, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ 1, 0, 5, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1 4\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n 1\n 5\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 2
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 2, 1 };/* type is double */
+
+	double x[]={ 1, 0, 3, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ 1, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1 2\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n 1\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };/* type is double */
+
+	double x[]={ -1, -1 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ -1, -1 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };/* type is double */
+
+	double x[]={ -1, -1 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ -1, -1 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };/* type is double */
+
+	double x[]={ -1, -1 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ -1, -1 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };/* type is double */
+
+	double x[]={ -1, 0, -1, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ -1, 0, -1, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 2
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 2, 1 };/* type is double */
+
+	double x[]={ -1, 0, -3, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ -1, 0, -3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1 2\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n -1\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 1
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 1, 1 };/* type is double */
+
+	double x[]={ -1, 0, -2, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ -1, 0, -2, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1 1\n 0 1\n"	" y' = \n 1\n 1\n"	" y = \n -1\n -2\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };/* type is double */
+
+	double x[]={ -3, -3 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ -3, -3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 1
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 1, 1 };/* type is double */
+
+	double x[]={ -3, -6 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ -3, -6 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1 1\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n -3\n -6\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 2
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 2, 1 };/* type is double */
+
+	double x[]={ -3, -9 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ -3, -9 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1 2\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n -3\n -9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };/* type is double */
+
+	double x[]={ -3, 0, -3, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ -3, 0, -3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 2
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double VA[]={ 1, 2, 1 };/* type is double */
+
+	double x[]={ -3, 0, -9, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ -3, 0, -9, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1 2\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n -3\n -9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_de_ussv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };/* type is double */
+
+	double x[]={ -3, 0, -3, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ -3, 0, -3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_su_de_ussv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1, 1 };/* type is float complex */
+
+	float complex x[]={ 3, 3 };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_su_de_ussv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1 2
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float complex VA[]={ 1, 2, 1 };/* type is float complex */
+
+	float complex x[]={ 3, 9 };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ 3, 9 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1 2\n 0 1\n"	" y = \n 3\n 9\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_su_de_ussv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+0i 3+2i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float complex VA[]={ 1+0*I, 3+2*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ 3+0*I, 12-6*I };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ 3+0*I, 12-6*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1+0i 3+2i\n 0+0i 1+0i\n"	" y = \n 3+0i\n 12-6i\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
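+/* The alpha=3 'c' solves again, now with stride incx=incy=2: logical
+ * vector elements are interleaved with padding entries (zeros here). */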
+static rsb_err_t tc_su_de_ussv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+0i 1+1i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float complex VA[]={ 1+0*I, 1+1*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ 6+3*I, 0+0*I, 3+0*I, 0+0*I };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ 6+3*I, 0+0*I, 3+0*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1+0i 1+1i\n 0+0i 1+0i\n"	" y = \n 6+3i\n 3+0i\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_su_de_ussv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+2i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float complex VA[]={ 1+0*I, 0+2*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ 3+0*I, 0+0*I, 3+6*I, 0+0*I };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ 3+0*I, 0+0*I, 3+6*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1+0i 0+2i\n 0+0i 1+0i\n"	" y = \n 3+0i\n 3+6i\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_su_de_ussv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+5i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float complex VA[]={ 1+0*I, 0+5*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ 3+0*I, 0+0*I, 3-15*I, 0+0*I };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ 3+0*I, 0+0*I, 3-15*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1+0i 0+5i\n 0+0i 1+0i\n"	" y = \n 3+0i\n 3-15i\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
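+/* alpha=1 'c' solves at unit stride; in the trans=n case A is the
+ * identity here, so the solve must leave y unchanged. */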
+static rsb_err_t tc_su_de_ussv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1, 1 };/* type is float complex */
+
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ 1, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_su_de_ussv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1 1
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float complex VA[]={ 1, 1, 1 };/* type is float complex */
+
+	float complex x[]={ 1, 2 };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ 1, 2 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1 1\n 0 1\n"	" y = \n 1\n 2\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_su_de_ussv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1 1
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float complex VA[]={ 1, 1, 1 };/* type is float complex */
+
+	float complex x[]={ 1, 2 };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ 1, 2 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1 1\n 0 1\n"	" y = \n 1\n 2\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
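+/* alpha=1 'c' solves with stride incx=incy=2. */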
+static rsb_err_t tc_su_de_ussv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1, 1 };/* type is float complex */
+
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ 1, 0, 1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_su_de_ussv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+0i 3+1i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float complex VA[]={ 1+0*I, 3+1*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ 1+0*I, 0+0*I, 4+1*I, 0+0*I };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ 1+0*I, 0+0*I, 4+1*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1+0i 3+1i\n 0+0i 1+0i\n"	" y = \n 1+0i\n 4+1i\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_su_de_ussv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+0i 1+2i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float complex VA[]={ 1+0*I, 1+2*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ 1+0*I, 0+0*I, 2-2*I, 0+0*I };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ 1+0*I, 0+0*I, 2-2*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1+0i 1+2i\n 0+0i 1+0i\n"	" y = \n 1+0i\n 2-2i\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
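+/* alpha=-1 'c' solves at unit stride. */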
+static rsb_err_t tc_su_de_ussv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+3i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float complex VA[]={ 1+0*I, 0+3*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ -1-3*I, -1+0*I };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ -1-3*I, -1+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1+0i 0+3i\n 0+0i 1+0i\n"	" y = \n -1-3i\n -1+0i\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_su_de_ussv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+2i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float complex VA[]={ 1+0*I, 0+2*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ -1+0*I, -1-2*I };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ -1+0*I, -1-2*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1+0i 0+2i\n 0+0i 1+0i\n"	" y = \n -1+0i\n -1-2i\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_su_de_ussv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 2+3i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float complex VA[]={ 1+0*I, 2+3*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ -1+0*I, -3+3*I };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ -1+0*I, -3+3*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1+0i 2+3i\n 0+0i 1+0i\n"	" y = \n -1+0i\n -3+3i\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
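+/* alpha=-1 'c' solves with stride incx=incy=2. */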
+static rsb_err_t tc_su_de_ussv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1 1
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float complex VA[]={ 1, 1, 1 };/* type is float complex */
+
+	float complex x[]={ -2, 0, -1, 0 };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ -2, 0, -1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1 1\n 0 1\n"	" y = \n -2\n -1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_su_de_ussv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 3+1i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float complex VA[]={ 1+0*I, 3+1*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ -1+0*I, 0+0*I, -4-1*I, 0+0*I };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ -1+0*I, 0+0*I, -4-1*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1+0i 3+1i\n 0+0i 1+0i\n"	" y = \n -1+0i\n -4-1i\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_su_de_ussv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1 3
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float complex VA[]={ 1, 3, 1 };/* type is float complex */
+
+	float complex x[]={ -1, 0, -4, 0 };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ -1, 0, -4, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1 3\n 0 1\n"	" y = \n -1\n -4\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
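+/* alpha=-3 'c' solves at unit stride. */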
+static rsb_err_t tc_su_de_ussv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1 1
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float complex VA[]={ 1, 1, 1 };/* type is float complex */
+
+	float complex x[]={ -6, -3 };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ -6, -3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 1\n 0 1\n"	" y = \n -6\n -3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_su_de_ussv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+1i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float complex VA[]={ 1+0*I, 0+1*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ -3+0*I, -3-3*I };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ -3+0*I, -3-3*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1+0i 0+1i\n 0+0i 1+0i\n"	" y = \n -3+0i\n -3-3i\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_su_de_ussv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1, 1 };/* type is float complex */
+
+	float complex x[]={ -3, -3 };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ -3, -3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n -3\n -3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
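+/* alpha=-3 'c' solves with stride incx=incy=2. */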
+static rsb_err_t tc_su_de_ussv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1, 1 };/* type is float complex */
+
+	float complex x[]={ -3, 0, -3, 0 };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ -3, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n -3\n -3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_su_de_ussv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1, 1 };/* type is float complex */
+
+	float complex x[]={ -3, 0, -3, 0 };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ -3, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n -3\n -3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_su_de_ussv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+0i 2+2i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	float complex VA[]={ 1+0*I, 2+2*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ -3+0*I, 0+0*I, -9+6*I, 0+0*I };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ -3+0*I, 0+0*I, -9+6*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1+0i 2+2i\n 0+0i 1+0i\n"	" y = \n -3+0i\n -9+6i\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
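+/* From here on the same generated grid is repeated for the double
+ * complex type ('z'): BLAS_zuscr_begin()/BLAS_zuscr_insert_entries()/
+ * BLAS_zuscr_end() build the handle and BLAS_zussv() solves in place,
+ * again taking the complex alpha by address, e.g. (a sketch reusing
+ * the names declared in the bodies below):
+ *
+ *   double complex alpha = 3;
+ *   BLAS_zussv(blas_no_trans,&alpha,A,y,incx);   y <- 3 * inv(A) * y
+ */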
+static rsb_err_t tz_su_de_ussv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1 1
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double complex VA[]={ 1, 1, 1 };/* type is double complex */
+
+	double complex x[]={ 6, 3 };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ 6, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1 1\n 0 1\n"	" y = \n 6\n 3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tz_su_de_ussv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+2i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double complex VA[]={ 1+0*I, 0+2*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ 3+0*I, 3+6*I };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ 3+0*I, 3+6*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1+0i 0+2i\n 0+0i 1+0i\n"	" y = \n 3+0i\n 3+6i\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
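+
+/* Worked check for the trans=t case above: A = [ 1 2i ; 0 1 ] gives
+ * A^T = [ 1 0 ; 2i 1 ], and forward substitution on A^T z = y with
+ * y = { 3, 3+6i } yields z = { 3, (3+6i) - 2i*3 } = { 3, 3 };
+ * scaling by alpha = 3 reproduces cy = { 9, 9 }. */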
+
+	static rsb_err_t tz_su_de_ussv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+2i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double complex VA[]={ 1+0*I, 0+2*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ 3+0*I, 3+-6*I };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ 3+0*I, 3+-6*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1+0i 0+2i\n 0+0i 1+0i\n"	" y = \n 3+0i\n 3+-6i\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
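+
+/* The trans=c case above differs from trans=t only in conjugating the
+ * off-diagonal entry: A^H = [ 1 0 ; -2i 1 ], so solving A^H z = y with
+ * y = { 3, 3-6i } yields z = { 3, (3-6i) + 2i*3 } = { 3, 3 }, and
+ * alpha = 3 again gives cy = { 9, 9 }. */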
+
+	static rsb_err_t tz_su_de_ussv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1 3
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double complex VA[]={ 1, 3, 1 };/* type is double complex */
+
+	double complex x[]={ 12, 0, 3, 0 };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ 12, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1 3\n 0 1\n"	" y = \n 12\n 3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
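+
+/* The incx=2/incy=2 variants exercise strided vectors: x, y and cy are
+ * twice as long, only the even-indexed elements take part in the solve,
+ * and the interleaved zeros are stride padding.  The same increments are
+ * forwarded to rsb__do_are_same() and rsb__debug_print_vectors_diff()
+ * for the comparison. */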
+
+	static rsb_err_t tz_su_de_ussv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 1+2i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double complex VA[]={ 1+0*I, 1+2*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ 3+0*I, 0+0*I, 6+6*I, 0+0*I };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ 3+0*I, 0+0*I, 6+6*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1+0i 1+2i\n 0+0i 1+0i\n"	" y = \n 3+0i\n 6+6i\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_de_ussv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+3i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double complex VA[]={ 1+0*I, 0+3*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ 3+0*I, 0+0*I, 3+-9*I, 0+0*I };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ 3+0*I, 0+0*I, 3+-9*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1+0i 0+3i\n 0+0i 1+0i\n"	" y = \n 3+0i\n 3+-9i\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
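+
+/* The same trans/stride sweep is repeated below with alpha = 1, -1 and -3;
+ * judging by the generated names, the apN / anrN infixes encode the value
+ * of alpha (ap3 for +3, anr1 for -1, and so on). */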
+
+	static rsb_err_t tz_su_de_ussv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+2i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double complex VA[]={ 1+0*I, 0+2*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ 1+2*I, 1+0*I };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ 1+2*I, 1+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1+0i 0+2i\n 0+0i 1+0i\n"	" y = \n 1+2i\n 1+0i\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_de_ussv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 1+3i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double complex VA[]={ 1+0*I, 1+3*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ 1+0*I, 2+3*I };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ 1+0*I, 2+3*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1+0i 1+3i\n 0+0i 1+0i\n"	" y = \n 1+0i\n 2+3i\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_de_ussv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 1+1i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double complex VA[]={ 1+0*I, 1+1*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ 1+0*I, 2+-1*I };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ 1+0*I, 2+-1*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1+0i 1+1i\n 0+0i 1+0i\n"	" y = \n 1+0i\n 2+-1i\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_de_ussv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1 2
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double complex VA[]={ 1, 2, 1 };/* type is double complex */
+
+	double complex x[]={ 3, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1 2\n 0 1\n"	" y = \n 3\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_de_ussv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+5i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double complex VA[]={ 1+0*I, 0+5*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ 1+0*I, 0+0*I, 1+5*I, 0+0*I };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ 1+0*I, 0+0*I, 1+5*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1+0i 0+5i\n 0+0i 1+0i\n"	" y = \n 1+0i\n 1+5i\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_de_ussv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 3+1i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double complex VA[]={ 1+0*I, 3+1*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ 1+0*I, 0+0*I, 4+-1*I, 0+0*I };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ 1+0*I, 0+0*I, 4+-1*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1+0i 3+1i\n 0+0i 1+0i\n"	" y = \n 1+0i\n 4+-1i\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_de_ussv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+1i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double complex VA[]={ 1+0*I, 0+1*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ -1+-1*I, -1+0*I };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ -1+-1*I, -1+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1+0i 0+1i\n 0+0i 1+0i\n"	" y = \n -1+-1i\n -1+0i\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_de_ussv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+0i 2+1i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double complex VA[]={ 1+0*I, 2+1*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ -1+0*I, -3+-1*I };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ -1+0*I, -3+-1*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1+0i 2+1i\n 0+0i 1+0i\n"	" y = \n -1+0i\n -3+-1i\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_de_ussv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };/* type is double complex */
+
+	double complex x[]={ -1, -1 };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ -1, -1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n -1\n -1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_de_ussv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1 1
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double complex VA[]={ 1, 1, 1 };/* type is double complex */
+
+	double complex x[]={ -2, 0, -1, 0 };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ -2, 0, -1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1 1\n 0 1\n"	" y = \n -2\n -1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_de_ussv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1 2
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double complex VA[]={ 1, 2, 1 };/* type is double complex */
+
+	double complex x[]={ -1, 0, -3, 0 };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ -1, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1 2\n 0 1\n"	" y = \n -1\n -3\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_de_ussv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1 2
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double complex VA[]={ 1, 2, 1 };/* type is double complex */
+
+	double complex x[]={ -1, 0, -3, 0 };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ -1, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1 2\n 0 1\n"	" y = \n -1\n -3\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_de_ussv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };/* type is double complex */
+
+	double complex x[]={ -3, -3 };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ -3, -3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n -3\n -3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_de_ussv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+1i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double complex VA[]={ 1+0*I, 0+1*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ -3+0*I, -3+-3*I };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ -3+0*I, -3+-3*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1+0i 0+1i\n 0+0i 1+0i\n"	" y = \n -3+0i\n -3+-3i\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_de_ussv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 1+1i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double complex VA[]={ 1+0*I, 1+1*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ -3+0*I, -6+3*I };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ -3+0*I, -6+3*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1+0i 1+1i\n 0+0i 1+0i\n"	" y = \n -3+0i\n -6+3i\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_de_ussv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:n kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 3+1i
+ 0+0i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double complex VA[]={ 1+0*I, 3+1*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ -12+-3*I, 0+0*I, -3+0*I, 0+0*I };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ -12+-3*I, 0+0*I, -3+0*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1+0i 3+1i\n 0+0i 1+0i\n"	" y = \n -12+-3i\n -3+0i\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_de_ussv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:t kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1 1
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 0, 1 };
+	int JA[]={ 0, 1, 1 };
+	double complex VA[]={ 1, 1, 1 };/* type is double complex */
+
+	double complex x[]={ -3, 0, -6, 0 };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ -3, 0, -6, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1 1\n 0 1\n"	" y = \n -3\n -6\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_de_ussv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:c kind:u; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };/* type is double complex */
+
+	double complex x[]={ -3, 0, -3, 0 };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ -3, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n -3\n -3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
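+
+/* From here on the suite switches from double complex ('z') upper-triangular
+ * systems to float ('s') lower-triangular ones.  Note the calling
+ * convention: the complex BLAS_zussv() receives alpha by pointer (&alpha),
+ * whereas the real BLAS_sussv() receives it by value. */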
+
+	static rsb_err_t ts_sl_de_ussv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 3, 1 };/* type is float */
+
+	float x[]={ 3, 12 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ 3, 12 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1 0\n 3 1\n"	" y = \n 3\n 12\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
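+
+/* Sanity check for the lower-triangular no-transpose case above: solving
+ * A z = y with A = [ 1 0 ; 3 1 ] and y = { 3, 12 } yields
+ * z = { 3, 12 - 3*3 } = { 3, 3 }, and alpha = 3 scales it back to
+ * cy = { 9, 9 }. */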
+
+	static rsb_err_t ts_sl_de_ussv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 3, 1 };/* type is float */
+
+	float x[]={ 12, 3 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ 12, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1 0\n 3 1\n"	" y = \n 12\n 3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_de_ussv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 2, 1 };/* type is float */
+
+	float x[]={ 9, 3 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ 9, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1 0\n 2 1\n"	" y = \n 9\n 3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_de_ussv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 3, 1 };/* type is float */
+
+	float x[]={ 3, 0, 12, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 12, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1 0\n 3 1\n"	" y = \n 3\n 12\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
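+/*
+ * In the incx=2/incy=2 variants, as in the test above, the vectors are
+ * laid out with stride two: only elements 0 and 2 of x, y and cy take
+ * part in the solve, while the interleaved zeros are padding.
+ * rsb__do_are_same() and rsb__debug_print_vectors_diff() are passed the
+ * same strides, so the comparison walks the vectors the same way.
+ */
+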
+	static rsb_err_t ts_sl_de_ussv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };/* type is float */
+
+	float x[]={ 3, 0, 3, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_de_ussv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 1 };/* type is float */
+
+	float x[]={ 6, 0, 3, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ 6, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 9\n 9\n"	" y' = \n 6\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_de_ussv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 1 };/* type is float */
+
+	float x[]={ 1, 2 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ 1, 2 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 2\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_de_ussv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 2, 1 };/* type is float */
+
+	float x[]={ 3, 1 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ 3, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1 0\n 2 1\n"	" y = \n 1\n 1\n"	" y' = \n 3\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_de_ussv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 1 };/* type is float */
+
+	float x[]={ 2, 1 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ 2, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 1\n 1\n"	" y' = \n 2\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_de_ussv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };/* type is float */
+
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ 1, 0, 1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_de_ussv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };/* type is float */
+
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ 1, 0, 1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_de_ussv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 2, 1 };/* type is float */
+
+	float x[]={ 3, 0, 1, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ 3, 0, 1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1 0\n 2 1\n"	" y = \n 1\n 1\n"	" y' = \n 3\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_de_ussv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 1 };/* type is float */
+
+	float x[]={ -1, -2 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ -1, -2 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -2\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_de_ussv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 3, 1 };/* type is float */
+
+	float x[]={ -4, -1 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ -4, -1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1 0\n 3 1\n"	" y = \n 1\n 1\n"	" y' = \n -4\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_de_ussv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };/* type is float */
+
+	float x[]={ -1, -1 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ -1, -1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_de_ussv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 1 };/* type is float */
+
+	float x[]={ -1, 0, -2, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ -1, 0, -2, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -2\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_de_ussv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };/* type is float */
+
+	float x[]={ -1, 0, -1, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ -1, 0, -1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_de_ussv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };/* type is float */
+
+	float x[]={ -1, 0, -1, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ -1, 0, -1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_de_ussv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 1 };/* type is float */
+
+	float x[]={ -3, -6 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ -3, -6 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -6\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_de_ussv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };/* type is float */
+
+	float x[]={ -3, -3 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ -3, -3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_de_ussv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };/* type is float */
+
+	float x[]={ -3, -3 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ -3, -3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_de_ussv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 2, 1 };/* type is float */
+
+	float x[]={ -3, 0, -9, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ -3, 0, -9, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 0\n 2 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_de_ussv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };/* type is float */
+
+	float x[]={ -3, 0, -3, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ -3, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_de_ussv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };/* type is float */
+
+	float x[]={ -3, 0, -3, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ -3, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
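+/*
+ * From here the same generated pattern repeats for the double ('d')
+ * type: BLAS_duscr_begin()/BLAS_duscr_insert_entries()/BLAS_duscr_end()
+ * and BLAS_dussv() replace the float 's' entry points, and the result
+ * checks pass 'D' as the type character.
+ */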
+	static rsb_err_t td_sl_de_ussv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };/* type is double */
+
+	double x[]={ 3, 3 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_de_ussv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };/* type is double */
+
+	double x[]={ 3, 3 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_de_ussv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };/* type is double */
+
+	double x[]={ 3, 3 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_de_ussv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };/* type is double */
+
+	double x[]={ 3, 0, 3, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_de_ussv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };/* type is double */
+
+	double x[]={ 3, 0, 3, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_de_ussv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 3, 1 };/* type is double */
+
+	double x[]={ 12, 0, 3, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ 12, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1 0\n 3 1\n"	" y = \n 9\n 9\n"	" y' = \n 12\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
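+/*
+ * For the real (double) type, blas_conj_trans is numerically identical to
+ * blas_trans -- conjugation is a no-op on real data -- so the trans=c tests
+ * of this type exercise the code path rather than a distinct result.
+ */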
+	static rsb_err_t td_sl_de_ussv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };/* type is double */
+
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ 1, 1 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_de_ussv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };/* type is double */
+
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ 1, 1 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_de_ussv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };/* type is double */
+
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ 1, 1 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_de_ussv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };/* type is double */
+
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ 1, 0, 1, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_de_ussv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 5 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 5, 1 };/* type is double */
+
+	double x[]={ 6, 0, 1, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ 6, 0, 1, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1 0\n 5 1\n"	" y = \n 1\n 1\n"	" y' = \n 6\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_de_ussv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };/* type is double */
+
+	double x[]={ 2, 0, 1, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ 2, 0, 1, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 1\n 1\n"	" y' = \n 2\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_de_ussv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };/* type is double */
+
+	double x[]={ -1, -2 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ -1, -2 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -2\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_de_ussv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };/* type is double */
+
+	double x[]={ -2, -1 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ -2, -1 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 1\n 1\n"	" y' = \n -2\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_de_ussv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };/* type is double */
+
+	double x[]={ -2, -1 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ -2, -1 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 1\n 1\n"	" y' = \n -2\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_de_ussv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 3, 1 };/* type is double */
+
+	double x[]={ -1, 0, -4, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ -1, 0, -4, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1 0\n 3 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -4\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_de_ussv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };/* type is double */
+
+	double x[]={ -1, 0, -1, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ -1, 0, -1, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_de_ussv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };/* type is double */
+
+	double x[]={ -1, 0, -1, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ -1, 0, -1, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_de_ussv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };/* type is double */
+
+	double x[]={ -3, -3 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ -3, -3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_de_ussv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 4 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 4, 1 };/* type is double */
+
+	double x[]={ -15, -3 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ -15, -3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1 0\n 4 1\n"	" y = \n 9\n 9\n"	" y' = \n -15\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
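+/*
+ * Worked check of the test above, from its hardcoded data: A = [1 0; 4 1],
+ * so op(A) = A^T = [1 4; 0 1] and A^-T = [1 -4; 0 1].  With the initial
+ * y = (-15, -3):  alpha * A^-T * y = -3 * (-15 - 4*(-3), -3)
+ * = -3 * (-3, -3) = (9, 9), which is exactly cy.
+ */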
+	static rsb_err_t td_sl_de_ussv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 6 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 6, 1 };/* type is double */
+
+	double x[]={ -21, -3 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ -21, -3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1 0\n 6 1\n"	" y = \n 9\n 9\n"	" y' = \n -21\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_de_ussv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 3, 1 };/* type is double */
+
+	double x[]={ -3, 0, -12, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ -3, 0, -12, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 0\n 3 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -12\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_de_ussv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };/* type is double */
+
+	double x[]={ -3, 0, -3, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ -3, 0, -3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_de_ussv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };/* type is double */
+
+	double x[]={ -3, 0, -3, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ -3, 0, -3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sl_de_ussv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 1+1*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ 3+0*I, 6+3*I };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ 3+0*I, 6+3*I };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1+0i 0+0i\n 1+1i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n 3+0i\n 6+3i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
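+/*
+ * These complex-typed tests differ from the double-typed ones above in two
+ * ways: the scalar alpha is passed by address (BLAS_cussv(transT,&alpha,...))
+ * rather than by value, as the Sparse BLAS C binding specifies for complex
+ * scalars, and entries are built with the C99 imaginary unit I (presumably
+ * <complex.h> is included earlier in this file).  For trans=c
+ * (blas_conj_trans) the solve uses the conjugate transpose A^H, so imaginary
+ * parts change sign relative to the trans=t tests.
+ */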
+	static rsb_err_t tc_sl_de_ussv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1, 1 };/* type is float complex */
+
+	float complex x[]={ 3, 3 };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sl_de_ussv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+1*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ 3-3*I, 3+0*I };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ 3-3*I, 3+0*I };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1+0i 0+0i\n 0+1i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n 3-3i\n 3+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sl_de_ussv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1, 1, 1 };/* type is float complex */
+
+	float complex x[]={ 3, 0, 6, 0 };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 6, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 6\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sl_de_ussv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1, 3, 1 };/* type is float complex */
+
+	float complex x[]={ 12, 0, 3, 0 };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ 12, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1 0\n 3 1\n"	" y = \n 9\n 9\n"	" y' = \n 12\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sl_de_ussv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 1+2*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ 6-6*I, 0+0*I, 3+0*I, 0+0*I };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ 6-6*I, 0+0*I, 3+0*I, 0+0*I };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1+0i 0+0i\n 1+2i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n 6-6i\n 3+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
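+/*
+ * Worked check of the conjugate-transpose test above: A = [1 0; 1+2i 1], so
+ * A^H = [1 1-2i; 0 1] and A^-H = [1 -(1-2i); 0 1].  With the strided initial
+ * y = (6-6i, 3):  3 * A^-H * y = 3 * (6-6i - (1-2i)*3, 3) = 3 * (3, 3)
+ * = (9, 9) = cy; the sign flip on the imaginary part comes from the
+ * conjugation of the off-diagonal entry.
+ */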
+	static rsb_err_t tc_sl_de_ussv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 2+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 2+2*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ 1+0*I, 3+2*I };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ 1+0*I, 3+2*I };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1+0i 0+0i\n 2+2i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n 1+0i\n 3+2i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sl_de_ussv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1, 2, 1 };/* type is float complex */
+
+	float complex x[]={ 3, 1 };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ 3, 1 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1 0\n 2 1\n"	" y = \n 1\n 1\n"	" y' = \n 3\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sl_de_ussv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1, 1 };/* type is float complex */
+
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ 1, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
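+/*
+ * In the incx=2/incy=2 cases below, the logical 2-element vectors are
+ * laid out with stride 2: the even-indexed entries of x, y and cy are
+ * the actual operands, while the interleaved zeros are padding that the
+ * strided solve and the strided comparison skip over.
+ */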
+	static rsb_err_t tc_sl_de_ussv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+1*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ 1+0*I, 0+0*I, 1+1*I, 0+0*I };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ 1+0*I, 0+0*I, 1+1*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1+0i 0+0i\n 0+1i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n 1+0i\n 1+1i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sl_de_ussv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+2*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ 1+2*I, 0+0*I, 1+0*I, 0+0*I };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ 1+2*I, 0+0*I, 1+0*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1+0i 0+0i\n 0+2i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n 1+2i\n 1+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sl_de_ussv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1, 3, 1 };/* type is float complex */
+
+	float complex x[]={ 4, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ 4, 0, 1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1 0\n 3 1\n"	" y = \n 1\n 1\n"	" y' = \n 4\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sl_de_ussv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1, 1, 1 };/* type is float complex */
+
+	float complex x[]={ -1, -2 };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ -1, -2 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -2\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sl_de_ussv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+1*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ -1+-1*I, -1+0*I };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ -1+-1*I, -1+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1+0i 0+0i\n 0+1i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n -1+-1i\n -1+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sl_de_ussv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+4i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 1+4*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ -2+4*I, -1+0*I };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ -2+4*I, -1+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1+0i 0+0i\n 1+4i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n -2+4i\n -1+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sl_de_ussv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1, 3, 1 };/* type is float complex */
+
+	float complex x[]={ -1, 0, -4, 0 };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ -1, 0, -4, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1 0\n 3 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -4\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sl_de_ussv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1, 1, 1 };/* type is float complex */
+
+	float complex x[]={ -2, 0, -1, 0 };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ -2, 0, -1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 1\n 1\n"	" y' = \n -2\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sl_de_ussv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+4i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 1+4*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ -2+4*I, 0+0*I, -1+0*I, 0+0*I };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ -2+4*I, 0+0*I, -1+0*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1+0i 0+0i\n 1+4i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n -2+4i\n -1+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sl_de_ussv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1, 1 };/* type is float complex */
+
+	float complex x[]={ -3, -3 };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ -3, -3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sl_de_ussv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+2*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ -3+-6*I, -3+0*I };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ -3+-6*I, -3+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1+0i 0+0i\n 0+2i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n -3+-6i\n -3+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sl_de_ussv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+4i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+4*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ -3+12*I, -3+0*I };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ -3+12*I, -3+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1+0i 0+0i\n 0+4i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n -3+12i\n -3+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sl_de_ussv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1, 1 };/* type is float complex */
+
+	float complex x[]={ -3, 0, -3, 0 };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ -3, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sl_de_ussv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1, 1, 1 };/* type is float complex */
+
+	float complex x[]={ -6, 0, -3, 0 };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ -6, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 9\n 9\n"	" y' = \n -6\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sl_de_ussv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+3*I, 1+0*I };/* type is float complex */
+
+	float complex x[]={ -3+9*I, 0+0*I, -3+0*I, 0+0*I };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ -3+9*I, 0+0*I, -3+0*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1+0i 0+0i\n 0+3i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n -3+9i\n -3+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
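+/*
+ * From here on, the same lower-triangular ussv cases are repeated for
+ * type 'z' (double complex), using the BLAS_zuscr_* constructors and
+ * BLAS_zussv in place of their single-precision 'c' counterparts.
+ */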
+	static rsb_err_t tz_sl_de_ussv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };/* type is double complex */
+
+	double complex x[]={ 3, 3 };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_de_ussv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1, 2, 1 };/* type is double complex */
+
+	double complex x[]={ 9, 3 };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ 9, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1 0\n 2 1\n"	" y = \n 9\n 9\n"	" y' = \n 9\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_de_ussv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };/* type is double complex */
+
+	double complex x[]={ 3, 3 };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_de_ussv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 1+1*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ 3+0*I, 0+0*I, 6+3*I, 0+0*I };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ 3+0*I, 0+0*I, 6+3*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1+0i 0+0i\n 1+1i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n 3+0i\n 6+3i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_de_ussv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+2*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ 3+6*I, 0+0*I, 3+0*I, 0+0*I };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ 3+6*I, 0+0*I, 3+0*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1+0i 0+0i\n 0+2i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n 3+6i\n 3+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_de_ussv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1, 1, 1 };/* type is double complex */
+
+	double complex x[]={ 6, 0, 3, 0 };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ 6, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 9\n 9\n"	" y' = \n 6\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_de_ussv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 2+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 2+1*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ 1+0*I, 3+1*I };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ 1+0*I, 3+1*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1+0i 0+0i\n 2+1i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n 1+0i\n 3+1i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_de_ussv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 2+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 2+1*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ 3+1*I, 1+0*I };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ 3+1*I, 1+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1+0i 0+0i\n 2+1i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n 3+1i\n 1+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_de_ussv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };/* type is double complex */
+
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ 1, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_de_ussv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 6+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 6+2*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ 1+0*I, 0+0*I, 7+2*I, 0+0*I };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ 1+0*I, 0+0*I, 7+2*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1+0i 0+0i\n 6+2i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n 1+0i\n 7+2i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
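+
+/*
+ * Editorial note and sketch, not generator output: in these ussv tests y[]
+ * is the right-hand side passed to BLAS_zussv() and cy[] is the expected
+ * content of y afterwards; the lsc banner prints the expected result under
+ * the label "y" and the right-hand side under "y'".  The hardcoded values
+ * follow from forward substitution on the unit-diagonal lower triangular
+ * system; for the test above (assuming only <complex.h>):
+ */
+#if 0 /* reference-only check, kept out of the build */
+#include <complex.h>
+static int fwd_subst_check(void)	/* hypothetical helper name */
+{
+	const double complex a21 = 6+2*I;
+	double complex y[] = { 1+0*I, 0, 7+2*I, 0 };	/* stride 2, as above */
+	/* solve [ 1 0 ; a21 1 ] * y' = y in place:
+	 * y'[0] = y[0];  y'[1] = y[1] - a21*y'[0] = (7+2i) - (6+2i) = 1 */
+	y[2] -= a21 * y[0];
+	return y[0] == 1 && y[2] == 1;	/* matches cy[] = { 1, 0, 1, 0 } */
+}
+#endif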
+
+	static rsb_err_t tz_sl_de_ussv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 1+1*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ 2+1*I, 0+0*I, 1+0*I, 0+0*I };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ 2+1*I, 0+0*I, 1+0*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1+0i 0+0i\n 1+1i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n 2+1i\n 1+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
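+
+/* Worked check (editorial, not generator output): with transT=blas_trans
+ * the routine solves A^T * y' = y, i.e. back substitution on
+ *   A^T = [ 1  1+1i ]
+ *         [ 0  1    ]
+ * so from the stride-2 right-hand side y = (2+1i, 1):
+ *   y'[1] = 1,   y'[0] = (2+1i) - (1+1i)*1 = 1,
+ * matching the hardcoded cy[] = { 1, 0, 1, 0 }. */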
+
+	static rsb_err_t tz_sl_de_ussv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+1*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ 1-1*I, 0+0*I, 1+0*I, 0+0*I };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ 1-1*I, 0+0*I, 1+0*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1+0i 0+0i\n 0+1i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n 1+-1i\n 1+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
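+
+/* Worked check (editorial): with transT=blas_conj_trans the solve uses the
+ * conjugate transpose, A^H = [ 1 -1i ; 0 1 ], so from y = (1-1i, 1):
+ *   y'[1] = 1,   y'[0] = (1-1i) - (-1i)*1 = 1,
+ * again matching cy[] = { 1, 0, 1, 0 }. */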
+
+	static rsb_err_t tz_sl_de_ussv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };/* type is double complex */
+
+	double complex x[]={ -1, -1 };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ -1, -1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_de_ussv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };/* type is double complex */
+
+	double complex x[]={ -1, -1 };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ -1, -1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_de_ussv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1, 2, 1 };/* type is double complex */
+
+	double complex x[]={ -3, -1 };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ -3, -1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1 0\n 2 1\n"	" y = \n 1\n 1\n"	" y' = \n -3\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
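+
+/* Worked check (editorial): the entries here are real, so A^H = A^T and the
+ * solve is back substitution on [ 1 2 ; 0 1 ], followed by the alpha = -1
+ * scaling:
+ *   inv(A^H) * (-3, -1) = (-3 - 2*(-1), -1) = (-1, -1),
+ *   -1 * (-1, -1) = (1, 1) = cy[]. */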
+
+	static rsb_err_t tz_sl_de_ussv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+3*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ -1+0*I, 0+0*I, -1-3*I, 0+0*I };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ -1+0*I, 0+0*I, -1-3*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1+0i 0+0i\n 0+3i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n -1+0i\n -1+-3i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_de_ussv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };/* type is double complex */
+
+	double complex x[]={ -1, 0, -1, 0 };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ -1, 0, -1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_de_ussv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 1+1*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ -2+1*I, 0+0*I, -1+0*I, 0+0*I };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ -2+1*I, 0+0*I, -1+0*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1+0i 0+0i\n 1+1i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n -2+1i\n -1+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_de_ussv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 3+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 3+3*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ -3+0*I, -12-9*I };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ -3+0*I, -12-9*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1+0i 0+0i\n 3+3i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n -3+0i\n -12+-9i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
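+
+/* Worked check (editorial): alpha scales the solve, y' = -3 * inv(A) * y.
+ * Forward substitution on the right-hand side (-3, -12-9i) gives
+ *   t[0] = -3,   t[1] = (-12-9i) - (3+3i)*(-3) = -3,
+ * and -3 * (-3, -3) = (9, 9) = cy[]. */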
+
+	static rsb_err_t tz_sl_de_ussv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+1*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ -3-3*I, -3+0*I };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ -3-3*I, -3+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1+0i 0+0i\n 0+1i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n -3+-3i\n -3+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_de_ussv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 4+6i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 4+6*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ -15+18*I, -3+0*I };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ -15+18*I, -3+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1+0i 0+0i\n 4+6i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n -15+18i\n -3+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_de_ussv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:n kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };/* type is double complex */
+
+	double complex x[]={ -3, 0, -3, 0 };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ -3, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_de_ussv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:t kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1, 1, 1 };/* type is double complex */
+
+	double complex x[]={ -6, 0, -3, 0 };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ -6, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 9\n 9\n"	" y' = \n -6\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_de_ussv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:c kind:l; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 2+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 2+3*I, 1+0*I };/* type is double complex */
+
+	double complex x[]={ -9+9*I, 0+0*I, -3+0*I, 0+0*I };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ -9+9*I, 0+0*I, -3+0*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1+0i 0+0i\n 2+3i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n -9+9i\n -3+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=e blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
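+
+/*
+ * All of the generated tests above and below share one Sparse BLAS handle
+ * lifecycle: begin -> set property -> insert entries -> end -> operate ->
+ * destroy.  A minimal hand-written sketch of that skeleton in double
+ * precision (assuming <blas_sparse.h>; error handling elided, names
+ * hypothetical):
+ */
+#if 0 /* reference-only sketch, kept out of the build */
+#include <blas_sparse.h>
+static int lifecycle_sketch(void)
+{
+	const int IA[] = { 0, 1, 1 }, JA[] = { 0, 0, 1 };
+	const double VA[] = { 1, 2, 1 };	/* A = [ 1 0 ; 2 1 ] */
+	double x[] = { 1, 1 };
+	blas_sparse_matrix A = BLAS_duscr_begin(2, 2);	/* begin assembly */
+	BLAS_ussp(A, blas_lower_triangular);		/* declare the structure */
+	BLAS_duscr_insert_entries(A, 3, VA, IA, JA);	/* insert COO triples */
+	BLAS_duscr_end(A);				/* finalize assembly */
+	BLAS_dussv(blas_no_trans, 1.0, A, x, 1);	/* x <- inv(A)*x = (1,-1) */
+	return BLAS_usds(A);				/* release the handle */
+}
+#endif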
+
+	static rsb_err_t ts_ss_de_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 6, 6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 3, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 15, 15 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 15\n 15\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
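+
+/* Worked check (editorial): blas_lower_symmetric means only the lower
+ * triangle is stored, but the operator acts as the full symmetric matrix,
+ * here [ 1 3 ; 3 1 ].  Since A = A^T, trans=t yields the same product:
+ *   y' = y + 3*A*x = 3 + 3*(1*1 + 3*1) = 15 in both rows, matching cy[]. */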
+
+	static rsb_err_t ts_ss_de_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 2, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 12, 12 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 12\n 12\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
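+
+/* Worked check (editorial): for the real type 's', conjugation is a no-op
+ * and the stored-symmetric operator satisfies A^H = A, so trans=c reduces
+ * to the plain product: y' = y + 3*A*x = 3 + 3*(1+1) = 9, written at
+ * stride incy=2 into the even slots of y[], matching cy[] = { 9, 0, 9, 0 }. */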
+
+	static rsb_err_t ts_ss_de_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 2, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 12, 12 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 12\n 12\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
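+
+/*
+ * Editorial sketch: incx/incy address logical element i at x[i*incx] and
+ * y[i*incy], so the stride-2 x[] above carries the logical vector (1, 1)
+ * in its even slots.  A dense reference for the 2x2 symmetric case (hand
+ * written, names hypothetical):
+ */
+#if 0 /* reference-only check, kept out of the build */
+static void ref_susmv_sym2x2(float a11, float a21, float a22, float alpha,
+		const float *x, int incx, float *y, int incy)
+{
+	/* y <- y + alpha * S * x  with  S = [ a11 a21 ; a21 a22 ] */
+	y[0*incy] += alpha * (a11*x[0*incx] + a21*x[1*incx]);
+	y[1*incy] += alpha * (a21*x[0*incx] + a22*x[1*incx]);
+}
+/* For the test above: a11=1, a21=2, a22=1, alpha=3, x=(1,1) at incx=2,
+ * y=(3,3) at incy=1  ->  y becomes (3+3*3, 3+3*3) = (12, 12) = cy[]. */
+#endif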
+
+	static rsb_err_t ts_ss_de_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 2, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 12, 0, 12, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 12\n 12\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
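+/*
+ * A minimal sketch of the call sequence each of these tests exercises
+ * (float interface, error checking omitted; the calls are the same ones
+ * used verbatim in the test bodies):
+ *
+ *   blas_sparse_matrix A = BLAS_suscr_begin(nr, nc); // begin construction
+ *   BLAS_ussp(A, blas_lower_symmetric);              // declare symmetric storage
+ *   BLAS_suscr_insert_entries(A, nnz, VA, IA, JA);   // insert lower triangle
+ *   BLAS_suscr_end(A);                               // finalize the matrix
+ *   BLAS_susmv(transT, alpha, A, x, incx, y, incy);  // y <- y + alpha*op(A)*x
+ *   BLAS_usds(A);                                    // release the handle
+ */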
+	static rsb_err_t ts_ss_de_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 4, 4 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
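+/*
+ * For a real symmetric matrix op(A) is mathematically the same under
+ * blas_no_trans, blas_trans and blas_conj_trans, so the n/t/c variants
+ * below must agree on the result while exercising different transpose
+ * code paths; the sample lower triangles also differ between variants.
+ */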
+	static rsb_err_t ts_ss_de_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 4, 4 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 5, 5 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 5\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 2, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 3, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 7, 7 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 7\n 7\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 4, 4 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 2, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 2, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 5, 0, 5, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 5\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
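+/*
+ * From here on alpha is negative.  For the first case below (alpha = -1,
+ * A = I): y' = y - A*x = 3 - 1 = 2 per entry, matching cy = { 2, 2 }.
+ */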
+	static rsb_err_t ts_ss_de_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 2, 2 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 2, 2 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 2, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, 0 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 3, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ -1, 0, -1, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n -1\n -1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 6 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 6, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ -4, 0, -4, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 6\n 6 1\n"	" y' = \n -4\n -4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 2, 2 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 1\n 1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 1\n 1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 2, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ -3, -3 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, 0 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"
+		" A = \n 1 0\n 0 1\n" " y' = \n 0\n 0\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, 0 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"
+		" A = \n 1 0\n 0 1\n" " y' = \n 0\n 0\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
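+/*
+ * For the real types exercised here ('s' and 'd'), conjugation is a no-op,
+ * so the trans=c (blas_conj_trans) cases are expected to reproduce the
+ * corresponding trans=t results: compare the two preceding identity-matrix
+ * cases, whose hardcoded cy={0,0} coincide.
+ */
+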
+	static rsb_err_t ts_ss_de_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n" " y' = \n 0\n 0\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
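+/*
+ * The incy=2 cases store y with interleaved padding: logical element i of
+ * y lives at y[i*incy], so y={3,0,3,0} above encodes the logical vector
+ * {3,3}.  The zero padding in y and cy also documents that only the
+ * strided positions are expected to change under usmv.
+ */
+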
+	static rsb_err_t ts_ss_de_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ -3, 0, -3, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"
+		" A = \n 1 1\n 1 1\n" " y' = \n -3\n -3\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 2, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ -6, 0, -6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"
+		" A = \n 1 2\n 2 1\n" " y' = \n -6\n -6\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ -3, -3 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"
+		" A = \n 1 1\n 1 1\n" " y' = \n -3\n -3\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
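+/*
+ * Symmetrically, the incx=2 cases read logical element i of x from
+ * x[i*incx]: x={1,0,1,0} encodes the logical {1,1}, and the interleaved
+ * zeros are never part of the operand.  With A expanded to [ 1 1 ; 1 1 ]
+ * and alpha=-3 this again yields y' = 3 + (-3)*2 = -3, i.e. cy={-3,-3}.
+ */
+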
+	static rsb_err_t ts_ss_de_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"
+		" A = \n 1 0\n 0 1\n" " y' = \n 0\n 0\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"
+		" A = \n 1 0\n 0 1\n" " y' = \n 0\n 0\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n" " y' = \n 0\n 0\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 3, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ -9, 0, -9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"
+		" A = \n 1 3\n 3 1\n" " y' = \n -9\n -9\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_de_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 3, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ -9, 0, -9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"
+		" A = \n 1 3\n 3 1\n" " y' = \n -9\n -9\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
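+/*
+ * The td_* cases that follow repeat the sweep for type 'd': the entry
+ * points switch to their double-precision counterparts (BLAS_duscr_begin,
+ * BLAS_duscr_insert_entries, BLAS_duscr_end, BLAS_dusmv), VA/x/y/cy become
+ * double, and the comparison helpers are invoked with the 'D' type code.
+ */
+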
+	static rsb_err_t td_ss_de_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 6, 6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n" " y' = \n 6\n 6\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 6, 6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1 0\n 0 1\n" " y' = \n 6\n 6\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1 1\n 1 1\n" " y' = \n 9\n 9\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n" " y' = \n 6\n 6\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1 0\n 0 1\n" " y' = \n 6\n 6\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1 1\n 1 1\n" " y' = \n 9\n 9\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 6, 6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n" " y' = \n 6\n 6\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 3, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 15, 15 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1 3\n 3 1\n" " y' = \n 15\n 15\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 6, 6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1 0\n 0 1\n" " y' = \n 6\n 6\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n" " y' = \n 6\n 6\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1 1\n 1 1\n" " y' = \n 9\n 9\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 2, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 12, 0, 12, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1 2\n 2 1\n" " y' = \n 12\n 12\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
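+/*
+ * Within each type the generator also sweeps the scaling factor: the
+ * preceding td_* block used alpha=3, while the cases below rerun the same
+ * trans/incx/incy grid with alpha=1 (the ts_* block further above likewise
+ * covers alpha=-3 and alpha=-1), so both signs and magnitudes of
+ * y' <- y + alpha * op(A) * x are exercised.
+ */
+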
+	static rsb_err_t td_ss_de_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 4, 4 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n" " y' = \n 4\n 4\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 4, 4 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1 0\n 0 1\n" " y' = \n 4\n 4\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 4, 4 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1 0\n 0 1\n" " y' = \n 4\n 4\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 2, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
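+/*
+ * The trans=n/t/c variants map to blas_no_trans, blas_trans and
+ * blas_conj_trans respectively; since conjugation is a no-op for the real
+ * type 'd', the trans=c cases exercise the blas_conj_trans code path but
+ * must produce the same results as trans=t on the same data.
+ */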
+	static rsb_err_t td_ss_de_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 3, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 7, 0, 7, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 7\n 7\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
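+/*
+ * Analogously, with incx=2 the input is stored strided as { 1, 0, 1, 0 }:
+ * its logical content is still the all-ones vector of length nr=2, read at
+ * stride incx by BLAS_dusmv().
+ */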
+	static rsb_err_t td_ss_de_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 4, 4 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 2, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 6, 6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 4, 4 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
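+/*
+ * The following cases combine both strides (incx=2, incy=2): x, y and cy are
+ * each padded to length 2*nr and both vectors are accessed at stride 2.
+ */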
+	static rsb_err_t td_ss_de_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 2, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 3, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 7, 0, 7, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 7\n 7\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 5, 0, 5, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 5\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
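+/*
+ * Next the generator flips the scalar: the "anr1" functions set alpha = -1
+ * (versus "ap1" for alpha = +1; "bp1" matches the fixed beta = 1 of usmv
+ * reported in the printouts), so the expected update becomes
+ * y' = y - op(A) * x.
+ */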
+	static rsb_err_t td_ss_de_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 5 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 5, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ -3, -3 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 5\n 5 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 3, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ -1, -1 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n -1\n -1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 3, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ -1, -1 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n -1\n -1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 2, 2 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 3, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -1, -1 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n -1\n -1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 2, 2 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
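+/*
+ * Same grid once more with alpha = -3 ("anr3"): the expected update is
+ * y' = y - 3 * op(A) * x; e.g. for A = I, y = { 3, 3 } and x = { 1, 1 } the
+ * reference cy is the zero vector.
+ */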
+	static rsb_err_t td_ss_de_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 3, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ -9, -9 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n -9\n -9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ -3, 0, -3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 2, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ -6, 0, -6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n -6\n -6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
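+/*
+ * For the real type 'd', blas_conj_trans is numerically identical to
+ * blas_trans (conjugation is a no-op on real data): above,
+ * A^H*x = A^T*x = { 3, 3 }, hence y'[0] = 3 + (-3)*3 = -6, as in cy.
+ */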
+	static rsb_err_t td_ss_de_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 3, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -9, -9 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n -9\n -9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
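+/*
+ * incx=2 mirrors the stride on the input side: x holds four entries but
+ * only x[0] and x[2] are read, so the effective operand is { 1, 1 }.
+ * With the lower triangle { 1, 3, 1 } expanded to A = [ 1 3 ; 3 1 ],
+ *   y'[0] = 3 + (-3)*(1 + 3) = -9, matching cy = { -9, -9 }.
+ */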
+	static rsb_err_t td_ss_de_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -3, -3 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 2, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -6, -6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n -6\n -6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
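+/*
+ * The generated names encode the parameter combination: e.g.
+ * td_ss_de_usmv_2_c_anr3_bp1_ix2_iy1 reads as type d, sym=s, diag=e,
+ * op usmv, dims 2x2, trans=c, alpha negative 3 ("anr3"), beta positive
+ * 1 ("bp1"), incx=2, incy=1, matching the banner each test prints.
+ */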
+	static rsb_err_t td_ss_de_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -3, 0, -3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_de_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -3, 0, -3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
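+/*
+ * From here the same sweep is repeated for type 'c' (C99 float complex,
+ * with values spelled via the complex.h imaginary unit I). Note the
+ * calling-convention difference visible above and below: BLAS_dusmv
+ * takes alpha by value, whereas BLAS_cusmv takes its address (&alpha),
+ * since the Sparse BLAS C binding passes complex scalars by pointer.
+ */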
+	static rsb_err_t tc_ss_de_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+8i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+8*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 6+24*I, 6+24*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1+0i 0+8i\n 0+8i 1+0i\n"	" y' = \n 6+24i\n 6+24i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 2+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 2+2*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 12+6*I, 12+6*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1+0i 2+2i\n 2+2i 1+0i\n"	" y' = \n 12+6i\n 12+6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
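+/*
+ * Because the matrices are stored as blas_lower_symmetric, trans=t
+ * yields the same result as trans=n (A^T = A); only trans=c can differ,
+ * and only when the data has nonzero imaginary parts. Above,
+ *   (A^T x)[0] = 1 + (2+2i) = 3+2i,  y'[0] = 3 + 3*(3+2i) = 12+6i.
+ */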
+	static rsb_err_t tc_ss_de_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 2+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 2+1*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 12+-3*I, 12+-3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1+0i 2+1i\n 2+1i 1+0i\n"	" y' = \n 12+-3i\n 12+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
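+/*
+ * These complex matrices are symmetric, not Hermitian, so
+ * blas_conj_trans genuinely conjugates the off-diagonal entries:
+ *   (A^H x)[0] = 1 + (2-1i) = 3-1i,  y'[0] = 3 + 3*(3-1i) = 12-3i,
+ * matching cy (written as 12+-3*I, i.e. 12 + (-3)*I).
+ */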
+	static rsb_err_t tc_ss_de_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 1+1*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 9+3*I, 0+0*I, 9+3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1+0i 1+1i\n 1+1i 1+0i\n"	" y' = \n 9+3i\n 9+3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+4i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 1+4*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 9+12*I, 0+0*I, 9+12*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1+0i 1+4i\n 1+4i 1+0i\n"	" y' = \n 9+12i\n 9+12i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+3*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 6+-9*I, 0+0*I, 6+-9*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1+0i 0+3i\n 0+3i 1+0i\n"	" y' = \n 6+-9i\n 6+-9i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1, 2, 1 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 12, 12 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 12\n 12\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1, 3, 1 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 15, 15 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 15\n 15\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 3+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 3+3*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 15+-9*I, 15+-9*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1+0i 3+3i\n 3+3i 1+0i\n"	" y' = \n 15+-9i\n 15+-9i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1, 3, 1 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 15, 0, 15, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 15\n 15\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 6+3*I, 0+0*I, 6+3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"	" y' = \n 6+3i\n 6+3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1, 2, 1 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 12, 0, 12, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 12\n 12\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
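+/*
+ * When the generated complex data happens to be real-valued (VA =
+ * { 1, 2, 1 } above), conjugation is again a no-op and trans=c reduces
+ * to trans=t: y'[0] = 3 + 3*(1 + 2) = 12, matching cy = { 12, 0, 12, 0 }.
+ */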
+	static rsb_err_t tc_ss_de_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 4+1*I, 4+1*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"	" y' = \n 4+1i\n 4+1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
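+/*
+ * The remaining tests rerun the same trans/incx/incy grid with alpha=1
+ * (names carrying "ap1"); the earlier blocks used alpha=-3 ("anr3") and
+ * alpha=3 ("ap3"). Every combination carries its own hardcoded
+ * reference vector cy.
+ */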
+	static rsb_err_t tc_ss_de_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1, 1, 1 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 5, 5 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 5\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 2+6i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 2+6*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 6+-6*I, 6+-6*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1+0i 2+6i\n 2+6i 1+0i\n"	" y' = \n 6+-6i\n 6+-6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+2*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 4+2*I, 0+0*I, 4+2*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1+0i 0+2i\n 0+2i 1+0i\n"	" y' = \n 4+2i\n 4+2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+8i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 1+8*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 5+8*I, 0+0*I, 5+8*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1+0i 1+8i\n 1+8i 1+0i\n"	" y' = \n 5+8i\n 5+8i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1, 3, 1 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 7, 0, 7, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 7\n 7\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1 0
+ 4 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1, 4, 1 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 8, 8 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 4\n 4 1\n"	" y' = \n 8\n 8\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1 0
+ 4 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1, 4, 1 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 8, 8 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 4\n 4 1\n"" y' = \n 8\n 8\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 1+1*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 5-1*I, 5-1*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1+0i 1+1i\n 1+1i 1+0i\n"" y' = \n 5-1i\n 5-1i\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
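+/*
+ * Stride convention: with incx==2 or incy==2 the logical vectors still have
+ * two elements, but the arrays carry interleaved zero padding (e.g.
+ * x[]={ 1, 0, 1, 0 } encodes x=(1,1)); the "x: N" markers next to VA record
+ * that array length. The trailing arguments of rsb__do_are_same() and
+ * rsb__debug_print_vectors_diff() match incy here, presumably so that the
+ * comparison walks both vectors with the same stride.
+ */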
+	static rsb_err_t tc_ss_de_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+2*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 4+2*I, 0+0*I, 4+2*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1+0i 0+2i\n 0+2i 1+0i\n"" y' = \n 4+2i\n 4+2i\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1, 1 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 0\n 0 1\n"" y' = \n 4\n 4\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 1+1*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 5-1*I, 0+0*I, 5-1*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1+0i 1+1i\n 1+1i 1+0i\n"" y' = \n 5-1i\n 5-1i\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
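+/* The same trans/incx/incy sweep is repeated below with alpha=-1, so the
+ * reference result becomes y' = y - op(A)*x. */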
+	static rsb_err_t tc_ss_de_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1, 1, 1 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 1\n 1 1\n"" y' = \n 1\n 1\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 3+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 3+3*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -1-3*I, -1-3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+0i 3+3i\n 3+3i 1+0i\n"" y' = \n -1-3i\n -1-3i\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 1+1*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 1+1*I, 1+1*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+0i 1+1i\n 1+1i 1+0i\n"" y' = \n 1+1i\n 1+1i\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+2*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 2-2*I, 0+0*I, 2-2*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1+0i 0+2i\n 0+2i 1+0i\n"" y' = \n 2-2i\n 2-2i\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 3+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 3+3*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -1-3*I, 0+0*I, -1-3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+0i 3+3i\n 3+3i 1+0i\n"" y' = \n -1-3i\n -1-3i\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 2+1*I, 0+0*I, 2+1*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"" y' = \n 2+1i\n 2+1i\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1, 2, 1 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0, 0 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 2\n 2 1\n"" y' = \n 0\n 0\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 1+1*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 1-1*I, 1-1*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+0i 1+1i\n 1+1i 1+0i\n"" y' = \n 1-1i\n 1-1i\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 2+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 2+1*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0+1*I, 0+1*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+0i 2+1i\n 2+1i 1+0i\n"" y' = \n 0+1i\n 0+1i\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+4i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+4*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 2-4*I, 0+0*I, 2-4*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1+0i 0+4i\n 0+4i 1+0i\n"" y' = \n 2-4i\n 2-4i\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1, 1 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 0 1\n"" y' = \n 2\n 2\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 1+1*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 1+1*I, 0+0*I, 1+1*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+0i 1+1i\n 1+1i 1+0i\n"" y' = \n 1+1i\n 1+1i\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
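+/* The sweep continues below with alpha=-3 (y' = y - 3*op(A)*x), again over
+ * the trans/incx/incy combinations. */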
+	static rsb_err_t tc_ss_de_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1, 1, 1 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -3, -3 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 1\n 1 1\n"" y' = \n -3\n -3\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+2*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 0-6*I, 0-6*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+0i 0+2i\n 0+2i 1+0i\n"" y' = \n 0-6i\n 0-6i\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 0+3*I, 0+3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"" y' = \n 0+3i\n 0+3i\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 2+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 2+3*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -6-9*I, 0+0*I, -6-9*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1+0i 2+3i\n 2+3i 1+0i\n"" y' = \n -6-9i\n -6-9i\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 2+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 2+3*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -6-9*I, 0+0*I, -6-9*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+0i 2+3i\n 2+3i 1+0i\n"" y' = \n -6-9i\n -6-9i\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+2*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 0+6*I, 0+0*I, 0+6*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 0+2i\n 0+2i 1+0i\n"" y' = \n 0+6i\n 0+6i\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1 0
+ 5 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1, 5, 1 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ -15, -15 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 5\n 5 1\n"" y' = \n -15\n -15\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0-3*I, 0-3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"" y' = \n 0-3i\n 0-3i\n"" y = \n 3\n 3\n"" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
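+/*
+ * Check of the transpose case above: A = [1, i; i, 1] is symmetric, so
+ * A^T * x = A * x = (1+i, 1+i) for x = (1,1); with alpha = -3 and y = (3,3),
+ * y' = 3 - 3*(1+i) = 0-3i, matching the hardcoded cy.
+ */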
+	static rsb_err_t tc_ss_de_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+3*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0+9*I, 0+9*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"
+		" A = \n 1+0i 0+3i\n 0+3i 1+0i\n"
+		" y' = \n 0+9i\n 0+9i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
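+/*
+ * Note on strides: with incx=2 the x arrays above are declared with four
+ * elements ({ 1, 0, 1, 0 }), of which only every second one is an operand;
+ * in the incy=2 cases below, y and cy likewise carry interleaved padding
+ * elements that usmv must leave untouched (they stay 0+0*I in cy).
+ */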
+	static rsb_err_t tc_ss_de_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+4i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 1+4*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ -3+-12*I, 0+0*I, -3+-12*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"
+		" A = \n 1+0i 1+4i\n 1+4i 1+0i\n"
+		" y' = \n -3+-12i\n -3+-12i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0+-3*I, 0+0*I, 0+-3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"
+		" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"
+		" y' = \n 0+-3i\n 0+-3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_de_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 2+6i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 2+6*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ -6+18*I, 0+0*I, -6+18*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"
+		" A = \n 1+0i 2+6i\n 2+6i 1+0i\n"
+		" y' = \n -6+18i\n -6+18i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
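+/*
+ * The cases below repeat the same scheme for the double-precision complex
+ * type ('z'): the BLAS_cus* entry points become BLAS_zus*, values are
+ * declared double complex, and results are compared under type code 'Z'.
+ */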
+	static rsb_err_t tz_ss_de_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 1+1*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 9+3*I, 9+3*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1+0i 1+1i\n 1+1i 1+0i\n"
+		" y' = \n 9+3i\n 9+3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 2+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 2+3*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 12+9*I, 12+9*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1+0i 2+3i\n 2+3i 1+0i\n"
+		" y' = \n 12+9i\n 12+9i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+2*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 6+-6*I, 6+-6*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1+0i 0+2i\n 0+2i 1+0i\n"
+		" y' = \n 6+-6i\n 6+-6i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
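+/*
+ * Check of the conjugate-transpose case above: for A = [1, 2i; 2i, 1],
+ * conj(A)^T * x = (1-2i, 1-2i) with x = (1,1); with alpha = 3 and y = (3,3),
+ * y' = 3 + 3*(1-2i) = 6-6i, matching the hardcoded cy.
+ */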
+
+	static rsb_err_t tz_ss_de_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1, 1, 1 };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1 1\n 1 1\n"
+		" y' = \n 9\n 9\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 2+8i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 2+8*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 12+24*I, 0+0*I, 12+24*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1+0i 2+8i\n 2+8i 1+0i\n"
+		" y' = \n 12+24i\n 12+24i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 3+4i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 3+4*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 15+-12*I, 0+0*I, 15+-12*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1+0i 3+4i\n 3+4i 1+0i\n"
+		" y' = \n 15+-12i\n 15+-12i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+4i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+4*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 6+12*I, 6+12*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1+0i 0+4i\n 0+4i 1+0i\n"
+		" y' = \n 6+12i\n 6+12i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 3+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 3+3*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 15+9*I, 15+9*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1+0i 3+3i\n 3+3i 1+0i\n"
+		" y' = \n 15+9i\n 15+9i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+3*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 6+-9*I, 6+-9*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1+0i 0+3i\n 0+3i 1+0i\n"
+		" y' = \n 6+-9i\n 6+-9i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 6+3*I, 0+0*I, 6+3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"
+		" y' = \n 6+3i\n 6+3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 6+3*I, 0+0*I, 6+3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"
+		" y' = \n 6+3i\n 6+3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 6+-3*I, 0+0*I, 6+-3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"
+		" y' = \n 6+-3i\n 6+-3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
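+/*
+ * From here on the 'z' cases are repeated with alpha = 1. The name fragments
+ * ap1/ap3/anr3 apparently encode alpha = +1, +3 and -3 respectively, and
+ * bp1 the implicit beta = 1 reported by the printf diagnostics.
+ */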
+	static rsb_err_t tz_ss_de_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 3+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 3+1*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 7+1*I, 7+1*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1+0i 3+1i\n 3+1i 1+0i\n"
+		" y' = \n 7+1i\n 7+1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+2*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 4+2*I, 4+2*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1+0i 0+2i\n 0+2i 1+0i\n"
+		" y' = \n 4+2i\n 4+2i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+3*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 4+-3*I, 4+-3*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1+0i 0+3i\n 0+3i 1+0i\n"
+		" y' = \n 4+-3i\n 4+-3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 4\n 4\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+3*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 4+3*I, 0+0*I, 4+3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1+0i 0+3i\n 0+3i 1+0i\n"
+		" y' = \n 4+3i\n 4+3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1, 1, 1 };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 5, 0, 5, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1 1\n 1 1\n"
+		" y' = \n 5\n 5\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+2*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 4+2*I, 4+2*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1+0i 0+2i\n 0+2i 1+0i\n"
+		" y' = \n 4+2i\n 4+2i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+2*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 4+2*I, 4+2*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1+0i 0+2i\n 0+2i 1+0i\n"
+		" y' = \n 4+2i\n 4+2i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 4, 4 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 4\n 4\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
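+/*
+ * Editorial note on the data layout used below: with blas_lower_symmetric set,
+ * only the lower triangle is inserted.  In the next case, VA={1,5i,1},
+ * IA={0,1,1}, JA={0,0,1} stores (0,0)=1, (1,0)=5i, (1,1)=1, and the reference
+ * string prints the implied full matrix A = (1 5i; 5i 1).  Worked check
+ * (trans=n, alpha=1, x=(1,1), y=(3,3)): y' = y + A*x = (4+5i, 4+5i), matching
+ * cy, whose values sit at even offsets because incy=2.
+ */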
+	static rsb_err_t tz_ss_de_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+5i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+5*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 4+5*I, 0+0*I, 4+5*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1+0i 0+5i\n 0+5i 1+0i\n"
+		" y' = \n 4+5i\n 4+5i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 4+1*I, 0+0*I, 4+1*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"
+		" y' = \n 4+1i\n 4+1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+2*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 4+-2*I, 0+0*I, 4+-2*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1+0i 0+2i\n 0+2i 1+0i\n"
+		" y' = \n 4+-2i\n 4+-2i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
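+/*
+ * Editorial note on the trans=c case above: op(A) is the conjugate transpose,
+ * so the stored off-diagonal 0+2i contributes as 0-2i:
+ * y' = y + A^H*x = (3 + 1 - 2i, 3 - 2i + 1) = (4-2i, 4-2i),
+ * i.e. the "4+-2i" entries spelled out in the reference string.
+ */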
+	static rsb_err_t tz_ss_de_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 2, 2 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 2\n 2\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+5i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+5*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 2+-5*I, 2+-5*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"
+		" A = \n 1+0i 0+5i\n 0+5i 1+0i\n"
+		" y' = \n 2+-5i\n 2+-5i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 2+5i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 2+5*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 0+5*I, 0+5*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"
+		" A = \n 1+0i 2+5i\n 2+5i 1+0i\n"
+		" y' = \n 0+5i\n 0+5i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 2\n 2\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 1+1*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 1+-1*I, 0+0*I, 1+-1*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"
+		" A = \n 1+0i 1+1i\n 1+1i 1+0i\n"
+		" y' = \n 1+-1i\n 1+-1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 2\n 2\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 2, 2 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 2\n 2\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
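+/*
+ * Editorial note on the stride parameters: incx and incy select every
+ * incx-th (resp. incy-th) element, so in the case above (incx=2, incy=1)
+ * the logical x is (x[0], x[2]) = (1, 1) and the interleaved zeros are
+ * never read.  Worked check: y' = y - A*x = (3-1, 3-1) = (2, 2) = cy.
+ */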
+	static rsb_err_t tz_ss_de_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 2+-1*I, 2+-1*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"
+		" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"
+		" y' = \n 2+-1i\n 2+-1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+4i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+4*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 2+4*I, 2+4*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"
+		" A = \n 1+0i 0+4i\n 0+4i 1+0i\n"
+		" y' = \n 2+4i\n 2+4i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 1+2*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 1+-2*I, 0+0*I, 1+-2*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"
+		" A = \n 1+0i 1+2i\n 1+2i 1+0i\n"
+		" y' = \n 1+-2i\n 1+-2i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 2\n 2\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+3*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 2+3*I, 0+0*I, 2+3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"
+		" A = \n 1+0i 0+3i\n 0+3i 1+0i\n"
+		" y' = \n 2+3i\n 2+3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+3*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 0+-9*I, 0+-9*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"
+		" A = \n 1+0i 0+3i\n 0+3i 1+0i\n"
+		" y' = \n 0+-9i\n 0+-9i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 1+1*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -3+-3*I, -3+-3*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"
+		" A = \n 1+0i 1+1i\n 1+1i 1+0i\n"
+		" y' = \n -3+-3i\n -3+-3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
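+/*
+ * Editorial note on alpha: usmv computes y <- alpha*op(A)*x + y, so in the
+ * alpha=-3 case above each component is y' = 3 - 3*((1+0i) + (1+1i)) = -3-3i,
+ * i.e. the "-3+-3i" entries of cy.
+ */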
+	static rsb_err_t tz_ss_de_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 1+3*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -3+9*I, -3+9*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"
+		" A = \n 1+0i 1+3i\n 1+3i 1+0i\n"
+		" y' = \n -3+9i\n -3+9i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1, 2, 1 };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -6, 0, -6, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"
+		" A = \n 1 2\n 2 1\n"
+		" y' = \n -6\n -6\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 1+1*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -3+-3*I, 0+0*I, -3+-3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"
+		" A = \n 1+0i 1+1i\n 1+1i 1+0i\n"
+		" y' = \n -3+-3i\n -3+-3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 2+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 2+1*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -6+3*I, 0+0*I, -6+3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"
+		" A = \n 1+0i 2+1i\n 2+1i 1+0i\n"
+		" y' = \n -6+3i\n -6+3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 0, 0 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 0\n 0\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 1+2*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -3+-6*I, -3+-6*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"
+		" A = \n 1+0i 1+2i\n 1+2i 1+0i\n"
+		" y' = \n -3+-6i\n -3+-6i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 2+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 2+1*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -6+3*I, -6+3*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"
+		" A = \n 1+0i 2+1i\n 2+1i 1+0i\n"
+		" y' = \n -6+3i\n -6+3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 0+-3*I, 0+0*I, 0+-3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"
+		" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"
+		" y' = \n 0+-3i\n 0+-3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_de_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1, 1, 1 };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -3, 0, -3, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
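+
+/*
+ * Calling convention worth noting: for complex types the scalar is
+ * passed by address, as in BLAS_zusmv(transT,&alpha,...), while the real
+ * variants below pass it by value, as in BLAS_susmv(transT,alpha,...);
+ * this follows the Sparse BLAS C bindings for usmv.
+ */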
+
+	static rsb_err_t tz_ss_de_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+5i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+5*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 0+15*I, 0+0*I, 0+15*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 0+5i\n 0+5i 1+0i\n"	" y' = \n 0+15i\n 0+15i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
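+
+/*
+ * Worked expectation for the trans=c case above: the full symmetric
+ * operator is A = ((1,5i),(5i,1)), hence A^H = ((1,-5i),(-5i,1)); with
+ * the logical x = (1,1) this yields A^H x = (1-5i,1-5i), and
+ * y' = 3 + (-3)*(1-5i) = 15i, i.e. the 0+15i entries of cy at the even
+ * (stride-2) positions.
+ */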
+
+	static rsb_err_t ts_sh_de_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 6, 6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 6, 6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 3, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 15, 15 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 15\n 15\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
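+
+/*
+ * The ts_sh_* tests apply blas_lower_hermitian to the real type s: since
+ * conjugation is the identity on real values, such a matrix behaves
+ * exactly like a blas_lower_symmetric one and trans=c reduces to
+ * trans=t.  For the test above, A = ((1,3),(3,1)), A^H x = (4,4) for
+ * x = (1,1), and y' = 3 + 3*4 = 15, matching cy.
+ */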
+
+	static rsb_err_t ts_sh_de_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
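+
+/*
+ * Stride handling, worked for the test above (incx=1, incy=2): the
+ * logical y = (3,3) lives at y[0] and y[2]; with the full operator
+ * A = ((1,1),(1,1)) and x = (1,1), 3*A*x = (6,6), so y' = (9,9) lands at
+ * the even positions while the elements in between stay 0, untouched by
+ * usmv; hence cy = (9,0,9,0).
+ */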
+
+	static rsb_err_t ts_sh_de_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
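+
+/*
+ * In the six incx=2 variants above (trans n/t/c, incy 1 and 2) A is the
+ * 2x2 identity, so op(A) = A for every trans value and each variant
+ * expects the same logical result, y' = 3 + 3*1 = 6.
+ */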
+
+	static rsb_err_t ts_sh_de_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 4, 4 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 3, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 7, 7 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 7\n 7\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 5, 5 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 5\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 2, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 5, 0, 5, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 5\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 4, 4 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 4, 4 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 4, 4 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 5, 0, 5, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 5\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
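+
+/*
+ * For reference, a minimal standalone sketch of the same call sequence
+ * for the double type; an illustration assuming <blas_sparse.h> is on
+ * the include path and that any library-specific initialization (e.g.
+ * rsb_lib_init() in librsb) has been performed where required:
+ *
+ *   #include <blas_sparse.h>
+ *   #include <stdio.h>
+ *
+ *   int main(void)
+ *   {
+ *   	const int IA[] = { 0, 1, 1 }, JA[] = { 0, 0, 1 };
+ *   	const double VA[] = { 1, 1, 1 };	// stored lower triangle of A
+ *   	const double x[] = { 1, 1 };
+ *   	double y[] = { 3, 3 };
+ *   	blas_sparse_matrix A = BLAS_duscr_begin(2, 2);	// 2x2 matrix
+ *
+ *   	if( A == -1 ) return 1;
+ *   	if( BLAS_ussp(A, blas_lower_symmetric) != 0 ) return 1;
+ *   	if( BLAS_duscr_insert_entries(A, 3, VA, IA, JA) != 0 ) return 1;
+ *   	if( BLAS_duscr_end(A) != 0 ) return 1;
+ *   	// y <- 1 * A * x + y with unit strides: expect y = (5,5)
+ *   	if( BLAS_dusmv(blas_no_trans, 1.0, A, x, 1, y, 1) != 0 ) return 1;
+ *   	printf("y = [ %g %g ]\n", y[0], y[1]);
+ *   	return BLAS_usds(A) != 0;
+ *   }
+ */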
+
+	static rsb_err_t ts_sh_de_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 1\n 1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 2, 2 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 2, 2 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
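+/*
+ Editorial note: the variants below exercise non-unit strides. Logical
+ element i of a vector with increment inc lives at offset i*inc, so
+ x[]={ 1, 0, 1, 0 } with incx=2 encodes the logical vector (1,1), and the
+ interleaved zeros in the reference cy[] arrays are padding that
+ BLAS_susmv is expected to leave untouched in y.
+*/
+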
+	static rsb_err_t ts_sh_de_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 1\n 1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 3, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ -1, 0, -1, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n -1\n -1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 3, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ -1, -1 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n -1\n -1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 2, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 2, 2 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ -3, -3 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, 0 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 6 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 6, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ -18, -18 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 6\n 6 1\n"	" y' = \n -18\n -18\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
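+/*
+ Editorial note, worked example for the case above: the stored lower
+ triangle { (0,0)=1, (1,0)=6, (1,1)=1 } expands to A = [ 1 6; 6 1 ], so
+ with alpha=-3 and x=(1,1) each component is y'_i = 3 + (-3)*(1+6) = -18,
+ matching cy[]; with real data the result is the same for all trans values.
+*/
+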
+	static rsb_err_t ts_sh_de_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 3, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ -9, -9 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n -9\n -9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float VA[]={ 1, 2, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ -6, -6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n -6\n -6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_de_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
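+/*
+ Editorial note: from here the same battery is repeated in double
+ precision: BLAS_duscr_begin/BLAS_duscr_insert_entries/BLAS_duscr_end and
+ BLAS_dusmv replace the single-precision BLAS_sus* calls, and the result
+ comparisons use the 'D' type character.
+*/
+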
+	static rsb_err_t td_sh_de_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 6, 6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
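+	/* For real ('d') data the conjugate transpose equals the transpose,
+	 * so this trans=c case expects the same cy = {9, 9} as trans=t. */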
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
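+	/* With incy=2 only every other element of y is used: the logical
+	 * vector is {3, 3}, updated to {9, 9}; the interleaved zeros are
+	 * stride padding. */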
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
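+	/* Here A is the 2x2 identity, so y' = y + 3*A^T*x is 3 + 3*1 = 6 per
+	 * logical entry; the zeros at odd positions are incy=2 stride gaps. */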
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 5 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 5, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 21, 0, 21, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
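+	/* The stored lower triangle {1, 5, 1} expands to A = [1 5; 5 1], so
+	 * y' = y + 3*A^H*x gives 3 + 3*(1+5) = 21 in each logical entry. */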
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 5\n 5 1\n"	" y' = \n 21\n 21\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 6, 6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
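+	/* With incx=2 only x[0] and x[2] are read, i.e. the logical
+	 * x = {1, 1}; A is the identity, so y' = 3 + 3*1 = 6 per entry. */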
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 6, 6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 6, 6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 3, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 7, 7 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
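+	/* With alpha=1 and the expansion A = [1 3; 3 1], each entry of
+	 * y' = y + A*x is 3 + (1+3) = 7, matching cy. */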
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 7\n 7\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 4, 4 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 4, 4 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 5, 0, 5, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 5\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 3, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 7, 0, 7, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 7\n 7\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 5, 5 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 5\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 4, 4 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 4, 4 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
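+	/* With alpha=-1 and the expansion A = [1 1; 1 1], each entry of
+	 * y' = y - A*x is 3 - (1+1) = 1, matching cy. */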
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 1\n 1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 2, 2 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 2, 2 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 0 1\n" " y' = \n 2\n 2\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 2 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 2, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 2\n 2 1\n" " y' = \n 0\n 0\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
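+/*
+ * Since type 'd' is real, conjugate transposition is numerically
+ * identical to plain transposition; the trans=c testers therefore
+ * validate the blas_conj_trans dispatch path rather than any new
+ * arithmetic.
+ */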
+	static rsb_err_t td_sh_de_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 2, 2 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 0 1\n" " y' = \n 2\n 2\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 1\n 1 1\n" " y' = \n 1\n 1\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 2, 2 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 0\n 0 1\n" " y' = \n 2\n 2\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
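+/*
+ * In the strided testers an incx/incy of 2 means only every other array
+ * element is an operand; the interleaved zeros in x, y and cy are mere
+ * padding between the logical vector elements.
+ */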
+	static rsb_err_t td_sh_de_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 5 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 5, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -3, 0, -3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 5\n 5 1\n" " y' = \n -3\n -3\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 0 1\n" " y' = \n 2\n 2\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 1 0
+ 4 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 4, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -2, 0, -2, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 4\n 4 1\n" " y' = \n -2\n -2\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 0 1\n" " y' = \n 0\n 0\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 0\n 0 1\n" " y' = \n 0\n 0\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ -3, -3 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 1\n 1 1\n" " y' = \n -3\n -3\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 6 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 6, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ -18, 0, -18, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 6\n 6 1\n" " y' = \n -18\n -18\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 0\n 0 1\n" " y' = \n 0\n 0\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 0\n 0 1\n" " y' = \n 0\n 0\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 0, 0 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 0 1\n" " y' = \n 0\n 0\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -3, -3 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 1\n 1 1\n" " y' = \n -3\n -3\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 3, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -9, -9 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 3\n 3 1\n" " y' = \n -9\n -9\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 6 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 6, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -18, 0, -18, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 6\n 6 1\n" " y' = \n -18\n -18\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double VA[]={ 1, 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -3, 0, -3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 1\n 1 1\n" " y' = \n -3\n -3\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_de_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 0\n 0 1\n" " y' = \n 0\n 0\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
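+/*
+ * The testers from here on repeat the same alpha/transpose/stride grid
+ * for the single precision complex type 'c', where conjugation has a
+ * visible numerical effect.
+ */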
+	static rsb_err_t tc_sh_de_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1, 1 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 6, 6 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 0\n 0 1\n" " y' = \n 6\n 6\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
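+/*
+ * Note the calling convention difference: the real-typed testers pass
+ * alpha to BLAS_dusmv() by value, whereas the complex testers pass its
+ * address (&alpha) to BLAS_cusmv().
+ */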
+	static rsb_err_t tc_sh_de_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+2*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 6+6*I, 6+-6*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1+0i 0+-2i\n 0+2i 1+0i\n" " y' = \n 6+6i\n 6+-6i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 6+-3*I, 6+3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n" " y' = \n 6+-3i\n 6+3i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
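+/*
+ * With blas_lower_hermitian only the lower triangle is stored (e.g. the
+ * strictly lower entry 0+1i in the tester above); the implicit upper
+ * entry is its conjugate 0-1i, which is why the lsc banner prints a
+ * full matrix while VA/IA/JA hold just three coefficients.
+ */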
+	static rsb_err_t tc_sh_de_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 3+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 3+1*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 15+-3*I, 0+0*I, 15+3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1+0i 3+-1i\n 3+1i 1+0i\n" " y' = \n 15+-3i\n 15+3i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 1+1*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 9+3*I, 0+0*I, 9+-3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1+0i 1+-1i\n 1+1i 1+0i\n" " y' = \n 9+3i\n 9+-3i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1, 1 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 0\n 0 1\n" " y' = \n 6\n 6\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+5i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+5*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 6+-15*I, 6+15*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1+0i 0+-5i\n 0+5i 1+0i\n"	" y' = \n 6+-15i\n 6+15i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
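+	/* For Hermitian A the plain transpose equals the conjugate:
+	   A^T = conj(A). Here A = [1, 0-1i; 0+1i, 1], so
+	   A^T*x = (1+1i, 1-1i) for x = (1,1), and
+	   y' = y + 3*A^T*x = (6+3i, 6-3i). */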
+	static rsb_err_t tc_sh_de_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 6+3*I, 6+-3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"	" y' = \n 6+3i\n 6+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+2*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 6+-6*I, 6+6*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1+0i 0+-2i\n 0+2i 1+0i\n"	" y' = \n 6+-6i\n 6+6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
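+	/* Both vectors are strided in the next test: with incx=2 and incy=2
+	   only x[0],x[2] and y[0],y[2] take part, and the in-between slots
+	   of y are expected to stay 0, as the reference cy = {6,0,6,0}
+	   records. With A = I: y' = 3 + 3*1 = 6 in each used slot. */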
+	static rsb_err_t tc_sh_de_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1, 1 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 3+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 3+1*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 15+3*I, 0+0*I, 15+-3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1+0i 3+-1i\n 3+1i 1+0i\n"	" y' = \n 15+3i\n 15+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1, 1 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
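+	/* From here on alpha drops from 3 to 1; the _ap3_/_ap1_/_anr1_
+	   infixes in the test names track the alpha = +3/+1/-1 value set in
+	   each body. For this test A = [1, 3-2i; 3+2i, 1],
+	   A*x = (4-2i, 4+2i), hence y' = y + A*x = (7-2i, 7+2i). */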
+	static rsb_err_t tc_sh_de_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 3+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 3+2*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 7+-2*I, 7+2*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1+0i 3+-2i\n 3+2i 1+0i\n"	" y' = \n 7+-2i\n 7+2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 4+1*I, 4+-1*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"	" y' = \n 4+1i\n 4+-1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
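+	/* With purely real entries the Hermitian matrix is real symmetric,
+	   so A = A^T = A^H and all three trans variants agree. Here
+	   A = [1 1; 1 1], A^H*x = (2,2), and y' = 3 + 2 = 5 per element. */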
+	static rsb_err_t tc_sh_de_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1, 1, 1 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 5, 5 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 5\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1, 1, 1 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 5, 0, 5, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 5\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+5i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+5*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 4+5*I, 0+0*I, 4+-5*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1+0i 0+-5i\n 0+5i 1+0i\n"	" y' = \n 4+5i\n 4+-5i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 2+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 2+1*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 6+-1*I, 0+0*I, 6+1*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1+0i 2+-1i\n 2+1i 1+0i\n"	" y' = \n 6+-1i\n 6+1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
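+	/* incx=2 with A = I: the gathered x is (1,1) out of {1,0,1,0},
+	   and y is packed (incy=1), so y' = y + A*x = (4,4). */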
+	static rsb_err_t tc_sh_de_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1, 1 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 4, 4 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+4i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 1+4*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 5+4*I, 5+-4*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1+0i 1+-4i\n 1+4i 1+0i\n"	" y' = \n 5+4i\n 5+-4i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+4i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+4*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 4+-4*I, 4+4*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1+0i 0+-4i\n 0+4i 1+0i\n"	" y' = \n 4+-4i\n 4+4i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+2*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 4+-2*I, 0+0*I, 4+2*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1+0i 0+-2i\n 0+2i 1+0i\n"	" y' = \n 4+-2i\n 4+2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 1+2*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 5+2*I, 0+0*I, 5+-2*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1+0i 1+-2i\n 1+2i 1+0i\n"	" y' = \n 5+2i\n 5+-2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 4+-1*I, 0+0*I, 4+1*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"	" y' = \n 4+-1i\n 4+1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
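+	/* The _anr1_ group uses alpha = -1, i.e. y' = y - op(A)*x.
+	   In this test A = [1, 0-7i; 0+7i, 1], A*x = (1-7i, 1+7i), so
+	   y' = (3,3) - (1-7i, 1+7i) = (2+7i, 2-7i). */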
+	static rsb_err_t tc_sh_de_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+7i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+7*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 2+7*I, 2+-7*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1+0i 0+-7i\n 0+7i 1+0i\n"	" y' = \n 2+7i\n 2+-7i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 3+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 3+1*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -1+-1*I, -1+1*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+0i 3+-1i\n 3+1i 1+0i\n"	" y' = \n -1+-1i\n -1+1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
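+	/* conj-trans combined with alpha = -1: A^H = A for Hermitian A,
+	   A = [1, 5-3i; 5+3i, 1], A^H*x = (6-3i, 6+3i), so
+	   y' = (3,3) - (6-3i, 6+3i) = (-3+3i, -3-3i). */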
+	static rsb_err_t tc_sh_de_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 5+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 5+3*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -3+3*I, -3+-3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+0i 5+-3i\n 5+3i 1+0i\n"	" y' = \n -3+3i\n -3+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1, 1 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1, 1, 1 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 1\n 1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+4i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 1+4*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 1+4*I, 0+0*I, 1+-4*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+0i 1+-4i\n 1+4i 1+0i\n"	" y' = \n 1+4i\n 1+-4i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 2+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 2+1*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0+1*I, 0+-1*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1+0i 2+-1i\n 2+1i 1+0i\n"	" y' = \n 0+1i\n 0+-1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
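+	/* trans=t, alpha = -1 and incx=2 combined: A = [1, 1-3i; 1+3i, 1],
+	   A^T = conj(A), A^T*x = (2+3i, 2-3i) for the gathered x = (1,1),
+	   so y' = (3,3) - (2+3i, 2-3i) = (1-3i, 1+3i). */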
+	static rsb_err_t tc_sh_de_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t; kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 1+3*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 1+-3*I, 1+3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
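+	/* Added editorial sketch (not generator output): for this Hermitian A =
+	 * [ 1, 1-3i ; 1+3i, 1 ], trans=t applies A^T = conj(A) = [ 1, 1+3i ; 1-3i, 1 ].
+	 * With x_eff = [1;1] (incx=2), A^T*x = [ 2+3i ; 2-3i ], so
+	 * y' = y - A^T*x = [ 1-3i ; 1+3i ] = cy. */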
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+0i 1+-3i\n 1+3i 1+0i\n"	" y' = \n 1+-3i\n 1+3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+3*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 2+3*I, 2+-3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
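+	/* Added editorial sketch (not generator output): since A is Hermitian,
+	 * trans=c applies A^H = A = [ 1, 0-3i ; 0+3i, 1 ].  With x_eff = [1;1]
+	 * (incx=2), A^H*x = [ 1-3i ; 1+3i ], and y' = y - A^H*x = [ 2+3i ; 2-3i ],
+	 * matching cy. */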
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+0i 0+-3i\n 0+3i 1+0i\n"	" y' = \n 2+3i\n 2+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 2+1*I, 0+0*I, 2+-1*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
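+	/* Added editorial sketch (not generator output): A = [ 1, 0-1i ; 0+1i, 1 ],
+	 * so A*x_eff = [ 1-1i ; 1+1i ] and y' = y - A*x = [ 2+1i ; 2-1i ].  With
+	 * incy=2 these land in y[0] and y[2]; y[1],y[3] keep their initial 0,
+	 * which is why cy interleaves zeros. */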
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"	" y' = \n 2+1i\n 2+-1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 1+1*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 1+-1*I, 0+0*I, 1+1*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+0i 1+-1i\n 1+1i 1+0i\n"	" y' = \n 1+-1i\n 1+1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+3*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 2+3*I, 0+0*I, 2+-3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+0i 0+-3i\n 0+3i 1+0i\n"	" y' = \n 2+3i\n 2+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 3+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 3+1*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -9+3*I, -9+-3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
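+	/* Added editorial sketch (not generator output): A = [ 1, 3-1i ; 3+1i, 1 ],
+	 * x = [1;1], so A*x = [ 4-1i ; 4+1i ] and, with alpha=-3,
+	 * y' = y - 3*A*x = [ 3-12+3i ; 3-12-3i ] = [ -9+3i ; -9-3i ] = cy. */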
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1+0i 3+-1i\n 3+1i 1+0i\n"	" y' = \n -9+3i\n -9+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+6i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+6*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 0+-18*I, 0+18*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
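+	/* Added editorial sketch (not generator output): A = [ 1, 0-6i ; 0+6i, 1 ],
+	 * A^T = conj(A) = [ 1, 0+6i ; 0-6i, 1 ], so A^T*x = [ 1+6i ; 1-6i ] and
+	 * y' = y - 3*A^T*x = [ 0-18i ; 0+18i ] = cy. */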
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+0i 0+-6i\n 0+6i 1+0i\n"	" y' = \n 0+-18i\n 0+18i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+4i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+4*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 0+12*I, 0+-12*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
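+	/* Added editorial sketch (not generator output): A Hermitian means
+	 * A^H = A = [ 1, 0-4i ; 0+4i, 1 ], so A^H*x = [ 1-4i ; 1+4i ] and
+	 * y' = y - 3*A^H*x = [ 0+12i ; 0-12i ] = cy. */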
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 0+-4i\n 0+4i 1+0i\n"	" y' = \n 0+12i\n 0+-12i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 1+3*I, 1+0*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -3+9*I, 0+0*I, -3+-9*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1+0i 1+-3i\n 1+3i 1+0i\n"	" y' = \n -3+9i\n -3+-9i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1, 1 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
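+	/* Added editorial sketch (not generator output): here A is the 2x2
+	 * identity (nnz=2), so A^T*x = x = [1;1] and y' = 3 - 3*1 = 0 in the
+	 * strided slots y[0],y[2]; the untouched y[1],y[3] stay 0, hence cy={0,0,0,0}. */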
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1, 1 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+2*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0+6*I, 0+-6*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1+0i 0+-2i\n 0+2i 1+0i\n"	" y' = \n 0+6i\n 0+-6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 4+5i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 4+5*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ -12+-15*I, -12+15*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
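+	/* Added editorial sketch (not generator output): A = [ 1, 4-5i ; 4+5i, 1 ],
+	 * A^T = conj(A), x_eff = [1;1] (incx=2), so A^T*x = [ 5+5i ; 5-5i ] and
+	 * y' = y - 3*A^T*x = [ -12-15i ; -12+15i ] = cy. */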
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+0i 4+-5i\n 4+5i 1+0i\n"	" y' = \n -12+-15i\n -12+15i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 1+3*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ -3+9*I, -3+-9*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 1+-3i\n 1+3i 1+0i\n"	" y' = \n -3+9i\n -3+-9i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1, 1 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	float complex VA[]={ 1, 1 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_de_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	float complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0+3*I, 0+0*I, 0+-3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"	" y' = \n 0+3i\n 0+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 6, 6 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
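+	/* Added editorial sketch (not generator output): A is the 2x2 identity,
+	 * so y' = y + 3*x = [ 3+3 ; 3+3 ] = [ 6 ; 6 ] = cy. */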
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1, 1, 1 };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+3*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 6+-9*I, 6+9*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
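+	/* Added editorial sketch (not generator output): A Hermitian means
+	 * A^H = A = [ 1, 0-3i ; 0+3i, 1 ], so A^H*x = [ 1-3i ; 1+3i ] and
+	 * y' = y + 3*A^H*x = [ 6-9i ; 6+9i ] = cy. */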
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1+0i 0+-3i\n 0+3i 1+0i\n"	" y' = \n 6+-9i\n 6+9i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 2+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 2+1*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 12+-3*I, 0+0*I, 12+3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
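+	/* Added editorial sketch (not generator output): A = [ 1, 2-1i ; 2+1i, 1 ],
+	 * A*x = [ 3-1i ; 3+1i ], so y' = y + 3*A*x = [ 12-3i ; 12+3i ], written
+	 * to y[0] and y[2] (incy=2); y[1],y[3] keep 0, matching cy. */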
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1+0i 2+-1i\n 2+1i 1+0i\n"	" y' = \n 12+-3i\n 12+3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+2*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 6+6*I, 0+0*I, 6+-6*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1+0i 0+-2i\n 0+2i 1+0i\n"	" y' = \n 6+6i\n 6+-6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+2*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 6+-6*I, 0+0*I, 6+6*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1+0i 0+-2i\n 0+2i 1+0i\n"	" y' = \n 6+-6i\n 6+6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 2+5i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 2+5*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 12+-15*I, 12+15*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1+0i 2+-5i\n 2+5i 1+0i\n"	" y' = \n 12+-15i\n 12+15i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 1+1*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 9+3*I, 9+-3*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
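+	/* Added editorial sketch (not generator output): A = [ 1, 1-1i ; 1+1i, 1 ],
+	 * A^T = conj(A) = [ 1, 1+1i ; 1-1i, 1 ], x_eff = [1;1] (incx=2), so
+	 * A^T*x = [ 2+1i ; 2-1i ] and y' = y + 3*A^T*x = [ 9+3i ; 9-3i ] = cy. */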
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1+0i 1+-1i\n 1+1i 1+0i\n"	" y' = \n 9+3i\n 9+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+4i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+4*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 6+-12*I, 6+12*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1+0i 0+-4i\n 0+4i 1+0i\n"	" y' = \n 6+-12i\n 6+12i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 1+1*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 9+-3*I, 0+0*I, 9+3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1+0i 1+-1i\n 1+1i 1+0i\n"	" y' = \n 9+-3i\n 9+3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 6+3*I, 0+0*I, 6+-3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"	" y' = \n 6+3i\n 6+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 3+4i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 3+4*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 15+-12*I, 0+0*I, 15+12*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1+0i 3+-4i\n 3+4i 1+0i\n"	" y' = \n 15+-12i\n 15+12i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
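+/* The variants below repeat the preceding pattern with alpha = 1 ("ap1" in the function names). */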
+	static rsb_err_t tz_sh_de_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 4, 4 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 6+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 6+1*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 10+1*I, 10+-1*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1+0i 6+-1i\n 6+1i 1+0i\n"	" y' = \n 10+1i\n 10+-1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 3+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 3+2*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 7+-2*I, 7+2*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1+0i 3+-2i\n 3+2i 1+0i\n"	" y' = \n 7+-2i\n 7+2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 1+2*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 5+-2*I, 0+0*I, 5+2*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1+0i 1+-2i\n 1+2i 1+0i\n"	" y' = \n 5+-2i\n 5+2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 4+-1*I, 0+0*I, 4+1*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"	" y' = \n 4+-1i\n 4+1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 4+-1*I, 4+1*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"	" y' = \n 4+-1i\n 4+1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+2*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 4+2*I, 4+-2*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1+0i 0+-2i\n 0+2i 1+0i\n"	" y' = \n 4+2i\n 4+-2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 4+-1*I, 4+1*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"	" y' = \n 4+-1i\n 4+1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+3*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 4+3*I, 0+0*I, 4+-3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1+0i 0+-3i\n 0+3i 1+0i\n"	" y' = \n 4+3i\n 4+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+3*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 4+-3*I, 0+0*I, 4+3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1+0i 0+-3i\n 0+3i 1+0i\n"	" y' = \n 4+-3i\n 4+3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
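+/* The variants below repeat the preceding pattern with alpha = -1 ("anr1" in the function names). */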
+	static rsb_err_t tz_sh_de_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 1+1*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 1+1*I, 1+-1*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1+0i 1+-1i\n 1+1i 1+0i\n"	" y' = \n 1+1i\n 1+-1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+4i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+4*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 2+-4*I, 2+4*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+0i 0+-4i\n 0+4i 1+0i\n"	" y' = \n 2+-4i\n 2+4i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 3+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 3+2*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -1+2*I, -1+-2*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+0i 3+-2i\n 3+2i 1+0i\n"	" y' = \n -1+2i\n -1+-2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+4i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+4*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 2+4*I, 0+0*I, 2+-4*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1+0i 0+-4i\n 0+4i 1+0i\n"	" y' = \n 2+4i\n 2+-4i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+7i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+7*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 2+-7*I, 0+0*I, 2+7*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+0i 0+-7i\n 0+7i 1+0i\n"	" y' = \n 2+-7i\n 2+7i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 1+2*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 1+2*I, 0+0*I, 1+-2*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+0i 1+-2i\n 1+2i 1+0i\n"	" y' = \n 1+2i\n 1+-2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 2+5i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 2+5*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 0+5*I, 0+-5*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1+0i 2+-5i\n 2+5i 1+0i\n"	" y' = \n 0+5i\n 0+-5i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 2, 2 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 2+1*I, 2+-1*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"	" y' = \n 2+1i\n 2+-1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 1+2*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 1+2*I, 0+0*I, 1+-2*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1+0i 1+-2i\n 1+2i 1+0i\n"	" y' = \n 1+2i\n 1+-2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 1+2*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 1+-2*I, 0+0*I, 1+2*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+0i 1+-2i\n 1+2i 1+0i\n"	" y' = \n 1+-2i\n 1+2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
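+/*
+ Note on the two preceding testers: since A is Hermitian, A^T = conj(A);
+ with real alpha, x and y the trans=t result is therefore the entry-wise
+ conjugate of the trans=n result: { 1-2i, 1+2i } versus { 1+2i, 1-2i }.
+*/
+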
+	static rsb_err_t tz_sh_de_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1, 3, 1 };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -1, 0, -1, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n -1\n -1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
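+/*
+ In the preceding tester all stored values are real, so the Hermitian
+ property reduces to plain symmetry: A = A^H = [ 1 3 ; 3 1 ], and
+ y' = y - A^H x = { 3-4, 3-4 } = { -1, -1 }, written at stride incy=2.
+*/
+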
+	static rsb_err_t tz_sh_de_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1 0
+ 0 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 0, 1 };
+	double complex VA[]={ 1, 1 };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 0, 0 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+4i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+4*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 0+-12*I, 0+12*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+0i 0+-4i\n 0+4i 1+0i\n"	" y' = \n 0+-12i\n 0+12i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
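+/*
+ Worked example: the stored (1,0)=4i entry expands to
+   A = [ 1 -4i ; 4i 1 ],  A^T = [ 1 4i ; -4i 1 ],
+ so y' = y - 3 A^T x = { 3-3(1+4i), 3-3(1-4i) } = { -12i, +12i }, i.e. cy[].
+*/
+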
+	static rsb_err_t tz_sh_de_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+2*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 0+6*I, 0+-6*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 0+-2i\n 0+2i 1+0i\n"	" y' = \n 0+6i\n 0+-6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 3+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 3+2*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -9+6*I, 0+0*I, -9+-6*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1+0i 3+-2i\n 3+2i 1+0i\n"	" y' = \n -9+6i\n -9+-6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1 0
+ 3 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1, 3, 1 };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -9, 0, -9, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n -9\n -9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+2*I, 1+0*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 0+6*I, 0+0*I, 0+-6*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 0+-2i\n 0+2i 1+0i\n"	" y' = \n 0+6i\n 0+-6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+3i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+3*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 0+9*I, 0+-9*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1+0i 0+-3i\n 0+3i 1+0i\n"	" y' = \n 0+9i\n 0+-9i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+2i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+2*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 0+-6*I, 0+6*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+0i 0+-2i\n 0+2i 1+0i\n"	" y' = \n 0+-6i\n 0+6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 1+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 1+1*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -3+3*I, -3+-3*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 1+-1i\n 1+1i 1+0i\n"	" y' = \n -3+3i\n -3+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
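+/*
+ Worked example: (1,0)=1+i expands to A = [ 1 1-i ; 1+i 1 ] = A^H; with the
+ logical x = {1,1} (incx=2),
+ y' = y - 3 A^H x = { 3-3(2-i), 3-3(2+i) } = { -3+3i, -3-3i }, i.e. cy[].
+*/
+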
+	static rsb_err_t tz_sh_de_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 0+1i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 0+1*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 0+3*I, 0+0*I, 0+-3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"	" y' = \n 0+3i\n 0+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1 0
+ 1 1
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1, 1, 1 };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -3, 0, -3, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_de_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:e */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 1+0i 0+0i
+ 3+4i 1+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=3;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1, 1 };
+	int JA[]={ 0, 0, 1 };
+	double complex VA[]={ 1+0*I, 3+4*I, 1+0*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -9+12*I, 0+0*I, -9+-12*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 3+-4i\n 3+4i 1+0i\n"	" y' = \n -9+12i\n -9+-12i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=e blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float VA[]={ 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 9, 6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 1\n 0 1\n"	" y' = \n 9\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
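+/*
+ The ts_sg_di_* testers above and below use blas_unit_diag: the diagonal
+ is implicitly 1 and never stored.  In the preceding one, the single
+ stored entry (0,1)=1 yields the effective A = [ 1 1 ; 0 1 ], so
+ y' = y + 3 A x = { 3+3*2, 3+3*1 } = { 9, 6 }.
+*/
+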
+	static rsb_err_t ts_sg_di_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 6, 6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
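+/*
+ The nnz=0 case: IA/JA/VA hold a single dummy value that is never read,
+ inserting zero entries is a no-op, and the implicit unit diagonal leaves
+ the effective A = I, so y' = y + 3 I x = { 6, 6 }.  Builds without
+ RSB_BLAS_SUPPORT_EMPTY skip this test instead.
+*/
+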
+	static rsb_err_t ts_sg_di_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 3
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float VA[]={ 3 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 6, 15 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 3\n 0 1\n"	" y' = \n 6\n 15\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float VA[]={ 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 9, 0, 6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 1\n 0 1\n"	" y' = \n 9\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float VA[]={ 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 6, 0, 9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 1\n 0 1\n"	" y' = \n 6\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 1
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 1
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float VA[]={ 1, 2 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 9, 12 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 1\n 2 1\n"	" y' = \n 9\n 12\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
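+/*
+ Strided-access example: with incx=2 only x[0] and x[2] are read (logical
+ x = {1,1}) while incy=1 updates y contiguously; the effective
+ A = [ 1 1 ; 2 1 ] gives y' = y + 3 A x = { 3+3*2, 3+3*3 } = { 9, 12 }.
+*/
+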
+	static rsb_err_t ts_sg_di_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 3 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 15, 6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 0\n 3 1\n"	" y' = \n 15\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 0, 9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 0\n 1 1\n"	" y' = \n 6\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 1
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
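+/*
+ * In the trans=c cases the type is real ('s'), so blas_conj_trans is
+ * numerically the same as blas_trans. Here nnz=0 combined with
+ * blas_unit_diag makes A effectively the 2x2 identity, hence
+ * y' = y + 3*x = [6, 6]; the -1 entries in IA/JA/VA are, as their inline
+ * comments say, dummy placeholders for the empty input.
+ */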
+	static rsb_err_t ts_sg_di_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n" " A = \n 1 0\n 0 1\n" " y' = \n 6\n 6\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 2
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float VA[]={ 2, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 6, 5 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n" " A = \n 1 2\n 1 1\n" " y' = \n 6\n 5\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 2 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 6, 4 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n" " A = \n 1 0\n 2 1\n" " y' = \n 6\n 4\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 4, 4 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n" " A = \n 1 0\n 0 1\n" " y' = \n 4\n 4\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 1
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float VA[]={ 1, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 5, 0, 5, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n" " A = \n 1 1\n 1 1\n" " y' = \n 5\n 5\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 5 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 5 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 9, 0, 4, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n" " A = \n 1 0\n 5 1\n" " y' = \n 9\n 4\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float VA[]={ 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 4, 0, 5, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n" " A = \n 1 1\n 0 1\n" " y' = \n 4\n 5\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 2 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 4, 6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n" " A = \n 1 0\n 2 1\n" " y' = \n 4\n 6\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 3
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float VA[]={ 3 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 4, 7 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n" " A = \n 1 3\n 0 1\n" " y' = \n 4\n 7\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 2
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float VA[]={ 2, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 5, 6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n" " A = \n 1 2\n 1 1\n" " y' = \n 5\n 6\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 4, 0, 5, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n" " A = \n 1 0\n 1 1\n" " y' = \n 4\n 5\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float VA[]={ 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 4, 0, 5, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n" " A = \n 1 1\n 0 1\n" " y' = \n 4\n 5\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float VA[]={ 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 4, 0, 5, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n" " A = \n 1 1\n 0 1\n" " y' = \n 4\n 5\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
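+/*
+ * From here the cases switch to alpha=-1 ("anr1", presumably "alpha
+ * negative 1"), so the expectation becomes y' = y - op(A)*x; the
+ * identity-only case below gives y' = [3-1, 3-1] = [2, 2].
+ */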
+	static rsb_err_t ts_sg_di_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 2, 2 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n" " A = \n 1 0\n 0 1\n" " y' = \n 2\n 2\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 2, 2 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n" " A = \n 1 0\n 0 1\n" " y' = \n 2\n 2\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 4
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float VA[]={ 4, 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 1, -2 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n" " A = \n 1 4\n 1 1\n" " y' = \n 1\n -2\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n" " A = \n 1 0\n 0 1\n" " y' = \n 2\n 2\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n" " A = \n 1 0\n 0 1\n" " y' = \n 2\n 2\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float VA[]={ 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 2, 0, 1, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n" " A = \n 1 1\n 0 1\n" " y' = \n 2\n 1\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 1
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float VA[]={ 1, 2 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 1, 0 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n" " A = \n 1 1\n 2 1\n" " y' = \n 1\n 0\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 2
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float VA[]={ 2, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 1, 0 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n" " A = \n 1 2\n 1 1\n" " y' = \n 1\n 0\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 2
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float VA[]={ 2 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 2, 0 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n" " A = \n 1 2\n 0 1\n" " y' = \n 2\n 0\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 1
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float VA[]={ 1, 2 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 1, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n" " A = \n 1 1\n 2 1\n" " y' = \n 1\n 0\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 1
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float VA[]={ 1, 3 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ -1, 0, 1, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n" " A = \n 1 1\n 3 1\n" " y' = \n -1\n 1\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 2
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float VA[]={ 2, 3 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ -1, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n" " A = \n 1 2\n 3 1\n" " y' = \n -1\n 0\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
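+/*
+ * The remaining cases use alpha=-3 ("anr3"); the identity-only case below
+ * gives y' = y - 3*x = [0, 0].
+ */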
+	static rsb_err_t ts_sg_di_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, 0 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n" " A = \n 1 0\n 0 1\n" " y' = \n 0\n 0\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
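+/*
+ * nnz=0 cases like the one above insert no entries and pass dummy
+ * IA/JA/VA arrays; they are skipped unless RSB_BLAS_SUPPORT_EMPTY
+ * holds. With blas_unit_diag the empty matrix still behaves as the
+ * identity, which is what the reference cy values encode.
+ */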
+	static rsb_err_t ts_sg_di_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, 0 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
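+/*
+ * trans=c cases use blas_conj_trans; for the real types exercised
+ * here ('s' and 'd') conjugation is a no-op, so the expected results
+ * coincide with those of plain transposition.
+ */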
+	static rsb_err_t ts_sg_di_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 3 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ -9, 0 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 0\n 3 1\n"	" y' = \n -9\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
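+/*
+ * Cases with incx or incy equal to 2 lay the logical vectors out with
+ * a stride: y[]={ 3, 0, 3, 0 } with incy=2 is the logical y = (3, 3),
+ * and the interleaved positions, initialized to zero, must be left
+ * untouched by usmv, as the reference cy arrays reflect.
+ */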
+	static rsb_err_t ts_sg_di_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 3 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, 0, -9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 3 1\n"	" y' = \n 0\n -9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 3
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float VA[]={ 3, 3 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ -9, 0, -9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n -9\n -9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 2 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, -6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 2 1\n"	" y' = \n 0\n -6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 5
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float VA[]={ 5 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, -15 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 5\n 0 1\n"	" y' = \n 0\n -15\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float VA[]={ 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, -3 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 1\n 0 1\n"	" y' = \n 0\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 1
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float VA[]={ 1, 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ -3, 0, -3, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 2 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ -6, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 0\n 2 1\n"	" y' = \n -6\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sg_di_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
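+/*
+ * From here on the same case matrix is repeated for type 'd' (double
+ * precision): BLAS_duscr_begin, BLAS_duscr_insert_entries,
+ * BLAS_duscr_end and BLAS_dusmv replace the BLAS_sus* calls, and
+ * rsb__do_are_same / rsb_sbtc_print_vec compare and print as 'D'.
+ */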
+	static rsb_err_t td_sg_di_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 2
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double VA[]={ 2 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 12, 6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 2\n 0 1\n"	" y' = \n 12\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double VA[]={ 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 6, 9 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 1\n 0 1\n"	" y' = \n 6\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 2
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double VA[]={ 2 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 6, 12 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 2\n 0 1\n"	" y' = \n 6\n 12\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 2
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double VA[]={ 2, 3 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 12, 0, 15, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 2\n 3 1\n"	" y' = \n 12\n 15\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 9, 0, 6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 0\n 1 1\n"	" y' = \n 9\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 3
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double VA[]={ 3 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 15, 6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 3\n 0 1\n"	" y' = \n 15\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 6, 6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 9, 6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 0\n 1 1\n"	" y' = \n 9\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 9, 0, 6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 1\n 0 1\n"	" y' = \n 9\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 2 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 12, 0, 6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 2 1\n"	" y' = \n 12\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 2 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 12, 0, 6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 0\n 2 1\n"	" y' = \n 12\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
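+/*
+ * The alpha=3 variants above are repeated below with alpha=1; in all
+ * of these usmv cases beta is fixed at 1, so the product always
+ * accumulates into y.
+ */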
+	static rsb_err_t td_sg_di_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 4, 5 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 0\n 1 1\n"	" y' = \n 4\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 3
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double VA[]={ 3 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 4, 7 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 3\n 0 1\n"	" y' = \n 4\n 7\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 3
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double VA[]={ 3, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 5, 7 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 3\n 1 1\n"	" y' = \n 5\n 7\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 3
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double VA[]={ 3, 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 5, 0, 7, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 3\n 1 1\n"	" y' = \n 5\n 7\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 2
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double VA[]={ 2, 2 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 5
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double VA[]={ 5, 3 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 9, 7 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 5\n 3 1\n"	" y' = \n 9\n 7\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 5, 4 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 0\n 1 1\n"	" y' = \n 5\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 1
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double VA[]={ 1, 3 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 7, 5 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 1\n 3 1\n"	" y' = \n 7\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 3
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double VA[]={ 3, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 7, 0, 5, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 3\n 1 1\n"	" y' = \n 7\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 3
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double VA[]={ 3, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 5, 0, 7, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 3\n 1 1\n"	" y' = \n 5\n 7\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 1
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double VA[]={ 1, 2 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 6, 0, 5, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 1\n 2 1\n"	" y' = \n 6\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
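
In the incx=2 / incy=2 variants above, only every second element participates: dusmv reads its operand from x[0] and x[2] (here (1, 1)) and updates y[0] and y[2] in place, so the interleaved zeros in the reference vectors are padding that must remain untouched: cy = { 6, 0, 5, 0 } above encodes the expected y' = (6, 5).
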
+
+	static rsb_err_t td_sg_di_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 2, 1 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 1 1\n"	" y' = \n 2\n 1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 2, 2 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 3
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double VA[]={ 3, 2 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, -1 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 3\n 2 1\n"	" y' = \n 0\n -1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 2 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 2, 0, 0, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 2 1\n"	" y' = \n 2\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 2 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0, 2, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 2 1\n"	" y' = \n 0\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 2
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double VA[]={ 2 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 0, 2 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 2\n 0 1\n"	" y' = \n 0\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 6 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 6 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -4, 2 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 6 1\n"	" y' = \n -4\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 1, 2 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 0\n 1 1\n"	" y' = \n 1\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 2 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 2, 0, 0, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 2 1\n"	" y' = \n 2\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
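
The nnz=0 cases double as a check of the implicit diagonal alone: with blas_unit_diag, op(A) is the identity, so the expectation reduces to y' = y + alpha*x. For alpha = -3 that is (3, 3) - 3*(1, 1) = (0, 0), the cy hardcoded above.
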
+
+	static rsb_err_t td_sg_di_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double VA[]={ 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, -3 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 1\n 0 1\n"	" y' = \n 0\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 3
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double VA[]={ 3, 2 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ -6, 0, -9, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 3\n 2 1\n"	" y' = \n -6\n -9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 0, 0 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -3, 0 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
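+	/* Worked example: with the implicit unit diagonal A = [1 0; 1 1], so
+	 * A^T = [1 1; 0 1].  With incx=2 the effective x is [1, 1], hence
+	 * A^T*x = [2, 1] and y' = y + (-3)*[2, 1] = [-3, 0]. */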
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 0\n 1 1\n"	" y' = \n -3\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 2
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double VA[]={ 2, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -3, -6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
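+	/* For the real 'd' type blas_conj_trans reduces to a plain transpose:
+	 * A = [1 2; 1 1] (unit diagonal included), A^H = A^T = [1 1; 2 1],
+	 * so y' = y + (-3)*A^T*[1, 1] = [3-6, 3-9] = [-3, -6]. */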
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 2\n 1 1\n"	" y' = \n -3\n -6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 1
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -3, 0, -3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 4 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 4 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -12, 0, 0, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 0\n 4 1\n"	" y' = \n -12\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sg_di_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 1
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double VA[]={ 1, 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -3, 0, -3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
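+/*
+ The cases below switch from double ('d', BLAS_dus*) to single precision
+ complex ('c', BLAS_cus*).  Note that in this C binding the complex scalar
+ alpha is passed by address (&alpha) to BLAS_cusmv, whereas the real variant
+ BLAS_dusmv takes alpha by value.
+*/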
+	static rsb_err_t tc_sg_di_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 2+3i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 2+3*I, 0+3*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 12+9*I, 6+9*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1+0i 2+3i\n 0+3i 1+0i\n"	" y' = \n 12+9i\n 6+9i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 1+6i
+ 1+6i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 1+6*I, 1+6*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 9+18*I, 9+18*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
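+	/* Worked example: blas_trans transposes without conjugating.  Here the
+	 * stored part is symmetric, so with the unit diagonal A^T = A =
+	 * [1 1+6i; 1+6i 1], A*x = [2+6i, 2+6i], and
+	 * y' = y + 3*(A^T*x) = [9+18i, 9+18i]. */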
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1+0i 1+6i\n 1+6i 1+0i\n"	" y' = \n 9+18i\n 9+18i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 1 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 6, 9 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 1\n 0 1\n"	" y' = \n 6\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+2i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 0+2*I, 0+2*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 6+6*I, 0+0*I, 6+6*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1+0i 0+2i\n 0+2i 1+0i\n"	" y' = \n 6+6i\n 6+6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0 3
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 3 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 6, 0, 15, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 3\n 0 1\n"	" y' = \n 6\n 15\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+1i
+ 3+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 0+1*I, 3+1*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 15+-3*I, 0+0*I, 6+-3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
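+	/* Worked example: with the unit diagonal A = [1 i; 3+i 1], so
+	 * A^H = [1 3-i; -i 1] and A^H*x = [4-i, 1-i].  Then
+	 * y' = y + 3*(A^H*x) = [15-3i, 6-3i], stored at stride incy=2. */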
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1+0i 0+1i\n 3+1i 1+0i\n"	" y' = \n 15+-3i\n 6+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 1+1i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 1+1*I, 0+1*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 9+3*I, 6+3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1+0i 1+1i\n 0+1i 1+0i\n"	" y' = \n 9+3i\n 6+3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+2i
+ 3+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 0+2*I, 3+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 15+6*I, 6+6*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1+0i 0+2i\n 3+2i 1+0i\n"	" y' = \n 15+6i\n 6+6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+2i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 0+2*I, 0+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 6+-6*I, 6+-6*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1+0i 0+2i\n 0+2i 1+0i\n"	" y' = \n 6+-6i\n 6+-6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+2i
+ 2+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 0+2*I, 2+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 6+6*I, 0+0*I, 12+6*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1+0i 0+2i\n 2+2i 1+0i\n"	" y' = \n 6+6i\n 12+6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 2+3i
+ 1+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 2+3*I, 1+3*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 9+9*I, 0+0*I, 12+9*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1+0i 2+3i\n 1+3i 1+0i\n"	" y' = \n 9+9i\n 12+9i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0 1
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 1, 2 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 12, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 1\n 2 1\n"	" y' = \n 12\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 1+2i
+ 2+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 1+2*I, 2+2*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 5+2*I, 6+2*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1+0i 1+2i\n 2+2i 1+0i\n"	" y' = \n 5+2i\n 6+2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 1+2i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 1+2*I, 0+2*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 4+2*I, 5+2*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1+0i 1+2i\n 0+2i 1+0i\n"	" y' = \n 4+2i\n 5+2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 1+4i
+ 3+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 1+4*I, 3+4*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 7+-4*I, 5+-4*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1+0i 1+4i\n 3+4i 1+0i\n"	" y' = \n 7+-4i\n 5+-4i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 1+1i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 1+1*I, 0+1*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 5+1*I, 0+0*I, 4+1*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1+0i 1+1i\n 0+1i 1+0i\n"	" y' = \n 5+1i\n 4+1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+6i
+ 1+6i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 0+6*I, 1+6*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 5+6*I, 0+0*I, 4+6*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1+0i 0+6i\n 1+6i 1+0i\n"	" y' = \n 5+6i\n 4+6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 2 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 6, 0, 4, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 0\n 2 1\n"	" y' = \n 6\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+2i
+ 5+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 0+2*I, 5+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 4+2*I, 9+2*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1+0i 0+2i\n 5+2i 1+0i\n"	" y' = \n 4+2i\n 9+2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
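+
+/* Added note: all of these generated cases share one control flow. "ferr"
+ * reports a hard failure, printing the hardcoded system description lsc and
+ * the computed y before returning errval; "err" is the early exit for
+ * skipped configurations (unsupported type, or empty matrices without
+ * RSB_BLAS_SUPPORT_EMPTY); "ok" returns RSB_ERR_NO_ERROR on success. */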
+
+	static rsb_err_t tc_sg_di_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 2 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 6, 4 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n" " A = \n 1 0\n 2 1\n" " y' = \n 6\n 4\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 2+7i
+ 2+7i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 2+7*I, 2+7*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 6+-7*I, 6+-7*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
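+	/* Worked check (added note): with the implicit unit diagonal,
+	   A = [ 1, 2+7i ; 2+7i, 1 ], so A^H*x = (3-7i, 3-7i) and
+	   y' = y + A^H*x = (6-7i, 6-7i), i.e. the cy reference above. */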
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n" " A = \n 1+0i 2+7i\n 2+7i 1+0i\n" " y' = \n 6+-7i\n 6+-7i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
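+	/* Added note: with incx=incy=2 only elements 0 and 2 of x and y take
+	   part in the operation; the interleaved zeros are padding that must be
+	   left untouched, and the trailing 2,2 arguments of rsb__do_are_same()
+	   below appear to be exactly these strides. */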
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n" " A = \n 1 0\n 0 1\n" " y' = \n 4\n 4\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 3+2i
+ 1+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 3+2*I, 1+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 5+2*I, 0+0*I, 7+2*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n" " A = \n 1+0i 3+2i\n 1+2i 1+0i\n" " y' = \n 5+2i\n 7+2i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 1+2i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 1+2*I, 0+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 4+-2*I, 0+0*I, 5+-2*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n" " A = \n 1+0i 1+2i\n 0+2i 1+0i\n" " y' = \n 4+-2i\n 5+-2i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 2+3i
+ 1+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 2+3*I, 1+3*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 0+-3*I, 1+-3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n" " A = \n 1+0i 2+3i\n 1+3i 1+0i\n" " y' = \n 0+-3i\n 1+-3i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 1, 2 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n" " A = \n 1 0\n 1 1\n" " y' = \n 1\n 2\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};		/* x: 2 */
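+	/* Added note: nnz=0 means no stored entries at all, so with
+	   blas_unit_diag the matrix acts as the 2x2 identity; the arrays above
+	   are placeholders only, and the RSB_BLAS_SUPPORT_EMPTY guard below
+	   decides whether this configuration runs. */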
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 2, 2 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n" " A = \n 1 0\n 0 1\n" " y' = \n 2\n 2\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 2+2i
+ 2+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 2+2*I, 2+2*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 0+-2*I, 0+0*I, 0+-2*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n" " A = \n 1+0i 2+2i\n 2+2i 1+0i\n" " y' = \n 0+-2i\n 0+-2i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+5i
+ 0+5i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 0+5*I, 0+5*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 2+-5*I, 0+0*I, 2+-5*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n" " A = \n 1+0i 0+5i\n 0+5i 1+0i\n" " y' = \n 2+-5i\n 2+-5i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0 4
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 4, 1 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 1, 0, -2, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n" " A = \n 1 4\n 1 1\n" " y' = \n 1\n -2\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 2+2i
+ 2+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 2+2*I, 2+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0+-2*I, 0+-2*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n" " A = \n 1+0i 2+2i\n 2+2i 1+0i\n" " y' = \n 0+-2i\n 0+-2i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 3+5i
+ 1+5i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 3+5*I, 1+5*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 1+-5*I, -1+-5*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n" " A = \n 1+0i 3+5i\n 1+5i 1+0i\n" " y' = \n 1+-5i\n -1+-5i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 4+1i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 4+1*I, 0+1*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 2+1*I, -2+1*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n" " A = \n 1+0i 4+1i\n 0+1i 1+0i\n" " y' = \n 2+1i\n -2+1i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 1 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 1, 0, 2, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n" " A = \n 1 1\n 0 1\n" " y' = \n 1\n 2\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 1+1i
+ 1+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 1+1*I, 1+1*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 1+-1*I, 0+0*I, 1+-1*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n" " A = \n 1+0i 1+1i\n 1+1i 1+0i\n" " y' = \n 1+-1i\n 1+-1i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 1 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 2, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n" " A = \n 1 1\n 0 1\n" " y' = \n 2\n 1\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 1+2i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 1+2*I, 0+2*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -3+-6*I, 0+-6*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
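+	/* Worked check (added note): including the unit diagonal,
+	   A = [ 1, 1+2i ; 2i, 1 ], A*x = (2+2i, 1+2i), and
+	   y' = y - 3*A*x = (-3-6i, 0-6i), matching cy above. */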
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n" " A = \n 1+0i 1+2i\n 0+2i 1+0i\n" " y' = \n -3+-6i\n 0+-6i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+2i
+ 1+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 0+2*I, 1+2*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -3+-6*I, 0+-6*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n" " A = \n 1+0i 0+2i\n 1+2i 1+0i\n" " y' = \n -3+-6i\n 0+-6i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+4i
+ 0+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 0+4*I, 0+4*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 0+12*I, 0+12*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
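+	/* Worked check (added note): trans=c conjugates the stored +4i entries,
+	   so A^H*x = (1-4i, 1-4i) and y' = y - 3*(A^H*x) = (0+12i, 0+12i);
+	   the sign flip visible in cy is precisely the conjugation under test. */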
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n" " A = \n 1+0i 0+4i\n 0+4i 1+0i\n" " y' = \n 0+12i\n 0+12i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 1+2i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 1+2*I, 0+2*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -3+-6*I, 0+0*I, 0+-6*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n" " A = \n 1+0i 1+2i\n 0+2i 1+0i\n" " y' = \n -3+-6i\n 0+-6i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 2+10i
+ 2+10i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 2+10*I, 2+10*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -6+-30*I, 0+0*I, -6+-30*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n" " A = \n 1+0i 2+10i\n 2+10i 1+0i\n" " y' = \n -6+-30i\n -6+-30i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 3+1i
+ 2+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 3+1*I, 2+1*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -6+3*I, 0+0*I, -9+3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n" " A = \n 1+0i 3+1i\n 2+1i 1+0i\n" " y' = \n -6+3i\n -9+3i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+3i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 0+3*I, 0+3*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0+-9*I, 0+-9*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n" " A = \n 1+0i 0+3i\n 0+3i 1+0i\n" " y' = \n 0+-9i\n 0+-9i\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0 1
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 1, 2 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ -6, -3 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n" " A = \n 1 1\n 2 1\n" " y' = \n -6\n -3\n" " y = \n 3\n 3\n" " x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
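+/* In the incx=2 tests above and below, x[] holds 4 elements but only the
+   stride-2 ones (x[0], x[2]) form the logical vector [1, 1]; the interleaved
+   zeros are padding that usmv must skip over.  Likewise, with incy=2 the
+   expected cy[] carries zeros in the odd slots, which doubles as a check
+   that the multiply leaves the padding untouched. */
+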
+	static rsb_err_t tc_sg_di_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+2i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 0+2*I, 0+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0+6*I, 0+6*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"
+		" A = \n 1+0i 0+2i\n 0+2i 1+0i\n"
+		" y' = \n 0+6i\n 0+6i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
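+/* All of these generated tests share one exit protocol: on mismatch, "ferr"
+   prints the failing configuration, the hardcoded system lsc, and the
+   computed y', then falls through to "err", which returns errval (still
+   RSB_BLAS_ERROR, or RSB_ERR_UNSUPPORTED_TYPE when the test was merely
+   skipped); "ok" returns RSB_ERR_NO_ERROR on success. */
+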
+	static rsb_err_t tc_sg_di_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 1+2i
+ 1+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 1+2*I, 1+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ -3+-6*I, 0+0*I, -3+-6*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"
+		" A = \n 1+0i 1+2i\n 1+2i 1+0i\n"
+		" y' = \n -3+-6i\n -3+-6i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+1i
+ 2+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 0+1*I, 2+1*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ -6+-3*I, 0+0*I, 0+-3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"
+		" A = \n 1+0i 0+1i\n 2+1i 1+0i\n"
+		" y' = \n -6+-3i\n 0+-3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sg_di_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+1i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	float complex VA[]={ 0+1*I, 0+1*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0+3*I, 0+0*I, 0+3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"
+		" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"
+		" y' = \n 0+3i\n 0+3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+2i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 0+2*I, 0+2*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 6+6*I, 6+6*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1+0i 0+2i\n 0+2i 1+0i\n"
+		" y' = \n 6+6i\n 6+6i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
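+/* A sanity check of the hardcoded cy[] in the test above: the inserted
+   entries supply only the off-diagonal 2i terms, and blas_unit_diag adds
+   the implicit ones; that is why the "A =" comments show zeros on the
+   diagonal while the lsc strings show ones.  The effective A is
+   [[1, 2i], [2i, 1]], so y' = y + 3*A*x = [3,3] + 3*[1+2i, 2i+1]
+   = [6+6i, 6+6i], which is exactly cy[]. */
+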
+	static rsb_err_t tz_sg_di_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+4i
+ 0+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 0+4*I, 0+4*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 6+12*I, 6+12*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1+0i 0+4i\n 0+4i 1+0i\n"
+		" y' = \n 6+12i\n 6+12i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 1+3i
+ 2+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 1+3*I, 2+3*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 12+-9*I, 9+-9*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1+0i 1+3i\n 2+3i 1+0i\n"
+		" y' = \n 12+-9i\n 9+-9i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
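+/* Likewise for the conjugate-transpose case just above: the effective
+   A = [[1, 1+3i], [2+3i, 1]], so A^H*x = [1+(2-3i), (1-3i)+1] = [3-3i, 2-3i]
+   and y' = [3,3] + 3*[3-3i, 2-3i] = [12-9i, 9-9i], matching cy[]. */
+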
+	static rsb_err_t tz_sg_di_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 6\n 6\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
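+/* The nnz=0 test above covers the corner case of no stored entries: with
+   blas_unit_diag the operator still acts as the identity, so
+   y' = y + 3*I*x = [6, 6].  The dummy -1 values in IA/JA/VA are never read,
+   since insert_entries is called with nnz=0. */
+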
+	static rsb_err_t tz_sg_di_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 2+1i
+ 3+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 2+1*I, 3+1*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 15+3*I, 0+0*I, 12+3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1+0i 2+1i\n 3+1i 1+0i\n"
+		" y' = \n 15+3i\n 12+3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 2 };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 12, 0, 6, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1 0\n 2 1\n"
+		" y' = \n 12\n 6\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+5i
+ 1+5i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 0+5*I, 1+5*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 6+15*I, 9+15*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1+0i 0+5i\n 1+5i 1+0i\n"
+		" y' = \n 6+15i\n 9+15i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 2+2i
+ 1+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 2+2*I, 1+2*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 9+6*I, 12+6*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1+0i 2+2i\n 1+2i 1+0i\n"
+		" y' = \n 9+6i\n 12+6i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+3i
+ 1+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 0+3*I, 1+3*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 9+-9*I, 6+-9*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1+0i 0+3i\n 1+3i 1+0i\n"
+		" y' = \n 9+-9i\n 6+-9i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 2+3i
+ 1+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 2+3*I, 1+3*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 12+9*I, 0+0*I, 9+9*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1+0i 2+3i\n 1+3i 1+0i\n"
+		" y' = \n 12+9i\n 9+9i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 5+3i
+ 3+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 5+3*I, 3+3*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 15+9*I, 0+0*I, 21+9*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1+0i 5+3i\n 3+3i 1+0i\n"
+		" y' = \n 15+9i\n 21+9i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+4i
+ 0+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 0+4*I, 0+4*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 6+-12*I, 0+0*I, 6+-12*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1+0i 0+4i\n 0+4i 1+0i\n"
+		" y' = \n 6+-12i\n 6+-12i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double complex VA[]={ 1 };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 5, 4 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1 1\n 0 1\n"
+		" y' = \n 5\n 4\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 2+1i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 2+1*I, 0+1*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 4+1*I, 6+1*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1+0i 2+1i\n 0+1i 1+0i\n"
+		" y' = \n 4+1i\n 6+1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 1+3i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 1+3*I, 0+3*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 4+-3*I, 5+-3*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1+0i 1+3i\n 0+3i 1+0i\n"
+		" y' = \n 4+-3i\n 5+-3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 4+6i
+ 0+6i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 4+6*I, 0+6*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 8+6*I, 0+0*I, 4+6*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1+0i 4+6i\n 0+6i 1+0i\n"
+		" y' = \n 8+6i\n 4+6i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 5+1i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 5+1*I, 0+1*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 4+1*I, 0+0*I, 9+1*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1+0i 5+1i\n 0+1i 1+0i\n"
+		" y' = \n 4+1i\n 9+1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+3i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 0+3*I, 0+3*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 4+-3*I, 0+0*I, 4+-3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1+0i 0+3i\n 0+3i 1+0i\n"
+		" y' = \n 4+-3i\n 4+-3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 1+1i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 1+1*I, 0+1*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 5+1*I, 4+1*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1+0i 1+1i\n 0+1i 1+0i\n"
+		" y' = \n 5+1i\n 4+1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 1+1i
+ 1+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 1+1*I, 1+1*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 5+1*I, 5+1*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1+0i 1+1i\n 1+1i 1+0i\n"
+		" y' = \n 5+1i\n 5+1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+6i
+ 1+6i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 0+6*I, 1+6*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 5+-6*I, 4+-6*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1+0i 0+6i\n 1+6i 1+0i\n"
+		" y' = \n 5+-6i\n 4+-6i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else
+		printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 4+2i
+ 1+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 4+2*I, 1+2*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 8+2*I, 0+0*I, 5+2*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1+0i 4+2i\n 1+2i 1+0i\n"	" y' = \n 8+2i\n 5+2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
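
The ix2/iy2 variants exercise strided vectors: logical element k of a
vector with increment inc lives at array slot k*inc, so
x[] = { 1, 0, 1, 0 } with incx=2 denotes the length-2 vector (1, 1), and
with incy=2 only the even slots of y[] are written while the odd slots
remain untouched padding. A one-line accessor (a sketch, not part of the
library API) makes the convention explicit:

	#include <complex.h>

	/* Logical element k of a vector stored with increment inc. */
	static double complex strided_elem(const double complex *v, int inc, int k)
	{
		return v[k * inc];
	}
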
+	static rsb_err_t tz_sg_di_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0 2
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double complex VA[]={ 2 };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 4, 0, 6, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 2\n 0 1\n"	" y' = \n 4\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 1+1i
+ 4+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 1+1*I, 4+1*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 8+-1*I, 0+0*I, 5+-1*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1+0i 1+1i\n 4+1i 1+0i\n"	" y' = \n 8+-1i\n 5+-1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
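
Each case ends by comparing the computed y[] against the hardcoded cy[]
through rsb__do_are_same(), passing the increments so the strided layout
is respected, and prints both vectors via rsb__debug_print_vectors_diff()
on mismatch. A plausible stand-in for that check over the logical
(strided) elements is sketched below; the real routine is internal to
librsb and may also apply a type-dependent tolerance, whereas exact
equality suffices for the small integer-valued data used here:

	#include <complex.h>

	/* Compare the n logical elements of two vectors stored with stride inc. */
	static int vectors_match(const double complex *y, const double complex *cy,
	                         int n, int inc)
	{
		int k;
		for (k = 0; k < n; ++k)
			if (y[k * inc] != cy[k * inc])
				return 0;
		return 1;
	}
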
+	static rsb_err_t tz_sg_di_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+3i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 0+3*I, 0+3*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 2+-3*I, 2+-3*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1+0i 0+3i\n 0+3i 1+0i\n"	" y' = \n 2+-3i\n 2+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
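
All of these tests share one goto-based control-flow idiom: a forward jump
to ferr on failure (which reports the test description, the expected
system in lsc, and the computed y), to err for skipped configurations, and
to ok on success. Stripped of the BLAS calls, and assuming the same
rsb/blas_sparse definitions the file already uses, the idiom reduces to:

	static rsb_err_t goto_idiom_sketch(void)
	{
		rsb_err_t errval = RSB_BLAS_ERROR;

		if (!RSB_BLAS_SUPPORTED_TYPE('z'))
		{
			errval = RSB_ERR_UNSUPPORTED_TYPE; /* a skip, not a failure */
			goto err;
		}
		if (0 /* any failing BLAS call jumps here instead */)
			goto ferr;
		goto ok;
	ferr:	/* failure diagnostics are printed here, then fall through */
	err:
		return errval;
	ok:
		return RSB_ERR_NO_ERROR;
	}
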
+	static rsb_err_t tz_sg_di_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0 2
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 2, 1 };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 1, 0 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 2\n 1 1\n"	" y' = \n 1\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+7i
+ 3+7i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 0+7*I, 3+7*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -1+7*I, 2+7*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+0i 0+7i\n 3+7i 1+0i\n"	" y' = \n -1+7i\n 2+7i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+1i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 0+1*I, 0+1*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 2+-1*I, 0+0*I, 2+-1*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"	" y' = \n 2+-1i\n 2+-1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 2+1i
+ 2+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 2+1*I, 2+1*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 0+-1*I, 0+0*I, 0+-1*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+0i 2+1i\n 2+1i 1+0i\n"	" y' = \n 0+-1i\n 0+-1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 3+3i
+ 5+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 3+3*I, 5+3*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -3+3*I, 0+0*I, -1+3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+0i 3+3i\n 5+3i 1+0i\n"	" y' = \n -3+3i\n -1+3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 2 };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 2, 0 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 2 1\n"	" y' = \n 2\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 3+1i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 3+1*I, 0+1*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 2+-1*I, -1+-1*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+0i 3+1i\n 0+1i 1+0i\n"	" y' = \n 2+-1i\n -1+-1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+7i
+ 3+7i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 0+7*I, 3+7*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -1+7*I, 2+7*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+0i 0+7i\n 3+7i 1+0i\n"	" y' = \n -1+7i\n 2+7i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+3i
+ 2+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 0+3*I, 2+3*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 2+-3*I, 0+0*I, 0+-3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1+0i 0+3i\n 2+3i 1+0i\n"	" y' = \n 2+-3i\n 0+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0 2
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double complex VA[]={ 2 };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 2, 0, 0, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 2\n 0 1\n"	" y' = \n 2\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+4i
+ 0+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 0+4*I, 0+4*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 2+4*I, 0+0*I, 2+4*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+0i 0+4i\n 0+4i 1+0i\n"	" y' = \n 2+4i\n 2+4i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double complex VA[]={ 1 };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -3, 0 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 1\n 0 1\n"	" y' = \n -3\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
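
The generated names encode the configuration being tested; matched against
the printed summaries, the scheme appears to be: tz = type z (double
complex), sg = symmetry g (general), di = diag i (implicit unit diagonal),
n/t/c = no transpose / transpose / conjugate transpose, ap1/anr1/anr3 =
alpha +1/-1/-3, bp1 = beta +1, and ix*/iy* = the incx/incy strides. Thus
tz_sg_di_usmv_2_n_anr3_bp1_ix1_iy1 above is the usmv case with trans=n,
alpha=-3, beta=1, incx=1, incy=1 on a 2x2 general matrix.
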
+	static rsb_err_t tz_sg_di_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 3+4i
+ 2+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 3+4*I, 2+4*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -6+-12*I, -9+-12*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+0i 3+4i\n 2+4i 1+0i\n"	" y' = \n -6+-12i\n -9+-12i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 1+4i
+ 0+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 1+4*I, 0+4*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 0+12*I, -3+12*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 1+4i\n 0+4i 1+0i\n"	" y' = \n 0+12i\n -3+12i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+5i
+ 3+5i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 0+5*I, 3+5*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 0+-15*I, 0+0*I, -9+-15*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1+0i 0+5i\n 3+5i 1+0i\n"	" y' = \n 0+-15i\n -9+-15i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+2i
+ 3+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 0+2*I, 3+2*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -9+-6*I, 0+0*I, 0+-6*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+0i 0+2i\n 3+2i 1+0i\n"	" y' = \n -9+-6i\n 0+-6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+3i
+ 3+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 0+3*I, 3+3*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -9+9*I, 0+0*I, 0+9*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 0+3i\n 3+3i 1+0i\n"	" y' = \n -9+9i\n 0+9i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0 1
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 1, 1 };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -3, -3 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 3+3i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 3+3*I, 0+3*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 0+-9*I, -9+-9*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+0i 3+3i\n 0+3i 1+0i\n"	" y' = \n 0+-9i\n -9+-9i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 1+3i
+ 2+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 1+3*I, 2+3*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -6+9*I, -3+9*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 1+3i\n 2+3i 1+0i\n"	" y' = \n -6+9i\n -3+9i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+5i
+ 1+5i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 0+5*I, 1+5*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 0+-15*I, 0+0*I, -3+-15*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1+0i 0+5i\n 1+5i 1+0i\n"	" y' = \n 0+-15i\n -3+-15i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sg_di_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+4i
+ 2+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 0+4*I, 2+4*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -6+-12*I, 0+0*I, 0+-12*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+0i 0+4i\n 2+4i 1+0i\n"	" y' = \n -6+-12i\n 0+-12i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
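+/* Hand-check for the trans=t case above: with the implicit unit diagonal,
+   A+I = [1 4i; 2+4i 1], so (A+I)^T*x = [3+4i, 1+4i] for logical x=[1,1],
+   and y + (-3)*(A+I)^T*x = [3-9-12i, 3-3-12i] = [-6-12i, 0-12i], which is
+   exactly the strided cy vector. */
+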
+	static rsb_err_t tz_sg_di_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:g; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 3+2i
+ 3+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=2;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0, 1 };
+	int JA[]={ 1, 0 };
+	double complex VA[]={ 3+2*I, 3+2*I };		/* x: 4 entries */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -9+6*I, 0+0*I, -9+6*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 3+2i\n 3+2i 1+0i\n"	" y' = \n -9+6i\n -9+6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=g diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
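+/* Hand-check for the trans=c case above: A+I = [1 3+2i; 3+2i 1], so
+   (A+I)^H*x = [4-2i, 4-2i] for logical x=[1,1], and y + (-3)*(A+I)^H*x =
+   [3-12+6i, 3-12+6i] = [-9+6i, -9+6i], matching cy.  For the complex types
+   blas_conj_trans differs from blas_trans, so these cases need their own
+   reference data. */
+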
+	static rsb_err_t ts_su_di_ussv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ 3, 3 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
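+/* The ussv cases solve in place: y <- alpha*op(A)^{-1}*y.  In the nnz=0
+   cases such as the one above, blas_unit_diag turns the empty matrix into
+   the 2x2 identity (hence the RSB_BLAS_SUPPORT_EMPTY guard), and the solve
+   reduces to scaling by alpha.  Below is an editorial, hand-checked sketch
+   of a nontrivial unit-diagonal upper-triangular solve; the function name
+   is hypothetical and only calls already used in this file are assumed. */
+static int ussv_unit_diag_sketch(void)
+{
+	const int IA[]={ 0 }, JA[]={ 1 };
+	const float VA[]={ 2 };	/* strictly upper entry: A(0,1)=2 */
+	float y[]={ 5, 3 };	/* overwritten by the in-place solve */
+	blas_sparse_matrix A = BLAS_suscr_begin(2,2);
+
+	if( A == -1 ) return -1;
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR ) return -1;
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR ) return -1;	/* diagonal implicitly 1 */
+	if( BLAS_suscr_insert_entries(A,1,VA,IA,JA) != RSB_BLAS_NO_ERROR ) return -1;
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR ) return -1;
+	/* back-substitution on A=[1 2; 0 1]: y <- 1*A^{-1}*[5,3] = [5-2*3, 3] = [-1, 3] */
+	if( BLAS_sussv(blas_no_trans,1.0f,A,y,1) != RSB_BLAS_NO_ERROR ) return -1;
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR ) return -1;
+	return ( y[0] == -1.0f && y[1] == 3.0f ) ? 0 : -1;	/* values are exact in float */
+}
+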
+	static rsb_err_t ts_su_di_ussv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float VA[]={ 1 };/* type is float */
+
+	float x[]={ 3, 6 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ 3, 6 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1 1\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 6\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_di_ussv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float VA[]={ 1 };/* type is float */
+
+	float x[]={ 3, 6 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ 3, 6 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1 1\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 6\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
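+/* Note: for the real types blas_conj_trans coincides with blas_trans, which
+   is why this trans=c case reuses the data and expected solution of the
+   trans=t case above. */
+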
+	static rsb_err_t ts_su_di_ussv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 4
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float VA[]={ 4 };/* type is float */
+
+	float x[]={ 15, 0, 3, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ 15, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1 4\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 15\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
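+/* Note on incx=2/incy=2 above: the logical two-element vectors occupy every
+   other array slot (e.g. y[0],y[2]), the interleaved zeros acting only as
+   padding; rsb__do_are_same() receives the same increments, so the
+   comparison follows the same stride. */
+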
+	static rsb_err_t ts_su_di_ussv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float VA[]={ 1 };/* type is float */
+
+	float x[]={ 3, 0, 6, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 6, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1 1\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 6\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_di_ussv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 2
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float VA[]={ 2 };/* type is float */
+
+	float x[]={ 3, 0, 9, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 9, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1 2\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_di_ussv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ 1, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_di_ussv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ 1, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_di_ussv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ 1, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_di_ussv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float VA[]={ 1 };/* type is float */
+
+	float x[]={ 2, 0, 1, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ 2, 0, 1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1 1\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 2\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_di_ussv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ 1, 0, 1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_di_ussv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ 1, 0, 1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_di_ussv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float VA[]={ 1 };/* type is float */
+
+	float x[]={ -2, -1 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ -2, -1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1 1\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -2\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
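+/* Hand-check for the alpha=-1 case above: A = [1 1; 0 1] under the implicit
+   unit diagonal, A^{-1} = [1 -1; 0 1], so -1*A^{-1}*[-2,-1] = -1*[-1,-1] =
+   [1,1], matching cy. */
+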
+	static rsb_err_t ts_su_di_ussv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float VA[]={ 1 };/* type is float */
+
+	float x[]={ -1, -2 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ -1, -2 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1 1\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -2\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_di_ussv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float VA[]={ 1 };/* type is float */
+
+	float x[]={ -1, -2 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ -1, -2 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1 1\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -2\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_di_ussv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ -1, 0, -1, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ -1, 0, -1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_di_ussv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ -1, 0, -1, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ -1, 0, -1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_di_ussv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ -1, 0, -1, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ -1, 0, -1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_di_ussv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ -3, -3 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ -3, -3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_di_ussv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ -3, -3 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ -3, -3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_di_ussv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 2
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float VA[]={ 2 };/* type is float */
+
+	float x[]={ -3, -9 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ -3, -9 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1 2\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_di_ussv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 2
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float VA[]={ 2 };/* type is float */
+
+	float x[]={ -9, 0, -3, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ -9, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 2\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -9\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_di_ussv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ -3, 0, -3, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ -3, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_su_di_ussv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ -3, 0, -3, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ -3, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
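+/*
+ * From here the same template is instantiated for the double-precision
+ * type 'd': BLAS_duscr_* and BLAS_dussv replace the float variants, and
+ * the type character passed to rsb__do_are_same / rsb_sbtc_print_vec
+ * becomes 'D'.
+ */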
+	static rsb_err_t td_su_di_ussv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double VA[]={ 1 };/* type is double */
+
+	double x[]={ 6, 3 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ 6, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- 3 A^-1 * y' \n"" A = \n 1 1\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 6\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
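+/*
+ * A minimal stand-alone sketch (a reader's illustration, not part of the
+ * generated suite) of the exact call sequence the surrounding tests
+ * exercise, assuming only the blas_sparse.h interface used above:
+ *
+ *   int IA[]={0}, JA[]={1}; double VA[]={1}, y[]={6,3};
+ *   blas_sparse_matrix A = BLAS_duscr_begin(2,2);
+ *   BLAS_ussp(A,blas_upper_triangular);
+ *   BLAS_ussp(A,blas_unit_diag);              // diagonal implicitly one
+ *   BLAS_duscr_insert_entries(A,1,VA,IA,JA);  // A = [1 1; 0 1]
+ *   BLAS_duscr_end(A);
+ *   BLAS_dussv(blas_no_trans,3.0,A,y,1);      // y <- 3 * A^-1 * y = {9,9}
+ *   BLAS_usds(A);
+ */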
+	static rsb_err_t td_su_di_ussv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};/* type is double */
+
+	double x[]={ 3, 3 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- 3 A^-T * y' \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
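+/*
+ * For the real types 's' and 'd', blas_trans and blas_conj_trans are
+ * generated as separate cases but coincide numerically: A^-H equals
+ * A^-T when A is real, so the trans=c tests mirror the trans=t ones.
+ */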
+	static rsb_err_t td_su_di_ussv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 3
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double VA[]={ 3 };/* type is double */
+
+	double x[]={ 3, 12 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ 3, 12 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- 3 A^-H * y' \n"" A = \n 1 3\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 12\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
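+/*
+ * In the ix2/iy2 variants incx==incy==2: the logical length-2 vectors
+ * are stored in 4-element arrays whose even slots carry the data, with
+ * zero padding in the odd slots; cy keeps zeros there, and the
+ * comparison helpers are invoked with both increments set to 2.
+ */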
+	static rsb_err_t td_su_di_ussv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 2
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double VA[]={ 2 };/* type is double */
+
+	double x[]={ 9, 0, 3, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ 9, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- 3 A^-1 * y' \n"" A = \n 1 2\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 9\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_di_ussv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};/* type is double */
+
+	double x[]={ 3, 0, 3, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- 3 A^-T * y' \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_di_ussv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};/* type is double */
+
+	double x[]={ 3, 0, 3, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- 3 A^-H * y' \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
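+/*
+ * Degenerate case exercised repeatedly below: nnz==0 together with
+ * blas_unit_diag makes A the implicit 2x2 identity, so ussv reduces to
+ * the scaling y <- alpha*y. The RSB_BLAS_SUPPORT_EMPTY guard skips the
+ * test on builds without empty-matrix support, and the -1 "dummy value"
+ * entries of VA/IA/JA are never actually inserted.
+ */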
+	static rsb_err_t td_su_di_ussv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};/* type is double */
+
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ 1, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- 1 A^-1 * y' \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_di_ussv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};/* type is double */
+
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ 1, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- 1 A^-T * y' \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_di_ussv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};/* type is double */
+
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ 1, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- 1 A^-H * y' \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_di_ussv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 3
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double VA[]={ 3 };/* type is double */
+
+	double x[]={ 4, 0, 1, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ 4, 0, 1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- 1 A^-1 * y' \n"" A = \n 1 3\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 4\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_di_ussv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double VA[]={ 1 };/* type is double */
+
+	double x[]={ 1, 0, 2, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ 1, 0, 2, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- 1 A^-T * y' \n"" A = \n 1 1\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 2\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_di_ussv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};/* type is double */
+
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ 1, 0, 1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- 1 A^-H * y' \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
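+/*
+ * Naming convention, as far as it can be read off the cases themselves:
+ * tX = type X; su = triangular ("sym=u"); di = unit diagonal ("diag:i");
+ * 2 = order; n/t/c = transposition; apN/anrN = alpha +N / -N; bp1 =
+ * beta +1 (beta is not a ussv parameter, but the generator keeps it in
+ * names and messages); ixN/iyN = strides. The functions starting here
+ * cover alpha = -1.
+ */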
+	static rsb_err_t td_su_di_ussv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 2
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double VA[]={ 2 };/* type is double */
+
+	double x[]={ -3, -1 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ -3, -1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- -1 A^-1 * y' \n"" A = \n 1 2\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -3\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_di_ussv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double VA[]={ 1 };/* type is double */
+
+	double x[]={ -1, -2 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ -1, -2 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- -1 A^-T * y' \n"" A = \n 1 1\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -2\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_di_ussv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 5
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double VA[]={ 5 };/* type is double */
+
+	double x[]={ -1, -6 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ -1, -6 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- -1 A^-H * y' \n"" A = \n 1 5\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -6\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_di_ussv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};/* type is double */
+
+	double x[]={ -1, 0, -1, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ -1, 0, -1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- -1 A^-1 * y' \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_di_ussv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};/* type is double */
+
+	double x[]={ -1, 0, -1, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ -1, 0, -1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- -1 A^-T * y' \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_di_ussv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 4
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double VA[]={ 4 };/* type is double */
+
+	double x[]={ -1, 0, -5, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ -1, 0, -5, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- -1 A^-H * y' \n"" A = \n 1 4\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -5\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_di_ussv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double VA[]={ 1 };/* type is double */
+
+	double x[]={ -6, -3 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ -6, -3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- -3 A^-1 * y' \n"" A = \n 1 1\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -6\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
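+/*
+ * Worked check for the next test: A = [1 3; 0 1] (unit diagonal plus the
+ * stored 3 at (0,1)), op(A) = A^T = [1 0; 3 1], (A^T)^-1 = [1 0; -3 1];
+ * applied to y = (-3,-12) this gives (-3,-3), and alpha = -3 scales it
+ * to (9,9), which is exactly cy.
+ */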
+	static rsb_err_t td_su_di_ussv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 3
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double VA[]={ 3 };/* type is double */
+
+	double x[]={ -3, -12 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ -3, -12 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- -3 A^-T * y' \n"" A = \n 1 3\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -12\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_di_ussv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};/* type is double */
+
+	double x[]={ -3, -3 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ -3, -3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- -3 A^-H * y' \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_di_ussv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 3
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double VA[]={ 3 };/* type is double */
+
+	double x[]={ -12, 0, -3, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ -12, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- -3 A^-1 * y' \n"" A = \n 1 3\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -12\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_di_ussv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double VA[]={ 1 };/* type is double */
+
+	double x[]={ -3, 0, -6, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ -3, 0, -6, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- -3 A^-T * y' \n"" A = \n 1 1\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -6\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_su_di_ussv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};/* type is double */
+
+	double x[]={ -3, 0, -3, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ -3, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- -3 A^-H * y' \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
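+/*
+ * From here the template switches to the C99 float complex type 'c':
+ * values may carry imaginary parts built with I, alpha is passed by
+ * address (&alpha) as the Sparse BLAS interface prescribes for complex
+ * scalars, and blas_conj_trans now genuinely differs from blas_trans.
+ */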
+	static rsb_err_t tc_su_di_ussv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 1 };/* type is float complex */
+
+	float complex x[]={ 6, 3 };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ 6, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y <- 3 A^-1 * y' \n"" A = \n 1 1\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 6\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
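+/* Worked check for the case above (editorial sketch): a unit-diagonal
+   upper-triangular A = [1 a; 0 1] has inverse [1 -a; 0 1]. Here a = 1 and
+   alpha = 3, so assuming y <- alpha * A^{-1} * y: (6,3) -> (6 - 1*3, 3) =
+   (3,3), and scaling by 3 gives (9,9) = cy. The same closed form covers
+   every 2x2 unit-diagonal case in this block. */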
+	static rsb_err_t tc_su_di_ussv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+3i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 0+3*I };/* type is float complex */
+
+	float complex x[]={ 3+0*I, 3+9*I };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ 3+0*I, 3+9*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1+0i 0+3i\n 0+0i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n 3+0i\n 3+9i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
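+/* Worked check (editorial sketch): with trans=t, op(A) = A^T = [1 0; a 1]
+   and op(A)^{-1} = [1 0; -a 1]. Here a = 3i: (3, 3+9i) -> (3, 3+9i - 3i*3)
+   = (3,3), and scaling by 3 gives (9,9) = cy. */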
+	static rsb_err_t tc_su_di_ussv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 1 };/* type is float complex */
+
+	float complex x[]={ 3, 6 };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ 3, 6 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1 1\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 6\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
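+/* Worked check (editorial sketch): with trans=c, op(A) = A^H =
+   [1 0; conj(a) 1]. Here a = 1 is real, so conjugation is a no-op:
+   (3,6) -> (3, 6 - 1*3) = (3,3), and scaling by 3 gives (9,9) = cy.
+   Cases with a genuinely complex off-diagonal (e.g. a = 1+3i below)
+   exercise the conjugation itself. */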
+	static rsb_err_t tc_su_di_ussv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};/* type is float complex */
+
+	float complex x[]={ 3, 0, 3, 0 };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_su_di_ussv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 1+2i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 1+2*I };/* type is float complex */
+
+	float complex x[]={ 3+0*I, 0+0*I, 6+6*I, 0+0*I };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ 3+0*I, 0+0*I, 6+6*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1+0i 1+2i\n 0+0i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n 3+0i\n 6+6i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_su_di_ussv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 1+3i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 1+3*I };/* type is float complex */
+
+	float complex x[]={ 3+0*I, 0+0*I, 6-9*I, 0+0*I };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ 3+0*I, 0+0*I, 6-9*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1+0i 1+3i\n 0+0i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n 3+0i\n 6+-9i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
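+/* Worked check (editorial sketch): here conj(a) = 1-3i, so on the logical
+   stride-2 entries (3, 6-9i) -> (3, 6-9i - (1-3i)*3) = (3,3), and scaling
+   by 3 gives (9,9) = cy. A plain transpose would yield 6-9i - (1+3i)*3 =
+   3-18i instead, so this case genuinely distinguishes A^H from A^T. */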
+	static rsb_err_t tc_su_di_ussv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};/* type is float complex */
+
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ 1, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_su_di_ussv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 1 };/* type is float complex */
+
+	float complex x[]={ 1, 2 };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ 1, 2 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1 1\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 2\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_su_di_ussv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 1 };/* type is float complex */
+
+	float complex x[]={ 1, 2 };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ 1, 2 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1 1\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 2\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_su_di_ussv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+3i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 0+3*I };/* type is float complex */
+
+	float complex x[]={ 1+3*I, 0+0*I, 1+0*I, 0+0*I };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ 1+3*I, 0+0*I, 1+0*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1+0i 0+3i\n 0+0i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n 1+3i\n 1+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_su_di_ussv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 2+2i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 2+2*I };/* type is float complex */
+
+	float complex x[]={ 1+0*I, 0+0*I, 3+2*I, 0+0*I };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ 1+0*I, 0+0*I, 3+2*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1+0i 2+2i\n 0+0i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n 1+0i\n 3+2i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_su_di_ussv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 2+1i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 2+1*I };/* type is float complex */
+
+	float complex x[]={ 1+0*I, 0+0*I, 3-1*I, 0+0*I };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ 1+0*I, 0+0*I, 3-1*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1+0i 2+1i\n 0+0i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n 1+0i\n 3+-1i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
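+/* Editorial note: the alpha=1 cases above isolate the pure triangular
+   solve. In the last one, conj(a) = 2-1i and (1, 3-1i) ->
+   (1, 3-1i - (2-1i)*1) = (1,1) = cy, with no final scaling. */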
+	static rsb_err_t tc_su_di_ussv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0 3
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 3 };/* type is float complex */
+
+	float complex x[]={ -4, -1 };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ -4, -1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1 3\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -4\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
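+/* Worked check (editorial sketch): a = 3, alpha = -1, no transpose:
+   (-4,-1) -> (-4 - 3*(-1), -1) = (-1,-1), and scaling by -1 gives
+   (1,1) = cy. */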
+	static rsb_err_t tc_su_di_ussv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+1i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 0+1*I };/* type is float complex */
+
+	float complex x[]={ -1+0*I, -1-1*I };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ -1+0*I, -1-1*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1+0i 0+1i\n 0+0i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n -1+0i\n -1+-1i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_su_di_ussv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0 4
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 4 };/* type is float complex */
+
+	float complex x[]={ -1, -5 };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ -1, -5 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1 4\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -5\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_su_di_ussv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+1i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 0+1*I };/* type is float complex */
+
+	float complex x[]={ -1-1*I, 0+0*I, -1+0*I, 0+0*I };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ -1-1*I, 0+0*I, -1+0*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1+0i 0+1i\n 0+0i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n -1+-1i\n -1+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_su_di_ussv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+2i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 0+2*I };/* type is float complex */
+
+	float complex x[]={ -1+0*I, 0+0*I, -1-2*I, 0+0*I };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ -1+0*I, 0+0*I, -1-2*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1+0i 0+2i\n 0+0i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n -1+0i\n -1+-2i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_su_di_ussv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};/* type is float complex */
+
+	float complex x[]={ -1, 0, -1, 0 };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ -1, 0, -1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_su_di_ussv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 1+2i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 1+2*I };/* type is float complex */
+
+	float complex x[]={ -6-6*I, -3+0*I };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ -6-6*I, -3+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1+0i 1+2i\n 0+0i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n -6+-6i\n -3+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_su_di_ussv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 2+1i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 2+1*I };/* type is float complex */
+
+	float complex x[]={ -3+0*I, -9-3*I };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ -3+0*I, -9-3*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1+0i 2+1i\n 0+0i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n -3+0i\n -9+-3i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_su_di_ussv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+2i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 0+2*I };/* type is float complex */
+
+	float complex x[]={ -3+0*I, -3+6*I };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ -3+0*I, -3+6*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1+0i 0+2i\n 0+0i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n -3+0i\n -3+6i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_su_di_ussv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 2+1i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 2+1*I };/* type is float complex */
+
+	float complex x[]={ -9-3*I, 0+0*I, -3+0*I, 0+0*I };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ -9-3*I, 0+0*I, -3+0*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1+0i 2+1i\n 0+0i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n -9+-3i\n -3+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_su_di_ussv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0 3
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 3 };/* type is float complex */
+
+	float complex x[]={ -3, 0, -12, 0 };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ -3, 0, -12, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1 3\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -12\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_su_di_ussv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	float complex VA[]={ 1 };/* type is float complex */
+
+	float complex x[]={ -3, 0, -6, 0 };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ -3, 0, -6, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1 1\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -6\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
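+/* Editorial sketch, not generator output: every float-complex case above
+   instantiates one template. Assuming BLAS_cussv computes
+   y <- alpha * op(A)^{-1} * y, each 2x2 unit-diagonal upper-triangular
+   system (A = [1 a; 0 1]) reduces to one back-substitution step plus a
+   scaling. The hypothetical helper below spells that out; it is kept
+   under #if 0 since nothing in the generated suite calls it. */
+#if 0
+static void rsb_sbtc_ussv_2x2_reference_sketch(enum blas_trans_type transT,
+	float complex alpha, float complex a, float complex y[], int inc)
+{
+	/* unit diagonal: only the off-diagonal couples the two unknowns */
+	if( transT == blas_no_trans )
+		y[0] -= a * y[inc];        /* op(A) = A:   solve [1 a; 0 1] z = y */
+	else if( transT == blas_trans )
+		y[inc] -= a * y[0];        /* op(A) = A^T: solve [1 0; a 1] z = y */
+	else
+		y[inc] -= conjf(a) * y[0]; /* op(A) = A^H: conjugated coefficient */
+	y[0] *= alpha;                 /* final scaling by alpha */
+	y[inc] *= alpha;
+}
+#endif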
+	static rsb_err_t tz_su_di_ussv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+1i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double complex VA[]={ 0+1*I };/* type is double complex */
+
+	double complex x[]={ 3+3*I, 3+0*I };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ 3+3*I, 3+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1+0i 0+1i\n 0+0i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n 3+3i\n 3+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
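+/*
+ * Worked check for the case above, assuming the ussv convention noted
+ * earlier: with the implied unit diagonal, A = [1 i; 0 1], so
+ * back-substitution on the input y = [3+3i, 3] gives
+ * [(3+3i) - i*3, 3] = [3, 3]; scaling by alpha = 3 yields [9, 9],
+ * which is exactly cy.
+ */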
+
+	static rsb_err_t tz_su_di_ussv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 2+2i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double complex VA[]={ 2+2*I };/* type is double complex */
+
+	double complex x[]={ 3+0*I, 9+6*I };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ 3+0*I, 9+6*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1+0i 2+2i\n 0+0i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n 3+0i\n 9+6i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
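+/*
+ * Worked check: op(A) = A^T = [1 0; 2+2i 1], so the solve gives
+ * [3, (9+6i) - (2+2i)*3] = [3, 3], and alpha = 3 yields cy = [9, 9].
+ */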
+
+	static rsb_err_t tz_su_di_ussv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0 3
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double complex VA[]={ 3 };/* type is double complex */
+
+	double complex x[]={ 3, 12 };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ 3, 12 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1 3\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 12\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
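+/*
+ * Worked check: with the stored value real, conjugation is a no-op and
+ * op(A) = A^H = [1 0; 3 1]; the solve gives [3, 12 - 3*3] = [3, 3], and
+ * alpha = 3 yields cy = [9, 9].
+ */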
+
+	static rsb_err_t tz_su_di_ussv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};/* type is double complex */
+
+	double complex x[]={ 3, 0, 3, 0 };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
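+/*
+ * Degenerate case: nnz = 0 with blas_unit_diag makes A the identity, so
+ * ussv only scales by alpha.  With incx = 2 just y[0] and y[2] take part:
+ * [3, 3] * 3 = [9, 9], the odd (padding) entries staying 0.  The
+ * RSB_BLAS_SUPPORT_EMPTY guard skips the test where empty matrices are
+ * not supported.
+ */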
+
+	static rsb_err_t tz_su_di_ussv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};/* type is double complex */
+
+	double complex x[]={ 3, 0, 3, 0 };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_di_ussv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+1i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double complex VA[]={ 0+1*I };/* type is double complex */
+
+	double complex x[]={ 3+0*I, 0+0*I, 3+-3*I, 0+0*I };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ 3+0*I, 0+0*I, 3+-3*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1+0i 0+1i\n 0+0i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n 3+0i\n 3+-3i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
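+/*
+ * Worked check: here the conjugation matters: A = [1 i; 0 1] gives
+ * A^H = [1 0; -i 1], so the stride-2 solve is [3, (3-3i) - (-i)*3]
+ * = [3, 3], and alpha = 3 yields cy = [9, 9].
+ */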
+
+	static rsb_err_t tz_su_di_ussv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0 2
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double complex VA[]={ 2 };/* type is double complex */
+
+	double complex x[]={ 3, 1 };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ 3, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1 2\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 3\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
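+/*
+ * Worked check: A = [1 2; 0 1], so the solve gives [3 - 2*1, 1] = [1, 1];
+ * with alpha = 1 that is already cy.
+ */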
+
+	static rsb_err_t tz_su_di_ussv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double complex VA[]={ 1 };/* type is double complex */
+
+	double complex x[]={ 1, 2 };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ 1, 2 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1 1\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 2\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_di_ussv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};/* type is double complex */
+
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ 1, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_di_ussv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};/* type is double complex */
+
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ 1, 0, 1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_di_ussv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0 4
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double complex VA[]={ 4 };/* type is double complex */
+
+	double complex x[]={ 1, 0, 5, 0 };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ 1, 0, 5, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1 4\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 5\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_di_ussv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+3i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double complex VA[]={ 0+3*I };/* type is double complex */
+
+	double complex x[]={ 1+0*I, 0+0*I, 1+-3*I, 0+0*I };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ 1+0*I, 0+0*I, 1+-3*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1+0i 0+3i\n 0+0i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n 1+0i\n 1+-3i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
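+/*
+ * Worked check: A^H = [1 0; -3i 1], so the stride-2 solve gives
+ * [1, (1-3i) - (-3i)*1] = [1, 1] = cy (alpha = 1).
+ */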
+
+	static rsb_err_t tz_su_di_ussv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+1i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double complex VA[]={ 0+1*I };/* type is double complex */
+
+	double complex x[]={ -1+-1*I, -1+0*I };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ -1+-1*I, -1+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1+0i 0+1i\n 0+0i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n -1+-1i\n -1+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
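+/*
+ * Worked check: A = [1 i; 0 1] gives [(-1-i) - i*(-1), -1] = [-1, -1];
+ * alpha = -1 flips the sign, matching cy = [1, 1].
+ */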
+
+	static rsb_err_t tz_su_di_ussv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 1+1i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double complex VA[]={ 1+1*I };/* type is double complex */
+
+	double complex x[]={ -1+0*I, -2+-1*I };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ -1+0*I, -2+-1*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1+0i 1+1i\n 0+0i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n -1+0i\n -2+-1i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_di_ussv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+1i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double complex VA[]={ 0+1*I };/* type is double complex */
+
+	double complex x[]={ -1+0*I, -1+1*I };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ -1+0*I, -1+1*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1+0i 0+1i\n 0+0i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n -1+0i\n -1+1i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_di_ussv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0 2
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double complex VA[]={ 2 };/* type is double complex */
+
+	double complex x[]={ -3, 0, -1, 0 };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ -3, 0, -1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1 2\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -3\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_di_ussv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};/* type is double complex */
+
+	double complex x[]={ -1, 0, -1, 0 };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ -1, 0, -1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_di_ussv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0 2
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double complex VA[]={ 2 };/* type is double complex */
+
+	double complex x[]={ -1, 0, -3, 0 };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ -1, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1 2\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_di_ussv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 3+2i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double complex VA[]={ 3+2*I };/* type is double complex */
+
+	double complex x[]={ -12+-6*I, -3+0*I };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ -12+-6*I, -3+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1+0i 3+2i\n 0+0i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n -12+-6i\n -3+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
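+/*
+ * Worked check: A = [1 3+2i; 0 1], so the solve gives
+ * [(-12-6i) - (3+2i)*(-3), -3] = [-3, -3]; alpha = -3 yields cy = [9, 9].
+ */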
+
+	static rsb_err_t tz_su_di_ussv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+3i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double complex VA[]={ 0+3*I };/* type is double complex */
+
+	double complex x[]={ -3+0*I, -3+-9*I };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ -3+0*I, -3+-9*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1+0i 0+3i\n 0+0i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n -3+0i\n -3+-9i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_di_ussv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0 4
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double complex VA[]={ 4 };/* type is double complex */
+
+	double complex x[]={ -3, -15 };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ -3, -15 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1 4\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -15\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_di_ussv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:n kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0 1
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double complex VA[]={ 1 };/* type is double complex */
+
+	double complex x[]={ -6, 0, -3, 0 };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ -6, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 1\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -6\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_di_ussv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:t kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 1+3i
+ 0+0i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 0 };
+	int JA[]={ 1 };
+	double complex VA[]={ 1+3*I };/* type is double complex */
+
+	double complex x[]={ -3+0*I, 0+0*I, -6+-9*I, 0+0*I };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ -3+0*I, 0+0*I, -6+-9*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1+0i 1+3i\n 0+0i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n -3+0i\n -6+-9i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_su_di_ussv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:c kind:u; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};/* type is double complex */
+
+	double complex x[]={ -3, 0, -3, 0 };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ -3, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=u diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
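+/*
+ * Degenerate case again: with nnz = 0, op(A) = I regardless of the
+ * conjugate transpose, and the stride-2 entries [-3, -3] scaled by
+ * alpha = -3 give cy = [9, 9].
+ */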
+
+	static rsb_err_t ts_sl_di_ussv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ 3, 3 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
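+/*
+ * First real-typed ('s') and lower-triangular case.  Note the calling
+ * convention visible above: BLAS_sussv() takes alpha by value, while the
+ * complex variants BLAS_cussv()/BLAS_zussv() receive &alpha.  The matrix
+ * is again the implied identity, so [3, 3] * 3 = [9, 9] = cy.
+ */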
+
+	static rsb_err_t ts_sl_di_ussv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 5 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 5 };/* type is float */
+
+	float x[]={ 18, 3 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ 18, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1 0\n 5 1\n"	" y = \n 18\n 3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_di_ussv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 3 };/* type is float */
+
+	float x[]={ 12, 3 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ 12, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1 0\n 3 1\n"	" y = \n 12\n 3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_di_ussv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 2 };/* type is float */
+
+	float x[]={ 3, 0, 9, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 9, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1 0\n 2 1\n"	" y = \n 3\n 9\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_di_ussv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ 3, 0, 3, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 3\n 3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_di_ussv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ 3, 0, 3, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 3\n 3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_di_ussv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };/* type is float */
+
+	float x[]={ 1, 2 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ 1, 2 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 1\n 2\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_di_ussv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ 1, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_di_ussv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 5 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 5 };/* type is float */
+
+	float x[]={ 6, 1 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ 6, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1 0\n 5 1\n"	" y = \n 6\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_di_ussv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ 1, 0, 1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_di_ussv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 2 };/* type is float */
+
+	float x[]={ 3, 0, 1, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ 3, 0, 1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1 0\n 2 1\n"	" y = \n 3\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_di_ussv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ 1, 0, 1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_di_ussv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ -1, -1 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ -1, -1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_di_ussv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ -1, -1 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ -1, -1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_di_ussv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };/* type is float */
+
+	float x[]={ -2, -1 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ -2, -1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1 0\n 1 1\n"	" y = \n -2\n -1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_di_ussv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };/* type is float */
+
+	float x[]={ -1, 0, -2, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ -1, 0, -2, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1 0\n 1 1\n"	" y = \n -1\n -2\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_di_ussv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };/* type is float */
+
+	float x[]={ -2, 0, -1, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ -2, 0, -1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1 0\n 1 1\n"	" y = \n -2\n -1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_di_ussv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ -1, 0, -1, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ -1, 0, -1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_di_ussv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 2 };/* type is float */
+
+	float x[]={ -3, -9 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ -3, -9 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 0\n 2 1\n"	" y = \n -3\n -9\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_di_ussv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };/* type is float */
+
+	float x[]={ -6, -3 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ -6, -3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1 0\n 1 1\n"	" y = \n -6\n -3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_di_ussv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:s; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ -3, -3 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ -3, -3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n -3\n -3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_di_ussv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ -3, 0, -3, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ -3, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n -3\n -3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_di_ussv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 5 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 5 };/* type is float */
+
+	float x[]={ -18, 0, -3, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ -18, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1 0\n 5 1\n"	" y = \n -18\n -3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sl_di_ussv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:s; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};/* type is float */
+
+	float x[]={ -3, 0, -3, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ -3, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n -3\n -3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_sussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
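+/* Editorial note, not upstream code: the double-precision ('d') variants that
+ * follow repeat the same cycle through BLAS_duscr_* / BLAS_dussv.  Each test
+ * seeds y with the known solution vector and checks that the in-place solve
+ * maps it onto cy, i.e. that y' = alpha * inv(op(A)) * y holds; e.g. with
+ * alpha = 3 and A = [ 1 0 ; 3 1 ], seeding y = (3, 12) gives
+ * inv(A) * (3, 12) = (3, 12 - 3*3) = (3, 3), and 3 * (3, 3) = (9, 9) = cy. */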
+	static rsb_err_t td_sl_di_ussv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 3 };/* type is double */
+
+	double x[]={ 3, 12 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ 3, 12 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1 0\n 3 1\n"	" y = \n 3\n 12\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_di_ussv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};/* type is double */
+
+	double x[]={ 3, 3 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 3\n 3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_di_ussv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 3 };/* type is double */
+
+	double x[]={ 12, 3 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ 12, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1 0\n 3 1\n"	" y = \n 9\n 9\n"	" y' = \n 12\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
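+/* Each system is exercised with trans = n, t and c; for the real 'd'
+   type blas_conj_trans denotes the same operation as blas_trans, so
+   the 't' and 'c' testers differ only in their generated data. */
+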
+	static rsb_err_t td_sl_di_ussv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 2 };/* type is double */
+
+	double x[]={ 3, 0, 9, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ 3, 0, 9, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1 0\n 2 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
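+/* The ix2_iy2 variants pass incx=2: ussv then reads and writes only
+   every incx-th element of y, so the interleaved zeros in x, cy and y
+   are padding that must survive the call untouched.  A sketch of the
+   element mapping (gather_strided is an illustrative helper, not part
+   of the suite): */
+static void gather_strided(const double *y, int n, int inc, double *ly)
+{
+	int i;
+
+	for( i = 0; i < n; ++i )
+		ly[i] = y[i * inc]; /* logical element i of the strided vector */
+}
+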
+	static rsb_err_t td_sl_di_ussv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 3 };/* type is double */
+
+	double x[]={ 12, 0, 3, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ 12, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1 0\n 3 1\n"	" y = \n 9\n 9\n"	" y' = \n 12\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_di_ussv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 3 };/* type is double */
+
+	double x[]={ 12, 0, 3, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ 12, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1 0\n 3 1\n"	" y = \n 9\n 9\n"	" y' = \n 12\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
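+/* The ap1, anr1 and anr3 variants below repeat the same solves with
+   alpha = 1, -1 and -3.  Judging from the hardcoded data, each tester
+   seeds y with x = (1/alpha) * op(A) * cy, so that ussv's update
+   y <- alpha * inv(op(A)) * y must reproduce cy exactly; e.g. with
+   alpha = -3 and A = [ 1 0 ; 1 1 ]:
+   x = (-1/3) * A * (9 9)' = (-3 -6)' and -3 * inv(A) * x = (9 9)'. */
+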
+	static rsb_err_t td_sl_di_ussv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};/* type is double */
+
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ 1, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_di_ussv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};/* type is double */
+
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ 1, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_di_ussv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 2 };/* type is double */
+
+	double x[]={ 3, 1 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ 3, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1 0\n 2 1\n"	" y = \n 1\n 1\n"	" y' = \n 3\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_di_ussv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};/* type is double */
+
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ 1, 0, 1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_di_ussv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};/* type is double */
+
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ 1, 0, 1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_di_ussv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 2 };/* type is double */
+
+	double x[]={ 3, 0, 1, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ 3, 0, 1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1 0\n 2 1\n"	" y = \n 1\n 1\n"	" y' = \n 3\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_di_ussv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 3 };/* type is double */
+
+	double x[]={ -1, -4 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ -1, -4 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1 0\n 3 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -4\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_di_ussv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };/* type is double */
+
+	double x[]={ -2, -1 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ -2, -1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 1\n 1\n"	" y' = \n -2\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_di_ussv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};/* type is double */
+
+	double x[]={ -1, -1 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ -1, -1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_di_ussv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };/* type is double */
+
+	double x[]={ -1, 0, -2, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ -1, 0, -2, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -2\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_di_ussv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};/* type is double */
+
+	double x[]={ -1, 0, -1, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ -1, 0, -1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_di_ussv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 3 };/* type is double */
+
+	double x[]={ -4, 0, -1, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ -4, 0, -1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1 0\n 3 1\n"	" y = \n 1\n 1\n"	" y' = \n -4\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_di_ussv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};/* type is double */
+
+	double x[]={ -3, -3 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ -3, -3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_di_ussv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};/* type is double */
+
+	double x[]={ -3, -3 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ -3, -3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_di_ussv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:d; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};/* type is double */
+
+	double x[]={ -3, -3 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ -3, -3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_di_ussv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };/* type is double */
+
+	double x[]={ -3, 0, -6, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ -3, 0, -6, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -6\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_di_ussv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };/* type is double */
+
+	double x[]={ -6, 0, -3, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ -6, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 9\n 9\n"	" y' = \n -6\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sl_di_ussv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:d; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };/* type is double */
+
+	double x[]={ -6, 0, -3, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ -6, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 9\n 9\n"	" y' = \n -6\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dussv(transT,alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
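+/* From here on the tc_* testers repeat the suite for type 'c' (C99
+   float complex, so <complex.h> is already in scope here).  One
+   interface difference is visible in the calls: the complex entry
+   points take alpha by address (BLAS_cussv(trans,&alpha,...)) where
+   the double ones pass it by value.  A minimal complex sketch under
+   the same assumptions as ussv_sketch above; cussv_sketch is an
+   illustrative name, error checks again omitted: */
+static int cussv_sketch(void)
+{
+	int IA[] = { 1 };
+	int JA[] = { 0 };
+	float complex VA[] = { 0 + 1*I };           /* A = [ 1 0 ; i 1 ], unit diagonal */
+	float complex y[] = { 3 + 0*I, 3 + 3*I };   /* overwritten in place by ussv */
+	float complex alpha = 3;
+	blas_sparse_matrix A = BLAS_cuscr_begin(2, 2);
+
+	if( A == -1 )
+		return -1;
+	BLAS_ussp(A, blas_lower_triangular);
+	BLAS_ussp(A, blas_unit_diag);
+	BLAS_cuscr_insert_entries(A, 1, VA, IA, JA);
+	BLAS_cuscr_end(A);
+	BLAS_cussv(blas_no_trans, &alpha, A, y, 1); /* y <- 3 * inv(A) * y = { 9, 9 } */
+	return BLAS_usds(A);
+}
+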
+	static rsb_err_t tc_sl_di_ussv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+1*I };/* type is float complex */
+
+	float complex x[]={ 3+0*I, 3+3*I };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ 3+0*I, 3+3*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1+0i 0+0i\n 0+1i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n 3+0i\n 3+3i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sl_di_ussv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 3 };/* type is float complex */
+
+	float complex x[]={ 12, 3 };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ 12, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1 0\n 3 1\n"	" y = \n 9\n 9\n"	" y' = \n 12\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sl_di_ussv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+2*I };/* type is float complex */
+
+	float complex x[]={ 3-6*I, 3+0*I };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ 3-6*I, 3+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1+0i 0+0i\n 0+2i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n 3-6i\n 3+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
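+/* Worked example for the conjugate-transpose case above: with
+   A = [ 1 0 ; 2i 1 ], A^H = [ 1 -2i ; 0 1 ] and
+   inv(A^H) = [ 1 2i ; 0 1 ], hence
+   3 * inv(A^H) * (3-6i 3)' = 3 * (3 3)' = (9 9)' = cy.
+   Conjugation flips the sign of the imaginary parts before the
+   transposed solve, which is why 't' and 'c' can only disagree on
+   matrices with nonzero imaginary entries. */
+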
+	static rsb_err_t tc_sl_di_ussv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1+1*I };/* type is float complex */
+
+	float complex x[]={ 3+0*I, 0+0*I, 6+3*I, 0+0*I };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ 3+0*I, 0+0*I, 6+3*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1+0i 0+0i\n 1+1i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n 3+0i\n 6+3i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
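+
+/* Worked check for the strided case above: with incx = 2 only every
+ * second element of y takes part, so the logical input is (3, 6+3i).
+ * Forward substitution on A z = y with a21 = 1+1i gives z1 = 3 and
+ * z2 = (6+3i) - (1+1i)*3 = 3; alpha = 3 then gives (9, 9), stored as
+ * { 9, 0, 9, 0 } with the padding elements left untouched. */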
+
+static rsb_err_t tc_sl_di_ussv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};/* type is float complex */
+
+	float complex x[]={ 3, 0, 3, 0 };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
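+
+/* Worked check for the empty case above: with nnz = 0 and
+ * blas_unit_diag, A is effectively the 2x2 identity, so the
+ * transposed solve reduces to scaling: 3 * (3, 3) = (9, 9) = cy[].
+ * Note the test runs only where RSB_BLAS_SUPPORT_EMPTY holds;
+ * otherwise it is skipped, as the guard above shows. */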
+
+static rsb_err_t tc_sl_di_ussv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 3 };/* type is float complex */
+
+	float complex x[]={ 12, 0, 3, 0 };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ 12, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1 0\n 3 1\n"	" y = \n 9\n 9\n"	" y' = \n 12\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
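+
+/* Worked check: a21 = 3 is real, so conjugate transposition acts like
+ * the plain transpose here.  With logical input (12, 3) at stride 2,
+ * back substitution on A^H z = y gives z2 = 3 and z1 = 12 - 3*3 = 3;
+ * alpha = 3 then gives (9, 9), matching cy[]. */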
+
+static rsb_err_t tc_sl_di_ussv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1+1*I };/* type is float complex */
+
+	float complex x[]={ 1+0*I, 2+1*I };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ 1+0*I, 2+1*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1+0i 0+0i\n 1+1i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n 1+0i\n 2+1i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
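+
+/* Worked check: with alpha = 1 the result is the bare solve.  Forward
+ * substitution with a21 = 1+1i on y = (1, 2+1i) gives z1 = 1 and
+ * z2 = (2+1i) - (1+1i)*1 = 1, i.e. cy[] = (1, 1). */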
+
+static rsb_err_t tc_sl_di_ussv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};/* type is float complex */
+
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ 1, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_sl_di_ussv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1+1*I };/* type is float complex */
+
+	float complex x[]={ 2+-1*I, 1+0*I };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ 2+-1*I, 1+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1+0i 0+0i\n 1+1i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n 2+-1i\n 1+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_sl_di_ussv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+3*I };/* type is float complex */
+
+	float complex x[]={ 1+0*I, 0+0*I, 1+3*I, 0+0*I };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ 1+0*I, 0+0*I, 1+3*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1+0i 0+0i\n 0+3i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n 1+0i\n 1+3i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_sl_di_ussv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+1*I };/* type is float complex */
+
+	float complex x[]={ 1+1*I, 0+0*I, 1+0*I, 0+0*I };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ 1+1*I, 0+0*I, 1+0*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1+0i 0+0i\n 0+1i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n 1+1i\n 1+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_sl_di_ussv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};/* type is float complex */
+
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ 1, 0, 1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_sl_di_ussv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};/* type is float complex */
+
+	float complex x[]={ -1, -1 };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ -1, -1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
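+
+/* Worked check: an empty unit-diagonal matrix is the identity, so this
+ * case verifies only the sign handling: -1 * (-1, -1) = (1, 1) = cy[]. */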
+
+static rsb_err_t tc_sl_di_ussv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+2*I };/* type is float complex */
+
+	float complex x[]={ -1+-2*I, -1+0*I };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ -1+-2*I, -1+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1+0i 0+0i\n 0+2i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n -1+-2i\n -1+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_sl_di_ussv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+3*I };/* type is float complex */
+
+	float complex x[]={ -1+3*I, -1+0*I };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ -1+3*I, -1+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1+0i 0+0i\n 0+3i 1+0i\n"	" y = \n 1\n 1\n"	" y' = \n -1+3i\n -1+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_sl_di_ussv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};/* type is float complex */
+
+	float complex x[]={ -1, 0, -1, 0 };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ -1, 0, -1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n -1\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_sl_di_ussv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 2 };/* type is float complex */
+
+	float complex x[]={ -3, 0, -1, 0 };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ -3, 0, -1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1 0\n 2 1\n"	" y = \n 1\n 1\n"	" y' = \n -3\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_sl_di_ussv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 2 };/* type is float complex */
+
+	float complex x[]={ -3, 0, -1, 0 };/* reference x */
+	float complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float complex y[]={ -3, 0, -1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1 0\n 2 1\n"	" y = \n 1\n 1\n"	" y' = \n -3\n -1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_sl_di_ussv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};/* type is float complex */
+
+	float complex x[]={ -3, -3 };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ -3, -3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_sl_di_ussv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1+2*I };/* type is float complex */
+
+	float complex x[]={ -6+-6*I, -3+0*I };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ -6+-6*I, -3+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1+0i 0+0i\n 1+2i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n -6+-6i\n -3+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_sl_di_ussv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:c; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 3+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 3+3*I };/* type is float complex */
+
+	float complex x[]={ -12+9*I, -3+0*I };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ -12+9*I, -3+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1+0i 0+0i\n 3+3i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n -12+9i\n -3+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_sl_di_ussv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1 };/* type is float complex */
+
+	float complex x[]={ -3, 0, -6, 0 };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ -3, 0, -6, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -6\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_sl_di_ussv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};/* type is float complex */
+
+	float complex x[]={ -3, 0, -3, 0 };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ -3, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tc_sl_di_ussv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:c; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};/* type is float complex */
+
+	float complex x[]={ -3, 0, -3, 0 };/* reference x */
+	float complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float complex y[]={ -3, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n -3\n -3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tz_sl_di_ussv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 2+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 2+1*I };/* type is double complex */
+
+	double complex x[]={ 3+0*I, 9+3*I };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ 3+0*I, 9+3*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1+0i 0+0i\n 2+1i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n 3+0i\n 9+3i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
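+
+/* Worked check for the first double-complex case (same logic as the
+ * 'c' cases, just through the BLAS_zuscr_begin / BLAS_zussv entry
+ * points): forward substitution with a21 = 2+1i on y = (3, 9+3i)
+ * gives z1 = 3 and z2 = (9+3i) - (2+1i)*3 = 3; alpha = 3 then gives
+ * (9, 9) = cy[]. */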
+
+static rsb_err_t tz_sl_di_ussv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1 };/* type is double complex */
+
+	double complex x[]={ 6, 3 };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ 6, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1 0\n 1 1\n"	" y = \n 9\n 9\n"	" y' = \n 6\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tz_sl_di_ussv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 2+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 2+1*I };/* type is double complex */
+
+	double complex x[]={ 9+-3*I, 3+0*I };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ 9+-3*I, 3+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1+0i 0+0i\n 2+1i 1+0i\n"	" y = \n 9\n 9\n"	" y' = \n 9+-3i\n 3+0i\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+static rsb_err_t tz_sl_di_ussv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};/* type is double complex */
+
+	double complex x[]={ 3, 0, 3, 0 };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 9\n 9\n"	" y' = \n 3\n 3\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
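+/*
+ In the ix2/iy2 variants, incx=2 stores the logical 2-vector in every
+ other slot of a length-4 array: { 3, 0, 3, 0 } holds y = (3, 3) with
+ the odd slots as padding.  Quick check for the nnz=0 case just above
+ (with diag=i an empty matrix acts as the 2x2 identity):
+ y' = 3 * I^-1 * (3, 3) = (9, 9), stored back as { 9, 0, 9, 0 },
+ exactly the cy reference.
+*/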
+	static rsb_err_t tz_sl_di_ussv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};/* type is double complex */
+
+	double complex x[]={ 3, 0, 3, 0 };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-T * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 3\n 3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_di_ussv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};/* type is double complex */
+
+	double complex x[]={ 3, 0, 3, 0 };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 3 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 3\n 3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_di_ussv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};/* type is double complex */
+
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ 1, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1 0\n 0 1\n"	" y = \n 1\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
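+/*
+ Several cases use nnz=0 with dummy IA/JA/VA entries; combined with
+ blas_unit_diag this makes A an implicit 2x2 identity, so ussv reduces
+ to y' = alpha * y.  The RSB_BLAS_SUPPORT_EMPTY guard skips such cases,
+ presumably on builds where empty matrices cannot be assembled at all.
+*/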
+	static rsb_err_t tz_sl_di_ussv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1+1*I };/* type is double complex */
+
+	double complex x[]={ 2+1*I, 1+0*I };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ 2+1*I, 1+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1+0i 0+0i\n 1+1i 1+0i\n"	" y = \n 2+1i\n 1+0i\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_di_ussv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 3 };/* type is double complex */
+
+	double complex x[]={ 4, 1 };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ 4, 1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1 0\n 3 1\n"	" y = \n 4\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
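+/*
+ Worked check for the trans=c case just above: with the implicit unit
+ diagonal, A = [1 0; 3 1], so A^H = [1 3; 0 1], and back-substitution
+ on A^H y' = y with y = (4, 1) gives y'_2 = 1 and y'_1 = 4 - 3*1 = 1;
+ with alpha=1 the expected result is (1, 1), matching cy.
+*/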
+	static rsb_err_t tz_sl_di_ussv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+1*I };/* type is double complex */
+
+	double complex x[]={ 1+0*I, 0+0*I, 1+1*I, 0+0*I };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ 1+0*I, 0+0*I, 1+1*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-1 * y \n"" A = \n 1+0i 0+0i\n 0+1i 1+0i\n"	" y = \n 1+0i\n 1+1i\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_di_ussv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 3 };/* type is double complex */
+
+	double complex x[]={ 4, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ 4, 0, 1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-T * y \n"" A = \n 1 0\n 3 1\n"	" y = \n 4\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_di_ussv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 3 };/* type is double complex */
+
+	double complex x[]={ 4, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ 4, 0, 1, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- 1 A^-H * y \n"" A = \n 1 0\n 3 1\n"	" y = \n 4\n 1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_di_ussv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 3+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 3+1*I };/* type is double complex */
+
+	double complex x[]={ -1+0*I, -4+-1*I };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ -1+0*I, -4+-1*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1+0i 0+0i\n 3+1i 1+0i\n"	" y = \n -1+0i\n -4+-1i\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
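+/*
+ The ap / anr name fragments appear to encode the sign and magnitude of
+ alpha (ap3: alpha=+3, anr1: alpha=-1, anr3: alpha=-3).  Worked check
+ for the trans=n case just above: forward substitution on A z = y with
+ A = [1 0; 3+1i 1] and y = (-1, -4-1i) gives z_1 = -1 and
+ z_2 = -4-1i - (3+1i)*(-1) = -1; scaling by alpha=-1 yields (1, 1),
+ the cy reference.
+*/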
+	static rsb_err_t tz_sl_di_ussv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1+1*I };/* type is double complex */
+
+	double complex x[]={ -2+-1*I, -1+0*I };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ -2+-1*I, -1+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1+0i 0+0i\n 1+1i 1+0i\n"	" y = \n -2+-1i\n -1+0i\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_di_ussv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 2 };/* type is double complex */
+
+	double complex x[]={ -3, -1 };/* reference x */
+	double complex cy[]={ 1, 1 };/* reference cy after */
+	double complex y[]={ -3, -1 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1 0\n 2 1\n"	" y = \n -3\n -1\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_di_ussv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+1*I };/* type is double complex */
+
+	double complex x[]={ -1+0*I, 0+0*I, -1+-1*I, 0+0*I };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ -1+0*I, 0+0*I, -1+-1*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-1 * y \n"" A = \n 1+0i 0+0i\n 0+1i 1+0i\n"	" y = \n -1+0i\n -1+-1i\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_di_ussv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+3*I };/* type is double complex */
+
+	double complex x[]={ -1+-3*I, 0+0*I, -1+0*I, 0+0*I };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ -1+-3*I, 0+0*I, -1+0*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-T * y \n"" A = \n 1+0i 0+0i\n 0+3i 1+0i\n"	" y = \n -1+-3i\n -1+0i\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_di_ussv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+3*I };/* type is double complex */
+
+	double complex x[]={ -1+3*I, 0+0*I, -1+0*I, 0+0*I };/* reference x */
+	double complex cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double complex y[]={ -1+3*I, 0+0*I, -1+0*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -1 A^-H * y \n"" A = \n 1+0i 0+0i\n 0+3i 1+0i\n"	" y = \n -1+3i\n -1+0i\n"	" y' = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_di_ussv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 2 };/* type is double complex */
+
+	double complex x[]={ -3, -9 };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ -3, -9 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 0\n 2 1\n"	" y = \n -3\n -9\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_di_ussv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+2*I };/* type is double complex */
+
+	double complex x[]={ -3+-6*I, -3+0*I };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ -3+-6*I, -3+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1+0i 0+0i\n 0+2i 1+0i\n"	" y = \n -3+-6i\n -3+0i\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_di_ussv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:ussv; type:z; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1 };/* type is double complex */
+
+	double complex x[]={ -6, -3 };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ -6, -3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1 0\n 1 1\n"	" y = \n -6\n -3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_di_ussv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:n kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 2 };/* type is double complex */
+
+	double complex x[]={ -3, 0, -9, 0 };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ -3, 0, -9, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-1 * y \n"" A = \n 1 0\n 2 1\n"	" y = \n -3\n -9\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_di_ussv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:t kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 4+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 4+1*I };/* type is double complex */
+
+	double complex x[]={ -15+-3*I, 0+0*I, -3+0*I, 0+0*I };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ -15+-3*I, 0+0*I, -3+0*I, 0+0*I };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-T * y \n"" A = \n 1+0i 0+0i\n 4+1i 1+0i\n"	" y = \n -15+-3i\n -3+0i\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sl_di_ussv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:ussv; type:z; trans:c kind:l; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};/* type is double complex */
+
+	double complex x[]={ -3, 0, -3, 0 };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ -3, 0, -3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- -3 A^-H * y \n"" A = \n 1 0\n 0 1\n"	" y = \n -3\n -3\n"	" y' = \n 9\n 9\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zussv(transT,&alpha,A,y,incx) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=l diag=i blocks=1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
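+/*
+ From here on the generated cases switch from ussv (triangular solve)
+ to usmv (multiply-accumulate, y <- y + alpha * op(A) * x) and from
+ type z to type s (float), with kind=s: blas_lower_symmetric storage,
+ in which only the strictly lower triangle is stored and the upper
+ part is implied by symmetry, while the diagonal remains the implicit
+ unit one.
+*/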
+	static rsb_err_t ts_ss_di_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };/* type is float */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 9, 9 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
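+/*
+ Worked check for the usmv case just above: the stored entry a21=1,
+ symmetry, and the implicit unit diagonal expand to A = [1 1; 1 1], so
+ y' = y + 3*A*x = (3, 3) + 3*(2, 2) = (9, 9), which is the cy reference.
+*/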
+	static rsb_err_t ts_ss_di_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 2 };/* type is float */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 12, 12 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 12\n 12\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 4 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 4 };/* type is float */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 18, 18 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 4\n 4 1\n"	" y' = \n 18\n 18\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
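+/*
+ For these real symmetric matrices op(A) = A for all of trans=n, t, c
+ (transpose and conjugate transpose coincide with A), so the three
+ variants above exercise the different transposition code paths rather
+ than mathematically distinct products; each variant stores a different
+ value, so the expected results still differ.
+*/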
+	static rsb_err_t ts_ss_di_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 3 };/* type is float */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 15, 0, 15, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 15\n 15\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
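+
+/*
+ * Stride handling in these tests: when incx or incy is 2 the vector is
+ * stored interleaved, e.g. y[] = { 3, 0, 3, 0 } holds the logical vector
+ * (3, 3) at stride 2, and the reference cy[] carries the same zero
+ * padding, so a correct BLAS_susmv must leave the in-between elements
+ * untouched.
+ */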
+
+	static rsb_err_t ts_ss_di_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 6\n 6\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
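+
+/*
+ * Empty-matrix cases: when nnz == 0 the IA/JA/VA arrays hold a single -1
+ * dummy entry that insert_entries never reads, so op(A) reduces to the
+ * implicit unit diagonal requested via blas_unit_diag; such tests are
+ * skipped when the build lacks empty-matrix support
+ * (RSB_BLAS_SUPPORT_EMPTY).
+ */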
+
+	static rsb_err_t ts_ss_di_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 6\n 6\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 2 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 12, 12 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1 2\n 2 1\n"
+		" y' = \n 12\n 12\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 2 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 12, 12 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1 2\n 2 1\n"
+		" y' = \n 12\n 12\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 4 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 4 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 18, 18 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1 4\n 4 1\n"
+		" y' = \n 18\n 18\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 6\n 6\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1 1\n 1 1\n"
+		" y' = \n 9\n 9\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 6\n 6\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 4, 4 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 4\n 4\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
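+
+/*
+ * Test-name convention, as inferred from the bodies above:
+ * ts_ss_di_usmv_2_<t>_a<v>_bp1_ix<m>_iy<n> encodes the transposition
+ * (n/t/c), alpha (p3 = +3, p1 = +1, nr1 = -1), the implicit beta = +1 of
+ * usmv, and the x/y strides; "ss"/"di" match the generated header comment
+ * "type:s ... kind:s; diag:i".
+ */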
+
+	static rsb_err_t ts_ss_di_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 3 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 7, 7 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1 3\n 3 1\n"
+		" y' = \n 7\n 7\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 2 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 6, 6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1 2\n 2 1\n"
+		" y' = \n 6\n 6\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 4\n 4\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 4\n 4\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 2 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1 2\n 2 1\n"
+		" y' = \n 6\n 6\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 2 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1 2\n 2 1\n"
+		" y' = \n 6\n 6\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 4, 4 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 4\n 4\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 4, 4 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 4\n 4\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 3 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 7, 0, 7, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1 3\n 3 1\n"
+		" y' = \n 7\n 7\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 4 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 4 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 8, 0, 8, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1 4\n 4 1\n"
+		" y' = \n 8\n 8\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 4\n 4\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 1, 1 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"
+		" A = \n 1 1\n 1 1\n"
+		" y' = \n 1\n 1\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 3 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ -1, -1 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"
+		" A = \n 1 3\n 3 1\n"
+		" y' = \n -1\n -1\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 3 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ -1, -1 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"
+		" A = \n 1 3\n 3 1\n"
+		" y' = \n -1\n -1\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"
+		" A = \n 1 1\n 1 1\n"
+		" y' = \n 1\n 1\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 3 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ -1, 0, -1, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"
+		" A = \n 1 3\n 3 1\n"
+		" y' = \n -1\n -1\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 4 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 4 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ -2, 0, -2, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"
+		" A = \n 1 4\n 4 1\n"
+		" y' = \n -2\n -2\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
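+/*
+ * For the real types exercised here ('s' and 'd'), blas_conj_trans is
+ * numerically identical to blas_trans, and since the operand is symmetric
+ * both coincide with the untransposed product. In the case above,
+ * A_eff = [1 4; 4 1], so y' = 3 + (-1)*(1 + 4) = -2 per logical entry,
+ * which interleaved at incy=2 is exactly cy = {-2, 0, -2, 0}.
+ */
+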
+	static rsb_err_t ts_ss_di_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 2, 2 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 2\n 2\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
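+/*
+ * In the nnz == 0 cases the single IA/JA/VA entry is a placeholder that is
+ * never read; blas_unit_diag still applies, so the effective operand is the
+ * identity and y' = y + alpha * x. Above, with incx=2 the logical x is read
+ * from x[0] and x[2], giving y' = 3 + (-1)*1 = 2, i.e. cy = {2, 2}.
+ */
+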
+	static rsb_err_t ts_ss_di_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 2, 2 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 2\n 2\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 2, 2 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 2\n 2\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 3 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ -1, 0, -1, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"
+		" A = \n 1 3\n 3 1\n"
+		" y' = \n -1\n -1\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"
+		" A = \n 1 1\n 1 1\n"
+		" y' = \n 1\n 1\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 2 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"
+		" A = \n 1 2\n 2 1\n"
+		" y' = \n 0\n 0\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, 0 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 0\n 0\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 3 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ -9, -9 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"
+		" A = \n 1 3\n 3 1\n"
+		" y' = \n -9\n -9\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, 0 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 0\n 0\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 0\n 0\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 0\n 0\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ -3, 0, -3, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"
+		" A = \n 1 1\n 1 1\n"
+		" y' = \n -3\n -3\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 0\n 0\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ -3, -3 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"
+		" A = \n 1 1\n 1 1\n"
+		" y' = \n -3\n -3\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 0\n 0\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 0\n 0\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 0\n 0\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_ss_di_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 0\n 0\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('s'))
+		{printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1 1\n 1 1\n"
+		" y' = \n 9\n 9\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
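+/*
+ * The battery is now repeated for type 'd': the BLAS_suscr_begin,
+ * BLAS_suscr_insert_entries, BLAS_suscr_end and BLAS_susmv entry points
+ * become their BLAS_duscr and BLAS_dusmv counterparts, the arrays are
+ * double, and the comparison type code is 'D'. The arithmetic is unchanged:
+ * above, A_eff = [1 1; 1 1] with alpha = 3 gives y' = 3 + 3*(1 + 1) = 9.
+ */
+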
+	static rsb_err_t td_ss_di_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 3 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 15, 15 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1 3\n 3 1\n"
+		" y' = \n 15\n 15\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 6, 6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 6\n 6\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1 1\n 1 1\n"
+		" y' = \n 9\n 9\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 6\n 6\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1 1\n 1 1\n"
+		" y' = \n 9\n 9\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
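+/*
+ * Note: for the real-valued type 'd', blas_conj_trans coincides with
+ * blas_trans, and since these matrices are symmetric all three trans
+ * settings (n/t/c) denote the same operator; the cases differ only in the
+ * stored pattern each generated instance happens to use.
+ */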
+	static rsb_err_t td_ss_di_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 6, 6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
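+/*
+ * Note: in the nnz=0 cases the IA/JA/VA dummy values (-1) are never
+ * dereferenced, and the effective operator reduces to the implicit unit
+ * diagonal, so y' = y + alpha*x.  The RSB_BLAS_SUPPORT_EMPTY guard skips
+ * such cases on builds without empty-matrix support.
+ */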
+	static rsb_err_t td_ss_di_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 6, 6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
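+/*
+ * Note: with incx=2 or incy=2 the vectors are interleaved with zero padding;
+ * usmv must read and write only every second element, leaving the padding
+ * untouched.  The two trailing arguments of rsb__do_are_same() and
+ * rsb__debug_print_vectors_diff() presumably carry these strides.
+ */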
+	static rsb_err_t td_ss_di_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
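+/*
+ * Note on the control flow shared by these cases: the ok/ferr/err labels
+ * give a single success path and a single failure path; ferr reports the
+ * failing configuration, the hardcoded system (lsc) and the computed y
+ * before falling through to err.  RSB_ERROR(lsc) passes lsc in the format
+ * position, which, if RSB_ERROR forwards to a printf-style formatter, is
+ * safe only because the generated strings contain no conversion specifiers.
+ */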
+	static rsb_err_t td_ss_di_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 4, 4 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
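+/*
+ * Note: the ap3/ap1/anr1 name segments sweep alpha (+3, +1, -1) while bp1
+ * keeps beta at +1; BLAS_dusmv() takes no beta argument, since Sparse BLAS
+ * usmv always accumulates into y (y <- alpha*op(A)*x + y).
+ */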
+	static rsb_err_t td_ss_di_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 4, 4 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 5, 5 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 5\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 3 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 7, 0, 7, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 7\n 7\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 3 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 7, 7 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 7\n 7\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 4, 4 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 5, 5 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 5\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 3 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 7, 0, 7, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 7\n 7\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 2 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
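+/*
+ * Worked numbers for the alpha=-1 case above: with A = [1 2; 2 1] and
+ * x = (1,1), A*x = (3,3), so y' = y - A*x = (3,3) - (3,3) = (0,0),
+ * matching cy.
+ */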
+	static rsb_err_t td_ss_di_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 2 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 2 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 1\n 1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 2, 2 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
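+/* Note on the strided cases such as the one below: with incx=2 (or incy=2)
+ * the vectors are laid out with one zero of padding between logical elements
+ * (e.g. x[]={1,0,1,0} holds the logical vector (1,1)), and usmv must
+ * read/write only every second element, leaving the padding untouched. */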
+	static rsb_err_t td_ss_di_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 2, 2 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 1, 1 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 1\n 1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 1\n 1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 1\n 1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 3 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -1, 0, -1, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n -1\n -1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ -3, 0, -3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -3, -3 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 0, 0 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 0, 0 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -3, 0, -3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 2 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -6, 0, -6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n -6\n -6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_ss_di_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -3, 0, -3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
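+/* The following cases switch to type 'c' (float complex). Two differences
+ * from the 'd' cases are visible in the code: alpha is passed by address
+ * (&alpha) to BLAS_cusmv, consistent with the C Sparse BLAS binding, which
+ * apparently passes complex scalars by pointer; and the matrices remain
+ * symmetric (kind:s) rather than Hermitian, so blas_conj_trans conjugates
+ * the stored values in the symmetric expansion. */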
+	static rsb_err_t tc_ss_di_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 9, 9 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1+1*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 9+-3*I, 9+-3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1+0i 1+1i\n 1+1i 1+0i\n"	" y' = \n 9+-3i\n 9+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
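+/* Editorial check of the conjugate-transpose case above: with VA={1+1i}
+ * expanded symmetrically and a unit diagonal, each row of A^H sums to
+ * 1 + conj(1+1i) = 2-1i, so y' = 3 + 3*(2-1i) = 9-3i, matching cy. */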
+	static rsb_err_t tc_ss_di_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+7i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+7*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 6+21*I, 0+0*I, 6+21*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1+0i 0+7i\n 0+7i 1+0i\n"	" y' = \n 6+21i\n 6+21i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 2+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 2+2*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 12+6*I, 0+0*I, 12+6*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1+0i 2+2i\n 2+2i 1+0i\n"	" y' = \n 12+6i\n 12+6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 4+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 4+3*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 18+-9*I, 0+0*I, 18+-9*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1+0i 4+3i\n 4+3i 1+0i\n"	" y' = \n 18+-9i\n 18+-9i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 9+6*I, 9+6*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1+0i 1+2i\n 1+2i 1+0i\n"	" y' = \n 9+6i\n 9+6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 9+6*I, 9+6*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1+0i 1+2i\n 1+2i 1+0i\n"
+		" y' = \n 9+6i\n 9+6i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 3 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 15, 15 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1 3\n 3 1\n"
+		" y' = \n 15\n 15\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+4*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 6+12*I, 0+0*I, 6+12*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1+0i 0+4i\n 0+4i 1+0i\n"
+		" y' = \n 6+12i\n 6+12i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
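+/* Note on the strided cases such as the one above: an incx/incy of 2 makes
+ * the routines touch every other element, so x and y are laid out as
+ * { x0, pad, x1, pad }; the skipped positions hold 0+0i both in y and in
+ * the reference cy, i.e. usmv is presumably also being checked for leaving
+ * the in-between positions untouched. */
+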
+	static rsb_err_t tc_ss_di_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 6+6*I, 0+0*I, 6+6*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1+0i 0+2i\n 0+2i 1+0i\n"
+		" y' = \n 6+6i\n 6+6i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 2+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 2+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 12+-6*I, 0+0*I, 12+-6*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1+0i 2+2i\n 2+2i 1+0i\n"
+		" y' = \n 12+-6i\n 12+-6i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+6i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+6*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 4+6*I, 4+6*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1+0i 0+6i\n 0+6i 1+0i\n"
+		" y' = \n 4+6i\n 4+6i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 4+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 4+3*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 8+3*I, 8+3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1+0i 4+3i\n 4+3i 1+0i\n"
+		" y' = \n 8+3i\n 8+3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+3*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 4+-3*I, 4+-3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1+0i 0+3i\n 0+3i 1+0i\n"
+		" y' = \n 4+-3i\n 4+-3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+2*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 4+2*I, 0+0*I, 4+2*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1+0i 0+2i\n 0+2i 1+0i\n"
+		" y' = \n 4+2i\n 4+2i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+3*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 4+3*I, 0+0*I, 4+3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1+0i 0+3i\n 0+3i 1+0i\n"
+		" y' = \n 4+3i\n 4+3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 4\n 4\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
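+/* In the nnz==0 cases such as the one above, IA/JA/VA hold a single dummy
+ * value and no entry is inserted, so with blas_unit_diag the operator
+ * reduces to the identity for any trans and the expected result is simply
+ * y' = y + alpha*x (here 3 + 1*1 = 4 at the stride-2 positions).  The
+ * RSB_BLAS_SUPPORT_EMPTY guard skips such cases on builds without empty
+ * matrix support. */
+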
+	static rsb_err_t tc_ss_di_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1+1*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 5+1*I, 5+1*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1+0i 1+1i\n 1+1i 1+0i\n"
+		" y' = \n 5+1i\n 5+1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+3*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 4+3*I, 4+3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1+0i 0+3i\n 0+3i 1+0i\n"
+		" y' = \n 4+3i\n 4+3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+1*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 4+-1*I, 4+-1*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"
+		" y' = \n 4+-1i\n 4+-1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 2+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 2+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 6+2*I, 0+0*I, 6+2*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1+0i 2+2i\n 2+2i 1+0i\n"
+		" y' = \n 6+2i\n 6+2i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 2 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1 2\n 2 1\n"
+		" y' = \n 6\n 6\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+3*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 4+-3*I, 0+0*I, 4+-3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1+0i 0+3i\n 0+3i 1+0i\n"
+		" y' = \n 4+-3i\n 4+-3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 2, 2 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 2\n 2\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 1, 1 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"
+		" A = \n 1 1\n 1 1\n"
+		" y' = \n 1\n 1\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+7i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1+7*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 1+7*I, 1+7*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"
+		" A = \n 1+0i 1+7i\n 1+7i 1+0i\n"
+		" y' = \n 1+7i\n 1+7i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
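+/* Worked arithmetic for the conjugate-transpose case above: with the
+ * implicit unit diagonal, A = [ 1, 1+7i ; 1+7i, 1 ], so
+ * A^H = [ 1, 1-7i ; 1-7i, 1 ] and A^H*x = [ 2-7i, 2-7i ].  With alpha = -1
+ * and beta = 1 this gives y' = y - A^H*x = [ 3-(2-7i), 3-(2-7i) ]
+ * = [ 1+7i, 1+7i ], matching cy. */
+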
+	static rsb_err_t tc_ss_di_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 2\n 2\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 3+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 3+1*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -1+-1*I, 0+0*I, -1+-1*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"
+		" A = \n 1+0i 3+1i\n 3+1i 1+0i\n"
+		" y' = \n -1+-1i\n -1+-1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+1*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 2+1*I, 0+0*I, 2+1*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"
+		" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"
+		" y' = \n 2+1i\n 2+1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+5i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1+5*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 1+-5*I, 1+-5*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"
+		" A = \n 1+0i 1+5i\n 1+5i 1+0i\n"
+		" y' = \n 1+-5i\n 1+-5i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+1*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 2+-1*I, 2+-1*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"
+		" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"
+		" y' = \n 2+-1i\n 2+-1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 2, 2 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 2\n 2\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
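+/*
+ * When nnz == 0 (as in the test above), IA, JA and VA hold a single dummy
+ * entry that is never inserted, and the test is skipped unless the build
+ * supports empty matrices (RSB_BLAS_SUPPORT_EMPTY). With the implicit unit
+ * diagonal, op(A) is then the identity, so y' = y + alpha*x: here
+ * 3 + (-1)*1 = 2, matching cy = { 2, 2 }.
+ */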
+	static rsb_err_t tc_ss_di_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 2+-2*I, 0+0*I, 2+-2*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"
+		" A = \n 1+0i 0+2i\n 0+2i 1+0i\n"
+		" y' = \n 2+-2i\n 2+-2i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 2 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"
+		" A = \n 1 2\n 2 1\n"
+		" y' = \n 0\n 0\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 2+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 2+3*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0+3*I, 0+0*I, 0+3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"
+		" A = \n 1+0i 2+3i\n 2+3i 1+0i\n"
+		" y' = \n 0+3i\n 0+3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
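+/*
+ * Stride handling in these tests: with incx == 2 the logical vector
+ * x = [1 1]' is stored as { 1, 0, 1, 0 }, and with incy == 2 both y and
+ * the reference cy carry zeros in the skipped positions, e.g. above
+ * cy = { 0+3i, 0, 0+3i, 0 } for the logical result [3i 3i]'.
+ */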
+	static rsb_err_t tc_ss_di_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 0, 0 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 0\n 0\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 3+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 3+4*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -9+-12*I, -9+-12*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"
+		" A = \n 1+0i 3+4i\n 3+4i 1+0i\n"
+		" y' = \n -9+-12i\n -9+-12i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1+3*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -3+9*I, -3+9*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"
+		" A = \n 1+0i 1+3i\n 1+3i 1+0i\n"
+		" y' = \n -3+9i\n -3+9i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 3 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -9, 0, -9, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"
+		" A = \n 1 3\n 3 1\n"
+		" y' = \n -9\n -9\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 2+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 2+1*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -6+-3*I, 0+0*I, -6+-3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"
+		" A = \n 1+0i 2+1i\n 2+1i 1+0i\n"
+		" y' = \n -6+-3i\n -6+-3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 2 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -6, 0, -6, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"
+		" A = \n 1 2\n 2 1\n"
+		" y' = \n -6\n -6\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+1*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0+-3*I, 0+-3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"
+		" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"
+		" y' = \n 0+-3i\n 0+-3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0, 0 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 0\n 0\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0, 0 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 0\n 0\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+3*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0+-9*I, 0+0*I, 0+-9*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"
+		" A = \n 1+0i 0+3i\n 0+3i 1+0i\n"
+		" y' = \n 0+-9i\n 0+-9i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 3 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ -9, 0, -9, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"
+		" A = \n 1 3\n 3 1\n"
+		" y' = \n -9\n -9\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_ss_di_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+3*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0+9*I, 0+0*I, 0+9*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"
+		" A = \n 1+0i 0+3i\n 0+3i 1+0i\n"
+		" y' = \n 0+9i\n 0+9i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
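+/*
+ * The tests from here on repeat the same matrix and usmv pattern for the
+ * 'z' type: double complex values, BLAS_zuscr_* constructors, and result
+ * comparison with type flag 'Z'. E.g. the first case uses A = [1 2i; 2i 1]
+ * with alpha = 3, so y' = 3 + 3*(1 + 2i) = 6 + 6i, matching its cy.
+ */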
+	static rsb_err_t tz_ss_di_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+2*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 6+6*I, 6+6*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1+0i 0+2i\n 0+2i 1+0i\n"
+		" y' = \n 6+6i\n 6+6i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+2*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 6+6*I, 6+6*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1+0i 0+2i\n 0+2i 1+0i\n"
+		" y' = \n 6+6i\n 6+6i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1+1*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 9+-3*I, 9+-3*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1+0i 1+1i\n 1+1i 1+0i\n"
+		" y' = \n 9+-3i\n 9+-3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+3*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 6+9*I, 0+0*I, 6+9*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1+0i 0+3i\n 0+3i 1+0i\n"
+		" y' = \n 6+9i\n 6+9i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1 };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1 1\n 1 1\n"
+		" y' = \n 9\n 9\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 6\n 6\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+3*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 6+9*I, 6+9*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1+0i 0+3i\n 0+3i 1+0i\n"
+		" y' = \n 6+9i\n 6+9i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z'))
+		{printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+2i 0+0i
+*/
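+	/* A is symmetric (I + L + L^T), so blas_trans leaves the operand
+	   unchanged: y' = y + 3*A^T*x = (3,3) + 3*(1+2i, 1+2i) = (6+6i, 6+6i). */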
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+2*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 6+6*I, 6+6*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1+0i 0+2i\n 0+2i 1+0i\n"
+		" y' = \n 6+6i\n 6+6i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
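+	/* blas_conj_trans conjugates the entries, but with the single real
+	   off-diagonal 2 the operand stays [ 1 2 ; 2 1 ]:
+	   y' = y + 3*A^H*x = (3,3) + 3*(3,3) = (12,12). */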
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 2 };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 12, 12 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1 2\n 2 1\n"
+		" y' = \n 12\n 12\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
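+	/* Degenerate case: nnz=0, so only the implicit unit diagonal remains
+	   (A = I) and y' = y + 3*x = (6,6); with incy=2 the reference cy
+	   interleaves untouched zero elements. */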
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 6\n 6\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+7i 0+0i
+*/
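+	/* Symmetric operand A = [ 1 0+7i ; 0+7i 1 ], unaffected by blas_trans;
+	   both x and y are accessed at stride 2:
+	   y' = y + 3*A^T*x = (3,3) + 3*(1+7i, 1+7i) = (6+21i, 6+21i). */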
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+7*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 6+21*I, 0+0*I, 6+21*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1+0i 0+7i\n 0+7i 1+0i\n"
+		" y' = \n 6+21i\n 6+21i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 2+3i 0+0i
+*/
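+	/* blas_conj_trans conjugates the mirrored off-diagonal 2+3i to 2-3i:
+	   y' = y + 3*A^H*x = (3,3) + 3*(3-3i, 3-3i) = (12-9i, 12-9i). */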
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 2+3*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 12-9*I, 0+0*I, 12-9*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1+0i 2+3i\n 2+3i 1+0i\n"
+		" y' = \n 12-9i\n 12-9i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 5+3i 0+0i
+*/
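+	/* Effective operand A = I + L + L^T = [ 1 5+3i ; 5+3i 1 ]:
+	   y' = y + A*x = (3,3) + (6+3i, 6+3i) = (9+3i, 9+3i). */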
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 5+3*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 9+3*I, 9+3*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1+0i 5+3i\n 5+3i 1+0i\n"
+		" y' = \n 9+3i\n 9+3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 2+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 2+1*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 6+1*I, 6+1*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1+0i 2+1i\n 2+1i 1+0i\n"
+		" y' = \n 6+1i\n 6+1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
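+	/* With a real off-diagonal the conjugate transpose equals A itself:
+	   y' = y + A^H*x = (3,3) + (3,3) = (6,6). */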
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 2 };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 6, 6 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1 2\n 2 1\n"
+		" y' = \n 6\n 6\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+1*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 4+1*I, 0+0*I, 4+1*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"
+		" y' = \n 4+1i\n 4+1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+5i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+5*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 4+5*I, 0+0*I, 4+5*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1+0i 0+5i\n 0+5i 1+0i\n"
+		" y' = \n 4+5i\n 4+5i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1+4*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 5-4*I, 0+0*I, 5-4*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1+0i 1+4i\n 1+4i 1+0i\n"
+		" y' = \n 5-4i\n 5-4i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 3+2i 0+0i
+*/
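+	/* x is read at stride incx=2 (the interleaved zeros are skipped):
+	   y' = y + A*x = (3,3) + (4+2i, 4+2i) = (7+2i, 7+2i). */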
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 3+2*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 7+2*I, 7+2*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1+0i 3+2i\n 3+2i 1+0i\n"
+		" y' = \n 7+2i\n 7+2i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+1*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 4+1*I, 4+1*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"
+		" y' = \n 4+1i\n 4+1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+1*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 4-1*I, 4-1*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"
+		" y' = \n 4-1i\n 4-1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+1*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 4+1*I, 0+0*I, 4+1*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"
+		" y' = \n 4+1i\n 4+1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1+2*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 5+2*I, 0+0*I, 5+2*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1+0i 1+2i\n 1+2i 1+0i\n"
+		" y' = \n 5+2i\n 5+2i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+6i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+6*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 4-6*I, 0+0*I, 4-6*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1+0i 0+6i\n 0+6i 1+0i\n"
+		" y' = \n 4-6i\n 4-6i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
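+	/* Negative scaling: y' = y - A*x with A = [ 1 0+1i ; 0+1i 1 ],
+	   giving (3,3) - (1+i, 1+i) = (2-1i, 2-1i). */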
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+1*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 2-1*I, 2-1*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"
+		" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"
+		" y' = \n 2-1i\n 2-1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+5i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+5*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 2-5*I, 2-5*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"
+		" A = \n 1+0i 0+5i\n 0+5i 1+0i\n"
+		" y' = \n 2-5i\n 2-5i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1+1*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 1+1*I, 1+1*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"
+		" A = \n 1+0i 1+1i\n 1+1i 1+0i\n"
+		" y' = \n 1+1i\n 1+1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
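+	/* Degenerate case again: A = I (unit diagonal only), so
+	   y' = y - x = (2,2), stored at stride incy=2 in cy. */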
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 2\n 2\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+1*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 2-1*I, 0+0*I, 2-1*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"
+		" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"
+		" y' = \n 2-1i\n 2-1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 2+3i 0+0i
+*/
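+	/* Conjugation turns 2+3i into 2-3i, and alpha=-1 subtracts:
+	   y' = y - A^H*x = (3,3) - (3-3i, 3-3i) = (0+3i, 0+3i). */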
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 2+3*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 0+3*I, 0+0*I, 0+3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"
+		" A = \n 1+0i 2+3i\n 2+3i 1+0i\n"
+		" y' = \n 0+3i\n 0+3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 2+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 2+4*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 0-4*I, 0-4*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"
+		" A = \n 1+0i 2+4i\n 2+4i 1+0i\n"
+		" y' = \n 0-4i\n 0-4i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
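+/*
+ Note on the stride arguments: with incx==2 the reference x above is
+ padded as { 1, 0, 1, 0 }, i.e. the logical operand read at stride 2
+ is { 1, 1 }; likewise y and the reference cy carry interleaved zeros
+ whenever incy==2.  The two trailing arguments of rsb__do_are_same()
+ and rsb__debug_print_vectors_diff() carry the increments of y and
+ cy, so only the strided elements take part in the comparison.
+*/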
+	static rsb_err_t tz_ss_di_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+1*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 2+-1*I, 2+-1*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"	" y' = \n 2+-1i\n 2+-1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+2*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 2+2*I, 2+2*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+0i 0+2i\n 0+2i 1+0i\n"	" y' = \n 2+2i\n 2+2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+6i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1+6*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 1+-6*I, 0+0*I, 1+-6*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1+0i 1+6i\n 1+6i 1+0i\n"	" y' = \n 1+-6i\n 1+-6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 3+10i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 3+10*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -1+-10*I, 0+0*I, -1+-10*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+0i 3+10i\n 3+10i 1+0i\n"	" y' = \n -1+-10i\n -1+-10i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+4*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 2+4*I, 0+0*I, 2+4*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+0i 0+4i\n 0+4i 1+0i\n"	" y' = \n 2+4i\n 2+4i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 2+11i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 2+11*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -6+-33*I, -6+-33*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1+0i 2+11i\n 2+11i 1+0i\n"	" y' = \n -6+-33i\n -6+-33i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 2+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 2+2*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -6+-6*I, -6+-6*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+0i 2+2i\n 2+2i 1+0i\n"	" y' = \n -6+-6i\n -6+-6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+2*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 0+6*I, 0+6*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 0+2i\n 0+2i 1+0i\n"	" y' = \n 0+6i\n 0+6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+4*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 0+-12*I, 0+0*I, 0+-12*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1+0i 0+4i\n 0+4i 1+0i\n"	" y' = \n 0+-12i\n 0+-12i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 3+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 3+2*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -9+-6*I, 0+0*I, -9+-6*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+0i 3+2i\n 3+2i 1+0i\n"	" y' = \n -9+-6i\n -9+-6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+1*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 0+3*I, 0+0*I, 0+3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"	" y' = \n 0+3i\n 0+3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1+2*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -3+-6*I, -3+-6*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1+0i 1+2i\n 1+2i 1+0i\n"	" y' = \n -3+-6i\n -3+-6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+5i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1+5*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -3+-15*I, -3+-15*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+0i 1+5i\n 1+5i 1+0i\n"	" y' = \n -3+-15i\n -3+-15i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+1*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 0+3*I, 0+3*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"	" y' = \n 0+3i\n 0+3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+1*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 0+-3*I, 0+0*I, 0+-3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"	" y' = \n 0+-3i\n 0+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+5i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1+5*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -3+-15*I, 0+0*I, -3+-15*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+0i 1+5i\n 1+5i 1+0i\n"	" y' = \n -3+-15i\n -3+-15i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_ss_di_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:s; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+1*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 0+3*I, 0+0*I, 0+3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 0+1i\n 0+1i 1+0i\n"	" y' = \n 0+3i\n 0+3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=s diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
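+/*
+ From this point the generated cases switch from double complex
+ symmetric matrices (type 'z', sym=s) to float hermitian ones (type
+ 's', sym=h).  For a real type, blas_lower_hermitian coincides with
+ blas_lower_symmetric and conjugation is a no-op, so trans=c below
+ behaves exactly like trans=t.
+*/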
+	static rsb_err_t ts_sh_di_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 3 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 15, 15 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 15\n 15\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 6, 6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
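+/*
+ The nnz==0 cases (as in ts_sh_di_usmv_2_t_ap3_bp1_ix1_iy1 above)
+ exercise an empty stored pattern: with blas_unit_diag set, A reduces
+ to the identity, so y' = y + alpha*x, here 3 + 3*1 == 6 per element.
+ The -1 values in IA/JA/VA are dummies which an insert_entries call
+ given nnz==0 should never dereference; RSB_BLAS_SUPPORT_EMPTY gates
+ whether such empty matrices are attempted at all.
+*/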
+	static rsb_err_t ts_sh_di_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 2 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 12, 12 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 12\n 12\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 2 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 12, 12 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 12\n 12\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 6, 6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
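+/* Editor's note (added commentary): when incx or incy equals 2 the logical
+ * length-2 vectors are stored in every other array element ({v, 0, v, 0}),
+ * and rsb__do_are_same(y,cy,nr,...) compares nr entries at those same
+ * strides; the interleaved zero padding is expected to be left untouched
+ * by BLAS_susmv(). */
+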
+	static rsb_err_t ts_sh_di_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 3 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 15, 0, 15, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 15\n 15\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 3 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 15, 0, 15, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 15\n 15\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
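+/* Editor's note (added commentary): from here the same grid of trans and
+ * stride combinations is repeated with alpha=1; the ap3/ap1/anr1 name
+ * fragments appear to encode alpha = +3, +1 and -1 respectively. Worked
+ * example for the first alpha=1 case below: the effective A is
+ * [[1,3],[3,1]] and x = [1,1], so A*x = [4,4] and
+ * y' = [3,3] + 1*[4,4] = [7,7], matching cy. */
+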
+	static rsb_err_t ts_sh_di_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 3 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 7, 7 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 7\n 7\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 2 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 6, 6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 4, 4 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 2 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 4, 4 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 5, 5 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 5\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 3 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 7, 7 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 7\n 7\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 5, 0, 5, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 5\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
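+/* Editor's note (added commentary): the remaining cases flip the scalar to
+ * alpha=-1, so the update becomes y <- y - op(A)*x. First case below:
+ * with nnz=0 the matrix is the implied 2x2 identity, hence
+ * y' = [3,3] - [1,1] = [2,2], matching cy. */
+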
+	static rsb_err_t ts_sh_di_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 2, 2 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 4 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 4 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ -2, -2 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 4\n 4 1\n"	" y' = \n -2\n -2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 3 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ -1, -1 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n -1\n -1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 1\n 1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 2, 2 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 2, 2 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 2, 2 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 1\n 1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ -3, -3 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
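+
+/* Worked check for the test above: the explicit entry VA={1} at row 1,
+ * column 0, combined with blas_lower_hermitian storage and the implicit
+ * unit diagonal, expands to the full matrix A = [ 1 1 ; 1 1 ]; hence
+ * y' = y + alpha*A*x = {3,3} + (-3)*{2,2} = {-3,-3}, matching cy. */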
+
+	static rsb_err_t ts_sh_di_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 2 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ -6, -6 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n -6\n -6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, 0 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 3 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ -9, 0, -9, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n -9\n -9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 2 };		/* x: 2 */
+	float x[]={ 1, 1 };/* reference x */
+	float cy[]={ -6, 0, -6, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n -6\n -6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 3 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ -9, -9 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n -9\n -9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float VA[]={ 1 };		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ -3, -3 };/* reference cy after */
+	float y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t ts_sh_di_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:s; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	float alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float x[]={ 1, 0, 1, 0 };/* reference x */
+	float cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	float y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('s')){printf("type=s unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_suscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_suscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_susmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'S',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'S',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=s dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'S');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
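+
+/* The tests above exercise the single-precision ('s') interface through
+ * the BLAS_sus* entry points; the functions that follow repeat the same
+ * matrix and vector patterns in double precision ('d') through the
+ * BLAS_dus* entry points, comparing results under type code 'D'. */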
+
+	static rsb_err_t td_sh_di_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 3 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 15, 15 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 15\n 15\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 6, 6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 3 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 15, 15 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 15\n 15\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
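+
+/* For the real types tested here ('s' and 'd') conjugation is a no-op,
+ * so the trans=c (blas_conj_trans) cases expect exactly the same
+ * results as the corresponding trans=t cases: A^H equals A^T for real A. */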
+
+	static rsb_err_t td_sh_di_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 6, 6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 9, 9 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
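+/*
+ On the incx/incy encoding used above: a logical length-2 vector with
+ stride 2 is stored as four doubles with the payload at even offsets,
+ so x[]={ 1, 0, 1, 0 } is the logical vector (1,1), and cy/y carry the
+ same interleaved padding. BLAS_dusmv() must leave the padding
+ untouched, and since rsb__do_are_same() is handed both increments,
+ the comparison presumably doubles as a clobber check on those zeros.
+*/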
+	static rsb_err_t td_sh_di_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 9, 0, 9, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 4 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 4 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 8, 8 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 4\n 4 1\n"	" y' = \n 8\n 8\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
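+#if 0
+/* Reference oracle for the cases above (a hedged sketch, not part of the
+   generated suite, hence compiled out): expand the (VA,IA,JA) triplets
+   into a dense effective matrix under the blas_unit_diag +
+   blas_lower_hermitian flags, then apply y += alpha*op(A)*x directly.
+   Assumes nr==nc==2 and lower-triangle entries, as in every case here;
+   for the real 'd' type, trans=t and trans=c coincide. */
+static void oracle_usmv_d(int nr, int nc, int nnz,
+	const double *VA, const int *IA, const int *JA,
+	int do_trans, double alpha,
+	const double *x, int incx, double *y, int incy)
+{
+	double A[2][2] = { { 1, 0 }, { 0, 1 } };	/* implicit unit diagonal */
+	int k, i, j;
+	for (k = 0; k < nnz; ++k)
+	{
+		A[IA[k]][JA[k]] = VA[k];	/* stored lower-triangle entry */
+		A[JA[k]][IA[k]] = VA[k];	/* its hermitian (real) image  */
+	}
+	for (i = 0; i < nr; ++i)
+		for (j = 0; j < nc; ++j)
+			y[i * incy] += alpha * (do_trans ? A[j][i] : A[i][j]) * x[j * incx];
+}
+#endif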
+	static rsb_err_t td_sh_di_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 5, 5 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 5\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 4, 4 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
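+/*
+ The nnz==0 cases above pass dummy one-element IA/JA/VA arrays:
+ BLAS_duscr_insert_entries() is still called, but with nnz=0 it should
+ read nothing, so only the implicit unit diagonal contributes to the
+ product. Builds without RSB_BLAS_SUPPORT_EMPTY skip such cases outright
+ via the guard that returns RSB_ERR_UNSUPPORTED_TYPE.
+*/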
+	static rsb_err_t td_sh_di_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 5, 0, 5, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 5\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 3 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 7, 7 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 7\n 7\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 3 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 7, 7 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n 7\n 7\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 2 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 6, 6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 5, 0, 5, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 5\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 4 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 4 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 8, 0, 8, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1 4\n 4 1\n"	" y' = \n 8\n 8\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 2, 2 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
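+/*
+ Worked check for the alpha=-1 cases, the first of which appears above:
+ with nnz=0 the effective operator is just the unit diagonal, so
+ y' = y - I*x = (3,3) - (1,1) = (2,2), exactly the hardcoded cy. The
+ VA={2} and VA={3} variants that follow have effective row sums 3 and 4,
+ so they land on (0,0) and (-1,-1) respectively.
+*/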
+	static rsb_err_t td_sh_di_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 2 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 2, 2 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 1\n 1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 1\n 1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 2, 2 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 2 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 0, 0 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 2, 2 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 1\n 1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d')){printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 3 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -1, 0, -1, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n -1\n -1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double alpha=-1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 1, 0, 1, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 1\n 1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
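+/* Editor's note: in the nnz==0 cases below, no entries are stored, but
+   blas_unit_diag makes A act as the 2x2 identity; so with alpha=-3 the
+   expected result is y' = y - 3*I*x = 3 - 3*1 = 0 in every logical position,
+   which is exactly the hardcoded cy. */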
+	static rsb_err_t td_sh_di_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
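+/* Editor's note: incx/incy are element strides, so a logical vector of
+   length 2 occupies 4 array slots at stride 2, with zeros interleaved.
+   The reference cy arrays carry the same padding, and rsb__do_are_same()
+   is passed the matching strides, so it appears to compare only the
+   logical elements. */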
+	static rsb_err_t td_sh_di_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ -3, 0, -3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double x[]={ 1, 1 };/* reference x */
+	double cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 2 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -6, -6 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 2\n 2 1\n"	" y' = \n -6\n -6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 3 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -9, -9 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n -9\n -9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -3, -3 };/* reference cy after */
+	double y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -3, 0, -3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double VA[]={ 1 };		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ -3, 0, -3, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t td_sh_di_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:d; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double x[]={ 1, 0, 1, 0 };/* reference x */
+	double cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	double y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('d'))
+		{printf("type=d unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_duscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_duscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_dusmv(transT,alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'D',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'D',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=d dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'D');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
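+/* Editor's note: the tests switch here from type 'd' (double) to type 'c'
+   (float complex). One binding difference is visible below: the real
+   BLAS_dusmv() takes alpha by value, while the complex BLAS_cusmv() takes
+   it by address (&alpha). For hermitian storage only the lower triangle is
+   inserted; the upper triangle is implied as its conjugate, as the banner
+   matrices show. */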
+	static rsb_err_t tc_sh_di_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+4*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 6-12*I, 6+12*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1+0i 0+-4i\n 0+4i 1+0i\n"	" y' = \n 6+-12i\n 6+12i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 6, 6 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
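+/* Editor's note: since A is hermitian, A^H equals A, so the trans=c cases
+   must agree with the untransposed product. E.g. below, for
+   A = [1, 2-3i; 2+3i, 1] and x = (1,1), A^H*x = A*x = (3-3i, 3+3i), giving
+   y' = 3 + 3*(3-3i) = 12-9i in the first position, as hardcoded in cy. */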
+	static rsb_err_t tc_sh_di_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 2+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 2+3*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 12-9*I, 12+9*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1+0i 2+-3i\n 2+3i 1+0i\n"	" y' = \n 12+-9i\n 12+9i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
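+/* Editor's note: for hermitian A the plain transpose is the conjugate of
+   the matrix itself (A^T = conj(A), since A^H = A). Hence in the trans=t
+   case below, with A = [1, 2-3i; 2+3i, 1] and x = (1,1),
+   A^T*x = (3+3i, 3-3i) and y' = 3 + 3*(3+3i) = 12+9i: the conjugate of
+   the corresponding trans=c result. */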
+	static rsb_err_t tc_sh_di_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 2+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 2+3*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 12+9*I, 0+0*I, 12-9*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1+0i 2+-3i\n 2+3i 1+0i\n"	" y' = \n 12+9i\n 12+-9i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 2+5i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 2+5*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 12-15*I, 12+15*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1+0i 2+-5i\n 2+5i 1+0i\n"	" y' = \n 12+-15i\n 12+15i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 6+6*I, 6-6*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1+0i 0+-2i\n 0+2i 1+0i\n"	" y' = \n 6+6i\n 6+-6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+1*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 6-3*I, 6+3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"	" y' = \n 6+-3i\n 6+3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+3*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 6-9*I, 0+0*I, 6+9*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1+0i 0+-3i\n 0+3i 1+0i\n"	" y' = \n 6+-9i\n 6+9i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY )
+		{ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
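+
+/*
+ * Editorial note: in the incx=2 / incy=2 variants the logical length-2 vectors
+ * are stored at stride 2, so only elements 0 and 2 of the 4-element C arrays
+ * are significant and the interleaved slots are expected to stay zero (cy
+ * records them as 0). A minimal gather sketch under that assumption follows;
+ * rsb_sbtc_gather_strided() is a hypothetical name, not a library routine.
+ */
+static void rsb_sbtc_gather_strided(const float complex *v, int inc, int n,
+	float complex *dst)
+{
+	int i;
+	for(i = 0; i < n; ++i)
+		dst[i] = v[i * inc]; /* pick the logical vector out of the strided array */
+}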
+
+	static rsb_err_t tc_sh_di_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 2+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 2+3*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 12+9*I, 0+0*I, 12+-9*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"
+		" A = \n 1+0i 2+-3i\n 2+3i 1+0i\n"
+		" y' = \n 12+9i\n 12+-9i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
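+
+/*
+ * Editorial note: each test exits through a three-label ladder: "ok" returns
+ * RSB_ERR_NO_ERROR; "ferr" prints the expected system (lsc) and the computed
+ * y' and then falls through to "err", which returns errval (RSB_BLAS_ERROR
+ * unless the test was skipped, in which case errval was first set to
+ * RSB_ERR_UNSUPPORTED_TYPE). Note that on the "ferr" path the matrix handle A
+ * is not destroyed with BLAS_usds().
+ */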
+
+	static rsb_err_t tc_sh_di_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 3 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 15, 0, 15, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"
+		" A = \n 1 3\n 3 1\n"
+		" y' = \n 15\n 15\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+3*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 4+-3*I, 4+3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1+0i 0+-3i\n 0+3i 1+0i\n"
+		" y' = \n 4+-3i\n 4+3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1+3*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 5+3*I, 5+-3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1+0i 1+-3i\n 1+3i 1+0i\n"
+		" y' = \n 5+3i\n 5+-3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+3*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 4+-3*I, 4+3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1+0i 0+-3i\n 0+3i 1+0i\n"
+		" y' = \n 4+-3i\n 4+3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
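+
+/*
+ * Editorial note: for a Hermitian matrix A^H = A by definition, which is why
+ * the trans=c reference above coincides with the trans=n case two tests back
+ * (both expect cy = { 4-3i, 4+3i }). A one-off elementwise statement of that
+ * identity, as an illustrative (hypothetical) helper:
+ */
+static int rsb_sbtc_is_hermitian_2x2(const float complex A[2][2])
+{
+	return A[0][0] == conjf(A[0][0]) && A[1][1] == conjf(A[1][1])
+		&& A[0][1] == conjf(A[1][0]); /* A equals its conjugate transpose */
+}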
+
+	static rsb_err_t tc_sh_di_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 4\n 4\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
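+
+/*
+ * Editorial note: in the nnz==0 tests nothing is inserted, so with
+ * blas_unit_diag the matrix is effectively the 2x2 identity and the expected
+ * update is y' <- y + alpha * x for every trans; e.g. 3 + 1*1 = 4 here,
+ * matching cy. A minimal sketch of that expectation (hypothetical helper):
+ */
+static rsb_err_t rsb_sbtc_check_identity_update(void)
+{
+	float complex x[2] = { 1, 1 }, y[2] = { 3, 3 };
+	int i;
+	for(i = 0; i < 2; ++i)
+		y[i] += 1 * x[i]; /* alpha = 1, op(A) = I */
+	return (y[0] == 4 && y[1] == 4) ? RSB_ERR_NO_ERROR : RSB_BLAS_ERROR;
+}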
+
+	static rsb_err_t tc_sh_di_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0 0
+ 2 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 2 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 6, 0, 6, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1 2\n 2 1\n"
+		" y' = \n 6\n 6\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 3+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 3+4*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 7+-4*I, 0+0*I, 7+4*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1+0i 3+-4i\n 3+4i 1+0i\n"
+		" y' = \n 7+-4i\n 7+4i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1+1*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 5+-1*I, 5+1*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1+0i 1+-1i\n 1+1i 1+0i\n"
+		" y' = \n 5+-1i\n 5+1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+3*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 4+3*I, 4+-3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1+0i 0+-3i\n 0+3i 1+0i\n"
+		" y' = \n 4+3i\n 4+-3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 4, 4 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 4\n 4\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 4, 0, 4, 0 };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 4\n 4\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 2+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 2+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 6+2*I, 0+0*I, 6+-2*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"
+		" A = \n 1+0i 2+-2i\n 2+2i 1+0i\n"
+		" y' = \n 6+2i\n 6+-2i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1+3*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 5+-3*I, 0+0*I, 5+3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"
+		" A = \n 1+0i 1+-3i\n 1+3i 1+0i\n"
+		" y' = \n 5+-3i\n 5+3i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
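+
+/*
+ * Editorial note on the generated names: tc_sh_di_usmv_2_<t>_a??_bp1_ix<m>_iy<n>
+ * decodes as type=c, sym=h(ermitian), diag=i(mplicit unit), op=usmv, dims=2x2,
+ * trans=<t> in {n,t,c}, beta=+1, incx=<m>, incy=<n>. The alpha token pairs with
+ * the printf strings: "ap3" and "ap1" appear alongside "alpha= 3" and
+ * "alpha= 1", and "anr1" (used by the tests that follow) alongside "alpha=-1".
+ */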
+
+	static rsb_err_t tc_sh_di_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 2, 2 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"
+		" A = \n 1 0\n 0 1\n"
+		" y' = \n 2\n 2\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+5i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1+5*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 1+-5*I, 1+5*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"
+		" A = \n 1+0i 1+-5i\n 1+5i 1+0i\n"
+		" y' = \n 1+-5i\n 1+5i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
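+
+/*
+ * Editorial note: with alpha = -1 the update is y' <- y - op(A) x. For the
+ * trans=t case above, op(A) = A^T = [ 1, 1+5i; 1-5i, 1 ], so
+ * y'[0] = 3 - (1 + (1+5i)) = 1-5i and y'[1] = 3 - ((1-5i) + 1) = 1+5i,
+ * matching the hardcoded cy.
+ */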
+
+	static rsb_err_t tc_sh_di_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+7i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+7*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 2+7*I, 2+-7*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"
+		" A = \n 1+0i 0+-7i\n 0+7i 1+0i\n"
+		" y' = \n 2+7i\n 2+-7i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 3+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 3+4*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -1+4*I, 0+0*I, -1+-4*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"
+		" A = \n 1+0i 3+-4i\n 3+4i 1+0i\n"
+		" y' = \n -1+4i\n -1+-4i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+1*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 2+-1*I, 0+0*I, 2+1*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"
+		" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"
+		" y' = \n 2+-1i\n 2+1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+1*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 2+1*I, 0+0*I, 2+-1*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"
+		" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"
+		" y' = \n 2+1i\n 2+-1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+4*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 2+4*I, 2+-4*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"
+		" A = \n 1+0i 0+-4i\n 0+4i 1+0i\n"
+		" y' = \n 2+4i\n 2+-4i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+1*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 2+-1*I, 2+1*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"
+		" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"
+		" y' = \n 2+-1i\n 2+1i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 3+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 3+4*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ -1+4*I, -1+-4*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"
+		" A = \n 1+0i 3+-4i\n 3+4i 1+0i\n"
+		" y' = \n -1+4i\n -1+-4i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 2+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 2+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0+2*I, 0+0*I, 0+-2*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"
+		" A = \n 1+0i 2+-2i\n 2+2i 1+0i\n"
+		" y' = \n 0+2i\n 0+-2i\n"
+		" y = \n 3\n 3\n"
+		" x = \n 1\n 1\n";
+	if(!RSB_BLAS_SUPPORTED_TYPE('c'))
+		{printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0) && !RSB_BLAS_SUPPORT_EMPTY)
+		{printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
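+/*
+ * Editorial note: the hardcoded cy[] vectors encode this arithmetic: with
+ * blas_unit_diag and blas_lower_hermitian set, the single stored entry
+ * a10 implies the effective matrix A = [ 1, conj(a10); a10, 1 ], and
+ * usmv computes y' <- y + alpha*op(A)*x.  In the lsc banners, A^1 denotes
+ * the untransposed matrix, A^T the transpose and A^H the conjugate
+ * transpose.  A self-contained cross-check for the 2x2 no-transpose case,
+ * assuming <complex.h> (sketch_*() is hypothetical):
+ */
+static void sketch_dense_husmv_2x2_n(float complex alpha, float complex a10,
+		const float complex x[2], float complex y[2])
+{
+	/* expand the Hermitian, unit-diagonal 2x2 matrix and accumulate */
+	y[0] += alpha * (       x[0] + conjf(a10) * x[1] );
+	y[1] += alpha * ( a10 * x[0] +              x[1] );
+}
+/* e.g. alpha=-1, a10=2+2*I, x=(1,1), y=(3,3) gives y'=(0+2i,0-2i): the
+ * logical values which the preceding test interleaves into cy[]. */
+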
+	static rsb_err_t tc_sh_di_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+7i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+7*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 2+-7*I, 0+0*I, 2+7*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+0i 0+-7i\n 0+7i 1+0i\n"	" y' = \n 2+-7i\n 2+7i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+2*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 2+2*I, 0+0*I, 2+-2*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+0i 0+-2i\n 0+2i 1+0i\n"	" y' = \n 2+2i\n 2+-2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
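+/*
+ * Editorial note: when incx or incy is 2, the arrays hold the logical
+ * vector in every other slot: element k of a logical vector v lives at
+ * v[k*inc].  So x[]={ 1, 0, 1, 0 } with incx=2 is the logical x=(1,1),
+ * and cy[] interleaves the expected results with untouched zeros; the
+ * two trailing arguments of rsb__do_are_same() and
+ * rsb__debug_print_vectors_diff() track incy in each call.  A one-line
+ * sketch of the indexing (hypothetical name):
+ */
+static float complex sketch_strided_get(const float complex *v, int inc, int k)
+{
+	return v[k * inc];	/* element k of the logical vector under stride inc */
+}
+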
+	static rsb_err_t tc_sh_di_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	float complex VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 0, 0 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
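+/*
+ * Editorial note: with nnz=0 and blas_unit_diag the matrix degenerates to
+ * the identity, so usmv reduces to y' <- y + alpha*x; hence the preceding
+ * test expects cy=(0,0) from y=(3,3), alpha=-3, x=(1,1).  When
+ * RSB_BLAS_SUPPORT_EMPTY is disabled, such cases are skipped (reported as
+ * unsupported) rather than failed.  A sketch of the degenerate
+ * computation (hypothetical name):
+ */
+static void sketch_identity_usmv(float complex alpha, const float complex *x,
+		float complex *y, int n)
+{
+	int i;
+	for(i = 0; i < n; ++i)
+		y[i] += alpha * x[i];	/* A == I, so op(A)*x == x */
+}
+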
+	static rsb_err_t tc_sh_di_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1 };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -3, -3 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+1*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 0+3*I, 0+-3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"	" y' = \n 0+3i\n 0+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+3*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ 0+9*I, 0+0*I, 0+-9*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1+0i 0+-3i\n 0+3i 1+0i\n"	" y' = \n 0+9i\n 0+-9i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1+2*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -3+-6*I, 0+0*I, -3+6*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+0i 1+-2i\n 1+2i 1+0i\n"	" y' = \n -3+-6i\n -3+6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1+4*I };		/* x: 2 */
+	float complex x[]={ 1, 1 };/* reference x */
+	float complex cy[]={ -3+12*I, 0+0*I, -3+-12*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 1+-4i\n 1+4i 1+0i\n"	" y' = \n -3+12i\n -3+-12i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1 };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ -3, -3 };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+5i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+5*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0+-15*I, 0+15*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+0i 0+-5i\n 0+5i 1+0i\n"	" y' = \n 0+-15i\n 0+15i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 3+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 3+1*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ -9+3*I, -9+-3*I };/* reference cy after */
+	float complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 3+-1i\n 3+1i 1+0i\n"	" y' = \n -9+3i\n -9+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 1+1*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ -3+3*I, 0+0*I, -3+-3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1+0i 1+-1i\n 1+1i 1+0i\n"	" y' = \n -3+3i\n -3+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 0+1*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ 0+-3*I, 0+0*I, 0+3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"	" y' = \n 0+-3i\n 0+3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tc_sh_di_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:c; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=2;
+	float complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 3+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	float complex VA[]={ 3+1*I };		/* x: 4 */
+	float complex x[]={ 1, 0, 1, 0 };/* reference x */
+	float complex cy[]={ -9+3*I, 0+0*I, -9+-3*I, 0+0*I };/* reference cy after */
+	float complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 3+-1i\n 3+1i 1+0i\n"	" y' = \n -9+3i\n -9+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('c')){printf("type=c unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_cuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_cuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_cusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'C',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'C',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=c dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'C');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
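+/*
+ * Editorial note: from here on the generated tests switch from single
+ * precision complex ('c': float complex, BLAS_cuscr_begin/BLAS_cusmv,
+ * comparison type character 'C') to double precision complex ('z':
+ * double complex, BLAS_zuscr_begin/BLAS_zusmv, type character 'Z'); the
+ * structure of each test is otherwise unchanged.  A sketch of the naming
+ * convention as a dispatch macro (hypothetical, not used by the suite):
+ */
+#define SKETCH_USCR_BEGIN(tc, nr, nc) \
+	( (tc) == 'c' ? BLAS_cuscr_begin((nr), (nc)) \
+	              : BLAS_zuscr_begin((nr), (nc)) )
+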
+	static rsb_err_t tz_sh_di_usmv_2_n_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 6, 6 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_t_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1 };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 9, 9 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 9\n 9\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_c_ap3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+1*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 6+-3*I, 6+3*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"	" y' = \n 6+-3i\n 6+3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
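+/*
+ * Editorial note: a Hermitian matrix satisfies A^H == A, so the trans=c
+ * tests are expected to reproduce the trans=n results: the preceding test
+ * (trans=c, a10=0+1i, alpha=3) and the trans=n test below with the same
+ * stored entry both expect the logical y' = (6-3i, 6+3i).  Likewise
+ * A^T == conj(A) here, which the trans=t cases exercise.  A
+ * self-contained check, assuming <complex.h> (hypothetical name):
+ */
+static int sketch_hermitian_n_equals_c(void)
+{
+	const double complex a10 = I, alpha = 3;
+	const double complex x[2] = { 1, 1 }, y[2] = { 3, 3 };
+	/* A = [ 1, conj(a10); a10, 1 ]; its conjugate transpose has (0,1)
+	 * entry conj(a10) and (1,0) entry conj(conj(a10)) == a10, i.e. A */
+	const double complex n0 = y[0] + alpha * (x[0] + conj(a10) * x[1]);
+	const double complex n1 = y[1] + alpha * (a10 * x[0] + x[1]);
+	const double complex h0 = y[0] + alpha * (x[0] + conj(a10) * x[1]);
+	const double complex h1 = y[1] + alpha * (conj(conj(a10)) * x[0] + x[1]);
+	return (n0 == h0) && (n1 == h1);	/* both pairs equal (6-3i, 6+3i) */
+}
+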
+	static rsb_err_t tz_sh_di_usmv_2_n_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+1*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 6+-3*I, 0+0*I, 6+3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"	" y' = \n 6+-3i\n 6+3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_t_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1+1*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 9+3*I, 0+0*I, 9+-3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1+0i 1+-1i\n 1+1i 1+0i\n"	" y' = \n 9+3i\n 9+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_c_ap3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+3*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 6+-9*I, 0+0*I, 6+9*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1+0i 0+-3i\n 0+3i 1+0i\n"	" y' = \n 6+-9i\n 6+9i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_n_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 6, 6 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 6\n 6\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_t_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+2*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 6+6*I, 6+-6*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1+0i 0+-2i\n 0+2i 1+0i\n"	" y' = \n 6+6i\n 6+-6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_c_ap3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1+4*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 9+-12*I, 9+12*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1+0i 1+-4i\n 1+4i 1+0i\n"	" y' = \n 9+-12i\n 9+12i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_n_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+2*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 6+-6*I, 0+0*I, 6+6*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^1 * x \n"" A = \n 1+0i 0+-2i\n 0+2i 1+0i\n"	" y' = \n 6+-6i\n 6+6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR(lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_t_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+2*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 6+6*I, 0+0*I, 6+-6*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^T * x \n"" A = \n 1+0i 0+-2i\n 0+2i 1+0i\n"	" y' = \n 6+6i\n 6+-6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
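+	/*
+	 * trans=c requests the conjugate transpose: since A is Hermitian,
+	 * A^H equals A, so these cases check that the conjugation code path
+	 * reproduces the untransposed result.
+	 */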
+	static rsb_err_t tz_sh_di_usmv_2_c_ap3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double complex alpha=3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1+1*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 9+-3*I, 0+0*I, 9+3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 3 A^H * x \n"" A = \n 1+0i 1+-1i\n 1+1i 1+0i\n"	" y' = \n 9+-3i\n 9+3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
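+	/*
+	 * On any failure these tests jump to ferr:, which prints the
+	 * parameter combination, the human-readable system description lsc,
+	 * and the computed y vector; note that in the ussp(),
+	 * uscr_insert_entries() and uscr_end() messages the %d printed is
+	 * the matrix handle A, not the failing call's return code.  err:
+	 * returns errval, while ok: returns RSB_ERR_NO_ERROR.
+	 */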
+	static rsb_err_t tz_sh_di_usmv_2_n_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1+4*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 5+-4*I, 5+4*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1+0i 1+-4i\n 1+4i 1+0i\n"	" y' = \n 5+-4i\n 5+4i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_t_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+4*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 4+4*I, 4+-4*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1+0i 0+-4i\n 0+4i 1+0i\n"	" y' = \n 4+4i\n 4+-4i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
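+	/*
+	 * Cases with nnz=0 test the all-implicit matrix (A reduces to the
+	 * identity, thanks to blas_unit_diag); IA, JA and VA then hold dummy
+	 * values, and the test is skipped up front when the build does not
+	 * support empty matrices (RSB_BLAS_SUPPORT_EMPTY).
+	 */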
+	static rsb_err_t tz_sh_di_usmv_2_c_ap1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 4, 4 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
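+	/*
+	 * incx/incy are the usual BLAS vector strides: a logical vector of
+	 * length 2 with stride 2 occupies four array slots, with the unused
+	 * odd positions initialized (and expected to remain) zero, e.g.
+	 * y[] = { 3, 0, 3, 0 } holds the logical vector (3, 3).
+	 */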
+	static rsb_err_t tz_sh_di_usmv_2_n_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+1*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 4+-1*I, 0+0*I, 4+1*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"	" y' = \n 4+-1i\n 4+1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_t_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+4*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 4+4*I, 0+0*I, 4+-4*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1+0i 0+-4i\n 0+4i 1+0i\n"	" y' = \n 4+4i\n 4+-4i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_c_ap1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 3+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 3+4*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 7+-4*I, 0+0*I, 7+4*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1+0i 3+-4i\n 3+4i 1+0i\n"	" y' = \n 7+-4i\n 7+4i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
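+	/*
+	 * rsb__do_are_same() compares the computed y against the reference
+	 * cy ('Z' selects double complex, the trailing arguments are the two
+	 * strides, presumably comparing elementwise); on mismatch,
+	 * rsb__debug_print_vectors_diff() dumps both vectors before the test
+	 * bails out through ferr:.
+	 */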
+	static rsb_err_t tz_sh_di_usmv_2_n_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 4, 4 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 4\n 4\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
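+	/*
+	 * Worked example for the next case: A = [ 1, 3-3i ; 3+3i, 1 ], so
+	 * A^T * x = (1 + (3+3i), (3-3i) + 1) = (4+3i, 4-3i), and with
+	 * alpha=1, y' = y + A^T * x = (7+3i, 7-3i), matching cy.
+	 */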
+	static rsb_err_t tz_sh_di_usmv_2_t_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 3+3i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 3+3*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 7+3*I, 7+-3*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1+0i 3+-3i\n 3+3i 1+0i\n"	" y' = \n 7+3i\n 7+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_c_ap1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+1*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 4+-1*I, 4+1*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"	" y' = \n 4+-1i\n 4+1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
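+	/*
+	 * Some of these complex-type cases use a purely real stored entry
+	 * (here VA[0]=1), so the Hermitian mirror equals the stored value
+	 * and the expected results are real; they still exercise the 'Z'
+	 * (double complex) code paths.
+	 */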
+	static rsb_err_t tz_sh_di_usmv_2_n_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1 };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 5, 0, 5, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^1 * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n 5\n 5\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_t_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1+4*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 5+4*I, 0+0*I, 5+-4*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^T * x \n"" A = \n 1+0i 1+-4i\n 1+4i 1+0i\n"	" y' = \n 5+4i\n 5+-4i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_c_ap1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double complex alpha=1;
+	/*
+ A = 
+ 0 0
+ 4 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 4 };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 8, 0, 8, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + 1 A^H * x \n"" A = \n 1 4\n 4 1\n"	" y' = \n 8\n 8\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
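+	/*
+	 * The cases below repeat the sweep with alpha=-1 ("anr1" in the
+	 * function names presumably stands for "alpha negative real 1"),
+	 * i.e. y' <- y - op(A) * x.
+	 */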
+	static rsb_err_t tz_sh_di_usmv_2_n_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 2, 2 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_t_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+1*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 2+-1*I, 2+1*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"	" y' = \n 2+-1i\n 2+1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_c_anr1_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 2, 2 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_n_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+1*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 2+1*I, 0+0*I, 2+-1*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"	" y' = \n 2+1i\n 2+-1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_t_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 2, 0, 2, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 2\n 2\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_c_anr1_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+1*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 2+1*I, 0+0*I, 2+-1*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"	" y' = \n 2+1i\n 2+-1i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_n_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 3+6i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 3+6*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -1+6*I, -1+-6*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1+0i 3+-6i\n 3+6i 1+0i\n"	" y' = \n -1+6i\n -1+-6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_t_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 6+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 6+2*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -4+-2*I, -4+2*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+0i 6+-2i\n 6+2i 1+0i\n"	" y' = \n -4+-2i\n -4+2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_c_anr1_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1+2*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 1+2*I, 1+-2*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+0i 1+-2i\n 1+2i 1+0i\n"	" y' = \n 1+2i\n 1+-2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_n_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0 0
+ 3 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 3 };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -1, 0, -1, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^1 * x \n"" A = \n 1 3\n 3 1\n"	" y' = \n -1\n -1\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+		else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_t_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+7i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+7*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 2+-7*I, 0+0*I, 2+7*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^T * x \n"" A = \n 1+0i 0+-7i\n 0+7i 1+0i\n"	" y' = \n 2+-7i\n 2+7i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
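+
+/*
+ Worked example for the test above: with the stored entry VA[0]=7i at
+ (1,0), an implicit unit diagonal, and hermitian fill-in, the full matrix is
+   A = [ 1  -7i ; 7i  1 ],  so  A^T = [ 1  7i ; -7i  1 ].
+ With alpha=-1, x=(1,1) and y=(3,3):
+   y' = y - A^T*x = (3-(1+7i), 3-(1-7i)) = (2-7i, 2+7i),
+ which matches the strided reference cy = { 2-7i, 0, 2+7i, 0 }.
+*/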
+
+	static rsb_err_t tz_sh_di_usmv_2_c_anr1_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-1;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+2*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 2+2*I, 0+0*I, 2+-2*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -1 A^H * x \n"" A = \n 1+0i 0+-2i\n 0+2i 1+0i\n"	" y' = \n 2+2i\n 2+-2i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
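+
+/*
+ Note on the trans=c cases: for a hermitian matrix A^H equals A, so the
+ conjugate-transpose tests must reproduce the no-transpose result on the
+ expanded matrix; here y' = y - A*x = (3-(1-2i), 3-(1+2i)) = (2+2i, 2-2i).
+*/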
+
+	static rsb_err_t tz_sh_di_usmv_2_n_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+2*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 0+6*I, 0+-6*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1+0i 0+-2i\n 0+2i 1+0i\n"	" y' = \n 0+6i\n 0+-6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_t_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 2+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 2+4*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -6+-12*I, -6+12*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+0i 2+-4i\n 2+4i 1+0i\n"	" y' = \n -6+-12i\n -6+12i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_c_anr3_bp1_ix1_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+1*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 0+3*I, 0+-3*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 0+-1i\n 0+1i 1+0i\n"	" y' = \n 0+3i\n 0+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_n_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ 0, 0, 0, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
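+
+/*
+ Degenerate case: with nnz==0 and blas_unit_diag set, op(A) is the
+ identity, so y' = y + alpha*x = 3 - 3*1 = 0 for both rows.  The test
+ skips itself at run time when RSB_BLAS_SUPPORT_EMPTY is unset.
+*/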
+
+	static rsb_err_t tz_sh_di_usmv_2_t_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0 0
+ 1 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1 };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -3, 0, -3, 0 };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1 1\n 1 1\n"	" y' = \n -3\n -3\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_c_anr3_bp1_ix1_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=1;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 2+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 2+1*I };		/* x: 2 */
+	double complex x[]={ 1, 1 };/* reference x */
+	double complex cy[]={ -6+3*I, 0+0*I, -6+-3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 2+-1i\n 2+1i 1+0i\n"	" y' = \n -6+3i\n -6+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
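+
+/*
+ Stride handling: with incy=2 only y[0] and y[2] are operands of usmv;
+ the odd positions are padding, kept as 0 in the reference arrays, and
+ the final comparison walks both vectors with the same stride of 2.
+*/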
+
+	static rsb_err_t tz_sh_di_usmv_2_n_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0 0
+ 0 0
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=0;
+	int nr=2;
+	int nc=2;
+	int IA[]={ -1 /* a dummy value */};
+	int JA[]={ -1 /* a dummy value */};
+	double complex VA[]={ -1 /* a dummy value */};		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 0, 0 };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1 0\n 0 1\n"	" y' = \n 0\n 0\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_t_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1+1*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -3+-3*I, -3+3*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+0i 1+-1i\n 1+1i 1+0i\n"	" y' = \n -3+-3i\n -3+3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_c_anr3_bp1_ix2_iy1(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=1;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 0+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 0+2*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ 0+6*I, 0+-6*I };/* reference cy after */
+	double complex y[]={ 3, 3 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 0+-2i\n 0+2i 1+0i\n"	" y' = \n 0+6i\n 0+-6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',1,1) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',1,1,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_n_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:n kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_no_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+4i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1+4*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -3+12*I, 0+0*I, -3+-12*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^1 * x \n"" A = \n 1+0i 1+-4i\n 1+4i 1+0i\n"	" y' = \n -3+12i\n -3+-12i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_t_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:t kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_trans;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 1+2i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 1+2*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -3+-6*I, 0+0*I, -3+6*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^T * x \n"" A = \n 1+0i 1+-2i\n 1+2i 1+0i\n"	" y' = \n -3+-6i\n -3+6i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+	static rsb_err_t tz_sh_di_usmv_2_c_anr3_bp1_ix2_iy2(void)
+{
+	/* op:usmv; type:z; trans:c kind:h; diag:i */
+	rsb_err_t errval = RSB_BLAS_ERROR;
+	blas_sparse_matrix A = -1;
+	enum blas_trans_type transT=blas_conj_trans ;
+	int incx=2;
+	int incy=2;
+	double complex alpha=-3;
+	/*
+ A = 
+ 0+0i 0+0i
+ 3+1i 0+0i
+*/
+	/* declaration of VA,IA,JA */
+ 	int nnz=1;
+	int nr=2;
+	int nc=2;
+	int IA[]={ 1 };
+	int JA[]={ 0 };
+	double complex VA[]={ 3+1*I };		/* x: 4 */
+	double complex x[]={ 1, 0, 1, 0 };/* reference x */
+	double complex cy[]={ -9+3*I, 0+0*I, -9+-3*I, 0+0*I };/* reference cy after */
+	double complex y[]={ 3, 0, 3, 0 };/* y */
+
+	
+	const char*lsc="System and hardcoded solution: y' <- y + -3 A^H * x \n"" A = \n 1+0i 3+-1i\n 3+1i 1+0i\n"	" y' = \n -9+3i\n -9+-3i\n"	" y = \n 3\n 3\n"	" x = \n 1\n 1\n";	if(!RSB_BLAS_SUPPORTED_TYPE('z')){printf("type=z unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf("empty matrices are unsupported: skipping test.\n");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}
+	A = BLAS_zuscr_begin(nr,nc);
+	if( A == -1 )
+		{RSB_ERROR("uscr_begin() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("ussp() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)
+		{RSB_ERROR("uscr_insert_entries() gave %d!\n",A);goto ferr;}
+	if( BLAS_zuscr_end(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("uscr_end() gave %d!\n",A);goto ferr;}
+	if( BLAS_zusmv(transT,&alpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usmv() failed!\n");goto ferr;}
+	if( rsb__do_are_same(y,cy,nr,'Z',2,2) != RSB_ERR_NO_ERROR )
+	{
+		rsb__debug_print_vectors_diff(y,cy,nr,'Z',2,2,RSB_VECTORS_DIFF_DISPLAY_N);
+		goto ferr;
+	}
+	else printf("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok\n");
+
+	if( BLAS_usds(A) != RSB_BLAS_NO_ERROR )
+		{RSB_ERROR("usds() failed!\n");goto ferr;}
+	goto ok;
+ferr:
+	RSB_ERROR("type=z dims=2x2 sym=h diag=i blocks=1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok\n");
+	RSB_ERROR("%s",lsc);
+	RSB_ERROR("Computed solution: y'=\n");
+	rsb_sbtc_print_vec(y,nr,'Z');
+err:
+	return errval;
+ok:	return RSB_ERR_NO_ERROR;
+}
+
+#if RSB_WITH_SPARSE_BLAS_INTERFACE
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+
+int main(void)
+{
+	rsb_err_t errval = RSB_ERR_NO_ERROR;
+	int passed=0,failed=0,skipped=0;
+#ifdef RSB_RSB_H_INCLUDED
+	if( rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)
+		goto err;
+#endif /* RSB_RSB_H_INCLUDED */
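+	/*
+	 Scoring convention for the calls below: RSB_ERR_NO_ERROR counts as
+	 passed; RSB_ERR_UNSUPPORTED_TYPE counts as skipped, and errval is
+	 cleared so that a skip is not also reported as a failure; any other
+	 value counts as failed and is reported together with the test's name.
+	*/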
+#if RSB_WITH_SPARSE_BLAS_INTERFACE
+	 errval = ts_sg_de_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_de_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_de_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_de_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sg_de_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sg_de_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sg_de_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_de_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_de_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_de_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sg_de_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sg_de_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sg_de_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_de_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_de_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_de_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sg_de_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sg_de_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sg_de_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_de_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_de_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_de_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_de_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sg_de_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sg_de_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_de_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_de_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_de_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_de_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
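+	/*
+	 * The tc_* block below starts the single-precision complex cases.
+	 * The generated names appear to encode the test parameters: a
+	 * t{s,d,c,z} type prefix (float, double, float complex, double
+	 * complex), `sg' for a general matrix, `usmv' for the Sparse BLAS
+	 * multiply, n/t/c for no transpose, transpose, and conjugate
+	 * transpose (distinct from `t' only for the complex types), apN/anrN
+	 * presumably for positive/negative alpha values, bp1 for beta = 1,
+	 * and ixN/iyN for the x and y vector strides.  The decoding of the
+	 * alpha/beta tokens is inferred, not documented here.
+	 */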
+	 errval = tc_sg_de_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_de_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_de_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_de_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sg_de_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sg_de_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sg_de_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_de_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_de_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_de_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sg_de_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sg_de_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sg_de_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_de_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_de_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_de_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sg_de_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sg_de_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sg_de_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_de_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_de_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_de_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_de_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sg_de_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sg_de_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_de_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
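+	/*
+	 * The tz_* (double-precision complex) block follows.  Counting
+	 * RSB_ERR_UNSUPPORTED_TYPE as skipped rather than failed lets this
+	 * same generated driver pass on builds that were presumably
+	 * configured with only a subset of the numerical types enabled.
+	 */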
+	 errval = tz_sg_de_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_de_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_de_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_de_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sg_de_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sg_de_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sg_de_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_de_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_de_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_de_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sg_de_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sg_de_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sg_de_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_de_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_de_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_de_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sg_de_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sg_de_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sg_de_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_de_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_de_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_de_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_de_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sg_de_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sg_de_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_de_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
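+	/*
+	 * End of the usmv (multiply) cases on general (`sg') matrices; the
+	 * ussv (triangular solve) cases on upper-triangular (`su') operands
+	 * begin here.  Unlike the usmv grid, the generated ussv grid only
+	 * pairs matching strides (ix1_iy1 and ix2_iy2).
+	 */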
+	 errval = ts_su_de_ussv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_de_ussv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_de_ussv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_de_ussv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_su_de_ussv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_su_de_ussv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_su_de_ussv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_de_ussv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_de_ussv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_de_ussv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_su_de_ussv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_su_de_ussv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_su_de_ussv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_de_ussv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_de_ussv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_de_ussv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_su_de_ussv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_su_de_ussv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_su_de_ussv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_de_ussv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_de_ussv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_de_ussv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_su_de_ussv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_su_de_ussv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_de_ussv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_de_ussv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_de_ussv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_de_ussv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_de_ussv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_de_ussv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_de_ussv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_de_ussv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_de_ussv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_de_ussv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_de_ussv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_de_ussv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_de_ussv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_de_ussv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_de_ussv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_de_ussv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_de_ussv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_de_ussv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_de_ussv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_de_ussv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_de_ussv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_de_ussv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_de_ussv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_de_ussv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_de_ussv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_de_ussv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_de_ussv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_de_ussv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_de_ussv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_de_ussv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_de_ussv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_de_ussv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_de_ussv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_de_ussv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_de_ussv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_de_ussv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_de_ussv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_de_ussv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_de_ussv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_de_ussv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_de_ussv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_de_ussv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_de_ussv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_de_ussv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_de_ussv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_de_ussv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_de_ussv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_de_ussv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_de_ussv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_de_ussv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_de_ussv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_de_ussv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_de_ussv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_de_ussv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_de_ussv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_de_ussv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_de_ussv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_de_ussv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_de_ussv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_de_ussv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_de_ussv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_de_ussv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_de_ussv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_de_ussv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_de_ussv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_de_ussv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_de_ussv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_de_ussv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_de_ussv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_de_ussv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_de_ussv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_de_ussv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_de_ussv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_de_ussv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_de_ussv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_de_ussv_2_c_anr3_bp1_ix2_iy2() failed!\n");
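+	/*
+	 * The lower-triangular (`sl') ussv cases begin here, restarting from
+	 * single precision (ts_*) and presumably mirroring the
+	 * upper-triangular grid above for each numerical type.
+	 */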
+	 errval = ts_sl_de_ussv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_de_ussv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_de_ussv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_de_ussv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sl_de_ussv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sl_de_ussv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sl_de_ussv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_de_ussv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_de_ussv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_de_ussv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sl_de_ussv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sl_de_ussv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sl_de_ussv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_de_ussv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_de_ussv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_de_ussv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sl_de_ussv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sl_de_ussv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sl_de_ussv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_de_ussv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_de_ussv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_de_ussv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sl_de_ussv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sl_de_ussv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_de_ussv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_de_ussv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_de_ussv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_de_ussv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_de_ussv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_de_ussv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_de_ussv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_de_ussv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_de_ussv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_de_ussv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_de_ussv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_de_ussv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_de_ussv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_de_ussv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_de_ussv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_de_ussv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_de_ussv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_de_ussv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_de_ussv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_de_ussv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_de_ussv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_de_ussv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_de_ussv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_de_ussv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_de_ussv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_de_ussv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_de_ussv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_de_ussv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_de_ussv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_de_ussv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_de_ussv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_de_ussv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_de_ussv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_de_ussv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_de_ussv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_de_ussv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_de_ussv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_de_ussv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_de_ussv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_de_ussv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_de_ussv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_de_ussv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_de_ussv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_de_ussv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_de_ussv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_de_ussv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_de_ussv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_de_ussv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_de_ussv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_de_ussv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_de_ussv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_de_ussv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_de_ussv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_de_ussv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_de_ussv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_de_ussv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_de_ussv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_de_ussv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_de_ussv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_de_ussv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_de_ussv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_de_ussv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_de_ussv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_de_ussv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_de_ussv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_de_ussv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_de_ussv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_de_ussv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_de_ussv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_de_ussv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_de_ussv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_de_ussv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_de_ussv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_de_ussv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_de_ussv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_de_ussv_2_c_anr3_bp1_ix2_iy2() failed!\n");
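+	/*
+	 * The ussv (Sparse BLAS triangular solve) cases end here; the usmv
+	 * (sparse matrix-vector multiply) cases follow.  Judging by the
+	 * generated names, t{s,d,c,z} appears to select the numeric type
+	 * (float, double, float complex, double complex) and _n/_t/_c the
+	 * transposition (none, transpose, conjugate transpose); the
+	 * remaining suffixes (ap3/ap1/anr1/anr3, bp1, ix1/ix2, iy1/iy2)
+	 * look like encodings of the alpha, beta, incx and incy parameters.
+	 */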
+	 errval = ts_ss_de_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_de_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_de_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_de_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_ss_de_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_ss_de_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_ss_de_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_de_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_de_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_de_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_ss_de_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_ss_de_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_ss_de_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_de_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_de_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_de_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_ss_de_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_ss_de_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_ss_de_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_de_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_de_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_de_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_de_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_ss_de_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_ss_de_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_de_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_de_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_de_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_de_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
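+	/*
+	 * For the real-type cases (ts_*, td_*) the _t and _c variants
+	 * coincide mathematically, since a real matrix equals its
+	 * conjugate; they are distinct operations only for the complex
+	 * types (tc_*, tz_*), such as the tc_ss usmv cases below.
+	 */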
+	 errval = tc_ss_de_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_de_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_de_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_de_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_ss_de_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_ss_de_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_ss_de_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_de_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_de_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_de_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_ss_de_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_ss_de_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_ss_de_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_de_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_de_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_de_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_ss_de_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_ss_de_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_ss_de_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_de_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_de_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_de_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_de_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_ss_de_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_ss_de_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_de_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_de_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_de_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_de_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_de_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_de_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_de_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_de_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_de_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_de_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_de_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_de_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_de_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_de_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_de_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_de_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sg_di_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sg_di_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sg_di_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sg_di_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sg_di_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sg_di_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sg_di_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sg_di_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sg_di_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sg_di_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sg_di_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sg_di_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_su_di_ussv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_di_ussv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_di_ussv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_di_ussv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_su_di_ussv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_su_di_ussv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_su_di_ussv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_di_ussv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_di_ussv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_di_ussv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_su_di_ussv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_su_di_ussv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_su_di_ussv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_di_ussv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_di_ussv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_di_ussv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_su_di_ussv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_su_di_ussv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_su_di_ussv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_di_ussv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_di_ussv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_su_di_ussv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_su_di_ussv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_su_di_ussv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_su_di_ussv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_di_ussv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_di_ussv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_di_ussv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_di_ussv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_di_ussv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_di_ussv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_di_ussv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_di_ussv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_di_ussv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_di_ussv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_di_ussv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_di_ussv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_di_ussv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_di_ussv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_di_ussv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_di_ussv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_di_ussv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_di_ussv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_di_ussv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_di_ussv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_di_ussv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_su_di_ussv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_di_ussv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_su_di_ussv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_su_di_ussv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_di_ussv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_di_ussv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_di_ussv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_di_ussv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_di_ussv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_di_ussv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_di_ussv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_di_ussv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_di_ussv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_di_ussv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_di_ussv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_di_ussv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_di_ussv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_di_ussv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_di_ussv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_di_ussv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_di_ussv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_di_ussv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_di_ussv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_di_ussv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_di_ussv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_su_di_ussv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_di_ussv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_su_di_ussv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_su_di_ussv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_di_ussv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_di_ussv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_di_ussv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_di_ussv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_di_ussv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_di_ussv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_di_ussv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_di_ussv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_di_ussv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_di_ussv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_di_ussv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_di_ussv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_di_ussv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_di_ussv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_di_ussv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_di_ussv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_di_ussv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_di_ussv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_di_ussv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_di_ussv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_di_ussv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_su_di_ussv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_di_ussv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_su_di_ussv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_su_di_ussv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sl_di_ussv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_di_ussv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_di_ussv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_di_ussv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sl_di_ussv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sl_di_ussv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sl_di_ussv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_di_ussv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_di_ussv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_di_ussv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sl_di_ussv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sl_di_ussv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sl_di_ussv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_di_ussv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_di_ussv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_di_ussv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sl_di_ussv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sl_di_ussv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sl_di_ussv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_di_ussv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_di_ussv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sl_di_ussv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sl_di_ussv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sl_di_ussv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sl_di_ussv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_di_ussv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_di_ussv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_di_ussv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_di_ussv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_di_ussv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_di_ussv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_di_ussv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_di_ussv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_di_ussv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_di_ussv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_di_ussv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_di_ussv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_di_ussv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_di_ussv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_di_ussv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_di_ussv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_di_ussv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_di_ussv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_di_ussv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_di_ussv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_di_ussv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sl_di_ussv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_di_ussv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sl_di_ussv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sl_di_ussv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_di_ussv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_di_ussv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_di_ussv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_di_ussv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_di_ussv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_di_ussv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_di_ussv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_di_ussv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_di_ussv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_di_ussv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_di_ussv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_di_ussv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_di_ussv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_di_ussv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_di_ussv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_di_ussv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_di_ussv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_di_ussv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_di_ussv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_di_ussv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_di_ussv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sl_di_ussv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_di_ussv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sl_di_ussv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sl_di_ussv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_di_ussv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_di_ussv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_di_ussv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_di_ussv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_di_ussv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_di_ussv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_di_ussv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_di_ussv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_di_ussv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_di_ussv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_di_ussv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_di_ussv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_di_ussv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_di_ussv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_di_ussv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_di_ussv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_di_ussv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_di_ussv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_di_ussv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_di_ussv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_di_ussv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sl_di_ussv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_di_ussv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sl_di_ussv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sl_di_ussv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_ss_di_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_ss_di_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_ss_di_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = td_ss_di_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_ss_di_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_ss_di_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_ss_di_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_ss_di_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_ss_di_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_ss_di_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_ss_di_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_ss_di_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = ts_sh_di_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = ts_sh_di_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("ts_sh_di_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = td_sh_di_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = td_sh_di_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("td_sh_di_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tc_sh_di_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tc_sh_di_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tc_sh_di_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_n_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_n_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_t_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_t_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_c_ap3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_c_ap3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_n_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_n_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_t_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_t_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_c_ap3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_c_ap3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_n_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_n_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_t_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_t_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_c_ap3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_c_ap3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_n_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_n_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_t_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_t_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_c_ap3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_c_ap3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_n_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_n_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_t_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_t_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_c_ap1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_c_ap1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_n_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_n_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_t_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_t_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_c_ap1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_c_ap1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_n_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_n_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_t_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_t_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_c_ap1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_c_ap1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_n_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_n_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_t_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_t_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_c_ap1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_c_ap1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_n_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_n_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_t_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_t_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_c_anr1_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_c_anr1_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_n_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_n_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_t_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_t_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_c_anr1_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_c_anr1_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_n_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_n_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_t_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_t_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_c_anr1_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_c_anr1_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_n_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_n_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_t_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_t_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_c_anr1_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_c_anr1_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_n_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_n_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_t_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_t_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_c_anr3_bp1_ix1_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_c_anr3_bp1_ix1_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_n_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_n_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_t_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_t_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_c_anr3_bp1_ix1_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_c_anr3_bp1_ix1_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_n_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_n_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_t_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_t_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_c_anr3_bp1_ix2_iy1();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_c_anr3_bp1_ix2_iy1() failed!\n");
+	 errval = tz_sh_di_usmv_2_n_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_n_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_t_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_t_anr3_bp1_ix2_iy2() failed!\n");
+	 errval = tz_sh_di_usmv_2_c_anr3_bp1_ix2_iy2();
+	if( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}
+	if( errval != RSB_ERR_NO_ERROR )RSB_ERROR("tz_sh_di_usmv_2_c_anr3_bp1_ix2_iy2() failed!\n");
+printf("	PASSED:%d\n	SKIPPED:%d (tests for BLAS types/matrix types excluded at configure/make time are skipped)\n	FAILED:%d (if any check failed, this may indicate a bug)\n",passed,skipped,failed);
+#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */
+
+	if(failed)goto err;
+
+	return RSB_PROGRAM_SUCCESS;
+ err: return RSB_PROGRAM_ERROR;
+
+}
+/* @endcond */
diff --git a/sbtc.m b/sbtc.m
new file mode 100755
index 0000000..ad4306b
--- /dev/null
+++ b/sbtc.m
@@ -0,0 +1,86 @@
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+#
+# Sparse BLAS Tester Code.
+#
+
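+# This script writes a C test program to standard output: it sources the
+# generator routines in sbtg.m, emits a C preamble (license, includes, and
+# fallback macros for builds without rsb.h), then the per-test code itself.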
+source("./sbtg.m")
+
+res=rsb_octave_license("c");
+res=sprintf("%s%s",res,"/* @cond INNERDOC */\n");
+res=sprintf("%s%s",res,rsb_octave_doc_c_header);
+res=sprintf("%s%s",res,"\
+#include <stdio.h>\n\
+#ifdef SBTC_USE_RSB_H\n\
+#include <rsb.h>\n\
+#endif /* SBTC_USE_RSB_H */\n\
+#include <complex.h>\n\
+#ifdef RSB_RSB_H_INCLUDED\n\
+#include \"rsb_internals.h\"\n\
+#define RSB_BLAS_SUPPORT_EMPTY 1\n\
+#define RSB_BLAS_SUPPORTED_TYPE(T) ((errval = rsb__BLAS_is_type_supported(T)) != RSB_ERR_UNSUPPORTED_TYPE) \n\
+#endif /* RSB_RSB_H_INCLUDED */\n\
+#ifndef RSB_RSB_H_INCLUDED\n\
+#include <blas_sparse.h>\n\
+#define RSB_PROGRAM_SUCCESS 0\n\
+#define RSB_PROGRAM_ERROR (-1)\n\
+#define RSB_ERR_NO_ERROR 0\n\
+#define RSB_ERROR printf\n\
+#define RSB_WITH_SPARSE_BLAS_INTERFACE 1\n\
+#define RSB_BLAS_SUPPORTED_TYPE(T) 1\n\
+#define rsb_err_t int\n\
+#define RSB_ERR_UNSUPPORTED_TYPE 0x004\n\
+#define rsb_nnz_idx_t int\n\
+#define rsb_coo_idx_t int\n\
+#define RSB_BLAS_ERROR -1\n\
+#define RSB_BLAS_NO_ERROR 0\n\
+#define RSB_BLAS_SUPPORT_EMPTY 0\n\
+#define rsb__debug_print_vectors_diff(A1,A2,A3,A4,A5,A6,A7) RSB_ERR_NO_ERROR\n\
+int rsb__do_are_same(void*v1_,void*v2_,int n,int typecode,int s1,int s2){ char*v1=(char*)v1_,*v2=(char*)v2_; int vi,bi,bs; switch(typecode){case('S'):bs=4;break;case('C'): case('D'):bs=8;break;case('Z'):bs=16;break;default: return RSB_ERR_NO_ERROR; } for(vi=0;vi< n;++vi) for(bi=0;bi<bs;++bi) if(v1[vi*bs*s1+bi] != v2[vi*bs*s2+bi]) return RSB_BLAS_ERROR; return RSB_ERR_NO_ERROR;}\n\
+#endif /* RSB_RSB_H_INCLUDED */\n\
+int rsb_sbtc_print_vec(void*v,int n,int typecode){ float*fv=(float*)v; double*dv=(double*)v; int vi,fl=1,fi; if(typecode=='C' || typecode=='Z')fl=2; if(typecode=='S' || typecode=='C')for(vi=0;vi<n;++vi){for(fi=0;fi<fl;++fi)printf(\"%f\" ,fv[vi*fl+fi]);printf(\"\\n\");} if(typecode=='D' || typecode=='Z')for(vi=0;vi<n;++vi){for(fi=0;fi<fl;++fi)printf(\" %lf\",dv[vi*fl+fi]);printf(\"\\n\");} ; return RSB_ERR_NO_ERROR;}\n\
+");
+printf("%s",res);
+#quit
+res=sprintf("%s%s","" ,"#if RSB_WITH_SPARSE_BLAS_INTERFACE\n");
+res=sprintf("%s%s",res,all_test("c","decl"));
+res=sprintf("%s%s",res,"#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */\n");
+res=sprintf("%s%s",res,"\nint main(void)\n {\n int errval;int passed=0,failed=0,skipped=0;\n");
+res=sprintf("%s%s",res,"#ifdef RSB_RSB_H_INCLUDED\n");
+res=sprintf("%s%s",res,"\tif( rsb_lib_init(RSB_NULL_INIT_OPTIONS) != RSB_ERR_NO_ERROR)\ngoto err;\n");
+#res=sprintf("%s%s",res,"\tif( rsb_blas_mini_tester() != RSB_ERR_NO_ERROR)\ngoto err;\n");
+res=sprintf("%s%s",res,"#endif /* RSB_RSB_H_INCLUDED */\n");
+res=sprintf("%s%s",res,"#if RSB_WITH_SPARSE_BLAS_INTERFACE\n");
+printf("%s",res);
+all_test("c","CALL");
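+# Each call emitted above follows the same pattern: run one generated test,
+# count RSB_ERR_NO_ERROR as passed, RSB_ERR_UNSUPPORTED_TYPE as skipped
+# (that type was excluded at configure/make time), and anything else as failed.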
+res=sprintf("%s","printf(\"	PASSED:%d\\n	SKIPPED:%d (tests for BLAS types/matrix types excluded at configure/make time are skipped)\\n	FAILED:%d (if any check failed, this may indicate a bug)\\n\",passed,skipped,failed);\n");
+res=sprintf("%s%s",res,"#endif /* RSB_WITH_SPARSE_BLAS_INTERFACE */\n");
+res=sprintf("%s%s",res,"\n	if(failed)goto err;\n");
+res=sprintf("%s%s",res,"\n	return RSB_PROGRAM_SUCCESS;\n err: return RSB_PROGRAM_ERROR;\n");
+res=sprintf("%s%s",res,"\n}\n");
+res=sprintf("%s%s",res,"");
+res=sprintf("%s%s",res,"/* @endcond */\n");
+printf("%s",res);
+
diff --git a/sbtf.F90 b/sbtf.F90
new file mode 100644
index 0000000..01e9467
--- /dev/null
+++ b/sbtf.F90
@@ -0,0 +1,87200 @@
+! /*
+! 
+! Copyright (C) 2008-2014 Michele Martone
+! 
+! This file is part of librsb.
+! 
+! librsb is free software; you can redistribute it and/or modify it
+! under the terms of the GNU Lesser General Public License as published
+! by the Free Software Foundation; either version 3 of the License, or
+! (at your option) any later version.
+! 
+! librsb is distributed in the hope that it will be useful, but WITHOUT
+! ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+! FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+! License for more details.
+! 
+! You should have received a copy of the GNU Lesser General Public
+! License along with librsb; see the file COPYING.
+! If not, see <http://www.gnu.org/licenses/>.
+! 
+! */
+!
+!> @cond INNERDOC
+!> @file
+!> @author Michele Martone 
+!> @brief This file is part of the Octave-based test suite for librsb
+!
+! Sparse BLAS Fortran interface testing code
+!
+! FIXME: missing library initialization!
+! FIXME: using zero-based indices is only partially supported!
+!
+! 
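+! Subroutine names encode the test case mirrored in each PRINT message:
+! e.g. ts_sg_de_usmv_2_n_ap3_bp1_ix1_iy1 tests type=s (REAL*4), sym=g,
+! diag=e, usmv on a 2x2 matrix, trans=n (t/c for transpose/conjugate
+! transpose), alpha=+3 (anr1 would be alpha=-1), beta=+1, incx=1, incy=1.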
+      SUBROUTINE ts_sg_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 1 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 1/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/9, 3/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 1 3
+! 5 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      REAL*4 :: VA(4)=&
+          &(/1, 3, 5, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/21, 15/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 1 2
+! 1 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      REAL*4 :: VA(4)=&
+          &(/1, 2, 1, 2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/12, 0, 12, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_n_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 1 2
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 1/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/6, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_t_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 6, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_c_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 1 1
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 3/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/9, 12/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_n_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/6, 3/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 1 3
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: VA(3)=&
+          &(/1, 3, 2/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/12, 12/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_c_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 3 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 3, 2/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/6, 0, 18, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 5 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 1/)
+      REAL*4 :: VA(2)=(/1, 5/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/21, 0, 3, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 1 3
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      REAL*4 :: VA(4)=&
+          &(/1, 3, 1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 15, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 1 3
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 3, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/7, 4/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 1 1
+! 6 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 6/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/10, 4/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/4, 5/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 1 2
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/6, 0, 5, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_n_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 1/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/5, 0, 3, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 1 3
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      REAL*4 :: VA(4)=&
+          &(/1, 3, 3, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/7, 0, 7, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_c_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 1 2
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/6, 4/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_n_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/4, 3/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_t_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 1 4
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 1/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 4/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/4, 7/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_c_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 1 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 1/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/5, 0, 3, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 1 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/5, 0, 5, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 1/)
+      REAL*4 :: VA(2)=(/1, 2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/2, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 1
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/0, 2/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 1/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_n_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 1 6
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 6/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, -3, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 2
+! 3 5
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      REAL*4 :: VA(4)=&
+          &(/1, 2, 3, 5/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/-1, 0, -4, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 3
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 1/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 3/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/-1, 3/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_n_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 1
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/1, 2/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_t_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/2, 3/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_c_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 3, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/2, 0, -1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 1 3
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 3/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 4 3
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 4, 3/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/-2, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_c_anr1_bp1_ix2_iy2 
+! 
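+! Below, the same REAL*4 (type=s) cases are repeated with alpha = -3 (anr3).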
+      SUBROUTINE ts_sg_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 1
+! 1 3
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      REAL*4 :: VA(4)=&
+          &(/1, 1, 1, 3/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/-3, -9/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 5
+! 0 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 5, 2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/0, -18/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 4
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 4, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/0, -12/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 1
+! 2 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      REAL*4 :: VA(4)=&
+          &(/1, 1, 2, 2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/-3, 0, -9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_n_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 1/)
+      REAL*4 :: VA(2)=(/1, 3/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/-9, 0, 3, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 1 4
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 4/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/-3, 0, -9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 1
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/-3, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 3, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/-9, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_t_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/0, 3/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_c_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 3 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 3, 2/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, -12, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sg_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 1/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_de_usmv_2_c_anr3_bp1_ix2_iy2 
+! 
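+! From this point on the same template is instantiated for double
+! precision (type=d): the handle is created with duscr_begin instead of
+! suscr_begin, and VA, x, cy and y are declared REAL*8; the call
+! sequence is otherwise identical.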
+      SUBROUTINE td_sg_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 1 3
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 1/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 3/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/15, 3/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 0 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/6, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/6, 3/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 1/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_n_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 3 3
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 3, 3/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/15, 0, 12, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_t_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 1 3
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      REAL*8 :: VA(4)=&
+          &(/1, 3, 1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 15, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_c_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 3 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 3, 2/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/6, 18/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_n_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 0 3
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 3/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/6, 12/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 1/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/9, 3/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_c_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 4 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 1/)
+      REAL*8 :: VA(2)=(/1, 4/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/6, 0, 15, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 1 1
+! 2 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      REAL*8 :: VA(4)=&
+          &(/1, 1, 2, 2/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/12, 0, 12, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/6, 0, 3, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sg_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 1 3
+! 1 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      REAL*8 :: VA(4)=&
+          &(/1, 3, 1, 2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/7, 6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 2 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/6, 5/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 0 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/4, 5/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/4, 0, 5, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_n_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 1 3
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 3/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/5, 0, 6, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 1 2
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/5, 0, 5, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_c_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/4, 4/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_n_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 1 4
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 1/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 4/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/4, 7/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_t_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 3 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 3, 2/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/7, 5/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_c_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 0 3
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 3/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/4, 0, 6, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 1/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/5, 0, 3, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 1 3
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 3/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/5, 0, 6, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_c_ap1_bp1_ix2_iy2 
+! 
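+! The following cases take alpha=-1, i.e. y := y - op(A)*x, again over
+! all op(A) variants and stride combinations.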
+      SUBROUTINE td_sg_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/2, 3/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 1
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 3/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/-1, 2/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 2
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 1/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/2, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/2, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_n_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 5 3
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 5, 3/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/-3, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 3
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      REAL*8 :: VA(4)=&
+          &(/1, 3, 2, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, -1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 1/)
+      REAL*8 :: VA(2)=(/1, 2/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/2, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_n_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 6 3
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 6, 3/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/-4, 0/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_t_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 3
+! 1 3
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      REAL*8 :: VA(4)=&
+          &(/1, 3, 1, 3/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/1, -3/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_c_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/2, 0, 3, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 0 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 2/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/2, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 2, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_c_anr1_bp1_ix2_iy2 
+! 
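+! The following cases take alpha=-3: y := y - 3*op(A)*x.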
+      SUBROUTINE td_sg_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 1/)
+      REAL*8 :: VA(2)=(/1, 2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/0, -3/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 0 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/0, -3/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 3
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: VA(3)=&
+          &(/1, 3, 2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/-6, -6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 3
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 3, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/-9, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_n_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 2
+! 3 3
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      REAL*8 :: VA(4)=&
+          &(/1, 2, 3, 3/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/-9, 0, -12, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 1
+! 3 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      REAL*8 :: VA(4)=&
+          &(/1, 1, 3, 2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/-9, 0, -6, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 1 4
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 4/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/0, -12/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 1
+! 0 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/0, -6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_t_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 1/)
+      REAL*8 :: VA(2)=(/1, 2/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/-6, 3/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_c_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sg_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 5 2
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 5, 2/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, -18, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sg_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 1
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, -3, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sg_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, 3, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_de_usmv_2_c_anr3_bp1_ix2_iy2 
+! 
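+! From here on the tested type is COMPLEX*8 (tc_ prefix; type=c in the
+! messages). blas_conj_trans now differs from blas_trans:
+! op(A) = conjg(transpose(A)), so imaginary parts flip sign in the
+! expected results.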
+      SUBROUTINE tc_sg_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+2i 1+6i
+! 0+6i 0+4i
+
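+! With transT=blas_no_trans: A*x = (/(2.e0,8.e0), (0.e0,10.e0)/);
+! scaled by alpha=3 and added to y=(/3, 3/) this gives
+! cy = (/(9.e0,24.e0), (3.e0,30.e0)/).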
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (1.e0,6.e0), (0.e0,6.e0), (0,4)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(9.e0,24.e0), (3,30)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+2i 3+3i
+! 3+3i 2+6i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (3.e0,3.e0), (3.e0,3.e0), (2,6)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(15.e0,15.e0), (18,27)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+2i 0+7i
+! 0+7i 0+4i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,7.e0), (0.e0,7.e0), (0,4)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(6.e0,-27.e0), (3,-33)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+2i 0+1i
+! 0+1i 3+4i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,1.e0), (0.e0,1.e0), (3,4)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(6.e0,9.e0), (0.e0,0.e0), (12.e0,15.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_n_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+2i 0+3i
+! 1+3i 3+8i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,3.e0), (1.e0,3.e0), (3,8)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(9.e0,15.e0), (0.e0,0.e0), (12.e0,33.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_t_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+2i 2+1i
+! 2+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,2.e0), (2.e0,1.e0), (2,1)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(12.e0,-9.e0), (0.e0,0.e0), (9.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_c_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+2i 1+3i
+! 0+3i 0+4i
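+! Worked check, for reference: with incx=2 only x(1)=x(3)=1 enter
+! the product, so A*x = (2+5i, 0+7i); scaled by alpha=3 and added to
+! y=(3,3) this gives the reference cy = ((9,15), (3,21)) below.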
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (1.e0,3.e0), (0.e0,3.e0), (0,4)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(9.e0,15.e0), (3,21)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_n_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+2i 3+3i
+! 1+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,2.e0), (3.e0,3.e0), (1,3)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(9.e0,15.e0), (12,9)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+2i 2+3i
+! 3+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,2.e0), (2.e0,3.e0), (3,3)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(15.e0,-15.e0), (9,-9)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_c_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+2i 1+1i
+! 0+1i 2+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (1.e0,1.e0), (0.e0,1.e0), (2,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(9.e0,9.e0), (0.e0,0.e0), (9.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check both strided result slots and the skipped padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+2i 0+2i
+! 0+2i 1+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,2.e0), (0.e0,2.e0), (1,2)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(6.e0,12.e0), (0.e0,0.e0), (6.e0,12.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check both strided result slots and the skipped padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+2i 3+2i
+! 1+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,2.e0), (3.e0,2.e0), (1,2)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(9.e0,-12.e0), (0.e0,0.e0), (12.e0,-6.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check both strided result slots and the skipped padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+2i 0+0i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(1,2)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(4.e0,2.e0), (3,0)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+2i 2+0i
+! 3+0i 1+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (2.e0,0.e0), (3.e0,0.e0), (1,2)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(7.e0,2.e0), (6,2)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+2i 0+1i
+! 3+1i 0+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,1.e0), (3.e0,1.e0), (0,2)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(7.e0,-3.e0), (3,-3)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+2i 1+3i
+! 0+3i 1+4i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (1.e0,3.e0), (0.e0,3.e0), (1,4)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(5.e0,5.e0), (0.e0,0.e0), (4.e0,7.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check both strided result slots and the skipped padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_n_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+2i 0+1i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,2.e0), (0.e0,1.e0), (0,1)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(4.e0,3.e0), (0.e0,0.e0), (3.e0,1.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check both strided result slots and the skipped padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+2i 3+0i
+! 1+0i 4+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (3.e0,0.e0), (1.e0,0.e0), (4,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(5.e0,-2.e0), (0.e0,0.e0), (10.e0,0.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check both strided result slots and the skipped padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_c_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+2i 3+0i
+! 0+0i 1+6i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,2.e0), (3.e0,0.e0), (1,6)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(7.e0,2.e0), (4,6)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_n_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+2i 0+3i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,2.e0), (0.e0,3.e0), (0,3)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(4.e0,5.e0), (3,3)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_t_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+2i 2+0i
+! 1+0i 0+4i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (2.e0,0.e0), (1.e0,0.e0), (0,4)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(5.e0,-2.e0), (5,-4)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_c_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+2i 0+4i
+! 0+4i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,4.e0), (0.e0,4.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(4.e0,6.e0), (0.e0,0.e0), (4.e0,4.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check both strided result slots and the skipped padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+2i 0+2i
+! 1+2i 0+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,2.e0), (1.e0,2.e0), (0,2)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(5.e0,4.e0), (0.e0,0.e0), (3.e0,4.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check both strided result slots and the skipped padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+2i 3+2i
+! 3+2i 0+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (3.e0,2.e0), (3.e0,2.e0), (0,2)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(7.e0,-4.e0), (0.e0,0.e0), (6.e0,-4.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check both strided result slots and the skipped padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+2i 1+1i
+! 5+1i 0+2i
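+! Worked check, for reference: anr1 marks alpha=-1, so A*x = (2+3i,
+! 5+3i) for x=(1,1) is negated and added to y=(3,3), giving the
+! reference cy = ((1,-3), (-2,-3)) below.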
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (1.e0,1.e0), (5.e0,1.e0), (0,2)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(1.e0,-3.e0), (-2,-3)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+2i 1+2i
+! 3+2i 3+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (1.e0,2.e0), (3.e0,2.e0), (3,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-1.e0,-4.e0), (-1,-2)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+2i 0+1i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,2.e0), (0.e0,1.e0), (0,1)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(2.e0,3.e0), (3,1)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+2i 0+3i
+! 0+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,3.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(2.e0,-5.e0), (0.e0,0.e0), (2.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check both strided result slots and the skipped padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_n_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+2i 2+8i
+! 3+8i 1+6i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (2.e0,8.e0), (3.e0,8.e0), (1,6)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(-1.e0,-10.e0), (0.e0,0.e0), (0.e0,-14.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check both strided result slots and the skipped padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+2i 0+0i
+! 0+0i 3+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/(1.e0,2.e0), (3,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(2.e0,2.e0), (0.e0,0.e0), (0.e0,0.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check both strided result slots and the skipped padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+2i 0+3i
+! 0+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,3.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(2.e0,-5.e0), (2,-3)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_n_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+2i 1+1i
+! 1+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (1.e0,1.e0), (1.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(1.e0,-3.e0), (1,-1)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_t_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+2i 1+1i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (1.e0,1.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(2.e0,3.e0), (1,1)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_c_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+2i 0+0i
+! 0+0i 5+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/(1.e0,2.e0), (5,2)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(2.e0,-2.e0), (0.e0,0.e0), (-2.e0,-2.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check both strided result slots and the skipped padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+2i 2+2i
+! 1+2i 0+4i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (2.e0,2.e0), (1.e0,2.e0), (0,4)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(1.e0,-4.e0), (0.e0,0.e0), (1.e0,-6.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check both strided result slots and the skipped padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+2i 0+6i
+! 3+6i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,2.e0), (0.e0,6.e0), (3,6)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(-1.e0,8.e0), (0.e0,0.e0), (3.e0,6.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check both strided result slots and the skipped padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+2i 0+2i
+! 0+2i 2+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,2.e0), (0.e0,2.e0), (2,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(0.e0,-12.e0), (-3,-6)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+2i 1+2i
+! 2+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,2.e0), (1.e0,2.e0), (2,2)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-6.e0,-12.e0), (0,-6)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+2i 0+6i
+! 3+6i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,2.e0), (0.e0,6.e0), (3,6)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-9.e0,24.e0), (3,18)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+2i 2+2i
+! 0+2i 1+6i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (2.e0,2.e0), (0.e0,2.e0), (1,6)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(-6.e0,-12.e0), (0.e0,0.e0), (0.e0,-24.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_n_anr3_bp1_ix1_iy2 
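+!
+! Editorial note on the strided-output cases: with incy=2 the result
+! lives in y(1) and y(3) while the gap elements y(2), y(4) keep their
+! initial zeros, which is why cy is laid out with zeros in between.
+! For the case above, A*x = (3+4i, 1+8i), so with alpha = -3 and
+! y(1) = y(3) = 3 on entry: y(1) = 3 - 3*(3+4i) = -6-12i and
+! y(3) = 3 - 3*(1+8i) = 0-24i, matching cy.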
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+2i 3+3i
+! 0+3i 0+4i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (3.e0,3.e0), (0.e0,3.e0), (0,4)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(0.e0,-15.e0), (0.e0,0.e0), (-6.e0,-21.e0), (0,0)/)! reference cy after the operation
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+2i 1+1i
+! 0+1i 1+6i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (1.e0,1.e0), (0.e0,1.e0), (1,6)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(0.e0,9.e0), (0.e0,0.e0), (-3.e0,21.e0), (0,0)/)! reference cy after the operation
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+2i 0+0i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(1,2)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(0.e0,-6.e0), (3,0)/)! reference cy after the operation
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+2i 2+6i
+! 1+6i 0+4i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (2.e0,6.e0), (1.e0,6.e0), (0,4)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-3.e0,-24.e0), (-3,-30)/)! reference cy after the operation
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_t_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+2i 1+2i
+! 4+2i 2+4i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (1.e0,2.e0), (4.e0,2.e0), (2,4)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-12.e0,12.e0), (-6,18)/)! reference cy after the operation
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_c_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+2i 0+1i
+! 0+1i 0+4i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,1.e0), (0.e0,1.e0), (0,4)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(0.e0,-9.e0), (0.e0,0.e0), (3.e0,-15.e0), (0,0)/)! reference cy after the operation
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+2i 1+1i
+! 5+1i 0+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (1.e0,1.e0), (5.e0,1.e0), (0,2)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(-15.e0,-9.e0), (0.e0,0.e0), (0.e0,-9.e0), (0,0)/)! reference cy after the operation
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sg_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+2i 1+3i
+! 0+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*8 :: VA(4)=&
+          &(/(1.e0,2.e0), (1.e0,3.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(0.e0,15.e0), (0.e0,0.e0), (-3.e0,9.e0), (0,0)/)! reference cy after the operation
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_de_usmv_2_c_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+2i 0+2i
+! 3+2i 1+6i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,2.e0), (3.e0,2.e0), (1,6)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(6.e0,12.e0), (15,24)/)! reference cy after the operation
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_n_ap3_bp1_ix1_iy1 
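+!
+! From here on the cases exercise COMPLEX*16 (type=z) handles created
+! with zuscr_begin, mirroring the COMPLEX*8/cuscr_begin cases above.
+! A minimal driver sketch for running one generated case in isolation
+! (hypothetical and editorial, not part of the generated suite; it
+! assumes only the subroutine defined above):
+!      PROGRAM usmv_one_case
+!      IMPLICIT NONE
+!      INTEGER :: errval
+!      CALL tz_sg_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+!      IF(errval.NE.0)STOP 1
+!      END PROGRAM usmv_one_case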
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+2i 0+0i
+! 1+0i 2+4i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (1.e0,0.e0), (2,4)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(9.e0,6.e0), (9,12)/)! reference cy after the operation
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+2i 0+0i
+! 4+0i 2+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (4.e0,0.e0), (2,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(18.e0,-6.e0), (9,0)/)! reference cy after the operation
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+2i 2+3i
+! 0+3i 0+6i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (2.e0,3.e0), (0.e0,3.e0), (0,6)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(12.e0,15.e0), (0.e0,0.e0), (3.e0,27.e0), (0,0)/)! reference cy after the operation
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_n_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+2i 3+0i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 1/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/(1.e0,2.e0), (3,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(6.e0,6.e0), (0.e0,0.e0), (12.e0,0.e0), (0,0)/)! reference cy after the operation
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_t_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+2i 0+4i
+! 3+4i 2+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,4.e0), (3.e0,4.e0), (2,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(15.e0,-18.e0), (0.e0,0.e0), (9.e0,-12.e0), (0,0)/)! reference cy after the operation
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_c_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+2i 0+0i
+! 2+0i 1+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (2.e0,0.e0), (1,2)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(6.e0,6.e0), (12,6)/)! reference cy after the operation
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_n_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+2i 0+5i
+! 0+5i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (0.e0,5.e0), (0,5)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(6.e0,21.e0), (3,15)/)! reference cy after the operation
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+2i 0+2i
+! 0+2i 2+4i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,2.e0), (0.e0,2.e0), (2,4)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(6.e0,-12.e0), (9,-18)/)! reference cy after the operation
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_c_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+2i 5+0i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 1/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/(1.e0,2.e0), (5,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(21.e0,6.e0), (0.e0,0.e0), (3.e0,0.e0), (0,0)/)! reference cy after the operation
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_n_ap3_bp1_ix2_iy2 
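+!
+! Editorial note on the strided-input cases: with incx=2 only x(1) and
+! x(3) are read, so the effective operand above is (1, 1). Then
+! A*x = ( (1+2i)+5, 0 ) = ( 6+2i, 0 ), and with alpha = 3 and
+! y(1) = y(3) = 3 on entry: y(1) = 3 + 3*(6+2i) = 21+6i and
+! y(3) = 3 + 0 = 3, matching cy.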
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+2i 1+0i
+! 0+0i 0+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (1.e0,0.e0), (0,2)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(6.e0,6.e0), (0.e0,0.e0), (6.e0,6.e0), (0,0)/)! reference cy after the operation
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+2i 3+2i
+! 1+2i 6+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (3.e0,2.e0), (1.e0,2.e0), (6,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(9.e0,-12.e0), (0.e0,0.e0), (30.e0,-6.e0), (0,0)/)! reference cy after the operation
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+2i 0+0i
+! 2+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 1/)
+      COMPLEX*16 :: VA(2)=(/(1.e0,2.e0), (2,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(4.e0,2.e0), (5,0)/)! reference cy after the operation
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+2i 3+2i
+! 4+2i 0+4i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (3.e0,2.e0), (4.e0,2.e0), (0,4)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(8.e0,4.e0), (6,6)/)! reference cy after the operation
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+2i 3+1i
+! 3+1i 0+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (3.e0,1.e0), (3.e0,1.e0), (0,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(7.e0,-3.e0), (6,-3)/)! reference cy after the operation
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+2i 0+0i
+! 0+0i 2+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/(1.e0,2.e0), (2,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(4.e0,2.e0), (0.e0,0.e0), (5.e0,2.e0), (0,0)/)! reference cy after the operation
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_n_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+2i 3+0i
+! 0+0i 0+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (3.e0,0.e0), (0,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(4.e0,2.e0), (0.e0,0.e0), (6.e0,2.e0), (0,0)/)! reference cy after the operation
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+2i 0+2i
+! 2+2i 0+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,2.e0), (2.e0,2.e0), (0,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(6.e0,-4.e0), (0.e0,0.e0), (3.e0,-4.e0), (0,0)/)! reference cy after the operation
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_c_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+2i 1+3i
+! 2+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (1.e0,3.e0), (2,3)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(5.e0,5.e0), (5,3)/)! reference cy after the operation
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_n_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+2i 3+1i
+! 0+1i 4+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (3.e0,1.e0), (0.e0,1.e0), (4,2)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(4.e0,3.e0), (10,3)/)! reference cy after the operation
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_t_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+2i 2+0i
+! 0+0i 0+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (2.e0,0.e0), (0,2)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(4.e0,-2.e0), (5,-2)/)! reference cy after the operation
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_c_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+2i 1+1i
+! 2+1i 2+6i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (1.e0,1.e0), (2.e0,1.e0), (2,6)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(5.e0,3.e0), (0.e0,0.e0), (7.e0,7.e0), (0,0)/)! reference cy after the operation
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+2i 0+1i
+! 3+1i 3+8i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,1.e0), (3.e0,1.e0), (3,8)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(7.e0,3.e0), (0.e0,0.e0), (6.e0,9.e0), (0,0)/)! reference cy after the operation
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+2i 0+3i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (0.e0,3.e0), (0,3)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(4.e0,-5.e0), (0.e0,0.e0), (3.e0,-3.e0), (0,0)/)! reference cy after the operation
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+2i 0+3i
+! 0+3i 2+4i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,3.e0), (0.e0,3.e0), (2,4)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(2.e0,-5.e0), (1,-7)/)! reference cy after the operation
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+2i 3+3i
+! 0+3i 2+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (3.e0,3.e0), (0.e0,3.e0), (2,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(2.e0,-5.e0), (-2,-3)/)! reference cy after the operation
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+2i 2+1i
+! 0+1i 0+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (2.e0,1.e0), (0.e0,1.e0), (0,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(2.e0,3.e0), (1,3)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
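+! The ix*/iy* variants exercise strided operands: with incy=2 only
+! every other element of y(4) participates in the update and the
+! interleaved zeros must stay untouched.  Worked check for the first
+! case below (A = [1+2i 0; 1 3+8i], x = (1,1), alpha = -1):
+! y(1) = 3 - (1+2i) = 2-2i and y(3) = 3 - (4+8i) = -1-8i, i.e. cy.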
+      SUBROUTINE tz_sg_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+2i 0+0i
+! 1+0i 3+8i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (1.e0,0.e0), (3,8)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(2.e0,-2.e0), (0.e0,0.e0), (-1.e0,-8.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_n_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+2i 3+1i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (3.e0,1.e0), (0,1)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(2.e0,-3.e0), (0.e0,0.e0), (0.e0,-1.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+2i 1+3i
+! 0+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (1.e0,3.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(2.e0,5.e0), (0.e0,0.e0), (1.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+2i 0+0i
+! 0+0i 4+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/(1.e0,2.e0), (4,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(2.e0,-2.e0), (-1,0)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_n_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+2i 0+1i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (0.e0,1.e0), (0,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(2.e0,-3.e0), (3,-1)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_t_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+2i 3+0i
+! 0+0i 3+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (3.e0,0.e0), (3,2)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(2.e0,2.e0), (-3,2)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_c_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+2i 0+1i
+! 1+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (0.e0,1.e0), (1,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(2.e0,-3.e0), (0.e0,0.e0), (2.e0,-1.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+2i 0+4i
+! 0+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (0.e0,4.e0), (0,4)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(2.e0,-6.e0), (0.e0,0.e0), (3.e0,-4.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+2i 0+2i
+! 0+2i 2+4i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,2.e0), (0.e0,2.e0), (2,4)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(2.e0,4.e0), (0.e0,0.e0), (1.e0,6.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_c_anr1_bp1_ix2_iy2 
+! 
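+! Same scheme with alpha=-3 (anr3).  Worked check for the first case
+! below (A = [1+2i 1+3i; 0+3i 1+4i], x = (1,1)): A*x = (2+5i, 1+7i),
+! hence y = 3 - 3*(2+5i) = -3-15i and y = 3 - 3*(1+7i) = 0-21i,
+! exactly the reference cy.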
+      SUBROUTINE tz_sg_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+2i 1+3i
+! 0+3i 1+4i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (1.e0,3.e0), (0.e0,3.e0), (1,4)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(-3.e0,-15.e0), (0,-21)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+2i 0+1i
+! 1+1i 0+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,1.e0), (1.e0,1.e0), (0,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(-3.e0,-9.e0), (3,-9)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+2i 0+0i
+! 1+0i 2+4i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (1.e0,0.e0), (2,4)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(-3.e0,6.e0), (-3,12)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+2i 1+0i
+! 2+0i 3+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (1.e0,0.e0), (2.e0,0.e0), (3,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(-3.e0,-6.e0), (0.e0,0.e0), (-12.e0,0.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_n_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+2i 1+4i
+! 3+4i 0+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (1.e0,4.e0), (3.e0,4.e0), (0,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(-9.e0,-18.e0), (0.e0,0.e0), (0.e0,-18.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+2i 1+2i
+! 2+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (1.e0,2.e0), (2,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(-6.e0,12.e0), (0.e0,0.e0), (0.e0,6.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+2i 3+6i
+! 0+6i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (3.e0,6.e0), (0,6)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(-9.e0,-24.e0), (3,-18)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+2i 0+4i
+! 1+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (0.e0,4.e0), (1,4)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(-3.e0,-18.e0), (3,-12)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_t_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+2i 0+2i
+! 2+2i 2+6i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (0.e0,2.e0), (2.e0,2.e0), (2,6)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(-6.e0,12.e0), (-3,24)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_c_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+2i 1+2i
+! 1+2i 1+2i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (1.e0,2.e0), (1.e0,2.e0), (1,2)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(-3.e0,-12.e0), (0.e0,0.e0), (-3.e0,-12.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+2i 1+3i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,2.e0), (1.e0,3.e0), (0,3)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(0.e0,-15.e0), (0.e0,0.e0), (0.e0,-9.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sg_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+2i 1+0i
+! 4+0i 0+6i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=4
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(4)=&
+          &(/1, 1, 2, 2/)
+      INTEGER :: JA(4)=&
+          &(/1, 2, 1, 2/)
+      COMPLEX*16 :: VA(4)=&
+          &(/(1.e0,2.e0), (1.e0,0.e0), (4.e0,0.e0), (0,6)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(-12.e0,6.e0), (0.e0,0.e0), (0.e0,18.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_de_usmv_2_c_anr3_bp1_ix2_iy2 
+! 
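+! The ussv testers below check triangular solves: each routine marks
+! A as blas_upper_triangular via ussp, preloads y with the reference
+! x, and lets ussv overwrite y with alpha*op(A)**(-1)*y.  In the
+! first case below A is the 2x2 identity and alpha=3, so
+! y = 3*(3,3) = (9,9), the reference cy.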
+      SUBROUTINE ts_su_de_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/3, 3/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_su_de_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/3, 3/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_t_ap3_bp1_ix1_iy1 
+! 
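+! For the real type s, blas_conj_trans is mathematically equivalent
+! to blas_trans, so the trans=c testers amount to ordinary
+! transposed solves.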
+      SUBROUTINE ts_su_de_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/3, 3/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_c_ap3_bp1_ix1_iy1 
+! 
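+! Strided ussv: with incx=2 the solve touches y(1) and y(3) only.
+! First case below: A = [1 1; 0 1], packed x = (6,3), so
+! A**(-1)*x = (3,3), and alpha=3 yields cy = (9,0,9,0).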
+      SUBROUTINE ts_su_de_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=3
+! A =
+! 1 1
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(4)=&
+          &(/6, 0, 3, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_su_de_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/3, 0, 3, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_su_de_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*4 :: alpha=3
+! A =
+! 1 1
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(4)=&
+          &(/3, 0, 6, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_su_de_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_n_ap1_bp1_ix1_iy1 
+! 
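+! With a nontrivial upper factor the transposed solve substitutes
+! forward: for A = [1 1; 0 1] and alpha=1 below, A**T*z = x = (1,2)
+! gives z(1) = 1 and z(2) = 2 - z(1) = 1, hence cy = (1,1).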
+      SUBROUTINE ts_su_de_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=1
+! A =
+! 1 1
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(2)=(/1, 2/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_su_de_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*4 :: alpha=1
+! A =
+! 1 2
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(2)=(/1, 3/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_su_de_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=1
+! A =
+! 1 1
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(4)=&
+          &(/2, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_su_de_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_su_de_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*4 :: alpha=1
+! A =
+! 1 4
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 4, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 5, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_c_ap1_bp1_ix2_iy2 
+! 
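+! The "anr1" tests below repeat the solves with alpha = -1; each x
+! is built as -op(A)*(1,1), so the expected cy is a vector of ones
+! (with padding zeros in the strided incx=2 layouts).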
+      SUBROUTINE ts_su_de_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 1
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(2)=(/-2, -1/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_su_de_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 1
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(2)=(/-1, -2/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_su_de_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 2
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(2)=(/-1, -3/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_su_de_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 2
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(4)=&
+          &(/-3, 0, -1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_su_de_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/-1, 0, -1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_su_de_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/-1, 0, -1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_c_anr1_bp1_ix2_iy2 
+! 
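+! The "anr3" tests use alpha = -3; here x = -3*op(A)*(1,1) and the
+! expected cy entries are nines.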
+      SUBROUTINE ts_su_de_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/-3, -3/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_su_de_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 1
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(2)=(/-3, -6/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_su_de_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/-3, -3/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_su_de_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/-3, 0, -3, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_su_de_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 1
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(4)=&
+          &(/-3, 0, -6, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_su_de_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 2
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(4)=&
+          &(/-3, 0, -9, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_de_ussv_2_c_anr3_bp1_ix2_iy2 
+! 
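+! Double-precision ("td_", REAL*8) variants of the same alpha,
+! trans and stride combinations follow, created with duscr_begin
+! instead of suscr_begin.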
+      SUBROUTINE td_su_de_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=3
+! A =
+! 1 2
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: x(2)=(/9, 3/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_de_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/3, 3/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_de_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*8 :: alpha=3
+! A =
+! 1 1
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/3, 6/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_de_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/3, 0, 3, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_su_de_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=3
+! A =
+! 1 1
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(4)=&
+          &(/3, 0, 6, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_su_de_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/3, 0, 3, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_su_de_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_de_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=1
+! A =
+! 1 5
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 5, 1/)
+      REAL*8 :: x(2)=(/1, 6/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_de_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*8 :: alpha=1
+! A =
+! 1 1
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/1, 2/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_de_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_su_de_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_su_de_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_su_de_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 1
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/-2, -1/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_de_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/-1, -1/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_de_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 1
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/-1, -2/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_de_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/-1, 0, -1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_su_de_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/-1, 0, -1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_su_de_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/-1, 0, -1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_su_de_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/-3, -3/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_de_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/-3, -3/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_de_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 3
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 3, 1/)
+      REAL*8 :: x(2)=(/-3, -12/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_de_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 1
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(4)=&
+          &(/-6, 0, -3, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_su_de_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 1
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(4)=&
+          &(/-3, 0, -6, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_su_de_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/-3, 0, -3, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_de_ussv_2_c_anr3_bp1_ix2_iy2 
+! 
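+! The COMPLEX*8 ('c') variants follow; relative to the REAL*8 tests only
+! the type, the cuscr_begin constructor and the reference data change.
+! As a worked check of the next test (trans=n, alpha=3): solving A*z = x
+! with A = [1 2+3i; 0 1] and x = (9+9i, 3) by back-substitution gives
+! z2 = 3 and z1 = (9+9i) - (2+3i)*3 = 3, so y = alpha*z = (9, 9) = cy.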
+      SUBROUTINE tc_su_de_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 2+3i
+! 0+0i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,3.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/(9.e0,9.e0), (3,0)/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_su_de_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 3+1i
+! 0+0i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (3.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/(3.e0,0.e0), (12,3)/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_t_ap3_bp1_ix1_iy1 
+! 
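+! For trans=c the operator is the conjugate transpose, op(A) = conj(A)**T.
+! Worked check of the next test: A = [1 2; 0 1] has real entries, so
+! op(A) = [1 0; 2 1]; forward-solving op(A)*z = x = (3, 9) gives z1 = 3,
+! z2 = 9 - 2*3 = 3, and y = 3*z = (9, 9) = cy.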
+      SUBROUTINE tc_su_de_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1 2
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: x(2)=(/3, 9/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_c_ap3_bp1_ix1_iy1 
+! 
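+! In the incx=2 variants the logical 2-vector occupies positions 1 and 3
+! of a length-4 array, with zero padding at positions 2 and 4 that ussv
+! must leave untouched. In the next test x = (/3, 0, 3, 0/) encodes the
+! logical vector (3, 3); since A is the identity here, y = 3*(3, 3) lands
+! at positions 1 and 3, giving cy = (/9, 0, 9, 0/).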
+      SUBROUTINE tc_su_de_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/3, 0, 3, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_su_de_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1 3
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 3, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/3, 0, 12, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_su_de_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/3, 0, 3, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_c_ap3_bp1_ix2_iy2 
+! 
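+! The subroutine names appear to encode the parameters spelled out in the
+! PRINT strings: t{d,c,z} = scalar type, su/de = sym=u/diag=e, 2 = dims,
+! {n,t,c} = trans, ap3/ap1/anr1/anr3 = alpha +3/+1/-1/-3, bp1 = beta +1,
+! and ixN/iyN = incx/incy.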
+      SUBROUTINE tc_su_de_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1 3
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 3, 1/)
+      COMPLEX*8 :: x(2)=(/4, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_su_de_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_su_de_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1 1
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      COMPLEX*8 :: x(2)=(/1, 2/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_su_de_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_su_de_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1 3
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 3, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 4, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_su_de_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1 2
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 3, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_su_de_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+0i 0+1i
+! 0+0i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/(-1.e0,-1.e0), (-1,0)/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_su_de_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1 3
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 3, 1/)
+      COMPLEX*8 :: x(2)=(/-1, -4/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_t_anr1_bp1_ix1_iy1 
+! 
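+! Conjugation matters once A has complex entries. Worked check of the
+! next test (trans=c, alpha=-1): A = [1 i; 0 1] gives
+! op(A) = conj(A)**T = [1 0; -i 1]; solving op(A)*z = x = (-1, -1+i)
+! yields z1 = -1 and z2 = (-1+i) - (-i)*(-1) = -1, so y = -z = (1, 1) = cy.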
+      SUBROUTINE tc_su_de_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+0i 0+1i
+! 0+0i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/(-1.e0,0.e0), (-1,1)/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_su_de_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+0i 5+4i
+! 0+0i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (5.e0,4.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/(-6.e0,-4.e0), (0.e0,0.e0), (-1.e0,0.e0), (0,0)/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_su_de_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+0i 0+2i
+! 0+0i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,2.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/(-1.e0,0.e0), (0.e0,0.e0), (-1.e0,-2.e0), (0,0)/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_su_de_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+0i 0+2i
+! 0+0i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,2.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/(-1.e0,0.e0), (0.e0,0.e0), (-1.e0,2.e0), (0,0)/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_su_de_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(2)=(/-3, -3/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_su_de_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(2)=(/-3, -3/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_su_de_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+0i 1+3i
+! 0+0i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,3.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/(-3.e0,0.e0), (-6,9)/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_su_de_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/-3, 0, -3, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_su_de_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1 4
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 4, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/-3, 0, -15, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_su_de_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1 2
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/-3, 0, -9, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_de_ussv_2_c_anr3_bp1_ix2_iy2 
+! 
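+! The COMPLEX*16 ('z') variants follow, built with zuscr_begin but
+! otherwise identical in structure. Worked check of the next test
+! (trans=n, alpha=3): A = [1 i; 0 1], x = (3+3i, 3); back-substitution
+! gives z2 = 3 and z1 = (3+3i) - i*3 = 3, so y = 3*z = (9, 9) = cy.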
+      SUBROUTINE tz_su_de_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 0+1i
+! 0+0i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/(3.e0,3.e0), (3,0)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_su_de_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(2)=(/3, 3/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_su_de_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 4+1i
+! 0+0i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (4.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/(3.e0,0.e0), (15,-3)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_su_de_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 0+2i
+! 0+0i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,2.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/(3.e0,6.e0), (0.e0,0.e0), (3.e0,0.e0), (0,0)/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_su_de_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 3+3i
+! 0+0i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (3.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/(3.e0,0.e0), (0.e0,0.e0), (12.e0,9.e0), (0,0)/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_su_de_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/3, 0, 3, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_su_de_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 1+1i
+! 0+0i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/(2.e0,1.e0), (1,0)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_n_ap1_bp1_ix1_iy1 
+! 
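+! Unlike trans=c, trans=t transposes without conjugating: op(A) = A**T.
+! Worked check of the next test (alpha=1): A = [1 i; 0 1] gives
+! op(A) = [1 0; i 1]; forward-solving op(A)*z = x = (1, 1+i) yields
+! z1 = 1 and z2 = (1+i) - i*1 = 1, so y = z = (1, 1) = cy.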
+      SUBROUTINE tz_su_de_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 0+1i
+! 0+0i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/(1.e0,0.e0), (1,1)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_su_de_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 0+3i
+! 0+0i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/(1.e0,0.e0), (1,-3)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_su_de_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 1+1i
+! 0+0i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/(2.e0,1.e0), (0.e0,0.e0), (1.e0,0.e0), (0,0)/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_su_de_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_su_de_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_su_de_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+0i 1+5i
+! 0+0i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,5.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/(-2.e0,-5.e0), (-1,0)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_su_de_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+0i 3+2i
+! 0+0i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (3.e0,2.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/(-1.e0,0.e0), (-4,-2)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_su_de_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(2)=(/-1, -1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_su_de_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1 3
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/1, 3, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/-4, 0, -1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_su_de_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1 1
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/1, 1, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/-1, 0, -2, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_su_de_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+0i 0+1i
+! 0+0i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/(-1.e0,0.e0), (0.e0,0.e0), (-1.e0,1.e0), (0,0)/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_su_de_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(2)=(/-3, -3/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_su_de_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(2)=(/-3, -3/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_su_de_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(2)=(/-3, -3/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_su_de_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+0i 3+1i
+! 0+0i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 1, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 2, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (3.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/(-12.e0,-3.e0), (0.e0,0.e0), (-3.e0,0.e0), (0,0)/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_su_de_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/-3, 0, -3, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_su_de_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/-3, 0, -3, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_de_ussv_2_c_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(2)=(/3, 6/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(2)=(/6, 3/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/3, 3/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 3, 1/)
+      REAL*4 :: x(4)=&
+          &(/3, 0, 12, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(4)=&
+          &(/9, 0, 3, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(4)=&
+          &(/9, 0, 3, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(2)=(/1, 3/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(2)=(/-1, -3/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/-1, -1/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(2)=(/-2, -1/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/-1, 0, -1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(4)=&
+          &(/-3, 0, -1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/-1, 0, -1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(2)=(/-3, -6/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/-3, -3/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/-3, -3/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 6 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 6, 1/)
+      REAL*4 :: x(4)=&
+          &(/-3, 0, -21, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(4)=&
+          &(/-9, 0, -3, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sl_de_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/-3, 0, -3, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_de_ussv_2_c_anr3_bp1_ix2_iy2 
+! 
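+! The td_* subroutines that follow repeat the same checks in REAL*8;
+! apart from the data kind, the only change is the duscr_begin
+! constructor in place of suscr_begin.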
+      SUBROUTINE td_sl_de_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/3, 3/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_n_ap3_bp1_ix1_iy1 
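+! Identity-matrix cases like the one above exercise only the diagonal
+! path of the solver: inv(A)=I, so the expected result is simply
+! alpha*x, here 3*(3,3)=(9,9).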
+! 
+      SUBROUTINE td_sl_de_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/3, 3/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sl_de_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/3, 3/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sl_de_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: x(4)=&
+          &(/3, 0, 9, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sl_de_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 3, 1/)
+      REAL*8 :: x(4)=&
+          &(/12, 0, 3, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sl_de_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/3, 0, 3, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sl_de_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: x(2)=(/1, 3/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_n_ap1_bp1_ix1_iy1 
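+! Worked forward substitution for the case above: solving
+! [1 0; 2 1]*z = (1,3) gives z1=1 and z2=3-2*1=1; with alpha=1 the
+! expected cy is (1,1).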
+! 
+      SUBROUTINE td_sl_de_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sl_de_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/2, 1/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sl_de_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sl_de_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sl_de_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(4)=&
+          &(/2, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sl_de_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/-1, -2/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sl_de_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/-2, -1/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_t_anr1_bp1_ix1_iy1 
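+! Worked back substitution for the trans=t case above:
+! op(A)=A**T=[1 1; 0 1], so against x=(-2,-1) one gets z2=-1 and
+! z1=-2-(-1)=-1; alpha=-1 then yields the expected cy=(1,1).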
+! 
+      SUBROUTINE td_sl_de_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/-1, -1/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sl_de_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/-1, 0, -1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sl_de_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: x(4)=&
+          &(/-3, 0, -1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sl_de_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/-1, 0, -1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sl_de_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/-3, -6/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sl_de_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: x(2)=(/-9, -3/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sl_de_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/-3, -3/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sl_de_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: x(4)=&
+          &(/-3, 0, -9, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sl_de_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/-3, 0, -3, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sl_de_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/-3, 0, -3, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_de_ussv_2_c_anr3_bp1_ix2_iy2 
+! 
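+! The tc_* subroutines that follow switch to COMPLEX*8 via cuscr_begin.
+! The new case of interest is trans=c, where op(A)=A**H also conjugates
+! the off-diagonal entries.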
+      SUBROUTINE tc_sl_de_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/(3.e0,0.e0), (3,3)/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_n_ap3_bp1_ix1_iy1 
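+! Worked complex solve for the case above: A=[1 0; i 1] and
+! x=(3, 3+3i), so z1=3 and z2=(3+3i)-i*3=3; alpha=3 gives the expected
+! cy=(9,9).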
+! 
+      SUBROUTINE tc_sl_de_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 1+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,2.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/(6.e0,6.e0), (3,0)/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sl_de_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 2+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/(9.e0,-3.e0), (3,0)/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_c_ap3_bp1_ix1_iy1 
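+! Worked conjugate-transpose solve for the case above:
+! op(A)=A**H=[1 2-i; 0 1], so z2=3 and z1=(9-3i)-(2-i)*3=3; alpha=3
+! gives the expected cy=(9,9).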
+! 
+      SUBROUTINE tc_sl_de_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/3, 0, 9, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sl_de_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 1+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/(6.e0,3.e0), (0.e0,0.e0), (3.e0,0.e0), (0,0)/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sl_de_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 0+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/(3.e0,-9.e0), (0.e0,0.e0), (3.e0,0.e0), (0,0)/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sl_de_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 1+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/(1.e0,0.e0), (2,1)/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sl_de_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: x(2)=(/3, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sl_de_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sl_de_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/(1.e0,0.e0), (0.e0,0.e0), (1.e0,1.e0), (0,0)/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_n_ap1_bp1_ix2_iy2 
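+! Worked check for the case above (a sketch, assuming the in-place
+! solve y := alpha*inv(A)*y): with incx=2 only y(1) and y(3) are
+! live. Forward substitution on A = [1 0; i 1] with x = (1, 1+i)
+! gives z1 = 1 and z2 = (1+i) - i*1 = 1, so with alpha = 1 the
+! expected live entries are (1, 1), matching cy = (1, 0, 1, 0).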
+! 
+      SUBROUTINE tc_sl_de_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/(1.e0,1.e0), (0.e0,0.e0), (1.e0,0.e0), (0,0)/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sl_de_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 3, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/4, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_c_ap1_bp1_ix2_iy2 
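+! Worked check for the conjugate-transpose case above: A is real
+! here, so op(A) = conj(A)**T = [1 3; 0 1], and back substitution
+! on the live entries x = (4, 1) gives z2 = 1 and then
+! z1 = 4 - 3*1 = 1; with alpha = 1 this reproduces cy = (1, 0, 1, 0).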
+! 
+      SUBROUTINE tc_sl_de_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/(-1.e0,0.e0), (-1,-1)/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sl_de_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(2)=(/-1, -1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sl_de_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(2)=(/-1, -1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sl_de_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/-1, 0, -2, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sl_de_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/-2, 0, -1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sl_de_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 0+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/(-1.e0,3.e0), (0.e0,0.e0), (-1.e0,0.e0), (0,0)/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sl_de_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: x(2)=(/-3, -9/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_n_anr3_bp1_ix1_iy1 
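+! Worked check for the alpha = -3 case above: forward substitution
+! on A = [1 0; 2 1] with x = (-3, -9) gives z1 = -3 and
+! z2 = -9 - 2*(-3) = -3; scaling by alpha = -3 yields (9, 9) = cy.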
+! 
+      SUBROUTINE tc_sl_de_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 2+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,3.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/(-9.e0,-9.e0), (-3,0)/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_t_anr3_bp1_ix1_iy1 
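+! Worked check for the plain-transpose case above: op(A) = A**T
+! with no conjugation, i.e. [1 2+3i; 0 1], so back substitution on
+! x = (-9-9i, -3) gives z2 = -3 and
+! z1 = (-9-9i) - (2+3i)*(-3) = -3; alpha = -3 then gives (9, 9) = cy.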
+! 
+      SUBROUTINE tc_sl_de_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      COMPLEX*8 :: x(2)=(/-6, -3/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sl_de_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 0+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,2.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/(-3.e0,0.e0), (0.e0,0.e0), (-3.e0,-6.e0), (0,0)/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sl_de_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/-3, 0, -3, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sl_de_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/-3, 0, -3, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_de_ussv_2_c_anr3_bp1_ix2_iy2 
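+! From here the same grid of ussv cases is repeated for type=z,
+! i.e. double-precision complex: the data become COMPLEX*16 and the
+! handle is created with zuscr_begin instead of cuscr_begin; the
+! insertion, solve, and checking logic are otherwise unchanged.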
+! 
+      SUBROUTINE tz_sl_de_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/1, 3, 1/)
+      COMPLEX*16 :: x(2)=(/3, 12/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sl_de_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 1+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/(6.e0,3.e0), (3,0)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sl_de_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/1, 1, 1/)
+      COMPLEX*16 :: x(2)=(/6, 3/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sl_de_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/3, 0, 3, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sl_de_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/1, 1, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/6, 0, 3, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sl_de_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/3, 0, 3, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sl_de_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1 0
+! 4 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/1, 4, 1/)
+      COMPLEX*16 :: x(2)=(/1, 5/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sl_de_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 1+4i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,4.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/(2.e0,4.e0), (1,0)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sl_de_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/1, 1, 1/)
+      COMPLEX*16 :: x(2)=(/2, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sl_de_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sl_de_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1 0
+! 4 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/1, 4, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/5, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sl_de_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/(1.e0,-1.e0), (0.e0,0.e0), (1.e0,0.e0), (0,0)/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sl_de_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(2)=(/-1, -1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sl_de_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1 0
+! 4 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/1, 4, 1/)
+      COMPLEX*16 :: x(2)=(/-5, -1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sl_de_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(2)=(/-1, -1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sl_de_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 0+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,2.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/(-1.e0,0.e0), (0.e0,0.e0), (-1.e0,-2.e0), (0,0)/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sl_de_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/(-1.e0,-1.e0), (0.e0,0.e0), (-1.e0,0.e0), (0,0)/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sl_de_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 2+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,2.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/(-3.e0,2.e0), (0.e0,0.e0), (-1.e0,0.e0), (0,0)/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_c_anr1_bp1_ix2_iy2 
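+! Worked check for the conjugate-transpose case above: op(A) = A**H
+! = [1 2-2i; 0 1], so on the live entries x = (-3+2i, -1) back
+! substitution gives z2 = -1 and
+! z1 = (-3+2i) - (2-2i)*(-1) = -1; alpha = -1 then gives (1, 1),
+! matching the live entries of cy = (1, 0, 1, 0).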
+! 
+      SUBROUTINE tz_sl_de_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(2)=(/-3, -3/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sl_de_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/(-3.e0,-3.e0), (-3,0)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sl_de_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 0+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/(-3.e0,9.e0), (-3,0)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_c_anr3_bp1_ix1_iy1 
+! 
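+! In the *_ix2_iy2 variants below the operand vectors are stored with
+! stride 2: x, y and cy have four elements, with the logical values in
+! positions 1 and 3 and padding zeros in between, matching incx=2.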
+      SUBROUTINE tz_sl_de_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/-3, 0, -3, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sl_de_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/1, 1, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/-6, 0, -3, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sl_de_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 1+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/(-6.e0,3.e0), (0.e0,0.e0), (-3.e0,0.e0), (0,0)/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=e blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_de_ussv_2_c_anr3_bp1_ix2_iy2 
+! 
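+! The following subroutines switch to usmv, y <- alpha * op(A) * x + y
+! (hence the fixed "beta= 1" in the report strings), on 2x2 REAL*4
+! symmetric matrices declared via ussp(A,blas_lower_symmetric).  The
+! subroutine names encode the case exactly as the report strings do:
+! ts -> type=s, ss -> sym=s, de -> diag=e, then the operation, the
+! dimension, trans (n/t/c), alpha (ap3 = +3, anr1 = -1), beta (bp1 = +1),
+! and the x/y strides (ix1/ix2, iy1/iy2).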
+      SUBROUTINE ts_ss_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/12, 12/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
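+! For REAL*4 data blas_conj_trans behaves like blas_trans, since the
+! conjugate transpose of a real matrix is just its transpose; the trans=c
+! cases therefore differ from the trans=t ones only in the sample matrix.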
+      SUBROUTINE ts_ss_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 3, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/15, 15/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_n_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/12, 0, 12, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_t_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_c_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/12, 12/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_n_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 3, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/15, 15/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_c_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/4, 4/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/5, 5/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/5, 5/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_n_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_c_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 5 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 5, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_n_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 3, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/7, 7/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_t_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/4, 4/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_c_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 4 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 4, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/8, 0, 8, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 3, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/-1, -1/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_n_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_n_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_t_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 2 1
+
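+! Only the lower triangle is stored; with blas_lower_symmetric the
+! full matrix is [[1,2],[2,1]], and for REAL data blas_conj_trans
+! reduces to the plain transpose: y = (3,3) + (-1)*(3,3) = (0,0) = cy.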
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_c_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 3, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/-9, -9/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
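+! Here incy=2 scatters the result into y(1) and y(3) while y(2) and
+! y(4) must stay untouched: y = (3,3) + (-3)*I*(1,1) = (0,0), hence
+! cy = (/0, 0, 0, 0/).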
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_n_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_t_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/-6, -6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_c_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 4 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 4, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/-12, 0, -12, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_ss_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_de_usmv_2_c_anr3_bp1_ix2_iy2 
+! 
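+! From here on the generator emits the double precision variants: the
+! td_ subroutines use REAL*8 data and duscr_begin instead of
+! suscr_begin, but are otherwise structured exactly like the ts_
+! (REAL*4) tests above.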
+      SUBROUTINE td_ss_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_ss_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 2 1
+
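+! Worked reference: the symmetric expansion of the stored triangle is
+! [[1,2],[2,1]], so transpose(A)*x = (3,3) and
+! y = (3,3) + 3*(3,3) = (12,12), the cy declared below.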
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/12, 12/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_ss_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_ss_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/12, 0, 12, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_n_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_ss_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_t_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_ss_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_c_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_ss_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_n_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_ss_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_ss_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_c_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_ss_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_ss_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_ss_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_ss_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/4, 4/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_ss_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 3, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/7, 7/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_ss_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 3, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/7, 7/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_ss_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_n_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_ss_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/5, 0, 5, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_ss_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/5, 0, 5, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_c_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_ss_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/4, 4/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_n_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_ss_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/4, 4/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_t_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_ss_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/5, 5/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_c_ap1_bp1_ix2_iy1 
+! 
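+! Worked check (a hand computation, not generator output) for the
+! trans=c case directly above: expanding VA=(/1,1,1/) over the lower
+! triangle gives the effective A=((1,1),(1,1)); on real data the
+! conjugate transpose equals A itself; incx=2 reads the logical
+! x=(/1,1/) out of x(4)=(/1,0,1,0/), so alpha*op(A)*x=(/2,2/) and
+! y(i) = 3 + 2 = 5, matching cy=(/5,5/).
+! 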
+      SUBROUTINE td_ss_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_ss_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/5, 0, 5, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_ss_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_ss_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_ss_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_ss_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_ss_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_n_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_ss_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_ss_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_ss_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_n_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_ss_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_t_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_ss_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_c_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_ss_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_ss_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_ss_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_ss_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/-6, -6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
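+! Worked check (hand computation) for the alpha=-3 case directly
+! above: the expanded A is ((1,2),(2,1)), so A*x=(/3,3/) for
+! x=(/1,1/), and y(i) = 3 + (-3)*3 = -6, matching cy=(/-6,-6/).
+! 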
+      SUBROUTINE td_ss_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/-6, -6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_ss_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/-3, -3/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_ss_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_n_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_ss_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_ss_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_ss_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_ss_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/-3, -3/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_t_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_ss_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/-6, -6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_c_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_ss_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_ss_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 3, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/-9, 0, -9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_ss_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 3, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/-9, 0, -9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_de_usmv_2_c_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(6.e0,3.e0), (6,3)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_n_ap3_bp1_ix1_iy1 
+! 
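+! Note on the complex (tc_, COMPLEX*8) cases: as the reference values
+! here suggest, sym=s still means symmetric rather than Hermitian, so
+! the triangle is expanded without conjugation and trans=n and trans=t
+! act on the same matrix, while trans=c applies conjg(A)**T.  Hand
+! check for the case directly above: A=((1,i),(i,1)), x=(/1,1/),
+! alpha=3 gives alpha*A*x=(/3+3i,3+3i/) and y(i)=3+3+3i=6+3i, matching
+! cy=(/(6.e0,3.e0),(6,3)/).  In the trans=c variant further below,
+! A=((1,2+2i),(2+2i,1)) yields conjg(A)**T*x=(/3-2i,3-2i/), hence
+! y(i) = 3 + 3*(3-2i) = 12-6i, i.e. cy=(/(12.e0,-6.e0),(12,-6)/).
+! 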
+      SUBROUTINE tc_ss_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 0+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(6.e0,9.e0), (6,9)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 2+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,2.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(12.e0,-6.e0), (12,-6)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 1+4i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,4.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(9.e0,12.e0), (0.e0,0.e0), (9.e0,12.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_n_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 2+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(12.e0,3.e0), (0.e0,0.e0), (12.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_t_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 0+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,2.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(6.e0,-6.e0), (0.e0,0.e0), (6.e0,-6.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_c_ap3_bp1_ix1_iy2 
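+! For blas_conj_trans the routine above applies conjg(A)**T: the
+! symmetric expansion of (1, 2i, 1) is A = [1 2i; 2i 1], its conjugate
+! transpose times x = (1, 1) is (1-2i, 1-2i), and 3 + 3*(1-2i) = 6-6i,
+! matching cy.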
+! 
+      SUBROUTINE tc_ss_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(6.e0,3.e0), (6,3)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_n_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 0+5i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,5.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(6.e0,15.e0), (6,15)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/6, 6/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_c_ap3_bp1_ix2_iy1 
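+! Some variants, such as the one above, insert only the two diagonal
+! ones (nnz = 2), so op(A) is the identity for every trans setting and
+! the expected result reduces to cy = y + alpha*x, here 3 + 3*1 = 6.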
+! 
+      SUBROUTINE tc_ss_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 3+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (3.e0,2.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(15.e0,6.e0), (0.e0,0.e0), (15.e0,6.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 2+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(12.e0,-3.e0), (0.e0,0.e0), (12.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_c_ap3_bp1_ix2_iy2 
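+! When incx or incy is 2 the vectors are padded with zeros at the
+! skipped positions, so only elements 1 and 3 take part in the product.
+! The verification loop, however, runs over i = 1, 2, which for incy = 2
+! compares y(1) and the untouched pad y(2) but never the second strided
+! element y(3); this looks like a limitation of the test generator.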
+! 
+      SUBROUTINE tc_ss_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/4, 4/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_n_ap1_bp1_ix1_iy1 
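+! A minimal driver for one of these tests might look as follows (a
+! sketch only; the name tdriver and the STOP convention are not part of
+! this file):
+!
+!      PROGRAM tdriver
+!      IMPLICIT NONE
+!      INTEGER :: errval
+!      CALL tc_ss_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+!      IF (errval.NE.0) STOP 1   ! nonzero errval marks a failed test
+!      END PROGRAM tdriver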
+! 
+      SUBROUTINE tc_ss_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 1+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,2.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(5.e0,2.e0), (5,2)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_t_ap1_bp1_ix1_iy1 
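+! Caveat: initialising local variables in their declarations (as y is
+! here) gives them the implicit SAVE attribute in Fortran, so y keeps
+! its overwritten value across calls; each test subroutine is therefore
+! only meaningful when invoked once per program run.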
+! 
+      SUBROUTINE tc_ss_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 0+6i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,6.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(4.e0,-6.e0), (4,-6)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_n_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 1+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(5.e0,1.e0), (0.e0,0.e0), (5.e0,1.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 6+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (6.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(10.e0,-1.e0), (0.e0,0.e0), (10.e0,-1.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_c_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 0+6i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,6.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(4.e0,6.e0), (4,6)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_n_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/4, 4/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_t_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 2+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,2.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(6.e0,-2.e0), (6,-2)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_c_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 1+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,3.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(5.e0,3.e0), (0.e0,0.e0), (5.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 1+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(5.e0,1.e0), (0.e0,0.e0), (5.e0,1.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 0+4i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,4.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(4.e0,-4.e0), (0.e0,0.e0), (4.e0,-4.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 1+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(1.e0,-1.e0), (1,-1)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1 0
+! 5 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 5, 1/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/-3, -3/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/2, 2/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_c_anr1_bp1_ix1_iy1 
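+! With alpha = -1 usmv effectively subtracts: y := y - op(A)*x. In the
+! identity case above that is 3 - 1 = 2 in both components, as cy
+! records.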
+! 
+      SUBROUTINE tc_ss_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 0+5i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,5.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(2.e0,-5.e0), (0.e0,0.e0), (2.e0,-5.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_n_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 0+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(2.e0,-3.e0), (0.e0,0.e0), (2.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(2.e0,1.e0), (0.e0,0.e0), (2.e0,1.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_n_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 1+4i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,4.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(1.e0,-4.e0), (1,-4)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_t_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/2, 2/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_c_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 3+4i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (3.e0,4.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(-1.e0,-4.e0), (0.e0,0.e0), (-1.e0,-4.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 3+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (3.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(-1.e0,1.e0), (0.e0,0.e0), (-1.e0,1.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 1+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,2.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-3.e0,-6.e0), (-3,-6)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 1+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,2.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-3.e0,-6.e0), (-3,-6)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_t_anr3_bp1_ix1_iy1 
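+! This trans=t variant reuses exactly the same matrix and reference
+! result as the preceding trans=n one: for a blas_lower_symmetric
+! matrix A**T = A, so the two transpose settings must agree, and the
+! paired tests confirm that.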
+! 
+      SUBROUTINE tc_ss_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 4+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (4.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-12.e0,3.e0), (-12,3)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
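+! With incy=2 the result vectors are dimensioned nr*incy: only the odd
+! positions of y (and of the reference cy) carry data; the even ones
+! keep their initial zeros.  The check loop still compares only the
+! first two elements, i.e. the first touched and first untouched slot.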
+      SUBROUTINE tc_ss_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 2+4i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,4.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(-6.e0,-12.e0), (0.e0,0.e0), (-6.e0,-12.e0), (0,0)/)! reference cy after the usmv call
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_n_anr3_bp1_ix1_iy2 
+! 
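+! Since these matrices are symmetric, trans=t performs the same
+! operation as trans=n; the n/t/c variants of a case appear to differ
+! only in the generated entry values.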
+      SUBROUTINE tc_ss_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 2+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,2.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(-6.e0,-6.e0), (0.e0,0.e0), (-6.e0,-6.e0), (0,0)/)! reference cy after the usmv call
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/-6, 0, -6, 0/)! reference cy after the usmv call
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
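+! With incx=2 the input vector is dimensioned nc*incx and read with
+! stride 2: x = (/1, 0, 1, 0/) is effectively the same (1, 1) vector
+! the incx=1 cases use.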
+      SUBROUTINE tc_ss_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 3, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/-9, -9/)! reference cy after the usmv call
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 0+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(0.e0,-9.e0), (0,-9)/)! reference cy after the usmv call
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_t_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 0+9i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,9.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(0.e0,27.e0), (0,27)/)! reference cy after the usmv call
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_c_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 3+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (3.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(-9.e0,-3.e0), (0.e0,0.e0), (-9.e0,-3.e0), (0,0)/)! reference cy after the usmv call
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 2+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,3.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(-6.e0,-9.e0), (0.e0,0.e0), (-6.e0,-9.e0), (0,0)/)! reference cy after the usmv call
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_ss_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/-3, 0, -3, 0/)! reference cy after the usmv call
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_de_usmv_2_c_anr3_bp1_ix2_iy2 
+! 
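+! The tz_* testers repeat the symmetric usmv checks in double precision
+! complex (COMPLEX*16), creating the handle with zuscr_begin instead of
+! cuscr_begin.  For the first case below, the stored lower triangle
+! expands to A = (1, 3i; 3i, 1), so A*x = (1+3i, 1+3i); with alpha=3
+! and y = (3, 3) the result is (6+9i, 6+9i), matching cy.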
+      SUBROUTINE tz_ss_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 0+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(6.e0,9.e0), (6,9)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 4+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (4.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(18.e0,9.e0), (18,9)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 0+4i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,4.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(6.e0,-12.e0), (6,-12)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 3+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (3.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(15.e0,9.e0), (0.e0,0.e0), (15.e0,9.e0), (0,0)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_n_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 0+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,2.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(6.e0,6.e0), (0.e0,0.e0), (6.e0,6.e0), (0,0)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_t_ap3_bp1_ix1_iy2 
+! 
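+! When the generated off-diagonal entry happens to be zero, the matrix
+! degenerates to the identity and only nnz=2 diagonal triplets are
+! inserted, as in the case below.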
+      SUBROUTINE tz_ss_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after the usmv call
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_c_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 1+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(9.e0,3.e0), (9,3)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_n_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/6, 6/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(6.e0,-3.e0), (6,-3)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_c_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 1+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,2.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(9.e0,6.e0), (0.e0,0.e0), (9.e0,6.e0), (0,0)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1 0
+! 5 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/1, 5, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/21, 0, 21, 0/)! reference cy after the usmv call
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/12, 0, 12, 0/)! reference cy after the usmv call
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_c_ap3_bp1_ix2_iy2 
+! 
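+! The alpha tag in the subroutine names tracks the printed scaling
+! factor: ap3/ap1 denote alpha = 3 and 1, anr3/anr1 denote alpha = -3
+! and -1; beta stays bp1 (beta = 1) throughout.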
+      SUBROUTINE tz_ss_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/1, 1, 1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/5, 5/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 4+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (4.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(8.e0,1.e0), (8,1)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 0+5i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,5.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(4.e0,-5.e0), (4,-5)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(4.e0,1.e0), (0.e0,0.e0), (4.e0,1.e0), (0,0)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_n_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 2+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,2.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(6.e0,2.e0), (0.e0,0.e0), (6.e0,2.e0), (0,0)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 0+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(4.e0,-3.e0), (0.e0,0.e0), (4.e0,-3.e0), (0,0)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_c_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/4, 4/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_n_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 1+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,2.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(5.e0,2.e0), (5,2)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_t_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 1+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(5.e0,-1.e0), (5,-1)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_c_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after the usmv call
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 3+5i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (3.e0,5.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(7.e0,5.e0), (0.e0,0.e0), (7.e0,5.e0), (0,0)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 1+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,2.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(5.e0,-2.e0), (0.e0,0.e0), (5.e0,-2.e0), (0,0)/)! reference cy after the usmv call
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_c_ap1_bp1_ix2_iy2 
+! 
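+! With alpha=-1 the update subtracts op(A)*x from y.  For the first
+! case below the stored triangle expands to the all-ones matrix, so
+! A*x = (2, 2) and y = (3, 3) - (2, 2) = (1, 1), the stored reference.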
+      SUBROUTINE tz_ss_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/1, 1, 1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
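+! Worked check for the alpha=-1 no-transpose tester above: the stored
+! lower triangle (1, 1, 1) expands under blas_lower_symmetric to the
+! full A = [[1,1],[1,1]], so A*x = (2,2) for x=(1,1) and
+! y := -1*A*x + y turns y=(3,3) into cy=(1,1). The exact .NE.
+! comparison is safe here because every operand is a small integer
+! value, exactly representable in the floating-point type.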
+      SUBROUTINE tz_ss_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/1, 3, 1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/-1, -1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/2, 2/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 4+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (4.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(-2.e0,-3.e0), (0.e0,0.e0), (-2.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_n_anr1_bp1_ix1_iy2 
+! 
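+! Note on incy=2: y and cy have length 4 but only the stride-2
+! positions y(1) and y(3) are usmv operands; the interleaved zeros are
+! padding. The DO i=1,2 comparison therefore checks one updated
+! element, y(1), and one element that must remain untouched, y(2).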
+      SUBROUTINE tz_ss_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 2+4i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,4.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(0.e0,-4.e0), (0.e0,0.e0), (0.e0,-4.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 1+4i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,4.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(1.e0,-4.e0), (1,-4)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_n_anr1_bp1_ix2_iy1 
+! 
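+! Note on incx=2: x(4)=(/1, 0, 1, 0/) encodes the effective operand
+! x=(1,1) read at stride 2 (elements x(1) and x(3)); the interleaved
+! zeros are never read by usmv.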
+      SUBROUTINE tz_ss_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 2+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,2.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(0.e0,-2.e0), (0,-2)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_t_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 5+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (5.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(-3.e0,1.e0), (-3,1)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_c_anr1_bp1_ix2_iy1 
+! 
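+! Worked check for the conjugate-transpose tester above: A = A^T here,
+! so op(A) = conjg(transpose(A)) = conjg(A) = [[1,5-i],[5-i,1]], giving
+! op(A)*x = (6-i, 6-i); with alpha=-1 and y=(3,3) this yields
+! cy=(-3+i, -3+i). For a symmetric (not hermitian) complex matrix,
+! trans=c thus conjugates the stored values.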
+      SUBROUTINE tz_ss_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 0+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,2.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(2.e0,-2.e0), (0.e0,0.e0), (2.e0,-2.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 0+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,2.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(2.e0,-2.e0), (0.e0,0.e0), (2.e0,-2.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
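+! The preceding two testers use the same A and expect the same cy:
+! transpose(A) = A for a symmetric matrix, so trans=n and trans=t are
+! interchangeable, whereas the trans=c variant that follows conjugates
+! A and flips the sign of the imaginary parts, expecting (2+2i,...)
+! instead of (2-2i,...).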
+      SUBROUTINE tz_ss_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 0+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,2.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(2.e0,2.e0), (0.e0,0.e0), (2.e0,2.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(0.e0,-3.e0), (0,-3)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
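+! Worked check for the first alpha=-3 tester above: A = [[1,i],[i,1]],
+! so A*x = (1+i, 1+i) for x=(1,1), and y := -3*A*x + y maps y=(3,3)
+! to cy=(-3i, -3i), i.e. (/(0,-3), (0,-3)/).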
+      SUBROUTINE tz_ss_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/0, 0/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 3+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (3.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(-9.e0,9.e0), (-9,9)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 3+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (3.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(-9.e0,-9.e0), (0.e0,0.e0), (-9.e0,-9.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_n_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 3+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (3.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(-9.e0,-9.e0), (0.e0,0.e0), (-9.e0,-9.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 0+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(0.e0,9.e0), (0.e0,0.e0), (0.e0,9.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/0, 0/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/0, 0/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_t_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(0.e0,3.e0), (0,3)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_c_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/-6, 0, -6, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 2+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(-6.e0,-3.e0), (0.e0,0.e0), (-6.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_ss_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_de_usmv_2_c_anr3_bp1_ix2_iy2 
+! 
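+! From here the generated testers switch to type s (REAL*4) with the
+! blas_lower_hermitian property. For real-valued data a hermitian
+! matrix is simply symmetric, so for any given A the trans=t and
+! trans=c cases should produce identical results.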
+      SUBROUTINE ts_sh_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_n_ap3_bp1_ix1_iy1 
+! 
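+! As with zuscr_begin for COMPLEX*16, only the handle-creation call is
+! type-prefixed (suscr_begin for REAL*4); ussp, uscr_insert_entries,
+! uscr_end, usmv and usds appear to be generic interfaces from the
+! blas_sparse module that resolve on the argument types.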
+      SUBROUTINE ts_sh_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
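+! Worked check for the real transpose tester above: the lower triangle
+! (1, 1, 1) expands to A = [[1,1],[1,1]], transpose(A)*x = (2,2), and
+! y := 3*A^T*x + y maps y=(3,3) to cy=(9,9).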
+      SUBROUTINE ts_sh_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_n_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_t_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/12, 0, 12, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_c_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_n_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_c_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 3, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/15, 0, 15, 0/)! reference cy (expected y after usmv)
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
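+      ! Expected value: under blas_lower_hermitian the stored triangle
+      ! (1,1)=1, (2,1)=3, (2,2)=1 expands to [[1,3],[3,1]]; its
+      ! transpose times the strided operand (1,1) is (4,4), which
+      ! scaled by alpha=3 and added to y=(3,3) gives (15,15), laid
+      ! out with incy=2 as (15,0,15,0).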
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy (expected y after usmv)
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
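+      ! For REAL*4 data blas_conj_trans is just a transpose:
+      ! [[1,1],[1,1]] transposed times (1,1) gives (2,2); scaled by
+      ! alpha=3 and added to y=(3,3) this is (9,9), strided as
+      ! (9,0,9,0).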
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/4, 4/)! reference cy (expected y after usmv)
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/4, 4/)! reference cy (expected y after usmv)
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/4, 4/)! reference cy (expected y after usmv)
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy (expected y after usmv)
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_n_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy (expected y after usmv)
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy (expected y after usmv)
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_c_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/4, 4/)! reference cy (expected y after usmv)
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_n_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/6, 6/)! reference cy (expected y after usmv)
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_t_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/5, 5/)! reference cy (expected y after usmv)
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_c_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 3, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/7, 0, 7, 0/)! reference cy (expected y after usmv)
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy (expected y after usmv)
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 1 0
+! 6 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 6, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/10, 0, 10, 0/)! reference cy (expected y after usmv)
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 3, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/-1, -1/)! reference cy (expected y after usmv)
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
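+      ! Sign check: A expands to [[1,3],[3,1]], so A*(1,1) = (4,4);
+      ! alpha=-1 then gives (-4,-4)+(3,3) = (-1,-1) = cy.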
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/2, 2/)! reference cy (expected y after usmv)
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/2, 2/)! reference cy (expected y after usmv)
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 4 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 4, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/-2, 0, -2, 0/)! reference cy (expected y after usmv)
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_n_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy (expected y after usmv)
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy (expected y after usmv)
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/0, 0/)! reference cy (expected y after usmv)
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_n_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/2, 2/)! reference cy (expected y after usmv)
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_t_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 3, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/-1, -1/)! reference cy (expected y after usmv)
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_c_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy (expected y after usmv)
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy (expected y after usmv)
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy (expected y after usmv)
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/-3, -3/)! reference cy (expected y after usmv)
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/-3, -3/)! reference cy (expected y after usmv)
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/0, 0/)! reference cy (expected y after usmv)
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/-3, 0, -3, 0/)! reference cy (expected y after usmv)
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_n_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy (expected y after usmv)
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy (expected y after usmv)
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/0, 0/)! reference cy (expected y after usmv)
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 4 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 4, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/-12, -12/)! reference cy (expected y after usmv)
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_t_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/0, 0/)! reference cy (expected y after usmv)
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_c_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy (expected y after usmv)
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
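+! Every test below follows the same skeleton: create a handle with
+! Xuscr_begin (X = s or d by type), mark it blas_lower_hermitian via
+! ussp, insert the coordinate data with uscr_insert_entries, finish
+! assembly with uscr_end, apply usmv, and compare y against the
+! reference cy. Label 9997 records a failure in errval, 9998 is the
+! common cleanup path through usds, and 9999 is the final exit.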
+      SUBROUTINE ts_sh_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sh_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*4 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/-6, 0, -6, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_de_usmv_2_c_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/12, 12/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
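+      ! For this REAL*8 matrix trans=c behaves like trans=t: the
+      ! stored lower triangle (1,1,1) expands to A = [1 1; 1 1], so
+      ! y = (3,3) + 3*(2,2) = (9,9), which is cy above.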
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_n_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_t_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_c_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
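+      ! With incx=2 only x(1) and x(3) are read, so the effective
+      ! operand is (1,1); the zeros in between are padding. A is the
+      ! identity here, hence y = (3,3) + 3*(1,1) = (6,6) = cy.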
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_n_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/12, 12/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/12, 12/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_c_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 4 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 4, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/18, 0, 18, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/12, 0, 12, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/5, 5/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/4, 4/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/5, 0, 5, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_n_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/5, 0, 5, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_c_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/4, 4/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_n_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/4, 4/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_t_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/4, 4/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_c_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 3, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/7, 0, 7, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_n_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_n_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_t_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_c_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! incy=2: also check y(3) and the untouched entries
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! incy=2: also check y(3) and the untouched entries
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! incy=2: also check y(3) and the untouched entries
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/-3, -3/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/-3, 0, -3, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! incy=2: also check y(3) and the untouched entries
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_n_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! incy=2: also check y(3) and the untouched entries
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! incy=2: also check y(3) and the untouched entries
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/-3, -3/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_t_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/-3, -3/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_c_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 4 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      REAL*8 :: VA(3)=&
+          &(/1, 4, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/-12, 0, -12, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! incy=2: also check y(3) and the untouched entries
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! incy=2: also check y(3) and the untouched entries
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! incy=2: also check y(3) and the untouched entries
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_de_usmv_2_c_anr3_bp1_ix2_iy2 
+! 
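+! From here on, the same lower-Hermitian usmv tests are repeated for
+! type=c (single-precision COMPLEX*8) matrices; the blas_conj_trans
+! cases additionally exercise the conjugate transpose of A.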
+      SUBROUTINE tc_sh_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 2+5i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,5.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(12.e0,-15.e0), (12,15)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 1+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,3.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(9.e0,9.e0), (9,-9)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(6.e0,-3.e0), (6,3)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 2+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(12.e0,-3.e0), (0.e0,0.e0), (12.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! incy=2: also check y(3) and the untouched entries
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_n_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/12, 0, 12, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! incy=2: also check y(3) and the untouched entries
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_t_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 0+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,2.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(6.e0,-6.e0), (0.e0,0.e0), (6.e0,6.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! incy=2: also check y(3) and the untouched entries
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_c_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 0+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(6.e0,-9.e0), (6,9)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_n_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 1+4i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,4.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(9.e0,12.e0), (9,-12)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 1+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,2.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(9.e0,-6.e0), (9,6)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_c_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(6.e0,-3.e0), (0.e0,0.e0), (6.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! incy=2: also check y(3) and the untouched entries
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! incy=2: also check y(3) and the untouched entries
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 2+4i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,4.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(12.e0,-12.e0), (0.e0,0.e0), (12.e0,12.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! incy=2: also check y(3) and the untouched entries
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 3+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (3.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(7.e0,-1.e0), (7,1)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 0+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(4.e0,3.e0), (4,-3)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/4, 4/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 3, 1/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/7, 0, 7, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! incy=2: also check y(3) and the untouched entries
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_n_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 0+11i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,11.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(4.e0,11.e0), (0.e0,0.e0), (4.e0,-11.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! incy=2: also check y(3) and the untouched entries
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 0+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,2.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(4.e0,-2.e0), (0.e0,0.e0), (4.e0,2.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! incy=2: also check y(3) and the untouched entries
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_c_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 3, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/7, 7/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_n_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 3+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (3.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(7.e0,1.e0), (7,-1)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_t_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 2+7i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,7.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(6.e0,-7.e0), (6,7)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_c_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 2+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,3.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(6.e0,-3.e0), (0.e0,0.e0), (6.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 3+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (3.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(7.e0,1.e0), (0.e0,0.e0), (7.e0,-1.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(4.e0,-1.e0), (0.e0,0.e0), (4.e0,1.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_c_ap1_bp1_ix2_iy2 
+! 
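+! The cases below repeat the same skeleton with alpha = -1 (the "anr1"
+! leg of the naming scheme), still with beta = +1.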
+      SUBROUTINE tc_sh_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(2.e0,1.e0), (2,-1)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 1, 1/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 3+4i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (3.e0,4.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(-1.e0,4.e0), (0.e0,0.e0), (-1.e0,-4.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_n_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1 0
+! 5 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 5, 1/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/-3, 0, -3, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 2+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,2.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(0.e0,2.e0), (0.e0,0.e0), (0.e0,-2.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 2+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(0.e0,1.e0), (0,-1)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_n_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 3, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/-1, -1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_t_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/2, 2/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_c_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 1+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(1.e0,1.e0), (0.e0,0.e0), (1.e0,-1.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_c_anr1_bp1_ix2_iy2 
+! 
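+! Same skeleton again, now with alpha = -3 (the "anr3" leg).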
+      SUBROUTINE tc_sh_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 1+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,2.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-3.e0,6.e0), (-3,-6)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/0, 0/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 0+7i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,7.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(0.e0,21.e0), (0,-21)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 0+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,2.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(0.e0,6.e0), (0.e0,0.e0), (0.e0,-6.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_n_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 1+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(-3.e0,-3.e0), (0.e0,0.e0), (-3.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 0+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(0.e0,9.e0), (0.e0,0.e0), (0.e0,-9.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(0.e0,3.e0), (0,-3)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 1+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,2.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-3.e0,-6.e0), (-3,6)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_t_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 1+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-3.e0,3.e0), (-3,-3)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_c_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*8 :: VA(2)=(/1, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(0.e0,-3.e0), (0.e0,0.e0), (0.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sh_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*8 :: VA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/-6, 0, -6, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_de_usmv_2_c_anr3_bp1_ix2_iy2 
+! 
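+! From here on the generated cases switch from COMPLEX*8 (tc_*, built
+! via cuscr_begin) to COMPLEX*16 (tz_*, built via zuscr_begin), i.e.
+! type=z in the PRINT strings; the structure is otherwise identical,
+! beginning with the alpha = +3 ("ap3") leg.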
+      SUBROUTINE tz_sh_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/6, 6/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 1+4i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,4.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(9.e0,12.e0), (9,-12)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 2+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(12.e0,-9.e0), (12,9)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(6.e0,-3.e0), (0.e0,0.e0), (6.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_n_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 2+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(12.e0,3.e0), (0.e0,0.e0), (12.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! also check the zero gap elements
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_t_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(6.e0,-3.e0), (0.e0,0.e0), (6.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! also check the zero gap elements
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_c_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 0+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,2.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(6.e0,-6.e0), (6,6)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_n_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 0+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,2.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(6.e0,6.e0), (6,-6)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 2+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(12.e0,-9.e0), (12,9)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_c_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 1+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(9.e0,-3.e0), (0.e0,0.e0), (9.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! also check the zero gap elements
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 0+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(6.e0,9.e0), (0.e0,0.e0), (6.e0,-9.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! also check the zero gap elements
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 1+0i 0+0i
+! 3+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (3.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(15.e0,-3.e0), (0.e0,0.e0), (15.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! also check the zero gap elements
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(4.e0,-1.e0), (4,1)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
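+! Worked example for the preceding test (tz_sh_de_usmv_2_n_ap1_bp1_ix1_iy1),
+! showing how the reference vector cy follows from y := alpha*op(A)*x + y
+! with op(A)=A and alpha=1:
+!   stored lower triangle:  A = [ 1+0i   .   ]
+!                               [ 0+1i  1+0i ]
+!   Hermitian fill-in:      A(1,2) = conjg(A(2,1)) = 0-1i
+!   A*x = (1 + (0-1i), (0+1i) + 1) = (1-1i, 1+1i)
+!   y := (3,3) + (1-1i, 1+1i) = (4-1i, 4+1i), which matches cy.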
+      SUBROUTINE tz_sh_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/1, 1, 1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/5, 5/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 0+6i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,6.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(4.e0,-6.e0), (4,6)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
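+! In the preceding trans=c case the conjugate transpose of a Hermitian
+! matrix is the matrix itself, so op(A) = A:
+!   full A = [ 1+0i  0-6i ],  A*x = (1-6i, 1+6i),
+!            [ 0+6i  1+0i ]
+!   y := (3,3) + (1-6i, 1+6i) = (4-6i, 4+6i), which matches cy.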
+      SUBROUTINE tz_sh_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(4.e0,-1.e0), (0.e0,0.e0), (4.e0,1.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! also check the zero gap elements
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_n_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! also check the zero gap elements
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
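+! Note on the strided variants (incx=2 and/or incy=2): the affected
+! vectors are declared with four elements, with the logical operands at
+! positions 1 and 3; the gap positions 2 and 4 hold explicit zeros that
+! usmv must skip over (for x) and leave untouched (for y), which is why
+! the reference vectors cy carry zeros there as well.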
+      SUBROUTINE tz_sh_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 0+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(4.e0,-3.e0), (0.e0,0.e0), (4.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! also check the zero gap elements
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_c_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 0+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(4.e0,-3.e0), (4,3)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_n_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 1+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(5.e0,1.e0), (5,-1)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_t_ap1_bp1_ix2_iy1 
+! 
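+! In the preceding trans=t case, transpose(A) = conjg(A) for Hermitian A:
+!   full A = [ 1+0i  1-1i ],  transpose(A)*x = (2+1i, 2-1i)
+!            [ 1+1i  1+0i ]
+!   (x is strided with incx=2, so its operands are x(1)=1 and x(3)=1);
+!   y := (3,3) + (2+1i, 2-1i) = (5+1i, 5-1i), which matches cy.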
+      SUBROUTINE tz_sh_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 0+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(4.e0,-3.e0), (4,3)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_c_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 0+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(4.e0,-3.e0), (0.e0,0.e0), (4.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! also check the zero gap elements
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/1, 1, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/5, 0, 5, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! also check the zero gap elements
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 1+0i 0+0i
+! 0+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,2.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(4.e0,-2.e0), (0.e0,0.e0), (4.e0,2.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! also check the zero gap elements
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/2, 2/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
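+! The "anr1" group above and below exercises alpha = -1; in the preceding
+! test A is the identity, so y := (-1)*A*x + y = (3,3) - (1,1) = (2,2),
+! matching cy.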
+      SUBROUTINE tz_sh_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/1, 1, 1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/2, 2/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(2.e0,1.e0), (0.e0,0.e0), (2.e0,-1.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! also check the zero gap elements
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_n_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 3+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (3.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(-1.e0,-3.e0), (0.e0,0.e0), (-1.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! also check the zero gap elements
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! also check the zero gap elements
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/1, 3, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/-1, -1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_n_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 1+2i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,2.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(1.e0,-2.e0), (1,2)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_t_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(2.e0,1.e0), (2,-1)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_c_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 1+6i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,6.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(1.e0,6.e0), (0.e0,0.e0), (1.e0,-6.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! also check the zero gap elements
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 0+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(2.e0,-3.e0), (0.e0,0.e0), (2.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! also check the zero gap elements
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 1+0i 0+0i
+! 0+6i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,6.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(2.e0,6.e0), (0.e0,0.e0), (2.e0,-6.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! also check the zero gap elements
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 0+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(0.e0,9.e0), (0,-9)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1 0
+! 2 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/1, 2, 1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/-6, -6/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/0, 0/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 3+3i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (3.e0,3.e0), (1,0)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(-9.e0,9.e0), (0.e0,0.e0), (-9.e0,-9.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_n_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1 0
+! 0 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/1, 2/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
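+! The variants with incx=2 or incy=2 below exercise strided
+! vectors under standard BLAS increment semantics: with incx=2,
+! x(4)=(/1, 0, 1, 0/) is read at every second position, so the
+! effective operand is (1,1); likewise incy=2 updates only y(1)
+! and y(3), leaving the interleaved zeros untouched.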
+      SUBROUTINE tz_sh_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(0.e0,3.e0), (0,-3)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 0+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (0.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(0.e0,-3.e0), (0,3)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_t_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1 0
+! 3 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/1, 3, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/-9, -9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_c_anr3_bp1_ix2_iy1 
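+! For a hermitian matrix, A^H = A, so the trans=c
+! (conjugate-transpose) variants apply the same operator as
+! trans=n. In the routine above the stored lower triangle
+! (1,1)=1, (2,1)=3, (2,2)=1 expands to A = [1, 3; 3, 1];
+! with effective x=(1,1) (incx=2):
+!   -3*A^H*x + (3,3) = -3*(4,4) + (3,3) = (-9,-9) = cy.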
+! 
+      SUBROUTINE tz_sh_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 1+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (1.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(-3.e0,3.e0), (0.e0,0.e0), (-3.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1 0
+! 1 1
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/1, 1, 1/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/-3, 0, -3, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sh_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 1+0i 0+0i
+! 2+1i 1+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=3
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(3)=&
+          &(/1, 2, 2/)
+      INTEGER :: JA(3)=&
+          &(/1, 1, 2/)
+      COMPLEX*16 :: VA(3)=&
+          &(/(1.e0,0.e0), (2.e0,1.e0), (1,0)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(-6.e0,3.e0), (0.e0,0.e0), (-6.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=e blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_de_usmv_2_c_anr3_bp1_ix2_iy2 
+! 
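+! From here the tests switch to type=s (REAL*4), sym=g (general)
+! matrices flagged blas_unit_diag (diag=i): the diagonal is taken
+! to be implicitly unit, so usmv behaves as if ones were added on
+! the diagonal of op(A). First routine below, assuming that
+! convention: stored A = [0, 3; 2, 0], x=(1,1), alpha=3:
+!   3*(A+I)*x + (3,3) = 3*(4,3) + (3,3) = (15,12) = cy.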
+      SUBROUTINE ts_sg_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 0 3
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*4 :: VA(2)=(/3, 2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/15, 12/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 4 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/4/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/18, 6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_t_ap3_bp1_ix1_iy1 
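+! Note that the implicit unit diagonal also participates under
+! transposition. Above, stored A = [0, 0; 4, 0], so
+! op(A) = A^T + I = [1, 4; 0, 1] and
+!   3*op(A)*x + (3,3) = 3*(5,1) + (3,3) = (18,6) = cy.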
+! 
+      SUBROUTINE ts_sg_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/6, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran array constructors cannot be empty; placeholder (nnz=0, never read)
+      INTEGER :: JA(1)=(/-999999/) ! placeholder (nnz=0, never read)
+      REAL*4 :: VA(1)=(/-999999/) ! placeholder (nnz=0, never read)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_n_ap3_bp1_ix1_iy2 
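+! Degenerate case above: nnz=0 leaves no stored entries, so only
+! the implicit unit diagonal acts, y := 3*I*x + y, i.e. 3+3=6 at
+! the incy=2 positions, matching cy=(/6, 0, 6, 0/). The -999999
+! placeholders in IA/JA/VA are never dereferenced.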
+! 
+      SUBROUTINE ts_sg_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/12, 0, 6, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_t_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 4 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/4/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/18, 0, 6, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_c_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/9, 6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_n_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/6, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 0 2
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*4 :: VA(1)=(/2/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/6, 12/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_c_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 0 1
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*4 :: VA(2)=(/1, 2/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 12, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 0 3
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*4 :: VA(1)=(/3/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/6, 0, 15, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 6, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/4, 5/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/4, 5/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran array constructors cannot be empty; placeholder (nnz=0, never read)
+      INTEGER :: JA(1)=(/-999999/) ! placeholder (nnz=0, never read)
+      REAL*4 :: VA(1)=(/-999999/) ! placeholder (nnz=0, never read)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/4, 4/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 0 2
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*4 :: VA(2)=(/2, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/6, 0, 5, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_n_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 0 3
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*4 :: VA(1)=(/3/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/4, 0, 7, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/4, 0, 5, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_c_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran array constructors cannot be empty; placeholder (nnz=0, never read)
+      INTEGER :: JA(1)=(/-999999/) ! placeholder (nnz=0, never read)
+      REAL*4 :: VA(1)=(/-999999/) ! placeholder (nnz=0, never read)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/4, 4/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_n_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 0 1
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/5, 5/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_t_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/4, 5/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_c_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/4, 0, 5, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran array constructors cannot be empty; placeholder (nnz=0, never read)
+      INTEGER :: JA(1)=(/-999999/) ! placeholder (nnz=0, never read)
+      REAL*4 :: VA(1)=(/-999999/) ! placeholder (nnz=0, never read)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/3/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/7, 0, 4, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/1, 2/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_t_anr1_bp1_ix1_iy1 
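+! With nnz=0 no entries are stored, so the result is due solely to the
+! implicit unit diagonal implied by blas_unit_diag: y := y + alpha*x,
+! here 3 + (-1)*1 = 2 in both positions, matching cy = (/2, 2/).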
+! 
+      SUBROUTINE ts_sg_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/2, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_n_anr1_bp1_ix1_iy2 
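+! Cases with incx=2 or incy=2 use padded vectors such as (/3, 0, 3, 0/):
+! only every second element is a logical vector entry, and the reference
+! values show the interleaved zero padding left untouched by usmv.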
+! 
+      SUBROUTINE ts_sg_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 2, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 2
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*4 :: VA(2)=(/2, 2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_n_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 1
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*4 :: VA(2)=(/1, 2/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/0, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_t_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 1
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_c_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 2, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 5
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*4 :: VA(2)=(/5, 2/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, -3, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 1
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*4 :: VA(2)=(/1, 2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/-3, -6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 1
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/-3, -3/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 1
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*4 :: VA(2)=(/1, 3/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/-3, 0, -9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_n_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 1
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*4 :: VA(2)=(/1, 3/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/-9, 0, -3, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 3
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*4 :: VA(1)=(/3/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/-9, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_t_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 2
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*4 :: VA(1)=(/2/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/0, -6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_c_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 1
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*4 :: VA(2)=(/1, 1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/-3, 0, -3, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 2
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*4 :: VA(1)=(/2/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, -6, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sg_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 5 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/5/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/-15, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sg_di_usmv_2_c_anr3_bp1_ix2_iy2 
+! 
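+! From here the td_* variants repeat the same cases in double precision:
+! REAL*4 becomes REAL*8 and the handle is created with duscr_begin instead
+! of suscr_begin; the call sequence is otherwise identical.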
+      SUBROUTINE td_sg_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/9, 6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/12, 6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 4 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/4/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/6, 0, 18, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_n_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 0 3
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/3/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/6, 0, 15, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_t_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 0 3
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*8 :: VA(2)=(/3, 3/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/15, 0, 15, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_c_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran 90 has no empty array constructor; dummy value (nnz=0)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_n_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sg_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/9, 6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sg_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/6, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_c_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sg_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 4 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/4/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/6, 0, 18, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sg_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 6, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sg_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 0 3
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*8 :: VA(2)=(/3, 3/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/15, 0, 15, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_c_ap3_bp1_ix2_iy2 
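+! Note: for REAL*8 data blas_conj_trans reduces to plain transposition,
+! so the trans=c variants exercise the same arithmetic as trans=t. In
+! the routine above op(A) = [1 3; 3 1] (symmetric, so transposition is
+! a no-op) and y = 3*(4,4) + (3,3) = (15,15), matching cy.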
+! 
+      SUBROUTINE td_sg_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/4, 5/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_n_ap1_bp1_ix1_iy1 
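+! Note: the ix1_iy1 variants use contiguous x and y (incx=incy=1), so
+! the reference vectors carry no interleaved padding zeros: here
+! y = A*(1,1) + (3,3) = (1,2) + (3,3) = (4,5), matching cy = (/4, 5/).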
+! 
+      SUBROUTINE td_sg_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 0 4
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/4/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/4, 8/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 0 2
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*8 :: VA(2)=(/2, 2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/3/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/4, 0, 7, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_n_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 0 3
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/3/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/4, 0, 7, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 0 2
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*8 :: VA(2)=(/2, 2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_c_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 0 3
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*8 :: VA(2)=(/3, 3/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/7, 7/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_n_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sg_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/4, 4/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_t_ap1_bp1_ix2_iy1 
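+! Note: in the nnz=0 tests the IA/JA/VA placeholders should never be
+! referenced by the library (an assumption, given nnz=0 is passed to
+! uscr_insert_entries), so the matrix is the implicit unit diagonal
+! alone and y = 1*I*(1,1) + (3,3) = (4,4), matching cy.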
+! 
+      SUBROUTINE td_sg_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/2/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/6, 4/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_c_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sg_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 0 1
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/5, 0, 5, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sg_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 0 1
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*8 :: VA(2)=(/1, 3/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/7, 0, 5, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sg_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/5, 0, 4, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sg_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_n_anr1_bp1_ix1_iy1 
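+! Note: the anr1/anr3 groups appear to repeat the same pattern with
+! negative alpha (-1 and -3, per the PRINT strings). Above, with
+! nnz=0 the matrix is the bare unit diagonal, so
+! y = -1*(1,1) + (3,3) = (2,2), matching cy.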
+! 
+      SUBROUTINE td_sg_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 4
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*8 :: VA(2)=(/4, 3/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/-1, -2/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_n_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, 2, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 1
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 3
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*8 :: VA(2)=(/3, 3/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/-1, -1/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_n_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sg_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 1
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*8 :: VA(2)=(/1, 2/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/0, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_t_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sg_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/1, 2/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_c_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sg_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sg_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sg_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/2, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sg_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/3/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/0, -9/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 2
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/0, -6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sg_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_n_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 1
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      REAL*8 :: VA(2)=(/1, 1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/-3, 0, -3, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran 90 does not support empty array constructors
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sg_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 3
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/3/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/-9, 0/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sg_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/-3, 0/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_t_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sg_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/3/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/-9, 0/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_c_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sg_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, -3, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sg_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/3/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/-9, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_t_anr3_bp1_ix2_iy2 
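+! Worked check (explanatory sketch): under trans=t the effective matrix
+! [[1,0],[3,1]] (stored A plus the implicit unit diagonal) is transposed:
+!   A**T*x = (4, 1),   y := -3*(4,1) + (3,3) = (-9, 0),
+! i.e. cy=(/-9, 0, 0, 0/) at incy=2.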
+! 
+      SUBROUTINE td_sg_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/3/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/-9, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sg_di_usmv_2_c_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0 1
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/1, 3/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 15/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_n_ap3_bp1_ix1_iy1 
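+! Worked check for the complex case above (explanatory sketch): the stored
+! A=[[0,1],[3,0]] with blas_unit_diag acts as [[1,1],[3,1]], so
+!   y := 3*A*x + y = 3*(2, 4) + (3, 3) = (9, 15),
+! matching cy=(/9, 15/).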
+! 
+      SUBROUTINE tc_sg_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+2i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(0.e0,2.e0), (0,2)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(6.e0,6.e0), (6,6)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 2+1i
+! 2+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(2.e0,1.e0), (2,1)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(12.e0,-3.e0), (12,-3)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_c_ap3_bp1_ix1_iy1 
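+! Worked check (explanatory sketch): trans=c applies the conjugate
+! transpose, so the stored A=[[0,2+i],[2+i,0]] with unit diagonal acts as
+! [[1,2-i],[2-i,1]]; then A**H*x = (3-i, 3-i) and
+!   y := 3*(3-i, 3-i) + (3, 3) = (12-3i, 12-3i),
+! matching cy=(/(12,-3), (12,-3)/).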
+! 
+      SUBROUTINE tc_sg_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy: Fortran array constructors cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! dummy: Fortran array constructors cannot be empty
+      COMPLEX*8 :: VA(1)=(/-999999/) ! dummy: Fortran array constructors cannot be empty
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_n_ap3_bp1_ix1_iy2 
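+! Worked check (explanatory sketch): with nnz=0 and blas_unit_diag the
+! matrix acts as the 2x2 identity, so y := 3*x + y = (3,3) + (3,3) = (6,6),
+! i.e. cy=(/6, 0, 6, 0/) at incy=2.  The (/-999999/) entries are dummies
+! that uscr_insert_entries should never read when nnz=0.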
+! 
+      SUBROUTINE tc_sg_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 1+2i
+! 1+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(1.e0,2.e0), (1,2)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(9.e0,6.e0), (0.e0,0.e0), (9.e0,6.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_t_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 1+1i
+! 3+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(1.e0,1.e0), (3,1)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(15.e0,-3.e0), (0.e0,0.e0), (9.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_c_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 1+5i
+! 0+5i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(1.e0,5.e0), (0,5)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(9.e0,15.e0), (6,15)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_n_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/6, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+4i
+! 1+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(0.e0,4.e0), (1,4)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(9.e0,-12.e0), (6,-12)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_c_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 1+1i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(1.e0,1.e0), (0,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(9.e0,3.e0), (0.e0,0.e0), (6.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+5i
+! 0+5i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(0.e0,5.e0), (0,5)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(6.e0,15.e0), (0.e0,0.e0), (6.e0,15.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+1i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(0.e0,1.e0), (0,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(6.e0,-3.e0), (0.e0,0.e0), (6.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 1+1i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(1.e0,1.e0), (0,1)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(5.e0,1.e0), (4,1)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/1/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/5, 4/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0 1
+! 4 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/1, 4/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/8, 5/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0 1
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/1, 2/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/5, 0, 6, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_n_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 1+3i
+! 1+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(1.e0,3.e0), (1,3)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(5.e0,3.e0), (0.e0,0.e0), (5.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/3/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/7, 0, 4, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_c_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/5, 4/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_n_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+2i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(0.e0,2.e0), (0,2)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(4.e0,2.e0), (4,2)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_t_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy: Fortran array constructors cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! dummy: Fortran array constructors cannot be empty
+      COMPLEX*8 :: VA(1)=(/-999999/) ! dummy: Fortran array constructors cannot be empty
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/4, 4/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_c_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+4i
+! 0+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(0.e0,4.e0), (0,4)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(4.e0,4.e0), (0.e0,0.e0), (4.e0,4.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+5i
+! 1+5i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(0.e0,5.e0), (1,5)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(5.e0,5.e0), (0.e0,0.e0), (4.e0,5.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 2+4i
+! 0+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(2.e0,4.e0), (0,4)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(4.e0,-4.e0), (0.e0,0.e0), (6.e0,-4.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/1/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/2, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 1+2i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(1.e0,2.e0), (0,2)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(2.e0,-2.e0), (1,-2)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+3i
+! 1+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(0.e0,3.e0), (1,3)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(1.e0,3.e0), (2,3)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 1+1i
+! 3+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(1.e0,1.e0), (3,1)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(1.e0,-1.e0), (0.e0,0.e0), (-1.e0,-1.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_n_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+4i
+! 0+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(0.e0,4.e0), (0,4)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(2.e0,-4.e0), (0.e0,0.e0), (2.e0,-4.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 1+1i
+! 2+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(1.e0,1.e0), (2,1)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(0.e0,1.e0), (0.e0,0.e0), (1.e0,1.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 1+7i
+! 2+7i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(1.e0,7.e0), (2,7)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(1.e0,-7.e0), (0,-7)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_n_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0 3
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/3/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/2, -1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_t_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/2, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_c_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+2i
+! 2+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(0.e0,2.e0), (2,2)/)
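+! Note: (0.e0,2.e0) and (2,2) are equivalent spellings of Fortran complex
+! constants (real part, imaginary part); integer parts convert implicitly.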
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(2.e0,-2.e0), (0.e0,0.e0), (0.e0,-2.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 1+1i
+! 4+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(1.e0,1.e0), (4,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(-2.e0,-1.e0), (0.e0,0.e0), (1.e0,-1.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0 2
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/2/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/2, 0, 0, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! placeholder; Fortran 90 has no empty array constructor, and with nnz=0 this is never read
+      INTEGER :: JA(1)=(/-999999/) ! placeholder; Fortran 90 has no empty array constructor, and with nnz=0 this is never read
+      COMPLEX*8 :: VA(1)=(/-999999/) ! placeholder; Fortran 90 has no empty array constructor, and with nnz=0 this is never read
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/0, 0/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
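+! Worked check (a sketch, assuming blas_unit_diag still supplies the implicit
+! unit diagonal when nnz=0, so the effective A is the identity):
+! y := alpha*A*x + y = -3*(1,1) + (3,3) = (0,0), which matches cy above.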
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/1/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/0, -3/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+1i
+! 1+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(0.e0,1.e0), (1,1)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-3.e0,3.e0), (0,3)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0 2
+! 4 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/2, 4/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/-6, 0, -12, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_n_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+1i
+! 2+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(0.e0,1.e0), (2,1)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(-6.e0,-3.e0), (0.e0,0.e0), (0.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+3i
+! 3+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(0.e0,3.e0), (3,3)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(-9.e0,9.e0), (0.e0,0.e0), (0.e0,9.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+1i
+! 4+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(0.e0,1.e0), (4,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(0.e0,-3.e0), (-12,-3)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/3/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/-9, 0/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_t_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 3+2i
+! 4+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(3.e0,2.e0), (4,2)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-12.e0,6.e0), (-9,6)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_c_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! placeholder; Fortran 90 has no empty array constructor, and with nnz=0 this is never read
+      INTEGER :: JA(1)=(/-999999/) ! placeholder; Fortran 90 has no empty array constructor, and with nnz=0 this is never read
+      COMPLEX*8 :: VA(1)=(/-999999/) ! placeholder; Fortran 90 has no empty array constructor, and with nnz=0 this is never read
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+1i
+! 4+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(0.e0,1.e0), (4,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(-12.e0,-3.e0), (0.e0,0.e0), (0.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sg_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+1i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*8 :: VA(2)=(/(0.e0,1.e0), (0,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(0.e0,3.e0), (0.e0,0.e0), (0.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sg_di_usmv_2_c_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0 2
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*16 :: VA(1)=(/2/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/12, 6/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+5i
+! 0+5i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(0.e0,5.e0), (0,5)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(6.e0,15.e0), (6,15)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+2i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(0.e0,2.e0), (0,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(6.e0,-6.e0), (6,-6)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
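+! Worked check (a sketch, assuming trans=c conjugate-transposes the stored
+! entries plus the implicit unit diagonal): op(A) = [[1,-2i],[-2i,1]], so
+! alpha*op(A)*x = 3*(1-2i, 1-2i) and y = (3,3) + (3-6i, 3-6i) = (6-6i, 6-6i),
+! which matches cy above.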
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+3i
+! 1+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(0.e0,3.e0), (1,3)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(6.e0,9.e0), (0.e0,0.e0), (9.e0,9.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
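+! Worked check (a sketch, assuming incy=2 places the logical y in storage
+! slots y(1) and y(3)): effective A = [[1,3i],[1+3i,1]] and x = (1,1), so
+! alpha*A*x = 3*(1+3i, 2+3i) = (3+9i, 6+9i); adding the logical y = (3,3)
+! gives (6+9i, 9+9i), stored in cy as ((6,9), 0, (9,9), 0).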
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_n_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0 1
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/1, 3/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/15, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_t_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 3+2i
+! 2+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(3.e0,2.e0), (2,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(12.e0,-6.e0), (0.e0,0.e0), (15.e0,-6.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_c_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 3+1i
+! 5+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(3.e0,1.e0), (5,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(15.e0,3.e0), (21,3)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_n_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 3+3i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(3.e0,3.e0), (0,3)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(6.e0,9.e0), (15,9)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 2+1i
+! 1+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(2.e0,1.e0), (1,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(9.e0,-3.e0), (12,-3)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_c_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+1i
+! 3+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(0.e0,1.e0), (3,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(6.e0,3.e0), (0.e0,0.e0), (15.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 2+1i
+! 2+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(2.e0,1.e0), (2,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(12.e0,3.e0), (0.e0,0.e0), (12.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+4i
+! 0+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(0.e0,4.e0), (0,4)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(6.e0,-12.e0), (0.e0,0.e0), (6.e0,-12.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 1+1i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(1.e0,1.e0), (0,1)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(5.e0,1.e0), (4,1)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 1+1i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(1.e0,1.e0), (0,1)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(4.e0,1.e0), (5,1)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*16 :: VA(1)=(/1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/4, 5/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 1+3i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(1.e0,3.e0), (0,3)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(5.e0,3.e0), (0.e0,0.e0), (4.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_n_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+1i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(0.e0,1.e0), (0,1)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(4.e0,1.e0), (0.e0,0.e0), (4.e0,1.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+2i
+! 1+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(0.e0,2.e0), (1,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(5.e0,-2.e0), (0.e0,0.e0), (4.e0,-2.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_c_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+3i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(0.e0,3.e0), (0,3)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(4.e0,3.e0), (4,3)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_n_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/3/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/7, 4/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_t_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+1i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(0.e0,1.e0), (0,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(4.e0,-1.e0), (4,-1)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_c_ap1_bp1_ix2_iy1 
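+! All testers share one control flow: label 9997 flags a failure in
+! errval, label 9998 is the common cleanup point where usds releases
+! the matrix handle, and label 9999 is jumped to directly when the
+! handle could not be created at all.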
+! 
+      SUBROUTINE tz_sg_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 3+1i
+! 3+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(3.e0,1.e0), (3,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(7.e0,1.e0), (0.e0,0.e0), (7.e0,1.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+1i
+! 1+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(0.e0,1.e0), (1,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(5.e0,1.e0), (0.e0,0.e0), (4.e0,1.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 1+2i
+! 1+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(1.e0,2.e0), (1,2)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(5.e0,-2.e0), (0.e0,0.e0), (5.e0,-2.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_c_ap1_bp1_ix2_iy2 
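+! The subroutine names appear to encode the test parameters: tz/ts
+! selects the numeric type (COMPLEX*16 or REAL*4), sg/su the symmetry
+! (general or upper triangular), di the implicit unit diagonal, then
+! the operation, the matrix order, the transpose flag (n/t/c), alpha
+! (ap1 = +1, anr1 = -1, anr3 = -3, ...), beta, and the incx/incy
+! strides.  With a stride of 2 only every other element of x and y
+! takes part; the interleaved padding slots stay zero.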
+! 
+      SUBROUTINE tz_sg_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 5+1i
+! 2+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(5.e0,1.e0), (2,1)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(-3.e0,-1.e0), (0,-1)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty arrays
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty arrays
+      COMPLEX*16 :: VA(1)=(/-999999/) ! Fortran does not support empty arrays
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/2, 2/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_t_anr1_bp1_ix1_iy1 
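+! In the nnz=0 cases, IA, JA and VA are declared with a single
+! sentinel element (-999999) instead of size zero; since nnz=0 the
+! insertion call should read no entries, so the sentinels never reach
+! the matrix and only the implicit unit diagonal remains.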
+! 
+      SUBROUTINE tz_sg_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+2i
+! 1+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(0.e0,2.e0), (1,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(1.e0,2.e0), (2,2)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0 1
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/1, 1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_n_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 6+4i
+! 0+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(6.e0,4.e0), (0,4)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(2.e0,-4.e0), (0.e0,0.e0), (-4.e0,-4.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+6i
+! 0+6i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(0.e0,6.e0), (0,6)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(2.e0,6.e0), (0.e0,0.e0), (2.e0,6.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+2i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(0.e0,2.e0), (0,2)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(2.e0,-2.e0), (2,-2)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_n_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+1i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(0.e0,1.e0), (0,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(2.e0,-1.e0), (2,-1)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_t_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 1+2i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(1.e0,2.e0), (0,2)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(2.e0,2.e0), (1,2)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_c_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+1i
+! 3+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(0.e0,1.e0), (3,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(2.e0,-1.e0), (0.e0,0.e0), (-1.e0,-1.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+3i
+! 2+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(0.e0,3.e0), (2,3)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(0.e0,-3.e0), (0.e0,0.e0), (2.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0 2
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/2, 3/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/-1, 0, 0, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+1i
+! 3+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(0.e0,1.e0), (3,1)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(0.e0,-3.e0), (-9,-3)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 1+7i
+! 0+7i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(1.e0,7.e0), (0,7)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(0.e0,-21.e0), (-3,-21)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 1+4i
+! 0+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(1.e0,4.e0), (0,4)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(0.e0,12.e0), (-3,12)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/3/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/0, 0, -9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_n_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 1+2i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(1.e0,2.e0), (0,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(0.e0,-6.e0), (0.e0,0.e0), (-3.e0,-6.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+3i
+! 2+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(0.e0,3.e0), (2,3)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(-6.e0,9.e0), (0.e0,0.e0), (0.e0,9.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+3i
+! 1+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(0.e0,3.e0), (1,3)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(0.e0,-9.e0), (-3,-9)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty arrays
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty arrays
+      COMPLEX*16 :: VA(1)=(/-999999/) ! Fortran does not support empty arrays
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/0, 0/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_t_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+2i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(0.e0,2.e0), (0,2)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(0.e0,6.e0), (0,6)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_c_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/3/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/0, 0, -9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 1+1i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=2
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(2)=(/1, 2/)
+      INTEGER :: JA(2)=(/2, 1/)
+      COMPLEX*16 :: VA(2)=(/(1.e0,1.e0), (0,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(0.e0,-3.e0), (0.e0,0.e0), (-3.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sg_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/1/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/-3, 0, 0, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=g diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sg_di_usmv_2_c_anr3_bp1_ix2_iy2 
+! 
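+! From here on the testers switch from usmv to the Sparse BLAS
+! triangular solve ussv, which overwrites its vector argument in
+! place: x := alpha*op(T)**(-1)*x.  Each handle is flagged
+! blas_upper_triangular and blas_unit_diag, y is first initialized
+! from the reference x, and cy holds the expected solution.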
+      SUBROUTINE ts_su_di_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty arrays
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty arrays
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty arrays
+      REAL*4 :: x(2)=(/3, 3/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_su_di_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty arrays
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty arrays
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty arrays
+      REAL*4 :: x(2)=(/3, 3/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_su_di_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty arrays
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty arrays
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty arrays
+      REAL*4 :: x(2)=(/3, 3/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_su_di_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty arrays
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty arrays
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty arrays
+      REAL*4 :: x(4)=&
+          &(/3, 0, 3, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_su_di_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty arrays
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty arrays
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty arrays
+      REAL*4 :: x(4)=&
+          &(/3, 0, 3, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_su_di_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*4 :: alpha=3
+! A =
+! 0 3
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*4 :: VA(1)=(/3/)
+      REAL*4 :: x(4)=&
+          &(/3, 0, 12, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_c_ap3_bp1_ix2_iy2 
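+! Worked check of the preceding trans=c case: with the implicit unit
+! diagonal, A = [1 3; 0 1], so op(A) = A**H = [1 0; 3 1]. Solving
+! op(A)*z = x for x=(3,12) (elements 1 and 3 at stride 2) gives
+! z = (3, 12-3*3) = (3,3); scaling by alpha=3 yields (9,9), which
+! matches cy.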
+! 
+      SUBROUTINE ts_su_di_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(2)=(/1, 1/)! reference x
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after the solve
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_su_di_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(2)=(/1, 1/)! reference x
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after the solve
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_su_di_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*4 :: alpha=1
+! A =
+! 0 2
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*4 :: VA(1)=(/2/)
+      REAL*4 :: x(2)=(/1, 3/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after the solve
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_c_ap1_bp1_ix1_iy1 
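+! Note: for REAL data blas_conj_trans is equivalent to blas_trans,
+! since conjugation is a no-op on real values; the trans=c cases thus
+! exercise the same arithmetic as their trans=t counterparts.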
+! 
+      SUBROUTINE ts_su_di_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after the solve
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_n_ap1_bp1_ix2_iy2 
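+! Note: the ix2/iy2 variants store the logical 2-vector at stride
+! incx=2 in a length-4 array, so only elements 1 and 3 carry data;
+! the interleaved zeros in x and cy are the untouched gap positions.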
+! 
+      SUBROUTINE ts_su_di_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after the solve
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_su_di_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*4 :: alpha=1
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 2, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after the solve
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_su_di_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(2)=(/-1, -1/)! reference x
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after the solve
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_su_di_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(2)=(/-1, -1/)! reference x
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after the solve
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_su_di_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(2)=(/-1, -1/)! reference x
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after the solve
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_su_di_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(4)=&
+          &(/-1, 0, -1, 0/)! reference x
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after the solve
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_su_di_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(4)=&
+          &(/-1, 0, -1, 0/)! reference x
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after the solve
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_su_di_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 4
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*4 :: VA(1)=(/4/)
+      REAL*4 :: x(4)=&
+          &(/-1, 0, -5, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after the solve
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_su_di_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(2)=(/-3, -3/)! reference x
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after the solve
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_su_di_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(2)=(/-3, -3/)! reference x
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after the solve
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_su_di_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 2
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*4 :: VA(1)=(/2/)
+      REAL*4 :: x(2)=(/-3, -9/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after the solve
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_su_di_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/-6, 0, -3, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after the solve
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_su_di_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 2
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*4 :: VA(1)=(/2/)
+      REAL*4 :: x(4)=&
+          &(/-3, 0, -9, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after the solve
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_su_di_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 2
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*4 :: VA(1)=(/2/)
+      REAL*4 :: x(4)=&
+          &(/-3, 0, -9, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after the solve
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_su_di_ussv_2_c_anr3_bp1_ix2_iy2 
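+! The td_* subroutines below repeat the same set of ussv cases in
+! double precision (REAL*8, type=d), creating the handle with
+! duscr_begin instead of suscr_begin.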
+! 
+      SUBROUTINE td_su_di_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(2)=(/3, 3/)! reference x
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after the solve
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_di_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=3
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/3, 6/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after the solve
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_di_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*8 :: alpha=3
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/3, 6/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after the solve
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_di_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/3, 0, 3, 0/)! reference x
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after the solve
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_su_di_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=3
+! A =
+! 0 2
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/2/)
+      REAL*8 :: x(4)=&
+          &(/3, 0, 9, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after the solve
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_su_di_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*8 :: alpha=3
+! A =
+! 0 2
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/2/)
+      REAL*8 :: x(4)=&
+          &(/3, 0, 9, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after the solve
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_su_di_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=1
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/2, 1/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after the solve
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_di_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=1
+! A =
+! 0 3
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/3/)
+      REAL*8 :: x(2)=(/1, 4/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after the solve
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_di_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*8 :: alpha=1
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/1, 2/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after the solve
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_di_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after the solve
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_su_di_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=1
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 2, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after the solve
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_su_di_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after the solve
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_su_di_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 4
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/4/)
+      REAL*8 :: x(2)=(/-5, -1/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after the solve
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_di_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 3
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/3/)
+      REAL*8 :: x(2)=(/-1, -4/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after the solve
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_di_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 4
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/4/)
+      REAL*8 :: x(2)=(/-1, -5/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after the solve
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_di_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 3
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/3/)
+      REAL*8 :: x(4)=&
+          &(/-4, 0, -1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after the solve
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_su_di_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      REAL*8 :: x(4)=&
+          &(/-1, 0, -1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check strided elements and the untouched padding
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_su_di_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/-1, 0, -2, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check strided elements and the untouched padding
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_c_anr1_bp1_ix2_iy2 
+! 
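+! The td_ subroutines below repeat the preceding alpha=-1 tests with alpha=-3.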
+      SUBROUTINE td_su_di_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/-6, -3/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_di_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      REAL*8 :: x(2)=(/-3, -3/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_di_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 3
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/3/)
+      REAL*8 :: x(2)=(/-3, -12/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_su_di_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 2
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      REAL*8 :: VA(1)=(/2/)
+      REAL*8 :: x(4)=&
+          &(/-9, 0, -3, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check strided elements and the untouched padding
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_su_di_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      REAL*8 :: x(4)=&
+          &(/-3, 0, -3, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check strided elements and the untouched padding
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_su_di_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      REAL*8 :: x(4)=&
+          &(/-3, 0, -3, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check strided elements and the untouched padding
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_su_di_ussv_2_c_anr3_bp1_ix2_iy2 
+! 
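+! The tc_ subroutines below repeat the same tests for single-precision
+! complex (COMPLEX*8) matrices, created via cuscr_begin; for complex
+! data, trans=c exercises a genuine conjugate transpose.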
+      SUBROUTINE tc_su_di_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/1/)
+      COMPLEX*8 :: x(2)=(/6, 3/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_su_di_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/1/)
+      COMPLEX*8 :: x(2)=(/3, 6/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_su_di_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      COMPLEX*8 :: VA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      COMPLEX*8 :: x(2)=(/3, 3/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_su_di_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 1+1i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/(1,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/(6.e0,3.e0), (0.e0,0.e0), (3.e0,0.e0), (0,0)/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check strided elements and the untouched padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_su_di_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+2i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/(0,2)/)
+      COMPLEX*8 :: x(4)=&
+          &(/(3.e0,0.e0), (0.e0,0.e0), (3.e0,6.e0), (0,0)/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check strided elements and the untouched padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_su_di_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      COMPLEX*8 :: VA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      COMPLEX*8 :: x(4)=&
+          &(/3, 0, 3, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check strided elements and the untouched padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_c_ap3_bp1_ix2_iy2 
+! 
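+! alpha=1 variants of the COMPLEX*8 ussv tests.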
+      SUBROUTINE tc_su_di_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/1/)
+      COMPLEX*8 :: x(2)=(/2, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_su_di_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 1+2i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/(1,2)/)
+      COMPLEX*8 :: x(2)=(/(1.e0,0.e0), (2,2)/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_su_di_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+1i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/(0,1)/)
+      COMPLEX*8 :: x(2)=(/(1.e0,0.e0), (1,-1)/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_su_di_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      COMPLEX*8 :: VA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check strided elements and the untouched padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_su_di_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+2i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/(0,2)/)
+      COMPLEX*8 :: x(4)=&
+          &(/(1.e0,0.e0), (0.e0,0.e0), (1.e0,2.e0), (0,0)/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check strided elements and the untouched padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_su_di_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+2i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/(0,2)/)
+      COMPLEX*8 :: x(4)=&
+          &(/(1.e0,0.e0), (0.e0,0.e0), (1.e0,-2.e0), (0,0)/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check strided elements and the untouched padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_c_ap1_bp1_ix2_iy2 
+! 
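+! alpha=-1 variants of the COMPLEX*8 ussv tests.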
+      SUBROUTINE tc_su_di_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0 2
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/2/)
+      COMPLEX*8 :: x(2)=(/-3, -1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_su_di_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      COMPLEX*8 :: VA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      COMPLEX*8 :: x(2)=(/-1, -1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_su_di_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 2+3i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/(2,3)/)
+      COMPLEX*8 :: x(2)=(/(-1.e0,0.e0), (-3,3)/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_su_di_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0 2
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/2/)
+      COMPLEX*8 :: x(4)=&
+          &(/-3, 0, -1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check strided elements and the untouched padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_su_di_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 2+2i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/(2,2)/)
+      COMPLEX*8 :: x(4)=&
+          &(/(-1.e0,0.e0), (0.e0,0.e0), (-3.e0,-2.e0), (0,0)/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check strided elements and the untouched padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_su_di_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      COMPLEX*8 :: VA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      COMPLEX*8 :: x(4)=&
+          &(/-1, 0, -1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check strided elements and the untouched padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_c_anr1_bp1_ix2_iy2 
+! 
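+! alpha=-3 variants of the COMPLEX*8 ussv tests.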
+      SUBROUTINE tc_su_di_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 2+1i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/(2,1)/)
+      COMPLEX*8 :: x(2)=(/(-9.e0,-3.e0), (-3,0)/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_su_di_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/1/)
+      COMPLEX*8 :: x(2)=(/-3, -6/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_su_di_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+1i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/(0,1)/)
+      COMPLEX*8 :: x(2)=(/(-3.e0,0.e0), (-3,3)/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_su_di_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      COMPLEX*8 :: VA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      COMPLEX*8 :: x(4)=&
+          &(/-3, 0, -3, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check strided elements and the untouched padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_su_di_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+1i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/(0,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/(-3.e0,0.e0), (0.e0,0.e0), (-3.e0,-3.e0), (0,0)/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check strided elements and the untouched padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_su_di_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+1i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*8 :: VA(1)=(/(0,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/(-3.e0,0.e0), (0.e0,0.e0), (-3.e0,3.e0), (0,0)/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,4 ! check strided elements and the untouched padding
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_su_di_ussv_2_c_anr3_bp1_ix2_iy2 
+! 
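+! The tz_ subroutines below repeat the complex tests in double precision
+! (COMPLEX*16), with handles created via zuscr_begin.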
+      SUBROUTINE tz_su_di_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*16 :: VA(1)=(/1/)
+      COMPLEX*16 :: x(2)=(/6, 3/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_su_di_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      COMPLEX*16 :: VA(1)=(/-999999/) ! Fortran does not support empty arrays; placeholder, unused since nnz=0
+      COMPLEX*16 :: x(2)=(/3, 3/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_su_di_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0 5
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*16 :: VA(1)=(/5/)
+      COMPLEX*16 :: x(2)=(/3, 18/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_su_di_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      COMPLEX*16 :: VA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      COMPLEX*16 :: x(4)=&
+          &(/3, 0, 3, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
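+      ! Editorial note: with incx = 2 only every other element is
+      ! significant, so x = (3,0,3,0) encodes the logical vector (3,3)
+      ! and cy = (9,0,9,0) encodes (9,9); the padding stays untouched.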
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_su_di_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+2i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*16 :: VA(1)=(/(0,2)/)
+      COMPLEX*16 :: x(4)=&
+          &(/(3.e0,0.e0), (0.e0,0.e0), (3.e0,6.e0), (0,0)/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_su_di_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 1+3i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*16 :: VA(1)=(/(1,3)/)
+      COMPLEX*16 :: x(4)=&
+          &(/(3.e0,0.e0), (0.e0,0.e0), (6.e0,-9.e0), (0,0)/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_su_di_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+1i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*16 :: VA(1)=(/(0,1)/)
+      COMPLEX*16 :: x(2)=(/(1.e0,1.e0), (1,0)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_su_di_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*16 :: VA(1)=(/1/)
+      COMPLEX*16 :: x(2)=(/1, 2/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_su_di_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 2+1i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*16 :: VA(1)=(/(2,1)/)
+      COMPLEX*16 :: x(2)=(/(1.e0,0.e0), (3,-1)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_su_di_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      COMPLEX*16 :: VA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_su_di_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0 3
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*16 :: VA(1)=(/3/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 4, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_su_di_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      COMPLEX*16 :: VA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_su_di_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 3+3i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*16 :: VA(1)=(/(3,3)/)
+      COMPLEX*16 :: x(2)=(/(-4.e0,-3.e0), (-1,0)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_su_di_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0 2
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*16 :: VA(1)=(/2/)
+      COMPLEX*16 :: x(2)=(/-1, -3/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_su_di_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 2+3i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*16 :: VA(1)=(/(2,3)/)
+      COMPLEX*16 :: x(2)=(/(-1.e0,0.e0), (-3,3)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_su_di_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+3i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*16 :: VA(1)=(/(0,3)/)
+      COMPLEX*16 :: x(4)=&
+          &(/(-1.e0,-3.e0), (0.e0,0.e0), (-1.e0,0.e0), (0,0)/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_su_di_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 1+1i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*16 :: VA(1)=(/(1,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/(-1.e0,0.e0), (0.e0,0.e0), (-2.e0,-1.e0), (0,0)/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_su_di_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 2+2i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*16 :: VA(1)=(/(2,2)/)
+      COMPLEX*16 :: x(4)=&
+          &(/(-1.e0,0.e0), (0.e0,0.e0), (-3.e0,2.e0), (0,0)/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_su_di_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0 1
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*16 :: VA(1)=(/1/)
+      COMPLEX*16 :: x(2)=(/-6, -3/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_su_di_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 4+1i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*16 :: VA(1)=(/(4,1)/)
+      COMPLEX*16 :: x(2)=(/(-3.e0,0.e0), (-15,-3)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_su_di_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 3+1i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*16 :: VA(1)=(/(3,1)/)
+      COMPLEX*16 :: x(2)=(/(-3.e0,0.e0), (-12,3)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_su_di_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 3+3i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*16 :: VA(1)=(/(3,3)/)
+      COMPLEX*16 :: x(4)=&
+          &(/(-12.e0,-9.e0), (0.e0,0.e0), (-3.e0,0.e0), (0,0)/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_su_di_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      COMPLEX*16 :: VA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      COMPLEX*16 :: x(4)=&
+          &(/-3, 0, -3, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_su_di_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+1i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/1/)
+      INTEGER :: JA(1)=(/2/)
+      COMPLEX*16 :: VA(1)=(/(0,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/(-3.e0,0.e0), (0.e0,0.e0), (-3.e0,3.e0), (0,0)/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_upper_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=u diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_su_di_ussv_2_c_anr3_bp1_ix2_iy2 
+! 
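+! Editorial note: the battery switches here from COMPLEX*16
+! upper-triangular cases (tz_su_*) to REAL*4 lower-triangular cases
+! (ts_sl_*); the skeleton is unchanged, with suscr_begin and
+! blas_lower_triangular taking the place of zuscr_begin and
+! blas_upper_triangular.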
+      SUBROUTINE ts_sl_di_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/3/)
+      REAL*4 :: x(2)=(/3, 12/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
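+      ! Editorial worked check: with the implicit unit diagonal,
+      ! A = [1 0; 3 1]; forward substitution on (3,12) gives (3,3),
+      ! and alpha = 3 scales this to cy = (9,9).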
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sl_di_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(2)=(/6, 3/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sl_di_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      REAL*4 :: x(2)=(/3, 3/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sl_di_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/3, 0, 6, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sl_di_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      REAL*4 :: x(4)=&
+          &(/3, 0, 3, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sl_di_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      REAL*4 :: x(4)=&
+          &(/3, 0, 3, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sl_di_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sl_di_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(2)=(/2, 1/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sl_di_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sl_di_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sl_di_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/2, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sl_di_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sl_di_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran has no empty array constructor; unused placeholder
+      REAL*4 :: x(2)=(/-1, -1/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sl_di_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/3/)
+      REAL*4 :: x(2)=(/-4, -1/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_t_anr1_bp1_ix1_iy1 
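+! Worked check for the test above (a sketch of the arithmetic, under
+! the usual Sparse BLAS reading of ussv, y := alpha*inv(op(A))*y):
+! with unit diagonal, op(A) = A**T + I = [1 3; 0 1]; solving
+! op(A)*z = x = (-4,-1) gives z2 = -1, z1 = -4 - 3*(-1) = -1, and
+! alpha*z = (-1)*(-1,-1) = (1,1), which is exactly cy.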
+! 
+      SUBROUTINE ts_sl_di_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(2)=(/-1, -1/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sl_di_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(4)=&
+          &(/-1, 0, -1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_n_anr1_bp1_ix2_iy2 
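+! Note on the strided (incx=2) variants such as the one above: x, cy
+! and y hold a logical 2-vector in elements 1 and 3, with padding
+! zeros in between; the verification loop DO i=1,2 therefore compares
+! only y(1) and the first padding element y(2), leaving the second
+! logical element y(3) unchecked.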
+! 
+      SUBROUTINE ts_sl_di_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/3/)
+      REAL*4 :: x(4)=&
+          &(/-4, 0, -1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sl_di_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(4)=&
+          &(/-1, 0, -1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sl_di_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 5 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/5/)
+      REAL*4 :: x(2)=(/-3, -18/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_n_anr3_bp1_ix1_iy1 
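+! Worked check for the test above: with unit diagonal the effective
+! operator is L = [1 0; 5 1]; forward substitution on L*z = x =
+! (-3,-18) gives z1 = -3, z2 = -18 - 5*(-3) = -3, and alpha*z =
+! (-3)*(-3,-3) = (9,9) = cy.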
+! 
+      SUBROUTINE ts_sl_di_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(2)=(/-3, -3/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sl_di_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(2)=(/-6, -3/)! reference x 
+      REAL*4 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*4 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sl_di_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/2/)
+      REAL*4 :: x(4)=&
+          &(/-3, 0, -9, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sl_di_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/-6, 0, -3, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sl_di_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(4)=&
+          &(/-3, 0, -3, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sl_di_ussv_2_c_anr3_bp1_ix2_iy2 
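+! The same battery is repeated below for REAL*8: duscr_begin is the
+! double-precision matrix constructor (the Sparse BLAS pairs a typed
+! ?uscr_begin with each numeric type), while ussp,
+! uscr_insert_entries, uscr_end, ussv and usds all act on the integer
+! handle A and are therefore shared across types.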
+! 
+      SUBROUTINE td_sl_di_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/3/)
+      REAL*8 :: x(2)=(/3, 12/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sl_di_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/6, 3/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sl_di_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 5 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/5/)
+      REAL*8 :: x(2)=(/18, 3/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sl_di_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/3, 0, 3, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sl_di_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/3, 0, 3, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sl_di_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/3, 0, 3, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sl_di_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/1, 2/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_n_ap1_bp1_ix1_iy1 
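+! A minimal, hypothetical driver sketch (this file is expected to
+! provide its own main program elsewhere, so the sketch is left in a
+! comment): compile against the blas_sparse module and call any of
+! the generated checks, e.g.:
+!
+!       PROGRAM check_ussv
+!       IMPLICIT NONE
+!       INTEGER :: errval
+!       CALL td_sl_di_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+!       IF(errval.NE.0)STOP 1
+!       END PROGRAM check_ussv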
+! 
+      SUBROUTINE td_sl_di_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/2, 1/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sl_di_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sl_di_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 2, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sl_di_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sl_di_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/2/)
+      REAL*8 :: x(4)=&
+          &(/3, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sl_di_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(2)=(/-1, -1/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sl_di_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/-2, -1/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sl_di_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(2)=(/-1, -1/)! reference x 
+      REAL*8 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sl_di_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/2/)
+      REAL*8 :: x(4)=&
+          &(/-1, 0, -3, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sl_di_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/-1, 0, -1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sl_di_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/-1, 0, -1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sl_di_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(2)=(/-3, -3/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sl_di_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/2/)
+      REAL*8 :: x(2)=(/-9, -3/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sl_di_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(2)=(/-3, -3/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sl_di_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/-3, 0, -3, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sl_di_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/-3, 0, -3, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sl_di_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/3/)
+      REAL*8 :: x(4)=&
+          &(/-12, 0, -3, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sl_di_ussv_2_c_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 1+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(1,1)/)
+      COMPLEX*8 :: x(2)=(/(3.e0,0.e0), (6,3)/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
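+      ! Sanity check by forward substitution on the unit lower solve:
+      ! w1 = x1 = 3, w2 = x2 - (1+i)*w1 = (6+3i)-(3+3i) = 3, so
+      ! alpha*w = (9,9), matching cy.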
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/1/)
+      COMPLEX*8 :: x(2)=(/6, 3/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,1)/)
+      COMPLEX*8 :: x(2)=(/(3.e0,-3.e0), (3,0)/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/(3.e0,0.e0), (0.e0,0.e0), (3.e0,3.e0), (0,0)/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/2/)
+      COMPLEX*8 :: x(4)=&
+          &(/9, 0, 3, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      COMPLEX*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      COMPLEX*8 :: x(4)=&
+          &(/3, 0, 3, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,1)/)
+      COMPLEX*8 :: x(2)=(/(1.e0,0.e0), (1,1)/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      COMPLEX*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/3/)
+      COMPLEX*8 :: x(2)=(/4, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 1+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(1,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/(1.e0,0.e0), (0.e0,0.e0), (2.e0,1.e0), (0,0)/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/1/)
+      COMPLEX*8 :: x(4)=&
+          &(/2, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/(1.e0,-1.e0), (0.e0,0.e0), (1.e0,0.e0), (0,0)/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 4+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(4,1)/)
+      COMPLEX*8 :: x(2)=(/(-1.e0,0.e0), (-5,-1)/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 2+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(2,1)/)
+      COMPLEX*8 :: x(2)=(/(-3.e0,-1.e0), (-1,0)/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      COMPLEX*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      COMPLEX*8 :: x(2)=(/-1, -1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      COMPLEX*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      COMPLEX*8 :: x(4)=&
+          &(/-1, 0, -1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      COMPLEX*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      COMPLEX*8 :: x(4)=&
+          &(/-1, 0, -1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 1+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(1,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/(-2.e0,1.e0), (0.e0,0.e0), (-1.e0,0.e0), (0,0)/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,4)/)
+      COMPLEX*8 :: x(2)=(/(-3.e0,0.e0), (-3,-12)/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/1/)
+      COMPLEX*8 :: x(2)=(/-6, -3/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/2/)
+      COMPLEX*8 :: x(2)=(/-9, -3/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 3+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(3,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/(-3.e0,0.e0), (0.e0,0.e0), (-12.e0,-3.e0), (0,0)/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0 0
+! 5 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/5/)
+      COMPLEX*8 :: x(4)=&
+          &(/-18, 0, -3, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sl_di_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/(-3.e0,3.e0), (0.e0,0.e0), (-3.e0,0.e0), (0,0)/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sl_di_ussv_2_c_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sl_di_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/2/)
+      COMPLEX*16 :: x(2)=(/3, 9/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sl_di_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 3+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(3,2)/)
+      COMPLEX*16 :: x(2)=(/(12.e0,6.e0), (3,0)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sl_di_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 1+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(1,2)/)
+      COMPLEX*16 :: x(2)=(/(6.e0,-6.e0), (3,0)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
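+      ! trans=c applies inv(conjg(transpose(A))): a unit upper system
+      ! with conjg(1+2i) = 1-2i at (1,2). Back substitution: w2 = x2 = 3,
+      ! w1 = x1 - (1-2i)*w2 = (6-6i)-(3-6i) = 3; alpha*w = (9,9) = cy.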
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sl_di_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 2+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(2,3)/)
+      COMPLEX*16 :: x(4)=&
+          &(/(3.e0,0.e0), (0.e0,0.e0), (9.e0,9.e0), (0,0)/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sl_di_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/3/)
+      COMPLEX*16 :: x(4)=&
+          &(/12, 0, 3, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sl_di_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      COMPLEX*16 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      COMPLEX*16 :: x(4)=&
+          &(/3, 0, 3, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sl_di_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      COMPLEX*16 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sl_di_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 1+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(1,1)/)
+      COMPLEX*16 :: x(2)=(/(2.e0,1.e0), (1,0)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sl_di_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/1/)
+      COMPLEX*16 :: x(2)=(/2, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sl_di_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/3/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 4, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_n_ap1_bp1_ix2_iy2 
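+! Worked check for the strided solve above: with incx=2 only y(1) and
+! y(3) take part, so the logical operand is (1, 4) and the elements in
+! between stay 0.  Forward substitution with the unit lower triangle
+! (A(2,1) = 3) gives t(1) = 1 and t(2) = 4 - 3*1 = 1, stored back as
+! (1, 0, 1, 0) = cy.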
+! 
+      SUBROUTINE tz_sl_di_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/3/)
+      COMPLEX*16 :: x(4)=&
+          &(/4, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sl_di_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      COMPLEX*16 :: VA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sl_di_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      COMPLEX*16 :: VA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      COMPLEX*16 :: x(2)=(/-1, -1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sl_di_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 4+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(4,1)/)
+      COMPLEX*16 :: x(2)=(/(-5.e0,-1.e0), (-1,0)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sl_di_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,1)/)
+      COMPLEX*16 :: x(2)=(/(-1.e0,1.e0), (-1,0)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_c_anr1_bp1_ix1_iy1 
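+! Worked check for the conjugate-transposed solve above: op(A) = A**H =
+! 1+0i 0-1i
+! 0+0i 1+0i
+! Back substitution on x = (-1+1i, -1) gives t(2) = -1 and
+! t(1) = (-1+1i) - (0-1i)*(-1) = -1; scaling by alpha = -1 then yields
+! (1, 1) = cy.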
+! 
+      SUBROUTINE tz_sl_di_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,3)/)
+      COMPLEX*16 :: x(4)=&
+          &(/(-1.e0,0.e0), (0.e0,0.e0), (-1.e0,-3.e0), (0,0)/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sl_di_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,2)/)
+      COMPLEX*16 :: x(4)=&
+          &(/(-1.e0,-2.e0), (0.e0,0.e0), (-1.e0,0.e0), (0,0)/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sl_di_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      COMPLEX*16 :: VA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      COMPLEX*16 :: x(4)=&
+          &(/-1, 0, -1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sl_di_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,2)/)
+      COMPLEX*16 :: x(2)=(/(-3.e0,0.e0), (-3,-6)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sl_di_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 1+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(1,3)/)
+      COMPLEX*16 :: x(2)=(/(-6.e0,-9.e0), (-3,0)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sl_di_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 2+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(2,1)/)
+      COMPLEX*16 :: x(2)=(/(-9.e0,3.e0), (-3,0)/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sl_di_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 1+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(1,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/(-3.e0,0.e0), (0.e0,0.e0), (-6.e0,-3.e0), (0,0)/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sl_di_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      COMPLEX*16 :: VA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      COMPLEX*16 :: x(4)=&
+          &(/-3, 0, -3, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sl_di_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 3+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(3,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/(-12.e0,3.e0), (0.e0,0.e0), (-3.e0,0.e0), (0,0)/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/0, 0, 0, 0/)! y 
+
+      y=x
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_lower_triangular,istat)
+      CALL ussp(A,blas_unit_diag,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL ussv(transT,alpha,A,y,incx,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=l diag=i blocks=&
+          &1x1 ussv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sl_di_ussv_2_c_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_n_ap3_bp1_ix1_iy1 
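+! Worked check for the symmetric usmv above, assuming the Sparse BLAS
+! semantics y := alpha * op(A) * x + y.  With nnz = 0 and blas_unit_diag
+! the matrix acts as the identity, so y = 3*(1, 1) + (3, 3) = (6, 6) = cy.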
+! 
+      SUBROUTINE ts_ss_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/3/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/15, 15/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_t_ap3_bp1_ix1_iy1 
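+! Worked check: blas_lower_symmetric makes the stored 3 act in both
+! triangles, so the effective matrix (equal to its transpose) is
+! 1 3
+! 3 1
+! Hence y = 3*A*(1, 1) + (3, 3) = (12, 12) + (3, 3) = (15, 15) = cy.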
+! 
+      SUBROUTINE ts_ss_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_n_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/12, 0, 12, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_t_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_c_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_n_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_c_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/5, 5/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/4, 4/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/5, 5/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_n_ap1_bp1_ix1_iy2 
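+! Worked check for incy=2: the logical y is (y(1), y(3)) = (3, 3).  With
+! the effective symmetric unit-diagonal matrix
+! 1 2
+! 2 1
+! A*x = (3, 3), so y := 1*(3, 3) + (3, 3) = (6, 6), stored back as
+! (6, 0, 6, 0) = cy.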
+! 
+      SUBROUTINE ts_ss_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/3/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/7, 0, 7, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! placeholder (nnz=0): Fortran array constructors cannot be empty
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_c_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/5, 5/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_n_ap1_bp1_ix2_iy1 
+! 
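+! Stride convention in the ix2/iy1 cases: with incx=2 the operand vector is
+! read as x(1), x(3), so x(4)=(/1, 0, 1, 0/) encodes the logical x=(1,1);
+! likewise incy selects which elements of y are updated, the interleaved
+! padding entries being left untouched.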
+      SUBROUTINE ts_ss_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/4, 4/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_t_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/2/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_c_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_c_ap1_bp1_ix2_iy2 
+! 
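+! The alpha=-1 block follows the same pattern. As a worked check of the
+! first case: the stored a21=1 plus blas_lower_symmetric and the implicit
+! unit diagonal give the effective matrix [1 1; 1 1], so A*x = (2,2) and
+! y = (3,3) + (-1)*(2,2) = (1,1), which is the reference cy.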
+      SUBROUTINE ts_ss_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_n_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 5 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/5/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/-3, 0, -3, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_n_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 4 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/4/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/-2, -2/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_t_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_c_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_c_anr1_bp1_ix2_iy2 
+! 
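+! Same check for the alpha=-3 block: the first case stores a21=2, so the
+! effective symmetric matrix with unit diagonal is [1 2; 2 1], A*x = (3,3),
+! and y = (3,3) + (-3)*(3,3) = (-6,-6), matching the reference cy.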
+      SUBROUTINE ts_ss_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/-6, -6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/-3, -3/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_n_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_t_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/-3, -3/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_c_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/-3, 0, -3, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_ss_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/-3, 0, -3, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_ss_di_usmv_2_c_anr3_bp1_ix2_iy2 
+! 
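+! From here on, the td_* subroutines repeat the suite in double precision:
+! the matrix handle comes from duscr_begin rather than suscr_begin and
+! VA, x, y and cy are REAL*8, while the expected arithmetic is unchanged.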
+      SUBROUTINE td_ss_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/3/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/15, 15/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_n_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_ss_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      INTEGER :: JA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: VA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: x(2)=(/1, 1/) ! reference x
+      REAL*8 :: cy(4)=&
+          &(/6, 0, 6, 0/) ! reference cy after the usmv call
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_t_ap3_bp1_ix1_iy2 
+! 
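+! Worked expectation for the next case (informal sketch): the stored
+! a21=2, the implicit unit diagonal and the symmetry make A act as
+! ((1,2),(2,1)), and conjugate transposition is a no-op for a real
+! symmetric matrix, so op(A)*x = (3,3) and y := 3*(3,3) + (3,3) =
+! (12,12), laid out at stride incy=2 as (/12, 0, 12, 0/).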
+      SUBROUTINE td_ss_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/2/)
+      REAL*8 :: x(2)=(/1, 1/) ! reference x
+      REAL*8 :: cy(4)=&
+          &(/12, 0, 12, 0/) ! reference cy after the usmv call
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_c_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_ss_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/) ! reference x
+      REAL*8 :: cy(2)=(/9, 9/) ! reference cy after the usmv call
+      REAL*8 :: y(2)=(/3, 3/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_n_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      INTEGER :: JA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: VA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/) ! reference x
+      REAL*8 :: cy(2)=(/6, 6/) ! reference cy after the usmv call
+      REAL*8 :: y(2)=(/3, 3/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      INTEGER :: JA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: VA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/) ! reference x
+      REAL*8 :: cy(2)=(/6, 6/) ! reference cy after the usmv call
+      REAL*8 :: y(2)=(/3, 3/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_c_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/2/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/) ! reference x
+      REAL*8 :: cy(4)=&
+          &(/12, 0, 12, 0/) ! reference cy after the usmv call
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_ss_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      INTEGER :: JA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: VA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/) ! reference x
+      REAL*8 :: cy(4)=&
+          &(/6, 0, 6, 0/) ! reference cy after the usmv call
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_ss_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      INTEGER :: JA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: VA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/) ! reference x
+      REAL*8 :: cy(4)=&
+          &(/6, 0, 6, 0/) ! reference cy after the usmv call
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_c_ap3_bp1_ix2_iy2 
+! 
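+! The alpha=1 variants follow.  In the next case A acts as ((1,2),(2,1)),
+! so y := 1*(3,3) + (3,3) = (6,6).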
+      SUBROUTINE td_ss_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/2/)
+      REAL*8 :: x(2)=(/1, 1/) ! reference x
+      REAL*8 :: cy(2)=(/6, 6/) ! reference cy after the usmv call
+      REAL*8 :: y(2)=(/3, 3/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      INTEGER :: JA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: VA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: x(2)=(/1, 1/) ! reference x
+      REAL*8 :: cy(2)=(/4, 4/) ! reference cy after the usmv call
+      REAL*8 :: y(2)=(/3, 3/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      INTEGER :: JA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: VA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: x(2)=(/1, 1/) ! reference x
+      REAL*8 :: cy(2)=(/4, 4/) ! reference cy after the usmv call
+      REAL*8 :: y(2)=(/3, 3/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/1, 1/) ! reference x
+      REAL*8 :: cy(4)=&
+          &(/5, 0, 5, 0/) ! reference cy after the usmv call
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_n_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_ss_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/3/)
+      REAL*8 :: x(2)=(/1, 1/) ! reference x
+      REAL*8 :: cy(4)=&
+          &(/7, 0, 7, 0/) ! reference cy after the usmv call
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_ss_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      INTEGER :: JA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: VA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: x(2)=(/1, 1/) ! reference x
+      REAL*8 :: cy(4)=&
+          &(/4, 0, 4, 0/) ! reference cy after the usmv call
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_c_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_ss_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/2/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/) ! reference x
+      REAL*8 :: cy(2)=(/6, 6/) ! reference cy after the usmv call
+      REAL*8 :: y(2)=(/3, 3/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_n_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/2/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/) ! reference x
+      REAL*8 :: cy(2)=(/6, 6/) ! reference cy after the usmv call
+      REAL*8 :: y(2)=(/3, 3/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_t_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/) ! reference x
+      REAL*8 :: cy(2)=(/5, 5/) ! reference cy after the usmv call
+      REAL*8 :: y(2)=(/3, 3/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_c_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/) ! reference x
+      REAL*8 :: cy(4)=&
+          &(/5, 0, 5, 0/) ! reference cy after the usmv call
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_ss_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/) ! reference x
+      REAL*8 :: cy(4)=&
+          &(/5, 0, 5, 0/) ! reference cy after the usmv call
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_ss_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      INTEGER :: JA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: VA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/) ! reference x
+      REAL*8 :: cy(4)=&
+          &(/4, 0, 4, 0/) ! reference cy after the usmv call
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_c_ap1_bp1_ix2_iy2 
+! 
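+! The alpha=-1 variants follow.  In the next case A acts as ((1,1),(1,1)),
+! so y := -1*(2,2) + (3,3) = (1,1).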
+      SUBROUTINE td_ss_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/1, 1/) ! reference x
+      REAL*8 :: cy(2)=(/1, 1/) ! reference cy after the usmv call
+      REAL*8 :: y(2)=(/3, 3/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      INTEGER :: JA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: VA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: x(2)=(/1, 1/) ! reference x
+      REAL*8 :: cy(2)=(/2, 2/) ! reference cy after the usmv call
+      REAL*8 :: y(2)=(/3, 3/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/2/)
+      REAL*8 :: x(2)=(/1, 1/) ! reference x
+      REAL*8 :: cy(2)=(/0, 0/) ! reference cy after the usmv call
+      REAL*8 :: y(2)=(/3, 3/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 4 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/4/)
+      REAL*8 :: x(2)=(/1, 1/) ! reference x
+      REAL*8 :: cy(4)=&
+          &(/-2, 0, -2, 0/) ! reference cy after the usmv call
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_n_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_ss_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      INTEGER :: JA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: VA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: x(2)=(/1, 1/) ! reference x
+      REAL*8 :: cy(4)=&
+          &(/2, 0, 2, 0/) ! reference cy after the usmv call
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_ss_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      INTEGER :: JA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: VA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: x(2)=(/1, 1/) ! reference x
+      REAL*8 :: cy(4)=&
+          &(/2, 0, 2, 0/) ! reference cy after the usmv call
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_ss_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 4 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/4/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/) ! reference x
+      REAL*8 :: cy(2)=(/-2, -2/) ! reference cy after the usmv call
+      REAL*8 :: y(2)=(/3, 3/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_n_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/3/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/) ! reference x
+      REAL*8 :: cy(2)=(/-1, -1/) ! reference cy after the usmv call
+      REAL*8 :: y(2)=(/3, 3/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_t_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      INTEGER :: JA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: VA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/) ! reference x
+      REAL*8 :: cy(2)=(/2, 2/) ! reference cy after the usmv call
+      REAL*8 :: y(2)=(/3, 3/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_c_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/) ! reference x
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/) ! reference cy after the usmv call
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_ss_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/) ! reference x
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/) ! reference cy after the usmv call
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_ss_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/) ! reference x
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/) ! reference cy after the usmv call
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_c_anr1_bp1_ix2_iy2 
+! 
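+! The alpha=-3 variants follow.  With nnz=0 the matrix reduces to the
+! implicit unit diagonal, so y := -3*(1,1) + (3,3) = (0,0).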
+      SUBROUTINE td_ss_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      INTEGER :: JA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: VA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: x(2)=(/1, 1/) ! reference x
+      REAL*8 :: cy(2)=(/0, 0/) ! reference cy after the usmv call
+      REAL*8 :: y(2)=(/3, 3/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      INTEGER :: JA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: VA(1)=(/-999999/) ! dummy entry: Fortran has no portable empty array constructor
+      REAL*8 :: x(2)=(/1, 1/) ! reference x
+      REAL*8 :: cy(2)=(/0, 0/) ! reference cy after the usmv call
+      REAL*8 :: y(2)=(/3, 3/) ! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/-6, -6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
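+      ! Expected result: the implicit unit diagonal plus the
+      ! symmetric fill-in make A act as [1 2; 2 1]; trans=c on a
+      ! REAL*8 matrix is a plain transpose, so
+      ! y = -3*(3,3) + (3,3) = (-6,-6).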
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      REAL*8 :: VA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
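+      ! incy=2: only y(1) and y(3) are logical elements; the
+      ! interleaved zeros are padding that must stay untouched. Note
+      ! the DO i=1,2 check below compares just y(1) and y(2), so the
+      ! second logical element y(3) goes unverified here.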
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_n_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_ss_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      REAL*8 :: VA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_ss_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      REAL*8 :: VA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_ss_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      REAL*8 :: VA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
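+      ! incx=2: usmv reads only x(1) and x(3) (both 1); the zeros in
+      ! between are stride padding.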
+      REAL*8 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      REAL*8 :: VA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_t_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/2/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/-6, -6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_c_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_ss_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/-3, 0, -3, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
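+      ! Worked check: the unit diagonal plus the symmetric entry at
+      ! (2,1) make A act as [1 1; 1 1], so A*x = (2,2) on the strided
+      ! x, and y = -3*(2,2) + (3,3) = (-3,-3), stored as (-3,0,-3,0).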
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_ss_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      REAL*8 :: VA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_ss_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      REAL*8 :: VA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_ss_di_usmv_2_c_anr3_bp1_ix2_iy2 
+! 
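+! The tests below switch from REAL*8 matrices (type=d, td_* prefix)
+! to single-precision COMPLEX*8 ones (type=c, tc_* prefix), created
+! via cuscr_begin instead of duscr_begin.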
+      SUBROUTINE tc_ss_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/1/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,3)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(6.e0,9.e0), (6,9)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+6i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,6)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(6.e0,-18.e0), (6,-18)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
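+      ! For trans=c the operator is the conjugate transpose, so the
+      ! stored 0+6i enters as 0-6i:
+      ! y = 3*(1-6i, 1-6i) + (3,3) = (6-18i, 6-18i), matching cy.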
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/1/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_n_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,4)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(6.e0,12.e0), (0.e0,0.e0), (6.e0,12.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_t_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 2+5i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(2,5)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(12.e0,-15.e0), (0.e0,0.e0), (12.e0,-15.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_c_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 3+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(3,4)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(15.e0,12.e0), (15,12)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_n_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,2)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(6.e0,6.e0), (6,6)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_c_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(6.e0,3.e0), (0.e0,0.e0), (6.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
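+      ! usds releases the matrix handle; it runs on both the success
+      ! and the failure path.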
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+8i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,8)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(6.e0,24.e0), (0.e0,0.e0), (6.e0,24.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(6.e0,-3.e0), (0.e0,0.e0), (6.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      COMPLEX*8 :: VA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/4, 4/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
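+      ! Even with nnz=0 the blas_unit_diag property makes op(A) act
+      ! as the identity, so y = 1*x + y = (4,4).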
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 2+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(2,3)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(6.e0,3.e0), (6,3)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,1)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(4.e0,-1.e0), (4,-1)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,3)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(4.e0,3.e0), (0.e0,0.e0), (4.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_n_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,1)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(4.e0,1.e0), (0.e0,0.e0), (4.e0,1.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 1+6i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(1,6)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(5.e0,-6.e0), (0.e0,0.e0), (5.e0,-6.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_c_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      COMPLEX*8 :: VA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/4, 4/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_n_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/5, 5/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_t_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 2+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(2,2)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(6.e0,-2.e0), (6,-2)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_c_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      COMPLEX*8 :: VA(1)=(/-999999/) ! dummy: Fortran 90 lacks empty array constructors; unused (nnz=0)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(4.e0,1.e0), (0.e0,0.e0), (4.e0,1.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 2+5i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(2,5)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(6.e0,-5.e0), (0.e0,0.e0), (6.e0,-5.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_c_ap1_bp1_ix2_iy2 
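+! Worked arithmetic for the conjugate-transpose case above (illustrative
+! only): writing the implicit unit diagonal out,
+!   conjg(A**T) = [ 1     2-5i ]
+!                 [ 2-5i  1    ]
+! and with incx=incy=2 the active elements of x and y are (1,1) and (3,3),
+! so y := 1*conjg(A**T)*x + y = (3-5i,3-5i) + (3,3) = (6-5i,6-5i),
+! which matches cy(1) and cy(3).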
+! 
+      SUBROUTINE tc_ss_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,2)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(2.e0,-2.e0), (2,-2)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,2)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(2.e0,-2.e0), (2,-2)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 2+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/2/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/0, 0/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,1)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(2.e0,-1.e0), (0.e0,0.e0), (2.e0,-1.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_n_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 1+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/1/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 2+7i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(2,7)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(0.e0,7.e0), (0.e0,0.e0), (0.e0,7.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,2)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(2.e0,-2.e0), (2,-2)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_n_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 1+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(1,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(1.e0,-1.e0), (1,-1)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_t_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 3+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(3,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-1.e0,1.e0), (-1,1)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_c_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,3)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(2.e0,-3.e0), (0.e0,0.e0), (2.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(2.e0,-1.e0), (0.e0,0.e0), (2.e0,-1.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 2+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(2,3)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(0.e0,3.e0), (0.e0,0.e0), (0.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+6i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,6)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(0.e0,-18.e0), (0,-18)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,4)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(0.e0,-12.e0), (0,-12)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! sentinel: Fortran array constructors cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! sentinel: Fortran array constructors cannot be empty
+      COMPLEX*8 :: VA(1)=(/-999999/) ! sentinel: Fortran array constructors cannot be empty
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/0, 0/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_c_anr3_bp1_ix1_iy1 
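+! For the empty-matrix case above: with nnz=0 nothing is inserted and the
+! -999999 sentinels go unused; the implicit unit diagonal leaves
+! op(A) = I, so y := alpha*x + y = -3*(1,1) + (3,3) = (0,0), matching cy.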
+! 
+      SUBROUTINE tc_ss_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 4+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(4,1)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(-12.e0,-3.e0), (0.e0,0.e0), (-12.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_n_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 3+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(3,3)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(-9.e0,-9.e0), (0.e0,0.e0), (-9.e0,-9.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 3+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(3,1)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(-9.e0,3.e0), (0.e0,0.e0), (-9.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(0.e0,-3.e0), (0,-3)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,2)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(0.e0,-6.e0), (0,-6)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_t_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 1+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(1,2)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-3.e0,6.e0), (-3,6)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_c_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(0.e0,-3.e0), (0.e0,0.e0), (0.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 1+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(1,3)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(-3.e0,-9.e0), (0.e0,0.e0), (-3.e0,-9.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_ss_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,3)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(0.e0,9.e0), (0.e0,0.e0), (0.e0,9.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_ss_di_usmv_2_c_anr3_bp1_ix2_iy2 
+! 
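+! From here on the tz_* cases repeat the same usmv pattern in double
+! precision complex: VA, x, cy and y are COMPLEX*16 and the handle is
+! created with zuscr_begin rather than cuscr_begin.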
+      SUBROUTINE tz_ss_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 4+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(4,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(18.e0,6.e0), (18,6)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 2+7i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(2,7)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(12.e0,21.e0), (12,21)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(6.e0,-6.e0), (6,-6)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,1)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(6.e0,3.e0), (0.e0,0.e0), (6.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_n_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+0i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! sentinel: Fortran array constructors cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! sentinel: Fortran array constructors cannot be empty
+      COMPLEX*16 :: VA(1)=(/-999999/) ! sentinel: Fortran array constructors cannot be empty
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_t_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,3)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(6.e0,-9.e0), (0.e0,0.e0), (6.e0,-9.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_c_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 1+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(1,4)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(9.e0,12.e0), (9,12)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_n_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 2+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(2,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(12.e0,3.e0), (12,3)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 1+6i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(1,6)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(9.e0,-18.e0), (9,-18)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_c_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,3)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(6.e0,9.e0), (0.e0,0.e0), (6.e0,9.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
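+      ! worked check: op(A) = [1 3i; 3i 1] and incy=2, so only y(1), y(3)
+      ! participate: y <- 3*(1+3i, 1+3i) + (3, 3) = (6+9i, 6+9i)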
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,2)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(6.e0,6.e0), (0.e0,0.e0), (6.e0,6.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 3+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(3,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(15.e0,-3.e0), (0.e0,0.e0), (15.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/2/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/6, 6/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 3+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(3,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(7.e0,2.e0), (7,2)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 0+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,4)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(4.e0,-4.e0), (4,-4)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy: empty array constructors are not portable Fortran, so one ignored element is passed (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! dummy, ignored since nnz=0
+      COMPLEX*16 :: VA(1)=(/-999999/) ! dummy, ignored since nnz=0
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
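+      ! worked check: nnz=0 leaves only the implicit unit diagonal, so
+      ! op(A) = I and y(1), y(3) <- 1*(1, 1) + (3, 3) = (4, 4)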
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_n_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/5, 0, 5, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(4.e0,-2.e0), (0.e0,0.e0), (4.e0,-2.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_c_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/1/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/5, 5/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_n_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy: empty array constructors are not portable Fortran, so one ignored element is passed (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! dummy, ignored since nnz=0
+      COMPLEX*16 :: VA(1)=(/-999999/) ! dummy, ignored since nnz=0
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/4, 4/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_t_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,3)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(4.e0,-3.e0), (4,-3)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_c_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 0+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,4)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(4.e0,4.e0), (0.e0,0.e0), (4.e0,4.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 0+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,4)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(4.e0,4.e0), (0.e0,0.e0), (4.e0,4.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,3)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(4.e0,-3.e0), (0.e0,0.e0), (4.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/3/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/-1, -1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
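+      ! worked check: op(A) = [1 3; 3 1], so
+      ! y <- -1*(4, 4) + (3, 3) = (-1, -1)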
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 1+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(1,1)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(1.e0,1.e0), (1,1)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 0+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,4)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(2.e0,-4.e0), (0.e0,0.e0), (2.e0,-4.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_n_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 2+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(2,3)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(0.e0,-3.e0), (0.e0,0.e0), (0.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 2+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(2,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(0.e0,2.e0), (0.e0,0.e0), (0.e0,2.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 2+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(2,2)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(0.e0,-2.e0), (0,-2)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_n_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 2+9i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(2,9)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(0.e0,-9.e0), (0,-9)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_t_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 1+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(1,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(1.e0,1.e0), (1,1)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_c_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 0+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,4)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(2.e0,-4.e0), (0.e0,0.e0), (2.e0,-4.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/1/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 4+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(4,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(-2.e0,1.e0), (0.e0,0.e0), (-2.e0,1.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,1)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(0.e0,-3.e0), (0,-3)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/3/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/-9, -9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/2/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/-6, -6/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,1)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(0.e0,-3.e0), (0.e0,0.e0), (0.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_n_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,3)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(0.e0,-9.e0), (0.e0,0.e0), (0.e0,-9.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,1)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(0.e0,3.e0), (0.e0,0.e0), (0.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(0.e0,-3.e0), (0,-3)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      COMPLEX*16 :: VA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/0, 0/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_t_anr3_bp1_ix2_iy1 
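+! ---------------------------------------------------------------------
+! [editor's sketch, not generated by the suite] With nnz=0 the call to
+! uscr_insert_entries inserts nothing, so the -999999 dummies are never
+! read; the matrix then consists of the implicit unit diagonal alone,
+! i.e. the identity, and usmv degenerates to an axpy on the strided
+! elements. A standalone, hypothetical cross-check of the test above
+! (alpha=-3, trans=t, x read with stride 2):
+      PROGRAM usmv_dense_check_empty
+      IMPLICIT NONE
+      COMPLEX*16 :: x(2), y(2)
+      COMPLEX*16 :: alpha=-3
+      x=(/1, 1/)    ! x(1),x(3) of the incx=2 test vector
+      y=(/3, 3/)
+      y=y+alpha*x   ! transpose(identity) = identity
+      PRINT*,y      ! expected: (0,0) (0,0), matching cy
+      END PROGRAM usmv_dense_check_empty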
+! 
+      SUBROUTINE tz_ss_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 4+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(4,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(-12.e0,3.e0), (-12,3)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_c_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 3+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(3,2)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(-9.e0,-6.e0), (0.e0,0.e0), (-9.e0,-6.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(0.e0,-3.e0), (0.e0,0.e0), (0.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_ss_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 1+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(1,2)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(-3.e0,6.e0), (0.e0,0.e0), (-3.e0,6.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_symmetric,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=s diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_ss_di_usmv_2_c_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/3/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/15, 15/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 4 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/4/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/18, 18/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_c_ap3_bp1_ix1_iy1 
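+! ---------------------------------------------------------------------
+! [editor's sketch, not generated by the suite] For a REAL type the
+! blas_lower_hermitian property coincides with blas_lower_symmetric (a
+! real matrix is Hermitian exactly when it is symmetric), so the
+! trans=c and trans=t variants exercise the same arithmetic. A
+! standalone, hypothetical cross-check of the trans=c test above, with
+! the stored 4 mirrored and the unit diagonal made explicit:
+      PROGRAM usmv_dense_check_s
+      IMPLICIT NONE
+      REAL*4 :: A(2,2), x(2), y(2)
+      REAL*4 :: alpha=3
+      A(1,1)=1
+      A(2,2)=1
+      A(2,1)=4
+      A(1,2)=4
+      x=(/1, 1/)
+      y=(/3, 3/)
+      y=y+alpha*MATMUL(TRANSPOSE(A),x) ! conjugation is a no-op on reals
+      PRINT*,y      ! expected: 18.0 18.0, matching cy
+      END PROGRAM usmv_dense_check_s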
+! 
+      SUBROUTINE ts_sh_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_n_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_t_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/3/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/15, 0, 15, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_c_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_n_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_c_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/3/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/15, 0, 15, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/4, 4/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/5, 5/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/4, 4/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_n_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/3/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/7, 0, 7, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/5, 0, 5, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_c_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/4, 4/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_n_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/5, 5/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_t_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/2/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_c_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_n_anr1_bp1_ix1_iy1 
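+! ---------------------------------------------------------------------
+! [editor's sketch, not generated by the suite] The alpha=-1 family is
+! built so that alpha*A*x exactly cancels the initial y: here A expands
+! to [1 2; 2 1], A*x = (3,3), and y starts at (3,3), giving cy = (0,0).
+! A standalone, hypothetical cross-check:
+      PROGRAM usmv_dense_check_neg
+      IMPLICIT NONE
+      REAL*4 :: A(2,2), x(2), y(2)
+      REAL*4 :: alpha=-1
+      A(1,1)=1
+      A(2,2)=1
+      A(2,1)=2
+      A(1,2)=2
+      x=(/1, 1/)
+      y=(/3, 3/)
+      y=y+alpha*MATMUL(A,x) ! trans=n: y <- alpha*A*x + y
+      PRINT*,y      ! expected: 0.0 0.0, matching cy
+      END PROGRAM usmv_dense_check_neg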
+! 
+      SUBROUTINE ts_sh_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      INTEGER :: JA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: VA(1)=(/-999999/) ! dummy value: a Fortran array constructor cannot be empty
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/3/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/-1, 0, -1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_n_anr1_bp1_ix1_iy2 
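+! Hand check: with the stored (2,1)=3 mirrored by blas_lower_hermitian
+! and the implicit unit diagonal, A expands to [1 3; 3 1], so
+! A*x = (4,4) and y = 3 + (-1)*4 = -1 at y(1) and y(3) (incy=2),
+! matching cy = (/-1, 0, -1, 0/).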
+! 
+      SUBROUTINE ts_sh_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/2/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 5 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/5/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/-3, 0, -3, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_n_anr1_bp1_ix2_iy1 
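+! Hand check: incx=2 reads x_eff = (1,1) out of (/1, 0, 1, 0/); with
+! nnz=0 the implicit unit diagonal gives op(A) = I, so
+! y = 3 + (-1)*1 = 2, matching cy = (/2, 2/).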
+! 
+      SUBROUTINE ts_sh_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/1, 1/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_t_anr1_bp1_ix2_iy1 
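+! Hand check (same y := alpha*op(A)*x + y model as above): A expands
+! to [1 1; 1 1] and is symmetric, so op(A)*x_eff = (2,2) and
+! y = 3 + (-1)*2 = 1, matching cy = (/1, 1/).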
+! 
+      SUBROUTINE ts_sh_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 5 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/5/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/-3, -3/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_c_anr1_bp1_ix2_iy1 
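+! Hand check: A expands to [1 5; 5 1]; for REAL*4 data conjugate
+! transposition is a no-op, so op(A)*x_eff = (6,6) and
+! y = 3 + (-1)*6 = -3, matching cy = (/-3, -3/).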
+! 
+      SUBROUTINE ts_sh_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/3/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/-1, 0, -1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/2/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_c_anr1_bp1_ix2_iy2 
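+! Hand check: A expands to [1 1; 1 1], op(A)*x_eff = (2,2), and
+! y = 3 + (-1)*2 = 1 at y(1) and y(3) (incy=2), matching
+! cy = (/1, 0, 1, 0/).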
+! 
+      SUBROUTINE ts_sh_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/3/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(2)=(/-9, -9/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_c_anr3_bp1_ix1_iy1 
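+! Hand check: A expands to [1 3; 3 1], op(A)*x = (4,4), and
+! y = 3 + (-3)*4 = -9, matching cy = (/-9, -9/).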
+! 
+      SUBROUTINE ts_sh_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/3/)
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/-9, 0, -9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_n_anr3_bp1_ix1_iy2 
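+! Hand check: A expands to [1 3; 3 1], A*x = (4,4), and
+! y = 3 + (-3)*4 = -9 at y(1) and y(3) (incy=2), matching
+! cy = (/-9, 0, -9, 0/).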
+! 
+      SUBROUTINE ts_sh_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*4 :: x(2)=(/1, 1/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_t_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*4 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_c_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/3/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/-9, 0, -9, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*4 :: VA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE ts_sh_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*4 :: alpha=-3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*4 :: VA(1)=(/1/)
+      REAL*4 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*4 :: cy(4)=&
+          &(/-3, 0, -3, 0/)! reference cy after 
+      REAL*4 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL suscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=s dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE ts_sh_di_usmv_2_c_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/9, 9/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_n_ap3_bp1_ix1_iy1 
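+! Hand check (REAL*8 variant, same y := alpha*op(A)*x + y model):
+! A expands to [1 1; 1 1], A*x = (2,2), and y = 3 + 3*2 = 9,
+! matching cy = (/9, 9/).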
+! 
+      SUBROUTINE td_sh_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_t_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sh_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/12, 12/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_c_ap3_bp1_ix1_iy1 
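+! Hand check: A expands to [1 2; 2 1], op(A)*x = (3,3), and
+! y = 3 + 3*3 = 12, matching cy = (/12, 12/).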
+! 
+      SUBROUTINE td_sh_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_n_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sh_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/3/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/15, 0, 15, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_t_ap3_bp1_ix1_iy2 
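+! Hand check: A expands to [1 3; 3 1], op(A)*x = (4,4), and
+! y = 3 + 3*4 = 15 at y(1) and y(3) (incy=2), matching
+! cy = (/15, 0, 15, 0/).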
+! 
+      SUBROUTINE td_sh_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_c_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sh_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_n_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/6, 6/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_c_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/9, 0, 9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/3/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/15, 0, 15, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      INTEGER :: JA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran array constructors cannot be empty; one-element placeholder
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/4, 4/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sh_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/3/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/7, 7/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_t_ap1_bp1_ix1_iy1 
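+! A minimal driver sketch (hypothetical, not part of the generated
+! suite): it only assumes that the tester above is compiled into the
+! same build, and reports its errval outcome.
+      PROGRAM usmv_driver_sketch
+      IMPLICIT NONE
+      INTEGER :: errval
+      ! errval is set to 0 on success and to -1 on any failure above.
+      CALL td_sh_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.EQ.0)THEN
+        PRINT*,"tester passed"
+      ELSE
+        PRINT*,"tester failed"
+      ENDIF
+      END PROGRAM usmv_driver_sketch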
+! 
+      SUBROUTINE td_sh_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/5, 5/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_c_ap1_bp1_ix1_iy1 
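+! Note: for these REAL*8 (type=d) testers, blas_conj_trans is
+! equivalent to blas_trans, since conjugation is a no-op on real data;
+! the trans=t and trans=c cases differ only in the matrix each one
+! happens to be generated with.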
+! 
+      SUBROUTINE td_sh_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/5, 0, 5, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_n_ap1_bp1_ix1_iy2 
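+! Note: with incy=2 only every other element of y is significant:
+! usmv updates y(1) and y(3) and must leave the interleaved zero
+! padding untouched, hence cy = (5, 0, 5, 0) above. The incx=2
+! testers below stride through x in the same way.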
+! 
+      SUBROUTINE td_sh_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sh_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/5, 0, 5, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_c_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sh_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/5, 5/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_n_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/4, 4/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_t_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/4, 4/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_c_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/2/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_n_anr1_bp1_ix1_iy1 
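+! Note on naming: the alpha value appears to be encoded in the tester
+! names, "ap1"/"ap3" standing for alpha = +1/+3 and "anr1"/"anr3" for
+! alpha = -1/-3, matching the alpha printed by each message.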
+! 
+      SUBROUTINE td_sh_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sh_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sh_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_n_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sh_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sh_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sh_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_n_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_t_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/2, 2/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_c_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/1, 0, 1, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/-3, -3/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sh_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sh_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(2)=(/0, 0/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE td_sh_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_n_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sh_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/3/)
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/-9, 0, -9, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sh_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(2)=(/1, 1/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE td_sh_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/3/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/-9, -9/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/-3, -3/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_t_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/1/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(2)=(/-3, -3/)! reference cy after 
+      REAL*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_c_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE td_sh_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/0, 0, 0, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE td_sh_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      REAL*8 :: alpha=-3
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      REAL*8 :: VA(1)=(/2/)
+      REAL*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      REAL*8 :: cy(4)=&
+          &(/-6, 0, -6, 0/)! reference cy after 
+      REAL*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL duscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=d dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE td_sh_di_usmv_2_c_anr3_bp1_ix2_iy2 
+! 
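+! The tc_* testers that follow exercise the single-precision complex
+! type ("type=c"): matrices are created with cuscr_begin, and VA, x,
+! y, cy become COMPLEX*8. Unlike the real ("type=d") case, trans=t
+! (transpose) and trans=c (conjugate transpose) now differ whenever a
+! stored entry has a nonzero imaginary part.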
+      SUBROUTINE tc_sh_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 3+6i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(3,6)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(15.e0,-18.e0), (15,18)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_n_ap3_bp1_ix1_iy1 
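+! Sanity check of the reference values above: with the implicit unit
+! diagonal and the Hermitian completion a12 = conjg(a21), the
+! effective matrix is A = [1, 3-6i; 3+6i, 1], so
+!   A*x           = (4-6i, 4+6i)
+!   alpha*A*x + y = 3*(4-6i, 4+6i) + (3, 3) = (15-18i, 15+18i)
+! which matches cy.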
+! 
+      SUBROUTINE tc_sh_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,3)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(6.e0,9.e0), (6,-9)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_t_ap3_bp1_ix1_iy1 
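+! Here trans=t transposes without conjugation: the stored a21 = 3i
+! implies a12 = conjg(a21) = -3i, so A**T = [1, 3i; -3i, 1] and
+!   alpha*A**T*x + y = 3*(1+3i, 1-3i) + (3, 3) = (6+9i, 6-9i)
+! in agreement with cy.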
+! 
+      SUBROUTINE tc_sh_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/1/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 1+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(1,1)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(9.e0,-3.e0), (0.e0,0.e0), (9.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_n_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 1+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(1,1)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(9.e0,3.e0), (0.e0,0.e0), (9.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_t_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran lacks empty array constructors; placeholder (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! Fortran lacks empty array constructors; placeholder (nnz=0)
+      COMPLEX*8 :: VA(1)=(/-999999/) ! Fortran lacks empty array constructors; placeholder (nnz=0)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_c_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran lacks empty array constructors; placeholder (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! Fortran lacks empty array constructors; placeholder (nnz=0)
+      COMPLEX*8 :: VA(1)=(/-999999/) ! Fortran lacks empty array constructors; placeholder (nnz=0)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/6, 6/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_n_ap3_bp1_ix2_iy1 
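+! With nnz=0 and blas_unit_diag the matrix above reduces to the 2x2
+! identity, so usmv simply yields y = 3*x + y = (6, 6), as in cy;
+! incx=2 means the logical operand is x(1:4:2) = (1, 1).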
+! 
+      SUBROUTINE tc_sh_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,3)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(6.e0,9.e0), (6,-9)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 3+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(3,4)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(15.e0,-12.e0), (15,12)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_c_ap3_bp1_ix2_iy1 
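+! Since the matrix is declared blas_lower_hermitian, A**H = A and
+! trans=c behaves like trans=n here: A = [1, 3-4i; 3+4i, 1], so
+! y = 3*(4-4i, 4+4i) + (3, 3) = (15-12i, 15+12i), matching cy.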
+! 
+      SUBROUTINE tc_sh_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 3+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(3,2)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(15.e0,-6.e0), (0.e0,0.e0), (15.e0,6.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,2)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(6.e0,6.e0), (0.e0,0.e0), (6.e0,-6.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran lacks empty array constructors; placeholder (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! Fortran lacks empty array constructors; placeholder (nnz=0)
+      COMPLEX*8 :: VA(1)=(/-999999/) ! Fortran lacks empty array constructors; placeholder (nnz=0)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/6, 0, 6, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_c_ap3_bp1_ix2_iy2 
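+! Stride bookkeeping in the case above: incx=2 makes the logical
+! operand x(1:4:2) = (1, 1), and incy=2 means usmv touches only y(1)
+! and y(3); with nnz=0 the matrix is the unit diagonal, so those two
+! entries become 3*1 + 3 = 6 while y(2) and y(4) stay 0, i.e.
+! cy = (6, 0, 6, 0).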
+! 
+      SUBROUTINE tc_sh_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,2)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(4.e0,-2.e0), (4,2)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/1/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/5, 5/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran lacks empty array constructors; placeholder (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! Fortran lacks empty array constructors; placeholder (nnz=0)
+      COMPLEX*8 :: VA(1)=(/-999999/) ! Fortran lacks empty array constructors; placeholder (nnz=0)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/4, 4/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 0+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,4)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(4.e0,-4.e0), (0.e0,0.e0), (4.e0,4.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_n_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 1+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(1,3)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(5.e0,3.e0), (0.e0,0.e0), (5.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,2)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(4.e0,-2.e0), (0.e0,0.e0), (4.e0,2.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_c_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/3/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/7, 7/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_n_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran lacks empty array constructors; placeholder (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! Fortran lacks empty array constructors; placeholder (nnz=0)
+      COMPLEX*8 :: VA(1)=(/-999999/) ! Fortran lacks empty array constructors; placeholder (nnz=0)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/4, 4/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_t_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 5+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(5,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(9.e0,-1.e0), (9,1)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_c_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/5, 0, 5, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 2+5i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(2,5)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(6.e0,5.e0), (0.e0,0.e0), (6.e0,-5.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 3+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(3,4)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(7.e0,-4.e0), (0.e0,0.e0), (7.e0,4.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/2/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/0, 0/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,1)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(2.e0,-1.e0), (2,1)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 2+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(2,1)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(0.e0,1.e0), (0,-1)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 0+6i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,6)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(2.e0,6.e0), (0.e0,0.e0), (2.e0,-6.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_n_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 3+5i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(3,5)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(-1.e0,-5.e0), (0.e0,0.e0), (-1.e0,5.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran lacks empty array constructors; placeholder (nnz=0)
+      INTEGER :: JA(1)=(/-999999/) ! Fortran lacks empty array constructors; placeholder (nnz=0)
+      COMPLEX*8 :: VA(1)=(/-999999/) ! Fortran lacks empty array constructors; placeholder (nnz=0)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 2+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(2,3)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(0.e0,3.e0), (0,-3)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_n_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(2.e0,-1.e0), (2,1)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_t_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/1, 1/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_c_anr1_bp1_ix2_iy1 
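+! For a Hermitian A, conjg(A**T) = A, so the trans=c case above must
+! agree with what trans=n would give for the same matrix: here
+! A(2,1)=1, hence A*x = (2,2) and y = -A*x + y = (1,1) = cy.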
+! 
+      SUBROUTINE tc_sh_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 0+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,4)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(2.e0,4.e0), (0.e0,0.e0), (2.e0,-4.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_n_anr1_bp1_ix2_iy2 
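+! With incy=2 the logical result vector occupies y(1) and y(3); usmv
+! must leave y(2) and y(4) untouched, which the reference cy encodes
+! by keeping them at their initial value of 0.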
+! 
+      SUBROUTINE tc_sh_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,3)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(2.e0,-3.e0), (0.e0,0.e0), (2.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty arrays
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty arrays
+      COMPLEX*8 :: VA(1)=(/-999999/) ! Fortran does not support empty arrays
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/2, 0, 2, 0/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_c_anr1_bp1_ix2_iy2 
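+! Note on the nnz=0 case above: Fortran array constructors cannot be
+! empty, so IA, JA and VA carry a -999999 sentinel that is never read
+! (uscr_insert_entries is passed nnz=0); only the implicit unit
+! diagonal contributes, giving y = -x + y = (2,0,2,0) = cy.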
+! 
+      SUBROUTINE tc_sh_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,2)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(0.e0,6.e0), (0,-6)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+5i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,5)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(0.e0,-15.e0), (0,15)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_t_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 2+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(2,3)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-6.e0,9.e0), (-6,-9)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 5+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(5,1)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(-15.e0,3.e0), (0.e0,0.e0), (-15.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_n_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,1)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(0.e0,-3.e0), (0.e0,0.e0), (0.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,1)/)
+      COMPLEX*8 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(0.e0,3.e0), (0.e0,0.e0), (0.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 1+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(1,4)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-3.e0,12.e0), (-3,-12)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/1/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/-3, -3/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_t_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 2+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(2,3)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(2)=(/(-6.e0,9.e0), (-6,-9)/)! reference cy after 
+      COMPLEX*8 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_c_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,3)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(0.e0,9.e0), (0.e0,0.e0), (0.e0,-9.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 3+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(3,1)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(-9.e0,-3.e0), (0.e0,0.e0), (-9.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tc_sh_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*8 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+5i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*8 :: VA(1)=(/(0,5)/)
+      COMPLEX*8 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*8 :: cy(4)=&
+          &(/(0.e0,15.e0), (0.e0,0.e0), (0.e0,-15.e0), (0,0)/)! reference cy after 
+      COMPLEX*8 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL cuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=c dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tc_sh_di_usmv_2_c_anr3_bp1_ix2_iy2 
+! 
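+! The same battery of cases is now repeated for type=z (COMPLEX*16),
+! using zuscr_begin instead of cuscr_begin; the test logic is
+! otherwise identical to the COMPLEX*8 (type=c) cases above.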
+      SUBROUTINE tz_sh_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty arrays
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty arrays
+      COMPLEX*16 :: VA(1)=(/-999999/) ! Fortran does not support empty arrays
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/6, 6/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_n_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(6.e0,6.e0), (6,-6)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_t_ap3_bp1_ix1_iy1 
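+! Worked check for the trans=t case above (alpha=3, A(2,1)=0+2i):
+! the Hermitian completion has A(1,2)=-2i, so A**T*x = (1+2i,1-2i)
+! and y = 3*(A**T)*x + y = (3+6i,3-6i) + (3,3) = (6+6i,6-6i) = cy.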
+! 
+      SUBROUTINE tz_sh_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0 0
+! 3 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/3/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/15, 15/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_c_ap3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(6.e0,-6.e0), (0.e0,0.e0), (6.e0,6.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_n_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,1)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(6.e0,3.e0), (0.e0,0.e0), (6.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_t_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,1)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(6.e0,-3.e0), (0.e0,0.e0), (6.e0,3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_c_ap3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/1/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/9, 9/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_n_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 5+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(5,4)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(21.e0,12.e0), (21,-12)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_t_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,3)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(6.e0,-9.e0), (6,9)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_c_ap3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 3+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(3,3)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(15.e0,-9.e0), (0.e0,0.e0), (15.e0,9.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_n_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0 0
+! 2 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/2/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/12, 0, 12, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_t_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=3
+! A =
+! 0+0i 0+0i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,3)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(6.e0,-9.e0), (0.e0,0.e0), (6.e0,9.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_c_ap3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,3)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(4.e0,-3.e0), (4,3)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_n_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,3)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(4.e0,3.e0), (4,-3)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_t_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 1+5i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(1,5)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(5.e0,-5.e0), (5,5)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_c_ap1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty arrays
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty arrays
+      COMPLEX*16 :: VA(1)=(/-999999/) ! Fortran does not support empty arrays
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/4, 0, 4, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_n_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/5, 0, 5, 0/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_t_ap1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 2+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(2,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(6.e0,-2.e0), (0.e0,0.e0), (6.e0,2.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_c_ap1_bp1_ix1_iy2 
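+! Note (assumed semantics of the generated checks, added for readability):
+! with blas_unit_diag and blas_lower_hermitian set, the single stored
+! strictly-lower entry a implies the effective matrix
+!   A = [ 1        conjg(a) ]
+!       [ a        1        ]
+! and the reference vector is cy = alpha*op(A)*x + y. For the test
+! above (a = 2+2i, trans=c, alpha = 1): since A is Hermitian, A^H = A,
+! so cy = ( 1*1 + (2-2i)*1 + 3, (2+2i)*1 + 1*1 + 3 ) = (6-2i, 6+2i),
+! i.e. the cy(1), cy(3) pair stored at stride incy=2.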
+! 
+      SUBROUTINE tz_sh_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 3+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(3,2)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(7.e0,-2.e0), (7,2)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_n_ap1_bp1_ix2_iy1 
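+! Note (assumed semantics): incx/incy are BLAS-style strides, so
+! x(4)=(1,0,1,0) with incx=2 denotes the logical vector (x(1),x(3)) =
+! (1,1), and the length-2 result is written to y(1),y(2) when incy=1.
+! In the test above (a = 3+2i, trans=n):
+!   A*x = (1 + (3-2i), (3+2i) + 1) = (4-2i, 4+2i),
+! plus y = (3,3), giving cy = (7-2i, 7+2i), as declared.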
+! 
+      SUBROUTINE tz_sh_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 2+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(2,4)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(6.e0,4.e0), (6,-4)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_t_ap1_bp1_ix2_iy1 
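+! Note (assumed semantics): for Hermitian A the three trans variants
+! reduce to op(A) = A (trans=n), op(A) = conjg(A) (trans=t, since
+! A^T = conjg(A)), and op(A) = A again (trans=c, since A^H = A); hence
+! the trans=t reference above, (6+4i, 6-4i), is the conjugate of the
+! trans=n result for the same stored entry a = 2+4i.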
+! 
+      SUBROUTINE tz_sh_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 1+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(1,4)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(5.e0,-4.e0), (5,4)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_c_ap1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 1+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(1,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(5.e0,-1.e0), (0.e0,0.e0), (5.e0,1.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_n_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(4.e0,1.e0), (0.e0,0.e0), (4.e0,-1.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_t_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=1
+! A =
+! 0+0i 0+0i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,2)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(4.e0,-2.e0), (0.e0,0.e0), (4.e0,2.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha= 1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_c_ap1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 2+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(2,1)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(0.e0,1.e0), (0,-1)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_n_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,1)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(2.e0,-1.e0), (2,1)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_t_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 3+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(3,3)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(-1.e0,3.e0), (-1,-3)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_c_anr1_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 1+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(1,1)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(1.e0,1.e0), (0.e0,0.e0), (1.e0,-1.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_n_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 0+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,4)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(2.e0,-4.e0), (0.e0,0.e0), (2.e0,4.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_t_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 0+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,4)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(2.e0,4.e0), (0.e0,0.e0), (2.e0,-4.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_c_anr1_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 0+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,3)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(2.e0,3.e0), (2,-3)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_n_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 0+5i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,5)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(2.e0,-5.e0), (2,5)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_t_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 2+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(2,2)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(0.e0,2.e0), (0,-2)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_c_anr1_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,2)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(2.e0,2.e0), (0.e0,0.e0), (2.e0,-2.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_n_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 6+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(6,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(-4.e0,-1.e0), (0.e0,0.e0), (-4.e0,1.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_t_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-1
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(2.e0,1.e0), (0.e0,0.e0), (2.e0,-1.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-1 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_c_anr1_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0 0
+! 1 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/1/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/-3, -3/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_n_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0 0
+! 0 0
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=0
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      INTEGER :: JA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      COMPLEX*16 :: VA(1)=(/-999999/) ! Fortran does not support empty array constructors
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/0, 0/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_t_anr3_bp1_ix1_iy1 
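+! Note (assumed semantics): in the nnz=0 test above the dummy -999999
+! placeholders are never inserted (uscr_insert_entries is called with
+! nnz=0); only the implicit unit diagonal contributes, so
+! cy = -3*(1,1) + (3,3) = (0,0), matching the declared reference.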
+! 
+      SUBROUTINE tz_sh_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,1)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(0.e0,3.e0), (0,-3)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_c_anr3_bp1_ix1_iy1 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 1+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(1,1)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(-3.e0,3.e0), (0.e0,0.e0), (-3.e0,-3.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_n_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,2)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(0.e0,-6.e0), (0.e0,0.e0), (0.e0,6.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_t_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=1
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 4+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(4,4)/)
+      COMPLEX*16 :: x(2)=(/1, 1/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(-12.e0,12.e0), (0.e0,0.e0), (-12.e0,-12.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=1 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_c_anr3_bp1_ix1_iy2 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 3+1i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(3,1)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(-9.e0,3.e0), (-9,-3)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_n_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 3+3i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(3,3)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(-9.e0,-9.e0), (-9,9)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_t_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=1
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 3+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(3,2)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(2)=(/(-9.e0,6.e0), (-9,-6)/)! reference cy after 
+      COMPLEX*16 :: y(2)=(/3, 3/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=1 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_c_anr3_bp1_ix2_iy1 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_no_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 0+6i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(0,6)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(0.e0,18.e0), (0.e0,0.e0), (0.e0,-18.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=n is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_n_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_trans
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 3+4i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(3,4)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(-9.e0,-12.e0), (0.e0,0.e0), (-9.e0,12.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=t is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_t_anr3_bp1_ix2_iy2 
+! 
+      SUBROUTINE tz_sh_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      USE blas_sparse
+      IMPLICIT NONE
+      INTEGER::errval,istat=0,i
+      INTEGER::A
+      INTEGER::transT=blas_conj_trans 
+      INTEGER :: incx=2
+      INTEGER :: incy=2
+      COMPLEX*16 :: alpha=-3
+! A =
+! 0+0i 0+0i
+! 2+2i 0+0i
+
+      ! declaration of VA,IA,JA 
+      INTEGER :: nnz=1
+      INTEGER :: nr=2
+      INTEGER :: nc=2
+      INTEGER :: IA(1)=(/2/)
+      INTEGER :: JA(1)=(/1/)
+      COMPLEX*16 :: VA(1)=(/(2,2)/)
+      COMPLEX*16 :: x(4)=&
+          &(/1, 0, 1, 0/)! reference x 
+      COMPLEX*16 :: cy(4)=&
+          &(/(-6.e0,6.e0), (0.e0,0.e0), (-6.e0,-6.e0), (0,0)/)! reference cy after 
+      COMPLEX*16 :: y(4)=&
+          &(/3, 0, 3, 0/)! y will be overwritten
+
+      errval=0
+      CALL zuscr_begin(nr,nc,A,errval)
+      IF(errval.NE.0)GOTO 9999
+      CALL ussp(A,blas_unit_diag,istat)
+      CALL ussp(A,blas_lower_hermitian,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL uscr_end(A,istat)
+      IF(istat.NE.0)GOTO 9997
+      CALL usmv(transT,alpha,A,x,incx,y,incy,istat)
+      IF(istat.NE.0)GOTO 9997
+      DO i=1,2
+        IF(y(i).NE.cy(i))PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is not ok"
+        IF(y(i).NE.cy(i))GOTO 9997
+      ENDDO
+      PRINT*,"type=z dims=2x2 sym=h diag=i blocks=&
+          &1x1 usmv alpha=-3 beta= 1 incx=2 incy=2 trans=c is ok"
+      GOTO 9998
+9997      errval=-1
+9998      CONTINUE
+      CALL usds(A,istat)
+      IF(istat.NE.0)errval=-1
+9999      CONTINUE
+      end SUBROUTINE tz_sh_di_usmv_2_c_anr3_bp1_ix2_iy2 
+!
+      PROGRAM main
+
+      USE rsb
+      IMPLICIT NONE
+      INTEGER :: passed=0,failed=0,errval=0
+      INTEGER :: info
+      info = rsb_lib_init(RSB_NULL_INIT_OPTIONS)
+      IF(info.NE.0)THEN
+        STOP 1
+      ENDIF
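+! Note (assumed behavior): rsb_lib_init must succeed before any Sparse
+! BLAS call, so the driver stops immediately on failure; each test
+! subroutine returns errval=0 on success and errval<0 on failure, and
+! the passed/failed counters tally the outcomes of the calls below.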
+      CALL       ts_sg_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
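+      ! Double-precision (td_*) USMV tests on a general (sg) matrix begin
+      ! here. Judging by the generated names (an inference, not upstream
+      ! documentation), the suffixes encode the transpose mode (n/t/c),
+      ! alpha (ap3=+3, ap1=+1, anr1=-1, anr3=-3), beta (bp1=+1), and the
+      ! x/y strides (ix1/ix2, iy1/iy2 for incX/incY of 1 or 2).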
+      CALL       td_sg_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
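+      ! Single-complex (tc_*) USMV tests on a general (sg) matrix.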
+      CALL       tc_sg_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
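+      ! Double-complex (tz_*) USMV tests on a general (sg) matrix.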
+      CALL       tz_sg_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
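+      ! USSV (sparse triangular solve) tests begin: single precision
+      ! (ts_*) on a presumably upper-triangular (su) matrix. Note that
+      ! only matched stride pairs (ix1_iy1, ix2_iy2) are generated for
+      ! the solve variants.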
+      CALL       ts_su_de_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_de_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_de_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_de_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_de_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_de_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_de_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_de_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_de_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_de_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_de_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_de_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_de_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_de_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_de_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_de_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_de_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_de_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_de_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_de_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_de_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_de_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_de_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_de_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
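+      ! Double-precision (td_*) upper-triangular USSV tests.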
+      CALL       td_su_de_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_de_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_de_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_de_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_de_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_de_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_de_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_de_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_de_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_de_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_de_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_de_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_de_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_de_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_de_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_de_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_de_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_de_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_de_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_de_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_de_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_de_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_de_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_de_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
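+      ! Single-complex (tc_*) upper-triangular USSV tests.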
+      CALL       tc_su_de_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_de_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_de_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_de_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_de_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_de_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_de_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_de_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_de_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_de_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_de_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_de_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_de_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_de_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_de_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_de_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_de_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_de_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_de_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_de_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_de_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_de_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_de_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_de_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
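+      ! Double-complex (tz_*) upper-triangular USSV tests.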
+      CALL       tz_su_de_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_de_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_de_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_de_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_de_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_de_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_de_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_de_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_de_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_de_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_de_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_de_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_de_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_de_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_de_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_de_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_de_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_de_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_de_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_de_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_de_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_de_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_de_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_de_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
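+      ! USSV tests on a presumably lower-triangular (sl) matrix follow,
+      ! again cycling through the s/d/c/z types: single precision (ts_*)
+      ! first.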
+      CALL       ts_sl_de_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_de_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_de_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_de_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_de_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_de_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_de_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_de_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_de_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_de_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_de_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_de_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_de_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_de_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_de_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_de_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_de_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_de_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_de_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_de_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_de_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_de_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_de_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_de_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
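+      ! Double-precision (td_*) lower-triangular USSV tests.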
+      CALL       td_sl_de_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_de_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_de_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_de_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_de_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_de_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_de_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_de_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_de_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_de_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_de_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_de_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_de_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_de_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_de_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_de_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_de_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_de_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_de_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_de_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_de_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_de_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_de_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_de_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
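+      ! Single-complex (tc_*) lower-triangular USSV tests.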
+      CALL       tc_sl_de_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_de_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_de_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_de_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_de_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_de_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_de_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_de_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_de_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_de_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_de_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_de_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_de_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_de_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_de_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_de_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_de_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_de_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_de_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_de_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_de_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_de_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_de_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_de_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
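+      ! Double-complex (tz_*) lower-triangular USSV tests.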
+      CALL       tz_sl_de_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_de_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_de_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_de_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_de_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_de_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_de_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_de_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_de_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_de_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_de_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_de_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_de_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_de_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_de_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_de_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_de_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_de_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_de_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_de_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_de_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_de_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_de_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_de_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
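+      ! Back to USMV, now on a presumably symmetric (ss) matrix, with
+      ! all four incX/incY combinations restored: single precision
+      ! (ts_*) first.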
+      CALL       ts_ss_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
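+      ! Double-precision (td_*) symmetric-matrix USMV tests.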
+      CALL       td_ss_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
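+      ! Hermitian-matrix (sh, presumably) variants of the same
+      ! alpha/transpose/stride grid follow.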
+      CALL       ts_sh_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_de_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
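+      ! From here the cases switch to general (sg) matrices with an
+      ! implicit diagonal (di, presumably), over the same grid.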
+      CALL       ts_sg_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sg_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sg_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sg_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sg_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_su_di_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_su_di_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_su_di_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_su_di_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sl_di_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sl_di_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sl_di_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sl_di_ussv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_ss_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_ss_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_ss_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_ss_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       ts_sh_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       td_sh_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tc_sh_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_n_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_t_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_c_ap3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_n_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_t_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_c_ap3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_n_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_t_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_c_ap3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_n_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_t_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_c_ap3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_n_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_t_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_c_ap1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_n_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_t_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_c_ap1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_n_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_t_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_c_ap1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_n_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_t_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_c_ap1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_n_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_t_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_c_anr1_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_n_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_t_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_c_anr1_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_n_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_t_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_c_anr1_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_n_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_t_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_c_anr1_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_n_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_t_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_c_anr3_bp1_ix1_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_n_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_t_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_c_anr3_bp1_ix1_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_n_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_t_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_c_anr3_bp1_ix2_iy1(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_n_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_t_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      CALL       tz_sh_di_usmv_2_c_anr3_bp1_ix2_iy2(errval)
+      IF(errval.LT.0)failed=failed+1
+      IF(errval.EQ.0)passed=passed+1
+      
+      PRINT *,"PASSED:",passed
+      PRINT *,"FAILED:",failed
+      info = rsb_lib_exit(RSB_NULL_EXIT_OPTIONS)
+      IF(info.NE.0)THEN
+      STOP 1
+      ENDIF
+      IF(failed.NE.0)THEN
+      STOP 1
+      ENDIF
+      END PROGRAM main
+
+
+
+!> @endcond
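Note on the generated driver above: each subroutine name encodes one point of the test cross-product. Judging from the generator globals defined in sbtg.m later in this diff, the fields appear to be: t<type> with s/d/c/z for the four numeric types, s<shape> with g/u/l/s/h for general/upper/lower/symmetric/hermitian, d<diag> with e/i for explicit or implicit diagonal, the operation name, apparently the matrix dimension (max_random_matrix_size_a=[2]), the transposition n/t/c, alpha and beta with p/nr marking the sign (ap3 = 3, anr3 = -3), and the x/y strides. A small decoding sketch; the field meanings are inferred, not documented in this hunk:

    # Decode one generated test name; meanings inferred from sbtg.m's globals.
    name="tz_sh_di_usmv_2_c_anr3_bp1_ix2_iy2";
    f=strsplit(name,"_");
    printf("type=%s shape=%s diag=%s op=%s size=%s trans=%s alpha=%s beta=%s\n", ...
           f{1}(2),f{2}(2),f{3}(2),f{4},f{5},f{6},f{7}(2:end),f{8}(2:end));
    # -> type=z shape=h diag=i op=usmv size=2 trans=c alpha=nr3 beta=p1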
diff --git a/sbtf.m b/sbtf.m
new file mode 100644
index 0000000..d8d442b
--- /dev/null
+++ b/sbtf.m
@@ -0,0 +1,58 @@
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+#
+# Sparse BLAS Tester Code.
+#
+
+source("./sbtg.m")
+
+res=rsb_octave_license("f");
+res=[res,"!\n"];
+res=[res,rsb_octave_doc_f_header];
+res=[res,"!\n"];
+res=[res,"! Sparse BLAS Fortran interface testing code\n"];
+res=[res,"!\n"];
+res=[res,"! FIXME: missing library initialization!\n"];
+res=[res,"! FIXME: using zero-based indices is only partially supported!\n"];
+res=[res,"!\n"];
+printf("%s",res);
+all_test("f","decl");
+res=sprintf("%s%s","","!\n");
+res=[res,findent,"PROGRAM main\n\n",findent,"USE rsb\n",findent,"INTEGER :: passed=0,failed=0,errval=0\n"];
+res=sprintf("%s%s",res,findent,"info = rsb_lib_init(RSB_NULL_INIT_OPTIONS)\n");
+res=[res,findent,"IF(info.NE.0)THEN\n"];
+res=[res,findent,"STOP 1\n"];
+res=[res,findent,"ENDIF\n"];
+printf("%s",res);
+all_test("f","CALL");
+res=["" ,findent,"PRINT *,\"PASSED:\",passed\n"];
+res=[res,findent,"PRINT *,\"FAILED:\",failed\n"];
+res=[res,findent,"info = rsb_lib_exit(RSB_NULL_EXIT_OPTIONS)\n"];
+res=[res,findent,"IF(info.NE.0)THEN\n"];
+res=[res,findent,"STOP 1\n"];
+res=[res,findent,"ENDIF\n"];
+res=[res,findent,"IF(failed.NE.0)THEN\n"];
+res=[res,findent,"STOP 1\n"];
+res=[res,findent,"ENDIF\n"];
+res=[res,findent,"END PROGRAM main\n"];
+res=sprintf("%s%s",res,"\n");
+res=sprintf("%s%s",res,"\n\n");
+res=sprintf("%s%s",res,rsb_octave_doc_f_footer);
+printf("%s",res);
+
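sbtf.m above writes the whole Fortran tester to standard output: the license banner, the per-case subroutines (all_test("f","decl")), a main program that brackets the CALL list between rsb_lib_init and rsb_lib_exit, and the Doxygen footer. Its string-building idiom, in miniature (assuming sbtg.m, which defines findent, is in the current directory):

    source("./sbtg.m");                     # defines findent and friends
    res=[findent,"PROGRAM main\n"];
    res=[res,findent,"PRINT *,\"hello\"\n"];
    res=[res,findent,"END PROGRAM main\n"];
    printf("%s",res);                       # a compilable toy program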
diff --git a/sbtg-types.m b/sbtg-types.m
new file mode 100644
index 0000000..022ebde
--- /dev/null
+++ b/sbtg-types.m
@@ -0,0 +1,6 @@
+
+
+#	Supported types  :(float,double,float complex,double complex)
+#	Unsupported types:()
+blas_type_codes_array_=['s','d','c','z',-1];
+global blas_type_codes_array=blas_type_codes_array_(1:length(blas_type_codes_array_)-1);
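The trailing -1 above is a sentinel: the m4 template in the next hunk can then emit every type code followed by a comma, and the final slice drops the sentinel again. The same two lines, annotated:

    a_=['s','d','c','z',-1];       # m4 appends "'x'," per supported type, then -1
    a=a_(1:length(a_)-1);          # drop the sentinel, keeping only the codes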
diff --git a/sbtg-types.m4 b/sbtg-types.m4
new file mode 100644
index 0000000..ed1a3f7
--- /dev/null
+++ b/sbtg-types.m4
@@ -0,0 +1,15 @@
+dnl
+include(`libspblas_macros.m4')dnl
+include(`rsb_fortran_macros.m4')dnl
+dnl
+`#	'Supported types  :RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES
+`#	'Unsupported types:RSB_M4_SPBLAS_MATRIX_UNSUPPORTED_TYPES
+blas_type_codes_array_=[dnl
+dnl
+foreach(`type',RSB_M4_SPBLAS_MATRIX_SUPPORTED_TYPES,`dnl
+singlequote(RSB_M4_SPBLAS_TYPE_CHARCODE(type))`,'dnl
+')dnl
+-1];
+global blas_type_codes_array=blas_type_codes_array_(1:length(blas_type_codes_array_)-1);dnl
+dnl
+
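sbtg-types.m in the previous hunk is evidently the expansion of this template. A hypothetical regeneration step (the actual rule lives in the Makefile, which is not part of this hunk), runnable from Octave:

    # Assumes libspblas_macros.m4 and rsb_fortran_macros.m4 are in the
    # current directory; the exact upstream build rule may differ.
    system("m4 sbtg-types.m4 > sbtg-types.m");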
diff --git a/sbtg.m b/sbtg.m
new file mode 100644
index 0000000..4d77afe
--- /dev/null
+++ b/sbtg.m
@@ -0,0 +1,1095 @@
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+#
+# (Octave-based) Sparse BLAS tester-generating code
+#
+global rsb_octave_doc_f_header="!> @cond INNERDOC\n!> @file\n!> @author Michele Martone \n!> @brief This file is part of the Octave based test suite for librsb\n";
+global rsb_octave_doc_f_footer="!> @endcond\n";
+global rsb_octave_doc_c_header="/*!\n * @file\n * @author Michele Martone \n * @brief This file is part of the Octave based test suite for librsb\n */\n";
+global matrix_diagonal=['e','i']; # explicit or implicit, FIXME
+#global matrix_diagonal=['e']; # explicit or implicit, FIXME
+#
+#global matrix_types_array=['g'];
+#global matrix_types_array=['u'];
+#global matrix_types_array=['l'];
+#global matrix_types_array=['g','s'];
+#global matrix_types_array=['g','l'];
+#global matrix_types_array=['u','l'];
+#global matrix_types_array=['u','g'];
+#global matrix_types_array=['g','u','l','s','h'];
+global matrix_types_array=['g','u','l','s','h'];
+#
+global blas_trans_codes_array=['n','t','c'];
+#global blas_trans_codes_array=['n'];
+#global blas_trans_codes_array=['c'];
+#
+global findent="      ";
+global ftab="  ";
+#
+#global alpha_array=[1];
+#global alpha_array=[-1];
+#global alpha_array=[1,-1];
+global alpha_array=[3,1,-1,-3];
+#global alpha_array=[4,1,-1,-4];
+#
+#global beta_array=[1,-1]; # FIXME: beta != 1 is unsupported by the sparse blas
+global beta_array=[1,0]; # FIXME: beta != 1 is unsupported by the Sparse BLAS
+#
+global incx_array=[1,2];
+#global incx_array=[1];
+#global incx_array=[2];
+#
+global incy_array=[1,2];
+#global incy_array=[1];
+#
+#global beta_array=[4,1,-1,-4];
+#
+#global blas_type_codes_array=['s','d','c','z'];
+#global blas_type_codes_array=['s'];
+source("./sbtg-types.m")
+#
+#global max_random_matrix_size_a=[1,2,3,4];
+#global max_random_matrix_size_a=[1,2,3,4,5];
+#
+global max_random_matrix_size_a=[2];
+#
+global blas_op_codes_num=2;
+#
+#global blas_op_codes_num=1;
+#
+global sbtg_random_fixed_seq=0; # if 1, the generated file will be the same on multiple runs
+global sbtg_random_init_seed=[0];
+global sbtg_want_only_full_matrices=0; # if 1, the generated matrices will be full
+
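Taken together, the globals above fix the size of the cross-product that drivers like the one earlier in this diff enumerate. A back-of-the-envelope count with the shipped defaults, before whatever pruning the generator applies (the beta=0 cases, for instance, do not appear in the CALL list above):

    # ops * types * shapes * diagonals * alphas * betas * incx * incy * trans
    ncases=2*4*5*2*4*2*2*2*3    # = 7680 candidate cases with these defaults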
+function res=iscomplextype(tc)
+	res=(tc=='c' || tc=='z');
+end
+
+function res=sbtg_rand(n)
+	global sbtg_random_fixed_seq;
+	global sbtg_random_init_seed;
+
+	if sbtg_random_fixed_seq
+		rand("state",sbtg_random_init_seed);
+		sbtg_random_fixed_seq=0;
+	endif
+	res=rand(n);
+end 
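sbtg_rand reseeds at most once per run: whenever sbtg_random_fixed_seq is set it resets the generator state to sbtg_random_init_seed and clears the flag, so a generation run started with the flag set emits the same test file every time. Setting the flag before each call makes the reseeding visible (after source("./sbtg.m")):

    global sbtg_random_fixed_seq;
    sbtg_random_fixed_seq=1; a=sbtg_rand(2);
    sbtg_random_fixed_seq=1; b=sbtg_rand(2);
    isequal(a,b)    # -> 1: both calls restarted from the same seed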
+
+function res=dump_c_vec(v,lang)
+	if nargin < 2
+		lang="c";
+	endif
+	if lang == "c"
+		res="{ ";
+		vv=vec(v);
+		if(size(vv,1)>0)
+		  if(iscomplex(v))
+			for i=1:size(vv,1)-1; res=sprintf("%s%d+%d*I, ",res,real(vv(i)),imag(vv(i))); endfor
+			i=size(vv,1);res=sprintf("%s%d+%d*I ",res,real(vv(i)),imag(vv(i)));
+		  else
+			for i=1:size(vv,1)-1; res=sprintf("%s%d, ",res,vv(i)); endfor
+			i=size(vv,1);res=sprintf("%s%d ",res,vv(i)); 
+		  endif
+		else
+			res=sprintf("%s-1 /* a dummy value */",res); 
+		endif
+		res=sprintf("%s};",res);
+	else
+		res="(/";
+		vv=vec(v);
+		vlen=size(vv,1)-1;
+		if(iscomplex(v))
+			for i=1:vlen; res=sprintf("%s(%d.e0,%d.e0), ",res,real(vv(i)),imag(vv(i))); endfor
+		else
+			for i=1:vlen; res=sprintf("%s%d, ",res,vv(i)); endfor
+		endif
+		if size(vv,1)>0
+			if(iscomplex(v))
+				res=sprintf("%s(%d,%d)",res,real(vv(size(vv,1))),imag(vv(size(vv,1))));
+			else
+				res=sprintf("%s%d",res,vv(size(vv,1)));
+			endif
+		endif
+		res=sprintf("%s/)",res);
+	endif
+end
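dump_c_vec renders one vector either as a C initializer or as a Fortran array constructor; complex entries become x+y*I in C and parenthesized pairs in Fortran (note the last complex Fortran element is emitted without the .e0 suffix). Two real-valued examples:

    disp(dump_c_vec([1 2 3]))        # -> { 1, 2, 3 };
    disp(dump_c_vec([1 2 3],"f"))    # -> (/1, 2, 3/)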
+
+function res=dump_vec(v,type,id,comment,lang)
+	if(nargin<5)
+		lang="c";
+	endif
+	if lang == "c"
+		res=sprintf("%s %s[]=%s",type,id,dump_c_vec(v));
+	else
+		if size(v,1)==0
+			vv=[-999999];
+		else
+			vv=v;
+		endif
+		if size(v,1)>2
+			bs="&\n          &";
+		else
+			bs="";
+		endif
+		res=sprintf("%s :: %s(%d)=%s%s",type,id,size(vv,1),bs,dump_c_vec(vv,lang));
+		if size(v,1)==0
+			res=[res," ! fortran does not support empty arrays"];
+		endif
+	endif
+	if(nargin>=4)
+		res=sprintf("%s%s",res,comment);
+	endif
+end
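dump_vec wraps the same emitter into a full declaration, padding empty vectors with a dummy value (Fortran has no empty arrays) and adding a continuation line when there are more than two elements:

    disp(dump_vec([1;2;3],"int","IA"))
    #   int IA[]={ 1, 2, 3 };
    disp(dump_vec([1;2;3],"INTEGER","IA","","f"))
    #   INTEGER :: IA(3)=&
    #             &(/1, 2, 3/)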
+
+function res=decl_var(lang,type,id,val)
+	if lang == "c"
+		res=sprintf("%s %s=%d;\n",type,id,val);
+	else
+		res=sprintf("%s :: %s=%d\n",type,id,val);
+	endif
+end
+
+function res=dump_c_coo(a,ts,lang)
+	# lang selects the C ("c") or Fortran emitter below
+	#global ts;
+	global findent;
+	#
+	s=sparse(a);
+	nz=nnz(a);
+	nr=size(a,1);
+	nc=size(a,2);
+	I=zeros(nz,1);
+	JI=zeros(nz,1);
+	V=zeros(nz,1);
+	l=1;
+	for i=1:nr;
+	for j=1:nc;
+		if(s(i,j)!=0)
+			I(l)=i-1;
+			JI(l)=j-1;
+			V(l)=s(i,j);
+			++l;
+		endif
+	endfor
+	endfor
+	res="";
+	if lang == "c"
+#		its="rsb_coo_idx_t";
+		its="int";
+#		res=sprintf("%s	rsb_nnz_idx_t nnz=%d;\n",res,nz);
+		res=sprintf("%s	int nnz=%d;\n",res,nz);
+		res=sprintf("%s	%s nr=%d;\n",res,its,nr);
+		res=sprintf("%s	%s nc=%d;\n",res,its,nc);
+		res=sprintf("%s\t%s\n\t%s\n\t%s",res,dump_vec(I,its,"IA"),dump_vec(JI,its,"JA"),dump_vec(V,ts,"VA"));
+	else
+	# FIXME: WRITE ME
+		its="INTEGER";
+		res=[res,findent,decl_var(lang,"INTEGER","nnz",nz)];
+		res=[res,findent,decl_var(lang,"INTEGER","nr",nr)];
+		res=[res,findent,decl_var(lang,"INTEGER","nc",nc)];
+		res=[sprintf("%s%s%s\n%s%s\n%s%s",res,findent,dump_vec(I+1,its,"IA","",lang),findent,dump_vec(JI+1,its,"JA","",lang),findent,dump_vec(V,ts,"VA","",lang))];
+	endif
+end
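dump_c_coo walks the matrix in row-major order and emits the three COO arrays, zero-based for the C tester and one-based (I+1, JI+1) for the Fortran one. For example (after source("./sbtg.m")):

    a=[1 0; 0 2];
    puts(dump_c_coo(a,"double","c"))
    #   int nnz=2;
    #   int nr=2;
    #   int nc=2;
    #   int IA[]={ 0, 1 };
    #   int JA[]={ 0, 1 };
    #   double VA[]={ 1, 2 };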
+
+function a=want_here_op(o)
+	global op;
+	global main;
+	global oops;
+	global extra_ops;
+	if main
+#		a=want_op(o)
+		a=0;
+		a=findstr(strcat(extra_ops,","),o);
+		if a
+			a=a(1);
+		else
+			a=0;
+		endif
+	else
+		a=(strmatch(op,o,"exact"));
+	endif
+end
+
+function d=mydiagf(m)
+	d=m-(tril(m,-1)+triu(m,1));
+end
+
+function s=gen_test_matrix(op,n,tc,mti,mdi)
+	global matrix_types_array;
+	global matrix_diagonal;
+	global sbtg_want_only_full_matrices;
+	# generate a random sparse n x n matrix
+	m=factorial(4);
+#	s=mod(ceil(sbtg_rand(n)*m),4);
+	s=zeros(n);
+	s+=1*ceil(sbtg_rand(n)>.7);
+	s+=2*ceil(sbtg_rand(n)>.8);
+	s+=3*ceil(sbtg_rand(n)>.9);
+	if sbtg_want_only_full_matrices ; s+=1; endif
+	s(1,1)=1; # fix to avoid empty matrices
+	if (nargin>=3 && iscomplextype(tc)) # complex type: adding a random imaginary part
+		si=gen_test_matrix(op,n);
+	#	si=si*i;
+		si=(si+transpose(si))*i; # more chances to get nonzeros
+		s=si+s;
+	endif
+#	if want_here_op("spsv") ||  want_here_op("ussv") 
+	if nargin>=4 # matrix type index
+		mt=matrix_types_array(mti);
+	else
+		mt='g'; # normal
+	endif
+	if (length(op)>=4 && (op(1:4)=="spsv" ||  op(1:4)=="ussv")) || (mt != 'g')
+		if (nargin == 3 && iscomplextype(tc)) # complex type: real diagonal
+			for k=1:n
+				s(k,k)=real(s(k,k));
+			endfor
+		endif
+	   if mt=='u'
+		   s=triu(s);
+	   else
+		   s=tril(s);
+	   endif
+	   s=(s-mydiagf(s))+speye(n);
+	endif
+	if nargin>=5
+		md=matrix_diagonal(mdi);
+	else
+		md='e';
+	endif
+	if md=='i' # diagonal implicit (1)
+		for k=1:n
+			s(k,k)=0;
+		endfor
+	endif
+end
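gen_test_matrix layers three random 0/1 masks of decreasing density (roughly 0.3, 0.2 and 0.1) to get small integer entries, pins s(1,1)=1 so no matrix is ever empty, adds a symmetrized random imaginary part for complex types, and for triangular solves or non-general shapes falls back to a unit-diagonal triangle. A reproducible call (assumes sbtg.m has been sourced):

    global sbtg_random_fixed_seq; sbtg_random_fixed_seq=1;   # deterministic
    m=gen_test_matrix("usmv",2,'d',1,1)  # type 'd', shape 'g', explicit diagonal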
+
+function res=check_message(a,mti,mdi,br,bc,alpha,beta,transi,incx,incy,tc,op,lang)
+	global blas_trans_codes_array;
+	global matrix_diagonal;
+	global blas_type_codes_array;
+	global matrix_types_array;
+	mt=matrix_types_array(mti);
+	md=matrix_diagonal(mdi);
+	trans=blas_trans_codes_array(transi);
+	bs="";
+	if nargin >= 13
+		if lang == "f"
+			bs="&\n          &";
+		endif
+	endif
+	res=sprintf("type=%s dims=%dx%d sym=%s diag=%s blocks=%s%dx%d %s alpha=%2d beta=%2d incx=%d incy=%d trans=%c",
+		tc,size(a,1),size(a,2),mt,md,bs,br,bc,op,alpha,beta,incx,incy,trans);
+end
+
+function res=check_csmm(a,mti,mdi,br,bc,alpha,beta,transi,incx,incy,tc,op,lang)
+	global findent;
+	global ftab;
+	global matrix_diagonal;
+	#
+	md=matrix_diagonal(mdi);
+	ts="int";
+	res="";
+	n=size(a,1);
+	if nargin <= 6+2+2
+		typecode="typecode";
+	else
+		#typecode="RSB_BLAS_C_HANDLER_TO_C_TYPECODE(A)";
+		typecode=["'",toupper(tc),"'"];
+	endif
+	if nargin < 7+2+2
+		tc='d';
+	endif
+	if nargin < 8+2+2
+		op="spmv";
+	endif
+	if nargin < 11+2
+		lang="c";
+	endif
+	cm=[check_message(a,mti,mdi,br,bc,alpha,beta,transi,incx,incy,tc,op,lang)];
+	nok=[cm," is not ok"];
+	ok=[cm," is ok"];
+	if lang == "c"
+	res=[res,sprintf("	if( rsb__do_are_same(y,cy,nr,%s,%d,%d) != RSB_ERR_NO_ERROR )",typecode,incy,incy),"\n	{\n"];
+	res=[res,"\t	rsb__debug_print_vectors_diff(y,cy,nr,",typecode,sprintf(",%d,%d,RSB_VECTORS_DIFF_DISPLAY_N",incy,incy),");\n"];
+#	res=[res,"		RSB_ERROR(\"",nok,"\\n\");\n"];
+#	res=[res,"		goto err;\n	}\n		else printf(\"",ok,"\\n\");\n"];
+	res=[res,"		goto ferr;\n	}\n		else printf(\"",ok,"\\n\");\n"];
+#	res=[res,sprintf("\nif(memcmp(y,cy,sizeof(%s)*nr))",ts),"\n{\n"];
+#	res=[res,"\t	if(( errval = rsb__debug_print_vectors_diff(y,cy,nr,",typecode,")) != RSB_ERR_NO_ERROR)\n		goto err;"];
+#	res=[res,sprintf("\nRSB_OCTAVE_ERROR(\"spmv test matrix %d/%d blocked %d x %d is not ok\\n\");\n",n,u,br,bc)];
+#	res=[res,printf("\n}else printf(\"spmv test matrix %d/%d blocked %d x %d is ok\\n\");\n",n,u,br,bc)];
+#	res=[res,sprintf("\nRSB_OCTAVE_ERROR(\"spmv test matrix %d blocked %d x %d is not ok\\n\");\n",n,br,bc)];
+#	res=[res,sprintf("		RSB_ERROR(\"%s test matrix %d blocked %d x %d is not ok\\n\");\n",op,n,br,bc)];
+#	res=[res,sprintf("		goto err;\n	}\n	else\n		printf(\"%s test matrix %d blocked %d x %d is ok\\n\");\n",op,n,br,bc) ];
+#	res=[res,sprintf("\nprintf(\"spmv test\\n\");\n")];
+	elseif lang == "f"
+		res=[res,findent,sprintf("DO i=1,%d\n",size(a,1))]; # FIXME: with transA and non square matrices, this breaks
+		res=[res,findent,ftab,"IF(y(i).NE.cy(i))PRINT*,\"",nok,"\"\n"];
+		res=[res,findent,ftab,"IF(y(i).NE.cy(i))GOTO 9997\n"];
+#		res=[res,findent,findent,"IF(y(i).NE.cy(i))THEN\n"];
+#		res=[res,findent,findent,findent,"errval =-1\n"];
+#		res=[res,findent,findent,findent,"GOTO 9997\n"];
+#		res=[res,findent,findent,"ENDIF\n"];
+		res=[res,findent,"ENDDO\n"];
+		res=[res,findent,"PRINT*,\"",ok,"\"\n"];
+#		res=[res,"rsb__debug_print_vectors_diff(y,cy,nr,",typecode,");\n"];
+#		res=[res,"RSB_ERROR(\"",check_message(a,mti,mdi,br,bc,alpha,beta,transi,incx,incy,tc,op)," is not ok\\n\");\n"];
+#		res=[res,sprintf("		goto err;\n	}\n		else printf(\"%s is ok\\n\");\n",check_message(a,mti,mdi,br,bc,alpha,beta,transi,incx,incy,tc,op) ) ];
+	elseif lang == "p"
+		res=[res,findent,sprintf("DO i=1,%d\n",size(a,1))]; # FIXME: with transA and non square matrices, this breaks
+#		res=[res,findent,findent,"IF(y(i).NE.cy(i))PRINT*,\"",nok,"\"\n"];
+		res=[res,findent,findent,"IF(y(i).NE.cy(i))PRINT*,\"results mismatch:\",y,\"instead of\",cy","\n"];
+		res=[res,findent,findent,"IF(y(i).NE.cy(i))info=-1\n"];
+		res=[res,findent,findent,"IF(y(i).NE.cy(i))GOTO 9996\n"];
+		res=[res,findent,"ENDDO\n"];
+#		res=[res,findent,"PRINT*,\"",ok,"\"\n"];
+	endif
+end 
+
+function res=check_spsv(a,mti,mdi,br,bc,alpha,beta_,transi,incx,incy,tc,op,lang)
+	if nargin==6+2+2
+		res=check_csmm(a,mti,mdi,br,bc,alpha,beta_,transi,incx,incy);
+	elseif nargin==7+2+2
+		res=check_csmm(a,mti,mdi,br,bc,alpha,beta_,transi,incx,incy,tc);
+	elseif nargin==8+2+2
+		res=check_csmm(a,mti,mdi,br,bc,alpha,beta_,transi,incx,incy,tc,op);
+	elseif nargin==8+2+2+1
+		res=check_csmm(a,mti,mdi,br,bc,alpha,beta_,transi,incx,incy,tc,op,lang);
+	endif
+end 
+
+function sx=stride_apply(x,incx)
+	# we use zeros as sentinel values, but we could use some big values or nan's as well (e.g.: 99999)
+	sx=zeros(size(x,1)*incx,1);
+	for i=1:size(x,1)
+		sx((i-1)*incx+1)=x(i);
+	end
+end
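+# e.g. with column vectors, stride_apply([1;2;3],2) gives [1;0;2;0;3;0]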
+
+function res=dump_csmm(a,mti,mdi,br,bc,alpha,beta,transi,ts,incx,incy,lang)
+	global findent;
+	global matrix_types_array;
+	global matrix_diagonal;
+	#
+	extra=br+bc; # padding far more than necessary: will do no harm
+	extra=0; # FIXME: for now it's ok
+	#
+	if nargin < 10+2
+		lang="c";
+	endif
+	#
+	mt=matrix_types_array(mti);
+	md=matrix_diagonal(mdi);
+	#
+	#incx=1;incy=1;
+	nr=size(a,1);
+	nc=size(a,2);
+	x=zeros(nc+extra,1);
+	x(1:nc)=ones(nc,1);
+	y=zeros(nr+extra,1);
+	y(1:nr)=ones(nr,1)*3; # nr (not nc): y is sized by the rows
+	cy=zeros(nr+extra,1);
+	# we obtain a matrix from its packed version
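+	# (the stored triangle is mirrored; the diagonal, which would be counted twice, is subtracted once)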
+	if mt == 's'
+		aa=a+transpose(a);
+		for k=1:nr;aa(k,k)-=a(k,k); end
+	elseif mt == 'h'
+		aa=a+a';
+		for k=1:nr;aa(k,k)-=a(k,k); end
+	else
+		aa=a;
+	endif
+	if md=='i'
+		aa=aa+speye(nr);
+	endif
+	# we compute the vectors
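+	# transi codes: 1 = no transpose, 2 = transpose, 3 = conjugate transpose (cf. blas_trans_array)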
+	if transi==3
+		cy(1:nr)=beta*y(1:nr)+alpha*aa'*x(1:nc);
+		transc='H';
+	elseif transi==2
+		cy(1:nr)=beta*y(1:nr)+alpha*transpose(aa)*x(1:nc);
+		transc='T';
+	elseif transi==1
+		cy(1:nr)=beta*y(1:nr)+alpha*aa *x(1:nc);
+		transc='1';
+	endif
+	res="";
+	scy=stride_apply(cy,incy);
+	sy=stride_apply(y,incy);
+	sx=stride_apply(x,incx);
+	if lang == "c"
+		res=[res,sprintf("		/* x: %d */\n",length(sx))];
+		res=[res,"\t",dump_vec(sx,ts,"x","/* reference x */\n")];
+		res=[res,"\t",dump_vec(scy,ts,"cy","/* reference cy after */\n")];
+		sbcy=sy;
+#		res=[res,"\t",dump_vec(sbcy,ts,"bcy","/* reference bcy before */\n")];
+#		res=[res,"\t",dump_vec(sy,ts,"y","/* y */\n"),"\n"];
+#		res=[res,"\t",sprintf("rsb_memcpy(y,bcy,(%d*%d+%d)*sizeof(%s)); /* because y will get overwritten otherwise */\n",incy,nr,extra,ts)];
+		res=[res,"\t",dump_vec(sbcy,ts,"y","/* y */\n"),"\n"];
+		res=sprintf("%s\t\n	const char*lsc=\"System and hardcoded solution: y' <- y + %d A^%c * x \\n\"%s",res,alpha,transc,print_matrix(aa,"k"));
+		res=sprintf("%s\t%s",res,print_matrix(cy,"k","y'"));
+		res=sprintf("%s\t%s",res,print_matrix(y,"k","y"));
+		res=sprintf("%s\t%s;",res,print_matrix(x,"k","x"));
+	else
+		res=[res,findent,dump_vec(sx,ts,"x","! reference x \n",lang)];
+		res=[res,findent,dump_vec(scy,ts,"cy","! reference cy after \n",lang)];
+		sbcy=sy;
+#		res=[res,findent,dump_vec(sbcy,ts,"bcy","! reference bcy before \n",lang)];
+#		res=[res,findent,dump_vec(sy,ts,"y","! y will be overwritten\n",lang),"\n"];
+#		res=[res,findent,"y=bcy\n"];
+		res=[res,findent,dump_vec(sbcy,ts,"y","! y will be overwritten\n",lang),"\n"];
+	endif
+end 
+
+function res=dump_spsv(a,mdi,br,bc,alpha,nrhs,transi,ts,incx,incy,lang)
+	global findent;
+	global matrix_diagonal;
+	#
+	md=matrix_diagonal(mdi);
+	#
+	if nargin < 11
+		lang="c";
+	endif
+	# FIXME: problems with incy != 1
+	beta=0;
+	extra=br+bc; # padding far more than necessary: will do no harm
+	extra=0; # FIXME
+	nr=size(a,1);
+	nc=size(a,2);
+	x=zeros(nc+extra,1);
+	x(1:nc)=ones(nc,1);
+	y=zeros(nr+extra,1);
+	y(1:nr)=ones(nr,1)*0; # nr (not nc): y is sized by the rows
+	cy=zeros(nr+extra,1);
+#	cy(1:nr)=beta*y(1:nr)+alpha*a*x(1:nc);
+	if md=='i'
+		aa=a+speye(nr);
+	else
+		aa=a;
+	endif
+	if(transi==1)
+		cy(1:nr)=beta*y(1:nr)+aa *x(1:nc);
+		transc='1';
+	elseif(transi==2)
+		cy(1:nr)=beta*y(1:nr)+transpose(aa)*x(1:nc);
+		transc='T';
+	elseif(transi==3)
+		cy(1:nr)=beta*y(1:nr)+aa'*x(1:nc);
+		transc='H';
+	endif
+	tmp=cy;cy=x*alpha*alpha;x=tmp*alpha;
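+	# role swap for the solve test: alpha*op(A)*x becomes the input vector and alpha^2*x the
+	# expected result, since alpha * inv(op(A)) * (alpha*op(A)*x) = alpha^2 * x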
+#	if nr<20 
+#		printf("/* \n");
+#		a
+#		printf("*/ \n");
+#	endif
+	scy=stride_apply(cy,incy);
+	sy=stride_apply(y,incy);
+	sx=stride_apply(x,incx);
+	res="";
+	if lang == "c"
+		res=[res,sprintf("/* type is %s */\n",ts),"\n"];
+		res=[res,"\t",dump_vec(sx,ts,"x","/* reference x */\n")]; # for ot-spsv.c
+		res=[res,"\t",dump_vec(scy,ts,"cy","/* reference cy after */\n")];
+	#	sbcy=sy;
+	#	res=[res,"\t",dump_vec(sbcy,ts,"bcy","/* reference y before */\n")];
+	#	res=[res,"\t",dump_vec(sy,ts,"y","/* y */\n"),"\n"];
+		res=[res,"\t",dump_vec(sx,ts,"y","/* y */\n"),"\n"];
+		res=sprintf("%s\t\n	const char*lsc=\"System and hardcoded solution: y' <- %d A^-%c * y \\n\"%s",res,alpha,transc,print_matrix(aa,"k"));
+		res=sprintf("%s\t%s",res,print_matrix(cy,"k","y"));
+		res=sprintf("%s\t%s;\n",res,print_matrix(x,"k","y'"));
+	#	res=[res,"\t",sprintf("rsb_memcpy(y,bcy,%d*sizeof(%s)); /* because y will get overwritten otherwise */\n",nr+extra,ts)];
+	#	res=[res,"\t",sprintf("rsb_memcpy(y,x,%d*sizeof(%s)); /* because y will get overwritten otherwise */\n",nr+extra,ts)];
+	#	res=[res,"\t",sprintf("rsb_memcpy(y,x,(%d*%d+%d)*sizeof(%s)); /* because y will get overwritten otherwise */\n",incy,nr,extra,ts)];
+	else
+		res=[res,findent,dump_vec(sx,ts,"x","! reference x \n",lang)];
+		res=[res,findent,dump_vec(scy,ts,"cy","! reference cy after \n",lang)];
+		sbcy=sy;
+	#	res=[res,findent,dump_vec(sbcy,ts,"bcy","! reference bcy before \n",lang)];
+		res=[res,findent,dump_vec(sy,ts,"y","! y \n",lang),"\n"];
+		res=[res,findent,"y=x\n"];
+	end 
+end 
+
+function res=print_matrix(a,lang,lab)
+	ms="";
+	if nargin<3
+		lab="A";
+	endif
+#	if nargin<2
+#		lang="c";
+#	endif
+	if lang == "c"
+		hc="";
+		co=["/*\n ",lab," = \n"];
+		cc="*/\n";
+		el="\n";
+	elseif lang == "k"
+		hc="";
+		co=["\" ",lab," = \\n"];
+		cc="\"";
+		el="\\n";
+	else # f
+		hc="!";
+		co=["! ",lab," =\n"];
+		cc="";
+		el="\n";
+	endif
+	res=co;
+	for i=1:size(a,1)
+	ms=[ms,hc];
+	for j=1:size(a,2)
+		if(iscomplex(a))
+			ms=sprintf("%s %d+%di",ms,real(a(i,j)),imag(a(i,j)));
+		else
+			ms=sprintf("%s %d",ms,a(i,j));
+		endif
+	end
+		ms=sprintf("%s%s",ms,el);
+	end
+	res=sprintf("%s%s%s",res,ms,cc);
+end
+
+function trc=blas_trans_char_array(tri)
+	if tri==1
+	trc="'n'";
+	elseif tri==2
+	trc="'t'";
+	elseif tri==3
+	trc="'c'";
+	else
+	trc="'error :-)'";
+	end
+end
+
+function tr=blas_trans_array(tri)
+	if tri==1
+	tr="blas_no_trans";
+	elseif tri==2
+	tr="blas_trans";
+	elseif tri==3
+	tr="blas_conj_trans ";
+	end
+end
+
+function op=blas_types_array(tc,lang)
+	if nargin == 1
+		lang="c";
+	endif
+	if(lang=="c")
+		if tc=='s'
+		op="float";
+		elseif tc=='d'
+		op="double";
+		elseif tc=='c'
+		op="float complex";
+		elseif tc=='z'
+		op="double complex";
+		end
+	else
+		if tc=='s'
+		op="REAL*4";
+		elseif tc=='d'
+		op="REAL*8";
+		elseif tc=='c'
+		op="COMPLEX*8";
+		elseif tc=='z'
+		op="COMPLEX*16";
+		end
+	end
+end
+
+function s=pointer_symbol_if_type(tc)
+	if tc=='s'
+	s="";
+	elseif tc=='d'
+	s="";
+	elseif tc=='c'
+	s="&";
+	elseif tc=='z'
+	s="&";
+	end
+end
+
+function op=blas_op_codes_array(oi)
+	if oi==1
+	op="usmv";
+	elseif oi==2
+	op="ussv";
+	else
+	# error
+	op="????"
+	end
+end
+
+function res=blas_tester_function(what,lang,mti,mdi,tc,oi,rms,tri,alpha,beta,incx,incy)
+	global blas_trans_codes_array;
+	global matrix_types_array;
+	global blas_type_codes_array;
+	global findent;
+	global matrix_diagonal;
+	#
+	n=rms;
+	op=blas_op_codes_array(oi);
+	bc=tc;#blas_type_codes_array(ti);
+	tr=blas_trans_array(tri);
+	#tc=blas_type_codes_array(ti);
+	tn=blas_types_array(tc,lang);
+	btc=blas_trans_codes_array(tri);
+	trc=blas_trans_char_array(tri);
+	mt=matrix_types_array(mti);
+	md=matrix_diagonal(mdi);
+	#
+	res="";
+	id="";
+	#
+	#
+	if what=="id  "
+		if alpha<1
+			alphas=sprintf("nr%d",-alpha);
+		else
+			alphas=sprintf("p%d", alpha);
+		endif
+		if beta<1
+			betas=sprintf("nr%d",-beta);
+		else
+			betas=sprintf("p%d", beta);
+		endif
+		res=sprintf("t%s_s%s_d%s_%s_%d_%c_a%s_b%s_ix%d_iy%d",tc,mt,md,op,rms,btc,alphas,betas,incx,incy);
+	else
+		id=blas_tester_function("id  ",lang,mti,mdi,tc,oi,rms,tri,alpha,beta,incx,incy);
+	endif
+	#
+	if what=="comm"
+		res=sprintf("/* op:%s; type:%s; trans:%s kind:%s; diag:%s */", blas_op_codes_array(oi),bc,btc,mt,md);
+	endif
+	#
+	if lang=="c"
+	#
+	if what=="CALL"
+		res=sprintf("%s()",blas_tester_function("id  ",lang,mti,mdi,tc,oi,rms,tri,alpha,beta,incx,incy));
+	endif
+	#
+	if what=="decl"
+		#bih="blas_invalid_handle";
+		bih="-1";
+		res=sprintf("static rsb_err_t %s(void)\n{\n\t%s\n",
+		blas_tester_function("id  ",lang,mti,mdi,tc,oi,rms,tri,alpha,beta,incx,incy),
+		blas_tester_function("comm",lang,mti,mdi,tc,oi,rms,tri,alpha,beta,incx,incy)
+		);
+		a=gen_test_matrix(op,n,tc,mti,mdi);
+#		res=[res,"	rsb_err_t errval = RSB_ERR_NO_ERROR;\n"];
+		res=[res,"	rsb_err_t errval = RSB_BLAS_ERROR;\n"];
+		res=[res,"\tblas_sparse_matrix A = ",bih,";\n"];
+		res=[res,sprintf("\tenum blas_trans_type transT=%s;\n",tr)];
+		res=[res,sprintf("\tint incx=%d;\n",incx)];
+		if op=="usmv"
+			res=[res,sprintf("\tint incy=%d;\n",incy)];
+		endif
+		res=[res,sprintf("\t%s alpha=%d;\n",tn,alpha)];
+		res=sprintf("%s\t%s",res,print_matrix(a,"c"));
+		res=sprintf("%s\t/* declaration of VA,IA,JA */\n %s",res,dump_c_coo(a,tn,"c"));
+		if op=="usmv"
+			res=[res,dump_csmm(a,mti,mdi,1,1,alpha,beta,tri,tn,incx,incy)];
+		else
+			nrhs=1; res=[res,dump_spsv(a,mdi,1,1,alpha,nrhs,tri,tn,incx,incy)];
+		endif
+
+		res=sprintf("%s\t%s%s%s%s%s\n",res,"if(!RSB_BLAS_SUPPORTED_TYPE('",bc,"')){printf(\"type=",bc," unsupported: skipping test.\\n\");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}");
+		res=sprintf("%s\t%s%s%s\n",res,"if((nnz == 0 ) && !RSB_BLAS_SUPPORT_EMPTY ){ printf(\"empty matrices are unsupported: skipping test.\\n\");errval=RSB_ERR_UNSUPPORTED_TYPE;goto err;}\n");
+		res=sprintf("%s\t%s%s%s\n",res,"A = BLAS_",bc,"uscr_begin(nr,nc);");
+#		gotoferrlabel="goto ferr;";
+		gotoferrlabel="{RSB_ERROR(\"!\\n\");goto ferr;}";
+		gotoferrlabel_pah="{RSB_ERROR(\"uscr_begin() gave %d!\\n\",A);goto ferr;}";
+		gotoferrlabel_ussp="{RSB_ERROR(\"ussp() gave %d!\\n\",A);goto ferr;}";
+		gotoferrlabel_inse="{RSB_ERROR(\"uscr_insert_entries() gave %d!\\n\",A);goto ferr;}";
+		gotoferrlabel_end="{RSB_ERROR(\"uscr_end() gave %d!\\n\",A);goto ferr;}";
+#		res=sprintf("%s\t%s%s\n",res,"if( A == blas_invalid_handle )\n		",gotoferrlabel_pah);
+		res=sprintf("%s\t%s%s\n",res,"if( A == -1 )\n		",gotoferrlabel_pah);
+		if op=="ussv"
+			if mt=='u'
+				res=[res,"	if( BLAS_ussp(A,blas_upper_triangular) != RSB_BLAS_NO_ERROR )\n"]; # NEW
+			elseif mt=='l'
+				res=[res,"	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )\n"]; # NEW
+			else
+				res=[res,"	if( BLAS_ussp(A,blas_lower_triangular) != RSB_BLAS_NO_ERROR )\n"]; # NEW
+			endif
+			res=[res,"		",gotoferrlabel_ussp,"\n"];
+			#res=[res,"\tBLAS_ussp(A,blas_upper_triangular);\n"]; # NEW
+		endif
+		if md=='i'
+			res=[res,"	if( BLAS_ussp(A,blas_unit_diag) != RSB_BLAS_NO_ERROR )\n"];
+			res=[res,"		",gotoferrlabel_ussp,"\n"];
+		endif
+		if mt=='s'
+			res=[res,"	if( BLAS_ussp(A,blas_lower_symmetric) != RSB_BLAS_NO_ERROR )\n"];
+			res=[res,"		",gotoferrlabel_ussp,"\n"];
+		endif
+		if mt=='h'
+			res=[res,"	if( BLAS_ussp(A,blas_lower_hermitian) != RSB_BLAS_NO_ERROR )\n"];
+			res=[res,"		",gotoferrlabel_ussp,"\n"];
+		endif
+		res=sprintf("%s\t%s%s%s%s\n",res,"if( BLAS_",bc,"uscr_insert_entries(A,nnz,VA,IA,JA) != RSB_BLAS_NO_ERROR)\n		",gotoferrlabel_inse);
+		res=[res,sprintf("\t%s%s%s%s\n","if( BLAS_",bc,"uscr_end(A) != RSB_BLAS_NO_ERROR )\n		",gotoferrlabel_end)];
+	
+		if op=="usmv"
+			res=[res,sprintf("\tif( BLAS_%s%s(transT,%salpha,A,x,incx,y,incy) != RSB_BLAS_NO_ERROR )\n		%s\n",bc,op,pointer_symbol_if_type(tc),gotoferrlabel)];
+			res=[res,check_csmm(a,mti,mdi,1,1,alpha,beta,tri,incx,incy,tc,op,lang)];
+		elseif op=="ussv"
+			res=[res,sprintf("\tif( BLAS_%s%s(transT,%salpha,A,y,incx) != RSB_BLAS_NO_ERROR )\n		%s\n",bc,op,pointer_symbol_if_type(tc),gotoferrlabel)];
+			res=[res,check_spsv(a,mti,mdi,1,1,alpha,beta,tri,incx,incy,tc,op,lang)];
+		endif
+		res=[res,sprintf("%s%s\n","\n\tif( BLAS_usds(A) != RSB_BLAS_NO_ERROR )\n		",gotoferrlabel)];
+		cm=[check_message(a,mti,mdi,1,1,alpha,beta,tri,incx,incy,tc,op)];
+		nok=[cm," is not ok"];
+		res=[res,"	goto ok;\n"];
+		res=[res,"ferr:\n"];
+		res=[res,"	RSB_ERROR(\"",nok,"\\n\");\n"];
+		res=[res,"	RSB_ERROR(lsc);\n"];
+		res=[res,"	RSB_ERROR(\"Computed solution: y'=\\n\");\n"];
+		typecode=["'",toupper(tc),"'"];
+		res=[res,"	rsb_sbtc_print_vec(y,nr,",typecode,");\n"];
+		res=[res,"err:\n"];
+#		res=[res,sprintf("	%s","return RSB_ERR_NO_ERROR;\n\n	BLAS_usds(A);\n")];
+		res=[res,sprintf("	return errval;\n")];
+		res=[res,sprintf("ok:	return RSB_ERR_NO_ERROR;\n}\n")];
+	endif
+	#
+	elseif lang=="f"
+	#
+		
+		if what=="CALL"
+			res=[res,findent,"",id,""];
+		endif
+		if what=="decl"
+			res=[res,findent,"SUBROUTINE ",id,"(errval)\n"];
+			res=[res,findent,"USE blas_sparse\n"];
+			res=[res,findent,"IMPLICIT NONE\n"];
+			res=[res,findent,"INTEGER::errval,istat=0,i\n"];
+			res=[res,findent,"INTEGER::A\n"];
+			res=[res,findent,"INTEGER::transT=",tr,"\n"];
+			res=[res,findent,decl_var(lang,"INTEGER","incx",incx)];
+			if op=="usmv"
+			res=[res,findent,decl_var(lang,"INTEGER","incy",incy)];
+			endif
+			res=[res,findent,decl_var(lang,tn,"alpha",alpha)];
+			a=gen_test_matrix(op,n,tc,mti,mdi);
+			res=sprintf("%s%s\n",res,print_matrix(a,lang));
+			res=sprintf("%s%s! declaration of VA,IA,JA \n%s\n",res,findent,dump_c_coo(a,tn,lang));
+			if op=="usmv"
+				res=[res,dump_csmm(a,mti,mdi,1,1,alpha,beta,tri,tn,incx,incy,lang)];
+			else
+			nrhs=1; res=[res,dump_spsv(a,mdi,1,1,alpha,nrhs,tri,tn,incx,incy,lang)];
+			endif
+			res=[res,findent,"errval=0\n"];
+			res=[res,findent,sprintf("CALL %suscr_begin(nr,nc,A,errval)\n",bc)];
+			res=[res,findent,"IF(errval.NE.0)GOTO 9999\n"];
+###############################################################################
+			if op=="ussv"
+				if mt=='u'
+					res=[res,findent,"CALL ussp(A,blas_upper_triangular,istat)\n"]; # NEW
+				elseif mt=='l'
+					res=[res,findent,"CALL ussp(A,blas_lower_triangular,istat)\n"]; # NEW
+				else
+					res=[res,findent,"CALL ussp(A,blas_lower_triangular,istat)\n"]; # NEW
+				endif
+			endif
+			if md=='i'
+				res=[res,findent,"CALL ussp(A,blas_unit_diag,istat)\n"];
+			endif
+			if mt=='s'
+				res=[res,findent,"CALL ussp(A,blas_lower_symmetric,istat)\n"];
+			endif
+			if mt=='h'
+				res=[res,findent,"CALL ussp(A,blas_lower_hermitian,istat)\n"];
+			endif
+			res=[res,findent,"IF(istat.NE.0)GOTO 9997\n"];
+			res=[res,findent,"CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)\n"];
+			res=[res,findent,"IF(istat.NE.0)GOTO 9997\n"];
+###############################################################################
+			res=[res,findent,"CALL uscr_end(A,istat)\n"];
+			res=[res,findent,"IF(istat.NE.0)GOTO 9997\n"];
+			if op=="usmv"
+				res=[res,findent,sprintf("CALL %s(transT,alpha,A,x,incx,y,incy,istat)\n",op),""];
+				res=[res,findent,"IF(istat.NE.0)GOTO 9997\n"];
+				res=[res,check_csmm(a,mti,mdi,1,1,alpha,beta,tri,incx,incy,tc,op,lang)];
+			elseif op=="ussv"
+				res=[res,findent,sprintf("CALL %s(transT,alpha,A,y,incx,istat)\n",op)];
+			res=[res,findent,"IF(istat.NE.0)GOTO 9997\n"];
+				res=[res,check_spsv(a,mti,mdi,1,1,alpha,beta,tri,incx,incy,tc,op,lang)];
+			endif
+			res=[res,findent,"GOTO 9998\n"];
+			res=[res,"9997",findent,"errval=-1\n"];
+			res=[res,"9998",findent,"CONTINUE\n"];
+			res=[res,findent,"CALL usds(A,istat)\n"];
+			res=[res,findent,"IF(istat.NE.0)errval=-1\n"];
+			res=[res,"9999",findent,"CONTINUE\n"];
+			res=[res,findent,"end SUBROUTINE ",id," \n"];
+		endif
+	#
+	elseif lang=="p"
+	#
+		
+		if what=="CALL"
+			res=[res,findent,"",id,""];
+		endif
+		if what=="decl"
+			a=gen_test_matrix(op,n,tc,mti,mdi);
+			cm=[check_message(a,mti,mdi,1,1,alpha,beta,tri,incx,incy,tc,op)];
+			#pro=[cm," has problems"];
+			nok=[cm," is not ok"];
+			pro=nok;
+			ok=[cm," is ok"];
+			#
+			res=[res,findent,"SUBROUTINE ",id,"(errval,afmt,ictxt)\n"];
+			res=[res,findent,"USE psb_base_mod\n"];
+			res=[res,findent,"IMPLICIT NONE\n"];
+			res=[res,findent,"CHARACTER(LEN=*) :: afmt\n"];
+#			res=[res,findent,"CHARACTER(LEN=psb_fidasize_) :: afmt\n"];
+			res=[res,findent,"TYPE(psb_",bc,"spmat_type) :: a\n"];
+			res=[res,findent,"TYPE(psb_desc_type)   :: desc_a\n"];
+			res=[res,findent,"INTEGER            :: ictxt, iam=-1, np=-1\n"];
+			res=[res,findent,"INTEGER            :: info=-1\n"];
+#			res=[res,findent,"INTEGER   :: idim\n"];
+			res=[res,findent,"\n"];
+			res=[res,findent,"INTEGER::errval,istat=0,i\n"];
+			res=[res,findent,"CHARACTER::transA=",trc,"\n"];
+			res=[res,findent,decl_var(lang,"INTEGER","incx",incx)];
+			if op=="usmv"
+			res=[res,findent,decl_var(lang,"INTEGER","incy",incy)];
+			endif
+			res=[res,findent,decl_var(lang,tn,"alpha",alpha)];
+			res=[res,findent,decl_var(lang,tn,"beta",beta)];
+			res=sprintf("%s%s\n",res,print_matrix(a,lang));
+			res=sprintf("%s%s! declaration of VA,IA,JA \n%s\n",res,findent,dump_c_coo(a,tn,lang));
+			if op=="usmv"
+				res=[res,dump_csmm(a,mti,mdi,1,1,alpha,beta,tri,tn,incx,incy,lang)];
+			else
+			nrhs=1; res=[res,dump_spsv(a,mdi,1,1,alpha,nrhs,tri,tn,incx,incy,lang)];
+			endif
+			res=[res,findent,"errval=0\n"];
+#			res=[res,findent,"afmt='CSR'\n"];
+#			res=[res,findent,"CALL psb_init(ictxt)\n"];
+			res=[res,findent,"CALL psb_info(ictxt,iam,np)\n"];
+			res=[res,findent,"IF(iam<0)THEN\n"];
+			res=[res,findent,findent,"info=-1\n"];
+			res=[res,findent,findent,"GOTO 9999\n"];
+			res=[res,findent,"ENDIF\n"];
+			res=[res,findent,"CALL psb_barrier(ictxt)\n"];
+			res=[res,findent,"CALL psb_cdall(ictxt,desc_a,info,nl=nr)\n"];
+			res=[res,findent,"IF (info .NE. 0)GOTO 9996\n"];
+			res=[res,findent,"CALL psb_spall(a,desc_a,info,nnz=nnz)\n"];
+			res=[res,findent,"IF (info .NE. 0)GOTO 9996\n"];
+			if op=="ussv"
+			res=[res,findent,"a%descra='TLN'\n"];
+			endif
+			res=[res,findent,"CALL psb_barrier(ictxt)\n"];
+			res=[res,findent,"CALL psb_spins(nnz,IA,JA,VA,a,desc_a,info)\n"];
+			res=[res,findent,"IF (info .NE. 0)GOTO 9996\n"];
+			res=[res,findent,"CALL psb_cdasb(desc_a,info)\n"];
+			res=[res,findent,"IF (info .NE. 0)GOTO 9996\n"];
+			res=[res,findent,"CALL psb_spasb(a,desc_a,info,dupl=psb_dupl_err_,afmt=afmt)\n"];
+			res=[res,findent,"IF(info.NE.0)PRINT *,\"matrix assembly failed\"\n"];
+			res=[res,findent,"IF(info.NE.0)GOTO 9996\n"];
+			res=[res,findent,"\n"];
+#			res=[res,findent,sprintf("CALL %suscr_begin(nr,nc,A,errval)\n",bc)];
+#			res=[res,findent,"IF(errval.NE.0)GOTO 9999\n"];
+#			res=[res,findent,"CALL uscr_insert_entries(A,nnz,VA,IA,JA,istat)\n"];
+#			res=[res,findent,"IF(istat.NE.0)GOTO 9996\n"];
+#			res=[res,findent,"CALL uscr_end(A,istat)\n"];
+#			res=[res,findent,"IF(istat.NE.0)GOTO 9996\n"];
+			if op=="usmv"
+				res=[res,findent,sprintf("CALL psb_spmm(alpha,A,x,beta,y,desc_a,info,transA)\n",op),""];
+				res=[res,findent,"IF(info.NE.0)PRINT *,\"psb_spmm failed\"\n"];
+				res=[res,findent,"IF(info.NE.0)GOTO 9996\n"];
+				res=[res,check_csmm(a,mti,mdi,1,1,alpha,beta,tri,incx,incy,tc,op,lang)];
+			elseif op=="ussv"
+#				res=[res,findent,"x(:)=y(:)\n"];
+#				res=[res,findent,"beta=0\n"];
+#				res=[res,findent,sprintf("CALL psb_spsm(alpha,A,x,beta,y,desc_a,info,transA,diag=x)\n",op)];
+				res=[res,findent,sprintf("CALL psb_spsm(alpha,A,x,beta,y,desc_a,info,transA)\n",op)];
+				res=[res,findent,"IF(info.NE.0)PRINT *,\"psb_spsm failed\"\n"];
+				res=[res,findent,"IF(info.NE.0)GOTO 9996\n"];
+				res=[res,check_spsv(a,mti,mdi,1,1,alpha,beta,tri,incx,incy,tc,op,lang)];
+			endif
+#			res=[res,findent,"GOTO 9998\n"];
+#			res=[res,"9996",findent,"errval=-1\n"];
+			res=[res,"9996",findent,"CONTINUE\n"];
+			res=[res,"",findent,"IF(info .NE. 0)errval=errval+1\n"];
+			res=[res,"",findent,"CALL psb_spfree(a,desc_a,info)\n"];
+			res=[res,findent,"IF (info .NE. 0)GOTO 9997\n"];
+			res=[res,"9997",findent,"CONTINUE\n"];
+			res=[res,"",findent,"IF(info .NE. 0)errval=errval+1\n"];
+			res=[res,"",findent,"CALL psb_cdfree(desc_a,info)\n"];
+			res=[res,findent,"IF (info .NE. 0)GOTO 9998\n"];
+			res=[res,"9998",findent,"CONTINUE\n"];
+			res=[res,"",findent,"IF(info .NE. 0)errval=errval+1\n"];
+#			res=[res,"9998",findent,"CONTINUE\n"];
+#			res=[res,findent,"CALL usds(A,istat)\n"];
+#			res=[res,findent,"IF(istat.NE.0)errval=-1\n"];
+#			res=[res,"9999",findent,"CONTINUE\n"];
+#			res=sprintf("%s%s",res,findent,"CALL psb_exit(ictxt)\n");
+			res=[res,"9999",findent,"CONTINUE\n"];
+			res=[res,"",findent,"IF(info .NE. 0)errval=errval+1\n"];
+			res=[res,findent,findent,"IF(errval.NE.0)PRINT*,\"",pro,"\"\n"];
+			res=[res,findent,findent,"IF(errval.EQ.0)PRINT*,\"",ok,"\"\n"];
+			res=[res,findent,"END SUBROUTINE ",id," \n"];
+		endif
+	end
+end
+
+function all_test(lang,what)
+global blas_op_codes_num;
+global max_random_matrix_size_a;
+global blas_type_codes_array;
+global blas_trans_codes_array;
+global alpha_array;
+global beta_array;
+global incx_array;
+global incy_array;
+global findent;
+global matrix_types_array;
+global matrix_diagonal;
+#
+if what=="decl"
+	what_here="decl";
+else
+	what_here="CALL";
+endif
+#
+#for mdi=2:length(matrix_diagonal)
+for mdi=1:length(matrix_diagonal)
+for mti=1:length(matrix_types_array)
+for ti=1:length(blas_type_codes_array)
+tc=blas_type_codes_array(ti);
+for oi=1:blas_op_codes_num
+#for oi=2:2
+#for oi=1:1
+for rmsi=1:length(max_random_matrix_size_a)
+rms=max_random_matrix_size_a(rmsi);
+for alphai=1:length(alpha_array)
+for betai=1:length(beta_array)
+for incxi=1:length(incx_array)
+for incyi=1:length(incy_array)
+incy=incy_array(incyi);
+incx=incx_array(incxi);
+for tri=1:length(blas_trans_codes_array)
+res="";
+beta=beta_array(betai);
+alpha=alpha_array(alphai);
+op=blas_op_codes_array(oi);
+mt=matrix_types_array(mti);
+md=matrix_diagonal(mdi);
+#
+#op
+#mt
+if !xor( (mt=='l' || mt=='u'),( strcmp(op,"usmv")==0) ) # FIXME: for some reason, op=="usmv" is not as op=="ussv"
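+# i.e. generate ussv testers only for the triangular ('l'/'u') matrix types, and usmv testers for all the others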
+##if true
+#if (((mt=='l') || (mt=='u')) && ( op == "ussv")) || (((mt!='l') && (mt!='u')) && ( op != "ussv")) 
+#	res=[res,sprintf("/* %s %s */\n",op,mt)];
+#if (mt=='l' || mt=='u' ) && op=="ussv"
+#if 1
+#if (mt=='l' || mt=='u') && (op != "ussv") ; continue ; endif
+#
+if !(lang=="p" && (incx!=1 || incy!=1 || (beta!=0 && op=="ussv") || mt!='g' || md!='e'))
+#
+if (!(op=="ussv" && lang!="p"  && (beta!=1 || incy!=incx))) && (!(op=="usmv" && lang!="p"  && (beta==0 ))) && (!(op=="ussv" && (mt=='s' || mt=='h' || mt=='g')))
+#if !(op=="ussv" && lang!="p"  && (beta!=1 || tri>1 || incy!=incx)) && !(op=="usmv" && lang!="p"  && (beta==0 ))
+#
+if lang=="c"
+#
+if what_here=="CALL"
+	fid=blas_tester_function(what_here,lang,mti,mdi,tc,oi,rms,tri,alpha,beta,incx,incy);
+	res=sprintf("%s\t errval = %s;\n",res,fid);
+#	res=sprintf("%s\tif( errval != RSB_ERR_NO_ERROR )++failed;else++passed;\n",res);
+res=sprintf("%s\tif( errval== RSB_ERR_NO_ERROR )++passed;else{if(errval==RSB_ERR_UNSUPPORTED_TYPE)++skipped,errval=RSB_ERR_NO_ERROR ;else++failed;}\n",res);
+	res=sprintf("%s\tif( errval != RSB_ERR_NO_ERROR )RSB_ERROR(\"%s failed!\\n\");\n",res,fid);
+else
+	res=sprintf("%s",sprintf("%s\t%s",res,blas_tester_function(what_here,lang,mti,mdi,tc,oi,rms,tri,alpha,beta,incx,incy)));
+	res=sprintf("%s\n",res);
+endif
+#
+#
+elseif lang=="f"
+#
+if what_here=="CALL"
+	res=[res,findent,"CALL ",blas_tester_function(what_here,lang,mti,mdi,tc,oi,rms,tri,alpha,beta,incx,incy),"(errval)\n"];
+	res=[res,findent,"IF(errval.LT.0)failed=failed+1\n"];
+	res=[res,findent,"IF(errval.EQ.0)passed=passed+1\n"];
+	res=[res,findent,"\n"];
+else
+#	res=[res,"! declaration ... \n"];
+	res=[res,"! \n"];
+	res=[res,"",blas_tester_function(what_here,lang,mti,mdi,tc,oi,rms,tri,alpha,beta,incx,incy),""];
+#	res=[res,findent,"! TODO: still unimplemented \n"];
+endif
+#
+elseif lang=="p"
+#
+if what_here=="CALL"
+	res=[res,findent,"CALL ",blas_tester_function(what_here,lang,mti,mdi,tc,oi,rms,tri,alpha,beta,incx,incy),"(errval,afmt,ictxt)\n"];
+	res=[res,findent,"IF(errval.NE.0)failed=failed+1\n"];
+	res=[res,findent,"IF(errval.EQ.0)passed=passed+1\n"];
+	res=[res,findent,"errval=0\n"];
+	res=[res,findent,"\n"];
+else
+#	res=[res,"! declaration ... \n"];
+	res=[res,"! \n"];
+	res=[res,"",blas_tester_function(what_here,lang,mti,mdi,tc,oi,rms,tri,alpha,beta,incx,incy),""];
+#	res=[res,findent,"! TODO: still unimplemented \n"];
+endif
+#
+endif
+	printf("%s",res);
+#
+end
+end
+end
+end
+end
+end
+end
+end
+end
+end
+end
+end
+end
+#
+#
+end # end all_test function
+
+function lt=rsb_octave_license(lang);
+#
+pre="";
+if lang == "f" 
+	pre="! ";
+end 
+lt="";
+fd=fopen("rsb_license_header.inc","r");
+while (ll=fgets(fd,1024)) != -1 ;
+lt=sprintf("%s%s%s",lt,pre,ll);
+endwhile;
+fclose(fd);
+#
+end 
+
+
diff --git a/scripts/Makefile.am b/scripts/Makefile.am
new file mode 100644
index 0000000..dd1f7f6
--- /dev/null
+++ b/scripts/Makefile.am
@@ -0,0 +1,5 @@
+subdir=scripts
+
+EXTRA_DIST= \
+	*.sh \
+	*.awk
diff --git a/scripts/Makefile.in b/scripts/Makefile.in
new file mode 100644
index 0000000..7bb25b9
--- /dev/null
+++ b/scripts/Makefile.in
@@ -0,0 +1,435 @@
+# Makefile.in generated by automake 1.11.6 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software
+# Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+VPATH = @srcdir@
+am__make_dryrun = \
+  { \
+    am__dry=no; \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        echo 'am--echo: ; @echo "AM"  OK' | $(MAKE) -f - 2>/dev/null \
+          | grep '^AM OK$$' >/dev/null || am__dry=yes;; \
+      *) \
+        for am__flg in $$MAKEFLAGS; do \
+          case $$am__flg in \
+            *=*|--*) ;; \
+            *n*) am__dry=yes; break;; \
+          esac; \
+        done;; \
+    esac; \
+    test $$am__dry = yes; \
+  }
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/rsb-config.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+ARFLAGS = @ARFLAGS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+CC = @CC@
+CCAS = @CCAS@
+CCASDEPMODE = @CCASDEPMODE@
+CCASFLAGS = @CCASFLAGS@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DOXYGEN = @DOXYGEN@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FC = @FC@
+FCFLAGS = @FCFLAGS@
+FGREP = @FGREP@
+GREP = @GREP@
+HELP2MAN = @HELP2MAN@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBRSB_ABI_VERSION = @LIBRSB_ABI_VERSION@
+LIBRSB_LIBRSB_VER = @LIBRSB_LIBRSB_VER@
+LIBRSB_MAIN_RELEASE = @LIBRSB_MAIN_RELEASE@
+LIBRSB_VERSION = @LIBRSB_VERSION@
+LIBRSB_VER_DATE = @LIBRSB_VER_DATE@
+LIBRSB_VER_MAJOR = @LIBRSB_VER_MAJOR@
+LIBRSB_VER_MINOR = @LIBRSB_VER_MINOR@
+LIBRSB_VER_PATCH = @LIBRSB_VER_PATCH@
+LIBRSB_VER_PRERS = @LIBRSB_VER_PRERS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+M4 = @M4@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NOUNROLLCFLAGS = @NOUNROLLCFLAGS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OCTAVE = @OCTAVE@
+OCTAVE_FLAGS = @OCTAVE_FLAGS@
+OPENMP_CFLAGS = @OPENMP_CFLAGS@
+OPENMP_FCFLAGS = @OPENMP_FCFLAGS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+RANLIB = @RANLIB@
+RSB_CONST_MAX_SUPPORTED_THREADS = @RSB_CONST_MAX_SUPPORTED_THREADS@
+RSB_DETECTED_MEM_HIERARCHY_INFO = @RSB_DETECTED_MEM_HIERARCHY_INFO@
+RSB_RSBENCH_CFLAGS = @RSB_RSBENCH_CFLAGS@
+RSB_RSBENCH_LIBS = @RSB_RSBENCH_LIBS@
+RSB_USER_SET_MEM_HIERARCHY_INFO = @RSB_USER_SET_MEM_HIERARCHY_INFO@
+SED = @SED@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+STRIP = @STRIP@
+SVN_REVISION = @SVN_REVISION@
+VERSION = @VERSION@
+WANT_COLUMN_UNLOOP_FACTORS = @WANT_COLUMN_UNLOOP_FACTORS@
+WANT_HALFWORD_INDICES = @WANT_HALFWORD_INDICES@
+WANT_LOOPING_KERNELS = @WANT_LOOPING_KERNELS@
+WANT_MATRIX_ALL_META_OPS = @WANT_MATRIX_ALL_META_OPS@
+WANT_MATRIX_ALL_OPS = @WANT_MATRIX_ALL_OPS@
+WANT_MATRIX_ALL_TYPES = @WANT_MATRIX_ALL_TYPES@
+WANT_MATRIX_BCOO_STORAGE = @WANT_MATRIX_BCOO_STORAGE@
+WANT_MATRIX_BCSS_STORAGE = @WANT_MATRIX_BCSS_STORAGE@
+WANT_MATRIX_LINKED_STORAGE = @WANT_MATRIX_LINKED_STORAGE@
+WANT_MATRIX_OPS = @WANT_MATRIX_OPS@
+WANT_MATRIX_STORAGE = @WANT_MATRIX_STORAGE@
+WANT_MATRIX_VB_STORAGE = @WANT_MATRIX_VB_STORAGE@
+WANT_ROW_UNLOOP_FACTORS = @WANT_ROW_UNLOOP_FACTORS@
+WANT_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR = @WANT_SIMPLE_LOOP_UNROLL_DEFAULT_FACTOR@
+WANT_SPSM_DIAG_CHECK = @WANT_SPSM_DIAG_CHECK@
+WANT_TYPES = @WANT_TYPES@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_ct_FC = @ac_ct_FC@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+enable_openmp = @enable_openmp@
+enable_restrict = @enable_restrict@
+exec_prefix = @exec_prefix@
+have_grep = @have_grep@
+have_sed = @have_sed@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target_alias = @target_alias@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+subdir = scripts
+EXTRA_DIST = \
+	*.sh \
+	*.awk
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in:  $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu scripts/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --gnu scripts/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure:  $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4):  $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+tags: TAGS
+TAGS:
+
+ctags: CTAGS
+CTAGS:
+
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic clean-libtool \
+	distclean distclean-generic distclean-libtool distdir dvi \
+	dvi-am html html-am info info-am install install-am \
+	install-data install-data-am install-dvi install-dvi-am \
+	install-exec install-exec-am install-html install-html-am \
+	install-info install-info-am install-man install-pdf \
+	install-pdf-am install-ps install-ps-am install-strip \
+	installcheck installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-generic \
+	mostlyclean-libtool pdf pdf-am ps ps-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/scripts/callgrind.sh b/scripts/callgrind.sh
new file mode 100644
index 0000000..9236f11
--- /dev/null
+++ b/scripts/callgrind.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+# This script is intended for librsb developer usage.
+CMD=" ./rsbench  -oa -Ob --dense 100 --compare-competitors --verbose -R -qH --no-want-ancillary-execs -n 2"
+valgrind  --tool=callgrind --cache-sim=yes --dump-instr=yes  --separate-threads=yes $CMD
+# now one can use a tool such as qcachegrind to analyze the output
diff --git a/scripts/configure_for_debug.sh b/scripts/configure_for_debug.sh
new file mode 100755
index 0000000..187f4b6
--- /dev/null
+++ b/scripts/configure_for_debug.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+# This script is an example of configuring the library for debug purposes.
+
+./configure '--enable-allocator-wrapper'  '--enable-debug' \
+       	'FC=gfortran' \
+       	'CC=gcc' \
+       	'CFLAGS=-O0 -ggdb -pipe -Wall -Wredundant-decls -Wno-switch -Wdisabled-optimization -Wdeclaration-after-statement   -Wpointer-arith -Wstrict-prototypes ' \
+	'FCFLAGS=-O0 -ggdb  '	\
+	'--enable-librsb-stats' \
+	'--enable-rsb-num-threads' \
+	'--enable-zero-division-checks-on-solve' \
+	"$@"
+
diff --git a/scripts/devtests.sh b/scripts/devtests.sh
new file mode 100755
index 0000000..c1a2370
--- /dev/null
+++ b/scripts/devtests.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+#
+# Copyright (C) 2008-2016 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+# This script is intended for librsb developer usage.
+
+# in the following line, libefence may fail when librsb allocates e.g. 0 bytes (which happens)
+LD_PRELOAD=libefence.so.0.0 ./rsbench -B # || exit 255 # efence cannot handle zero-sized reallocs
+if grep RSB_REINIT_SINGLE_VALUE *.c --exclude rsb_rsb.c ; then exit 255 ; fi
+#if grep '//' *.c  ; then exit 255 ; fi # TODO: activate this.
+if grep -n 'RSB_DO_ERR_RETURN\>' rsb_rsb.c  ; then exit 255; else true ; fi
+if cpp rsb.h | grep '()$'   ; then echo '[!] failed'; exit 255 ; else true ; fi
+#for f in *.h ; do if cpp $f | grep '()$'   ; then echo '[!] failed'; exit 255 ; else true ; fi ; done
+if grep -n --exclude=rsb_rsb.c 'RSB_DO_ERR_RETURN_INTERFACE\>' *.c ; then exit 255 ; else true ; fi
+if test -f librsb.a ; then
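+# namespace hygiene checks: no global data (D/G) or text (T) symbols, and no archive member names, outside the rsb_/BLAS_/blas_ prefixes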
+if nm librsb.a | grep  '\s[DG]\s' | grep -v '\s[DG]\s''rsb_' ; then exit 255 ; else true ; fi
+if nm librsb.a  | grep '\<T\>' | sed 's/^.*\s//g' | grep -v '^\(rsb\|BLAS\|blas\|__\)' ; then exit 255 ; else true ; fi
+if ar t librsb.a | grep -v  '_a-rsb' | grep -v ^rsb_ ; then echo '[!] failed source filenames check'; exit 255; else true ; fi
+else
+echo "no librsb.a -- skipping part of the test."
+fi
+flawfinder rsb_rsb.c | tee flawfinder.log
+echo "output of running flawfinder in rats.log"
+rats rsb_rsb.c | tee rats.log
+if cat *.F90 examples/*.F90| sed 's/!.*$//g'| grep '^.\{73,\}' ; then echo 'Some source code exceeds 72 chars!'; fi
+if grep -n '^.\{81,\}' README ; then exit 255 ; else true ; fi
+if grep -n '^.\{81,\}' NEWS       ; then exit 255 ; else true ; fi
+if grep '	' *.F90 */*.F90 ; then exit 255 ; else true ; fi
+./rsbench -E 0.1s || exit 255
+./rsbench  --limits-testing || exit 255
+echo "output of running rats in rats.log"
diff --git a/scripts/doc-tests.sh b/scripts/doc-tests.sh
new file mode 100755
index 0000000..285f0c7
--- /dev/null
+++ b/scripts/doc-tests.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+if test x"${srcdir}" = x ; then srcdir=. ; fi
+# This script is intended for librsb developer usage.
+if cat ${srcdir}/examples/*.c | grep '^.\{71,\}' ; then echo 'Some source code exceeds 70 chars!'; grep -n '^.\{71,\}' ${srcdir}/examples/*.c ; exit 255 ; else true ; fi
+if cat ${srcdir}/README       | grep '^.\{81,\}' ; then echo 'Some source code exceeds 80 chars!'; grep -n '^.\{81,\}' ${srcdir}/README       ; exit 255 ; else true ; fi
+exit 0;
diff --git a/scripts/eda.sh b/scripts/eda.sh
new file mode 100755
index 0000000..13a8f15
--- /dev/null
+++ b/scripts/eda.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+# eda.sh -- Use m4 and a Doxyfile to expand the Doxygen aliases in the documentation.
+filter() { sed 's/\\rsb/RSB_D_/g;s/\\librsb\>/RSB_D_librsb/g;s/\\see_/RSB_D_see/g'| sed "s/#/"'\`'"#'/g"; }
+cat doc/Doxyfile | grep -v "'" | grep -v '^#' | grep ^ALIASES | sed 's/^[^"]\+//g' | sed 's/^"/\\/g;s/" *$//g' |  filter | sed 's/\([^=]\+\)=\([^=]*\)/define(\`\1'"'"', \`\2'"'"')dnl/g'   > Doxyfile.m4
+( echo "include("'`'"Doxyfile.m4')dnl " ; cat rsb_rsb.c ; ) | filter > rsb_rsb.m4
+cat rsb_rsb.m4 | m4 -I . > rsb_rsb_e.c
+grep RSB_D_ rsb_rsb_e.c && false
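+# the final grep is presumably meant to flag leftover RSB_D_ markers, i.e. Doxygen aliases left unexpanded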
diff --git a/scripts/gcov.sh b/scripts/gcov.sh
new file mode 100755
index 0000000..53b4d76
--- /dev/null
+++ b/scripts/gcov.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+# This script is intended for librsb developer usage.
+
+if ! grep CFLAGS.*coverage Makefile 2>&1 > /dev/null  ; then
+	echo "[!] Cannot perform coverage test (did not compile with --coverage !?)" 
+	exit
+else
+	true;
+fi
+cd examples 
+cd -
+make -j 2 all sbtc || exit 1
+
+#rm -f *.gcda        *.gcov
+lcov           --directory `pwd` --zerocounters
+
+make qqtests        || exit 1
+scripts/devtests.sh
+./rsbench --generate-matrix -r 100 -c 100 -n 1024 >  /dev/shm/rsb_matrix.mtx && ./rsbench -oa -Ob -R  -f  /dev/shm/rsb_matrix.mtx # for coverage of rsb_util_sort_row_major_parallel
+./rsbench -oa -Ob -R  --dense 2 --zig-zag # coverage of rsb_do_reverse_odd_rows
+RSB_SHORT_TEST_SH=1 sh scripts/test.sh || exit 1
+for f in *.o ; do gcov -f ${f/.o/}  ; done
+cd examples || exit 1
+#rm -f *.gcda        *.gcov
+make tests  || exit 1
+for f in *.o ; do gcov -f ${f/.o/}  ; done
+cd -
+
+rm -f *.info
+lcov --capture --directory `pwd`         --output-file coverage.info
+lcov --capture --directory `pwd`/examples/ --output-file coverage-examples.info 
+lcov  -a coverage.info -a coverage-examples.info  -o coverage-total.info
+genhtml coverage-total.info --highlight --legend --no-branch-coverage --function-coverage --branch-coverage  --output-directory coverage-info-dir
+echo "[*] Coverage test performed." 
+echo "[*] At next 'make clean', remember to rm -f *.gcov *.gcno" 
diff --git a/scripts/gendense.sh b/scripts/gendense.sh
new file mode 100755
index 0000000..3c49c92
--- /dev/null
+++ b/scripts/gendense.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+# This script is intended for librsb developer usage.
+
+r=$1
+c=$2
+b=$3
+
+help()
+{
+	echo "#usage examples:" 
+	echo "#dense, r=3 c=3 ./$0"
+	echo "./$0 3 3"
+	echo "#banded, r=3 c=3 b=2 ./$0"
+	echo "./$0 3 3 2"
+	exit -1
+}
+
+[ -z "$r" ] && help
+[ -z "$c" ] && help
+
+# b should be less than r
+
+
+if test -z "$b" ;
+then
+	echo "%%MatrixMarket matrix coordinate real general"
+	echo "$r $c $((r*c))"
+	for((i=1;i<=$r;++i))
+	do
+	for((j=1;j<=$c;++j))
+	do
+		echo $i $j 1
+	done
+	done
+else
+	if [ $((r!=c)) == 1 ]
+	then
+		echo "banded matrices should be square!"
+		exit -1
+	fi
+
+	if [ $((b>=r)) == 1 ]
+	then
+		echo "band width cannot exceed matrix size-1!"
+		exit -1
+	fi
+
+	echo "%%MatrixMarket matrix coordinate real general"
+
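+	# nonzero count for half-bandwidth b on an r x r matrix: (2*b+1) entries per full row,
+	# minus b*(b+1) entries clipped away at the top and bottom corners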
+	echo "$r $c $(((2*b+1)*r-b*(b+1)))"
+	for((i=1;i<=$r;++i))
+	do
+	l=$((i-b<1?1:i-b))
+	u=$((i+b>c?c:i+b))
+	for((j=l;j<=u;++j))
+	do
+		echo $i $j 1
+	done
+	done
+fi
+
+
diff --git a/scripts/genstrided.sh b/scripts/genstrided.sh
new file mode 100755
index 0000000..5267a7e
--- /dev/null
+++ b/scripts/genstrided.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+# This script is intended for librsb developer usage.
+
+r=$1
+c=$2
+rs=${3:-1}
+cs=${4:-1}
+
+help()
+{
+	echo "#usage examples:" 
+	echo "#dense , r=3 c=3 rs=1 cs=1 ./$0"
+	echo "./$0 3 3"
+	echo "#strided, r=3 c=3 rs=1 cs=1 ./$0"
+	echo "./$0 3 3 1 1 "
+	exit -1
+}
+
+
+[ -z "$r" ] && help
+[ -z "$c" ] && help
+
+echo "%%MatrixMarket matrix coordinate real general"
+echo "$r $c $(((r/rs)*(c/cs)))"
+for((i=1;i<=$r;i+=rs))
+do
+for((j=1;j<=$c;j+=cs))
+do
+	#echo $i $j 1
+	echo $i $j $i.$j
+done
+done
+
diff --git a/scripts/gprof.sh b/scripts/gprof.sh
new file mode 100755
index 0000000..feb9cbf
--- /dev/null
+++ b/scripts/gprof.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+# This script is intended for librsb developer usage.
+
+grep CFLAGS.*pg Makefile 2>&1 > /dev/null || exit 
+make -j 2 all # sbtc
+rm -f gmon.out
+./rsbench -Q 10 || exit
+gprof ./rsbench | ~/src/scripts-ext/gprof2dot.py   | dot -Tps > profile.eps
diff --git a/scripts/hinfo.sh b/scripts/hinfo.sh
new file mode 100755
index 0000000..31e94e0
--- /dev/null
+++ b/scripts/hinfo.sh
@@ -0,0 +1,32 @@
+#!/bin/sh
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+echo
+echo	cat /proc/cpuinfo 
+	cat /proc/cpuinfo 
+
+echo
+echo	./rsbench -I
+	./rsbench -I
+
+echo
+echo	./rsbench -M
+	./rsbench -M
+echo
diff --git a/scripts/librsb-here.sh b/scripts/librsb-here.sh
new file mode 100755
index 0000000..2bcc017
--- /dev/null
+++ b/scripts/librsb-here.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+# This script is intended for use by librsb developers.
+
+RSBB_URL=svn+ssh://user@host/repository/trunk
+RSBB_SVN=${RSBB_SVN:-svn}
+RSBB_SVN_OPTIONS=${RSBB_SVN_OPTIONS:-}
+RSBB_RUN_AUTOGEN=1
+RSBB_RUN_CONFIGURE=1
+RSBB_CONFIGURE_ARGS=${RSBB_CONFIGURE_ARGS:-}
+RSBB_CONFIGURE_ADD=${RSBB_CONFIGURE_ADD:-}
+RSBB_RUN_MAKE_CLEAN=1
+RSBB_RUN_MAKE=1
+RSBB_RUN_MAKE_QTESTS=1
+RSBB_RUN_MAKE_INSTALL=1
+#
+env | grep ^RSBB
+#
+RSD=`pwd`/librsb-src
+RBD=`pwd`/librsb-usr
+#
+$RSBB_SVN ${RSBB_SVN_OPTIONS} --force co ${RSBB_URL} $RSD || exit -1
+cd $RSD || exit -1
+if test x$RSBB_RUN_AUTOGEN = x1 || test configure.ac -nt configure ; then
+	sh autogen.sh || exit -1
+fi
+if test x$RSBB_RUN_CONFIGURE = x1 ; then
+	$ECHO ./configure CC=${CC} CFLAGS="${CFLAGS} -fPIC" ${RSBB_CONFIGURE_ARGS} ${RSBB_CONFIGURE_ADD} --prefix=${RBD} || exit -1
+	#touch config.h -r is.h
+fi
+if test x$RSBB_RUN_MAKE_CLEAN = x1 ; then
+	$ECHO make clean || exit -1
+fi
+if test x$RSBB_RUN_MAKE = x1 ; then
+	$ECHO make || exit -1
+fi
+if test x$RSBB_RUN_MAKE_QTESTS = x1 ; then
+	$ECHO make qtests || exit -1
+fi
+if test x$RSBB_RUN_MAKE_INSTALL = x1 ; then
+	$ECHO make install || exit -1
+fi
+cd -
diff --git a/scripts/likwid.sh b/scripts/likwid.sh
new file mode 100755
index 0000000..232cfea
--- /dev/null
+++ b/scripts/likwid.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+# This script is intended for use by librsb developers.
+CMD="likwid-perfctr -m -c 0 -g MEM ./rsbench -oa -Ob --dense 100 --compare-competitors --verbose -R -qH --no-want-ancillary-execs -n 1 --likwid"
+$CMD
diff --git a/scripts/linux-sys-cache.sh b/scripts/linux-sys-cache.sh
new file mode 100755
index 0000000..8665212
--- /dev/null
+++ b/scripts/linux-sys-cache.sh
@@ -0,0 +1,67 @@
+#!/bin/sh
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+# This script tries to produce information about CPU caches.
+# It may easily fail, since the Linux /sys interface is volatile.
+# It serves as an alternative way to obtain cache info on pre-production systems, where the sysconf interface does not work reliably.
+
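+# For instance, on one Linux system (values are illustrative) the relevant files may read:
+#  /sys/devices/system/cpu/cpu0/cache/index0/level -> 1
+#  /sys/devices/system/cpu/cpu0/cache/index0/type  -> Data
+#  /sys/devices/system/cpu/cpu0/cache/index0/size  -> 32K
+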
+ncpu=`ls -d /sys/devices/system/cpu/cpu* | wc -l`
+
+if test "x$ncpu" = x ; then
+	echo "" 1>&2
+       	exit
+fi
+
+if test ! -d "/sys/devices/system/cpu/cpu0/cache" ; then exit ; fi 
+ncache=`ls -d /sys/devices/system/cpu/cpu0/cache/index* | wc -l`
+
+if test "x$ncache" = x ; then
+	echo "" 1>&2
+       	exit
+fi
+
+cacheinfo=""
+
+#for n in `seq 1 $ncache`
+for cf in /sys/devices/system/cpu/cpu0/cache/index*
+do
+	if test ! -d "$cf" ; then continue ; fi 
+	#echo $cf
+	tp=`cat $cf/type`
+	if test "x$tp" = x"Data" || test "x$tp" = x"Unified"  ; then
+		sz=`cat $cf/size`
+		as=`cat $cf/ways_of_associativity`
+		ls=`cat $cf/coherency_line_size`
+		lv=`cat $cf/level`
+		if test "x$lv" = x"1" ; then
+			cacheinfo="L$lv:$as/$ls/$sz" # a,b,c parameters
+		else
+			cacheinfo="L$lv:$as/$ls/$sz,$cacheinfo" # a,b,c parameters
+		fi
+	fi
+done
+
+#echo "ncpu:$ncpu ncache:$ncache cacheinfo:$cacheinfo"
+echo "$cacheinfo"
+
+# Examples:
+# L3:16/64/12288K,L2:8/64/256K,L1:8/64/32K
+# L2:4/64/512K,L1:8/64/32K
+
diff --git a/scripts/matrices_get.sh b/scripts/matrices_get.sh
new file mode 100755
index 0000000..0484fdb
--- /dev/null
+++ b/scripts/matrices_get.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+# This script is intended for use by librsb developers.
+
+# This script gets a bunch of useful test matrices.
+
+# our default matrix set is :
+#bayer02.mtx  coater2.mtx  crystk03.mtx  ex11.mtx  lhr10.mtx  memplus.mtx  orani678.mtx  raefsky4.mtx  wang4.mtx
+
+#http://www.cise.ufl.edu/research/sparse/MM/Pothen/commanche_dual.tar.gz
+#http://www.cise.ufl.edu/research/sparse/MM/Simon/venkat01.tar.gz
+#http://www.cise.ufl.edu/research/sparse/MM/Simon/venkat25.tar.gz
+#http://www.cise.ufl.edu/research/sparse/MM/Simon/venkat50.tar.gz
+
+MATRICES="\
+http://www.cise.ufl.edu/research/sparse/MM/Grund/bayer02.tar.gz		\
+http://www.cise.ufl.edu/research/sparse/MM/Brethour/coater2.tar.gz	\
+http://www.cise.ufl.edu/research/sparse/MM/Boeing/crystk03.tar.gz	\
+http://www.cise.ufl.edu/research/sparse/MM/FIDAP/ex11.tar.gz		\
+http://www.cise.ufl.edu/research/sparse/MM/Mallya/lhr10.tar.gz		\
+http://www.cise.ufl.edu/research/sparse/MM/Hamm/memplus.tar.gz		\
+http://www.cise.ufl.edu/research/sparse/MM/HB/orani678.tar.gz		\
+http://www.cise.ufl.edu/research/sparse/MM/Simon/raefsky4.tar.gz	\
+http://www.cise.ufl.edu/research/sparse/MM/Simon/raefsky3.tar.gz	\
+http://www.cise.ufl.edu/research/sparse/MM/Wang/wang4.tar.gz"
+
+MATRICES_CSB="\
+http://www.cise.ufl.edu/research/sparse/MM/Sandia/ASIC_320k.tar.gz	\
+http://www.cise.ufl.edu/research/sparse/MM/FEMLAB/sme3Dc.tar.gz		\
+http://www.cise.ufl.edu/research/sparse/MM/Wissgott/parabolic_fem.tar.gz\
+http://www.cise.ufl.edu/research/sparse/MM/Mittelmann/cont11_l.tar.gz	\
+http://www.cise.ufl.edu/research/sparse/MM/Rucci/Rucci1.tar.gz		\
+http://www.cise.ufl.edu/research/sparse/MM/Norris/torso1.tar.gz		\
+http://www.cise.ufl.edu/research/sparse/MM/Zaoui/kkt_power.tar.gz	\
+http://www.cise.ufl.edu/research/sparse/MM/Rajat/rajat31.tar.gz		\
+http://www.cise.ufl.edu/research/sparse/MM/GHS_psdef/ldoor.tar.gz	\
+http://www.cise.ufl.edu/research/sparse/MM/Oberwolfach/bone010.tar.gz"
+
+
+[[ -d "$1" ]] && { cd "$1" || exit -1 ; }
+
+for m in $MATRICES
+do
+	mbn=`basename $m`
+	mn=${mbn//.tar.gz/}
+	mfn=$mn.mtx
+
+#	file based
+#	[ -f $mbn ] || wget $m
+#	tar xzf $mbn $mn/$mfn -O > $mfn
+	
+	# pipe based
+	[ -f $mfn ] || wget $m -O - | tar xzf - $mn/$mfn -O > $mfn || exit -1
+
+done
diff --git a/scripts/mmhead.sh b/scripts/mmhead.sh
new file mode 100755
index 0000000..182bfae
--- /dev/null
+++ b/scripts/mmhead.sh
@@ -0,0 +1,39 @@
+#!/bin/sh
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+# This script is intended for use by librsb developers.
+# It extracts the headers of MatrixMarket matrix files.
+
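+# Usage example (a sketch): "sh scripts/mmhead.sh A.mtx /tmp" should write /tmp/A.mtx.head.
+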
+if=$1
+od=$2
+
+fail() { echo "[!] $@" ; exit ; }
+if   test x = x"$if" ; then fail "missing input file argument"      ; fi
+if   test x = x"$od" ; then fail "missing output directory argument" ; fi
+if ! test -f   "$if" ; then fail "input file $if not found"         ; fi
+if ! test -d   "$od" ; then fail "output directory $od not found"   ; fi
+
+
+bf=`basename $if`
+of=$od/$bf.head
+echo "$bf > $of"  
+grep ^% -C 1 $if > $of
+
+
diff --git a/scripts/mmpci2gen.sh b/scripts/mmpci2gen.sh
new file mode 100755
index 0000000..2b3df61
--- /dev/null
+++ b/scripts/mmpci2gen.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+# This script is intended for use by librsb developers.
+# It converts complex/integer/pattern MatrixMarket matrices on stdin to real MatrixMarket matrices on stdout.
+# It needs sed, cat, and bash.
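+# Usage sketch (file names are illustrative): sh scripts/mmpci2gen.sh < in.mtx > out.mtx
+# e.g. a pattern data line "2 3" would come out as "2 3 1".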
+shopt -s nocasematch
+p2r()
+{
+read line
+if [[ $line =~ pattern ]]
+then
+	printf "%s\n" "${line/ pattern/ real}"
+	while read line && [[ $line =~ ^% ]] ; do printf "%s\n" "$line" ; done
+	printf "%s\n" "% matrix adapted from pattern to real by the $0 script, `date`" 
+	printf "%s\n" "$line"
+	while read line ; do printf "%s 1\n" "$line" ; done
+else
+	printf "%s\n" "$line"
+	cat
+fi
+}
+
+i2r()
+{
+read line
+if [[ $line =~ integer ]]
+then
+	printf "%s\n" "${line/ integer/ real}"
+	while read line && [[ $line =~ ^% ]] ; do printf "%s\n" "$line" ; done
+	printf "%s\n" "% matrix adapted from integer to real by the $0 script, `date`" 
+	printf "%s\n" "$line"
+	while read line ; do printf "%s\n" "$line" ; done
+else
+	printf "%s\n" "$line"
+	cat
+fi
+}
+
+c2r()
+{
+read line
+if [[ $line =~ complex ]]
+then
+	printf "%s\n" "${line/ complex/ real}"
+	while read line && [[ $line =~ ^% ]] ; do printf "%s\n" "$line" ; done
+	printf "%s\n" "% matrix adapted from complex to real by the $0 script, `date`" 
+	printf "%s\n" "$line"
+	cat | sed 's/\s[^ 	]*$//g'
+else
+	printf "%s\n" "$line"
+	cat
+fi
+}
+
+i2r | p2r | c2r
diff --git a/scripts/mmsym2gen.sh b/scripts/mmsym2gen.sh
new file mode 100755
index 0000000..e21b344
--- /dev/null
+++ b/scripts/mmsym2gen.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+# This script is intended for use by librsb developers.
+# It expands each (row,col,value) line on stdin into its symmetric pair; note that the
+# MatrixMarket header and nonzero count must be adjusted separately.
+sed 's/^\([0-9]\+\)\(\s\+\)\([0-9]\+\)\(\s\+\)\(.*\)/\1\2\3\4\5\n\3\2\1\4\5/g'  | uniq
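+# e.g. the input line "2 1 0.5" should yield the two lines "2 1 0.5" and "1 2 0.5".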
diff --git a/scripts/nightly.sh b/scripts/nightly.sh
new file mode 100755
index 0000000..fca4503
--- /dev/null
+++ b/scripts/nightly.sh
@@ -0,0 +1,129 @@
+#!/bin/sh
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+# This script should be run as a cron-scheduled job.
+# It is intended for use by librsb developers.
+
+# You should set the FROMADDR (From address), TOADDR (To address), and SMTPHOST (SMTP host) environment variables first, e.g.:
+#FROMADDR=user at host
+#TOADDR=user at host
+#SMTPHOST=smtp.host.tld
+
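+# A hypothetical crontab entry (the path and addresses are placeholders):
+# 0 3 * * * FROMADDR=me@host TOADDR=me@host SMTPHOST=smtp.host.tld /path/to/nightly.sh --enable-openmp
+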
+FROMADDR="${FROMADDR:-nightly.sh-for-user}"
+TOADDR="${TOADDR:-user at domain.tld}"
+SMTPHOST="${SMTPHOST:-some-smtp.tld}"
+
+CONFIGURE_OPTS="$@"
+RSB_SVN_REPOSITORY="${RSB_SVN_REPOSITORY:-svn+ssh://user@host/repository/trunk}"
+TMPDIR=/tmp/
+
+MS="librsb build FAILURE report"
+alias if_err='[ $? = 0 ] || '
+alias mail_stuff="mutt -F /dev/null -a autogen.log -a env.log -a make.log -a config.log -s librsb-automated-build  $TOADDR -e 'set from=$FROMADDR;set sendmail=\"msmtp --from $FROMADDR --host=$SMTPHOST\"' < librsb.log"
+
+alias check='if_err { touch config.log librsb.log ; svn info  >> librsb.log  ; mail_stuff ;  }'
+alias fail="return -1"
+
+
+einfo() { echo  -- "[!]" $@ ; }
+
+info()  { echo  -- "[*]" $@ ; }
+
+die()   { einfo $@ ; exit -1; } 
+
+get_rsb()
+{
+	#true;
+	svn --force export $RSB_SVN_REPOSITORY librsb 
+	#mkdir -p librsb 
+}
+
+autogen_error()
+{
+	# here should go automatic reporting of the failing configure.ac ...
+	einfo "please see the autogen.log file"
+}
+
+configure_error()
+{
+	# here should go automatic reporting of the failing configure/config.log ...
+	einfo "please see the config.log file"
+}
+
+make_error()
+{
+	# here should go automatic reporting of the failing make ...
+	einfo "please see the $MAKELOG file"
+}
+
+date_ymd() { date +%Y%m%d ; }
+
+build_rsb()
+{
+	MAKELOG="make.`date_ymd`.log"
+	AUTOGENLOG="autogen.`date_ymd`.log"
+	LOG="librsb.log"
+	touch $MAKELOG || fail
+	touch $AUTOGENLOG || fail
+	rm -f $LOG || fail
+	touch $LOG || fail
+	date >>  $LOG || fail
+	ln $MAKELOG make.log 
+	ln $AUTOGENLOG autogen.log 
+	check
+	sh autogen.sh 2>&1 | tee $AUTOGENLOG  
+	check
+	#|| autogen_error "error generating initial librsb scripts"
+	./configure "${CONFIGURE_OPTS}"
+	check
+	#|| configure_error "error configuring librsb"
+	make clean 2>&1 | tee $MAKELOG
+	check
+	#|| make_error "error in making clean librsb"
+	make       2>&1 | tee $MAKELOG
+	check
+	#alias mail_stuff
+	mail_stuff
+	#|| make_error "error making librsb"
+}
+
+[ -z "$RSB_SVN_REPOSITORY" ] && die "no librsb repository specified ?"
+[ -z "$TMPDIR" ] && die "no temporary directory specified ?"
+
+
+true
+PROGRAMS="msmtp mutt svn"
+which ${PROGRAMS}
+if_err die "error looking for programs (need all in: $PROGRAMS) "
+cd $TMPDIR 
+if_err die "error stepping in $TMPDIR"
+get_rsb
+if_err die "error getting sources"
+cd librsb
+if_err die "error stepping in librsb directory"
+env > env.log 
+if_err die "error getting environment"
+build_rsb 
+if_err die "error building rsb"
+cd -
+if_err die "error stepping out of librsb directory"
+rm -fR librsb 
+if_err die "error cleaning up librsb directory"
+
diff --git a/scripts/readme-tests.sh b/scripts/readme-tests.sh
new file mode 100644
index 0000000..083edcf
--- /dev/null
+++ b/scripts/readme-tests.sh
@@ -0,0 +1,16 @@
+if test x"${srcdir}" = x ; then srcdir=. ; fi
+ ./rsbench -oa -Ob -f ${srcdir}/A.mtx -qH -R -n1 -t100 --verbose  || exit 255
+ ./rsbench --help || exit 255
+ ./rsbench -oa -Ob --help || exit 255
+ ./rsbench --version || exit 255
+ ./rsbench -I || exit 255
+ ./rsbench -C || exit 255
+    test -f sbtc && ./sbtc || true
+    test -f sbtf && ./sbtf || true
+    ./rsbench -Q 10.0  || exit 255
+    ./rsbench  -oa -Ob -qH -R --dense 1                    --verbose || exit 255
+    ./rsbench  -oa -Ob -qH -R --dense 1024                 --verbose || exit 255
+    ./rsbench  -oa -Ob -qH -R --lower 1024 --as-symmetric  --verbose || exit 255
+    ./rsbench  -oa -Ob -qH -R --dense 1000 --gen-lband 10 --gen-uband 3 || exit 255
+    ./rsbench  -oa -Ob -qH -R --generate-diagonal 1000 || exit 255
diff --git a/scripts/rsb_h_to_rsb_fi.sh b/scripts/rsb_h_to_rsb_fi.sh
new file mode 100755
index 0000000..046c63e
--- /dev/null
+++ b/scripts/rsb_h_to_rsb_fi.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
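+# This script is intended for use by librsb developers.
+# Usage sketch (assuming the ch2icfb helper is built in the current directory; the output name is illustrative):
+#  sh scripts/rsb_h_to_rsb_fi.sh . > rsb.fi
+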
+SRCDIR=
+if test $# = 0 ; then SRCDIR=. ; else SRCDIR="$1"; fi
+IF=${SRCDIR}/rsb.h
+TF=${SRCDIR}/rsb_types.h
+CH2ICFB=./ch2icfb
+(
+${CH2ICFB} < ${IF} | grep -v 'END MODULE rsb' 
+SHEXP='s/0x\([A-F0-9]\+\)/INT(Z"0\1",C_INT)/g'
+#SHEXP='s/0x\([0-9]\+\)/Z"0\1"/g'
+IPD="INTEGER(C_INT),PARAMETER::"
+IPD2="INTEGER(C_INT),PARAMETER::"
+FD="s/^\([^\s]\+\) \([^\s]\+\)/${IPD2}\1=\2/g"
+CLEANUP='s/\s\s*/ /g;s/^,//g;s/=//g;s/\/.*$//g;s/^\s*//g;s/#define *//g'
+D2N='s/#define //g'
+DS='^#define '
+SEE='s/\(PARAMETER::*\) *\(RSB[A-Z_0-9]*\)\(.*$\)/\1\2\3 !< See #\2./g'
+IC='      '
+SHORTEN_DC='s/\(::\)/\&\n'"${IC}"'\&\1/g;'
+SHORTEN_EX='s/\([A-Z_]+\+\)/\1\&\n'"${IC}"'\&/g;'
+SHORTEN_PA='s/\( *:: *[A-Z_]\+\)/\1\&\n'"${IC}"'\&/g;'"$SHORTEN_EX""${SHORTEN_DC}"
+#SHORTEN_PM='s/\([=+]\)/\&\n'"${IC}"'\&\1/g;'
+SHORTEN_TK='s/\s\s*/\&\n'"${IC}"'\&/g;'
+NOTS='s/\s*$//g;'
+
+
+echo '! Error values '
+sed 's/\s\s*/ /g;s/^\(.define\) \(RSB_ERR[^ ]*\) RSB_ERR_CAST(0x\([^ ]*\))$/DEFINE \2 = -INT(Z"0\3",C_INT)/g;s/RSB_ERR_CAST/-/g;s/DEFINE */'"${IPD}"'/g;' < ${IF} | grep '^ *INTE.*RSB_ERR' | grep -v 'RSB_ERRS_UNSUPPORTED_FEATURES\|RSB_ERR_TO_PROGRAM_ERROR'  | sed "${SEE}"| sed "${NOTS}${SHORTEN_PA}"
+
+echo '! Matrix flags values '
+grep RSB_FLAG_ ${IF} | grep -v '\\$' | grep '^.define' | sed 's/\s\s*/ /g;'"${SHEXP}" | grep -v '\/.*' | sed 's/\s\s*/ /g;s/^\(.define\) \(RSB_FLAG[^\s]*\) \(INT(Z[^\s]*\)$/DEFINE\2 = \3/g;s/DEFINE/'"${IPD}"'/g;'  | grep '^ *INTE.*RSB_FLAG'  | sed "${SEE}"| sed "${NOTS}${SHORTEN_PA}"
+
+echo '! Composite flags '
+grep RSB_FLAG_ ${IF} | grep -v '[ 	]0x'  | sed 's/\s\s*/ /g;s/|/+/g;s/^\(.define\)\s\(RSB_FLAG[^	 ]*\)\s\(.*$\)/DEFINE \2 = \3/g;s/^ *//g;s/DEFINE/'"${IPD2}"'/g' | grep '^ *INTE.*RSB_FLAG' | sed "${SEE}" | sed "${NOTS}${SHORTEN_PA}"
+
+echo '! Transposition constants '
+grep "${DS} *"'RSB_TRANSPOSITION_[NTC]' "${TF}" | sed "${CLEANUP};${D2N};${SHEXP};${FD}"
+
+echo '! Numerical types constants '
+grep "${DS} *"'RSB_NUMERICAL_TYPE_FORTRAN_' "${TF}" | sed "${CLEANUP};${D2N};${SHEXP};${FD};s/_FORTRAN//g" | sed 's/C_INT/C_SIGNED_CHAR/g' | sed "${SHORTEN_DC}"
+
+echo '! Other enumerations constants '
+grep '^\(.define\|[ ,]*\) *RSB_\(IO_WANT\|MARF\|PRECF\|EXTF\|MIF\|ELOPF\)_' ${IF} | sed "${CLEANUP};${SHEXP};${FD};${SEE}"| sed "${NOTS}${SHORTEN_PA}${SHORTEN_EX}"
+grep '^\(.define\) *RSB_\(NULL\)_' ${IF} | sed "${CLEANUP};${SHEXP};${FD};${SEE}" | sed "${NOTS}${SHORTEN_PA}"| sed 's/\<NULL\>/C_NULL_PTR/g;s/INTEGER(C_INT)/TYPE(C_PTR)/g'
+
+echo 'END MODULE rsb'
+) | sed 's/^/      /g;s/^\( *!\)/!/g'
diff --git a/scripts/rsbmandesc.awk b/scripts/rsbmandesc.awk
new file mode 100644
index 0000000..c400cf3
--- /dev/null
+++ b/scripts/rsbmandesc.awk
@@ -0,0 +1,22 @@
+#!/usr/bin/awk -f
+#
+# This filter rewrites a man page: it prepends "librsb - " to the NAME section body and inserts a DESCRIPTION heading.
+#
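+# Hypothetical usage: awk -f scripts/rsbmandesc.awk < in.3 > out.3
+#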
+BEGIN { sp=0; }
+/^.SH NAME$/ { sp=sp+1; }
+/.*/ {
+		if(sp==1){sp=sp+1;}
+		else
+		if(sp==2)
+		{
+			print "librsb - ";
+			sp=sp+1;
+		}
+		else
+		if(sp==3)
+		{
+			print ".SH DESCRIPTION";
+			sp=sp+1;
+		}
+		print;
+	}
diff --git a/scripts/rsbmanseealso.sh b/scripts/rsbmanseealso.sh
new file mode 100755
index 0000000..6257f38
--- /dev/null
+++ b/scripts/rsbmanseealso.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
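+# This script emits a man page SEE ALSO section from a list of man page file names.
+# Hypothetical usage: sh scripts/rsbmanseealso.sh man/rsb_lib_init.3 man/rsb_lib_exit.3
+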
+echo '.SH "SEE ALSO"'
+for i in $@ ; do echo ".B `basename $i| sed 's/..$//g'`" ; done 
diff --git a/scripts/scalasca.sh b/scripts/scalasca.sh
new file mode 100755
index 0000000..ff7f54a
--- /dev/null
+++ b/scripts/scalasca.sh
@@ -0,0 +1,38 @@
+#!/bin/sh
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+# These suggestions are intended for use by librsb developers.
+# E.g.: with scalasca-2.1, scorep-1.3, cube-4.2.3 .
+# (dependencies are: papi-5.3, libqt, ... )
+#
+# sh autogen.sh
+# ./configure CC=gcc        FC=gfortran CFLAGS='-fopenmp' --disable-dependency-tracking
+# make        clean         # not to interfere with code generation commands
+# make        CC='scorep gcc' FC='scorep gfortran' LD='scorep ld' -j 16
+#
+# Profiling
+# scan ./rsbench ... 
+# square ... # <give as argument the newly created analysis directory containing *cube* files>
+#
+# To collect PAPI events:
+# export SCOREP_METRIC_PAPI=PAPI_L1_TCM,PAPI_L2_TCM SCOREP_ENABLE_TRACING=false
+# ./rsbench ... 
+# square ... # <give as argument the newly created analysis directory containing *cube* files>
+# 
diff --git a/scripts/score_p.sh b/scripts/score_p.sh
new file mode 100755
index 0000000..9259c28
--- /dev/null
+++ b/scripts/score_p.sh
@@ -0,0 +1,40 @@
+#!/bin/sh
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+CMD=" ./rsbench  -oa -Ob --dense 100 --compare-competitors --verbose -R -qH --no-want-ancillary-execs -n 2"
+#rm -vfRI scorep-*
+rm -vfR ./scorep-*_*_*
+
+export SCOREP_METRIC_PAPI=PAPI_L2_TCM,PAPI_L1_TCM
+$CMD
+
+export SCOREP_TOTAL_MEMORY=163840000 # memory may not suffice
+export SCOREP_FILTERING_FILE=scorep_filter.filt
+cat > $SCOREP_FILTERING_FILE << EOF
+SCOREP_REGION_NAMES_BEGIN
+  EXCLUDE *
+  INCLUDE rsb__do_spmv_uaua rsb__BCSR_spmv_uaua_double_H__tN_r1_c1_uu_sU_dE_uG rsb__BCSR_spmv_uaua_double_C__tN_r1_c1_uu_sU_dE_uG main rsb__mkl_csr_spmv rsb__mkl_coo_spmv
+SCOREP_REGION_NAMES_END
+EOF
+export SCOREP_ENABLE_TRACING=true
+$CMD
+
+#export SCOREP_ENABLE_PROFILING=true
+#$CMD
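+
+# The collected measurements could then be inspected, e.g. with the cube GUI (assuming it is installed):
+# cube scorep-*/profile.cubex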
diff --git a/scripts/static-libs.sh b/scripts/static-libs.sh
new file mode 100755
index 0000000..3e0af65
--- /dev/null
+++ b/scripts/static-libs.sh
@@ -0,0 +1,349 @@
+#!/bin/bash
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+#
+# This script configures and builds binaries for a number of different setups.
+# It is intended to work on the developer's machine.
+
+# TO DO:
+# Need cryptographic hashes.
+#
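+# A hypothetical invocation (any non-empty argument starts the build; see the RSBB_ variables below):
+# RSBB_DIR=/tmp/rsbb-build RSBB_CC_ALTERNATIVES=gcc sh scripts/static-libs.sh go
+#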
+RSBB_DIR=${RSBB_DIR:-$HOME/src/src-tmp/} # an absolute address, please
+RSBB_URL=${RSBB_URL:-svn+ssh://user@host/repository/trunk}
+#RSBB_SVN=echo
+#RSBB_SVN=svn
+RSBB_MAKE=${RSBB_MAKE:-make}
+RSBB_SVN=${RSBB_SVN:-svn}
+RSBB_SVN_OPTIONS=${RSBB_SVN_OPTIONS:-}
+RSBB_RUN_CONFIGURE=${RSBB_RUN_CONFIGURE:-1}
+RSBB_RUN_AUTOGEN=${RSBB_RUN_AUTOGEN:-1}
+RSBB_RUN_MAKE=${RSBB_RUN_MAKE:-1}
+RSBB_RUN_TOUCH_HEADERS_TO_OLD=${RSBB_RUN_TOUCH_HEADERS_TO_OLD:-0}
+RSBB_RUN_MAKE_CLEAN=${RSBB_RUN_MAKE_CLEAN:-1}
+RSBB_RUN_MAKE_CLEANALL=${RSBB_RUN_MAKE_CLEANALL:-0}
+RSBB_RUN_MAKE_TESTRUN=${RSBB_RUN_MAKE_TESTRUN:-1}
+RSBB_RUN_MAKE_TESTS=${RSBB_RUN_MAKE_TESTS:-0}
+RSBB_EXIT_ON_FAILED_TESTS=${RSBB_EXIT_ON_FAILED_TESTS:-1}
+RSBB_RUN_MAKE_QTESTS=${RSBB_RUN_MAKE_QTESTS:-1}
+RSBB_RUN_MAKE_QQTESTS=${RSBB_RUN_MAKE_QQTESTS:-0}
+RSBB_RUN_MAKE_JOBS=${RSBB_RUN_MAKE_JOBS:-2}
+RSBB_RUN_BUILD_ARCHIVE=${RSBB_RUN_BUILD_ARCHIVE:-1}
+RSBB_RUN_MAKE_DOX=${RSBB_RUN_MAKE_DOX:-0}
+RSBB_RUN_MAKE_DOXONLY=${RSBB_RUN_MAKE_DOXONLY:-0}
+RSBB_RUN_MAKE_INSTALL=${RSBB_RUN_MAKE_INSTALL:-0}
+RSBB_RUN_MAKE_LOCAL_LIBRSB_LINK=${RSBB_RUN_MAKE_LOCAL_LIBRSB_LINK:-0}
+RSBB_WANT_LIST_FILE=${RSBB_WANT_LIST_FILE:-1}
+RSBB_WANT_README_FILE=${RSBB_WANT_README_FILE:-1}
+RSBB_REBUILD_STATIC_RSBENCH=${RSBB_REBUILD_STATIC_RSBENCH:-0}
+RSBB_RUN_SVN_CO=${RSBB_RUN_SVN_CO:-1}
+RSBB_RUN_SVN_UP=${RSBB_RUN_SVN_UP:-1}
+RSBB_RUN_SVN_DIFF=${RSBB_RUN_SVN_DIFF:-0}
+RSBB_RUN_SVN_REVERT=${RSBB_RUN_SVN_REVERT:-0}
+RSBB_DIST_URL=${RSBB_DIST_URL:-/dev/null}
+RSBB_DIST_DOWNLOAD=${RSBB_DIST_DOWNLOAD:-0}
+RSBB_DIST_UNPACK=${RSBB_DIST_UNPACK:-0}
+RSBB_DIST_UNPACK_ARCHIVE=${RSBB_DIST_UNPACK_ARCHIVE:-$RSBB_DIR/librsb.tar.gz}
+RSBB_BASENAME=${RSBB_BASENAME:-librsb-batch-static}
+RSBB_INSTALL_PREFIX=${RSBB_INSTALL_PREFIX:-/usr/local/}
+RSBB_WANT_OVERRIDE_PREFIX_WITH_SAME_DIR=${RSBB_WANT_OVERRIDE_PREFIX_WITH_SAME_DIR:-0}
+RSBB_WANT_SKIP_CANONICAL_CFLAGS=${RSBB_WANT_SKIP_CANONICAL_CFLAGS:-0}
+RSBB_CONFIGURE_ALTERNATIVES=${RSBB_CONFIGURE_ALTERNATIVES:---disable-openmp --enable-openmp}
+RSBB_CONFIGURE_ADD=${RSBB_CONFIGURE_ADD:-}
+RSBB_RUN_PRECONFIGURE_HOOK=${RSBB_RUN_PRECONFIGURE_HOOK:-}
+RSBB_RUN_PREMAKE_HOOK=${RSBB_RUN_PREMAKE_HOOK:-}
+RSBB_RUN_POSTMAKE_HOOK=${RSBB_RUN_POSTMAKE_HOOK:-}
+RSBB_MAKE_EXTRA_CFLAGS=${RSBB_MAKE_EXTRA_CFLAGS:-}
+CC=${CC:-gcc}
+FC=${FC:-gfortran}
+CXX=${CXX:-g++}
+RSBB_CC_ALTERNATIVES=${RSBB_CC_ALTERNATIVES:-$CC}
+RSBB_CFLAGS_ALTERNATIVES=${RSBB_CFLAGS_ALTERNATIVES:-}
+RSBB_CFLAGS_ADD=${RSBB_CFLAGS_ADD:--fPIC}
+RSBB_MKL=${RSBB_MKL:-}
+RSBB_WANT_TYPES=${RSBB_WANT_TYPES:-}
+RSBB_WANT_TOLERATE_MAKE_FAILURE=${RSBB_WANT_TOLERATE_MAKE_FAILURE:-0}
+#RSBB_WANT_TYPES=${RSBB_WANT_TYPES:---enable-matrix-types=double}
+#RSBB_WANT_TYPES=${RSBB_WANT_TYPES:---enable-matrix-types=double,float,float complex,double complex}
+#RSBB_WANT_TYPES=${RSBB_WANT_TYPES//%/ }
+RSBB_DIR_NAME_APPEND=${RSBB_DIR_NAME_APPEND:-}
+RSBB_DIR_NAME_PRETAG=${RSBB_DIR_NAME_PRETAG:-}
+RSBB_DIR_NAME_PREPEND=${RSBB_DIR_NAME_PREPEND:-}
+DATE=date
+#
+if test x$# = x0 ; then 
+	echo "Usage: $0 ''"
+	echo "Any of the following variables may be set:"
+	echo
+	declare | grep RSBB_
+	echo
+	exit
+else
+	true ;
+fi
+ECHO=echo
+ECHO=
+CP=cp
+BBL=batchbuild.log
+#cct='--enable-matrix-types=double,double complex'
+#cct=--disable-openmp
+cct=
+ARCH=`uname -m`
+RSBB_BN=${RSBB_BASENAME}-${ARCH}
+LIST=${RSBB_DIR}/${RSBB_BN}-files.txt
+RSBB_RF=
+if test x$RSBB_WANT_README_FILE = x1 ; then
+	RSBB_RF=${RSBB_DIR}/${RSBB_BN}-README.txt
+fi
+BA=${RSBB_DIR}/${RSBB_BN}.tar.gz
+sf="-all-static"
+#for co in "--enable-openmp"
+#for co in "--enable-openmp" "--disable-openmp"
+rm -f ${LIST}
+#
+mkdir -p $RSBB_DIR || exit -1
+#
+cd $RSBB_DIR || exit -1
+#
+if test x$RSBB_WANT_README_FILE = x1 ; then
+	echo "README (log) file for librsb build." > $RSBB_RF
+	#echo "$USER@$HOSTNAME" >> $RSBB_RF
+	echo "Host: $HOSTNAME" >> $RSBB_RF
+	uname -a >> $RSBB_RF
+	date >> $RSBB_RF
+	echo "Relevant build variables: " >> $RSBB_RF
+	declare | grep RSBB_  >> $RSBB_RF
+	echo " " >> $RSBB_RF
+fi
+#
+if test x${RSBB_DIST_DOWNLOAD} = x1 ; then
+	$ECHO wget ${RSBB_DIST_URL} -O librsb.tar.gz || exit -1
+fi
+#
+#for co in "--enable-openmp" "--disable-openmp"
+for cc in $RSBB_CC_ALTERNATIVES
+do
+export CC="${cc}"
+if test x$cc = xicc ; then export FC="ifort"; fi
+for co in $RSBB_CONFIGURE_ALTERNATIVES
+#for co in  "--disable-openmp"
+do
+alternative_already_done=no # when RSBB_CFLAGS_ALTERNATIVES is one of the presets
+for bo in "-O0 -ggdb" "-O2 -pg" "-O3" "${RSBB_CFLAGS_ALTERNATIVES}"
+#for bo in "-O3"  "${RSBB_CFLAGS_ALTERNATIVES}"
+#for bo in "-O0 -ggdb"  "${RSBB_CFLAGS_ALTERNATIVES}"
+#for bo in "-O2 -pg" "-O3"
+#for bo in "-O0 -ggdb"
+do
+	if test x"$bo" = x"$RSBB_CFLAGS_ALTERNATIVES" -a x"$bo" = x ; then continue ; fi
+	if test x"1" = x"$RSBB_WANT_SKIP_CANONICAL_CFLAGS" -a x"$bo" != x"$RSBB_CFLAGS_ALTERNATIVES" ; then continue ; fi
+	if test x"$alternative_already_done" = x"yes" ; then continue; fi
+	if test x"$bo" = x"$RSBB_CFLAGS_ALTERNATIVES" ; then alternative_already_done=yes ; fi
+	#bo="$bo ${RSBB_MAKE_EXTRA_CFLAGS}"
+	$DATE
+	#TAG=${bo// /}-${co// /}
+	TAG=`basename ${CC}`-${bo// /}-${co// /}
+	BT=${RSBB_DIR_NAME_PREPEND}$RSBB_BN-${RSBB_DIR_NAME_PRETAG}${TAG}${RSBB_DIR_NAME_APPEND}
+	BD=$RSBB_DIR/${BT}
+	CFLAGS="${bo} ${RSBB_CFLAGS_ADD} ${RSBB_MAKE_EXTRA_CFLAGS}"
+	export CFLAGS="$CFLAGS"
+	export CXXFLAGS=${CXXFLAGS:-$CFLAGS}
+	#export CXXFLAGS="$CFLAGS"
+	export CXX
+	# $ECHO $RSBB_SVN info $RSBB_URL || exit -1
+	#
+	if test "x${RSBB_DIST_UNPACK}" = x1 ; then
+		mkdir -p librsb/ || exit -1 
+		tar --transform 's/librsb-*[0-9:M.]\+\///' -C librsb -xzf ${RSBB_DIST_UNPACK_ARCHIVE} || exit -1
+		#cp -fvRp librsb ${BD}|| exit -1 
+		#mv librsb ${BD} || exit -1 
+		mkdir -p ${BD} || exit -1 
+		#mv -f librsb/* ${BD}/ || exit -1 
+		rsync -avz librsb/ ${BD}/ || exit -1 
+		rm -fR librsb  || exit -1 
+		#rmdir librsb  || exit -1 
+	fi
+	#
+	if test "x${RSBB_RUN_SVN_CO}" = x1 ; then
+		$ECHO $RSBB_SVN ${RSBB_SVN_OPTIONS} --force co ${RSBB_URL} ${BD} || exit -1
+	fi
+	#
+	if test "x${RSBB_RUN_SVN_DIFF}" = x1 ; then
+		$ECHO $RSBB_SVN ${RSBB_SVN_OPTIONS} info ${RSBB_URL} || exit -1
+		$ECHO $RSBB_SVN ${RSBB_SVN_OPTIONS}         diff $BD || exit -1
+	fi
+	#
+	if test "x${RSBB_RUN_SVN_REVERT}" = x1 ; then
+		$ECHO $RSBB_SVN ${RSBB_SVN_OPTIONS} info ${RSBB_URL} || exit -1
+		$ECHO $RSBB_SVN ${RSBB_SVN_OPTIONS}         revert `$RSBB_SVN ${RSBB_SVN_OPTIONS}         ls $BD` || exit -1
+	fi
+	#
+	if test "x${RSBB_RUN_SVN_UP}" = x1 ; then
+		$ECHO $RSBB_SVN ${RSBB_SVN_OPTIONS} info ${RSBB_URL} || exit -1
+		$ECHO $RSBB_SVN ${RSBB_SVN_OPTIONS} --force up  $BD || exit -1
+	fi
+	#
+	echo will cd $BD || exit -1
+	$ECHO cd $BD || exit -1
+	{
+	if test x"$RSBB_RUN_PRE_HOOK" != x ; then
+		eval "$RSBB_RUN_PRE_HOOK"
+	fi
+	#if test x"$RSBB_RUN_MAKE" = x1 -a '!' -e Makefile ; then
+	if test '!' -e Makefile && set | grep 'RSBB_RUN_MAKE.*=1$' ; then
+		RSBB_RUN_CONFIGURE=1 # create a Makefile if it does not exist
+	fi
+	if test x"$RSBB_RUN_CONFIGURE" = x1 ; then
+		if test configure.ac -nt configure ; then $ECHO sh autogen.sh || exit -1 ; fi
+	fi
+	mkdir -p m4 # for Makefile.am
+	if test x"$RSBB_RUN_AUTOGEN" = x1 ; then
+		sh autogen.sh || exit -1
+		# FIXME: temporary, for Helios:
+		#libtoolize -c # || exit -1
+		autoreconf --install --force || true # 20121016
+	fi
+	if test x"$RSBB_WANT_OVERRIDE_PREFIX_WITH_SAME_DIR" = x1 ; then
+		RSBB_INSTALL_PREFIX="`pwd`/local/"
+	fi
+	cco="--prefix=${RSBB_INSTALL_PREFIX}"
+	#cco='--prefix=/usr/local/'
+#
+	if test x"$RSBB_RUN_PRECONFIGURE_HOOK" != x ; then
+		# FIXME; seems like the following export does not work
+		#export RSBB_CONFIGURE_ADD="${RSBB_CONFIGURE_ADD} $co" # seems incorrect
+		coo="$co ${RSBB_CONFIGURE_ADD}" # seems correct
+		#export co="$co ${RSBB_CONFIGURE_ADD}" # seems correct
+		#echo "RSBB_CONFIGURE_ADD is now: ${RSBB_CONFIGURE_ADD}"
+		export CFLAGS="${CFLAGS}";
+		eval "$RSBB_RUN_PRECONFIGURE_HOOK"
+		if test x"$RSBB_RUN_CONFIGURE" = x1 ; then
+			echo "configure will now be invoked with: ${coo}"
+		fi
+	else
+		coo="$co " # seems correct
+	fi
+#
+	RSBB_MKL_OPTION=
+	if test x"$RSBB_MKL" != x ; then
+		RSBB_MKL_OPTION=--with-mkl="$RSBB_MKL"
+	fi
+	if test x"$RSBB_RUN_MAKE_LOCAL_LIBRSB_LINK" = x1 ; then
+		$ECHO rm -f local  || exit -1
+		$ECHO ln -v -f -s `echo ${BD} | sed s/${RSBB_BASENAME}/librsb/g`/local local  || exit -1
+	fi
+	if test x"$RSBB_RUN_CONFIGURE" = x1 ; then
+		$ECHO ./configure $coo "${RSBB_WANT_TYPES}" "${RSBB_MKL_OPTION}" CFLAGS="${CFLAGS}" ${cco} ${cct} CC=${CC} FC=${FC} CXX=${CXX} \
+			LIBRSB_CONFIG=`pwd`/local/bin/librsb-config # this is an extra for sparsersb
+	fi
+	if test x"$RSBB_RUN_TOUCH_HEADERS_TO_OLD" = x1 ; then
+		# FIXME: is.h is not necessarily old :P
+		#touch config.h rsb.h -r is.h || exit -1
+		#touch *.h *.m4 -r is.h || exit -1
+		touch *.h -r is.h || exit -1
+	fi
+	if test x"$RSBB_RUN_MAKE_CLEAN" = x1 ; then
+		$ECHO ${RSBB_MAKE} clean || exit -1
+	fi
+	if test x"$RSBB_RUN_MAKE_CLEANALL" = x1 ; then
+		$ECHO ${RSBB_MAKE} cleanall || exit -1
+	fi
+	if test x"$RSBB_RUN_MAKE" = x1 ; then
+		if test x"$RSBB_RUN_PREMAKE_HOOK" != x ; then
+			eval "$RSBB_RUN_PREMAKE_HOOK"
+		fi
+		RSBB_MAKE_ERROR_EXIT=exit
+		if test x"$RSBB_WANT_TOLERATE_MAKE_FAILURE" = x1 ; then
+			RSBB_MAKE_ERROR_EXIT=echo
+		fi
+		$ECHO ${RSBB_MAKE} all -j ${RSBB_RUN_MAKE_JOBS} || ${RSBB_MAKE_ERROR_EXIT} -1
+		if test x"$RSBB_RUN_POSTMAKE_HOOK" != x ; then
+			eval "$RSBB_RUN_POSTMAKE_HOOK"
+		fi
+	fi
+	if test x"$RSBB_RUN_MAKE_QTESTS" = x1 ; then
+		$ECHO ${RSBB_MAKE} qtests || exit -1
+	fi
+	if test x"$RSBB_RUN_MAKE_QQTESTS" = x1 ; then
+		$ECHO ${RSBB_MAKE} qqtests || exit -1
+	fi
+	if test x"$RSBB_RUN_MAKE_TESTS" = x1 ; then
+		$ECHO ${RSBB_MAKE} tests || if test x${RSBB_EXIT_ON_FAILED_TESTS} != x0 ; then exit -1 ; fi # too slow
+	fi
+	if test x"$RSBB_RUN_MAKE_DOX" = x1 ; then
+		$ECHO ${RSBB_MAKE} dox || exit -1
+	fi
+	if test x"$RSBB_RUN_MAKE_DOXONLY" = x1 ; then
+		$ECHO ${RSBB_MAKE} doxonly || exit -1
+	fi
+	if test x"$RSBB_RUN_MAKE_INSTALL" = x1 ; then
+		$ECHO ${RSBB_MAKE} install || exit -1
+	fi
+	if test x"$RSBB_REBUILD_STATIC_RSBENCH" = x1 ; then
+	if test x"$RSBB_RUN_MAKE" = x1 ; then
+		echo "Will attempt to rebuild statically.."
+		$ECHO rm -f rsbench 
+		#$ECHO ${RSBB_MAKE} CFLAGS="${CFLAGS} ${sf}" || exit -1
+		$ECHO ${RSBB_MAKE} AM_CFLAGS="${CFLAGS} ${sf}" || exit -1
+	fi
+	fi
+	if test x"$RSBB_RUN_MAKE_TESTRUN" = x1 ; then
+		./rsbench || exit -1
+	fi
+	if test x"$RSBB_WANT_LIST_FILE" = x1 ; then
+		sed 's/^prefix=.*$/prefix="\/usr\/local\/"/g' `pwd`/librsb-config > `pwd`/librsb-config-local
+		#ls -ltr 	`pwd`/librsb-config
+		#ls -ltr 	`pwd`/librsb-config-local
+		#for FN in rsbench librsb.a rsb.h rsb-spblas.h librsb-config rsb-types.h ; do
+		for FN in rsbench librsb.a rsb.h rsb-spblas.h librsb-config-local rsb-types.h ; do
+			#echo "$BD/$FN" >> ${LIST}
+			echo "$BT/$FN" >> ${LIST}
+			#echo "$BT/local/$FN" >> ${LIST}
+		done || exit -1
+	fi
+	#$ECHO $CP rsbench rsbench-$TAG || exit -1
+	#$ECHO $CP librsbench.a librsbench-$TAG.a || exit -1
+	#$ECHO $CP librsb-config librsb-config-$TAG || exit -1
+	} # > $BD/$BBL
+	# TODO: place email report sending here, or later on
+	$DATE
+	# 
+	$ECHO cd -
+	if test x"$RSBB_RUN_POST_HOOK" != x ; then
+		eval "$RSBB_RUN_POST_HOOK"
+	fi
+done
+done
+done
+	cd $RSBB_DIR || exit -1
+	if test x$RSBB_WANT_LIST_FILE = x1 -a x$RSBB_RUN_BUILD_ARCHIVE = x1 ; then
+		md5sum `cat ${LIST}` > ${LIST}.md5
+		tar cvzf ${BA} --transform='s/-local//' `cat ${LIST}` # ${LIST}.md5
+		#tar cvzf ${BA} `cat ${LIST}` # ${LIST}.md5
+		md5sum ${BA} > ${BA}.md5
+		echo "contents:"
+		tar tzvf  ${BA}
+		ls -ltr  ${BA}
+	fi
+	if test x$RSBB_WANT_README_FILE = x1 ; then
+		echo "Build terminated at: " >> $RSBB_RF
+		date >> $RSBB_RF
+	fi
+exit
diff --git a/scripts/test.sh b/scripts/test.sh
new file mode 100644
index 0000000..0074f9c
--- /dev/null
+++ b/scripts/test.sh
@@ -0,0 +1,273 @@
+#!/bin/bash
+#
+# Copyright (C) 2008-2016 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+if test x"${srcdir}" = x ; then srcdir=. ; fi
+ULIMIT_S=10000
+echo "Invoking ulimit -s ${ULIMIT_S}"
+ulimit -s ${ULIMIT_S} 
+
+# This should be the main script for library consistency checking.
+# TODO : for every:
+#	 * type
+#	 * matrix storage
+#	 * op <-
+#	 * missing boundary and 'limit' testing! e.g.: ./rsbench -g -r 1000000 -c 1000000 -b 20
+#	 * rsbenchxx (albeit minimal) testing
+#	 * -i (in place)
+#
+#	TODO: need minimal testing for -h --help
+# FIXME : complete the 'strict' option
+
+gen_liminal()
+{
+	ms=`$B --configuration | grep RSB_MAX_MATRIX_DIM |sed "s/^RSB_MAX_MATRIX_DIM://g"` ; 
+	[ -z "$ms" ] && exit 1
+	r=$ms
+	c=$ms
+	n=4
+cat << EOF > $M
+%%MatrixMarket matrix coordinate real general
+% this matrix is sized as the limits of your architecture allow
+$r $c $n
+1 1 1
+1 $c 1
+$r 1 1
+$r $c 1
+EOF
+}
+nulldev=/dev/null
+alias > $nulldev 2>&1 || exit 1
+
+alias fail='exit 1'
+#alias fail='( echo "[!]" ; exit 1 ; )' # seems to not fail..
+e='echo'
+x='echo "--"; echo'
+strict=1
+d=`pwd`
+B=$d/rsbench
+M=$d/test.mtx
+n=100
+pt=0
+ft=0
+st=0
+fc=""
+$x || exit -1
+
+make rsbench || fail	# we want our test programs
+
+ru=`$B -C | grep row.unrolls |sed "s/^row unrolls://g;s/ /,/g"`;
+cu=`$B -C | grep column.unrolls |sed "s/^column unrolls://g;s/ /,/g"`;
+
+if test -z "$ru" ; then fail ; fi
+if test -z "$cu" ; then fail ; fi
+
+# FIXME : compact all of these flags in some way..
+if true ; then 
+
+$x "$B -h"
+    $B -h || fail # 
+
+$x "$B -oa -Ob -h"
+    $B -oa -Ob -h || fail # 
+
+#$x "$B -M"
+#    $B -M || fail # 
+
+$x "$B -H"
+    $B -H || fail # 
+
+$x "$B -C"
+    $B -C || fail # 
+
+pdm=pd.mtx
+
+$x "$B -G $pdm"
+    $B -G $pdm || fail # 
+
+# FIXME: the following functionalities should be improved and made public
+$x "$B -oa -Ob  -f $pdm --matrix-dump-graph $pdm.dot --matrix-dump-internals"
+$B -oa -Ob  -f $pdm --matrix-dump-graph $pdm.dot --matrix-dump-internals || fail # 
+
+$x "$B -ot -Ob --lower 3"
+$B -ot -Ob --lower 3 || fail # 
+
+$x "$B -P $pdm"
+    $B -P $pdm || fail # 
+
+$x "$B -I"
+   # $B -I || make feedback
+    $B -I || fail # system information dumpout
+
+bmfn=test.mtx.rsb
+for deff in "-R -Fbo -qH" "-R -Fbo" ; do
+for detr in "--lower" "--dense" ; do
+if $B --configuration | grep 'XDR.*off' ; then
+	st=$((st+1))
+else
+	$x "$B -oa -Ob $detr=10 -w $bmfn $deff"
+	    $B -oa -Ob $detr=10 -w $bmfn $deff || fail # binary I/O test
+	
+	$x "$B -oa -Ob -b $bmfn $deff"
+	    $B -oa -Ob -b $bmfn $deff || fail # binary I/O test
+fi
+done
+done
+
+$x "$B -OR"
+    $B -OR || fail # 
+
+#$x "$B -Ot -b"
+#    $B -Ot -b || fail #  FIXME : broken
+
+#$x "$B -e"
+#    $B -e -f test.mtx || fail # FIXME : broken
+
+#$x "$B -Or"
+#    $B -Or || fail # FIXME : full testing . slow !
+
+$x "$B --matrix-ls ${srcdir}/A.mtx"
+    $B --matrix-ls ${srcdir}/A.mtx || fail # 
+
+#$x "$B -Oc -f $M"
+#    $B -Oc -f $M || fail # FIXME : fuller testing . slow !
+
+#$x "$B -os -Ob"
+#    $B -os -Ob || fail # FIXME
+
+
+#$x "$B -o$o -Ob"
+#    $B -o$o -Ob || fail # FIXME : o in v a m s c i n S
+
+	$B --plot-matrix -aRzd -f $pdm || fail
+fi
+
+# The following are here for (rather flimsy) testing/coverage purposes:
+$x "$B -oa -Ob -f ${srcdir}/A.mtx -Fo  --z-sorted-coo"
+$B -oa -Ob -f ${srcdir}/A.mtx -Fo  --z-sorted-coo || fail
+$x "$B -oa -Ob --lower 4 --want-no-recursive --ilu0"
+$B -oa -Ob --lower 4 --want-no-recursive --ilu0 || fail
+$x "$B -oa -Ob --lower 4 -K"
+$B -oa -Ob --lower 4 -K || fail
+$x "$B -oa -Ob --dense 10 --nrhs 1,2 --incy 1,2 -K"
+$B -oa -Ob --dense 10 --nrhs 1,2 --incy 1,2 -K
+
+if test "x`which gfortran`" != x && gfortran -v > /dev/null 2>&1 ; then
+	make blas_sparse.F90 || fail
+	make sbtf.F90 || fail
+	CFLAGS='-O0 -ggdb -DHAVE_RSB_KERNELS=1 -fopenmp'
+	gfortran $CFLAGS -c blas_sparse.F90 || fail
+	gfortran $CFLAGS -o sbtf blas_sparse.F90 sbtf.F90 -lrsb -L. || fail
+	./sbtf || fail
+fi
+
+# FIXME : -b X implies a bandwidth of X!
+for o in "-b 9" "-n 10%" "-n 2%"; 
+do
+#for n in 10 100 1000 2000 4000 8000  ;
+for n in 1 2 3 4  10 100 200;
+do
+case $n in 
+	-1)
+	# FIXME : the code is not yet mature enough to handle this well...
+	gen_liminal ;; # in one case we test for a limit sized matrix
+	1|2|3|4)
+	# matrix generation
+	$x "$B -g -r $n -c $n -b $((n-1))  > $M"
+	   $B -g -r $n -c $n -b $((n-1))  > $M || fail
+	;;
+	*)
+	# matrix generation
+	$x "$B -g -r $n -c $n $o  > $M"
+	   $B -g -r $n -c $n $o  > $M || fail
+esac
+
+# 20110607 FIXME this is only a partial fix, which shall provide a small matrix when matrix creation was disabled at configure time
+$B --configuration | grep RSB_IOLEVEL:0 && cp $pdm $M 
+
+sep="              *"
+
+for f in `$B --configuration | grep format.switches |sed "s/^format switches://g"` ; # for every supported format
+do
+
+# various formats testing
+#for a in "" "-A" "-R";	# with automatic and non-automatic blocking size choice (still broken)
+for a in "" "-R";	# with automatic and non-automatic blocking size choice (still broken)
+do
+	# matrix dumpout
+	$x "$B -Od -f $M -F $f"
+	   $B -Od -f $M -F $f > $nulldev || fail # needs -f matrixname
+
+	# basic matrix handling tests (FIXME : doesn't test/support -A flag!)
+	# 20100829 removed -s from the following
+#	$x "$B -Ot -f $M -r $ru -c $cu -F $f $a"
+#	   $B -Ot -f $M -r $ru -c $cu -F $f $a || fail 
+
+#	$x "$B -oa -Ob -f $M -d -F $f -t 1 $a"
+#	   $B -oa -Ob -f $M -d -F $f -t 1 $a || fail
+	c="$?"
+#	$x "$B -oa -Ob -f $M -d -F $f -t 1 $a"
+#	es="$B -oa -Ob -f $M -d -F $f -t 1 $a"
+
+	$x "$B -oa -Ob -f $M  -F $f -t 1 $a"
+	   $B -oa -Ob -f $M  -F $f -t 1 $a || fail
+	c="$?"
+	$x "$B -oa -Ob -f $M  -F $f -t 1 $a"
+	es="$B -oa -Ob -f $M  -F $f -t 1 $a"
+
+	if test x"$c" = x"0" 
+	then
+		pt=$((pt+1))
+	else
+		ft=$((ft+1))
+		fc="$fc\n$es"
+		if test x"$strict" = x"1" ; then
+			# FIXME
+			exit -1
+		fi
+	fi
+#	./rsbench -Ot -f $B # test_matops.c should care
+#	if test x"$?" = x"0" ; then pt=$((pt+1)) ; else ft=$((ft+1)) ; fi
+done
+done
+done
+done
+
+$x "$B -oa -Ob -R --dense 100 --write-performance-record test.rpr"
+    $B -oa -Ob -R --dense 100 --write-performance-record test.rpr || $x
+$x "$B                      --read-performance-record test.rpr"
+    $B                      --read-performance-record test.rpr || $x
+
+$x "$B -oa -Ob -R --write-performance-record test.rpr pd.mtx non-existing.mtx"
+    $B -oa -Ob -R --write-performance-record test.rpr pd.mtx non-existing.mtx || $x
+
+rm test.rpr || $x
+
+if ! test x"$RSB_SHORT_TEST_SH" = x1; then
+$x "$B -B"
+    $B -B || fail # 
+fi
+
+$e "passed  tests : $pt"
+$e "failed  tests : $ft"
+$e "skipped tests : $st"
+
+if test "$ft" != "0" ; then fail ; fi
+
+echo $fc
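+
+# Typical invocations (a sketch): "sh scripts/test.sh", or "RSB_SHORT_TEST_SH=1 sh scripts/test.sh" to skip the slow "-B" run.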
diff --git a/scripts/versions.sh b/scripts/versions.sh
new file mode 100755
index 0000000..cc0d8a7
--- /dev/null
+++ b/scripts/versions.sh
@@ -0,0 +1,49 @@
+#!/bin/sh
+#
+# Copyright (C) 2008-2015 Michele Martone
+# 
+# This file is part of librsb.
+# 
+# librsb is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+# 
+# librsb is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with librsb; see the file COPYING.
+# If not, see <http://www.gnu.org/licenses/>.
+
+# This script should dump the versions of the programs used to successfully build the library.
+# Once collected, this info will be useful when debugging compatibility issues.
+# FIXME : unfinished :)
+
+grep=grep
+which=which
+test=test
+mf=Makefile
+null=/dev/null
+sed=sed
+
+$which $grep > $null 2>&1 || exit
+$which $sed  > $null 2>&1 || exit
+
+se='s/^.*=\s//g'
+s="$sed $se" 
+tr='tr "\n"   "_" '
+#tr=cat
+e=echo
+
+v=--version
+
+`$grep '^M4 ='     $mf | $s` $v | $tr
+$e
+`$grep '^CC ='     $mf | $s` $v | $tr
+$e
+`$grep '^OCTAVE =' $mf | $s` $v | $tr
+$e
+
diff --git a/testgen.sh.m4 b/testgen.sh.m4
new file mode 100644
index 0000000..839b3ae
--- /dev/null
+++ b/testgen.sh.m4
@@ -0,0 +1,30 @@
+#!/bin/sh
+
+
+# FIXME : THIS SCRIPT IS NO LONGER NEEDED, AS rsbench PERFORMS WELL.
+exit 0
+
+MOPS=$(echo 'WANT_MATRIX_OPS'  | sed 's/[,()]/ /g')
+
+rm -f Makefile.mops
+
+echo CFLAGS=CFLAGS_	>>  Makefile.mops
+echo CC=CC_		>>  Makefile.mops
+
+for mop in $MOPS
+do
+	echo -e \
+	"extern int main_block_partitioned_$mop(int argc,char *argv[]);\n"\
+	"int main(int argc,char *argv[])\n"\
+	"{\n"\
+	"	return main_block_partitioned_$mop(argc,argv);\n"\
+	"}\n" > main_block_partitioned_$mop.c
+	echo main_block_partitioned_$mop: main_block_partitioned_$mop.o LIBOBJS >>  Makefile.mops
+	echo '	$(CC) $(CFLAGS)' -o main_block_partitioned_$mop main_block_partitioned_$mop.o LIBOBJS >>  Makefile.mops
+	echo  >>  Makefile.mops
+done
+for mop in $MOPS
+do
+	make -f Makefile.mops main_block_partitioned_$mop
+done
+
diff --git a/vf.mtx b/vf.mtx
new file mode 100644
index 0000000..61799bf
--- /dev/null
+++ b/vf.mtx
@@ -0,0 +1,8 @@
+%%MatrixMarket matrix array complex general
+6           1
+11.000000000000000E+000 12.000000000000000E+000 
+21.000000000000000E+000 22.000000000000000E+000 
+31.000000000000000E+000 32.000000000000000E+000 
+41.000000000000000E+000 42.000000000000000E+000 
+51.000000000000000E+000 52.000000000000000E+000 
+61.000000000000000E+000 62.000000000000000E+000 
diff --git a/wisdom.m4 b/wisdom.m4
new file mode 100644
index 0000000..9389b57
--- /dev/null
+++ b/wisdom.m4
@@ -0,0 +1,145 @@
+dnl
+dnl
+include(`rsb_misc.m4')dnl
+dnl
+dnl	Matrix modifying kernel operation codes.
+define(`RSB_M4_MATRIX_WRITEONLY_KERNEL_MOPS',``scale',`negation'')dnl
+dnl
+dnl	SPMV-like kernel operation codes.
+define(`RSB_M4_SPMV_KERNEL_MOPS',``spmv_sasa',`spmv_uauz',`spmv_uxux',`spmv_uaua',`spmv_uxua',`spmv_unua',`spmv_sxsx',`spmv_sxsa'')dnl
+dnl
+dnl	SPMM-like kernel operation codes.
+define(`RSB_M4_SPMM_KERNEL_MOPS',``spmm_az'')dnl
+dnl
+dnl	SPMV/SPMM-like kernel operation codes.
+define(`RSB_M4_SPMX_KERNEL_MOPS',`RSB_M4_SPMV_KERNEL_MOPS,RSB_M4_SPMM_KERNEL_MOPS')dnl
+dnl
+dnl	kernel operation codes scaling the output vector.
+define(`RSB_M4_SPSX_ZEROING_KERNEL_MOPS',``spsv_uxua'')dnl
+define(`RSB_M4_SPMX_ZEROING_KERNEL_MOPS',``spmv_uauz',`spmm_az'')dnl
+dnl define(`RSB_M4_SPMX_ZEROING_KERNEL_MOPS',``spmv_uauz',`spmm_az',`spsv_uxua'')dnl
+define(`RSB_M4_SPXX_ZEROING_KERNEL_MOPS',`RSB_M4_SPMX_ZEROING_KERNEL_MOPS')dnl
+dnl define(`RSB_M4_SPXX_ZEROING_KERNEL_MOPS',`RSB_M4_SPSX_ZEROING_KERNEL_MOPS,RSB_M4_SPMX_ZEROING_KERNEL_MOPS')dnl
+define(`RSB_M4_SPXX_ALLOW_ALIASING_KERNEL_MOPS',``spsv_uxua'')dnl
+dnl
+dnl	SP**-like kernel operation codes scaling the output vector.
+define(`RSB_M4_SPMX_SCALING_KERNEL_MOPS',``spmv_uxux',`spmv_sxsx'')dnl
+dnl
+dnl	SP**-like kernel operation codes scaling, negating, or adding only to the computation result.
+define(`RSB_M4_SPSX_OP_ADDING_KERNEL_MOPS',``spsv_uxua'')dnl
+define(`RSB_M4_SPMX_OP_ADDING_KERNEL_MOPS',``spmv_uauz',`spmv_uaua',`spmv_sasa'')dnl
+define(`RSB_M4_SPMX_OP_NEGATING_KERNEL_MOPS',``spmv_unua'')dnl
+define(`RSB_M4_SPMX_OP_SCALING_KERNEL_MOPS',``spmv_uxua',`spmv_uxux',`spmv_sxsx',`spmv_sxsa'')dnl
+define(`RSB_M4_SPSX_OP_SCALING_KERNEL_MOPS',``spsv_sxsx'')dnl
+define(`RSB_M4_SPXX_OP_ACC_WRITING_KERNEL_MOPS',``infty_norm',`rowssums'')dnl
+dnl
+define(`RSB_M4_SPXX_RC_BIASED_KERNEL_MOPS',``scale'')dnl
+dnl
+dnl	Matrix readonly kernel operation codes.
+define(`RSB_M4_MATRIX_READONLY_KERNEL_MOPS',`RSB_M4_SPXX_OP_ACC_WRITING_KERNEL_MOPS,RSB_M4_SPSX_KERNEL_MOPS,RSB_M4_SPMX_KERNEL_MOPS')dnl
+dnl
+dnl	SPSV-like kernel operation codes.
+define(`RSB_M4_SPSV_KERNEL_MOPS',``spsv_uxua',`spsv_sxsx'')dnl
+define(`RSB_M4_SPSX_KERNEL_MOPS',`RSB_M4_SPSV_KERNEL_MOPS')dnl
+dnl
+dnl	SPSV-like kernel operation codes scaling the output vector.
+dnl define(`RSB_M4_SPSX_SCALING_KERNEL_MOPS',`spsv_sxsx')dnl
+define(`RSB_M4_SPSX_SCALING_KERNEL_MOPS',`')dnl
+define(`RSB_M4_SPSX_OP_SETTING_KERNEL_MOPS',`spsv_uxua')dnl
+dnl
+dnl	kernel operation codes scaling the output vector.
+define(`RSB_M4_ZEROING_KERNEL_MOPS',`RSB_M4_SPXX_ZEROING_KERNEL_MOPS')dnl
+define(`RSB_M4_SCALING_KERNEL_MOPS',`RSB_M4_SPMX_SCALING_KERNEL_MOPS,RSB_M4_SPSX_SCALING_KERNEL_MOPS')dnl
+dnl
+define(`RSB_M4_STRIDED_KERNEL_MOPS',``spmv_sxsx',`spmv_sxsa',`spsv_sxsx',`spmv_sasa'')dnl
+dnl
+define(`RSB_M4_OP_SCALING_KERNEL_MOPS',`RSB_M4_SPMX_OP_SCALING_KERNEL_MOPS,RSB_M4_SPSX_OP_SCALING_KERNEL_MOPS')dnl
+define(`RSB_M4_OP_NEGATING_KERNEL_MOPS',`RSB_M4_SPMX_OP_NEGATING_KERNEL_MOPS')dnl
+define(`RSB_M4_OP_ADDING_KERNEL_MOPS',`RSB_M4_SPMX_OP_ADDING_KERNEL_MOPS,RSB_M4_SPSX_OP_ADDING_KERNEL_MOPS')dnl
+define(`RSB_M4_SPSX_NEGATING_KERNEL_MOPS',`')dnl
+dnl
+define(`RSB_M4_MATRIX_ALL_COMPLEX_TYPES',(`complex',`long complex',`float complex',`double complex'))dnl
+define(`RSB_M4_MATRIX_ALL_INT_TYPES',(`int'))dnl
+dnl
+define(`RSB_M4_HAVE_COMPLEX_TYPE',`(RSB_M4_INTERSECTION(RSB_M4_MATRIX_ALL_COMPLEX_TYPES,(WANT_TYPES)))')dnl
+define(`RSB_M4_HAVE_INT_TYPE',`(RSB_M4_INTERSECTION(RSB_M4_MATRIX_ALL_INT_TYPES,(WANT_TYPES)))')dnl
+dnl
+define(`RSB_M4_IS_READONLY_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_MATRIX_READONLY_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_WRITEONLY_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_MATRIX_WRITEONLY_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_SPMV_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_SPMV_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_SPSV_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_SPSV_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_SPMX_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_SPMX_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_SPXX_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_SPMX_KERNEL_MOPS,RSB_M4_SPSX_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_SPXV_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_SPMV_KERNEL_MOPS,RSB_M4_SPSV_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_SPXM_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_SPMM_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_SPMX_SCALING_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_SPMX_SCALING_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_SPMX_OP_SCALING_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_SPMX_OP_SCALING_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_SPMX_OP_NEGATING_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_SPMX_OP_NEGATING_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_SPMX_OP_ADDING_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_SPMX_OP_ADDING_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_SPSX_OP_SCALING_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_SPSX_OP_SCALING_KERNEL_MOPS)')dnl
+dnl define(`RSB_M4_IS_SPSX_OP_NEGATING_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_SPSX_OP_NEGATING_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_SPSX_OP_SETTING_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_SPSX_OP_SETTING_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_SPSX_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_SPSX_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_SPSX_SCALING_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_SPSX_SCALING_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_SPXX_SCALING_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_SPSX_SCALING_KERNEL_MOPS,RSB_M4_SPMX_SCALING_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_SPXX_OP_SCALING_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_SPSX_OP_SCALING_KERNEL_MOPS,RSB_M4_SPMX_OP_SCALING_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_SPXX_TWO_VECTORS_OPERATING_KERNEL_MOP',`RSB_M4_IS_SPXX_KERNEL_MOP($1)')dnl
+define(`RSB_M4_IS_SCALING_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_SCALING_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_SCALING_OR_ZEROING_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_SCALING_KERNEL_MOPS,RSB_M4_ZEROING_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_OP_SCALING_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_OP_SCALING_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_OP_NEGATING_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_OP_NEGATING_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_OP_ADDING_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_OP_ADDING_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_ZEROING_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_ZEROING_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_SPMX_ZEROING_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_SPMX_ZEROING_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_ACC_WRITING_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_SPXX_OP_ACC_WRITING_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_ALLOWING_ALIASING_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_SPXX_ALLOW_ALIASING_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_STRIDED_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_STRIDED_KERNEL_MOPS)')dnl
+define(`RSB_M4_IS_RC_BIASED_KERNEL_MOP',`RSB_M4_MEMBER($1,RSB_M4_SPXX_RC_BIASED_KERNEL_MOPS)')dnl
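+dnl
+dnl	For instance (illustrative, assuming RSB_M4_MEMBER expands to 1 on membership and 0 otherwise):
+dnl	RSB_M4_IS_SPMV_KERNEL_MOP(`spmv_uaua') should expand to 1, and RSB_M4_IS_SPSV_KERNEL_MOP(`spmv_uaua') to 0.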
+dnl
+dnl
+dnl
+dnl
+dnl
+define(`RSB_M4_there_is_real_blocking',`ifelse(`'minor_increment`'major_increment`',`11',`0',`1')')dnl
+define(`RSB_M4_want_verbose_comments',`0')dnl
+define(`RSB_M4_want_old_fortran_float_types',`0')dnl
+dnl
+define(`RSB_M4_there_is_no_real_blocking',`ifelse(`'minor_increment`'major_increment`',`11',`1',`0')')dnl
+dnl
+dnl
+define(`RSB_M4_should_merge_value_after_inner_loop',`dnl
+dnl
+pushdef(`transposed',ifelse(tolowercase(transposition),RSB_M4_TRANS_N,0,1))dnl
+RSB_M4_AND(dnl
+RSB_M4_IS_SPMX_KERNEL_MOP(mop),dnl
+RSB_M4_OR(dnl
+RSB_M4_IS_NOT_UNSYMMETRIC(k_symmetry),dnl
+RSB_M4_AND(RSB_M4_IS_FORMAT_COLUMN_MAJOR(matrix_storage),transposed),dnl
+RSB_M4_AND(RSB_M4_IS_FORMAT_ROW_MAJOR(matrix_storage),RSB_M4_NOT(transposed))))dnl
+popdef(`transposed')dnl
+dnl
+')dnl
+dnl
+define(`RSB_M4_should_merge_value_after_inner_loop_inner',`dnl
+dnl
+pushdef(`transposed',ifelse(tolowercase(transposition),RSB_M4_TRANS_N,0,1))dnl
+RSB_M4_AND(dnl
+RSB_M4_IS_SPMX_KERNEL_MOP(mop),dnl
+RSB_M4_IS_UNSYMMETRIC(k_symmetry),dnl
+RSB_M4_OR(dnl
+RSB_M4_AND(RSB_M4_IS_FORMAT_COLUMN_MAJOR(matrix_storage),transposed),dnl
+RSB_M4_AND(RSB_M4_IS_FORMAT_ROW_MAJOR(matrix_storage),RSB_M4_NOT(transposed))))dnl
+popdef(`transposed')dnl
+dnl
+')dnl
+dnl
+dnl
+define(`RSB_M4_is_transposed_spmv',`dnl
+dnl
+pushdef(`transposed',ifelse(tolowercase(transposition),RSB_M4_TRANS_N,0,1))dnl
+RSB_M4_AND(RSB_M4_OR(transposed,RSB_M4_IS_NOT_UNSYMMETRIC(k_symmetry)),RSB_M4_IS_SPMX_KERNEL_MOP(mop))dnl
+popdef(`transposed')dnl
+dnl
+')dnl
+dnl
+dnl

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/librsb.git


